diff --git a/.github/release.yml b/.github/release.yml
new file mode 100644
index 0000000000..a12c9ce22b
--- /dev/null
+++ b/.github/release.yml
@@ -0,0 +1,34 @@
+# https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes#configuring-automatically-generated-release-notes
+
+changelog:
+  categories:
+    - title: 💥 Breaking Changes
+      labels:
+        - "Type: Breaking change"
+
+    - title: 🚀 New Features
+      labels:
+        - "Type: Feature"
+        - "New data source"
+        - "New resource"
+
+    - title: 🐛 Bugfixes
+      labels:
+        - "Type: Bug"
+
+    - title: 🪦 Deprecations
+      labels:
+        - "Deprecation"
+
+    - title: 🛠️ Maintenance
+      labels:
+        - "Type: Maintenance"
+        - "dependencies"
+
+    - title: 📝 Documentation
+      labels:
+        - "Type: Documentation"
+
+    - title: 🏷 Other Changes
+      labels:
+        - "*"
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 74de0b0f20..dfeee58de0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -8,6 +8,8 @@ on:
 jobs:
   ci:
     runs-on: ubuntu-latest
+    env:
+      GITHUB_TEST_ORGANIZATION: 'kfcampbell-terraform-provider'
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
diff --git a/.github/workflows/immediate-response.yml b/.github/workflows/immediate-response.yml
index 4bb10f5ef3..e8bb8ee5b8 100644
--- a/.github/workflows/immediate-response.yml
+++ b/.github/workflows/immediate-response.yml
@@ -19,7 +19,7 @@ jobs:
         run: echo "NUMBER=${{ github.event.issue.number || github.event.pull_request.number }}" >> "$GITHUB_OUTPUT"
       - name: Respond to issue or PR
-        uses: peter-evans/create-or-update-comment@v3
+        uses: peter-evans/create-or-update-comment@v4
        with:
           issue-number: ${{ steps.extract.outputs.NUMBER }}
           body: >
diff --git a/.golangci.yml b/.golangci.yml
index 7de0f0bbad..9fdcbc6124 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -21,5 +21,5 @@ linters:
 
 linters-settings:
   errcheck:
-    ignore: github.com/hashicorp/terraform-plugin-sdk/helper/schema:ForceNew|Set,fmt:.*,io:Close
+    ignore: github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema:ForceNew|Set,fmt:.*,io:Close
diff --git a/github/config.go b/github/config.go
index 723bffedf1..c6094a9b89 100644
--- a/github/config.go
+++ b/github/config.go
@@ -9,7 +9,7 @@ import (
 	"time"
 
 	"github.com/google/go-github/v57/github"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/logging"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging"
 	"github.com/shurcooL/githubv4"
 	"golang.org/x/oauth2"
 )
@@ -21,6 +21,9 @@ type Config struct {
 	Insecure         bool
 	WriteDelay       time.Duration
 	ReadDelay        time.Duration
+	RetryDelay       time.Duration
+	RetryableErrors  map[int]bool
+	MaxRetries       int
 	ParallelRequests bool
 }
@@ -33,16 +36,20 @@ type Owner struct {
 	IsOrganization bool
 }
 
-func RateLimitedHTTPClient(client *http.Client, writeDelay time.Duration, readDelay time.Duration, parallelRequests bool) *http.Client {
+func RateLimitedHTTPClient(client *http.Client, writeDelay time.Duration, readDelay time.Duration, retryDelay time.Duration, parallelRequests bool, retryableErrors map[int]bool, maxRetries int) *http.Client {
 
 	client.Transport = NewEtagTransport(client.Transport)
 	client.Transport = NewRateLimitTransport(client.Transport, WithWriteDelay(writeDelay), WithReadDelay(readDelay), WithParallelRequests(parallelRequests))
-	client.Transport = logging.NewTransport("GitHub", client.Transport)
+	client.Transport = logging.NewSubsystemLoggingHTTPTransport("GitHub", client.Transport)
 	client.Transport = newPreviewHeaderInjectorTransport(map[string]string{
 		// TODO: remove when Stone Crop preview is moved to general availability in the GraphQL API
 		"Accept": "application/vnd.github.stone-crop-preview+json",
 	}, client.Transport)
 
+	if maxRetries > 0 {
+		client.Transport = NewRetryTransport(client.Transport, WithRetryDelay(retryDelay), WithRetryableErrors(retryableErrors), WithMaxRetries(maxRetries))
+	}
+
 	return client
 }
@@ -54,7 +61,7 @@ func (c *Config) AuthenticatedHTTPClient() *http.Client {
 	)
 	client := oauth2.NewClient(ctx, ts)
 
-	return RateLimitedHTTPClient(client, c.WriteDelay, c.ReadDelay, c.ParallelRequests)
+	return RateLimitedHTTPClient(client, c.WriteDelay, c.ReadDelay, c.RetryDelay, c.ParallelRequests, c.RetryableErrors, c.MaxRetries)
 }
 
 func (c *Config) Anonymous() bool {
@@ -63,7 +70,7 @@ func (c *Config) Anonymous() bool {
 
 func (c *Config) AnonymousHTTPClient() *http.Client {
 	client := &http.Client{Transport: &http.Transport{}}
 
-	return RateLimitedHTTPClient(client, c.WriteDelay, c.ReadDelay, c.ParallelRequests)
+	return RateLimitedHTTPClient(client, c.WriteDelay, c.ReadDelay, c.RetryDelay, c.ParallelRequests, c.RetryableErrors, c.MaxRetries)
 }
 
 func (c *Config) NewGraphQLClient(client *http.Client) (*githubv4.Client, error) {
@@ -130,7 +137,7 @@ func (c *Config) ConfigureOwner(owner *Owner) (*Owner, error) {
 }
 
 // Meta returns the meta parameter that is passed into subsequent resources
-// https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/schema#ConfigureFunc
+// https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema#ConfigureFunc
 func (c *Config) Meta() (interface{}, error) {
 
 	var client *http.Client
@@ -153,6 +160,7 @@ func (c *Config) Meta() (interface{}, error) {
 	var owner Owner
 	owner.v4client = v4client
 	owner.v3client = v3client
+	owner.StopContext = context.Background()
 
 	_, err = c.ConfigureOwner(&owner)
 	if err != nil {
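// Note: NewRetryTransport and its options are introduced by this change but
// implemented elsewhere in the package, so they do not appear in this diff.
// The following is only a sketch of how a RoundTripper could satisfy the
// calls wired into RateLimitedHTTPClient above; the struct and option shapes
// here are assumptions, not the provider's actual code.

package github

import (
	"net/http"
	"time"
)

type retryTransport struct {
	inner           http.RoundTripper
	retryDelay      time.Duration
	retryableErrors map[int]bool
	maxRetries      int
}

type retryTransportOption func(*retryTransport)

func WithRetryDelay(d time.Duration) retryTransportOption {
	return func(t *retryTransport) { t.retryDelay = d }
}

func WithRetryableErrors(codes map[int]bool) retryTransportOption {
	return func(t *retryTransport) { t.retryableErrors = codes }
}

func WithMaxRetries(n int) retryTransportOption {
	return func(t *retryTransport) { t.maxRetries = n }
}

func NewRetryTransport(inner http.RoundTripper, opts ...retryTransportOption) http.RoundTripper {
	t := &retryTransport{inner: inner}
	for _, opt := range opts {
		opt(t)
	}
	return t
}

// RoundTrip retries requests whose responses carry a status code listed in
// retryableErrors, sleeping retryDelay between attempts. A production version
// would also need to re-wind req.Body (via req.GetBody) before each retry.
func (t *retryTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := t.inner.RoundTrip(req)
	for attempt := 0; attempt < t.maxRetries; attempt++ {
		if err != nil || resp == nil || !t.retryableErrors[resp.StatusCode] {
			break // transport error, missing response, or non-retryable status
		}
		resp.Body.Close() // discard the failed response before retrying
		time.Sleep(t.retryDelay)
		resp, err = t.inner.RoundTrip(req)
	}
	return resp, err
}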
diff --git a/github/config_test.go b/github/config_test.go
index 94472afce6..45cafe69d9 100644
--- a/github/config_test.go
+++ b/github/config_test.go
@@ -58,6 +58,31 @@ func TestAccConfigMeta(t *testing.T) {
 
 	})
 
+	t.Run("returns a v3 REST API client with max retries", func(t *testing.T) {
+
+		config := Config{
+			Token:   testToken,
+			BaseURL: "https://api.github.com/",
+			RetryableErrors: map[int]bool{
+				500: true,
+				502: true,
+			},
+			MaxRetries: 3,
+		}
+		meta, err := config.Meta()
+		if err != nil {
+			t.Fatalf("failed to return meta without error: %s", err.Error())
+		}
+
+		ctx := context.Background()
+		client := meta.(*Owner).v3client
+		_, _, err = client.Meta.Get(ctx)
+		if err != nil {
+			t.Fatalf("failed to validate returned client without error: %s", err.Error())
+		}
+
+	})
+
 	t.Run("returns a v4 GraphQL API client to manage individual resources", func(t *testing.T) {
 
 		config := Config{
diff --git a/github/data_source_github_actions_environment_secrets.go b/github/data_source_github_actions_environment_secrets.go
index ba0d5c107e..cd2e959bed 100644
--- a/github/data_source_github_actions_environment_secrets.go
+++ b/github/data_source_github_actions_environment_secrets.go
@@ -7,7 +7,7 @@ import (
 
 	"github.com/google/go-github/v57/github"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubActionsEnvironmentSecrets() *schema.Resource {
diff --git a/github/data_source_github_actions_environment_secrets_test.go b/github/data_source_github_actions_environment_secrets_test.go
index 8c4087ad6c..766e37ad31 100644
--- a/github/data_source_github_actions_environment_secrets_test.go
+++ b/github/data_source_github_actions_environment_secrets_test.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubActionsEnvironmentSecretsDataSource(t *testing.T) {
diff --git a/github/data_source_github_actions_environment_variables.go b/github/data_source_github_actions_environment_variables.go
index cfa3f5c59a..e183e18506 100644
--- a/github/data_source_github_actions_environment_variables.go
+++ b/github/data_source_github_actions_environment_variables.go
@@ -7,7 +7,7 @@ import (
 
 	"github.com/google/go-github/v57/github"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubActionsEnvironmentVariables() *schema.Resource {
diff --git a/github/data_source_github_actions_environment_variables_test.go b/github/data_source_github_actions_environment_variables_test.go
index 9f0bc2d6fc..e929e53c97 100644
--- a/github/data_source_github_actions_environment_variables_test.go
+++ b/github/data_source_github_actions_environment_variables_test.go
@@ -5,8 +5,8 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubActionsEnvironmentVariablesDataSource(t *testing.T) {
diff --git a/github/data_source_github_actions_organization_oidc_subject_claim_customization_template.go b/github/data_source_github_actions_organization_oidc_subject_claim_customization_template.go
index 3e4fe51627..9594532dbd 100644
--- a/github/data_source_github_actions_organization_oidc_subject_claim_customization_template.go
+++ b/github/data_source_github_actions_organization_oidc_subject_claim_customization_template.go
@@ -1,7 +1,7 @@
 package github
 
 import (
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubActionsOrganizationOIDCSubjectClaimCustomizationTemplate() *schema.Resource {
@@ -38,7 +38,10 @@ func dataSourceGithubActionsOrganizationOIDCSubjectClaimCustomizationTemplateRea
 	}
 
 	d.SetId(orgName)
-	d.Set("include_claim_keys", template.IncludeClaimKeys)
+	err = d.Set("include_claim_keys", template.IncludeClaimKeys)
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_actions_organization_oidc_subject_claim_customization_template_test.go b/github/data_source_github_actions_organization_oidc_subject_claim_customization_template_test.go
index 54f77ea4e4..9bf2f5c92b 100644
--- a/github/data_source_github_actions_organization_oidc_subject_claim_customization_template_test.go
+++ b/github/data_source_github_actions_organization_oidc_subject_claim_customization_template_test.go
@@ -3,7 +3,7 @@ package github
 import (
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubActionsOrganizationOIDCSubjectClaimCustomizationTemplateDataSource(t *testing.T) {
diff --git a/github/data_source_github_actions_organization_public_key.go b/github/data_source_github_actions_organization_public_key.go
index 4ed89a5809..35066e2072 100644
--- a/github/data_source_github_actions_organization_public_key.go
+++ b/github/data_source_github_actions_organization_public_key.go
@@ -3,7 +3,7 @@ package github
 import (
 	"context"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubActionsOrganizationPublicKey() *schema.Resource {
@@ -40,8 +40,14 @@ func dataSourceGithubActionsOrganizationPublicKeyRead(d *schema.ResourceData, me
 	}
 
 	d.SetId(publicKey.GetKeyID())
-	d.Set("key_id", publicKey.GetKeyID())
-	d.Set("key", publicKey.GetKey())
+	err = d.Set("key_id", publicKey.GetKeyID())
+	if err != nil {
+		return err
+	}
+	err = d.Set("key", publicKey.GetKey())
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_actions_organization_public_key_test.go b/github/data_source_github_actions_organization_public_key_test.go
index 604afc0edf..386467ebe1 100644
--- a/github/data_source_github_actions_organization_public_key_test.go
+++ b/github/data_source_github_actions_organization_public_key_test.go
@@ -3,7 +3,7 @@ package github
 import (
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubActionsOrganizationPublicKeyDataSource(t *testing.T) {
diff --git a/github/data_source_github_actions_organization_registration_token.go b/github/data_source_github_actions_organization_registration_token.go
index 321a52ddb0..e8437cfe56 100644
--- a/github/data_source_github_actions_organization_registration_token.go
+++ b/github/data_source_github_actions_organization_registration_token.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"log"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubActionsOrganizationRegistrationToken() *schema.Resource {
@@ -36,8 +36,14 @@ func dataSourceGithubActionsOrganizationRegistrationTokenRead(d *schema.Resource
 	}
 
 	d.SetId(owner)
-	d.Set("token", token.Token)
-	d.Set("expires_at", token.ExpiresAt.Unix())
+	err = d.Set("token", token.Token)
+	if err != nil {
+		return err
+	}
+	err = d.Set("expires_at", token.ExpiresAt.Unix())
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_actions_organization_registration_token_test.go b/github/data_source_github_actions_organization_registration_token_test.go
index cd8040a4bf..e920cdf8e5 100644
--- a/github/data_source_github_actions_organization_registration_token_test.go
+++ b/github/data_source_github_actions_organization_registration_token_test.go
@@ -3,7 +3,7 @@ package github
 import (
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubActionsOrganizationRegistrationTokenDataSource(t *testing.T) {
diff --git a/github/data_source_github_actions_organization_secrets.go b/github/data_source_github_actions_organization_secrets.go
index c7cf9498f1..753f9ef188 100644
--- a/github/data_source_github_actions_organization_secrets.go
+++ b/github/data_source_github_actions_organization_secrets.go
@@ -5,7 +5,7 @@ import (
 
 	"github.com/google/go-github/v57/github"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubActionsOrganizationSecrets() *schema.Resource {
@@ -72,7 +72,10 @@ func dataSourceGithubActionsOrganizationSecretsRead(d *schema.ResourceData, meta
 	}
 
 	d.SetId(owner)
-	d.Set("secrets", all_secrets)
+	err := d.Set("secrets", all_secrets)
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_actions_organization_secrets_test.go b/github/data_source_github_actions_organization_secrets_test.go
index d98df2d511..615ff93d44 100644
--- a/github/data_source_github_actions_organization_secrets_test.go
+++ b/github/data_source_github_actions_organization_secrets_test.go
@@ -5,8 +5,8 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubActionsOrganizationSecretsDataSource(t *testing.T) {
diff --git a/github/data_source_github_actions_organization_variables.go b/github/data_source_github_actions_organization_variables.go
index d445a70c7d..59bc835227 100644
--- a/github/data_source_github_actions_organization_variables.go
+++ b/github/data_source_github_actions_organization_variables.go
@@ -5,7 +5,7 @@ import (
 
 	"github.com/google/go-github/v57/github"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubActionsOrganizationVariables() *schema.Resource {
@@ -76,7 +76,10 @@ func dataSourceGithubActionsOrganizationVariablesRead(d *schema.ResourceData, me
 	}
 
 	d.SetId(owner)
-	d.Set("variables", all_variables)
+	err := d.Set("variables", all_variables)
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_actions_organization_variables_test.go b/github/data_source_github_actions_organization_variables_test.go
index 0861307d8b..a1b0b1eaf2 100644
--- a/github/data_source_github_actions_organization_variables_test.go
+++ b/github/data_source_github_actions_organization_variables_test.go
@@ -5,8 +5,8 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubActionsOrganizationVariablesDataSource(t *testing.T) {
diff --git a/github/data_source_github_actions_public_key.go b/github/data_source_github_actions_public_key.go
index cdf22f2e2c..e8e7b38768 100644
--- a/github/data_source_github_actions_public_key.go
+++ b/github/data_source_github_actions_public_key.go
@@ -3,7 +3,7 @@ package github
 import (
 	"context"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubActionsPublicKey() *schema.Resource {
@@ -40,8 +40,14 @@ func dataSourceGithubActionsPublicKeyRead(d *schema.ResourceData, meta interface
 	}
 
 	d.SetId(publicKey.GetKeyID())
-	d.Set("key_id", publicKey.GetKeyID())
-	d.Set("key", publicKey.GetKey())
+	err = d.Set("key_id", publicKey.GetKeyID())
+	if err != nil {
+		return err
+	}
+	err = d.Set("key", publicKey.GetKey())
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_actions_public_key_test.go b/github/data_source_github_actions_public_key_test.go
index 0b4ebc9422..5f4b1803c7 100644
--- a/github/data_source_github_actions_public_key_test.go
+++ b/github/data_source_github_actions_public_key_test.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubActionsPublicKeyDataSource(t *testing.T) {
diff --git a/github/data_source_github_actions_registration_token.go b/github/data_source_github_actions_registration_token.go
index 3e4997b46d..7ce08d7bfe 100644
--- a/github/data_source_github_actions_registration_token.go
+++ b/github/data_source_github_actions_registration_token.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"log"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubActionsRegistrationToken() *schema.Resource {
@@ -42,8 +42,14 @@ func dataSourceGithubActionsRegistrationTokenRead(d *schema.ResourceData, meta i
 	}
 
 	d.SetId(fmt.Sprintf("%s/%s", owner, repoName))
-	d.Set("token", token.Token)
-	d.Set("expires_at", token.ExpiresAt.Unix())
+	err = d.Set("token", token.Token)
+	if err != nil {
+		return err
+	}
+	err = d.Set("expires_at", token.ExpiresAt.Unix())
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_actions_registration_token_test.go b/github/data_source_github_actions_registration_token_test.go
index a21bcf671e..ca96873df9 100644
--- a/github/data_source_github_actions_registration_token_test.go
+++ b/github/data_source_github_actions_registration_token_test.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubActionsRegistrationTokenDataSource(t *testing.T) {
diff --git a/github/data_source_github_actions_repository_oidc_subject_claim_customization_template.go b/github/data_source_github_actions_repository_oidc_subject_claim_customization_template.go
index 3f5adf371b..b551edf199 100644
--- a/github/data_source_github_actions_repository_oidc_subject_claim_customization_template.go
+++ b/github/data_source_github_actions_repository_oidc_subject_claim_customization_template.go
@@ -1,6 +1,6 @@
 package github
 
-import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 
 func dataSourceGithubActionsRepositoryOIDCSubjectClaimCustomizationTemplate() *schema.Resource {
 	return &schema.Resource{
@@ -40,8 +40,14 @@ func dataSourceGithubActionsRepositoryOIDCSubjectClaimCustomizationTemplateRead(
 	}
 
 	d.SetId(repository)
-	d.Set("use_default", template.UseDefault)
-	d.Set("include_claim_keys", template.IncludeClaimKeys)
+	err = d.Set("use_default", template.UseDefault)
+	if err != nil {
+		return err
+	}
+	err = d.Set("include_claim_keys", template.IncludeClaimKeys)
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_actions_repository_oidc_subject_claim_customization_template_test.go b/github/data_source_github_actions_repository_oidc_subject_claim_customization_template_test.go
index fa3f19a6a1..7223718712 100644
--- a/github/data_source_github_actions_repository_oidc_subject_claim_customization_template_test.go
+++ b/github/data_source_github_actions_repository_oidc_subject_claim_customization_template_test.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubActionsRepositoryOIDCSubjectClaimCustomizationTemplateDataSource(t *testing.T) {
diff --git a/github/data_source_github_actions_secrets.go b/github/data_source_github_actions_secrets.go
index 9a5b81a6e8..2f55d617b0 100644
--- a/github/data_source_github_actions_secrets.go
+++ b/github/data_source_github_actions_secrets.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/google/go-github/v57/github"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubActionsSecrets() *schema.Resource {
@@ -96,7 +96,10 @@ func dataSourceGithubActionsSecretsRead(d *schema.ResourceData, meta interface{}
 	}
 
 	d.SetId(repoName)
-	d.Set("secrets", all_secrets)
+	err := d.Set("secrets", all_secrets)
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_actions_secrets_test.go b/github/data_source_github_actions_secrets_test.go
index 1541ec4d84..da78842432 100644
--- a/github/data_source_github_actions_secrets_test.go
+++ b/github/data_source_github_actions_secrets_test.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubActionsSecretsDataSource(t *testing.T) {
diff --git a/github/data_source_github_actions_variables.go b/github/data_source_github_actions_variables.go
index ba7edcc702..0fb3cd081c 100644
--- a/github/data_source_github_actions_variables.go
+++ b/github/data_source_github_actions_variables.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/google/go-github/v57/github"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubActionsVariables() *schema.Resource {
@@ -101,7 +101,10 @@ func dataSourceGithubActionsVariablesRead(d *schema.ResourceData, meta interface
 	}
 
 	d.SetId(repoName)
-	d.Set("variables", all_variables)
+	err := d.Set("variables", all_variables)
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_actions_variables_test.go b/github/data_source_github_actions_variables_test.go
index b833bdedc0..37c01940a7 100644
--- a/github/data_source_github_actions_variables_test.go
+++ b/github/data_source_github_actions_variables_test.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubActionsVariablesDataSource(t *testing.T) {
diff --git a/github/data_source_github_app.go b/github/data_source_github_app.go
index 533d517429..e4e22ba942 100644
--- a/github/data_source_github_app.go
+++ b/github/data_source_github_app.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"strconv"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubApp() *schema.Resource {
@@ -44,9 +44,18 @@ func dataSourceGithubAppRead(d *schema.ResourceData, meta interface{}) error {
 	}
 
 	d.SetId(strconv.FormatInt(app.GetID(), 10))
-	d.Set("description", app.GetDescription())
-	d.Set("name", app.GetName())
-	d.Set("node_id", app.GetNodeID())
+	err = d.Set("description", app.GetDescription())
+	if err != nil {
+		return err
+	}
+	err = d.Set("name", app.GetName())
+	if err != nil {
+		return err
+	}
+	err = d.Set("node_id", app.GetNodeID())
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_app_token.go b/github/data_source_github_app_token.go
index 1dd0910c93..e726105fcc 100644
--- a/github/data_source_github_app_token.go
+++ b/github/data_source_github_app_token.go
@@ -3,7 +3,7 @@ package github
 import (
 	"strings"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubAppToken() *schema.Resource {
@@ -56,7 +56,10 @@ func dataSourceGithubAppTokenRead(d *schema.ResourceData, meta interface{}) erro
 	if err != nil {
 		return err
 	}
-	d.Set("token", token)
+	err = d.Set("token", token)
+	if err != nil {
+		return err
+	}
 	d.SetId("id")
 
 	return nil
diff --git a/github/data_source_github_app_token_test.go b/github/data_source_github_app_token_test.go
index eb7edb97d9..ebdc1e99f3 100644
--- a/github/data_source_github_app_token_test.go
+++ b/github/data_source_github_app_token_test.go
@@ -8,7 +8,7 @@ import (
 	"testing"
 
 	"github.com/google/go-github/v57/github"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/stretchr/testify/assert"
 )
diff --git a/github/data_source_github_branch.go b/github/data_source_github_branch.go
index 2ad176ae74..c110c7ed21 100644
--- a/github/data_source_github_branch.go
+++ b/github/data_source_github_branch.go
@@ -6,7 +6,7 @@ import (
 	"net/http"
 
 	"github.com/google/go-github/v57/github"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubBranch() *schema.Resource {
@@ -60,9 +60,18 @@ func dataSourceGithubBranchRead(d *schema.ResourceData, meta interface{}) error
 	}
 
 	d.SetId(buildTwoPartID(repoName, branchName))
-	d.Set("etag", resp.Header.Get("ETag"))
-	d.Set("ref", *ref.Ref)
-	d.Set("sha", *ref.Object.SHA)
+	err = d.Set("etag", resp.Header.Get("ETag"))
+	if err != nil {
+		return err
+	}
+	err = d.Set("ref", *ref.Ref)
+	if err != nil {
+		return err
+	}
+	err = d.Set("sha", *ref.Object.SHA)
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_branch_protection_rules.go b/github/data_source_github_branch_protection_rules.go
index 1726f35cb8..e1cc9705bc 100644
--- a/github/data_source_github_branch_protection_rules.go
+++ b/github/data_source_github_branch_protection_rules.go
@@ -1,7 +1,7 @@
 package github
 
 import (
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/shurcooL/githubv4"
 )
@@ -75,7 +75,10 @@ func dataSourceGithubBranchProtectionRulesRead(d *schema.ResourceData, meta inte
 	}
 
 	d.SetId(string(query.Repository.ID))
-	d.Set("rules", rules)
+	err := d.Set("rules", rules)
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_branch_protection_rules_test.go b/github/data_source_github_branch_protection_rules_test.go
index bda38a685a..39faaa1414 100644
--- a/github/data_source_github_branch_protection_rules_test.go
+++ b/github/data_source_github_branch_protection_rules_test.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubBranchProtectionRulesDataSource(t *testing.T) {
diff --git a/github/data_source_github_branch_test.go b/github/data_source_github_branch_test.go
index a90dda10bb..d30f29481f 100644
--- a/github/data_source_github_branch_test.go
+++ b/github/data_source_github_branch_test.go
@@ -5,8 +5,8 @@ import (
 	"fmt"
 	"regexp"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubBranchDataSource(t *testing.T) {
diff --git a/github/data_source_github_codespaces_organization_public_key.go b/github/data_source_github_codespaces_organization_public_key.go
index 3d654df81e..9487d76987 100644
--- a/github/data_source_github_codespaces_organization_public_key.go
+++ b/github/data_source_github_codespaces_organization_public_key.go
@@ -3,7 +3,7 @@ package github
 import (
 	"context"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubCodespacesOrganizationPublicKey() *schema.Resource {
@@ -40,8 +40,14 @@ func dataSourceGithubCodespacesOrganizationPublicKeyRead(d *schema.ResourceData,
 	}
 
 	d.SetId(publicKey.GetKeyID())
-	d.Set("key_id", publicKey.GetKeyID())
-	d.Set("key", publicKey.GetKey())
+	err = d.Set("key_id", publicKey.GetKeyID())
+	if err != nil {
+		return err
+	}
+	err = d.Set("key", publicKey.GetKey())
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_codespaces_organization_public_key_test.go b/github/data_source_github_codespaces_organization_public_key_test.go
index d76a0f7c5d..cdf07c1f17 100644
--- a/github/data_source_github_codespaces_organization_public_key_test.go
+++ b/github/data_source_github_codespaces_organization_public_key_test.go
@@ -3,7 +3,7 @@ package github
 import (
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubCodespacesOrganizationPublicKeyDataSource(t *testing.T) {
diff --git a/github/data_source_github_codespaces_organization_secrets.go b/github/data_source_github_codespaces_organization_secrets.go
index 6fdc19c8c0..a426b14b28 100644
--- a/github/data_source_github_codespaces_organization_secrets.go
+++ b/github/data_source_github_codespaces_organization_secrets.go
@@ -5,7 +5,7 @@ import (
 
 	"github.com/google/go-github/v57/github"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubCodespacesOrganizationSecrets() *schema.Resource {
@@ -72,7 +72,10 @@ func dataSourceGithubCodespacesOrganizationSecretsRead(d *schema.ResourceData, m
 	}
 
 	d.SetId(owner)
-	d.Set("secrets", all_secrets)
+	err := d.Set("secrets", all_secrets)
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_codespaces_organization_secrets_test.go b/github/data_source_github_codespaces_organization_secrets_test.go
index d282791d03..6f04a2d8eb 100644
--- a/github/data_source_github_codespaces_organization_secrets_test.go
+++ b/github/data_source_github_codespaces_organization_secrets_test.go
@@ -5,8 +5,8 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubCodespacesOrganizationSecretsDataSource(t *testing.T) {
diff --git a/github/data_source_github_codespaces_public_key.go b/github/data_source_github_codespaces_public_key.go
index 8b528ebbfb..cc886c4c0b 100644
--- a/github/data_source_github_codespaces_public_key.go
+++ b/github/data_source_github_codespaces_public_key.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"log"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubCodespacesPublicKey() *schema.Resource {
@@ -42,8 +42,14 @@ func dataSourceGithubCodespacesPublicKeyRead(d *schema.ResourceData, meta interf
 	}
 
 	d.SetId(publicKey.GetKeyID())
-	d.Set("key_id", publicKey.GetKeyID())
-	d.Set("key", publicKey.GetKey())
+	err = d.Set("key_id", publicKey.GetKeyID())
+	if err != nil {
+		return err
+	}
+	err = d.Set("key", publicKey.GetKey())
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_codespaces_public_key_test.go b/github/data_source_github_codespaces_public_key_test.go
index 727f90e931..431aa8b22b 100644
--- a/github/data_source_github_codespaces_public_key_test.go
+++ b/github/data_source_github_codespaces_public_key_test.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubCodespacesPublicKeyDataSource(t *testing.T) {
diff --git a/github/data_source_github_codespaces_secrets.go b/github/data_source_github_codespaces_secrets.go
index 0dbbe60957..ad80bc1b69 100644
--- a/github/data_source_github_codespaces_secrets.go
+++ b/github/data_source_github_codespaces_secrets.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/google/go-github/v57/github"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubCodespacesSecrets() *schema.Resource {
@@ -100,7 +100,10 @@ func dataSourceGithubCodespacesSecretsRead(d *schema.ResourceData, meta interfac
 	}
 
 	d.SetId(repoName)
-	d.Set("secrets", all_secrets)
+	err := d.Set("secrets", all_secrets)
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_codespaces_secrets_test.go b/github/data_source_github_codespaces_secrets_test.go
index c013b4d6ef..368c1137b6 100644
--- a/github/data_source_github_codespaces_secrets_test.go
+++ b/github/data_source_github_codespaces_secrets_test.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubCodespacesSecretsDataSource(t *testing.T) {
diff --git a/github/data_source_github_codespaces_user_public_key.go b/github/data_source_github_codespaces_user_public_key.go
index d1988f05dd..7ebd119f84 100644
--- a/github/data_source_github_codespaces_user_public_key.go
+++ b/github/data_source_github_codespaces_user_public_key.go
@@ -3,7 +3,7 @@ package github
 import (
 	"context"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubCodespacesUserPublicKey() *schema.Resource {
@@ -34,8 +34,14 @@ func dataSourceGithubCodespacesUserPublicKeyRead(d *schema.ResourceData, meta in
 	}
 
 	d.SetId(publicKey.GetKeyID())
-	d.Set("key_id", publicKey.GetKeyID())
-	d.Set("key", publicKey.GetKey())
+	err = d.Set("key_id", publicKey.GetKeyID())
+	if err != nil {
+		return err
+	}
+	err = d.Set("key", publicKey.GetKey())
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_codespaces_user_public_key_test.go b/github/data_source_github_codespaces_user_public_key_test.go
index 71fec22b42..0b55a18a23 100644
--- a/github/data_source_github_codespaces_user_public_key_test.go
+++ b/github/data_source_github_codespaces_user_public_key_test.go
@@ -3,7 +3,7 @@ package github
 import (
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubCodespacesUserPublicKeyDataSource(t *testing.T) {
diff --git a/github/data_source_github_codespaces_user_secrets.go b/github/data_source_github_codespaces_user_secrets.go
index e297283d0c..bd2953a285 100644
--- a/github/data_source_github_codespaces_user_secrets.go
+++ b/github/data_source_github_codespaces_user_secrets.go
@@ -5,7 +5,7 @@ import (
 
 	"github.com/google/go-github/v57/github"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubCodespacesUserSecrets() *schema.Resource {
@@ -72,7 +72,10 @@ func dataSourceGithubCodespacesUserSecretsRead(d *schema.ResourceData, meta inte
 	}
 
 	d.SetId(owner)
-	d.Set("secrets", all_secrets)
+	err := d.Set("secrets", all_secrets)
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_codespaces_user_secrets_test.go b/github/data_source_github_codespaces_user_secrets_test.go
index f07c5475c0..2192c41f2d 100644
--- a/github/data_source_github_codespaces_user_secrets_test.go
+++ b/github/data_source_github_codespaces_user_secrets_test.go
@@ -5,8 +5,8 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubCodespacesUserSecretsDataSource(t *testing.T) {
diff --git a/github/data_source_github_collaborators.go b/github/data_source_github_collaborators.go
index d8f75547db..94380923ec 100644
--- a/github/data_source_github_collaborators.go
+++ b/github/data_source_github_collaborators.go
@@ -5,8 +5,8 @@ import (
 	"context"
 	"fmt"
 
 	"github.com/google/go-github/v57/github"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 )
 
 func dataSourceGithubCollaborators() *schema.Resource {
@@ -24,11 +24,11 @@ func dataSourceGithubCollaborators() *schema.Resource {
 			},
 			"affiliation": {
 				Type: schema.TypeString,
-				ValidateFunc: validation.StringInSlice([]string{
+				ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{
 					"all",
 					"direct",
 					"outside",
-				}, false),
+				}, false), "affiliation"),
 				Optional: true,
 				Default:  "all",
 			},
@@ -125,9 +125,18 @@ func dataSourceGithubCollaboratorsRead(d *schema.ResourceData, meta interface{})
 	}
 
 	d.SetId(fmt.Sprintf("%s/%s/%s", owner, repo, affiliation))
-	d.Set("owner", owner)
-	d.Set("repository", repo)
-	d.Set("affiliation", affiliation)
+	err := d.Set("owner", owner)
+	if err != nil {
+		return err
+	}
+	err = d.Set("repository", repo)
+	if err != nil {
+		return err
+	}
+	err = d.Set("affiliation", affiliation)
+	if err != nil {
+		return err
+	}
 
 	totalCollaborators := make([]interface{}, 0)
 	for {
@@ -149,7 +158,10 @@ func dataSourceGithubCollaboratorsRead(d *schema.ResourceData, meta interface{})
 		options.Page = resp.NextPage
 	}
 
-	d.Set("collaborator", totalCollaborators)
+	err = d.Set("collaborator", totalCollaborators)
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
diff --git a/github/data_source_github_collaborators_test.go b/github/data_source_github_collaborators_test.go
index 6cf80cf9bf..7d62f557b9 100644
--- a/github/data_source_github_collaborators_test.go
+++ b/github/data_source_github_collaborators_test.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubCollaboratorsDataSource_basic(t *testing.T) {
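// Note: toDiagFunc, used in the collaborators schema above (and again for
// results_per_page later in this patch), is a provider-internal helper whose
// body is not part of this diff. Conceptually it adapts an SDK v1-style
// schema.SchemaValidateFunc, which returns ([]string, []error), to the v2
// schema.SchemaValidateDiagFunc signature. A plausible sketch, with the
// attribute key passed in explicitly because the legacy validator has no
// access to the cty.Path:

package github

import (
	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func toDiagFunc(validate schema.SchemaValidateFunc, key string) schema.SchemaValidateDiagFunc {
	return func(v interface{}, path cty.Path) diag.Diagnostics {
		var diags diag.Diagnostics
		// Run the legacy validator with the attribute key in place of the
		// v1 "key" argument, then translate its results into diagnostics.
		warnings, errors := validate(v, key)
		for _, w := range warnings {
			diags = append(diags, diag.Diagnostic{
				Severity:      diag.Warning,
				Summary:       w,
				AttributePath: path,
			})
		}
		for _, e := range errors {
			diags = append(diags, diag.Diagnostic{
				Severity:      diag.Error,
				Summary:       e.Error(),
				AttributePath: path,
			})
		}
		return diags
	}
}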
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubDependabotOrganizationSecrets() *schema.Resource { @@ -72,7 +72,10 @@ func dataSourceGithubDependabotOrganizationSecretsRead(d *schema.ResourceData, m } d.SetId(owner) - d.Set("secrets", all_secrets) + err := d.Set("secrets", all_secrets) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_dependabot_organization_secrets_test.go b/github/data_source_github_dependabot_organization_secrets_test.go index 59099be8ba..e227b08a21 100644 --- a/github/data_source_github_dependabot_organization_secrets_test.go +++ b/github/data_source_github_dependabot_organization_secrets_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubDependabotOrganizationSecretsDataSource(t *testing.T) { diff --git a/github/data_source_github_dependabot_public_key.go b/github/data_source_github_dependabot_public_key.go index 8a44c84660..91ecafa2f3 100644 --- a/github/data_source_github_dependabot_public_key.go +++ b/github/data_source_github_dependabot_public_key.go @@ -4,7 +4,7 @@ import ( "context" "log" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubDependabotPublicKey() *schema.Resource { @@ -42,8 +42,14 @@ func dataSourceGithubDependabotPublicKeyRead(d *schema.ResourceData, meta interf } d.SetId(publicKey.GetKeyID()) - d.Set("key_id", publicKey.GetKeyID()) - d.Set("key", publicKey.GetKey()) + err = d.Set("key_id", publicKey.GetKeyID()) + if err != nil { + return err + } + err = d.Set("key", publicKey.GetKey()) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_dependabot_public_key_test.go b/github/data_source_github_dependabot_public_key_test.go index d679caaee8..a6de11b4bd 100644 --- a/github/data_source_github_dependabot_public_key_test.go +++ b/github/data_source_github_dependabot_public_key_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubDependabotPublicKeyDataSource(t *testing.T) { diff --git a/github/data_source_github_dependabot_secrets.go b/github/data_source_github_dependabot_secrets.go index 17c6569e97..49b1bce016 100644 --- a/github/data_source_github_dependabot_secrets.go +++ b/github/data_source_github_dependabot_secrets.go @@ -6,7 +6,7 @@ import ( "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubDependabotSecrets() *schema.Resource { @@ -96,7 +96,10 @@ func dataSourceGithubDependabotSecretsRead(d *schema.ResourceData, meta interfac } d.SetId(repoName) - d.Set("secrets", all_secrets) + err := d.Set("secrets", all_secrets) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_dependabot_secrets_test.go b/github/data_source_github_dependabot_secrets_test.go index e5be4c739f..992b69603d 100644 --- a/github/data_source_github_dependabot_secrets_test.go +++ 
b/github/data_source_github_dependabot_secrets_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubDependabotSecretsDataSource(t *testing.T) { diff --git a/github/data_source_github_enterprise.go b/github/data_source_github_enterprise.go index 1f0b5b54bb..7a00664dbf 100644 --- a/github/data_source_github_enterprise.go +++ b/github/data_source_github_enterprise.go @@ -3,7 +3,8 @@ package github import ( "context" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/shurcooL/githubv4" ) @@ -11,6 +12,10 @@ func dataSourceGithubEnterprise() *schema.Resource { return &schema.Resource{ Read: dataSourceGithubEnterpriseRead, Schema: map[string]*schema.Schema{ + "database_id": { + Type: schema.TypeInt, + Computed: true, + }, "slug": { Type: schema.TypeString, Required: true, @@ -39,6 +44,7 @@ func dataSourceGithubEnterpriseRead(data *schema.ResourceData, meta interface{}) var query struct { Enterprise struct { ID githubv4.String + DatabaseId githubv4.Int Name githubv4.String Description githubv4.String CreatedAt githubv4.String @@ -59,10 +65,26 @@ func dataSourceGithubEnterpriseRead(data *schema.ResourceData, meta interface{}) return fmt.Errorf("could not find enterprise %v", slug) } data.SetId(string(query.Enterprise.ID)) - data.Set("name", query.Enterprise.Name) - data.Set("description", query.Enterprise.Description) - data.Set("created_at", query.Enterprise.CreatedAt) - data.Set("url", query.Enterprise.Url) + err = data.Set("name", query.Enterprise.Name) + if err != nil { + return err + } + err = data.Set("description", query.Enterprise.Description) + if err != nil { + return err + } + err = data.Set("created_at", query.Enterprise.CreatedAt) + if err != nil { + return err + } + err = data.Set("url", query.Enterprise.Url) + if err != nil { + return err + } + err = data.Set("database_id", query.Enterprise.DatabaseId) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_enterprise_test.go b/github/data_source_github_enterprise_test.go index 443323c1a4..4ec28ace2b 100644 --- a/github/data_source_github_enterprise_test.go +++ b/github/data_source_github_enterprise_test.go @@ -2,7 +2,7 @@ package github import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "testing" ) diff --git a/github/data_source_github_external_groups.go b/github/data_source_github_external_groups.go index 9a4a2c3fb6..bf516b8603 100644 --- a/github/data_source_github_external_groups.go +++ b/github/data_source_github_external_groups.go @@ -6,7 +6,7 @@ import ( "fmt" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubExternalGroups() *schema.Resource { diff --git a/github/data_source_github_ip_ranges.go b/github/data_source_github_ip_ranges.go index a5115300b2..be1cf4c7ca 100644 --- a/github/data_source_github_ip_ranges.go +++ b/github/data_source_github_ip_ranges.go @@ -4,7 +4,7 @@ import ( "fmt" "net" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) 
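// Note: with shurcooL/githubv4 the query struct itself defines the GraphQL
// document, so adding `DatabaseId githubv4.Int` in the enterprise diff above
// is what requests the enterprise's databaseId field (struct field names are
// lower-camel-cased into the query). A standalone illustration; the token
// source and slug value below are placeholders, not from this patch:

package main

import (
	"context"
	"fmt"

	"github.com/shurcooL/githubv4"
	"golang.org/x/oauth2"
)

func main() {
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "<token>"})
	client := githubv4.NewClient(oauth2.NewClient(context.Background(), src))

	// This struct resolves to:
	//   query($slug: String!) { enterprise(slug: $slug) { id databaseId } }
	var query struct {
		Enterprise struct {
			ID         githubv4.String
			DatabaseId githubv4.Int
		} `graphql:"enterprise(slug: $slug)"`
	}
	variables := map[string]interface{}{
		"slug": githubv4.String("example-enterprise"), // hypothetical slug
	}
	if err := client.Query(context.Background(), &query, variables); err != nil {
		panic(err)
	}
	fmt.Println(query.Enterprise.DatabaseId)
}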
diff --git a/github/data_source_github_external_groups.go b/github/data_source_github_external_groups.go
index 9a4a2c3fb6..bf516b8603 100644
--- a/github/data_source_github_external_groups.go
+++ b/github/data_source_github_external_groups.go
@@ -6,7 +6,7 @@ import (
 	"fmt"
 
 	"github.com/google/go-github/v57/github"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubExternalGroups() *schema.Resource {
diff --git a/github/data_source_github_ip_ranges.go b/github/data_source_github_ip_ranges.go
index a5115300b2..be1cf4c7ca 100644
--- a/github/data_source_github_ip_ranges.go
+++ b/github/data_source_github_ip_ranges.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"net"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func dataSourceGithubIpRanges() *schema.Resource {
@@ -32,6 +32,11 @@ func dataSourceGithubIpRanges() *schema.Resource {
 				Computed: true,
 				Elem:     &schema.Schema{Type: schema.TypeString},
 			},
+			"packages": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
 			"pages": {
 				Type:     schema.TypeList,
 				Computed: true,
@@ -72,6 +77,11 @@ func dataSourceGithubIpRanges() *schema.Resource {
 				Computed: true,
 				Elem:     &schema.Schema{Type: schema.TypeString},
 			},
+			"packages_ipv4": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
 			"pages_ipv4": {
 				Type:     schema.TypeList,
 				Computed: true,
@@ -112,6 +122,11 @@ func dataSourceGithubIpRanges() *schema.Resource {
 				Computed: true,
 				Elem:     &schema.Schema{Type: schema.TypeString},
 			},
+			"packages_ipv6": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
 			"pages_ipv6": {
 				Type:     schema.TypeList,
 				Computed: true,
@@ -154,6 +169,11 @@ func dataSourceGithubIpRangesRead(d *schema.ResourceData, meta interface{}) erro
 		return err
 	}
 
+	cidrPackagesIpv4, cidrPackagesIpv6, err := splitIpv4Ipv6Cidrs(&api.Packages)
+	if err != nil {
+		return err
+	}
+
 	cidrPagesIpv4, cidrPagesIpv6, err := splitIpv4Ipv6Cidrs(&api.Pages)
 	if err != nil {
 		return err
@@ -188,44 +208,130 @@ func dataSourceGithubIpRangesRead(d *schema.ResourceData, meta interface{}) erro
 		d.SetId("github-ip-ranges")
 	}
 
 	if len(api.Hooks) > 0 {
-		d.Set("hooks", api.Hooks)
-		d.Set("hooks_ipv4", cidrHooksIpv4)
-		d.Set("hooks_ipv6", cidrHooksIpv6)
+		err = d.Set("hooks", api.Hooks)
+		if err != nil {
+			return err
+		}
+		err = d.Set("hooks_ipv4", cidrHooksIpv4)
+		if err != nil {
+			return err
+		}
+		err = d.Set("hooks_ipv6", cidrHooksIpv6)
+		if err != nil {
+			return err
+		}
 	}
 
 	if len(api.Git) > 0 {
-		d.Set("git", api.Git)
-		d.Set("git_ipv4", cidrGitIpv4)
-		d.Set("git_ipv6", cidrGitIpv6)
+		err = d.Set("git", api.Git)
+		if err != nil {
+			return err
+		}
+		err = d.Set("git_ipv4", cidrGitIpv4)
+		if err != nil {
+			return err
+		}
+		err = d.Set("git_ipv6", cidrGitIpv6)
+		if err != nil {
+			return err
+		}
+	}
+
+	if len(api.Packages) > 0 {
+		err = d.Set("packages", api.Packages)
+		if err != nil {
+			return err
+		}
+		err = d.Set("packages_ipv4", cidrPackagesIpv4)
+		if err != nil {
+			return err
+		}
+		err = d.Set("packages_ipv6", cidrPackagesIpv6)
+		if err != nil {
+			return err
+		}
 	}
 
 	if len(api.Pages) > 0 {
-		d.Set("pages", api.Pages)
-		d.Set("pages_ipv4", cidrPagesIpv4)
-		d.Set("pages_ipv6", cidrPagesIpv6)
+		err = d.Set("pages", api.Pages)
+		if err != nil {
+			return err
+		}
+		err = d.Set("pages_ipv4", cidrPagesIpv4)
+		if err != nil {
+			return err
+		}
+		err = d.Set("pages_ipv6", cidrPagesIpv6)
+		if err != nil {
+			return err
+		}
 	}
 
 	if len(api.Importer) > 0 {
-		d.Set("importer", api.Importer)
-		d.Set("importer_ipv4", cidrImporterIpv4)
-		d.Set("importer_ipv6", cidrImporterIpv6)
+		err = d.Set("importer", api.Importer)
+		if err != nil {
+			return err
+		}
+		err = d.Set("importer_ipv4", cidrImporterIpv4)
+		if err != nil {
+			return err
+		}
+		err = d.Set("importer_ipv6", cidrImporterIpv6)
+		if err != nil {
+			return err
+		}
 	}
 
 	if len(api.Actions) > 0 {
-		d.Set("actions", api.Actions)
-		d.Set("actions_ipv4", cidrActionsIpv4)
-		d.Set("actions_ipv6", cidrActionsIpv6)
+		err = d.Set("actions", api.Actions)
+		if err != nil {
+			return err
+		}
+		err = d.Set("actions_ipv4", cidrActionsIpv4)
+		if err != nil {
+			return err
+		}
+		err = d.Set("actions_ipv6", cidrActionsIpv6)
+		if err != nil {
+			return err
+		}
 	}
 
 	if len(api.Dependabot) > 0 {
-		d.Set("dependabot", api.Dependabot)
-		d.Set("dependabot_ipv4", cidrDependabotIpv4)
-		d.Set("dependabot_ipv6", cidrDependabotIpv6)
+		err = d.Set("dependabot", api.Dependabot)
+		if err != nil {
+			return err
+		}
+		err = d.Set("dependabot_ipv4", cidrDependabotIpv4)
+		if err != nil {
+			return err
+		}
+		err = d.Set("dependabot_ipv6", cidrDependabotIpv6)
+		if err != nil {
+			return err
+		}
 	}
 
 	if len(api.Web) > 0 {
-		d.Set("web", api.Web)
-		d.Set("web_ipv4", cidrWebIpv4)
-		d.Set("web_ipv6", cidrWebIpv6)
+		err = d.Set("web", api.Web)
+		if err != nil {
+			return err
+		}
+		err = d.Set("web_ipv4", cidrWebIpv4)
+		if err != nil {
+			return err
+		}
+		err = d.Set("web_ipv6", cidrWebIpv6)
+		if err != nil {
+			return err
+		}
 	}
 
 	if len(api.API) > 0 {
-		d.Set("api", api.API)
-		d.Set("api_ipv4", cidrApiIpv4)
-		d.Set("api_ipv6", cidrApiIpv6)
+		err = d.Set("api", api.API)
+		if err != nil {
+			return err
+		}
+		err = d.Set("api_ipv4", cidrApiIpv4)
+		if err != nil {
+			return err
+		}
+		err = d.Set("api_ipv6", cidrApiIpv6)
+		if err != nil {
+			return err
+		}
 	}
 
 	return nil
diff --git a/github/data_source_github_ip_ranges_test.go b/github/data_source_github_ip_ranges_test.go
index 27a1658bfc..f527f59005 100644
--- a/github/data_source_github_ip_ranges_test.go
+++ b/github/data_source_github_ip_ranges_test.go
@@ -3,7 +3,7 @@ package github
 import (
 	"testing"
 
-	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 )
 
 func TestAccGithubIpRangesDataSource(t *testing.T) {
@@ -17,6 +17,7 @@ func TestAccGithubIpRangesDataSource(t *testing.T) {
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "git.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "api.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "web.#"),
+					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "packages.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "pages.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "importer.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "actions.#"),
@@ -25,6 +26,7 @@ func TestAccGithubIpRangesDataSource(t *testing.T) {
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "git_ipv4.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "api_ipv4.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "web_ipv4.#"),
+					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "packages_ipv4.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "pages_ipv4.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "importer_ipv4.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "actions_ipv4.#"),
@@ -33,6 +35,7 @@ func TestAccGithubIpRangesDataSource(t *testing.T) {
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "git_ipv6.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "api_ipv6.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "web_ipv6.#"),
+					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "packages_ipv6.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "pages_ipv6.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "importer_ipv6.#"),
 					resource.TestCheckResourceAttrSet("data.github_ip_ranges.test", "actions_ipv6.#"),
"github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubIssueLabels() *schema.Resource { @@ -77,7 +77,10 @@ func dataSourceGithubIssueLabelsRead(d *schema.ResourceData, meta interface{}) e opts.Page = resp.NextPage } - d.Set("labels", allLabels) + err := d.Set("labels", allLabels) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_issue_labels_test.go b/github/data_source_github_issue_labels_test.go index 94b9caa13b..172c47ba2a 100644 --- a/github/data_source_github_issue_labels_test.go +++ b/github/data_source_github_issue_labels_test.go @@ -5,8 +5,8 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubIssueLabelsDataSource(t *testing.T) { diff --git a/github/data_source_github_membership.go b/github/data_source_github_membership.go index bd6fde0780..20d3b9dde8 100644 --- a/github/data_source_github_membership.go +++ b/github/data_source_github_membership.go @@ -3,7 +3,7 @@ package github import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubMembership() *schema.Resource { @@ -56,9 +56,21 @@ func dataSourceGithubMembershipRead(d *schema.ResourceData, meta interface{}) er d.SetId(buildTwoPartID(membership.GetOrganization().GetLogin(), membership.GetUser().GetLogin())) - d.Set("username", membership.GetUser().GetLogin()) - d.Set("role", membership.GetRole()) - d.Set("etag", resp.Header.Get("ETag")) - d.Set("state", membership.GetState()) + err = d.Set("username", membership.GetUser().GetLogin()) + if err != nil { + return err + } + err = d.Set("role", membership.GetRole()) + if err != nil { + return err + } + err = d.Set("etag", resp.Header.Get("ETag")) + if err != nil { + return err + } + err = d.Set("state", membership.GetState()) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_membership_test.go b/github/data_source_github_membership_test.go index e3127081ad..148250259b 100644 --- a/github/data_source_github_membership_test.go +++ b/github/data_source_github_membership_test.go @@ -5,7 +5,7 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubMembershipDataSource(t *testing.T) { diff --git a/github/data_source_github_organization.go b/github/data_source_github_organization.go index 935ef5b4af..ec42b5f28b 100644 --- a/github/data_source_github_organization.go +++ b/github/data_source_github_organization.go @@ -4,7 +4,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/shurcooL/githubv4" ) @@ -17,6 +17,11 @@ func dataSourceGithubOrganization() *schema.Resource { Type: schema.TypeString, Required: true, }, + "ignore_archived_repos": { + Type: schema.TypeBool, + Default: false, + Optional: true, + }, "orgname": { Type: schema.TypeString, Computed: true, @@ -176,8 +181,15 @@ func dataSourceGithubOrganizationRead(d *schema.ResourceData, meta interface{}) break } } + + 
ignoreArchiveRepos := d.Get("ignore_archived_repos").(bool) for index := range allRepos { - repoList = append(repoList, allRepos[index].GetFullName()) + repo := allRepos[index] + if ignoreArchiveRepos && repo.GetArchived() { + continue + } + + repoList = append(repoList, repo.GetFullName()) } var query struct { diff --git a/github/data_source_github_organization_custom_role.go b/github/data_source_github_organization_custom_role.go index 7f39057793..17327a25cf 100644 --- a/github/data_source_github_organization_custom_role.go +++ b/github/data_source_github_organization_custom_role.go @@ -7,7 +7,7 @@ import ( "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubOrganizationCustomRole() *schema.Resource { @@ -69,10 +69,22 @@ func dataSourceGithubOrganizationCustomRoleRead(d *schema.ResourceData, meta int } d.SetId(fmt.Sprint(*role.ID)) - d.Set("name", role.Name) - d.Set("description", role.Description) - d.Set("base_role", role.BaseRole) - d.Set("permissions", role.Permissions) + err = d.Set("name", role.Name) + if err != nil { + return err + } + err = d.Set("description", role.Description) + if err != nil { + return err + } + err = d.Set("base_role", role.BaseRole) + if err != nil { + return err + } + err = d.Set("permissions", role.Permissions) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_organization_custom_role_test.go b/github/data_source_github_organization_custom_role_test.go index d29e77526f..1fa91d6f83 100644 --- a/github/data_source_github_organization_custom_role_test.go +++ b/github/data_source_github_organization_custom_role_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubOrganizationCustomRoleDataSource(t *testing.T) { diff --git a/github/data_source_github_organization_external_identities.go b/github/data_source_github_organization_external_identities.go index e28449e232..b713d862b2 100644 --- a/github/data_source_github_organization_external_identities.go +++ b/github/data_source_github_organization_external_identities.go @@ -1,7 +1,7 @@ package github import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/shurcooL/githubv4" ) diff --git a/github/data_source_github_organization_external_identities_test.go b/github/data_source_github_organization_external_identities_test.go index a6f3dee5f9..7b328befc0 100644 --- a/github/data_source_github_organization_external_identities_test.go +++ b/github/data_source_github_organization_external_identities_test.go @@ -3,7 +3,7 @@ package github import ( "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubOrganizationExternalIdentities(t *testing.T) { diff --git a/github/data_source_github_organization_ip_allow_list.go b/github/data_source_github_organization_ip_allow_list.go index c7a3dff494..44742575fd 100644 --- a/github/data_source_github_organization_ip_allow_list.go +++ b/github/data_source_github_organization_ip_allow_list.go @@ -3,7 +3,7 @@ package github import ( "context" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/shurcooL/githubv4" ) @@ -119,7 +119,10 @@ func dataSourceGithubOrganizationIpAllowListRead(d *schema.ResourceData, meta in } d.SetId(string(query.Organization.ID)) - d.Set("ip_allow_list", ipAllowList) + err = d.Set("ip_allow_list", ipAllowList) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_organization_ip_allow_list_test.go b/github/data_source_github_organization_ip_allow_list_test.go index b159dd8f8f..06e68015ce 100644 --- a/github/data_source_github_organization_ip_allow_list_test.go +++ b/github/data_source_github_organization_ip_allow_list_test.go @@ -3,7 +3,7 @@ package github import ( "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubOrganizationIpAllowListDataSource(t *testing.T) { diff --git a/github/data_source_github_organization_team_sync_groups.go b/github/data_source_github_organization_team_sync_groups.go index c91f979867..7f170acb18 100644 --- a/github/data_source_github_organization_team_sync_groups.go +++ b/github/data_source_github_organization_team_sync_groups.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubOrganizationTeamSyncGroups() *schema.Resource { diff --git a/github/data_source_github_organization_team_sync_groups_test.go b/github/data_source_github_organization_team_sync_groups_test.go index faaab56d85..bd465a4d7b 100644 --- a/github/data_source_github_organization_team_sync_groups_test.go +++ b/github/data_source_github_organization_team_sync_groups_test.go @@ -3,7 +3,7 @@ package github import ( "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubOrganizationTeamSyncGroupsDataSource_existing(t *testing.T) { diff --git a/github/data_source_github_organization_teams.go b/github/data_source_github_organization_teams.go index 25e906dc8e..9754dbd040 100644 --- a/github/data_source_github_organization_teams.go +++ b/github/data_source_github_organization_teams.go @@ -1,8 +1,8 @@ package github import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/shurcooL/githubv4" ) @@ -22,10 +22,10 @@ func dataSourceGithubOrganizationTeams() *schema.Resource { Default: false, }, "results_per_page": { - Type: schema.TypeInt, - Optional: true, - Default: 100, - ValidateFunc: validation.IntBetween(0, 100), + Type: schema.TypeInt, + Optional: true, + Default: 100, + ValidateDiagFunc: toDiagFunc(validation.IntBetween(0, 100), "results_per_page"), }, "teams": { Type: schema.TypeList, @@ -117,7 +117,10 @@ func dataSourceGithubOrganizationTeamsRead(d *schema.ResourceData, meta interfac } d.SetId(string(query.Organization.ID)) - d.Set("teams", teams) + err = d.Set("teams", teams) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_organization_teams_test.go b/github/data_source_github_organization_teams_test.go index b348158aef..6f163cb2cf 100644 --- 
a/github/data_source_github_organization_teams_test.go +++ b/github/data_source_github_organization_teams_test.go @@ -3,7 +3,7 @@ package github import ( "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubOrganizationTeamsDataSource(t *testing.T) { diff --git a/github/data_source_github_organization_test.go b/github/data_source_github_organization_test.go index 392ea52069..4b66a6e3bd 100644 --- a/github/data_source_github_organization_test.go +++ b/github/data_source_github_organization_test.go @@ -4,7 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubOrganizationDataSource(t *testing.T) { @@ -72,4 +73,69 @@ func TestAccGithubOrganizationDataSource(t *testing.T) { }) }) + + t.Run("queries for an organization with archived repos", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + + config := fmt.Sprintf(` + resource "github_repository" "archived" { + name = "tf-acc-archived-%s" + archived = true + } + + data "github_organization" "skip_archived" { + name = "%s" + ignore_archived_repos = true + depends_on = [ + github_repository.archived, + ] + } + data "github_organization" "all_repos" { + name = "%s" + ignore_archived_repos = false + depends_on = [ + github_repository.archived, + ] + } + + output "should_be_false" { + value = contains(data.github_organization.skip_archived.repositories, github_repository.archived.full_name) + } + output "should_be_true" { + value = contains(data.github_organization.all_repos.repositories, github_repository.archived.full_name) + } + `, randomID, testOrganization, testOrganization) + + check := resource.ComposeTestCheckFunc( + resource.TestCheckOutput("should_be_false", "false"), + resource.TestCheckOutput("should_be_true", "true"), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + }, + }) + } + + t.Run("with an anonymous account", func(t *testing.T) { + t.Skip("anonymous account not supported for this operation") + }) + + t.Run("with an individual account", func(t *testing.T) { + testCase(t, individual) + }) + + t.Run("with an organization account", func(t *testing.T) { + testCase(t, organization) + }) + + }) + } diff --git a/github/data_source_github_organization_webhooks.go b/github/data_source_github_organization_webhooks.go index 26a3b53de4..e38f09b011 100644 --- a/github/data_source_github_organization_webhooks.go +++ b/github/data_source_github_organization_webhooks.go @@ -4,7 +4,7 @@ import ( "context" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubOrganizationWebhooks() *schema.Resource { @@ -70,7 +70,10 @@ func dataSourceGithubOrganizationWebhooksRead(d *schema.ResourceData, meta inter } d.SetId(owner) - d.Set("webhooks", results) + err := d.Set("webhooks", results) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_organization_webhooks_test.go b/github/data_source_github_organization_webhooks_test.go index 41c2a663d8..6343ee0f61 100644 --- 
a/github/data_source_github_organization_webhooks_test.go +++ b/github/data_source_github_organization_webhooks_test.go @@ -3,7 +3,7 @@ package github import ( "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubOrganizationWebhooksDataSource(t *testing.T) { diff --git a/github/data_source_github_ref.go b/github/data_source_github_ref.go index 9904955eba..a65a12a48a 100644 --- a/github/data_source_github_ref.go +++ b/github/data_source_github_ref.go @@ -6,7 +6,7 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRef() *schema.Resource { @@ -62,8 +62,14 @@ func dataSourceGithubRefRead(d *schema.ResourceData, meta interface{}) error { } d.SetId(buildTwoPartID(repoName, ref)) - d.Set("etag", resp.Header.Get("ETag")) - d.Set("sha", *refData.Object.SHA) + err = d.Set("etag", resp.Header.Get("ETag")) + if err != nil { + return err + } + err = d.Set("sha", *refData.Object.SHA) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_ref_test.go b/github/data_source_github_ref_test.go index e36343f98b..b7dad6f02f 100644 --- a/github/data_source_github_ref_test.go +++ b/github/data_source_github_ref_test.go @@ -5,8 +5,8 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRefDataSource(t *testing.T) { diff --git a/github/data_source_github_release.go b/github/data_source_github_release.go index 5b1fe9755b..45ea642f05 100644 --- a/github/data_source_github_release.go +++ b/github/data_source_github_release.go @@ -6,10 +6,10 @@ import ( "strconv" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRelease() *schema.Resource { @@ -28,11 +28,11 @@ func dataSourceGithubRelease() *schema.Resource { "retrieve_by": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{ + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{ "latest", "id", "tag", - }, false), + }, false), "retrieve_by"), }, "release_tag": { Type: schema.TypeString, @@ -187,21 +187,66 @@ func dataSourceGithubReleaseRead(d *schema.ResourceData, meta interface{}) error } d.SetId(strconv.FormatInt(release.GetID(), 10)) - d.Set("release_tag", release.GetTagName()) - d.Set("target_commitish", release.GetTargetCommitish()) - d.Set("name", release.GetName()) - d.Set("body", release.GetBody()) - d.Set("draft", release.GetDraft()) - d.Set("prerelease", release.GetPrerelease()) - d.Set("created_at", release.GetCreatedAt().String()) - d.Set("published_at", release.GetPublishedAt().String()) - d.Set("url", release.GetURL()) - d.Set("html_url", release.GetHTMLURL()) - d.Set("assets_url", release.GetAssetsURL()) - d.Set("asserts_url", release.GetAssetsURL()) // Deprecated, original version of assets_url - d.Set("upload_url", release.GetUploadURL()) - d.Set("zipball_url", release.GetZipballURL()) - 
d.Set("tarball_url", release.GetTarballURL()) + err = d.Set("release_tag", release.GetTagName()) + if err != nil { + return err + } + err = d.Set("target_commitish", release.GetTargetCommitish()) + if err != nil { + return err + } + err = d.Set("name", release.GetName()) + if err != nil { + return err + } + err = d.Set("body", release.GetBody()) + if err != nil { + return err + } + err = d.Set("draft", release.GetDraft()) + if err != nil { + return err + } + err = d.Set("prerelease", release.GetPrerelease()) + if err != nil { + return err + } + err = d.Set("created_at", release.GetCreatedAt().String()) + if err != nil { + return err + } + err = d.Set("published_at", release.GetPublishedAt().String()) + if err != nil { + return err + } + err = d.Set("url", release.GetURL()) + if err != nil { + return err + } + err = d.Set("html_url", release.GetHTMLURL()) + if err != nil { + return err + } + err = d.Set("assets_url", release.GetAssetsURL()) + if err != nil { + return err + } + err = d.Set("asserts_url", release.GetAssetsURL()) // Deprecated, original version of assets_url + if err != nil { + return err + } + err = d.Set("upload_url", release.GetUploadURL()) + if err != nil { + return err + } + err = d.Set("zipball_url", release.GetZipballURL()) + if err != nil { + return err + } + err = d.Set("tarball_url", release.GetTarballURL()) + if err != nil { + return err + } assets := make([]interface{}, 0, len(release.Assets)) for _, releaseAsset := range release.Assets { @@ -223,7 +268,10 @@ func dataSourceGithubReleaseRead(d *schema.ResourceData, meta interface{}) error }) } - d.Set("assets", assets) + err = d.Set("assets", assets) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_release_test.go b/github/data_source_github_release_test.go index eae10c0f0a..3247ae6323 100644 --- a/github/data_source_github_release_test.go +++ b/github/data_source_github_release_test.go @@ -6,7 +6,7 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubReleaseDataSource(t *testing.T) { diff --git a/github/data_source_github_repositories.go b/github/data_source_github_repositories.go index 1ca42e123a..eff4ace49d 100644 --- a/github/data_source_github_repositories.go +++ b/github/data_source_github_repositories.go @@ -4,8 +4,8 @@ import ( "context" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func dataSourceGithubRepositories() *schema.Resource { @@ -18,10 +18,10 @@ func dataSourceGithubRepositories() *schema.Resource { Required: true, }, "sort": { - Type: schema.TypeString, - Default: "updated", - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"stars", "fork", "updated"}, false), + Type: schema.TypeString, + Default: "updated", + Optional: true, + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"stars", "fork", "updated"}, false), "sort"), }, "include_repo_id": { Type: schema.TypeBool, @@ -29,10 +29,10 @@ func dataSourceGithubRepositories() *schema.Resource { Optional: true, }, "results_per_page": { - Type: schema.TypeInt, - Optional: true, - Default: 100, - ValidateFunc: validation.IntBetween(0, 100), + Type: schema.TypeInt, + Optional: true, + Default: 100, + 
ValidateDiagFunc: toDiagFunc(validation.IntBetween(0, 1000), "results_per_page"), }, "full_names": { Type: schema.TypeList, @@ -79,10 +79,19 @@ func dataSourceGithubRepositoriesRead(d *schema.ResourceData, meta interface{}) } d.SetId(query) - d.Set("full_names", fullNames) - d.Set("names", names) + err = d.Set("full_names", fullNames) + if err != nil { + return err + } + err = d.Set("names", names) + if err != nil { + return err + } if includeRepoId { - d.Set("repo_ids", repoIDs) + err = d.Set("repo_ids", repoIDs) + if err != nil { + return err + } } return nil diff --git a/github/data_source_github_repositories_test.go b/github/data_source_github_repositories_test.go index 91b0800711..139c985284 100644 --- a/github/data_source_github_repositories_test.go +++ b/github/data_source_github_repositories_test.go @@ -5,7 +5,7 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoriesDataSource(t *testing.T) { diff --git a/github/data_source_github_repository.go b/github/data_source_github_repository.go index ee80b4bce7..6b745d7f09 100644 --- a/github/data_source_github_repository.go +++ b/github/data_source_github_repository.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRepository() *schema.Resource { @@ -305,7 +305,6 @@ func dataSourceGithubRepository() *schema.Resource { "template": { Type: schema.TypeList, Computed: true, - MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "owner": { @@ -406,7 +405,10 @@ func dataSourceGithubRepositoryRead(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("error setting pages: %w", err) } } else { - d.Set("pages", flattenPages(nil)) + err = d.Set("pages", flattenPages(nil)) + if err != nil { + return err + } } if repo.License != nil { @@ -422,14 +424,20 @@ func dataSourceGithubRepositoryRead(d *schema.ResourceData, meta interface{}) er } if repo.TemplateRepository != nil { - d.Set("template", []interface{}{ + err = d.Set("template", []interface{}{ map[string]interface{}{ "owner": repo.TemplateRepository.Owner.Login, "repository": repo.TemplateRepository.Name, }, }) + if err != nil { + return err + } } else { - d.Set("template", []interface{}{}) + err = d.Set("template", []interface{}{}) + if err != nil { + return err + } } err = d.Set("topics", flattenStringList(repo.Topics)) diff --git a/github/data_source_github_repository_autolink_references.go b/github/data_source_github_repository_autolink_references.go index 76dfd19255..6bcfcfee2a 100644 --- a/github/data_source_github_repository_autolink_references.go +++ b/github/data_source_github_repository_autolink_references.go @@ -4,7 +4,7 @@ import ( "context" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRepositoryAutolinkReferences() *schema.Resource { @@ -64,7 +64,10 @@ func dataSourceGithubRepositoryAutolinkReferencesRead(d *schema.ResourceData, me } d.SetId(repoName) - d.Set("autolink_references", results) + err := d.Set("autolink_references", results) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_repository_autolink_references_test.go 
b/github/data_source_github_repository_autolink_references_test.go index 980efee97d..e8f524ebc3 100644 --- a/github/data_source_github_repository_autolink_references_test.go +++ b/github/data_source_github_repository_autolink_references_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryAutolinkReferencesDataSource(t *testing.T) { diff --git a/github/data_source_github_repository_branches.go b/github/data_source_github_repository_branches.go index 6ca70eee6f..6e3ef0590f 100644 --- a/github/data_source_github_repository_branches.go +++ b/github/data_source_github_repository_branches.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRepositoryBranches() *schema.Resource { @@ -100,8 +100,14 @@ func dataSourceGithubRepositoryBranchesRead(d *schema.ResourceData, meta interfa } d.SetId(fmt.Sprintf("%s/%s", orgName, repoName)) - d.Set("repository", repoName) - d.Set("branches", results) + err := d.Set("repository", repoName) + if err != nil { + return err + } + err = d.Set("branches", results) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_repository_branches_test.go b/github/data_source_github_repository_branches_test.go index 84b7cedc21..b1d7210b05 100644 --- a/github/data_source_github_repository_branches_test.go +++ b/github/data_source_github_repository_branches_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryBranchesDataSource(t *testing.T) { diff --git a/github/data_source_github_repository_deploy_keys.go b/github/data_source_github_repository_deploy_keys.go index 330069913f..2bcb78bb90 100644 --- a/github/data_source_github_repository_deploy_keys.go +++ b/github/data_source_github_repository_deploy_keys.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRepositoryDeployKeys() *schema.Resource { @@ -73,7 +73,10 @@ func dataSourceGithubRepositoryDeployKeysRead(d *schema.ResourceData, meta inter } d.SetId(fmt.Sprintf("%s/%s", owner, repository)) - d.Set("keys", results) + err := d.Set("keys", results) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_repository_deploy_keys_test.go b/github/data_source_github_repository_deploy_keys_test.go index 386941c9d7..8babf7e631 100644 --- a/github/data_source_github_repository_deploy_keys_test.go +++ b/github/data_source_github_repository_deploy_keys_test.go @@ -5,8 +5,8 @@ import ( "path/filepath" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func 
TestAccGithubRepositoryDeployKeysDataSource(t *testing.T) { diff --git a/github/data_source_github_repository_deployment_branch_policies.go b/github/data_source_github_repository_deployment_branch_policies.go index d3615ad466..af4d3b71df 100644 --- a/github/data_source_github_repository_deployment_branch_policies.go +++ b/github/data_source_github_repository_deployment_branch_policies.go @@ -4,7 +4,7 @@ import ( "context" "strconv" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRepositoryDeploymentBranchPolicies() *schema.Resource { @@ -65,7 +65,10 @@ func dataSourceGithubRepositoryDeploymentBranchPoliciesRead(d *schema.ResourceDa } d.SetId(repoName + ":" + environmentName) - d.Set("deployment_branch_policies", results) + err = d.Set("deployment_branch_policies", results) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_repository_deployment_branch_policies_test.go b/github/data_source_github_repository_deployment_branch_policies_test.go index f32d54ba98..bf8ae0936b 100644 --- a/github/data_source_github_repository_deployment_branch_policies_test.go +++ b/github/data_source_github_repository_deployment_branch_policies_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryDeploymentBranchPolicies(t *testing.T) { diff --git a/github/data_source_github_repository_environments.go b/github/data_source_github_repository_environments.go index 9f3ec4ccd7..96ae3c7d3d 100644 --- a/github/data_source_github_repository_environments.go +++ b/github/data_source_github_repository_environments.go @@ -4,7 +4,7 @@ import ( "context" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRepositoryEnvironments() *schema.Resource { @@ -60,7 +60,10 @@ func dataSourceGithubRepositoryEnvironmentsRead(d *schema.ResourceData, meta int } d.SetId(repoName) - d.Set("environments", results) + err := d.Set("environments", results) + if err != nil { + return err + } return nil } diff --git a/github/data_source_github_repository_environments_test.go b/github/data_source_github_repository_environments_test.go index 059cc0237a..55cbb5f16b 100644 --- a/github/data_source_github_repository_environments_test.go +++ b/github/data_source_github_repository_environments_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryEnvironmentsDataSource(t *testing.T) { diff --git a/github/data_source_github_repository_file.go b/github/data_source_github_repository_file.go index 660fa19088..1f327aa4ee 100644 --- a/github/data_source_github_repository_file.go +++ b/github/data_source_github_repository_file.go @@ -9,12 +9,13 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRepositoryFile() *schema.Resource { return &schema.Resource{ - Read: dataSourceGithubRepositoryFileRead, + ReadContext: dataSourceGithubRepositoryFileRead, Schema: map[string]*schema.Schema{ "repository": { Type: schema.TypeString, @@ -70,12 +71,12 @@ func dataSourceGithubRepositoryFile() *schema.Resource { } } -func dataSourceGithubRepositoryFileRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceGithubRepositoryFileRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*Owner).v3client - ctx := context.WithValue(context.Background(), ctxId, d.Id()) owner := meta.(*Owner).name repo := d.Get("repository").(string) + diags := make(diag.Diagnostics, 0) // checking if repo has a slash in it, which means that full_name was passed // split and replace owner and repo @@ -104,7 +105,7 @@ func dataSourceGithubRepositoryFileRead(d *schema.ResourceData, meta interface{} return nil } } - return err + return diag.FromErr(err) } d.Set("repository", repo) @@ -119,7 +120,7 @@ func dataSourceGithubRepositoryFileRead(d *schema.ResourceData, meta interface{} content, err := fc.GetContent() if err != nil { - return err + return diag.FromErr(err) } d.Set("content", content) @@ -127,26 +128,40 @@ func dataSourceGithubRepositoryFileRead(d *schema.ResourceData, meta interface{} parsedUrl, err := url.Parse(fc.GetURL()) if err != nil { - return err + return diag.FromErr(err) } parsedQuery, err := url.ParseQuery(parsedUrl.RawQuery) if err != nil { - return err + return diag.FromErr(err) } ref := parsedQuery["ref"][0] - d.Set("ref", ref) + if err = d.Set("ref", ref); err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Unable to set ref", + Detail: fmt.Sprintf("Unable to set ref: %s", err), + }) + } log.Printf("[DEBUG] Data Source fetching commit info for repository file: %s/%s/%s", owner, repo, file) commit, err := getFileCommit(client, owner, repo, file, ref) log.Printf("[DEBUG] Found file: %s/%s/%s, in commit SHA: %s ", owner, repo, file, commit.GetSHA()) if err != nil { - return err + return diag.FromErr(err) } - d.Set("commit_sha", commit.GetSHA()) - d.Set("commit_author", commit.Commit.GetCommitter().GetName()) - d.Set("commit_email", commit.Commit.GetCommitter().GetEmail()) - d.Set("commit_message", commit.GetCommit().GetMessage()) + if err = d.Set("commit_sha", commit.GetSHA()); err != nil { + return diag.FromErr(err) + } + if err = d.Set("commit_author", commit.Commit.GetCommitter().GetName()); err != nil { + return diag.FromErr(err) + } + if err = d.Set("commit_email", commit.Commit.GetCommitter().GetEmail()); err != nil { + return diag.FromErr(err) + } + if err = d.Set("commit_message", commit.GetCommit().GetMessage()); err != nil { + return diag.FromErr(err) + } - return nil + return diags } diff --git a/github/data_source_github_repository_file_test.go b/github/data_source_github_repository_file_test.go index 82a4ca7e17..c6d2f27e91 100644 --- a/github/data_source_github_repository_file_test.go +++ b/github/data_source_github_repository_file_test.go @@ -1,6 +1,7 @@ package github import ( + "context" "encoding/base64" "encoding/json" "fmt" @@ -11,9 +12,10 @@ import ( "github.com/google/go-github/v57/github" "github.com/stretchr/testify/assert" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func TestAccGithubRepositoryFileDataSource(t *testing.T) { @@ -282,28 +284,36 @@ func TestDataSourceGithubRepositoryFileRead(t *testing.T) { } testSchema := map[string]*schema.Schema{ - "repository": {Type: schema.TypeString}, - "file": {Type: schema.TypeString}, - "branch": {Type: schema.TypeString}, - "commit_sha": {Type: schema.TypeString}, - "content": {Type: schema.TypeString}, - "id": {Type: schema.TypeString}, + "repository": {Type: schema.TypeString}, + "file": {Type: schema.TypeString}, + "branch": {Type: schema.TypeString}, + "commit_sha": {Type: schema.TypeString}, + "commit_email": {Type: schema.TypeString}, + "commit_author": {Type: schema.TypeString}, + "commit_message": {Type: schema.TypeString}, + "content": {Type: schema.TypeString}, + "id": {Type: schema.TypeString}, } schema := schema.TestResourceDataRaw(t, testSchema, map[string]interface{}{ - "repository": repositoryFullName, - "file": fileName, - "branch": branch, - "commit_sha": sha, - "content": "", - "id": "", + "repository": repositoryFullName, + "file": fileName, + "branch": branch, + "commit_sha": sha, + "commit_email": committerEmail, + "commit_author": committerName, + "commit_message": commitMessage, + "content": "", + "id": "", }) // actual call - err := dataSourceGithubRepositoryFileRead(schema, meta) + diags := dataSourceGithubRepositoryFileRead(context.Background(), schema, meta) // assertions - assert.Nil(t, err) + for _, diagnostic := range diags { + assert.Equal(t, diag.Warning, diagnostic.Severity) + } assert.Equal(t, expectedRepo, schema.Get("repository")) assert.Equal(t, fileContent, schema.Get("content")) assert.Equal(t, expectedID, schema.Get("id")) @@ -346,28 +356,36 @@ func TestDataSourceGithubRepositoryFileRead(t *testing.T) { } testSchema := map[string]*schema.Schema{ - "repository": {Type: schema.TypeString}, - "file": {Type: schema.TypeString}, - "branch": {Type: schema.TypeString}, - "commit_sha": {Type: schema.TypeString}, - "content": {Type: schema.TypeString}, - "id": {Type: schema.TypeString}, + "repository": {Type: schema.TypeString}, + "file": {Type: schema.TypeString}, + "branch": {Type: schema.TypeString}, + "commit_sha": {Type: schema.TypeString}, + "commit_email": {Type: schema.TypeString}, + "commit_author": {Type: schema.TypeString}, + "commit_message": {Type: schema.TypeString}, + "content": {Type: schema.TypeString}, + "id": {Type: schema.TypeString}, } schema := schema.TestResourceDataRaw(t, testSchema, map[string]interface{}{ - "repository": repositoryFullName, - "file": fileName, - "branch": branch, - "commit_sha": sha, - "content": "", - "id": "", + "repository": repositoryFullName, + "file": fileName, + "branch": branch, + "commit_sha": sha, + "commit_email": committerEmail, + "commit_author": committerName, + "commit_message": commitMessage, + "content": "", + "id": "", }) // actual call - err := dataSourceGithubRepositoryFileRead(schema, meta) + diags := dataSourceGithubRepositoryFileRead(context.Background(), schema, meta) // assertions - assert.Nil(t, err) + for _, diagnostic := range diags { + assert.Equal(t, diag.Warning, diagnostic.Severity) + } assert.Equal(t, expectedRepo, schema.Get("repository")) assert.Equal(t, fileContent, schema.Get("content")) 
assert.Equal(t, expectedID, schema.Get("id")) @@ -475,10 +493,12 @@ func TestDataSourceGithubRepositoryFileRead(t *testing.T) { }) // actual call - err := dataSourceGithubRepositoryFileRead(schema, meta) + diags := dataSourceGithubRepositoryFileRead(context.Background(), schema, meta) // assertions - assert.Nil(t, err) + for _, diagnostic := range diags { + assert.Equal(t, diagnostic.Severity, diag.Warning) + } assert.Equal(t, expectedRepo, schema.Get("repository")) assert.Equal(t, expectedID, schema.Get("id")) assert.Equal(t, "", schema.Get("content")) diff --git a/github/data_source_github_repository_milestone.go b/github/data_source_github_repository_milestone.go index 54c4628845..1fb562ac78 100644 --- a/github/data_source_github_repository_milestone.go +++ b/github/data_source_github_repository_milestone.go @@ -2,8 +2,9 @@ package github import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRepositoryMilestone() *schema.Resource { @@ -57,10 +58,18 @@ func dataSourceGithubRepositoryMilestoneRead(d *schema.ResourceData, meta interf } d.SetId(strconv.FormatInt(milestone.GetID(), 10)) - d.Set("description", milestone.GetDescription()) - d.Set("due_date", milestone.GetDueOn().Format(layoutISO)) - d.Set("state", milestone.GetState()) - d.Set("title", milestone.GetTitle()) + if err = d.Set("description", milestone.GetDescription()); err != nil { + return err + } + if err = d.Set("due_date", milestone.GetDueOn().Format(layoutISO)); err != nil { + return err + } + if err = d.Set("state", milestone.GetState()); err != nil { + return err + } + if err = d.Set("title", milestone.GetTitle()); err != nil { + return err + } return nil } diff --git a/github/data_source_github_repository_milestone_test.go b/github/data_source_github_repository_milestone_test.go index 577bd9b426..702772145e 100644 --- a/github/data_source_github_repository_milestone_test.go +++ b/github/data_source_github_repository_milestone_test.go @@ -4,9 +4,9 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryMilestoneDataSource(t *testing.T) { diff --git a/github/data_source_github_repository_pull_request.go b/github/data_source_github_repository_pull_request.go index 34db730f43..d5f0076cd3 100644 --- a/github/data_source_github_repository_pull_request.go +++ b/github/data_source_github_repository_pull_request.go @@ -4,7 +4,7 @@ import ( "context" "strconv" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRepositoryPullRequest() *schema.Resource { @@ -108,41 +108,73 @@ func dataSourceGithubRepositoryPullRequestRead(d *schema.ResourceData, meta inte } if head := pullRequest.GetHead(); head != nil { - d.Set("head_ref", head.GetRef()) - d.Set("head_sha", head.GetSHA()) + if err = d.Set("head_ref", head.GetRef()); err != nil { + return err + } + if err = d.Set("head_sha", head.GetSHA()); err != nil { + return err + } if headRepo := head.Repo; headRepo != nil { - d.Set("head_repository", headRepo.GetName()) + if err = d.Set("head_repository", headRepo.GetName()); err != nil { + return err + } } if headUser := head.User; headUser != nil { - d.Set("head_owner", 
headUser.GetLogin()) + if err = d.Set("head_owner", headUser.GetLogin()); err != nil { + return err + } } } if base := pullRequest.GetBase(); base != nil { - d.Set("base_ref", base.GetRef()) - d.Set("base_sha", base.GetSHA()) + if err = d.Set("base_ref", base.GetRef()); err != nil { + return err + } + if err = d.Set("base_sha", base.GetSHA()); err != nil { + return err + } } - d.Set("body", pullRequest.GetBody()) - d.Set("draft", pullRequest.GetDraft()) - d.Set("maintainer_can_modify", pullRequest.GetMaintainerCanModify()) - d.Set("number", pullRequest.GetNumber()) - d.Set("opened_at", pullRequest.GetCreatedAt().Unix()) - d.Set("state", pullRequest.GetState()) - d.Set("title", pullRequest.GetTitle()) - d.Set("updated_at", pullRequest.GetUpdatedAt().Unix()) + if err = d.Set("body", pullRequest.GetBody()); err != nil { + return err + } + if err = d.Set("draft", pullRequest.GetDraft()); err != nil { + return err + } + if err = d.Set("maintainer_can_modify", pullRequest.GetMaintainerCanModify()); err != nil { + return err + } + if err = d.Set("number", pullRequest.GetNumber()); err != nil { + return err + } + if err = d.Set("opened_at", pullRequest.GetCreatedAt().Unix()); err != nil { + return err + } + if err = d.Set("state", pullRequest.GetState()); err != nil { + return err + } + if err = d.Set("title", pullRequest.GetTitle()); err != nil { + return err + } + if err = d.Set("updated_at", pullRequest.GetUpdatedAt().Unix()); err != nil { + return err + } if user := pullRequest.GetUser(); user != nil { - d.Set("opened_by", user.GetLogin()) + if err = d.Set("opened_by", user.GetLogin()); err != nil { + return err + } } labels := []string{} for _, label := range pullRequest.Labels { labels = append(labels, label.GetName()) } - d.Set("labels", labels) + if err = d.Set("labels", labels); err != nil { + return err + } d.SetId(buildThreePartID(owner, repository, strconv.Itoa(number))) diff --git a/github/data_source_github_repository_pull_request_test.go b/github/data_source_github_repository_pull_request_test.go index ed74c56494..14057e0197 100644 --- a/github/data_source_github_repository_pull_request_test.go +++ b/github/data_source_github_repository_pull_request_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryPullRequestDataSource(t *testing.T) { diff --git a/github/data_source_github_repository_pull_requests.go b/github/data_source_github_repository_pull_requests.go index 98bee85107..0a969ad2c7 100644 --- a/github/data_source_github_repository_pull_requests.go +++ b/github/data_source_github_repository_pull_requests.go @@ -5,8 +5,8 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) // Docs: https://docs.github.com/en/rest/reference/pulls#list-pull-requests @@ -31,22 +31,22 @@ func dataSourceGithubRepositoryPullRequests() *schema.Resource { Optional: true, }, "sort_by": { - Type: schema.TypeString, - Optional: true, - Default: "created", - ValidateFunc: validation.StringInSlice([]string{"created", "updated", "popularity", "long-running"}, false), + Type: 
schema.TypeString, + Optional: true, + Default: "created", + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"created", "updated", "popularity", "long-running"}, false), "sort_by"), }, "sort_direction": { - Type: schema.TypeString, - Optional: true, - Default: "asc", - ValidateFunc: validation.StringInSlice([]string{"asc", "desc"}, false), + Type: schema.TypeString, + Optional: true, + Default: "asc", + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"asc", "desc"}, false), "sort_direction"), }, "state": { - Type: schema.TypeString, - Default: "open", - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"open", "closed", "all"}, false), + Type: schema.TypeString, + Default: "open", + Optional: true, + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"open", "closed", "all"}, false), "state"), }, "results": { Type: schema.TypeList, @@ -221,7 +221,9 @@ func dataSourceGithubRepositoryPullRequestsRead(d *schema.ResourceData, meta int direction, }, "/")) - d.Set("results", results) + if err := d.Set("results", results); err != nil { + return err + } return nil } diff --git a/github/data_source_github_repository_pull_requests_test.go b/github/data_source_github_repository_pull_requests_test.go index 9752293377..5403820b49 100644 --- a/github/data_source_github_repository_pull_requests_test.go +++ b/github/data_source_github_repository_pull_requests_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryPullRequestsDataSource(t *testing.T) { diff --git a/github/data_source_github_repository_teams.go b/github/data_source_github_repository_teams.go index 765d1de8f2..3a15e69b78 100644 --- a/github/data_source_github_repository_teams.go +++ b/github/data_source_github_repository_teams.go @@ -6,7 +6,7 @@ import ( "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRepositoryTeams() *schema.Resource { @@ -96,7 +96,9 @@ func dataSourceGithubTeamsRead(d *schema.ResourceData, meta interface{}) error { } d.SetId(repoName) - d.Set("teams", all_teams) + if err := d.Set("teams", all_teams); err != nil { + return err + } return nil } diff --git a/github/data_source_github_repository_teams_test.go b/github/data_source_github_repository_teams_test.go index 27276efeef..ca9fa16a4f 100644 --- a/github/data_source_github_repository_teams_test.go +++ b/github/data_source_github_repository_teams_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryTeamsDataSource(t *testing.T) { diff --git a/github/data_source_github_repository_test.go b/github/data_source_github_repository_test.go index d667174e0d..9a79ae5366 100644 --- a/github/data_source_github_repository_test.go +++ b/github/data_source_github_repository_test.go @@ -5,8 +5,8 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryDataSource(t *testing.T) { diff --git a/github/data_source_github_repository_webhooks.go b/github/data_source_github_repository_webhooks.go index bb2a8fa7b9..50d7148f64 100644 --- a/github/data_source_github_repository_webhooks.go +++ b/github/data_source_github_repository_webhooks.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRepositoryWebhooks() *schema.Resource { @@ -77,8 +77,12 @@ func dataSourceGithubRepositoryWebhooksRead(d *schema.ResourceData, meta interfa } d.SetId(fmt.Sprintf("%s/%s", owner, repository)) - d.Set("repository", repository) - d.Set("webhooks", results) + if err := d.Set("repository", repository); err != nil { + return err + } + if err := d.Set("webhooks", results); err != nil { + return err + } return nil } diff --git a/github/data_source_github_repository_webhooks_test.go b/github/data_source_github_repository_webhooks_test.go index e5015e722d..55c9f563e7 100644 --- a/github/data_source_github_repository_webhooks_test.go +++ b/github/data_source_github_repository_webhooks_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryWebhooksDataSource(t *testing.T) { diff --git a/github/data_source_github_rest_api.go b/github/data_source_github_rest_api.go index 1bbd22f023..014ac90528 100644 --- a/github/data_source_github_rest_api.go +++ b/github/data_source_github_rest_api.go @@ -2,8 +2,10 @@ package github import ( "context" + "encoding/json" + "io" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubRestApi() *schema.Resource { @@ -25,11 +27,11 @@ func dataSourceGithubRestApi() *schema.Resource { Computed: true, }, "headers": { - Type: schema.TypeMap, + Type: schema.TypeString, Computed: true, }, "body": { - Type: schema.TypeMap, + Type: schema.TypeString, Computed: true, }, }, @@ -42,20 +44,38 @@ func dataSourceGithubRestApiRead(d *schema.ResourceData, meta interface{}) error client := meta.(*Owner).v3client ctx := context.Background() - var body map[string]interface{} - req, err := client.NewRequest("GET", u, nil) if err != nil { return err } - resp, _ := client.Do(ctx, req, &body) + resp, err := client.Do(ctx, req, nil) + if err != nil && resp.StatusCode != 404 { + return err + } - d.SetId(resp.Header.Get("x-github-request-id")) - d.Set("code", resp.StatusCode) - d.Set("status", resp.Status) - d.Set("headers", resp.Header) - d.Set("body", body) + h, err := json.Marshal(resp.Header) + if err != nil { + return err + } + + b, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + d.SetId(resp.Header.Get("x-github-request-id")) + if err = d.Set("code", resp.StatusCode); err != nil { + return err + } + if err = d.Set("status", resp.Status); err != nil { + return err + } + if err = d.Set("headers", string(h)); err != nil { + return err + } + if err = d.Set("body", string(b)); err != nil { + return err + } return nil } diff --git 
a/github/data_source_github_rest_api_test.go b/github/data_source_github_rest_api_test.go index b69bc6b398..8805921484 100644 --- a/github/data_source_github_rest_api_test.go +++ b/github/data_source_github_rest_api_test.go @@ -3,10 +3,11 @@ package github import ( "fmt" "regexp" + "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRestApiDataSource(t *testing.T) { @@ -30,6 +31,55 @@ func TestAccGithubRestApiDataSource(t *testing.T) { resource.TestMatchResourceAttr( "data.github_rest_api.test", "code", regexp.MustCompile("200"), ), + resource.TestMatchResourceAttr( + "data.github_rest_api.test", "status", regexp.MustCompile("200 OK"), + ), + resource.TestMatchResourceAttr("data.github_rest_api.test", "body", regexp.MustCompile(".*refs/heads/.*")), + resource.TestCheckResourceAttrSet("data.github_rest_api.test", "headers"), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + }, + }) + } + + t.Run("with an anonymous account", func(t *testing.T) { + t.Skip("anonymous account not supported for this operation") + }) + + t.Run("with an individual account", func(t *testing.T) { + testCase(t, individual) + }) + + t.Run("with an organization account", func(t *testing.T) { + testCase(t, organization) + }) + + }) + + t.Run("queries a collection without error", func(t *testing.T) { + + config := fmt.Sprintf(` + resource "github_repository" "test" { + name = "tf-acc-test-%[1]s" + auto_init = true + } + + data "github_rest_api" "test" { + endpoint = "repos/${github_repository.test.full_name}/git/refs/heads/" + } + `, randomID) + + check := resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("data.github_rest_api.test", "body", regexp.MustCompile(`\[.*refs/heads/.*\]`)), ) testCase := func(t *testing.T, mode string) { @@ -76,6 +126,11 @@ func TestAccGithubRestApiDataSource(t *testing.T) { resource.TestMatchResourceAttr( "data.github_rest_api.test", "code", regexp.MustCompile("404"), ), + resource.TestMatchResourceAttr( + "data.github_rest_api.test", "status", regexp.MustCompile("404 Not Found"), + ), + resource.TestCheckResourceAttrSet("data.github_rest_api.test", "body"), + resource.TestCheckResourceAttrSet("data.github_rest_api.test", "headers"), ) testCase := func(t *testing.T, mode string) { @@ -104,4 +159,41 @@ func TestAccGithubRestApiDataSource(t *testing.T) { }) }) + + t.Run("fails for invalid endpoint", func(t *testing.T) { + + // 4096 characters is the maximum length for a URL + var endpoint = strings.Repeat("x", 4096) + config := fmt.Sprintf(` + data "github_rest_api" "test" { + endpoint = "/%v" + } + `, endpoint) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + ExpectError: regexp.MustCompile("Error: GET https://api.github.com/xx.*: 414"), + }, + }, + }) + } + + t.Run("with an anonymous account", func(t *testing.T) { + t.Skip("anonymous account not supported for this operation") + }) + + t.Run("with an individual account", func(t *testing.T) { + testCase(t, individual) + }) + + 
t.Run("with an organization account", func(t *testing.T) { + testCase(t, organization) + }) + + }) } diff --git a/github/data_source_github_ssh_keys.go b/github/data_source_github_ssh_keys.go index 5a548e0be6..494fc84cad 100644 --- a/github/data_source_github_ssh_keys.go +++ b/github/data_source_github_ssh_keys.go @@ -1,6 +1,6 @@ package github -import "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" func dataSourceGithubSshKeys() *schema.Resource { return &schema.Resource{ @@ -25,7 +25,9 @@ func dataSourceGithubSshKeysRead(d *schema.ResourceData, meta interface{}) error } d.SetId("github-ssh-keys") - d.Set("keys", api.SSHKeys) + if err = d.Set("keys", api.SSHKeys); err != nil { + return err + } return nil } diff --git a/github/data_source_github_ssh_keys_test.go b/github/data_source_github_ssh_keys_test.go index 005d3c1f5b..369b74f79d 100644 --- a/github/data_source_github_ssh_keys_test.go +++ b/github/data_source_github_ssh_keys_test.go @@ -3,7 +3,7 @@ package github import ( "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubSshKeysDataSource(t *testing.T) { diff --git a/github/data_source_github_team.go b/github/data_source_github_team.go index 2bcffb9aaa..d33ba62b7e 100644 --- a/github/data_source_github_team.go +++ b/github/data_source_github_team.go @@ -8,8 +8,8 @@ import ( "github.com/shurcooL/githubv4" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func dataSourceGithubTeam() *schema.Resource { @@ -68,10 +68,10 @@ func dataSourceGithubTeam() *schema.Resource { Computed: true, }, "membership_type": { - Type: schema.TypeString, - Default: "all", - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"all", "immediate"}, false), + Type: schema.TypeString, + Default: "all", + Optional: true, + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"all", "immediate"}, false), "membership_type"), }, "summary_only": { Type: schema.TypeBool, @@ -79,10 +79,10 @@ func dataSourceGithubTeam() *schema.Resource { Default: false, }, "results_per_page": { - Type: schema.TypeInt, - Optional: true, - Default: 100, - ValidateFunc: validation.IntBetween(0, 100), + Type: schema.TypeInt, + Optional: true, + Default: 100, + ValidateDiagFunc: toDiagFunc(validation.IntBetween(0, 100), "results_per_page"), }, }, } @@ -192,14 +192,30 @@ func dataSourceGithubTeamRead(d *schema.ResourceData, meta interface{}) error { } d.SetId(strconv.FormatInt(team.GetID(), 10)) - d.Set("name", team.GetName()) - d.Set("members", members) - d.Set("repositories", repositories) - d.Set("repositories_detailed", repositories_detailed) - d.Set("description", team.GetDescription()) - d.Set("privacy", team.GetPrivacy()) - d.Set("permission", team.GetPermission()) - d.Set("node_id", team.GetNodeID()) + if err = d.Set("name", team.GetName()); err != nil { + return err + } + if err = d.Set("members", members); err != nil { + return err + } + if err = d.Set("repositories", repositories); err != nil { + return err + } + if err = d.Set("repositories_detailed", repositories_detailed); err != nil { + return err + } + if err = d.Set("description", team.GetDescription()); err != nil { + return err + } + if err = d.Set("privacy", 
team.GetPrivacy()); err != nil { + return err + } + if err = d.Set("permission", team.GetPermission()); err != nil { + return err + } + if err = d.Set("node_id", team.GetNodeID()); err != nil { + return err + } return nil } diff --git a/github/data_source_github_team_repository_test.go b/github/data_source_github_team_repository_test.go index e0a47ac950..afdc8e0fed 100644 --- a/github/data_source_github_team_repository_test.go +++ b/github/data_source_github_team_repository_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubTeamRepositories(t *testing.T) { diff --git a/github/data_source_github_team_test.go b/github/data_source_github_team_test.go index 37da8a77b3..48345711fd 100644 --- a/github/data_source_github_team_test.go +++ b/github/data_source_github_team_test.go @@ -5,8 +5,8 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubTeamDataSource(t *testing.T) { diff --git a/github/data_source_github_tree.go b/github/data_source_github_tree.go index bf1fee9f65..8f50bc32fc 100644 --- a/github/data_source_github_tree.go +++ b/github/data_source_github_tree.go @@ -3,7 +3,7 @@ package github import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubTree() *schema.Resource { @@ -83,7 +83,9 @@ func dataSourceGithubTreeRead(d *schema.ResourceData, meta interface{}) error { } d.SetId(tree.GetSHA()) - d.Set("entries", entries) + if err = d.Set("entries", entries); err != nil { + return err + } return nil } diff --git a/github/data_source_github_tree_test.go b/github/data_source_github_tree_test.go index 7b61b8f363..e5b5f78358 100644 --- a/github/data_source_github_tree_test.go +++ b/github/data_source_github_tree_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubTreeDataSource(t *testing.T) { diff --git a/github/data_source_github_user.go b/github/data_source_github_user.go index e34d8600db..541b8f69fa 100644 --- a/github/data_source_github_user.go +++ b/github/data_source_github_user.go @@ -4,7 +4,7 @@ import ( "context" "strconv" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceGithubUser() *schema.Resource { @@ -133,26 +133,66 @@ func dataSourceGithubUserRead(d *schema.ResourceData, meta interface{}) error { } d.SetId(strconv.FormatInt(user.GetID(), 10)) - d.Set("login", user.GetLogin()) - d.Set("avatar_url", user.GetAvatarURL()) - d.Set("gravatar_id", user.GetGravatarID()) - d.Set("site_admin", user.GetSiteAdmin()) - d.Set("company", user.GetCompany()) - d.Set("blog", user.GetBlog()) - d.Set("location", user.GetLocation()) - d.Set("name", user.GetName()) - d.Set("email", user.GetEmail()) - d.Set("bio", user.GetBio()) - 
d.Set("gpg_keys", gpgKeys) - d.Set("ssh_keys", sshKeys) - d.Set("public_repos", user.GetPublicRepos()) - d.Set("public_gists", user.GetPublicGists()) - d.Set("followers", user.GetFollowers()) - d.Set("following", user.GetFollowing()) - d.Set("created_at", user.GetCreatedAt()) - d.Set("updated_at", user.GetUpdatedAt()) - d.Set("suspended_at", user.GetSuspendedAt()) - d.Set("node_id", user.GetNodeID()) + if err = d.Set("login", user.GetLogin()); err != nil { + return err + } + if err = d.Set("avatar_url", user.GetAvatarURL()); err != nil { + return err + } + if err = d.Set("gravatar_id", user.GetGravatarID()); err != nil { + return err + } + if err = d.Set("site_admin", user.GetSiteAdmin()); err != nil { + return err + } + if err = d.Set("company", user.GetCompany()); err != nil { + return err + } + if err = d.Set("blog", user.GetBlog()); err != nil { + return err + } + if err = d.Set("location", user.GetLocation()); err != nil { + return err + } + if err = d.Set("name", user.GetName()); err != nil { + return err + } + if err = d.Set("email", user.GetEmail()); err != nil { + return err + } + if err = d.Set("bio", user.GetBio()); err != nil { + return err + } + if err = d.Set("gpg_keys", gpgKeys); err != nil { + return err + } + if err = d.Set("ssh_keys", sshKeys); err != nil { + return err + } + if err = d.Set("public_repos", user.GetPublicRepos()); err != nil { + return err + } + if err = d.Set("public_gists", user.GetPublicGists()); err != nil { + return err + } + if err = d.Set("followers", user.GetFollowers()); err != nil { + return err + } + if err = d.Set("following", user.GetFollowing()); err != nil { + return err + } + if err = d.Set("created_at", user.GetCreatedAt().String()); err != nil { + return err + } + if err = d.Set("updated_at", user.GetUpdatedAt().String()); err != nil { + return err + } + if err = d.Set("suspended_at", user.GetSuspendedAt().String()); err != nil { + return err + } + if err = d.Set("node_id", user.GetNodeID()); err != nil { + return err + } return nil } diff --git a/github/data_source_github_user_external_identity.go b/github/data_source_github_user_external_identity.go index 524c8e1603..67feb4e4d5 100644 --- a/github/data_source_github_user_external_identity.go +++ b/github/data_source_github_user_external_identity.go @@ -3,7 +3,7 @@ package github import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/shurcooL/githubv4" ) diff --git a/github/data_source_github_user_external_identity_test.go b/github/data_source_github_user_external_identity_test.go index 1e506c274e..81c53fd9b4 100644 --- a/github/data_source_github_user_external_identity_test.go +++ b/github/data_source_github_user_external_identity_test.go @@ -3,7 +3,7 @@ package github import ( "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubUserExternalIdentity(t *testing.T) { @@ -14,7 +14,7 @@ func TestAccGithubUserExternalIdentity(t *testing.T) { t.Run("queries without error", func(t *testing.T) { config := ` data "github_user_external_identity" "test" { - + }` diff --git a/github/data_source_github_user_test.go b/github/data_source_github_user_test.go index de1e350de3..68f767b61a 100644 --- a/github/data_source_github_user_test.go +++ b/github/data_source_github_user_test.go @@ -5,7 +5,7 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubUserDataSource(t *testing.T) { @@ -19,7 +19,7 @@ func TestAccGithubUserDataSource(t *testing.T) { `, testOwnerFunc()) check := resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet("data.github_user.test", "name"), + resource.TestCheckResourceAttrSet("data.github_user.test", "login"), resource.TestCheckResourceAttrSet("data.github_user.test", "id"), ) diff --git a/github/data_source_github_users.go b/github/data_source_github_users.go index 7e1431d2f5..71cc07bd36 100644 --- a/github/data_source_github_users.go +++ b/github/data_source_github_users.go @@ -6,7 +6,7 @@ import ( "reflect" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/shurcooL/githubv4" ) @@ -99,10 +99,18 @@ func dataSourceGithubUsersRead(d *schema.ResourceData, meta interface{}) error { } d.SetId(buildChecksumID(usernames)) - d.Set("logins", logins) - d.Set("emails", emails) - d.Set("node_ids", nodeIDs) - d.Set("unknown_logins", unknownLogins) + if err := d.Set("logins", logins); err != nil { + return err + } + if err := d.Set("emails", emails); err != nil { + return err + } + if err := d.Set("node_ids", nodeIDs); err != nil { + return err + } + if err := d.Set("unknown_logins", unknownLogins); err != nil { + return err + } return nil } diff --git a/github/data_source_github_users_test.go b/github/data_source_github_users_test.go index 8f80f2cf62..265d5cce86 100644 --- a/github/data_source_github_users_test.go +++ b/github/data_source_github_users_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubUsersDataSource(t *testing.T) { diff --git a/github/migrate_github_branch_protection.go b/github/migrate_github_branch_protection.go index f54957f23a..2eccae4992 100644 --- a/github/migrate_github_branch_protection.go +++ b/github/migrate_github_branch_protection.go @@ -1,6 +1,10 @@ package github -import "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) func resourceGithubBranchProtectionV0() *schema.Resource { return &schema.Resource{ @@ -19,7 +23,7 @@ func resourceGithubBranchProtectionV0() *schema.Resource { } } -func resourceGithubBranchProtectionUpgradeV0(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { +func resourceGithubBranchProtectionUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { repoName := rawState["repository"].(string) repoID, err := getRepositoryID(repoName, meta) if err != nil { @@ -38,3 +42,40 @@ func resourceGithubBranchProtectionUpgradeV0(rawState map[string]interface{}, me return rawState, nil } + +func resourceGithubBranchProtectionV1() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "push_restrictions": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "blocks_creations": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func resourceGithubBranchProtectionUpgradeV1(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + var blocksCreations bool = false + + if v, ok := rawState["blocks_creations"]; ok { + 
blocksCreations = v.(bool) + } + + if v, ok := rawState["push_restrictions"]; ok { + rawState["restrict_pushes"] = []interface{}{map[string]interface{}{ + "blocks_creations": blocksCreations, + "push_allowances": v, + }} + } + + delete(rawState, "blocks_creations") + delete(rawState, "push_restrictions") + + return rawState, nil +}
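For context, the V0/V1 schemas and upgrade functions above only take effect once they are registered on the resource itself; that registration lives in resource_github_branch_protection.go, which is not part of this diff. A minimal sketch of the usual SDK v2 wiring (the SchemaVersion value and the surrounding resource body are assumptions, not taken from this patch):

```go
package github

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// Hypothetical sketch of how the upgraders are typically attached; the real
// definition lives in resource_github_branch_protection.go (not in this diff).
func resourceGithubBranchProtectionSketch() *schema.Resource {
	return &schema.Resource{
		// ... CRUD functions and the current schema elided ...
		SchemaVersion: 2, // assumed: two upgraders imply the current version is 2
		StateUpgraders: []schema.StateUpgrader{
			{
				Type:    resourceGithubBranchProtectionV0().CoreConfigSchema().ImpliedType(),
				Upgrade: resourceGithubBranchProtectionUpgradeV0,
				Version: 0,
			},
			{
				Type:    resourceGithubBranchProtectionV1().CoreConfigSchema().ImpliedType(),
				Upgrade: resourceGithubBranchProtectionUpgradeV1,
				Version: 1,
			},
		},
	}
}
```

With wiring like this, a state written at schema version 0 passes through both upgrade functions in order, while a version-1 state only runs resourceGithubBranchProtectionUpgradeV1.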
diff --git a/github/migrate_github_repository.go b/github/migrate_github_repository.go index c6a06fbbf8..0cb376ef7d 100644 --- a/github/migrate_github_repository.go +++ b/github/migrate_github_repository.go @@ -2,7 +2,7 @@ package github import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "log" "strings" ) diff --git a/github/migrate_github_repository_test.go b/github/migrate_github_repository_test.go index c15cc472ae..5c416889db 100644 --- a/github/migrate_github_repository_test.go +++ b/github/migrate_github_repository_test.go @@ -4,7 +4,7 @@ import ( "reflect" "testing" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestMigrateGithubRepositoryStateV0toV1(t *testing.T) { diff --git a/github/migrate_github_repository_webhook.go b/github/migrate_github_repository_webhook.go index 4cc8d74f60..a4b1c3e184 100644 --- a/github/migrate_github_repository_webhook.go +++ b/github/migrate_github_repository_webhook.go @@ -5,7 +5,7 @@ import ( "fmt" "log" "strings" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func resourceGithubWebhookMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { diff --git a/github/migrate_github_repository_webhook_test.go b/github/migrate_github_repository_webhook_test.go index a9b8374a43..159345a010 100644 --- a/github/migrate_github_repository_webhook_test.go +++ b/github/migrate_github_repository_webhook_test.go @@ -4,7 +4,7 @@ import ( "reflect" "testing" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestMigrateGithubWebhookStateV0toV1(t *testing.T) { diff --git a/github/provider.go b/github/provider.go index 70441a0349..39741a2c1d 100644 --- a/github/provider.go +++ b/github/provider.go @@ -1,6 +1,7 @@ package github import ( + "context" "fmt" "log" "net/url" @@ -10,11 +11,11 @@ import ( "strings" "time" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func Provider() terraform.ResourceProvider { +func Provider() *schema.Provider { p := &schema.Provider{ Schema: map[string]*schema.Schema{ "token": { @@ -29,6 +30,26 @@ func Provider() terraform.ResourceProvider { DefaultFunc: schema.EnvDefaultFunc("GITHUB_OWNER", nil), Description: descriptions["owner"], }, + "retryable_errors": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeInt}, + Optional: true, + DefaultFunc: func() (interface{}, error) { + defaultErrors := []int{500, 502, 503, 504} + errorInterfaces := make([]interface{}, len(defaultErrors)) + for i, v := range defaultErrors { + errorInterfaces[i] = v + } + return errorInterfaces, nil + }, + Description: descriptions["retryable_errors"], + }, + "max_retries": { + Type: schema.TypeInt, + Optional: true, + Default: 3, + Description: descriptions["max_retries"], + }, "organization": { Type: schema.TypeString, Optional: true, @@ -60,6 +81,12 @@ func Provider() terraform.ResourceProvider { Default: 0, Description: descriptions["read_delay_ms"], }, + "retry_delay_ms": { + Type: schema.TypeInt, + Optional: true, + Default: 1000, + Description: descriptions["retry_delay_ms"], + }, "parallel_requests": { Type: schema.TypeBool, Optional: true, @@ -98,6 +125,7 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ + "github_enterprise_actions_permissions": resourceGithubActionsEnterprisePermissions(), "github_actions_environment_secret": resourceGithubActionsEnvironmentSecret(), "github_actions_environment_variable": resourceGithubActionsEnvironmentVariable(), "github_actions_organization_oidc_subject_claim_customization_template": resourceGithubActionsOrganizationOIDCSubjectClaimCustomizationTemplate(), @@ -166,6 +194,7 @@ func Provider() terraform.ResourceProvider { "github_user_invitation_accepter": resourceGithubUserInvitationAccepter(), "github_user_ssh_key": resourceGithubUserSshKey(), "github_enterprise_organization": resourceGithubEnterpriseOrganization(), + "github_enterprise_actions_runner_group": resourceGithubActionsEnterpriseRunnerGroup(), }, DataSourcesMap: map[string]*schema.Resource{ @@ -233,7 +262,7 @@ func Provider() terraform.ResourceProvider { }, } - p.ConfigureFunc = providerConfigure(p) + p.ConfigureContextFunc = providerConfigure(p) return p }
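Many of the schema changes in this diff swap ValidateFunc for ValidateDiagFunc via a toDiagFunc helper whose definition is not part of this excerpt. A minimal sketch of such an adapter, assuming it wraps a classic SchemaValidateFunc and attaches the attribute name to the diagnostic path:

```go
package github

import (
	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// toDiagFunc adapts a legacy SchemaValidateFunc to the SchemaValidateDiagFunc
// shape required by SDK v2, turning returned warnings and errors into
// diagnostics attached to the named attribute.
func toDiagFunc(validator schema.SchemaValidateFunc, key string) schema.SchemaValidateDiagFunc {
	return func(v interface{}, path cty.Path) diag.Diagnostics {
		var diags diag.Diagnostics
		attr := path
		if len(attr) == 0 {
			attr = cty.GetAttrPath(key)
		}
		warnings, errors := validator(v, key)
		for _, warning := range warnings {
			diags = append(diags, diag.Diagnostic{
				Severity:      diag.Warning,
				Summary:       warning,
				AttributePath: attr,
			})
		}
		for _, err := range errors {
			diags = append(diags, diag.Diagnostic{
				Severity:      diag.Error,
				Summary:       err.Error(),
				AttributePath: attr,
			})
		}
		return diags
	}
}
```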
" + "Although, it is not possible to enable this setting on github.com " + "because we enforce the respect of github.com's best practices to avoid hitting abuse rate limits" + "Defaults to false if not set", + "retryable_errors": "Allow the provider to retry after receiving an error status code, the max_retries should be set for this to work" + + "Defaults to [500, 502, 503, 504]", + "max_retries": "Number of times to retry a request after receiving an error status code" + + "Defaults to 3", } } -func providerConfigure(p *schema.Provider) schema.ConfigureFunc { - return func(d *schema.ResourceData) (interface{}, error) { +func providerConfigure(p *schema.Provider) schema.ConfigureContextFunc { + return func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { owner := d.Get("owner").(string) baseURL := d.Get("base_url").(string) token := d.Get("token").(string) @@ -309,13 +344,13 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc { if v, ok := appAuthAttr["id"].(string); ok && v != "" { appID = v } else { - return nil, fmt.Errorf("app_auth.id must be set and contain a non-empty value") + return nil, wrapErrors([]error{fmt.Errorf("app_auth.id must be set and contain a non-empty value")}) } if v, ok := appAuthAttr["installation_id"].(string); ok && v != "" { appInstallationID = v } else { - return nil, fmt.Errorf("app_auth.installation_id must be set and contain a non-empty value") + return nil, wrapErrors([]error{fmt.Errorf("app_auth.installation_id must be set and contain a non-empty value")}) } if v, ok := appAuthAttr["pem_file"].(string); ok && v != "" { @@ -328,12 +363,12 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc { // actual new line character before decoding. appPemFile = strings.Replace(v, `\n`, "\n", -1) } else { - return nil, fmt.Errorf("app_auth.pem_file must be set and contain a non-empty value") + return nil, wrapErrors([]error{fmt.Errorf("app_auth.pem_file must be set and contain a non-empty value")}) } appToken, err := GenerateOAuthTokenFromApp(baseURL, appID, appInstallationID, appPemFile) if err != nil { - return nil, err + return nil, wrapErrors([]error{err}) } token = appToken @@ -341,33 +376,58 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc { isGithubDotCom, err := regexp.MatchString("^"+regexp.QuoteMeta("https://api.github.com"), baseURL) if err != nil { - return nil, err + return nil, diag.FromErr(err) } if token == "" { ghAuthToken, err := tokenFromGhCli(baseURL, isGithubDotCom) if err != nil { - return nil, fmt.Errorf("gh auth token: %w", err) + return nil, diag.FromErr(fmt.Errorf("gh auth token: %w", err)) } token = ghAuthToken } writeDelay := d.Get("write_delay_ms").(int) if writeDelay <= 0 { - return nil, fmt.Errorf("write_delay_ms must be greater than 0ms") + return nil, wrapErrors([]error{fmt.Errorf("write_delay_ms must be greater than 0ms")}) } log.Printf("[INFO] Setting write_delay_ms to %d", writeDelay) readDelay := d.Get("read_delay_ms").(int) if readDelay < 0 { - return nil, fmt.Errorf("read_delay_ms must be greater than or equal to 0ms") + return nil, wrapErrors([]error{fmt.Errorf("read_delay_ms must be greater than or equal to 0ms")}) } log.Printf("[DEBUG] Setting read_delay_ms to %d", readDelay) + retryDelay := d.Get("read_delay_ms").(int) + if retryDelay < 0 { + return nil, diag.FromErr(fmt.Errorf("retry_delay_ms must be greater than or equal to 0ms")) + } + log.Printf("[DEBUG] Setting retry_delay_ms to %d", retryDelay) + + maxRetries := d.Get("max_retries").(int) + if 
-func providerConfigure(p *schema.Provider) schema.ConfigureFunc { - return func(d *schema.ResourceData) (interface{}, error) { +func providerConfigure(p *schema.Provider) schema.ConfigureContextFunc { + return func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { owner := d.Get("owner").(string) baseURL := d.Get("base_url").(string) token := d.Get("token").(string) @@ -309,13 +344,13 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc { if v, ok := appAuthAttr["id"].(string); ok && v != "" { appID = v } else { - return nil, fmt.Errorf("app_auth.id must be set and contain a non-empty value") + return nil, wrapErrors([]error{fmt.Errorf("app_auth.id must be set and contain a non-empty value")}) } if v, ok := appAuthAttr["installation_id"].(string); ok && v != "" { appInstallationID = v } else { - return nil, fmt.Errorf("app_auth.installation_id must be set and contain a non-empty value") + return nil, wrapErrors([]error{fmt.Errorf("app_auth.installation_id must be set and contain a non-empty value")}) } if v, ok := appAuthAttr["pem_file"].(string); ok && v != "" { @@ -328,12 +363,12 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc { // actual new line character before decoding. appPemFile = strings.Replace(v, `\n`, "\n", -1) } else { - return nil, fmt.Errorf("app_auth.pem_file must be set and contain a non-empty value") + return nil, wrapErrors([]error{fmt.Errorf("app_auth.pem_file must be set and contain a non-empty value")}) } appToken, err := GenerateOAuthTokenFromApp(baseURL, appID, appInstallationID, appPemFile) if err != nil { - return nil, err + return nil, wrapErrors([]error{err}) } token = appToken @@ -341,33 +376,58 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc { isGithubDotCom, err := regexp.MatchString("^"+regexp.QuoteMeta("https://api.github.com"), baseURL) if err != nil { - return nil, err + return nil, diag.FromErr(err) } if token == "" { ghAuthToken, err := tokenFromGhCli(baseURL, isGithubDotCom) if err != nil { - return nil, fmt.Errorf("gh auth token: %w", err) + return nil, diag.FromErr(fmt.Errorf("gh auth token: %w", err)) } token = ghAuthToken } writeDelay := d.Get("write_delay_ms").(int) if writeDelay <= 0 { - return nil, fmt.Errorf("write_delay_ms must be greater than 0ms") + return nil, wrapErrors([]error{fmt.Errorf("write_delay_ms must be greater than 0ms")}) } log.Printf("[INFO] Setting write_delay_ms to %d", writeDelay) readDelay := d.Get("read_delay_ms").(int) if readDelay < 0 { - return nil, fmt.Errorf("read_delay_ms must be greater than or equal to 0ms") + return nil, wrapErrors([]error{fmt.Errorf("read_delay_ms must be greater than or equal to 0ms")}) } log.Printf("[DEBUG] Setting read_delay_ms to %d", readDelay) + retryDelay := d.Get("retry_delay_ms").(int) + if retryDelay < 0 { + return nil, diag.FromErr(fmt.Errorf("retry_delay_ms must be greater than or equal to 0ms")) + } + log.Printf("[DEBUG] Setting retry_delay_ms to %d", retryDelay) + + maxRetries := d.Get("max_retries").(int) + if maxRetries < 0 { + return nil, diag.FromErr(fmt.Errorf("max_retries must be greater than or equal to 0")) + } + log.Printf("[DEBUG] Setting max_retries to %d", maxRetries) + retryableErrors := make(map[int]bool) + if maxRetries > 0 { + reParam := d.Get("retryable_errors").([]interface{}) + if len(reParam) == 0 { + retryableErrors = getDefaultRetriableErrors() + } else { + for _, status := range reParam { + retryableErrors[status.(int)] = true + } + } + + log.Printf("[DEBUG] Setting retryable_errors to %v", retryableErrors) + } + parallelRequests := d.Get("parallel_requests").(bool) if parallelRequests && isGithubDotCom { - return nil, fmt.Errorf("parallel_requests cannot be true when connecting to public github") + return nil, wrapErrors([]error{fmt.Errorf("parallel_requests cannot be true when connecting to public github")}) } log.Printf("[DEBUG] Setting parallel_requests to %t", parallelRequests) @@ -378,16 +438,17 @@ func providerConfigure(p *schema.Provider) schema.ConfigureFunc { Owner: owner, WriteDelay: time.Duration(writeDelay) * time.Millisecond, ReadDelay: time.Duration(readDelay) * time.Millisecond, + RetryDelay: time.Duration(retryDelay) * time.Millisecond, + RetryableErrors: retryableErrors, + MaxRetries: maxRetries, ParallelRequests: parallelRequests, } meta, err := config.Meta() if err != nil { - return nil, err + return nil, wrapErrors([]error{err}) } - meta.(*Owner).StopContext = p.StopContext() - return meta, nil } } diff --git a/github/provider_test.go b/github/provider_test.go index 31cb5ffc59..7992fcb174 100644 --- a/github/provider_test.go +++ b/github/provider_test.go @@ -4,25 +4,24 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -var testAccProviders map[string]terraform.ResourceProvider -var testAccProviderFactories func(providers *[]*schema.Provider) map[string]terraform.ResourceProviderFactory +var testAccProviders map[string]*schema.Provider +var testAccProviderFactories func(providers *[]*schema.Provider) map[string]func() (*schema.Provider, error) var testAccProvider *schema.Provider func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ + testAccProvider = Provider() + testAccProviders = map[string]*schema.Provider{ "github": testAccProvider, } - testAccProviderFactories = func(providers *[]*schema.Provider) map[string]terraform.ResourceProviderFactory { - return map[string]terraform.ResourceProviderFactory{ - "github": func() (terraform.ResourceProvider, error) { + testAccProviderFactories = func(providers *[]*schema.Provider) map[string]func() (*schema.Provider, error) { + return map[string]func() (*schema.Provider, error){ + "github": func() (*schema.Provider, error) { p := Provider() - *providers = append(*providers, p.(*schema.Provider)) + *providers = append(*providers, p) return p, nil }, } @@ -33,7 +32,7 @@ func TestProvider(t *testing.T) { t.Run("runs internal validation without error", func(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + if err := Provider().InternalValidate(); err != nil { t.Fatalf("err: %s", err) } @@ -45,7 +44,7 @@ func TestProvider(t *testing.T) { // var _ terraform.ResourceProvider = Provider() // } - var _ terraform.ResourceProvider
= Provider() + var _ schema.Provider = *Provider() }) } @@ -162,4 +161,29 @@ func TestAccProviderConfigure(t *testing.T) { }) }) + + t.Run("can be configured with max retries", func(t *testing.T) { + + config := fmt.Sprintf(` + provider "github" { + token = "%s" + owner = "%s" + max_retries = 3 + }`, + testToken, testOwnerFunc(), + ) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, individual) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + ExpectNonEmptyPlan: false, + }, + }, + }) + + }) + } diff --git a/github/repository_utils.go b/github/repository_utils.go index 88919e1eeb..356c84f8fe 100644 --- a/github/repository_utils.go +++ b/github/repository_utils.go @@ -98,7 +98,7 @@ func getAutolinkByKeyPrefix(client *github.Client, owner, repo, keyPrefix string } } - return nil, nil + return nil, fmt.Errorf("cannot find autolink reference %s in repo %s/%s", keyPrefix, owner, repo) } // listAutolinks returns all autolink references for the given repository. @@ -123,3 +123,13 @@ func listAutolinks(client *github.Client, owner, repo string) ([]*github.Autolin return allAutolinks, nil } + +// get the list of retriable errors +func getDefaultRetriableErrors() map[int]bool { + return map[int]bool{ + 500: true, + 502: true, + 503: true, + 504: true, + } +} diff --git a/github/resource_github_actions_environment_secret.go b/github/resource_github_actions_environment_secret.go index ae8ca32dac..9e48959322 100644 --- a/github/resource_github_actions_environment_secret.go +++ b/github/resource_github_actions_environment_secret.go @@ -8,8 +8,8 @@ import ( "net/url" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubActionsEnvironmentSecret() *schema.Resource { @@ -32,20 +32,20 @@ func resourceGithubActionsEnvironmentSecret() *schema.Resource { Description: "Name of the environment.", }, "secret_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the secret.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the secret.", + ValidateDiagFunc: validateSecretNameFunc, }, "encrypted_value": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - Description: "Encrypted value of the secret using the GitHub public key in Base64 format.", - ConflictsWith: []string{"plaintext_value"}, - ValidateFunc: validation.StringIsBase64, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + Description: "Encrypted value of the secret using the GitHub public key in Base64 format.", + ConflictsWith: []string{"plaintext_value"}, + ValidateDiagFunc: toDiagFunc(validation.StringIsBase64, "encrypted_value"), }, "plaintext_value": { Type: schema.TypeString, @@ -146,9 +146,15 @@ func resourceGithubActionsEnvironmentSecretRead(d *schema.ResourceData, meta int return err } - d.Set("encrypted_value", d.Get("encrypted_value")) - d.Set("plaintext_value", d.Get("plaintext_value")) - d.Set("created_at", secret.CreatedAt.String()) + if err = d.Set("encrypted_value", d.Get("encrypted_value")); err != nil { + return err + } + if err = d.Set("plaintext_value", d.Get("plaintext_value")); err != nil { + return err + } + if err = 
d.Set("created_at", secret.CreatedAt.String()); err != nil { + return err + } // This is a drift detection mechanism based on timestamps. // @@ -169,7 +175,9 @@ func resourceGithubActionsEnvironmentSecretRead(d *schema.ResourceData, meta int log.Printf("[INFO] The environment secret %s has been externally updated in GitHub", d.Id()) d.SetId("") } else if !ok { - d.Set("updated_at", secret.UpdatedAt.String()) + if err = d.Set("updated_at", secret.UpdatedAt.String()); err != nil { + return err + } } return nil diff --git a/github/resource_github_actions_environment_secret_test.go b/github/resource_github_actions_environment_secret_test.go index a7b649de1d..0f55863370 100644 --- a/github/resource_github_actions_environment_secret_test.go +++ b/github/resource_github_actions_environment_secret_test.go @@ -6,9 +6,9 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubActionsEnvironmentSecret(t *testing.T) { diff --git a/github/resource_github_actions_environment_variable.go b/github/resource_github_actions_environment_variable.go index efc0771e32..44db0b2dd8 100644 --- a/github/resource_github_actions_environment_variable.go +++ b/github/resource_github_actions_environment_variable.go @@ -7,7 +7,7 @@ import ( "net/url" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubActionsEnvironmentVariable() *schema.Resource { @@ -17,7 +17,7 @@ func resourceGithubActionsEnvironmentVariable() *schema.Resource { Update: resourceGithubActionsEnvironmentVariableUpdate, Delete: resourceGithubActionsEnvironmentVariableDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -33,11 +33,11 @@ func resourceGithubActionsEnvironmentVariable() *schema.Resource { Description: "Name of the environment.", }, "variable_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the variable.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the variable.", + ValidateDiagFunc: validateSecretNameFunc, }, "value": { Type: schema.TypeString, diff --git a/github/resource_github_actions_environment_variable_test.go b/github/resource_github_actions_environment_variable_test.go index aace291091..cbab1e20d2 100644 --- a/github/resource_github_actions_environment_variable_test.go +++ b/github/resource_github_actions_environment_variable_test.go @@ -5,9 +5,9 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubActionsEnvironmentVariable(t *testing.T) { diff --git a/github/resource_github_actions_organization_oidc_subject_claim_customization_template.go b/github/resource_github_actions_organization_oidc_subject_claim_customization_template.go index 183602c0b6..c096dcb9b1 100644 --- a/github/resource_github_actions_organization_oidc_subject_claim_customization_template.go +++ 
b/github/resource_github_actions_organization_oidc_subject_claim_customization_template.go @@ -4,7 +4,7 @@ import ( "context" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubActionsOrganizationOIDCSubjectClaimCustomizationTemplate() *schema.Resource { @@ -14,7 +14,7 @@ func resourceGithubActionsOrganizationOIDCSubjectClaimCustomizationTemplate() *s Update: resourceGithubActionsOrganizationOIDCSubjectClaimCustomizationTemplateCreateOrUpdate, Delete: resourceGithubActionsOrganizationOIDCSubjectClaimCustomizationTemplateDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "include_claim_keys": { @@ -76,7 +76,9 @@ func resourceGithubActionsOrganizationOIDCSubjectClaimCustomizationTemplateRead( return err } - d.Set("include_claim_keys", template.IncludeClaimKeys) + if err = d.Set("include_claim_keys", template.IncludeClaimKeys); err != nil { + return err + } return nil } diff --git a/github/resource_github_actions_organization_oidc_subject_claim_customization_template_test.go b/github/resource_github_actions_organization_oidc_subject_claim_customization_template_test.go index 48553dfa04..bbec3e89e0 100644 --- a/github/resource_github_actions_organization_oidc_subject_claim_customization_template_test.go +++ b/github/resource_github_actions_organization_oidc_subject_claim_customization_template_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubActionsOrganizationOIDCSubjectClaimCustomizationTemplate(t *testing.T) { diff --git a/github/resource_github_actions_organization_permissions.go b/github/resource_github_actions_organization_permissions.go index 202296d8f6..8c5f010aa2 100644 --- a/github/resource_github_actions_organization_permissions.go +++ b/github/resource_github_actions_organization_permissions.go @@ -3,10 +3,11 @@ package github import ( "context" "errors" + "log" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubActionsOrganizationPermissions() *schema.Resource { @@ -16,21 +17,21 @@ func resourceGithubActionsOrganizationPermissions() *schema.Resource { Update: resourceGithubActionsOrganizationPermissionsCreateOrUpdate, Delete: resourceGithubActionsOrganizationPermissionsDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "allowed_actions": { - Type: schema.TypeString, - Optional: true, - Description: "The permissions policy that controls the actions that are allowed to run. Can be one of: 'all', 'local_only', or 'selected'.", - ValidateFunc: validation.StringInSlice([]string{"all", "local_only", "selected"}, false), + Type: schema.TypeString, + Optional: true, + Description: "The permissions policy that controls the actions that are allowed to run. 
Can be one of: 'all', 'local_only', or 'selected'.", + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"all", "local_only", "selected"}, false), "allowed_actions"), }, "enabled_repositories": { - Type: schema.TypeString, - Required: true, - Description: "The policy that controls the repositories in the organization that are allowed to run GitHub Actions. Can be one of: 'all', 'none', or 'selected'.", - ValidateFunc: validation.StringInSlice([]string{"all", "none", "selected"}, false), + Type: schema.TypeString, + Required: true, + Description: "The policy that controls the repositories in the organization that are allowed to run GitHub Actions. Can be one of: 'all', 'none', or 'selected'.", + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"all", "none", "selected"}, false), "enabled_repositories"), }, "allowed_actions_config": { Type: schema.TypeList, @@ -106,8 +107,7 @@ func resourceGithubActionsOrganizationAllowedObject(d *schema.ResourceData) (*gi allowed.PatternsAllowed = patternsAllowed } else { - return &github.ActionsAllowed{}, - errors.New("the allowed_actions_config {} block must be specified if allowed_actions == 'selected'") + return nil, nil } return allowed, nil @@ -162,11 +162,16 @@ func resourceGithubActionsOrganizationPermissionsCreateOrUpdate(d *schema.Resour if err != nil { return err } - _, _, err = client.Actions.EditActionsAllowed(ctx, - orgName, - *actionsAllowedData) - if err != nil { - return err + if actionsAllowedData != nil { + log.Printf("[DEBUG] Allowed actions config is set") + _, _, err = client.Actions.EditActionsAllowed(ctx, + orgName, + *actionsAllowedData) + if err != nil { + return err + } + } else { + log.Printf("[DEBUG] Allowed actions config not set, skipping") } } @@ -201,7 +206,17 @@ func resourceGithubActionsOrganizationPermissionsRead(d *schema.ResourceData, me return err } - if actionsPermissions.GetAllowedActions() == "selected" { + // only load and fill allowed_actions_config if allowed_actions_config is also set + // in the TF code. 
(see #2105). + // On initial import the state might not contain a value yet, so the data has to be imported; + // however, an existing value can only be loaded when the current config is set to "selected" (see #2182). + allowedActions := d.Get("allowed_actions").(string) + allowedActionsConfig := d.Get("allowed_actions_config").([]interface{}) + + serverHasAllowedActionsConfig := actionsPermissions.GetAllowedActions() == "selected" + userWantsAllowedActionsConfig := (allowedActions == "selected" && len(allowedActionsConfig) > 0) || allowedActions == "" + + if serverHasAllowedActionsConfig && userWantsAllowedActionsConfig { actionsAllowed, _, err := client.Actions.GetActionsAllowed(ctx, d.Id()) if err != nil { return err @@ -209,16 +224,20 @@ func resourceGithubActionsOrganizationPermissionsRead(d *schema.ResourceData, me // If actionsAllowed set to local/all by removing all actions config settings, the response will be empty if actionsAllowed != nil { - d.Set("allowed_actions_config", []interface{}{ + if err = d.Set("allowed_actions_config", []interface{}{ map[string]interface{}{ "github_owned_allowed": actionsAllowed.GetGithubOwnedAllowed(), "patterns_allowed": actionsAllowed.PatternsAllowed, "verified_allowed": actionsAllowed.GetVerifiedAllowed(), }, - }) + }); err != nil { + return err + } } } else { - d.Set("allowed_actions_config", []interface{}{}) + if err = d.Set("allowed_actions_config", []interface{}{}); err != nil { + return err + } } if actionsPermissions.GetEnabledRepositories() == "selected" { @@ -243,18 +262,26 @@ func resourceGithubActionsOrganizationPermissionsRead(d *schema.ResourceData, me repoList = append(repoList, *allRepos[index].ID) } if allRepos != nil { - d.Set("enabled_repositories_config", []interface{}{ + if err = d.Set("enabled_repositories_config", []interface{}{ map[string]interface{}{ "repository_ids": repoList, }, - }) + }); err != nil { + return err + } } else { - d.Set("enabled_repositories_config", []interface{}{}) + if err = d.Set("enabled_repositories_config", []interface{}{}); err != nil { + return err + } } } - d.Set("allowed_actions", actionsPermissions.GetAllowedActions()) - d.Set("enabled_repositories", actionsPermissions.GetEnabledRepositories()) + if err = d.Set("allowed_actions", actionsPermissions.GetAllowedActions()); err != nil { + return err + } + if err = d.Set("enabled_repositories", actionsPermissions.GetEnabledRepositories()); err != nil { + return err + } return nil } diff --git a/github/resource_github_actions_organization_permissions_test.go b/github/resource_github_actions_organization_permissions_test.go index 0b9effba0c..97a01f1719 100644 --- a/github/resource_github_actions_organization_permissions_test.go +++ b/github/resource_github_actions_organization_permissions_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubActionsOrganizationPermissions(t *testing.T) { @@ -166,6 +166,49 @@ func TestAccGithubActionsOrganizationPermissions(t *testing.T) { }) }) + t.Run("test not setting of organization allowed actions without error", func(t *testing.T) { + + allowedActions := "selected" + enabledRepositories := "all" + + config := fmt.Sprintf(` + + resource "github_actions_organization_permissions" "test" { + allowed_actions = "%s" + enabled_repositories = "%s" + } + `,
allowedActions, enabledRepositories) + + check := resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "github_actions_organization_permissions.test", "allowed_actions", allowedActions, + ), + resource.TestCheckResourceAttr( + "github_actions_organization_permissions.test", "enabled_repositories", enabledRepositories, + ), + resource.TestCheckResourceAttr( + "github_actions_organization_permissions.test", "allowed_actions_config.#", "0", + ), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + }, + }) + } + + t.Run("with an organization account", func(t *testing.T) { + testCase(t, organization) + }) + }) + t.Run("test setting of organization enabled repositories", func(t *testing.T) { allowedActions := "all" diff --git a/github/resource_github_actions_organization_secret.go b/github/resource_github_actions_organization_secret.go index ee909ad390..bbb84571d1 100644 --- a/github/resource_github_actions_organization_secret.go +++ b/github/resource_github_actions_organization_secret.go @@ -8,8 +8,8 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubActionsOrganizationSecret() *schema.Resource { @@ -20,27 +20,29 @@ func resourceGithubActionsOrganizationSecret() *schema.Resource { Delete: resourceGithubActionsOrganizationSecretDelete, Importer: &schema.ResourceImporter{ State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("secret_name", d.Id()) + if err := d.Set("secret_name", d.Id()); err != nil { + return nil, err + } return []*schema.ResourceData{d}, nil }, }, Schema: map[string]*schema.Schema{ "secret_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the secret.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the secret.", + ValidateDiagFunc: validateSecretNameFunc, }, "encrypted_value": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Sensitive: true, - ConflictsWith: []string{"plaintext_value"}, - Description: "Encrypted value of the secret using the GitHub public key in Base64 format.", - ValidateFunc: validation.StringIsBase64, + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Sensitive: true, + ConflictsWith: []string{"plaintext_value"}, + Description: "Encrypted value of the secret using the GitHub public key in Base64 format.", + ValidateDiagFunc: toDiagFunc(validation.StringIsBase64, "encrypted_value"), }, "plaintext_value": { Type: schema.TypeString, @@ -51,11 +53,11 @@ func resourceGithubActionsOrganizationSecret() *schema.Resource { Description: "Plaintext value of the secret to be encrypted.", }, "visibility": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateValueFunc([]string{"all", "private", "selected"}), - ForceNew: true, - Description: "Configures the access that repositories have to the organization secret. Must be one of 'all', 'private', or 'selected'. 
'selected_repository_ids' is required if set to 'selected'.", + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: validateValueFunc([]string{"all", "private", "selected"}), + ForceNew: true, + Description: "Configures the access that repositories have to the organization secret. Must be one of 'all', 'private', or 'selected'. 'selected_repository_ids' is required if set to 'selected'.", }, "selected_repository_ids": { Type: schema.TypeSet, @@ -157,10 +159,18 @@ func resourceGithubActionsOrganizationSecretRead(d *schema.ResourceData, meta in return err } - d.Set("encrypted_value", d.Get("encrypted_value")) - d.Set("plaintext_value", d.Get("plaintext_value")) - d.Set("created_at", secret.CreatedAt.String()) - d.Set("visibility", secret.Visibility) + if err = d.Set("encrypted_value", d.Get("encrypted_value")); err != nil { + return err + } + if err = d.Set("plaintext_value", d.Get("plaintext_value")); err != nil { + return err + } + if err = d.Set("created_at", secret.CreatedAt.String()); err != nil { + return err + } + if err = d.Set("visibility", secret.Visibility); err != nil { + return err + } selectedRepositoryIDs := []int64{} @@ -185,7 +195,9 @@ func resourceGithubActionsOrganizationSecretRead(d *schema.ResourceData, meta in } } - d.Set("selected_repository_ids", selectedRepositoryIDs) + if err = d.Set("selected_repository_ids", selectedRepositoryIDs); err != nil { + return err + } // This is a drift detection mechanism based on timestamps. // @@ -206,7 +218,9 @@ func resourceGithubActionsOrganizationSecretRead(d *schema.ResourceData, meta in log.Printf("[INFO] The secret %s has been externally updated in GitHub", d.Id()) d.SetId("") } else if !ok { - d.Set("updated_at", secret.UpdatedAt.String()) + if err = d.Set("updated_at", secret.UpdatedAt.String()); err != nil { + return err + } } return nil diff --git a/github/resource_github_actions_organization_secret_repositories.go b/github/resource_github_actions_organization_secret_repositories.go index 3785c1f7cb..3e5162b145 100644 --- a/github/resource_github_actions_organization_secret_repositories.go +++ b/github/resource_github_actions_organization_secret_repositories.go @@ -4,7 +4,7 @@ import ( "context" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubActionsOrganizationSecretRepositories() *schema.Resource { @@ -14,16 +14,16 @@ func resourceGithubActionsOrganizationSecretRepositories() *schema.Resource { Update: resourceGithubActionsOrganizationSecretRepositoriesCreateOrUpdate, Delete: resourceGithubActionsOrganizationSecretRepositoriesDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "secret_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the existing secret.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the existing secret.", + ValidateDiagFunc: validateSecretNameFunc, }, "selected_repository_ids": { Type: schema.TypeSet, @@ -97,7 +97,9 @@ func resourceGithubActionsOrganizationSecretRepositoriesRead(d *schema.ResourceD opt.Page = resp.NextPage } - d.Set("selected_repository_ids", selectedRepositoryIDs) + if err = d.Set("selected_repository_ids", selectedRepositoryIDs); err != nil { + return err + } return nil } diff --git 
a/github/resource_github_actions_organization_secret_repositories_test.go b/github/resource_github_actions_organization_secret_repositories_test.go index dcc9344189..2967be19b5 100644 --- a/github/resource_github_actions_organization_secret_repositories_test.go +++ b/github/resource_github_actions_organization_secret_repositories_test.go @@ -5,8 +5,8 @@ import ( "os" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubActionsOrganizationSecretRepositories(t *testing.T) { diff --git a/github/resource_github_actions_organization_secret_test.go b/github/resource_github_actions_organization_secret_test.go index f7f1bb37ed..fee7aba027 100644 --- a/github/resource_github_actions_organization_secret_test.go +++ b/github/resource_github_actions_organization_secret_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubActionsOrganizationSecret(t *testing.T) { diff --git a/github/resource_github_actions_organization_variable.go b/github/resource_github_actions_organization_variable.go index 0eaba44253..3bb5b1322a 100644 --- a/github/resource_github_actions_organization_variable.go +++ b/github/resource_github_actions_organization_variable.go @@ -7,7 +7,7 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubActionsOrganizationVariable() *schema.Resource { @@ -17,16 +17,16 @@ func resourceGithubActionsOrganizationVariable() *schema.Resource { Update: resourceGithubActionsOrganizationVariableUpdate, Delete: resourceGithubActionsOrganizationVariableDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "variable_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the variable.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the variable.", + ValidateDiagFunc: validateSecretNameFunc, }, "value": { Type: schema.TypeString, @@ -44,11 +44,11 @@ func resourceGithubActionsOrganizationVariable() *schema.Resource { Description: "Date of 'actions_variable' update.", }, "visibility": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateValueFunc([]string{"all", "private", "selected"}), - ForceNew: true, - Description: "Configures the access that repositories have to the organization variable. Must be one of 'all', 'private', or 'selected'. 'selected_repository_ids' is required if set to 'selected'.", + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: validateValueFunc([]string{"all", "private", "selected"}), + ForceNew: true, + Description: "Configures the access that repositories have to the organization variable. Must be one of 'all', 'private', or 'selected'. 
'selected_repository_ids' is required if set to 'selected'.", }, "selected_repository_ids": { Type: schema.TypeSet, @@ -166,11 +166,21 @@ func resourceGithubActionsOrganizationVariableRead(d *schema.ResourceData, meta return err } - d.Set("variable_name", name) - d.Set("value", variable.Value) - d.Set("created_at", variable.CreatedAt.String()) - d.Set("updated_at", variable.UpdatedAt.String()) - d.Set("visibility", *variable.Visibility) + if err = d.Set("variable_name", name); err != nil { + return err + } + if err = d.Set("value", variable.Value); err != nil { + return err + } + if err = d.Set("created_at", variable.CreatedAt.String()); err != nil { + return err + } + if err = d.Set("updated_at", variable.UpdatedAt.String()); err != nil { + return err + } + if err = d.Set("visibility", *variable.Visibility); err != nil { + return err + } selectedRepositoryIDs := []int64{} @@ -195,7 +205,9 @@ func resourceGithubActionsOrganizationVariableRead(d *schema.ResourceData, meta } } - d.Set("selected_repository_ids", selectedRepositoryIDs) + if err = d.Set("selected_repository_ids", selectedRepositoryIDs); err != nil { + return err + } return nil } diff --git a/github/resource_github_actions_organization_variable_test.go b/github/resource_github_actions_organization_variable_test.go index a08a709470..5d22d6dae1 100644 --- a/github/resource_github_actions_organization_variable_test.go +++ b/github/resource_github_actions_organization_variable_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubActionsOrganizationVariable(t *testing.T) { diff --git a/github/resource_github_actions_repository_access_level.go b/github/resource_github_actions_repository_access_level.go index c237cae492..dd2b5f78eb 100644 --- a/github/resource_github_actions_repository_access_level.go +++ b/github/resource_github_actions_repository_access_level.go @@ -4,8 +4,8 @@ import ( "context" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubActionsRepositoryAccessLevel() *schema.Resource { @@ -15,21 +15,21 @@ func resourceGithubActionsRepositoryAccessLevel() *schema.Resource { Update: resourceGithubActionsRepositoryAccessLevelCreateOrUpdate, Delete: resourceGithubActionsRepositoryAccessLevelDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "access_level": { - Type: schema.TypeString, - Required: true, - Description: "Where the actions or reusable workflows of the repository may be used. Possible values are 'none', 'user', 'organization', or 'enterprise'.", - ValidateFunc: validation.StringInSlice([]string{"none", "user", "organization", "enterprise"}, false), + Type: schema.TypeString, + Required: true, + Description: "Where the actions or reusable workflows of the repository may be used. 
Possible values are 'none', 'user', 'organization', or 'enterprise'.", + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"none", "user", "organization", "enterprise"}, false), "access_level"), }, "repository": { - Type: schema.TypeString, - Required: true, - Description: "The GitHub repository.", - ValidateFunc: validation.StringLenBetween(1, 100), + Type: schema.TypeString, + Required: true, + Description: "The GitHub repository.", + ValidateDiagFunc: toDiagFunc(validation.StringLenBetween(1, 100), "repository"), }, }, } diff --git a/github/resource_github_actions_repository_access_level_test.go b/github/resource_github_actions_repository_access_level_test.go index feda905e32..244631a46f 100644 --- a/github/resource_github_actions_repository_access_level_test.go +++ b/github/resource_github_actions_repository_access_level_test.go @@ -2,8 +2,8 @@ package github import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "testing" ) diff --git a/github/resource_github_actions_repository_oidc_subject_claim_customization_template.go b/github/resource_github_actions_repository_oidc_subject_claim_customization_template.go index d9391e8c72..ef1ad7bd2a 100644 --- a/github/resource_github_actions_repository_oidc_subject_claim_customization_template.go +++ b/github/resource_github_actions_repository_oidc_subject_claim_customization_template.go @@ -5,8 +5,8 @@ import ( "errors" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubActionsRepositoryOIDCSubjectClaimCustomizationTemplate() *schema.Resource { @@ -16,14 +16,14 @@ func resourceGithubActionsRepositoryOIDCSubjectClaimCustomizationTemplate() *sch Update: resourceGithubActionsRepositoryOIDCSubjectClaimCustomizationTemplateCreateOrUpdate, Delete: resourceGithubActionsRepositoryOIDCSubjectClaimCustomizationTemplateDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "repository": { - Type: schema.TypeString, - Required: true, - Description: "The name of the repository.", - ValidateFunc: validation.StringLenBetween(1, 100), + Type: schema.TypeString, + Required: true, + Description: "The name of the repository.", + ValidateDiagFunc: toDiagFunc(validation.StringLenBetween(1, 100), "repository"), }, "use_default": { Type: schema.TypeBool, @@ -98,9 +98,15 @@ func resourceGithubActionsRepositoryOIDCSubjectClaimCustomizationTemplateRead(d return err } - d.Set("repository", repository) - d.Set("use_default", template.UseDefault) - d.Set("include_claim_keys", template.IncludeClaimKeys) + if err = d.Set("repository", repository); err != nil { + return err + } + if err = d.Set("use_default", template.UseDefault); err != nil { + return err + } + if err = d.Set("include_claim_keys", template.IncludeClaimKeys); err != nil { + return err + } return nil } diff --git a/github/resource_github_actions_repository_oidc_subject_claim_customization_template_test.go b/github/resource_github_actions_repository_oidc_subject_claim_customization_template_test.go index 
96b89e5981..f65288d6a1 100644 --- a/github/resource_github_actions_repository_oidc_subject_claim_customization_template_test.go +++ b/github/resource_github_actions_repository_oidc_subject_claim_customization_template_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubActionsRepositoryOIDCSubjectClaimCustomizationTemplate(t *testing.T) { diff --git a/github/resource_github_actions_repository_permissions.go b/github/resource_github_actions_repository_permissions.go index 62bb2e1e1f..8bb66f3957 100644 --- a/github/resource_github_actions_repository_permissions.go +++ b/github/resource_github_actions_repository_permissions.go @@ -2,12 +2,11 @@ package github import ( "context" - "errors" "log" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubActionsRepositoryPermissions() *schema.Resource { @@ -17,15 +16,15 @@ func resourceGithubActionsRepositoryPermissions() *schema.Resource { Update: resourceGithubActionsRepositoryPermissionsCreateOrUpdate, Delete: resourceGithubActionsRepositoryPermissionsDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "allowed_actions": { - Type: schema.TypeString, - Optional: true, - Description: "The permissions policy that controls the actions that are allowed to run. Can be one of: 'all', 'local_only', or 'selected'.", - ValidateFunc: validation.StringInSlice([]string{"all", "local_only", "selected"}, false), + Type: schema.TypeString, + Optional: true, + Description: "The permissions policy that controls the actions that are allowed to run. 
Can be one of: 'all', 'local_only', or 'selected'.", + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"all", "local_only", "selected"}, false), "allowed_actions"), }, "allowed_actions_config": { Type: schema.TypeList, @@ -61,10 +60,10 @@ func resourceGithubActionsRepositoryPermissions() *schema.Resource { Description: "Should GitHub actions be enabled on this repository.", }, "repository": { - Type: schema.TypeString, - Required: true, - Description: "The GitHub repository.", - ValidateFunc: validation.StringLenBetween(1, 100), + Type: schema.TypeString, + Required: true, + Description: "The GitHub repository.", + ValidateDiagFunc: toDiagFunc(validation.StringLenBetween(1, 100), "repository"), }, }, } @@ -97,8 +96,7 @@ func resourceGithubActionsRepositoryAllowedObject(d *schema.ResourceData) (*gith allowed.PatternsAllowed = patternsAllowed } else { - return &github.ActionsAllowed{}, - errors.New("the allowed_actions_config {} block must be specified if allowed_actions == 'selected'") + return nil, nil } return allowed, nil @@ -141,12 +139,17 @@ func resourceGithubActionsRepositoryPermissionsCreateOrUpdate(d *schema.Resource if err != nil { return err } - _, _, err = client.Repositories.EditActionsAllowed(ctx, - owner, - repoName, - *actionsAllowedData) - if err != nil { - return err + if actionsAllowedData != nil { + log.Printf("[DEBUG] Allowed actions config is set") + _, _, err = client.Repositories.EditActionsAllowed(ctx, + owner, + repoName, + *actionsAllowedData) + if err != nil { + return err + } + } else { + log.Printf("[DEBUG] Allowed actions config not set, skipping") } } @@ -166,7 +169,17 @@ func resourceGithubActionsRepositoryPermissionsRead(d *schema.ResourceData, meta return err } - if actionsPermissions.GetAllowedActions() == "selected" { + // only load and fill allowed_actions_config if allowed_actions_config is also set + // in the TF code. 
(see #2105) + // on initial import there might not be any value in the state, then we have to import the data + // -> but we can only load an existing state if the current config is set to "selected" (see #2182) + allowedActions := d.Get("allowed_actions").(string) + allowedActionsConfig := d.Get("allowed_actions_config").([]interface{}) + + serverHasAllowedActionsConfig := actionsPermissions.GetAllowedActions() == "selected" && actionsPermissions.GetEnabled() + userWantsAllowedActionsConfig := (allowedActions == "selected" && len(allowedActionsConfig) > 0) || allowedActions == "" + + if serverHasAllowedActionsConfig && userWantsAllowedActionsConfig { actionsAllowed, _, err := client.Repositories.GetActionsAllowed(ctx, owner, repoName) if err != nil { return err @@ -174,21 +187,31 @@ func resourceGithubActionsRepositoryPermissionsRead(d *schema.ResourceData, meta // If actionsAllowed set to local/all by removing all actions config settings, the response will be empty if actionsAllowed != nil { - d.Set("allowed_actions_config", []interface{}{ + if err = d.Set("allowed_actions_config", []interface{}{ map[string]interface{}{ "github_owned_allowed": actionsAllowed.GetGithubOwnedAllowed(), "patterns_allowed": actionsAllowed.PatternsAllowed, "verified_allowed": actionsAllowed.GetVerifiedAllowed(), }, - }) + }); err != nil { + return err + } } } else { - d.Set("allowed_actions_config", []interface{}{}) + if err = d.Set("allowed_actions_config", []interface{}{}); err != nil { + return err + } } - d.Set("allowed_actions", actionsPermissions.GetAllowedActions()) - d.Set("enabled", actionsPermissions.GetEnabled()) - d.Set("repository", repoName) + if err = d.Set("allowed_actions", actionsPermissions.GetAllowedActions()); err != nil { + return err + } + if err = d.Set("enabled", actionsPermissions.GetEnabled()); err != nil { + return err + } + if err = d.Set("repository", repoName); err != nil { + return err + } return nil } diff --git a/github/resource_github_actions_repository_permissions_test.go b/github/resource_github_actions_repository_permissions_test.go index 050ec41692..7df2eb8f99 100644 --- a/github/resource_github_actions_repository_permissions_test.go +++ b/github/resource_github_actions_repository_permissions_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubActionsRepositoryPermissions(t *testing.T) { @@ -188,6 +188,62 @@ func TestAccGithubActionsRepositoryPermissions(t *testing.T) { }) + t.Run("test not setting of repository allowed actions without error", func(t *testing.T) { + + allowedActions := "selected" + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + + config := fmt.Sprintf(` + resource "github_repository" "test" { + name = "tf-acc-test-topic-%[1]s" + description = "Terraform acceptance tests %[1]s" + topics = ["terraform", "testing"] + } + + resource "github_actions_repository_permissions" "test" { + allowed_actions = "%s" + repository = github_repository.test.name + } + `, randomID, allowedActions) + + check := resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "github_actions_repository_permissions.test", "allowed_actions", allowedActions, + ), + // Even if we do not set the allowed_actions_config, + // it will be set to the organization level settings + 
resource.TestCheckResourceAttr( + "github_actions_repository_permissions.test", "allowed_actions_config.#", "0", + ), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + }, + }) + } + + t.Run("with an anonymous account", func(t *testing.T) { + t.Skip("anonymous account not supported for this operation") + }) + + t.Run("with an individual account", func(t *testing.T) { + testCase(t, individual) + }) + + t.Run("with an organization account", func(t *testing.T) { + testCase(t, organization) + }) + + }) + t.Run("test disabling actions on a repository", func(t *testing.T) { actionsEnabled := false @@ -238,4 +294,61 @@ func TestAccGithubActionsRepositoryPermissions(t *testing.T) { }) }) + + // https://github.com/integrations/terraform-provider-github/issues/2182 + t.Run("test load with disabled actions", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + + config := fmt.Sprintf(` + locals { + actions_enabled = false + } + + resource "github_repository" "test" { + name = "tf-acc-test-actions-permissions-%[1]s" + description = "Terraform acceptance tests %[1]s" + topics = ["terraform", "testing"] + } + + resource "github_actions_repository_permissions" "test" { + repository = github_repository.test.name + enabled = local.actions_enabled + allowed_actions = local.actions_enabled ? "all" : null + } + `, randomID) + + check := resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "github_actions_repository_permissions.test", "enabled", "false", + ), + resource.TestCheckResourceAttr( + "github_actions_repository_permissions.test", "allowed_actions.#", "0", + ), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + }, + }) + } + + t.Run("with an anonymous account", func(t *testing.T) { + t.Skip("anonymous account not supported for this operation") + }) + + t.Run("with an individual account", func(t *testing.T) { + testCase(t, individual) + }) + + t.Run("with an organization account", func(t *testing.T) { + testCase(t, organization) + }) + }) } diff --git a/github/resource_github_actions_runner_group.go b/github/resource_github_actions_runner_group.go index 06d9e5b7dc..4100a50358 100644 --- a/github/resource_github_actions_runner_group.go +++ b/github/resource_github_actions_runner_group.go @@ -8,8 +8,8 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubActionsRunnerGroup() *schema.Resource { @@ -19,10 +19,15 @@ func resourceGithubActionsRunnerGroup() *schema.Resource { Update: resourceGithubActionsRunnerGroupUpdate, Delete: resourceGithubActionsRunnerGroupDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the runner group.", + }, "allows_public_repositories": { Type: schema.TypeBool, 
Optional: true, @@ -69,10 +74,10 @@ func resourceGithubActionsRunnerGroup() *schema.Resource { Description: "GitHub API URL for the runner group's repositories.", }, "visibility": { - Type: schema.TypeString, - Required: true, - Description: "The visibility of the runner group.", - ValidateFunc: validation.StringInSlice([]string{"all", "selected", "private"}, false), + Type: schema.TypeString, + Required: true, + Description: "The visibility of the runner group.", + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"all", "selected", "private"}, false), "visibility"), }, "restricted_to_workflows": { Type: schema.TypeBool, @@ -142,18 +147,43 @@ func resourceGithubActionsRunnerGroupCreate(d *schema.ResourceData, meta interfa return err } d.SetId(strconv.FormatInt(runnerGroup.GetID(), 10)) - d.Set("etag", resp.Header.Get("ETag")) - d.Set("allows_public_repositories", runnerGroup.GetAllowsPublicRepositories()) - d.Set("default", runnerGroup.GetDefault()) - d.Set("id", runnerGroup.GetID()) - d.Set("inherited", runnerGroup.GetInherited()) - d.Set("name", runnerGroup.GetName()) - d.Set("runners_url", runnerGroup.GetRunnersURL()) - d.Set("selected_repositories_url", runnerGroup.GetSelectedRepositoriesURL()) - d.Set("visibility", runnerGroup.GetVisibility()) - d.Set("selected_repository_ids", selectedRepositoryIDs) // Note: runnerGroup has no method to get selected repository IDs - d.Set("restricted_to_workflows", runnerGroup.GetRestrictedToWorkflows()) - d.Set("selected_workflows", runnerGroup.SelectedWorkflows) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("allows_public_repositories", runnerGroup.GetAllowsPublicRepositories()); err != nil { + return err + } + if err = d.Set("default", runnerGroup.GetDefault()); err != nil { + return err + } + + if err = d.Set("id", strconv.FormatInt(runnerGroup.GetID(), 10)); err != nil { + return err + } + if err = d.Set("inherited", runnerGroup.GetInherited()); err != nil { + return err + } + if err = d.Set("name", runnerGroup.GetName()); err != nil { + return err + } + if err = d.Set("runners_url", runnerGroup.GetRunnersURL()); err != nil { + return err + } + if err = d.Set("selected_repositories_url", runnerGroup.GetSelectedRepositoriesURL()); err != nil { + return err + } + if err = d.Set("visibility", runnerGroup.GetVisibility()); err != nil { + return err + } + if err = d.Set("selected_repository_ids", selectedRepositoryIDs); err != nil { // Note: runnerGroup has no method to get selected repository IDs + return err + } + if err = d.Set("restricted_to_workflows", runnerGroup.GetRestrictedToWorkflows()); err != nil { + return err + } + if err = d.Set("selected_workflows", runnerGroup.SelectedWorkflows); err != nil { + return err + } return resourceGithubActionsRunnerGroupRead(d, meta) } @@ -205,17 +235,39 @@ func resourceGithubActionsRunnerGroupRead(d *schema.ResourceData, meta interface return nil } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("allows_public_repositories", runnerGroup.GetAllowsPublicRepositories()) - d.Set("default", runnerGroup.GetDefault()) - d.Set("id", runnerGroup.GetID()) - d.Set("inherited", runnerGroup.GetInherited()) - d.Set("name", runnerGroup.GetName()) - d.Set("runners_url", runnerGroup.GetRunnersURL()) - d.Set("selected_repositories_url", runnerGroup.GetSelectedRepositoriesURL()) - d.Set("visibility", runnerGroup.GetVisibility()) - d.Set("restricted_to_workflows", runnerGroup.GetRestrictedToWorkflows()) - d.Set("selected_workflows", 
runnerGroup.SelectedWorkflows) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("allows_public_repositories", runnerGroup.GetAllowsPublicRepositories()); err != nil { + return err + } + if err = d.Set("default", runnerGroup.GetDefault()); err != nil { + return err + } + if err = d.Set("id", strconv.FormatInt(runnerGroup.GetID(), 10)); err != nil { + return err + } + if err = d.Set("inherited", runnerGroup.GetInherited()); err != nil { + return err + } + if err = d.Set("name", runnerGroup.GetName()); err != nil { + return err + } + if err = d.Set("runners_url", runnerGroup.GetRunnersURL()); err != nil { + return err + } + if err = d.Set("selected_repositories_url", runnerGroup.GetSelectedRepositoriesURL()); err != nil { + return err + } + if err = d.Set("visibility", runnerGroup.GetVisibility()); err != nil { + return err + } + if err = d.Set("restricted_to_workflows", runnerGroup.GetRestrictedToWorkflows()); err != nil { + return err + } + if err = d.Set("selected_workflows", runnerGroup.SelectedWorkflows); err != nil { + return err + } selectedRepositoryIDs := []int64{} options := github.ListOptions{ @@ -239,7 +291,9 @@ func resourceGithubActionsRunnerGroupRead(d *schema.ResourceData, meta interface options.Page = resp.NextPage } - d.Set("selected_repository_ids", selectedRepositoryIDs) + if err = d.Set("selected_repository_ids", selectedRepositoryIDs); err != nil { + return err + } return nil } diff --git a/github/resource_github_actions_runner_group_test.go b/github/resource_github_actions_runner_group_test.go index f0aa751d00..e92fe90ba7 100644 --- a/github/resource_github_actions_runner_group_test.go +++ b/github/resource_github_actions_runner_group_test.go @@ -2,12 +2,12 @@ package github import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubActionsRunnerGroup(t *testing.T) { diff --git a/github/resource_github_actions_secret.go b/github/resource_github_actions_secret.go index 7f848da2cd..5941dbd8fd 100644 --- a/github/resource_github_actions_secret.go +++ b/github/resource_github_actions_secret.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "golang.org/x/crypto/nacl/box" ) @@ -30,11 +30,11 @@ func resourceGithubActionsSecret() *schema.Resource { Description: "Name of the repository.", }, "secret_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the secret.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the secret.", + ValidateDiagFunc: validateSecretNameFunc, }, "encrypted_value": { Type: schema.TypeString, @@ -130,9 +130,15 @@ func resourceGithubActionsSecretRead(d *schema.ResourceData, meta interface{}) e return err } - d.Set("encrypted_value", d.Get("encrypted_value")) - d.Set("plaintext_value", d.Get("plaintext_value")) - d.Set("created_at", secret.CreatedAt.String()) + if err = d.Set("encrypted_value", d.Get("encrypted_value")); err != nil { + return err + } + if err = d.Set("plaintext_value", 
d.Get("plaintext_value")); err != nil { + return err + } + if err = d.Set("created_at", secret.CreatedAt.String()); err != nil { + return err + } // This is a drift detection mechanism based on timestamps. // @@ -153,7 +159,9 @@ func resourceGithubActionsSecretRead(d *schema.ResourceData, meta interface{}) e log.Printf("[INFO] The secret %s has been externally updated in GitHub", d.Id()) d.SetId("") } else if !ok { - d.Set("updated_at", secret.UpdatedAt.String()) + if err = d.Set("updated_at", secret.UpdatedAt.String()); err != nil { + return err + } } return nil @@ -196,13 +204,21 @@ func resourceGithubActionsSecretImport(d *schema.ResourceData, meta interface{}) return nil, err } - d.Set("repository", repoName) - d.Set("secret_name", secretName) + if err = d.Set("repository", repoName); err != nil { + return nil, err + } + if err = d.Set("secret_name", secretName); err != nil { + return nil, err + } // encrypted_value or plaintext_value can not be imported - d.Set("created_at", secret.CreatedAt.String()) - d.Set("updated_at", secret.UpdatedAt.String()) + if err = d.Set("created_at", secret.CreatedAt.String()); err != nil { + return nil, err + } + if err = d.Set("updated_at", secret.UpdatedAt.String()); err != nil { + return nil, err + } return []*schema.ResourceData{d}, nil } diff --git a/github/resource_github_actions_secret_test.go b/github/resource_github_actions_secret_test.go index 7b80f4b916..21bf90233a 100644 --- a/github/resource_github_actions_secret_test.go +++ b/github/resource_github_actions_secret_test.go @@ -6,9 +6,9 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubActionsSecret(t *testing.T) { diff --git a/github/resource_github_actions_variable.go b/github/resource_github_actions_variable.go index 40cd93594c..74df424f3b 100644 --- a/github/resource_github_actions_variable.go +++ b/github/resource_github_actions_variable.go @@ -6,7 +6,7 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubActionsVariable() *schema.Resource { @@ -16,7 +16,7 @@ func resourceGithubActionsVariable() *schema.Resource { Update: resourceGithubActionsVariableUpdate, Delete: resourceGithubActionsVariableDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -26,11 +26,11 @@ func resourceGithubActionsVariable() *schema.Resource { Description: "Name of the repository.", }, "variable_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the variable.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the variable.", + ValidateDiagFunc: validateSecretNameFunc, }, "value": { Type: schema.TypeString, @@ -114,11 +114,21 @@ func resourceGithubActionsVariableRead(d *schema.ResourceData, meta interface{}) return err } - d.Set("repository", repoName) - d.Set("variable_name", variableName) - d.Set("value", variable.Value) - d.Set("created_at", variable.CreatedAt.String()) - d.Set("updated_at", variable.UpdatedAt.String()) + if err = d.Set("repository", repoName); err 
!= nil { + return err + } + if err = d.Set("variable_name", variableName); err != nil { + return err + } + if err = d.Set("value", variable.Value); err != nil { + return err + } + if err = d.Set("created_at", variable.CreatedAt.String()); err != nil { + return err + } + if err = d.Set("updated_at", variable.UpdatedAt.String()); err != nil { + return err + } return nil } diff --git a/github/resource_github_actions_variable_test.go b/github/resource_github_actions_variable_test.go index 863d3c9eb5..07aa4164c7 100644 --- a/github/resource_github_actions_variable_test.go +++ b/github/resource_github_actions_variable_test.go @@ -5,9 +5,9 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubActionsVariable(t *testing.T) { diff --git a/github/resource_github_app_installation_repositories.go b/github/resource_github_app_installation_repositories.go index 69436e98bd..7b9d389393 100644 --- a/github/resource_github_app_installation_repositories.go +++ b/github/resource_github_app_installation_repositories.go @@ -6,7 +6,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubAppInstallationRepositories() *schema.Resource { @@ -16,7 +16,7 @@ func resourceGithubAppInstallationRepositories() *schema.Resource { Update: resourceGithubAppInstallationRepositoriesCreateOrUpdate, Delete: resourceGithubAppInstallationRepositoriesDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -110,8 +110,12 @@ func resourceGithubAppInstallationRepositoriesRead(d *schema.ResourceData, meta } if len(reposNameIDs) > 0 { - d.Set("installation_id", installationIDString) - d.Set("selected_repositories", repoNames) + if err = d.Set("installation_id", installationIDString); err != nil { + return err + } + if err = d.Set("selected_repositories", repoNames); err != nil { + return err + } return nil } diff --git a/github/resource_github_app_installation_repositories_test.go b/github/resource_github_app_installation_repositories_test.go index 4c3c361f2f..45bd468f68 100644 --- a/github/resource_github_app_installation_repositories_test.go +++ b/github/resource_github_app_installation_repositories_test.go @@ -5,8 +5,8 @@ import ( "os" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubAppInstallationRepositories(t *testing.T) { diff --git a/github/resource_github_app_installation_repository.go b/github/resource_github_app_installation_repository.go index 1c3db976ab..378b30a119 100644 --- a/github/resource_github_app_installation_repository.go +++ b/github/resource_github_app_installation_repository.go @@ -6,7 +6,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubAppInstallationRepository() *schema.Resource { @@ -15,7 +15,7 @@ func 
resourceGithubAppInstallationRepository() *schema.Resource { Read: resourceGithubAppInstallationRepositoryRead, Delete: resourceGithubAppInstallationRepositoryDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -98,9 +98,15 @@ func resourceGithubAppInstallationRepositoryRead(d *schema.ResourceData, meta in for _, r := range repos.Repositories { if r.GetName() == repoName { - d.Set("installation_id", installationIDString) - d.Set("repository", repoName) - d.Set("repo_id", r.GetID()) + if err = d.Set("installation_id", installationIDString); err != nil { + return err + } + if err = d.Set("repository", repoName); err != nil { + return err + } + if err = d.Set("repo_id", r.GetID()); err != nil { + return err + } return nil } } diff --git a/github/resource_github_app_installation_repository_test.go b/github/resource_github_app_installation_repository_test.go index 6923dbe248..1a62ef8b78 100644 --- a/github/resource_github_app_installation_repository_test.go +++ b/github/resource_github_app_installation_repository_test.go @@ -5,8 +5,8 @@ import ( "os" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubAppInstallationRepository(t *testing.T) { diff --git a/github/resource_github_branch.go b/github/resource_github_branch.go index 3629b307dd..9287f54ff5 100644 --- a/github/resource_github_branch.go +++ b/github/resource_github_branch.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubBranch() *schema.Resource { @@ -86,7 +86,9 @@ func resourceGithubBranchCreate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error querying GitHub branch reference %s/%s (%s): %s", orgName, repoName, sourceBranchRefName, err) } - d.Set("source_sha", *ref.Object.SHA) + if err = d.Set("source_sha", *ref.Object.SHA); err != nil { + return err + } } sourceBranchSHA := d.Get("source_sha").(string) @@ -138,11 +140,21 @@ func resourceGithubBranchRead(d *schema.ResourceData, meta interface{}) error { } d.SetId(buildTwoPartID(repoName, branchName)) - d.Set("etag", resp.Header.Get("ETag")) - d.Set("repository", repoName) - d.Set("branch", branchName) - d.Set("ref", *ref.Ref) - d.Set("sha", *ref.Object.SHA) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("repository", repoName); err != nil { + return err + } + if err = d.Set("branch", branchName); err != nil { + return err + } + if err = d.Set("ref", *ref.Ref); err != nil { + return err + } + if err = d.Set("sha", *ref.Object.SHA); err != nil { + return err + } return nil } @@ -182,7 +194,9 @@ func resourceGithubBranchImport(d *schema.ResourceData, meta interface{}) ([]*sc d.SetId(buildTwoPartID(repoName, branchName)) } - d.Set("source_branch", sourceBranch) + if err = d.Set("source_branch", sourceBranch); err != nil { + return nil, err + } err = resourceGithubBranchRead(d, meta) if err != nil { diff --git a/github/resource_github_branch_default.go b/github/resource_github_branch_default.go index e1689342dc..e5f9276a2a 100644 --- a/github/resource_github_branch_default.go +++ 
b/github/resource_github_branch_default.go @@ -6,7 +6,7 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubBranchDefault() *schema.Resource { @@ -16,7 +16,7 @@ func resourceGithubBranchDefault() *schema.Resource { Delete: resourceGithubBranchDefaultDelete, Update: resourceGithubBranchDefaultUpdate, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ diff --git a/github/resource_github_branch_default_test.go b/github/resource_github_branch_default_test.go index 6d60d6a7d8..65c83e1bda 100644 --- a/github/resource_github_branch_default_test.go +++ b/github/resource_github_branch_default_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubBranchDefault(t *testing.T) { diff --git a/github/resource_github_branch_protection.go b/github/resource_github_branch_protection.go index 1470462530..3235aecd89 100644 --- a/github/resource_github_branch_protection.go +++ b/github/resource_github_branch_protection.go @@ -6,14 +6,14 @@ import ( "log" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/shurcooL/githubv4" ) func resourceGithubBranchProtection() *schema.Resource { return &schema.Resource{ - SchemaVersion: 1, + SchemaVersion: 2, Schema: map[string]*schema.Schema{ // Input @@ -40,12 +40,6 @@ func resourceGithubBranchProtection() *schema.Resource { Default: false, Description: "Setting this to 'true' to allow force pushes on the branch.", }, - PROTECTION_BLOCKS_CREATIONS: { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Setting this to 'true' to block creating the branch.", - }, PROTECTION_IS_ADMIN_ENFORCED: { Type: schema.TypeBool, Optional: true, @@ -83,11 +77,11 @@ func resourceGithubBranchProtection() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ PROTECTION_REQUIRED_APPROVING_REVIEW_COUNT: { - Type: schema.TypeInt, - Optional: true, - Default: 1, - Description: "Require 'x' number of approvals to satisfy branch protection requirements. If this is specified it must be a number between 0-6.", - ValidateFunc: validation.IntBetween(0, 6), + Type: schema.TypeInt, + Optional: true, + Default: 1, + Description: "Require 'x' number of approvals to satisfy branch protection requirements. If this is specified it must be a number between 0-6.", + ValidateDiagFunc: toDiagFunc(validation.IntBetween(0, 6), PROTECTION_REQUIRED_APPROVING_REVIEW_COUNT), }, PROTECTION_REQUIRES_CODE_OWNER_REVIEWS: { Type: schema.TypeBool, @@ -104,7 +98,7 @@ func resourceGithubBranchProtection() *schema.Resource { Optional: true, Description: "Restrict pull request review dismissals.", }, - PROTECTION_RESTRICTS_REVIEW_DISMISSERS: { + PROTECTION_REVIEW_DISMISSAL_ALLOWANCES: { Type: schema.TypeSet, Optional: true, Description: "The list of actor Names/IDs with dismissal access. 
If not empty, 'restrict_dismissals' is ignored. Actor names must either begin with a '/' for users or the organization name followed by a '/' for teams.", @@ -116,7 +110,7 @@ func resourceGithubBranchProtection() *schema.Resource { Description: "The list of actor Names/IDs that are allowed to bypass pull request requirements. Actor names must either begin with a '/' for users or the organization name followed by a '/' for teams.", Elem: &schema.Schema{Type: schema.TypeString}, }, - PROTECTION_REQUIRES_LAST_PUSH_APPROVAL: { + PROTECTION_REQUIRE_LAST_PUSH_APPROVAL: { Type: schema.TypeBool, Optional: true, Default: false, @@ -146,10 +140,25 @@ func resourceGithubBranchProtection() *schema.Resource { }, }, PROTECTION_RESTRICTS_PUSHES: { - Type: schema.TypeSet, + Type: schema.TypeList, Optional: true, - Description: "The list of actor Names/IDs that may push to the branch. Actor names must either begin with a '/' for users or the organization name followed by a '/' for teams.", - Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Restrict who can push to matching branches.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + PROTECTION_BLOCKS_CREATIONS: { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Restrict pushes that create matching branches.", + }, + PROTECTION_PUSH_ALLOWANCES: { + Type: schema.TypeSet, + Optional: true, + Description: "The list of actor Names/IDs that may push to the branch. Actor names must either begin with a '/' for users or the organization name followed by a '/' for teams.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, }, PROTECTION_FORCE_PUSHES_BYPASSERS: { Type: schema.TypeSet, @@ -174,6 +183,11 @@ func resourceGithubBranchProtection() *schema.Resource { Upgrade: resourceGithubBranchProtectionUpgradeV0, Version: 0, }, + { + Type: resourceGithubBranchProtectionV1().CoreConfigSchema().ImpliedType(), + Upgrade: resourceGithubBranchProtectionUpgradeV1, + Version: 1, + }, }, } } @@ -265,7 +279,6 @@ func resourceGithubBranchProtectionRead(d *schema.ResourceData, meta interface{} variables := map[string]interface{}{ "id": d.Id(), } - ctx := context.WithValue(context.Background(), ctxId, d.Id()) client := meta.(*Owner).v4client err := client.Query(ctx, &query, variables) @@ -278,7 +291,6 @@ func resourceGithubBranchProtectionRead(d *schema.ResourceData, meta interface{} return err } - protection := query.Node.Node err = d.Set(PROTECTION_PATTERN, protection.Pattern) @@ -296,11 +308,6 @@ func resourceGithubBranchProtectionRead(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] Problem setting '%s' in %s %s branch protection (%s)", PROTECTION_ALLOWS_FORCE_PUSHES, protection.Repository.Name, protection.Pattern, d.Id()) } - err = d.Set(PROTECTION_BLOCKS_CREATIONS, protection.BlocksCreations) - if err != nil { - log.Printf("[DEBUG] Problem setting '%s' in %s %s branch protection (%s)", PROTECTION_BLOCKS_CREATIONS, protection.Repository.Name, protection.Pattern, d.Id()) - } - err = d.Set(PROTECTION_IS_ADMIN_ENFORCED, protection.IsAdminEnforced) if err != nil { log.Printf("[DEBUG] Problem setting '%s' in %s %s branch protection (%s)", PROTECTION_IS_ADMIN_ENFORCED, protection.Repository.Name, protection.Pattern, d.Id()) @@ -350,11 +357,6 @@ func resourceGithubBranchProtectionRead(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] Problem setting '%s' in %s %s branch protection (%s)", PROTECTION_FORCE_PUSHES_BYPASSERS, protection.Repository.Name, protection.Pattern, d.Id()) } - err = 
d.Set(PROTECTION_REQUIRES_LAST_PUSH_APPROVAL, protection.RequireLastPushApproval) - if err != nil { - log.Printf("[DEBUG] Problem setting '%s' in %s %s branch protection (%s)", PROTECTION_REQUIRES_LAST_PUSH_APPROVAL, protection.Repository.Name, protection.Pattern, d.Id()) - } - err = d.Set(PROTECTION_LOCK_BRANCH, protection.LockBranch) if err != nil { log.Printf("[DEBUG] Problem setting '%s' in %s %s branch protection (%s)", PROTECTION_LOCK_BRANCH, protection.Repository.Name, protection.Pattern, d.Id()) @@ -468,7 +470,9 @@ func resourceGithubBranchProtectionImport(d *schema.ResourceData, meta interface if err != nil { return nil, err } - d.Set("repository_id", repoID) + if err = d.Set("repository_id", repoID); err != nil { + return nil, err + } id, err := getBranchProtectionID(repoID, pattern, meta) if err != nil { diff --git a/github/resource_github_branch_protection_test.go b/github/resource_github_branch_protection_test.go index 7d883babcc..2cbe21e777 100644 --- a/github/resource_github_branch_protection_test.go +++ b/github/resource_github_branch_protection_test.go @@ -1,20 +1,21 @@ package github import ( + "context" "fmt" + "reflect" "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func TestAccGithubBranchProtection(t *testing.T) { - - randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) +func TestAccGithubBranchProtectionV4(t *testing.T) { t.Run("configures default settings when empty", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) config := fmt.Sprintf(` @@ -25,8 +26,8 @@ func TestAccGithubBranchProtection(t *testing.T) { resource "github_branch_protection" "test" { - repository_id = github_repository.test.node_id - pattern = "main" + repository_id = github_repository.test.node_id + pattern = "main" } @@ -49,7 +50,7 @@ func TestAccGithubBranchProtection(t *testing.T) { "github_branch_protection.test", "required_pull_request_reviews.#", "0", ), resource.TestCheckResourceAttr( - "github_branch_protection.test", "push_restrictions.#", "0", + "github_branch_protection.test", "restrict_pushes.#", "0", ), resource.TestCheckResourceAttr( "github_branch_protection.test", "lock_branch", "false", @@ -77,7 +78,7 @@ func TestAccGithubBranchProtection(t *testing.T) { ResourceName: "github_branch_protection.test", ImportState: true, ExpectError: regexp.MustCompile( - `Could not find a branch protection rule with the pattern 'no-such-pattern'\.`, + `could not find a branch protection rule with the pattern 'no-such-pattern'`, ), ImportStateIdFunc: importBranchProtectionByRepoName( fmt.Sprintf("tf-acc-test-%s", randomID), "no-such-pattern", @@ -102,6 +103,7 @@ func TestAccGithubBranchProtection(t *testing.T) { }) t.Run("configures default settings when conversation resolution is true", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) config := fmt.Sprintf(` @@ -112,8 +114,8 @@ func TestAccGithubBranchProtection(t *testing.T) { resource "github_branch_protection" "test" { - repository_id = github_repository.test.node_id - pattern = "main" + repository_id = github_repository.test.node_id + pattern = "main" require_conversation_resolution = true } @@ -140,7 
+142,7 @@ func TestAccGithubBranchProtection(t *testing.T) { "github_branch_protection.test", "required_pull_request_reviews.#", "0", ), resource.TestCheckResourceAttr( - "github_branch_protection.test", "push_restrictions.#", "0", + "github_branch_protection.test", "restrict_pushes.#", "0", ), resource.TestCheckResourceAttr( "github_branch_protection.test", "lock_branch", "false", @@ -168,7 +170,7 @@ func TestAccGithubBranchProtection(t *testing.T) { ResourceName: "github_branch_protection.test", ImportState: true, ExpectError: regexp.MustCompile( - `Could not find a branch protection rule with the pattern 'no-such-pattern'\.`, + `could not find a branch protection rule with the pattern 'no-such-pattern'`, ), ImportStateIdFunc: importBranchProtectionByRepoName( fmt.Sprintf("tf-acc-test-%s", randomID), "no-such-pattern", @@ -193,6 +195,7 @@ func TestAccGithubBranchProtection(t *testing.T) { }) t.Run("configures required status checks", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) config := fmt.Sprintf(` @@ -203,10 +206,10 @@ func TestAccGithubBranchProtection(t *testing.T) { resource "github_branch_protection" "test" { - repository_id = github_repository.test.node_id - pattern = "main" + repository_id = github_repository.test.node_id + pattern = "main" - required_status_checks { + required_status_checks { strict = true contexts = ["github/foo"] } @@ -241,7 +244,7 @@ func TestAccGithubBranchProtection(t *testing.T) { ResourceName: "github_branch_protection.test", ImportState: true, ExpectError: regexp.MustCompile( - `Could not find a branch protection rule with the pattern 'no-such-pattern'\.`, + `could not find a branch protection rule with the pattern 'no-such-pattern'`, ), ImportStateIdFunc: importBranchProtectionByRepoID( "github_repository.test", "no-such-pattern"), @@ -265,6 +268,7 @@ func TestAccGithubBranchProtection(t *testing.T) { }) t.Run("configures required pull request reviews", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) config := fmt.Sprintf(` @@ -275,14 +279,14 @@ func TestAccGithubBranchProtection(t *testing.T) { resource "github_branch_protection" "test" { - repository_id = github_repository.test.node_id - pattern = "main" + repository_id = github_repository.test.node_id + pattern = "main" - required_pull_request_reviews { - dismiss_stale_reviews = true - require_code_owner_reviews = true - require_last_push_approval = true - } + required_pull_request_reviews { + dismiss_stale_reviews = true + require_code_owner_reviews = true + require_last_push_approval = true + } } @@ -334,6 +338,64 @@ func TestAccGithubBranchProtection(t *testing.T) { }) t.Run("configures branch push restrictions", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + + config := fmt.Sprintf(` + resource "github_repository" "test" { + name = "tf-acc-test-%s" + auto_init = true + } + + resource "github_branch_protection" "test" { + + repository_id = github_repository.test.node_id + pattern = "main" + + restrict_pushes {} + } + `, randomID) + + check := resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr( + "github_branch_protection.test", "restrict_pushes.#", "1", + ), + resource.TestCheckResourceAttr( + "github_branch_protection.test", "restrict_pushes.0.blocks_creations", "true", + ), + resource.TestCheckResourceAttr( + "github_branch_protection.test", "restrict_pushes.0.push_allowances.#", "0", + ), + ) + + testCase := func(t *testing.T, mode 
string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + }, + }) + } + + t.Run("with an anonymous account", func(t *testing.T) { + t.Skip("anonymous account not supported for this operation") + }) + + t.Run("with an individual account", func(t *testing.T) { + t.Skip("individual account not supported for this operation") + }) + + t.Run("with an organization account", func(t *testing.T) { + testCase(t, organization) + }) + + }) + + t.Run("configures branch push restrictions with node_id", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) config := fmt.Sprintf(` resource "github_repository" "test" { @@ -347,19 +409,26 @@ func TestAccGithubBranchProtection(t *testing.T) { resource "github_branch_protection" "test" { - repository_id = github_repository.test.name - pattern = "main" - - push_restrictions = [ - data.github_user.test.node_id, - ] + repository_id = github_repository.test.node_id + pattern = "main" + restrict_pushes { + push_allowances = [ + data.github_user.test.node_id, + ] + } } `, randomID, testOwnerFunc()) check := resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr( - "github_branch_protection.test", "push_restrictions.#", "1", + "github_branch_protection.test", "restrict_pushes.#", "1", + ), + resource.TestCheckResourceAttr( + "github_branch_protection.test", "restrict_pushes.0.blocks_creations", "true", + ), + resource.TestCheckResourceAttr( + "github_branch_protection.test", "restrict_pushes.0.push_allowances.#", "1", ), ) @@ -391,6 +460,7 @@ func TestAccGithubBranchProtection(t *testing.T) { }) t.Run("configures branch push restrictions with username", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) user := fmt.Sprintf("/%s", testOwnerFunc()) config := fmt.Sprintf(` @@ -401,19 +471,26 @@ func TestAccGithubBranchProtection(t *testing.T) { resource "github_branch_protection" "test" { - repository_id = github_repository.test.name - pattern = "main" - - push_restrictions = [ - "%s", - ] + repository_id = github_repository.test.node_id + pattern = "main" + restrict_pushes { + push_allowances = [ + "%s", + ] + } } `, randomID, user) check := resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr( - "github_branch_protection.test", "push_restrictions.#", "1", + "github_branch_protection.test", "restrict_pushes.#", "1", + ), + resource.TestCheckResourceAttr( + "github_branch_protection.test", "restrict_pushes.0.blocks_creations", "true", + ), + resource.TestCheckResourceAttr( + "github_branch_protection.test", "restrict_pushes.0.push_allowances.#", "1", ), ) @@ -444,7 +521,8 @@ func TestAccGithubBranchProtection(t *testing.T) { }) - t.Run("configures force pushes and deletions", func(t *testing.T) { + t.Run("configures branch push restrictions with blocksCreations false", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) config := fmt.Sprintf(` resource "github_repository" "test" { @@ -452,26 +530,26 @@ func TestAccGithubBranchProtection(t *testing.T) { auto_init = true } - data "github_user" "test" { - username = "%s" - } - resource "github_branch_protection" "test" { - repository_id = github_repository.test.name - pattern = "main" - allows_deletions = true - allows_force_pushes = true + repository_id = github_repository.test.node_id + pattern = "main" + restrict_pushes { + 
blocks_creations = false + } } - `, randomID, testOwnerFunc()) + `, randomID) check := resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr( - "github_branch_protection.test", "allows_deletions", "true", + "github_branch_protection.test", "restrict_pushes.#", "1", ), resource.TestCheckResourceAttr( - "github_branch_protection.test", "allows_force_pushes", "true", + "github_branch_protection.test", "restrict_pushes.0.blocks_creations", "false", + ), + resource.TestCheckResourceAttr( + "github_branch_protection.test", "restrict_pushes.0.push_allowances.#", "0", ), ) @@ -502,7 +580,8 @@ func TestAccGithubBranchProtection(t *testing.T) { }) - t.Run("configures blocksCreations", func(t *testing.T) { + t.Run("configures force pushes and deletions", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) config := fmt.Sprintf(` resource "github_repository" "test" { @@ -516,16 +595,21 @@ func TestAccGithubBranchProtection(t *testing.T) { resource "github_branch_protection" "test" { - repository_id = github_repository.test.name - pattern = "main" - blocks_creations = true + repository_id = github_repository.test.node_id + pattern = "main" + + allows_deletions = true + allows_force_pushes = true } `, randomID, testOwnerFunc()) check := resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr( - "github_branch_protection.test", "blocks_creations", "true", + "github_branch_protection.test", "allows_deletions", "true", + ), + resource.TestCheckResourceAttr( + "github_branch_protection.test", "allows_force_pushes", "true", ), ) @@ -557,6 +641,7 @@ func TestAccGithubBranchProtection(t *testing.T) { }) t.Run("configures non-empty list of force push bypassers", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) config := fmt.Sprintf(` @@ -571,12 +656,12 @@ func TestAccGithubBranchProtection(t *testing.T) { resource "github_branch_protection" "test" { - repository_id = github_repository.test.node_id - pattern = "main" + repository_id = github_repository.test.node_id + pattern = "main" - force_push_bypassers = [ - data.github_user.test.node_id - ] + force_push_bypassers = [ + data.github_user.test.node_id + ] } @@ -615,7 +700,8 @@ func TestAccGithubBranchProtection(t *testing.T) { }) - t.Run("configures empty list of force push bypassers", func(t *testing.T) { + t.Run("configures allow force push with a team as bypasser", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) config := fmt.Sprintf(` @@ -624,12 +710,72 @@ func TestAccGithubBranchProtection(t *testing.T) { auto_init = true } - resource "github_branch_protection" "test" { + resource "github_team" "test" { + name = "tf-acc-test-%s" + } + resource "github_team_repository" "test" { + team_id = github_team.test.id + repository = github_repository.test.name + permission = "admin" + } + + resource "github_branch_protection" "test" { repository_id = github_repository.test.node_id pattern = "main" - force_push_bypassers = [] + force_push_bypassers = [ + "%s/${github_team.test.slug}" + ] + } + + `, randomID, randomID, testOrganization) + + check := resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr( + "github_branch_protection.test", "force_push_bypassers.#", "1", + ), + resource.TestCheckResourceAttr( + "github_branch_protection.test", "allows_force_pushes", "false", + ), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { 
skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + }, + }) + } + + // This test only works with an organization account + t.Run("with an organization account", func(t *testing.T) { + testCase(t, organization) + }) + + }) + + t.Run("configures empty list of force push bypassers", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + + config := fmt.Sprintf(` + + resource "github_repository" "test" { + name = "tf-acc-test-%s" + auto_init = true + } + + resource "github_branch_protection" "test" { + + repository_id = github_repository.test.node_id + pattern = "main" + + force_push_bypassers = [] } @@ -669,7 +815,9 @@ func TestAccGithubBranchProtection(t *testing.T) { }) t.Run("configures non-empty list of pull request bypassers", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + user := fmt.Sprintf("/%s", testOwnerFunc()) config := fmt.Sprintf(` resource "github_repository" "test" { @@ -679,26 +827,23 @@ func TestAccGithubBranchProtection(t *testing.T) { resource "github_branch_protection" "test" { - repository_id = github_repository.test.node_id - pattern = "main" + repository_id = github_repository.test.node_id + pattern = "main" - required_pull_request_reviews { - pull_request_bypassers = [ - "1234", - ] - } + required_pull_request_reviews { + pull_request_bypassers = [ + "%s", + ] + } } - `, randomID) + `, randomID, user) check := resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr( "github_branch_protection.test", "required_pull_request_reviews.0.pull_request_bypassers.#", "1", ), - resource.TestCheckResourceAttr( - "github_branch_protection.test", "required_pull_request_reviews.0.pull_request_bypassers.0", "1234", - ), ) testCase := func(t *testing.T, mode string) { @@ -729,6 +874,7 @@ func TestAccGithubBranchProtection(t *testing.T) { }) t.Run("configures empty list of pull request bypassers", func(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) config := fmt.Sprintf(` @@ -742,9 +888,9 @@ func TestAccGithubBranchProtection(t *testing.T) { repository_id = github_repository.test.node_id pattern = "main" - required_pull_request_reviews { - pull_request_bypassers = [] - } + required_pull_request_reviews { + pull_request_bypassers = [] + } } @@ -807,3 +953,32 @@ func importBranchProtectionByRepoID(repoLogicalName, pattern string) resource.Im return fmt.Sprintf("%s:%s", repoID, pattern), nil } } + +func testGithubBranchProtectionStateDataV1() map[string]interface{} { + return map[string]interface{}{ + "blocks_creations": true, + "push_restrictions": [...]string{"/example-user"}, + } +} + +func testGithubBranchProtectionStateDataV2() map[string]interface{} { + restrictions := []interface{}{map[string]interface{}{ + "blocks_creations": true, + "push_allowances": [...]string{"/example-user"}, + }} + return map[string]interface{}{ + "restrict_pushes": restrictions, + } +} + +func TestAccGithubBranchProtectionV4StateUpgradeV1(t *testing.T) { + expected := testGithubBranchProtectionStateDataV2() + actual, err := resourceGithubBranchProtectionUpgradeV1(context.Background(), testGithubBranchProtectionStateDataV1(), nil) + if err != nil { + t.Fatalf("error migrating state: %s", err) + } + + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("\n\nexpected:\n\n%#v\n\ngot:\n\n%#v\n\n", expected, actual) + } +} diff --git a/github/resource_github_branch_protection_v3.go 
b/github/resource_github_branch_protection_v3.go index 9943a62a77..af9801a2de 100644 --- a/github/resource_github_branch_protection_v3.go +++ b/github/resource_github_branch_protection_v3.go @@ -7,8 +7,8 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubBranchProtectionV3() *schema.Resource { @@ -18,7 +18,7 @@ func resourceGithubBranchProtectionV3() *schema.Resource { Update: resourceGithubBranchProtectionV3Update, Delete: resourceGithubBranchProtectionV3Delete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -122,11 +122,17 @@ func resourceGithubBranchProtectionV3() *schema.Resource { Description: "Require an approved review in pull requests including files with a designated code owner.", }, "required_approving_review_count": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - Description: "Require 'x' number of approvals to satisfy branch protection requirements. If this is specified it must be a number between 0-6.", - ValidateFunc: validation.IntBetween(0, 6), + Type: schema.TypeInt, + Optional: true, + Default: 1, + Description: "Require 'x' number of approvals to satisfy branch protection requirements. If this is specified it must be a number between 0-6.", + ValidateDiagFunc: toDiagFunc(validation.IntBetween(0, 6), "required_approving_review_count"), + }, + "require_last_push_approval": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Require that the most recent push must be approved by someone other than the last pusher.", }, "bypass_pull_request_allowances": { Type: schema.TypeList, @@ -290,12 +296,22 @@ func resourceGithubBranchProtectionV3Read(d *schema.ResourceData, meta interface return err } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("repository", repoName) - d.Set("branch", branch) - d.Set("enforce_admins", githubProtection.GetEnforceAdmins().Enabled) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("repository", repoName); err != nil { + return err + } + if err = d.Set("branch", branch); err != nil { + return err + } + if err = d.Set("enforce_admins", githubProtection.GetEnforceAdmins().Enabled); err != nil { + return err + } if rcr := githubProtection.GetRequiredConversationResolution(); rcr != nil { - d.Set("require_conversation_resolution", rcr.Enabled) + if err = d.Set("require_conversation_resolution", rcr.Enabled); err != nil { + return err + } } if err := flattenAndSetRequiredStatusChecks(d, githubProtection); err != nil { diff --git a/github/resource_github_branch_protection_v3_test.go b/github/resource_github_branch_protection_v3_test.go index 80d7ab1c16..a585221ce4 100644 --- a/github/resource_github_branch_protection_v3_test.go +++ b/github/resource_github_branch_protection_v3_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubBranchProtectionV3_defaults(t *testing.T) { @@ -320,6 +320,7 @@ func 
TestAccGithubBranchProtectionV3_required_pull_request_reviews(t *testing.T) dismiss_stale_reviews = true require_code_owner_reviews = true required_approving_review_count = 1 + require_last_push_approval = true dismissal_users = ["a"] dismissal_teams = ["b"] dismissal_apps = ["c"] @@ -347,6 +348,9 @@ func TestAccGithubBranchProtectionV3_required_pull_request_reviews(t *testing.T) resource.TestCheckResourceAttr( "github_branch_protection_v3.test", "required_pull_request_reviews.0.required_approving_review_count", "1", ), + resource.TestCheckResourceAttr( + "github_branch_protection_v3.test", "required_pull_request_reviews.0.require_last_push_approval", "true", + ), resource.TestCheckResourceAttr( "github_branch_protection_v3.test", "required_pull_request_reviews.0.dismissal_users.#", "1", ), diff --git a/github/resource_github_branch_protection_v3_utils.go b/github/resource_github_branch_protection_v3_utils.go index ba71972653..88ab01bc8c 100644 --- a/github/resource_github_branch_protection_v3_utils.go +++ b/github/resource_github_branch_protection_v3_utils.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func buildProtectionRequest(d *schema.ResourceData) (*github.ProtectionRequest, error) { @@ -195,6 +195,7 @@ func flattenAndSetRequiredPullRequestReviews(d *schema.ResourceData, protection "dismissal_teams": schema.NewSet(schema.HashString, teams), "dismissal_apps": schema.NewSet(schema.HashString, apps), "require_code_owner_reviews": rprr.RequireCodeOwnerReviews, + "require_last_push_approval": rprr.RequireLastPushApproval, "required_approving_review_count": rprr.RequiredApprovingReviewCount, "bypass_pull_request_allowances": bpra, }, @@ -351,6 +352,8 @@ func expandRequiredPullRequestReviews(d *schema.ResourceData) (*github.PullReque rprr.DismissStaleReviews = m["dismiss_stale_reviews"].(bool) rprr.RequireCodeOwnerReviews = m["require_code_owner_reviews"].(bool) rprr.RequiredApprovingReviewCount = m["required_approving_review_count"].(int) + requireLastPushApproval := m["require_last_push_approval"].(bool) + rprr.RequireLastPushApproval = &requireLastPushApproval rprr.BypassPullRequestAllowancesRequest = bpra } diff --git a/github/resource_github_branch_test.go b/github/resource_github_branch_test.go index af348e8e5a..b5844a6e6e 100644 --- a/github/resource_github_branch_test.go +++ b/github/resource_github_branch_test.go @@ -5,8 +5,8 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubBranch(t *testing.T) { diff --git a/github/resource_github_codespaces_organization_secret.go b/github/resource_github_codespaces_organization_secret.go index 18d91eeb0a..663c76635f 100644 --- a/github/resource_github_codespaces_organization_secret.go +++ b/github/resource_github_codespaces_organization_secret.go @@ -8,8 +8,8 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubCodespacesOrganizationSecret() *schema.Resource { 
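[Editor's note] The hunks above and below repeatedly replace SDKv1-style ValidateFunc entries with ValidateDiagFunc: toDiagFunc(...). The toDiagFunc helper itself is defined elsewhere in the provider and is not part of this diff, so the following is only a sketch of the shape such an adapter plausibly takes; names and details are assumptions. (SDKv2 also ships a stock validation.ToDiagFunc, but it does not accept an attribute name.)

package github

import (
	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// toDiagFunc adapts a legacy SchemaValidateFunc (warnings and errors returned
// as plain slices) to SDKv2's SchemaValidateDiagFunc, labeling each diagnostic
// with the attribute name so validation failures point at the right field.
// Sketch only: the provider's real helper may differ.
func toDiagFunc(validate schema.SchemaValidateFunc, attr string) schema.SchemaValidateDiagFunc {
	return func(v interface{}, path cty.Path) diag.Diagnostics {
		var diags diag.Diagnostics
		warnings, errs := validate(v, attr)
		for _, w := range warnings {
			diags = append(diags, diag.Diagnostic{Severity: diag.Warning, Summary: w})
		}
		for _, e := range errs {
			diags = append(diags, diag.Diagnostic{Severity: diag.Error, Summary: e.Error()})
		}
		return diags
	}
}

Note that validateSecretNameFunc and validateValueFunc are assigned to ValidateDiagFunc directly in these hunks, which suggests they were rewritten to return diag.Diagnostics themselves.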
@@ -20,27 +20,29 @@ func resourceGithubCodespacesOrganizationSecret() *schema.Resource { Delete: resourceGithubCodespacesOrganizationSecretDelete, Importer: &schema.ResourceImporter{ State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("secret_name", d.Id()) + if err := d.Set("secret_name", d.Id()); err != nil { + return nil, err + } return []*schema.ResourceData{d}, nil }, }, Schema: map[string]*schema.Schema{ "secret_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the secret.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the secret.", + ValidateDiagFunc: validateSecretNameFunc, }, "encrypted_value": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Sensitive: true, - ConflictsWith: []string{"plaintext_value"}, - Description: "Encrypted value of the secret using the GitHub public key in Base64 format.", - ValidateFunc: validation.StringIsBase64, + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Sensitive: true, + ConflictsWith: []string{"plaintext_value"}, + Description: "Encrypted value of the secret using the GitHub public key in Base64 format.", + ValidateDiagFunc: toDiagFunc(validation.StringIsBase64, "encrypted_value"), }, "plaintext_value": { Type: schema.TypeString, @@ -51,11 +53,11 @@ func resourceGithubCodespacesOrganizationSecret() *schema.Resource { ConflictsWith: []string{"encrypted_value"}, }, "visibility": { - Type: schema.TypeString, - Required: true, - Description: "Configures the access that repositories have to the organization secret. Must be one of 'all', 'private' or 'selected'. 'selected_repository_ids' is required if set to 'selected'.", - ValidateFunc: validateValueFunc([]string{"all", "private", "selected"}), - ForceNew: true, + Type: schema.TypeString, + Required: true, + Description: "Configures the access that repositories have to the organization secret. Must be one of 'all', 'private' or 'selected'. 'selected_repository_ids' is required if set to 'selected'.", + ValidateDiagFunc: validateValueFunc([]string{"all", "private", "selected"}), + ForceNew: true, }, "selected_repository_ids": { Type: schema.TypeSet, @@ -157,10 +159,18 @@ func resourceGithubCodespacesOrganizationSecretRead(d *schema.ResourceData, meta return err } - d.Set("encrypted_value", d.Get("encrypted_value")) - d.Set("plaintext_value", d.Get("plaintext_value")) - d.Set("created_at", secret.CreatedAt.String()) - d.Set("visibility", secret.Visibility) + if err = d.Set("encrypted_value", d.Get("encrypted_value")); err != nil { + return err + } + if err = d.Set("plaintext_value", d.Get("plaintext_value")); err != nil { + return err + } + if err = d.Set("created_at", secret.CreatedAt.String()); err != nil { + return err + } + if err = d.Set("visibility", secret.Visibility); err != nil { + return err + } selectedRepositoryIDs := []int64{} @@ -185,7 +195,9 @@ func resourceGithubCodespacesOrganizationSecretRead(d *schema.ResourceData, meta } } - d.Set("selected_repository_ids", selectedRepositoryIDs) + if err = d.Set("selected_repository_ids", selectedRepositoryIDs); err != nil { + return err + } // This is a drift detection mechanism based on timestamps. 
// @@ -206,7 +218,9 @@ func resourceGithubCodespacesOrganizationSecretRead(d *schema.ResourceData, meta log.Printf("[WARN] The secret %s has been externally updated in GitHub", d.Id()) d.SetId("") } else if !ok { - d.Set("updated_at", secret.UpdatedAt.String()) + if err = d.Set("updated_at", secret.UpdatedAt.String()); err != nil { + return err + } } return nil diff --git a/github/resource_github_codespaces_organization_secret_repositories.go b/github/resource_github_codespaces_organization_secret_repositories.go index 712924e2e7..8ddf3fc14a 100644 --- a/github/resource_github_codespaces_organization_secret_repositories.go +++ b/github/resource_github_codespaces_organization_secret_repositories.go @@ -4,7 +4,7 @@ import ( "context" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubCodespacesOrganizationSecretRepositories() *schema.Resource { @@ -14,16 +14,16 @@ func resourceGithubCodespacesOrganizationSecretRepositories() *schema.Resource { Update: resourceGithubCodespaceOrganizationSecretRepositoriesCreateOrUpdate, Delete: resourceGithubCodespaceOrganizationSecretRepositoriesDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "secret_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the existing secret.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the existing secret.", + ValidateDiagFunc: validateSecretNameFunc, }, "selected_repository_ids": { Type: schema.TypeSet, diff --git a/github/resource_github_codespaces_organization_secret_repositories_test.go b/github/resource_github_codespaces_organization_secret_repositories_test.go index df27a7ef34..0c373f922e 100644 --- a/github/resource_github_codespaces_organization_secret_repositories_test.go +++ b/github/resource_github_codespaces_organization_secret_repositories_test.go @@ -5,8 +5,8 @@ import ( "os" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubCodespacesOrganizationSecretRepositories(t *testing.T) { diff --git a/github/resource_github_codespaces_organization_secret_test.go b/github/resource_github_codespaces_organization_secret_test.go index c3adbe80c4..597695dcc1 100644 --- a/github/resource_github_codespaces_organization_secret_test.go +++ b/github/resource_github_codespaces_organization_secret_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubCodespacesOrganizationSecret(t *testing.T) { diff --git a/github/resource_github_codespaces_secret.go b/github/resource_github_codespaces_secret.go index 86984b3b5e..ede59eb544 100644 --- a/github/resource_github_codespaces_secret.go +++ b/github/resource_github_codespaces_secret.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubCodespacesSecret() *schema.Resource 
{ @@ -29,11 +29,11 @@ func resourceGithubCodespacesSecret() *schema.Resource { Description: "Name of the repository.", }, "secret_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the secret.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the secret.", + ValidateDiagFunc: validateSecretNameFunc, }, "encrypted_value": { Type: schema.TypeString, @@ -129,9 +129,15 @@ func resourceGithubCodespacesSecretRead(d *schema.ResourceData, meta interface{} return err } - d.Set("encrypted_value", d.Get("encrypted_value")) - d.Set("plaintext_value", d.Get("plaintext_value")) - d.Set("created_at", secret.CreatedAt.String()) + if err = d.Set("encrypted_value", d.Get("encrypted_value")); err != nil { + return err + } + if err = d.Set("plaintext_value", d.Get("plaintext_value")); err != nil { + return err + } + if err = d.Set("created_at", secret.CreatedAt.String()); err != nil { + return err + } // This is a drift detection mechanism based on timestamps. // @@ -152,7 +158,9 @@ func resourceGithubCodespacesSecretRead(d *schema.ResourceData, meta interface{} log.Printf("[WARN] The secret %s has been externally updated in GitHub", d.Id()) d.SetId("") } else if !ok { - d.Set("updated_at", secret.UpdatedAt.String()) + if err = d.Set("updated_at", secret.UpdatedAt.String()); err != nil { + return err + } } return nil @@ -196,13 +204,21 @@ func resourceGithubCodespacesSecretImport(d *schema.ResourceData, meta interface return nil, err } - d.Set("repository", repoName) - d.Set("secret_name", secretName) + if err = d.Set("repository", repoName); err != nil { + return nil, err + } + if err = d.Set("secret_name", secretName); err != nil { + return nil, err + } // encrypted_value or plaintext_value can not be imported - d.Set("created_at", secret.CreatedAt.String()) - d.Set("updated_at", secret.UpdatedAt.String()) + if err = d.Set("created_at", secret.CreatedAt.String()); err != nil { + return nil, err + } + if err = d.Set("updated_at", secret.UpdatedAt.String()); err != nil { + return nil, err + } return []*schema.ResourceData{d}, nil } diff --git a/github/resource_github_codespaces_secret_test.go b/github/resource_github_codespaces_secret_test.go index c4cef95376..3ed2708b26 100644 --- a/github/resource_github_codespaces_secret_test.go +++ b/github/resource_github_codespaces_secret_test.go @@ -6,9 +6,9 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubCodespacesSecret(t *testing.T) { diff --git a/github/resource_github_codespaces_user_secret.go b/github/resource_github_codespaces_user_secret.go index cc1d9975f6..3aeb37e97b 100644 --- a/github/resource_github_codespaces_user_secret.go +++ b/github/resource_github_codespaces_user_secret.go @@ -7,8 +7,8 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubCodespacesUserSecret() *schema.Resource { @@ -19,27 +19,29 @@ func resourceGithubCodespacesUserSecret() *schema.Resource { Delete: 
resourceGithubCodespacesUserSecretDelete, Importer: &schema.ResourceImporter{ State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("secret_name", d.Id()) + if err := d.Set("secret_name", d.Id()); err != nil { + return nil, err + } return []*schema.ResourceData{d}, nil }, }, Schema: map[string]*schema.Schema{ "secret_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the secret.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the secret.", + ValidateDiagFunc: validateSecretNameFunc, }, "encrypted_value": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Sensitive: true, - ConflictsWith: []string{"plaintext_value"}, - Description: "Encrypted value of the secret using the GitHub public key in Base64 format.", - ValidateFunc: validation.StringIsBase64, + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Sensitive: true, + ConflictsWith: []string{"plaintext_value"}, + Description: "Encrypted value of the secret using the GitHub public key in Base64 format.", + ValidateDiagFunc: toDiagFunc(validation.StringIsBase64, "encrypted_value"), }, "plaintext_value": { Type: schema.TypeString, @@ -141,9 +143,15 @@ func resourceGithubCodespacesUserSecretRead(d *schema.ResourceData, meta interfa return err } - d.Set("encrypted_value", d.Get("encrypted_value")) - d.Set("plaintext_value", d.Get("plaintext_value")) - d.Set("created_at", secret.CreatedAt.String()) + if err = d.Set("encrypted_value", d.Get("encrypted_value")); err != nil { + return err + } + if err = d.Set("plaintext_value", d.Get("plaintext_value")); err != nil { + return err + } + if err = d.Set("created_at", secret.CreatedAt.String()); err != nil { + return err + } selectedRepositoryIDs := []int64{} @@ -166,7 +174,9 @@ func resourceGithubCodespacesUserSecretRead(d *schema.ResourceData, meta interfa opt.Page = resp.NextPage } - d.Set("selected_repository_ids", selectedRepositoryIDs) + if err = d.Set("selected_repository_ids", selectedRepositoryIDs); err != nil { + return err + } // This is a drift detection mechanism based on timestamps. 
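Beyond the drift check, the schema hunks above replace every SDK v1 ValidateFunc with a v2 ValidateDiagFunc by way of toDiagFunc, a provider helper this patch uses but does not define. A plausible sketch of such an adapter follows; the body is an assumption, not the provider's actual code (SDK v2 also ships validation.ToDiagFunc for the same purpose):

package github

import (
	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// toDiagFunc (hypothetical sketch): adapt a v1-style SchemaValidateFunc,
// which returns ([]string, []error), into the SchemaValidateDiagFunc that
// SDK v2 schemas expect. key is only used to label the diagnostics.
func toDiagFunc(fn schema.SchemaValidateFunc, key string) schema.SchemaValidateDiagFunc {
	return func(v interface{}, _ cty.Path) diag.Diagnostics {
		var diags diag.Diagnostics
		warnings, errs := fn(v, key)
		for _, w := range warnings {
			diags = append(diags, diag.Diagnostic{Severity: diag.Warning, Summary: w})
		}
		for _, e := range errs {
			diags = append(diags, diag.FromErr(e)...)
		}
		return diags
	}
}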
// @@ -187,7 +197,9 @@ func resourceGithubCodespacesUserSecretRead(d *schema.ResourceData, meta interfa log.Printf("[WARN] The secret %s has been externally updated in GitHub", d.Id()) d.SetId("") } else if !ok { - d.Set("updated_at", secret.UpdatedAt.String()) + if err = d.Set("updated_at", secret.UpdatedAt.String()); err != nil { + return err + } } return nil diff --git a/github/resource_github_codespaces_user_secret_test.go b/github/resource_github_codespaces_user_secret_test.go index 4b6d6e1927..befae37e06 100644 --- a/github/resource_github_codespaces_user_secret_test.go +++ b/github/resource_github_codespaces_user_secret_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubCodespacesUserSecret(t *testing.T) { diff --git a/github/resource_github_dependabot_organization_secret.go b/github/resource_github_dependabot_organization_secret.go index 1016769e1d..b6ce5f17ae 100644 --- a/github/resource_github_dependabot_organization_secret.go +++ b/github/resource_github_dependabot_organization_secret.go @@ -8,8 +8,8 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubDependabotOrganizationSecret() *schema.Resource { @@ -20,27 +20,29 @@ func resourceGithubDependabotOrganizationSecret() *schema.Resource { Delete: resourceGithubDependabotOrganizationSecretDelete, Importer: &schema.ResourceImporter{ State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("secret_name", d.Id()) + if err := d.Set("secret_name", d.Id()); err != nil { + return nil, err + } return []*schema.ResourceData{d}, nil }, }, Schema: map[string]*schema.Schema{ "secret_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the secret.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the secret.", + ValidateDiagFunc: validateSecretNameFunc, }, "encrypted_value": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Sensitive: true, - ConflictsWith: []string{"plaintext_value"}, - Description: "Encrypted value of the secret using the GitHub public key in Base64 format.", - ValidateFunc: validation.StringIsBase64, + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Sensitive: true, + ConflictsWith: []string{"plaintext_value"}, + Description: "Encrypted value of the secret using the GitHub public key in Base64 format.", + ValidateDiagFunc: toDiagFunc(validation.StringIsBase64, "encrypted_value"), }, "plaintext_value": { Type: schema.TypeString, @@ -51,11 +53,11 @@ func resourceGithubDependabotOrganizationSecret() *schema.Resource { ConflictsWith: []string{"encrypted_value"}, }, "visibility": { - Type: schema.TypeString, - Required: true, - Description: "Configures the access that repositories have to the organization secret. Must be one of 'all', 'private' or 'selected'. 
'selected_repository_ids' is required if set to 'selected'.", - ValidateFunc: validateValueFunc([]string{"all", "private", "selected"}), - ForceNew: true, + Type: schema.TypeString, + Required: true, + Description: "Configures the access that repositories have to the organization secret. Must be one of 'all', 'private' or 'selected'. 'selected_repository_ids' is required if set to 'selected'.", + ValidateDiagFunc: validateValueFunc([]string{"all", "private", "selected"}), + ForceNew: true, }, "selected_repository_ids": { Type: schema.TypeSet, @@ -157,10 +159,18 @@ func resourceGithubDependabotOrganizationSecretRead(d *schema.ResourceData, meta return err } - d.Set("encrypted_value", d.Get("encrypted_value")) - d.Set("plaintext_value", d.Get("plaintext_value")) - d.Set("created_at", secret.CreatedAt.String()) - d.Set("visibility", secret.Visibility) + if err = d.Set("encrypted_value", d.Get("encrypted_value")); err != nil { + return err + } + if err = d.Set("plaintext_value", d.Get("plaintext_value")); err != nil { + return err + } + if err = d.Set("created_at", secret.CreatedAt.String()); err != nil { + return err + } + if err = d.Set("visibility", secret.Visibility); err != nil { + return err + } selectedRepositoryIDs := []int64{} @@ -185,7 +195,9 @@ func resourceGithubDependabotOrganizationSecretRead(d *schema.ResourceData, meta } } - d.Set("selected_repository_ids", selectedRepositoryIDs) + if err = d.Set("selected_repository_ids", selectedRepositoryIDs); err != nil { + return err + } // This is a drift detection mechanism based on timestamps. // @@ -206,7 +218,9 @@ func resourceGithubDependabotOrganizationSecretRead(d *schema.ResourceData, meta log.Printf("[WARN] The secret %s has been externally updated in GitHub", d.Id()) d.SetId("") } else if !ok { - d.Set("updated_at", secret.UpdatedAt.String()) + if err = d.Set("updated_at", secret.UpdatedAt.String()); err != nil { + return err + } } return nil diff --git a/github/resource_github_dependabot_organization_secret_repositories.go b/github/resource_github_dependabot_organization_secret_repositories.go index 85ce645015..9103951f7f 100644 --- a/github/resource_github_dependabot_organization_secret_repositories.go +++ b/github/resource_github_dependabot_organization_secret_repositories.go @@ -4,7 +4,7 @@ import ( "context" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubDependabotOrganizationSecretRepositories() *schema.Resource { @@ -14,16 +14,16 @@ func resourceGithubDependabotOrganizationSecretRepositories() *schema.Resource { Update: resourceGithubDependabotOrganizationSecretRepositoriesCreateOrUpdate, Delete: resourceGithubDependabotOrganizationSecretRepositoriesDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "secret_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the existing secret.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the existing secret.", + ValidateDiagFunc: validateSecretNameFunc, }, "selected_repository_ids": { Type: schema.TypeSet, @@ -97,7 +97,9 @@ func resourceGithubDependabotOrganizationSecretRepositoriesRead(d *schema.Resour opt.Page = resp.NextPage } - d.Set("selected_repository_ids", selectedRepositoryIDs) + if err = 
d.Set("selected_repository_ids", selectedRepositoryIDs); err != nil { + return err + } return nil } diff --git a/github/resource_github_dependabot_organization_secret_repositories_test.go b/github/resource_github_dependabot_organization_secret_repositories_test.go index de715829f7..80c2ab09a3 100644 --- a/github/resource_github_dependabot_organization_secret_repositories_test.go +++ b/github/resource_github_dependabot_organization_secret_repositories_test.go @@ -5,8 +5,8 @@ import ( "os" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubDependabotOrganizationSecretRepositories(t *testing.T) { diff --git a/github/resource_github_dependabot_organization_secret_test.go b/github/resource_github_dependabot_organization_secret_test.go index cb9ffb2d11..50a3a5a81b 100644 --- a/github/resource_github_dependabot_organization_secret_test.go +++ b/github/resource_github_dependabot_organization_secret_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubDependabotOrganizationSecret(t *testing.T) { diff --git a/github/resource_github_dependabot_secret.go b/github/resource_github_dependabot_secret.go index fce805f521..a506483b75 100644 --- a/github/resource_github_dependabot_secret.go +++ b/github/resource_github_dependabot_secret.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "golang.org/x/crypto/nacl/box" ) @@ -30,11 +30,11 @@ func resourceGithubDependabotSecret() *schema.Resource { Description: "Name of the repository.", }, "secret_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the secret.", - ValidateFunc: validateSecretNameFunc, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the secret.", + ValidateDiagFunc: validateSecretNameFunc, }, "encrypted_value": { Type: schema.TypeString, @@ -130,9 +130,15 @@ func resourceGithubDependabotSecretRead(d *schema.ResourceData, meta interface{} return err } - d.Set("encrypted_value", d.Get("encrypted_value")) - d.Set("plaintext_value", d.Get("plaintext_value")) - d.Set("created_at", secret.CreatedAt.String()) + if err = d.Set("encrypted_value", d.Get("encrypted_value")); err != nil { + return err + } + if err = d.Set("plaintext_value", d.Get("plaintext_value")); err != nil { + return err + } + if err = d.Set("created_at", secret.CreatedAt.String()); err != nil { + return err + } // This is a drift detection mechanism based on timestamps. 
// @@ -153,7 +159,9 @@ func resourceGithubDependabotSecretRead(d *schema.ResourceData, meta interface{} log.Printf("[WARN] The secret %s has been externally updated in GitHub", d.Id()) d.SetId("") } else if !ok { - d.Set("updated_at", secret.UpdatedAt.String()) + if err = d.Set("updated_at", secret.UpdatedAt.String()); err != nil { + return err + } } return nil @@ -197,13 +205,21 @@ func resourceGithubDependabotSecretImport(d *schema.ResourceData, meta interface return nil, err } - d.Set("repository", repoName) - d.Set("secret_name", secretName) + if err = d.Set("repository", repoName); err != nil { + return nil, err + } + if err = d.Set("secret_name", secretName); err != nil { + return nil, err + } // encrypted_value or plaintext_value can not be imported - d.Set("created_at", secret.CreatedAt.String()) - d.Set("updated_at", secret.UpdatedAt.String()) + if err = d.Set("created_at", secret.CreatedAt.String()); err != nil { + return nil, err + } + if err = d.Set("updated_at", secret.UpdatedAt.String()); err != nil { + return nil, err + } return []*schema.ResourceData{d}, nil } diff --git a/github/resource_github_dependabot_secret_test.go b/github/resource_github_dependabot_secret_test.go index 4cd636a777..fa6488ebaa 100644 --- a/github/resource_github_dependabot_secret_test.go +++ b/github/resource_github_dependabot_secret_test.go @@ -6,9 +6,9 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubDependabotSecret(t *testing.T) { diff --git a/github/resource_github_emu_group_mapping.go b/github/resource_github_emu_group_mapping.go index 52d582a3dc..8589d93cb0 100644 --- a/github/resource_github_emu_group_mapping.go +++ b/github/resource_github_emu_group_mapping.go @@ -6,7 +6,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubEMUGroupMapping() *schema.Resource { @@ -95,8 +95,12 @@ func resourceGithubEMUGroupMappingRead(d *schema.ResourceData, meta interface{}) return nil } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("group", group) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("group_id", int(*group.GroupID)); err != nil { + return err + } return nil } diff --git a/github/resource_github_enteprise_actions_runner_group_test.go b/github/resource_github_enteprise_actions_runner_group_test.go new file mode 100644 index 0000000000..1ffb73e090 --- /dev/null +++ b/github/resource_github_enteprise_actions_runner_group_test.go @@ -0,0 +1,228 @@ +package github + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccGithubActionsEnterpriseRunnerGroup(t *testing.T) { + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + + if isEnterprise != "true" { + t.Skip("Skipping because `ENTERPRISE_ACCOUNT` is not set or set to false") + } + + if testEnterprise == "" { + t.Skip("Skipping because `ENTERPRISE_SLUG` is not set") + } + + t.Run("creates enterprise runner groups without error", func(t *testing.T) { + config := fmt.Sprintf(` + data "github_enterprise" "enterprise" { + slug = "%s" + } + + resource 
"github_enterprise_actions_runner_group" "test" { + enterprise_slug = data.github_enterprise.enterprise.slug + name = "tf-acc-test-%s" + visibility = "all" + allows_public_repositories = true + } + `, testEnterprise, randomID) + + check := resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet( + "github_enterprise_actions_runner_group.test", "name", + ), + resource.TestCheckResourceAttr( + "github_enterprise_actions_runner_group.test", "name", + fmt.Sprintf(`tf-acc-test-%s`, randomID), + ), + resource.TestCheckResourceAttr( + "github_enterprise_actions_runner_group.test", "visibility", + "all", + ), + resource.TestCheckResourceAttr( + "github_enterprise_actions_runner_group.test", "allows_public_repositories", + "true", + ), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + }, + }) + } + + t.Run("with an enterprise account", func(t *testing.T) { + testCase(t, enterprise) + }) + }) + + t.Run("manages runner group visibility to selected orgs", func(t *testing.T) { + + config := fmt.Sprintf(` + data "github_enterprise" "enterprise" { + slug = "%s" + } + + data "github_organization" "org" { + name = "%s" + } + + resource "github_enterprise_actions_runner_group" "test" { + enterprise_slug = data.github_enterprise.enterprise.slug + name = "tf-acc-test-%s" + visibility = "selected" + selected_organization_ids = [data.github_organization.org.id] + } + `, testEnterprise, testOrganization, randomID) + + check := resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet( + "github_enterprise_actions_runner_group.test", "name", + ), + resource.TestCheckResourceAttr( + "github_enterprise_actions_runner_group.test", "name", + fmt.Sprintf(`tf-acc-test-%s`, randomID), + ), + resource.TestCheckResourceAttr( + "github_enterprise_actions_runner_group.test", "visibility", + "selected", + ), + resource.TestCheckResourceAttr( + "github_enterprise_actions_runner_group.test", "selected_organization_ids.#", + "1", + ), + resource.TestCheckResourceAttrSet( + "github_enterprise_actions_runner_group.test", "selected_organizations_url", + ), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + }, + }) + } + + t.Run("with an enterprise account", func(t *testing.T) { + testCase(t, enterprise) + }) + }) + + t.Run("imports an all runner group without error", func(t *testing.T) { + config := fmt.Sprintf(` + data "github_enterprise" "enterprise" { + slug = "%s" + } + + resource "github_enterprise_actions_runner_group" "test" { + enterprise_slug = data.github_enterprise.enterprise.slug + name = "tf-acc-test-%s" + visibility = "all" + } + `, testEnterprise, randomID) + + check := resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("github_enterprise_actions_runner_group.test", "name"), + resource.TestCheckResourceAttrSet("github_enterprise_actions_runner_group.test", "visibility"), + resource.TestCheckResourceAttr("github_enterprise_actions_runner_group.test", "visibility", "all"), + resource.TestCheckResourceAttr("github_enterprise_actions_runner_group.test", "name", fmt.Sprintf(`tf-acc-test-%s`, randomID)), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + 
PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + { + ResourceName: "github_enterprise_actions_runner_group.test", + ImportState: true, + ImportStateVerify: true, + ImportStateIdPrefix: fmt.Sprintf(`%s/`, testEnterprise), + }, + }, + }) + } + + t.Run("with an enterprise account", func(t *testing.T) { + testCase(t, enterprise) + }) + }) + + t.Run("imports a runner group with selected orgs without error", func(t *testing.T) { + + config := fmt.Sprintf(` + data "github_enterprise" "enterprise" { + slug = "%s" + } + + data "github_organization" "org" { + name = "%s" + } + + resource "github_enterprise_actions_runner_group" "test" { + enterprise_slug = data.github_enterprise.enterprise.slug + name = "tf-acc-test-%s" + visibility = "selected" + selected_organization_ids = [data.github_organization.org.id] + } + `, testEnterprise, testOrganization, randomID) + + check := resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("github_enterprise_actions_runner_group.test", "name"), + resource.TestCheckResourceAttr("github_enterprise_actions_runner_group.test", "name", fmt.Sprintf(`tf-acc-test-%s`, randomID)), + resource.TestCheckResourceAttrSet("github_enterprise_actions_runner_group.test", "visibility"), + resource.TestCheckResourceAttr("github_enterprise_actions_runner_group.test", "visibility", "selected"), + resource.TestCheckResourceAttr( + "github_enterprise_actions_runner_group.test", "selected_organization_ids.#", + "1", + ), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + { + ResourceName: "github_enterprise_actions_runner_group.test", + ImportState: true, + ImportStateVerify: true, + ImportStateIdPrefix: fmt.Sprintf(`%s/`, testEnterprise), + }, + }, + }) + } + + t.Run("with an enterprise account", func(t *testing.T) { + testCase(t, enterprise) + }) + }) +} diff --git a/github/resource_github_enterprise_actions_permissions.go b/github/resource_github_enterprise_actions_permissions.go new file mode 100644 index 0000000000..0b26926cf3 --- /dev/null +++ b/github/resource_github_enterprise_actions_permissions.go @@ -0,0 +1,288 @@ +package github + +import ( + "context" + "errors" + + "github.com/google/go-github/v57/github" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func resourceGithubActionsEnterprisePermissions() *schema.Resource { + return &schema.Resource{ + Create: resourceGithubActionsEnterprisePermissionsCreateOrUpdate, + Read: resourceGithubActionsEnterprisePermissionsRead, + Update: resourceGithubActionsEnterprisePermissionsCreateOrUpdate, + Delete: resourceGithubActionsEnterprisePermissionsDelete, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Schema: map[string]*schema.Schema{ + "enterprise_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the enterprise.", + }, + "allowed_actions": { + Type: schema.TypeString, + Optional: true, + Description: "The permissions policy that controls the actions that are allowed to run. 
Can be one of: 'all', 'local_only', or 'selected'.", + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"all", "local_only", "selected"}, false), "allowed_actions"), + }, + "enabled_organizations": { + Type: schema.TypeString, + Required: true, + Description: "The policy that controls the organizations in the enterprise that are allowed to run GitHub Actions. Can be one of: 'all', 'none', or 'selected'.", + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"all", "none", "selected"}, false), "enabled_organizations"), + }, + "allowed_actions_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Sets the actions that are allowed in an enterprise. Only available when 'allowed_actions' = 'selected'", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "github_owned_allowed": { + Type: schema.TypeBool, + Required: true, + Description: "Whether GitHub-owned actions are allowed in the enterprise.", + }, + "patterns_allowed": { + Type: schema.TypeSet, + Optional: true, + Description: "Specifies a list of string-matching patterns to allow specific action(s). Wildcards, tags, and SHAs are allowed. For example, 'monalisa/octocat@*', 'monalisa/octocat@v2', 'monalisa/*'.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "verified_allowed": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether actions in GitHub Marketplace from verified creators are allowed. Set to 'true' to allow all GitHub Marketplace actions by verified creators.", + }, + }, + }, + }, + "enabled_organizations_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Sets the list of selected organizations that are enabled for GitHub Actions in an enterprise.
Only available when 'enabled_organizations' = 'selected'.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "organization_ids": { + Type: schema.TypeSet, + Description: "List of organization IDs to enable for GitHub Actions.", + Elem: &schema.Schema{Type: schema.TypeInt}, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourceGithubActionsEnterpriseAllowedObject(d *schema.ResourceData) (*github.ActionsAllowed, error) { + allowed := &github.ActionsAllowed{} + + config := d.Get("allowed_actions_config").([]interface{}) + if len(config) > 0 { + data := config[0].(map[string]interface{}) + switch x := data["github_owned_allowed"].(type) { + case bool: + allowed.GithubOwnedAllowed = &x + } + + switch x := data["verified_allowed"].(type) { + case bool: + allowed.VerifiedAllowed = &x + } + + patternsAllowed := []string{} + + switch t := data["patterns_allowed"].(type) { + case *schema.Set: + for _, value := range t.List() { + patternsAllowed = append(patternsAllowed, value.(string)) + } + } + + allowed.PatternsAllowed = patternsAllowed + } else { + return &github.ActionsAllowed{}, + errors.New("the allowed_actions_config {} block must be specified if allowed_actions == 'selected'") + } + + return allowed, nil +} + +func resourceGithubActionsEnabledOrganizationsObject(d *schema.ResourceData) ([]int64, error) { + var enabled []int64 + + config := d.Get("enabled_organizations_config").([]interface{}) + if len(config) > 0 { + data := config[0].(map[string]interface{}) + switch x := data["organization_ids"].(type) { + case *schema.Set: + for _, value := range x.List() { + enabled = append(enabled, int64(value.(int))) + } + } + } else { + return nil, errors.New("the enabled_organizations_config {} block must be specified if enabled_organizations == 'selected'") + } + return enabled, nil +} + +func resourceGithubActionsEnterprisePermissionsCreateOrUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*Owner).v3client + + ctx := context.Background() + if !d.IsNewResource() { + ctx = context.WithValue(ctx, ctxId, d.Id()) + } + + enterpriseId := d.Get("enterprise_id").(string) + allowedActions := d.Get("allowed_actions").(string) + enabledOrganizations := d.Get("enabled_organizations").(string) + + _, _, err := client.Actions.EditActionsPermissionsInEnterprise(ctx, + enterpriseId, + github.ActionsPermissionsEnterprise{ + AllowedActions: &allowedActions, + EnabledOrganizations: &enabledOrganizations, + }) + if err != nil { + return err + } + + if allowedActions == "selected" { + actionsAllowedData, err := resourceGithubActionsEnterpriseAllowedObject(d) + if err != nil { + return err + } + _, _, err = client.Actions.EditActionsAllowedInEnterprise(ctx, + enterpriseId, + *actionsAllowedData) + if err != nil { + return err + } + } + + if enabledOrganizations == "selected" { + enabledOrgsData, err := resourceGithubActionsEnabledOrganizationsObject(d) + if err != nil { + return err + } + _, err = client.Actions.SetEnabledOrgsInEnterprise(ctx, + enterpriseId, + enabledOrgsData) + if err != nil { + return err + } + } + + d.SetId(enterpriseId) + return resourceGithubActionsEnterprisePermissionsRead(d, meta) +} + +func resourceGithubActionsEnterprisePermissionsRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*Owner).v3client + ctx := context.Background() + + actionsPermissions, _, err := client.Actions.GetActionsPermissionsInEnterprise(ctx, d.Id()) + if err != nil { + return err + } + + if actionsPermissions.GetAllowedActions() == "selected" { + 
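+ // When allowed_actions is "selected", GitHub keeps a second object
+ // describing which actions qualify (GitHub-owned, verified marketplace
+ // creators, and name patterns), so it is fetched here and mirrored into
+ // the allowed_actions_config block for diffing.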
actionsAllowed, _, err := client.Actions.GetActionsAllowedInEnterprise(ctx, d.Id()) + if err != nil { + return err + } + + // If actionsAllowed set to local/all by removing all actions config settings, the response will be empty + if actionsAllowed != nil { + if err = d.Set("allowed_actions_config", []interface{}{ + map[string]interface{}{ + "github_owned_allowed": actionsAllowed.GetGithubOwnedAllowed(), + "patterns_allowed": actionsAllowed.PatternsAllowed, + "verified_allowed": actionsAllowed.GetVerifiedAllowed(), + }, + }); err != nil { + return err + } + } + } else { + if err = d.Set("allowed_actions_config", []interface{}{}); err != nil { + return err + } + } + + if actionsPermissions.GetEnabledOrganizations() == "selected" { + opts := github.ListOptions{PerPage: 10, Page: 1} + var orgList []int64 + var allOrgs []*github.Organization + + for { + enabledOrgs, resp, err := client.Actions.ListEnabledOrgsInEnterprise(ctx, d.Id(), &opts) + if err != nil { + return err + } + allOrgs = append(allOrgs, enabledOrgs.Organizations...) + + opts.Page = resp.NextPage + + if resp.NextPage == 0 { + break + } + } + for index := range allOrgs { + orgList = append(orgList, *allOrgs[index].ID) + } + if allOrgs != nil { + if err = d.Set("enabled_organizations_config", []interface{}{ + map[string]interface{}{ + "organization_ids": orgList, + }, + }); err != nil { + return err + } + } else { + if err = d.Set("enabled_organizations_config", []interface{}{}); err != nil { + return err + } + } + } + + if err = d.Set("allowed_actions", actionsPermissions.GetAllowedActions()); err != nil { + return err + } + if err = d.Set("enabled_organizations", actionsPermissions.GetEnabledOrganizations()); err != nil { + return err + } + + return nil +} + +func resourceGithubActionsEnterprisePermissionsDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*Owner).v3client + + ctx := context.WithValue(context.Background(), ctxId, d.Id()) + + // This will nullify any allowedActions elements + _, _, err := client.Actions.EditActionsPermissionsInEnterprise(ctx, + d.Get("enterprise_id").(string), + github.ActionsPermissionsEnterprise{ + AllowedActions: github.String("all"), + EnabledOrganizations: github.String("all"), + }) + if err != nil { + return err + } + + return nil +} diff --git a/github/resource_github_enterprise_actions_permissions_test.go b/github/resource_github_enterprise_actions_permissions_test.go new file mode 100644 index 0000000000..bdb41c5366 --- /dev/null +++ b/github/resource_github_enterprise_actions_permissions_test.go @@ -0,0 +1,302 @@ +package github + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccGithubActionsEnterprisePermissions(t *testing.T) { + + t.Run("test setting of basic actions enterprise permissions", func(t *testing.T) { + + allowedActions := "local_only" + enabledOrganizations := "all" + + config := fmt.Sprintf(` + data "github_enterprise" "enterprise" { + slug = "%s" + } + + resource "github_enterprise_actions_permissions" "test" { + enterprise_id = data.github_enterprise.enterprise.id + allowed_actions = "%s" + enabled_organizations = "%s" + } + `, testEnterprise, allowedActions, enabledOrganizations) + + check := resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "github_enterprise_actions_permissions.test", "allowed_actions", allowedActions, + ), + resource.TestCheckResourceAttr( + 
"github_enterprise_actions_permissions.test", "enabled_organizations", enabledOrganizations, + ), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + }, + }) + } + + t.Run("with an enterprise account", func(t *testing.T) { + if isEnterprise != "true" { + t.Skip("Skipping because `ENTERPRISE_ACCOUNT` is not set or set to false") + } + if testEnterprise == "" { + t.Skip("Skipping because `ENTERPRISE_SLUG` is not set") + } + testCase(t, enterprise) + }) + }) + + t.Run("imports entire set of github action enterprise permissions without error", func(t *testing.T) { + + allowedActions := "selected" + enabledOrganizations := "selected" + githubOwnedAllowed := true + verifiedAllowed := true + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + orgName := fmt.Sprintf("tf-acc-test-displayname%s", randomID) + + displayName := fmt.Sprintf("Tf Acc Test displayname %s", randomID) + + desc := "Initial org description" + + config := fmt.Sprintf(` + data "github_enterprise" "enterprise" { + slug = "%s" + } + + data "github_user" "current" { + username = "" + } + + resource "github_enterprise_organization" "org" { + enterprise_id = data.github_enterprise.enterprise.id + name = "%s" + display_name = "%s" + description = "%s" + billing_email = data.github_user.current.email + admin_logins = [ + data.github_user.current.login + ] + } + + resource "github_enterprise_actions_permissions" "test" { + enterprise_id = data.github_enterprise.enterprise.id + allowed_actions = "%s" + enabled_organizations = "%s" + allowed_actions_config { + github_owned_allowed = %t + patterns_allowed = ["actions/cache@*", "actions/checkout@*"] + verified_allowed = %t + } + enabled_organizations_config { + organization_ids = [github_enterprise_organization.org.id] + } + } + `, testEnterprise, orgName, displayName, desc, allowedActions, enabledOrganizations, githubOwnedAllowed, verifiedAllowed) + + check := resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "github_enterprise_actions_permissions.test", "allowed_actions", allowedActions, + ), + resource.TestCheckResourceAttr( + "github_enterprise_actions_permissions.test", "enabled_organizations", enabledOrganizations, + ), + resource.TestCheckResourceAttr( + "github_enterprise_actions_permissions.test", "allowed_actions_config.#", "1", + ), + resource.TestCheckResourceAttr( + "github_enterprise_actions_permissions.test", "enabled_organizations_config.#", "1", + ), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + { + ResourceName: "github_enterprise_actions_permissions.test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) + } + + t.Run("with an enterprise account", func(t *testing.T) { + if isEnterprise != "true" { + t.Skip("Skipping because `ENTERPRISE_ACCOUNT` is not set or set to false") + } + if testEnterprise == "" { + t.Skip("Skipping because `ENTERPRISE_SLUG` is not set") + } + testCase(t, enterprise) + }) + }) + + t.Run("test setting of enterprise allowed actions", func(t *testing.T) { + + allowedActions := "selected" + enabledOrganizations := "all" + githubOwnedAllowed := true + verifiedAllowed := true + + config := fmt.Sprintf(` + data 
"github_enterprise" "enterprise" { + slug = "%s" + } + resource "github_enterprise_actions_permissions" "test" { + enterprise_id = data.github_enterprise.enterprise.id + allowed_actions = "%s" + enabled_organizations = "%s" + allowed_actions_config { + github_owned_allowed = %t + patterns_allowed = ["actions/cache@*", "actions/checkout@*"] + verified_allowed = %t + } + } + `, testEnterprise, allowedActions, enabledOrganizations, githubOwnedAllowed, verifiedAllowed) + + check := resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "github_enterprise_actions_permissions.test", "allowed_actions", allowedActions, + ), + resource.TestCheckResourceAttr( + "github_enterprise_actions_permissions.test", "enabled_organizations", enabledOrganizations, + ), + resource.TestCheckResourceAttr( + "github_enterprise_actions_permissions.test", "allowed_actions_config.#", "1", + ), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + }, + }) + } + + t.Run("with an enterprise account", func(t *testing.T) { + if isEnterprise != "true" { + t.Skip("Skipping because `ENTERPRISE_ACCOUNT` is not set or set to false") + } + if testEnterprise == "" { + t.Skip("Skipping because `ENTERPRISE_SLUG` is not set") + } + testCase(t, enterprise) + }) + }) + + t.Run("test setting of enterprise enabled organizations", func(t *testing.T) { + + allowedActions := "all" + enabledOrganizations := "selected" + randomID := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + randomID2 := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + orgName := fmt.Sprintf("tf-acc-test-displayname%s", randomID) + orgName2 := fmt.Sprintf("tf-acc-test-displayname%s", randomID2) + + displayName := fmt.Sprintf("Tf Acc Test displayname %s", randomID) + displayName2 := fmt.Sprintf("Tf Acc Test displayname %s", randomID2) + + desc := fmt.Sprintf("Initial org description %s", randomID) + desc2 := fmt.Sprintf("Initial org description %s", randomID2) + + config := fmt.Sprintf(` + data "github_enterprise" "enterprise" { + slug = "%s" + } + data "github_user" "current" { + username = "" + } + resource "github_enterprise_organization" "org" { + enterprise_id = data.github_enterprise.enterprise.id + name = "%s" + display_name = "%s" + description = "%s" + billing_email = data.github_user.current.email + admin_logins = [ + data.github_user.current.login + ] + } + resource "github_enterprise_organization" "org2" { + enterprise_id = data.github_enterprise.enterprise.id + name = "%s" + display_name = "%s" + description = "%s" + billing_email = data.github_user.current.email + admin_logins = [ + data.github_user.current.login + ] + } + resource "github_enterprise_actions_permissions" "test" { + enterprise_id = data.github_enterprise.enterprise.id + allowed_actions = "%s" + enabled_organizations = "%s" + enabled_organizations_config { + organization_ids = [github_enterprise_organization.org.id, github_enterprise_organization.org2.id] + } + } + `, testEnterprise, orgName, displayName, desc, orgName2, displayName2, desc2, allowedActions, enabledOrganizations) + + check := resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "github_enterprise_actions_permissions.test", "allowed_actions", allowedActions, + ), + resource.TestCheckResourceAttr( + "github_enterprise_actions_permissions.test", "enabled_organizations", enabledOrganizations, + 
), + resource.TestCheckResourceAttr( + "github_enterprise_actions_permissions.test", "enabled_organizations_config.#", "1", + ), + ) + + testCase := func(t *testing.T, mode string) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { skipUnlessMode(t, mode) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: check, + }, + }, + }) + } + + t.Run("with an enterprise account", func(t *testing.T) { + if isEnterprise != "true" { + t.Skip("Skipping because `ENTERPRISE_ACCOUNT` is not set or set to false") + } + if testEnterprise == "" { + t.Skip("Skipping because `ENTERPRISE_SLUG` is not set") + } + testCase(t, enterprise) + }) + }) + +} diff --git a/github/resource_github_enterprise_actions_runner_group.go b/github/resource_github_enterprise_actions_runner_group.go new file mode 100644 index 0000000000..c8e60de8ec --- /dev/null +++ b/github/resource_github_enterprise_actions_runner_group.go @@ -0,0 +1,360 @@ +package github + +import ( + "context" + "fmt" + "log" + "net/http" + "strconv" + "strings" + + "github.com/google/go-github/v57/github" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func resourceGithubActionsEnterpriseRunnerGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceGithubActionsEnterpriseRunnerGroupCreate, + Read: resourceGithubActionsEnterpriseRunnerGroupRead, + Update: resourceGithubActionsEnterpriseRunnerGroupUpdate, + Delete: resourceGithubActionsEnterpriseRunnerGroupDelete, + Importer: &schema.ResourceImporter{ + State: resourceGithubActionsEnterpriseRunnerGroupImport, + }, + + Schema: map[string]*schema.Schema{ + "enterprise_slug": { + Type: schema.TypeString, + Required: true, + Description: "The slug of the enterprise.", + }, + "allows_public_repositories": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether public repositories can be added to the runner group.", + }, + "default": { + Type: schema.TypeBool, + Computed: true, + Description: "Whether this is the default runner group.", + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "An etag representing the runner group object", + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the runner group.", + }, + "runners_url": { + Type: schema.TypeString, + Computed: true, + Description: "The GitHub API URL for the runner group's runners.", + }, + "visibility": { + Type: schema.TypeString, + Required: true, + Description: "The visibility of the runner group.", + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"all", "selected"}, false), "visibility"), + }, + "restricted_to_workflows": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If 'true', the runner group will be restricted to running only the workflows specified in the 'selected_workflows' array. Defaults to 'false'.", + }, + "selected_workflows": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Description: "List of workflows the runner group should be allowed to run. 
This setting will be ignored unless restricted_to_workflows is set to 'true'.", + }, + "selected_organization_ids": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + Set: schema.HashInt, + Optional: true, + Description: "List of organization IDs that can access the runner group.", + }, + "selected_organizations_url": { + Type: schema.TypeString, + Computed: true, + Description: "GitHub API URL for the runner group's organizations.", + }, + }, + } +} + +func resourceGithubActionsEnterpriseRunnerGroupCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*Owner).v3client + + name := d.Get("name").(string) + enterpriseSlug := d.Get("enterprise_slug").(string) + restrictedToWorkflows := d.Get("restricted_to_workflows").(bool) + visibility := d.Get("visibility").(string) + selectedOrganizations, hasSelectedOrganizations := d.GetOk("selected_organization_ids") + allowsPublicRepositories := d.Get("allows_public_repositories").(bool) + + selectedWorkflows := []string{} + if workflows, ok := d.GetOk("selected_workflows"); ok { + for _, workflow := range workflows.([]interface{}) { + selectedWorkflows = append(selectedWorkflows, workflow.(string)) + } + } + + if visibility != "selected" && hasSelectedOrganizations { + return fmt.Errorf("cannot use selected_organization_ids without visibility being set to selected") + } + + selectedOrganizationIDs := []int64{} + + if hasSelectedOrganizations { + ids := selectedOrganizations.(*schema.Set).List() + + for _, id := range ids { + selectedOrganizationIDs = append(selectedOrganizationIDs, int64(id.(int))) + } + } + + ctx := context.Background() + + enterpriseRunnerGroup, resp, err := client.Enterprise.CreateEnterpriseRunnerGroup(ctx, + enterpriseSlug, + github.CreateEnterpriseRunnerGroupRequest{ + Name: &name, + Visibility: &visibility, + SelectedOrganizationIDs: selectedOrganizationIDs, + AllowsPublicRepositories: &allowsPublicRepositories, + RestrictedToWorkflows: &restrictedToWorkflows, + SelectedWorkflows: selectedWorkflows, + }, + ) + if err != nil { + return err + } + d.SetId(strconv.FormatInt(enterpriseRunnerGroup.GetID(), 10)) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("allows_public_repositories", enterpriseRunnerGroup.GetAllowsPublicRepositories()); err != nil { + return err + } + if err = d.Set("default", enterpriseRunnerGroup.GetDefault()); err != nil { + return err + } + if err = d.Set("name", enterpriseRunnerGroup.GetName()); err != nil { + return err + } + if err = d.Set("runners_url", enterpriseRunnerGroup.GetRunnersURL()); err != nil { + return err + } + if err = d.Set("selected_organizations_url", enterpriseRunnerGroup.GetSelectedOrganizationsURL()); err != nil { + return err + } + if err = d.Set("visibility", enterpriseRunnerGroup.GetVisibility()); err != nil { + return err + } + if err = d.Set("selected_organization_ids", selectedOrganizationIDs); err != nil { // Note: enterpriseRunnerGroup has no method to get selected organization IDs + return err + } + if err = d.Set("restricted_to_workflows", enterpriseRunnerGroup.GetRestrictedToWorkflows()); err != nil { + return err + } + if err = d.Set("selected_workflows", enterpriseRunnerGroup.SelectedWorkflows); err != nil { + return err + } + if err = d.Set("enterprise_slug", enterpriseSlug); err != nil { + return err + } + + return resourceGithubActionsEnterpriseRunnerGroupRead(d, meta) +} + +func getEnterpriseRunnerGroup(client *github.Client, ctx context.Context, ent string, groupID 
int64) (*github.EnterpriseRunnerGroup, *github.Response, error) { + enterpriseRunnerGroup, resp, err := client.Enterprise.GetEnterpriseRunnerGroup(ctx, ent, groupID) + if err != nil { + if ghErr, ok := err.(*github.ErrorResponse); ok && ghErr.Response.StatusCode == http.StatusNotModified { + // ignore error StatusNotModified + return enterpriseRunnerGroup, resp, nil + } + } + return enterpriseRunnerGroup, resp, err +} + +func resourceGithubActionsEnterpriseRunnerGroupRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*Owner).v3client + + enterpriseSlug := d.Get("enterprise_slug").(string) + runnerGroupID, err := strconv.ParseInt(d.Id(), 10, 64) + if err != nil { + return err + } + ctx := context.WithValue(context.Background(), ctxId, d.Id()) + if !d.IsNewResource() { + ctx = context.WithValue(ctx, ctxEtag, d.Get("etag").(string)) + } + + enterpriseRunnerGroup, resp, err := getEnterpriseRunnerGroup(client, ctx, enterpriseSlug, runnerGroupID) + if err != nil { + if ghErr, ok := err.(*github.ErrorResponse); ok { + if ghErr.Response.StatusCode == http.StatusNotFound { + log.Printf("[INFO] Removing enterprise runner group %s/%s from state because it no longer exists in GitHub", + enterpriseSlug, d.Id()) + d.SetId("") + return nil + } + } + return err + } + + //if runner group is nil (typically not modified) we can return early + if enterpriseRunnerGroup == nil { + return nil + } + + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("allows_public_repositories", enterpriseRunnerGroup.GetAllowsPublicRepositories()); err != nil { + return err + } + if err = d.Set("default", enterpriseRunnerGroup.GetDefault()); err != nil { + return err + } + if err = d.Set("name", enterpriseRunnerGroup.GetName()); err != nil { + return err + } + if err = d.Set("runners_url", enterpriseRunnerGroup.GetRunnersURL()); err != nil { + return err + } + if err = d.Set("selected_organizations_url", enterpriseRunnerGroup.GetSelectedOrganizationsURL()); err != nil { + return err + } + if err = d.Set("visibility", enterpriseRunnerGroup.GetVisibility()); err != nil { + return err + } + if err = d.Set("restricted_to_workflows", enterpriseRunnerGroup.GetRestrictedToWorkflows()); err != nil { + return err + } + if err = d.Set("selected_workflows", enterpriseRunnerGroup.SelectedWorkflows); err != nil { + return err + } + if err = d.Set("enterprise_slug", enterpriseSlug); err != nil { + return err + } + + selectedOrganizationIDs := []int64{} + optionsOrgs := github.ListOptions{ + PerPage: maxPerPage, + } + + for { + enterpriseRunnerGroupOrganizations, resp, err := client.Enterprise.ListOrganizationAccessRunnerGroup(ctx, enterpriseSlug, runnerGroupID, &optionsOrgs) + if err != nil { + return err + } + + for _, org := range enterpriseRunnerGroupOrganizations.Organizations { + selectedOrganizationIDs = append(selectedOrganizationIDs, *org.ID) + } + + if resp.NextPage == 0 { + break + } + + optionsOrgs.Page = resp.NextPage + } + + if err = d.Set("selected_organization_ids", selectedOrganizationIDs); err != nil { + return err + } + + return nil +} + +func resourceGithubActionsEnterpriseRunnerGroupUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*Owner).v3client + + name := d.Get("name").(string) + enterpriseSlug := d.Get("enterprise_slug").(string) + visibility := d.Get("visibility").(string) + restrictedToWorkflows := d.Get("restricted_to_workflows").(bool) + selectedWorkflows := []string{} + allowsPublicRepositories := 
d.Get("allows_public_repositories").(bool) + if workflows, ok := d.GetOk("selected_workflows"); ok { + for _, workflow := range workflows.([]interface{}) { + selectedWorkflows = append(selectedWorkflows, workflow.(string)) + } + } + + options := github.UpdateEnterpriseRunnerGroupRequest{ + Name: &name, + Visibility: &visibility, + RestrictedToWorkflows: &restrictedToWorkflows, + SelectedWorkflows: selectedWorkflows, + AllowsPublicRepositories: &allowsPublicRepositories, + } + + runnerGroupID, err := strconv.ParseInt(d.Id(), 10, 64) + if err != nil { + return err + } + ctx := context.WithValue(context.Background(), ctxId, d.Id()) + + if _, _, err := client.Enterprise.UpdateEnterpriseRunnerGroup(ctx, enterpriseSlug, runnerGroupID, options); err != nil { + return err + } + + selectedOrganizations, hasSelectedOrganizations := d.GetOk("selected_organization_ids") + selectedOrganizationIDs := []int64{} + + if hasSelectedOrganizations { + ids := selectedOrganizations.(*schema.Set).List() + + for _, id := range ids { + selectedOrganizationIDs = append(selectedOrganizationIDs, int64(id.(int))) + } + } + + orgOptions := github.SetOrgAccessRunnerGroupRequest{SelectedOrganizationIDs: selectedOrganizationIDs} + + if _, err := client.Enterprise.SetOrganizationAccessRunnerGroup(ctx, enterpriseSlug, runnerGroupID, orgOptions); err != nil { + return err + } + + return resourceGithubActionsEnterpriseRunnerGroupRead(d, meta) +} + +func resourceGithubActionsEnterpriseRunnerGroupDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*Owner).v3client + enterpriseSlug := d.Get("enterprise_slug").(string) + enterpriseRunnerGroupID, err := strconv.ParseInt(d.Id(), 10, 64) + if err != nil { + return err + } + ctx := context.WithValue(context.Background(), ctxId, d.Id()) + + log.Printf("[INFO] Deleting enterprise runner group: %s/%s (%s)", enterpriseSlug, d.Get("name"), d.Id()) + _, err = client.Enterprise.DeleteEnterpriseRunnerGroup(ctx, enterpriseSlug, enterpriseRunnerGroupID) + return err +} + +func resourceGithubActionsEnterpriseRunnerGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid import specified: supplied import must be written as /") + } + + enterpriseId, runnerGroupID := parts[0], parts[1] + + d.SetId(runnerGroupID) + d.Set("enterprise_slug", enterpriseId) + + return []*schema.ResourceData{d}, nil +} diff --git a/github/resource_github_enterprise_organization.go b/github/resource_github_enterprise_organization.go index da4ba18718..809a16cf39 100644 --- a/github/resource_github_enterprise_organization.go +++ b/github/resource_github_enterprise_organization.go @@ -7,7 +7,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/shurcooL/githubv4" ) @@ -27,6 +27,16 @@ func resourceGithubEnterpriseOrganization() *schema.Resource { ForceNew: true, Description: "The ID of the enterprise.", }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The node ID of the organization.", + }, + "database_id": { + Type: schema.TypeInt, + Computed: true, + Description: "The database ID of the organization.", + }, "name": { Type: schema.TypeString, Required: true, @@ -128,6 +138,7 @@ func resourceGithubEnterpriseOrganizationRead(data *schema.ResourceData, meta in Node struct { Organization struct { ID githubv4.ID + 
DatabaseId githubv4.Int Name githubv4.String Login githubv4.String Description githubv4.String @@ -199,6 +210,11 @@ func resourceGithubEnterpriseOrganizationRead(data *schema.ResourceData, meta in return err } + err = data.Set("database_id", query.Node.Organization.DatabaseId) + if err != nil { + return err + } + err = data.Set("description", query.Node.Organization.Description) return err } diff --git a/github/resource_github_enterprise_organization_test.go b/github/resource_github_enterprise_organization_test.go index f766c0a0f9..03f78666f8 100644 --- a/github/resource_github_enterprise_organization_test.go +++ b/github/resource_github_enterprise_organization_test.go @@ -6,8 +6,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubEnterpriseOrganization(t *testing.T) { diff --git a/github/resource_github_issue.go b/github/resource_github_issue.go index a7f73b3aa6..0b77918f7f 100644 --- a/github/resource_github_issue.go +++ b/github/resource_github_issue.go @@ -7,7 +7,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubIssue() *schema.Resource { @@ -17,7 +17,7 @@ func resourceGithubIssue() *schema.Resource { Update: resourceGithubIssueCreateOrUpdate, Delete: resourceGithubIssueDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "repository": { @@ -124,7 +124,9 @@ func resourceGithubIssueCreateOrUpdate(d *schema.ResourceData, meta interface{}) } d.SetId(buildTwoPartID(repoName, strconv.Itoa(issue.GetNumber()))) - d.Set("issue_id", issue.GetID()) + if err = d.Set("issue_id", issue.GetID()); err != nil { + return err + } return resourceGithubIssueRead(d, meta) } @@ -164,26 +166,44 @@ func resourceGithubIssueRead(d *schema.ResourceData, meta interface{}) error { return err } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("repository", repoName) - d.Set("number", number) - d.Set("title", issue.GetTitle()) - d.Set("body", issue.GetBody()) - d.Set("milestone_number", issue.GetMilestone().GetNumber()) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("repository", repoName); err != nil { + return err + } + if err = d.Set("number", number); err != nil { + return err + } + if err = d.Set("title", issue.GetTitle()); err != nil { + return err + } + if err = d.Set("body", issue.GetBody()); err != nil { + return err + } + if err = d.Set("milestone_number", issue.GetMilestone().GetNumber()); err != nil { + return err + } var labels []string for _, v := range issue.Labels { labels = append(labels, v.GetName()) } - d.Set("labels", flattenStringList(labels)) + if err = d.Set("labels", flattenStringList(labels)); err != nil { + return err + } var assignees []string for _, v := range issue.Assignees { assignees = append(assignees, v.GetLogin()) } - d.Set("assignees", flattenStringList(assignees)) + if err = d.Set("assignees", flattenStringList(assignees)); err != nil { + return err + } - d.Set("issue_id", issue.GetID()) + if err = d.Set("issue_id", issue.GetID()); err != nil { + return err + } return nil } diff --git 
a/github/resource_github_issue_label.go b/github/resource_github_issue_label.go index b3738c9157..a86b795b87 100644 --- a/github/resource_github_issue_label.go +++ b/github/resource_github_issue_label.go @@ -6,7 +6,7 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubIssueLabel() *schema.Resource { @@ -16,7 +16,7 @@ func resourceGithubIssueLabel() *schema.Resource { Update: resourceGithubIssueLabelCreateOrUpdate, Delete: resourceGithubIssueLabelDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -167,12 +167,24 @@ func resourceGithubIssueLabelRead(d *schema.ResourceData, meta interface{}) erro return err } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("repository", repoName) - d.Set("name", name) - d.Set("color", githubLabel.GetColor()) - d.Set("description", githubLabel.GetDescription()) - d.Set("url", githubLabel.GetURL()) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("repository", repoName); err != nil { + return err + } + if err = d.Set("name", name); err != nil { + return err + } + if err = d.Set("color", githubLabel.GetColor()); err != nil { + return err + } + if err = d.Set("description", githubLabel.GetDescription()); err != nil { + return err + } + if err = d.Set("url", githubLabel.GetURL()); err != nil { + return err + } return nil } diff --git a/github/resource_github_issue_label_test.go b/github/resource_github_issue_label_test.go index eebe06b673..468ae33575 100644 --- a/github/resource_github_issue_label_test.go +++ b/github/resource_github_issue_label_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubIssueLabel(t *testing.T) { diff --git a/github/resource_github_issue_labels.go b/github/resource_github_issue_labels.go index 116517a70b..0b3a9f9496 100644 --- a/github/resource_github_issue_labels.go +++ b/github/resource_github_issue_labels.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubIssueLabels() *schema.Resource { @@ -16,7 +16,7 @@ func resourceGithubIssueLabels() *schema.Resource { Update: resourceGithubIssueLabelsCreateOrUpdate, Delete: resourceGithubIssueLabelsDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ diff --git a/github/resource_github_issue_labels_test.go b/github/resource_github_issue_labels_test.go index b113e118cf..df925eae69 100644 --- a/github/resource_github_issue_labels_test.go +++ b/github/resource_github_issue_labels_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubIssueLabels(t 
*testing.T) { diff --git a/github/resource_github_issue_test.go b/github/resource_github_issue_test.go index 00a48e9007..a4309f9022 100644 --- a/github/resource_github_issue_test.go +++ b/github/resource_github_issue_test.go @@ -2,11 +2,11 @@ package github import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubIssue(t *testing.T) { diff --git a/github/resource_github_membership.go b/github/resource_github_membership.go index 537778ea00..acb5d70dfa 100644 --- a/github/resource_github_membership.go +++ b/github/resource_github_membership.go @@ -6,7 +6,7 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubMembership() *schema.Resource { @@ -16,7 +16,7 @@ func resourceGithubMembership() *schema.Resource { Update: resourceGithubMembershipCreateOrUpdate, Delete: resourceGithubMembershipDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -28,11 +28,11 @@ func resourceGithubMembership() *schema.Resource { Description: "The user to add to the organization.", }, "role": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateValueFunc([]string{"member", "admin"}), - Default: "member", - Description: "The role of the user within the organization. Must be one of 'member' or 'admin'.", + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: validateValueFunc([]string{"member", "admin"}), + Default: "member", + Description: "The role of the user within the organization. 
Must be one of 'member' or 'admin'.", }, "etag": { Type: schema.TypeString, @@ -115,9 +115,15 @@ func resourceGithubMembershipRead(d *schema.ResourceData, meta interface{}) erro return err } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("username", username) - d.Set("role", membership.GetRole()) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("username", username); err != nil { + return err + } + if err = d.Set("role", membership.GetRole()); err != nil { + return err + } return nil } diff --git a/github/resource_github_membership_test.go b/github/resource_github_membership_test.go index a0dbe4d36b..acd8a0df9a 100644 --- a/github/resource_github_membership_test.go +++ b/github/resource_github_membership_test.go @@ -7,8 +7,8 @@ import ( "testing" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccGithubMembership_basic(t *testing.T) { diff --git a/github/resource_github_organization_custom_role.go b/github/resource_github_organization_custom_role.go index 24eacd8c09..5e79a2b554 100644 --- a/github/resource_github_organization_custom_role.go +++ b/github/resource_github_organization_custom_role.go @@ -6,7 +6,7 @@ import ( "log" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubOrganizationCustomRole() *schema.Resource { @@ -16,7 +16,7 @@ func resourceGithubOrganizationCustomRole() *schema.Resource { Update: resourceGithubOrganizationCustomRoleUpdate, Delete: resourceGithubOrganizationCustomRoleDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -26,10 +26,10 @@ func resourceGithubOrganizationCustomRole() *schema.Resource { Description: "The organization custom repository role to create.", }, "base_role": { - Type: schema.TypeString, - Required: true, - Description: "The base role for the custom repository role.", - ValidateFunc: validateValueFunc([]string{"read", "triage", "write", "maintain"}), + Type: schema.TypeString, + Required: true, + Description: "The base role for the custom repository role.", + ValidateDiagFunc: validateValueFunc([]string{"read", "triage", "write", "maintain"}), }, "permissions": { Type: schema.TypeSet, @@ -112,10 +112,18 @@ func resourceGithubOrganizationCustomRoleRead(d *schema.ResourceData, meta inter return nil } - d.Set("name", role.Name) - d.Set("description", role.Description) - d.Set("base_role", role.BaseRole) - d.Set("permissions", role.Permissions) + if err = d.Set("name", role.Name); err != nil { + return err + } + if err = d.Set("description", role.Description); err != nil { + return err + } + if err = d.Set("base_role", role.BaseRole); err != nil { + return err + } + if err = d.Set("permissions", role.Permissions); err != nil { + return err + } return nil } diff --git a/github/resource_github_organization_custom_role_test.go b/github/resource_github_organization_custom_role_test.go index 9756c302c8..33875fe992 100644 --- a/github/resource_github_organization_custom_role_test.go +++ b/github/resource_github_organization_custom_role_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubOrganizationCustomRole(t *testing.T) { diff --git a/github/resource_github_organization_project.go b/github/resource_github_organization_project.go index f584edf36f..c70ccfa854 100644 --- a/github/resource_github_organization_project.go +++ b/github/resource_github_organization_project.go @@ -8,7 +8,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubOrganizationProject() *schema.Resource { @@ -18,7 +18,7 @@ func resourceGithubOrganizationProject() *schema.Resource { Update: resourceGithubOrganizationProjectUpdate, Delete: resourceGithubOrganizationProjectDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -106,11 +106,19 @@ func resourceGithubOrganizationProjectRead(d *schema.ResourceData, meta interfac return err } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("name", project.GetName()) - d.Set("body", project.GetBody()) - d.Set("url", fmt.Sprintf("https://github.com/orgs/%s/projects/%d", - orgName, project.GetNumber())) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("name", project.GetName()); err != nil { + return err + } + if err = d.Set("body", project.GetBody()); err != nil { + return err + } + if err = d.Set("url", fmt.Sprintf("https://github.com/orgs/%s/projects/%d", + orgName, project.GetNumber())); err != nil { + return err + } return nil } diff --git a/github/resource_github_organization_project_test.go b/github/resource_github_organization_project_test.go index 99b15c2f94..cf5864626a 100644 --- a/github/resource_github_organization_project_test.go +++ b/github/resource_github_organization_project_test.go @@ -8,8 +8,8 @@ import ( "testing" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccGithubOrganizationProject_basic(t *testing.T) { diff --git a/github/resource_github_organization_ruleset.go b/github/resource_github_organization_ruleset.go index a8bfe5def1..97edda737d 100644 --- a/github/resource_github_organization_ruleset.go +++ b/github/resource_github_organization_ruleset.go @@ -8,8 +8,8 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubOrganizationRuleset() *schema.Resource { diff --git a/github/resource_github_organization_ruleset_test.go b/github/resource_github_organization_ruleset_test.go index ed83473ffe..0c6f744540 100644 --- a/github/resource_github_organization_ruleset_test.go +++ b/github/resource_github_organization_ruleset_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestGithubOrganizationRulesets(t *testing.T) { diff --git a/github/resource_github_organization_security_manager.go b/github/resource_github_organization_security_manager.go index 4a4551c83d..e133a59d76 100644 --- a/github/resource_github_organization_security_manager.go +++ b/github/resource_github_organization_security_manager.go @@ -7,7 +7,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubOrganizationSecurityManager() *schema.Resource { @@ -17,7 +17,7 @@ func resourceGithubOrganizationSecurityManager() *schema.Resource { Update: resourceGithubOrganizationSecurityManagerUpdate, Delete: resourceGithubOrganizationSecurityManagerDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -100,7 +100,9 @@ func resourceGithubOrganizationSecurityManagerRead(d *schema.ResourceData, meta return nil } - d.Set("team_slug", team.GetSlug()) + if err = d.Set("team_slug", team.GetSlug()); err != nil { + return err + } return nil } diff --git a/github/resource_github_organization_security_manager_test.go b/github/resource_github_organization_security_manager_test.go index 70415c4a93..070ad9ae4d 100644 --- a/github/resource_github_organization_security_manager_test.go +++ b/github/resource_github_organization_security_manager_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubOrganizationSecurityManagers(t *testing.T) { diff --git a/github/resource_github_organization_settings.go b/github/resource_github_organization_settings.go index b4c46fa239..9ff458a6b3 100644 --- a/github/resource_github_organization_settings.go +++ b/github/resource_github_organization_settings.go @@ -6,8 +6,8 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubOrganizationSettings() *schema.Resource { @@ -17,7 +17,7 @@ func resourceGithubOrganizationSettings() *schema.Resource { Update: resourceGithubOrganizationSettingsCreateOrUpdate, Delete: resourceGithubOrganizationSettingsDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "billing_email": { @@ -68,11 +68,11 @@ func resourceGithubOrganizationSettings() *schema.Resource { Description: "Whether or not repository projects are enabled for the organization.", }, "default_repository_permission": { - Type: schema.TypeString, - Optional: true, - Default: "read", - Description: "The default permission for organization members to create new repositories. 
Can be one of 'read', 'write', 'admin' or 'none'.", - ValidateFunc: validation.StringInSlice([]string{"read", "write", "admin", "none"}, false), + Type: schema.TypeString, + Optional: true, + Default: "read", + Description: "The default permission for organization members to create new repositories. Can be one of 'read', 'write', 'admin' or 'none'.", + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"read", "write", "admin", "none"}, false), "default_repository_permission"), }, "members_can_create_repositories": { Type: schema.TypeBool, @@ -313,33 +313,84 @@ func resourceGithubOrganizationSettingsRead(d *schema.ResourceData, meta interfa return err } - d.Set("billing_email", orgSettings.GetBillingEmail()) - d.Set("company", orgSettings.GetCompany()) - d.Set("email", orgSettings.GetEmail()) - d.Set("twitter_username", orgSettings.GetTwitterUsername()) - d.Set("location", orgSettings.GetLocation()) - d.Set("name", orgSettings.GetName()) - d.Set("description", orgSettings.GetDescription()) - d.Set("has_organization_projects", orgSettings.GetHasOrganizationProjects()) - d.Set("has_repository_projects", orgSettings.GetHasRepositoryProjects()) - d.Set("default_repository_permission", orgSettings.GetDefaultRepoPermission()) - d.Set("members_can_create_repositories", orgSettings.GetMembersCanCreateRepos()) - d.Set("members_can_create_internal_repositories", orgSettings.GetMembersCanCreateInternalRepos()) - d.Set("members_can_create_private_repositories", orgSettings.GetMembersCanCreatePrivateRepos()) - d.Set("members_can_create_public_repositories", orgSettings.GetMembersCanCreatePublicRepos()) - d.Set("members_can_create_pages", orgSettings.GetMembersCanCreatePages()) - d.Set("members_can_create_public_pages", orgSettings.GetMembersCanCreatePublicPages()) - d.Set("members_can_create_private_pages", orgSettings.GetMembersCanCreatePrivatePages()) - d.Set("members_can_fork_private_repositories", orgSettings.GetMembersCanForkPrivateRepos()) - d.Set("web_commit_signoff_required", orgSettings.GetWebCommitSignoffRequired()) - d.Set("blog", orgSettings.GetBlog()) - d.Set("advanced_security_enabled_for_new_repositories", orgSettings.GetAdvancedSecurityEnabledForNewRepos()) - d.Set("dependabot_alerts_enabled_for_new_repositories", orgSettings.GetDependabotAlertsEnabledForNewRepos()) - d.Set("dependabot_security_updates_enabled_for_new_repositories", orgSettings.GetDependabotSecurityUpdatesEnabledForNewRepos()) - d.Set("dependency_graph_enabled_for_new_repositories", orgSettings.GetDependencyGraphEnabledForNewRepos()) - d.Set("secret_scanning_enabled_for_new_repositories", orgSettings.GetSecretScanningEnabledForNewRepos()) - d.Set("secret_scanning_push_protection_enabled_for_new_repositories", orgSettings.GetSecretScanningPushProtectionEnabledForNewRepos()) - + if err = d.Set("billing_email", orgSettings.GetBillingEmail()); err != nil { + return err + } + if err = d.Set("company", orgSettings.GetCompany()); err != nil { + return err + } + if err = d.Set("email", orgSettings.GetEmail()); err != nil { + return err + } + if err = d.Set("twitter_username", orgSettings.GetTwitterUsername()); err != nil { + return err + } + if err = d.Set("location", orgSettings.GetLocation()); err != nil { + return err + } + if err = d.Set("name", orgSettings.GetName()); err != nil { + return err + } + if err = d.Set("description", orgSettings.GetDescription()); err != nil { + return err + } + if err = d.Set("has_organization_projects", orgSettings.GetHasOrganizationProjects()); err != nil { + return err + } + if 
err = d.Set("has_repository_projects", orgSettings.GetHasRepositoryProjects()); err != nil { + return err + } + if err = d.Set("default_repository_permission", orgSettings.GetDefaultRepoPermission()); err != nil { + return err + } + if err = d.Set("members_can_create_repositories", orgSettings.GetMembersCanCreateRepos()); err != nil { + return err + } + if err = d.Set("members_can_create_internal_repositories", orgSettings.GetMembersCanCreateInternalRepos()); err != nil { + return err + } + if err = d.Set("members_can_create_private_repositories", orgSettings.GetMembersCanCreatePrivateRepos()); err != nil { + return err + } + if err = d.Set("members_can_create_public_repositories", orgSettings.GetMembersCanCreatePublicRepos()); err != nil { + return err + } + if err = d.Set("members_can_create_pages", orgSettings.GetMembersCanCreatePages()); err != nil { + return err + } + if err = d.Set("members_can_create_public_pages", orgSettings.GetMembersCanCreatePublicPages()); err != nil { + return err + } + if err = d.Set("members_can_create_private_pages", orgSettings.GetMembersCanCreatePrivatePages()); err != nil { + return err + } + if err = d.Set("members_can_fork_private_repositories", orgSettings.GetMembersCanForkPrivateRepos()); err != nil { + return err + } + if err = d.Set("web_commit_signoff_required", orgSettings.GetWebCommitSignoffRequired()); err != nil { + return err + } + if err = d.Set("blog", orgSettings.GetBlog()); err != nil { + return err + } + if err = d.Set("advanced_security_enabled_for_new_repositories", orgSettings.GetAdvancedSecurityEnabledForNewRepos()); err != nil { + return err + } + if err = d.Set("dependabot_alerts_enabled_for_new_repositories", orgSettings.GetDependabotAlertsEnabledForNewRepos()); err != nil { + return err + } + if err = d.Set("dependabot_security_updates_enabled_for_new_repositories", orgSettings.GetDependabotSecurityUpdatesEnabledForNewRepos()); err != nil { + return err + } + if err = d.Set("dependency_graph_enabled_for_new_repositories", orgSettings.GetDependencyGraphEnabledForNewRepos()); err != nil { + return err + } + if err = d.Set("secret_scanning_enabled_for_new_repositories", orgSettings.GetSecretScanningEnabledForNewRepos()); err != nil { + return err + } + if err = d.Set("secret_scanning_push_protection_enabled_for_new_repositories", orgSettings.GetSecretScanningPushProtectionEnabledForNewRepos()); err != nil { + return err + } return nil } diff --git a/github/resource_github_organization_settings_test.go b/github/resource_github_organization_settings_test.go index fc63b7b800..c0b6a8b20d 100644 --- a/github/resource_github_organization_settings_test.go +++ b/github/resource_github_organization_settings_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubOrganizationSettings(t *testing.T) { diff --git a/github/resource_github_organization_webhook.go b/github/resource_github_organization_webhook.go index 2b2732a8e1..896b73ebbf 100644 --- a/github/resource_github_organization_webhook.go +++ b/github/resource_github_organization_webhook.go @@ -7,7 +7,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubOrganizationWebhook() *schema.Resource { @@ -18,18 +18,13 @@ func resourceGithubOrganizationWebhook() *schema.Resource { Update: 
resourceGithubOrganizationWebhookUpdate, Delete: resourceGithubOrganizationWebhookDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, SchemaVersion: 1, MigrateState: resourceGithubWebhookMigrateState, Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Removed: "The `name` attribute is no longer necessary.", - }, "events": { Type: schema.TypeSet, Required: true, @@ -103,7 +98,12 @@ func resourceGithubOrganizationWebhookCreate(d *schema.ResourceData, meta interf if hook.Config["secret"] != nil { hook.Config["secret"] = webhookObj.Config["secret"] } - d.Set("configuration", []interface{}{hook.Config}) + + hook.Config = insecureSslStringToBool(hook.Config) + + if err = d.Set("configuration", []interface{}{hook.Config}); err != nil { + return err + } return resourceGithubOrganizationWebhookRead(d, meta) } @@ -142,10 +142,18 @@ func resourceGithubOrganizationWebhookRead(d *schema.ResourceData, meta interfac return err } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("url", hook.GetURL()) - d.Set("active", hook.GetActive()) - d.Set("events", hook.Events) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("url", hook.GetURL()); err != nil { + return err + } + if err = d.Set("active", hook.GetActive()); err != nil { + return err + } + if err = d.Set("events", hook.Events); err != nil { + return err + } // GitHub returns the secret as a string of 8 asterisks "********" // We would prefer to store the real secret in state, so we'll @@ -161,7 +169,9 @@ func resourceGithubOrganizationWebhookRead(d *schema.ResourceData, meta interfac hook.Config = insecureSslStringToBool(hook.Config) - d.Set("configuration", []interface{}{hook.Config}) + if err = d.Set("configuration", []interface{}{hook.Config}); err != nil { + return err + } return nil } diff --git a/github/resource_github_organization_webhook_test.go b/github/resource_github_organization_webhook_test.go index 08b09eb3d4..ae5fb3f70e 100644 --- a/github/resource_github_organization_webhook_test.go +++ b/github/resource_github_organization_webhook_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubOrganizationWebhook(t *testing.T) { diff --git a/github/resource_github_project_card.go b/github/resource_github_project_card.go index 389b046f1a..4deeaef2a5 100644 --- a/github/resource_github_project_card.go +++ b/github/resource_github_project_card.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubProjectCard() *schema.Resource { @@ -92,7 +92,9 @@ func resourceGithubProjectCardCreate(d *schema.ResourceData, meta interface{}) e return err } - d.Set("card_id", card.GetID()) + if err = d.Set("card_id", card.GetID()); err != nil { + return err + } d.SetId(card.GetNodeID()) return resourceGithubProjectCardRead(d, meta) @@ -127,9 +129,15 @@ func resourceGithubProjectCardRead(d *schema.ResourceData, meta interface{}) err return unconvertibleIdErr(columnIDStr, err) } - d.Set("note", card.GetNote()) - d.Set("column_id", columnIDStr) - d.Set("card_id", 
card.GetID()) + if err = d.Set("note", card.GetNote()); err != nil { + return err + } + if err = d.Set("column_id", columnIDStr); err != nil { + return err + } + if err = d.Set("card_id", card.GetID()); err != nil { + return err + } return nil } @@ -192,7 +200,9 @@ func resourceGithubProjectCardImport(d *schema.ResourceData, meta interface{}) ( } d.SetId(card.GetNodeID()) - d.Set("card_id", cardID) + if err = d.Set("card_id", cardID); err != nil { + return []*schema.ResourceData{d}, err + } return []*schema.ResourceData{d}, nil diff --git a/github/resource_github_project_card_test.go b/github/resource_github_project_card_test.go index 58b891e464..4172f851d9 100644 --- a/github/resource_github_project_card_test.go +++ b/github/resource_github_project_card_test.go @@ -2,11 +2,11 @@ package github import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubProjectCard(t *testing.T) { diff --git a/github/resource_github_project_column.go b/github/resource_github_project_column.go index 5665ed221f..d87f29c6b1 100644 --- a/github/resource_github_project_column.go +++ b/github/resource_github_project_column.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubProjectColumn() *schema.Resource { @@ -18,7 +18,7 @@ func resourceGithubProjectColumn() *schema.Resource { Update: resourceGithubProjectColumnUpdate, Delete: resourceGithubProjectColumnDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -74,7 +74,9 @@ func resourceGithubProjectColumnCreate(d *schema.ResourceData, meta interface{}) } d.SetId(strconv.FormatInt(column.GetID(), 10)) - d.Set("column_id", column.GetID()) + if err = d.Set("column_id", column.GetID()); err != nil { + return err + } return resourceGithubProjectColumnRead(d, meta) } @@ -106,9 +108,15 @@ func resourceGithubProjectColumnRead(d *schema.ResourceData, meta interface{}) e projectURL := column.GetProjectURL() projectID := strings.TrimPrefix(projectURL, client.BaseURL.String()+`projects/`) - d.Set("name", column.GetName()) - d.Set("project_id", projectID) - d.Set("column_id", column.GetID()) + if err = d.Set("name", column.GetName()); err != nil { + return err + } + if err = d.Set("project_id", projectID); err != nil { + return err + } + if err = d.Set("column_id", column.GetID()); err != nil { + return err + } return nil } diff --git a/github/resource_github_project_column_test.go b/github/resource_github_project_column_test.go index d3b9102b7c..cd7b14ad12 100644 --- a/github/resource_github_project_column_test.go +++ b/github/resource_github_project_column_test.go @@ -7,8 +7,8 @@ import ( "testing" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccGithubProjectColumn_basic(t *testing.T) { diff 
--git a/github/resource_github_release.go b/github/resource_github_release.go index 701520dbed..89ff5bb44f 100644 --- a/github/resource_github_release.go +++ b/github/resource_github_release.go @@ -4,10 +4,11 @@ import ( "context" "fmt" "log" + "net/http" "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubRelease() *schema.Resource { @@ -162,6 +163,13 @@ func resourceGithubReleaseRead(d *schema.ResourceData, meta interface{}) error { release, _, err := client.Repositories.GetRelease(ctx, owner, repository, releaseID) if err != nil { + if ghErr, ok := err.(*github.ErrorResponse); ok { + if ghErr.Response.StatusCode == http.StatusNotFound { + log.Printf("[INFO] Removing release ID %d for repository %s from state, because it no longer exists on GitHub", releaseID, repository) + d.SetId("") + return nil + } + } return err } transformResponseToResourceData(d, release, repository) @@ -213,7 +221,9 @@ func resourceGithubReleaseImport(d *schema.ResourceData, meta interface{}) ([]*s if repository == nil || err != nil { return []*schema.ResourceData{d}, err } - d.Set("repository", *repository.Name) + if err = d.Set("repository", *repository.Name); err != nil { + return []*schema.ResourceData{d}, err + } release, _, err := client.Repositories.GetRelease(ctx, owner, *repository.Name, releaseID) if release == nil || err != nil { @@ -226,23 +236,23 @@ func resourceGithubReleaseImport(d *schema.ResourceData, meta interface{}) ([]*s func transformResponseToResourceData(d *schema.ResourceData, release *github.RepositoryRelease, repository string) { d.SetId(strconv.FormatInt(release.GetID(), 10)) - d.Set("release_id", release.GetID()) - d.Set("node_id", release.GetNodeID()) - d.Set("repository", repository) - d.Set("tag_name", release.GetTagName()) - d.Set("target_commitish", release.GetTargetCommitish()) - d.Set("name", release.GetName()) - d.Set("body", release.GetBody()) - d.Set("draft", release.GetDraft()) - d.Set("generate_release_notes", release.GetGenerateReleaseNotes()) - d.Set("prerelease", release.GetPrerelease()) - d.Set("discussion_category_name", release.GetDiscussionCategoryName()) - d.Set("created_at", release.GetCreatedAt()) - d.Set("published_at", release.GetPublishedAt()) - d.Set("url", release.GetURL()) - d.Set("html_url", release.GetHTMLURL()) - d.Set("assets_url", release.GetAssetsURL()) - d.Set("upload_url", release.GetUploadURL()) - d.Set("zipball_url", release.GetZipballURL()) - d.Set("tarball_url", release.GetTarballURL()) + _ = d.Set("release_id", release.GetID()) + _ = d.Set("node_id", release.GetNodeID()) + _ = d.Set("repository", repository) + _ = d.Set("tag_name", release.GetTagName()) + _ = d.Set("target_commitish", release.GetTargetCommitish()) + _ = d.Set("name", release.GetName()) + _ = d.Set("body", release.GetBody()) + _ = d.Set("draft", release.GetDraft()) + _ = d.Set("generate_release_notes", release.GetGenerateReleaseNotes()) + _ = d.Set("prerelease", release.GetPrerelease()) + _ = d.Set("discussion_category_name", release.GetDiscussionCategoryName()) + _ = d.Set("created_at", release.GetCreatedAt().String()) + _ = d.Set("published_at", release.GetPublishedAt().String()) + _ = d.Set("url", release.GetURL()) + _ = d.Set("html_url", release.GetHTMLURL()) + _ = d.Set("assets_url", release.GetAssetsURL()) + _ = d.Set("upload_url", release.GetUploadURL()) + _ = d.Set("zipball_url", release.GetZipballURL()) + _ = d.Set("tarball_url", 
release.GetTarballURL()) } diff --git a/github/resource_github_release_test.go b/github/resource_github_release_test.go index 57ba6437b1..d523a10c7b 100644 --- a/github/resource_github_release_test.go +++ b/github/resource_github_release_test.go @@ -2,12 +2,12 @@ package github import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "log" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubReleaseResource(t *testing.T) { diff --git a/github/resource_github_repository.go b/github/resource_github_repository.go index 8503830754..47c6e0f0aa 100644 --- a/github/resource_github_repository.go +++ b/github/resource_github_repository.go @@ -10,8 +10,8 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubRepository() *schema.Resource { @@ -22,7 +22,9 @@ func resourceGithubRepository() *schema.Resource { Delete: resourceGithubRepositoryDelete, Importer: &schema.ResourceImporter{ State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("auto_init", false) + if err := d.Set("auto_init", false); err != nil { + return nil, err + } return []*schema.ResourceData{d}, nil }, }, @@ -32,10 +34,10 @@ func resourceGithubRepository() *schema.Resource { Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringMatch(regexp.MustCompile(`^[-a-zA-Z0-9_.]{1,100}$`), "must include only alphanumeric characters, underscores or hyphens and consist of 100 characters or less"), - Description: "The name of the repository.", + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: toDiagFunc(validation.StringMatch(regexp.MustCompile(`^[-a-zA-Z0-9_.]{1,100}$`), "must include only alphanumeric characters, underscores or hyphens and consist of 100 characters or less"), "name"), + Description: "The name of the repository.", }, "description": { Type: schema.TypeString, @@ -55,11 +57,11 @@ func resourceGithubRepository() *schema.Resource { Deprecated: "use visibility instead", }, "visibility": { - Type: schema.TypeString, - Optional: true, - Computed: true, // is affected by "private" - ValidateFunc: validation.StringInSlice([]string{"public", "private", "internal"}, false), - Description: "Can be 'public' or 'private'. If your organization is associated with an enterprise account using GitHub Enterprise Cloud or GitHub Enterprise Server 2.20+, visibility can also be 'internal'.", + Type: schema.TypeString, + Optional: true, + Computed: true, // is affected by "private" + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"public", "private", "internal"}, false), "visibility"), + Description: "Can be 'public' or 'private'. 
If your organization is associated with an enterprise account using GitHub Enterprise Cloud or GitHub Enterprise Server 2.20+, visibility can also be 'internal'.", }, "security_and_analysis": { Type: schema.TypeList, @@ -77,10 +79,10 @@ func resourceGithubRepository() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"enabled", "disabled"}, false), - Description: "Set to 'enabled' to enable advanced security features on the repository. Can be 'enabled' or 'disabled'.", + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"enabled", "disabled"}, false), "status"), + Description: "Set to 'enabled' to enable advanced security features on the repository. Can be 'enabled' or 'disabled'.", }, }, }, @@ -93,10 +95,10 @@ func resourceGithubRepository() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"enabled", "disabled"}, false), - Description: "Set to 'enabled' to enable secret scanning on the repository. Can be 'enabled' or 'disabled'. If set to 'enabled', the repository's visibility must be 'public' or 'security_and_analysis[0].advanced_security[0].status' must also be set to 'enabled'.", + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"enabled", "disabled"}, false), "secret_scanning"), + Description: "Set to 'enabled' to enable secret scanning on the repository. Can be 'enabled' or 'disabled'. If set to 'enabled', the repository's visibility must be 'public' or 'security_and_analysis[0].advanced_security[0].status' must also be set to 'enabled'.", }, }, }, @@ -109,10 +111,10 @@ func resourceGithubRepository() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"enabled", "disabled"}, false), - Description: "Set to 'enabled' to enable secret scanning push protection on the repository. Can be 'enabled' or 'disabled'. If set to 'enabled', the repository's visibility must be 'public' or 'security_and_analysis[0].advanced_security[0].status' must also be set to 'enabled'.", + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{"enabled", "disabled"}, false), "secret_scanning_push_protection"), + Description: "Set to 'enabled' to enable secret scanning push protection on the repository. Can be 'enabled' or 'disabled'. 
If set to 'enabled', the repository's visibility must be 'public' or 'security_and_analysis[0].advanced_security[0].status' must also be set to 'enabled'.", }, }, }, @@ -272,11 +274,11 @@ func resourceGithubRepository() *schema.Resource { }, }, "build_type": { - Type: schema.TypeString, - Optional: true, - Default: "legacy", - Description: "The type the page should be sourced.", - ValidateFunc: validateValueFunc([]string{"legacy", "workflow"}), + Type: schema.TypeString, + Optional: true, + Default: "legacy", + Description: "The type the page should be sourced.", + ValidateDiagFunc: validateValueFunc([]string{"legacy", "workflow"}), }, "cname": { Type: schema.TypeString, @@ -311,8 +313,8 @@ func resourceGithubRepository() *schema.Resource { Computed: true, Description: "The list of topics of the repository.", Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringMatch(regexp.MustCompile(`^[a-z0-9][a-z0-9-]{0,49}$`), "must include only lowercase alphanumeric characters or hyphens and cannot start with a hyphen and consist of 50 characters or less"), + Type: schema.TypeString, + ValidateDiagFunc: toDiagFunc(validation.StringMatch(regexp.MustCompile(`^[a-z0-9][a-z0-9-]{0,49}$`), "must include only lowercase alphanumeric characters or hyphens and cannot start with a hyphen and consist of 50 characters or less"), "topics"), }, }, "vulnerability_alerts": { @@ -704,14 +706,18 @@ func resourceGithubRepositoryRead(d *schema.ResourceData, meta interface{}) erro } if repo.TemplateRepository != nil { - d.Set("template", []interface{}{ + if err = d.Set("template", []interface{}{ map[string]interface{}{ "owner": repo.TemplateRepository.Owner.Login, "repository": repo.TemplateRepository.Name, }, - }) + }); err != nil { + return err + } } else { - d.Set("template", []interface{}{}) + if err = d.Set("template", []interface{}{}); err != nil { + return err + } } if !d.Get("ignore_vulnerability_alerts_during_read").(bool) { @@ -719,10 +725,14 @@ func resourceGithubRepositoryRead(d *schema.ResourceData, meta interface{}) erro if err != nil { return fmt.Errorf("error reading repository vulnerability alerts: %v", err) } - d.Set("vulnerability_alerts", vulnerabilityAlerts) + if err = d.Set("vulnerability_alerts", vulnerabilityAlerts); err != nil { + return err + } } - d.Set("security_and_analysis", flattenSecurityAndAnalysis(repo.GetSecurityAndAnalysis())) + if err = d.Set("security_and_analysis", flattenSecurityAndAnalysis(repo.GetSecurityAndAnalysis())); err != nil { + return err + } return nil } @@ -856,7 +866,9 @@ func resourceGithubRepositoryDelete(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Repository already archived, nothing to do on delete: %s/%s", owner, repoName) return nil } else { - d.Set("archived", true) + if err := d.Set("archived", true); err != nil { + return err + } repoReq := resourceGithubRepositoryObject(d) log.Printf("[DEBUG] Archiving repository on delete: %s/%s", owner, repoName) _, _, err := client.Repositories.Edit(ctx, owner, repoName, repoReq) @@ -1037,9 +1049,11 @@ func resourceGithubParseFullName(resourceDataLike interface { return parts[0], parts[1], true } -func customDiffFunction(diff *schema.ResourceDiff, v interface{}) error { +func customDiffFunction(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { if diff.HasChange("name") { - diff.SetNewComputed("full_name") + if err := diff.SetNewComputed("full_name"); err != nil { + return err + } } return nil } diff --git 
a/github/resource_github_repository_autolink_reference.go b/github/resource_github_repository_autolink_reference.go index baf0316564..785d0c0830 100644 --- a/github/resource_github_repository_autolink_reference.go +++ b/github/resource_github_repository_autolink_reference.go @@ -10,8 +10,8 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubRepositoryAutolinkReference() *schema.Resource { @@ -47,7 +47,9 @@ func resourceGithubRepositoryAutolinkReference() *schema.Resource { id = strconv.FormatInt(*autolink.ID, 10) } - d.Set("repository", repository) + if err = d.Set("repository", repository); err != nil { + return nil, err + } d.SetId(id) return []*schema.ResourceData{d}, nil }, }, @@ -68,11 +70,11 @@ func resourceGithubRepositoryAutolinkReference() *schema.Resource { Description: "This prefix appended by a number will generate a link any time it is found in an issue, pull request, or commit", }, "target_url_template": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The template of the target URL used for the links; must be a valid URL and contain `<num>` for the reference number", - ValidateFunc: validation.StringMatch(regexp.MustCompile(`^http[s]?:\/\/[a-z0-9-.]*(:[0-9]+)?\/.*?<num>.*?$`), "must be a valid URL and contain <num> token"), + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The template of the target URL used for the links; must be a valid URL and contain `<num>` for the reference number", + ValidateDiagFunc: toDiagFunc(validation.StringMatch(regexp.MustCompile(`^http[s]?:\/\/[a-z0-9-.]*(:[0-9]+)?\/.*?<num>.*?$`), "must be a valid URL and contain <num> token"), "target_url_template"), }, "is_alphanumeric": { Type: schema.TypeBool, @@ -143,10 +145,18 @@ func resourceGithubRepositoryAutolinkReferenceRead(d *schema.ResourceData, meta // Set resource fields d.SetId(strconv.FormatInt(autolinkRef.GetID(), 10)) - d.Set("repository", repoName) - d.Set("key_prefix", autolinkRef.KeyPrefix) - d.Set("target_url_template", autolinkRef.URLTemplate) - d.Set("is_alphanumeric", autolinkRef.IsAlphanumeric) + if err = d.Set("repository", repoName); err != nil { + return err + } + if err = d.Set("key_prefix", autolinkRef.KeyPrefix); err != nil { + return err + } + if err = d.Set("target_url_template", autolinkRef.URLTemplate); err != nil { + return err + } + if err = d.Set("is_alphanumeric", autolinkRef.IsAlphanumeric); err != nil { + return err + } return nil } diff --git a/github/resource_github_repository_autolink_reference_test.go b/github/resource_github_repository_autolink_reference_test.go index 66c4e09034..87c01a2cc4 100644 --- a/github/resource_github_repository_autolink_reference_test.go +++ b/github/resource_github_repository_autolink_reference_test.go @@ -2,10 +2,11 @@ package github import ( "fmt" + "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryAutolinkReference(t *testing.T) { @@ -288,6 +289,12 @@ func TestAccGithubRepositoryAutolinkReference(t *testing.T) { ImportStateVerify: true, ImportStateId: fmt.Sprintf("oof-%s/OOF-", 
randomID), }, + { + ResourceName: "github_repository_autolink_reference.autolink", + ImportState: true, + ImportStateId: fmt.Sprintf("oof-%s/OCTOCAT-", randomID), + ExpectError: regexp.MustCompile(`cannot find autolink reference`), + }, }, }) } diff --git a/github/resource_github_repository_automated_security_fixes.go b/github/resource_github_repository_automated_security_fixes.go index 1bf14b83d8..0647f935e0 100644 --- a/github/resource_github_repository_automated_security_fixes.go +++ b/github/resource_github_repository_automated_security_fixes.go @@ -3,7 +3,7 @@ package github import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubRepositoryDependabotSecurityUpdates() *schema.Resource { diff --git a/github/resource_github_repository_automated_security_fixes_test.go b/github/resource_github_repository_automated_security_fixes_test.go index 1de0da37e1..2d7e5322fd 100644 --- a/github/resource_github_repository_automated_security_fixes_test.go +++ b/github/resource_github_repository_automated_security_fixes_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubAutomatedSecurityFixes(t *testing.T) { @@ -18,15 +18,15 @@ func TestAccGithubAutomatedSecurityFixes(t *testing.T) { enabled := "enabled = false" updatedEnabled := "enabled = true" config := fmt.Sprintf(` - + resource "github_repository" "test" { name = "tf-acc-test-%s" visibility = "private" auto_init = true vulnerability_alerts = true } - - + + resource "github_repository_dependabot_security_updates" "test" { repository = github_repository.test.id %s @@ -86,15 +86,15 @@ func TestAccGithubAutomatedSecurityFixes(t *testing.T) { updatedEnabled := "enabled = false" config := fmt.Sprintf(` - + resource "github_repository" "test" { name = "tf-acc-test-%s" visibility = "private" auto_init = true vulnerability_alerts = true } - - + + resource "github_repository_dependabot_security_updates" "test" { repository = github_repository.test.id %s diff --git a/github/resource_github_repository_collaborator.go b/github/resource_github_repository_collaborator.go index 171cbfe1bd..f01932ba34 100644 --- a/github/resource_github_repository_collaborator.go +++ b/github/resource_github_repository_collaborator.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubRepositoryCollaborator() *schema.Resource { @@ -18,7 +18,7 @@ func resourceGithubRepositoryCollaborator() *schema.Resource { Update: resourceGithubRepositoryCollaboratorUpdate, Delete: resourceGithubRepositoryCollaboratorDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, // editing repository collaborators are not supported by github api so forcing new on any changes @@ -123,10 +123,18 @@ func resourceGithubRepositoryCollaboratorRead(d *schema.ResourceData, meta inter permissionName := getPermission(invitation.GetPermissions()) - d.Set("repository", repoName) - d.Set("username", username) - d.Set("permission", permissionName) - d.Set("invitation_id", fmt.Sprintf("%d", 
invitation.GetID())) + if err = d.Set("repository", repoName); err != nil { + return err + } + if err = d.Set("username", username); err != nil { + return err + } + if err = d.Set("permission", permissionName); err != nil { + return err + } + if err = d.Set("invitation_id", fmt.Sprintf("%d", invitation.GetID())); err != nil { + return err + } return nil } @@ -144,9 +152,15 @@ func resourceGithubRepositoryCollaboratorRead(d *schema.ResourceData, meta inter for _, c := range collaborators { if strings.EqualFold(c.GetLogin(), username) { - d.Set("repository", repoName) - d.Set("username", c.GetLogin()) - d.Set("permission", getPermission(c.GetRoleName())) + if err = d.Set("repository", repoName); err != nil { + return err + } + if err = d.Set("username", c.GetLogin()); err != nil { + return err + } + if err = d.Set("permission", getPermission(c.GetRoleName())); err != nil { + return err + } return nil } } diff --git a/github/resource_github_repository_collaborator_test.go b/github/resource_github_repository_collaborator_test.go index 22bf713ff8..a55b5e1025 100644 --- a/github/resource_github_repository_collaborator_test.go +++ b/github/resource_github_repository_collaborator_test.go @@ -5,8 +5,8 @@ import ( "os" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryCollaborator(t *testing.T) { diff --git a/github/resource_github_repository_collaborators.go b/github/resource_github_repository_collaborators.go index 9d366ebefc..ab6d486564 100644 --- a/github/resource_github_repository_collaborators.go +++ b/github/resource_github_repository_collaborators.go @@ -8,8 +8,8 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubRepositoryCollaborators() *schema.Resource { @@ -19,7 +19,7 @@ func resourceGithubRepositoryCollaborators() *schema.Resource { Update: resourceGithubRepositoryCollaboratorsUpdate, Delete: resourceGithubRepositoryCollaboratorsDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -80,7 +80,7 @@ func resourceGithubRepositoryCollaborators() *schema.Resource { CustomizeDiff: customdiff.Sequence( // If there was a new user added to the list of collaborators, // it's possible a new invitation id will be created in GitHub. 
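// NOTE: the read functions in this patch check every d.Set call individually,
// which is why these hunks grow so much. A hypothetical helper (illustrative
// only; `setAll` is not part of this PR) could express the same
// "return the first d.Set error" behavior more compactly:
//
//	func setAll(d *schema.ResourceData, kv map[string]interface{}) error {
//		for k, v := range kv {
//			if err := d.Set(k, v); err != nil {
//				return err
//			}
//		}
//		return nil
//	}
//
// e.g. the collaborator read above could then be written as
// setAll(d, map[string]interface{}{"repository": repoName, "username": username, "permission": permissionName}).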
- customdiff.ComputedIf("invitation_ids", func(d *schema.ResourceDiff, meta interface{}) bool { + customdiff.ComputedIf("invitation_ids", func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) bool { return d.HasChange("user") }), ), diff --git a/github/resource_github_repository_collaborators_test.go b/github/resource_github_repository_collaborators_test.go index 41a4511215..3ccb39e5ff 100644 --- a/github/resource_github_repository_collaborators_test.go +++ b/github/resource_github_repository_collaborators_test.go @@ -8,9 +8,9 @@ import ( "testing" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccGithubRepositoryCollaborators(t *testing.T) { diff --git a/github/resource_github_repository_deploy_key.go b/github/resource_github_repository_deploy_key.go index ad5ea94bc1..66d155f6f2 100644 --- a/github/resource_github_repository_deploy_key.go +++ b/github/resource_github_repository_deploy_key.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubRepositoryDeployKey() *schema.Resource { @@ -18,7 +18,7 @@ func resourceGithubRepositoryDeployKey() *schema.Resource { Read: resourceGithubRepositoryDeployKeyRead, Delete: resourceGithubRepositoryDeployKeyDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, // Deploy keys are defined immutable in the API. Updating results in force new. 
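// NOTE: the ValidateFunc -> ValidateDiagFunc conversions throughout this patch
// go through a toDiagFunc helper that is referenced here but defined elsewhere
// in the PR. A minimal sketch of such an adapter, assuming only the public
// terraform-plugin-sdk/v2 types (SchemaValidateFunc, SchemaValidateDiagFunc,
// diag.Diagnostics) rather than the PR's actual implementation:
//
//	import (
//		"github.com/hashicorp/go-cty/cty"
//		"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
//		"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
//	)
//
//	// toDiagFunc wraps a v1-style validator (warnings/errors) and reports
//	// its results as v2 diagnostics for the named attribute.
//	func toDiagFunc(fn schema.SchemaValidateFunc, attr string) schema.SchemaValidateDiagFunc {
//		return func(v interface{}, _ cty.Path) diag.Diagnostics {
//			var diags diag.Diagnostics
//			warnings, errs := fn(v, attr)
//			for _, w := range warnings {
//				diags = append(diags, diag.Diagnostic{Severity: diag.Warning, Summary: w})
//			}
//			for _, e := range errs {
//				diags = append(diags, diag.Diagnostic{Severity: diag.Error, Summary: e.Error()})
//			}
//			return diags
//		}
//	}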
@@ -118,11 +118,21 @@ func resourceGithubRepositoryDeployKeyRead(d *schema.ResourceData, meta interfac return err } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("key", key.GetKey()) - d.Set("read_only", key.GetReadOnly()) - d.Set("repository", repoName) - d.Set("title", key.GetTitle()) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("key", key.GetKey()); err != nil { + return err + } + if err = d.Set("read_only", key.GetReadOnly()); err != nil { + return err + } + if err = d.Set("repository", repoName); err != nil { + return err + } + if err = d.Set("title", key.GetTitle()); err != nil { + return err + } return nil } diff --git a/github/resource_github_repository_deploy_key_test.go b/github/resource_github_repository_deploy_key_test.go index 7784f905fa..68a40b5199 100644 --- a/github/resource_github_repository_deploy_key_test.go +++ b/github/resource_github_repository_deploy_key_test.go @@ -10,9 +10,9 @@ import ( "strconv" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestSuppressDeployKeyDiff(t *testing.T) { diff --git a/github/resource_github_repository_deployment_branch_policy.go b/github/resource_github_repository_deployment_branch_policy.go index 1a00b62b28..c2c1e558d2 100644 --- a/github/resource_github_repository_deployment_branch_policy.go +++ b/github/resource_github_repository_deployment_branch_policy.go @@ -7,7 +7,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubRepositoryDeploymentBranchPolicy() *schema.Resource { @@ -122,10 +122,18 @@ func resourceGithubRepositoryDeploymentBranchPolicyRead(d *schema.ResourceData, return err } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("repository", repoName) - d.Set("environment_name", environmentName) - d.Set("name", policy.Name) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("repository", repoName); err != nil { + return err + } + if err = d.Set("environment_name", environmentName); err != nil { + return err + } + if err = d.Set("name", policy.Name); err != nil { + return err + } return nil } @@ -157,8 +165,12 @@ func resourceGithubRepositoryDeploymentBranchPolicyImport(d *schema.ResourceData } d.SetId(id) - d.Set("repository", repoName) - d.Set("environment_name", environmentName) + if err = d.Set("repository", repoName); err != nil { + return nil, err + } + if err = d.Set("environment_name", environmentName); err != nil { + return nil, err + } err = resourceGithubRepositoryDeploymentBranchPolicyRead(d, meta) if err != nil { diff --git a/github/resource_github_repository_deployment_branch_policy_test.go b/github/resource_github_repository_deployment_branch_policy_test.go index 7080abe065..083529336f 100644 --- a/github/resource_github_repository_deployment_branch_policy_test.go +++ b/github/resource_github_repository_deployment_branch_policy_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryDeploymentBranchPolicy(t *testing.T) { diff --git a/github/resource_github_repository_environment.go b/github/resource_github_repository_environment.go index 171af8a74f..f8d658bcb2 100644 --- a/github/resource_github_repository_environment.go +++ b/github/resource_github_repository_environment.go @@ -7,8 +7,8 @@ import ( "net/url" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubRepositoryEnvironment() *schema.Resource { @@ -18,7 +18,7 @@ func resourceGithubRepositoryEnvironment() *schema.Resource { Update: resourceGithubRepositoryEnvironmentUpdate, Delete: resourceGithubRepositoryEnvironmentDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "repository": { @@ -39,11 +39,17 @@ func resourceGithubRepositoryEnvironment() *schema.Resource { Default: true, Description: "Can Admins bypass deployment protections", }, + "prevent_self_review": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Prevent users from approving workflows runs that they triggered.", + }, "wait_timer": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 43200), - Description: "Amount of time to delay a job after the job is initially triggered.", + Type: schema.TypeInt, + Optional: true, + ValidateDiagFunc: toDiagFunc(validation.IntBetween(0, 43200), "wait_timer"), + Description: "Amount of time to delay a job after the job is initially triggered.", }, "reviewers": { Type: schema.TypeList, @@ -146,7 +152,9 @@ func resourceGithubRepositoryEnvironmentRead(d *schema.ResourceData, meta interf for _, pr := range env.ProtectionRules { switch *pr.Type { case "wait_timer": - d.Set("wait_timer", pr.WaitTimer) + if err = d.Set("wait_timer", pr.WaitTimer); err != nil { + return err + } case "required_reviewers": teams := make([]int64, 0) @@ -164,22 +172,30 @@ func resourceGithubRepositoryEnvironmentRead(d *schema.ResourceData, meta interf } } } - d.Set("reviewers", []interface{}{ + if err = d.Set("reviewers", []interface{}{ map[string]interface{}{ "teams": teams, "users": users, }, - }) + }); err != nil { + return err + } + + if err = d.Set("prevent_self_review", pr.PreventSelfReview); err != nil { + return err + } } } if env.DeploymentBranchPolicy != nil { - d.Set("deployment_branch_policy", []interface{}{ + if err = d.Set("deployment_branch_policy", []interface{}{ map[string]interface{}{ "protected_branches": env.DeploymentBranchPolicy.ProtectedBranches, "custom_branch_policies": env.DeploymentBranchPolicy.CustomBranchPolicies, }, - }) + }); err != nil { + return err + } } else { d.Set("deployment_branch_policy", []interface{}{}) } @@ -233,6 +249,8 @@ func createUpdateEnvironmentData(d *schema.ResourceData, meta interface{}) githu data.CanAdminsBypass = github.Bool(d.Get("can_admins_bypass").(bool)) + data.PreventSelfReview = github.Bool(d.Get("prevent_self_review").(bool)) + if v, ok := d.GetOk("reviewers"); ok { envReviewers := make([]*github.EnvReviewers, 0) diff --git 
a/github/resource_github_repository_environment_deployment_policy.go b/github/resource_github_repository_environment_deployment_policy.go index 8d711ce508..7f116bce97 100644 --- a/github/resource_github_repository_environment_deployment_policy.go +++ b/github/resource_github_repository_environment_deployment_policy.go @@ -8,7 +8,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubRepositoryEnvironmentDeploymentPolicy() *schema.Resource { @@ -18,7 +18,7 @@ func resourceGithubRepositoryEnvironmentDeploymentPolicy() *schema.Resource { Update: resourceGithubRepositoryEnvironmentDeploymentPolicyUpdate, Delete: resourceGithubRepositoryEnvironmentDeploymentPolicyDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "repository": { diff --git a/github/resource_github_repository_environment_deployment_policy_test.go b/github/resource_github_repository_environment_deployment_policy_test.go index 44fb427b64..f1de1880f3 100644 --- a/github/resource_github_repository_environment_deployment_policy_test.go +++ b/github/resource_github_repository_environment_deployment_policy_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryEnvironmentDeploymentPolicy(t *testing.T) { diff --git a/github/resource_github_repository_environment_test.go b/github/resource_github_repository_environment_test.go index 2b0ed07db9..6198b6b102 100644 --- a/github/resource_github_repository_environment_test.go +++ b/github/resource_github_repository_environment_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryEnvironment(t *testing.T) { @@ -30,6 +30,7 @@ func TestAccGithubRepositoryEnvironment(t *testing.T) { environment = "environment / test" can_admins_bypass = false wait_timer = 10000 + prevent_self_review = true reviewers { users = [data.github_user.current.id] } @@ -44,6 +45,7 @@ func TestAccGithubRepositoryEnvironment(t *testing.T) { check := resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr("github_repository_environment.test", "environment", "environment / test"), resource.TestCheckResourceAttr("github_repository_environment.test", "can_admins_bypass", "false"), + resource.TestCheckResourceAttr("github_repository_environment.test", "prevent_self_review", "true"), resource.TestCheckResourceAttr("github_repository_environment.test", "wait_timer", "10000"), ) diff --git a/github/resource_github_repository_file.go b/github/resource_github_repository_file.go index 7acd2ed997..b36dfcb4d8 100644 --- a/github/resource_github_repository_file.go +++ b/github/resource_github_repository_file.go @@ -9,7 +9,7 @@ import ( "fmt" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) 
func resourceGithubRepositoryFile() *schema.Resource { @@ -34,7 +34,9 @@ func resourceGithubRepositoryFile() *schema.Resource { opts := &github.RepositoryContentGetOptions{} if len(parts) == 2 { opts.Ref = parts[1] - d.Set("branch", parts[1]) + if err := d.Set("branch", parts[1]); err != nil { + return nil, err + } } fc, _, _, err := client.Repositories.GetContents(ctx, owner, repo, file, opts) if err != nil { @@ -45,7 +47,9 @@ func resourceGithubRepositoryFile() *schema.Resource { } d.SetId(fmt.Sprintf("%s/%s", repo, file)) - d.Set("overwrite_on_create", false) + if err = d.Set("overwrite_on_create", false); err != nil { + return nil, err + } return []*schema.ResourceData{d}, nil }, @@ -220,7 +224,9 @@ func resourceGithubRepositoryFileCreate(d *schema.ResourceData, meta interface{} } d.SetId(fmt.Sprintf("%s/%s", repo, file)) - d.Set("commit_sha", create.Commit.GetSHA()) + if err = d.Set("commit_sha", create.Commit.GetSHA()); err != nil { + return err + } return resourceGithubRepositoryFileRead(d, meta) } @@ -259,10 +265,18 @@ func resourceGithubRepositoryFileRead(d *schema.ResourceData, meta interface{}) return err } - d.Set("content", content) - d.Set("repository", repo) - d.Set("file", file) - d.Set("sha", fc.GetSHA()) + if err = d.Set("content", content); err != nil { + return err + } + if err = d.Set("repository", repo); err != nil { + return err + } + if err = d.Set("file", file); err != nil { + return err + } + if err = d.Set("sha", fc.GetSHA()); err != nil { + return err + } var commit *github.RepositoryCommit @@ -275,7 +289,9 @@ func resourceGithubRepositoryFileRead(d *schema.ResourceData, meta interface{}) return err } ref := parsedQuery["ref"][0] - d.Set("ref", ref) + if err = d.Set("ref", ref); err != nil { + return err + } // Use the SHA to lookup the commit info if we know it, otherwise loop through commits if sha, ok := d.GetOk("commit_sha"); ok { @@ -290,7 +306,9 @@ func resourceGithubRepositoryFileRead(d *schema.ResourceData, meta interface{}) return err } - d.Set("commit_sha", commit.GetSHA()) + if err = d.Set("commit_sha", commit.GetSHA()); err != nil { + return err + } commit_author := commit.Commit.GetCommitter().GetName() commit_email := commit.Commit.GetCommitter().GetEmail() @@ -300,10 +318,16 @@ func resourceGithubRepositoryFileRead(d *schema.ResourceData, meta interface{}) //read from state if author+email is set explicitly, and if it was not github signing it for you previously if commit_author != "GitHub" && commit_email != "noreply@github.com" && hasCommitAuthor && hasCommitEmail { - d.Set("commit_author", commit_author) - d.Set("commit_email", commit_email) + if err = d.Set("commit_author", commit_author); err != nil { + return err + } + if err = d.Set("commit_email", commit_email); err != nil { + return err + } + } + if err = d.Set("commit_message", commit.GetCommit().GetMessage()); err != nil { + return err } - d.Set("commit_message", commit.GetCommit().GetMessage()) return nil } @@ -339,7 +363,9 @@ func resourceGithubRepositoryFileUpdate(d *schema.ResourceData, meta interface{} return err } - d.Set("commit_sha", create.GetSHA()) + if err = d.Set("commit_sha", create.GetSHA()); err != nil { + return err + } return resourceGithubRepositoryFileRead(d, meta) } diff --git a/github/resource_github_repository_file_test.go b/github/resource_github_repository_file_test.go index 5f3589cf29..e0357622bd 100644 --- a/github/resource_github_repository_file_test.go +++ b/github/resource_github_repository_file_test.go @@ -6,8 +6,8 @@ import ( "strings" "testing" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryFile(t *testing.T) { diff --git a/github/resource_github_repository_milestone.go b/github/resource_github_repository_milestone.go index 0b11617b99..3b4c99add4 100644 --- a/github/resource_github_repository_milestone.go +++ b/github/resource_github_repository_milestone.go @@ -10,8 +10,8 @@ import ( "time" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubRepositoryMilestone() *schema.Resource { @@ -26,13 +26,19 @@ func resourceGithubRepositoryMilestone() *schema.Resource { if len(parts) != 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" { return nil, fmt.Errorf("invalid ID format, must be provided as OWNER/REPOSITORY/NUMBER") } - d.Set("owner", parts[0]) - d.Set("repository", parts[1]) + if err := d.Set("owner", parts[0]); err != nil { + return nil, err + } + if err := d.Set("repository", parts[1]); err != nil { + return nil, err + } number, err := strconv.Atoi(parts[2]) if err != nil { return nil, err } - d.Set("number", number) + if err := d.Set("number", number); err != nil { + return nil, err + } d.SetId(fmt.Sprintf("%s/%s/%d", parts[0], parts[1], number)) return []*schema.ResourceData{d}, nil @@ -68,9 +74,9 @@ func resourceGithubRepositoryMilestone() *schema.Resource { "state": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringInSlice([]string{ + ValidateDiagFunc: toDiagFunc(validation.StringInSlice([]string{ "open", "closed", - }, true), + }, true), "state"), Default: "open", Description: "The state of the milestone. Either 'open' or 'closed'. 
Default: 'open'.", }, @@ -151,12 +157,22 @@ func resourceGithubRepositoryMilestoneRead(d *schema.ResourceData, meta interfac return err } - d.Set("title", milestone.GetTitle()) - d.Set("description", milestone.GetDescription()) - d.Set("number", milestone.GetNumber()) - d.Set("state", milestone.GetState()) + if err = d.Set("title", milestone.GetTitle()); err != nil { + return err + } + if err = d.Set("description", milestone.GetDescription()); err != nil { + return err + } + if err = d.Set("number", milestone.GetNumber()); err != nil { + return err + } + if err = d.Set("state", milestone.GetState()); err != nil { + return err + } if dueOn := milestone.GetDueOn(); !dueOn.IsZero() { - d.Set("due_date", milestone.GetDueOn().Format(layoutISO)) + if err := d.Set("due_date", milestone.GetDueOn().Format(layoutISO)); err != nil { + return err + } } return nil diff --git a/github/resource_github_repository_milestone_test.go b/github/resource_github_repository_milestone_test.go index 08e9a12693..ae4bfe2abc 100644 --- a/github/resource_github_repository_milestone_test.go +++ b/github/resource_github_repository_milestone_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryMilestone(t *testing.T) { diff --git a/github/resource_github_repository_project.go b/github/resource_github_repository_project.go index cf04ca79bc..7bc7868afb 100644 --- a/github/resource_github_repository_project.go +++ b/github/resource_github_repository_project.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubRepositoryProject() *schema.Resource { @@ -24,7 +24,9 @@ func resourceGithubRepositoryProject() *schema.Resource { if len(parts) != 2 { return nil, fmt.Errorf("invalid ID specified: supplied ID must be written as /") } - d.Set("repository", parts[0]) + if err := d.Set("repository", parts[0]); err != nil { + return nil, err + } d.SetId(parts[1]) return []*schema.ResourceData{d}, nil }, @@ -113,11 +115,19 @@ func resourceGithubRepositoryProjectRead(d *schema.ResourceData, meta interface{ return err } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("name", project.GetName()) - d.Set("body", project.GetBody()) - d.Set("url", fmt.Sprintf("https://github.com/%s/%s/projects/%d", - owner, d.Get("repository"), project.GetNumber())) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("name", project.GetName()); err != nil { + return err + } + if err = d.Set("body", project.GetBody()); err != nil { + return err + } + if err := d.Set("url", fmt.Sprintf("https://github.com/%s/%s/projects/%d", + owner, d.Get("repository"), project.GetNumber())); err != nil { + return err + } return nil } diff --git a/github/resource_github_repository_project_test.go b/github/resource_github_repository_project_test.go index a7b836e842..c9907bf104 100644 --- a/github/resource_github_repository_project_test.go +++ b/github/resource_github_repository_project_test.go @@ -5,8 +5,8 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryProject(t *testing.T) { diff --git a/github/resource_github_repository_pull_request.go b/github/resource_github_repository_pull_request.go index c11083328d..96cea6148c 100644 --- a/github/resource_github_repository_pull_request.go +++ b/github/resource_github_repository_pull_request.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubRepositoryPullRequest() *schema.Resource { @@ -23,7 +23,9 @@ func resourceGithubRepositoryPullRequest() *schema.Resource { if err != nil { return nil, err } - d.Set("base_repository", baseRepository) + if err := d.Set("base_repository", baseRepository); err != nil { + return nil, err + } return []*schema.ResourceData{d}, nil }, @@ -187,11 +189,18 @@ func resourceGithubRepositoryPullRequestRead(d *schema.ResourceData, meta interf return err } - d.Set("number", pullRequest.GetNumber()) + if err = d.Set("number", pullRequest.GetNumber()); err != nil { + return err + } if head := pullRequest.GetHead(); head != nil { - d.Set("head_ref", head.GetRef()) - d.Set("head_sha", head.GetSHA()) + if err = d.Set("head_ref", head.GetRef()); err != nil { + return err + } + + if err = d.Set("head_sha", head.GetSHA()); err != nil { + return err + } } else { // Totally unexpected condition. Better do that than segfault, I guess? log.Printf("[INFO] Head branch missing, expected %s", d.Get("head_ref")) @@ -200,8 +209,12 @@ func resourceGithubRepositoryPullRequestRead(d *schema.ResourceData, meta interf } if base := pullRequest.GetBase(); base != nil { - d.Set("base_ref", base.GetRef()) - d.Set("base_sha", base.GetSHA()) + if err = d.Set("base_ref", base.GetRef()); err != nil { + return err + } + if err = d.Set("base_sha", base.GetSHA()); err != nil { + return err + } } else { // Seme logic as with the missing head branch. 
log.Printf("[INFO] Base branch missing, expected %s", d.Get("base_ref")) @@ -209,24 +222,44 @@ func resourceGithubRepositoryPullRequestRead(d *schema.ResourceData, meta interf return nil } - d.Set("body", pullRequest.GetBody()) - d.Set("title", pullRequest.GetTitle()) - d.Set("draft", pullRequest.GetDraft()) - d.Set("maintainer_can_modify", pullRequest.GetMaintainerCanModify()) - d.Set("number", pullRequest.GetNumber()) - d.Set("state", pullRequest.GetState()) - d.Set("opened_at", pullRequest.GetCreatedAt().Unix()) - d.Set("updated_at", pullRequest.GetUpdatedAt().Unix()) + if err = d.Set("body", pullRequest.GetBody()); err != nil { + return err + } + if err = d.Set("title", pullRequest.GetTitle()); err != nil { + return err + } + if err = d.Set("draft", pullRequest.GetDraft()); err != nil { + return err + } + if err = d.Set("maintainer_can_modify", pullRequest.GetMaintainerCanModify()); err != nil { + return err + } + if err = d.Set("number", pullRequest.GetNumber()); err != nil { + return err + } + if err = d.Set("state", pullRequest.GetState()); err != nil { + return err + } + if err = d.Set("opened_at", pullRequest.GetCreatedAt().Unix()); err != nil { + return err + } + if err = d.Set("updated_at", pullRequest.GetUpdatedAt().Unix()); err != nil { + return err + } if user := pullRequest.GetUser(); user != nil { - d.Set("opened_by", user.GetLogin()) + if err = d.Set("opened_by", user.GetLogin()); err != nil { + return err + } } labels := []string{} for _, label := range pullRequest.Labels { labels = append(labels, label.GetName()) } - d.Set("labels", labels) + if err = d.Set("labels", labels); err != nil { + return err + } return nil } diff --git a/github/resource_github_repository_pull_request_test.go b/github/resource_github_repository_pull_request_test.go index c25f7b1d71..c76a1a99f7 100644 --- a/github/resource_github_repository_pull_request_test.go +++ b/github/resource_github_repository_pull_request_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryPullRequest(t *testing.T) { diff --git a/github/resource_github_repository_ruleset.go b/github/resource_github_repository_ruleset.go index 06a933a8c5..33e152a5a4 100644 --- a/github/resource_github_repository_ruleset.go +++ b/github/resource_github_repository_ruleset.go @@ -8,8 +8,8 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubRepositoryRuleset() *schema.Resource { diff --git a/github/resource_github_repository_ruleset_test.go b/github/resource_github_repository_ruleset_test.go index 2c21e64609..a51682507f 100644 --- a/github/resource_github_repository_ruleset_test.go +++ b/github/resource_github_repository_ruleset_test.go @@ -6,9 +6,9 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestGithubRepositoryRulesets(t *testing.T) { @@ -64,11 +64,11 @@ func TestGithubRepositoryRulesets(t *testing.T) { } required_status_checks { - + required_check { context = "ci" } - + strict_required_status_checks_policy = true } @@ -313,11 +313,11 @@ func TestGithubRepositoryRulesets(t *testing.T) { } required_status_checks { - + required_check { context = "ci" } - + strict_required_status_checks_policy = true } diff --git a/github/resource_github_repository_tag_protection.go b/github/resource_github_repository_tag_protection.go index 2b303acb89..b166a3a6c2 100644 --- a/github/resource_github_repository_tag_protection.go +++ b/github/resource_github_repository_tag_protection.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubRepositoryTagProtection() *schema.Resource { @@ -23,8 +23,12 @@ func resourceGithubRepositoryTagProtection() *schema.Resource { if len(parts) != 2 { return nil, fmt.Errorf("invalid ID specified: supplied ID must be written as /") } - d.Set("repository", parts[0]) - d.Set("tag_protection_id", parts[1]) + if err := d.Set("repository", parts[0]); err != nil { + return nil, err + } + if err := d.Set("tag_protection_id", parts[1]); err != nil { + return nil, err + } d.SetId(parts[1]) return []*schema.ResourceData{d}, nil }, @@ -96,7 +100,9 @@ func resourceGithubRepositoryTagProtectionRead(d *schema.ResourceData, meta inte } for _, tag := range tag_protection { if tag.GetID() == id { - d.Set("pattern", tag.GetPattern()) + if err = d.Set("pattern", tag.GetPattern()); err != nil { + return err + } } } diff --git a/github/resource_github_repository_tag_protection_test.go b/github/resource_github_repository_tag_protection_test.go index fbdf767616..a7c764888d 100644 --- a/github/resource_github_repository_tag_protection_test.go +++ b/github/resource_github_repository_tag_protection_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryTagProtection(t *testing.T) { diff --git a/github/resource_github_repository_test.go b/github/resource_github_repository_test.go index 68eb7ce1b0..e09505dac9 100644 --- a/github/resource_github_repository_test.go +++ b/github/resource_github_repository_test.go @@ -8,11 +8,12 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/stretchr/testify/assert" ) @@ -1569,9 +1570,9 @@ func TestAccGithubRepositoryVisibility(t *testing.T) { func TestGithubRepositoryTopicPassesValidation(t *testing.T) { resource := resourceGithubRepository() schema := resource.Schema["topics"].Elem.(*schema.Schema) - _, err := 
schema.ValidateFunc("ef69e1a3-66be-40ca-bb62-4f36186aa292", "topic") - if err != nil { - t.Error(fmt.Errorf("unexpected topic validation failure: %s", err)) + diags := schema.ValidateDiagFunc("ef69e1a3-66be-40ca-bb62-4f36186aa292", cty.Path{cty.GetAttrStep{Name: "topic"}}) + if diags.HasError() { + t.Error(fmt.Errorf("unexpected topic validation failure: %s", diags[0].Summary)) } } @@ -1579,12 +1580,12 @@ func TestGithubRepositoryTopicFailsValidationWhenOverMaxCharacters(t *testing.T) resource := resourceGithubRepository() schema := resource.Schema["topics"].Elem.(*schema.Schema) - _, err := schema.ValidateFunc(strings.Repeat("a", 51), "topic") - if len(err) != 1 { - t.Error(fmt.Errorf("unexpected number of topic validation failures; expected=1; actual=%d", len(err))) + diags := schema.ValidateDiagFunc(strings.Repeat("a", 51), cty.Path{cty.GetAttrStep{Name: "topic"}}) + if len(diags) != 1 { + t.Error(fmt.Errorf("unexpected number of topic validation failures; expected=1; actual=%d", len(diags))) } - expectedFailure := "invalid value for topic (must include only lowercase alphanumeric characters or hyphens and cannot start with a hyphen and consist of 50 characters or less)" - actualFailure := err[0].Error() + expectedFailure := "invalid value for topics (must include only lowercase alphanumeric characters or hyphens and cannot start with a hyphen and consist of 50 characters or less)" + actualFailure := diags[0].Summary if expectedFailure != actualFailure { t.Error(fmt.Errorf("unexpected topic validation failure; expected=%s; action=%s", expectedFailure, actualFailure)) } @@ -1674,12 +1675,12 @@ func TestGithubRepositoryNameFailsValidationWhenOverMaxCharacters(t *testing.T) resource := resourceGithubRepository() schema := resource.Schema["name"] - _, err := schema.ValidateFunc(strings.Repeat("a", 101), "name") - if len(err) != 1 { - t.Error(fmt.Errorf("unexpected number of name validation failures; expected=1; actual=%d", len(err))) + diags := schema.ValidateDiagFunc(strings.Repeat("a", 101), cty.GetAttrPath("name")) + if len(diags) != 1 { + t.Error(fmt.Errorf("unexpected number of name validation failures; expected=1; actual=%d", len(diags))) } expectedFailure := "invalid value for name (must include only alphanumeric characters, underscores or hyphens and consist of 100 characters or less)" - actualFailure := err[0].Error() + actualFailure := diags[0].Summary if expectedFailure != actualFailure { t.Error(fmt.Errorf("unexpected name validation failure; expected=%s; action=%s", expectedFailure, actualFailure)) } @@ -1689,12 +1690,12 @@ func TestGithubRepositoryNameFailsValidationWithSpace(t *testing.T) { resource := resourceGithubRepository() schema := resource.Schema["name"] - _, err := schema.ValidateFunc("test space", "name") - if len(err) != 1 { - t.Error(fmt.Errorf("unexpected number of name validation failures; expected=1; actual=%d", len(err))) + diags := schema.ValidateDiagFunc("test space", cty.GetAttrPath("name")) + if len(diags) != 1 { + t.Error(fmt.Errorf("unexpected number of name validation failures; expected=1; actual=%d", len(diags))) } expectedFailure := "invalid value for name (must include only alphanumeric characters, underscores or hyphens and consist of 100 characters or less)" - actualFailure := err[0].Error() + actualFailure := diags[0].Summary if expectedFailure != actualFailure { t.Error(fmt.Errorf("unexpected name validation failure; expected=%s; action=%s", expectedFailure, actualFailure)) } diff --git a/github/resource_github_repository_topics.go 
b/github/resource_github_repository_topics.go index e481042830..1c60e08e96 100644 --- a/github/resource_github_repository_topics.go +++ b/github/resource_github_repository_topics.go @@ -2,10 +2,13 @@ package github import ( "context" + "log" + "net/http" "regexp" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/google/go-github/v57/github" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceGithubRepositoryTopics() *schema.Resource { @@ -68,6 +71,17 @@ func resourceGithubRepositoryTopicsRead(d *schema.ResourceData, meta interface{} topics, _, err := client.Repositories.ListAllTopics(ctx, owner, repoName) if err != nil { + if ghErr, ok := err.(*github.ErrorResponse); ok { + if ghErr.Response.StatusCode == http.StatusNotModified { + return nil + } + if ghErr.Response.StatusCode == http.StatusNotFound { + log.Printf("[INFO] Removing topics from repository %s/%s from state because it no longer exists in GitHub", + owner, repoName) + d.SetId("") + return nil + } + } return err } diff --git a/github/resource_github_repository_topics_test.go b/github/resource_github_repository_topics_test.go index bba430a438..d5f571e202 100644 --- a/github/resource_github_repository_topics_test.go +++ b/github/resource_github_repository_topics_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryTopics(t *testing.T) { diff --git a/github/resource_github_repository_webhook.go b/github/resource_github_repository_webhook.go index 13b4359a11..d2546977f0 100644 --- a/github/resource_github_repository_webhook.go +++ b/github/resource_github_repository_webhook.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubRepositoryWebhook() *schema.Resource { @@ -24,7 +24,9 @@ func resourceGithubRepositoryWebhook() *schema.Resource { if len(parts) != 2 { return nil, fmt.Errorf("invalid ID specified: supplied ID must be written as /") } - d.Set("repository", parts[0]) + if err := d.Set("repository", parts[0]); err != nil { + return nil, err + } d.SetId(parts[1]) return []*schema.ResourceData{d}, nil }, @@ -34,11 +36,6 @@ func resourceGithubRepositoryWebhook() *schema.Resource { MigrateState: resourceGithubWebhookMigrateState, Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Removed: "The `name` attribute is no longer necessary.", - }, "repository": { Type: schema.TypeString, Required: true, @@ -124,7 +121,9 @@ func resourceGithubRepositoryWebhookCreate(d *schema.ResourceData, meta interfac hook.Config = insecureSslStringToBool(hook.Config) - d.Set("configuration", []interface{}{hook.Config}) + if err = d.Set("configuration", []interface{}{hook.Config}); err != nil { + return err + } return resourceGithubRepositoryWebhookRead(d, meta) } @@ -158,9 +157,15 @@ func resourceGithubRepositoryWebhookRead(d *schema.ResourceData, meta interface{ } return err } - d.Set("url", hook.GetURL()) - d.Set("active", hook.GetActive()) - d.Set("events", hook.Events) + if err = 
d.Set("url", hook.GetURL()); err != nil { + return err + } + if err = d.Set("active", hook.GetActive()); err != nil { + return err + } + if err = d.Set("events", hook.Events); err != nil { + return err + } // GitHub returns the secret as a string of 8 astrisks "********" // We would prefer to store the real secret in state, so we'll @@ -176,7 +181,9 @@ func resourceGithubRepositoryWebhookRead(d *schema.ResourceData, meta interface{ hook.Config = insecureSslStringToBool(hook.Config) - d.Set("configuration", []interface{}{hook.Config}) + if err = d.Set("configuration", []interface{}{hook.Config}); err != nil { + return err + } return nil } diff --git a/github/resource_github_repository_webhook_test.go b/github/resource_github_repository_webhook_test.go index 6be2a10a3e..1ba5ab8c8e 100644 --- a/github/resource_github_repository_webhook_test.go +++ b/github/resource_github_repository_webhook_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubRepositoryWebhook(t *testing.T) { diff --git a/github/resource_github_team.go b/github/resource_github_team.go index e818e072e8..80cb3c7da7 100644 --- a/github/resource_github_team.go +++ b/github/resource_github_team.go @@ -8,8 +8,8 @@ import ( "time" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/shurcooL/githubv4" ) @@ -35,7 +35,7 @@ func resourceGithubTeam() *schema.Resource { }, CustomizeDiff: customdiff.Sequence( - customdiff.ComputedIf("slug", func(d *schema.ResourceDiff, meta interface{}) bool { + customdiff.ComputedIf("slug", func(_ context.Context, d *schema.ResourceDiff, meta interface{}) bool { return d.HasChange("name") }), ), @@ -52,11 +52,11 @@ func resourceGithubTeam() *schema.Resource { Description: "A description of the team.", }, "privacy": { - Type: schema.TypeString, - Optional: true, - Default: "secret", - Description: "The level of privacy for the team. Must be one of 'secret' or 'closed'.", - ValidateFunc: validateValueFunc([]string{"secret", "closed"}), + Type: schema.TypeString, + Optional: true, + Default: "secret", + Description: "The level of privacy for the team. 
Must be one of 'secret' or 'closed'.", + ValidateDiagFunc: validateValueFunc([]string{"secret", "closed"}), }, "parent_team_id": { Type: schema.TypeString, @@ -278,23 +278,51 @@ func resourceGithubTeamRead(d *schema.ResourceData, meta interface{}) error { return err } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("description", team.GetDescription()) - d.Set("name", team.GetName()) - d.Set("privacy", team.GetPrivacy()) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("description", team.GetDescription()); err != nil { + return err + } + if err = d.Set("name", team.GetName()); err != nil { + return err + } + if err = d.Set("privacy", team.GetPrivacy()); err != nil { + return err + } if parent := team.Parent; parent != nil { - d.Set("parent_team_id", strconv.FormatInt(team.Parent.GetID(), 10)) - d.Set("parent_team_read_id", strconv.FormatInt(team.Parent.GetID(), 10)) - d.Set("parent_team_read_slug", parent.Slug) + if err = d.Set("parent_team_id", strconv.FormatInt(team.Parent.GetID(), 10)); err != nil { + return err + } + if err = d.Set("parent_team_read_id", strconv.FormatInt(team.Parent.GetID(), 10)); err != nil { + return err + } + if err = d.Set("parent_team_read_slug", parent.Slug); err != nil { + return err + } } else { - d.Set("parent_team_id", "") - d.Set("parent_team_read_id", "") - d.Set("parent_team_read_slug", "") + if err = d.Set("parent_team_id", ""); err != nil { + return err + } + if err = d.Set("parent_team_read_id", ""); err != nil { + return err + } + if err = d.Set("parent_team_read_slug", ""); err != nil { + return err + } + } + if err = d.Set("ldap_dn", team.GetLDAPDN()); err != nil { + return err + } + if err = d.Set("slug", team.GetSlug()); err != nil { + return err + } + if err = d.Set("node_id", team.GetNodeID()); err != nil { + return err + } + if err = d.Set("members_count", team.GetMembersCount()); err != nil { + return err } - d.Set("ldap_dn", team.GetLDAPDN()) - d.Set("slug", team.GetSlug()) - d.Set("node_id", team.GetNodeID()) - d.Set("members_count", team.GetMembersCount()) return nil } @@ -420,7 +448,9 @@ func resourceGithubTeamImport(d *schema.ResourceData, meta interface{}) ([]*sche } d.SetId(strconv.FormatInt(teamId, 10)) - d.Set("create_default_maintainer", false) + if err = d.Set("create_default_maintainer", false); err != nil { + return nil, err + } return []*schema.ResourceData{d}, nil } diff --git a/github/resource_github_team_members.go b/github/resource_github_team_members.go index f19358347b..02480ba06d 100644 --- a/github/resource_github_team_members.go +++ b/github/resource_github_team_members.go @@ -4,10 +4,11 @@ import ( "context" "log" "reflect" + "strconv" "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/shurcooL/githubv4" ) @@ -23,7 +24,7 @@ func resourceGithubTeamMembers() *schema.Resource { Update: resourceGithubTeamMembersUpdate, Delete: resourceGithubTeamMembersDelete, Importer: &schema.ResourceImporter{ - State: resourceGithubTeamImport, + State: resourceGithubTeamMembersImport, }, Schema: map[string]*schema.Schema{ @@ -46,11 +47,11 @@ func resourceGithubTeamMembers() *schema.Resource { Description: "The user to add to the team.", }, "role": { - Type: schema.TypeString, - Optional: true, - Default: "member", - Description: "The role of the user within the team. 
Must be one of 'member' or 'maintainer'.", - ValidateFunc: validateValueFunc([]string{"member", "maintainer"}), + Type: schema.TypeString, + Optional: true, + Default: "member", + Description: "The role of the user within the team. Must be one of 'member' or 'maintainer'.", + ValidateDiagFunc: validateValueFunc([]string{"member", "maintainer"}), }, }, }, @@ -190,7 +191,9 @@ func resourceGithubTeamMembersRead(d *schema.ResourceData, meta interface{}) err // We intentionally set these early to allow reconciliation // from an upstream bug which emptied team_id in state // See https://github.com/integrations/terraform-provider-github/issues/323 - d.Set("team_id", teamIdString) + if err := d.Set("team_id", teamIdString); err != nil { + return err + } ctx := context.WithValue(context.Background(), ctxId, d.Id()) @@ -272,3 +275,14 @@ func resourceGithubTeamMembersDelete(d *schema.ResourceData, meta interface{}) e return nil } + +func resourceGithubTeamMembersImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + teamId, err := getTeamID(d.Id(), meta) + if err != nil { + return nil, err + } + + d.SetId(strconv.FormatInt(teamId, 10)) + + return []*schema.ResourceData{d}, nil +} diff --git a/github/resource_github_team_members_test.go b/github/resource_github_team_members_test.go index 8a07bd27f4..1eabbf9d2a 100644 --- a/github/resource_github_team_members_test.go +++ b/github/resource_github_team_members_test.go @@ -7,9 +7,9 @@ import ( "testing" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccGithubTeamMembers(t *testing.T) { diff --git a/github/resource_github_team_membership.go b/github/resource_github_team_membership.go index e767199e28..c035e28f98 100644 --- a/github/resource_github_team_membership.go +++ b/github/resource_github_team_membership.go @@ -7,7 +7,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubTeamMembership() *schema.Resource { @@ -49,11 +49,11 @@ func resourceGithubTeamMembership() *schema.Resource { Description: "The user to add to the team.", }, "role": { - Type: schema.TypeString, - Optional: true, - Default: "member", - Description: "The role of the user within the team. Must be one of 'member' or 'maintainer'.", - ValidateFunc: validateValueFunc([]string{"member", "maintainer"}), + Type: schema.TypeString, + Optional: true, + Default: "member", + Description: "The role of the user within the team. 
Must be one of 'member' or 'maintainer'.", + ValidateDiagFunc: validateValueFunc([]string{"member", "maintainer"}), }, "etag": { Type: schema.TypeString, @@ -110,8 +110,12 @@ func resourceGithubTeamMembershipRead(d *schema.ResourceData, meta interface{}) // We intentionally set these early to allow reconciliation // from an upstream bug which emptied team_id in state // See https://github.com/integrations/terraform-provider-github/issues/323 - d.Set("team_id", teamIdString) - d.Set("username", username) + if err = d.Set("team_id", teamIdString); err != nil { + return err + } + if err = d.Set("username", username); err != nil { + return err + } ctx := context.WithValue(context.Background(), ctxId, d.Id()) if !d.IsNewResource() { @@ -135,8 +139,12 @@ func resourceGithubTeamMembershipRead(d *schema.ResourceData, meta interface{}) return err } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("role", membership.GetRole()) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("role", membership.GetRole()); err != nil { + return err + } return nil } diff --git a/github/resource_github_team_membership_test.go b/github/resource_github_team_membership_test.go index a409caac61..5d794eea1c 100644 --- a/github/resource_github_team_membership_test.go +++ b/github/resource_github_team_membership_test.go @@ -8,9 +8,9 @@ import ( "testing" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccGithubTeamMembership_basic(t *testing.T) { diff --git a/github/resource_github_team_repository.go b/github/resource_github_team_repository.go index 61187cc9c7..75e109b39f 100644 --- a/github/resource_github_team_repository.go +++ b/github/resource_github_team_repository.go @@ -7,7 +7,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubTeamRepository() *schema.Resource { @@ -139,14 +139,22 @@ func resourceGithubTeamRepositoryRead(d *schema.ResourceData, meta interface{}) return err } - d.Set("etag", resp.Header.Get("ETag")) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } if d.Get("team_id") == "" { // If team_id is empty, that means we are importing the resource. // Set the team_id to be the id of the team. 
- d.Set("team_id", teamIdString) + if err = d.Set("team_id", teamIdString); err != nil { + return err + } + } + if err = d.Set("repository", repo.GetName()); err != nil { + return err + } + if err = d.Set("permission", getPermission(repo.GetRoleName())); err != nil { + return err } - d.Set("repository", repo.GetName()) - d.Set("permission", getPermission(repo.GetRoleName())) return nil } diff --git a/github/resource_github_team_repository_test.go b/github/resource_github_team_repository_test.go index 1df45f6de4..d97ad2c2ca 100644 --- a/github/resource_github_team_repository_test.go +++ b/github/resource_github_team_repository_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubTeamRepository(t *testing.T) { diff --git a/github/resource_github_team_settings.go b/github/resource_github_team_settings.go index e92fc541de..3d6b875746 100644 --- a/github/resource_github_team_settings.go +++ b/github/resource_github_team_settings.go @@ -6,7 +6,7 @@ import ( "fmt" "strconv" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/shurcooL/githubv4" ) @@ -48,7 +48,7 @@ func resourceGithubTeamSettings() *schema.Resource { Optional: true, Description: "The algorithm to use when assigning pull requests to team members. Supported values are 'ROUND_ROBIN' and 'LOAD_BALANCE'.", Default: "ROUND_ROBIN", - ValidateFunc: func(v interface{}, key string) (we []string, errs []error) { + ValidateDiagFunc: toDiagFunc(func(v interface{}, key string) (we []string, errs []error) { algorithm, ok := v.(string) if !ok { return nil, []error{fmt.Errorf("expected type of %s to be string", key)} @@ -59,14 +59,14 @@ func resourceGithubTeamSettings() *schema.Resource { } return we, errs - }, + }, "algorithm"), }, "member_count": { Type: schema.TypeInt, Optional: true, RequiredWith: []string{"review_request_delegation"}, Description: "The number of team members to assign to a pull request.", - ValidateFunc: func(v interface{}, key string) (we []string, errs []error) { + ValidateDiagFunc: toDiagFunc(func(v interface{}, key string) (we []string, errs []error) { count, ok := v.(int) if !ok { return nil, []error{fmt.Errorf("expected type of %s to be an integer", key)} @@ -75,7 +75,7 @@ func resourceGithubTeamSettings() *schema.Resource { errs = append(errs, errors.New("review request delegation reviewer count must be a positive number")) } return we, errs - }, + }, "member_count"), }, "notify": { Type: schema.TypeBool, @@ -107,8 +107,12 @@ func resourceGithubTeamSettingsCreate(d *schema.ResourceData, meta interface{}) return err } d.SetId(nodeId) - d.Set("team_slug", slug) - d.Set("team_uid", nodeId) + if err = d.Set("team_slug", slug); err != nil { + return err + } + if err = d.Set("team_uid", nodeId); err != nil { + return err + } return resourceGithubTeamSettingsUpdate(d, meta) } @@ -140,9 +144,13 @@ func resourceGithubTeamSettingsRead(d *schema.ResourceData, meta interface{}) er reviewRequestDelegation["algorithm"] = query.Organization.Team.ReviewRequestDelegationAlgorithm reviewRequestDelegation["member_count"] = query.Organization.Team.ReviewRequestDelegationCount reviewRequestDelegation["notify"] = query.Organization.Team.ReviewRequestDelegationNotifyAll - 
d.Set("review_request_delegation", []interface{}{reviewRequestDelegation}) + if err = d.Set("review_request_delegation", []interface{}{reviewRequestDelegation}); err != nil { + return err + } } else { - d.Set("review_request_delegation", []interface{}{}) + if err = d.Set("review_request_delegation", []interface{}{}); err != nil { + return err + } } return nil @@ -204,10 +212,16 @@ func resourceGithubTeamSettingsImport(d *schema.ResourceData, meta interface{}) if err != nil { return nil, err } - d.Set("team_id", d.Id()) + if err = d.Set("team_id", d.Id()); err != nil { + return nil, err + } d.SetId(nodeId) - d.Set("team_slug", slug) - d.Set("team_uid", nodeId) + if err = d.Set("team_slug", slug); err != nil { + return nil, err + } + if err = d.Set("team_uid", nodeId); err != nil { + return nil, err + } return []*schema.ResourceData{d}, resourceGithubTeamSettingsRead(d, meta) } diff --git a/github/resource_github_team_settings_test.go b/github/resource_github_team_settings_test.go index 908f1e4bdb..323cbe72e6 100644 --- a/github/resource_github_team_settings_test.go +++ b/github/resource_github_team_settings_test.go @@ -6,8 +6,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestCanUseIDOrSlugForTeamIDWhenChangingSettings(t *testing.T) { diff --git a/github/resource_github_team_sync_group_mapping.go b/github/resource_github_team_sync_group_mapping.go index 01bc8d6343..c579243947 100644 --- a/github/resource_github_team_sync_group_mapping.go +++ b/github/resource_github_team_sync_group_mapping.go @@ -7,7 +7,7 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubTeamSyncGroupMapping() *schema.Resource { @@ -18,7 +18,9 @@ func resourceGithubTeamSyncGroupMapping() *schema.Resource { Delete: resourceGithubTeamSyncGroupMappingDelete, Importer: &schema.ResourceImporter{ State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("team_slug", d.Id()) + if err := d.Set("team_slug", d.Id()); err != nil { + return nil, err + } d.SetId(fmt.Sprintf("teams/%s/team-sync/group-mappings", d.Id())) return []*schema.ResourceData{d}, nil }, @@ -121,10 +123,12 @@ func resourceGithubTeamSyncGroupMappingRead(d *schema.ResourceData, meta interfa return err } - if err := d.Set("group", groups); err != nil { + if err = d.Set("group", groups); err != nil { return fmt.Errorf("error setting groups: %s", err) } - d.Set("etag", resp.Header.Get("ETag")) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return fmt.Errorf("error setting etag: %s", err) + } return nil } diff --git a/github/resource_github_team_sync_group_mapping_test.go b/github/resource_github_team_sync_group_mapping_test.go index ee034a7285..e635560593 100644 --- a/github/resource_github_team_sync_group_mapping_test.go +++ b/github/resource_github_team_sync_group_mapping_test.go @@ -7,9 +7,9 @@ import ( "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccGithubTeamSyncGroupMapping_basic(t *testing.T) { diff --git a/github/resource_github_team_test.go b/github/resource_github_team_test.go index a6e51fb956..93ed1f4c68 100644 --- a/github/resource_github_team_test.go +++ b/github/resource_github_team_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubTeam(t *testing.T) { diff --git a/github/resource_github_user_gpg_key.go b/github/resource_github_user_gpg_key.go index 3f84b9c91e..c3560e41fb 100644 --- a/github/resource_github_user_gpg_key.go +++ b/github/resource_github_user_gpg_key.go @@ -7,7 +7,7 @@ import ( "strconv" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubUserGpgKey() *schema.Resource { @@ -80,7 +80,9 @@ func resourceGithubUserGpgKeyRead(d *schema.ResourceData, meta interface{}) erro return err } - d.Set("key_id", key.GetKeyID()) + if err = d.Set("key_id", key.GetKeyID()); err != nil { + return err + } return nil } diff --git a/github/resource_github_user_gpg_key_test.go b/github/resource_github_user_gpg_key_test.go index 628fa8acca..5e00ae7a30 100644 --- a/github/resource_github_user_gpg_key_test.go +++ b/github/resource_github_user_gpg_key_test.go @@ -6,7 +6,7 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccGithubUserGpgKey(t *testing.T) { diff --git a/github/resource_github_user_invitation_accepter.go b/github/resource_github_user_invitation_accepter.go index b709c3d739..5f3f0618cc 100644 --- a/github/resource_github_user_invitation_accepter.go +++ b/github/resource_github_user_invitation_accepter.go @@ -6,7 +6,7 @@ import ( "strconv" "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubUserInvitationAccepter() *schema.Resource { diff --git a/github/resource_github_user_invitation_accepter_test.go b/github/resource_github_user_invitation_accepter_test.go index 7eae6e4e8d..ae15429873 100644 --- a/github/resource_github_user_invitation_accepter_test.go +++ b/github/resource_github_user_invitation_accepter_test.go @@ -6,10 +6,10 @@ import ( "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccGithubUserInvitationAccepter_basic(t *testing.T) { diff --git a/github/resource_github_user_ssh_key.go b/github/resource_github_user_ssh_key.go index 41e2491d42..2f701c34b9 100644 --- a/github/resource_github_user_ssh_key.go +++ b/github/resource_github_user_ssh_key.go @@ -8,7 +8,7 @@ import ( "strings" 
"github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubUserSshKey() *schema.Resource { @@ -17,7 +17,7 @@ func resourceGithubUserSshKey() *schema.Resource { Read: resourceGithubUserSshKeyRead, Delete: resourceGithubUserSshKeyDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -97,10 +97,18 @@ func resourceGithubUserSshKeyRead(d *schema.ResourceData, meta interface{}) erro } } - d.Set("etag", resp.Header.Get("ETag")) - d.Set("title", key.GetTitle()) - d.Set("key", key.GetKey()) - d.Set("url", key.GetURL()) + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } + if err = d.Set("title", key.GetTitle()); err != nil { + return err + } + if err = d.Set("key", key.GetKey()); err != nil { + return err + } + if err = d.Set("url", key.GetURL()); err != nil { + return err + } return nil } diff --git a/github/resource_github_user_ssh_key_test.go b/github/resource_github_user_ssh_key_test.go index 954dcc9743..b26d5b9524 100644 --- a/github/resource_github_user_ssh_key_test.go +++ b/github/resource_github_user_ssh_key_test.go @@ -8,8 +8,8 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "golang.org/x/crypto/ssh" ) diff --git a/github/resource_organization_block.go b/github/resource_organization_block.go index e2266ea1c7..6321369a35 100644 --- a/github/resource_organization_block.go +++ b/github/resource_organization_block.go @@ -6,7 +6,7 @@ import ( "net/http" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceOrganizationBlock() *schema.Resource { @@ -15,7 +15,7 @@ func resourceOrganizationBlock() *schema.Resource { Read: resourceOrganizationBlockRead, Delete: resourceOrganizationBlockDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ @@ -87,8 +87,12 @@ func resourceOrganizationBlockRead(d *schema.ResourceData, meta interface{}) err return nil } - d.Set("username", username) - d.Set("etag", resp.Header.Get("ETag")) + if err = d.Set("username", username); err != nil { + return err + } + if err = d.Set("etag", resp.Header.Get("ETag")); err != nil { + return err + } return nil } diff --git a/github/resource_organization_block_test.go b/github/resource_organization_block_test.go index 65b0d54cf8..f4bfb03a62 100644 --- a/github/resource_organization_block_test.go +++ b/github/resource_organization_block_test.go @@ -5,8 +5,8 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccOrganizationBlock_basic(t *testing.T) { diff --git a/github/respository_rules_utils.go b/github/respository_rules_utils.go index 6eeb2fbc6d..3c28b95047 100644 --- a/github/respository_rules_utils.go +++ b/github/respository_rules_utils.go @@ -7,7 +7,7 
@@ import ( "sort" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceGithubRulesetObject(d *schema.ResourceData, org string) *github.Ruleset { diff --git a/github/schema_webhook_configuration.go b/github/schema_webhook_configuration.go index b01a2cba10..bc0162b715 100644 --- a/github/schema_webhook_configuration.go +++ b/github/schema_webhook_configuration.go @@ -1,7 +1,7 @@ package github import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func webhookConfigurationSchema() *schema.Schema { diff --git a/github/sweeper_test.go b/github/sweeper_test.go index 2cc9f63d40..92f3f14a1c 100644 --- a/github/sweeper_test.go +++ b/github/sweeper_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestMain(m *testing.M) { diff --git a/github/transport.go b/github/transport.go index 7a8e0c53aa..855ec766e3 100644 --- a/github/transport.go +++ b/github/transport.go @@ -197,3 +197,99 @@ func isWriteMethod(method string) bool { } return false } + +type RetryTransport struct { + transport http.RoundTripper + retryDelay time.Duration + maxRetries int + retryableErrors map[int]bool +} + +type RetryTransportOption func(*RetryTransport) + +// NewRetryTransport wraps an http.RoundTripper and takes a variadic list of +// optional functions that modify the RetryTransport itself. It can be used, +// for example, to retry requests that failed with a 5xx response error. +func NewRetryTransport(rt http.RoundTripper, options ...RetryTransportOption) *RetryTransport { + // Default to no retries when no options are provided + defaultErrors := getDefaultRetriableErrors() + rlt := &RetryTransport{transport: rt, retryDelay: time.Second, maxRetries: 0, retryableErrors: defaultErrors} + + for _, opt := range options { + opt(rlt) + } + + return rlt +} + +func (t *RetryTransport) RoundTrip(req *http.Request) (*http.Response, error) { + var err error + var resp *http.Response + var dataBuffer *bytes.Reader + + for retry := 0; retry <= t.maxRetries; retry++ { + // Reset the body. + // Adapted from httpretry (https://github.com/ybbus/httpretry/blob/master/roundtripper.go#L60): + // if the request provides GetBody(), we use it as the Body, + // because GetBody can be called an arbitrary number of times for retries. + if req.GetBody != nil { + bodyReadCloser, _ := req.GetBody() + req.Body = bodyReadCloser + } else if req.Body != nil { + + // We need to store the complete body, since we need to reset it when a retry happens. + // This is not very efficient, because: + // a) a huge streaming body is buffered completely in memory (1GB of stream data that + // would otherwise flow through io.Copy has to be held in memory), and + // b) the buffering is unnecessary if the first attempt succeeds. + // A solution would be to at least support more types for GetBody(). + + // store it for the first time + if dataBuffer == nil { + data, err := io.ReadAll(req.Body) + req.Body.Close() + if err != nil { + return nil, err + } + dataBuffer = bytes.NewReader(data) + req.ContentLength = int64(dataBuffer.Len()) + req.Body = io.NopCloser(dataBuffer) + } + + // reset the request body + if _, err = dataBuffer.Seek(0, io.SeekStart); err != nil { + return nil, err + } + } + + resp, err = t.transport.RoundTrip(req) + if resp != nil && !t.retryableErrors[resp.StatusCode] {
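+ // The response's status code is not configured as retryable, so hand it (and any error) straight back; retryable status codes and transport-level failures (resp == nil) fall through to the retry delay below.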
return resp, err + } + + time.Sleep(t.retryDelay) + } + + return resp, err +} + +// WithMaxRetries sets the maximum number of retries performed when a retryable error is encountered +func WithMaxRetries(d int) RetryTransportOption { + return func(rt *RetryTransport) { + rt.maxRetries = d + } +} + +// WithRetryableErrors sets the HTTP status codes that should be retried +func WithRetryableErrors(d map[int]bool) RetryTransportOption { + return func(rt *RetryTransport) { + rt.retryableErrors = d + } +} + +// WithRetryDelay sets the delay between retried requests +func WithRetryDelay(d time.Duration) RetryTransportOption { + return func(rt *RetryTransport) { + rt.retryDelay = d + } +} diff --git a/github/transport_test.go b/github/transport_test.go index 7eacda8d96..c9697c964c 100644 --- a/github/transport_test.go +++ b/github/transport_test.go @@ -340,7 +340,125 @@ func TestRateLimitTransport_smart_lock(t *testing.T) { t.Fatalf("Expected to succeed instantly, waited 100 milliseconds unsuccessfully") } }) +} + +func TestRetryTransport_retry_post_error(t *testing.T) { + ts := githubApiMock([]*mockResponse{ + { + ExpectedUri: "/orgs/tada/repos", + ExpectedMethod: "POST", + ExpectedBody: []byte(`{"name":"radek-example-48","description":""} `), + ResponseBody: `{ + "message": "internal server error" +}`, + StatusCode: 500, + }, + { + ExpectedUri: "/orgs/tada/repos", + ExpectedMethod: "POST", + ExpectedBody: []byte(`{"name":"radek-example-48","description":""} `), + ResponseBody: `{ + "message": "internal server error" +}`, + StatusCode: 500, + }, + { + ExpectedUri: "/orgs/tada/repos", + ExpectedMethod: "POST", + ExpectedBody: []byte(`{"name":"radek-example-48","description":""} `), + ResponseBody: `{ + "message": "internal server error" +}`, + StatusCode: 201, + }, + }) + defer ts.Close() + + httpClient := http.DefaultClient + httpClient.Transport = NewRetryTransport(http.DefaultTransport, WithMaxRetries(1)) + + client := github.NewClient(httpClient) + u, _ := url.Parse(ts.URL + "/") + client.BaseURL = u + + ctx := context.WithValue(context.Background(), ctxId, t.Name()) + _, _, err := client.Repositories.Create(ctx, "tada", &github.Repository{ + Name: github.String("radek-example-48"), + Description: github.String(""), + }) + if err == nil { + t.Fatal("Expected error not to be nil") + } + + ghErr, ok := err.(*github.ErrorResponse) + if !ok { + t.Fatalf("Expected github.ErrorResponse, got: %#v", err) + } + expectedMessage := "internal server error" + if ghErr.Message != expectedMessage { + t.Fatalf("Expected message %q, got: %q", expectedMessage, ghErr.Message) + } +} + +func TestRetryTransport_retry_post_success(t *testing.T) { + ts := githubApiMock([]*mockResponse{ + { + ExpectedUri: "/orgs/tada/repos", + ExpectedMethod: "POST", + ExpectedBody: []byte(`{"name":"radek-example-48","description":""} `), + ResponseBody: `{ + "message": "internal server error" +}`, + StatusCode: 500, + }, + { + ExpectedUri: "/orgs/tada/repos", + ExpectedMethod: "POST", + ExpectedBody: []byte(`{"name":"radek-example-48","description":""} `), + ResponseBody: `{ + "message": "internal server error" +}`, + StatusCode: 500, + }, + { + ExpectedUri: "/orgs/tada/repos", + ExpectedMethod: "POST", + ExpectedBody: []byte(`{"name":"radek-example-48","description":""} `), + ResponseBody: `{ + "message": "Resource created" +}`, + StatusCode: 201, + }, + }) + defer ts.Close() + + httpClient := http.DefaultClient + httpClient.Transport = NewRetryTransport(http.DefaultTransport, WithMaxRetries(2), WithRetryDelay(time.Second)) +
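+ // Build the go-github client on top of the retrying transport; with up to two retries, the mocked 500, 500, 201 sequence is expected to end in success.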
+ client := github.NewClient(httpClient) + u, _ := url.Parse(ts.URL + "/") + client.BaseURL = u + + ctx := context.WithValue(context.Background(), ctxId, t.Name()) + _, _, err := client.Repositories.Create(ctx, "tada", &github.Repository{ + Name: github.String("radek-example-48"), + Description: github.String(""), + }) + if err != nil { + t.Fatalf("Expected error to be nil, got %v", err) + } + + ghErr, _ := err.(*github.ErrorResponse) + if ghErr != nil { + t.Fatalf("Expected successful github call, got: %q", ghErr.Message) + } } type mockResponse struct { diff --git a/github/util.go b/github/util.go index 4da019e74a..95cf5c7828 100644 --- a/github/util.go +++ b/github/util.go @@ -13,7 +13,9 @@ import ( "strings" "github.com/google/go-github/v57/github" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) const ( @@ -35,8 +37,52 @@ func caseInsensitive() schema.SchemaDiffSuppressFunc { } } -func validateValueFunc(values []string) schema.SchemaValidateFunc { - return func(v interface{}, k string) (we []string, errors []error) { +// wrapErrors converts a slice of errors into diag.Diagnostics; it is a stopgap +// until we go through the provider and replace plain error usage +func wrapErrors(errs []error) diag.Diagnostics { + var diags diag.Diagnostics + + for _, err := range errs { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Error", + Detail: err.Error(), + }) + } + + return diags +} + +// toDiagFunc is a helper that wraps HashiCorp's helper/validation functions +// and converts their results to the diag.Diagnostics format. +// nolint: oldFunc needs to be schema.SchemaValidateFunc to keep compatibility with +// the old code until all uses of schema.SchemaValidateFunc are gone +func toDiagFunc(oldFunc schema.SchemaValidateFunc, keyName string) schema.SchemaValidateDiagFunc { //nolint:staticcheck + return func(i interface{}, path cty.Path) diag.Diagnostics { + warnings, errors := oldFunc(i, keyName) + var diags diag.Diagnostics + + for _, err := range errors { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + }) + } + + for _, warn := range warnings { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: warn, + }) + } + + return diags + } +} + +func validateValueFunc(values []string) schema.SchemaValidateDiagFunc { + return func(v interface{}, k cty.Path) diag.Diagnostics { + errs := make([]error, 0) value := v.(string) valid := false for _, role := range values { @@ -47,9 +93,9 @@ func validateValueFunc(values []string) schema.SchemaValidateFunc { } if !valid { - errors = append(errors, fmt.Errorf("%s is an invalid value for argument %s", value, k)) + errs = append(errs, fmt.Errorf("%s is an invalid value for argument %s", value, k)) } - return + return wrapErrors(errs) } } @@ -199,10 +245,11 @@ func getTeamSlug(teamIDString string, meta interface{}) (string, error) { // https://docs.github.com/en/actions/reference/encrypted-secrets#naming-your-secrets var secretNameRegexp = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") -func validateSecretNameFunc(v interface{}, keyName string) (we []string, errs []error) { +func validateSecretNameFunc(v interface{}, path cty.Path) diag.Diagnostics { + errs := make([]error, 0) name, ok := v.(string) if !ok { - return nil, []error{fmt.Errorf("expected type of %s to be string", keyName)}
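+ // Surface the type mismatch as diag.Diagnostics via wrapErrors, matching the other validators in this file.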
+ return wrapErrors([]error{fmt.Errorf("expected type of %s to be string", path)}) } if !secretNameRegexp.MatchString(name) { @@ -213,7 +260,7 @@ func validateSecretNameFunc(v interface{}, keyName string) (we []string, errs [] errs = append(errs, errors.New("secret names must not start with the GITHUB_ prefix")) } - return we, errs + return wrapErrors(errs) } // deleteResourceOn404AndSwallow304OtherwiseReturnError will log and delete resource if error is 404 which indicates resource (or any of its ancestors) diff --git a/github/util_test.go b/github/util_test.go index aafe7021d3..67bca149e4 100644 --- a/github/util_test.go +++ b/github/util_test.go @@ -3,6 +3,8 @@ package github import ( "testing" "unicode" + + "github.com/hashicorp/go-cty/cty" ) func TestAccValidateTeamIDFunc(t *testing.T) { @@ -58,9 +60,9 @@ func TestAccGithubUtilRole_validation(t *testing.T) { validationFunc := validateValueFunc([]string{"valid_one", "valid_two"}) for _, tc := range cases { - _, errors := validationFunc(tc.Value, "test_arg") + diags := validationFunc(tc.Value, cty.Path{cty.GetAttrStep{Name: "test_arg"}}) - if len(errors) != tc.ErrCount { + if len(diags) != tc.ErrCount { t.Fatalf("Expected 1 validation error") } } @@ -144,13 +146,13 @@ func TestAccGithubUtilValidateSecretName(t *testing.T) { for _, tc := range cases { var name interface{} = tc.Name - _, errors := validateSecretNameFunc(name, "") + diags := validateSecretNameFunc(name, cty.Path{cty.GetAttrStep{Name: ""}}) - if tc.Error != (len(errors) != 0) { + if tc.Error != (len(diags) != 0) { if tc.Error { t.Fatalf("expected error, got none (%s)", tc.Name) } else { - t.Fatalf("unexpected error(s): %s (%s)", errors, tc.Name) + t.Fatalf("unexpected error(s): %v (%s)", diags, tc.Name) } } } diff --git a/github/util_v4.go b/github/util_v4.go index 87a0e841b3..75b7cce625 100644 --- a/github/util_v4.go +++ b/github/util_v4.go @@ -1,7 +1,7 @@ package github import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/shurcooL/githubv4" ) diff --git a/github/util_v4_branch_protection.go b/github/util_v4_branch_protection.go index f5a5705df4..eae3467cda 100644 --- a/github/util_v4_branch_protection.go +++ b/github/util_v4_branch_protection.go @@ -6,7 +6,7 @@ import ( "fmt" "log" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/shurcooL/githubv4" ) @@ -144,10 +144,6 @@ func branchProtectionResourceData(d *schema.ResourceData, meta interface{}) (Bra data.AllowsForcePushes = v.(bool) } - if v, ok := d.GetOk(PROTECTION_BLOCKS_CREATIONS); ok { - data.BlocksCreations = v.(bool) - } - if v, ok := d.GetOk(PROTECTION_IS_ADMIN_ENFORCED); ok { data.IsAdminEnforced = v.(bool) } @@ -190,7 +186,7 @@ func branchProtectionResourceData(d *schema.ResourceData, meta interface{}) (Bra if v, ok := m[PROTECTION_RESTRICTS_REVIEW_DISMISSALS]; ok { data.RestrictsReviewDismissals = v.(bool) } - if v, ok := m[PROTECTION_RESTRICTS_REVIEW_DISMISSERS]; ok { + if v, ok := m[PROTECTION_REVIEW_DISMISSAL_ALLOWANCES]; ok { reviewDismissalActorIDs := make([]string, 0) vL := v.(*schema.Set).List() for _, v := range vL { @@ -211,7 +207,7 @@ func branchProtectionResourceData(d *schema.ResourceData, meta interface{}) (Bra data.BypassPullRequestActorIDs = bypassPullRequestActorIDs } } - if v, ok := m[PROTECTION_REQUIRES_LAST_PUSH_APPROVAL]; ok { + if v, ok := m[PROTECTION_REQUIRE_LAST_PUSH_APPROVAL]; ok { data.RequireLastPushApproval =
v.(bool) } } @@ -239,14 +235,32 @@ func branchProtectionResourceData(d *schema.ResourceData, meta interface{}) (Bra } if v, ok := d.GetOk(PROTECTION_RESTRICTS_PUSHES); ok { - pushActorIDs := make([]string, 0) - vL := v.(*schema.Set).List() - for _, v := range vL { - pushActorIDs = append(pushActorIDs, v.(string)) + vL := v.([]interface{}) + if len(vL) > 1 { + return BranchProtectionResourceData{}, + fmt.Errorf("error multiple %s declarations", PROTECTION_RESTRICTS_PUSHES) } - if len(pushActorIDs) > 0 { - data.PushActorIDs = pushActorIDs + for _, v := range vL { + if v == nil { + break + } + data.RestrictsPushes = true + + m := v.(map[string]interface{}) + if v, ok := m[PROTECTION_BLOCKS_CREATIONS]; ok { + data.BlocksCreations = v.(bool) + } + if v, ok := m[PROTECTION_PUSH_ALLOWANCES]; ok { + pushActorIDs := make([]string, 0) + vL := v.(*schema.Set).List() + for _, v := range vL { + pushActorIDs = append(pushActorIDs, v.(string)) + } + if len(pushActorIDs) > 0 { + data.PushActorIDs = pushActorIDs + } + } } } @@ -283,7 +297,7 @@ func branchProtectionResourceDataActors(d *schema.ResourceData, meta interface{} } m := v.(map[string]interface{}) - if v, ok := m[PROTECTION_RESTRICTS_REVIEW_DISMISSERS]; ok { + if v, ok := m[PROTECTION_REVIEW_DISMISSAL_ALLOWANCES]; ok { reviewDismissalActorIDs := make([]string, 0) vL := v.(*schema.Set).List() for _, v := range vL { @@ -308,14 +322,32 @@ func branchProtectionResourceDataActors(d *schema.ResourceData, meta interface{} } if v, ok := d.GetOk(PROTECTION_RESTRICTS_PUSHES); ok { - pushActorIDs := make([]string, 0) - vL := v.(*schema.Set).List() - for _, v := range vL { - pushActorIDs = append(pushActorIDs, v.(string)) + vL := v.([]interface{}) + if len(vL) > 1 { + return BranchProtectionResourceData{}, + fmt.Errorf("error multiple %s declarations", PROTECTION_RESTRICTS_PUSHES) } - if len(pushActorIDs) > 0 { - data.PushActorIDs = pushActorIDs + for _, v := range vL { + if v == nil { + break + } + data.RestrictsPushes = true + + m := v.(map[string]interface{}) + if v, ok := m[PROTECTION_BLOCKS_CREATIONS]; ok { + data.BlocksCreations = v.(bool) + } + if v, ok := m[PROTECTION_PUSH_ALLOWANCES]; ok { + pushActorIDs := make([]string, 0) + vL := v.(*schema.Set).List() + for _, v := range vL { + pushActorIDs = append(pushActorIDs, v.(string)) + } + if len(pushActorIDs) > 0 { + data.PushActorIDs = pushActorIDs + } + } } } @@ -474,9 +506,9 @@ func setApprovingReviews(protection BranchProtectionRule, data BranchProtectionR PROTECTION_REQUIRES_CODE_OWNER_REVIEWS: protection.RequiresCodeOwnerReviews, PROTECTION_DISMISSES_STALE_REVIEWS: protection.DismissesStaleReviews, PROTECTION_RESTRICTS_REVIEW_DISMISSALS: protection.RestrictsReviewDismissals, - PROTECTION_RESTRICTS_REVIEW_DISMISSERS: dismissalActors, + PROTECTION_REVIEW_DISMISSAL_ALLOWANCES: dismissalActors, PROTECTION_PULL_REQUESTS_BYPASSERS: bypassPullRequestActors, - PROTECTION_REQUIRES_LAST_PUSH_APPROVAL: protection.RequireLastPushApproval, + PROTECTION_REQUIRE_LAST_PUSH_APPROVAL: protection.RequireLastPushApproval, }, } @@ -498,14 +530,22 @@ func setStatusChecks(protection BranchProtectionRule) interface{} { return statusChecks } -func setPushes(protection BranchProtectionRule, data BranchProtectionResourceData, meta interface{}) []string { +func setPushes(protection BranchProtectionRule, data BranchProtectionResourceData, meta interface{}) interface{} { if !protection.RestrictsPushes { return nil } + pushAllowances := protection.PushAllowances.Nodes pushActors := setPushActorIDs(pushAllowances, data, meta) - 
return pushActors + restrictsPushes := []interface{}{ + map[string]interface{}{ + PROTECTION_BLOCKS_CREATIONS: protection.BlocksCreations, + PROTECTION_PUSH_ALLOWANCES: pushActors, + }, + } + + return restrictsPushes } func setForcePushBypassers(protection BranchProtectionRule, data BranchProtectionResourceData, meta interface{}) []string { diff --git a/github/util_v4_consts.go b/github/util_v4_consts.go index 0d073de51d..43dcf00e8d 100644 --- a/github/util_v4_consts.go +++ b/github/util_v4_consts.go @@ -5,24 +5,25 @@ const ( PROTECTION_ALLOWS_FORCE_PUSHES = "allows_force_pushes" PROTECTION_BLOCKS_CREATIONS = "blocks_creations" PROTECTION_DISMISSES_STALE_REVIEWS = "dismiss_stale_reviews" + PROTECTION_FORCE_PUSHES_BYPASSERS = "force_push_bypassers" PROTECTION_IS_ADMIN_ENFORCED = "enforce_admins" + PROTECTION_LOCK_BRANCH = "lock_branch" PROTECTION_PATTERN = "pattern" + PROTECTION_PULL_REQUESTS_BYPASSERS = "pull_request_bypassers" + PROTECTION_PUSH_ALLOWANCES = "push_allowances" PROTECTION_REQUIRED_APPROVING_REVIEW_COUNT = "required_approving_review_count" PROTECTION_REQUIRED_STATUS_CHECK_CONTEXTS = "contexts" PROTECTION_REQUIRES_APPROVING_REVIEWS = "required_pull_request_reviews" PROTECTION_REQUIRES_CODE_OWNER_REVIEWS = "require_code_owner_reviews" PROTECTION_REQUIRES_COMMIT_SIGNATURES = "require_signed_commits" - PROTECTION_REQUIRES_LINEAR_HISTORY = "required_linear_history" PROTECTION_REQUIRES_CONVERSATION_RESOLUTION = "require_conversation_resolution" + PROTECTION_REQUIRES_LINEAR_HISTORY = "required_linear_history" PROTECTION_REQUIRES_STATUS_CHECKS = "required_status_checks" PROTECTION_REQUIRES_STRICT_STATUS_CHECKS = "strict" - PROTECTION_RESTRICTS_PUSHES = "push_restrictions" + PROTECTION_REQUIRE_LAST_PUSH_APPROVAL = "require_last_push_approval" + PROTECTION_RESTRICTS_PUSHES = "restrict_pushes" PROTECTION_RESTRICTS_REVIEW_DISMISSALS = "restrict_dismissals" - PROTECTION_RESTRICTS_REVIEW_DISMISSERS = "dismissal_restrictions" - PROTECTION_FORCE_PUSHES_BYPASSERS = "force_push_bypassers" - PROTECTION_PULL_REQUESTS_BYPASSERS = "pull_request_bypassers" - PROTECTION_LOCK_BRANCH = "lock_branch" - PROTECTION_REQUIRES_LAST_PUSH_APPROVAL = "require_last_push_approval" + PROTECTION_REVIEW_DISMISSAL_ALLOWANCES = "dismissal_restrictions" REPOSITORY_ID = "repository_id" ) diff --git a/go.mod b/go.mod index fa189f6e17..d78a2a8e0c 100644 --- a/go.mod +++ b/go.mod @@ -1,220 +1,235 @@ -module github.com/integrations/terraform-provider-github/v5 +module github.com/integrations/terraform-provider-github/v6 -go 1.17 +go 1.21 + +toolchain go1.22.0 require ( github.com/client9/misspell v0.3.4 - github.com/golangci/golangci-lint v1.41.1 + github.com/golangci/golangci-lint v1.57.1 github.com/google/go-github/v57 v57.0.0 - github.com/google/uuid v1.5.0 - github.com/hashicorp/terraform-plugin-sdk v1.17.2 + github.com/google/uuid v1.6.0 + github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 github.com/shurcooL/githubv4 v0.0.0-20221126192849-0b5c4c7994eb - github.com/stretchr/testify v1.8.4 - golang.org/x/crypto v0.18.0 - golang.org/x/oauth2 v0.16.0 + github.com/stretchr/testify v1.9.0 + golang.org/x/crypto v0.21.0 + golang.org/x/oauth2 v0.18.0 gopkg.in/square/go-jose.v2 v2.6.0 ) require ( - 4d63.com/gochecknoglobals v0.1.0 // indirect - cloud.google.com/go v0.110.2 // indirect - cloud.google.com/go/compute v1.20.1 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.13.0 // indirect - 
cloud.google.com/go/storage v1.29.0 // indirect - github.com/BurntSushi/toml v1.2.1 // indirect + 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect + 4d63.com/gochecknoglobals v0.2.1 // indirect + github.com/4meepo/tagalign v1.3.3 // indirect + github.com/Abirdcfly/dupword v0.0.14 // indirect + github.com/Antonboom/errname v0.1.12 // indirect + github.com/Antonboom/nilnil v0.1.7 // indirect + github.com/Antonboom/testifylint v1.2.0 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/sprig v2.22.0+incompatible // indirect - github.com/OpenPeeDeeP/depguard v1.1.1 // indirect + github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect github.com/agext/levenshtein v1.2.2 // indirect + github.com/alecthomas/go-check-sumtype v0.1.4 // indirect + github.com/alexkohler/nakedret/v2 v2.0.4 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect - github.com/apparentlymart/go-cidr v1.1.0 // indirect - github.com/apparentlymart/go-textseg/v12 v12.0.0 // indirect - github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect - github.com/armon/go-radix v1.0.0 // indirect - github.com/ashanbrown/forbidigo v1.3.0 // indirect + github.com/alingse/asasalint v0.0.11 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/ashanbrown/forbidigo v1.6.0 // indirect github.com/ashanbrown/makezero v1.1.1 // indirect - github.com/aws/aws-sdk-go v1.44.122 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect - github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/bkielbasa/cyclop v1.2.0 // indirect - github.com/bombsimon/wsl/v3 v3.3.0 // indirect + github.com/bkielbasa/cyclop v1.2.1 // indirect + github.com/blizzy78/varnamelen v0.8.0 // indirect + github.com/bombsimon/wsl/v4 v4.2.1 // indirect + github.com/breml/bidichk v0.2.7 // indirect + github.com/breml/errchkjson v0.3.6 // indirect + github.com/butuzov/ireturn v0.3.0 // indirect + github.com/butuzov/mirror v1.1.0 // indirect + github.com/catenacyber/perfsprint v0.7.1 // indirect + github.com/ccojocar/zxcvbn-go v1.0.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/charithe/durationcheck v0.0.9 // indirect - github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect - github.com/daixiang0/gci v0.2.9 // indirect + github.com/charithe/durationcheck v0.0.10 // indirect + github.com/chavacava/garif v0.1.0 // indirect + github.com/ckaznocha/intrange v0.1.0 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/curioswitch/go-reassign v0.2.0 // indirect + github.com/daixiang0/gci v0.12.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/denis-tingajkin/go-header v0.4.2 // indirect - github.com/esimonov/ifshort v1.0.4 // indirect - github.com/ettle/strcase v0.1.1 // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/denis-tingaikin/go-header v0.5.0 // indirect + github.com/ettle/strcase v0.2.0 // indirect + github.com/fatih/color v1.16.0 // indirect github.com/fatih/structtag v1.2.0 // indirect + github.com/firefart/nonamedreturns v1.0.4 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect 
github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/go-critic/go-critic v0.6.5 // indirect - github.com/go-toolsmith/astcast v1.0.0 // indirect - github.com/go-toolsmith/astcopy v1.0.2 // indirect - github.com/go-toolsmith/astequal v1.0.3 // indirect - github.com/go-toolsmith/astfmt v1.0.0 // indirect - github.com/go-toolsmith/astp v1.0.0 // indirect - github.com/go-toolsmith/strparse v1.0.0 // indirect - github.com/go-toolsmith/typep v1.0.2 // indirect - github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect + github.com/ghostiam/protogetter v0.3.5 // indirect + github.com/go-critic/go-critic v0.11.2 // indirect + github.com/go-toolsmith/astcast v1.1.0 // indirect + github.com/go-toolsmith/astcopy v1.1.0 // indirect + github.com/go-toolsmith/astequal v1.2.0 // indirect + github.com/go-toolsmith/astfmt v1.1.0 // indirect + github.com/go-toolsmith/astp v1.1.0 // indirect + github.com/go-toolsmith/strparse v1.1.0 // indirect + github.com/go-toolsmith/typep v1.1.0 // indirect + github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.8.1 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect - github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect - github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect - github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect - github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect - github.com/golangci/misspell v0.3.5 // indirect - github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect - github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect + github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect + github.com/golangci/misspell v0.4.1 // indirect + github.com/golangci/plugin-module-register v0.1.1 // indirect + github.com/golangci/revgrep v0.5.2 // indirect + github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/s2a-go v0.1.4 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.11.0 // indirect - github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect + github.com/gordonklaus/ineffassign v0.1.0 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect - github.com/gostaticanalysis/testutil v0.4.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-getter v1.7.0 // indirect - github.com/hashicorp/go-hclog v1.2.0 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.3.0 // indirect - github.com/hashicorp/go-safetemp v1.0.0 // indirect - github.com/hashicorp/go-uuid v1.0.1 // indirect + 
github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/hc-install v0.6.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/hcl/v2 v2.8.2 // indirect + github.com/hashicorp/hcl/v2 v2.19.1 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7 // indirect - github.com/hashicorp/terraform-exec v0.13.3 // indirect - github.com/hashicorp/terraform-json v0.10.0 // indirect - github.com/hashicorp/terraform-plugin-test/v2 v2.2.1 // indirect - github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect - github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect - github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.12 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect - github.com/jgautheron/goconst v1.5.1 // indirect + github.com/hashicorp/terraform-exec v0.20.0 // indirect + github.com/hashicorp/terraform-json v0.21.0 // indirect + github.com/hashicorp/terraform-plugin-go v0.22.0 // indirect + github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect + github.com/hashicorp/terraform-registry-address v0.2.3 // indirect + github.com/hashicorp/terraform-svchost v0.1.1 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jgautheron/goconst v1.7.0 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jjti/go-spancheck v0.5.3 // indirect github.com/julz/importas v0.1.0 // indirect - github.com/kisielk/errcheck v1.6.2 // indirect - github.com/kisielk/gotool v1.0.0 // indirect - github.com/klauspost/compress v1.15.11 // indirect + github.com/karamaru-alpha/copyloopvar v1.0.8 // indirect + github.com/kisielk/errcheck v1.7.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.4 // indirect github.com/kulti/thelper v0.6.3 // indirect - github.com/kunwardeep/paralleltest v1.0.6 // indirect - github.com/kyoh86/exportloopref v0.1.8 // indirect + github.com/kunwardeep/paralleltest v1.0.10 // indirect + github.com/kyoh86/exportloopref v0.1.11 // indirect github.com/ldez/gomoddirectives v0.2.3 // indirect - github.com/ldez/tagliatelle v0.3.1 // indirect + github.com/ldez/tagliatelle v0.5.0 // indirect + github.com/leonklingele/grouper v1.1.1 // indirect + github.com/lufeee/execinquery v1.2.1 // indirect + github.com/macabu/inamedparam v0.1.3 // indirect github.com/magiconair/properties v1.8.6 // indirect - github.com/maratori/testpackage v1.1.0 // indirect - github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect + github.com/maratori/testableexamples v1.0.0 // indirect + github.com/maratori/testpackage v1.1.1 // indirect + github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/mbilski/exhaustivestruct v1.2.0 // indirect - github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 // indirect - 
github.com/mgechev/revive v1.2.4 // indirect - github.com/mitchellh/cli v1.1.2 // indirect - github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mgechev/revive v1.3.7 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/go-wordwrap v1.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mitchellh/reflectwalk v1.0.1 // indirect - github.com/moricho/tparallel v0.2.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moricho/tparallel v0.3.1 // indirect github.com/nakabonne/nestif v0.3.1 // indirect - github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect - github.com/nishanths/exhaustive v0.8.3 // indirect + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect + github.com/nishanths/exhaustive v0.12.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect + github.com/nunnatsa/ginkgolinter v0.16.1 // indirect github.com/oklog/run v1.0.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect - github.com/pkg/errors v0.9.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polyfloyd/go-errorlint v1.0.5 // indirect - github.com/posener/complete v1.2.3 // indirect + github.com/polyfloyd/go-errorlint v1.4.8 // indirect github.com/prometheus/client_golang v1.12.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect - github.com/quasilyte/go-ruleguard v0.3.18 // indirect - github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f // indirect - github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect + github.com/quasilyte/go-ruleguard v0.4.2 // indirect + github.com/quasilyte/gogrep v0.5.0 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect - github.com/ryancurrah/gomodguard v1.2.4 // indirect - github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect - github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect - github.com/securego/gosec/v2 v2.13.1 // indirect + github.com/ryancurrah/gomodguard v1.3.1 // indirect + github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect + github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect + github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect + github.com/sashamelentyev/interfacebloat v1.1.0 // indirect + github.com/sashamelentyev/usestdlibvars v1.25.0 // indirect + github.com/securego/gosec/v2 v2.19.0 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/shurcooL/graphql v0.0.0-20220606043923-3cf50f8a0a29 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect - github.com/sonatard/noctx v0.0.1 // indirect - github.com/sourcegraph/go-diff v0.6.1 // indirect - github.com/spf13/afero v1.4.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sivchari/containedctx v1.0.3 // indirect + github.com/sivchari/tenv v1.7.1 // indirect + github.com/sonatard/noctx v0.0.2 // indirect + 
github.com/sourcegraph/go-diff v0.7.0 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/cobra v1.6.0 // indirect + github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.7.1 // indirect + github.com/spf13/viper v1.12.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.4.1 // indirect - github.com/tdakkota/asciicheck v0.1.1 // indirect - github.com/tetafro/godot v1.4.11 // indirect - github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect - github.com/tomarrell/wrapcheck/v2 v2.3.0 // indirect + github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect + github.com/tdakkota/asciicheck v0.2.0 // indirect + github.com/tetafro/godot v1.4.16 // indirect + github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect + github.com/timonwong/loggercheck v0.9.4 // indirect + github.com/tomarrell/wrapcheck/v2 v2.8.3 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect - github.com/ulikunitz/xz v0.5.10 // indirect - github.com/ultraware/funlen v0.0.3 // indirect - github.com/ultraware/whitespace v0.0.5 // indirect - github.com/uudashr/gocognit v1.0.6 // indirect - github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect - github.com/vmihailenco/tagparser v0.1.1 // indirect + github.com/ultraware/funlen v0.1.0 // indirect + github.com/ultraware/whitespace v0.1.0 // indirect + github.com/uudashr/gocognit v1.1.2 // indirect + github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect + github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + github.com/xen0n/gosmopolitan v1.2.2 // indirect + github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.2.0 // indirect - github.com/zclconf/go-cty v1.8.2 // indirect - github.com/zclconf/go-cty-yaml v1.0.2 // indirect - go.opencensus.io v0.24.0 // indirect - golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect - golang.org/x/mod v0.8.0 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.16.0 // indirect + github.com/ykadowak/zerologlint v0.1.5 // indirect + github.com/zclconf/go-cty v1.14.2 // indirect + gitlab.com/bosi/decorder v0.4.1 // indirect + go-simpler.org/musttag v0.9.0 // indirect + go-simpler.org/sloglint v0.5.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/automaxprocs v1.5.3 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect + golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect + golang.org/x/mod v0.16.0 // indirect + golang.org/x/net v0.22.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.6.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/api v0.126.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/genproto/googleapis/api 
v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/grpc v1.56.3 // indirect - google.golang.org/protobuf v1.31.0 // indirect + golang.org/x/tools v0.19.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/grpc v1.61.1 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.3.3 // indirect - mvdan.cc/gofumpt v0.4.0 // indirect - mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect - mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect - mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect + honnef.co/go/tools v0.4.7 // indirect + mvdan.cc/gofumpt v0.6.0 // indirect + mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 // indirect ) diff --git a/go.sum b/go.sum index b3dd3f3ba0..8327b8c11f 100644 --- a/go.sum +++ b/go.sum @@ -1,13 +1,12 @@ -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0= -4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= +4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= +4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= +4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= +4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -16,893 +15,213 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod 
h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA= -cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= -cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= -cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= -cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= -cloud.google.com/go/analytics v0.19.0/go.mod 
h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= -cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= -cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= -cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= -cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= -cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= -cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= -cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= 
-cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= -cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= -cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= -cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= -cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod 
h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= -cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= 
-cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= -cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= -cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= -cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= -cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= 
-cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= 
-cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= -cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod 
h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= -cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= -cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/iap v1.6.0/go.mod 
h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= -cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= -cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= -cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= -cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= -cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache 
v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= 
-cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= -cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= -cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= -cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= -cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= -cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= -cloud.google.com/go/pubsublite v1.7.0/go.mod 
h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= -cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= -cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= -cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= -cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= -cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= 
-cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= -cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= -cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= -cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= -cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= -cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/servicemanagement v1.4.0/go.mod 
h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= -cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= -cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= -cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= -cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= -cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= -cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= -cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI= -cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod 
h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= -cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= -cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= -cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= -cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= -cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod 
h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= -cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= +github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= +github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= +github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= +github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI= +github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY= +github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro= +github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow= +github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ= +github.com/Antonboom/testifylint v1.2.0 h1:015bxD8zc5iY8QwTp4+RG9I4kIbqwvGX9TrBbb7jGdM= +github.com/Antonboom/testifylint v1.2.0/go.mod h1:rkmEqjqVnHDRNsinyN6fPSLnoajzFwsCcguJgwADBkw= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 h1:sATXp1x6/axKxz2Gjxv8MALP0bXaNRfQinEwyfMcx8c= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA= -github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= 
github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= -github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= -github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= +github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= +github.com/alecthomas/go-check-sumtype v0.1.4 h1:WCvlB3l5Vq5dZQTFmodqL2g68uHiSwwlWcT5a2FGK0c= +github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= +github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexkohler/nakedret/v2 v2.0.4 h1:yZuKmjqGi0pSmjGpOC016LtPJysIL0WEUiaXW5SUnNg= +github.com/alexkohler/nakedret/v2 v2.0.4/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= -github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= -github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= -github.com/apparentlymart/go-cidr v1.1.0/go.mod 
h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= -github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/apparentlymart/go-textseg/v12 v12.0.0 h1:bNEQyAGak9tojivJNkoqWErVCQbjdL7GzRt3F8NvfJ0= +github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= -github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= -github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/ashanbrown/forbidigo v1.2.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/forbidigo v1.3.0 h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc= -github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= +github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= -github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo= -github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= 
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= -github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= -github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= -github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM= -github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= -github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= +github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= +github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bombsimon/wsl/v4 v4.2.1 h1:Cxg6u+XDWff75SIFFmNsqnIOgob+Q9hG6y/ioKbRFiM= +github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= +github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= +github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= +github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= +github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0= +github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= +github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= +github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE= +github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc= +github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= +github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg= +github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/charithe/durationcheck v0.0.8/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
-github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk=
-github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
-github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU=
-github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 h1:E7LT642ysztPWE0dfz43cWOvMiF42DyTRC+eZIaO4yI=
-github.com/chavacava/garif v0.0.0-20220630083739-93517212f375/go.mod h1:4m1Rv7xfuwWPNKXlThldNuJvutYM6J95wNuuVmn55To=
-github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
+github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4=
+github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
+github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=
+github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/ckaznocha/intrange v0.1.0 h1:ZiGBhvrdsKpoEfzh9CjBfDSZof6QB0ORY5tXasUtiew=
+github.com/ckaznocha/intrange v0.1.0/go.mod h1:Vwa9Ekex2BrEQMg6zlrWwbs/FtYw7eS5838Q7UjK7TQ=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cristalhq/acmd v0.8.1/go.mod h1:LG5oa43pE/BbxtfMoImHCQN++0Su7dzipdgBjMCBVDQ=
-github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc=
-github.com/daixiang0/gci v0.2.9 h1:iwJvwQpBZmMg31w+QQ6jsyZ54KEATn6/nfARbBNW294=
-github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc=
-github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
+github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/daixiang0/gci v0.12.3 h1:yOZI7VAxAGPQmkb1eqt5g/11SUlwoat1fSblGLmdiQc=
+github.com/daixiang0/gci v0.12.3/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denis-tingajkin/go-header v0.4.2 h1:jEeSF4sdv8/3cT/WY8AgDHUoItNSoEZ7qg9dX7pc218=
-github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
-github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8=
+github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
-github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
-github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q=
-github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
-github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
-github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
-github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE=
-github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA=
-github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0=
-github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=
-github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
+github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q=
+github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
-github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y=
+github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
-github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
-github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E=
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo=
-github.com/go-critic/go-critic v0.6.5 h1:fDaR/5GWURljXwF8Eh31T2GZNz9X4jeboS912mWF8Uo=
-github.com/go-critic/go-critic v0.6.5/go.mod h1:ezfP/Lh7MA6dBNn4c6ab5ALv3sKnZVLx37tr00uuaOY=
-github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
-github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
-github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
-github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
-github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
-github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
-github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
-github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-billy/v5 v5.1.0 h1:4pl5BV4o7ZG/lterP4S6WzJ6xr49Ba5ET9ygheTYahk=
-github.com/go-git/go-billy/v5 v5.1.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
-github.com/go-git/go-git/v5 v5.3.0 h1:8WKMtJR2j8RntEXR/uvTKagfEt4GYlwQ7mntE4+0GWc=
-github.com/go-git/go-git/v5 v5.3.0/go.mod h1:xdX4bWJ48aOrdhnl2XqHYstHbbp6+LFS4r4X+lNVprw=
+github.com/ghostiam/protogetter v0.3.5 h1:+f7UiF8XNd4w3a//4DnusQ2SZjPkUjxkMEfjbxOK4Ug=
+github.com/ghostiam/protogetter v0.3.5/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw=
+github.com/go-critic/go-critic v0.11.2 h1:81xH/2muBphEgPtcwH1p6QD+KzXl2tMSi3hXjBSxDnM=
+github.com/go-critic/go-critic v0.11.2/go.mod h1:OePaicfjsf+KPy33yq4gzv6CO7TEQ9Rom6ns1KsJnl8=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
+github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
+github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
+github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
+github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
-github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
-github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
-github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
-github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
-github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g=
-github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
-github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
-github.com/go-toolsmith/astcopy v1.0.2 h1:YnWf5Rnh1hUudj11kei53kI57quN/VH6Hp1n+erozn0=
-github.com/go-toolsmith/astcopy v1.0.2/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y=
-github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astequal v1.0.2/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4=
-github.com/go-toolsmith/astequal v1.0.3 h1:+LVdyRatFS+XO78SGV4I3TCEA0AC7fKEGma+fH+674o=
+github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8=
+github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU=
+github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s=
+github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw=
github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4=
-github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k=
-github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
-github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
-github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg=
-github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
-github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
-github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5 h1:eD9POs68PHkwrx7hAB78z1cb6PfGq/jyWn3wJywsH1o=
-github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5/go.mod h1:3NAwwmD4uY/yggRxoEjk/S00MIV3A+H7rrE3i87eYxM=
-github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4=
+github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ=
+github.com/go-toolsmith/astequal v1.2.0 h1:3Fs3CYZ1k9Vo4FzFhwwewC3CHISHDnVUPC4x0bI2+Cw=
+github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY=
+github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco=
+github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4=
+github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA=
+github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA=
+github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk=
+github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus=
github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
-github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
-github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk=
-github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
-github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=
-github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
+github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw=
+github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ=
+github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus=
+github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig=
+github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c=
+github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U=
+github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
-github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -915,9 +234,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -934,41 +250,25 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
-github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
-github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo=
-github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
-github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
-github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY=
-github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
-github.com/golangci/golangci-lint v1.41.1 h1:KH28pTSqRu6DTXIAANl1sPXNCmqg4VEH21z6G9Wj4SM=
-github.com/golangci/golangci-lint v1.41.1/go.mod h1:LPtcY3aAAU8wydHrKpnanx9Og8K/cblZSyGmI5CJZUk=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo=
-github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
-github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY=
-github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ=
-github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
+github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g=
+github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM=
+github.com/golangci/golangci-lint v1.57.1 h1:cqhpzkzjDwdN12rfMf1SUyyKyp88a1SltNqEYGS0nJw=
+github.com/golangci/golangci-lint v1.57.1/go.mod h1:zLcHhz3NHc88T5zV2j75lyc0zH3LdOPOybblYa4p0oI=
+github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g=
+github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI=
+github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c=
+github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc=
+github.com/golangci/revgrep v0.5.2 h1:EndcWoRhcnfj2NHQ+28hyuXpLMF+dQmCN+YaeeIl4FU=
+github.com/golangci/revgrep v0.5.2/go.mod h1:bjAMA+Sh/QUfTDcHzxfyHxr4xKvllVr/0sCv2e7jJHA=
+github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs=
+github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
-github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs=
-github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -977,13 +277,10 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github/v57 v57.0.0 h1:L+Y3UPTY8ALM8x+TV0lg+IEBI+upibemtBD8Q9u7zHs=
@@ -991,89 +288,29 @@ github.com/google/go-github/v57 v57.0.0/go.mod h1:s0omdnye0hvK/ecLvpsGfJMiRt85Pi
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
-github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
-github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
-github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
-github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
-github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw=
-github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
-github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
-github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
-github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
-github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
-github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
-github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
-github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
-github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
-github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
-github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
-github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
-github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
-github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw=
-github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4=
-github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
-github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
-github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
-github.com/gookit/color v1.5.1/go.mod h1:wZFzea4X8qN6vHOSP2apMb4/+w/orMznEzYsIHPaqKM=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
-github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw=
-github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U=
-github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
-github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
-github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
-github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw=
-github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0=
+github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s=
+github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
-github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI=
github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=
github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
-github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70=
github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk=
@@ -1081,372 +318,206 @@ github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW
github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
-github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU=
github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-getter v1.5.3/go.mod h1:BrrV/1clo8cCYu6mxvboYg+KutTiFnXjMEgDD8+i7ZI=
-github.com/hashicorp/go-getter v1.7.0 h1:bzrYP+qu/gMrL1au7/aDvkoOVGUJpeKBgbqRHACAFDY=
-github.com/hashicorp/go-getter v1.7.0/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744=
-github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
-github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM=
-github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI=
+github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-plugin v1.3.0 h1:4d/wJojzvHV1I4i/rrjVaeuyxWrLzDE1mDCyDy8fXS8=
-github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
-github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A=
+github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
+github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs=
+github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90=
-github.com/hashicorp/hcl/v2 v2.8.2 h1:wmFle3D1vu0okesm8BTLVDyJ6/OL9DCLUwn0b2OptiY=
-github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY=
+github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI=
+github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7 h1:Pc5TCv9mbxFN6UVX0LH6CpQrdTM5YjbVI2w15237Pjk=
-github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A=
-github.com/hashicorp/terraform-exec v0.13.3 h1:R6L2mNpDGSEqtLrSONN8Xth0xYwNrnEVzDz6LF/oJPk=
-github.com/hashicorp/terraform-exec v0.13.3/go.mod h1:SSg6lbUsVB3DmFyCPjBPklqf6EYGX0TlQ6QTxOlikDU=
-github.com/hashicorp/terraform-json v0.10.0 h1:9syPD/Y5t+3uFjG8AiWVPu1bklJD8QB8iTCaJASc8oQ=
-github.com/hashicorp/terraform-json v0.10.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE=
-github.com/hashicorp/terraform-plugin-sdk v1.17.2 h1:V7DUR3yBWFrVB9z3ddpY7kiYVSsq4NYR67NiTs93NQo=
-github.com/hashicorp/terraform-plugin-sdk v1.17.2/go.mod h1:wkvldbraEMkz23NxkkAsFS88A1R9eUiooiaUZyS6TLw=
-github.com/hashicorp/terraform-plugin-test/v2 v2.2.1 h1:d3Rzmi5bnRzcAZon91FY4TDCMUYdU8c5vpPpf2Tz+c8=
-github.com/hashicorp/terraform-plugin-test/v2 v2.2.1/go.mod h1:eZ9JL3O69Cb71Skn6OhHyj17sLmHRb+H6VrDcJjKrYU=
-github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0=
-github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg=
-github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
-github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
-github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
-github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
-github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo=
+github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw=
+github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U=
+github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk=
+github.com/hashicorp/terraform-plugin-go v0.22.0 h1:1OS1Jk5mO0f5hrziWJGXXIxBrMe2j/B8E+DVGw43Xmc=
+github.com/hashicorp/terraform-plugin-go v0.22.0/go.mod h1:mPULV91VKss7sik6KFEcEu7HuTogMLLO/EvWCuFkRVE=
+github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0=
+github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow=
+github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 h1:qHprzXy/As0rxedphECBEQAh3R4yp6pKksKHcqZx5G8=
+github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0/go.mod h1:H+8tjs9TjV2w57QFVSMBQacf8k/E1XwLXGCARgViC6A=
+github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI=
+github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM=
+github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ=
+github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc=
+github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
+github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
+github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
+github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
-github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
-github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
-github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
-github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
-github.com/jhump/protoreflect v1.6.1 h1:4/2yi5LyDPP7nN+Hiird1SAJ6YoxUm13/oxHGRnbPd8=
-github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
-github.com/jingyugao/rowserrcheck v1.1.0/go.mod h1:TOQpc2SLx6huPfoFGK3UOnEG+u02D3C1GeosjupAKCA=
+github.com/jgautheron/goconst v1.7.0 h1:cEqH+YBKLsECnRSd4F4TK5ri8t/aXtt/qoL0Ft252B0=
+github.com/jgautheron/goconst v1.7.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
+github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c=
+github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo=
github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
-github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw=
+github.com/jjti/go-spancheck v0.5.3 h1:vfq4s2IB8T3HvbpiwDTYgVPj1Ze/ZSXrTtaZRTc7CuM=
+github.com/jjti/go-spancheck v0.5.3/go.mod h1:eQdOX1k3T+nAKvZDyLC3Eby0La4dZ+I19iOl5NzSPFE=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
-github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
-github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/errcheck v1.6.2 h1:uGQ9xI8/pgc9iOoCe7kWQgRE6SBTrCGmTSf0LrEtY7c=
-github.com/kisielk/errcheck v1.6.2/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
-github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
+github.com/karamaru-alpha/copyloopvar v1.0.8 h1:gieLARwuByhEMxRwM3GRS/juJqFbLraftXIKDDNJ50Q=
+github.com/karamaru-alpha/copyloopvar v1.0.8/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0=
+github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
-github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
-github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
-github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8=
+github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U=
github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=
github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
-github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30=
-github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g=
-github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes=
-github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs=
+github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M=
-github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
-github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4=
+github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ=
+github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA=
github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
-github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88=
-github.com/ldez/tagliatelle v0.3.1 h1:3BqVVlReVUZwafJUwQ+oxbx2BEX2vUG4Yu/NOfMiKiM=
-github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88=
-github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
-github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
-github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
-github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo=
+github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4=
+github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU=
+github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
+github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM=
+github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
+github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk=
+github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= -github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q= -github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= +github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2srm/LN17lpybq15AryXIRcWYLE= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= -github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 h1:zpIH83+oKzcpryru8ceC6BxnoG8TBrhgAvRg8obzup0= -github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.7/go.mod h1:vuE5ox/4L/HDd63MCcCk3H6wTLQ6XXezRphJ8cJJOxY= -github.com/mgechev/revive v1.2.4 h1:+2Hd/S8oO2H0Ikq2+egtNwQsVhAeELHjxjIUFX5ajLI= -github.com/mgechev/revive v1.2.4/go.mod h1:iAWlQishqCuj4yhV24FTnKSXGpbAA+0SckXB8GQMX/Q= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= -github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw= -github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mgechev/revive v1.3.7 h1:502QY0vQGe9KtYJ9FpxMz9rL+Fc/P13CI5POL4uHCcE= +github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= 
-github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.4/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= -github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210209181001-cf43108d6880/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= -github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= +github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA= +github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
-github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= -github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= -github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= -github.com/nishanths/exhaustive v0.8.3 h1:pw5O09vwg8ZaditDp/nQRqVnrMczSJDxRDJMowvhsrM= -github.com/nishanths/exhaustive v0.8.3/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= -github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= -github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= +github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg= +github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nunnatsa/ginkgolinter v0.16.1 h1:uDIPSxgVHZ7PgbJElRDGzymkXH+JaF7mjew+Thjnt6Q= +github.com/nunnatsa/ginkgolinter v0.16.1/go.mod h1:4tWRinDN1FeJgU+iJANW/kz7xKN5nYRAOfJDQUS9dOQ= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.1 h1:foqVmeWDD6yYpK+Yz3fHyNIxFYNxswxqNFjSKe+vI54= -github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 
v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= -github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= +github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= -github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo= +github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= 
+github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20210510181950-ab96adb96fea/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= -github.com/polyfloyd/go-errorlint v1.0.5 h1:AHB5JRCjlmelh9RrLxT9sgzpalIwwq4hqE8EkwIwKdY= -github.com/polyfloyd/go-errorlint v1.0.5/go.mod h1:APVvOesVSAnne5SClsPxPdfvZTVDojXh1/G3qb5wjGI= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= -github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/polyfloyd/go-errorlint v1.4.8 h1:jiEjKDH33ouFktyez7sckv6pHWif9B7SuS8cutDXFHw= +github.com/polyfloyd/go-errorlint v1.4.8/go.mod h1:NNCxFcFjZcw3xNjVdCchERkEM6Oz7wta2XJVxRftwO4= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= @@ -1457,218 +528,144 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1: github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod 
h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= -github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.4/go.mod h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA= -github.com/quasilyte/go-ruleguard v0.3.18 h1:sd+abO1PEI9fkYennwzHn9kl3nqP6M5vE7FiOzZ+5CE= -github.com/quasilyte/go-ruleguard v0.3.18/go.mod h1:lOIzcYlgxrQ2sGJ735EHXmf/e9MJ516j16K/Ifcttvs= -github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.21/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f h1:6Gtn2i04RD0gVyYf2/IUMTIs+qYleBt4zxDqkLTcu4U= -github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= +github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= +github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= +github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/stdinfo 
v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/remyoudompheng/go-dbus v0.0.0-20121104212943-b7232d34b1d5/go.mod h1:+u151txRmLpwxBmpYn9z3d1sdJdjRPQpsXuYeY9jNls= -github.com/remyoudompheng/go-liblzma v0.0.0-20190506200333-81bf2d431b96/go.mod h1:90HvCY7+oHHUKkbeMCiHt1WuFR2/hPJ9QrljDG+v6ls= -github.com/remyoudompheng/go-misc v0.0.0-20190427085024-2d6ac652a50e/go.mod h1:80FQABjoFzZ2M5uEa6FUaJYEmqU2UOKojlFVak1UAwI= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= -github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= -github.com/ryancurrah/gomodguard v1.2.2/go.mod h1:tpI+C/nzvfUR3bF28b5QHpTn/jM/zlGniI++6ZlIWeE= -github.com/ryancurrah/gomodguard v1.2.4 h1:CpMSDKan0LtNGGhPrvupAoLeObRFjND8/tU1rEOtBp4= -github.com/ryancurrah/gomodguard v1.2.4/go.mod h1:+Kem4VjWwvFpUJRJSwa16s1tBJe+vbv02+naTow2f6M= -github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= -github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/securego/gosec/v2 v2.8.0/go.mod h1:hJZ6NT5TqoY+jmOsaxAV4cXoEdrMRLVaNPnSpUCvCZs= -github.com/securego/gosec/v2 v2.13.1 h1:7mU32qn2dyC81MH9L2kefnQyRMUarfDER3iQyMHcjYM= -github.com/securego/gosec/v2 v2.13.1/go.mod h1:EO1sImBMBWFjOTFzMWfTRrZW6M15gm60ljzrmy/wtHo= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= 
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/ryancurrah/gomodguard v1.3.1 h1:fH+fUg+ngsQO0ruZXXHnA/2aNllWA1whly4a6UvyzGE= +github.com/ryancurrah/gomodguard v1.3.1/go.mod h1:DGFHzEhi6iJ0oIDfMuo3TgrS+L9gZvrEfmjjuelnRU0= +github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= +github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= +github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.25.0 h1:IK8SI2QyFzy/2OD2PYnhy84dpfNo9qADrRt6LH8vSzU= +github.com/sashamelentyev/usestdlibvars v1.25.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= +github.com/securego/gosec/v2 v2.19.0 h1:gl5xMkOI0/E6Hxx0XCY2XujA3V7SNSefA8sC+3f1gnk= +github.com/securego/gosec/v2 v2.19.0/go.mod h1:hOkDcHz9J/XIgIlPDXalxjeVYsHxoWUc5zJSHxcB8YM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.5/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= github.com/shurcooL/githubv4 v0.0.0-20221126192849-0b5c4c7994eb h1:foJysa74+t41fG7adnt+TkfcNxQUWid8R/HlXe+Mmbw= github.com/shurcooL/githubv4 v0.0.0-20221126192849-0b5c4c7994eb/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/graphql v0.0.0-20220606043923-3cf50f8a0a29 h1:B1PEwpArrNp4dkQrfxh/abbBAOZBVp0ds+fBEOUOqOc= github.com/shurcooL/graphql v0.0.0-20220606043923-3cf50f8a0a29/go.mod h1:AuYgA5Kyo4c7HfUmvRGs/6rGlMMV/6B1bVnB9JxJEEg= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod 
h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ= -github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.4.1 h1:asw9sl74539yqavKaglDM5hFpdJVK0Y5Dr/JOgQ89nQ= -github.com/spf13/afero v1.4.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= +github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= +github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= +github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= +github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= -github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= 
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= +github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4 
h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A= -github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= +github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= +github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.7/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= -github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= -github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= -github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.1.0/go.mod h1:crK5eI4RGSUrb9duDTQ5GqcukbKZvi85vX6nbhsBAeI= -github.com/tomarrell/wrapcheck/v2 v2.3.0 h1:i3DNjtyyL1xwaBQOsPPk8LAcpayWfQv2rxNi9b/eEx4= -github.com/tomarrell/wrapcheck/v2 v2.3.0/go.mod h1:aF5rnkdtqNWP/gC7vPUO5pKsB0Oac2FDTQP4F+dpZMU= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/tetafro/godot v1.4.16 h1:4ChfhveiNLk4NveAZ9Pu2AN8QZ2nkUGFuadM9lrr5D0= +github.com/tetafro/godot v1.4.16/go.mod 
h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= +github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= +github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= +github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= +github.com/tomarrell/wrapcheck/v2 v2.8.3 h1:5ov+Cbhlgi7s/a42BprYoxsr73CbdMUTzE3bRDFASUs= +github.com/tomarrell/wrapcheck/v2 v2.8.3/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= -github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= -github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= -github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= -github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= -github.com/vmihailenco/msgpack v3.3.3+incompatible h1:wapg9xDUZDzGCNFlwc5SqI1rvcciqcxEHac4CYj89xI= +github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= +github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= +github.com/ultraware/whitespace v0.1.0 h1:O1HKYoh0kIeqE8sFqZf1o0qbORXUCOQFrlaQyZsczZw= +github.com/ultraware/whitespace v0.1.0/go.mod h1:/se4r3beMFNmewJ4Xmz0nMQ941GJt+qmSHGP9emHYe0= +github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI= +github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= -github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod 
h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= +github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= +github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw= +github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1676,103 +673,59 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty 
v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.8.2 h1:u+xZfBKgpycDnTNjPhGiTEYZS5qS/Sb5MqSfm7vzcjg= -github.com/zclconf/go-cty v1.8.2/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= -github.com/zclconf/go-cty-yaml v1.0.2 h1:dNyg4QLTrv2IfJpm7Wtxi55ed5gLGOlPrZ6kMd51hY0= -github.com/zclconf/go-cty-yaml v1.0.2/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= -github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= -go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= +github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI= +github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4= +gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA= +go-simpler.org/assert v0.7.0 h1:OzWWZqfNxt8cLS+MlUp6Tgk1HjPkmgdKBq9qvy8lZsA= +go-simpler.org/assert v0.7.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= +go-simpler.org/musttag v0.9.0 h1:Dzt6/tyP9ONr5g9h9P3cnYWCxeBFRkd0uJL/w+1Mxos= +go-simpler.org/musttag v0.9.0/go.mod h1:gA9nThnalvNSKpEoyp3Ko4/vCX2xTpqKoUtNqXOnVR4= +go-simpler.org/sloglint v0.5.0 h1:2YCcd+YMuYpuqthCgubcF5lBSjb6berc5VMOYUHKrpY= +go-simpler.org/sloglint v0.5.0/go.mod h1:EUknX5s8iXqf18KQxKnaBHUPVriiPnOrPjjJcsaTcSQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/tools 
v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= +go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= -golang.org/x/crypto v0.18.0 
h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 h1:tnebWN09GYg9OLPss1KXj8txwZc6X6uMr6VFdcGNbHw= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= +golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 h1:Ic/qN6TEifvObMGQy72k0n1LlJr7DjWWEi+MOsDOiSk= -golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= +golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1783,8 +736,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1793,27 +744,19 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1825,10 +768,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1836,94 +775,37 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net 
v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net 
v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= 
-golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1931,41 +813,23 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1978,137 
+842,72 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text 
v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190228203856-589c23e65e65/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -2118,24 +917,16 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -2148,88 +939,39 @@ golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 
-golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201028111035-eafbe7b904eb/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools 
v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= -golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors 
v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= -gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= -gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= @@ -2242,65 +984,15 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.34.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api 
v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= -google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= -google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E= -google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= -google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= -google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= -google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod 
h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -2308,7 +1000,6 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -2322,186 +1013,30 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= 
-google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= 
-google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= -google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.1/go.mod 
h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= -google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2514,48 +1049,29 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod 
h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2565,56 +1081,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= -honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= -lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 
v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= -modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= -modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= -modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= -modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= -modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= -modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= -modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= -modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= -modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM= -mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= -mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 h1:seuXWbRB1qPrS3NQnHmFKLJLtskWyueeIzmLXghMGgk= -mvdan.cc/unparam v0.0.0-20220706161116-678bad134442/go.mod h1:F/Cxw/6mVrNKqrR2YjFf5CaW0Bw4RL8RfbEf4GRggJk= +honnef.co/go/tools v0.4.7 
h1:9MDAWxMoSnB6QoSqiVr7P5mtkT9pOc1kSxchzPCnqJs= +honnef.co/go/tools v0.4.7/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= +mvdan.cc/gofumpt v0.6.0 h1:G3QvahNDmpD+Aek/bNOLrFR2XC6ZAdo62dZu65gmwGo= +mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA= +mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 h1:zCr3iRRgdk5eIikZNDphGcM6KGVTx3Yu+/Uu9Es254w= +mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14/go.mod h1:ZzZjEpJDOmx8TdVU6umamY3Xy0UAQUI2DHbf05USVbI= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/main.go b/main.go index 83e613697d..3184077fcf 100644 --- a/main.go +++ b/main.go @@ -1,8 +1,8 @@ package main import ( - "github.com/hashicorp/terraform-plugin-sdk/plugin" - "github.com/integrations/terraform-provider-github/v5/github" + "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" + "github.com/integrations/terraform-provider-github/v6/github" ) func main() { diff --git a/vendor/4d63.com/gocheckcompilerdirectives/LICENSE b/vendor/4d63.com/gocheckcompilerdirectives/LICENSE new file mode 100644 index 0000000000..3f12625b06 --- /dev/null +++ b/vendor/4d63.com/gocheckcompilerdirectives/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Leigh McCulloch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
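The main.go hunk above moves the provider entrypoint to terraform-plugin-sdk/v2 and to the /v6 module path, as Go's semantic import versioning requires for a new major version. For orientation, a minimal v2-style entrypoint is sketched below; it assumes github.Provider() returns a *schema.Provider matching the SDK's plugin.ProviderFunc signature, which this hunk does not show.

```go
package main

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/plugin"

	"github.com/integrations/terraform-provider-github/v6/github"
)

func main() {
	// plugin.Serve blocks, serving the provider over the go-plugin RPC
	// protocol until Terraform Core tells it to shut down.
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: func() *schema.Provider {
			return github.Provider()
		},
	})
}
```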
diff --git a/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go b/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go new file mode 100644 index 0000000000..19948c4547 --- /dev/null +++ b/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go @@ -0,0 +1,105 @@ +package checkcompilerdirectives + +import ( + "strings" + + "golang.org/x/tools/go/analysis" +) + +func Analyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "gocheckcompilerdirectives", + Doc: "Checks that go compiler directive comments (//go:) are valid.", + Run: run, + } +} + +func run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + for _, group := range file.Comments { + for _, comment := range group.List { + text := comment.Text + if !strings.HasPrefix(text, "//") { + continue + } + start := 2 + spaces := 0 + for _, c := range text[start:] { + if c == ' ' { + spaces++ + continue + } + break + } + start += spaces + if !strings.HasPrefix(text[start:], "go:") { + continue + } + start += 3 + end := strings.Index(text[start:], " ") + if end == -1 { + continue + } + directive := text[start : start+end] + if len(directive) == 0 { + continue + } + prefix := text[:start+end] + // Leading whitespace will cause the go directive to be ignored + // by the compiler with no error, causing it not to work. This + // is an easy mistake. + if spaces > 0 { + pass.ReportRangef(comment, "compiler directive contains space: %s", prefix) + } + // If the directive is unknown it will be ignored by the + // compiler with no error. This is an easy mistake to make, + // especially if you typo a directive. + if !isKnown(directive) { + pass.ReportRangef(comment, "compiler directive unrecognized: %s", prefix) + } + } + } + } + return nil, nil +} + +func isKnown(directive string) bool { + for _, k := range known { + if directive == k { + return true + } + } + return false +} + +var known = []string{ + // Found by running the following command on the source of go. 
+ // git grep -o -E -h '//go:[a-z_]+' -- ':!**/*_test.go' ':!test/' ':!**/testdata/**' | sort -u + "binary", + "build", + "buildsomethingelse", + "cgo_dynamic_linker", + "cgo_export_dynamic", + "cgo_export_static", + "cgo_import_dynamic", + "cgo_import_static", + "cgo_ldflag", + "cgo_unsafe_args", + "embed", + "generate", + "linkname", + "name", + "nocheckptr", + "noescape", + "noinline", + "nointerface", + "norace", + "nosplit", + "notinheap", + "nowritebarrier", + "nowritebarrierrec", + "systemstack", + "uintptrescapes", + "uintptrkeepalive", + "yeswritebarrierrec", +} diff --git a/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go b/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go index 9ae889d4ba..edf9193ecb 100644 --- a/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go +++ b/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go @@ -5,6 +5,7 @@ import ( "fmt" "go/ast" "go/token" + "go/types" "strings" "golang.org/x/tools/go/analysis" @@ -48,12 +49,12 @@ func flags() flag.FlagSet { return *flags } -func isAllowed(cm ast.CommentMap, v ast.Node) bool { +func isAllowed(cm ast.CommentMap, v ast.Node, ti *types.Info) bool { switch i := v.(type) { case *ast.GenDecl: return hasEmbedComment(cm, i) case *ast.Ident: - return i.Name == "_" || i.Name == "version" || looksLikeError(i) || identHasEmbedComment(cm, i) + return i.Name == "_" || i.Name == "version" || isError(i, ti) || identHasEmbedComment(cm, i) case *ast.CallExpr: if expr, ok := i.Fun.(*ast.SelectorExpr); ok { return isAllowedSelectorExpression(expr) @@ -86,10 +87,14 @@ func isAllowedSelectorExpression(v *ast.SelectorExpr) bool { return false } +// isError reports whether the AST identifier looks like +// an error and implements the error interface. +func isError(i *ast.Ident, ti *types.Info) bool { + return looksLikeError(i) && implementsError(i, ti) +} + // looksLikeError returns true if the AST identifier starts // with 'err' or 'Err', or false otherwise. -// -// TODO: https://github.com/leighmcculloch/gochecknoglobals/issues/5 func looksLikeError(i *ast.Ident) bool { prefix := "err" if i.IsExported() { @@ -98,6 +103,14 @@ func looksLikeError(i *ast.Ident) bool { return strings.HasPrefix(i.Name, prefix) } +// implementsError reports whether the AST identifier +// implements the error interface. 
+func implementsError(i *ast.Ident, ti *types.Info) bool { + t := ti.TypeOf(i) + et := types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + return types.Implements(t, et) +} + func identHasEmbedComment(cm ast.CommentMap, i *ast.Ident) bool { if i.Obj == nil { return false @@ -146,7 +159,7 @@ func checkNoGlobals(pass *analysis.Pass) (interface{}, error) { if genDecl.Tok != token.VAR { continue } - if isAllowed(fileCommentMap, genDecl) { + if isAllowed(fileCommentMap, genDecl, pass.TypesInfo) { continue } for _, spec := range genDecl.Specs { @@ -154,7 +167,7 @@ func checkNoGlobals(pass *analysis.Pass) (interface{}, error) { onlyAllowedValues := false for _, vn := range valueSpec.Values { - if isAllowed(fileCommentMap, vn) { + if isAllowed(fileCommentMap, vn, pass.TypesInfo) { onlyAllowedValues = true continue } @@ -168,7 +181,7 @@ func checkNoGlobals(pass *analysis.Pass) (interface{}, error) { } for _, vn := range valueSpec.Names { - if isAllowed(fileCommentMap, vn) { + if isAllowed(fileCommentMap, vn, pass.TypesInfo) { continue } diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/cloud.google.com/go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/LICENSE b/vendor/cloud.google.com/go/compute/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/cloud.google.com/go/compute/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
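The vendored checkcompilerdirectives analyzer added above scans every comment, skips past any spaces after //, and reports //go: directives that the compiler would silently ignore. Since Analyzer() returns a standard *analysis.Analyzer, any go/analysis driver can run it; a minimal standalone driver is sketched below, assuming the vendored import path and the singlechecker helper from golang.org/x/tools (neither is wired up by this diff itself).

```go
package main

import (
	"4d63.com/gocheckcompilerdirectives/checkcompilerdirectives"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	// singlechecker turns a single *analysis.Analyzer into a CLI that
	// accepts package patterns, e.g. ./...
	singlechecker.Main(checkcompilerdirectives.Analyzer())
}
```

Run against a file containing `// go:build linux`, it reports that the directive contains a space: with that space, the compiler treats the line as an ordinary comment rather than a build constraint.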
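The check_no_globals.go hunks above tighten the err-prefix exemption: a global must now both look like an error (err/Err prefix) and, via types.Implements, actually satisfy the error interface. A hedged sketch of the effect on a hypothetical package:

```go
package example

import "errors"

// Still allowed: the Err prefix plus a value whose type implements
// the error interface, so the new isError check passes.
var ErrNotFound = errors.New("not found")

// Now flagged: the err prefix alone no longer exempts a global,
// because int does not implement error.
var errCount = 0
```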
diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go deleted file mode 100644 index e939b9f5e0..0000000000 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -// Version is the current tagged release of the library. -const Version = "1.20.1" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md deleted file mode 100644 index 06b957349a..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ /dev/null @@ -1,19 +0,0 @@ -# Changes - -## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) - - -### Bug Fixes - -* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) - -## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) - - -### Bug Fixes - -* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) - -## [0.1.0] (2022-10-26) - -Initial release of metadata being it's own module. diff --git a/vendor/cloud.google.com/go/compute/metadata/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md deleted file mode 100644 index f940fb2c85..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# Compute API - -[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) - -This is a utility library for communicating with Google Cloud metadata service -on Google Cloud. - -## Install - -```bash -go get cloud.google.com/go/compute/metadata -``` - -## Go Version Support - -See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) -section in the root directory's README. - -## Contributing - -Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) -document for details. - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. See -[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) -for more information. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go deleted file mode 100644 index c17faa142a..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ /dev/null @@ -1,543 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metadata provides access to Google Compute Engine (GCE) -// metadata and API service accounts. -// -// This package is a wrapper around the GCE metadata service, -// as documented at https://cloud.google.com/compute/docs/metadata/overview. -package metadata // import "cloud.google.com/go/compute/metadata" - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strings" - "sync" - "time" -) - -const ( - // metadataIP is the documented metadata server IP address. - metadataIP = "169.254.169.254" - - // metadataHostEnv is the environment variable specifying the - // GCE metadata hostname. If empty, the default value of - // metadataIP ("169.254.169.254") is used instead. - // This is variable name is not defined by any spec, as far as - // I know; it was made up for the Go package. 
- metadataHostEnv = "GCE_METADATA_HOST" - - userAgent = "gcloud-golang/0.1" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var defaultClient = &Client{hc: newDefaultHTTPClient()} - -func newDefaultHTTPClient() *http.Client { - return &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - IdleConnTimeout: 60 * time.Second, - }, - Timeout: 5 * time.Second, - } -} - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". -// -// This error is not returned if the value is defined to be the empty -// string. -type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -func (c *cachedValue) get(cl *Client) (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = cl.getTrimmed(c.k) - } else { - v, err = cl.Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var ( - onGCEOnce sync.Once - onGCE bool -) - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - onGCEOnce.Do(initOnGCE) - return onGCE -} - -func initOnGCE() { - onGCE = testOnGCE() -} - -func testOnGCE() bool { - // The user explicitly said they're on GCE, so trust them. - if os.Getenv(metadataHostEnv) != "" { - return true - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resc := make(chan bool, 2) - - // Try two strategies in parallel. - // See https://github.com/googleapis/google-cloud-go/issues/194 - go func() { - req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) - req.Header.Set("User-Agent", userAgent) - res, err := newDefaultHTTPClient().Do(req.WithContext(ctx)) - if err != nil { - resc <- false - return - } - defer res.Body.Close() - resc <- res.Header.Get("Metadata-Flavor") == "Google" - }() - - go func() { - resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") - if err != nil || len(addrs) == 0 { - resc <- false - return - } - resc <- strsContains(addrs, metadataIP) - }() - - tryHarder := systemInfoSuggestsGCE() - if tryHarder { - res := <-resc - if res { - // The first strategy succeeded, so let's use it. - return true - } - // Wait for either the DNS or metadata server probe to - // contradict the other one and say we are running on - // GCE. Give it a lot of time to do so, since the system - // info already suggests we're running on a GCE BIOS. - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case res = <-resc: - return res - case <-timer.C: - // Too slow. Who knows what this system is. - return false - } - } - - // There's no hint from the system info that we're running on - // GCE, so use the first probe's result as truth, whether it's - // true or false. The goal here is to optimize for speed for - // users who are NOT running on GCE. We can't assume that - // either a DNS lookup or an HTTP request to a blackholed IP - // address is fast. Worst case this should return when the - // metaClient's Transport.ResponseHeaderTimeout or - // Transport.Dial.Timeout fires (in two seconds). 
- return <-resc -} - -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. - return false - } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - -// Subscribe calls Client.Subscribe on the default client. -func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return defaultClient.Subscribe(suffix, fn) -} - -// Get calls Client.Get on the default client. -func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } - -// ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } - -// NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } - -// InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } - -// ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } - -// Email calls Client.Email on the default client. -func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } - -// Hostname returns the instance's hostname. This will be of the form -// "<instanceID>.c.<projID>.internal". -func Hostname() (string, error) { return defaultClient.Hostname() } - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } - -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } - -// InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { return defaultClient.InstanceName() } - -// Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { return defaultClient.Zone() } - -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } - -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } - -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. -func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) -} - -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. -func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) -} - -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } - -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -// A Client provides metadata.
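The package-level helpers above are thin wrappers that all delegate to a shared default client, so typical callers gate on OnGCE and then read values directly. A minimal usage sketch of the API being vendored out here, assuming the process actually runs on GCE; only exported functions shown above are used:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE probes the metadata IP and metadata.google.internal in
	// parallel, as testOnGCE above shows.
	if !metadata.OnGCE() {
		log.Fatal("not on GCE; metadata server unavailable")
	}

	proj, err := metadata.ProjectID() // cached after the first lookup
	if err != nil {
		log.Fatalf("project ID: %v", err)
	}
	zone, err := metadata.Zone() // trailing element of "projects/<projNum>/zones/<zoneName>"
	if err != nil {
		log.Fatalf("zone: %v", err)
	}
	fmt.Printf("project %s, zone %s\n", proj, zone)
}
```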
-type Client struct { - hc *http.Client -} - -// NewClient returns a Client that can be used to fetch metadata. -// Returns the client that uses the specified http.Client for HTTP requests. -// If nil is specified, returns the default client. -func NewClient(c *http.Client) *Client { - if c == nil { - return defaultClient - } - - return &Client{hc: c} -} - -// getETag returns a value from the metadata service as well as the associated ETag. -// This func is otherwise equivalent to Get. -func (c *Client) getETag(suffix string) (value, etag string, err error) { - ctx := context.TODO() - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. - host = metadataIP - } - suffix = strings.TrimLeft(suffix, "/") - u := "http://" + host + "/computeMetadata/v1/" + suffix - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return "", "", err - } - req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) - var res *http.Response - var reqErr error - retryer := newRetryer() - for { - res, reqErr = c.hc.Do(req) - var code int - if res != nil { - code = res.StatusCode - } - if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { - if err := sleep(ctx, delay); err != nil { - return "", "", err - } - continue - } - break - } - if reqErr != nil { - return "", "", reqErr - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} - } - return string(all), res.Header.Get("Etag"), nil -} - -// Get returns a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func (c *Client) Get(suffix string) (string, error) { - val, _, err := c.getETag(suffix) - return val, err -} - -func (c *Client) getTrimmed(suffix string) (s string, err error) { - s, err = c.Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} - -// ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } - -// NumericProjectID returns the current instance's numeric project ID. -func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } - -// InstanceID returns the current VM's numeric instance ID. 
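Because getETag above resolves the host from GCE_METADATA_HOST on every request, the metadata server can be spoofed for local testing, exactly as its comment notes. A hedged sketch of that pattern; the httptest fake and the choice of ProjectID are my own scaffolding, not part of this library:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
	"strings"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// A minimal fake; a faithful one would also check for the
	// Metadata-Flavor: Google header, which the client always sends.
	fake := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/computeMetadata/v1/project/project-id" {
			fmt.Fprint(w, "fake-project")
			return
		}
		http.NotFound(w, r) // surfaces as NotDefinedError to callers
	}))
	defer fake.Close()

	// GCE_METADATA_HOST takes host[:port] without a scheme;
	// getETag prepends "http://" itself.
	os.Setenv("GCE_METADATA_HOST", strings.TrimPrefix(fake.URL, "http://"))

	proj, err := metadata.ProjectID() // resolved via the fake server
	fmt.Println(proj, err)            // fake-project <nil>
}
```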
-func (c *Client) InstanceID() (string, error) { return instID.get(c) } - -// InternalIP returns the instance's primary internal IP address. -func (c *Client) InternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/ip") -} - -// Email returns the email address associated with the service account. -// The account may be empty or the string "default" to use the instance's -// main account. -func (c *Client) Email(serviceAccount string) (string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email") -} - -// ExternalIP returns the instance's primary external (public) IP address. -func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") -} - -// Hostname returns the instance's hostname. This will be of the form -// "<instanceID>.c.<projID>.internal". -func (c *Client) Hostname() (string, error) { - return c.getTrimmed("instance/hostname") -} - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func (c *Client) InstanceTags() ([]string, error) { - var s []string - j, err := c.Get("instance/tags") - if err != nil { - return nil, err - } - if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { - return nil, err - } - return s, nil -} - -// InstanceName returns the current VM's instance ID string. -func (c *Client) InstanceName() (string, error) { - return c.getTrimmed("instance/name") -} - -// Zone returns the current VM's zone, such as "us-central1-b". -func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed("instance/zone") - // zone is of the form "projects/<projNum>/zones/<zoneName>". - if err != nil { - return "", err - } - return zone[strings.LastIndex(zone, "/")+1:], nil -} - -// InstanceAttributes returns the list of user-defined attributes, -// assigned when initially creating a GCE VM instance. The value of an -// attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } - -// ProjectAttributes returns the list of user-defined attributes -// applying to the project as a whole, not just this VM. The value of -// an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } - -// InstanceAttributeValue returns the value of the provided VM -// instance attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account.
-func (c *Client) Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") -} - -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// The suffix may contain query parameters. -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. -func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 - - // First check to see if the metadata value exists at all. - val, lastETag, err := c.getETag(suffix) - if err != nil { - return err - } - - if err := fn(val, true); err != nil { - return err - } - - ok := true - if strings.ContainsRune(suffix, '?') { - suffix += "&wait_for_change=true&last_etag=" - } else { - suffix += "?wait_for_change=true&last_etag=" - } - for { - val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. - } - ok = false - } - lastETag = etag - - if err := fn(val, ok); err != nil || !ok { - return err - } - } -} - -// Error contains an error response from the server. -type Error struct { - // Code is the HTTP response status code. - Code int - // Message is the server response message. - Message string -} - -func (e *Error) Error() string { - return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) -} diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go deleted file mode 100644 index 0f18f3cda1..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/retry.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metadata - -import ( - "context" - "io" - "math/rand" - "net/http" - "time" -) - -const ( - maxRetryAttempts = 5 -) - -var ( - syscallRetryable = func(err error) bool { return false } -) - -// defaultBackoff is basically equivalent to gax.Backoff without the need for -// the dependency. -type defaultBackoff struct { - max time.Duration - mul float64 - cur time.Duration -} - -func (b *defaultBackoff) Pause() time.Duration { - d := time.Duration(1 + rand.Int63n(int64(b.cur))) - b.cur = time.Duration(float64(b.cur) * b.mul) - if b.cur > b.max { - b.cur = b.max - } - return d -} - -// sleep is the equivalent of gax.Sleep without the need for the dependency. 
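Subscribe above implements a long poll: it re-requests the same suffix with wait_for_change=true plus the last ETag, invoking the callback on each change until the callback returns a non-nil error or the value is deleted. A small sketch of watching one instance attribute; the attribute name my-config is hypothetical:

```go
package main

import (
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Blocks in a long poll; fn is first called with the current value,
	// then once per change. ok is false if the key was deleted.
	err := metadata.Subscribe("instance/attributes/my-config", func(v string, ok bool) error {
		if !ok {
			log.Println("my-config was deleted; Subscribe will return")
			return nil
		}
		log.Printf("my-config is now %q", v)
		return nil // returning a non-nil error would also end the subscription
	})
	if err != nil {
		log.Fatalf("subscribe failed: %v", err)
	}
}
```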
-func sleep(ctx context.Context, d time.Duration) error { - t := time.NewTimer(d) - select { - case <-ctx.Done(): - t.Stop() - return ctx.Err() - case <-t.C: - return nil - } -} - -func newRetryer() *metadataRetryer { - return &metadataRetryer{bo: &defaultBackoff{ - cur: 100 * time.Millisecond, - max: 30 * time.Second, - mul: 2, - }} -} - -type backoff interface { - Pause() time.Duration -} - -type metadataRetryer struct { - bo backoff - attempts int -} - -func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) { - if status == http.StatusOK { - return 0, false - } - retryOk := shouldRetry(status, err) - if !retryOk { - return 0, false - } - if r.attempts == maxRetryAttempts { - return 0, false - } - r.attempts++ - return r.bo.Pause(), true -} - -func shouldRetry(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - // Transient network errors should be retried. - if syscallRetryable(err) { - return true - } - if err, ok := err.(interface{ Temporary() bool }); ok { - if err.Temporary() { - return true - } - } - if err, ok := err.(interface{ Unwrap() error }); ok { - return shouldRetry(status, err.Unwrap()) - } - return false -} diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go deleted file mode 100644 index bb412f8917..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux -// +build linux - -package metadata - -import "syscall" - -func init() { - // Initialize syscallRetryable to return true on transient socket-level - // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } -} diff --git a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go deleted file mode 100644 index 4cef485008..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the {{.RootMod}} import, won't actually become part of -// the resultant binary. -//go:build modhack -// +build modhack - -package metadata - -// Necessary for safely adding multi-module repo. 
See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go/compute/internal" diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md deleted file mode 100644 index 40ae15de52..0000000000 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ /dev/null @@ -1,97 +0,0 @@ -# Changes - -## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.12.0...iam/v0.13.0) (2023-03-15) - - -### Features - -* **iam:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd)) - -## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.11.0...iam/v0.12.0) (2023-02-17) - - -### Features - -* **iam:** Migrate to new stubs ([a61ddcd](https://github.com/googleapis/google-cloud-go/commit/a61ddcd3041c7af4a15109dc4431f9b327c497fb)) - -## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.10.0...iam/v0.11.0) (2023-02-16) - - -### Features - -* **iam:** Start generating proto stubs ([970d763](https://github.com/googleapis/google-cloud-go/commit/970d763531b54b2bc75d7ff26a20b6e05150cab8)) - -## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.9.0...iam/v0.10.0) (2023-01-04) - - -### Features - -* **iam:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) - -## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.8.0...iam/v0.9.0) (2022-12-15) - - -### Features - -* **iam:** Rewrite iam sigs and update proto import ([#7137](https://github.com/googleapis/google-cloud-go/issues/7137)) ([ad67fa3](https://github.com/googleapis/google-cloud-go/commit/ad67fa36c263c161226f7fecbab5221592374dca)) - -## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05) - - -### Features - -* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38)) - -## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03) - - -### Features - -* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) - -## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25) - - -### Features - -* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc)) - -## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28) - - -### Features - -* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7)) - -## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06) - - -### Features - -* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50)) - -## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23) - - -### Features - -* **iam:** set versionClient to module version 
([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) - -## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.1.1...iam/v0.2.0) (2022-02-14) - - -### Features - -* **iam:** add file for tracking version ([17b36ea](https://github.com/googleapis/google-cloud-go/commit/17b36ead42a96b1a01105122074e65164357519e)) - -### [0.1.1](https://www.github.com/googleapis/google-cloud-go/compare/iam/v0.1.0...iam/v0.1.1) (2022-01-14) - - -### Bug Fixes - -* **iam:** run formatter ([#5277](https://www.github.com/googleapis/google-cloud-go/issues/5277)) ([8682e4e](https://www.github.com/googleapis/google-cloud-go/commit/8682e4ed57a4428a659fbc225f56c91767e2a4a9)) - -## v0.1.0 - -This is the first tag to carve out iam as its own module. See -[Add a module to a multi-module repository](https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository). diff --git a/vendor/cloud.google.com/go/iam/LICENSE b/vendor/cloud.google.com/go/iam/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/cloud.google.com/go/iam/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/cloud.google.com/go/iam/README.md b/vendor/cloud.google.com/go/iam/README.md deleted file mode 100644 index 0072cc9e29..0000000000 --- a/vendor/cloud.google.com/go/iam/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# IAM API - -[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/iam.svg)](https://pkg.go.dev/cloud.google.com/go/iam) - -Go Client Library for IAM API. - -## Install - -```bash -go get cloud.google.com/go/iam -``` - -## Stability - -The stability of this module is indicated by SemVer. - -However, a `v1+` module may have breaking changes in two scenarios: - -* Packages with `alpha` or `beta` in the import path -* The GoDoc has an explicit stability disclaimer (for example, for an experimental feature). - -## Go Version Support - -See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) -section in the root directory's README. 
- -## Authorization - -See the [Authorization](https://github.com/googleapis/google-cloud-go#authorization) -section in the root directory's README. - -## Contributing - -Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) -document for details. - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. See -[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) -for more information. diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go deleted file mode 100644 index 21079f65c3..0000000000 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ /dev/null @@ -1,672 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 -// source: google/iam/v1/iam_policy.proto - -package iampb - -import ( - context "context" - reflect "reflect" - sync "sync" - - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Request message for `SetIamPolicy` method. -type SetIamPolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // REQUIRED: The resource for which the policy is being specified. - // See the operation documentation for the appropriate value for this field. - Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // REQUIRED: The complete policy to be applied to the `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as Projects) - // might reject them. - Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"` - // OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only - // the fields in the mask will be modified. 
If no mask is provided, the - // following default mask is used: - // - // `paths: "bindings, etag"` - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` -} - -func (x *SetIamPolicyRequest) Reset() { - *x = SetIamPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetIamPolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetIamPolicyRequest) ProtoMessage() {} - -func (x *SetIamPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetIamPolicyRequest.ProtoReflect.Descriptor instead. -func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { - return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{0} -} - -func (x *SetIamPolicyRequest) GetResource() string { - if x != nil { - return x.Resource - } - return "" -} - -func (x *SetIamPolicyRequest) GetPolicy() *Policy { - if x != nil { - return x.Policy - } - return nil -} - -func (x *SetIamPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -// Request message for `GetIamPolicy` method. -type GetIamPolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // REQUIRED: The resource for which the policy is being requested. - // See the operation documentation for the appropriate value for this field. - Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // OPTIONAL: A `GetPolicyOptions` object for specifying options to - // `GetIamPolicy`. - Options *GetPolicyOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` -} - -func (x *GetIamPolicyRequest) Reset() { - *x = GetIamPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetIamPolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetIamPolicyRequest) ProtoMessage() {} - -func (x *GetIamPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetIamPolicyRequest.ProtoReflect.Descriptor instead. -func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { - return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{1} -} - -func (x *GetIamPolicyRequest) GetResource() string { - if x != nil { - return x.Resource - } - return "" -} - -func (x *GetIamPolicyRequest) GetOptions() *GetPolicyOptions { - if x != nil { - return x.Options - } - return nil -} - -// Request message for `TestIamPermissions` method. 
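These generated request messages are plain protobuf structs, so constructing them is mechanical. A hedged sketch building a SetIamPolicyRequest with the default mask documented above; the resource name is purely illustrative, and iampb.Policy is defined elsewhere in the same generated package:

```go
package main

import (
	"fmt"

	"cloud.google.com/go/iam/apiv1/iampb"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	req := &iampb.SetIamPolicyRequest{
		// Resource names are service-specific; this Pub/Sub-style name
		// is hypothetical.
		Resource: "projects/my-project/topics/my-topic",
		// An empty policy is valid, though some services reject it,
		// per the field comment above.
		Policy: &iampb.Policy{},
		UpdateMask: &fieldmaskpb.FieldMask{
			Paths: []string{"bindings", "etag"}, // the documented default mask
		},
	}
	fmt.Println(req.GetResource(), req.GetUpdateMask().GetPaths())
}
```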
-type TestIamPermissionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // REQUIRED: The resource for which the policy detail is being requested. - // See the operation documentation for the appropriate value for this field. - Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // The set of permissions to check for the `resource`. Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For more - // information see - // [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). - Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"` -} - -func (x *TestIamPermissionsRequest) Reset() { - *x = TestIamPermissionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TestIamPermissionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TestIamPermissionsRequest) ProtoMessage() {} - -func (x *TestIamPermissionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TestIamPermissionsRequest.ProtoReflect.Descriptor instead. -func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { - return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{2} -} - -func (x *TestIamPermissionsRequest) GetResource() string { - if x != nil { - return x.Resource - } - return "" -} - -func (x *TestIamPermissionsRequest) GetPermissions() []string { - if x != nil { - return x.Permissions - } - return nil -} - -// Response message for `TestIamPermissions` method. -type TestIamPermissionsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A subset of `TestPermissionsRequest.permissions` that the caller is - // allowed. - Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` -} - -func (x *TestIamPermissionsResponse) Reset() { - *x = TestIamPermissionsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TestIamPermissionsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TestIamPermissionsResponse) ProtoMessage() {} - -func (x *TestIamPermissionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TestIamPermissionsResponse.ProtoReflect.Descriptor instead. 
-func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { - return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{3} -} - -func (x *TestIamPermissionsResponse) GetPermissions() []string { - if x != nil { - return x.Permissions - } - return nil -} - -var File_google_iam_v1_iam_policy_proto protoreflect.FileDescriptor - -var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, - 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, - 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, - 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, - 0x31, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x01, - 0x0a, 0x13, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, - 0x01, 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x06, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, - 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x77, 0x0a, - 0x13, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, - 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x69, 0x0a, 0x19, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, - 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x3e, 0x0a, 0x1a, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x20, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x32, 0xb4, 0x03, 0x0a, 0x09, 0x49, 0x41, 0x4d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x74, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, - 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x9a, 0x01, 0x0a, 0x12, - 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, - 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, - 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, - 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, - 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, - 0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x86, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, - 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, - 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_iam_v1_iam_policy_proto_rawDescOnce sync.Once - file_google_iam_v1_iam_policy_proto_rawDescData = file_google_iam_v1_iam_policy_proto_rawDesc -) - -func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte { - file_google_iam_v1_iam_policy_proto_rawDescOnce.Do(func() { - file_google_iam_v1_iam_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_iam_policy_proto_rawDescData) - }) - return file_google_iam_v1_iam_policy_proto_rawDescData -} - -var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{ - (*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest - (*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest - (*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest - (*TestIamPermissionsResponse)(nil), // 3: google.iam.v1.TestIamPermissionsResponse - (*Policy)(nil), // 4: google.iam.v1.Policy - (*fieldmaskpb.FieldMask)(nil), // 5: google.protobuf.FieldMask - (*GetPolicyOptions)(nil), // 6: google.iam.v1.GetPolicyOptions -} -var file_google_iam_v1_iam_policy_proto_depIdxs = []int32{ - 4, // 0: google.iam.v1.SetIamPolicyRequest.policy:type_name -> google.iam.v1.Policy - 5, // 1: google.iam.v1.SetIamPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask - 6, // 2: google.iam.v1.GetIamPolicyRequest.options:type_name -> google.iam.v1.GetPolicyOptions - 0, // 3: google.iam.v1.IAMPolicy.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 1, // 4: google.iam.v1.IAMPolicy.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 2, // 5: google.iam.v1.IAMPolicy.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 4, // 6: google.iam.v1.IAMPolicy.SetIamPolicy:output_type -> google.iam.v1.Policy - 4, // 7: google.iam.v1.IAMPolicy.GetIamPolicy:output_type -> google.iam.v1.Policy - 3, // 8: google.iam.v1.IAMPolicy.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 6, // [6:9] is the sub-list for method output_type - 3, // [3:6] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // 
[0:3] is the sub-list for field type_name -} - -func init() { file_google_iam_v1_iam_policy_proto_init() } -func file_google_iam_v1_iam_policy_proto_init() { - if File_google_iam_v1_iam_policy_proto != nil { - return - } - file_google_iam_v1_options_proto_init() - file_google_iam_v1_policy_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetIamPolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetIamPolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TestIamPermissionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TestIamPermissionsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_iam_v1_iam_policy_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_google_iam_v1_iam_policy_proto_goTypes, - DependencyIndexes: file_google_iam_v1_iam_policy_proto_depIdxs, - MessageInfos: file_google_iam_v1_iam_policy_proto_msgTypes, - }.Build() - File_google_iam_v1_iam_policy_proto = out.File - file_google_iam_v1_iam_policy_proto_rawDesc = nil - file_google_iam_v1_iam_policy_proto_goTypes = nil - file_google_iam_v1_iam_policy_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// IAMPolicyClient is the client API for IAMPolicy service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type IAMPolicyClient interface { - // Sets the access control policy on the specified resource. Replaces any - // existing policy. - // - // Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. - SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) - // Gets the access control policy for a resource. - // Returns an empty policy if the resource exists and does not have a policy - // set. - GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) - // Returns permissions that a caller has on the specified resource. - // If the resource does not exist, this will return an empty set of - // permissions, not a `NOT_FOUND` error. - // - // Note: This operation is designed to be used for building permission-aware - // UIs and command-line tools, not for authorization checking. 
This operation - // may "fail open" without warning. - TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) -} - -type iAMPolicyClient struct { - cc grpc.ClientConnInterface -} - -func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { - return &iAMPolicyClient{cc} -} - -func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { - out := new(Policy) - err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { - out := new(Policy) - err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) { - out := new(TestIamPermissionsResponse) - err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// IAMPolicyServer is the server API for IAMPolicy service. -type IAMPolicyServer interface { - // Sets the access control policy on the specified resource. Replaces any - // existing policy. - // - // Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. - SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) - // Gets the access control policy for a resource. - // Returns an empty policy if the resource exists and does not have a policy - // set. - GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) - // Returns permissions that a caller has on the specified resource. - // If the resource does not exist, this will return an empty set of - // permissions, not a `NOT_FOUND` error. - // - // Note: This operation is designed to be used for building permission-aware - // UIs and command-line tools, not for authorization checking. This operation - // may "fail open" without warning. - TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) -} - -// UnimplementedIAMPolicyServer can be embedded to have forward compatible implementations. 
-type UnimplementedIAMPolicyServer struct { -} - -func (*UnimplementedIAMPolicyServer) SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") -} -func (*UnimplementedIAMPolicyServer) GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") -} -func (*UnimplementedIAMPolicyServer) TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") -} - -func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { - s.RegisterService(&_IAMPolicy_serviceDesc, srv) -} - -func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetIamPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IAMPolicyServer).SetIamPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetIamPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IAMPolicyServer).GetIamPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TestIamPermissionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IAMPolicyServer).TestIamPermissions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.iam.v1.IAMPolicy/TestIamPermissions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _IAMPolicy_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.iam.v1.IAMPolicy", - HandlerType: (*IAMPolicyServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SetIamPolicy", - Handler: _IAMPolicy_SetIamPolicy_Handler, - }, - { - MethodName: "GetIamPolicy", - Handler: _IAMPolicy_GetIamPolicy_Handler, - }, - { - MethodName: "TestIamPermissions", - Handler: _IAMPolicy_TestIamPermissions_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/iam/v1/iam_policy.proto", -} diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go deleted file mode 100644 index e8a2aca9c7..0000000000 --- 
a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 -// source: google/iam/v1/options.proto - -package iampb - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Encapsulates settings provided to GetIamPolicy. -type GetPolicyOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The maximum policy version that will be used to format the - // policy. - // - // Valid values are 0, 1, and 3. Requests specifying an invalid value will be - // rejected. - // - // Requests for policies with any conditional role bindings must specify - // version 3. Policies with no conditional role bindings may specify any valid - // value or leave the field unset. - // - // The policy in the response might use the policy version that you specified, - // or it might use a lower policy version. For example, if you specify version - // 3, but the policy has no conditional role bindings, the response uses - // version 1. - // - // To learn which resources support conditions in their IAM policies, see the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). - RequestedPolicyVersion int32 `protobuf:"varint,1,opt,name=requested_policy_version,json=requestedPolicyVersion,proto3" json:"requested_policy_version,omitempty"` -} - -func (x *GetPolicyOptions) Reset() { - *x = GetPolicyOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_options_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetPolicyOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPolicyOptions) ProtoMessage() {} - -func (x *GetPolicyOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_options_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPolicyOptions.ProtoReflect.Descriptor instead. 
-func (*GetPolicyOptions) Descriptor() ([]byte, []int) { - return file_google_iam_v1_options_proto_rawDescGZIP(), []int{0} -} - -func (x *GetPolicyOptions) GetRequestedPolicyVersion() int32 { - if x != nil { - return x.RequestedPolicyVersion - } - return 0 -} - -var File_google_iam_v1_options_proto protoreflect.FileDescriptor - -var file_google_iam_v1_options_proto_rawDesc = []byte{ - 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x22, 0x4c, 0x0a, 0x10, - 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, - 0x42, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, - 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_iam_v1_options_proto_rawDescOnce sync.Once - file_google_iam_v1_options_proto_rawDescData = file_google_iam_v1_options_proto_rawDesc -) - -func file_google_iam_v1_options_proto_rawDescGZIP() []byte { - file_google_iam_v1_options_proto_rawDescOnce.Do(func() { - file_google_iam_v1_options_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_options_proto_rawDescData) - }) - return file_google_iam_v1_options_proto_rawDescData -} - -var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_iam_v1_options_proto_goTypes = []interface{}{ - (*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions -} -var file_google_iam_v1_options_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_iam_v1_options_proto_init() } -func file_google_iam_v1_options_proto_init() { - if File_google_iam_v1_options_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetPolicyOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: 
protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_iam_v1_options_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_iam_v1_options_proto_goTypes, - DependencyIndexes: file_google_iam_v1_options_proto_depIdxs, - MessageInfos: file_google_iam_v1_options_proto_msgTypes, - }.Build() - File_google_iam_v1_options_proto = out.File - file_google_iam_v1_options_proto_rawDesc = nil - file_google_iam_v1_options_proto_goTypes = nil - file_google_iam_v1_options_proto_depIdxs = nil -} diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go deleted file mode 100644 index e521db60fa..0000000000 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ /dev/null @@ -1,1169 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 -// source: google/iam/v1/policy.proto - -package iampb - -import ( - reflect "reflect" - sync "sync" - - expr "google.golang.org/genproto/googleapis/type/expr" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The list of valid permission types for which logging can be configured. -// Admin writes are always logged, and are not configurable. -type AuditLogConfig_LogType int32 - -const ( - // Default case. Should never be this. - AuditLogConfig_LOG_TYPE_UNSPECIFIED AuditLogConfig_LogType = 0 - // Admin reads. Example: CloudIAM getIamPolicy - AuditLogConfig_ADMIN_READ AuditLogConfig_LogType = 1 - // Data writes. Example: CloudSQL Users create - AuditLogConfig_DATA_WRITE AuditLogConfig_LogType = 2 - // Data reads. Example: CloudSQL Users list - AuditLogConfig_DATA_READ AuditLogConfig_LogType = 3 -) - -// Enum value maps for AuditLogConfig_LogType. 
-var ( - AuditLogConfig_LogType_name = map[int32]string{ - 0: "LOG_TYPE_UNSPECIFIED", - 1: "ADMIN_READ", - 2: "DATA_WRITE", - 3: "DATA_READ", - } - AuditLogConfig_LogType_value = map[string]int32{ - "LOG_TYPE_UNSPECIFIED": 0, - "ADMIN_READ": 1, - "DATA_WRITE": 2, - "DATA_READ": 3, - } -) - -func (x AuditLogConfig_LogType) Enum() *AuditLogConfig_LogType { - p := new(AuditLogConfig_LogType) - *p = x - return p -} - -func (x AuditLogConfig_LogType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (AuditLogConfig_LogType) Descriptor() protoreflect.EnumDescriptor { - return file_google_iam_v1_policy_proto_enumTypes[0].Descriptor() -} - -func (AuditLogConfig_LogType) Type() protoreflect.EnumType { - return &file_google_iam_v1_policy_proto_enumTypes[0] -} - -func (x AuditLogConfig_LogType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use AuditLogConfig_LogType.Descriptor instead. -func (AuditLogConfig_LogType) EnumDescriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3, 0} -} - -// The type of action performed on a Binding in a policy. -type BindingDelta_Action int32 - -const ( - // Unspecified. - BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0 - // Addition of a Binding. - BindingDelta_ADD BindingDelta_Action = 1 - // Removal of a Binding. - BindingDelta_REMOVE BindingDelta_Action = 2 -) - -// Enum value maps for BindingDelta_Action. -var ( - BindingDelta_Action_name = map[int32]string{ - 0: "ACTION_UNSPECIFIED", - 1: "ADD", - 2: "REMOVE", - } - BindingDelta_Action_value = map[string]int32{ - "ACTION_UNSPECIFIED": 0, - "ADD": 1, - "REMOVE": 2, - } -) - -func (x BindingDelta_Action) Enum() *BindingDelta_Action { - p := new(BindingDelta_Action) - *p = x - return p -} - -func (x BindingDelta_Action) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (BindingDelta_Action) Descriptor() protoreflect.EnumDescriptor { - return file_google_iam_v1_policy_proto_enumTypes[1].Descriptor() -} - -func (BindingDelta_Action) Type() protoreflect.EnumType { - return &file_google_iam_v1_policy_proto_enumTypes[1] -} - -func (x BindingDelta_Action) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use BindingDelta_Action.Descriptor instead. -func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{5, 0} -} - -// The type of action performed on an audit configuration in a policy. -type AuditConfigDelta_Action int32 - -const ( - // Unspecified. - AuditConfigDelta_ACTION_UNSPECIFIED AuditConfigDelta_Action = 0 - // Addition of an audit configuration. - AuditConfigDelta_ADD AuditConfigDelta_Action = 1 - // Removal of an audit configuration. - AuditConfigDelta_REMOVE AuditConfigDelta_Action = 2 -) - -// Enum value maps for AuditConfigDelta_Action. 
-var (
-	AuditConfigDelta_Action_name = map[int32]string{
-		0: "ACTION_UNSPECIFIED",
-		1: "ADD",
-		2: "REMOVE",
-	}
-	AuditConfigDelta_Action_value = map[string]int32{
-		"ACTION_UNSPECIFIED": 0,
-		"ADD":                1,
-		"REMOVE":             2,
-	}
-)
-
-func (x AuditConfigDelta_Action) Enum() *AuditConfigDelta_Action {
-	p := new(AuditConfigDelta_Action)
-	*p = x
-	return p
-}
-
-func (x AuditConfigDelta_Action) String() string {
-	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (AuditConfigDelta_Action) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_iam_v1_policy_proto_enumTypes[2].Descriptor()
-}
-
-func (AuditConfigDelta_Action) Type() protoreflect.EnumType {
-	return &file_google_iam_v1_policy_proto_enumTypes[2]
-}
-
-func (x AuditConfigDelta_Action) Number() protoreflect.EnumNumber {
-	return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use AuditConfigDelta_Action.Descriptor instead.
-func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) {
-	return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{6, 0}
-}
-
-// An Identity and Access Management (IAM) policy, which specifies access
-// controls for Google Cloud resources.
-//
-// A `Policy` is a collection of `bindings`. A `binding` binds one or more
-// `members`, or principals, to a single `role`. Principals can be user
-// accounts, service accounts, Google groups, and domains (such as G Suite). A
-// `role` is a named list of permissions; each `role` can be an IAM predefined
-// role or a user-created custom role.
-//
-// For some types of Google Cloud resources, a `binding` can also specify a
-// `condition`, which is a logical expression that allows access to a resource
-// only if the expression evaluates to `true`. A condition can add constraints
-// based on attributes of the request, the resource, or both. To learn which
-// resources support conditions in their IAM policies, see the
-// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
-//
-// **JSON example:**
-//
-//	{
-//	  "bindings": [
-//	    {
-//	      "role": "roles/resourcemanager.organizationAdmin",
-//	      "members": [
-//	        "user:mike@example.com",
-//	        "group:admins@example.com",
-//	        "domain:google.com",
-//	        "serviceAccount:my-project-id@appspot.gserviceaccount.com"
-//	      ]
-//	    },
-//	    {
-//	      "role": "roles/resourcemanager.organizationViewer",
-//	      "members": [
-//	        "user:eve@example.com"
-//	      ],
-//	      "condition": {
-//	        "title": "expirable access",
-//	        "description": "Does not grant access after Sep 2020",
-//	        "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')",
-//	      }
-//	    }
-//	  ],
-//	  "etag": "BwWWja0YfJA=",
-//	  "version": 3
-//	}
-//
-// **YAML example:**
-//
-//	bindings:
-//	- members:
-//	  - user:mike@example.com
-//	  - group:admins@example.com
-//	  - domain:google.com
-//	  - serviceAccount:my-project-id@appspot.gserviceaccount.com
-//	  role: roles/resourcemanager.organizationAdmin
-//	- members:
-//	  - user:eve@example.com
-//	  role: roles/resourcemanager.organizationViewer
-//	  condition:
-//	    title: expirable access
-//	    description: Does not grant access after Sep 2020
-//	    expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
-//	etag: BwWWja0YfJA=
-//	version: 3
-//
-// For a description of IAM and its features, see the
-// [IAM documentation](https://cloud.google.com/iam/docs/).
-type Policy struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Specifies the format of the policy.
-	//
-	// Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
-	// are rejected.
-	//
-	// Any operation that affects conditional role bindings must specify version
-	// `3`. This requirement applies to the following operations:
-	//
-	//   - Getting a policy that includes a conditional role binding
-	//   - Adding a conditional role binding to a policy
-	//   - Changing a conditional role binding in a policy
-	//   - Removing any role binding, with or without a condition, from a policy
-	//     that includes conditions
-	//
-	// **Important:** If you use IAM Conditions, you must include the `etag` field
-	// whenever you call `setIamPolicy`. If you omit this field, then IAM allows
-	// you to overwrite a version `3` policy with a version `1` policy, and all of
-	// the conditions in the version `3` policy are lost.
-	//
-	// If a policy does not include any conditions, operations on that policy may
-	// specify any valid version or leave the field unset.
-	//
-	// To learn which resources support conditions in their IAM policies, see the
-	// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
-	Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
-	// Associates a list of `members`, or principals, with a `role`. Optionally,
-	// may specify a `condition` that determines how and when the `bindings` are
-	// applied. Each of the `bindings` must contain at least one principal.
-	//
-	// The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250
-	// of these principals can be Google groups. Each occurrence of a principal
-	// counts towards these limits. For example, if the `bindings` grant 50
-	// different roles to `user:alice@example.com`, and not to any other
-	// principal, then you can add another 1,450 principals to the `bindings` in
-	// the `Policy`.
-	Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings,proto3" json:"bindings,omitempty"`
-	// Specifies cloud audit logging configuration for this policy.
-	AuditConfigs []*AuditConfig `protobuf:"bytes,6,rep,name=audit_configs,json=auditConfigs,proto3" json:"audit_configs,omitempty"`
-	// `etag` is used for optimistic concurrency control as a way to help
-	// prevent simultaneous updates of a policy from overwriting each other.
-	// It is strongly suggested that systems make use of the `etag` in the
-	// read-modify-write cycle to perform policy updates in order to avoid race
-	// conditions: An `etag` is returned in the response to `getIamPolicy`, and
-	// systems are expected to put that etag in the request to `setIamPolicy` to
-	// ensure that their change will be applied to the same version of the policy.
-	//
-	// **Important:** If you use IAM Conditions, you must include the `etag` field
-	// whenever you call `setIamPolicy`. If you omit this field, then IAM allows
-	// you to overwrite a version `3` policy with a version `1` policy, and all of
-	// the conditions in the version `3` policy are lost.
- Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` -} - -func (x *Policy) Reset() { - *x = Policy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Policy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Policy) ProtoMessage() {} - -func (x *Policy) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Policy.ProtoReflect.Descriptor instead. -func (*Policy) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{0} -} - -func (x *Policy) GetVersion() int32 { - if x != nil { - return x.Version - } - return 0 -} - -func (x *Policy) GetBindings() []*Binding { - if x != nil { - return x.Bindings - } - return nil -} - -func (x *Policy) GetAuditConfigs() []*AuditConfig { - if x != nil { - return x.AuditConfigs - } - return nil -} - -func (x *Policy) GetEtag() []byte { - if x != nil { - return x.Etag - } - return nil -} - -// Associates `members`, or principals, with a `role`. -type Binding struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Role that is assigned to the list of `members`, or principals. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - // Specifies the principals requesting access for a Cloud Platform resource. - // `members` can have the following values: - // - // - `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // - `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. - // - // - `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . - // - // - `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. - // - // - `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. - // - // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered user - // retains the role in the binding. - // - // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus - // unique identifier) representing a service account that has been recently - // deleted. For example, - // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account retains the - // role in the binding. - // - // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If - // the group is recovered, this value reverts to `group:{emailid}` and the - // recovered group retains the role in the binding. - // - // - `domain:{domain}`: The G Suite domain (primary) that represents all the - // users of that domain. For example, `google.com` or `example.com`. - Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - // The condition that is associated with this binding. - // - // If the condition evaluates to `true`, then this binding applies to the - // current request. - // - // If the condition evaluates to `false`, then this binding does not apply to - // the current request. However, a different role binding might grant the same - // role to one or more of the principals in this binding. - // - // To learn which resources support conditions in their IAM policies, see the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). - Condition *expr.Expr `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` -} - -func (x *Binding) Reset() { - *x = Binding{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Binding) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Binding) ProtoMessage() {} - -func (x *Binding) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Binding.ProtoReflect.Descriptor instead. -func (*Binding) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{1} -} - -func (x *Binding) GetRole() string { - if x != nil { - return x.Role - } - return "" -} - -func (x *Binding) GetMembers() []string { - if x != nil { - return x.Members - } - return nil -} - -func (x *Binding) GetCondition() *expr.Expr { - if x != nil { - return x.Condition - } - return nil -} - -// Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific service, -// the union of the two AuditConfigs is used for that service: the log_types -// specified in each AuditConfig are enabled, and the exempted_members in each -// AuditLogConfig are exempted. -// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices", -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE" -// }, -// { -// "log_type": "ADMIN_READ" -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com", -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ" -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, and -// aliya@example.com from DATA_WRITE logging. 
-type AuditConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Specifies a service that will be enabled for audit logging. - // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. - // `allServices` is a special value that covers all services. - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` - // The configuration for logging of each type of permission. - AuditLogConfigs []*AuditLogConfig `protobuf:"bytes,3,rep,name=audit_log_configs,json=auditLogConfigs,proto3" json:"audit_log_configs,omitempty"` -} - -func (x *AuditConfig) Reset() { - *x = AuditConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuditConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuditConfig) ProtoMessage() {} - -func (x *AuditConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuditConfig.ProtoReflect.Descriptor instead. -func (*AuditConfig) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{2} -} - -func (x *AuditConfig) GetService() string { - if x != nil { - return x.Service - } - return "" -} - -func (x *AuditConfig) GetAuditLogConfigs() []*AuditLogConfig { - if x != nil { - return x.AuditLogConfigs - } - return nil -} - -// Provides the configuration for logging a type of permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE" -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting -// jose@example.com from DATA_READ logging. -type AuditLogConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The log type that this config enables. - LogType AuditLogConfig_LogType `protobuf:"varint,1,opt,name=log_type,json=logType,proto3,enum=google.iam.v1.AuditLogConfig_LogType" json:"log_type,omitempty"` - // Specifies the identities that do not cause logging for this type of - // permission. - // Follows the same format of [Binding.members][google.iam.v1.Binding.members]. - ExemptedMembers []string `protobuf:"bytes,2,rep,name=exempted_members,json=exemptedMembers,proto3" json:"exempted_members,omitempty"` -} - -func (x *AuditLogConfig) Reset() { - *x = AuditLogConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuditLogConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuditLogConfig) ProtoMessage() {} - -func (x *AuditLogConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuditLogConfig.ProtoReflect.Descriptor instead. 
-func (*AuditLogConfig) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3} -} - -func (x *AuditLogConfig) GetLogType() AuditLogConfig_LogType { - if x != nil { - return x.LogType - } - return AuditLogConfig_LOG_TYPE_UNSPECIFIED -} - -func (x *AuditLogConfig) GetExemptedMembers() []string { - if x != nil { - return x.ExemptedMembers - } - return nil -} - -// The difference delta between two policies. -type PolicyDelta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The delta for Bindings between two policies. - BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas,proto3" json:"binding_deltas,omitempty"` - // The delta for AuditConfigs between two policies. - AuditConfigDeltas []*AuditConfigDelta `protobuf:"bytes,2,rep,name=audit_config_deltas,json=auditConfigDeltas,proto3" json:"audit_config_deltas,omitempty"` -} - -func (x *PolicyDelta) Reset() { - *x = PolicyDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PolicyDelta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PolicyDelta) ProtoMessage() {} - -func (x *PolicyDelta) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PolicyDelta.ProtoReflect.Descriptor instead. -func (*PolicyDelta) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{4} -} - -func (x *PolicyDelta) GetBindingDeltas() []*BindingDelta { - if x != nil { - return x.BindingDeltas - } - return nil -} - -func (x *PolicyDelta) GetAuditConfigDeltas() []*AuditConfigDelta { - if x != nil { - return x.AuditConfigDeltas - } - return nil -} - -// One delta entry for Binding. Each individual change (only one member in each -// entry) to a binding will be a separate entry. -type BindingDelta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The action that was performed on a Binding. - // Required - Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"` - // Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. - // Required - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - // A single identity requesting access for a Cloud Platform resource. - // Follows the same format of Binding.members. - // Required - Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"` - // The condition that is associated with this binding. 
- Condition *expr.Expr `protobuf:"bytes,4,opt,name=condition,proto3" json:"condition,omitempty"` -} - -func (x *BindingDelta) Reset() { - *x = BindingDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BindingDelta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BindingDelta) ProtoMessage() {} - -func (x *BindingDelta) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BindingDelta.ProtoReflect.Descriptor instead. -func (*BindingDelta) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{5} -} - -func (x *BindingDelta) GetAction() BindingDelta_Action { - if x != nil { - return x.Action - } - return BindingDelta_ACTION_UNSPECIFIED -} - -func (x *BindingDelta) GetRole() string { - if x != nil { - return x.Role - } - return "" -} - -func (x *BindingDelta) GetMember() string { - if x != nil { - return x.Member - } - return "" -} - -func (x *BindingDelta) GetCondition() *expr.Expr { - if x != nil { - return x.Condition - } - return nil -} - -// One delta entry for AuditConfig. Each individual change (only one -// exempted_member in each entry) to a AuditConfig will be a separate entry. -type AuditConfigDelta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The action that was performed on an audit configuration in a policy. - // Required - Action AuditConfigDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.AuditConfigDelta_Action" json:"action,omitempty"` - // Specifies a service that was configured for Cloud Audit Logging. - // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. - // `allServices` is a special value that covers all services. - // Required - Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` - // A single identity that is exempted from "data access" audit - // logging for the `service` specified above. - // Follows the same format of Binding.members. - ExemptedMember string `protobuf:"bytes,3,opt,name=exempted_member,json=exemptedMember,proto3" json:"exempted_member,omitempty"` - // Specifies the log_type that was be enabled. ADMIN_ACTIVITY is always - // enabled, and cannot be configured. - // Required - LogType string `protobuf:"bytes,4,opt,name=log_type,json=logType,proto3" json:"log_type,omitempty"` -} - -func (x *AuditConfigDelta) Reset() { - *x = AuditConfigDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuditConfigDelta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuditConfigDelta) ProtoMessage() {} - -func (x *AuditConfigDelta) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuditConfigDelta.ProtoReflect.Descriptor instead. 
-func (*AuditConfigDelta) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{6} -} - -func (x *AuditConfigDelta) GetAction() AuditConfigDelta_Action { - if x != nil { - return x.Action - } - return AuditConfigDelta_ACTION_UNSPECIFIED -} - -func (x *AuditConfigDelta) GetService() string { - if x != nil { - return x.Service - } - return "" -} - -func (x *AuditConfigDelta) GetExemptedMember() string { - if x != nil { - return x.ExemptedMember - } - return "" -} - -func (x *AuditConfigDelta) GetLogType() string { - if x != nil { - return x.LogType - } - return "" -} - -var File_google_iam_v1_policy_proto protoreflect.FileDescriptor - -var file_google_iam_v1_policy_proto_rawDesc = []byte{ - 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x16, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, - 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x62, 0x69, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, - 0x6e, 0x67, 0x52, 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3f, 0x0a, 0x0d, - 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0c, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x65, 0x74, 0x61, - 0x67, 0x22, 0x68, 0x0a, 0x07, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, - 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, - 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x72, 0x0a, 0x0b, 0x41, - 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, - 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, - 0x61, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x73, 0x22, - 0xd1, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x40, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, - 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6c, 0x6f, 0x67, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, - 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, - 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x22, - 0x52, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x4c, 0x4f, - 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x44, 0x4d, 0x49, 0x4e, 0x5f, 0x52, 0x45, - 0x41, 0x44, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x57, 0x52, 0x49, - 0x54, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x52, 0x45, 0x41, - 0x44, 0x10, 0x03, 0x22, 0xa2, 0x01, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x44, 0x65, - 0x6c, 0x74, 0x61, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, - 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x0d, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, - 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x4f, 0x0a, 0x13, 0x61, 0x75, 0x64, 0x69, 0x74, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, - 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x22, 0xde, 0x01, 0x0a, 0x0c, 0x42, 0x69, 0x6e, - 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x06, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, - 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x62, 0x65, - 0x72, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, - 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 
0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, - 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x02, 0x22, 0xe7, 0x01, 0x0a, 0x10, 0x41, 0x75, - 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3e, - 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, - 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, - 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x78, 0x65, 0x6d, - 0x70, 0x74, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, - 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x22, 0x35, 0x0a, 0x06, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, - 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, - 0x45, 0x10, 0x02, 0x42, 0x83, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, - 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_google_iam_v1_policy_proto_rawDescOnce sync.Once - file_google_iam_v1_policy_proto_rawDescData = file_google_iam_v1_policy_proto_rawDesc -) - -func file_google_iam_v1_policy_proto_rawDescGZIP() []byte { - file_google_iam_v1_policy_proto_rawDescOnce.Do(func() { - file_google_iam_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_policy_proto_rawDescData) - }) - return file_google_iam_v1_policy_proto_rawDescData -} - -var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_google_iam_v1_policy_proto_goTypes = []interface{}{ - (AuditLogConfig_LogType)(0), // 0: google.iam.v1.AuditLogConfig.LogType - (BindingDelta_Action)(0), // 1: google.iam.v1.BindingDelta.Action - (AuditConfigDelta_Action)(0), // 2: google.iam.v1.AuditConfigDelta.Action - (*Policy)(nil), // 3: google.iam.v1.Policy - (*Binding)(nil), // 4: google.iam.v1.Binding - (*AuditConfig)(nil), // 5: google.iam.v1.AuditConfig - (*AuditLogConfig)(nil), // 6: 
google.iam.v1.AuditLogConfig - (*PolicyDelta)(nil), // 7: google.iam.v1.PolicyDelta - (*BindingDelta)(nil), // 8: google.iam.v1.BindingDelta - (*AuditConfigDelta)(nil), // 9: google.iam.v1.AuditConfigDelta - (*expr.Expr)(nil), // 10: google.type.Expr -} -var file_google_iam_v1_policy_proto_depIdxs = []int32{ - 4, // 0: google.iam.v1.Policy.bindings:type_name -> google.iam.v1.Binding - 5, // 1: google.iam.v1.Policy.audit_configs:type_name -> google.iam.v1.AuditConfig - 10, // 2: google.iam.v1.Binding.condition:type_name -> google.type.Expr - 6, // 3: google.iam.v1.AuditConfig.audit_log_configs:type_name -> google.iam.v1.AuditLogConfig - 0, // 4: google.iam.v1.AuditLogConfig.log_type:type_name -> google.iam.v1.AuditLogConfig.LogType - 8, // 5: google.iam.v1.PolicyDelta.binding_deltas:type_name -> google.iam.v1.BindingDelta - 9, // 6: google.iam.v1.PolicyDelta.audit_config_deltas:type_name -> google.iam.v1.AuditConfigDelta - 1, // 7: google.iam.v1.BindingDelta.action:type_name -> google.iam.v1.BindingDelta.Action - 10, // 8: google.iam.v1.BindingDelta.condition:type_name -> google.type.Expr - 2, // 9: google.iam.v1.AuditConfigDelta.action:type_name -> google.iam.v1.AuditConfigDelta.Action - 10, // [10:10] is the sub-list for method output_type - 10, // [10:10] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name -} - -func init() { file_google_iam_v1_policy_proto_init() } -func file_google_iam_v1_policy_proto_init() { - if File_google_iam_v1_policy_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Policy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Binding); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuditConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuditLogConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PolicyDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BindingDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuditConfigDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := 
protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_iam_v1_policy_proto_rawDesc, - NumEnums: 3, - NumMessages: 7, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_iam_v1_policy_proto_goTypes, - DependencyIndexes: file_google_iam_v1_policy_proto_depIdxs, - EnumInfos: file_google_iam_v1_policy_proto_enumTypes, - MessageInfos: file_google_iam_v1_policy_proto_msgTypes, - }.Build() - File_google_iam_v1_policy_proto = out.File - file_google_iam_v1_policy_proto_rawDesc = nil - file_google_iam_v1_policy_proto_goTypes = nil - file_google_iam_v1_policy_proto_depIdxs = nil -} diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go deleted file mode 100644 index f004a7afbc..0000000000 --- a/vendor/cloud.google.com/go/iam/iam.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package iam supports the resource-specific operations of Google Cloud -// IAM (Identity and Access Management) for the Google Cloud Libraries. -// See https://cloud.google.com/iam for more about IAM. -// -// Users of the Google Cloud Libraries will typically not use this package -// directly. Instead they will begin with some resource that supports IAM, like -// a pubsub topic, and call its IAM method to get a Handle for that resource. -package iam - -import ( - "context" - "fmt" - "time" - - pb "cloud.google.com/go/iam/apiv1/iampb" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -// client abstracts the IAMPolicy API to allow multiple implementations. -type client interface { - Get(ctx context.Context, resource string) (*pb.Policy, error) - Set(ctx context.Context, resource string, p *pb.Policy) error - Test(ctx context.Context, resource string, perms []string) ([]string, error) - GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) -} - -// grpcClient implements client for the standard gRPC-based IAMPolicy service. 
-type grpcClient struct { - c pb.IAMPolicyClient -} - -var withRetry = gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 60 * time.Second, - Multiplier: 1.3, - }) -}) - -func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { - return g.GetWithVersion(ctx, resource, 1) -} - -func (g *grpcClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) { - var proto *pb.Policy - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) - ctx = insertMetadata(ctx, md) - - err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { - var err error - proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{ - Resource: resource, - Options: &pb.GetPolicyOptions{ - RequestedPolicyVersion: requestedPolicyVersion, - }, - }) - return err - }, withRetry) - if err != nil { - return nil, err - } - return proto, nil -} - -func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) - ctx = insertMetadata(ctx, md) - - return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { - _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ - Resource: resource, - Policy: p, - }) - return err - }, withRetry) -} - -func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { - var res *pb.TestIamPermissionsResponse - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) - ctx = insertMetadata(ctx, md) - - err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { - var err error - res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ - Resource: resource, - Permissions: perms, - }) - return err - }, withRetry) - if err != nil { - return nil, err - } - return res.Permissions, nil -} - -// A Handle provides IAM operations for a resource. -type Handle struct { - c client - resource string -} - -// A Handle3 provides IAM operations for a resource. It is similar to a Handle, but provides access to newer IAM features (e.g., conditions). -type Handle3 struct { - c client - resource string - version int32 -} - -// InternalNewHandle is for use by the Google Cloud Libraries only. -// -// InternalNewHandle returns a Handle for resource. -// The conn parameter refers to a server that must support the IAMPolicy service. -func InternalNewHandle(conn grpc.ClientConnInterface, resource string) *Handle { - return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource) -} - -// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only. -// -// InternalNewHandleClient returns a Handle for resource using the given -// grpc service that implements IAM as a mixin -func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle { - return InternalNewHandleClient(&grpcClient{c: c}, resource) -} - -// InternalNewHandleClient is for use by the Google Cloud Libraries only. -// -// InternalNewHandleClient returns a Handle for resource using the given -// client implementation. 
-func InternalNewHandleClient(c client, resource string) *Handle { - return &Handle{ - c: c, - resource: resource, - } -} - -// V3 returns a Handle3, which is like Handle except it sets -// requestedPolicyVersion to 3 when retrieving a policy and policy.version to 3 -// when storing a policy. -func (h *Handle) V3() *Handle3 { - return &Handle3{ - c: h.c, - resource: h.resource, - version: 3, - } -} - -// Policy retrieves the IAM policy for the resource. -func (h *Handle) Policy(ctx context.Context) (*Policy, error) { - proto, err := h.c.Get(ctx, h.resource) - if err != nil { - return nil, err - } - return &Policy{InternalProto: proto}, nil -} - -// SetPolicy replaces the resource's current policy with the supplied Policy. -// -// If policy was created from a prior call to Get, then the modification will -// only succeed if the policy has not changed since the Get. -func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error { - return h.c.Set(ctx, h.resource, policy.InternalProto) -} - -// TestPermissions returns the subset of permissions that the caller has on the resource. -func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { - return h.c.Test(ctx, h.resource, permissions) -} - -// A RoleName is a name representing a collection of permissions. -type RoleName string - -// Common role names. -const ( - Owner RoleName = "roles/owner" - Editor RoleName = "roles/editor" - Viewer RoleName = "roles/viewer" -) - -const ( - // AllUsers is a special member that denotes all users, even unauthenticated ones. - AllUsers = "allUsers" - - // AllAuthenticatedUsers is a special member that denotes all authenticated users. - AllAuthenticatedUsers = "allAuthenticatedUsers" -) - -// A Policy is a list of Bindings representing roles -// granted to members. -// -// The zero Policy is a valid policy with no bindings. -type Policy struct { - // TODO(jba): when type aliases are available, put Policy into an internal package - // and provide an exported alias here. - - // This field is exported for use by the Google Cloud Libraries only. - // It may become unexported in a future release. - InternalProto *pb.Policy -} - -// Members returns the list of members with the supplied role. -// The return value should not be modified. Use Add and Remove -// to modify the members of a role. -func (p *Policy) Members(r RoleName) []string { - b := p.binding(r) - if b == nil { - return nil - } - return b.Members -} - -// HasRole reports whether member has role r. -func (p *Policy) HasRole(member string, r RoleName) bool { - return memberIndex(member, p.binding(r)) >= 0 -} - -// Add adds member member to role r if it is not already present. -// A new binding is created if there is no binding for the role. -func (p *Policy) Add(member string, r RoleName) { - b := p.binding(r) - if b == nil { - if p.InternalProto == nil { - p.InternalProto = &pb.Policy{} - } - p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ - Role: string(r), - Members: []string{member}, - }) - return - } - if memberIndex(member, b) < 0 { - b.Members = append(b.Members, member) - return - } -} - -// Remove removes member from role r if it is present. 
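The doc comments above describe the handle's read-modify-write contract: `Policy` fetches the current policy (including its etag inside `InternalProto`), `Add` is a no-op if the member already holds the role, and `SetPolicy` only succeeds if the policy has not changed since the `Policy` call. A hedged sketch of that cycle — `grantViewer` is a hypothetical helper, and `h` is assumed to come from some resource's IAM method (e.g. a Pub/Sub topic):

```go
package iamexample

import (
	"context"

	"cloud.google.com/go/iam"
)

// grantViewer fetches the current policy, adds member to roles/viewer, and
// writes the policy back. Because SetPolicy carries the etag from the prior
// Policy call, a concurrent modification makes the write fail rather than
// silently overwriting it.
func grantViewer(ctx context.Context, h *iam.Handle, member string) error {
	policy, err := h.Policy(ctx)
	if err != nil {
		return err
	}
	policy.Add(member, iam.Viewer) // no-op if member already has the role
	return h.SetPolicy(ctx, policy)
}
```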
-func (p *Policy) Remove(member string, r RoleName) { - bi := p.bindingIndex(r) - if bi < 0 { - return - } - bindings := p.InternalProto.Bindings - b := bindings[bi] - mi := memberIndex(member, b) - if mi < 0 { - return - } - // Order doesn't matter for bindings or members, so to remove, move the last item - // into the removed spot and shrink the slice. - if len(b.Members) == 1 { - // Remove binding. - last := len(bindings) - 1 - bindings[bi] = bindings[last] - bindings[last] = nil - p.InternalProto.Bindings = bindings[:last] - return - } - // Remove member. - // TODO(jba): worry about multiple copies of m? - last := len(b.Members) - 1 - b.Members[mi] = b.Members[last] - b.Members[last] = "" - b.Members = b.Members[:last] -} - -// Roles returns the names of all the roles that appear in the Policy. -func (p *Policy) Roles() []RoleName { - if p.InternalProto == nil { - return nil - } - var rns []RoleName - for _, b := range p.InternalProto.Bindings { - rns = append(rns, RoleName(b.Role)) - } - return rns -} - -// binding returns the Binding for the suppied role, or nil if there isn't one. -func (p *Policy) binding(r RoleName) *pb.Binding { - i := p.bindingIndex(r) - if i < 0 { - return nil - } - return p.InternalProto.Bindings[i] -} - -func (p *Policy) bindingIndex(r RoleName) int { - if p.InternalProto == nil { - return -1 - } - for i, b := range p.InternalProto.Bindings { - if b.Role == string(r) { - return i - } - } - return -1 -} - -// memberIndex returns the index of m in b's Members, or -1 if not found. -func memberIndex(m string, b *pb.Binding) int { - if b == nil { - return -1 - } - for i, mm := range b.Members { - if mm == m { - return i - } - } - return -1 -} - -// insertMetadata inserts metadata into the given context -func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { - out, _ := metadata.FromOutgoingContext(ctx) - out = out.Copy() - for _, md := range mds { - for k, v := range md { - out[k] = append(out[k], v...) - } - } - return metadata.NewOutgoingContext(ctx, out) -} - -// A Policy3 is a list of Bindings representing roles granted to members. -// -// The zero Policy3 is a valid policy with no bindings. -// -// It is similar to a Policy, except a Policy3 provides direct access to the -// list of Bindings. -// -// The policy version is always set to 3. -type Policy3 struct { - etag []byte - Bindings []*pb.Binding -} - -// Policy retrieves the IAM policy for the resource. -// -// requestedPolicyVersion is always set to 3. -func (h *Handle3) Policy(ctx context.Context) (*Policy3, error) { - proto, err := h.c.GetWithVersion(ctx, h.resource, h.version) - if err != nil { - return nil, err - } - return &Policy3{ - Bindings: proto.Bindings, - etag: proto.Etag, - }, nil -} - -// SetPolicy replaces the resource's current policy with the supplied Policy. -// -// If policy was created from a prior call to Get, then the modification will -// only succeed if the policy has not changed since the Get. -func (h *Handle3) SetPolicy(ctx context.Context, policy *Policy3) error { - return h.c.Set(ctx, h.resource, &pb.Policy{ - Bindings: policy.Bindings, - Etag: policy.etag, - Version: h.version, - }) -} - -// TestPermissions returns the subset of permissions that the caller has on the resource. 
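As documented above, `V3()` pins the requested and stored policy version to 3, and `Policy3` exposes the binding list directly so conditional bindings can be edited. A sketch under the same assumptions (`addConditionalBinding` is a hypothetical helper, the role and CEL expression are illustrative, and the `expr` import is the `google.type.Expr` package the generated types reference):

```go
package iamexample

import (
	"context"

	"cloud.google.com/go/iam"
	iampb "cloud.google.com/go/iam/apiv1/iampb"
	"google.golang.org/genproto/googleapis/type/expr"
)

// addConditionalBinding grants member a role that expires at a fixed time.
// Handle3 is required here: version-1 policies cannot carry conditions.
func addConditionalBinding(ctx context.Context, h *iam.Handle, member string) error {
	h3 := h.V3() // requests and stores policy version 3
	p, err := h3.Policy(ctx)
	if err != nil {
		return err
	}
	p.Bindings = append(p.Bindings, &iampb.Binding{
		Role:    "roles/storage.objectViewer",
		Members: []string{member},
		Condition: &expr.Expr{
			Title:      "expires-2024",
			Expression: `request.time < timestamp("2024-12-31T00:00:00Z")`,
		},
	})
	return h3.SetPolicy(ctx, p) // preserves the etag fetched above
}
```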
-func (h *Handle3) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { - return h.c.Test(ctx, h.resource, permissions) -} diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json deleted file mode 100644 index 9482956b1d..0000000000 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ /dev/null @@ -1,2099 +0,0 @@ -{ - "cloud.google.com/go/accessapproval/apiv1": { - "distribution_name": "cloud.google.com/go/accessapproval/apiv1", - "description": "Access Approval API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedi
dentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/accessapproval/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/accesscontextmanager/apiv1": { - "distribution_name": "cloud.google.com/go/accesscontextmanager/apiv1", - "description": "Access Context Manager API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/accesscontextmanager/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/advisorynotifications/apiv1": { - "distribution_name": "cloud.google.com/go/advisorynotifications/apiv1", - "description": "Advisory Notifications API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google
.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/advisorynotifications/apiv1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/aiplatform/apiv1": { - "distribution_name": "cloud.google.com/go/aiplatform/apiv1", - "description": "Vertex AI API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/aiplatform/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/aiplatform/apiv1beta1": { - "distribution_name": "cloud.google.com/go/aiplatform/apiv1beta1", - "description": "Vertex AI API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com
/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/aiplatform/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/alloydb/apiv1": { - "distribution_name": "cloud.google.com/go/alloydb/apiv1", - "description": "AlloyDB API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/alloydb/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/alloydb/apiv1alpha": {
-    "distribution_name": "cloud.google.com/go/alloydb/apiv1alpha",
-    "description": "AlloyDB API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1alpha",
-    "release_level": "alpha",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/alloydb/apiv1beta": {
-    "distribution_name": "cloud.google.com/go/alloydb/apiv1beta",
-    "description": "AlloyDB API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1beta",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/analytics/admin/apiv1alpha": {
-    "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha",
-    "description": "Google Analytics Admin API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/analytics/latest/admin/apiv1alpha",
-    "release_level": "alpha",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/apigateway/apiv1": {
-    "distribution_name": "cloud.google.com/go/apigateway/apiv1",
-    "description": "API Gateway API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigateway/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/apigeeconnect/apiv1": {
-    "distribution_name": "cloud.google.com/go/apigeeconnect/apiv1",
-    "description": "Apigee Connect API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeconnect/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/apigeeregistry/apiv1": {
-    "distribution_name": "cloud.google.com/go/apigeeregistry/apiv1",
-    "description": "Apigee Registry API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/apikeys/apiv2": {
-    "distribution_name": "cloud.google.com/go/apikeys/apiv2",
-    "description": "API Keys API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apikeys/latest/apiv2",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/appengine/apiv1": {
-    "distribution_name": "cloud.google.com/go/appengine/apiv1",
-    "description": "App Engine Admin API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/appengine/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/area120/tables/apiv1alpha1": {
-    "distribution_name": "cloud.google.com/go/area120/tables/apiv1alpha1",
-    "description": "Area120 Tables API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/area120/latest/tables/apiv1alpha1",
-    "release_level": "alpha",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/artifactregistry/apiv1": {
-    "distribution_name": "cloud.google.com/go/artifactregistry/apiv1",
-    "description": "Artifact Registry API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/artifactregistry/apiv1beta2": {
-    "distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2",
-    "description": "Artifact Registry API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1beta2",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/asset/apiv1": {
-    "distribution_name": "cloud.google.com/go/asset/apiv1",
-    "description": "Cloud Asset API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/asset/apiv1p2beta1": {
-    "distribution_name": "cloud.google.com/go/asset/apiv1p2beta1",
-    "description": "Cloud Asset API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p2beta1",
g\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/asset/apiv1p2beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/asset/apiv1p5beta1": { - "distribution_name": "cloud.google.com/go/asset/apiv1p5beta1", - "description": "Cloud Asset API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/asset/apiv1p5beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/assuredworkloads/apiv1": { - "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1", - "description": "Assured Workloads API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncl
oud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/assuredworkloads/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/assuredworkloads/apiv1beta1": { - "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1beta1", - "description": "Assured Workloads API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/assuredworkloads/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/automl/apiv1": { - "distribution_name": "cloud.google.com/go/automl/apiv1", - "description": "Cloud AutoML API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/g
o/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/automl/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/automl/apiv1beta1": { - "distribution_name": "cloud.google.com/go/automl/apiv1beta1", - "description": "Cloud AutoML API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/automl/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/baremetalsolution/apiv2": { - "distribution_name": "cloud.google.com/go/baremetalsolution/apiv2", - "description": "Bare Metal Solution API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\
ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/baremetalsolution/apiv2", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/batch/apiv1": { - "distribution_name": "cloud.google.com/go/batch/apiv1", - "description": "Batch API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/batch/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/beyondcorp/appconnections/apiv1": { - "distribution_name": "cloud.google.com/go/beyondcorp/appconnections/apiv1", - "description": "BeyondCorp API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\n
cloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/beyondcorp/appconnections/apiv1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/beyondcorp/appconnectors/apiv1": { - "distribution_name": "cloud.google.com/go/beyondcorp/appconnectors/apiv1", - "description": "BeyondCorp API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/beyondcorp/latest/appconnectors/apiv1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/beyondcorp/appgateways/apiv1": {
-    "distribution_name": "cloud.google.com/go/beyondcorp/appgateways/apiv1",
-    "description": "BeyondCorp API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1": {
-    "distribution_name": "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1",
-    "description": "BeyondCorp API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/beyondcorp/clientgateways/apiv1": {
-    "distribution_name": "cloud.google.com/go/beyondcorp/clientgateways/apiv1",
-    "description": "BeyondCorp API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/bigquery": {
-    "distribution_name": "cloud.google.com/go/bigquery",
-    "description": "BigQuery",
-    "language": "Go",
-    "client_library_type": "manual",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest",
-    "release_level": "ga",
-    "library_type": "GAPIC_MANUAL"
-  },
-  "cloud.google.com/go/bigquery/analyticshub/apiv1": {
-    "distribution_name": "cloud.google.com/go/bigquery/analyticshub/apiv1",
-    "description": "Analytics Hub API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/bigquery/connection/apiv1": {
-    "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1",
-    "description": "BigQuery Connection API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/bigquery/connection/apiv1beta1": {
-    "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1beta1",
-    "description": "BigQuery Connection API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/bigquery/dataexchange/apiv1beta1": {
-    "distribution_name": "cloud.google.com/go/bigquery/dataexchange/apiv1beta1",
-    "description": "Analytics Hub API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/dataexchange/apiv1beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/bigquery/datapolicies/apiv1": {
-    "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1",
-    "description": "BigQuery Data Policy API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/bigquery/datapolicies/apiv1beta1": {
-    "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1beta1",
-    "description": "BigQuery Data Policy API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/bigquery/datatransfer/apiv1": {
-    "distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1",
-    "description": "BigQuery Data Transfer API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datatransfer/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/bigquery/migration/apiv2": {
-    "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2",
-    "description": "BigQuery Migration API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/bigquery/migration/apiv2alpha": {
-    "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2alpha",
-    "description": "BigQuery Migration API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2alpha",
-    "release_level": "alpha",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/bigquery/reservation/apiv1": {
-    "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1",
-    "description": "BigQuery Reservation API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/bigquery/storage/apiv1": {
-    "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1",
-    "description": "BigQuery Storage API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url":
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/storage/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/storage/apiv1beta1": { - "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1", - "description": "BigQuery Storage API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/fu
nctions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/storage/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/storage/apiv1beta2": { - "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta2", - "description": "BigQuery Storage API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/storage/apiv1beta2", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigtable": { - "distribution_name": "cloud.google.com/go/bigtable", - "description": "Cloud BigTable", - "language": "Go", - "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest", - "release_level": "ga", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/billing/apiv1": { - "distribution_name": "cloud.google.com/go/billing/apiv1", - "description": "Cloud Billing API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/billing/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/billing/budgets/apiv1": { - "distribution_name": "cloud.google.com/go/billing/budgets/apiv1", - "description": "Cloud Billing Budget API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.go
ogle.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/billing/budgets/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/billing/budgets/apiv1beta1": { - "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1", - "description": "Cloud Billing Budget API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/billing/budgets/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/binaryauthorization/apiv1": { - "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1", - "description": "Binary Authorization API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com
/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/binaryauthorization/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/binaryauthorization/apiv1beta1": { - "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1beta1", - "description": "Binary Authorization API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/binaryauthorization/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/certificatemanager/apiv1": { - "distribution_name": "cloud.google.com/go/certificatemanager/apiv1", - "description": "Certificate Manager API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.co
m/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/certificatemanager/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/channel/apiv1": { - "distribution_name": "cloud.google.com/go/channel/apiv1", - "description": "Cloud Channel API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/latest/cloud.google.com/go/channel/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/cloudbuild/apiv1/v2": {
-    "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2",
-    "description": "Cloud Build API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv1/v2",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/cloudbuild/apiv2": {
-    "distribution_name": "cloud.google.com/go/cloudbuild/apiv2",
-    "description": "Cloud Build API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv2",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/clouddms/apiv1": {
-    "distribution_name": "cloud.google.com/go/clouddms/apiv1",
-    "description": "Database Migration API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/clouddms/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/cloudtasks/apiv2": {
-    "distribution_name": "cloud.google.com/go/cloudtasks/apiv2",
-    "description": "Cloud Tasks API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/cloudtasks/apiv2beta2": {
-    "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta2",
-    "description": "Cloud Tasks API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta2",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/cloudtasks/apiv2beta3": {
-    "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta3",
-    "description": "Cloud Tasks API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta3",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/compute/apiv1": {
-    "distribution_name": "cloud.google.com/go/compute/apiv1",
-    "description": "Google Compute Engine API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/compute/metadata": {
-    "distribution_name": "cloud.google.com/go/compute/metadata",
-    "description": "Service Metadata API",
-    "language": "Go",
-    "client_library_type": "manual",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/metadata",
-    "release_level": "ga",
-    "library_type": "CORE"
-  },
-  "cloud.google.com/go/confidentialcomputing/apiv1": {
-    "distribution_name": "cloud.google.com/go/confidentialcomputing/apiv1",
-    "description": "Confidential Computing API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/confidentialcomputing/latest/apiv1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/confidentialcomputing/apiv1alpha1": {
-    "distribution_name": "cloud.google.com/go/confidentialcomputing/apiv1alpha1",
-    "description": "Confidential Computing API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/confidentialcomputing/latest/apiv1alpha1",
-    "release_level": "alpha",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/contactcenterinsights/apiv1": {
-    "distribution_name": "cloud.google.com/go/contactcenterinsights/apiv1",
-    "description": "Contact Center AI Insights API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/contactcenterinsights/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/container/apiv1": {
-    "distribution_name": "cloud.google.com/go/container/apiv1",
-    "description": "Kubernetes Engine API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/container/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/containeranalysis/apiv1beta1": {
-    "distribution_name": "cloud.google.com/go/containeranalysis/apiv1beta1",
-    "description": "Container Analysis API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/containeranalysis/latest/apiv1beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/datacatalog/apiv1": {
-    "distribution_name": "cloud.google.com/go/datacatalog/apiv1",
-    "description": "Google Cloud Data Catalog API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/funct
ions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datacatalog/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datacatalog/apiv1beta1": { - "distribution_name": "cloud.google.com/go/datacatalog/apiv1beta1", - "description": "Google Cloud Data Catalog API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datacatalog/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datacatalog/lineage/apiv1": { - "distribution_name": "cloud.google.com/go/datacatalog/lineage/apiv1", - "description": "Data Lineage API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/function
s\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datacatalog/lineage/apiv1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dataflow/apiv1beta3": { - "distribution_name": "cloud.google.com/go/dataflow/apiv1beta3", - "description": "Dataflow API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dataflow/apiv1beta3", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dataform/apiv1alpha2": { - "distribution_name": "cloud.google.com/go/dataform/apiv1alpha2", - "description": "Dataform API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.c
om/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dataform/apiv1alpha2", - "release_level": "alpha", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dataform/apiv1beta1": { - "distribution_name": "cloud.google.com/go/dataform/apiv1beta1", - "description": "Dataform API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dataform/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datafusion/apiv1": { - "distribution_name": "cloud.google.com/go/datafusion/apiv1", - "description": "Cloud Data Fusion API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.
com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datafusion/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datalabeling/apiv1beta1": { - "distribution_name": "cloud.google.com/go/datalabeling/apiv1beta1", - "description": "Data Labeling API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datalabeling/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dataplex/apiv1": { - "distribution_name": "cloud.google.com/go/dataplex/apiv1", - "description": "Cloud Dataplex API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com
/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dataplex/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dataproc/v2/apiv1": { - "distribution_name": "cloud.google.com/go/dataproc/v2/apiv1", - "description": "Cloud Dataproc API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dataproc/v2/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dataqna/apiv1alpha": { - "distribution_name": "cloud.google.com/go/dataqna/apiv1alpha", - "description": "Data QnA API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/ga
ming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dataqna/apiv1alpha", - "release_level": "alpha", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datastore": { - "distribution_name": "cloud.google.com/go/datastore", - "description": "Cloud Datastore", - "language": "Go", - "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest", - "release_level": "ga", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/datastore/admin/apiv1": { - "distribution_name": "cloud.google.com/go/datastore/admin/apiv1", - "description": "Cloud Datastore API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datastore/admin/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datastream/apiv1": { - "distribution_name": "cloud.google.com/go/datastream/apiv1", - "description": "Datastream API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/datastream/apiv1alpha1": {
-    "distribution_name": "cloud.google.com/go/datastream/apiv1alpha1",
-    "description": "Datastream API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1alpha1",
-    "release_level": "alpha",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/debugger/apiv2": {
-    "distribution_name": "cloud.google.com/go/debugger/apiv2",
-    "description": "Stackdriver Debugger API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/debugger/apiv2",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/deploy/apiv1": {
-    "distribution_name": "cloud.google.com/go/deploy/apiv1",
-    "description": "Google Cloud Deploy API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/deploy/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/dialogflow/apiv2": {
-    "distribution_name": "cloud.google.com/go/dialogflow/apiv2",
-    "description": "Dialogflow API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/dialogflow/apiv2beta1": {
-    "distribution_name": "cloud.google.com/go/dialogflow/apiv2beta1",
-    "description": "Dialogflow API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/dialogflow/cx/apiv3": {
-    "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3",
-    "description": "Dialogflow API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/dialogflow/cx/apiv3beta1": {
-    "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3beta1",
-    "description": "Dialogflow API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/discoveryengine/apiv1beta": {
-    "distribution_name": "cloud.google.com/go/discoveryengine/apiv1beta",
-    "description": "Discovery Engine API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1beta",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/dlp/apiv2": {
-    "distribution_name": "cloud.google.com/go/dlp/apiv2",
-    "description": "Cloud Data Loss Prevention (DLP) API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/documentai/apiv1": {
-    "distribution_name": "cloud.google.com/go/documentai/apiv1",
-    "description": "Cloud Document AI API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1",
g\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/documentai/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/documentai/apiv1beta3": { - "distribution_name": "cloud.google.com/go/documentai/apiv1beta3", - "description": "Cloud Document AI API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/documentai/apiv1beta3", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/domains/apiv1beta1": { - "distribution_name": "cloud.google.com/go/domains/apiv1beta1", - "description": "Cloud Domains API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.googl
e.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/domains/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/edgecontainer/apiv1": { - "distribution_name": "cloud.google.com/go/edgecontainer/apiv1", - "description": "Distributed Cloud Edge Container API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/edgecontainer/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/errorreporting": { - "distribution_name": "cloud.google.com/go/errorreporting", - "description": "Cloud Error Reporting API", - "language": "Go", - "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/errorreporting/latest", - "release_level": "beta", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/errorreporting/apiv1beta1": { - "distribution_name": "cloud.google.com/go/errorreporting/apiv1beta1", - "description": "Error Reporting API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/errorreporting/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/essentialcontacts/apiv1": { - "distribution_name": "cloud.google.com/go/essentialcontacts/apiv1", - "description": "Essential Contacts API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/fun
ctions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/essentialcontacts/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/eventarc/apiv1": { - "distribution_name": "cloud.google.com/go/eventarc/apiv1", - "description": "Eventarc API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/eventarc/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/eventarc/publishing/apiv1": { - "distribution_name": "cloud.google.com/go/eventarc/publishing/apiv1", - "description": "Eventarc Publishing API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\n
cloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/eventarc/publishing/apiv1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/filestore/apiv1": { - "distribution_name": "cloud.google.com/go/filestore/apiv1", - "description": "Cloud Filestore API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/filestore/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/firestore": { - "distribution_name": "cloud.google.com/go/firestore", - "description": "Cloud Firestore API", - "language": "Go", - "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest", - "release_level": "ga", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/firestore/apiv1": { - "distribution_name": "cloud.google.com/go/firestore/apiv1", - "description": "Cloud Firestore API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/firestore/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/functions/apiv1": { - "distribution_name": "cloud.google.com/go/functions/apiv1", - "description": "Cloud Functions API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gam
ing\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/functions/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/functions/apiv2": { - "distribution_name": "cloud.google.com/go/functions/apiv2", - "description": "Cloud Functions API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/functions/apiv2", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/functions/apiv2beta": { - "distribution_name": "cloud.google.com/go/functions/apiv2beta", - "description": "Cloud Functions API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.co
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/functions/metadata": {
-    "distribution_name": "cloud.google.com/go/functions/metadata",
-    "description": "Cloud Functions",
-    "language": "Go",
-    "client_library_type": "manual",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/metadata",
-    "release_level": "alpha",
-    "library_type": "CORE"
-  },
-  "cloud.google.com/go/gaming/apiv1": {
-    "distribution_name": "cloud.google.com/go/gaming/apiv1",
-    "description": "Game Services API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gaming/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/gaming/apiv1beta": {
-    "distribution_name": "cloud.google.com/go/gaming/apiv1beta",
-    "description": "Game Services API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gaming/latest/apiv1beta",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/gkebackup/apiv1": {
-    "distribution_name": "cloud.google.com/go/gkebackup/apiv1",
-    "description": "Backup for GKE API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkebackup/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/gkeconnect/gateway/apiv1beta1": {
-    "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1",
-    "description": "Connect Gateway API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/gkehub/apiv1beta1": {
-    "distribution_name": "cloud.google.com/go/gkehub/apiv1beta1",
-    "description": "GKE Hub API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkehub/latest/apiv1beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/gkemulticloud/apiv1": {
-    "distribution_name": "cloud.google.com/go/gkemulticloud/apiv1",
-    "description": "Anthos Multi-Cloud API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/gsuiteaddons/apiv1": {
-    "distribution_name": "cloud.google.com/go/gsuiteaddons/apiv1",
-    "description": "Google Workspace Add-ons API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gsuiteaddons/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/iam": {
-    "distribution_name": "cloud.google.com/go/iam",
-    "description": "Cloud IAM",
-    "language": "Go",
-    "client_library_type": "manual",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest",
-    "release_level": "ga",
-    "library_type": "CORE"
-  },
-  "cloud.google.com/go/iam/apiv1": {
-    "distribution_name": "cloud.google.com/go/iam/apiv1",
-    "description": "IAM Meta API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv1",
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/iam/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/iam/apiv2": { - "distribution_name": "cloud.google.com/go/iam/apiv2", - "description": "Identity and Access Management (IAM) API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/
gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/iam/apiv2", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/iam/credentials/apiv1": { - "distribution_name": "cloud.google.com/go/iam/credentials/apiv1", - "description": "IAM Service Account Credentials API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/iam/credentials/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/iap/apiv1": { - "distribution_name": "cloud.google.com/go/iap/apiv1", - "description": "Cloud Identity-Aware Proxy API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/g
o/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/iap/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/ids/apiv1": { - "distribution_name": "cloud.google.com/go/ids/apiv1", - "description": "Cloud IDS API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/ids/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/iot/apiv1": { - "distribution_name": "cloud.google.com/go/iot/apiv1", - "description": "Cloud IoT API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go
/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/iot/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/kms/apiv1": { - "distribution_name": "cloud.google.com/go/kms/apiv1", - "description": "Cloud Key Management Service (KMS) API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/kms/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/kms/inventory/apiv1": { - "distribution_name": "cloud.google.com/go/kms/inventory/apiv1", - "description": "KMS Inventory API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gam
ing\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/kms/inventory/apiv1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/language/apiv1": { - "distribution_name": "cloud.google.com/go/language/apiv1", - "description": "Cloud Natural Language API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/language/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/language/apiv1beta2": { - "distribution_name": "cloud.google.com/go/language/apiv1beta2", - "description": "Cloud Natural Language API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.goo
gle.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/language/apiv1beta2", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/lifesciences/apiv2beta": { - "distribution_name": "cloud.google.com/go/lifesciences/apiv2beta", - "description": "Cloud Life Sciences API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/lifesciences/apiv2beta", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/logging": { - "distribution_name": "cloud.google.com/go/logging", - "description": "Cloud Logging API", - "language": "Go", - "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest", - "release_level": "ga", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/logging/apiv2": { - "distribution_name": "cloud.google.com/go/logging/apiv2", - "description": "Cloud Logging API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/logging/apiv2", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/longrunning/autogen": { - "distribution_name": "cloud.google.com/go/longrunning/autogen", - "description": "Long Running Operations API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.goo
gle.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/longrunning/autogen", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/managedidentities/apiv1": { - "distribution_name": "cloud.google.com/go/managedidentities/apiv1", - "description": "Managed Service for Microsoft Active Directory API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/managedidentities/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/maps/addressvalidation/apiv1": { - "distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1", - "description": "Address Validation API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/
go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/maps/addressvalidation/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": { - "distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", - "description": "Maps Platform Datasets API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", - "release_level": "alpha", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/maps/routing/apiv2": { - "distribution_name": "cloud.google.com/go/maps/routing/apiv2", - "description": "Routes API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncl
oud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/maps/routing/apiv2", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/mediatranslation/apiv1beta1": { - "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1", - "description": "Media Translation API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/mediatranslation/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/memcache/apiv1": { - "distribution_name": "cloud.google.com/go/memcache/apiv1", - "description": "Cloud Memorystore for Memcached API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functi
ons\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/memcache/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/memcache/apiv1beta2": { - "distribution_name": "cloud.google.com/go/memcache/apiv1beta2", - "description": "Cloud Memorystore for Memcached API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/memcache/apiv1beta2", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/metastore/apiv1": { - "distribution_name": "cloud.google.com/go/metastore/apiv1", - "description": "Dataproc Metastore API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.c
om/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/metastore/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/metastore/apiv1alpha": { - "distribution_name": "cloud.google.com/go/metastore/apiv1alpha", - "description": "Dataproc Metastore API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/metastore/apiv1alpha", - "release_level": "alpha", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/metastore/apiv1beta": { - "distribution_name": "cloud.google.com/go/metastore/apiv1beta", - "description": "Dataproc Metastore API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\nclou
d.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/metastore/apiv1beta", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/monitoring/apiv3/v2": { - "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", - "description": "Cloud Monitoring API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/monitoring/apiv3/v2", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/monitoring/dashboard/apiv1": { - "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", - "description": "Cloud Monitoring API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functio
ns\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/monitoring/dashboard/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/monitoring/metricsscope/apiv1": { - "distribution_name": "cloud.google.com/go/monitoring/metricsscope/apiv1", - "description": "Cloud Monitoring API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/monitoring/metricsscope/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/networkconnectivity/apiv1": { - "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1", - "description": "Network Connectivity API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.co
m/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/networkconnectivity/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/networkconnectivity/apiv1alpha1": { - "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1alpha1", - "description": "Network Connectivity API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/networkconnectivity/apiv1alpha1", - "release_level": "alpha", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/networkmanagement/apiv1": { - "distribution_name": "cloud.google.com/go/networkmanagement/apiv1", - "description": "Network Management API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com
/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/networkmanagement/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/networksecurity/apiv1beta1": { - "distribution_name": "cloud.google.com/go/networksecurity/apiv1beta1", - "description": "Network Security API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/networksecurity/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/notebooks/apiv1": { - "distribution_name": "cloud.google.com/go/notebooks/apiv1", - "description": "Notebooks API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com
/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/notebooks/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/notebooks/apiv1beta1": { - "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1", - "description": "Notebooks API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/notebooks/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/optimization/apiv1": { - "distribution_name": "cloud.google.com/go/optimization/apiv1", - "description": "Cloud Optimization API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.g
oogle.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/optimization/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/orchestration/airflow/service/apiv1": { - "distribution_name": "cloud.google.com/go/orchestration/airflow/service/apiv1", - "description": "Cloud Composer API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/orchestration/airflow/service/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/orgpolicy/apiv2": { - "distribution_name": "cloud.google.com/go/orgpolicy/apiv2", - "description": "Organization Policy API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\
ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/orgpolicy/apiv2", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/osconfig/agentendpoint/apiv1": { - "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1", - "description": "OS Config API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/osconfig/agentendpoint/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/osconfig/agentendpoint/apiv1beta": { - "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta", - "description": "OS Config API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.
com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/osconfig/agentendpoint/apiv1beta", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/osconfig/apiv1": { - "distribution_name": "cloud.google.com/go/osconfig/apiv1", - "description": "OS Config API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/osconfig/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/osconfig/apiv1alpha": { - "distribution_name": "cloud.google.com/go/osconfig/apiv1alpha", - "description": "OS Config API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/ga
ming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/osconfig/apiv1alpha", - "release_level": "alpha", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/osconfig/apiv1beta": { - "distribution_name": "cloud.google.com/go/osconfig/apiv1beta", - "description": "OS Config API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/osconfig/apiv1beta", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/oslogin/apiv1": { - "distribution_name": "cloud.google.com/go/oslogin/apiv1", - "description": "Cloud OS Login API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gam
ing\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/oslogin/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/oslogin/apiv1beta": { - "distribution_name": "cloud.google.com/go/oslogin/apiv1beta", - "description": "Cloud OS Login API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/oslogin/apiv1beta", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/phishingprotection/apiv1beta1": { - "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", - "description": "Phishing Protection API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/g
o/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/phishingprotection/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/policytroubleshooter/apiv1": { - "distribution_name": "cloud.google.com/go/policytroubleshooter/apiv1", - "description": "Policy Troubleshooter API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/policytroubleshooter/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/privatecatalog/apiv1beta1": { - "distribution_name": "cloud.google.com/go/privatecatalog/apiv1beta1", - "description": "Cloud Private Catalog API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/
go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/privatecatalog/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/profiler": { - "distribution_name": "cloud.google.com/go/profiler", - "description": "Cloud Profiler", - "language": "Go", - "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/profiler/latest", - "release_level": "ga", - "library_type": "AGENT" - }, - "cloud.google.com/go/pubsub": { - "distribution_name": "cloud.google.com/go/pubsub", - "description": "Cloud PubSub", - "language": "Go", - "client_library_type": "manual", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest", - "release_level": "ga", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/pubsub/apiv1": { - "distribution_name": "cloud.google.com/go/pubsub/apiv1", - "description": "Cloud Pub/Sub API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncl
oud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/pubsub/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/pubsublite": { - "distribution_name": "cloud.google.com/go/pubsublite", - "description": "Cloud PubSub Lite", - "language": "Go", - "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest", - "release_level": "ga", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/pubsublite/apiv1": { - "distribution_name": "cloud.google.com/go/pubsublite/apiv1", - "description": "Pub/Sub Lite API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/recaptchaenterprise/v2/apiv1": {
-    "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1",
-    "description": "reCAPTCHA Enterprise API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1": {
-    "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1",
-    "description": "reCAPTCHA Enterprise API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/recommendationengine/apiv1beta1": {
-    "distribution_name": "cloud.google.com/go/recommendationengine/apiv1beta1",
-    "description": "Recommendations AI",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommendationengine/latest/apiv1beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/recommender/apiv1": {
-    "distribution_name": "cloud.google.com/go/recommender/apiv1",
-    "description": "Recommender API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/recommender/apiv1beta1": {
-    "distribution_name": "cloud.google.com/go/recommender/apiv1beta1",
-    "description": "Recommender API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/redis/apiv1": {
-    "distribution_name": "cloud.google.com/go/redis/apiv1",
-    "description": "Google Cloud Memorystore for Redis API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/redis/apiv1beta1": {
-    "distribution_name": "cloud.google.com/go/redis/apiv1beta1",
-    "description": "Google Cloud Memorystore for Redis API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/resourcemanager/apiv2": {
-    "distribution_name": "cloud.google.com/go/resourcemanager/apiv2",
-    "description": "Cloud Resource Manager API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv2",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/resourcemanager/apiv3": {
-    "distribution_name": "cloud.google.com/go/resourcemanager/apiv3",
-    "description": "Cloud Resource Manager API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv3",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/resourcesettings/apiv1": {
-    "distribution_name": "cloud.google.com/go/resourcesettings/apiv1",
-    "description": "Resource Settings API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcesettings/latest/apiv1",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/retail/apiv2": {
-    "distribution_name": "cloud.google.com/go/retail/apiv2",
-    "description": "Retail API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2",
-    "release_level": "ga",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/retail/apiv2alpha": {
-    "distribution_name": "cloud.google.com/go/retail/apiv2alpha",
-    "description": "Retail API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha",
-    "release_level": "alpha",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/retail/apiv2beta": {
-    "distribution_name": "cloud.google.com/go/retail/apiv2beta",
-    "description": "Retail API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gamin
g\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/retail/apiv2beta", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/rpcreplay": { - "distribution_name": "cloud.google.com/go/rpcreplay", - "description": "RPC Replay", - "language": "Go", - "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/rpcreplay", - "release_level": "ga", - "library_type": "OTHER" - }, - "cloud.google.com/go/run/apiv2": { - "distribution_name": "cloud.google.com/go/run/apiv2", - "description": "Cloud Run Admin API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/run/apiv2", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/scheduler/apiv1": { - "distribution_name": "cloud.google.com/go/scheduler/apiv1", - "description": "Cloud Scheduler API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\nc
loud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/scheduler/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/scheduler/apiv1beta1": { - "distribution_name": "cloud.google.com/go/scheduler/apiv1beta1", - "description": "Cloud Scheduler API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/scheduler/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/secretmanager/apiv1": { - "distribution_name": "cloud.google.com/go/secretmanager/apiv1", - "description": "Secret Manager API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.goo
gle.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/secretmanager/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/security/privateca/apiv1": { - "distribution_name": "cloud.google.com/go/security/privateca/apiv1", - "description": "Certificate Authority API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/security/privateca/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/security/publicca/apiv1beta1": { - "distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1", - "description": "Public Certificate Authority API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.
google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/security/publicca/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/securitycenter/apiv1": { - "distribution_name": "cloud.google.com/go/securitycenter/apiv1", - "description": "Security Command Center API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/securitycenter/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/securitycenter/apiv1beta1": { - "distribution_name": "cloud.google.com/go/securitycenter/apiv1beta1", - "description": "Security Command Center API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/f
unctions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/securitycenter/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/securitycenter/apiv1p1beta1": { - "distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1", - "description": "Security Command Center API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/securitycenter/apiv1p1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/securitycenter/settings/apiv1beta1": { - "distribution_name": "cloud.google.com/go/securitycenter/settings/apiv1beta1", - "description": "Cloud Security Command Center API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go
/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/securitycenter/settings/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/servicecontrol/apiv1": { - "distribution_name": "cloud.google.com/go/servicecontrol/apiv1", - "description": "Service Control API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/servicecontrol/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/servicedirectory/apiv1": { - "distribution_name": "cloud.google.com/go/servicedirectory/apiv1", - "description": "Service Directory API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncl
oud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/servicedirectory/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/servicedirectory/apiv1beta1": { - "distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1", - "description": "Service Directory API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/servicedirectory/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/servicemanagement/apiv1": { - "distribution_name": "cloud.google.com/go/servicemanagement/apiv1", - "description": "Service Management API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/f
unctions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/servicemanagement/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/serviceusage/apiv1": { - "distribution_name": "cloud.google.com/go/serviceusage/apiv1", - "description": "Service Usage API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/serviceusage/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/shell/apiv1": { - "distribution_name": "cloud.google.com/go/shell/apiv1", - "description": "Cloud Shell API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\nclou
d.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/shell/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/spanner": { - "distribution_name": "cloud.google.com/go/spanner", - "description": "Cloud Spanner", - "language": "Go", - "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest", - "release_level": "ga", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/spanner/admin/database/apiv1": { - "distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1", - "description": "Cloud Spanner API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/spanner/admin/database/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/spanner/admin/instance/apiv1": { - "distribution_name": "cloud.google.com/go/spanner/admin/instance/apiv1", - "description": "Cloud Spanner Instance Admin API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncl
oud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/spanner/admin/instance/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/spanner/apiv1": { - "distribution_name": "cloud.google.com/go/spanner/apiv1", - "description": "Cloud Spanner API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/spanner/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/speech/apiv1": { - "distribution_name": "cloud.google.com/go/speech/apiv1", - "description": "Cloud Speech-to-Text API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming
\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/speech/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/speech/apiv1p1beta1": { - "distribution_name": "cloud.google.com/go/speech/apiv1p1beta1", - "description": "Cloud Speech-to-Text API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/speech/latest/apiv1p1beta1",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/speech/apiv2": {
-    "distribution_name": "cloud.google.com/go/speech/apiv2",
-    "description": "Cloud Speech-to-Text API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2",
-    "release_level": "beta",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/storage": {
-    "distribution_name": "cloud.google.com/go/storage",
-    "description": "Cloud Storage (GCS)",
-    "language": "Go",
-    "client_library_type": "manual",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest",
-    "release_level": "ga",
-    "library_type": "GAPIC_MANUAL"
-  },
-  "cloud.google.com/go/storage/internal/apiv2": {
-    "distribution_name": "cloud.google.com/go/storage/internal/apiv2",
-    "description": "Cloud Storage API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/internal/apiv2",
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/storage/internal/apiv2", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/storageinsights/apiv1": { - "distribution_name": "cloud.google.com/go/storageinsights/apiv1", - "description": "Storage Insights API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\nclo
ud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/storageinsights/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/storagetransfer/apiv1": { - "distribution_name": "cloud.google.com/go/storagetransfer/apiv1", - "description": "Storage Transfer API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/storagetransfer/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/support/apiv2": { - "distribution_name": "cloud.google.com/go/support/apiv2", - "description": "Google Cloud Support API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com
/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/support/apiv2", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/talent/apiv4": { - "distribution_name": "cloud.google.com/go/talent/apiv4", - "description": "Cloud Talent Solution API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/talent/apiv4", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/talent/apiv4beta1": { - "distribution_name": "cloud.google.com/go/talent/apiv4beta1", - "description": "Cloud Talent Solution API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com
/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/talent/apiv4beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/texttospeech/apiv1": { - "distribution_name": "cloud.google.com/go/texttospeech/apiv1", - "description": "Cloud Text-to-Speech API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/texttospeech/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/tpu/apiv1": { - "distribution_name": "cloud.google.com/go/tpu/apiv1", - "description": "Cloud TPU API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.goog
le.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/tpu/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/trace/apiv1": { - "distribution_name": "cloud.google.com/go/trace/apiv1", - "description": "Stackdriver Trace API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/trace/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/trace/apiv2": { - "distribution_name": "cloud.google.com/go/trace/apiv2", - "description": "Stackdriver Trace API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud
.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/trace/apiv2", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/translate/apiv3": { - "distribution_name": "cloud.google.com/go/translate/apiv3", - "description": "Cloud Translation API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/translate/apiv3", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/video/livestream/apiv1": { - "distribution_name": "cloud.google.com/go/video/livestream/apiv1", - "description": "Live Stream API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.
com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/video/livestream/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/video/stitcher/apiv1": { - "distribution_name": "cloud.google.com/go/video/stitcher/apiv1", - "description": "Video Stitcher API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/video/stitcher/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/video/transcoder/apiv1": { - "distribution_name": "cloud.google.com/go/video/transcoder/apiv1", - "description": "Transcoder API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.goo
gle.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/video/transcoder/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/videointelligence/apiv1": { - "distribution_name": "cloud.google.com/go/videointelligence/apiv1", - "description": "Cloud Video Intelligence API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/videointelligence/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/videointelligence/apiv1beta2": { - "distribution_name": "cloud.google.com/go/videointelligence/apiv1beta2", - "description": "Google Cloud Video Intelligence API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\nclou
d.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/videointelligence/apiv1beta2", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/videointelligence/apiv1p3beta1": { - "distribution_name": "cloud.google.com/go/videointelligence/apiv1p3beta1", - "description": "Cloud Video Intelligence API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/videointelligence/apiv1p3beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/vision/v2/apiv1": { - "distribution_name": "cloud.google.com/go/vision/v2/apiv1", - "description": "Cloud Vision API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.goo
gle.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/vision/v2/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/vision/v2/apiv1p1beta1": { - "distribution_name": "cloud.google.com/go/vision/v2/apiv1p1beta1", - "description": "Cloud Vision API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/vision/v2/apiv1p1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/vmmigration/apiv1": { - "distribution_name": "cloud.google.com/go/vmmigration/apiv1", - "description": "VM Migration API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.
com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/vmmigration/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/vmwareengine/apiv1": { - "distribution_name": "cloud.google.com/go/vmwareengine/apiv1", - "description": "VMware Engine API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/vmwareengine/latest/apiv1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - },
- "cloud.google.com/go/vpcaccess/apiv1": { - "distribution_name": "cloud.google.com/go/vpcaccess/apiv1", - "description": "Serverless VPC Access API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vpcaccess/latest/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - },
- "cloud.google.com/go/webrisk/apiv1": { - "distribution_name": "cloud.google.com/go/webrisk/apiv1", - "description": "Web Risk API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - },
- "cloud.google.com/go/webrisk/apiv1beta1": { - "distribution_name": "cloud.google.com/go/webrisk/apiv1beta1", - "description": "Web Risk API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - },
- "cloud.google.com/go/websecurityscanner/apiv1": { - "distribution_name": "cloud.google.com/go/websecurityscanner/apiv1", - "description": "Web Security Scanner API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/websecurityscanner/latest/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - },
- "cloud.google.com/go/workflows/apiv1": { - "distribution_name": "cloud.google.com/go/workflows/apiv1", - "description": "Workflows API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - },
- "cloud.google.com/go/workflows/apiv1beta": { - "distribution_name": "cloud.google.com/go/workflows/apiv1beta", - "description": "Workflows API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1beta", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - },
- "cloud.google.com/go/workflows/executions/apiv1": { - "distribution_name": "cloud.google.com/go/workflows/executions/apiv1", - "description": "Workflow Executions API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - },
- "cloud.google.com/go/workflows/executions/apiv1beta": { - "distribution_name": "cloud.google.com/go/workflows/executions/apiv1beta", - "description": "Workflow Executions API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1beta", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - },
- "cloud.google.com/go/workstations/apiv1": { - "distribution_name": "cloud.google.com/go/workstations/apiv1", - "description": "Cloud Workstations API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - },
- "cloud.google.com/go/workstations/apiv1beta": { - "distribution_name": "cloud.google.com/go/workstations/apiv1beta", - "description": "Cloud Workstations API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1beta", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - } -}
diff --git a/vendor/cloud.google.com/go/internal/README.md b/vendor/cloud.google.com/go/internal/README.md deleted file mode 100644 index b38a4c1a2d..0000000000 --- a/vendor/cloud.google.com/go/internal/README.md +++ /dev/null @@ -1,44 +0,0 @@
-# Internal - -This directory contains internal code for cloud.google.com/go packages. - -## .repo-metadata-full.json - -`.repo-metadata-full.json` contains metadata about the packages in this repo. It -is generated by `internal/gapicgen/generator`. It's processed by external tools -to build lists of all of the packages. - -Don't make breaking changes to the format without consulting with the external -tools. - -One day, we may want to create individual `.repo-metadata.json` files next to -each package, which is the pattern followed by some other languages. External -tools would then talk to pkg.go.dev or some other service to get the overall -list of packages and use the `.repo-metadata.json` files to get the additional -metadata required. For now, `.repo-metadata-full.json` includes everything. - -## cloudbuild.yaml - -The `cloudbuild.yaml` Cloud Build configuration currently supports: - -* Building a docker container from the `internal/postprocessor/Dockerfile`. - -The build can be run locally in the `google-cloud-go` root directory: - -```bash -gcloud builds submit --project=cloud-devrel-kokoro-resources --config=internal/cloudbuild.yaml -``` - -See the [postprocessor/README](postprocessor/README.md) for instructions -regarding updating the post-processor docker container. - -### Updating OwlBot SHA - -You may want to manually update which version of the post-processor will be -used -- to do this you need to update the SHA in the OwlBot lock file. - -See the [postprocessor/README](postprocessor/README.md) for detailed -instructions. - -*Note*: OwlBot will eventually open a pull request to update this value if it -discovers a new version of the container.
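For context: the README deleted above describes `.repo-metadata-full.json` as machine-readable package metadata consumed by external tooling. A minimal sketch of such a consumer, assuming only the field names visible in the deleted entries (the struct name and file path here are illustrative, not part of this PR):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// repoMetadata mirrors the fields visible in the deleted entries above.
type repoMetadata struct {
	DistributionName  string `json:"distribution_name"`
	Description       string `json:"description"`
	Language          string `json:"language"`
	ClientLibraryType string `json:"client_library_type"`
	DocsURL           string `json:"docs_url"`
	ReleaseLevel      string `json:"release_level"`
	LibraryType       string `json:"library_type"`
}

func main() {
	raw, err := os.ReadFile(".repo-metadata-full.json") // path is illustrative
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// The file is a single JSON object keyed by Go import path.
	entries := map[string]repoMetadata{}
	if err := json.Unmarshal(raw, &entries); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for path, m := range entries {
		fmt.Printf("%-60s %s (%s)\n", path, m.Description, m.ReleaseLevel)
	}
}
```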
diff --git a/vendor/cloud.google.com/go/internal/annotate.go b/vendor/cloud.google.com/go/internal/annotate.go deleted file mode 100644 index 30d7bcf77a..0000000000 --- a/vendor/cloud.google.com/go/internal/annotate.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "fmt" - - "google.golang.org/api/googleapi" - "google.golang.org/grpc/status" -) - -// Annotate prepends msg to the error message in err, attempting -// to preserve other information in err, like an error code. -// -// Annotate panics if err is nil. -// -// Annotate knows about these error types: -// - "google.golang.org/grpc/status".Status -// - "google.golang.org/api/googleapi".Error -// If the error is not one of these types, Annotate behaves -// like -// -// fmt.Errorf("%s: %v", msg, err) -func Annotate(err error, msg string) error { - if err == nil { - panic("Annotate called with nil") - } - if s, ok := status.FromError(err); ok { - p := s.Proto() - p.Message = msg + ": " + p.Message - return status.ErrorProto(p) - } - if g, ok := err.(*googleapi.Error); ok { - g.Message = msg + ": " + g.Message - return g - } - return fmt.Errorf("%s: %v", msg, err) -} - -// Annotatef uses format and args to format a string, then calls Annotate. -func Annotatef(err error, format string, args ...interface{}) error { - return Annotate(err, fmt.Sprintf(format, args...)) -} diff --git a/vendor/cloud.google.com/go/internal/cloudbuild.yaml b/vendor/cloud.google.com/go/internal/cloudbuild.yaml deleted file mode 100644 index 71281cec24..0000000000 --- a/vendor/cloud.google.com/go/internal/cloudbuild.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# note: /workspace is a special directory in the docker image where all the files in this folder -# get placed on your behalf - -timeout: 7200s # 2 hours -steps: -- name: gcr.io/cloud-builders/docker - args: ['build', '-t', 'gcr.io/cloud-devrel-public-resources/owlbot-go', '-f', 'postprocessor/Dockerfile', '.'] - dir: internal - -images: -- gcr.io/cloud-devrel-public-resources/owlbot-go:latest diff --git a/vendor/cloud.google.com/go/internal/optional/optional.go b/vendor/cloud.google.com/go/internal/optional/optional.go deleted file mode 100644 index 72780f764c..0000000000 --- a/vendor/cloud.google.com/go/internal/optional/optional.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package optional provides versions of primitive types that can -// be nil. These are useful in methods that update some of an API object's -// fields. -package optional - -import ( - "fmt" - "strings" - "time" -) - -type ( - // Bool is either a bool or nil. - Bool interface{} - - // String is either a string or nil. - String interface{} - - // Int is either an int or nil. - Int interface{} - - // Uint is either a uint or nil. - Uint interface{} - - // Float64 is either a float64 or nil. - Float64 interface{} - - // Duration is either a time.Duration or nil. - Duration interface{} -) - -// ToBool returns its argument as a bool. -// It panics if its argument is nil or not a bool. -func ToBool(v Bool) bool { - x, ok := v.(bool) - if !ok { - doPanic("Bool", v) - } - return x -} - -// ToString returns its argument as a string. -// It panics if its argument is nil or not a string. -func ToString(v String) string { - x, ok := v.(string) - if !ok { - doPanic("String", v) - } - return x -} - -// ToInt returns its argument as an int. -// It panics if its argument is nil or not an int. -func ToInt(v Int) int { - x, ok := v.(int) - if !ok { - doPanic("Int", v) - } - return x -} - -// ToUint returns its argument as a uint. -// It panics if its argument is nil or not a uint. -func ToUint(v Uint) uint { - x, ok := v.(uint) - if !ok { - doPanic("Uint", v) - } - return x -} - -// ToFloat64 returns its argument as a float64. -// It panics if its argument is nil or not a float64. -func ToFloat64(v Float64) float64 { - x, ok := v.(float64) - if !ok { - doPanic("Float64", v) - } - return x -} - -// ToDuration returns its argument as a time.Duration. -// It panics if its argument is nil or not a time.Duration. 
-func ToDuration(v Duration) time.Duration { - x, ok := v.(time.Duration) - if !ok { - doPanic("Duration", v) - } - return x -} - -func doPanic(capType string, v interface{}) { - panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v)) -} diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go deleted file mode 100644 index 2943a6d0b4..0000000000 --- a/vendor/cloud.google.com/go/internal/retry.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "context" - "fmt" - "time" - - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/grpc/status" -) - -// Retry calls the supplied function f repeatedly according to the provided -// backoff parameters. It returns when one of the following occurs: -// When f's first return value is true, Retry immediately returns with f's second -// return value. -// When the provided context is done, Retry returns with an error that -// includes both ctx.Error() and the last error returned by f. -func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { - return retry(ctx, bo, f, gax.Sleep) -} - -func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), - sleep func(context.Context, time.Duration) error) error { - var lastErr error - for { - stop, err := f() - if stop { - return err - } - // Remember the last "real" error from f. - if err != nil && err != context.Canceled && err != context.DeadlineExceeded { - lastErr = err - } - p := bo.Pause() - if ctxErr := sleep(ctx, p); ctxErr != nil { - if lastErr != nil { - return wrappedCallErr{ctxErr: ctxErr, wrappedErr: lastErr} - } - return ctxErr - } - } -} - -// Use this error type to return an error which allows introspection of both -// the context error and the error from the service. -type wrappedCallErr struct { - ctxErr error - wrappedErr error -} - -func (e wrappedCallErr) Error() string { - return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr) -} - -func (e wrappedCallErr) Unwrap() error { - return e.wrappedErr -} - -// Is allows errors.Is to match the error from the call as well as context -// sentinel errors. -func (e wrappedCallErr) Is(err error) bool { - return e.ctxErr == err || e.wrappedErr == err -} - -// GRPCStatus allows the wrapped error to be used with status.FromError. 
-func (e wrappedCallErr) GRPCStatus() *status.Status { - if s, ok := status.FromError(e.wrappedErr); ok { - return s - } - return nil -} diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go deleted file mode 100644 index c201d343e9..0000000000 --- a/vendor/cloud.google.com/go/internal/trace/trace.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "context" - "fmt" - - "go.opencensus.io/trace" - "golang.org/x/xerrors" - "google.golang.org/api/googleapi" - "google.golang.org/genproto/googleapis/rpc/code" - "google.golang.org/grpc/status" -) - -// StartSpan adds a span to the trace with the given name. -func StartSpan(ctx context.Context, name string) context.Context { - ctx, _ = trace.StartSpan(ctx, name) - return ctx -} - -// EndSpan ends a span with the given error. -func EndSpan(ctx context.Context, err error) { - span := trace.FromContext(ctx) - if err != nil { - span.SetStatus(toStatus(err)) - } - span.End() -} - -// toStatus interrogates an error and converts it to an appropriate -// OpenCensus status. -func toStatus(err error) trace.Status { - var err2 *googleapi.Error - if ok := xerrors.As(err, &err2); ok { - return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message} - } else if s, ok := status.FromError(err); ok { - return trace.Status{Code: int32(s.Code()), Message: s.Message()} - } else { - return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()} - } -} - -// TODO(deklerk): switch to using OpenCensus function when it becomes available. -// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto -func httpStatusCodeToOCCode(httpStatusCode int) int32 { - switch httpStatusCode { - case 200: - return int32(code.Code_OK) - case 499: - return int32(code.Code_CANCELLED) - case 500: - return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS - case 400: - return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE - case 504: - return int32(code.Code_DEADLINE_EXCEEDED) - case 404: - return int32(code.Code_NOT_FOUND) - case 409: - return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED - case 403: - return int32(code.Code_PERMISSION_DENIED) - case 401: - return int32(code.Code_UNAUTHENTICATED) - case 429: - return int32(code.Code_RESOURCE_EXHAUSTED) - case 501: - return int32(code.Code_UNIMPLEMENTED) - case 503: - return int32(code.Code_UNAVAILABLE) - default: - return int32(code.Code_UNKNOWN) - } -} - -// TODO: (odeke-em): perhaps just pass around spans due to the cost -// incurred from using trace.FromContext(ctx) yet we could avoid -// throwing away the work done by ctx, span := trace.StartSpan. 
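The trace helpers being deleted here reduce to one pattern applied around every API call: open a span named after the call, record an error status if the call failed, close the span. A hedged, self-contained sketch of that pattern using the same OpenCensus API (the span name and simulated error are placeholders):

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"go.opencensus.io/trace"
)

// doCall shows the open-span / record-status / close pattern the deleted
// StartSpan and EndSpan helpers wrapped.
func doCall(ctx context.Context) (err error) {
	ctx, span := trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
	defer func() {
		if err != nil {
			// 2 is code.Code_UNKNOWN, the fallback used by toStatus above.
			span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
		}
		span.End()
	}()

	_ = ctx // in real code, ctx (carrying the span) is passed to the RPC
	return errors.New("simulated RPC failure")
}

func main() {
	fmt.Println(doCall(context.Background()))
}
```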
-func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { - var attrs []trace.Attribute - for k, v := range attrMap { - var a trace.Attribute - switch v := v.(type) { - case string: - a = trace.StringAttribute(k, v) - case bool: - a = trace.BoolAttribute(k, v) - case int: - a = trace.Int64Attribute(k, int64(v)) - case int64: - a = trace.Int64Attribute(k, v) - default: - a = trace.StringAttribute(k, fmt.Sprintf("%#v", v)) - } - attrs = append(attrs, a) - } - trace.FromContext(ctx).Annotatef(attrs, format, args...) -} diff --git a/vendor/cloud.google.com/go/internal/version/update_version.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh deleted file mode 100644 index d7c5a3e219..0000000000 --- a/vendor/cloud.google.com/go/internal/version/update_version.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -today=$(date +%Y%m%d) - -sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE - diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go deleted file mode 100644 index fd9dd91e98..0000000000 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate ./update_version.sh - -// Package version contains version information for Google Cloud Client -// Libraries for Go, as reported in request headers. -package version - -import ( - "runtime" - "strings" - "unicode" -) - -// Repo is the current version of the client libraries in this -// repo. It should be a date in YYYYMMDD format. -const Repo = "20201104" - -// Go returns the Go runtime version. The returned string -// has no whitespace. 
-func Go() string { - return goVersion -} - -var goVersion = goVer(runtime.Version()) - -const develPrefix = "devel +" - -func goVer(s string) string { - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - s += "-" + prerelease - } - return s - } - return "" -} - -func notSemverRune(r rune) bool { - return !strings.ContainsRune("0123456789.", r) -} diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md deleted file mode 100644 index 3f4097faea..0000000000 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ /dev/null @@ -1,343 +0,0 @@ -# Changes - - -## [1.29.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.1...storage/v1.29.0) (2023-01-19) - - -### Features - -* **storage:** Add ComponentCount as part of ObjectAttrs ([#7230](https://github.com/googleapis/google-cloud-go/issues/7230)) ([a19bca6](https://github.com/googleapis/google-cloud-go/commit/a19bca60704b4fbb674cf51d828580aa653c8210)) -* **storage:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) - - -### Documentation - -* **storage/internal:** Corrected typos and spellings ([7357077](https://github.com/googleapis/google-cloud-go/commit/735707796d81d7f6f32fc3415800c512fe62297e)) - -## [1.28.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.0...storage/v1.28.1) (2022-12-02) - - -### Bug Fixes - -* **storage:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5)) - -## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03) - - -### Features - -* **storage/internal:** Add routing annotations ([ce3f945](https://github.com/googleapis/google-cloud-go/commit/ce3f9458e511eca0910992763232abbcd64698f1)) -* **storage:** Add Autoclass support ([#6828](https://github.com/googleapis/google-cloud-go/issues/6828)) ([f7c7f41](https://github.com/googleapis/google-cloud-go/commit/f7c7f41e4d7fcffe05860e1114cb20f40c869da8)) - - -### Bug Fixes - -* **storage:** Fix read-write race in Writer.Write ([#6817](https://github.com/googleapis/google-cloud-go/issues/6817)) ([4766d3e](https://github.com/googleapis/google-cloud-go/commit/4766d3e1004119b93c6bd352024b5bf3404252eb)) -* **storage:** Fix request token passing for Copier.Run ([#6863](https://github.com/googleapis/google-cloud-go/issues/6863)) ([faaab06](https://github.com/googleapis/google-cloud-go/commit/faaab066d8e509dc440bcbc87391557ecee7dbf2)), refs [#6857](https://github.com/googleapis/google-cloud-go/issues/6857) - - -### Documentation - -* **storage:** Update broken links for SignURL and PostPolicy ([#6779](https://github.com/googleapis/google-cloud-go/issues/6779)) ([776138b](https://github.com/googleapis/google-cloud-go/commit/776138bc06a1e5fd45acbf8f9d36e9dc6ce31dd3)) - -## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22) - - -### Features - -* **storage:** Find GoogleAccessID when using impersonated creds 
([#6591](https://github.com/googleapis/google-cloud-go/issues/6591)) ([a2d16a7](https://github.com/googleapis/google-cloud-go/commit/a2d16a7a778c85d13217fc67955ec5dac1da34e8)) - -## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29) - - -### Features - -* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362) - - -### Bug Fixes - -* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e)) - -## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11) - - -### Features - -* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f)) -* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e)) - -## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20) - - -### Features - -* **storage:** add Custom Placement Config Dual Region Support ([#6294](https://github.com/googleapis/google-cloud-go/issues/6294)) ([5a8c607](https://github.com/googleapis/google-cloud-go/commit/5a8c607e3a9a3265887e27cb13f8943f3e3fa23d)) - -## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.1...storage/v1.23.0) (2022-06-23) - - -### Features - -* **storage:** add support for OLM Prefix/Suffix ([#5929](https://github.com/googleapis/google-cloud-go/issues/5929)) ([ec21d10](https://github.com/googleapis/google-cloud-go/commit/ec21d10d6d1b01aa97a52560319775041707690d)) -* **storage:** support AbortIncompleteMultipartUpload LifecycleAction ([#5812](https://github.com/googleapis/google-cloud-go/issues/5812)) ([fdec929](https://github.com/googleapis/google-cloud-go/commit/fdec929b9da6e01dda0ab3c72544d44d6bd82bd4)), refs [#5795](https://github.com/googleapis/google-cloud-go/issues/5795) - - -### Bug Fixes - -* **storage:** allow for Age *int64 type and int64 type ([#6230](https://github.com/googleapis/google-cloud-go/issues/6230)) ([cc7acb8](https://github.com/googleapis/google-cloud-go/commit/cc7acb8bffb31828e9e96d4834a65f9728494473)) - -### [1.22.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.0...storage/v1.22.1) (2022-05-19) - - -### Bug Fixes - -* **storage:** bump genproto, remove deadcode ([#6059](https://github.com/googleapis/google-cloud-go/issues/6059)) ([bb10f9f](https://github.com/googleapis/google-cloud-go/commit/bb10f9faca57dc3b987e0fb601090887b3507f07)) -* **storage:** remove field that no longer exists ([#6061](https://github.com/googleapis/google-cloud-go/issues/6061)) ([ee150cf](https://github.com/googleapis/google-cloud-go/commit/ee150cfd194463ddfcb59898cfb0237e47777973)) - -## [1.22.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.21.0...storage/v1.22.0) (2022-03-31) - - -### Features - -* **storage:** allow specifying includeTrailingDelimiter ([#5617](https://github.com/googleapis/google-cloud-go/issues/5617)) 
([a34503b](https://github.com/googleapis/google-cloud-go/commit/a34503bc0f0b95399285e8db66976b227e3b0072)) -* **storage:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) - - -### Bug Fixes - -* **storage:** respect STORAGE_EMULATOR_HOST in signedURL ([#5673](https://github.com/googleapis/google-cloud-go/issues/5673)) ([1c249ae](https://github.com/googleapis/google-cloud-go/commit/1c249ae5b4980cf53fa74635943ca8bf6a96a341)) - -## [1.21.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.20.0...storage/v1.21.0) (2022-02-17) - - -### Features - -* **storage:** add better version metadata to calls ([#5507](https://github.com/googleapis/google-cloud-go/issues/5507)) ([13fe0bc](https://github.com/googleapis/google-cloud-go/commit/13fe0bc0d8acbffd46b59ab69b25449f1cbd6a88)), refs [#2749](https://github.com/googleapis/google-cloud-go/issues/2749) -* **storage:** add Writer.ChunkRetryDeadline ([#5482](https://github.com/googleapis/google-cloud-go/issues/5482)) ([498a746](https://github.com/googleapis/google-cloud-go/commit/498a746769fa43958b92af8875b927879947128e)) - -## [1.20.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.19.0...storage/v1.20.0) (2022-02-04) - - -### Features - -* **storage/internal:** Update definition of RewriteObjectRequest to bring to parity with JSON API support ([#5447](https://www.github.com/googleapis/google-cloud-go/issues/5447)) ([7d175ef](https://www.github.com/googleapis/google-cloud-go/commit/7d175ef12b7b3e75585427f5dd2aab4a175e92d6)) - -## [1.19.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.2...storage/v1.19.0) (2022-01-25) - - -### Features - -* **storage:** add fully configurable and idempotency-aware retry strategy ([#5384](https://www.github.com/googleapis/google-cloud-go/issues/5384), [#5185](https://www.github.com/googleapis/google-cloud-go/issues/5185), [#5170](https://www.github.com/googleapis/google-cloud-go/issues/5170), [#5223](https://www.github.com/googleapis/google-cloud-go/issues/5223), [#5221](https://www.github.com/googleapis/google-cloud-go/issues/5221), [#5193](https://www.github.com/googleapis/google-cloud-go/issues/5193), [#5159](https://www.github.com/googleapis/google-cloud-go/issues/5159), [#5165](https://www.github.com/googleapis/google-cloud-go/issues/5165), [#5166](https://www.github.com/googleapis/google-cloud-go/issues/5166), [#5210](https://www.github.com/googleapis/google-cloud-go/issues/5210), [#5172](https://www.github.com/googleapis/google-cloud-go/issues/5172), [#5314](https://www.github.com/googleapis/google-cloud-go/issues/5314)) - * This release contains changes to fully align this library's retry strategy - with best practices as described in the - Cloud Storage [docs](https://cloud.google.com/storage/docs/retry-strategy). - * The library will now retry only idempotent operations by default. This means - that for certain operations, including object upload, compose, rewrite, - update, and delete, requests will not be retried by default unless - [idempotency conditions](https://cloud.google.com/storage/docs/retry-strategy#idempotency) - for the request have been met. - * The library now has methods to configure aspects of retry policy for - API calls, including which errors are retried, the timing of the - exponential backoff, and how idempotency is taken into account. 
- * If you wish to re-enable retries for a non-idempotent request, use the - [RetryAlways](https://pkg.go.dev/cloud.google.com/go/storage@main#RetryAlways) - policy. - * For full details on how to configure retries, see the - [package docs](https://pkg.go.dev/cloud.google.com/go/storage@main#hdr-Retrying_failed_requests) - and the - [Cloud Storage docs](https://cloud.google.com/storage/docs/retry-strategy) -* **storage:** GenerateSignedPostPolicyV4 can use existing creds to authenticate ([#5105](https://www.github.com/googleapis/google-cloud-go/issues/5105)) ([46489f4](https://www.github.com/googleapis/google-cloud-go/commit/46489f4c8a634068a3e7cf2fd5e5ca11b555c0a8)) -* **storage:** post policy can be signed with a fn that takes raw bytes ([#5079](https://www.github.com/googleapis/google-cloud-go/issues/5079)) ([25d1278](https://www.github.com/googleapis/google-cloud-go/commit/25d1278cab539fbfdd8563ed6b297e30d3fe555c)) -* **storage:** add rpo (turbo replication) support ([#5003](https://www.github.com/googleapis/google-cloud-go/issues/5003)) ([3bd5995](https://www.github.com/googleapis/google-cloud-go/commit/3bd59958e0c06d2655b67fcb5410668db3c52af0)) - -### Bug Fixes - -* **storage:** fix nil check in gRPC Reader ([#5376](https://www.github.com/googleapis/google-cloud-go/issues/5376)) ([5e7d722](https://www.github.com/googleapis/google-cloud-go/commit/5e7d722d18a62b28ba98169b3bdbb49401377264)) - -### [1.18.2](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.1...storage/v1.18.2) (2021-10-18) - - -### Bug Fixes - -* **storage:** upgrade genproto ([#4993](https://www.github.com/googleapis/google-cloud-go/issues/4993)) ([5ca462d](https://www.github.com/googleapis/google-cloud-go/commit/5ca462d99fe851b7cddfd70108798e2fa959bdfd)), refs [#4991](https://www.github.com/googleapis/google-cloud-go/issues/4991) - -### [1.18.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.0...storage/v1.18.1) (2021-10-14) - - -### Bug Fixes - -* **storage:** don't assume auth from a client option ([#4982](https://www.github.com/googleapis/google-cloud-go/issues/4982)) ([e17334d](https://www.github.com/googleapis/google-cloud-go/commit/e17334d1fe7645d89d14ae7148313498b984dfbb)) - -## [1.18.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.17.0...storage/v1.18.0) (2021-10-11) - - -### Features - -* **storage:** returned wrapped error for timeouts ([#4802](https://www.github.com/googleapis/google-cloud-go/issues/4802)) ([0e102a3](https://www.github.com/googleapis/google-cloud-go/commit/0e102a385dc67a06f6b444b3a93e6998428529be)), refs [#4197](https://www.github.com/googleapis/google-cloud-go/issues/4197) -* **storage:** SignedUrl can use existing creds to authenticate ([#4604](https://www.github.com/googleapis/google-cloud-go/issues/4604)) ([b824c89](https://www.github.com/googleapis/google-cloud-go/commit/b824c897e6941270747b612f6d36a8d6ae081315)) - - -### Bug Fixes - -* **storage:** update PAP to use inherited instead of unspecified ([#4909](https://www.github.com/googleapis/google-cloud-go/issues/4909)) ([dac26b1](https://www.github.com/googleapis/google-cloud-go/commit/dac26b1af2f2972f12775341173bcc5f982438b8)) - -## [1.17.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.1...storage/v1.17.0) (2021-09-28) - - -### Features - -* **storage:** add projectNumber field to bucketAttrs. 
([#4805](https://www.github.com/googleapis/google-cloud-go/issues/4805)) ([07343af](https://www.github.com/googleapis/google-cloud-go/commit/07343afc15085b164cc41d202d13f9d46f5c0d02)) - - -### Bug Fixes - -* **storage:** align retry idempotency (part 1) ([#4715](https://www.github.com/googleapis/google-cloud-go/issues/4715)) ([ffa903e](https://www.github.com/googleapis/google-cloud-go/commit/ffa903eeec61aa3869e5220e2f09371127b5c393)) - -### [1.16.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.0...storage/v1.16.1) (2021-08-30) - - -### Bug Fixes - -* **storage/internal:** Update encryption_key fields to "bytes" type. fix: Improve date/times and field name clarity in lifecycle conditions. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **storage:** accept emulator env var without scheme ([#4616](https://www.github.com/googleapis/google-cloud-go/issues/4616)) ([5f8cbb9](https://www.github.com/googleapis/google-cloud-go/commit/5f8cbb98070109e2a34409ac775ed63b94d37efd)) -* **storage:** preserve supplied endpoint's scheme ([#4609](https://www.github.com/googleapis/google-cloud-go/issues/4609)) ([ee2756f](https://www.github.com/googleapis/google-cloud-go/commit/ee2756fb0a335d591464a770c9fa4f8fe0ba2e01)) -* **storage:** remove unnecessary variable ([#4608](https://www.github.com/googleapis/google-cloud-go/issues/4608)) ([27fc784](https://www.github.com/googleapis/google-cloud-go/commit/27fc78456fb251652bdf5cdb493734a7e1e643e1)) -* **storage:** retry LockRetentionPolicy ([#4439](https://www.github.com/googleapis/google-cloud-go/issues/4439)) ([09879ea](https://www.github.com/googleapis/google-cloud-go/commit/09879ea80cb67f9bfd8fc9384b0fda335567cba9)), refs [#4437](https://www.github.com/googleapis/google-cloud-go/issues/4437) -* **storage:** revise Reader to send XML preconditions ([#4479](https://www.github.com/googleapis/google-cloud-go/issues/4479)) ([e36b29a](https://www.github.com/googleapis/google-cloud-go/commit/e36b29a3d43bce5c1c044f7daf6e1db00b0a49e0)), refs [#4470](https://www.github.com/googleapis/google-cloud-go/issues/4470) - -## [1.16.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.15.0...storage/v1.16.0) (2021-06-28) - - -### Features - -* **storage:** support PublicAccessPrevention ([#3608](https://www.github.com/googleapis/google-cloud-go/issues/3608)) ([99bc782](https://www.github.com/googleapis/google-cloud-go/commit/99bc782fb50a47602b45278384ef5d5b5da9263b)), refs [#3203](https://www.github.com/googleapis/google-cloud-go/issues/3203) - - -### Bug Fixes - -* **storage:** fix Writer.ChunkSize validation ([#4255](https://www.github.com/googleapis/google-cloud-go/issues/4255)) ([69c2e9d](https://www.github.com/googleapis/google-cloud-go/commit/69c2e9dc6303e1a004d3104a8178532fa738e742)), refs [#4167](https://www.github.com/googleapis/google-cloud-go/issues/4167) -* **storage:** try to reopen for failed Reads ([#4226](https://www.github.com/googleapis/google-cloud-go/issues/4226)) ([564102b](https://www.github.com/googleapis/google-cloud-go/commit/564102b335dbfb558bec8af883e5f898efb5dd10)), refs [#3040](https://www.github.com/googleapis/google-cloud-go/issues/3040) - -## [1.15.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.13.0...storage/v1.15.0) (2021-04-21) - - -### Features - -* **transport** Bump dependency on google.golang.org/api to pick up HTTP/2 - config updates (see 
[googleapis/google-api-go-client#882](https://github.com/googleapis/google-api-go-client/pull/882)). - -### Bug Fixes - -* **storage:** retry io.ErrUnexpectedEOF ([#3957](https://www.github.com/googleapis/google-cloud-go/issues/3957)) ([f6590cd](https://www.github.com/googleapis/google-cloud-go/commit/f6590cdc26c8479be5df48949fa59f879e0c24fc)) - - -## v1.14.0 - -- Updates to various dependencies. - -## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.12.0...v1.13.0) (2021-02-03) - - -### Features - -* **storage:** add missing StorageClass in BucketAttrsToUpdate ([#3038](https://www.github.com/googleapis/google-cloud-go/issues/3038)) ([2fa1b72](https://www.github.com/googleapis/google-cloud-go/commit/2fa1b727f8a7b20aa62fe0990530744f6c109be0)) -* **storage:** add projection parameter for BucketHandle.Objects() ([#3549](https://www.github.com/googleapis/google-cloud-go/issues/3549)) ([9b9c3dc](https://www.github.com/googleapis/google-cloud-go/commit/9b9c3dce3ee10af5b6c4d070821bf47a861efd5b)) - - -### Bug Fixes - -* **storage:** fix endpoint selection logic ([#3172](https://www.github.com/googleapis/google-cloud-go/issues/3172)) ([99edf0d](https://www.github.com/googleapis/google-cloud-go/commit/99edf0d211a9e617f2586fbc83b6f9630da3c537)) - -## v1.12.0 -- V4 signed URL fixes: - - Fix encoding of spaces in query parameters. - - Add fields that were missing from PostPolicyV4 policy conditions. -- Fix Query to correctly list prefixes as well as objects when SetAttrSelection - is used. - -## v1.11.0 -- Add support for CustomTime and NoncurrentTime object lifecycle management - features. - -## v1.10.0 -- Bump dependency on google.golang.org/api to capture changes to retry logic - which will make retries on writes more resilient. -- Improve documentation for Writer.ChunkSize. -- Fix a bug in lifecycle to allow callers to clear lifecycle rules on a bucket. - -## v1.9.0 -- Add retry for transient network errors on most operations (with the exception - of writes). -- Bump dependency for google.golang.org/api to capture a change in the default - HTTP transport which will improve performance for reads under heavy load. -- Add CRC32C checksum validation option to Composer. - -## v1.8.0 -- Add support for V4 signed post policies. - -## v1.7.0 -- V4 signed URL support: - - Add support for bucket-bound domains and virtual hosted style URLs. - - Add support for query parameters in the signature. - - Fix text encoding to align with standards. -- Add the object name to query parameters for write calls. -- Fix retry behavior when reading files with Content-Encoding gzip. -- Fix response header in reader. -- New code examples: - - Error handling for `ObjectHandle` preconditions. - - Existence checks for buckets and objects. - -## v1.6.0 - -- Updated option handling: - - Don't drop custom scopes (#1756) - - Don't drop port in provided endpoint (#1737) - -## v1.5.0 - -- Honor WithEndpoint client option for reads as well as writes. -- Add archive storage class to docs. -- Make fixes to storage benchwrapper. - -## v1.4.0 - -- When listing objects in a bucket, allow callers to specify which attributes - are queried. This allows for performance optimization. - -## v1.3.0 - -- Use `storage.googleapis.com/storage/v1` by default for GCS requests - instead of `www.googleapis.com/storage/v1`. - -## v1.2.1 - -- Fixed a bug where UniformBucketLevelAccess and BucketPolicyOnly were not - being sent in all cases. - -## v1.2.0 - -- Add support for UniformBucketLevelAccess. 
This configures access checks - to use only bucket-level IAM policies. - See: https://godoc.org/cloud.google.com/go/storage#UniformBucketLevelAccess. -- Fix userAgent to use correct version. - -## v1.1.2 - -- Fix memory leak in BucketIterator and ObjectIterator. - -## v1.1.1 - -- Send BucketPolicyOnly even when it's disabled. - -## v1.1.0 - -- Performance improvements for ObjectIterator and BucketIterator. -- Fix Bucket.ObjectIterator size calculation checks. -- Added HMACKeyOptions to all the methods which allows for options such as - UserProject to be set per invocation and optionally be used. - -## v1.0.0 - -This is the first tag to carve out storage as its own module. See: -https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. diff --git a/vendor/cloud.google.com/go/storage/LICENSE b/vendor/cloud.google.com/go/storage/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/cloud.google.com/go/storage/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/cloud.google.com/go/storage/README.md b/vendor/cloud.google.com/go/storage/README.md deleted file mode 100644 index b2f411210c..0000000000 --- a/vendor/cloud.google.com/go/storage/README.md +++ /dev/null @@ -1,32 +0,0 @@ -## Cloud Storage [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/storage.svg)](https://pkg.go.dev/cloud.google.com/go/storage) - -- [About Cloud Storage](https://cloud.google.com/storage/) -- [API documentation](https://cloud.google.com/storage/docs) -- [Go client documentation](https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage) - -### Example Usage - -First create a `storage.Client` to use throughout your application: - -[snip]:# (storage-1) -```go -client, err := storage.NewClient(ctx) -if err != nil { - log.Fatal(err) -} -``` - -[snip]:# (storage-2) -```go -// Read the object1 from bucket. -rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) -if err != nil { - log.Fatal(err) -} -defer rc.Close() -body, err := io.ReadAll(rc) -if err != nil { - log.Fatal(err) -} -``` diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go deleted file mode 100644 index e0ab60073c..0000000000 --- a/vendor/cloud.google.com/go/storage/acl.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "net/http" - "reflect" - - "cloud.google.com/go/internal/trace" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" - raw "google.golang.org/api/storage/v1" -) - -// ACLRole is the level of access to grant. 
-type ACLRole string - -const ( - RoleOwner ACLRole = "OWNER" - RoleReader ACLRole = "READER" - RoleWriter ACLRole = "WRITER" -) - -// ACLEntity refers to a user or group. -// They are sometimes referred to as grantees. -// -// It could be in the form of: -// "user-", "user-", "group-", "group-", -// "domain-" and "project-team-". -// -// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. -type ACLEntity string - -const ( - AllUsers ACLEntity = "allUsers" - AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" -) - -// ACLRule represents a grant for a role to an entity (user, group or team) for a -// Google Cloud Storage object or bucket. -type ACLRule struct { - Entity ACLEntity - EntityID string - Role ACLRole - Domain string - Email string - ProjectTeam *ProjectTeam -} - -// ProjectTeam is the project team associated with the entity, if any. -type ProjectTeam struct { - ProjectNumber string - Team string -} - -// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. -// ACLHandle on an object operates on the latest generation of that object by default. -// Selecting a specific generation of an object is not currently supported by the client. -type ACLHandle struct { - c *Client - bucket string - object string - isDefault bool - userProject string // for requester-pays buckets - retry *retryConfig -} - -// Delete permanently deletes the ACL entry for the given entity. -func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete") - defer func() { trace.EndSpan(ctx, err) }() - - if a.object != "" { - return a.objectDelete(ctx, entity) - } - if a.isDefault { - return a.bucketDefaultDelete(ctx, entity) - } - return a.bucketDelete(ctx, entity) -} - -// Set sets the role for the given entity. -func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set") - defer func() { trace.EndSpan(ctx, err) }() - - if a.object != "" { - return a.objectSet(ctx, entity, role, false) - } - if a.isDefault { - return a.objectSet(ctx, entity, role, true) - } - return a.bucketSet(ctx, entity, role) -} - -// List retrieves ACL entries. -func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List") - defer func() { trace.EndSpan(ctx, err) }() - - if a.object != "" { - return a.objectList(ctx) - } - if a.isDefault { - return a.bucketDefaultList(ctx) - } - return a.bucketList(ctx) -} - -func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { - opts := makeStorageOpts(true, a.retry, a.userProject) - return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...) -} - -func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { - opts := makeStorageOpts(false, a.retry, a.userProject) - return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...) -} - -func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { - opts := makeStorageOpts(true, a.retry, a.userProject) - return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...) -} - -func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { - opts := makeStorageOpts(false, a.retry, a.userProject) - return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...) 
-} - -func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { - opts := makeStorageOpts(false, a.retry, a.userProject) - return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...) -} - -func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { - opts := makeStorageOpts(true, a.retry, a.userProject) - return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...) -} - -func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { - opts := makeStorageOpts(false, a.retry, a.userProject) - if isBucketDefault { - return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...) - } - return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...) -} - -func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { - opts := makeStorageOpts(false, a.retry, a.userProject) - return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...) -} - -func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) { - vc := reflect.ValueOf(call) - vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) - if a.userProject != "" { - vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)}) - } - setClientHeader(call.Header()) -} - -func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule { - var rs []ACLRule - for _, item := range items { - rs = append(rs, toObjectACLRule(item)) - } - return rs -} - -func toObjectACLRulesFromProto(items []*storagepb.ObjectAccessControl) []ACLRule { - var rs []ACLRule - for _, item := range items { - rs = append(rs, toObjectACLRuleFromProto(item)) - } - return rs -} - -func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule { - var rs []ACLRule - for _, item := range items { - rs = append(rs, toBucketACLRule(item)) - } - return rs -} - -func toBucketACLRulesFromProto(items []*storagepb.BucketAccessControl) []ACLRule { - var rs []ACLRule - for _, item := range items { - rs = append(rs, toBucketACLRuleFromProto(item)) - } - return rs -} - -func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule { - return ACLRule{ - Entity: ACLEntity(a.Entity), - EntityID: a.EntityId, - Role: ACLRole(a.Role), - Domain: a.Domain, - Email: a.Email, - ProjectTeam: toObjectProjectTeam(a.ProjectTeam), - } -} - -func toObjectACLRuleFromProto(a *storagepb.ObjectAccessControl) ACLRule { - return ACLRule{ - Entity: ACLEntity(a.GetEntity()), - EntityID: a.GetEntityId(), - Role: ACLRole(a.GetRole()), - Domain: a.GetDomain(), - Email: a.GetEmail(), - ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()), - } -} - -func toBucketACLRule(a *raw.BucketAccessControl) ACLRule { - return ACLRule{ - Entity: ACLEntity(a.Entity), - EntityID: a.EntityId, - Role: ACLRole(a.Role), - Domain: a.Domain, - Email: a.Email, - ProjectTeam: toBucketProjectTeam(a.ProjectTeam), - } -} - -func toBucketACLRuleFromProto(a *storagepb.BucketAccessControl) ACLRule { - return ACLRule{ - Entity: ACLEntity(a.GetEntity()), - EntityID: a.GetEntityId(), - Role: ACLRole(a.GetRole()), - Domain: a.GetDomain(), - Email: a.GetEmail(), - ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()), - } -} - -func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl { - if len(rules) == 0 { - return nil - } - r := make([]*raw.ObjectAccessControl, 0, len(rules)) - for _, rule := range rules { - r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary - } - return r -} - -func 
toProtoObjectACL(rules []ACLRule) []*storagepb.ObjectAccessControl { - if len(rules) == 0 { - return nil - } - r := make([]*storagepb.ObjectAccessControl, 0, len(rules)) - for _, rule := range rules { - r = append(r, rule.toProtoObjectAccessControl("")) // bucket name unnecessary - } - return r -} - -func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl { - if len(rules) == 0 { - return nil - } - r := make([]*raw.BucketAccessControl, 0, len(rules)) - for _, rule := range rules { - r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary - } - return r -} - -func toProtoBucketACL(rules []ACLRule) []*storagepb.BucketAccessControl { - if len(rules) == 0 { - return nil - } - r := make([]*storagepb.BucketAccessControl, 0, len(rules)) - for _, rule := range rules { - r = append(r, rule.toProtoBucketAccessControl()) - } - return r -} - -func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl { - return &raw.BucketAccessControl{ - Bucket: bucket, - Entity: string(r.Entity), - Role: string(r.Role), - // The other fields are not settable. - } -} - -func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl { - return &raw.ObjectAccessControl{ - Bucket: bucket, - Entity: string(r.Entity), - Role: string(r.Role), - // The other fields are not settable. - } -} - -func (r ACLRule) toProtoObjectAccessControl(bucket string) *storagepb.ObjectAccessControl { - return &storagepb.ObjectAccessControl{ - Entity: string(r.Entity), - Role: string(r.Role), - // The other fields are not settable. - } -} - -func (r ACLRule) toProtoBucketAccessControl() *storagepb.BucketAccessControl { - return &storagepb.BucketAccessControl{ - Entity: string(r.Entity), - Role: string(r.Role), - // The other fields are not settable. - } -} - -func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam { - if p == nil { - return nil - } - return &ProjectTeam{ - ProjectNumber: p.ProjectNumber, - Team: p.Team, - } -} - -func toProjectTeamFromProto(p *storagepb.ProjectTeam) *ProjectTeam { - if p == nil { - return nil - } - return &ProjectTeam{ - ProjectNumber: p.GetProjectNumber(), - Team: p.GetTeam(), - } -} - -func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam { - if p == nil { - return nil - } - return &ProjectTeam{ - ProjectNumber: p.ProjectNumber, - Team: p.Team, - } -} diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go deleted file mode 100644 index 19f266ef1e..0000000000 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ /dev/null @@ -1,2165 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
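For orientation, the ACLHandle deleted above is reached through the public storage client rather than constructed directly. A minimal usage sketch against the public API (bucket and object names are placeholders, and ambient Google credentials are assumed at runtime):

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// ObjectHandle.ACL() returns the *ACLHandle defined above.
	acl := client.Bucket("some-bucket").Object("some-object").ACL()

	// Grant read access to the AllUsers entity.
	if err := acl.Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
		log.Fatal(err)
	}

	// List the resulting ACL rules.
	rules, err := acl.List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		log.Printf("%s: %s", r.Entity, r.Role)
	}
}
```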
- -package storage - -import ( - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "reflect" - "strings" - "time" - - "cloud.google.com/go/compute/metadata" - "cloud.google.com/go/internal/optional" - "cloud.google.com/go/internal/trace" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" - "google.golang.org/api/googleapi" - "google.golang.org/api/iamcredentials/v1" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - raw "google.golang.org/api/storage/v1" - dpb "google.golang.org/genproto/googleapis/type/date" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/durationpb" -) - -// BucketHandle provides operations on a Google Cloud Storage bucket. -// Use Client.Bucket to get a handle. -type BucketHandle struct { - c *Client - name string - acl ACLHandle - defaultObjectACL ACLHandle - conds *BucketConditions - userProject string // project for Requester Pays buckets - retry *retryConfig -} - -// Bucket returns a BucketHandle, which provides operations on the named bucket. -// This call does not perform any network operations. -// -// The supplied name must contain only lowercase letters, numbers, dashes, -// underscores, and dots. The full specification for valid bucket names can be -// found at: -// -// https://cloud.google.com/storage/docs/bucket-naming -func (c *Client) Bucket(name string) *BucketHandle { - retry := c.retry.clone() - return &BucketHandle{ - c: c, - name: name, - acl: ACLHandle{ - c: c, - bucket: name, - retry: retry, - }, - defaultObjectACL: ACLHandle{ - c: c, - bucket: name, - isDefault: true, - retry: retry, - }, - retry: retry, - } -} - -// Create creates the Bucket in the project. -// If attrs is nil the API defaults will be used. -func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") - defer func() { trace.EndSpan(ctx, err) }() - - o := makeStorageOpts(true, b.retry, b.userProject) - if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, o...); err != nil { - return err - } - return nil -} - -// Delete deletes the Bucket. -func (b *BucketHandle) Delete(ctx context.Context) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete") - defer func() { trace.EndSpan(ctx, err) }() - - o := makeStorageOpts(true, b.retry, b.userProject) - return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...) -} - -// ACL returns an ACLHandle, which provides access to the bucket's access control list. -// This controls who can list, create or overwrite the objects in a bucket. -// This call does not perform any network operations. -func (b *BucketHandle) ACL() *ACLHandle { - return &b.acl -} - -// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. -// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. -// This call does not perform any network operations. -func (b *BucketHandle) DefaultObjectACL() *ACLHandle { - return &b.defaultObjectACL -} - -// Object returns an ObjectHandle, which provides operations on the named object. -// This call does not perform any network operations such as fetching the object or verifying its existence. -// Use methods on ObjectHandle to perform network operations. -// -// name must consist entirely of valid UTF-8-encoded runes. 
The full specification -// for valid object names can be found at: -// -// https://cloud.google.com/storage/docs/naming-objects -func (b *BucketHandle) Object(name string) *ObjectHandle { - retry := b.retry.clone() - return &ObjectHandle{ - c: b.c, - bucket: b.name, - object: name, - acl: ACLHandle{ - c: b.c, - bucket: b.name, - object: name, - userProject: b.userProject, - retry: retry, - }, - gen: -1, - userProject: b.userProject, - retry: retry, - } -} - -// Attrs returns the metadata for the bucket. -func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs") - defer func() { trace.EndSpan(ctx, err) }() - - o := makeStorageOpts(true, b.retry, b.userProject) - return b.c.tc.GetBucket(ctx, b.name, b.conds, o...) -} - -// Update updates a bucket's attributes. -func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") - defer func() { trace.EndSpan(ctx, err) }() - - isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0 - o := makeStorageOpts(isIdempotent, b.retry, b.userProject) - return b.c.tc.UpdateBucket(ctx, b.name, &uattrs, b.conds, o...) -} - -// SignedURL returns a URL for the specified object. Signed URLs allow anyone -// access to a restricted resource for a limited time without needing a Google -// account or signing in. -// For more information about signed URLs, see "[Overview of access control]." -// -// This method requires the Method and Expires fields in the specified -// SignedURLOptions to be non-nil. You may need to set the GoogleAccessID and -// PrivateKey fields in some cases. Read more on the [automatic detection of credentials] -// for this method. -// -// [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication -// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing -func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) { - if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) { - return SignedURL(b.name, object, opts) - } - // Make a copy of opts so we don't modify the pointer parameter. - newopts := opts.clone() - - if newopts.GoogleAccessID == "" { - id, err := b.detectDefaultGoogleAccessID() - if err != nil { - return "", err - } - newopts.GoogleAccessID = id - } - if newopts.SignBytes == nil && len(newopts.PrivateKey) == 0 { - if b.c.creds != nil && len(b.c.creds.JSON) > 0 { - var sa struct { - PrivateKey string `json:"private_key"` - } - err := json.Unmarshal(b.c.creds.JSON, &sa) - if err == nil && sa.PrivateKey != "" { - newopts.PrivateKey = []byte(sa.PrivateKey) - } - } - - // Don't error out if we can't unmarshal the private key from the client, - // fallback to the default sign function for the service account. - if len(newopts.PrivateKey) == 0 { - newopts.SignBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID) - } - } - return SignedURL(b.name, object, newopts) -} - -// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. -// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. -// -// This method requires the Expires field in the specified PostPolicyV4Options -// to be non-nil. 
You may need to set the GoogleAccessID and PrivateKey fields -// in some cases. Read more on the [automatic detection of credentials] for this method. -// -// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing -func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { - if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) { - return GenerateSignedPostPolicyV4(b.name, object, opts) - } - // Make a copy of opts so we don't modify the pointer parameter. - newopts := opts.clone() - - if newopts.GoogleAccessID == "" { - id, err := b.detectDefaultGoogleAccessID() - if err != nil { - return nil, err - } - newopts.GoogleAccessID = id - } - if newopts.SignBytes == nil && newopts.SignRawBytes == nil && len(newopts.PrivateKey) == 0 { - if b.c.creds != nil && len(b.c.creds.JSON) > 0 { - var sa struct { - PrivateKey string `json:"private_key"` - } - err := json.Unmarshal(b.c.creds.JSON, &sa) - if err == nil && sa.PrivateKey != "" { - newopts.PrivateKey = []byte(sa.PrivateKey) - } - } - - // Don't error out if we can't unmarshal the private key from the client, - // fallback to the default sign function for the service account. - if len(newopts.PrivateKey) == 0 { - newopts.SignRawBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID) - } - } - return GenerateSignedPostPolicyV4(b.name, object, newopts) -} - -func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) { - returnErr := errors.New("no credentials found on client and not on GCE (Google Compute Engine)") - - if b.c.creds != nil && len(b.c.creds.JSON) > 0 { - var sa struct { - ClientEmail string `json:"client_email"` - SAImpersonationURL string `json:"service_account_impersonation_url"` - CredType string `json:"type"` - } - - err := json.Unmarshal(b.c.creds.JSON, &sa) - if err != nil { - returnErr = err - } else if sa.CredType == "impersonated_service_account" { - start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":") - - if end <= start { - returnErr = errors.New("error parsing impersonated service account credentials") - } else { - return sa.SAImpersonationURL[start+1 : end], nil - } - } else if sa.CredType == "service_account" && sa.ClientEmail != "" { - return sa.ClientEmail, nil - } else { - returnErr = errors.New("unable to parse credentials; only service_account and impersonated_service_account credentials are supported") - } - } - - // Don't error out if we can't unmarshal, fallback to GCE check. - if metadata.OnGCE() { - email, err := metadata.Email("default") - if err == nil && email != "" { - return email, nil - } else if err != nil { - returnErr = err - } else { - returnErr = errors.New("empty email from GCE metadata service") - } - - } - return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. 
Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4])", returnErr) -} - -func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) { - return func(in []byte) ([]byte, error) { - ctx := context.Background() - - // It's ok to recreate this service per call since we pass in the http client, - // circumventing the cost of recreating the auth/transport layer - svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc)) - if err != nil { - return nil, fmt.Errorf("unable to create iamcredentials client: %w", err) - } - - resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{ - Payload: base64.StdEncoding.EncodeToString(in), - }).Do() - if err != nil { - return nil, fmt.Errorf("unable to sign bytes: %w", err) - } - out, err := base64.StdEncoding.DecodeString(resp.SignedBlob) - if err != nil { - return nil, fmt.Errorf("unable to base64 decode response: %w", err) - } - return out, nil - } -} - -// BucketAttrs represents the metadata for a Google Cloud Storage bucket. -// Read-only fields are ignored by BucketHandle.Create. -type BucketAttrs struct { - // Name is the name of the bucket. - // This field is read-only. - Name string - - // ACL is the list of access control rules on the bucket. - ACL []ACLRule - - // BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of - // UniformBucketLevelAccess is recommended above the use of this field. - // Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to - // true, will enable UniformBucketLevelAccess. - BucketPolicyOnly BucketPolicyOnly - - // UniformBucketLevelAccess configures access checks to use only bucket-level IAM - // policies and ignore any ACL rules for the bucket. - // See https://cloud.google.com/storage/docs/uniform-bucket-level-access - // for more information. - UniformBucketLevelAccess UniformBucketLevelAccess - - // PublicAccessPrevention is the setting for the bucket's - // PublicAccessPrevention policy, which can be used to prevent public access - // of data in the bucket. See - // https://cloud.google.com/storage/docs/public-access-prevention for more - // information. - PublicAccessPrevention PublicAccessPrevention - - // DefaultObjectACL is the list of access controls to - // apply to new objects when no object ACL is provided. - DefaultObjectACL []ACLRule - - // DefaultEventBasedHold is the default value for event-based hold on - // newly created objects in this bucket. It defaults to false. - DefaultEventBasedHold bool - - // If not empty, applies a predefined set of access controls. It should be set - // only when creating a bucket. - // It is always empty for BucketAttrs returned from the service. - // See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert - // for valid values. - PredefinedACL string - - // If not empty, applies a predefined set of default object access controls. - // It should be set only when creating a bucket. - // It is always empty for BucketAttrs returned from the service. - // See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert - // for valid values. - PredefinedDefaultObjectACL string - - // Location is the location of the bucket. It defaults to "US". - // If specifying a dual-region, CustomPlacementConfig should be set in conjunction. 
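// [Editorial sketch, not part of the vendored file] A minimal use of
// BucketHandle.SignedURL as documented above: with only Method and Expires
// set, the handle attempts to detect the GoogleAccessID and a signing method
// via the helpers defined earlier. The client value, bucket, and object
// names here are placeholders.
func exampleSignedURL(client *Client) (string, error) {
	return client.Bucket("example-bucket").SignedURL("notes.txt", &SignedURLOptions{
		Scheme:  SigningSchemeV4,
		Method:  "GET",
		Expires: time.Now().Add(15 * time.Minute),
	})
}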
- Location string - - // The bucket's custom placement configuration that holds a list of - // regional locations for custom dual regions. - CustomPlacementConfig *CustomPlacementConfig - - // MetaGeneration is the metadata generation of the bucket. - // This field is read-only. - MetaGeneration int64 - - // StorageClass is the default storage class of the bucket. This defines - // how objects in the bucket are stored and determines the SLA - // and the cost of storage. Typical values are "STANDARD", "NEARLINE", - // "COLDLINE" and "ARCHIVE". Defaults to "STANDARD". - // See https://cloud.google.com/storage/docs/storage-classes for all - // valid values. - StorageClass string - - // Created is the creation time of the bucket. - // This field is read-only. - Created time.Time - - // VersioningEnabled reports whether this bucket has versioning enabled. - VersioningEnabled bool - - // Labels are the bucket's labels. - Labels map[string]string - - // RequesterPays reports whether the bucket is a Requester Pays bucket. - // Clients performing operations on Requester Pays buckets must provide - // a user project (see BucketHandle.UserProject), which will be billed - // for the operations. - RequesterPays bool - - // Lifecycle is the lifecycle configuration for objects in the bucket. - Lifecycle Lifecycle - - // Retention policy enforces a minimum retention time for all objects - // contained in the bucket. A RetentionPolicy of nil implies the bucket - // has no minimum data retention. - // - // This feature is in private alpha release. It is not currently available to - // most customers. It might be changed in backwards-incompatible ways and is not - // subject to any SLA or deprecation policy. - RetentionPolicy *RetentionPolicy - - // The bucket's Cross-Origin Resource Sharing (CORS) configuration. - CORS []CORS - - // The encryption configuration used by default for newly inserted objects. - Encryption *BucketEncryption - - // The logging configuration. - Logging *BucketLogging - - // The website configuration. - Website *BucketWebsite - - // Etag is the HTTP/1.1 Entity tag for the bucket. - // This field is read-only. - Etag string - - // LocationType describes how data is stored and replicated. - // Typical values are "multi-region", "region" and "dual-region". - // This field is read-only. - LocationType string - - // The project number of the project the bucket belongs to. - // This field is read-only. - ProjectNumber uint64 - - // RPO configures the Recovery Point Objective (RPO) policy of the bucket. - // Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket. - // See https://cloud.google.com/storage/docs/managing-turbo-replication for - // more information. - RPO RPO - - // Autoclass holds the bucket's autoclass configuration. If enabled, - // allows for the automatic selection of the best storage class - // based on object access patterns. - Autoclass *Autoclass -} - -// BucketPolicyOnly is an alias for UniformBucketLevelAccess. -// Use of UniformBucketLevelAccess is preferred above BucketPolicyOnly. -type BucketPolicyOnly struct { - // Enabled specifies whether access checks use only bucket-level IAM - // policies. Enabled may be disabled until the locked time. - Enabled bool - // LockedTime specifies the deadline for changing Enabled from true to - // false. - LockedTime time.Time -} - -// UniformBucketLevelAccess configures access checks to use only bucket-level IAM -// policies. 
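// [Editorial sketch, not part of the vendored file] Creating a bucket with
// uniform bucket-level access enabled, using the BucketAttrs fields listed
// above. Project and bucket names are placeholders.
func exampleCreateUBLA(ctx context.Context, client *Client) error {
	attrs := &BucketAttrs{
		Location:                 "US-EAST1",
		UniformBucketLevelAccess: UniformBucketLevelAccess{Enabled: true},
	}
	return client.Bucket("example-bucket").Create(ctx, "example-project", attrs)
}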
-type UniformBucketLevelAccess struct { - // Enabled specifies whether access checks use only bucket-level IAM - // policies. Enabled may be disabled until the locked time. - Enabled bool - // LockedTime specifies the deadline for changing Enabled from true to - // false. - LockedTime time.Time -} - -// PublicAccessPrevention configures the Public Access Prevention feature, which -// can be used to disallow public access to any data in a bucket. See -// https://cloud.google.com/storage/docs/public-access-prevention for more -// information. -type PublicAccessPrevention int - -const ( - // PublicAccessPreventionUnknown is a zero value, used only if this field is - // not set in a call to GCS. - PublicAccessPreventionUnknown PublicAccessPrevention = iota - - // PublicAccessPreventionUnspecified corresponds to a value of "unspecified". - // Deprecated: use PublicAccessPreventionInherited - PublicAccessPreventionUnspecified - - // PublicAccessPreventionEnforced corresponds to a value of "enforced". This - // enforces Public Access Prevention on the bucket. - PublicAccessPreventionEnforced - - // PublicAccessPreventionInherited corresponds to a value of "inherited" - // and is the default for buckets. - PublicAccessPreventionInherited - - publicAccessPreventionUnknown string = "" - // TODO: remove unspecified when change is fully completed - publicAccessPreventionUnspecified = "unspecified" - publicAccessPreventionEnforced = "enforced" - publicAccessPreventionInherited = "inherited" -) - -func (p PublicAccessPrevention) String() string { - switch p { - case PublicAccessPreventionInherited, PublicAccessPreventionUnspecified: - return publicAccessPreventionInherited - case PublicAccessPreventionEnforced: - return publicAccessPreventionEnforced - default: - return publicAccessPreventionUnknown - } -} - -// Lifecycle is the lifecycle configuration for objects in the bucket. -type Lifecycle struct { - Rules []LifecycleRule -} - -// RetentionPolicy enforces a minimum retention time for all objects -// contained in the bucket. -// -// Any attempt to overwrite or delete objects younger than the retention -// period will result in an error. An unlocked retention policy can be -// modified or removed from the bucket via the Update method. A -// locked retention policy cannot be removed or shortened in duration -// for the lifetime of the bucket. -// -// This feature is in private alpha release. It is not currently available to -// most customers. It might be changed in backwards-incompatible ways and is not -// subject to any SLA or deprecation policy. -type RetentionPolicy struct { - // RetentionPeriod specifies the duration that objects need to be - // retained. Retention duration must be greater than zero and less than - // 100 years. Note that enforcement of retention periods less than a day - // is not guaranteed. Such periods should only be used for testing - // purposes. - RetentionPeriod time.Duration - - // EffectiveTime is the time from which the policy was enforced and - // effective. This field is read-only. - EffectiveTime time.Time - - // IsLocked describes whether the bucket is locked. Once locked, an - // object retention policy cannot be modified. - // This field is read-only. - IsLocked bool -} - -const ( - // RFC3339 timestamp with only the date segment, used for CreatedBefore, - // CustomTimeBefore, and NoncurrentTimeBefore in LifecycleRule. - rfc3339Date = "2006-01-02" - - // DeleteAction is a lifecycle action that deletes live and/or archived - // objects.
Takes precedence over SetStorageClass actions. - DeleteAction = "Delete" - - // SetStorageClassAction changes the storage class of live and/or archived - // objects. - SetStorageClassAction = "SetStorageClass" - - // AbortIncompleteMPUAction is a lifecycle action that aborts an incomplete - // multipart upload when the multipart upload meets the conditions specified - // in the lifecycle rule. The AgeInDays condition is the only allowed - // condition for this action. AgeInDays is measured from the time the - // multipart upload was created. - AbortIncompleteMPUAction = "AbortIncompleteMultipartUpload" -) - -// LifecycleRule is a lifecycle configuration rule. -// -// When all the configured conditions are met by an object in the bucket, the -// configured action will automatically be taken on that object. -type LifecycleRule struct { - // Action is the action to take when all of the associated conditions are - // met. - Action LifecycleAction - - // Condition is the set of conditions that must be met for the associated - // action to be taken. - Condition LifecycleCondition -} - -// LifecycleAction is a lifecycle configuration action. -type LifecycleAction struct { - // Type is the type of action to take on matching objects. - // - // Acceptable values are storage.DeleteAction, storage.SetStorageClassAction, - // and storage.AbortIncompleteMPUAction. - Type string - - // StorageClass is the storage class to set on matching objects if the Action - // is "SetStorageClass". - StorageClass string -} - -// Liveness specifies whether the object is live or not. -type Liveness int - -const ( - // LiveAndArchived includes both live and archived objects. - LiveAndArchived Liveness = iota - // Live specifies that the object is still live. - Live - // Archived specifies that the object is archived. - Archived -) - -// LifecycleCondition is a set of conditions used to match objects and take an -// action automatically. -// -// All configured conditions must be met for the associated action to be taken. -type LifecycleCondition struct { - // AllObjects is used to select all objects in a bucket by - // setting AgeInDays to 0. - AllObjects bool - - // AgeInDays is the age of the object in days. - // If you want to set AgeInDays to `0` use AllObjects set to `true`. - AgeInDays int64 - - // CreatedBefore is the time the object was created. - // - // This condition is satisfied when an object is created before midnight of - // the specified date in UTC. - CreatedBefore time.Time - - // CustomTimeBefore is the CustomTime metadata field of the object. This - // condition is satisfied when an object's CustomTime timestamp is before - // midnight of the specified date in UTC. - // - // This condition can only be satisfied if CustomTime has been set. - CustomTimeBefore time.Time - - // DaysSinceCustomTime is the days elapsed since the CustomTime date of the - // object. This condition can only be satisfied if CustomTime has been set. - // Note: Using `0` as the value will be ignored by the library and not sent to the API. - DaysSinceCustomTime int64 - - // DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp - // of the object. This condition is relevant only for versioned objects. - // Note: Using `0` as the value will be ignored by the library and not sent to the API. - DaysSinceNoncurrentTime int64 - - // Liveness specifies the object's liveness. 
Relevant only for versioned objects - Liveness Liveness - - // MatchesPrefix is the condition matching an object if any of the - // matches_prefix strings are an exact prefix of the object's name. - MatchesPrefix []string - - // MatchesStorageClasses is the condition matching the object's storage - // class. - // - // Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". - MatchesStorageClasses []string - - // MatchesSuffix is the condition matching an object if any of the - // matches_suffix strings are an exact suffix of the object's name. - MatchesSuffix []string - - // NoncurrentTimeBefore is the noncurrent timestamp of the object. This - // condition is satisfied when an object's noncurrent timestamp is before - // midnight of the specified date in UTC. - // - // This condition is relevant only for versioned objects. - NoncurrentTimeBefore time.Time - - // NumNewerVersions is the condition matching objects with a number of newer versions. - // - // If the value is N, this condition is satisfied when there are at least N - // versions (including the live version) newer than this version of the - // object. - // Note: Using `0` as the value will be ignored by the library and not sent to the API. - NumNewerVersions int64 -} - -// BucketLogging holds the bucket's logging configuration, which defines the -// destination bucket and optional name prefix for the current bucket's -// logs. -type BucketLogging struct { - // The destination bucket where the current bucket's logs - // should be placed. - LogBucket string - - // A prefix for log object names. - LogObjectPrefix string -} - -// BucketWebsite holds the bucket's website configuration, controlling how the -// service behaves when accessing bucket contents as a web site. See -// https://cloud.google.com/storage/docs/static-website for more information. -type BucketWebsite struct { - // If the requested object path is missing, the service will ensure the path has - // a trailing '/', append this suffix, and attempt to retrieve the resulting - // object. This allows the creation of index.html objects to represent directory - // pages. - MainPageSuffix string - - // If the requested object path is missing, and any mainPageSuffix object is - // missing, if applicable, the service will return the named object from this - // bucket as the content for a 404 Not Found result. - NotFoundPage string -} - -// CustomPlacementConfig holds the bucket's custom placement -// configuration for Custom Dual Regions. See -// https://cloud.google.com/storage/docs/locations#location-dr for more information. -type CustomPlacementConfig struct { - // The list of regional locations in which data is placed. - // Custom Dual Regions require exactly 2 regional locations. - DataLocations []string -} - -// Autoclass holds the bucket's autoclass configuration. If enabled, -// allows for the automatic selection of the best storage class -// based on object access patterns. See -// https://cloud.google.com/storage/docs/using-autoclass for more information. -type Autoclass struct { - // Enabled specifies whether the autoclass feature is enabled - // on the bucket. - Enabled bool - // ToggleTime is the time from which Autoclass was last toggled. - // If Autoclass is enabled when the bucket is created, the ToggleTime - // is set to the bucket creation time. This field is read-only. 
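// [Editorial sketch, not part of the vendored file] A lifecycle rule built
// from the condition fields above: delete archived versions once at least
// three newer versions exist.
var exampleLifecycle = Lifecycle{
	Rules: []LifecycleRule{{
		Action:    LifecycleAction{Type: DeleteAction},
		Condition: LifecycleCondition{NumNewerVersions: 3, Liveness: Archived},
	}},
}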
- ToggleTime time.Time -} - -func newBucket(b *raw.Bucket) (*BucketAttrs, error) { - if b == nil { - return nil, nil - } - rp, err := toRetentionPolicy(b.RetentionPolicy) - if err != nil { - return nil, err - } - return &BucketAttrs{ - Name: b.Name, - Location: b.Location, - MetaGeneration: b.Metageneration, - DefaultEventBasedHold: b.DefaultEventBasedHold, - StorageClass: b.StorageClass, - Created: convertTime(b.TimeCreated), - VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, - ACL: toBucketACLRules(b.Acl), - DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl), - Labels: b.Labels, - RequesterPays: b.Billing != nil && b.Billing.RequesterPays, - Lifecycle: toLifecycle(b.Lifecycle), - RetentionPolicy: rp, - CORS: toCORS(b.Cors), - Encryption: toBucketEncryption(b.Encryption), - Logging: toBucketLogging(b.Logging), - Website: toBucketWebsite(b.Website), - BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration), - UniformBucketLevelAccess: toUniformBucketLevelAccess(b.IamConfiguration), - PublicAccessPrevention: toPublicAccessPrevention(b.IamConfiguration), - Etag: b.Etag, - LocationType: b.LocationType, - ProjectNumber: b.ProjectNumber, - RPO: toRPO(b), - CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig), - Autoclass: toAutoclassFromRaw(b.Autoclass), - }, nil -} - -func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs { - if b == nil { - return nil - } - return &BucketAttrs{ - Name: parseBucketName(b.GetName()), - Location: b.GetLocation(), - MetaGeneration: b.GetMetageneration(), - DefaultEventBasedHold: b.GetDefaultEventBasedHold(), - StorageClass: b.GetStorageClass(), - Created: b.GetCreateTime().AsTime(), - VersioningEnabled: b.GetVersioning().GetEnabled(), - ACL: toBucketACLRulesFromProto(b.GetAcl()), - DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()), - Labels: b.GetLabels(), - RequesterPays: b.GetBilling().GetRequesterPays(), - Lifecycle: toLifecycleFromProto(b.GetLifecycle()), - RetentionPolicy: toRetentionPolicyFromProto(b.GetRetentionPolicy()), - CORS: toCORSFromProto(b.GetCors()), - Encryption: toBucketEncryptionFromProto(b.GetEncryption()), - Logging: toBucketLoggingFromProto(b.GetLogging()), - Website: toBucketWebsiteFromProto(b.GetWebsite()), - BucketPolicyOnly: toBucketPolicyOnlyFromProto(b.GetIamConfig()), - UniformBucketLevelAccess: toUniformBucketLevelAccessFromProto(b.GetIamConfig()), - PublicAccessPrevention: toPublicAccessPreventionFromProto(b.GetIamConfig()), - LocationType: b.GetLocationType(), - RPO: toRPOFromProto(b), - CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()), - ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 if the project resource name is ID based - Autoclass: toAutoclassFromProto(b.GetAutoclass()), - } -} - -// toRawBucket copies the editable attributes from b to the raw library's Bucket type. -func (b *BucketAttrs) toRawBucket() *raw.Bucket { - // Copy label map. - var labels map[string]string - if len(b.Labels) > 0 { - labels = make(map[string]string, len(b.Labels)) - for k, v := range b.Labels { - labels[k] = v - } - } - // Ignore VersioningEnabled if it is false. This is OK because - // we only call this method when creating a bucket, and by default - // new buckets have versioning off.
- var v *raw.BucketVersioning - if b.VersioningEnabled { - v = &raw.BucketVersioning{Enabled: true} - } - var bb *raw.BucketBilling - if b.RequesterPays { - bb = &raw.BucketBilling{RequesterPays: true} - } - var bktIAM *raw.BucketIamConfiguration - if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM = &raw.BucketIamConfiguration{} - if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { - bktIAM.UniformBucketLevelAccess = &raw.BucketIamConfigurationUniformBucketLevelAccess{ - Enabled: true, - } - } - if b.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String() - } - } - return &raw.Bucket{ - Name: b.Name, - Location: b.Location, - StorageClass: b.StorageClass, - Acl: toRawBucketACL(b.ACL), - DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL), - Versioning: v, - Labels: labels, - Billing: bb, - Lifecycle: toRawLifecycle(b.Lifecycle), - RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), - Cors: toRawCORS(b.CORS), - Encryption: b.Encryption.toRawBucketEncryption(), - Logging: b.Logging.toRawBucketLogging(), - Website: b.Website.toRawBucketWebsite(), - IamConfiguration: bktIAM, - Rpo: b.RPO.String(), - CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(), - Autoclass: b.Autoclass.toRawAutoclass(), - } -} - -func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket { - if b == nil { - return &storagepb.Bucket{} - } - - // Copy label map. - var labels map[string]string - if len(b.Labels) > 0 { - labels = make(map[string]string, len(b.Labels)) - for k, v := range b.Labels { - labels[k] = v - } - } - - // Ignore VersioningEnabled if it is false. This is OK because - // we only call this method when creating a bucket, and by default - // new buckets have versioning off. - var v *storagepb.Bucket_Versioning - if b.VersioningEnabled { - v = &storagepb.Bucket_Versioning{Enabled: true} - } - var bb *storagepb.Bucket_Billing - if b.RequesterPays { - bb = &storagepb.Bucket_Billing{RequesterPays: true} - } - var bktIAM *storagepb.Bucket_IamConfig - if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM = &storagepb.Bucket_IamConfig{} - if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { - bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ - Enabled: true, - } - } - if b.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String() - } - } - - return &storagepb.Bucket{ - Name: b.Name, - Location: b.Location, - StorageClass: b.StorageClass, - Acl: toProtoBucketACL(b.ACL), - DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL), - Versioning: v, - Labels: labels, - Billing: bb, - Lifecycle: toProtoLifecycle(b.Lifecycle), - RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(), - Cors: toProtoCORS(b.CORS), - Encryption: b.Encryption.toProtoBucketEncryption(), - Logging: b.Logging.toProtoBucketLogging(), - Website: b.Website.toProtoBucketWebsite(), - IamConfig: bktIAM, - Rpo: b.RPO.String(), - CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(), - Autoclass: b.Autoclass.toProtoAutoclass(), - } -} - -func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { - if ua == nil { - return &storagepb.Bucket{} - } - - // TODO(cathyo): Handle labels. Pending b/230510191. 
- - var v *storagepb.Bucket_Versioning - if ua.VersioningEnabled != nil { - v = &storagepb.Bucket_Versioning{Enabled: optional.ToBool(ua.VersioningEnabled)} - } - var bb *storagepb.Bucket_Billing - if ua.RequesterPays != nil { - bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)} - } - - var bktIAM *storagepb.Bucket_IamConfig - if ua.UniformBucketLevelAccess != nil || ua.BucketPolicyOnly != nil || ua.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM = &storagepb.Bucket_IamConfig{} - - if ua.BucketPolicyOnly != nil { - bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ - Enabled: optional.ToBool(ua.BucketPolicyOnly.Enabled), - } - } - - if ua.UniformBucketLevelAccess != nil { - // UniformBucketLevelAccess takes precedence over BucketPolicyOnly, - // so Enabled will be overridden here if both are set - bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ - Enabled: optional.ToBool(ua.UniformBucketLevelAccess.Enabled), - } - } - - if ua.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String() - } - } - - var defaultHold bool - if ua.DefaultEventBasedHold != nil { - defaultHold = optional.ToBool(ua.DefaultEventBasedHold) - } - var lifecycle Lifecycle - if ua.Lifecycle != nil { - lifecycle = *ua.Lifecycle - } - var bktACL []*storagepb.BucketAccessControl - if ua.acl != nil { - bktACL = toProtoBucketACL(ua.acl) - } - if ua.PredefinedACL != "" { - // Clear ACL or the call will fail. - bktACL = nil - } - var bktDefaultObjectACL []*storagepb.ObjectAccessControl - if ua.defaultObjectACL != nil { - bktDefaultObjectACL = toProtoObjectACL(ua.defaultObjectACL) - } - if ua.PredefinedDefaultObjectACL != "" { - // Clear ACLs or the call will fail. - bktDefaultObjectACL = nil - } - - return &storagepb.Bucket{ - StorageClass: ua.StorageClass, - Acl: bktACL, - DefaultObjectAcl: bktDefaultObjectACL, - DefaultEventBasedHold: defaultHold, - Versioning: v, - Billing: bb, - Lifecycle: toProtoLifecycle(lifecycle), - RetentionPolicy: ua.RetentionPolicy.toProtoRetentionPolicy(), - Cors: toProtoCORS(ua.CORS), - Encryption: ua.Encryption.toProtoBucketEncryption(), - Logging: ua.Logging.toProtoBucketLogging(), - Website: ua.Website.toProtoBucketWebsite(), - IamConfig: bktIAM, - Rpo: ua.RPO.String(), - Autoclass: ua.Autoclass.toProtoAutoclass(), - } -} - -// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration. -type CORS struct { - // MaxAge is the value to return in the Access-Control-Max-Age - // header used in preflight responses. - MaxAge time.Duration - - // Methods is the list of HTTP methods on which to include CORS response - // headers (GET, OPTIONS, POST, etc.). Note: "*" is permitted in the list - // of methods, and means "any method". - Methods []string - - // Origins is the list of Origins eligible to receive CORS response - // headers. Note: "*" is permitted in the list of origins, and means - // "any Origin". - Origins []string - - // ResponseHeaders is the list of HTTP headers other than the simple - // response headers to give permission for the user-agent to share - // across domains. - ResponseHeaders []string -} - -// BucketEncryption is a bucket's encryption configuration.
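// [Editorial sketch, not part of the vendored file] A CORS entry using the
// struct above; note that MaxAge is a time.Duration and is converted to
// whole seconds by the toRawCORS/toProtoCORS helpers later in this file.
var exampleCORS = []CORS{{
	MaxAge:          time.Hour,
	Methods:         []string{"GET", "HEAD"},
	Origins:         []string{"https://example.com"},
	ResponseHeaders: []string{"Content-Type"},
}}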
-type BucketEncryption struct { - // A Cloud KMS key name, in the form - // projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to encrypt - // objects inserted into this bucket, if no encryption method is specified. - // The key's location must be the same as the bucket's. - DefaultKMSKeyName string -} - -// BucketAttrsToUpdate defines the attributes to update during an Update call. -type BucketAttrsToUpdate struct { - // If set, updates whether the bucket uses versioning. - VersioningEnabled optional.Bool - - // If set, updates whether the bucket is a Requester Pays bucket. - RequesterPays optional.Bool - - // DefaultEventBasedHold is the default value for event-based hold on - // newly created objects in this bucket. - DefaultEventBasedHold optional.Bool - - // BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of - // UniformBucketLevelAccess is recommended above the use of this field. - // Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to - // true, will enable UniformBucketLevelAccess. If both BucketPolicyOnly and - // UniformBucketLevelAccess are set, the value of UniformBucketLevelAccess - // will take precedence. - BucketPolicyOnly *BucketPolicyOnly - - // UniformBucketLevelAccess configures access checks to use only bucket-level IAM - // policies and ignore any ACL rules for the bucket. - // See https://cloud.google.com/storage/docs/uniform-bucket-level-access - // for more information. - UniformBucketLevelAccess *UniformBucketLevelAccess - - // PublicAccessPrevention is the setting for the bucket's - // PublicAccessPrevention policy, which can be used to prevent public access - // of data in the bucket. See - // https://cloud.google.com/storage/docs/public-access-prevention for more - // information. - PublicAccessPrevention PublicAccessPrevention - - // StorageClass is the default storage class of the bucket. This defines - // how objects in the bucket are stored and determines the SLA - // and the cost of storage. Typical values are "STANDARD", "NEARLINE", - // "COLDLINE" and "ARCHIVE". Defaults to "STANDARD". - // See https://cloud.google.com/storage/docs/storage-classes for all - // valid values. - StorageClass string - - // If set, updates the retention policy of the bucket. Using - // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. - // - // This feature is in private alpha release. It is not currently available to - // most customers. It might be changed in backwards-incompatible ways and is not - // subject to any SLA or deprecation policy. - RetentionPolicy *RetentionPolicy - - // If set, replaces the CORS configuration with a new configuration. - // An empty (rather than nil) slice causes all CORS policies to be removed. - CORS []CORS - - // If set, replaces the encryption configuration of the bucket. Using - // BucketEncryption.DefaultKMSKeyName = "" will delete the existing - // configuration. - Encryption *BucketEncryption - - // If set, replaces the lifecycle configuration of the bucket. - Lifecycle *Lifecycle - - // If set, replaces the logging configuration of the bucket. - Logging *BucketLogging - - // If set, replaces the website configuration of the bucket. - Website *BucketWebsite - - // If not empty, applies a predefined set of access controls. - // See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch. - PredefinedACL string - - // If not empty, applies a predefined set of default object access controls. - // See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
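// [Editorial sketch, not part of the vendored file] Applying a
// BucketAttrsToUpdate: optional.Bool fields are only sent when set, and
// SetLabel (defined below) stages a label mutation. The bucket name is a
// placeholder.
func exampleUpdate(ctx context.Context, client *Client) (*BucketAttrs, error) {
	var u BucketAttrsToUpdate
	u.VersioningEnabled = true
	u.SetLabel("env", "dev")
	return client.Bucket("example-bucket").Update(ctx, u)
}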
- PredefinedDefaultObjectACL string - - // RPO configures the Recovery Point Objective (RPO) policy of the bucket. - // Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket. - // See https://cloud.google.com/storage/docs/managing-turbo-replication for - // more information. - RPO RPO - - // If set, updates the autoclass configuration of the bucket. - // See https://cloud.google.com/storage/docs/using-autoclass for more information. - Autoclass *Autoclass - - // acl is the list of access control rules on the bucket. - // It is unexported and only used internally by the gRPC client. - // Library users should use ACLHandle methods directly. - acl []ACLRule - - // defaultObjectACL is the list of access controls to - // apply to new objects when no object ACL is provided. - // It is unexported and only used internally by the gRPC client. - // Library users should use ACLHandle methods directly. - defaultObjectACL []ACLRule - - setLabels map[string]string - deleteLabels map[string]bool -} - -// SetLabel causes a label to be added or modified when ua is used -// in a call to Bucket.Update. -func (ua *BucketAttrsToUpdate) SetLabel(name, value string) { - if ua.setLabels == nil { - ua.setLabels = map[string]string{} - } - ua.setLabels[name] = value -} - -// DeleteLabel causes a label to be deleted when ua is used in a -// call to Bucket.Update. -func (ua *BucketAttrsToUpdate) DeleteLabel(name string) { - if ua.deleteLabels == nil { - ua.deleteLabels = map[string]bool{} - } - ua.deleteLabels[name] = true -} - -func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { - rb := &raw.Bucket{} - if ua.CORS != nil { - rb.Cors = toRawCORS(ua.CORS) - rb.ForceSendFields = append(rb.ForceSendFields, "Cors") - } - if ua.DefaultEventBasedHold != nil { - rb.DefaultEventBasedHold = optional.ToBool(ua.DefaultEventBasedHold) - rb.ForceSendFields = append(rb.ForceSendFields, "DefaultEventBasedHold") - } - if ua.RetentionPolicy != nil { - if ua.RetentionPolicy.RetentionPeriod == 0 { - rb.NullFields = append(rb.NullFields, "RetentionPolicy") - rb.RetentionPolicy = nil - } else { - rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy() - } - } - if ua.VersioningEnabled != nil { - rb.Versioning = &raw.BucketVersioning{ - Enabled: optional.ToBool(ua.VersioningEnabled), - ForceSendFields: []string{"Enabled"}, - } - } - if ua.RequesterPays != nil { - rb.Billing = &raw.BucketBilling{ - RequesterPays: optional.ToBool(ua.RequesterPays), - ForceSendFields: []string{"RequesterPays"}, - } - } - if ua.BucketPolicyOnly != nil { - rb.IamConfiguration = &raw.BucketIamConfiguration{ - UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ - Enabled: ua.BucketPolicyOnly.Enabled, - ForceSendFields: []string{"Enabled"}, - }, - } - } - if ua.UniformBucketLevelAccess != nil { - rb.IamConfiguration = &raw.BucketIamConfiguration{ - UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ - Enabled: ua.UniformBucketLevelAccess.Enabled, - ForceSendFields: []string{"Enabled"}, - }, - } - } - if ua.PublicAccessPrevention != PublicAccessPreventionUnknown { - if rb.IamConfiguration == nil { - rb.IamConfiguration = &raw.BucketIamConfiguration{} - } - rb.IamConfiguration.PublicAccessPrevention = ua.PublicAccessPrevention.String() - } - if ua.Encryption != nil { - if ua.Encryption.DefaultKMSKeyName == "" { - rb.NullFields = append(rb.NullFields, "Encryption") - rb.Encryption = nil - } else { - rb.Encryption = ua.Encryption.toRawBucketEncryption() - } - } - if ua.Lifecycle != nil { - 
rb.Lifecycle = toRawLifecycle(*ua.Lifecycle) - rb.ForceSendFields = append(rb.ForceSendFields, "Lifecycle") - } - if ua.Logging != nil { - if *ua.Logging == (BucketLogging{}) { - rb.NullFields = append(rb.NullFields, "Logging") - rb.Logging = nil - } else { - rb.Logging = ua.Logging.toRawBucketLogging() - } - } - if ua.Website != nil { - if *ua.Website == (BucketWebsite{}) { - rb.NullFields = append(rb.NullFields, "Website") - rb.Website = nil - } else { - rb.Website = ua.Website.toRawBucketWebsite() - } - } - if ua.Autoclass != nil { - rb.Autoclass = &raw.BucketAutoclass{ - Enabled: ua.Autoclass.Enabled, - ForceSendFields: []string{"Enabled"}, - } - } - if ua.PredefinedACL != "" { - // Clear ACL or the call will fail. - rb.Acl = nil - rb.ForceSendFields = append(rb.ForceSendFields, "Acl") - } - if ua.PredefinedDefaultObjectACL != "" { - // Clear ACLs or the call will fail. - rb.DefaultObjectAcl = nil - rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl") - } - - rb.StorageClass = ua.StorageClass - rb.Rpo = ua.RPO.String() - - if ua.setLabels != nil || ua.deleteLabels != nil { - rb.Labels = map[string]string{} - for k, v := range ua.setLabels { - rb.Labels[k] = v - } - if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 { - rb.ForceSendFields = append(rb.ForceSendFields, "Labels") - } - for l := range ua.deleteLabels { - rb.NullFields = append(rb.NullFields, "Labels."+l) - } - } - return rb -} - -// If returns a new BucketHandle that applies a set of preconditions. -// Preconditions already set on the BucketHandle are ignored. -// Operations on the new handle will return an error if the preconditions are not -// satisfied. The only valid preconditions for buckets are MetagenerationMatch -// and MetagenerationNotMatch. -func (b *BucketHandle) If(conds BucketConditions) *BucketHandle { - b2 := *b - b2.conds = &conds - return &b2 -} - -// BucketConditions constrain bucket methods to act on specific metagenerations. -// -// The zero value is an empty set of constraints. -type BucketConditions struct { - // MetagenerationMatch specifies that the bucket must have the given - // metageneration for the operation to occur. - // If MetagenerationMatch is zero, it has no effect. - MetagenerationMatch int64 - - // MetagenerationNotMatch specifies that the bucket must not have the given - // metageneration for the operation to occur. - // If MetagenerationNotMatch is zero, it has no effect. - MetagenerationNotMatch int64 -} - -func (c *BucketConditions) validate(method string) error { - if *c == (BucketConditions{}) { - return fmt.Errorf("storage: %s: empty conditions", method) - } - if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 { - return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) - } - return nil -} - -// UserProject returns a new BucketHandle that passes the project ID as the user -// project for all subsequent calls. Calls with a user project will be billed to that -// project rather than to the bucket's owning project. -// -// A user project is required for all operations on Requester Pays buckets. -func (b *BucketHandle) UserProject(projectID string) *BucketHandle { - b2 := *b - b2.userProject = projectID - b2.acl.userProject = projectID - b2.defaultObjectACL.userProject = projectID - return &b2 -} - -// LockRetentionPolicy locks a bucket's retention policy until a previously-configured -// RetentionPeriod past the EffectiveTime. 
Note that if RetentionPeriod is set to less -// than a day, the retention policy is treated as a development configuration and locking -// will have no effect. The BucketHandle must have a metageneration condition that -// matches the bucket's metageneration. See BucketHandle.If. -// -// This feature is in private alpha release. It is not currently available to -// most customers. It might be changed in backwards-incompatible ways and is not -// subject to any SLA or deprecation policy. -func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error { - o := makeStorageOpts(true, b.retry, b.userProject) - return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...) -} - -// applyBucketConds modifies the provided call using the conditions in conds. -// call is something that quacks like a *raw.WhateverCall. -func applyBucketConds(method string, conds *BucketConditions, call interface{}) error { - if conds == nil { - return nil - } - if err := conds.validate(method); err != nil { - return err - } - cval := reflect.ValueOf(call) - switch { - case conds.MetagenerationMatch != 0: - if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) - } - case conds.MetagenerationNotMatch != 0: - if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) - } - } - return nil -} - -// applyBucketCondsProto modifies the provided request message using the conditions -// in conds. msg is a protobuf Message that has fields if_metageneration_match -// and if_metageneration_not_match. -func applyBucketCondsProto(method string, conds *BucketConditions, msg proto.Message) error { - rmsg := msg.ProtoReflect() - - if conds == nil { - return nil - } - if err := conds.validate(method); err != nil { - return err - } - - switch { - case conds.MetagenerationMatch != 0: - if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) - } - case conds.MetagenerationNotMatch != 0: - if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) - } - } - return nil -} - -func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy { - if rp == nil { - return nil - } - return &raw.BucketRetentionPolicy{ - RetentionPeriod: int64(rp.RetentionPeriod / time.Second), - } -} - -func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionPolicy { - if rp == nil { - return nil - } - // RetentionPeriod must be greater than 0, so if it is 0, the user left it - // unset, and so we should not send it in the request i.e. nil is sent.
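// [Editorial sketch, not part of the vendored file] Locking a retention
// policy with the metageneration precondition that LockRetentionPolicy
// requires, per the doc comment above. The bucket name is a placeholder.
func exampleLockRetention(ctx context.Context, client *Client) error {
	b := client.Bucket("example-bucket")
	attrs, err := b.Attrs(ctx)
	if err != nil {
		return err
	}
	return b.If(BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).LockRetentionPolicy(ctx)
}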
- var dur *durationpb.Duration - if rp.RetentionPeriod != 0 { - dur = durationpb.New(rp.RetentionPeriod) - } - return &storagepb.Bucket_RetentionPolicy{ - RetentionDuration: dur, - } -} - -func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) { - if rp == nil || rp.EffectiveTime == "" { - return nil, nil - } - t, err := time.Parse(time.RFC3339, rp.EffectiveTime) - if err != nil { - return nil, err - } - return &RetentionPolicy{ - RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second, - EffectiveTime: t, - IsLocked: rp.IsLocked, - }, nil -} - -func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *RetentionPolicy { - if rp == nil || rp.GetEffectiveTime().AsTime().Unix() == 0 { - return nil - } - return &RetentionPolicy{ - RetentionPeriod: rp.GetRetentionDuration().AsDuration(), - EffectiveTime: rp.GetEffectiveTime().AsTime(), - IsLocked: rp.GetIsLocked(), - } -} - -func toRawCORS(c []CORS) []*raw.BucketCors { - var out []*raw.BucketCors - for _, v := range c { - out = append(out, &raw.BucketCors{ - MaxAgeSeconds: int64(v.MaxAge / time.Second), - Method: v.Methods, - Origin: v.Origins, - ResponseHeader: v.ResponseHeaders, - }) - } - return out -} - -func toProtoCORS(c []CORS) []*storagepb.Bucket_Cors { - var out []*storagepb.Bucket_Cors - for _, v := range c { - out = append(out, &storagepb.Bucket_Cors{ - MaxAgeSeconds: int32(v.MaxAge / time.Second), - Method: v.Methods, - Origin: v.Origins, - ResponseHeader: v.ResponseHeaders, - }) - } - return out -} - -func toCORS(rc []*raw.BucketCors) []CORS { - var out []CORS - for _, v := range rc { - out = append(out, CORS{ - MaxAge: time.Duration(v.MaxAgeSeconds) * time.Second, - Methods: v.Method, - Origins: v.Origin, - ResponseHeaders: v.ResponseHeader, - }) - } - return out -} - -func toCORSFromProto(rc []*storagepb.Bucket_Cors) []CORS { - var out []CORS - for _, v := range rc { - out = append(out, CORS{ - MaxAge: time.Duration(v.GetMaxAgeSeconds()) * time.Second, - Methods: v.GetMethod(), - Origins: v.GetOrigin(), - ResponseHeaders: v.GetResponseHeader(), - }) - } - return out -} - -func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { - var rl raw.BucketLifecycle - if len(l.Rules) == 0 { - rl.ForceSendFields = []string{"Rule"} - } - for _, r := range l.Rules { - rr := &raw.BucketLifecycleRule{ - Action: &raw.BucketLifecycleRuleAction{ - Type: r.Action.Type, - StorageClass: r.Action.StorageClass, - }, - Condition: &raw.BucketLifecycleRuleCondition{ - DaysSinceCustomTime: r.Condition.DaysSinceCustomTime, - DaysSinceNoncurrentTime: r.Condition.DaysSinceNoncurrentTime, - MatchesPrefix: r.Condition.MatchesPrefix, - MatchesStorageClass: r.Condition.MatchesStorageClasses, - MatchesSuffix: r.Condition.MatchesSuffix, - NumNewerVersions: r.Condition.NumNewerVersions, - }, - } - - // AllObjects takes precedence when both AllObjects and AgeInDays are set - // Rationale: If you've opted into using AllObjects, it makes sense that you - // understand the implications of how this option works with AgeInDays.
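// [Editorial sketch, not part of the vendored file] The AllObjects behavior
// described above, seen from the caller's side: this rule matches every
// object in the bucket, because Age is force-sent as 0 in the conversion
// that follows.
var exampleDeleteAll = LifecycleRule{
	Action:    LifecycleAction{Type: DeleteAction},
	Condition: LifecycleCondition{AllObjects: true},
}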
- if r.Condition.AllObjects { - rr.Condition.Age = googleapi.Int64(0) - rr.Condition.ForceSendFields = []string{"Age"} - } else if r.Condition.AgeInDays > 0 { - rr.Condition.Age = googleapi.Int64(r.Condition.AgeInDays) - } - - switch r.Condition.Liveness { - case LiveAndArchived: - rr.Condition.IsLive = nil - case Live: - rr.Condition.IsLive = googleapi.Bool(true) - case Archived: - rr.Condition.IsLive = googleapi.Bool(false) - } - - if !r.Condition.CreatedBefore.IsZero() { - rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date) - } - if !r.Condition.CustomTimeBefore.IsZero() { - rr.Condition.CustomTimeBefore = r.Condition.CustomTimeBefore.Format(rfc3339Date) - } - if !r.Condition.NoncurrentTimeBefore.IsZero() { - rr.Condition.NoncurrentTimeBefore = r.Condition.NoncurrentTimeBefore.Format(rfc3339Date) - } - rl.Rule = append(rl.Rule, rr) - } - return &rl -} - -func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle { - var rl storagepb.Bucket_Lifecycle - - for _, r := range l.Rules { - rr := &storagepb.Bucket_Lifecycle_Rule{ - Action: &storagepb.Bucket_Lifecycle_Rule_Action{ - Type: r.Action.Type, - StorageClass: r.Action.StorageClass, - }, - Condition: &storagepb.Bucket_Lifecycle_Rule_Condition{ - // Note: The Apiary types use int64 (even though the Discovery - // doc states "format: int32"), so the client types used int64, - // but the proto uses int32 so we have a potentially lossy - // conversion. - AgeDays: proto.Int32(int32(r.Condition.AgeInDays)), - DaysSinceCustomTime: proto.Int32(int32(r.Condition.DaysSinceCustomTime)), - DaysSinceNoncurrentTime: proto.Int32(int32(r.Condition.DaysSinceNoncurrentTime)), - MatchesPrefix: r.Condition.MatchesPrefix, - MatchesStorageClass: r.Condition.MatchesStorageClasses, - MatchesSuffix: r.Condition.MatchesSuffix, - NumNewerVersions: proto.Int32(int32(r.Condition.NumNewerVersions)), - }, - } - - // TODO(#6205): This may not be needed for gRPC - if r.Condition.AllObjects { - rr.Condition.AgeDays = proto.Int32(0) - } - - switch r.Condition.Liveness { - case LiveAndArchived: - rr.Condition.IsLive = nil - case Live: - rr.Condition.IsLive = proto.Bool(true) - case Archived: - rr.Condition.IsLive = proto.Bool(false) - } - - if !r.Condition.CreatedBefore.IsZero() { - rr.Condition.CreatedBefore = timeToProtoDate(r.Condition.CreatedBefore) - } - if !r.Condition.CustomTimeBefore.IsZero() { - rr.Condition.CustomTimeBefore = timeToProtoDate(r.Condition.CustomTimeBefore) - } - if !r.Condition.NoncurrentTimeBefore.IsZero() { - rr.Condition.NoncurrentTimeBefore = timeToProtoDate(r.Condition.NoncurrentTimeBefore) - } - rl.Rule = append(rl.Rule, rr) - } - return &rl -} - -func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { - var l Lifecycle - if rl == nil { - return l - } - for _, rr := range rl.Rule { - r := LifecycleRule{ - Action: LifecycleAction{ - Type: rr.Action.Type, - StorageClass: rr.Action.StorageClass, - }, - Condition: LifecycleCondition{ - DaysSinceCustomTime: rr.Condition.DaysSinceCustomTime, - DaysSinceNoncurrentTime: rr.Condition.DaysSinceNoncurrentTime, - MatchesPrefix: rr.Condition.MatchesPrefix, - MatchesStorageClasses: rr.Condition.MatchesStorageClass, - MatchesSuffix: rr.Condition.MatchesSuffix, - NumNewerVersions: rr.Condition.NumNewerVersions, - }, - } - if rr.Condition.Age != nil { - r.Condition.AgeInDays = *rr.Condition.Age - if *rr.Condition.Age == 0 { - r.Condition.AllObjects = true - } - } - - if rr.Condition.IsLive == nil { - r.Condition.Liveness = LiveAndArchived - } else if *rr.Condition.IsLive { - 
r.Condition.Liveness = Live - } else { - r.Condition.Liveness = Archived - } - - if rr.Condition.CreatedBefore != "" { - r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) - } - if rr.Condition.CustomTimeBefore != "" { - r.Condition.CustomTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.CustomTimeBefore) - } - if rr.Condition.NoncurrentTimeBefore != "" { - r.Condition.NoncurrentTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.NoncurrentTimeBefore) - } - l.Rules = append(l.Rules, r) - } - return l -} - -func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle { - var l Lifecycle - if rl == nil { - return l - } - for _, rr := range rl.GetRule() { - r := LifecycleRule{ - Action: LifecycleAction{ - Type: rr.GetAction().GetType(), - StorageClass: rr.GetAction().GetStorageClass(), - }, - Condition: LifecycleCondition{ - AgeInDays: int64(rr.GetCondition().GetAgeDays()), - DaysSinceCustomTime: int64(rr.GetCondition().GetDaysSinceCustomTime()), - DaysSinceNoncurrentTime: int64(rr.GetCondition().GetDaysSinceNoncurrentTime()), - MatchesPrefix: rr.GetCondition().GetMatchesPrefix(), - MatchesStorageClasses: rr.GetCondition().GetMatchesStorageClass(), - MatchesSuffix: rr.GetCondition().GetMatchesSuffix(), - NumNewerVersions: int64(rr.GetCondition().GetNumNewerVersions()), - }, - } - - // TODO(#6205): This may not be needed for gRPC - if rr.GetCondition().GetAgeDays() == 0 { - r.Condition.AllObjects = true - } - - if rr.GetCondition().IsLive == nil { - r.Condition.Liveness = LiveAndArchived - } else if rr.GetCondition().GetIsLive() { - r.Condition.Liveness = Live - } else { - r.Condition.Liveness = Archived - } - - if rr.GetCondition().GetCreatedBefore() != nil { - r.Condition.CreatedBefore = protoDateToUTCTime(rr.GetCondition().GetCreatedBefore()) - } - if rr.GetCondition().GetCustomTimeBefore() != nil { - r.Condition.CustomTimeBefore = protoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore()) - } - if rr.GetCondition().GetNoncurrentTimeBefore() != nil { - r.Condition.NoncurrentTimeBefore = protoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore()) - } - l.Rules = append(l.Rules, r) - } - return l -} - -func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption { - if e == nil { - return nil - } - return &raw.BucketEncryption{ - DefaultKmsKeyName: e.DefaultKMSKeyName, - } -} - -func (e *BucketEncryption) toProtoBucketEncryption() *storagepb.Bucket_Encryption { - if e == nil { - return nil - } - return &storagepb.Bucket_Encryption{ - DefaultKmsKey: e.DefaultKMSKeyName, - } -} - -func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption { - if e == nil { - return nil - } - return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName} -} - -func toBucketEncryptionFromProto(e *storagepb.Bucket_Encryption) *BucketEncryption { - if e == nil { - return nil - } - return &BucketEncryption{DefaultKMSKeyName: e.GetDefaultKmsKey()} -} - -func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging { - if b == nil { - return nil - } - return &raw.BucketLogging{ - LogBucket: b.LogBucket, - LogObjectPrefix: b.LogObjectPrefix, - } -} - -func (b *BucketLogging) toProtoBucketLogging() *storagepb.Bucket_Logging { - if b == nil { - return nil - } - return &storagepb.Bucket_Logging{ - LogBucket: bucketResourceName(globalProjectAlias, b.LogBucket), - LogObjectPrefix: b.LogObjectPrefix, - } -} - -func toBucketLogging(b *raw.BucketLogging) *BucketLogging { - if b == nil { - return nil - } - return &BucketLogging{ - LogBucket: b.LogBucket, - 
LogObjectPrefix: b.LogObjectPrefix, - } -} - -func toBucketLoggingFromProto(b *storagepb.Bucket_Logging) *BucketLogging { - if b == nil { - return nil - } - lb := parseBucketName(b.GetLogBucket()) - return &BucketLogging{ - LogBucket: lb, - LogObjectPrefix: b.GetLogObjectPrefix(), - } -} - -func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite { - if w == nil { - return nil - } - return &raw.BucketWebsite{ - MainPageSuffix: w.MainPageSuffix, - NotFoundPage: w.NotFoundPage, - } -} - -func (w *BucketWebsite) toProtoBucketWebsite() *storagepb.Bucket_Website { - if w == nil { - return nil - } - return &storagepb.Bucket_Website{ - MainPageSuffix: w.MainPageSuffix, - NotFoundPage: w.NotFoundPage, - } -} - -func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite { - if w == nil { - return nil - } - return &BucketWebsite{ - MainPageSuffix: w.MainPageSuffix, - NotFoundPage: w.NotFoundPage, - } -} - -func toBucketWebsiteFromProto(w *storagepb.Bucket_Website) *BucketWebsite { - if w == nil { - return nil - } - return &BucketWebsite{ - MainPageSuffix: w.GetMainPageSuffix(), - NotFoundPage: w.GetNotFoundPage(), - } -} - -func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly { - if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled { - return BucketPolicyOnly{} - } - lt, err := time.Parse(time.RFC3339, b.BucketPolicyOnly.LockedTime) - if err != nil { - return BucketPolicyOnly{ - Enabled: true, - } - } - return BucketPolicyOnly{ - Enabled: true, - LockedTime: lt, - } -} - -func toBucketPolicyOnlyFromProto(b *storagepb.Bucket_IamConfig) BucketPolicyOnly { - if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() { - return BucketPolicyOnly{} - } - return BucketPolicyOnly{ - Enabled: true, - LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(), - } -} - -func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLevelAccess { - if b == nil || b.UniformBucketLevelAccess == nil || !b.UniformBucketLevelAccess.Enabled { - return UniformBucketLevelAccess{} - } - lt, err := time.Parse(time.RFC3339, b.UniformBucketLevelAccess.LockedTime) - if err != nil { - return UniformBucketLevelAccess{ - Enabled: true, - } - } - return UniformBucketLevelAccess{ - Enabled: true, - LockedTime: lt, - } -} - -func toUniformBucketLevelAccessFromProto(b *storagepb.Bucket_IamConfig) UniformBucketLevelAccess { - if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() { - return UniformBucketLevelAccess{} - } - return UniformBucketLevelAccess{ - Enabled: true, - LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(), - } -} - -func toPublicAccessPrevention(b *raw.BucketIamConfiguration) PublicAccessPrevention { - if b == nil { - return PublicAccessPreventionUnknown - } - switch b.PublicAccessPrevention { - case publicAccessPreventionInherited, publicAccessPreventionUnspecified: - return PublicAccessPreventionInherited - case publicAccessPreventionEnforced: - return PublicAccessPreventionEnforced - default: - return PublicAccessPreventionUnknown - } -} - -func toPublicAccessPreventionFromProto(b *storagepb.Bucket_IamConfig) PublicAccessPrevention { - if b == nil { - return PublicAccessPreventionUnknown - } - switch b.GetPublicAccessPrevention() { - case publicAccessPreventionInherited, publicAccessPreventionUnspecified: - return PublicAccessPreventionInherited - case publicAccessPreventionEnforced: - return PublicAccessPreventionEnforced - default: - return PublicAccessPreventionUnknown - } -} - -func toRPO(b *raw.Bucket) RPO { - if b 
== nil { - return RPOUnknown - } - switch b.Rpo { - case rpoDefault: - return RPODefault - case rpoAsyncTurbo: - return RPOAsyncTurbo - default: - return RPOUnknown - } -} - -func toRPOFromProto(b *storagepb.Bucket) RPO { - if b == nil { - return RPOUnknown - } - switch b.GetRpo() { - case rpoDefault: - return RPODefault - case rpoAsyncTurbo: - return RPOAsyncTurbo - default: - return RPOUnknown - } -} - -func customPlacementFromRaw(c *raw.BucketCustomPlacementConfig) *CustomPlacementConfig { - if c == nil { - return nil - } - return &CustomPlacementConfig{DataLocations: c.DataLocations} -} - -func (c *CustomPlacementConfig) toRawCustomPlacement() *raw.BucketCustomPlacementConfig { - if c == nil { - return nil - } - return &raw.BucketCustomPlacementConfig{ - DataLocations: c.DataLocations, - } -} - -func (c *CustomPlacementConfig) toProtoCustomPlacement() *storagepb.Bucket_CustomPlacementConfig { - if c == nil { - return nil - } - return &storagepb.Bucket_CustomPlacementConfig{ - DataLocations: c.DataLocations, - } -} - -func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *CustomPlacementConfig { - if c == nil { - return nil - } - return &CustomPlacementConfig{DataLocations: c.GetDataLocations()} -} - -func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass { - if a == nil { - return nil - } - // Excluding read only field ToggleTime. - return &raw.BucketAutoclass{ - Enabled: a.Enabled, - } -} - -func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass { - if a == nil { - return nil - } - // Excluding read only field ToggleTime. - return &storagepb.Bucket_Autoclass{ - Enabled: a.Enabled, - } -} - -func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass { - if a == nil || a.ToggleTime == "" { - return nil - } - // Return Autoclass.ToggleTime only if parsed with a valid value. - t, err := time.Parse(time.RFC3339, a.ToggleTime) - if err != nil { - return &Autoclass{ - Enabled: a.Enabled, - } - } - return &Autoclass{ - Enabled: a.Enabled, - ToggleTime: t, - } -} - -func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { - if a == nil || a.GetToggleTime().AsTime().Unix() == 0 { - return nil - } - return &Autoclass{ - Enabled: a.GetEnabled(), - ToggleTime: a.GetToggleTime().AsTime(), - } -} - -// Objects returns an iterator over the objects in the bucket that match the -// Query q. If q is nil, no filtering is done. Objects will be iterated over -// lexicographically by name. -// -// Note: The returned iterator is not safe for concurrent operations without explicit synchronization. -func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { - o := makeStorageOpts(true, b.retry, b.userProject) - return b.c.tc.ListObjects(ctx, b.name, q, o...) -} - -// Retryer returns a bucket handle that is configured with custom retry -// behavior as specified by the options that are passed to it. All operations -// on the new handle will use the customized retry configuration. -// Retry options set on an object handle will take precedence over options set on -// the bucket handle. -// These retry options will merge with the client's retry configuration (if set) -// for the returned handle. Options passed into this method will take precedence -// over retry options on the client. Note that you must explicitly pass in each -// option you want to override. 
-func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle { - b2 := *b - var retry *retryConfig - if b.retry != nil { - // merge the options with the existing retry - retry = b.retry - } else { - retry = &retryConfig{} - } - for _, opt := range opts { - opt.apply(retry) - } - b2.retry = retry - b2.acl.retry = retry - b2.defaultObjectACL.retry = retry - return &b2 -} - -// An ObjectIterator is an iterator over ObjectAttrs. -// -// Note: This iterator is not safe for concurrent operations without explicit synchronization. -type ObjectIterator struct { - ctx context.Context - query Query - pageInfo *iterator.PageInfo - nextFunc func() error - items []*ObjectAttrs -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -// -// Note: This method is not safe for concurrent operations without explicit synchronization. -func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } - -// Next returns the next result. Its second return value is iterator.Done if -// there are no more results. Once Next returns iterator.Done, all subsequent -// calls will return iterator.Done. -// -// In addition, if Next returns an error other than iterator.Done, all -// subsequent calls will return the same error. To continue iteration, a new -// `ObjectIterator` must be created. Since objects are ordered lexicographically -// by name, `Query.StartOffset` can be used to create a new iterator which will -// start at the desired place. See -// https://pkg.go.dev/cloud.google.com/go/storage?tab=doc#hdr-Listing_objects. -// -// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will -// have a non-empty Prefix field, and a zero value for all other fields. These -// represent prefixes. -// -// Note: This method is not safe for concurrent operations without explicit synchronization. -func (it *ObjectIterator) Next() (*ObjectAttrs, error) { - if err := it.nextFunc(); err != nil { - return nil, err - } - item := it.items[0] - it.items = it.items[1:] - return item, nil -} - -// Buckets returns an iterator over the buckets in the project. You may -// optionally set the iterator's Prefix field to restrict the list to buckets -// whose names begin with the prefix. By default, all buckets in the project -// are returned. -// -// Note: The returned iterator is not safe for concurrent operations without explicit synchronization. -func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { - o := makeStorageOpts(true, c.retry, "") - return c.tc.ListBuckets(ctx, projectID, o...) -} - -// A BucketIterator is an iterator over BucketAttrs. -// -// Note: This iterator is not safe for concurrent operations without explicit synchronization. -type BucketIterator struct { - // Prefix restricts the iterator to buckets whose names begin with it. - Prefix string - - ctx context.Context - projectID string - buckets []*BucketAttrs - pageInfo *iterator.PageInfo - nextFunc func() error -} - -// Next returns the next result. Its second return value is iterator.Done if -// there are no more results. Once Next returns iterator.Done, all subsequent -// calls will return iterator.Done. -// -// Note: This method is not safe for concurrent operations without explicit synchronization. -func (it *BucketIterator) Next() (*BucketAttrs, error) { - if err := it.nextFunc(); err != nil { - return nil, err - } - b := it.buckets[0] - it.buckets = it.buckets[1:] - return b, nil -} - -// PageInfo supports pagination. 
See the google.golang.org/api/iterator package for details. -// -// Note: This method is not safe for concurrent operations without explicit synchronization. -func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } - -// RPO (Recovery Point Objective) configures the turbo replication feature. See -// https://cloud.google.com/storage/docs/managing-turbo-replication for more information. -type RPO int - -const ( - // RPOUnknown is a zero value. It may be returned from bucket.Attrs() if RPO - // is not present in the bucket metadata, that is, the bucket is not dual-region. - // This value is also used if the RPO field is not set in a call to GCS. - RPOUnknown RPO = iota - - // RPODefault represents default replication. It is used to reset RPO on an - // existing bucket that has this field set to RPOAsyncTurbo. Otherwise it - // is equivalent to RPOUnknown, and is always ignored. This value is valid - // for dual- or multi-region buckets. - RPODefault - - // RPOAsyncTurbo represents turbo replication and is used to enable Turbo - // Replication on a bucket. This value is only valid for dual-region buckets. - RPOAsyncTurbo - - rpoUnknown string = "" - rpoDefault = "DEFAULT" - rpoAsyncTurbo = "ASYNC_TURBO" -) - -func (rpo RPO) String() string { - switch rpo { - case RPODefault: - return rpoDefault - case RPOAsyncTurbo: - return rpoAsyncTurbo - default: - return rpoUnknown - } -} - -// protoDateToUTCTime returns a new Time based on the google.type.Date, in UTC. -// -// Hours, minutes, seconds, and nanoseconds are set to 0. -func protoDateToUTCTime(d *dpb.Date) time.Time { - return protoDateToTime(d, time.UTC) -} - -// protoDateToTime returns a new Time based on the google.type.Date and provided -// *time.Location. -// -// Hours, minutes, seconds, and nanoseconds are set to 0. -func protoDateToTime(d *dpb.Date, l *time.Location) time.Time { - return time.Date(int(d.GetYear()), time.Month(d.GetMonth()), int(d.GetDay()), 0, 0, 0, 0, l) -} - -// timeToProtoDate returns a new google.type.Date based on the provided time.Time. -// The location is ignored, as is anything more precise than the day. -func timeToProtoDate(t time.Time) *dpb.Date { - return &dpb.Date{ - Year: int32(t.Year()), - Month: int32(t.Month()), - Day: int32(t.Day()), - } -} diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go deleted file mode 100644 index d579a2b1ee..0000000000 --- a/vendor/cloud.google.com/go/storage/client.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "io" - "time" - - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/option" - iampb "google.golang.org/genproto/googleapis/iam/v1" -) - -// TODO(noahdietz): Move existing factory methods to this file. 
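The Next/iterator.Done contract documented in the deleted bucket.go above implies the standard consumption loop. Below is a minimal sketch of draining an ObjectIterator, assuming an already-constructed *storage.Client; the bucket name and prefix are illustrative placeholders, not values taken from this change:

package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

// listObjectNames drains an ObjectIterator: iterator.Done signals the normal
// end of results, while any other error is sticky and ends iteration.
func listObjectNames(ctx context.Context, client *storage.Client, bucket string) ([]string, error) {
	it := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: "logs/"})
	var names []string
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break // every subsequent Next call would also return iterator.Done
		}
		if err != nil {
			// To resume after a failure, create a fresh iterator, e.g. with
			// Query.StartOffset set just past the last name received.
			return nil, err
		}
		names = append(names, attrs.Name)
	}
	return names, nil
}

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	names, err := listObjectNames(ctx, client, "example-bucket")
	if err != nil {
		panic(err)
	}
	fmt.Println(names)
}

Per the docs above, if Query.Delimiter were set, some returned entries would carry only a non-empty Prefix field; a caller can detect those synthetic prefix entries by checking attrs.Prefix != "".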
- -// storageClient is an internal-only interface designed to separate the -// transport-specific logic of making Storage API calls from the logic of the -// client library. -// -// Implementation requirements beyond implementing the interface include: -// * factory method(s) must accept a `userProject string` param -// * `settings` must be retained per instance -// * `storageOption`s must be resolved in the order they are received -// * all API errors must be wrapped in the gax-go APIError type -// * any unimplemented interface methods must return a StorageUnimplementedErr -// -// TODO(noahdietz): This interface is currently not used in the production code -// paths -type storageClient interface { - - // Top-level methods. - - GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) - CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) - ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator - Close() error - - // Bucket methods. - - DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error - GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) - UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) - LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error - ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator - - // Object metadata methods. - - DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error - GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) - UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) - - // Default Object ACL methods. - - DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error - ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) - UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error - - // Bucket ACL methods. - - DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error - ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) - UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error - - // Object ACL methods. - - DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error - ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) - UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error - - // Media operations. 
- - ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) - RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) - - NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (*Reader, error) - OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) - - // IAM methods. - - GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) - SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error - TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) - - // HMAC Key methods. - - GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) - ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator - UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) - CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) - DeleteHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) error - - // Notification methods. - ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error) - CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error) - DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error -} - -// settings contains transport-agnostic configuration for API calls made via -// the storageClient interface. All implementations must utilize settings -// and respect those that are applicable. -type settings struct { - // retry is the complete retry configuration to use when evaluating if an - // API call should be retried. - retry *retryConfig - - // gax is a set of gax.CallOption to be conveyed to gax.Invoke. - // Note: Not all storageClient implementations will use gax.Invoke. - gax []gax.CallOption - - // idempotent indicates if the call is idempotent or not when considering - // if the call should be retried. - idempotent bool - - // clientOption is a set of option.ClientOption to be used during client - // transport initialization. See https://pkg.go.dev/google.golang.org/api/option - // for a list of supported options. - clientOption []option.ClientOption - - // userProject is the user project that should be billed for the request. - userProject string -} - -func initSettings(opts ...storageOption) *settings { - s := &settings{} - resolveOptions(s, opts...) - return s -} - -func resolveOptions(s *settings, opts ...storageOption) { - for _, o := range opts { - o.Apply(s) - } -} - -// callSettings is a helper for resolving storage options against the settings -// in the context of an individual call. This is to ensure that client-level -// default settings are not mutated by two different calls getting options. -// -// Example: s := callSettings(c.settings, opts...) 
-func callSettings(defaults *settings, opts ...storageOption) *settings { - if defaults == nil { - return nil - } - // This does not make a deep copy of the pointer/slice fields, but all - // options replace the settings fields rather than modify their values in - // place. - cs := *defaults - resolveOptions(&cs, opts...) - return &cs -} - -// makeStorageOpts is a helper for generating a set of storageOption based on -// idempotency, retryConfig, and userProject. All top-level client operations -// will generally have to pass these options through the interface. -func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption { - opts := []storageOption{idempotent(isIdempotent)} - if retry != nil { - opts = append(opts, withRetryConfig(retry)) - } - if userProject != "" { - opts = append(opts, withUserProject(userProject)) - } - return opts -} - -// storageOption is the transport-agnostic call option for the storageClient -// interface. -type storageOption interface { - Apply(s *settings) -} - -func withGAXOptions(opts ...gax.CallOption) storageOption { - return &gaxOption{opts} -} - -type gaxOption struct { - opts []gax.CallOption -} - -func (o *gaxOption) Apply(s *settings) { s.gax = o.opts } - -func withRetryConfig(rc *retryConfig) storageOption { - return &retryOption{rc} -} - -type retryOption struct { - rc *retryConfig -} - -func (o *retryOption) Apply(s *settings) { s.retry = o.rc } - -func idempotent(i bool) storageOption { - return &idempotentOption{i} -} - -type idempotentOption struct { - idempotency bool -} - -func (o *idempotentOption) Apply(s *settings) { s.idempotent = o.idempotency } - -func withClientOptions(opts ...option.ClientOption) storageOption { - return &clientOption{opts: opts} -} - -type clientOption struct { - opts []option.ClientOption -} - -func (o *clientOption) Apply(s *settings) { s.clientOption = o.opts } - -func withUserProject(project string) storageOption { - return &userProjectOption{project} -} - -type userProjectOption struct { - project string -} - -func (o *userProjectOption) Apply(s *settings) { s.userProject = o.project } - -type openWriterParams struct { - // Writer configuration - - // ctx is the context used by the writer routine to make all network calls - // and to manage the writer routine - see `Writer.ctx`. - // Required. - ctx context.Context - // chunkSize - see `Writer.ChunkSize`. - // Optional. - chunkSize int - // chunkRetryDeadline - see `Writer.ChunkRetryDeadline`. - // Optional. - chunkRetryDeadline time.Duration - - // Object/request properties - - // bucket - see `Writer.o.bucket`. - // Required. - bucket string - // attrs - see `Writer.ObjectAttrs`. - // Required. - attrs *ObjectAttrs - // conds - see `Writer.o.conds`. - // Optional. - conds *Conditions - // encryptionKey - see `Writer.o.encryptionKey` - // Optional. - encryptionKey []byte - // sendCRC32C - see `Writer.SendCRC32C`. - // Optional. - sendCRC32C bool - - // Writer callbacks - - // donec - see `Writer.donec`. - // Required. - donec chan struct{} - // setError callback for reporting errors - see `Writer.error`. - // Required. - setError func(error) - // progress callback for reporting upload progress - see `Writer.progress`. - // Required. - progress func(int64) - // setObj callback for reporting the resulting object - see `Writer.obj`. - // Required. 
- setObj func(*ObjectAttrs) -} - -type newRangeReaderParams struct { - bucket string - conds *Conditions - encryptionKey []byte - gen int64 - length int64 - object string - offset int64 - readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently. -} - -type composeObjectRequest struct { - dstBucket string - dstObject destinationObject - srcs []sourceObject - predefinedACL string - sendCRC32C bool -} - -type sourceObject struct { - name string - bucket string - gen int64 - conds *Conditions - encryptionKey []byte -} - -type destinationObject struct { - name string - bucket string - conds *Conditions - attrs *ObjectAttrs // attrs to set on the destination object. - encryptionKey []byte - keyName string -} - -type rewriteObjectRequest struct { - srcObject sourceObject - dstObject destinationObject - predefinedACL string - token string - maxBytesRewrittenPerCall int64 -} - -type rewriteObjectResponse struct { - resource *ObjectAttrs - done bool - written int64 - size int64 - token string -} diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go deleted file mode 100644 index a0b9a2683c..0000000000 --- a/vendor/cloud.google.com/go/storage/copy.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "errors" - "fmt" - - "cloud.google.com/go/internal/trace" -) - -// CopierFrom creates a Copier that can copy src to dst. -// You can immediately call Run on the returned Copier, or -// you can configure it first. -// -// For Requester Pays buckets, the user project of dst is billed, unless it is empty, -// in which case the user project of src is billed. -func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { - return &Copier{dst: dst, src: src} -} - -// A Copier copies a source object to a destination. -type Copier struct { - // ObjectAttrs are optional attributes to set on the destination object. - // Any attributes must be initialized before any calls on the Copier. Nil - // or zero-valued attributes are ignored. - ObjectAttrs - - // RewriteToken can be set before calling Run to resume a copy - // operation. After Run returns a non-nil error, RewriteToken will - // have been updated to contain the value needed to resume the copy. - RewriteToken string - - // ProgressFunc can be used to monitor the progress of a multi-RPC copy - // operation. If ProgressFunc is not nil and copying requires multiple - // calls to the underlying service (see - // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then - // ProgressFunc will be invoked after each call with the number of bytes of - // content copied so far and the total size in bytes of the source object. - // - // ProgressFunc is intended to make upload progress available to the - // application. 
For example, the implementation of ProgressFunc may update - // a progress bar in the application's UI, or log the result of - // float64(copiedBytes)/float64(totalBytes). - // - // ProgressFunc should return quickly without blocking. - ProgressFunc func(copiedBytes, totalBytes uint64) - - // The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K, - // that will be used to encrypt the object. Overrides the object's KMSKeyName, if - // any. - // - // Providing both a DestinationKMSKeyName and a customer-supplied encryption key - // (via ObjectHandle.Key) on the destination object will result in an error when - // Run is called. - DestinationKMSKeyName string - - dst, src *ObjectHandle - - // The maximum number of bytes that will be rewritten per rewrite request. - // Most callers shouldn't need to specify this parameter - it is primarily - // in place to support testing. If specified the value must be an integral - // multiple of 1 MiB (1048576). Also, this only applies to requests where - // the source and destination span locations and/or storage classes. Finally, - // this value must not change across rewrite calls else you'll get an error - // that the `rewriteToken` is invalid. - maxBytesRewrittenPerCall int64 -} - -// Run performs the copy. -func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run") - defer func() { trace.EndSpan(ctx, err) }() - - if err := c.src.validate(); err != nil { - return nil, err - } - if err := c.dst.validate(); err != nil { - return nil, err - } - if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil { - return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key") - } - if c.dst.gen != defaultGen { - return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen) - } - // Convert destination attributes to raw form, omitting the bucket. - // If the bucket is included but name or content-type aren't, the service - // returns a 400 with "Required" as the only message. Omitting the bucket - // does not cause any problems. - req := &rewriteObjectRequest{ - srcObject: sourceObject{ - name: c.src.object, - bucket: c.src.bucket, - gen: c.src.gen, - conds: c.src.conds, - encryptionKey: c.src.encryptionKey, - }, - dstObject: destinationObject{ - name: c.dst.object, - bucket: c.dst.bucket, - conds: c.dst.conds, - attrs: &c.ObjectAttrs, - encryptionKey: c.dst.encryptionKey, - keyName: c.DestinationKMSKeyName, - }, - predefinedACL: c.PredefinedACL, - token: c.RewriteToken, - maxBytesRewrittenPerCall: c.maxBytesRewrittenPerCall, - } - - isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) - var userProject string - if c.dst.userProject != "" { - userProject = c.dst.userProject - } else if c.src.userProject != "" { - userProject = c.src.userProject - } - opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject) - - for { - res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...) - if err != nil { - return nil, err - } - c.RewriteToken = res.token - req.token = res.token - if c.ProgressFunc != nil { - c.ProgressFunc(uint64(res.written), uint64(res.size)) - } - if res.done { // Finished successfully. - return res.resource, nil - } - } -} - -// ComposerFrom creates a Composer that can compose srcs into dst. -// You can immediately call Run on the returned Composer, or you can -// configure it first. 
-// -// The encryption key for the destination object will be used to decrypt all -// source objects and encrypt the destination object. It is an error -// to specify an encryption key for any of the source objects. -func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { - return &Composer{dst: dst, srcs: srcs} -} - -// A Composer composes source objects into a destination object. -// -// For Requester Pays buckets, the user project of dst is billed. -type Composer struct { - // ObjectAttrs are optional attributes to set on the destination object. - // Any attributes must be initialized before any calls on the Composer. Nil - // or zero-valued attributes are ignored. - ObjectAttrs - - // SendCRC specifies whether to transmit a CRC32C field. It should be set - // to true in addition to setting the Composer's CRC32C field, because zero - // is a valid CRC and normally a zero would not be transmitted. - // If a CRC32C is sent, and the data in the destination object does not match - // the checksum, the compose will be rejected. - SendCRC32C bool - - dst *ObjectHandle - srcs []*ObjectHandle -} - -// Run performs the compose operation. -func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run") - defer func() { trace.EndSpan(ctx, err) }() - - if err := c.dst.validate(); err != nil { - return nil, err - } - if c.dst.gen != defaultGen { - return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen) - } - if len(c.srcs) == 0 { - return nil, errors.New("storage: at least one source object must be specified") - } - - for _, src := range c.srcs { - if err := src.validate(); err != nil { - return nil, err - } - if src.bucket != c.dst.bucket { - return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket) - } - if src.encryptionKey != nil { - return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) - } - } - - req := &composeObjectRequest{ - dstBucket: c.dst.bucket, - predefinedACL: c.PredefinedACL, - sendCRC32C: c.SendCRC32C, - } - req.dstObject = destinationObject{ - name: c.dst.object, - bucket: c.dst.bucket, - conds: c.dst.conds, - attrs: &c.ObjectAttrs, - encryptionKey: c.dst.encryptionKey, - } - for _, src := range c.srcs { - s := sourceObject{ - name: src.object, - bucket: src.bucket, - gen: src.gen, - conds: src.conds, - } - req.srcs = append(req.srcs, s) - } - - isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) - opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject) - return c.dst.c.tc.ComposeObject(ctx, req, opts...) -} diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go deleted file mode 100644 index 8bf3098431..0000000000 --- a/vendor/cloud.google.com/go/storage/doc.go +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package storage provides an easy way to work with Google Cloud Storage. -Google Cloud Storage stores data in named objects, which are grouped into buckets. - -More information about Google Cloud Storage is available at -https://cloud.google.com/storage/docs. - -See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts, -connection pooling and similar aspects of this package. - -# Creating a Client - -To start working with this package, create a [Client]: - - ctx := context.Background() - client, err := storage.NewClient(ctx) - if err != nil { - // TODO: Handle error. - } - -The client will use your default application credentials. Clients should be -reused instead of created as needed. The methods of [Client] are safe for -concurrent use by multiple goroutines. - -If you only wish to access public data, you can create -an unauthenticated client with - - client, err := storage.NewClient(ctx, option.WithoutAuthentication()) - -To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST -environment variable to the address at which your emulator is running. This will -send requests to that address instead of to Cloud Storage. You can then create -and use a client as usual: - - // Set STORAGE_EMULATOR_HOST environment variable. - err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000") - if err != nil { - // TODO: Handle error. - } - - // Create client as usual. - client, err := storage.NewClient(ctx) - if err != nil { - // TODO: Handle error. - } - - // This request is now directed to http://localhost:9000/storage/v1/b - // instead of https://storage.googleapis.com/storage/v1/b - if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil { - // TODO: Handle error. - } - -Please note that there is no official emulator for Cloud Storage. - -# Buckets - -A Google Cloud Storage bucket is a collection of objects. To work with a -bucket, make a bucket handle: - - bkt := client.Bucket(bucketName) - -A handle is a reference to a bucket. You can have a handle even if the -bucket doesn't exist yet. To create a bucket in Google Cloud Storage, -call [BucketHandle.Create]: - - if err := bkt.Create(ctx, projectID, nil); err != nil { - // TODO: Handle error. - } - -Note that although buckets are associated with projects, bucket names are -global across all projects. - -Each bucket has associated metadata, represented in this package by -[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set -the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use -[BucketHandle.Attrs]: - - attrs, err := bkt.Attrs(ctx) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", - attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) - -# Objects - -An object holds arbitrary data as a sequence of bytes, like a file. You -refer to objects using a handle, just as with buckets, but unlike buckets -you don't explicitly create an object. Instead, the first time you write -to an object it will be created. You can use the standard Go [io.Reader] -and [io.Writer] interfaces to read and write object data: - - obj := bkt.Object("data") - // Write something to obj. - // w implements io.Writer. - w := obj.NewWriter(ctx) - // Write some text to obj. This will either create the object or overwrite whatever is there already. 
- if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { - // TODO: Handle error. - } - // Close, just like writing a file. - if err := w.Close(); err != nil { - // TODO: Handle error. - } - - // Read it back. - r, err := obj.NewReader(ctx) - if err != nil { - // TODO: Handle error. - } - defer r.Close() - if _, err := io.Copy(os.Stdout, r); err != nil { - // TODO: Handle error. - } - // Prints "This object contains text." - -Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]: - - objAttrs, err := obj.Attrs(ctx) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("object %s has size %d and can be read using %s\n", - objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) - -# Listing objects - -Listing objects in a bucket is done with the [BucketHandle.Objects] method: - - query := &storage.Query{Prefix: ""} - - var names []string - it := bkt.Objects(ctx, query) - for { - attrs, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - log.Fatal(err) - } - names = append(names, attrs.Name) - } - -Objects are listed lexicographically by name. To filter objects -lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used: - - query := &storage.Query{ - Prefix: "", - StartOffset: "bar/", // Only list objects lexicographically >= "bar/" - EndOffset: "foo/", // Only list objects lexicographically < "foo/" - } - - // ... as before - -If only a subset of object attributes is needed when listing, specifying this -subset using [Query.SetAttrSelection] may speed up the listing process: - - query := &storage.Query{Prefix: ""} - query.SetAttrSelection([]string{"Name"}) - - // ... as before - -# ACLs - -Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of -ACLRules, each of which specifies the role of a user, group or project. ACLs -are suitable for fine-grained control, but you may prefer using IAM to control -access at the project level (see [Cloud Storage IAM docs]). - -To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]: - - acls, err := obj.ACL().List(ctx) - if err != nil { - // TODO: Handle error. - } - for _, rule := range acls { - fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) - } - -You can also set and delete ACLs. - -# Conditions - -Every object has a generation and a metageneration. The generation changes -whenever the content changes, and the metageneration changes whenever the -metadata changes. [Conditions] let you check these values before an operation; -the operation only executes if the conditions match. You can use conditions to -prevent race conditions in read-modify-write operations. - -For example, say you've read an object's metadata into objAttrs. Now -you want to write to that object, but only if its contents haven't changed -since you read it. Here is how to express that: - - w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) - // Proceed with writing as above. - -# Signed URLs - -You can obtain a URL that lets anyone read or write an object for a limited time. -Signing a URL requires credentials authorized to sign a URL. To use the same -authentication that was used when instantiating the Storage client, use -[BucketHandle.SignedURL]. - - url, err := client.Bucket(bucketName).SignedURL(objectName, opts) - if err != nil { - // TODO: Handle error. - } - fmt.Println(url) - -You can also sign a URL without creating a client. See the documentation of -[SignedURL] for details. 
- - url, err := storage.SignedURL(bucketName, "shared-object", opts) - if err != nil { - // TODO: Handle error. - } - fmt.Println(url) - -# Post Policy V4 Signed Request - -A type of signed request that allows uploads through HTML forms directly to Cloud Storage with -temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised -by a user. - -For more information, please see the [XML POST Object docs] as well -as the documentation of [BucketHandle.GenerateSignedPostPolicyV4]. - - pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields) - -# Credential requirements for signing - -If the GoogleAccessID and PrivateKey option fields are not provided, they will -be automatically detected by [BucketHandle.SignedURL] and -[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true: - - you are authenticated to the Storage Client with a service account's - downloaded private key, either directly in code or by setting the - GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]), - - your application is running on Google Compute Engine (GCE), or - - you are logged into [gcloud using application default credentials] - with [impersonation enabled]. - -Detecting GoogleAccessID may not be possible if you are authenticated using a -token source or using [option.WithHTTPClient]. In this case, you can provide a -service account email for GoogleAccessID and the client will attempt to sign -the URL or Post Policy using that service account. - -To generate the signature, you must have: - - iam.serviceAccounts.signBlob permissions on the GoogleAccessID service - account, and - - the [IAM Service Account Credentials API] enabled (unless authenticating - with a downloaded private key). - -# Errors - -Errors returned by this client are often of the type [googleapi.Error]. -These errors can be introspected for more information by using [errors.As] -with the richer [googleapi.Error] type. For example: - - var e *googleapi.Error - if ok := errors.As(err, &e); ok { - if e.Code == 409 { ... } - } - -# Retrying failed requests - -Methods in this package may retry calls that fail with transient errors. -Retrying continues indefinitely unless the controlling context is canceled, the -client is closed, or a non-transient error is received. To stop retries from -continuing, use context timeouts or cancellation. - -The retry strategy in this library follows best practices for Cloud Storage. By -default, operations are retried only if they are idempotent, and exponential -backoff with jitter is employed. In addition, errors are only retried if they -are defined as transient by the service. See the [Cloud Storage retry docs] -for more information. - -Users can configure non-default retry behavior for a single library call (using -[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a -client (using [Client.SetRetry]). For example: - - o := client.Bucket(bucket).Object(object).Retryer( - // Use WithBackoff to change the timing of the exponential backoff. - storage.WithBackoff(gax.Backoff{ - Initial: 2 * time.Second, - }), - // Use WithPolicy to configure the idempotency policy. RetryAlways will - // retry the operation even if it is non-idempotent. 
- storage.WithPolicy(storage.RetryAlways), - ) - - // Use a context timeout to set an overall deadline on the call, including all - // potential retries. - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - // Delete an object using the specified strategy and timeout. - if err := o.Delete(ctx); err != nil { - // Handle err. - } - -[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam -[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object -[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy -[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth -[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login -[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account -[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview -*/ -package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/emulator_test.sh b/vendor/cloud.google.com/go/storage/emulator_test.sh deleted file mode 100644 index 7bad7cf391..0000000000 --- a/vendor/cloud.google.com/go/storage/emulator_test.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.. - -# Fail on any error -set -eo pipefail - -# Display commands being run -set -x - -# Only run on Go 1.17+ -min_minor_ver=17 - -v=`go version | { read _ _ v _; echo ${v#go}; }` -comps=(${v//./ }) -minor_ver=${comps[1]} - -if [ "$minor_ver" -lt "$min_minor_ver" ]; then - echo minor version $minor_ver, skipping - exit 0 -fi - -export STORAGE_EMULATOR_HOST="http://localhost:9000" -export STORAGE_EMULATOR_HOST_GRPC="localhost:8888" - -DEFAULT_IMAGE_NAME='gcr.io/cloud-devrel-public-resources/storage-testbench' -DEFAULT_IMAGE_TAG='latest' -DOCKER_IMAGE=${DEFAULT_IMAGE_NAME}:${DEFAULT_IMAGE_TAG} -CONTAINER_NAME=storage_testbench - -# Note: --net=host makes the container bind directly to the Docker host’s network, -# with no network isolation. If we were to use port-mapping instead, reset connection errors -# would be captured differently and cause unexpected test behaviour. -# The host networking driver works only on Linux hosts. -# See more about using host networking: https://docs.docker.com/network/host/ -DOCKER_NETWORK="--net=host" -# Note: We do not expect the RetryConformanceTest suite to pass on darwin due to -# differences in the network errors emitted by the system. 
-if [ `go env GOOS` == 'darwin' ]; then - DOCKER_NETWORK="-p 9000:9000 -p 8888:8888" -fi - -# Get the docker image for the testbench -docker pull $DOCKER_IMAGE - -# Start the testbench - -docker run --name $CONTAINER_NAME --rm -d $DOCKER_NETWORK $DOCKER_IMAGE -echo "Running the Cloud Storage testbench: $STORAGE_EMULATOR_HOST" -sleep 1 - -# Stop the testbench & cleanup environment variables -function cleanup() { - echo "Cleanup testbench" - docker stop $CONTAINER_NAME - unset STORAGE_EMULATOR_HOST; - unset STORAGE_EMULATOR_HOST_GRPC; -} -trap cleanup EXIT - -# Check that the server is running - retry several times to allow for start-up time -response=$(curl -w "%{http_code}\n" $STORAGE_EMULATOR_HOST --retry-connrefused --retry 5 -o /dev/null) - -if [[ $response != 200 ]] -then - echo "Testbench server did not start correctly" - exit 1 -fi - -# Start the gRPC server on port 8888. -echo "Starting the gRPC server on port 8888" -response=$(curl -w "%{http_code}\n" --retry 5 --retry-max-time 40 -o /dev/null "$STORAGE_EMULATOR_HOST/start_grpc?port=8888") - -if [[ $response != 200 ]] -then - echo "Testbench gRPC server did not start correctly" - exit 1 -fi - -# Run tests -go test -v -timeout 10m ./ -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go deleted file mode 100644 index 1dfb6f8302..0000000000 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ /dev/null @@ -1,1743 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "encoding/base64" - "fmt" - "io" - "net/url" - "os" - - "cloud.google.com/go/internal/trace" - gapic "cloud.google.com/go/storage/internal/apiv2" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" - "github.com/googleapis/gax-go/v2" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - iampb "google.golang.org/genproto/googleapis/iam/v1" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" -) - -const ( - // defaultConnPoolSize is the default number of connections - // to initialize in the GAPIC gRPC connection pool. A larger - // connection pool may be necessary for jobs that require - // high throughput and/or leverage many concurrent streams. - // - // This is only used for the gRPC client. - defaultConnPoolSize = 4 - - // maxPerMessageWriteSize is the maximum amount of content that can be sent - // per WriteObjectRequest message. A buffer reaching this amount will - // precipitate a flush of the buffer. It is only used by the gRPC Writer - // implementation. - maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES) - - // globalProjectAlias is the project ID alias used for global buckets. 
- // - // This is only used for the gRPC API. - globalProjectAlias = "_" - - // msgEntityNotSupported indicates ACL entities using project ID are not currently supported. - // - // This is only used for the gRPC API. - msgEntityNotSupported = "The gRPC API currently does not support ACL entities using project ID, use project numbers instead" -) - -// defaultGRPCOptions returns a set of the default client options -// for gRPC client initialization. -func defaultGRPCOptions() []option.ClientOption { - defaults := []option.ClientOption{ - option.WithGRPCConnectionPool(defaultConnPoolSize), - } - - // Set emulator options for gRPC if an emulator was specified. Note that in a - // hybrid client, STORAGE_EMULATOR_HOST will set the host to use for HTTP and - // STORAGE_EMULATOR_HOST_GRPC will set the host to use for gRPC (when using a - // local emulator, HTTP and gRPC must use different ports, so this is - // necessary). - // - // TODO: When the newHybridClient is no longer used, remove - // STORAGE_EMULATOR_HOST_GRPC and use STORAGE_EMULATOR_HOST for both the - // HTTP and gRPC based clients. - if host := os.Getenv("STORAGE_EMULATOR_HOST_GRPC"); host != "" { - // Strip the scheme from the emulator host. WithEndpoint does not take a - // scheme for gRPC. - host = stripScheme(host) - - defaults = append(defaults, - option.WithEndpoint(host), - option.WithGRPCDialOption(grpc.WithInsecure()), - option.WithoutAuthentication(), - ) - } else { - // Only enable DirectPath when the emulator is not being targeted. - defaults = append(defaults, internaloption.EnableDirectPath(true)) - } - - return defaults -} - -// grpcStorageClient is the gRPC API implementation of the transport-agnostic -// storageClient interface. -type grpcStorageClient struct { - raw *gapic.Client - settings *settings -} - -// newGRPCStorageClient initializes a new storageClient that uses the gRPC -// Storage API. -func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { - s := initSettings(opts...) - s.clientOption = append(defaultGRPCOptions(), s.clientOption...) - - g, err := gapic.NewClient(ctx, s.clientOption...) - if err != nil { - return nil, err - } - - return &grpcStorageClient{ - raw: g, - settings: s, - }, nil -} - -func (c *grpcStorageClient) Close() error { - return c.raw.Close() -} - -// Top-level methods. - -func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) { - s := callSettings(c.settings, opts...) - req := &storagepb.GetServiceAccountRequest{ - Project: toProjectResource(project), - } - var resp *storagepb.ServiceAccount - err := run(ctx, func() error { - var err error - resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if err != nil { - return "", err - } - return resp.EmailAddress, err -} - -func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) { - s := callSettings(c.settings, opts...) - b := attrs.toProtoBucket() - b.Name = bucket - // If there is lifecycle information but no location, explicitly set - // the location. This is a GCS quirk/bug. 
- if b.GetLocation() == "" && b.GetLifecycle() != nil { - b.Location = "US" - } - - req := &storagepb.CreateBucketRequest{ - Parent: toProjectResource(project), - Bucket: b, - BucketId: b.GetName(), - } - if attrs != nil { - req.PredefinedAcl = attrs.PredefinedACL - req.PredefinedDefaultObjectAcl = attrs.PredefinedDefaultObjectACL - } - - var battrs *BucketAttrs - err := run(ctx, func() error { - res, err := c.raw.CreateBucket(ctx, req, s.gax...) - - battrs = newBucketFromProto(res) - - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - - return battrs, err -} - -func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator { - s := callSettings(c.settings, opts...) - it := &BucketIterator{ - ctx: ctx, - projectID: project, - } - - var gitr *gapic.BucketIterator - fetch := func(pageSize int, pageToken string) (token string, err error) { - // Initialize GAPIC-based iterator when pageToken is empty, which - // indicates that this fetch call is attempting to get the first page. - // - // Note: Initializing the GAPIC-based iterator lazily is necessary to - // capture the BucketIterator.Prefix set by the user *after* the - // BucketIterator is returned to them from the veneer. - if pageToken == "" { - req := &storagepb.ListBucketsRequest{ - Parent: toProjectResource(it.projectID), - Prefix: it.Prefix, - } - gitr = c.raw.ListBuckets(it.ctx, req, s.gax...) - } - - var buckets []*storagepb.Bucket - var next string - err = run(it.ctx, func() error { - buckets, next, err = gitr.InternalFetch(pageSize, pageToken) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if err != nil { - return "", err - } - - for _, bkt := range buckets { - b := newBucketFromProto(bkt) - it.buckets = append(it.buckets, b) - } - - return next, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - fetch, - func() int { return len(it.buckets) }, - func() interface{} { b := it.buckets; it.buckets = nil; return b }) - - return it -} - -// Bucket methods. - -func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := &storagepb.DeleteBucketRequest{ - Name: bucketResourceName(globalProjectAlias, bucket), - } - if err := applyBucketCondsProto("grpcStorageClient.DeleteBucket", conds, req); err != nil { - return err - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - - return run(ctx, func() error { - return c.raw.DeleteBucket(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) -} - -func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { - s := callSettings(c.settings, opts...) - req := &storagepb.GetBucketRequest{ - Name: bucketResourceName(globalProjectAlias, bucket), - ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, - } - if err := applyBucketCondsProto("grpcStorageClient.GetBucket", conds, req); err != nil { - return nil, err - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - - var battrs *BucketAttrs - err := run(ctx, func() error { - res, err := c.raw.GetBucket(ctx, req, s.gax...) 
- - battrs = newBucketFromProto(res) - - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - - if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { - return nil, ErrBucketNotExist - } - - return battrs, err -} -func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { - s := callSettings(c.settings, opts...) - b := uattrs.toProtoBucket() - b.Name = bucketResourceName(globalProjectAlias, bucket) - req := &storagepb.UpdateBucketRequest{ - Bucket: b, - PredefinedAcl: uattrs.PredefinedACL, - PredefinedDefaultObjectAcl: uattrs.PredefinedDefaultObjectACL, - } - if err := applyBucketCondsProto("grpcStorageClient.UpdateBucket", conds, req); err != nil { - return nil, err - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - - var paths []string - fieldMask := &fieldmaskpb.FieldMask{ - Paths: paths, - } - if uattrs.CORS != nil { - fieldMask.Paths = append(fieldMask.Paths, "cors") - } - if uattrs.DefaultEventBasedHold != nil { - fieldMask.Paths = append(fieldMask.Paths, "default_event_based_hold") - } - if uattrs.RetentionPolicy != nil { - fieldMask.Paths = append(fieldMask.Paths, "retention_policy") - } - if uattrs.VersioningEnabled != nil { - fieldMask.Paths = append(fieldMask.Paths, "versioning") - } - if uattrs.RequesterPays != nil { - fieldMask.Paths = append(fieldMask.Paths, "billing") - } - if uattrs.BucketPolicyOnly != nil || uattrs.UniformBucketLevelAccess != nil || uattrs.PublicAccessPrevention != PublicAccessPreventionUnknown { - fieldMask.Paths = append(fieldMask.Paths, "iam_config") - } - if uattrs.Encryption != nil { - fieldMask.Paths = append(fieldMask.Paths, "encryption") - } - if uattrs.Lifecycle != nil { - fieldMask.Paths = append(fieldMask.Paths, "lifecycle") - } - if uattrs.Logging != nil { - fieldMask.Paths = append(fieldMask.Paths, "logging") - } - if uattrs.Website != nil { - fieldMask.Paths = append(fieldMask.Paths, "website") - } - if uattrs.PredefinedACL != "" { - // In cases where PredefinedACL is set, Acl is cleared. - fieldMask.Paths = append(fieldMask.Paths, "acl") - } - if uattrs.PredefinedDefaultObjectACL != "" { - // In cases where PredefinedDefaultObjectACL is set, DefaultObjectAcl is cleared. - fieldMask.Paths = append(fieldMask.Paths, "default_object_acl") - } - // Note: This API currently does not support entities using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - if uattrs.acl != nil { - // In cases where acl is set by UpdateBucketACL method. - fieldMask.Paths = append(fieldMask.Paths, "acl") - } - if uattrs.defaultObjectACL != nil { - // In cases where defaultObjectACL is set by UpdateBucketACL method. - fieldMask.Paths = append(fieldMask.Paths, "default_object_acl") - } - if uattrs.StorageClass != "" { - fieldMask.Paths = append(fieldMask.Paths, "storage_class") - } - if uattrs.RPO != RPOUnknown { - fieldMask.Paths = append(fieldMask.Paths, "rpo") - } - if uattrs.Autoclass != nil { - fieldMask.Paths = append(fieldMask.Paths, "autoclass") - } - // TODO(cathyo): Handle labels. Pending b/230510191. - req.UpdateMask = fieldMask - - var battrs *BucketAttrs - err := run(ctx, func() error { - res, err := c.raw.UpdateBucket(ctx, req, s.gax...) 
- battrs = newBucketFromProto(res) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - - return battrs, err -} -func (c *grpcStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := &storagepb.LockBucketRetentionPolicyRequest{ - Bucket: bucketResourceName(globalProjectAlias, bucket), - } - if err := applyBucketCondsProto("grpcStorageClient.LockBucketRetentionPolicy", conds, req); err != nil { - return err - } - - return run(ctx, func() error { - _, err := c.raw.LockBucketRetentionPolicy(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - -} -func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { - s := callSettings(c.settings, opts...) - it := &ObjectIterator{ - ctx: ctx, - } - if q != nil { - it.query = *q - } - req := &storagepb.ListObjectsRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - Prefix: it.query.Prefix, - Delimiter: it.query.Delimiter, - Versions: it.query.Versions, - LexicographicStart: it.query.StartOffset, - LexicographicEnd: it.query.EndOffset, - IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter, - ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - gitr := c.raw.ListObjects(it.ctx, req, s.gax...) - fetch := func(pageSize int, pageToken string) (token string, err error) { - var objects []*storagepb.Object - err = run(it.ctx, func() error { - objects, token, err = gitr.InternalFetch(pageSize, pageToken) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if err != nil { - if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { - err = ErrBucketNotExist - } - return "", err - } - - for _, obj := range objects { - b := newObjectFromProto(obj) - it.items = append(it.items, b) - } - - // Response is always non-nil after a successful request. - res := gitr.Response.(*storagepb.ListObjectsResponse) - for _, prefix := range res.GetPrefixes() { - it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) - } - - return token, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - fetch, - func() int { return len(it.items) }, - func() interface{} { b := it.items; it.items = nil; return b }) - - return it -} - -// Object metadata methods. - -func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := &storagepb.DeleteObjectRequest{ - Bucket: bucketResourceName(globalProjectAlias, bucket), - Object: object, - } - if err := applyCondsProto("grpcStorageClient.DeleteObject", gen, conds, req); err != nil { - return err - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - err := run(ctx, func() error { - return c.raw.DeleteObject(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { - return ErrObjectNotExist - } - return err -} - -func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { - s := callSettings(c.settings, opts...) 
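
Both DeleteObject and the ListObjects fetch above normalize transport-level gRPC NotFound statuses into the package's sentinel errors, so callers can compare with errors.Is regardless of whether HTTP or gRPC sits underneath. A small sketch of that translation (the sentinel value here is illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ErrObjectNotExist is the package-level sentinel callers compare against,
// instead of inspecting transport-specific gRPC status codes.
var ErrObjectNotExist = errors.New("storage: object doesn't exist")

// translateNotFound converts a gRPC NotFound status into the sentinel and
// passes every other error through unchanged.
func translateNotFound(err error) error {
	if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
		return ErrObjectNotExist
	}
	return err
}

func main() {
	rpcErr := status.Error(codes.NotFound, "object not found")
	err := translateNotFound(rpcErr)
	fmt.Println(errors.Is(err, ErrObjectNotExist)) // true
}
```
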
- req := &storagepb.GetObjectRequest{ - Bucket: bucketResourceName(globalProjectAlias, bucket), - Object: object, - // ProjectionFull by default. - ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, - } - if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil { - return nil, err - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - if encryptionKey != nil { - req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey) - } - - var attrs *ObjectAttrs - err := run(ctx, func() error { - res, err := c.raw.GetObject(ctx, req, s.gax...) - attrs = newObjectFromProto(res) - - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - - if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { - return nil, ErrObjectNotExist - } - - return attrs, err -} - -func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { - s := callSettings(c.settings, opts...) - o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, bucket), object) - req := &storagepb.UpdateObjectRequest{ - Object: o, - PredefinedAcl: uattrs.PredefinedACL, - } - if err := applyCondsProto("grpcStorageClient.UpdateObject", gen, conds, req); err != nil { - return nil, err - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - if encryptionKey != nil { - req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey) - } - - fieldMask := &fieldmaskpb.FieldMask{Paths: nil} - if uattrs.EventBasedHold != nil { - fieldMask.Paths = append(fieldMask.Paths, "event_based_hold") - } - if uattrs.TemporaryHold != nil { - fieldMask.Paths = append(fieldMask.Paths, "temporary_hold") - } - if uattrs.ContentType != nil { - fieldMask.Paths = append(fieldMask.Paths, "content_type") - } - if uattrs.ContentLanguage != nil { - fieldMask.Paths = append(fieldMask.Paths, "content_language") - } - if uattrs.ContentEncoding != nil { - fieldMask.Paths = append(fieldMask.Paths, "content_encoding") - } - if uattrs.ContentDisposition != nil { - fieldMask.Paths = append(fieldMask.Paths, "content_disposition") - } - if uattrs.CacheControl != nil { - fieldMask.Paths = append(fieldMask.Paths, "cache_control") - } - if !uattrs.CustomTime.IsZero() { - fieldMask.Paths = append(fieldMask.Paths, "custom_time") - } - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - if uattrs.ACL != nil || len(uattrs.PredefinedACL) > 0 { - fieldMask.Paths = append(fieldMask.Paths, "acl") - } - // TODO(cathyo): Handle metadata. Pending b/230510191. - - req.UpdateMask = fieldMask - - var attrs *ObjectAttrs - err := run(ctx, func() error { - res, err := c.raw.UpdateObject(ctx, req, s.gax...) - attrs = newObjectFromProto(res) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if e, ok := status.FromError(err); ok && e.Code() == codes.NotFound { - return nil, ErrObjectNotExist - } - - return attrs, err -} - -// Default Object ACL methods. - -func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { - // There is no separate API for PATCH in gRPC. - // Make a GET call first to retrieve BucketAttrs. - attrs, err := c.GetBucket(ctx, bucket, nil, opts...) 
- if err != nil { - return err - } - // Delete the entity and copy other remaining ACL entities. - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - // Return error if entity is not found or a project ID is used. - invalidEntity := true - var acl []ACLRule - for _, a := range attrs.DefaultObjectACL { - if a.Entity != entity { - acl = append(acl, a) - } - if a.Entity == entity { - invalidEntity = false - } - } - if invalidEntity { - return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.DefaultObjectACL, msgEntityNotSupported) - } - uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl} - // Call UpdateBucket with a MetagenerationMatch precondition set. - if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { - return err - } - return nil -} - -func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { - attrs, err := c.GetBucket(ctx, bucket, nil, opts...) - if err != nil { - return nil, err - } - return attrs.DefaultObjectACL, nil -} - -func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { - // There is no separate API for PATCH in gRPC. - // Make a GET call first to retrieve BucketAttrs. - attrs, err := c.GetBucket(ctx, bucket, nil, opts...) - if err != nil { - return err - } - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - var acl []ACLRule - aclRule := ACLRule{Entity: entity, Role: role} - acl = append(attrs.DefaultObjectACL, aclRule) - uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl} - // Call UpdateBucket with a MetagenerationMatch precondition set. - if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { - return err - } - return nil -} - -// Bucket ACL methods. - -func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { - // There is no separate API for PATCH in gRPC. - // Make a GET call first to retrieve BucketAttrs. - attrs, err := c.GetBucket(ctx, bucket, nil, opts...) - if err != nil { - return err - } - // Delete the entity and copy other remaining ACL entities. - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - // Return error if entity is not found or a project ID is used. - invalidEntity := true - var acl []ACLRule - for _, a := range attrs.ACL { - if a.Entity != entity { - acl = append(acl, a) - } - if a.Entity == entity { - invalidEntity = false - } - } - if invalidEntity { - return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported) - } - uattrs := &BucketAttrsToUpdate{acl: acl} - // Call UpdateBucket with a MetagenerationMatch precondition set. - if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { - return err - } - return nil -} - -func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { - attrs, err := c.GetBucket(ctx, bucket, nil, opts...) 
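
There is no ACL-specific RPC on the gRPC surface, so every ACL mutation above is a read-modify-write: fetch the attrs, edit the ACL slice, and write back under a MetagenerationMatch precondition, so a concurrent metadata change fails the update instead of being silently overwritten. A self-contained sketch of that optimistic-concurrency pattern, with an in-memory store standing in for the service:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// bucketMeta is a stand-in for bucket attributes guarded by a metageneration
// counter that the server bumps on every metadata change. Hypothetical type.
type bucketMeta struct {
	ACL            []string
	Metageneration int64
}

var errPreconditionFailed = errors.New("metageneration mismatch")

type store struct {
	mu   sync.Mutex
	meta bucketMeta
}

func (s *store) get() bucketMeta {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.meta
}

// update succeeds only if the caller read the latest metageneration,
// mirroring an UpdateBucket call with a MetagenerationMatch precondition.
func (s *store) update(acl []string, ifMetagenerationMatch int64) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.meta.Metageneration != ifMetagenerationMatch {
		return errPreconditionFailed
	}
	s.meta.ACL = acl
	s.meta.Metageneration++
	return nil
}

// deleteACLEntry is the read-modify-write: fetch attrs, filter out the entry,
// then write back conditioned on the metageneration we read.
func deleteACLEntry(s *store, entity string) error {
	meta := s.get()
	var acl []string
	found := false
	for _, e := range meta.ACL {
		if e == entity {
			found = true
			continue
		}
		acl = append(acl, e)
	}
	if !found {
		return fmt.Errorf("entity %q not found", entity)
	}
	return s.update(acl, meta.Metageneration)
}

func main() {
	s := &store{meta: bucketMeta{ACL: []string{"user-a", "allUsers"}, Metageneration: 7}}
	fmt.Println(deleteACLEntry(s, "allUsers")) // <nil>
	fmt.Println(s.get().ACL)                   // [user-a]
}
```

A production caller would typically retry the whole read-modify-write on a precondition failure; the methods above instead surface the error to their caller.
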
- if err != nil { - return nil, err - } - return attrs.ACL, nil -} - -func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { - // There is no separate API for PATCH in gRPC. - // Make a GET call first to retrieve BucketAttrs. - attrs, err := c.GetBucket(ctx, bucket, nil, opts...) - if err != nil { - return err - } - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - var acl []ACLRule - aclRule := ACLRule{Entity: entity, Role: role} - acl = append(attrs.ACL, aclRule) - uattrs := &BucketAttrsToUpdate{acl: acl} - // Call UpdateBucket with a MetagenerationMatch precondition set. - if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { - return err - } - return nil -} - -// Object ACL methods. - -func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { - // There is no separate API for PATCH in gRPC. - // Make a GET call first to retrieve ObjectAttrs. - attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) - if err != nil { - return err - } - // Delete the entity and copy other remaining ACL entities. - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - // Return error if entity is not found or a project ID is used. - invalidEntity := true - var acl []ACLRule - for _, a := range attrs.ACL { - if a.Entity != entity { - acl = append(acl, a) - } - if a.Entity == entity { - invalidEntity = false - } - } - if invalidEntity { - return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported) - } - uattrs := &ObjectAttrsToUpdate{ACL: acl} - // Call UpdateObject with the specified metageneration. - if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil { - return err - } - return nil -} - -// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. -// Selecting a specific generation of this object is not currently supported by the client. -func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { - o, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) - if err != nil { - return nil, err - } - return o.ACL, nil -} - -func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { - // There is no separate API for PATCH in gRPC. - // Make a GET call first to retrieve ObjectAttrs. - attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) - if err != nil { - return err - } - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - var acl []ACLRule - aclRule := ACLRule{Entity: entity, Role: role} - acl = append(attrs.ACL, aclRule) - uattrs := &ObjectAttrsToUpdate{ACL: acl} - // Call UpdateObject with the specified metageneration. 
- if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil { - return err - } - return nil -} - -// Media operations. - -func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) { - s := callSettings(c.settings, opts...) - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - - dstObjPb := req.dstObject.attrs.toProtoObject(req.dstBucket) - dstObjPb.Name = req.dstObject.name - if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, dstObjPb); err != nil { - return nil, err - } - if req.sendCRC32C { - dstObjPb.Checksums.Crc32C = &req.dstObject.attrs.CRC32C - } - - srcs := []*storagepb.ComposeObjectRequest_SourceObject{} - for _, src := range req.srcs { - srcObjPb := &storagepb.ComposeObjectRequest_SourceObject{Name: src.name} - if err := applyCondsProto("ComposeObject source", src.gen, src.conds, srcObjPb); err != nil { - return nil, err - } - srcs = append(srcs, srcObjPb) - } - - rawReq := &storagepb.ComposeObjectRequest{ - Destination: dstObjPb, - SourceObjects: srcs, - } - if req.predefinedACL != "" { - rawReq.DestinationPredefinedAcl = req.predefinedACL - } - if req.dstObject.encryptionKey != nil { - rawReq.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey) - } - - var obj *storagepb.Object - var err error - if err := run(ctx, func() error { - obj, err = c.raw.ComposeObject(ctx, rawReq, s.gax...) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { - return nil, err - } - - return newObjectFromProto(obj), nil -} -func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) { - s := callSettings(c.settings, opts...) - obj := req.dstObject.attrs.toProtoObject("") - call := &storagepb.RewriteObjectRequest{ - SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket), - SourceObject: req.srcObject.name, - RewriteToken: req.token, - DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket), - DestinationName: req.dstObject.name, - Destination: obj, - DestinationKmsKey: req.dstObject.keyName, - DestinationPredefinedAcl: req.predefinedACL, - CommonObjectRequestParams: toProtoCommonObjectRequestParams(req.dstObject.encryptionKey), - } - - // The userProject, whether source or destination project, is decided by the code calling the interface. 
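
RewriteObject below is a single round of a token-driven copy: the service may stop after rewriting only part of the object and hand back a rewrite token (the RewriteToken round-tripping visible in the request) that must be fed into the next call. A toy driver showing the loop callers run until done is reported; `rewriteOnce` is a stand-in for the RPC.

```go
package main

import "fmt"

// rewriteResponse mirrors the shape of the real response: progress plus a
// resume token while the copy is still in flight. Hypothetical type.
type rewriteResponse struct {
	done    bool
	written int64
	token   string
}

// rewriteOnce simulates one RewriteObject call that moves at most
// maxBytesPerCall bytes and hands back a token until the copy finishes.
func rewriteOnce(total, written, maxBytesPerCall int64, token string) rewriteResponse {
	written += maxBytesPerCall
	if written >= total {
		return rewriteResponse{done: true, written: total}
	}
	return rewriteResponse{written: written, token: fmt.Sprintf("tok-%d", written)}
}

func main() {
	const total, perCall = 10 << 20, 4 << 20
	var written int64
	var token string
	// Callers loop, feeding each response's token into the next request,
	// until the service reports done.
	for {
		res := rewriteOnce(total, written, perCall, token)
		written, token = res.written, res.token
		fmt.Printf("rewritten %d/%d bytes\n", written, total)
		if res.done {
			break
		}
	}
}
```
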
- if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - if err := applyCondsProto("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { - return nil, err - } - if err := applySourceCondsProto(req.srcObject.gen, req.srcObject.conds, call); err != nil { - return nil, err - } - - if len(req.dstObject.encryptionKey) > 0 { - call.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey) - } - if len(req.srcObject.encryptionKey) > 0 { - srcParams := toProtoCommonObjectRequestParams(req.srcObject.encryptionKey) - call.CopySourceEncryptionAlgorithm = srcParams.GetEncryptionAlgorithm() - call.CopySourceEncryptionKeyBytes = srcParams.GetEncryptionKeyBytes() - call.CopySourceEncryptionKeySha256Bytes = srcParams.GetEncryptionKeySha256Bytes() - } - - call.MaxBytesRewrittenPerCall = req.maxBytesRewrittenPerCall - - var res *storagepb.RewriteResponse - var err error - - retryCall := func() error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err } - - if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { - return nil, err - } - - r := &rewriteObjectResponse{ - done: res.GetDone(), - written: res.GetTotalBytesRewritten(), - size: res.GetObjectSize(), - token: res.GetRewriteToken(), - resource: newObjectFromProto(res.GetResource()), - } - - return r, nil -} - -func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - - // A negative length means "read to the end of the object", but the - // read_limit field it corresponds to uses zero to mean the same thing. Thus - // we coerce the length to 0 to read to the end of the object. - if params.length < 0 { - params.length = 0 - } - - b := bucketResourceName(globalProjectAlias, params.bucket) - req := &storagepb.ReadObjectRequest{ - Bucket: b, - Object: params.object, - CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey), - } - // The default is a negative value, which means latest. - if params.gen >= 0 { - req.Generation = params.gen - } - - // Define a function that initiates a Read with offset and length, assuming - // we have already read seen bytes. - reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) { - // If the context has already expired, return immediately without making - // we call. - if err := ctx.Err(); err != nil { - return nil, nil, err - } - - cc, cancel := context.WithCancel(ctx) - - start := params.offset + seen - // Only set a ReadLimit if length is greater than zero, because zero - // means read it all. - if params.length > 0 { - req.ReadLimit = params.length - seen - } - req.ReadOffset = start - - if err := applyCondsProto("gRPCReader.reopen", params.gen, params.conds, req); err != nil { - cancel() - return nil, nil, err - } - - var stream storagepb.Storage_ReadObjectClient - var msg *storagepb.ReadObjectResponse - var err error - - err = run(cc, func() error { - stream, err = c.raw.ReadObject(cc, req, s.gax...) - if err != nil { - return err - } - - msg, err = stream.Recv() - // These types of errors show up on the Recv call, rather than the - // initialization of the stream via ReadObject above. 
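
NewRangeReader's `reopen` closure above is the heart of the gRPC read path: it can re-establish the stream at any byte offset (the requested offset plus bytes already seen) and hands back a cancel function so an abandoned stream does not leak. A compressed sketch of that shape, with a hypothetical `stream` type in place of the real ReadObject client:

```go
package main

import (
	"context"
	"fmt"
)

// stream stands in for a server-streaming ReadObject client; hypothetical type.
type stream struct {
	ctx    context.Context
	offset int64
}

func (s *stream) recv() ([]byte, error) {
	if err := s.ctx.Err(); err != nil {
		return nil, err // stream was canceled via the returned cancel func
	}
	s.offset += 4
	return []byte("data"), nil
}

// reopen re-establishes a read stream positioned after the bytes already
// consumed, and returns a cancel func the caller must invoke (directly or via
// Close) so the stream's context can be collected.
func reopen(ctx context.Context, start, seen int64) (*stream, context.CancelFunc, error) {
	// Don't bother dialing if the parent context has already expired.
	if err := ctx.Err(); err != nil {
		return nil, nil, err
	}
	cc, cancel := context.WithCancel(ctx)
	return &stream{ctx: cc, offset: start + seen}, cancel, nil
}

func main() {
	st, cancel, err := reopen(context.Background(), 0, 1024) // resume after 1 KiB
	if err != nil {
		panic(err)
	}
	defer cancel()
	b, err := st.recv()
	if err != nil {
		panic(err)
	}
	fmt.Printf("resumed at offset %d, got %q\n", st.offset, b)
}
```
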
- if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { - return ErrObjectNotExist - } - - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if err != nil { - // Close the stream context we just created to ensure we don't leak - // resources. - cancel() - return nil, nil, err - } - - return &readStreamResponse{stream, msg}, cancel, nil - } - - res, cancel, err := reopen(0) - if err != nil { - return nil, err - } - - // The first message was Recv'd on stream open, use it to populate the - // object metadata. - msg := res.response - obj := msg.GetMetadata() - // This is the size of the entire object, even if only a range was requested. - size := obj.GetSize() - - r = &Reader{ - Attrs: ReaderObjectAttrs{ - Size: size, - ContentType: obj.GetContentType(), - ContentEncoding: obj.GetContentEncoding(), - CacheControl: obj.GetCacheControl(), - LastModified: obj.GetUpdateTime().AsTime(), - Metageneration: obj.GetMetageneration(), - Generation: obj.GetGeneration(), - }, - reader: &gRPCReader{ - stream: res.stream, - reopen: reopen, - cancel: cancel, - size: size, - // Store the content from the first Recv in the - // client buffer for reading later. - leftovers: msg.GetChecksummedData().GetContent(), - settings: s, - }, - } - - cr := msg.GetContentRange() - if cr != nil { - r.Attrs.StartOffset = cr.GetStart() - r.remain = cr.GetEnd() - cr.GetStart() + 1 - } else { - r.remain = size - } - - // Only support checksums when reading an entire object, not a range. - if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length == 0 { - r.wantCRC = checksums.GetCrc32C() - r.checkCRC = true - } - - return r, nil -} - -func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { - s := callSettings(c.settings, opts...) - - var offset int64 - errorf := params.setError - progress := params.progress - setObj := params.setObj - - pr, pw := io.Pipe() - gw := newGRPCWriter(c, params, pr) - gw.settings = s - if s.userProject != "" { - gw.ctx = setUserProjectMetadata(gw.ctx, s.userProject) - } - - // This function reads the data sent to the pipe and sends sets of messages - // on the gRPC client-stream as the buffer is filled. - go func() { - defer close(params.donec) - - // Loop until there is an error or the Object has been finalized. - for { - // Note: This blocks until either the buffer is full or EOF is read. - recvd, doneReading, err := gw.read() - if err != nil { - err = checkCanceled(err) - errorf(err) - pr.CloseWithError(err) - return - } - - // The chunk buffer is full, but there is no end in sight. This - // means that a resumable upload will need to be used to send - // multiple chunks, until we are done reading data. Start a - // resumable upload if it has not already been started. - // Otherwise, all data will be sent over a single gRPC stream. - if !doneReading && gw.upid == "" { - err = gw.startResumableUpload() - if err != nil { - err = checkCanceled(err) - errorf(err) - pr.CloseWithError(err) - return - } - } - - o, off, finalized, err := gw.uploadBuffer(recvd, offset, doneReading) - if err != nil { - err = checkCanceled(err) - errorf(err) - pr.CloseWithError(err) - return - } - // At this point, the current buffer has been uploaded. Capture the - // committed offset here in case the upload was not finalized and - // another chunk is to be uploaded. 
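
OpenWriter above decouples the user-facing io.PipeWriter from the gRPC streaming machinery: a goroutine drains the pipe's read end, deciding per buffer whether a resumable upload is needed, and reports progress through callbacks. A minimal sketch of the pipe-plus-goroutine shape, with chunk handling reduced to a print:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// openWriter returns the write end of a pipe; a goroutine drains the read end
// in fixed-size chunks, standing in for per-message gRPC uploads.
func openWriter(done chan<- int64) *io.PipeWriter {
	pr, pw := io.Pipe()
	go func() {
		buf := make([]byte, 8) // tiny chunk size for demonstration
		var total int64
		for {
			n, err := pr.Read(buf)
			total += int64(n)
			if err == io.EOF {
				done <- total
				return
			}
			if err != nil {
				pr.CloseWithError(err)
				return
			}
			fmt.Printf("uploading chunk of %d bytes\n", n)
		}
	}()
	return pw
}

func main() {
	done := make(chan int64)
	w := openWriter(done)
	if _, err := io.Copy(w, strings.NewReader("hello gRPC writer pipeline")); err != nil {
		panic(err)
	}
	w.Close() // signals EOF to the reading goroutine
	fmt.Println("committed bytes:", <-done)
}
```

Errors in the goroutine are pushed back through pr.CloseWithError, which is also how the writer above propagates upload failures to the user's next Write or Close call.
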
- offset = off - progress(offset) - - // When we are done reading data and the chunk has been finalized, - // we are done. - if doneReading && finalized { - // Build Object from server's response. - setObj(newObjectFromProto(o)) - return - } - } - }() - - return pw, nil -} - -// IAM methods. - -func (c *grpcStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) { - // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. - s := callSettings(c.settings, opts...) - req := &iampb.GetIamPolicyRequest{ - Resource: bucketResourceName(globalProjectAlias, resource), - Options: &iampb.GetPolicyOptions{ - RequestedPolicyVersion: version, - }, - } - var rp *iampb.Policy - err := run(ctx, func() error { - var err error - rp, err = c.raw.GetIamPolicy(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - - return rp, err -} - -func (c *grpcStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error { - // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. - s := callSettings(c.settings, opts...) - - req := &iampb.SetIamPolicyRequest{ - Resource: bucketResourceName(globalProjectAlias, resource), - Policy: policy, - } - - return run(ctx, func() error { - _, err := c.raw.SetIamPolicy(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) -} - -func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { - // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. - s := callSettings(c.settings, opts...) - req := &iampb.TestIamPermissionsRequest{ - Resource: bucketResourceName(globalProjectAlias, resource), - Permissions: permissions, - } - var res *iampb.TestIamPermissionsResponse - err := run(ctx, func() error { - var err error - res, err = c.raw.TestIamPermissions(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if err != nil { - return nil, err - } - return res.Permissions, nil -} - -// HMAC Key methods. - -func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - req := &storagepb.GetHmacKeyRequest{ - AccessId: accessID, - Project: toProjectResource(project), - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func() error { - var err error - metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if err != nil { - return nil, err - } - return toHMACKeyFromProto(metadata), nil -} - -func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { - s := callSettings(c.settings, opts...) - req := &storagepb.ListHmacKeysRequest{ - Project: toProjectResource(project), - ServiceAccountEmail: serviceAccountEmail, - ShowDeletedKeys: showDeletedKeys, - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - it := &HMACKeysIterator{ - ctx: ctx, - projectID: project, - retry: s.retry, - } - gitr := c.raw.ListHmacKeys(it.ctx, req, s.gax...) 
- fetch := func(pageSize int, pageToken string) (token string, err error) { - var hmacKeys []*storagepb.HmacKeyMetadata - err = run(it.ctx, func() error { - hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if err != nil { - return "", err - } - for _, hkmd := range hmacKeys { - hk := toHMACKeyFromProto(hkmd) - it.hmacKeys = append(it.hmacKeys, hk) - } - - return token, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - fetch, - func() int { return len(it.hmacKeys) - it.index }, - func() interface{} { - prev := it.hmacKeys - it.hmacKeys = it.hmacKeys[:0] - it.index = 0 - return prev - }) - return it -} - -func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - hk := &storagepb.HmacKeyMetadata{ - AccessId: accessID, - Project: toProjectResource(project), - ServiceAccountEmail: serviceAccountEmail, - State: string(attrs.State), - Etag: attrs.Etag, - } - var paths []string - fieldMask := &fieldmaskpb.FieldMask{ - Paths: paths, - } - if attrs.State != "" { - fieldMask.Paths = append(fieldMask.Paths, "state") - } - req := &storagepb.UpdateHmacKeyRequest{ - HmacKey: hk, - UpdateMask: fieldMask, - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func() error { - var err error - metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if err != nil { - return nil, err - } - return toHMACKeyFromProto(metadata), nil -} - -func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - req := &storagepb.CreateHmacKeyRequest{ - Project: toProjectResource(project), - ServiceAccountEmail: serviceAccountEmail, - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - var res *storagepb.CreateHmacKeyResponse - err := run(ctx, func() error { - var err error - res, err = c.raw.CreateHmacKey(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if err != nil { - return nil, err - } - key := toHMACKeyFromProto(res.Metadata) - key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes) - - return key, nil -} - -func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := &storagepb.DeleteHmacKeyRequest{ - AccessId: accessID, - Project: toProjectResource(project), - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - return run(ctx, func() error { - return c.raw.DeleteHmacKey(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) -} - -// Notification methods. - -func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.ListNotifications") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) 
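
The HMAC key iterator above (like the bucket and object iterators earlier) wires itself into google.golang.org/api/iterator via NewPageInfo: `fetch` fills an internal buffer, one closure reports how much is unconsumed, and another surrenders the buffer wholesale. NewPageInfo is exported but documented for use by the Google Cloud libraries; it still serves to illustrate the contract. A runnable sketch with string keys:

```go
package main

import (
	"fmt"

	"google.golang.org/api/iterator"
)

// keysIterator shows the iterator.NewPageInfo contract used above: fetch
// fills an internal buffer, bufLen reports what's unconsumed, and takeBuf
// surrenders the buffer to the pagination machinery.
type keysIterator struct {
	keys     []string
	pageInfo *iterator.PageInfo
	nextFunc func() error
}

func newKeysIterator(pages [][]string) *keysIterator {
	it := &keysIterator{}
	page := 0
	fetch := func(pageSize int, pageToken string) (string, error) {
		it.keys = append(it.keys, pages[page]...)
		page++
		if page == len(pages) {
			return "", nil // empty token: no more pages
		}
		return fmt.Sprintf("page-%d", page), nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		fetch,
		func() int { return len(it.keys) },
		func() interface{} { b := it.keys; it.keys = nil; return b })
	return it
}

func (it *keysIterator) Next() (string, error) {
	if err := it.nextFunc(); err != nil {
		return "", err // iterator.Done once everything is consumed
	}
	k := it.keys[0]
	it.keys = it.keys[1:]
	return k, nil
}

func main() {
	it := newKeysIterator([][]string{{"key-1", "key-2"}, {"key-3"}})
	for {
		k, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(k)
	}
}
```
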
- if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - req := &storagepb.ListNotificationsRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - } - var notifications []*storagepb.Notification - err = run(ctx, func() error { - gitr := c.raw.ListNotifications(ctx, req, s.gax...) - for { - // PageSize is not set and fallbacks to the API default pageSize of 100. - items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken()) - if err != nil { - return err - } - notifications = append(notifications, items...) - // If there are no more results, nextPageToken is empty and err is nil. - if nextPageToken == "" { - return err - } - req.PageToken = nextPageToken - } - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if err != nil { - return nil, err - } - - return notificationsToMapFromProto(notifications), nil -} - -func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.CreateNotification") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - req := &storagepb.CreateNotificationRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - Notification: toProtoNotification(n), - } - var pbn *storagepb.Notification - err = run(ctx, func() error { - var err error - pbn, err = c.raw.CreateNotification(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) - if err != nil { - return nil, err - } - return toNotificationFromProto(pbn), err -} - -func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.DeleteNotification") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - req := &storagepb.DeleteNotificationRequest{Name: id} - return run(ctx, func() error { - return c.raw.DeleteNotification(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) -} - -// setUserProjectMetadata appends a project ID to the outgoing Context metadata -// via the x-goog-user-project system parameter defined at -// https://cloud.google.com/apis/docs/system-parameters. This is only for -// billing purposes, and is generally optional, except for requester-pays -// buckets. -func setUserProjectMetadata(ctx context.Context, project string) context.Context { - return metadata.AppendToOutgoingContext(ctx, "x-goog-user-project", project) -} - -type readStreamResponse struct { - stream storagepb.Storage_ReadObjectClient - response *storagepb.ReadObjectResponse -} - -type gRPCReader struct { - seen, size int64 - stream storagepb.Storage_ReadObjectClient - reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) - leftovers []byte - cancel context.CancelFunc - settings *settings -} - -// Read reads bytes into the user's buffer from an open gRPC stream. -func (r *gRPCReader) Read(p []byte) (int, error) { - // No stream to read from, either never initiliazed or Close was called. - // Note: There is a potential concurrency issue if multiple routines are - // using the same reader. One encounters an error and the stream is closed - // and then reopened while the other routine attempts to read from it. 
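
ListNotifications above is unusual among these methods: rather than exposing an iterator, it drains every page inside one retried closure, so a retry restarts the listing from scratch rather than resuming from a half-consumed token. The shape of that loop, with a canned `fetchPage` standing in for the raw call:

```go
package main

import "fmt"

// fetchPage stands in for a raw list call: it returns one page of items plus
// the token for the next page, or an empty token on the last page.
func fetchPage(pageToken string) (items []string, nextPageToken string, err error) {
	pages := map[string][]string{
		"":       {"notif-1", "notif-2"},
		"page-2": {"notif-3"},
	}
	tokens := map[string]string{"": "page-2", "page-2": ""}
	return pages[pageToken], tokens[pageToken], nil
}

// listAll drains every page inside one logical operation, the way
// ListNotifications loops within a single retried closure.
func listAll() ([]string, error) {
	var all []string
	token := ""
	for {
		items, next, err := fetchPage(token)
		if err != nil {
			return nil, err
		}
		all = append(all, items...)
		if next == "" {
			return all, nil
		}
		token = next
	}
}

func main() {
	notifs, err := listAll()
	if err != nil {
		panic(err)
	}
	fmt.Println(notifs) // [notif-1 notif-2 notif-3]
}
```
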
- if r.stream == nil { - return 0, fmt.Errorf("reader has been closed") - } - - // The entire object has been read by this reader, return EOF. - if r.size != 0 && r.size == r.seen { - return 0, io.EOF - } - - var n int - // Read leftovers and return what was available to conform to the Reader - // interface: https://pkg.go.dev/io#Reader. - if len(r.leftovers) > 0 { - n = copy(p, r.leftovers) - r.seen += int64(n) - r.leftovers = r.leftovers[n:] - return n, nil - } - - // Attempt to Recv the next message on the stream. - msg, err := r.recv() - if err != nil { - return 0, err - } - - // TODO: Determine if we need to capture incremental CRC32C for this - // chunk. The Object CRC32C checksum is captured when directed to read - // the entire Object. If directed to read a range, we may need to - // calculate the range's checksum for verification if the checksum is - // present in the response here. - // TODO: Figure out if we need to support decompressive transcoding - // https://cloud.google.com/storage/docs/transcoding. - content := msg.GetChecksummedData().GetContent() - n = copy(p[n:], content) - leftover := len(content) - n - if leftover > 0 { - // Wasn't able to copy all of the data in the message, store for - // future Read calls. - r.leftovers = content[n:] - } - r.seen += int64(n) - - return n, nil -} - -// Close cancels the read stream's context in order for it to be closed and -// collected. -func (r *gRPCReader) Close() error { - if r.cancel != nil { - r.cancel() - } - r.stream = nil - return nil -} - -// recv attempts to Recv the next message on the stream. In the event -// that a retryable error is encountered, the stream will be closed, reopened, -// and Recv again. This will attempt to Recv until one of the following is true: -// -// * Recv is successful -// * A non-retryable error is encountered -// * The Reader's context is canceled -// -// The last error received is the one that is returned, which could be from -// an attempt to reopen the stream. -func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) { - msg, err := r.stream.Recv() - var shouldRetry = ShouldRetry - if r.settings.retry != nil && r.settings.retry.shouldRetry != nil { - shouldRetry = r.settings.retry.shouldRetry - } - if err != nil && shouldRetry(err) { - // This will "close" the existing stream and immediately attempt to - // reopen the stream, but will backoff if further attempts are necessary. - // Reopening the stream Recvs the first message, so if retrying is - // successful, the next logical chunk will be returned. - msg, err = r.reopenStream() - } - - return msg, err -} - -// reopenStream "closes" the existing stream and attempts to reopen a stream and -// sets the Reader's stream and cancelStream properties in the process. -func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) { - // Close existing stream and initialize new stream with updated offset. - r.Close() - - res, cancel, err := r.reopen(r.seen) - if err != nil { - return nil, err - } - r.stream = res.stream - r.cancel = cancel - return res.response, nil -} - -func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter { - size := params.chunkSize - if params.chunkSize == 0 { - // TODO: Should we actually use the minimum of 256 KB here when the user - // indicates they want minimal memory usage? We cannot do a zero-copy, - // bufferless upload like HTTP/JSON can. 
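
gRPCReader.Read above has to adapt arbitrarily sized stream messages to the io.Reader contract, so any bytes that don't fit the caller's buffer are parked in `leftovers` and served on the next call. A self-contained version of that buffering:

```go
package main

import (
	"fmt"
	"io"
)

// chunkReader adapts a sequence of variable-size messages to io.Reader,
// stashing whatever didn't fit in the caller's buffer for the next Read.
type chunkReader struct {
	msgs      [][]byte
	leftovers []byte
}

func (r *chunkReader) Read(p []byte) (int, error) {
	// Serve buffered bytes first; returning a short read is fine per io.Reader.
	if len(r.leftovers) > 0 {
		n := copy(p, r.leftovers)
		r.leftovers = r.leftovers[n:]
		return n, nil
	}
	if len(r.msgs) == 0 {
		return 0, io.EOF
	}
	msg := r.msgs[0]
	r.msgs = r.msgs[1:]
	n := copy(p, msg)
	if n < len(msg) {
		r.leftovers = msg[n:] // couldn't fit the whole message; keep the rest
	}
	return n, nil
}

func main() {
	r := &chunkReader{msgs: [][]byte{[]byte("hello, "), []byte("chunked world")}}
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello, chunked world
}
```
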
- // TODO: We need to determine if we can avoid starting a - // resumable upload when the user *plans* to send more than bufSize but - // with a bufferless upload. - size = maxPerMessageWriteSize - } - - return &gRPCWriter{ - buf: make([]byte, size), - c: c, - ctx: params.ctx, - reader: r, - bucket: params.bucket, - attrs: params.attrs, - conds: params.conds, - encryptionKey: params.encryptionKey, - sendCRC32C: params.sendCRC32C, - } -} - -// gRPCWriter is a wrapper around the the gRPC client-stream API that manages -// sending chunks of data provided by the user over the stream. -type gRPCWriter struct { - c *grpcStorageClient - buf []byte - reader io.Reader - - ctx context.Context - - bucket string - attrs *ObjectAttrs - conds *Conditions - encryptionKey []byte - settings *settings - - sendCRC32C bool - - // The gRPC client-stream used for sending buffers. - stream storagepb.Storage_WriteObjectClient - - // The Resumable Upload ID started by a gRPC-based Writer. - upid string -} - -// startResumableUpload initializes a Resumable Upload with gRPC and sets the -// upload ID on the Writer. -func (w *gRPCWriter) startResumableUpload() error { - spec, err := w.writeObjectSpec() - if err != nil { - return err - } - req := &storagepb.StartResumableWriteRequest{ - WriteObjectSpec: spec, - CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey), - } - // TODO: Currently the checksums are only sent on the request to initialize - // the upload, but in the future, we must also support sending it - // on the *last* message of the stream. - req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) - return run(w.ctx, func() error { - upres, err := w.c.raw.StartResumableWrite(w.ctx, req) - w.upid = upres.GetUploadId() - return err - }, w.settings.retry, w.settings.idempotent, setRetryHeaderGRPC(w.ctx)) -} - -// queryProgress is a helper that queries the status of the resumable upload -// associated with the given upload ID. -func (w *gRPCWriter) queryProgress() (int64, error) { - var persistedSize int64 - err := run(w.ctx, func() error { - q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{ - UploadId: w.upid, - }) - persistedSize = q.GetPersistedSize() - return err - }, w.settings.retry, true, setRetryHeaderGRPC(w.ctx)) - - // q.GetCommittedSize() will return 0 if q is nil. - return persistedSize, err -} - -// uploadBuffer opens a Write stream and uploads the buffer at the given offset (if -// uploading a chunk for a resumable uploadBuffer), and will mark the write as -// finished if we are done receiving data from the user. The resulting write -// offset after uploading the buffer is returned, as well as a boolean -// indicating if the Object has been finalized. If it has been finalized, the -// final Object will be returned as well. Finalizing the upload is primarily -// important for Resumable Uploads. A simple or multi-part upload will always -// be finalized once the entire buffer has been written. -func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*storagepb.Object, int64, bool, error) { - var err error - var finishWrite bool - var sent, limit int = 0, maxPerMessageWriteSize - var shouldRetry = ShouldRetry - if w.settings.retry != nil && w.settings.retry.shouldRetry != nil { - shouldRetry = w.settings.retry.shouldRetry - } - offset := start - toWrite := w.buf[:recvd] - for { - first := sent == 0 - // This indicates that this is the last message and the remaining - // data fits in one message. 
- belowLimit := recvd-sent <= limit - if belowLimit { - limit = recvd - sent - } - if belowLimit && doneReading { - finishWrite = true - } - - // Prepare chunk section for upload. - data := toWrite[sent : sent+limit] - req := &storagepb.WriteObjectRequest{ - Data: &storagepb.WriteObjectRequest_ChecksummedData{ - ChecksummedData: &storagepb.ChecksummedData{ - Content: data, - }, - }, - WriteOffset: offset, - FinishWrite: finishWrite, - } - - // Open a new stream and set the first_message field on the request. - // The first message on the WriteObject stream must either be the - // Object or the Resumable Upload ID. - if first { - ctx := gapic.InsertMetadata(w.ctx, metadata.Pairs("x-goog-request-params", "bucket="+url.QueryEscape(w.bucket))) - w.stream, err = w.c.raw.WriteObject(ctx) - if err != nil { - return nil, 0, false, err - } - - if w.upid != "" { - req.FirstMessage = &storagepb.WriteObjectRequest_UploadId{UploadId: w.upid} - } else { - spec, err := w.writeObjectSpec() - if err != nil { - return nil, 0, false, err - } - req.FirstMessage = &storagepb.WriteObjectRequest_WriteObjectSpec{ - WriteObjectSpec: spec, - } - req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey) - // For a non-resumable upload, checksums must be sent in this message. - // TODO: Currently the checksums are only sent on the first message - // of the stream, but in the future, we must also support sending it - // on the *last* message of the stream (instead of the first). - req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) - } - - } - - err = w.stream.Send(req) - if err == io.EOF { - // err was io.EOF. The client-side of a stream only gets an EOF on Send - // when the backend closes the stream and wants to return an error - // status. Closing the stream receives the status as an error. - _, err = w.stream.CloseAndRecv() - - // Retriable errors mean we should start over and attempt to - // resend the entire buffer via a new stream. - // If not retriable, falling through will return the error received - // from closing the stream. - if shouldRetry(err) { - sent = 0 - finishWrite = false - // TODO: Add test case for failure modes of querying progress. - offset, err = w.determineOffset(start) - if err == nil { - continue - } - } - } - if err != nil { - return nil, 0, false, err - } - - // Update the immediate stream's sent total and the upload offset with - // the data sent. - sent += len(data) - offset += int64(len(data)) - - // Not done sending data, do not attempt to commit it yet, loop around - // and send more data. - if recvd-sent > 0 { - continue - } - - // Done sending data. Close the stream to "commit" the data sent. - resp, finalized, err := w.commit() - // Retriable errors mean we should start over and attempt to - // resend the entire buffer via a new stream. - // If not retriable, falling through will return the error received - // from closing the stream. - if shouldRetry(err) { - sent = 0 - finishWrite = false - offset, err = w.determineOffset(start) - if err == nil { - continue - } - } - if err != nil { - return nil, 0, false, err - } - - return resp.GetResource(), offset, finalized, nil - } -} - -// determineOffset either returns the offset given to it in the case of a simple -// upload, or queries the write status in the case a resumable upload is being -// used. -func (w *gRPCWriter) determineOffset(offset int64) (int64, error) { - // For a Resumable Upload, we must start from however much data - // was committed. 
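
The retry handling in uploadBuffer above hinges on determineOffset: after a retryable stream failure, a resumable upload must ask the server how many bytes were actually persisted and resend from there, since some of the "failed" writes may in fact have been committed. A toy model of that recovery, with hypothetical names throughout:

```go
package main

import (
	"errors"
	"fmt"
)

// flakyUpload simulates a chunk send that fails once, after which the
// committed offset must be re-queried before resending.
type flakyUpload struct {
	committed int64
	failures  int
}

var errTransient = errors.New("transient stream error")

func (u *flakyUpload) send(data []byte, offset int64) error {
	if u.failures > 0 {
		u.failures--
		return errTransient
	}
	if offset != u.committed {
		return fmt.Errorf("offset %d does not match committed %d", offset, u.committed)
	}
	u.committed += int64(len(data))
	return nil
}

// queryProgress mirrors QueryWriteStatus: it reports how much the server
// actually persisted, which is where a retried upload must resume.
func (u *flakyUpload) queryProgress() int64 { return u.committed }

func uploadBuffer(u *flakyUpload, buf []byte, start int64) (int64, error) {
	const chunk = 4
	offset, sent := start, 0
	for sent < len(buf) {
		end := sent + chunk
		if end > len(buf) {
			end = len(buf)
		}
		if err := u.send(buf[sent:end], offset); err != nil {
			if !errors.Is(err, errTransient) {
				return offset, err
			}
			// Retryable: restart the buffer from whatever the server kept.
			offset = u.queryProgress()
			sent = int(offset - start)
			continue
		}
		sent = end
		offset = start + int64(sent)
	}
	return offset, nil
}

func main() {
	u := &flakyUpload{failures: 1}
	off, err := uploadBuffer(u, []byte("resumable-upload-data"), 0)
	fmt.Println(off, err) // 21 <nil>
}
```

For a simple (non-resumable) upload there is no server-side state to query, which is why determineOffset just returns the original offset in that case.
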
- if w.upid != "" { - committed, err := w.queryProgress() - if err != nil { - return 0, err - } - offset = committed - } - return offset, nil -} - -// commit closes the stream to commit the data sent and potentially receive -// the finalized object if finished uploading. If the last request sent -// indicated that writing was finished, the Object will be finalized and -// returned. If not, then the Object will be nil, and the boolean returned will -// be false. -func (w *gRPCWriter) commit() (*storagepb.WriteObjectResponse, bool, error) { - finalized := true - resp, err := w.stream.CloseAndRecv() - if err == io.EOF { - // Closing a stream for a resumable upload finish_write = false results - // in an EOF which can be ignored, as we aren't done uploading yet. - finalized = false - err = nil - } - // Drop the stream reference as it has been closed. - w.stream = nil - - return resp, finalized, err -} - -// writeObjectSpec constructs a WriteObjectSpec proto using the Writer's -// ObjectAttrs and applies its Conditions. This is only used for gRPC. -func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) { - // To avoid modifying the ObjectAttrs embeded in the calling writer, deref - // the ObjectAttrs pointer to make a copy, then assign the desired name to - // the attribute. - attrs := *w.attrs - - spec := &storagepb.WriteObjectSpec{ - Resource: attrs.toProtoObject(w.bucket), - } - // WriteObject doesn't support the generation condition, so use default. - if err := applyCondsProto("WriteObject", defaultGen, w.conds, spec); err != nil { - return nil, err - } - return spec, nil -} - -// read copies the data in the reader to the given buffer and reports how much -// data was read into the buffer and if there is no more data to read (EOF). -// Furthermore, if the attrs.ContentType is unset, the first bytes of content -// will be sniffed for a matching content type. -func (w *gRPCWriter) read() (int, bool, error) { - if w.attrs.ContentType == "" { - w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader) - } - // Set n to -1 to start the Read loop. - var n, recvd int = -1, 0 - var err error - for err == nil && n != 0 { - // The routine blocks here until data is received. - n, err = w.reader.Read(w.buf[recvd:]) - recvd += n - } - var done bool - if err == io.EOF { - done = true - err = nil - } - return recvd, done, err -} - -func checkCanceled(err error) error { - if status.Code(err) == codes.Canceled { - return context.Canceled - } - - return err -} diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go deleted file mode 100644 index 422a7c2335..0000000000 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ /dev/null @@ -1,392 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
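
The gRPCWriter read helper above sniffs a content type from the first bytes when the caller didn't set one, via a gax helper that wraps the reader. A rough standard-library equivalent of the same idea: peek at up to the 512 bytes that http.DetectContentType inspects, then splice them back with io.MultiReader so nothing is lost.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// sniffContentType peeks at up to 512 bytes (what http.DetectContentType
// examines), then splices the peeked bytes back so the caller still reads
// the full stream. Analogous to the content sniffing done before upload.
func sniffContentType(r io.Reader) (io.Reader, string, error) {
	buf := make([]byte, 512)
	n, err := io.ReadFull(r, buf)
	if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
		return nil, "", err
	}
	peeked := buf[:n]
	ct := http.DetectContentType(peeked)
	// Reassemble: peeked bytes first, then the untouched remainder.
	return io.MultiReader(bytes.NewReader(peeked), r), ct, nil
}

func main() {
	r, ct, err := sniffContentType(strings.NewReader("<html><body>hi</body></html>"))
	if err != nil {
		panic(err)
	}
	rest, _ := io.ReadAll(r)
	fmt.Println(ct)        // text/html; charset=utf-8
	fmt.Println(len(rest)) // 28 — nothing was lost to the sniff
}
```
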
- -package storage - -import ( - "context" - "errors" - "fmt" - "time" - - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" - "google.golang.org/api/iterator" - raw "google.golang.org/api/storage/v1" -) - -// HMACState is the state of the HMAC key. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. -type HMACState string - -const ( - // Active is the status for an active key that can be used to sign - // requests. - Active HMACState = "ACTIVE" - - // Inactive is the status for an inactive key thus requests signed by - // this key will be denied. - Inactive HMACState = "INACTIVE" - - // Deleted is the status for a key that is deleted. - // Once in this state the key cannot key cannot be recovered - // and does not count towards key limits. Deleted keys will be cleaned - // up later. - Deleted HMACState = "DELETED" -) - -// HMACKey is the representation of a Google Cloud Storage HMAC key. -// -// HMAC keys are used to authenticate signed access to objects. To enable HMAC key -// authentication, please visit https://cloud.google.com/storage/docs/migrating. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. -type HMACKey struct { - // The HMAC's secret key. - Secret string - - // AccessID is the ID of the HMAC key. - AccessID string - - // Etag is the HTTP/1.1 Entity tag. - Etag string - - // ID is the ID of the HMAC key, including the ProjectID and AccessID. - ID string - - // ProjectID is the ID of the project that owns the - // service account to which the key authenticates. - ProjectID string - - // ServiceAccountEmail is the email address - // of the key's associated service account. - ServiceAccountEmail string - - // CreatedTime is the creation time of the HMAC key. - CreatedTime time.Time - - // UpdatedTime is the last modification time of the HMAC key metadata. - UpdatedTime time.Time - - // State is the state of the HMAC key. - // It can be one of StateActive, StateInactive or StateDeleted. - State HMACState -} - -// HMACKeyHandle helps provide access and management for HMAC keys. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. -type HMACKeyHandle struct { - projectID string - accessID string - retry *retryConfig - tc storageClient -} - -// HMACKeyHandle creates a handle that will be used for HMACKey operations. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. -func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { - return &HMACKeyHandle{ - projectID: projectID, - accessID: accessID, - retry: c.retry, - tc: c.tc, - } -} - -// Get invokes an RPC to retrieve the HMAC key referenced by the -// HMACKeyHandle's accessID. -// -// Options such as UserProjectForHMACKeys can be used to set the -// userProject to be billed against for operations. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. -func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) { - desc := new(hmacKeyDesc) - for _, opt := range opts { - opt.withHMACKeyDesc(desc) - } - - o := makeStorageOpts(true, hkh.retry, desc.userProjectID) - hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...) - - return hk, err -} - -// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage. -// Only inactive HMAC keys can be deleted. -// After deletion, a key cannot be used to authenticate requests. 
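
For orientation, this is roughly how the handle API being deleted from vendor here is used from application code (the API still lives in upstream cloud.google.com/go/storage). The project and access IDs below are placeholders; note that the Update to Inactive must precede Delete, since only inactive keys are deletable.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

// Deactivate and delete an HMAC key with the handle API shown above.
func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	hkh := client.HMACKeyHandle("my-project", "GOOG1EXAMPLEACCESSID")

	key, err := hkh.Get(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("state before:", key.State)

	// Only Inactive keys may be deleted, hence the Update first.
	if _, err := hkh.Update(ctx, storage.HMACKeyAttrsToUpdate{State: storage.Inactive}); err != nil {
		log.Fatal(err)
	}
	if err := hkh.Delete(ctx); err != nil {
		log.Fatal(err)
	}
}
```
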
-// -// This method is EXPERIMENTAL and subject to change or removal without notice. -func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error { - desc := new(hmacKeyDesc) - for _, opt := range opts { - opt.withHMACKeyDesc(desc) - } - - o := makeStorageOpts(true, hkh.retry, desc.userProjectID) - return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...) -} - -func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) { - hkmd := hk.Metadata - if hkmd == nil { - return nil, errors.New("field Metadata cannot be nil") - } - createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated) - if err != nil { - return nil, fmt.Errorf("field CreatedTime: %w", err) - } - updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated) - if err != nil && !updatedTimeCanBeNil { - return nil, fmt.Errorf("field UpdatedTime: %w", err) - } - - hmKey := &HMACKey{ - AccessID: hkmd.AccessId, - Secret: hk.Secret, - Etag: hkmd.Etag, - ID: hkmd.Id, - State: HMACState(hkmd.State), - ProjectID: hkmd.ProjectId, - CreatedTime: createdTime, - UpdatedTime: updatedTime, - - ServiceAccountEmail: hkmd.ServiceAccountEmail, - } - - return hmKey, nil -} - -func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey { - if pbmd == nil { - return nil - } - - return &HMACKey{ - AccessID: pbmd.GetAccessId(), - ID: pbmd.GetId(), - State: HMACState(pbmd.GetState()), - ProjectID: pbmd.GetProject(), - CreatedTime: convertProtoTime(pbmd.GetCreateTime()), - UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()), - ServiceAccountEmail: pbmd.GetServiceAccountEmail(), - } -} - -// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. -func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) { - if projectID == "" { - return nil, errors.New("storage: expecting a non-blank projectID") - } - if serviceAccountEmail == "" { - return nil, errors.New("storage: expecting a non-blank service account email") - } - - desc := new(hmacKeyDesc) - for _, opt := range opts { - opt.withHMACKeyDesc(desc) - } - - o := makeStorageOpts(false, c.retry, desc.userProjectID) - hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...) - return hk, err -} - -// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. -type HMACKeyAttrsToUpdate struct { - // State is required and must be either StateActive or StateInactive. - State HMACState - - // Etag is an optional field and it is the HTTP/1.1 Entity tag. - Etag string -} - -// Update mutates the HMACKey referred to by accessID. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. -func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) { - if au.State != Active && au.State != Inactive { - return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) - } - - desc := new(hmacKeyDesc) - for _, opt := range opts { - opt.withHMACKeyDesc(desc) - } - - isIdempotent := len(au.Etag) > 0 - o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID) - hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...) 
- return hk, err -} - -// An HMACKeysIterator is an iterator over HMACKeys. -// -// Note: This iterator is not safe for concurrent operations without explicit synchronization. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. -type HMACKeysIterator struct { - ctx context.Context - raw *raw.ProjectsHmacKeysService - projectID string - hmacKeys []*HMACKey - pageInfo *iterator.PageInfo - nextFunc func() error - index int - desc hmacKeyDesc - retry *retryConfig -} - -// ListHMACKeys returns an iterator for listing HMACKeys. -// -// Note: This iterator is not safe for concurrent operations without explicit synchronization. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. -func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator { - desc := new(hmacKeyDesc) - for _, opt := range opts { - opt.withHMACKeyDesc(desc) - } - - o := makeStorageOpts(true, c.retry, desc.userProjectID) - return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...) -} - -// Next returns the next result. Its second return value is iterator.Done if -// there are no more results. Once Next returns iterator.Done, all subsequent -// calls will return iterator.Done. -// -// Note: This iterator is not safe for concurrent operations without explicit synchronization. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. -func (it *HMACKeysIterator) Next() (*HMACKey, error) { - if err := it.nextFunc(); err != nil { - return nil, err - } - - key := it.hmacKeys[it.index] - it.index++ - - return key, nil -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -// -// Note: This iterator is not safe for concurrent operations without explicit synchronization. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. -func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } - -func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) { - // TODO: Remove fetch method upon integration. This method is internalized into - // httpStorageClient.ListHMACKeys() as it is the only caller. - call := it.raw.List(it.projectID) - setClientHeader(call.Header()) - if pageToken != "" { - call = call.PageToken(pageToken) - } - if it.desc.showDeletedKeys { - call = call.ShowDeletedKeys(true) - } - if it.desc.userProjectID != "" { - call = call.UserProject(it.desc.userProjectID) - } - if it.desc.forServiceAccountEmail != "" { - call = call.ServiceAccountEmail(it.desc.forServiceAccountEmail) - } - if pageSize > 0 { - call = call.MaxResults(int64(pageSize)) - } - - ctx := it.ctx - var resp *raw.HmacKeysMetadata - err = run(it.ctx, func() error { - resp, err = call.Context(ctx).Do() - return err - }, it.retry, true, setRetryHeaderHTTP(call)) - if err != nil { - return "", err - } - - for _, metadata := range resp.Items { - hk := &raw.HmacKey{ - Metadata: metadata, - } - hkey, err := toHMACKeyFromRaw(hk, true) - if err != nil { - return "", err - } - it.hmacKeys = append(it.hmacKeys, hkey) - } - return resp.NextPageToken, nil -} - -type hmacKeyDesc struct { - forServiceAccountEmail string - showDeletedKeys bool - userProjectID string -} - -// HMACKeyOption configures the behavior of HMACKey related methods and actions. -// -// This interface is EXPERIMENTAL and subject to change or removal without notice. 
-type HMACKeyOption interface { - withHMACKeyDesc(*hmacKeyDesc) -} - -type hmacKeyDescFunc func(*hmacKeyDesc) - -func (hkdf hmacKeyDescFunc) withHMACKeyDesc(hkd *hmacKeyDesc) { - hkdf(hkd) -} - -// ForHMACKeyServiceAccountEmail returns HMAC Keys that are -// associated with the email address of a service account in the project. -// -// Only one service account email can be used as a filter, so if multiple -// of these options are applied, the last email to be set will be used. -// -// This option is EXPERIMENTAL and subject to change or removal without notice. -func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption { - return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { - hkd.forServiceAccountEmail = serviceAccountEmail - }) -} - -// ShowDeletedHMACKeys will also list keys whose state is "DELETED". -// -// This option is EXPERIMENTAL and subject to change or removal without notice. -func ShowDeletedHMACKeys() HMACKeyOption { - return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { - hkd.showDeletedKeys = true - }) -} - -// UserProjectForHMACKeys will bill the request against userProjectID -// if userProjectID is non-empty. -// -// Note: This is a noop right now and only provided for API compatibility. -// -// This option is EXPERIMENTAL and subject to change or removal without notice. -func UserProjectForHMACKeys(userProjectID string) HMACKeyOption { - return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { - hkd.userProjectID = userProjectID - }) -} diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go deleted file mode 100644 index fae96043a9..0000000000 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ /dev/null @@ -1,1351 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "encoding/base64" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "reflect" - "strconv" - "strings" - "time" - - "cloud.google.com/go/internal/optional" - "cloud.google.com/go/internal/trace" - "golang.org/x/oauth2/google" - "google.golang.org/api/googleapi" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - raw "google.golang.org/api/storage/v1" - "google.golang.org/api/transport" - htransport "google.golang.org/api/transport/http" - iampb "google.golang.org/genproto/googleapis/iam/v1" -) - -// httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic -// storageClient interface. -// -// This is an experimental API and not intended for public use. -type httpStorageClient struct { - creds *google.Credentials - hc *http.Client - readHost string - raw *raw.Service - scheme string - settings *settings -} - -// newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON -// Storage API. -// -// This is an experimental API and not intended for public use. 
-func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { - s := initSettings(opts...) - o := s.clientOption - - var creds *google.Credentials - // In general, it is recommended to use raw.NewService instead of htransport.NewClient - // since raw.NewService configures the correct default endpoints when initializing the - // internal http client. However, in our case, "NewRangeReader" in reader.go needs to - // access the http client directly to make requests, so we create the client manually - // here so it can be re-used by both reader.go and raw.NewService. This means we need to - // manually configure the default endpoint options on the http client. Furthermore, we - // need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints. - if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { - // Prepend default options to avoid overriding options passed by the user. - o = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, o...) - - o = append(o, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/")) - o = append(o, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/")) - - // Don't error out here. The user may have passed in their own HTTP - // client which does not auth with ADC or other common conventions. - c, err := transport.Creds(ctx, o...) - if err == nil { - creds = c - o = append(o, internaloption.WithCredentials(creds)) - } - } else { - var hostURL *url.URL - - if strings.Contains(host, "://") { - h, err := url.Parse(host) - if err != nil { - return nil, err - } - hostURL = h - } else { - // Add scheme for user if not supplied in STORAGE_EMULATOR_HOST - // URL is only parsed correctly if it has a scheme, so we build it ourselves - hostURL = &url.URL{Scheme: "http", Host: host} - } - - hostURL.Path = "storage/v1/" - endpoint := hostURL.String() - - // Append the emulator host as default endpoint for the user - o = append([]option.ClientOption{option.WithoutAuthentication()}, o...) - - o = append(o, internaloption.WithDefaultEndpoint(endpoint)) - o = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint)) - } - s.clientOption = o - - // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. - hc, ep, err := htransport.NewClient(ctx, s.clientOption...) - if err != nil { - return nil, fmt.Errorf("dialing: %w", err) - } - // RawService should be created with the chosen endpoint to take account of user override. - rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) - if err != nil { - return nil, fmt.Errorf("storage client: %w", err) - } - // Update readHost and scheme with the chosen endpoint. - u, err := url.Parse(ep) - if err != nil { - return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) - } - - return &httpStorageClient{ - creds: creds, - hc: hc, - readHost: u.Host, - raw: rawService, - scheme: u.Scheme, - settings: s, - }, nil -} - -func (c *httpStorageClient) Close() error { - c.hc.CloseIdleConnections() - return nil -} - -// Top-level methods. - -func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) { - s := callSettings(c.settings, opts...) 
- call := c.raw.Projects.ServiceAccount.Get(project) - var res *raw.ServiceAccount - err := run(ctx, func() error { - var err error - res, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) - if err != nil { - return "", err - } - return res.EmailAddress, nil -} - -func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) { - s := callSettings(c.settings, opts...) - var bkt *raw.Bucket - if attrs != nil { - bkt = attrs.toRawBucket() - } else { - bkt = &raw.Bucket{} - } - bkt.Name = bucket - // If there is lifecycle information but no location, explicitly set - // the location. This is a GCS quirk/bug. - if bkt.Location == "" && bkt.Lifecycle != nil { - bkt.Location = "US" - } - req := c.raw.Buckets.Insert(project, bkt) - setClientHeader(req.Header()) - if attrs != nil && attrs.PredefinedACL != "" { - req.PredefinedAcl(attrs.PredefinedACL) - } - if attrs != nil && attrs.PredefinedDefaultObjectACL != "" { - req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) - } - var battrs *BucketAttrs - err := run(ctx, func() error { - b, err := req.Context(ctx).Do() - if err != nil { - return err - } - battrs, err = newBucket(b) - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) - return battrs, err -} - -func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator { - s := callSettings(c.settings, opts...) - it := &BucketIterator{ - ctx: ctx, - projectID: project, - } - - fetch := func(pageSize int, pageToken string) (token string, err error) { - req := c.raw.Buckets.List(it.projectID) - setClientHeader(req.Header()) - req.Projection("full") - req.Prefix(it.Prefix) - req.PageToken(pageToken) - if pageSize > 0 { - req.MaxResults(int64(pageSize)) - } - var resp *raw.Buckets - err = run(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) - if err != nil { - return "", err - } - for _, item := range resp.Items { - b, err := newBucket(item) - if err != nil { - return "", err - } - it.buckets = append(it.buckets, b) - } - return resp.NextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - fetch, - func() int { return len(it.buckets) }, - func() interface{} { b := it.buckets; it.buckets = nil; return b }) - - return it -} - -// Bucket methods. - -func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := c.raw.Buckets.Delete(bucket) - setClientHeader(req.Header()) - if err := applyBucketConds("httpStorageClient.DeleteBucket", conds, req); err != nil { - return err - } - if s.userProject != "" { - req.UserProject(s.userProject) - } - - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) -} - -func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { - s := callSettings(c.settings, opts...) 
- req := c.raw.Buckets.Get(bucket).Projection("full") - setClientHeader(req.Header()) - err := applyBucketConds("httpStorageClient.GetBucket", conds, req) - if err != nil { - return nil, err - } - if s.userProject != "" { - req.UserProject(s.userProject) - } - - var resp *raw.Bucket - err = run(ctx, func() error { - resp, err = req.Context(ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) - - var e *googleapi.Error - if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { - return nil, ErrBucketNotExist - } - if err != nil { - return nil, err - } - return newBucket(resp) -} -func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { - s := callSettings(c.settings, opts...) - rb := uattrs.toRawBucket() - req := c.raw.Buckets.Patch(bucket, rb).Projection("full") - setClientHeader(req.Header()) - err := applyBucketConds("httpStorageClient.UpdateBucket", conds, req) - if err != nil { - return nil, err - } - if s.userProject != "" { - req.UserProject(s.userProject) - } - if uattrs != nil && uattrs.PredefinedACL != "" { - req.PredefinedAcl(uattrs.PredefinedACL) - } - if uattrs != nil && uattrs.PredefinedDefaultObjectACL != "" { - req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL) - } - - var rawBucket *raw.Bucket - err = run(ctx, func() error { - rawBucket, err = req.Context(ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) - if err != nil { - return nil, err - } - return newBucket(rawBucket) -} - -func (c *httpStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - - var metageneration int64 - if conds != nil { - metageneration = conds.MetagenerationMatch - } - req := c.raw.Buckets.LockRetentionPolicy(bucket, metageneration) - - return run(ctx, func() error { - _, err := req.Context(ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) -} -func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { - s := callSettings(c.settings, opts...) 
- it := &ObjectIterator{ - ctx: ctx, - } - if q != nil { - it.query = *q - } - fetch := func(pageSize int, pageToken string) (string, error) { - req := c.raw.Objects.List(bucket) - setClientHeader(req.Header()) - projection := it.query.Projection - if projection == ProjectionDefault { - projection = ProjectionFull - } - req.Projection(projection.String()) - req.Delimiter(it.query.Delimiter) - req.Prefix(it.query.Prefix) - req.StartOffset(it.query.StartOffset) - req.EndOffset(it.query.EndOffset) - req.Versions(it.query.Versions) - req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) - if selection := it.query.toFieldSelection(); selection != "" { - req.Fields("nextPageToken", googleapi.Field(selection)) - } - req.PageToken(pageToken) - if s.userProject != "" { - req.UserProject(s.userProject) - } - if pageSize > 0 { - req.MaxResults(int64(pageSize)) - } - var resp *raw.Objects - var err error - err = run(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) - if err != nil { - var e *googleapi.Error - if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { - err = ErrBucketNotExist - } - return "", err - } - for _, item := range resp.Items { - it.items = append(it.items, newObject(item)) - } - for _, prefix := range resp.Prefixes { - it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) - } - return resp.NextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - fetch, - func() int { return len(it.items) }, - func() interface{} { b := it.items; it.items = nil; return b }) - - return it -} - -// Object metadata methods. - -func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := c.raw.Objects.Delete(bucket, object).Context(ctx) - if err := applyConds("Delete", gen, conds, req); err != nil { - return err - } - if s.userProject != "" { - req.UserProject(s.userProject) - } - err := run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) - var e *googleapi.Error - if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { - return ErrObjectNotExist - } - return err -} - -func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { - s := callSettings(c.settings, opts...) - req := c.raw.Objects.Get(bucket, object).Projection("full").Context(ctx) - if err := applyConds("Attrs", gen, conds, req); err != nil { - return nil, err - } - if s.userProject != "" { - req.UserProject(s.userProject) - } - if err := setEncryptionHeaders(req.Header(), encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - var err error - err = run(ctx, func() error { - obj, err = req.Context(ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) - var e *googleapi.Error - if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(obj), nil -} - -func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { - s := callSettings(c.settings, opts...) 
- - var attrs ObjectAttrs - // Lists of fields to send, and set to null, in the JSON. - var forceSendFields, nullFields []string - if uattrs.ContentType != nil { - attrs.ContentType = optional.ToString(uattrs.ContentType) - // For ContentType, sending the empty string is a no-op. - // Instead we send a null. - if attrs.ContentType == "" { - nullFields = append(nullFields, "ContentType") - } else { - forceSendFields = append(forceSendFields, "ContentType") - } - } - if uattrs.ContentLanguage != nil { - attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) - // For ContentLanguage it's an error to send the empty string. - // Instead we send a null. - if attrs.ContentLanguage == "" { - nullFields = append(nullFields, "ContentLanguage") - } else { - forceSendFields = append(forceSendFields, "ContentLanguage") - } - } - if uattrs.ContentEncoding != nil { - attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) - forceSendFields = append(forceSendFields, "ContentEncoding") - } - if uattrs.ContentDisposition != nil { - attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) - forceSendFields = append(forceSendFields, "ContentDisposition") - } - if uattrs.CacheControl != nil { - attrs.CacheControl = optional.ToString(uattrs.CacheControl) - forceSendFields = append(forceSendFields, "CacheControl") - } - if uattrs.EventBasedHold != nil { - attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold) - forceSendFields = append(forceSendFields, "EventBasedHold") - } - if uattrs.TemporaryHold != nil { - attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) - forceSendFields = append(forceSendFields, "TemporaryHold") - } - if !uattrs.CustomTime.IsZero() { - attrs.CustomTime = uattrs.CustomTime - forceSendFields = append(forceSendFields, "CustomTime") - } - if uattrs.Metadata != nil { - attrs.Metadata = uattrs.Metadata - if len(attrs.Metadata) == 0 { - // Sending the empty map is a no-op. We send null instead. - nullFields = append(nullFields, "Metadata") - } else { - forceSendFields = append(forceSendFields, "Metadata") - } - } - if uattrs.ACL != nil { - attrs.ACL = uattrs.ACL - // It's an error to attempt to delete the ACL, so - // we don't append to nullFields here. - forceSendFields = append(forceSendFields, "Acl") - } - rawObj := attrs.toRawObject(bucket) - rawObj.ForceSendFields = forceSendFields - rawObj.NullFields = nullFields - call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full").Context(ctx) - if err := applyConds("Update", gen, conds, call); err != nil { - return nil, err - } - if s.userProject != "" { - call.UserProject(s.userProject) - } - if uattrs.PredefinedACL != "" { - call.PredefinedAcl(uattrs.PredefinedACL) - } - if err := setEncryptionHeaders(call.Header(), encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - var err error - err = run(ctx, func() error { obj, err = call.Do(); return err }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) - var e *googleapi.Error - if errors.As(err, &e) && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(obj), nil -} - -// Default Object ACL methods. - -func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { - s := callSettings(c.settings, opts...) 
- req := c.raw.DefaultObjectAccessControls.Delete(bucket, string(entity)) - configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) -} - -func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { - s := callSettings(c.settings, opts...) - var acls *raw.ObjectAccessControls - var err error - req := c.raw.DefaultObjectAccessControls.List(bucket) - configureACLCall(ctx, s.userProject, req) - err = run(ctx, func() error { - acls, err = req.Do() - return err - }, s.retry, true, setRetryHeaderHTTP(req)) - if err != nil { - return nil, err - } - return toObjectACLRules(acls.Items), nil -} -func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - type setRequest interface { - Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) - Header() http.Header - } - acl := &raw.ObjectAccessControl{ - Bucket: bucket, - Entity: string(entity), - Role: string(role), - } - var req setRequest - var err error - req = c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl) - configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { - _, err = req.Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) -} - -// Bucket ACL methods. - -func (c *httpStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := c.raw.BucketAccessControls.Delete(bucket, string(entity)) - configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) -} - -func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { - s := callSettings(c.settings, opts...) - var acls *raw.BucketAccessControls - var err error - req := c.raw.BucketAccessControls.List(bucket) - configureACLCall(ctx, s.userProject, req) - err = run(ctx, func() error { - acls, err = req.Do() - return err - }, s.retry, true, setRetryHeaderHTTP(req)) - if err != nil { - return nil, err - } - return toBucketACLRules(acls.Items), nil -} - -func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - acl := &raw.BucketAccessControl{ - Bucket: bucket, - Entity: string(entity), - Role: string(role), - } - req := c.raw.BucketAccessControls.Update(bucket, string(entity), acl) - configureACLCall(ctx, s.userProject, req) - var err error - return run(ctx, func() error { - _, err = req.Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) -} - -// configureACLCall sets the context, user project and headers on the apiary library call. -// This will panic if the call does not have the correct methods. -func configureACLCall(ctx context.Context, userProject string, call interface{ Header() http.Header }) { - vc := reflect.ValueOf(call) - vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) - if userProject != "" { - vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(userProject)}) - } - setClientHeader(call.Header()) -} - -// Object ACL methods. 
- -func (c *httpStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := c.raw.ObjectAccessControls.Delete(bucket, object, string(entity)) - configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) -} - -// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. -// Selecting a specific generation of this object is not currently supported by the client. -func (c *httpStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { - s := callSettings(c.settings, opts...) - var acls *raw.ObjectAccessControls - var err error - req := c.raw.ObjectAccessControls.List(bucket, object) - configureACLCall(ctx, s.userProject, req) - err = run(ctx, func() error { - acls, err = req.Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) - if err != nil { - return nil, err - } - return toObjectACLRules(acls.Items), nil -} - -func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - type setRequest interface { - Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) - Header() http.Header - } - - acl := &raw.ObjectAccessControl{ - Bucket: bucket, - Entity: string(entity), - Role: string(role), - } - var req setRequest - var err error - req = c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl) - configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { - _, err = req.Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) -} - -// Media operations. - -func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) { - s := callSettings(c.settings, opts...) - rawReq := &raw.ComposeRequest{} - // Compose requires a non-empty Destination, so we always set it, - // even if the caller-provided ObjectAttrs is the zero value. 
- rawReq.Destination = req.dstObject.attrs.toRawObject(req.dstBucket) - if req.sendCRC32C { - rawReq.Destination.Crc32c = encodeUint32(req.dstObject.attrs.CRC32C) - } - for _, src := range req.srcs { - srcObj := &raw.ComposeRequestSourceObjects{ - Name: src.name, - } - if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { - return nil, err - } - rawReq.SourceObjects = append(rawReq.SourceObjects, srcObj) - } - - call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq).Context(ctx) - if err := applyConds("ComposeFrom destination", defaultGen, req.dstObject.conds, call); err != nil { - return nil, err - } - if s.userProject != "" { - call.UserProject(s.userProject) - } - if req.predefinedACL != "" { - call.DestinationPredefinedAcl(req.predefinedACL) - } - if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - setClientHeader(call.Header()) - - var err error - retryCall := func() error { obj, err = call.Do(); return err } - - if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { - return nil, err - } - return newObject(obj), nil -} -func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) { - s := callSettings(c.settings, opts...) - rawObject := req.dstObject.attrs.toRawObject("") - call := c.raw.Objects.Rewrite(req.srcObject.bucket, req.srcObject.name, req.dstObject.bucket, req.dstObject.name, rawObject) - - call.Context(ctx).Projection("full") - if req.token != "" { - call.RewriteToken(req.token) - } - if req.dstObject.keyName != "" { - call.DestinationKmsKeyName(req.dstObject.keyName) - } - if req.predefinedACL != "" { - call.DestinationPredefinedAcl(req.predefinedACL) - } - if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { - return nil, err - } - if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil { - return nil, err - } - if s.userProject != "" { - call.UserProject(s.userProject) - } - // Set destination encryption headers. - if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil { - return nil, err - } - // Set source encryption headers. - if err := setEncryptionHeaders(call.Header(), req.srcObject.encryptionKey, true); err != nil { - return nil, err - } - - if req.maxBytesRewrittenPerCall != 0 { - call.MaxBytesRewrittenPerCall(req.maxBytesRewrittenPerCall) - } - - var res *raw.RewriteResponse - var err error - setClientHeader(call.Header()) - - retryCall := func() error { res, err = call.Do(); return err } - - if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { - return nil, err - } - - r := &rewriteObjectResponse{ - done: res.Done, - written: res.TotalBytesRewritten, - size: res.ObjectSize, - token: res.RewriteToken, - resource: newObject(res.Resource), - } - - return r, nil -} - -func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) 
- - u := &url.URL{ - Scheme: c.scheme, - Host: c.readHost, - Path: fmt.Sprintf("/%s/%s", params.bucket, params.object), - } - verb := "GET" - if params.length == 0 { - verb = "HEAD" - } - req, err := http.NewRequest(verb, u.String(), nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - if s.userProject != "" { - req.Header.Set("X-Goog-User-Project", s.userProject) - } - if params.readCompressed { - req.Header.Set("Accept-Encoding", "gzip") - } - if err := setEncryptionHeaders(req.Header, params.encryptionKey, false); err != nil { - return nil, err - } - - // Define a function that initiates a Read with offset and length, assuming we - // have already read seen bytes. - reopen := func(seen int64) (*http.Response, error) { - // If the context has already expired, return immediately without making a - // call. - if err := ctx.Err(); err != nil { - return nil, err - } - start := params.offset + seen - if params.length < 0 && start < 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d", start)) - } else if params.length < 0 && start > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) - } else if params.length > 0 { - // The end character isn't affected by how many bytes we've seen. - req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, params.offset+params.length-1)) - } - // We wait to assign conditions here because the generation number can change in between reopen() runs. - if err := setConditionsHeaders(req.Header, params.conds); err != nil { - return nil, err - } - // If an object generation is specified, include generation as query string parameters. - if params.gen >= 0 { - req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) - } - - var res *http.Response - err = run(ctx, func() error { - res, err = c.hc.Do(req) - if err != nil { - return err - } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return ErrObjectNotExist - } - if res.StatusCode < 200 || res.StatusCode > 299 { - body, _ := ioutil.ReadAll(res.Body) - res.Body.Close() - return &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - Body: string(body), - } - } - - partialContentNotSatisfied := - !decompressiveTranscoding(res) && - start > 0 && params.length != 0 && - res.StatusCode != http.StatusPartialContent - - if partialContentNotSatisfied { - res.Body.Close() - return errors.New("storage: partial request not satisfied") - } - - // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves - // back the whole file regardless of the range count passed in as per: - // https://cloud.google.com/storage/docs/transcoding#range, - // thus we have to manually move the body forward by seen bytes. - if decompressiveTranscoding(res) && seen > 0 { - _, _ = io.CopyN(ioutil.Discard, res.Body, seen) - } - - // If a generation hasn't been specified, and this is the first response we get, let's record the - // generation. In future requests we'll use this generation as a precondition to avoid data races. - if params.gen < 0 && res.Header.Get("X-Goog-Generation") != "" { - gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) - if err != nil { - return err - } - params.gen = gen64 - } - return nil - }, s.retry, s.idempotent, setRetryHeaderHTTP(nil)) - if err != nil { - return nil, err - } - return res, nil - } - - res, err := reopen(0) - if err != nil { - return nil, err - } - var ( - size int64 // total size of object, even if a range was requested. - checkCRC bool - crc uint32 - startOffset int64 // non-zero if range request. 
- )
- if res.StatusCode == http.StatusPartialContent {
- cr := strings.TrimSpace(res.Header.Get("Content-Range"))
- if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
- return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
- }
- // Content range is formatted <first byte>-<last byte>/<total size>. We take
- // the total size.
- size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
- }
-
- dashIndex := strings.Index(cr, "-")
- if dashIndex >= 0 {
- startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err)
- }
- }
- } else {
- size = res.ContentLength
- // Check the CRC iff all of the following hold:
- // - We asked for content (length != 0).
- // - We got all the content (status != PartialContent).
- // - The server sent a CRC header.
- // - The Go http stack did not uncompress the file.
- // - We were not served compressed data that was uncompressed on download.
- // The problem with the last two cases is that the CRC will not match -- GCS
- // computes it on the compressed contents, but we compute it on the
- // uncompressed contents.
- if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
- crc, checkCRC = parseCRC32c(res)
- }
- }
-
- remain := res.ContentLength
- body := res.Body
- if params.length == 0 {
- remain = 0
- body.Close()
- body = emptyBody
- }
- var metaGen int64
- if res.Header.Get("X-Goog-Metageneration") != "" {
- metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
- if err != nil {
- return nil, err
- }
- }
-
- var lm time.Time
- if res.Header.Get("Last-Modified") != "" {
- lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
- if err != nil {
- return nil, err
- }
- }
-
- attrs := ReaderObjectAttrs{
- Size: size,
- ContentType: res.Header.Get("Content-Type"),
- ContentEncoding: res.Header.Get("Content-Encoding"),
- CacheControl: res.Header.Get("Cache-Control"),
- LastModified: lm,
- StartOffset: startOffset,
- Generation: params.gen,
- Metageneration: metaGen,
- }
- return &Reader{
- Attrs: attrs,
- size: size,
- remain: remain,
- wantCRC: crc,
- checkCRC: checkCRC,
- reader: &httpReader{
- reopen: reopen,
- body: body,
- },
- }, nil
-}
-
-func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
- s := callSettings(c.settings, opts...)
- errorf := params.setError
- setObj := params.setObj
- progress := params.progress
- attrs := params.attrs
-
- mediaOpts := []googleapi.MediaOption{
- googleapi.ChunkSize(params.chunkSize),
- }
- if c := attrs.ContentType; c != "" {
- mediaOpts = append(mediaOpts, googleapi.ContentType(c))
- }
- if params.chunkRetryDeadline != 0 {
- mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(params.chunkRetryDeadline))
- }
-
- pr, pw := io.Pipe()
-
- go func() {
- defer close(params.donec)
-
- rawObj := attrs.toRawObject(params.bucket)
- if params.sendCRC32C {
- rawObj.Crc32c = encodeUint32(attrs.CRC32C)
- }
- if attrs.MD5 != nil {
- rawObj.Md5Hash = base64.StdEncoding.EncodeToString(attrs.MD5)
- }
- call := c.raw.Objects.Insert(params.bucket, rawObj).
- Media(pr, mediaOpts...).
- Projection("full").
- Context(params.ctx).
- Name(params.attrs.Name) - call.ProgressUpdater(func(n, _ int64) { progress(n) }) - - if attrs.KMSKeyName != "" { - call.KmsKeyName(attrs.KMSKeyName) - } - if attrs.PredefinedACL != "" { - call.PredefinedAcl(attrs.PredefinedACL) - } - if err := setEncryptionHeaders(call.Header(), params.encryptionKey, false); err != nil { - errorf(err) - pr.CloseWithError(err) - return - } - var resp *raw.Object - err := applyConds("NewWriter", defaultGen, params.conds, call) - if err == nil { - if s.userProject != "" { - call.UserProject(s.userProject) - } - // TODO(tritone): Remove this code when Uploads begin to support - // retry attempt header injection with "client header" injection. - setClientHeader(call.Header()) - - // The internals that perform call.Do automatically retry both the initial - // call to set up the upload as well as calls to upload individual chunks - // for a resumable upload (as long as the chunk size is non-zero). Hence - // there is no need to add retries here. - - // Retry only when the operation is idempotent or the retry policy is RetryAlways. - var useRetry bool - if (s.retry == nil || s.retry.policy == RetryIdempotent) && s.idempotent { - useRetry = true - } else if s.retry != nil && s.retry.policy == RetryAlways { - useRetry = true - } - if useRetry { - if s.retry != nil { - call.WithRetry(s.retry.backoff, s.retry.shouldRetry) - } else { - call.WithRetry(nil, nil) - } - } - resp, err = call.Do() - } - if err != nil { - errorf(err) - pr.CloseWithError(err) - return - } - setObj(newObject(resp)) - }() - - return pw, nil -} - -// IAM methods. - -func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) { - s := callSettings(c.settings, opts...) - call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(version)) - setClientHeader(call.Header()) - if s.userProject != "" { - call.UserProject(s.userProject) - } - var rp *raw.Policy - err := run(ctx, func() error { - var err error - rp, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) - if err != nil { - return nil, err - } - return iamFromStoragePolicy(rp), nil -} - -func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - - rp := iamToStoragePolicy(policy) - call := c.raw.Buckets.SetIamPolicy(resource, rp) - setClientHeader(call.Header()) - if s.userProject != "" { - call.UserProject(s.userProject) - } - - return run(ctx, func() error { - _, err := call.Context(ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) -} - -func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { - s := callSettings(c.settings, opts...) - call := c.raw.Buckets.TestIamPermissions(resource, permissions) - setClientHeader(call.Header()) - if s.userProject != "" { - call.UserProject(s.userProject) - } - var res *raw.TestIamPermissionsResponse - err := run(ctx, func() error { - var err error - res, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) - if err != nil { - return nil, err - } - return res.Permissions, nil -} - -// HMAC Key methods. - -func (c *httpStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) 
- call := c.raw.Projects.HmacKeys.Get(project, accessID) - if s.userProject != "" { - call = call.UserProject(s.userProject) - } - - var metadata *raw.HmacKeyMetadata - var err error - if err := run(ctx, func() error { - metadata, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { - return nil, err - } - hk := &raw.HmacKey{ - Metadata: metadata, - } - return toHMACKeyFromRaw(hk, false) -} - -func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { - s := callSettings(c.settings, opts...) - it := &HMACKeysIterator{ - ctx: ctx, - raw: c.raw.Projects.HmacKeys, - projectID: project, - retry: s.retry, - } - fetch := func(pageSize int, pageToken string) (token string, err error) { - call := c.raw.Projects.HmacKeys.List(project) - setClientHeader(call.Header()) - if pageToken != "" { - call = call.PageToken(pageToken) - } - if pageSize > 0 { - call = call.MaxResults(int64(pageSize)) - } - if showDeletedKeys { - call = call.ShowDeletedKeys(true) - } - if s.userProject != "" { - call = call.UserProject(s.userProject) - } - if serviceAccountEmail != "" { - call = call.ServiceAccountEmail(serviceAccountEmail) - } - - var resp *raw.HmacKeysMetadata - err = run(it.ctx, func() error { - resp, err = call.Context(it.ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) - if err != nil { - return "", err - } - - for _, metadata := range resp.Items { - hk := &raw.HmacKey{ - Metadata: metadata, - } - hkey, err := toHMACKeyFromRaw(hk, true) - if err != nil { - return "", err - } - it.hmacKeys = append(it.hmacKeys, hkey) - } - return resp.NextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - fetch, - func() int { return len(it.hmacKeys) - it.index }, - func() interface{} { - prev := it.hmacKeys - it.hmacKeys = it.hmacKeys[:0] - it.index = 0 - return prev - }) - return it -} - -func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - call := c.raw.Projects.HmacKeys.Update(project, accessID, &raw.HmacKeyMetadata{ - Etag: attrs.Etag, - State: string(attrs.State), - }) - if s.userProject != "" { - call = call.UserProject(s.userProject) - } - - var metadata *raw.HmacKeyMetadata - var err error - if err := run(ctx, func() error { - metadata, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { - return nil, err - } - hk := &raw.HmacKey{ - Metadata: metadata, - } - return toHMACKeyFromRaw(hk, false) -} - -func (c *httpStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - call := c.raw.Projects.HmacKeys.Create(project, serviceAccountEmail) - if s.userProject != "" { - call = call.UserProject(s.userProject) - } - - var hk *raw.HmacKey - if err := run(ctx, func() error { - h, err := call.Context(ctx).Do() - hk = h - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { - return nil, err - } - return toHMACKeyFromRaw(hk, true) -} - -func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { - s := callSettings(c.settings, opts...) 
- call := c.raw.Projects.HmacKeys.Delete(project, accessID) - if s.userProject != "" { - call = call.UserProject(s.userProject) - } - return run(ctx, func() error { - return call.Context(ctx).Do() - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) -} - -// Notification methods. - -// ListNotifications returns all the Notifications configured for this bucket, as a map indexed by notification ID. -// -// Note: This API does not support pagination. However, entity limits cap the number of notifications on a single bucket, -// so all results will be returned in the first response. See https://cloud.google.com/storage/quotas#buckets. -func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.ListNotifications") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - call := c.raw.Notifications.List(bucket) - if s.userProject != "" { - call.UserProject(s.userProject) - } - var res *raw.Notifications - err = run(ctx, func() error { - res, err = call.Context(ctx).Do() - return err - }, s.retry, true, setRetryHeaderHTTP(call)) - if err != nil { - return nil, err - } - return notificationsToMap(res.Items), nil -} - -func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.CreateNotification") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - call := c.raw.Notifications.Insert(bucket, toRawNotification(n)) - if s.userProject != "" { - call.UserProject(s.userProject) - } - var rn *raw.Notification - err = run(ctx, func() error { - rn, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) - if err != nil { - return nil, err - } - return toNotification(rn), nil -} - -func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.DeleteNotification") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - call := c.raw.Notifications.Delete(bucket, id) - if s.userProject != "" { - call.UserProject(s.userProject) - } - return run(ctx, func() error { - return call.Context(ctx).Do() - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) -} - -type httpReader struct { - body io.ReadCloser - seen int64 - reopen func(seen int64) (*http.Response, error) -} - -func (r *httpReader) Read(p []byte) (int, error) { - n := 0 - for len(p[n:]) > 0 { - m, err := r.body.Read(p[n:]) - n += m - r.seen += int64(m) - if err == nil || err == io.EOF { - return n, err - } - // Read failed (likely due to connection issues), but we will try to reopen - // the pipe and continue. Send a ranged read request that takes into account - // the number of bytes we've already seen. 
- res, err := r.reopen(r.seen) - if err != nil { - // reopen already retries - return n, err - } - r.body.Close() - r.body = res.Body - } - return n, nil -} - -func (r *httpReader) Close() error { - return r.body.Close() -} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go deleted file mode 100644 index 408661718f..0000000000 --- a/vendor/cloud.google.com/go/storage/iam.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - - "cloud.google.com/go/iam" - "cloud.google.com/go/internal/trace" - raw "google.golang.org/api/storage/v1" - iampb "google.golang.org/genproto/googleapis/iam/v1" - "google.golang.org/genproto/googleapis/type/expr" -) - -// IAM provides access to IAM access control for the bucket. -func (b *BucketHandle) IAM() *iam.Handle { - return iam.InternalNewHandleClient(&iamClient{ - userProject: b.userProject, - retry: b.retry, - client: b.c, - }, b.name) -} - -// iamClient implements the iam.client interface. -type iamClient struct { - userProject string - retry *retryConfig - client *Client -} - -func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { - return c.GetWithVersion(ctx, resource, 1) -} - -func (c *iamClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (p *iampb.Policy, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") - defer func() { trace.EndSpan(ctx, err) }() - - o := makeStorageOpts(true, c.retry, c.userProject) - return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...) -} - -func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") - defer func() { trace.EndSpan(ctx, err) }() - - isIdempotent := len(p.Etag) > 0 - o := makeStorageOpts(isIdempotent, c.retry, c.userProject) - return c.client.tc.SetIamPolicy(ctx, resource, p, o...) -} - -func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") - defer func() { trace.EndSpan(ctx, err) }() - - o := makeStorageOpts(true, c.retry, c.userProject) - return c.client.tc.TestIamPermissions(ctx, resource, perms, o...) 
-} - -func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { - return &raw.Policy{ - Bindings: iamToStorageBindings(ip.Bindings), - Etag: string(ip.Etag), - Version: int64(ip.Version), - } -} - -func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { - var rbs []*raw.PolicyBindings - for _, ib := range ibs { - rbs = append(rbs, &raw.PolicyBindings{ - Role: ib.Role, - Members: ib.Members, - Condition: iamToStorageCondition(ib.Condition), - }) - } - return rbs -} - -func iamToStorageCondition(exprpb *expr.Expr) *raw.Expr { - if exprpb == nil { - return nil - } - return &raw.Expr{ - Expression: exprpb.Expression, - Description: exprpb.Description, - Location: exprpb.Location, - Title: exprpb.Title, - } -} - -func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { - return &iampb.Policy{ - Bindings: iamFromStorageBindings(rp.Bindings), - Etag: []byte(rp.Etag), - } -} - -func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { - var ibs []*iampb.Binding - for _, rb := range rbs { - ibs = append(ibs, &iampb.Binding{ - Role: rb.Role, - Members: rb.Members, - Condition: iamFromStorageCondition(rb.Condition), - }) - } - return ibs -} - -func iamFromStorageCondition(rawexpr *raw.Expr) *expr.Expr { - if rawexpr == nil { - return nil - } - return &expr.Expr{ - Expression: rawexpr.Expression, - Description: rawexpr.Description, - Location: rawexpr.Location, - Title: rawexpr.Title, - } -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go deleted file mode 100644 index e33b5222ab..0000000000 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -// Package storage is an auto-generated package for the -// Cloud Storage API. -// -// Lets you store and retrieve potentially-large, immutable data objects. -// -// NOTE: This package is in alpha. It is not stable, and is likely to change. -// -// # General documentation -// -// For information about setting deadlines, reusing contexts, and more -// please visit https://pkg.go.dev/cloud.google.com/go. -// -// # Example usage -// -// To get started with this package, create a client. -// -// ctx := context.Background() -// // This snippet has been automatically generated and should be regarded as a code template only. -// // It will require modifications to work: -// // - It may require correct/in-range values for request initialization. -// // - It may require specifying regional endpoints when creating the service client as shown in: -// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options -// c, err := storage.NewClient(ctx) -// if err != nil { -// // TODO: Handle error. -// } -// defer c.Close() -// -// The client will use your default application credentials. Clients should be reused instead of created as needed. 
-// The methods of Client are safe for concurrent use by multiple goroutines. -// The returned client must be Closed when it is done being used. -// -// # Using the Client -// -// The following is an example of making an API call with the newly created client. -// -// ctx := context.Background() -// // This snippet has been automatically generated and should be regarded as a code template only. -// // It will require modifications to work: -// // - It may require correct/in-range values for request initialization. -// // - It may require specifying regional endpoints when creating the service client as shown in: -// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options -// c, err := storage.NewClient(ctx) -// if err != nil { -// // TODO: Handle error. -// } -// defer c.Close() -// -// req := &storagepb.DeleteBucketRequest{ -// // TODO: Fill request struct fields. -// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest. -// } -// err = c.DeleteBucket(ctx, req) -// if err != nil { -// // TODO: Handle error. -// } -// -// # Use of Context -// -// The ctx passed to NewClient is used for authentication requests and -// for creating the underlying connection, but is not used for subsequent calls. -// Individual methods on the client use the ctx given to them. -// -// To close the open connection, use the Close() method. -package storage // import "cloud.google.com/go/storage/internal/apiv2" - -import ( - "context" - "os" - "runtime" - "strconv" - "strings" - "unicode" - - "google.golang.org/api/option" - "google.golang.org/grpc/metadata" -) - -// For more information on implementing a client constructor hook, see -// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. -type clientHookParams struct{} -type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) - -var versionClient string - -func getVersionClient() string { - if versionClient == "" { - return "UNKNOWN" - } - return versionClient -} - -func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { - out, _ := metadata.FromOutgoingContext(ctx) - out = out.Copy() - for _, md := range mds { - for k, v := range md { - out[k] = append(out[k], v...) - } - } - return metadata.NewOutgoingContext(ctx, out) -} - -func checkDisableDeadlines() (bool, error) { - raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") - if !ok { - return false, nil - } - - b, err := strconv.ParseBool(raw) - return b, err -} - -// DefaultAuthScopes reports the default set of authentication scopes to use with this package. -func DefaultAuthScopes() []string { - return []string{ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write", - } -} - -// versionGo returns the Go runtime version. The returned string -// has no whitespace, suitable for reporting in header. 
-func versionGo() string { - const develPrefix = "devel +" - - s := runtime.Version() - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } - - notSemverRune := func(r rune) bool { - return !strings.ContainsRune("0123456789.", r) - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - s += "-" + prerelease - } - return s - } - return "UNKNOWN" -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json deleted file mode 100644 index 01103fa93b..0000000000 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json +++ /dev/null @@ -1,168 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", - "language": "go", - "protoPackage": "google.storage.v2", - "libraryPackage": "cloud.google.com/go/storage/internal/apiv2", - "services": { - "Storage": { - "clients": { - "grpc": { - "libraryClient": "Client", - "rpcs": { - "CancelResumableWrite": { - "methods": [ - "CancelResumableWrite" - ] - }, - "ComposeObject": { - "methods": [ - "ComposeObject" - ] - }, - "CreateBucket": { - "methods": [ - "CreateBucket" - ] - }, - "CreateHmacKey": { - "methods": [ - "CreateHmacKey" - ] - }, - "CreateNotification": { - "methods": [ - "CreateNotification" - ] - }, - "DeleteBucket": { - "methods": [ - "DeleteBucket" - ] - }, - "DeleteHmacKey": { - "methods": [ - "DeleteHmacKey" - ] - }, - "DeleteNotification": { - "methods": [ - "DeleteNotification" - ] - }, - "DeleteObject": { - "methods": [ - "DeleteObject" - ] - }, - "GetBucket": { - "methods": [ - "GetBucket" - ] - }, - "GetHmacKey": { - "methods": [ - "GetHmacKey" - ] - }, - "GetIamPolicy": { - "methods": [ - "GetIamPolicy" - ] - }, - "GetNotification": { - "methods": [ - "GetNotification" - ] - }, - "GetObject": { - "methods": [ - "GetObject" - ] - }, - "GetServiceAccount": { - "methods": [ - "GetServiceAccount" - ] - }, - "ListBuckets": { - "methods": [ - "ListBuckets" - ] - }, - "ListHmacKeys": { - "methods": [ - "ListHmacKeys" - ] - }, - "ListNotifications": { - "methods": [ - "ListNotifications" - ] - }, - "ListObjects": { - "methods": [ - "ListObjects" - ] - }, - "LockBucketRetentionPolicy": { - "methods": [ - "LockBucketRetentionPolicy" - ] - }, - "QueryWriteStatus": { - "methods": [ - "QueryWriteStatus" - ] - }, - "ReadObject": { - "methods": [ - "ReadObject" - ] - }, - "RewriteObject": { - "methods": [ - "RewriteObject" - ] - }, - "SetIamPolicy": { - "methods": [ - "SetIamPolicy" - ] - }, - "StartResumableWrite": { - "methods": [ - "StartResumableWrite" - ] - }, - "TestIamPermissions": { - "methods": [ - "TestIamPermissions" - ] - }, - "UpdateBucket": { - "methods": [ - "UpdateBucket" - ] - }, - "UpdateHmacKey": { - "methods": [ - "UpdateHmacKey" - ] - }, - "UpdateObject": { - "methods": [ - "UpdateObject" - ] - }, - "WriteObject": { - "methods": [ - "WriteObject" - ] - } - } - } - } - } - } -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go b/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go deleted file mode 100644 index 6ff86c4fb4..0000000000 --- 
a/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - - "google.golang.org/grpc/metadata" -) - -// InsertMetadata inserts the given gRPC metadata into the outgoing context. -func InsertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { - return insertMetadata(ctx, mds...) -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go deleted file mode 100644 index aa2c1aff57..0000000000 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ /dev/null @@ -1,1615 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -package storage - -import ( - "context" - "fmt" - "math" - "net/url" - "regexp" - "strings" - - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - gtransport "google.golang.org/api/transport/grpc" - iampb "google.golang.org/genproto/googleapis/iam/v1" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" - "google.golang.org/protobuf/proto" -) - -var newClientHook clientHook - -// CallOptions contains the retry settings for each method of Client. 
-type CallOptions struct { - DeleteBucket []gax.CallOption - GetBucket []gax.CallOption - CreateBucket []gax.CallOption - ListBuckets []gax.CallOption - LockBucketRetentionPolicy []gax.CallOption - GetIamPolicy []gax.CallOption - SetIamPolicy []gax.CallOption - TestIamPermissions []gax.CallOption - UpdateBucket []gax.CallOption - DeleteNotification []gax.CallOption - GetNotification []gax.CallOption - CreateNotification []gax.CallOption - ListNotifications []gax.CallOption - ComposeObject []gax.CallOption - DeleteObject []gax.CallOption - CancelResumableWrite []gax.CallOption - GetObject []gax.CallOption - ReadObject []gax.CallOption - UpdateObject []gax.CallOption - WriteObject []gax.CallOption - ListObjects []gax.CallOption - RewriteObject []gax.CallOption - StartResumableWrite []gax.CallOption - QueryWriteStatus []gax.CallOption - GetServiceAccount []gax.CallOption - CreateHmacKey []gax.CallOption - DeleteHmacKey []gax.CallOption - GetHmacKey []gax.CallOption - ListHmacKeys []gax.CallOption - UpdateHmacKey []gax.CallOption -} - -func defaultGRPCClientOptions() []option.ClientOption { - return []option.ClientOption{ - internaloption.WithDefaultEndpoint("storage.googleapis.com:443"), - internaloption.WithDefaultMTLSEndpoint("storage.mtls.googleapis.com:443"), - internaloption.WithDefaultAudience("https://storage.googleapis.com/"), - internaloption.WithDefaultScopes(DefaultAuthScopes()...), - internaloption.EnableJwtWithScope(), - option.WithGRPCDialOption(grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(math.MaxInt32))), - } -} - -func defaultCallOptions() *CallOptions { - return &CallOptions{ - DeleteBucket: []gax.CallOption{}, - GetBucket: []gax.CallOption{}, - CreateBucket: []gax.CallOption{}, - ListBuckets: []gax.CallOption{}, - LockBucketRetentionPolicy: []gax.CallOption{}, - GetIamPolicy: []gax.CallOption{}, - SetIamPolicy: []gax.CallOption{}, - TestIamPermissions: []gax.CallOption{}, - UpdateBucket: []gax.CallOption{}, - DeleteNotification: []gax.CallOption{}, - GetNotification: []gax.CallOption{}, - CreateNotification: []gax.CallOption{}, - ListNotifications: []gax.CallOption{}, - ComposeObject: []gax.CallOption{}, - DeleteObject: []gax.CallOption{}, - CancelResumableWrite: []gax.CallOption{}, - GetObject: []gax.CallOption{}, - ReadObject: []gax.CallOption{}, - UpdateObject: []gax.CallOption{}, - WriteObject: []gax.CallOption{}, - ListObjects: []gax.CallOption{}, - RewriteObject: []gax.CallOption{}, - StartResumableWrite: []gax.CallOption{}, - QueryWriteStatus: []gax.CallOption{}, - GetServiceAccount: []gax.CallOption{}, - CreateHmacKey: []gax.CallOption{}, - DeleteHmacKey: []gax.CallOption{}, - GetHmacKey: []gax.CallOption{}, - ListHmacKeys: []gax.CallOption{}, - UpdateHmacKey: []gax.CallOption{}, - } -} - -// internalClient is an interface that defines the methods available from Cloud Storage API. 
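// The generated defaults above attach no retry policy; callers can supply one
// per call with gax options. A minimal sketch, assuming a caller-chosen policy
// (codes is google.golang.org/grpc/codes, time is the standard library):
//
//	retry := gax.WithRetry(func() gax.Retryer {
//		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
//			Initial:    100 * time.Millisecond, // first delay; grows up to Max
//			Max:        30 * time.Second,
//			Multiplier: 2,
//		})
//	})
//	bkt, err := client.GetBucket(ctx, req, retry) // per-call options append to the defaults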
-type internalClient interface { - Close() error - setGoogleClientInfo(...string) - Connection() *grpc.ClientConn - DeleteBucket(context.Context, *storagepb.DeleteBucketRequest, ...gax.CallOption) error - GetBucket(context.Context, *storagepb.GetBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) - CreateBucket(context.Context, *storagepb.CreateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) - ListBuckets(context.Context, *storagepb.ListBucketsRequest, ...gax.CallOption) *BucketIterator - LockBucketRetentionPolicy(context.Context, *storagepb.LockBucketRetentionPolicyRequest, ...gax.CallOption) (*storagepb.Bucket, error) - GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) - SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) - TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) - UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) - DeleteNotification(context.Context, *storagepb.DeleteNotificationRequest, ...gax.CallOption) error - GetNotification(context.Context, *storagepb.GetNotificationRequest, ...gax.CallOption) (*storagepb.Notification, error) - CreateNotification(context.Context, *storagepb.CreateNotificationRequest, ...gax.CallOption) (*storagepb.Notification, error) - ListNotifications(context.Context, *storagepb.ListNotificationsRequest, ...gax.CallOption) *NotificationIterator - ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error) - DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error - CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) - GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error) - ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) - UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error) - WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) - ListObjects(context.Context, *storagepb.ListObjectsRequest, ...gax.CallOption) *ObjectIterator - RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error) - StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) - QueryWriteStatus(context.Context, *storagepb.QueryWriteStatusRequest, ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) - GetServiceAccount(context.Context, *storagepb.GetServiceAccountRequest, ...gax.CallOption) (*storagepb.ServiceAccount, error) - CreateHmacKey(context.Context, *storagepb.CreateHmacKeyRequest, ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) - DeleteHmacKey(context.Context, *storagepb.DeleteHmacKeyRequest, ...gax.CallOption) error - GetHmacKey(context.Context, *storagepb.GetHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) - ListHmacKeys(context.Context, *storagepb.ListHmacKeysRequest, ...gax.CallOption) *HmacKeyMetadataIterator - UpdateHmacKey(context.Context, *storagepb.UpdateHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) -} - -// Client is a 
client for interacting with Cloud Storage API. -// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. -// -// API Overview and Naming SyntaxThe Cloud Storage gRPC API allows applications to read and write data through -// the abstractions of buckets and objects. For a description of these -// abstractions please see https://cloud.google.com/storage/docs (at https://cloud.google.com/storage/docs). -// -// Resources are named as follows: -// -// Projects are referred to as they are defined by the Resource Manager API, -// using strings like projects/123456 or projects/my-string-id. -// -// Buckets are named using string names of the form: -// projects/{project}/buckets/{bucket} -// For globally unique buckets, _ may be substituted for the project. -// -// Objects are uniquely identified by their name along with the name of the -// bucket they belong to, as separate strings in this API. For example: -// -// ReadObjectRequest { -// bucket: ‘projects/_/buckets/my-bucket’ -// object: ‘my-object’ -// } -// Note that object names can contain / characters, which are treated as -// any other character (no special directory semantics). -type Client struct { - // The internal transport-dependent client. - internalClient internalClient - - // The call options for this service. - CallOptions *CallOptions -} - -// Wrapper methods routed to the internal client. - -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *Client) Close() error { - return c.internalClient.Close() -} - -// setGoogleClientInfo sets the name and version of the application in -// the `x-goog-api-client` header passed on each request. Intended for -// use by Google-written clients. -func (c *Client) setGoogleClientInfo(keyval ...string) { - c.internalClient.setGoogleClientInfo(keyval...) -} - -// Connection returns a connection to the API service. -// -// Deprecated: Connections are now pooled so this method does not always -// return the same resource. -func (c *Client) Connection() *grpc.ClientConn { - return c.internalClient.Connection() -} - -// DeleteBucket permanently deletes an empty bucket. -func (c *Client) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteBucket(ctx, req, opts...) -} - -// GetBucket returns metadata for the specified bucket. -func (c *Client) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - return c.internalClient.GetBucket(ctx, req, opts...) -} - -// CreateBucket creates a new bucket. -func (c *Client) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - return c.internalClient.CreateBucket(ctx, req, opts...) -} - -// ListBuckets retrieves a list of buckets for a given project. -func (c *Client) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator { - return c.internalClient.ListBuckets(ctx, req, opts...) -} - -// LockBucketRetentionPolicy locks retention policy on a bucket. -func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...) -} - -// GetIamPolicy gets the IAM policy for a specified bucket or object. 
-// The resource field in the request should be -// projects//buckets/ for a bucket or -// projects//buckets//objects/ for an object. -func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { - return c.internalClient.GetIamPolicy(ctx, req, opts...) -} - -// SetIamPolicy updates an IAM policy for the specified bucket or object. -// The resource field in the request should be -// projects//buckets/ for a bucket or -// projects//buckets//objects/ for an object. -func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { - return c.internalClient.SetIamPolicy(ctx, req, opts...) -} - -// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if -// any, are held by the caller. -// The resource field in the request should be -// projects//buckets/ for a bucket or -// projects//buckets//objects/ for an object. -func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { - return c.internalClient.TestIamPermissions(ctx, req, opts...) -} - -// UpdateBucket updates a bucket. Equivalent to JSON API’s storage.buckets.patch method. -func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - return c.internalClient.UpdateBucket(ctx, req, opts...) -} - -// DeleteNotification permanently deletes a notification subscription. -func (c *Client) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteNotification(ctx, req, opts...) -} - -// GetNotification view a notification config. -func (c *Client) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { - return c.internalClient.GetNotification(ctx, req, opts...) -} - -// CreateNotification creates a notification subscription for a given bucket. -// These notifications, when triggered, publish messages to the specified -// Pub/Sub topics. -// See https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications). -func (c *Client) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { - return c.internalClient.CreateNotification(ctx, req, opts...) -} - -// ListNotifications retrieves a list of notification subscriptions for a given bucket. -func (c *Client) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator { - return c.internalClient.ListNotifications(ctx, req, opts...) -} - -// ComposeObject concatenates a list of existing objects into a new object in the same -// bucket. -func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - return c.internalClient.ComposeObject(ctx, req, opts...) -} - -// DeleteObject deletes an object and its metadata. Deletions are permanent if versioning -// is not enabled for the bucket, or if the generation parameter is used. -func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteObject(ctx, req, opts...) 
-} - -// CancelResumableWrite cancels an in-progress resumable upload. -func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { - return c.internalClient.CancelResumableWrite(ctx, req, opts...) -} - -// GetObject retrieves an object’s metadata. -func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - return c.internalClient.GetObject(ctx, req, opts...) -} - -// ReadObject reads an object’s data. -func (c *Client) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) { - return c.internalClient.ReadObject(ctx, req, opts...) -} - -// UpdateObject updates an object’s metadata. -// Equivalent to JSON API’s storage.objects.patch. -func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - return c.internalClient.UpdateObject(ctx, req, opts...) -} - -// WriteObject stores a new object and metadata. -// -// An object can be written either in a single message stream or in a -// resumable sequence of message streams. To write using a single stream, -// the client should include in the first message of the stream an -// WriteObjectSpec describing the destination bucket, object, and any -// preconditions. Additionally, the final message must set ‘finish_write’ to -// true, or else it is an error. -// -// For a resumable write, the client should instead call -// StartResumableWrite(), populating a WriteObjectSpec into that request. -// They should then attach the returned upload_id to the first message of -// each following call to WriteObject. If the stream is closed before -// finishing the upload (either explicitly by the client or due to a network -// error or an error response from the server), the client should do as -// follows: -// -// Check the result Status of the stream, to determine if writing can be -// resumed on this stream or must be restarted from scratch (by calling -// StartResumableWrite()). The resumable errors are DEADLINE_EXCEEDED, -// INTERNAL, and UNAVAILABLE. For each case, the client should use binary -// exponential backoff before retrying. Additionally, writes can be -// resumed after RESOURCE_EXHAUSTED errors, but only after taking -// appropriate measures, which may include reducing aggregate send rate -// across clients and/or requesting a quota increase for your project. -// -// If the call to WriteObject returns ABORTED, that indicates -// concurrent attempts to update the resumable write, caused either by -// multiple racing clients or by a single client where the previous -// request was timed out on the client side but nonetheless reached the -// server. In this case the client should take steps to prevent further -// concurrent writes (e.g., increase the timeouts, stop using more than -// one process to perform the upload, etc.), and then should follow the -// steps below for resuming the upload. -// -// For resumable errors, the client should call QueryWriteStatus() and -// then continue writing from the returned persisted_size. This may be -// less than the amount of data the client previously sent. 
Note also that -// it is acceptable to send data starting at an offset earlier than the -// returned persisted_size; in this case, the service will skip data at -// offsets that were already persisted (without checking that it matches -// the previously written data), and write only the data starting from the -// persisted offset. This behavior can make client-side handling simpler -// in some cases. -// -// The service will not view the object as complete until the client has -// sent a WriteObjectRequest with finish_write set to true. Sending any -// requests on a stream after sending a request with finish_write set to -// true will cause an error. The client should check the response it -// receives to determine how much data the service was able to commit and -// whether the service views the object as complete. -// -// Attempting to resume an already finalized object will result in an OK -// status, with a WriteObjectResponse containing the finalized object’s -// metadata. -func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { - return c.internalClient.WriteObject(ctx, opts...) -} - -// ListObjects retrieves a list of objects matching the criteria. -func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { - return c.internalClient.ListObjects(ctx, req, opts...) -} - -// RewriteObject rewrites a source object to a destination object. Optionally overrides -// metadata. -func (c *Client) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) { - return c.internalClient.RewriteObject(ctx, req, opts...) -} - -// StartResumableWrite starts a resumable write. How long the write operation remains valid, and -// what happens when the write operation becomes invalid, are -// service-dependent. -func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) { - return c.internalClient.StartResumableWrite(ctx, req, opts...) -} - -// QueryWriteStatus determines the persisted_size for an object that is being written, which -// can then be used as the write_offset for the next Write() call. -// -// If the object does not exist (i.e., the object has been deleted, or the -// first Write() has not yet reached the service), this method returns the -// error NOT_FOUND. -// -// The client may call QueryWriteStatus() at any time to determine how -// much data has been processed for this object. This is useful if the -// client is buffering data and needs to know which data can be safely -// evicted. For any sequence of QueryWriteStatus() calls for a given -// object name, the sequence of returned persisted_size values will be -// non-decreasing. -func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) { - return c.internalClient.QueryWriteStatus(ctx, req, opts...) -} - -// GetServiceAccount retrieves the name of a project’s Google Cloud Storage service account. -func (c *Client) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) { - return c.internalClient.GetServiceAccount(ctx, req, opts...) -} - -// CreateHmacKey creates a new HMAC key for the given service account. 
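// A condensed sketch of the resumable-write flow WriteObject and
// QueryWriteStatus describe above (illustrative only; chunking, checksums,
// and error handling elided; spec and buf are assumed inputs):
//
//	start, _ := client.StartResumableWrite(ctx, &storagepb.StartResumableWriteRequest{WriteObjectSpec: spec})
//	w, _ := client.WriteObject(ctx)
//	_ = w.Send(&storagepb.WriteObjectRequest{
//		FirstMessage: &storagepb.WriteObjectRequest_UploadId{UploadId: start.GetUploadId()},
//		WriteOffset:  0,
//		Data:         &storagepb.WriteObjectRequest_ChecksummedData{ChecksummedData: &storagepb.ChecksummedData{Content: buf}},
//		FinishWrite:  true,
//	})
//	if _, err := w.CloseAndRecv(); err != nil {
//		// On a resumable error, ask how much was persisted and resume from there.
//		st, _ := client.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{UploadId: start.GetUploadId()})
//		_ = st.GetPersistedSize()
//	}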
-func (c *Client) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) { - return c.internalClient.CreateHmacKey(ctx, req, opts...) -} - -// DeleteHmacKey deletes a given HMAC key. Key must be in an INACTIVE state. -func (c *Client) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteHmacKey(ctx, req, opts...) -} - -// GetHmacKey gets an existing HMAC key metadata for the given id. -func (c *Client) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - return c.internalClient.GetHmacKey(ctx, req, opts...) -} - -// ListHmacKeys lists HMAC keys under a given project with the additional filters provided. -func (c *Client) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { - return c.internalClient.ListHmacKeys(ctx, req, opts...) -} - -// UpdateHmacKey updates a given HMAC key state between ACTIVE and INACTIVE. -func (c *Client) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - return c.internalClient.UpdateHmacKey(ctx, req, opts...) -} - -// gRPCClient is a client for interacting with Cloud Storage API over gRPC transport. -// -// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. -type gRPCClient struct { - // Connection pool of gRPC connections to the service. - connPool gtransport.ConnPool - - // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE - disableDeadlines bool - - // Points back to the CallOptions field of the containing Client - CallOptions **CallOptions - - // The gRPC API client. - client storagepb.StorageClient - - // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD -} - -// NewClient creates a new storage client based on gRPC. -// The returned client must be Closed when it is done being used to clean up its underlying connections. -// -// API Overview and Naming SyntaxThe Cloud Storage gRPC API allows applications to read and write data through -// the abstractions of buckets and objects. For a description of these -// abstractions please see https://cloud.google.com/storage/docs (at https://cloud.google.com/storage/docs). -// -// Resources are named as follows: -// -// Projects are referred to as they are defined by the Resource Manager API, -// using strings like projects/123456 or projects/my-string-id. -// -// Buckets are named using string names of the form: -// projects/{project}/buckets/{bucket} -// For globally unique buckets, _ may be substituted for the project. -// -// Objects are uniquely identified by their name along with the name of the -// bucket they belong to, as separate strings in this API. For example: -// -// ReadObjectRequest { -// bucket: ‘projects/_/buckets/my-bucket’ -// object: ‘my-object’ -// } -// Note that object names can contain / characters, which are treated as -// any other character (no special directory semantics). 
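// A minimal usage sketch of NewClient (assumed project name; mirrors the
// doc.go example earlier in this diff):
//
//	c, err := storage.NewClient(ctx)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	defer c.Close()
//	it := c.ListBuckets(ctx, &storagepb.ListBucketsRequest{Parent: "projects/my-project"})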
-func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - clientOpts := defaultGRPCClientOptions() - if newClientHook != nil { - hookOpts, err := newClientHook(ctx, clientHookParams{}) - if err != nil { - return nil, err - } - clientOpts = append(clientOpts, hookOpts...) - } - - disableDeadlines, err := checkDisableDeadlines() - if err != nil { - return nil, err - } - - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) - if err != nil { - return nil, err - } - client := Client{CallOptions: defaultCallOptions()} - - c := &gRPCClient{ - connPool: connPool, - disableDeadlines: disableDeadlines, - client: storagepb.NewStorageClient(connPool), - CallOptions: &client.CallOptions, - } - c.setGoogleClientInfo() - - client.internalClient = c - - return &client, nil -} - -// Connection returns a connection to the API service. -// -// Deprecated: Connections are now pooled so this method does not always -// return the same resource. -func (c *gRPCClient) Connection() *grpc.ClientConn { - return c.connPool.Conn() -} - -// setGoogleClientInfo sets the name and version of the application in -// the `x-goog-api-client` header passed on each request. Intended for -// use by Google-written clients. -func (c *gRPCClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) -} - -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *gRPCClient) Close() error { - return c.connPool.Close() -} - -func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.client.DeleteBucket(ctx, req, settings.GRPC...) - return err - }, opts...)
- return err -} - -func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...) - var resp *storagepb.Bucket - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetBucket(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).CreateBucket[0:len((*c.CallOptions).CreateBucket):len((*c.CallOptions).CreateBucket)], opts...) - var resp *storagepb.Bucket - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.CreateBucket(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).ListBuckets[0:len((*c.CallOptions).ListBuckets):len((*c.CallOptions).ListBuckets)], opts...)
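// Worked example of the routing-header construction used throughout this file,
// for an assumed request name:
//
//	req.GetName() == "projects/_/buckets/my-bucket"
//	→ routingHeadersMap["bucket"] = "projects%2F_%2Fbuckets%2Fmy-bucket" (url.QueryEscape percent-encodes the slashes)
//	→ x-goog-request-params: bucket=projects%2F_%2Fbuckets%2Fmy-bucket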
- it := &BucketIterator{} - req = proto.Clone(req).(*storagepb.ListBucketsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Bucket, string, error) { - resp := &storagepb.ListBucketsResponse{} - if pageToken != "" { - req.PageToken = pageToken - } - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else if pageSize != 0 { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ListBuckets(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.GetBuckets(), resp.GetNextPageToken(), nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.GetPageSize()) - it.pageInfo.Token = req.GetPageToken() - - return it -} - -func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).LockBucketRetentionPolicy[0:len((*c.CallOptions).LockBucketRetentionPolicy):len((*c.CallOptions).LockBucketRetentionPolicy)], opts...) - var resp *storagepb.Bucket - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.LockBucketRetentionPolicy(ctx, req, settings.GRPC...) - return err - }, opts...)
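// Sketch of consuming the iterator built above (iterator.Done is the standard
// google.golang.org/api/iterator sentinel):
//
//	it := client.ListBuckets(ctx, req)
//	for {
//		bkt, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// TODO: Handle error.
//		}
//		_ = bkt
//	}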
- if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) - } - if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) - var resp *iampb.Policy - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetIamPolicy(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) - } - if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) - var resp *iampb.Policy - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.SetIamPolicy(ctx, req, settings.GRPC...) - return err - }, opts...)
- if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) - } - if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) - var resp *iampb.TestIamPermissionsResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.TestIamPermissions(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket().GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...) - var resp *storagepb.Bucket - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.UpdateBucket(ctx, req, settings.GRPC...) - return err - }, opts...)
- if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).DeleteNotification[0:len((*c.CallOptions).DeleteNotification):len((*c.CallOptions).DeleteNotification)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.client.DeleteNotification(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).GetNotification[0:len((*c.CallOptions).GetNotification):len((*c.CallOptions).GetNotification)], opts...) - var resp *storagepb.Notification - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetNotification(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).CreateNotification[0:len((*c.CallOptions).CreateNotification):len((*c.CallOptions).CreateNotification)], opts...)
- var resp *storagepb.Notification - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.CreateNotification(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).ListNotifications[0:len((*c.CallOptions).ListNotifications):len((*c.CallOptions).ListNotifications)], opts...) - it := &NotificationIterator{} - req = proto.Clone(req).(*storagepb.ListNotificationsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Notification, string, error) { - resp := &storagepb.ListNotificationsResponse{} - if pageToken != "" { - req.PageToken = pageToken - } - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else if pageSize != 0 { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ListNotifications(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.GetNotifications(), resp.GetNextPageToken(), nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.GetPageSize()) - it.pageInfo.Token = req.GetPageToken() - - return it -} - -func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetDestination().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).ComposeObject[0:len((*c.CallOptions).ComposeObject):len((*c.CallOptions).ComposeObject)], opts...)
- var resp *storagepb.Object - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ComposeObject(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.client.DeleteObject(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).CancelResumableWrite[0:len((*c.CallOptions).CancelResumableWrite):len((*c.CallOptions).CancelResumableWrite)], opts...) - var resp *storagepb.CancelResumableWriteResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...) - return err - }, opts...)
- if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).GetObject[0:len((*c.CallOptions).GetObject):len((*c.CallOptions).GetObject)], opts...) - var resp *storagepb.Object - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetObject(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).ReadObject[0:len((*c.CallOptions).ReadObject):len((*c.CallOptions).ReadObject)], opts...) - var resp storagepb.Storage_ReadObjectClient - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ReadObject(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetObject().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).UpdateObject[0:len((*c.CallOptions).UpdateObject):len((*c.CallOptions).UpdateObject)], opts...)
- var resp *storagepb.Object - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.UpdateObject(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) - var resp storagepb.Storage_WriteObjectClient - opts = append((*c.CallOptions).WriteObject[0:len((*c.CallOptions).WriteObject):len((*c.CallOptions).WriteObject)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.WriteObject(ctx, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).ListObjects[0:len((*c.CallOptions).ListObjects):len((*c.CallOptions).ListObjects)], opts...) - it := &ObjectIterator{} - req = proto.Clone(req).(*storagepb.ListObjectsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Object, string, error) { - resp := &storagepb.ListObjectsResponse{} - if pageToken != "" { - req.PageToken = pageToken - } - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else if pageSize != 0 { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ListObjects(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.GetObjects(), resp.GetNextPageToken(), nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...)
- return nextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.GetPageSize()) - it.pageInfo.Token = req.GetPageToken() - - return it -} - -func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<source_bucket>.*)"); reg.MatchString(req.GetSourceBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])) > 0 { - routingHeadersMap["source_bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1]) - } - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetDestinationBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).RewriteObject[0:len((*c.CallOptions).RewriteObject):len((*c.CallOptions).RewriteObject)], opts...) - var resp *storagepb.RewriteResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.RewriteObject(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetWriteObjectSpec().GetResource().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...) - var resp *storagepb.StartResumableWriteResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.StartResumableWrite(ctx, req, settings.GRPC...) - return err - }, opts...)
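// RewriteObject above may return before the copy completes; callers loop until
// Done, feeding the token back. A minimal sketch (request fields assumed set):
//
//	for {
//		resp, err := client.RewriteObject(ctx, req)
//		if err != nil {
//			// TODO: Handle error.
//		}
//		if resp.GetDone() {
//			break
//		}
//		req.RewriteToken = resp.GetRewriteToken() // resume where the previous call stopped
//	}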
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- md := metadata.Pairs("x-goog-request-params", routingHeaders)
-
- ctx = insertMetadata(ctx, c.xGoogMetadata, md)
- opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...)
- var resp *storagepb.QueryWriteStatusResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.QueryWriteStatus(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- md := metadata.Pairs("x-goog-request-params", routingHeaders)
-
- ctx = insertMetadata(ctx, c.xGoogMetadata, md)
- opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...)
- var resp *storagepb.ServiceAccount
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.GetServiceAccount(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- md := metadata.Pairs("x-goog-request-params", routingHeaders)
-
- ctx = insertMetadata(ctx, c.xGoogMetadata, md)
- opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...)
- var resp *storagepb.CreateHmacKeyResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.CreateHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.client.DeleteHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...) - var resp *storagepb.HmacKeyMetadata - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...) 
- it := &HmacKeyMetadataIterator{}
- req = proto.Clone(req).(*storagepb.ListHmacKeysRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.HmacKeyMetadata, string, error) {
- resp := &storagepb.ListHmacKeysResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.ListHmacKeys(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetHmacKeys(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetHmacKey().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- md := metadata.Pairs("x-goog-request-params", routingHeaders)
-
- ctx = insertMetadata(ctx, c.xGoogMetadata, md)
- opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...)
- var resp *storagepb.HmacKeyMetadata
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.UpdateHmacKey(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-// BucketIterator manages a stream of *storagepb.Bucket.
-type BucketIterator struct {
- items []*storagepb.Bucket
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Bucket, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *BucketIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results.
Once Next returns Done, all subsequent calls will return Done. -func (it *BucketIterator) Next() (*storagepb.Bucket, error) { - var item *storagepb.Bucket - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *BucketIterator) bufLen() int { - return len(it.items) -} - -func (it *BucketIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata. -type HmacKeyMetadataIterator struct { - items []*storagepb.HmacKeyMetadata - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) { - var item *storagepb.HmacKeyMetadata - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *HmacKeyMetadataIterator) bufLen() int { - return len(it.items) -} - -func (it *HmacKeyMetadataIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// NotificationIterator manages a stream of *storagepb.Notification. -type NotificationIterator struct { - items []*storagepb.Notification - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Notification, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *NotificationIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. 
-func (it *NotificationIterator) Next() (*storagepb.Notification, error) { - var item *storagepb.Notification - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *NotificationIterator) bufLen() int { - return len(it.items) -} - -func (it *NotificationIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// ObjectIterator manages a stream of *storagepb.Object. -type ObjectIterator struct { - items []*storagepb.Object - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Object, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *ObjectIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *ObjectIterator) Next() (*storagepb.Object, error) { - var item *storagepb.Object - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *ObjectIterator) bufLen() int { - return len(it.items) -} - -func (it *ObjectIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go deleted file mode 100644 index f81e216c57..0000000000 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go +++ /dev/null @@ -1,10746 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions:
-// protoc-gen-go v1.26.0
-// protoc v3.21.9
-// source: google/storage/v2/storage.proto
-
-package storage
-
-import (
- context "context"
- reflect "reflect"
- sync "sync"
-
- _ "google.golang.org/genproto/googleapis/api/annotations"
- v1 "google.golang.org/genproto/googleapis/iam/v1"
- date "google.golang.org/genproto/googleapis/type/date"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- durationpb "google.golang.org/protobuf/types/known/durationpb"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
- fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// A collection of constant values meaningful to the Storage API.
-type ServiceConstants_Values int32
-
-const (
- // Unused. Proto3 requires first enum to be 0.
- ServiceConstants_VALUES_UNSPECIFIED ServiceConstants_Values = 0
- // The maximum size chunk that can be returned in a single
- // ReadRequest.
- // 2 MiB.
- ServiceConstants_MAX_READ_CHUNK_BYTES ServiceConstants_Values = 2097152
- // The maximum size chunk that can be sent in a single WriteObjectRequest.
- // 2 MiB.
- ServiceConstants_MAX_WRITE_CHUNK_BYTES ServiceConstants_Values = 2097152
- // The maximum size of an object in MB - whether written in a single stream
- // or composed from multiple other objects.
- // 5 TiB.
- ServiceConstants_MAX_OBJECT_SIZE_MB ServiceConstants_Values = 5242880
- // The maximum length field name that can be sent in a single
- // custom metadata field.
- // 1 KiB.
- ServiceConstants_MAX_CUSTOM_METADATA_FIELD_NAME_BYTES ServiceConstants_Values = 1024
- // The maximum length field value that can be sent in a single
- // custom_metadata field.
- // 4 KiB.
- ServiceConstants_MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES ServiceConstants_Values = 4096
- // The maximum total bytes that can be populated into all field names and
- // values of the custom_metadata for one object.
- // 8 KiB.
- ServiceConstants_MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES ServiceConstants_Values = 8192
- // The maximum total bytes that can be populated into all bucket metadata
- // fields.
- // 20 KiB.
- ServiceConstants_MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES ServiceConstants_Values = 20480
- // The maximum number of NotificationConfigs that can be registered
- // for a given bucket.
- ServiceConstants_MAX_NOTIFICATION_CONFIGS_PER_BUCKET ServiceConstants_Values = 100
- // The maximum number of LifecycleRules that can be registered for a given
- // bucket.
- ServiceConstants_MAX_LIFECYCLE_RULES_PER_BUCKET ServiceConstants_Values = 100
- // The maximum number of custom attributes per NotificationConfigs.
- ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTES ServiceConstants_Values = 5
- // The maximum length of a custom attribute key included in
- // NotificationConfig.
- ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH ServiceConstants_Values = 256
- // The maximum length of a custom attribute value included in a
- // NotificationConfig.
- ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH ServiceConstants_Values = 1024 - // The maximum number of key/value entries per bucket label. - ServiceConstants_MAX_LABELS_ENTRIES_COUNT ServiceConstants_Values = 64 - // The maximum character length of the key or value in a bucket - // label map. - ServiceConstants_MAX_LABELS_KEY_VALUE_LENGTH ServiceConstants_Values = 63 - // The maximum byte size of the key or value in a bucket label - // map. - ServiceConstants_MAX_LABELS_KEY_VALUE_BYTES ServiceConstants_Values = 128 - // The maximum number of object IDs that can be included in a - // DeleteObjectsRequest. - ServiceConstants_MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST ServiceConstants_Values = 1000 - // The maximum number of days for which a token returned by the - // GetListObjectsSplitPoints RPC is valid. - ServiceConstants_SPLIT_TOKEN_MAX_VALID_DAYS ServiceConstants_Values = 14 -) - -// Enum value maps for ServiceConstants_Values. -var ( - ServiceConstants_Values_name = map[int32]string{ - 0: "VALUES_UNSPECIFIED", - 2097152: "MAX_READ_CHUNK_BYTES", - // Duplicate value: 2097152: "MAX_WRITE_CHUNK_BYTES", - 5242880: "MAX_OBJECT_SIZE_MB", - 1024: "MAX_CUSTOM_METADATA_FIELD_NAME_BYTES", - 4096: "MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES", - 8192: "MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES", - 20480: "MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES", - 100: "MAX_NOTIFICATION_CONFIGS_PER_BUCKET", - // Duplicate value: 100: "MAX_LIFECYCLE_RULES_PER_BUCKET", - 5: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTES", - 256: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH", - // Duplicate value: 1024: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH", - 64: "MAX_LABELS_ENTRIES_COUNT", - 63: "MAX_LABELS_KEY_VALUE_LENGTH", - 128: "MAX_LABELS_KEY_VALUE_BYTES", - 1000: "MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST", - 14: "SPLIT_TOKEN_MAX_VALID_DAYS", - } - ServiceConstants_Values_value = map[string]int32{ - "VALUES_UNSPECIFIED": 0, - "MAX_READ_CHUNK_BYTES": 2097152, - "MAX_WRITE_CHUNK_BYTES": 2097152, - "MAX_OBJECT_SIZE_MB": 5242880, - "MAX_CUSTOM_METADATA_FIELD_NAME_BYTES": 1024, - "MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES": 4096, - "MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES": 8192, - "MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES": 20480, - "MAX_NOTIFICATION_CONFIGS_PER_BUCKET": 100, - "MAX_LIFECYCLE_RULES_PER_BUCKET": 100, - "MAX_NOTIFICATION_CUSTOM_ATTRIBUTES": 5, - "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH": 256, - "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH": 1024, - "MAX_LABELS_ENTRIES_COUNT": 64, - "MAX_LABELS_KEY_VALUE_LENGTH": 63, - "MAX_LABELS_KEY_VALUE_BYTES": 128, - "MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST": 1000, - "SPLIT_TOKEN_MAX_VALID_DAYS": 14, - } -) - -func (x ServiceConstants_Values) Enum() *ServiceConstants_Values { - p := new(ServiceConstants_Values) - *p = x - return p -} - -func (x ServiceConstants_Values) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ServiceConstants_Values) Descriptor() protoreflect.EnumDescriptor { - return file_google_storage_v2_storage_proto_enumTypes[0].Descriptor() -} - -func (ServiceConstants_Values) Type() protoreflect.EnumType { - return &file_google_storage_v2_storage_proto_enumTypes[0] -} - -func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ServiceConstants_Values.Descriptor instead. 
-func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39, 0} -} - -// Request message for DeleteBucket. -type DeleteBucketRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of a bucket to delete. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // If set, only deletes the bucket if its metageneration matches this value. - IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // If set, only deletes the bucket if its metageneration does not match this - // value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` -} - -func (x *DeleteBucketRequest) Reset() { - *x = DeleteBucketRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteBucketRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteBucketRequest) ProtoMessage() {} - -func (x *DeleteBucketRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteBucketRequest.ProtoReflect.Descriptor instead. -func (*DeleteBucketRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{0} -} - -func (x *DeleteBucketRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *DeleteBucketRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *DeleteBucketRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -// Request message for GetBucket. -type GetBucketRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of a bucket. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // If set, and if the bucket's current metageneration does not match the - // specified value, the request will return an error. - IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // If set, and if the bucket's current metageneration matches the specified - // value, the request will return an error. - IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // Mask specifying which fields to read. - // A "*" field may be used to indicate all fields. - // If no mask is specified, will default to all fields. 
- ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` -} - -func (x *GetBucketRequest) Reset() { - *x = GetBucketRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetBucketRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetBucketRequest) ProtoMessage() {} - -func (x *GetBucketRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetBucketRequest.ProtoReflect.Descriptor instead. -func (*GetBucketRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{1} -} - -func (x *GetBucketRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *GetBucketRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *GetBucketRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *GetBucketRequest) GetReadMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.ReadMask - } - return nil -} - -// Request message for CreateBucket. -type CreateBucketRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project to which this bucket will belong. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Properties of the new bucket being inserted. - // The project and name of the bucket are specified in the parent and - // bucket_id fields, respectively. Populating those fields in `bucket` will - // result in an error. - Bucket *Bucket `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Required. The ID to use for this bucket, which will become the final - // component of the bucket's resource name. For example, the value `foo` might - // result in a bucket with the name `projects/123456/buckets/foo`. - BucketId string `protobuf:"bytes,3,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` - // Apply a predefined set of access controls to this bucket. - // Valid values are "authenticatedRead", "private", "projectPrivate", - // "publicRead", or "publicReadWrite". - PredefinedAcl string `protobuf:"bytes,6,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` - // Apply a predefined set of default object access controls to this bucket. - // Valid values are "authenticatedRead", "bucketOwnerFullControl", - // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". 
- PredefinedDefaultObjectAcl string `protobuf:"bytes,7,opt,name=predefined_default_object_acl,json=predefinedDefaultObjectAcl,proto3" json:"predefined_default_object_acl,omitempty"`
-}
-
-func (x *CreateBucketRequest) Reset() {
- *x = CreateBucketRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateBucketRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateBucketRequest) ProtoMessage() {}
-
-func (x *CreateBucketRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateBucketRequest.ProtoReflect.Descriptor instead.
-func (*CreateBucketRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *CreateBucketRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *CreateBucketRequest) GetBucket() *Bucket {
- if x != nil {
- return x.Bucket
- }
- return nil
-}
-
-func (x *CreateBucketRequest) GetBucketId() string {
- if x != nil {
- return x.BucketId
- }
- return ""
-}
-
-func (x *CreateBucketRequest) GetPredefinedAcl() string {
- if x != nil {
- return x.PredefinedAcl
- }
- return ""
-}
-
-func (x *CreateBucketRequest) GetPredefinedDefaultObjectAcl() string {
- if x != nil {
- return x.PredefinedDefaultObjectAcl
- }
- return ""
-}
-
-// Request message for ListBuckets.
-type ListBucketsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The project whose buckets we are listing.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Maximum number of buckets to return in a single response. The service will
- // use this parameter or 1,000 items, whichever is smaller. If "acl" is
- // present in the read_mask, the service will use this parameter or 200 items,
- // whichever is smaller.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // A previously-returned page token representing part of the larger set of
- // results to view.
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
- // Filter results to buckets whose names begin with this prefix.
- Prefix string `protobuf:"bytes,4,opt,name=prefix,proto3" json:"prefix,omitempty"`
- // Mask specifying which fields to read from each result.
- // If no mask is specified, will default to all fields except items.owner,
- // items.acl, and items.default_object_acl.
- // * may be used to mean "all fields".
- ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` -} - -func (x *ListBucketsRequest) Reset() { - *x = ListBucketsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListBucketsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListBucketsRequest) ProtoMessage() {} - -func (x *ListBucketsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListBucketsRequest.ProtoReflect.Descriptor instead. -func (*ListBucketsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{3} -} - -func (x *ListBucketsRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListBucketsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListBucketsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -func (x *ListBucketsRequest) GetPrefix() string { - if x != nil { - return x.Prefix - } - return "" -} - -func (x *ListBucketsRequest) GetReadMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.ReadMask - } - return nil -} - -// The result of a call to Buckets.ListBuckets -type ListBucketsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of items. - Buckets []*Bucket `protobuf:"bytes,1,rep,name=buckets,proto3" json:"buckets,omitempty"` - // The continuation token, used to page through large result sets. Provide - // this value in a subsequent request to return the next page of results. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListBucketsResponse) Reset() { - *x = ListBucketsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListBucketsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListBucketsResponse) ProtoMessage() {} - -func (x *ListBucketsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListBucketsResponse.ProtoReflect.Descriptor instead. -func (*ListBucketsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{4} -} - -func (x *ListBucketsResponse) GetBuckets() []*Bucket { - if x != nil { - return x.Buckets - } - return nil -} - -func (x *ListBucketsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request message for LockBucketRetentionPolicyRequest. 
-type LockBucketRetentionPolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of a bucket. - Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Required. Makes the operation conditional on whether bucket's current - // metageneration matches the given value. Must be positive. - IfMetagenerationMatch int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3" json:"if_metageneration_match,omitempty"` -} - -func (x *LockBucketRetentionPolicyRequest) Reset() { - *x = LockBucketRetentionPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LockBucketRetentionPolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LockBucketRetentionPolicyRequest) ProtoMessage() {} - -func (x *LockBucketRetentionPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LockBucketRetentionPolicyRequest.ProtoReflect.Descriptor instead. -func (*LockBucketRetentionPolicyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{5} -} - -func (x *LockBucketRetentionPolicyRequest) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *LockBucketRetentionPolicyRequest) GetIfMetagenerationMatch() int64 { - if x != nil { - return x.IfMetagenerationMatch - } - return 0 -} - -// Request for UpdateBucket method. -type UpdateBucketRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The bucket to update. - // The bucket's `name` field will be used to identify the bucket. - Bucket *Bucket `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // If set, will only modify the bucket if its metageneration matches this - // value. - IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // If set, will only modify the bucket if its metageneration does not match - // this value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // Apply a predefined set of access controls to this bucket. - // Valid values are "authenticatedRead", "private", "projectPrivate", - // "publicRead", or "publicReadWrite". - PredefinedAcl string `protobuf:"bytes,8,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` - // Apply a predefined set of default object access controls to this bucket. - // Valid values are "authenticatedRead", "bucketOwnerFullControl", - // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". - PredefinedDefaultObjectAcl string `protobuf:"bytes,9,opt,name=predefined_default_object_acl,json=predefinedDefaultObjectAcl,proto3" json:"predefined_default_object_acl,omitempty"` - // Required. List of fields to be updated. 
- // - // To specify ALL fields, equivalent to the JSON API's "update" function, - // specify a single field with the value `*`. Note: not recommended. If a new - // field is introduced at a later time, an older client updating with the `*` - // may accidentally reset the new field's value. - // - // Not specifying any fields is an error. - // Not specifying a field while setting that field to a non-default value is - // an error. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` -} - -func (x *UpdateBucketRequest) Reset() { - *x = UpdateBucketRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateBucketRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateBucketRequest) ProtoMessage() {} - -func (x *UpdateBucketRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateBucketRequest.ProtoReflect.Descriptor instead. -func (*UpdateBucketRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{6} -} - -func (x *UpdateBucketRequest) GetBucket() *Bucket { - if x != nil { - return x.Bucket - } - return nil -} - -func (x *UpdateBucketRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *UpdateBucketRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *UpdateBucketRequest) GetPredefinedAcl() string { - if x != nil { - return x.PredefinedAcl - } - return "" -} - -func (x *UpdateBucketRequest) GetPredefinedDefaultObjectAcl() string { - if x != nil { - return x.PredefinedDefaultObjectAcl - } - return "" -} - -func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -// Request message for DeleteNotification. -type DeleteNotificationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The parent bucket of the notification. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *DeleteNotificationRequest) Reset() { - *x = DeleteNotificationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteNotificationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteNotificationRequest) ProtoMessage() {} - -func (x *DeleteNotificationRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteNotificationRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteNotificationRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7} -} - -func (x *DeleteNotificationRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Request message for GetNotification. -type GetNotificationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The parent bucket of the notification. - // Format: - // `projects/{project}/buckets/{bucket}/notificationConfigs/{notification}` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetNotificationRequest) Reset() { - *x = GetNotificationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNotificationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNotificationRequest) ProtoMessage() {} - -func (x *GetNotificationRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNotificationRequest.ProtoReflect.Descriptor instead. -func (*GetNotificationRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8} -} - -func (x *GetNotificationRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Request message for CreateNotification. -type CreateNotificationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The bucket to which this notification belongs. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. Properties of the notification to be inserted. - Notification *Notification `protobuf:"bytes,2,opt,name=notification,proto3" json:"notification,omitempty"` -} - -func (x *CreateNotificationRequest) Reset() { - *x = CreateNotificationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateNotificationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateNotificationRequest) ProtoMessage() {} - -func (x *CreateNotificationRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateNotificationRequest.ProtoReflect.Descriptor instead. -func (*CreateNotificationRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9} -} - -func (x *CreateNotificationRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *CreateNotificationRequest) GetNotification() *Notification { - if x != nil { - return x.Notification - } - return nil -} - -// Request message for ListNotifications. 
-type ListNotificationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of a Google Cloud Storage bucket. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // The maximum number of notifications to return. The service may return fewer - // than this value. - // The default value is 100. Specifying a value above 100 will result in a - // page_size of 100. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A page token, received from a previous `ListNotifications` call. - // Provide this to retrieve the subsequent page. - // - // When paginating, all other parameters provided to `ListNotifications` must - // match the call that provided the page token. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListNotificationsRequest) Reset() { - *x = ListNotificationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNotificationsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNotificationsRequest) ProtoMessage() {} - -func (x *ListNotificationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNotificationsRequest.ProtoReflect.Descriptor instead. -func (*ListNotificationsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10} -} - -func (x *ListNotificationsRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListNotificationsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListNotificationsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The result of a call to Notifications.ListNotifications -type ListNotificationsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of items. - Notifications []*Notification `protobuf:"bytes,1,rep,name=notifications,proto3" json:"notifications,omitempty"` - // A token, which can be sent as `page_token` to retrieve the next page. - // If this field is omitted, there are no subsequent pages. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListNotificationsResponse) Reset() { - *x = ListNotificationsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNotificationsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNotificationsResponse) ProtoMessage() {} - -func (x *ListNotificationsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNotificationsResponse.ProtoReflect.Descriptor instead. -func (*ListNotificationsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11} -} - -func (x *ListNotificationsResponse) GetNotifications() []*Notification { - if x != nil { - return x.Notifications - } - return nil -} - -func (x *ListNotificationsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request message for ComposeObject. -type ComposeObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Properties of the resulting object. - Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` - // The list of source objects that will be concatenated into a single object. - SourceObjects []*ComposeObjectRequest_SourceObject `protobuf:"bytes,2,rep,name=source_objects,json=sourceObjects,proto3" json:"source_objects,omitempty"` - // Apply a predefined set of access controls to the destination object. - // Valid values are "authenticatedRead", "bucketOwnerFullControl", - // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". - DestinationPredefinedAcl string `protobuf:"bytes,9,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Resource name of the Cloud KMS key, of the form - // `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`, - // that will be used to encrypt the object. Overrides the object - // metadata's `kms_key_name` value, if any. - KmsKey string `protobuf:"bytes,6,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` - // A set of parameters common to Storage API requests concerning an object. 
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,7,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` - // The checksums of the complete object. This will be validated against the - // combined checksums of the component objects. - ObjectChecksums *ObjectChecksums `protobuf:"bytes,10,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` -} - -func (x *ComposeObjectRequest) Reset() { - *x = ComposeObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ComposeObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ComposeObjectRequest) ProtoMessage() {} - -func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead. -func (*ComposeObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12} -} - -func (x *ComposeObjectRequest) GetDestination() *Object { - if x != nil { - return x.Destination - } - return nil -} - -func (x *ComposeObjectRequest) GetSourceObjects() []*ComposeObjectRequest_SourceObject { - if x != nil { - return x.SourceObjects - } - return nil -} - -func (x *ComposeObjectRequest) GetDestinationPredefinedAcl() string { - if x != nil { - return x.DestinationPredefinedAcl - } - return "" -} - -func (x *ComposeObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *ComposeObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *ComposeObjectRequest) GetKmsKey() string { - if x != nil { - return x.KmsKey - } - return "" -} - -func (x *ComposeObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -func (x *ComposeObjectRequest) GetObjectChecksums() *ObjectChecksums { - if x != nil { - return x.ObjectChecksums - } - return nil -} - -// Message for deleting an object. -// `bucket` and `object` **must** be set. -type DeleteObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of the bucket in which the object resides. - Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Required. The name of the finalized object to delete. - // Note: If you want to delete an unfinalized resumable upload please use - // `CancelResumableWrite`. - Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - // If present, permanently deletes a specific revision of this object (as - // opposed to the latest version, the default). - Generation int64 `protobuf:"varint,4,opt,name=generation,proto3" json:"generation,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. 
Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,5,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` -} - -func (x *DeleteObjectRequest) Reset() { - *x = DeleteObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteObjectRequest) ProtoMessage() {} - -func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead. 
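// Editorial sketch, not part of the generated file: the if_generation_match
// family above is declared as proto3 optional, so each field maps to *int64
// in Go and a value of 0 is meaningful ("no live versions"). A conditional
// delete might be built as below, assuming this package is imported as
// storagepb and that "google.golang.org/protobuf/proto" supplies the
// proto.Int64 helper; the bucket and object names are hypothetical.
func buildConditionalDelete(liveGen int64) *storagepb.DeleteObjectRequest {
	return &storagepb.DeleteObjectRequest{
		Bucket: "projects/_/buckets/my-bucket", // v2 buckets use this resource-name form
		Object: "logs/app.log",
		// Succeed only while the live generation still matches;
		// proto.Int64(0) would instead require that no live version exist.
		IfGenerationMatch: proto.Int64(liveGen),
	}
}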
-func (*DeleteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13} -} - -func (x *DeleteObjectRequest) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *DeleteObjectRequest) GetObject() string { - if x != nil { - return x.Object - } - return "" -} - -func (x *DeleteObjectRequest) GetGeneration() int64 { - if x != nil { - return x.Generation - } - return 0 -} - -func (x *DeleteObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *DeleteObjectRequest) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *DeleteObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *DeleteObjectRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -// Message for canceling an in-progress resumable upload. -// `upload_id` **must** be set. -type CancelResumableWriteRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The upload_id of the resumable upload to cancel. This should be - // copied from the `upload_id` field of `StartResumableWriteResponse`. - UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` -} - -func (x *CancelResumableWriteRequest) Reset() { - *x = CancelResumableWriteRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CancelResumableWriteRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CancelResumableWriteRequest) ProtoMessage() {} - -func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead. -func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} -} - -func (x *CancelResumableWriteRequest) GetUploadId() string { - if x != nil { - return x.UploadId - } - return "" -} - -// Empty response message for canceling an in-progress resumable upload, will be -// extended as needed. 
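// Editorial sketch: canceling an unfinalized resumable upload needs only the
// upload_id handed back by StartResumableWrite. Assumes a generated gRPC
// storagepb.StorageClient and a context.Context; both imports are implied,
// and the client method name is inferred from the request/response pair here.
func cancelUpload(ctx context.Context, client storagepb.StorageClient, uploadID string) error {
	// The response message is currently empty, reserved for future extension.
	_, err := client.CancelResumableWrite(ctx, &storagepb.CancelResumableWriteRequest{
		UploadId: uploadID, // copied from StartResumableWriteResponse.upload_id
	})
	return err
}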
-type CancelResumableWriteResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *CancelResumableWriteResponse) Reset() { - *x = CancelResumableWriteResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CancelResumableWriteResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CancelResumableWriteResponse) ProtoMessage() {} - -func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead. -func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} -} - -// Request message for ReadObject. -type ReadObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the bucket containing the object to read. - Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Required. The name of the object to read. - Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - // If present, selects a specific revision of this object (as opposed - // to the latest version, the default). - Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` - // The offset for the first byte to return in the read, relative to the start - // of the object. - // - // A negative `read_offset` value will be interpreted as the number of bytes - // back from the end of the object to be returned. For example, if an object's - // length is 15 bytes, a ReadObjectRequest with `read_offset` = -5 and - // `read_limit` = 3 would return bytes 10 through 12 of the object. Requesting - // a negative offset with magnitude larger than the size of the object will - // return the entire object. - ReadOffset int64 `protobuf:"varint,4,opt,name=read_offset,json=readOffset,proto3" json:"read_offset,omitempty"` - // The maximum number of `data` bytes the server is allowed to return in the - // sum of all `Object` messages. A `read_limit` of zero indicates that there - // is no limit, and a negative `read_limit` will cause an error. - // - // If the stream returns fewer bytes than allowed by the `read_limit` and no - // error occurred, the stream includes all data from the `read_offset` to the - // end of the resource. - ReadLimit int64 `protobuf:"varint,5,opt,name=read_limit,json=readLimit,proto3" json:"read_limit,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,6,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live generation - // does not match the given value. If no live object exists, the precondition - // fails. 
Setting to 0 makes the operation succeed only if there is a live - // version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` - // Mask specifying which fields to read. - // The checksummed_data field and its children will always be present. - // If no mask is specified, will default to all fields except metadata.owner - // and metadata.acl. - // * may be used to mean "all fields". - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` -} - -func (x *ReadObjectRequest) Reset() { - *x = ReadObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadObjectRequest) ProtoMessage() {} - -func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead. 
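// Editorial sketch: the read_offset/read_limit comments above permit tail
// reads. This reproduces the documented example (15-byte object, offset -5,
// limit 3 yields bytes 10 through 12), with a read_mask narrowing the
// returned metadata; fieldmaskpb is
// "google.golang.org/protobuf/types/known/fieldmaskpb", and the bucket,
// object, and mask paths are hypothetical.
func buildTailRead() *storagepb.ReadObjectRequest {
	return &storagepb.ReadObjectRequest{
		Bucket:     "projects/_/buckets/my-bucket",
		Object:     "data.bin",
		ReadOffset: -5, // five bytes back from the end of the object
		ReadLimit:  3,  // at most three data bytes across the whole stream
		ReadMask:   &fieldmaskpb.FieldMask{Paths: []string{"name", "bucket", "size"}},
	}
}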
-func (*ReadObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} -} - -func (x *ReadObjectRequest) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *ReadObjectRequest) GetObject() string { - if x != nil { - return x.Object - } - return "" -} - -func (x *ReadObjectRequest) GetGeneration() int64 { - if x != nil { - return x.Generation - } - return 0 -} - -func (x *ReadObjectRequest) GetReadOffset() int64 { - if x != nil { - return x.ReadOffset - } - return 0 -} - -func (x *ReadObjectRequest) GetReadLimit() int64 { - if x != nil { - return x.ReadLimit - } - return 0 -} - -func (x *ReadObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *ReadObjectRequest) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *ReadObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *ReadObjectRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.ReadMask - } - return nil -} - -// Request message for GetObject. -type GetObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of the bucket in which the object resides. - Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Required. Name of the object. - Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - // If present, selects a specific revision of this object (as opposed to the - // latest version, the default). - Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. 
- IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` - // Mask specifying which fields to read. - // If no mask is specified, will default to all fields except metadata.acl and - // metadata.owner. - // * may be used to mean "all fields". - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` -} - -func (x *GetObjectRequest) Reset() { - *x = GetObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetObjectRequest) ProtoMessage() {} - -func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead. -func (*GetObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} -} - -func (x *GetObjectRequest) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *GetObjectRequest) GetObject() string { - if x != nil { - return x.Object - } - return "" -} - -func (x *GetObjectRequest) GetGeneration() int64 { - if x != nil { - return x.Generation - } - return 0 -} - -func (x *GetObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *GetObjectRequest) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *GetObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *GetObjectRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.ReadMask - } - return nil -} - -// Response message for ReadObject. -type ReadObjectResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A portion of the data for the object. The service **may** leave `data` - // empty for any given `ReadResponse`. This enables the service to inform the - // client that the request is still live while it is running an operation to - // generate more data. 
- ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"` - // The checksums of the complete object. The client should compute one of - // these checksums over the downloaded object and compare it against the value - // provided here. - ObjectChecksums *ObjectChecksums `protobuf:"bytes,2,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` - // If read_offset and or read_limit was specified on the - // ReadObjectRequest, ContentRange will be populated on the first - // ReadObjectResponse message of the read stream. - ContentRange *ContentRange `protobuf:"bytes,3,opt,name=content_range,json=contentRange,proto3" json:"content_range,omitempty"` - // Metadata of the object whose media is being returned. - // Only populated in the first response in the stream. - Metadata *Object `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` -} - -func (x *ReadObjectResponse) Reset() { - *x = ReadObjectResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadObjectResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadObjectResponse) ProtoMessage() {} - -func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead. -func (*ReadObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} -} - -func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData { - if x != nil { - return x.ChecksummedData - } - return nil -} - -func (x *ReadObjectResponse) GetObjectChecksums() *ObjectChecksums { - if x != nil { - return x.ObjectChecksums - } - return nil -} - -func (x *ReadObjectResponse) GetContentRange() *ContentRange { - if x != nil { - return x.ContentRange - } - return nil -} - -func (x *ReadObjectResponse) GetMetadata() *Object { - if x != nil { - return x.Metadata - } - return nil -} - -// Describes an attempt to insert an object, possibly over multiple requests. -type WriteObjectSpec struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Destination object, including its name and its metadata. - Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // Apply a predefined set of access controls to this object. - // Valid values are "authenticatedRead", "bucketOwnerFullControl", - // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". - PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` - // Makes the operation conditional on whether the object's current - // generation matches the given value. Setting to 0 makes the operation - // succeed only if there are no live versions of the object. 
- IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live - // generation does not match the given value. If no live object exists, the - // precondition fails. Setting to 0 makes the operation succeed only if - // there is a live version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // The expected final object size being uploaded. - // If this value is set, closing the stream after writing fewer or more than - // `object_size` bytes will result in an OUT_OF_RANGE error. - // - // This situation is considered a client error, and if such an error occurs - // you must start the upload over from scratch, this time sending the correct - // number of bytes. - // - // The `object_size` value is ignored for one-shot (non-resumable) writes. - ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"` -} - -func (x *WriteObjectSpec) Reset() { - *x = WriteObjectSpec{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WriteObjectSpec) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WriteObjectSpec) ProtoMessage() {} - -func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. 
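// Editorial sketch: a WriteObjectSpec for a resumable "create only if
// absent" upload, using the precondition and object_size semantics
// documented above. Assumes the storagepb alias and proto.Int64 helper from
// the earlier sketches; names are hypothetical.
func buildCreateIfAbsentSpec(size int64) *storagepb.WriteObjectSpec {
	return &storagepb.WriteObjectSpec{
		Resource: &storagepb.Object{
			Bucket: "projects/_/buckets/my-bucket",
			Name:   "reports/2024.csv",
		},
		// 0 means: succeed only if there is no live version of the object.
		IfGenerationMatch: proto.Int64(0),
		// Closing the stream after writing any other number of bytes fails
		// with OUT_OF_RANGE; ignored for one-shot writes, per the comment above.
		ObjectSize: proto.Int64(size),
	}
}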
-func (*WriteObjectSpec) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} -} - -func (x *WriteObjectSpec) GetResource() *Object { - if x != nil { - return x.Resource - } - return nil -} - -func (x *WriteObjectSpec) GetPredefinedAcl() string { - if x != nil { - return x.PredefinedAcl - } - return "" -} - -func (x *WriteObjectSpec) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *WriteObjectSpec) GetObjectSize() int64 { - if x != nil && x.ObjectSize != nil { - return *x.ObjectSize - } - return 0 -} - -// Request message for WriteObject. -type WriteObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The first message of each stream should set one of the following. - // - // Types that are assignable to FirstMessage: - // - // *WriteObjectRequest_UploadId - // *WriteObjectRequest_WriteObjectSpec - FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` - // Required. The offset from the beginning of the object at which the data - // should be written. - // - // In the first `WriteObjectRequest` of a `WriteObject()` action, it - // indicates the initial offset for the `Write()` call. The value **must** be - // equal to the `persisted_size` that a call to `QueryWriteStatus()` would - // return (0 if this is the first write to the object). - // - // On subsequent calls, this value **must** be no larger than the sum of the - // first `write_offset` and the sizes of all `data` chunks sent previously on - // this stream. - // - // An incorrect value will cause an error. - WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"` - // A portion of the data for the object. - // - // Types that are assignable to Data: - // - // *WriteObjectRequest_ChecksummedData - Data isWriteObjectRequest_Data `protobuf_oneof:"data"` - // Checksums for the complete object. If the checksums computed by the service - // don't match the specifified checksums the call will fail. May only be - // provided in the first or last request (either with first_message, or - // finish_write set). - ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` - // If `true`, this indicates that the write is complete. Sending any - // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` - // will cause an error. - // For a non-resumable write (where the upload_id was not set in the first - // message), it is an error not to set this field in the final message of the - // stream. - FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"` - // A set of parameters common to Storage API requests concerning an object. 
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` -} - -func (x *WriteObjectRequest) Reset() { - *x = WriteObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WriteObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WriteObjectRequest) ProtoMessage() {} - -func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead. -func (*WriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} -} - -func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { - if m != nil { - return m.FirstMessage - } - return nil -} - -func (x *WriteObjectRequest) GetUploadId() string { - if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok { - return x.UploadId - } - return "" -} - -func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec { - if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok { - return x.WriteObjectSpec - } - return nil -} - -func (x *WriteObjectRequest) GetWriteOffset() int64 { - if x != nil { - return x.WriteOffset - } - return 0 -} - -func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data { - if m != nil { - return m.Data - } - return nil -} - -func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData { - if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok { - return x.ChecksummedData - } - return nil -} - -func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums { - if x != nil { - return x.ObjectChecksums - } - return nil -} - -func (x *WriteObjectRequest) GetFinishWrite() bool { - if x != nil { - return x.FinishWrite - } - return false -} - -func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -type isWriteObjectRequest_FirstMessage interface { - isWriteObjectRequest_FirstMessage() -} - -type WriteObjectRequest_UploadId struct { - // For resumable uploads. This should be the `upload_id` returned from a - // call to `StartResumableWriteResponse`. - UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"` -} - -type WriteObjectRequest_WriteObjectSpec struct { - // For non-resumable uploads. Describes the overall upload, including the - // destination bucket and object name, preconditions, etc. - WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"` -} - -func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {} - -func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {} - -type isWriteObjectRequest_Data interface { - isWriteObjectRequest_Data() -} - -type WriteObjectRequest_ChecksummedData struct { - // The data to insert. 
If a crc32c checksum is provided that doesn't match - // the checksum computed by the service, the request will fail. - ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"` -} - -func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {} - -// Response message for WriteObject. -type WriteObjectResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The response will set one of the following. - // - // Types that are assignable to WriteStatus: - // - // *WriteObjectResponse_PersistedSize - // *WriteObjectResponse_Resource - WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` -} - -func (x *WriteObjectResponse) Reset() { - *x = WriteObjectResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WriteObjectResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WriteObjectResponse) ProtoMessage() {} - -func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. -func (*WriteObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} -} - -func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { - if m != nil { - return m.WriteStatus - } - return nil -} - -func (x *WriteObjectResponse) GetPersistedSize() int64 { - if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok { - return x.PersistedSize - } - return 0 -} - -func (x *WriteObjectResponse) GetResource() *Object { - if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok { - return x.Resource - } - return nil -} - -type isWriteObjectResponse_WriteStatus interface { - isWriteObjectResponse_WriteStatus() -} - -type WriteObjectResponse_PersistedSize struct { - // The total number of bytes that have been processed for the given object - // from all `WriteObject` calls. Only set if the upload has not finalized. - PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` -} - -type WriteObjectResponse_Resource struct { - // A resource containing the metadata for the uploaded object. Only set if - // the upload has finalized. - Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` -} - -func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {} - -func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {} - -// Request message for ListObjects. -type ListObjectsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of the bucket in which to look for objects. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Maximum number of `items` plus `prefixes` to return - // in a single page of responses. As duplicate `prefixes` are - // omitted, fewer total results may be returned than requested. 
The service - // will use this parameter or 1,000 items, whichever is smaller. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A previously-returned page token representing part of the larger set of - // results to view. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - // If set, returns results in a directory-like mode. `items` will contain - // only objects whose names, aside from the `prefix`, do not - // contain `delimiter`. Objects whose names, aside from the - // `prefix`, contain `delimiter` will have their name, - // truncated after the `delimiter`, returned in - // `prefixes`. Duplicate `prefixes` are omitted. - Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"` - // If true, objects that end in exactly one instance of `delimiter` - // will have their metadata included in `items` in addition to - // `prefixes`. - IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"` - // Filter results to objects whose names begin with this prefix. - Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"` - // If `true`, lists all versions of an object as distinct results. - // For more information, see - // [Object - // Versioning](https://cloud.google.com/storage/docs/object-versioning). - Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"` - // Mask specifying which fields to read from each result. - // If no mask is specified, will default to all fields except items.acl and - // items.owner. - // * may be used to mean "all fields". - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` - // Optional. Filter results to objects whose names are lexicographically equal - // to or after lexicographic_start. If lexicographic_end is also set, the - // objects listed have names between lexicographic_start (inclusive) and - // lexicographic_end (exclusive). - LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"` - // Optional. Filter results to objects whose names are lexicographically - // before lexicographic_end. If lexicographic_start is also set, the objects - // listed have names between lexicographic_start (inclusive) and - // lexicographic_end (exclusive). - LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"` -} - -func (x *ListObjectsRequest) Reset() { - *x = ListObjectsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListObjectsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListObjectsRequest) ProtoMessage() {} - -func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. 
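// Editorial sketch: a directory-style listing under "photos/" using the
// delimiter semantics above; object names that contain "/" past the prefix
// are collapsed into `prefixes` instead of being returned as `items`. The
// bucket name is hypothetical.
func buildFolderListing(pageToken string) *storagepb.ListObjectsRequest {
	return &storagepb.ListObjectsRequest{
		Parent:    "projects/_/buckets/my-bucket",
		Prefix:    "photos/",
		Delimiter: "/",
		PageSize:  100,       // the service caps this at 1,000 per the field comment
		PageToken: pageToken, // empty for the first page, then next_page_token
	}
}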
-func (*ListObjectsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} -} - -func (x *ListObjectsRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListObjectsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListObjectsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -func (x *ListObjectsRequest) GetDelimiter() string { - if x != nil { - return x.Delimiter - } - return "" -} - -func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool { - if x != nil { - return x.IncludeTrailingDelimiter - } - return false -} - -func (x *ListObjectsRequest) GetPrefix() string { - if x != nil { - return x.Prefix - } - return "" -} - -func (x *ListObjectsRequest) GetVersions() bool { - if x != nil { - return x.Versions - } - return false -} - -func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.ReadMask - } - return nil -} - -func (x *ListObjectsRequest) GetLexicographicStart() string { - if x != nil { - return x.LexicographicStart - } - return "" -} - -func (x *ListObjectsRequest) GetLexicographicEnd() string { - if x != nil { - return x.LexicographicEnd - } - return "" -} - -// Request object for `QueryWriteStatus`. -type QueryWriteStatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the resume token for the object whose write status is - // being requested. - UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,2,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` -} - -func (x *QueryWriteStatusRequest) Reset() { - *x = QueryWriteStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QueryWriteStatusRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QueryWriteStatusRequest) ProtoMessage() {} - -func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. -func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} -} - -func (x *QueryWriteStatusRequest) GetUploadId() string { - if x != nil { - return x.UploadId - } - return "" -} - -func (x *QueryWriteStatusRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -// Response object for `QueryWriteStatus`. -type QueryWriteStatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The response will set one of the following. 
- // - // Types that are assignable to WriteStatus: - // - // *QueryWriteStatusResponse_PersistedSize - // *QueryWriteStatusResponse_Resource - WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"` -} - -func (x *QueryWriteStatusResponse) Reset() { - *x = QueryWriteStatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QueryWriteStatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QueryWriteStatusResponse) ProtoMessage() {} - -func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead. -func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} -} - -func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { - if m != nil { - return m.WriteStatus - } - return nil -} - -func (x *QueryWriteStatusResponse) GetPersistedSize() int64 { - if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_PersistedSize); ok { - return x.PersistedSize - } - return 0 -} - -func (x *QueryWriteStatusResponse) GetResource() *Object { - if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_Resource); ok { - return x.Resource - } - return nil -} - -type isQueryWriteStatusResponse_WriteStatus interface { - isQueryWriteStatusResponse_WriteStatus() -} - -type QueryWriteStatusResponse_PersistedSize struct { - // The total number of bytes that have been processed for the given object - // from all `WriteObject` calls. This is the correct value for the - // 'write_offset' field to use when resuming the `WriteObject` operation. - // Only set if the upload has not finalized. - PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` -} - -type QueryWriteStatusResponse_Resource struct { - // A resource containing the metadata for the uploaded object. Only set if - // the upload has finalized. - Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` -} - -func (*QueryWriteStatusResponse_PersistedSize) isQueryWriteStatusResponse_WriteStatus() {} - -func (*QueryWriteStatusResponse_Resource) isQueryWriteStatusResponse_WriteStatus() {} - -// Request message for RewriteObject. -// If the source object is encrypted using a Customer-Supplied Encryption Key -// the key information must be provided in the copy_source_encryption_algorithm, -// copy_source_encryption_key_bytes, and copy_source_encryption_key_sha256_bytes -// fields. If the destination object should be encrypted the keying information -// should be provided in the encryption_algorithm, encryption_key_bytes, and -// encryption_key_sha256_bytes fields of the -// common_object_request_params.customer_encryption field. -type RewriteObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Immutable. The name of the destination object. - // See the - // [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming). 
- // Example: `test.txt` - // The `name` field by itself does not uniquely identify a Cloud Storage - // object. A Cloud Storage object is uniquely identified by the tuple of - // (bucket, object, generation). - DestinationName string `protobuf:"bytes,24,opt,name=destination_name,json=destinationName,proto3" json:"destination_name,omitempty"` - // Required. Immutable. The name of the bucket containing the destination - // object. - DestinationBucket string `protobuf:"bytes,25,opt,name=destination_bucket,json=destinationBucket,proto3" json:"destination_bucket,omitempty"` - // The name of the Cloud KMS key that will be used to encrypt the destination - // object. The Cloud KMS key must be located in same location as the object. - // If the parameter is not specified, the request uses the destination - // bucket's default encryption key, if any, or else the Google-managed - // encryption key. - DestinationKmsKey string `protobuf:"bytes,27,opt,name=destination_kms_key,json=destinationKmsKey,proto3" json:"destination_kms_key,omitempty"` - // Properties of the destination, post-rewrite object. - // The `name`, `bucket` and `kms_key` fields must not be populated (these - // values are specified in the `destination_name`, `destination_bucket`, and - // `destination_kms_key` fields). - // If `destination` is present it will be used to construct the destination - // object's metadata; otherwise the destination object's metadata will be - // copied from the source object. - Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` - // Required. Name of the bucket in which to find the source object. - SourceBucket string `protobuf:"bytes,2,opt,name=source_bucket,json=sourceBucket,proto3" json:"source_bucket,omitempty"` - // Required. Name of the source object. - SourceObject string `protobuf:"bytes,3,opt,name=source_object,json=sourceObject,proto3" json:"source_object,omitempty"` - // If present, selects a specific revision of the source object (as opposed to - // the latest version, the default). - SourceGeneration int64 `protobuf:"varint,4,opt,name=source_generation,json=sourceGeneration,proto3" json:"source_generation,omitempty"` - // Include this field (from the previous rewrite response) on each rewrite - // request after the first one, until the rewrite response 'done' flag is - // true. Calls that provide a rewriteToken can omit all other request fields, - // but if included those fields must match the values provided in the first - // rewrite request. - RewriteToken string `protobuf:"bytes,5,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"` - // Apply a predefined set of access controls to the destination object. - // Valid values are "authenticatedRead", "bucketOwnerFullControl", - // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". - DestinationPredefinedAcl string `protobuf:"bytes,28,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,7,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live generation - // does not match the given value. 
If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the destination object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the destination object's current - // metageneration does not match the given value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,10,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // Makes the operation conditional on whether the source object's live - // generation matches the given value. - IfSourceGenerationMatch *int64 `protobuf:"varint,11,opt,name=if_source_generation_match,json=ifSourceGenerationMatch,proto3,oneof" json:"if_source_generation_match,omitempty"` - // Makes the operation conditional on whether the source object's live - // generation does not match the given value. - IfSourceGenerationNotMatch *int64 `protobuf:"varint,12,opt,name=if_source_generation_not_match,json=ifSourceGenerationNotMatch,proto3,oneof" json:"if_source_generation_not_match,omitempty"` - // Makes the operation conditional on whether the source object's current - // metageneration matches the given value. - IfSourceMetagenerationMatch *int64 `protobuf:"varint,13,opt,name=if_source_metageneration_match,json=ifSourceMetagenerationMatch,proto3,oneof" json:"if_source_metageneration_match,omitempty"` - // Makes the operation conditional on whether the source object's current - // metageneration does not match the given value. - IfSourceMetagenerationNotMatch *int64 `protobuf:"varint,14,opt,name=if_source_metageneration_not_match,json=ifSourceMetagenerationNotMatch,proto3,oneof" json:"if_source_metageneration_not_match,omitempty"` - // The maximum number of bytes that will be rewritten per rewrite request. - // Most callers - // shouldn't need to specify this parameter - it is primarily in place to - // support testing. If specified the value must be an integral multiple of - // 1 MiB (1048576). Also, this only applies to requests where the source and - // destination span locations and/or storage classes. Finally, this value must - // not change across rewrite calls else you'll get an error that the - // `rewriteToken` is invalid. - MaxBytesRewrittenPerCall int64 `protobuf:"varint,15,opt,name=max_bytes_rewritten_per_call,json=maxBytesRewrittenPerCall,proto3" json:"max_bytes_rewritten_per_call,omitempty"` - // The algorithm used to encrypt the source object, if any. Used if the source - // object was encrypted with a Customer-Supplied Encryption Key. - CopySourceEncryptionAlgorithm string `protobuf:"bytes,16,opt,name=copy_source_encryption_algorithm,json=copySourceEncryptionAlgorithm,proto3" json:"copy_source_encryption_algorithm,omitempty"` - // The raw bytes (not base64-encoded) AES-256 encryption key used to encrypt - // the source object, if it was encrypted with a Customer-Supplied Encryption - // Key. 
- CopySourceEncryptionKeyBytes []byte `protobuf:"bytes,21,opt,name=copy_source_encryption_key_bytes,json=copySourceEncryptionKeyBytes,proto3" json:"copy_source_encryption_key_bytes,omitempty"` - // The raw bytes (not base64-encoded) SHA256 hash of the encryption key used - // to encrypt the source object, if it was encrypted with a Customer-Supplied - // Encryption Key. - CopySourceEncryptionKeySha256Bytes []byte `protobuf:"bytes,22,opt,name=copy_source_encryption_key_sha256_bytes,json=copySourceEncryptionKeySha256Bytes,proto3" json:"copy_source_encryption_key_sha256_bytes,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,19,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` - // The checksums of the complete object. This will be used to validate the - // destination object after rewriting. - ObjectChecksums *ObjectChecksums `protobuf:"bytes,29,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` -} - -func (x *RewriteObjectRequest) Reset() { - *x = RewriteObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RewriteObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RewriteObjectRequest) ProtoMessage() {} - -func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. 
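// Editorial sketch of the rewrite_token loop the comments above describe:
// reissue the request, echoing rewrite_token from each response, until the
// response reports done. Assumes a generated gRPC storagepb.StorageClient;
// the method name is inferred from this request message.
func rewriteUntilDone(ctx context.Context, client storagepb.StorageClient, req *storagepb.RewriteObjectRequest) (*storagepb.Object, error) {
	for {
		resp, err := client.RewriteObject(ctx, req)
		if err != nil {
			return nil, err
		}
		if resp.GetDone() {
			return resp.GetResource(), nil // destination object metadata
		}
		// Progress so far: resp.GetTotalBytesRewritten() of resp.GetObjectSize().
		req.RewriteToken = resp.GetRewriteToken()
	}
}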
-func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} -} - -func (x *RewriteObjectRequest) GetDestinationName() string { - if x != nil { - return x.DestinationName - } - return "" -} - -func (x *RewriteObjectRequest) GetDestinationBucket() string { - if x != nil { - return x.DestinationBucket - } - return "" -} - -func (x *RewriteObjectRequest) GetDestinationKmsKey() string { - if x != nil { - return x.DestinationKmsKey - } - return "" -} - -func (x *RewriteObjectRequest) GetDestination() *Object { - if x != nil { - return x.Destination - } - return nil -} - -func (x *RewriteObjectRequest) GetSourceBucket() string { - if x != nil { - return x.SourceBucket - } - return "" -} - -func (x *RewriteObjectRequest) GetSourceObject() string { - if x != nil { - return x.SourceObject - } - return "" -} - -func (x *RewriteObjectRequest) GetSourceGeneration() int64 { - if x != nil { - return x.SourceGeneration - } - return 0 -} - -func (x *RewriteObjectRequest) GetRewriteToken() string { - if x != nil { - return x.RewriteToken - } - return "" -} - -func (x *RewriteObjectRequest) GetDestinationPredefinedAcl() string { - if x != nil { - return x.DestinationPredefinedAcl - } - return "" -} - -func (x *RewriteObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfSourceGenerationMatch() int64 { - if x != nil && x.IfSourceGenerationMatch != nil { - return *x.IfSourceGenerationMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfSourceGenerationNotMatch() int64 { - if x != nil && x.IfSourceGenerationNotMatch != nil { - return *x.IfSourceGenerationNotMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfSourceMetagenerationMatch() int64 { - if x != nil && x.IfSourceMetagenerationMatch != nil { - return *x.IfSourceMetagenerationMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfSourceMetagenerationNotMatch() int64 { - if x != nil && x.IfSourceMetagenerationNotMatch != nil { - return *x.IfSourceMetagenerationNotMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetMaxBytesRewrittenPerCall() int64 { - if x != nil { - return x.MaxBytesRewrittenPerCall - } - return 0 -} - -func (x *RewriteObjectRequest) GetCopySourceEncryptionAlgorithm() string { - if x != nil { - return x.CopySourceEncryptionAlgorithm - } - return "" -} - -func (x *RewriteObjectRequest) GetCopySourceEncryptionKeyBytes() []byte { - if x != nil { - return x.CopySourceEncryptionKeyBytes - } - return nil -} - -func (x *RewriteObjectRequest) GetCopySourceEncryptionKeySha256Bytes() []byte { - if x != nil { - return x.CopySourceEncryptionKeySha256Bytes - } - return nil -} - -func (x *RewriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -func (x *RewriteObjectRequest) GetObjectChecksums() *ObjectChecksums { 
- if x != nil { - return x.ObjectChecksums - } - return nil -} - -// A rewrite response. -type RewriteResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The total bytes written so far, which can be used to provide a waiting user - // with a progress indicator. This property is always present in the response. - TotalBytesRewritten int64 `protobuf:"varint,1,opt,name=total_bytes_rewritten,json=totalBytesRewritten,proto3" json:"total_bytes_rewritten,omitempty"` - // The total size of the object being copied in bytes. This property is always - // present in the response. - ObjectSize int64 `protobuf:"varint,2,opt,name=object_size,json=objectSize,proto3" json:"object_size,omitempty"` - // `true` if the copy is finished; otherwise, `false` if - // the copy is in progress. This property is always present in the response. - Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"` - // A token to use in subsequent requests to continue copying data. This token - // is present in the response only when there is more data to copy. - RewriteToken string `protobuf:"bytes,4,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"` - // A resource containing the metadata for the copied-to object. This property - // is present in the response only when copying completes. - Resource *Object `protobuf:"bytes,5,opt,name=resource,proto3" json:"resource,omitempty"` -} - -func (x *RewriteResponse) Reset() { - *x = RewriteResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RewriteResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RewriteResponse) ProtoMessage() {} - -func (x *RewriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead. -func (*RewriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} -} - -func (x *RewriteResponse) GetTotalBytesRewritten() int64 { - if x != nil { - return x.TotalBytesRewritten - } - return 0 -} - -func (x *RewriteResponse) GetObjectSize() int64 { - if x != nil { - return x.ObjectSize - } - return 0 -} - -func (x *RewriteResponse) GetDone() bool { - if x != nil { - return x.Done - } - return false -} - -func (x *RewriteResponse) GetRewriteToken() string { - if x != nil { - return x.RewriteToken - } - return "" -} - -func (x *RewriteResponse) GetResource() *Object { - if x != nil { - return x.Resource - } - return nil -} - -// Request message StartResumableWrite. -type StartResumableWriteRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The destination bucket, object, and metadata, as well as any - // preconditions. - WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"` - // A set of parameters common to Storage API requests concerning an object. 
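The RewriteObjectRequest/RewriteResponse pair above implements a resumable server-side copy: each response reports progress, and until `done` is true the caller must echo `rewrite_token` back in the next request. A minimal sketch of that loop, assuming the generated package imports as `storagepb` (the path is a guess) and that `client` satisfies the generated `StorageClient` interface:

package storageexample

import (
	"context"
	"log"

	storagepb "google.golang.org/genproto/googleapis/storage/v2" // assumed import path
)

// rewriteUntilDone drives a rewrite to completion, carrying the
// continuation token from each response into the next request.
func rewriteUntilDone(ctx context.Context, client storagepb.StorageClient, req *storagepb.RewriteObjectRequest) (*storagepb.Object, error) {
	for {
		resp, err := client.RewriteObject(ctx, req)
		if err != nil {
			return nil, err
		}
		log.Printf("rewritten %d of %d bytes", resp.GetTotalBytesRewritten(), resp.GetObjectSize())
		if resp.GetDone() {
			// resource is populated only once the copy completes.
			return resp.GetResource(), nil
		}
		// Continue where the last call stopped.
		req.RewriteToken = resp.GetRewriteToken()
	}
}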
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,3,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` - // The checksums of the complete object. This will be used to validate the - // uploaded object. For each upload, object_checksums can be provided with - // either StartResumableWriteRequest or the WriteObjectRequest with - // finish_write set to `true`. - ObjectChecksums *ObjectChecksums `protobuf:"bytes,5,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` -} - -func (x *StartResumableWriteRequest) Reset() { - *x = StartResumableWriteRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartResumableWriteRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartResumableWriteRequest) ProtoMessage() {} - -func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. -func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} -} - -func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { - if x != nil { - return x.WriteObjectSpec - } - return nil -} - -func (x *StartResumableWriteRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -func (x *StartResumableWriteRequest) GetObjectChecksums() *ObjectChecksums { - if x != nil { - return x.ObjectChecksums - } - return nil -} - -// Response object for `StartResumableWrite`. -type StartResumableWriteResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The upload_id of the newly started resumable write operation. This - // value should be copied into the `WriteObjectRequest.upload_id` field. - UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` -} - -func (x *StartResumableWriteResponse) Reset() { - *x = StartResumableWriteResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartResumableWriteResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartResumableWriteResponse) ProtoMessage() {} - -func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. 
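StartResumableWriteResponse carries only `upload_id`, which the comment says to copy into `WriteObjectRequest.upload_id`. A hedged sketch of that handoff; `WriteObjectRequest` and its oneof wrapper are not shown in this hunk, so their names here follow standard protoc-gen-go conventions and should be treated as assumptions:

package storageexample

import (
	"context"

	storagepb "google.golang.org/genproto/googleapis/storage/v2" // assumed import path
)

// beginResumableWrite starts a session and identifies it on the first
// streamed message. Subsequent Send calls and CloseAndRecv are elided.
func beginResumableWrite(ctx context.Context, client storagepb.StorageClient, spec *storagepb.WriteObjectSpec) error {
	started, err := client.StartResumableWrite(ctx, &storagepb.StartResumableWriteRequest{
		WriteObjectSpec: spec,
	})
	if err != nil {
		return err
	}
	stream, err := client.WriteObject(ctx)
	if err != nil {
		return err
	}
	// The oneof wrapper name is assumed from protoc-gen-go naming rules.
	return stream.Send(&storagepb.WriteObjectRequest{
		FirstMessage: &storagepb.WriteObjectRequest_UploadId{UploadId: started.GetUploadId()},
	})
}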
-func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} -} - -func (x *StartResumableWriteResponse) GetUploadId() string { - if x != nil { - return x.UploadId - } - return "" -} - -// Request message for UpdateObject. -type UpdateObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The object to update. - // The object's bucket and name fields are used to identify the object to - // update. If present, the object's generation field selects a specific - // revision of this object whose metadata should be updated. Otherwise, - // assumes the live version of the object. - Object *Object `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,2,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,4,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // Apply a predefined set of access controls to this object. - // Valid values are "authenticatedRead", "bucketOwnerFullControl", - // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". - PredefinedAcl string `protobuf:"bytes,10,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` - // Required. List of fields to be updated. - // - // To specify ALL fields, equivalent to the JSON API's "update" function, - // specify a single field with the value `*`. Note: not recommended. If a new - // field is introduced at a later time, an older client updating with the `*` - // may accidentally reset the new field's value. - // - // Not specifying any fields is an error. - // Not specifying a field while setting that field to a non-default value is - // an error. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - // A set of parameters common to Storage API requests concerning an object. 
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` -} - -func (x *UpdateObjectRequest) Reset() { - *x = UpdateObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateObjectRequest) ProtoMessage() {} - -func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead. -func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} -} - -func (x *UpdateObjectRequest) GetObject() *Object { - if x != nil { - return x.Object - } - return nil -} - -func (x *UpdateObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *UpdateObjectRequest) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *UpdateObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *UpdateObjectRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *UpdateObjectRequest) GetPredefinedAcl() string { - if x != nil { - return x.PredefinedAcl - } - return "" -} - -func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -func (x *UpdateObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -// Request message for GetServiceAccount. -type GetServiceAccountRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Project ID, in the format of "projects/". - // can be the project ID or project number. 
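UpdateObjectRequest's `update_mask` comment warns against the catch-all `*`. A sketch that names one explicit path instead, so fields added to Object later cannot be silently reset; the import path for the generated package is assumed:

package storageexample

import (
	storagepb "google.golang.org/genproto/googleapis/storage/v2" // assumed import path
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

// contentTypeUpdate builds a request that touches only content_type.
func contentTypeUpdate(bucket, object string) *storagepb.UpdateObjectRequest {
	return &storagepb.UpdateObjectRequest{
		Object: &storagepb.Object{
			Bucket:      bucket, // e.g. "projects/_/buckets/my-bucket" (hypothetical)
			Name:        object,
			ContentType: "text/plain",
		},
		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"content_type"}},
	}
}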
- Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` -} - -func (x *GetServiceAccountRequest) Reset() { - *x = GetServiceAccountRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetServiceAccountRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetServiceAccountRequest) ProtoMessage() {} - -func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead. -func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} -} - -func (x *GetServiceAccountRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -// Request message for CreateHmacKey. -type CreateHmacKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project that the HMAC-owning service account lives in, in the - // format of "projects/". can be the - // project ID or project number. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` - // Required. The service account to create the HMAC for. - ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` -} - -func (x *CreateHmacKeyRequest) Reset() { - *x = CreateHmacKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateHmacKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateHmacKeyRequest) ProtoMessage() {} - -func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} -} - -func (x *CreateHmacKeyRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -// Create hmac response. The only time the secret for an HMAC will be returned. -type CreateHmacKeyResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Key metadata. - Metadata *HmacKeyMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - // HMAC key secret material. - // In raw bytes format (not base64-encoded). 
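Per the CreateHmacKeyResponse comment, creation is the only time the secret is returned, and it arrives as raw bytes rather than base64. A small sketch that captures it, base64-encoding the raw material the way HMAC secrets are conventionally stored (an assumption, not something this file mandates):

package storageexample

import (
	"encoding/base64"

	storagepb "google.golang.org/genproto/googleapis/storage/v2" // assumed import path
)

// captureHmacSecret extracts the one-time secret alongside its access id.
func captureHmacSecret(resp *storagepb.CreateHmacKeyResponse) (accessID, secret string) {
	return resp.GetMetadata().GetAccessId(),
		base64.StdEncoding.EncodeToString(resp.GetSecretKeyBytes())
}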
- SecretKeyBytes []byte `protobuf:"bytes,3,opt,name=secret_key_bytes,json=secretKeyBytes,proto3" json:"secret_key_bytes,omitempty"` -} - -func (x *CreateHmacKeyResponse) Reset() { - *x = CreateHmacKeyResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateHmacKeyResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateHmacKeyResponse) ProtoMessage() {} - -func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead. -func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} -} - -func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *CreateHmacKeyResponse) GetSecretKeyBytes() []byte { - if x != nil { - return x.SecretKeyBytes - } - return nil -} - -// Request object to delete a given HMAC key. -type DeleteHmacKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The identifying key for the HMAC to delete. - AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Required. The project that owns the HMAC key, in the format of - // "projects/". - // can be the project ID or project number. - Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` -} - -func (x *DeleteHmacKeyRequest) Reset() { - *x = DeleteHmacKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteHmacKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteHmacKeyRequest) ProtoMessage() {} - -func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} -} - -func (x *DeleteHmacKeyRequest) GetAccessId() string { - if x != nil { - return x.AccessId - } - return "" -} - -func (x *DeleteHmacKeyRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -// Request object to get metadata on a given HMAC key. -type GetHmacKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The identifying key for the HMAC to delete. - AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Required. The project the HMAC key lies in, in the format of - // "projects/". 
- // can be the project ID or project number. - Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` -} - -func (x *GetHmacKeyRequest) Reset() { - *x = GetHmacKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetHmacKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetHmacKeyRequest) ProtoMessage() {} - -func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} -} - -func (x *GetHmacKeyRequest) GetAccessId() string { - if x != nil { - return x.AccessId - } - return "" -} - -func (x *GetHmacKeyRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -// Request to fetch a list of HMAC keys under a given project. -type ListHmacKeysRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project to list HMAC keys for, in the format of - // "projects/". - // can be the project ID or project number. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` - // The maximum number of keys to return. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A previously returned token from ListHmacKeysResponse to get the next page. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - // If set, filters to only return HMAC keys for specified service account. - ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` - // If set, return deleted keys that have not yet been wiped out. - ShowDeletedKeys bool `protobuf:"varint,5,opt,name=show_deleted_keys,json=showDeletedKeys,proto3" json:"show_deleted_keys,omitempty"` -} - -func (x *ListHmacKeysRequest) Reset() { - *x = ListHmacKeysRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListHmacKeysRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListHmacKeysRequest) ProtoMessage() {} - -func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead. 
-func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} -} - -func (x *ListHmacKeysRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *ListHmacKeysRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListHmacKeysRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -func (x *ListHmacKeysRequest) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -func (x *ListHmacKeysRequest) GetShowDeletedKeys() bool { - if x != nil { - return x.ShowDeletedKeys - } - return false -} - -// Hmac key list response with next page information. -type ListHmacKeysResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of items. - HmacKeys []*HmacKeyMetadata `protobuf:"bytes,1,rep,name=hmac_keys,json=hmacKeys,proto3" json:"hmac_keys,omitempty"` - // The continuation token, used to page through large result sets. Provide - // this value in a subsequent request to return the next page of results. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListHmacKeysResponse) Reset() { - *x = ListHmacKeysResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListHmacKeysResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListHmacKeysResponse) ProtoMessage() {} - -func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead. -func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} -} - -func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata { - if x != nil { - return x.HmacKeys - } - return nil -} - -func (x *ListHmacKeysResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request object to update an HMAC key state. -// HmacKeyMetadata.state is required and the only writable field in -// UpdateHmacKey operation. Specifying fields other than state will result in an -// error. -type UpdateHmacKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The HMAC key to update. - // If present, the hmac_key's `id` field will be used to identify the key. - // Otherwise, the hmac_key's access_id and project fields will be used to - // identify the key. - HmacKey *HmacKeyMetadata `protobuf:"bytes,1,opt,name=hmac_key,json=hmacKey,proto3" json:"hmac_key,omitempty"` - // Update mask for hmac_key. - // Not specifying any fields will mean only the `state` field is updated to - // the value specified in `hmac_key`. 
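UpdateHmacKeyRequest makes `state` the only writable field, and an omitted `update_mask` means only `state` is applied. A sketch of a deactivation request under those rules; the state strings come from the HmacKeyMetadata comments later in this file, and the import path is assumed:

package storageexample

import (
	storagepb "google.golang.org/genproto/googleapis/storage/v2" // assumed import path
)

// deactivateKey leaves UpdateMask nil so that only `state` is updated.
func deactivateKey(project, accessID string) *storagepb.UpdateHmacKeyRequest {
	return &storagepb.UpdateHmacKeyRequest{
		HmacKey: &storagepb.HmacKeyMetadata{
			Project:  project, // e.g. "projects/my-project" (hypothetical)
			AccessId: accessID,
			State:    "INACTIVE", // one of ACTIVE, INACTIVE, DELETED
		},
	}
}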
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` -} - -func (x *UpdateHmacKeyRequest) Reset() { - *x = UpdateHmacKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateHmacKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateHmacKeyRequest) ProtoMessage() {} - -func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} -} - -func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { - if x != nil { - return x.HmacKey - } - return nil -} - -func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -// Parameters that can be passed to any object request. -type CommonObjectRequestParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Encryption algorithm used with the Customer-Supplied Encryption Keys - // feature. - EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` - // Encryption key used with the Customer-Supplied Encryption Keys feature. - // In raw bytes format (not base64-encoded). - EncryptionKeyBytes []byte `protobuf:"bytes,4,opt,name=encryption_key_bytes,json=encryptionKeyBytes,proto3" json:"encryption_key_bytes,omitempty"` - // SHA256 hash of encryption key used with the Customer-Supplied Encryption - // Keys feature. - EncryptionKeySha256Bytes []byte `protobuf:"bytes,5,opt,name=encryption_key_sha256_bytes,json=encryptionKeySha256Bytes,proto3" json:"encryption_key_sha256_bytes,omitempty"` -} - -func (x *CommonObjectRequestParams) Reset() { - *x = CommonObjectRequestParams{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CommonObjectRequestParams) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CommonObjectRequestParams) ProtoMessage() {} - -func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. 
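CommonObjectRequestParams above carries the Customer-Supplied Encryption Key triplet; both key fields are raw bytes, not base64. A sketch deriving the SHA-256 companion field from the key; `"AES256"` as the algorithm name is an assumption, not stated in this hunk:

package storageexample

import (
	"crypto/sha256"

	storagepb "google.golang.org/genproto/googleapis/storage/v2" // assumed import path
)

// csekParams builds the common params for a request using a
// customer-supplied key; the SHA-256 digest is raw, like the key itself.
func csekParams(key []byte) *storagepb.CommonObjectRequestParams {
	sum := sha256.Sum256(key)
	return &storagepb.CommonObjectRequestParams{
		EncryptionAlgorithm:      "AES256", // assumed CSEK algorithm name
		EncryptionKeyBytes:       key,
		EncryptionKeySha256Bytes: sum[:],
	}
}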
-func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} -} - -func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { - if x != nil { - return x.EncryptionAlgorithm - } - return "" -} - -func (x *CommonObjectRequestParams) GetEncryptionKeyBytes() []byte { - if x != nil { - return x.EncryptionKeyBytes - } - return nil -} - -func (x *CommonObjectRequestParams) GetEncryptionKeySha256Bytes() []byte { - if x != nil { - return x.EncryptionKeySha256Bytes - } - return nil -} - -// Shared constants. -type ServiceConstants struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ServiceConstants) Reset() { - *x = ServiceConstants{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceConstants) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceConstants) ProtoMessage() {} - -func (x *ServiceConstants) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. -func (*ServiceConstants) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} -} - -// A bucket. -type Bucket struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Immutable. The name of the bucket. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Output only. The user-chosen part of the bucket name. The `{bucket}` - // portion of the `name` field. For globally unique buckets, this is equal to - // the "bucket name" of other Cloud Storage APIs. Example: "pub". - BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` - // The etag of the bucket. - // If included in the metadata of an UpdateBucketRequest, the operation will - // only be performed if the etag matches that of the bucket. - Etag string `protobuf:"bytes,29,opt,name=etag,proto3" json:"etag,omitempty"` - // Immutable. The project which owns this bucket, in the format of - // "projects/". - // can be the project ID or project number. - Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` - // Output only. The metadata generation of this bucket. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` - // Immutable. The location of the bucket. Object data for objects in the - // bucket resides in physical storage within this region. Defaults to `US`. - // See the - // [https://developers.google.com/storage/docs/concepts-techniques#specifyinglocations"][developer's - // guide] for the authoritative list. Attempting to update this field after - // the bucket is created will result in an error. - Location string `protobuf:"bytes,5,opt,name=location,proto3" json:"location,omitempty"` - // Output only. 
The location type of the bucket (region, dual-region, - // multi-region, etc). - LocationType string `protobuf:"bytes,6,opt,name=location_type,json=locationType,proto3" json:"location_type,omitempty"` - // The bucket's default storage class, used whenever no storageClass is - // specified for a newly-created object. This defines how objects in the - // bucket are stored and determines the SLA and the cost of storage. - // If this value is not specified when the bucket is created, it will default - // to `STANDARD`. For more information, see - // https://developers.google.com/storage/docs/storage-classes. - StorageClass string `protobuf:"bytes,7,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` - // The recovery point objective for cross-region replication of the bucket. - // Applicable only for dual- and multi-region buckets. "DEFAULT" uses default - // replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region - // buckets only. If rpo is not specified when the bucket is created, it - // defaults to "DEFAULT". For more information, see - // https://cloud.google.com/storage/docs/turbo-replication. - Rpo string `protobuf:"bytes,27,opt,name=rpo,proto3" json:"rpo,omitempty"` - // Access controls on the bucket. - // If iam_config.uniform_bucket_level_access is enabled on this bucket, - // requests to set, read, or modify acl is an error. - Acl []*BucketAccessControl `protobuf:"bytes,8,rep,name=acl,proto3" json:"acl,omitempty"` - // Default access controls to apply to new objects when no ACL is provided. - // If iam_config.uniform_bucket_level_access is enabled on this bucket, - // requests to set, read, or modify acl is an error. - DefaultObjectAcl []*ObjectAccessControl `protobuf:"bytes,9,rep,name=default_object_acl,json=defaultObjectAcl,proto3" json:"default_object_acl,omitempty"` - // The bucket's lifecycle config. See - // [https://developers.google.com/storage/docs/lifecycle]Lifecycle Management] - // for more information. - Lifecycle *Bucket_Lifecycle `protobuf:"bytes,10,opt,name=lifecycle,proto3" json:"lifecycle,omitempty"` - // Output only. The creation time of the bucket. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] - // (CORS) config. - Cors []*Bucket_Cors `protobuf:"bytes,12,rep,name=cors,proto3" json:"cors,omitempty"` - // Output only. The modification time of the bucket. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - UpdateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` - // The default value for event-based hold on newly created objects in this - // bucket. Event-based hold is a way to retain objects indefinitely until an - // event occurs, signified by the - // hold's release. After being released, such objects will be subject to - // bucket-level retention (if any). One sample use case of this flag is for - // banks to hold loan documents for at least 3 years after loan is paid in - // full. Here, bucket-level retention is 3 years and the event is loan being - // paid in full. 
In this example, these objects will be held intact for any - // number of years until the event has occurred (event-based hold on the - // object is released) and then 3 more years after that. That means retention - // duration of the objects begins from the moment event-based hold - // transitioned from true to false. Objects under event-based hold cannot be - // deleted, overwritten or archived until the hold is removed. - DefaultEventBasedHold bool `protobuf:"varint,14,opt,name=default_event_based_hold,json=defaultEventBasedHold,proto3" json:"default_event_based_hold,omitempty"` - // User-provided labels, in key/value pairs. - Labels map[string]string `protobuf:"bytes,15,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The bucket's website config, controlling how the service behaves - // when accessing bucket contents as a web site. See the - // [https://cloud.google.com/storage/docs/static-website][Static Website - // Examples] for more information. - Website *Bucket_Website `protobuf:"bytes,16,opt,name=website,proto3" json:"website,omitempty"` - // The bucket's versioning config. - Versioning *Bucket_Versioning `protobuf:"bytes,17,opt,name=versioning,proto3" json:"versioning,omitempty"` - // The bucket's logging config, which defines the destination bucket - // and name prefix (if any) for the current bucket's logs. - Logging *Bucket_Logging `protobuf:"bytes,18,opt,name=logging,proto3" json:"logging,omitempty"` - // Output only. The owner of the bucket. This is always the project team's - // owner group. - Owner *Owner `protobuf:"bytes,19,opt,name=owner,proto3" json:"owner,omitempty"` - // Encryption config for a bucket. - Encryption *Bucket_Encryption `protobuf:"bytes,20,opt,name=encryption,proto3" json:"encryption,omitempty"` - // The bucket's billing config. - Billing *Bucket_Billing `protobuf:"bytes,21,opt,name=billing,proto3" json:"billing,omitempty"` - // The bucket's retention policy. The retention policy enforces a minimum - // retention time for all objects contained in the bucket, based on their - // creation time. Any attempt to overwrite or delete objects younger than the - // retention period will result in a PERMISSION_DENIED error. An unlocked - // retention policy can be modified or removed from the bucket via a - // storage.buckets.update operation. A locked retention policy cannot be - // removed or shortened in duration for the lifetime of the bucket. - // Attempting to remove or decrease period of a locked retention policy will - // result in a PERMISSION_DENIED error. - RetentionPolicy *Bucket_RetentionPolicy `protobuf:"bytes,22,opt,name=retention_policy,json=retentionPolicy,proto3" json:"retention_policy,omitempty"` - // The bucket's IAM config. - IamConfig *Bucket_IamConfig `protobuf:"bytes,23,opt,name=iam_config,json=iamConfig,proto3" json:"iam_config,omitempty"` - // Reserved for future use. - SatisfiesPzs bool `protobuf:"varint,25,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"` - // Configuration that, if present, specifies the data placement for a - // [https://cloud.google.com/storage/docs/use-dual-regions][Dual Region]. - CustomPlacementConfig *Bucket_CustomPlacementConfig `protobuf:"bytes,26,opt,name=custom_placement_config,json=customPlacementConfig,proto3" json:"custom_placement_config,omitempty"` - // The bucket's Autoclass configuration. 
If there is no configuration, the - // Autoclass feature will be disabled and have no effect on the bucket. - Autoclass *Bucket_Autoclass `protobuf:"bytes,28,opt,name=autoclass,proto3" json:"autoclass,omitempty"` -} - -func (x *Bucket) Reset() { - *x = Bucket{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket) ProtoMessage() {} - -func (x *Bucket) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. -func (*Bucket) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} -} - -func (x *Bucket) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Bucket) GetBucketId() string { - if x != nil { - return x.BucketId - } - return "" -} - -func (x *Bucket) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -func (x *Bucket) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *Bucket) GetMetageneration() int64 { - if x != nil { - return x.Metageneration - } - return 0 -} - -func (x *Bucket) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *Bucket) GetLocationType() string { - if x != nil { - return x.LocationType - } - return "" -} - -func (x *Bucket) GetStorageClass() string { - if x != nil { - return x.StorageClass - } - return "" -} - -func (x *Bucket) GetRpo() string { - if x != nil { - return x.Rpo - } - return "" -} - -func (x *Bucket) GetAcl() []*BucketAccessControl { - if x != nil { - return x.Acl - } - return nil -} - -func (x *Bucket) GetDefaultObjectAcl() []*ObjectAccessControl { - if x != nil { - return x.DefaultObjectAcl - } - return nil -} - -func (x *Bucket) GetLifecycle() *Bucket_Lifecycle { - if x != nil { - return x.Lifecycle - } - return nil -} - -func (x *Bucket) GetCreateTime() *timestamppb.Timestamp { - if x != nil { - return x.CreateTime - } - return nil -} - -func (x *Bucket) GetCors() []*Bucket_Cors { - if x != nil { - return x.Cors - } - return nil -} - -func (x *Bucket) GetUpdateTime() *timestamppb.Timestamp { - if x != nil { - return x.UpdateTime - } - return nil -} - -func (x *Bucket) GetDefaultEventBasedHold() bool { - if x != nil { - return x.DefaultEventBasedHold - } - return false -} - -func (x *Bucket) GetLabels() map[string]string { - if x != nil { - return x.Labels - } - return nil -} - -func (x *Bucket) GetWebsite() *Bucket_Website { - if x != nil { - return x.Website - } - return nil -} - -func (x *Bucket) GetVersioning() *Bucket_Versioning { - if x != nil { - return x.Versioning - } - return nil -} - -func (x *Bucket) GetLogging() *Bucket_Logging { - if x != nil { - return x.Logging - } - return nil -} - -func (x *Bucket) GetOwner() *Owner { - if x != nil { - return x.Owner - } - return nil -} - -func (x *Bucket) GetEncryption() *Bucket_Encryption { - if x != nil { - return x.Encryption - } - return nil -} - -func (x *Bucket) GetBilling() *Bucket_Billing { - if x != nil { - return x.Billing - } - return nil -} - -func (x *Bucket) GetRetentionPolicy() 
*Bucket_RetentionPolicy { - if x != nil { - return x.RetentionPolicy - } - return nil -} - -func (x *Bucket) GetIamConfig() *Bucket_IamConfig { - if x != nil { - return x.IamConfig - } - return nil -} - -func (x *Bucket) GetSatisfiesPzs() bool { - if x != nil { - return x.SatisfiesPzs - } - return false -} - -func (x *Bucket) GetCustomPlacementConfig() *Bucket_CustomPlacementConfig { - if x != nil { - return x.CustomPlacementConfig - } - return nil -} - -func (x *Bucket) GetAutoclass() *Bucket_Autoclass { - if x != nil { - return x.Autoclass - } - return nil -} - -// An access-control entry. -type BucketAccessControl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The access permission for the entity. - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - // The ID of the access-control entry. - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - // The entity holding the permission, in one of the following forms: - // * `user-{userid}` - // * `user-{email}` - // * `group-{groupid}` - // * `group-{email}` - // * `domain-{domain}` - // * `project-{team}-{projectnumber}` - // * `project-{team}-{projectid}` - // * `allUsers` - // * `allAuthenticatedUsers` - // Examples: - // * The user `liz@example.com` would be `user-liz@example.com`. - // * The group `example@googlegroups.com` would be - // `group-example@googlegroups.com` - // * All members of the Google Apps for Business domain `example.com` would be - // `domain-example.com` - // For project entities, `project-{team}-{projectnumber}` format will be - // returned on response. - Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty"` - // Output only. The alternative entity format, if exists. For project - // entities, `project-{team}-{projectid}` format will be returned on response. - EntityAlt string `protobuf:"bytes,9,opt,name=entity_alt,json=entityAlt,proto3" json:"entity_alt,omitempty"` - // The ID for the entity, if any. - EntityId string `protobuf:"bytes,4,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` - // The etag of the BucketAccessControl. - // If included in the metadata of an update or delete request message, the - // operation operation will only be performed if the etag matches that of the - // bucket's BucketAccessControl. - Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` - // The email address associated with the entity, if any. - Email string `protobuf:"bytes,5,opt,name=email,proto3" json:"email,omitempty"` - // The domain associated with the entity, if any. - Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` - // The project team associated with the entity, if any. 
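The entity comment above enumerates the accepted forms. A sketch of one `group-{email}` entry; the `READER` role string follows the JSON API's role names, which this hunk does not itself list, and the import path is assumed:

package storageexample

import (
	storagepb "google.golang.org/genproto/googleapis/storage/v2" // assumed import path
)

// groupReader grants a Google group read access on a bucket ACL.
func groupReader(email string) *storagepb.BucketAccessControl {
	return &storagepb.BucketAccessControl{
		Role:   "READER", // assumed role name from the JSON API
		Entity: "group-" + email,
	}
}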
- ProjectTeam *ProjectTeam `protobuf:"bytes,7,opt,name=project_team,json=projectTeam,proto3" json:"project_team,omitempty"` -} - -func (x *BucketAccessControl) Reset() { - *x = BucketAccessControl{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BucketAccessControl) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BucketAccessControl) ProtoMessage() {} - -func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead. -func (*BucketAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} -} - -func (x *BucketAccessControl) GetRole() string { - if x != nil { - return x.Role - } - return "" -} - -func (x *BucketAccessControl) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *BucketAccessControl) GetEntity() string { - if x != nil { - return x.Entity - } - return "" -} - -func (x *BucketAccessControl) GetEntityAlt() string { - if x != nil { - return x.EntityAlt - } - return "" -} - -func (x *BucketAccessControl) GetEntityId() string { - if x != nil { - return x.EntityId - } - return "" -} - -func (x *BucketAccessControl) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -func (x *BucketAccessControl) GetEmail() string { - if x != nil { - return x.Email - } - return "" -} - -func (x *BucketAccessControl) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *BucketAccessControl) GetProjectTeam() *ProjectTeam { - if x != nil { - return x.ProjectTeam - } - return nil -} - -// Message used to convey content being read or written, along with an optional -// checksum. -type ChecksummedData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The data. - Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` - // If set, the CRC32C digest of the content field. - Crc32C *uint32 `protobuf:"fixed32,2,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` -} - -func (x *ChecksummedData) Reset() { - *x = ChecksummedData{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ChecksummedData) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ChecksummedData) ProtoMessage() {} - -func (x *ChecksummedData) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. 
-func (*ChecksummedData) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} -} - -func (x *ChecksummedData) GetContent() []byte { - if x != nil { - return x.Content - } - return nil -} - -func (x *ChecksummedData) GetCrc32C() uint32 { - if x != nil && x.Crc32C != nil { - return *x.Crc32C - } - return 0 -} - -// Message used for storing full (not subrange) object checksums. -type ObjectChecksums struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // CRC32C digest of the object data. Computed by the Cloud Storage service for - // all written objects. - // If set in an WriteObjectRequest, service will validate that the stored - // object matches this checksum. - Crc32C *uint32 `protobuf:"fixed32,1,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` - // 128 bit MD5 hash of the object data. - // For more information about using the MD5 hash, see - // [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and - // ETags: Best Practices]. - // Not all objects will provide an MD5 hash. For example, composite objects - // provide only crc32c hashes. - // This value is equivalent to running `cat object.txt | openssl md5 -binary` - Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"` -} - -func (x *ObjectChecksums) Reset() { - *x = ObjectChecksums{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ObjectChecksums) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ObjectChecksums) ProtoMessage() {} - -func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. -func (*ObjectChecksums) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} -} - -func (x *ObjectChecksums) GetCrc32C() uint32 { - if x != nil && x.Crc32C != nil { - return *x.Crc32C - } - return 0 -} - -func (x *ObjectChecksums) GetMd5Hash() []byte { - if x != nil { - return x.Md5Hash - } - return nil -} - -// Hmac Key Metadata, which includes all information other than the secret. -type HmacKeyMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Immutable. Resource name ID of the key in the format - // /. - // can be the project ID or project number. - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Immutable. Globally unique id for keys. - AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Immutable. Identifies the project that owns the service account of the - // specified HMAC key, in the format "projects/". - // can be the project ID or project number. - Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` - // Output only. Email of the service account the key authenticates as. 
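ObjectChecksums, defined just above, pairs a CRC32C digest with an optional MD5; its comments note that the service computes CRC32C for every written object and that the MD5 matches `openssl md5 -binary`. A client-side sketch using only the standard library (import path for `storagepb` assumed):

package storageexample

import (
	"crypto/md5"
	"hash/crc32"

	storagepb "google.golang.org/genproto/googleapis/storage/v2" // assumed import path
)

// checksumsFor computes both digests for upload validation. CRC32C uses
// the Castagnoli polynomial, as Cloud Storage does.
func checksumsFor(data []byte) *storagepb.ObjectChecksums {
	crc := crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	sum := md5.Sum(data)
	return &storagepb.ObjectChecksums{
		Crc32C:  &crc,
		Md5Hash: sum[:],
	}
}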
- ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` - // State of the key. One of ACTIVE, INACTIVE, or DELETED. - // Writable, can be updated by UpdateHmacKey operation. - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` - // Output only. The creation time of the HMAC key. - CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // Output only. The last modification time of the HMAC key metadata. - UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` - // The etag of the HMAC key. - Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` -} - -func (x *HmacKeyMetadata) Reset() { - *x = HmacKeyMetadata{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HmacKeyMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HmacKeyMetadata) ProtoMessage() {} - -func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead. -func (*HmacKeyMetadata) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} -} - -func (x *HmacKeyMetadata) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *HmacKeyMetadata) GetAccessId() string { - if x != nil { - return x.AccessId - } - return "" -} - -func (x *HmacKeyMetadata) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *HmacKeyMetadata) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -func (x *HmacKeyMetadata) GetState() string { - if x != nil { - return x.State - } - return "" -} - -func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp { - if x != nil { - return x.CreateTime - } - return nil -} - -func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp { - if x != nil { - return x.UpdateTime - } - return nil -} - -func (x *HmacKeyMetadata) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -// A directive to publish Pub/Sub notifications upon changes to a bucket. -type Notification struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The resource name of this notification. - // Format: - // `projects/{project}/buckets/{bucket}/notificationConfigs/{notification}` - // The `{project}` portion may be `_` for globally unique buckets. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. The Pub/Sub topic to which this subscription publishes. Formatted - // as: - // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' - Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` - // The etag of the Notification. 
- // If included in the metadata of GetNotificationRequest, the operation will - // only be performed if the etag matches that of the Notification. - Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"` - // If present, only send notifications about listed event types. If empty, - // sent notifications for all event types. - EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"` - // A list of additional attributes to attach to each Pub/Sub - // message published for this notification subscription. - CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // If present, only apply this notification config to object names that - // begin with this prefix. - ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"` - // Required. The desired content of the Payload. - PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"` -} - -func (x *Notification) Reset() { - *x = Notification{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Notification) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Notification) ProtoMessage() {} - -func (x *Notification) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Notification.ProtoReflect.Descriptor instead. -func (*Notification) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} -} - -func (x *Notification) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Notification) GetTopic() string { - if x != nil { - return x.Topic - } - return "" -} - -func (x *Notification) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -func (x *Notification) GetEventTypes() []string { - if x != nil { - return x.EventTypes - } - return nil -} - -func (x *Notification) GetCustomAttributes() map[string]string { - if x != nil { - return x.CustomAttributes - } - return nil -} - -func (x *Notification) GetObjectNamePrefix() string { - if x != nil { - return x.ObjectNamePrefix - } - return "" -} - -func (x *Notification) GetPayloadFormat() string { - if x != nil { - return x.PayloadFormat - } - return "" -} - -// Describes the Customer-Supplied Encryption Key mechanism used to store an -// Object's data at rest. -type CustomerEncryption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The encryption algorithm. - EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` - // SHA256 hash value of the encryption key. - // In raw bytes format (not base64-encoded). 
- KeySha256Bytes []byte `protobuf:"bytes,3,opt,name=key_sha256_bytes,json=keySha256Bytes,proto3" json:"key_sha256_bytes,omitempty"` -} - -func (x *CustomerEncryption) Reset() { - *x = CustomerEncryption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CustomerEncryption) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CustomerEncryption) ProtoMessage() {} - -func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. -func (*CustomerEncryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} -} - -func (x *CustomerEncryption) GetEncryptionAlgorithm() string { - if x != nil { - return x.EncryptionAlgorithm - } - return "" -} - -func (x *CustomerEncryption) GetKeySha256Bytes() []byte { - if x != nil { - return x.KeySha256Bytes - } - return nil -} - -// An object. -type Object struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Immutable. The name of this object. Nearly any sequence of unicode - // characters is valid. See - // [Guidelines](https://cloud.google.com/storage/docs/objects#naming). - // Example: `test.txt` - // The `name` field by itself does not uniquely identify a Cloud Storage - // object. A Cloud Storage object is uniquely identified by the tuple of - // (bucket, object, generation). - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Immutable. The name of the bucket containing this object. - Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` - // The etag of the object. - // If included in the metadata of an update or delete request message, the - // operation will only be performed if the etag matches that of the live - // object. - Etag string `protobuf:"bytes,27,opt,name=etag,proto3" json:"etag,omitempty"` - // Immutable. The content generation of this object. Used for object - // versioning. Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` - // Output only. The version of the metadata for this generation of this - // object. Used for preconditions and for detecting changes in metadata. A - // metageneration number is only meaningful in the context of a particular - // generation of a particular object. Attempting to set or update this field - // will result in a [FieldViolation][google.rpc.BadRequest.FieldViolation]. - Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` - // Storage class of the object. - StorageClass string `protobuf:"bytes,5,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` - // Output only. Content-Length of the object data in bytes, matching - // [https://tools.ietf.org/html/rfc7230#section-3.3.2][RFC 7230 §3.3.2]. 
- // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - Size int64 `protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` - // Content-Encoding of the object data, matching - // [https://tools.ietf.org/html/rfc7231#section-3.1.2.2][RFC 7231 §3.1.2.2] - ContentEncoding string `protobuf:"bytes,7,opt,name=content_encoding,json=contentEncoding,proto3" json:"content_encoding,omitempty"` - // Content-Disposition of the object data, matching - // [https://tools.ietf.org/html/rfc6266][RFC 6266]. - ContentDisposition string `protobuf:"bytes,8,opt,name=content_disposition,json=contentDisposition,proto3" json:"content_disposition,omitempty"` - // Cache-Control directive for the object data, matching - // [https://tools.ietf.org/html/rfc7234#section-5.2"][RFC 7234 §5.2]. - // If omitted, and the object is accessible to all anonymous users, the - // default will be `public, max-age=3600`. - CacheControl string `protobuf:"bytes,9,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` - // Access controls on the object. - // If iam_config.uniform_bucket_level_access is enabled on the parent - // bucket, requests to set, read, or modify acl is an error. - Acl []*ObjectAccessControl `protobuf:"bytes,10,rep,name=acl,proto3" json:"acl,omitempty"` - // Content-Language of the object data, matching - // [https://tools.ietf.org/html/rfc7231#section-3.1.3.2][RFC 7231 §3.1.3.2]. - ContentLanguage string `protobuf:"bytes,11,opt,name=content_language,json=contentLanguage,proto3" json:"content_language,omitempty"` - // Output only. The deletion time of the object. Will be returned if and only - // if this version of the object has been deleted. Attempting to set or update - // this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` - // Content-Type of the object data, matching - // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. - // If an object is stored without a Content-Type, it is served as - // `application/octet-stream`. - ContentType string `protobuf:"bytes,13,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` - // Output only. The creation time of the object. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - CreateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // Output only. Number of underlying components that make up this object. - // Components are accumulated by compose operations. Attempting to set or - // update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"` - // Output only. Hashes for the data part of this object. This field is used - // for output only and will be silently ignored if provided in requests. - Checksums *ObjectChecksums `protobuf:"bytes,16,opt,name=checksums,proto3" json:"checksums,omitempty"` - // Output only. The modification time of the object metadata. - // Set initially to object creation time and then updated whenever any - // metadata of the object changes. 
This includes changes made by a requester, - // such as modifying custom metadata, as well as changes made by Cloud Storage - // on behalf of a requester, such as changing the storage class based on an - // Object Lifecycle Configuration. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` - // Cloud KMS Key used to encrypt this object, if the object is encrypted by - // such a key. - KmsKey string `protobuf:"bytes,18,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` - // Output only. The time at which the object's storage class was last changed. - // When the object is initially created, it will be set to time_created. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - UpdateStorageClassTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` - // Whether an object is under temporary hold. While this flag is set to true, - // the object is protected against deletion and overwrites. A common use case - // of this flag is regulatory investigations where objects need to be retained - // while the investigation is ongoing. Note that unlike event-based hold, - // temporary hold does not impact retention expiration time of an object. - TemporaryHold bool `protobuf:"varint,20,opt,name=temporary_hold,json=temporaryHold,proto3" json:"temporary_hold,omitempty"` - // A server-determined value that specifies the earliest time that the - // object's retention period expires. - // Note 1: This field is not provided for objects with an active event-based - // hold, since retention expiration is unknown until the hold is removed. - // Note 2: This value can be provided even when temporary hold is set (so that - // the user can reason about policy without having to first unset the - // temporary hold). - RetentionExpireTime *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"` - // User-provided metadata, in key/value pairs. - Metadata map[string]string `protobuf:"bytes,22,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Whether an object is under event-based hold. - // An event-based hold is a way to force the retention of an object until - // after some event occurs. Once the hold is released by explicitly setting - // this field to false, the object will become subject to any bucket-level - // retention policy, except that the retention duration will be calculated - // from the time the event based hold was lifted, rather than the time the - // object was created. - // - // In a WriteObject request, not setting this field implies that the value - // should be taken from the parent bucket's "default_event_based_hold" field. - // In a response, this field will always be set to true or false. - EventBasedHold *bool `protobuf:"varint,23,opt,name=event_based_hold,json=eventBasedHold,proto3,oneof" json:"event_based_hold,omitempty"` - // Output only. The owner of the object. This will always be the uploader of - // the object. 
Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - Owner *Owner `protobuf:"bytes,24,opt,name=owner,proto3" json:"owner,omitempty"` - // Metadata of Customer-Supplied Encryption Key, if the object is encrypted by - // such a key. - CustomerEncryption *CustomerEncryption `protobuf:"bytes,25,opt,name=customer_encryption,json=customerEncryption,proto3" json:"customer_encryption,omitempty"` - // A user-specified timestamp set on an object. - CustomTime *timestamppb.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` -} - -func (x *Object) Reset() { - *x = Object{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Object) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Object) ProtoMessage() {} - -func (x *Object) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Object.ProtoReflect.Descriptor instead. -func (*Object) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} -} - -func (x *Object) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Object) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *Object) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -func (x *Object) GetGeneration() int64 { - if x != nil { - return x.Generation - } - return 0 -} - -func (x *Object) GetMetageneration() int64 { - if x != nil { - return x.Metageneration - } - return 0 -} - -func (x *Object) GetStorageClass() string { - if x != nil { - return x.StorageClass - } - return "" -} - -func (x *Object) GetSize() int64 { - if x != nil { - return x.Size - } - return 0 -} - -func (x *Object) GetContentEncoding() string { - if x != nil { - return x.ContentEncoding - } - return "" -} - -func (x *Object) GetContentDisposition() string { - if x != nil { - return x.ContentDisposition - } - return "" -} - -func (x *Object) GetCacheControl() string { - if x != nil { - return x.CacheControl - } - return "" -} - -func (x *Object) GetAcl() []*ObjectAccessControl { - if x != nil { - return x.Acl - } - return nil -} - -func (x *Object) GetContentLanguage() string { - if x != nil { - return x.ContentLanguage - } - return "" -} - -func (x *Object) GetDeleteTime() *timestamppb.Timestamp { - if x != nil { - return x.DeleteTime - } - return nil -} - -func (x *Object) GetContentType() string { - if x != nil { - return x.ContentType - } - return "" -} - -func (x *Object) GetCreateTime() *timestamppb.Timestamp { - if x != nil { - return x.CreateTime - } - return nil -} - -func (x *Object) GetComponentCount() int32 { - if x != nil { - return x.ComponentCount - } - return 0 -} - -func (x *Object) GetChecksums() *ObjectChecksums { - if x != nil { - return x.Checksums - } - return nil -} - -func (x *Object) GetUpdateTime() *timestamppb.Timestamp { - if x != nil { - return x.UpdateTime - } - return nil -} - -func (x *Object) GetKmsKey() string { - if x != nil { - return x.KmsKey - } - return "" -} - -func (x *Object) GetUpdateStorageClassTime() 
*timestamppb.Timestamp { - if x != nil { - return x.UpdateStorageClassTime - } - return nil -} - -func (x *Object) GetTemporaryHold() bool { - if x != nil { - return x.TemporaryHold - } - return false -} - -func (x *Object) GetRetentionExpireTime() *timestamppb.Timestamp { - if x != nil { - return x.RetentionExpireTime - } - return nil -} - -func (x *Object) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *Object) GetEventBasedHold() bool { - if x != nil && x.EventBasedHold != nil { - return *x.EventBasedHold - } - return false -} - -func (x *Object) GetOwner() *Owner { - if x != nil { - return x.Owner - } - return nil -} - -func (x *Object) GetCustomerEncryption() *CustomerEncryption { - if x != nil { - return x.CustomerEncryption - } - return nil -} - -func (x *Object) GetCustomTime() *timestamppb.Timestamp { - if x != nil { - return x.CustomTime - } - return nil -} - -// An access-control entry. -type ObjectAccessControl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The access permission for the entity. - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - // The ID of the access-control entry. - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - // The entity holding the permission, in one of the following forms: - // * `user-{userid}` - // * `user-{email}` - // * `group-{groupid}` - // * `group-{email}` - // * `domain-{domain}` - // * `project-{team}-{projectnumber}` - // * `project-{team}-{projectid}` - // * `allUsers` - // * `allAuthenticatedUsers` - // Examples: - // * The user `liz@example.com` would be `user-liz@example.com`. - // * The group `example@googlegroups.com` would be - // `group-example@googlegroups.com`. - // * All members of the Google Apps for Business domain `example.com` would be - // `domain-example.com`. - // For project entities, `project-{team}-{projectnumber}` format will be - // returned on response. - Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty"` - // Output only. The alternative entity format, if exists. For project - // entities, `project-{team}-{projectid}` format will be returned on response. - EntityAlt string `protobuf:"bytes,9,opt,name=entity_alt,json=entityAlt,proto3" json:"entity_alt,omitempty"` - // The ID for the entity, if any. - EntityId string `protobuf:"bytes,4,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` - // The etag of the ObjectAccessControl. - // If included in the metadata of an update or delete request message, the - // operation will only be performed if the etag matches that of the live - // object's ObjectAccessControl. - Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` - // The email address associated with the entity, if any. - Email string `protobuf:"bytes,5,opt,name=email,proto3" json:"email,omitempty"` - // The domain associated with the entity, if any. - Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` - // The project team associated with the entity, if any. 
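// Illustrative sketch (not part of the generated file or of this diff): the
// generated getters above are nil-safe, so an *Object can be summarized
// without nil checks on the message itself. Assumes imports "fmt" and "time"
// plus this package aliased as storagepb; the helper name is hypothetical.
func describeObject(obj *storagepb.Object) string {
	created := "unknown"
	if ts := obj.GetCreateTime(); ts != nil {
		created = ts.AsTime().Format(time.RFC3339) // *timestamppb.Timestamp -> time.Time
	}
	return fmt.Sprintf("%s/%s gen=%d size=%d bytes, created %s",
		obj.GetBucket(), obj.GetName(), obj.GetGeneration(), obj.GetSize(), created)
}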
- ProjectTeam *ProjectTeam `protobuf:"bytes,7,opt,name=project_team,json=projectTeam,proto3" json:"project_team,omitempty"` -} - -func (x *ObjectAccessControl) Reset() { - *x = ObjectAccessControl{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ObjectAccessControl) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ObjectAccessControl) ProtoMessage() {} - -func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead. -func (*ObjectAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} -} - -func (x *ObjectAccessControl) GetRole() string { - if x != nil { - return x.Role - } - return "" -} - -func (x *ObjectAccessControl) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *ObjectAccessControl) GetEntity() string { - if x != nil { - return x.Entity - } - return "" -} - -func (x *ObjectAccessControl) GetEntityAlt() string { - if x != nil { - return x.EntityAlt - } - return "" -} - -func (x *ObjectAccessControl) GetEntityId() string { - if x != nil { - return x.EntityId - } - return "" -} - -func (x *ObjectAccessControl) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -func (x *ObjectAccessControl) GetEmail() string { - if x != nil { - return x.Email - } - return "" -} - -func (x *ObjectAccessControl) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *ObjectAccessControl) GetProjectTeam() *ProjectTeam { - if x != nil { - return x.ProjectTeam - } - return nil -} - -// The result of a call to Objects.ListObjects -type ListObjectsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of items. - Objects []*Object `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"` - // The list of prefixes of objects matching-but-not-listed up to and including - // the requested delimiter. - Prefixes []string `protobuf:"bytes,2,rep,name=prefixes,proto3" json:"prefixes,omitempty"` - // The continuation token, used to page through large result sets. Provide - // this value in a subsequent request to return the next page of results. 
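// Illustrative sketch (not part of the generated file or of this diff): an
// ACL entry granting a group READER access, using one of the entity forms
// listed above. The storagepb alias and the group address are placeholders.
var exampleACL = &storagepb.ObjectAccessControl{
	Role:   "READER",
	Entity: "group-example@googlegroups.com",
}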
- NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListObjectsResponse) Reset() { - *x = ListObjectsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListObjectsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListObjectsResponse) ProtoMessage() {} - -func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. -func (*ListObjectsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} -} - -func (x *ListObjectsResponse) GetObjects() []*Object { - if x != nil { - return x.Objects - } - return nil -} - -func (x *ListObjectsResponse) GetPrefixes() []string { - if x != nil { - return x.Prefixes - } - return nil -} - -func (x *ListObjectsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Represents the Viewers, Editors, or Owners of a given project. -type ProjectTeam struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The project number. - ProjectNumber string `protobuf:"bytes,1,opt,name=project_number,json=projectNumber,proto3" json:"project_number,omitempty"` - // The team. - Team string `protobuf:"bytes,2,opt,name=team,proto3" json:"team,omitempty"` -} - -func (x *ProjectTeam) Reset() { - *x = ProjectTeam{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ProjectTeam) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProjectTeam) ProtoMessage() {} - -func (x *ProjectTeam) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead. -func (*ProjectTeam) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} -} - -func (x *ProjectTeam) GetProjectNumber() string { - if x != nil { - return x.ProjectNumber - } - return "" -} - -func (x *ProjectTeam) GetTeam() string { - if x != nil { - return x.Team - } - return "" -} - -// A service account, owned by Cloud Storage, which may be used when taking -// action on behalf of a given project, for example to publish Pub/Sub -// notifications or to retrieve security keys. -type ServiceAccount struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The email address of the service account.
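// Illustrative sketch (not part of the generated file or of this diff):
// next_page_token drives paging. The listObjects parameter is a hypothetical
// stand-in for whatever transport actually issues the ListObjects RPC;
// assumes import "context" and this package aliased as storagepb.
func listAll(ctx context.Context, bucket string,
	listObjects func(context.Context, string, string) (*storagepb.ListObjectsResponse, error)) error {
	var token string
	for {
		resp, err := listObjects(ctx, bucket, token)
		if err != nil {
			return err
		}
		for _, obj := range resp.GetObjects() {
			_ = obj // handle each object here
		}
		if token = resp.GetNextPageToken(); token == "" {
			return nil // an empty token means the listing is complete
		}
	}
}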
- EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"` -} - -func (x *ServiceAccount) Reset() { - *x = ServiceAccount{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceAccount) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceAccount) ProtoMessage() {} - -func (x *ServiceAccount) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead. -func (*ServiceAccount) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} -} - -func (x *ServiceAccount) GetEmailAddress() string { - if x != nil { - return x.EmailAddress - } - return "" -} - -// The owner of a specific resource. -type Owner struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The entity, in the form `user-`*userId*. - Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` - // The ID for the entity. - EntityId string `protobuf:"bytes,2,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` -} - -func (x *Owner) Reset() { - *x = Owner{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Owner) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Owner) ProtoMessage() {} - -func (x *Owner) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Owner.ProtoReflect.Descriptor instead. -func (*Owner) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} -} - -func (x *Owner) GetEntity() string { - if x != nil { - return x.Entity - } - return "" -} - -func (x *Owner) GetEntityId() string { - if x != nil { - return x.EntityId - } - return "" -} - -// Specifies a requested range of bytes to download. -type ContentRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The starting offset of the object data. - Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - // The ending offset of the object data. - End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` - // The complete length of the object data. 
- CompleteLength int64 `protobuf:"varint,3,opt,name=complete_length,json=completeLength,proto3" json:"complete_length,omitempty"` -} - -func (x *ContentRange) Reset() { - *x = ContentRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ContentRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ContentRange) ProtoMessage() {} - -func (x *ContentRange) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ContentRange.ProtoReflect.Descriptor instead. -func (*ContentRange) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} -} - -func (x *ContentRange) GetStart() int64 { - if x != nil { - return x.Start - } - return 0 -} - -func (x *ContentRange) GetEnd() int64 { - if x != nil { - return x.End - } - return 0 -} - -func (x *ContentRange) GetCompleteLength() int64 { - if x != nil { - return x.CompleteLength - } - return 0 -} - -// Description of a source object for a composition request. -type ComposeObjectRequest_SourceObject struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The source object's name. All source objects must reside in the - // same bucket. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The generation of this object to use as the source. - Generation int64 `protobuf:"varint,2,opt,name=generation,proto3" json:"generation,omitempty"` - // Conditions that must be met for this operation to execute. - ObjectPreconditions *ComposeObjectRequest_SourceObject_ObjectPreconditions `protobuf:"bytes,3,opt,name=object_preconditions,json=objectPreconditions,proto3" json:"object_preconditions,omitempty"` -} - -func (x *ComposeObjectRequest_SourceObject) Reset() { - *x = ComposeObjectRequest_SourceObject{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ComposeObjectRequest_SourceObject) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} - -func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead. 
-func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0} -} - -func (x *ComposeObjectRequest_SourceObject) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ComposeObjectRequest_SourceObject) GetGeneration() int64 { - if x != nil { - return x.Generation - } - return 0 -} - -func (x *ComposeObjectRequest_SourceObject) GetObjectPreconditions() *ComposeObjectRequest_SourceObject_ObjectPreconditions { - if x != nil { - return x.ObjectPreconditions - } - return nil -} - -// Preconditions for a source object of a composition request. -type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Only perform the composition if the generation of the source object - // that would be used matches this value. If this value and a generation - // are both specified, they must be the same value or the call will fail. - IfGenerationMatch *int64 `protobuf:"varint,1,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` -} - -func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { - *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} - -func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead. -func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0, 0} -} - -func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -// Billing properties of a bucket. -type Bucket_Billing struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // When set to true, Requester Pays is enabled for this bucket. 
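// Illustrative sketch (not part of the generated file or of this diff): a
// compose source carrying a per-source generation precondition. proto.Int64
// is from google.golang.org/protobuf/proto; the object name and generation
// are placeholders.
var exampleSource = &storagepb.ComposeObjectRequest_SourceObject{
	Name:       "part-0001.bin",
	Generation: 1234567890,
	ObjectPreconditions: &storagepb.ComposeObjectRequest_SourceObject_ObjectPreconditions{
		IfGenerationMatch: proto.Int64(1234567890), // must equal Generation when both are set
	},
}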
- RequesterPays bool `protobuf:"varint,1,opt,name=requester_pays,json=requesterPays,proto3" json:"requester_pays,omitempty"` -} - -func (x *Bucket_Billing) Reset() { - *x = Bucket_Billing{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Billing) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Billing) ProtoMessage() {} - -func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. -func (*Bucket_Billing) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 0} -} - -func (x *Bucket_Billing) GetRequesterPays() bool { - if x != nil { - return x.RequesterPays - } - return false -} - -// Cross-Origin Resource Sharing (CORS) properties for a bucket. -// For more on Cloud Storage and CORS, see -// https://cloud.google.com/storage/docs/cross-origin. -// For more on CORS in general, see https://tools.ietf.org/html/rfc6454. -type Bucket_Cors struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of Origins eligible to receive CORS response headers. See - // [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on origins. - // Note: "*" is permitted in the list of origins, and means "any Origin". - Origin []string `protobuf:"bytes,1,rep,name=origin,proto3" json:"origin,omitempty"` - // The list of HTTP methods on which to include CORS response headers - // (`GET`, `OPTIONS`, `POST`, etc.). Note: "*" is permitted in the list of - // methods, and means "any method". - Method []string `protobuf:"bytes,2,rep,name=method,proto3" json:"method,omitempty"` - // The list of HTTP headers other than the - // [https://www.w3.org/TR/cors/#simple-response-header][simple response - // headers] to give permission for the user-agent to share across domains. - ResponseHeader []string `protobuf:"bytes,3,rep,name=response_header,json=responseHeader,proto3" json:"response_header,omitempty"` - // The value, in seconds, to return in the - // [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age - // header] used in preflight responses. - MaxAgeSeconds int32 `protobuf:"varint,4,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"` -} - -func (x *Bucket_Cors) Reset() { - *x = Bucket_Cors{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Cors) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Cors) ProtoMessage() {} - -func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead.
-func (*Bucket_Cors) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 1} -} - -func (x *Bucket_Cors) GetOrigin() []string { - if x != nil { - return x.Origin - } - return nil -} - -func (x *Bucket_Cors) GetMethod() []string { - if x != nil { - return x.Method - } - return nil -} - -func (x *Bucket_Cors) GetResponseHeader() []string { - if x != nil { - return x.ResponseHeader - } - return nil -} - -func (x *Bucket_Cors) GetMaxAgeSeconds() int32 { - if x != nil { - return x.MaxAgeSeconds - } - return 0 -} - -// Encryption properties of a bucket. -type Bucket_Encryption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the Cloud KMS key that will be used to encrypt objects - // inserted into this bucket, if no encryption method is specified. - DefaultKmsKey string `protobuf:"bytes,1,opt,name=default_kms_key,json=defaultKmsKey,proto3" json:"default_kms_key,omitempty"` -} - -func (x *Bucket_Encryption) Reset() { - *x = Bucket_Encryption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Encryption) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Encryption) ProtoMessage() {} - -func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. -func (*Bucket_Encryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 2} -} - -func (x *Bucket_Encryption) GetDefaultKmsKey() string { - if x != nil { - return x.DefaultKmsKey - } - return "" -} - -// Bucket restriction options. -type Bucket_IamConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Bucket restriction options currently enforced on the bucket. - UniformBucketLevelAccess *Bucket_IamConfig_UniformBucketLevelAccess `protobuf:"bytes,1,opt,name=uniform_bucket_level_access,json=uniformBucketLevelAccess,proto3" json:"uniform_bucket_level_access,omitempty"` - // Whether IAM will enforce public access prevention. Valid values are - // "enforced" or "inherited". 
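// Illustrative sketch (not part of the generated file or of this diff): one
// CORS entry of the shape described above; the origin, methods, and headers
// are placeholders.
var exampleCors = &storagepb.Bucket_Cors{
	Origin:         []string{"https://example.com"},
	Method:         []string{"GET", "HEAD"},
	ResponseHeader: []string{"Content-Type"},
	MaxAgeSeconds:  3600, // preflight responses may be cached for an hour
}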
- PublicAccessPrevention string `protobuf:"bytes,3,opt,name=public_access_prevention,json=publicAccessPrevention,proto3" json:"public_access_prevention,omitempty"` -} - -func (x *Bucket_IamConfig) Reset() { - *x = Bucket_IamConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_IamConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_IamConfig) ProtoMessage() {} - -func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead. -func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3} -} - -func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { - if x != nil { - return x.UniformBucketLevelAccess - } - return nil -} - -func (x *Bucket_IamConfig) GetPublicAccessPrevention() string { - if x != nil { - return x.PublicAccessPrevention - } - return "" -} - -// Lifecycle properties of a bucket. -// For more information, see https://cloud.google.com/storage/docs/lifecycle. -type Bucket_Lifecycle struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A lifecycle management rule, which is made of an action to take and the - // condition(s) under which the action will be taken. - Rule []*Bucket_Lifecycle_Rule `protobuf:"bytes,1,rep,name=rule,proto3" json:"rule,omitempty"` -} - -func (x *Bucket_Lifecycle) Reset() { - *x = Bucket_Lifecycle{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Lifecycle) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Lifecycle) ProtoMessage() {} - -func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. -func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4} -} - -func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { - if x != nil { - return x.Rule - } - return nil -} - -// Logging-related properties of a bucket. -type Bucket_Logging struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The destination bucket where the current bucket's logs should be placed, - // using path format (like `projects/123456/buckets/foo`). - LogBucket string `protobuf:"bytes,1,opt,name=log_bucket,json=logBucket,proto3" json:"log_bucket,omitempty"` - // A prefix for log object names. 
- LogObjectPrefix string `protobuf:"bytes,2,opt,name=log_object_prefix,json=logObjectPrefix,proto3" json:"log_object_prefix,omitempty"` -} - -func (x *Bucket_Logging) Reset() { - *x = Bucket_Logging{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Logging) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Logging) ProtoMessage() {} - -func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead. -func (*Bucket_Logging) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 5} -} - -func (x *Bucket_Logging) GetLogBucket() string { - if x != nil { - return x.LogBucket - } - return "" -} - -func (x *Bucket_Logging) GetLogObjectPrefix() string { - if x != nil { - return x.LogObjectPrefix - } - return "" -} - -// Retention policy properties of a bucket. -type Bucket_RetentionPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Server-determined value that indicates the time from which policy was - // enforced and effective. - EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` - // Once locked, an object retention policy cannot be modified. - IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"` - // The duration in seconds that objects need to be retained. Retention - // duration must be greater than zero and less than 100 years. Note that - // enforcement of retention periods less than a day is not guaranteed. Such - // periods should only be used for testing purposes. - RetentionPeriod *int64 `protobuf:"varint,3,opt,name=retention_period,json=retentionPeriod,proto3,oneof" json:"retention_period,omitempty"` - // The duration that objects need to be retained. Retention duration must be - // greater than zero and less than 100 years. Note that enforcement of - // retention periods less than a day is not guaranteed. Such periods should - // only be used for testing purposes. Any `nanos` value specified will be - // rounded down to the nearest second. 
- RetentionDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=retention_duration,json=retentionDuration,proto3" json:"retention_duration,omitempty"` -} - -func (x *Bucket_RetentionPolicy) Reset() { - *x = Bucket_RetentionPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_RetentionPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_RetentionPolicy) ProtoMessage() {} - -func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead. -func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 6} -} - -func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { - if x != nil { - return x.EffectiveTime - } - return nil -} - -func (x *Bucket_RetentionPolicy) GetIsLocked() bool { - if x != nil { - return x.IsLocked - } - return false -} - -func (x *Bucket_RetentionPolicy) GetRetentionPeriod() int64 { - if x != nil && x.RetentionPeriod != nil { - return *x.RetentionPeriod - } - return 0 -} - -func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration { - if x != nil { - return x.RetentionDuration - } - return nil -} - -// Properties of a bucket related to versioning. -// For more on Cloud Storage versioning, see -// https://cloud.google.com/storage/docs/object-versioning. -type Bucket_Versioning struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // While set to true, versioning is fully enabled for this bucket. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *Bucket_Versioning) Reset() { - *x = Bucket_Versioning{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Versioning) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Versioning) ProtoMessage() {} - -func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead. -func (*Bucket_Versioning) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 7} -} - -func (x *Bucket_Versioning) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -// Properties of a bucket related to accessing the contents as a static -// website. For more on hosting a static website via Cloud Storage, see -// https://cloud.google.com/storage/docs/hosting-static-website. 
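// Illustrative sketch (not part of the generated file or of this diff): the
// Duration form of a retention policy. durationpb is
// google.golang.org/protobuf/types/known/durationpb and "time" is assumed
// imported; any nanos would be rounded down to whole seconds per the field
// comment above.
var exampleRetention = &storagepb.Bucket_RetentionPolicy{
	RetentionDuration: durationpb.New(30 * 24 * time.Hour), // retain objects for 30 days
}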
-type Bucket_Website struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // If the requested object path is missing, the service will ensure the path - // has a trailing '/', append this suffix, and attempt to retrieve the - // resulting object. This allows the creation of `index.html` - // objects to represent directory pages. - MainPageSuffix string `protobuf:"bytes,1,opt,name=main_page_suffix,json=mainPageSuffix,proto3" json:"main_page_suffix,omitempty"` - // If the requested object path is missing, and any - // `mainPageSuffix` object is missing, if applicable, the service - // will return the named object from this bucket as the content for a - // [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not Found] - // result. - NotFoundPage string `protobuf:"bytes,2,opt,name=not_found_page,json=notFoundPage,proto3" json:"not_found_page,omitempty"` -} - -func (x *Bucket_Website) Reset() { - *x = Bucket_Website{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Website) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Website) ProtoMessage() {} - -func (x *Bucket_Website) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead. -func (*Bucket_Website) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 8} -} - -func (x *Bucket_Website) GetMainPageSuffix() string { - if x != nil { - return x.MainPageSuffix - } - return "" -} - -func (x *Bucket_Website) GetNotFoundPage() string { - if x != nil { - return x.NotFoundPage - } - return "" -} - -// Configuration for Custom Dual Regions. It should specify precisely two -// eligible regions within the same Multiregion. More information on regions -// may be found [https://cloud.google.com/storage/docs/locations][here]. -type Bucket_CustomPlacementConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // List of locations to use for data placement. - DataLocations []string `protobuf:"bytes,1,rep,name=data_locations,json=dataLocations,proto3" json:"data_locations,omitempty"` -} - -func (x *Bucket_CustomPlacementConfig) Reset() { - *x = Bucket_CustomPlacementConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_CustomPlacementConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_CustomPlacementConfig) ProtoMessage() {} - -func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead. 
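// Illustrative sketch (not part of the generated file or of this diff):
// typical static-site settings of the shape described above; both object
// names are placeholders.
var exampleWebsite = &storagepb.Bucket_Website{
	MainPageSuffix: "index.html", // appended to directory-style request paths
	NotFoundPage:   "404.html",   // served when no object matches the path
}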
-func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 9}
-}
-
-func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string {
-	if x != nil {
-		return x.DataLocations
-	}
-	return nil
-}
-
-// Configuration for a bucket's Autoclass feature.
-type Bucket_Autoclass struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Enables Autoclass.
-	Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
-	// Output only. Latest instant at which the `enabled` field was set to true
-	// after being disabled/unconfigured or set to false after being enabled. If
-	// Autoclass is enabled when the bucket is created, the toggle_time is set
-	// to the bucket creation time.
-	ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"`
-}
-
-func (x *Bucket_Autoclass) Reset() {
-	*x = Bucket_Autoclass{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[66]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Bucket_Autoclass) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Bucket_Autoclass) ProtoMessage() {}
-
-func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[66]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead.
-func (*Bucket_Autoclass) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 10}
-}
-
-func (x *Bucket_Autoclass) GetEnabled() bool {
-	if x != nil {
-		return x.Enabled
-	}
-	return false
-}
-
-func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp {
-	if x != nil {
-		return x.ToggleTime
-	}
-	return nil
-}
-
-// Settings for Uniform Bucket level access.
-// See https://cloud.google.com/storage/docs/uniform-bucket-level-access.
-type Bucket_IamConfig_UniformBucketLevelAccess struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// If set, access checks only use bucket-level IAM policies or above.
-	Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
-	// The deadline time for changing
-	// `iam_config.uniform_bucket_level_access.enabled` from `true` to
-	// `false`. Mutable until the specified deadline is reached, but not
-	// afterward.
-	LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"`
-}
-
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() {
-	*x = Bucket_IamConfig_UniformBucketLevelAccess{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[68]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {}
-
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[68]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead.
-func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3, 0}
-}
-
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool {
-	if x != nil {
-		return x.Enabled
-	}
-	return false
-}
-
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp {
-	if x != nil {
-		return x.LockTime
-	}
-	return nil
-}
-
-// A lifecycle Rule, combining an action to take on an object and a
-// condition which will trigger that action.
-type Bucket_Lifecycle_Rule struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// The action to take.
-	Action *Bucket_Lifecycle_Rule_Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"`
-	// The condition(s) under which the action will be taken.
-	Condition *Bucket_Lifecycle_Rule_Condition `protobuf:"bytes,2,opt,name=condition,proto3" json:"condition,omitempty"`
-}
-
-func (x *Bucket_Lifecycle_Rule) Reset() {
-	*x = Bucket_Lifecycle_Rule{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[69]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Bucket_Lifecycle_Rule) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Bucket_Lifecycle_Rule) ProtoMessage() {}
-
-func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[69]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead.
-func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0}
-}
-
-func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action {
-	if x != nil {
-		return x.Action
-	}
-	return nil
-}
-
-func (x *Bucket_Lifecycle_Rule) GetCondition() *Bucket_Lifecycle_Rule_Condition {
-	if x != nil {
-		return x.Condition
-	}
-	return nil
-}
-
-// An action to take on an object.
-type Bucket_Lifecycle_Rule_Action struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Type of the action. Currently, only `Delete`, `SetStorageClass`, and
-	// `AbortIncompleteMultipartUpload` are supported.
-	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
-	// Target storage class. Required iff the type of the action is
-	// SetStorageClass.
-	StorageClass string `protobuf:"bytes,2,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"`
-}
-
-func (x *Bucket_Lifecycle_Rule_Action) Reset() {
-	*x = Bucket_Lifecycle_Rule_Action{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[70]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Bucket_Lifecycle_Rule_Action) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {}
-
-func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[70]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead.
-func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 0}
-}
-
-func (x *Bucket_Lifecycle_Rule_Action) GetType() string {
-	if x != nil {
-		return x.Type
-	}
-	return ""
-}
-
-func (x *Bucket_Lifecycle_Rule_Action) GetStorageClass() string {
-	if x != nil {
-		return x.StorageClass
-	}
-	return ""
-}
-
-// A condition of an object which triggers some action.
-type Bucket_Lifecycle_Rule_Condition struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Age of an object (in days). This condition is satisfied when an
-	// object reaches the specified age.
-	// A value of 0 indicates that all objects immediately match this
-	// condition.
-	AgeDays *int32 `protobuf:"varint,1,opt,name=age_days,json=ageDays,proto3,oneof" json:"age_days,omitempty"`
-	// This condition is satisfied when an object is created before midnight
-	// of the specified date in UTC.
-	CreatedBefore *date.Date `protobuf:"bytes,2,opt,name=created_before,json=createdBefore,proto3" json:"created_before,omitempty"`
-	// Relevant only for versioned objects. If the value is
-	// `true`, this condition matches live objects; if the value
-	// is `false`, it matches archived objects.
-	IsLive *bool `protobuf:"varint,3,opt,name=is_live,json=isLive,proto3,oneof" json:"is_live,omitempty"`
-	// Relevant only for versioned objects. If the value is N, this
-	// condition is satisfied when there are at least N versions (including
-	// the live version) newer than this version of the object.
-	NumNewerVersions *int32 `protobuf:"varint,4,opt,name=num_newer_versions,json=numNewerVersions,proto3,oneof" json:"num_newer_versions,omitempty"`
-	// Objects having any of the storage classes specified by this condition
-	// will be matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
-	// `NEARLINE`, `COLDLINE`, `STANDARD`, and
-	// `DURABLE_REDUCED_AVAILABILITY`.
-	MatchesStorageClass []string `protobuf:"bytes,5,rep,name=matches_storage_class,json=matchesStorageClass,proto3" json:"matches_storage_class,omitempty"`
-	// Number of days that have elapsed since the custom timestamp set on an
-	// object.
-	// The value of the field must be a nonnegative integer.
-	DaysSinceCustomTime *int32 `protobuf:"varint,7,opt,name=days_since_custom_time,json=daysSinceCustomTime,proto3,oneof" json:"days_since_custom_time,omitempty"`
-	// An object matches this condition if the custom timestamp set on the
-	// object is before the specified date in UTC.
-	CustomTimeBefore *date.Date `protobuf:"bytes,8,opt,name=custom_time_before,json=customTimeBefore,proto3" json:"custom_time_before,omitempty"`
-	// This condition is relevant only for versioned objects. An object
-	// version satisfies this condition only if these many days have been
-	// passed since it became noncurrent. The value of the field must be a
-	// nonnegative integer. If it's zero, the object version will become
-	// eligible for Lifecycle action as soon as it becomes noncurrent.
-	DaysSinceNoncurrentTime *int32 `protobuf:"varint,9,opt,name=days_since_noncurrent_time,json=daysSinceNoncurrentTime,proto3,oneof" json:"days_since_noncurrent_time,omitempty"`
-	// This condition is relevant only for versioned objects. An object
-	// version satisfies this condition only if it became noncurrent before
-	// the specified date in UTC.
-	NoncurrentTimeBefore *date.Date `protobuf:"bytes,10,opt,name=noncurrent_time_before,json=noncurrentTimeBefore,proto3" json:"noncurrent_time_before,omitempty"`
-	// List of object name prefixes. If any prefix exactly matches the
-	// beginning of the object name, the condition evaluates to true.
-	MatchesPrefix []string `protobuf:"bytes,11,rep,name=matches_prefix,json=matchesPrefix,proto3" json:"matches_prefix,omitempty"`
-	// List of object name suffixes. If any suffix exactly matches the
-	// end of the object name, the condition evaluates to true.
-	MatchesSuffix []string `protobuf:"bytes,12,rep,name=matches_suffix,json=matchesSuffix,proto3" json:"matches_suffix,omitempty"`
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) Reset() {
-	*x = Bucket_Lifecycle_Rule_Condition{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[71]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {}
-
-func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[71]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead.
-func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 1}
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 {
-	if x != nil && x.AgeDays != nil {
-		return *x.AgeDays
-	}
-	return 0
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) GetCreatedBefore() *date.Date {
-	if x != nil {
-		return x.CreatedBefore
-	}
-	return nil
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) GetIsLive() bool {
-	if x != nil && x.IsLive != nil {
-		return *x.IsLive
-	}
-	return false
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) GetNumNewerVersions() int32 {
-	if x != nil && x.NumNewerVersions != nil {
-		return *x.NumNewerVersions
-	}
-	return 0
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesStorageClass() []string {
-	if x != nil {
-		return x.MatchesStorageClass
-	}
-	return nil
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceCustomTime() int32 {
-	if x != nil && x.DaysSinceCustomTime != nil {
-		return *x.DaysSinceCustomTime
-	}
-	return 0
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) GetCustomTimeBefore() *date.Date {
-	if x != nil {
-		return x.CustomTimeBefore
-	}
-	return nil
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceNoncurrentTime() int32 {
-	if x != nil && x.DaysSinceNoncurrentTime != nil {
-		return *x.DaysSinceNoncurrentTime
-	}
-	return 0
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) GetNoncurrentTimeBefore() *date.Date {
-	if x != nil {
-		return x.NoncurrentTimeBefore
-	}
-	return nil
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesPrefix() []string {
-	if x != nil {
-		return x.MatchesPrefix
-	}
-	return nil
-}
-
-func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesSuffix() []string {
-	if x != nil {
-		return x.MatchesSuffix
-	}
-	return nil
-}
-
-var File_google_storage_v2_storage_proto protoreflect.FileDescriptor
-
-var file_google_storage_v2_storage_proto_rawDesc = []byte{
-	0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
-	0x2f, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
-	0x65, 0x2e, 0x76, 0x32, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
-	0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
-	0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75,
-	0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f,
-	0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f,
-	0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
-	0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
-	0x1b, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, - 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, - 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, - 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, - 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x22, 0xa1, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, - 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, - 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, - 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, - 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x41, 0x63, 0x6c, 0x22, 0x81, 0x02, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 
0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3c, 0x0a, 0x09, - 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, - 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, - 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9e, 0x01, 0x0a, - 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xb6, 0x03, - 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, - 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x01, 
0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, - 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, - 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x5c, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, - 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x19, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, - 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4e, 0x6f, 0x74, 0x69, 
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x95, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, - 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, - 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, - 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, - 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, - 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, - 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, - 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, - 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, - 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, - 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, 0x02, 0x0a, - 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 
0x13, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, - 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc0, 0x04, 0x0a, 0x13, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, - 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, - 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, - 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, - 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, - 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x3f, - 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, - 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, - 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, - 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, - 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0xca, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, - 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, - 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, - 0x1b, 0x69, 0x66, 0x5f, 0x6d, 
0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, - 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, - 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, - 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, - 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x89, 0x05, 0x0a, - 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, - 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 
0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, - 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, - 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, - 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, - 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, - 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, - 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, - 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, - 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, - 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 
0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, - 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, - 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, - 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 
0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, - 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, - 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, - 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, - 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, - 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 
[Vendored generated code elided: this hunk consists solely of deletion lines removing the serialized file-descriptor byte array (hex literals) from what appears to be the generated Go protobuf bindings for `google.storage.v2`. The deleted bytes encode the descriptors for, among others: WriteObjectResponse, ListObjectsRequest, QueryWriteStatusRequest/Response, RewriteObjectRequest, RewriteResponse, StartResumableWriteRequest/Response, UpdateObjectRequest, GetServiceAccountRequest, the HMAC key request/response messages (Create/Delete/Get/List/UpdateHmacKey), CommonObjectRequestParams, ServiceConstants, Bucket (with its nested Billing, Cors, Encryption, IamConfig, Lifecycle, Logging, RetentionPolicy, Versioning, Website, CustomPlacementConfig, Autoclass, and LabelsEntry types), BucketAccessControl, ChecksummedData, ObjectChecksums, HmacKeyMetadata, Notification, CustomerEncryption, and Object. The byte array continues beyond this excerpt.]
0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, - 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, - 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, - 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, - 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, - 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, - 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, - 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 
0x6f, 0x6c, 0x64, - 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, - 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x22, - 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, - 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, - 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, 0x69, - 0x73, 
0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, - 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, - 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05, 0x4f, - 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, - 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaf, 0x25, 0x0a, 0x07, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, - 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 
0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, - 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8b, 0x01, 0x0a, 0x0c, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, - 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, - 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, - 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, - 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, - 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 
0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x22, 0x60, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x22, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, - 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, - 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, - 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, - 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x14, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 
0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, - 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x12, 0x93, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, - 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x96, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, - 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x98, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x33, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, - 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x96, 0x01, 0x0a, 0x11, - 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, - 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, - 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, - 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, - 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, - 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x95, 0x01, 0x0a, - 0x09, 0x47, 0x65, 0x74, 0x4f, 
0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, - 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, - 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, - 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x22, 0x39, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x60, 0x0a, 0x0b, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x84, 0x01, - 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 
0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, - 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, - 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, - 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 
0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, - 0x64, 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, - 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, - 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x1d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x77, 0x0a, 0x0d, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 
0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, - 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x22, 0x3f, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, - 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x14, 0x68, 0x6d, - 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, - 0x73, 0x6b, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, - 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, - 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, - 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, - 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, - 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, - 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, - 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, - 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xdc, 0x01, 0x0a, - 0x15, 0x63, 0x6f, 0x6d, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, - 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, - 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_google_storage_v2_storage_proto_rawDescOnce sync.Once - file_google_storage_v2_storage_proto_rawDescData = file_google_storage_v2_storage_proto_rawDesc -) - -func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { - file_google_storage_v2_storage_proto_rawDescOnce.Do(func() { - file_google_storage_v2_storage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_storage_v2_storage_proto_rawDescData) - }) - return file_google_storage_v2_storage_proto_rawDescData -} - -var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 74) -var file_google_storage_v2_storage_proto_goTypes = []interface{}{ - (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values - (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest - (*GetBucketRequest)(nil), // 2: google.storage.v2.GetBucketRequest - (*CreateBucketRequest)(nil), // 3: google.storage.v2.CreateBucketRequest - (*ListBucketsRequest)(nil), // 4: google.storage.v2.ListBucketsRequest - (*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse - (*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest - (*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest - (*DeleteNotificationRequest)(nil), // 8: google.storage.v2.DeleteNotificationRequest - (*GetNotificationRequest)(nil), // 9: google.storage.v2.GetNotificationRequest - (*CreateNotificationRequest)(nil), // 10: google.storage.v2.CreateNotificationRequest - (*ListNotificationsRequest)(nil), // 11: google.storage.v2.ListNotificationsRequest - (*ListNotificationsResponse)(nil), // 12: google.storage.v2.ListNotificationsResponse - (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest - (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest - (*CancelResumableWriteRequest)(nil), // 15: google.storage.v2.CancelResumableWriteRequest - (*CancelResumableWriteResponse)(nil), // 16: google.storage.v2.CancelResumableWriteResponse - (*ReadObjectRequest)(nil), // 17: 
-	(*GetObjectRequest)(nil), // 18: google.storage.v2.GetObjectRequest
-	(*ReadObjectResponse)(nil), // 19: google.storage.v2.ReadObjectResponse
-	(*WriteObjectSpec)(nil), // 20: google.storage.v2.WriteObjectSpec
-	(*WriteObjectRequest)(nil), // 21: google.storage.v2.WriteObjectRequest
-	(*WriteObjectResponse)(nil), // 22: google.storage.v2.WriteObjectResponse
-	(*ListObjectsRequest)(nil), // 23: google.storage.v2.ListObjectsRequest
-	(*QueryWriteStatusRequest)(nil), // 24: google.storage.v2.QueryWriteStatusRequest
-	(*QueryWriteStatusResponse)(nil), // 25: google.storage.v2.QueryWriteStatusResponse
-	(*RewriteObjectRequest)(nil), // 26: google.storage.v2.RewriteObjectRequest
-	(*RewriteResponse)(nil), // 27: google.storage.v2.RewriteResponse
-	(*StartResumableWriteRequest)(nil), // 28: google.storage.v2.StartResumableWriteRequest
-	(*StartResumableWriteResponse)(nil), // 29: google.storage.v2.StartResumableWriteResponse
-	(*UpdateObjectRequest)(nil), // 30: google.storage.v2.UpdateObjectRequest
-	(*GetServiceAccountRequest)(nil), // 31: google.storage.v2.GetServiceAccountRequest
-	(*CreateHmacKeyRequest)(nil), // 32: google.storage.v2.CreateHmacKeyRequest
-	(*CreateHmacKeyResponse)(nil), // 33: google.storage.v2.CreateHmacKeyResponse
-	(*DeleteHmacKeyRequest)(nil), // 34: google.storage.v2.DeleteHmacKeyRequest
-	(*GetHmacKeyRequest)(nil), // 35: google.storage.v2.GetHmacKeyRequest
-	(*ListHmacKeysRequest)(nil), // 36: google.storage.v2.ListHmacKeysRequest
-	(*ListHmacKeysResponse)(nil), // 37: google.storage.v2.ListHmacKeysResponse
-	(*UpdateHmacKeyRequest)(nil), // 38: google.storage.v2.UpdateHmacKeyRequest
-	(*CommonObjectRequestParams)(nil), // 39: google.storage.v2.CommonObjectRequestParams
-	(*ServiceConstants)(nil), // 40: google.storage.v2.ServiceConstants
-	(*Bucket)(nil), // 41: google.storage.v2.Bucket
-	(*BucketAccessControl)(nil), // 42: google.storage.v2.BucketAccessControl
-	(*ChecksummedData)(nil), // 43: google.storage.v2.ChecksummedData
-	(*ObjectChecksums)(nil), // 44: google.storage.v2.ObjectChecksums
-	(*HmacKeyMetadata)(nil), // 45: google.storage.v2.HmacKeyMetadata
-	(*Notification)(nil), // 46: google.storage.v2.Notification
-	(*CustomerEncryption)(nil), // 47: google.storage.v2.CustomerEncryption
-	(*Object)(nil), // 48: google.storage.v2.Object
-	(*ObjectAccessControl)(nil), // 49: google.storage.v2.ObjectAccessControl
-	(*ListObjectsResponse)(nil), // 50: google.storage.v2.ListObjectsResponse
-	(*ProjectTeam)(nil), // 51: google.storage.v2.ProjectTeam
-	(*ServiceAccount)(nil), // 52: google.storage.v2.ServiceAccount
-	(*Owner)(nil), // 53: google.storage.v2.Owner
-	(*ContentRange)(nil), // 54: google.storage.v2.ContentRange
-	(*ComposeObjectRequest_SourceObject)(nil), // 55: google.storage.v2.ComposeObjectRequest.SourceObject
-	(*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 56: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
-	(*Bucket_Billing)(nil), // 57: google.storage.v2.Bucket.Billing
-	(*Bucket_Cors)(nil), // 58: google.storage.v2.Bucket.Cors
-	(*Bucket_Encryption)(nil), // 59: google.storage.v2.Bucket.Encryption
-	(*Bucket_IamConfig)(nil), // 60: google.storage.v2.Bucket.IamConfig
-	(*Bucket_Lifecycle)(nil), // 61: google.storage.v2.Bucket.Lifecycle
-	(*Bucket_Logging)(nil), // 62: google.storage.v2.Bucket.Logging
-	(*Bucket_RetentionPolicy)(nil), // 63: google.storage.v2.Bucket.RetentionPolicy
-	(*Bucket_Versioning)(nil), // 64: google.storage.v2.Bucket.Versioning
-	(*Bucket_Website)(nil), // 65: google.storage.v2.Bucket.Website
-	(*Bucket_CustomPlacementConfig)(nil), // 66: google.storage.v2.Bucket.CustomPlacementConfig
-	(*Bucket_Autoclass)(nil), // 67: google.storage.v2.Bucket.Autoclass
-	nil, // 68: google.storage.v2.Bucket.LabelsEntry
-	(*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 69: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
-	(*Bucket_Lifecycle_Rule)(nil), // 70: google.storage.v2.Bucket.Lifecycle.Rule
-	(*Bucket_Lifecycle_Rule_Action)(nil), // 71: google.storage.v2.Bucket.Lifecycle.Rule.Action
-	(*Bucket_Lifecycle_Rule_Condition)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule.Condition
-	nil, // 73: google.storage.v2.Notification.CustomAttributesEntry
-	nil, // 74: google.storage.v2.Object.MetadataEntry
-	(*fieldmaskpb.FieldMask)(nil), // 75: google.protobuf.FieldMask
-	(*timestamppb.Timestamp)(nil), // 76: google.protobuf.Timestamp
-	(*durationpb.Duration)(nil), // 77: google.protobuf.Duration
-	(*date.Date)(nil), // 78: google.type.Date
-	(*v1.GetIamPolicyRequest)(nil), // 79: google.iam.v1.GetIamPolicyRequest
-	(*v1.SetIamPolicyRequest)(nil), // 80: google.iam.v1.SetIamPolicyRequest
-	(*v1.TestIamPermissionsRequest)(nil), // 81: google.iam.v1.TestIamPermissionsRequest
-	(*emptypb.Empty)(nil), // 82: google.protobuf.Empty
-	(*v1.Policy)(nil), // 83: google.iam.v1.Policy
-	(*v1.TestIamPermissionsResponse)(nil), // 84: google.iam.v1.TestIamPermissionsResponse
-}
-var file_google_storage_v2_storage_proto_depIdxs = []int32{
-	75, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask
-	41, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
-	75, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask
-	41, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket
-	41, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
-	75, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask
-	46, // 6: google.storage.v2.CreateNotificationRequest.notification:type_name -> google.storage.v2.Notification
-	46, // 7: google.storage.v2.ListNotificationsResponse.notifications:type_name -> google.storage.v2.Notification
-	48, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object
-	55, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject
-	39, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
-	44, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
-	39, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
-	39, // 13: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
-	75, // 14: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
-	39, // 15: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
-	75, // 16: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
-	43, // 17: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData
-	44, // 18: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums
-	54, // 19: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange
-	48, // 20: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object
-	48, // 21: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object
-	20, // 22: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
-	43, // 23: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
-	44, // 24: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
-	39, // 25: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
-	48, // 26: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object
-	75, // 27: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask
-	39, // 28: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
-	48, // 29: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object
-	48, // 30: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object
-	39, // 31: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
-	44, // 32: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
-	48, // 33: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object
-	20, // 34: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
-	39, // 35: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
-	44, // 36: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
-	48, // 37: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object
-	75, // 38: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask
-	39, // 39: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
-	45, // 40: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata
-	45, // 41: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata
-	45, // 42: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata
-	75, // 43: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask
-	42, // 44: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl
-	49, // 45: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl
-	61, // 46: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle
-	76, // 47: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp
-	58, // 48: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors
-	76, // 49: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp
-	68, // 50: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry
-	65, // 51: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website
-	64, // 52: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning
-	62, // 53: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging
-	53, // 54: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner
-	59, // 55: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption
-	57, // 56: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing
-	63, // 57: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy
-	60, // 58: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig
-	66, // 59: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig
-	67, // 60: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass
-	51, // 61: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
-	76, // 62: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp
-	76, // 63: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp
-	73, // 64: google.storage.v2.Notification.custom_attributes:type_name -> google.storage.v2.Notification.CustomAttributesEntry
-	49, // 65: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl
-	76, // 66: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp
-	76, // 67: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp
-	44, // 68: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums
-	76, // 69: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp
-	76, // 70: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp
-	76, // 71: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp
-	74, // 72: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry
-	53, // 73: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner
-	47, // 74: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption
-	76, // 75: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp
-	51, // 76: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
-	48, // 77: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object
-	56, // 78: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
-	69, // 79: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
-	70, // 80: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule
-	76, // 81: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp
-	77, // 82: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration
-	76, // 83: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp
-	76, // 84: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp
-	71,
// 85: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action - 72, // 86: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition - 78, // 87: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date - 78, // 88: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date - 78, // 89: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date - 1, // 90: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest - 2, // 91: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest - 3, // 92: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest - 4, // 93: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest - 6, // 94: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest - 79, // 95: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 80, // 96: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 81, // 97: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 7, // 98: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest - 8, // 99: google.storage.v2.Storage.DeleteNotification:input_type -> google.storage.v2.DeleteNotificationRequest - 9, // 100: google.storage.v2.Storage.GetNotification:input_type -> google.storage.v2.GetNotificationRequest - 10, // 101: google.storage.v2.Storage.CreateNotification:input_type -> google.storage.v2.CreateNotificationRequest - 11, // 102: google.storage.v2.Storage.ListNotifications:input_type -> google.storage.v2.ListNotificationsRequest - 13, // 103: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest - 14, // 104: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest - 15, // 105: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest - 18, // 106: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest - 17, // 107: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest - 30, // 108: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest - 21, // 109: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest - 23, // 110: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest - 26, // 111: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest - 28, // 112: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest - 24, // 113: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest - 31, // 114: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest - 32, // 115: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest - 34, // 116: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest - 35, // 117: google.storage.v2.Storage.GetHmacKey:input_type 
-> google.storage.v2.GetHmacKeyRequest - 36, // 118: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest - 38, // 119: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest - 82, // 120: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty - 41, // 121: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket - 41, // 122: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket - 5, // 123: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse - 41, // 124: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket - 83, // 125: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy - 83, // 126: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy - 84, // 127: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 41, // 128: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket - 82, // 129: google.storage.v2.Storage.DeleteNotification:output_type -> google.protobuf.Empty - 46, // 130: google.storage.v2.Storage.GetNotification:output_type -> google.storage.v2.Notification - 46, // 131: google.storage.v2.Storage.CreateNotification:output_type -> google.storage.v2.Notification - 12, // 132: google.storage.v2.Storage.ListNotifications:output_type -> google.storage.v2.ListNotificationsResponse - 48, // 133: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object - 82, // 134: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty - 16, // 135: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse - 48, // 136: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object - 19, // 137: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse - 48, // 138: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object - 22, // 139: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse - 50, // 140: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse - 27, // 141: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse - 29, // 142: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse - 25, // 143: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse - 52, // 144: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount - 33, // 145: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse - 82, // 146: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty - 45, // 147: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 37, // 148: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse - 45, // 149: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 120, // [120:150] is the sub-list for method output_type - 90, // [90:120] is the sub-list for method input_type - 90, // [90:90] is the sub-list for extension type_name - 90, // [90:90] is the sub-list for extension extendee - 0, // [0:90] is the 
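The bracketed comments closing the deleted table above describe how one flat index slice is partitioned into four sub-lists. A small self-contained sketch of that slicing, with only the lengths taken verbatim from those comments (everything else is illustrative):

```go
package main

import "fmt"

func main() {
	// The deleted depIdxs array held 150 entries, partitioned per its
	// trailing comments: [0:90] field type_name references, [90:120]
	// method input_type references, [120:150] method output_type references.
	depIdxs := make([]int32, 150)
	fieldTypeNames := depIdxs[0:90]
	methodInputs := depIdxs[90:120]
	methodOutputs := depIdxs[120:150]
	fmt.Println(len(fieldTypeNames), len(methodInputs), len(methodOutputs)) // 90 30 30
}
```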
-
-func init() { file_google_storage_v2_storage_proto_init() }
-func file_google_storage_v2_storage_proto_init() {
-	if File_google_storage_v2_storage_proto != nil {
-		return
-	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_storage_v2_storage_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*DeleteBucketRequest); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_storage_v2_storage_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*GetBucketRequest); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
		[… the deletion continues with structurally identical Exporter closures for msgTypes[2] (*CreateBucketRequest) through msgTypes[70] (*Bucket_Lifecycle_Rule_Action), one 12-line switch per message type in the order listed in goTypes above; the map-entry type msgTypes[67] (Bucket.LabelsEntry) has no exporter …]
-		file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Bucket_Lifecycle_Rule_Condition); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
-	file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{
-		(*WriteObjectRequest_UploadId)(nil),
-		(*WriteObjectRequest_WriteObjectSpec)(nil),
-		(*WriteObjectRequest_ChecksummedData)(nil),
-	}
-	file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{
-		(*WriteObjectResponse_PersistedSize)(nil),
-		(*WriteObjectResponse_Resource)(nil),
-	}
-	file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []interface{}{
-		(*QueryWriteStatusResponse_PersistedSize)(nil),
-		(*QueryWriteStatusResponse_Resource)(nil),
-	}
-	file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[29].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[43].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[47].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[55].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[62].OneofWrappers = []interface{}{}
-	file_google_storage_v2_storage_proto_msgTypes[71].OneofWrappers = []interface{}{}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_google_storage_v2_storage_proto_rawDesc,
-			NumEnums:      1,
-			NumMessages:   74,
-			NumExtensions: 0,
-			NumServices:   1,
-		},
-		GoTypes:           file_google_storage_v2_storage_proto_goTypes,
-		DependencyIndexes: file_google_storage_v2_storage_proto_depIdxs,
-		EnumInfos:         file_google_storage_v2_storage_proto_enumTypes,
-		MessageInfos:      file_google_storage_v2_storage_proto_msgTypes,
-	}.Build()
-	File_google_storage_v2_storage_proto = out.File
-	file_google_storage_v2_storage_proto_rawDesc = nil
-	file_google_storage_v2_storage_proto_goTypes = nil
-	file_google_storage_v2_storage_proto_depIdxs = nil
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConnInterface
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion6
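Since the generated client deleted below is what downstream code consumed, a minimal sketch of how such a client was typically constructed may help orient reviewers. The endpoint, import path, and bucket name are illustrative assumptions, and real callers would also attach OAuth credentials:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"

	// Assumed import path for this generated package.
	storagepb "google.golang.org/genproto/googleapis/storage/v2"
)

func main() {
	// Assumes ambient TLS roots; production callers would add per-RPC auth.
	conn, err := grpc.Dial("storage.googleapis.com:443",
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := storagepb.NewStorageClient(conn)
	bucket, err := client.GetBucket(context.Background(), &storagepb.GetBucketRequest{
		Name: "projects/_/buckets/example-bucket", // illustrative bucket resource name
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(bucket.GetName())
}
```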
-
-// StorageClient is the client API for Storage service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type StorageClient interface {
-	// Permanently deletes an empty bucket.
-	DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
-	// Returns metadata for the specified bucket.
-	GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error)
-	// Creates a new bucket.
-	CreateBucket(ctx context.Context, in *CreateBucketRequest, opts ...grpc.CallOption) (*Bucket, error)
-	// Retrieves a list of buckets for a given project.
-	ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error)
-	// Locks retention policy on a bucket.
-	LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error)
-	// Gets the IAM policy for a specified bucket or object.
-	// The `resource` field in the request should be
-	// projects/_/buckets/<bucket_name> for a bucket or
-	// projects/_/buckets/<bucket_name>/objects/<object_name> for an object.
-	GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error)
-	// Updates an IAM policy for the specified bucket or object.
-	// The `resource` field in the request should be
-	// projects/_/buckets/<bucket_name> for a bucket or
-	// projects/_/buckets/<bucket_name>/objects/<object_name> for an object.
-	SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error)
-	// Tests a set of permissions on the given bucket or object to see which, if
-	// any, are held by the caller.
-	// The `resource` field in the request should be
-	// projects/_/buckets/<bucket_name> for a bucket or
-	// projects/_/buckets/<bucket_name>/objects/<object_name> for an object.
-	TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error)
-	// Updates a bucket. Equivalent to JSON API's storage.buckets.patch method.
-	UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error)
-	// Permanently deletes a notification subscription.
-	DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
-	// View a notification config.
-	GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error)
-	// Creates a notification subscription for a given bucket.
-	// These notifications, when triggered, publish messages to the specified
-	// Pub/Sub topics.
-	// See https://cloud.google.com/storage/docs/pubsub-notifications.
-	CreateNotification(ctx context.Context, in *CreateNotificationRequest, opts ...grpc.CallOption) (*Notification, error)
-	// Retrieves a list of notification subscriptions for a given bucket.
-	ListNotifications(ctx context.Context, in *ListNotificationsRequest, opts ...grpc.CallOption) (*ListNotificationsResponse, error)
-	// Concatenates a list of existing objects into a new object in the same
-	// bucket.
-	ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error)
-	// Deletes an object and its metadata. Deletions are permanent if versioning
-	// is not enabled for the bucket, or if the `generation` parameter is used.
-	DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
-	// Cancels an in-progress resumable upload.
-	CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error)
-	// Retrieves an object's metadata.
-	GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error)
-	// Reads an object's data.
-	ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error)
-	// Updates an object's metadata.
-	// Equivalent to JSON API's storage.objects.patch.
-	UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error)
-	// Stores a new object and metadata.
-	//
-	// An object can be written either in a single message stream or in a
-	// resumable sequence of message streams. To write using a single stream,
-	// the client should include in the first message of the stream an
-	// `WriteObjectSpec` describing the destination bucket, object, and any
-	// preconditions. Additionally, the final message must set 'finish_write' to
-	// true, or else it is an error.
-	//
-	// For a resumable write, the client should instead call
-	// `StartResumableWrite()`, populating a `WriteObjectSpec` into that request.
-	// They should then attach the returned `upload_id` to the first message of
-	// each following call to `WriteObject`. If the stream is closed before
-	// finishing the upload (either explicitly by the client or due to a network
-	// error or an error response from the server), the client should do as
-	// follows:
-	//   - Check the result Status of the stream, to determine if writing can be
-	//     resumed on this stream or must be restarted from scratch (by calling
-	//     `StartResumableWrite()`). The resumable errors are DEADLINE_EXCEEDED,
-	//     INTERNAL, and UNAVAILABLE. For each case, the client should use binary
-	//     exponential backoff before retrying. Additionally, writes can be
-	//     resumed after RESOURCE_EXHAUSTED errors, but only after taking
-	//     appropriate measures, which may include reducing aggregate send rate
-	//     across clients and/or requesting a quota increase for your project.
-	//   - If the call to `WriteObject` returns `ABORTED`, that indicates
-	//     concurrent attempts to update the resumable write, caused either by
-	//     multiple racing clients or by a single client where the previous
-	//     request was timed out on the client side but nonetheless reached the
-	//     server. In this case the client should take steps to prevent further
-	//     concurrent writes (e.g., increase the timeouts, stop using more than
-	//     one process to perform the upload, etc.), and then should follow the
-	//     steps below for resuming the upload.
-	//   - For resumable errors, the client should call `QueryWriteStatus()` and
-	//     then continue writing from the returned `persisted_size`. This may be
-	//     less than the amount of data the client previously sent. Note also that
-	//     it is acceptable to send data starting at an offset earlier than the
-	//     returned `persisted_size`; in this case, the service will skip data at
-	//     offsets that were already persisted (without checking that it matches
-	//     the previously written data), and write only the data starting from the
-	//     persisted offset. This behavior can make client-side handling simpler
-	//     in some cases.
-	//
-	// The service will not view the object as complete until the client has
-	// sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any
-	// requests on a stream after sending a request with `finish_write` set to
-	// `true` will cause an error. The client **should** check the response it
-	// receives to determine how much data the service was able to commit and
-	// whether the service views the object as complete.
-	//
-	// Attempting to resume an already finalized object will result in an OK
-	// status, with a WriteObjectResponse containing the finalized object's
-	// metadata.
-	WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error)
-	// Retrieves a list of objects matching the criteria.
-	ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error)
-	// Rewrites a source object to a destination object. Optionally overrides
-	// metadata.
-	RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error)
-	// Starts a resumable write. How long the write operation remains valid, and
-	// what happens when the write operation becomes invalid, are
-	// service-dependent.
-	StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error)
-	// Determines the `persisted_size` for an object that is being written, which
-	// can then be used as the `write_offset` for the next `Write()` call.
-	//
-	// If the object does not exist (i.e., the object has been deleted, or the
-	// first `Write()` has not yet reached the service), this method returns the
-	// error `NOT_FOUND`.
-	//
-	// The client **may** call `QueryWriteStatus()` at any time to determine how
-	// much data has been processed for this object. This is useful if the
-	// client is buffering data and needs to know which data can be safely
-	// evicted. For any sequence of `QueryWriteStatus()` calls for a given
-	// object name, the sequence of returned `persisted_size` values will be
-	// non-decreasing.
-	QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error)
-	// Retrieves the name of a project's Google Cloud Storage service account.
-	GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error)
-	// Creates a new HMAC key for the given service account.
-	CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error)
-	// Deletes a given HMAC key. Key must be in an INACTIVE state.
-	DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
-	// Gets an existing HMAC key metadata for the given id.
-	GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error)
-	// Lists HMAC keys under a given project with the additional filters provided.
-	ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error)
-	// Updates a given HMAC key state between ACTIVE and INACTIVE.
-	UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error)
-}
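The `resource` convention in the IAM comments above is the part most easily misread. A hedged sketch of both forms, assuming a client built as in the earlier example (`checkAccess` and all names are ours, not part of the package):

```go
package main

import (
	"context"
	"fmt"

	iampb "google.golang.org/genproto/googleapis/iam/v1"          // the `v1` import used by this file
	storagepb "google.golang.org/genproto/googleapis/storage/v2" // assumed path for this generated package
)

// checkAccess demonstrates the bucket-level and object-level resource names.
func checkAccess(ctx context.Context, client storagepb.StorageClient) error {
	// Bucket-level policy: projects/_/buckets/<bucket_name>.
	policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{
		Resource: "projects/_/buckets/example-bucket",
	})
	if err != nil {
		return err
	}
	fmt.Println("bindings:", len(policy.GetBindings()))

	// Object-level permission test: projects/_/buckets/<bucket_name>/objects/<object_name>.
	resp, err := client.TestIamPermissions(ctx, &iampb.TestIamPermissionsRequest{
		Resource:    "projects/_/buckets/example-bucket/objects/example.txt",
		Permissions: []string{"storage.objects.get"},
	})
	if err != nil {
		return err
	}
	fmt.Println("granted:", resp.GetPermissions())
	return nil
}
```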
-
-type storageClient struct {
-	cc grpc.ClientConnInterface
-}
-
-func NewStorageClient(cc grpc.ClientConnInterface) StorageClient {
-	return &storageClient{cc}
-}
-
-func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
-	out := new(emptypb.Empty)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteBucket", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error) {
-	out := new(Bucket)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetBucket", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) CreateBucket(ctx context.Context, in *CreateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) {
-	out := new(Bucket)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateBucket", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) {
-	out := new(ListBucketsResponse)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListBuckets", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) {
-	out := new(Bucket)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/LockBucketRetentionPolicy", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) {
-	out := new(v1.Policy)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetIamPolicy", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) {
-	out := new(v1.Policy)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/SetIamPolicy", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) {
-	out := new(v1.TestIamPermissionsResponse)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/TestIamPermissions", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) {
-	out := new(Bucket)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateBucket", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
-	out := new(emptypb.Empty)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotification", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error) {
-	out := new(Notification)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotification", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) CreateNotification(ctx context.Context, in *CreateNotificationRequest, opts ...grpc.CallOption) (*Notification, error) {
-	out := new(Notification)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotification", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) ListNotifications(ctx context.Context, in *ListNotificationsRequest, opts ...grpc.CallOption) (*ListNotificationsResponse, error) {
-	out := new(ListNotificationsResponse)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotifications", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) {
-	out := new(Object)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ComposeObject", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
-	out := new(emptypb.Empty)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteObject", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
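The `DeleteObject` comment above notes that deletions are permanent when a specific `generation` is addressed, even in a versioned bucket. A minimal sketch of that call, assuming the same client as before (helper name, bucket, object, and generation number are all illustrative):

```go
package main

import (
	"context"

	storagepb "google.golang.org/genproto/googleapis/storage/v2" // assumed path for this generated package
)

// deleteExactGeneration removes one specific generation of an object; this is
// permanent regardless of the bucket's versioning setting.
func deleteExactGeneration(ctx context.Context, client storagepb.StorageClient) error {
	_, err := client.DeleteObject(ctx, &storagepb.DeleteObjectRequest{
		Bucket:     "projects/_/buckets/example-bucket", // v2 addresses buckets by full resource name
		Object:     "example.txt",                       // illustrative object name
		Generation: 1234567890,                          // illustrative generation
	})
	return err
}
```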
-
-func (c *storageClient) CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) {
-	out := new(CancelResumableWriteResponse)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CancelResumableWrite", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) {
-	out := new(Object)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetObject", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error) {
-	stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[0], "/google.storage.v2.Storage/ReadObject", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &storageReadObjectClient{stream}
-	if err := x.ClientStream.SendMsg(in); err != nil {
-		return nil, err
-	}
-	if err := x.ClientStream.CloseSend(); err != nil {
-		return nil, err
-	}
-	return x, nil
-}
-
-type Storage_ReadObjectClient interface {
-	Recv() (*ReadObjectResponse, error)
-	grpc.ClientStream
-}
-
-type storageReadObjectClient struct {
-	grpc.ClientStream
-}
-
-func (x *storageReadObjectClient) Recv() (*ReadObjectResponse, error) {
-	m := new(ReadObjectResponse)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) {
-	out := new(Object)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateObject", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) {
-	stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/WriteObject", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &storageWriteObjectClient{stream}
-	return x, nil
-}
-
-type Storage_WriteObjectClient interface {
-	Send(*WriteObjectRequest) error
-	CloseAndRecv() (*WriteObjectResponse, error)
-	grpc.ClientStream
-}
-
-type storageWriteObjectClient struct {
-	grpc.ClientStream
-}
-
-func (x *storageWriteObjectClient) Send(m *WriteObjectRequest) error {
-	return x.ClientStream.SendMsg(m)
-}
-
-func (x *storageWriteObjectClient) CloseAndRecv() (*WriteObjectResponse, error) {
-	if err := x.ClientStream.CloseSend(); err != nil {
-		return nil, err
-	}
-	m := new(WriteObjectResponse)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func (c *storageClient) ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) {
-	out := new(ListObjectsResponse)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListObjects", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error) {
-	out := new(RewriteResponse)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/RewriteObject", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
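The `ReadObject` wrappers above hide the usual server-streaming pattern: send one request, then call `Recv` until `io.EOF`. A minimal download sketch under the same assumptions as the earlier examples:

```go
package main

import (
	"context"
	"io"

	storagepb "google.golang.org/genproto/googleapis/storage/v2" // assumed path for this generated package
)

// downloadObject drains the server stream returned by ReadObject; io.EOF
// marks a clean end of stream, and each response carries one data chunk.
func downloadObject(ctx context.Context, client storagepb.StorageClient) ([]byte, error) {
	stream, err := client.ReadObject(ctx, &storagepb.ReadObjectRequest{
		Bucket: "projects/_/buckets/example-bucket", // illustrative names
		Object: "example.txt",
	})
	if err != nil {
		return nil, err
	}
	var data []byte
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return data, nil
		}
		if err != nil {
			return nil, err
		}
		data = append(data, resp.GetChecksummedData().GetContent()...)
	}
}
```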
-
-func (c *storageClient) StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error) {
-	out := new(StartResumableWriteResponse)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/StartResumableWrite", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) {
-	out := new(QueryWriteStatusResponse)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/QueryWriteStatus", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) {
-	out := new(ServiceAccount)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) {
-	out := new(CreateHmacKeyResponse)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
-	out := new(emptypb.Empty)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
-	out := new(HmacKeyMetadata)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) {
-	out := new(ListHmacKeysResponse)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
-	out := new(HmacKeyMetadata)
-	err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
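The long `WriteObject` comment earlier describes the single-stream write: the first message carries a `WriteObjectSpec`, and the final message sets `finish_write`. A hedged sketch of that one-shot path (the oneof wrapper names come straight from the `OneofWrappers` lists above; chunking, checksums, and the resumable `upload_id` variant are omitted):

```go
package main

import (
	"context"

	storagepb "google.golang.org/genproto/googleapis/storage/v2" // assumed path for this generated package
)

// uploadSmallObject performs the single-stream write: spec in the first
// message, payload attached, FinishWrite set, then CloseAndRecv for the result.
func uploadSmallObject(ctx context.Context, client storagepb.StorageClient, payload []byte) (*storagepb.Object, error) {
	stream, err := client.WriteObject(ctx)
	if err != nil {
		return nil, err
	}
	err = stream.Send(&storagepb.WriteObjectRequest{
		FirstMessage: &storagepb.WriteObjectRequest_WriteObjectSpec{
			WriteObjectSpec: &storagepb.WriteObjectSpec{
				Resource: &storagepb.Object{
					Bucket: "projects/_/buckets/example-bucket", // illustrative names
					Name:   "example.txt",
				},
			},
		},
		Data: &storagepb.WriteObjectRequest_ChecksummedData{
			ChecksummedData: &storagepb.ChecksummedData{Content: payload},
		},
		FinishWrite: true,
	})
	if err != nil {
		return nil, err
	}
	resp, err := stream.CloseAndRecv()
	if err != nil {
		return nil, err
	}
	return resp.GetResource(), nil
}
```

For large uploads, the comment's resumable path applies instead: call `StartResumableWrite`, carry the returned `upload_id` in each stream's first message, and on a retryable failure call `QueryWriteStatus` and resume from the returned `persisted_size`.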
- SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) - // Tests a set of permissions on the given bucket or object to see which, if - // any, are held by the caller. - // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. - TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) - // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. - UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) - // Permanently deletes a notification subscription. - DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) - // View a notification config. - GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) - // Creates a notification subscription for a given bucket. - // These notifications, when triggered, publish messages to the specified - // Pub/Sub topics. - // See https://cloud.google.com/storage/docs/pubsub-notifications. - CreateNotification(context.Context, *CreateNotificationRequest) (*Notification, error) - // Retrieves a list of notification subscriptions for a given bucket. - ListNotifications(context.Context, *ListNotificationsRequest) (*ListNotificationsResponse, error) - // Concatenates a list of existing objects into a new object in the same - // bucket. - ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) - // Deletes an object and its metadata. Deletions are permanent if versioning - // is not enabled for the bucket, or if the `generation` parameter is used. - DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) - // Cancels an in-progress resumable upload. - CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) - // Retrieves an object's metadata. - GetObject(context.Context, *GetObjectRequest) (*Object, error) - // Reads an object's data. - ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error - // Updates an object's metadata. - // Equivalent to JSON API's storage.objects.patch. - UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) - // Stores a new object and metadata. - // - // An object can be written either in a single message stream or in a - // resumable sequence of message streams. To write using a single stream, - // the client should include in the first message of the stream an - // `WriteObjectSpec` describing the destination bucket, object, and any - // preconditions. Additionally, the final message must set 'finish_write' to - // true, or else it is an error. - // - // For a resumable write, the client should instead call - // `StartResumableWrite()`, populating a `WriteObjectSpec` into that request. - // They should then attach the returned `upload_id` to the first message of - // each following call to `WriteObject`. If the stream is closed before - // finishing the upload (either explicitly by the client or due to a network - // error or an error response from the server), the client should do as - // follows: - // - Check the result Status of the stream, to determine if writing can be - // resumed on this stream or must be restarted from scratch (by calling - // `StartResumableWrite()`). The resumable errors are DEADLINE_EXCEEDED, - // INTERNAL, and UNAVAILABLE. For each case, the client should use binary - // exponential backoff before retrying. 
Additionally, writes can be - // resumed after RESOURCE_EXHAUSTED errors, but only after taking - // appropriate measures, which may include reducing aggregate send rate - // across clients and/or requesting a quota increase for your project. - // - If the call to `WriteObject` returns `ABORTED`, that indicates - // concurrent attempts to update the resumable write, caused either by - // multiple racing clients or by a single client where the previous - // request was timed out on the client side but nonetheless reached the - // server. In this case the client should take steps to prevent further - // concurrent writes (e.g., increase the timeouts, stop using more than - // one process to perform the upload, etc.), and then should follow the - // steps below for resuming the upload. - // - For resumable errors, the client should call `QueryWriteStatus()` and - // then continue writing from the returned `persisted_size`. This may be - // less than the amount of data the client previously sent. Note also that - // it is acceptable to send data starting at an offset earlier than the - // returned `persisted_size`; in this case, the service will skip data at - // offsets that were already persisted (without checking that it matches - // the previously written data), and write only the data starting from the - // persisted offset. This behavior can make client-side handling simpler - // in some cases. - // - // The service will not view the object as complete until the client has - // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any - // requests on a stream after sending a request with `finish_write` set to - // `true` will cause an error. The client **should** check the response it - // receives to determine how much data the service was able to commit and - // whether the service views the object as complete. - // - // Attempting to resume an already finalized object will result in an OK - // status, with a WriteObjectResponse containing the finalized object's - // metadata. - WriteObject(Storage_WriteObjectServer) error - // Retrieves a list of objects matching the criteria. - ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) - // Rewrites a source object to a destination object. Optionally overrides - // metadata. - RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) - // Starts a resumable write. How long the write operation remains valid, and - // what happens when the write operation becomes invalid, are - // service-dependent. - StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) - // Determines the `persisted_size` for an object that is being written, which - // can then be used as the `write_offset` for the next `Write()` call. - // - // If the object does not exist (i.e., the object has been deleted, or the - // first `Write()` has not yet reached the service), this method returns the - // error `NOT_FOUND`. - // - // The client **may** call `QueryWriteStatus()` at any time to determine how - // much data has been processed for this object. This is useful if the - // client is buffering data and needs to know which data can be safely - // evicted. For any sequence of `QueryWriteStatus()` calls for a given - // object name, the sequence of returned `persisted_size` values will be - // non-decreasing. 
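[Editor's note] The deleted proto comments above spell out the resumable-write recovery protocol in prose. As a rough illustration only, here is a minimal client-side sketch of that flow against the generated StorageClient stubs being removed in this diff. The stubs live under an internal import path, so this only compiles inside that module; the `resumeWrite` helper name, the `storagepb` alias, and the fixed chunk size are assumptions for the sketch, not part of the vendored code.

package storagedemo

import (
	"context"

	storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
)

// resumeWrite picks an interrupted resumable upload back up: it asks the
// service how many bytes were persisted, then streams the remaining data
// from that offset, setting finish_write=true on the final message.
func resumeWrite(ctx context.Context, client storagepb.StorageClient, uploadID string, data []byte) error {
	// QueryWriteStatus returns the persisted_size; per the comments above,
	// resending from (or even before) this offset is safe.
	st, err := client.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{UploadId: uploadID})
	if err != nil {
		return err
	}
	offset := st.GetPersistedSize()

	stream, err := client.WriteObject(ctx)
	if err != nil {
		return err
	}
	const chunk = int64(2 * 1024 * 1024) // assumed chunk size, 256 KiB-aligned
	for off := offset; off < int64(len(data)); off += chunk {
		end := off + chunk
		if end > int64(len(data)) {
			end = int64(len(data))
		}
		req := &storagepb.WriteObjectRequest{
			WriteOffset: off,
			Data: &storagepb.WriteObjectRequest_ChecksummedData{
				ChecksummedData: &storagepb.ChecksummedData{Content: data[off:end]},
			},
			// The object is only finalized once a request carries finish_write=true.
			FinishWrite: end == int64(len(data)),
		}
		if off == offset {
			// Only the first message of the stream carries the upload ID.
			req.FirstMessage = &storagepb.WriteObjectRequest_UploadId{UploadId: uploadID}
		}
		if err := stream.Send(req); err != nil {
			return err
		}
	}
	_, err = stream.CloseAndRecv()
	return err
}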
- QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) - // Retrieves the name of a project's Google Cloud Storage service account. - GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) - // Creates a new HMAC key for the given service account. - CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) - // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) - // Gets an existing HMAC key metadata for the given id. - GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) - // Lists HMAC keys under a given project with the additional filters provided. - ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) - // Updates a given HMAC key state between ACTIVE and INACTIVE. - UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) -} - -// UnimplementedStorageServer can be embedded to have forward compatible implementations. -type UnimplementedStorageServer struct { -} - -func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented") -} -func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetBucket not implemented") -} -func (*UnimplementedStorageServer) CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateBucket not implemented") -} -func (*UnimplementedStorageServer) ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListBuckets not implemented") -} -func (*UnimplementedStorageServer) LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) { - return nil, status.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented") -} -func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") -} -func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") -} -func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") -} -func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") -} -func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteNotification not implemented") -} -func (*UnimplementedStorageServer) GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNotification not implemented") -} -func (*UnimplementedStorageServer) CreateNotification(context.Context, 
*CreateNotificationRequest) (*Notification, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateNotification not implemented") -} -func (*UnimplementedStorageServer) ListNotifications(context.Context, *ListNotificationsRequest) (*ListNotificationsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListNotifications not implemented") -} -func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { - return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") -} -func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") -} -func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented") -} -func (*UnimplementedStorageServer) GetObject(context.Context, *GetObjectRequest) (*Object, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented") -} -func (*UnimplementedStorageServer) ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error { - return status.Errorf(codes.Unimplemented, "method ReadObject not implemented") -} -func (*UnimplementedStorageServer) UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateObject not implemented") -} -func (*UnimplementedStorageServer) WriteObject(Storage_WriteObjectServer) error { - return status.Errorf(codes.Unimplemented, "method WriteObject not implemented") -} -func (*UnimplementedStorageServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented") -} -func (*UnimplementedStorageServer) RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RewriteObject not implemented") -} -func (*UnimplementedStorageServer) StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StartResumableWrite not implemented") -} -func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method QueryWriteStatus not implemented") -} -func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetServiceAccount not implemented") -} -func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented") -} -func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented") -} -func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetHmacKey not implemented") -} -func (*UnimplementedStorageServer) 
ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListHmacKeys not implemented") -} -func (*UnimplementedStorageServer) UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateHmacKey not implemented") -} - -func RegisterStorageServer(s *grpc.Server, srv StorageServer) { - s.RegisterService(&_Storage_serviceDesc, srv) -} - -func _Storage_DeleteBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteBucketRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).DeleteBucket(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteBucket", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteBucket(ctx, req.(*DeleteBucketRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetBucketRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetBucket(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetBucket", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetBucket(ctx, req.(*GetBucketRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_CreateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateBucketRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).CreateBucket(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/CreateBucket", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).CreateBucket(ctx, req.(*CreateBucketRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ListBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListBucketsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ListBuckets(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ListBuckets", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListBuckets(ctx, req.(*ListBucketsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_LockBucketRetentionPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LockBucketRetentionPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).LockBucketRetentionPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/google.storage.v2.Storage/LockBucketRetentionPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).LockBucketRetentionPolicy(ctx, req.(*LockBucketRetentionPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.GetIamPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetIamPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetIamPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetIamPolicy(ctx, req.(*v1.GetIamPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.SetIamPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).SetIamPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/SetIamPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).SetIamPolicy(ctx, req.(*v1.SetIamPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.TestIamPermissionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).TestIamPermissions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/TestIamPermissions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).TestIamPermissions(ctx, req.(*v1.TestIamPermissionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_UpdateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateBucketRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).UpdateBucket(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/UpdateBucket", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).UpdateBucket(ctx, req.(*UpdateBucketRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_DeleteNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteNotificationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).DeleteNotification(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteNotification", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(StorageServer).DeleteNotification(ctx, req.(*DeleteNotificationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNotificationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetNotification(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetNotification", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetNotification(ctx, req.(*GetNotificationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_CreateNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateNotificationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).CreateNotification(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/CreateNotification", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).CreateNotification(ctx, req.(*CreateNotificationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ListNotifications_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListNotificationsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ListNotifications(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ListNotifications", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListNotifications(ctx, req.(*ListNotificationsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ComposeObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ComposeObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ComposeObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ComposeObject", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ComposeObject(ctx, req.(*ComposeObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_DeleteObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).DeleteObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteObject", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteObject(ctx, req.(*DeleteObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_CancelResumableWrite_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CancelResumableWriteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).CancelResumableWrite(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/CancelResumableWrite", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).CancelResumableWrite(ctx, req.(*CancelResumableWriteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetObject", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetObject(ctx, req.(*GetObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ReadObject_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ReadObjectRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(StorageServer).ReadObject(m, &storageReadObjectServer{stream}) -} - -type Storage_ReadObjectServer interface { - Send(*ReadObjectResponse) error - grpc.ServerStream -} - -type storageReadObjectServer struct { - grpc.ServerStream -} - -func (x *storageReadObjectServer) Send(m *ReadObjectResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Storage_UpdateObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).UpdateObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/UpdateObject", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).UpdateObject(ctx, req.(*UpdateObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_WriteObject_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(StorageServer).WriteObject(&storageWriteObjectServer{stream}) -} - -type Storage_WriteObjectServer interface { - SendAndClose(*WriteObjectResponse) error - Recv() (*WriteObjectRequest, error) - grpc.ServerStream -} - -type storageWriteObjectServer struct { - grpc.ServerStream -} - -func (x *storageWriteObjectServer) SendAndClose(m *WriteObjectResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *storageWriteObjectServer) Recv() (*WriteObjectRequest, error) { - m := new(WriteObjectRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Storage_ListObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListObjectsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ListObjects(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - 
FullMethod: "/google.storage.v2.Storage/ListObjects", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListObjects(ctx, req.(*ListObjectsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_RewriteObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RewriteObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).RewriteObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/RewriteObject", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).RewriteObject(ctx, req.(*RewriteObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_StartResumableWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StartResumableWriteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).StartResumableWrite(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/StartResumableWrite", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).StartResumableWrite(ctx, req.(*StartResumableWriteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_QueryWriteStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryWriteStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).QueryWriteStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/QueryWriteStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).QueryWriteStatus(ctx, req.(*QueryWriteStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetServiceAccountRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetServiceAccount(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetServiceAccount", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetServiceAccount(ctx, req.(*GetServiceAccountRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_CreateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).CreateHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/CreateHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(StorageServer).CreateHmacKey(ctx, req.(*CreateHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_DeleteHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).DeleteHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteHmacKey(ctx, req.(*DeleteHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetHmacKey(ctx, req.(*GetHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ListHmacKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListHmacKeysRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ListHmacKeys(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ListHmacKeys", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListHmacKeys(ctx, req.(*ListHmacKeysRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_UpdateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).UpdateHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/UpdateHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).UpdateHmacKey(ctx, req.(*UpdateHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Storage_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.storage.v2.Storage", - HandlerType: (*StorageServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "DeleteBucket", - Handler: _Storage_DeleteBucket_Handler, - }, - { - MethodName: "GetBucket", - Handler: _Storage_GetBucket_Handler, - }, - { - MethodName: "CreateBucket", - Handler: _Storage_CreateBucket_Handler, - }, - { - MethodName: "ListBuckets", - Handler: _Storage_ListBuckets_Handler, - }, - { - MethodName: "LockBucketRetentionPolicy", - Handler: _Storage_LockBucketRetentionPolicy_Handler, - }, - { - MethodName: "GetIamPolicy", - Handler: _Storage_GetIamPolicy_Handler, - }, - { - MethodName: "SetIamPolicy", - Handler: _Storage_SetIamPolicy_Handler, - }, - { - MethodName: "TestIamPermissions", - Handler: 
_Storage_TestIamPermissions_Handler, - }, - { - MethodName: "UpdateBucket", - Handler: _Storage_UpdateBucket_Handler, - }, - { - MethodName: "DeleteNotification", - Handler: _Storage_DeleteNotification_Handler, - }, - { - MethodName: "GetNotification", - Handler: _Storage_GetNotification_Handler, - }, - { - MethodName: "CreateNotification", - Handler: _Storage_CreateNotification_Handler, - }, - { - MethodName: "ListNotifications", - Handler: _Storage_ListNotifications_Handler, - }, - { - MethodName: "ComposeObject", - Handler: _Storage_ComposeObject_Handler, - }, - { - MethodName: "DeleteObject", - Handler: _Storage_DeleteObject_Handler, - }, - { - MethodName: "CancelResumableWrite", - Handler: _Storage_CancelResumableWrite_Handler, - }, - { - MethodName: "GetObject", - Handler: _Storage_GetObject_Handler, - }, - { - MethodName: "UpdateObject", - Handler: _Storage_UpdateObject_Handler, - }, - { - MethodName: "ListObjects", - Handler: _Storage_ListObjects_Handler, - }, - { - MethodName: "RewriteObject", - Handler: _Storage_RewriteObject_Handler, - }, - { - MethodName: "StartResumableWrite", - Handler: _Storage_StartResumableWrite_Handler, - }, - { - MethodName: "QueryWriteStatus", - Handler: _Storage_QueryWriteStatus_Handler, - }, - { - MethodName: "GetServiceAccount", - Handler: _Storage_GetServiceAccount_Handler, - }, - { - MethodName: "CreateHmacKey", - Handler: _Storage_CreateHmacKey_Handler, - }, - { - MethodName: "DeleteHmacKey", - Handler: _Storage_DeleteHmacKey_Handler, - }, - { - MethodName: "GetHmacKey", - Handler: _Storage_GetHmacKey_Handler, - }, - { - MethodName: "ListHmacKeys", - Handler: _Storage_ListHmacKeys_Handler, - }, - { - MethodName: "UpdateHmacKey", - Handler: _Storage_UpdateHmacKey_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ReadObject", - Handler: _Storage_ReadObject_Handler, - ServerStreams: true, - }, - { - StreamName: "WriteObject", - Handler: _Storage_WriteObject_Handler, - ClientStreams: true, - }, - }, - Metadata: "google/storage/v2/storage.proto", -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/version.go b/vendor/cloud.google.com/go/storage/internal/apiv2/version.go deleted file mode 100644 index 15920f3f63..0000000000 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/version.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by gapicgen. DO NOT EDIT. - -package storage - -import "cloud.google.com/go/storage/internal" - -func init() { - versionClient = internal.Version -} diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go deleted file mode 100644 index a08cb7cabc..0000000000 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
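[Editor's note] The UnimplementedStorageServer, RegisterStorageServer, and per-method handler plumbing deleted above follow the standard grpc-go generated-code pattern: each handler checks `interceptor == nil` and otherwise routes the call through the configured unary interceptor. A hedged sketch of how a partial server implementation would plug in, written as if it lived alongside the generated code above; the `bucketOnlyServer` type, its logic, and the listener address are invented for illustration.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// bucketOnlyServer embeds UnimplementedStorageServer so every method it does
// not override returns codes.Unimplemented, keeping it forward compatible.
type bucketOnlyServer struct {
	UnimplementedStorageServer
}

func (s *bucketOnlyServer) GetBucket(ctx context.Context, req *GetBucketRequest) (*Bucket, error) {
	if req.GetName() == "" {
		return nil, status.Error(codes.InvalidArgument, "name is required")
	}
	return &Bucket{Name: req.GetName()}, nil
}

// logUnary shows the interceptor hook that the generated handlers dispatch to.
func logUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("call %s", info.FullMethod) // e.g. /google.storage.v2.Storage/GetBucket
	return handler(ctx, req)
}

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer(grpc.UnaryInterceptor(logUnary))
	RegisterStorageServer(srv, &bucketOnlyServer{})
	log.Fatal(srv.Serve(lis))
}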
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -// Version is the current tagged release of the library. -const Version = "1.29.0" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go deleted file mode 100644 index 810d64285d..0000000000 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "strings" - - "cloud.google.com/go/internal" - "cloud.google.com/go/internal/version" - sinternal "cloud.google.com/go/storage/internal" - "github.com/google/uuid" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/googleapi" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var defaultRetry *retryConfig = &retryConfig{} -var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version) - -// run determines whether a retry is necessary based on the config and -// idempotency information. It then calls the function with or without retries -// as appropriate, using the configured settings. 
-func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool, setHeader func(string, int)) error { - attempts := 1 - invocationID := uuid.New().String() - - if retry == nil { - retry = defaultRetry - } - if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever { - setHeader(invocationID, attempts) - return call() - } - bo := gax.Backoff{} - if retry.backoff != nil { - bo.Multiplier = retry.backoff.Multiplier - bo.Initial = retry.backoff.Initial - bo.Max = retry.backoff.Max - } - var errorFunc func(err error) bool = ShouldRetry - if retry.shouldRetry != nil { - errorFunc = retry.shouldRetry - } - - return internal.Retry(ctx, bo, func() (stop bool, err error) { - setHeader(invocationID, attempts) - err = call() - attempts++ - return !errorFunc(err), err - }) -} - -func setRetryHeaderHTTP(req interface{ Header() http.Header }) func(string, int) { - return func(invocationID string, attempts int) { - if req == nil { - return - } - header := req.Header() - invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) - xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") - header.Set("x-goog-api-client", xGoogHeader) - } -} - -// TODO: Implement method setting header via context for gRPC -func setRetryHeaderGRPC(_ context.Context) func(string, int) { - return func(_ string, _ int) { - return - } -} - -// ShouldRetry returns true if an error is retryable, based on best practice -// guidance from GCS. See -// https://cloud.google.com/storage/docs/retry-strategy#go for more information -// on what errors are considered retryable. -// -// If you would like to customize retryable errors, use the WithErrorFunc to -// supply a RetryOption to your library calls. For example, to retry additional -// errors, you can write a custom func that wraps ShouldRetry and also specifies -// additional errors that should return true. -func ShouldRetry(err error) bool { - if err == nil { - return false - } - if errors.Is(err, io.ErrUnexpectedEOF) { - return true - } - - switch e := err.(type) { - case *net.OpError: - if strings.Contains(e.Error(), "use of closed network connection") { - // TODO: check against net.ErrClosed (go 1.16+) instead of string - return true - } - case *googleapi.Error: - // Retry on 408, 429, and 5xx, according to - // https://cloud.google.com/storage/docs/exponential-backoff. - return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600) - case *url.Error: - // Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall). - // Unfortunately the error type is unexported, so we resort to string - // matching. - retriable := []string{"connection refused", "connection reset"} - for _, s := range retriable { - if strings.Contains(e.Error(), s) { - return true - } - } - case interface{ Temporary() bool }: - if e.Temporary() { - return true - } - } - // HTTP 429, 502, 503, and 504 all map to gRPC UNAVAILABLE per - // https://grpc.github.io/grpc/core/md_doc_http-grpc-status-mapping.html. - // - // This is only necessary for the experimental gRPC-based media operations. 
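[Editor's note] The ShouldRetry doc comment above points callers at WithErrorFunc for customization. For reference, a sketch of how an application layers custom retry behavior through the package's public RetryOption surface, which the deleted run()/gax.Backoff machinery drives internally; the bucket/object names and the isMyTransientError helper are placeholders.

package main

import (
	"context"
	"time"

	"cloud.google.com/go/storage"
	"github.com/googleapis/gax-go/v2"
)

func configureRetries(ctx context.Context, client *storage.Client) *storage.ObjectHandle {
	return client.Bucket("example-bucket").Object("example-object").Retryer(
		// Wrap the package's default classifier and retry one extra case.
		storage.WithErrorFunc(func(err error) bool {
			return storage.ShouldRetry(err) || isMyTransientError(err)
		}),
		// Tune the exponential backoff fed into gax.Backoff by run().
		storage.WithBackoff(gax.Backoff{Initial: 2 * time.Second, Max: 30 * time.Second, Multiplier: 3}),
		// Retry even non-idempotent operations.
		storage.WithPolicy(storage.RetryAlways),
	)
}

// isMyTransientError is a placeholder for app-specific error classification.
func isMyTransientError(err error) bool { return false }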
- if st, ok := status.FromError(err); ok && st.Code() == codes.Unavailable { - return true - } - // Unwrap is only supported in go1.13.x+ - if e, ok := err.(interface{ Unwrap() error }); ok { - return ShouldRetry(e.Unwrap()) - } - return false -} diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go deleted file mode 100644 index 614feb7b6d..0000000000 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "errors" - "fmt" - "regexp" - - "cloud.google.com/go/internal/trace" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" - raw "google.golang.org/api/storage/v1" -) - -// A Notification describes how to send Cloud PubSub messages when certain -// events occur in a bucket. -type Notification struct { - //The ID of the notification. - ID string - - // The ID of the topic to which this subscription publishes. - TopicID string - - // The ID of the project to which the topic belongs. - TopicProjectID string - - // Only send notifications about listed event types. If empty, send notifications - // for all event types. - // See https://cloud.google.com/storage/docs/pubsub-notifications#events. - EventTypes []string - - // If present, only apply this notification configuration to object names that - // begin with this prefix. - ObjectNamePrefix string - - // An optional list of additional attributes to attach to each Cloud PubSub - // message published for this notification subscription. - CustomAttributes map[string]string - - // The contents of the message payload. - // See https://cloud.google.com/storage/docs/pubsub-notifications#payload. - PayloadFormat string -} - -// Values for Notification.PayloadFormat. -const ( - // Send no payload with notification messages. - NoPayload = "NONE" - - // Send object metadata as JSON with notification messages. - JSONPayload = "JSON_API_V1" -) - -// Values for Notification.EventTypes. -const ( - // Event that occurs when an object is successfully created. - ObjectFinalizeEvent = "OBJECT_FINALIZE" - - // Event that occurs when the metadata of an existing object changes. - ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE" - - // Event that occurs when an object is permanently deleted. - ObjectDeleteEvent = "OBJECT_DELETE" - - // Event that occurs when the live version of an object becomes an - // archived version. 
- ObjectArchiveEvent = "OBJECT_ARCHIVE" -) - -func toNotification(rn *raw.Notification) *Notification { - n := &Notification{ - ID: rn.Id, - EventTypes: rn.EventTypes, - ObjectNamePrefix: rn.ObjectNamePrefix, - CustomAttributes: rn.CustomAttributes, - PayloadFormat: rn.PayloadFormat, - } - n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic) - return n -} - -func toNotificationFromProto(pbn *storagepb.Notification) *Notification { - n := &Notification{ - ID: pbn.GetName(), - EventTypes: pbn.GetEventTypes(), - ObjectNamePrefix: pbn.GetObjectNamePrefix(), - CustomAttributes: pbn.GetCustomAttributes(), - PayloadFormat: pbn.GetPayloadFormat(), - } - n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic) - return n -} - -func toProtoNotification(n *Notification) *storagepb.Notification { - return &storagepb.Notification{ - Name: n.ID, - Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", - n.TopicProjectID, n.TopicID), - EventTypes: n.EventTypes, - ObjectNamePrefix: n.ObjectNamePrefix, - CustomAttributes: n.CustomAttributes, - PayloadFormat: n.PayloadFormat, - } -} - -var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)") - -// parseNotificationTopic extracts the project and topic IDs from from the full -// resource name returned by the service. If the name is malformed, it returns -// "?" for both IDs. -func parseNotificationTopic(nt string) (projectID, topicID string) { - matches := topicRE.FindStringSubmatch(nt) - if matches == nil { - return "?", "?" - } - return matches[1], matches[2] -} - -func toRawNotification(n *Notification) *raw.Notification { - return &raw.Notification{ - Id: n.ID, - Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", - n.TopicProjectID, n.TopicID), - EventTypes: n.EventTypes, - ObjectNamePrefix: n.ObjectNamePrefix, - CustomAttributes: n.CustomAttributes, - PayloadFormat: string(n.PayloadFormat), - } -} - -// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID -// and PayloadFormat, and must not set its ID. The other fields are all optional. The -// returned Notification's ID can be used to refer to it. -func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification") - defer func() { trace.EndSpan(ctx, err) }() - - if n.ID != "" { - return nil, errors.New("storage: AddNotification: ID must not be set") - } - if n.TopicProjectID == "" { - return nil, errors.New("storage: AddNotification: missing TopicProjectID") - } - if n.TopicID == "" { - return nil, errors.New("storage: AddNotification: missing TopicID") - } - - opts := makeStorageOpts(false, b.retry, b.userProject) - ret, err = b.c.tc.CreateNotification(ctx, b.name, n, opts...) - return ret, err -} - -// Notifications returns all the Notifications configured for this bucket, as a map -// indexed by notification ID. -func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") - defer func() { trace.EndSpan(ctx, err) }() - - opts := makeStorageOpts(true, b.retry, b.userProject) - n, err = b.c.tc.ListNotifications(ctx, b.name, opts...) 
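[Editor's note] As a usage note on the AddNotification API whose implementation is being deleted here: the doc comment above requires TopicProjectID, TopicID, and PayloadFormat to be set and ID to be left empty. A minimal sketch; the project, bucket, and topic names are invented.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func addFinalizeNotification(ctx context.Context, client *storage.Client) {
	// Leave ID empty so the service assigns one; it is returned on success.
	n, err := client.Bucket("example-bucket").AddNotification(ctx, &storage.Notification{
		TopicProjectID: "example-project",
		TopicID:        "uploads-topic",
		PayloadFormat:  storage.JSONPayload,
		EventTypes:     []string{storage.ObjectFinalizeEvent},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created notification %s", n.ID)
}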
- return n, err -} - -func notificationsToMap(rns []*raw.Notification) map[string]*Notification { - m := map[string]*Notification{} - for _, rn := range rns { - m[rn.Id] = toNotification(rn) - } - return m -} - -func notificationsToMapFromProto(ns []*storagepb.Notification) map[string]*Notification { - m := map[string]*Notification{} - for _, n := range ns { - m[n.Name] = toNotificationFromProto(n) - } - return m -} - -// DeleteNotification deletes the notification with the given ID. -func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") - defer func() { trace.EndSpan(ctx, err) }() - - opts := makeStorageOpts(true, b.retry, b.userProject) - return b.c.tc.DeleteNotification(ctx, b.name, id, opts...) -} diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go deleted file mode 100644 index 2961aca206..0000000000 --- a/vendor/cloud.google.com/go/storage/post_policy_v4.go +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/url" - "strings" - "time" -) - -// PostPolicyV4Options are used to construct a signed post policy. -// Please see https://cloud.google.com/storage/docs/xml-api/post-object -// for reference about the fields. -type PostPolicyV4Options struct { - // GoogleAccessID represents the authorizer of the signed URL generation. - // It is typically the Google service account client email address from - // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". - // Required. - GoogleAccessID string - - // PrivateKey is the Google service account private key. It is obtainable - // from the Google Developers Console. - // At https://console.developers.google.com/project/<your-project-id>/apiui/credential, - // create a service account client ID or reuse one of your existing service account - // credentials. Click on the "Generate new P12 key" to generate and download - // a new private key. Once you download the P12 file, use the following command - // to convert it into a PEM file. - // - // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes - // - // Provide the contents of the PEM file as a byte slice. - // Exactly one of PrivateKey or SignBytes must be non-nil. - PrivateKey []byte - - // SignBytes is a function for implementing custom signing. - // - // Deprecated: Use SignRawBytes. If both SignBytes and SignRawBytes are defined, - // SignBytes will be ignored. - // This SignBytes function expects the bytes it receives to be hashed, while - // SignRawBytes accepts the raw bytes without hashing, allowing more flexibility.
- // Add the following to the top of your signing function to hash the bytes - // to use SignRawBytes instead: - // shaSum := sha256.Sum256(bytes) - // bytes = shaSum[:] - // - SignBytes func(hashBytes []byte) (signature []byte, err error) - - // SignRawBytes is a function for implementing custom signing. For example, if - // your application is running on Google App Engine, you can use - // appengine's internal signing function: - // ctx := appengine.NewContext(request) - // acc, _ := appengine.ServiceAccount(ctx) - // &PostPolicyV4Options{ - // GoogleAccessID: acc, - // SignRawBytes: func(b []byte) ([]byte, error) { - // _, signedBytes, err := appengine.SignBytes(ctx, b) - // return signedBytes, err - // }, - // // etc. - // }) - // - // SignRawBytes is equivalent to the SignBytes field on SignedURLOptions; - // that is, you may use the same signing function for the two. - // - // Exactly one of PrivateKey or SignRawBytes must be non-nil. - SignRawBytes func(bytes []byte) (signature []byte, err error) - - // Expires is the expiration time on the signed URL. - // It must be a time in the future. - // Required. - Expires time.Time - - // Style provides options for the type of URL to use. Options are - // PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See - // https://cloud.google.com/storage/docs/request-endpoints for details. - // Optional. - Style URLStyle - - // Insecure when set indicates that the generated URL's scheme - // will use "http" instead of "https" (default). - // Optional. - Insecure bool - - // Fields specifies the attributes of a PostPolicyV4 request. - // When Fields is non-nil, its attributes must match those that will - // passed into field Conditions. - // Optional. - Fields *PolicyV4Fields - - // The conditions that the uploaded file will be expected to conform to. - // When used, the failure of an upload to satisfy a condition will result in - // a 4XX status code, back with the message describing the problem. - // Optional. - Conditions []PostPolicyV4Condition - - shouldHashSignBytes bool -} - -func (opts *PostPolicyV4Options) clone() *PostPolicyV4Options { - return &PostPolicyV4Options{ - GoogleAccessID: opts.GoogleAccessID, - PrivateKey: opts.PrivateKey, - SignBytes: opts.SignBytes, - SignRawBytes: opts.SignRawBytes, - Expires: opts.Expires, - Style: opts.Style, - Insecure: opts.Insecure, - Fields: opts.Fields, - Conditions: opts.Conditions, - shouldHashSignBytes: opts.shouldHashSignBytes, - } -} - -// PolicyV4Fields describes the attributes for a PostPolicyV4 request. -type PolicyV4Fields struct { - // ACL specifies the access control permissions for the object. - // Optional. - ACL string - // CacheControl specifies the caching directives for the object. - // Optional. - CacheControl string - // ContentType specifies the media type of the object. - // Optional. - ContentType string - // ContentDisposition specifies how the file will be served back to requesters. - // Optional. - ContentDisposition string - // ContentEncoding specifies the decompressive transcoding that the object. - // This field is complementary to ContentType in that the file could be - // compressed but ContentType specifies the file's original media type. - // Optional. - ContentEncoding string - // Metadata specifies custom metadata for the object. - // If any key doesn't begin with "x-goog-meta-", an error will be returned. - // Optional. 
- Metadata map[string]string - // StatusCodeOnSuccess when set, specifies the status code that Cloud Storage - // will serve back on successful upload of the object. - // Optional. - StatusCodeOnSuccess int - // RedirectToURLOnSuccess when set, specifies the URL that Cloud Storage - // will serve back on successful upload of the object. - // Optional. - RedirectToURLOnSuccess string -} - -// PostPolicyV4 describes the URL and respective form fields for a generated PostPolicyV4 request. -type PostPolicyV4 struct { - // URL is the generated URL that the file upload will be made to. - URL string - // Fields specifies the generated key-values that the file uploader - // must include in their multipart upload form. - Fields map[string]string -} - -// PostPolicyV4Condition describes the constraints that the subsequent -// object upload's multipart form fields will be expected to conform to. -type PostPolicyV4Condition interface { - isEmpty() bool - json.Marshaler -} - -type startsWith struct { - key, value string -} - -func (sw *startsWith) MarshalJSON() ([]byte, error) { - return json.Marshal([]string{"starts-with", sw.key, sw.value}) -} -func (sw *startsWith) isEmpty() bool { - return sw.value == "" -} - -// ConditionStartsWith checks that an attributes starts with value. -// An empty value will cause this condition to be ignored. -func ConditionStartsWith(key, value string) PostPolicyV4Condition { - return &startsWith{key, value} -} - -type contentLengthRangeCondition struct { - start, end uint64 -} - -func (clr *contentLengthRangeCondition) MarshalJSON() ([]byte, error) { - return json.Marshal([]interface{}{"content-length-range", clr.start, clr.end}) -} -func (clr *contentLengthRangeCondition) isEmpty() bool { - return clr.start == 0 && clr.end == 0 -} - -type singleValueCondition struct { - name, value string -} - -func (svc *singleValueCondition) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]string{svc.name: svc.value}) -} -func (svc *singleValueCondition) isEmpty() bool { - return svc.value == "" -} - -// ConditionContentLengthRange constraints the limits that the -// multipart upload's range header will be expected to be within. -func ConditionContentLengthRange(start, end uint64) PostPolicyV4Condition { - return &contentLengthRangeCondition{start, end} -} - -func conditionRedirectToURLOnSuccess(redirectURL string) PostPolicyV4Condition { - return &singleValueCondition{"success_action_redirect", redirectURL} -} - -func conditionStatusCodeOnSuccess(statusCode int) PostPolicyV4Condition { - svc := &singleValueCondition{name: "success_action_status"} - if statusCode > 0 { - svc.value = fmt.Sprintf("%d", statusCode) - } - return svc -} - -// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. -// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. -// If initializing a Storage Client, instead use the Bucket.GenerateSignedPostPolicyV4 -// method which uses the Client's credentials to handle authentication. 
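[Editor's note] Before the GenerateSignedPostPolicyV4 implementation below, a sketch of how callers consume this API: the returned URL plus Fields become a multipart form POST that an unauthenticated client can submit. The bucket, object, service-account email, and key material are placeholders.

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
	"time"

	"cloud.google.com/go/storage"
)

func uploadViaPostPolicy(pemKey []byte) error {
	policy, err := storage.GenerateSignedPostPolicyV4("example-bucket", "uploads/hello.txt", &storage.PostPolicyV4Options{
		GoogleAccessID: "signer@example-project.iam.gserviceaccount.com",
		PrivateKey:     pemKey,
		Expires:        time.Now().Add(15 * time.Minute),
	})
	if err != nil {
		return err
	}

	// Every generated field must be sent as a form field alongside the file.
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	for k, v := range policy.Fields {
		if err := w.WriteField(k, v); err != nil {
			return err
		}
	}
	fw, err := w.CreateFormFile("file", "hello.txt")
	if err != nil {
		return err
	}
	fmt.Fprint(fw, "hello, world")
	w.Close()

	resp, err := http.Post(policy.URL, w.FormDataContentType(), &body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	fmt.Println("upload status:", resp.Status)
	return nil
}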
-func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { - if bucket == "" { - return nil, errors.New("storage: bucket must be non-empty") - } - if object == "" { - return nil, errors.New("storage: object must be non-empty") - } - now := utcNow() - if err := validatePostPolicyV4Options(opts, now); err != nil { - return nil, err - } - - var signingFn func(hashedBytes []byte) ([]byte, error) - switch { - case opts.SignRawBytes != nil: - signingFn = opts.SignRawBytes - case opts.shouldHashSignBytes: - signingFn = opts.SignBytes - case len(opts.PrivateKey) != 0: - parsedRSAPrivKey, err := parseKey(opts.PrivateKey) - if err != nil { - return nil, err - } - signingFn = func(b []byte) ([]byte, error) { - sum := sha256.Sum256(b) - return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, sum[:]) - } - - default: - return nil, errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set") - } - - var descFields PolicyV4Fields - if opts.Fields != nil { - descFields = *opts.Fields - } - - if err := validateMetadata(descFields.Metadata); err != nil { - return nil, err - } - - // Build the policy. - conds := make([]PostPolicyV4Condition, len(opts.Conditions)) - copy(conds, opts.Conditions) - conds = append(conds, - // These are ordered lexicographically. Technically the order doesn't matter - // for creating the policy, but we use this order to match the - // cross-language conformance tests for this feature. - &singleValueCondition{"acl", descFields.ACL}, - &singleValueCondition{"cache-control", descFields.CacheControl}, - &singleValueCondition{"content-disposition", descFields.ContentDisposition}, - &singleValueCondition{"content-encoding", descFields.ContentEncoding}, - &singleValueCondition{"content-type", descFields.ContentType}, - conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), - conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), - ) - - YYYYMMDD := now.Format(yearMonthDay) - policyFields := map[string]string{ - "key": object, - "x-goog-date": now.Format(iso8601), - "x-goog-credential": opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", - "x-goog-algorithm": "GOOG4-RSA-SHA256", - "acl": descFields.ACL, - "cache-control": descFields.CacheControl, - "content-disposition": descFields.ContentDisposition, - "content-encoding": descFields.ContentEncoding, - "content-type": descFields.ContentType, - "success_action_redirect": descFields.RedirectToURLOnSuccess, - } - for key, value := range descFields.Metadata { - conds = append(conds, &singleValueCondition{key, value}) - policyFields[key] = value - } - - // Following from the order expected by the conformance test cases, - // hence manually inserting these fields in a specific order. 
- conds = append(conds, - &singleValueCondition{"bucket", bucket}, - &singleValueCondition{"key", object}, - &singleValueCondition{"x-goog-date", now.Format(iso8601)}, - &singleValueCondition{ - name: "x-goog-credential", - value: opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", - }, - &singleValueCondition{"x-goog-algorithm", "GOOG4-RSA-SHA256"}, - ) - - nonEmptyConds := make([]PostPolicyV4Condition, 0, len(opts.Conditions)) - for _, cond := range conds { - if cond == nil || !cond.isEmpty() { - nonEmptyConds = append(nonEmptyConds, cond) - } - } - condsAsJSON, err := json.Marshal(map[string]interface{}{ - "conditions": nonEmptyConds, - "expiration": opts.Expires.Format(time.RFC3339), - }) - if err != nil { - return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %w", err) - } - - b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON) - var signature []byte - var signErr error - - if opts.shouldHashSignBytes { - // SignBytes expects hashed bytes as input instead of raw bytes, so we hash them - shaSum := sha256.Sum256([]byte(b64Policy)) - signature, signErr = signingFn(shaSum[:]) - } else { - signature, signErr = signingFn([]byte(b64Policy)) - } - if signErr != nil { - return nil, signErr - } - - policyFields["policy"] = b64Policy - policyFields["x-goog-signature"] = fmt.Sprintf("%x", signature) - - // Construct the URL. - scheme := "https" - if opts.Insecure { - scheme = "http" - } - path := opts.Style.path(bucket, "") + "/" - u := &url.URL{ - Path: path, - RawPath: pathEncodeV4(path), - Host: opts.Style.host(bucket), - Scheme: scheme, - } - - if descFields.StatusCodeOnSuccess > 0 { - policyFields["success_action_status"] = fmt.Sprintf("%d", descFields.StatusCodeOnSuccess) - } - - // Clear out fields with blank values. - for key, value := range policyFields { - if value == "" { - delete(policyFields, key) - } - } - pp4 := &PostPolicyV4{ - Fields: policyFields, - URL: u.String(), - } - return pp4, nil -} - -// validatePostPolicyV4Options checks that: -// * GoogleAccessID is set -// * either PrivateKey or SignRawBytes/SignBytes is set, but not both -// * the deadline set in Expires is not in the past -// * if Style is not set, it'll use PathStyle -// * sets shouldHashSignBytes to true if opts.SignBytes should be used -func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error { - if opts == nil || opts.GoogleAccessID == "" { - return errors.New("storage: missing required GoogleAccessID") - } - if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil && opts.SignRawBytes == nil; privBlank == signBlank { - return errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set") - } - if opts.Expires.Before(now) { - return errors.New("storage: expecting Expires to be in the future") - } - if opts.Style == nil { - opts.Style = PathStyle() - } - if opts.SignRawBytes == nil && opts.SignBytes != nil { - opts.shouldHashSignBytes = true - } - return nil -} - -// validateMetadata ensures that all keys passed in have a prefix of "x-goog-meta-", -// otherwise it will return an error. 
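For orientation, a minimal sketch of driving the package-level GenerateSignedPostPolicyV4 helper shown above; the bucket, object, service-account email, and key path are placeholders, and the metadata key carries the x-goog-meta- prefix that validateMetadata enforces:

package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	// Placeholder path to a PEM-encoded RSA service-account key.
	pemKey, err := os.ReadFile("service-account.pem")
	if err != nil {
		log.Fatal(err)
	}

	policy, err := storage.GenerateSignedPostPolicyV4("example-bucket", "uploads/photo.jpg", &storage.PostPolicyV4Options{
		GoogleAccessID: "uploader@example-project.iam.gserviceaccount.com",
		PrivateKey:     pemKey,
		Expires:        time.Now().Add(15 * time.Minute),
		Fields: &storage.PolicyV4Fields{
			ContentType: "image/jpeg",
			Metadata:    map[string]string{"x-goog-meta-owner": "example"},
		},
		Conditions: []storage.PostPolicyV4Condition{
			// Accept uploads between 1 byte and 5 MiB, keyed under uploads/.
			storage.ConditionContentLengthRange(1, 5<<20),
			storage.ConditionStartsWith("$key", "uploads/"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(policy.URL)    // POST target for the upload form
	fmt.Println(policy.Fields) // form fields the uploader must echo back
}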
-func validateMetadata(hdrs map[string]string) (err error) { - if len(hdrs) == 0 { - return nil - } - - badKeys := make([]string, 0, len(hdrs)) - for key := range hdrs { - if !strings.HasPrefix(key, "x-goog-meta-") { - badKeys = append(badKeys, key) - } - } - if len(badKeys) != 0 { - err = errors.New("storage: expected metadata to begin with x-goog-meta-, got " + strings.Join(badKeys, ", ")) - } - return -} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go deleted file mode 100644 index 46487d2b77..0000000000 --- a/vendor/cloud.google.com/go/storage/reader.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "fmt" - "hash/crc32" - "io" - "io/ioutil" - "net/http" - "strings" - "time" - - "cloud.google.com/go/internal/trace" -) - -var crc32cTable = crc32.MakeTable(crc32.Castagnoli) - -// ReaderObjectAttrs are attributes about the object being read. These are populated -// during the New call. This struct only holds a subset of object attributes: to -// get the full set of attributes, use ObjectHandle.Attrs. -// -// Each field is read-only. -type ReaderObjectAttrs struct { - // Size is the length of the object's content. - Size int64 - - // StartOffset is the byte offset within the object - // from which reading begins. - // This value is only non-zero for range requests. - StartOffset int64 - - // ContentType is the MIME type of the object's content. - ContentType string - - // ContentEncoding is the encoding of the object's content. - ContentEncoding string - - // CacheControl specifies whether and for how long browser and Internet - // caches are allowed to cache your objects. - CacheControl string - - // LastModified is the time that the object was last modified. - LastModified time.Time - - // Generation is the generation number of the object's content. - Generation int64 - - // Metageneration is the version of the metadata for this object at - // this generation. This field is used for preconditions and for - // detecting changes in metadata. A metageneration number is only - // meaningful in the context of a particular generation of a - // particular object. - Metageneration int64 -} - -// NewReader creates a new Reader to read the contents of the -// object. -// ErrObjectNotExist will be returned if the object is not found. -// -// The caller must call Close on the returned Reader when done reading. -func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { - return o.NewRangeReader(ctx, 0, -1) -} - -// NewRangeReader reads part of an object, reading at most length bytes -// starting at the given offset. If length is negative, the object is read -// until the end. If offset is negative, the object is read abs(offset) bytes -// from the end, and length must also be negative to indicate all remaining -// bytes will be read. 
-// -// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies -// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding -// that file will be served back whole, regardless of the requested range as -// Google Cloud Storage dictates. -func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader") - defer func() { trace.EndSpan(ctx, err) }() - - if err := o.validate(); err != nil { - return nil, err - } - if offset < 0 && length >= 0 { - return nil, fmt.Errorf("storage: invalid offset %d < 0 requires negative length", offset) - } - if o.conds != nil { - if err := o.conds.validate("NewRangeReader"); err != nil { - return nil, err - } - } - - opts := makeStorageOpts(true, o.retry, o.userProject) - - params := &newRangeReaderParams{ - bucket: o.bucket, - object: o.object, - gen: o.gen, - offset: offset, - length: length, - encryptionKey: o.encryptionKey, - conds: o.conds, - readCompressed: o.readCompressed, - } - - r, err = o.c.tc.NewRangeReader(ctx, params, opts...) - - return r, err -} - -// decompressiveTranscoding returns true if the request was served decompressed -// and different than its original storage form. This happens when the "Content-Encoding" -// header is "gzip". -// See: -// - https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip -// - https://github.com/googleapis/google-cloud-go/issues/1800 -func decompressiveTranscoding(res *http.Response) bool { - // Decompressive Transcoding. - return res.Header.Get("Content-Encoding") == "gzip" || - res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" -} - -func uncompressedByServer(res *http.Response) bool { - // If the data is stored as gzip but is not encoded as gzip, then it - // was uncompressed by the server. - return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" && - res.Header.Get("Content-Encoding") != "gzip" -} - -func parseCRC32c(res *http.Response) (uint32, bool) { - const prefix = "crc32c=" - for _, spec := range res.Header["X-Goog-Hash"] { - if strings.HasPrefix(spec, prefix) { - c, err := decodeUint32(spec[len(prefix):]) - if err == nil { - return c, true - } - } - } - return 0, false -} - -// setConditionsHeaders sets precondition request headers for downloads -// using the XML API. It assumes that the conditions have been validated. -func setConditionsHeaders(headers http.Header, conds *Conditions) error { - if conds == nil { - return nil - } - if conds.MetagenerationMatch != 0 { - headers.Set("x-goog-if-metageneration-match", fmt.Sprint(conds.MetagenerationMatch)) - } - switch { - case conds.GenerationMatch != 0: - headers.Set("x-goog-if-generation-match", fmt.Sprint(conds.GenerationMatch)) - case conds.DoesNotExist: - headers.Set("x-goog-if-generation-match", "0") - } - return nil -} - -// Wrap a request to look similar to an apiary library request, in order to -// be used by run(). -type readerRequestWrapper struct { - req *http.Request -} - -func (w *readerRequestWrapper) Header() http.Header { - return w.req.Header -} - -var emptyBody = ioutil.NopCloser(strings.NewReader("")) - -// Reader reads a Cloud Storage object. -// It implements io.Reader. -// -// Typically, a Reader computes the CRC of the downloaded content and compares it to -// the stored CRC, returning an error from Read if there is a mismatch. This integrity check -// is skipped if transcoding occurs. 
See https://cloud.google.com/storage/docs/transcoding. -type Reader struct { - Attrs ReaderObjectAttrs - seen, remain, size int64 - checkCRC bool // should we check the CRC? - wantCRC uint32 // the CRC32c value the server sent in the header - gotCRC uint32 // running crc - - reader io.ReadCloser -} - -// Close closes the Reader. It must be called when done reading. -func (r *Reader) Close() error { - return r.reader.Close() -} - -func (r *Reader) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if r.remain != -1 { - r.remain -= int64(n) - } - if r.checkCRC { - r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) - // Check CRC here. It would be natural to check it in Close, but - // everybody defers Close on the assumption that it doesn't return - // anything worth looking at. - if err == io.EOF { - if r.gotCRC != r.wantCRC { - return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", - r.gotCRC, r.wantCRC) - } - } - } - return n, err -} - -// Size returns the size of the object in bytes. -// The returned value is always the same and is not affected by -// calls to Read or Close. -// -// Deprecated: use Reader.Attrs.Size. -func (r *Reader) Size() int64 { - return r.Attrs.Size -} - -// Remain returns the number of bytes left to read, or -1 if unknown. -func (r *Reader) Remain() int64 { - return r.remain -} - -// ContentType returns the content type of the object. -// -// Deprecated: use Reader.Attrs.ContentType. -func (r *Reader) ContentType() string { - return r.Attrs.ContentType -} - -// ContentEncoding returns the content encoding of the object. -// -// Deprecated: use Reader.Attrs.ContentEncoding. -func (r *Reader) ContentEncoding() string { - return r.Attrs.ContentEncoding -} - -// CacheControl returns the cache control of the object. -// -// Deprecated: use Reader.Attrs.CacheControl. -func (r *Reader) CacheControl() string { - return r.Attrs.CacheControl -} - -// LastModified returns the value of the Last-Modified header. -// -// Deprecated: use Reader.Attrs.LastModified. -func (r *Reader) LastModified() (time.Time, error) { - return r.Attrs.LastModified, nil -} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go deleted file mode 100644 index 7fc3fc4cb9..0000000000 --- a/vendor/cloud.google.com/go/storage/storage.go +++ /dev/null @@ -1,2213 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
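A short sketch of the Reader API deleted above, assuming Application Default Credentials and placeholder bucket/object names; note that, per the Read implementation, a CRC32C mismatch surfaces from the Read call that reaches io.EOF rather than from Close:

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("example-bucket").Object("logs/app.log")

	// Read the first KiB; a negative length reads to the end of the object.
	r, err := obj.NewRangeReader(ctx, 0, 1024)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	data, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes of %d total\n", len(data), r.Attrs.Size)
}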
- -package storage - -import ( - "bytes" - "context" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" - - "cloud.google.com/go/internal/optional" - "cloud.google.com/go/internal/trace" - "cloud.google.com/go/storage/internal" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" - "github.com/googleapis/gax-go/v2" - "golang.org/x/oauth2/google" - "google.golang.org/api/googleapi" - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - raw "google.golang.org/api/storage/v1" - "google.golang.org/api/transport" - htransport "google.golang.org/api/transport/http" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/types/known/fieldmaskpb" - "google.golang.org/protobuf/types/known/timestamppb" -) - -// Methods which can be used in signed URLs. -var signedURLMethods = map[string]bool{"DELETE": true, "GET": true, "HEAD": true, "POST": true, "PUT": true} - -var ( - // ErrBucketNotExist indicates that the bucket does not exist. - ErrBucketNotExist = errors.New("storage: bucket doesn't exist") - // ErrObjectNotExist indicates that the object does not exist. - ErrObjectNotExist = errors.New("storage: object doesn't exist") - // errMethodNotSupported indicates that the method called is not currently supported by the client. - // TODO: Export this error when launching the transport-agnostic client. - errMethodNotSupported = errors.New("storage: method is not currently supported") - // errMethodNotValid indicates that the given HTTP method is not valid. - errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys()) -) - -var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", internal.Version) - -const ( - // ScopeFullControl grants permissions to manage your - // data and permissions in Google Cloud Storage. - ScopeFullControl = raw.DevstorageFullControlScope - - // ScopeReadOnly grants permissions to - // view your data in Google Cloud Storage. - ScopeReadOnly = raw.DevstorageReadOnlyScope - - // ScopeReadWrite grants permissions to manage your - // data in Google Cloud Storage. - ScopeReadWrite = raw.DevstorageReadWriteScope - - // aes256Algorithm is the AES256 encryption algorithm used with the - // Customer-Supplied Encryption Keys feature. - aes256Algorithm = "AES256" - - // defaultGen indicates the latest object generation by default, - // using a negative value. - defaultGen = int64(-1) -) - -// TODO: remove this once header with invocation ID is applied to all methods. -func setClientHeader(headers http.Header) { - headers.Set("x-goog-api-client", xGoogDefaultHeader) -} - -// Client is a client for interacting with Google Cloud Storage. -// -// Clients should be reused instead of created as needed. -// The methods of Client are safe for concurrent use by multiple goroutines. -type Client struct { - hc *http.Client - raw *raw.Service - // Scheme describes the scheme under the current host. - scheme string - // ReadHost is the default host used on the reader. - readHost string - // May be nil. - creds *google.Credentials - retry *retryConfig - - // tc is the transport-agnostic client implemented with either gRPC or HTTP. - tc storageClient - // useGRPC flags whether the client uses gRPC. 
This is needed while the - // integration piece is only partially complete. - // TODO: remove before merging to main. - useGRPC bool -} - -// NewClient creates a new Google Cloud Storage client. -// The default scope is ScopeFullControl. To use a different scope, like -// ScopeReadOnly, use option.WithScopes. -// -// Clients should be reused instead of created as needed. The methods of Client -// are safe for concurrent use by multiple goroutines. -func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - - // Use the experimental gRPC client if the env var is set. - // This is an experimental API and not intended for public use. - if withGRPC := os.Getenv("STORAGE_USE_GRPC"); withGRPC != "" { - return newGRPCClient(ctx, opts...) - } - - var creds *google.Credentials - - // In general, it is recommended to use raw.NewService instead of htransport.NewClient - // since raw.NewService configures the correct default endpoints when initializing the - // internal http client. However, in our case, "NewRangeReader" in reader.go needs to - // access the http client directly to make requests, so we create the client manually - // here so it can be re-used by both reader.go and raw.NewService. This means we need to - // manually configure the default endpoint options on the http client. Furthermore, we - // need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints. - if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { - // Prepend default options to avoid overriding options passed by the user. - opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, opts...) - - opts = append(opts, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/")) - opts = append(opts, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/")) - - // Don't error out here. The user may have passed in their own HTTP - // client which does not auth with ADC or other common conventions. - c, err := transport.Creds(ctx, opts...) - if err == nil { - creds = c - opts = append(opts, internaloption.WithCredentials(creds)) - } - } else { - var hostURL *url.URL - - if strings.Contains(host, "://") { - h, err := url.Parse(host) - if err != nil { - return nil, err - } - hostURL = h - } else { - // Add scheme for user if not supplied in STORAGE_EMULATOR_HOST - // URL is only parsed correctly if it has a scheme, so we build it ourselves - hostURL = &url.URL{Scheme: "http", Host: host} - } - - hostURL.Path = "storage/v1/" - endpoint := hostURL.String() - - // Append the emulator host as default endpoint for the user - opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...) - - opts = append(opts, internaloption.WithDefaultEndpoint(endpoint)) - opts = append(opts, internaloption.WithDefaultMTLSEndpoint(endpoint)) - } - - // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. - hc, ep, err := htransport.NewClient(ctx, opts...) - if err != nil { - return nil, fmt.Errorf("dialing: %w", err) - } - // RawService should be created with the chosen endpoint to take account of user override. - rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) - if err != nil { - return nil, fmt.Errorf("storage client: %w", err) - } - // Update readHost and scheme with the chosen endpoint. 
- u, err := url.Parse(ep) - if err != nil { - return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) - } - - tc, err := newHTTPStorageClient(ctx, withClientOptions(opts...)) - if err != nil { - return nil, fmt.Errorf("storage: %w", err) - } - - return &Client{ - hc: hc, - raw: rawService, - scheme: u.Scheme, - readHost: u.Host, - creds: creds, - tc: tc, - }, nil -} - -// newGRPCClient creates a new Storage client that initializes a gRPC-based -// client. Calls that have not been implemented in gRPC will panic. -// -// This is an experimental API and not intended for public use. -func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - opts = append(defaultGRPCOptions(), opts...) - tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...)) - if err != nil { - return nil, err - } - - return &Client{tc: tc, useGRPC: true}, nil -} - -// Close closes the Client. -// -// Close need not be called at program exit. -func (c *Client) Close() error { - // Set fields to nil so that subsequent uses will panic. - c.hc = nil - c.raw = nil - c.creds = nil - if c.tc != nil { - return c.tc.Close() - } - return nil -} - -// SigningScheme determines the API version to use when signing URLs. -type SigningScheme int - -const ( - // SigningSchemeDefault is presently V2 and will change to V4 in the future. - SigningSchemeDefault SigningScheme = iota - - // SigningSchemeV2 uses the V2 scheme to sign URLs. - SigningSchemeV2 - - // SigningSchemeV4 uses the V4 scheme to sign URLs. - SigningSchemeV4 -) - -// URLStyle determines the style to use for the signed URL. pathStyle is the -// default. All non-default options work with V4 scheme only. See -// https://cloud.google.com/storage/docs/request-endpoints for details. -type URLStyle interface { - // host should return the host portion of the signed URL, not including - // the scheme (e.g. storage.googleapis.com). - host(bucket string) string - - // path should return the path portion of the signed URL, which may include - // both the bucket and object name or only the object name depending on the - // style. - path(bucket, object string) string -} - -type pathStyle struct{} - -type virtualHostedStyle struct{} - -type bucketBoundHostname struct { - hostname string -} - -func (s pathStyle) host(bucket string) string { - if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { - return stripScheme(host) - } - - return "storage.googleapis.com" -} - -func (s virtualHostedStyle) host(bucket string) string { - if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { - return bucket + "." + stripScheme(host) - } - - return bucket + ".storage.googleapis.com" -} - -func (s bucketBoundHostname) host(bucket string) string { - return s.hostname -} - -func (s pathStyle) path(bucket, object string) string { - p := bucket - if object != "" { - p += "/" + object - } - return p -} - -func (s virtualHostedStyle) path(bucket, object string) string { - return object -} - -func (s bucketBoundHostname) path(bucket, object string) string { - return object -} - -// PathStyle is the default style, and will generate a URL of the form -// "storage.googleapis.com/<bucket-name>/<object-name>". -func PathStyle() URLStyle { - return pathStyle{} -} - -// VirtualHostedStyle generates a URL relative to the bucket's virtual -// hostname, e.g. "<bucket-name>.storage.googleapis.com/<object-name>". -func VirtualHostedStyle() URLStyle { - return virtualHostedStyle{} -} - -// BucketBoundHostname generates a URL with a custom hostname tied to a -// specific GCS bucket. 
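The STORAGE_EMULATOR_HOST branch of NewClient above can be exercised against a local emulator; a hypothetical sketch (the port and emulator choice are placeholders — per the code above, the client supplies the http:// scheme and the storage/v1/ path itself and skips authentication):

package main

import (
	"context"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	// Point the client at a locally running emulator instead of production GCS.
	os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")

	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	// ... use client against the emulator ...
}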
The desired hostname should be passed in using the -// hostname argument. Generated URLs will be of the form -// "<bucket-bound-hostname>/<object-name>". See -// https://cloud.google.com/storage/docs/request-endpoints#cname and -// https://cloud.google.com/load-balancing/docs/https/adding-backend-buckets-to-load-balancers -// for details. Note that for CNAMEs, only HTTP is supported, so Insecure must -// be set to true. -func BucketBoundHostname(hostname string) URLStyle { - return bucketBoundHostname{hostname: hostname} -} - -// Strips the scheme from a host if it contains it -func stripScheme(host string) string { - if strings.Contains(host, "://") { - host = strings.SplitN(host, "://", 2)[1] - } - return host -} - -// SignedURLOptions allows you to restrict the access to the signed URL. -type SignedURLOptions struct { - // GoogleAccessID represents the authorizer of the signed URL generation. - // It is typically the Google service account client email address from - // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". - // Required. - GoogleAccessID string - - // PrivateKey is the Google service account private key. It is obtainable - // from the Google Developers Console. - // At https://console.developers.google.com/project/<your-project-id>/apiui/credential, - // create a service account client ID or reuse one of your existing service account - // credentials. Click on the "Generate new P12 key" to generate and download - // a new private key. Once you download the P12 file, use the following command - // to convert it into a PEM file. - // - // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes - // - // Provide the contents of the PEM file as a byte slice. - // Exactly one of PrivateKey or SignBytes must be non-nil. - PrivateKey []byte - - // SignBytes is a function for implementing custom signing. For example, if - // your application is running on Google App Engine, you can use - // appengine's internal signing function: - // ctx := appengine.NewContext(request) - // acc, _ := appengine.ServiceAccount(ctx) - // url, err := SignedURL("bucket", "object", &SignedURLOptions{ - // GoogleAccessID: acc, - // SignBytes: func(b []byte) ([]byte, error) { - // _, signedBytes, err := appengine.SignBytes(ctx, b) - // return signedBytes, err - // }, - // // etc. - // }) - // - // Exactly one of PrivateKey or SignBytes must be non-nil. - SignBytes func([]byte) ([]byte, error) - - // Method is the HTTP method to be used with the signed URL. - // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. - // Required. - Method string - - // Expires is the expiration time on the signed URL. It must be - // a datetime in the future. For SigningSchemeV4, the expiration may be no - // more than seven days in the future. - // Required. - Expires time.Time - - // ContentType is the content type header the client must provide - // to use the generated signed URL. - // Optional. - ContentType string - - // Headers is a list of extension headers the client must provide - // in order to use the generated signed URL. Each must be a string of the - // form "key:values", with multiple values separated by a semicolon. - // Optional. - Headers []string - - // QueryParameters is a map of additional query parameters. When - // SigningScheme is V4, this is used in computing the signature, and the - // client must use the same query parameters when using the generated signed - // URL. - // Optional. - QueryParameters url.Values - - // MD5 is the base64 encoded MD5 checksum of the file. 
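As a quick reference for the three exported styles implemented above, assuming bucket "b", object "o", and a placeholder CNAME hostname:

// PathStyle()                            -> https://storage.googleapis.com/b/o
// VirtualHostedStyle()                   -> https://b.storage.googleapis.com/o
// BucketBoundHostname("cdn.example.com") -> http://cdn.example.com/o
//    (per the doc above, CNAME endpoints are HTTP-only, so Insecure must be true)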
- // If provided, the client should provide the exact value on the request - // header in order to use the signed URL. - // Optional. - MD5 string - - // Style provides options for the type of URL to use. Options are - // PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See - // https://cloud.google.com/storage/docs/request-endpoints for details. - // Only supported for V4 signing. - // Optional. - Style URLStyle - - // Insecure determines whether the signed URL should use HTTPS (default) or - // HTTP. - // Only supported for V4 signing. - // Optional. - Insecure bool - - // Scheme determines the version of URL signing to use. Default is - // SigningSchemeV2. - Scheme SigningScheme -} - -func (opts *SignedURLOptions) clone() *SignedURLOptions { - return &SignedURLOptions{ - GoogleAccessID: opts.GoogleAccessID, - SignBytes: opts.SignBytes, - PrivateKey: opts.PrivateKey, - Method: opts.Method, - Expires: opts.Expires, - ContentType: opts.ContentType, - Headers: opts.Headers, - QueryParameters: opts.QueryParameters, - MD5: opts.MD5, - Style: opts.Style, - Insecure: opts.Insecure, - Scheme: opts.Scheme, - } -} - -var ( - tabRegex = regexp.MustCompile(`[\t]+`) - // I was tempted to call this spacex. :) - spaceRegex = regexp.MustCompile(` +`) - - canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`) - excludedCanonicalHeaders = map[string]bool{ - "x-goog-encryption-key": true, - "x-goog-encryption-key-sha256": true, - } -) - -// v2SanitizeHeaders applies the specifications for canonical extension headers at -// https://cloud.google.com/storage/docs/access-control/signed-urls-v2#about-canonical-extension-headers -func v2SanitizeHeaders(hdrs []string) []string { - headerMap := map[string][]string{} - for _, hdr := range hdrs { - // No leading or trailing whitespaces. - sanitizedHeader := strings.TrimSpace(hdr) - - var header, value string - // Only keep canonical headers, discard any others. - headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader) - if len(headerMatches) == 0 { - continue - } - header = headerMatches[1] - value = headerMatches[2] - - header = strings.ToLower(strings.TrimSpace(header)) - value = strings.TrimSpace(value) - - if excludedCanonicalHeaders[header] { - // Do not keep any deliberately excluded canonical headers when signing. - continue - } - - if len(value) > 0 { - // Remove duplicate headers by appending the values of duplicates - // in their order of appearance. - headerMap[header] = append(headerMap[header], value) - } - } - - var sanitizedHeaders []string - for header, values := range headerMap { - // There should be no spaces around the colon separating the header name - // from the header value or around the values themselves. The values - // should be separated by commas. - // - // NOTE: The semantics for headers without a value are not clear. - // However from specifications these should be edge-cases anyway and we - // should assume that there will be no canonical headers using empty - // values. Any such headers are discarded at the regexp stage above. - sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ","))) - } - sort.Strings(sanitizedHeaders) - return sanitizedHeaders -} - -// v4SanitizeHeaders applies the specifications for canonical extension headers -// at https://cloud.google.com/storage/docs/authentication/canonical-requests#about-headers. -// -// V4 does a couple things differently from V2: -// - Headers get sorted by key, instead of by key:value. 
We do this in -// signedURLV4. -// - There's no canonical regexp: we simply split headers on :. -// - We don't exclude canonical headers. -// - We replace leading and trailing spaces in header values, like v2, but also -// all intermediate space duplicates get stripped. That is, there's only ever -// a single consecutive space. -func v4SanitizeHeaders(hdrs []string) []string { - headerMap := map[string][]string{} - for _, hdr := range hdrs { - // No leading or trailing whitespaces. - sanitizedHeader := strings.TrimSpace(hdr) - - var key, value string - headerMatches := strings.Split(sanitizedHeader, ":") - if len(headerMatches) < 2 { - continue - } - - key = headerMatches[0] - value = headerMatches[1] - - key = strings.ToLower(strings.TrimSpace(key)) - value = strings.TrimSpace(value) - value = string(spaceRegex.ReplaceAll([]byte(value), []byte(" "))) - value = string(tabRegex.ReplaceAll([]byte(value), []byte("\t"))) - - if len(value) > 0 { - // Remove duplicate headers by appending the values of duplicates - // in their order of appearance. - headerMap[key] = append(headerMap[key], value) - } - } - - var sanitizedHeaders []string - for header, values := range headerMap { - // There should be no spaces around the colon separating the header name - // from the header value or around the values themselves. The values - // should be separated by commas. - // - // NOTE: The semantics for headers without a value are not clear. - // However from specifications these should be edge-cases anyway and we - // should assume that there will be no canonical headers using empty - // values. Any such headers are discarded at the regexp stage above. - sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ","))) - } - return sanitizedHeaders -} - -// SignedURL returns a URL for the specified object. Signed URLs allow anyone -// access to a restricted resource for a limited time without needing a -// Google account or signing in. For more information about signed URLs, see -// https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication -// If initializing a Storage Client, instead use the Bucket.SignedURL method -// which uses the Client's credentials to handle authentication. 
-func SignedURL(bucket, object string, opts *SignedURLOptions) (string, error) { - now := utcNow() - if err := validateOptions(opts, now); err != nil { - return "", err - } - - switch opts.Scheme { - case SigningSchemeV2: - opts.Headers = v2SanitizeHeaders(opts.Headers) - return signedURLV2(bucket, object, opts) - case SigningSchemeV4: - opts.Headers = v4SanitizeHeaders(opts.Headers) - return signedURLV4(bucket, object, opts, now) - default: // SigningSchemeDefault - opts.Headers = v2SanitizeHeaders(opts.Headers) - return signedURLV2(bucket, object, opts) - } -} - -func validateOptions(opts *SignedURLOptions, now time.Time) error { - if opts == nil { - return errors.New("storage: missing required SignedURLOptions") - } - if opts.GoogleAccessID == "" { - return errors.New("storage: missing required GoogleAccessID") - } - if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { - return errors.New("storage: exactly one of PrivateKey or SignBytes must be set") - } - opts.Method = strings.ToUpper(opts.Method) - if _, ok := signedURLMethods[opts.Method]; !ok { - return errMethodNotValid - } - if opts.Expires.IsZero() { - return errors.New("storage: missing required expires option") - } - if opts.MD5 != "" { - md5, err := base64.StdEncoding.DecodeString(opts.MD5) - if err != nil || len(md5) != 16 { - return errors.New("storage: invalid MD5 checksum") - } - } - if opts.Style == nil { - opts.Style = PathStyle() - } - if _, ok := opts.Style.(pathStyle); !ok && opts.Scheme == SigningSchemeV2 { - return errors.New("storage: only path-style URLs are permitted with SigningSchemeV2") - } - if opts.Scheme == SigningSchemeV4 { - cutoff := now.Add(604801 * time.Second) // 7 days + 1 second - if !opts.Expires.Before(cutoff) { - return errors.New("storage: expires must be within seven days from now") - } - } - return nil -} - -const ( - iso8601 = "20060102T150405Z" - yearMonthDay = "20060102" -) - -// utcNow returns the current time in UTC and is a variable to allow for -// reassignment in tests to provide deterministic signed URL values. -var utcNow = func() time.Time { - return time.Now().UTC() -} - -// extractHeaderNames takes in a series of key:value headers and returns the -// header names only. -func extractHeaderNames(kvs []string) []string { - var res []string - for _, header := range kvs { - nameValue := strings.Split(header, ":") - res = append(res, nameValue[0]) - } - return res -} - -// pathEncodeV4 creates an encoded string that matches the v4 signature spec. -// Following the spec precisely is necessary in order to ensure that the URL -// and signing string are correctly formed, and Go's url.PathEncode and -// url.QueryEncode don't generate an exact match without some additional logic. -func pathEncodeV4(path string) string { - segments := strings.Split(path, "/") - var encodedSegments []string - for _, s := range segments { - encodedSegments = append(encodedSegments, url.QueryEscape(s)) - } - encodedStr := strings.Join(encodedSegments, "/") - encodedStr = strings.Replace(encodedStr, "+", "%20", -1) - return encodedStr -} - -// signedURLV4 creates a signed URL using the sigV4 algorithm. 
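A minimal sketch of the exported SignedURL entry point above using the V4 scheme; the bucket, object, signer email, and key path are placeholders, and validateOptions above caps V4 expirations at seven days:

package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	pemKey, err := os.ReadFile("service-account.pem") // placeholder key path
	if err != nil {
		log.Fatal(err)
	}

	url, err := storage.SignedURL("example-bucket", "reports/q1.csv", &storage.SignedURLOptions{
		GoogleAccessID: "signer@example-project.iam.gserviceaccount.com",
		PrivateKey:     pemKey,
		Method:         "GET",
		Expires:        time.Now().Add(24 * time.Hour),
		Scheme:         storage.SigningSchemeV4,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(url)
}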
-func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (string, error) { - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%s\n", opts.Method) - - u := &url.URL{Path: opts.Style.path(bucket, name)} - u.RawPath = pathEncodeV4(u.Path) - - // Note: we have to add a / here because GCS does so auto-magically, despite - // our encoding not doing so (and we have to exactly match their - // canonical query). - fmt.Fprintf(buf, "/%s\n", u.RawPath) - - headerNames := append(extractHeaderNames(opts.Headers), "host") - if opts.ContentType != "" { - headerNames = append(headerNames, "content-type") - } - if opts.MD5 != "" { - headerNames = append(headerNames, "content-md5") - } - sort.Strings(headerNames) - signedHeaders := strings.Join(headerNames, ";") - timestamp := now.Format(iso8601) - credentialScope := fmt.Sprintf("%s/auto/storage/goog4_request", now.Format(yearMonthDay)) - canonicalQueryString := url.Values{ - "X-Goog-Algorithm": {"GOOG4-RSA-SHA256"}, - "X-Goog-Credential": {fmt.Sprintf("%s/%s", opts.GoogleAccessID, credentialScope)}, - "X-Goog-Date": {timestamp}, - "X-Goog-Expires": {fmt.Sprintf("%d", int(opts.Expires.Sub(now).Seconds()))}, - "X-Goog-SignedHeaders": {signedHeaders}, - } - // Add user-supplied query parameters to the canonical query string. For V4, - // it's necessary to include these. - for k, v := range opts.QueryParameters { - canonicalQueryString[k] = append(canonicalQueryString[k], v...) - } - // url.Values.Encode escaping is correct, except that a space must be replaced - // by `%20` rather than `+`. - escapedQuery := strings.Replace(canonicalQueryString.Encode(), "+", "%20", -1) - fmt.Fprintf(buf, "%s\n", escapedQuery) - - // Fill in the hostname based on the desired URL style. - u.Host = opts.Style.host(bucket) - - // Fill in the URL scheme. - if opts.Insecure { - u.Scheme = "http" - } else { - u.Scheme = "https" - } - - var headersWithValue []string - headersWithValue = append(headersWithValue, "host:"+u.Host) - headersWithValue = append(headersWithValue, opts.Headers...) - if opts.ContentType != "" { - headersWithValue = append(headersWithValue, "content-type:"+opts.ContentType) - } - if opts.MD5 != "" { - headersWithValue = append(headersWithValue, "content-md5:"+opts.MD5) - } - // Trim extra whitespace from headers and replace with a single space. - var trimmedHeaders []string - for _, h := range headersWithValue { - trimmedHeaders = append(trimmedHeaders, strings.Join(strings.Fields(h), " ")) - } - canonicalHeaders := strings.Join(sortHeadersByKey(trimmedHeaders), "\n") - fmt.Fprintf(buf, "%s\n\n", canonicalHeaders) - fmt.Fprintf(buf, "%s\n", signedHeaders) - - // If the user provides a value for X-Goog-Content-SHA256, we must use - // that value in the request string. If not, we use UNSIGNED-PAYLOAD. 
- sha256Header := false - for _, h := range trimmedHeaders { - if strings.HasPrefix(strings.ToLower(h), "x-goog-content-sha256") && strings.Contains(h, ":") { - sha256Header = true - fmt.Fprintf(buf, "%s", strings.SplitN(h, ":", 2)[1]) - break - } - } - if !sha256Header { - fmt.Fprint(buf, "UNSIGNED-PAYLOAD") - } - - sum := sha256.Sum256(buf.Bytes()) - hexDigest := hex.EncodeToString(sum[:]) - signBuf := &bytes.Buffer{} - fmt.Fprint(signBuf, "GOOG4-RSA-SHA256\n") - fmt.Fprintf(signBuf, "%s\n", timestamp) - fmt.Fprintf(signBuf, "%s\n", credentialScope) - fmt.Fprintf(signBuf, "%s", hexDigest) - - signBytes := opts.SignBytes - if opts.PrivateKey != nil { - key, err := parseKey(opts.PrivateKey) - if err != nil { - return "", err - } - signBytes = func(b []byte) ([]byte, error) { - sum := sha256.Sum256(b) - return rsa.SignPKCS1v15( - rand.Reader, - key, - crypto.SHA256, - sum[:], - ) - } - } - b, err := signBytes(signBuf.Bytes()) - if err != nil { - return "", err - } - signature := hex.EncodeToString(b) - canonicalQueryString.Set("X-Goog-Signature", string(signature)) - u.RawQuery = canonicalQueryString.Encode() - return u.String(), nil -} - -// takes a list of headerKey:headervalue1,headervalue2,etc and sorts by header -// key. -func sortHeadersByKey(hdrs []string) []string { - headersMap := map[string]string{} - var headersKeys []string - for _, h := range hdrs { - parts := strings.Split(h, ":") - k := parts[0] - v := parts[1] - headersMap[k] = v - headersKeys = append(headersKeys, k) - } - sort.Strings(headersKeys) - var sorted []string - for _, k := range headersKeys { - v := headersMap[k] - sorted = append(sorted, fmt.Sprintf("%s:%s", k, v)) - } - return sorted -} - -func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) { - signBytes := opts.SignBytes - if opts.PrivateKey != nil { - key, err := parseKey(opts.PrivateKey) - if err != nil { - return "", err - } - signBytes = func(b []byte) ([]byte, error) { - sum := sha256.Sum256(b) - return rsa.SignPKCS1v15( - rand.Reader, - key, - crypto.SHA256, - sum[:], - ) - } - } - - u := &url.URL{ - Path: fmt.Sprintf("/%s/%s", bucket, name), - } - - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%s\n", opts.Method) - fmt.Fprintf(buf, "%s\n", opts.MD5) - fmt.Fprintf(buf, "%s\n", opts.ContentType) - fmt.Fprintf(buf, "%d\n", opts.Expires.Unix()) - if len(opts.Headers) > 0 { - fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n")) - } - fmt.Fprintf(buf, "%s", u.String()) - - b, err := signBytes(buf.Bytes()) - if err != nil { - return "", err - } - encoded := base64.StdEncoding.EncodeToString(b) - u.Scheme = "https" - u.Host = PathStyle().host(bucket) - q := u.Query() - q.Set("GoogleAccessId", opts.GoogleAccessID) - q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) - q.Set("Signature", string(encoded)) - u.RawQuery = q.Encode() - return u.String(), nil -} - -// ObjectHandle provides operations on an object in a Google Cloud Storage bucket. -// Use BucketHandle.Object to get a handle. -type ObjectHandle struct { - c *Client - bucket string - object string - acl ACLHandle - gen int64 // a negative value indicates latest - conds *Conditions - encryptionKey []byte // AES-256 key - userProject string // for requester-pays buckets - readCompressed bool // Accept-Encoding: gzip - retry *retryConfig -} - -// ACL provides access to the object's access control list. -// This controls who can read and write this object. -// This call does not perform any network operations. 
-func (o *ObjectHandle) ACL() *ACLHandle { - return &o.acl -} - -// Generation returns a new ObjectHandle that operates on a specific generation -// of the object. -// By default, the handle operates on the latest generation. Not -// all operations work when given a specific generation; check the API -// endpoints at https://cloud.google.com/storage/docs/json_api/ for details. -func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { - o2 := *o - o2.gen = gen - return &o2 -} - -// If returns a new ObjectHandle that applies a set of preconditions. -// Preconditions already set on the ObjectHandle are ignored. -// Operations on the new handle will return an error if the preconditions are not -// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions -// for more details. -func (o *ObjectHandle) If(conds Conditions) *ObjectHandle { - o2 := *o - o2.conds = &conds - return &o2 -} - -// Key returns a new ObjectHandle that uses the supplied encryption -// key to encrypt and decrypt the object's contents. -// -// Encryption key must be a 32-byte AES-256 key. -// See https://cloud.google.com/storage/docs/encryption for details. -func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { - o2 := *o - o2.encryptionKey = encryptionKey - return &o2 -} - -// Attrs returns meta information about the object. -// ErrObjectNotExist will be returned if the object is not found. -func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs") - defer func() { trace.EndSpan(ctx, err) }() - - if err := o.validate(); err != nil { - return nil, err - } - opts := makeStorageOpts(true, o.retry, o.userProject) - return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...) -} - -// Update updates an object with the provided attributes. See -// ObjectAttrsToUpdate docs for details on treatment of zero values. -// ErrObjectNotExist will be returned if the object is not found. -func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update") - defer func() { trace.EndSpan(ctx, err) }() - - if err := o.validate(); err != nil { - return nil, err - } - isIdempotent := o.conds != nil && o.conds.MetagenerationMatch != 0 - opts := makeStorageOpts(isIdempotent, o.retry, o.userProject) - return o.c.tc.UpdateObject(ctx, o.bucket, o.object, &uattrs, o.gen, o.encryptionKey, o.conds, opts...) -} - -// BucketName returns the name of the bucket. -func (o *ObjectHandle) BucketName() string { - return o.bucket -} - -// ObjectName returns the name of the object. -func (o *ObjectHandle) ObjectName() string { - return o.object -} - -// ObjectAttrsToUpdate is used to update the attributes of an object. -// Only fields set to non-nil values will be updated. -// For all fields except CustomTime, set the field to its zero value to delete -// it. CustomTime cannot be deleted or changed to an earlier time once set. 
-// -// For example, to change ContentType and delete ContentEncoding and -// Metadata, use -// -// ObjectAttrsToUpdate{ -// ContentType: "text/html", -// ContentEncoding: "", -// Metadata: map[string]string{}, -// } -type ObjectAttrsToUpdate struct { - EventBasedHold optional.Bool - TemporaryHold optional.Bool - ContentType optional.String - ContentLanguage optional.String - ContentEncoding optional.String - ContentDisposition optional.String - CacheControl optional.String - CustomTime time.Time // Cannot be deleted or backdated from its current value. - Metadata map[string]string // Set to map[string]string{} to delete. - ACL []ACLRule - - // If not empty, applies a predefined set of access controls. ACL must be nil. - // See https://cloud.google.com/storage/docs/json_api/v1/objects/patch. - PredefinedACL string -} - -// Delete deletes the single specified object. -func (o *ObjectHandle) Delete(ctx context.Context) error { - if err := o.validate(); err != nil { - return err - } - // Delete is idempotent if GenerationMatch or Generation have been passed in. - // The default generation is negative to get the latest version of the object. - isIdempotent := (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 - opts := makeStorageOpts(isIdempotent, o.retry, o.userProject) - return o.c.tc.DeleteObject(ctx, o.bucket, o.object, o.gen, o.conds, opts...) -} - -// ReadCompressed when true causes the read to happen without decompressing. -func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { - o2 := *o - o2.readCompressed = compressed - return &o2 -} - -// NewWriter returns a storage Writer that writes to the GCS object -// associated with this ObjectHandle. -// -// A new object will be created unless an object with this name already exists. -// Otherwise any previous object with the same name will be replaced. -// The object will not be available (and any previous object will remain) -// until Close has been called. -// -// Attributes can be set on the object by modifying the returned Writer's -// ObjectAttrs field before the first call to Write. If no ContentType -// attribute is specified, the content type will be automatically sniffed -// using net/http.DetectContentType. -// -// Note that each Writer allocates an internal buffer of size Writer.ChunkSize. -// See the ChunkSize docs for more information. -// -// It is the caller's responsibility to call Close when writing is done. To -// stop writing without saving the data, cancel the context. -func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { - return &Writer{ - ctx: ctx, - o: o, - donec: make(chan struct{}), - ObjectAttrs: ObjectAttrs{Name: o.object}, - ChunkSize: googleapi.DefaultUploadChunkSize, - } -} - -func (o *ObjectHandle) validate() error { - if o.bucket == "" { - return errors.New("storage: bucket name is empty") - } - if o.object == "" { - return errors.New("storage: object name is empty") - } - if !utf8.ValidString(o.object) { - return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) - } - return nil -} - -// parseKey converts the binary contents of a private key file to an -// *rsa.PrivateKey. It detects whether the private key is in a PEM container or -// not. If so, it extracts the private key from PEM container before -// conversion. It only supports PEM containers with no passphrase. 
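The Update implementation above treats a MetagenerationMatch precondition as the marker of idempotency, so conditioning an update on the last-seen metageneration both guards against concurrent metadata edits and makes the call safely retryable; a hypothetical sketch with placeholder names:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("example-bucket").Object("reports/q1.csv")

	attrs, err := obj.Attrs(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Fails with a precondition error if the metadata changed since Attrs.
	_, err = obj.If(storage.Conditions{MetagenerationMatch: attrs.Metageneration}).
		Update(ctx, storage.ObjectAttrsToUpdate{
			ContentType:     "text/csv",
			ContentEncoding: "", // a non-nil zero value deletes the field
		})
	if err != nil {
		log.Fatal(err)
	}
}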
-func parseKey(key []byte) (*rsa.PrivateKey, error) { - if block, _ := pem.Decode(key); block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, err - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("oauth2: private key is invalid") - } - return parsed, nil -} - -// toRawObject copies the editable attributes from o to the raw library's Object type. -func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { - var ret string - if !o.RetentionExpirationTime.IsZero() { - ret = o.RetentionExpirationTime.Format(time.RFC3339) - } - var ct string - if !o.CustomTime.IsZero() { - ct = o.CustomTime.Format(time.RFC3339) - } - return &raw.Object{ - Bucket: bucket, - Name: o.Name, - EventBasedHold: o.EventBasedHold, - TemporaryHold: o.TemporaryHold, - RetentionExpirationTime: ret, - ContentType: o.ContentType, - ContentEncoding: o.ContentEncoding, - ContentLanguage: o.ContentLanguage, - CacheControl: o.CacheControl, - ContentDisposition: o.ContentDisposition, - StorageClass: o.StorageClass, - Acl: toRawObjectACL(o.ACL), - Metadata: o.Metadata, - CustomTime: ct, - } -} - -// toProtoObject copies the editable attributes from o to the proto library's Object type. -func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object { - // For now, there are only globally unique buckets, and "_" is the alias - // project ID for such buckets. If the bucket is not provided, like in the - // destination ObjectAttrs of a Copy, do not attempt to format it. - if b != "" { - b = bucketResourceName(globalProjectAlias, b) - } - - return &storagepb.Object{ - Bucket: b, - Name: o.Name, - EventBasedHold: proto.Bool(o.EventBasedHold), - TemporaryHold: o.TemporaryHold, - ContentType: o.ContentType, - ContentEncoding: o.ContentEncoding, - ContentLanguage: o.ContentLanguage, - CacheControl: o.CacheControl, - ContentDisposition: o.ContentDisposition, - StorageClass: o.StorageClass, - Acl: toProtoObjectACL(o.ACL), - Metadata: o.Metadata, - CreateTime: toProtoTimestamp(o.Created), - CustomTime: toProtoTimestamp(o.CustomTime), - DeleteTime: toProtoTimestamp(o.Deleted), - RetentionExpireTime: toProtoTimestamp(o.RetentionExpirationTime), - UpdateTime: toProtoTimestamp(o.Updated), - KmsKey: o.KMSKeyName, - Generation: o.Generation, - Size: o.Size, - } -} - -// toProtoObject copies the attributes to update from uattrs to the proto library's Object type. 
-func (uattrs *ObjectAttrsToUpdate) toProtoObject(bucket, object string) *storagepb.Object { - o := &storagepb.Object{ - Name: object, - Bucket: bucket, - } - if uattrs == nil { - return o - } - - if uattrs.EventBasedHold != nil { - o.EventBasedHold = proto.Bool(optional.ToBool(uattrs.EventBasedHold)) - } - if uattrs.TemporaryHold != nil { - o.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) - } - if uattrs.ContentType != nil { - o.ContentType = optional.ToString(uattrs.ContentType) - } - if uattrs.ContentLanguage != nil { - o.ContentLanguage = optional.ToString(uattrs.ContentLanguage) - } - if uattrs.ContentEncoding != nil { - o.ContentEncoding = optional.ToString(uattrs.ContentEncoding) - } - if uattrs.ContentDisposition != nil { - o.ContentDisposition = optional.ToString(uattrs.ContentDisposition) - } - if uattrs.CacheControl != nil { - o.CacheControl = optional.ToString(uattrs.CacheControl) - } - if !uattrs.CustomTime.IsZero() { - o.CustomTime = toProtoTimestamp(uattrs.CustomTime) - } - if uattrs.ACL != nil { - o.Acl = toProtoObjectACL(uattrs.ACL) - } - - // TODO(cathyo): Handle metadata. Pending b/230510191. - - return o -} - -// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object. -type ObjectAttrs struct { - // Bucket is the name of the bucket containing this GCS object. - // This field is read-only. - Bucket string - - // Name is the name of the object within the bucket. - // This field is read-only. - Name string - - // ContentType is the MIME type of the object's content. - ContentType string - - // ContentLanguage is the content language of the object's content. - ContentLanguage string - - // CacheControl is the Cache-Control header to be sent in the response - // headers when serving the object data. - CacheControl string - - // EventBasedHold specifies whether an object is under event-based hold. New - // objects created in a bucket whose DefaultEventBasedHold is set will - // default to that value. - EventBasedHold bool - - // TemporaryHold specifies whether an object is under temporary hold. While - // this flag is set to true, the object is protected against deletion and - // overwrites. - TemporaryHold bool - - // RetentionExpirationTime is a server-determined value that specifies the - // earliest time that the object's retention period expires. - // This is a read-only field. - RetentionExpirationTime time.Time - - // ACL is the list of access control rules for the object. - ACL []ACLRule - - // If not empty, applies a predefined set of access controls. It should be set - // only when writing, copying or composing an object. When copying or composing, - // it acts as the destinationPredefinedAcl parameter. - // PredefinedACL is always empty for ObjectAttrs returned from the service. - // See https://cloud.google.com/storage/docs/json_api/v1/objects/insert - // for valid values. - PredefinedACL string - - // Owner is the owner of the object. This field is read-only. - // - // If non-zero, it is in the form of "user-". - Owner string - - // Size is the length of the object's content. This field is read-only. - Size int64 - - // ContentEncoding is the encoding of the object's content. - ContentEncoding string - - // ContentDisposition is the optional Content-Disposition header of the object - // sent in the response headers. - ContentDisposition string - - // MD5 is the MD5 hash of the object's content. This field is read-only, - // except when used from a Writer. 
If set on a Writer, the uploaded - // data is rejected if its MD5 hash does not match this field. - MD5 []byte - - // CRC32C is the CRC32 checksum of the object's content using the Castagnoli93 - // polynomial. This field is read-only, except when used from a Writer or - // Composer. In those cases, if the SendCRC32C field in the Writer or Composer - // is set to true, the uploaded data is rejected if its CRC32C hash does - // not match this field. - // - // Note: For a Writer, SendCRC32C must be set to true BEFORE the first call to - // Writer.Write() in order to send the checksum. - CRC32C uint32 - - // MediaLink is a URL to the object's content. This field is read-only. - MediaLink string - - // Metadata represents user-provided metadata, in key/value pairs. - // It can be nil if no metadata is provided. - // - // For object downloads using Reader, metadata keys are sent as headers. - // Therefore, avoid setting metadata keys using characters that are not valid - // for headers. See https://www.rfc-editor.org/rfc/rfc7230#section-3.2.6. - Metadata map[string]string - - // Generation is the generation number of the object's content. - // This field is read-only. - Generation int64 - - // Metageneration is the version of the metadata for this - // object at this generation. This field is used for preconditions - // and for detecting changes in metadata. A metageneration number - // is only meaningful in the context of a particular generation - // of a particular object. This field is read-only. - Metageneration int64 - - // StorageClass is the storage class of the object. This defines - // how objects are stored and determines the SLA and the cost of storage. - // Typical values are "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". - // Defaults to "STANDARD". - // See https://cloud.google.com/storage/docs/storage-classes for all - // valid values. - StorageClass string - - // Created is the time the object was created. This field is read-only. - Created time.Time - - // Deleted is the time the object was deleted. - // If not deleted, it is the zero value. This field is read-only. - Deleted time.Time - - // Updated is the creation or modification time of the object. - // For buckets with versioning enabled, changing an object's - // metadata does not change this property. This field is read-only. - Updated time.Time - - // CustomerKeySHA256 is the base64-encoded SHA-256 hash of the - // customer-supplied encryption key for the object. It is empty if there is - // no customer-supplied encryption key. - // See https://cloud.google.com/storage/docs/encryption for more about - // encryption in Google Cloud Storage. - CustomerKeySHA256 string - - // Cloud KMS key name, in the form - // projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object, - // if the object is encrypted by such a key. - // - // Providing both a KMSKeyName and a customer-supplied encryption key (via - // ObjectHandle.Key) will result in an error when writing an object. - KMSKeyName string - - // Prefix is set only for ObjectAttrs which represent synthetic "directory - // entries" when iterating over buckets using Query.Delimiter. See - // ObjectIterator.Next. When set, no other fields in ObjectAttrs will be - // populated. - Prefix string - - // Etag is the HTTP/1.1 Entity tag for the object. - // This field is read-only. - Etag string - - // A user-specified timestamp which can be applied to an object. 
This is - // typically set in order to use the CustomTimeBefore and DaysSinceCustomTime - // LifecycleConditions to manage object lifecycles. - // - // CustomTime cannot be removed once set on an object. It can be updated to a - // later value but not to an earlier one. For more information see - // https://cloud.google.com/storage/docs/metadata#custom-time . - CustomTime time.Time - - // ComponentCount is the number of objects contained within a composite object. - // For non-composite objects, the value will be zero. - // This field is read-only. - ComponentCount int64 -} - -// convertTime converts a time in RFC3339 format to time.Time. -// If any error occurs in parsing, the zero-value time.Time is silently returned. -func convertTime(t string) time.Time { - var r time.Time - if t != "" { - r, _ = time.Parse(time.RFC3339, t) - } - return r -} - -func convertProtoTime(t *timestamppb.Timestamp) time.Time { - var r time.Time - if t != nil { - r = t.AsTime() - } - return r -} - -func toProtoTimestamp(t time.Time) *timestamppb.Timestamp { - if t.IsZero() { - return nil - } - - return timestamppb.New(t) -} - -func newObject(o *raw.Object) *ObjectAttrs { - if o == nil { - return nil - } - owner := "" - if o.Owner != nil { - owner = o.Owner.Entity - } - md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) - crc32c, _ := decodeUint32(o.Crc32c) - var sha256 string - if o.CustomerEncryption != nil { - sha256 = o.CustomerEncryption.KeySha256 - } - return &ObjectAttrs{ - Bucket: o.Bucket, - Name: o.Name, - ContentType: o.ContentType, - ContentLanguage: o.ContentLanguage, - CacheControl: o.CacheControl, - EventBasedHold: o.EventBasedHold, - TemporaryHold: o.TemporaryHold, - RetentionExpirationTime: convertTime(o.RetentionExpirationTime), - ACL: toObjectACLRules(o.Acl), - Owner: owner, - ContentEncoding: o.ContentEncoding, - ContentDisposition: o.ContentDisposition, - Size: int64(o.Size), - MD5: md5, - CRC32C: crc32c, - MediaLink: o.MediaLink, - Metadata: o.Metadata, - Generation: o.Generation, - Metageneration: o.Metageneration, - StorageClass: o.StorageClass, - CustomerKeySHA256: sha256, - KMSKeyName: o.KmsKeyName, - Created: convertTime(o.TimeCreated), - Deleted: convertTime(o.TimeDeleted), - Updated: convertTime(o.Updated), - Etag: o.Etag, - CustomTime: convertTime(o.CustomTime), - ComponentCount: o.ComponentCount, - } -} - -func newObjectFromProto(o *storagepb.Object) *ObjectAttrs { - if o == nil { - return nil - } - return &ObjectAttrs{ - Bucket: parseBucketName(o.Bucket), - Name: o.Name, - ContentType: o.ContentType, - ContentLanguage: o.ContentLanguage, - CacheControl: o.CacheControl, - EventBasedHold: o.GetEventBasedHold(), - TemporaryHold: o.TemporaryHold, - RetentionExpirationTime: convertProtoTime(o.GetRetentionExpireTime()), - ACL: toObjectACLRulesFromProto(o.GetAcl()), - Owner: o.GetOwner().GetEntity(), - ContentEncoding: o.ContentEncoding, - ContentDisposition: o.ContentDisposition, - Size: int64(o.Size), - MD5: o.GetChecksums().GetMd5Hash(), - CRC32C: o.GetChecksums().GetCrc32C(), - Metadata: o.Metadata, - Generation: o.Generation, - Metageneration: o.Metageneration, - StorageClass: o.StorageClass, - // CustomerKeySHA256 needs to be presented as base64 encoded, but the response from gRPC is not. 
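The MD5/CRC32C fields documented above are writable only through a Writer. A hedged sketch of an integrity-checked upload (bucket and object names are placeholders), using the documented requirement that SendCRC32C be set before the first Write:

```go
package main

import (
	"context"
	"hash/crc32"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	data := []byte("hello world")

	// Compute the CRC32C (Castagnoli) checksum client-side.
	crc := crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))

	// "my-bucket" and "my-object" are placeholder names.
	w := client.Bucket("my-bucket").Object("my-object").NewWriter(ctx)
	w.CRC32C = crc
	w.SendCRC32C = true // must be set before the first Write
	if _, err := w.Write(data); err != nil {
		log.Fatal(err)
	}
	// Close finalizes the upload; the service rejects it if the checksum
	// of the received bytes does not match w.CRC32C.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}
```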
- CustomerKeySHA256: base64.StdEncoding.EncodeToString(o.GetCustomerEncryption().GetKeySha256Bytes()), - KMSKeyName: o.GetKmsKey(), - Created: convertProtoTime(o.GetCreateTime()), - Deleted: convertProtoTime(o.GetDeleteTime()), - Updated: convertProtoTime(o.GetUpdateTime()), - CustomTime: convertProtoTime(o.GetCustomTime()), - ComponentCount: int64(o.ComponentCount), - } -} - -// Decode a uint32 encoded in Base64 in big-endian byte order. -func decodeUint32(b64 string) (uint32, error) { - d, err := base64.StdEncoding.DecodeString(b64) - if err != nil { - return 0, err - } - if len(d) != 4 { - return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d) - } - return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil -} - -// Encode a uint32 as Base64 in big-endian byte order. -func encodeUint32(u uint32) string { - b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)} - return base64.StdEncoding.EncodeToString(b) -} - -// Projection is an enumerated type for Query.Projection. -type Projection int - -const ( - // ProjectionDefault returns all fields of objects. - ProjectionDefault Projection = iota - - // ProjectionFull returns all fields of objects. - ProjectionFull - - // ProjectionNoACL returns all fields of objects except for Owner and ACL. - ProjectionNoACL -) - -func (p Projection) String() string { - switch p { - case ProjectionFull: - return "full" - case ProjectionNoACL: - return "noAcl" - default: - return "" - } -} - -// Query represents a query to filter objects from a bucket. -type Query struct { - // Delimiter returns results in a directory-like fashion. - // Results will contain only objects whose names, aside from the - // prefix, do not contain delimiter. Objects whose names, - // aside from the prefix, contain delimiter will have their name, - // truncated after the delimiter, returned in prefixes. - // Duplicate prefixes are omitted. - // Optional. - Delimiter string - - // Prefix is the prefix filter to query objects - // whose names begin with this prefix. - // Optional. - Prefix string - - // Versions indicates whether multiple versions of the same - // object will be included in the results. - Versions bool - - // attrSelection is used to select only specific fields to be returned by - // the query. It is set by the user calling SetAttrSelection. These - // are used by toFieldMask and toFieldSelection for gRPC and HTTP/JSON - // clients respectively. - attrSelection []string - - // StartOffset is used to filter results to objects whose names are - // lexicographically equal to or after startOffset. If endOffset is also set, - // the objects listed will have names between startOffset (inclusive) and - // endOffset (exclusive). - StartOffset string - - // EndOffset is used to filter results to objects whose names are - // lexicographically before endOffset. If startOffset is also set, the objects - // listed will have names between startOffset (inclusive) and endOffset (exclusive). - EndOffset string - - // Projection defines the set of properties to return. It will default to ProjectionFull, - // which returns all properties. Passing ProjectionNoACL will omit Owner and ACL, - // which may improve performance when listing many objects. - Projection Projection - - // IncludeTrailingDelimiter controls how objects which end in a single - // instance of Delimiter (for example, if Query.Delimiter = "/" and the - // object name is "foo/bar/") are included in the results. By default, these - // objects only show up as prefixes.
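decodeUint32/encodeUint32 above define the wire format for the crc32c JSON field: four big-endian bytes, base64-encoded. A standalone round-trip, re-typed here for illustration since the vendored helpers are unexported:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// encodeUint32 mirrors the vendored helper: big-endian bytes, then base64.
func encodeUint32(u uint32) string {
	b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)}
	return base64.StdEncoding.EncodeToString(b)
}

// decodeUint32 reverses the encoding, rejecting values that are not 4 bytes.
func decodeUint32(b64 string) (uint32, error) {
	d, err := base64.StdEncoding.DecodeString(b64)
	if err != nil {
		return 0, err
	}
	if len(d) != 4 {
		return 0, fmt.Errorf("%q does not encode a 32-bit value", d)
	}
	return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil
}

func main() {
	s := encodeUint32(0xDEADBEEF)
	u, _ := decodeUint32(s)
	fmt.Printf("%s -> %#x\n", s, u) // prints: 3q2+7w== -> 0xdeadbeef
}
```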
If IncludeTrailingDelimiter is set to - // true, they will also be included as objects and their metadata will be - // populated in the returned ObjectAttrs. - IncludeTrailingDelimiter bool -} - -// attrToFieldMap maps the field names of ObjectAttrs to the underlying field -// names in the API call. Only the ObjectAttrs field names are visible to users -// because they are already part of the public API of the package. -var attrToFieldMap = map[string]string{ - "Bucket": "bucket", - "Name": "name", - "ContentType": "contentType", - "ContentLanguage": "contentLanguage", - "CacheControl": "cacheControl", - "EventBasedHold": "eventBasedHold", - "TemporaryHold": "temporaryHold", - "RetentionExpirationTime": "retentionExpirationTime", - "ACL": "acl", - "Owner": "owner", - "ContentEncoding": "contentEncoding", - "ContentDisposition": "contentDisposition", - "Size": "size", - "MD5": "md5Hash", - "CRC32C": "crc32c", - "MediaLink": "mediaLink", - "Metadata": "metadata", - "Generation": "generation", - "Metageneration": "metageneration", - "StorageClass": "storageClass", - "CustomerKeySHA256": "customerEncryption", - "KMSKeyName": "kmsKeyName", - "Created": "timeCreated", - "Deleted": "timeDeleted", - "Updated": "updated", - "Etag": "etag", - "CustomTime": "customTime", - "ComponentCount": "componentCount", -} - -// attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field -// names in the protobuf Object message. -var attrToProtoFieldMap = map[string]string{ - "Name": "name", - "Bucket": "bucket", - "Etag": "etag", - "Generation": "generation", - "Metageneration": "metageneration", - "StorageClass": "storage_class", - "Size": "size", - "ContentEncoding": "content_encoding", - "ContentDisposition": "content_disposition", - "CacheControl": "cache_control", - "ACL": "acl", - "ContentLanguage": "content_language", - "Deleted": "delete_time", - "ContentType": "content_type", - "Created": "create_time", - "CRC32C": "checksums.crc32c", - "MD5": "checksums.md5_hash", - "Updated": "update_time", - "KMSKeyName": "kms_key", - "TemporaryHold": "temporary_hold", - "RetentionExpirationTime": "retention_expire_time", - "Metadata": "metadata", - "EventBasedHold": "event_based_hold", - "Owner": "owner", - "CustomerKeySHA256": "customer_encryption", - "CustomTime": "custom_time", - "ComponentCount": "component_count", - // MediaLink was explicitly excluded from the proto as it is an HTTP-ism. - // "MediaLink": "mediaLink", -} - -// SetAttrSelection makes the query populate only specific attributes of -// objects. When iterating over objects, if you only need each object's name -// and size, pass []string{"Name", "Size"} to this method. Only these fields -// will be fetched for each object across the network; the other fields of -// ObjectAttr will remain at their default values. This is a performance -// optimization; for more information, see -// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance -func (q *Query) SetAttrSelection(attrs []string) error { - // Validate selections. - for _, attr := range attrs { - // If the attr is acceptable for one of the two sets, then it is OK. - // If it is not acceptable for either, then return an error. - // The respective masking implementations ignore unknown attrs which - // makes switching between transports a little easier. 
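Delimiter, Prefix, and the synthetic directory entries described above combine as follows; a sketch of a one-level-deep listing with a placeholder bucket name:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// List "logs/" one level deep; names containing "/" past the prefix
	// collapse into synthetic directory entries (only Prefix is set).
	q := &storage.Query{Prefix: "logs/", Delimiter: "/"}
	it := client.Bucket("my-bucket").Objects(ctx, q) // placeholder bucket
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if attrs.Prefix != "" {
			fmt.Println("dir:", attrs.Prefix)
		} else {
			fmt.Println("obj:", attrs.Name)
		}
	}
}
```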
- _, okJSON := attrToFieldMap[attr] - _, okGRPC := attrToProtoFieldMap[attr] - - if !okJSON && !okGRPC { - return fmt.Errorf("storage: attr %v is not valid", attr) - } - } - - q.attrSelection = attrs - - return nil -} - -func (q *Query) toFieldSelection() string { - if q == nil || len(q.attrSelection) == 0 { - return "" - } - fieldSet := make(map[string]bool) - - for _, attr := range q.attrSelection { - field, ok := attrToFieldMap[attr] - if !ok { - // Future proofing, skip unknown fields, let SetAttrSelection handle - // error modes. - continue - } - fieldSet[field] = true - } - - var s string - if len(fieldSet) > 0 { - var b bytes.Buffer - b.WriteString("prefixes,items(") - first := true - for field := range fieldSet { - if !first { - b.WriteString(",") - } - first = false - b.WriteString(field) - } - b.WriteString(")") - s = b.String() - } - return s -} - -func (q *Query) toFieldMask() *fieldmaskpb.FieldMask { - // The default behavior with no Query is ProjectionDefault (i.e. ProjectionFull). - if q == nil { - return &fieldmaskpb.FieldMask{Paths: []string{"*"}} - } - - // User selected attributes via q.SetAttrSelection. This takes precedence - // over the Projection. - if numSelected := len(q.attrSelection); numSelected > 0 { - protoFieldPaths := make([]string, 0, numSelected) - - for _, attr := range q.attrSelection { - pf, ok := attrToProtoFieldMap[attr] - if !ok { - // Future proofing, skip unknown fields, let SetAttrSelection - // handle error modes. - continue - } - protoFieldPaths = append(protoFieldPaths, pf) - } - - return &fieldmaskpb.FieldMask{Paths: protoFieldPaths} - } - - // ProjectionDefault == ProjectionFull, which means all fields. - fm := &fieldmaskpb.FieldMask{Paths: []string{"*"}} - if q.Projection == ProjectionNoACL { - paths := make([]string, 0, len(attrToProtoFieldMap)-2) // omitting two fields - for _, f := range attrToProtoFieldMap { - // Skip the acl and owner fields for "NoACL". - if f == "acl" || f == "owner" { - continue - } - paths = append(paths, f) - } - fm.Paths = paths - } - - return fm -} - -// Conditions constrain methods to act on specific generations of -// objects. -// -// The zero value is an empty set of constraints. Not all conditions or -// combinations of conditions are applicable to all methods. -// See https://cloud.google.com/storage/docs/generations-preconditions -// for details on how these operate. -type Conditions struct { - // Generation constraints. - // At most one of the following can be set to a non-zero value. - - // GenerationMatch specifies that the object must have the given generation - // for the operation to occur. - // If GenerationMatch is zero, it has no effect. - // Use DoesNotExist to specify that the object does not exist in the bucket. - GenerationMatch int64 - - // GenerationNotMatch specifies that the object must not have the given - // generation for the operation to occur. - // If GenerationNotMatch is zero, it has no effect. - GenerationNotMatch int64 - - // DoesNotExist specifies that the object must not exist in the bucket for - // the operation to occur. - // If DoesNotExist is false, it has no effect. - DoesNotExist bool - - // Metadata generation constraints. - // At most one of the following can be set to a non-zero value. - - // MetagenerationMatch specifies that the object must have the given - // metageneration for the operation to occur. - // If MetagenerationMatch is zero, it has no effect.
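SetAttrSelection feeds toFieldSelection (JSON) and toFieldMask (gRPC) above; from the caller's side it looks like this sketch (bucket name and prefix are placeholders):

```go
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

// listNamesAndSizes fetches only Name and Size for each object, which the
// code above translates into a JSON field selector or a proto FieldMask.
func listNamesAndSizes(ctx context.Context, client *storage.Client, bucket string) error {
	q := &storage.Query{Prefix: "data/"}
	if err := q.SetAttrSelection([]string{"Name", "Size"}); err != nil {
		return err
	}
	it := client.Bucket(bucket).Objects(ctx, q)
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		// All other ObjectAttrs fields remain at their zero values.
		fmt.Println(attrs.Name, attrs.Size)
	}
}
```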
- MetagenerationMatch int64 - - // MetagenerationNotMatch specifies that the object must not have the given - // metageneration for the operation to occur. - // If MetagenerationNotMatch is zero, it has no effect. - MetagenerationNotMatch int64 -} - -func (c *Conditions) validate(method string) error { - if *c == (Conditions{}) { - return fmt.Errorf("storage: %s: empty conditions", method) - } - if !c.isGenerationValid() { - return fmt.Errorf("storage: %s: multiple conditions specified for generation", method) - } - if !c.isMetagenerationValid() { - return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) - } - return nil -} - -func (c *Conditions) isGenerationValid() bool { - n := 0 - if c.GenerationMatch != 0 { - n++ - } - if c.GenerationNotMatch != 0 { - n++ - } - if c.DoesNotExist { - n++ - } - return n <= 1 -} - -func (c *Conditions) isMetagenerationValid() bool { - return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0 -} - -// applyConds modifies the provided call using the conditions in conds. -// call is something that quacks like a *raw.WhateverCall. -func applyConds(method string, gen int64, conds *Conditions, call interface{}) error { - cval := reflect.ValueOf(call) - if gen >= 0 { - if !setConditionField(cval, "Generation", gen) { - return fmt.Errorf("storage: %s: generation not supported", method) - } - } - if conds == nil { - return nil - } - if err := conds.validate(method); err != nil { - return err - } - switch { - case conds.GenerationMatch != 0: - if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) { - return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) - } - case conds.GenerationNotMatch != 0: - if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) { - return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) - } - case conds.DoesNotExist: - if !setConditionField(cval, "IfGenerationMatch", int64(0)) { - return fmt.Errorf("storage: %s: DoesNotExist not supported", method) - } - } - switch { - case conds.MetagenerationMatch != 0: - if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) - } - case conds.MetagenerationNotMatch != 0: - if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) - } - } - return nil -} - -func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { - if gen >= 0 { - call.SourceGeneration(gen) - } - if conds == nil { - return nil - } - if err := conds.validate("CopyTo source"); err != nil { - return err - } - switch { - case conds.GenerationMatch != 0: - call.IfSourceGenerationMatch(conds.GenerationMatch) - case conds.GenerationNotMatch != 0: - call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) - case conds.DoesNotExist: - call.IfSourceGenerationMatch(0) - } - switch { - case conds.MetagenerationMatch != 0: - call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) - case conds.MetagenerationNotMatch != 0: - call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) - } - return nil -} - -func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error { - if gen >= 0 { - call.SourceGeneration = gen - } - if conds == nil { - return nil - } - if err := conds.validate("CopyTo source"); err != nil { - return err - } - 
switch { - case conds.GenerationMatch != 0: - call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch) - case conds.GenerationNotMatch != 0: - call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch) - case conds.DoesNotExist: - call.IfSourceGenerationMatch = proto.Int64(0) - } - switch { - case conds.MetagenerationMatch != 0: - call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch) - case conds.MetagenerationNotMatch != 0: - call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch) - } - return nil -} - -// setConditionField sets a field on a *raw.WhateverCall. -// We can't use anonymous interfaces because the return type is -// different, since the field setters are builders. -func setConditionField(call reflect.Value, name string, value interface{}) bool { - m := call.MethodByName(name) - if !m.IsValid() { - return false - } - m.Call([]reflect.Value{reflect.ValueOf(value)}) - return true -} - -// Retryer returns an object handle that is configured with custom retry -// behavior as specified by the options that are passed to it. All operations -// on the new handle will use the customized retry configuration. -// These retry options will merge with the bucket's retryer (if set) for the -// returned handle. Options passed into this method will take precedence over -// retry options on the bucket and client. Note that you must explicitly pass in -// each option you want to override. -func (o *ObjectHandle) Retryer(opts ...RetryOption) *ObjectHandle { - o2 := *o - var retry *retryConfig - if o.retry != nil { - // merge the options with the existing retry - retry = o.retry - } else { - retry = &retryConfig{} - } - for _, opt := range opts { - opt.apply(retry) - } - o2.retry = retry - o2.acl.retry = retry - return &o2 -} - -// SetRetry configures the client with custom retry behavior as specified by the -// options that are passed to it. All operations using this client will use the -// customized retry configuration. -// This should be called once before using the client for network operations, as -// there could be indeterminate behaviour with operations in progress. -// Retry options set on a bucket or object handle will take precedence over -// these options. -func (c *Client) SetRetry(opts ...RetryOption) { - var retry *retryConfig - if c.retry != nil { - // merge the options with the existing retry - retry = c.retry - } else { - retry = &retryConfig{} - } - for _, opt := range opts { - opt.apply(retry) - } - c.retry = retry -} - -// RetryOption allows users to configure non-default retry behavior for API -// calls made to GCS. -type RetryOption interface { - apply(config *retryConfig) -} - -// WithBackoff allows configuration of the backoff timing used for retries. -// Available configuration options (Initial, Max and Multiplier) are described -// at https://pkg.go.dev/github.com/googleapis/gax-go/v2#Backoff. If any fields -// are not supplied by the user, gax default values will be used. -func WithBackoff(backoff gax.Backoff) RetryOption { - return &withBackoff{ - backoff: backoff, - } -} - -type withBackoff struct { - backoff gax.Backoff -} - -func (wb *withBackoff) apply(config *retryConfig) { - config.backoff = &wb.backoff -} - -// RetryPolicy describes the available policies for which operations should be -// retried. The default is `RetryIdempotent`. -type RetryPolicy int - -const ( - // RetryIdempotent causes only idempotent operations to be retried when the - // service returns a transient error. 
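The applyConds/applySourceConds plumbing above is driven by ObjectHandle.If. Two common patterns, sketched with hypothetical helper names: create-only-if-absent (DoesNotExist becomes ifGenerationMatch=0, per the switch above) and a metageneration-guarded update:

```go
package main

import (
	"context"

	"cloud.google.com/go/storage"
)

// createOnce writes the object only if it does not already exist.
func createOnce(ctx context.Context, obj *storage.ObjectHandle, data []byte) error {
	w := obj.If(storage.Conditions{DoesNotExist: true}).NewWriter(ctx)
	if _, err := w.Write(data); err != nil {
		return err
	}
	return w.Close() // fails with a 412 precondition error if the object exists
}

// guardedUpdate applies the metadata change only if nobody else has
// modified the object's metadata since we read it.
func guardedUpdate(ctx context.Context, obj *storage.ObjectHandle) error {
	attrs, err := obj.Attrs(ctx)
	if err != nil {
		return err
	}
	_, err = obj.
		If(storage.Conditions{MetagenerationMatch: attrs.Metageneration}).
		Update(ctx, storage.ObjectAttrsToUpdate{ContentType: "text/plain"})
	return err
}
```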
Using this policy, fully idempotent - // operations (such as `ObjectHandle.Attrs()`) will always be retried. - // Conditionally idempotent operations (for example `ObjectHandle.Update()`) - // will be retried only if the necessary conditions have been supplied (in - // the case of `ObjectHandle.Update()` this would mean supplying a - // `Conditions.MetagenerationMatch` condition is required). - RetryIdempotent RetryPolicy = iota - - // RetryAlways causes all operations to be retried when the service returns a - // transient error, regardless of idempotency considerations. - RetryAlways - - // RetryNever causes the client to not perform retries on failed operations. - RetryNever -) - -// WithPolicy allows the configuration of which operations should be performed -// with retries for transient errors. -func WithPolicy(policy RetryPolicy) RetryOption { - return &withPolicy{ - policy: policy, - } -} - -type withPolicy struct { - policy RetryPolicy -} - -func (ws *withPolicy) apply(config *retryConfig) { - config.policy = ws.policy -} - -// WithErrorFunc allows users to pass a custom function to the retryer. Errors -// will be retried if and only if `shouldRetry(err)` returns true. -// By default, the following errors are retried (see ShouldRetry for the default -// function): -// -// - HTTP responses with codes 408, 429, 502, 503, and 504. -// -// - Transient network errors such as connection reset and io.ErrUnexpectedEOF. -// -// - Errors which are considered transient using the Temporary() interface. -// -// - Wrapped versions of these errors. -// -// This option can be used to retry on a different set of errors than the -// default. Users can use the default ShouldRetry function inside their custom -// function if they only want to make minor modifications to default behavior. -func WithErrorFunc(shouldRetry func(err error) bool) RetryOption { - return &withErrorFunc{ - shouldRetry: shouldRetry, - } -} - -type withErrorFunc struct { - shouldRetry func(err error) bool -} - -func (wef *withErrorFunc) apply(config *retryConfig) { - config.shouldRetry = wef.shouldRetry -} - -type retryConfig struct { - backoff *gax.Backoff - policy RetryPolicy - shouldRetry func(err error) bool -} - -func (r *retryConfig) clone() *retryConfig { - if r == nil { - return nil - } - - var bo *gax.Backoff - if r.backoff != nil { - bo = &gax.Backoff{ - Initial: r.backoff.Initial, - Max: r.backoff.Max, - Multiplier: r.backoff.Multiplier, - } - } - - return &retryConfig{ - backoff: bo, - policy: r.policy, - shouldRetry: r.shouldRetry, - } -} - -// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods -// that modifyCall searches for by name. -type composeSourceObj struct { - src *raw.ComposeRequestSourceObjects -} - -func (c composeSourceObj) Generation(gen int64) { - c.src.Generation = gen -} - -func (c composeSourceObj) IfGenerationMatch(gen int64) { - // It's safe to overwrite ObjectPreconditions, since its only field is - // IfGenerationMatch. - c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{ - IfGenerationMatch: gen, - } -} - -func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error { - if key == nil { - return nil - } - // TODO(jbd): Ask the API team to return a more user-friendly error - // and avoid doing this check at the client level. 
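Putting WithBackoff, WithPolicy, and WithErrorFunc together: a hedged sketch of layering client-wide retry defaults with a per-handle override (bucket/object names are placeholders; ShouldRetry is the exported default predicate referenced in the WithErrorFunc doc above):

```go
package main

import (
	"errors"
	"time"

	"cloud.google.com/go/storage"
	"github.com/googleapis/gax-go/v2"
	"google.golang.org/api/googleapi"
)

func configureRetries(client *storage.Client) *storage.ObjectHandle {
	// Client-wide defaults: slower backoff, retry all operations.
	client.SetRetry(
		storage.WithBackoff(gax.Backoff{
			Initial:    2 * time.Second,
			Max:        30 * time.Second,
			Multiplier: 3,
		}),
		storage.WithPolicy(storage.RetryAlways),
	)

	// Per-handle override: also retry HTTP 500, which the default
	// predicate does not, while keeping the default behavior otherwise.
	return client.Bucket("my-bucket").Object("my-object").Retryer(
		storage.WithErrorFunc(func(err error) bool {
			var e *googleapi.Error
			if errors.As(err, &e) && e.Code == 500 {
				return true
			}
			return storage.ShouldRetry(err)
		}),
	)
}
```

Per the merge rules above, options set on the handle take precedence over those set on the client.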
- if len(key) != 32 { - return errors.New("storage: not a 32-byte AES-256 key") - } - var cs string - if copySource { - cs = "copy-source-" - } - headers.Set("x-goog-"+cs+"encryption-algorithm", aes256Algorithm) - headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key)) - keyHash := sha256.Sum256(key) - headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:])) - return nil -} - -// toProtoCommonObjectRequestParams sets customer-supplied encryption to the proto library's CommonObjectRequestParams. -func toProtoCommonObjectRequestParams(key []byte) *storagepb.CommonObjectRequestParams { - if key == nil { - return nil - } - keyHash := sha256.Sum256(key) - return &storagepb.CommonObjectRequestParams{ - EncryptionAlgorithm: aes256Algorithm, - EncryptionKeyBytes: key, - EncryptionKeySha256Bytes: keyHash[:], - } -} - -func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChecksums { - var checksums *storagepb.ObjectChecksums - if sendCRC32C { - checksums = &storagepb.ObjectChecksums{ - Crc32C: proto.Uint32(attrs.CRC32C), - } - } - if len(attrs.MD5) != 0 { - if checksums == nil { - checksums = &storagepb.ObjectChecksums{ - Md5Hash: attrs.MD5, - } - } else { - checksums.Md5Hash = attrs.MD5 - } - } - return checksums -} - -// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account. -func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { - o := makeStorageOpts(true, c.retry, "") - return c.tc.GetServiceAccount(ctx, projectID, o...) -} - -// bucketResourceName formats the given project ID and bucket ID -// into a Bucket resource name. This is the format necessary for the gRPC API as -// it conforms to the Resource-oriented design practices in https://google.aip.dev/121. -func bucketResourceName(p, b string) string { - return fmt.Sprintf("projects/%s/buckets/%s", p, b) -} - -// parseBucketName strips the leading resource path segment and returns the -// bucket ID, which is the simple Bucket name typical of the v1 API. -func parseBucketName(b string) string { - sep := strings.LastIndex(b, "/") - return b[sep+1:] -} - -// parseProjectNumber consumes the given resource name and parses out the project -// number if one is present, i.e. it is not a project ID. -func parseProjectNumber(r string) uint64 { - projectID := regexp.MustCompile(`projects\/([0-9]+)\/?`) - if matches := projectID.FindStringSubmatch(r); len(matches) > 0 { - // Capture group follows the matched segment. For example: - // input: projects/123/bars/456 - // output: [projects/123/, 123] - number, err := strconv.ParseUint(matches[1], 10, 64) - if err != nil { - return 0 - } - return number - } - - return 0 -} - -// toProjectResource accepts a project ID and formats it as a Project resource -// name. -func toProjectResource(project string) string { - return fmt.Sprintf("projects/%s", project) -} - -// setConditionProtoField uses protobuf reflection to set the named condition field -// to the given condition value if supported on the protobuf message. -// -// This is an experimental API and not intended for public use.
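setEncryptionHeaders and toProtoCommonObjectRequestParams above are triggered by supplying a key via ObjectHandle.Key. A sketch of a customer-supplied-key round trip (names are placeholders; the random key stands in for one persisted in a secret store):

```go
package main

import (
	"context"
	"crypto/rand"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// The key must be exactly 32 bytes (AES-256), per the check above.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		log.Fatal(err)
	}

	obj := client.Bucket("my-bucket").Object("secret") // placeholder names

	// Write with the customer-supplied key; the client sends the key and
	// its SHA-256 in x-goog-encryption-* headers.
	w := obj.Key(key).NewWriter(ctx)
	if _, err := w.Write([]byte("sensitive payload")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// The same key must be supplied to read the object back.
	r, err := obj.Key(key).NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
```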
-func setConditionProtoField(m protoreflect.Message, f string, v int64) bool { - fields := m.Descriptor().Fields() - if rf := fields.ByName(protoreflect.Name(f)); rf != nil { - m.Set(rf, protoreflect.ValueOfInt64(v)) - return true - } - - return false -} - -// applyCondsProto validates and attempts to set the conditions on a protobuf -// message using protobuf reflection. -// -// This is an experimental API and not intended for public use. -func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error { - rmsg := msg.ProtoReflect() - - if gen >= 0 { - if !setConditionProtoField(rmsg, "generation", gen) { - return fmt.Errorf("storage: %s: generation not supported", method) - } - } - if conds == nil { - return nil - } - if err := conds.validate(method); err != nil { - return err - } - - switch { - case conds.GenerationMatch != 0: - if !setConditionProtoField(rmsg, "if_generation_match", conds.GenerationMatch) { - return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) - } - case conds.GenerationNotMatch != 0: - if !setConditionProtoField(rmsg, "if_generation_not_match", conds.GenerationNotMatch) { - return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) - } - case conds.DoesNotExist: - if !setConditionProtoField(rmsg, "if_generation_match", int64(0)) { - return fmt.Errorf("storage: %s: DoesNotExist not supported", method) - } - } - switch { - case conds.MetagenerationMatch != 0: - if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) - } - case conds.MetagenerationNotMatch != 0: - if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) - } - } - return nil -} diff --git a/vendor/cloud.google.com/go/storage/storage.replay b/vendor/cloud.google.com/go/storage/storage.replay deleted file mode 100644 index 07ed1bfaf3..0000000000 --- a/vendor/cloud.google.com/go/storage/storage.replay +++ /dev/null @@ -1,30067 +0,0 @@ -{ - "Initial": "IjIwMTktMDUtMDJUMjI6MjM6NTMuNDAzNDMyMDEzWiI=", - "Version": "0.2", - "Converter": { - "ClearHeaders": [ - "^X-Goog-.*Encryption-Key$" - ], - "RemoveRequestHeaders": [ - "^Authorization$", - "^Proxy-Authorization$", - "^Connection$", - "^Content-Type$", - "^Date$", - "^Host$", - "^Transfer-Encoding$", - "^Via$", - "^X-Forwarded-.*$", - "^X-Cloud-Trace-Context$", - "^X-Goog-Api-Client$", - "^X-Google-.*$", - "^X-Gfe-.*$" - ], - "RemoveResponseHeaders": [ - "^X-Google-.*$", - "^X-Gfe-.*$" - ], - "ClearParams": null, - "RemoveParams": null - }, - "Entries": [ - { - "ID": "f5f231bed6e14b7f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "485" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - 
"Date": [ - "Thu, 02 May 2019 22:23:54 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrZvgYBWgsCwPaGI9bo1ccC0WCBc8kJgydTwioDtXR9xps4HiDoKXI-vjYUl876SMqF0JhmhaEBgvxrIL9Y989YCFrH65xGys_r1JbPdi9M9N0kS3M" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "9a9914424ef59619", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "485" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:55 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqFvRYrCleVqpn0QshSvzW5I1-8o7N6vGYh8o5G1f-AHnsX2N_x-NKJrvlxnXqm9auw5gMoWFaJTSTtKL5y85WlQ_eAjmmlrkD4tbHYBZJ386xgaZw" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU1LjEwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NS4xMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "17f2abbdd781a33b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ 
- "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:55 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:23:55 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoYoTmTG5mxpFGPvmECUTlGMlQwhfmqGsZtBtZ9xV89Pw3q-p5BBeX_3imdofr_7EBT7nBm4v5alpg45Zi8a8ET28qBH2xfNe4n15HR-1fhGou2wQU" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU1LjEwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NS4xMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iO
iJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "6752f5a9a036af11", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:23:56 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur4bFsk4ylv96GsTkuDKG--hVaCR_UEhZ_fAzMGt5Eu5ZKncHOLjU_f2PcNP9saFGW-UkH9jXwt_nuR0G2zXOBjMJmLdd7Ml61bGMMrJeVa0OtcGpM" - ] - }, - "Body": "" - } - }, - { - "ID": "9c25646df7aacad9", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "543" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsic3RvcmFnZUNsYXNzIjoiTkVBUkxJTkUiLCJ0eXBlIjoiU2V0U3RvcmFnZUNsYXNzIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6IkRlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsb2NhdGlvbiI6IlVTIiwibmFtZSI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwidmVyc2lvbmluZyI6eyJlbmFibGVkIjp0cnVlfX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "926" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:56 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 
01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq6CjN9PjjzT3LmHxW_tU_ciQ1rahetoQGbX_gX8EC5il7tPJi2yxi5VZxnDNrp1h14b7Ix8tnvtkHufAyO1-lMRutdHK5GSzonff78Nm6KPAKN5fU" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU2LjQwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1Ni40MDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOnRydWV9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJTZXRTdG9yYWdlQ2xhc3MiLCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSJ9LCJjb25kaXRpb24iOnsiYWdlIjoxMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOmZhbHNlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk1VTFRJX1JFR0lPTkFMIiwiU1RBTkRBUkQiXSwibnVtTmV3ZXJWZXJzaW9ucyI6M319LHsiYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzAsImNyZWF0ZWRCZWZvcmUiOiIyMDE3LTAxLTAxIiwiaXNMaXZlIjp0cnVlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk5FQVJMSU5FIl0sIm51bU5ld2VyVmVyc2lvbnMiOjEwfX1dfSwibGFiZWxzIjp7ImwxIjoidjEiLCJlbXB0eSI6IiJ9LCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "f795b9adcb1b546e", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2872" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:56 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:23:56 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up2-mWQyRDbFSpF6U96vQpaBYr74NgiUWh3-KZnLWaFYnhQti1tgKWNtL15YgK8blaRSnzGeACPA6jNuM34yhr7bxztrdN2tobEQAzD5RVgzpqx14w" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU2LjQwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1Ni40MDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsidHlwZSI6IlNldFN0b3JhZ2VDbGFzcyIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6Ik
RlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MSIsImVtcHR5IjoiIn0sInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "2ee3f84c4e4045fb", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:23:57 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UplJEr-Hxa3hDFT5ozLEHYhHfaxlYFpc9Vwm8AL831-w_7BBgxHjjEsU8Br_uLnLes0h9hz37iuE9V8uVZ2liHY7ZD4piNH31oyapjCtwyXrukIP94" - ] - }, - "Body": "" - } - }, - { - "ID": "16f19dbf8e206756", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:57 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:23:57 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqvp5XjvB8nhNuz-bTeN9OklTfiBGldYKkcY13JF6oUfpV0z_jwoEQD3B3Ss3wWpaSmZfePjo7fkkr-hP3jbrUazNHaqQliiHqOBSNmSoPmwJpzfOI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "949f5ce411d6f672", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - 
"3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:58 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpVeVcmmuUyOt3Hbja89_Ewi6GRsJtRduqK93OT4Ys1aK5GqDWeGxyDbczUyRLeUYvZgtJzYLwVOOUszqAF4ipSXgZ1L_byd9cJ7ttVfQ_ceQBXxY4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJva
mVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "b75303fbdafb66d0", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "64" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2493" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:58 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqDnNlC3m95GplHjE79aqzhtwgfJCWQjHaCFG4i7qmTFliz2gdE4OiOnKAPIoNqxEngE35065YXNYA65aMSSeEluDKmQ__rJXcS_DpRdoYP4rZIyPo" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OC40MzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxhYmVscyI6eyJlbXB0eSI6IiIsImwxIjoidjEifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "831805b62d969707", - "Request": { - "Method": "PATCH", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "93" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJsYWJlbHMiOnsiYWJzZW50IjpudWxsLCJlbXB0eSI6bnVsbCwibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2495" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:59 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq8yNb3V9Kq8zPa_pdVrJIYv83v4fu6xAHwktfTz_Cy4K1rpi8xrDzYmw5wICaazfMcAiYPhM8r4Y6WkeOyeCRInvkJ6ndduhNN_dgu1U59uI5F3Qc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OS4wMzJaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InBy
b2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQU09In0=" - } - }, - { - "ID": "8a816d061fe9e7e0", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "77" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19fQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2570" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:59 GMT" - ], - "Etag": [ - "CAQ=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrbjywEmDPkqxln7-Nx_8ngRxoWvncfCx1fGVpPZGEjjmg8OJgv0uaczxapjlNeEcvMnqWI_RVzG6_588QaO8nCnPVnE2kkIek3D_t6UNL9CCPYLRc" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OS41MzBaIiwibWV0YWdlbmVyYXRpb24iOiI0IiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FRPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVE9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQVE9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVE9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBUT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQVE9In0=" - } - }, - { - "ID": "6a60397ebae323e7", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiYnVja2V0UG9saWN5T25seSJ9Cg==", - "dGVzdA==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3305" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:00 GMT" - ], - "Etag": [ - "CPmu4bnx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UokOvJnDKKHAQOa8mJLrxFpWWvQ2U_BC_3uI0Z4x870Q068evHio_t_YudbSq614h77-ofhBsyHpoknWnm_YrnXxkHzopreKoMBykFIcbsSB8TDKEE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRQb2xpY3lPbmx5LzE1NTY4MzU4NDAwNTUxNjEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5IiwibmFtZSI6ImJ1Y2tldFBvbGljeU9ubHkiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMC4wNTRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDAuMDU0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjAwLjA1NFoiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJDWTlyelVZaDAzUEszazZESmllMDlnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seT9nZW5lcmF0aW9uPTE1NTY4MzU4NDAwNTUxNjEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b
3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BtdTRibngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJocUJ5d0E9PSIsImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0=" - } - }, - { - "ID": "32d392d1da32f27b", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl/user-test%40example.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "111" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJ1c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJyb2xlIjoiUkVBREVSIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "519" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:00 GMT" - ], - "Etag": [ - "CPmu4bnx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], 
- "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoau0xgFp6ib5wM0bBWjRlklvDRPOu0VZ-LFCeENUWXutmkXSfgUbtr2Nuefb7Pm_yLvCNqtB9B6k_N1V7AlvkN4_JEz67ZSXQ_sAD5L1teQIpGiqA" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItdGVzdEBleGFtcGxlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3VzZXItdGVzdEBleGFtcGxlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InVzZXItdGVzdEBleGFtcGxlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJlbWFpbCI6InRlc3RAZXhhbXBsZS5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9" - } - }, - { - "ID": "6e3d0eda38a3ab6e", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "59" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6dHJ1ZX19fQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "663" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:01 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpSPFJHLzusxOr_OvhStJlWEpOs2EuthMO0Ys6pS9bsQeP0fthp_VUZfa8_sN8TX6PJYpxIdFlxB2QUaIujot1cUrssoU74XFrAwoqhlmiE5y9Aw-w" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMS4yNDJaIiwibWV0YWdlbmVyYXRpb24iOiI1IiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOnRydWUsImxvY2tlZFRpbWUiOiIyMDE5LTA3LTMxVDIyOjI0OjAxLjIzMFoifX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FVPSJ9" - } - }, - { - "ID": "8f8dbb687dc49edb", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13230" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:01 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:01 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpGpW2zLlN7nAgV5IVkKU3kx4QWHCkzAgMa-QPC1PyCol8CP9W605bMUmFMeerbR4enzmeNMvtb4a2HzBPUZ296YfGdtkt_6Guq82E226xzC5TPq4w" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"invalid","message":"Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.INVALID_VALUE, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, errorProtoCode=INVALID_VALUE, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only., unnamedArguments=[]}, location=null, message=Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only., reason=invalid, rpcCode=400} Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only."}}" - } - }, - { - "ID": "42ce6421b2c9fae4", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13358" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:02 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:02 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqJwxja3nYzWYbg_I5gWvOiow2ORuo8tNA-_Vzw7DX_YVhhb6_p1giUk3WjUHWt-lyDA13adhPGi4BDfIXzQyf-ZL3lsHoa2sNm29BIqRznw4mkAdE" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat 
java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly., unnamedArguments=[]}, location=null, message=another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly., reason=forbidden, rpcCode=403} another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly."}}" - } - }, - { - "ID": "0ca0bddf513110f4", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "45" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnt9fX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "624" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:02 GMT" - ], - "Etag": [ - "CAY=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - 
"X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpdDWPDU9-gPzHEieE0Rqx_40yf8fJLhwAP6fVsdS4F7I7sWj0h-Ti7VoDWciZgI_lgNUB7qyh08wjTAxrTLTsSiIYt2GR6ksxhDRupMoPWni5kXmE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMi4zNjFaIiwibWV0YWdlbmVyYXRpb24iOiI2IiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FZPSJ9" - } - }, - { - "ID": "8a8bee740102593d", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2964" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:02 GMT" - ], - "Etag": [ - "CPmu4bnx/eECEAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:02 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq59Mf8Ea4fgpDnUzupeIP3bGt3VpyI6HjL4KJtDKAD_h-Ua-AJSX3u3x4TCsx2MZcIVhMs9pW9SWsrIcsvsr3kGt2Je9W87LElbN5dlw02EItBcR4" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS91c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LT
IwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJidWNrZXRQb2xpY3lPbmx5IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDAwNTUxNjEiLCJlbnRpdHkiOiJ1c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJ0ZXN0QGV4YW1wbGUuY29tIiwiZXRhZyI6IkNQbXU0Ym54L2VFQ0VBST0ifV19" - } - }, - { - "ID": "91ba2c22c5f5de9a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:03 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrwotKay181GHzZsWfp6BzJA4FDOIfK2s1WlzB9p8QsIEX42AtvMhkgLWqSIyEhb-MSv9snqx0WRwUs5sDN3_5NVoFTzQeLkDBR9mbsixI-udc8SLI" - ] - }, - "Body": "" - } - }, - { - "ID": "43fec6c3a8c8cb63", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiY29uZGRlbCJ9Cg==", - "Zm9v" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3161" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:03 GMT" - ], - "Etag": [ - "CNHLq7vx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrrYnzZRz4GFV3BsHoF-vv9v2Lc13Wp6-P5iowSZFjqyykVhYZ6CkesIVxA2v2xNCbVeWgCBXCFFt9fje73cis1cECwwqrYLr5QF5bZCy4TsJ-LT8k" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb25kZGVsLzE1NTY4MzU4NDMzNjg0MDEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb25kZGVsIiwibmFtZSI6ImNvbmRkZWwiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMy4zNjhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDMuMzY4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjAzLjM2OFoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29uZGRlbD9nZW5lcmF0aW9uPTE1NTY4MzU4NDMzNjg0MDEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbmRkZWwvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbmRkZWwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb25kZGVsL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29uZGRlbCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQzMzY4NDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ05ITHE3dngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbmRkZWwvMTU1NjgzNTg0MzM2ODQwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29uZGRlbC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbmRkZWwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMD
EzLTAwMDEvby9jb25kZGVsL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29uZGRlbCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQzMzY4NDAxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05ITHE3dngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0=" - } - }, - { - "ID": "ba03d9efb1402903", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026generation=1556835843368400\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12249" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:03 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:03 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur-mcvaL-eBgBoviwBg_r8LyEEK3JNyFaj0cFy2neOMo1pVkWpkGQ-3gz8vQNtAGoX-Q7_CMYLNv_I0pOoy5iRr-MdYKny5ICdC1s3ji5DwL7sqmn0" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/conddel","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/conddel, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/conddel, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/conddel: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/conddel"}}" - } - }, - { - "ID": "e26af2cd4673cc7c", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026ifMetagenerationMatch=2\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 412, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12051" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:03 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:03 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Uopfd5TJBzhGauzgyZW0h-TNFtDHz34k0kjbeuTeQPnMGBaiSFz9FdWA2gxp7qKp-V586voh7kqHgnVSW_QI4bWEuC35pj8NbCmmpNL_9pstpCgckw" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"conditionNotMet","message":"Precondition Failed","locationType":"header","location":"If-Match","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=PRECONDITION_FAILED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=preconditionFailed, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.CONDITION_NOT_MET, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CONDITION_NOT_MET, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=null, unnamedArguments=[]}, location=headers.If-Match, message=Precondition Failed, reason=conditionNotMet, rpcCode=412} Precondition Failed: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":412,"message":"Precondition Failed"}}" - } - }, - { - "ID": "857f8a30eb55b023", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026ifMetagenerationNotMatch=1\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 304, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uprv3QMjU4zz49ScYI4YaY9FitNwO7wGMQ1KZ8Y99w4D7keznFfA8M80bMdKvSH55jsWsDoLKcQ1VvIqIEUw88NqSfUM7E5SR_y5zfDaCf_uHSlgPM" - ] - }, - "Body": "" - } 
- }, - { - "ID": "4c69a7dc19302935", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026generation=1556835843368401\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpoiZa1zqMV-8ZkeUrvT6Xi0uJul6yGWWth5s3YbFtA2p0-6vc_54sNtEbxbnsASZyMDXLelHaCSixgcVT6JDVELrXHDim9gHcjjlSTF6BsHWu181A" - ] - }, - "Body": "" - } - }, - { - "ID": "834f05b1eb2dbc32", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqMSJ9Cg==", - "TuDshcL7vdCAXh8L42NvEQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3150" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Etag": [ - "CLnS+bvx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UozIFQVqWdhJxDLMyV7dfkeraSOw2Mw_AsI_aCWnAUudrz4HjQgZ4kbnvKXJfNF3SMQhxzxZHhk1Otmw7PgPxL7HyDkPDyK1DeHDc8GyfY1B3Q3YfA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" - } - }, - { - "ID": "34a1730e8bbe1d09", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqMiJ9Cg==", - "55GZ37DvGFQS3PnkEKv3Jg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3150" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Etag": [ - "CJ2Xkrzx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoVrbP1vUH6TU_zfm_Aaca624z-91MoULnLIBcn9Hg4htv5T5Z6B4XYJMFZUuDjWWeBhR6EpG1UZL4iJe0TJybYQ2CCsB_k63CcOex39xaOIT8UYUA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyIiwibmFtZSI6Im9iajIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiQ0Mxd2x3ck1PSXEwZHZNa015bFVoZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajI/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ1MDQ5MjQ1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoialY1QVZRPT0iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9" - } - }, - { - "ID": "b7c2c8ec1e65fb6d", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqL3dpdGgvc2xhc2hlcyJ9Cg==", - "kT7fkMXrRdrhn2P2+EeT5g==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3366" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Etag": [ - "COqurrzx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrEgFAbU2gAqyKMITI9vr9kAgIBfNY3ZGs1l2_X4e-fVtNb01M9N81N-83LdChVzrk8iB09yuKUKdxRc0nMYrgy7S1fqPwbJdsQ6TJfMB05JJj6qr4" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3
dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9" - } - }, - { - "ID": "0dbae3d4b454f434", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3366" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Etag": [ - "COqurrzx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqk7CzWGAqaT12X_fTymveyPtsPJIPHD814QaAOIQLA_AJo_4OtnQOOXJM2jJhIZ1Co4NgEmboDUG9su4SN5fDTHObNh9elLts9VpUJ9iSYrIsn-Os" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3
dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9" - } - }, - { - "ID": "3793882b91d38a07", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3150" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:06 GMT" - ], - "Etag": [ - "CLnS+bvx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpzUH_BRjU20OGbDAW2dy0lx9Gk_1Ko7zks6KG6OEpq0pBPfCIBjkCeIZTxKxtvxnOd1hqlZF7SlbNoyNUqX5UFXpZY5NjP5GTeUkm512vaBR5vnDI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" - } - }, - { - "ID": "39924c2826bcd33f", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3150" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:06 GMT" - ], - "Etag": [ - "CJ2Xkrzx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrgOr-dpWoY-JJi6vRigJX3Mpi_WQ4zWvWKK382DCP-mzWSnrpbaOzijhTCdNz6pmXskVVs30APwlqZgvb-S6xAwXqKJG5n6832Py4UlTdDR_INTew" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyIiwibmFtZSI6Im9iajIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiQ0Mxd2x3ck1PSXEwZHZNa015bFVoZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajI/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ1MDQ5MjQ1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoialY1QVZRPT0iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9" - } - }, - { - "ID": "89ea9ab5e21a635e", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "9705" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:06 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:06 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqj9dj5PbUhIU3baBv98vMViDP-BnVPs0APrFYL4jSbKc7fK6eAGoyDfsccGzxK-t0lGvozizW4ltrga8DXo_oxlZ9-k5a85v9i0PWTFIisPC7xuL8" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" - } - }, - { - "ID": "b27c106562362f6b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3446" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:06 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:06 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoIo3JJa2rfg9_o3bXN_5QU6XU_iIrWYfuEuvKTQL0O5tjIHkODIDd4biyHxjbGNFnrQNbkiEUXWEBlo-3fTVgfFTOWuaPpgae-SafTBP8h5X5ddJ4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNoQnZZbW92ZDJsMGFDOXpiR0Z6YUdWeiIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImV
udGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9XX0=" - } - }, - { - "ID": "ca654b46531c4cb2", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=ChBvYmovd2l0aC9zbGFzaGVz\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3214" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:07 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqINI0Ii418NlxoYErOrjhTKb3-G3n-8h1Ryc_4YT4Tksc7WXA9m4CcJWInyhJahYC-UUrM47O5EK-3FUt3toZOZxWlbwhE6Sfnra0H-CqjmvxWyRs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNnUnZZbW94IiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEiLCJuYW1lIjoib2JqMSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY2
9tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "0186889a60e651db", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3187" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:07 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpFTK3QnFf4m9htpY4s3P9_NHuDiQl8InpJoWPOaQHo0XaYnSoSqkH7CcUbm0-sWamsCZbd4DHoXHLCrea3ff9rLpWlptkL0aMKV_Dleb1Xm-j14fQ" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "a253776f79c46fe1", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3446" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:07 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoadifz-4B7kC7DvXOyrNa5omK5-DXUNJBtT_d5I1jciPUvGlRm1L1vkvVMU1Y5n9zFig2CF_qqBLlcdvoYCaUeaNUu706L0fKJhJkA8vV5JUvFq0E" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNoQnZZbW92ZDJsMGFDOXpiR0Z6YUdWeiIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYm
plY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9XX0=" - } - }, - { - "ID": "dc8959751769ee9c", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=ChBvYmovd2l0aC9zbGFzaGVz\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3214" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:08 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:08 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqLeV9n2aiAq15pl3CqEWOt3_OYHsxbG0lHFIKr7Emh5btUEXLXl4nZWPi27LinKW7i0LYvp-HZK5b9E742MR_PNFe87treTpz_QgUmchGPKLcdZVM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNnUnZZbW94IiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEiLCJuYW1lIjoib2JqMSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY2
9tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "23f7f167269db6b2", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3187" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:08 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:08 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo4hY4jSbETgB5jf-AU8_b1sRvfTSrlt_Pt6UXiuTte4GtueVBgDwuMEliwE2-nOSiRE6juXOCbR1LQlRrpek1TLeRRf1cCVbz90YcCIGhWV_zFz0o" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "518c5855f7f2035b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "6581" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:09 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:09 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqGaiKkmr1oBduBvG8APGBVoM_Ve7zHw4TBuyV3QYcFr9SYzEnATEwE5P6BH-yBsVXSmaRLej1a55x18Bra_ZAC7nYh2UKvWM66JGE3U1muaDBabyw" - ] - }, - "Body": "{"kind":"storage#objects","nextPageToken":"CgRvYmox","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="}]}" - } - }, - { - "ID": "5887398d8cd8c906", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3187" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:09 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:09 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqGAvr7ojM3PcmnrTMTX-TXgYKzfo-3gzwYU_l9OaCfpth_ad2lWUJf6w6B8pjEGsAFZJvNueCHYGSJdx6YJ7NAe71oah1xi6lGQvQkWEwAl0fXd7Y" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTg
wNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "81d53e304c7ed90d", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "6581" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:09 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:09 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur9XEMeCefUS1K7QkIxrxZboQ1zQ5SkrEUZMzrQdNBNAuDQnpDczXzJvF-rZmxFVYSdMySScTTu-FICOdI91b-fQVRomrqAxJwgn60FoObIVVpgkog" - ] - }, - "Body": "{"kind":"storage#objects","nextPageToken":"CgRvYmox","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="}]}" - } - }, - { - "ID": "0be1bdae5ba86bfd", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3187" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:10 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:10 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UocXesX_BUCirPixQFUhYfAIoR0Os309HMGHarzTGrqH1PkmHuGaaiNSH_pJq8nnWUXEnjk1cvuPGJWCF21t7EJQtCIGBWQoPBNV6OpvALZGdpPXuI" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTg
wNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "592a3fdf8b5a83e5", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=3\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "9705" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:10 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:10 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoS2FM55Dkg1PRaNQBzsu5KBns-svmRrQ5byrUGRHgnsBQQDdE3ZznljVTrNq3wDAClddQ4zbCMQSLdqxPPNlom-n1tMg1hO8s6kS8_CTWy2SOM6As" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" - } - }, - { - "ID": "b01846866b585241", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=3\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "9705" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:10 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:10 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo22tWMx8uW6afXbfFrFT8UZzPbsqPItYMdRAPHNEuksgeqWI2US_xblUG6C8WgcHvfEeR_19eVmBTmW6QE7rmosaIm_raZxW9FuCHHiSi8TJnZ1CI" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gs
erviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1
","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" - } - }, - { - "ID": "2eeefee032c6e805", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=13\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - 
"Content-Length": [ - "9705" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:11 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:11 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq55nB00t16AHTo1gSxBFUNplz5SJcGYBOeCPsK-MD5OLO_kHKT3YNZq69AAHpozaWVrTU_jDQqiqMFSoPhhmlF2dR1mcq6Ihk_y8uuFY6FM4O_hmI" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@d
eklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gservicea
ccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" - } - }, - { - "ID": "11bfe17bc978cdfd", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=13\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - 
"Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "9705" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:11 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:11 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UotgN85lDspseXEgwnXhmfSmio9oOxSzhqtyabApVH0KQz_aVg3DbQ7m0Z0L9SzPzzmNEUfU6xhAT5xQAXf-wvY1NHDFxdf9IJ6YoVNhN0aHN-75hY" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" - } - }, - { - "ID": "c05bfa6f1a43381b", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:11 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:11 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - 
], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrVVjXVa2SMY6cjYeIgdcZOivQwE2Crz6UPZs2VXVBFMwReZs7kKbETuTMwuEMHKKm_LIrk-o6jS07MimubSWVxGMzT1BtCacU1X2Go_RckmyJPZso" - ] - }, - "Body": "TuDshcL7vdCAXh8L42NvEQ==" - } - }, - { - "ID": "f2932604385db16e", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrADckAOw2vMHdy0qy02f20x-8s_Ue81-NYwSpKeqp6Zx6eCPReLM6qXUyxFz0xHNGob5WJA8J4ctl6ZFzL2fEVwBMNY8xxzpkKOAwSu9n9e0OE63E" - ] - }, - "Body": "TuDshcL7vdCAXh8L42NvEQ==" - } - }, - { - "ID": "8581afc6216ad2cb", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj2", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"082d70970acc388ab476f32433295486\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:05 GMT" - ], - "X-Goog-Generation": [ - "1556835845049245" - ], - "X-Goog-Hash": [ - "crc32c=jV5AVQ==", - "md5=CC1wlwrMOIq0dvMkMylUhg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - 
"success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrkoYpGTn57WC5t-sItsEdWimp47AtSuqw8autFOD3TirChrdQThg_Fh-kykfgvnTaOyiw1InKeYs0Z2MISmjUpu4uHUUGDKTX6W0zQEuUks7i2BqQ" - ] - }, - "Body": "55GZ37DvGFQS3PnkEKv3Jg==" - } - }, - { - "ID": "ae129e597d5dd3e8", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj2", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"082d70970acc388ab476f32433295486\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:05 GMT" - ], - "X-Goog-Generation": [ - "1556835845049245" - ], - "X-Goog-Hash": [ - "crc32c=jV5AVQ==", - "md5=CC1wlwrMOIq0dvMkMylUhg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoaoET13zBO6-kmfarxy5tCZhmqc2oO38xOd6m6OQ55e-MyyKYM35hNakm5xYdWaoUNsKwVCiklp4HtYE8afVObxcDERuTCgyTw-olewCN6VBwd-H0" - ] - }, - "Body": "55GZ37DvGFQS3PnkEKv3Jg==" - } - }, - { - "ID": "11acf26dc7680b3e", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj/with/slashes", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"b9ecff849eb002b78342771c0d47717e\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:05 GMT" - ], - "X-Goog-Generation": [ - "1556835845511018" - ], - "X-Goog-Hash": [ - "crc32c=oeo+FA==", - "md5=uez/hJ6wAreDQnccDUdxfg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up_XtsS2bu4fnGI-Z85gqz1MECYqweRqin42arAyEfpJpZwmadI1-Op9V7n-q3UaYyRtUWFR7an7zKxWhJ_CB6PXz-C55C1nPoI7EWVEV2oXcS-q8c" - ] - }, - 
"Body": "kT7fkMXrRdrhn2P2+EeT5g==" - } - }, - { - "ID": "ff72204b0c538268", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj/with/slashes", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"b9ecff849eb002b78342771c0d47717e\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:05 GMT" - ], - "X-Goog-Generation": [ - "1556835845511018" - ], - "X-Goog-Hash": [ - "crc32c=oeo+FA==", - "md5=uez/hJ6wAreDQnccDUdxfg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqrjLdnTX47-_XGNBlBZ0rEgsIkYJMzixBaVmhjn8IsK66UpQAUO-njMM-WznI-DBNIbXVTcPQKZBNe2ZHZEskfuRjsFwMjtooyUH_PqTfZnARVv8M" - ] - }, - "Body": "kT7fkMXrRdrhn2P2+EeT5g==" - } - }, - { - "ID": "7972502f866e015e", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Range": [ - "bytes=0-15" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoNiJIZYFETpngMkDItRU7PtN7Fv4bDU7Kfvndst1JR2eGmW-tOMTfng8Abx6EWwPvvodLkSk-1TM4Gywxzh3c24gPDN4NsQMuh40Mfp0Jvg1syOls" - ] - }, - "Body": "TuDshcL7vdCAXh8L42NvEQ==" - } - }, - { - "ID": "251a62254c59cbd1", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Range": [ - 
"bytes=0-7" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 206, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "8" - ], - "Content-Range": [ - "bytes 0-7/16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UobLFpkqodTM1bHuzS0GzRCKZypPQkf3nH1hbOexCjgjAcheZhV_zxJRyLhOYZoW8ABEaVMJLULNrzAm1VZs4JrTdtT46fYIcQe7OBeJp0aHI7Lr5s" - ] - }, - "Body": "TuDshcL7vdA=" - } - }, - { - "ID": "2482617229166e50", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Range": [ - "bytes=8-23" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 206, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "8" - ], - "Content-Range": [ - "bytes 8-15/16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uon8ZtKgOTqFm36bWHd3hf535jPGQBEX9j2yPrr11_ycAJjfI4A-mFWMTsFRR3nwSo68784B9TUPUjQd0cib0QIrfBjfDcz91p6oPJG3VpV9afywIY" - ] - }, - "Body": "gF4fC+NjbxE=" - } - }, - { - "ID": "74ff0c5848c7dba6", - "Request": { - "Method": "HEAD", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - 
"Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqKc-08ZJEWVbyaV84xKlnrHkl2PBuBUQP6xtx6TzZW0fsjyZ-SHshiy-QMGCuP1UF4x1ettHyDvRbAleHIXy4y7wMwoUWST2NKs_0oRA0oVqOoKtQ" - ] - }, - "Body": "" - } - }, - { - "ID": "90888164f5d2d40d", - "Request": { - "Method": "HEAD", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:13 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoTSlg3r_LSoiqQyJVJDzL-0xNj6jlSNp9zLUmlPtu3xbhCLBDNAxY9CRbyEjw7UudDcsFOe43C3QlsVjoBJln1q179ZspYWXY-fHqjrztAfJkZUpU" - ] - }, - "Body": "" - } - }, - { - "ID": "7961f3dc74fa0663", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Range": [ - "bytes=8-" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 206, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "8" - ], - "Content-Range": [ - "bytes 8-15/16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], 
- "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:13 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqJjC8HFLb8u_EaJVF0_TUOhNWhbooW0oANE8_LPjoIdLcoU42caZe7LjgbTIjCb0Ss3horP2noxirF3u3FGq0Yoh4rjuHBVhwZcemZHnKLgJUUbQQ" - ] - }, - "Body": "gF4fC+NjbxE=" - } - }, - { - "ID": "af8c1fcfa456592a", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Range": [ - "bytes=0-31" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:13 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqq6ro5-i6q8pIDup0yW35uFEynuLK95P8OSFhj7b70DK33tjv3fkOdkobsfwplOAGuNME2bLjQuyy8mhd2xBbNjyjvXt13Nc48PB4M2t_46RoM3Ak" - ] - }, - "Body": "TuDshcL7vdCAXh8L42NvEQ==" - } - }, - { - "ID": "d312c241da5bd348", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Range": [ - "bytes=32-41" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 416, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "167" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:13 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 
01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpAvzLs-GgdTPEGM2kvABFTxehSb-DhZQ2Y31nQ9ad0RMmfIMtVGRBXvkXo7FwGAy78ZHMSGWRKG-DYMy6JU9PCM4Tpo8PDmm4-rHa_LVpH2dFN-JM" - ] - }, - "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+SW52YWxpZFJhbmdlPC9Db2RlPjxNZXNzYWdlPlRoZSByZXF1ZXN0ZWQgcmFuZ2UgY2Fubm90IGJlIHNhdGlzZmllZC48L01lc3NhZ2U+PERldGFpbHM+Ynl0ZXM9MzItNDE8L0RldGFpbHM+PC9FcnJvcj4=" - } - }, - { - "ID": "2bbd010d0173c966", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3150" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], - "Etag": [ - "CLnS+bvx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpZDScoUU4zzBoXc21jt6C1vcW1SrU6ELiWodrCID-OoNKVMbecv_DSgHZATQGs4GS-BebkzqdalqTq5C77aKXQMxiks-9A5kyrc8N4aY9L5YndaI8" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" - } - }, - { - "ID": "e3522e1cc7aef940", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2570" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], - "Etag": [ - "CAY=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur5x_r1PKMQrnKbsfUWRR6wBwFQTM8U9mGwyq_fK56rrHE_UoLDxLRpiuaFGLVi9L4Hj67UXH76drA6KQQQgOsMVo0YEl1pVS2P7OKQ64WF4NubRCU" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMi4zNjFaIiwibWV0YWdlbmVyYXRpb24iOiI2IiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FZPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQVk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBWT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBWT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQVk9In0=" - } - }, - { - "ID": "77ed46692038573c", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3333" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:14 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpBnZgUt8GyAer5VG-_nEuf3YizRjF2t4MS6vOflI_Vid1-zVpQ99TuphB3pZjPZNI5ZoHJxCmDv3lyLwyhUDvk_p_NO9x8qZTEpjf1EOO5uxRsOxo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTYiLCJvYmplY3RTaXplIjoiMTYiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDE0NDc5MiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMSIsIm5hbWUiOiJjb3B5LW9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNC4xNDRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTQuMTQ0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE0LjE0NFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NTQxNDQ3OTImYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb3B5LW9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF
0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0MTQ0NzkyIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0ppcXZjRHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvcHktb2JqMS8xNTU2ODM1ODU0MTQ0NzkyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb3B5LW9iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb3B5LW9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0MTQ0NzkyIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0ppcXZjRHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In19" - } - }, - { - "ID": "9187dbb998c64d40", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "31" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJjb250ZW50RW5jb2RpbmciOiJpZGVudGl0eSJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3299" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:14 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - 
"X-Guploader-Uploadid": [ - "AEnB2UpyrF7U9KSc3XcJb-T_KGf3Fg2yhFUjJqTl06wpqqhG5IT6chgLCWKnLTuamfHtVq8XcP7acZy4TGyKIEvZ4L36TGTfSM8Zp_JyF8K4rN5p375VBMc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTYiLCJvYmplY3RTaXplIjoiMTYiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0MyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMSIsIm5hbWUiOiJjb3B5LW9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDc0NzU0MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNC43NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTQuNzQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE0Ljc0N1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NTQ3NDc1NDMmYWx0PW1lZGlhIiwiY29udGVudEVuY29kaW5nIjoiaWRlbnRpdHkiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb3B5LW9iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0NzQ3NTQzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29weS1vYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTQ3NDc1NDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmVQNHNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQ3NDc1NDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0NzQ3NTQzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yY
WdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29weS1vYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTQ3NDc1NDMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmVQNHNEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkNUNmRUQT09IiwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifX0=" - } - }, - { - "ID": "c199412d75fadc45", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "193" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJhY2wiOlt7ImVudGl0eSI6ImRvbWFpbi1nb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiJ9XSwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJjb250ZW50VHlwZSI6InRleHQvaHRtbCIsIm1ldGFkYXRhIjp7ImtleSI6InZhbHVlIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2046" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:15 GMT" - ], - "Etag": [ - "CLnS+bvx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqKO5A_yihnTzTSNbXxyLm-I3MkyC58APsD1U6RZ5_xpjjjL3nXVZIyACkeiDKXRZyU_oP1Yt0FeX23ONp6JeNyoy0J0kW8GwGMgPeN1ATdWGzCMXs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNS4yMjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJtZXRhZGF0YSI6eyJrZXkiOiJ2YWx1ZSJ9LCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6ImRvbWFpbi1nb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiIsImRvbWFpbiI6Imdvb2dsZS5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBST0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFJPSJ9" - } - }, - { - "ID": "c845025158c6b7d0", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "120" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOm51bGwsImNvbnRlbnRUeXBlIjpudWxsLCJtZXRhZGF0YSI6bnVsbH0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, 
no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "1970" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:15 GMT" - ], - "Etag": [ - "CLnS+bvx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UryOVdmGzCqBOS2ZZ0nw1SqbHF7qJ1CeTxgb6a1BJyoA42NTNZ8RdEVQGNF5u2hWdSQW79cBOonxjms_MoogyDJVAtKHl1rys87QHF6-750NNNSoEw" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTUuNTI3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFNPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQU09In0=" - } - }, - { - "ID": "811b0b9d4980b14f", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiY2hlY2tzdW0tb2JqZWN0In0K", - "aGVsbG93b3JsZA==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3305" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:16 GMT" - ], - "Etag": [ - "CIGhrMHx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqJTh_P91pby-vQkRF12GexDihy7TZAlUPamDV_REldvXeqvbrWY_kB0Vcp1lYyuIY7EK2_eMBYYSRMMIVEYz5fxxt51-9Br_UmYvnuRgjhfC16AuA" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jaGVja3N1bS1vYmplY3QvMTU1NjgzNTg1NTk2MjI0MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdCIsIm5hbWUiOiJjaGVja3N1bS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNS45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTUuOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE1Ljk2MVoiLCJzaXplIjoiMTAiLCJtZDVIYXNoIjoiL0Y0RGpUaWxjRElJVkVIbi9uQVFzQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdD9nZW5lcmF0aW9uPTE1NTY4MzU4NTU5NjIyNDEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY2hlY2tzdW0tb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjaGVja3N1bS1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWlud
GVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNoZWNrc3VtLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU1OTYyMjQxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0lHaHJNSHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NoZWNrc3VtLW9iamVjdC8xNTU2ODM1ODU1OTYyMjQxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jaGVja3N1bS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjaGVja3N1bS1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNoZWNrc3VtLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU1OTYyMjQxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0lHaHJNSHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJWc3UwZ0E9PSIsImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0=" - } - }, - { - "ID": "69c77b8bdd7cc643", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiemVyby1vYmplY3QifQo=", - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3240" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:16 GMT" - ], - "Etag": [ - "CLirz8Hx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - 
"X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur2iXVwACD0yFKYjt0WT-lW1Tx6PtpOgDYttPBWBnJZ3CPf4cUK7heI_9SwdzYXoieaRHDj9n3w3M_SExedwQqQ-GTOY9qM9DPmrPy11hEny0WjECc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLW9iamVjdC8xNTU2ODM1ODU2NTM3MDE2Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3QiLCJuYW1lIjoiemVyby1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNi41MzZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTYuNTM2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE2LjUzNloiLCJzaXplIjoiMCIsIm1kNUhhc2giOiIxQjJNMlk4QXNnVHBnQW1ZN1BoQ2ZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3Q/Z2VuZXJhdGlvbj0xNTU2ODM1ODU2NTM3MDE2JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3QvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8tb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTY1MzcwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8tb2JqZWN0L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVyby1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMaXJ6OEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLW9iamVjdC8xNTU2ODM1ODU2NTM3MDE2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvLW9iamVjdC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8tb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTY1MzcwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9LHsi
a2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8tb2JqZWN0L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVyby1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMaXJ6OEh4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQUFBQUFBPT0iLCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9" - } - }, - { - "ID": "1017883279ca634a", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/allUsers?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "98" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJhbGxVc2VycyIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "417" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:17 GMT" - ], - "Etag": [ - "CLnS+bvx/eECEAQ=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoKqYaaFUj6b4ezS4lnnQgYv4CHVSsqhVSlLP3QqpItNahh25KnLzbTccK_idrfjKV0gExzr17MvFmoRw7oUIWyJJINmpEXEw5EmHScxipsb3KbWZ8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L2FsbFVzZXJzIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvYWxsVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJhbGxVc2VycyIsInJvbGUiOiJSRUFERVIiLCJldGFnIjoiQ0xuUytidngvZUVDRUFRPSJ9" - } - }, - { - "ID": "35f04bf2400be96f", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - 
"Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:24:17 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:17 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "4" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqC9teasrZdeHJo8KLaQQ251d02ORMxXreH3TKcaQp4NWLVbpiAw1GpW9WeSdFgbpWtVDdyzfjE93gAs6uA0kdlDPZsIEfJVK3GFfaZE2aW5aFCn8Y" - ] - }, - "Body": "TuDshcL7vdCAXh8L42NvEQ==" - } - }, - { - "ID": "d01040504d2a91c4", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoib2JqMSJ9Cg==", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 401, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "30343" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:17 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "Www-Authenticate": [ - "Bearer realm=\"https://accounts.google.com/\"" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqRhLZOcm0K7eNByUzCBgYfgiytuZ6Hj06jPkbkK77LS80OPgO_78AL530-2AcrlLe1fIy0jM0tonLLncLuOszrqKcGgVhqEJuE48-67vqBRACjqmY" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.","locationType":"header","location":"Authorization","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={WWW-Authenticate=[Bearer realm=\"https://accounts.google.com/\"]}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.auth.AuthenticatorInterceptor.addChallengeHeader(AuthenticatorInterceptor.java:269)\n\tat com.google.api.server.auth.AuthenticatorInterceptor.processErrorResponse(AuthenticatorInterceptor.java:236)\n\tat com.google.api.server.auth.GaiaMintInterceptor.processErrorResponse(GaiaMintInterceptor.java:768)\n\tat com.google.api.server.core.intercept.AroundInterceptorWrapper.processErrorResponse(AroundInterceptorWrapper.java:28)\n\tat com.google.api.server.stats.StatsBootstrap$InterceptorStatsRecorder.processErrorResponse(StatsBootstrap.java:315)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.handleErrorResponse(Interceptions.java:202)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.access$200(Interceptions.java:103)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:144)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:137)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.setException(AbstractFuture.java:753)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:68)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\nCaused by: com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: 
[Remainder of a deleted test-fixture file — a recorded-HTTP-replay JSON log (apparently from a vendored dependency) of Google Cloud Storage integration-test traffic against bucket go-integration-test-20190502-80633403432013-0001. The hunk opens midway through a 401 error body ("Anonymous caller does not have storage.objects.create access"), whose Bigstore debugInfo stack trace is repeated verbatim several times, and then removes one JSON entry per recorded exchange — each with an ID, the full request (Method, URL, Header, MediaType, BodyParts) and response (StatusCode, Header, Body), bodies stored either literally or base64-encoded:

- DELETE .../o/copy-obj1 → 204; a second DELETE and a GET of the same object → 404 "No such object", again with the debugInfo stack trace repeated verbatim;
- POST .../o/composed1/compose and .../o/composed2/compose → 200, followed by GETs of both composed objects (48 bytes each, crc32c AbWByQ==);
- a multipart upload of gzip-test with contentEncoding gzip, and its download;
- GET .../obj-not-exists → 404 with an XML NoSuchKey error body;
- a multipart upload of signedURL (text/plain, Cache-Control: public, max-age=60);
- PUT then GET of the bucket's defaultObjectAcl, granting domain-google.com READER;
- a multipart upload of acl1, whose base64 response body is cut off at the section boundary.]
5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkZpRG1WZz09IiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "1d3df0d90130f5fe", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWNsMiJ9Cg==", - "HSHw5Wsm5u7iJL/jjKkPVQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:24 GMT" - ], - "Etag": [ - "CJrxw8Xx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpMl0_NB41LujALQWmul06CCiuw7okuKlhYG_qilsoXlld2qoYgc2-ABYogriA37QOJMK5ZlJ5sYzP2Mp7CTSzK9uccFLi10TdrGFMUjUpD0OvcXrE" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wyLzE1NTY4MzU4NjQ3Mzc5NDYiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wyIiwibmFtZSI6ImFjbDIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI0LjczN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyNC43MzdaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjQuNzM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJjOStPL3JnMjRIVEZCYytldFdqZWZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMj9nZW5lcmF0aW9uPTE1NTY4MzU4NjQ3Mzc5NDYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0NzM3OTQ2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0pyeHc4WHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDIvMTU1NjgzNTg2NDczNzk0Ni9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMi9hY2wvZG9tYWluLWdvb2dsZS
5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQ3Mzc5NDYiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNKcnh3OFh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wyLzE1NTY4MzU4NjQ3Mzc5NDYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDIvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQ3Mzc5NDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkF0TlJ0QT09IiwiZXRhZyI6IkNKcnh3OFh4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "a270daf4f72d9d3d", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2767" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:25 GMT" - ], - "Etag": [ - "COr+pcXx/eECEAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:25 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UranlihhkPcnPzTT2UYs4r6Hritk60lULu-pzO6YX-EWIlMlFiXzKmxaZVgOcPfnCgfJsRktKsL_TbFun1PfupraBREfYklCvRXKvJ_526gW-Jf9zs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0MjQ4MTcwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09yK3BjWHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDEvMTU1NjgzNTg2NDI0ODE3MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZS
I6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "328125b2bc991e95", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:25 GMT" - ], - "Etag": [ - "COr+pcXx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UosP78wuW4e1mPLJgcCbCxBC4Je5LWjKugd0fVK51u-P3qeXbQXj4_Amo25ZHyut2LZvoipvmK95xEvebSyVyzVydKyU2VSbHLDkwnRpR-nr9UGIf0" - ] - }, - "Body": "" - } - }, - { - "ID": "1c7595376917851f", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:26 GMT" - ], - "Etag": [ - "CAg=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqnZXiLrdCFoctnkKUqdvXLdSp9xumFk20gsRJc4xB1CTZ14aLZYwbDJmi90E28Q2_E4klaJBOaB1OCcpbDBoF1N9E9Bk3VEaNYYrHKMoECuO7RMPs" - ] - }, - "Body": "" - } - }, - { - "ID": "4183198b151a3557", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl/user-jbd%40google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "109" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJ1c2VyLWpiZEBnb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "386" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:27 GMT" - ], - "Etag": [ - "CAk=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo361-YkcyyOvd5CL4q3LgTM1Bk4RdkPY3cn-qOm6Dn0vghTnmIE9VKkzGiOnjRNnD7SWXGMqpxaCGvCt6P44D55PqOwAtVTF8PNwmEK9HWCoVXABo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvdXNlci1qYmRAZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvdXNlci1qYmRAZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InVzZXItamJkQGdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJqYmRAZ29vZ2xlLmNvbSIsImV0YWciOiJDQWs9In0=" - } - }, - { - "ID": "e98f53a71d8839c9", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "1789" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:28 GMT" - ], - "Etag": [ - "CAk=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:28 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrqACfvLjoBUrCN_6ksyMdPlv7yPWQGo1D9u1UiCCgFDRlEwr6NOp18BSxaeTuMIabf2ciNj7_Ba1W6YCz64HlcYyQOoEXNfGYGKcAqGTubXsGHPVk" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FrPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQWs9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQWs9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvdXNlci1qYmRAZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvdXNlci1qYmRAZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InVzZXItamJkQGdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJqYmRAZ29vZ2xlLmNvbSIsImV0YWciOiJDQWs9In1dfQ==" - } - }, - { - "ID": "28b9a29086758e65", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl/user-jbd%40google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:28 GMT" - ], - "Etag": [ - "CAo=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - 
"X-Guploader-Uploadid": [ - "AEnB2UozsFdtI43Ltv91E18-CrdRZ4pQDVZXReJOVhpWDu3mxNA7rrWU7hNGl4kPMd0FgcVWDigZA5JBNsYbFV113Ew6Cp2uTsuk0uI1io_mhA6-Lmd2h18" - ] - }, - "Body": "" - } - }, - { - "ID": "8ef9f7b6caefd750", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiZ29waGVyIn0K", - "ZGF0YQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3196" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:29 GMT" - ], - "Etag": [ - "CIXH3cfx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo97hrh9l4pmrJQqaH8Qu3axsdbEPH2Y8GSEso8-ExKuf4ot8o2uS79ROJcgCtql4vLQJ_4L8q0Z7E9E2WYbbl6vKUYKdQiCI0POisS9o_ViNh9-Yk" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2dvcGhlciIsIm5hbWUiOiJnb3BoZXIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2OTM1MjgzNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOS4zNTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjkuMzUyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI5LjM1MloiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJqWGQvT0YwOS9zaUJYU0QzU1dBbTNBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyP2dlbmVyYXRpb249MTU1NjgzNTg2OTM1MjgzNyZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9nb3BoZXIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImdvcGhlciIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5MzUyODM3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vz
c0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiZ29waGVyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjkzNTI4MzciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSVhIM2NmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ29waGVyLzE1NTY4MzU4NjkzNTI4MzcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2dvcGhlci9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImdvcGhlciIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5MzUyODM3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiZ29waGVyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjkzNTI4MzciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSVhIM2NmeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InJ0aDkwUT09IiwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "ae3a5920bc8a9c3c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoi0JPQvtGE0LXRgNC+0LLQuCJ9Cg==", - "ZGF0YQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3548" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:29 GMT" - ], - "Etag": [ - "CPzK+8fx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 
GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrPLZGQqU8j04JqxMmfsqas3HGTlrdx5NM99YR5nCedJYGMSaZ1ynvd2UbUcdwps_gfRo__0XapzHuD3hM_bshff65qlnw_i2cOwp97N2EhfUsZ1X4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS/Qk9C+0YTQtdGA0L7QstC4LzE1NTY4MzU4Njk4NDQ4NjAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgiLCJuYW1lIjoi0JPQvtGE0LXRgNC+0LLQuCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5ODQ0ODYwIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI5Ljg0NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOS44NDRaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjkuODQ0WiIsInNpemUiOiI0IiwibWQ1SGFzaCI6ImpYZC9PRjA5L3NpQlhTRDNTV0FtM0E9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjg/Z2VuZXJhdGlvbj0xNTU2ODM1ODY5ODQ0ODYwJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL9CT0L7RhNC10YDQvtCy0LgvMTU1NjgzNTg2OTg0NDg2MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ItCT0L7RhNC10YDQvtCy0LgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2OTg0NDg2MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEv0JPQvtGE0LXRgNC+0LLQuC8xNTU2ODM1ODY5ODQ0ODYwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiLQk9C+0YTQtdGA0L7QstC4IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Njk4NDQ4NjAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEv0JPQvtGE0LXRgNC+0LLQuC8xNTU2ODM1ODY5ODQ0ODYwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ
2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiLQk9C+0YTQtdGA0L7QstC4IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Njk4NDQ4NjAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1B6Sys4ZngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL9CT0L7RhNC10YDQvtCy0LgvMTU1NjgzNTg2OTg0NDg2MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vJUQwJTkzJUQwJUJFJUQxJTg0JUQwJUI1JUQxJTgwJUQwJUJFJUQwJUIyJUQwJUI4L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoi0JPQvtGE0LXRgNC+0LLQuCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5ODQ0ODYwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1B6Sys4ZngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJydGg5MFE9PSIsImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0=" - } - }, - { - "ID": "0104a746dbe20580", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYSJ9Cg==", - "ZGF0YQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3116" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:30 GMT" - ], - "Etag": [ - "COKVlMjx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqY12miALkWDvx8PHEyL23oxWZlw8Jq7Cn6kS8E8_Tn1tTWWoTuS_pWb5pi9qevCIvqK6aTK56MzwrMuy9axIjpw5yyMZ42MaWK_YFkBr95OnK5IbM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hLzE1NTY4MzU4NzAyNDc2NTAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hIiwibmFtZSI6ImEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMC4yNDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzAuMjQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMwLjI0N1oiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJqWGQvT0YwOS9zaUJYU0QzU1dBbTNBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYT9nZW5lcmF0aW9uPTE1NTY4MzU4NzAyNDc2NTAmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2EvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODcwMjQ3NjUwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09LVmxNangvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2EvMTU1NjgzNTg3MDI0NzY1MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hL2FjbC91c2VyLWFub3RoZXItdGhpbm
dAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODcwMjQ3NjUwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09LVmxNangvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJydGg5MFE9PSIsImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0=" - } - }, - { - "ID": "da19af085be0fb47", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYSJ9Cg==", - "ZGF0YQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "19484" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:30 GMT" - ], - "Etag": [ - "CLmmusjx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrAJr55dqTDkY_jBKB0vAWyE0owtzB_kEoH91_FKTr9C3bHd8fWUiTrsruL5mgwXg2l8gbPiOXad-A9HIJ3Zt_AeXq_p0m-Q8LrHQqJ6sAKsbzV4Zw" - ] - }, - "Body": 
"{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","name":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835870872377","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:30.872Z","updated":"2019-05-02T22:24:30.872Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:30.872Z","size":"4","md5Hash":"jXd/OF09/siBXSD3SWAm3A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?generation=1556835870872377&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
[Deleted replay-fixture hunk, condensed. The removed recordings, all against bucket go-integration-test-20190502-80633403432013-0001, cover:
  * the tail of the full-projection ACL metadata for an object whose name is 1024 repeated "a" characters (generation 1556835870872377): project-owners-496169601714 and project-editors-496169601714 as OWNER, project-viewers-496169601714 as READER, and user-another-thing@deklerk-sandbox.iam.gserviceaccount.com as OWNER;
  * multipart POST 96f915707a76c50c, rejected 400 "Required" because the metadata part omits the object name;
  * multipart POST 7a562d6e65ef8a7a, rejected 400: "The maximum object length is 1024 characters, but got a name with 1025 characters";
  * multipart POST 239b642e1919a84a, rejected 400: "Disallowed unicode characters present in object name ''new\nlines''";
  * DELETE requests 3cd795cb0b675f98, 361d4541e2cb460a, a4e929446e49411d, and 89b2ef62a071f7b9 (each 204), removing the 1024-character object, "a", "Гоферови" (percent-encoded in the URL), and "gopher".
Request metadata and error bodies are stored as base64-encoded JSON in the original fixture.]
GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrRgapcSgK1GOswXS37Vynb9BRj0go8bNPOBtrFZ3TDr-nzIdlvKBQuwHtXl4Tlb-mLl-A65zL5Iw3unCEuy6lwtWl5-V7REtcIVHkrWLK5AoLoHqA" - ] - }, - "Body": "" - } - }, - { - "ID": "ca91b2e4ee4555cb", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiY29udGVudCJ9Cg==", - "SXQgd2FzIHRoZSBiZXN0IG9mIHRpbWVzLCBpdCB3YXMgdGhlIHdvcnN0IG9mIHRpbWVzLg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3213" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:32 GMT" - ], - "Etag": [ - "CLGNmsnx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UovWW5HSrWbcvQoWGU3c_n64wwcau06tjEM-fzdADFSbmmylG3VLjae6MRNIM4B9gDitCKfjo1_xPQNAZEmMYzRplVmdI4cIrBC8ursFSUywoCSJeU" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMi40NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzIuNDQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMyLjQ0MVoiLCJzaXplIjoiNTIiLCJtZDVIYXNoIjoiSzI4NUF3S1dXZlZSZEJjQ1VYaHpOZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODcyNDQyMDMzJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG
9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiRmNYTThRPT0iLCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9" - } - }, - { - "ID": "eb678345630fe080", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3213" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:32 GMT" - ], - "Etag": [ - "CLGNmsnx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrtPOErXGrnlQtXCV1PhgO5nz80m75Fzs7DoR-y8Ava3jvL9NDJ4L-i8SIV_tktR8VqCvv9uh-X1ltJCj3isa_oY8TlV-OsZeIiESuDrRTYueQWQP0" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMi40NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzIuNDQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMyLjQ0MVoiLCJzaXplIjoiNTIiLCJtZDVIYXNoIjoiSzI4NUF3S1dXZlZSZEJjQ1VYaHpOZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODcyNDQyMDMzJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG
9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiRmNYTThRPT0iLCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9" - } - }, - { - "ID": "d76ba152a62e7dc1", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiY29udGVudCJ9Cg==", - "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3212" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:33 GMT" - ], - "Etag": [ - "CLPJv8nx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqk5XoKCyt74JBpeU8oREfzh3e0LLtXupBjcWtsFfHNZDEn4DjcmzWo6resyQ1gEisBp_STlU4hVaU3MbTyX3LM443_Gsu4ouZGdf3ZTvTzsLD_zNw" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzMwNTU5MjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjA1NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy4wNTVaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuMDU1WiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzMwNTU5MjMmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MzA1NTkyMy9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3
JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0=" - } - }, - { - "ID": "f785ed7f0490bb6d", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3212" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:33 GMT" - ], - "Etag": [ - "CLPJv8nx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrCqpaad9mhxk7VZgTc8xIunote5-AHPp3vYkUbSU8uiVcq65rcnrbtIuCqJ63JTuVJvKw7pFwnLjyn5fL37JzfXLhmLAowAV67_Qqtxxhiy4WQars" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzMwNTU5MjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjA1NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy4wNTVaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuMDU1WiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzMwNTU5MjMmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MzA1NTkyMy9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3
JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0=" - } - }, - { - "ID": "096d156b863922d6", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvaHRtbCIsIm5hbWUiOiJjb250ZW50In0K", - "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3197" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:33 GMT" - ], - "Etag": [ - "CPW+6cnx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoivaSzjVdOm2KP4nXCF8VQDNHXmlNjm9rZwTaWzjvBmH3oK01QYTnHqZ5DhYRewu_1H0KoQgFpUIC-M4OKZoOhRHrq0-H8HLT__SqD1D5cmezrqHo" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzM3NDI3MDkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjc0MloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy43NDJaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuNzQyWiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzM3NDI3MDkmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3Mzc0MjcwOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbn
RlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0=" - } - }, - { - "ID": "0e96d304e9558094", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3197" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:34 GMT" - ], - "Etag": [ - "CPW+6cnx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoc-8JwtQprLtec5Ft6q3z4p0ooDaCCLmO9oVgrOXvBpoFhApztj8anfFwEcHdzpB6FBlUFqou0viF_HLHJvHolsxeCCoDOhqyIggOOlWjnSRg8XzY" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzM3NDI3MDkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjc0MloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy43NDJaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuNzQyWiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzM3NDI3MDkmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3Mzc0MjcwOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbn
RlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0=" - } - }, - { - "ID": "b4c8dedc10a69e07", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6ImltYWdlL2pwZWciLCJuYW1lIjoiY29udGVudCJ9Cg==", - "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3198" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:34 GMT" - ], - "Etag": [ - "CM/Yjsrx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpC3UBVkNENjfRoaL2M1fPRh-iTDTwCsgF8O3TOX_iBuXcDxMjOoTmrIhD4cE5g1s7PObP66TZB1lecFypnLz8hL-aowsOxUMuL0KXJvrgoGxKHUFg" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiaW1hZ2UvanBlZyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNC4zNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzQuMzUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM0LjM1MVoiLCJzaXplIjoiNTQiLCJtZDVIYXNoIjoiTjhwOC9zOUZ3ZEFBbmx2ci9sRUFqUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODc0MzUyMjA3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiR29VYnNRPT0iLCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9" - } - }, - { - "ID": "c45640ec17230f64", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3198" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:34 GMT" - ], - "Etag": [ - "CM/Yjsrx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrvqLsOaMe2H6Xh3MyDCOuHmLzU1fn_Zh6kHLkpTpn4EWj3nFkeROu1g-cABD151cTh8YmY798iEku-Q3TnZIMZ5mexmHiCJKdUhAolbSd9b_apMK8" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiaW1hZ2UvanBlZyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNC4zNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzQuMzUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM0LjM1MVoiLCJzaXplIjoiNTQiLCJtZDVIYXNoIjoiTjhwOC9zOUZ3ZEFBbmx2ci9sRUFqUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODc0MzUyMjA3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiR29VYnNRPT0iLCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9" - } - }, - { - "ID": "752ab630b062d61e", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiY3VzdG9tZXItZW5jcnlwdGlvbiJ9Cg==", - "dG9wIHNlY3JldC4=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3482" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:35 GMT" - ], - "Etag": [ - "CPjnucrx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpaWw6jJMKNOoCNMXwJNVVsxqzPhGfMo-5g3krJ7A8_PSGTx4W2OakfmtDvzOpg1hexLJcsWunNHfyWVXsjBpM58dtQlM6VucQW4Uxndlb9RIp0UhM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbW
VyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoicjBOR3JnPT0iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" - } - }, - { - "ID": "3f4d8fd0e94ca6de", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3425" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:35 GMT" - ], - "Etag": [ - "CPjnucrx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoO-NbHUptMgzQPx9zApWGCeqMgfkk2mt6PJl1iWbq8ocUYq_0ud8gPuj9bcBTBOBdqewKzSouddziC-BFLoOdQJ6ElkN136q5y39ZRDN4zq6UWLUs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbj9nZW5lcmF0aW9uPTE1NTY4MzU4NzUwNTg2ODAmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3VzZXItYW
5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" - } - }, - { - "ID": "0355971107342253", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3482" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:35 GMT" - ], - "Etag": [ - "CPjnucrx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UryZM2k28xNlFIDnXCH6SJ8RA2lDVNIx-5_eYRExB4-D1-dg_3fWvU3AREwoow4DvlYN4eyzA1AthWs5y0tYZEzvamBoZufkrXPmD64aT8kzAatns8" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbW
VyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoicjBOR3JnPT0iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" - } - }, - { - "ID": "8f8c7cba5307c918", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3448" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:36 GMT" - ], - "Etag": [ - "CPjnucrx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upnljw7UWMuKFeJUJp_gu-lyfzDRVTwgInddze_9zF5waX4glKKpcRrGpe50cvS8dV_xbbKLQa-CqAMVxlEp7qIHsmTdUR1amjrq0w9U_qrn4gYwSw" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuOTIwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbj9nZW5lcmF0aW9uPTE1NTY4MzU4NzUwNTg2ODAmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLz
E1NTY4MzU4NzUwNTg2ODAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUGpudWNyeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUk9IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJIK0xtblhoUm9lSTZUTVc1YnNWNkh5VWs2cHlHYzJJTWJxWWJBWEJjcHMwPSJ9fQ==" - } - }, - { - "ID": "fc766ddbbfcd9d8a", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3505" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:36 GMT" - ], - "Etag": [ - "CPjnucrx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqlTuEeQHkdcuHKu_aWtuYjopXaORKyG9TF597i1ybzIPgJHE_oiKH-xYwG_D2txhF_Sv6Jzfy44Dfga1bINCpPLseDEeD_Ez4rvxx8baTX1uJ7lJo" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzYuMjI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNj
MzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFNPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJyME5Hcmc9PSIsImV0YWciOiJDUGpudWNyeC9lRUNFQU09IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJIK0xtblhoUm9lSTZUTVc1YnNWNkh5VWs2cHlHYzJJTWJxWWJBWEJjcHMwPSJ9fQ==" - } - }, - { - "ID": "fb1e502029272329", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "277" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:36 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:36 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urm5pF1ghApp-RiVg_TAipd-LdTbF-hcDPcVRYbmj7KUIQxnsMUF5WOcrMA1t7ZltdvPZhyfsELISuH6DGJlUUedaCNH-B5j580GjBYLUKCwmAADeM" - ] - }, - "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" - } - }, - { - "ID": "c151ba4fab0c6499", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Language": [ - "en" - ], - "Content-Length": [ - "11" - ], - "Content-Type": [ - 
"text/plain; charset=utf-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:36 GMT" - ], - "Etag": [ - "\"-CPjnucrx/eECEAM=\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:35 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:35 GMT" - ], - "X-Goog-Generation": [ - "1556835875058680" - ], - "X-Goog-Hash": [ - "crc32c=r0NGrg==", - "md5=xwWNFa0VdXPmlAwrlcAJcg==" - ], - "X-Goog-Metageneration": [ - "3" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "11" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur68mIGQ8VAtTRZs4F6ixZZYKcqe1oTqWHWqmp7gNF-81XKf2xX6eS4PcegbKx7bYnb4i6dzTCoLlbaJRNHwwLVzNbYMkGNy_bTHbWYTgtlBSKVtWs" - ] - }, - "Body": "dG9wIHNlY3JldC4=" - } - }, - { - "ID": "e09c6326cc60ad82", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12563" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:36 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqldLQvGKuu2fihIdwh7CvU7W0yE3vUXu8jqefTPSMhlGgvR1EfeYFqjk_Zxj7fEpv7AjugvHnn1DmkJxJWhrZyM8b5gS4uJID92D_018h2YHH1r30" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=The requested object is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" - } - }, - { - "ID": "6c83b36cc6fc482c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Copy-Source-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Copy-Source-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Copy-Source-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3527" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:37 GMT" - ], - "Expires": [ - 
"Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqGOUzu40Jq-_GztkQxoFN64MHcSoZjph7ivV8tqrp6ENxFIB0c82xqEdKKfcpqbJ2YXtXQgwflZgX-uiVOgGY_1c8SLoV7geQMfErTz1Q0NDM4YDM" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NzEwODYxNCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNy4xMDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzcuMTA4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM3LjEwOFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4NzcxMDg2MTQmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc3MTA4NjE0IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzcxMDg2MTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSWIzdHN2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4NzcxMDg2MTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0c
HM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc3MTA4NjE0IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzcxMDg2MTQiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSWIzdHN2eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifX0=" - } - }, - { - "ID": "e499f3b7077fb222", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Language": [ - "en" - ], - "Content-Length": [ - "11" - ], - "Content-Type": [ - "text/plain; charset=utf-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:37 GMT" - ], - "Etag": [ - "\"c7058d15ad157573e6940c2b95c00972\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:37 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:37 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:37 GMT" - ], - "X-Goog-Generation": [ - "1556835877108614" - ], - "X-Goog-Hash": [ - "crc32c=r0NGrg==", - "md5=xwWNFa0VdXPmlAwrlcAJcg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "11" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpfEf1WUM8E5dSmOe1sOfgheagnlrcFUolvMMBiP6jDvPqoS7Sx0IwklM-DwnuAbBJPQ_EHhX-42njQu9vUjXqamH1U6np0Vmi0BngqFYOTc9MVP3M" - ] - }, - "Body": "dG9wIHNlY3JldC4=" - } - }, - { - "ID": "9e67dfa901c4e619", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12563" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:37 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoYPZ-j39Sr1Whn6F3PYeVaxqIIjS_fBgSNkBO7f7B8RMWih6iCCLf1cPDXB3Br-0XBkdm6i9N9fGeK84_laVYWuJc0uJwSCXydk0bSWezw0qec5yE" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=The requested object is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" - } - }, - { - "ID": "670f74ac2a0b734e", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Copy-Source-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Copy-Source-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Copy-Source-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, 
must-revalidate" - ], - "Content-Length": [ - "3640" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:38 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoWwxnT2DHS1ymBhZJmnwxQiajAVYbOH62dyhX86syLNW_TlPnzYaM_7kACpN8ar5EeAHQ9fsMSHP4CqMcb6ZWg5KY3OkbPvfitdLalAA9MXxMFqoo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3ODE4MzI1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOC4xODJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzguMTgyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM4LjE4MloiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4NzgxODMyNTEmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4MTgzMjUxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzgxODMyNTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTlBDK012eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIi
wiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4NzgxODMyNTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4MTgzMjUxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzgxODMyNTEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTlBDK012eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0iLCJjdXN0b21lckVuY3J5cHRpb24iOnsiZW5jcnlwdGlvbkFsZ29yaXRobSI6IkFFUzI1NiIsImtleVNoYTI1NiI6IkZuQnZmUTFkRHN5UzhrSEQrYUI2SEhJZ2xEb1E1SW03V1lEbTNYWVRHclE9In19fQ==" - } - }, - { - "ID": "9c6e7ad2d8e2c68a", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "277" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:38 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:38 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoSrQtDkljyQ-aUJoC8m6nY20wZiHLTwh4znEZ5hyKxLQUrZIi7PnU416twfTwfPXWvd4cPGZC3NVdpJWg332KnUKSEdFPY2BQwLJ-_9S5eVdwMrf8" - ] - }, - "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" - } - }, - { - "ID": "b5fbec9f26148f95", - "Request": { - 
"Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Language": [ - "en" - ], - "Content-Length": [ - "11" - ], - "Content-Type": [ - "text/plain; charset=utf-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:38 GMT" - ], - "Etag": [ - "\"-CNPC+Mvx/eECEAE=\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:38 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key-Sha256": [ - "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:38 GMT" - ], - "X-Goog-Generation": [ - "1556835878183251" - ], - "X-Goog-Hash": [ - "crc32c=r0NGrg==", - "md5=xwWNFa0VdXPmlAwrlcAJcg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "11" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpTr8Qws5nL-el_ied6M7FYtryiLpFfhGNN-TxakIl1FCAOBJwMeq6_KBQ0l4UmT8VePQQ9h_S8r55NolBB1Ex12-iKvooCkTsNmvQ4rE4hOqQGbLc" - ] - }, - "Body": "dG9wIHNlY3JldC4=" - } - }, - { - "ID": "616de25561ebbffa", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Copy-Source-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Copy-Source-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Copy-Source-Encryption-Key-Sha256": [ - "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3640" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:39 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" 
- ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqyD8XYw6tgOBbcxdUIVAw_vXrrryw0bgh-L-jRW0hyJt0lCHU9K26isn6tykgml7UgX52dgw65xD_ht4yxbHkY_VFl6MkEY6H-mWRx0_52xj49RW8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3ODk5ODUxMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOC45OThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzguOTk4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM4Ljk5OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4Nzg5OTg1MTEmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4OTk4NTExIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Nzg5OTg1MTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTytqcXN6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4Nzg5OTg1MTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24
tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4OTk4NTExIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Nzg5OTg1MTEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTytqcXN6eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0iLCJjdXN0b21lckVuY3J5cHRpb24iOnsiZW5jcnlwdGlvbkFsZ29yaXRobSI6IkFFUzI1NiIsImtleVNoYTI1NiI6IkgrTG1uWGhSb2VJNlRNVzVic1Y2SHlVazZweUdjMklNYnFZYkFYQmNwczA9In19fQ==" - } - }, - { - "ID": "2ddf1402c6efc3a8", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "160" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24ifSx7Im5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIifV19Cg==" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13334" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:39 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrFitgTFIY6yt8kk3zprPGtWncWc7Ad6oUFVXVdE0O3QqK0qlYGB7RSw-dwN1AvMk4Sndi8gR22cF8qs88ZlmdAxB-1zYdUNqwArmpP2xni66xX-L4" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object 
(go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" - } - }, - { - "ID": "eea75f5f6c3c7e89", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "160" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24ifSx7Im5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIifV19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "911" - ], - "Content-Type": [ - "application/json; 
charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:39 GMT" - ], - "Etag": [ - "CPDp3szx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uotyvz71ZfU1J_hl3NsDz9ldw92nWlIp9muxpBUsdv0nTyfJEqz-yQFyEwE2UM11wv7tkpLQfffGlKhlx7CVK0S1UYJnS2XQu2P0Uiz5bbamHxE520" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTMvMTU1NjgzNTg3OTg1OTQ0MCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMyIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3OTg1OTQ0MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOS44NTlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzkuODU5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM5Ljg1OVoiLCJzaXplIjoiMjIiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0zP2dlbmVyYXRpb249MTU1NjgzNTg3OTg1OTQ0MCZhbHQ9bWVkaWEiLCJjcmMzMmMiOiI1ajF5cGc9PSIsImNvbXBvbmVudENvdW50IjoyLCJldGFnIjoiQ1BEcDNzengvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" - } - }, - { - "ID": "b19fb82ecf450b51", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-3", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "277" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:40 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:40 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urcj3TJ6B5h4Q0DT7G_ioI_Icu9iv65m57OlRH6h9mKD9zZZl320H3QD6A7fNd1UPBSGGDZ9I0TG4_lSFa8JgkJQEVRGVRvVQv36b7ArCuGmg0loV0" - ] - }, - "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" - } - }, - { - "ID": "1f0a12bd00a88965", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-3", - "Header": { - 
"Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "22" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:24:40 GMT" - ], - "Etag": [ - "\"-CPDp3szx/eECEAE=\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:39 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Component-Count": [ - "2" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:39 GMT" - ], - "X-Goog-Generation": [ - "1556835879859440" - ], - "X-Goog-Hash": [ - "crc32c=5j1ypg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "22" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur7DoEkjdrkGGrYpsCDbd2Y6c_Mu8sEA7cN_k06l4WjY25UJsijDFWSlSIA9SCi-ouLuI3if4Rh33a5n5vKcN3oSYjzK92eSPVCczEpQdILKFWyQh4" - ] - }, - "Body": "dG9wIHNlY3JldC50b3Agc2VjcmV0Lg==" - } - }, - { - "ID": "f649a81672a92823", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Copy-Source-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Copy-Source-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Copy-Source-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3527" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:40 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrfhqrYGhW8ew5dxbfg_DcMhaJh6k2oY_JqMwPRSDtS3Ef5kljeo8oTONrxRIAP8I0ScqxFCy7okNejkqOeO4Vr1TBZ2mc4MDjwiJA52ERSgZMUyvM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MDcxNzUwNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MC43MTdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDAuNzE3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQwLjcxN1oiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4ODA3MTc1MDYmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgwNzE3NTA2IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODA3MTc1MDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUtaazgzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4ODA3MTc1MDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgwNzE3NTA2IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5Nj
AxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODA3MTc1MDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUtaazgzeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifX0=" - } - }, - { - "ID": "61763fd57e906039", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "129" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiJ9XX0K" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13444" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:41 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur7TRBQyfrSgWnsPL0CVfQCd5s_QJN9pRUHB1YttIoObh1YkzCEtuRxrwyKYtccpyodUCMmTVRyD6d0oDy-c2_Q7GK-_6OC5h_EF_1e9-t6E8VZCQ8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceNotEncryptedWithCustomerEncryptionKey","message":"The target object is not encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.encryptionKey, message=Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=entity.encryptionKey, message=The target object is not encrypted by a customer-supplied encryption key., reason=resourceNotEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is not encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is not encrypted by a customer-supplied encryption key."}}" - } - }, - { - "ID": "b45cb452c78d7b50", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:41 GMT" - ], - "Etag": [ - "CAo=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:41 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Uq2YnotZtU1JMnJw10vtgVDeukiWK_4DXx0QFWA91CaCYLPXLDKzCzY8xqb6EGkVxvw731As27REu_hYOqZTwSfJJ6SeHemliLV9JgQYjfngxL0HFA" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOC44ODJaIiwibWV0YWdlbmVyYXRpb24iOiIxMCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBbz0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0FvPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FvPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQW89In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0FvPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQW89In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7Im5ldyI6Im5ldyIsImwxIjoidjIifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJld
GFnIjoiQ0FvPSJ9" - } - }, - { - "ID": "6831287fd0edbcd4", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoicG9zYyJ9Cg==", - "Zm9v" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3128" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:42 GMT" - ], - "Etag": [ - "CJek2c3x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrXGhYaqbrodjXeCKtZGAt0hM0GrM8GBcZBkGkLJ7550-Iqaxp681SkhDDDOp0V3-xMmoq3rjWOC8A4XvJfAzgxGsO2VekCUJgUBMalL8hEAur49q8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODE4NjU3NTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjIiwibmFtZSI6InBvc2MiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MS44NjVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDEuODY1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQxLjg2NVoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYz9nZW5lcmF0aW9uPTE1NTY4MzU4ODE4NjU3NTEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDA
zNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4MTg2NTc1MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0=" - } - }, - { - "ID": "7588cdc84e3976f4", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3128" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:42 GMT" - ], - "Etag": [ - "CJek2c3x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpGBHQWnSHLUYlW5nck2w_3QN4x5uYmOM-GIatNzD1tqhzK0JOf7rBe1BKILpdyEUPlSmrLwrukcqJwHgfezE50OQL7ha9n6_fc6qUMoVwkEGawOgA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODE4NjU3NTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjIiwibmFtZSI6InBvc2MiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MS44NjVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDEuODY1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQxLjg2NVoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYz9nZW5lcmF0aW9uPTE1NTY4MzU4ODE4NjU3NTEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4MTg2NTc1MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC91c2VyLWFub3
RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0=" - } - }, - { - "ID": "974ed1b42608e806", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "34" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:42 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrKCtuc_GsAj6sf3zXpqT2_KZrdL0SEwsunv07k0DaaEHMj8gkX4kRCcm5r8AUrD9RNYjoKeceiCyY4pDN8nrHljshmEIXF2P29Oyd0KSuqDjsNeps" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMyIsIm9iamVjdFNpemUiOiIzIiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgyNzYwNjA3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYyIsIm5hbWUiOiJwb3NjIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDIuNzYwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQyLjc2MFoiLCJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Mi43NjBaIiwic2l6ZSI6IjMiLCJtZDVIYXNoIjoickwwWTIwekMrRnp0NzJWUHpNU2syQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2M/Z2VuZXJhdGlvbj0xNTU2ODM1ODgyNzYwNjA3JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJwb3NjIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4Mjc2MDYwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKL3pqODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODI3NjA2MDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJwb3NjIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4Mjc2MDYwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKL3pqODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiejhTdUhRPT0iLCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9fQ==" - } - }, - { - "ID": "a2be30bf1e3cb539", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoicG9zYzIiLCJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCJ9Cg==", - "eHh4" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3150" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:43 GMT" - ], - "Etag": [ - "CILrp87x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqziYHpTkcbyMFRtHko_e04Rze8FHLbALw476U2bB9k_SqPwILBzxKRLzFvRN4q3RLt4xR1mpR2lgJ1Zq0BNYCefLOZsOeq4JbzJYLxGL5V799xkQ8" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjMi8xNTU2ODM1ODgzMTUyNzcwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzIiLCJuYW1lIjoicG9zYzIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0My4xNTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDMuMTUyWiIsInN0b3JhZ2VDbGFzcyI6Ik1VTFRJX1JFR0lPTkFMIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQzLjE1MloiLCJzaXplIjoiMyIsIm1kNUhhc2giOiI5V0dxOXU4TDhVMUNDTHRHcE15enJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzI/Z2VuZXJhdGlvbj0xNTU2ODM1ODgzMTUyNzcwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODMxNTI3NzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYzIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJTHJwODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjMi8xNTU2ODM1ODgzMTUyNzcwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODMxNTI3NzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS
9vL3Bvc2MyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYzIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJTHJwODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMTdxQUJRPT0iLCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9" - } - }, - { - "ID": "d570e13411ade628", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiYnVja2V0SW5Db3B5QXR0cnMifQo=", - "Zm9v" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3336" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:43 GMT" - ], - "Etag": [ - "CPCDx87x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrltsLp6xgl5GQBA_ZoqyMKcRlP-MvWyo0epRXUAbOAkOUUpAOgezp4fFQRP5wEM1fq771AWUgtYvQ3HLXCnwxmDaCqJxEhICiYhxDp8RpvCnn3xuI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRJbkNvcHlBdHRycy8xNTU2ODM1ODgzNjYzODU2Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnMiLCJuYW1lIjoiYnVja2V0SW5Db3B5QXR0cnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0My42NjNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDMuNjYzWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQzLjY2M1oiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnM/Z2VuZXJhdGlvbj0xNTU2ODM1ODgzNjYzODU2JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldEluQ29weUF0dHJzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODM2NjM4NTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NTYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldEluQ29weUF0dHJzL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0SW5Db3B5QXR0cnMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQQ0R4ODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRJbkNvcHlBdHRycy8xNTU2ODM1ODgzNjYzODU2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRJbkNvcHlBdHRycy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldEluQ29weUF0dHJzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODM2NjM4NTYiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NT
YvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldEluQ29weUF0dHJzL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0SW5Db3B5QXR0cnMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQQ0R4ODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiejhTdUhRPT0iLCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9" - } - }, - { - "ID": "284a06aa5d3f4c0f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "62" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifQo=" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "2972" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:43 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqEuzT5vUHzBcQTT84B5lbxQRZ8Wazbvf7pARGdim0OKOWWM6MdR9KrH11f9w9bxrybc2YHzoveHnAwpEgFzwUcXlLN8xDttCt9pwOnPMX3My8utXg" - ] - }, - "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6InJlcXVpcmVkIiwibWVzc2FnZSI6IlJlcXVpcmVkIiwiZGVidWdJbmZvIjoiY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRmF1bHQ6IEltbXV0YWJsZUVycm9yRGVmaW5pdGlvbntiYXNlPVJFUVVJUkVELCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLlJFUVVJUkVELCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1SRVFVSVJFRCwgZXJyb3JQcm90b0RvbWFpbj1nZGF0YS5Db3JlRXJyb3JEb21haW4sIGZpbHRlcmVkTWVzc2FnZT1udWxsLCBsb2NhdGlvbj1lbnRpdHkuZGVzdGluYXRpb25fcmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1udWxsLCB1bm5hbWVkQXJndW1lbnRzPVtdfSwgbG9jYXRpb249ZW50aXR5LmRlc3RpbmF0aW9uX3Jlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9UmVxdWlyZWQsIHJlYXNvbj1yZXF1aXJlZCwgcnBjQ29kZT00MDB9IFJlcXVpcmVkXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW
5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUmVxdWlyZWQifX0=" - } - }, - { - "ID": "631a4dc25c1479df", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjcmMzMmMiOiJjSCtBK3c9PSIsIm5hbWUiOiJoYXNoZXNPblVwbG9hZC0xIn0K", - "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3321" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:44 GMT" - ], - "Etag": [ - "CIes587x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up5pzFgsk-trM6xfNGHCutAafrBhKStla4toQDjvEsTPe4TTesYnwc0KLg9WK95RXOKqdm_KUngd4hv6Tucfns_MALlJyx1s6A2cZR3vco6jKPTW00" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQxOTMyODciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NC4xOTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDQuMTkyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ0LjE5MloiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg0MTkzMjg3JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQxOTMyODciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJZXM1ODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQxOTMyODcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQxOTMyODciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJZXM1ODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9" - } - }, - { - "ID": "e60d42eeeafea205", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjcmMzMmMiOiJjSCtBL0E9PSIsIm5hbWUiOiJoYXNoZXNPblVwbG9hZC0xIn0K", - "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "3301" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:44 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpZ9QOOcNdGntWr097PRNnaDFbt2xarczbyHRwkwxSiwRZIhV26iZGbh5vHIpvl3Z5Dxc7hjtWuutHTMn8jpCTEowLJNAre2A6mZxVNtV52Vh6XDX4" - ] - }, - "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiUHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4iLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5jcmMzMmMsIG1lc3NhZ2U9UHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4sIHVubmFtZWRBcmd1bWVudHM9W2NIK0EvQT09XX0sIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5jcmMzMmMsIG1lc3NhZ2U9UHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4sIHJlYXNvbj1pbnZhbGlkLCBycGNDb2RlPTQwMH0gUHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi5cblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRXJyb3JDb2xsZWN0b3IudG9GYXVsdChFcnJvckNvbGxlY3Rvci5qYXZhOjU0KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUVycm9yQ29udmVydGVyLnRvRmF1bHQoUm9zeUVycm9yQ29udmVydGVyLmphdmE6NjcpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyNTkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyMzkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci50aHJlYWQuVGhyZWFkVHJhY2tlcnMkVGhyZWFkVHJhY2tpbmdSdW5uYWJsZS5ydW4oVGhyZWFkVHJhY2tlcnMuamF2YToxMjYpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuSW5Db250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjQ1Mylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnNlcnZlci5Db21tb25Nb2R1bGUkQ29udGV4dENhcnJ5aW5nRXhlY3V0b3JTZXJ2aWNlJDEucnVuSW5Db250ZXh0KENvbW1vbk1vZHVsZS5qYXZhOjgwMilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZSQxLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NjApXG
5cdGF0IGlvLmdycGMuQ29udGV4dC5ydW4oQ29udGV4dC5qYXZhOjU2NSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLkN1cnJlbnRDb250ZXh0LnJ1bkluQ29udGV4dChDdXJyZW50Q29udGV4dC5qYXZhOjIwNClcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dE5vVW5yZWYoVHJhY2VDb250ZXh0LmphdmE6MzE5KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjMxMSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDU3KVxuXHRhdCBjb20uZ29vZ2xlLmdzZS5pbnRlcm5hbC5EaXNwYXRjaFF1ZXVlSW1wbCRXb3JrZXJUaHJlYWQucnVuKERpc3BhdGNoUXVldWVJbXBsLmphdmE6NDAzKVxuIn1dLCJjb2RlIjo0MDAsIm1lc3NhZ2UiOiJQcm92aWRlZCBDUkMzMkMgXCJjSCtBL0E9PVwiIGRvZXNuJ3QgbWF0Y2ggY2FsY3VsYXRlZCBDUkMzMkMgXCJjSCtBK3c9PVwiLiJ9fQ==" - } - }, - { - "ID": "5a293e6162b30ef3", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiaGFzaGVzT25VcGxvYWQtMSJ9Cg==", - "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3321" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:44 GMT" - ], - "Etag": [ - "CJuDg8/x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqgpXYBQOYHYXTQAVb7IGna0BRqHaB7oBdZ19frcgAVGErLbEHe3bESKZ7zKSO6r0AtvqwfCEuJty95EeD5MkwqIsXbr88YcTG0j7M-g4yTKkejukI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQ2NDY4MTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ0LjY0NloiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg0NjQ2ODExJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQ2NDY4MTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKdURnOC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQ2NDY4MTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQ2NDY4MTEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKdURnOC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9" - } - }, - { - "ID": "19f19648f1196c10", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEifQo=", - "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3321" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:45 GMT" - ], - "Etag": [ - "CKDem8/x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqAmKuvPxUsjTHDB5nMuNG92dU0gql0Yol2zlvPDWEDuXHQRLhOJzYBAH207Ot-_IB22pH5QSfTaTayqEWBUfkobCR9YGVa79W0LG4fv9fnAjxk5Cs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODUwNTE2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NS4wNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDUuMDUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ1LjA1MVoiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg1MDUxNjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODUwNTE2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNLRGVtOC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODUwNTE2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODUwNTE2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNLRGVtOC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9" - } - }, - { - "ID": "880416f9891fc25f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJtZDVIYXNoIjoib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEifQo=", - "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "3515" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:45 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoFC5_DRlr9xktR_eWpfuFqiZRbenVOATtUEg3tfJC8JbCqagKQzn0_sYLvxvG52ordbl2Nwo0L6Y0AbwuenLAo1uoX4uzHnro-JiY8MkdQEsO3uSA" - ] - }, - "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiUHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4iLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5tZDVfaGFzaF9iYXNlNjQsIG1lc3NhZ2U9UHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4sIHVubmFtZWRBcmd1bWVudHM9W292WmpHbGNYUEppR09BZktGYkpsMVE9PV19LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UubWQ1X2hhc2hfYmFzZTY0LCBtZXNzYWdlPVByb3ZpZGVkIE1ENSBoYXNoIFwib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIgZG9lc24ndCBtYXRjaCBjYWxjdWxhdGVkIE1ENSBoYXNoIFwib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIuLCByZWFzb249aW52YWxpZCwgcnBjQ29kZT00MDB9IFByb3ZpZGVkIE1ENSBoYXNoIFwib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIgZG9lc24ndCBtYXRjaCBjYWxjdWxhdGVkIE1ENSBoYXNoIFwib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIuXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci
5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4ifX0=" - } - }, - { - "ID": "e1a7bef7c2f0f7b9", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "341" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:45 GMT" - ], - "Etag": [ - "CAo=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:45 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrDioJ0fktjtyI_mABE4-oH9KqDlurM5mWiYDhoDp_LRJYSPvDxjuoaYzNhjoTHdHPALySTzCxScTstm27R-praR27EAm4Gx9k3wHAAwKJUptqRnFE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfV0sImV0YWciOiJDQW89In0=" - } - }, - { - "ID": "df5371a2b0cd8c3b", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "317" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - 
"eyJiaW5kaW5ncyI6W3sibWVtYmVycyI6WyJwcm9qZWN0RWRpdG9yOmRla2xlcmstc2FuZGJveCIsInByb2plY3RPd25lcjpkZWtsZXJrLXNhbmRib3giXSwicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0T3duZXIifSx7Im1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXSwicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIn0seyJtZW1iZXJzIjpbInByb2plY3RWaWV3ZXI6ZGVrbGVyay1zYW5kYm94Il0sInJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciJ9XSwiZXRhZyI6IkNBbz0ifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "423" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:46 GMT" - ], - "Etag": [ - "CAs=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrAVgetrpv1xjtKv2B7KFBdnbg20V-btiRn3sD4kQF7shWIQ1-FqJMRsbYfcYJyTMQHv_BhW_eVzvcc56z0LOcYfE-wbCRZpqYjen6c-bVcMzPet2A" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfSx7InJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciIsIm1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXX1dLCJldGFnIjoiQ0FzPSJ9" - } - }, - { - "ID": "feaa4b1a4dda0450", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "423" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:46 GMT" - ], - "Etag": [ - "CAs=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:46 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqwMVZVQQ8s-ZBUJDLKyC-qMMTANncoBhRQWlMCITVUXQrTnFGGxu4GdKyVcuudrSa-DWy5w5iLE-i4a407GopUNma3xIq1eNiyVAziPD7jQ4YluzA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfSx7InJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciIsIm1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXX1dLCJldGFnIjoiQ0FzPSJ9" - } - }, - { - "ID": "23d94c8a09c0c334", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam/testPermissions?alt=json\u0026permissions=storage.buckets.get\u0026permissions=storage.buckets.delete\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "108" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:46 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:46 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoR-XKWu9nOWmFZve2-NeDyDvZDo5rYpPD3avVEmZkZ-lODvHCSR0D7zdPeCa61L5EtOIYJBqmC0D9Xc229A1GDS5d91vgAGuzRoxRnNa9DjBw68xM" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSN0ZXN0SWFtUGVybWlzc2lvbnNSZXNwb25zZSIsInBlcm1pc3Npb25zIjpbInN0b3JhZ2UuYnVja2V0cy5nZXQiLCJzdG9yYWdlLmJ1Y2tldHMuZGVsZXRlIl19" - } - }, - { - "ID": "3d0a13fe961ed15c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "93" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "518" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:47 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoIRjhruHo5rweyX35Zt-Mzm5UDD5vr4319gSNjtnJHD2RWtVLvQdjuZ0jv-XKT3s1xcJO7CvoU-qAehCknukI5ssvv2LgwazyhnkcuFws3isqM9uo" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ni44MDNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImJpbGxpbmciOnsicmVxdWVzdGVyUGF5cyI6dHJ1ZX0sImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "c1f385994f4db8e3", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/user-integration%40gcloud-golang-firestore-tests.iam.gserviceaccount.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "159" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "589" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:48 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UozEPu-BKQnmBUxTwsOWB7mmIPC5lppmUt6lA150OiXgQttSVRetVysUp299p7PbjI08qchUQl2idgMbfBCScDL5SuoHi_u9ani0DXYX2OV9QXAovQ" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "ccb3141a79c55333", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - 
"Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3054" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:48 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:48 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoG9XpooOFPlHtC8fg7llzYYETQUMEm0PvG4TbxBF5KdfCdB_sxNi0Yy9KrAmMDPBVyq0Iuaq7mzQOhQe9lXe5PBOc8-CGTXZiEMkMMJe3OYxjExIM" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlv
bkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "2d4244737318f2d5", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3054" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:48 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:48 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqn_UdEXhLLXE1QsYjwtyqQlzX3nDvpAO1WNpRkHhyV39RJKkxMBrcd8f6Xo3VZFDps7iKuQHnW49yRqjh42062lkisOH7otDXrdAKAih4Iz9fzERM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcn
MifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "b63c3951975a766e", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12183" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:49 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:49 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq-LP6kQLHNpjT5jYVkPzNXyfr838sGr4jGMVPq-FOq01ayCWvyWVAsvUbxp0NSdOYJcW-z0i2NkQLuSKGQYrX_3z8LW1Vb4uL2jxfC8kSiuz28hkA" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "0764f2598b67c664", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3054" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:49 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:49 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UrGddSv5GSaZB7qBmb0PigbMbJmWs91JB0W99oOTcbMeUk9QeBf-7QF7W_BpAn3wNxqVD5cBDPgGuKrZCfYv77SgCGxcPW-jrXsUY5h50q8eFL0_3o" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6I
nByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "bdb0ff507e4df251", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13039" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:49 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:49 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up8t6bOWiJucB8KpVlIp8WjWLSUKssQaynZNguVy_oBM2ggBePUFnoteMcbw_L9aSHoD4hwi91dMaBC4aVc6VuaMSBSVuCe0L_0IymxhBIx3SRj_As" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "d2e66ae2cc0d11bc", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3133" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:50 GMT" - ], - "Etag": [ - "COq52NHx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" 
- ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqQ_qlqWPPh6t8-gwPlD5zRB96zsmFgzyez4LG3XN4uErivnyHcCeeHje_i95VLJcWYXn7_-knEn0faPBCCAIStf4fXGs_Lz_VzHK6CLBZjU-oBxcU" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0NiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MDI0MDc0NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MC4yNDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTAuMjQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUwLjI0MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MDI0MDc0NiZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwMjQwNzQ2IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTAyNDA3NDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3E1Mk5IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTAyNDA3NDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwMjQwNzQ2IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIj
oiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTAyNDA3NDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3E1Mk5IeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "89b2bc9232a865a4", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3133" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:50 GMT" - ], - "Etag": [ - "CJTz9tHx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrymnpCwS6vZ0kM33ZD4nBupWs9hAuaOGpwiNcS4GJctgaLPe9CQ0Ada06yQV1aEfh9FtX6lgYh9jHwbsT6qdgOYOYe9yGDdAF9f91aE4y4VUHQHts" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MDczOTYwNCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MC43MzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTAuNzM5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUwLjczOVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MDczOTYwNCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwNzM5NjA0IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTA3Mzk2MDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSlR6OXRIeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTA3Mzk2MDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwNzM5NjA0IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2
VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTA3Mzk2MDQiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSlR6OXRIeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "a70634faba29225b", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:51 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqsIKUUc6WyIHUlDvAIDlXVeOofVG4sVV3Rus8ktfPMTUTI7e4PH4657AoDnNOKKy9TOgt8yUtUvCNvDFQC1xwVApFofRhWT3kcjsEYRgZPCDDsUqM" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "5539336327f2e0ba", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:51 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - 
"no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrH7pn9r8O1Zu1Nc2aNUO_CO75MyvOJyONGw27TUkfWRSSmFIWJUZ6F3e3OprVF5jKtAkQ73puZbyNCK6mozyHrmPRRa0mtyuBPhkYg4DzPGkXJOSU" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0Iiw
idGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "029ec3d54709e7ce", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqcpItVCw5LaIWvDckOjfoChIifJGtuURBAmgtpzzna_iVOfDSaQOhxiMDVfYkV3mKG7__z0WTNRdVpa2Zs2IPcIAKwqwSeIMaLtz--MluSCHv9kc8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "bc0d0ed808bc8e68", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "5" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Etag": [ - "\"5d41402abc4b2a76b9719d911017c592\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:51 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1556835891548001" - ], - "X-Goog-Hash": [ - "crc32c=mnG7TA==", - "md5=XUFAKrxLKna5cZ2REBfFkg==" - ], - 
"X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "5" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpljfpVGJewuMZUyJZ8DU7j2k6JmfPqXXgkeeu8vGdK5j-C_DUgpubS4nFEp1z8EsN-fUdceCxfiy-FGIJiFpME38hWnztW_WIn6zeSh6LCbwIK9oM" - ] - }, - "Body": "aGVsbG8=" - } - }, - { - "ID": "b695481e00d2d6d9", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ], - "X-Goog-User-Project": [ - "deklerk-sandbox" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "5" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Etag": [ - "\"5d41402abc4b2a76b9719d911017c592\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:51 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1556835891548001" - ], - "X-Goog-Hash": [ - "crc32c=mnG7TA==", - "md5=XUFAKrxLKna5cZ2REBfFkg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "5" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrmYf7b7ig2ase_O8qHEsgSAXifAlxVdDy_Zh5Qaqx1IlL1wkTvy1BSblI6ZDz3M2X-Y6KrdJp1IaP6nDU1F_Yhyt84Y7dOG80r2g-x4E7NAyyjV0o" - ] - }, - "Body": "aGVsbG8=" - } - }, - { - "ID": "2fe3b6cc96ffa451", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "266" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur-S2G_BUo7MzXUj-7vUDuVFWfBIA5GJYfTdaeruyelzRGFimnhov8wRB_ozWC2cGPQ-a2xQk8bw_cVV_D2Q7c7rdR1ujaTM3oIjr8vjsJBZXa1VeA" - ] - }, - "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+VXNlclByb2plY3RNaXNzaW5nPC9Db2RlPjxNZXNzYWdlPkJ1Y2tldCBpcyBhIHJlcXVlc3RlciBwYXlzIGJ1Y2tldCBidXQgbm8gdXNlciBwcm9qZWN0IHByb3ZpZGVkLjwvTWVzc2FnZT48RGV0YWlscz5CdWNrZXQgaXMgUmVxdWVzdGVyIFBheXMgYnVja2V0IGJ1dCBubyBiaWxsaW5nIHByb2plY3QgaWQgcHJvdmlkZWQgZm9yIG5vbi1vd25lci48L0RldGFpbHM+PC9FcnJvcj4=" - } - }, - { - "ID": "59bdcad91d723d0e", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ], - "X-Goog-User-Project": [ - "gcloud-golang-firestore-tests" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "5" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Etag": [ - "\"5d41402abc4b2a76b9719d911017c592\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:51 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1556835891548001" - ], - "X-Goog-Hash": [ - "crc32c=mnG7TA==", - "md5=XUFAKrxLKna5cZ2REBfFkg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "5" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up3lrNfk5bvup2RRbcsT6CeDACQitaiF3BTMyQ7B23ACtTqwWweq9ooOfZn3CX7tsbzljhlY_009nsTXXQwm7V_xNNd8FZwT44CzSpul-SZIRODC2w" - ] - }, - "Body": "aGVsbG8=" - } - }, - { - "ID": "7b7b6a79cd2a53e3", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ], - "X-Goog-User-Project": [ - "veener-jba" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "342" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpT7Ge_QpcDrqajg6a4kZrmoA55ZSma9IM3pyz0Ow1bu5nLURc0V7cAThbvikg47_O-ox0uMdF6zBbtVtKL7olyMUGd9wqCQ9ubELVoJXiNMDehvXY" - ] - }, - "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+VXNlclByb2plY3RBY2Nlc3NEZW5pZWQ8L0NvZGU+PE1lc3NhZ2U+UmVxdWVzdGVyIGRvZXMgbm90IGhhdmUgc2VydmljZXVzYWdlLnNlcnZpY2VzLnVzZSBwZXJtaXNzaW9ucyBvbiB1c2VyIHByb2plY3QuPC9NZXNzYWdlPjxEZXRhaWxzPmludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIGRvZXMgbm90IGhhdmUgc2VydmljZXVzYWdlLnNlcnZpY2VzLnVzZSBhY2Nlc3MgdG8gcHJvamVjdCA2NDIwODA5MTgxMDEuPC9EZXRhaWxzPjwvRXJyb3I+" - } - }, - { - "ID": "914bca19df35cbed", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:53 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpM7lBsoxgNb-rh81Lfe0tWFGTkXQYfEB12ofkKZ8SK8kK5bPRUsQMVhm3aYB2N9e5_QSBZA25my-t5LpgR8TVW1lNTGd97OoD7bdBhP_CGo3xMbos" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2
8vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "164b4ffce0c7e233", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:53 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqbRkPvid4Zo9mQsbMXlTlOQ1womp_CnHUJcNWRamG_lp4ABhgVPVOdL8lE11J3tmvVkYhzcEPpIJuA_RTvuAbssXFsNo9Fu14cQ44HwhSRYk7r_dU" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2
8vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "d04d3cb2ac9d6376", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12183" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:53 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:53 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrcH6PdfDpm0th9SnZgYwMYIiYGYz2VvNs-Nb0VYFbZdA4QXfOJiRFg-xKqmDq09HRt7j8OwTDO7UMm2EiqwjPz7j_JPIvfcCzfCnVVoF3XiZFomuQ" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "8b3aeb1f423d76c5", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:54 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - 
"success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoX_zzYo9AYFb44YmIz49k5z8v8ITzEQsDrSxEE50-7AC-ndg18Yo5DFAZ9ZFCDtT5P7kch3c0-YlkhX-5oYCUIm-mmPmoTg0CKWVI0TJ9la9z9-vE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmly
ZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "f32edc9a9263c0a1", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13039" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:54 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:54 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uok5lyOpE3YUZDwRz_bsA-E-xzWeqaApyadumOOFoA_YmOi_xnvFaJr6gfp9Gaktbpud9mv8qDCIvq1Yu7CyAVY0ScanSfNCke3naR_G7kPKYgrywY" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to 
project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "da73739950dbfa82", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3216" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:54 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - 
"Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrdpofRnchjkCQz5LLoghzg-RHoGATH3xbgJVW0LKGBkH025w5EB5RlGmHHnmXuPtK_5Ffyl1bxjmoc_h7_vGvjcxWbHv9-NKe9fLO_oNYRmw-ffvU" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTQuNzM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoi
Q09HZXFOTHgvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBST0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFJPSJ9" - } - }, - { - "ID": "94e697431c9323b9", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3216" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:55 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqhr0guIL00afW_qpaRvxEGRSJEWTtfrZP2Tv8Vqt2xas1ivvvMHVbSpT12ltU7FjytFhno9SFXTS8pW1S0S8qJdFctUCa83tkcQRXgN1uPUwhX1ws" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTUuMTMwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNT
AyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9" - } - }, - { - "ID": "3c5d694690a75054", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12375" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:55 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UogsU3nh3dfRYWq9YqTmJc7CfCX1U0b-IrWpDCM7M4YwfuUWgzzzcydWdHLrF0UPqRxNFQj3eMHBbDuHYcF0xuUNah4g5J_nt26feHRp-moh7uFXNw" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat 
io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "e050063f79f64820", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3216" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:55 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAQ=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - 
"X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq4hs_SY8_2HHPHu8NfVIkOdwmZz7h6XH0nWRSTQJSqzQZQGvv1p83Z6W209h3bXsbN9_ESiU3OIyLRwmEldfBP94hRhN5t1JnagcKPcMEOv8FtvHs" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTUuODI0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9
LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9" - } - }, - { - "ID": "f7edef926cdb7d75", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13231" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:56 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqyQ9DLrBDGkx7uOkrIKrzMMYA4YygdFsd1si16n2Lr4I5Rfp1lxwadC3jThiJbE-cDyuZATHtvM2bzpsRDPJzbmKgtxYydHykiB-oCsSFqfwy50gw" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "2745d195a40b86f1", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "377" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:57 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" 
- ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpGeWJRYTNsu57knuRFhwHuoeUMIJhUtuQj4PLhGDoFcITB2Y--7JgaOCVhepJ6a5RRKvU9d3UrAyrtbGNOVHyW2WAbN_KpfkiFhYq8l7HPERpLDjg" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" - } - }, - { - "ID": "5258a70437064146", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "377" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:57 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqImoOCe_iDYCV8XHpaaEXPn-jeAf0M1MyixGju1tNCSTBIMEKDzh_wxVxapMmXWPnOes7YX0T0Fr663x7eJCl85B0EGXggUgp8bqeDV3cJ-mqeDrY" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" - } - }, - { - "ID": "5cef2c464b7356c8", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - 
"StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:58 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpH5br-AVWN2g1nkkM_FJWZIjgOgh2NGLuiyYUS7QD2DSs6B-c0IQHgC2NUyy0eZq4McQhwxBGhUJ05-WeD_bX35i7skN7Jn_q3ELAaDwpi6_x3Z8c" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "11cd5140a264e423", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "377" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:58 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", 
- "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo2_tsxxuZC3Ni-A0zL0E9w8OVne0sWfLi61Cv48eTWj2R4F6G0AmXZU7J8L-WnDJf4s-6GpZV0SyQqrjBA_Mp75pJfWb6cl9_7oimTy8HPnxAtMwc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" - } - }, - { - "ID": "dac6629e58d45469", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:58 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrTXUzCa6WOvWD6NqoHhkdwRtG0Z4h0QurHOuKU1gyAWzXOt8lc1NVsLqe-6m8siE76k7l8g0EY3Le273_4GFB81gHMhM4qBOOgbwSBgLl4zKHZspU" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have 
serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "1c8663192ed534e8", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2370" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:59 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:59 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": 
[ - "AEnB2Uqfc2TzyVBAwGuVpWWQt7OYr3IQEmVj5R7LIr5LSRAhwFernkoD6TFVnJno_ria4LL8P9tSQgzCF7uJpStJTpYgTOiW5FqTxQ-AgbOjFmAMjah3o6U" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" - } - }, - { - "ID": "51858ae7ea77845a", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - 
"Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2370" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:59 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:59 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqc5V7vaPL-d4aMLJi5QXyxZYWWtGBx0QuvfeB8rFlhwPGPFLQUABxm_XXkR0AOeIqdZ-intSu7I5srWdFKX85aY8r-z4oRTuB-CjGUe5uMiXQzXU4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aW
NlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" - } - }, - { - "ID": "8248d1314da9a068", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12203" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:59 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:59 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqrg4CYWnZtCkMXT5KHCcQXY-aSGc6JuuEoVkTtKgObB9qr-dE-5sMKCrbTfpFudhid3ulDpkNuOUmRmBy4k7QQoRxqgues_a8dB85cOybZKZTM864" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "8c1628782bcf7eaf", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2370" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:00 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:00 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Uqmer5wTHG4zziRJUR_QqUbX_xgytHZkMg2KFntbVz8pn55RFN7idAyYcqz3AhqthLD-bHH2Lggdg4MuIdjHVk_kKHxvDdqI0p814avmZPYAhJdvsE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" - } - }, - { - "ID": "6813d64060667ba0", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - 
"Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13059" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:00 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:00 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpsvYC9PE_CO5H9MncFfA0uYzQqFBOqu0_zdexcxDTJ22CO_vv1LiRY6ZSC49_FMeUZBA3KdMfOnS7DsSxBpqrLC9ZM6GAs2sFolmZKhp0RWJjvOzs" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "76ac0290041d5a26", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:01 GMT" - ], - "Etag": [ - "CAQ=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" 
- ], - "X-Guploader-Uploadid": [ - "AEnB2Ur-ejQ-G_PN4CMllngdiEh1SqHQNy8TXBA-htpazVSOadp1oThRsYsQuflX2kwkDQHFpeo1UgDPXqi5BRq8qZUBsM2koJIMOp-APWnB8T_qsTHlY30" - ] - }, - "Body": "" - } - }, - { - "ID": "0de0d173f3f5c2cc", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:01 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:01 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqLOtWcLW6yZk8n72e3qkdl4ft2-s7FJFYyRv_5REkk9Q6d7qk6Pcpcy_HMCJFgKTV5RZzzIU3k5hceknIV1XNGVWK7gOCFimyeb8sFjNZC7y7rypI" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "eb2af5bce5b9e1df", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:02 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:02 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqgaldc1OJvgmfZxcZs4i5d6EXMyFK93ZUJLzSK8Vo-qPYKRwhvPzK8GFv3gqrdyjBbvdc2Uz57nCwy10LhC5AD2pscln8VbaQIOAHqxvkwsqV27Rw" - ] - }, - "Body": 
"{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "ad55ad01147a8c62", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:02 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:02 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UoQpkkPNbb1CGY8lAzUfymkmZ7PNlRK6hK3deBeoMip13ARTb212DIgf4lgUDM8PVQsAqcnKwzNulbZICoGzHwrsBZfyi7p5TGX2yJHLXSAGEY0MBE" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "1ef9ea9b9bc398a6", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:03 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:03 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Ur5WrZwygTHM0dDH6L1kypZzaAQtCwxt9wA7VXvgnjwrfwdRnTw0D7K3VLYLRTPDcf8mRFn5CetkcimFxfqUDoWLMF7bH1TLiBr38K6aBU74aRWUd4" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "5c1cadcffc6c95e0", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "119" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:04 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - 
"UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo_n__u-lZKgZqveqge9YBw7wQMldCd4t0io7l_L_nlcF4DSPBPvZd9jGPCstE1EYuQS9L-Z3Y53Q15FlqM1IcmRDv6bYEJQWqkpuzCRRa_TLh9P7A" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" - } - }, - { - "ID": "4a615256ab8c55b4", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "119" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:04 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpwGIlBhvugAIHxiObS0RPpJ40OY_R3exJfPh5IHLLB3SrdYtGMpD01QLdxc3E0IPGkBiTrwM-Z2X1gMcxuOqJWJUrhV4xTFe_n5G8bBxzievz39F4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" - } - }, - { - "ID": "63c0acd95aa571a1", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UqNu5DFt-lpr-_4YX8LRIhoi3KVRYVgFAlG7WOPsY7IGJ2NLZSF3SQhmkvfL_IKZ0FWIwx1nGT6HBBe9t2RYXNE1kXis-9KgEaAWhrEByTud5QGx5c" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "3c6b1be88134f9d9", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "119" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:05 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": 
[ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqzTVWCTbjlM7VCFWFbOojrtt8ZMY9bRy0eXj0Uxj3gaYalvwqR0WBrtbX0iGQ-aN5pv3-Acc7EWIoHBa37YbMpx9fuoJUpTswmdpiCOH3wbMN7tIk" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" - } - }, - { - "ID": "60cde48b740e9a63", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrLyiKgPsp1RDgH3Q2SGlQAwTvGJwVDvb9U9FfvTysIFo6sO-5AoXrGPQfZpr80aMOEKojmPksURjhsteWSV9gdPsOlPIJMorxvQMHeomMCh-Bgmuo" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "95b915570e7c2375", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "684" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:05 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - 
"X-Guploader-Uploadid": [ - "AEnB2UrOZjNI5VbLPTLuBwhSAdcgxvqgcFpH-ED4XyEgIbnndmtC_zJ0lkLfudymSRpdRE4NBxn1Vp-AmZnb1ONsP2I1GBnLYfpYRbzFQpmihPrgBkDnaB4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBVT0ifV19" - } - }, - { - "ID": "e4170c637b705043", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "684" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:05 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqXQonYrNQZxcXoDllGz19Ruu-Eq1_3FVjMndISB0h1O1ojodwFr2Xor2nZHBZgv3NN_YOSrunnT_KYTfBFNdqtVvqVC2L19Qhwk-cxse_FqrMVbIM" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBVT0ifV19" - } - }, - { - "ID": "a3918deffd2f8f0f", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false", - "Header": 
{ - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12203" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:06 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:06 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpCCE2tr_4iWFlwmQkFVvzfskSWvZdvohQYV52oXF-tWUfie6Brp5TYrvUYx7jH8zsTg9qqk_kv0th8pOA1WsHahBUbDYuaqAmW0EpSJI6_xjBPdZ8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "42a941620757dac5", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "684" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:06 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:06 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Uoqov2civPKG8oSKmpVgUSoOD4W3rev4av_O6jgxkc5tSpPMgtxZeG3NznVRuFJLgClao2zPH8BvZIxTAAPUi_R-H0zgPz6ou8k9SMzLrcgW_HW100" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBVT0ifV19" - } - }, - { - "ID": "ae79f2e22d58681a", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13059" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:06 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:06 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqP_EhJin_8uqVkiSwAv0jBAI8A0o7nSwNAQ-Z_EdAnq2PItIGJZl5NOE_xcDTFce_ncHfsf6LvV4NMvuxiOsl32BOddZj_6x5dC36QqrKvhXKaQJ0" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat 
com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: 
integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "99becaf4890b3c21", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:08 GMT" - ], - "Etag": [ - "CAY=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": 
[ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpWMu40YNkuE3QcNByTmrKw_cnq4a8DVtcknBJ7gTnLdH1BC6CqoFOStPpa6y4PMmdpLpquozlOzvI4NWjgZYp1d9F_wBEyaq-wquTQAWGXNy3qktY" - ] - }, - "Body": "" - } - }, - { - "ID": "586ccc26d5a22712", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:08 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:08 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpsRO9GedosQ-kg2lCQpx2S6C0-HH592CdB3SU9yGMwEEGKX9md7KGeSDhecrm_6Ug6tyYtEMlsMq08YBAKGprXfcN9130mRolu_HVey3CcrXK1nZ0" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "fad4bdab588e0f3a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:08 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:08 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoUefKoF0asUvxzmAtX5z5m_5FRy9UfliKkjQN8cYyrHAotGwiQA72QN0lYfA0HXbe7gFRokwBAY8R5OpHcEdRq5K9sw8AwHuNtruFtBMxjN0D3hR4" - ] - }, - 
"Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "23bb9ce53da2f525", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:09 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:09 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Upa2KE5JJOXq2FOn7RHasJpOeeHqWU2kk3BC4cQcGQ9jGzh6XIksN6Z6u1NVoCbxW7-3zGiZMdAop-oa_EuyPAauD__JgzNPdngeNeXO7lsGaVI6i8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "46482e47e655836d", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:09 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:09 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Urze42blCAS2Dskbf5jkbCkdo5IhfOjcTlcufV1ooZd5x_QD0zK1gRoXLXfvLjKHEKaJ7cuDeSRmNjuqgEoxJFMmXUoPOIetWLYbH0G-lJcM6tydew" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "856a1fb5ffdb000f", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "463" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:09 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": 
[ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoFSnYuPlv_JejZ0nEjoWRXeLwj0MS-ddTlIaSOjaK5BudyIcIOeVDEeyFYWsF-GCZzMC0F7v-UEw0_lbsYBXCMZcLp6WZu_s3OPDREhpz2s3Txw-s" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" - } - }, - { - "ID": "a74c3e1b541a9ffc", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "463" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:10 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ury3MiMZho3xaSHmFgwHNvKIHTa0_gZzfKvX8FW6iVGNkdSbXvmVypIUuIoKXDjd4rOXpFghdoJlES9A_iXUvZyfWAZLvWqR1w6sDRymrIARVd0dPE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" - } - }, - { - "ID": "913da4fa24ea5f1f", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": 
[ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:10 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoKAxObyNBS_Mew44t3k1u80cLBGQW_I2ohrF96rIjZRxFscjGFrvsOHfLxw78rTRnpHmfA0uXnNz8T2FPc_Op16Lnn1YkfyOezSEhDBx11qZbinEE" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "5dbdf24cae75e565", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "463" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:10 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - 
"Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpoJgRZ665WjiUif7SnQ-Vg4B0kJl-UQ_kIk5XgmcQIL5zmCBp3NCxjksybkvNKpGcQnIJSLevfyWd2fugRv5y5vxmEBgqFffIy8ibTIyBDH4b7R74" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" - } - }, - { - "ID": "f41892a142fe6d2b", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:11 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpThInLTHhtepE7J4k-Rg6OoLRyOqnjPz4sZmfIH6wp5TSIyLrZQhExKAP52XbwUwMYp7x-xypSMY5kfR66I-dmpRWaFkfs0Lhy-Z6kqb5JUXSppqk" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat 
com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "5eb0710be2571dab", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2800" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:11 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAU=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:11 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - 
"X-Guploader-Uploadid": [ - "AEnB2UpbetUCGwYg4rhIR-DP3ntPqbkFwSqutNltVR55I4qedP-rZ5hC7ospMy6hZ4aQaNOhNv0frmst8MQZ0j6Mp8BcTC6qy7mU8bhEAiMGLycR5XxUWfI" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzd
C0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" - } - }, - { - "ID": "56aaafb0a24b0644", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2800" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:11 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAU=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:11 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrqQczDm-dzjgTlLFcJ2lXQrMTpQ_8UP-1hNbGj7eZEgZcbxWA-oyXqEe4tJXq3gYdZ3mEIcAzaDtNeBHWidpIev7kdXWank04_JSWbQtU2UomJqeU" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJi
dWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" - } - }, - { - "ID": "3bbc40d778939612", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12203" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur5kJWpLwxz76BKPll75-nOsLv_uoh7X-TW0SbNHCYEzkGq8VZ-W8a5tKGUF6rKea4ejr4MhcqJVr1Zu7O-zNAfQWJMsHC9CTkOrPa5Iu1s0KBJDdY" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "a2868e9e376e356b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2800" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAU=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UoyZfAJB3TroGxzYv0gy7dHUwpU0jtIeU_EyQSFQKzHlHbyRa33r_a-B1aHw5tHaoawuDvv7skisdx10yhmm9ZRrPw0ibwpMkX5Cwhf0OK85DF84jM" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzM
jAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" - } - }, - { - "ID": "ffb216dc74ccc27e", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13059" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqBumxgw_FTJlt7J3xLsBDxljAcEyC2G5aQqhEibawMttB-ogPabqDfz8ZN65WtOikIggZQ8OqW8zigV4Q5vfQkog7NMasAuivcMjhAsgqv_6qWv_E" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
... 17 more] (remainder of the repeated server-side stack trace omitted); the entry closes with code 403, message: "integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."

[Recorded HTTP replay entries, condensed: method, URL parameters, response status, and error essentials are kept; response headers and the repeated "debugInfo" Java stack traces in each error body are omitted.]

All of the following DELETE entries target
https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json&prettyPrint=false
plus the noted userProject parameter:

- 183157f40c6c1bcd: no userProject                            -> 204 No Content (ACL entry deleted)
- 4b6b925053570053: userProject=deklerk-sandbox               -> 404 notFound, ACL_SCOPE_NOT_FOUND: "Specified ACL scope was not found"
- 9b4f9c448170e89d: no userProject                            -> 400 required, USER_PROJECT_MISSING: "Bucket is requester pays bucket but no user project provided."
- 692d41cf9ea91eb2: userProject=gcloud-golang-firestore-tests -> 404 notFound, ACL_SCOPE_NOT_FOUND: "Specified ACL scope was not found"
- db035a3cc51de007: userProject=veener-jba                    -> 403 forbidden, USER_PROJECT_ACCESS_DENIED: "integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."
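Taken together, these five recordings capture the requester-pays contract of the Cloud Storage JSON API: a non-owner call without userProject is rejected with 400 USER_PROJECT_MISSING, a userProject whose caller lacks serviceusage.services.use on that project gets 403, and a valid userProject is billed and the request proceeds (here to a 404, since the ACL entry had already been deleted). Below is a minimal sketch of the client-side calls that would generate this traffic, assuming the cloud.google.com/go/storage package these fixtures were recorded for; the bucket and project names are illustrative, not the fixture's.

```go
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // uses Application Default Credentials
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Illustrative names; the fixture's bucket is a generated integration-test bucket.
	bucket := client.Bucket("my-requester-pays-bucket")
	entity := storage.ACLEntity("domain-google.com")

	// Without userProject, a non-owner caller gets the 400 recorded above
	// ("Bucket is requester pays bucket but no user project provided.").
	if err := bucket.Object("foo").ACL().Delete(ctx, entity); err != nil {
		fmt.Println("without userProject:", err)
	}

	// UserProject adds userProject=<project> to requests made through this
	// handle; that project is billed, and the caller needs
	// serviceusage.services.use on it, otherwise the API answers 403 as above.
	billed := bucket.UserProject("my-billing-project")
	if err := billed.Object("foo").ACL().Delete(ctx, entity); err != nil {
		fmt.Println("with userProject:", err)
	}
}
```

Note that BucketHandle.UserProject returns a derived handle, so the userProject query parameter is attached to every request issued through it rather than being set per call.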
"apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoVcH2L0r9wGab--fnRuKptLRoUoHCUHfRegh3rKNQExHVsYLnt1NFwOKFa_6hRbeYXO1aY8-F-KiD6IY3sWykIKXr5iqelEJHTWEMQ8wHuzIcWLO8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTUyNjYyMTQiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTUuMjY1WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE1LjI2NVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNS4yNjVaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE1MjY2MjE0JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTI2NjIxNCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1MjY2MjE0IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0tieHo5M3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNTI2NjIxNC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTI2NjIxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW
0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1MjY2MjE0IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0tieHo5M3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In19" - } - }, - { - "ID": "297f13e741cceb04", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3273" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:15 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UroLXQs6945-UOUyN7qOhICUNnGSzCVrNCv5DD9uMyvgD08zpuufBZj8WM8Fxe0DuOuQowqbPEn1UdJdbFTW7puyeY4zX8fM6UgT7SZIVB0Xi-B0OM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTU4NDE5OTciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTUuODQxWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE1Ljg0MVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNS44NDFaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE1ODQxOTk3JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTg0MTk5NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1ODQxOTk3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ00yRDg5M3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNTg0MTk5Ny9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTg0MTk5NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaW
FtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1ODQxOTk3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ00yRDg5M3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In19" - } - }, - { - "ID": "9617f04d2dc95a7c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12683" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:16 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqeJiJeDLLZgn9vlRyNk1plY-7dcPXaz47gPNAhMqmSx5ZMvg6pqWj18PVUR65UGuWE7Amtua_zqsv0dVyDZE2FFceMeIQcQ2n6noklvRS9kxPXcEU" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat 
com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "2309e069a65c894f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3333" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:16 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - 
"X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpxhvVZmlJMvR93BoI0JsL0TYbDN-kmlYHQOOlRfb-Nnqpc8iUImRtJT8Qvx0XWrOsALmLv4Oub_aeCCWkHPvqaF28oAazlhqRZYoxrPlaQlW0N1Qs" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTY2MzcxNjUiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTYuNjM2WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE2LjYzNloiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNi42MzZaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE2NjM3MTY1JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNjYzNzE2NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE2NjM3MTY1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ08zSG85N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNjYzNzE2NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNjYzNzE2NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYw
MTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE2NjM3MTY1IiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ08zSG85N3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In19" - } - }, - { - "ID": "d2fe101ea1da654d", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13539" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:16 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqjFigs6aYtqXW_7VucOIgJY2MQZwuLv9B12s2ffhWgWDxtJyjLinG4A8U9gKBCpiTt9jUvdHKtaG20qZfyfN9Q1eVAkUQh_zv7ESbqnWhsiheI7zw" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "6620f07aff04b6fd", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "127" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "742" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:17 GMT" - ], - "Etag": [ - "CP/B0t7x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - 
"no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpyWMaddvNz0IWnQ6KIl-tv6fJZCvrqTlGuaoHvKqXhxb3O1EKUIEirKSVcaedFZyxtjLhPD7Nj6qf1CRGWG1VnVigo5Wf3sYFDuuLvctxVpoHT0rY" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTc0MDY0NjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNzQwNjQ2MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNy40MDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTcuNDA2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE3LjQwNloiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTc0MDY0NjMmYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNQL0IwdDd4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "7f88847ad9c49799", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "127" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "742" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:18 GMT" - ], - "Etag": [ - "CKav+N7x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urtn69BB0ytbCZpuCbus4w__tiLeBpqeNzD2gKl5KN2LgP9ZJOIjudGKpDOJRhyW_WQjVOCJBC1CSB_AgE0MdPzVDAKWt13VfQ8aKe3pMWh4y1kOlI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTgwMjY2NjIiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxODAyNjY2MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOC4wMjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTguMDI2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE4LjAyNloiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTgwMjY2NjImYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNLYXYrTjd4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "a21f3ddca57ed424", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "127" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12267" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:18 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrE9jrLJ0W1PUHWT3ldYhVsL9nNhAeFPU2K8g2rCMimty07QlN95esxj7dBAySQ-nzvK17L3WA7vjmxjCpeQADa2bztj6ke8NZjb8fuWkQHsnnzKXA" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "08fda3fa173e6e11", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "127" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "742" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:19 GMT" - ], - "Etag": [ - "CKy2sd/x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - 
"UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqfGXVJ2-MQaZWJ-JS6runpSycA3GDKIXYHDGf3VBOXnADbUzL2YhZVmlQcW_95jXY0eMkK4Z5tSRk3cxGP_84T_1t3YDYQS2p37ZouBk7GfX-4xxw" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTg5NjE0NTIiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxODk2MTQ1MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOC45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTguOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE4Ljk2MVoiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTg5NjE0NTImYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNLeTJzZC94L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "0c9e19bd4ec37de2", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "127" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13123" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:19 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqGREJ2AZDcrZiJzPX5ltwlB28vRYhZ_xv4LMIO3La2IgFWk4wd5IVrqBs3z2epgtg7wkPpZm4qPH9zTzSIsXNxdLLHcNMMYr2azidefxtJVgXqgDY" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "84daee26f7d989ce", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3112" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:19 GMT" - ], - "Etag": [ - "CLmF4d/x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": 
[ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoLuw-z5wwTKy-X92_i7pyTrimolUNRUGrElhep6rUH_mmpgu_Vxp-1ncr8x8XNXHLs8NFRW5S7WVUsaDRRTrC5KBPea4EoFLANK8MFY0FpTkXr7_g" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxOTc0MTYyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOS43NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTkuNzQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE5Ljc0MVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkxOTc0MTYyNSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE5NzQxNjI1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTk3NDE2MjUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTG1GNGQveC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MTk3NDE2MjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE5NzQxNjI1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjd
EFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTk3NDE2MjUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTG1GNGQveC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "8457a5701f4ced08", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:20 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpKF1M6PpAT354ON3809wMCor-B9KITRuEV34MQczP3OD4Co0sJYtNSub2FUzA8qanTDrtANRK0RuxPTz314o3HQro2HLOoyKZBow5CZ4sfsunKHv0" - ] - }, - "Body": "" - } - }, - { - "ID": "1c2ba8a7670e2218", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3112" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:20 GMT" - ], - "Etag": [ - "CP+SgODx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoq9hgjjwBbSH1zXN2UlNtWO31Qf5uk4ijjGCQlETB9_5H_CouvU3q4tyHAKEbIBlaW1_mNk3Fsp6LgXqsPPKvFzs3-WHeADAgc2-GpD-0inoYUHMs" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2MyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMDI1MTI2MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMC4yNTBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjAuMjUwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIwLjI1MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMDI1MTI2MyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwMjUxMjYzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjAyNTEyNjMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUCtTZ09EeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjAyNTEyNjMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwMjUxMjYzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5
kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjAyNTEyNjMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUCtTZ09EeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "7e6d2f97bd881584", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:20 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UohQH2nTuRg_FtGhlfr9zC26C6lKFA-aZTf4GbiV5kWZCJ9E4YLHfIUKLpCLIgEfX9TAvi4oPNWfEBpbY-orlescLxrxYcB1U5fLmzzHQR2R0teR-s" - ] - }, - "Body": "" - } - }, - { - "ID": "48e189abff98ea93", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3112" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:21 GMT" - ], - "Etag": [ - "CM3Aq+Dx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Uq_NscG9s3iq-8NZq8vQhzdc9FJMaAfq_0GborrX84b8gG2zpbUNtEVnrjFjLyOXqBeJwthPgPUPcsGbIcD597LaPlxt-VNaetM2BLtMtYry8pPeQ8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMDk2MTYxMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMC45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjAuOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIwLjk2MVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMDk2MTYxMyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwOTYxNjEzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjA5NjE2MTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTTNBcStEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjA5NjE2MTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwOTYxNjEzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nb
GVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjA5NjE2MTMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTTNBcStEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "02d6922c26c06cbd", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:21 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:21 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrmjSJHhErkbzi8feJ_-8XOMqsIzGrEe6frQzWXQ7dPl0D8MW1mMHClKdwb2zBOTvpx7mXQWy2STZYSqx2o7O1Tr2DB1RZfwCZ3xp1jgw3NCICi7Xw" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "3fee93fca0ef933c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3112" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:21 GMT" - ], - "Etag": [ - "CLfH1eDx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - 
"apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqLO91a0iSaTsK7EA2iN621UX-atvJ-8m8BeqeR8_o_CQLB8Pco5FMXznwQY7DZ8u0GPd1BvLE98s5S7T1Z6yL66OKL0CFFUntLBqNAKlji2Dcme6g" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMTY1MDYxNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMS42NTBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjEuNjUwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIxLjY1MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMTY1MDYxNSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIxNjUwNjE1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjE2NTA2MTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTGZIMWVEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjE2NTA2MTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIxNjUwNjE1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA
1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjE2NTA2MTUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTGZIMWVEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "5c26cf93373a296a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:22 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urx3fvI3sQc5JiIazK1OQw5W12poVdjRyBKxIBM6N98jEJli9F0O33KVHHcHaP-QybHub31QmLNngZpzcVKJq33DSaVMW2Vpi5BUaVAAUL2jMoyOxs" - ] - }, - "Body": "" - } - }, - { - "ID": "f8b5020ef71e4047", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3112" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:22 GMT" - ], - "Etag": [ - "CIungeHx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ 
- "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq8QEz45d-uPa7qZ6Nl_SMqw2iepQXoM4aAkTx4YCqArXnSKI1UO3ZhJ5-_PDg3Uq6PPa2JDBTbxckiZCRMQvf0Cv6E_8Cwk3jKXGkneWFJYSeMAts" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMjM2NzM3MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMi4zNjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjIuMzY2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIyLjM2NloiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMjM2NzM3MSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIyMzY3MzcxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjIzNjczNzEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSXVuZ2VIeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjIzNjczNzEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIyMzY3MzcxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bn
QuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjIzNjczNzEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSXVuZ2VIeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "7cfadd8c1e7c0bd8", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:22 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:22 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpjVnPaklKmbO8qV2lapwEIAwPjvf4HSJs3kNHHO_xZH4bV65pVPLKCb2qjMPCYSwEKRZzH6NpYjm6WlxsBdJnfBvpyRpIR017q9iJDyBcVPNUoDn8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat 
com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "b1381b9995cf35f5", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:23 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UrD1hmfKK3yAwFXKqIwvCsEVMb_e7MWysJc7zbyVjZ_sbD9P41lzc0gYSef9yl8CRa6B0RtPq14V5MBSdy6C0i_cAkgANDN1da3O5QKELvACU11moU" - ] - }, - "Body": "" - } - }, - { - "ID": "5e06ccad06bbd110", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:23 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur2FRcf4QGZH2KhkP3kATb9eOy4S9lgrA3NQXYqhc1KBZDbcZN5NHF4MB6WiTxOraNE0HhkW3SOg8sp1gS4kAcWRZ2G0EGWmLgL6RUcwr3a2O-WY10" - ] - }, - "Body": "" - } - }, - { - "ID": "b637ccaa36faa55b", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:23 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqGzSAdUK8pPs53JjaSJVQJObMJRsfQ5FIpSL0D4tNfft68cfE8Xb5fjSsAQG_PfmKZyX1Ind-s6dW6FgwREeHzu_OnEeOwCOaT8EZi2N4vNeWLvac" - ] - }, - "Body": "" - } - }, - { - "ID": "15055c361faad5c1", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:24 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" 
- ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqwmXVRTmXX3P5iId4-EjuA1oviIQD-Vbdr7M5rgd7d5IxNvmPVkZz_zFyMBNoHd_DzXnPMN5R7grAe1yDdOzbyafIBwRcFsLh4r0a7ghp4LkNSLZk" - ] - }, - "Body": "" - } - }, - { - "ID": "307f8a44d4ce6e9f", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "32" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:24 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:24 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpaHmcEJBKGI7CtW9fK8-j5Z-oRk_dTwMS40wVaoNH1PN7k6x6d_9aD3w3ce6WGZJF5OYxkdoifp6L2vVz5lIGjzcmmIZ4Wxp1vktBOv8hLa417Mp4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIn0=" - } - }, - { - "ID": "f8df88961018be68", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "121" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJwYXlsb2FkX2Zvcm1hdCI6Ik5PTkUiLCJ0b3BpYyI6Ii8vcHVic3ViLmdvb2dsZWFwaXMuY29tL3Byb2plY3RzL2Rla2xlcmstc2FuZGJveC90b3BpY3MvZ28tc3RvcmFnZS1ub3RpZmljYXRpb24tdGVzdCJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "297" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:24 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqWiAA6LvnU4rExe2rVTwwFd51SI93o_d2tOj9OY1jk_qe1Yd1eiTXIa2eNecJQsmgwuFr4BUQoWy8ndutrCqGucmVIRL0jA6-i-vaGGVRF8uEe0AM" - ] - }, - "Body": 
"eyJpZCI6IjExIiwidG9waWMiOiIvL3B1YnN1Yi5nb29nbGVhcGlzLmNvbS9wcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvdG9waWNzL2dvLXN0b3JhZ2Utbm90aWZpY2F0aW9uLXRlc3QiLCJwYXlsb2FkX2Zvcm1hdCI6Ik5PTkUiLCJldGFnIjoiMTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvbm90aWZpY2F0aW9uQ29uZmlncy8xMSIsImtpbmQiOiJzdG9yYWdlI25vdGlmaWNhdGlvbiJ9" - } - }, - { - "ID": "45f44fa6c3668273", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "340" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:25 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:25 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoFWgB9V-ilyDIBP7KEF3SY7uTg-rikYE1ooEgWYkiXb2cW6oOwwtH1W7oe24WU2A9oyJIGOSskz3_icHqmMn7CfPoZQ-Lu5QdtZzv_5062_-UzAt0" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIiwiaXRlbXMiOlt7ImlkIjoiMTEiLCJ0b3BpYyI6Ii8vcHVic3ViLmdvb2dsZWFwaXMuY29tL3Byb2plY3RzL2Rla2xlcmstc2FuZGJveC90b3BpY3MvZ28tc3RvcmFnZS1ub3RpZmljYXRpb24tdGVzdCIsInBheWxvYWRfZm9ybWF0IjoiTk9ORSIsImV0YWciOiIxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9ub3RpZmljYXRpb25Db25maWdzLzExIiwia2luZCI6InN0b3JhZ2Ujbm90aWZpY2F0aW9uIn1dfQ==" - } - }, - { - "ID": "08c2546e2c262ee7", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs/11?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:25 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoTmXr5TZZPRnABz_XbbZpjwU3iPz2e802RkcmNFr3sLzUx205tdmAb6vVWMNlYMUGZ5tGoddlqQ_oPnz0Xdvpz8CiD2eDLMDyrMbxVFjc3BinwBo0" - ] - }, - "Body": "" - } - }, - { - "ID": "efc8b67f9c2f4b40", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "32" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:25 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:25 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UraMBYT56qFvKLe_OPJfpJRhTy2qSpQyRjP6CScOnnijgaQ88eQtVahktX_Vsvy-G7J11d7GNT64FnpqZrewu1sUmuNNLiEtmkqVFILdNcWed6i1w4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIn0=" - } - }, - { - "ID": "6dd92f9ce3b15a57", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "7903" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:25:25 GMT" - ], - "Etag": [ - "\"7a5fd4743bd647485f88496fadb05c51\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:25 GMT" - ], - "Last-Modified": [ - "Tue, 04 Oct 2016 16:42:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1475599327662000" - ], - "X-Goog-Hash": [ - "crc32c=PWBt8g==", - "md5=el/UdDvWR0hfiElvrbBcUQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "7903" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpedRCcxIOhXCWdipyV2E0R3z00CUlOK6rPlof1gpKuQbLeJmvMoPFn28o8zqmmeVJ5rbX41bB6Hp116-_ISgEXl4Htmc1VS0Aq41lJQiN_mIvozbY" - ] - }, - "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" - } - }, - { - "ID": "914cb60452456134", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2F\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "12632" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:26 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:26 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpLE4EijIrGqoHRLrCqmprZtxBU2JtHLLsZ-nIakblnuZX8s6FZA1SEaczyybdjB32loN1-gVCf83PFM4x06S24ATKp9-xRgQ9QNC9fwVH_ec4e9JQ" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B1.TIF/1475599144579000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B1.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B1.TIF","bucket":"gcp-public-data-landsat","generation":"1475599144579000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:04.545Z","updated":"2016-10-04T16:39:04.545Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:04.545Z","size":"74721736","md5Hash":"835L6B5frB0zCB6s22r2Sw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B1.TIF?generation=1475599144579000&alt=media","crc32c":"934Brg==","etag":"CLjf35bLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B10.TIF/1475599310042000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B10.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B10.TIF","bucket":"gcp-public-data-landsat","generation":"1475599310042000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:50.002Z","updated":"2016-10-04T16:41:50.002Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:50.002Z","size":"58681228","md5Hash":"BW623xHg15IhV24mbrL+Aw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B10.TIF?generation=1475599310042000&alt=media","crc32c":"xzV2fg==","etag":"CJDn0uXLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B11.TIF/1475599319188000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/
LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B11.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B11.TIF","bucket":"gcp-public-data-landsat","generation":"1475599319188000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:59.149Z","updated":"2016-10-04T16:41:59.149Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:59.149Z","size":"56796439","md5Hash":"FOxiyxJXqAflRT8lFnSdOg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B11.TIF?generation=1475599319188000&alt=media","crc32c":"p/HFVw==","etag":"CKCEgerLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B2.TIF/1475599161224000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B2.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B2.TIF","bucket":"gcp-public-data-landsat","generation":"1475599161224000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:21.160Z","updated":"2016-10-04T16:39:21.160Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:21.160Z","size":"77149771","md5Hash":"MP22zjOo2Ns0iY4MTPJRwA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B2.TIF?generation=1475599161224000&alt=media","crc32c":"rI8YRg==","etag":"CMDW157Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B3.TIF/1475599178435000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B3.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B3.TIF","bucket":"gcp-public-data-landsat","generation":"1475599178435000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:38.376Z","updated":"2016-10-04T16:39:38.376Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:38.376Z","size":"80293687","md5Hash":"vQMiGeDuBg6cr3XsfIEjoQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B3.TIF?generation=1475599178435000&alt=media","crc32c":"uZBrnA==","etag":"CLiT8qbLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B4.TIF/1475599194268000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B4.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B4.TIF","bucket":"gcp-public-data-landsat","generation":"1475599194268000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:54.211Z","updated":"2016-10-04T16:39:54.211Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:54.211Z","size":"84494375","md5Hash":"FWeVA01ZO0+mA+ERFczuhA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC804403420162
59LGN00_B4.TIF?generation=1475599194268000&alt=media","crc32c":"Wes5oQ==","etag":"CODCuK7Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B5.TIF/1475599202979000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B5.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B5.TIF","bucket":"gcp-public-data-landsat","generation":"1475599202979000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:02.937Z","updated":"2016-10-04T16:40:02.937Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:02.937Z","size":"89318467","md5Hash":"p4oyKHAGo5Ky3Kg1TK1ZQw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B5.TIF?generation=1475599202979000&alt=media","crc32c":"pTYuuw==","etag":"CLiZzLLLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B6.TIF/1475599233481000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B6.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B6.TIF","bucket":"gcp-public-data-landsat","generation":"1475599233481000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:33.349Z","updated":"2016-10-04T16:40:33.349Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:33.349Z","size":"89465767","md5Hash":"2Z72GUOKtlgzT9VRSGYXjA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B6.TIF?generation=1475599233481000&alt=media","crc32c":"INXHbQ==","etag":"CKjykcHLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B7.TIF/1475599241055000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B7.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B7.TIF","bucket":"gcp-public-data-landsat","generation":"1475599241055000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:41.021Z","updated":"2016-10-04T16:40:41.021Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:41.021Z","size":"86462614","md5Hash":"8gPNQ7QZoF2CNZZ9Emrlog==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B7.TIF?generation=1475599241055000&alt=media","crc32c":"uwCD+A==","etag":"CJiW4MTLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B8.TIF/1475599281338000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B8.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B8.TIF","bucket":"gcp-public-data-landsat","generation":"1475599281338000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:21.300Z","upda
ted":"2016-10-04T16:41:21.300Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:21.300Z","size":"318887774","md5Hash":"y795LrUzBwk2tL6PM01cEA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B8.TIF?generation=1475599281338000&alt=media","crc32c":"Z3+ZhQ==","etag":"CJDt+tfLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B9.TIF/1475599291425000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B9.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B9.TIF","bucket":"gcp-public-data-landsat","generation":"1475599291425000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:31.361Z","updated":"2016-10-04T16:41:31.361Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:31.361Z","size":"44308205","md5Hash":"5B41E2DBbY52pYPUGVh95g==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B9.TIF?generation=1475599291425000&alt=media","crc32c":"a0ODQw==","etag":"COjB4tzLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_BQA.TIF/1475599327222000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_BQA.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_BQA.TIF","bucket":"gcp-public-data-landsat","generation":"1475599327222000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:42:07.159Z","updated":"2016-10-04T16:42:07.159Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:42:07.159Z","size":"3354719","md5Hash":"zqigvl5Envmi/GLc8yH51A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_BQA.TIF?generation=1475599327222000&alt=media","crc32c":"WOBgKA==","etag":"CPCx6+3Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt/1475599327662000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_MTL.txt","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt","bucket":"gcp-public-data-landsat","generation":"1475599327662000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:42:07.618Z","updated":"2016-10-04T16:42:07.618Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:42:07.618Z","size":"7903","md5Hash":"el/UdDvWR0hfiElvrbBcUQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_MTL.txt?generation=1475599327662000&alt=media","crc32c":"PWBt8g==","etag":"CLCfhu7Lwc8CEAE="}]}" - } - }, - { - "ID": "3f3f45cd2b718e17", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/noauth", - "Header": { - "Accept-Encoding": [ - "gzip" - ], 
- "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "247" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:26 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:26 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqsZlmkTyf2_fqITeaIiM8s2MLUvz5qFiP_rzA4Mf6Q9LMxsiQeP-GBRwHON_XvnG2qef3XL1EzgTLA_GxtLKZRjdzOBndJPf9XbJa9KjJCIPlducQ" - ] - }, - "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+QWNjZXNzRGVuaWVkPC9Db2RlPjxNZXNzYWdlPkFjY2VzcyBkZW5pZWQuPC9NZXNzYWdlPjxEZXRhaWxzPkFub255bW91cyBjYWxsZXIgZG9lcyBub3QgaGF2ZSBzdG9yYWdlLm9iamVjdHMuZ2V0IGFjY2VzcyB0byBnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvbm9hdXRoLjwvRGV0YWlscz48L0Vycm9yPg==" - } - }, - { - "ID": "110cdd2daefc6162", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoibm9hdXRoIn0K", - "Yg==" - ] - }, - "Response": { - "StatusCode": 401, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "30405" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:26 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "Www-Authenticate": [ - "Bearer realm=\"https://accounts.google.com/\"" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrGgfg36ZmqdKbc0NqBivwXULVnF2NuBAD31rJqzC5Rj9xgHFbRnwxJCbdxCuUdlAhGW_-H88mBadNGmqch0fmAyAd80HPANBkUmIgkolsL4HK7XAU" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.","locationType":"header","location":"Authorization","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={WWW-Authenticate=[Bearer realm=\"https://accounts.google.com/\"]}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.auth.AuthenticatorInterceptor.addChallengeHeader(AuthenticatorInterceptor.java:269)\n\tat com.google.api.server.auth.AuthenticatorInterceptor.processErrorResponse(AuthenticatorInterceptor.java:236)\n\tat com.google.api.server.auth.GaiaMintInterceptor.processErrorResponse(GaiaMintInterceptor.java:768)\n\tat com.google.api.server.core.intercept.AroundInterceptorWrapper.processErrorResponse(AroundInterceptorWrapper.java:28)\n\tat com.google.api.server.stats.StatsBootstrap$InterceptorStatsRecorder.processErrorResponse(StatsBootstrap.java:315)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.handleErrorResponse(Interceptions.java:202)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.access$200(Interceptions.java:103)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:144)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:137)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.setException(AbstractFuture.java:753)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:68)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\nCaused by: com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: 
cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\t... 
20 more\n"}],"code":401,"message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth."}}" - } - }, - { - "ID": "848c7013eb1665b1", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "7903" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"7a5fd4743bd647485f88496fadb05c51\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 04 Oct 2016 16:42:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1475599327662000" - ], - "X-Goog-Hash": [ - "crc32c=PWBt8g==", - "md5=el/UdDvWR0hfiElvrbBcUQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "7903" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqbrgn51bDFbwZ1c2_BxCHhog7If9w6ooKAtb2YCerQcObpiFJZqT3-Jn7zTXEEPVuysmxKw4PmvEOmkbCAJVkmbEVZm6z877JKFhrXTrkWqWYooq8" - ] - }, - "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" - } - }, - { - "ID": "a5f818071a0f22da", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "7903" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"7a5fd4743bd647485f88496fadb05c51\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 04 Oct 2016 16:42:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1475599327662000" - ], - "X-Goog-Hash": [ - "crc32c=PWBt8g==", - "md5=el/UdDvWR0hfiElvrbBcUQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "7903" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrozB-0-kEtkjITLFZA2uQpw77J_Zc6GErrYrIyxkPTWeUHJNBRLw4JXhyIEFG8szc7bhqblQVKoKYiZ4myOcfL5zIM9KYGIbCyAP9e4sEEdx2pBe0" - ] - }, - "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" - } - }, - { - "ID": "55eeb942e5603431", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", - "Header": { - "Range": [ - "bytes=1-" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 206, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "7902" - ], - "Content-Range": [ - "bytes 1-7902/7903" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"7a5fd4743bd647485f88496fadb05c51\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 04 Oct 2016 16:42:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1475599327662000" - ], - "X-Goog-Hash": [ - "crc32c=PWBt8g==", - "md5=el/UdDvWR0hfiElvrbBcUQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "7903" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqlAytrsQn43Os8JxSOx4C8v9ApqqtqwEE7kZ-voKAcmcYe32lG7ANHxzNrwkqN8bbLLohoAHd88brZDVaC3U6Q01dhBBoeDFnlkCzHKUJjA8ZWrgM" - ] - }, - "Body": "ROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
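The RADIOMETRIC_RESCALING and TIRS_THERMAL_CONSTANTS groups above carry the coefficients for the standard Landsat 8 Level-1 conversions: top-of-atmosphere radiance L = M_L * Q_cal + A_L, and at-sensor brightness temperature T = K2 / ln(K1/L + 1). A minimal Go sketch of how a consumer of this metadata might apply the band-10 values — the function names and the sample pixel value are illustrative assumptions, not part of the fixture:

    // Not part of the recorded fixture: a sketch of the standard Landsat 8
    // Level-1 conversions, using coefficients copied from the metadata above.
    package main

    import (
        "fmt"
        "math"
    )

    // toaRadiance converts a quantized pixel value (QUANTIZE_CAL range 1..65535)
    // to top-of-atmosphere spectral radiance: L = mult*qcal + add.
    func toaRadiance(qcal, mult, add float64) float64 {
        return mult*qcal + add
    }

    // brightnessTemperature converts thermal-band radiance to at-sensor
    // brightness temperature in kelvin: T = K2 / ln(K1/L + 1).
    func brightnessTemperature(radiance, k1, k2 float64) float64 {
        return k2 / math.Log(k1/radiance+1)
    }

    func main() {
        const (
            mult = 3.3420e-04 // RADIANCE_MULT_BAND_10
            add  = 0.10000    // RADIANCE_ADD_BAND_10
            k1   = 774.8853   // K1_CONSTANT_BAND_10
            k2   = 1321.0789  // K2_CONSTANT_BAND_10
        )
        qcal := 30000.0 // hypothetical pixel value, for illustration only
        l := toaRadiance(qcal, mult, add)
        fmt.Printf("radiance %.4f W/(m^2 sr um), brightness temp %.2f K\n",
            l, brightnessTemperature(l, k1, k2))
    }

With these band-10 coefficients, a quantized value of 30000 yields roughly 10.13 W/(m^2 sr um) and about 304 K, a plausible surface brightness temperature for this scene.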
" - } - }, - { - "ID": "320dc27adc377057", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", - "Header": { - "Range": [ - "bytes=0-17" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 206, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "18" - ], - "Content-Range": [ - "bytes 0-17/7903" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"7a5fd4743bd647485f88496fadb05c51\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 04 Oct 2016 16:42:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1475599327662000" - ], - "X-Goog-Hash": [ - "crc32c=PWBt8g==", - "md5=el/UdDvWR0hfiElvrbBcUQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "7903" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UovperPufphUvaf55r54Wd-USAbL2ZQVTseICyulStqI633iJcFBLryyqecsHQcoU2cXp4MsKgB8uQu979IXcnv-aGNm6viDydIrqmPqA7SmPElPGI" - ] - }, - "Body": "R1JPVVAgPSBMMV9NRVRBREFU" - } - }, - { - "ID": "a20a4e3b35dfd271", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Encoding": [ - "gzip" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"c6117833aa4d1510d09ef69144d56790\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 14 Nov 2017 13:07:32 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Accept-Encoding" - ], - "X-Goog-Generation": [ - "1510664852486988" - ], - "X-Goog-Hash": [ - "crc32c=T1s5RQ==", - "md5=xhF4M6pNFRDQnvaRRNVnkA==" - ], - "X-Goog-Metageneration": [ - "2" - ], - "X-Goog-Storage-Class": [ - "MULTI_REGIONAL" - ], - "X-Goog-Stored-Content-Encoding": [ - "gzip" - ], - "X-Goog-Stored-Content-Length": [ - "31" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq7491k1eCc7fe_JApsP1zDcSyo759KmHvh-9YHm9ekpOGaG8v1bZNPjaMEkikJSDYt_LkVMHrb9HTDx9vvDGy3Zm1kPrlxS4933Sw-Wdh35lDomi4" - ] - }, - "Body": "H4sIAAAAAAAAC8tIzcnJVyjPL8pJAQCFEUoNCwAAAA==" - } - }, - { - "ID": "d1941f2e08f3bc52", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - 
"User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Encoding": [ - "gzip" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"c6117833aa4d1510d09ef69144d56790\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 14 Nov 2017 13:07:32 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Accept-Encoding" - ], - "X-Goog-Generation": [ - "1510664852486988" - ], - "X-Goog-Hash": [ - "crc32c=T1s5RQ==", - "md5=xhF4M6pNFRDQnvaRRNVnkA==" - ], - "X-Goog-Metageneration": [ - "2" - ], - "X-Goog-Storage-Class": [ - "MULTI_REGIONAL" - ], - "X-Goog-Stored-Content-Encoding": [ - "gzip" - ], - "X-Goog-Stored-Content-Length": [ - "31" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoYIJ-m0VbRdjw7ZGI_0TiIfj6fcuhJkomWPdQGApFxH2_LnekHIdv7igEpJAM3a-zrTOzR20bzvc-JBunTe_-f_Hsyxz_VPxJNrQY7PSrQ3OMfEhQ" - ] - }, - "Body": "H4sIAAAAAAAAC8tIzcnJVyjPL8pJAQCFEUoNCwAAAA==" - } - }, - { - "ID": "2451a87df39e1241", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", - "Header": { - "Range": [ - "bytes=1-8" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "W/\"c6117833aa4d1510d09ef69144d56790\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 14 Nov 2017 13:07:32 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Accept-Encoding" - ], - "Warning": [ - "214 UploadServer gunzipped" - ], - "X-Goog-Generation": [ - "1510664852486988" - ], - "X-Goog-Hash": [ - "crc32c=T1s5RQ==", - "md5=xhF4M6pNFRDQnvaRRNVnkA==" - ], - "X-Goog-Metageneration": [ - "2" - ], - "X-Goog-Storage-Class": [ - "MULTI_REGIONAL" - ], - "X-Goog-Stored-Content-Encoding": [ - "gzip" - ], - "X-Goog-Stored-Content-Length": [ - "31" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Response-Body-Transformations": [ - "gunzipped" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoNfvi75DBdUL5KpenQbqbsYr5A8YSQp2lGAPIhe4FlijJP95WctTUrdrLIyq3riprP50HzntCuw7zh5ycZfqbJBkFbibeIwEp5bVDj7yVpJbqctiI" - ] - }, - "Body": "aGVsbG8gd29ybGQ=" - } - }, - { - "ID": "f0e47a86731e8924", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Range": [ - "bytes=1-8" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 206, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - 
"Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Encoding": [ - "gzip" - ], - "Content-Range": [ - "bytes 1-8/31" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"c6117833aa4d1510d09ef69144d56790\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 14 Nov 2017 13:07:32 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Accept-Encoding" - ], - "X-Goog-Generation": [ - "1510664852486988" - ], - "X-Goog-Hash": [ - "crc32c=T1s5RQ==", - "md5=xhF4M6pNFRDQnvaRRNVnkA==" - ], - "X-Goog-Metageneration": [ - "2" - ], - "X-Goog-Storage-Class": [ - "MULTI_REGIONAL" - ], - "X-Goog-Stored-Content-Encoding": [ - "gzip" - ], - "X-Goog-Stored-Content-Length": [ - "31" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upzmwe6UNntf6SxdYLZTz0aZiFJ7UANU6y7I2YKJbADGSVoCRe63OoxcC4uh-9n2JEnkvQgq0dwCHCbQ3qmYG4cWFEovG_fIMKFx6O11iPQUhLmeFw" - ] - }, - "Body": "iwgAAAAAAAA=" - } - }, - { - "ID": "2a14c414736e344d", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "168" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0In0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "593" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:28 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrgiftU7BDllYqI1jhKj6RbZME3LoakRb653ThaSSD_kzX1O5odabBcDFfG-oswR6YOcDZTW174qxbUaa2G1EvajUt_wJAD3ViGgoMwT_YjoTvObkc" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC4wMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "b03cb69547c1a6de", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "99" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiR0VUIl0sIm9yaWdpbiI6WyIqIl0sInJlc3BvbnNlSGVhZGVyIjpbInNvbWUtaGVhZGVyIl19XX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2528" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:28 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urw36CL-_RvF58GoVX1bJlTZ1YUUdWXJKQDIac7eTOYj5s65KYFsk2ndonwA5ro_9nGPfo4qRIYyt7J1Xxb01TiJUQPRXzH-z9T_S5UQE6fxOssN0g" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyIqIl0sIm1ldGhvZCI6WyJHRVQiXSwicmVzcG9uc2VIZWFkZXIiOlsic29tZS1oZWFkZXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "735da673f622341d", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2528" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:29 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:29 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urb-FeCpjdYOEj6HdkFXRMfVXffmMB8xjLqLwPvjgs3FghXs7r94UrXEpX0rW_CgEpjG6v6iHCWCkwxib31kFkLPMZMMZxHTy66dPT2AdBDsIE8Y8k" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyIqIl0sIm1ldGhvZCI6WyJHRVQiXSwicmVzcG9uc2VIZWFkZXIiOlsic29tZS1oZWFkZXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "97ce23a7211781ed", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "168" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1In0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "593" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:30 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrqrjU-mu7dGgY6-CuNd9AwgGuKtQ_O589el9eyWSAb54cDYnQN74dl8HxRVzIUayStgIinEuuux2afAlvkUGm9lhrefX8nugBXNL2lGluNcfQKfRo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOS43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "a8f5b1c42d7a3c5f", - "Request": { - "Method": "PATCH", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "12" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJjb3JzIjpbXX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:30 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpZ4q-BeojZ4WgF5ZHNzJG7sIvdOxmprH5GqoQ3DM-QllSzHWOwAfhVdJZH7WmVK5Dfs1TQH-pKgneNLb_9uYM-rDFb_5Hh9vs89o4pOPT9ob9ezTE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMC42MjhaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2
OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "cab9472f01ed4bd5", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:31 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:31 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo0__TFFuzUsEi6Eck0G1lblGy_1abmyPHERWBavjN2GHncyWWnKQ6yYaFDpDVQ-kfBS15M6H9NoxXUDS0hZ9VX8S9hK4rUGAAFWXznHjOntSK5ldY" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMC42MjhaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "7a196e6b35872d48", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "168" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2In0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "593" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:31 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpJhdZl7X4YXxzKdcr7SJ3C6Xadtlo0ixoS8DNO1jaDHs5MeGKu9nx2pHWPpnUWd13HZ8inxG0pMh3un84TucDfyLu4Z-5Gp7Ka3HBfPnt_w_VTiuE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "24b6a3b712ccab17", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2539" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:31 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqe_Q9NPVf3WTO_It8-A42wh0FD3KyF4kopi6posCEX6l28cnowG8O7WSYwETklwvpp0Uk1fCXiVmgwqLlWCSc3ez-nYllKp5m_UVtxup1E1UNadGg" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyJzb21lLW9yaWdpbi5jb20iXSwibWV0aG9kIjpbIlBPU1QiXSwicmVzcG9uc2VIZWFkZXIiOlsiZm9vLWJhciJdLCJtYXhBZ2VTZWNvbmRzIjozNjAwfV0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "ebed90fad884e1cd", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2539" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:32 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:32 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrjLjhrtR5T2TdsRM6X3HPy_6vnZjhUfpjytCivwknDLKhiMkND-f1qeZ4te6AnGrv_qtnGI4OGajQJaydHJvQZGWpnBQN9VOXHqYjSBTCxnNbcbC4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyJzb21lLW9yaWdpbi5jb20iXSwibWV0aG9kIjpbIlBPU1QiXSwicmVzcG9uc2VIZWFkZXIiOlsiZm9vLWJhciJdLCJtYXhBZ2VTZWNvbmRzIjozNjAwfV0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "31ee7f558375c66a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:32 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up70C3kZ6tTqgyRHbTyjXA5pLWMohouKPHCPhexU8UuWw0cAmLxjHcwaa2xBT-Soq-CLyVxoWeEkxFMhRqqK87xaM38Q2hSjGkeXEhmhoZ3iy5s6D8" - ] - }, - "Body": "" - } - }, - { - "ID": "72458186a1839a5a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:32 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpsiYUGF50kaeS-bMsAR51mlDVCwH9HwmqU1hCFSwzrMDgCmGF46ZuQGfqWx4SJprlBWIxaNwT_EQMKQrTQARvFyee5ZOJq21xZUsVEc1iBW0KkiVk" - ] - }, - 
"Body": "" - } - }, - { - "ID": "20fcdf5cb9502a64", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:33 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up8ALydoTkq1rLUaKiwrh6YEhczTwI3VX8iKuCNoqr6N2BSBvtxD2Qc9De99Svk_4EX82rynlWctsS2F9Ffr4WuDby30_W1Fl7TMWFZGWfo5Wc_nfI" - ] - }, - "Body": "" - } - }, - { - "ID": "ace99589753f3389", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3In0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "485" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:33 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UotVHCMZG4hcbTx20gUPBV1LsUxIAVuDym9d21c2In0VVLzBxOM_DAULuLEd9LTMmpNg6A7yW4yqEsnRlHhAaMbpe7MwAWbadF2gJJ2M7uGdA-rcLQ" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMy43MDVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "466aeae01d2d2933", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:34 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:34 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoVuGt1j5ranQuT0nS5bzOIcVM5M42RwIHcF2mKK_H1ctJEqC_djWDMm00OBFXY4yxsTFdQ1ZYjWVnFq8GhUAWUsvsj4_B9Ur8MzJQcbAzG9vHgIjs" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMy43MDVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "34d8505e483be050", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "31" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZWZhdWx0RXZlbnRCYXNlZEhvbGQiOnRydWV9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2460" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:34 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo5A5V7O5JcpElASqEqUZ_UiEjPa-39PCu3SwJk8EE9cHfyeVL5DWQkAss8k9iRx7YjD4ZC7WnzE_ENz95PuKdccjgzeC2GLFAUzA-sR2iEf-lNil4" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNC42MzJaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "728568d994e275c7", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - 
"gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2460" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:34 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:34 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrOVH8W8OREkTrF0UKlK3dOp_AAbRF7Rsx16Zbjdl5caBObFjXETVORm7o-d-YYcPIjcrx1mKcQwxHCIXpdE5P6baH6WeY3Wc3JVF7UcQnKna-a_U4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNC42MzJaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ
vd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "b7230d8a6b23fd8b", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "35" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9fQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2493" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:35 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uouwpmid1Y6D0uXWBb_WrmlZVHKYnw0RGbreddchrmPzQUE1H_DeclZnbl6Yb2346L4YNt44ZeZWEM2u46h7Zx45lw9rUFxZDQIvlXRn4vT6fW4WQQ" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNS4zMzNaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FNPSJ9" - } - }, - { - "ID": "ec9da9e78b750503", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2493" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:35 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:35 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq25oUSnJMHsIEARHohSGtkz5vw7RCXuQupRBliEjCXMeivgUIK0y9C3U4yWEJ-_182SvGUQLvo5rXxymcoyahwAEhfe21FJU_M2IijdiWsSv83eow" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNS4zMzNaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FNPSJ9" - } - }, - { - "ID": "ad49d37f07d631a5", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq02fbr_iko_zBCbJljbMwyW7mOaz1cqR6AWHm2ac3m9BcqrUXNkmRqxk0R2rZW6SAUC_KzbOze-1wUGY2E0g6wbYLUqcb8IOdqN2-ROvYO49uxzjE" - ] - }, - "Body": "" - } - }, - { - "ID": "7b04c14c4e323488", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4In0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "485" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:36 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UrJrrmhbKyKRTvA3_mNqzRsaaMOAAlvn8UMwBiOR77-6X6m5InqoXMb4qoollPzgpcFOmchQ54jFNic0xJf-tiGu0tlrzSdbjh5Hzw8Rl3RnJy_jPw" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM2LjUwMVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNi41MDFaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "ae3e9c648aaff158", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJuYW1lIjoic29tZS1vYmoifQo=", - "X7Xb+/Xtxt2fLTn7y+yBvw==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:37 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrJv7oOcHCzf9atnIPo70j3xSldrZtXTb5CmV1iuM4mMPU4hiBgpDqCAdUDI3i9hlR2qRie9qT30AJNshzO5CSsuzoTQ88W255VO8VVumaCSZ0KXZs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "e7a12fdbeb636933", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:37 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo-mQ6Bnw1qbAyHbMdumiv5YlTBrFQbHo-9tXn02VufGPTp3_Q8nXXp9VIcdRmzH2CJbhaVLu5fUFYZ6VKgcitBwcE-dCwxWrzaljgyT6zmM7x01ls" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "142eb6c42bc6f90e", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "84" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3215" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:37 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrBTG1gAXfMDmjm_clsDlv04E6VRtZUsUKnaVr4bC3AIRXILH2f0UebFf1_SmXPSf6Go8sw7TXti9yO112PAG5QNin3LwaSjRuFDSH_unnMZJwRceM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuODI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0iLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0=" - } - }, - { - "ID": "12487e4f20538041", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3215" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:38 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqxyG_OCKjwh7TR9zL9nbOGPP1ttxni9cP68b18nTZ7wZ8WqVE0MAGh1XFphNYZEYr-2kdgNgmqQrOvFiNbg6hJi-RfjCWYg0KHPLKfzyWc-gejszc" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuODI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0iLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0=" - } - }, - { - "ID": "e3a625bb7418cea9", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "82" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJjb250ZW50VHlwZSI6ImZvbyJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:38 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoVf0XeyD5sC9KvHEILX2e7x7VyJ9d9xoyrP3NqGFlsMgvK3zgeOEOXDvSpftUSy4opndA74-LNRkmAi8Q_6wc91iC8OWV7X3VR8kENWwclNK30V_U" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC40MjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSIsImV2ZW50QmFzZWRIb2xkIjp0cnVlfQ==" - } - }, - { - "ID": "2290ac6bb34b705b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:38 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoKagfJuJDLVLgIcrGtlV_UXlXLSqfEFj2-knllMBWWLktTv22ZfKoeSo8lY6gSfE1LKlvn87WiyJv75QWpcWdXzvablAMBOAZ_XMe-1D8dKZ5ipHs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC40MjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSIsImV2ZW50QmFzZWRIb2xkIjp0cnVlfQ==" - } - }, - { - "ID": "572d50dc9b5ce8a4", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJldmVudEJhc2VkSG9sZCI6ZmFsc2V9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3194" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:39 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAQ=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrWVb2L8OGfosYWTjk_F-zmyNwltYI93f2tqyP4EkOx7hIRpsSG2iGDFXB7SP7DKn-teczhb9tiBKRM7rYG5vcJ5jVinideFoX44khld5BhtRaQsUc" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC45MTlaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSIsImV2ZW50QmFzZWRIb2xkIjpmYWxzZX0=" - } - }, - { - "ID": "bda340f0117c1fb4", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:39 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UotG32AgmOwDD_bLLd_W64JCOZQ2-UgeY_cOZwomlkWb6Vx5JSE54tdsuCPvawuyJ8eNuCf8EM0qXlIjtAyg6QQiuN9t72vHrQLsrSs6WtJBDGZziU" - ] - }, - "Body": "" - } - }, - { - "ID": "9a0b7379af024a3a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:39 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur7S22mcdvPtDq_Z9iUbo4v0ApOQXNFgXFNZuacd8yZUq2FhZKco01Z2T4vJSv8s1yZkGXzoUrAuUKnCBx0vZuE-MDGnmTF_YxhQoXobyvAyktswdE" - ] - }, - "Body": "" - } - }, - { - "ID": "affb380cf86f173c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5In0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "485" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:40 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrF4q2mbuFoVeV9q22libc-FjfB1eDXSMYhNGZNU6sbaMWEKdc-8eXu1gtMLC7Ia9bSRU0oxgpIRR9516KZRcg7o_VGdrU1lliVPuSY9rYa4OsJymE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjA4MVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC4wODFaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "88123486ecd710ed", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJuYW1lIjoic29tZS1vYmoifQo=", - "cGCusp668ZMwY59j94Srfg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:41 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrixhGvSHn6UzWmshHjh8Ny2NE4LW-gcPPW4dCm9dZTDfBwtENYMJra88jZlphQFcyabnQwTegZW9_6bL8KYNOsyYQYgdtEDvFFMQeJ9B_IXQbrsgI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "042e51e8b073dc93", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:41 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqTH44ERv_v2A5OmhnLVBFOkQ_bPdeDI-MNir5xjrZt1ybrczlk9GwseHZ-kt566XrMnuYGTaCkr4ImF__dpbt98CqMpmYkiJaR1vKcI1DC7o3Dbl8" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "ed898d656784527a", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "83" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3214" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:41 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urh-jh9w3q8AC6TmG1NF_6JgV-Js74mV6EkB4ccrlZ1ZCQ9TlA4AhJ-J6rkqtJ9fKiy4YjT_l-TN6dQhnCwZvTUqoaJC-W-z6QQZywuQ1uX9a90PhU" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDEuNTE5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0iLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQ==" - } - }, - { - "ID": "9ddea19a0c44c449", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3214" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:41 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoZVnUhVRVPu1unMjxNHQzZrlqTsVtPMZHc_YPTkvy18AAD3d_TOg9qTNAEv5NXUC5n0XF1sPedifaWR19a5w7dCMltRYfuJUfGxaMzyrvpqKbGjx4" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDEuNTE5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0iLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQ==" - } - }, - { - "ID": "9b6c4955472bdb51", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "82" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJjb250ZW50VHlwZSI6ImZvbyJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3192" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:42 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoybPBMseeVzkBPlzTObhT-eKnJ9TbsC15btCmXKMDvqgD5Uz4JclrgL9lBqMq09UjP8GL2_3zCDkfA0gJ8SHCDZbHEi1XYcuAUT7hmXazBL3IIMqc" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi4xMjNaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSIsInRlbXBvcmFyeUhvbGQiOnRydWV9" - } - }, - { - "ID": "0227238a512229dc", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3192" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:42 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up6PS1r6NaUKQ9158DD3cKWHqyParXLdcW8k7sToMskeTU6BjpajWENp3qNHqLMFvX9NrK78TIw31h_ANYkltj3vAQoYq2RG5C6ZB2LCvUIch96SMo" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi4xMjNaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSIsInRlbXBvcmFyeUhvbGQiOnRydWV9" - } - }, - { - "ID": "7a81946424736215", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "84" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJ0ZW1wb3JhcnlIb2xkIjpmYWxzZX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:42 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAQ=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo1klBHpEl6qb-hfnVYr2KJLmo_3_1fgqlMYMBpBt23b-U9pF7bPcNEiJaMtrGEhZPQb9-QnqHiU8qPfmpNpSdy9KR41FwZm-89gyu3EtE5gq3J9-0" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi43MjFaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSIsInRlbXBvcmFyeUhvbGQiOmZhbHNlfQ==" - } - }, - { - "ID": "7865e37b40b9ec66", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:43 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo2nfd81k4cBaY8ZFRGIu8G6DHyGUVAdIJCB65E8ZqqO9ADJIV4K22sQaOA8fqGL3jyi3tIszjBVsXHOFrCgca1vLXSpA1-3s2cn-MVhTKpzZ5tzY4" - ] - }, - "Body": "" - } - }, - { - "ID": "fd9104ab049c0173", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:43 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqMqIYvjYuUxhnSLyoY-p5BYUiuZIX7cs418asOvABObfrQc8eXZpo-V4LSZVWWgt4CNoviMvFwcdC9sCTKCZU1H-9Ifcuf8lptTMZpiWMJjZfD-DY" - ] - }, - "Body": "" - } - }, - { - "ID": "e735515a80a67931", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "105" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjM2MDAifX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "573" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:44 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqxx-CzGd9qn-QCqeE_iYOkeO93A0IpWx1voocTxhTrdkkBJonYBIWXeaEq3g-LNThw7j85IKVotWyBNZlHzqYSgBchb0DeXXetMqS8WBkY603Sa60" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0My44MDRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" - } - }, - { - "ID": "94bfaea89e1f47c2", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJuYW1lIjoic29tZS1vYmoifQo=", - "29UJUEl2/QvM9FnDBFnPRA==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3245" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:44 GMT" - ], - "Etag": [ - "CPXIv+vx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqLSIzBtq8Qn_3f51_hlfoIkcpamafGwB3U8er3K_mzANwxXyscBHWZhaxb2-3M2wOT6GZER6-zLAtdeeSNtSlUMHkHX77gocrZGYj103HA-AGjVSM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ0LjM1OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NC4zNThaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDQuMzU4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJQYzFTWFEyUUk3MGxNNlZIN1F0NUFnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQ0MzU5MDI5JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoidUpwZFdRPT0iLCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSIsInJldGVudGlvbkV4cGlyYXRpb25UaW1lIjoiMjAxOS0wNS0wMlQyMzoyNTo0NC4zNThaIn0=" - } - }, - { - "ID": "7db6d4640ce21307", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3245" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:44 GMT" - ], - "Etag": [ - "CPXIv+vx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoUK3qzYL0xUcMKfWD6mmDZqRfnd8q7O14gBrKng7OYalNXZR3ZegpD1VyqjzW6bgQqFvLrtFjWXwPoJ4E78_nThYByR2n3Hm33ms2fvCdaRSOy1Qc" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ0LjM1OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NC4zNThaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDQuMzU4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJQYzFTWFEyUUk3MGxNNlZIN1F0NUFnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQ0MzU5MDI5JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoidUpwZFdRPT0iLCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSIsInJldGVudGlvbkV4cGlyYXRpb25UaW1lIjoiMjAxOS0wNS0wMlQyMzoyNTo0NC4zNThaIn0=" - } - }, - { - "ID": "cec800ec28205dab", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "25" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:45 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqAm9jtf7nMSv2Bbn_pFGaLCmlq2CtqUdGeEOyYrkllqiADBk9_xsJS3BufmzfGIdARRHN7jVwbeHKev2CHMyzWoGA8UxNGNIldPCC4oZ1dMPARKHk" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NS4yMjZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "c787f6d5b23123a3", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o/some-obj?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:45 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrgP5ghUeBxvXks4XlillwzLwfwyRGxt5y-FImGHSwZ-Meht6M0-keE0YTUG2Qy5hH5i1gpWPyGHXweFDyrk8a3QbDPnG5shDEoxzNOlMEsX6V8-4I" - ] - }, - "Body": "" - } - }, - { - "ID": "144726e64e401a23", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:45 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpxuxvNJ3APW11H4y-qweAzn3ChIZ0eRjSetoSHkGwVvO7nMR17h5Hbj8I8zgOGjR7HPA201lDCw47ejD6kj1wCz3ZTIrUz-r-qjdm0vGGszQYFidQ" - ] - }, - "Body": "" - } - }, - { - "ID": "eebfbd37f51a5cc6", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:46 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - 
"X-Guploader-Uploadid": [ - "AEnB2Uox0foiLUu9KuEF-KVU8lIN_yUKAJsYEuwVqgSXvzFRGmzJtSZvFeNvMDS8W0aQ7c8YoI89qS_MxU8zBSBqcljYpIZe1NPIvkEFEG5K22RJkAuwmh8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni4xNzNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni4xNzNaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "c7ddc04b5d29113a", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "47" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2519" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:46 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpD4zsQLTflaR7g-z0FszOHvw5oXvxqdiyjlAnMwMcBcUEjidrpIGgpvmrz1EWi2AyWkdDCSY0l8ItQxYnWwSZt24jOxXBlzq-gCNrCCE6j0fJVGKw" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni44NjNaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDYuMTczWiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "e08a38dca4dda51f", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2519" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:47 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:47 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrsphQdkHT0ctCFLyPV1JRkq7cyQ7w1tvYmAWv-6tYsuThaMN-vkUJoTNz2gPKGy6jV0yqdbXaAlRlD_5n6GiQa-ULOyKLi-C0q8y25lfk7L6zROE4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni44NjNaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDYuMTczWiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "fe586a68284be237", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:47 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpAjkPxULicnoY5wPnfrg-D7yzTbTzTZcUShEN8JSsqVMN758lI4DlkvFibNK9URhYVBH0xclTheAOO_CEDoVb5egvcF-WJqJ3SDisU2YPaZsohRro" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ny41NjVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ny41NjVaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "732c3421adfb99d8", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": 
[ - "47" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2519" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:48 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UohfaTh8JFEjT7TTMwV7NkrcAsgkdpDyovvbjHJLNbXalzOWX3_HwhowBimaqZXhkuvBx1D38zsUZ5dEzigoyoXRJtgHfy7ClUrshBL_yMfPkZ4JE4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OC4yNzBaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMt
NDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDcuNTY1WiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "aebe46d0a31a6e95", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2519" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:48 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:48 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uop7c4AxllIcwFmLfqhIIaGP3Q9uHfmQoAm9LttwjQIvzHiJvu4fg_Q3UfNP8YbDuP97jLsG78JbCK68eSQv8ZHPp8kpzfdJcvOER0cs75wQ9gC7NA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OC4yNzBaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDcuNTY1WiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "bea48b3667650bdd", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - 
"Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:49 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo1p1nEuvoUAqYHYa8wuU6qF64gN2JPCxLqqFEUiKXLgo9rwPH_btm63155v5hbuS6sow8cYEvsXVJthsJ1Zn8qfb05USodp1EvTTrt_m1rpJnIxdo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS4wMTRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS4wMTRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "a951cae4048b1267", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "25" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:49 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpZAHhwxqSSY5MYFK1fdLLirRH41rc7W2YWSnn4CQZkbdR_98fRYjddtqZVfpEizqsZ1BNa5A7ENxQz7BwQSamASc8XAC5gyZp_pwi-Yu8l0XLSCpU" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS42MjdaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "e7f0506408261db3", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:49 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:49 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UotOOFJlGACBcheGxpS8qxLlGsDT0xOhT_Cg1M_6DxUI16IiOu-YHaPvwzxilOKOrTvL8kRzcVUiNuFeS6rQExTmdK70I1IOH2d2BLOrMm1L5Int1g" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS42MjdaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx
7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "b2c4589e9b9b5ad2", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:50 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoFdLKiYfqS-J3PXjvHGxpXtJwVnUB7Li8XrR-DTYkmyed-ZlUmUcmZFgGyIayuACtUWVYBcRbhYOKvgkfl8srE0OdJbc15OHWBttYqenraJ0MdXj8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "836baafb4e98da80", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:51 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UpNuySlLo4SanUdFJK3SGoCidNgWEW4mUuFDGMLLSzY-UUA3TJtN5_FLoLq7et7d1YSUDlOoGZY4a_GdewGQtsUwGdbHLz-bopObpzPy160Wm3UMpg" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "281ffb73e66efd1f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:52 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Uo8PK9JysFiApLjDSbbo9GloKqKjLO8vr85RWGkeC_Q0iEQf7zu2mwQVeiuJ81pfdJCUja0HrhNG13sczI2XoflqlsWNFvdxHGjCBrvDObr-s6Y32U" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "515ecb8616d48a7f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:55 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - 
], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqF93J_Bpwg-_yefVpKWteSyWBSx9FqNm8MP5eZKqVfvDWpSF6EPZsISss7hNOkKMM5OuV8stlJIW2UAkzsSgkYOHx5fCwOwylbtqAyb8OlYJCHz6Q" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS4yMDRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS4yMDRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "771e55d617425f26", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "25" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:55 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up6Slob_akPo2UWNmzCe-LD4cb-9DVTq_FPlNxA7mKVF6fBFks6gFnZ83BSajEU2qD8g14Y-pX20kbfoKf8XGR9pIW2xXH_oHCxsjspHZ2ARGePTMA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS44MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "9526aab2be8b214c", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:56 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:56 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UopNhdzHbHv4sXyk0r1owz7bkkPQwYIGbb3gRingbq7vgESKltWLIr9PDDIUNREfumEvZDfpxMineWYNzZ0qdOaI1QNYqXjnuxO6sCUDjBYEhr76uk" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS44MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx
7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "0aabff8aa1cc1e9c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:56 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpTj6zYv1NMHDI_ktA1d-p7HKq4YnJMxxI9nXubPfCMyIspOgya8HYibmvOMFwLtCPagnzRHe0HbmTCSqJpYMay0xYZstGSE_IUWRqI9lUhzq0Iqe4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "05147b5b5fdd7882", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - 
"Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2517" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:56 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoshhz3RuOTlNgZ3gOLis_dSFBsqr7khyJCdWUD8BGk0_j-AHxA5etttVHuHTrUcDksF4LdtfJLhlVrAJe9PrGiYZ-Wy57j6tb7L6vc0qHoTvolIRg" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI2MCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" - } - }, - { - "ID": "7de8d497e7e1a76c", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2517" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:57 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:57 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up3mjbvLxwRDDhjg8GTGBNcJM7EraDydPsmD77oQcyYYGVlQnUqg0NQbezf4KQOoOHoEnC9jMR052e0olW3qi9KdAi4-qJEiKGI1rGivb5hnJnmU5o" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI2MCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" - } - }, - { - "ID": "3876040740117531", - "Request": { - "Method": "DELETE", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:57 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqGdmh-XzrU5d5VwRugwWr452hENxpWf1sEtNCDaGksLJ1taCgnEjL_0oAlELqirqyXLlw-frE8LYo1V37Pk1G3qvWGk9E1iSZV7uECOjsdE3JagK8" - ] - }, - "Body": "" - } - }, - { - "ID": "39fb456e528c7081", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:57 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqKcAQsTdqHfiXj71c5kivJTwCNhY6xUQpZtaxywsmr1kNG5_MdJBsOvuLVbDSHKRIsG9tDxk_9Gz-7zaqP_pqke1aqKOFg9NeCU4ZqfjD0SBy0Q5Q" - ] - }, - "Body": "" - } - }, - { - "ID": "3fdacef865a6fc39", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:58 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Ur0PIkQTUvOl8-6tC08mwiS3nBPydo1OYUeD6Q46ZrU6VkmpRwxc9MikGqJlsejuGZkyY_aaxUVKYGREjOF363tfkbII3of6pGFuQ8rINOf8uFV5ts" - ] - }, - "Body": "" - } - }, - { - "ID": "2da48fa864dd9efe", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:58 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoWkbW_2n8KvRkdk7Lh_5eOVBjW6JvoW0i9LNQUHM-Df2H1QIbUYIwFnUGLt-bPfFa46Ur-v8Q76mU24KpjW7WIC_HVq4Q5H96pRYk7I6ggUsckTs0" - ] - }, - "Body": "" - } - }, - { - "ID": "3ef26f9548e0aec4", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:59 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqZfOfnhYy6nk2TxHz3h1eMu_0KlczCaOb-xcAIBRoRgxk7Q67BqsT5fvb-vAky2phYzQWNHLTk-0T8V2tpPY4Hg23Ub9evuo6_YEndEVdiBRhTcFI" - ] - }, - "Body": "" - } - }, - { - "ID": "8f3abbeca307cc5a", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "574" - ], - "Content-Type": [ - "application/json; 
charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:59 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqdSQK1TuFGAgs143LV6RLNjzfrsAl5vt-Jb_RQ4dcVGDzK8I29sKKrPmcY0rLIJuVIQar3mdRGhNLhqXp2xCKli28TFq4rtsIgiA4DxCtdVbajIhY" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5LjM2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS4zNjRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS4zNjRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "bb92a1c4ecd9e884", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic29tZS1vYmplY3QifQo=", - "aGVsbG8gd29ybGQ=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3315" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:00 GMT" - ], - "Etag": [ - "COup9/Lx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrRoCa6t1nTWVUrdfIXfyxZBI6PKBkzO24svoJRDEYMEsduIrzBpdDiX8mIUWSxcL8-pSRZgcDvy9_iHKpoVbiUNt4QhlG7zosGpa3WOk--YlBxjeM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk1OTk1MjYxOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS45NTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NTkuOTUyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5Ljk1MloiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoiWHJZN3UrQWU3dENUeXlLN2oxck53dz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk1OTk1MjYxOSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTU5OTUyNjE5IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NTk5NTI2MTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3VwOS9MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvc29tZS1vYmplY3QvMTU1NjgzNTk1OTk1MjYxOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTU5OTUyNjE5IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLC
JzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NTk5NTI2MTkiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3VwOS9MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InlaUmxxZz09IiwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0iLCJyZXRlbnRpb25FeHBpcmF0aW9uVGltZSI6IjIwMTktMDUtMDNUMjM6MjU6NTkuOTUyWiJ9" - } - }, - { - "ID": "8f2ac73db961f1d6", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o/some-object?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13884" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:00 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:00 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uok63dRFSkGv0Fum7yPhHf_AtYTWceX5_zkck648-jhWkyym1ezg2Og-LqKJ6nnZCW_gPZuU01OkZErKAQbL7mvQroCq8nEtNTEAosMxk9fJRKpYlc" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: 
com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00, unnamedArguments=[]}, location=null, message=Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00, reason=forbidden, rpcCode=403} Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00"}}" - } - }, - { - "ID": "5a7a0988789dfb17", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "25" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:00 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - 
], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrZIBPbBTYIUYYaBFrcXzNnp7uuC8DoPry45bX57PyfL7aM5a83NLxYnvWsXD62w3LrRVksNhJbrLnSL5e8ZS6b0Cto8ufh-V00gNrGI-CaTc4achc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5LjM2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMC43NTZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQ
U5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "8410bda61aa0b6fd", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o/some-object?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:01 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo2MOVFr2ZwvkwYm32qtqndRBhJnBc28nVBEBowKjIIxpOxiYcSef4wtz9sLTJ23PbXr1F8L_TfGwyShv5j1eTsQ-91OXU8Q8JEZtui9VVyK96j2D4" - ] - }, - "Body": "" - } - }, - { - "ID": "7b024f33e16d850f", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:01 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpDkcsqcqPLSySgf13o9jWNafbKdVjSRqGNu3ZedvF7TlL9TBswdMMpcl0zQOHtvXWIrhrcZODr7RjRhHuvS8gJdMbnYeQDHrBCbiH6x9WrnaZ5uIQ" - ] - }, - "Body": "" - } - }, - { - "ID": "33b0efc1b91cfb42", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "574" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:02 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": 
[ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpItvsNld0b1FGsjQgaVCDxW22UEra8vRGfkWtbDS7R2kMbHRPr0I6iPjzaQBOaa9-xUJxGO-JV6RhMlWwH2NY-FXY2-0lrkZiAzIL-tdFwkCwlF-Q" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "632b9a0387d9cff1", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2520" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:02 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:02 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpEVtrO8ed8TEHdpzyw857ZykloGKTT56atylmGnu682Y0VxI_T6SWN0JLFupJ_2Cu80ntiZttFSCBqwqFGfxVrRqsNnf0KBDyzqOH8eI-BaWPBXvE" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI5MDAwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" - } - }, - { - "ID": "a32a5924f876577b", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/lockRetentionPolicy?alt=json\u0026ifMetagenerationMatch=1\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "0" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "639" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:03 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpsYL8RxAKClLw1KRdSSIORQHYKKf_HMml2vtwDOFrh0kTYDzhGEgX2RJAPQKOP2-70q7ZZ2VfkkGz12tmTiRgSuwB2dM3xxZxZucASROVeoit9Shg" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMy41NDRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjY6MDEuOTA4WiIsImlzTG9ja2VkIjp0cnVlfSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "718775f2f8320cfd", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2536" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:03 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:03 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UprHupyIuGJCYeUCPKigfkARZRkCc2Jz051KFrXK7YS0FbdKxNe3r20jon1ejkooHsAtcXQOi1tDV41Ob9dJ3Zoyj7stDImJ0eQriMO0WLLQ__-V54" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMy41NDRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI5MDAwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJpc0xvY2tlZCI6dHJ1ZX0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "2f4b1104fa94954b", - "Request": { - "Method": "PATCH", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "47" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13774" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrafLpm39Fg6hBYYi9lSEizIWUdMduEDQykOeT9Bch6uf9BNQUO9sOAnWpTklSpNac1yW_kSNj-JR8LMmVLirC2kqG2DcQdE-Hg5dtVBkVZ4Xhjlc8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat 
java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 19 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'., unnamedArguments=[]}, location=null, message=Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'., reason=forbidden, rpcCode=403} Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'."}}" - } - }, - { - "ID": "a8964565736a4805", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - 
"X-Guploader-Uploadid": [ - "AEnB2UrNbiNCNtu8DhS-gNXfoe4R9DFKEVzknZ0g6YoI0JdvwH6P199oVJ2oaJwh9WXMte6frhDr_vJo1_PlxLhezMnUTJzMYZgPlPWdU1QN9imk3hXgawE" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "49648eb4e4e2ddda", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UplUHmGCg4wOKtpKVz2KH0cn-th8s-KXQR62bOkAF1pV1zIZJvsoI_tURcFttkyHks8g05Hln5fT4Ej_O-Lx6JCTovYYwhhHIK7AAcialHMj5fPHk4" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "42cf12714cf13289", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UrcjWbo0pc_i_GQTU2WE6YR0XhWYMGRPJutu1MQXo6XbRKJAYhzNK4p2-_gCqZ6BvNGncUP1UZNoTrXsFFGt1RRTRR3DS9XNiws0gt9YQdp_I8dqqM" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "4f0fdb3eb607f33b", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:11 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Up0EsbUXQesAgz95sFt6OzWRDj5WAXqEpYaqHgGvCClYnBGvjX8eQmor80UQOCIDzT6v9ZAtOPVhFIqkJG6rV0-CDXiLAnr4gwDrPkqgezvm8PQTaY" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "a5a8b8af13be21cf", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "574" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:15 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" 
- ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpF7Lqxvm1dNC6q_v81ZB1Ui9c2ZLJ3GXLX6Y6UNZN1nbkF78c0J3NwXavHF1cMFZ5oP8ufCnmCLg9vQHM6tfs2ih8YxQSfE4D3AxGsvBaGNP3VUMo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE1LjA5N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNS4wOTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNjoxNS4wOTdaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "e1dbaeed8c6d1ffb", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/lockRetentionPolicy?alt=json\u0026ifMetagenerationMatch=0\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "0" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 412, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12155" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:16 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur4dRXLSTXYf9VXr3eOCMfuLBwDLvNQMXwIYjwEuSFQO_p-SxZQXQJ7I_VA0T5Hn8gnnEZIqLV3vLCnurXr0cyHDEMpCdxWOiIiqsXlgkbcLKf78to" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"conditionNotMet","message":"Precondition Failed","locationType":"header","location":"If-Match","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=PRECONDITION_FAILED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=preconditionFailed, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.CONDITION_NOT_MET, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CONDITION_NOT_MET, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=null, unnamedArguments=[]}, location=headers.If-Match, message=Precondition Failed, reason=conditionNotMet, rpcCode=412} Precondition Failed: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n\"}],\"code\":412,\"message\":\"Precondition Failed\"}}" - } - }, - { - "ID": "814dffcceacb3bd8", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026kmsKeyName=projects%2Fdeklerk-sandbox%2Flocations%2Fglobal%2FkeyRings%2Fgo-integration-test%2FcryptoKeys%2Fkey1\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoia21zIn0K", - "bXkgc2VjcmV0" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3234" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:17 GMT" - ], - "Etag": [ - "CMGB+Prx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - 
"Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upis-8CwJYvr7glFAMnec_3T3YDojfTz3O_uICK5tQQmfJF2_rFtNnbqopcxBL5L3aKyoYe4zOLoqN4_MFFrhYKDm6Tmrl3rytuh0ymlCV15VZKyrQ" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3Njc0MTA1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNi43NDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTYuNzQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE2Ljc0MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3Njc0MTA1NyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEva21zLzE1NTY4MzU5NzY3NDEwNTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI
29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" - } - }, - { - "ID": "9b05110f2b59351b", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/kms", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "9" - ], - "Content-Type": [ - "text/plain; charset=utf-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:17 GMT" - ], - "Etag": [ - "\"-CMGB+Prx/eECEAE=\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:26:16 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Encryption-Kms-Key-Name": [ - "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:26:16 GMT" - ], - "X-Goog-Generation": [ - "1556835976741057" - ], - "X-Goog-Hash": [ - "crc32c=UI785A==", - "md5=AAPQS46TrnMYnqiKAbagtQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "9" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpZ6aSiGPoeZKANPoMPgXhL1QD5nbILvzv8v3sOMId95Y65wOPssqesNA1MXzp86JA4Qa5gAMxxx5cnY4z3-tCUnvsKoIHn3NXOK2U6ZSRLzYNeQeU" - ] - }, - "Body": "bXkgc2VjcmV0" - } - }, - { - "ID": "011b81453e7060ac", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/kms?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3234" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:17 GMT" - ], - "Etag": [ - "CMGB+Prx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq5t8aexqBqqxnLJFK9mWmGdVrFfd3gaCGOKfGJMO1phB3ATnxt67-cLVpELRy1gNnjNOK7-zMzWCR6yV1v5PQcv3lccINA--Q92RoECpvBON-QKtc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3Njc0MTA1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNi43NDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTYuNzQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE2Ljc0MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3Njc0MTA1NyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEva21zLzE1NTY4MzU5NzY3NDEwNTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcy9hY2wvc
HJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" - } - }, - { - "ID": "0dbd0a37c735be56", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/kms?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:17 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur_RD6un7NLXQpzwRBFEfZdp982qhfEzyvw8f_P1Lx1wKv8sUid99lQ7Nba5Rs73IJfv3qHXjQfUDHjWclvCAcS91nMboiexn-FwiMrUuKqCQ_d__4" - ] - }, - "Body": "" - } - }, - { - "ID": "1b81fc72e201f2a2", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "Io4lnOPU+EThO0X0nq7mNEXB1rWxZsBI4L37pBmyfDc=" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - 
"eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiY3NlayJ9Cg==", - "bXkgc2VjcmV0" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3262" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:17 GMT" - ], - "Etag": [ - "CKCxsfvx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqRTb3RtqG1gIL-ENbVZC8nWKRHapQP78uVzv4yyL2t7N8cA5jEBWa014iJALicWO06KthvAMPLctgZCJ2UvPqo72IO4_LheEfTr13-_h43M-roUyc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jc2VrLzE1NTY4MzU5Nzc2ODEwNTYiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrIiwibmFtZSI6ImNzZWsiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNy42ODBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTcuNjgwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE3LjY4MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3Nlaz9nZW5lcmF0aW9uPTE1NTY4MzU5Nzc2ODEwNTYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NzZWsvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNzZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3NlayIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc3NjgxMDU2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1
iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0tDeHNmdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NzZWsvMTU1NjgzNTk3NzY4MTA1Ni9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3Nlay9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNzZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3NlayIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc3NjgxMDU2IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0tDeHNmdngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJVSTc4NUE9PSIsImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJJbzRsbk9QVStFVGhPMFgwbnE3bU5FWEIxcld4WnNCSTRMMzdwQm15ZkRjPSJ9fQ==" - } - }, - { - "ID": "6da27052d0dabd22", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/csek/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026destinationKmsKeyName=projects%2Fdeklerk-sandbox%2Flocations%2Fglobal%2FkeyRings%2Fgo-integration-test%2FcryptoKeys%2Fkey1\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Copy-Source-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Copy-Source-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Copy-Source-Encryption-Key-Sha256": [ - "Io4lnOPU+EThO0X0nq7mNEXB1rWxZsBI4L37pBmyfDc=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3372" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:18 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpKmOV156VWN4edUkrNip1urAj8WTpDcgGTS520rVGilFT71KrZLZQxUMKloxtxKx5R6V9pUvIqYym68P8lQ8RSc5rEE_D1KlH-NyPgybiPERbxYZ4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiOSIsIm9iamVjdFNpemUiOiI5IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21layIsIm5hbWUiOiJjbWVrIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTguMTI2WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE4LjEyNloiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOC4xMjZaIiwic2l6ZSI6IjkiLCJtZDVIYXNoIjoiQUFQUVM0NlRybk1ZbnFpS0FiYWd0UT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWs/Z2VuZXJhdGlvbj0xNTU2ODM1OTc4MTI2NDc3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjbWVrIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJM0p6UHZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jbWVrLzE1NTY4MzU5NzgxMjY0NzcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWsvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjbWVrIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN
0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJM0p6UHZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiVUk3ODVBPT0iLCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSIsImttc0tleU5hbWUiOiJwcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvbG9jYXRpb25zL2dsb2JhbC9rZXlSaW5ncy9nby1pbnRlZ3JhdGlvbi10ZXN0L2NyeXB0b0tleXMva2V5MS9jcnlwdG9LZXlWZXJzaW9ucy8xIn19" - } - }, - { - "ID": "6ff8a4be1ed6bda3", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/cmek", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "9" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:26:18 GMT" - ], - "Etag": [ - "\"-CI3JzPvx/eECEAE=\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:26:18 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Encryption-Kms-Key-Name": [ - "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:26:18 GMT" - ], - "X-Goog-Generation": [ - "1556835978126477" - ], - "X-Goog-Hash": [ - "crc32c=UI785A==", - "md5=AAPQS46TrnMYnqiKAbagtQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "9" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqDR7RHLf_CQraIBM5Z3vCUxSJndE0E6FBOLFBqmYvMntnrQyVIfxwuTE6BtHL4b8oQszU6rIycc72JMUjiGRM_XoGdjsk-WhsbBmL_3c3x5H1s4bs" - ] - }, - "Body": "bXkgc2VjcmV0" - } - }, - { - "ID": "51f1995364e644ed", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3271" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:18 GMT" - ], - "Etag": [ - "CI3JzPvx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpM1hbkOXC-oSxvoqjP8s2MpxREJEpzVp2TTYj-6B8467MLYSA-yrWs9UoGwRGQUCJTR76e-qAmQnbzGsFGT31Kc3qjevb8SM0MynLHL9GhOMkm72w" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jbWVrLzE1NTY4MzU5NzgxMjY0NzciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrIiwibmFtZSI6ImNtZWsiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOC4xMjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTguMTI2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE4LjEyNloiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21laz9nZW5lcmF0aW9uPTE1NTY4MzU5NzgxMjY0NzcmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWsvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSTNKelB2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY21layIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc4MTI2NDc3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwM
i04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSTNKelB2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY21layIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc4MTI2NDc3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJVSTc4NUE9PSIsImV0YWciOiJDSTNKelB2eC9lRUNFQUU9Iiwia21zS2V5TmFtZSI6InByb2plY3RzL2Rla2xlcmstc2FuZGJveC9sb2NhdGlvbnMvZ2xvYmFsL2tleVJpbmdzL2dvLWludGVncmF0aW9uLXRlc3QvY3J5cHRvS2V5cy9rZXkxL2NyeXB0b0tleVZlcnNpb25zLzEifQ==" - } - }, - { - "ID": "8972a840dc3d95a5", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/csek?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:18 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoTmNCzrFh_7cFaryet35nLbKqM6dyo7nvsyZApHIj6fLKcMDb9ZA-_TRNjGOLNFjjoD0IZ1YOz9P-ftNchBFkjWDTaFzBjbHmryCc9JInU3wd_DLw" - ] - }, - "Body": "" - } - }, - { - "ID": "1e28b278fe3e0a66", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ 
- "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:18 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqrX5OILXadm6r5e8fLyuLEhQncM5jrmmDaOesSqraquvQbXXKhTqNJ63RYU3qj90M3FLjwnAOz8oOBTQQDOq2sRqUkbpBbJzYj82Rp3Rd9VLpNns0" - ] - }, - "Body": "" - } - }, - { - "ID": "d1ac5d123ee4bae6", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "200" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEifSwibG9jYXRpb24iOiJVUyIsIm5hbWUiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "609" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:19 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpbajYTiCBBelYuwcqOs7_O-SyJbcV6WGwYhC4JCMmeImLlFLb93JyaiYrSp-201iYZTIdQ2VViFR_Emxe2Z7b4jV_G-0o2xzkqnkSnz0-Qcw_q0Wc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS4wNjdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImVuY3J5cHRpb24iOnsiZGVmYXVsdEttc0tleU5hbWUiOiJwcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvbG9jYXRpb25zL2dsb2JhbC9rZXlSaW5ncy9nby1pbnRlZ3JhdGlvbi10ZXN0L2NyeXB0b0tleXMva2V5MSJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" - } - }, - { - "ID": "d035cdb991d0fabb", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2555" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:19 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:19 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoi5SKd4TdRXg-INW-U3pKcPA2ioBWIc2zdC5W3J4efS9iZ0EKFNqprjQ-NGEK_3wSDGezk4Al3Dd4JovGxXSnH73rIw22S_R__XRrpTQmYpTiNFGQ" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS4wNjdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3
RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "672cb2bcc1ed4ee2", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJuYW1lIjoia21zIn0K", - "bXkgc2VjcmV0" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3234" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:20 GMT" - ], - "Etag": [ - "CLHMt/zx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upyst1aH69iaN9J4055oX7H2yReN5TADIX11Vul8KakNuYZbJ7yXm1GBdvCDVB-IPTvbhzELJWgfurnIHxGpzKJszLO70n58xMnVbSLsHX1J3W02Lw" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3OTg3OTk4NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS44NzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTkuODc5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5Ljg3OVoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3OTg3OTk4NSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkva21zLzE1NTY4MzU5Nzk4Nzk5ODUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbG
Vyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" - } - }, - { - "ID": "c037ecafa6249395", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0019/kms", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "9" - ], - "Content-Type": [ - "text/plain; charset=utf-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:20 GMT" - ], - "Etag": [ - "\"-CLHMt/zx/eECEAE=\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:26:19 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Encryption-Kms-Key-Name": [ - "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" - ], - "X-Goog-Generation": [ - "1556835979879985" - ], - "X-Goog-Hash": [ - "crc32c=UI785A==", - "md5=AAPQS46TrnMYnqiKAbagtQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "9" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrrQOFSgCkxAaKVPAQhz0ohMN6irPryraHLjWDEQgV3dNEwc5cxqcDDRChzx6_HR1Z_NPlm3vmjFCJcNK-2rAz-tmvVS8w36Q5U9Lc2wAGUgL_joX4" - ] - }, - "Body": "bXkgc2VjcmV0" - } - }, - { - "ID": "fc3eda648104f042", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o/kms?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3234" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:20 GMT" - ], - "Etag": [ - "CLHMt/zx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - 
"X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqHPSbutJRXX-6i05QnH_4UizrPft0fsGc3_ISDNmXQ4C3RPbEAnNfS1Ren0j3k2z8zxk9AV1WZQCWiKG9D5A8gZAZESFxbSZyjbBH8jkLEsEX54NQ" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3OTg3OTk4NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS44NzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTkuODc5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5Ljg3OVoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3OTg3OTk4NSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkva21zLzE1NTY4MzU5Nzk4Nzk5ODUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" - } - }, - { - "ID": "13283bc1ba20f019", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o/kms?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:20 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo2b3OtHWqpsRWxurpcGgrEtM10ZDdsrIa2z0nFsO-P4G3RAGYMTVMR0MCtwLDePrKyhfnsK-d9T5AIcRpeaIe8Z40Qm7gkZP4Fdi9jXd6ceYe5Iwc" - ] - }, - "Body": "" - } - }, - { - "ID": "ca2255586aa4aeab", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "126" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2555" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:21 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - 
"X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur05CMGbsLFprbwWYFSeE3I5dKFdFUzO--ImyAzkLt6ueNGpMDviyX6c1S4F6t1bqXTSnyQXXQzgugu8JoecrL3-1eJOPsqnN-OBMubntJCblkyWqE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMS4zMzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9i
YWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "e1c336910a43425f", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2555" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:21 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:21 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urcwbg5Z9C0rFvSTWYj4FxF27ooV0028Cm-yme-BrfmvK0dWsRkfRQEMWylWwokcmM2kIvLKEiucJlvDLHvnbJPB_HjH3zF2Lm8xCwV_9gPN42QDeo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMS4zMzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y
2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "581a002b653f595d", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "20" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJlbmNyeXB0aW9uIjpudWxsfQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:22 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upv2LawTqc_eDVGR_hv2jSCyEA77H33uMkPevVxUUDKj1u4a9IWOH2f1oMXpU52-49Jg17SUSoJUJai1Ueff3ahN58GC9FCxwJt5v5xOHI3SytFjpo" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMi4xMjRaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBTT0ifQ==" - } - }, - { - "ID": "a0db97c641f3f17c", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:22 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo15eoN1OrQFM6eqxJcuNFl7Wfr6x_VvJ0ENBlhhC_Hedf5al2OplGnNslWnRQT9RVs92RVLevZ_F_ohutgnNsUbK1PVsR04UnQ_A6ea3C0ZLy8FkI" - ] - }, - "Body": "" - } - }, - { - "ID": "ca84fc3a71a55ea8", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026predefinedAcl=authenticatedRead\u0026predefinedDefaultObjectAcl=publicRead\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "1468" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:23 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrnCCRKNh19fdBAL3pyCzbtLnG7z20pSwSZKl9uTerD6o6-B5mB79mqZRIbXNcUmcpTdpxTreKjvsHK3Y9EKq_5fkzBPInDj8YkjbGViIpd1ldSots" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMy4xMDZaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9hY2wvYWxsQXV0aGVudGljYXRlZFVzZXJzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZW50aXR5IjoiYWxsQXV0aGVudGljYXRlZFVzZXJzIiwicm9sZSI6IlJFQURFUiIsImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "ed54d82686b4d390", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "1468" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:23 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:23 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpXJh1_aAMuDVjYxLZJdwqAt7OwZdmTkc0_5wf_LshhNm_So0q87o7gJ31T58vp1b0_CucSecAyyyOLUbYTDrWDDTZGMkZBpfx1iBArjRtBcud1IgI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMy4xMDZaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9hY2wvYWxsQXV0aGVudGljYXRlZFVzZXJzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZW50aXR5IjoiYWxsQXV0aGVudGljYXRlZFVzZXJzIiwicm9sZSI6IlJFQURFUiIsImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "224aee5425ffb86e", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026predefinedAcl=private\u0026predefinedDefaultObjectAcl=authenticatedRead\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "33" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJhY2wiOltdLCJkZWZhdWx0T2JqZWN0QWNsIjpbXX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "1113" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:24 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoknWPZdEFnvKZGCGO_Z26ZaKeXNdBzc6k6qqlbGdQXVEtPvLPR2oYtKCXsqDgFBjMw4wfWVXwDfmcc5u5kBAxshpuzKGgCp8UOxv-i_Rml56mzrOI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC40MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6ImFsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInJvbGUiOiJSRUFERVIiLCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "b6a80796e20baf03", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o?alt=json\u0026predefinedAcl=authenticatedRead\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJuYW1lIjoicHJpdmF0ZSJ9Cg==", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "1995" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:25 GMT" - ], - "Etag": [ - "CNDq5v7x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqMfGdFW2xElqOJcP4q5XeDjVehJr4FmjcFPij-Y31f_168_Jgb6zibcWxCOJzrV1aUYYTXZW9DMX-BcvfJi6YXoBCPpN23EhrGQyY7tHSb0SVurGY" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9wcml2YXRlLzE1NTY4MzU5ODQ4NDgyMDgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlIiwibmFtZSI6InByaXZhdGUiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NDg0ODIwOCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC44NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjQuODQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI0Ljg0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vcHJpdmF0ZT9nZW5lcmF0aW9uPTE1NTY4MzU5ODQ4NDgyMDgmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJpdmF0ZS8xNTU2ODM1OTg0ODQ4MjA4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoicHJpdmF0ZSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg0ODQ4MjA4IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL3ByaXZhdGUvMTU1NjgzNTk4NDg0ODIwOC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJvYmplY3QiOiJwcml2YXRlIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODQ4NDgyMDgiLCJlbnRpdHkiOiJhbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNORHE1djd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFFPSJ9" - } - }, - { - "ID": "4812f1e0e907256e", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private?alt=json\u0026predefinedAcl=private\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "62" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "1529" - ], - "Content-Type": [ - 
"application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:25 GMT" - ], - "Etag": [ - "CNDq5v7x/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqTjsMMIlet4CWUmGoB3TuGzhTHXlDwuUe1SAQO8H2vmbTo2QYYV_WozfDO2NyZpsA9LCtgwwthwxeEeEG182WQTTe3xLWnsVjqPZx-iho8JAgI66U" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9wcml2YXRlLzE1NTY4MzU5ODQ4NDgyMDgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlIiwibmFtZSI6InByaXZhdGUiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NDg0ODIwOCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC44NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjUuNDIzWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI0Ljg0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vcHJpdmF0ZT9nZW5lcmF0aW9uPTE1NTY4MzU5ODQ4NDgyMDgmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJpdmF0ZS8xNTU2ODM1OTg0ODQ4MjA4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoicHJpdmF0ZSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg0ODQ4MjA4IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFJPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTkRxNXY3eC9lRUNFQUk9In0=" - } - }, - { - "ID": "969763dc18755620", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private/rewriteTo/b/go-integration-test-20190502-80633403432013-0020/o/dst?alt=json\u0026destinationPredefinedAcl=publicRead\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2017" - ], - "Content-Type": [ - 
"application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:25 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoNk9xKdyuh-I4RkIXqQxMF4A82DZ9b2HYZOdVK7ELOsnMYBqOmrqsY5hzZJsLXMZCZ6zOSzCeefQZuTgYHwyd-jAttJ4QoA1lDk0HTwZqigIfpjCc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvZHN0LzE1NTY4MzU5ODU4NTAyNjkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9kc3QiLCJuYW1lIjoiZHN0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODU4NTAyNjkiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW47IGNoYXJzZXQ9dXRmLTgiLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjUuODQ5WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI1Ljg0OVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNS44NDlaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9vL2RzdD9nZW5lcmF0aW9uPTE1NTY4MzU5ODU4NTAyNjkmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvZHN0LzE1NTY4MzU5ODU4NTAyNjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9vL2RzdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsIm9iamVjdCI6ImRzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg1ODUwMjY5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0ozL28vL3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2RzdC8xNTU2ODM1OTg1ODUwMjY5L2FsbFVzZXJzIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vZHN0L2FjbC9hbGxVc2VycyIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsIm9iamVjdCI6ImRzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg1ODUwMjY5IiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNKMy9vLy94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ0ozL28vL3gvZUVDRUFFPSJ9fQ==" - } - }, - { - "ID": "8db7e2a6b620e794", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/comp/compose?alt=json\u0026destinationPredefinedAcl=authenticatedRead\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "130" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6InByaXZhdGUifSx7Im5hbWUiOiJkc3QifV19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "1906" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:26 GMT" - ], - "Etag": [ - "CJGwwP/x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpqjCYxNFNTu3V1H_kV5WJxkbEYE9I3H5fYjfGMPO7n6C9NJstYZGcIh6xVA5RAxMCwP1PdzXITgTI1m2vm5xP5nhzLVRHKhder2qzYFGsAooH-kG0" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9jb21wLzE1NTY4MzU5ODYzMTUyODEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9jb21wIiwibmFtZSI6ImNvbXAiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NjMxNTI4MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNi4zMTRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjYuMzE0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI2LjMxNFoiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vY29tcD9nZW5lcmF0aW9uPTE1NTY4MzU5ODYzMTUyODEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvY29tcC8xNTU2ODM1OTg2MzE1MjgxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9jb21wL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoiY29tcCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg2MzE1MjgxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0pHd3dQL3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2NvbXAvMTU1NjgzNTk4NjMxNTI4MS9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzN
DAzNDMyMDEzLTAwMjAvby9jb21wL2FjbC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJvYmplY3QiOiJjb21wIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODYzMTUyODEiLCJlbnRpdHkiOiJhbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNKR3d3UC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNKR3d3UC94L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "72a0ce58c513f6d5", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/comp?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:26 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpWOQtHF0cKNME8sTEm9vbS9KKKLLK9afxka1b6TTVGxDjCQ4qAdnA9zshsfVFVKK5hNS10aABAQog_gjYCZTXbbsF4bQNBM0i4R7bK2r7saocPtFo" - ] - }, - "Body": "" - } - }, - { - "ID": "111b5a296b726631", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/dst?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:26 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqPsYXABoYOnGueMXiIvH9AntIPEf4xc6i18Nnr_XEYBxy-3LSK9klIcNyfSB7we4lgbctsiYA2e2WWBJlfqk1GnK6YA2fj-e3IiPbKwBwTSLjr1Jk" - ] - }, - "Body": "" - } - }, - { - "ID": "27e4065cd2000d43", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - 
"quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:27 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpMPK_b_vcRkSIj1t37L47as9idfpbrNWqzirdNBLwoaGDMPlbjatMzjLThnYRNwk9CqTNQ5H8eoKFbunpe5TMktUo_FHyI7rryoUhbCpkbytRrTeM" - ] - }, - "Body": "" - } - }, - { - "ID": "cccb109f5fba2a3a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:27 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoVAiJOaePqfycNhiwbkPOptOqgpe9Tvo0jo1aW2nviozj4-QcjTtnGGyhcdrZ8QTxT-KHPo-saYS9ESC_6osYhChJOCi8b9nXtuqH0BqlvywXs9FE" - ] - }, - "Body": "" - } - }, - { - "ID": "cfab6af30981f62b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/projects/deklerk-sandbox/serviceAccount?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "116" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrDBTBzZwm7GH6Jklv6hzuaZaO3JdgliJk8zLZkVNDqwDO7tYP-c06rzrTj03llkWuV_l1k9w2tbUcGG2Rtv-1MdozY6pktTutDMLfyhyHe1DaEIys" - ] - }, - "Body": "eyJlbWFpbF9hZGRyZXNzIjoic2VydmljZS00OTYxNjk2MDE3MTRAZ3MtcHJvamVjdC1hY2NvdW50cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImtpbmQiOiJzdG9yYWdlI3NlcnZpY2VBY2NvdW50In0=" - } - }, - { - "ID": "677e783b9aea8c15", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic29tZS1vYmplY3QifQo=", - "vcVTjdWWssp9XFk0D3Nk5w==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3262" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Etag": [ - "CPryt4Dy/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrdWRt6oMzDM98VpA0VDmWRuNvox2ZBw6ZpjbyOFDKCFKgJ92jsLkF8v4M4UUxaTUVpmr8iElUTld7Q2g5_MZSbRatDE2zVgaY-769Ppq7Nx6f0HAg" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4ODI3MzUzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOC4yNzNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjguMjczWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI4LjI3M1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoidEhhY2ZTS2ZCeUMrLytjbEc3cStqdz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk4ODI3MzUzMCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZ
WxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc29tZS1vYmplY3QvMTU1NjgzNTk4ODI3MzUzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlNtMWdLdz09IiwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "ff3c05fe02cd38ac", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/some-object", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Etag": [ - "\"b4769c7d229f0720beffe7251bbabe8f\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:27:28 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:26:28 GMT" - ], - "X-Goog-Generation": [ - "1556835988273530" - ], - "X-Goog-Hash": [ - "crc32c=Sm1gKw==", - "md5=tHacfSKfByC+/+clG7q+jw==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - 
"X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqn6PsKCJsyZAhI_JtSOcVKs7SWuqYsoprQTUagoZXqu1CHWyBc1TzWfUyF9iv5VBS4iZPP5qz0Lf3b51p8O36Sud2-QS2zDg9FKhtm1uBFKLpcTOs" - ] - }, - "Body": "vcVTjdWWssp9XFk0D3Nk5w==" - } - }, - { - "ID": "d1c7cc7c0e1d2829", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3262" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Etag": [ - "CPryt4Dy/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqluzdkA1sUAJQJZodwWmM7SP7tNoX2JJjDiqHTy3XtjEqiVDNpI0whWTFJJH4caozRYjJPjTb6_ywm82dnAiYEo-YCknhlvkWexfTq1BM9MRWwQeg" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4ODI3MzUzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOC4yNzNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjguMjczWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI4LjI3M1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoidEhhY2ZTS2ZCeUMrLytjbEc3cStqdz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk4ODI3MzUzMCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc29tZS1vYmplY3QvMTU1NjgzNTk4ODI3MzUzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLC
JzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlNtMWdLdz09IiwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "951f74cbe6b30b67", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Etag": [ - "CA0=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoMRpk7F0qXPvUV4uNPdV7VhfUO2Mm-qPBbx3VV92y1wRyElPF1kbRNEGJ1KqbIW5w2XozD7IzjxBx4KevqcPxZr0_E1iU1_NASedJ4lZj4EFoP9UI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyNS41MjdaIiwibWV0YWdlbmVyYXRpb24iOiIxMyIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0EwPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQTA9In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0EwPSJ9" - } - }, - { - "ID": "aadc7916a3fc24ad", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0021?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11805" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqhoAeZtt8ffiWaPr6J0LEaVR3vjerqtJ7gMM9BObOkz_6MtctDuS_s8-DZRxtTZsGfMDtekIKG2v8p2_sb3MxvTgeLhgO4vqvJNIszTaMjjyQcEdY" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=null, unnamedArguments=[]}, location=entity.resource_id.name, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "03b45b7eac939177", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/storage-library-test-bucket/Caf%C3%A9", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "20" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Etag": [ - "\"ade43306cb39336d630e101af5fb51b4\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:26:29 GMT" - ], - "Last-Modified": [ - "Fri, 24 Mar 2017 20:04:38 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1490385878535828" - ], - "X-Goog-Hash": [ - "crc32c=fN3yZg==", - "md5=reQzBss5M21jDhAa9ftRtA==" - ], - "X-Goog-Metageneration": [ - "2" - ], - "X-Goog-Storage-Class": [ - "MULTI_REGIONAL" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "20" - ], - 
"X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpFU2KyS5OA-bAy_MovHXHg5zJZ2kznc6tgTTrSjilrNHiw0NimPHZApcRXOS_ejrSSy8nIn02xipxUWxvHM3ru5IYhJRTeBZxhlq6_XQVsATkvink" - ] - }, - "Body": "Tm9ybWFsaXphdGlvbiBGb3JtIEM=" - } - }, - { - "ID": "725883eeccd4b42c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiemVybyJ9Cg==", - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3128" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Etag": [ - "CKyp9oDy/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UruPh9tBADr1LSQWFlj1C4tjlS6uwlcy_MVRYaTGD4e_qEUlq0iXJx6ns3JHbH7I9olH7aSJpDW6VQcV5EdLwjQMecgA7qKhxzLoYFW4BZdEXbUrro" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLzE1NTY4MzU5ODkyOTYzMDAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvIiwibmFtZSI6Inplcm8iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOS4yOTVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjkuMjk1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI5LjI5NVoiLCJzaXplIjoiMCIsIm1kNUhhc2giOiIxQjJNMlk4QXNnVHBnQW1ZN1BoQ2ZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVybz9nZW5lcmF0aW9uPTE1NTY4MzU5ODkyOTYzMDAmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVybyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg5Mjk2MzAwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0t5cDlvRHkvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8vMTU1NjgzNTk4OTI5NjMwMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvL2FjbC91c2VyLWFub3
RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVybyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg5Mjk2MzAwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0t5cDlvRHkvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJBQUFBQUE9PSIsImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0=" - } - }, - { - "ID": "036f06773f6d6306", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0021/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11821" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqs9uKpqdPwYRv420tAk0KmBT6DS2POPsIj39ROT-r3ymrd2SB-oOJEXuaHGOfih1ckOqp-YrvHHAh7l7A-6rwlFjoBSb4qCtWzXGPjekN0pUBIok8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.bucket, message=null, unnamedArguments=[]}, location=entity.bucket, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "6b9dc540a02ed56f", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/storage-library-test-bucket/Cafe%CC%81", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "20" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Etag": [ - "\"df597679bac7c6150429ad80a1a05680\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:26:29 GMT" - ], - "Last-Modified": [ - "Fri, 24 Mar 2017 20:04:37 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1490385877705600" - ], - "X-Goog-Hash": [ - "crc32c=qBeWjQ==", - "md5=31l2ebrHxhUEKa2AoaBWgA==" - ], - "X-Goog-Metageneration": [ - "2" - ], - "X-Goog-Storage-Class": [ - "MULTI_REGIONAL" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "20" - ], - 
"X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrK69XFh4eDvl0RvSevMYIwMVs-nlDelMhoacS-4faSvuEateXMEqzdMvHClWdzkyqcEk7IQ1N0JMcv5uEPt6V98chYK-p9otm8puF4Uf846EBUEmg" - ] - }, - "Body": "Tm9ybWFsaXphdGlvbiBGb3JtIEQ=" - } - }, - { - "ID": "09f71762da168c2f", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/zero", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "text/plain; charset=utf-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Etag": [ - "\"d41d8cd98f00b204e9800998ecf8427e\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:26:29 GMT" - ], - "X-Goog-Generation": [ - "1556835989296300" - ], - "X-Goog-Hash": [ - "crc32c=AAAAAA==", - "md5=1B2M2Y8AsgTpgAmY7PhCfg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "0" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrL3cJhuZXF-arxiYeo4MSf1to_dAoiyMisikOum5rqnrSnFa0OZ25Sl8hUU7lcIeW6P3-dwXK6qv0yHGFYMFnUB0VYpI9BRT16gMo8ikQ_L3gBmew" - ] - }, - "Body": "" - } - }, - { - "ID": "681729281517b9c0", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrKCNWRWyBsgMgWxsrSjXZMKW22GWVttfUWHw_UNSF7X7hnnz4h-cD7vFzaLO7OVWvnuMyW9qpJAA4eJaERT9hSjGfv41XVw_9ouvBOeIK9ykq-DrE" - ] - }, - "Body": "" - } - }, - { - "ID": "39261fa7e6e9aede", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": 
[ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "485" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:33 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpEt-jSVBZqHHBwfNkeP4NXxMdC1ISsFp2E_M6JiSw9rDj2gTnmTZhrvOn-bo_f9Lx_HMxMDgsXfO0ftWqvUjGwdmHAYNnYK5JSJozLfuosPNxv_S4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjMzLjU1N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozMy41NTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "3e70965b0aa1111b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:34 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:34 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrB-4r560VjwZdR64GS_7_mZDefc-iM5ma_H8KjYfrdNXt-IlkgvzShsy7iNLseN6coOnkJwWT5tLbP54_M7nMUjord_jmpb1i19SBRA_3RQ_64yys" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjMzLjU1N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozMy41NTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "b80f9a99661cd572", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:34 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur4ItFRDN2_vB0_Ej095pAE061aywdVAoU_00t3fazm21R5ZLxUHpQL4ifTJ58218EkOeDBSWNaOaWuuzTnTM0DhXTe4l2X3pytOdfcZdIeyLV1rWA" - ] - }, - "Body": "" - } - }, - { - "ID": "979ffb3f3450c3d5", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "543" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsic3RvcmFnZUNsYXNzIjoiTkVBUkxJTkUiLCJ0eXBlIjoiU2V0U3RvcmFnZUNsYXNzIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6IkRlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsb2NhdGlvbiI6IlVTIiwibmFtZSI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwidmVyc2lvbmluZyI6eyJlbmFibGVkIjp0cnVlfX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "926" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:34 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqM_YYHcsYolLFJJwWhO9AUYmnc3Vp3J6C5AydYOy-0RVAmBxObG8QpwZOsn28vDVllo_OZ1-4bASi_RGi4wdply2CPhzUOCVjP11XNzVYZ4_EiFb4" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjM0LjU2M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozNC41NjNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOnRydWV9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJTZXRTdG9yYWdlQ2xhc3MiLCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSJ9LCJjb25kaXRpb24iOnsiYWdlIjoxMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOmZhbHNlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk1VTFRJX1JFR0lPTkFMIiwiU1RBTkRBUkQiXSwibnVtTmV3ZXJWZXJzaW9ucyI6M319LHsiYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzAsImNyZWF0ZWRCZWZvcmUiOiIyMDE3LTAxLTAxIiwiaXNMaXZlIjp0cnVlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk5FQVJMSU5FIl0sIm51bU5ld2VyVmVyc2lvbnMiOjEwfX1dfSwibGFiZWxzIjp7ImwxIjoidjEiLCJlbXB0eSI6IiJ9LCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "4931edd6fbd7e278", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2872" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:34 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:34 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpxOq-rTQ-0CFxAKezuPpbibtEmD_yASKzhW5dOiUdwVORx6a7_e1dmDnNCdamDPwLsBOpLueQD8iDbwlracb2uxGwwZ6yz92QD-t-JSKM-jSNp2xA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjM0LjU2M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozNC41NjNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsidHlwZSI6IlNldFN0b3JhZ2VDbGFzcyIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6Ik
RlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MSIsImVtcHR5IjoiIn0sInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "12c06b5419a1fa81", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoc24sLEBwU00YKpSYT4V6iJu3WxME2LgbyRWaYj4kX8s6Gjatzl0AWcCwPRhj_Bq0BOoUhKQzS8L0GJlhHC36eyEWEIs2rk2BcUyo0aE3U8XbuHuM" - ] - }, - "Body": "" - } - }, - { - "ID": "bf24dffbe6cb8609", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Etag": [ - "CA0=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqVkn03LSqmt8RiOPdW3JO4dpsS9vRp7VK8Qc0HwhmS3L9KDOgmyZIqLnvAwuAyvoIz1VsbqeoLwMMtz9pCIX216P4K3OpEs-x_bf_VSwMiFehgZiM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyNS41MjdaIiwibWV0YWdlbmVyYXRpb24iOiIxMyIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0EwPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQTA9In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0EwPSJ9" - } - }, - { - "ID": "2e41eac5e693f3e1", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "64746" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqgGJECa6hRJfdjE3ErHERxS3HPU_SALrzjf-HIDuv4lfxMvOwrkF2pfY2jUJ3b8TcIjWcnaa85Rs4LBucKffiSWfFXcfLZMF0EuiVRj9oH6Kp4tOg" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1","name":"acl1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835864248170","metageneration":"2","contentType":"application/octet-stream","timeCreated":"2019-05-02T22:24:24.247Z","updated":"2019-05-02T22:24:25.629Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:24.247Z","size":"16","md5Hash":"0E9tFNpZj0/WKOJ6fV9paw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1?generation=1556835864248170&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COr+pcXx/eECEAI="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"FiDmVg==","etag":"COr+pcXx/eECEAI="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2","name":"acl2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835864737946","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2019-05-02T22:24:24.737Z","updated":"2019-05-02T22:24:24.737Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:24.737Z","size":"16","md5Hash":"c9+O/rg24HTFBc+etWjefg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2?generation=1556835864737946&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/domain-google.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/domain-google.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"domain-google.com","role":"READER","domain":"google.com","etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJrxw8Xx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AtNRtA==","etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs","name":"bucketInCopyAttrs","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835883663856","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:43.663Z","updated":"2019-05-02T22:24:43.663Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:43.663Z","size":"3","md5Hash":"rL0Y20zC+Fzt72VPzMSk2A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?generation=1556835883663856&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPCDx87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"z8SuHQ==","etag":"CPCDx87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object","name":"checksum-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835855962241","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:15.961Z","updated":"2019-05-02T22:24:15.961Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:15.961Z","size":"10","md5Hash":"/F4DjTilcDIIVEHn/nAQsA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object?generation=1556835855962241&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CIGhrMHx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"Vsu0gA==","etag":"CIGhrMHx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1","name":"composed1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835858962957","metageneration":"1","timeCreated":"2019-05-02T22:24:18.962Z","updated":"2019-05-02T22:24:18.962Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:18.962Z","size":"48","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1?ge
neration=1556835858962957&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CI2048Lx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AbWByQ==","componentCount":3,"etag":"CI2048Lx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2","name":"composed2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835859564799","metageneration":"1","contentType":"text/json","timeCreated":"2019-05-02T22:24:19.564Z","updated":"2019-05-02T22:24:19.564Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:19.564Z","size":"48","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2?generation=1556835859564799&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-8
0633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CP+RiMPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AbWByQ==","componentCount":3,"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content","name":"content","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835874352207","metageneration":"1","contentType":"image/jpeg","timeCreated":"2019-05-02T22:24:34.351Z","updated":"2019-05-02T22:24:34.351Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:34.351Z","size":"54","md5Hash":"N8p8/s9FwdAAnlvr/lEAjQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?generation=1556835874352207&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CM/Yjsrx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"GoUbsQ==","etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption","name":"customer-encryption","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835875058680","metageneration":"3","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:35.058Z","updated":"2019-05-02T22:24:36.225Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:35.058Z","size":"11","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?generation=1556835875058680&alt=media","contentLanguage":"en","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPjnucrx/eECEAM="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"etag":"CPjnucrx/eECEAM=","customerEncryption":{"encryptionAlgorithm":"AES256","keySha256":"H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0="}},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2","name":"customer-encryption-2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835880717506","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:40.717Z","updated":"2019-05-02T22:24:40.717Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:40.717Z","size":"11","md5Hash":"xwWNFa0VdXPmlAwrlcAJcg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?generation=1556835880717506&alt=media","contentLanguage":"en","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CMKZk83x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"r0NGrg==","etag":"CMKZk83x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3","name":"customer-encryption-3","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835879859440","metageneration":"1","timeCreated":"2019-05-02T22:24:39.859Z","updated":"2019-05-02T22:24:39.859Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:39.859Z",
"size":"22","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3?generation=1556835879859440&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPDp3szx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"componentCount":2,"etag":"CPDp3szx/eECEAE=","customerEncryption":{"encryptionAlgorithm":"AES256","keySha256":"H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0="}},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test","name":"gzip-test","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835860251867","metageneration":"1","contentType":"application/x-gzip","timeCreated":"2019-05-02T22:24:20.251Z","updated":"2019-05-02T22:24:20.251Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:20.251Z","size":"27","md5Hash":"OtCw+aRRIRqKGFAEOax+qw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integra
tion-test-20190502-80633403432013-0001/o/gzip-test?generation=1556835860251867&alt=media","contentEncoding":"gzip","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CNuJssPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"9DhwBA==","etag":"CNuJssPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1","name":"hashesOnUpload-1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835885051680","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:45.051Z","updated":"2019-05-02T22:24:45.051Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:45.051Z","size":"27","md5Hash":"ofZjGlcXPJiGOAfKFbJl1Q==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1?generation=1556835885051680&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CKDem8/x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"cH+A+w==","etag":"CKDem8/x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://ww
w.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"4","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:17.121Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/domain-google.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/domain-google.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"domain-google.com","role":"READER","domain":"google.com","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/allUsers","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/allUsers","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"allUsers","role":"READER","etag":"CLnS+bvx/eECEAQ="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc","name":"posc","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835882760607","metageneration":"1","timeCreated":"2019-05-02T22:24:42.760Z","updated":"2019-05-02T22:24:42.760Z","storageClass":"MULTI_REGIONAL","timeStorageClassUpdated":"2019-05-02T22:24:42.760Z","size":"3","md5Hash":"rL0Y20zC+Fzt72VPzMSk2A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?generation=1556835882760607&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-owners-496169601714","role":"OWN
ER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ/zj87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"z8SuHQ==","etag":"CJ/zj87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2","name":"posc2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835883152770","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:43.152Z","updated":"2019-05-02T22:24:43.152Z","storageClass":"MULTI_REGIONAL","timeStorageClassUpdated":"2019-05-02T22:24:43.152Z","size":"3","md5Hash":"9WGq9u8L8U1CCLtGpMyzrQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2?generation=1556835883152770&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CILrp87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"17qABQ==","etag":"CILrp87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL","name":"signedURL","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835861141206","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:21.140Z","updated":"2019-05-02T22:24:21.140Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:21.140Z","size":"29","md5Hash":"Jyxvgwm9n2MsrGTMPbMeYA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL?generation=1556835861141206&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CNat6MPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"ZTqALw==","etag":"CNat6MPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object","name":"some-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835988273530","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:26:28.273Z","updated":"2019-05-02T22:26:28.273Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:26:28.273Z","size":"16","md5Hash":"tHacfSKfByC+/+clG7q+jw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?generation=1556835988273530&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPryt4Dy/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"Sm1gKw==","etag":"CPryt4Dy/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object","name":"zero-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835856537016","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:16.536Z","updated":"2019-05-02T22:24:16.536Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:16.536Z","size":"0","md5Hash":"1B2M2Y8AsgTpgAmY7PhCfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object?generation=1556835856537016&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLirz8Hx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AAAAAA==","etag":"CLirz8Hx/eECEAE="}]}" - } - }, - { - "ID": "744cbe6970c9623b", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - 
"application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpfI-4jHQ7K_XJuo8cMIfOvPLKerfuQA1KlRDBi4Fq11Uc9i--oKNuFL_MbH0CCxK8PeI4r4fmZILF9hdSGAMMpMmyX0w67j7t84C4tRDx2LnbzG4I" - ] - }, - "Body": "" - } - }, - { - "ID": "a2bea52380600211", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrzTcVhZWJuBZITVX_K6haiVlOGb5fEYRdiiH8ODa4XsVSPl4FCDs7zPpKMTfoZA1veIMd4ccdcj8rUqWDMcRfDU5l4cOGvNhGZV-h9S2AVJfl6GdE" - ] - }, - "Body": "" - } - }, - { - "ID": "e187777c49170a6d", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UouPwITheFghtKlzs2vxoAGnEm_V7OZZnYkk_0pLhV8g5YN6TPRS2n1h0iAtzkuFXYtjovp3ifqt_351LO6-CobCar6VgRnJ-_EQ9WgOWhIC9DzYTs" - ] - }, - "Body": "" - } - }, - { - "ID": "653fa745ed683364", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - 
"Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoBLZLK-ePmvYxO2ByvMQ1dXb_4-5yhOR-1nYkxjkINZf5fY9ptO2H4RfRtbIrRQ-QQNZHqZGgCOGJakKLkNgVI2_ExobQLkJ-JaJ-ViDWL9GZCEgU" - ] - }, - "Body": "" - } - }, - { - "ID": "91230665f80c46f7", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upasg8dCwbL7dWntUy3O1t_wWVeDNNT3YCz1uVNL8QUNh4z75y0p9OfL57wFX4CUv3NV06Oi9f7a85m42BzgDZl-kqmv4com_INYmRYjKbwrTdFB8A" - ] - }, - "Body": "" - } - }, - { - "ID": "dc68e1f94de4fa2a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqXdUtFVSSFIrK72haTC8pKLogl9pB4lDLpARc8oDKdNt1yfbHj4VAQH0M_Doqj1mLY2LVmThFxvG_1mvtVBm-N4x_J-c5Izq46lZh_xx3wOQEYZKQ" - ] - }, - "Body": "" - } - }, - { - "ID": "db5b5c31ef8e503e", - "Request": { - "Method": "DELETE", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur2nVuDhUpL1-5n7rzvboeHuOBlvh-59eoy59Y5ZIyvUy6FXYj2eCphVTBFkaIPXK7XIS6DJXkPzZABjiFi4hMA7Ewdgp860sYu44qguzhTfOymXYY" - ] - }, - "Body": "" - } - }, - { - "ID": "f44103066254972d", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqul73HMDr4p6XCqdfKz99RILwd68VT50DnynneOl5m5E_a7ky2mbMA8mdM6xyAWCQtiJm8dCxvIuKYtjLavA8JUlkGIqqCLGfLbB2EvWahd55Fw_A" - ] - }, - "Body": "" - } - }, - { - "ID": "99aa9c86d4998414", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqfk5JfeEeFo80cy1Y-OxXgzSIbt_x7qDWML2yJopIrt9DsPmRq4cDVFqg6sUFStj6aMrjxP7iu_tbwZzS_3BtnwqfPrJZvHllZ7tdeFTXLmto742o" - ] - }, - "Body": "" - } - }, - { - "ID": "24bd829460fb4df6", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UprF0LO3CObAApP8iwUO6IvC-Yh-5whtr0F_mMzaj0rltEl4C-AZjNuBMGuQUue-v9oBqb-nQU2ykIenSZ3UYnK4XVV7moFIS43VyP7pByYqsHPkMo" - ] - }, - "Body": "" - } - }, - { - "ID": "acdf7531eddfd72c", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqAEWwZD_lXrdbrjMg-PYbmLP1-zSzjESAs-osetXJrFTueJ6ufxVfA3xkMG5KjtEz-68YryfKl9gAGsgfseX6H5COwltvyW6pJSdxDdA-dTV_S8eU" - ] - }, - "Body": "" - } - }, - { - "ID": "8facd7ede0dc71a6", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], 
- "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrInkaBVPO1ip6mFSpF0Nihd1wYE9HNfIz8HfFsZkiEVG1mX7eKMItsbmNwWMehWZ0qaKAf081imDi7DmKqs6mEy7vyTokMNXVUp5LrFoQ-yDfVRuc" - ] - }, - "Body": "" - } - }, - { - "ID": "6affac5f0a8dea5a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqhbUYzZ-vUqGcyfzGFaQrr89Q05cXGCB0EbWA2Dy8dmQxB3uWJ7_FUwi6Ish-VMpOrZv78bT9qAHd5Tr5b3s8bTCX6QR9HlelvxkKdyrUv2G8QQuY" - ] - }, - "Body": "" - } - }, - { - "ID": "8258a6e1b5dc73aa", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upola6CIBa9ONjl53PxMpFGQkHvYEau96KG2mgCmUDWDhxZKCw0T58QQRx8OIJW5sisH_acrPcg2o-LLdJgOxCtlVSw-t6wGisVL2-TSopEuezgmu4" - ] - }, - "Body": "" - } - }, - { - "ID": "12186a37ecd09333", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - 
"Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpFxLaOTSsPe7hDbzBznGCoZvAUmeNgWBwS45X2j3rpLlk8_MIFFuCa-aBpAM83d23ixudKpklnhiqCcrQQotCUZhkrMLpWLHBJdmcM0d5rCKL6opY" - ] - }, - "Body": "" - } - }, - { - "ID": "e7a92beca78e4ec6", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrpITV4ogt1-_eCsp0KFz7tVeM2A4FzSevs8Q0BPZ36nEPrP9aAaB4nfWASzLZgi2hNM1l9eFkf5UbYYWDT5J-nQFCapVNcm_4Nr43yd94Ce95SVps" - ] - }, - "Body": "" - } - }, - { - "ID": "5bb0506f3ca9135d", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:38 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqFBt86Wz7Oqelsb0pPpIskBYdMYLR3XuBmD2F5DwyXt84Q1AUyL0Q94YtqTJP1yzgohHCkJWSGibqUARgP8Ilv9v4lVipsdfjSdFHgIjFJ1H-vuU4" - ] - }, - "Body": "" - } - }, - { - "ID": "3ea05ac204d7a112", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL?alt=json\u0026prettyPrint=false", - "Header": { - 
"Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:38 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up2mLLaTTO-0iqVf4m8WXPEsP-mHlaS6hz_gavjGI5yzU0jCjBNDRw1gj6Wa_cQIBzfjytwP2XYhBIJeAglwa6Uqv87WhzeqgGXt4u820FuZKVgY-w" - ] - }, - "Body": "" - } - }, - { - "ID": "bed8580dc40e850e", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:38 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UodR7nshMeihBskPB6loBZoNDRFEUxojhFCKhQ0NpiEMRHY0ZHU8ncI4Sr7FHUmIDXyK8xVg_I7Qt5ESrqnGmRzW0h1eM8OGfEM-3weHyzMqN-VMik" - ] - }, - "Body": "" - } - }, - { - "ID": "57489f1d9de0ddb8", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:38 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UqxQ_NSeD-rrAbZXERIaDr3_1wDC_RZ4MyuFZnOhdDqLkn4qub7rAIamqSQKDD8-jbn0c3XmxvowTghxhq4tZaxxHTmYJtB_FPvJxRZtCt3Leugx4g" - ] - }, - "Body": "" - } - }, - { - "ID": "006b3176688c0ac8", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:38 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo4l8LfaJI78MH37sa7nOmY2A8EB4paZccrewnIFFud-pwGLeVf067ZiRaBAOAPsPYlaxxtsr6gtECA6UY0xiUBHPTLFOe4VNnNQpniNWYJEfZ1-I8" - ] - }, - "Body": "" - } - }, - { - "ID": "a853ce25e734dbfd", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026pageToken=\u0026prefix=go-integration-test\u0026prettyPrint=false\u0026project=deklerk-sandbox\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "47300" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:39 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:39 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrnXhEY-kWgvl-XqP1kXnVDpQphHD9znE-EyBBlFT-kHZtXEsAW5QG5PuzlaJPfQUwZqnTHP0wxu6YDbRCgDMB31UR760_lfEiX88PtVKIdTgCWm5A" - ] - }, - "Body": 
"{"kind":"storage#buckets","items":[{"kind":"storage#bucket","id":"go-integration-test-20190502-66597705133146-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66597705133146-0001","timeCreated":"2019-05-02T18:29:58.601Z","updated":"2019-05-02T18:29:58.601Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-66611506907602-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66611506907602-0001","timeCreated":"2019-05-02T18:30:12.264Z","updated":"2019-05-02T18:30:12.264Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66611506907602-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-editors-496169601714","bu
cket":"go-integration-test-20190502-66611506907602-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66611506907602-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-66633965420818-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66633965420818-0001","timeCreated":"2019-05-02T18:30:34.662Z","updated":"2019-05-02T18:30:34.662Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projec
tNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-78294113514519-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-78294113514519-0001","timeCreated":"2019-05-02T21:44:54.960Z","updated":"2019-05-02T21:44:54.960Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79385133382825-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-79385133382825-0017","timeCreated":"2019-05-02T22:05:34.375Z","updated":"2019-05-02T22:05:36.341Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79
385133382825-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:05:34.375Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79385133382825-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-79385133382825-0018","timeCreated":"2019-05-02T22:05:37.733Z","updated":"2019-05-02T22:05:37.733Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE=
"},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:05:37.733Z"},"storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79911595924903-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-79911595924903-0001","timeCreated":"2019-05-02T22:11:52.358Z","updated":"2019-05-02T22:11:52.358Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0001","timeCreated":"2019-05-02T22:12:09.706Z","updated":"2019-05-02T22:13:41.634Z","metageneration":"13","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-editors-496169601714","selfLink":"https:/
/www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","versioning":{"enabled":false},"lifecycle":{"rule":[{"action":{"type":"Delete"},"condition":{"age":30}}]},"labels":{"l1":"v2","new":"new"},"storageClass":"STANDARD","etag":"CA0="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0017","timeCreated":"2019-05-02T22:14:17.486Z","updated":"2019-05-02T22:14:18.905Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/ac
l/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:14:17.486Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0018","timeCreated":"2019-05-02T22:14:20.105Z","updated":"2019-05-02T22:14:20.105Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:14:20.105Z"},"storageClass":"STANDARD","etag":"CAE="},{
"kind":"storage#bucket","id":"go-integration-test-20190502-80326589403446-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80326589403446-0001","timeCreated":"2019-05-02T22:18:47.398Z","updated":"2019-05-02T22:18:47.398Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80326589403446-0002","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002","projectNumber":"496169601714","name":"go-integration-test-20190502-80326589403446-0002","timeCreated":"2019-05-02T22:18:48.114Z","updated":"2019-05-02T22:18:48.114Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80326589403446-0002","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-8
0326589403446-0002","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80326589403446-0002","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80333955303539-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80333955303539-0001","timeCreated":"2019-05-02T22:18:54.701Z","updated":"2019-05-02T22:18:54.701Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewe
rs"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80333955303539-0002","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002","projectNumber":"496169601714","name":"go-integration-test-20190502-80333955303539-0002","timeCreated":"2019-05-02T22:18:55.293Z","updated":"2019-05-02T22:18:55.293Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0001","timeCreated":"2019-05-02T22:20:46.897Z","updated":"2019-05-02T22:22:07.429Z","metageneration":"13","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-owners
-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","versioning":{"enabled":false},"lifecycle":{"rule":[{"action":{"type":"Delete"},"condition":{"age":30}}]},"labels":{"new":"new","l1":"v2"},"storageClass":"STANDARD","etag":"CA0="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0017","timeCreated":"2019-05-02T22:22:50.428Z","updated":"2019-05-02T22:22:51.935Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="}
,{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:22:50.428Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0018","timeCreated":"2019-05-02T22:22:52.962Z","updated":"2019-05-02T22:22:52.962Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:22:52.962Z"},"storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80633403432013-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-80633403432013-0017","timeCreated":"2019-05-02T22:26:01.908Z","updated":"2019-05-02T22:26:03.544Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","i
d":"go-integration-test-20190502-80633403432013-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:26:01.908Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80633403432013-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-80633403432013-0018","timeCreated":"2019-05-02T22:26:15.097Z","updated":"2019-05-02T22:26:15.097Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.
com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:26:15.097Z"},"storageClass":"STANDARD","etag":"CAE="}]}" - } - } - ] -} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go deleted file mode 100644 index 3a6a1ce0f5..0000000000 --- a/vendor/cloud.google.com/go/storage/writer.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "errors" - "fmt" - "io" - "sync" - "time" - "unicode/utf8" -) - -// A Writer writes a Cloud Storage object. -type Writer struct { - // ObjectAttrs are optional attributes to set on the object. Any attributes - // must be initialized before the first Write call. Nil or zero-valued - // attributes are ignored. - ObjectAttrs - - // SendCRC32C specifies whether to transmit a CRC32C field. It should be set - // to true in addition to setting the Writer's CRC32C field, because zero - // is a valid CRC and normally a zero would not be transmitted. - // If a CRC32C is sent, and the data written does not match the checksum, - // the write will be rejected. - // - // Note: SendCRC32C must be set to true BEFORE the first call to - // Writer.Write() in order to send the checksum. If it is set after that - // point, the checksum will be ignored. - SendCRC32C bool - - // ChunkSize controls the maximum number of bytes of the object that the - // Writer will attempt to send to the server in a single request. Objects - // smaller than the size will be sent in a single request, while larger - // objects will be split over multiple requests. The value will be rounded up - // to the nearest multiple of 256K. The default ChunkSize is 16MiB. - // - // Each Writer will internally allocate a buffer of size ChunkSize. This is - // used to buffer input data and allow for the input to be sent again if a - // request must be retried. 
- // - // If you upload small objects (< 16MiB), you should set ChunkSize - // to a value slightly larger than the objects' sizes to avoid memory bloat. - // This is especially important if you are uploading many small objects - // concurrently. See - // https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#size - // for more information about performance trade-offs related to ChunkSize. - // - // If ChunkSize is set to zero, chunking will be disabled and the object will - // be uploaded in a single request without the use of a buffer. This will - // further reduce memory used during uploads, but will also prevent the writer - // from retrying in case of a transient error from the server or resuming an - // upload that fails midway through, since the buffer is required in order to - // retry the failed request. - // - // ChunkSize must be set before the first Write call. - ChunkSize int - - // ChunkRetryDeadline sets a per-chunk retry deadline for multi-chunk - // resumable uploads. - // - // For uploads of larger files, the Writer will attempt to retry if the - // request to upload a particular chunk fails with a transient error. - // If a single chunk has been attempting to upload for longer than this - // deadline and the request fails, it will no longer be retried, and the error - // will be returned to the caller. This is only applicable for files which are - // large enough to require a multi-chunk resumable upload. The default value - // is 32s. Users may want to pick a longer deadline if they are using larger - // values for ChunkSize or if they expect to have a slow or unreliable - // internet connection. - // - // To set a deadline on the entire upload, use context timeout or - // cancellation. - ChunkRetryDeadline time.Duration - - // ProgressFunc can be used to monitor the progress of a large write - // operation. If ProgressFunc is not nil and writing requires multiple - // calls to the underlying service (see - // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload), - // then ProgressFunc will be invoked after each call with the number of bytes of - // content copied so far. - // - // ProgressFunc should return quickly without blocking. - ProgressFunc func(int64) - - ctx context.Context - o *ObjectHandle - - opened bool - pw *io.PipeWriter - - donec chan struct{} // closed after err and obj are set. - obj *ObjectAttrs - - mu sync.Mutex - err error -} - -// Write appends to w. It implements the io.Writer interface. - // - // Since writes happen asynchronously, Write may return a nil - // error even though the write failed (or will fail). Always - // use the error returned from Writer.Close to determine if - // the upload was successful. - // - // Writes will be retried on transient errors from the server, unless - // Writer.ChunkSize has been set to zero. -func (w *Writer) Write(p []byte) (n int, err error) { - w.mu.Lock() - werr := w.err - w.mu.Unlock() - if werr != nil { - return 0, werr - } - if !w.opened { - if err := w.openWriter(); err != nil { - return 0, err - } - } - n, err = w.pw.Write(p) - if err != nil { - w.mu.Lock() - werr := w.err - w.mu.Unlock() - // Preserve existing functionality that when context is canceled, Write will return - // context.Canceled instead of "io: read/write on closed pipe". This hides the - // pipe implementation detail from users and makes Write seem as though it's an RPC.
- if errors.Is(werr, context.Canceled) || errors.Is(werr, context.DeadlineExceeded) { - return n, werr - } - } - return n, err -} - -// Close completes the write operation and flushes any buffered data. -// If Close doesn't return an error, metadata about the written object -// can be retrieved by calling Attrs. -func (w *Writer) Close() error { - if !w.opened { - if err := w.openWriter(); err != nil { - return err - } - } - - // Closing either the read or write causes the entire pipe to close. - if err := w.pw.Close(); err != nil { - return err - } - - <-w.donec - w.mu.Lock() - defer w.mu.Unlock() - return w.err -} - -func (w *Writer) openWriter() (err error) { - if err := w.validateWriteAttrs(); err != nil { - return err - } - if w.o.gen != defaultGen { - return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen) - } - - isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true) - opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject) - params := &openWriterParams{ - ctx: w.ctx, - chunkSize: w.ChunkSize, - chunkRetryDeadline: w.ChunkRetryDeadline, - bucket: w.o.bucket, - attrs: &w.ObjectAttrs, - conds: w.o.conds, - encryptionKey: w.o.encryptionKey, - sendCRC32C: w.SendCRC32C, - donec: w.donec, - setError: w.error, - progress: w.progress, - setObj: func(o *ObjectAttrs) { w.obj = o }, - } - if err := w.ctx.Err(); err != nil { - return err // short-circuit - } - w.pw, err = w.o.c.tc.OpenWriter(params, opts...) - if err != nil { - return err - } - w.opened = true - go w.monitorCancel() - - return nil -} - -// monitorCancel is intended to be used as a background goroutine. It monitors the -// context, and when it observes that the context has been canceled, it manually -// closes things that do not take a context. -func (w *Writer) monitorCancel() { - select { - case <-w.ctx.Done(): - w.mu.Lock() - werr := w.ctx.Err() - w.err = werr - w.mu.Unlock() - - // Closing either the read or write causes the entire pipe to close. - w.CloseWithError(werr) - case <-w.donec: - } -} - -// CloseWithError aborts the write operation with the provided error. -// CloseWithError always returns nil. -// -// Deprecated: cancel the context passed to NewWriter instead. -func (w *Writer) CloseWithError(err error) error { - if !w.opened { - return nil - } - return w.pw.CloseWithError(err) -} - -// Attrs returns metadata about a successfully-written object. -// It's only valid to call it after Close returns nil. -func (w *Writer) Attrs() *ObjectAttrs { - return w.obj -} - -func (w *Writer) validateWriteAttrs() error { - attrs := w.ObjectAttrs - // Check the developer didn't change the object Name (this is unfortunate, but - // we don't want to store an object under the wrong name). - if attrs.Name != w.o.object { - return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) - } - if !utf8.ValidString(attrs.Name) { - return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) - } - if attrs.KMSKeyName != "" && w.o.encryptionKey != nil { - return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key") - } - if w.ChunkSize < 0 { - return errors.New("storage: Writer.ChunkSize must be non-negative") - } - return nil -} - -// progress is a convenience wrapper that reports write progress to the Writer -// ProgressFunc if it is set and progress is non-zero. 
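The deleted doc comments above spell out a subtle contract of this Writer: SendCRC32C must be set before the first Write, and a nil error from Write proves nothing because uploads are asynchronous, so only the error from Close is authoritative. A minimal caller-side sketch of that contract (the bucket/object names are placeholders, and it assumes Application Default Credentials are available):

```go
package main

import (
	"context"
	"hash/crc32"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	data := []byte("hello, world")

	// "demo-bucket" and "demo-object" are placeholder names.
	w := client.Bucket("demo-bucket").Object("demo-object").NewWriter(ctx)
	w.ChunkSize = 256 * 1024 // small object: shrink the retry buffer (rounded up to a 256K multiple)
	w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	w.SendCRC32C = true // must be set before the first Write, or the checksum is ignored

	if _, err := w.Write(data); err != nil {
		log.Fatal(err) // a nil error here does not yet mean the upload succeeded
	}
	if err := w.Close(); err != nil {
		log.Fatal(err) // Close is what reports whether the upload actually succeeded
	}
	log.Printf("wrote %q (generation %d)", w.Attrs().Name, w.Attrs().Generation)
}
```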
-func (w *Writer) progress(p int64) { - if w.ProgressFunc != nil && p != 0 { - w.ProgressFunc(p) - } -} - -// error acquires the Writer's lock, sets the Writer's err to the given error, -// then relinquishes the lock. -func (w *Writer) error(err error) { - w.mu.Lock() - w.err = err - w.mu.Unlock() -} diff --git a/vendor/github.com/4meepo/tagalign/.gitignore b/vendor/github.com/4meepo/tagalign/.gitignore new file mode 100644 index 0000000000..e37bb52e49 --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/.gitignore @@ -0,0 +1,75 @@ +# File created using '.gitignore Generator' for Visual Studio Code: https://bit.ly/vscode-gig +# Created by https://www.toptal.com/developers/gitignore/api/visualstudiocode,macos,go +# Edit at https://www.toptal.com/developers/gitignore?templates=visualstudiocode,macos,go + +### Go ### +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +.vscode + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +### macOS ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### macOS Patch ### +# iCloud generated files +*.icloud + +.history/ + +# Built Visual Studio Code Extensions +*.vsix + +### VisualStudioCode Patch ### +# Ignore all local history of files +.history +.ionide + +# End of https://www.toptal.com/developers/gitignore/api/visualstudiocode,macos,go + +# Custom rules (everything added below won't be overriden by 'Generate .gitignore File' if you use 'Update' option) diff --git a/vendor/github.com/4meepo/tagalign/.golangci.yml b/vendor/github.com/4meepo/tagalign/.golangci.yml new file mode 100644 index 0000000000..e65f604fe1 --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/.golangci.yml @@ -0,0 +1,106 @@ +# See https://golangci-lint.run/usage/configuration/ + +linters-settings: + revive: + # see https://github.com/mgechev/revive#available-rules for details. 
+ ignore-generated-header: true + severity: warning + rules: + - name: atomic + - name: blank-imports + - name: bool-literal-in-expr + - name: call-to-gc + - name: confusing-naming + - name: confusing-results + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: deep-exit + - name: defer + - name: dot-imports + - name: duplicated-imports + - name: early-return + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: get-return + - name: identical-branches + - name: if-return + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + - name: modifies-parameter + - name: modifies-value-receiver + - name: package-comments + - name: range + - name: range-val-address + - name: range-val-in-closure + - name: receiver-naming + - name: redefines-builtin-id + - name: string-of-int + - name: superfluous-else + - name: time-naming + - name: unconditional-recursion + - name: unexported-naming + - name: unexported-return + - name: unnecessary-stmt + - name: unreachable-code + - name: unused-parameter + - name: var-declaration + - name: var-naming + - name: waitgroup-by-value + +linters: + disable-all: true + enable: + - asciicheck + - bodyclose + - dogsled + - dupl + - durationcheck + - errcheck + - errorlint + - exhaustive + - exportloopref + - forcetypeassert + - gochecknoinits + - gocognit + - goconst + - gocritic + - gocyclo + - godot + - godox + - goimports + - gomoddirectives + - gomodguard + - goprintffuncname + - gosec + - gosimple + # - govet + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nestif + - nilerr + - noctx + - nolintlint + - prealloc + - predeclared + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - stylecheck + - thelper + - tparallel + - typecheck + - unconvert + - unparam + - unused + - whitespace diff --git a/vendor/github.com/4meepo/tagalign/.goreleaser.yml b/vendor/github.com/4meepo/tagalign/.goreleaser.yml new file mode 100644 index 0000000000..e7b6f6800e --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/.goreleaser.yml @@ -0,0 +1,32 @@ +--- +project_name: tagalign + +release: + github: + owner: 4meepo + name: tagalign + +builds: + - binary: tagalign + goos: + - darwin + - windows + - linux + - freebsd + goarch: + - amd64 + - arm64 + - arm + goarm: + - 6 + - 7 + gomips: + - hardfloat + env: + - CGO_ENABLED=0 + ignore: + - goos: darwin + goarch: 386 + - goos: freebsd + goarch: arm64 + main: ./cmd/tagalign/ \ No newline at end of file diff --git a/vendor/github.com/4meepo/tagalign/LICENSE b/vendor/github.com/4meepo/tagalign/LICENSE new file mode 100644 index 0000000000..da3ae82706 --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Yifei Liu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/4meepo/tagalign/Makefile b/vendor/github.com/4meepo/tagalign/Makefile new file mode 100644 index 0000000000..614e7773c3 --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/Makefile @@ -0,0 +1,7 @@ +.PHONY: lint +lint: + golangci-lint run ./... + +.PHONY: build +build: + go build -o tagalign cmd/tagalign/tagalign.go \ No newline at end of file diff --git a/vendor/github.com/4meepo/tagalign/README.md b/vendor/github.com/4meepo/tagalign/README.md new file mode 100644 index 0000000000..9d04dccbf2 --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/README.md @@ -0,0 +1,130 @@ +# Go Tag Align + +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/4meepo/tagalign?style=flat-square) +[![codecov](https://codecov.io/github/4meepo/tagalign/branch/main/graph/badge.svg?token=1R1T61UNBQ)](https://codecov.io/github/4meepo/tagalign) +[![GoDoc](https://godoc.org/github.com/4meepo/tagalign?status.svg)](https://pkg.go.dev/github.com/4meepo/tagalign) +[![Go Report Card](https://goreportcard.com/badge/github.com/4meepo/tagalign)](https://goreportcard.com/report/github.com/4meepo/tagalign) + +TagAlign is used to align and sort tags in Go structs, making them more readable and easier to maintain. + +For example, this struct + +```go +type FooBar struct { + Foo int `json:"foo" validate:"required"` + Bar string `json:"bar" validate:"required"` + FooFoo int8 `json:"foo_foo" validate:"required"` + BarBar int `json:"bar_bar" validate:"required"` + FooBar struct { + Foo int `json:"foo" yaml:"foo" validate:"required"` + Bar222 string `json:"bar222" validate:"required" yaml:"bar"` + } `json:"foo_bar" validate:"required"` + BarFoo string `json:"bar_foo" validate:"required"` + BarFooBar string `json:"bar_foo_bar" validate:"required"` +} +``` + +can be aligned to: + +```go +type FooBar struct { + Foo int `json:"foo" validate:"required"` + Bar string `json:"bar" validate:"required"` + FooFoo int8 `json:"foo_foo" validate:"required"` + BarBar int `json:"bar_bar" validate:"required"` + FooBar struct { + Foo int `json:"foo" yaml:"foo" validate:"required"` + Bar222 string `json:"bar222" validate:"required" yaml:"bar"` + } `json:"foo_bar" validate:"required"` + BarFoo string `json:"bar_foo" validate:"required"` + BarFooBar string `json:"bar_foo_bar" validate:"required"` +} +``` + +## Usage + +By default, tagalign only aligns tags and does not sort them, but alignment and the [sort feature](https://github.com/4meepo/tagalign#sort-tag) can be used together or separately. + +* As a Golangci Linter (Recommended) + + Tagalign is a built-in linter in [Golangci Lint](https://golangci-lint.run/usage/linters/#tagalign) since `v1.53`. + > Note: In order to have the best experience, add the `--fix` flag to `golangci-lint` to enable the autofix feature. + +* Standalone Mode + + Install it with `go install` or download it [here](https://github.com/4meepo/tagalign/releases). + + ```bash + go install github.com/4meepo/tagalign/cmd/tagalign@latest + ``` + + Run it in your terminal. + + ```bash + # Only align tags.
+  tagalign -fix {package path}
+  # Only sort tags with fixed order.
+  tagalign -fix -noalign -sort -order "json,xml" {package path}
+  # Align and sort together.
+  tagalign -fix -sort -order "json,xml" {package path}
+  # Align and sort together in strict style.
+  tagalign -fix -sort -order "json,xml" -strict {package path}
+  ```
+
+## Advanced Features
+
+### Sort Tag
+
+In addition to alignment, tagalign can sort tags in a fixed order. If we enable sorting with the fixed order `json,xml`, the following code
+
+```go
+type SortExample struct {
+	Foo    int `json:"foo,omitempty" yaml:"bar" xml:"baz" binding:"required" gorm:"column:foo" zip:"foo" validate:"required"`
+	Bar    int `validate:"required" yaml:"foo" xml:"bar" binding:"required" json:"bar,omitempty" gorm:"column:bar" zip:"bar" `
+	FooBar int `gorm:"column:bar" validate:"required" xml:"bar" binding:"required" json:"bar,omitempty" zip:"bar" yaml:"foo"`
+}
+```
+
+will be sorted and aligned to:
+
+```go
+type SortExample struct {
+	Foo    int `json:"foo,omitempty" xml:"baz" binding:"required" gorm:"column:foo" validate:"required" yaml:"bar" zip:"foo"`
+	Bar    int `json:"bar,omitempty" xml:"bar" binding:"required" gorm:"column:bar" validate:"required" yaml:"foo" zip:"bar"`
+	FooBar int `json:"bar,omitempty" xml:"bar" binding:"required" gorm:"column:bar" validate:"required" yaml:"foo" zip:"bar"`
+}
+```
+
+The fixed order is `json,xml`, so the `json` and `xml` tags are sorted and aligned first, and the remaining tags are sorted and aligned in dictionary order.
+
+### Strict Style
+
+Sometimes you may want to align your tags in strict style: the tags are sorted and aligned in dictionary order, and tags with the same key are aligned into the same column. For example, the following code
+
+```go
+type StrictStyleExample struct {
+	Foo int ` xml:"baz" yaml:"bar" zip:"foo" binding:"required" gorm:"column:foo" validate:"required"`
+	Bar int `validate:"required" gorm:"column:bar" yaml:"foo" xml:"bar" binding:"required" json:"bar,omitempty" `
+}
+```
+
+will be aligned to
+
+```go
+type StrictStyleExample struct {
+	Foo int `binding:"required" gorm:"column:foo"                       validate:"required" xml:"baz" yaml:"bar" zip:"foo"`
+	Bar int `binding:"required" gorm:"column:bar" json:"bar,omitempty" validate:"required" xml:"bar" yaml:"foo"`
+}
+```
+
+> ⚠️ Note: Strict style only takes effect when both the align and sort features are enabled.
+
+## References
+
+[Golang AST Visualizer](http://goast.yuroyoro.net/)
+
+[Create New Golang CI Linter](https://golangci-lint.run/contributing/new-linters/)
+
+[Autofix Example](https://github.com/golangci/golangci-lint/pull/2450/files)
+
+[Integrating](https://disaev.me/p/writing-useful-go-analysis-linter/#integrating)
diff --git a/vendor/github.com/4meepo/tagalign/options.go b/vendor/github.com/4meepo/tagalign/options.go
new file mode 100644
index 0000000000..ddec98da73
--- /dev/null
+++ b/vendor/github.com/4meepo/tagalign/options.go
@@ -0,0 +1,37 @@
+package tagalign
+
+type Option func(*Helper)
+
+// WithMode specifies the mode of tagalign.
+func WithMode(mode Mode) Option {
+	return func(h *Helper) {
+		h.mode = mode
+	}
+}
+
+// WithSort enables tag sorting.
+// fixedOrder specifies the order of tags; the remaining tags are sorted by name.
+// Sorting is disabled by default.
+func WithSort(fixedOrder ...string) Option {
+	return func(h *Helper) {
+		h.sort = true
+		h.fixedTagOrder = fixedOrder
+	}
+}
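
A minimal sketch of how these options compose with `NewAnalyzer` in a standalone driver, assuming the standard `golang.org/x/tools/go/analysis/singlechecker` package (the actual CLI lives in `cmd/tagalign`):

```go
// Sketch only: wire tagalign into a vet-style binary.
package main

import (
	"golang.org/x/tools/go/analysis/singlechecker"

	"github.com/4meepo/tagalign"
)

func main() {
	// Align tags and sort them with json first, then xml, then the rest in
	// dictionary order; the equivalent of `tagalign -sort -order "json,xml"`.
	singlechecker.Main(tagalign.NewAnalyzer(tagalign.WithSort("json", "xml")))
}
```

+
+// WithAlign configures whether tag alignment is enabled.
+// Align is enabled by default.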
+func WithAlign(enabled bool) Option { + return func(h *Helper) { + h.align = enabled + } +} + +// WithStrictStyle configure whether enable strict style. +// StrictStyle is disabled by default. +// Note: StrictStyle must be used with WithAlign(true) and WithSort(...) together, or it will be ignored. +func WithStrictStyle() Option { + return func(h *Helper) { + h.style = StrictStyle + } +} diff --git a/vendor/github.com/4meepo/tagalign/tagalign.go b/vendor/github.com/4meepo/tagalign/tagalign.go new file mode 100644 index 0000000000..4734b56661 --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/tagalign.go @@ -0,0 +1,459 @@ +package tagalign + +import ( + "fmt" + "go/ast" + "go/token" + "log" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/fatih/structtag" + + "golang.org/x/tools/go/analysis" +) + +type Mode int + +const ( + StandaloneMode Mode = iota + GolangciLintMode +) + +type Style int + +const ( + DefaultStyle Style = iota + StrictStyle +) + +const ( + errTagValueSyntax = "bad syntax for struct tag value" +) + +func NewAnalyzer(options ...Option) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "tagalign", + Doc: "check that struct tags are well aligned", + Run: func(p *analysis.Pass) (any, error) { + Run(p, options...) + return nil, nil + }, + } +} + +func Run(pass *analysis.Pass, options ...Option) []Issue { + var issues []Issue + for _, f := range pass.Files { + h := &Helper{ + mode: StandaloneMode, + style: DefaultStyle, + align: true, + } + for _, opt := range options { + opt(h) + } + + // StrictStyle must be used with WithAlign(true) and WithSort(...) together, or it will be ignored. + if h.style == StrictStyle && (!h.align || !h.sort) { + h.style = DefaultStyle + } + + if !h.align && !h.sort { + // do nothing + return nil + } + + ast.Inspect(f, func(n ast.Node) bool { + h.find(pass, n) + return true + }) + h.Process(pass) + issues = append(issues, h.issues...) + } + return issues +} + +type Helper struct { + mode Mode + + style Style + + align bool // whether enable tags align. + sort bool // whether enable tags sort. + fixedTagOrder []string // the order of tags, the other tags will be sorted by name. + + singleFields []*ast.Field + consecutiveFieldsGroups [][]*ast.Field // fields in this group, must be consecutive in struct. + issues []Issue +} + +// Issue is used to integrate with golangci-lint's inline auto fix. +type Issue struct { + Pos token.Position + Message string + InlineFix InlineFix +} +type InlineFix struct { + StartCol int // zero-based + Length int + NewString string +} + +func (w *Helper) find(pass *analysis.Pass, n ast.Node) { + v, ok := n.(*ast.StructType) + if !ok { + return + } + + fields := v.Fields.List + if len(fields) == 0 { + return + } + + fs := make([]*ast.Field, 0) + split := func() { + n := len(fs) + if n > 1 { + w.consecutiveFieldsGroups = append(w.consecutiveFieldsGroups, fs) + } else if n == 1 { + w.singleFields = append(w.singleFields, fs[0]) + } + + fs = nil + } + + for i, field := range fields { + if field.Tag == nil { + // field without tags + split() + continue + } + + if i > 0 { + if fields[i-1].Tag == nil { + // if previous filed do not have a tag + fs = append(fs, field) + continue + } + preLineNum := pass.Fset.Position(fields[i-1].Tag.Pos()).Line + lineNum := pass.Fset.Position(field.Tag.Pos()).Line + if lineNum-preLineNum > 1 { + // fields with tags are not consecutive, including two case: + // 1. splited by lines + // 2. 
splited by a struct + split() + + // check if the field is a struct + if _, ok := field.Type.(*ast.StructType); ok { + continue + } + } + } + + fs = append(fs, field) + } + + split() +} + +func (w *Helper) report(pass *analysis.Pass, field *ast.Field, startCol int, msg, replaceStr string) { + if w.mode == GolangciLintMode { + iss := Issue{ + Pos: pass.Fset.Position(field.Tag.Pos()), + Message: msg, + InlineFix: InlineFix{ + StartCol: startCol, + Length: len(field.Tag.Value), + NewString: replaceStr, + }, + } + w.issues = append(w.issues, iss) + } + + if w.mode == StandaloneMode { + pass.Report(analysis.Diagnostic{ + Pos: field.Tag.Pos(), + End: field.Tag.End(), + Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: msg, + TextEdits: []analysis.TextEdit{ + { + Pos: field.Tag.Pos(), + End: field.Tag.End(), + NewText: []byte(replaceStr), + }, + }, + }, + }, + }) + } +} + +func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit + // process grouped fields + for _, fields := range w.consecutiveFieldsGroups { + offsets := make([]int, len(fields)) + + var maxTagNum int + var tagsGroup, notSortedTagsGroup [][]*structtag.Tag + + var uniqueKeys []string + addKey := func(k string) { + for _, key := range uniqueKeys { + if key == k { + return + } + } + uniqueKeys = append(uniqueKeys, k) + } + + for i := 0; i < len(fields); { + field := fields[i] + column := pass.Fset.Position(field.Tag.Pos()).Column - 1 + offsets[i] = column + + tag, err := strconv.Unquote(field.Tag.Value) + if err != nil { + // if tag value is not a valid string, report it directly + w.report(pass, field, column, errTagValueSyntax, field.Tag.Value) + fields = removeField(fields, i) + continue + } + + tags, err := structtag.Parse(tag) + if err != nil { + // if tag value is not a valid struct tag, report it directly + w.report(pass, field, column, err.Error(), field.Tag.Value) + fields = removeField(fields, i) + continue + } + + maxTagNum = max(maxTagNum, tags.Len()) + + if w.sort { + cp := make([]*structtag.Tag, tags.Len()) + for i, tag := range tags.Tags() { + cp[i] = tag + } + notSortedTagsGroup = append(notSortedTagsGroup, cp) + sortBy(w.fixedTagOrder, tags) + } + for _, t := range tags.Tags() { + addKey(t.Key) + } + tagsGroup = append(tagsGroup, tags.Tags()) + + i++ + } + + if w.sort && StrictStyle == w.style { + sortAllKeys(w.fixedTagOrder, uniqueKeys) + maxTagNum = len(uniqueKeys) + } + + // record the max length of each column tag + type tagLen struct { + Key string // present only when sort enabled + Len int + } + tagMaxLens := make([]tagLen, maxTagNum) + for j := 0; j < maxTagNum; j++ { + var maxLength int + var key string + for i := 0; i < len(tagsGroup); i++ { + if w.style == StrictStyle { + key = uniqueKeys[j] + // search by key + for _, tag := range tagsGroup[i] { + if tag.Key == key { + maxLength = max(maxLength, len(tag.String())) + break + } + } + } else { + if len(tagsGroup[i]) <= j { + // in case of index out of range + continue + } + maxLength = max(maxLength, len(tagsGroup[i][j].String())) + } + } + tagMaxLens[j] = tagLen{key, maxLength} + } + + for i, field := range fields { + tags := tagsGroup[i] + + var newTagStr string + if w.align { + // if align enabled, align tags. 
+ newTagBuilder := strings.Builder{} + for i, n := 0, 0; i < len(tags) && n < len(tagMaxLens); { + tag := tags[i] + var format string + if w.style == StrictStyle { + if tagMaxLens[n].Key == tag.Key { + // match + format = alignFormat(tagMaxLens[n].Len + 1) // with an extra space + newTagBuilder.WriteString(fmt.Sprintf(format, tag.String())) + i++ + n++ + } else { + // tag missing + format = alignFormat(tagMaxLens[n].Len + 1) + newTagBuilder.WriteString(fmt.Sprintf(format, "")) + n++ + } + } else { + format = alignFormat(tagMaxLens[n].Len + 1) // with an extra space + newTagBuilder.WriteString(fmt.Sprintf(format, tag.String())) + i++ + n++ + } + } + newTagStr = newTagBuilder.String() + } else { + // otherwise check if tags order changed + if w.sort && reflect.DeepEqual(notSortedTagsGroup[i], tags) { + // if tags order not changed, do nothing + continue + } + tagsStr := make([]string, len(tags)) + for i, tag := range tags { + tagsStr[i] = tag.String() + } + newTagStr = strings.Join(tagsStr, " ") + } + + unquoteTag := strings.TrimRight(newTagStr, " ") + // unquoteTag := newTagStr + newTagValue := fmt.Sprintf("`%s`", unquoteTag) + if field.Tag.Value == newTagValue { + // nothing changed + continue + } + + msg := "tag is not aligned, should be: " + unquoteTag + + w.report(pass, field, offsets[i], msg, newTagValue) + } + } + + // process single fields + for _, field := range w.singleFields { + column := pass.Fset.Position(field.Tag.Pos()).Column - 1 + tag, err := strconv.Unquote(field.Tag.Value) + if err != nil { + w.report(pass, field, column, errTagValueSyntax, field.Tag.Value) + continue + } + + tags, err := structtag.Parse(tag) + if err != nil { + w.report(pass, field, column, err.Error(), field.Tag.Value) + continue + } + originalTags := append([]*structtag.Tag(nil), tags.Tags()...) + if w.sort { + sortBy(w.fixedTagOrder, tags) + } + + newTagValue := fmt.Sprintf("`%s`", tags.String()) + if reflect.DeepEqual(originalTags, tags.Tags()) && field.Tag.Value == newTagValue { + // if tags order not changed, do nothing + continue + } + + msg := "tag is not aligned , should be: " + tags.String() + + w.report(pass, field, column, msg, newTagValue) + } +} + +// Issues returns all issues found by the analyzer. +// It is used to integrate with golangci-lint. +func (w *Helper) Issues() []Issue { + log.Println("tagalign 's Issues() should only be called in golangci-lint mode") + return w.issues +} + +// sortBy sorts tags by fixed order. +// If a tag is not in the fixed order, it will be sorted by name. 
+func sortBy(fixedOrder []string, tags *structtag.Tags) { + // sort by fixed order + sort.Slice(tags.Tags(), func(i, j int) bool { + ti := tags.Tags()[i] + tj := tags.Tags()[j] + + oi := findIndex(fixedOrder, ti.Key) + oj := findIndex(fixedOrder, tj.Key) + + if oi == -1 && oj == -1 { + return ti.Key < tj.Key + } + + if oi == -1 { + return false + } + + if oj == -1 { + return true + } + + return oi < oj + }) +} + +func sortAllKeys(fixedOrder []string, keys []string) { + sort.Slice(keys, func(i, j int) bool { + oi := findIndex(fixedOrder, keys[i]) + oj := findIndex(fixedOrder, keys[j]) + + if oi == -1 && oj == -1 { + return keys[i] < keys[j] + } + + if oi == -1 { + return false + } + + if oj == -1 { + return true + } + + return oi < oj + }) +} + +func findIndex(s []string, e string) int { + for i, a := range s { + if a == e { + return i + } + } + return -1 +} + +func alignFormat(length int) string { + return "%" + fmt.Sprintf("-%ds", length) +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func removeField(fields []*ast.Field, index int) []*ast.Field { + if index < 0 || index >= len(fields) { + return fields + } + + return append(fields[:index], fields[index+1:]...) +} diff --git a/vendor/github.com/Abirdcfly/dupword/.gitignore b/vendor/github.com/Abirdcfly/dupword/.gitignore new file mode 100644 index 0000000000..b5109d2bbf --- /dev/null +++ b/vendor/github.com/Abirdcfly/dupword/.gitignore @@ -0,0 +1,183 @@ + +# Godot-specific ignores +.import/ +export.cfg +export_presets.cfg + +# Mono-specific ignores +.mono/ +data_*/ + +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+.idea/artifacts +.idea/compiler.xml +.idea/jarRepositories.xml +.idea/modules.xml +.idea/*.iml +.idea/modules +*.iml +*.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### Emacs template +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages +/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +dist/ + +# Flycheck +flycheck_*.el + +# server auth directory +/server/ + +# projectiles files +.projectile + +# directory configuration +.dir-locals.el + +# network security +/network-security.data + + +### Vim template +# Swap +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### macOS template +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + diff --git a/vendor/github.com/Abirdcfly/dupword/.goreleaser.yml b/vendor/github.com/Abirdcfly/dupword/.goreleaser.yml new file mode 100644 index 0000000000..c3401787a0 --- /dev/null +++ b/vendor/github.com/Abirdcfly/dupword/.goreleaser.yml @@ -0,0 +1,72 @@ +--- +project_name: dupword + +release: + github: + owner: Abirdcfly + name: dupword + +builds: + - binary: dupword + goos: + - darwin + - windows + - linux + - freebsd + goarch: + - amd64 + - arm64 + - arm + - 386 + - ppc64le + - s390x + - mips64 + - mips64le + - riscv64 + goarm: + - 6 + - 7 + gomips: + - hardfloat + env: + - CGO_ENABLED=0 + ignore: + - goos: darwin + goarch: 386 + - goos: freebsd + goarch: arm64 + main: ./cmd/dupword/ + flags: + - -trimpath + ldflags: -s -w -X main.version={{.Version}} -X main.commit={{.ShortCommit}} -X main.date={{.Date}} + +archives: + - format: tar.gz + wrap_in_directory: true + format_overrides: + - goos: windows + format: zip + name_template: '{{ .ProjectName }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' + files: + - LICENSE + - README.md + +snapshot: + name_template: SNAPSHOT-{{ .Commit }} + +checksum: + name_template: '{{ .ProjectName }}-{{ .Version }}-checksums.txt' + +changelog: + sort: asc + filters: + exclude: + - '(?i)^docs?:' + - '(?i)^docs\([^:]+\):' + - '(?i)^docs\[[^:]+\]:' + - '^tests?:' + - '(?i)^dev:' + - '^build\(deps\): bump .* in /docs \(#\d+\)' + - '^build\(deps\): bump .* in /\.github/peril \(#\d+\)' + - Merge pull request + - Merge branch diff --git 
a/vendor/github.com/Abirdcfly/dupword/LICENSE b/vendor/github.com/Abirdcfly/dupword/LICENSE new file mode 100644 index 0000000000..afa64c6e1e --- /dev/null +++ b/vendor/github.com/Abirdcfly/dupword/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Abirdcfly + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Abirdcfly/dupword/README.md b/vendor/github.com/Abirdcfly/dupword/README.md new file mode 100644 index 0000000000..e6c5b919fa --- /dev/null +++ b/vendor/github.com/Abirdcfly/dupword/README.md @@ -0,0 +1,153 @@ +# dupword + +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/Abirdcfly/dupword?style=flat-square) +[![GoDoc](https://godoc.org/github.com/Abirdcfly/dupword?status.svg)](https://pkg.go.dev/github.com/Abirdcfly/dupword) +[![Actions Status](https://github.com/Abirdcfly/dupword/actions/workflows/lint.yml/badge.svg)](https://github.com/Abirdcfly/dupword/actions) +[![Go Report Card](https://goreportcard.com/badge/github.com/Abirdcfly/dupword)](https://goreportcard.com/report/github.com/Abirdcfly/dupword) + +A linter that checks for duplicate words in the source code (usually miswritten) + +Examples in real code and related issues can be viewed in [dupword#3](https://github.com/Abirdcfly/dupword/issues/3) + +## example + +1. Repeated words appear on two adjacent lines [commit](https://github.com/golang/go/commit/d8f90ce0f8119bf593efb6fb91825de5b61fcda7) + +```diff +--- a/src/cmd/compile/internal/ssa/schedule.go ++++ b/src/cmd/compile/internal/ssa/schedule.go +@@ -179,7 +179,7 @@ func schedule(f *Func) { + // scored CarryChainTail (and prove w is not a tail). + score[w.ID] = ScoreFlags + } +- // Verify v has not been scored. If v has not been visited, v may be the ++ // Verify v has not been scored. If v has not been visited, v may be + // the final (tail) operation in a carry chain. If v is not, v will be + // rescored above when v's carry-using op is scored. When scoring is done, + // only tail operations will retain the CarryChainTail score. +``` + +2. Repeated words appearing on the same line [commit](https://github.com/golang/go/commit/48da729e8468b630ee003ac51cbaac595d53bec8) + +```diff +--- a/src/net/http/cookiejar/jar.go ++++ b/src/net/http/cookiejar/jar.go +@@ -465,7 +465,7 @@ func (j *Jar) domainAndType(host, domain string) (string, bool, error) { + // dot in the domain-attribute before processing the cookie. 
+ //
+ // Most browsers don't do that for IP addresses, only curl
+- // version 7.54) and and IE (version 11) do not reject a
++ // version 7.54) and IE (version 11) do not reject a
+ // Set-Cookie: a=1; domain=.127.0.0.1
+ // This leading dot is optional and serves only as hint for
+ // humans to indicate that a cookie with "domain=.bbc.co.uk"
+```
+
+## Install
+
+```bash
+go install github.com/Abirdcfly/dupword/cmd/dupword@latest
+```
+
+**Or** install the main branch (including the last commit) with:
+
+```bash
+go install github.com/Abirdcfly/dupword/cmd/dupword@main
+```
+
+## Usage
+
+### 1. default
+
+Run with default settings (test files included):
+
+**But note that not all repeated words are wrong**; see [dupword#4](https://github.com/Abirdcfly/dupword/issues/4) for real code examples.
+
+```bash
+$ dupword ./...
+/Users/xxx/go/src/dupword/dupword_test.go:88:10: Duplicate words (the) found
+exit status 3
+```
+
+### 2. skip test file
+
+Skip test files (`*_test.go`) during detection:
+
+```bash
+$ dupword -test=false ./...
+```
+
+### 3. auto-fix
+
+```bash
+$ dupword -fix ./...
+```
+
+### 4. all options
+
+All options:
+
+```bash
+$ dupword --help
+dupword: checks for duplicate words in the source code (usually miswritten)
+
+Usage: dupword [-flag] [package]
+
+This analyzer checks miswritten duplicate words in comments or package doc or string declaration
+
+Flags:
+  -V	print version and exit
+  -all
+    	no effect (deprecated)
+  -c int
+    	display offending line with this many lines of context (default -1)
+  -cpuprofile string
+    	write CPU profile to this file
+  -debug string
+    	debug flags, any subset of "fpstv"
+  -fix
+    	apply all suggested fixes
+  -flags
+    	print analyzer flags in JSON
+  -ignore value
+    	ignore words
+  -json
+    	emit JSON output
+  -keyword value
+    	keywords for detecting duplicate words
+  -memprofile string
+    	write memory profile to this file
+  -source
+    	no effect (deprecated)
+  -tags string
+    	no effect (deprecated)
+  -test
+    	indicates whether test files should be analyzed, too (default true)
+  -trace string
+    	write trace log to this file
+  -v	no effect (deprecated)
+```
+
+### 5. my advice
+
+Use `--keyword=the,and,a` and `-fix` together; specifying only commonly repeated prepositions effectively avoids false positives.
+
+See [dupword#4](https://github.com/Abirdcfly/dupword/issues/4) for real code examples.
+
+```bash
+$ dupword --keyword=the,and,a -fix ./...
+```
+
+## TODO
+
+- [x] add this linter to golangci-lint
+- [ ] rewrite the detection logic to make it more efficient
+
+## Limitation
+
+1. Only `*.go` files are checked, but some duplicated words occur in `*.md` or `*.json` files (example: kubernetes). In that case, the advice is to use [rg](https://github.com/BurntSushi/ripgrep) to find them and replace them manually.
+2. When `-fix` is used, `go fmt` is effectively run behind the scenes. ([This logic is determined upstream](https://github.com/golang/tools/blob/248c34b88a4148128f89e41923498bd86f805b7d/go/analysis/internal/checker/checker.go#L424-L433); this project does not contain that part of the code.)
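
A minimal sketch of the same advice applied programmatically, assuming the standard `singlechecker` driver and setting the `keyword` flag through the analyzer's `Flags` (the actual CLI lives in `cmd/dupword`):

```go
// Sketch only: run dupword as a standalone analysis, mirroring
// `dupword --keyword=the,and,a ./...`.
package main

import (
	"golang.org/x/tools/go/analysis/singlechecker"

	"github.com/Abirdcfly/dupword"
)

func main() {
	a := dupword.NewAnalyzer()
	// Only flag repeats of commonly duplicated prepositions to avoid
	// false positives, as recommended above.
	if err := a.Flags.Set("keyword", "the,and,a"); err != nil {
		panic(err)
	}
	singlechecker.Main(a)
}
```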
+
+## License
+
+MIT
diff --git a/vendor/github.com/Abirdcfly/dupword/dupword.go b/vendor/github.com/Abirdcfly/dupword/dupword.go
new file mode 100644
index 0000000000..9a78fb6cca
--- /dev/null
+++ b/vendor/github.com/Abirdcfly/dupword/dupword.go
@@ -0,0 +1,331 @@
+// MIT License
+//
+// Copyright (c) 2022 Abirdcfly
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+// Package dupword defines an Analyzer that checks for duplicate words
+// in the source code.
+package dupword
+
+import (
+	"flag"
+	"fmt"
+	"go/ast"
+	"go/token"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+	Name = "dupword"
+	Doc  = `checks for duplicate words in the source code (usually miswritten)
+
+This analyzer checks miswritten duplicate words in comments or package doc or string declaration`
+	Message       = "Duplicate words (%s) found"
+	CommentPrefix = `//`
+)
+
+var (
+	defaultWord = []string{}
+	// defaultWord = []string{"the", "and", "a"}
+	ignoreWord = map[string]bool{}
+)
+
+type analyzer struct {
+	KeyWord []string
+}
+
+func (a *analyzer) String() string {
+	return strings.Join(a.KeyWord, ",")
+}
+
+func (a *analyzer) Set(w string) error {
+	if len(w) != 0 {
+		a.KeyWord = make([]string, 0)
+		a.KeyWord = append(a.KeyWord, strings.Split(w, ",")...)
+ } + return nil +} + +type ignore struct { +} + +func (a *ignore) String() string { + t := make([]string, 0, len(ignoreWord)) + for k := range ignoreWord { + t = append(t, k) + } + return strings.Join(t, ",") +} + +func (a *ignore) Set(w string) error { + for _, k := range strings.Split(w, ",") { + ignoreWord[k] = true + } + return nil +} + +// for test only +func ClearIgnoreWord() { + ignoreWord = map[string]bool{} +} + +func NewAnalyzer() *analysis.Analyzer { + ignore := &ignore{} + analyzer := &analyzer{KeyWord: defaultWord} + a := &analysis.Analyzer{ + Name: Name, + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: analyzer.run, + RunDespiteErrors: true, + } + a.Flags.Init(Name, flag.ExitOnError) + a.Flags.Var(analyzer, "keyword", "keywords for detecting duplicate words") + a.Flags.Var(ignore, "ignore", "ignore words") + a.Flags.Var(version{}, "V", "print version and exit") + return a +} + +func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + a.fixDuplicateWordInComment(pass, file) + } + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.BasicLit)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + if lit, ok := n.(*ast.BasicLit); ok { + a.fixDuplicateWordInString(pass, lit) + } + }) + return nil, nil +} + +func (a *analyzer) fixDuplicateWordInComment(pass *analysis.Pass, f *ast.File) { + for _, cg := range f.Comments { + var preLine *ast.Comment + for _, c := range cg.List { + update, keyword, find := a.Check(c.Text) + if find { + pass.Report(analysis.Diagnostic{Pos: c.Slash, End: c.End(), Message: fmt.Sprintf(Message, keyword), SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Update", + TextEdits: []analysis.TextEdit{{ + Pos: c.Slash, + End: c.End(), + NewText: []byte(update), + }}, + }}}) + } + if preLine != nil { + fields := strings.Fields(preLine.Text) + if len(fields) < 1 { + continue + } + preLineContent := fields[len(fields)-1] + "\n" + thisLineContent := c.Text + if find { + thisLineContent = update + } + before, after, _ := strings.Cut(thisLineContent, CommentPrefix) + update, keyword, find := a.Check(preLineContent + after) + if find { + var suggestedFixes []analysis.SuggestedFix + if strings.Contains(update, preLineContent) { + update = before + CommentPrefix + strings.TrimPrefix(update, preLineContent) + suggestedFixes = []analysis.SuggestedFix{{ + Message: "Update", + TextEdits: []analysis.TextEdit{{ + Pos: c.Slash, + End: c.End(), + NewText: []byte(update), + }}, + }} + } + pass.Report(analysis.Diagnostic{Pos: c.Slash, End: c.End(), Message: fmt.Sprintf(Message, keyword), SuggestedFixes: suggestedFixes}) + } + } + preLine = c + } + } +} + +func (a *analyzer) fixDuplicateWordInString(pass *analysis.Pass, lit *ast.BasicLit) { + if lit.Kind != token.STRING { + return + } + value, err := strconv.Unquote(lit.Value) + if err != nil { + fmt.Printf("lit.Value:%v, err: %v\n", lit.Value, err) + // fall back to default + value = lit.Value + } + quote := value != lit.Value + update, keyword, find := a.Check(value) + if quote { + update = strconv.Quote(update) + } + if find { + pass.Report(analysis.Diagnostic{Pos: lit.Pos(), End: lit.End(), Message: fmt.Sprintf(Message, keyword), SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Update", + TextEdits: []analysis.TextEdit{{ + Pos: lit.Pos(), + End: lit.End(), + NewText: []byte(update), + }}, + }}}) + } +} + +// CheckOneKey use to check there is a defined duplicate word in a string. 
+// raw is checked line. key is the keyword to check. empty means just check duplicate word. +func CheckOneKey(raw, key string) (new string, findWord string, find bool) { + if key == "" { + has := false + fields := strings.Fields(raw) + for i := range fields { + if i == len(fields)-1 { + break + } + if fields[i] == fields[i+1] { + has = true + } + } + if !has { + return + } + } else { + if x := strings.Split(raw, key); len(x) < 2 { + return + } + } + + findWordMap := make(map[string]bool, 4) + newLine := strings.Builder{} + wordStart, spaceStart := 0, 0 + curWord, preWord := "", "" + lastSpace := "" + var lastRune int32 + for i, r := range raw { + if !unicode.IsSpace(r) && unicode.IsSpace(lastRune) { + // word start position + /* + i + | + hello[ spaceA ]the[ spaceB ]the[ spaceC ]word + ^ ^ + | curWord: the + preWord: the + */ + symbol := raw[spaceStart:i] + if ((key != "" && curWord == key) || key == "") && curWord == preWord && curWord != "" { + if !ExcludeWords(curWord) { + find = true + findWordMap[curWord] = true + newLine.WriteString(lastSpace) + symbol = "" + } + } else { + newLine.WriteString(lastSpace) + newLine.WriteString(curWord) + } + lastSpace = symbol + preWord = curWord + wordStart = i + } else if unicode.IsSpace(r) && !unicode.IsSpace(lastRune) { + // space start position + spaceStart = i + curWord = raw[wordStart:i] + } else if i == len(raw)-1 { + // last position + word := raw[wordStart:] + if ((key != "" && word == key) || key == "") && word == preWord { + if !ExcludeWords(word) { + find = true + findWordMap[word] = true + } + } else { + newLine.WriteString(lastSpace) + newLine.WriteString(word) + } + } + lastRune = r + } + if find { + new = newLine.String() + findWordSlice := make([]string, len(findWordMap)) + i := 0 + for k := range findWordMap { + findWordSlice[i] = k + i++ + } + sort.Strings(findWordSlice) + findWord = strings.Join(findWordSlice, ",") + } + return +} + +func (a *analyzer) Check(raw string) (update string, keyword string, find bool) { + for _, key := range a.KeyWord { + updateOne, _, findOne := CheckOneKey(raw, key) + if findOne { + raw = updateOne + find = findOne + update = updateOne + if keyword == "" { + keyword = key + } else { + keyword = keyword + "," + key + } + } + } + if len(a.KeyWord) == 0 { + return CheckOneKey(raw, "") + } + return +} + +// ExcludeWords determines whether duplicate words should be reported, +// +// e.g. %s, should not be reported. 
+func ExcludeWords(word string) (exclude bool) { + firstRune, _ := utf8.DecodeRuneInString(word) + if unicode.IsDigit(firstRune) { + return true + } + if unicode.IsPunct(firstRune) { + return true + } + if unicode.IsSymbol(firstRune) { + return true + } + if _, exist := ignoreWord[word]; exist { + return true + } + return false +} diff --git a/vendor/github.com/Abirdcfly/dupword/version.go b/vendor/github.com/Abirdcfly/dupword/version.go new file mode 100644 index 0000000000..9d892d0c98 --- /dev/null +++ b/vendor/github.com/Abirdcfly/dupword/version.go @@ -0,0 +1,41 @@ +// MIT License +// +// # Copyright (c) 2022 Abirdcfly +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package dupword + +import ( + "fmt" + "os" +) + +var Version = "dev" + +type version struct{} + +func (version) IsBoolFlag() bool { return true } +func (version) Get() interface{} { return nil } +func (version) String() string { return "" } +func (version) Set(_ string) error { + fmt.Println(Version) + os.Exit(0) + return nil +} diff --git a/vendor/github.com/Antonboom/errname/LICENSE b/vendor/github.com/Antonboom/errname/LICENSE new file mode 100644 index 0000000000..e2002e4d43 --- /dev/null +++ b/vendor/github.com/Antonboom/errname/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Anton Telyshev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
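
The errname analyzer vendored below checks that sentinel errors follow the `ErrXxx` format and that error types follow the `XxxError` format. A small illustration of conforming names, assuming nothing beyond the standard library:

```go
// Illustrative only: names that satisfy errname's conventions.
package example

import "errors"

// Sentinel errors carry the Err prefix.
var ErrRecordNotFound = errors.New("record not found")

// Error types carry the Error suffix.
type TimeoutError struct{ Op string }

func (e *TimeoutError) Error() string { return e.Op + " timed out" }
```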
diff --git a/vendor/github.com/Antonboom/errname/pkg/analyzer/analyzer.go b/vendor/github.com/Antonboom/errname/pkg/analyzer/analyzer.go new file mode 100644 index 0000000000..aa85225108 --- /dev/null +++ b/vendor/github.com/Antonboom/errname/pkg/analyzer/analyzer.go @@ -0,0 +1,135 @@ +package analyzer + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" + "unicode" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +// New returns new errname analyzer. +func New() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "errname", + Doc: "Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`.", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } +} + +type stringSet = map[string]struct{} + +var ( + importNodes = []ast.Node{(*ast.ImportSpec)(nil)} + typeNodes = []ast.Node{(*ast.TypeSpec)(nil)} + funcNodes = []ast.Node{(*ast.FuncDecl)(nil)} +) + +func run(pass *analysis.Pass) (interface{}, error) { + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + pkgAliases := map[string]string{} + insp.Preorder(importNodes, func(node ast.Node) { + i := node.(*ast.ImportSpec) + if n := i.Name; n != nil && i.Path != nil { + if path, err := strconv.Unquote(i.Path.Value); err == nil { + pkgAliases[n.Name] = getPkgFromPath(path) + } + } + }) + + allTypes := stringSet{} + typesSpecs := map[string]*ast.TypeSpec{} + insp.Preorder(typeNodes, func(node ast.Node) { + t := node.(*ast.TypeSpec) + allTypes[t.Name.Name] = struct{}{} + typesSpecs[t.Name.Name] = t + }) + + errorTypes := stringSet{} + insp.Preorder(funcNodes, func(node ast.Node) { + f := node.(*ast.FuncDecl) + t, ok := isMethodError(f) + if !ok { + return + } + errorTypes[t] = struct{}{} + + tSpec, ok := typesSpecs[t] + if !ok { + panic(fmt.Sprintf("no specification for type %q", t)) + } + + if _, ok := tSpec.Type.(*ast.ArrayType); ok { + if !isValidErrorArrayTypeName(t) { + reportAboutErrorType(pass, tSpec.Pos(), t, true) + } + } else if !isValidErrorTypeName(t) { + reportAboutErrorType(pass, tSpec.Pos(), t, false) + } + }) + + errorFuncs := stringSet{} + insp.Preorder(funcNodes, func(node ast.Node) { + f := node.(*ast.FuncDecl) + if isFuncReturningErr(f.Type, allTypes, errorTypes) { + errorFuncs[f.Name.Name] = struct{}{} + } + }) + + inspectPkgLevelVarsOnly := func(node ast.Node) bool { + switch v := node.(type) { + case *ast.FuncDecl: + return false + + case *ast.ValueSpec: + if name, ok := isSentinelError(v, pkgAliases, allTypes, errorTypes, errorFuncs); ok && !isValidErrorVarName(name) { + reportAboutErrorVar(pass, v.Pos(), name) + } + } + return true + } + for _, f := range pass.Files { + ast.Inspect(f, inspectPkgLevelVarsOnly) + } + + return nil, nil //nolint:nilnil +} + +func reportAboutErrorType(pass *analysis.Pass, typePos token.Pos, typeName string, isArrayType bool) { + var form string + if unicode.IsLower([]rune(typeName)[0]) { + form = "xxxError" + } else { + form = "XxxError" + } + + if isArrayType { + form += "s" + } + pass.Reportf(typePos, "the type name `%s` should conform to the `%s` format", typeName, form) +} + +func reportAboutErrorVar(pass *analysis.Pass, pos token.Pos, varName string) { + var form string + if unicode.IsLower([]rune(varName)[0]) { + form = "errXxx" + } else { + form = "ErrXxx" + } + pass.Reportf(pos, "the variable name `%s` should conform to the `%s` format", varName, form) +} + +func getPkgFromPath(p string) string { + idx := 
strings.LastIndex(p, "/") + if idx == -1 { + return p + } + return p[idx+1:] +} diff --git a/vendor/github.com/Antonboom/errname/pkg/analyzer/facts.go b/vendor/github.com/Antonboom/errname/pkg/analyzer/facts.go new file mode 100644 index 0000000000..06f8d61d8e --- /dev/null +++ b/vendor/github.com/Antonboom/errname/pkg/analyzer/facts.go @@ -0,0 +1,272 @@ +package analyzer + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "strings" + "unicode" +) + +func isMethodError(f *ast.FuncDecl) (typeName string, ok bool) { + if f.Recv == nil || len(f.Recv.List) != 1 { + return "", false + } + if f.Name == nil || f.Name.Name != "Error" { + return "", false + } + + if f.Type == nil || f.Type.Results == nil || len(f.Type.Results.List) != 1 { + return "", false + } + + returnType, ok := f.Type.Results.List[0].Type.(*ast.Ident) + if !ok { + return "", false + } + + var receiverType string + + unwrapIdentName := func(e ast.Expr) string { + switch v := e.(type) { + case *ast.Ident: + return v.Name + case *ast.IndexExpr: + if i, ok := v.X.(*ast.Ident); ok { + return i.Name + } + case *ast.IndexListExpr: + if i, ok := v.X.(*ast.Ident); ok { + return i.Name + } + } + panic(fmt.Errorf("unsupported Error() receiver type %q", types.ExprString(e))) + } + + switch rt := f.Recv.List[0].Type; v := rt.(type) { + case *ast.Ident, *ast.IndexExpr, *ast.IndexListExpr: // SomeError, SomeError[T], SomeError[T1, T2, ...] + receiverType = unwrapIdentName(rt) + + case *ast.StarExpr: // *SomeError, *SomeError[T], *SomeError[T1, T2, ...] + receiverType = unwrapIdentName(v.X) + } + + return receiverType, returnType.Name == "string" +} + +func isValidErrorTypeName(s string) bool { + if isInitialism(s) { + return true + } + + words := split(s) + wordsCnt := wordsCount(words) + + if wordsCnt["error"] != 1 { + return false + } + return words[len(words)-1] == "error" +} + +func isValidErrorArrayTypeName(s string) bool { + if isInitialism(s) { + return true + } + + words := split(s) + wordsCnt := wordsCount(words) + + if wordsCnt["errors"] != 1 { + return false + } + return words[len(words)-1] == "errors" +} + +func isFuncReturningErr(fType *ast.FuncType, allTypes, errorTypes stringSet) bool { + if fType == nil || fType.Results == nil || len(fType.Results.List) != 1 { + return false + } + + var returnTypeName string + switch rt := fType.Results.List[0].Type.(type) { + case *ast.Ident: + returnTypeName = rt.Name + case *ast.StarExpr: + if i, ok := rt.X.(*ast.Ident); ok { + returnTypeName = i.Name + } + } + + return isErrorType(returnTypeName, allTypes, errorTypes) +} + +func isErrorType(tName string, allTypes, errorTypes stringSet) bool { + _, isUserType := allTypes[tName] + _, isErrType := errorTypes[tName] + return isErrType || (tName == "error" && !isUserType) +} + +var knownErrConstructors = stringSet{ + "fmt.Errorf": {}, + "errors.Errorf": {}, + "errors.New": {}, + "errors.Newf": {}, + "errors.NewWithDepth": {}, + "errors.NewWithDepthf": {}, + "errors.NewAssertionErrorWithWrappedErrf": {}, +} + +func isSentinelError( //nolint:gocognit,gocyclo + v *ast.ValueSpec, + pkgAliases map[string]string, + allTypes, errorTypes, errorFuncs stringSet, +) (varName string, ok bool) { + if len(v.Names) != 1 { + return "", false + } + varName = v.Names[0].Name + + switch vv := v.Type.(type) { + // var ErrEndOfFile error + // var ErrEndOfFile SomeErrType + case *ast.Ident: + if isErrorType(vv.Name, allTypes, errorTypes) { + return varName, true + } + + // var ErrEndOfFile *SomeErrType + case *ast.StarExpr: + if i, ok := vv.X.(*ast.Ident); 
ok && isErrorType(i.Name, allTypes, errorTypes) { + return varName, true + } + } + + if len(v.Values) != 1 { + return "", false + } + + switch vv := v.Values[0].(type) { + case *ast.CallExpr: + switch fun := vv.Fun.(type) { + // var ErrEndOfFile = errors.New("end of file") + case *ast.SelectorExpr: + pkg, ok := fun.X.(*ast.Ident) + if !ok { + return "", false + } + pkgFun := fun.Sel + + pkgName := pkg.Name + if a, ok := pkgAliases[pkgName]; ok { + pkgName = a + } + + _, ok = knownErrConstructors[pkgName+"."+pkgFun.Name] + return varName, ok + + // var ErrEndOfFile = newErrEndOfFile() + // var ErrEndOfFile = new(EndOfFileError) + // const ErrEndOfFile = constError("end of file") + // var statusCodeError = new(SomePtrError[string]) + case *ast.Ident: + if isErrorType(fun.Name, allTypes, errorTypes) { + return varName, true + } + + if _, ok := errorFuncs[fun.Name]; ok { + return varName, true + } + + if fun.Name == "new" && len(vv.Args) == 1 { + switch i := vv.Args[0].(type) { + case *ast.Ident: + return varName, isErrorType(i.Name, allTypes, errorTypes) + case *ast.IndexExpr: + if ii, ok := i.X.(*ast.Ident); ok { + return varName, isErrorType(ii.Name, allTypes, errorTypes) + } + } + } + + // var ErrEndOfFile = func() error { ... } + case *ast.FuncLit: + return varName, isFuncReturningErr(fun.Type, allTypes, errorTypes) + } + + // var ErrEndOfFile = &EndOfFileError{} + // var ErrOK = &SomePtrError[string]{Code: "200 OK"} + case *ast.UnaryExpr: + if vv.Op == token.AND { // & + if lit, ok := vv.X.(*ast.CompositeLit); ok { + switch i := lit.Type.(type) { + case *ast.Ident: + return varName, isErrorType(i.Name, allTypes, errorTypes) + case *ast.IndexExpr: + if ii, ok := i.X.(*ast.Ident); ok { + return varName, isErrorType(ii.Name, allTypes, errorTypes) + } + } + } + } + + // var ErrEndOfFile = EndOfFileError{} + // var ErrNotFound = SomeError[string]{Code: "Not Found"} + case *ast.CompositeLit: + switch i := vv.Type.(type) { + case *ast.Ident: + return varName, isErrorType(i.Name, allTypes, errorTypes) + case *ast.IndexExpr: + if ii, ok := i.X.(*ast.Ident); ok { + return varName, isErrorType(ii.Name, allTypes, errorTypes) + } + } + } + + return "", false +} + +func isValidErrorVarName(s string) bool { + if isInitialism(s) { + return true + } + + words := split(s) + wordsCnt := wordsCount(words) + + if wordsCnt["err"] != 1 { + return false + } + return words[0] == "err" +} + +func isInitialism(s string) bool { + return strings.ToLower(s) == s || strings.ToUpper(s) == s +} + +func split(s string) []string { + var words []string + ss := []rune(s) + + var b strings.Builder + b.WriteRune(ss[0]) + + for _, r := range ss[1:] { + if unicode.IsUpper(r) { + words = append(words, strings.ToLower(b.String())) + b.Reset() + } + b.WriteRune(r) + } + + words = append(words, strings.ToLower(b.String())) + return words +} + +func wordsCount(w []string) map[string]int { + result := make(map[string]int, len(w)) + for _, ww := range w { + result[ww]++ + } + return result +} diff --git a/vendor/github.com/Antonboom/nilnil/LICENSE b/vendor/github.com/Antonboom/nilnil/LICENSE new file mode 100644 index 0000000000..e2002e4d43 --- /dev/null +++ b/vendor/github.com/Antonboom/nilnil/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Anton Telyshev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go new file mode 100644 index 0000000000..e980db5462 --- /dev/null +++ b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go @@ -0,0 +1,158 @@ +package analyzer + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const ( + name = "nilnil" + doc = "Checks that there is no simultaneous return of `nil` error and an invalid value." + + reportMsg = "return both the `nil` error and invalid value: use a sentinel error instead" +) + +// New returns new nilnil analyzer. +func New() *analysis.Analyzer { + n := newNilNil() + + a := &analysis.Analyzer{ + Name: name, + Doc: doc, + Run: n.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } + a.Flags.Var(&n.checkedTypes, "checked-types", "coma separated list") + + return a +} + +type nilNil struct { + checkedTypes checkedTypes +} + +func newNilNil() *nilNil { + return &nilNil{ + checkedTypes: newDefaultCheckedTypes(), + } +} + +var ( + types = []ast.Node{(*ast.TypeSpec)(nil)} + + funcAndReturns = []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + (*ast.ReturnStmt)(nil), + } +) + +type typeSpecByName map[string]typer + +func (n *nilNil) run(pass *analysis.Pass) (interface{}, error) { + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + typeSpecs := typeSpecByName{ + "any": newTyper(new(ast.InterfaceType)), + } + insp.Preorder(types, func(node ast.Node) { + t := node.(*ast.TypeSpec) + typeSpecs[t.Name.Name] = newTyper(t.Type) + }) + + var fs funcTypeStack + insp.Nodes(funcAndReturns, func(node ast.Node, push bool) (proceed bool) { + switch v := node.(type) { + case *ast.FuncLit: + if push { + fs.Push(v.Type) + } else { + fs.Pop() + } + + case *ast.FuncDecl: + if push { + fs.Push(v.Type) + } else { + fs.Pop() + } + + case *ast.ReturnStmt: + ft := fs.Top() // Current function. 
+ + if !push || len(v.Results) != 2 || ft == nil || ft.Results == nil || len(ft.Results.List) != 2 { + return false + } + + fRes1, fRes2 := ft.Results.List[0], ft.Results.List[1] + if !(n.isDangerNilField(fRes1, typeSpecs) && n.isErrorField(fRes2)) { + return false + } + + rRes1, rRes2 := v.Results[0], v.Results[1] + if isNil(rRes1) && isNil(rRes2) { + pass.Reportf(v.Pos(), reportMsg) + } + } + + return true + }) + + return nil, nil //nolint:nilnil +} + +func (n *nilNil) isDangerNilField(f *ast.Field, typeSpecs typeSpecByName) bool { + return n.isDangerNilType(f.Type, typeSpecs) +} + +func (n *nilNil) isDangerNilType(t ast.Expr, typeSpecs typeSpecByName) bool { + switch v := t.(type) { + case *ast.StarExpr: + return n.checkedTypes.Contains(ptrType) + + case *ast.FuncType: + return n.checkedTypes.Contains(funcType) + + case *ast.InterfaceType: + return n.checkedTypes.Contains(ifaceType) + + case *ast.MapType: + return n.checkedTypes.Contains(mapType) + + case *ast.ChanType: + return n.checkedTypes.Contains(chanType) + + case *ast.Ident: + if t, ok := typeSpecs[v.Name]; ok { + return n.isDangerNilType(t.Type(), typeSpecs) + } + } + return false +} + +func (n *nilNil) isErrorField(f *ast.Field) bool { + return isIdent(f.Type, "error") +} + +func isNil(e ast.Expr) bool { + return isIdent(e, "nil") +} + +func isIdent(n ast.Node, name string) bool { + i, ok := n.(*ast.Ident) + if !ok { + return false + } + return i.Name == name +} + +type typer interface { + Type() ast.Expr +} + +func newTyper(t ast.Expr) typer { return typerImpl{t: t} } // +type typerImpl struct{ t ast.Expr } // +func (ti typerImpl) Type() ast.Expr { return ti.t } diff --git a/vendor/github.com/Antonboom/nilnil/pkg/analyzer/config.go b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/config.go new file mode 100644 index 0000000000..520b813a54 --- /dev/null +++ b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/config.go @@ -0,0 +1,77 @@ +package analyzer + +import ( + "fmt" + "sort" + "strings" +) + +func newDefaultCheckedTypes() checkedTypes { + return checkedTypes{ + ptrType: struct{}{}, + funcType: struct{}{}, + ifaceType: struct{}{}, + mapType: struct{}{}, + chanType: struct{}{}, + } +} + +const separator = ',' + +type typeName string + +func (t typeName) S() string { + return string(t) +} + +const ( + ptrType typeName = "ptr" + funcType typeName = "func" + ifaceType typeName = "iface" + mapType typeName = "map" + chanType typeName = "chan" +) + +var knownTypes = []typeName{ptrType, funcType, ifaceType, mapType, chanType} + +type checkedTypes map[typeName]struct{} + +func (c checkedTypes) Contains(t typeName) bool { + _, ok := c[t] + return ok +} + +func (c checkedTypes) String() string { + result := make([]string, 0, len(c)) + for t := range c { + result = append(result, t.S()) + } + + sort.Strings(result) + return strings.Join(result, string(separator)) +} + +func (c checkedTypes) Set(s string) error { + types := strings.FieldsFunc(s, func(c rune) bool { return c == separator }) + if len(types) == 0 { + return nil + } + + c.disableAll() + for _, t := range types { + switch tt := typeName(t); tt { + case ptrType, funcType, ifaceType, mapType, chanType: + c[tt] = struct{}{} + default: + return fmt.Errorf("unknown checked type name %q (see help)", t) + } + } + + return nil +} + +func (c checkedTypes) disableAll() { + for k := range c { + delete(c, k) + } +} diff --git a/vendor/github.com/Antonboom/nilnil/pkg/analyzer/func_type_stack.go b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/func_type_stack.go new file mode 100644 
index 0000000000..0817615470 --- /dev/null +++ b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/func_type_stack.go @@ -0,0 +1,29 @@ +package analyzer + +import ( + "go/ast" +) + +type funcTypeStack []*ast.FuncType + +func (s *funcTypeStack) Push(f *ast.FuncType) { + *s = append(*s, f) +} + +func (s *funcTypeStack) Pop() *ast.FuncType { + if len(*s) == 0 { + return nil + } + + last := len(*s) - 1 + f := (*s)[last] + *s = (*s)[:last] + return f +} + +func (s *funcTypeStack) Top() *ast.FuncType { + if len(*s) == 0 { + return nil + } + return (*s)[len(*s)-1] +} diff --git a/vendor/github.com/Antonboom/testifylint/LICENSE b/vendor/github.com/Antonboom/testifylint/LICENSE new file mode 100644 index 0000000000..9b1cf3a393 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Anton Telyshev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Antonboom/testifylint/analyzer/analyzer.go b/vendor/github.com/Antonboom/testifylint/analyzer/analyzer.go new file mode 100644 index 0000000000..84d7e815dc --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/analyzer/analyzer.go @@ -0,0 +1,93 @@ +package analyzer + +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" + + "github.com/Antonboom/testifylint/internal/analysisutil" + "github.com/Antonboom/testifylint/internal/checkers" + "github.com/Antonboom/testifylint/internal/config" + "github.com/Antonboom/testifylint/internal/testify" +) + +const ( + name = "testifylint" + doc = "Checks usage of " + testify.ModulePath + "." + url = "https://github.com/antonboom/" + name +) + +// New returns new instance of testifylint analyzer. 
+func New() *analysis.Analyzer { + cfg := config.NewDefault() + + analyzer := &analysis.Analyzer{ + Name: name, + Doc: doc, + URL: url, + Run: func(pass *analysis.Pass) (any, error) { + regularCheckers, advancedCheckers, err := newCheckers(cfg) + if err != nil { + return nil, fmt.Errorf("build checkers: %v", err) + } + + tl := &testifyLint{ + regularCheckers: regularCheckers, + advancedCheckers: advancedCheckers, + } + return tl.run(pass) + }, + } + config.BindToFlags(&cfg, &analyzer.Flags) + + return analyzer +} + +type testifyLint struct { + regularCheckers []checkers.RegularChecker + advancedCheckers []checkers.AdvancedChecker +} + +func (tl *testifyLint) run(pass *analysis.Pass) (any, error) { + filesToAnalysis := make([]*ast.File, 0, len(pass.Files)) + for _, f := range pass.Files { + if !analysisutil.Imports(f, testify.AssertPkgPath, testify.RequirePkgPath, testify.SuitePkgPath) { + continue + } + filesToAnalysis = append(filesToAnalysis, f) + } + + insp := inspector.New(filesToAnalysis) + + // Regular checkers. + insp.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node) { + tl.regularCheck(pass, node.(*ast.CallExpr)) + }) + + // Advanced checkers. + for _, ch := range tl.advancedCheckers { + for _, d := range ch.Check(pass, insp) { + pass.Report(d) + } + } + + return nil, nil +} + +func (tl *testifyLint) regularCheck(pass *analysis.Pass, ce *ast.CallExpr) { + call := checkers.NewCallMeta(pass, ce) + if nil == call { + return + } + + for _, ch := range tl.regularCheckers { + if d := ch.Check(pass, call); d != nil { + pass.Report(*d) + // NOTE(a.telyshev): I'm not interested in multiple diagnostics per assertion. + // This simplifies the code and also makes the linter more efficient. + return + } + } +} diff --git a/vendor/github.com/Antonboom/testifylint/analyzer/checkers_factory.go b/vendor/github.com/Antonboom/testifylint/analyzer/checkers_factory.go new file mode 100644 index 0000000000..77573e3952 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/analyzer/checkers_factory.go @@ -0,0 +1,74 @@ +package analyzer + +import ( + "fmt" + + "github.com/Antonboom/testifylint/internal/checkers" + "github.com/Antonboom/testifylint/internal/config" +) + +// newCheckers accepts linter config and returns slices of enabled checkers sorted by priority. 
+func newCheckers(cfg config.Config) ([]checkers.RegularChecker, []checkers.AdvancedChecker, error) { + if err := cfg.Validate(); err != nil { + return nil, nil, err + } + + enabledCheckersSet := make(map[string]struct{}) + + if cfg.EnableAll { + for _, checker := range checkers.All() { + enabledCheckersSet[checker] = struct{}{} + } + } else if !cfg.DisableAll { + for _, checker := range checkers.EnabledByDefault() { + enabledCheckersSet[checker] = struct{}{} + } + } + + for _, checker := range cfg.EnabledCheckers { + enabledCheckersSet[checker] = struct{}{} + } + + for _, checker := range cfg.DisabledCheckers { + delete(enabledCheckersSet, checker) + } + + enabledCheckers := make([]string, 0, len(enabledCheckersSet)) + for v := range enabledCheckersSet { + enabledCheckers = append(enabledCheckers, v) + } + checkers.SortByPriority(enabledCheckers) + + regularCheckers := make([]checkers.RegularChecker, 0, len(enabledCheckers)) + advancedCheckers := make([]checkers.AdvancedChecker, 0, len(enabledCheckers)/2) + + for _, name := range enabledCheckers { + ch, ok := checkers.Get(name) + if !ok { + return nil, nil, fmt.Errorf("unknown checker %q", name) + } + + switch c := ch.(type) { + case *checkers.BoolCompare: + c.SetIgnoreCustomTypes(cfg.BoolCompare.IgnoreCustomTypes) + + case *checkers.ExpectedActual: + c.SetExpVarPattern(cfg.ExpectedActual.ExpVarPattern.Regexp) + + case *checkers.RequireError: + c.SetFnPattern(cfg.RequireError.FnPattern.Regexp) + + case *checkers.SuiteExtraAssertCall: + c.SetMode(cfg.SuiteExtraAssertCall.Mode) + } + + switch casted := ch.(type) { + case checkers.RegularChecker: + regularCheckers = append(regularCheckers, casted) + case checkers.AdvancedChecker: + advancedCheckers = append(advancedCheckers, casted) + } + } + + return regularCheckers, advancedCheckers, nil +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/analysisutil/doc.go b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/doc.go new file mode 100644 index 0000000000..b57cbd9384 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/doc.go @@ -0,0 +1,9 @@ +// Package analysisutil contains functions common for `analyzer` and `internal/checkers` packages. +// In addition, it is intended to "lighten" these packages. +// +// If the function is common to several packages, or it makes sense to test it separately without +// "polluting" the target package with tests of private functionality, then you can put function in this package. +// +// It's important to avoid dependency on `golang.org/x/tools/go/analysis` in the helpers API. +// This makes the API "narrower" and also allows you to test functions without some "abstraction leaks". +package analysisutil diff --git a/vendor/github.com/Antonboom/testifylint/internal/analysisutil/file.go b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/file.go new file mode 100644 index 0000000000..3fc1f42b86 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/file.go @@ -0,0 +1,28 @@ +package analysisutil + +import ( + "go/ast" + "strconv" +) + +// Imports tells if the file imports at least one of the packages. +// If no packages provided then function returns false. +func Imports(file *ast.File, pkgs ...string) bool { + for _, i := range file.Imports { + if i.Path == nil { + continue + } + + path, err := strconv.Unquote(i.Path.Value) + if err != nil { + continue + } + // NOTE(a.telyshev): Don't use `slices.Contains` to keep the minimum module version 1.20. 
+ for _, pkg := range pkgs { // Small O(n). + if pkg == path { + return true + } + } + } + return false +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/analysisutil/format.go b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/format.go new file mode 100644 index 0000000000..fcb4b847f6 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/format.go @@ -0,0 +1,34 @@ +package analysisutil + +import ( + "bytes" + "go/ast" + "go/format" + "go/token" +) + +// NodeString is a more powerful analogue of types.ExprString. +// Return empty string if node AST is invalid. +func NodeString(fset *token.FileSet, node ast.Node) string { + if v := formatNode(fset, node); v != nil { + return v.String() + } + return "" +} + +// NodeBytes works as NodeString but returns a byte slice. +// Return nil if node AST is invalid. +func NodeBytes(fset *token.FileSet, node ast.Node) []byte { + if v := formatNode(fset, node); v != nil { + return v.Bytes() + } + return nil +} + +func formatNode(fset *token.FileSet, node ast.Node) *bytes.Buffer { + buf := new(bytes.Buffer) + if err := format.Node(buf, fset, node); err != nil { + return nil + } + return buf +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/analysisutil/object.go b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/object.go new file mode 100644 index 0000000000..4e0346d2ba --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/object.go @@ -0,0 +1,34 @@ +package analysisutil + +import ( + "go/ast" + "go/types" +) + +// ObjectOf works in context of Golang package and returns types.Object for the given object's package and name. +// The search is based on the provided package and its dependencies (imports). +// Returns nil if the object is not found. +func ObjectOf(pkg *types.Package, objPkg, objName string) types.Object { + if pkg.Path() == objPkg { + return pkg.Scope().Lookup(objName) + } + + for _, i := range pkg.Imports() { + if trimVendor(i.Path()) == objPkg { + return i.Scope().Lookup(objName) + } + } + return nil +} + +// IsObj returns true if expression is identifier which notes to given types.Object. +// Useful in combination with types.Universe objects. +func IsObj(typesInfo *types.Info, expr ast.Expr, expected types.Object) bool { + id, ok := expr.(*ast.Ident) + if !ok { + return false + } + + obj := typesInfo.ObjectOf(id) + return obj == expected +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/analysisutil/pkg.go b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/pkg.go new file mode 100644 index 0000000000..d34be5d341 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/pkg.go @@ -0,0 +1,19 @@ +package analysisutil + +import ( + "go/types" + "strings" +) + +// IsPkg checks that package has corresponding objName and path. +// Supports vendored packages. 
+func IsPkg(pkg *types.Package, name, path string) bool { + return pkg.Name() == name && trimVendor(pkg.Path()) == path +} + +func trimVendor(path string) string { + if strings.HasPrefix(path, "vendor/") { + return path[len("vendor/"):] + } + return path +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/blank_import.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/blank_import.go new file mode 100644 index 0000000000..403691e270 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/blank_import.go @@ -0,0 +1,69 @@ +package checkers + +import ( + "fmt" + "strconv" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" + + "github.com/Antonboom/testifylint/internal/testify" +) + +// BlankImport detects useless blank imports of testify's packages. +// These imports are useless since testify doesn't do any magic with init() function. +// +// The checker detects situations like +// +// import ( +// "testing" +// +// _ "github.com/stretchr/testify" +// _ "github.com/stretchr/testify/assert" +// _ "github.com/stretchr/testify/http" +// _ "github.com/stretchr/testify/mock" +// _ "github.com/stretchr/testify/require" +// _ "github.com/stretchr/testify/suite" +// ) +// +// and requires +// +// import ( +// "testing" +// ) +type BlankImport struct{} + +// NewBlankImport constructs BlankImport checker. +func NewBlankImport() BlankImport { return BlankImport{} } +func (BlankImport) Name() string { return "blank-import" } + +func (checker BlankImport) Check(pass *analysis.Pass, _ *inspector.Inspector) (diagnostics []analysis.Diagnostic) { + for _, file := range pass.Files { + for _, imp := range file.Imports { + if imp.Name == nil || imp.Name.Name != "_" { + continue + } + + pkg, err := strconv.Unquote(imp.Path.Value) + if err != nil { + continue + } + if _, ok := packagesNotIntendedForBlankImport[pkg]; !ok { + continue + } + + msg := fmt.Sprintf("avoid blank import of %s as it does nothing", pkg) + diagnostics = append(diagnostics, *newDiagnostic(checker.Name(), imp, msg, nil)) + } + } + return diagnostics +} + +var packagesNotIntendedForBlankImport = map[string]struct{}{ + testify.ModulePath: {}, + testify.AssertPkgPath: {}, + testify.HTTPPkgPath: {}, + testify.MockPkgPath: {}, + testify.RequirePkgPath: {}, + testify.SuitePkgPath: {}, +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/bool_compare.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/bool_compare.go new file mode 100644 index 0000000000..43907123bd --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/bool_compare.go @@ -0,0 +1,304 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// BoolCompare detects situations like +// +// assert.Equal(t, false, result) +// assert.EqualValues(t, false, result) +// assert.Exactly(t, false, result) +// assert.NotEqual(t, true, result) +// assert.NotEqualValues(t, true, result) +// assert.False(t, !result) +// assert.True(t, result == true) +// ... +// +// and requires +// +// assert.False(t, result) +// assert.True(t, result) +type BoolCompare struct { + ignoreCustomTypes bool +} + +// NewBoolCompare constructs BoolCompare checker. 
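Concretely, the rewrites this checker suggests look like the following testify snippet (a hedged before/after sketch derived from the doc comment above):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestBoolCompare(t *testing.T) {
	ok := true

	// Flagged by bool-compare:
	assert.Equal(t, true, ok)  // -> assert.True(t, ok)
	assert.True(t, ok == true) // -> assert.True(t, ok) (simplified)
	assert.False(t, !ok)       // -> assert.True(t, ok)

	// Suggested form:
	assert.True(t, ok)
}
```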
+func NewBoolCompare() *BoolCompare { return new(BoolCompare) } +func (BoolCompare) Name() string { return "bool-compare" } + +func (checker *BoolCompare) SetIgnoreCustomTypes(v bool) *BoolCompare { + checker.ignoreCustomTypes = v + return checker +} + +func (checker BoolCompare) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + newBoolCast := func(e ast.Expr) ast.Expr { + return &ast.CallExpr{Fun: &ast.Ident{Name: "bool"}, Args: []ast.Expr{e}} + } + + newUseFnDiagnostic := func(proposed string, survivingArg ast.Expr, replaceStart, replaceEnd token.Pos) *analysis.Diagnostic { + if !isBuiltinBool(pass, survivingArg) { + if checker.ignoreCustomTypes { + return nil + } + survivingArg = newBoolCast(survivingArg) + } + return newUseFunctionDiagnostic(checker.Name(), call, proposed, + newSuggestedFuncReplacement(call, proposed, analysis.TextEdit{ + Pos: replaceStart, + End: replaceEnd, + NewText: analysisutil.NodeBytes(pass.Fset, survivingArg), + }), + ) + } + + newUseTrueDiagnostic := func(survivingArg ast.Expr, replaceStart, replaceEnd token.Pos) *analysis.Diagnostic { + return newUseFnDiagnostic("True", survivingArg, replaceStart, replaceEnd) + } + + newUseFalseDiagnostic := func(survivingArg ast.Expr, replaceStart, replaceEnd token.Pos) *analysis.Diagnostic { + return newUseFnDiagnostic("False", survivingArg, replaceStart, replaceEnd) + } + + newNeedSimplifyDiagnostic := func(survivingArg ast.Expr, replaceStart, replaceEnd token.Pos) *analysis.Diagnostic { + if !isBuiltinBool(pass, survivingArg) { + if checker.ignoreCustomTypes { + return nil + } + survivingArg = newBoolCast(survivingArg) + } + return newDiagnostic(checker.Name(), call, "need to simplify the assertion", + &analysis.SuggestedFix{ + Message: "Simplify the assertion", + TextEdits: []analysis.TextEdit{{ + Pos: replaceStart, + End: replaceEnd, + NewText: analysisutil.NodeBytes(pass.Fset, survivingArg), + }}, + }, + ) + } + + switch call.Fn.NameFTrimmed { + case "Equal", "EqualValues", "Exactly": + if len(call.Args) < 2 { + return nil + } + + arg1, arg2 := call.Args[0], call.Args[1] + if anyCondSatisfaction(pass, isEmptyInterface, arg1, arg2) { + return nil + } + if anyCondSatisfaction(pass, isBoolOverride, arg1, arg2) { + return nil + } + + t1, t2 := isUntypedTrue(pass, arg1), isUntypedTrue(pass, arg2) + f1, f2 := isUntypedFalse(pass, arg1), isUntypedFalse(pass, arg2) + + switch { + case xor(t1, t2): + survivingArg, _ := anyVal([]bool{t1, t2}, arg2, arg1) + if call.Fn.NameFTrimmed == "Exactly" && !isBuiltinBool(pass, survivingArg) { + // NOTE(a.telyshev): `Exactly` assumes no type casting. + return nil + } + return newUseTrueDiagnostic(survivingArg, arg1.Pos(), arg2.End()) + + case xor(f1, f2): + survivingArg, _ := anyVal([]bool{f1, f2}, arg2, arg1) + if call.Fn.NameFTrimmed == "Exactly" && !isBuiltinBool(pass, survivingArg) { + // NOTE(a.telyshev): `Exactly` assumes no type casting. 
+ return nil + } + return newUseFalseDiagnostic(survivingArg, arg1.Pos(), arg2.End()) + } + + case "NotEqual", "NotEqualValues": + if len(call.Args) < 2 { + return nil + } + + arg1, arg2 := call.Args[0], call.Args[1] + if anyCondSatisfaction(pass, isEmptyInterface, arg1, arg2) { + return nil + } + if anyCondSatisfaction(pass, isBoolOverride, arg1, arg2) { + return nil + } + + t1, t2 := isUntypedTrue(pass, arg1), isUntypedTrue(pass, arg2) + f1, f2 := isUntypedFalse(pass, arg1), isUntypedFalse(pass, arg2) + + switch { + case xor(t1, t2): + survivingArg, _ := anyVal([]bool{t1, t2}, arg2, arg1) + return newUseFalseDiagnostic(survivingArg, arg1.Pos(), arg2.End()) + + case xor(f1, f2): + survivingArg, _ := anyVal([]bool{f1, f2}, arg2, arg1) + return newUseTrueDiagnostic(survivingArg, arg1.Pos(), arg2.End()) + } + + case "True": + if len(call.Args) < 1 { + return nil + } + expr := call.Args[0] + + { + arg1, ok1 := isComparisonWithTrue(pass, expr, token.EQL) + arg2, ok2 := isComparisonWithFalse(pass, expr, token.NEQ) + + survivingArg, ok := anyVal([]bool{ok1, ok2}, arg1, arg2) + if ok && !isEmptyInterface(pass, survivingArg) { + return newNeedSimplifyDiagnostic(survivingArg, expr.Pos(), expr.End()) + } + } + + { + arg1, ok1 := isComparisonWithTrue(pass, expr, token.NEQ) + arg2, ok2 := isComparisonWithFalse(pass, expr, token.EQL) + arg3, ok3 := isNegation(expr) + + survivingArg, ok := anyVal([]bool{ok1, ok2, ok3}, arg1, arg2, arg3) + if ok && !isEmptyInterface(pass, survivingArg) { + return newUseFalseDiagnostic(survivingArg, expr.Pos(), expr.End()) + } + } + + case "False": + if len(call.Args) < 1 { + return nil + } + expr := call.Args[0] + + { + arg1, ok1 := isComparisonWithTrue(pass, expr, token.EQL) + arg2, ok2 := isComparisonWithFalse(pass, expr, token.NEQ) + + survivingArg, ok := anyVal([]bool{ok1, ok2}, arg1, arg2) + if ok && !isEmptyInterface(pass, survivingArg) { + return newNeedSimplifyDiagnostic(survivingArg, expr.Pos(), expr.End()) + } + } + + { + arg1, ok1 := isComparisonWithTrue(pass, expr, token.NEQ) + arg2, ok2 := isComparisonWithFalse(pass, expr, token.EQL) + arg3, ok3 := isNegation(expr) + + survivingArg, ok := anyVal([]bool{ok1, ok2, ok3}, arg1, arg2, arg3) + if ok && !isEmptyInterface(pass, survivingArg) { + return newUseTrueDiagnostic(survivingArg, expr.Pos(), expr.End()) + } + } + } + return nil +} + +func isEmptyInterface(pass *analysis.Pass, expr ast.Expr) bool { + t, ok := pass.TypesInfo.Types[expr] + if !ok { + return false + } + + iface, ok := t.Type.Underlying().(*types.Interface) + return ok && iface.NumMethods() == 0 +} + +func isBuiltinBool(pass *analysis.Pass, e ast.Expr) bool { + basicType, ok := pass.TypesInfo.TypeOf(e).(*types.Basic) + return ok && basicType.Kind() == types.Bool +} + +func isBoolOverride(pass *analysis.Pass, e ast.Expr) bool { + namedType, ok := pass.TypesInfo.TypeOf(e).(*types.Named) + return ok && namedType.Obj().Name() == "bool" +} + +var ( + falseObj = types.Universe.Lookup("false") + trueObj = types.Universe.Lookup("true") +) + +func isUntypedTrue(pass *analysis.Pass, e ast.Expr) bool { + return analysisutil.IsObj(pass.TypesInfo, e, trueObj) +} + +func isUntypedFalse(pass *analysis.Pass, e ast.Expr) bool { + return analysisutil.IsObj(pass.TypesInfo, e, falseObj) +} + +func isComparisonWithTrue(pass *analysis.Pass, e ast.Expr, op token.Token) (ast.Expr, bool) { + return isComparisonWith(pass, e, isUntypedTrue, op) +} + +func isComparisonWithFalse(pass *analysis.Pass, e ast.Expr, op token.Token) (ast.Expr, bool) { + return 
isComparisonWith(pass, e, isUntypedFalse, op) +} + +type predicate func(pass *analysis.Pass, e ast.Expr) bool + +func isComparisonWith(pass *analysis.Pass, e ast.Expr, predicate predicate, op token.Token) (ast.Expr, bool) { + be, ok := e.(*ast.BinaryExpr) + if !ok { + return nil, false + } + if be.Op != op { + return nil, false + } + + t1, t2 := predicate(pass, be.X), predicate(pass, be.Y) + if xor(t1, t2) { + if t1 { + return be.Y, true + } + return be.X, true + } + return nil, false +} + +func isNegation(e ast.Expr) (ast.Expr, bool) { + ue, ok := e.(*ast.UnaryExpr) + if !ok { + return nil, false + } + return ue.X, ue.Op == token.NOT +} + +func xor(a, b bool) bool { + return a != b +} + +// anyVal returns the first value[i] for which bools[i] is true. +func anyVal[T any](bools []bool, vals ...T) (T, bool) { + if len(bools) != len(vals) { + panic("inconsistent usage of valOr") //nolint:forbidigo // Does not depend on the code being analyzed. + } + + for i, b := range bools { + if b { + return vals[i], true + } + } + + var _default T + return _default, false +} + +func anyCondSatisfaction(pass *analysis.Pass, p predicate, vals ...ast.Expr) bool { + for _, v := range vals { + if p(pass, v) { + return true + } + } + return false +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/call_meta.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/call_meta.go new file mode 100644 index 0000000000..44eed49a62 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/call_meta.go @@ -0,0 +1,136 @@ +package checkers + +import ( + "go/ast" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" + "github.com/Antonboom/testifylint/internal/testify" +) + +// CallMeta stores meta info about assertion function/method call, for example +// +// assert.Equal(t, 42, result, "helpful comment") +type CallMeta struct { + // Range contains start and end position of assertion call. + analysis.Range + // IsPkg true if this is package (not object) call. + IsPkg bool + // IsAssert true if this is "testify/assert" package (or object) call. + IsAssert bool + // Selector is the AST expression of "assert.Equal". + Selector *ast.SelectorExpr + // SelectorXStr is a string representation of Selector's left part – value before point, e.g. "assert". + SelectorXStr string + // Fn stores meta info about assertion function itself. + Fn FnMeta + // Args stores assertion call arguments but without `t *testing.T` argument. + // E.g [42, result, "helpful comment"]. + Args []ast.Expr + // ArgsRaw stores assertion call initial arguments. + // E.g [t, 42, result, "helpful comment"]. + ArgsRaw []ast.Expr +} + +func (c CallMeta) String() string { + return c.SelectorXStr + "." + c.Fn.Name +} + +// FnMeta stores meta info about assertion function itself, for example "Equal". +type FnMeta struct { + // Range contains start and end position of function Name. + analysis.Range + // Name is a function name. + Name string + // NameFTrimmed is a function name without "f" suffix. + NameFTrimmed string + // IsFmt is true if function is formatted, e.g. "Equalf". + IsFmt bool +} + +// NewCallMeta returns meta information about testify assertion call. +// Returns nil if ast.CallExpr is not testify call. 
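The syntactic shape NewCallMeta starts from can be poked at without a full *analysis.Pass; the type-info checks (package origin, TestingT detection) are omitted here. A small standalone sketch:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

func main() {
	// The expression shape NewCallMeta inspects: a selector call.
	expr, err := parser.ParseExpr(`assert.Equalf(t, 42, result, "msg: %d", code)`)
	if err != nil {
		panic(err)
	}
	ce := expr.(*ast.CallExpr)
	se := ce.Fun.(*ast.SelectorExpr)

	fmt.Println(se.X.(*ast.Ident).Name) // "assert" -> SelectorXStr
	fmt.Println(se.Sel.Name)            // "Equalf" -> Fn.Name; NameFTrimmed is "Equal"
	fmt.Println(len(ce.Args))           // 5 -> ArgsRaw; Args drops the leading t
}
```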
+func NewCallMeta(pass *analysis.Pass, ce *ast.CallExpr) *CallMeta { + se, ok := ce.Fun.(*ast.SelectorExpr) + if !ok || se.Sel == nil { + return nil + } + fnName := se.Sel.Name + + initiatorPkg, isPkgCall := func() (*types.Package, bool) { + // Examples: + // s.Assert -> method of *suite.Suite -> package suite ("vendor/github.com/stretchr/testify/suite") + // s.Assert().Equal -> method of *assert.Assertions -> package assert ("vendor/github.com/stretchr/testify/assert") + // s.Equal -> method of *assert.Assertions -> package assert ("vendor/github.com/stretchr/testify/assert") + // reqObj.Falsef -> method of *require.Assertions -> package require ("vendor/github.com/stretchr/testify/require") + if sel, ok := pass.TypesInfo.Selections[se]; ok { + return sel.Obj().Pkg(), false + } + + // Examples: + // assert.False -> assert -> package assert ("vendor/github.com/stretchr/testify/assert") + // require.NotEqualf -> require -> package require ("vendor/github.com/stretchr/testify/require") + if id, ok := se.X.(*ast.Ident); ok { + if selObj := pass.TypesInfo.ObjectOf(id); selObj != nil { + if pkg, ok := selObj.(*types.PkgName); ok { + return pkg.Imported(), true + } + } + } + return nil, false + }() + if initiatorPkg == nil { + return nil + } + + isAssert := analysisutil.IsPkg(initiatorPkg, testify.AssertPkgName, testify.AssertPkgPath) + isRequire := analysisutil.IsPkg(initiatorPkg, testify.RequirePkgName, testify.RequirePkgPath) + if !(isAssert || isRequire) { + return nil + } + + return &CallMeta{ + Range: ce, + IsPkg: isPkgCall, + IsAssert: isAssert, + Selector: se, + SelectorXStr: analysisutil.NodeString(pass.Fset, se.X), + Fn: FnMeta{ + Range: se.Sel, + Name: fnName, + NameFTrimmed: strings.TrimSuffix(fnName, "f"), + IsFmt: strings.HasSuffix(fnName, "f"), + }, + Args: trimTArg(pass, ce.Args), + ArgsRaw: ce.Args, + } +} + +func trimTArg(pass *analysis.Pass, args []ast.Expr) []ast.Expr { + if len(args) == 0 { + return args + } + + if isTestingTPtr(pass, args[0]) { + return args[1:] + } + return args +} + +func isTestingTPtr(pass *analysis.Pass, arg ast.Expr) bool { + assertTestingTObj := analysisutil.ObjectOf(pass.Pkg, testify.AssertPkgPath, "TestingT") + requireTestingTObj := analysisutil.ObjectOf(pass.Pkg, testify.RequirePkgPath, "TestingT") + + argType := pass.TypesInfo.TypeOf(arg) + if argType == nil { + return false + } + + return ((assertTestingTObj != nil) && + types.Implements(argType, assertTestingTObj.Type().Underlying().(*types.Interface))) || + ((requireTestingTObj != nil) && + types.Implements(argType, requireTestingTObj.Type().Underlying().(*types.Interface))) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/checker.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/checker.go new file mode 100644 index 0000000000..ac23af6f6f --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/checker.go @@ -0,0 +1,23 @@ +package checkers + +import ( + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" +) + +// Checker describes named checker. +type Checker interface { + Name() string +} + +// RegularChecker check assertion call presented in CallMeta form. +type RegularChecker interface { + Checker + Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic +} + +// AdvancedChecker implements complex Check logic different from trivial CallMeta check. 
+type AdvancedChecker interface { + Checker + Check(pass *analysis.Pass, inspector *inspector.Inspector) []analysis.Diagnostic +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/checkers_registry.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/checkers_registry.go new file mode 100644 index 0000000000..e34a21bf9c --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/checkers_registry.go @@ -0,0 +1,105 @@ +package checkers + +import ( + "sort" +) + +// registry stores checkers meta information in checkers' priority order. +var registry = checkersRegistry{ + // Regular checkers. + {factory: asCheckerFactory(NewFloatCompare), enabledByDefault: true}, + {factory: asCheckerFactory(NewBoolCompare), enabledByDefault: true}, + {factory: asCheckerFactory(NewEmpty), enabledByDefault: true}, + {factory: asCheckerFactory(NewLen), enabledByDefault: true}, + {factory: asCheckerFactory(NewCompares), enabledByDefault: true}, + {factory: asCheckerFactory(NewErrorNil), enabledByDefault: true}, + {factory: asCheckerFactory(NewNilCompare), enabledByDefault: true}, + {factory: asCheckerFactory(NewErrorIsAs), enabledByDefault: true}, + {factory: asCheckerFactory(NewExpectedActual), enabledByDefault: true}, + {factory: asCheckerFactory(NewSuiteExtraAssertCall), enabledByDefault: true}, + {factory: asCheckerFactory(NewSuiteDontUsePkg), enabledByDefault: true}, + {factory: asCheckerFactory(NewUselessAssert), enabledByDefault: true}, + // Advanced checkers. + {factory: asCheckerFactory(NewBlankImport), enabledByDefault: true}, + {factory: asCheckerFactory(NewGoRequire), enabledByDefault: true}, + {factory: asCheckerFactory(NewRequireError), enabledByDefault: true}, + {factory: asCheckerFactory(NewSuiteTHelper), enabledByDefault: false}, +} + +type checkersRegistry []checkerMeta + +type checkerMeta struct { + factory checkerFactory + enabledByDefault bool +} + +type checkerFactory func() Checker + +func asCheckerFactory[T Checker](fn func() T) checkerFactory { + return func() Checker { + return fn() + } +} + +func (r checkersRegistry) get(name string) (m checkerMeta, priority int, found bool) { + for i, meta := range r { + if meta.factory().Name() == name { + return meta, i, true + } + } + return checkerMeta{}, 0, false +} + +// All returns all checkers names sorted by checker's priority. +func All() []string { + result := make([]string, 0, len(registry)) + for _, meta := range registry { + result = append(result, meta.factory().Name()) + } + return result +} + +// EnabledByDefault returns checkers enabled by default sorted by checker's priority. +func EnabledByDefault() []string { + result := make([]string, 0, len(registry)) + for _, meta := range registry { + if meta.enabledByDefault { + result = append(result, meta.factory().Name()) + } + } + return result +} + +// Get returns new checker instance by checker's name. +func Get(name string) (Checker, bool) { + meta, _, ok := registry.get(name) + if ok { + return meta.factory(), true + } + return nil, false +} + +// IsKnown checks if there is a checker with that name. +func IsKnown(name string) bool { + _, _, ok := registry.get(name) + return ok +} + +// IsEnabledByDefault returns true if a checker is enabled by default. +// Returns false if there is no such checker in the registry. +// For pre-validation use Get or IsKnown. 
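How these registry helpers compose, assuming code inside this module (the checkers package is internal, so it is not importable from outside testifylint):

```go
package example

import (
	"fmt"

	"github.com/Antonboom/testifylint/internal/checkers"
)

func Example() {
	fmt.Println(checkers.IsKnown("bool-compare"))             // true
	fmt.Println(checkers.IsEnabledByDefault("suite-thelper")) // false, per the registry above

	names := []string{"len", "float-compare", "bool-compare"}
	checkers.SortByPriority(names)
	fmt.Println(names) // [float-compare bool-compare len]
}
```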
+func IsEnabledByDefault(name string) bool { + meta, _, ok := registry.get(name) + return ok && meta.enabledByDefault +} + +// SortByPriority mutates the input checkers names by sorting them in checker priority order. +// Ignores unknown checkers. For pre-validation use Get or IsKnown. +func SortByPriority(checkers []string) { + sort.Slice(checkers, func(i, j int) bool { + lhs, rhs := checkers[i], checkers[j] + _, lhsPriority, _ := registry.get(lhs) + _, rhsPriority, _ := registry.get(rhs) + return lhsPriority < rhsPriority + }) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/compares.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/compares.go new file mode 100644 index 0000000000..336a345124 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/compares.go @@ -0,0 +1,96 @@ +package checkers + +import ( + "bytes" + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// Compares detects situations like +// +// assert.True(t, a == b) +// assert.True(t, a != b) +// assert.True(t, a > b) +// assert.True(t, a >= b) +// assert.True(t, a < b) +// assert.True(t, a <= b) +// assert.False(t, a == b) +// ... +// +// and requires +// +// assert.Equal(t, a, b) +// assert.NotEqual(t, a, b) +// assert.Greater(t, a, b) +// assert.GreaterOrEqual(t, a, b) +// assert.Less(t, a, b) +// assert.LessOrEqual(t, a, b) +type Compares struct{} + +// NewCompares constructs Compares checker. +func NewCompares() Compares { return Compares{} } +func (Compares) Name() string { return "compares" } + +func (checker Compares) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + if len(call.Args) < 1 { + return nil + } + + be, ok := call.Args[0].(*ast.BinaryExpr) + if !ok { + return nil + } + + var tokenToProposedFn map[token.Token]string + + switch call.Fn.NameFTrimmed { + case "True": + tokenToProposedFn = tokenToProposedFnInsteadOfTrue + case "False": + tokenToProposedFn = tokenToProposedFnInsteadOfFalse + default: + return nil + } + + if proposedFn, ok := tokenToProposedFn[be.Op]; ok { + a, b := be.X, be.Y + return newUseFunctionDiagnostic(checker.Name(), call, proposedFn, + newSuggestedFuncReplacement(call, proposedFn, analysis.TextEdit{ + Pos: be.X.Pos(), + End: be.Y.End(), + NewText: formatAsCallArgs(pass, a, b), + }), + ) + } + return nil +} + +var tokenToProposedFnInsteadOfTrue = map[token.Token]string{ + token.EQL: "Equal", + token.NEQ: "NotEqual", + token.GTR: "Greater", + token.GEQ: "GreaterOrEqual", + token.LSS: "Less", + token.LEQ: "LessOrEqual", +} + +var tokenToProposedFnInsteadOfFalse = map[token.Token]string{ + token.EQL: "NotEqual", + token.NEQ: "Equal", + token.GTR: "LessOrEqual", + token.GEQ: "Less", + token.LSS: "GreaterOrEqual", + token.LEQ: "Greater", +} + +// formatAsCallArgs joins a and b and return bytes like `a, b`. 
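In testify terms, the compares checker's rewrites look like this (illustrative before/after; the mapping follows the two token tables above):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestCompares(t *testing.T) {
	a, b := 2, 2

	// Flagged by compares:
	assert.True(t, a == b) // -> assert.Equal(t, a, b)
	assert.False(t, a < b) // -> assert.GreaterOrEqual(t, a, b)

	// Suggested form:
	assert.Equal(t, a, b)
	assert.GreaterOrEqual(t, a, b)
}
```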
+func formatAsCallArgs(pass *analysis.Pass, a, b ast.Node) []byte { + return bytes.Join([][]byte{ + analysisutil.NodeBytes(pass.Fset, a), + analysisutil.NodeBytes(pass.Fset, b), + }, []byte(", ")) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/diagnostic.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/diagnostic.go new file mode 100644 index 0000000000..4ab69c69bb --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/diagnostic.go @@ -0,0 +1,60 @@ +package checkers + +import ( + "fmt" + + "golang.org/x/tools/go/analysis" +) + +func newUseFunctionDiagnostic( + checker string, + call *CallMeta, + proposedFn string, + fix *analysis.SuggestedFix, +) *analysis.Diagnostic { + f := proposedFn + if call.Fn.IsFmt { + f += "f" + } + msg := fmt.Sprintf("use %s.%s", call.SelectorXStr, f) + + return newDiagnostic(checker, call, msg, fix) +} + +func newDiagnostic( + checker string, + rng analysis.Range, + msg string, + fix *analysis.SuggestedFix, +) *analysis.Diagnostic { + d := analysis.Diagnostic{ + Pos: rng.Pos(), + End: rng.End(), + Category: checker, + Message: checker + ": " + msg, + } + if fix != nil { + d.SuggestedFixes = []analysis.SuggestedFix{*fix} + } + return &d +} + +func newSuggestedFuncReplacement( + call *CallMeta, + proposedFn string, + additionalEdits ...analysis.TextEdit, +) *analysis.SuggestedFix { + if call.Fn.IsFmt { + proposedFn += "f" + } + return &analysis.SuggestedFix{ + Message: fmt.Sprintf("Replace `%s` with `%s`", call.Fn.Name, proposedFn), + TextEdits: append([]analysis.TextEdit{ + { + Pos: call.Fn.Pos(), + End: call.Fn.End(), + NewText: []byte(proposedFn), + }, + }, additionalEdits...), + } +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/empty.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/empty.go new file mode 100644 index 0000000000..5ad371bb4f --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/empty.go @@ -0,0 +1,172 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// Empty detects situations like +// +// assert.Len(t, arr, 0) +// assert.Equal(t, 0, len(arr)) +// assert.EqualValues(t, 0, len(arr)) +// assert.Exactly(t, 0, len(arr)) +// assert.LessOrEqual(t, len(arr), 0) +// assert.GreaterOrEqual(t, 0, len(arr)) +// assert.Less(t, len(arr), 0) +// assert.Greater(t, 0, len(arr)) +// assert.Less(t, len(arr), 1) +// assert.Greater(t, 1, len(arr)) +// +// assert.NotEqual(t, 0, len(arr)) +// assert.NotEqualValues(t, 0, len(arr)) +// assert.Less(t, 0, len(arr)) +// assert.Greater(t, len(arr), 0) +// +// and requires +// +// assert.Empty(t, arr) +// assert.NotEmpty(t, arr) +type Empty struct{} + +// NewEmpty constructs Empty checker. 
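A before/after sketch of what the empty checker reports, based on the doc comment above:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEmpty(t *testing.T) {
	var arr []int

	// Flagged by empty:
	assert.Equal(t, 0, len(arr)) // -> assert.Empty(t, arr)
	assert.Len(t, arr, 0)        // -> assert.Empty(t, arr)

	// Suggested form:
	assert.Empty(t, arr)
	assert.NotEmpty(t, append(arr, 1))
}
```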
+func NewEmpty() Empty { return Empty{} } +func (Empty) Name() string { return "empty" } + +func (checker Empty) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + if d := checker.checkEmpty(pass, call); d != nil { + return d + } + return checker.checkNotEmpty(pass, call) +} + +func (checker Empty) checkEmpty(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { //nolint:gocognit + newUseEmptyDiagnostic := func(replaceStart, replaceEnd token.Pos, replaceWith ast.Expr) *analysis.Diagnostic { + const proposed = "Empty" + return newUseFunctionDiagnostic(checker.Name(), call, proposed, + newSuggestedFuncReplacement(call, proposed, analysis.TextEdit{ + Pos: replaceStart, + End: replaceEnd, + NewText: analysisutil.NodeBytes(pass.Fset, replaceWith), + }), + ) + } + + if len(call.Args) < 2 { + return nil + } + a, b := call.Args[0], call.Args[1] + + switch call.Fn.NameFTrimmed { + case "Len": + if isZero(b) { + return newUseEmptyDiagnostic(a.Pos(), b.End(), a) + } + + case "Equal", "EqualValues", "Exactly": + arg1, ok1 := isLenCallAndZero(pass, a, b) + arg2, ok2 := isLenCallAndZero(pass, b, a) + + if lenArg, ok := anyVal([]bool{ok1, ok2}, arg1, arg2); ok { + return newUseEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + + case "LessOrEqual": + if lenArg, ok := isBuiltinLenCall(pass, a); ok && isZero(b) { + return newUseEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + + case "GreaterOrEqual": + if lenArg, ok := isBuiltinLenCall(pass, b); ok && isZero(a) { + return newUseEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + + case "Less": + if lenArg, ok := isBuiltinLenCall(pass, a); ok && (isOne(b) || isZero(b)) { + return newUseEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + + case "Greater": + if lenArg, ok := isBuiltinLenCall(pass, b); ok && (isOne(a) || isZero(a)) { + return newUseEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + } + return nil +} + +func (checker Empty) checkNotEmpty(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { //nolint:gocognit + newUseNotEmptyDiagnostic := func(replaceStart, replaceEnd token.Pos, replaceWith ast.Expr) *analysis.Diagnostic { + const proposed = "NotEmpty" + return newUseFunctionDiagnostic(checker.Name(), call, proposed, + newSuggestedFuncReplacement(call, proposed, analysis.TextEdit{ + Pos: replaceStart, + End: replaceEnd, + NewText: analysisutil.NodeBytes(pass.Fset, replaceWith), + }), + ) + } + + if len(call.Args) < 2 { + return nil + } + a, b := call.Args[0], call.Args[1] + + switch call.Fn.NameFTrimmed { + case "NotEqual", "NotEqualValues": + arg1, ok1 := isLenCallAndZero(pass, a, b) + arg2, ok2 := isLenCallAndZero(pass, b, a) + + if lenArg, ok := anyVal([]bool{ok1, ok2}, arg1, arg2); ok { + return newUseNotEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + + case "Less": + if lenArg, ok := isBuiltinLenCall(pass, b); ok && isZero(a) { + return newUseNotEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + + case "Greater": + if lenArg, ok := isBuiltinLenCall(pass, a); ok && isZero(b) { + return newUseNotEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + } + return nil +} + +var lenObj = types.Universe.Lookup("len") + +func isLenCallAndZero(pass *analysis.Pass, a, b ast.Expr) (ast.Expr, bool) { + lenArg, ok := isBuiltinLenCall(pass, a) + return lenArg, ok && isZero(b) +} + +func isBuiltinLenCall(pass *analysis.Pass, e ast.Expr) (ast.Expr, bool) { + ce, ok := e.(*ast.CallExpr) + if !ok { + return nil, false + } + + if analysisutil.IsObj(pass.TypesInfo, ce.Fun, lenObj) && len(ce.Args) == 1 { + return ce.Args[0], true + } + return nil, false +} + 
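The types.Universe lookup used above is the standard way to recognize the builtin len (as opposed to a user-defined len). A self-contained sketch, assuming a trivial source snippet:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	src := `package p; var xs []int; var n = len(xs)`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	info := &types.Info{Uses: map[*ast.Ident]types.Object{}}
	if _, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, info); err != nil {
		panic(err)
	}

	lenObj := types.Universe.Lookup("len")
	ast.Inspect(f, func(n ast.Node) bool {
		// Uses maps each identifier to its object; the builtin len
		// resolves to the same object returned by Universe.Lookup.
		if id, ok := n.(*ast.Ident); ok && info.Uses[id] == lenObj {
			fmt.Println("builtin len used at", fset.Position(id.Pos()))
		}
		return true
	})
}
```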
+func isZero(e ast.Expr) bool { + return isIntNumber(e, 0) +} + +func isOne(e ast.Expr) bool { + return isIntNumber(e, 1) +} + +func isIntNumber(e ast.Expr, v int) bool { + bl, ok := e.(*ast.BasicLit) + return ok && bl.Kind == token.INT && bl.Value == fmt.Sprintf("%d", v) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/error_is_as.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/error_is_as.go new file mode 100644 index 0000000000..0363873a63 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/error_is_as.go @@ -0,0 +1,166 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// ErrorIsAs detects situations like +// +// assert.Error(t, err, errSentinel) +// assert.NoError(t, err, errSentinel) +// assert.True(t, errors.Is(err, errSentinel)) +// assert.False(t, errors.Is(err, errSentinel)) +// assert.True(t, errors.As(err, &target)) +// +// and requires +// +// assert.ErrorIs(t, err, errSentinel) +// assert.NotErrorIs(t, err, errSentinel) +// assert.ErrorAs(t, err, &target) +// +// Also ErrorIsAs repeats go vet's "errorsas" check logic. +type ErrorIsAs struct{} + +// NewErrorIsAs constructs ErrorIsAs checker. +func NewErrorIsAs() ErrorIsAs { return ErrorIsAs{} } +func (ErrorIsAs) Name() string { return "error-is-as" } + +func (checker ErrorIsAs) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + switch call.Fn.NameFTrimmed { + case "Error": + if len(call.Args) >= 2 && isError(pass, call.Args[1]) { + const proposed = "ErrorIs" + msg := fmt.Sprintf("invalid usage of %[1]s.Error, use %[1]s.%[2]s instead", call.SelectorXStr, proposed) + return newDiagnostic(checker.Name(), call, msg, newSuggestedFuncReplacement(call, proposed)) + } + + case "NoError": + if len(call.Args) >= 2 && isError(pass, call.Args[1]) { + const proposed = "NotErrorIs" + msg := fmt.Sprintf("invalid usage of %[1]s.NoError, use %[1]s.%[2]s instead", call.SelectorXStr, proposed) + return newDiagnostic(checker.Name(), call, msg, newSuggestedFuncReplacement(call, proposed)) + } + + case "True": + if len(call.Args) < 1 { + return nil + } + + ce, ok := call.Args[0].(*ast.CallExpr) + if !ok { + return nil + } + if len(ce.Args) != 2 { + return nil + } + + var proposed string + switch { + case isErrorsIsCall(pass, ce): + proposed = "ErrorIs" + case isErrorsAsCall(pass, ce): + proposed = "ErrorAs" + } + if proposed != "" { + return newUseFunctionDiagnostic(checker.Name(), call, proposed, + newSuggestedFuncReplacement(call, proposed, analysis.TextEdit{ + Pos: ce.Pos(), + End: ce.End(), + NewText: formatAsCallArgs(pass, ce.Args[0], ce.Args[1]), + }), + ) + } + + case "False": + if len(call.Args) < 1 { + return nil + } + + ce, ok := call.Args[0].(*ast.CallExpr) + if !ok { + return nil + } + if len(ce.Args) != 2 { + return nil + } + + if isErrorsIsCall(pass, ce) { + const proposed = "NotErrorIs" + return newUseFunctionDiagnostic(checker.Name(), call, proposed, + newSuggestedFuncReplacement(call, proposed, analysis.TextEdit{ + Pos: ce.Pos(), + End: ce.End(), + NewText: formatAsCallArgs(pass, ce.Args[0], ce.Args[1]), + }), + ) + } + + case "ErrorAs": + if len(call.Args) < 2 { + return nil + } + + // NOTE(a.telyshev): Logic below must be consistent with + // https://cs.opensource.google/go/x/tools/+/master:go/analysis/passes/errorsas/errorsas.go + + var ( + defaultReport = fmt.Sprintf("second argument to %s must be a non-nil pointer to either a 
type that implements error, or to any interface type", call) //nolint:lll + errorPtrReport = fmt.Sprintf("second argument to %s should not be *error", call) + ) + + target := call.Args[1] + + if isEmptyInterface(pass, target) { + // `any` interface case. It is always allowed, since it often indicates + // a value forwarded from another source. + return nil + } + + tv, ok := pass.TypesInfo.Types[target] + if !ok { + return nil + } + + pt, ok := tv.Type.Underlying().(*types.Pointer) + if !ok { + return newDiagnostic(checker.Name(), call, defaultReport, nil) + } + if pt.Elem() == errorType { + return newDiagnostic(checker.Name(), call, errorPtrReport, nil) + } + + _, isInterface := pt.Elem().Underlying().(*types.Interface) + if !isInterface && !types.Implements(pt.Elem(), errorIface) { + return newDiagnostic(checker.Name(), call, defaultReport, nil) + } + } + return nil +} + +func isErrorsIsCall(pass *analysis.Pass, ce *ast.CallExpr) bool { + return isErrorsPkgFnCall(pass, ce, "Is") +} + +func isErrorsAsCall(pass *analysis.Pass, ce *ast.CallExpr) bool { + return isErrorsPkgFnCall(pass, ce, "As") +} + +func isErrorsPkgFnCall(pass *analysis.Pass, ce *ast.CallExpr, fn string) bool { + se, ok := ce.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + + errorsIsObj := analysisutil.ObjectOf(pass.Pkg, "errors", fn) + if errorsIsObj == nil { + return false + } + + return analysisutil.IsObj(pass.TypesInfo, se.Sel, errorsIsObj) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/error_nil.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/error_nil.go new file mode 100644 index 0000000000..5b0af7458f --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/error_nil.go @@ -0,0 +1,113 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// ErrorNil detects situations like +// +// assert.Nil(t, err) +// assert.NotNil(t, err) +// assert.Equal(t, nil, err) +// assert.EqualValues(t, nil, err) +// assert.Exactly(t, nil, err) +// assert.ErrorIs(t, err, nil) +// +// assert.NotEqual(t, nil, err) +// assert.NotEqualValues(t, nil, err) +// assert.NotErrorIs(t, err, nil) +// +// and requires +// +// assert.NoError(t, err) +// assert.Error(t, err) +type ErrorNil struct{} + +// NewErrorNil constructs ErrorNil checker. 
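Before/after for the error-nil checker (illustrative; doSomething is a stand-in):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestErrorNil(t *testing.T) {
	err := doSomething()

	// Flagged by error-nil:
	assert.Nil(t, err)        // -> assert.NoError(t, err)
	assert.Equal(t, nil, err) // -> assert.NoError(t, err)

	// Suggested form:
	assert.NoError(t, err)
}

func doSomething() error { return nil }
```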
+func NewErrorNil() ErrorNil { return ErrorNil{} } +func (ErrorNil) Name() string { return "error-nil" } + +func (checker ErrorNil) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + const ( + errorFn = "Error" + noErrorFn = "NoError" + ) + + proposedFn, survivingArg, replacementEndPos := func() (string, ast.Expr, token.Pos) { + switch call.Fn.NameFTrimmed { + case "Nil": + if len(call.Args) >= 1 && isError(pass, call.Args[0]) { + return noErrorFn, call.Args[0], call.Args[0].End() + } + + case "NotNil": + if len(call.Args) >= 1 && isError(pass, call.Args[0]) { + return errorFn, call.Args[0], call.Args[0].End() + } + + case "Equal", "EqualValues", "Exactly", "ErrorIs": + if len(call.Args) < 2 { + return "", nil, token.NoPos + } + a, b := call.Args[0], call.Args[1] + + switch { + case isError(pass, a) && isNil(b): + return noErrorFn, a, b.End() + case isNil(a) && isError(pass, b): + return noErrorFn, b, b.End() + } + + case "NotEqual", "NotEqualValues", "NotErrorIs": + if len(call.Args) < 2 { + return "", nil, token.NoPos + } + a, b := call.Args[0], call.Args[1] + + switch { + case isError(pass, a) && isNil(b): + return errorFn, a, b.End() + case isNil(a) && isError(pass, b): + return errorFn, b, b.End() + } + } + return "", nil, token.NoPos + }() + + if proposedFn != "" { + return newUseFunctionDiagnostic(checker.Name(), call, proposedFn, + newSuggestedFuncReplacement(call, proposedFn, analysis.TextEdit{ + Pos: call.Args[0].Pos(), + End: replacementEndPos, + NewText: analysisutil.NodeBytes(pass.Fset, survivingArg), + }), + ) + } + return nil +} + +var ( + errorType = types.Universe.Lookup("error").Type() + errorIface = errorType.Underlying().(*types.Interface) +) + +func isError(pass *analysis.Pass, expr ast.Expr) bool { + t := pass.TypesInfo.TypeOf(expr) + if t == nil { + return false + } + + _, ok := t.Underlying().(*types.Interface) + return ok && types.Implements(t, errorIface) +} + +func isNil(expr ast.Expr) bool { + ident, ok := expr.(*ast.Ident) + return ok && ident.Name == "nil" +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/expected_actual.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/expected_actual.go new file mode 100644 index 0000000000..e6825eaa67 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/expected_actual.go @@ -0,0 +1,215 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + "regexp" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// DefaultExpectedVarPattern matches variables with "expected" or "wanted" prefix or suffix in the name. 
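A quick check of which identifiers the default pattern accepts (runnable as-is; the sample names are arbitrary):

```go
package main

import (
	"fmt"
	"regexp"
)

var pattern = regexp.MustCompile(
	`(^(exp(ected)?|want(ed)?)([A-Z]\w*)?$)|(^(\w*[a-z])?(Exp(ected)?|Want(ed)?)$)`)

func main() {
	for _, name := range []string{"expected", "wantUser", "userExp", "result"} {
		fmt.Printf("%-8s -> %v\n", name, pattern.MatchString(name))
	}
	// expected -> true
	// wantUser -> true
	// userExp  -> true
	// result   -> false
}
```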
+var DefaultExpectedVarPattern = regexp.MustCompile( + `(^(exp(ected)?|want(ed)?)([A-Z]\w*)?$)|(^(\w*[a-z])?(Exp(ected)?|Want(ed)?)$)`) + +// ExpectedActual detects situation like +// +// assert.Equal(t, result, expected) +// assert.EqualExportedValues(t, resultObj, User{Name: "Anton"}) +// assert.EqualValues(t, result, 42) +// assert.Exactly(t, result, int64(42)) +// assert.JSONEq(t, result, `{"version": 3}`) +// assert.InDelta(t, result, 42.42, 1.0) +// assert.InDeltaMapValues(t, result, map[string]float64{"score": 0.99}, 1.0) +// assert.InDeltaSlice(t, result, []float64{0.98, 0.99}, 1.0) +// assert.InEpsilon(t, result, 42.42, 0.0001) +// assert.InEpsilonSlice(t, result, []float64{0.9801, 0.9902}, 0.0001) +// assert.IsType(t, result, (*User)(nil)) +// assert.NotEqual(t, result, "expected") +// assert.NotEqualValues(t, result, "expected") +// assert.NotSame(t, resultPtr, &value) +// assert.Same(t, resultPtr, &value) +// assert.WithinDuration(t, resultTime, time.Date(2023, 01, 12, 11, 46, 33, 0, nil), time.Second) +// assert.YAMLEq(t, result, "version: '3'") +// +// and requires +// +// assert.Equal(t, expected, result) +// assert.EqualExportedValues(t, User{Name: "Anton"}, resultObj) +// assert.EqualValues(t, 42, result) +// ... +type ExpectedActual struct { + expVarPattern *regexp.Regexp +} + +// NewExpectedActual constructs ExpectedActual checker using DefaultExpectedVarPattern. +func NewExpectedActual() *ExpectedActual { + return &ExpectedActual{expVarPattern: DefaultExpectedVarPattern} +} + +func (ExpectedActual) Name() string { return "expected-actual" } + +func (checker *ExpectedActual) SetExpVarPattern(p *regexp.Regexp) *ExpectedActual { + if p != nil { + checker.expVarPattern = p + } + return checker +} + +func (checker ExpectedActual) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + switch call.Fn.NameFTrimmed { + case "Equal", + "EqualExportedValues", + "EqualValues", + "Exactly", + "InDelta", + "InDeltaMapValues", + "InDeltaSlice", + "InEpsilon", + "InEpsilonSlice", + "IsType", + "JSONEq", + "NotEqual", + "NotEqualValues", + "NotSame", + "Same", + "WithinDuration", + "YAMLEq": + default: + return nil + } + + if len(call.Args) < 2 { + return nil + } + first, second := call.Args[0], call.Args[1] + + if checker.isWrongExpectedActualOrder(pass, first, second) { + return newDiagnostic(checker.Name(), call, "need to reverse actual and expected values", &analysis.SuggestedFix{ + Message: "Reverse actual and expected values", + TextEdits: []analysis.TextEdit{ + { + Pos: first.Pos(), + End: second.End(), + NewText: formatAsCallArgs(pass, second, first), + }, + }, + }) + } + return nil +} + +func (checker ExpectedActual) isWrongExpectedActualOrder(pass *analysis.Pass, first, second ast.Expr) bool { + leftIsCandidate := checker.isExpectedValueCandidate(pass, first) + rightIsCandidate := checker.isExpectedValueCandidate(pass, second) + return rightIsCandidate && !leftIsCandidate +} + +func (checker ExpectedActual) isExpectedValueCandidate(pass *analysis.Pass, expr ast.Expr) bool { + switch v := expr.(type) { + case *ast.ParenExpr: + return checker.isExpectedValueCandidate(pass, v.X) + + case *ast.StarExpr: // *value + return checker.isExpectedValueCandidate(pass, v.X) + + case *ast.UnaryExpr: + return (v.Op == token.AND) && checker.isExpectedValueCandidate(pass, v.X) // &value + + case *ast.CompositeLit: + return true + + case *ast.CallExpr: + return isParenExpr(v) || + isCastedBasicLitOrExpectedValue(v, checker.expVarPattern) || + isExpectedValueFactory(pass, v, 
checker.expVarPattern) + } + + return isBasicLit(expr) || + isUntypedConst(pass, expr) || + isTypedConst(pass, expr) || + isIdentNamedAsExpected(checker.expVarPattern, expr) || + isStructVarNamedAsExpected(checker.expVarPattern, expr) || + isStructFieldNamedAsExpected(checker.expVarPattern, expr) +} + +func isParenExpr(ce *ast.CallExpr) bool { + _, ok := ce.Fun.(*ast.ParenExpr) + return ok +} + +func isCastedBasicLitOrExpectedValue(ce *ast.CallExpr, pattern *regexp.Regexp) bool { + if len(ce.Args) != 1 { + return false + } + + fn, ok := ce.Fun.(*ast.Ident) + if !ok { + return false + } + + switch fn.Name { + case "complex64", "complex128": + return true + + case "uint", "uint8", "uint16", "uint32", "uint64", + "int", "int8", "int16", "int32", "int64", + "float32", "float64", + "rune", "string": + return isBasicLit(ce.Args[0]) || isIdentNamedAsExpected(pattern, ce.Args[0]) + } + return false +} + +func isExpectedValueFactory(pass *analysis.Pass, ce *ast.CallExpr, pattern *regexp.Regexp) bool { + switch fn := ce.Fun.(type) { + case *ast.Ident: + return pattern.MatchString(fn.Name) + + case *ast.SelectorExpr: + timeDateFn := analysisutil.ObjectOf(pass.Pkg, "time", "Date") + if timeDateFn != nil && analysisutil.IsObj(pass.TypesInfo, fn.Sel, timeDateFn) { + return true + } + return pattern.MatchString(fn.Sel.Name) + } + return false +} + +func isBasicLit(e ast.Expr) bool { + _, ok := e.(*ast.BasicLit) + return ok +} + +func isUntypedConst(p *analysis.Pass, e ast.Expr) bool { + t := p.TypesInfo.TypeOf(e) + if t == nil { + return false + } + + b, ok := t.(*types.Basic) + return ok && b.Info()&types.IsUntyped > 0 +} + +func isTypedConst(p *analysis.Pass, e ast.Expr) bool { + tt, ok := p.TypesInfo.Types[e] + return ok && tt.IsValue() && tt.Value != nil +} + +func isIdentNamedAsExpected(pattern *regexp.Regexp, e ast.Expr) bool { + id, ok := e.(*ast.Ident) + return ok && pattern.MatchString(id.Name) +} + +func isStructVarNamedAsExpected(pattern *regexp.Regexp, e ast.Expr) bool { + s, ok := e.(*ast.SelectorExpr) + return ok && isIdentNamedAsExpected(pattern, s.X) +} + +func isStructFieldNamedAsExpected(pattern *regexp.Regexp, e ast.Expr) bool { + s, ok := e.(*ast.SelectorExpr) + return ok && isIdentNamedAsExpected(pattern, s.Sel) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/float_compare.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/float_compare.go new file mode 100644 index 0000000000..10b1330de8 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/float_compare.go @@ -0,0 +1,70 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" +) + +// FloatCompare detects situation like +// +// assert.Equal(t, 42.42, result) +// assert.EqualValues(t, 42.42, result) +// assert.Exactly(t, 42.42, result) +// assert.True(t, result == 42.42) +// assert.False(t, result != 42.42) +// +// and requires +// +// assert.InEpsilon(t, 42.42, result, 0.0001) // Or assert.InDelta +type FloatCompare struct{} + +// NewFloatCompare constructs FloatCompare checker. 
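Before/after for the float-compare checker; InEpsilon takes a relative error, InDelta an absolute one:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFloatCompare(t *testing.T) {
	result := 42.42

	// Flagged by float-compare (exact equality on floats is fragile):
	assert.Equal(t, 42.42, result)

	// Suggested forms (relative and absolute tolerance, respectively):
	assert.InEpsilon(t, 42.42, result, 0.0001)
	assert.InDelta(t, 42.42, result, 0.01)
}
```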
+func NewFloatCompare() FloatCompare { return FloatCompare{} } +func (FloatCompare) Name() string { return "float-compare" } + +func (checker FloatCompare) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + invalid := func() bool { + switch call.Fn.NameFTrimmed { + case "Equal", "EqualValues", "Exactly": + return len(call.Args) > 1 && (isFloat(pass, call.Args[0]) || isFloat(pass, call.Args[1])) + + case "True": + return len(call.Args) > 0 && isFloatCompare(pass, call.Args[0], token.EQL) + + case "False": + return len(call.Args) > 0 && isFloatCompare(pass, call.Args[0], token.NEQ) + } + return false + }() + + if invalid { + format := "use %s.InEpsilon (or InDelta)" + if call.Fn.IsFmt { + format = "use %s.InEpsilonf (or InDeltaf)" + } + return newDiagnostic(checker.Name(), call, fmt.Sprintf(format, call.SelectorXStr), nil) + } + return nil +} + +func isFloat(pass *analysis.Pass, expr ast.Expr) bool { + t := pass.TypesInfo.TypeOf(expr) + if t == nil { + return false + } + + bt, ok := t.Underlying().(*types.Basic) + return ok && (bt.Info()&types.IsFloat > 0) +} + +func isFloatCompare(p *analysis.Pass, e ast.Expr, op token.Token) bool { + be, ok := e.(*ast.BinaryExpr) + if !ok { + return false + } + return be.Op == op && (isFloat(p, be.X) || isFloat(p, be.Y)) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/go_require.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/go_require.go new file mode 100644 index 0000000000..0844f15a09 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/go_require.go @@ -0,0 +1,327 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +const ( + goRequireFnReportFormat = "%s contains assertions that must only be used in the goroutine running the test function" + goRequireCallReportFormat = "%s must only be used in the goroutine running the test function" +) + +// GoRequire takes idea from go vet's "testinggoroutine" check +// and detects usage of require package's functions or assert.FailNow in the non-test goroutines +// +// go func() { +// conn, err = lis.Accept() +// require.NoError(t, err) +// +// if assert.Error(err) { +// assert.FailNow(t, msg) +// } +// }() +type GoRequire struct{} + +// NewGoRequire constructs GoRequire checker. +func NewGoRequire() GoRequire { return GoRequire{} } +func (GoRequire) Name() string { return "go-require" } + +// Check should be consistent with +// https://cs.opensource.google/go/x/tools/+/master:go/analysis/passes/testinggoroutine/testinggoroutine.go +// +// But due to the fact that the Check covers cases missed by go vet, +// the implementation turned out to be terribly complicated. +// +// In simple words, the algorithm is as follows: +// - we walk along the call tree and store the status, whether we are in the test goroutine or not; +// - if we are in a test goroutine, then require is allowed, otherwise not; +// - when we encounter the launch of a subtest or `go` statement, the status changes; +// - in order to correctly handle the return to the correct status when exiting the current function, +// we have to store a stack of statuses (inGoroutineRunningTestFunc). +// +// Other test functions called in the test function are also analyzed to make a verdict about the current function. +// This leads to recursion, which the cache of processed functions (processedFuncs) helps reduce the impact of. 
+// Also, because of this, we have to pre-collect a list of test function declarations (testsDecls). +func (checker GoRequire) Check(pass *analysis.Pass, inspector *inspector.Inspector) (diagnostics []analysis.Diagnostic) { + testsDecls := make(funcDeclarations) + inspector.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(node ast.Node) { + fd := node.(*ast.FuncDecl) + + if isTestingFuncOrMethod(pass, fd) { + if tf, ok := pass.TypesInfo.ObjectOf(fd.Name).(*types.Func); ok { + testsDecls[tf] = fd + } + } + }) + + var inGoroutineRunningTestFunc boolStack + processedFuncs := make(map[*ast.FuncDecl]goRequireVerdict) + + nodesFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.FuncType)(nil), + (*ast.GoStmt)(nil), + (*ast.CallExpr)(nil), + } + inspector.Nodes(nodesFilter, func(node ast.Node, push bool) bool { + if fd, ok := node.(*ast.FuncDecl); ok { + if !isTestingFuncOrMethod(pass, fd) { + return false + } + + if push { + inGoroutineRunningTestFunc.Push(true) + } else { + inGoroutineRunningTestFunc.Pop() + } + return true + } + + if ft, ok := node.(*ast.FuncType); ok { + if !isTestingAnonymousFunc(pass, ft) { + return false + } + + if push { + inGoroutineRunningTestFunc.Push(true) + } else { + inGoroutineRunningTestFunc.Pop() + } + return true + } + + if _, ok := node.(*ast.GoStmt); ok { + if push { + inGoroutineRunningTestFunc.Push(false) + } else { + inGoroutineRunningTestFunc.Pop() + } + return true + } + + ce := node.(*ast.CallExpr) + if isSubTestRun(pass, ce) { + if push { + // t.Run spawns the new testing goroutine and declines + // possible warnings from previous "simple" goroutine. + inGoroutineRunningTestFunc.Push(true) + } else { + inGoroutineRunningTestFunc.Pop() + } + return true + } + + if !push { + return false + } + if inGoroutineRunningTestFunc.Len() == 0 { + // Insufficient info. + return true + } + if inGoroutineRunningTestFunc.Last() { + // We are in testing goroutine and can skip any assertion checks. + return true + } + + testifyCall := NewCallMeta(pass, ce) + if testifyCall != nil { + switch checker.checkCall(testifyCall) { + case goRequireVerdictRequire: + d := newDiagnostic(checker.Name(), testifyCall, fmt.Sprintf(goRequireCallReportFormat, "require"), nil) + diagnostics = append(diagnostics, *d) + + case goRequireVerdictAssertFailNow: + d := newDiagnostic(checker.Name(), testifyCall, fmt.Sprintf(goRequireCallReportFormat, testifyCall), nil) + diagnostics = append(diagnostics, *d) + + case goRequireVerdictNoExit: + } + return false + } + + // Case of nested function call. 
+ { + calledFd := testsDecls.Get(pass, ce) + if calledFd == nil { + return true + } + + if v := checker.checkFunc(pass, calledFd, testsDecls, processedFuncs); v != goRequireVerdictNoExit { + caller := analysisutil.NodeString(pass.Fset, ce.Fun) + d := newDiagnostic(checker.Name(), ce, fmt.Sprintf(goRequireFnReportFormat, caller), nil) + diagnostics = append(diagnostics, *d) + } + } + return true + }) + + return diagnostics +} + +func (checker GoRequire) checkFunc( + pass *analysis.Pass, + fd *ast.FuncDecl, + testsDecls funcDeclarations, + processedFuncs map[*ast.FuncDecl]goRequireVerdict, +) (result goRequireVerdict) { + if v, ok := processedFuncs[fd]; ok { + return v + } + + ast.Inspect(fd, func(node ast.Node) bool { + if result != goRequireVerdictNoExit { + return false + } + + if _, ok := node.(*ast.GoStmt); ok { + return false + } + + ce, ok := node.(*ast.CallExpr) + if !ok { + return true + } + + testifyCall := NewCallMeta(pass, ce) + if testifyCall != nil { + if v := checker.checkCall(testifyCall); v != goRequireVerdictNoExit { + result, processedFuncs[fd] = v, v + } + return false + } + + // Case of nested function call. + { + calledFd := testsDecls.Get(pass, ce) + if calledFd == nil { + return true + } + if calledFd == fd { + // Recursion. + return true + } + + if v := checker.checkFunc(pass, calledFd, testsDecls, processedFuncs); v != goRequireVerdictNoExit { + result = v + return false + } + return true + } + }) + + return result +} + +type goRequireVerdict int + +const ( + goRequireVerdictNoExit goRequireVerdict = iota + goRequireVerdictRequire + goRequireVerdictAssertFailNow +) + +func (checker GoRequire) checkCall(call *CallMeta) goRequireVerdict { + if !call.IsAssert { + return goRequireVerdictRequire + } + if call.Fn.NameFTrimmed == "FailNow" { + return goRequireVerdictAssertFailNow + } + return goRequireVerdictNoExit +} + +type funcDeclarations map[*types.Func]*ast.FuncDecl + +// Get returns the declaration of a called function or method. +// Currently, only static calls within the same package are supported, otherwise returns nil. 
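+// For illustration (hypothetical names): plain calls like `doCheck(t, x)`,
+// method calls like `s.doCheck(x)`, and generic instantiations like
+// `doCheck[int](t, x)` resolve to their declarations; calls through function
+// values or interface methods do not.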
+func (fd funcDeclarations) Get(pass *analysis.Pass, ce *ast.CallExpr) *ast.FuncDecl { + var obj types.Object + + switch fun := ce.Fun.(type) { + case *ast.SelectorExpr: + obj = pass.TypesInfo.ObjectOf(fun.Sel) + + case *ast.Ident: + obj = pass.TypesInfo.ObjectOf(fun) + + case *ast.IndexExpr: + if id, ok := fun.X.(*ast.Ident); ok { + obj = pass.TypesInfo.ObjectOf(id) + } + + case *ast.IndexListExpr: + if id, ok := fun.X.(*ast.Ident); ok { + obj = pass.TypesInfo.ObjectOf(id) + } + } + + if tf, ok := obj.(*types.Func); ok { + return fd[tf] + } + return nil +} + +type boolStack []bool + +func (s boolStack) Len() int { + return len(s) +} + +func (s *boolStack) Push(v bool) { + *s = append(*s, v) +} + +func (s *boolStack) Pop() bool { + n := len(*s) + if n == 0 { + return false + } + + last := (*s)[n-1] + *s = (*s)[:n-1] + return last +} + +func (s boolStack) Last() bool { + n := len(s) + if n == 0 { + return false + } + return s[n-1] +} + +func isSubTestRun(pass *analysis.Pass, ce *ast.CallExpr) bool { + se, ok := ce.Fun.(*ast.SelectorExpr) + if !ok || se.Sel == nil { + return false + } + return (isTestingTPtr(pass, se.X) || implementsTestifySuiteIface(pass, se.X)) && se.Sel.Name == "Run" +} + +func isTestingFuncOrMethod(pass *analysis.Pass, fd *ast.FuncDecl) bool { + return hasTestingTParam(pass, fd.Type) || isTestifySuiteMethod(pass, fd) +} + +func isTestingAnonymousFunc(pass *analysis.Pass, ft *ast.FuncType) bool { + return hasTestingTParam(pass, ft) +} + +func hasTestingTParam(pass *analysis.Pass, ft *ast.FuncType) bool { + if ft == nil || ft.Params == nil { + return false + } + + for _, param := range ft.Params.List { + if isTestingTPtr(pass, param.Type) { + return true + } + } + return false +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/len.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/len.go new file mode 100644 index 0000000000..d4e6a48b5b --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/len.go @@ -0,0 +1,97 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" +) + +// Len detects situations like +// +// assert.Equal(t, 3, len(arr)) +// assert.EqualValues(t, 3, len(arr)) +// assert.Exactly(t, 3, len(arr)) +// assert.True(t, len(arr) == 3) +// +// and requires +// +// assert.Len(t, arr, 3) +type Len struct{} + +// NewLen constructs Len checker. 
+func NewLen() Len { return Len{} } +func (Len) Name() string { return "len" } + +func (checker Len) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + const proposedFn = "Len" + + switch call.Fn.NameFTrimmed { + case "Equal", "EqualValues", "Exactly": + if len(call.Args) < 2 { + return nil + } + a, b := call.Args[0], call.Args[1] + + if lenArg, expectedLen, ok := xorLenCall(pass, a, b); ok { + if expectedLen == b && !isIntBasicLit(expectedLen) { + // https://github.com/Antonboom/testifylint/issues/9 + return nil + } + return newUseFunctionDiagnostic(checker.Name(), call, proposedFn, + newSuggestedFuncReplacement(call, proposedFn, analysis.TextEdit{ + Pos: a.Pos(), + End: b.End(), + NewText: formatAsCallArgs(pass, lenArg, expectedLen), + }), + ) + } + + case "True": + if len(call.Args) < 1 { + return nil + } + expr := call.Args[0] + + if lenArg, expectedLen, ok := isLenEquality(pass, expr); ok && isIntBasicLit(expectedLen) { + return newUseFunctionDiagnostic(checker.Name(), call, proposedFn, + newSuggestedFuncReplacement(call, proposedFn, analysis.TextEdit{ + Pos: expr.Pos(), + End: expr.End(), + NewText: formatAsCallArgs(pass, lenArg, expectedLen), + }), + ) + } + } + return nil +} + +func xorLenCall(pass *analysis.Pass, a, b ast.Expr) (lenArg ast.Expr, expectedLen ast.Expr, ok bool) { + arg1, ok1 := isBuiltinLenCall(pass, a) + arg2, ok2 := isBuiltinLenCall(pass, b) + + if xor(ok1, ok2) { + if ok1 { + return arg1, b, true + } + return arg2, a, true + } + return nil, nil, false +} + +func isLenEquality(pass *analysis.Pass, e ast.Expr) (ast.Expr, ast.Expr, bool) { + be, ok := e.(*ast.BinaryExpr) + if !ok { + return nil, nil, false + } + + if be.Op != token.EQL { + return nil, nil, false + } + return xorLenCall(pass, be.X, be.Y) +} + +func isIntBasicLit(e ast.Expr) bool { + bl, ok := e.(*ast.BasicLit) + return ok && bl.Kind == token.INT +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/nil_compare.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/nil_compare.go new file mode 100644 index 0000000000..89680a0699 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/nil_compare.go @@ -0,0 +1,69 @@ +package checkers + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// NilCompare detects situations like +// +// assert.Equal(t, nil, value) +// assert.EqualValues(t, nil, value) +// assert.Exactly(t, nil, value) +// +// assert.NotEqual(t, nil, value) +// assert.NotEqualValues(t, nil, value) +// +// and requires +// +// assert.Nil(t, value) +// assert.NotNil(t, value) +type NilCompare struct{} + +// NewNilCompare constructs NilCompare checker. 
+func NewNilCompare() NilCompare { return NilCompare{} }
+func (NilCompare) Name() string { return "nil-compare" }
+
+func (checker NilCompare) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic {
+	if len(call.Args) < 2 {
+		return nil
+	}
+
+	survivingArg, ok := xorNil(call.Args[0], call.Args[1])
+	if !ok {
+		return nil
+	}
+
+	var proposedFn string
+
+	switch call.Fn.NameFTrimmed {
+	case "Equal", "EqualValues", "Exactly":
+		proposedFn = "Nil"
+	case "NotEqual", "NotEqualValues":
+		proposedFn = "NotNil"
+	default:
+		return nil
+	}
+
+	return newUseFunctionDiagnostic(checker.Name(), call, proposedFn,
+		newSuggestedFuncReplacement(call, proposedFn, analysis.TextEdit{
+			Pos:     call.Args[0].Pos(),
+			End:     call.Args[1].End(),
+			NewText: analysisutil.NodeBytes(pass.Fset, survivingArg),
+		}),
+	)
+}
+
+func xorNil(first, second ast.Expr) (ast.Expr, bool) {
+	a, b := isNil(first), isNil(second)
+	if xor(a, b) {
+		if a {
+			return second, true
+		}
+		return first, true
+	}
+	return nil, false
+}
diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/require_error.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/require_error.go
new file mode 100644
index 0000000000..ab09dd4478
--- /dev/null
+++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/require_error.go
@@ -0,0 +1,338 @@
+package checkers
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"regexp"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+const requireErrorReport = "for error assertions use require"
+
+// RequireError detects error assertions like
+//
+//	assert.Error(t, err) // s.Error(err), s.Assert().Error(err)
+//	assert.ErrorIs(t, err, io.EOF)
+//	assert.ErrorAs(t, err, &target)
+//	assert.EqualError(t, err, "end of file")
+//	assert.ErrorContains(t, err, "end of file")
+//	assert.NoError(t, err)
+//	assert.NotErrorIs(t, err, io.EOF)
+//
+// and requires
+//
+//	require.Error(t, err) // s.Require().Error(err)
+//	require.ErrorIs(t, err, io.EOF)
+//	require.ErrorAs(t, err, &target)
+//	...
+//
+// RequireError ignores:
+// - assertions in the `if` condition;
+// - the entire `if-else[-if]` block, if there is an assertion in any `if` condition;
+// - the last assertion in the block, if there are no method/function calls after it;
+// - assertions in an explicit goroutine;
+// - assertions in an explicit testing cleanup function or suite teardown methods;
+// - sequences of NoError assertions.
+type RequireError struct {
+	fnPattern *regexp.Regexp
+}
+
+// NewRequireError constructs RequireError checker.
+func NewRequireError() *RequireError { return new(RequireError) }
+func (RequireError) Name() string    { return "require-error" }
+
+func (checker *RequireError) SetFnPattern(p *regexp.Regexp) *RequireError {
+	if p != nil {
+		checker.fnPattern = p
+	}
+	return checker
+}
+
+func (checker RequireError) Check(pass *analysis.Pass, inspector *inspector.Inspector) []analysis.Diagnostic {
+	callsByFunc := make(map[funcID][]*callMeta)
+
+	// Stage 1. Collect meta information about any calls inside functions.
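+	//
+	// For illustration (hypothetical snippet):
+	//
+	//	func TestServer(t *testing.T) {
+	//		resp, err := http.Get(srv.URL)
+	//		assert.NoError(t, err)  // reported: "for error assertions use require",
+	//		defer resp.Body.Close() // because this line runs even if err != nil
+	//	}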
+ + inspector.WithStack([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool, stack []ast.Node) bool { + if !push { + return false + } + if len(stack) < 3 { + return true + } + + fID := findSurroundingFunc(pass, stack) + if fID == nil { + return true + } + + _, prevIsIfStmt := stack[len(stack)-2].(*ast.IfStmt) + _, prevIsAssignStmt := stack[len(stack)-2].(*ast.AssignStmt) + _, prevPrevIsIfStmt := stack[len(stack)-3].(*ast.IfStmt) + inIfCond := prevIsIfStmt || (prevPrevIsIfStmt && prevIsAssignStmt) + + callExpr := node.(*ast.CallExpr) + testifyCall := NewCallMeta(pass, callExpr) + + call := &callMeta{ + call: callExpr, + testifyCall: testifyCall, + rootIf: findRootIf(stack), + parentIf: findNearestNode[*ast.IfStmt](stack), + parentBlock: findNearestNode[*ast.BlockStmt](stack), + inIfCond: inIfCond, + inNoErrorSeq: false, // Will be filled in below. + } + + callsByFunc[*fID] = append(callsByFunc[*fID], call) + return testifyCall == nil // Do not support asserts in asserts. + }) + + // Stage 2. Analyze calls and block context. + + var diagnostics []analysis.Diagnostic + + callsByBlock := map[*ast.BlockStmt][]*callMeta{} + for _, calls := range callsByFunc { + for _, c := range calls { + if b := c.parentBlock; b != nil { + callsByBlock[b] = append(callsByBlock[b], c) + } + } + } + + markCallsInNoErrorSequence(callsByBlock) + + for funcInfo, calls := range callsByFunc { + for i, c := range calls { + if funcInfo.isTestCleanup { + continue + } + if funcInfo.isGoroutine { + continue + } + + if c.testifyCall == nil { + continue + } + if !c.testifyCall.IsAssert { + continue + } + switch c.testifyCall.Fn.NameFTrimmed { + default: + continue + case "Error", "ErrorIs", "ErrorAs", "EqualError", "ErrorContains", "NoError", "NotErrorIs": + } + + if needToSkipBasedOnContext(c, i, calls, callsByBlock) { + continue + } + if p := checker.fnPattern; p != nil && !p.MatchString(c.testifyCall.Fn.Name) { + continue + } + + diagnostics = append(diagnostics, + *newDiagnostic(checker.Name(), c.testifyCall, requireErrorReport, nil)) + } + } + + return diagnostics +} + +func needToSkipBasedOnContext( + currCall *callMeta, + currCallIndex int, + otherCalls []*callMeta, + callsByBlock map[*ast.BlockStmt][]*callMeta, +) bool { + if currCall.inNoErrorSeq { + // Skip `assert.NoError` sequence. + return true + } + + if currCall.inIfCond { + // Skip assertions in the "if condition". + return true + } + + if currCall.rootIf != nil { + for _, rootCall := range otherCalls { + if (rootCall.rootIf == currCall.rootIf) && rootCall.inIfCond { + // Skip assertions in the entire if-else[-if] block, if some of "if condition" contains assertion. + return true + } + } + } + + block := currCall.parentBlock + blockCalls := callsByBlock[block] + isLastCallInBlock := blockCalls[len(blockCalls)-1] == currCall + + noCallsAfter := true + + _, blockEndWithReturn := block.List[len(block.List)-1].(*ast.ReturnStmt) + if !blockEndWithReturn { + for i := currCallIndex + 1; i < len(otherCalls); i++ { + nextCall := otherCalls[i] + nextCallInElseBlock := false + + if pIf := currCall.parentIf; pIf != nil && pIf.Else != nil { + ast.Inspect(pIf.Else, func(n ast.Node) bool { + if n == nextCall.call { + nextCallInElseBlock = true + return false + } + return true + }) + } + + if !nextCallInElseBlock { + noCallsAfter = false + break + } + } + } + + // Skip assertion if this is the last operation in the test. 
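+	// For example, a trailing `assert.NoError(t, f.Close())` at the very end of
+	// a test is tolerated: nothing after it can run with an invalid state.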
+ return isLastCallInBlock && noCallsAfter +} + +func findSurroundingFunc(pass *analysis.Pass, stack []ast.Node) *funcID { + for i := len(stack) - 2; i >= 0; i-- { + var fType *ast.FuncType + var fName string + var isTestCleanup bool + var isGoroutine bool + + switch fd := stack[i].(type) { + case *ast.FuncDecl: + fType, fName = fd.Type, fd.Name.Name + + if isTestifySuiteMethod(pass, fd) { + if ident := fd.Name; ident != nil && isAfterTestMethod(ident.Name) { + isTestCleanup = true + } + } + + case *ast.FuncLit: + fType, fName = fd.Type, "anonymous" + + if i >= 2 { //nolint:nestif + if ce, ok := stack[i-1].(*ast.CallExpr); ok { + if se, ok := ce.Fun.(*ast.SelectorExpr); ok { + isTestCleanup = isTestingTPtr(pass, se.X) && se.Sel != nil && (se.Sel.Name == "Cleanup") + } + + if _, ok := stack[i-2].(*ast.GoStmt); ok { + isGoroutine = true + } + } + } + + default: + continue + } + + return &funcID{ + pos: fType.Pos(), + posStr: pass.Fset.Position(fType.Pos()).String(), + name: fName, + isTestCleanup: isTestCleanup, + isGoroutine: isGoroutine, + } + } + return nil +} + +func findRootIf(stack []ast.Node) *ast.IfStmt { + nearestIf, i := findNearestNodeWithIdx[*ast.IfStmt](stack) + for ; i > 0; i-- { + parent, ok := stack[i-1].(*ast.IfStmt) + if ok { + nearestIf = parent + } else { + break + } + } + return nearestIf +} + +func findNearestNode[T ast.Node](stack []ast.Node) (v T) { + v, _ = findNearestNodeWithIdx[T](stack) + return +} + +func findNearestNodeWithIdx[T ast.Node](stack []ast.Node) (v T, index int) { + for i := len(stack) - 2; i >= 0; i-- { + if n, ok := stack[i].(T); ok { + return n, i + } + } + return +} + +func markCallsInNoErrorSequence(callsByBlock map[*ast.BlockStmt][]*callMeta) { + for _, calls := range callsByBlock { + for i, c := range calls { + if c.testifyCall == nil { + continue + } + + var prevIsNoError bool + if i > 0 { + if prev := calls[i-1].testifyCall; prev != nil { + prevIsNoError = isNoErrorAssertion(prev.Fn.Name) + } + } + + var nextIsNoError bool + if i < len(calls)-1 { + if next := calls[i+1].testifyCall; next != nil { + nextIsNoError = isNoErrorAssertion(next.Fn.Name) + } + } + + if isNoErrorAssertion(c.testifyCall.Fn.Name) && (prevIsNoError || nextIsNoError) { + calls[i].inNoErrorSeq = true + } + } + } +} + +type callMeta struct { + call *ast.CallExpr + testifyCall *CallMeta + rootIf *ast.IfStmt // The root `if` in if-else[-if] chain. + parentIf *ast.IfStmt // The nearest `if`, can be equal with rootIf. + parentBlock *ast.BlockStmt + inIfCond bool // True for code like `if assert.ErrorAs(t, err, &target) {`. + inNoErrorSeq bool // True for sequence of `assert.NoError` assertions. 
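+	// (A lone NoError call is still checked; only adjacent NoError/NoErrorf
+	// calls in the same block form a sequence, see markCallsInNoErrorSequence.)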
+}
+
+type funcID struct {
+	pos           token.Pos
+	posStr        string
+	name          string
+	isTestCleanup bool
+	isGoroutine   bool
+}
+
+func (id funcID) String() string {
+	return fmt.Sprintf("%s at %s", id.name, id.posStr)
+}
+
+func isAfterTestMethod(name string) bool {
+	// https://github.com/stretchr/testify/blob/master/suite/interfaces.go
+	switch name {
+	case "TearDownSuite", "TearDownTest", "AfterTest", "HandleStats", "TearDownSubTest":
+		return true
+	}
+	return false
+}
+
+func isNoErrorAssertion(fnName string) bool {
+	return (fnName == "NoError") || (fnName == "NoErrorf")
+}
diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_dont_use_pkg.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_dont_use_pkg.go
new file mode 100644
index 0000000000..bf84f6378e
--- /dev/null
+++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_dont_use_pkg.go
@@ -0,0 +1,96 @@
+package checkers
+
+import (
+	"fmt"
+	"go/ast"
+	"go/types"
+
+	"golang.org/x/tools/go/analysis"
+
+	"github.com/Antonboom/testifylint/internal/analysisutil"
+	"github.com/Antonboom/testifylint/internal/testify"
+)
+
+// SuiteDontUsePkg detects situations like
+//
+//	func (s *MySuite) TestSomething() {
+//		assert.Equal(s.T(), 42, value)
+//	}
+//
+// and requires
+//
+//	func (s *MySuite) TestSomething() {
+//		s.Equal(42, value)
+//	}
+type SuiteDontUsePkg struct{}
+
+// NewSuiteDontUsePkg constructs SuiteDontUsePkg checker.
+func NewSuiteDontUsePkg() SuiteDontUsePkg { return SuiteDontUsePkg{} }
+func (SuiteDontUsePkg) Name() string      { return "suite-dont-use-pkg" }
+
+func (checker SuiteDontUsePkg) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic {
+	if !call.IsPkg {
+		return nil
+	}
+
+	args := call.ArgsRaw
+	if len(args) < 2 {
+		return nil
+	}
+	t := args[0]
+
+	ce, ok := t.(*ast.CallExpr)
+	if !ok {
+		return nil
+	}
+	se, ok := ce.Fun.(*ast.SelectorExpr)
+	if !ok {
+		return nil
+	}
+	if se.X == nil || !implementsTestifySuiteIface(pass, se.X) {
+		return nil
+	}
+	if se.Sel == nil || se.Sel.Name != "T" {
+		return nil
+	}
+	rcv, ok := se.X.(*ast.Ident) // At this point we ensure that `s.T()` is used as the first argument of the assertion.
+	if !ok {
+		return nil
+	}
+
+	newSelector := rcv.Name
+	if !call.IsAssert {
+		newSelector += "." + "Require()"
+	}
+
+	msg := fmt.Sprintf("use %s.%s", newSelector, call.Fn.Name)
+	return newDiagnostic(checker.Name(), call, msg, &analysis.SuggestedFix{
+		Message: fmt.Sprintf("Replace `%s` with `%s`", call.SelectorXStr, newSelector),
+		TextEdits: []analysis.TextEdit{
+			// Replace package function with suite method.
+			{
+				Pos:     call.Selector.X.Pos(),
+				End:     call.Selector.X.End(),
+				NewText: []byte(newSelector),
+			},
+			// Remove `s.T()`.
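+			// Together these two edits rewrite, for example:
+			//
+			//	assert.Equal(s.T(), 42, value) -> s.Equal(42, value)
+			//	require.True(s.T(), ok)        -> s.Require().True(ok)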
+			{
+				Pos:     t.Pos(),
+				End:     args[1].Pos(),
+				NewText: []byte(""),
+			},
+		},
+	})
+}
+
+func implementsTestifySuiteIface(pass *analysis.Pass, rcv ast.Expr) bool {
+	suiteIface := analysisutil.ObjectOf(pass.Pkg, testify.SuitePkgPath, "TestingSuite")
+	if suiteIface == nil {
+		return false
+	}
+
+	return types.Implements(
+		pass.TypesInfo.TypeOf(rcv),
+		suiteIface.Type().Underlying().(*types.Interface),
+	)
+}
diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_extra_assert_call.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_extra_assert_call.go
new file mode 100644
index 0000000000..791488b651
--- /dev/null
+++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_extra_assert_call.go
@@ -0,0 +1,99 @@
+package checkers
+
+import (
+	"fmt"
+	"go/ast"
+
+	"golang.org/x/tools/go/analysis"
+
+	"github.com/Antonboom/testifylint/internal/analysisutil"
+)
+
+// SuiteExtraAssertCallMode describes the working modes of the SuiteExtraAssertCall checker.
+type SuiteExtraAssertCallMode int
+
+const (
+	SuiteExtraAssertCallModeRemove SuiteExtraAssertCallMode = iota
+	SuiteExtraAssertCallModeRequire
+)
+
+const DefaultSuiteExtraAssertCallMode = SuiteExtraAssertCallModeRemove
+
+// SuiteExtraAssertCall detects situations like
+//
+//	func (s *MySuite) TestSomething() {
+//		s.Assert().Equal(42, value)
+//	}
+//
+// and requires
+//
+//	func (s *MySuite) TestSomething() {
+//		s.Equal(42, value)
+//	}
+//
+// or vice versa (depending on the configurable mode).
+type SuiteExtraAssertCall struct {
+	mode SuiteExtraAssertCallMode
+}
+
+// NewSuiteExtraAssertCall constructs SuiteExtraAssertCall checker.
+func NewSuiteExtraAssertCall() *SuiteExtraAssertCall {
+	return &SuiteExtraAssertCall{mode: DefaultSuiteExtraAssertCallMode}
+}
+
+func (SuiteExtraAssertCall) Name() string { return "suite-extra-assert-call" }
+
+func (checker *SuiteExtraAssertCall) SetMode(m SuiteExtraAssertCallMode) *SuiteExtraAssertCall {
+	checker.mode = m
+	return checker
+}
+
+func (checker SuiteExtraAssertCall) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic {
+	if call.IsPkg {
+		return nil
+	}
+
+	switch checker.mode {
+	case SuiteExtraAssertCallModeRequire:
+		x, ok := call.Selector.X.(*ast.Ident) // s.True
+		if !ok || x == nil || !implementsTestifySuiteIface(pass, x) {
+			return nil
+		}
+
+		msg := fmt.Sprintf("use an explicit %s.Assert().%s", analysisutil.NodeString(pass.Fset, x), call.Fn.Name)
+		return newDiagnostic(checker.Name(), call, msg, &analysis.SuggestedFix{
+			Message: "Add `Assert()` call",
+			TextEdits: []analysis.TextEdit{{
+				Pos:     x.End(),
+				End:     x.End(), // Pure insertion.
+				NewText: []byte(".Assert()"),
+			}},
+		})
+
+	case SuiteExtraAssertCallModeRemove:
+		x, ok := call.Selector.X.(*ast.CallExpr) // s.Assert().True
+		if !ok {
+			return nil
+		}
+
+		se, ok := x.Fun.(*ast.SelectorExpr)
+		if !ok || se == nil || !implementsTestifySuiteIface(pass, se.X) {
+			return nil
+		}
+		if se.Sel == nil || se.Sel.Name != "Assert" {
+			return nil
+		}
+
+		msg := fmt.Sprintf("need to simplify the assertion to %s.%s", analysisutil.NodeString(pass.Fset, se.X), call.Fn.Name)
+		return newDiagnostic(checker.Name(), call, msg, &analysis.SuggestedFix{
+			Message: "Remove `Assert()` call",
+			TextEdits: []analysis.TextEdit{{
+				Pos: se.Sel.Pos(),
+				End: x.End() + 1, // +1 for dot.
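+				// The removed span covers `Assert().` including the trailing dot,
+				// e.g. `s.Assert().True(ok)` becomes `s.True(ok)`.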
+				NewText: []byte(""),
+			}},
+		})
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_thelper.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_thelper.go
new file mode 100644
index 0000000000..5cadc93ada
--- /dev/null
+++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_thelper.go
@@ -0,0 +1,130 @@
+package checkers
+
+import (
+	"fmt"
+	"go/ast"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/ast/inspector"
+
+	"github.com/Antonboom/testifylint/internal/analysisutil"
+	"github.com/Antonboom/testifylint/internal/testify"
+)
+
+// SuiteTHelper requires a t.Helper() call in suite helpers:
+//
+//	func (s *RoomSuite) assertRoomRound(roundID RoundID) {
+//		s.T().Helper()
+//		s.Equal(roundID, s.getRoom().CurrentRound.ID)
+//	}
+type SuiteTHelper struct{}
+
+// NewSuiteTHelper constructs SuiteTHelper checker.
+func NewSuiteTHelper() SuiteTHelper { return SuiteTHelper{} }
+func (SuiteTHelper) Name() string   { return "suite-thelper" }
+
+func (checker SuiteTHelper) Check(pass *analysis.Pass, inspector *inspector.Inspector) (diagnostics []analysis.Diagnostic) {
+	inspector.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(node ast.Node) {
+		fd := node.(*ast.FuncDecl)
+		if !isTestifySuiteMethod(pass, fd) {
+			return
+		}
+
+		if ident := fd.Name; ident == nil || isTestMethod(ident.Name) || isServiceMethod(ident.Name) {
+			return
+		}
+
+		if !containsSuiteAssertions(pass, fd) {
+			return
+		}
+
+		rcv := fd.Recv.List[0]
+		if len(rcv.Names) != 1 || rcv.Names[0] == nil {
+			return
+		}
+		rcvName := rcv.Names[0].Name
+
+		helperCallStr := fmt.Sprintf("%s.T().Helper()", rcvName)
+
+		firstStmt := fd.Body.List[0]
+		if analysisutil.NodeString(pass.Fset, firstStmt) == helperCallStr {
+			return
+		}
+
+		msg := "suite helper method must start with " + helperCallStr
+		d := newDiagnostic(checker.Name(), fd, msg, &analysis.SuggestedFix{
+			Message: fmt.Sprintf("Insert `%s`", helperCallStr),
+			TextEdits: []analysis.TextEdit{
+				{
+					Pos: firstStmt.Pos(),
+					End: firstStmt.Pos(), // Pure insertion.
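+					// For a receiver named `s`, this inserts `s.T().Helper()` and a
+					// blank line before the helper's first statement.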
+ NewText: []byte(helperCallStr + "\n\n"), + }, + }, + }) + diagnostics = append(diagnostics, *d) + }) + return diagnostics +} + +func isTestifySuiteMethod(pass *analysis.Pass, fDecl *ast.FuncDecl) bool { + if fDecl.Recv == nil || len(fDecl.Recv.List) != 1 { + return false + } + + rcv := fDecl.Recv.List[0] + return implementsTestifySuiteIface(pass, rcv.Type) +} + +func isTestMethod(name string) bool { + return strings.HasPrefix(name, "Test") +} + +func isServiceMethod(name string) bool { + // https://github.com/stretchr/testify/blob/master/suite/interfaces.go + switch name { + case "T", "SetT", "SetS", "SetupSuite", "SetupTest", "TearDownSuite", "TearDownTest", + "BeforeTest", "AfterTest", "HandleStats", "SetupSubTest", "TearDownSubTest": + return true + } + return false +} + +func containsSuiteAssertions(pass *analysis.Pass, fn *ast.FuncDecl) bool { + if fn.Body == nil { + return false + } + + for _, s := range fn.Body.List { + if isSuiteAssertion(pass, s) { + return true + } + } + return false +} + +func isSuiteAssertion(pass *analysis.Pass, stmt ast.Stmt) bool { + expr, ok := stmt.(*ast.ExprStmt) + if !ok { + return false + } + + ce, ok := expr.X.(*ast.CallExpr) + if !ok { + return false + } + + se, ok := ce.Fun.(*ast.SelectorExpr) + if !ok || se.Sel == nil { + return false + } + + if sel, ok := pass.TypesInfo.Selections[se]; ok { + pkg := sel.Obj().Pkg() + isAssert := analysisutil.IsPkg(pkg, testify.AssertPkgName, testify.AssertPkgPath) + isRequire := analysisutil.IsPkg(pkg, testify.RequirePkgName, testify.RequirePkgPath) + return isAssert || isRequire + } + return false +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/useless_assert.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/useless_assert.go new file mode 100644 index 0000000000..669f9d187d --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/useless_assert.go @@ -0,0 +1,71 @@ +package checkers + +import ( + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// UselessAssert detects useless asserts like +// +// 1) Asserting of the same variable +// +// assert.Equal(t, tt.value, tt.value) +// assert.ElementsMatch(t, users, users) +// ... +// +// 2) Open for contribution... +type UselessAssert struct{} + +// NewUselessAssert constructs UselessAssert checker. 
+func NewUselessAssert() UselessAssert { return UselessAssert{} } +func (UselessAssert) Name() string { return "useless-assert" } + +func (checker UselessAssert) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + switch call.Fn.NameFTrimmed { + case + "Contains", + "ElementsMatch", + "Equal", + "EqualExportedValues", + "EqualValues", + "ErrorAs", + "ErrorIs", + "Exactly", + "Greater", + "GreaterOrEqual", + "Implements", + "InDelta", + "InDeltaMapValues", + "InDeltaSlice", + "InEpsilon", + "InEpsilonSlice", + "IsType", + "JSONEq", + "Less", + "LessOrEqual", + "NotEqual", + "NotEqualValues", + "NotErrorIs", + "NotRegexp", + "NotSame", + "NotSubset", + "Regexp", + "Same", + "Subset", + "WithinDuration", + "YAMLEq": + default: + return nil + } + + if len(call.Args) < 2 { + return nil + } + first, second := call.Args[0], call.Args[1] + + if analysisutil.NodeString(pass.Fset, first) == analysisutil.NodeString(pass.Fset, second) { + return newDiagnostic(checker.Name(), call, "asserting of the same variable", nil) + } + return nil +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/config/config.go b/vendor/github.com/Antonboom/testifylint/internal/config/config.go new file mode 100644 index 0000000000..7eba0ea328 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/config/config.go @@ -0,0 +1,111 @@ +package config + +import ( + "errors" + "flag" + "fmt" + + "github.com/Antonboom/testifylint/internal/checkers" +) + +// NewDefault builds default testifylint config. +func NewDefault() Config { + return Config{ + EnableAll: false, + DisabledCheckers: nil, + DisableAll: false, + EnabledCheckers: nil, + ExpectedActual: ExpectedActualConfig{ + ExpVarPattern: RegexpValue{checkers.DefaultExpectedVarPattern}, + }, + RequireError: RequireErrorConfig{ + FnPattern: RegexpValue{nil}, + }, + SuiteExtraAssertCall: SuiteExtraAssertCallConfig{ + Mode: checkers.DefaultSuiteExtraAssertCallMode, + }, + } +} + +// Config implements testifylint configuration. +type Config struct { + EnableAll bool + DisabledCheckers KnownCheckersValue + DisableAll bool + EnabledCheckers KnownCheckersValue + + BoolCompare BoolCompareConfig + ExpectedActual ExpectedActualConfig + RequireError RequireErrorConfig + SuiteExtraAssertCall SuiteExtraAssertCallConfig +} + +// BoolCompareConfig implements configuration of checkers.BoolCompare. +type BoolCompareConfig struct { + IgnoreCustomTypes bool +} + +// ExpectedActualConfig implements configuration of checkers.ExpectedActual. +type ExpectedActualConfig struct { + ExpVarPattern RegexpValue +} + +// RequireErrorConfig implements configuration of checkers.RequireError. +type RequireErrorConfig struct { + FnPattern RegexpValue +} + +// SuiteExtraAssertCallConfig implements configuration of checkers.SuiteExtraAssertCall. 
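+// The mode corresponds to the `-suite-extra-assert-call.mode` flag (see
+// BindToFlags below) and accepts "remove" (the default) or "require".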
+type SuiteExtraAssertCallConfig struct {
+	Mode checkers.SuiteExtraAssertCallMode
+}
+
+func (cfg Config) Validate() error {
+	if cfg.EnableAll {
+		if cfg.DisableAll {
+			return errors.New("enable-all and disable-all options must not be combined")
+		}
+
+		if len(cfg.EnabledCheckers) != 0 {
+			return errors.New("enable-all and enable options must not be combined")
+		}
+	}
+
+	if cfg.DisableAll {
+		if len(cfg.DisabledCheckers) != 0 {
+			return errors.New("disable-all and disable options must not be combined")
+		}
+
+		if len(cfg.EnabledCheckers) == 0 {
+			return errors.New("all checkers were disabled, but no checker was enabled: at least one must be enabled")
+		}
+	}
+
+	for _, checker := range cfg.DisabledCheckers {
+		if cfg.EnabledCheckers.Contains(checker) {
+			return fmt.Errorf("checker %q is disabled and enabled at the same time", checker)
+		}
+	}
+
+	return nil
+}
+
+// BindToFlags binds Config fields to the corresponding flags.
+func BindToFlags(cfg *Config, fs *flag.FlagSet) {
+	fs.BoolVar(&cfg.EnableAll, "enable-all", false, "enable all checkers")
+	fs.Var(&cfg.DisabledCheckers, "disable", "comma-separated list of disabled checkers (to exclude from enabled by default)")
+	fs.BoolVar(&cfg.DisableAll, "disable-all", false, "disable all checkers")
+	fs.Var(&cfg.EnabledCheckers, "enable", "comma-separated list of enabled checkers (in addition to enabled by default)")
+
+	fs.BoolVar(&cfg.BoolCompare.IgnoreCustomTypes, "bool-compare.ignore-custom-types", false,
+		"ignore user defined types (over builtin bool)")
+	fs.Var(&cfg.ExpectedActual.ExpVarPattern, "expected-actual.pattern", "regexp for expected variable name")
+	fs.Var(&cfg.RequireError.FnPattern, "require-error.fn-pattern", "regexp for error assertions that should only be analyzed")
+	fs.Var(NewEnumValue(suiteExtraAssertCallModeAsString, &cfg.SuiteExtraAssertCall.Mode),
+		"suite-extra-assert-call.mode", "whether to require or remove the extra Assert() call")
+}
+
+var suiteExtraAssertCallModeAsString = map[string]checkers.SuiteExtraAssertCallMode{
+	"remove":  checkers.SuiteExtraAssertCallModeRemove,
+	"require": checkers.SuiteExtraAssertCallModeRequire,
+}
diff --git a/vendor/github.com/Antonboom/testifylint/internal/config/flag_value_types.go b/vendor/github.com/Antonboom/testifylint/internal/config/flag_value_types.go
new file mode 100644
index 0000000000..5b08ec47b1
--- /dev/null
+++ b/vendor/github.com/Antonboom/testifylint/internal/config/flag_value_types.go
@@ -0,0 +1,114 @@
+package config
+
+import (
+	"flag"
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/Antonboom/testifylint/internal/checkers"
+)
+
+var (
+	_ flag.Value = (*KnownCheckersValue)(nil)
+	_ flag.Value = (*RegexpValue)(nil)
+	_ flag.Value = (*EnumValue[checkers.SuiteExtraAssertCallMode])(nil)
+)
+
+// KnownCheckersValue implements a comma-separated list of testify checkers.
+type KnownCheckersValue []string
+
+func (kcv KnownCheckersValue) String() string {
+	return strings.Join(kcv, ",")
+}
+
+func (kcv *KnownCheckersValue) Set(v string) error {
+	checkerNames := strings.Split(v, ",")
+	for _, checkerName := range checkerNames {
+		if ok := checkers.IsKnown(checkerName); !ok {
+			return fmt.Errorf("unknown checker %q", checkerName)
+		}
+	}
+
+	*kcv = checkerNames
+	return nil
+}
+
+func (kcv KnownCheckersValue) Contains(v string) bool {
+	for _, checker := range kcv {
+		if checker == v {
+			return true
+		}
+	}
+	return false
+}
+
+// RegexpValue is a special wrapper for supporting regexp.Regexp in flag.FlagSet.
+// The original regexp is available through RegexpValue.Regexp.
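+// For illustration (hypothetical value): `-require-error.fn-pattern='^NoErrorf?$'`
+// compiles via regexp.Compile in Set and limits the require-error checker to
+// NoError/NoErrorf assertions only.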
+type RegexpValue struct { + *regexp.Regexp +} + +func (rv RegexpValue) String() string { + if rv.Regexp == nil { + return "" + } + return rv.Regexp.String() +} + +func (rv *RegexpValue) Set(v string) error { + compiled, err := regexp.Compile(v) + if err != nil { + return err + } + + rv.Regexp = compiled + return nil +} + +// EnumValue is a special type for support of flag.FlagSet over user-defined constants. +type EnumValue[EnumT comparable] struct { + mapping map[string]EnumT + keys []string + dst *EnumT +} + +// NewEnumValue takes the "enum-value-name to enum-value" mapping and a destination for the value passed through the CLI. +// Returns an EnumValue instance suitable for flag.FlagSet.Var. +func NewEnumValue[EnumT comparable](mapping map[string]EnumT, dst *EnumT) *EnumValue[EnumT] { + keys := make([]string, 0, len(mapping)) + for k := range mapping { + keys = append(keys, k) + } + sort.Strings(keys) + + return &EnumValue[EnumT]{ + mapping: mapping, + keys: keys, + dst: dst, + } +} + +func (e EnumValue[EnumT]) String() string { + if e.dst == nil { + return "" + } + + for k, v := range e.mapping { + if v == *e.dst { + return k + } + } + return "" +} + +func (e *EnumValue[EnumT]) Set(s string) error { + v, ok := e.mapping[s] + if !ok { + return fmt.Errorf("use one of (%v)", strings.Join(e.keys, " | ")) + } + + *e.dst = v + return nil +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/testify/const.go b/vendor/github.com/Antonboom/testifylint/internal/testify/const.go new file mode 100644 index 0000000000..3476e40402 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/testify/const.go @@ -0,0 +1,17 @@ +package testify + +const ( + ModulePath = "github.com/stretchr/testify" + + AssertPkgName = "assert" + HTTPPkgName = "http" + MockPkgName = "mock" + RequirePkgName = "require" + SuitePkgName = "suite" + + AssertPkgPath = ModulePath + "/" + AssertPkgName + HTTPPkgPath = ModulePath + "/" + HTTPPkgName + MockPkgPath = ModulePath + "/" + MockPkgName + RequirePkgPath = ModulePath + "/" + RequirePkgName + SuitePkgPath = ModulePath + "/" + SuitePkgName +) diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 0ca1dc4fee..4d38f3bfce 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -91,7 +91,7 @@ const ( // UnmarshalText method. See the Unmarshaler example for a demonstration with // email addresses. // -// ### Key mapping +// # Key mapping // // TOML keys can map to either keys in a Go map or field names in a Go struct. // The special `toml` struct tag can be used to map TOML keys to struct fields @@ -248,7 +248,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { case reflect.Bool: return md.unifyBool(data, rv) case reflect.Interface: - if rv.NumMethod() > 0 { // Only support empty interfaces are supported. + if rv.NumMethod() > 0 { /// Only empty interfaces are supported. return md.e("unsupported type %s", rv.Type()) } return md.unifyAnything(data, rv) diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go index c6af3f239d..b9e309717e 100644 --- a/vendor/github.com/BurntSushi/toml/deprecated.go +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -5,17 +5,25 @@ import ( "io" ) +// TextMarshaler is an alias for encoding.TextMarshaler. 
+// // Deprecated: use encoding.TextMarshaler type TextMarshaler encoding.TextMarshaler +// TextUnmarshaler is an alias for encoding.TextUnmarshaler. +// // Deprecated: use encoding.TextUnmarshaler type TextUnmarshaler encoding.TextUnmarshaler +// PrimitiveDecode is an alias for MetaData.PrimitiveDecode(). +// // Deprecated: use MetaData.PrimitiveDecode. func PrimitiveDecode(primValue Primitive, v interface{}) error { md := MetaData{decoded: make(map[string]struct{})} return md.unify(primValue.undecoded, rvalue(v)) } +// DecodeReader is an alias for NewDecoder(r).Decode(v). +// // Deprecated: use NewDecoder(reader).Decode(&value). func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index 930e1d521a..9cd25d7571 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -136,7 +136,8 @@ func NewEncoder(w io.Writer) *Encoder { // document. func (enc *Encoder) Encode(v interface{}) error { rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { + err := enc.safeEncode(Key([]string{}), rv) + if err != nil { return err } return enc.w.Flush() @@ -457,6 +458,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { frv := eindirect(rv.Field(i)) + if is32Bit { + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart + } + // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. // @@ -471,17 +482,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if typeIsTable(tomlTypeOfGo(frv)) { fieldsSub = append(fieldsSub, append(start, f.Index...)) } else { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - if is32Bit { - copyStart := make([]int, len(start)) - copy(copyStart, start) - fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) } } } @@ -490,24 +491,27 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { writeFields := func(fields [][]int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) - fieldVal := eindirect(rv.FieldByIndex(fieldIndex)) + fieldVal := rv.FieldByIndex(fieldIndex) - if isNil(fieldVal) { /// Don't write anything for nil fields. + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + if opts.omitempty && isEmpty(fieldVal) { continue } - opts := getOptions(fieldType.Tag) - if opts.skip { + fieldVal = eindirect(fieldVal) + + if isNil(fieldVal) { /// Don't write anything for nil fields. 
continue } + keyName := fieldType.Name if opts.name != "" { keyName = opts.name } - if opts.omitempty && enc.isEmpty(fieldVal) { - continue - } if opts.omitzero && isZero(fieldVal) { continue } @@ -649,7 +653,7 @@ func isZero(rv reflect.Value) bool { return false } -func (enc *Encoder) isEmpty(rv reflect.Value) bool { +func isEmpty(rv reflect.Value) bool { switch rv.Kind() { case reflect.Array, reflect.Slice, reflect.Map, reflect.String: return rv.Len() == 0 @@ -664,13 +668,15 @@ func (enc *Encoder) isEmpty(rv reflect.Value) bool { // type b struct{ s []string } // s := a{field: b{s: []string{"AAA"}}} for i := 0; i < rv.NumField(); i++ { - if !enc.isEmpty(rv.Field(i)) { + if !isEmpty(rv.Field(i)) { return false } } return true case reflect.Bool: return !rv.Bool() + case reflect.Ptr: + return rv.IsNil() } return false } @@ -693,8 +699,11 @@ func (enc *Encoder) newline() { // v v v v vv // key = {k = 1, k2 = 2} func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { + /// Marshaler used on top-level document; call eElement() to just call + /// Marshal{TOML,Text}. if len(key) == 0 { - encPanic(errNoKey) + enc.eElement(val) + return } enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.eElement(val) diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index f4f390e647..efd68865bb 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -84,7 +84,7 @@ func (pe ParseError) Error() string { pe.Position.Line, pe.LastKey, msg) } -// ErrorWithUsage() returns the error with detailed location context. +// ErrorWithPosition returns the error with detailed location context. // // See the documentation on [ParseError]. func (pe ParseError) ErrorWithPosition() string { @@ -124,7 +124,7 @@ func (pe ParseError) ErrorWithPosition() string { return b.String() } -// ErrorWithUsage() returns the error with detailed location context and usage +// ErrorWithUsage returns the error with detailed location context and usage // guidance. // // See the documentation on [ParseError]. diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index d4d70871d8..3545a6ad66 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -46,12 +46,13 @@ func (p Position) String() string { } type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item + input string + start int + pos int + line int + state stateFn + items chan item + tomlNext bool // Allow for backing up up to 4 runes. This is necessary because TOML // contains 3-rune tokens (""" and '''). @@ -87,13 +88,14 @@ func (lx *lexer) nextItem() item { } } -func lex(input string) *lexer { +func lex(input string, tomlNext bool) *lexer { lx := &lexer{ - input: input, - state: lexTop, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - line: 1, + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + tomlNext: tomlNext, } return lx } @@ -408,7 +410,7 @@ func lexTableNameEnd(lx *lexer) stateFn { // Lexes only one part, e.g. only 'a' inside 'a.b'. 
func lexBareName(lx *lexer) stateFn { r := lx.next() - if isBareKeyChar(r) { + if isBareKeyChar(r, lx.tomlNext) { return lexBareName } lx.backup() @@ -618,6 +620,9 @@ func lexInlineTableValue(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValue) case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValue) + } return lx.errorPrevLine(errLexInlineTableNL{}) case r == '#': lx.push(lexInlineTableValue) @@ -640,6 +645,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValueEnd) case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValueEnd) + } return lx.errorPrevLine(errLexInlineTableNL{}) case r == '#': lx.push(lexInlineTableValueEnd) @@ -648,6 +656,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { lx.ignore() lx.skip(isWhitespace) if lx.peek() == '}' { + if lx.tomlNext { + return lexInlineTableValueEnd + } return lx.errorf("trailing comma not allowed in inline tables") } return lexInlineTableValue @@ -770,8 +781,8 @@ func lexRawString(lx *lexer) stateFn { } } -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning ''' has already been consumed and +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a +// string. It assumes that the beginning triple-' has already been consumed and // ignored. func lexMultilineRawString(lx *lexer) stateFn { r := lx.next() @@ -828,6 +839,11 @@ func lexMultilineStringEscape(lx *lexer) stateFn { func lexStringEscape(lx *lexer) stateFn { r := lx.next() switch r { + case 'e': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + fallthrough case 'b': fallthrough case 't': @@ -846,6 +862,11 @@ func lexStringEscape(lx *lexer) stateFn { fallthrough case '\\': return lx.pop() + case 'x': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + return lexHexEscape case 'u': return lexShortUnicodeEscape case 'U': @@ -854,6 +875,19 @@ func lexStringEscape(lx *lexer) stateFn { return lx.error(errLexEscape{r}) } +func lexHexEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 2; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected two hexadecimal digits after '\x', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + func lexShortUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 4; i++ { @@ -1225,7 +1259,23 @@ func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHexadecimal(r rune) bool { return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') } -func isBareKeyChar(r rune) bool { + +func isBareKeyChar(r rune, tomlNext bool) bool { + if tomlNext { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' || + r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || + (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || + (r >= 0x037f && r <= 0x1fff) || + (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || + (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || + (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || + (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || + (r >= 0x10000 && r <= 0xeffff) + } + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index 71847a0415..2e78b24e95 
100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -106,7 +106,7 @@ func (k Key) maybeQuoted(i int) string { return `""` } for _, c := range k[i] { - if !isBareKeyChar(c) { + if !isBareKeyChar(c, false) { return `"` + dblQuotedReplacer.Replace(k[i]) + `"` } } diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index d2542d6f92..9c19153698 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -2,6 +2,7 @@ package toml import ( "fmt" + "os" "strconv" "strings" "time" @@ -15,6 +16,7 @@ type parser struct { context Key // Full key for the current hash in scope. currentKey string // Base key name for everything except hashes. pos Position // Current position in the TOML file. + tomlNext bool ordered []Key // List of keys in the order that they appear in the TOML data. @@ -29,6 +31,8 @@ type keyInfo struct { } func parse(data string) (p *parser, err error) { + _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") + defer func() { if r := recover(); r != nil { if pErr, ok := r.(ParseError); ok { @@ -41,9 +45,12 @@ func parse(data string) (p *parser, err error) { }() // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() - // which mangles stuff. - if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add + // it anyway. + if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] + } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 + data = data[3:] } // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 @@ -65,9 +72,10 @@ func parse(data string) (p *parser, err error) { p = &parser{ keyInfo: make(map[string]keyInfo), mapping: make(map[string]interface{}), - lx: lex(data), + lx: lex(data, tomlNext), ordered: make([]Key, 0), implicits: make(map[string]struct{}), + tomlNext: tomlNext, } for { item := p.next() @@ -194,12 +202,12 @@ func (p *parser) topLevel(item item) { for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Set value. vItem := p.next() val, typ := p.value(vItem, false) p.set(p.currentKey, val, typ, vItem.pos) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Remove the context we added (preserving any context from [tbl] lines). 
p.context = outerContext @@ -236,7 +244,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { case itemString: return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) case itemMultilineString: - return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) case itemRawString: return it.val, p.typeOfPrimitive(it) case itemRawMultilineString: @@ -331,11 +339,17 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) { var dtTypes = []struct { fmt string zone *time.Location + next bool }{ - {time.RFC3339Nano, time.Local}, - {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, - {"2006-01-02", internal.LocalDate}, - {"15:04:05.999999999", internal.LocalTime}, + {time.RFC3339Nano, time.Local, false}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, + {"2006-01-02", internal.LocalDate, false}, + {"15:04:05.999999999", internal.LocalTime, false}, + + // tomlNext + {"2006-01-02T15:04Z07:00", time.Local, true}, + {"2006-01-02T15:04", internal.LocalDatetime, true}, + {"15:04", internal.LocalTime, true}, } func (p *parser) valueDatetime(it item) (interface{}, tomlType) { @@ -346,6 +360,9 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) { err error ) for _, dt := range dtTypes { + if dt.next && !p.tomlNext { + continue + } t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) if err == nil { ok = true @@ -384,6 +401,7 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) { // // Not entirely sure how to best store this; could use "key[0]", // "key[1]" notation, or maybe store it on the Array type? + _ = types } return array, tomlArray } @@ -426,11 +444,11 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Set the value. val, typ := p.value(p.next(), false) p.set(p.currentKey, val, typ, it.pos) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) hash[p.currentKey] = val /// Restore context. @@ -551,7 +569,6 @@ func (p *parser) addContext(key Key, array bool) { func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { p.setValue(key, val) p.setType(key, typ, pos) - } // setValue sets the given key to the given value in the current context. @@ -632,14 +649,11 @@ func (p *parser) setType(key string, typ tomlType, pos Position) { // Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and // "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). 
-func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } -func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } -func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } -func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } -func (p *parser) addImplicitContext(key Key) { - p.addImplicit(key) - p.addContext(key, false) -} +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } +func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) } // current returns the full key name of the current context. func (p *parser) current() string { @@ -662,49 +676,54 @@ func stripFirstNewline(s string) string { return s } -// Remove newlines inside triple-quoted strings if a line ends with "\". +// stripEscapedNewlines removes whitespace after line-ending backslashes in +// multiline strings. +// +// A line-ending backslash is an unescaped \ followed only by whitespace until +// the next newline. After a line-ending backslash, all whitespace is removed +// until the next non-whitespace character. func (p *parser) stripEscapedNewlines(s string) string { - split := strings.Split(s, "\n") - if len(split) < 1 { - return s - } - - escNL := false // Keep track of the last non-blank line was escaped. - for i, line := range split { - line = strings.TrimRight(line, " \t\r") - - if len(line) == 0 || line[len(line)-1] != '\\' { - split[i] = strings.TrimRight(split[i], "\r") - if !escNL && i != len(split)-1 { - split[i] += "\n" - } - continue + var b strings.Builder + var i int + for { + ix := strings.Index(s[i:], `\`) + if ix < 0 { + b.WriteString(s) + return b.String() } + i += ix - escBS := true - for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- { - escBS = !escBS + if len(s) > i+1 && s[i+1] == '\\' { + // Escaped backslash. + i += 2 + continue } - if escNL { - line = strings.TrimLeft(line, " \t\r") + // Scan until the next non-whitespace. + j := i + 1 + whitespaceLoop: + for ; j < len(s); j++ { + switch s[j] { + case ' ', '\t', '\r', '\n': + default: + break whitespaceLoop + } } - escNL = !escBS - - if escBS { - split[i] += "\n" + if j == i+1 { + // Not a whitespace escape. + i++ continue } - - if i == len(split)-1 { - p.panicf("invalid escape: '\\ '") - } - - split[i] = line[:len(line)-1] // Remove \ - if len(split)-1 > i { - split[i+1] = strings.TrimLeft(split[i+1], " \t\r") + if !strings.Contains(s[i:j], "\n") { + // This is not a line-ending backslash. + // (It's a bad escape sequence, but we can let + // replaceEscapes catch it.) 
+ i++ + continue } + b.WriteString(s[:i]) + s = s[j:] + i = 0 } - return strings.Join(split, "") } func (p *parser) replaceEscapes(it item, str string) string { @@ -743,12 +762,23 @@ func (p *parser) replaceEscapes(it item, str string) string { case 'r': replaced = append(replaced, rune(0x000D)) r += 1 + case 'e': + if p.tomlNext { + replaced = append(replaced, rune(0x001B)) + r += 1 + } case '"': replaced = append(replaced, rune(0x0022)) r += 1 case '\\': replaced = append(replaced, rune(0x005C)) r += 1 + case 'x': + if p.tomlNext { + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+3]) + replaced = append(replaced, escaped) + r += 3 + } case 'u': // At this point, we know we have a Unicode escape of the form // `uXXXX` at [r, r+5). (Because the lexer guarantees this diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/LICENSE b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/LICENSE new file mode 100644 index 0000000000..6698196c5a --- /dev/null +++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Gaijin Entertainment + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go new file mode 100644 index 0000000000..b490f1c640 --- /dev/null +++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go @@ -0,0 +1,291 @@ +package analyzer + +import ( + "flag" + "fmt" + "go/ast" + "go/token" + "go/types" + "sync" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + + "github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment" + "github.com/GaijinEntertainment/go-exhaustruct/v3/internal/pattern" + "github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure" +) + +type analyzer struct { + include pattern.List `exhaustruct:"optional"` + exclude pattern.List `exhaustruct:"optional"` + + structFields structure.FieldsCache `exhaustruct:"optional"` + comments comment.Cache `exhaustruct:"optional"` + + typeProcessingNeed map[string]bool + typeProcessingNeedMu sync.RWMutex `exhaustruct:"optional"` +} + +func NewAnalyzer(include, exclude []string) (*analysis.Analyzer, error) { + a := analyzer{ + typeProcessingNeed: make(map[string]bool), + comments: comment.Cache{}, + } + + var err error + + a.include, err = pattern.NewList(include...) 
+	if err != nil {
+		return nil, err //nolint:wrapcheck
+	}
+
+	a.exclude, err = pattern.NewList(exclude...)
+	if err != nil {
+		return nil, err //nolint:wrapcheck
+	}
+
+	return &analysis.Analyzer{ //nolint:exhaustruct
+		Name:     "exhaustruct",
+		Doc:      "Checks if all structure fields are initialized",
+		Run:      a.run,
+		Requires: []*analysis.Analyzer{inspect.Analyzer},
+		Flags:    a.newFlagSet(),
+	}, nil
+}
+
+func (a *analyzer) newFlagSet() flag.FlagSet {
+	fs := flag.NewFlagSet("", flag.PanicOnError)
+
+	fs.Var(&a.include, "i", `Regular expression to match type names, can receive multiple flags.
+Anonymous structs can be matched by '<anonymous>' alias.
+4ex:
+	github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer\.<anonymous>
+	github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer\.TypeInfo`)
+	fs.Var(&a.exclude, "e", `Regular expression to exclude type names, can receive multiple flags.
+Anonymous structs can be matched by '<anonymous>' alias.
+4ex:
+	github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer\.<anonymous>
+	github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer\.TypeInfo`)
+
+	return *fs
+}
+
+func (a *analyzer) run(pass *analysis.Pass) (any, error) {
+	insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) //nolint:forcetypeassert
+
+	insp.WithStack([]ast.Node{(*ast.CompositeLit)(nil)}, a.newVisitor(pass))
+
+	return nil, nil //nolint:nilnil
+}
+
+// newVisitor returns a visitor that only expects [ast.CompositeLit] nodes.
+func (a *analyzer) newVisitor(pass *analysis.Pass) func(n ast.Node, push bool, stack []ast.Node) bool {
+	return func(n ast.Node, push bool, stack []ast.Node) bool {
+		if !push {
+			return true
+		}
+
+		lit, ok := n.(*ast.CompositeLit)
+		if !ok {
+			// this should never happen, but better be prepared
+			return true
+		}
+
+		structTyp, typeInfo, ok := getStructType(pass, lit)
+		if !ok {
+			return true
+		}
+
+		if len(lit.Elts) == 0 {
+			if ret, ok := stackParentIsReturn(stack); ok {
+				if returnContainsNonNilError(pass, ret) {
+					// it is okay to return an uninitialized structure in case the
+					// struct's direct parent is a return statement containing a
+					// non-nil error
+					//
+					// we're unable to check if the returned error is custom, but at
+					// least we're able to cover the std [error] type.
+					return true
+				}
+			}
+		}
+
+		file := a.comments.Get(pass.Fset, stack[0].(*ast.File)) //nolint:forcetypeassert
+		rc := getCompositeLitRelatedComments(stack, file)
+		pos, msg := a.processStruct(pass, lit, structTyp, typeInfo, rc)
+
+		if pos != nil {
+			pass.Reportf(*pos, msg)
+		}
+
+		return true
+	}
+}
+
+// getCompositeLitRelatedComments returns all comments that are related to the
+// checked node. We have to traverse the stack manually, as the ast package
+// does not associate comments with [ast.CompositeLit].
+func getCompositeLitRelatedComments(stack []ast.Node, cm ast.CommentMap) []*ast.CommentGroup {
+	comments := make([]*ast.CommentGroup, 0)
+
+	for i := len(stack) - 1; i >= 0; i-- {
+		node := stack[i]
+
+		switch node.(type) {
+		case *ast.CompositeLit, // stack[len(stack)-1]
+			*ast.ReturnStmt, // return ...
+			*ast.IndexExpr,  // map[enum]...{...}[key]
+			*ast.CallExpr,   // myfunc(map...)
+			*ast.UnaryExpr,  // &map...
+			*ast.AssignStmt, // variable assignment (without var keyword)
+			*ast.DeclStmt,   // var declaration, parent of *ast.GenDecl
+			*ast.GenDecl,    // var declaration, parent of *ast.ValueSpec
+			*ast.ValueSpec:  // var declaration
+			comments = append(comments, cm[node]...)
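+			// keep climbing the stack: any of the ancestor kinds listed
+			// above may carry comments that belong to this literal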
+
+		default:
+			return comments
+		}
+	}
+
+	return comments
+}
+
+func getStructType(pass *analysis.Pass, lit *ast.CompositeLit) (*types.Struct, *TypeInfo, bool) {
+	switch typ := pass.TypesInfo.TypeOf(lit).(type) {
+	case *types.Named: // named type
+		if structTyp, ok := typ.Underlying().(*types.Struct); ok {
+			pkg := typ.Obj().Pkg()
+			ti := TypeInfo{
+				Name:        typ.Obj().Name(),
+				PackageName: pkg.Name(),
+				PackagePath: pkg.Path(),
+			}
+
+			return structTyp, &ti, true
+		}
+
+		return nil, nil, false
+
+	case *types.Struct: // anonymous struct
+		ti := TypeInfo{
+			Name:        "",
+			PackageName: pass.Pkg.Name(),
+			PackagePath: pass.Pkg.Path(),
+		}
+
+		return typ, &ti, true
+
+	default:
+		return nil, nil, false
+	}
+}
+
+func stackParentIsReturn(stack []ast.Node) (*ast.ReturnStmt, bool) {
+	// it is safe to skip the boundary check, since the stack always has at
+	// least two elements - the whole file and the composite literal itself.
+	ret, ok := stack[len(stack)-2].(*ast.ReturnStmt)
+
+	return ret, ok
+}
+
+func returnContainsNonNilError(pass *analysis.Pass, ret *ast.ReturnStmt) bool {
+	// errors are mostly located at the end of a return statement, so we start
+	// from the end.
+	for i := len(ret.Results) - 1; i >= 0; i-- {
+		if pass.TypesInfo.TypeOf(ret.Results[i]).String() == "error" {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (a *analyzer) processStruct(
+	pass *analysis.Pass,
+	lit *ast.CompositeLit,
+	structTyp *types.Struct,
+	info *TypeInfo,
+	comments []*ast.CommentGroup,
+) (*token.Pos, string) {
+	shouldProcess := a.shouldProcessType(info)
+
+	if shouldProcess && comment.HasDirective(comments, comment.DirectiveIgnore) {
+		return nil, ""
+	}
+
+	if !shouldProcess && !comment.HasDirective(comments, comment.DirectiveEnforce) {
+		return nil, ""
+	}
+
+	// unnamed structures can only be defined in the same package, along with
+	// types whose prefix is identical to the current package name.
+	isSamePackage := info.PackagePath == pass.Pkg.Path()
+
+	if f := a.litSkippedFields(lit, structTyp, !isSamePackage); len(f) > 0 {
+		pos := lit.Pos()
+
+		if len(f) == 1 {
+			return &pos, fmt.Sprintf("%s is missing field %s", info.ShortString(), f.String())
+		}
+
+		return &pos, fmt.Sprintf("%s is missing fields %s", info.ShortString(), f.String())
+	}
+
+	return nil, ""
+}
+
+// shouldProcessType returns true if the type should be processed based on the
+// include and exclude patterns, defined through the constructor and/or flags.
+func (a *analyzer) shouldProcessType(info *TypeInfo) bool {
+	if len(a.include) == 0 && len(a.exclude) == 0 {
+		return true
+	}
+
+	name := info.String()
+
+	a.typeProcessingNeedMu.RLock()
+	res, ok := a.typeProcessingNeed[name]
+	a.typeProcessingNeedMu.RUnlock()
+
+	if !ok {
+		a.typeProcessingNeedMu.Lock()
+		res = true
+
+		if a.include != nil && !a.include.MatchFullString(name) {
+			res = false
+		}
+
+		if res && a.exclude != nil && a.exclude.MatchFullString(name) {
+			res = false
+		}
+
+		a.typeProcessingNeed[name] = res
+		a.typeProcessingNeedMu.Unlock()
+	}
+
+	return res
+}
+
+func (a *analyzer) litSkippedFields(
+	lit *ast.CompositeLit,
+	typ *types.Struct,
+	onlyExported bool,
+) structure.Fields {
+	return a.structFields.Get(typ).Skipped(lit, onlyExported)
+}
+
+type TypeInfo struct {
+	Name        string
+	PackageName string
+	PackagePath string
+}
+
+func (t TypeInfo) String() string {
+	return t.PackagePath + "." + t.Name
+}
+
+func (t TypeInfo) ShortString() string {
+	return t.PackageName + "." + t.Name
+}
diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment/cache.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment/cache.go
new file mode 100644
index 0000000000..88edef638a
--- /dev/null
+++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment/cache.go
@@ -0,0 +1,35 @@
+package comment
+
+import (
+	"go/ast"
+	"go/token"
+	"sync"
+)
+
+type Cache struct {
+	comments map[*ast.File]ast.CommentMap
+	mu       sync.RWMutex
+}
+
+// Get returns a comment map for a given file. If a comment map is not found,
+// it creates a new one.
+func (c *Cache) Get(fset *token.FileSet, f *ast.File) ast.CommentMap {
+	c.mu.RLock()
+	if cm, ok := c.comments[f]; ok {
+		c.mu.RUnlock()
+		return cm
+	}
+	c.mu.RUnlock()
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.comments == nil {
+		c.comments = make(map[*ast.File]ast.CommentMap)
+	}
+
+	cm := ast.NewCommentMap(fset, f, f.Comments)
+	c.comments[f] = cm
+
+	return cm
+}
diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment/directive.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment/directive.go
new file mode 100644
index 0000000000..a39a8076fa
--- /dev/null
+++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment/directive.go
@@ -0,0 +1,28 @@
+package comment
+
+import (
+	"go/ast"
+	"strings"
+)
+
+type Directive string
+
+const (
+	prefix                     = `//exhaustruct:`
+	DirectiveIgnore  Directive = prefix + `ignore`
+	DirectiveEnforce Directive = prefix + `enforce`
+)
+
+// HasDirective reports whether the expected directive is present in a given
+// list of comments.
+func HasDirective(comments []*ast.CommentGroup, expected Directive) bool {
+	for _, cg := range comments {
+		for _, commentLine := range cg.List {
+			if strings.HasPrefix(commentLine.Text, string(expected)) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/pattern/list.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/pattern/list.go
new file mode 100644
index 0000000000..a16e5058d2
--- /dev/null
+++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/pattern/list.go
@@ -0,0 +1,82 @@
+package pattern
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+var (
+	ErrEmptyPattern      = fmt.Errorf("pattern can't be empty")
+	ErrCompilationFailed = fmt.Errorf("pattern compilation failed")
+)
+
+// List is a list of regular expressions.
+type List []*regexp.Regexp
+
+// NewList parses a slice of strings into a slice of compiled regular
+// expressions.
+func NewList(strs ...string) (List, error) {
+	if len(strs) == 0 {
+		return nil, nil
+	}
+
+	l := make(List, 0, len(strs))
+
+	for _, str := range strs {
+		re, err := strToRe(str)
+		if err != nil {
+			return nil, err
+		}
+
+		l = append(l, re)
+	}
+
+	return l, nil
+}
+
+// MatchFullString matches the provided string against all regexps in the list
+// and returns true if any of them matches the whole string.
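+// Matching is effectively anchored: a pattern counts as a match only when the
+// full match m[0] equals the entire input string.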
+func (l List) MatchFullString(str string) bool {
+	for i := 0; i < len(l); i++ {
+		if m := l[i].FindStringSubmatch(str); len(m) > 0 && m[0] == str {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (l *List) Set(value string) error {
+	re, err := strToRe(value)
+	if err != nil {
+		return err
+	}
+
+	*l = append(*l, re)
+
+	return nil
+}
+
+func (l *List) String() string {
+	res := make([]string, 0, len(*l))
+
+	for _, re := range *l {
+		res = append(res, `"`+re.String()+`"`)
+	}
+
+	return strings.Join(res, ", ")
+}
+
+// strToRe parses a string into a compiled regular expression that is later
+// matched against the full string.
+func strToRe(str string) (*regexp.Regexp, error) {
+	if str == "" {
+		return nil, ErrEmptyPattern
+	}
+
+	re, err := regexp.Compile(str)
+	if err != nil {
+		return nil, fmt.Errorf("%w: %s: %w", ErrCompilationFailed, str, err)
+	}
+
+	return re, nil
+}
diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure/fields-cache.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure/fields-cache.go
new file mode 100644
index 0000000000..12a3796926
--- /dev/null
+++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure/fields-cache.go
@@ -0,0 +1,35 @@
+package structure
+
+import (
+	"go/types"
+	"sync"
+)
+
+type FieldsCache struct {
+	fields map[*types.Struct]Fields
+	mu     sync.RWMutex
+}
+
+// Get returns the struct fields for a given type. If the fields are not
+// found, it creates them from the type definition.
+func (c *FieldsCache) Get(typ *types.Struct) Fields {
+	c.mu.RLock()
+	fields, ok := c.fields[typ]
+	c.mu.RUnlock()
+
+	if ok {
+		return fields
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.fields == nil {
+		c.fields = make(map[*types.Struct]Fields)
+	}
+
+	fields = NewFields(typ)
+	c.fields[typ] = fields
+
+	return fields
+}
diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure/fields.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure/fields.go
new file mode 100644
index 0000000000..b6b1a48c87
--- /dev/null
+++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure/fields.go
@@ -0,0 +1,127 @@
+package structure
+
+import (
+	"go/ast"
+	"go/types"
+	"reflect"
+	"strings"
+)
+
+const (
+	tagName          = "exhaustruct"
+	optionalTagValue = "optional"
+)
+
+type Field struct {
+	Name     string
+	Exported bool
+	Optional bool
+}
+
+type Fields []*Field
+
+// NewFields creates a new [Fields] from a given struct type.
+// Fields items are listed in the order they appear in the struct.
+func NewFields(strct *types.Struct) Fields {
+	sf := make(Fields, 0, strct.NumFields())
+
+	for i := 0; i < strct.NumFields(); i++ {
+		f := strct.Field(i)
+
+		sf = append(sf, &Field{
+			Name:     f.Name(),
+			Exported: f.Exported(),
+			Optional: HasOptionalTag(strct.Tag(i)),
+		})
+	}
+
+	return sf
+}
+
+func HasOptionalTag(tags string) bool {
+	return reflect.StructTag(tags).Get(tagName) == optionalTagValue
+}
+
+// String returns a comma-separated list of field names.
+func (sf Fields) String() string {
+	b := strings.Builder{}
+
+	for i := 0; i < len(sf); i++ {
+		if b.Len() != 0 {
+			b.WriteString(", ")
+		}
+
+		b.WriteString(sf[i].Name)
+	}
+
+	return b.String()
+}
+
+// Skipped returns a list of fields that are not present in the given
+// literal but are expected to be.
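+// For unnamed (positional) literals only the trailing, unassigned fields are
+// reported; for named literals the missing keys are collected by name.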
+//
+//revive:disable-next-line:cyclomatic
+func (sf Fields) Skipped(lit *ast.CompositeLit, onlyExported bool) Fields {
+	if len(lit.Elts) != 0 && !isNamedLiteral(lit) {
+		if len(lit.Elts) == len(sf) {
+			return nil
+		}
+
+		return sf[len(lit.Elts):]
+	}
+
+	em := sf.existenceMap()
+	res := make(Fields, 0, len(sf))
+
+	for i := 0; i < len(lit.Elts); i++ {
+		kv, ok := lit.Elts[i].(*ast.KeyValueExpr)
+		if !ok {
+			continue
+		}
+
+		k, ok := kv.Key.(*ast.Ident)
+		if !ok {
+			continue
+		}
+
+		em[k.Name] = true
+	}
+
+	for i := 0; i < len(sf); i++ {
+		if em[sf[i].Name] || (!sf[i].Exported && onlyExported) || sf[i].Optional {
+			continue
+		}
+
+		res = append(res, sf[i])
+	}
+
+	if len(res) == 0 {
+		return nil
+	}
+
+	return res
+}
+
+func (sf Fields) existenceMap() map[string]bool {
+	m := make(map[string]bool, len(sf))
+
+	for i := 0; i < len(sf); i++ {
+		m[sf[i].Name] = false
+	}
+
+	return m
+}
+
+// isNamedLiteral returns true if the given literal is named.
+//
+// The logic is based on the principle that a literal is either named or
+// unnamed; therefore, if the literal's first element is an
+// [ast.KeyValueExpr], it is named.
+//
+// The method will panic if the given literal is empty.
+func isNamedLiteral(lit *ast.CompositeLit) bool {
+	if _, ok := lit.Elts[0].(*ast.KeyValueExpr); !ok {
+		return false
+	}
+
+	return true
+}
diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml
deleted file mode 100644
index 4025e01ec4..0000000000
--- a/vendor/github.com/Masterminds/goutils/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-
-go:
-  - 1.6
-  - 1.7
-  - 1.8
-  - tip
-
-script:
-  - go test -v
-
-notifications:
-  webhooks:
-    urls:
-      - https://webhooks.gitter.im/e/06e3328629952dabe3e0
-    on_success: change  # options: [always|never|change] default: always
-    on_failure: always  # options: [always|never|change] default: always
-    on_start: never     # options: [always|never|change] default: always
diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md
deleted file mode 100644
index d700ec47f2..0000000000
--- a/vendor/github.com/Masterminds/goutils/CHANGELOG.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# 1.0.1 (2017-05-31)
-
-## Fixed
-- #21: Fix generation of alphanumeric strings (thanks @dbarranco)
-
-# 1.0.0 (2014-04-30)
-
-- Initial release.
diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt
deleted file mode 100644
index d645695673..0000000000
--- a/vendor/github.com/Masterminds/goutils/LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md deleted file mode 100644 index 163ffe72a8..0000000000 --- a/vendor/github.com/Masterminds/goutils/README.md +++ /dev/null @@ -1,70 +0,0 @@ -GoUtils -=========== -[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) -[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) - - -GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some -string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes: -* WordUtils -* RandomStringUtils -* StringUtils (partial implementation) - -## Installation -If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: - - go get github.com/Masterminds/goutils - -If you do not have Go set up on your system, please follow the [Go installation directions from the documenation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. - - -## Documentation -GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) - - -## Usage -The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). - - package main - - import ( - "fmt" - "github.com/Masterminds/goutils" - ) - - func main() { - - // EXAMPLE 1: A goutils function which returns no errors - fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" - - } -Some functions return errors mainly due to illegal arguements used as parameters. The code example below illustrates how to deal with function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). - - package main - - import ( - "fmt" - "github.com/Masterminds/goutils" - ) - - func main() { - - // EXAMPLE 2: A goutils function which returns an error - rand1, err1 := goutils.Random (-1, 0, 0, true, true) - - if err1 != nil { - fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) - } else { - fmt.Println(rand1) - } - - } - -## License -GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. 
- -## Issue Reporting -Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues - -## Website -* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml deleted file mode 100644 index 657564a847..0000000000 --- a/vendor/github.com/Masterminds/goutils/appveyor.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\goutils -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -build: off - -install: - - go version - - go env - -test_script: - - go test -v - -deploy: off diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go deleted file mode 100644 index 8dbd924858..0000000000 --- a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go +++ /dev/null @@ -1,230 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "crypto/rand" - "fmt" - "math" - "math/big" - "unicode" -) - -/* -CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomNonAlphaNumeric(count int) (string, error) { - return CryptoRandomAlphaNumericCustom(count, false, false) -} - -/* -CryptoRandomAscii creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAscii(count int) (string, error) { - return CryptoRandom(count, 32, 127, false, false) -} - -/* -CryptoRandomNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomNumeric(count int) (string, error) { - return CryptoRandom(count, 0, 0, false, true) -} - -/* -CryptoRandomAlphabetic creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. 
- -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphabetic(count int) (string, error) { - return CryptoRandom(count, 0, 0, true, false) -} - -/* -CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphaNumeric(count int) (string, error) { - return CryptoRandom(count, 0, 0, true, true) -} - -/* -CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { - return CryptoRandom(count, 0, 0, letters, numbers) -} - -/* -CryptoRandom creates a random string based on a variety of options, using using golang's crypto/rand source of randomness. -If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, -unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. -If chars is not nil, characters stored in chars that are between start and end are chosen. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode int) to start at - end - the position in set of chars (ASCII/Unicode int) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. 
- -Returns: - string - the random string - error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -*/ -func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { - if count == 0 { - return "", nil - } else if count < 0 { - err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") - return "", err - } - if chars != nil && len(chars) == 0 { - err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") - return "", err - } - - if start == 0 && end == 0 { - if chars != nil { - end = len(chars) - } else { - if !letters && !numbers { - end = math.MaxInt32 - } else { - end = 'z' + 1 - start = ' ' - } - } - } else { - if end <= start { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) - return "", err - } - - if chars != nil && end > len(chars) { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) - return "", err - } - } - - buffer := make([]rune, count) - gap := end - start - - // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 - // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 - - for count != 0 { - count-- - var ch rune - if chars == nil { - ch = rune(getCryptoRandomInt(gap) + int64(start)) - } else { - ch = chars[getCryptoRandomInt(gap)+int64(start)] - } - - if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { - if ch >= 56320 && ch <= 57343 { // low surrogate range - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = ch - count-- - // Insert high surrogate - buffer[count] = rune(55296 + getCryptoRandomInt(128)) - } - } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = rune(56320 + getCryptoRandomInt(128)) - count-- - // Insert high surrogate - buffer[count] = ch - } - } else if ch >= 56192 && ch <= 56319 { - // private high surrogate, skip it - count++ - } else { - // not one of the surrogates* - buffer[count] = ch - } - } else { - count++ - } - } - return string(buffer), nil -} - -func getCryptoRandomInt(count int) int64 { - nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count))) - if err != nil { - panic(err) - } - return nBig.Int64() -} diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go deleted file mode 100644 index 272670231a..0000000000 --- a/vendor/github.com/Masterminds/goutils/randomstringutils.go +++ /dev/null @@ -1,248 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package goutils - -import ( - "fmt" - "math" - "math/rand" - "time" - "unicode" -) - -// RANDOM provides the time-based seed used to generate random numbers -var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) - -/* -RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomNonAlphaNumeric(count int) (string, error) { - return RandomAlphaNumericCustom(count, false, false) -} - -/* -RandomAscii creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAscii(count int) (string, error) { - return Random(count, 32, 127, false, false) -} - -/* -RandomNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomNumeric(count int) (string, error) { - return Random(count, 0, 0, false, true) -} - -/* -RandomAlphabetic creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alphabetic characters. - -Parameters: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphabetic(count int) (string, error) { - return Random(count, 0, 0, true, false) -} - -/* -RandomAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphaNumeric(count int) (string, error) { - return Random(count, 0, 0, true, true) -} - -/* -RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { - return Random(count, 0, 0, letters, numbers) -} - -/* -Random creates a random string based on a variety of options, using default source of randomness. 
-This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but -instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode int) to start at - end - the position in set of chars (ASCII/Unicode int) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { - return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) -} - -/* -RandomSeed creates a random string based on a variety of options, using supplied source of randomness. -If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, -unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. -If chars is not nil, characters stored in chars that are between start and end are chosen. -This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance -with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode decimals) to start at - end - the position in set of chars (ASCII/Unicode decimals) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - random - a source of randomness. 
- -Returns: - string - the random string - error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -*/ -func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { - - if count == 0 { - return "", nil - } else if count < 0 { - err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") - return "", err - } - if chars != nil && len(chars) == 0 { - err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") - return "", err - } - - if start == 0 && end == 0 { - if chars != nil { - end = len(chars) - } else { - if !letters && !numbers { - end = math.MaxInt32 - } else { - end = 'z' + 1 - start = ' ' - } - } - } else { - if end <= start { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) - return "", err - } - - if chars != nil && end > len(chars) { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) - return "", err - } - } - - buffer := make([]rune, count) - gap := end - start - - // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 - // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 - - for count != 0 { - count-- - var ch rune - if chars == nil { - ch = rune(random.Intn(gap) + start) - } else { - ch = chars[random.Intn(gap)+start] - } - - if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { - if ch >= 56320 && ch <= 57343 { // low surrogate range - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = ch - count-- - // Insert high surrogate - buffer[count] = rune(55296 + random.Intn(128)) - } - } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = rune(56320 + random.Intn(128)) - count-- - // Insert high surrogate - buffer[count] = ch - } - } else if ch >= 56192 && ch <= 56319 { - // private high surrogate, skip it - count++ - } else { - // not one of the surrogates* - buffer[count] = ch - } - } else { - count++ - } - } - return string(buffer), nil -} diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go deleted file mode 100644 index 741bb530e8..0000000000 --- a/vendor/github.com/Masterminds/goutils/stringutils.go +++ /dev/null @@ -1,240 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "bytes" - "fmt" - "strings" - "unicode" -) - -// Typically returned by functions where a searched item cannot be found -const INDEX_NOT_FOUND = -1 - -/* -Abbreviate abbreviates a string using ellipses. 
This will turn the string "Now is the time for all good men" into "Now is the time for..." - -Specifically, the algorithm is as follows: - - - If str is less than maxWidth characters long, return it. - - Else abbreviate it to (str[0:maxWidth - 3] + "..."). - - If maxWidth is less than 4, return an illegal argument error. - - In no case will it return a string of length greater than maxWidth. - -Parameters: - str - the string to check - maxWidth - maximum length of result string, must be at least 4 - -Returns: - string - abbreviated string - error - if the width is too small -*/ -func Abbreviate(str string, maxWidth int) (string, error) { - return AbbreviateFull(str, 0, maxWidth) -} - -/* -AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." -This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not -necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear -somewhere in the result. -In no case will it return a string of length greater than maxWidth. - -Parameters: - str - the string to check - offset - left edge of source string - maxWidth - maximum length of result string, must be at least 4 - -Returns: - string - abbreviated string - error - if the width is too small -*/ -func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { - if str == "" { - return "", nil - } - if maxWidth < 4 { - err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") - return "", err - } - if len(str) <= maxWidth { - return str, nil - } - if offset > len(str) { - offset = len(str) - } - if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 - offset = len(str) - (maxWidth - 3) - } - abrevMarker := "..." - if offset <= 4 { - return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; - } - if maxWidth < 7 { - err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") - return "", err - } - if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 - abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) - return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); - } - return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); -} - -/* -DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). -It returns the string without whitespaces. - -Parameter: - str - the string to delete whitespace from, may be nil - -Returns: - the string without whitespaces -*/ -func DeleteWhiteSpace(str string) string { - if str == "" { - return str - } - sz := len(str) - var chs bytes.Buffer - count := 0 - for i := 0; i < sz; i++ { - ch := rune(str[i]) - if !unicode.IsSpace(ch) { - chs.WriteRune(ch) - count++ - } - } - if count == sz { - return str - } - return chs.String() -} - -/* -IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. 
- -Parameters: - str1 - the first string - str2 - the second string - -Returns: - the index where str1 and str2 begin to differ; -1 if they are equal -*/ -func IndexOfDifference(str1 string, str2 string) int { - if str1 == str2 { - return INDEX_NOT_FOUND - } - if IsEmpty(str1) || IsEmpty(str2) { - return 0 - } - var i int - for i = 0; i < len(str1) && i < len(str2); i++ { - if rune(str1[i]) != rune(str2[i]) { - break - } - } - if i < len(str2) || i < len(str1) { - return i - } - return INDEX_NOT_FOUND -} - -/* -IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: - - goutils.IsBlank("") = true - goutils.IsBlank(" ") = true - goutils.IsBlank("bob") = false - goutils.IsBlank(" bob ") = false - -Parameter: - str - the string to check - -Returns: - true - if the string is whitespace or empty ("") -*/ -func IsBlank(str string) bool { - strLen := len(str) - if str == "" || strLen == 0 { - return true - } - for i := 0; i < strLen; i++ { - if unicode.IsSpace(rune(str[i])) == false { - return false - } - } - return true -} - -/* -IndexOf returns the index of the first instance of sub in str, with the search beginning from the -index start point specified. -1 is returned if sub is not present in str. - -An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. -A start position greater than the string length returns -1. - -Parameters: - str - the string to check - sub - the substring to find - start - the start position; negative treated as zero - -Returns: - the first index where the sub string was found (always >= start) -*/ -func IndexOf(str string, sub string, start int) int { - - if start < 0 { - start = 0 - } - - if len(str) < start { - return INDEX_NOT_FOUND - } - - if IsEmpty(str) || IsEmpty(sub) { - return INDEX_NOT_FOUND - } - - partialIndex := strings.Index(str[start:len(str)], sub) - if partialIndex == -1 { - return INDEX_NOT_FOUND - } - return partialIndex + start -} - -// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. -func IsEmpty(str string) bool { - return len(str) == 0 -} - -// Returns either the passed in string, or if the string is empty, the value of defaultStr. -func DefaultString(str string, defaultStr string) string { - if IsEmpty(str) { - return defaultStr - } - return str -} - -// Returns either the passed in string, or if the string is whitespace, empty (""), the value of defaultStr. -func DefaultIfBlank(str string, defaultStr string) string { - if IsBlank(str) { - return defaultStr - } - return str -} diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go deleted file mode 100644 index 034cad8e21..0000000000 --- a/vendor/github.com/Masterminds/goutils/wordutils.go +++ /dev/null @@ -1,357 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package goutils provides utility functions to manipulate strings in various ways. 
-The code snippets below show examples of how to use goutils. Some functions return -errors while others do not, so usage would vary as a result. - -Example: - - package main - - import ( - "fmt" - "github.com/aokoli/goutils" - ) - - func main() { - - // EXAMPLE 1: A goutils function which returns no errors - fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" - - - - // EXAMPLE 2: A goutils function which returns an error - rand1, err1 := goutils.Random (-1, 0, 0, true, true) - - if err1 != nil { - fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) - } else { - fmt.Println(rand1) - } - } -*/ -package goutils - -import ( - "bytes" - "strings" - "unicode" -) - -// VERSION indicates the current version of goutils -const VERSION = "1.0.0" - -/* -Wrap wraps a single line of text, identifying words by ' '. -New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped. -Leading spaces on a new line are stripped. Trailing spaces are not stripped. - -Parameters: - str - the string to be word wrapped - wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 - -Returns: - a line with newlines inserted -*/ -func Wrap(str string, wrapLength int) string { - return WrapCustom(str, wrapLength, "", false) -} - -/* -WrapCustom wraps a single line of text, identifying words by ' '. -Leading spaces on a new line are stripped. Trailing spaces are not stripped. - -Parameters: - str - the string to be word wrapped - wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 - newLineStr - the string to insert for a new line, "" uses '\n' - wrapLongWords - true if long words (such as URLs) should be wrapped - -Returns: - a line with newlines inserted -*/ -func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { - - if str == "" { - return "" - } - if newLineStr == "" { - newLineStr = "\n" // TODO Assumes "\n" is seperator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons - } - if wrapLength < 1 { - wrapLength = 1 - } - - inputLineLength := len(str) - offset := 0 - - var wrappedLine bytes.Buffer - - for inputLineLength-offset > wrapLength { - - if rune(str[offset]) == ' ' { - offset++ - continue - } - - end := wrapLength + offset + 1 - spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset - - if spaceToWrapAt >= offset { - // normal word (not longer than wrapLength) - wrappedLine.WriteString(str[offset:spaceToWrapAt]) - wrappedLine.WriteString(newLineStr) - offset = spaceToWrapAt + 1 - - } else { - // long word or URL - if wrapLongWords { - end := wrapLength + offset - // long words are wrapped one line at a time - wrappedLine.WriteString(str[offset:end]) - wrappedLine.WriteString(newLineStr) - offset += wrapLength - } else { - // long words aren't wrapped, just extended beyond limit - end := wrapLength + offset - index := strings.IndexRune(str[end:len(str)], ' ') - if index == -1 { - wrappedLine.WriteString(str[offset:len(str)]) - offset = inputLineLength - } else { - spaceToWrapAt = index + end - wrappedLine.WriteString(str[offset:spaceToWrapAt]) - wrappedLine.WriteString(newLineStr) - offset = spaceToWrapAt + 1 - } - } - } - } - - wrappedLine.WriteString(str[offset:len(str)]) - - return wrappedLine.String() - -} - -/* -Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed. 
- -To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). -The delimiters represent a set of characters understood to separate words. The first string character -and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "". -Capitalization uses the Unicode title case, normally equivalent to upper case. - -Parameters: - str - the string to capitalize - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter - -Returns: - capitalized string -*/ -func Capitalize(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - - buffer := []rune(str) - capitalizeNext := true - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if isDelimiter(ch, delimiters...) { - capitalizeNext = true - } else if capitalizeNext { - buffer[i] = unicode.ToTitle(ch) - capitalizeNext = false - } - } - return string(buffer) - -} - -/* -CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a -titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood -to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. -Capitalization uses the Unicode title case, normally equivalent to upper case. - -Parameters: - str - the string to capitalize fully - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter - -Returns: - capitalized string -*/ -func CapitalizeFully(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - str = strings.ToLower(str) - return Capitalize(str, delimiters...) -} - -/* -Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed. -The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter -character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). - -Parameters: - str - the string to uncapitalize fully - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter - -Returns: - uncapitalized string -*/ -func Uncapitalize(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - - buffer := []rune(str) - uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if isDelimiter(ch, delimiters...) { - uncapitalizeNext = true - } else if uncapitalizeNext { - buffer[i] = unicode.ToLower(ch) - uncapitalizeNext = false - } - } - return string(buffer) -} - -/* -SwapCase swaps the case of a string using a word based algorithm.
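A quick sketch of the capitalization helpers (same assumed import path; expected output in the comments):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/goutils"
)

func main() {
	fmt.Println(goutils.Capitalize("hello world"))      // Hello World
	fmt.Println(goutils.Capitalize("hello.world", '.')) // Hello.World
	fmt.Println(goutils.CapitalizeFully("hELLO wORLD")) // Hello World
	fmt.Println(goutils.Uncapitalize("Hello World"))    // hello world
}
```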
- -Conversion algorithm: - - Upper case character converts to Lower case - Title case character converts to Lower case - Lower case character after Whitespace or at start converts to Title case - Other Lower case character converts to Upper case - Whitespace is defined by unicode.IsSpace(char). - -Parameters: - str - the string to swap case - -Returns: - the changed string -*/ -func SwapCase(str string) string { - if str == "" { - return str - } - buffer := []rune(str) - - whitespace := true - - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if unicode.IsUpper(ch) { - buffer[i] = unicode.ToLower(ch) - whitespace = false - } else if unicode.IsTitle(ch) { - buffer[i] = unicode.ToLower(ch) - whitespace = false - } else if unicode.IsLower(ch) { - if whitespace { - buffer[i] = unicode.ToTitle(ch) - whitespace = false - } else { - buffer[i] = unicode.ToUpper(ch) - } - } else { - whitespace = unicode.IsSpace(ch) - } - } - return string(buffer) -} - -/* -Initials extracts the initial letters from each word in the string. The first letter of the string and all first -letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters -parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpace(char). An empty delimiter array returns an empty string. - -Parameters: - str - the string to get initials from - delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimiter -Returns: - string of initial letters -*/ -func Initials(str string, delimiters ...rune) string { - if str == "" { - return str - } - if delimiters != nil && len(delimiters) == 0 { - return "" - } - strLen := len(str) - var buf bytes.Buffer - lastWasGap := true - for i := 0; i < strLen; i++ { - ch := rune(str[i]) - - if isDelimiter(ch, delimiters...) { - lastWasGap = true - } else if lastWasGap { - buf.WriteRune(ch) - lastWasGap = false - } - } - return buf.String() -} - -// private function (lower case func name) -func isDelimiter(ch rune, delimiters ...rune) bool { - if delimiters == nil { - return unicode.IsSpace(ch) - } - for _, delimiter := range delimiters { - if ch == delimiter { - return true - } - } - return false -} diff --git a/vendor/github.com/Masterminds/sprig/.gitignore b/vendor/github.com/Masterminds/sprig/.gitignore deleted file mode 100644 index 5e3002f88f..0000000000 --- a/vendor/github.com/Masterminds/sprig/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -vendor/ -/.glide diff --git a/vendor/github.com/Masterminds/sprig/.travis.yml b/vendor/github.com/Masterminds/sprig/.travis.yml deleted file mode 100644 index b9da8b825b..0000000000 --- a/vendor/github.com/Masterminds/sprig/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -language: go - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -# Setting sudo access to false will let Travis CI use containers rather than -# VMs to run the tests.
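And a sketch of SwapCase and Initials (same assumed import path; expected output in the comments):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/goutils"
)

func main() {
	fmt.Println(goutils.SwapCase("Hello World"))   // hELLO wORLD
	fmt.Println(goutils.Initials("John Doe Foo")) // JDF
	fmt.Println(goutils.Initials("john.doe", '.')) // jd
}
```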
For more details see: -# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ -# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ -sudo: false - -script: - - make setup test - -notifications: - webhooks: - urls: - - https://webhooks.gitter.im/e/06e3328629952dabe3e0 - on_success: change # options: [always|never|change] default: always - on_failure: always # options: [always|never|change] default: always - on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/sprig/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/CHANGELOG.md deleted file mode 100644 index 6a79fbde46..0000000000 --- a/vendor/github.com/Masterminds/sprig/CHANGELOG.md +++ /dev/null @@ -1,282 +0,0 @@ -# Changelog - -## Release 2.22.0 (2019-10-02) - -### Added - -- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) -- #195: Added deepCopy function for use with dicts - -### Changed - -- Updated merge and mergeOverwrite documentation to explain copying and how to - use deepCopy with it - -## Release 2.21.0 (2019-09-18) - -### Added - -- #122: Added encryptAES/decryptAES functions (thanks @n0madic) -- #128: Added toDecimal support (thanks @Dean-Coakley) -- #169: Added list contcat (thanks @astorath) -- #174: Added deepEqual function (thanks @bonifaido) -- #170: Added url parse and join functions (thanks @astorath) - -### Changed - -- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify - -### Fixed - -- #172: Fix semver wildcard example (thanks @piepmatz) -- #175: Fix dateInZone doc example (thanks @s3than) - -## Release 2.20.0 (2019-06-18) - -### Added - -- #164: Adding function to get unix epoch for a time (@mattfarina) -- #166: Adding tests for date_in_zone (@mattfarina) - -### Changed - -- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) -- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) -- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) - -### Fixed - -## Release 2.19.0 (2019-03-02) - -IMPORTANT: This release reverts a change from 2.18.0 - -In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. - -We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. - -### Changed - -- Fix substr panic 35fb796 (Alexey igrychev) -- Remove extra period 1eb7729 (Matthew Lorimor) -- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) -- README edits/fixes/suggestions 08fe136 (Lauri Apple) - - -## Release 2.18.0 (2019-02-12) - -### Added - -- Added mergeOverwrite function -- cryptographic functions that use secure random (see fe1de12) - -### Changed - -- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) -- Handle has for nil list 9c10885 (Daniel Cohen) -- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) -- doc: adds missing documentation. 
4b871e6 (Fernandez Ludovic) -- Replace outdated goutils imports 01893d2 (Matthew Lorimor) -- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) -- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) - -### Fixed - -- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) -- Fix substr var names and comments d581f80 (Dean Coakley) -- Fix substr documentation 2737203 (Dean Coakley) - -## Release 2.17.1 (2019-01-03) - -### Fixed - -The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. - -## Release 2.17.0 (2019-01-03) - -### Added - -- adds alder32sum function and test 6908fc2 (marshallford) -- Added kebabcase function ca331a1 (Ilyes512) - -### Changed - -- Update goutils to 1.1.0 4e1125d (Matt Butcher) - -### Fixed - -- Fix 'has' documentation e3f2a85 (dean-coakley) -- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) -- fixes spelling errors... not sure how that happened 4cf188a (marshallford) - -## Release 2.16.0 (2018-08-13) - -### Added - -- add splitn function fccb0b0 (Helgi Þorbjörnsson) -- Add slice func df28ca7 (gongdo) -- Generate serial number a3bdffd (Cody Coons) -- Extract values of dict with values function df39312 (Lawrence Jones) - -### Changed - -- Modify panic message for list.slice ae38335 (gongdo) -- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) -- Remove duplicated documentation 1d97af1 (Matthew Fisher) -- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) - -### Fixed - -- Fix file permissions c5f40b5 (gongdo) -- Fix example for buildCustomCert 7779e0d (Tin Lam) - -## Release 2.15.0 (2018-04-02) - -### Added - -- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) -- #66: Add ternary function (thanks @binoculars) -- #67: Allow keys function to take multiple dicts (thanks @binoculars) -- #89: Added sha1sum to crypto function (thanks @benkeil) -- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) -- #92: Add travis testing for go 1.10 -- #93: Adding appveyor config for windows testing - -### Changed - -- #90: Updating to more recent dependencies -- #73: replace satori/go.uuid with google/uuid (thanks @petterw) - -### Fixed - -- #76: Fixed documentation typos (thanks @Thiht) -- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older - -## Release 2.14.1 (2017-12-01) - -### Fixed - -- #60: Fix typo in function name documentation (thanks @neil-ca-moore) -- #61: Removing line with {{ due to blocking github pages genertion -- #64: Update the list functions to handle int, string, and other slices for compatibility - -## Release 2.14.0 (2017-10-06) - -This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
- -- `genCA` generates an SSL Certificate Authority -- `genSelfSignedCert` generates an SSL self-signed certificate -- `genSignedCert` generates an SSL certificate and key based on a given CA - -## Release 2.13.0 (2017-09-18) - -This release adds new functions, including: - -- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions -- `floor`, `ceil`, and `round` math functions -- `toDate` converts a string to a date -- `nindent` is just like `indent` but also prepends a new line -- `ago` returns the time from `time.Now` - -### Added - -- #40: Added basic regex functionality (thanks @alanquillin) -- #41: Added ceil floor and round functions (thanks @alanquillin) -- #48: Added toDate function (thanks @andreynering) -- #50: Added nindent function (thanks @binoculars) -- #46: Added ago function (thanks @slayer) - -### Changed - -- #51: Updated godocs to include new string functions (thanks @curtisallen) -- #49: Added ability to merge multiple dicts (thanks @binoculars) - -## Release 2.12.0 (2017-05-17) - -- `snakecase`, `camelcase`, and `shuffle` are three new string functions -- `fail` allows you to bail out of a template render when conditions are not met - -## Release 2.11.0 (2017-05-02) - -- Added `toJson` and `toPrettyJson` -- Added `merge` -- Refactored documentation - -## Release 2.10.0 (2017-03-15) - -- Added `semver` and `semverCompare` for Semantic Versions -- `list` replaces `tuple` -- Fixed issue with `join` -- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` - -## Release 2.9.0 (2017-02-23) - -- Added `splitList` to split a list -- Added crypto functions of `genPrivateKey` and `derivePassword` - -## Release 2.8.0 (2016-12-21) - -- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) -- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) - -## Release 2.7.0 (2016-12-01) - -- Added `sha256sum` to generate a hash of an input -- Added functions to convert a numeric or string to `int`, `int64`, `float64` - -## Release 2.6.0 (2016-10-03) - -- Added a `uuidv4` template function for generating UUIDs inside of a template. - -## Release 2.5.0 (2016-08-19) - -- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions -- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) -- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 - -## Release 2.4.0 (2016-08-16) - -- Adds two functions: `until` and `untilStep` - -## Release 2.3.0 (2016-06-21) - -- cat: Concatenate strings with whitespace separators. -- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" -- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" -- indent: Indent blocks of text in a way that is sensitive to "\n" characters. - -## Release 2.2.0 (2016-04-21) - -- Added a `genPrivateKey` function (Thanks @bacongobbler) - -## Release 2.1.0 (2016-03-30) - -- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. -- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
- -## Release 2.0.0 (2016-03-29) - -Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. - -- `min` complements `max` (formerly `biggest`) -- `empty` indicates that a value is the empty value for its type -- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` -- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` -- Date formatters have been added for HTML dates (as used in `date` input fields) -- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). - -## Release 1.2.0 (2016-02-01) - -- Added quote and squote -- Added b32enc and b32dec -- add now takes varargs -- biggest now takes varargs - -## Release 1.1.0 (2015-12-29) - -- Added #4: Added contains function. strings.Contains, but with the arguments - switched to simplify common pipelines. (thanks krancour) -- Added Travis-CI testing support - -## Release 1.0.0 (2015-12-23) - -- Initial release diff --git a/vendor/github.com/Masterminds/sprig/LICENSE.txt b/vendor/github.com/Masterminds/sprig/LICENSE.txt deleted file mode 100644 index 5c95accc2e..0000000000 --- a/vendor/github.com/Masterminds/sprig/LICENSE.txt +++ /dev/null @@ -1,20 +0,0 @@ -Sprig -Copyright (C) 2013 Masterminds - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/sprig/Makefile b/vendor/github.com/Masterminds/sprig/Makefile deleted file mode 100644 index 63a93fdf79..0000000000 --- a/vendor/github.com/Masterminds/sprig/Makefile +++ /dev/null @@ -1,13 +0,0 @@ - -HAS_GLIDE := $(shell command -v glide;) - -.PHONY: test -test: - go test -v . - -.PHONY: setup -setup: -ifndef HAS_GLIDE - go get -u github.com/Masterminds/glide -endif - glide install diff --git a/vendor/github.com/Masterminds/sprig/README.md b/vendor/github.com/Masterminds/sprig/README.md deleted file mode 100644 index b70569585f..0000000000 --- a/vendor/github.com/Masterminds/sprig/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# Sprig: Template functions for Go templates -[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) -[![Build Status](https://travis-ci.org/Masterminds/sprig.svg?branch=master)](https://travis-ci.org/Masterminds/sprig) - -The Go language comes with a [built-in template -language](http://golang.org/pkg/text/template/), but not -very many template functions. 
Sprig is a library that provides more than 100 commonly -used template functions. - -It is inspired by the template functions found in -[Twig](http://twig.sensiolabs.org/documentation) and in various -JavaScript libraries, such as [underscore.js](http://underscorejs.org/). - -## Usage - -**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for -detailed instructions and code snippets for the >100 template functions available. - -**Go developers**: If you'd like to include Sprig as a library in your program, -our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig). - -For standard usage, read on. - -### Load the Sprig library - -To load the Sprig `FuncMap`: - -```go - -import ( - "github.com/Masterminds/sprig" - "html/template" -) - -// This example illustrates that the FuncMap *must* be set before the -// templates themselves are loaded. -tpl := template.Must( - template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") -) - - -``` - -### Calling the functions inside of templates - -By convention, all functions are lowercase. This seems to follow the Go -idiom for template functions (as opposed to template methods, which are -TitleCase). For example, this: - -``` -{{ "hello!" | upper | repeat 5 }} -``` - -produces this: - -``` -HELLO!HELLO!HELLO!HELLO!HELLO! -``` - -## Principles Driving Our Function Selection - -We followed these principles to decide which functions to add and how to implement them: - -- Use template functions to build layout. The following - types of operations are within the domain of template functions: - - Formatting - - Layout - - Simple type conversions - - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) -- Template functions should not return errors unless there is no way to print - a sensible value. For example, converting a string to an integer should not - produce an error if conversion fails. Instead, it should display a default - value. -- Simple math is necessary for grid layouts, pagers, and so on. Complex math - (anything other than arithmetic) should be done outside of templates. -- Template functions only deal with the data passed into them. They never retrieve - data from a source. -- Finally, do not override core Go template functions. diff --git a/vendor/github.com/Masterminds/sprig/appveyor.yml b/vendor/github.com/Masterminds/sprig/appveyor.yml deleted file mode 100644 index d545a987a3..0000000000 --- a/vendor/github.com/Masterminds/sprig/appveyor.yml +++ /dev/null @@ -1,26 +0,0 @@ - -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\sprig -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -install: - - go get -u github.com/Masterminds/glide - - set PATH=%GOPATH%\bin;%PATH% - - go version - - go env - -build_script: - - glide install - - go install ./... 
- -test_script: - - go test -v - -deploy: off diff --git a/vendor/github.com/Masterminds/sprig/crypto.go b/vendor/github.com/Masterminds/sprig/crypto.go deleted file mode 100644 index 7a418ba88d..0000000000 --- a/vendor/github.com/Masterminds/sprig/crypto.go +++ /dev/null @@ -1,502 +0,0 @@ -package sprig - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/hmac" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "encoding/binary" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "io" - "hash/adler32" - "math/big" - "net" - "time" - - "github.com/google/uuid" - "golang.org/x/crypto/scrypt" -) - -func sha256sum(input string) string { - hash := sha256.Sum256([]byte(input)) - return hex.EncodeToString(hash[:]) -} - -func sha1sum(input string) string { - hash := sha1.Sum([]byte(input)) - return hex.EncodeToString(hash[:]) -} - -func adler32sum(input string) string { - hash := adler32.Checksum([]byte(input)) - return fmt.Sprintf("%d", hash) -} - -// uuidv4 provides a safe and secure UUID v4 implementation -func uuidv4() string { - return fmt.Sprintf("%s", uuid.New()) -} - -var master_password_seed = "com.lyndir.masterpassword" - -var password_type_templates = map[string][][]byte{ - "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, - "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), - []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), - []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), - []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), - []byte("CvccCvcvCvccno")}, - "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, - "short": {[]byte("Cvcn")}, - "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, - "pin": {[]byte("nnnn")}, -} - -var template_characters = map[byte]string{ - 'V': "AEIOU", - 'C': "BCDFGHJKLMNPQRSTVWXYZ", - 'v': "aeiou", - 'c': "bcdfghjklmnpqrstvwxyz", - 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", - 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", - 'n': "0123456789", - 'o': "@&%?,=[]_:-+*$#!'^~;()/.", - 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", -} - -func derivePassword(counter uint32, password_type, password, user, site string) string { - var templates = password_type_templates[password_type] - if templates == nil { - return fmt.Sprintf("cannot find password template %s", password_type) - } - - var buffer bytes.Buffer - buffer.WriteString(master_password_seed) - binary.Write(&buffer, binary.BigEndian, uint32(len(user))) - buffer.WriteString(user) - - salt := buffer.Bytes() - key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) - if err != nil { - return fmt.Sprintf("failed to derive password: %s", err) - } - - buffer.Truncate(len(master_password_seed)) - binary.Write(&buffer, binary.BigEndian, uint32(len(site))) - buffer.WriteString(site) - binary.Write(&buffer, binary.BigEndian, counter) - - var hmacv = hmac.New(sha256.New, key) - hmacv.Write(buffer.Bytes()) - var seed = hmacv.Sum(nil) - var temp = templates[int(seed[0])%len(templates)] - - buffer.Truncate(0) - for i, element := range temp { - 
pass_chars := template_characters[element] - pass_char := pass_chars[int(seed[i+1])%len(pass_chars)] - buffer.WriteByte(pass_char) - } - - return buffer.String() -} - -func generatePrivateKey(typ string) string { - var priv interface{} - var err error - switch typ { - case "", "rsa": - // good enough for government work - priv, err = rsa.GenerateKey(rand.Reader, 4096) - case "dsa": - key := new(dsa.PrivateKey) - // again, good enough for government work - if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { - return fmt.Sprintf("failed to generate dsa params: %s", err) - } - err = dsa.GenerateKey(key, rand.Reader) - priv = key - case "ecdsa": - // again, good enough for government work - priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - default: - return "Unknown type " + typ - } - if err != nil { - return fmt.Sprintf("failed to generate private key: %s", err) - } - - return string(pem.EncodeToMemory(pemBlockForKey(priv))) -} - -type DSAKeyFormat struct { - Version int - P, Q, G, Y, X *big.Int -} - -func pemBlockForKey(priv interface{}) *pem.Block { - switch k := priv.(type) { - case *rsa.PrivateKey: - return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} - case *dsa.PrivateKey: - val := DSAKeyFormat{ - P: k.P, Q: k.Q, G: k.G, - Y: k.Y, X: k.X, - } - bytes, _ := asn1.Marshal(val) - return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} - case *ecdsa.PrivateKey: - b, _ := x509.MarshalECPrivateKey(k) - return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} - default: - return nil - } -} - -type certificate struct { - Cert string - Key string -} - -func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { - crt := certificate{} - - cert, err := base64.StdEncoding.DecodeString(b64cert) - if err != nil { - return crt, errors.New("unable to decode base64 certificate") - } - - key, err := base64.StdEncoding.DecodeString(b64key) - if err != nil { - return crt, errors.New("unable to decode base64 private key") - } - - decodedCert, _ := pem.Decode(cert) - if decodedCert == nil { - return crt, errors.New("unable to decode certificate") - } - _, err = x509.ParseCertificate(decodedCert.Bytes) - if err != nil { - return crt, fmt.Errorf( - "error parsing certificate: decodedCert.Bytes: %s", - err, - ) - } - - decodedKey, _ := pem.Decode(key) - if decodedKey == nil { - return crt, errors.New("unable to decode key") - } - _, err = x509.ParsePKCS1PrivateKey(decodedKey.Bytes) - if err != nil { - return crt, fmt.Errorf( - "error parsing private key: decodedKey.Bytes: %s", - err, - ) - } - - crt.Cert = string(cert) - crt.Key = string(key) - - return crt, nil -} - -func generateCertificateAuthority( - cn string, - daysValid int, -) (certificate, error) { - ca := certificate{} - - template, err := getBaseCertTemplate(cn, nil, nil, daysValid) - if err != nil { - return ca, err - } - // Override KeyUsage and IsCA - template.KeyUsage = x509.KeyUsageKeyEncipherment | - x509.KeyUsageDigitalSignature | - x509.KeyUsageCertSign - template.IsCA = true - - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return ca, fmt.Errorf("error generating rsa key: %s", err) - } - - ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) - if err != nil { - return ca, err - } - - return ca, nil -} - -func generateSelfSignedCertificate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, -) (certificate, error) { - cert := certificate{} - - template, err :=
getBaseCertTemplate(cn, ips, alternateDNS, daysValid) - if err != nil { - return cert, err - } - - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return cert, fmt.Errorf("error generating rsa key: %s", err) - } - - cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) - if err != nil { - return cert, err - } - - return cert, nil -} - -func generateSignedCertificate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - ca certificate, -) (certificate, error) { - cert := certificate{} - - decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) - if decodedSignerCert == nil { - return cert, errors.New("unable to decode certificate") - } - signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) - if err != nil { - return cert, fmt.Errorf( - "error parsing certificate: decodedSignerCert.Bytes: %s", - err, - ) - } - decodedSignerKey, _ := pem.Decode([]byte(ca.Key)) - if decodedSignerKey == nil { - return cert, errors.New("unable to decode key") - } - signerKey, err := x509.ParsePKCS1PrivateKey(decodedSignerKey.Bytes) - if err != nil { - return cert, fmt.Errorf( - "error parsing private key: decodedSignerKey.Bytes: %s", - err, - ) - } - - template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) - if err != nil { - return cert, err - } - - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return cert, fmt.Errorf("error generating rsa key: %s", err) - } - - cert.Cert, cert.Key, err = getCertAndKey( - template, - priv, - signerCert, - signerKey, - ) - if err != nil { - return cert, err - } - - return cert, nil -} - -func getCertAndKey( - template *x509.Certificate, - signeeKey *rsa.PrivateKey, - parent *x509.Certificate, - signingKey *rsa.PrivateKey, -) (string, string, error) { - derBytes, err := x509.CreateCertificate( - rand.Reader, - template, - parent, - &signeeKey.PublicKey, - signingKey, - ) - if err != nil { - return "", "", fmt.Errorf("error creating certificate: %s", err) - } - - certBuffer := bytes.Buffer{} - if err := pem.Encode( - &certBuffer, - &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, - ); err != nil { - return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) - } - - keyBuffer := bytes.Buffer{} - if err := pem.Encode( - &keyBuffer, - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(signeeKey), - }, - ); err != nil { - return "", "", fmt.Errorf("error pem-encoding key: %s", err) - } - - return string(certBuffer.Bytes()), string(keyBuffer.Bytes()), nil -} - -func getBaseCertTemplate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, -) (*x509.Certificate, error) { - ipAddresses, err := getNetIPs(ips) - if err != nil { - return nil, err - } - dnsNames, err := getAlternateDNSStrs(alternateDNS) - if err != nil { - return nil, err - } - serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) - if err != nil { - return nil, err - } - return &x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - CommonName: cn, - }, - IPAddresses: ipAddresses, - DNSNames: dnsNames, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, - }, - BasicConstraintsValid: true, - }, nil -} - -func getNetIPs(ips []interface{},
error) { - if ips == nil { - return []net.IP{}, nil - } - var ipStr string - var ok bool - var netIP net.IP - netIPs := make([]net.IP, len(ips)) - for i, ip := range ips { - ipStr, ok = ip.(string) - if !ok { - return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) - } - netIP = net.ParseIP(ipStr) - if netIP == nil { - return nil, fmt.Errorf("error parsing ip: %s", ipStr) - } - netIPs[i] = netIP - } - return netIPs, nil -} - -func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { - if alternateDNS == nil { - return []string{}, nil - } - var dnsStr string - var ok bool - alternateDNSStrs := make([]string, len(alternateDNS)) - for i, dns := range alternateDNS { - dnsStr, ok = dns.(string) - if !ok { - return nil, fmt.Errorf( - "error processing alternate dns name: %v is not a string", - dns, - ) - } - alternateDNSStrs[i] = dnsStr - } - return alternateDNSStrs, nil -} - -func encryptAES(password string, plaintext string) (string, error) { - if plaintext == "" { - return "", nil - } - - key := make([]byte, 32) - copy(key, []byte(password)) - block, err := aes.NewCipher(key) - if err != nil { - return "", err - } - - content := []byte(plaintext) - blockSize := block.BlockSize() - padding := blockSize - len(content)%blockSize - padtext := bytes.Repeat([]byte{byte(padding)}, padding) - content = append(content, padtext...) - - ciphertext := make([]byte, aes.BlockSize+len(content)) - - iv := ciphertext[:aes.BlockSize] - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return "", err - } - - mode := cipher.NewCBCEncrypter(block, iv) - mode.CryptBlocks(ciphertext[aes.BlockSize:], content) - - return base64.StdEncoding.EncodeToString(ciphertext), nil -} - -func decryptAES(password string, crypt64 string) (string, error) { - if crypt64 == "" { - return "", nil - } - - key := make([]byte, 32) - copy(key, []byte(password)) - - crypt, err := base64.StdEncoding.DecodeString(crypt64) - if err != nil { - return "", err - } - - block, err := aes.NewCipher(key) - if err != nil { - return "", err - } - - iv := crypt[:aes.BlockSize] - crypt = crypt[aes.BlockSize:] - decrypted := make([]byte, len(crypt)) - mode := cipher.NewCBCDecrypter(block, iv) - mode.CryptBlocks(decrypted, crypt) - - return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil -} diff --git a/vendor/github.com/Masterminds/sprig/date.go b/vendor/github.com/Masterminds/sprig/date.go deleted file mode 100644 index d1d6155d72..0000000000 --- a/vendor/github.com/Masterminds/sprig/date.go +++ /dev/null @@ -1,83 +0,0 @@ -package sprig - -import ( - "strconv" - "time" -) - -// Given a format and a date, format the date string. -// -// Date can be a `time.Time` or an `int, int32, int64`. -// In the latter case, it is treated as seconds since UNIX -// epoch.
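A small template-side sketch of how these date helpers resolve their arguments (assuming sprig is loaded through TxtFuncMap() as its README shows; 1136239445 is Go's reference time expressed as a Unix timestamp):

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// Integer arguments are treated as seconds since the UNIX epoch.
	tpl := template.Must(template.New("t").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ dateInZone "2006-01-02" 1136239445 "UTC" }}`))
	_ = tpl.Execute(os.Stdout, nil) // prints: 2006-01-02
}
```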
-func date(fmt string, date interface{}) string { - return dateInZone(fmt, date, "Local") -} - -func htmlDate(date interface{}) string { - return dateInZone("2006-01-02", date, "Local") -} - -func htmlDateInZone(date interface{}, zone string) string { - return dateInZone("2006-01-02", date, zone) -} - -func dateInZone(fmt string, date interface{}, zone string) string { - var t time.Time - switch date := date.(type) { - default: - t = time.Now() - case time.Time: - t = date - case *time.Time: - t = *date - case int64: - t = time.Unix(date, 0) - case int: - t = time.Unix(int64(date), 0) - case int32: - t = time.Unix(int64(date), 0) - } - - loc, err := time.LoadLocation(zone) - if err != nil { - loc, _ = time.LoadLocation("UTC") - } - - return t.In(loc).Format(fmt) -} - -func dateModify(fmt string, date time.Time) time.Time { - d, err := time.ParseDuration(fmt) - if err != nil { - return date - } - return date.Add(d) -} - -func dateAgo(date interface{}) string { - var t time.Time - - switch date := date.(type) { - default: - t = time.Now() - case time.Time: - t = date - case int64: - t = time.Unix(date, 0) - case int: - t = time.Unix(int64(date), 0) - } - // Drop resolution to seconds - duration := time.Since(t).Round(time.Second) - return duration.String() -} - -func toDate(fmt, str string) time.Time { - t, _ := time.ParseInLocation(fmt, str, time.Local) - return t -} - -func unixEpoch(date time.Time) string { - return strconv.FormatInt(date.Unix(), 10) -} diff --git a/vendor/github.com/Masterminds/sprig/defaults.go b/vendor/github.com/Masterminds/sprig/defaults.go deleted file mode 100644 index ed6a8ab291..0000000000 --- a/vendor/github.com/Masterminds/sprig/defaults.go +++ /dev/null @@ -1,83 +0,0 @@ -package sprig - -import ( - "encoding/json" - "reflect" -) - -// dfault checks whether `given` is set, and returns default if not set. -// -// This returns `d` if `given` appears not to be set, and `given` otherwise. -// -// For numeric types 0 is unset. -// For strings, maps, arrays, and slices, len() = 0 is considered unset. -// For bool, false is unset. -// Structs are never considered unset. -// -// For everything else, including pointers, a nil value is unset. -func dfault(d interface{}, given ...interface{}) interface{} { - - if empty(given) || empty(given[0]) { - return d - } - return given[0] -} - -// empty returns true if the given value has the zero value for its type. -func empty(given interface{}) bool { - g := reflect.ValueOf(given) - if !g.IsValid() { - return true - } - - // Basically adapted from text/template.isTrue - switch g.Kind() { - default: - return g.IsNil() - case reflect.Array, reflect.Slice, reflect.Map, reflect.String: - return g.Len() == 0 - case reflect.Bool: - return g.Bool() == false - case reflect.Complex64, reflect.Complex128: - return g.Complex() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return g.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return g.Uint() == 0 - case reflect.Float32, reflect.Float64: - return g.Float() == 0 - case reflect.Struct: - return false - } -} - -// coalesce returns the first non-empty value. 
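As a sketch of how the defaults read from a template (same assumed sprig wiring as above): `default` returns its fallback for empty values, and `coalesce` picks the first non-empty argument.

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	tpl := template.Must(template.New("t").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ .Name | default "anonymous" }} / {{ coalesce .Nick .Name "n/a" }}`))
	// prints: anonymous / bob
	_ = tpl.Execute(os.Stdout, map[string]interface{}{"Name": "", "Nick": "bob"})
}
```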
-func coalesce(v ...interface{}) interface{} { - for _, val := range v { - if !empty(val) { - return val - } - } - return nil -} - -// toJson encodes an item into a JSON string -func toJson(v interface{}) string { - output, _ := json.Marshal(v) - return string(output) -} - -// toPrettyJson encodes an item into a pretty (indented) JSON string -func toPrettyJson(v interface{}) string { - output, _ := json.MarshalIndent(v, "", " ") - return string(output) -} - -// ternary returns the first value if the last value is true, otherwise returns the second value. -func ternary(vt interface{}, vf interface{}, v bool) interface{} { - if v { - return vt - } - - return vf -} diff --git a/vendor/github.com/Masterminds/sprig/dict.go b/vendor/github.com/Masterminds/sprig/dict.go deleted file mode 100644 index 738405b433..0000000000 --- a/vendor/github.com/Masterminds/sprig/dict.go +++ /dev/null @@ -1,119 +0,0 @@ -package sprig - -import ( - "github.com/imdario/mergo" - "github.com/mitchellh/copystructure" -) - -func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { - d[key] = value - return d -} - -func unset(d map[string]interface{}, key string) map[string]interface{} { - delete(d, key) - return d -} - -func hasKey(d map[string]interface{}, key string) bool { - _, ok := d[key] - return ok -} - -func pluck(key string, d ...map[string]interface{}) []interface{} { - res := []interface{}{} - for _, dict := range d { - if val, ok := dict[key]; ok { - res = append(res, val) - } - } - return res -} - -func keys(dicts ...map[string]interface{}) []string { - k := []string{} - for _, dict := range dicts { - for key := range dict { - k = append(k, key) - } - } - return k -} - -func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { - res := map[string]interface{}{} - for _, k := range keys { - if v, ok := dict[k]; ok { - res[k] = v - } - } - return res -} - -func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { - res := map[string]interface{}{} - - omit := make(map[string]bool, len(keys)) - for _, k := range keys { - omit[k] = true - } - - for k, v := range dict { - if _, ok := omit[k]; !ok { - res[k] = v - } - } - return res -} - -func dict(v ...interface{}) map[string]interface{} { - dict := map[string]interface{}{} - lenv := len(v) - for i := 0; i < lenv; i += 2 { - key := strval(v[i]) - if i+1 >= lenv { - dict[key] = "" - continue - } - dict[key] = v[i+1] - } - return dict -} - -func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { - for _, src := range srcs { - if err := mergo.Merge(&dst, src); err != nil { - // Swallow errors inside of a template. - return "" - } - } - return dst -} - -func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { - for _, src := range srcs { - if err := mergo.MergeWithOverwrite(&dst, src); err != nil { - // Swallow errors inside of a template. 
- return "" - } - } - return dst -} - -func values(dict map[string]interface{}) []interface{} { - values := []interface{}{} - for _, value := range dict { - values = append(values, value) - } - - return values -} - -func deepCopy(i interface{}) interface{} { - c, err := copystructure.Copy(i) - if err != nil { - panic("deepCopy error: " + err.Error()) - } - - return c -} diff --git a/vendor/github.com/Masterminds/sprig/doc.go b/vendor/github.com/Masterminds/sprig/doc.go deleted file mode 100644 index 8f8f1d7370..0000000000 --- a/vendor/github.com/Masterminds/sprig/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Sprig: Template functions for Go. - -This package contains a number of utility functions for working with data -inside of Go `html/template` and `text/template` files. - -To add these functions, use the `template.Funcs()` method: - - t := template.New("foo").Funcs(sprig.FuncMap()) - -Note that you should add the function map before you parse any template files. - - In several cases, Sprig reverses the order of arguments from the way they - appear in the standard library. This is to make it easier to pipe - arguments into functions. - -See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. -*/ -package sprig diff --git a/vendor/github.com/Masterminds/sprig/functions.go b/vendor/github.com/Masterminds/sprig/functions.go deleted file mode 100644 index 7b5b0af86c..0000000000 --- a/vendor/github.com/Masterminds/sprig/functions.go +++ /dev/null @@ -1,306 +0,0 @@ -package sprig - -import ( - "errors" - "html/template" - "os" - "path" - "reflect" - "strconv" - "strings" - ttemplate "text/template" - "time" - - util "github.com/Masterminds/goutils" - "github.com/huandu/xstrings" -) - -// Produce the function map. - -// Use this to pass the functions into the template engine: - -// tpl := template.New("foo").Funcs(sprig.FuncMap()) -// -func FuncMap() template.FuncMap { - return HtmlFuncMap() } - -// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. -func HermeticTxtFuncMap() ttemplate.FuncMap { - r := TxtFuncMap() - for _, name := range nonhermeticFunctions { - delete(r, name) - } - return r -} - -// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. -func HermeticHtmlFuncMap() template.FuncMap { - r := HtmlFuncMap() - for _, name := range nonhermeticFunctions { - delete(r, name) - } - return r -} - -// TxtFuncMap returns a 'text/template'.FuncMap -func TxtFuncMap() ttemplate.FuncMap { - return ttemplate.FuncMap(GenericFuncMap()) -} - -// HtmlFuncMap returns an 'html/template'.Funcmap -func HtmlFuncMap() template.FuncMap { - return template.FuncMap(GenericFuncMap()) -} - -// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. -func GenericFuncMap() map[string]interface{} { - gfm := make(map[string]interface{}, len(genericMap)) - for k, v := range genericMap { - gfm[k] = v - } - return gfm -} - -// These functions are not guaranteed to evaluate to the same result for given input, because they -// refer to the environment or global state.
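A quick sketch of what the hermetic variants do in practice (assuming the sprig import path as in its README): non-repeatable names such as `env` are simply deleted from the returned map.

```go
package main

import (
	"fmt"

	"github.com/Masterminds/sprig"
)

func main() {
	_, inFull := sprig.TxtFuncMap()["env"]
	_, inHermetic := sprig.HermeticTxtFuncMap()["env"]
	fmt.Println(inFull, inHermetic) // true false
}
```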
-var nonhermeticFunctions = []string{ - // Date functions - "date", - "date_in_zone", - "date_modify", - "now", - "htmlDate", - "htmlDateInZone", - "dateInZone", - "dateModify", - - // Strings - "randAlphaNum", - "randAlpha", - "randAscii", - "randNumeric", - "uuidv4", - - // OS - "env", - "expandenv", - - // Network - "getHostByName", -} - -var genericMap = map[string]interface{}{ - "hello": func() string { return "Hello!" }, - - // Date functions - "date": date, - "date_in_zone": dateInZone, - "date_modify": dateModify, - "now": func() time.Time { return time.Now() }, - "htmlDate": htmlDate, - "htmlDateInZone": htmlDateInZone, - "dateInZone": dateInZone, - "dateModify": dateModify, - "ago": dateAgo, - "toDate": toDate, - "unixEpoch": unixEpoch, - - // Strings - "abbrev": abbrev, - "abbrevboth": abbrevboth, - "trunc": trunc, - "trim": strings.TrimSpace, - "upper": strings.ToUpper, - "lower": strings.ToLower, - "title": strings.Title, - "untitle": untitle, - "substr": substring, - // Switch order so that "foo" | repeat 5 - "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, - // Deprecated: Use trimAll. - "trimall": func(a, b string) string { return strings.Trim(b, a) }, - // Switch order so that "$foo" | trimall "$" - "trimAll": func(a, b string) string { return strings.Trim(b, a) }, - "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, - "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, - "nospace": util.DeleteWhiteSpace, - "initials": initials, - "randAlphaNum": randAlphaNumeric, - "randAlpha": randAlpha, - "randAscii": randAscii, - "randNumeric": randNumeric, - "swapcase": util.SwapCase, - "shuffle": xstrings.Shuffle, - "snakecase": xstrings.ToSnakeCase, - "camelcase": xstrings.ToCamelCase, - "kebabcase": xstrings.ToKebabCase, - "wrap": func(l int, s string) string { return util.Wrap(s, l) }, - "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, - // Switch order so that "foobar" | contains "foo" - "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, - "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, - "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, - "quote": quote, - "squote": squote, - "cat": cat, - "indent": indent, - "nindent": nindent, - "replace": replace, - "plural": plural, - "sha1sum": sha1sum, - "sha256sum": sha256sum, - "adler32sum": adler32sum, - "toString": strval, - - // Wrap Atoi to stop errors. - "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, - "int64": toInt64, - "int": toInt, - "float64": toFloat64, - "toDecimal": toDecimal, - - //"gt": func(a, b int) bool {return a > b}, - //"gte": func(a, b int) bool {return a >= b}, - //"lt": func(a, b int) bool {return a < b}, - //"lte": func(a, b int) bool {return a <= b}, - - // split "/" foo/bar returns map[int]string{0: foo, 1: bar} - "split": split, - "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, - // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} - "splitn": splitn, - "toStrings": strslice, - - "until": until, - "untilStep": untilStep, - - // VERY basic arithmetic. 
- "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, - "add": func(i ...interface{}) int64 { - var a int64 = 0 - for _, b := range i { - a += toInt64(b) - } - return a - }, - "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, - "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, - "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, - "mul": func(a interface{}, v ...interface{}) int64 { - val := toInt64(a) - for _, b := range v { - val = val * toInt64(b) - } - return val - }, - "biggest": max, - "max": max, - "min": min, - "ceil": ceil, - "floor": floor, - "round": round, - - // string slices. Note that we reverse the order b/c that's better - // for template processing. - "join": join, - "sortAlpha": sortAlpha, - - // Defaults - "default": dfault, - "empty": empty, - "coalesce": coalesce, - "compact": compact, - "deepCopy": deepCopy, - "toJson": toJson, - "toPrettyJson": toPrettyJson, - "ternary": ternary, - - // Reflection - "typeOf": typeOf, - "typeIs": typeIs, - "typeIsLike": typeIsLike, - "kindOf": kindOf, - "kindIs": kindIs, - "deepEqual": reflect.DeepEqual, - - // OS: - "env": func(s string) string { return os.Getenv(s) }, - "expandenv": func(s string) string { return os.ExpandEnv(s) }, - - // Network: - "getHostByName": getHostByName, - - // File Paths: - "base": path.Base, - "dir": path.Dir, - "clean": path.Clean, - "ext": path.Ext, - "isAbs": path.IsAbs, - - // Encoding: - "b64enc": base64encode, - "b64dec": base64decode, - "b32enc": base32encode, - "b32dec": base32decode, - - // Data Structures: - "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. - "list": list, - "dict": dict, - "set": set, - "unset": unset, - "hasKey": hasKey, - "pluck": pluck, - "keys": keys, - "pick": pick, - "omit": omit, - "merge": merge, - "mergeOverwrite": mergeOverwrite, - "values": values, - - "append": push, "push": push, - "prepend": prepend, - "first": first, - "rest": rest, - "last": last, - "initial": initial, - "reverse": reverse, - "uniq": uniq, - "without": without, - "has": has, - "slice": slice, - "concat": concat, - - // Crypto: - "genPrivateKey": generatePrivateKey, - "derivePassword": derivePassword, - "buildCustomCert": buildCustomCertificate, - "genCA": generateCertificateAuthority, - "genSelfSignedCert": generateSelfSignedCertificate, - "genSignedCert": generateSignedCertificate, - "encryptAES": encryptAES, - "decryptAES": decryptAES, - - // UUIDs: - "uuidv4": uuidv4, - - // SemVer: - "semver": semver, - "semverCompare": semverCompare, - - // Flow Control: - "fail": func(msg string) (string, error) { return "", errors.New(msg) }, - - // Regex - "regexMatch": regexMatch, - "regexFindAll": regexFindAll, - "regexFind": regexFind, - "regexReplaceAll": regexReplaceAll, - "regexReplaceAllLiteral": regexReplaceAllLiteral, - "regexSplit": regexSplit, - - // URLs: - "urlParse": urlParse, - "urlJoin": urlJoin, -} diff --git a/vendor/github.com/Masterminds/sprig/glide.yaml b/vendor/github.com/Masterminds/sprig/glide.yaml deleted file mode 100644 index f317d2b2b1..0000000000 --- a/vendor/github.com/Masterminds/sprig/glide.yaml +++ /dev/null @@ -1,19 +0,0 @@ -package: github.com/Masterminds/sprig -import: -- package: github.com/Masterminds/goutils - version: ^1.0.0 -- package: github.com/google/uuid - version: ^1.0.0 -- package: golang.org/x/crypto - subpackages: - - scrypt -- package: github.com/Masterminds/semver - version: ^v1.2.2 -- package: github.com/stretchr/testify - version: 
^v1.2.2 -- package: github.com/imdario/mergo - version: ~0.3.7 -- package: github.com/huandu/xstrings - version: ^1.2 -- package: github.com/mitchellh/copystructure - version: ^1.0.0 diff --git a/vendor/github.com/Masterminds/sprig/list.go b/vendor/github.com/Masterminds/sprig/list.go deleted file mode 100644 index c0381bbb65..0000000000 --- a/vendor/github.com/Masterminds/sprig/list.go +++ /dev/null @@ -1,311 +0,0 @@ -package sprig - -import ( - "fmt" - "reflect" - "sort" -) - -// Reflection is used in these functions so that slices and arrays of strings, -// ints, and other types not implementing []interface{} can be worked with. -// For example, this is useful if you need to work on the output of regexs. - -func list(v ...interface{}) []interface{} { - return v -} - -func push(list interface{}, v interface{}) []interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[i] = l2.Index(i).Interface() - } - - return append(nl, v) - - default: - panic(fmt.Sprintf("Cannot push on type %s", tp)) - } -} - -func prepend(list interface{}, v interface{}) []interface{} { - //return append([]interface{}{v}, list...) - - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[i] = l2.Index(i).Interface() - } - - return append([]interface{}{v}, nl...) - - default: - panic(fmt.Sprintf("Cannot prepend on type %s", tp)) - } -} - -func last(list interface{}) interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil - } - - return l2.Index(l - 1).Interface() - default: - panic(fmt.Sprintf("Cannot find last on type %s", tp)) - } -} - -func first(list interface{}) interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil - } - - return l2.Index(0).Interface() - default: - panic(fmt.Sprintf("Cannot find first on type %s", tp)) - } -} - -func rest(list interface{}) []interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil - } - - nl := make([]interface{}, l-1) - for i := 1; i < l; i++ { - nl[i-1] = l2.Index(i).Interface() - } - - return nl - default: - panic(fmt.Sprintf("Cannot find rest on type %s", tp)) - } -} - -func initial(list interface{}) []interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil - } - - nl := make([]interface{}, l-1) - for i := 0; i < l-1; i++ { - nl[i] = l2.Index(i).Interface() - } - - return nl - default: - panic(fmt.Sprintf("Cannot find initial on type %s", tp)) - } -} - -func sortAlpha(list interface{}) []string { - k := reflect.Indirect(reflect.ValueOf(list)).Kind() - switch k { - case reflect.Slice, reflect.Array: - a := strslice(list) - s := sort.StringSlice(a) - s.Sort() - return s - } - return []string{strval(list)} -} - -func reverse(v interface{}) []interface{} { - tp := reflect.TypeOf(v).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(v) - - l := l2.Len() - // We do not sort in place 
because the incoming array should not be altered. - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[l-i-1] = l2.Index(i).Interface() - } - - return nl - default: - panic(fmt.Sprintf("Cannot find reverse on type %s", tp)) - } -} - -func compact(list interface{}) []interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !empty(item) { - nl = append(nl, item) - } - } - - return nl - default: - panic(fmt.Sprintf("Cannot compact on type %s", tp)) - } -} - -func uniq(list interface{}) []interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - dest := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !inList(dest, item) { - dest = append(dest, item) - } - } - - return dest - default: - panic(fmt.Sprintf("Cannot find uniq on type %s", tp)) - } -} - -func inList(haystack []interface{}, needle interface{}) bool { - for _, h := range haystack { - if reflect.DeepEqual(needle, h) { - return true - } - } - return false -} - -func without(list interface{}, omit ...interface{}) []interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - res := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !inList(omit, item) { - res = append(res, item) - } - } - - return res - default: - panic(fmt.Sprintf("Cannot find without on type %s", tp)) - } -} - -func has(needle interface{}, haystack interface{}) bool { - if haystack == nil { - return false - } - tp := reflect.TypeOf(haystack).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(haystack) - var item interface{} - l := l2.Len() - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if reflect.DeepEqual(needle, item) { - return true - } - } - - return false - default: - panic(fmt.Sprintf("Cannot find has on type %s", tp)) - } -} - -// $list := [1, 2, 3, 4, 5] -// slice $list -> list[0:5] = list[:] -// slice $list 0 3 -> list[0:3] = list[:3] -// slice $list 3 5 -> list[3:5] -// slice $list 3 -> list[3:5] = list[3:] -func slice(list interface{}, indices ...interface{}) interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil - } - - var start, end int - if len(indices) > 0 { - start = toInt(indices[0]) - } - if len(indices) < 2 { - end = l - } else { - end = toInt(indices[1]) - } - - return l2.Slice(start, end).Interface() - default: - panic(fmt.Sprintf("list should be type of slice or array but %s", tp)) - } -} - -func concat(lists ...interface{}) interface{} { - var res []interface{} - for _, list := range lists { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - for i := 0; i < l2.Len(); i++ { - res = append(res, l2.Index(i).Interface()) - } - default: - panic(fmt.Sprintf("Cannot concat type %s as list", tp)) - } - } - return res -} diff --git a/vendor/github.com/Masterminds/sprig/network.go b/vendor/github.com/Masterminds/sprig/network.go deleted file mode 100644 index d786cc7363..0000000000 --- 
a/vendor/github.com/Masterminds/sprig/network.go +++ /dev/null @@ -1,12 +0,0 @@ -package sprig - -import ( - "math/rand" - "net" -) - -func getHostByName(name string) string { - addrs, _ := net.LookupHost(name) - //TODO: add error handing when release v3 cames out - return addrs[rand.Intn(len(addrs))] -} diff --git a/vendor/github.com/Masterminds/sprig/numeric.go b/vendor/github.com/Masterminds/sprig/numeric.go deleted file mode 100644 index f4af4af2a7..0000000000 --- a/vendor/github.com/Masterminds/sprig/numeric.go +++ /dev/null @@ -1,169 +0,0 @@ -package sprig - -import ( - "fmt" - "math" - "reflect" - "strconv" -) - -// toFloat64 converts 64-bit floats -func toFloat64(v interface{}) float64 { - if str, ok := v.(string); ok { - iv, err := strconv.ParseFloat(str, 64) - if err != nil { - return 0 - } - return iv - } - - val := reflect.Indirect(reflect.ValueOf(v)) - switch val.Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return float64(val.Int()) - case reflect.Uint8, reflect.Uint16, reflect.Uint32: - return float64(val.Uint()) - case reflect.Uint, reflect.Uint64: - return float64(val.Uint()) - case reflect.Float32, reflect.Float64: - return val.Float() - case reflect.Bool: - if val.Bool() == true { - return 1 - } - return 0 - default: - return 0 - } -} - -func toInt(v interface{}) int { - //It's not optimal. Bud I don't want duplicate toInt64 code. - return int(toInt64(v)) -} - -// toInt64 converts integer types to 64-bit integers -func toInt64(v interface{}) int64 { - if str, ok := v.(string); ok { - iv, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return 0 - } - return iv - } - - val := reflect.Indirect(reflect.ValueOf(v)) - switch val.Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return val.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32: - return int64(val.Uint()) - case reflect.Uint, reflect.Uint64: - tv := val.Uint() - if tv <= math.MaxInt64 { - return int64(tv) - } - // TODO: What is the sensible thing to do here? 
- return math.MaxInt64 - case reflect.Float32, reflect.Float64: - return int64(val.Float()) - case reflect.Bool: - if val.Bool() == true { - return 1 - } - return 0 - default: - return 0 - } -} - -func max(a interface{}, i ...interface{}) int64 { - aa := toInt64(a) - for _, b := range i { - bb := toInt64(b) - if bb > aa { - aa = bb - } - } - return aa -} - -func min(a interface{}, i ...interface{}) int64 { - aa := toInt64(a) - for _, b := range i { - bb := toInt64(b) - if bb < aa { - aa = bb - } - } - return aa -} - -func until(count int) []int { - step := 1 - if count < 0 { - step = -1 - } - return untilStep(0, count, step) -} - -func untilStep(start, stop, step int) []int { - v := []int{} - - if stop < start { - if step >= 0 { - return v - } - for i := start; i > stop; i += step { - v = append(v, i) - } - return v - } - - if step <= 0 { - return v - } - for i := start; i < stop; i += step { - v = append(v, i) - } - return v -} - -func floor(a interface{}) float64 { - aa := toFloat64(a) - return math.Floor(aa) -} - -func ceil(a interface{}) float64 { - aa := toFloat64(a) - return math.Ceil(aa) -} - -func round(a interface{}, p int, r_opt ...float64) float64 { - roundOn := .5 - if len(r_opt) > 0 { - roundOn = r_opt[0] - } - val := toFloat64(a) - places := toFloat64(p) - - var round float64 - pow := math.Pow(10, places) - digit := pow * val - _, div := math.Modf(digit) - if div >= roundOn { - round = math.Ceil(digit) - } else { - round = math.Floor(digit) - } - return round / pow -} - -// converts unix octal to decimal -func toDecimal(v interface{}) int64 { - result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) - if err != nil { - return 0 - } - return result -} diff --git a/vendor/github.com/Masterminds/sprig/reflect.go b/vendor/github.com/Masterminds/sprig/reflect.go deleted file mode 100644 index 8a65c132f0..0000000000 --- a/vendor/github.com/Masterminds/sprig/reflect.go +++ /dev/null @@ -1,28 +0,0 @@ -package sprig - -import ( - "fmt" - "reflect" -) - -// typeIs returns true if the src is the type named in target. 
-func typeIs(target string, src interface{}) bool { - return target == typeOf(src) -} - -func typeIsLike(target string, src interface{}) bool { - t := typeOf(src) - return target == t || "*"+target == t -} - -func typeOf(src interface{}) string { - return fmt.Sprintf("%T", src) -} - -func kindIs(target string, src interface{}) bool { - return target == kindOf(src) -} - -func kindOf(src interface{}) string { - return reflect.ValueOf(src).Kind().String() -} diff --git a/vendor/github.com/Masterminds/sprig/regex.go b/vendor/github.com/Masterminds/sprig/regex.go deleted file mode 100644 index 2016f66336..0000000000 --- a/vendor/github.com/Masterminds/sprig/regex.go +++ /dev/null @@ -1,35 +0,0 @@ -package sprig - -import ( - "regexp" -) - -func regexMatch(regex string, s string) bool { - match, _ := regexp.MatchString(regex, s) - return match -} - -func regexFindAll(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.FindAllString(s, n) -} - -func regexFind(regex string, s string) string { - r := regexp.MustCompile(regex) - return r.FindString(s) -} - -func regexReplaceAll(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllString(s, repl) -} - -func regexReplaceAllLiteral(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllLiteralString(s, repl) -} - -func regexSplit(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.Split(s, n) -} diff --git a/vendor/github.com/Masterminds/sprig/semver.go b/vendor/github.com/Masterminds/sprig/semver.go deleted file mode 100644 index c2bf8a1fdf..0000000000 --- a/vendor/github.com/Masterminds/sprig/semver.go +++ /dev/null @@ -1,23 +0,0 @@ -package sprig - -import ( - sv2 "github.com/Masterminds/semver" -) - -func semverCompare(constraint, version string) (bool, error) { - c, err := sv2.NewConstraint(constraint) - if err != nil { - return false, err - } - - v, err := sv2.NewVersion(version) - if err != nil { - return false, err - } - - return c.Check(v), nil -} - -func semver(version string) (*sv2.Version, error) { - return sv2.NewVersion(version) -} diff --git a/vendor/github.com/Masterminds/sprig/strings.go b/vendor/github.com/Masterminds/sprig/strings.go deleted file mode 100644 index 943fa3e8ad..0000000000 --- a/vendor/github.com/Masterminds/sprig/strings.go +++ /dev/null @@ -1,233 +0,0 @@ -package sprig - -import ( - "encoding/base32" - "encoding/base64" - "fmt" - "reflect" - "strconv" - "strings" - - util "github.com/Masterminds/goutils" -) - -func base64encode(v string) string { - return base64.StdEncoding.EncodeToString([]byte(v)) -} - -func base64decode(v string) string { - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return err.Error() - } - return string(data) -} - -func base32encode(v string) string { - return base32.StdEncoding.EncodeToString([]byte(v)) -} - -func base32decode(v string) string { - data, err := base32.StdEncoding.DecodeString(v) - if err != nil { - return err.Error() - } - return string(data) -} - -func abbrev(width int, s string) string { - if width < 4 { - return s - } - r, _ := util.Abbreviate(s, width) - return r -} - -func abbrevboth(left, right int, s string) string { - if right < 4 || left > 0 && right < 7 { - return s - } - r, _ := util.AbbreviateFull(s, left, right) - return r -} -func initials(s string) string { - // Wrap this just to eliminate the var args, which templates don't do well. 
- return util.Initials(s) -} - -func randAlphaNumeric(count int) string { - // It is not possible, it appears, to actually generate an error here. - r, _ := util.CryptoRandomAlphaNumeric(count) - return r -} - -func randAlpha(count int) string { - r, _ := util.CryptoRandomAlphabetic(count) - return r -} - -func randAscii(count int) string { - r, _ := util.CryptoRandomAscii(count) - return r -} - -func randNumeric(count int) string { - r, _ := util.CryptoRandomNumeric(count) - return r -} - -func untitle(str string) string { - return util.Uncapitalize(str) -} - -func quote(str ...interface{}) string { - out := make([]string, 0, len(str)) - for _, s := range str { - if s != nil { - out = append(out, fmt.Sprintf("%q", strval(s))) - } - } - return strings.Join(out, " ") -} - -func squote(str ...interface{}) string { - out := make([]string, 0, len(str)) - for _, s := range str { - if s != nil { - out = append(out, fmt.Sprintf("'%v'", s)) - } - } - return strings.Join(out, " ") -} - -func cat(v ...interface{}) string { - v = removeNilElements(v) - r := strings.TrimSpace(strings.Repeat("%v ", len(v))) - return fmt.Sprintf(r, v...) -} - -func indent(spaces int, v string) string { - pad := strings.Repeat(" ", spaces) - return pad + strings.Replace(v, "\n", "\n"+pad, -1) -} - -func nindent(spaces int, v string) string { - return "\n" + indent(spaces, v) -} - -func replace(old, new, src string) string { - return strings.Replace(src, old, new, -1) -} - -func plural(one, many string, count int) string { - if count == 1 { - return one - } - return many -} - -func strslice(v interface{}) []string { - switch v := v.(type) { - case []string: - return v - case []interface{}: - b := make([]string, 0, len(v)) - for _, s := range v { - if s != nil { - b = append(b, strval(s)) - } - } - return b - default: - val := reflect.ValueOf(v) - switch val.Kind() { - case reflect.Array, reflect.Slice: - l := val.Len() - b := make([]string, 0, l) - for i := 0; i < l; i++ { - value := val.Index(i).Interface() - if value != nil { - b = append(b, strval(value)) - } - } - return b - default: - if v == nil { - return []string{} - } else { - return []string{strval(v)} - } - } - } -} - -func removeNilElements(v []interface{}) []interface{} { - newSlice := make([]interface{}, 0, len(v)) - for _, i := range v { - if i != nil { - newSlice = append(newSlice, i) - } - } - return newSlice -} - -func strval(v interface{}) string { - switch v := v.(type) { - case string: - return v - case []byte: - return string(v) - case error: - return v.Error() - case fmt.Stringer: - return v.String() - default: - return fmt.Sprintf("%v", v) - } -} - -func trunc(c int, s string) string { - if len(s) <= c { - return s - } - return s[0:c] -} - -func join(sep string, v interface{}) string { - return strings.Join(strslice(v), sep) -} - -func split(sep, orig string) map[string]string { - parts := strings.Split(orig, sep) - res := make(map[string]string, len(parts)) - for i, v := range parts { - res["_"+strconv.Itoa(i)] = v - } - return res -} - -func splitn(sep string, n int, orig string) map[string]string { - parts := strings.SplitN(orig, sep, n) - res := make(map[string]string, len(parts)) - for i, v := range parts { - res["_"+strconv.Itoa(i)] = v - } - return res -} - -// substring creates a substring of the given string. -// -// If start is < 0, this calls string[:end]. -// -// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] -// -// Otherwise, this calls string[start, end]. 
-func substring(start, end int, s string) string { - if start < 0 { - return s[:end] - } - if end < 0 || end > len(s) { - return s[start:] - } - return s[start:end] -} diff --git a/vendor/github.com/Masterminds/sprig/url.go b/vendor/github.com/Masterminds/sprig/url.go deleted file mode 100644 index 5f22d801f9..0000000000 --- a/vendor/github.com/Masterminds/sprig/url.go +++ /dev/null @@ -1,66 +0,0 @@ -package sprig - -import ( - "fmt" - "net/url" - "reflect" -) - -func dictGetOrEmpty(dict map[string]interface{}, key string) string { - value, ok := dict[key]; if !ok { - return "" - } - tp := reflect.TypeOf(value).Kind() - if tp != reflect.String { - panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) - } - return reflect.ValueOf(value).String() -} - -// parses given URL to return dict object -func urlParse(v string) map[string]interface{} { - dict := map[string]interface{}{} - parsedUrl, err := url.Parse(v) - if err != nil { - panic(fmt.Sprintf("unable to parse url: %s", err)) - } - dict["scheme"] = parsedUrl.Scheme - dict["host"] = parsedUrl.Host - dict["hostname"] = parsedUrl.Hostname() - dict["path"] = parsedUrl.Path - dict["query"] = parsedUrl.RawQuery - dict["opaque"] = parsedUrl.Opaque - dict["fragment"] = parsedUrl.Fragment - if parsedUrl.User != nil { - dict["userinfo"] = parsedUrl.User.String() - } else { - dict["userinfo"] = "" - } - - return dict -} - -// join given dict to URL string -func urlJoin(d map[string]interface{}) string { - resUrl := url.URL{ - Scheme: dictGetOrEmpty(d, "scheme"), - Host: dictGetOrEmpty(d, "host"), - Path: dictGetOrEmpty(d, "path"), - RawQuery: dictGetOrEmpty(d, "query"), - Opaque: dictGetOrEmpty(d, "opaque"), - Fragment: dictGetOrEmpty(d, "fragment"), - - } - userinfo := dictGetOrEmpty(d, "userinfo") - var user *url.Userinfo = nil - if userinfo != "" { - tempUrl, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) - if err != nil { - panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) - } - user = tempUrl.User - } - - resUrl.User = user - return resUrl.String() -} diff --git a/vendor/github.com/OpenPeeDeeP/depguard/.gitignore b/vendor/github.com/OpenPeeDeeP/depguard/.gitignore deleted file mode 100644 index 97cca67c67..0000000000 --- a/vendor/github.com/OpenPeeDeeP/depguard/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -.idea diff --git a/vendor/github.com/OpenPeeDeeP/depguard/README.md b/vendor/github.com/OpenPeeDeeP/depguard/README.md deleted file mode 100644 index 07e9f915df..0000000000 --- a/vendor/github.com/OpenPeeDeeP/depguard/README.md +++ /dev/null @@ -1,111 +0,0 @@ -# Depguard - -Go linter that checks package imports are in a list of acceptable packages. It -can also deny a list of packages and can do prefix or glob matching. -This allows you to allow imports from a whole organization or only -allow specific packages within a repository. It is recommended to use prefix -matching as it is faster than glob matching. The fewer glob matches the better. - -> If a pattern is matched by prefix it does not try to match via glob. - -## Install - -```bash -go get -u github.com/OpenPeeDeeP/depguard -``` - -## Config - -By default, Depguard looks for a file named `.depguard.json` in the current -current working directory. 
If it is somewhere else, pass in the `-c` flag with -the location of your configuration file. - -The following is an example configuration file. - -```json -{ - "type": "allowlist", - "packages": ["github.com/OpenPeeDeeP/depguard"], - "packageErrorMessages": { - "github.com/OpenPeeDeeP/depguards": "Please use \"github.com/OpenPeeDeeP/depguard\"," - }, - "inTests": ["github.com/stretchr/testify"], - "includeGoStdLib": true -} -``` - -- `type` can be either `allowlist` or `denylist`. This check is case insensitive. - If not specified the default is `denylist`. The values `whitelist` and `blacklist` - are also accepted for backwards compatibility. -- `packages` is a list of packages for the list type specified. -- `packageErrorMessages` is a mapping from packages to the error message to display -- `inTests` is a list of packages allowed/disallowed only in test files. -- Set `includeGoStdLib` (`includeGoRoot` for backwards compatibility) to true if you want to check the list against standard lib. - If not specified the default is false. - -### Ignore File Rules - -The configuration also allows us to specify rules to ignore certain files considered by the linter. This means that we need not apply package import checks across our entire code base. - -For example, consider the following configuration to block a test package: -```json -{ - "type": "denylist", - "packages": ["github.com/stretchr/testify"], - "inTests": ["github.com/stretchr/testify"] -} -``` - -We can use a `ignoreFileRules` field to write a configuration that only considers test files: -```json -{ - "type": "denylist", - "packages": ["github.com/stretchr/testify"], - "ignoreFileRules": ["!**/*_test.go"] -} -``` - -Or if we wanted to consider only non-test files: -```json -{ - "type": "denylist", - "packages": ["github.com/stretchr/testify"], - "ignoreFileRules": ["**/*_test.go"] -} -``` - -Like the `packages` field, the `ignoreFileRules` field can accept both string prefixes and string glob patterns. Note in the first example above, the use of the `!` character in front of the rule. This is a special character which signals that the linter should negate the rule. This allows for more precise control, but it is only available for glob patterns. - -## Gometalinter - -The binary installation of this linter can be used with -[Gometalinter](https://github.com/alecthomas/gometalinter). - -If you use a configuration file for Gometalinter then the following will need to -be added to your configuration file. - -```json -{ - "linters": { - "depguard": { - "command": "depguard -c path/to/config.json", - "pattern": "PATH:LINE:COL:MESSAGE", - "installFrom": "github.com/OpenPeeDeeP/depguard", - "isFast": true, - "partitionStrategy": "packages" - } - } -} -``` - -If you prefer the command line way the following will work for you as well. - -```bash -gometalinter --linter='depguard:depguard -c path/to/config.json:PATH:LINE:COL:MESSAGE' -``` - -## Golangci-lint - -This linter was built with -[Golangci-lint](https://github.com/golangci/golangci-lint) in mind. It is compatible -and read their docs to see how to implement all their linters, including this one. 
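The v1 README above describes prefix-before-glob matching. A minimal sketch of that rule, assuming gobwas/glob as the matcher (the same library the linter code below compiles patterns with); `matches` is a hypothetical helper for illustration, not part of depguard's API:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/gobwas/glob"
)

// matches mirrors the v1 rule: a pattern without glob metacharacters is a
// cheap prefix check; only patterns containing them are compiled and
// matched as globs, with '/' as the separator.
func matches(pattern, pkg string) bool {
	if !strings.ContainsAny(pattern, "!?*[]{}") {
		return strings.HasPrefix(pkg, pattern)
	}
	g, err := glob.Compile(pattern, '/')
	if err != nil {
		return false // a real linter would surface this error
	}
	return g.Match(pkg)
}

func main() {
	fmt.Println(matches("github.com/OpenPeeDeeP", "github.com/OpenPeeDeeP/depguard")) // true, via prefix
	fmt.Println(matches("github.com/*/depguard", "github.com/OpenPeeDeeP/depguard")) // true, via glob
}
```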
diff --git a/vendor/github.com/OpenPeeDeeP/depguard/depguard.go b/vendor/github.com/OpenPeeDeeP/depguard/depguard.go deleted file mode 100644 index d7011cd9f6..0000000000 --- a/vendor/github.com/OpenPeeDeeP/depguard/depguard.go +++ /dev/null @@ -1,313 +0,0 @@ -package depguard - -import ( - "go/build" - "go/token" - "io/ioutil" - "path" - "sort" - "strings" - "sync" - - "github.com/gobwas/glob" - "golang.org/x/tools/go/loader" -) - -// ListType states what kind of list is passed in. -type ListType int - -const ( - // LTBlacklist states the list given is a blacklist. (default) - LTBlacklist ListType = iota - // LTWhitelist states the list given is a whitelist. - LTWhitelist -) - -// StringToListType makes it easier to turn a string into a ListType. -// It assumes that the string representation is lower case. -var StringToListType = map[string]ListType{ - "allowlist": LTWhitelist, - "denylist": LTBlacklist, - "whitelist": LTWhitelist, - "blacklist": LTBlacklist, -} - -// Issue with the package with PackageName at the Position. -type Issue struct { - PackageName string - Position token.Position -} - -// Wrapper for glob patterns that allows for custom negation -type negatableGlob struct { - g glob.Glob - negate bool -} - -// Depguard checks imports to make sure they follow the given list and constraints. -type Depguard struct { - ListType ListType - IncludeGoRoot bool - - Packages []string - prefixPackages []string - globPackages []glob.Glob - - TestPackages []string - prefixTestPackages []string - globTestPackages []glob.Glob - - IgnoreFileRules []string - prefixIgnoreFileRules []string - globIgnoreFileRules []negatableGlob - - prefixRoot []string - - isInitialized bool - isInitializedMutex sync.Mutex -} - -// Run checks for dependencies given the program and validates them against -// Packages. 
-func (dg *Depguard) Run(config *loader.Config, prog *loader.Program) ([]*Issue, error) { - // Shortcut execution on an empty blacklist as that means every package is allowed - if dg.ListType == LTBlacklist && len(dg.Packages) == 0 { - return nil, nil - } - - if err := dg.initialize(config, prog); err != nil { - return nil, err - } - directImports, err := dg.createImportMap(prog) - if err != nil { - return nil, err - } - var issues []*Issue - for pkg, positions := range directImports { - for _, pos := range positions { - if ignoreFile(pos.Filename, dg.prefixIgnoreFileRules, dg.globIgnoreFileRules) { - continue - } - - prefixList, globList := dg.prefixPackages, dg.globPackages - if len(dg.TestPackages) > 0 && strings.Index(pos.Filename, "_test.go") != -1 { - prefixList, globList = dg.prefixTestPackages, dg.globTestPackages - } - - if dg.flagIt(pkg, prefixList, globList) { - issues = append(issues, &Issue{ - PackageName: pkg, - Position: pos, - }) - } - } - } - return issues, nil -} - -func (dg *Depguard) initialize(config *loader.Config, prog *loader.Program) error { - dg.isInitializedMutex.Lock() - defer dg.isInitializedMutex.Unlock() - - if dg.isInitialized { - return nil - } - - // parse ordinary guarded packages - for _, pkg := range dg.Packages { - if strings.ContainsAny(pkg, "!?*[]{}") { - g, err := glob.Compile(pkg, '/') - if err != nil { - return err - } - dg.globPackages = append(dg.globPackages, g) - } else { - dg.prefixPackages = append(dg.prefixPackages, pkg) - } - } - - // Sort the packages so we can have a faster search in the array - sort.Strings(dg.prefixPackages) - - // parse guarded tests packages - for _, pkg := range dg.TestPackages { - if strings.ContainsAny(pkg, "!?*[]{}") { - g, err := glob.Compile(pkg, '/') - if err != nil { - return err - } - dg.globTestPackages = append(dg.globTestPackages, g) - } else { - dg.prefixTestPackages = append(dg.prefixTestPackages, pkg) - } - } - - // Sort the test packages so we can have a faster search in the array - sort.Strings(dg.prefixTestPackages) - - // parse ignore file rules - for _, rule := range dg.IgnoreFileRules { - if strings.ContainsAny(rule, "!?*[]{}") { - ng := negatableGlob{} - if strings.HasPrefix(rule, "!") { - ng.negate = true - rule = rule[1:] // Strip out the leading '!' 
- } else { - ng.negate = false - } - - g, err := glob.Compile(rule, '/') - if err != nil { - return err - } - ng.g = g - - dg.globIgnoreFileRules = append(dg.globIgnoreFileRules, ng) - } else { - dg.prefixIgnoreFileRules = append(dg.prefixIgnoreFileRules, rule) - } - } - - // Sort the rules so we can have a faster search in the array - sort.Strings(dg.prefixIgnoreFileRules) - - if !dg.IncludeGoRoot { - var err error - dg.prefixRoot, err = listRootPrefixs(config.Build) - if err != nil { - return err - } - } - - dg.isInitialized = true - return nil -} - -func (dg *Depguard) createImportMap(prog *loader.Program) (map[string][]token.Position, error) { - importMap := make(map[string][]token.Position) - // For the directly imported packages - for _, imported := range prog.InitialPackages() { - // Go through their files - for _, file := range imported.Files { - // And populate a map of all direct imports and their positions - // This will filter out GoRoot depending on the Depguard.IncludeGoRoot - for _, fileImport := range file.Imports { - fileImportPath := cleanBasicLitString(fileImport.Path.Value) - if !dg.IncludeGoRoot && dg.isRoot(fileImportPath) { - continue - } - position := prog.Fset.Position(fileImport.Pos()) - positions, found := importMap[fileImportPath] - if !found { - importMap[fileImportPath] = []token.Position{ - position, - } - continue - } - importMap[fileImportPath] = append(positions, position) - } - } - } - return importMap, nil -} - -func ignoreFile(filename string, prefixList []string, negatableGlobList []negatableGlob) bool { - if strInPrefixList(filename, prefixList) { - return true - } - return strInNegatableGlobList(filename, negatableGlobList) -} - -func pkgInList(pkg string, prefixList []string, globList []glob.Glob) bool { - if strInPrefixList(pkg, prefixList) { - return true - } - return strInGlobList(pkg, globList) -} - -func strInPrefixList(str string, prefixList []string) bool { - // Idx represents where in the prefix slice the passed in string would go - // when sorted. -1 Just means that it would be at the very front of the slice. 
- idx := sort.Search(len(prefixList), func(i int) bool { - return prefixList[i] > str - }) - 1 - // This means that the string passed in has no way to be prefixed by anything - // in the prefix list as it is already smaller then everything - if idx == -1 { - return false - } - return strings.HasPrefix(str, prefixList[idx]) -} - -func strInGlobList(str string, globList []glob.Glob) bool { - for _, g := range globList { - if g.Match(str) { - return true - } - } - return false -} - -func strInNegatableGlobList(str string, negatableGlobList []negatableGlob) bool { - for _, ng := range negatableGlobList { - // Return true when: - // - Match is true and negate is off - // - Match is false and negate is on - if ng.g.Match(str) != ng.negate { - return true - } - } - return false -} - -// InList | WhiteList | BlackList -// y | | x -// n | x | -func (dg *Depguard) flagIt(pkg string, prefixList []string, globList []glob.Glob) bool { - return pkgInList(pkg, prefixList, globList) == (dg.ListType == LTBlacklist) -} - -func cleanBasicLitString(value string) string { - return strings.Trim(value, "\"\\") -} - -// We can do this as all imports that are not root are either prefixed with a domain -// or prefixed with `./` or `/` to dictate it is a local file reference -func listRootPrefixs(buildCtx *build.Context) ([]string, error) { - if buildCtx == nil { - buildCtx = &build.Default - } - root := path.Join(buildCtx.GOROOT, "src") - fs, err := ioutil.ReadDir(root) - if err != nil { - return nil, err - } - var pkgPrefix []string - for _, f := range fs { - if !f.IsDir() { - continue - } - pkgPrefix = append(pkgPrefix, f.Name()) - } - return pkgPrefix, nil -} - -func (dg *Depguard) isRoot(importPath string) bool { - // Idx represents where in the package slice the passed in package would go - // when sorted. -1 Just means that it would be at the very front of the slice. - idx := sort.Search(len(dg.prefixRoot), func(i int) bool { - return dg.prefixRoot[i] > importPath - }) - 1 - // This means that the package passed in has no way to be prefixed by anything - // in the package list as it is already smaller then everything - if idx == -1 { - return false - } - // if it is prefixed by a root prefix we need to check if it is an exact match - // or prefix with `/` as this could return false posative if the domain was - // `archive.com` for example as `archive` is a go root package. 
-	if strings.HasPrefix(importPath, dg.prefixRoot[idx]) {
-		return strings.HasPrefix(importPath, dg.prefixRoot[idx]+"/") || importPath == dg.prefixRoot[idx]
-	}
-	return false
-}
diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/.gitignore b/vendor/github.com/OpenPeeDeeP/depguard/v2/.gitignore
new file mode 100644
index 0000000000..e189bdb220
--- /dev/null
+++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/.gitignore
@@ -0,0 +1,15 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+.idea
+.null-ls*.go
diff --git a/vendor/github.com/OpenPeeDeeP/depguard/LICENSE b/vendor/github.com/OpenPeeDeeP/depguard/v2/LICENSE
similarity index 100%
rename from vendor/github.com/OpenPeeDeeP/depguard/LICENSE
rename to vendor/github.com/OpenPeeDeeP/depguard/v2/LICENSE
diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md b/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md
new file mode 100644
index 0000000000..2ccfa22c59
--- /dev/null
+++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md
@@ -0,0 +1,163 @@
+# Depguard
+
+A Go linter that checks that package imports are in a list of acceptable packages.
+This allows you to allow imports from a whole organization or only
+allow specific packages within a repository.
+
+## Install
+
+```bash
+go install github.com/OpenPeeDeeP/depguard@latest
+```
+
+## Config
+
+The Depguard binary looks for a file matching `^\.?depguard\.(yaml|yml|json|toml)$` in the current working directory, for example `.depguard.yml` or `depguard.toml`.
+
+The following is an example configuration file.
+
+```json
+{
+  "main": {
+    "files": [
+      "$all",
+      "!$test"
+    ],
+    "listMode": "Strict",
+    "allow": [
+      "$gostd",
+      "github.com/OpenPeeDeeP"
+    ],
+    "deny": {
+      "reflect": "Who needs reflection"
+    }
+  },
+  "tests": {
+    "files": [
+      "$test"
+    ],
+    "listMode": "Lax",
+    "deny": {
+      "github.com/stretchr/testify": "Please use standard library for tests"
+    }
+  }
+}
```

+
+- The top level is a map of lists. The key of the map is a name that shows up in
+the linter's output.
+- `files` - list of file globs that will match this list of settings to compare against
+- `allow` - list of allowed packages
+- `deny` - map of packages that are not allowed, where the value is a suggestion
+- `listMode` - the mode to use for package matching
+
+Files are matched using [Globs](https://github.com/gobwas/glob). If the files
+list is empty, then all files will match that list. Prefixing a file
+with an exclamation mark `!` will put that glob in a "don't match" list. A file
+will match a list if it is allowed and not denied.
+
+> You should always prefix a file glob with `**/`, as files are matched against absolute paths.
+
+Allow is a list of package prefixes to allow. A dollar sign `$` can be used at the end
+of a package to specify it must be an exact match only.
+
+Deny is a map where the key is a prefix of the package to deny, and the value
+is a suggestion on what to use instead. A dollar sign `$` can be used at the end
+of a package to specify it must be an exact match only.
+
+A prefix list just means that a package will match a value if the value is a
+prefix of the package. For example, the package `github.com/OpenPeeDeeP/depguard` will match
+a value of `github.com/OpenPeeDeeP` but won't match `github.com/OpenPeeDeeP/depguard/v2`.
+
+ListMode is used to determine the package matching priority. There are three
+different modes: Original, Strict, and Lax.
+ +Original is the original way that the package was written to use. It is not recommended +to stay with this and is only here for backwards compatibility. + +Strict, at its roots, is everything is denied unless in allowed. + +Lax, at its roots, is everything is allowed unless it is denied. + +There are cases where a package can be matched in both the allow and denied lists. +You may allow a subpackage but deny the root or vice versa. The `settings_tests.go` file +has many scenarios listed out under `TestListImportAllowed`. These tests will stay +up to date as features are added. + +### Variables + +There are variable replacements for each type of list (file or package). This is +to reduce repetition and tedious behaviors. + +#### File Variables + +> you can still use an exclamation mark `!` in front of a variable to say not to +use it. Example `!$test` will match any file that is not a go test file. + +- `$all` - matches all go files +- `$test` - matches all go test files + +#### Package Variables + +- `$gostd` - matches all of go's standard library (Pulled from GOROOT) + +### Example Configs + +Below: + +- non-test go files will match `Main` and test go files will match `Test`. +- both allow all of go standard library except for the `reflect` package which will +tell the user "Please don't use reflect package". +- go test files are also allowed to use https://github.com/stretchr/testify package +and any sub-package of it. + +```yaml +Main: + files: + - $all + - "!$test" + allow: + - $gostd + deny: + reflect: Please don't use reflect package +Test: + files: + - $test + allow: + - $gostd + - github.com/stretchr/testify + deny: + reflect: Please don't use reflect package +``` + +Below: + +- All go files will match `Main` +- Go files in internal will match both `Main` and `Internal` + +```yaml +Main: + files: + - $all +Internal: + files: + - "**/internal/**/*.go" +``` + +Below: + +- All packages are allowed except for `github.com/OpenPeeDeeP/depguard`. Though +`github.com/OpenPeeDeeP/depguard/v2` and `github.com/OpenPeeDeeP/depguard/somepackage` +would be allowed. + +```yaml +Main: + deny: + - github.com/OpenPeeDeeP/depguard$ +``` + +## Golangci-lint + +This linter was built with +[Golangci-lint](https://github.com/golangci/golangci-lint) in mind. It is compatible +and read their docs to see how to implement all their linters, including this one. diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go b/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go new file mode 100644 index 0000000000..2729091e8a --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go @@ -0,0 +1,95 @@ +package depguard + +import ( + "fmt" + "go/ast" + "path/filepath" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// NewAnalyzer creates a new analyzer from the settings passed in. +// This can fail if the passed in LinterSettings does not compile. +// Use NewUncompiledAnalyzer if you need control when the compile happens. +func NewAnalyzer(settings *LinterSettings) (*analysis.Analyzer, error) { + s, err := settings.compile() + if err != nil { + return nil, err + } + analyzer := newAnalyzer(s.run) + return analyzer, nil +} + +type UncompiledAnalyzer struct { + Analyzer *analysis.Analyzer + settings *LinterSettings +} + +// NewUncompiledAnalyzer creates a new analyzer from the settings passed in. +// This can never error unlike NewAnalyzer. +// It is advised to call the Compile method on the returned Analyzer before running. 
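+// Compiling once up front surfaces settings errors immediately and avoids
+// recompiling the settings on every run of the analyzer.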
+func NewUncompiledAnalyzer(settings *LinterSettings) *UncompiledAnalyzer { + return &UncompiledAnalyzer{ + Analyzer: newAnalyzer(settings.run), + settings: settings, + } +} + +// Compile the settings ahead of time so each subsuquent run of the analyzer doesn't +// need to do this work. +func (ua *UncompiledAnalyzer) Compile() error { + s, err := ua.settings.compile() + if err != nil { + return err + } + ua.Analyzer.Run = s.run + return nil +} + +func (settings LinterSettings) run(pass *analysis.Pass) (interface{}, error) { + s, err := settings.compile() + if err != nil { + return nil, err + } + return s.run(pass) +} + +func newAnalyzer(run func(*analysis.Pass) (interface{}, error)) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "depguard", + Doc: "Go linter that checks if package imports are in a list of acceptable packages", + URL: "https://github.com/OpenPeeDeeP/depguard", + Run: run, + RunDespiteErrors: false, + } +} + +func (s linterSettings) run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + // For Windows need to replace separator with '/' + fileName := filepath.ToSlash(pass.Fset.Position(file.Pos()).Filename) + lists := s.whichLists(fileName) + for _, imp := range file.Imports { + for _, l := range lists { + if allowed, sugg := l.importAllowed(rawBasicLit(imp.Path)); !allowed { + diag := analysis.Diagnostic{ + Pos: imp.Pos(), + End: imp.End(), + Message: fmt.Sprintf("import '%s' is not allowed from list '%s'", rawBasicLit(imp.Path), l.name), + } + if sugg != "" { + diag.Message = fmt.Sprintf("%s: %s", diag.Message, sugg) + diag.SuggestedFixes = append(diag.SuggestedFixes, analysis.SuggestedFix{Message: sugg}) + } + pass.Report(diag) + } + } + } + } + return nil, nil +} + +func rawBasicLit(lit *ast.BasicLit) string { + return strings.Trim(lit.Value, "\"") +} diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/internal/utils/errors.go b/vendor/github.com/OpenPeeDeeP/depguard/v2/internal/utils/errors.go new file mode 100644 index 0000000000..65325f6128 --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/internal/utils/errors.go @@ -0,0 +1,18 @@ +package utils + +import ( + "strings" +) + +type MultiError []error + +func (me MultiError) Error() string { + b := strings.Builder{} + for i, e := range me { + b.WriteString(e.Error()) + if i < len(me)-1 { + b.WriteByte('\n') + } + } + return b.String() +} diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/internal/utils/variables.go b/vendor/github.com/OpenPeeDeeP/depguard/v2/internal/utils/variables.go new file mode 100644 index 0000000000..3363bd8400 --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/internal/utils/variables.go @@ -0,0 +1,131 @@ +package utils + +import ( + "fmt" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strings" +) + +type Expander interface { + Expand() ([]string, error) +} + +type ExpanderMap map[string]Expander + +var ( + PathExpandable = ExpanderMap{ + "$all": &allExpander{}, + "$test": &testExpander{}, + } + PackageExpandable = ExpanderMap{ + "$gostd": &gostdExpander{}, + } +) + +type allExpander struct{} + +func (*allExpander) Expand() ([]string, error) { + return []string{"**/*.go"}, nil +} + +type testExpander struct{} + +func (*testExpander) Expand() ([]string, error) { + return []string{"**/*_test.go"}, nil +} + +type gostdExpander struct { + cache []string +} + +// We can do this as all imports that are not root are either prefixed with a domain +// or prefixed with `./` or `/` to dictate it is a local file reference 
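+// The directory names directly under GOROOT/src therefore enumerate the
+// standard-library package prefixes; Expand caches them after the first call.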
+func (e *gostdExpander) Expand() ([]string, error) { + if len(e.cache) != 0 { + return e.cache, nil + } + root := path.Join(findGOROOT(), "src") + fs, err := os.ReadDir(root) + if err != nil { + return nil, fmt.Errorf("could not read GOROOT directory: %w", err) + } + var pkgPrefix []string + for _, f := range fs { + if !f.IsDir() { + continue + } + pkgPrefix = append(pkgPrefix, f.Name()) + } + e.cache = pkgPrefix + return pkgPrefix, nil +} + +func findGOROOT() string { + // code borrowed from https://github.com/golang/tools/blob/86c93e8732cce300d0270bce23117456ce92bb17/cmd/godoc/goroot.go#L15-L30 + if env := os.Getenv("GOROOT"); env != "" { + return filepath.Clean(env) + } + def := filepath.Clean(runtime.GOROOT()) + if runtime.Compiler == "gccgo" { + // gccgo has no real GOROOT, and it certainly doesn't + // depend on the executable's location. + return def + } + out, err := exec.Command("go", "env", "GOROOT").Output() + if err != nil { + return def + } + return strings.TrimSpace(string(out)) +} + +func ExpandSlice(sl []string, exp ExpanderMap) ([]string, error) { + for i, s := range sl { + f, found := exp[s] + if !found { + continue + } + e, err := f.Expand() + if err != nil { + return nil, fmt.Errorf("couldn't expand %s: %w", s, err) + } + sl = insertSlice(sl, i, e...) + } + return sl, nil +} + +func ExpandMap(m map[string]string, exp ExpanderMap) error { + for k, v := range m { + f, found := exp[k] + if !found { + continue + } + e, err := f.Expand() + if err != nil { + return fmt.Errorf("couldn't expand %s: %w", k, err) + } + for _, ex := range e { + m[ex] = v + } + delete(m, k) + } + return nil +} + +func insertSlice(a []string, k int, b ...string) []string { + n := len(a) + len(b) - 1 + if n <= cap(a) { + a2 := a[:n] + copy(a2[k+len(b):], a[k+1:]) + copy(a2[k:], b) + return a2 + } + a2 := make([]string, n) + copy(a2, a[:k]) + copy(a2[k:], b) + copy(a2[k+len(b):], a[k+1:]) + return a2 +} diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go b/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go new file mode 100644 index 0000000000..311cacc889 --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go @@ -0,0 +1,240 @@ +package depguard + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/OpenPeeDeeP/depguard/v2/internal/utils" + "github.com/gobwas/glob" +) + +type List struct { + ListMode string `json:"listMode" yaml:"listMode" toml:"listMode" mapstructure:"listMode"` + Files []string `json:"files" yaml:"files" toml:"files" mapstructure:"files"` + Allow []string `json:"allow" yaml:"allow" toml:"allow" mapstructure:"allow"` + Deny map[string]string `json:"deny" yaml:"deny" toml:"deny" mapstructure:"deny"` +} + +type listMode int + +const ( + listModeOriginal listMode = iota + listModeStrict + listModeLax +) + +type list struct { + listMode listMode + name string + files []glob.Glob + negFiles []glob.Glob + allow []string + deny []string + suggestions []string +} + +func (l *List) compile() (*list, error) { + if l == nil { + return nil, nil + } + li := &list{} + var errs utils.MultiError + var err error + + // Determine List Mode + switch strings.ToLower(l.ListMode) { + case "": + li.listMode = listModeOriginal + case "original": + li.listMode = listModeOriginal + case "strict": + li.listMode = listModeStrict + case "lax": + li.listMode = listModeLax + default: + errs = append(errs, fmt.Errorf("%s is not a known list mode", l.ListMode)) + } + + // Compile Files + for _, f := range l.Files { + var negate bool + if len(f) > 0 && f[0] == '!' 
{ + negate = true + f = f[1:] + } + // Expand File if needed + fs, err := utils.ExpandSlice([]string{f}, utils.PathExpandable) + if err != nil { + errs = append(errs, err) + } + for _, exp := range fs { + g, err := glob.Compile(exp, '/') + if err != nil { + errs = append(errs, fmt.Errorf("%s could not be compiled: %w", exp, err)) + continue + } + if negate { + li.negFiles = append(li.negFiles, g) + continue + } + li.files = append(li.files, g) + } + } + + if len(l.Allow) > 0 { + // Expand Allow + l.Allow, err = utils.ExpandSlice(l.Allow, utils.PackageExpandable) + if err != nil { + errs = append(errs, err) + } + + // Sort Allow + li.allow = make([]string, len(l.Allow)) + copy(li.allow, l.Allow) + sort.Strings(li.allow) + } + + if l.Deny != nil { + // Expand Deny Map (to keep suggestions) + err = utils.ExpandMap(l.Deny, utils.PackageExpandable) + if err != nil { + errs = append(errs, err) + } + + // Split Deny Into Package Slice + li.deny = make([]string, 0, len(l.Deny)) + for pkg := range l.Deny { + li.deny = append(li.deny, pkg) + } + + // Sort Deny + sort.Strings(li.deny) + + // Populate Suggestions to match the Deny order + li.suggestions = make([]string, 0, len(li.deny)) + for _, dp := range li.deny { + li.suggestions = append(li.suggestions, strings.TrimSpace(l.Deny[dp])) + } + } + + // Populate the type of this list + if len(li.allow) == 0 && len(li.deny) == 0 { + errs = append(errs, errors.New("must have an Allow and/or Deny package list")) + } + + if len(errs) > 0 { + return nil, errs + } + return li, nil +} + +func (l *list) fileMatch(fileName string) bool { + inAllowed := len(l.files) == 0 || strInGlobList(fileName, l.files) + inDenied := strInGlobList(fileName, l.negFiles) + return inAllowed && !inDenied +} + +func (l *list) importAllowed(imp string) (bool, string) { + inAllowed, aIdx := strInPrefixList(imp, l.allow) + inDenied, dIdx := strInPrefixList(imp, l.deny) + var allowed bool + switch l.listMode { + case listModeOriginal: + inAllowed = len(l.allow) == 0 || inAllowed + allowed = inAllowed && !inDenied + case listModeStrict: + allowed = inAllowed && (!inDenied || len(l.allow[aIdx]) > len(l.deny[dIdx])) + case listModeLax: + allowed = !inDenied || (inAllowed && len(l.allow[aIdx]) > len(l.deny[dIdx])) + default: + allowed = false + } + sugg := "" + if !allowed && inDenied && dIdx != -1 { + sugg = l.suggestions[dIdx] + } + return allowed, sugg +} + +type LinterSettings map[string]*List + +type linterSettings []*list + +func (l LinterSettings) compile() (linterSettings, error) { + if len(l) == 0 { + // Only allow $gostd in all files + set := &List{ + Files: []string{"$all"}, + Allow: []string{"$gostd"}, + } + li, err := set.compile() + if err != nil { + return nil, err + } + li.name = "Main" + return linterSettings{li}, nil + } + names := make([]string, 0, len(l)) + for name := range l { + names = append(names, name) + } + sort.Strings(names) + li := make(linterSettings, 0, len(l)) + var errs utils.MultiError + for _, name := range names { + c, err := l[name].compile() + if err != nil { + errs = append(errs, err) + continue + } + if c == nil { + continue + } + c.name = name + li = append(li, c) + } + if len(errs) > 0 { + return nil, errs + } + + return li, nil +} + +func (ls linterSettings) whichLists(fileName string) []*list { + var matches []*list + for _, l := range ls { + if l.fileMatch(fileName) { + matches = append(matches, l) + } + } + return matches +} + +func strInGlobList(str string, globList []glob.Glob) bool { + for _, g := range globList { + if g.Match(str) { + 
return true + } + } + return false +} + +func strInPrefixList(str string, prefixList []string) (bool, int) { + // Idx represents where in the prefix slice the passed in string would go + // when sorted. -1 Just means that it would be at the very front of the slice. + idx := sort.Search(len(prefixList), func(i int) bool { + return strings.TrimRight(prefixList[i], "$") > str + }) - 1 + // This means that the string passed in has no way to be prefixed by anything + // in the prefix list as it is already smaller then everything + if idx == -1 { + return false, idx + } + ioc := prefixList[idx] + if ioc[len(ioc)-1] == '$' { + return str == ioc[:len(ioc)-1], idx + } + return strings.HasPrefix(str, prefixList[idx]), idx +} diff --git a/vendor/github.com/ProtonMail/go-crypto/AUTHORS b/vendor/github.com/ProtonMail/go-crypto/AUTHORS new file mode 100644 index 0000000000..2b00ddba0d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS new file mode 100644 index 0000000000..1fbd3e976f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/ProtonMail/go-crypto/LICENSE similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE rename to vendor/github.com/ProtonMail/go-crypto/LICENSE diff --git a/vendor/golang.org/x/xerrors/PATENTS b/vendor/github.com/ProtonMail/go-crypto/PATENTS similarity index 100% rename from vendor/golang.org/x/xerrors/PATENTS rename to vendor/github.com/ProtonMail/go-crypto/PATENTS diff --git a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go new file mode 100644 index 0000000000..c85e6befec --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go @@ -0,0 +1,381 @@ +package bitcurves + +// Copyright 2010 The Go Authors. All rights reserved. +// Copyright 2011 ThePiachu. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bitelliptic implements several Koblitz elliptic curves over prime +// fields. + +// This package operates, internally, on Jacobian coordinates. For a given +// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1) +// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole +// calculation can be performed within the transform (as in ScalarMult and +// ScalarBaseMult). But even for Add and Double, it's faster to apply and +// reverse the transform than to operate in affine coordinates. + +import ( + "crypto/elliptic" + "io" + "math/big" + "sync" +) + +// A BitCurve represents a Koblitz Curve with a=0. 
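+// Such curves satisfy y² = x³ + B over the prime field of order P.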
+// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html
+type BitCurve struct {
+	Name    string
+	P       *big.Int // the order of the underlying field
+	N       *big.Int // the order of the base point
+	B       *big.Int // the constant of the BitCurve equation
+	Gx, Gy  *big.Int // (x,y) of the base point
+	BitSize int      // the size of the underlying field
+}
+
+// Params returns the parameters of the given BitCurve (see BitCurve struct)
+func (bitCurve *BitCurve) Params() (cp *elliptic.CurveParams) {
+	cp = new(elliptic.CurveParams)
+	cp.Name = bitCurve.Name
+	cp.P = bitCurve.P
+	cp.N = bitCurve.N
+	cp.Gx = bitCurve.Gx
+	cp.Gy = bitCurve.Gy
+	cp.BitSize = bitCurve.BitSize
+	return cp
+}
+
+// IsOnCurve returns true if the given (x,y) lies on the BitCurve.
+func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool {
+	// y² = x³ + b
+	y2 := new(big.Int).Mul(y, y) //y²
+	y2.Mod(y2, bitCurve.P)       //y²%P
+
+	x3 := new(big.Int).Mul(x, x) //x²
+	x3.Mul(x3, x)                //x³
+
+	x3.Add(x3, bitCurve.B) //x³+B
+	x3.Mod(x3, bitCurve.P) //(x³+B)%P
+
+	return x3.Cmp(y2) == 0
+}
+
+// affineFromJacobian reverses the Jacobian transform. See the comment at the
+// top of the file.
+func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
+	if z.Cmp(big.NewInt(0)) == 0 {
+		panic("bitcurve: Can't convert to affine with Jacobian Z = 0")
+	}
+	// x = X / Z² mod P
+	zinv := new(big.Int).ModInverse(z, bitCurve.P)
+	zinvsq := new(big.Int).Mul(zinv, zinv)
+
+	xOut = new(big.Int).Mul(x, zinvsq)
+	xOut.Mod(xOut, bitCurve.P)
+	// y = Y / Z³ mod P
+	zinvsq.Mul(zinvsq, zinv)
+	yOut = new(big.Int).Mul(y, zinvsq)
+	yOut.Mod(yOut, bitCurve.P)
+	return xOut, yOut
+}
+
+// Add returns the sum of (x1,y1) and (x2,y2)
+func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
+	z := new(big.Int).SetInt64(1)
+	x, y, z := bitCurve.addJacobian(x1, y1, z, x2, y2, z)
+	return bitCurve.affineFromJacobian(x, y, z)
+}
+
+// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
+// (x2, y2, z2) and returns their sum, also in Jacobian form.
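+// The inputs are left unmodified and the returned coordinates are reduced
+// modulo P.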
+func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) { + // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl + z1z1 := new(big.Int).Mul(z1, z1) + z1z1.Mod(z1z1, bitCurve.P) + z2z2 := new(big.Int).Mul(z2, z2) + z2z2.Mod(z2z2, bitCurve.P) + + u1 := new(big.Int).Mul(x1, z2z2) + u1.Mod(u1, bitCurve.P) + u2 := new(big.Int).Mul(x2, z1z1) + u2.Mod(u2, bitCurve.P) + h := new(big.Int).Sub(u2, u1) + if h.Sign() == -1 { + h.Add(h, bitCurve.P) + } + i := new(big.Int).Lsh(h, 1) + i.Mul(i, i) + j := new(big.Int).Mul(h, i) + + s1 := new(big.Int).Mul(y1, z2) + s1.Mul(s1, z2z2) + s1.Mod(s1, bitCurve.P) + s2 := new(big.Int).Mul(y2, z1) + s2.Mul(s2, z1z1) + s2.Mod(s2, bitCurve.P) + r := new(big.Int).Sub(s2, s1) + if r.Sign() == -1 { + r.Add(r, bitCurve.P) + } + r.Lsh(r, 1) + v := new(big.Int).Mul(u1, i) + + x3 := new(big.Int).Set(r) + x3.Mul(x3, x3) + x3.Sub(x3, j) + x3.Sub(x3, v) + x3.Sub(x3, v) + x3.Mod(x3, bitCurve.P) + + y3 := new(big.Int).Set(r) + v.Sub(v, x3) + y3.Mul(y3, v) + s1.Mul(s1, j) + s1.Lsh(s1, 1) + y3.Sub(y3, s1) + y3.Mod(y3, bitCurve.P) + + z3 := new(big.Int).Add(z1, z2) + z3.Mul(z3, z3) + z3.Sub(z3, z1z1) + if z3.Sign() == -1 { + z3.Add(z3, bitCurve.P) + } + z3.Sub(z3, z2z2) + if z3.Sign() == -1 { + z3.Add(z3, bitCurve.P) + } + z3.Mul(z3, h) + z3.Mod(z3, bitCurve.P) + + return x3, y3, z3 +} + +// Double returns 2*(x,y) +func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { + z1 := new(big.Int).SetInt64(1) + return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1)) +} + +// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and +// returns its double, also in Jacobian form. +func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) { + // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l + + a := new(big.Int).Mul(x, x) //X1² + b := new(big.Int).Mul(y, y) //Y1² + c := new(big.Int).Mul(b, b) //B² + + d := new(big.Int).Add(x, b) //X1+B + d.Mul(d, d) //(X1+B)² + d.Sub(d, a) //(X1+B)²-A + d.Sub(d, c) //(X1+B)²-A-C + d.Mul(d, big.NewInt(2)) //2*((X1+B)²-A-C) + + e := new(big.Int).Mul(big.NewInt(3), a) //3*A + f := new(big.Int).Mul(e, e) //E² + + x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D + x3.Sub(f, x3) //F-2*D + x3.Mod(x3, bitCurve.P) + + y3 := new(big.Int).Sub(d, x3) //D-X3 + y3.Mul(e, y3) //E*(D-X3) + y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C + y3.Mod(y3, bitCurve.P) + + z3 := new(big.Int).Mul(y, z) //Y1*Z1 + z3.Mul(big.NewInt(2), z3) //3*Y1*Z1 + z3.Mod(z3, bitCurve.P) + + return x3, y3, z3 +} + +// TODO: double check if it is okay +// ScalarMult returns k*(Bx,By) where k is a number in big-endian form. +func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) { + // We have a slight problem in that the identity of the group (the + // point at infinity) cannot be represented in (x, y) form on a finite + // machine. Thus the standard add/double algorithm has to be tweaked + // slightly: our initial state is not the identity, but x, and we + // ignore the first true bit in |k|. If we don't find any true bits in + // |k|, then we return nil, nil, because we cannot return the identity + // element. 
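+	// Worked example: for k = {0x05} (binary 00000101), the leading zero bits
+	// are skipped, the first set bit leaves the accumulator at the base point,
+	// the following zero bit doubles it to 2P, and the final set bit doubles
+	// to 4P and then adds the base point, yielding 5P as double-and-add expects.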
+ + Bz := new(big.Int).SetInt64(1) + x := Bx + y := By + z := Bz + + seenFirstTrue := false + for _, byte := range k { + for bitNum := 0; bitNum < 8; bitNum++ { + if seenFirstTrue { + x, y, z = bitCurve.doubleJacobian(x, y, z) + } + if byte&0x80 == 0x80 { + if !seenFirstTrue { + seenFirstTrue = true + } else { + x, y, z = bitCurve.addJacobian(Bx, By, Bz, x, y, z) + } + } + byte <<= 1 + } + } + + if !seenFirstTrue { + return nil, nil + } + + return bitCurve.affineFromJacobian(x, y, z) +} + +// ScalarBaseMult returns k*G, where G is the base point of the group and k is +// an integer in big-endian form. +func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { + return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k) +} + +var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f} + +// TODO: double check if it is okay +// GenerateKey returns a public/private key pair. The private key is generated +// using the given reader, which must return random data. +func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) { + byteLen := (bitCurve.BitSize + 7) >> 3 + priv = make([]byte, byteLen) + + for x == nil { + _, err = io.ReadFull(rand, priv) + if err != nil { + return + } + // We have to mask off any excess bits in the case that the size of the + // underlying field is not a whole number of bytes. + priv[0] &= mask[bitCurve.BitSize%8] + // This is because, in tests, rand will return all zeros and we don't + // want to get the point at infinity and loop forever. + priv[1] ^= 0x42 + x, y = bitCurve.ScalarBaseMult(priv) + } + return +} + +// Marshal converts a point into the form specified in section 4.3.6 of ANSI +// X9.62. +func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte { + byteLen := (bitCurve.BitSize + 7) >> 3 + + ret := make([]byte, 1+2*byteLen) + ret[0] = 4 // uncompressed point + + xBytes := x.Bytes() + copy(ret[1+byteLen-len(xBytes):], xBytes) + yBytes := y.Bytes() + copy(ret[1+2*byteLen-len(yBytes):], yBytes) + return ret +} + +// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On +// error, x = nil. 
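+// Only the uncompressed form (a leading byte of 4) with exactly 1+2*byteLen
+// bytes is accepted.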
+func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) { + byteLen := (bitCurve.BitSize + 7) >> 3 + if len(data) != 1+2*byteLen { + return + } + if data[0] != 4 { // uncompressed form + return + } + x = new(big.Int).SetBytes(data[1 : 1+byteLen]) + y = new(big.Int).SetBytes(data[1+byteLen:]) + return +} + +//curve parameters taken from: +//http://www.secg.org/collateral/sec2_final.pdf + +var initonce sync.Once +var secp160k1 *BitCurve +var secp192k1 *BitCurve +var secp224k1 *BitCurve +var secp256k1 *BitCurve + +func initAll() { + initS160() + initS192() + initS224() + initS256() +} + +func initS160() { + // See SEC 2 section 2.4.1 + secp160k1 = new(BitCurve) + secp160k1.Name = "secp160k1" + secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16) + secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16) + secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16) + secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16) + secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F03C4FEE", 16) + secp160k1.BitSize = 160 +} + +func initS192() { + // See SEC 2 section 2.5.1 + secp192k1 = new(BitCurve) + secp192k1.Name = "secp192k1" + secp192k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", 16) + secp192k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 16) + secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16) + secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16) + secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16) + secp192k1.BitSize = 192 +} + +func initS224() { + // See SEC 2 section 2.6.1 + secp224k1 = new(BitCurve) + secp224k1.Name = "secp224k1" + secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16) + secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16) + secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16) + secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16) + secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16) + secp224k1.BitSize = 224 +} + +func initS256() { + // See SEC 2 section 2.7.1 + secp256k1 = new(BitCurve) + secp256k1.Name = "secp256k1" + secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16) + secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16) + secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16) + secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16) + secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16) + secp256k1.BitSize = 256 +} + +// S160 returns a BitCurve which implements secp160k1 (see SEC 2 section 2.4.1) +func S160() *BitCurve { + initonce.Do(initAll) + return secp160k1 +} + +// S192 returns a BitCurve which implements secp192k1 (see SEC 2 section 2.5.1) +func S192() *BitCurve { + initonce.Do(initAll) + return secp192k1 +} + +// S224 returns a BitCurve which 
implements secp224k1 (see SEC 2 section 2.6.1) +func S224() *BitCurve { + initonce.Do(initAll) + return secp224k1 +} + +// S256 returns a BitCurve which implements secp256k1 (see SEC 2 section 2.7.1) +func S256() *BitCurve { + initonce.Do(initAll) + return secp256k1 +} diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go new file mode 100644 index 0000000000..cb6676de24 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go @@ -0,0 +1,134 @@ +// Package brainpool implements Brainpool elliptic curves. +// Implementation of rcurves is from github.com/ebfe/brainpool +// Note that these curves are implemented with naive, non-constant time operations +// and are likely not suitable for environments where timing attacks are a concern. +package brainpool + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +var ( + once sync.Once + p256t1, p384t1, p512t1 *elliptic.CurveParams + p256r1, p384r1, p512r1 *rcurve +) + +func initAll() { + initP256t1() + initP384t1() + initP512t1() + initP256r1() + initP384r1() + initP512r1() +} + +func initP256t1() { + p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"} + p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16) + p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16) + p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16) + p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16) + p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16) + p256t1.BitSize = 256 +} + +func initP256r1() { + twisted := p256t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP256r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16) + params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16) + z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16) + p256r1 = newrcurve(twisted, params, z) +} + +func initP384t1() { + p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"} + p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16) + p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16) + p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16) + p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16) + p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16) + p384t1.BitSize = 384 +} + +func initP384r1() { + twisted := p384t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP384r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16) + params.Gy, _ =
new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16) + z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16) + p384r1 = newrcurve(twisted, params, z) +} + +func initP512t1() { + p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"} + p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16) + p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16) + p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16) + p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16) + p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16) + p512t1.BitSize = 512 +} + +func initP512r1() { + twisted := p512t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP512r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16) + params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16) + z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16) + p512r1 = newrcurve(twisted, params, z) +} + +// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4) +func P256t1() elliptic.Curve { + once.Do(initAll) + return p256t1 +} + +// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4) +func P256r1() elliptic.Curve { + once.Do(initAll) + return p256r1 +} + +// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6) +func P384t1() elliptic.Curve { + once.Do(initAll) + return p384t1 +} + +// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6) +func P384r1() elliptic.Curve { + once.Do(initAll) + return p384r1 +} + +// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7) +func P512t1() elliptic.Curve { + once.Do(initAll) + return p512t1 +} + +// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7) +func P512r1() elliptic.Curve { + once.Do(initAll) + return p512r1 +} diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go new file mode 100644 index 0000000000..7e291d6aa4 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go @@ -0,0 +1,83 @@ +package brainpool + +import ( + "crypto/elliptic" + "math/big" +) + +var _ elliptic.Curve = (*rcurve)(nil) + +type rcurve struct { + twisted elliptic.Curve + params *elliptic.CurveParams + z *big.Int + zinv *big.Int + z2 *big.Int + z3 *big.Int + zinv2 *big.Int + zinv3 *big.Int +} + 
+var ( + two = big.NewInt(2) + three = big.NewInt(3) +) + +func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve { + zinv := new(big.Int).ModInverse(z, params.P) + return &rcurve{ + twisted: twisted, + params: params, + z: z, + zinv: zinv, + z2: new(big.Int).Exp(z, two, params.P), + z3: new(big.Int).Exp(z, three, params.P), + zinv2: new(big.Int).Exp(zinv, two, params.P), + zinv3: new(big.Int).Exp(zinv, three, params.P), + } +} + +func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) { + var tx, ty big.Int + tx.Mul(x, curve.z2) + tx.Mod(&tx, curve.params.P) + ty.Mul(y, curve.z3) + ty.Mod(&ty, curve.params.P) + return &tx, &ty +} + +func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) { + var x, y big.Int + x.Mul(tx, curve.zinv2) + x.Mod(&x, curve.params.P) + y.Mul(ty, curve.zinv3) + y.Mod(&y, curve.params.P) + return &x, &y +} + +func (curve *rcurve) Params() *elliptic.CurveParams { + return curve.params +} + +func (curve *rcurve) IsOnCurve(x, y *big.Int) bool { + return curve.twisted.IsOnCurve(curve.toTwisted(x, y)) +} + +func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + tx2, ty2 := curve.toTwisted(x2, y2) + return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2)) +} + +func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1))) +} + +func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar)) +} + +func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar)) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go new file mode 100644 index 0000000000..3ae91d594c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go @@ -0,0 +1,162 @@ +// Copyright (C) 2019 ProtonTech AG + +// Package eax provides an implementation of the EAX +// (encrypt-authenticate-translate) mode of operation, as described in +// Bellare, Rogaway, and Wagner "THE EAX MODE OF OPERATION: A TWO-PASS +// AUTHENTICATED-ENCRYPTION SCHEME OPTIMIZED FOR SIMPLICITY AND EFFICIENCY." +// In FSE'04, volume 3017 of LNCS, 2004 +package eax + +import ( + "crypto/cipher" + "crypto/subtle" + "errors" + "github.com/ProtonMail/go-crypto/internal/byteutil" +) + +const ( + defaultTagSize = 16 + defaultNonceSize = 16 +) + +type eax struct { + block cipher.Block // Only AES-{128, 192, 256} supported + tagSize int // At least 12 bytes recommended + nonceSize int +} + +func (e *eax) NonceSize() int { + return e.nonceSize +} + +func (e *eax) Overhead() int { + return e.tagSize +} + +// NewEAX returns an EAX instance with AES-{KEYLENGTH} and default nonce and +// tag lengths. Supports {128, 192, 256}- bit key length. +func NewEAX(block cipher.Block) (cipher.AEAD, error) { + return NewEAXWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) +} + +// NewEAXWithNonceAndTagSize returns an EAX instance with AES-{keyLength} and +// given nonce and tag lengths in bytes. Panics on zero nonceSize and +// exceedingly long tags. +// +// It is recommended to use at least 12 bytes as tag length (see, for instance, +// NIST SP 800-38D). +// +// Only to be used for compatibility with existing cryptosystems with +// non-standard parameters. 
For all other cases, prefer NewEAX. +func NewEAXWithNonceAndTagSize( + block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { + if nonceSize < 1 { + return nil, eaxError("Cannot initialize EAX with nonceSize = 0") + } + if tagSize > block.BlockSize() { + return nil, eaxError("Custom tag length exceeds blocksize") + } + return &eax{ + block: block, + tagSize: tagSize, + nonceSize: nonceSize, + }, nil +} + +func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte { + if len(nonce) > e.nonceSize { + panic("crypto/eax: Nonce too long for this instance") + } + ret, out := byteutil.SliceForAppend(dst, len(plaintext)+e.tagSize) + omacNonce := e.omacT(0, nonce) + omacAdata := e.omacT(1, adata) + + // Encrypt message using CTR mode and omacNonce as IV + ctr := cipher.NewCTR(e.block, omacNonce) + ciphertextData := out[:len(plaintext)] + ctr.XORKeyStream(ciphertextData, plaintext) + + omacCiphertext := e.omacT(2, ciphertextData) + + tag := out[len(plaintext):] + for i := 0; i < e.tagSize; i++ { + tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] + } + return ret +} + +func (e *eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { + if len(nonce) > e.nonceSize { + panic("crypto/eax: Nonce too long for this instance") + } + if len(ciphertext) < e.tagSize { + return nil, eaxError("Ciphertext shorter than tag length") + } + sep := len(ciphertext) - e.tagSize + + // Compute tag + omacNonce := e.omacT(0, nonce) + omacAdata := e.omacT(1, adata) + omacCiphertext := e.omacT(2, ciphertext[:sep]) + + tag := make([]byte, e.tagSize) + for i := 0; i < e.tagSize; i++ { + tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] + } + + // Compare tags + if subtle.ConstantTimeCompare(ciphertext[sep:], tag) != 1 { + return nil, eaxError("Tag authentication failed") + } + + // Decrypt ciphertext + ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ctr := cipher.NewCTR(e.block, omacNonce) + ctr.XORKeyStream(out, ciphertext[:sep]) + + return ret[:sep], nil +} + +// Tweakable OMAC - Calls OMAC_K([t]_n || plaintext) +func (e *eax) omacT(t byte, plaintext []byte) []byte { + blockSize := e.block.BlockSize() + byteT := make([]byte, blockSize) + byteT[blockSize-1] = t + concat := append(byteT, plaintext...) + return e.omac(concat) +} + +func (e *eax) omac(plaintext []byte) []byte { + blockSize := e.block.BlockSize() + // L ← E_K(0^n); B ← 2L; P ← 4L + L := make([]byte, blockSize) + e.block.Encrypt(L, L) + B := byteutil.GfnDouble(L) + P := byteutil.GfnDouble(B) + + // CBC with IV = 0 + cbc := cipher.NewCBCEncrypter(e.block, make([]byte, blockSize)) + padded := e.pad(plaintext, B, P) + cbcCiphertext := make([]byte, len(padded)) + cbc.CryptBlocks(cbcCiphertext, padded) + + return cbcCiphertext[len(cbcCiphertext)-blockSize:] +} + +func (e *eax) pad(plaintext, B, P []byte) []byte { + // if |M| in {n, 2n, 3n, ...} + blockSize := e.block.BlockSize() + if len(plaintext) != 0 && len(plaintext)%blockSize == 0 { + return byteutil.RightXor(plaintext, B) + } + + // else return (M || 1 || 0^(n−1−(|M| % n))) xor→ P + ending := make([]byte, blockSize-len(plaintext)%blockSize) + ending[0] = 0x80 + padded := append(plaintext, ending...) 
+ return byteutil.RightXor(padded, P) +} + +func eaxError(err string) error { + return errors.New("crypto/eax: " + err) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go new file mode 100644 index 0000000000..ddb53d0790 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go @@ -0,0 +1,58 @@ +package eax + +// Test vectors from +// https://web.cs.ucdavis.edu/~rogaway/papers/eax.pdf +var testVectors = []struct { + msg, key, nonce, header, ciphertext string +}{ + {"", + "233952DEE4D5ED5F9B9C6D6FF80FF478", + "62EC67F9C3A4A407FCB2A8C49031A8B3", + "6BFB914FD07EAE6B", + "E037830E8389F27B025A2D6527E79D01"}, + {"F7FB", + "91945D3F4DCBEE0BF45EF52255F095A4", + "BECAF043B0A23D843194BA972C66DEBD", + "FA3BFD4806EB53FA", + "19DD5C4C9331049D0BDAB0277408F67967E5"}, + {"1A47CB4933", + "01F74AD64077F2E704C0F60ADA3DD523", + "70C3DB4F0D26368400A10ED05D2BFF5E", + "234A3463C1264AC6", + "D851D5BAE03A59F238A23E39199DC9266626C40F80"}, + {"481C9E39B1", + "D07CF6CBB7F313BDDE66B727AFD3C5E8", + "8408DFFF3C1A2B1292DC199E46B7D617", + "33CCE2EABFF5A79D", + "632A9D131AD4C168A4225D8E1FF755939974A7BEDE"}, + {"40D0C07DA5E4", + "35B6D0580005BBC12B0587124557D2C2", + "FDB6B06676EEDC5C61D74276E1F8E816", + "AEB96EAEBE2970E9", + "071DFE16C675CB0677E536F73AFE6A14B74EE49844DD"}, + {"4DE3B35C3FC039245BD1FB7D", + "BD8E6E11475E60B268784C38C62FEB22", + "6EAC5C93072D8E8513F750935E46DA1B", + "D4482D1CA78DCE0F", + "835BB4F15D743E350E728414ABB8644FD6CCB86947C5E10590210A4F"}, + {"8B0A79306C9CE7ED99DAE4F87F8DD61636", + "7C77D6E813BED5AC98BAA417477A2E7D", + "1A8C98DCD73D38393B2BF1569DEEFC19", + "65D2017990D62528", + "02083E3979DA014812F59F11D52630DA30137327D10649B0AA6E1C181DB617D7F2"}, + {"1BDA122BCE8A8DBAF1877D962B8592DD2D56", + "5FFF20CAFAB119CA2FC73549E20F5B0D", + "DDE59B97D722156D4D9AFF2BC7559826", + "54B9F04E6A09189A", + "2EC47B2C4954A489AFC7BA4897EDCDAE8CC33B60450599BD02C96382902AEF7F832A"}, + {"6CF36720872B8513F6EAB1A8A44438D5EF11", + "A4A4782BCFFD3EC5E7EF6D8C34A56123", + "B781FCF2F75FA5A8DE97A9CA48E522EC", + "899A175897561D7E", + "0DE18FD0FDD91E7AF19F1D8EE8733938B1E8E7F6D2231618102FDB7FE55FF1991700"}, + {"CA40D7446E545FFAED3BD12A740A659FFBBB3CEAB7", + "8395FCF1E95BEBD697BD010BC766AAC3", + "22E7ADD93CFC6393C57EC0B3C17D6B44", + "126735FCC320D25A", + "CB8920F87A6C75CFF39627B56E3ED197C552D295A7CFC46AFC253B4652B1AF3795B124AB6E"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go new file mode 100644 index 0000000000..4eb19f28d9 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go @@ -0,0 +1,131 @@ +// These vectors include key length in {128, 192, 256}, tag size 128, and +// random nonce, header, and plaintext lengths. + +// This file was automatically generated. 
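+//
+// For orientation, the vectors below exercise the package the way a caller
+// would (hypothetical code; error handling elided, assumes crypto/aes and
+// crypto/rand imports):
+//
+//	key := make([]byte, 16) // AES-128; 24- and 32-byte keys also work
+//	rand.Read(key)
+//	block, _ := aes.NewCipher(key)
+//	aead, _ := NewEAX(block) // 16-byte nonce and tag by default
+//	nonce := make([]byte, aead.NonceSize())
+//	rand.Read(nonce)
+//	ct := aead.Seal(nil, nonce, []byte("message"), []byte("header"))
+//	pt, err := aead.Open(nil, nonce, ct, []byte("header")) // pt == "message"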
+ +package eax + +var randomVectors = []struct { + key, nonce, header, plaintext, ciphertext string +}{ + {"DFDE093F36B0356E5A81F609786982E3", + "1D8AC604419001816905BA72B14CED7E", + "152A1517A998D7A24163FCDD146DE81AC347C8B97088F502093C1ABB8F6E33D9A219C34D7603A18B1F5ABE02E56661B7D7F67E81EC08C1302EF38D80A859486D450E94A4F26AD9E68EEBBC0C857A0FC5CF9E641D63D565A7E361BC8908F5A8DC8FD6", + "1C8EAAB71077FE18B39730A3156ADE29C5EE824C7EE86ED2A253B775603FB237116E654F6FEC588DD27F523A0E01246FE73FE348491F2A8E9ABC6CA58D663F71CDBCF4AD798BE46C42AE6EE8B599DB44A1A48D7BBBBA0F7D2750181E1C5E66967F7D57CBD30AFBDA5727", + "79E7E150934BBEBF7013F61C60462A14D8B15AF7A248AFB8A344EF021C1500E16666891D6E973D8BB56B71A371F12CA34660C4410C016982B20F547E3762A58B7BF4F20236CADCF559E2BE7D783B13723B2741FC7CDC8997D839E39A3DDD2BADB96743DD7049F1BDB0516A262869915B3F70498AFB7B191BF960"}, + {"F10619EF02E5D94D7550EB84ED364A21", + "8DC0D4F2F745BBAE835CC5574B942D20", + "FE561358F2E8DF7E1024FF1AE9A8D36EBD01352214505CB99D644777A8A1F6027FA2BDBFC529A9B91136D5F2416CFC5F0F4EC3A1AFD32BDDA23CA504C5A5CB451785FABF4DFE4CD50D817491991A60615B30286361C100A95D1712F2A45F8E374461F4CA2B", + "D7B5A971FC219631D30EFC3664AE3127D9CF3097DAD9C24AC7905D15E8D9B25B026B31D68CAE00975CDB81EB1FD96FD5E1A12E2BB83FA25F1B1D91363457657FC03875C27F2946C5", + "2F336ED42D3CC38FC61660C4CD60BA4BD438B05F5965D8B7B399D2E7167F5D34F792D318F94DB15D67463AC449E13D568CC09BFCE32A35EE3EE96A041927680AE329811811E27F2D1E8E657707AF99BA96D13A478D695D59"}, + {"429F514EFC64D98A698A9247274CFF45", + "976AA5EB072F912D126ACEBC954FEC38", + "A71D89DC5B6CEDBB7451A27C3C2CAE09126DB4C421", + "5632FE62AB1DC549D54D3BC3FC868ACCEDEFD9ECF5E9F8", + "848AE4306CA8C7F416F8707625B7F55881C0AB430353A5C967CDA2DA787F581A70E34DBEBB2385"}, + {"398138F309085F47F8457CDF53895A63", + "F8A8A7F2D28E5FFF7BBC2F24353F7A36", + "5D633C21BA7764B8855CAB586F3746E236AD486039C83C6B56EFA9C651D38A41D6B20DAEE3418BFEA44B8BD6", + "A3BBAA91920AF5E10659818B1B3B300AC79BFC129C8329E75251F73A66D3AE0128EB91D5031E0A65C329DB7D1E9C0493E268", + "D078097267606E5FB07CFB7E2B4B718172A82C6A4CEE65D549A4DFB9838003BD2FBF64A7A66988AC1A632FD88F9E9FBB57C5A78AD2E086EACBA3DB68511D81C2970A"}, + {"7A4151EBD3901B42CBA45DAFB2E931BA", + "0FC88ACEE74DD538040321C330974EB8", + "250464FB04733BAB934C59E6AD2D6AE8D662CBCFEFBE61E5A308D4211E58C4C25935B72C69107722E946BFCBF416796600542D76AEB73F2B25BF53BAF97BDEB36ED3A7A51C31E7F170EB897457E7C17571D1BA0A908954E9", + "88C41F3EBEC23FAB8A362D969CAC810FAD4F7CA6A7F7D0D44F060F92E37E1183768DD4A8C733F71C96058D362A39876D183B86C103DE", + "74A25B2182C51096D48A870D80F18E1CE15867778E34FCBA6BD7BFB3739FDCD42AD0F2D9F4EBA29085285C6048C15BCE5E5166F1F962D3337AA88E6062F05523029D0A7F0BF9"}, + {"BFB147E1CD5459424F8C0271FC0E0DC5", + "EABCC126442BF373969EA3015988CC45", + "4C0880E1D71AA2C7", + "BE1B5EC78FBF73E7A6682B21BA7E0E5D2D1C7ABE", + "5660D7C1380E2F306895B1402CB2D6C37876504276B414D120F4CF92FDDDBB293A238EA0"}, + {"595DD6F52D18BC2CA8EB4EDAA18D9FA3", + "0F84B5D36CF4BC3B863313AF3B4D2E97", + "30AE6CC5F99580F12A779D98BD379A60948020C0B6FBD5746B30BA3A15C6CD33DAF376C70A9F15B6C0EB410A93161F7958AE23", + "8EF3687A1642B070970B0B91462229D1D76ABC154D18211F7152AA9FF368", + "317C1DDB11417E5A9CC4DDE7FDFF6659A5AC4B31DE025212580A05CDAC6024D3E4AE7C2966E52B9129E9ECDBED86"}, + {"44E6F2DC8FDC778AD007137D11410F50", + "270A237AD977F7187AA6C158A0BAB24F", + "509B0F0EB12E2AA5C5BA2DE553C07FAF4CE0C9E926531AA709A3D6224FCB783ACCF1559E10B1123EBB7D52E8AB54E6B5352A9ED0D04124BF0E9D9BACFD7E32B817B2E625F5EE94A64EDE9E470DE7FE6886C19B294F9F828209FE257A78", + 
"8B3D7815DF25618A5D0C55A601711881483878F113A12EC36CF64900549A3199555528559DC118F789788A55FAFD944E6E99A9CA3F72F238CD3F4D88223F7A745992B3FAED1848", + "1CC00D79F7AD82FDA71B58D286E5F34D0CC4CEF30704E771CC1E50746BDF83E182B078DB27149A42BAE619DF0F85B0B1090AD55D3B4471B0D6F6ECCD09C8F876B30081F0E7537A9624F8AAF29DA85E324122EFB4D68A56"}, + {"BB7BC352A03044B4428D8DBB4B0701FDEC4649FD17B81452", + "8B4BBE26CCD9859DCD84884159D6B0A4", + "2212BEB0E78E0F044A86944CF33C8D5C80D9DBE1034BF3BCF73611835C7D3A52F5BD2D81B68FD681B68540A496EE5DA16FD8AC8824E60E1EC2042BE28FB0BFAD4E4B03596446BDD8C37D936D9B3D5295BE19F19CF5ACE1D33A46C952CE4DE5C12F92C1DD051E04AEED", + "9037234CC44FFF828FABED3A7084AF40FA7ABFF8E0C0EFB57A1CC361E18FC4FAC1AB54F3ABFE9FF77263ACE16C3A", + "A9391B805CCD956081E0B63D282BEA46E7025126F1C1631239C33E92AA6F92CD56E5A4C56F00FF9658E93D48AF4EF0EF81628E34AD4DB0CDAEDCD2A17EE7"}, + {"99C0AD703196D2F60A74E6B378B838B31F82EA861F06FC4E", + "92745C018AA708ECFEB1667E9F3F1B01", + "828C69F376C0C0EC651C67749C69577D589EE39E51404D80EBF70C8660A8F5FD375473F4A7C611D59CB546A605D67446CE2AA844135FCD78BB5FBC90222A00D42920BB1D7EEDFB0C4672554F583EF23184F89063CDECBE482367B5F9AF3ACBC3AF61392BD94CBCD9B64677", + "A879214658FD0A5B0E09836639BF82E05EC7A5EF71D4701934BDA228435C68AC3D5CEB54997878B06A655EEACEFB1345C15867E7FE6C6423660C8B88DF128EBD6BCD85118DBAE16E9252FFB204324E5C8F38CA97759BDBF3CB0083", + "51FE87996F194A2585E438B023B345439EA60D1AEBED4650CDAF48A4D4EEC4FC77DC71CC4B09D3BEEF8B7B7AF716CE2B4EFFB3AC9E6323C18AC35E0AA6E2BBBC8889490EB6226C896B0D105EAB42BFE7053CCF00ED66BA94C1BA09A792AA873F0C3B26C5C5F9A936E57B25"}, + {"7086816D00D648FB8304AA8C9E552E1B69A9955FB59B25D1", + "0F45CF7F0BF31CCEB85D9DA10F4D749F", + "93F27C60A417D9F0669E86ACC784FC8917B502DAF30A6338F11B30B94D74FEFE2F8BE1BBE2EAD10FAB7EED3C6F72B7C3ECEE1937C32ED4970A6404E139209C05", + "877F046601F3CBE4FB1491943FA29487E738F94B99AF206262A1D6FF856C9AA0B8D4D08A54370C98F8E88FA3DCC2B14C1F76D71B2A4C7963AEE8AF960464C5BEC8357AD00DC8", + "FE96906B895CE6A8E72BC72344E2C8BB3C63113D70EAFA26C299BAFE77A8A6568172EB447FB3E86648A0AF3512DEB1AAC0819F3EC553903BF28A9FB0F43411237A774BF9EE03E445D280FBB9CD12B9BAAB6EF5E52691"}, + {"062F65A896D5BF1401BADFF70E91B458E1F9BD4888CB2E4D", + "5B11EA1D6008EBB41CF892FCA5B943D1", + "BAF4FF5C8242", + "A8870E091238355984EB2F7D61A865B9170F440BFF999A5993DD41A10F4440D21FF948DDA2BF663B2E03AC3324492DC5E40262ECC6A65C07672353BE23E7FB3A9D79FF6AA38D97960905A38DECC312CB6A59E5467ECF06C311CD43ADC0B543EDF34FE8BE611F176460D5627CA51F8F8D9FED71F55C", + "B10E127A632172CF8AA7539B140D2C9C2590E6F28C3CB892FC498FCE56A34F732FBFF32E79C7B9747D9094E8635A0C084D6F0247F9768FB5FF83493799A9BEC6C39572120C40E9292C8C947AE8573462A9108C36D9D7112E6995AE5867E6C8BB387D1C5D4BEF524F391B9FD9F0A3B4BFA079E915BCD920185CFD38D114C558928BD7D47877"}, + {"38A8E45D6D705A11AF58AED5A1344896998EACF359F2E26A", + "FD82B5B31804FF47D44199B533D0CF84", + "DE454D4E62FE879F2050EE3E25853623D3E9AC52EEC1A1779A48CFAF5ECA0BFDE44749391866D1", + "B804", + "164BB965C05EBE0931A1A63293EDF9C38C27"}, + {"34C33C97C6D7A0850DA94D78A58DC61EC717CD7574833068", + "343BE00DA9483F05C14F2E9EB8EA6AE8", + "78312A43EFDE3CAE34A65796FF059A3FE15304EEA5CF1D9306949FE5BF3349D4977D4EBE76C040FE894C5949E4E4D6681153DA87FB9AC5062063CA2EA183566343362370944CE0362D25FC195E124FD60E8682E665D13F2229DDA3E4B2CB1DCA", + "CC11BB284B1153578E4A5ED9D937B869DAF00F5B1960C23455CA9CC43F486A3BE0B66254F1041F04FDF459C8640465B6E1D2CF899A381451E8E7FCB50CF87823BE77E24B132BBEEDC72E53369B275E1D8F49ECE59F4F215230AC4FE133FC80E4F634EE80BA4682B62C86", + 
"E7F703DC31A95E3A4919FF957836CB76C063D81702AEA4703E1C2BF30831E58C4609D626EC6810E12EAA5B930F049FF9EFC22C3E3F1EBD4A1FB285CB02A1AC5AD46B425199FC0A85670A5C4E3DAA9636C8F64C199F42F18AAC8EA7457FD377F322DD7752D7D01B946C8F0A97E6113F0D50106F319AFD291AAACE"}, + {"C6ECF7F053573E403E61B83052A343D93CBCC179D1E835BE", + "E280E13D7367042E3AA09A80111B6184", + "21486C9D7A9647", + "5F2639AFA6F17931853791CD8C92382BBB677FD72D0AB1A080D0E49BFAA21810E963E4FACD422E92F65CBFAD5884A60CD94740DF31AF02F95AA57DA0C4401B0ED906", + "5C51DB20755302070C45F52E50128A67C8B2E4ED0EACB7E29998CCE2E8C289DD5655913EC1A51CC3AABE5CDC2402B2BE7D6D4BF6945F266FBD70BA9F37109067157AE7530678B45F64475D4EBFCB5FFF46A5"}, + {"5EC6CF7401BC57B18EF154E8C38ACCA8959E57D2F3975FF5", + "656B41CB3F9CF8C08BAD7EBFC80BD225", + "6B817C2906E2AF425861A7EF59BA5801F143EE2A139EE72697CDE168B4", + "2C0E1DDC9B1E5389BA63845B18B1F8A1DB062037151BCC56EF7C21C0BB4DAE366636BBA975685D7CC5A94AFBE89C769016388C56FB7B57CE750A12B718A8BDCF70E80E8659A8330EFC8F86640F21735E8C80E23FE43ABF23507CE3F964AE4EC99D", + "ED780CF911E6D1AA8C979B889B0B9DC1ABE261832980BDBFB576901D9EF5AB8048998E31A15BE54B3E5845A4D136AD24D0BDA1C3006168DF2F8AC06729CB0818867398150020131D8F04EDF1923758C9EABB5F735DE5EA1758D4BC0ACFCA98AFD202E9839B8720253693B874C65586C6F0"}, + {"C92F678EB2208662F5BCF3403EC05F5961E957908A3E79421E1D25FC19054153", + "DA0F3A40983D92F2D4C01FED33C7A192", + "2B6E9D26DB406A0FAB47608657AA10EFC2B4AA5F459B29FF85AC9A40BFFE7AEB04F77E9A11FAAA116D7F6D4DA417671A9AB02C588E0EF59CB1BFB4B1CC931B63A3B3A159FCEC97A04D1E6F0C7E6A9CEF6B0ABB04758A69F1FE754DF4C2610E8C46B6CF413BDB31351D55BEDCB7B4A13A1C98E10984475E0F2F957853", + "F37326A80E08", + "83519E53E321D334F7C10B568183775C0E9AAE55F806"}, + {"6847E0491BE57E72995D186D50094B0B3593957A5146798FCE68B287B2FB37B5", + "3EE1182AEBB19A02B128F28E1D5F7F99", + "D9F35ABB16D776CE", + "DB7566ED8EA95BDF837F23DB277BAFBC5E70D1105ADFD0D9EF15475051B1EF94709C67DCA9F8D5", + "2CDCED0C9EBD6E2A508822A685F7DCD1CDD99E7A5FCA786C234E7F7F1D27EC49751AD5DCFA30C5EDA87C43CAE3B919B6BBCFE34C8EDA59"}, + {"82B019673642C08388D3E42075A4D5D587558C229E4AB8F660E37650C4C41A0A", + "336F5D681E0410FAE7B607246092C6DC", + "D430CBD8FE435B64214E9E9CDC5DE99D31CFCFB8C10AA0587A49DF276611", + "998404153AD77003E1737EDE93ED79859EE6DCCA93CB40C4363AA817ABF2DBBD46E42A14A7183B6CC01E12A577888141363D0AE011EB6E8D28C0B235", + "9BEF69EEB60BD3D6065707B7557F25292A8872857CFBD24F2F3C088E4450995333088DA50FD9121221C504DF1D0CD5EFE6A12666C5D5BB12282CF4C19906E9CFAB97E9BDF7F49DC17CFC384B"}, + {"747B2E269B1859F0622C15C8BAD6A725028B1F94B8DB7326948D1E6ED663A8BC", + "AB91F7245DDCE3F1C747872D47BE0A8A", + "3B03F786EF1DDD76E1D42646DA4CD2A5165DC5383CE86D1A0B5F13F910DC278A4E451EE0192CBA178E13B3BA27FDC7840DF73D2E104B", + "6B803F4701114F3E5FE21718845F8416F70F626303F545BE197189E0A2BA396F37CE06D389EB2658BC7D56D67868708F6D0D32", + "1570DDB0BCE75AA25D1957A287A2C36B1A5F2270186DA81BA6112B7F43B0F3D1D0ED072591DCF1F1C99BBB25621FC39B896FF9BD9413A2845363A9DCD310C32CF98E57"}, + {"02E59853FB29AEDA0FE1C5F19180AD99A12FF2F144670BB2B8BADF09AD812E0A", + "C691294EF67CD04D1B9242AF83DD1421", + "879334DAE3", + "1E17F46A98FEF5CBB40759D95354", + "FED8C3FF27DDF6313AED444A2985B36CBA268AAD6AAC563C0BA28F6DB5DB"}, + {"F6C1FB9B4188F2288FF03BD716023198C3582CF2A037FC2F29760916C2B7FCDB", + "4228DA0678CA3534588859E77DFF014C", + "D8153CAF35539A61DD8D05B3C9B44F01E564FB9348BCD09A1C23B84195171308861058F0A3CD2A55B912A3AAEE06FF4D356C77275828F2157C2FC7C115DA39E443210CCC56BEDB0CC99BBFB227ABD5CC454F4E7F547C7378A659EEB6A7E809101A84F866503CB18D4484E1FA09B3EC7FC75EB2E35270800AA7", + 
"23B660A779AD285704B12EC1C580387A47BEC7B00D452C6570", + "5AA642BBABA8E49849002A2FAF31DB8FC7773EFDD656E469CEC19B3206D4174C9A263D0A05484261F6"}, + {"8FF6086F1FADB9A3FBE245EAC52640C43B39D43F89526BB5A6EBA47710931446", + "943188480C99437495958B0AE4831AA9", + "AD5CD0BDA426F6EBA23C8EB23DC73FF9FEC173355EDBD6C9344C4C4383F211888F7CE6B29899A6801DF6B38651A7C77150941A", + "80CD5EA8D7F81DDF5070B934937912E8F541A5301877528EB41AB60C020968D459960ED8FB73083329841A", + "ABAE8EB7F36FCA2362551E72DAC890BA1BB6794797E0FC3B67426EC9372726ED4725D379EA0AC9147E48DCD0005C502863C2C5358A38817C8264B5"}, + {"A083B54E6B1FE01B65D42FCD248F97BB477A41462BBFE6FD591006C022C8FD84", + "B0490F5BD68A52459556B3749ACDF40E", + "8892E047DA5CFBBDF7F3CFCBD1BD21C6D4C80774B1826999234394BD3E513CC7C222BB40E1E3140A152F19B3802F0D036C24A590512AD0E8", + "D7B15752789DC94ED0F36778A5C7BBB207BEC32BAC66E702B39966F06E381E090C6757653C3D26A81EC6AD6C364D66867A334C91BB0B8A8A4B6EACDF0783D09010AEBA2DD2062308FE99CC1F", + "C071280A732ADC93DF272BF1E613B2BB7D46FC6665EF2DC1671F3E211D6BDE1D6ADDD28DF3AA2E47053FC8BB8AE9271EC8BC8B2CFFA320D225B451685B6D23ACEFDD241FE284F8ADC8DB07F456985B14330BBB66E0FB212213E05B3E"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go new file mode 100644 index 0000000000..affb74a764 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go @@ -0,0 +1,90 @@ +// Copyright (C) 2019 ProtonTech AG +// This file contains necessary tools for the aex and ocb packages. +// +// These functions SHOULD NOT be used elsewhere, since they are optimized for +// specific input nature in the EAX and OCB modes of operation. + +package byteutil + +// GfnDouble computes 2 * input in the field of 2^n elements. +// The irreducible polynomial in the finite field for n=128 is +// x^128 + x^7 + x^2 + x + 1 (equals 0x87) +// Constant-time execution in order to avoid side-channel attacks +func GfnDouble(input []byte) []byte { + if len(input) != 16 { + panic("Doubling in GFn only implemented for n = 128") + } + // If the first bit is zero, return 2L = L << 1 + // Else return (L << 1) xor 0^120 10000111 + shifted := ShiftBytesLeft(input) + shifted[15] ^= ((input[0] >> 7) * 0x87) + return shifted +} + +// ShiftBytesLeft outputs the byte array corresponding to x << 1 in binary. +func ShiftBytesLeft(x []byte) []byte { + l := len(x) + dst := make([]byte, l) + for i := 0; i < l-1; i++ { + dst[i] = (x[i] << 1) | (x[i+1] >> 7) + } + dst[l-1] = x[l-1] << 1 + return dst +} + +// ShiftNBytesLeft puts in dst the byte array corresponding to x << n in binary. +func ShiftNBytesLeft(dst, x []byte, n int) { + // Erase first n / 8 bytes + copy(dst, x[n/8:]) + + // Shift the remaining n % 8 bits + bits := uint(n % 8) + l := len(dst) + for i := 0; i < l-1; i++ { + dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8-bits)) + } + dst[l-1] = dst[l-1] << bits + + // Append trailing zeroes + dst = append(dst, make([]byte, n/8)...) 
+} + +// XorBytesMut assumes equal input length, replaces X with X XOR Y +func XorBytesMut(X, Y []byte) { + for i := 0; i < len(X); i++ { + X[i] ^= Y[i] + } +} + +// XorBytes assumes equal input length, puts X XOR Y into Z +func XorBytes(Z, X, Y []byte) { + for i := 0; i < len(X); i++ { + Z[i] = X[i] ^ Y[i] + } +} + +// RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X) +func RightXor(X, Y []byte) []byte { + offset := len(X) - len(Y) + xored := make([]byte, len(X)) + copy(xored, X) + for i := 0; i < len(Y); i++ { + xored[offset+i] ^= Y[i] + } + return xored +} + +// SliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func SliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go new file mode 100644 index 0000000000..5022285b44 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go @@ -0,0 +1,318 @@ +// Copyright (C) 2019 ProtonTech AG + +// Package ocb provides an implementation of the OCB (offset codebook) mode of +// operation, as described in RFC-7253 of the IRTF and in Rogaway, Bellare, +// Black and Krovetz - OCB: A BLOCK-CIPHER MODE OF OPERATION FOR EFFICIENT +// AUTHENTICATED ENCRYPTION (2003). +// Security considerations (from RFC-7253): A private key MUST NOT be used to +// encrypt more than 2^48 blocks. Tag length should be at least 12 bytes (a +// brute-force forging adversary succeeds after 2^{tag length} attempts). A +// single key SHOULD NOT be used to decrypt ciphertext with different tag +// lengths. Nonces need not be secret, but MUST NOT be reused. +// This package only supports underlying block ciphers with 128-bit blocks, +// such as AES-{128, 192, 256}, but may be extended to other sizes. +package ocb + +import ( + "bytes" + "crypto/cipher" + "crypto/subtle" + "errors" + "math/bits" + + "github.com/ProtonMail/go-crypto/internal/byteutil" +) + +type ocb struct { + block cipher.Block + tagSize int + nonceSize int + mask mask + // Optimized en/decrypt: For each nonce N used to en/decrypt, the 'Ktop' + // internal variable can be reused for en/decrypting with nonces sharing + // all but the last 6 bits with N. The prefix of the first nonce used to + // compute the new Ktop, and the Ktop value itself, are stored in + // reusableKtop. If using incremental nonces, this saves one block cipher + // call every 63 out of 64 OCB encryptions, and stores one nonce and one + // output of the block cipher in memory only. + reusableKtop reusableKtop +} + +type mask struct { + // L_*, L_$, (L_i)_{i ∈ N} + lAst []byte + lDol []byte + L [][]byte +} + +type reusableKtop struct { + noncePrefix []byte + Ktop []byte +} + +const ( + defaultTagSize = 16 + defaultNonceSize = 15 +) + +const ( + enc = iota + dec +) + +func (o *ocb) NonceSize() int { + return o.nonceSize +} + +func (o *ocb) Overhead() int { + return o.tagSize +} + +// NewOCB returns an OCB instance with the given block cipher and default +// tag and nonce sizes. 
+func NewOCB(block cipher.Block) (cipher.AEAD, error) { + return NewOCBWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) +} + +// NewOCBWithNonceAndTagSize returns an OCB instance with the given block +// cipher, nonce length, and tag length. Panics on zero nonceSize and +// exceedingly long tag size. +// +// It is recommended to use at least 12 bytes as tag length. +func NewOCBWithNonceAndTagSize( + block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { + if block.BlockSize() != 16 { + return nil, ocbError("Block cipher must have 128-bit blocks") + } + if nonceSize < 1 { + return nil, ocbError("Incorrect nonce length") + } + if nonceSize >= block.BlockSize() { + return nil, ocbError("Nonce length exceeds blocksize - 1") + } + if tagSize > block.BlockSize() { + return nil, ocbError("Custom tag length exceeds blocksize") + } + return &ocb{ + block: block, + tagSize: tagSize, + nonceSize: nonceSize, + mask: initializeMaskTable(block), + reusableKtop: reusableKtop{ + noncePrefix: nil, + Ktop: nil, + }, + }, nil +} + +func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte { + if len(nonce) > o.nonceSize { + panic("crypto/ocb: Incorrect nonce length given to OCB") + } + ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize) + o.crypt(enc, out, nonce, adata, plaintext) + return ret +} + +func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { + if len(nonce) > o.nonceSize { + panic("Nonce too long for this instance") + } + if len(ciphertext) < o.tagSize { + return nil, ocbError("Ciphertext shorter than tag length") + } + sep := len(ciphertext) - o.tagSize + ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ciphertextData := ciphertext[:sep] + tag := ciphertext[sep:] + o.crypt(dec, out, nonce, adata, ciphertextData) + if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 { + ret = ret[:sep] + return ret, nil + } + for i := range out { + out[i] = 0 + } + return nil, ocbError("Tag authentication failed") +} + +// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt) +// function. It returns the resulting plain/ciphertext with the tag appended. +func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { + // + // Consider X as a sequence of 128-bit blocks + // + // Note: For encryption (resp. decryption), X is the plaintext (resp., the + // ciphertext without the tag). + blockSize := o.block.BlockSize() + + // + // Nonce-dependent and per-encryption variables + // + // Zero out the last 6 bits of the nonce into truncatedNonce to see if Ktop + // is already computed. + truncatedNonce := make([]byte, len(nonce)) + copy(truncatedNonce, nonce) + truncatedNonce[len(truncatedNonce)-1] &= 192 + var Ktop []byte + if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) { + Ktop = o.reusableKtop.Ktop + } else { + // Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N + paddedNonce := append(make([]byte, blockSize-1-len(nonce)), 1) + paddedNonce = append(paddedNonce, truncatedNonce...) + paddedNonce[0] |= byte(((8 * o.tagSize) % (8 * blockSize)) << 1) + // Last 6 bits of paddedNonce are already zero. 
Encrypt into Ktop + paddedNonce[blockSize-1] &= 192 + Ktop = paddedNonce + o.block.Encrypt(Ktop, Ktop) + o.reusableKtop.noncePrefix = truncatedNonce + o.reusableKtop.Ktop = Ktop + } + + // Stretch = Ktop || ((lower half of Ktop) XOR (lower half of Ktop << 8)) + xorHalves := make([]byte, blockSize/2) + byteutil.XorBytes(xorHalves, Ktop[:blockSize/2], Ktop[1:1+blockSize/2]) + stretch := append(Ktop, xorHalves...) + bottom := int(nonce[len(nonce)-1] & 63) + offset := make([]byte, len(stretch)) + byteutil.ShiftNBytesLeft(offset, stretch, bottom) + offset = offset[:blockSize] + + // + // Process any whole blocks + // + // Note: For encryption Y is ciphertext || tag, for decryption Y is + // plaintext || tag. + checksum := make([]byte, blockSize) + m := len(X) / blockSize + for i := 0; i < m; i++ { + index := bits.TrailingZeros(uint(i + 1)) + if len(o.mask.L)-1 < index { + o.mask.extendTable(index) + } + byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))]) + blockX := X[i*blockSize : (i+1)*blockSize] + blockY := Y[i*blockSize : (i+1)*blockSize] + byteutil.XorBytes(blockY, blockX, offset) + switch instruction { + case enc: + o.block.Encrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockX) + case dec: + o.block.Decrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockY) + } + } + // + // Process any final partial block and compute raw tag + // + tag := make([]byte, blockSize) + if len(X)%blockSize != 0 { + byteutil.XorBytesMut(offset, o.mask.lAst) + pad := make([]byte, blockSize) + o.block.Encrypt(pad, offset) + chunkX := X[blockSize*m:] + chunkY := Y[blockSize*m : len(X)] + byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) + // P_* || bit(1) || zeroes(127) - len(P_*) + switch instruction { + case enc: + paddedY := append(chunkX, byte(128)) + paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...) + byteutil.XorBytesMut(checksum, paddedY) + case dec: + paddedX := append(chunkY, byte(128)) + paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...) + byteutil.XorBytesMut(checksum, paddedX) + } + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize]) + } else { + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m:], tag[:o.tagSize]) + } + return Y +} + +// This hash function is used to compute the tag. Per design, on empty input it +// returns a slice of zeros, of the same length as the underlying block cipher +// block size. 
+func (o *ocb) hash(adata []byte) []byte { + // + // Consider A as a sequence of 128-bit blocks + // + A := make([]byte, len(adata)) + copy(A, adata) + blockSize := o.block.BlockSize() + + // + // Process any whole blocks + // + sum := make([]byte, blockSize) + offset := make([]byte, blockSize) + m := len(A) / blockSize + for i := 0; i < m; i++ { + chunk := A[blockSize*i : blockSize*(i+1)] + index := bits.TrailingZeros(uint(i + 1)) + // If the mask table is too short + if len(o.mask.L)-1 < index { + o.mask.extendTable(index) + } + byteutil.XorBytesMut(offset, o.mask.L[index]) + byteutil.XorBytesMut(chunk, offset) + o.block.Encrypt(chunk, chunk) + byteutil.XorBytesMut(sum, chunk) + } + + // + // Process any final partial block; compute final hash value + // + if len(A)%blockSize != 0 { + byteutil.XorBytesMut(offset, o.mask.lAst) + // Pad block with 1 || 0 ^ 127 - bitlength(a) + ending := make([]byte, blockSize-len(A)%blockSize) + ending[0] = 0x80 + encrypted := append(A[blockSize*m:], ending...) + byteutil.XorBytesMut(encrypted, offset) + o.block.Encrypt(encrypted, encrypted) + byteutil.XorBytesMut(sum, encrypted) + } + return sum +} + +func initializeMaskTable(block cipher.Block) mask { + // + // Key-dependent variables + // + lAst := make([]byte, block.BlockSize()) + block.Encrypt(lAst, lAst) + lDol := byteutil.GfnDouble(lAst) + L := make([][]byte, 1) + L[0] = byteutil.GfnDouble(lDol) + + return mask{ + lAst: lAst, + lDol: lDol, + L: L, + } +} + +// Extends the L array of mask m up to L[limit], with L[i] = GfnDouble(L[i-1]) +func (m *mask) extendTable(limit int) { + for i := len(m.L); i <= limit; i++ { + m.L = append(m.L, byteutil.GfnDouble(m.L[i-1])) + } +} + +func ocbError(err string) error { + return errors.New("crypto/ocb: " + err) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go new file mode 100644 index 0000000000..0efaf344fd --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go @@ -0,0 +1,136 @@ +// In the test vectors provided by RFC 7253, the "bottom" +// internal variable, which defines "offset" for the first time, does not +// exceed 15. However, it can attain values up to 63. + +// These vectors include key length in {128, 192, 256}, tag size 128, and +// random nonce, header, and plaintext lengths. + +// This file was automatically generated. 
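+//
+// For orientation, a typical use of this package (hypothetical caller code;
+// error handling elided, assumes crypto/aes and crypto/rand imports):
+//
+//	block, _ := aes.NewCipher(key) // 16-, 24- or 32-byte key
+//	aead, _ := NewOCB(block)       // 15-byte nonce, 16-byte tag by default
+//	nonce := make([]byte, aead.NonceSize())
+//	rand.Read(nonce)
+//	ct := aead.Seal(nil, nonce, plaintext, adata)
+//	pt, err := aead.Open(nil, nonce, ct, adata)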
+ +package ocb + +var randomVectors = []struct { + key, nonce, header, plaintext, ciphertext string +}{ + + {"9438C5D599308EAF13F800D2D31EA7F0", + "C38EE4801BEBFFA1CD8635BE", + "0E507B7DADD8A98CDFE272D3CB6B3E8332B56AE583FB049C0874D4200BED16BD1A044182434E9DA0E841F182DFD5B3016B34641CED0784F1745F63AB3D0DA22D3351C9EF9A658B8081E24498EBF61FCE40DA6D8E184536", + "962D227786FB8913A8BAD5DC3250", + "EEDEF5FFA5986D1E3BF86DDD33EF9ADC79DCA06E215FA772CCBA814F63AD"}, + {"BA7DE631C7D6712167C6724F5B9A2B1D", + "35263EBDA05765DC0E71F1F5", + "0103257B4224507C0242FEFE821EA7FA42E0A82863E5F8B68F7D881B4B44FA428A2B6B21D2F591260802D8AB6D83", + "9D6D1FC93AE8A64E7889B7B2E3521EFA9B920A8DDB692E6F833DDC4A38AFA535E5E2A3ED82CB7E26404AB86C54D01C4668F28398C2DF33D5D561CBA1C8DCFA7A912F5048E545B59483C0E3221F54B14DAA2E4EB657B3BEF9554F34CAD69B2724AE962D3D8A", + "E93852D1985C5E775655E937FA79CE5BF28A585F2AF53A5018853B9634BE3C84499AC0081918FDCE0624494D60E25F76ACD6853AC7576E3C350F332249BFCABD4E73CEABC36BE4EDDA40914E598AE74174A0D7442149B26990899491BDDFE8FC54D6C18E83AE9E9A6FFBF5D376565633862EEAD88D"}, + {"2E74B25289F6FD3E578C24866E9C72A5", + "FD912F15025AF8414642BA1D1D", + "FB5FB8C26F365EEDAB5FE260C6E3CCD27806729C8335F146063A7F9EA93290E56CF84576EB446350D22AD730547C267B1F0BBB97EB34E1E2C41A", + "6C092EBF78F76EE8C1C6E592277D9545BA16EDB67BC7D8480B9827702DC2F8A129E2B08A2CE710CA7E1DA45CE162BB6CD4B512E632116E2211D3C90871EFB06B8D4B902681C7FB", + "6AC0A77F26531BF4F354A1737F99E49BE32ECD909A7A71AD69352906F54B08A9CE9B8CA5D724CBFFC5673437F23F630697F3B84117A1431D6FA8CC13A974FB4AD360300522E09511B99E71065D5AC4BBCB1D791E864EF4"}, + {"E7EC507C802528F790AFF5303A017B17", + "4B97A7A568940A9E3CE7A99E93031E", + "28349BDC5A09390C480F9B8AA3EDEA3DDB8B9D64BCA322C570B8225DF0E31190DAB25A4014BA39519E02ABFB12B89AA28BBFD29E486E7FB28734258C817B63CED9912DBAFEBB93E2798AB2890DE3B0ACFCFF906AB15563EF7823CE83D27CDB251195E22BD1337BCBDE65E7C2C427321C463C2777BFE5AEAA", + "9455B3EA706B74", + "7F33BA3EA848D48A96B9530E26888F43EBD4463C9399B6"}, + {"6C928AA3224736F28EE7378DE0090191", + "8936138E2E4C6A13280017A1622D", + "6202717F2631565BDCDC57C6584543E72A7C8BD444D0D108ED35069819633C", + "DA0691439E5F035F3E455269D14FE5C201C8C9B0A3FE2D3F86BCC59387C868FE65733D388360B31E3CE28B4BF6A8BE636706B536D5720DB66B47CF1C7A5AFD6F61E0EF90F1726D6B0E169F9A768B2B7AE4EE00A17F630AC905FCAAA1B707FFF25B3A1AAE83B504837C64A5639B2A34002B300EC035C9B43654DA55", + "B8804D182AB0F0EEB464FA7BD1329AD6154F982013F3765FEDFE09E26DAC078C9C1439BFC1159D6C02A25E3FF83EF852570117B315852AD5EE20E0FA3AA0A626B0E43BC0CEA38B44579DD36803455FB46989B90E6D229F513FD727AF8372517E9488384C515D6067704119C931299A0982EDDFB9C2E86A90C450C077EB222511EC9CCABC9FCFDB19F70088"}, + {"ECEA315CA4B3F425B0C9957A17805EA4", + "664CDAE18403F4F9BA13015A44FC", + "642AFB090D6C6DB46783F08B01A3EF2A8FEB5736B531EAC226E7888FCC8505F396818F83105065FACB3267485B9E5E4A0261F621041C08FCCB2A809A49AB5252A91D0971BCC620B9D614BD77E57A0EED2FA5", + "6852C31F8083E20E364CEA21BB7854D67CEE812FE1C9ED2425C0932A90D3780728D1BB", + "2ECEF962A9695A463ADABB275BDA9FF8B2BA57AEC2F52EFFB700CD9271A74D2A011C24AEA946051BD6291776429B7E681BA33E"}, + {"4EE616C4A58AAA380878F71A373461F6", + "91B8C9C176D9C385E9C47E52", + "CDA440B7F9762C572A718AC754EDEECC119E5EE0CCB9FEA4FFB22EEE75087C032EBF3DA9CDD8A28CC010B99ED45143B41A4BA50EA2A005473F89639237838867A57F23B0F0ED3BF22490E4501DAC9C658A9B9F", + "D6E645FA9AE410D15B8123FD757FA356A8DBE9258DDB5BE88832E615910993F497EC", + "B70ED7BF959FB2AAED4F36174A2A99BFB16992C8CDF369C782C4DB9C73DE78C5DB8E0615F647243B97ACDB24503BC9CADC48"}, + {"DCD475773136C830D5E3D0C5FE05B7FF", 
+ "BB8E1FBB483BE7616A922C4A", + "36FEF2E1CB29E76A6EA663FC3AF66ECD7404F466382F7B040AABED62293302B56E8783EF7EBC21B4A16C3E78A7483A0A403F253A2CDC5BBF79DC3DAE6C73F39A961D8FBBE8D41B", + "441E886EA38322B2437ECA7DEB5282518865A66780A454E510878E61BFEC3106A3CD93D2A02052E6F9E1832F9791053E3B76BF4C07EFDD6D4106E3027FABB752E60C1AA425416A87D53938163817A1051EBA1D1DEEB4B9B25C7E97368B52E5911A31810B0EC5AF547559B6142D9F4C4A6EF24A4CF75271BF9D48F62B", + "1BE4DD2F4E25A6512C2CC71D24BBB07368589A94C2714962CD0ACE5605688F06342587521E75F0ACAFFD86212FB5C34327D238DB36CF2B787794B9A4412E7CD1410EA5DDD2450C265F29CF96013CD213FD2880657694D718558964BC189B4A84AFCF47EB012935483052399DBA5B088B0A0477F20DFE0E85DCB735E21F22A439FB837DD365A93116D063E607"}, + {"3FBA2B3D30177FFE15C1C59ED2148BB2C091F5615FBA7C07", + "FACF804A4BEBF998505FF9DE", + "8213B9263B2971A5BDA18DBD02208EE1", + "15B323926993B326EA19F892D704439FC478828322AF72118748284A1FD8A6D814E641F70512FD706980337379F31DC63355974738D7FEA87AD2858C0C2EBBFBE74371C21450072373C7B651B334D7C4D43260B9D7CCD3AF9EDB", + "6D35DC1469B26E6AAB26272A41B46916397C24C485B61162E640A062D9275BC33DDCFD3D9E1A53B6C8F51AC89B66A41D59B3574197A40D9B6DCF8A4E2A001409C8112F16B9C389E0096179DB914E05D6D11ED0005AD17E1CE105A2F0BAB8F6B1540DEB968B7A5428FF44"}, + {"53B52B8D4D748BCDF1DDE68857832FA46227FA6E2F32EFA1", + "0B0EF53D4606B28D1398355F", + "F23882436349094AF98BCACA8218E81581A043B19009E28EFBF2DE37883E04864148CC01D240552CA8844EC1456F42034653067DA67E80F87105FD06E14FF771246C9612867BE4D215F6D761", + "F15030679BD4088D42CAC9BF2E9606EAD4798782FA3ED8C57EBE7F84A53236F51B25967C6489D0CD20C9EEA752F9BC", + "67B96E2D67C3729C96DAEAEDF821D61C17E648643A2134C5621FEC621186915AD80864BFD1EB5B238BF526A679385E012A457F583AFA78134242E9D9C1B4E4"}, + {"0272DD80F23399F49BFC320381A5CD8225867245A49A7D41", + "5C83F4896D0738E1366B1836", + "69B0337289B19F73A12BAEEA857CCAF396C11113715D9500CCCF48BA08CFF12BC8B4BADB3084E63B85719DB5058FA7C2C11DEB096D7943CFA7CAF5", + "C01AD10FC8B562CD17C7BC2FAB3E26CBDFF8D7F4DEA816794BBCC12336991712972F52816AABAB244EB43B0137E2BAC1DD413CE79531E78BEF782E6B439612BB3AEF154DE3502784F287958EBC159419F9EBA27916A28D6307324129F506B1DE80C1755A929F87", + "FEFE52DD7159C8DD6E8EC2D3D3C0F37AB6CB471A75A071D17EC4ACDD8F3AA4D7D4F7BB559F3C09099E3D9003E5E8AA1F556B79CECDE66F85B08FA5955E6976BF2695EA076388A62D2AD5BAB7CBF1A7F3F4C8D5CDF37CDE99BD3E30B685D9E5EEE48C7C89118EF4878EB89747F28271FA2CC45F8E9E7601"}, + {"3EEAED04A455D6E5E5AB53CFD5AFD2F2BC625C7BF4BE49A5", + "36B88F63ADBB5668588181D774", + "D367E3CB3703E762D23C6533188EF7028EFF9D935A3977150361997EC9DEAF1E4794BDE26AA8B53C124980B1362EC86FCDDFC7A90073171C1BAEE351A53234B86C66E8AB92FAE99EC6967A6D3428892D80", + "573454C719A9A55E04437BF7CBAAF27563CCCD92ADD5E515CD63305DFF0687E5EEF790C5DCA5C0033E9AB129505E2775438D92B38F08F3B0356BA142C6F694", + "E9F79A5B432D9E682C9AAA5661CFC2E49A0FCB81A431E54B42EB73DD3BED3F377FEC556ABA81624BA64A5D739AD41467460088F8D4F442180A9382CA635745473794C382FCDDC49BA4EB6D8A44AE3C"}, + {"B695C691538F8CBD60F039D0E28894E3693CC7C36D92D79D", + "BC099AEB637361BAC536B57618", + "BFFF1A65AE38D1DC142C71637319F5F6508E2CB33C9DCB94202B359ED5A5ED8042E7F4F09231D32A7242976677E6F4C549BF65FADC99E5AF43F7A46FD95E16C2", + "081DF3FD85B415D803F0BE5AC58CFF0023FDDED99788296C3731D8", + "E50C64E3614D94FE69C47092E46ACC9957C6FEA2CCBF96BC62FBABE7424753C75F9C147C42AE26FE171531"}, + {"C9ACBD2718F0689A1BE9802A551B6B8D9CF5614DAF5E65ED", + "B1B0AAF373B8B026EB80422051D8", + "6648C0E61AC733C76119D23FB24548D637751387AA2EAE9D80E912B7BD486CAAD9EAF4D7A5FE2B54AAD481E8EC94BB4D558000896E2010462B70C9FED1E7273080D1", + 
"189F591F6CB6D59AFEDD14C341741A8F1037DC0DF00FC57CE65C30F49E860255CEA5DC6019380CC0FE8880BC1A9E685F41C239C38F36E3F2A1388865C5C311059C0A", + "922A5E949B61D03BE34AB5F4E58607D4504EA14017BB363DAE3C873059EA7A1C77A746FB78981671D26C2CF6D9F24952D510044CE02A10177E9DB42D0145211DFE6E84369C5E3BC2669EAB4147B2822895F9"}, + {"7A832BD2CF5BF4919F353CE2A8C86A5E406DA2D52BE16A72", + "2F2F17CECF7E5A756D10785A3CB9DB", + "61DA05E3788CC2D8405DBA70C7A28E5AF699863C9F72E6C6770126929F5D6FA267F005EBCF49495CB46400958A3AE80D1289D1C671", + "44E91121195A41AF14E8CFDBD39A4B517BE0DF1A72977ED8A3EEF8EEDA1166B2EB6DB2C4AE2E74FA0F0C74537F659BFBD141E5DDEC67E64EDA85AABD3F52C85A785B9FB3CECD70E7DF", + "BEDF596EA21288D2B84901E188F6EE1468B14D5161D3802DBFE00D60203A24E2AB62714BF272A45551489838C3A7FEAADC177B591836E73684867CCF4E12901DCF2064058726BBA554E84ADC5136F507E961188D4AF06943D3"}, + {"1508E8AE9079AA15F1CEC4F776B4D11BCCB061B58AA56C18", + "BCA625674F41D1E3AB47672DC0C3", + "8B12CF84F16360F0EAD2A41BC021530FFCEC7F3579CAE658E10E2D3D81870F65AFCED0C77C6C4C6E6BA424FF23088C796BA6195ABA35094BF1829E089662E7A95FC90750AE16D0C8AFA55DAC789D7735B970B58D4BE7CEC7341DA82A0179A01929C27A59C5063215B859EA43", + "E525422519ECE070E82C", + "B47BC07C3ED1C0A43BA52C43CBACBCDBB29CAF1001E09FDF7107"}, + {"7550C2761644E911FE9ADD119BAC07376BEA442845FEAD876D7E7AC1B713E464", + "36D2EC25ADD33CDEDF495205BBC923", + "7FCFE81A3790DE97FFC3DE160C470847EA7E841177C2F759571CBD837EA004A6CA8C6F4AEBFF2E9FD552D73EB8A30705D58D70C0B67AEEA280CBBF0A477358ACEF1E7508F2735CD9A0E4F9AC92B8C008F575D3B6278F1C18BD01227E3502E5255F3AB1893632AD00C717C588EF652A51A43209E7EE90", + "2B1A62F8FDFAA3C16470A21AD307C9A7D03ADE8EF72C69B06F8D738CDE578D7AEFD0D40BD9C022FB9F580DF5394C998ACCCEFC5471A3996FB8F1045A81FDC6F32D13502EA65A211390C8D882B8E0BEFD8DD8CBEF51D1597B124E9F7F", + "C873E02A22DB89EB0787DB6A60B99F7E4A0A085D5C4232A81ADCE2D60AA36F92DDC33F93DD8640AC0E08416B187FB382B3EC3EE85A64B0E6EE41C1366A5AD2A282F66605E87031CCBA2FA7B2DA201D975994AADE3DD1EE122AE09604AD489B84BF0C1AB7129EE16C6934850E"}, + {"A51300285E554FDBDE7F771A9A9A80955639DD87129FAEF74987C91FB9687C71", + "81691D5D20EC818FCFF24B33DECC", + "C948093218AA9EB2A8E44A87EEA73FC8B6B75A196819A14BD83709EA323E8DF8B491045220E1D88729A38DBCFFB60D3056DAD4564498FD6574F74512945DEB34B69329ACED9FFC05D5D59DFCD5B973E2ACAFE6AD1EF8BBBC49351A2DD12508ED89ED", + "EB861165DAF7625F827C6B574ED703F03215", + "C6CD1CE76D2B3679C1B5AA1CFD67CCB55444B6BFD3E22C81CBC9BB738796B83E54E3"}, + {"8CE0156D26FAEB7E0B9B800BBB2E9D4075B5EAC5C62358B0E7F6FCE610223282", + "D2A7B94DD12CDACA909D3AD7", + "E021A78F374FC271389AB9A3E97077D755", + "7C26000B58929F5095E1CEE154F76C2A299248E299F9B5ADE6C403AA1FD4A67FD4E0232F214CE7B919EE7A1027D2B76C57475715CD078461", + "C556FB38DF069B56F337B5FF5775CE6EAA16824DFA754F20B78819028EA635C3BB7AA731DE8776B2DCB67DCA2D33EEDF3C7E52EA450013722A41755A0752433ED17BDD5991AAE77A"}, + {"1E8000A2CE00A561C9920A30BF0D7B983FEF8A1014C8F04C35CA6970E6BA02BD", + "65ED3D63F79F90BBFD19775E", + "336A8C0B7243582A46B221AA677647FCAE91", + "134A8B34824A290E7B", + "914FBEF80D0E6E17F8BDBB6097EBF5FBB0554952DC2B9E5151"}, + {"53D5607BBE690B6E8D8F6D97F3DF2BA853B682597A214B8AA0EA6E598650AF15", + "C391A856B9FE234E14BA1AC7BB40FF", + "479682BC21349C4BE1641D5E78FE2C79EC1B9CF5470936DCAD9967A4DCD7C4EFADA593BC9EDE71E6A08829B8580901B61E274227E9D918502DE3", + "EAD154DC09C5E26C5D26FF33ED148B27120C7F2C23225CC0D0631B03E1F6C6D96FEB88C1A4052ACB4CE746B884B6502931F407021126C6AAB8C514C077A5A38438AE88EE", + 
"938821286EBB671D999B87C032E1D6055392EB564E57970D55E545FC5E8BAB90E6E3E3C0913F6320995FC636D72CD9919657CC38BD51552F4A502D8D1FE56DB33EBAC5092630E69EBB986F0E15CEE9FC8C052501"}, + {"294362FCC984F440CEA3E9F7D2C06AF20C53AAC1B3738CA2186C914A6E193ABB", + "B15B61C8BB39261A8F55AB178EC3", + "D0729B6B75BB", + "2BD089ADCE9F334BAE3B065996C7D616DD0C27DF4218DCEEA0FBCA0F968837CE26B0876083327E25681FDDD620A32EC0DA12F73FAE826CC94BFF2B90A54D2651", + "AC94B25E4E21DE2437B806966CCD5D9385EF0CD4A51AB9FA6DE675C7B8952D67802E9FEC1FDE9F5D1EAB06057498BC0EEA454804FC9D2068982A3E24182D9AC2E7AB9994DDC899A604264583F63D066B"}, + {"959DBFEB039B1A5B8CE6A44649B602AAA5F98A906DB96143D202CD2024F749D9", + "01D7BDB1133E9C347486C1EFA6", + "F3843955BD741F379DD750585EDC55E2CDA05CCBA8C1F4622AC2FE35214BC3A019B8BD12C4CC42D9213D1E1556941E8D8450830287FFB3B763A13722DD4140ED9846FB5FFF745D7B0B967D810A068222E10B259AF1D392035B0D83DC1498A6830B11B2418A840212599171E0258A1C203B05362978", + "A21811232C950FA8B12237C2EBD6A7CD2C3A155905E9E0C7C120", + "63C1CE397B22F1A03F1FA549B43178BC405B152D3C95E977426D519B3DFCA28498823240592B6EEE7A14"}, + {"096AE499F5294173F34FF2B375F0E5D5AB79D0D03B33B1A74D7D576826345DF4", + "0C52B3D11D636E5910A4DD76D32C", + "229E9ECA3053789E937447BC719467075B6138A142DA528DA8F0CF8DDF022FD9AF8E74779BA3AC306609", + "8B7A00038783E8BAF6EDEAE0C4EAB48FC8FD501A588C7E4A4DB71E3604F2155A97687D3D2FFF8569261375A513CF4398CE0F87CA1658A1050F6EF6C4EA3E25", + "C20B6CF8D3C8241825FD90B2EDAC7593600646E579A8D8DAAE9E2E40C3835FE801B2BE4379131452BC5182C90307B176DFBE2049544222FE7783147B690774F6D9D7CEF52A91E61E298E9AA15464AC"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go new file mode 100644 index 0000000000..330309ff5f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go @@ -0,0 +1,78 @@ +package ocb + +import ( + "encoding/hex" +) + +// Test vectors from https://tools.ietf.org/html/rfc7253. Note that key is +// shared across tests. 
+var testKey, _ = hex.DecodeString("000102030405060708090A0B0C0D0E0F") + +var rfc7253testVectors = []struct { + nonce, header, plaintext, ciphertext string +}{ + {"BBAA99887766554433221100", + "", + "", + "785407BFFFC8AD9EDCC5520AC9111EE6"}, + {"BBAA99887766554433221101", + "0001020304050607", + "0001020304050607", + "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"}, + {"BBAA99887766554433221102", + "0001020304050607", + "", + "81017F8203F081277152FADE694A0A00"}, + {"BBAA99887766554433221103", + "", + "0001020304050607", + "45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9"}, + {"BBAA99887766554433221104", + "000102030405060708090A0B0C0D0E0F", + "000102030405060708090A0B0C0D0E0F", + "571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5701C1CCEC8FC3358"}, + {"BBAA99887766554433221105", + "000102030405060708090A0B0C0D0E0F", + "", + "8CF761B6902EF764462AD86498CA6B97"}, + {"BBAA99887766554433221106", + "", + "000102030405060708090A0B0C0D0E0F", + "5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436BDF06D8FA1ECA343D"}, + {"BBAA99887766554433221107", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "1CA2207308C87C010756104D8840CE1952F09673A448A122C92C62241051F57356D7F3C90BB0E07F"}, + {"BBAA99887766554433221108", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "", + "6DC225A071FC1B9F7C69F93B0F1E10DE"}, + {"BBAA99887766554433221109", + "", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3CE725F32494B9F914D85C0B1EB38357FF"}, + {"BBAA9988776655443322110A", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "BD6F6C496201C69296C11EFD138A467ABD3C707924B964DEAFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240"}, + {"BBAA9988776655443322110B", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "", + "FE80690BEE8A485D11F32965BC9D2A32"}, + {"BBAA9988776655443322110C", + "", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF46040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF"}, + {"BBAA9988776655443322110D", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "D5CA91748410C1751FF8A2F618255B68A0A12E093FF454606E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483A7035490C5769E60"}, + {"BBAA9988776655443322110E", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "", + "C5CD9D1850C141E358649994EE701B68"}, + {"BBAA9988776655443322110F", + "", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95A98CA5F3000B1479"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go new file mode 100644 index 0000000000..14a3c336fb --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go @@ -0,0 +1,25 @@ +package ocb + +// Second set of test vectors from https://tools.ietf.org/html/rfc7253 +var rfc7253TestVectorTaglen96 = struct { + key, nonce, header, plaintext, ciphertext string +}{"0F0E0D0C0B0A09080706050403020100", + "BBAA9988776655443322110D", + 
"000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"} + +var rfc7253AlgorithmTest = []struct { + KEYLEN, TAGLEN int + OUTPUT string +}{ + {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"}, + {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"}, + {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"}, + {128, 96, "77A3D8E73589158D25D01209"}, + {192, 96, "05D56EAD2752C86BE6932C5E"}, + {256, 96, "5458359AC23B0CBA9E6330DD"}, + {128, 64, "192C9B7BD90BA06A"}, + {192, 64, "0066BC6E0EF34E24"}, + {256, 64, "7D4EA5D445501CBE"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go new file mode 100644 index 0000000000..3c6251d1ce --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go @@ -0,0 +1,153 @@ +// Copyright 2014 Matthew Endsley +// All rights reserved +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted providing that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// Package keywrap is an implementation of the RFC 3394 AES key wrapping +// algorithm. This is used in OpenPGP with elliptic curve keys. +package keywrap + +import ( + "crypto/aes" + "encoding/binary" + "errors" +) + +var ( + // ErrWrapPlaintext is returned if the plaintext is not a multiple + // of 64 bits. + ErrWrapPlaintext = errors.New("keywrap: plainText must be a multiple of 64 bits") + + // ErrUnwrapCiphertext is returned if the ciphertext is not a + // multiple of 64 bits. + ErrUnwrapCiphertext = errors.New("keywrap: cipherText must by a multiple of 64 bits") + + // ErrUnwrapFailed is returned if unwrapping a key fails. + ErrUnwrapFailed = errors.New("keywrap: failed to unwrap key") + + // NB: the AES NewCipher call only fails if the key is an invalid length. + + // ErrInvalidKey is returned when the AES key is invalid. + ErrInvalidKey = errors.New("keywrap: invalid AES key") +) + +// Wrap a key using the RFC 3394 AES Key Wrap Algorithm. 
+func Wrap(key, plainText []byte) ([]byte, error) { + if len(plainText)%8 != 0 { + return nil, ErrWrapPlaintext + } + + c, err := aes.NewCipher(key) + if err != nil { + return nil, ErrInvalidKey + } + + nblocks := len(plainText) / 8 + + // 1) Initialize variables. + var block [aes.BlockSize]byte + // - Set A = IV, an initial value (see 2.2.3) + for ii := 0; ii < 8; ii++ { + block[ii] = 0xA6 + } + + // - For i = 1 to n + // - Set R[i] = P[i] + intermediate := make([]byte, len(plainText)) + copy(intermediate, plainText) + + // 2) Calculate intermediate values. + for ii := 0; ii < 6; ii++ { + for jj := 0; jj < nblocks; jj++ { + // - B = AES(K, A | R[i]) + copy(block[8:], intermediate[jj*8:jj*8+8]) + c.Encrypt(block[:], block[:]) + + // - A = MSB(64, B) ^ t where t = (n*j)+1 + t := uint64(ii*nblocks + jj + 1) + val := binary.BigEndian.Uint64(block[:8]) ^ t + binary.BigEndian.PutUint64(block[:8], val) + + // - R[i] = LSB(64, B) + copy(intermediate[jj*8:jj*8+8], block[8:]) + } + } + + // 3) Output results. + // - Set C[0] = A + // - For i = 1 to n + // - C[i] = R[i] + return append(block[:8], intermediate...), nil +} + +// Unwrap a key using the RFC 3394 AES Key Wrap Algorithm. +func Unwrap(key, cipherText []byte) ([]byte, error) { + if len(cipherText)%8 != 0 { + return nil, ErrUnwrapCiphertext + } + + c, err := aes.NewCipher(key) + if err != nil { + return nil, ErrInvalidKey + } + + nblocks := len(cipherText)/8 - 1 + + // 1) Initialize variables. + var block [aes.BlockSize]byte + // - Set A = C[0] + copy(block[:8], cipherText[:8]) + + // - For i = 1 to n + // - Set R[i] = C[i] + intermediate := make([]byte, len(cipherText)-8) + copy(intermediate, cipherText[8:]) + + // 2) Compute intermediate values. + for jj := 5; jj >= 0; jj-- { + for ii := nblocks - 1; ii >= 0; ii-- { + // - B = AES-1(K, (A ^ t) | R[i]) where t = n*j+1 + // - A = MSB(64, B) + t := uint64(jj*nblocks + ii + 1) + val := binary.BigEndian.Uint64(block[:8]) ^ t + binary.BigEndian.PutUint64(block[:8], val) + + copy(block[8:], intermediate[ii*8:ii*8+8]) + c.Decrypt(block[:], block[:]) + + // - R[i] = LSB(B, 64) + copy(intermediate[ii*8:ii*8+8], block[8:]) + } + } + + // 3) Output results. + // - If A is an appropriate initial value (see 2.2.3), + for ii := 0; ii < 8; ii++ { + if block[ii] != 0xA6 { + return nil, ErrUnwrapFailed + } + } + + // - For i = 1 to n + // - P[i] = R[i] + return intermediate, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go new file mode 100644 index 0000000000..e0a677f284 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go @@ -0,0 +1,183 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is +// very similar to PEM except that it has an additional CRC checksum. +package armor // import "github.com/ProtonMail/go-crypto/openpgp/armor" + +import ( + "bufio" + "bytes" + "encoding/base64" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// A Block represents an OpenPGP armored structure. +// +// The encoded form is: +// +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum (optional) not checked anymore +// -----END Type----- +// +// where Headers is a possibly empty sequence of Key: Value lines. 
+// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor block +type lineReader struct { + in *bufio.Reader + buf []byte + eof bool +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + if isPrefix { + return 0, ArmorCorrupt + } + + if bytes.HasPrefix(line, armorEnd) { + l.eof = true + return 0, io.EOF + } + + if len(line) == 5 && line[0] == '=' { + // This is the checksum line + // Don't check the checksum + + l.eof = true + return 0, io.EOF + } + + if len(line) > 96 { + return 0, ArmorCorrupt + } + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder. +type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. 
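A hedged sketch of the streaming interface (hypothetical program; it assumes armored input arrives on stdin):

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

func main() {
	// Decode skips leading garbage and returns io.EOF if no
	// "-----BEGIN ...-----" line is ever found.
	block, err := armor.Decode(os.Stdin)
	if err != nil {
		panic(err)
	}
	fmt.Println(block.Type) // e.g. "PGP SIGNATURE"
	for k, v := range block.Header {
		fmt.Println(k+":", v)
	}

	// The body is base64-decoded on the fly as it is read.
	payload, err := io.ReadAll(block.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(payload))
}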
+func Decode(in io.Reader) (p *Block, err error) { + r := bufio.NewReaderSize(in, 100) + var line []byte + ignoreNext := false + +TryNextBlock: + p = nil + + // Skip leading garbage + for { + ignoreThis := ignoreNext + line, ignoreNext, err = r.ReadLine() + if err != nil { + return + } + if ignoreNext || ignoreThis { + continue + } + line = bytes.TrimSpace(line) + if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { + break + } + } + + p = new(Block) + p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) + p.Header = make(map[string]string) + nextIsContinuation := false + var lastKey string + + // Read headers + for { + isContinuation := nextIsContinuation + line, nextIsContinuation, err = r.ReadLine() + if err != nil { + p = nil + return + } + if isContinuation { + p.Header[lastKey] += string(line) + continue + } + line = bytes.TrimSpace(line) + if len(line) == 0 { + break + } + + i := bytes.Index(line, []byte(":")) + if i == -1 { + goto TryNextBlock + } + lastKey = string(line[:i]) + var value string + if len(line) > i+2 { + value = string(line[i+2:]) + } + p.Header[lastKey] = value + } + + p.lReader.in = r + p.oReader.lReader = &p.lReader + p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) + p.Body = &p.oReader + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go new file mode 100644 index 0000000000..112f98b835 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go @@ -0,0 +1,198 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package armor + +import ( + "encoding/base64" + "io" +) + +var armorHeaderSep = []byte(": ") +var blockEnd = []byte("\n=") +var newline = []byte("\n") +var armorEndOfLineOut = []byte("-----\n") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +// writeSlices writes its arguments to the given Writer. +func writeSlices(out io.Writer, slices ...[]byte) (err error) { + for _, s := range slices { + _, err = out.Write(s) + if err != nil { + return err + } + } + return +} + +// lineBreaker breaks data across several lines, all of the same byte length +// (except possibly the last). Lines are broken with a single '\n'. 
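An in-package sketch of the wrapping behaviour (hypothetical test; lineBreaker is unexported, so this only compiles inside package armor):

package armor

import (
	"bytes"
	"strings"
	"testing"
)

// Hypothetical illustration: 100 input bytes come out as one full
// 64-byte line, a single '\n', then the 36-byte remainder, with no
// trailing newline.
func TestLineBreakerSketch(t *testing.T) {
	var sb strings.Builder
	lb := newLineBreaker(&sb, 64)
	if _, err := lb.Write(bytes.Repeat([]byte{'A'}, 100)); err != nil {
		t.Fatal(err)
	}
	if err := lb.Close(); err != nil {
		t.Fatal(err)
	}
	want := strings.Repeat("A", 64) + "\n" + strings.Repeat("A", 36)
	if got := sb.String(); got != want {
		t.Fatalf("unexpected wrapping: %q", got)
	}
}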
+type lineBreaker struct { + lineLength int + line []byte + used int + out io.Writer + haveWritten bool +} + +func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { + return &lineBreaker{ + lineLength: lineLength, + line: make([]byte, lineLength), + used: 0, + out: out, + } +} + +func (l *lineBreaker) Write(b []byte) (n int, err error) { + n = len(b) + + if n == 0 { + return + } + + if l.used == 0 && l.haveWritten { + _, err = l.out.Write([]byte{'\n'}) + if err != nil { + return + } + } + + if l.used+len(b) < l.lineLength { + l.used += copy(l.line[l.used:], b) + return + } + + l.haveWritten = true + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + excess := l.lineLength - l.used + l.used = 0 + + _, err = l.out.Write(b[0:excess]) + if err != nil { + return + } + + _, err = l.Write(b[excess:]) + return +} + +func (l *lineBreaker) Close() (err error) { + if l.used > 0 { + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + } + + return +} + +// encoding keeps track of a running CRC24 over the data which has been written +// to it and outputs a OpenPGP checksum when closed, followed by an armor +// trailer. +// +// It's built into a stack of io.Writers: +// +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + crcEnabled bool + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + if e.crcEnabled { + e.crc = crc24(e.crc, data) + } + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + if e.crcEnabled { + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) + } + return writeSlices(e.out, newline, armorEnd, e.blockType, armorEndOfLine) +} + +func encode(out io.Writer, blockType string, headers map[string]string, checksum bool) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + blockType: bType, + crc: crc24Init, + crcEnabled: checksum, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + return encode(out, blockType, headers, true) +} + +// EncodeWithChecksumOption returns a WriteCloser which will encode the data written to it in +// OpenPGP armor and provides the option to include a checksum. +// When forming ASCII Armor, the CRC24 footer SHOULD NOT be generated, +// unless interoperability with implementations that require the CRC24 footer +// to be present is a concern. 
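A matching usage sketch for the encoding side (hypothetical block type and header values):

package main

import (
	"bytes"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Comment": "example"})
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello world")); err != nil {
		panic(err)
	}
	// Close flushes the base64 stream, then emits the "=" CRC24 line
	// and the "-----END PGP MESSAGE-----" trailer.
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}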
+func EncodeWithChecksumOption(out io.Writer, blockType string, headers map[string]string, doChecksum bool) (w io.WriteCloser, err error) { + return encode(out, blockType, headers, doChecksum) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go new file mode 100644 index 0000000000..5b40e1375d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go @@ -0,0 +1,71 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "hash" + "io" +) + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. +func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { + start := 0 + for i, c := range buf { + switch *s { + case 0: + if c == '\r' { + *s = 1 + } else if c == '\n' { + if _, err := cw.Write(buf[start:i]); err != nil { + return 0, err + } + if _, err := cw.Write(newline); err != nil { + return 0, err + } + start = i + 1 + } + case 1: + *s = 0 + } + } + + if _, err := cw.Write(buf[start:]); err != nil { + return 0, err + } + return len(buf), nil +} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + return writeCanonical(cth.h, buf, &cth.s) +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go new file mode 100644 index 0000000000..c895bad6bb --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go @@ -0,0 +1,210 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ecdh implements ECDH encryption, suitable for OpenPGP, +// as specified in RFC 6637, section 8. 
+package ecdh + +import ( + "bytes" + "errors" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" +) + +type KDF struct { + Hash algorithm.Hash + Cipher algorithm.Cipher +} + +type PublicKey struct { + curve ecc.ECDHCurve + Point []byte + KDF +} + +type PrivateKey struct { + PublicKey + D []byte +} + +func NewPublicKey(curve ecc.ECDHCurve, kdfHash algorithm.Hash, kdfCipher algorithm.Cipher) *PublicKey { + return &PublicKey{ + curve: curve, + KDF: KDF{ + Hash: kdfHash, + Cipher: kdfCipher, + }, + } +} + +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +func (pk *PublicKey) GetCurve() ecc.ECDHCurve { + return pk.curve +} + +func (pk *PublicKey) MarshalPoint() []byte { + return pk.curve.MarshalBytePoint(pk.Point) +} + +func (pk *PublicKey) UnmarshalPoint(p []byte) error { + pk.Point = pk.curve.UnmarshalBytePoint(p) + if pk.Point == nil { + return errors.New("ecdh: failed to parse EC point") + } + return nil +} + +func (sk *PrivateKey) MarshalByteSecret() []byte { + return sk.curve.MarshalByteSecret(sk.D) +} + +func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error { + sk.D = sk.curve.UnmarshalByteSecret(d) + + if sk.D == nil { + return errors.New("ecdh: failed to parse scalar") + } + return nil +} + +func GenerateKey(rand io.Reader, c ecc.ECDHCurve, kdf KDF) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.curve = c + priv.PublicKey.KDF = kdf + priv.PublicKey.Point, priv.D, err = c.GenerateECDH(rand) + return +} + +func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) { + if len(msg) > 40 { + return nil, nil, errors.New("ecdh: message too long") + } + // the sender MAY use 21, 13, and 5 bytes of padding for AES-128, + // AES-192, and AES-256, respectively, to provide the same number of + // octets, 40 total, as an input to the key wrapping method. + padding := make([]byte, 40-len(msg)) + for i := range padding { + padding[i] = byte(40 - len(msg)) + } + m := append(msg, padding...) + + ephemeral, zb, err := pub.curve.Encaps(random, pub.Point) + if err != nil { + return nil, nil, err + } + + vsG = pub.curve.MarshalBytePoint(ephemeral) + + z, err := buildKey(pub, zb, curveOID, fingerprint, false, false) + if err != nil { + return nil, nil, err + } + + if c, err = keywrap.Wrap(z, m); err != nil { + return nil, nil, err + } + + return vsG, c, nil + +} + +func Decrypt(priv *PrivateKey, vsG, c, curveOID, fingerprint []byte) (msg []byte, err error) { + var m []byte + zb, err := priv.PublicKey.curve.Decaps(priv.curve.UnmarshalBytePoint(vsG), priv.D) + + // Try buildKey three times to workaround an old bug, see comments in buildKey. + for i := 0; i < 3; i++ { + var z []byte + // RFC6637 §8: "Compute Z = KDF( S, Z_len, Param );" + z, err = buildKey(&priv.PublicKey, zb, curveOID, fingerprint, i == 1, i == 2) + if err != nil { + return nil, err + } + + // RFC6637 §8: "Compute C = AESKeyWrap( Z, c ) as per [RFC3394]" + m, err = keywrap.Unwrap(z, c) + if err == nil { + break + } + } + + // Only return an error after we've tried all (required) variants of buildKey. + if err != nil { + return nil, err + } + + // RFC6637 §8: "m = symm_alg_ID || session key || checksum || pkcs5_padding" + // The last byte should be the length of the padding, as per PKCS5; strip it off. 
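+	// Illustrative arithmetic, following the padding note in Encrypt above:
+	// for AES-128 the message is 1 (alg ID) + 16 (key) + 2 (checksum) = 19
+	// bytes plus 21 padding bytes of value 21, so m[len(m)-1] == 21 and the
+	// slice below returns the first 19 bytes.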
+ return m[:len(m)-int(m[len(m)-1])], nil +} + +func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLeading, stripTrailing bool) ([]byte, error) { + // Param = curve_OID_len || curve_OID || public_key_alg_ID || 03 + // || 01 || KDF_hash_ID || KEK_alg_ID for AESKeyWrap + // || "Anonymous Sender " || recipient_fingerprint; + param := new(bytes.Buffer) + if _, err := param.Write(curveOID); err != nil { + return nil, err + } + algKDF := []byte{18, 3, 1, pub.KDF.Hash.Id(), pub.KDF.Cipher.Id()} + if _, err := param.Write(algKDF); err != nil { + return nil, err + } + if _, err := param.Write([]byte("Anonymous Sender ")); err != nil { + return nil, err + } + // For v5 keys, the 20 leftmost octets of the fingerprint are used. + if _, err := param.Write(fingerprint[:20]); err != nil { + return nil, err + } + if param.Len()-len(curveOID) != 45 { + return nil, errors.New("ecdh: malformed KDF Param") + } + + // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param ); + h := pub.KDF.Hash.New() + if _, err := h.Write([]byte{0x0, 0x0, 0x0, 0x1}); err != nil { + return nil, err + } + zbLen := len(zb) + i := 0 + j := zbLen - 1 + if stripLeading { + // Work around old go crypto bug where the leading zeros are missing. + for i < zbLen && zb[i] == 0 { + i++ + } + } + if stripTrailing { + // Work around old OpenPGP.js bug where insignificant trailing zeros in + // this little-endian number are missing. + // (See https://github.com/openpgpjs/openpgpjs/pull/853.) + for j >= 0 && zb[j] == 0 { + j-- + } + } + if _, err := h.Write(zb[i : j+1]); err != nil { + return nil, err + } + if _, err := h.Write(param.Bytes()); err != nil { + return nil, err + } + mb := h.Sum(nil) + + return mb[:pub.KDF.Cipher.KeySize()], nil // return oBits leftmost bits of MB. + +} + +func Validate(priv *PrivateKey) error { + return priv.curve.ValidateECDH(priv.Point, priv.D) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go new file mode 100644 index 0000000000..f94ae1b2f5 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go @@ -0,0 +1,80 @@ +// Package ecdsa implements ECDSA signature, suitable for OpenPGP, +// as specified in RFC 6637, section 5. 
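A hedged signing sketch (hypothetical program; note that openpgp/internal/ecc is importable only from inside the go-crypto module, so external callers normally reach this package through the higher-level openpgp entity APIs instead):

package main

import (
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/ecdsa"
	"github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
)

func main() {
	// NewGenericCurve wraps a crypto/elliptic curve, as curve_info.go does below.
	priv, err := ecdsa.GenerateKey(rand.Reader, ecc.NewGenericCurve(elliptic.P256()))
	if err != nil {
		panic(err)
	}

	digest := sha256.Sum256([]byte("message"))
	r, s, err := ecdsa.Sign(rand.Reader, priv, digest[:])
	if err != nil {
		panic(err)
	}
	fmt.Println(ecdsa.Verify(&priv.PublicKey, digest[:], r, s)) // true
}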
+package ecdsa + +import ( + "errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "io" + "math/big" +) + +type PublicKey struct { + X, Y *big.Int + curve ecc.ECDSACurve +} + +type PrivateKey struct { + PublicKey + D *big.Int +} + +func NewPublicKey(curve ecc.ECDSACurve) *PublicKey { + return &PublicKey{ + curve: curve, + } +} + +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +func (pk *PublicKey) GetCurve() ecc.ECDSACurve { + return pk.curve +} + +func (pk *PublicKey) MarshalPoint() []byte { + return pk.curve.MarshalIntegerPoint(pk.X, pk.Y) +} + +func (pk *PublicKey) UnmarshalPoint(p []byte) error { + pk.X, pk.Y = pk.curve.UnmarshalIntegerPoint(p) + if pk.X == nil { + return errors.New("ecdsa: failed to parse EC point") + } + return nil +} + +func (sk *PrivateKey) MarshalIntegerSecret() []byte { + return sk.curve.MarshalIntegerSecret(sk.D) +} + +func (sk *PrivateKey) UnmarshalIntegerSecret(d []byte) error { + sk.D = sk.curve.UnmarshalIntegerSecret(d) + + if sk.D == nil { + return errors.New("ecdsa: failed to parse scalar") + } + return nil +} + +func GenerateKey(rand io.Reader, c ecc.ECDSACurve) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.curve = c + priv.PublicKey.X, priv.PublicKey.Y, priv.D, err = c.GenerateECDSA(rand) + return +} + +func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) { + return priv.PublicKey.curve.Sign(rand, priv.X, priv.Y, priv.D, hash) +} + +func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool { + return pub.curve.Verify(pub.X, pub.Y, hash, r, s) +} + +func Validate(priv *PrivateKey) error { + return priv.curve.ValidateECDSA(priv.X, priv.Y, priv.D.Bytes()) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go new file mode 100644 index 0000000000..6abdf7c446 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go @@ -0,0 +1,115 @@ +// Package ed25519 implements the ed25519 signature algorithm for OpenPGP +// as defined in the Open PGP crypto refresh. +package ed25519 + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed25519lib "github.com/cloudflare/circl/sign/ed25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys in this package. + PublicKeySize = ed25519lib.PublicKeySize + // SeedSize is the size, in bytes, of private key seeds. + // The private key representation used by RFC 8032. + SeedSize = ed25519lib.SeedSize + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = ed25519lib.SignatureSize +) + +type PublicKey struct { + // Point represents the elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Key the private key representation by RFC 8032, + // encoded as seed | pub key point. + Key []byte +} + +// NewPublicKey creates a new empty ed25519 public key. +func NewPublicKey() *PublicKey { + return &PublicKey{} +} + +// NewPrivateKey creates a new empty private key referencing the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Seed returns the ed25519 private key secret seed. +// The private key representation by RFC 8032. +func (pk *PrivateKey) Seed() []byte { + return pk.Key[:SeedSize] +} + +// MarshalByteSecret returns the underlying 32 byte seed of the private key. 
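A hedged key-generation and signing sketch for this package (hypothetical program; the message is an illustrative value):

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/ed25519"
)

func main() {
	priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	// Sign requires a valid key, so check the seed/public-point pair first.
	if err := ed25519.Validate(priv); err != nil {
		panic(err)
	}

	msg := []byte("message")
	sig, err := ed25519.Sign(priv, msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(sig) == ed25519.SignatureSize)         // true
	fmt.Println(ed25519.Verify(&priv.PublicKey, msg, sig)) // true
}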
+func (pk *PrivateKey) MarshalByteSecret() []byte { + return pk.Seed() +} + +// UnmarshalByteSecret computes the private key from the secret seed +// and stores it in the private key object. +func (sk *PrivateKey) UnmarshalByteSecret(seed []byte) error { + sk.Key = ed25519lib.NewKeyFromSeed(seed) + return nil +} + +// GenerateKey generates a fresh private key with the provided randomness source. +func GenerateKey(rand io.Reader) (*PrivateKey, error) { + publicKey, privateKey, err := ed25519lib.GenerateKey(rand) + if err != nil { + return nil, err + } + privateKeyOut := new(PrivateKey) + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Key = privateKey[:] + return privateKeyOut, nil +} + +// Sign signs a message with the ed25519 algorithm. +// priv MUST be a valid key! Check this with Validate() before use. +func Sign(priv *PrivateKey, message []byte) ([]byte, error) { + return ed25519lib.Sign(priv.Key, message), nil +} + +// Verify verifies an ed25519 signature. +func Verify(pub *PublicKey, message []byte, signature []byte) bool { + return ed25519lib.Verify(pub.Point, message, signature) +} + +// Validate checks if the ed25519 private key is valid. +func Validate(priv *PrivateKey) error { + expectedPrivateKey := ed25519lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv.Key, expectedPrivateKey) == 0 { + return errors.KeyInvalidError("ed25519: invalid ed25519 secret") + } + if subtle.ConstantTimeCompare(priv.PublicKey.Point, expectedPrivateKey[SeedSize:]) == 0 { + return errors.KeyInvalidError("ed25519: invalid ed25519 public key") + } + return nil +} + +// ENCODING/DECODING signature: + +// WriteSignature encodes and writes an ed25519 signature to writer. +func WriteSignature(writer io.Writer, signature []byte) error { + _, err := writer.Write(signature) + return err +} + +// ReadSignature decodes an ed25519 signature from a reader. +func ReadSignature(reader io.Reader) ([]byte, error) { + signature := make([]byte, SignatureSize) + if _, err := io.ReadFull(reader, signature); err != nil { + return nil, err + } + return signature, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go new file mode 100644 index 0000000000..b11fb4fb17 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go @@ -0,0 +1,119 @@ +// Package ed448 implements the ed448 signature algorithm for OpenPGP +// as defined in the Open PGP crypto refresh. +package ed448 + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed448lib "github.com/cloudflare/circl/sign/ed448" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys in this package. + PublicKeySize = ed448lib.PublicKeySize + // SeedSize is the size, in bytes, of private key seeds. + // The private key representation used by RFC 8032. + SeedSize = ed448lib.SeedSize + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = ed448lib.SignatureSize +) + +type PublicKey struct { + // Point represents the elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Key the private key representation by RFC 8032, + // encoded as seed | public key point. + Key []byte +} + +// NewPublicKey creates a new empty ed448 public key. +func NewPublicKey() *PublicKey { + return &PublicKey{} +} + +// NewPrivateKey creates a new empty private key referencing the public key. 
+func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Seed returns the ed448 private key secret seed. +// The private key representation by RFC 8032. +func (pk *PrivateKey) Seed() []byte { + return pk.Key[:SeedSize] +} + +// MarshalByteSecret returns the underlying seed of the private key. +func (pk *PrivateKey) MarshalByteSecret() []byte { + return pk.Seed() +} + +// UnmarshalByteSecret computes the private key from the secret seed +// and stores it in the private key object. +func (sk *PrivateKey) UnmarshalByteSecret(seed []byte) error { + sk.Key = ed448lib.NewKeyFromSeed(seed) + return nil +} + +// GenerateKey generates a fresh private key with the provided randomness source. +func GenerateKey(rand io.Reader) (*PrivateKey, error) { + publicKey, privateKey, err := ed448lib.GenerateKey(rand) + if err != nil { + return nil, err + } + privateKeyOut := new(PrivateKey) + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Key = privateKey[:] + return privateKeyOut, nil +} + +// Sign signs a message with the ed448 algorithm. +// priv MUST be a valid key! Check this with Validate() before use. +func Sign(priv *PrivateKey, message []byte) ([]byte, error) { + // Ed448 is used with the empty string as a context string. + // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-08#section-13.7 + return ed448lib.Sign(priv.Key, message, ""), nil +} + +// Verify verifies a ed448 signature +func Verify(pub *PublicKey, message []byte, signature []byte) bool { + // Ed448 is used with the empty string as a context string. + // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-08#section-13.7 + return ed448lib.Verify(pub.Point, message, signature, "") +} + +// Validate checks if the ed448 private key is valid +func Validate(priv *PrivateKey) error { + expectedPrivateKey := ed448lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv.Key, expectedPrivateKey) == 0 { + return errors.KeyInvalidError("ed448: invalid ed448 secret") + } + if subtle.ConstantTimeCompare(priv.PublicKey.Point, expectedPrivateKey[SeedSize:]) == 0 { + return errors.KeyInvalidError("ed448: invalid ed448 public key") + } + return nil +} + +// ENCODING/DECODING signature: + +// WriteSignature encodes and writes an ed448 signature to writer. +func WriteSignature(writer io.Writer, signature []byte) error { + _, err := writer.Write(signature) + return err +} + +// ReadSignature decodes an ed448 signature from a reader. 
+func ReadSignature(reader io.Reader) ([]byte, error) { + signature := make([]byte, SignatureSize) + if _, err := io.ReadFull(reader, signature); err != nil { + return nil, err + } + return signature, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go new file mode 100644 index 0000000000..99ecfc7f12 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go @@ -0,0 +1,91 @@ +// Package eddsa implements EdDSA signature, suitable for OpenPGP, as specified in +// https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 +package eddsa + +import ( + "errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "io" +) + +type PublicKey struct { + X []byte + curve ecc.EdDSACurve +} + +type PrivateKey struct { + PublicKey + D []byte +} + +func NewPublicKey(curve ecc.EdDSACurve) *PublicKey { + return &PublicKey{ + curve: curve, + } +} + +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +func (pk *PublicKey) GetCurve() ecc.EdDSACurve { + return pk.curve +} + +func (pk *PublicKey) MarshalPoint() []byte { + return pk.curve.MarshalBytePoint(pk.X) +} + +func (pk *PublicKey) UnmarshalPoint(x []byte) error { + pk.X = pk.curve.UnmarshalBytePoint(x) + + if pk.X == nil { + return errors.New("eddsa: failed to parse EC point") + } + return nil +} + +func (sk *PrivateKey) MarshalByteSecret() []byte { + return sk.curve.MarshalByteSecret(sk.D) +} + +func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error { + sk.D = sk.curve.UnmarshalByteSecret(d) + + if sk.D == nil { + return errors.New("eddsa: failed to parse scalar") + } + return nil +} + +func GenerateKey(rand io.Reader, c ecc.EdDSACurve) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.curve = c + priv.PublicKey.X, priv.D, err = c.GenerateEdDSA(rand) + return +} + +func Sign(priv *PrivateKey, message []byte) (r, s []byte, err error) { + sig, err := priv.PublicKey.curve.Sign(priv.PublicKey.X, priv.D, message) + if err != nil { + return nil, nil, err + } + + r, s = priv.PublicKey.curve.MarshalSignature(sig) + return +} + +func Verify(pub *PublicKey, message, r, s []byte) bool { + sig := pub.curve.UnmarshalSignature(r, s) + if sig == nil { + return false + } + + return pub.curve.Verify(pub.X, message, sig) +} + +func Validate(priv *PrivateKey) error { + return priv.curve.ValidateEdDSA(priv.PublicKey.X, priv.D) +} diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go similarity index 88% rename from vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go index 743b35a120..bad2774344 100644 --- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go @@ -10,13 +10,7 @@ // This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it // unsuitable for other protocols. RSA should be used in preference in any // case. -// -// Deprecated: this package was only provided to support ElGamal encryption in -// OpenPGP. The golang.org/x/crypto/openpgp package is now deprecated (see -// https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has -// compatibility and security issues (see https://eprint.iacr.org/2021/923). -// Moreover, this package doesn't protect against side-channel attacks. 
-package elgamal // import "golang.org/x/crypto/openpgp/elgamal" +package elgamal // import "github.com/ProtonMail/go-crypto/openpgp/elgamal" import ( "crypto/rand" diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go new file mode 100644 index 0000000000..8d6969c0bf --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go @@ -0,0 +1,143 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors contains common error types for the OpenPGP packages. +package errors // import "github.com/ProtonMail/go-crypto/v2/openpgp/errors" + +import ( + "strconv" +) + +// A StructuralError is returned when OpenPGP data is found to be syntactically +// invalid. +type StructuralError string + +func (s StructuralError) Error() string { + return "openpgp: invalid data: " + string(s) +} + +// UnsupportedError indicates that, although the OpenPGP data is valid, it +// makes use of currently unimplemented features. +type UnsupportedError string + +func (s UnsupportedError) Error() string { + return "openpgp: unsupported feature: " + string(s) +} + +// InvalidArgumentError indicates that the caller is in error and passed an +// incorrect value. +type InvalidArgumentError string + +func (i InvalidArgumentError) Error() string { + return "openpgp: invalid argument: " + string(i) +} + +// SignatureError indicates that a syntactically valid signature failed to +// validate. +type SignatureError string + +func (b SignatureError) Error() string { + return "openpgp: invalid signature: " + string(b) +} + +var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch") +var ErrMDCMissing error = SignatureError("MDC packet not found") + +type signatureExpiredError int + +func (se signatureExpiredError) Error() string { + return "openpgp: signature expired" +} + +var ErrSignatureExpired error = signatureExpiredError(0) + +type keyExpiredError int + +func (ke keyExpiredError) Error() string { + return "openpgp: key expired" +} + +var ErrSignatureOlderThanKey error = signatureOlderThanKeyError(0) + +type signatureOlderThanKeyError int + +func (ske signatureOlderThanKeyError) Error() string { + return "openpgp: signature is older than the key" +} + +var ErrKeyExpired error = keyExpiredError(0) + +type keyIncorrectError int + +func (ki keyIncorrectError) Error() string { + return "openpgp: incorrect key" +} + +var ErrKeyIncorrect error = keyIncorrectError(0) + +// KeyInvalidError indicates that the public key parameters are invalid +// as they do not match the private ones +type KeyInvalidError string + +func (e KeyInvalidError) Error() string { + return "openpgp: invalid key: " + string(e) +} + +type unknownIssuerError int + +func (unknownIssuerError) Error() string { + return "openpgp: signature made by unknown entity" +} + +var ErrUnknownIssuer error = unknownIssuerError(0) + +type keyRevokedError int + +func (keyRevokedError) Error() string { + return "openpgp: signature made by revoked key" +} + +var ErrKeyRevoked error = keyRevokedError(0) + +type WeakAlgorithmError string + +func (e WeakAlgorithmError) Error() string { + return "openpgp: weak algorithms are rejected: " + string(e) +} + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) Error() string { + return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) +} + +type 
CriticalUnknownPacketTypeError uint8 + +func (upte CriticalUnknownPacketTypeError) Error() string { + return "openpgp: unknown critical packet type: " + strconv.Itoa(int(upte)) +} + +// AEADError indicates that there is a problem when initializing or using a +// AEAD instance, configuration struct, nonces or index values. +type AEADError string + +func (ae AEADError) Error() string { + return "openpgp: aead error: " + string(ae) +} + +// ErrDummyPrivateKey results when operations are attempted on a private key +// that is just a dummy key. See +// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 +type ErrDummyPrivateKey string + +func (dke ErrDummyPrivateKey) Error() string { + return "openpgp: s2k GNU dummy key: " + string(dke) +} + +// ErrMalformedMessage results when the packet sequence is incorrect +type ErrMalformedMessage string + +func (dke ErrMalformedMessage) Error() string { + return "openpgp: malformed message " + string(dke) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go new file mode 100644 index 0000000000..526bd7777f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go @@ -0,0 +1,24 @@ +package openpgp + +import ( + "crypto" + + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + return algorithm.HashIdToHash(id) +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. +func HashIdToString(id byte) (name string, ok bool) { + return algorithm.HashIdToString(id) +} + +// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. +func HashToHashId(h crypto.Hash) (id byte, ok bool) { + return algorithm.HashToHashId(h) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go new file mode 100644 index 0000000000..d067065186 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019 ProtonTech AG + +package algorithm + +import ( + "crypto/cipher" + "github.com/ProtonMail/go-crypto/eax" + "github.com/ProtonMail/go-crypto/ocb" +) + +// AEADMode defines the Authenticated Encryption with Associated Data mode of +// operation. +type AEADMode uint8 + +// Supported modes of operation (see RFC4880bis [EAX] and RFC7253) +const ( + AEADModeEAX = AEADMode(1) + AEADModeOCB = AEADMode(2) + AEADModeGCM = AEADMode(3) +) + +// TagLength returns the length in bytes of authentication tags. +func (mode AEADMode) TagLength() int { + switch mode { + case AEADModeEAX: + return 16 + case AEADModeOCB: + return 16 + case AEADModeGCM: + return 16 + default: + return 0 + } +} + +// NonceLength returns the length in bytes of nonces. 
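A hedged in-package sketch tying the tag and nonce tables to a concrete AEAD instance (hypothetical test; internal/algorithm is importable only inside the go-crypto module, and the all-zero key is illustrative):

package algorithm

import (
	"crypto/aes"
	"testing"
)

// Hypothetical in-package test of the OCB mode parameters.
func TestOCBModeSketch(t *testing.T) {
	block, err := aes.NewCipher(make([]byte, 16)) // illustrative AES-128 key
	if err != nil {
		t.Fatal(err)
	}
	aead := AEADModeOCB.New(block) // panics rather than erroring; see New below
	if aead.NonceSize() != AEADModeOCB.NonceLength() { // 15 bytes for OCB
		t.Fatal("nonce length table out of sync")
	}
	if aead.Overhead() != AEADModeOCB.TagLength() { // 16-byte tag
		t.Fatal("tag length table out of sync")
	}
}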
+func (mode AEADMode) NonceLength() int { + switch mode { + case AEADModeEAX: + return 16 + case AEADModeOCB: + return 15 + case AEADModeGCM: + return 12 + default: + return 0 + } +} + +// New returns a fresh instance of the given mode +func (mode AEADMode) New(block cipher.Block) (alg cipher.AEAD) { + var err error + switch mode { + case AEADModeEAX: + alg, err = eax.NewEAX(block) + case AEADModeOCB: + alg, err = ocb.NewOCB(block) + case AEADModeGCM: + alg, err = cipher.NewGCM(block) + } + if err != nil { + panic(err.Error()) + } + return alg +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go new file mode 100644 index 0000000000..c76a75bcda --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go @@ -0,0 +1,97 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package algorithm + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + + "golang.org/x/crypto/cast5" +) + +// Cipher is an official symmetric key cipher algorithm. See RFC 4880, +// section 9.2. +type Cipher interface { + // Id returns the algorithm ID, as a byte, of the cipher. + Id() uint8 + // KeySize returns the key size, in bytes, of the cipher. + KeySize() int + // BlockSize returns the block size, in bytes, of the cipher. + BlockSize() int + // New returns a fresh instance of the given cipher. + New(key []byte) cipher.Block +} + +// The following constants mirror the OpenPGP standard (RFC 4880). +const ( + TripleDES = CipherFunction(2) + CAST5 = CipherFunction(3) + AES128 = CipherFunction(7) + AES192 = CipherFunction(8) + AES256 = CipherFunction(9) +) + +// CipherById represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +var CipherById = map[uint8]Cipher{ + TripleDES.Id(): TripleDES, + CAST5.Id(): CAST5, + AES128.Id(): AES128, + AES192.Id(): AES192, + AES256.Id(): AES256, +} + +type CipherFunction uint8 + +// ID returns the algorithm Id, as a byte, of cipher. +func (sk CipherFunction) Id() uint8 { + return uint8(sk) +} + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + switch cipher { + case CAST5: + return cast5.KeySize + case AES128: + return 16 + case AES192, TripleDES: + return 24 + case AES256: + return 32 + } + return 0 +} + +// BlockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) BlockSize() int { + switch cipher { + case TripleDES: + return des.BlockSize + case CAST5: + return 8 + case AES128, AES192, AES256: + return 16 + } + return 0 +} + +// New returns a fresh instance of the given cipher. 
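A hedged in-package sketch of the ID-to-cipher mapping (hypothetical test; the assertions mirror KeySize and BlockSize above):

package algorithm

import (
	"crypto/aes"
	"testing"
)

// Hypothetical in-package test: OpenPGP algorithm ID 9 is AES-256.
func TestCipherFunctionSketch(t *testing.T) {
	c, ok := CipherById[9]
	if !ok {
		t.Fatal("AES256 not registered")
	}
	if c.KeySize() != 32 || c.BlockSize() != aes.BlockSize {
		t.Fatal("unexpected AES-256 parameters")
	}
	block := c.New(make([]byte, c.KeySize())) // New panics on a bad key length
	if block.BlockSize() != 16 {
		t.Fatal("unexpected block size")
	}
}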
+func (cipher CipherFunction) New(key []byte) (block cipher.Block) { + var err error + switch cipher { + case TripleDES: + block, err = des.NewTripleDESCipher(key) + case CAST5: + block, err = cast5.NewCipher(key) + case AES128, AES192, AES256: + block, err = aes.NewCipher(key) + } + if err != nil { + panic(err.Error()) + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go new file mode 100644 index 0000000000..d1a00fc749 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go @@ -0,0 +1,143 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package algorithm + +import ( + "crypto" + "fmt" + "hash" +) + +// Hash is an official hash function algorithm. See RFC 4880, section 9.4. +type Hash interface { + // Id returns the algorithm ID, as a byte, of Hash. + Id() uint8 + // Available reports whether the given hash function is linked into the binary. + Available() bool + // HashFunc simply returns the value of h so that Hash implements SignerOpts. + HashFunc() crypto.Hash + // New returns a new hash.Hash calculating the given hash function. New + // panics if the hash function is not linked into the binary. + New() hash.Hash + // Size returns the length, in bytes, of a digest resulting from the given + // hash function. It doesn't require that the hash function in question be + // linked into the program. + Size() int + // String is the name of the hash function corresponding to the given + // OpenPGP hash id. + String() string +} + +// The following vars mirror the crypto/Hash supported hash functions. +var ( + SHA1 Hash = cryptoHash{2, crypto.SHA1} + SHA256 Hash = cryptoHash{8, crypto.SHA256} + SHA384 Hash = cryptoHash{9, crypto.SHA384} + SHA512 Hash = cryptoHash{10, crypto.SHA512} + SHA224 Hash = cryptoHash{11, crypto.SHA224} + SHA3_256 Hash = cryptoHash{12, crypto.SHA3_256} + SHA3_512 Hash = cryptoHash{14, crypto.SHA3_512} +) + +// HashById represents the different hash functions specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14 +var ( + HashById = map[uint8]Hash{ + SHA256.Id(): SHA256, + SHA384.Id(): SHA384, + SHA512.Id(): SHA512, + SHA224.Id(): SHA224, + SHA3_256.Id(): SHA3_256, + SHA3_512.Id(): SHA3_512, + } +) + +// cryptoHash contains pairs relating OpenPGP's hash identifier with +// Go's crypto.Hash type. See RFC 4880, section 9.4. +type cryptoHash struct { + id uint8 + crypto.Hash +} + +// Id returns the algorithm ID, as a byte, of cryptoHash. +func (h cryptoHash) Id() uint8 { + return h.id +} + +var hashNames = map[uint8]string{ + SHA256.Id(): "SHA256", + SHA384.Id(): "SHA384", + SHA512.Id(): "SHA512", + SHA224.Id(): "SHA224", + SHA3_256.Id(): "SHA3-256", + SHA3_512.Id(): "SHA3-512", +} + +func (h cryptoHash) String() string { + s, ok := hashNames[h.id] + if !ok { + panic(fmt.Sprintf("Unsupported hash function %d", h.id)) + } + return s +} + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + if hash, ok := HashById[id]; ok { + return hash.HashFunc(), true + } + return 0, false +} + +// HashIdToHashWithSha1 returns a crypto.Hash which corresponds to the given OpenPGP +// hash id, allowing sha1. 
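A hedged in-package sketch of the lookups (hypothetical test; note that SHA1 is deliberately absent from HashById, so only the WithSha1 variants resolve it):

package algorithm

import (
	"crypto"
	"testing"
)

// Hypothetical in-package test of the hash ID mappings above.
func TestHashIdSketch(t *testing.T) {
	if h, ok := HashIdToHash(8); !ok || h != crypto.SHA256 {
		t.Fatal("id 8 should map to SHA-256")
	}
	if _, ok := HashIdToHash(2); ok {
		t.Fatal("the plain lookup must reject SHA-1")
	}
	if h, ok := HashIdToHashWithSha1(2); !ok || h != crypto.SHA1 {
		t.Fatal("the WithSha1 variant should resolve SHA-1")
	}
}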
+func HashIdToHashWithSha1(id byte) (h crypto.Hash, ok bool) { + if hash, ok := HashById[id]; ok { + return hash.HashFunc(), true + } + + if id == SHA1.Id() { + return SHA1.HashFunc(), true + } + + return 0, false +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. +func HashIdToString(id byte) (name string, ok bool) { + if hash, ok := HashById[id]; ok { + return hash.String(), true + } + return "", false +} + +// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. +func HashToHashId(h crypto.Hash) (id byte, ok bool) { + for id, hash := range HashById { + if hash.HashFunc() == h { + return id, true + } + } + + return 0, false +} + +// HashToHashIdWithSha1 returns an OpenPGP hash id which corresponds the given Hash, +// allowing instances of SHA1 +func HashToHashIdWithSha1(h crypto.Hash) (id byte, ok bool) { + for id, hash := range HashById { + if hash.HashFunc() == h { + return id, true + } + } + + if h == SHA1.HashFunc() { + return SHA1.Id(), true + } + + return 0, false +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go new file mode 100644 index 0000000000..888767c4e4 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go @@ -0,0 +1,171 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + x25519lib "github.com/cloudflare/circl/dh/x25519" +) + +type curve25519 struct{} + +func NewCurve25519() *curve25519 { + return &curve25519{} +} + +func (c *curve25519) GetCurveName() string { + return "curve25519" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *curve25519) MarshalBytePoint(point []byte) []byte { + return append([]byte{0x40}, point...) +} + +// UnmarshalBytePoint decodes the public point to native format, removing the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *curve25519) UnmarshalBytePoint(point []byte) []byte { + if len(point) != x25519lib.Size+1 { + return nil + } + + // Remove prefix + return point[1:] +} + +// MarshalByteSecret encodes the secret scalar from native format. +// Note that the EC secret scalar differs from the definition of public keys in +// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is +// more uniform with how big integers are represented in OpenPGP, and (2) the +// leading zeros are truncated. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1 +// Note that leading zero bytes are stripped later when encoding as an MPI. +func (c *curve25519) MarshalByteSecret(secret []byte) []byte { + d := make([]byte, x25519lib.Size) + copyReversed(d, secret) + + // The following ensures that the private key is a number of the form + // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of + // the curve. + // + // This masking is done internally in the underlying lib and so is unnecessary + // for security, but OpenPGP implementations require that private keys be + // pre-masked. + d[0] &= 127 + d[0] |= 64 + d[31] &= 248 + + return d +} + +// UnmarshalByteSecret decodes the secret scalar from native format. 
+// Note that the EC secret scalar differs from the definition of public keys in +// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is +// more uniform with how big integers are represented in OpenPGP, and (2) the +// leading zeros are truncated. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1 +func (c *curve25519) UnmarshalByteSecret(d []byte) []byte { + if len(d) > x25519lib.Size { + return nil + } + + // Ensure truncated leading bytes are re-added + secret := make([]byte, x25519lib.Size) + copyReversed(secret, d) + + return secret +} + +// generateKeyPairBytes Generates a private-public key-pair. +// 'priv' is a private key; a little-endian scalar belonging to the set +// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the +// curve. 'pub' is simply 'priv' * G where G is the base point. +// See https://cr.yp.to/ecdh.html and RFC7748, sec 5. +func (c *curve25519) generateKeyPairBytes(rand io.Reader) (priv, pub x25519lib.Key, err error) { + _, err = io.ReadFull(rand, priv[:]) + if err != nil { + return + } + + x25519lib.KeyGen(&pub, &priv) + return +} + +func (c *curve25519) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) { + priv, pub, err := c.generateKeyPairBytes(rand) + if err != nil { + return + } + + return pub[:], priv[:], nil +} + +func (c *genericCurve) MaskSecret(secret []byte) []byte { + return secret +} + +func (c *curve25519) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { + // RFC6637 §8: "Generate an ephemeral key pair {v, V=vG}" + // ephemeralPrivate corresponds to `v`. + // ephemeralPublic corresponds to `V`. + ephemeralPrivate, ephemeralPublic, err := c.generateKeyPairBytes(rand) + if err != nil { + return nil, nil, err + } + + // RFC6637 §8: "Obtain the authenticated recipient public key R" + // pubKey corresponds to `R`. + var pubKey x25519lib.Key + copy(pubKey[:], point) + + // RFC6637 §8: "Compute the shared point S = vR" + // "VB = convert point V to the octet string" + // sharedPoint corresponds to `VB`. + var sharedPoint x25519lib.Key + x25519lib.Shared(&sharedPoint, &ephemeralPrivate, &pubKey) + + return ephemeralPublic[:], sharedPoint[:], nil +} + +func (c *curve25519) Decaps(vsG, secret []byte) (sharedSecret []byte, err error) { + var ephemeralPublic, decodedPrivate, sharedPoint x25519lib.Key + // RFC6637 §8: "The decryption is the inverse of the method given." + // All quoted descriptions in comments below describe encryption, and + // the reverse is performed. + // vsG corresponds to `VB` in RFC6637 §8 . + + // RFC6637 §8: "VB = convert point V to the octet string" + copy(ephemeralPublic[:], vsG) + + // decodedPrivate corresponds to `r` in RFC6637 §8 . + copy(decodedPrivate[:], secret) + + // RFC6637 §8: "Note that the recipient obtains the shared secret by calculating + // S = rV = rvG, where (r,R) is the recipient's key pair." + // sharedPoint corresponds to `S`. 
+ x25519lib.Shared(&sharedPoint, &decodedPrivate, &ephemeralPublic) + + return sharedPoint[:], nil +} + +func (c *curve25519) ValidateECDH(point []byte, secret []byte) (err error) { + var pk, sk x25519lib.Key + copy(sk[:], secret) + x25519lib.KeyGen(&pk, &sk) + + if subtle.ConstantTimeCompare(point, pk[:]) == 0 { + return errors.KeyInvalidError("ecc: invalid curve25519 public point") + } + + return nil +} + +func copyReversed(out []byte, in []byte) { + l := len(in) + for i := 0; i < l; i++ { + out[i] = in[l-i-1] + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go new file mode 100644 index 0000000000..97f891ffc0 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go @@ -0,0 +1,141 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "bytes" + "crypto/elliptic" + + "github.com/ProtonMail/go-crypto/bitcurves" + "github.com/ProtonMail/go-crypto/brainpool" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" +) + +type CurveInfo struct { + GenName string + Oid *encoding.OID + Curve Curve +} + +var Curves = []CurveInfo{ + { + // NIST P-256 + GenName: "P256", + Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}), + Curve: NewGenericCurve(elliptic.P256()), + }, + { + // NIST P-384 + GenName: "P384", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}), + Curve: NewGenericCurve(elliptic.P384()), + }, + { + // NIST P-521 + GenName: "P521", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}), + Curve: NewGenericCurve(elliptic.P521()), + }, + { + // SecP256k1 + GenName: "SecP256k1", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}), + Curve: NewGenericCurve(bitcurves.S256()), + }, + { + // Curve25519 + GenName: "Curve25519", + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), + Curve: NewCurve25519(), + }, + { + // x448 + GenName: "Curve448", + Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}), + Curve: NewX448(), + }, + { + // Ed25519 + GenName: "Curve25519", + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), + Curve: NewEd25519(), + }, + { + // Ed448 + GenName: "Curve448", + Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x71}), + Curve: NewEd448(), + }, + { + // BrainpoolP256r1 + GenName: "BrainpoolP256", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}), + Curve: NewGenericCurve(brainpool.P256r1()), + }, + { + // BrainpoolP384r1 + GenName: "BrainpoolP384", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}), + Curve: NewGenericCurve(brainpool.P384r1()), + }, + { + // BrainpoolP512r1 + GenName: "BrainpoolP512", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}), + Curve: NewGenericCurve(brainpool.P512r1()), + }, +} + +func FindByCurve(curve Curve) *CurveInfo { + for _, curveInfo := range Curves { + if curveInfo.Curve.GetCurveName() == curve.GetCurveName() { + return &curveInfo + } + } + return nil +} + +func FindByOid(oid encoding.Field) *CurveInfo { + var rawBytes = oid.Bytes() + for _, curveInfo := range Curves { + if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) { + return &curveInfo + } + } + return nil +} + +func FindEdDSAByGenName(curveGenName string) EdDSACurve { + for _, curveInfo := range Curves { + if curveInfo.GenName == curveGenName { + curve, ok := 
curveInfo.Curve.(EdDSACurve) + if ok { + return curve + } + } + } + return nil +} + +func FindECDSAByGenName(curveGenName string) ECDSACurve { + for _, curveInfo := range Curves { + if curveInfo.GenName == curveGenName { + curve, ok := curveInfo.Curve.(ECDSACurve) + if ok { + return curve + } + } + } + return nil +} + +func FindECDHByGenName(curveGenName string) ECDHCurve { + for _, curveInfo := range Curves { + if curveInfo.GenName == curveGenName { + curve, ok := curveInfo.Curve.(ECDHCurve) + if ok { + return curve + } + } + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go new file mode 100644 index 0000000000..5ed9c93b3d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go @@ -0,0 +1,48 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "io" + "math/big" +) + +type Curve interface { + GetCurveName() string +} + +type ECDSACurve interface { + Curve + MarshalIntegerPoint(x, y *big.Int) []byte + UnmarshalIntegerPoint([]byte) (x, y *big.Int) + MarshalIntegerSecret(d *big.Int) []byte + UnmarshalIntegerSecret(d []byte) *big.Int + GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) + Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error) + Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool + ValidateECDSA(x, y *big.Int, secret []byte) error +} + +type EdDSACurve interface { + Curve + MarshalBytePoint(x []byte) []byte + UnmarshalBytePoint([]byte) (x []byte) + MarshalByteSecret(d []byte) []byte + UnmarshalByteSecret(d []byte) []byte + MarshalSignature(sig []byte) (r, s []byte) + UnmarshalSignature(r, s []byte) (sig []byte) + GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) + Sign(publicKey, privateKey, message []byte) (sig []byte, err error) + Verify(publicKey, message, sig []byte) bool + ValidateEdDSA(publicKey, privateKey []byte) (err error) +} +type ECDHCurve interface { + Curve + MarshalBytePoint([]byte) (encoded []byte) + UnmarshalBytePoint(encoded []byte) []byte + MarshalByteSecret(d []byte) []byte + UnmarshalByteSecret(d []byte) []byte + GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) + Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) + Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) + ValidateECDH(public []byte, secret []byte) error +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go new file mode 100644 index 0000000000..54a08a8a38 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go @@ -0,0 +1,112 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed25519lib "github.com/cloudflare/circl/sign/ed25519" +) + +const ed25519Size = 32 + +type ed25519 struct{} + +func NewEd25519() *ed25519 { + return &ed25519{} +} + +func (c *ed25519) GetCurveName() string { + return "ed25519" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) MarshalBytePoint(x []byte) []byte { + return append([]byte{0x40}, x...) 
+} + +// UnmarshalBytePoint decodes a point from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) UnmarshalBytePoint(point []byte) (x []byte) { + if len(point) != ed25519lib.PublicKeySize+1 { + return nil + } + + // Return unprefixed + return point[1:] +} + +// MarshalByteSecret encodes a scalar in native format. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) MarshalByteSecret(d []byte) []byte { + return d +} + +// UnmarshalByteSecret decodes a scalar in native format and re-adds the stripped leading zeroes +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) UnmarshalByteSecret(s []byte) (d []byte) { + if len(s) > ed25519lib.SeedSize { + return nil + } + + // Handle stripped leading zeroes + d = make([]byte, ed25519lib.SeedSize) + copy(d[ed25519lib.SeedSize-len(s):], s) + return +} + +// MarshalSignature splits a signature in R and S. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1 +func (c *ed25519) MarshalSignature(sig []byte) (r, s []byte) { + return sig[:ed25519Size], sig[ed25519Size:] +} + +// UnmarshalSignature decodes R and S in the native format, re-adding the stripped leading zeroes +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1 +func (c *ed25519) UnmarshalSignature(r, s []byte) (sig []byte) { + // Check size + if len(r) > 32 || len(s) > 32 { + return nil + } + + sig = make([]byte, ed25519lib.SignatureSize) + + // Handle stripped leading zeroes + copy(sig[ed25519Size-len(r):ed25519Size], r) + copy(sig[ed25519lib.SignatureSize-len(s):], s) + return sig +} + +func (c *ed25519) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { + pk, sk, err := ed25519lib.GenerateKey(rand) + + if err != nil { + return nil, nil, err + } + + return pk, sk[:ed25519lib.SeedSize], nil +} + +func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey { + return append(privateKey, publicKey...) +} + +func (c *ed25519) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { + sig = ed25519lib.Sign(getEd25519Sk(publicKey, privateKey), message) + return sig, nil +} + +func (c *ed25519) Verify(publicKey, message, sig []byte) bool { + return ed25519lib.Verify(publicKey, message, sig) +} + +func (c *ed25519) ValidateEdDSA(publicKey, privateKey []byte) (err error) { + priv := getEd25519Sk(publicKey, privateKey) + expectedPriv := ed25519lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 { + return errors.KeyInvalidError("ecc: invalid ed25519 secret") + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go new file mode 100644 index 0000000000..18cd80434b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go @@ -0,0 +1,111 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. 
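The ed25519 wrapper above composes into a generate/sign/verify roundtrip. A minimal sketch, written as if the openpgp/internal/ecc package were importable (it is internal to go-crypto, so this is illustrative only):

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
)

func main() {
	c := ecc.NewEd25519()

	// GenerateEdDSA returns the public key and the 32-byte seed.
	pub, priv, err := c.GenerateEdDSA(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Sign reassembles the full circl private key from (priv, pub).
	sig, err := c.Sign(pub, priv, []byte("hello"))
	if err != nil {
		panic(err)
	}

	fmt.Println(c.Verify(pub, []byte("hello"), sig)) // true
}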
+package ecc
+
+import (
+	"crypto/subtle"
+	"io"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	ed448lib "github.com/cloudflare/circl/sign/ed448"
+)
+
+type ed448 struct{}
+
+func NewEd448() *ed448 {
+	return &ed448{}
+}
+
+func (c *ed448) GetCurveName() string {
+	return "ed448"
+}
+
+// MarshalBytePoint encodes the public point from native format, adding the prefix.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
+func (c *ed448) MarshalBytePoint(x []byte) []byte {
+	// Return prefixed
+	return append([]byte{0x40}, x...)
+}
+
+// UnmarshalBytePoint decodes a point from prefixed format to native.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
+func (c *ed448) UnmarshalBytePoint(point []byte) (x []byte) {
+	if len(point) != ed448lib.PublicKeySize+1 {
+		return nil
+	}
+
+	// Strip prefix
+	return point[1:]
+}
+
+// MarshalByteSecret encodes a scalar from native format to prefixed.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
+func (c *ed448) MarshalByteSecret(d []byte) []byte {
+	// Return prefixed
+	return append([]byte{0x40}, d...)
+}
+
+// UnmarshalByteSecret decodes a scalar from prefixed format to native.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
+func (c *ed448) UnmarshalByteSecret(s []byte) (d []byte) {
+	// Check prefixed size
+	if len(s) != ed448lib.SeedSize+1 {
+		return nil
+	}
+
+	// Strip prefix
+	return s[1:]
+}
+
+// MarshalSignature splits a signature in R and S, where R is in prefixed native format and
+// S is an MPI with value zero.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2
+func (c *ed448) MarshalSignature(sig []byte) (r, s []byte) {
+	return append([]byte{0x40}, sig...), []byte{}
+}
+
+// UnmarshalSignature decodes R and S in the native format. Only R is used, in prefixed native format.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2
+func (c *ed448) UnmarshalSignature(r, s []byte) (sig []byte) {
+	if len(r) != ed448lib.SignatureSize+1 {
+		return nil
+	}
+
+	return r[1:]
+}
+
+func (c *ed448) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) {
+	pk, sk, err := ed448lib.GenerateKey(rand)
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return pk, sk[:ed448lib.SeedSize], nil
+}
+
+func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey {
+	return append(privateKey, publicKey...)
+}
+
+func (c *ed448) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) {
+	// Ed448 is used with the empty string as a context string.
+	// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7
+	sig = ed448lib.Sign(getEd448Sk(publicKey, privateKey), message, "")
+
+	return sig, nil
+}
+
+func (c *ed448) Verify(publicKey, message, sig []byte) bool {
+	// Ed448 is used with the empty string as a context string.
+ // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 + return ed448lib.Verify(publicKey, message, sig, "") +} + +func (c *ed448) ValidateEdDSA(publicKey, privateKey []byte) (err error) { + priv := getEd448Sk(publicKey, privateKey) + expectedPriv := ed448lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 { + return errors.KeyInvalidError("ecc: invalid ed448 secret") + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go new file mode 100644 index 0000000000..e28d7c7106 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go @@ -0,0 +1,149 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "fmt" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "io" + "math/big" +) + +type genericCurve struct { + Curve elliptic.Curve +} + +func NewGenericCurve(c elliptic.Curve) *genericCurve { + return &genericCurve{ + Curve: c, + } +} + +func (c *genericCurve) GetCurveName() string { + return c.Curve.Params().Name +} + +func (c *genericCurve) MarshalBytePoint(point []byte) []byte { + return point +} + +func (c *genericCurve) UnmarshalBytePoint(point []byte) []byte { + return point +} + +func (c *genericCurve) MarshalIntegerPoint(x, y *big.Int) []byte { + return elliptic.Marshal(c.Curve, x, y) +} + +func (c *genericCurve) UnmarshalIntegerPoint(point []byte) (x, y *big.Int) { + return elliptic.Unmarshal(c.Curve, point) +} + +func (c *genericCurve) MarshalByteSecret(d []byte) []byte { + return d +} + +func (c *genericCurve) UnmarshalByteSecret(d []byte) []byte { + return d +} + +func (c *genericCurve) MarshalIntegerSecret(d *big.Int) []byte { + return d.Bytes() +} + +func (c *genericCurve) UnmarshalIntegerSecret(d []byte) *big.Int { + return new(big.Int).SetBytes(d) +} + +func (c *genericCurve) GenerateECDH(rand io.Reader) (point, secret []byte, err error) { + secret, x, y, err := elliptic.GenerateKey(c.Curve, rand) + if err != nil { + return nil, nil, err + } + + point = elliptic.Marshal(c.Curve, x, y) + return point, secret, nil +} + +func (c *genericCurve) GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) { + priv, err := ecdsa.GenerateKey(c.Curve, rand) + if err != nil { + return + } + + return priv.X, priv.Y, priv.D, nil +} + +func (c *genericCurve) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { + xP, yP := elliptic.Unmarshal(c.Curve, point) + if xP == nil { + panic("invalid point") + } + + d, x, y, err := elliptic.GenerateKey(c.Curve, rand) + if err != nil { + return nil, nil, err + } + + vsG := elliptic.Marshal(c.Curve, x, y) + zbBig, _ := c.Curve.ScalarMult(xP, yP, d) + + byteLen := (c.Curve.Params().BitSize + 7) >> 3 + zb := make([]byte, byteLen) + zbBytes := zbBig.Bytes() + copy(zb[byteLen-len(zbBytes):], zbBytes) + + return vsG, zb, nil +} + +func (c *genericCurve) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) { + x, y := elliptic.Unmarshal(c.Curve, ephemeral) + zbBig, _ := c.Curve.ScalarMult(x, y, secret) + byteLen := (c.Curve.Params().BitSize + 7) >> 3 + zb := make([]byte, byteLen) + zbBytes := zbBig.Bytes() + copy(zb[byteLen-len(zbBytes):], zbBytes) + + return zb, nil +} + +func (c *genericCurve) Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error) { + priv := 
&ecdsa.PrivateKey{D: d, PublicKey: ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve}}
+	return ecdsa.Sign(rand, priv, hash)
+}
+
+func (c *genericCurve) Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool {
+	pub := &ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve}
+	return ecdsa.Verify(pub, hash, r, s)
+}
+
+func (c *genericCurve) validate(xP, yP *big.Int, secret []byte) error {
+	// the public point should not be at infinity (0,0)
+	zero := new(big.Int)
+	if xP.Cmp(zero) == 0 && yP.Cmp(zero) == 0 {
+		return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", c.Curve.Params().Name))
+	}
+
+	// re-derive the public point Q' = (X,Y) = dG
+	// to compare to declared Q in public key
+	expectedX, expectedY := c.Curve.ScalarBaseMult(secret)
+	if xP.Cmp(expectedX) != 0 || yP.Cmp(expectedY) != 0 {
+		return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name))
+	}
+
+	return nil
+}
+
+func (c *genericCurve) ValidateECDSA(xP, yP *big.Int, secret []byte) error {
+	return c.validate(xP, yP, secret)
+}
+
+func (c *genericCurve) ValidateECDH(point []byte, secret []byte) error {
+	xP, yP := elliptic.Unmarshal(c.Curve, point)
+	if xP == nil {
+		return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name))
+	}
+
+	return c.validate(xP, yP, secret)
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go
new file mode 100644
index 0000000000..df04262e9e
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go
@@ -0,0 +1,107 @@
+// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
+package ecc
+
+import (
+	"crypto/subtle"
+	"io"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	x448lib "github.com/cloudflare/circl/dh/x448"
+)
+
+type x448 struct{}
+
+func NewX448() *x448 {
+	return &x448{}
+}
+
+func (c *x448) GetCurveName() string {
+	return "x448"
+}
+
+// MarshalBytePoint encodes the public point from native format, adding the prefix.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
+func (c *x448) MarshalBytePoint(point []byte) []byte {
+	return append([]byte{0x40}, point...)
+}
+
+// UnmarshalBytePoint decodes a point from prefixed format to native.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
+func (c *x448) UnmarshalBytePoint(point []byte) []byte {
+	if len(point) != x448lib.Size+1 {
+		return nil
+	}
+
+	return point[1:]
+}
+
+// MarshalByteSecret encodes a scalar from native format to prefixed.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2
+func (c *x448) MarshalByteSecret(d []byte) []byte {
+	return append([]byte{0x40}, d...)
+}
+
+// UnmarshalByteSecret decodes a scalar from prefixed format to native.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2
+func (c *x448) UnmarshalByteSecret(d []byte) []byte {
+	if len(d) != x448lib.Size+1 {
+		return nil
+	}
+
+	// Store without prefix
+	return d[1:]
+}
+
+func (c *x448) generateKeyPairBytes(rand io.Reader) (sk, pk x448lib.Key, err error) {
+	if _, err = rand.Read(sk[:]); err != nil {
+		return
+	}
+
+	x448lib.KeyGen(&pk, &sk)
+	return
+}
+
+func (c *x448) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) {
+	priv, pub, err := c.generateKeyPairBytes(rand)
+	if err != nil {
+		return
+	}
+
+	return pub[:], priv[:], nil
+}
+
+func (c *x448) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) {
+	var pk, ss x448lib.Key
+	seed, e, err := c.generateKeyPairBytes(rand)
+	if err != nil {
+		return nil, nil, err
+	}
+	copy(pk[:], point)
+	x448lib.Shared(&ss, &seed, &pk)
+
+	return e[:], ss[:], nil
+}
+
+func (c *x448) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) {
+	var ss, sk, e x448lib.Key
+
+	copy(sk[:], secret)
+	copy(e[:], ephemeral)
+	x448lib.Shared(&ss, &sk, &e)
+
+	return ss[:], nil
+}
+
+func (c *x448) ValidateECDH(point []byte, secret []byte) error {
+	var sk, pk, expectedPk x448lib.Key
+
+	copy(pk[:], point)
+	copy(sk[:], secret)
+	x448lib.KeyGen(&expectedPk, &sk)
+
+	if subtle.ConstantTimeCompare(expectedPk[:], pk[:]) == 0 {
+		return errors.KeyInvalidError("ecc: invalid x448 public point")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go
new file mode 100644
index 0000000000..6c921481b7
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go
@@ -0,0 +1,27 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package encoding implements openpgp packet field encodings as specified in
+// RFC 4880 and 6637.
+package encoding
+
+import "io"
+
+// Field is an encoded field of an openpgp packet.
+type Field interface {
+	// Bytes returns the decoded data.
+	Bytes() []byte
+
+	// BitLength is the size in bits of the decoded data.
+	BitLength() uint16
+
+	// EncodedBytes returns the encoded data.
+	EncodedBytes() []byte
+
+	// EncodedLength is the size in bytes of the encoded data.
+	EncodedLength() uint16
+
+	// ReadFrom reads the next Field from r.
+	ReadFrom(r io.Reader) (int64, error)
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go
new file mode 100644
index 0000000000..02e5e695c3
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go
@@ -0,0 +1,91 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package encoding
+
+import (
+	"io"
+	"math/big"
+	"math/bits"
+)
+
+// An MPI is used to store the contents of a big integer, along with the bit
+// length that was specified in the original input. This allows the MPI to be
+// reserialized exactly.
+type MPI struct {
+	bytes     []byte
+	bitLength uint16
+}
+
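The x448 Encaps/Decaps pair above is symmetric: sender and recipient derive the same shared secret. A sketch under the same caveat as before (ecc is an internal package, shown for illustration only):

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
)

func main() {
	c := ecc.NewX448()

	// Recipient key pair: point is the public key, secret the private scalar.
	point, secret, err := c.GenerateECDH(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Sender side: ephemeral public key plus shared secret.
	ephemeral, ss1, err := c.Encaps(rand.Reader, point)
	if err != nil {
		panic(err)
	}

	// Recipient side: recover the same shared secret.
	ss2, err := c.Decaps(ephemeral, secret)
	if err != nil {
		panic(err)
	}

	fmt.Println(bytes.Equal(ss1, ss2)) // true
}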
+// NewMPI returns an MPI initialized with bytes.
+func NewMPI(bytes []byte) *MPI {
+	for len(bytes) != 0 && bytes[0] == 0 {
+		bytes = bytes[1:]
+	}
+	if len(bytes) == 0 {
+		bitLength := uint16(0)
+		return &MPI{bytes, bitLength}
+	}
+	bitLength := 8*uint16(len(bytes)-1) + uint16(bits.Len8(bytes[0]))
+	return &MPI{bytes, bitLength}
+}
+
+// Bytes returns the decoded data.
+func (m *MPI) Bytes() []byte {
+	return m.bytes
+}
+
+// BitLength is the size in bits of the decoded data.
+func (m *MPI) BitLength() uint16 {
+	return m.bitLength
+}
+
+// EncodedBytes returns the encoded data.
+func (m *MPI) EncodedBytes() []byte {
+	return append([]byte{byte(m.bitLength >> 8), byte(m.bitLength)}, m.bytes...)
+}
+
+// EncodedLength is the size in bytes of the encoded data.
+func (m *MPI) EncodedLength() uint16 {
+	return uint16(2 + len(m.bytes))
+}
+
+// ReadFrom reads into m the next MPI from r.
+func (m *MPI) ReadFrom(r io.Reader) (int64, error) {
+	var buf [2]byte
+	n, err := io.ReadFull(r, buf[0:])
+	if err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return int64(n), err
+	}
+
+	m.bitLength = uint16(buf[0])<<8 | uint16(buf[1])
+	m.bytes = make([]byte, (int(m.bitLength)+7)/8)
+
+	nn, err := io.ReadFull(r, m.bytes)
+	if err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+
+	// remove leading zero bytes from malformed GnuPG encoded MPIs:
+	// https://bugs.gnupg.org/gnupg/issue1853
+	// for _, b := range m.bytes {
+	// 	if b != 0 {
+	// 		break
+	// 	}
+	// 	m.bytes = m.bytes[1:]
+	// 	m.bitLength -= 8
+	// }
+
+	return int64(n) + int64(nn), err
+}
+
+// SetBig initializes m with the bits from n.
+func (m *MPI) SetBig(n *big.Int) *MPI {
+	m.bytes = n.Bytes()
+	m.bitLength = uint16(n.BitLen())
+	return m
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
new file mode 100644
index 0000000000..c9df9fe232
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
@@ -0,0 +1,88 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package encoding
+
+import (
+	"io"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+)
+
+// OID is used to store a variable-length field with a one-octet size
+// prefix. See https://tools.ietf.org/html/rfc6637#section-9.
+type OID struct {
+	bytes []byte
+}
+
+const (
+	// maxOID is the maximum number of bytes in an OID.
+	maxOID = 254
+	// reservedOIDLength1 and reservedOIDLength2 are OID lengths that the RFC
+	// specifies are reserved.
+	reservedOIDLength1 = 0
+	reservedOIDLength2 = 0xff
+)
+
+// NewOID returns an OID initialized with bytes.
+func NewOID(bytes []byte) *OID {
+	switch len(bytes) {
+	case reservedOIDLength1, reservedOIDLength2:
+		panic("encoding: NewOID argument length is reserved")
+	default:
+		if len(bytes) > maxOID {
+			panic("encoding: NewOID argument too large")
+		}
+	}
+
+	return &OID{
+		bytes: bytes,
+	}
+}
+
+// Bytes returns the decoded data.
+func (o *OID) Bytes() []byte {
+	return o.bytes
+}
+
+// BitLength is the size in bits of the decoded data.
+func (o *OID) BitLength() uint16 {
+	return uint16(len(o.bytes) * 8)
+}
+
+// EncodedBytes returns the encoded data.
+func (o *OID) EncodedBytes() []byte {
+	return append([]byte{byte(len(o.bytes))}, o.bytes...)
+}
+
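The MPI wire format above is a two-octet big-endian bit count followed by the magnitude bytes. A sketch of the roundtrip, again written as if the internal encoding package were importable:

package main

import (
	"bytes"
	"fmt"
	"math/big"

	"github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
)

func main() {
	m := new(encoding.MPI).SetBig(big.NewInt(0x0FFF))

	// Two-octet bit count (12), then the magnitude bytes.
	fmt.Printf("% X\n", m.EncodedBytes()) // 00 0C 0F FF

	// ReadFrom consumes the same wire format.
	var parsed encoding.MPI
	if _, err := parsed.ReadFrom(bytes.NewReader(m.EncodedBytes())); err != nil {
		panic(err)
	}
	fmt.Println(parsed.BitLength()) // 12
}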
+// EncodedLength is the size in bytes of the encoded data.
+func (o *OID) EncodedLength() uint16 {
+	return uint16(1 + len(o.bytes))
+}
+
+// ReadFrom reads into o the next OID from r.
+func (o *OID) ReadFrom(r io.Reader) (int64, error) {
+	var buf [1]byte
+	n, err := io.ReadFull(r, buf[:])
+	if err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return int64(n), err
+	}
+
+	switch buf[0] {
+	case reservedOIDLength1, reservedOIDLength2:
+		return int64(n), errors.UnsupportedError("reserved for future extensions")
+	}
+
+	o.bytes = make([]byte, buf[0])
+
+	nn, err := io.ReadFull(r, o.bytes)
+	if err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+
+	return int64(n) + int64(nn), err
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go
new file mode 100644
index 0000000000..a40e45beee
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go
@@ -0,0 +1,445 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	goerrors "errors"
+	"io"
+	"math/big"
+	"time"
+
+	"github.com/ProtonMail/go-crypto/openpgp/ecdh"
+	"github.com/ProtonMail/go-crypto/openpgp/ecdsa"
+	"github.com/ProtonMail/go-crypto/openpgp/ed25519"
+	"github.com/ProtonMail/go-crypto/openpgp/ed448"
+	"github.com/ProtonMail/go-crypto/openpgp/eddsa"
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
+	"github.com/ProtonMail/go-crypto/openpgp/packet"
+	"github.com/ProtonMail/go-crypto/openpgp/x25519"
+	"github.com/ProtonMail/go-crypto/openpgp/x448"
+)
+
+// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
+// single identity composed of the given full name, comment and email, any of
+// which may be empty but must not contain any of "()<>\x00".
+// If config is nil, sensible defaults will be used.
+func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
+	creationTime := config.Now()
+	keyLifetimeSecs := config.KeyLifetime()
+
+	// Generate a primary signing key
+	primaryPrivRaw, err := newSigner(config)
+	if err != nil {
+		return nil, err
+	}
+	primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw)
+	if config.V6() {
+		primary.UpgradeToV6()
+	}
+
+	e := &Entity{
+		PrimaryKey: &primary.PublicKey,
+		PrivateKey: primary,
+		Identities: make(map[string]*Identity),
+		Subkeys:    []Subkey{},
+		Signatures: []*packet.Signature{},
+	}
+
+	if config.V6() {
+		// In v6 keys algorithm preferences should be stored in direct key signatures
+		selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypeDirectSignature, config)
+		err = writeKeyProperties(selfSignature, creationTime, keyLifetimeSecs, config)
+		if err != nil {
+			return nil, err
+		}
+		err = selfSignature.SignDirectKeyBinding(&primary.PublicKey, primary, config)
+		if err != nil {
+			return nil, err
+		}
+		e.Signatures = append(e.Signatures, selfSignature)
+		e.SelfSignature = selfSignature
+	}
+
+	err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6())
+	if err != nil {
+		return nil, err
+	}
+
+	// NOTE: No key expiry here, but we will not return this subkey in EncryptionKey()
+	// if the primary/master key has expired.
+ err = e.addEncryptionSubkey(config, creationTime, 0) + if err != nil { + return nil, err + } + + return e, nil +} + +func (t *Entity) AddUserId(name, comment, email string, config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6()) +} + +func writeKeyProperties(selfSignature *packet.Signature, creationTime time.Time, keyLifetimeSecs uint32, config *packet.Config) error { + selfSignature.CreationTime = creationTime + selfSignature.KeyLifetimeSecs = &keyLifetimeSecs + selfSignature.FlagsValid = true + selfSignature.FlagSign = true + selfSignature.FlagCertify = true + selfSignature.SEIPDv1 = true // true by default, see 5.8 vs. 5.14 + selfSignature.SEIPDv2 = config.AEAD() != nil + + // Set the PreferredHash for the SelfSignature from the packet.Config. + // If it is not the must-implement algorithm from rfc4880bis, append that. + hash, ok := algorithm.HashToHashId(config.Hash()) + if !ok { + return errors.UnsupportedError("unsupported preferred hash function") + } + + selfSignature.PreferredHash = []uint8{hash} + if config.Hash() != crypto.SHA256 { + selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256)) + } + + // Likewise for DefaultCipher. + selfSignature.PreferredSymmetric = []uint8{uint8(config.Cipher())} + if config.Cipher() != packet.CipherAES128 { + selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128)) + } + + // We set CompressionNone as the preferred compression algorithm because + // of compression side channel attacks, then append the configured + // DefaultCompressionAlgo if any is set (to signal support for cases + // where the application knows that using compression is safe). + selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)} + if config.Compression() != packet.CompressionNone { + selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression())) + } + + // And for DefaultMode. 
+	modes := []uint8{uint8(config.AEAD().Mode())}
+	if config.AEAD().Mode() != packet.AEADModeOCB {
+		modes = append(modes, uint8(packet.AEADModeOCB))
+	}
+
+	// For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB)
+	for _, cipher := range selfSignature.PreferredSymmetric {
+		for _, mode := range modes {
+			selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode})
+		}
+	}
+	return nil
+}
+
+func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32, writeProperties bool) error {
+	uid := packet.NewUserId(name, comment, email)
+	if uid == nil {
+		return errors.InvalidArgumentError("user id field contained invalid characters")
+	}
+
+	if _, ok := t.Identities[uid.Id]; ok {
+		return errors.InvalidArgumentError("user id already exists")
+	}
+
+	primary := t.PrivateKey
+	isPrimaryId := len(t.Identities) == 0
+	selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config)
+	if writeProperties {
+		err := writeKeyProperties(selfSignature, creationTime, keyLifetimeSecs, config)
+		if err != nil {
+			return err
+		}
+	}
+	selfSignature.IsPrimaryId = &isPrimaryId
+
+	// User ID binding signature
+	err := selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config)
+	if err != nil {
+		return err
+	}
+	t.Identities[uid.Id] = &Identity{
+		Name:          uid.Id,
+		UserId:        uid,
+		SelfSignature: selfSignature,
+		Signatures:    []*packet.Signature{selfSignature},
+	}
+	return nil
+}
+
+// AddSigningSubkey adds a signing keypair as a subkey to the Entity.
+// If config is nil, sensible defaults will be used.
+func (e *Entity) AddSigningSubkey(config *packet.Config) error {
+	creationTime := config.Now()
+	keyLifetimeSecs := config.KeyLifetime()
+
+	subPrivRaw, err := newSigner(config)
+	if err != nil {
+		return err
+	}
+	sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw)
+	sub.IsSubkey = true
+	if config.V6() {
+		sub.UpgradeToV6()
+	}
+
+	subkey := Subkey{
+		PublicKey:  &sub.PublicKey,
+		PrivateKey: sub,
+	}
+	subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config)
+	subkey.Sig.CreationTime = creationTime
+	subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs
+	subkey.Sig.FlagsValid = true
+	subkey.Sig.FlagSign = true
+	subkey.Sig.EmbeddedSignature = createSignaturePacket(subkey.PublicKey, packet.SigTypePrimaryKeyBinding, config)
+	subkey.Sig.EmbeddedSignature.CreationTime = creationTime
+
+	err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config)
+	if err != nil {
+		return err
+	}
+
+	err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
+	if err != nil {
+		return err
+	}
+
+	e.Subkeys = append(e.Subkeys, subkey)
+	return nil
+}
+
+// AddEncryptionSubkey adds an encryption keypair as a subkey to the Entity.
+// If config is nil, sensible defaults will be used.
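The preference lists assembled by writeKeyProperties above are driven entirely by packet.Config. A sketch of a caller reaching this code through NewEntity; the Config field names used here (Algorithm, Curve, KeyLifetimeSecs) are assumptions about the fork's packet.Config and do not appear in this diff:

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	cfg := &packet.Config{
		Algorithm:       packet.PubKeyAlgoEdDSA, // assumed field name
		Curve:           packet.Curve25519,      // assumed field name
		KeyLifetimeSecs: 86400 * 365,            // assumed field name
	}

	// NewEntity signs a user ID (and, for v6 keys, a direct-key signature)
	// carrying the preferences assembled by writeKeyProperties.
	e, err := openpgp.NewEntity("Alice", "", "alice@example.com", cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("primary key id: %X\n", e.PrimaryKey.KeyId)
}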
+func (e *Entity) AddEncryptionSubkey(config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + return e.addEncryptionSubkey(config, creationTime, keyLifetimeSecs) +} + +func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error { + subPrivRaw, err := newDecrypter(config) + if err != nil { + return err + } + sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) + sub.IsSubkey = true + if config.V6() { + sub.UpgradeToV6() + } + + subkey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + } + subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config) + subkey.Sig.CreationTime = creationTime + subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs + subkey.Sig.FlagsValid = true + subkey.Sig.FlagEncryptStorage = true + subkey.Sig.FlagEncryptCommunications = true + + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return err + } + + e.Subkeys = append(e.Subkeys, subkey) + return nil +} + +// Generates a signing key +func newSigner(config *packet.Config) (signer interface{}, err error) { + switch config.PublicKeyAlgorithm() { + case packet.PubKeyAlgoRSA: + bits := config.RSAModulusBits() + if bits < 1024 { + return nil, errors.InvalidArgumentError("bits must be >= 1024") + } + if config != nil && len(config.RSAPrimes) >= 2 { + primes := config.RSAPrimes[0:2] + config.RSAPrimes = config.RSAPrimes[2:] + return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) + } + return rsa.GenerateKey(config.Random(), bits) + case packet.PubKeyAlgoEdDSA: + if config.V6() { + // Implementations MUST NOT accept or generate v6 key material + // using the deprecated OIDs. + return nil, errors.InvalidArgumentError("EdDSALegacy cannot be used for v6 keys") + } + curve := ecc.FindEdDSAByGenName(string(config.CurveName())) + if curve == nil { + return nil, errors.InvalidArgumentError("unsupported curve") + } + + priv, err := eddsa.GenerateKey(config.Random(), curve) + if err != nil { + return nil, err + } + return priv, nil + case packet.PubKeyAlgoECDSA: + curve := ecc.FindECDSAByGenName(string(config.CurveName())) + if curve == nil { + return nil, errors.InvalidArgumentError("unsupported curve") + } + + priv, err := ecdsa.GenerateKey(config.Random(), curve) + if err != nil { + return nil, err + } + return priv, nil + case packet.PubKeyAlgoEd25519: + priv, err := ed25519.GenerateKey(config.Random()) + if err != nil { + return nil, err + } + return priv, nil + case packet.PubKeyAlgoEd448: + priv, err := ed448.GenerateKey(config.Random()) + if err != nil { + return nil, err + } + return priv, nil + default: + return nil, errors.InvalidArgumentError("unsupported public key algorithm") + } +} + +// Generates an encryption/decryption key +func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { + switch config.PublicKeyAlgorithm() { + case packet.PubKeyAlgoRSA: + bits := config.RSAModulusBits() + if bits < 1024 { + return nil, errors.InvalidArgumentError("bits must be >= 1024") + } + if config != nil && len(config.RSAPrimes) >= 2 { + primes := config.RSAPrimes[0:2] + config.RSAPrimes = config.RSAPrimes[2:] + return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) + } + return rsa.GenerateKey(config.Random(), bits) + case packet.PubKeyAlgoEdDSA, packet.PubKeyAlgoECDSA: + fallthrough // When passing EdDSA or ECDSA, we generate an ECDH subkey + case packet.PubKeyAlgoECDH: + if config.V6() && + (config.CurveName() 
== packet.Curve25519 || + config.CurveName() == packet.Curve448) { + // Implementations MUST NOT accept or generate v6 key material + // using the deprecated OIDs. + return nil, errors.InvalidArgumentError("ECDH with Curve25519/448 legacy cannot be used for v6 keys") + } + var kdf = ecdh.KDF{ + Hash: algorithm.SHA512, + Cipher: algorithm.AES256, + } + curve := ecc.FindECDHByGenName(string(config.CurveName())) + if curve == nil { + return nil, errors.InvalidArgumentError("unsupported curve") + } + return ecdh.GenerateKey(config.Random(), curve, kdf) + case packet.PubKeyAlgoEd25519, packet.PubKeyAlgoX25519: // When passing Ed25519, we generate an x25519 subkey + return x25519.GenerateKey(config.Random()) + case packet.PubKeyAlgoEd448, packet.PubKeyAlgoX448: // When passing Ed448, we generate an x448 subkey + return x448.GenerateKey(config.Random()) + default: + return nil, errors.InvalidArgumentError("unsupported public key algorithm") + } +} + +var bigOne = big.NewInt(1) + +// generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the +// given bit size, using the given random source and pre-populated primes. +func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) { + priv := new(rsa.PrivateKey) + priv.E = 65537 + + if nprimes < 2 { + return nil, goerrors.New("generateRSAKeyWithPrimes: nprimes must be >= 2") + } + + if bits < 1024 { + return nil, goerrors.New("generateRSAKeyWithPrimes: bits must be >= 1024") + } + + primes := make([]*big.Int, nprimes) + +NextSetOfPrimes: + for { + todo := bits + // crypto/rand should set the top two bits in each prime. + // Thus each prime has the form + // p_i = 2^bitlen(p_i) × 0.11... (in base 2). + // And the product is: + // P = 2^todo × α + // where α is the product of nprimes numbers of the form 0.11... + // + // If α < 1/2 (which can happen for nprimes > 2), we need to + // shift todo to compensate for lost bits: the mean value of 0.11... + // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2 + // will give good results. + if nprimes >= 7 { + todo += (nprimes - 2) / 5 + } + for i := 0; i < nprimes; i++ { + var err error + if len(prepopulatedPrimes) == 0 { + primes[i], err = rand.Prime(random, todo/(nprimes-i)) + if err != nil { + return nil, err + } + } else { + primes[i] = prepopulatedPrimes[0] + prepopulatedPrimes = prepopulatedPrimes[1:] + } + + todo -= primes[i].BitLen() + } + + // Make sure that primes is pairwise unequal. + for i, prime := range primes { + for j := 0; j < i; j++ { + if prime.Cmp(primes[j]) == 0 { + continue NextSetOfPrimes + } + } + } + + n := new(big.Int).Set(bigOne) + totient := new(big.Int).Set(bigOne) + pminus1 := new(big.Int) + for _, prime := range primes { + n.Mul(n, prime) + pminus1.Sub(prime, bigOne) + totient.Mul(totient, pminus1) + } + if n.BitLen() != bits { + // This should never happen for nprimes == 2 because + // crypto/rand should set the top two bits in each prime. + // For nprimes > 2 we hope it does not happen often. 
+			continue NextSetOfPrimes
+		}
+
+		priv.D = new(big.Int)
+		e := big.NewInt(int64(priv.E))
+		ok := priv.D.ModInverse(e, totient)
+
+		if ok != nil {
+			priv.Primes = primes
+			priv.N = n
+			break
+		}
+	}
+
+	priv.Precompute()
+	return priv, nil
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
new file mode 100644
index 0000000000..a071353e2e
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
@@ -0,0 +1,901 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+	goerrors "errors"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/ProtonMail/go-crypto/openpgp/armor"
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/packet"
+)
+
+// PublicKeyType is the armor type for a PGP public key.
+var PublicKeyType = "PGP PUBLIC KEY BLOCK"
+
+// PrivateKeyType is the armor type for a PGP private key.
+var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
+
+// An Entity represents the components of an OpenPGP key: a primary public key
+// (which must be a signing key), one or more identities claimed by that key,
+// and zero or more subkeys, which may be encryption keys.
+type Entity struct {
+	PrimaryKey    *packet.PublicKey
+	PrivateKey    *packet.PrivateKey
+	Identities    map[string]*Identity // indexed by Identity.Name
+	Revocations   []*packet.Signature
+	Subkeys       []Subkey
+	SelfSignature *packet.Signature   // Direct-key self signature of the PrimaryKey (contains primary key properties in v6)
+	Signatures    []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures
+}
+
+// An Identity represents an identity claimed by an Entity and zero or more
+// assertions by other entities about that claim.
+type Identity struct {
+	Name          string // by convention, has the form "Full Name (comment) <email@example.com>"
+	UserId        *packet.UserId
+	SelfSignature *packet.Signature
+	Revocations   []*packet.Signature
+	Signatures    []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures
+}
+
+// A Subkey is an additional public key in an Entity. Subkeys can be used for
+// encryption.
+type Subkey struct {
+	PublicKey   *packet.PublicKey
+	PrivateKey  *packet.PrivateKey
+	Sig         *packet.Signature
+	Revocations []*packet.Signature
+}
+
+// A Key identifies a specific public key in an Entity. This is either the
+// Entity's primary key or a subkey.
+type Key struct {
+	Entity        *Entity
+	PublicKey     *packet.PublicKey
+	PrivateKey    *packet.PrivateKey
+	SelfSignature *packet.Signature
+	Revocations   []*packet.Signature
+}
+
+// A KeyRing provides access to public and private keys.
+type KeyRing interface {
+	// KeysById returns the set of keys that have the given key id.
+	KeysById(id uint64) []Key
+	// KeysByIdUsage returns the set of keys with the given id
+	// that also meet the key usage given by requiredUsage.
+	// The requiredUsage is expressed as the bitwise-OR of
+	// packet.KeyFlag* values.
+	KeysByIdUsage(id uint64, requiredUsage byte) []Key
+	// DecryptionKeys returns all private keys that are valid for
+	// decryption.
+	DecryptionKeys() []Key
+}
+
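With the structures above in place, an Entity can be inspected through its exported fields; a minimal sketch:

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	e, err := openpgp.NewEntity("Bob", "", "bob@example.com", nil)
	if err != nil {
		panic(err)
	}

	// Walk the exported fields declared above.
	fmt.Printf("primary %X, %d revocation(s)\n", e.PrimaryKey.Fingerprint, len(e.Revocations))
	for name, ident := range e.Identities {
		fmt.Printf("uid %q, %d signature(s)\n", name, len(ident.Signatures))
	}
	for _, sub := range e.Subkeys {
		fmt.Printf("subkey %X\n", sub.PublicKey.Fingerprint)
	}
}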
+// PrimaryIdentity returns an Identity, preferring non-revoked identities,
+// identities marked as primary, or the latest-created identity, in that order.
+func (e *Entity) PrimaryIdentity() *Identity {
+	var primaryIdentity *Identity
+	for _, ident := range e.Identities {
+		if shouldPreferIdentity(primaryIdentity, ident) {
+			primaryIdentity = ident
+		}
+	}
+	return primaryIdentity
+}
+
+func shouldPreferIdentity(existingId, potentialNewId *Identity) bool {
+	if existingId == nil {
+		return true
+	}
+
+	if len(existingId.Revocations) > len(potentialNewId.Revocations) {
+		return true
+	}
+
+	if len(existingId.Revocations) < len(potentialNewId.Revocations) {
+		return false
+	}
+
+	if existingId.SelfSignature == nil {
+		return true
+	}
+
+	if existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId &&
+		!(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) {
+		return false
+	}
+
+	if !(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) &&
+		potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId {
+		return true
+	}
+
+	return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime)
+}
+
+// EncryptionKey returns the best candidate Key for encrypting a message to the
+// given Entity.
+func (e *Entity) EncryptionKey(now time.Time) (Key, bool) {
+	// Fail to find any encryption key if the...
+	primarySelfSignature, primaryIdentity := e.PrimarySelfSignature()
+	if primarySelfSignature == nil || // no self-signature found
+		e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired
+		e.Revoked(now) || // primary key has been revoked
+		primarySelfSignature.SigExpired(now) || // user ID or direct self-signature has expired
+		(primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys)
+		return Key{}, false
+	}
+
+	// Iterate the keys to find the newest, unexpired one
+	candidateSubkey := -1
+	var maxTime time.Time
+	for i, subkey := range e.Subkeys {
+		if subkey.Sig.FlagsValid &&
+			subkey.Sig.FlagEncryptCommunications &&
+			subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
+			!subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
+			!subkey.Sig.SigExpired(now) &&
+			!subkey.Revoked(now) &&
+			(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
+			candidateSubkey = i
+			maxTime = subkey.Sig.CreationTime
+		}
+	}
+
+	if candidateSubkey != -1 {
+		subkey := e.Subkeys[candidateSubkey]
+		return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
+	}
+
+	// If we don't have any subkeys for encryption and the primary key
+	// is marked as OK to encrypt with, then we can use it.
+	if primarySelfSignature.FlagsValid && primarySelfSignature.FlagEncryptCommunications &&
+		e.PrimaryKey.PubKeyAlgo.CanEncrypt() {
+		return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true
+	}
+
+	return Key{}, false
+}
+
+// CertificationKey returns the best candidate Key for certifying a key with this
+// Entity.
+func (e *Entity) CertificationKey(now time.Time) (Key, bool) {
+	return e.CertificationKeyById(now, 0)
+}
+
+// CertificationKeyById returns the Key for key certification with this
+// Entity and keyID.
+func (e *Entity) CertificationKeyById(now time.Time, id uint64) (Key, bool) {
+	return e.signingKeyByIdUsage(now, id, packet.KeyFlagCertify)
+}
+
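EncryptionKey above and the signing-key selectors just below share one shape: validate the primary key, then prefer the newest qualifying subkey. A sketch of typical call sites:

package main

import (
	"fmt"
	"time"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	entity, err := openpgp.NewEntity("Carol", "", "carol@example.com", nil)
	if err != nil {
		panic(err)
	}

	now := time.Now()

	// Newest unexpired, unrevoked encryption subkey (or the primary key).
	if key, ok := entity.EncryptionKey(now); ok {
		fmt.Printf("encrypt to %X\n", key.PublicKey.KeyId)
	}

	// Same selection logic for the signing flag (defined just below).
	if key, ok := entity.SigningKey(now); ok && key.PrivateKey != nil {
		fmt.Printf("sign with %X\n", key.PublicKey.KeyId)
	}
}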
+// SigningKey returns the best candidate Key for signing a message with this
+// Entity.
+func (e *Entity) SigningKey(now time.Time) (Key, bool) {
+	return e.SigningKeyById(now, 0)
+}
+
+// SigningKeyById returns the Key for signing a message with this
+// Entity and keyID.
+func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) {
+	return e.signingKeyByIdUsage(now, id, packet.KeyFlagSign)
+}
+
+func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, bool) {
+	// Fail to find any signing key if the...
+	primarySelfSignature, primaryIdentity := e.PrimarySelfSignature()
+	if primarySelfSignature == nil || // no self-signature found
+		e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired
+		e.Revoked(now) || // primary key has been revoked
+		primarySelfSignature.SigExpired(now) || // user ID or direct self-signature has expired
+		(primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys)
+		return Key{}, false
+	}
+
+	// Iterate the keys to find the newest, unexpired one
+	candidateSubkey := -1
+	var maxTime time.Time
+	for idx, subkey := range e.Subkeys {
+		if subkey.Sig.FlagsValid &&
+			(flags&packet.KeyFlagCertify == 0 || subkey.Sig.FlagCertify) &&
+			(flags&packet.KeyFlagSign == 0 || subkey.Sig.FlagSign) &&
+			subkey.PublicKey.PubKeyAlgo.CanSign() &&
+			!subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
+			!subkey.Sig.SigExpired(now) &&
+			!subkey.Revoked(now) &&
+			(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) &&
+			(id == 0 || subkey.PublicKey.KeyId == id) {
+			candidateSubkey = idx
+			maxTime = subkey.Sig.CreationTime
+		}
+	}
+
+	if candidateSubkey != -1 {
+		subkey := e.Subkeys[candidateSubkey]
+		return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
+	}
+
+	// If we don't have any subkeys for signing and the primary key
+	// is marked as OK to sign with, then we can use it.
+	if primarySelfSignature.FlagsValid &&
+		(flags&packet.KeyFlagCertify == 0 || primarySelfSignature.FlagCertify) &&
+		(flags&packet.KeyFlagSign == 0 || primarySelfSignature.FlagSign) &&
+		e.PrimaryKey.PubKeyAlgo.CanSign() &&
+		(id == 0 || e.PrimaryKey.KeyId == id) {
+		return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true
+	}
+
+	// No keys with a valid Signing Flag or no keys matched the id passed in
+	return Key{}, false
+}
+
+func revoked(revocations []*packet.Signature, now time.Time) bool {
+	for _, revocation := range revocations {
+		if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised {
+			// If the key is compromised, the key is considered revoked even before the revocation date.
+			return true
+		}
+		if !revocation.SigExpired(now) {
+			return true
+		}
+	}
+	return false
+}
+
+// Revoked returns whether the entity has any direct key revocation signatures.
+// Note that third-party revocation signatures are not supported.
+// Note also that Identity and Subkey revocation should be checked separately.
+func (e *Entity) Revoked(now time.Time) bool {
+	return revoked(e.Revocations, now)
+}
+
+// EncryptPrivateKeys encrypts all non-encrypted keys in the entity with the same key
+// derived from the provided passphrase. Public keys and dummy keys are ignored,
+// and don't cause an error to be returned.
+func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) error {
+	var keysToEncrypt []*packet.PrivateKey
+	// Add entity private key to encrypt.
+	if e.PrivateKey != nil && !e.PrivateKey.Dummy() && !e.PrivateKey.Encrypted {
+		keysToEncrypt = append(keysToEncrypt, e.PrivateKey)
+	}
+
+	// Add subkeys to encrypt.
+	for _, sub := range e.Subkeys {
+		if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && !sub.PrivateKey.Encrypted {
+			keysToEncrypt = append(keysToEncrypt, sub.PrivateKey)
+		}
+	}
+	return packet.EncryptPrivateKeys(keysToEncrypt, passphrase, config)
+}
+
+// DecryptPrivateKeys decrypts all encrypted keys in the entity with the given passphrase.
+// Avoids recomputation of similar s2k key derivations. Public keys and dummy keys are ignored,
+// and don't cause an error to be returned.
+func (e *Entity) DecryptPrivateKeys(passphrase []byte) error {
+	var keysToDecrypt []*packet.PrivateKey
+	// Add entity private key to decrypt.
+	if e.PrivateKey != nil && !e.PrivateKey.Dummy() && e.PrivateKey.Encrypted {
+		keysToDecrypt = append(keysToDecrypt, e.PrivateKey)
+	}
+
+	// Add subkeys to decrypt.
+	for _, sub := range e.Subkeys {
+		if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && sub.PrivateKey.Encrypted {
+			keysToDecrypt = append(keysToDecrypt, sub.PrivateKey)
+		}
+	}
+	return packet.DecryptPrivateKeys(keysToDecrypt, passphrase)
+}
+
+// Revoked returns whether the identity has been revoked by a self-signature.
+// Note that third-party revocation signatures are not supported.
+func (i *Identity) Revoked(now time.Time) bool {
+	return revoked(i.Revocations, now)
+}
+
+// Revoked returns whether the subkey has been revoked by a self-signature.
+// Note that third-party revocation signatures are not supported.
+func (s *Subkey) Revoked(now time.Time) bool {
+	return revoked(s.Revocations, now)
+}
+
+// Revoked returns whether the key or subkey has been revoked by a self-signature.
+// Note that third-party revocation signatures are not supported.
+// Note also that Identity revocation should be checked separately.
+// Normally, it's not necessary to call this function, except on keys returned by
+// KeysById or KeysByIdUsage.
+func (key *Key) Revoked(now time.Time) bool {
+	return revoked(key.Revocations, now)
+}
+
+// An EntityList contains one or more Entities.
+type EntityList []*Entity
+
+// KeysById returns the set of keys that have the given key id.
+func (el EntityList) KeysById(id uint64) (keys []Key) {
+	for _, e := range el {
+		if e.PrimaryKey.KeyId == id {
+			selfSig, _ := e.PrimarySelfSignature()
+			keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations})
+		}
+
+		for _, subKey := range e.Subkeys {
+			if subKey.PublicKey.KeyId == id {
+				keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations})
+			}
+		}
+	}
+	return
+}
+
+// KeysByIdUsage returns the set of keys with the given id that also meet
+// the key usage given by requiredUsage. The requiredUsage is expressed as
+// the bitwise-OR of packet.KeyFlag* values.
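KeysByIdUsage, defined next, filters KeysById by the packet.KeyFlag* bits, which combine with bitwise OR; a sketch:

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	e, err := openpgp.NewEntity("Dave", "", "dave@example.com", nil)
	if err != nil {
		panic(err)
	}
	el := openpgp.EntityList{e}

	// Require both certify and sign: flags are combined with bitwise OR.
	keys := el.KeysByIdUsage(e.PrimaryKey.KeyId, packet.KeyFlagCertify|packet.KeyFlagSign)
	fmt.Printf("%d key(s) can certify and sign\n", len(keys))
}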
+func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { + for _, key := range el.KeysById(id) { + if requiredUsage != 0 { + if key.SelfSignature == nil || !key.SelfSignature.FlagsValid { + continue + } + + var usage byte + if key.SelfSignature.FlagCertify { + usage |= packet.KeyFlagCertify + } + if key.SelfSignature.FlagSign { + usage |= packet.KeyFlagSign + } + if key.SelfSignature.FlagEncryptCommunications { + usage |= packet.KeyFlagEncryptCommunications + } + if key.SelfSignature.FlagEncryptStorage { + usage |= packet.KeyFlagEncryptStorage + } + if usage&requiredUsage != requiredUsage { + continue + } + } + + keys = append(keys, key) + } + return +} + +// DecryptionKeys returns all private keys that are valid for decryption. +func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && subKey.Sig.FlagsValid && (subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. +func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. 
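ReadArmoredKeyRing and ReadKeyRing above are the usual entry points for parsing; a sketch of loading an armored keyring and listing primary fingerprints (the file name is hypothetical):

package main

import (
	"fmt"
	"os"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	f, err := os.Open("keyring.asc") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Decodes the armor block, then parses entity by entity, skipping
	// unsupported keys as described above.
	el, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		panic(err)
	}
	for _, e := range el {
		fmt.Printf("%X\n", e.PrimaryKey.Fingerprint)
	}
}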
+func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } + e.PrimaryKey = &e.PrivateKey.PublicKey + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var revocations []*packet.Signature + var directSignatures []*packet.Signature +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + if err := addUserID(e, packets, pkt); err != nil { + return nil, err + } + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + revocations = append(revocations, pkt) + } else if pkt.SigType == packet.SigTypeDirectSignature { + directSignatures = append(directSignatures, pkt) + } + // Else, ignoring the signature as it does not follow anything + // we would know to attach it to. + case *packet.PrivateKey: + if !pkt.IsSubkey { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if !pkt.IsSubkey { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets. + } + } + + if len(e.Identities) == 0 && e.PrimaryKey.Version < 6 { + return nil, errors.StructuralError(fmt.Sprintf("v%d entity without any identities", e.PrimaryKey.Version)) + } + + // An implementation MUST ensure that a valid direct-key signature is present before using a v6 key. + if e.PrimaryKey.Version == 6 { + if len(directSignatures) == 0 { + return nil, errors.StructuralError("v6 entity without a valid direct-key signature") + } + // Select main direct key signature. + var mainDirectKeySelfSignature *packet.Signature + for _, directSignature := range directSignatures { + if directSignature.SigType == packet.SigTypeDirectSignature && + directSignature.CheckKeyIdOrFingerprint(e.PrimaryKey) && + (mainDirectKeySelfSignature == nil || + directSignature.CreationTime.After(mainDirectKeySelfSignature.CreationTime)) { + mainDirectKeySelfSignature = directSignature + } + } + if mainDirectKeySelfSignature == nil { + return nil, errors.StructuralError("no valid direct-key self-signature for v6 primary key found") + } + // Check that the main self-signature is valid. + err = e.PrimaryKey.VerifyDirectKeySignature(mainDirectKeySelfSignature) + if err != nil { + return nil, errors.StructuralError("invalid direct-key self-signature for v6 primary key") + } + e.SelfSignature = mainDirectKeySelfSignature + e.Signatures = directSignatures + } + + for _, revocation := range revocations { + err = e.PrimaryKey.VerifyRevocationSignature(revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + // TODO: RFC 4880 5.2.3.15 defines revocation keys. + return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } + + return e, nil +} + +func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { + // Make a new Identity object, that we might wind up throwing away. 
+	// We'll only add it if we get a valid self-signature over this
+	// userID.
+	identity := new(Identity)
+	identity.Name = pkt.Id
+	identity.UserId = pkt
+
+	for {
+		p, err := packets.Next()
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return err
+		}
+
+		sig, ok := p.(*packet.Signature)
+		if !ok {
+			packets.Unread(p)
+			break
+		}
+
+		if sig.SigType != packet.SigTypeGenericCert &&
+			sig.SigType != packet.SigTypePersonaCert &&
+			sig.SigType != packet.SigTypeCasualCert &&
+			sig.SigType != packet.SigTypePositiveCert &&
+			sig.SigType != packet.SigTypeCertificationRevocation {
+			return errors.StructuralError("user ID signature with wrong type")
+		}
+
+		if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) {
+			if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
+				return errors.StructuralError("user ID self-signature invalid: " + err.Error())
+			}
+			if sig.SigType == packet.SigTypeCertificationRevocation {
+				identity.Revocations = append(identity.Revocations, sig)
+			} else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) {
+				identity.SelfSignature = sig
+			}
+			identity.Signatures = append(identity.Signatures, sig)
+			e.Identities[pkt.Id] = identity
+		} else {
+			identity.Signatures = append(identity.Signatures, sig)
+		}
+	}
+
+	return nil
+}
+
+func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
+	var subKey Subkey
+	subKey.PublicKey = pub
+	subKey.PrivateKey = priv
+
+	for {
+		p, err := packets.Next()
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return errors.StructuralError("subkey signature invalid: " + err.Error())
+		}
+
+		sig, ok := p.(*packet.Signature)
+		if !ok {
+			packets.Unread(p)
+			break
+		}
+
+		if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation {
+			return errors.StructuralError("subkey signature with wrong type")
+		}
+
+		if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil {
+			return errors.StructuralError("subkey signature invalid: " + err.Error())
+		}
+
+		switch sig.SigType {
+		case packet.SigTypeSubkeyRevocation:
+			subKey.Revocations = append(subKey.Revocations, sig)
+		case packet.SigTypeSubkeyBinding:
+			if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) {
+				subKey.Sig = sig
+			}
+		}
+	}
+
+	if subKey.Sig == nil {
+		return errors.StructuralError("subkey packet not followed by signature")
+	}
+
+	e.Subkeys = append(e.Subkeys, subKey)
+
+	return nil
+}
+
+// SerializePrivate serializes an Entity, including private key material, but
+// excluding signatures from other entities, to the given Writer.
+// Identities and subkeys are re-signed in case they changed since NewEntity.
+// If config is nil, sensible defaults will be used.
+func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
+	if e.PrivateKey.Dummy() {
+		return errors.ErrDummyPrivateKey("dummy private key cannot re-sign identities")
+	}
+	return e.serializePrivate(w, config, true)
+}
+
+// SerializePrivateWithoutSigning serializes an Entity, including private key
+// material, but excluding signatures from other entities, to the given Writer.
+// Self-signatures of identities and subkeys are not re-signed. This is useful
+// when serializing GNU dummy keys, among other things.
+// If config is nil, sensible defaults will be used.
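+//
+// A minimal sketch (illustrative, not from the upstream documentation),
+// assuming e is an *Entity whose PrivateKey is populated:
+//
+//	var buf bytes.Buffer
+//	if err := e.SerializePrivateWithoutSigning(&buf, nil); err != nil {
+//		// handle error
+//	}
+//	// buf now holds the binary (unarmored) private key packets.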
+func (e *Entity) SerializePrivateWithoutSigning(w io.Writer, config *packet.Config) (err error) { + return e.serializePrivate(w, config, false) +} + +func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign bool) (err error) { + if e.PrivateKey == nil { + return goerrors.New("openpgp: private key is missing") + } + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + for _, directSignature := range e.Signatures { + err := directSignature.Serialize(w) + if err != nil { + return err + } + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + if reSign { + if ident.SelfSignature == nil { + return goerrors.New("openpgp: can't re-sign identity without valid self-signature") + } + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + if reSign { + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } + if subkey.Sig.EmbeddedSignature != nil { + err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, + subkey.PrivateKey, config) + if err != nil { + return + } + } + } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + err = subkey.Sig.Serialize(w) + if err != nil { + return + } + } + return nil +} + +// Serialize writes the public part of the given Entity to w, including +// signatures from other entities. No private key material will be output. +func (e *Entity) Serialize(w io.Writer) error { + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + for _, directSignature := range e.Signatures { + err := directSignature.Serialize(w) + if err != nil { + return err + } + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. 
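+//
+// An illustrative sketch (not from the upstream documentation); the identity
+// string is hypothetical and must match a key of e.Identities, and signer's
+// private key must already be decrypted:
+//
+//	if err := e.SignIdentity("Alice <alice@example.com>", signer, nil); err != nil {
+//		// e.g. identity not present, or signer's key still encrypted
+//	}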
+func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
+	certificationKey, ok := signer.CertificationKey(config.Now())
+	if !ok {
+		return errors.InvalidArgumentError("no valid certification key found")
+	}
+
+	if certificationKey.PrivateKey.Encrypted {
+		return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
+	}
+
+	ident, ok := e.Identities[identity]
+	if !ok {
+		return errors.InvalidArgumentError("given identity string not found in Entity")
+	}
+
+	sig := createSignaturePacket(certificationKey.PublicKey, packet.SigTypeGenericCert, config)
+
+	signingUserID := config.SigningUserId()
+	if signingUserID != "" {
+		if _, ok := signer.Identities[signingUserID]; !ok {
+			return errors.InvalidArgumentError("signer identity string not found in signer Entity")
+		}
+		sig.SignerUserId = &signingUserID
+	}
+
+	if err := sig.SignUserId(identity, e.PrimaryKey, certificationKey.PrivateKey, config); err != nil {
+		return err
+	}
+	ident.Signatures = append(ident.Signatures, sig)
+	return nil
+}
+
+// RevokeKey generates a key revocation signature (packet.SigTypeKeyRevocation) with the
+// specified reason code and text (RFC 4880, section 5.2.3.23).
+// If config is nil, sensible defaults will be used.
+func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error {
+	revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeKeyRevocation, config)
+	revSig.RevocationReason = &reason
+	revSig.RevocationReasonText = reasonText
+
+	if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil {
+		return err
+	}
+	e.Revocations = append(e.Revocations, revSig)
+	return nil
+}
+
+// RevokeSubkey generates a subkey revocation signature (packet.SigTypeSubkeyRevocation) for
+// a subkey with the specified reason code and text (RFC 4880, section 5.2.3.23).
+// If config is nil, sensible defaults will be used.
+func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error {
+	if err := e.PrimaryKey.VerifyKeySignature(sk.PublicKey, sk.Sig); err != nil {
+		return errors.InvalidArgumentError("given subkey is not associated with this key")
+	}
+
+	revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyRevocation, config)
+	revSig.RevocationReason = &reason
+	revSig.RevocationReasonText = reasonText
+
+	if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil {
+		return err
+	}
+
+	sk.Revocations = append(sk.Revocations, revSig)
+	return nil
+}
+
+func (e *Entity) primaryDirectSignature() *packet.Signature {
+	return e.SelfSignature
+}
+
+// PrimarySelfSignature searches the entity for the self-signature that stores key preferences.
+// For V4 keys, returns the self-signature of the primary identity, and the identity.
+// For V6 keys, returns the latest valid direct-key self-signature, and no identity (nil).
+// This self-signature is to be used to check the key expiration,
+// algorithm preferences, and so on.
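+//
+// An illustrative sketch (not from the upstream documentation) that works for
+// both v4 and v6 keys:
+//
+//	sig, _ := e.PrimarySelfSignature()
+//	if sig == nil {
+//		// no usable self-signature; treat the key as unusable
+//	}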
+func (e *Entity) PrimarySelfSignature() (*packet.Signature, *Identity) { + if e.PrimaryKey.Version == 6 { + return e.primaryDirectSignature(), nil + } + primaryIdentity := e.PrimaryIdentity() + if primaryIdentity == nil { + return nil, nil + } + return primaryIdentity.SelfSignature, primaryIdentity +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go new file mode 100644 index 0000000000..108fd096f3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go @@ -0,0 +1,538 @@ +package openpgp + +const expiringKeyHex = "c6c04d0451d0c680010800abbb021fd03ffc4e96618901180c3fdcb060ee69eeead97b91256d11420d80b5f1b51930248044130bd300605cf8a05b7a40d3d8cfb0a910be2e3db50dcd50a9c54064c2a5550801daa834ff4480b33d3d3ca495ff8a4e84a886977d17d998f881241a874083d8b995beab555b6d22b8a4817ab17ac3e7304f7d4d2c05c495fb2218348d3bc13651db1d92732e368a9dd7dcefa6eddff30b94706a9aaee47e9d39321460b740c59c6fc3c2fd8ab6c0fb868cb87c0051f0321301fe0f0e1820b15e7fb7063395769b525005c7e30a7ce85984f5cac00504e7b4fdc45d74958de8388436fd5c7ba9ea121f1c851b5911dd1b47a14d81a09e92ef37721e2325b6790011010001cd00c2c07b041001080025050251d0c680050900278d00060b09070803020415080a0203160201021901021b03021e01000a0910e7b484133a890a35ae4b0800a1beb82e7f28eaf5273d6af9d3391314f6280b2b624eaca2851f89a9ebcaf80ac589ebd509f168bc4322106ca2e2ce77a76e071a3c7444787d65216b5f05e82c77928860b92aace3b7d0327db59492f422eb9dfab7249266d37429870b091a98aba8724c2259ebf8f85093f21255eafa75aa841e31d94f2ac891b9755fed455e539044ee69fc47950b80e003fc9f298d695660f28329eaa38037c367efde1727458e514faf990d439a21461b719edaddf9296d3d0647b43ca56cb8dbf63b4fcf8b9968e7928c463470fab3b98e44d0d95645062f94b2d04fe56bd52822b71934db8ce845622c40b92fcbe765a142e7f38b61a6aa9606c8e8858dcd3b6eb1894acec04d0451d1f06b01080088bea67444e1789390e7c0335c86775502d58ec783d99c8ef4e06de235ed3dd4b0467f6f358d818c7d8989d43ec6d69fcbc8c32632d5a1b605e3fa8e41d695fcdcaa535936cd0157f9040dce362519803b908eafe838bb13216c885c6f93e9e8d5745607f0d062322085d6bdc760969149a8ff8dd9f5c18d9bfe2e6f63a06e17694cf1f67587c6fb70e9aebf90ffc528ca3b615ac7c9d4a21ea4f7c06f2e98fbbd90a859b8608bf9ea638e3a54289ce44c283110d0c45fa458de6251cd6e7baf71f80f12c8978340490fd90c92b81736ae902ed958e478dceae2835953d189c45d182aff02ea2be61b81d8e94430f041d638647b43e2fcb45fd512fbf5068b810011010001c2c06504180108000f050251d1f06b050900081095021b0c000a0910e7b484133a890a35e63407fe2ec88d6d1e6c9ce7553ece0cb2524747217bad29f251d33df84599ffcc900141a355abd62126800744068a5e05dc167056aa9205273dc7765a2ed49db15c2a83b8d6e6429c902136f1e12229086c1c10c0053242c2a4ae1930db58163387a48cad64607ff2153c320e42843dec28e3fce90e7399d63ac0affa2fee1f0adc0953c89eb3f46ef1d6c04328ed13b491669d5120a3782e3ffb7c69575fb77eebd108794f4dda9d34be2bae57e8e59ec8ebfda2f6f06104b2321be408ea146e2db482b00c5055c8618de36ac9716f80da2617e225556d0fce61b01c8cea2d1e0ea982c31711060ca370f2739366e1e708f38405d784b49d16a26cf62d152eae734327cec04d0451d1f07b010800d5af91c5e7c2fd8951c8d254eab0c97cdcb66822f868b79b78c366255059a68fd74ebca9adb9b970cd9e586690e6e0756705432306878c897b10a4b4ca0005966f99ac8fa4e6f9caf54bf8e53844544beee9872a7ac64c119cf1393d96e674254b661f61ee975633d0e8a8672531edb6bb8e211204e7754a9efa802342118eee850beea742bac95a3f706cc2024cf6037a308bb68162b2f53b9a6346a96e6d31871a2456186e24a1c7a82b82ac04afdfd57cd7fb9ba77a9c760d40b76a170f7be525e5fb6a9848cc726e806187710d9b190387df28700f321f988a392899f93815cc937f309129eb94d5299c5547cb2c085898e6639496e70d746c9d3fb9881d0011010001c2c06504180108000f050251d1f07b050900266305021
b0c000a0910e7b484133a890a35bff207fd10dfe8c4a6ea1dd30568012b6fd6891a763c87ad0f7a1d112aad9e8e3239378a3b85588c235865bac2e614348cb4f216d7217f53b3ef48c192e0a4d31d64d7bfa5faccf21155965fa156e887056db644a05ad08a85cc6152d1377d9e37b46f4ff462bbe68ace2dc586ef90070314576c985d8037c2ba63f0a7dc17a62e15bd77e88bc61d9d00858979709f12304264a4cf4225c5cf86f12c8e19486cb9cdcc69f18f027e5f16f4ca8b50e28b3115eaff3a345acd21f624aef81f6ede515c1b55b26b84c1e32264754eab672d5489b287e7277ea855e0a5ff2aa9e8b8c76d579a964ec225255f4d57bf66639ccb34b64798846943e162a41096a7002ca21c7f56" +const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95
784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98" +const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f" +const revokedSubkeyHex = 
"988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a002
1b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011" + +const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv +2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR +bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL +C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP +WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y +MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA +EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ +MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N +1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm ++ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N +lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW +CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF +4artDmrG +=7FfJ +-----END PGP PUBLIC KEY BLOCK-----` + +const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + 
+mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ +UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe +iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK +FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8 +R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh ++SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA +EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO +52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb +u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl +w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep +54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+ +YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL +bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E +i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB +DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1 +8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY +s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745 +U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL +6LCg2mg= +=Dhm4 +-----END PGP PUBLIC KEY BLOCK-----` + +const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo +7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom +lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0 +E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw +6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH +7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv +X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7 +GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl +y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw +R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW +CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+ +LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO +aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx +yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl +BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr +Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK +CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp +C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ +SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/ +MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70= +=vtbN +-----END PGP PUBLIC KEY BLOCK-----` + 
+const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e +DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/ +uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW +ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx +nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ +x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg +PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy +9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ +1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2 +depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl +aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2 +DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa +XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU +8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2 +b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD +BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG +0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N +s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb +tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0 +BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE +/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7 +kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z +VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa +PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ +snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi +bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8 +K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X +8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+ +TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb +OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l +QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V +yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U +heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB +7qTZOahrETw= +=IKnw +-----END PGP PUBLIC KEY BLOCK-----` + +const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: OpenPGP.js v4.10.10 +Comment: https://openpgpjs.org + +xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q +lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN +91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO +XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb +naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX +8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB +AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK +ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS +gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT +6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ +q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy ++I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY +bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j +SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk +kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu +Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC +GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW 
+IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1 +mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG +LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK +zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/ +BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ +aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD +rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm +yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2 +UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb +YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt +amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM +u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/ +Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS +gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB +cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N +nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN +kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF +I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb +hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z +FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw= +=+2T8 +-----END PGP PUBLIC KEY BLOCK----- +` + +const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll +JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+ +iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0 +yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e +/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI +q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V +xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw +ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB +B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh +BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u +8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA +mcG3/TwG5GSQUUPkrDsGDA== +=mFWy +-----END PGP PUBLIC KEY BLOCK----- +` + +const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR +s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL +EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0 +I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/ +lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe +AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7 +2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0 +69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9 +Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b +wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW +FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR +AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM +vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx +22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj +7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY +AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044 +tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX +JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl +mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1 +8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC 
+0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b +4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl +Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY +=3fWu +-----END PGP PUBLIC KEY BLOCK-----` + +const keyWithSubKeyAndBadSelfSigOrder = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EWyLLDQEEAOqIOpJ/ha1OYAGduu9tS3rBz5vyjbNgJO4sFveEM0mgsHQ0X9/L +plonW+d0gRoO1dhJ8QICjDAc6+cna1DE3tEb5m6JtQ30teLZuqrR398Cf6w7NNVz +r3lrlmnH9JaKRuXl7tZciwyovneBfZVCdtsRZjaLI1uMQCz/BToiYe3DABEBAAG0 +I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQRZ +sixZOfQcZdW0wUqmgmdsv1O9xgUCWyLLDQIbAwULCQgHAwUVCgkICwUWAgMBAAIe +AQIXgAAKCRCmgmdsv1O9xql2A/4pix98NxjhdsXtazA9agpAKeADf9tG4Za27Gj+ +3DCww/E4iP2X35jZimSm/30QRB6j08uGCqd9vXkkJxtOt63y/IpVOtWX6vMWSTUm +k8xKkaYMP0/IzKNJ1qC/qYEUYpwERBKg9Z+k99E2Ql4kRHdxXUHq6OzY79H18Y+s +GdeM/riNBFsiyxsBBAC54Pxg/8ZWaZX1phGdwfe5mek27SOYpC0AxIDCSOdMeQ6G +HPk38pywl1d+S+KmF/F4Tdi+kWro62O4eG2uc/T8JQuRDUhSjX0Qa51gPzJrUOVT +CFyUkiZ/3ZDhtXkgfuso8ua2ChBgR9Ngr4v43tSqa9y6AK7v0qjxD1x+xMrjXQAR +AQABiQFxBBgBCgAmAhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsizTIFCQAN +MRcAv7QgBBkBCgAdFiEEJcoVUVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62j +UpRPICQq5gQApoWIigZxXFoM0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBS +YnjyA4+n1D+zB2VqliD2QrsX12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZs +nRJmXV+bsvD4sidLZLjdwOVa3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/ +U73GGi0D/i20VW8AWYAPACm2zMlzExKTOAV01YTQH/3vW0WLrOse53WcIVZga6es +HuO4So0SOEAvxKMe5HpRIu2dJxTvd99Bo9xk9xJU0AoFrO0vNCRnL+5y68xMlODK +lEw5/kl0jeaTBp6xX0HDQOEVOpPGUwWV4Ij2EnvfNDXaE1vK1kffiQFrBBgBCgAg +AhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsi0AYAv7QgBBkBCgAdFiEEJcoV +UVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62jUpRPICQq5gQApoWIigZxXFoM +0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBSYnjyA4+n1D+zB2VqliD2QrsX +12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZsnRJmXV+bsvD4sidLZLjdwOVa +3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/U73GRl0EAJokkXmy4zKDHWWi +wvK9gi2gQgRkVnu2AiONxJb5vjeLhM/07BRmH6K1o+w3fOeEQp4FjXj1eQ5fPSM6 +Hhwx2CTl9SDnPSBMiKXsEFRkmwQ2AAsQZLmQZvKBkLZYeBiwf+IY621eYDhZfo+G +1dh1WoUCyREZsJQg2YoIpWIcvw+a +=bNRo +-----END PGP PUBLIC KEY BLOCK----- +` + +const onlySubkeyNoPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1 + +lQCVBFggvocBBAC7vBsHn7MKmS6IiiZNTXdciplVgS9cqVd+RTdIAoyNTcsiV1H0 +GQ3QtodOPeDlQDNoqinqaobd7R9g3m3hS53Nor7yBZkCWQ5x9v9JxRtoAq0sklh1 +I1X2zEqZk2l6YrfBF/64zWrhjnW3j23szkrAIVu0faQXbQ4z56tmZrw11wARAQAB +/gdlAkdOVQG0CUdOVSBEdW1teYi4BBMBAgAiBQJYIL6HAhsDBgsJCAcDAgYVCAIJ +CgsEFgIDAQIeAQIXgAAKCRCd1xxWp1CYAnjGA/9synn6ZXJUKAXQzySgmCZvCIbl +rqBfEpxwLG4Q/lONhm5vthAE0z49I8hj5Gc5e2tLYUtq0o0OCRdCrYHa/efOYWpJ +6RsK99bePOisVzmOABLIgZkcr022kHoMCmkPgv9CUGKP1yqbGl+zzAwQfUjRUmvD +ZIcWLHi2ge4GzPMPi50B2ARYIL6cAQQAxWHnicKejAFcFcF1/3gUSgSH7eiwuBPX +M7vDdgGzlve1o1jbV4tzrjN9jsCl6r0nJPDMfBSzgLr1auNTRG6HpJ4abcOx86ED +Ad+avDcQPZb7z3dPhH/gb2lQejZsHh7bbeOS8WMSzHV3RqCLd8J/xwWPNR5zKn1f +yp4IGfopidMAEQEAAQAD+wQOelnR82+dxyM2IFmZdOB9wSXQeCVOvxSaNMh6Y3lk +UOOkO8Nlic4x0ungQRvjoRs4wBmCuwFK/MII6jKui0B7dn/NDf51i7rGdNGuJXDH +e676By1sEY/NGkc74jr74T+5GWNU64W0vkpfgVmjSAzsUtpmhJMXsc7beBhJdnVl +AgDKCb8hZqj1alcdmLoNvb7ibA3K/V8J462CPD7bMySPBa/uayoFhNxibpoXml2r +oOtHa5izF3b0/9JY97F6rqkdAgD6GdTJ+xmlCoz1Sewoif1I6krq6xoa7gOYpIXo +UL1Afr+LiJeyAnF/M34j/kjIVmPanZJjry0kkjHE5ILjH3uvAf4/6n9np+Th8ujS +YDCIzKwR7639+H+qccOaddCep8Y6KGUMVdD/vTKEx1rMtK+hK/CDkkkxnFslifMJ +kqoqv3WUqCWJAT0EGAECAAkFAlggvpwCGwIAqAkQndccVqdQmAKdIAQZAQIABgUC +WCC+nAAKCRDmGUholQPwvQk+A/9latnSsR5s5/1A9TFki11GzSEnfLbx46FYOdkW +n3YBxZoPQGxNA1vIn8GmouxZInw9CF4jdOJxEdzLlYQJ9YLTLtN5tQEMl/19/bR8 
+/qLacAZ9IOezYRWxxZsyn6//jfl7A0Y+FV59d4YajKkEfItcIIlgVBSW6T+TNQT3 +R+EH5HJ/A/4/AN0CmBhhE2vGzTnVU0VPrE4V64pjn1rufFdclgpixNZCuuqpKpoE +VVHn6mnBf4njKjZrAGPs5kfQ+H4NsM7v3Zz4yV6deu9FZc4O6E+V1WJ38rO8eBix +7G2jko106CC6vtxsCPVIzY7aaG3H5pjRtomw+pX7SzrQ7FUg2PGumg== +=F/T0 +-----END PGP PRIVATE KEY BLOCK-----` + +const ecdsaPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xaUEX1KsSRMIKoZIzj0DAQcCAwTpYqJsnJiFhKKh+8TulWD+lVmerBFNS+Ii +B+nlG3T0xQQ4Sy5eIjJ0CExIQQzi3EElF/Z2l4F3WC5taFA11NgA/gkDCHSS +PThf1M2K4LN8F1MRcvR+sb7i0nH55ojkwuVB1DE6jqIT9m9i+mX1tzjSAS+6 +lPQiweCJvG7xTC7Hs3AzRapf/r1At4TB+v+5G2/CKynNFEJpbGwgPGJpbGxA +aG9tZS5jb20+wncEEBMIAB8FAl9SrEkGCwkHCAMCBBUICgIDFgIBAhkBAhsD +Ah4BAAoJEMpwT3+q3+xqw5UBAMebZN9isEZ1ML+R/jWAAWMwa/knMugrEZ1v +Bl9+ZwM0AQCZdf80/wYY4Nve01qSRFv8OmKswLli3TvDv6FKc4cLz8epBF9S +rEkSCCqGSM49AwEHAgMEAjKnT9b5wY2bf9TpAV3d7OUfPOxKj9c4VzeVzSrH +AtQgo/MuI1cdYVURicV4i76DNjFhQHQFTk7BrC+C2u1yqQMBCAf+CQMIHImA +iYfzQtjgQWSFZYUkCFpbbwhNF0ch+3HNaZkaHCnZRIsWsRnc6FCb6lRQyK9+ +Dq59kHlduE5QgY40894jfmP2JdJHU6nBdYrivbEdbMJhBBgTCAAJBQJfUqxJ +AhsMAAoJEMpwT3+q3+xqUI0BAMykhV08kQ4Ip9Qlbss6Jdufv7YrU0Vd5hou +b5TmiPd0APoDBh3qIic+aLLUcAuG3+Gt1P1AbUlmqV61ozn1WfHxfw== +=KLN8 +-----END PGP PRIVATE KEY BLOCK-----` + +const dsaPrivateKeyWithElGamalSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lQOBBF9/MLsRCACeaF6BI0jTgDAs86t8/kXPfwlPvR2MCYzB0BCqAdcq1hV/GTYd +oNmJRna/ZJfsI/vf+d8Nv+EYOQkPheFS1MJVBitkAXjQPgm8i1tQWen1FCWZxqGk +/vwZYF4yo8GhZ+Wxi3w09W9Cp9QM/CTmyE1Xe7wpPBGe+oD+me8Zxjyt8JBS4Qx+ +gvWbfHxfHnggh4pz7U8QkItlLsBNQEdX4R5+zwRN66g2ZSX/shaa/EkVnihUhD7r +njP9I51ORWucTQD6OvgooaNQZCkQ/Se9TzdakwWKS2XSIFXiY/e2E5ZgKI/pfKDU +iA/KessxddPb7nP/05OIJqg9AoDrD4vmehLzAQD+zsUS3LDU1m9/cG4LMsQbT2VK +Te4HqbGIAle+eu/asQf8DDJMrbZpiJZvADum9j0TJ0oep6VdMbzo9RSDKvlLKT9m +kG63H8oDWnCZm1a+HmGq9YIX+JHWmsLXXsFLeEouLzHO+mZo0X28eji3V2T87hyR +MmUM0wFo4k7jK8uVmkDXv3XwNp2uByWxUKZd7EnWmcEZWqIiexJ7XpCS0Pg3tRaI +zxve0SRe/dxfUPnTk/9KQ9hS6DWroBKquL182zx1Fggh4LIWWE2zq+UYn8BI0E8A +rmIDFJdF8ymFQGRrEy6g79NnkPmkrZWsgMRYY65P6v4zLVmqohJKkpm3/Uxa6QAP +CCoPh/JTOvPeCP2bOJH8z4Z9Py3ouMIjofQW8sXqRgf/RIHbh0KsINHrwwZ4gVIr +MK3RofpaYxw1ztPIWb4cMWoWZHH1Pxh7ggTGSBpAhKXkiWw2Rxat8QF5aA7e962c +bLvVv8dqsPrD/RnVJHag89cbPTzjn7gY9elE8EM8ithV3oQkwHTr4avYlpDZsgNd +hUW3YgRwGo31tdzxoG04AcpV2t+07P8XMPr9hsfWs4rHohXPi38Hseu1Ji+dBoWQ +3+1w/HH3o55s+jy4Ruaz78AIrjbmAJq+6rA2mIcCgrhw3DnzuwQAKeBvSeqn9zfS +ZC812osMBVmkycwelpaIh64WZ0vWL3GvdXDctV2kXM+qVpDTLEny0LuiXxrwCKQL +Ev4HAwK9uQBcreDEEud7pfRb8EYP5lzO2ZA7RaIvje6EWAGBvJGMRT0QQE5SGqc7 +Fw5geigBdt+vVyRuNNhg3c2fdn/OBQaYu0J/8AiOogG8EaM8tCFlbGdhbWFsQGRz +YS5jb20gPGVsZ2FtYWxAZHNhLmNvbT6IkAQTEQgAOBYhBI+gnfiHQxB35/Dp0XAQ +aE/rsWC5BQJffzC7AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEHAQaE/r +sWC5A4EA/0GcJmyPtN+Klc7b9sVT3JgKTRnB/URxOJfYJofP0hZLAQCkqyMO+adV +JvbgDH0zaITQWZSSXPqpgMpCA6juTrDsd50CawRffzC7EAgAxFFFSAAEQzWTgKU5 +EBtpxxoPzHqcChawTHRxHxjcELXzmUBS5PzfA1HXSPnNqK/x3Ut5ycC3CsW41Fnt +Gm3706Wu9VFbFZVn55F9lPiplUo61n5pqMvOr1gmuQsdXiTa0t5FRa4TZ2VSiHFw +vdAVSPTUsT4ZxJ1rPyFYRtq1n3pQcvdZowd07r0JnzTMjLLMFYCKhwIowoOC4zqJ +iB8enjwOlpaqBATRm9xpVF7SJkroPF6/B1vdhj7E3c1aJyHlo0PYBAg756sSHWHg +UuLyUQ4TA0hcCVenn/L/aSY2LnbdZB1EBhlYjA7dTCgwIqsQhfQmPkjz6g64A7+Y +HbbrLwADBQgAk14QIEQ+J/VHetpQV/jt2pNsFK1kVK7mXK0spTExaC2yj2sXlHjL +Ie3bO5T/KqmIaBEB5db5fA5xK9cZt79qrQHDKsEqUetUeMUWLBx77zBsus3grIgy +bwDZKseRzQ715pwxquxQlScGoDIBKEh08HpwHkq140eIj3w+MAIfndaZaSCNaxaP +Snky7BQmJ7Wc7qrIwoQP6yrnUqyW2yNi81nJYUhxjChqaFSlwzLs/iNGryBKo0ic +BqVIRjikKHBlwBng6WyrltQo/Vt9GG8w+lqaAVXbJRlaBZJUR+2NKi/YhP3qQse3 +v8fi4kns0gh5LK+2C01RvdX4T49QSExuIf4HAwLJqYIGwadA2uem5v7/765ZtFWV 
+oL0iZ0ueTJDby4wTFDpLVzzDi/uVcB0ZRFrGOp7w6OYcNYTtV8n3xmli2Q5Trw0c +wZVzvg+ABKWiv7faBjMczIFF8y6WZKOIeAQYEQgAIBYhBI+gnfiHQxB35/Dp0XAQ +aE/rsWC5BQJffzC7AhsMAAoJEHAQaE/rsWC5ZmIA/jhS4r4lClbvjuPWt0Yqdn7R +fss2SPMYvMrrDh42aE0OAQD8xn4G6CN8UtW9xihXOY6FpxiJ/sMc2VaneeUd34oa +4g== +=XZm8 +-----END PGP PRIVATE KEY BLOCK-----` + +// https://tests.sequoia-pgp.org/#Certificate_expiration +// P _ U p +const expiringPrimaryUIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz +/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w +bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe +ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY +495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ +mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t +yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG +Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN +/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm +pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR +eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH +DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05 +ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ +UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT +74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3 +ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/ +i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj +LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/ +iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc +selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n +TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+ +Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm +bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk +jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6 +6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO// +rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm +U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR +LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw +sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx +1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld +I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ== +=AmgT +-----END PGP PUBLIC KEY BLOCK-----` + +const rsa2048PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4 + +lQPGBGL07P0BCADL0etN8efyAXA6sL2WfQvHe5wEKYXPWeN2+jiqSppfeRZAOlzP +kZ3U+cloeJriplYvVJwI3ID2aw52Z/TRn8iKRP5eOUFrEgcgl06lazLtOndK7o7p +oBV5mLtHEirFHm6W61fNt10jzM0jx0PV6nseLhFB2J42F1cmU/aBgFo41wjLSZYr +owR+v+O9S5sUXblQF6sEDcY01sBEu09zrIgT49VFwQ1Cvdh9XZEOTQBfdiugoj5a +DS3fAqAka3r1VoQK4eR7/upnYSgSACGeaQ4pUelKku5rpm50gdWTY8ppq0k9e1eT +y2x0OQcW3hWE+j4os1ca0ZEADMdqr/99MOxrABEBAAH+BwMCJWxU4VOZOJ7/I6vX 
+FxdfBhIBEXlJ52FM3S/oYtXqLhkGyrtmZOeEazVvUtuCe3M3ScHI8xCthcmE8E0j +bi+ZEHPS2NiBZtgHFF27BLn7zZuTc+oD5WKduZdK3463egnyThTqIIMl25WZBuab +k5ycwYrWwBH0jfA4gwJ13ai4pufKC2RM8qIu6YAVPglYBKFLKGvvJHa5vI+LuA0E +K+k35hIic7yVUcQneNnAF2598X5yWiieYnOZpmHlRw1zfbMwOJr3ZNj2v94u7b+L +sTa/1Uv9887Vb6sJp0c2Sh4cwEccoPYkvMqFn3ZrJUr3UdDu1K2vWohPtswzhrYV ++RdPZE5RLoCQufKvlPezk0Pzhzb3bBU7XjUbdGY1nH/EyQeBNp+Gw6qldKvzcBaB +cyOK1c6hPSszpJX93m5UxCN55IeifmcNjmbDh8vGCCdajy6d56qV2n4F3k7vt1J1 +0UlxIGhqijJoaTCX66xjLMC6VXkSz6aHQ35rnXosm/cqPcQshsZTdlfSyWkorfdr +4Hj8viBER26mjYurTMLBKDtUN724ZrR0Ev5jorX9uoKlgl87bDZHty2Ku2S+vR68 +VAvnj6Fi1BYNclnDoqxdRB2z5T9JbWE52HuG83/QsplhEqXxESDxriTyTHMbNxEe +88soVCDh4tgflZFa2ucUr6gEKJKij7jgahARnyaXfPZlQBUAS1YUeILYmN+VR+M/ +sHENpwDWc7TInn8VN638nJV+ScZGMih3AwWZTIoiLju3MMt1K0YZ3NuiqwGH4Jwg +/BbEdTWeCci9y3NEQHQ3uZZ5p6j2CwFVlK11idemCMvAiTVxF+gKdaLMkeCwKxru +J3YzhKEo+iDVYbPYBYizx/EHBn2U5kITQ5SBXzjTaaFMNZJEf9JYsL1ybPB6HOFY +VNVB2KT8CGVwtCJHb2xhbmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iQFO +BBMBCgA4FiEEC6K7U7f4qesybTnqSkra7gHusm0FAmL07P0CGwMFCwkIBwIGFQoJ +CAsCBBYCAwECHgECF4AACgkQSkra7gHusm1MvwgAxpClWkeSqIhMQfbiuz0+lOkE +89y1DCFw8bHjZoUf4/4K8hFA3dGkk+q72XFgiyaCpfXxMt6Gi+dN47t+tTv9NIqC +sukbaoJBmJDhN6+djmJOgOYy+FWsW2LAk2LOwKYulpnBZdcA5rlMAhBg7gevQpF+ +ruSU69P7UUaFJl/DC7hDmaIcj+4cjBE/HO26SnVQjoTfjZT82rDh1Wsuf8LnkJUk +b3wezBLpXKjDvdHikdv4gdlR4AputVM38aZntYYglh/EASo5TneyZ7ZscdLNRdcF +r5O2fKqrOJLOdaoYRFZZWOvP5GtEVFDU7WGivOSVfiszBE0wZR3dgZRJipHCXJ0D +xgRi9Oz9AQgAtMJcJqLLVANJHl90tWuoizDkm+Imcwq2ubQAjpclnNrODnDK+7o4 +pBsWmXbZSdkC4gY+LhOQA6bPDD0JEHM58DOnrm49BddxXAyK0HPsk4sGGt2SS86B +OawWNdfJVyqw4bAiHWDmQg4PcjBbt3ocOIxAR6I5kBSiQVxuGQs9T+Zvg3G1r3Or +fS6DzlgY3HFUML5YsGH4lOxNSOoKAP68GIH/WNdUZ+feiRg9knIib6I3Hgtf5eO8 +JRH7aWE/TD7eNu36bLLjT5TZPq5r6xaD2plbtPOyXbNPWs9qI1yG+VnErfaLY0w8 +Qo0aqzbgID+CTZVomXSOpOcQseaFKw8ZfQARAQAB/gcDArha6+/+d4OY/w9N32K9 +hFNYt4LufTETMQ+k/sBeaMuAVzmT47DlAXzkrZhGW4dZOtXMu1rXaUwHlqkhEyzL +L4MYEWVXfD+LbZNEK3MEFss6RK+UAMeT/PTV9aA8cXQVPcSJYzfBXHQ1U1hnOgrO +apn92MN8RmkhX8wJLyeWTMMuP4lXByJMmmGo8WvifeRD2kFY4y0WVBDAXJAV4Ljf +Di/bBiwoc5a+gxHuZT2W9ZSxBQJNXdt4Un2IlyZuo58s5MLx2N0EaNJ8PwRUE6fM +RZYO8aZCEPUtINE4njbvsWOMCtrblsMPwZ1B0SiIaWmLaNyGdCNKea+fCIW7kasC +JYMhnLumpUTXg5HNexkCsl7ABWj0PYBflOE61h8EjWpnQ7JBBVKS2ua4lMjwHRX7 +5o5yxym9k5UZNFdGoXVL7xpizCcdGawxTJvwhs3vBqu1ZWYCegOAZWDrOkCyhUpq +8uKMROZFbn+FwE+7tjt+v2ed62FVEvD6g4V3ThCA6mQqeOARfJWN8GZY8BDm8lht +crOXriUkrx+FlrgGtm2CkwjW5/9Xd7AhFpHnQdFeozOHyq1asNSgJF9sNi9Lz94W +skQSVRi0IExxSXYGI3Y0nnAZUe2BAQflYPJdEveSr3sKlUqXiETTA1VXsTPK3kOC +92CbLzj/Hz199jZvywwyu53I+GKMpF42rMq7zxr2oa61YWY4YE/GDezwwys/wLx/ +QpCW4X3ppI7wJjCSSqEV0baYZSSli1ayheS6dxi8QnSpX1Bmpz6gU7m/M9Sns+hl +J7ZvgpjCAiV7KJTjtclr5/S02zP78LTVkoTWoz/6MOTROwaP63VBUXX8pbJhf/vu +DLmNnDk8joMJxoDXWeNU0EnNl4hP7Z/jExRBOEO4oAnUf/Sf6gCWQhL5qcajtg6w +tGv7vx3f2IkBNgQYAQoAIBYhBAuiu1O3+KnrMm056kpK2u4B7rJtBQJi9Oz9AhsM +AAoJEEpK2u4B7rJt6lgIAMBWqP4BCOGnQXBbgJ0+ACVghpkFUXZTb/tXJc8UUvTM +8uov6k/RsqDGZrvhhufD7Wwt7j9v7dD7VPp7bPyjVWyimglQzWguTUUqLDGlstYH +5uYv1pzma0ZsAGNqFeGlTLsKOSGKFMH4rB2KfN2n51L8POvtp1y7GKZQbWIWneaB +cZr3BINU5GMvYYU7pAYcoR+mJPdJx5Up3Ocn+bn8Tu1sy9C/ArtCQucazGnoE9u1 +HhNLrh0CdzzX7TNH6TQ8LwPOvq0K5l/WqbN9lE0WBBhMv2HydxhluO8AhU+A5GqC +C+wET7nVDnhoOm/fstIeb7/LN7OYejKPeHdFBJEL9GA= +=u442 +-----END PGP PRIVATE KEY BLOCK-----` + +const curve25519PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4 + +lFgEYvTtQBYJKwYBBAHaRw8BAQdAxsNXLbrk5xOjpO24VhOMvQ0/F+JcyIkckMDH +X3FIGxcAAQDFOlunZWYuPsCx5JLp78vKqUTfgef9TGG4oD6I/Sa0zBMstCJHb2xh 
+bmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iJAEExYIADgWIQSFQHEOazmo +h1ldII4MvfnLQ4JBNwUCYvTtQAIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK +CRAMvfnLQ4JBN5yeAQCKdry8B5ScCPrev2+UByMCss7Sdu5RhomCFsHdNPLcKAEA +8ugei+1owHsV+3cGwWWzKk6sLa8ZN87i3SKuOGp9DQycXQRi9O1AEgorBgEEAZdV +AQUBAQdA5CubPp8l7lrVQ25h7Hx5XN2C8xanRnnpcjzEooCaEA0DAQgHAAD/Rpc+ +sOZUXrFk9HOWB1XU41LoWbDBoG8sP8RWAVYwD5AQRYh4BBgWCAAgFiEEhUBxDms5 +qIdZXSCODL35y0OCQTcFAmL07UACGwwACgkQDL35y0OCQTcvdwEA7lb5g/YisrEf +iq660uwMGoepLUfvtqKzuQ6heYe83y0BAN65Ffg5HYOJzUEi0kZQRf7OhdtuL2kJ +SRXn8DmCTfEB +=cELM +-----END PGP PRIVATE KEY BLOCK-----` + +const curve448PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: C1DB 65D5 80D7 B922 7254 4B1E A699 9895 FABA CE52 + +xYUEYV2UmRYDK2VxAc9AFyxgh5xnSbyt50TWl558mw9xdMN+/UBLr5+UMP8IsrvV +MdXuTIE8CyaUQKSotHtH2RkYEXj5nsMAAAHPQIbTMSzjIWug8UFECzAex5FHgAgH +gYF3RK+TS8D24wX8kOu2C/NoVxwGY+p+i0JHaB+7yljriSKAGxs6wsBEBB8WCgCD +BYJhXZSZBYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlv +bnMuc2VxdW9pYS1wZ3Aub3Jn5wSpIutJ5HncJWk4ruUV8GzQF390rR5+qWEAnAoY +akcDFQoIApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAALzdA5dA/fsgYg/J +qaQriYKaPUkyHL7EB3BXhV2d1h/gk+qJLvXQuU2WEJ/XSs3GrsBRiiZwvPH4o+7b +mleAxjy5wpS523vqrrBR2YZ5FwIku7WS4litSdn4AtVam/TlLdMNIf41CtFeZKBe +c5R5VNdQy8y7qy8AAADNEUN1cnZlNDQ4IE9wdGlvbiA4wsBHBBMWCgCGBYJhXZSZ +BYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlvbnMuc2Vx +dW9pYS1wZ3Aub3JnD55UsYMzE6OACP+mgw5zvT+BBgol8/uFQjHg4krjUCMDFQoI +ApkBApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAPQJA5dA0Xqwzn/0uwCq +RlsOVCB3f5NOj1exKnlBvRw0xT1VBee1yxvlUt5eIAoCxWoRlWBJob3TTkhm9AEA +8dyhwPmyGfWHzPw5NFG3xsXrZdNXNvit9WMVAPcmsyR7teXuDlJItxRAdJJc/qfJ +YVbBFoaNrhYAAADHhQRhXZSZFgMrZXEBz0BL7THZ9MnCLfSPJ1FMLim9eGkQ3Bfn +M3he5rOwO3t14QI1LjI96OjkeJipMgcFAmEP1Bq/ZHGO7oAAAc9AFnE8iNBaT3OU +EFtxkmWHXtdaYMmGGRdopw9JPXr/UxuunDln5o9dxPxf7q7z26zXrZen+qed/Isa +HsDCwSwEGBYKAWsFgmFdlJkFiQWkj70JEKaZmJX6us5SRxQAAAAAAB4AIHNhbHRA +bm90YXRpb25zLnNlcXVvaWEtcGdwLm9yZxREUizdTcepBzgSMOv2VWQCWbl++3CZ +EbgAWDryvSsyApsCwDGgBBkWCgBvBYJhXZSZCRBKo3SL4S5djkcUAAAAAAAeACBz +YWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmemoGTDjmNQiIzw6HOEddvS0OB7 +UZ/P07jM/EVmnYxTlBYhBAxsnkGpx1UCiH6gUUqjdIvhLl2OAAALYQOXQAMB1oKq +OWxSFmvmgCKNcbAAyA3piF5ERIqs4z07oJvqDYrOWt75UsEIH/04gU/vHc4EmfG2 +JDLJgOLlyTUPkL/08f0ydGZPofFQBhn8HkuFFjnNtJ5oz3GIP4cdWMQFaUw0uvjb +PM9Tm3ptENGd6Ts1AAAAFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAGpTA5dATR6i +U2GrpUcQgpG+JqfAsGmF4yAOhgFxc1UfidFk3nTup3fLgjipkYY170WLRNbyKkVO +Sodx93GAs58rizO1acDAWiLq3cyEPBFXbyFThbcNPcLl+/77Uk/mgkYrPQFAQWdK +1kSRm4SizDBK37K8ChAAAADHhwRhXZSZEgMrZW8Bx0DMhzvhQo+OsXeqQ6QVw4sF +CaexHh6rLohh7TzL3hQSjoJ27fV6JBkIWdn0LfrMlJIDbSv2SLdlgQMBCgkAAcdA +MO7Dc1myF6Co1fAH+EuP+OxhxP/7V6ljuSCZENDfA49tQkzTta+PniG+pOVB2LHb +huyaKBkqiaogo8LAOQQYFgoAeAWCYV2UmQWJBaSPvQkQppmYlfq6zlJHFAAAAAAA +HgAgc2FsdEBub3RhdGlvbnMuc2VxdW9pYS1wZ3Aub3JnEjBMQAmc/2u45u5FQGmB +QAytjSG2LM3JQN+PPVl5vEkCmwwWIQTB22XVgNe5InJUSx6mmZiV+rrOUgAASdYD +l0DXEHQ9ykNP2rZP35ET1dmiFagFtTj/hLQcWlg16LqvJNGqOgYXuqTerbiOOt02 +XLCBln+wdewpU4ChEffMUDRBfqfQco/YsMqWV7bHJHAO0eC/DMKCjyU90xdH7R/d +QgqsfguR1PqPuJxpXV4bSr6CGAAAAA== +=MSvh +-----END PGP PRIVATE KEY BLOCK-----` + +const keyWithNotation = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xVgEY9gIshYJKwYBBAHaRw8BAQdAF25fSM8OpFlXZhop4Qpqo5ywGZ4jgWlR +ppjhIKDthREAAQC+LFpzFcMJYcjxGKzBGHN0Px2jU4d04YSRnFAik+lVVQ6u +zRdUZXN0IDx0ZXN0QGV4YW1wbGUuY29tPsLACgQQFgoAfAUCY9gIsgQLCQcI +CRD/utJOCym8pR0UgAAAAAAQAAR0ZXh0QGV4YW1wbGUuY29tdGVzdB8UAAAA +AAASAARiaW5hcnlAZXhhbXBsZS5jb20AAQIDAxUICgQWAAIBAhkBAhsDAh4B +FiEEEMCQTUVGKgCX5rDQ/7rSTgspvKUAAPl5AP9Npz90LxzrB97Qr2DrGwfG 
+wuYn4FSYwtuPfZHHeoIabwD/QEbvpQJ/NBb9EAZuow4Rirlt1yv19mmnF+j5
+8yUzhQjHXQRj2AiyEgorBgEEAZdVAQUBAQdARXAo30DmKcyUg6co7OUm0RNT
+z9iqFbDBzA8A47JEt1MDAQgHAAD/XKK3lBm0SqMR558HLWdBrNG6NqKuqb5X
+joCML987ZNgRD8J4BBgWCAAqBQJj2AiyCRD/utJOCym8pQIbDBYhBBDAkE1F
+RioAl+aw0P+60k4LKbylAADRxgEAg7UfBDiDPp5LHcW9D+SgFHk6+GyEU4ev
+VppQxdtxPvAA/34snHBX7Twnip1nMt7P4e2hDiw/hwQ7oqioOvc6jMkP
+=Z8YJ
+-----END PGP PRIVATE KEY BLOCK-----
+`
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go
new file mode 100644
index 0000000000..fec41a0e73
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go
@@ -0,0 +1,67 @@
+// Copyright (C) 2019 ProtonTech AG
+
+package packet
+
+import "math/bits"
+
+// CipherSuite contains a combination of Cipher and Mode.
+type CipherSuite struct {
+	// The cipher function
+	Cipher CipherFunction
+	// The AEAD mode of operation.
+	Mode AEADMode
+}
+
+// AEADConfig collects a number of AEAD parameters along with sensible defaults.
+// A nil AEADConfig is valid and results in all default values.
+type AEADConfig struct {
+	// The AEAD mode of operation.
+	DefaultMode AEADMode
+	// Number of octets in each chunk of data.
+	ChunkSize uint64
+}
+
+// Mode returns the AEAD mode of operation.
+func (conf *AEADConfig) Mode() AEADMode {
+	// If no preference is specified, OCB is used (which is mandatory to implement).
+	if conf == nil || conf.DefaultMode == 0 {
+		return AEADModeOCB
+	}
+
+	mode := conf.DefaultMode
+	if mode != AEADModeEAX && mode != AEADModeOCB && mode != AEADModeGCM {
+		panic("AEAD mode unsupported")
+	}
+	return mode
+}
+
+// ChunkSizeByte returns the byte indicating the chunk size. The effective
+// chunk size is computed as uint64(1) << (chunkSizeByte + 6), with the
+// chunk-size byte capped at 16 (a 4 MiB chunk).
+// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
+func (conf *AEADConfig) ChunkSizeByte() byte {
+	if conf == nil || conf.ChunkSize == 0 {
+		return 12 // 1 << (12 + 6) == 262144 bytes
+	}
+
+	chunkSize := conf.ChunkSize
+	exponent := bits.Len64(chunkSize) - 1
+	switch {
+	case exponent < 6:
+		exponent = 6
+	case exponent > 16:
+		exponent = 16
+	}
+
+	return byte(exponent - 6)
+}
+
+// decodeAEADChunkSize returns the effective chunk size. On 32-bit systems, the
+// maximum returned value is 1 << 30.
+func decodeAEADChunkSize(c byte) int {
+	size := uint64(1 << (c + 6))
+	if size != uint64(int(size)) {
+		return 1 << 30
+	}
+	return int(size)
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go
new file mode 100644
index 0000000000..2d1aeed65c
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go
@@ -0,0 +1,273 @@
+// Copyright (C) 2019 ProtonTech AG
+
+package packet
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"encoding/binary"
+	"io"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+)
+
+// aeadCrypter is an AEAD opener/sealer, its configuration, and data for en/decryption.
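+// The chunk size is decoded from the packet's one-octet chunk-size field c as
+// 1 << (c + 6); for example, the default chunk-size byte of 12 yields
+// 1 << 18 = 262144 bytes (see ChunkSizeByte and decodeAEADChunkSize in
+// aead_config.go).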
+type aeadCrypter struct {
+	aead           cipher.AEAD
+	chunkSize      int
+	initialNonce   []byte
+	associatedData []byte       // Chunk-independent associated data
+	chunkIndex     []byte       // Chunk counter
+	packetTag      packetType   // SEIP packet (v2) or AEAD Encrypted Data packet
+	bytesProcessed int          // Number of plaintext bytes encrypted/decrypted
+	buffer         bytes.Buffer // Buffered bytes across chunks
+}
+
+// computeNextNonce takes the incremental index and computes an exclusive OR with
+// the least significant 8 bytes of the receiver's initial nonce (see sec.
+// 5.16.1 and 5.16.2). It returns the resulting nonce.
+func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
+	if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected {
+		return append(wo.initialNonce, wo.chunkIndex...)
+	}
+
+	nonce = make([]byte, len(wo.initialNonce))
+	copy(nonce, wo.initialNonce)
+	offset := len(wo.initialNonce) - 8
+	for i := 0; i < 8; i++ {
+		nonce[i+offset] ^= wo.chunkIndex[i]
+	}
+	return
+}
+
+// incrementIndex increments by one the big-endian integer represented by the
+// slice, modifying the slice in place.
+func (wo *aeadCrypter) incrementIndex() error {
+	index := wo.chunkIndex
+	if len(index) == 0 {
+		return errors.AEADError("Index has length 0")
+	}
+	for i := len(index) - 1; i >= 0; i-- {
+		if index[i] < 255 {
+			index[i]++
+			return nil
+		}
+		index[i] = 0
+	}
+	return errors.AEADError("cannot further increment index")
+}
+
+// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when
+// necessary, similar to aeadEncrypter.
+type aeadDecrypter struct {
+	aeadCrypter           // Embedded ciphertext opener
+	reader      io.Reader // 'reader' is a partialLengthReader
+	peekedBytes []byte    // Used to detect last chunk
+	eof         bool
+}
+
+// Read decrypts bytes and reads them into dst. It decrypts when necessary and
+// buffers extra decrypted bytes. It returns the number of bytes copied into dst
+// and an error.
+func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) {
+	// Return buffered plaintext bytes from previous calls
+	if ar.buffer.Len() > 0 {
+		return ar.buffer.Read(dst)
+	}
+
+	// Return EOF if we've previously validated the final tag
+	if ar.eof {
+		return 0, io.EOF
+	}
+
+	// Read a chunk
+	tagLen := ar.aead.Overhead()
+	cipherChunkBuf := new(bytes.Buffer)
+	_, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen))
+	cipherChunk := cipherChunkBuf.Bytes()
+	if errRead != nil && errRead != io.EOF {
+		return 0, errRead
+	}
+
+	if len(cipherChunk) > 0 {
+		decrypted, errChunk := ar.openChunk(cipherChunk)
+		if errChunk != nil {
+			return 0, errChunk
+		}
+
+		// Return decrypted bytes, buffering if necessary
+		if len(dst) < len(decrypted) {
+			n = copy(dst, decrypted[:len(dst)])
+			ar.buffer.Write(decrypted[len(dst):])
+		} else {
+			n = copy(dst, decrypted)
+		}
+	}
+
+	// Check final authentication tag
+	if errRead == io.EOF {
+		errChunk := ar.validateFinalTag(ar.peekedBytes)
+		if errChunk != nil {
+			return n, errChunk
+		}
+		ar.eof = true // Mark EOF for when we've returned all buffered data
+	}
+	return
+}
+
+// Close is a no-op. The final authentication tag of the stream was already
+// checked in the last Read call. In the future, this function could be used to
+// wipe the reader and peeked, decrypted bytes, if necessary.
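+// A typical consumer (sketch; dst and rc are assumed here, not part of this
+// file) drains the stream and then closes it:
+//
+//	if _, err := io.Copy(dst, rc); err == nil {
+//		err = rc.Close()
+//	}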
+func (ar *aeadDecrypter) Close() (err error) { + if !ar.eof { + errChunk := ar.validateFinalTag(ar.peekedBytes) + if errChunk != nil { + return errChunk + } + } + return nil +} + +// openChunk decrypts and checks integrity of an encrypted chunk, returning +// the underlying plaintext and an error. It accesses peeked bytes from next +// chunk, to identify the last chunk and decrypt/validate accordingly. +func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) { + tagLen := ar.aead.Overhead() + // Restore carried bytes from last call + chunkExtra := append(ar.peekedBytes, data...) + // 'chunk' contains encrypted bytes, followed by an authentication tag. + chunk := chunkExtra[:len(chunkExtra)-tagLen] + ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:] + + adata := ar.associatedData + if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { + adata = append(ar.associatedData, ar.chunkIndex...) + } + + nonce := ar.computeNextNonce() + plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata) + if err != nil { + return nil, err + } + ar.bytesProcessed += len(plainChunk) + if err = ar.aeadCrypter.incrementIndex(); err != nil { + return nil, err + } + return plainChunk, nil +} + +// Checks the summary tag. It takes into account the total decrypted bytes into +// the associated data. It returns an error, or nil if the tag is valid. +func (ar *aeadDecrypter) validateFinalTag(tag []byte) error { + // Associated: tag, version, cipher, aead, chunk size, ... + amountBytes := make([]byte, 8) + binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed)) + + adata := ar.associatedData + if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { + // ... index ... + adata = append(ar.associatedData, ar.chunkIndex...) + } + + // ... and total number of encrypted octets + adata = append(adata, amountBytes...) + nonce := ar.computeNextNonce() + _, err := ar.aead.Open(nil, nonce, tag, adata) + if err != nil { + return err + } + return nil +} + +// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according +// to the AEAD block size, and buffers the extra encrypted bytes for next write. +type aeadEncrypter struct { + aeadCrypter // Embedded plaintext sealer + writer io.WriteCloser // 'writer' is a partialLengthWriter +} + +// Write encrypts and writes bytes. It encrypts when necessary and buffers extra +// plaintext bytes for next call. When the stream is finished, Close() MUST be +// called to append the final tag. +func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { + // Append plaintextBytes to existing buffered bytes + n, err = aw.buffer.Write(plaintextBytes) + if err != nil { + return n, err + } + // Encrypt and write chunks + for aw.buffer.Len() >= aw.chunkSize { + plainChunk := aw.buffer.Next(aw.chunkSize) + encryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return n, err + } + _, err = aw.writer.Write(encryptedChunk) + if err != nil { + return n, err + } + } + return +} + +// Close encrypts and writes the remaining buffered plaintext if any, appends +// the final authentication tag, and closes the embedded writer. This function +// MUST be called at the end of a stream. +func (aw *aeadEncrypter) Close() (err error) { + // Encrypt and write a chunk if there's buffered data left, or if we haven't + // written any chunks yet. 
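+	// (A zero-length message thus still produces one sealed, empty chunk,
+	// so the final tag always authenticates a well-formed stream.)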
+ if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { + plainChunk := aw.buffer.Bytes() + lastEncryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return err + } + _, err = aw.writer.Write(lastEncryptedChunk) + if err != nil { + return err + } + } + // Compute final tag (associated data: packet tag, version, cipher, aead, + // chunk size... + adata := aw.associatedData + + if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted { + // ... index ... + adata = append(aw.associatedData, aw.chunkIndex...) + } + + // ... and total number of encrypted octets + amountBytes := make([]byte, 8) + binary.BigEndian.PutUint64(amountBytes, uint64(aw.bytesProcessed)) + adata = append(adata, amountBytes...) + + nonce := aw.computeNextNonce() + finalTag := aw.aead.Seal(nil, nonce, nil, adata) + _, err = aw.writer.Write(finalTag) + if err != nil { + return err + } + return aw.writer.Close() +} + +// sealChunk Encrypts and authenticates the given chunk. +func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) { + if len(data) > aw.chunkSize { + return nil, errors.AEADError("chunk exceeds maximum length") + } + if aw.associatedData == nil { + return nil, errors.AEADError("can't seal without headers") + } + adata := aw.associatedData + if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted { + adata = append(aw.associatedData, aw.chunkIndex...) + } + + nonce := aw.computeNextNonce() + encrypted := aw.aead.Seal(nil, nonce, data, adata) + aw.bytesProcessed += len(data) + if err := aw.aeadCrypter.incrementIndex(); err != nil { + return nil, err + } + return encrypted, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go new file mode 100644 index 0000000000..98bd876bf2 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go @@ -0,0 +1,96 @@ +// Copyright (C) 2019 ProtonTech AG + +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// AEADEncrypted represents an AEAD Encrypted Packet. 
+// See https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t +type AEADEncrypted struct { + cipher CipherFunction + mode AEADMode + chunkSizeByte byte + Contents io.Reader // Encrypted chunks and tags + initialNonce []byte // Referred to as IV in RFC4880-bis +} + +// Only currently defined version +const aeadEncryptedVersion = 1 + +func (ae *AEADEncrypted) parse(buf io.Reader) error { + headerData := make([]byte, 4) + if n, err := io.ReadFull(buf, headerData); n < 4 { + return errors.AEADError("could not read aead header:" + err.Error()) + } + // Read initial nonce + mode := AEADMode(headerData[2]) + nonceLen := mode.IvLength() + + // This packet supports only EAX and OCB + // https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t + if nonceLen == 0 || mode > AEADModeOCB { + return errors.AEADError("unknown mode") + } + + initialNonce := make([]byte, nonceLen) + if n, err := io.ReadFull(buf, initialNonce); n < nonceLen { + return errors.AEADError("could not read aead nonce:" + err.Error()) + } + ae.Contents = buf + ae.initialNonce = initialNonce + c := headerData[1] + if _, ok := algorithm.CipherById[c]; !ok { + return errors.UnsupportedError("unknown cipher: " + string(c)) + } + ae.cipher = CipherFunction(c) + ae.mode = mode + ae.chunkSizeByte = headerData[3] + return nil +} + +// Decrypt returns a io.ReadCloser from which decrypted bytes can be read, or +// an error. +func (ae *AEADEncrypted) Decrypt(ciph CipherFunction, key []byte) (io.ReadCloser, error) { + return ae.decrypt(key) +} + +// decrypt prepares an aeadCrypter and returns a ReadCloser from which +// decrypted bytes can be read (see aeadDecrypter.Read()). +func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) { + blockCipher := ae.cipher.new(key) + aead := ae.mode.new(blockCipher) + // Carry the first tagLen bytes + tagLen := ae.mode.TagLength() + peekedBytes := make([]byte, tagLen) + n, err := io.ReadFull(ae.Contents, peekedBytes) + if n < tagLen || (err != nil && err != io.EOF) { + return nil, errors.AEADError("Not enough data to decrypt:" + err.Error()) + } + chunkSize := decodeAEADChunkSize(ae.chunkSizeByte) + return &aeadDecrypter{ + aeadCrypter: aeadCrypter{ + aead: aead, + chunkSize: chunkSize, + initialNonce: ae.initialNonce, + associatedData: ae.associatedData(), + chunkIndex: make([]byte, 8), + packetTag: packetTypeAEADEncrypted, + }, + reader: ae.Contents, + peekedBytes: peekedBytes}, nil +} + +// associatedData for chunks: tag, version, cipher, mode, chunk size byte +func (ae *AEADEncrypted) associatedData() []byte { + return []byte{ + 0xD4, + aeadEncryptedVersion, + byte(ae.cipher), + byte(ae.mode), + ae.chunkSizeByte} +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go new file mode 100644 index 0000000000..334de286b3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go @@ -0,0 +1,162 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "compress/bzip2" + "compress/flate" + "compress/zlib" + "io" + "io/ioutil" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// Compressed represents a compressed OpenPGP packet. The decompressed contents +// will contain more OpenPGP packets. 
See RFC 4880, section 5.6.
+type Compressed struct {
+	Body io.Reader
+}
+
+const (
+	NoCompression      = flate.NoCompression
+	BestSpeed          = flate.BestSpeed
+	BestCompression    = flate.BestCompression
+	DefaultCompression = flate.DefaultCompression
+)
+
+// CompressionConfig contains compressor configuration settings.
+type CompressionConfig struct {
+	// Level is the compression level to use. It must be set to
+	// between -1 and 9, with -1 causing the compressor to use the
+	// default compression level, 0 causing the compressor to use
+	// no compression and 1 to 9 representing increasing (better,
+	// slower) compression levels. If Level is less than -1 or
+	// more than 9, a non-nil error will be returned during
+	// encryption. See the constants above for convenient common
+	// settings for Level.
+	Level int
+}
+
+// decompressionReader ensures that the whole compression packet is read.
+type decompressionReader struct {
+	compressed   io.Reader
+	decompressed io.ReadCloser
+	readAll      bool
+}
+
+func newDecompressionReader(r io.Reader, decompressor io.ReadCloser) *decompressionReader {
+	return &decompressionReader{
+		compressed:   r,
+		decompressed: decompressor,
+	}
+}
+
+func (dr *decompressionReader) Read(data []byte) (n int, err error) {
+	if dr.readAll {
+		return 0, io.EOF
+	}
+	n, err = dr.decompressed.Read(data)
+	if err == io.EOF {
+		dr.readAll = true
+		// Close the decompressor.
+		if errDec := dr.decompressed.Close(); errDec != nil {
+			return n, errDec
+		}
+		// Consume all remaining data from the compressed packet.
+		consumeAll(dr.compressed)
+	}
+	return n, err
+}
+
+func (c *Compressed) parse(r io.Reader) error {
+	var buf [1]byte
+	_, err := readFull(r, buf[:])
+	if err != nil {
+		return err
+	}
+
+	switch buf[0] {
+	case 0:
+		c.Body = r
+	case 1:
+		c.Body = newDecompressionReader(r, flate.NewReader(r))
+	case 2:
+		decompressor, err := zlib.NewReader(r)
+		if err != nil {
+			return err
+		}
+		c.Body = newDecompressionReader(r, decompressor)
+	case 3:
+		c.Body = newDecompressionReader(r, ioutil.NopCloser(bzip2.NewReader(r)))
+	default:
+		err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
+	}
+
+	return err
+}
+
+// compressedWriteCloser represents the serialized compression stream
+// header and the compressor. Its Close() method ensures that both the
+// compressor and serialized stream header are closed. Its Write()
+// method writes to the compressor.
+type compressedWriteCloser struct {
+	sh io.Closer      // Stream Header
+	c  io.WriteCloser // Compressor
+}
+
+func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
+	return cwc.c.Write(p)
+}
+
+func (cwc compressedWriteCloser) Close() (err error) {
+	err = cwc.c.Close()
+	if err != nil {
+		return err
+	}
+
+	return cwc.sh.Close()
+}
+
+// SerializeCompressed serializes a compressed data packet to w and
+// returns a WriteCloser to which the literal data packets themselves
+// can be written and which MUST be closed on completion. If cc is
+// nil, sensible defaults will be used to configure the compression
+// algorithm.
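+//
+// Usage sketch (w and innerPacketBytes are assumed here, not defined in this
+// file):
+//
+//	wc, err := SerializeCompressed(w, CompressionZLIB, &CompressionConfig{Level: BestSpeed})
+//	if err == nil {
+//		_, _ = wc.Write(innerPacketBytes) // serialized inner packets
+//		_ = wc.Close()                    // MUST be closed on completion
+//	}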
+func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
+	compressed, err := serializeStreamHeader(w, packetTypeCompressed)
+	if err != nil {
+		return
+	}
+
+	_, err = compressed.Write([]byte{uint8(algo)})
+	if err != nil {
+		return
+	}
+
+	level := DefaultCompression
+	if cc != nil {
+		level = cc.Level
+	}
+
+	var compressor io.WriteCloser
+	switch algo {
+	case CompressionZIP:
+		compressor, err = flate.NewWriter(compressed, level)
+	case CompressionZLIB:
+		compressor, err = zlib.NewWriterLevel(compressed, level)
+	default:
+		s := strconv.Itoa(int(algo))
+		err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
+	}
+	if err != nil {
+		return
+	}
+
+	literaldata = compressedWriteCloser{compressed, compressor}
+
+	return
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
new file mode 100644
index 0000000000..181d5d344e
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
@@ -0,0 +1,352 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"crypto"
+	"crypto/rand"
+	"io"
+	"math/big"
+	"time"
+
+	"github.com/ProtonMail/go-crypto/openpgp/s2k"
+)
+
+var (
+	defaultRejectPublicKeyAlgorithms = map[PublicKeyAlgorithm]bool{
+		PubKeyAlgoElGamal: true,
+		PubKeyAlgoDSA:     true,
+	}
+	defaultRejectMessageHashAlgorithms = map[crypto.Hash]bool{
+		crypto.SHA1:      true,
+		crypto.MD5:       true,
+		crypto.RIPEMD160: true,
+	}
+	defaultRejectCurves = map[Curve]bool{
+		CurveSecP256k1: true,
+	}
+)
+
+// Config collects a number of parameters along with sensible defaults.
+// A nil *Config is valid and results in all default values.
+type Config struct {
+	// Rand provides the source of entropy.
+	// If nil, the crypto/rand Reader is used.
+	Rand io.Reader
+	// DefaultHash is the default hash function to be used.
+	// If zero, SHA-256 is used.
+	DefaultHash crypto.Hash
+	// DefaultCipher is the cipher to be used.
+	// If zero, AES-128 is used.
+	DefaultCipher CipherFunction
+	// Time returns the current time as the number of seconds since the
+	// epoch. If Time is nil, time.Now is used.
+	Time func() time.Time
+	// DefaultCompressionAlgo is the compression algorithm to be
+	// applied to the plaintext before encryption. If zero, no
+	// compression is done.
+	DefaultCompressionAlgo CompressionAlgo
+	// CompressionConfig configures the compression settings.
+	CompressionConfig *CompressionConfig
+	// S2K (String to Key) config, used for key derivation in the context of secret key encryption
+	// and password-encrypted data.
+	// If nil, the default configuration is used.
+	S2KConfig *s2k.Config
+	// Iteration count for Iterated S2K (String to Key).
+	// Only used if S2KConfig is nil.
+	// This value is duplicated here from s2k.Config for backwards compatibility.
+	// It determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 65536 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 16777216 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	//
+	// Deprecated: S2KCount should be configured in S2KConfig instead.
+	S2KCount int
+	// RSABits is the number of bits in new RSA keys made with NewEntity.
+	// If zero, 2048-bit keys are created.
+	RSABits int
+	// The public key algorithm to use - will always create a signing primary
+	// key and encryption subkey.
+	Algorithm PublicKeyAlgorithm
+	// Some known primes that are optionally prepopulated by the caller
+	RSAPrimes []*big.Int
+	// Curve configures the desired packet.Curve if the Algorithm is PubKeyAlgoECDSA,
+	// PubKeyAlgoEdDSA, or PubKeyAlgoECDH. If empty, Curve25519 is used.
+	Curve Curve
+	// AEADConfig configures the use of the new AEAD Encrypted Data Packet,
+	// defined in the draft of the next version of the OpenPGP specification.
+	// If a non-nil AEADConfig is passed, usage of this packet is enabled. By
+	// default, it is disabled. See the documentation of AEADConfig for more
+	// configuration options related to AEAD.
+	// **Note: using this option may break compatibility with other OpenPGP
+	// implementations, as well as future versions of this library.**
+	AEADConfig *AEADConfig
+	// V6Keys configures version 6 key generation. If false, this package still
+	// supports version 6 keys, but produces version 4 keys.
+	V6Keys bool
+	// Minimum RSA key size allowed for key generation and message signing, verification and encryption.
+	MinRSABits uint16
+	// Reject insecure algorithms; only applies to the v2 API.
+	RejectPublicKeyAlgorithms   map[PublicKeyAlgorithm]bool
+	RejectMessageHashAlgorithms map[crypto.Hash]bool
+	RejectCurves                map[Curve]bool
+	// "The validity period of the key. This is the number of seconds after
+	// the key creation time that the key expires. If this is not present
+	// or has a value of zero, the key never expires. This is found only on
+	// a self-signature."
+	// https://tools.ietf.org/html/rfc4880#section-5.2.3.6
+	KeyLifetimeSecs uint32
+	// "The validity period of the signature. This is the number of seconds
+	// after the signature creation time that the signature expires. If
+	// this is not present or has a value of zero, it never expires."
+	// https://tools.ietf.org/html/rfc4880#section-5.2.3.10
+	SigLifetimeSecs uint32
+	// SigningKeyId is used to specify the signing key to use (by Key ID).
+	// By default, the signing key is selected automatically, preferring
+	// signing subkeys if available.
+	SigningKeyId uint64
+	// SigningIdentity is used to specify a user ID (packet Signer's User ID, type 28)
+	// when producing a generic certification signature onto an existing user ID.
+	// The identity must be present in the signer Entity.
+	SigningIdentity string
+	// InsecureAllowUnauthenticatedMessages controls whether it is tolerated to read
+	// encrypted messages without Modification Detection Code (MDC).
+	// MDC is mandated by the IETF OpenPGP Crypto Refresh draft and has long been implemented
+	// in most OpenPGP implementations. Messages without MDC are considered unnecessarily
+	// insecure and should be prevented whenever possible.
+	// In case one needs to deal with messages from very old OpenPGP implementations, there
+	// might be no other way than to tolerate the missing MDC. Setting this flag allows this
+	// mode of operation. It should be considered a measure of last resort.
+ InsecureAllowUnauthenticatedMessages bool + // KnownNotations is a map of Notation Data names to bools, which controls + // the notation names that are allowed to be present in critical Notation Data + // signature subpackets. + KnownNotations map[string]bool + // SignatureNotations is a list of Notations to be added to any signatures. + SignatureNotations []*Notation + // CheckIntendedRecipients controls, whether the OpenPGP Intended Recipient Fingerprint feature + // should be enabled for encryption and decryption. + // (See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-12.html#name-intended-recipient-fingerpr). + // When the flag is set, encryption produces Intended Recipient Fingerprint signature sub-packets and decryption + // checks whether the key it was encrypted to is one of the included fingerprints in the signature. + // If the flag is disabled, no Intended Recipient Fingerprint sub-packets are created or checked. + // The default behavior, when the config or flag is nil, is to enable the feature. + CheckIntendedRecipients *bool + // CacheSessionKey controls if decryption should return the session key used for decryption. + // If the flag is set, the session key is cached in the message details struct. + CacheSessionKey bool + // CheckPacketSequence is a flag that controls if the pgp message reader should strictly check + // that the packet sequence conforms with the grammar mandated by rfc4880. + // The default behavior, when the config or flag is nil, is to check the packet sequence. + CheckPacketSequence *bool +} + +func (c *Config) Random() io.Reader { + if c == nil || c.Rand == nil { + return rand.Reader + } + return c.Rand +} + +func (c *Config) Hash() crypto.Hash { + if c == nil || uint(c.DefaultHash) == 0 { + return crypto.SHA256 + } + return c.DefaultHash +} + +func (c *Config) Cipher() CipherFunction { + if c == nil || uint8(c.DefaultCipher) == 0 { + return CipherAES128 + } + return c.DefaultCipher +} + +func (c *Config) Now() time.Time { + if c == nil || c.Time == nil { + return time.Now().Truncate(time.Second) + } + return c.Time().Truncate(time.Second) +} + +// KeyLifetime returns the validity period of the key. +func (c *Config) KeyLifetime() uint32 { + if c == nil { + return 0 + } + return c.KeyLifetimeSecs +} + +// SigLifetime returns the validity period of the signature. +func (c *Config) SigLifetime() uint32 { + if c == nil { + return 0 + } + return c.SigLifetimeSecs +} + +func (c *Config) Compression() CompressionAlgo { + if c == nil { + return CompressionNone + } + return c.DefaultCompressionAlgo +} + +func (c *Config) RSAModulusBits() int { + if c == nil || c.RSABits == 0 { + return 2048 + } + return c.RSABits +} + +func (c *Config) PublicKeyAlgorithm() PublicKeyAlgorithm { + if c == nil || c.Algorithm == 0 { + return PubKeyAlgoRSA + } + return c.Algorithm +} + +func (c *Config) CurveName() Curve { + if c == nil || c.Curve == "" { + return Curve25519 + } + return c.Curve +} + +// Deprecated: The hash iterations should now be queried via the S2K() method. 
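+// (Note: a nil Config or an unset S2KCount yields 0 here; a zero value is
+// understood downstream as "use the s2k package defaults".)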
+func (c *Config) PasswordHashIterations() int { + if c == nil || c.S2KCount == 0 { + return 0 + } + return c.S2KCount +} + +func (c *Config) S2K() *s2k.Config { + if c == nil { + return nil + } + // for backwards compatibility + if c != nil && c.S2KCount > 0 && c.S2KConfig == nil { + return &s2k.Config{ + S2KCount: c.S2KCount, + } + } + return c.S2KConfig +} + +func (c *Config) AEAD() *AEADConfig { + if c == nil { + return nil + } + return c.AEADConfig +} + +func (c *Config) SigningKey() uint64 { + if c == nil { + return 0 + } + return c.SigningKeyId +} + +func (c *Config) SigningUserId() string { + if c == nil { + return "" + } + return c.SigningIdentity +} + +func (c *Config) AllowUnauthenticatedMessages() bool { + if c == nil { + return false + } + return c.InsecureAllowUnauthenticatedMessages +} + +func (c *Config) KnownNotation(notationName string) bool { + if c == nil { + return false + } + return c.KnownNotations[notationName] +} + +func (c *Config) Notations() []*Notation { + if c == nil { + return nil + } + return c.SignatureNotations +} + +func (c *Config) V6() bool { + if c == nil { + return false + } + return c.V6Keys +} + +func (c *Config) IntendedRecipients() bool { + if c == nil || c.CheckIntendedRecipients == nil { + return true + } + return *c.CheckIntendedRecipients +} + +func (c *Config) RetrieveSessionKey() bool { + if c == nil { + return false + } + return c.CacheSessionKey +} + +func (c *Config) MinimumRSABits() uint16 { + if c == nil || c.MinRSABits == 0 { + return 2047 + } + return c.MinRSABits +} + +func (c *Config) RejectPublicKeyAlgorithm(alg PublicKeyAlgorithm) bool { + var rejectedAlgorithms map[PublicKeyAlgorithm]bool + if c == nil || c.RejectPublicKeyAlgorithms == nil { + // Default + rejectedAlgorithms = defaultRejectPublicKeyAlgorithms + } else { + rejectedAlgorithms = c.RejectPublicKeyAlgorithms + } + return rejectedAlgorithms[alg] +} + +func (c *Config) RejectMessageHashAlgorithm(hash crypto.Hash) bool { + var rejectedAlgorithms map[crypto.Hash]bool + if c == nil || c.RejectMessageHashAlgorithms == nil { + // Default + rejectedAlgorithms = defaultRejectMessageHashAlgorithms + } else { + rejectedAlgorithms = c.RejectMessageHashAlgorithms + } + return rejectedAlgorithms[hash] +} + +func (c *Config) RejectCurve(curve Curve) bool { + var rejectedCurve map[Curve]bool + if c == nil || c.RejectCurves == nil { + // Default + rejectedCurve = defaultRejectCurves + } else { + rejectedCurve = c.RejectCurves + } + return rejectedCurve[curve] +} + +func (c *Config) StrictPacketSequence() bool { + if c == nil || c.CheckPacketSequence == nil { + return true + } + return *c.CheckPacketSequence +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go new file mode 100644 index 0000000000..e70f9d9411 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go @@ -0,0 +1,575 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "bytes" + "crypto" + "crypto/rsa" + "encoding/binary" + "encoding/hex" + "io" + "math/big" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" +) + +// EncryptedKey represents a public-key encrypted session key. See RFC 4880, +// section 5.1. +type EncryptedKey struct { + Version int + KeyId uint64 + KeyVersion int // v6 + KeyFingerprint []byte // v6 + Algo PublicKeyAlgorithm + CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet + Key []byte // only valid after a successful Decrypt + + encryptedMPI1, encryptedMPI2 encoding.Field + ephemeralPublicX25519 *x25519.PublicKey // used for x25519 + ephemeralPublicX448 *x448.PublicKey // used for x448 + encryptedSession []byte // used for x25519 and x448 +} + +func (e *EncryptedKey) parse(r io.Reader) (err error) { + var buf [8]byte + _, err = readFull(r, buf[:versionSize]) + if err != nil { + return + } + e.Version = int(buf[0]) + if e.Version != 3 && e.Version != 6 { + return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) + } + if e.Version == 6 { + //Read a one-octet size of the following two fields. + if _, err = readFull(r, buf[:1]); err != nil { + return + } + // The size may also be zero, and the key version and + // fingerprint omitted for an "anonymous recipient" + if buf[0] != 0 { + // non-anonymous case + _, err = readFull(r, buf[:versionSize]) + if err != nil { + return + } + e.KeyVersion = int(buf[0]) + if e.KeyVersion != 4 && e.KeyVersion != 6 { + return errors.UnsupportedError("unknown public key version " + strconv.Itoa(e.KeyVersion)) + } + var fingerprint []byte + if e.KeyVersion == 6 { + fingerprint = make([]byte, fingerprintSizeV6) + } else if e.KeyVersion == 4 { + fingerprint = make([]byte, fingerprintSize) + } + _, err = readFull(r, fingerprint) + if err != nil { + return + } + e.KeyFingerprint = fingerprint + if e.KeyVersion == 6 { + e.KeyId = binary.BigEndian.Uint64(e.KeyFingerprint[:keyIdSize]) + } else if e.KeyVersion == 4 { + e.KeyId = binary.BigEndian.Uint64(e.KeyFingerprint[fingerprintSize-keyIdSize : fingerprintSize]) + } + } + } else { + _, err = readFull(r, buf[:8]) + if err != nil { + return + } + e.KeyId = binary.BigEndian.Uint64(buf[:keyIdSize]) + } + + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + e.Algo = PublicKeyAlgorithm(buf[0]) + var cipherFunction byte + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoElGamal: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + + e.encryptedMPI2 = new(encoding.MPI) + if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoECDH: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + + e.encryptedMPI2 = new(encoding.OID) + if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoX25519: + e.ephemeralPublicX25519, e.encryptedSession, cipherFunction, err = x25519.DecodeFields(r, e.Version == 6) + if err != nil { + return + } + case PubKeyAlgoX448: + e.ephemeralPublicX448, e.encryptedSession, 
cipherFunction, err = x448.DecodeFields(r, e.Version == 6)
+		if err != nil {
+			return
+		}
+	}
+	if e.Version < 6 {
+		switch e.Algo {
+		case PubKeyAlgoX25519, PubKeyAlgoX448:
+			e.CipherFunc = CipherFunction(cipherFunction)
+			// The validity check is done in the Decrypt method
+		}
+	}
+
+	_, err = consumeAll(r)
+	return
+}
+
+// Decrypt decrypts an encrypted session key with the given private key. The
+// private key must have been decrypted first.
+// If config is nil, sensible defaults will be used.
+func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
+	if e.Version < 6 && e.KeyId != 0 && e.KeyId != priv.KeyId {
+		return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16))
+	}
+	if e.Version == 6 && e.KeyVersion != 0 && !bytes.Equal(e.KeyFingerprint, priv.Fingerprint) {
+		return errors.InvalidArgumentError("cannot decrypt encrypted session key for key fingerprint " + hex.EncodeToString(e.KeyFingerprint) + " with private key fingerprint " + hex.EncodeToString(priv.Fingerprint))
+	}
+	if e.Algo != priv.PubKeyAlgo {
+		return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
+	}
+	if priv.Dummy() {
+		return errors.ErrDummyPrivateKey("dummy key found")
+	}
+
+	var err error
+	var b []byte
+
+	// TODO(agl): use session key decryption routines here to avoid
+	// padding oracle attacks.
+	switch priv.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		// Supports both *rsa.PrivateKey and crypto.Decrypter
+		k := priv.PrivateKey.(crypto.Decrypter)
+		b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.Bytes()), nil)
+	case PubKeyAlgoElGamal:
+		c1 := new(big.Int).SetBytes(e.encryptedMPI1.Bytes())
+		c2 := new(big.Int).SetBytes(e.encryptedMPI2.Bytes())
+		b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
+	case PubKeyAlgoECDH:
+		vsG := e.encryptedMPI1.Bytes()
+		m := e.encryptedMPI2.Bytes()
+		oid := priv.PublicKey.oid.EncodedBytes()
+		b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:])
+	case PubKeyAlgoX25519:
+		b, err = x25519.Decrypt(priv.PrivateKey.(*x25519.PrivateKey), e.ephemeralPublicX25519, e.encryptedSession)
+	case PubKeyAlgoX448:
+		b, err = x448.Decrypt(priv.PrivateKey.(*x448.PrivateKey), e.ephemeralPublicX448, e.encryptedSession)
+	default:
+		err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
+	}
+	if err != nil {
+		return err
+	}
+
+	var key []byte
+	switch priv.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH:
+		keyOffset := 0
+		if e.Version < 6 {
+			e.CipherFunc = CipherFunction(b[0])
+			keyOffset = 1
+			if !e.CipherFunc.IsSupported() {
+				return errors.UnsupportedError("unsupported encryption function")
+			}
+		}
+		key, err = decodeChecksumKey(b[keyOffset:])
+		if err != nil {
+			return err
+		}
+	case PubKeyAlgoX25519, PubKeyAlgoX448:
+		if e.Version < 6 {
+			switch e.CipherFunc {
+			case CipherAES128, CipherAES192, CipherAES256:
+				break
+			default:
+				return errors.StructuralError("v3 PKESK mandates AES as cipher function for x25519 and x448")
+			}
+		}
+		key = b[:]
+	default:
+		return errors.UnsupportedError("unsupported algorithm for decryption")
+	}
+	e.Key = key
+	return nil
+}
+
+// Serialize writes the encrypted key packet, e, to w.
+func (e *EncryptedKey) Serialize(w io.Writer) error {
+	var encodedLength int
+	switch e.Algo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		encodedLength = int(e.encryptedMPI1.EncodedLength())
+	case PubKeyAlgoElGamal:
+		encodedLength = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength())
+	case PubKeyAlgoECDH:
+		encodedLength = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength())
+	case PubKeyAlgoX25519:
+		encodedLength = x25519.EncodedFieldsLength(e.encryptedSession, e.Version == 6)
+	case PubKeyAlgoX448:
+		encodedLength = x448.EncodedFieldsLength(e.encryptedSession, e.Version == 6)
+	default:
+		return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
+	}
+
+	packetLen := versionSize /* version */ + keyIdSize /* key id */ + algorithmSize /* algo */ + encodedLength
+	if e.Version == 6 {
+		packetLen = versionSize /* version */ + algorithmSize /* algo */ + encodedLength + keyVersionSize /* key version */
+		if e.KeyVersion == 6 {
+			packetLen += fingerprintSizeV6
+		} else if e.KeyVersion == 4 {
+			packetLen += fingerprintSize
+		}
+	}
+
+	err := serializeHeader(w, packetTypeEncryptedKey, packetLen)
+	if err != nil {
+		return err
+	}
+
+	_, err = w.Write([]byte{byte(e.Version)})
+	if err != nil {
+		return err
+	}
+	if e.Version == 6 {
+		_, err = w.Write([]byte{byte(e.KeyVersion)})
+		if err != nil {
+			return err
+		}
+		// The key version number may also be zero,
+		// and the fingerprint omitted
+		if e.KeyVersion != 0 {
+			_, err = w.Write(e.KeyFingerprint)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		// Write KeyID
+		err = binary.Write(w, binary.BigEndian, e.KeyId)
+		if err != nil {
+			return err
+		}
+	}
+	_, err = w.Write([]byte{byte(e.Algo)})
+	if err != nil {
+		return err
+	}
+
+	switch e.Algo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		_, err := w.Write(e.encryptedMPI1.EncodedBytes())
+		return err
+	case PubKeyAlgoElGamal:
+		if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil {
+			return err
+		}
+		_, err := w.Write(e.encryptedMPI2.EncodedBytes())
+		return err
+	case PubKeyAlgoECDH:
+		if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil {
+			return err
+		}
+		_, err := w.Write(e.encryptedMPI2.EncodedBytes())
+		return err
+	case PubKeyAlgoX25519:
+		err := x25519.EncodeFields(w, e.ephemeralPublicX25519, e.encryptedSession, byte(e.CipherFunc), e.Version == 6)
+		return err
+	case PubKeyAlgoX448:
+		err := x448.EncodeFields(w, e.ephemeralPublicX448, e.encryptedSession, byte(e.CipherFunc), e.Version == 6)
+		return err
+	default:
+		panic("internal error")
+	}
+}
+
+// SerializeEncryptedKeyAEAD serializes an encrypted key packet to w that contains
+// key, encrypted to pub.
+// If aeadSupported is set, PKESK v6 is used; otherwise v4.
+// If config is nil, sensible defaults will be used.
+func SerializeEncryptedKeyAEAD(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, aeadSupported bool, key []byte, config *Config) error {
+	return SerializeEncryptedKeyAEADwithHiddenOption(w, pub, cipherFunc, aeadSupported, key, false, config)
+}
+
+// SerializeEncryptedKeyAEADwithHiddenOption serializes an encrypted key packet to w that contains
+// key, encrypted to pub.
+// The hidden flag indicates whether the PKESK packet should include a wildcard KeyID.
+// If aeadSupported is set, PKESK v6 is used; otherwise v4.
+// If config is nil, sensible defaults will be used.
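+//
+// For example (sketch; w, pub, and sessionKey are assumed), encrypting a
+// session key to an anonymous recipient:
+//
+//	err := SerializeEncryptedKeyAEADwithHiddenOption(w, pub, CipherAES256, false, sessionKey, true, nil)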
+func SerializeEncryptedKeyAEADwithHiddenOption(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, aeadSupported bool, key []byte, hidden bool, config *Config) error { + var buf [36]byte // max possible header size is v6 + lenHeaderWritten := versionSize + version := 3 + + if aeadSupported { + version = 6 + } + // An implementation MUST NOT generate ElGamal v6 PKESKs. + if version == 6 && pub.PubKeyAlgo == PubKeyAlgoElGamal { + return errors.InvalidArgumentError("ElGamal v6 PKESK are not allowed") + } + // In v3 PKESKs, for x25519 and x448, mandate using AES + if version == 3 && (pub.PubKeyAlgo == PubKeyAlgoX25519 || pub.PubKeyAlgo == PubKeyAlgoX448) { + switch cipherFunc { + case CipherAES128, CipherAES192, CipherAES256: + break + default: + return errors.InvalidArgumentError("v3 PKESK mandates AES for x25519 and x448") + } + } + + buf[0] = byte(version) + + // If hidden is set, the key should be hidden + // An implementation MAY accept or use a Key ID of all zeros, + // or a key version of zero and no key fingerprint, to hide the intended decryption key. + // See Section 5.1.8. in the open pgp crypto refresh + if version == 6 { + if !hidden { + // A one-octet size of the following two fields. + buf[1] = byte(keyVersionSize + len(pub.Fingerprint)) + // A one octet key version number. + buf[2] = byte(pub.Version) + lenHeaderWritten += keyVersionSize + 1 + // The fingerprint of the public key + copy(buf[lenHeaderWritten:lenHeaderWritten+len(pub.Fingerprint)], pub.Fingerprint) + lenHeaderWritten += len(pub.Fingerprint) + } else { + // The size may also be zero, and the key version + // and fingerprint omitted for an "anonymous recipient" + buf[1] = 0 + lenHeaderWritten += 1 + } + } else { + if !hidden { + binary.BigEndian.PutUint64(buf[versionSize:(versionSize+keyIdSize)], pub.KeyId) + } + lenHeaderWritten += keyIdSize + } + buf[lenHeaderWritten] = byte(pub.PubKeyAlgo) + lenHeaderWritten += algorithmSize + + var keyBlock []byte + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: + lenKeyBlock := len(key) + 2 + if version < 6 { + lenKeyBlock += 1 // cipher type included + } + keyBlock = make([]byte, lenKeyBlock) + keyOffset := 0 + if version < 6 { + keyBlock[0] = byte(cipherFunc) + keyOffset = 1 + } + encodeChecksumKey(keyBlock[keyOffset:], key) + case PubKeyAlgoX25519, PubKeyAlgoX448: + // algorithm is added in plaintext below + keyBlock = key + } + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoECDH: + return serializeEncryptedKeyECDH(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) + case PubKeyAlgoX25519: + return serializeEncryptedKeyX25519(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x25519.PublicKey), keyBlock, byte(cipherFunc), version) + case PubKeyAlgoX448: + return serializeEncryptedKeyX448(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x448.PublicKey), keyBlock, byte(cipherFunc), version) + case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of 
type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// PKESKv6 is used if config.AEAD() is not nil. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + return SerializeEncryptedKeyAEAD(w, pub, cipherFunc, config.AEAD() != nil, key, config) +} + +// SerializeEncryptedKeyWithHiddenOption serializes an encrypted key packet to w that contains +// key, encrypted to pub. PKESKv6 is used if config.AEAD() is not nil. +// The hidden option controls if the packet should be anonymous, i.e., omit key metadata. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKeyWithHiddenOption(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, hidden bool, config *Config) error { + return SerializeEncryptedKeyAEADwithHiddenOption(w, pub, cipherFunc, config.AEAD() != nil, key, hidden, config) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header []byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + cipherMPI := encoding.NewMPI(cipherText) + packetLen := len(header) /* header length */ + int(cipherMPI.EncodedLength()) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + _, err = w.Write(cipherMPI.EncodedBytes()) + return err +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header []byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := len(header) /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + if _, err = w.Write(new(encoding.MPI).SetBig(c1).EncodedBytes()); err != nil { + return err + } + _, err = w.Write(new(encoding.MPI).SetBig(c2).EncodedBytes()) + return err +} + +func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header []byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error { + vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint) + if err != nil { + return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error()) + } + + g := encoding.NewMPI(vsG) + m := encoding.NewOID(c) + + packetLen := len(header) /* header length */ + packetLen += int(g.EncodedLength()) + int(m.EncodedLength()) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + if _, err = w.Write(g.EncodedBytes()); err != nil { + return err + } + _, err = w.Write(m.EncodedBytes()) + return err +} + +func serializeEncryptedKeyX25519(w io.Writer, rand io.Reader, header []byte, pub *x25519.PublicKey, keyBlock []byte, cipherFunc byte, version int) error { + ephemeralPublicX25519, ciphertext, err := x25519.Encrypt(rand, pub, keyBlock) + if err != nil { + return 
errors.InvalidArgumentError("x25519 encryption failed: " + err.Error()) + } + + packetLen := len(header) /* header length */ + packetLen += x25519.EncodedFieldsLength(ciphertext, version == 6) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + return x25519.EncodeFields(w, ephemeralPublicX25519, ciphertext, cipherFunc, version == 6) +} + +func serializeEncryptedKeyX448(w io.Writer, rand io.Reader, header []byte, pub *x448.PublicKey, keyBlock []byte, cipherFunc byte, version int) error { + ephemeralPublicX448, ciphertext, err := x448.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("x448 encryption failed: " + err.Error()) + } + + packetLen := len(header) /* header length */ + packetLen += x448.EncodedFieldsLength(ciphertext, version == 6) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + return x448.EncodeFields(w, ephemeralPublicX448, ciphertext, cipherFunc, version == 6) +} + +func checksumKeyMaterial(key []byte) uint16 { + var checksum uint16 + for _, v := range key { + checksum += uint16(v) + } + return checksum +} + +func decodeChecksumKey(msg []byte) (key []byte, err error) { + key = msg[:len(msg)-2] + expectedChecksum := uint16(msg[len(msg)-2])<<8 | uint16(msg[len(msg)-1]) + checksum := checksumKeyMaterial(key) + if checksum != expectedChecksum { + err = errors.StructuralError("session key checksum is incorrect") + } + return +} + +func encodeChecksumKey(buffer []byte, key []byte) { + copy(buffer, key) + checksum := checksumKeyMaterial(key) + buffer[len(key)] = byte(checksum >> 8) + buffer[len(key)+1] = byte(checksum) +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go similarity index 94% rename from vendor/golang.org/x/crypto/openpgp/packet/literal.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go index 1a9ec6e51e..8a028c8a17 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/literal.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go @@ -11,6 +11,7 @@ import ( // LiteralData represents an encrypted file. See RFC 4880, section 5.9. type LiteralData struct { + Format uint8 IsBinary bool FileName string Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. @@ -31,7 +32,8 @@ func (l *LiteralData) parse(r io.Reader) (err error) { return } - l.IsBinary = buf[0] == 'b' + l.Format = buf[0] + l.IsBinary = l.Format == 'b' fileNameLen := int(buf[1]) _, err = readFull(r, buf[:fileNameLen]) @@ -56,9 +58,9 @@ func (l *LiteralData) parse(r io.Reader) (err error) { // on completion. The fileName is truncated to 255 bytes. 
func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' + buf[0] = 'b' + if !isBinary { + buf[0] = 'u' } if len(fileName) > 255 { fileName = fileName[:255] diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go new file mode 100644 index 0000000000..1ee378ba3c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go @@ -0,0 +1,33 @@ +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +type Marker struct{} + +const markerString = "PGP" + +// parse just checks if the packet contains "PGP". +func (m *Marker) parse(reader io.Reader) error { + var buffer [3]byte + if _, err := io.ReadFull(reader, buffer[:]); err != nil { + return err + } + if string(buffer[:]) != markerString { + return errors.StructuralError("invalid marker packet") + } + return nil +} + +// SerializeMarker writes a marker packet to writer. +func SerializeMarker(writer io.Writer) error { + err := serializeHeader(writer, packetTypeMarker, len(markerString)) + if err != nil { + return err + } + _, err = writer.Write([]byte(markerString)) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go new file mode 100644 index 0000000000..2c3e3f50b2 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go @@ -0,0 +1,29 @@ +package packet + +// Notation type represents a Notation Data subpacket +// see https://tools.ietf.org/html/rfc4880#section-5.2.3.16 +type Notation struct { + Name string + Value []byte + IsCritical bool + IsHumanReadable bool +} + +func (notation *Notation) getData() []byte { + nameData := []byte(notation.Name) + nameLen := len(nameData) + valueLen := len(notation.Value) + + data := make([]byte, 8+nameLen+valueLen) + if notation.IsHumanReadable { + data[0] = 0x80 + } + + data[4] = byte(nameLen >> 8) + data[5] = byte(nameLen) + data[6] = byte(valueLen >> 8) + data[7] = byte(valueLen) + copy(data[8:8+nameLen], nameData) + copy(data[8+nameLen:], notation.Value) + return data +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go similarity index 92% rename from vendor/golang.org/x/crypto/openpgp/packet/ocfb.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go index ce2a33a547..4f26d0a00b 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go @@ -85,8 +85,7 @@ type ocfbDecrypter struct { // NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's // cipher feedback mode using the given cipher.Block. Prefix must be the first // blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's -// block size. If an incorrect key is detected then nil is returned. On -// successful exit, blockSize+2 bytes of decrypted data are written into +// block size. On successful exit, blockSize+2 bytes of decrypted data are written into // prefix. Resync determines if the "resynchronization step" from RFC 4880, // 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. 
func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { @@ -112,11 +111,6 @@ func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption prefixCopy[blockSize] ^= x.fre[0] prefixCopy[blockSize+1] ^= x.fre[1] - if prefixCopy[blockSize-2] != prefixCopy[blockSize] || - prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { - return nil - } - if resync { block.Encrypt(x.fre, prefix[2:]) } else { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 0000000000..f393c4063b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,157 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. +type OnePassSignature struct { + Version int + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool + Salt []byte // v6 only + KeyFingerprint []byte // v6 only +} + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [8]byte + // Read: version | signature type | hash algorithm | public-key algorithm + _, err = readFull(r, buf[:4]) + if err != nil { + return + } + if buf[0] != 3 && buf[0] != 6 { + return errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + ops.Version = int(buf[0]) + + var ok bool + ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + + if ops.Version == 6 { + // Only for v6, a variable-length field containing the salt + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + saltLength := int(buf[0]) + var expectedSaltLength int + expectedSaltLength, err = SaltLengthForHash(ops.Hash) + if err != nil { + return + } + if saltLength != expectedSaltLength { + err = errors.StructuralError("unexpected salt size for the given hash algorithm") + return + } + salt := make([]byte, expectedSaltLength) + _, err = readFull(r, salt) + if err != nil { + return + } + ops.Salt = salt + + // Only for v6 packets, 32 octets of the fingerprint of the signing key. + fingerprint := make([]byte, 32) + _, err = readFull(r, fingerprint) + if err != nil { + return + } + ops.KeyFingerprint = fingerprint + ops.KeyId = binary.BigEndian.Uint64(ops.KeyFingerprint[:8]) + } else { + _, err = readFull(r, buf[:8]) + if err != nil { + return + } + ops.KeyId = binary.BigEndian.Uint64(buf[:8]) + } + + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + ops.IsLast = buf[0] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. 
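+// The v3 body is version(1) + sig type(1) + hash(1) + pubkey algo(1) +
+// key id(8) + last(1) = 13 octets; v6 instead carries a salt length octet,
+// the salt itself, and a 32-octet key fingerprint, giving 38 + len(Salt).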
+func (ops *OnePassSignature) Serialize(w io.Writer) error { + //v3 length 1+1+1+1+8+1 = + packetLength := 13 + if ops.Version == 6 { + // v6 length 1+1+1+1+1+len(salt)+32+1 = + packetLength = 38 + len(ops.Salt) + } + + if err := serializeHeader(w, packetTypeOnePassSignature, packetLength); err != nil { + return err + } + + var buf [8]byte + buf[0] = byte(ops.Version) + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = algorithm.HashToHashIdWithSha1(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + + _, err := w.Write(buf[:4]) + if err != nil { + return err + } + + if ops.Version == 6 { + // write salt for v6 signatures + _, err := w.Write([]byte{uint8(len(ops.Salt))}) + if err != nil { + return err + } + _, err = w.Write(ops.Salt) + if err != nil { + return err + } + + // write fingerprint v6 signatures + _, err = w.Write(ops.KeyFingerprint) + if err != nil { + return err + } + } else { + binary.BigEndian.PutUint64(buf[:8], ops.KeyId) + _, err := w.Write(buf[:8]) + if err != nil { + return err + } + } + + isLast := []byte{byte(0)} + if ops.IsLast { + isLast[0] = 1 + } + + _, err = w.Write(isLast) + return err +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go similarity index 90% rename from vendor/golang.org/x/crypto/openpgp/packet/opaque.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go index 3984477310..cef7c661d3 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go @@ -8,7 +8,7 @@ import ( "bytes" "io" - "golang.org/x/crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/errors" ) // OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is @@ -83,8 +83,9 @@ func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { // OpaqueSubpacket represents an unparsed OpenPGP subpacket, // as found in signature and user attribute packets. type OpaqueSubpacket struct { - SubType uint8 - Contents []byte + SubType uint8 + EncodedLength []byte // Store the original encoded length for signature verifications. 
+ Contents []byte } // OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from @@ -108,6 +109,7 @@ func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { // RFC 4880, section 5.2.3.1 var subLen uint32 + var encodedLength []byte if len(contents) < 1 { goto Truncated } @@ -118,6 +120,7 @@ func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacke if len(contents) < subHeaderLen { goto Truncated } + encodedLength = contents[0:1] subLen = uint32(contents[0]) contents = contents[1:] case contents[0] < 255: @@ -125,6 +128,7 @@ func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacke if len(contents) < subHeaderLen { goto Truncated } + encodedLength = contents[0:2] subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 contents = contents[2:] default: @@ -132,16 +136,19 @@ func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacke if len(contents) < subHeaderLen { goto Truncated } + encodedLength = contents[0:5] subLen = uint32(contents[1])<<24 | uint32(contents[2])<<16 | uint32(contents[3])<<8 | uint32(contents[4]) contents = contents[5:] + } if subLen > uint32(len(contents)) || subLen == 0 { goto Truncated } subPacket.SubType = contents[0] + subPacket.EncodedLength = encodedLength subPacket.Contents = contents[1:subLen] return Truncated: @@ -151,7 +158,9 @@ Truncated: func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { buf := make([]byte, 6) - n := serializeSubpacketLength(buf, len(osp.Contents)+1) + copy(buf, osp.EncodedLength) + n := len(osp.EncodedLength) + buf[n] = osp.SubType if _, err = w.Write(buf[:n+1]); err != nil { return diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go new file mode 100644 index 0000000000..da12fbce06 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -0,0 +1,675 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +package packet // import "github.com/ProtonMail/go-crypto/v2/openpgp/packet" + +import ( + "bytes" + "crypto/cipher" + "crypto/rsa" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. +func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
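+// For example: 0x64 encodes 100 in one octet; 0xC0 0x00 encodes 192 and
+// 0xDF 0xFF encodes 8383 in two octets; 0xFF is followed by a four-octet
+// big-endian length; and 0xE2 starts a partial body of 1<<2 = 4 octets.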
+func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + buf bytes.Buffer + lengthByte [1]byte +} + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + bufLen := w.buf.Len() + if bufLen > 512 { + for power := uint(30); ; power-- { + l := 1 << power + if bufLen >= l { + w.lengthByte[0] = 224 + uint8(power) + _, err = w.w.Write(w.lengthByte[:]) + if err != nil { + return + } + var m int + m, err = w.w.Write(w.buf.Next(l)) + if err != nil { + return + } + if m != l { + return 0, io.ErrShortWrite + } + break + } + } + } + return w.buf.Write(p) +} + +func (w *partialLengthWriter) Close() (err error) { + len := w.buf.Len() + err = serializeLength(w.w, len) + if err != nil { + return err + } + _, err = w.buf.WriteTo(w.w) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. 
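+// The first octet always has the most significant bit set. Bit 6 selects the
+// format: if clear, the packet is old-format with the tag in bits 5-2 and a
+// length type in bits 1-0 (3 meaning indeterminate length); if set, the
+// packet is new-format with the tag in bits 5-0, followed by a length as
+// read by readLength.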
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = errors.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. +func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { + err = serializeType(w, ptype) + if err != nil { + return + } + return serializeLength(w, length) +} + +// serializeType writes an OpenPGP packet type to w. See RFC 4880, section +// 4.2. +func serializeType(w io.Writer, ptype packetType) (err error) { + var buf [1]byte + buf[0] = 0x80 | 0x40 | byte(ptype) + _, err = w.Write(buf[:]) + return +} + +// serializeLength writes an OpenPGP packet length to w. See RFC 4880, section +// 4.2.2. +func serializeLength(w io.Writer, length int) (err error) { + var buf [5]byte + var n int + + if length < 192 { + buf[0] = byte(length) + n = 1 + } else if length < 8384 { + length -= 192 + buf[0] = 192 + byte(length>>8) + buf[1] = byte(length) + n = 2 + } else { + buf[0] = 255 + buf[1] = byte(length >> 24) + buf[2] = byte(length >> 16) + buf[3] = byte(length >> 8) + buf[4] = byte(length) + n = 5 + } + + _, err = w.Write(buf[:n]) + return +} + +// serializeStreamHeader writes an OpenPGP packet header to w where the +// length of the packet is unknown. It returns a io.WriteCloser which can be +// used to write the contents of the packet. See RFC 4880, section 4.2. +func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { + err = serializeType(w, ptype) + if err != nil { + return + } + out = &partialLengthWriter{w: w} + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == io.EOF { + err = nil + return + } + if err != nil { + return + } + } +} + +// packetType represents the numeric ids of the different OpenPGP packet types. 
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeMarker packetType = 10 + packetTypeLiteralData packetType = 11 + packetTypeTrust packetType = 12 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18 + packetTypeAEADEncrypted packetType = 20 + packetPadding packetType = 21 +) + +// EncryptedDataPacket holds encrypted data. It is currently implemented by +// SymmetricallyEncrypted and AEADEncrypted. +type EncryptedDataPacket interface { + Decrypt(CipherFunction, []byte) (io.ReadCloser, error) +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, len, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + p = new(Signature) + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + isSubkey := tag == packetTypePublicSubkey + p = &PublicKey{IsSubkey: isSubkey} + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedIntegrityProtected: + se := new(SymmetricallyEncrypted) + se.IntegrityProtected = true + p = se + case packetTypeAEADEncrypted: + p = new(AEADEncrypted) + case packetPadding: + p = Padding(len) + case packetTypeMarker: + p = new(Marker) + case packetTypeTrust: + // Not implemented, just consume + err = errors.UnknownPacketTypeError(tag) + default: + // Packet Tags from 0 to 39 are critical. + // Packet Tags from 40 to 63 are non-critical. + if tag < 40 { + err = errors.CriticalUnknownPacketTypeError(tag) + } else { + err = errors.UnknownPacketTypeError(tag) + } + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// ReadWithCheck reads a single OpenPGP message packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +// ReadWithCheck additionally checks if the OpenPGP message packet sequence adheres +// to the packet composition rules in rfc4880, if not throws an error. 
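+// A minimal consumption loop might look like the following sketch; the
+// helper name and the skipping of non-critical unknown packets are
+// editorial assumptions rather than part of this package:
+//
+//	func readAllWithCheck(r io.Reader, sequence *SequenceVerifier) error {
+//		for {
+//			_, msgErr, err := ReadWithCheck(r, sequence)
+//			if msgErr != nil {
+//				return msgErr // packet order violates the composition rules
+//			}
+//			if err == io.EOF {
+//				break // input exhausted
+//			}
+//			if err != nil {
+//				if _, ok := err.(errors.UnknownPacketTypeError); !ok {
+//					return err
+//				}
+//				// non-critical unknown packets have been consumed; continue
+//			}
+//		}
+//		if err := sequence.Next(EOSSymbol); err != nil {
+//			return err
+//		}
+//		return sequence.AssertValid()
+//	}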
+func ReadWithCheck(r io.Reader, sequence *SequenceVerifier) (p Packet, msgErr error, err error) { + tag, len, contents, err := readHeader(r) + if err != nil { + return + } + switch tag { + case packetTypeEncryptedKey: + msgErr = sequence.Next(ESKSymbol) + p = new(EncryptedKey) + case packetTypeSignature: + msgErr = sequence.Next(SigSymbol) + p = new(Signature) + case packetTypeSymmetricKeyEncrypted: + msgErr = sequence.Next(ESKSymbol) + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + msgErr = sequence.Next(OPSSymbol) + p = new(OnePassSignature) + case packetTypeCompressed: + msgErr = sequence.Next(CompSymbol) + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + msgErr = sequence.Next(EncSymbol) + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + msgErr = sequence.Next(LDSymbol) + p = new(LiteralData) + case packetTypeSymmetricallyEncryptedIntegrityProtected: + msgErr = sequence.Next(EncSymbol) + se := new(SymmetricallyEncrypted) + se.IntegrityProtected = true + p = se + case packetTypeAEADEncrypted: + msgErr = sequence.Next(EncSymbol) + p = new(AEADEncrypted) + case packetPadding: + p = Padding(len) + case packetTypeMarker: + p = new(Marker) + case packetTypeTrust: + // Not implemented, just consume + err = errors.UnknownPacketTypeError(tag) + case packetTypePrivateKey, + packetTypePrivateSubkey, + packetTypePublicKey, + packetTypePublicSubkey, + packetTypeUserId, + packetTypeUserAttribute: + msgErr = sequence.Next(UnknownSymbol) + consumeAll(contents) + default: + // Packet Tags from 0 to 39 are critical. + // Packet Tags from 40 to 63 are non-critical. + if tag < 40 { + err = errors.CriticalUnknownPacketTypeError(tag) + } else { + err = errors.UnknownPacketTypeError(tag) + } + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0x00 + SigTypeText SignatureType = 0x01 + SigTypeGenericCert SignatureType = 0x10 + SigTypePersonaCert SignatureType = 0x11 + SigTypeCasualCert SignatureType = 0x12 + SigTypePositiveCert SignatureType = 0x13 + SigTypeSubkeyBinding SignatureType = 0x18 + SigTypePrimaryKeyBinding SignatureType = 0x19 + SigTypeDirectSignature SignatureType = 0x1F + SigTypeKeyRevocation SignatureType = 0x20 + SigTypeSubkeyRevocation SignatureType = 0x28 + SigTypeCertificationRevocation SignatureType = 0x30 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. + PubKeyAlgoECDH PublicKeyAlgorithm = 18 + PubKeyAlgoECDSA PublicKeyAlgorithm = 19 + // https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt + PubKeyAlgoEdDSA PublicKeyAlgorithm = 22 + // https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh + PubKeyAlgoX25519 PublicKeyAlgorithm = 25 + PubKeyAlgoX448 PublicKeyAlgorithm = 26 + PubKeyAlgoEd25519 PublicKeyAlgorithm = 27 + PubKeyAlgoEd448 PublicKeyAlgorithm = 28 + + // Deprecated in RFC 4880, Section 13.5. Use key flags instead. 
+ PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 + PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 +) + +// CanEncrypt returns true if it's possible to encrypt a message to a public +// key of the given type. +func (pka PublicKeyAlgorithm) CanEncrypt() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH, PubKeyAlgoX25519, PubKeyAlgoX448: + return true + } + return false +} + +// CanSign returns true if it's possible for a public key of the given type to +// sign a message. +func (pka PublicKeyAlgorithm) CanSign() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448: + return true + } + return false +} + +// CipherFunction represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +type CipherFunction algorithm.CipherFunction + +const ( + Cipher3DES CipherFunction = 2 + CipherCAST5 CipherFunction = 3 + CipherAES128 CipherFunction = 7 + CipherAES192 CipherFunction = 8 + CipherAES256 CipherFunction = 9 +) + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + return algorithm.CipherFunction(cipher).KeySize() +} + +// IsSupported returns true if the cipher is supported from the library +func (cipher CipherFunction) IsSupported() bool { + return algorithm.CipherFunction(cipher).KeySize() > 0 +} + +// blockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) blockSize() int { + return algorithm.CipherFunction(cipher).BlockSize() +} + +// new returns a fresh instance of the given cipher. +func (cipher CipherFunction) new(key []byte) (block cipher.Block) { + return algorithm.CipherFunction(cipher).New(key) +} + +// padToKeySize left-pads a MPI with zeroes to match the length of the +// specified RSA public. +func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { + k := (pub.N.BitLen() + 7) / 8 + if len(b) >= k { + return b + } + bb := make([]byte, k) + copy(bb[len(bb)-len(b):], b) + return bb +} + +// CompressionAlgo Represents the different compression algorithms +// supported by OpenPGP (except for BZIP2, which is not currently +// supported). See Section 9.3 of RFC 4880. +type CompressionAlgo uint8 + +const ( + CompressionNone CompressionAlgo = 0 + CompressionZIP CompressionAlgo = 1 + CompressionZLIB CompressionAlgo = 2 +) + +// AEADMode represents the different Authenticated Encryption with Associated +// Data specified for OpenPGP. +// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6 +type AEADMode algorithm.AEADMode + +const ( + AEADModeEAX AEADMode = 1 + AEADModeOCB AEADMode = 2 + AEADModeGCM AEADMode = 3 +) + +func (mode AEADMode) IvLength() int { + return algorithm.AEADMode(mode).NonceLength() +} + +func (mode AEADMode) TagLength() int { + return algorithm.AEADMode(mode).TagLength() +} + +// IsSupported returns true if the aead mode is supported from the library +func (mode AEADMode) IsSupported() bool { + return algorithm.AEADMode(mode).TagLength() > 0 +} + +// new returns a fresh instance of the given mode. +func (mode AEADMode) new(block cipher.Block) cipher.AEAD { + return algorithm.AEADMode(mode).New(block) +} + +// ReasonForRevocation represents a revocation reason code as per RFC4880 +// section 5.2.3.23. 
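+// Codes outside the defined set are mapped to Unknown by
+// NewReasonForRevocation below, so unrecognized reasons do not fail parsing.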
+type ReasonForRevocation uint8 + +const ( + NoReason ReasonForRevocation = 0 + KeySuperseded ReasonForRevocation = 1 + KeyCompromised ReasonForRevocation = 2 + KeyRetired ReasonForRevocation = 3 + UserIDNotValid ReasonForRevocation = 32 + Unknown ReasonForRevocation = 200 +) + +func NewReasonForRevocation(value byte) ReasonForRevocation { + if value < 4 || value == 32 { + return ReasonForRevocation(value) + } + return Unknown +} + +// Curve is a mapping to supported ECC curves for key generation. +// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-06.html#name-curve-specific-wire-formats +type Curve string + +const ( + Curve25519 Curve = "Curve25519" + Curve448 Curve = "Curve448" + CurveNistP256 Curve = "P256" + CurveNistP384 Curve = "P384" + CurveNistP521 Curve = "P521" + CurveSecP256k1 Curve = "SecP256k1" + CurveBrainpoolP256 Curve = "BrainpoolP256" + CurveBrainpoolP384 Curve = "BrainpoolP384" + CurveBrainpoolP512 Curve = "BrainpoolP512" +) + +// TrustLevel represents a trust level per RFC4880 5.2.3.13 +type TrustLevel uint8 + +// TrustAmount represents a trust amount per RFC4880 5.2.3.13 +type TrustAmount uint8 + +const ( + // versionSize is the length in bytes of the version value. + versionSize = 1 + // algorithmSize is the length in bytes of the key algorithm value. + algorithmSize = 1 + // keyVersionSize is the length in bytes of the key version value + keyVersionSize = 1 + // keyIdSize is the length in bytes of the key identifier value. + keyIdSize = 8 + // timestampSize is the length in bytes of encoded timestamps. + timestampSize = 4 + // fingerprintSizeV6 is the length in bytes of the key fingerprint in v6. + fingerprintSizeV6 = 32 + // fingerprintSize is the length in bytes of the key fingerprint. + fingerprintSize = 20 +) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go new file mode 100644 index 0000000000..55a8a56c2d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go @@ -0,0 +1,222 @@ +package packet + +// This file implements the pushdown automata (PDA) from PGPainless (Paul Schaub) +// to verify pgp packet sequences. 
See Paul's blogpost for more details:
+// https://blog.jabberhead.tk/2022/10/26/implementing-packet-sequence-validation-using-pushdown-automata/
+import (
+	"fmt"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+)
+
+func NewErrMalformedMessage(from State, input InputSymbol, stackSymbol StackSymbol) errors.ErrMalformedMessage {
+	return errors.ErrMalformedMessage(fmt.Sprintf("state %d, input symbol %d, stack symbol %d ", from, input, stackSymbol))
+}
+
+// InputSymbol defines the input alphabet of the PDA
+type InputSymbol uint8
+
+const (
+	LDSymbol InputSymbol = iota
+	SigSymbol
+	OPSSymbol
+	CompSymbol
+	ESKSymbol
+	EncSymbol
+	EOSSymbol
+	UnknownSymbol
+)
+
+// StackSymbol defines the stack alphabet of the PDA
+type StackSymbol int8
+
+const (
+	MsgStackSymbol StackSymbol = iota
+	OpsStackSymbol
+	KeyStackSymbol
+	EndStackSymbol
+	EmptyStackSymbol
+)
+
+// State defines the states of the PDA
+type State int8
+
+const (
+	OpenPGPMessage State = iota
+	ESKMessage
+	LiteralMessage
+	CompressedMessage
+	EncryptedMessage
+	ValidMessage
+)
+
+// transition represents a state transition in the PDA
+type transition func(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error)
+
+// SequenceVerifier is a pushdown automaton that verifies
+// PGP message packet sequences according to RFC 4880.
+type SequenceVerifier struct {
+	stack []StackSymbol
+	state State
+}
+
+// Next performs a state transition with the given input symbol.
+// If the transition fails, an ErrMalformedMessage is returned.
+func (sv *SequenceVerifier) Next(input InputSymbol) error {
+	for {
+		stackSymbol := sv.popStack()
+		transitionFunc := getTransition(sv.state)
+		nextState, newStackSymbols, redo, err := transitionFunc(input, stackSymbol)
+		if err != nil {
+			return err
+		}
+		if redo {
+			sv.pushStack(stackSymbol)
+		}
+		for _, newStackSymbol := range newStackSymbols {
+			sv.pushStack(newStackSymbol)
+		}
+		sv.state = nextState
+		if !redo {
+			break
+		}
+	}
+	return nil
+}
+
+// Valid returns true if the PDA is in a valid state.
+func (sv *SequenceVerifier) Valid() bool {
+	return sv.state == ValidMessage && len(sv.stack) == 0
+}
+
+func (sv *SequenceVerifier) AssertValid() error {
+	if !sv.Valid() {
+		return errors.ErrMalformedMessage("invalid message")
+	}
+	return nil
+}
+
+func NewSequenceVerifier() *SequenceVerifier {
+	return &SequenceVerifier{
+		stack: []StackSymbol{EndStackSymbol, MsgStackSymbol},
+		state: OpenPGPMessage,
+	}
+}
+
+func (sv *SequenceVerifier) popStack() StackSymbol {
+	if len(sv.stack) == 0 {
+		return EmptyStackSymbol
+	}
+	elemIndex := len(sv.stack) - 1
+	stackSymbol := sv.stack[elemIndex]
+	sv.stack = sv.stack[:elemIndex]
+	return stackSymbol
+}
+
+func (sv *SequenceVerifier) pushStack(stackSymbol StackSymbol) {
+	sv.stack = append(sv.stack, stackSymbol)
+}
+
+func getTransition(from State) transition {
+	switch from {
+	case OpenPGPMessage:
+		return fromOpenPGPMessage
+	case LiteralMessage:
+		return fromLiteralMessage
+	case CompressedMessage:
+		return fromCompressedMessage
+	case EncryptedMessage:
+		return fromEncryptedMessage
+	case ESKMessage:
+		return fromESKMessage
+	case ValidMessage:
+		return fromValidMessage
+	}
+	return nil
+}
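+
+// As a worked example (an editorial sketch, not part of this package), a
+// one-pass signed literal message walks the automaton as follows: OPSSymbol
+// pushes an OpsStackSymbol and a fresh MsgStackSymbol, LDSymbol enters
+// LiteralMessage, SigSymbol consumes the OpsStackSymbol, and EOSSymbol
+// consumes the EndStackSymbol to reach ValidMessage.
+//
+//	func exampleOnePassSequence() error {
+//		sv := NewSequenceVerifier()
+//		for _, sym := range []InputSymbol{OPSSymbol, LDSymbol, SigSymbol, EOSSymbol} {
+//			if err := sv.Next(sym); err != nil {
+//				return err
+//			}
+//		}
+//		return sv.AssertValid() // state is ValidMessage, stack is empty
+//	}
+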
+// fromOpenPGPMessage is the transition for the state OpenPGPMessage.
+func fromOpenPGPMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
+	if stackSymbol != MsgStackSymbol {
+		return 0, nil, false, NewErrMalformedMessage(OpenPGPMessage, input, stackSymbol)
+	}
+	switch input {
+	case LDSymbol:
+		return LiteralMessage, nil, false, nil
+	case SigSymbol:
+		return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, false, nil
+	case OPSSymbol:
+		return OpenPGPMessage, []StackSymbol{OpsStackSymbol, MsgStackSymbol}, false, nil
+	case CompSymbol:
+		return CompressedMessage, nil, false, nil
+	case ESKSymbol:
+		return ESKMessage, []StackSymbol{KeyStackSymbol}, false, nil
+	case EncSymbol:
+		return EncryptedMessage, nil, false, nil
+	}
+	return 0, nil, false, NewErrMalformedMessage(OpenPGPMessage, input, stackSymbol)
+}
+
+// fromESKMessage is the transition for the state ESKMessage.
+func fromESKMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
+	if stackSymbol != KeyStackSymbol {
+		return 0, nil, false, NewErrMalformedMessage(ESKMessage, input, stackSymbol)
+	}
+	switch input {
+	case ESKSymbol:
+		return ESKMessage, []StackSymbol{KeyStackSymbol}, false, nil
+	case EncSymbol:
+		return EncryptedMessage, nil, false, nil
+	}
+	return 0, nil, false, NewErrMalformedMessage(ESKMessage, input, stackSymbol)
+}
+
+// fromLiteralMessage is the transition for the state LiteralMessage.
+func fromLiteralMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
+	switch input {
+	case SigSymbol:
+		if stackSymbol == OpsStackSymbol {
+			return LiteralMessage, nil, false, nil
+		}
+	case EOSSymbol:
+		if stackSymbol == EndStackSymbol {
+			return ValidMessage, nil, false, nil
+		}
+	}
+	return 0, nil, false, NewErrMalformedMessage(LiteralMessage, input, stackSymbol)
+}
+
+// fromCompressedMessage is the transition for the state CompressedMessage.
+func fromCompressedMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
+	switch input {
+	case SigSymbol:
+		if stackSymbol == OpsStackSymbol {
+			return CompressedMessage, nil, false, nil
+		}
+	case EOSSymbol:
+		if stackSymbol == EndStackSymbol {
+			return ValidMessage, nil, false, nil
+		}
+	}
+	return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, true, nil
+}
+
+// fromEncryptedMessage is the transition for the state EncryptedMessage.
+func fromEncryptedMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
+	switch input {
+	case SigSymbol:
+		if stackSymbol == OpsStackSymbol {
+			return EncryptedMessage, nil, false, nil
+		}
+	case EOSSymbol:
+		if stackSymbol == EndStackSymbol {
+			return ValidMessage, nil, false, nil
+		}
+	}
+	return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, true, nil
+}
+
+// fromValidMessage is the transition for the state ValidMessage.
+func fromValidMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
+	return 0, nil, false, NewErrMalformedMessage(ValidMessage, input, stackSymbol)
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go
new file mode 100644
index 0000000000..2d714723cf
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go
@@ -0,0 +1,24 @@
+package packet
+
+import (
+	"io"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+)
+
+// UnsupportedPacket represents an OpenPGP packet with a known packet type
+// but with unsupported content.
+type UnsupportedPacket struct { + IncompletePacket Packet + Error errors.UnsupportedError +} + +// Implements the Packet interface +func (up *UnsupportedPacket) parse(read io.Reader) error { + err := up.IncompletePacket.parse(read) + if castedErr, ok := err.(errors.UnsupportedError); ok { + up.Error = castedErr + return nil + } + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go new file mode 100644 index 0000000000..06fa83740d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go @@ -0,0 +1,27 @@ +package packet + +import ( + "io" + "io/ioutil" +) + +// Padding type represents a Padding Packet (Tag 21). +// The padding type is represented by the length of its padding. +// see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-padding-packet-tag-21 +type Padding int + +// parse just ignores the padding content. +func (pad Padding) parse(reader io.Reader) error { + _, err := io.CopyN(ioutil.Discard, reader, int64(pad)) + return err +} + +// SerializePadding writes the padding to writer. +func (pad Padding) SerializePadding(writer io.Writer, rand io.Reader) error { + err := serializeHeader(writer, packetPadding, int(pad)) + if err != nil { + return err + } + _, err = io.CopyN(writer, rand, int64(pad)) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go new file mode 100644 index 0000000000..099b4d9ba0 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go @@ -0,0 +1,1173 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/cipher" + "crypto/dsa" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/subtle" + "fmt" + "io" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/s2k" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" + "golang.org/x/crypto/hkdf" +) + +// PrivateKey represents a possibly encrypted private key. See RFC 4880, +// section 5.5.3. +type PrivateKey struct { + PublicKey + Encrypted bool // if true then the private key is unavailable until Decrypt has been called. + encryptedData []byte + cipher CipherFunction + s2k func(out, in []byte) + aead AEADMode // only relevant if S2KAEAD is enabled + // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519|ed448}.PrivateKey or + // crypto.Signer/crypto.Decrypter (Decryptor RSA only). 
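+	// Nil while Encrypted is true; populated once Decrypt succeeds.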
+ PrivateKey interface{} + iv []byte + + // Type of encryption of the S2K packet + // Allowed values are 0 (Not encrypted), 253 (AEAD), 254 (SHA1), or + // 255 (2-byte checksum) + s2kType S2KType + // Full parameters of the S2K packet + s2kParams *s2k.Params +} + +// S2KType s2k packet type +type S2KType uint8 + +const ( + // S2KNON unencrypt + S2KNON S2KType = 0 + // S2KAEAD use authenticated encryption + S2KAEAD S2KType = 253 + // S2KSHA1 sha1 sum check + S2KSHA1 S2KType = 254 + // S2KCHECKSUM sum check + S2KCHECKSUM S2KType = 255 +) + +func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEdDSAPrivateKey(creationTime time.Time, priv *eddsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewX25519PrivateKey(creationTime time.Time, priv *x25519.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewX448PrivateKey(creationTime time.Time, priv *x448.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEd25519PrivateKey(creationTime time.Time, priv *ed25519.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewEd25519PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEd448PrivateKey(creationTime time.Time, priv *ed448.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewEd448PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that +// implements RSA, ECDSA or EdDSA. +func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey { + pk := new(PrivateKey) + // In general, the public Keys should be used as pointers. We still + // type-switch on the values, for backwards-compatibility. 
+ switch pubkey := signer.(type) { + case *rsa.PrivateKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey) + case rsa.PrivateKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey) + case *ecdsa.PrivateKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey) + case ecdsa.PrivateKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey) + case *eddsa.PrivateKey: + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey) + case eddsa.PrivateKey: + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey) + case *ed25519.PrivateKey: + pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey) + case ed25519.PrivateKey: + pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey) + case *ed448.PrivateKey: + pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey) + case ed448.PrivateKey: + pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey) + default: + panic("openpgp: unknown signer type in NewSignerPrivateKey") + } + pk.PrivateKey = signer + return pk +} + +// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh|x25519|x448}.PrivateKey. +func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey { + pk := new(PrivateKey) + switch priv := decrypter.(type) { + case *rsa.PrivateKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) + case *elgamal.PrivateKey: + pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) + case *ecdh.PrivateKey: + pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + case *x25519.PrivateKey: + pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey) + case *x448.PrivateKey: + pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey) + default: + panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey") + } + pk.PrivateKey = decrypter + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + v5 := pk.PublicKey.Version == 5 + v6 := pk.PublicKey.Version == 6 + + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.s2kType = S2KType(buf[0]) + var optCount [1]byte + if v5 || (v6 && pk.s2kType != S2KNON) { + if _, err = readFull(r, optCount[:]); err != nil { + return + } + } + + switch pk.s2kType { + case S2KNON: + pk.s2k = nil + pk.Encrypted = false + case S2KSHA1, S2KCHECKSUM, S2KAEAD: + if (v5 || v6) && pk.s2kType == S2KCHECKSUM { + return errors.StructuralError(fmt.Sprintf("wrong s2k identifier for version %d", pk.Version)) + } + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + if pk.cipher != 0 && !pk.cipher.IsSupported() { + return errors.UnsupportedError("unsupported cipher function in private key") + } + // [Optional] If string-to-key usage octet was 253, + // a one-octet AEAD algorithm. + if pk.s2kType == S2KAEAD { + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.aead = AEADMode(buf[0]) + if !pk.aead.IsSupported() { + return errors.UnsupportedError("unsupported aead mode in private key") + } + } + + // [Optional] Only for a version 6 packet, + // and if string-to-key usage octet was 255, 254, or 253, + // an one-octet count of the following field. 
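+		// The count octet is read and discarded here; s2k.ParseIntoParams
+		// below performs its own length handling for the specifier.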
+ if v6 { + _, err = readFull(r, buf[:]) + if err != nil { + return + } + } + + pk.s2kParams, err = s2k.ParseIntoParams(r) + if err != nil { + return + } + if pk.s2kParams.Dummy() { + return + } + pk.s2k, err = pk.s2kParams.Function() + if err != nil { + return + } + pk.Encrypted = true + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + + if pk.Encrypted { + var ivSize int + // If the S2K usage octet was 253, the IV is of the size expected by the AEAD mode, + // unless it's a version 5 key, in which case it's the size of the symmetric cipher's block size. + // For all other S2K modes, it's always the block size. + if !v5 && pk.s2kType == S2KAEAD { + ivSize = pk.aead.IvLength() + } else { + ivSize = pk.cipher.blockSize() + } + + if ivSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, ivSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + if v5 && pk.s2kType == S2KAEAD { + pk.iv = pk.iv[:pk.aead.IvLength()] + } + } + + var privateKeyData []byte + if v5 { + var n [4]byte /* secret material four octet count */ + _, err = readFull(r, n[:]) + if err != nil { + return + } + count := uint32(uint32(n[0])<<24 | uint32(n[1])<<16 | uint32(n[2])<<8 | uint32(n[3])) + if !pk.Encrypted { + count = count + 2 /* two octet checksum */ + } + privateKeyData = make([]byte, count) + _, err = readFull(r, privateKeyData) + if err != nil { + return + } + } else { + privateKeyData, err = io.ReadAll(r) + if err != nil { + return + } + } + if !pk.Encrypted { + if len(privateKeyData) < 2 { + return errors.StructuralError("truncated private key data") + } + if pk.Version != 6 { + // checksum + var sum uint16 + for i := 0; i < len(privateKeyData)-2; i++ { + sum += uint16(privateKeyData[i]) + } + if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) || + privateKeyData[len(privateKeyData)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + privateKeyData = privateKeyData[:len(privateKeyData)-2] + return pk.parsePrivateKey(privateKeyData) + } else { + // No checksum + return pk.parsePrivateKey(privateKeyData) + } + } + + pk.encryptedData = privateKeyData + return +} + +// Dummy returns true if the private key is a dummy key. This is a GNU extension. +func (pk *PrivateKey) Dummy() bool { + return pk.s2kParams.Dummy() +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +func (pk *PrivateKey) Serialize(w io.Writer) (err error) { + contents := bytes.NewBuffer(nil) + err = pk.PublicKey.serializeWithoutHeaders(contents) + if err != nil { + return + } + if _, err = contents.Write([]byte{uint8(pk.s2kType)}); err != nil { + return + } + + optional := bytes.NewBuffer(nil) + if pk.Encrypted || pk.Dummy() { + // [Optional] If string-to-key usage octet was 255, 254, or 253, + // a one-octet symmetric encryption algorithm. + if _, err = optional.Write([]byte{uint8(pk.cipher)}); err != nil { + return + } + // [Optional] If string-to-key usage octet was 253, + // a one-octet AEAD algorithm. + if pk.s2kType == S2KAEAD { + if _, err = optional.Write([]byte{uint8(pk.aead)}); err != nil { + return + } + } + + s2kBuffer := bytes.NewBuffer(nil) + if err := pk.s2kParams.Serialize(s2kBuffer); err != nil { + return err + } + // [Optional] Only for a version 6 packet, and if string-to-key + // usage octet was 255, 254, or 253, an one-octet + // count of the following field. 
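+		// Mirrors the parser above: the count is the length of the s2k
+		// specifier that was serialized into s2kBuffer.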
+ if pk.Version == 6 { + if _, err = optional.Write([]byte{uint8(s2kBuffer.Len())}); err != nil { + return + } + } + // [Optional] If string-to-key usage octet was 255, 254, or 253, + // a string-to-key (S2K) specifier. The length of the string-to-key specifier + // depends on its type + if _, err = io.Copy(optional, s2kBuffer); err != nil { + return + } + + // IV + if pk.Encrypted { + if _, err = optional.Write(pk.iv); err != nil { + return + } + if pk.Version == 5 && pk.s2kType == S2KAEAD { + // Add padding for version 5 + padding := make([]byte, pk.cipher.blockSize()-len(pk.iv)) + if _, err = optional.Write(padding); err != nil { + return + } + } + } + } + if pk.Version == 5 || (pk.Version == 6 && pk.s2kType != S2KNON) { + contents.Write([]byte{uint8(optional.Len())}) + } + + if _, err := io.Copy(contents, optional); err != nil { + return err + } + + if !pk.Dummy() { + l := 0 + var priv []byte + if !pk.Encrypted { + buf := bytes.NewBuffer(nil) + err = pk.serializePrivateKey(buf) + if err != nil { + return err + } + l = buf.Len() + if pk.Version != 6 { + checksum := mod64kHash(buf.Bytes()) + buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) + } + priv = buf.Bytes() + } else { + priv, l = pk.encryptedData, len(pk.encryptedData) + } + + if pk.Version == 5 { + contents.Write([]byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)}) + } + contents.Write(priv) + } + + ptype := packetTypePrivateKey + if pk.IsSubkey { + ptype = packetTypePrivateSubkey + } + err = serializeHeader(w, ptype, contents.Len()) + if err != nil { + return + } + _, err = io.Copy(w, contents) + if err != nil { + return + } + return +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + if _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()); err != nil { + return err + } + if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[1]).EncodedBytes()); err != nil { + return err + } + if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[0]).EncodedBytes()); err != nil { + return err + } + _, err := w.Write(new(encoding.MPI).SetBig(priv.Precomputed.Qinv).EncodedBytes()) + return err +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) + return err +} + +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) + return err +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.MarshalIntegerSecret()).EncodedBytes()) + return err +} + +func serializeEdDSAPrivateKey(w io.Writer, priv *eddsa.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes()) + return err +} + +func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes()) + return err +} + +func serializeX25519PrivateKey(w io.Writer, priv *x25519.PrivateKey) error { + _, err := w.Write(priv.Secret) + return err +} + +func serializeX448PrivateKey(w io.Writer, priv *x448.PrivateKey) error { + _, err := w.Write(priv.Secret) + return err +} + +func serializeEd25519PrivateKey(w io.Writer, priv *ed25519.PrivateKey) error { + _, err := w.Write(priv.MarshalByteSecret()) + return err +} + +func serializeEd448PrivateKey(w io.Writer, priv *ed448.PrivateKey) error { + _, err := w.Write(priv.MarshalByteSecret()) + return err +} + +// decrypt decrypts an encrypted 
private key using a decryption key. +func (pk *PrivateKey) decrypt(decryptionKey []byte) error { + if pk.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + if !pk.Encrypted { + return nil + } + block := pk.cipher.new(decryptionKey) + var data []byte + switch pk.s2kType { + case S2KAEAD: + aead := pk.aead.new(block) + additionalData, err := pk.additionalData() + if err != nil { + return err + } + // Decrypt the encrypted key material with aead + data, err = aead.Open(nil, pk.iv, pk.encryptedData, additionalData) + if err != nil { + return err + } + case S2KSHA1, S2KCHECKSUM: + cfb := cipher.NewCFBDecrypter(block, pk.iv) + data = make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + if pk.s2kType == S2KSHA1 { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + default: + return errors.InvalidArgumentError("invalid s2k type") + } + + err := pk.parsePrivateKey(data) + if _, ok := err.(errors.KeyInvalidError); ok { + return errors.KeyInvalidError("invalid key parameters") + } + if err != nil { + return err + } + + // Mark key as unencrypted + pk.s2kType = S2KNON + pk.s2k = nil + pk.Encrypted = false + pk.encryptedData = nil + return nil +} + +func (pk *PrivateKey) decryptWithCache(passphrase []byte, keyCache *s2k.Cache) error { + if pk.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + if !pk.Encrypted { + return nil + } + + key, err := keyCache.GetOrComputeDerivedKey(passphrase, pk.s2kParams, pk.cipher.KeySize()) + if err != nil { + return err + } + if pk.s2kType == S2KAEAD { + key = pk.applyHKDF(key) + } + return pk.decrypt(key) +} + +// Decrypt decrypts an encrypted private key using a passphrase. +func (pk *PrivateKey) Decrypt(passphrase []byte) error { + if pk.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + if !pk.Encrypted { + return nil + } + + key := make([]byte, pk.cipher.KeySize()) + pk.s2k(key, passphrase) + if pk.s2kType == S2KAEAD { + key = pk.applyHKDF(key) + } + return pk.decrypt(key) +} + +// DecryptPrivateKeys decrypts all encrypted keys with the given config and passphrase. +// Avoids recomputation of similar s2k key derivations. +func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error { + // Create a cache to avoid recomputation of key derviations for the same passphrase. + s2kCache := &s2k.Cache{} + for _, key := range keys { + if key != nil && !key.Dummy() && key.Encrypted { + err := key.decryptWithCache(passphrase, s2kCache) + if err != nil { + return err + } + } + } + return nil +} + +// encrypt encrypts an unencrypted private key. 
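+// The exported helpers derive the key passed in here. A round trip
+// (editorial sketch, assuming pk already holds unencrypted key material):
+//
+//	if err := pk.Encrypt([]byte("passphrase")); err != nil { // AES-256, iterated+salted S2K
+//		return err
+//	}
+//	if err := pk.Decrypt([]byte("passphrase")); err != nil {
+//		return err
+//	}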
+func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, s2kType S2KType, cipherFunction CipherFunction, rand io.Reader) error {
+	if pk.Dummy() {
+		return errors.ErrDummyPrivateKey("dummy key found")
+	}
+	if pk.Encrypted {
+		return nil
+	}
+	// check if encryptionKey has the correct size
+	if len(key) != cipherFunction.KeySize() {
+		return errors.InvalidArgumentError("supplied encryption key has the wrong size")
+	}
+
+	priv := bytes.NewBuffer(nil)
+	err := pk.serializePrivateKey(priv)
+	if err != nil {
+		return err
+	}
+
+	pk.cipher = cipherFunction
+	pk.s2kParams = params
+	pk.s2k, err = pk.s2kParams.Function()
+	if err != nil {
+		return err
+	}
+
+	privateKeyBytes := priv.Bytes()
+	pk.s2kType = s2kType
+	block := pk.cipher.new(key)
+	switch s2kType {
+	case S2KAEAD:
+		if pk.aead == 0 {
+			return errors.StructuralError("aead mode is not set on key")
+		}
+		aead := pk.aead.new(block)
+		additionalData, err := pk.additionalData()
+		if err != nil {
+			return err
+		}
+		pk.iv = make([]byte, aead.NonceSize())
+		_, err = io.ReadFull(rand, pk.iv)
+		if err != nil {
+			return err
+		}
+		// Encrypt the private key material with aead
+		pk.encryptedData = aead.Seal(nil, pk.iv, privateKeyBytes, additionalData)
+	case S2KSHA1, S2KCHECKSUM:
+		pk.iv = make([]byte, pk.cipher.blockSize())
+		_, err = io.ReadFull(rand, pk.iv)
+		if err != nil {
+			return err
+		}
+		cfb := cipher.NewCFBEncrypter(block, pk.iv)
+		if s2kType == S2KSHA1 {
+			h := sha1.New()
+			h.Write(privateKeyBytes)
+			sum := h.Sum(nil)
+			privateKeyBytes = append(privateKeyBytes, sum...)
+		} else {
+			var sum uint16
+			for _, b := range privateKeyBytes {
+				sum += uint16(b)
+			}
+			privateKeyBytes = append(privateKeyBytes, []byte{uint8(sum >> 8), uint8(sum)}...)
+		}
+		pk.encryptedData = make([]byte, len(privateKeyBytes))
+		cfb.XORKeyStream(pk.encryptedData, privateKeyBytes)
+	default:
+		return errors.InvalidArgumentError("invalid s2k type for encryption")
+	}
+
+	pk.Encrypted = true
+	pk.PrivateKey = nil
+	return err
+}
+
+// EncryptWithConfig encrypts an unencrypted private key using the passphrase and the config.
+func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error {
+	params, err := s2k.Generate(config.Random(), config.S2K())
+	if err != nil {
+		return err
+	}
+	// Derive an encryption key with the configured s2k function.
+	key := make([]byte, config.Cipher().KeySize())
+	s2k, err := params.Function()
+	if err != nil {
+		return err
+	}
+	s2k(key, passphrase)
+	s2kType := S2KSHA1
+	if config.AEAD() != nil {
+		s2kType = S2KAEAD
+		pk.aead = config.AEAD().Mode()
+		pk.cipher = config.Cipher()
+		key = pk.applyHKDF(key)
+	}
+	// Encrypt the private key with the derived encryption key.
+	return pk.encrypt(key, params, s2kType, config.Cipher(), config.Random())
+}
+
+// EncryptPrivateKeys encrypts all unencrypted keys with the given config and passphrase.
+// Only derives one key from the passphrase, which is then used to encrypt each key.
+func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) error {
+	params, err := s2k.Generate(config.Random(), config.S2K())
+	if err != nil {
+		return err
+	}
+	// Derive an encryption key with the configured s2k function.
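+	// The passphrase is stretched only once; when AEAD is configured each
+	// key still gets its own HKDF-derived subkey via applyHKDF below.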
+ encryptionKey := make([]byte, config.Cipher().KeySize()) + s2k, err := params.Function() + if err != nil { + return err + } + s2k(encryptionKey, passphrase) + for _, key := range keys { + if key != nil && !key.Dummy() && !key.Encrypted { + s2kType := S2KSHA1 + if config.AEAD() != nil { + s2kType = S2KAEAD + key.aead = config.AEAD().Mode() + key.cipher = config.Cipher() + derivedKey := key.applyHKDF(encryptionKey) + err = key.encrypt(derivedKey, params, s2kType, config.Cipher(), config.Random()) + } else { + err = key.encrypt(encryptionKey, params, s2kType, config.Cipher(), config.Random()) + } + if err != nil { + return err + } + } + } + return nil +} + +// Encrypt encrypts an unencrypted private key using a passphrase. +func (pk *PrivateKey) Encrypt(passphrase []byte) error { + // Default config of private key encryption + config := &Config{ + S2KConfig: &s2k.Config{ + S2KMode: s2k.IteratedSaltedS2K, + S2KCount: 65536, + Hash: crypto.SHA256, + }, + DefaultCipher: CipherAES256, + } + return pk.EncryptWithConfig(passphrase, config) +} + +func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(w, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(w, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(w, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(w, priv) + case *eddsa.PrivateKey: + err = serializeEdDSAPrivateKey(w, priv) + case *ecdh.PrivateKey: + err = serializeECDHPrivateKey(w, priv) + case *x25519.PrivateKey: + err = serializeX25519PrivateKey(w, priv) + case *x448.PrivateKey: + err = serializeX448PrivateKey(w, priv) + case *ed25519.PrivateKey: + err = serializeEd25519PrivateKey(w, priv) + case *ed448.PrivateKey: + err = serializeEd448PrivateKey(w, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + return +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + case PubKeyAlgoECDH: + return pk.parseECDHPrivateKey(data) + case PubKeyAlgoEdDSA: + return pk.parseEdDSAPrivateKey(data) + case PubKeyAlgoX25519: + return pk.parseX25519PrivateKey(data) + case PubKeyAlgoX448: + return pk.parseX448PrivateKey(data) + case PubKeyAlgoEd25519: + return pk.parseEd25519PrivateKey(data) + case PubKeyAlgoEd448: + return pk.parseEd448PrivateKey(data) + default: + err = errors.StructuralError("unknown private key type") + return + } +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + p := new(encoding.MPI) + if _, err := p.ReadFrom(buf); err != nil { + return err + } + + q := new(encoding.MPI) + if _, err := q.ReadFrom(buf); err != nil { + return err + } + + rsaPriv.D = new(big.Int).SetBytes(d.Bytes()) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p.Bytes()) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q.Bytes()) + if err := rsaPriv.Validate(); err != nil { + return 
errors.KeyInvalidError(err.Error()) + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x := new(encoding.MPI) + if _, err := x.ReadFrom(buf); err != nil { + return err + } + + dsaPriv.X = new(big.Int).SetBytes(x.Bytes()) + if err := validateDSAParameters(dsaPriv); err != nil { + return err + } + pk.PrivateKey = dsaPriv + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x := new(encoding.MPI) + if _, err := x.ReadFrom(buf); err != nil { + return err + } + + priv.X = new(big.Int).SetBytes(x.Bytes()) + if err := validateElGamalParameters(priv); err != nil { + return err + } + pk.PrivateKey = priv + + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + ecdsaPriv := ecdsa.NewPrivateKey(*ecdsaPub) + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + if err := ecdsaPriv.UnmarshalIntegerSecret(d.Bytes()); err != nil { + return err + } + if err := ecdsa.Validate(ecdsaPriv); err != nil { + return err + } + pk.PrivateKey = ecdsaPriv + + return nil +} + +func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { + ecdhPub := pk.PublicKey.PublicKey.(*ecdh.PublicKey) + ecdhPriv := ecdh.NewPrivateKey(*ecdhPub) + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + if err := ecdhPriv.UnmarshalByteSecret(d.Bytes()); err != nil { + return err + } + + if err := ecdh.Validate(ecdhPriv); err != nil { + return err + } + + pk.PrivateKey = ecdhPriv + + return nil +} + +func (pk *PrivateKey) parseX25519PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*x25519.PublicKey) + privateKey := x25519.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + privateKey.Secret = make([]byte, x25519.KeySize) + + if len(data) != x25519.KeySize { + err = errors.StructuralError("wrong x25519 key size") + return err + } + subtle.ConstantTimeCopy(1, privateKey.Secret, data) + if err = x25519.Validate(privateKey); err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseX448PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*x448.PublicKey) + privateKey := x448.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + privateKey.Secret = make([]byte, x448.KeySize) + + if len(data) != x448.KeySize { + err = errors.StructuralError("wrong x448 key size") + return err + } + subtle.ConstantTimeCopy(1, privateKey.Secret, data) + if err = x448.Validate(privateKey); err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseEd25519PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*ed25519.PublicKey) + privateKey := ed25519.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + if len(data) != ed25519.SeedSize { + err = errors.StructuralError("wrong ed25519 key size") + return err + } + err = privateKey.UnmarshalByteSecret(data) + if err != nil { + return err + } + err = 
ed25519.Validate(privateKey) + if err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseEd448PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*ed448.PublicKey) + privateKey := ed448.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + if len(data) != ed448.SeedSize { + err = errors.StructuralError("wrong ed448 key size") + return err + } + err = privateKey.UnmarshalByteSecret(data) + if err != nil { + return err + } + err = ed448.Validate(privateKey) + if err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { + eddsaPub := pk.PublicKey.PublicKey.(*eddsa.PublicKey) + eddsaPriv := eddsa.NewPrivateKey(*eddsaPub) + eddsaPriv.PublicKey = *eddsaPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + if err = eddsaPriv.UnmarshalByteSecret(d.Bytes()); err != nil { + return err + } + + if err := eddsa.Validate(eddsaPriv); err != nil { + return err + } + + pk.PrivateKey = eddsaPriv + + return nil +} + +func (pk *PrivateKey) additionalData() ([]byte, error) { + additionalData := bytes.NewBuffer(nil) + // Write additional data prefix based on packet type + var packetByte byte + if pk.PublicKey.IsSubkey { + packetByte = 0xc7 + } else { + packetByte = 0xc5 + } + // Write public key to additional data + _, err := additionalData.Write([]byte{packetByte}) + if err != nil { + return nil, err + } + err = pk.PublicKey.serializeWithoutHeaders(additionalData) + if err != nil { + return nil, err + } + return additionalData.Bytes(), nil +} + +func (pk *PrivateKey) applyHKDF(inputKey []byte) []byte { + var packetByte byte + if pk.PublicKey.IsSubkey { + packetByte = 0xc7 + } else { + packetByte = 0xc5 + } + associatedData := []byte{packetByte, byte(pk.Version), byte(pk.cipher), byte(pk.aead)} + hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData) + encryptionKey := make([]byte, pk.cipher.KeySize()) + _, _ = readFull(hkdfReader, encryptionKey) + return encryptionKey +} + +func validateDSAParameters(priv *dsa.PrivateKey) error { + p := priv.P // group prime + q := priv.Q // subgroup order + g := priv.G // g has order q mod p + x := priv.X // secret + y := priv.Y // y == g**x mod p + one := big.NewInt(1) + // expect g, y >= 2 and g < p + if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { + return errors.KeyInvalidError("dsa: invalid group") + } + // expect p > q + if p.Cmp(q) <= 0 { + return errors.KeyInvalidError("dsa: invalid group prime") + } + // q should be large enough and divide p-1 + pSub1 := new(big.Int).Sub(p, one) + if q.BitLen() < 150 || new(big.Int).Mod(pSub1, q).Cmp(big.NewInt(0)) != 0 { + return errors.KeyInvalidError("dsa: invalid order") + } + // confirm that g has order q mod p + if !q.ProbablyPrime(32) || new(big.Int).Exp(g, q, p).Cmp(one) != 0 { + return errors.KeyInvalidError("dsa: invalid order") + } + // check y + if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { + return errors.KeyInvalidError("dsa: mismatching values") + } + + return nil +} + +func validateElGamalParameters(priv *elgamal.PrivateKey) error { + p := priv.P // group prime + g := priv.G // g has order p-1 mod p + x := priv.X // secret + y := priv.Y // y == g**x mod p + one := big.NewInt(1) + // Expect g, y >= 2 and g < p + if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { + return errors.KeyInvalidError("elgamal: invalid group") + } + if p.BitLen() < 1024 { 
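+ // A 1024-bit prime is the conventional floor here: discrete logarithms + // in smaller ElGamal groups are considered feasible to compute, so such + // keys are rejected outright.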
+ return errors.KeyInvalidError("elgamal: group order too small") + } + pSub1 := new(big.Int).Sub(p, one) + if new(big.Int).Exp(g, pSub1, p).Cmp(one) != 0 { + return errors.KeyInvalidError("elgamal: invalid group") + } + // Since p-1 is not prime, g might have a smaller order that divides p-1. + // We cannot confirm the exact order of g, but we make sure it is not too small. + gExpI := new(big.Int).Set(g) + i := 1 + threshold := 2 << 17 // we want order > threshold + for i < threshold { + i++ // we check every order to make sure key validation is not easily bypassed by guessing y' + gExpI.Mod(new(big.Int).Mul(gExpI, g), p) + if gExpI.Cmp(one) == 0 { + return errors.KeyInvalidError("elgamal: order too small") + } + } + // Check y + if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { + return errors.KeyInvalidError("elgamal: mismatching values") + } + + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go new file mode 100644 index 0000000000..029b8f1aab --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go @@ -0,0 +1,12 @@ +package packet + +// Generated with `gpg --export-secret-keys "Test Key 2"` +const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec" + +// Generated by `gpg --export-secret-keys` followed by a manual extraction of +// the ElGamal subkey from the packets. +const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc" + +// pkcs1PrivKeyHex is a PKCS#1, RSA private key. 
+// Generated by `openssl genrsa 1024 | openssl rsa -outform DER | xxd -p` +const pkcs1PrivKeyHex = "3082025d02010002818100e98edfa1c3b35884a54d0b36a6a603b0290fa85e49e30fa23fc94fef9c6790bc4849928607aa48d809da326fb42a969d06ad756b98b9c1a90f5d4a2b6d0ac05953c97f4da3120164a21a679793ce181c906dc01d235cc085ddcdf6ea06c389b6ab8885dfd685959e693138856a68a7e5db263337ff82a088d583a897cf2d59e9020301000102818100b6d5c9eb70b02d5369b3ee5b520a14490b5bde8a317d36f7e4c74b7460141311d1e5067735f8f01d6f5908b2b96fbd881f7a1ab9a84d82753e39e19e2d36856be960d05ac9ef8e8782ea1b6d65aee28fdfe1d61451e8cff0adfe84322f12cf455028b581cf60eb9e0e140ba5d21aeba6c2634d7c65318b9a665fc01c3191ca21024100fa5e818da3705b0fa33278bb28d4b6f6050388af2d4b75ec9375dd91ccf2e7d7068086a8b82a8f6282e4fbbdb8a7f2622eb97295249d87acea7f5f816f54d347024100eecf9406d7dc49cdfb95ab1eff4064de84c7a30f64b2798936a0d2018ba9eb52e4b636f82e96c49cc63b80b675e91e40d1b2e4017d4b9adaf33ab3d9cf1c214f024100c173704ace742c082323066226a4655226819a85304c542b9dacbeacbf5d1881ee863485fcf6f59f3a604f9b42289282067447f2b13dfeed3eab7851fc81e0550240741fc41f3fc002b382eed8730e33c5d8de40256e4accee846667f536832f711ab1d4590e7db91a8a116ac5bff3be13d3f9243ff2e976662aa9b395d907f8e9c9024046a5696c9ef882363e06c9fa4e2f5b580906452befba03f4a99d0f873697ef1f851d2226ca7934b30b7c3e80cb634a67172bbbf4781735fe3e09263e2dd723e7" diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go new file mode 100644 index 0000000000..dd93c98702 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -0,0 +1,1035 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/dsa" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" +) + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. +type PublicKey struct { + Version int + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey, *x25519.PublicKey, *x448.PublicKey, *ed25519.PublicKey, *ed448.PublicKey + Fingerprint []byte + KeyId uint64 + IsSubkey bool + + // RFC 4880 fields + n, e, p, q, g, y encoding.Field + + // RFC 6637 fields + // oid contains the OID byte sequence identifying the elliptic curve used + oid encoding.Field + + // kdf stores key derivation function parameters + // used for ECDH encryption. See RFC 6637, Section 9. + kdf encoding.Field +} + +// UpgradeToV5 updates the version of the key to v5, and updates all necessary +// fields. 
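+// A sketch of the effect (pk is assumed to be an already-parsed +// *PublicKey): +// +//	pk.UpgradeToV5() +//	fmt.Printf("%X\n", pk.Fingerprint) // 32-byte SHA-256 fingerprint +//	// pk.KeyId now holds the first eight fingerprint octets, big-endian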
+func (pk *PublicKey) UpgradeToV5() { + pk.Version = 5 + pk.setFingerprintAndKeyId() +} + +// UpgradeToV6 updates the version of the key to v6, and updates all necessary +// fields. +func (pk *PublicKey) UpgradeToV6() { + pk.Version = 6 + pk.setFingerprintAndKeyId() +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeForHash(io.Writer) error + SerializeSignaturePrefix(io.Writer) error + serializeWithoutHeaders(io.Writer) error +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: new(encoding.MPI).SetBig(pub.N), + e: new(encoding.MPI).SetBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerprintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: new(encoding.MPI).SetBig(pub.P), + q: new(encoding.MPI).SetBig(pub.Q), + g: new(encoding.MPI).SetBig(pub.G), + y: new(encoding.MPI).SetBig(pub.Y), + } + + pk.setFingerprintAndKeyId() + return pk +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. +func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: new(encoding.MPI).SetBig(pub.P), + g: new(encoding.MPI).SetBig(pub.G), + y: new(encoding.MPI).SetBig(pub.Y), + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + p: encoding.NewMPI(pub.MarshalPoint()), + } + + curveInfo := ecc.FindByCurve(pub.GetCurve()) + if curveInfo == nil { + panic("unknown elliptic curve") + } + pk.oid = curveInfo.Oid + pk.setFingerprintAndKeyId() + return pk +} + +func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey { + var pk *PublicKey + var kdf = encoding.NewOID([]byte{0x1, pub.Hash.Id(), pub.Cipher.Id()}) + pk = &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDH, + PublicKey: pub, + p: encoding.NewMPI(pub.MarshalPoint()), + kdf: kdf, + } + + curveInfo := ecc.FindByCurve(pub.GetCurve()) + + if curveInfo == nil { + panic("unknown elliptic curve") + } + + pk.oid = curveInfo.Oid + pk.setFingerprintAndKeyId() + return pk +} + +func NewEdDSAPublicKey(creationTime time.Time, pub *eddsa.PublicKey) *PublicKey { + curveInfo := ecc.FindByCurve(pub.GetCurve()) + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEdDSA, + PublicKey: pub, + oid: curveInfo.Oid, + // Native point format, see draft-koch-eddsa-for-openpgp-04, Appendix B + p: encoding.NewMPI(pub.MarshalPoint()), + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewX25519PublicKey(creationTime time.Time, pub *x25519.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoX25519, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewX448PublicKey(creationTime 
time.Time, pub *x448.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoX448, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewEd25519PublicKey(creationTime time.Time, pub *ed25519.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEd25519, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewEd448PublicKey(creationTime time.Time, pub *ed448.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEd448, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 && buf[0] != 5 && buf[0] != 6 { + return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0]))) + } + + pk.Version = int(buf[0]) + if pk.Version >= 5 { + // Read the four-octet scalar octet count + // The count is not used in this implementation + var n [4]byte + _, err = readFull(r, n[:]) + if err != nil { + return + } + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + // Ignore four-ocet length + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoECDSA: + err = pk.parseECDSA(r) + case PubKeyAlgoECDH: + err = pk.parseECDH(r) + case PubKeyAlgoEdDSA: + err = pk.parseEdDSA(r) + case PubKeyAlgoX25519: + err = pk.parseX25519(r) + case PubKeyAlgoX448: + err = pk.parseX448(r) + case PubKeyAlgoEd25519: + err = pk.parseEd25519(r) + case PubKeyAlgoEd448: + err = pk.parseEd448(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerprintAndKeyId() + return +} + +func (pk *PublicKey) setFingerprintAndKeyId() { + // RFC 4880, section 12.2 + if pk.Version >= 5 { + fingerprint := sha256.New() + if err := pk.SerializeForHash(fingerprint); err != nil { + // Should not happen for a hash. + panic(err) + } + pk.Fingerprint = make([]byte, 32) + copy(pk.Fingerprint, fingerprint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8]) + } else { + fingerprint := sha1.New() + if err := pk.SerializeForHash(fingerprint); err != nil { + // Should not happen for a hash. + panic(err) + } + pk.Fingerprint = make([]byte, 20) + copy(pk.Fingerprint, fingerprint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) + } +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n = new(encoding.MPI) + if _, err = pk.n.ReadFrom(r); err != nil { + return + } + pk.e = new(encoding.MPI) + if _, err = pk.e.ReadFrom(r); err != nil { + return + } + + if len(pk.e.Bytes()) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.Bytes()), + E: 0, + } + for i := 0; i < len(pk.e.Bytes()); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.Bytes()[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. 
See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.q = new(encoding.MPI) + if _, err = pk.q.ReadFrom(r); err != nil { + return + } + pk.g = new(encoding.MPI) + if _, err = pk.g.ReadFrom(r); err != nil { + return + } + pk.y = new(encoding.MPI) + if _, err = pk.y.ReadFrom(r); err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.Bytes()) + dsa.Q = new(big.Int).SetBytes(pk.q.Bytes()) + dsa.G = new(big.Int).SetBytes(pk.g.Bytes()) + dsa.Y = new(big.Int).SetBytes(pk.y.Bytes()) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.g = new(encoding.MPI) + if _, err = pk.g.ReadFrom(r); err != nil { + return + } + pk.y = new(encoding.MPI) + if _, err = pk.y.ReadFrom(r); err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.Bytes()) + elgamal.G = new(big.Int).SetBytes(pk.g.Bytes()) + elgamal.Y = new(big.Int).SetBytes(pk.y.Bytes()) + pk.PublicKey = elgamal + return +} + +// parseECDSA parses ECDSA public key material from the given Reader. See +// RFC 6637, Section 9. +func (pk *PublicKey) parseECDSA(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + + c, ok := curveInfo.Curve.(ecc.ECDSACurve) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + + ecdsaKey := ecdsa.NewPublicKey(c) + err = ecdsaKey.UnmarshalPoint(pk.p.Bytes()) + pk.PublicKey = ecdsaKey + + return +} + +// parseECDH parses ECDH public key material from the given Reader. See +// RFC 6637, Section 9. 
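+// The algorithm-specific fields are the curve OID, the EC point, and a +// KDF specifier of the form {0x01, hashID, cipherID}; both IDs must map +// to algorithms supported below. A sketch of the specifier bytes for +// SHA-256 with AES-128 (algorithm IDs per RFC 4880): +// +//	kdf := []byte{0x01, 0x08, 0x07} // reserved octet, SHA-256, AES-128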
+func (pk *PublicKey) parseECDH(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.kdf = new(encoding.OID) + if _, err = pk.kdf.ReadFrom(r); err != nil { + return + } + + c, ok := curveInfo.Curve.(ecc.ECDHCurve) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + + if kdfLen := len(pk.kdf.Bytes()); kdfLen < 3 { + return errors.UnsupportedError("unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + if reserved := pk.kdf.Bytes()[0]; reserved != 0x01 { + return errors.UnsupportedError("unsupported KDF reserved field: " + strconv.Itoa(int(reserved))) + } + kdfHash, ok := algorithm.HashById[pk.kdf.Bytes()[1]] + if !ok { + return errors.UnsupportedError("unsupported ECDH KDF hash: " + strconv.Itoa(int(pk.kdf.Bytes()[1]))) + } + kdfCipher, ok := algorithm.CipherById[pk.kdf.Bytes()[2]] + if !ok { + return errors.UnsupportedError("unsupported ECDH KDF cipher: " + strconv.Itoa(int(pk.kdf.Bytes()[2]))) + } + + ecdhKey := ecdh.NewPublicKey(c, kdfHash, kdfCipher) + err = ecdhKey.UnmarshalPoint(pk.p.Bytes()) + pk.PublicKey = ecdhKey + + return +} + +func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + c, ok := curveInfo.Curve.(ecc.EdDSACurve) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + + if len(pk.p.Bytes()) == 0 { + return errors.StructuralError("empty EdDSA public key") + } + + pub := eddsa.NewPublicKey(c) + + switch flag := pk.p.Bytes()[0]; flag { + case 0x04: + // TODO: see _grcy_ecc_eddsa_ensure_compact in grcypt + return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) + case 0x40: + err = pub.UnmarshalPoint(pk.p.Bytes()) + default: + return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) + } + + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseX25519(r io.Reader) (err error) { + point := make([]byte, x25519.KeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &x25519.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseX448(r io.Reader) (err error) { + point := make([]byte, x448.KeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &x448.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseEd25519(r io.Reader) (err error) { + point := make([]byte, ed25519.PublicKeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &ed25519.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseEd448(r io.Reader) (err error) { + point := make([]byte, ed448.PublicKeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &ed448.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +// SerializeForHash serializes the PublicKey to w with the special packet +// header format needed for hashing. 
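+// This is the input to fingerprint computation; a v4 sketch, mirroring +// setFingerprintAndKeyId above: +// +//	h := sha1.New() +//	_ = pk.SerializeForHash(h) +//	fp := h.Sum(nil) // 20 bytes +//	keyID := binary.BigEndian.Uint64(fp[12:20])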
+func (pk *PublicKey) SerializeForHash(w io.Writer) error { + if err := pk.SerializeSignaturePrefix(w); err != nil { + return err + } + return pk.serializeWithoutHeaders(w) +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) error { + var pLength = pk.algorithmSpecificByteCount() + // version, timestamp, algorithm + pLength += versionSize + timestampSize + algorithmSize + if pk.Version >= 5 { + // key octet count (4). + pLength += 4 + _, err := w.Write([]byte{ + // When a v4 signature is made over a key, the hash data starts with the octet 0x99, followed by a two-octet length + // of the key, and then the body of the key packet. When a v6 signature is made over a key, the hash data starts + // with the salt, then octet 0x9B, followed by a four-octet length of the key, and then the body of the key packet. + 0x95 + byte(pk.Version), + byte(pLength >> 24), + byte(pLength >> 16), + byte(pLength >> 8), + byte(pLength), + }) + if err != nil { + return err + } + return nil + } + if _, err := w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}); err != nil { + return err + } + return nil +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := uint32(versionSize + timestampSize + algorithmSize) // 6 byte header + length += pk.algorithmSpecificByteCount() + if pk.Version >= 5 { + length += 4 // octet key count + } + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, int(length)) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +func (pk *PublicKey) algorithmSpecificByteCount() uint32 { + length := uint32(0) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += uint32(pk.n.EncodedLength()) + length += uint32(pk.e.EncodedLength()) + case PubKeyAlgoDSA: + length += uint32(pk.p.EncodedLength()) + length += uint32(pk.q.EncodedLength()) + length += uint32(pk.g.EncodedLength()) + length += uint32(pk.y.EncodedLength()) + case PubKeyAlgoElGamal: + length += uint32(pk.p.EncodedLength()) + length += uint32(pk.g.EncodedLength()) + length += uint32(pk.y.EncodedLength()) + case PubKeyAlgoECDSA: + length += uint32(pk.oid.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + case PubKeyAlgoECDH: + length += uint32(pk.oid.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + length += uint32(pk.kdf.EncodedLength()) + case PubKeyAlgoEdDSA: + length += uint32(pk.oid.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + case PubKeyAlgoX25519: + length += x25519.KeySize + case PubKeyAlgoX448: + length += x448.KeySize + case PubKeyAlgoEd25519: + length += ed25519.PublicKeySize + case PubKeyAlgoEd448: + length += ed448.PublicKeySize + default: + panic("unknown public key algorithm") + } + return length +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. 
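+// The layout is one octet of version, four octets of big-endian creation +// time, and one octet of algorithm ID, then (for v5 and v6 keys) a +// four-octet count of the algorithm-specific material, followed by that +// material itself.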
+func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + t := uint32(pk.CreationTime.Unix()) + if _, err = w.Write([]byte{ + byte(pk.Version), + byte(t >> 24), byte(t >> 16), byte(t >> 8), byte(t), + byte(pk.PubKeyAlgo), + }); err != nil { + return + } + + if pk.Version >= 5 { + n := pk.algorithmSpecificByteCount() + if _, err = w.Write([]byte{ + byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n), + }); err != nil { + return + } + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + if _, err = w.Write(pk.n.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.e.EncodedBytes()) + return + case PubKeyAlgoDSA: + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.q.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.g.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.y.EncodedBytes()) + return + case PubKeyAlgoElGamal: + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.g.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.y.EncodedBytes()) + return + case PubKeyAlgoECDSA: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.p.EncodedBytes()) + return + case PubKeyAlgoECDH: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.kdf.EncodedBytes()) + return + case PubKeyAlgoEdDSA: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.p.EncodedBytes()) + return + case PubKeyAlgoX25519: + publicKey := pk.PublicKey.(*x25519.PublicKey) + _, err = w.Write(publicKey.Point) + return + case PubKeyAlgoX448: + publicKey := pk.PublicKey.(*x448.PublicKey) + _, err = w.Write(publicKey.Point) + return + case PubKeyAlgoEd25519: + publicKey := pk.PublicKey.(*ed25519.PublicKey) + _, err = w.Write(publicKey.Point) + return + case PubKeyAlgoEd448: + publicKey := pk.PublicKey.(*ed448.PublicKey) + _, err = w.Write(publicKey.Point) + return + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. 
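+// A minimal verification sketch, assuming pk and sig were already parsed +// and message holds the signed bytes: +// +//	h, err := sig.PrepareVerify() +//	if err == nil { +//		h.Write(message) +//		err = pk.VerifySignature(h, sig) +//	} +//	// err == nil iff the signature is valid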
+func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + // see discussion https://github.com/ProtonMail/go-crypto/issues/107 + if sig.Version >= 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.Bytes())) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.Bytes()), new(big.Int).SetBytes(sig.DSASigS.Bytes())) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.Bytes()), new(big.Int).SetBytes(sig.ECDSASigS.Bytes())) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + case PubKeyAlgoEdDSA: + eddsaPublicKey := pk.PublicKey.(*eddsa.PublicKey) + if !eddsa.Verify(eddsaPublicKey, hashBytes, sig.EdDSASigR.Bytes(), sig.EdDSASigS.Bytes()) { + return errors.SignatureError("EdDSA verification failure") + } + return nil + case PubKeyAlgoEd25519: + ed25519PublicKey := pk.PublicKey.(*ed25519.PublicKey) + if !ed25519.Verify(ed25519PublicKey, hashBytes, sig.EdSig) { + return errors.SignatureError("Ed25519 verification failure") + } + return nil + case PubKeyAlgoEd448: + ed448PublicKey := pk.PublicKey.(*ed448.PublicKey) + if !ed448.Verify(ed448PublicKey, hashBytes, sig.EdSig) { + return errors.SignatureError("ed448 verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc hash.Hash) (h hash.Hash, err error) { + h = hashFunc + + // RFC 4880, section 5.2.4 + err = pk.SerializeForHash(h) + if err != nil { + return nil, err + } + + err = signed.SerializeForHash(h) + return +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + h, err := keySignatureHash(pk, signed, preparedHash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. 
See + // https://www.gnupg.org/faq/subkey-cross-certify.html. + if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + preparedHashEmbedded, err := sig.EmbeddedSignature.PrepareVerify() + if err != nil { + return err + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, preparedHashEmbedded); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc hash.Hash) (err error) { + return pk.SerializeForHash(hashFunc) +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. +func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + if err = keyRevocationHash(pk, preparedHash); err != nil { + return err + } + return pk.VerifySignature(preparedHash, sig) +} + +// VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, +// made by this public key, of signed. +func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) { + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + h, err := keySignatureHash(pk, signed, preparedHash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, h hash.Hash) (err error) { + + // RFC 4880, section 5.2.4 + if err := pk.SerializeSignaturePrefix(h); err != nil { + return err + } + if err := pk.serializeWithoutHeaders(h); err != nil { + return err + } + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return nil +} + +// directKeySignatureHash returns a Hash of the message that needs to be signed. +func directKeySignatureHash(pk *PublicKey, h hash.Hash) (err error) { + return pk.SerializeForHash(h) +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := sig.PrepareVerify() + if err != nil { + return err + } + if err := userIdSignatureHash(id, pub, h); err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifyDirectKeySignature returns nil iff sig is a valid signature, made by this +// public key. +func (pk *PublicKey) VerifyDirectKeySignature(sig *Signature) (err error) { + h, err := sig.PrepareVerify() + if err != nil { + return err + } + if err := directKeySignatureHash(pk, h); err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013").
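+// Note that this formats fingerprint octets 12..20, which equal the +// 64-bit key ID for v4 keys only; v5 and v6 key IDs are taken from the +// first eight fingerprint octets instead (see setFingerprintAndKeyId).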
+func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKey) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.BitLength() + case PubKeyAlgoDSA: + bitLength = pk.p.BitLength() + case PubKeyAlgoElGamal: + bitLength = pk.p.BitLength() + case PubKeyAlgoECDSA: + bitLength = pk.p.BitLength() + case PubKeyAlgoECDH: + bitLength = pk.p.BitLength() + case PubKeyAlgoEdDSA: + bitLength = pk.p.BitLength() + case PubKeyAlgoX25519: + bitLength = x25519.KeySize * 8 + case PubKeyAlgoX448: + bitLength = x448.KeySize * 8 + case PubKeyAlgoEd25519: + bitLength = ed25519.PublicKeySize * 8 + case PubKeyAlgoEd448: + bitLength = ed448.PublicKeySize * 8 + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} + +// Curve returns the used elliptic curve of this public key. +// Returns an error if no elliptic curve is used. +func (pk *PublicKey) Curve() (curve Curve, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoECDSA, PubKeyAlgoECDH, PubKeyAlgoEdDSA: + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return "", errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + curve = Curve(curveInfo.GenName) + case PubKeyAlgoEd25519, PubKeyAlgoX25519: + curve = Curve25519 + case PubKeyAlgoEd448, PubKeyAlgoX448: + curve = Curve448 + default: + err = errors.InvalidArgumentError("public key does not operate with an elliptic curve") + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired or is created in the future. 
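+// Expiry is the key's creation time plus the KeyLifetimeSecs carried in +// the self-signature; a nil or zero lifetime means the key never expires. +// Sketch (selfSig is assumed to be the key's self-signature): +// +//	if pk.KeyExpired(selfSig, time.Now()) { +//		// reject this key +//	}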
+func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool { + if pk.CreationTime.Unix() > currentTime.Unix() { + return true + } + if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 { + return false + } + expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.Unix() > expiry.Unix() +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go new file mode 100644 index 0000000000..b255f1f6f8 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go @@ -0,0 +1,24 @@ +package packet + +const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb" + +const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001" + +const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed" + +const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0" + +const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b" + +const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4" + +const ecdhFingerprintHex = "722354df2475a42164d1d49faa8b938f9a201946" + +const ecdhPkDataHex = "b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec91803010909" + +const eddsaFingerprintHex = "b2d5e5ec0e6deca6bc8eeeb00907e75e1dd99ad8" + +const eddsaPkDataHex = "98330456e2132b16092b06010401da470f01010740bbda39266affa511a8c2d02edf690fb784b0499c4406185811a163539ef11dc1b41d74657374696e67203c74657374696e674074657374696e672e636f6d3e8879041316080021050256e2132b021b03050b09080702061508090a0b020416020301021e01021780000a09100907e75e1dd99ad86d0c00fe39d2008359352782bc9b61ac382584cd8eff3f57a18c2287e3afeeb05d1f04ba00fe2d0bc1ddf3ff8adb9afa3e7d9287244b4ec567f3db4d60b74a9b5465ed528203" + +// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key +const ecc384PubHex = 
`99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go new file mode 100644 index 0000000000..dd84092392 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go @@ -0,0 +1,209 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +type PacketReader interface { + Next() (p Packet, err error) + Push(reader io.Reader) (err error) + Unread(p Packet) +} + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown/unsupported/Marker packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + for { + p, err := r.read() + if err == io.EOF { + break + } else if err != nil { + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if _, ok := err.(errors.UnsupportedError); ok { + switch p.(type) { + case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: + return nil, err + } + continue + } + return nil, err + } else { + //A marker packet MUST be ignored when received + switch p.(type) { + case *Marker: + continue + } + return p, nil + } + } + return nil, io.EOF +} + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown/Marker packet types are skipped while unsupported +// packets are returned as UnsupportedPacket type. 
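+// This lets callers report, rather than silently drop, packets whose +// framing parses but whose contents the library cannot handle. Sketch: +// +//	p, err := r.NextWithUnsupported() +//	if u, ok := p.(*UnsupportedPacket); ok && err == nil { +//		fmt.Println("skipping unsupported packet:", u.Error) +//	}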
+func (r *Reader) NextWithUnsupported() (p Packet, err error) { + for { + p, err = r.read() + if err == io.EOF { + break + } else if err != nil { + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if casteErr, ok := err.(errors.UnsupportedError); ok { + return &UnsupportedPacket{ + IncompletePacket: p, + Error: casteErr, + }, nil + } + return + } else { + //A marker packet MUST be ignored when received + switch p.(type) { + case *Marker: + continue + } + return + } + } + return nil, io.EOF +} + +func (r *Reader) read() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + return p, err + } + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. +func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} + +// CheckReader is similar to Reader but additionally +// uses the pushdown automata to verify the read packet sequence. +type CheckReader struct { + Reader + verifier *SequenceVerifier + fullyRead bool +} + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +// If the read packet sequence does not conform to the packet composition +// rules in rfc4880, it returns an error. 
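+// The verifier consumes one grammar symbol per packet plus a final EOS +// symbol, so a stream that is cut off mid-message fails in AssertValid +// rather than being accepted as a shorter, still-plausible sequence.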
+func (r *CheckReader) Next() (p Packet, err error) { + if r.fullyRead { + return nil, io.EOF + } + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + var errMsg error + for len(r.readers) > 0 { + p, errMsg, err = ReadWithCheck(r.readers[len(r.readers)-1], r.verifier) + if errMsg != nil { + err = errMsg + return + } + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + // A marker packet MUST be ignored when received + switch p.(type) { + case *Marker: + continue + } + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if _, ok := err.(errors.UnsupportedError); ok { + switch p.(type) { + case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: + return nil, err + } + continue + } + return nil, err + } + if errMsg = r.verifier.Next(EOSSymbol); errMsg != nil { + return nil, errMsg + } + if errMsg = r.verifier.AssertValid(); errMsg != nil { + return nil, errMsg + } + r.fullyRead = true + return nil, io.EOF +} + +func NewCheckReader(r io.Reader) *CheckReader { + return &CheckReader{ + Reader: Reader{ + q: nil, + readers: []io.Reader{r}, + }, + verifier: NewSequenceVerifier(), + fullyRead: false, + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go new file mode 100644 index 0000000000..fb2e362e4a --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go @@ -0,0 +1,15 @@ +package packet + +// Recipient type represents an Intended Recipient Fingerprint subpacket +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-intended-recipient-fingerpr +type Recipient struct { + KeyVersion int + Fingerprint []byte +} + +func (r *Recipient) Serialize() []byte { + packet := make([]byte, len(r.Fingerprint)+1) + packet[0] = byte(r.KeyVersion) + copy(packet[1:], r.Fingerprint) + return packet +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go new file mode 100644 index 0000000000..ff14da3185 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -0,0 +1,1409 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "encoding/binary" + "hash" + "io" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" +) + +const ( + // See RFC 4880, section 5.2.3.21 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage + KeyFlagSplitKey + KeyFlagAuthenticate + _ + KeyFlagGroupKey +) + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + Version int + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + // salt contains a random salt value for v6 signatures + // See the crypto refresh, Section 5.2.3.
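+ // The salt length is fixed by the hash algorithm (see + // SaltLengthForHash); the parser rejects any other size.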
+ salt []byte + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + + // Metadata includes format, filename and time, and is protected by v5 + // signatures of type 0x00 or 0x01. This metadata is included in the hash + // computation; if nil, six 0x00 bytes are used instead. See section 5.2.4. + Metadata *LiteralData + + CreationTime time.Time + + RSASignature encoding.Field + DSASigR, DSASigS encoding.Field + ECDSASigR, ECDSASigS encoding.Field + EdDSASigR, EdDSASigS encoding.Field + EdSig []byte + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional, so they are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + PreferredCipherSuites [][2]uint8 + IssuerKeyId *uint64 + IssuerFingerprint []byte + SignerUserId *string + IsPrimaryId *bool + Notations []*Notation + IntendedRecipients []*Recipient + + // TrustLevel and TrustAmount can be set by the signer to assert that + // the key is not only valid but also trustworthy at the specified + // level. + // See RFC 4880, section 5.2.3.13 for details. + TrustLevel TrustLevel + TrustAmount TrustAmount + + // TrustRegularExpression can be used in conjunction with trust Signature + // packets to limit the scope of the trust that is extended. + // See RFC 4880, section 5.2.3.14 for details. + TrustRegularExpression *string + + // PolicyURI can be set to the URI of a document that describes the + // policy under which the signature was issued. See RFC 4880, section + // 5.2.3.20 for details. + PolicyURI string + + // FlagsValid is set if any flags were given. See RFC 4880, section + // 5.2.3.21 for details. + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey bool + + // RevocationReason is set if this signature has been revoked. + // See RFC 4880, section 5.2.3.23 for details. + RevocationReason *ReasonForRevocation + RevocationReasonText string + + // In a self-signature, these flags are set if there is a features subpacket + // indicating that the issuer implementation supports these features + // see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#features-subpacket + SEIPDv1, SEIPDv2 bool + + // EmbeddedSignature, if non-nil, is a signature of the parent key, by + // this key. This prevents an attacker from claiming another's signing + // subkey as their own. + EmbeddedSignature *Signature + + outSubpackets []outputSubpacket +} + +// VerifiableSignature internally keeps state on whether +// the signature has been verified before. +type VerifiableSignature struct { + Valid *bool // nil if it has not been verified yet + Packet *Signature +} + +// SaltedHashSpecifier specifies that the given salt and hash are +// used by a v6 signature. +type SaltedHashSpecifier struct { + Hash crypto.Hash + Salt []byte +} + +// NewVerifiableSig returns a struct of type VerifiableSignature referencing the input signature. +func NewVerifiableSig(signature *Signature) *VerifiableSignature { + return &VerifiableSignature{ + Packet: signature, + } +} + +// Salt returns the signature salt for v6 signatures.
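+// It returns nil for v4 and v5 signatures, which are unsalted.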
+func (sig *Signature) Salt() []byte { + if sig == nil { + return nil + } + return sig.salt +} + +func (sig *Signature) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.3 + var buf [7]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + if buf[0] != 4 && buf[0] != 5 && buf[0] != 6 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + sig.Version = int(buf[0]) + if sig.Version == 6 { + _, err = readFull(r, buf[:7]) + } else { + _, err = readFull(r, buf[:5]) + } + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + + if sig.Version < 5 { + sig.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2]) + } else { + sig.Hash, ok = algorithm.HashIdToHash(buf[2]) + } + + if !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + var hashedSubpacketsLength int + if sig.Version == 6 { + // For a v6 signature, a four-octet length is used. + hashedSubpacketsLength = + int(buf[3])<<24 | + int(buf[4])<<16 | + int(buf[5])<<8 | + int(buf[6]) + } else { + hashedSubpacketsLength = int(buf[3])<<8 | int(buf[4]) + } + hashedSubpackets := make([]byte, hashedSubpacketsLength) + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + err = sig.buildHashSuffix(hashedSubpackets) + if err != nil { + return + } + + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + if sig.Version == 6 { + _, err = readFull(r, buf[:4]) + } else { + _, err = readFull(r, buf[:2]) + } + + if err != nil { + return + } + var unhashedSubpacketsLength uint32 + if sig.Version == 6 { + unhashedSubpacketsLength = uint32(buf[0])<<24 | uint32(buf[1])<<16 | uint32(buf[2])<<8 | uint32(buf[3]) + } else { + unhashedSubpacketsLength = uint32(buf[0])<<8 | uint32(buf[1]) + } + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + if sig.Version == 6 { + // Only for v6 signatures, a variable-length field containing the salt + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + saltLength := int(buf[0]) + var expectedSaltLength int + expectedSaltLength, err = SaltLengthForHash(sig.Hash) + if err != nil { + return + } + if saltLength != expectedSaltLength { + err = errors.StructuralError("unexpected salt size for the given hash algorithm") + return + } + salt := make([]byte, expectedSaltLength) + _, err = readFull(r, salt) + if err != nil { + return + } + sig.salt = salt + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature = new(encoding.MPI) + _, err = sig.RSASignature.ReadFrom(r) + case PubKeyAlgoDSA: + sig.DSASigR = new(encoding.MPI) + if _, err = sig.DSASigR.ReadFrom(r); err != nil { + return + } + + sig.DSASigS = new(encoding.MPI) + _, err = sig.DSASigS.ReadFrom(r) + case PubKeyAlgoECDSA: + sig.ECDSASigR = new(encoding.MPI) + if _, err = sig.ECDSASigR.ReadFrom(r); err != nil { + return + } + + sig.ECDSASigS = new(encoding.MPI) + _, err = 
sig.ECDSASigS.ReadFrom(r) + case PubKeyAlgoEdDSA: + sig.EdDSASigR = new(encoding.MPI) + if _, err = sig.EdDSASigR.ReadFrom(r); err != nil { + return + } + + sig.EdDSASigS = new(encoding.MPI) + if _, err = sig.EdDSASigS.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoEd25519: + sig.EdSig, err = ed25519.ReadSignature(r) + if err != nil { + return + } + case PubKeyAlgoEd448: + sig.EdSig, err = ed448.ReadSignature(r) + if err != nil { + return + } + default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1. +func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + trustSubpacket signatureSubpacketType = 5 + regularExpressionSubpacket signatureSubpacketType = 6 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + notationDataSubpacket signatureSubpacketType = 20 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + primaryUserIdSubpacket signatureSubpacketType = 25 + policyUriSubpacket signatureSubpacketType = 26 + keyFlagsSubpacket signatureSubpacketType = 27 + signerUserIdSubpacket signatureSubpacketType = 28 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 + issuerFingerprintSubpacket signatureSubpacketType = 33 + intendedRecipientSubpacket signatureSubpacketType = 35 + prefCipherSuitesSubpacket signatureSubpacketType = 39 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. 
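+//
+// Editorial illustration (not part of the upstream sources): a hashed
+// creation-time subpacket is laid out on the wire as
+//
+//	0x05                   one-octet body length (type octet + 4 data octets)
+//	0x02                   type 2 (creationTimeSubpacket); high bit clear, so not critical
+//	0x65 0x00 0x00 0x00    big-endian 32-bit UNIX timestamp
+//
+// Setting the high bit of the type octet (0x82) would mark the same subpacket
+// critical; unknown critical subpackets are rejected below.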
+func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + if !isHashed && + packetType != issuerSubpacket && + packetType != issuerFingerprintSubpacket && + packetType != embeddedSignatureSubpacket { + return + } + switch packetType { + case creationTimeSubpacket: + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case trustSubpacket: + if len(subpacket) != 2 { + err = errors.StructuralError("trust subpacket with bad length") + return + } + // Trust level and amount, section 5.2.3.13 + sig.TrustLevel = TrustLevel(subpacket[0]) + sig.TrustAmount = TrustAmount(subpacket[1]) + case regularExpressionSubpacket: + if len(subpacket) == 0 { + err = errors.StructuralError("regexp subpacket with bad length") + return + } + // Trust regular expression, section 5.2.3.14 + // RFC specifies the string should be null-terminated; remove a null byte from the end + if subpacket[len(subpacket)-1] != 0x00 { + err = errors.StructuralError("expected regular expression to be null-terminated") + return + } + trustRegularExpression := string(subpacket[:len(subpacket)-1]) + sig.TrustRegularExpression = &trustRegularExpression + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + // Issuer, section 5.2.3.5 + if sig.Version > 4 && isHashed { + err = errors.StructuralError("issuer subpacket found in v6 key") + return + } + if len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + if sig.Version <= 4 { + 
			sig.IssuerKeyId = new(uint64)
+			*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
+		}
+	case notationDataSubpacket:
+		// Notation data, section 5.2.3.16
+		if len(subpacket) < 8 {
+			err = errors.StructuralError("notation data subpacket with bad length")
+			return
+		}
+
+		nameLength := uint32(subpacket[4])<<8 | uint32(subpacket[5])
+		valueLength := uint32(subpacket[6])<<8 | uint32(subpacket[7])
+		if len(subpacket) != int(nameLength)+int(valueLength)+8 {
+			err = errors.StructuralError("notation data subpacket with bad length")
+			return
+		}
+
+		notation := Notation{
+			IsHumanReadable: (subpacket[0] & 0x80) == 0x80,
+			Name:            string(subpacket[8:(nameLength + 8)]),
+			Value:           subpacket[(nameLength + 8):(valueLength + nameLength + 8)],
+			IsCritical:      isCritical,
+		}
+
+		sig.Notations = append(sig.Notations, &notation)
+	case prefHashAlgosSubpacket:
+		// Preferred hash algorithms, section 5.2.3.8
+		sig.PreferredHash = make([]byte, len(subpacket))
+		copy(sig.PreferredHash, subpacket)
+	case prefCompressionSubpacket:
+		// Preferred compression algorithms, section 5.2.3.9
+		sig.PreferredCompression = make([]byte, len(subpacket))
+		copy(sig.PreferredCompression, subpacket)
+	case primaryUserIdSubpacket:
+		// Primary User ID, section 5.2.3.19
+		if len(subpacket) != 1 {
+			err = errors.StructuralError("primary user id subpacket with bad length")
+			return
+		}
+		sig.IsPrimaryId = new(bool)
+		if subpacket[0] > 0 {
+			*sig.IsPrimaryId = true
+		}
+	case keyFlagsSubpacket:
+		// Key flags, section 5.2.3.21
+		if len(subpacket) == 0 {
+			err = errors.StructuralError("empty key flags subpacket")
+			return
+		}
+		sig.FlagsValid = true
+		if subpacket[0]&KeyFlagCertify != 0 {
+			sig.FlagCertify = true
+		}
+		if subpacket[0]&KeyFlagSign != 0 {
+			sig.FlagSign = true
+		}
+		if subpacket[0]&KeyFlagEncryptCommunications != 0 {
+			sig.FlagEncryptCommunications = true
+		}
+		if subpacket[0]&KeyFlagEncryptStorage != 0 {
+			sig.FlagEncryptStorage = true
+		}
+		if subpacket[0]&KeyFlagSplitKey != 0 {
+			sig.FlagSplitKey = true
+		}
+		if subpacket[0]&KeyFlagAuthenticate != 0 {
+			sig.FlagAuthenticate = true
+		}
+		if subpacket[0]&KeyFlagGroupKey != 0 {
+			sig.FlagGroupKey = true
+		}
+	case signerUserIdSubpacket:
+		userId := string(subpacket)
+		sig.SignerUserId = &userId
+	case reasonForRevocationSubpacket:
+		// Reason For Revocation, section 5.2.3.23
+		if len(subpacket) == 0 {
+			err = errors.StructuralError("empty revocation reason subpacket")
+			return
+		}
+		sig.RevocationReason = new(ReasonForRevocation)
+		*sig.RevocationReason = NewReasonForRevocation(subpacket[0])
+		sig.RevocationReasonText = string(subpacket[1:])
+	case featuresSubpacket:
+		// Features subpacket, section 5.2.3.24 specifies a very general
+		// mechanism for OpenPGP implementations to signal support for new
+		// features.
+		if len(subpacket) > 0 {
+			if subpacket[0]&0x01 != 0 {
+				sig.SEIPDv1 = true
+			}
+			// 0x02 and 0x04 are reserved
+			if subpacket[0]&0x08 != 0 {
+				sig.SEIPDv2 = true
+			}
+		}
+	case embeddedSignatureSubpacket:
+		// The only usage is in signatures that cross-certify
+		// signing subkeys. Section 5.2.3.26 describes the
+		// format, with its usage described in section 11.1.
+		if sig.EmbeddedSignature != nil {
+			err = errors.StructuralError("cannot have multiple embedded signatures")
+			return
+		}
+		sig.EmbeddedSignature = new(Signature)
+		// Embedded signatures are required to be v4 signatures, see
+		// section 12.1; the type of the embedded signature is
+		// validated below.
+		if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
+			return nil, err
+		}
+		if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding {
+			return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
+		}
+	case policyUriSubpacket:
+		// Policy URI, section 5.2.3.20
+		sig.PolicyURI = string(subpacket)
+	case issuerFingerprintSubpacket:
+		if len(subpacket) == 0 {
+			err = errors.StructuralError("empty issuer fingerprint subpacket")
+			return
+		}
+		v, l := subpacket[0], len(subpacket[1:])
+		if v >= 5 && l != 32 || v < 5 && l != 20 {
+			return nil, errors.StructuralError("bad fingerprint length")
+		}
+		sig.IssuerFingerprint = make([]byte, l)
+		copy(sig.IssuerFingerprint, subpacket[1:])
+		sig.IssuerKeyId = new(uint64)
+		if v >= 5 {
+			*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9])
+		} else {
+			*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21])
+		}
+	case intendedRecipientSubpacket:
+		// Intended Recipient Fingerprint
+		// https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-intended-recipient-fingerpr
+		if len(subpacket) < 1 {
+			return nil, errors.StructuralError("invalid intended recipient fingerprint length")
+		}
+		version, length := subpacket[0], len(subpacket[1:])
+		if version >= 5 && length != 32 || version < 5 && length != 20 {
+			return nil, errors.StructuralError("invalid fingerprint length")
+		}
+		fingerprint := make([]byte, length)
+		copy(fingerprint, subpacket[1:])
+		sig.IntendedRecipients = append(sig.IntendedRecipients, &Recipient{int(version), fingerprint})
+	case prefCipherSuitesSubpacket:
+		// Preferred AEAD cipher suites
+		// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-preferred-aead-ciphersuites
+		if len(subpacket)%2 != 0 {
+			err = errors.StructuralError("invalid aead cipher suite length")
+			return
+		}
+
+		sig.PreferredCipherSuites = make([][2]byte, len(subpacket)/2)
+
+		for i := 0; i < len(subpacket)/2; i++ {
+			sig.PreferredCipherSuites[i] = [2]uint8{subpacket[2*i], subpacket[2*i+1]}
+		}
+	default:
+		if isCritical {
+			err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
+			return
+		}
+	}
+	return
+
+Truncated:
+	err = errors.StructuralError("signature subpacket truncated")
+	return
+}
+
+// subpacketLengthLength returns the length, in bytes, of an encoded length value.
+func subpacketLengthLength(length int) int {
+	if length < 192 {
+		return 1
+	}
+	if length < 16320 {
+		return 2
+	}
+	return 5
+}
+
+func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool {
+	if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 {
+		return bytes.Equal(sig.IssuerFingerprint, pk.Fingerprint)
+	}
+	return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId
+}
+
+func (sig *Signature) CheckKeyIdOrFingerprintExplicit(fingerprint []byte, keyId uint64) bool {
+	if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 && fingerprint != nil {
+		return bytes.Equal(sig.IssuerFingerprint, fingerprint)
+	}
+	return sig.IssuerKeyId != nil && *sig.IssuerKeyId == keyId
+}
+
+// serializeSubpacketLength marshals the given length into to.
+func serializeSubpacketLength(to []byte, length int) int {
+	// RFC 4880, Section 4.2.2.
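+	//
+	// Worked example (editorial note, not in the upstream sources): length
+	// 1000 falls in the two-octet range 192..16319 and is encoded as
+	// ((1000-192)>>8)+192 = 0xC3 followed by (1000-192)&0xFF = 0x28; the
+	// decoder in parseSignatureSubpacket inverts this arithmetic exactly.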
+ if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + if subpacket.isCritical { + to[n] |= 0x80 + } + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } +} + +// SigExpired returns whether sig is a signature that has expired or is created +// in the future. +func (sig *Signature) SigExpired(currentTime time.Time) bool { + if sig.CreationTime.Unix() > currentTime.Unix() { + return true + } + if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second) + return currentTime.Unix() > expiry.Unix() +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. +func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) { + var hashId byte + var ok bool + + if sig.Version < 5 { + hashId, ok = algorithm.HashToHashIdWithSha1(sig.Hash) + } else { + hashId, ok = algorithm.HashToHashId(sig.Hash) + } + + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + + hashedFields := bytes.NewBuffer([]byte{ + uint8(sig.Version), + uint8(sig.SigType), + uint8(sig.PubKeyAlgo), + uint8(hashId), + }) + hashedSubpacketsLength := len(hashedSubpackets) + if sig.Version == 6 { + // v6 signatures store the length in 4 octets + hashedFields.Write([]byte{ + uint8(hashedSubpacketsLength >> 24), + uint8(hashedSubpacketsLength >> 16), + uint8(hashedSubpacketsLength >> 8), + uint8(hashedSubpacketsLength), + }) + } else { + hashedFields.Write([]byte{ + uint8(hashedSubpacketsLength >> 8), + uint8(hashedSubpacketsLength), + }) + } + lenPrefix := hashedFields.Len() + hashedFields.Write(hashedSubpackets) + + var l uint64 = uint64(lenPrefix + len(hashedSubpackets)) + if sig.Version == 5 { + // v5 case + hashedFields.Write([]byte{0x05, 0xff}) + hashedFields.Write([]byte{ + uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + } else { + // v4 and v6 case + hashedFields.Write([]byte{byte(sig.Version), 0xff}) + hashedFields.Write([]byte{ + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + } + sig.HashSuffix = make([]byte, hashedFields.Len()) + copy(sig.HashSuffix, hashedFields.Bytes()) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + hashedSubpackets := make([]byte, hashedSubpacketsLen) + 
	serializeSubpackets(hashedSubpackets, sig.outSubpackets, true)
+	err = sig.buildHashSuffix(hashedSubpackets)
+	if err != nil {
+		return
+	}
+	if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) {
+		sig.AddMetadataToHashSuffix()
+	}
+
+	h.Write(sig.HashSuffix)
+	digest = h.Sum(nil)
+	copy(sig.HashTag[:], digest)
+	return
+}
+
+// PrepareSign must be called to create a hash object before Sign for v6 signatures.
+// The created hash object initially hashes a randomly generated salt
+// as required by v6 signatures. The generated salt is stored in sig. If the signature is not v6,
+// the method returns a plain hash object with no salt.
+// See the crypto refresh, Section 3.2.4.
+func (sig *Signature) PrepareSign(config *Config) (hash.Hash, error) {
+	if !sig.Hash.Available() {
+		return nil, errors.UnsupportedError("hash function")
+	}
+	hasher := sig.Hash.New()
+	if sig.Version == 6 {
+		if sig.salt == nil {
+			var err error
+			sig.salt, err = SignatureSaltForHash(sig.Hash, config.Random())
+			if err != nil {
+				return nil, err
+			}
+		}
+		hasher.Write(sig.salt)
+	}
+	return hasher, nil
+}
+
+// SetSalt sets the signature salt for v6 signatures.
+// It assumes the salt was generated correctly and only checks that its
+// length matches the expected length for the hash algorithm.
+// If the signature is not v6, the method ignores the salt.
+// Use PrepareSign whenever possible instead of generating and
+// hashing the salt externally.
+// See the crypto refresh, Section 3.2.4.
+func (sig *Signature) SetSalt(salt []byte) error {
+	if sig.Version == 6 {
+		expectedSaltLength, err := SaltLengthForHash(sig.Hash)
+		if err != nil {
+			return err
+		}
+		if salt == nil || len(salt) != expectedSaltLength {
+			return errors.InvalidArgumentError("unexpected salt size for the given hash algorithm")
+		}
+		sig.salt = salt
+	}
+	return nil
+}
+
+// PrepareVerify must be called to create a hash object before verifying v6 signatures.
+// The created hash object initially hashes the internally stored salt.
+// If the signature is not v6, the method returns a plain hash object with no salt.
+// See the crypto refresh, Section 3.2.4.
+func (sig *Signature) PrepareVerify() (hash.Hash, error) {
+	if !sig.Hash.Available() {
+		return nil, errors.UnsupportedError("hash function")
+	}
+	hasher := sig.Hash.New()
+	if sig.Version == 6 {
+		if sig.salt == nil {
+			return nil, errors.StructuralError("v6 requires a salt for the hash to be signed")
+		}
+		hasher.Write(sig.salt)
+	}
+	return hasher, nil
+}
+
+// Sign signs a message with a private key. The hash, h, must contain
+// the hash of the message to be signed and will be mutated by this function.
+// On success, the signature is stored in sig. Call Serialize to write it out.
+// If config is nil, sensible defaults will be used.
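+//
+// A minimal usage sketch (editorial, not part of the upstream sources; priv,
+// config, message and w are assumed to exist, and error handling is elided):
+//
+//	sig := &Signature{
+//		SigType:      SigTypeBinary,
+//		PubKeyAlgo:   priv.PubKeyAlgo,
+//		Hash:         crypto.SHA256,
+//		CreationTime: config.Now(),
+//		Version:      priv.PublicKey.Version,
+//	}
+//	h, _ := sig.PrepareSign(config) // also draws the salt for v6 signatures
+//	h.Write(message)
+//	_ = sig.Sign(h, priv, config)
+//	_ = sig.Serialize(w)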
+func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + sig.Version = priv.PublicKey.Version + sig.IssuerFingerprint = priv.PublicKey.Fingerprint + sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey) + if err != nil { + return err + } + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + // supports both *rsa.PrivateKey and crypto.Signer + sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + if err == nil { + sig.RSASignature = encoding.NewMPI(sigdata) + } + case PubKeyAlgoDSA: + dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) + + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 + if len(digest) > subgroupSize { + digest = digest[:subgroupSize] + } + r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) + if err == nil { + sig.DSASigR = new(encoding.MPI).SetBig(r) + sig.DSASigS = new(encoding.MPI).SetBig(s) + } + case PubKeyAlgoECDSA: + sk := priv.PrivateKey.(*ecdsa.PrivateKey) + r, s, err := ecdsa.Sign(config.Random(), sk, digest) + + if err == nil { + sig.ECDSASigR = new(encoding.MPI).SetBig(r) + sig.ECDSASigS = new(encoding.MPI).SetBig(s) + } + case PubKeyAlgoEdDSA: + sk := priv.PrivateKey.(*eddsa.PrivateKey) + r, s, err := eddsa.Sign(sk, digest) + if err == nil { + sig.EdDSASigR = encoding.NewMPI(r) + sig.EdDSASigS = encoding.NewMPI(s) + } + case PubKeyAlgoEd25519: + sk := priv.PrivateKey.(*ed25519.PrivateKey) + signature, err := ed25519.Sign(sk, digest) + if err == nil { + sig.EdSig = signature + } + case PubKeyAlgoEd448: + sk := priv.PrivateKey.(*ed448.PrivateKey) + signature, err := ed448.Sign(sk, digest) + if err == nil { + sig.EdSig = signature + } + default: + err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) + } + + return +} + +// SignUserId computes a signature from priv, asserting that pub is a valid +// key for the identity id. On success, the signature is stored in sig. Call +// Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + if err := userIdSignatureHash(id, pub, prepareHash); err != nil { + return err + } + return sig.Sign(prepareHash, priv, config) +} + +// SignDirectKeyBinding computes a signature from priv +// On success, the signature is stored in sig. +// Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignDirectKeyBinding(pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + if err := directKeySignatureHash(pub, prepareHash); err != nil { + return err + } + return sig.Sign(prepareHash, priv, config) +} + +// CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success, +// the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. 
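+//
+// Illustrative call (editorial sketch; subPriv, primaryPub and config are
+// assumed): a signing-capable subkey produces the embedded back-signature
+// over primary key plus subkey:
+//
+//	backSig := &Signature{
+//		SigType:      SigTypePrimaryKeyBinding,
+//		PubKeyAlgo:   subPriv.PubKeyAlgo,
+//		Hash:         crypto.SHA256,
+//		CreationTime: config.Now(),
+//		Version:      subPriv.PublicKey.Version,
+//	}
+//	err := backSig.CrossSignKey(&subPriv.PublicKey, primaryPub, subPriv, config)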
+func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey, + config *Config) error { + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + h, err := keySignatureHash(hashKey, pub, prepareHash) + if err != nil { + return err + } + return sig.Sign(h, signingKey, config) +} + +// SignKey computes a signature from priv, asserting that pub is a subkey. On +// success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + h, err := keySignatureHash(&priv.PublicKey, pub, prepareHash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// RevokeKey computes a revocation signature of pub using priv. On success, the signature is +// stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + if err := keyRevocationHash(pub, prepareHash); err != nil { + return err + } + return sig.Sign(prepareHash, priv, config) +} + +// RevokeSubkey computes a subkey revocation signature of pub using priv. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error { + // Identical to a subkey binding signature + return sig.SignKey(pub, priv, config) +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. 
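+//
+// For example (editorial sketch), writing a freshly signed signature into a
+// buffer:
+//
+//	var buf bytes.Buffer
+//	if err := sig.Serialize(&buf); err == nil {
+//		// buf now holds the complete signature packet, header included.
+//	}
+//
+// For a signature that was parsed rather than created, outSubpackets is empty
+// and the raw subpackets read off the wire are re-emitted instead.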
+func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil && sig.EdSig == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = int(sig.RSASignature.EncodedLength()) + case PubKeyAlgoDSA: + sigLength = int(sig.DSASigR.EncodedLength()) + sigLength += int(sig.DSASigS.EncodedLength()) + case PubKeyAlgoECDSA: + sigLength = int(sig.ECDSASigR.EncodedLength()) + sigLength += int(sig.ECDSASigS.EncodedLength()) + case PubKeyAlgoEdDSA: + sigLength = int(sig.EdDSASigR.EncodedLength()) + sigLength += int(sig.EdDSASigS.EncodedLength()) + case PubKeyAlgoEd25519: + sigLength = ed25519.SignatureSize + case PubKeyAlgoEd448: + sigLength = ed448.SignatureSize + default: + panic("impossible") + } + + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := 4 + /* length of version|signature type|public-key algorithm|hash algorithm */ + 2 /* length of hashed subpackets */ + hashedSubpacketsLen + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + if sig.Version == 6 { + length += 4 + /* the two length fields are four-octet instead of two */ + 1 + /* salt length */ + len(sig.salt) /* length salt */ + } + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + err = sig.serializeBody(w) + if err != nil { + return err + } + return +} + +func (sig *Signature) serializeBody(w io.Writer) (err error) { + var fields []byte + if sig.Version == 6 { + // v6 signatures use 4 octets for length + hashedSubpacketsLen := + uint32(uint32(sig.HashSuffix[4])<<24) | + uint32(uint32(sig.HashSuffix[5])<<16) | + uint32(uint32(sig.HashSuffix[6])<<8) | + uint32(sig.HashSuffix[7]) + fields = sig.HashSuffix[:8+hashedSubpacketsLen] + } else { + hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | + uint16(sig.HashSuffix[5]) + fields = sig.HashSuffix[:6+hashedSubpacketsLen] + + } + _, err = w.Write(fields) + if err != nil { + return + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + var unhashedSubpackets []byte + if sig.Version == 6 { + unhashedSubpackets = make([]byte, 4+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 24) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen >> 16) + unhashedSubpackets[2] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[3] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[4:], sig.outSubpackets, false) + } else { + unhashedSubpackets = make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + } + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + if sig.Version == 6 { + // write salt for v6 signatures + _, err = w.Write([]byte{uint8(len(sig.salt))}) + if err != nil { + return + } + _, err = w.Write(sig.salt) + if err != nil { + return + } + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + _, err = 
w.Write(sig.RSASignature.EncodedBytes()) + case PubKeyAlgoDSA: + if _, err = w.Write(sig.DSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.DSASigS.EncodedBytes()) + case PubKeyAlgoECDSA: + if _, err = w.Write(sig.ECDSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.ECDSASigS.EncodedBytes()) + case PubKeyAlgoEdDSA: + if _, err = w.Write(sig.EdDSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.EdDSASigS.EncodedBytes()) + case PubKeyAlgoEd25519: + err = ed25519.WriteSignature(w, sig.EdSig) + case PubKeyAlgoEd448: + err = ed448.WriteSignature(w, sig.EdSig) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. + subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil && sig.Version == 4 { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) + } + if sig.IssuerFingerprint != nil { + contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) + subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version >= 5, contents}) + } + if sig.SignerUserId != nil { + subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)}) + } + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. + + if sig.FlagsValid { + var flags byte + if sig.FlagCertify { + flags |= KeyFlagCertify + } + if sig.FlagSign { + flags |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + flags |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + flags |= KeyFlagEncryptStorage + } + if sig.FlagSplitKey { + flags |= KeyFlagSplitKey + } + if sig.FlagAuthenticate { + flags |= KeyFlagAuthenticate + } + if sig.FlagGroupKey { + flags |= KeyFlagGroupKey + } + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + } + + for _, notation := range sig.Notations { + subpackets = append( + subpackets, + outputSubpacket{ + true, + notationDataSubpacket, + notation.IsCritical, + notation.getData(), + }) + } + + for _, recipient := range sig.IntendedRecipients { + subpackets = append( + subpackets, + outputSubpacket{ + true, + intendedRecipientSubpacket, + false, + recipient.Serialize(), + }) + } + + // The following subpackets may only appear in self-signatures. 
+
+	var features = byte(0x00)
+	if sig.SEIPDv1 {
+		features |= 0x01
+	}
+	if sig.SEIPDv2 {
+		features |= 0x08
+	}
+
+	if features != 0x00 {
+		subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}})
+	}
+
+	if sig.TrustLevel != 0 {
+		subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}})
+	}
+
+	if sig.TrustRegularExpression != nil {
+		// RFC specifies the string should be null-terminated; add a null byte to the end
+		subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")})
+	}
+
+	if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
+		keyLifetime := make([]byte, 4)
+		binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
+		subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime})
+	}
+
+	if sig.IsPrimaryId != nil && *sig.IsPrimaryId {
+		subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}})
+	}
+
+	if len(sig.PreferredSymmetric) > 0 {
+		subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric})
+	}
+
+	if len(sig.PreferredHash) > 0 {
+		subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash})
+	}
+
+	if len(sig.PreferredCompression) > 0 {
+		subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression})
+	}
+
+	if len(sig.PolicyURI) > 0 {
+		subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)})
+	}
+
+	if len(sig.PreferredCipherSuites) > 0 {
+		serialized := make([]byte, len(sig.PreferredCipherSuites)*2)
+		for i, cipherSuite := range sig.PreferredCipherSuites {
+			serialized[2*i] = cipherSuite[0]
+			serialized[2*i+1] = cipherSuite[1]
+		}
+		subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized})
+	}
+
+	// Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23.
+	if sig.RevocationReason != nil {
+		subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true,
+			append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)})
+	}
+
+	// EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26.
+	if sig.EmbeddedSignature != nil {
+		var buf bytes.Buffer
+		err = sig.EmbeddedSignature.serializeBody(&buf)
+		if err != nil {
+			return
+		}
+		subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()})
+	}
+
+	return
+}
+
+// AddMetadataToHashSuffix modifies the current hash suffix to include metadata
+// (format, filename, and time). Version 5 signatures protect this data by
+// including it in the hash computation. See section 5.2.4.
+func (sig *Signature) AddMetadataToHashSuffix() {
+	if sig == nil || sig.Version != 5 {
+		return
+	}
+	if sig.SigType != 0x00 && sig.SigType != 0x01 {
+		return
+	}
+	lit := sig.Metadata
+	if lit == nil {
+		// This will translate into six 0x00 bytes.
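+		// (Editorial note: one zero format octet, one zero
+		// filename-length octet and four zero time octets.)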
+		lit = &LiteralData{}
+	}
+
+	// Extract the current byte count
+	n := sig.HashSuffix[len(sig.HashSuffix)-8:]
+	l := uint64(
+		uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 |
+			uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7]))
+
+	suffix := bytes.NewBuffer(nil)
+	suffix.Write(sig.HashSuffix[:l])
+
+	// Add the metadata
+	var buf [4]byte
+	buf[0] = lit.Format
+	fileName := lit.FileName
+	if len(lit.FileName) > 255 {
+		fileName = fileName[:255]
+	}
+	buf[1] = byte(len(fileName))
+	suffix.Write(buf[:2])
+	suffix.Write([]byte(fileName))
+	binary.BigEndian.PutUint32(buf[:], lit.Time)
+	suffix.Write(buf[:])
+
+	suffix.Write([]byte{0x05, 0xff})
+	suffix.Write([]byte{
+		uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32),
+		uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
+	})
+	sig.HashSuffix = suffix.Bytes()
+}
+
+// SaltLengthForHash selects the required salt length for the given hash algorithm,
+// as per Table 23 (Hash algorithm registry) of the crypto refresh.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#section-9.5 (Crypto Refresh, Section 9.5).
+func SaltLengthForHash(hash crypto.Hash) (int, error) {
+	switch hash {
+	case crypto.SHA256, crypto.SHA224, crypto.SHA3_256:
+		return 16, nil
+	case crypto.SHA384:
+		return 24, nil
+	case crypto.SHA512, crypto.SHA3_512:
+		return 32, nil
+	default:
+		return 0, errors.UnsupportedError("hash function not supported for V6 signatures")
+	}
+}
+
+// SignatureSaltForHash generates a random signature salt
+// with the length for the given hash algorithm.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#section-9.5 (Crypto Refresh, Section 9.5).
+func SignatureSaltForHash(hash crypto.Hash, randReader io.Reader) ([]byte, error) {
+	saltLength, err := SaltLengthForHash(hash)
+	if err != nil {
+		return nil, err
+	}
+	salt := make([]byte, saltLength)
+	_, err = io.ReadFull(randReader, salt)
+	if err != nil {
+		return nil, err
+	}
+	return salt, nil
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
new file mode 100644
index 0000000000..c97b98b930
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
@@ -0,0 +1,315 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"crypto/sha256"
+	"io"
+	"strconv"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/s2k"
+	"golang.org/x/crypto/hkdf"
+)
+
+// This is the largest session key that we'll support. Since at most a 256-bit
+// cipher is supported in OpenPGP, this is large enough to also contain the auth tag.
+const maxSessionKeySizeInBytes = 64
+
+// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
+// 4880, section 5.3.
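+//
+// Editorial summary of the versions handled below: a v4 packet is
+// [version | cipher | S2K | optional CFB-encrypted session key]; v5 and v6
+// add an AEAD mode octet and IV and seal the session key with an AEAD, and
+// v6 additionally carries explicit octet counts.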
+type SymmetricKeyEncrypted struct {
+	Version      int
+	CipherFunc   CipherFunction
+	Mode         AEADMode
+	s2k          func(out, in []byte)
+	iv           []byte
+	encryptedKey []byte // Contains also the authentication tag for AEAD
+}
+
+// parse parses a SymmetricKeyEncrypted packet as specified in
+// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-symmetric-key-encrypted-ses
+func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
+	var buf [1]byte
+
+	// Version
+	if _, err := readFull(r, buf[:]); err != nil {
+		return err
+	}
+	ske.Version = int(buf[0])
+	if ske.Version != 4 && ske.Version != 5 && ske.Version != 6 {
+		return errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
+	}
+
+	if ske.Version > 5 {
+		// Scalar octet count
+		if _, err := readFull(r, buf[:]); err != nil {
+			return err
+		}
+	}
+
+	// Cipher function
+	if _, err := readFull(r, buf[:]); err != nil {
+		return err
+	}
+	ske.CipherFunc = CipherFunction(buf[0])
+	if !ske.CipherFunc.IsSupported() {
+		return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0])))
+	}
+
+	if ske.Version >= 5 {
+		// AEAD mode
+		if _, err := readFull(r, buf[:]); err != nil {
+			return errors.StructuralError("cannot read AEAD octet from packet")
+		}
+		ske.Mode = AEADMode(buf[0])
+	}
+
+	if ske.Version > 5 {
+		// Scalar octet count
+		if _, err := readFull(r, buf[:]); err != nil {
+			return err
+		}
+	}
+
+	var err error
+	if ske.s2k, err = s2k.Parse(r); err != nil {
+		if _, ok := err.(errors.ErrDummyPrivateKey); ok {
+			return errors.UnsupportedError("missing key GNU extension in session key")
+		}
+		return err
+	}
+
+	if ske.Version >= 5 {
+		// AEAD IV
+		iv := make([]byte, ske.Mode.IvLength())
+		_, err := readFull(r, iv)
+		if err != nil {
+			return errors.StructuralError("cannot read AEAD IV")
+		}
+
+		ske.iv = iv
+	}
+
+	encryptedKey := make([]byte, maxSessionKeySizeInBytes)
+	// The session key may follow. We just have to try and read to find
+	// out. If it exists then we limit it to maxSessionKeySizeInBytes.
+	n, err := readFull(r, encryptedKey)
+	if err != nil && err != io.ErrUnexpectedEOF {
+		return err
+	}
+
+	if n != 0 {
+		if n == maxSessionKeySizeInBytes {
+			return errors.UnsupportedError("oversized encrypted session key")
+		}
+		ske.encryptedKey = encryptedKey[:n]
+	}
+	return nil
+}
+
+// Decrypt attempts to decrypt an encrypted session key and returns the key and
+// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
+// packet.
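+//
+// Typical use (editorial sketch; ske parsed from a message stream):
+//
+//	key, cipherFunc, err := ske.Decrypt([]byte("passphrase"))
+//	// For v4 packets cipherFunc identifies the cipher of the following
+//	// SEIPD packet; v5/v6 packets return CipherFunction(0), since the
+//	// cipher is carried by the SEIPD v2 packet itself.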
+func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) {
+	key := make([]byte, ske.CipherFunc.KeySize())
+	ske.s2k(key, passphrase)
+	if len(ske.encryptedKey) == 0 {
+		return key, ske.CipherFunc, nil
+	}
+	switch ske.Version {
+	case 4:
+		plaintextKey, cipherFunc, err := ske.decryptV4(key)
+		return plaintextKey, cipherFunc, err
+	case 5, 6:
+		plaintextKey, err := ske.aeadDecrypt(ske.Version, key)
+		return plaintextKey, CipherFunction(0), err
+	}
+	err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
+	return nil, CipherFunction(0), err
+}
+
+func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, error) {
+	// the IV is all zeros
+	iv := make([]byte, ske.CipherFunc.blockSize())
+	c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv)
+	plaintextKey := make([]byte, len(ske.encryptedKey))
+	c.XORKeyStream(plaintextKey, ske.encryptedKey)
+	cipherFunc := CipherFunction(plaintextKey[0])
+	if cipherFunc.blockSize() == 0 {
+		return nil, ske.CipherFunc, errors.UnsupportedError(
+			"unknown cipher: " + strconv.Itoa(int(cipherFunc)))
+	}
+	plaintextKey = plaintextKey[1:]
+	if len(plaintextKey) != cipherFunc.KeySize() {
+		return nil, cipherFunc, errors.StructuralError(
+			"length of decrypted key not equal to cipher keysize")
+	}
+	return plaintextKey, cipherFunc, nil
+}
+
+func (ske *SymmetricKeyEncrypted) aeadDecrypt(version int, key []byte) ([]byte, error) {
+	adata := []byte{0xc3, byte(version), byte(ske.CipherFunc), byte(ske.Mode)}
+	aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata, version)
+
+	plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata)
+	if err != nil {
+		return nil, err
+	}
+	return plaintextKey, nil
+}
+
+// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w.
+// The packet contains a random session key, encrypted by a key derived from
+// the given passphrase. The session key is returned and must be passed to
+// SerializeSymmetricallyEncrypted.
+// If config is nil, sensible defaults will be used.
+func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
+	cipherFunc := config.Cipher()
+
+	sessionKey := make([]byte, cipherFunc.KeySize())
+	_, err = io.ReadFull(config.Random(), sessionKey)
+	if err != nil {
+		return
+	}
+
+	err = SerializeSymmetricKeyEncryptedReuseKey(w, sessionKey, passphrase, config)
+	if err != nil {
+		return
+	}
+
+	key = sessionKey
+	return
+}
+
+// SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w.
+// The packet contains the given session key, encrypted by a key derived from
+// the given passphrase. The given session key must also be passed to
+// SerializeSymmetricallyEncrypted.
+// If config is nil, sensible defaults will be used.
+func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) {
+	var version int
+	if config.AEAD() != nil {
+		version = 6
+	} else {
+		version = 4
+	}
+	cipherFunc := config.Cipher()
+	// cipherFunc must be AES
+	if !cipherFunc.IsSupported() || cipherFunc < CipherAES128 || cipherFunc > CipherAES256 {
+		return errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(cipherFunc)))
+	}
+
+	keySize := cipherFunc.KeySize()
+	s2kBuf := new(bytes.Buffer)
+	keyEncryptingKey := make([]byte, keySize)
+	// s2k.Serialize salts and stretches the passphrase, and writes the
+	// resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, config.S2K()) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + var packetLength int + switch version { + case 4: + packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + case 5, 6: + ivLen := config.AEAD().Mode().IvLength() + tagLen := config.AEAD().Mode().TagLength() + packetLength = 3 + len(s2kBytes) + ivLen + keySize + tagLen + } + if version > 5 { + packetLength += 2 // additional octet count fields + } + + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + // Symmetric Key Encrypted Version + buf := []byte{byte(version)} + + if version > 5 { + // Scalar octet count + buf = append(buf, byte(3+len(s2kBytes)+config.AEAD().Mode().IvLength())) + } + + // Cipher function + buf = append(buf, byte(cipherFunc)) + + if version >= 5 { + // AEAD mode + buf = append(buf, byte(config.AEAD().Mode())) + } + if version > 5 { + // Scalar octet count + buf = append(buf, byte(len(s2kBytes))) + } + _, err = w.Write(buf) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + switch version { + case 4: + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + case 5, 6: + mode := config.AEAD().Mode() + adata := []byte{0xc3, byte(version), byte(cipherFunc), byte(mode)} + aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata, version) + + // Sample iv using random reader + iv := make([]byte, config.AEAD().Mode().IvLength()) + _, err = io.ReadFull(config.Random(), iv) + if err != nil { + return + } + // Seal and write (encryptedData includes auth. tag) + + encryptedData := aead.Seal(nil, iv, sessionKey, adata) + _, err = w.Write(iv) + if err != nil { + return + } + _, err = w.Write(encryptedData) + if err != nil { + return + } + } + + return +} + +func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte, version int) (aead cipher.AEAD) { + var blockCipher cipher.Block + if version > 5 { + hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData) + + encryptionKey := make([]byte, c.KeySize()) + _, _ = readFull(hkdfReader, encryptionKey) + + blockCipher = c.new(encryptionKey) + } else { + blockCipher = c.new(inputKey) + } + return mode.new(blockCipher) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 0000000000..e9bbf0327e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,90 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +const aeadSaltSize = 32 + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted Contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. 
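+//
+// Editorial note: version 1 (SEIPD v1) protects the plaintext with a trailing
+// SHA-1 MDC packet, while version 2 (SEIPD v2) splits the plaintext into
+// AEAD-sealed chunks keyed via HKDF from the session key and Salt; both
+// paths are dispatched from Decrypt below.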
+type SymmetricallyEncrypted struct { + Version int + Contents io.Reader // contains tag for version 2 + IntegrityProtected bool // If true it is type 18 (with MDC or AEAD). False is packet type 9 + + // Specific to version 1 + prefix []byte + + // Specific to version 2 + Cipher CipherFunction + Mode AEADMode + ChunkSizeByte byte + Salt [aeadSaltSize]byte +} + +const ( + symmetricallyEncryptedVersionMdc = 1 + symmetricallyEncryptedVersionAead = 2 +) + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.IntegrityProtected { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + + switch buf[0] { + case symmetricallyEncryptedVersionMdc: + se.Version = symmetricallyEncryptedVersionMdc + case symmetricallyEncryptedVersionAead: + se.Version = symmetricallyEncryptedVersionAead + if err := se.parseAead(r); err != nil { + return err + } + default: + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.Contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted Contents of the +// packet can be read. An incorrect key will only be detected after trying +// to decrypt the entire data. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + if se.Version == symmetricallyEncryptedVersionAead { + return se.decryptAead(key) + } + + return se.decryptMdc(c, key) +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, aeadSupported bool, cipherSuite CipherSuite, key []byte, config *Config) (Contents io.WriteCloser, err error) { + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedIntegrityProtected) + if err != nil { + return + } + + if aeadSupported { + return serializeSymmetricallyEncryptedAead(ciphertext, cipherSuite, config.AEADConfig.ChunkSizeByte(), config.Random(), key) + } + + return serializeSymmetricallyEncryptedMdc(ciphertext, c, key, config) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go new file mode 100644 index 0000000000..a8ef0bbbec --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go @@ -0,0 +1,156 @@ +// Copyright 2023 Proton AG. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package packet
+
+import (
+	"crypto/cipher"
+	"crypto/sha256"
+	"io"
+	"strconv"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"golang.org/x/crypto/hkdf"
+)
+
+// parseAead parses a V2 SEIPD packet (AEAD) as specified in
+// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
+func (se *SymmetricallyEncrypted) parseAead(r io.Reader) error {
+	headerData := make([]byte, 3)
+	if n, err := io.ReadFull(r, headerData); n < 3 {
+		return errors.StructuralError("could not read aead header: " + err.Error())
+	}
+
+	// Cipher
+	se.Cipher = CipherFunction(headerData[0])
+	// cipherFunc must have block size 16 to use AEAD
+	if se.Cipher.blockSize() != 16 {
+		return errors.UnsupportedError("invalid aead cipher: " + strconv.Itoa(int(se.Cipher)))
+	}
+
+	// Mode
+	se.Mode = AEADMode(headerData[1])
+	if se.Mode.TagLength() == 0 {
+		return errors.UnsupportedError("unknown aead mode: " + strconv.Itoa(int(se.Mode)))
+	}
+
+	// Chunk size
+	se.ChunkSizeByte = headerData[2]
+	if se.ChunkSizeByte > 16 {
+		return errors.UnsupportedError("invalid aead chunk size byte: " + strconv.Itoa(int(se.ChunkSizeByte)))
+	}
+
+	// Salt
+	if n, err := io.ReadFull(r, se.Salt[:]); n < aeadSaltSize {
+		return errors.StructuralError("could not read aead salt: " + err.Error())
+	}
+
+	return nil
+}
+
+// associatedData for chunks: tag, version, cipher, mode, chunk size byte
+func (se *SymmetricallyEncrypted) associatedData() []byte {
+	return []byte{
+		0xD2,
+		symmetricallyEncryptedVersionAead,
+		byte(se.Cipher),
+		byte(se.Mode),
+		se.ChunkSizeByte,
+	}
+}
+
+// decryptAead decrypts a V2 SEIPD packet (AEAD) as specified in
+// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
+func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, error) {
+	aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData())
+
+	// Carry the first tagLen bytes
+	tagLen := se.Mode.TagLength()
+	peekedBytes := make([]byte, tagLen)
+	n, err := io.ReadFull(se.Contents, peekedBytes)
+	if n < tagLen || (err != nil && err != io.EOF) {
+		return nil, errors.StructuralError("not enough data to decrypt: " + err.Error())
+	}
+
+	return &aeadDecrypter{
+		aeadCrypter: aeadCrypter{
+			aead:           aead,
+			chunkSize:      decodeAEADChunkSize(se.ChunkSizeByte),
+			initialNonce:   nonce,
+			associatedData: se.associatedData(),
+			chunkIndex:     make([]byte, 8),
+			packetTag:      packetTypeSymmetricallyEncryptedIntegrityProtected,
+		},
+		reader:      se.Contents,
+		peekedBytes: peekedBytes,
+	}, nil
+}
+
+// serializeSymmetricallyEncryptedAead encrypts to a writer a V2 SEIPD packet (AEAD) as specified in
+// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
+func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite CipherSuite, chunkSizeByte byte, rand io.Reader, inputKey []byte) (Contents io.WriteCloser, err error) {
+	// cipherFunc must have block size 16 to use AEAD
+	if cipherSuite.Cipher.blockSize() != 16 {
+		return nil, errors.InvalidArgumentError("invalid aead cipher function")
+	}
+
+	if cipherSuite.Cipher.KeySize() != len(inputKey) {
+		return nil, errors.InvalidArgumentError("error in aead serialization: bad key length")
+	}
+
+	// Data for en/decryption: tag, version, cipher, aead mode, chunk size
+	prefix := []byte{
+		0xD2,
+		symmetricallyEncryptedVersionAead,
+		byte(cipherSuite.Cipher),
+		byte(cipherSuite.Mode),
+		chunkSizeByte,
+	}
+
+	// Write header (that corresponds to the prefix except the first byte)
+	n, err :=
ciphertext.Write(prefix[1:]) + if err != nil || n < 4 { + return nil, err + } + + // Random salt + salt := make([]byte, aeadSaltSize) + if _, err := io.ReadFull(rand, salt); err != nil { + return nil, err + } + + if _, err := ciphertext.Write(salt); err != nil { + return nil, err + } + + aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix) + + return &aeadEncrypter{ + aeadCrypter: aeadCrypter{ + aead: aead, + chunkSize: decodeAEADChunkSize(chunkSizeByte), + associatedData: prefix, + chunkIndex: make([]byte, 8), + initialNonce: nonce, + packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, + }, + writer: ciphertext, + }, nil +} + +func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inputKey, salt, associatedData []byte) (aead cipher.AEAD, nonce []byte) { + hkdfReader := hkdf.New(sha256.New, inputKey, salt, associatedData) + + encryptionKey := make([]byte, c.KeySize()) + _, _ = readFull(hkdfReader, encryptionKey) + + // Last 64 bits of nonce are the counter + nonce = make([]byte, mode.IvLength()-8) + + _, _ = readFull(hkdfReader, nonce) + + blockCipher := c.new(encryptionKey) + aead = mode.new(blockCipher) + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go new file mode 100644 index 0000000000..645963fa78 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go @@ -0,0 +1,259 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "hash" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// seMdcReader wraps an io.Reader with a no-op Close method. +type seMdcReader struct { + in io.Reader +} + +func (ser seMdcReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} + +func (ser seMdcReader) Close() error { + return nil +} + +func (se *SymmetricallyEncrypted) decryptMdc(c CipherFunction, key []byte) (io.ReadCloser, error) { + if !c.IsSupported() { + return nil, errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(c))) + } + + if len(key) != c.KeySize() { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.Contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.IntegrityProtected { + // MDC packets use a different form of OCFB mode. + ocfbResync = OCFBNoResync + } + + s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + + plaintext := cipher.StreamReader{S: s, R: se.Contents} + + if se.IntegrityProtected { + // IntegrityProtected packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. 
+	return seMdcReader{plaintext}, nil
+}
+
+const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
+
+// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
+// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
+// MDC packet containing a hash of the previous Contents which is checked
+// against the running hash. See RFC 4880, section 5.13.
+type seMDCReader struct {
+	in          io.Reader
+	h           hash.Hash
+	trailer     [mdcTrailerSize]byte
+	scratch     [mdcTrailerSize]byte
+	trailerUsed int
+	error       bool
+	eof         bool
+}
+
+func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
+	if ser.error {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	if ser.eof {
+		err = io.EOF
+		return
+	}
+
+	// If we haven't yet filled the trailer buffer then we must do that
+	// first.
+	for ser.trailerUsed < mdcTrailerSize {
+		n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
+		ser.trailerUsed += n
+		if err == io.EOF {
+			if ser.trailerUsed != mdcTrailerSize {
+				n = 0
+				err = io.ErrUnexpectedEOF
+				ser.error = true
+				return
+			}
+			ser.eof = true
+			n = 0
+			return
+		}
+
+		if err != nil {
+			n = 0
+			return
+		}
+	}
+
+	// If it's a short read then we read into a temporary buffer and shift
+	// the data into the caller's buffer.
+	if len(buf) <= mdcTrailerSize {
+		n, err = readFull(ser.in, ser.scratch[:len(buf)])
+		copy(buf, ser.trailer[:n])
+		ser.h.Write(buf[:n])
+		copy(ser.trailer[:], ser.trailer[n:])
+		copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
+		if n < len(buf) {
+			ser.eof = true
+			err = io.EOF
+		}
+		return
+	}
+
+	n, err = ser.in.Read(buf[mdcTrailerSize:])
+	copy(buf, ser.trailer[:])
+	ser.h.Write(buf[:n])
+	copy(ser.trailer[:], buf[n:])
+
+	if err == io.EOF {
+		ser.eof = true
+	}
+	return
+}
+
+// This is a new-format packet tag byte for a type 19 (Integrity Protected) packet.
+const mdcPacketTagByte = byte(0x80) | 0x40 | 19
+
+func (ser *seMDCReader) Close() error {
+	if ser.error {
+		return errors.ErrMDCMissing
+	}
+
+	for !ser.eof {
+		// We haven't seen EOF so we need to read to the end
+		var buf [1024]byte
+		_, err := ser.Read(buf[:])
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return errors.ErrMDCMissing
+		}
+	}
+
+	ser.h.Write(ser.trailer[:2])
+
+	final := ser.h.Sum(nil)
+	if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
+		return errors.ErrMDCHashMismatch
+	}
+	// The hash already includes the MDC header, but we still check its value
+	// to confirm encryption correctness
+	if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
+		return errors.ErrMDCMissing
+	}
+	return nil
+}
+
+// An seMDCWriter writes through to an io.WriteCloser while maintaining a
+// running hash of the data written. On close, it emits an MDC packet
+// containing the running hash.
+type seMDCWriter struct {
+	w io.WriteCloser
+	h hash.Hash
+}
+
+func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
+	w.h.Write(buf)
+	return w.w.Write(buf)
+}
+
+func (w *seMDCWriter) Close() (err error) {
+	var buf [mdcTrailerSize]byte
+
+	buf[0] = mdcPacketTagByte
+	buf[1] = sha1.Size
+	w.h.Write(buf[:2])
+	digest := w.h.Sum(nil)
+	copy(buf[2:], digest)
+
+	_, err = w.w.Write(buf[:])
+	if err != nil {
+		return
+	}
+	return w.w.Close()
+}
+
+// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
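+//
+// Editorial note: it lets SerializeSymmetricallyEncrypted hand the caller's
+// plain io.Writer to serializeStreamHeader, which requires an io.WriteCloser,
+// without the packet machinery closing the underlying writer.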
+// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
+type noOpCloser struct {
+	w io.Writer
+}
+
+func (c noOpCloser) Write(data []byte) (n int, err error) {
+	return c.w.Write(data)
+}
+
+func (c noOpCloser) Close() error {
+	return nil
+}
+
+func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) {
+	// Disallow old cipher suites
+	if !c.IsSupported() || c < CipherAES128 {
+		return nil, errors.InvalidArgumentError("invalid mdc cipher function")
+	}
+
+	if c.KeySize() != len(key) {
+		return nil, errors.InvalidArgumentError("error in mdc serialization: bad key length")
+	}
+
+	_, err = ciphertext.Write([]byte{symmetricallyEncryptedVersionMdc})
+	if err != nil {
+		return
+	}
+
+	block := c.new(key)
+	blockSize := block.BlockSize()
+	iv := make([]byte, blockSize)
+	_, err = io.ReadFull(config.Random(), iv)
+	if err != nil {
+		return nil, err
+	}
+	s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
+	_, err = ciphertext.Write(prefix)
+	if err != nil {
+		return
+	}
+	plaintext := cipher.StreamWriter{S: s, W: ciphertext}
+
+	h := sha1.New()
+	h.Write(iv)
+	h.Write(iv[blockSize-2:])
+	Contents = &seMDCWriter{w: plaintext, h: h}
+	return
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
similarity index 89%
rename from vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
rename to vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
index ff7ef53075..63814ed132 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
@@ -41,9 +41,16 @@ func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error
 		if err = jpeg.Encode(&buf, photo, nil); err != nil {
 			return
 		}
+
+		lengthBuf := make([]byte, 5)
+		n := serializeSubpacketLength(lengthBuf, len(buf.Bytes())+1)
+		lengthBuf = lengthBuf[:n]
+
 		uat.Contents = append(uat.Contents, &OpaqueSubpacket{
-			SubType:  UserAttrImageSubpacket,
-			Contents: buf.Bytes()})
+			SubType:       UserAttrImageSubpacket,
+			EncodedLength: lengthBuf,
+			Contents:      buf.Bytes(),
+		})
 	}
 	return
 }
@@ -68,7 +75,10 @@ func (uat *UserAttribute) parse(r io.Reader) (err error) {
 func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
 	var buf bytes.Buffer
 	for _, sp := range uat.Contents {
-		sp.Serialize(&buf)
+		err = sp.Serialize(&buf)
+		if err != nil {
+			return err
+		}
 	}
 	if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
 		return err
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
similarity index 96%
rename from vendor/golang.org/x/crypto/openpgp/packet/userid.go
rename to vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
index 359a462eb8..3c7451a3c3 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/userid.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
@@ -155,5 +155,12 @@ func parseUserId(id string) (name, comment, email string) {
 	name = strings.TrimSpace(id[n.start:n.end])
 	comment = strings.TrimSpace(id[c.start:c.end])
 	email = strings.TrimSpace(id[e.start:e.end])
+
+	// RFC 2822 3.4: alternate simple form of a mailbox
+	if email == "" && strings.ContainsRune(name, '@') {
+		email = name
+		name = ""
+	}
+
 	return
 }
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
new
file mode 100644 index 0000000000..ac897d709e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -0,0 +1,644 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package openpgp implements high level operations on OpenPGP messages. +package openpgp // import "github.com/ProtonMail/go-crypto/openpgp" + +import ( + "bytes" + "crypto" + _ "crypto/sha256" + _ "crypto/sha512" + "hash" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/packet" + _ "golang.org/x/crypto/sha3" +) + +// SignatureType is the armor type for a PGP signature. +var SignatureType = "PGP SIGNATURE" + +// readArmored reads an armored block with the given type. +func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. +type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedByFingerprint []byte // the key fingerprint of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can be trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + Signature *packet.Signature // the signature packet itself. + SignatureError error // nil if the signature is good. + UnverifiedSignatures []*packet.Signature // all other unverified signature packets. + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. 
+type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. +func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + // Integrity protected encrypted packet: SymmetricallyEncrypted or AEADEncrypted + var edp packet.EncryptedDataPacket + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. +ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. + md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH, packet.PubKeyAlgoX25519, packet.PubKeyAlgoX448: + break + default: + continue + } + if keyring != nil { + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + } + case *packet.SymmetricallyEncrypted: + if !p.IntegrityProtected && !config.AllowUnauthenticatedMessages() { + return nil, errors.UnsupportedError("message is not integrity protected") + } + edp = p + break ParsePackets + case *packet.AEADEncrypted: + edp = p + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring, config) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. 
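// Aside (not part of the vendored file): one way to satisfy the
// PromptFunction contract above — a fixed passphrase, returned directly for
// symmetric decryption and otherwise used to unlock one of the offered
// private keys in place. fixedPassphrase is a hypothetical helper; note that
// ReadMessage will keep calling the prompt if decryption keeps failing, so a
// real implementation should limit its attempts.
package pgpexample

import (
	goerrors "errors"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func fixedPassphrase(pass []byte) openpgp.PromptFunction {
	return func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
		if symmetric {
			return pass, nil // try pass as the message passphrase
		}
		for i := range keys {
			// Decrypting a key in place lets ReadMessage's next pass use it.
			if err := keys[i].PrivateKey.Decrypt(pass); err == nil {
				return nil, nil
			}
		}
		return nil, goerrors.New("passphrase unlocked no offered key")
	}
}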
+FindKey:
+	for {
+		// See if any of the keys already have a private key available
+		candidates = candidates[:0]
+		candidateFingerprints := make(map[string]bool)
+
+		for _, pk := range pubKeys {
+			if pk.key.PrivateKey == nil {
+				continue
+			}
+			if !pk.key.PrivateKey.Encrypted {
+				if len(pk.encryptedKey.Key) == 0 {
+					errDec := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
+					if errDec != nil {
+						continue
+					}
+				}
+				// Try to decrypt the symmetrically encrypted data
+				decrypted, err = edp.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
+				if err != nil && err != errors.ErrKeyIncorrect {
+					return nil, err
+				}
+				if decrypted != nil {
+					md.DecryptedWith = pk.key
+					break FindKey
+				}
+			} else {
+				fpr := string(pk.key.PublicKey.Fingerprint[:])
+				if v := candidateFingerprints[fpr]; v {
+					continue
+				}
+				candidates = append(candidates, pk.key)
+				candidateFingerprints[fpr] = true
+			}
+		}
+
+		if len(candidates) == 0 && len(symKeys) == 0 {
+			return nil, errors.ErrKeyIncorrect
+		}
+
+		if prompt == nil {
+			return nil, errors.ErrKeyIncorrect
+		}
+
+		passphrase, err := prompt(candidates, len(symKeys) != 0)
+		if err != nil {
+			return nil, err
+		}
+
+		// Try the symmetric passphrase first
+		if len(symKeys) != 0 && passphrase != nil {
+			for _, s := range symKeys {
+				key, cipherFunc, err := s.Decrypt(passphrase)
+				// In v4, with a wrong passphrase, session-key decryption is very likely to
+				// yield an invalid cipherFunc: we proceed to decrypt the data in fewer than
+				// 5% of such cases.
+				if err == nil {
+					decrypted, err = edp.Decrypt(cipherFunc, key)
+					if err != nil {
+						return nil, err
+					}
+					if decrypted != nil {
+						break FindKey
+					}
+				}
+			}
+		}
+	}
+
+	md.decrypted = decrypted
+	if err := packets.Push(decrypted); err != nil {
+		return nil, err
+	}
+	mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config)
+	if sensitiveParsingErr != nil {
+		return nil, errors.StructuralError("parsing error")
+	}
+	return mdFinal, nil
+}
+
+// readSignedMessage reads a possibly signed message. If mdin is non-nil then
+// that structure is updated and returned. Otherwise a fresh MessageDetails is
+// used.
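// Aside (not part of the vendored file): the caller-side contract implied by
// ReadMessage above, sketched assuming a keyring already loaded (e.g. via
// openpgp.ReadArmoredKeyRing). UnverifiedBody must be drained to EOF before
// SignatureError and the integrity (MDC/AEAD) result mean anything.
package pgpexample

import (
	"fmt"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func decryptAndVerify(msg io.Reader, keyring openpgp.KeyRing) ([]byte, error) {
	md, err := openpgp.ReadMessage(msg, keyring, nil, nil)
	if err != nil {
		return nil, err
	}
	// Reading to EOF triggers the trailing integrity and signature checks.
	body, err := io.ReadAll(md.UnverifiedBody)
	if err != nil {
		return nil, err // includes MDC failures surfaced at EOF
	}
	if md.IsSigned && md.SignatureError != nil {
		return nil, fmt.Errorf("bad signature: %w", md.SignatureError)
	}
	return body, nil
}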
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing, config *packet.Config) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash + var prevLast bool +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if prevLast { + return nil, errors.UnsupportedError("nested signature packets") + } + + if p.IsLast { + prevLast = true + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType, p.Salt) + if err != nil { + md.SignatureError = err + } + + md.IsSigned = true + if p.Version == 6 { + md.SignedByFingerprint = p.KeyFingerprint + } + md.SignedByKeyId = p.KeyId + + if keyring != nil { + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.IsSigned && md.SignatureError == nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config} + } else if md.decrypted != nil { + md.UnverifiedBody = &checkReader{md, false} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +func wrapHashForSignature(hashFunc hash.Hash, sigType packet.SignatureType) (hash.Hash, error) { + switch sigType { + case packet.SigTypeBinary: + return hashFunc, nil + case packet.SigTypeText: + return NewCanonicalTextHash(hashFunc), nil + } + return nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType, sigSalt []byte) (hash.Hash, hash.Hash, error) { + if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok { + return nil, nil, errors.UnsupportedError("unsupported hash function") + } + if !hashFunc.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc))) + } + h := hashFunc.New() + if sigSalt != nil { + h.Write(sigSalt) + } + wrappedHash, err := wrapHashForSignature(h, sigType) + if err != nil { + return nil, nil, err + } + switch sigType { + case packet.SigTypeBinary: + return h, wrappedHash, nil + case packet.SigTypeText: + return h, wrappedHash, nil + } + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. 
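// Aside (not part of the vendored file): the line-ending normalization behind
// wrapHashForSignature's SigTypeText branch, shown standalone. Canonical-text
// signatures (RFC 4880, section 5.2.1) hash the document with CRLF line
// endings, so LF-only and CRLF inputs verify identically; NewCanonicalTextHash
// applies this transform while streaming, whereas this sketch normalizes the
// whole string up front.
package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

func canonicalize(s string) string {
	s = strings.ReplaceAll(s, "\r\n", "\n")
	return strings.ReplaceAll(s, "\n", "\r\n")
}

func main() {
	a := sha256.Sum256([]byte(canonicalize("line 1\nline 2\n")))
	b := sha256.Sum256([]byte(canonicalize("line 1\r\nline 2\r\n")))
	fmt.Println(a == b) // true: both hash the canonical CRLF form
}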
+type checkReader struct { + md *MessageDetails + checked bool +} + +func (cr *checkReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf) + if sensitiveParsingError == io.EOF { + if cr.checked { + // Only check once + return n, io.EOF + } + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr + } + cr.checked = true + return n, io.EOF + } + + if sensitiveParsingError != nil { + return n, errors.StructuralError("parsing error") + } + + return n, nil +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. +type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails + config *packet.Config +} + +func (scr *signatureCheckReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := scr.md.LiteralData.Body.Read(buf) + + // Hash only if required + if scr.md.SignedBy != nil { + scr.wrappedHash.Write(buf[:n]) + } + + if sensitiveParsingError == io.EOF { + var p packet.Packet + var readError error + var sig *packet.Signature + + p, readError = scr.packets.Next() + for readError == nil { + var ok bool + if sig, ok = p.(*packet.Signature); ok { + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.Metadata = scr.md.LiteralData + } + + // If signature KeyID matches + if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId { + key := scr.md.SignedBy + signatureError := key.PublicKey.VerifySignature(scr.h, sig) + if signatureError == nil { + signatureError = checkSignatureDetails(key, sig, scr.config) + } + scr.md.Signature = sig + scr.md.SignatureError = signatureError + } else { + scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig) + } + } + + p, readError = scr.packets.Next() + } + + if scr.md.SignedBy != nil && scr.md.Signature == nil { + if scr.md.UnverifiedSignatures == nil { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by signature") + } else { + scr.md.SignatureError = errors.StructuralError("No matching signature found") + } + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. + if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr + } + } + return n, io.EOF + } + + if sensitiveParsingError != nil { + return n, errors.StructuralError("parsing error") + } + + return n, nil +} + +// VerifyDetachedSignature takes a signed file and a detached signature and +// returns the signature packet and the entity the signature was signed by, +// if any, and a possible signature verification error. +// If the signer isn't known, ErrUnknownIssuer is returned. +func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { + return verifyDetachedSignature(keyring, signed, signature, nil, nil, false, config) +} + +// VerifyDetachedSignatureAndHash performs the same actions as +// VerifyDetachedSignature and checks that the expected hash functions were used. 
+func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { + return verifyDetachedSignature(keyring, signed, signature, expectedHashes, nil, true, config) +} + +// VerifyDetachedSignatureAndSaltedHash performs the same actions as +// VerifyDetachedSignature and checks that the expected hash functions and salts were used. +func VerifyDetachedSignatureAndSaltedHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, expectedSaltedHashes []*packet.SaltedHashSpecifier, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { + return verifyDetachedSignature(keyring, signed, signature, expectedHashes, expectedSaltedHashes, true, config) +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the entity the signature was signed by, if any, and a possible +// signature verification error. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { + _, signer, err = verifyDetachedSignature(keyring, signed, signature, nil, nil, false, config) + return +} + +// CheckDetachedSignatureAndSaltedHash performs the same actions as +// CheckDetachedSignature and checks that the expected hash functions or salted hash functions were used. +func CheckDetachedSignatureAndSaltedHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, expectedSaltedHashes []*packet.SaltedHashSpecifier, config *packet.Config) (signer *Entity, err error) { + _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, expectedSaltedHashes, true, config) + return +} + +// CheckDetachedSignatureAndHash performs the same actions as +// CheckDetachedSignature and checks that the expected hash functions were used. 
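// Aside (not part of the vendored file): a usage sketch for the
// detached-signature entry points in this file. The file paths and the
// soft-failure policy are illustrative only; per the NOTE on
// checkSignatureDetails further down, expiration errors are the only ones a
// caller might reasonably choose to tolerate. The sentinel errors come from
// the openpgp/errors package referenced throughout the code above.
package pgpexample

import (
	"log"
	"os"

	"github.com/ProtonMail/go-crypto/openpgp"
	pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors"
)

func verifyDetached(keyring openpgp.EntityList, signedPath, sigPath string) error {
	signed, err := os.Open(signedPath)
	if err != nil {
		return err
	}
	defer signed.Close()

	sig, err := os.Open(sigPath)
	if err != nil {
		return err
	}
	defer sig.Close()

	// nil config uses the defaults; errors.ErrUnknownIssuer is returned when
	// no key in the ring matches the signature's issuer key ID.
	_, err = openpgp.CheckArmoredDetachedSignature(keyring, signed, sig, nil)
	switch err {
	case pgperrors.ErrSignatureExpired, pgperrors.ErrKeyExpired:
		log.Printf("accepting expired-but-valid signature: %v", err)
		return nil
	default:
		return err // nil on success
	}
}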
+func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) {
+	_, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, nil, true, config)
+	return
+}
+
+func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, expectedSaltedHashes []*packet.SaltedHashSpecifier, checkHashes bool, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
+	var issuerKeyId uint64
+	var hashFunc crypto.Hash
+	var sigType packet.SignatureType
+	var keys []Key
+	var p packet.Packet
+
+	packets := packet.NewReader(signature)
+	for {
+		p, err = packets.Next()
+		if err == io.EOF {
+			return nil, nil, errors.ErrUnknownIssuer
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+
+		var ok bool
+		sig, ok = p.(*packet.Signature)
+		if !ok {
+			return nil, nil, errors.StructuralError("non signature packet found")
+		}
+		if sig.IssuerKeyId == nil {
+			return nil, nil, errors.StructuralError("signature doesn't have an issuer")
+		}
+		issuerKeyId = *sig.IssuerKeyId
+		hashFunc = sig.Hash
+		sigType = sig.SigType
+		if checkHashes {
+			matchFound := false
+			if sig.Version == 6 {
+				// check for salted hashes
+				for _, expectedSaltedHash := range expectedSaltedHashes {
+					if hashFunc == expectedSaltedHash.Hash && bytes.Equal(sig.Salt(), expectedSaltedHash.Salt) {
+						matchFound = true
+						break
+					}
+				}
+			} else {
+				// check for hashes
+				for _, expectedHash := range expectedHashes {
+					if hashFunc == expectedHash {
+						matchFound = true
+						break
+					}
+				}
+			}
+			if !matchFound {
+				return nil, nil, errors.StructuralError("hash algorithm or salt mismatch with cleartext message headers")
+			}
+		}
+		keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign)
+		if len(keys) > 0 {
+			break
+		}
+	}
+
+	if len(keys) == 0 {
+		panic("unreachable")
+	}
+
+	h, err := sig.PrepareVerify()
+	if err != nil {
+		return nil, nil, err
+	}
+	wrappedHash, err := wrapHashForSignature(h, sigType)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
+		return nil, nil, err
+	}
+
+	for _, key := range keys {
+		err = key.PublicKey.VerifySignature(h, sig)
+		if err == nil {
+			return sig, key.Entity, checkSignatureDetails(&key, sig, config)
+		}
+	}
+
+	return nil, nil, err
+}
+
+// CheckArmoredDetachedSignature performs the same actions as
+// CheckDetachedSignature but expects the signature to be armored.
+func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) {
+	body, err := readArmored(signature, SignatureType)
+	if err != nil {
+		return
+	}
+
+	return CheckDetachedSignature(keyring, signed, body, config)
+}
+
+// checkSignatureDetails returns an error if:
+//   - The signature (or one of the binding signatures mentioned below)
+//     has an unknown critical notation data subpacket
+//   - The primary key of the signing entity is revoked
+//   - The primary identity is revoked
+//   - The signature is expired
+//   - The primary key of the signing entity is expired according to the
+//     primary identity binding signature
+//
+// ...
or, if the signature was signed by a subkey and: +// - The signing subkey is revoked +// - The signing subkey is expired according to the subkey binding signature +// - The signing subkey binding signature is expired +// - The signing subkey cross-signature is expired +// +// NOTE: The order of these checks is important, as the caller may choose to +// ignore ErrSignatureExpired or ErrKeyExpired errors, but should never +// ignore any other errors. +func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error { + now := config.Now() + primarySelfSignature, primaryIdentity := key.Entity.PrimarySelfSignature() + signedBySubKey := key.PublicKey != key.Entity.PrimaryKey + sigsToCheck := []*packet.Signature{signature, primarySelfSignature} + if signedBySubKey { + sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature) + } + for _, sig := range sigsToCheck { + for _, notation := range sig.Notations { + if notation.IsCritical && !config.KnownNotation(notation.Name) { + return errors.SignatureError("unknown critical notation: " + notation.Name) + } + } + } + if key.Entity.Revoked(now) || // primary key is revoked + (signedBySubKey && key.Revoked(now)) || // subkey is revoked + (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // primary identity is revoked for v4 + return errors.ErrKeyRevoked + } + if key.Entity.PrimaryKey.KeyExpired(primarySelfSignature, now) { // primary key is expired + return errors.ErrKeyExpired + } + if signedBySubKey { + if key.PublicKey.KeyExpired(key.SelfSignature, now) { // subkey is expired + return errors.ErrKeyExpired + } + } + for _, sig := range sigsToCheck { + if sig.SigExpired(now) { // any of the relevant signatures are expired + return errors.ErrSignatureExpired + } + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go new file mode 100644 index 0000000000..670d60226a --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go @@ -0,0 +1,457 @@ +package openpgp + +const testKey1KeyId uint64 = 0xA34D7E18C20C31BB +const testKey3KeyId uint64 = 0x338934250CCC0360 +const testKeyP256KeyId uint64 = 0xd44a2c495918513e + +const signedInput = "Signed message\nline 2\nline 3\n" +const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n" + +const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b" + +const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77" + +const detachedSignatureTextHex = 
"889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39" + +const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83" + +const detachedSignatureP256Hex = "885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817" + +// The plaintext is https://www.gutenberg.org/cache/epub/1080/pg1080.txt +const modestProposalSha512 = "lbbrB1+WP3T9AaC9OQqBdOcCjgeEQadlulXsNPgVx0tyqPzDHwUugZ2gE7V0ESKAw6kAVfgkcuvfgxAAGaeHtw==" + +const testKeys1And2Hex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474
bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003" + +const testKeys1And2PrivateHex = "9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e
3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000" + +const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" + +const ed25519wX25519Key = "c54b0663877fe31b00000020f94da7bb48d60a61e567706a6587d0331999bb9d891a08242ead84543df895a3001972817b12be707e8d5f586ce61361201d344eb266a2c82fde6835762b65b0b7c2b1061f1b0a00000042058263877fe3030b090705150a0e080c021600029b03021e09222106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc905270902070200000000ad2820103e2d7d227ec0e6d7ce4471db36bfc97083253690271498a7ef0576c07faae14585b3b903b0127ec4fda2f023045a2ec76bcb4f9571a9651e14aee1137a1d668442c88f951e33c4ffd33fb9a17d511eed758fc6d9cc50cb5fd793b2039d5804c74b0663877fe319000000208693248367f9e5015db922f8f48095dda784987f2d5985b12fbad16caf5e4435004d600a4f794d44775c57a26e0feefed558e9afffd6ad0d582d57fb2ba2dcedb8c29b06181b0a0000002c050263877fe322a106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc9021b0c00000000defa20a6e9186d9d5935fc8fe56314cdb527486a5a5120f9b762a235a729f039010a56b89c658568341fbef3b894e9834ad9bc72afae2f4c9c47a43855e65f1cb0a3f77bbc5f61085c1f8249fe4e7ca59af5f0bcee9398e0fa8d76e522e1d8ab42bb0d" + +const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" + +const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" + +const signedEncryptedMessageHex = 
"c18c032a67d68660df41c70103ff5a84c9a72f80e74ef0384c2d6a9ebfe2b09e06a8f298394f6d2abf174e40934ab0ec01fb2d0ddf21211c6fe13eb238563663b017a6b44edca552eb4736c4b7dc6ed907dd9e12a21b51b64b46f902f76fb7aaf805c1db8070574d8d0431a23e324a750f77fb72340a17a42300ee4ca8207301e95a731da229a63ab9c6b44541fbd2c11d016d810b3b3b2b38f15b5b40f0a4910332829c2062f1f7cc61f5b03677d73c54cafa1004ced41f315d46444946faae571d6f426e6dbd45d9780eb466df042005298adabf7ce0ef766dfeb94cd449c7ed0046c880339599c4711af073ce649b1e237c40b50a5536283e03bdbb7afad78bd08707715c67fb43295f905b4c479178809d429a8e167a9a8c6dfd8ab20b4edebdc38d6dec879a3202e1b752690d9bb5b0c07c5a227c79cc200e713a99251a4219d62ad5556900cf69bd384b6c8e726c7be267471d0d23af956da165af4af757246c2ebcc302b39e8ef2fccb4971b234fcda22d759ddb20e27269ee7f7fe67898a9de721bfa02ab0becaa046d00ea16cb1afc4e2eab40d0ac17121c565686e5cbd0cbdfbd9d6db5c70278b9c9db5a83176d04f61fbfbc4471d721340ede2746e5c312ded4f26787985af92b64fae3f253dbdde97f6a5e1996fd4d865599e32ff76325d3e9abe93184c02988ee89a4504356a4ef3b9b7a57cbb9637ca90af34a7676b9ef559325c3cca4e29d69fec1887f5440bb101361d744ad292a8547f22b4f22b419a42aa836169b89190f46d9560824cb2ac6e8771de8223216a5e647e132ab9eebcba89569ab339cb1c3d70fe806b31f4f4c600b4103b8d7583ebff16e43dcda551e6530f975122eb8b29" + +const verifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" + +const unverifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" + +const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3" + +const signatureEncryptedMessage2Hex = "c24604001102000605024dfd0166000a091033af447ccd759b09bae600a096ec5e63ecf0a403085e10f75cc3bab327663282009f51fad9df457ed8d2b70d8a73c76e0443eac0f377" + +const symmetricallyEncryptedCompressedHex = "c32e040903085a357c1a7b5614ed00cc0d1d92f428162058b3f558a0fb0980d221ebac6c97d5eda4e0fe32f6e706e94dd263012d6ca1ef8c4bbd324098225e603a10c85ebf09cbf7b5aeeb5ce46381a52edc51038b76a8454483be74e6dcd1e50d5689a8ae7eceaeefed98a0023d49b22eb1f65c2aa1ef1783bb5e1995713b0457102ec3c3075fe871267ffa4b686ad5d52000d857" + +const 
dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const dsaTestKeyPrivateHex = "9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const p256TestKeyHex = "98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const p256TestKeyPrivateHex = 
"94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1.4.10 (GNU/Linux) + +lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp +idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn +vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB +AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X +0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL +IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk +VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn +gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9 +TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx +q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz +dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA +CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1 +ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+ +eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid +AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV +bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK +/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA +A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX +TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc +lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6 +rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN +oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8 +QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU +nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC +AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp +BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad +AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL +VrM0m72/jnpKo04= +=zNCn +-----END PGP PRIVATE KEY BLOCK-----` + +const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4 +sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk +Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/ +AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD 
+24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX ++WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8 +B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX +fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA +FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9 +ex7En5r7rHR5xwX82Msc+Rq9dSyO +=7MrZ +-----END PGP PUBLIC KEY BLOCK-----` + +const dsaKeyWithSHA512 = `9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003` + +const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` + +const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Comment: GPGTools - https://gpgtools.org + +mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY +BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z +tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0 +JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV +/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+ +K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H +JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx +YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1 +b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi +UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M +pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM +AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz +786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd +EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB +=RZia +-----END PGP PUBLIC KEY BLOCK----- +` + 
+const signedMessageV3 = `-----BEGIN PGP MESSAGE----- +Comment: GPGTools - https://gpgtools.org + +owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP +q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka +uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka +DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d +iT57d/OhWwA= +=hG7R +-----END PGP MESSAGE----- +` + +// https://mailarchive.ietf.org/arch/msg/openpgp/9SheW_LENE0Kxf7haNllovPyAdY/ +const v5PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd +fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA +Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC +X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI +CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9 +M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA +MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD +AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF +GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb +DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7 +TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== +=IiS2 +-----END PGP PRIVATE KEY BLOCK-----` + +// See OpenPGP crypto refresh Section A.3. +const v6PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xUsGY4d/4xsAAAAg+U2nu0jWCmHlZ3BqZYfQMxmZu52JGggkLq2EVD34laMAGXKB +exK+cH6NX1hs5hNhIB00TrJmosgv3mg1ditlsLfCsQYfGwoAAABCBYJjh3/jAwsJ +BwUVCg4IDAIWAAKbAwIeCSIhBssYbE8GCaaX5NUt+mxyKwwfHifBilZwj2Ul7Ce6 +2azJBScJAgcCAAAAAK0oIBA+LX0ifsDm185Ecds2v8lwgyU2kCcUmKfvBXbAf6rh +RYWzuQOwEn7E/aLwIwRaLsdry0+VcallHhSu4RN6HWaEQsiPlR4zxP/TP7mhfVEe +7XWPxtnMUMtf15OyA51YBMdLBmOHf+MZAAAAIIaTJINn+eUBXbki+PSAld2nhJh/ +LVmFsS+60WyvXkQ1AE1gCk95TUR3XFeibg/u/tVY6a//1q0NWC1X+yui3O24wpsG +GBsKAAAALAWCY4d/4wKbDCIhBssYbE8GCaaX5NUt+mxyKwwfHifBilZwj2Ul7Ce6 +2azJAAAAAAQBIKbpGG2dWTX8j+VjFM21J0hqWlEg+bdiojWnKfA5AQpWUWtnNwDE +M0g12vYxoWM8Y81W+bHBw805I8kWVkXU6vFOi+HWvv/ira7ofJu16NnoUkhclkUr +k0mXubZvyl4GBg== +-----END PGP PRIVATE KEY BLOCK-----` + +// See OpenPGP crypto refresh merge request: +// https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/304 +const v6PrivKeyMsg = `-----BEGIN PGP MESSAGE----- + +wV0GIQYSyD8ecG9jCP4VGkF3Q6HwM3kOk+mXhIjR2zeNqZMIhRmHzxjV8bU/gXzO +WgBM85PMiVi93AZfJfhK9QmxfdNnZBjeo1VDeVZheQHgaVf7yopqR6W1FT6NOrfS +aQIHAgZhZBZTW+CwcW1g4FKlbExAf56zaw76/prQoN+bAzxpohup69LA7JW/Vp0l +yZnuSj3hcFj0DfqLTGgr4/u717J+sPWbtQBfgMfG9AOIwwrUBqsFE9zW+f1zdlYo +bhF30A+IitsxxA== +-----END PGP MESSAGE-----` + +// See OpenPGP crypto refresh merge request: +// https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/305 +const v6PrivKeyInlineSignMsg = `-----BEGIN PGP MESSAGE----- + +wV0GIQYSyD8ecG9jCP4VGkF3Q6HwM3kOk+mXhIjR2zeNqZMIhRmHzxjV8bU/gXzO +WgBM85PMiVi93AZfJfhK9QmxfdNnZBjeo1VDeVZheQHgaVf7yopqR6W1FT6NOrfS +aQIHAgZhZBZTW+CwcW1g4FKlbExAf56zaw76/prQoN+bAzxpohup69LA7JW/Vp0l +yZnuSj3hcFj0DfqLTGgr4/u717J+sPWbtQBfgMfG9AOIwwrUBqsFE9zW+f1zdlYo +bhF30A+IitsxxA== +-----END PGP MESSAGE-----` + +// See https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/274 +// decryption password: "correct horse battery staple" +const v6ArgonSealedPrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xYIGY4d/4xsAAAAg+U2nu0jWCmHlZ3BqZYfQMxmZu52JGggkLq2EVD34laP9JgkC +FARdb9ccngltHraRe25uHuyuAQQVtKipJ0+r5jL4dacGWSAheCWPpITYiyfyIOPS +3gIDyg8f7strd1OB4+LZsUhcIjOMpVHgmiY/IutJkulneoBYwrEGHxsKAAAAQgWC +Y4d/4wMLCQcFFQoOCAwCFgACmwMCHgkiIQbLGGxPBgmml+TVLfpscisMHx4nwYpW 
+cI9lJewnutmsyQUnCQIHAgAAAACtKCAQPi19In7A5tfORHHbNr/JcIMlNpAnFJin +7wV2wH+q4UWFs7kDsBJ+xP2i8CMEWi7Ha8tPlXGpZR4UruETeh1mhELIj5UeM8T/ +0z+5oX1RHu11j8bZzFDLX9eTsgOdWATHggZjh3/jGQAAACCGkySDZ/nlAV25Ivj0 +gJXdp4SYfy1ZhbEvutFsr15ENf0mCQIUBA5hhGgp2oaavg6mFUXcFMwBBBUuE8qf +9Ock+xwusd+GAglBr5LVyr/lup3xxQvHXFSjjA2haXfoN6xUGRdDEHI6+uevKjVR +v5oAxgu7eJpaXNjCmwYYGwoAAAAsBYJjh3/jApsMIiEGyxhsTwYJppfk1S36bHIr +DB8eJ8GKVnCPZSXsJ7rZrMkAAAAABAEgpukYbZ1ZNfyP5WMUzbUnSGpaUSD5t2Ki +Nacp8DkBClZRa2c3AMQzSDXa9jGhYzxjzVb5scHDzTkjyRZWRdTq8U6L4da+/+Kt +ruh8m7Xo2ehSSFyWRSuTSZe5tm/KXgYG +-----END PGP PRIVATE KEY BLOCK-----` + +const v4Key25519 = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xUkEZB3qzRto01j2k2pwN5ux9w70stPinAdXULLr20CRW7U7h2GSeACch0M+ +qzQg8yjFQ8VBvu3uwgKH9senoHmj72lLSCLTmhFKzQR0ZXN0wogEEBsIAD4F +gmQd6s0ECwkHCAmQIf45+TuC+xMDFQgKBBYAAgECGQECmwMCHgEWIQSWEzMi +jJUHvyIbVKIh/jn5O4L7EwAAUhaHNlgudvxARdPPETUzVgjuWi+YIz8w1xIb +lHQMvIrbe2sGCQIethpWofd0x7DHuv/ciHg+EoxJ/Td6h4pWtIoKx0kEZB3q +zRm4CyA7quliq7yx08AoOqHTuuCgvpkSdEhpp3pEyejQOgBo0p6ywIiLPllY +0t+jpNspHpAGfXID6oqjpYuJw3AfVRBlwnQEGBsIACoFgmQd6s0JkCH+Ofk7 +gvsTApsMFiEElhMzIoyVB78iG1SiIf45+TuC+xMAAGgQuN9G73446ykvJ/mL +sCZ7zGFId2gBd1EnG0FTC4npfOKpck0X8dngByrCxU8LDSfvjsEp/xDAiKsQ +aU71tdtNBQ== +=e7jT +-----END PGP PRIVATE KEY BLOCK-----` + +const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz +/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w +bGU+wsEABBMBCgATBYJeO2eVAgsJAxUICgKbAQIeAQAhCRD7/MgqAV5zMBYhBNGm +bhojsYLJmA94jPv8yCoBXnMwKWUMAJ3FKZfJ2mXvh+GFqgymvK4NoKkDRPB0CbUN +aDdG7ZOizQrWXo7Da2MYIZ6eZUDqBKLdhZ5gZfVnisDfu/yeCgpENaKib1MPHpA8 +nZQjnPejbBDomNqY8HRzr5jvXNlwywBpjWGtegCKUY9xbSynjbfzIlMrWL4S+Rfl ++bOOQKRyYJWXmECmVyqY8cz2VUYmETjNcwC8VCDUxQnhtcCJ7Aej22hfYwVEPb/J +BsJBPq8WECCiGfJ9Y2y6TF+62KzG9Kfs5hqUeHhQy8V4TSi479ewwL7DH86XmIIK +chSANBS+7iyMtctjNZfmF9zYdGJFvjI/mbBR/lK66E515Inuf75XnL8hqlXuwqvG +ni+i03Aet1DzULZEIio4uIU6ioc1lGO9h7K2Xn4S7QQH1QoISNMWqXibUR0RCGjw +FsEDTt2QwJl8XXxoJCooM7BCcCQo+rMNVUHDjIwrdoQjPld3YZsUQQRcqH6bLuln +cfn5ufl8zTGWKydoj/iTz8KcjZ7w187AzQRdpZzyAQwA1jC/XGxjK6ddgrRfW9j+ +s/U00++EvIsgTs2kr3Rg0GP7FLWV0YNtR1mpl55/bEl7yAxCDTkOgPUMXcaKlnQh +6zrlt6H53mF6Bvs3inOHQvOsGtU0dqvb1vkTF0juLiJgPlM7pWv+pNQ6IA39vKoQ +sTMBv4v5vYNXP9GgKbg8inUNT17BxzZYHfw5+q63ectgDm2on1e8CIRCZ76oBVwz +dkVxoy3gjh1eENlk2D4P0uJNZzF1Q8GV67yLANGMCDICE/OkWn6daipYDzW4iJQt +YPUWP4hWhjdm+CK+hg6IQUEn2Vtvi16D2blRP8BpUNNa4fNuylWVuJV76rIHvsLZ +1pbM3LHpRgE8s6jivS3Rz3WRs0TmWCNnvHPqWizQ3VTy+r3UQVJ5AmhJDrZdZq9i +aUIuZ01PoE1+CHiJwuxPtWvVAxf2POcm1M/F1fK1J0e+lKlQuyonTXqXR22Y41wr +fP2aPk3nPSTW2DUAf3vRMZg57ZpRxLEhEMxcM4/LMR+PABEBAAHCwrIEGAEKAAkF +gl8sAVYCmwIB3QkQ+/zIKgFeczDA+qAEGQEKAAwFgl47Z5UFgwB4TOAAIQkQfC+q +Tfk8N7IWIQQd3OFfCSF87i87N2B8L6pN+Tw3st58C/0exp0X2U4LqicSHEOSqHZj +jiysdqIELHGyo5DSPv92UFPp36aqjF9OFgtNNwSa56fmAVCD4+hor/fKARRIeIjF +qdIC5Y/9a4B10NQFJa5lsvB38x/d39LI2kEoglZnqWgdJskROo3vNQF4KlIcm6FH +dn4WI8UkC5oUUcrpZVMSKoacIaxLwqnXT42nIVgYYuqrd/ZagZZjG5WlrTOd5+NI +zi/l0fWProcPHGLjmAh4Thu8i7omtVw1nQaMnq9I77ffg3cPDgXknYrLL+q8xXh/ 
+0mEJyIhnmPwllWCSZuLv9DrD5pOexFfdlwXhf6cLzNpW6QhXD/Tf5KrqIPr9aOv8 +9xaEEXWh0vEby2kIsI2++ft+vfdIyxYw/wKqx0awTSnuBV1rG3z1dswX4BfoY66x +Bz3KOVqlz9+mG/FTRQwrgPvR+qgLCHbuotxoGN7fzW+PI75hQG5JQAqhsC9sHjQH +UrI21/VUNwzfw3v5pYsWuFb5bdQ3ASJetICQiMy7IW8WIQTRpm4aI7GCyZgPeIz7 +/MgqAV5zMG6/C/wLpPl/9e6Hf5wmXIUwpZNQbNZvpiCcyx9sXsHXaycOQVxn3McZ +nYOUP9/mobl1tIeDQyTNbkxWjU0zzJl8XQsDZerb5098pg+x7oGIL7M1vn5s5JMl +owROourqF88JEtOBxLMxlAM7X4hB48xKQ3Hu9hS1GdnqLKki4MqRGl4l5FUwyGOM +GjyS3TzkfiDJNwQxybQiC9n57ij20ieNyLfuWCMLcNNnZUgZtnF6wCctoq/0ZIWu +a7nvuA/XC2WW9YjEJJiWdy5109pqac+qWiY11HWy/nms4gpMdxVpT0RhrKGWq4o0 +M5q3ZElOoeN70UO3OSbU5EVrG7gB1GuwF9mTHUVlV0veSTw0axkta3FGT//XfSpD +lRrCkyLzwq0M+UUHQAuYpAfobDlDdnxxOD2jm5GyTzak3GSVFfjW09QFVO6HlGp5 +01/jtzkUiS6nwoHHkfnyn0beZuR8X6KlcrzLB0VFgQFLmkSM9cSOgYhD0PTu9aHb +hW1Hj9AO8lzggBQ= +=Nt+N +-----END PGP PUBLIC KEY BLOCK----- +` + +const sigFromKeyWithExpiredCrossSig = `-----BEGIN PGP SIGNATURE----- + +wsDzBAABCgAGBYJfLAFsACEJEHwvqk35PDeyFiEEHdzhXwkhfO4vOzdgfC+qTfk8 +N7KiqwwAts4QGB7v9bABCC2qkTxJhmStC0wQMcHRcjL/qAiVnmasQWmvE9KVsdm3 +AaXd8mIx4a37/RRvr9dYrY2eE4uw72cMqPxNja2tvVXkHQvk1oEUqfkvbXs4ypKI +NyeTWjXNOTZEbg0hbm3nMy+Wv7zgB1CEvAsEboLDJlhGqPcD+X8a6CJGrBGUBUrv +KVmZr3U6vEzClz3DBLpoddCQseJRhT4YM1nKmBlZ5quh2LFgTSpajv5OsZheqt9y +EZAPbqmLhDmWRQwGzkWHKceKS7nZ/ox2WK6OS7Ob8ZGZkM64iPo6/EGj5Yc19vQN +AGiIaPEGszBBWlOpHTPhNm0LB0nMWqqaT87oNYwP8CQuuxDb6rKJ2lffCmZH27Lb +UbQZcH8J+0UhpeaiadPZxH5ATJAcenmVtVVMLVOFnm+eIlxzov9ntpgGYt8hLdXB +ITEG9mMgp3TGS9ZzSifMZ8UGtHdp9QdBg8NEVPFzDOMGxpc/Bftav7RRRuPiAER+ +7A5CBid5 +=aQkm +-----END PGP SIGNATURE----- +` + +const signedMessageWithCriticalNotation = `-----BEGIN PGP MESSAGE----- + +owGbwMvMwMH4oOW7S46CznTG09xJDDE3Wl1KUotLuDousDAwcjBYiSmyXL+48d6x +U1PSGUxcj8IUszKBVMpMaWAAAgEGZpAeh9SKxNyCnFS95PzcytRiBi5OAZjyXXzM +f8WYLqv7TXP61Sa4rqT12CI3xaN73YS2pt089f96odCKaEPnWJ3iSGmzJaW/ug10 +2Zo8Wj2k4s7t8wt4H3HtTu+y5UZfV3VOO+l//sdE/o+Lsub8FZH7/eOq7OnbNp4n +vwjE8mqJXetNMfj8r2SCyvkEnlVRYR+/mnge+ib56FdJ8uKtqSxyvgA= +=fRXs +-----END PGP MESSAGE-----` + +const criticalNotationSigner = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EUmEvTgEEANyWtQQMOybQ9JltDqmaX0WnNPJeLILIM36sw6zL0nfTQ5zXSS3+ +fIF6P29lJFxpblWk02PSID5zX/DYU9/zjM2xPO8Oa4xo0cVTOTLj++Ri5mtr//f5 +GLsIXxFrBJhD/ghFsL3Op0GXOeLJ9A5bsOn8th7x6JucNKuaRB6bQbSPABEBAAG0 +JFRlc3QgTWNUZXN0aW5ndG9uIDx0ZXN0QGV4YW1wbGUuY29tPoi5BBMBAgAjBQJS +YS9OAhsvBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQSmNhOk1uQJQwDAP6 +AgrTyqkRlJVqz2pb46TfbDM2TDF7o9CBnBzIGoxBhlRwpqALz7z2kxBDmwpQa+ki +Bq3jZN/UosY9y8bhwMAlnrDY9jP1gdCo+H0sD48CdXybblNwaYpwqC8VSpDdTndf +9j2wE/weihGp/DAdy/2kyBCaiOY1sjhUfJ1GogF49rC4jQRSYS9OAQQA6R/PtBFa +JaT4jq10yqASk4sqwVMsc6HcifM5lSdxzExFP74naUMMyEsKHP53QxTF0Grqusag +Qg/ZtgT0CN1HUM152y7ACOdp1giKjpMzOTQClqCoclyvWOFB+L/SwGEIJf7LSCEr +woBuJifJc8xAVr0XX0JthoW+uP91eTQ3XpsAEQEAAYkBPQQYAQIACQUCUmEvTgIb +LgCoCRBKY2E6TW5AlJ0gBBkBAgAGBQJSYS9OAAoJEOCE90RsICyXuqIEANmmiRCA +SF7YK7PvFkieJNwzeK0V3F2lGX+uu6Y3Q/Zxdtwc4xR+me/CSBmsURyXTO29OWhP +GLszPH9zSJU9BdDi6v0yNprmFPX/1Ng0Abn/sCkwetvjxC1YIvTLFwtUL/7v6NS2 +bZpsUxRTg9+cSrMWWSNjiY9qUKajm1tuzPDZXAUEAMNmAN3xXN/Kjyvj2OK2ck0X +W748sl/tc3qiKPMJ+0AkMF7Pjhmh9nxqE9+QCEl7qinFqqBLjuzgUhBU4QlwX1GD +AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY +hz3tYjKhoFTKEIq3y3Pp +=h/aX +-----END PGP PUBLIC KEY BLOCK-----` + +const keyv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: Bob's OpenPGP Transferable Secret Key + +lQVYBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz 
+/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAQAL/RZqbJW2IqQDCnJi4Ozm++gPqBPiX1RhTWSjwxfM +cJKUZfzLj414rMKm6Jh1cwwGY9jekROhB9WmwaaKT8HtcIgrZNAlYzANGRCM4TLK +3VskxfSwKKna8l+s+mZglqbAjUg3wmFuf9Tj2xcUZYmyRm1DEmcN2ZzpvRtHgX7z +Wn1mAKUlSDJZSQks0zjuMNbupcpyJokdlkUg2+wBznBOTKzgMxVNC9b2g5/tMPUs +hGGWmF1UH+7AHMTaS6dlmr2ZBIyogdnfUqdNg5sZwsxSNrbglKP4sqe7X61uEAIQ +bD7rT3LonLbhkrj3I8wilUD8usIwt5IecoHhd9HziqZjRCc1BUBkboUEoyedbDV4 +i4qfsFZ6CEWoLuD5pW7dEp0M+WeuHXO164Rc+LnH6i1VQrpb1Okl4qO6ejIpIjBI +1t3GshtUu/mwGBBxs60KBX5g77mFQ9lLCRj8lSYqOsHRKBhUp4qM869VA+fD0BRP +fqPT0I9IH4Oa/A3jYJcg622GwQYA1LhnP208Waf6PkQSJ6kyr8ymY1yVh9VBE/g6 +fRDYA+pkqKnw9wfH2Qho3ysAA+OmVOX8Hldg+Pc0Zs0e5pCavb0En8iFLvTA0Q2E +LR5rLue9uD7aFuKFU/VdcddY9Ww/vo4k5p/tVGp7F8RYCFn9rSjIWbfvvZi1q5Tx ++akoZbga+4qQ4WYzB/obdX6SCmi6BndcQ1QdjCCQU6gpYx0MddVERbIp9+2SXDyL +hpxjSyz+RGsZi/9UAshT4txP4+MZBgDfK3ZqtW+h2/eMRxkANqOJpxSjMyLO/FXN +WxzTDYeWtHNYiAlOwlQZEPOydZFty9IVzzNFQCIUCGjQ/nNyhw7adSgUk3+BXEx/ +MyJPYY0BYuhLxLYcrfQ9nrhaVKxRJj25SVHj2ASsiwGJRZW4CC3uw40OYxfKEvNC +mer/VxM3kg8qqGf9KUzJ1dVdAvjyx2Hz6jY2qWCyRQ6IMjWHyd43C4r3jxooYKUC +YnstRQyb/gCSKahveSEjo07CiXMr88UGALwzEr3npFAsPW3osGaFLj49y1oRe11E +he9gCHFm+fuzbXrWmdPjYU5/ZdqdojzDqfu4ThfnipknpVUM1o6MQqkjM896FHm8 +zbKVFSMhEP6DPHSCexMFrrSgN03PdwHTO6iBaIBBFqmGY01tmJ03SxvSpiBPON9P +NVvy/6UZFedTq8A07OUAxO62YUSNtT5pmK2vzs3SAZJmbFbMh+NN204TRI72GlqT +t5hcfkuv8hrmwPS/ZR6q312mKQ6w/1pqO9qitCFCb2IgQmFiYmFnZSA8Ym9iQG9w +ZW5wZ3AuZXhhbXBsZT6JAc4EEwEKADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgEC +F4AWIQTRpm4aI7GCyZgPeIz7/MgqAV5zMAUCXaWe+gAKCRD7/MgqAV5zMG9sC/9U +2T3RrqEbw533FPNfEflhEVRIZ8gDXKM8hU6cqqEzCmzZT6xYTe6sv4y+PJBGXJFX +yhj0g6FDkSyboM5litOcTupURObVqMgA/Y4UKERznm4fzzH9qek85c4ljtLyNufe +doL2pp3vkGtn7eD0QFRaLLmnxPKQ/TlZKdLE1G3u8Uot8QHicaR6GnAdc5UXQJE3 +BiV7jZuDyWmZ1cUNwJkKL6oRtp+ZNDOQCrLNLecKHcgCqrpjSQG5oouba1I1Q6Vl +sP44dhA1nkmLHtxlTOzpeHj4jnk1FaXmyasurrrI5CgU/L2Oi39DGKTH/A/cywDN +4ZplIQ9zR8enkbXquUZvFDe+Xz+6xRXtb5MwQyWODB3nHw85HocLwRoIN9WdQEI+ +L8a/56AuOwhs8llkSuiITjR7r9SgKJC2WlAHl7E8lhJ3VDW3ELC56KH308d6mwOG +ZRAqIAKzM1T5FGjMBhq7ZV0eqdEntBh3EcOIfj2M8rg1MzJv+0mHZOIjByawikad +BVgEXaWc8gEMANYwv1xsYyunXYK0X1vY/rP1NNPvhLyLIE7NpK90YNBj+xS1ldGD +bUdZqZeef2xJe8gMQg05DoD1DF3GipZ0Ies65beh+d5hegb7N4pzh0LzrBrVNHar +29b5ExdI7i4iYD5TO6Vr/qTUOiAN/byqELEzAb+L+b2DVz/RoCm4PIp1DU9ewcc2 +WB38Ofqut3nLYA5tqJ9XvAiEQme+qAVcM3ZFcaMt4I4dXhDZZNg+D9LiTWcxdUPB +leu8iwDRjAgyAhPzpFp+nWoqWA81uIiULWD1Fj+IVoY3ZvgivoYOiEFBJ9lbb4te +g9m5UT/AaVDTWuHzbspVlbiVe+qyB77C2daWzNyx6UYBPLOo4r0t0c91kbNE5lgj +Z7xz6los0N1U8vq91EFSeQJoSQ62XWavYmlCLmdNT6BNfgh4icLsT7Vr1QMX9jzn +JtTPxdXytSdHvpSpULsqJ016l0dtmONcK3z9mj5N5z0k1tg1AH970TGYOe2aUcSx +IRDMXDOPyzEfjwARAQABAAv9F2CwsjS+Sjh1M1vegJbZjei4gF1HHpEM0K0PSXsp +SfVvpR4AoSJ4He6CXSMWg0ot8XKtDuZoV9jnJaES5UL9pMAD7JwIOqZm/DYVJM5h +OASCh1c356/wSbFbzRHPtUdZO9Q30WFNJM5pHbCJPjtNoRmRGkf71RxtvHBzy7np +Ga+W6U/NVKHw0i0CYwMI0YlKDakYW3Pm+QL+gHZFvngGweTod0f9l2VLLAmeQR/c ++EZs7lNumhuZ8mXcwhUc9JQIhOkpO+wreDysEFkAcsKbkQP3UDUsA1gFx9pbMzT0 +tr1oZq2a4QBtxShHzP/ph7KLpN+6qtjks3xB/yjTgaGmtrwM8tSe0wD1RwXS+/1o +BHpXTnQ7TfeOGUAu4KCoOQLv6ELpKWbRBLWuiPwMdbGpvVFALO8+kvKAg9/r+/ny +zM2GQHY+J3Jh5JxPiJnHfXNZjIKLbFbIPdSKNyJBuazXW8xIa//mEHMI5OcvsZBK 
+clAIp7LXzjEjKXIwHwDcTn9pBgDpdOKTHOtJ3JUKx0rWVsDH6wq6iKV/FTVSY5jl +zN+puOEsskF1Lfxn9JsJihAVO3yNsp6RvkKtyNlFazaCVKtDAmkjoh60XNxcNRqr +gCnwdpbgdHP6v/hvZY54ZaJjz6L2e8unNEkYLxDt8cmAyGPgH2XgL7giHIp9jrsQ +aS381gnYwNX6wE1aEikgtY91nqJjwPlibF9avSyYQoMtEqM/1UjTjB2KdD/MitK5 +fP0VpvuXpNYZedmyq4UOMwdkiNMGAOrfmOeT0olgLrTMT5H97Cn3Yxbk13uXHNu/ +ZUZZNe8s+QtuLfUlKAJtLEUutN33TlWQY522FV0m17S+b80xJib3yZVJteVurrh5 +HSWHAM+zghQAvCesg5CLXa2dNMkTCmZKgCBvfDLZuZbjFwnwCI6u/NhOY9egKuUf +SA/je/RXaT8m5VxLYMxwqQXKApzD87fv0tLPlVIEvjEsaf992tFEFSNPcG1l/jpd +5AVXw6kKuf85UkJtYR1x2MkQDrqY1QX/XMw00kt8y9kMZUre19aCArcmor+hDhRJ +E3Gt4QJrD9z/bICESw4b4z2DbgD/Xz9IXsA/r9cKiM1h5QMtXvuhyfVeM01enhxM +GbOH3gjqqGNKysx0UODGEwr6AV9hAd8RWXMchJLaExK9J5SRawSg671ObAU24SdY +vMQ9Z4kAQ2+1ReUZzf3ogSMRZtMT+d18gT6L90/y+APZIaoArLPhebIAGq39HLmJ +26x3z0WAgrpA1kNsjXEXkoiZGPLKIGoe3hqJAbYEGAEKACAWIQTRpm4aI7GCyZgP +eIz7/MgqAV5zMAUCXaWc8gIbDAAKCRD7/MgqAV5zMOn/C/9ugt+HZIwX308zI+QX +c5vDLReuzmJ3ieE0DMO/uNSC+K1XEioSIZP91HeZJ2kbT9nn9fuReuoff0T0Dief +rbwcIQQHFFkrqSp1K3VWmUGp2JrUsXFVdjy/fkBIjTd7c5boWljv/6wAsSfiv2V0 +JSM8EFU6TYXxswGjFVfc6X97tJNeIrXL+mpSmPPqy2bztcCCHkWS5lNLWQw+R7Vg +71Fe6yBSNVrqC2/imYG2J9zlowjx1XU63Wdgqp2Wxt0l8OmsB/W80S1fRF5G4SDH +s9HXglXXqPsBRZJYfP+VStm9L5P/sKjCcX6WtZR7yS6G8zj/X767MLK/djANvpPd +NVniEke6hM3CNBXYPAMhQBMWhCulcoz+0lxi8L34rMN+Dsbma96psdUrn7uLaB91 +6we0CTfF8qqm7BsVAgalon/UUiuMY80U3ueoj3okiSTiHIjD/YtpXSPioC8nMng7 +xqAY9Bwizt4FWgXuLm1a4+So4V9j1TRCXd12Uc2l2RNmgDE= +=miES +-----END PGP PRIVATE KEY BLOCK----- +` + +const certv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd +fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA +Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC +X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI +CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9 +M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA +MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD +AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF +GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb +DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7 +TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== +=IiS2 +-----END PGP PRIVATE KEY BLOCK----- +` + +const msgv5Test = `-----BEGIN PGP MESSAGE----- + +wcDMA3wvqk35PDeyAQv+PcQiLsoYTH30nJYQh3j3cJaO2+jErtVCrIQRIU0+ +rmgMddERYST4A9mA0DQIiTI4FQ0Lp440D3BWCgpq3LlNWewGzduaWwym5rN6 +cwHz5ccDqOcqbd9X0GXXGy/ZH/ljSgzuVMIytMAXKdF/vrRrVgH/+I7cxvm9 +HwnhjMN5dF0j4aEt996H2T7cbtzSr2GN9SWGW8Gyu7I8Zx73hgrGUI7gDiJB +Afaff+P6hfkkHSGOItr94dde8J/7AUF4VEwwxdVVPvsNEFyvv6gRIbYtOCa2 +6RE6h1V/QTxW2O7zZgzWALrE2ui0oaYr9QuqQSssd9CdgExLfdPbI+3/ZAnE +v31Idzpk3/6ILiakYHtXkElPXvf46mCNpobty8ysT34irF+fy3C1p3oGwAsx +5VDV9OSFU6z5U+UPbSPYAy9rkc5ZssuIKxCER2oTvZ2L8Q5cfUvEUiJtRGGn +CJlHrVDdp3FssKv2tlKgLkvxJLyoOjuEkj44H1qRk+D02FzmmUT/0sAHAYYx +lTir6mjHeLpcGjn4waUuWIAJyph8SxUexP60bic0L0NBa6Qp5SxxijKsPIDb +FPHxWwfJSDZRrgUyYT7089YFB/ZM4FHyH9TZcnxn0f0xIB7NS6YNDsxzN2zT +EVEYf+De4qT/dQTsdww78Chtcv9JY9r2kDm77dk2MUGHL2j7n8jasbLtgA7h +pn2DMIWLrGamMLWRmlwslolKr1sMV5x8w+5Ias6C33iBMl9phkg42an0gYmc +byVJHvLO/XErtC+GNIJeMg== +=liRq +-----END PGP MESSAGE----- +` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go new file mode 100644 index 0000000000..f4f5c7832d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go @@ -0,0 +1,410 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2k implements the various OpenPGP string-to-key transforms as
+// specified in RFC 4880 section 3.7.1, and Argon2 specified in
+// draft-ietf-openpgp-crypto-refresh-08 section 3.7.1.4.
+package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k"
+
+import (
+	"crypto"
+	"hash"
+	"io"
+	"strconv"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
+	"golang.org/x/crypto/argon2"
+)
+
+type Mode uint8
+
+// Defines the default S2KMode constants
+//
+// 0 (simple), 1 (salted), 3 (iterated), 4 (argon2)
+const (
+	SimpleS2K         Mode = 0
+	SaltedS2K         Mode = 1
+	IteratedSaltedS2K Mode = 3
+	Argon2S2K         Mode = 4
+	GnuS2K            Mode = 101
+)
+
+const Argon2SaltSize int = 16
+
+// Params contains all the parameters of the s2k packet
+type Params struct {
+	// mode is the mode of the s2k function.
+	// It can be 0 (simple), 1 (salted), 3 (iterated),
+	// 2 (reserved), or 100-110 (private/experimental).
+	mode Mode
+	// hashId is the ID of the hash function used in any of the modes
+	hashId byte
+	// saltBytes is a byte array to use as a salt in the hashing process or argon2
+	saltBytes [Argon2SaltSize]byte
+	// countByte is used to determine how many rounds of hashing are to
+	// be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3.
+	countByte byte
+	// passes is a parameter in Argon2 to determine the number of iterations.
+	// See the crypto refresh Section 3.7.1.4.
+	passes byte
+	// parallelism is a parameter in Argon2 to determine the degree of parallelism.
+	// See the crypto refresh Section 3.7.1.4.
+	parallelism byte
+	// memoryExp is a parameter in Argon2 to determine the memory usage,
+	// i.e., 2 ** memoryExp kibibytes.
+	// See the crypto refresh Section 3.7.1.4.
+	memoryExp byte
+}
+
+// encodeCount converts an iterative "count" in the range 65536 to
+// 65011712, inclusive, to an encoded count. The return value is the
+// octet that is actually stored in the GPG file. encodeCount panics
+// if i is not in the above range (Config.EncodedCount takes care to
+// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
+func encodeCount(i int) uint8 {
+	if i < 65536 || i > 65011712 {
+		panic("count arg i outside the required range")
+	}
+
+	for encoded := 96; encoded < 256; encoded++ {
+		count := decodeCount(uint8(encoded))
+		if count >= i {
+			return uint8(encoded)
+		}
+	}
+
+	return 255
+}
+
+// decodeCount returns the s2k mode 3 iterative "count" corresponding to
+// the encoded octet c.
+func decodeCount(c uint8) int {
+	return (16 + int(c&15)) << (uint32(c>>4) + 6)
+}
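// Editorial sketch, not part of the vendored file: a worked example of the
// mode-3 count encoding above, using only the two functions just defined.
// For c = 224 (0xE0) the low nibble is 0 and the high nibble is 14, so
// decodeCount returns (16+0) << (14+6) = 16777216, the package default.
// encodeCount rounds a requested count up to the next representable value:
// encodeCount(65537) == 97, since decodeCount(97) = 17 << 12 = 69632 is the
// smallest representable count that is >= 65537. The helper name below is
// hypothetical.
func exampleCountEncoding() (uint8, int) {
	return encodeCount(65537), decodeCount(224) // 97, 16777216
}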
+// encodeMemory converts the Argon2 "memory" in the range parallelism*8 to
+// 2**31, inclusive, to an encoded memory. The return value is the
+// octet that is actually stored in the GPG file. encodeMemory panics
+// if memory is not in the above range.
+// See OpenPGP crypto refresh Section 3.7.1.4.
+func encodeMemory(memory uint32, parallelism uint8) uint8 {
+	if memory < (8*uint32(parallelism)) || memory > uint32(2147483648) {
+		panic("Memory argument memory is outside the required range")
+	}
+
+	for exp := 3; exp < 31; exp++ {
+		compare := decodeMemory(uint8(exp))
+		if compare >= memory {
+			return uint8(exp)
+		}
+	}
+
+	return 31
+}
+
+// decodeMemory computes the decoded memory in kibibytes as 2**memoryExponent.
+func decodeMemory(memoryExponent uint8) uint32 {
+	return uint32(1) << memoryExponent
+}
+
+// Simple writes to out the result of computing the Simple S2K function (RFC
+// 4880, section 3.7.1.1) using the given hash and input passphrase.
+func Simple(out []byte, h hash.Hash, in []byte) {
+	Salted(out, h, in, nil)
+}
+
+var zero [1]byte
+
+// Salted writes to out the result of computing the Salted S2K function (RFC
+// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
+func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
+	done := 0
+	var digest []byte
+
+	for i := 0; done < len(out); i++ {
+		h.Reset()
+		for j := 0; j < i; j++ {
+			h.Write(zero[:])
+		}
+		h.Write(salt)
+		h.Write(in)
+		digest = h.Sum(digest[:0])
+		n := copy(out[done:], digest)
+		done += n
+	}
+}
+
+// Iterated writes to out the result of computing the Iterated and Salted S2K
+// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
+// salt and iteration count.
+func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
+	combined := make([]byte, len(in)+len(salt))
+	copy(combined, salt)
+	copy(combined[len(salt):], in)
+
+	if count < len(combined) {
+		count = len(combined)
+	}
+
+	done := 0
+	var digest []byte
+	for i := 0; done < len(out); i++ {
+		h.Reset()
+		for j := 0; j < i; j++ {
+			h.Write(zero[:])
+		}
+		written := 0
+		for written < count {
+			if written+len(combined) > count {
+				todo := count - written
+				h.Write(combined[:todo])
+				written = count
+			} else {
+				h.Write(combined)
+				written += len(combined)
+			}
+		}
+		digest = h.Sum(digest[:0])
+		n := copy(out[done:], digest)
+		done += n
+	}
+}
+
+// Argon2 writes to out the key derived from the password (in) with the Argon2
+// function (the crypto refresh, section 3.7.1.4).
+func Argon2(out []byte, in []byte, salt []byte, passes uint8, parallelism uint8, memoryExp uint8) {
+	key := argon2.IDKey(in, salt, uint32(passes), decodeMemory(memoryExp), parallelism, uint32(len(out)))
+	copy(out[:], key)
+}
+
+// Generate generates valid parameters from given configuration.
+// It will enforce the Iterated and Salted or Argon2 S2K method.
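// Editorial sketch, not part of the vendored file: deriving a 32-byte key
// directly with Iterated above, using SHA-256 and the default encoded count
// of 224 (16777216 octets hashed). crypto/sha256 is assumed to be linked via
// this package's internal/algorithm import, so crypto.SHA256.New() is
// available. Callers normally go through Serialize/Parse below instead of
// this low-level API; the helper name is hypothetical.
func exampleIteratedDerivation(passphrase, salt []byte) []byte {
	key := make([]byte, 32)
	Iterated(key, crypto.SHA256.New(), passphrase, salt, decodeCount(224))
	return key
}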
+func Generate(rand io.Reader, c *Config) (*Params, error) { + var params *Params + if c != nil && c.Mode() == Argon2S2K { + // handle Argon2 case + argonConfig := c.Argon2() + params = &Params{ + mode: Argon2S2K, + passes: argonConfig.Passes(), + parallelism: argonConfig.Parallelism(), + memoryExp: argonConfig.EncodedMemory(), + } + } else if c != nil && c.PassphraseIsHighEntropy && c.Mode() == SaltedS2K { // Allow SaltedS2K if PassphraseIsHighEntropy + hashId, ok := algorithm.HashToHashId(c.hash()) + if !ok { + return nil, errors.UnsupportedError("no such hash") + } + + params = &Params{ + mode: SaltedS2K, + hashId: hashId, + } + } else { // Enforce IteratedSaltedS2K method otherwise + hashId, ok := algorithm.HashToHashId(c.hash()) + if !ok { + return nil, errors.UnsupportedError("no such hash") + } + if c != nil { + c.S2KMode = IteratedSaltedS2K + } + params = &Params{ + mode: IteratedSaltedS2K, + hashId: hashId, + countByte: c.EncodedCount(), + } + } + if _, err := io.ReadFull(rand, params.salt()); err != nil { + return nil, err + } + return params, nil +} + +// Parse reads a binary specification for a string-to-key transformation from r +// and returns a function which performs that transform. If the S2K is a special +// GNU extension that indicates that the private key is missing, then the error +// returned is errors.ErrDummyPrivateKey. +func Parse(r io.Reader) (f func(out, in []byte), err error) { + params, err := ParseIntoParams(r) + if err != nil { + return nil, err + } + + return params.Function() +} + +// ParseIntoParams reads a binary specification for a string-to-key +// transformation from r and returns a struct describing the s2k parameters. +func ParseIntoParams(r io.Reader) (params *Params, err error) { + var buf [Argon2SaltSize + 3]byte + + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + + params = &Params{ + mode: Mode(buf[0]), + } + + switch params.mode { + case SimpleS2K: + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return nil, err + } + params.hashId = buf[0] + return params, nil + case SaltedS2K: + _, err = io.ReadFull(r, buf[:9]) + if err != nil { + return nil, err + } + params.hashId = buf[0] + copy(params.salt(), buf[1:9]) + return params, nil + case IteratedSaltedS2K: + _, err = io.ReadFull(r, buf[:10]) + if err != nil { + return nil, err + } + params.hashId = buf[0] + copy(params.salt(), buf[1:9]) + params.countByte = buf[9] + return params, nil + case Argon2S2K: + _, err = io.ReadFull(r, buf[:Argon2SaltSize+3]) + if err != nil { + return nil, err + } + copy(params.salt(), buf[:Argon2SaltSize]) + params.passes = buf[Argon2SaltSize] + params.parallelism = buf[Argon2SaltSize+1] + params.memoryExp = buf[Argon2SaltSize+2] + return params, nil + case GnuS2K: + // This is a GNU extension. 
See + // https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 + if _, err = io.ReadFull(r, buf[:5]); err != nil { + return nil, err + } + params.hashId = buf[0] + if buf[1] == 'G' && buf[2] == 'N' && buf[3] == 'U' && buf[4] == 1 { + return params, nil + } + return nil, errors.UnsupportedError("GNU S2K extension") + } + + return nil, errors.UnsupportedError("S2K function") +} + +func (params *Params) Dummy() bool { + return params != nil && params.mode == GnuS2K +} + +func (params *Params) salt() []byte { + switch params.mode { + case SaltedS2K, IteratedSaltedS2K: + return params.saltBytes[:8] + case Argon2S2K: + return params.saltBytes[:Argon2SaltSize] + default: + return nil + } +} + +func (params *Params) Function() (f func(out, in []byte), err error) { + if params.Dummy() { + return nil, errors.ErrDummyPrivateKey("dummy key found") + } + var hashObj crypto.Hash + if params.mode != Argon2S2K { + var ok bool + hashObj, ok = algorithm.HashIdToHashWithSha1(params.hashId) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId))) + } + if !hashObj.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj))) + } + } + + switch params.mode { + case SimpleS2K: + f := func(out, in []byte) { + Simple(out, hashObj.New(), in) + } + + return f, nil + case SaltedS2K: + f := func(out, in []byte) { + Salted(out, hashObj.New(), in, params.salt()) + } + + return f, nil + case IteratedSaltedS2K: + f := func(out, in []byte) { + Iterated(out, hashObj.New(), in, params.salt(), decodeCount(params.countByte)) + } + + return f, nil + case Argon2S2K: + f := func(out, in []byte) { + Argon2(out, in, params.salt(), params.passes, params.parallelism, params.memoryExp) + } + return f, nil + } + + return nil, errors.UnsupportedError("S2K function") +} + +func (params *Params) Serialize(w io.Writer) (err error) { + if _, err = w.Write([]byte{uint8(params.mode)}); err != nil { + return + } + if params.mode != Argon2S2K { + if _, err = w.Write([]byte{params.hashId}); err != nil { + return + } + } + if params.Dummy() { + _, err = w.Write(append([]byte("GNU"), 1)) + return + } + if params.mode > 0 { + if _, err = w.Write(params.salt()); err != nil { + return + } + if params.mode == IteratedSaltedS2K { + _, err = w.Write([]byte{params.countByte}) + } + if params.mode == Argon2S2K { + _, err = w.Write([]byte{params.passes, params.parallelism, params.memoryExp}) + } + } + return +} + +// Serialize salts and stretches the given passphrase and writes the +// resulting key into key. It also serializes an S2K descriptor to +// w. The key stretching can be configured with c, which may be +// nil. In that case, sensible defaults will be used. 
+func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
+	params, err := Generate(rand, c)
+	if err != nil {
+		return err
+	}
+	err = params.Serialize(w)
+	if err != nil {
+		return err
+	}
+
+	f, err := params.Function()
+	if err != nil {
+		return err
+	}
+	f(key, passphrase)
+	return nil
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go
new file mode 100644
index 0000000000..616e0d12c6
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go
@@ -0,0 +1,26 @@
+package s2k
+
+// Cache stores keys derived with s2k functions from one passphrase
+// to avoid recomputation if multiple items are encrypted with
+// the same parameters.
+type Cache map[Params][]byte
+
+// GetOrComputeDerivedKey tries to retrieve the key
+// for the given s2k parameters from the cache.
+// If there is no hit, it derives the key with the s2k function from the passphrase,
+// updates the cache, and returns the key.
+func (c *Cache) GetOrComputeDerivedKey(passphrase []byte, params *Params, expectedKeySize int) ([]byte, error) {
+	key, found := (*c)[*params]
+	if !found || len(key) != expectedKeySize {
+		derivedKey := make([]byte, expectedKeySize)
+		s2k, err := params.Function()
+		if err != nil {
+			return nil, err
+		}
+		s2k(derivedKey, passphrase)
+		// Store the freshly derived key so that later lookups with the same
+		// parameters actually hit the cache.
+		(*c)[*params] = derivedKey
+		return derivedKey, nil
+	}
+	return key, nil
+}
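// Editorial sketch, not part of the patch: end-to-end use of this package and
// of the Cache defined above. Serialize writes an S2K descriptor to w and
// stretches the passphrase into key in one step; ParseIntoParams reads the
// descriptor back, and the cache skips re-derivation on repeated lookups with
// identical parameters. All API names come from the vendored files; only this
// example program is editorial.
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/s2k"
)

func main() {
	passphrase := []byte("correct horse battery staple")
	var descriptor bytes.Buffer
	key := make([]byte, 32)

	// A nil *Config selects the defaults: iterated+salted S2K with SHA-256
	// and an encoded count of 224. An Argon2 alternative would be
	// &s2k.Config{S2KMode: s2k.Argon2S2K}.
	if err := s2k.Serialize(&descriptor, key, rand.Reader, passphrase, nil); err != nil {
		panic(err)
	}

	// Parse the descriptor back and re-derive the same key via the cache.
	params, err := s2k.ParseIntoParams(bytes.NewReader(descriptor.Bytes()))
	if err != nil {
		panic(err)
	}
	cache := s2k.Cache{}
	again, err := cache.GetOrComputeDerivedKey(passphrase, params, 32)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(key, again)) // true
}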
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go
new file mode 100644
index 0000000000..b93db1ab85
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go
@@ -0,0 +1,129 @@
+package s2k

+import "crypto"
+
+// Config collects configuration parameters for s2k key-stretching
+// transformations. A nil *Config is valid and results in all default
+// values.
+type Config struct {
+	// S2K (String to Key) mode, used for key derivation in the context of secret key encryption
+	// and passphrase-encrypted data. Either s2k.Argon2S2K or s2k.IteratedSaltedS2K may be used.
+	// If the passphrase is a high-entropy key, indicated by setting PassphraseIsHighEntropy to true,
+	// s2k.SaltedS2K can also be used.
+	// Note: Argon2 is the strongest option but not all OpenPGP implementations are compatible with it
+	// (pending standardisation).
+	// 0 (simple), 1 (salted), 3 (iterated), 4 (argon2),
+	// 2 (reserved), 100-110 (private/experimental).
+	S2KMode Mode
+	// Only relevant if S2KMode is not set to s2k.Argon2S2K.
+	// Hash is the default hash function to be used. If
+	// nil, SHA256 is used.
+	Hash crypto.Hash
+	// Argon2 parameters for S2K (String to Key).
+	// Only relevant if S2KMode is set to s2k.Argon2S2K.
+	// If nil, default parameters are used.
+	// For more details on the choice of parameters, see https://tools.ietf.org/html/rfc9106#section-4.
+	Argon2Config *Argon2Config
+	// Only relevant if S2KMode is set to s2k.IteratedSaltedS2K.
+	// Iteration count for Iterated S2K (String to Key). It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 65536 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 16777216 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+	// Indicates whether the passphrase passed by the application is a
+	// high-entropy key (e.g. it's randomly generated or derived from
+	// another passphrase using a strong key derivation function).
+	// When true, allows the S2KMode to be s2k.SaltedS2K.
+	// When the passphrase is not a high-entropy key, using SaltedS2K is
+	// insecure, and not allowed by draft-ietf-openpgp-crypto-refresh-08.
+	PassphraseIsHighEntropy bool
+}
+
+// Argon2Config stores the Argon2 parameters.
+// A nil *Argon2Config is valid and results in all default values.
+type Argon2Config struct {
+	NumberOfPasses      uint8
+	DegreeOfParallelism uint8
+	// Memory specifies the desired Argon2 memory usage in kibibytes.
+	// For example memory=64*1024 sets the memory cost to ~64 MB.
+	Memory uint32
+}
+
+func (c *Config) Mode() Mode {
+	if c == nil {
+		return IteratedSaltedS2K
+	}
+	return c.S2KMode
+}
+
+func (c *Config) hash() crypto.Hash {
+	if c == nil || uint(c.Hash) == 0 {
+		return crypto.SHA256
+	}
+
+	return c.Hash
+}
+
+func (c *Config) Argon2() *Argon2Config {
+	if c == nil || c.Argon2Config == nil {
+		return nil
+	}
+	return c.Argon2Config
+}
+
+// EncodedCount returns the encoded iteration count for the configured
+// S2KCount, clamped to the representable range.
+func (c *Config) EncodedCount() uint8 {
+	if c == nil || c.S2KCount == 0 {
+		return 224 // The common case. Corresponding to 16777216
+	}
+
+	i := c.S2KCount
+
+	switch {
+	case i < 65536:
+		i = 65536
+	case i > 65011712:
+		i = 65011712
+	}
+
+	return encodeCount(i)
+}
+
+func (c *Argon2Config) Passes() uint8 {
+	if c == nil || c.NumberOfPasses == 0 {
+		return 3
+	}
+	return c.NumberOfPasses
+}
+
+func (c *Argon2Config) Parallelism() uint8 {
+	if c == nil || c.DegreeOfParallelism == 0 {
+		return 4
+	}
+	return c.DegreeOfParallelism
+}
+
+func (c *Argon2Config) EncodedMemory() uint8 {
+	if c == nil || c.Memory == 0 {
+		return 16 // 64 MiB of RAM
+	}
+
+	memory := c.Memory
+	lowerBound := uint32(c.Parallelism()) * 8
+	upperBound := uint32(2147483648)
+
+	switch {
+	case memory < lowerBound:
+		memory = lowerBound
+	case memory > upperBound:
+		memory = upperBound
+	}
+
+	return encodeMemory(memory, c.Parallelism())
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
new file mode 100644
index 0000000000..0db5526ce0
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
@@ -0,0 +1,614 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+	"crypto"
+	"hash"
+	"io"
+	"strconv"
+	"time"
+
+	"github.com/ProtonMail/go-crypto/openpgp/armor"
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
+	"github.com/ProtonMail/go-crypto/openpgp/packet"
+)
+
+// DetachSign signs message with the private key from signer (which must
+// already have been decrypted) and writes the signature to w.
+// If config is nil, sensible defaults will be used.
+func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
+	return detachSign(w, signer, message, packet.SigTypeBinary, config)
+}
+
+// ArmoredDetachSign signs message with the private key from signer (which
+// must already have been decrypted) and writes an armored signature to w.
+// If config is nil, sensible defaults will be used.
+func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { + return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// DetachSignText signs message (after canonicalising the line endings) with +// the private key from signer (which must already have been decrypted) and +// writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeText, config) +} + +// ArmoredDetachSignText signs message (after canonicalising the line endings) +// with the private key from signer (which must already have been decrypted) +// and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + signingKey, ok := signer.SigningKeyById(config.Now(), config.SigningKey()) + if !ok { + return errors.InvalidArgumentError("no valid signing keys") + } + if signingKey.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signingKey.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + if _, ok := algorithm.HashToHashId(config.Hash()); !ok { + return errors.InvalidArgumentError("invalid hash function") + } + + sig := createSignaturePacket(signingKey.PublicKey, sigType, config) + + h, err := sig.PrepareSign(config) + if err != nil { + return + } + wrappedHash, err := wrapHashForSignature(h, sig.SigType) + if err != nil { + return + } + if _, err = io.Copy(wrappedHash, message); err != nil { + return err + } + + err = sig.Sign(h, signingKey.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. 
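// Editorial sketch, not part of the vendored file: typical use of the
// detached-signing and symmetric-encryption entry points in this file. The
// signer entity is assumed to already hold a decrypted private key, and a nil
// *packet.Config selects the documented defaults. The helper name and the
// literal passphrase are hypothetical.
func exampleDetachAndEncrypt(sigOut, msgOut io.Writer, signer *Entity, message io.Reader, secret []byte) error {
	// Detached armored signature over message.
	if err := ArmoredDetachSign(sigOut, signer, message, nil); err != nil {
		return err
	}
	// Passphrase-based encryption, like gpg -c.
	plaintext, err := SymmetricallyEncrypt(msgOut, []byte("passphrase"), &FileHints{IsBinary: true}, nil)
	if err != nil {
		return err
	}
	if _, err := plaintext.Write(secret); err != nil {
		plaintext.Close()
		return err
	}
	// Close flushes the literal-data and encryption layers.
	return plaintext.Close()
}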
+func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
+	if hints == nil {
+		hints = &FileHints{}
+	}
+
+	key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config)
+	if err != nil {
+		return
+	}
+
+	var w io.WriteCloser
+	cipherSuite := packet.CipherSuite{
+		Cipher: config.Cipher(),
+		Mode:   config.AEAD().Mode(),
+	}
+	w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), config.AEAD() != nil, cipherSuite, key, config)
+	if err != nil {
+		return
+	}
+
+	literalData := w
+	if algo := config.Compression(); algo != packet.CompressionNone {
+		var compConfig *packet.CompressionConfig
+		if config != nil {
+			compConfig = config.CompressionConfig
+		}
+		literalData, err = packet.SerializeCompressed(w, algo, compConfig)
+		if err != nil {
+			return
+		}
+	}
+
+	var epochSeconds uint32
+	if !hints.ModTime.IsZero() {
+		epochSeconds = uint32(hints.ModTime.Unix())
+	}
+	return packet.SerializeLiteral(literalData, hints.IsBinary, hints.FileName, epochSeconds)
+}
+
+// intersectPreferences mutates and returns a prefix of a that contains only
+// the values in the intersection of a and b. The order of a is preserved.
+func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) {
+	var j int
+	for _, v := range a {
+		for _, v2 := range b {
+			if v == v2 {
+				a[j] = v
+				j++
+				break
+			}
+		}
+	}
+
+	return a[:j]
+}
+
+// intersectCipherSuites mutates and returns a prefix of a that contains only
+// the values in the intersection of a and b. The order of a is preserved.
+func intersectCipherSuites(a [][2]uint8, b [][2]uint8) (intersection [][2]uint8) {
+	var j int
+	for _, v := range a {
+		for _, v2 := range b {
+			if v[0] == v2[0] && v[1] == v2[1] {
+				a[j] = v
+				j++
+				break
+			}
+		}
+	}
+
+	return a[:j]
+}
+
+func hashToHashId(h crypto.Hash) uint8 {
+	v, ok := algorithm.HashToHashId(h)
+	if !ok {
+		panic("tried to convert unknown hash")
+	}
+	return v
+}
+
+// EncryptText encrypts a message to a number of recipients and, optionally,
+// signs it. Optional information is contained in 'hints', also encrypted, that
+// aids the recipients in processing the message. The resulting WriteCloser
+// must be closed after the contents of the file have been written. If config
+// is nil, sensible defaults will be used. The signing is done in text mode.
+func EncryptText(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
+	return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeText, config)
+}
+
+// Encrypt encrypts a message to a number of recipients and, optionally, signs
+// it. hints contains optional information, that is also encrypted, that aids
+// the recipients in processing the message. The resulting WriteCloser must
+// be closed after the contents of the file have been written.
+// If config is nil, sensible defaults will be used.
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
+	return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeBinary, config)
+}
+
+// EncryptSplit encrypts a message to a number of recipients and, optionally, signs
+// it. hints contains optional information, that is also encrypted, that aids
+// the recipients in processing the message. The resulting WriteCloser must
+// be closed after the contents of the file have been written.
+// If config is nil, sensible defaults will be used. +func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config) +} + +// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config) +} + +// writeAndSign writes the data as a payload package and, optionally, signs +// it. hints contains optional information, that is also encrypted, +// that aids the recipients in processing the message. The resulting +// WriteCloser must be closed after the contents of the file have been +// written. If config is nil, sensible defaults will be used. +func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.SigningKeyById(config.Now(), config.SigningKey()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + var hash crypto.Hash + for _, hashId := range candidateHashes { + if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() { + hash = h + break + } + } + + // If the hash specified by config is a candidate, we'll use that. + if configuredHash := config.Hash(); configuredHash.Available() { + for _, hashId := range candidateHashes { + if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash { + hash = h + break + } + } + } + + if hash == 0 { + hashId := candidateHashes[0] + name, ok := algorithm.HashIdToString(hashId) + if !ok { + name = "#" + strconv.Itoa(int(hashId)) + } + return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") + } + + var salt []byte + if signer != nil { + var opsVersion = 3 + if signer.Version == 6 { + opsVersion = signer.Version + } + ops := &packet.OnePassSignature{ + Version: opsVersion, + SigType: sigType, + Hash: hash, + PubKeyAlgo: signer.PubKeyAlgo, + KeyId: signer.KeyId, + IsLast: true, + } + if opsVersion == 6 { + ops.KeyFingerprint = signer.Fingerprint + salt, err = packet.SignatureSaltForHash(hash, config.Random()) + if err != nil { + return nil, err + } + ops.Salt = salt + } + if err := ops.Serialize(payload); err != nil { + return nil, err + } + } + + if hints == nil { + hints = &FileHints{} + } + + w := payload + if signer != nil { + // If we need to write a signature packet after the literal + // data then we need to stop literalData from closing + // encryptedData. 
+ w = noOpCloser{w} + + } + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) + if err != nil { + return nil, err + } + + if signer != nil { + h, wrappedHash, err := hashForSignature(hash, sigType, salt) + if err != nil { + return nil, err + } + metadata := &packet.LiteralData{ + Format: 'u', + FileName: hints.FileName, + Time: epochSeconds, + } + if hints.IsBinary { + metadata.Format = 'b' + } + return signatureWriter{payload, literalData, hash, wrappedHash, h, salt, signer, sigType, config, metadata}, nil + } + return literalData, nil +} + +// encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { + if len(to) == 0 { + return nil, errors.InvalidArgumentError("no encryption recipient provided") + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES256), + uint8(packet.CipherAES128), + } + + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA3_256), + hashToHashId(crypto.SHA3_512), + } + + // Prefer GCM if everyone supports it + candidateCipherSuites := [][2]uint8{ + {uint8(packet.CipherAES256), uint8(packet.AEADModeGCM)}, + {uint8(packet.CipherAES256), uint8(packet.AEADModeEAX)}, + {uint8(packet.CipherAES256), uint8(packet.AEADModeOCB)}, + {uint8(packet.CipherAES128), uint8(packet.AEADModeGCM)}, + {uint8(packet.CipherAES128), uint8(packet.AEADModeEAX)}, + {uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}, + } + + candidateCompression := []uint8{ + uint8(packet.CompressionNone), + uint8(packet.CompressionZIP), + uint8(packet.CompressionZLIB), + } + + encryptKeys := make([]Key, len(to)) + + // AEAD is used only if config enables it and every key supports it + aeadSupported := config.AEAD() != nil + + for i := range to { + var ok bool + encryptKeys[i], ok = to[i].EncryptionKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") + } + + primarySelfSignature, _ := to[i].PrimarySelfSignature() + if primarySelfSignature == nil { + return nil, errors.InvalidArgumentError("entity without a self-signature") + } + + if !primarySelfSignature.SEIPDv2 { + aeadSupported = false + } + + candidateCiphers = intersectPreferences(candidateCiphers, primarySelfSignature.PreferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, primarySelfSignature.PreferredHash) + candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, primarySelfSignature.PreferredCipherSuites) + candidateCompression = intersectPreferences(candidateCompression, primarySelfSignature.PreferredCompression) + } + + // In the event that the intersection of supported algorithms is empty we 
use the ones + // labelled as MUST that every implementation supports. + if len(candidateCiphers) == 0 { + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.3 + candidateCiphers = []uint8{uint8(packet.CipherAES128)} + } + if len(candidateHashes) == 0 { + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#hash-algos + candidateHashes = []uint8{hashToHashId(crypto.SHA256)} + } + if len(candidateCipherSuites) == 0 { + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6 + candidateCipherSuites = [][2]uint8{{uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}} + } + + cipher := packet.CipherFunction(candidateCiphers[0]) + aeadCipherSuite := packet.CipherSuite{ + Cipher: packet.CipherFunction(candidateCipherSuites[0][0]), + Mode: packet.AEADMode(candidateCipherSuites[0][1]), + } + + // If the cipher specified by config is a candidate, we'll use that. + configuredCipher := config.Cipher() + for _, c := range candidateCiphers { + cipherFunc := packet.CipherFunction(c) + if cipherFunc == configuredCipher { + cipher = cipherFunc + break + } + } + + symKey := make([]byte, cipher.KeySize()) + if _, err := io.ReadFull(config.Random(), symKey); err != nil { + return nil, err + } + + for _, key := range encryptKeys { + if err := packet.SerializeEncryptedKeyAEAD(keyWriter, key.PublicKey, cipher, aeadSupported, symKey, config); err != nil { + return nil, err + } + } + + var payload io.WriteCloser + payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, aeadSupported, aeadCipherSuite, symKey, config) + if err != nil { + return + } + + payload, err = handleCompression(payload, candidateCompression, config) + if err != nil { + return nil, err + } + + return writeAndSign(payload, candidateHashes, signed, hints, sigType, config) +} + +// Sign signs a message. The resulting WriteCloser must be closed after the +// contents of the file have been written. hints contains optional information +// that aids the recipients in processing the message. +// If config is nil, sensible defaults will be used. +func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { + if signed == nil { + return nil, errors.InvalidArgumentError("no signer provided") + } + + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA3_256), + hashToHashId(crypto.SHA3_512), + } + defaultHashes := candidateHashes[0:1] + primarySelfSignature, _ := signed.PrimarySelfSignature() + if primarySelfSignature == nil { + return nil, errors.StructuralError("signed entity has no self-signature") + } + preferredHashes := primarySelfSignature.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + if len(candidateHashes) == 0 { + return nil, errors.StructuralError("cannot sign because signing key shares no common algorithms with candidate hashes") + } + + return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config) +} + +// signatureWriter hashes the contents of a message while passing it along to +// literalData. When closed, it closes literalData, writes a signature packet +// to encryptedData and then also closes encryptedData. 
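// Editorial sketch, not part of the vendored file: encrypting to a set of
// recipients with an optional signer, using Encrypt above. Closing the
// returned WriteCloser is what writes the trailing signature packet (via
// signatureWriter, below) and finalizes the encryption layer. The helper
// name is hypothetical.
func exampleEncryptAndSign(out io.Writer, recipients []*Entity, signer *Entity, body []byte) error {
	plaintext, err := Encrypt(out, recipients, signer, &FileHints{IsBinary: true}, nil)
	if err != nil {
		return err
	}
	if _, err := plaintext.Write(body); err != nil {
		plaintext.Close()
		return err
	}
	return plaintext.Close()
}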
+type signatureWriter struct { + encryptedData io.WriteCloser + literalData io.WriteCloser + hashType crypto.Hash + wrappedHash hash.Hash + h hash.Hash + salt []byte // v6 only + signer *packet.PrivateKey + sigType packet.SignatureType + config *packet.Config + metadata *packet.LiteralData // V5 signatures protect document metadata +} + +func (s signatureWriter) Write(data []byte) (int, error) { + s.wrappedHash.Write(data) + switch s.sigType { + case packet.SigTypeBinary: + return s.literalData.Write(data) + case packet.SigTypeText: + flag := 0 + return writeCanonical(s.literalData, data, &flag) + } + return 0, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(s.sigType))) +} + +func (s signatureWriter) Close() error { + sig := createSignaturePacket(&s.signer.PublicKey, s.sigType, s.config) + sig.Hash = s.hashType + sig.Metadata = s.metadata + + if err := sig.SetSalt(s.salt); err != nil { + return err + } + + if err := sig.Sign(s.h, s.signer, s.config); err != nil { + return err + } + if err := s.literalData.Close(); err != nil { + return err + } + if err := sig.Serialize(s.encryptedData); err != nil { + return err + } + return s.encryptedData.Close() +} + +func createSignaturePacket(signer *packet.PublicKey, sigType packet.SignatureType, config *packet.Config) *packet.Signature { + sigLifetimeSecs := config.SigLifetime() + return &packet.Signature{ + Version: signer.Version, + SigType: sigType, + PubKeyAlgo: signer.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.KeyId, + IssuerFingerprint: signer.Fingerprint, + Notations: config.Notations(), + SigLifetimeSecs: &sigLifetimeSecs, + } +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +// TODO: we have two of these in OpenPGP packages alone. This probably needs +// to be promoted somewhere more common. 
+type noOpCloser struct {
+	w io.Writer
+}
+
+func (c noOpCloser) Write(data []byte) (n int, err error) {
+	return c.w.Write(data)
+}
+
+func (c noOpCloser) Close() error {
+	return nil
+}
+
+func handleCompression(compressed io.WriteCloser, candidateCompression []uint8, config *packet.Config) (data io.WriteCloser, err error) {
+	data = compressed
+	confAlgo := config.Compression()
+	if confAlgo == packet.CompressionNone {
+		return
+	}
+
+	// Fall back to the algorithm labelled as MUST.
+	// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.4
+	finalAlgo := packet.CompressionNone
+	// If the compression algorithm specified by config is a candidate, use it.
+	for _, c := range candidateCompression {
+		if uint8(confAlgo) == c {
+			finalAlgo = confAlgo
+			break
+		}
+	}
+
+	if finalAlgo != packet.CompressionNone {
+		var compConfig *packet.CompressionConfig
+		if config != nil {
+			compConfig = config.CompressionConfig
+		}
+		data, err = packet.SerializeCompressed(compressed, finalAlgo, compConfig)
+		if err != nil {
+			return
+		}
+	}
+	return data, nil
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go
new file mode 100644
index 0000000000..38afcc74fa
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go
@@ -0,0 +1,221 @@
+package x25519
+
+import (
+	"crypto/sha256"
+	"crypto/subtle"
+	"io"
+
+	"github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	x25519lib "github.com/cloudflare/circl/dh/x25519"
+	"golang.org/x/crypto/hkdf"
+)
+
+const (
+	hkdfInfo      = "OpenPGP X25519"
+	aes128KeySize = 16
+	// The size of a public or private key in bytes.
+	KeySize = x25519lib.Size
+)
+
+type PublicKey struct {
+	// Point represents the encoded elliptic curve point of the public key.
+	Point []byte
+}
+
+type PrivateKey struct {
+	PublicKey
+	// Secret represents the secret of the private key.
+	Secret []byte
+}
+
+// NewPrivateKey creates a new empty private key including the public key.
+func NewPrivateKey(key PublicKey) *PrivateKey {
+	return &PrivateKey{
+		PublicKey: key,
+	}
+}
+
+// Validate validates that the provided public key matches the private key.
+func Validate(pk *PrivateKey) (err error) {
+	var expectedPublicKey, privateKey x25519lib.Key
+	subtle.ConstantTimeCopy(1, privateKey[:], pk.Secret)
+	x25519lib.KeyGen(&expectedPublicKey, &privateKey)
+	if subtle.ConstantTimeCompare(expectedPublicKey[:], pk.PublicKey.Point) == 0 {
+		return errors.KeyInvalidError("x25519: invalid key")
+	}
+	return nil
+}
+
+// GenerateKey generates a new x25519 key pair.
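// Editorial sketch, not part of the vendored file: generating a key pair with
// GenerateKey below and checking the pair with Validate above. crypto/rand's
// Reader would be the usual randomness source. The helper name is
// hypothetical.
func exampleGenerateAndValidate(rand io.Reader) (*PrivateKey, error) {
	priv, err := GenerateKey(rand)
	if err != nil {
		return nil, err
	}
	if err := Validate(priv); err != nil {
		return nil, err
	}
	return priv, nil
}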
+func GenerateKey(rand io.Reader) (*PrivateKey, error) { + var privateKey, publicKey x25519lib.Key + privateKeyOut := new(PrivateKey) + err := generateKey(rand, &privateKey, &publicKey) + if err != nil { + return nil, err + } + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Secret = privateKey[:] + return privateKeyOut, nil +} + +func generateKey(rand io.Reader, privateKey *x25519lib.Key, publicKey *x25519lib.Key) error { + maxRounds := 10 + isZero := true + for round := 0; isZero; round++ { + if round == maxRounds { + return errors.InvalidArgumentError("x25519: zero keys only, randomness source might be corrupt") + } + _, err := io.ReadFull(rand, privateKey[:]) + if err != nil { + return err + } + isZero = constantTimeIsZero(privateKey[:]) + } + x25519lib.KeyGen(publicKey, privateKey) + return nil +} + +// Encrypt encrypts a sessionKey with x25519 according to +// the OpenPGP crypto refresh specification section 5.1.6. The function assumes that the +// sessionKey has the correct format and padding according to the specification. +func Encrypt(rand io.Reader, publicKey *PublicKey, sessionKey []byte) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, err error) { + var ephemeralPrivate, ephemeralPublic, staticPublic, shared x25519lib.Key + // Check that the input static public key has 32 bytes + if len(publicKey.Point) != KeySize { + err = errors.KeyInvalidError("x25519: the public key has the wrong size") + return + } + copy(staticPublic[:], publicKey.Point) + // Generate ephemeral keyPair + err = generateKey(rand, &ephemeralPrivate, &ephemeralPublic) + if err != nil { + return + } + // Compute shared key + ok := x25519lib.Shared(&shared, &ephemeralPrivate, &staticPublic) + if !ok { + err = errors.KeyInvalidError("x25519: the public key is a low order point") + return + } + // Derive the encryption key from the shared secret + encryptionKey := applyHKDF(ephemeralPublic[:], publicKey.Point[:], shared[:]) + ephemeralPublicKey = &PublicKey{ + Point: ephemeralPublic[:], + } + // Encrypt the sessionKey with aes key wrapping + encryptedSessionKey, err = keywrap.Wrap(encryptionKey, sessionKey) + return +} + +// Decrypt decrypts a session key stored in ciphertext with the provided x25519 +// private key and ephemeral public key. 
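// Editorial sketch, not part of the vendored file: a full session-key round
// trip. Encrypt above wraps sessionKey for the recipient using an ephemeral
// key, HKDF-SHA256 key derivation and AES key wrapping; Decrypt below
// reverses it. The session key is assumed to be formatted and padded per the
// spec, and the helper name is hypothetical.
func exampleSessionKeyRoundTrip(rand io.Reader, recipient *PrivateKey, sessionKey []byte) ([]byte, error) {
	ephemeral, wrapped, err := Encrypt(rand, &recipient.PublicKey, sessionKey)
	if err != nil {
		return nil, err
	}
	return Decrypt(recipient, ephemeral, wrapped)
}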
+func Decrypt(privateKey *PrivateKey, ephemeralPublicKey *PublicKey, ciphertext []byte) (encodedSessionKey []byte, err error) {
+	var ephemeralPublic, staticPrivate, shared x25519lib.Key
+	// Check that the input ephemeral public key has 32 bytes
+	if len(ephemeralPublicKey.Point) != KeySize {
+		err = errors.KeyInvalidError("x25519: the public key has the wrong size")
+		return
+	}
+	copy(ephemeralPublic[:], ephemeralPublicKey.Point)
+	subtle.ConstantTimeCopy(1, staticPrivate[:], privateKey.Secret)
+	// Compute shared key
+	ok := x25519lib.Shared(&shared, &staticPrivate, &ephemeralPublic)
+	if !ok {
+		err = errors.KeyInvalidError("x25519: the ephemeral public key is a low order point")
+		return
+	}
+	// Derive the encryption key from the shared secret
+	encryptionKey := applyHKDF(ephemeralPublicKey.Point[:], privateKey.PublicKey.Point[:], shared[:])
+	// Decrypt the session key with aes key wrapping
+	encodedSessionKey, err = keywrap.Unwrap(encryptionKey, ciphertext)
+	return
+}
+
+func applyHKDF(ephemeralPublicKey []byte, publicKey []byte, sharedSecret []byte) []byte {
+	inputKey := make([]byte, 3*KeySize)
+	// ephemeral public key | recipient public key | shared secret
+	subtle.ConstantTimeCopy(1, inputKey[:KeySize], ephemeralPublicKey)
+	subtle.ConstantTimeCopy(1, inputKey[KeySize:2*KeySize], publicKey)
+	subtle.ConstantTimeCopy(1, inputKey[2*KeySize:], sharedSecret)
+	hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, []byte(hkdfInfo))
+	encryptionKey := make([]byte, aes128KeySize)
+	_, _ = io.ReadFull(hkdfReader, encryptionKey)
+	return encryptionKey
+}
+
+func constantTimeIsZero(bytes []byte) bool {
+	isZero := byte(0)
+	for _, b := range bytes {
+		isZero |= b
+	}
+	return isZero == 0
+}
+
+// ENCODING/DECODING ciphertexts:
+
+// EncodedFieldsLength returns the length of the ciphertext encoding
+// given the encrypted session key.
+func EncodedFieldsLength(encryptedSessionKey []byte, v6 bool) int {
+	lenCipherFunction := 0
+	if !v6 {
+		lenCipherFunction = 1
+	}
+	return KeySize + 1 + len(encryptedSessionKey) + lenCipherFunction
+}
+
+// EncodeFields encodes x25519 session key encryption fields as
+// ephemeral x25519 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey
+// and writes it to writer.
+func EncodeFields(writer io.Writer, ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, v6 bool) (err error) {
+	lenAlgorithm := 0
+	if !v6 {
+		lenAlgorithm = 1
+	}
+	if _, err = writer.Write(ephemeralPublicKey.Point); err != nil {
+		return err
+	}
+	if _, err = writer.Write([]byte{byte(len(encryptedSessionKey) + lenAlgorithm)}); err != nil {
+		return err
+	}
+	if !v6 {
+		if _, err = writer.Write([]byte{cipherFunction}); err != nil {
+			return err
+		}
+	}
+	_, err = writer.Write(encryptedSessionKey)
+	return err
+}
+
+// DecodeFields decodes an x25519 session key encryption as
+// ephemeral x25519 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey.
+func DecodeFields(reader io.Reader, v6 bool) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, err error) {
+	var buf [1]byte
+	ephemeralPublicKey = &PublicKey{
+		Point: make([]byte, KeySize),
+	}
+	// 32 octets representing an ephemeral x25519 public key.
+	if _, err = io.ReadFull(reader, ephemeralPublicKey.Point); err != nil {
+		return nil, nil, 0, err
+	}
+	// A one-octet size of the following fields.
+ if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + followingLen := buf[0] + // The one-octet algorithm identifier, if it was passed (in the case of a v3 PKESK packet). + if !v6 { + if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + cipherFunction = buf[0] + followingLen -= 1 + } + // The encrypted session key. + encryptedSessionKey = make([]byte, followingLen) + if _, err = io.ReadFull(reader, encryptedSessionKey); err != nil { + return nil, nil, 0, err + } + return ephemeralPublicKey, encryptedSessionKey, cipherFunction, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go new file mode 100644 index 0000000000..65a082dabd --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go @@ -0,0 +1,229 @@ +package x448 + +import ( + "crypto/sha512" + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + "github.com/ProtonMail/go-crypto/openpgp/errors" + x448lib "github.com/cloudflare/circl/dh/x448" + "golang.org/x/crypto/hkdf" +) + +const ( + hkdfInfo = "OpenPGP X448" + aes256KeySize = 32 + // The size of a public or private key in bytes. + KeySize = x448lib.Size +) + +type PublicKey struct { + // Point represents the encoded elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Secret represents the secret of the private key. + Secret []byte +} + +// NewPrivateKey creates a new empty private key including the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Validate validates that the provided public key matches +// the private key. +func Validate(pk *PrivateKey) (err error) { + var expectedPublicKey, privateKey x448lib.Key + subtle.ConstantTimeCopy(1, privateKey[:], pk.Secret) + x448lib.KeyGen(&expectedPublicKey, &privateKey) + if subtle.ConstantTimeCompare(expectedPublicKey[:], pk.PublicKey.Point) == 0 { + return errors.KeyInvalidError("x448: invalid key") + } + return nil +} + +// GenerateKey generates a new x448 key pair. +func GenerateKey(rand io.Reader) (*PrivateKey, error) { + var privateKey, publicKey x448lib.Key + privateKeyOut := new(PrivateKey) + err := generateKey(rand, &privateKey, &publicKey) + if err != nil { + return nil, err + } + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Secret = privateKey[:] + return privateKeyOut, nil +} + +func generateKey(rand io.Reader, privateKey *x448lib.Key, publicKey *x448lib.Key) error { + maxRounds := 10 + isZero := true + for round := 0; isZero; round++ { + if round == maxRounds { + return errors.InvalidArgumentError("x448: zero keys only, randomness source might be corrupt") + } + _, err := io.ReadFull(rand, privateKey[:]) + if err != nil { + return err + } + isZero = constantTimeIsZero(privateKey[:]) + } + x448lib.KeyGen(publicKey, privateKey) + return nil +} + +// Encrypt encrypts a sessionKey with x448 according to +// the OpenPGP crypto refresh specification section 5.1.7. The function assumes that the +// sessionKey has the correct format and padding according to the specification. +func Encrypt(rand io.Reader, publicKey *PublicKey, sessionKey []byte) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, err error) { + var ephemeralPrivate, ephemeralPublic, staticPublic, shared x448lib.Key + // Check that the input static public key has 56 bytes. 
+ if len(publicKey.Point) != KeySize { + err = errors.KeyInvalidError("x448: the public key has the wrong size") + return nil, nil, err + } + copy(staticPublic[:], publicKey.Point) + // Generate ephemeral keyPair. + if err = generateKey(rand, &ephemeralPrivate, &ephemeralPublic); err != nil { + return nil, nil, err + } + // Compute shared key. + ok := x448lib.Shared(&shared, &ephemeralPrivate, &staticPublic) + if !ok { + err = errors.KeyInvalidError("x448: the public key is a low order point") + return nil, nil, err + } + // Derive the encryption key from the shared secret. + encryptionKey := applyHKDF(ephemeralPublic[:], publicKey.Point[:], shared[:]) + ephemeralPublicKey = &PublicKey{ + Point: ephemeralPublic[:], + } + // Encrypt the sessionKey with aes key wrapping. + encryptedSessionKey, err = keywrap.Wrap(encryptionKey, sessionKey) + if err != nil { + return nil, nil, err + } + return ephemeralPublicKey, encryptedSessionKey, nil +} + +// Decrypt decrypts a session key stored in ciphertext with the provided x448 +// private key and ephemeral public key. +func Decrypt(privateKey *PrivateKey, ephemeralPublicKey *PublicKey, ciphertext []byte) (encodedSessionKey []byte, err error) { + var ephemeralPublic, staticPrivate, shared x448lib.Key + // Check that the input ephemeral public key has 56 bytes. + if len(ephemeralPublicKey.Point) != KeySize { + err = errors.KeyInvalidError("x448: the public key has the wrong size") + return nil, err + } + copy(ephemeralPublic[:], ephemeralPublicKey.Point) + subtle.ConstantTimeCopy(1, staticPrivate[:], privateKey.Secret) + // Compute shared key. + ok := x448lib.Shared(&shared, &staticPrivate, &ephemeralPublic) + if !ok { + err = errors.KeyInvalidError("x448: the ephemeral public key is a low order point") + return nil, err + } + // Derive the encryption key from the shared secret. + encryptionKey := applyHKDF(ephemeralPublicKey.Point[:], privateKey.PublicKey.Point[:], shared[:]) + // Decrypt the session key with aes key wrapping. + encodedSessionKey, err = keywrap.Unwrap(encryptionKey, ciphertext) + if err != nil { + return nil, err + } + return encodedSessionKey, nil +} + +func applyHKDF(ephemeralPublicKey []byte, publicKey []byte, sharedSecret []byte) []byte { + inputKey := make([]byte, 3*KeySize) + // ephemeral public key | recipient public key | shared secret. + subtle.ConstantTimeCopy(1, inputKey[:KeySize], ephemeralPublicKey) + subtle.ConstantTimeCopy(1, inputKey[KeySize:2*KeySize], publicKey) + subtle.ConstantTimeCopy(1, inputKey[2*KeySize:], sharedSecret) + hkdfReader := hkdf.New(sha512.New, inputKey, []byte{}, []byte(hkdfInfo)) + encryptionKey := make([]byte, aes256KeySize) + _, _ = io.ReadFull(hkdfReader, encryptionKey) + return encryptionKey +} + +func constantTimeIsZero(bytes []byte) bool { + isZero := byte(0) + for _, b := range bytes { + isZero |= b + } + return isZero == 0 +} + +// ENCODING/DECODING ciphertexts: + +// EncodeFieldsLength returns the length of the ciphertext encoding +// given the encrypted session key. +func EncodedFieldsLength(encryptedSessionKey []byte, v6 bool) int { + lenCipherFunction := 0 + if !v6 { + lenCipherFunction = 1 + } + return KeySize + 1 + len(encryptedSessionKey) + lenCipherFunction +} + +// EncodeField encodes x448 session key encryption fields as +// ephemeral x448 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey +// and writes it to writer. 
+func EncodeFields(writer io.Writer, ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, v6 bool) (err error) { + lenAlgorithm := 0 + if !v6 { + lenAlgorithm = 1 + } + if _, err = writer.Write(ephemeralPublicKey.Point); err != nil { + return err + } + if _, err = writer.Write([]byte{byte(len(encryptedSessionKey) + lenAlgorithm)}); err != nil { + return err + } + if !v6 { + if _, err = writer.Write([]byte{cipherFunction}); err != nil { + return err + } + } + if _, err = writer.Write(encryptedSessionKey); err != nil { + return err + } + return nil +} + +// DecodeField decodes a x448 session key encryption as +// ephemeral x448 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey. +func DecodeFields(reader io.Reader, v6 bool) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, err error) { + var buf [1]byte + ephemeralPublicKey = &PublicKey{ + Point: make([]byte, KeySize), + } + // 56 octets representing an ephemeral x448 public key. + if _, err = io.ReadFull(reader, ephemeralPublicKey.Point); err != nil { + return nil, nil, 0, err + } + // A one-octet size of the following fields. + if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + followingLen := buf[0] + // The one-octet algorithm identifier, if it was passed (in the case of a v3 PKESK packet). + if !v6 { + if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + cipherFunction = buf[0] + followingLen -= 1 + } + // The encrypted session key. + encryptedSessionKey = make([]byte, followingLen) + if _, err = io.ReadFull(reader, encryptedSessionKey); err != nil { + return nil, nil, 0, err + } + return ephemeralPublicKey, encryptedSessionKey, cipherFunction, nil +} diff --git a/vendor/github.com/alecthomas/go-check-sumtype/.goreleaser.yml b/vendor/github.com/alecthomas/go-check-sumtype/.goreleaser.yml new file mode 100644 index 0000000000..33bd03d060 --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/.goreleaser.yml @@ -0,0 +1,32 @@ +project_name: go-check-sumtype +release: + github: + owner: alecthomas + name: go-check-sumtype +env: + - CGO_ENABLED=0 +builds: +- goos: + - linux + - darwin + - windows + goarch: + - arm64 + - amd64 + - "386" + goarm: + - "6" + main: ./cmd/go-check-sumtype + binary: go-check-sumtype +archives: + - + format: tar.gz + name_template: '{{ .Binary }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ + .Arm }}{{ end }}' + files: + - COPYING + - README* +snapshot: + name_template: SNAPSHOT-{{ .Commit }} +checksum: + name_template: '{{ .ProjectName }}-{{ .Version }}-checksums.txt' diff --git a/vendor/github.com/alecthomas/go-check-sumtype/COPYING b/vendor/github.com/alecthomas/go-check-sumtype/COPYING new file mode 100644 index 0000000000..bb9c20a094 --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/COPYING @@ -0,0 +1,3 @@ +This project is dual-licensed under the Unlicense and MIT licenses. + +You may use this code under the terms of either license. 
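The x25519 and x448 packages vendored above expose the same small API: `GenerateKey` produces a recipient key pair (rejecting all-zero randomness), `Encrypt` wraps a session key under an ephemeral ECDH exchange plus HKDF and AES key wrap, and `EncodeFields`/`DecodeFields` handle the PKESK wire layout. Below is a minimal round-trip sketch against the vendored x448 signatures shown above; the 32-byte session key and the error handling are illustrative assumptions, not part of the vendored files:

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/x448"
)

func main() {
	// Recipient key pair.
	priv, err := x448.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Illustrative 32-byte session key; AES key wrapping needs a multiple of 8 bytes.
	sessionKey := make([]byte, 32)
	if _, err := rand.Read(sessionKey); err != nil {
		panic(err)
	}

	// Encrypt derives an AES-256 key via HKDF-SHA512 over
	// ephemeral pub | recipient pub | shared secret, then wraps the session key.
	ephPub, wrapped, err := x448.Encrypt(rand.Reader, &priv.PublicKey, sessionKey)
	if err != nil {
		panic(err)
	}

	// Wire layout: ephemeral public key | length octet | cipher octet (v3 only) | wrapped key.
	var buf bytes.Buffer
	if err := x448.EncodeFields(&buf, ephPub, wrapped, 0, true); err != nil { // v6: no cipher octet
		panic(err)
	}
	gotPub, gotWrapped, _, err := x448.DecodeFields(&buf, true)
	if err != nil {
		panic(err)
	}

	recovered, err := x448.Decrypt(priv, gotPub, gotWrapped)
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(recovered, sessionKey))
}
```

The v6 flag mirrors the code above: a v6 PKESK drops the one-octet cipher identifier from the encoded fields, which is why `EncodedFieldsLength` and the length octet both shrink by one in that case.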
diff --git a/vendor/github.com/alecthomas/go-check-sumtype/LICENSE-MIT b/vendor/github.com/alecthomas/go-check-sumtype/LICENSE-MIT new file mode 100644 index 0000000000..3b0a5dc09c --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/LICENSE-MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrew Gallant + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/alecthomas/go-check-sumtype/README.md b/vendor/github.com/alecthomas/go-check-sumtype/README.md new file mode 100644 index 0000000000..36614ef400 --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/README.md @@ -0,0 +1,120 @@ +**Note: This is a fork of the great project [go-sumtype](https://github.com/BurntSushi/go-sumtype) by BurntSushi.** +**The original seems largely unmaintained, and the changes in this fork are backwards incompatible.** + +# go-check-sumtype [![CI](https://github.com/alecthomas/go-check-sumtype/actions/workflows/ci.yml/badge.svg)](https://github.com/alecthomas/go-check-sumtype/actions/workflows/ci.yml) +A simple utility for running exhaustiveness checks on type switch statements. +Exhaustiveness checks are only run on interfaces that are declared to be +"sum types." + +Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org). + +This work was inspired by our code at +[Diffeo](https://diffeo.com). + +## Installation + +```go +$ go get github.com/alecthomas/go-check-sumtype +``` + +For usage info, just run the command: + +``` +$ go-check-sumtype +``` + +Typical usage might look like this: + +``` +$ go-check-sumtype $(go list ./... | grep -v vendor) +``` + +## Usage + +`go-check-sumtype` takes a list of Go package paths or files and looks for sum type +declarations in each package/file provided. Exhaustiveness checks are then +performed for each use of a declared sum type in a type switch statement. +Namely, `go-check-sumtype` will report an error for any type switch statement that +either lacks a `default` clause or does not account for all possible variants. + +Declarations are provided in comments like so: + +``` +//sumtype:decl +type MySumType interface { ... } +``` + +`MySumType` must be *sealed*. That is, part of its interface definition +contains an unexported method. + +`go-check-sumtype` will produce an error if any of the above is not true. + +For valid declarations, `go-check-sumtype` will look for all occurrences in which a +value of type `MySumType` participates in a type switch statement. 
In those +occurrences, it will attempt to detect whether the type switch is exhaustive +or not. If it's not, `go-check-sumtype` will report an error. For example, running +`go-check-sumtype` on this source file: + +```go +package main + +//sumtype:decl +type MySumType interface { + sealed() +} + +type VariantA struct{} + +func (*VariantA) sealed() {} + +type VariantB struct{} + +func (*VariantB) sealed() {} + +func main() { + switch MySumType(nil).(type) { + case *VariantA: + } +} +``` + +produces the following: + +``` +$ sumtype mysumtype.go +mysumtype.go:18:2: exhaustiveness check failed for sum type 'MySumType': missing cases for VariantB +``` + +Adding either a `default` clause or a clause to handle `*VariantB` will cause +exhaustive checks to pass. + +As a special case, if the type switch statement contains a `default` clause +that always panics, then exhaustiveness checks are still performed. + +## Details and motivation + +Sum types are otherwise known as discriminated unions. That is, a sum type is +a finite set of disjoint values. In type systems that support sum types, the +language will guarantee that if one has a sum type `T`, then its value must +be one of its variants. + +Go's type system does not support sum types. A typical proxy for representing +sum types in Go is to use an interface with an unexported method and define +each variant of the sum type in the same package to satisfy said interface. +This guarantees that the set of types that satisfy the interface is closed +at compile time. Performing case analysis on these types is then done with +a type switch statement, e.g., `switch x.(type) { ... }`. Each clause of the +type switch corresponds to a *variant* of the sum type. The downside of this +approach is that Go's type system is not aware of the set of variants, so it +cannot tell you whether case analysis over a sum type is complete or not. + +The `go-check-sumtype` command recognizes this pattern, but it needs a small amount +of help to recognize which interfaces should be treated as sum types, which +is why the `//sumtype:decl` annotation is required. `go-check-sumtype` will +figure out all of the variants of a sum type by finding the set of types +defined in the same package that satisfy the interface specified by the +declaration. + +The `go-check-sumtype` command will prove its worth when you need to add a variant +to an existing sum type. Running `go-check-sumtype` will tell you immediately which +case analyses need to be updated to account for the new variant. diff --git a/vendor/github.com/alecthomas/go-check-sumtype/UNLICENSE b/vendor/github.com/alecthomas/go-check-sumtype/UNLICENSE new file mode 100644 index 0000000000..68a49daad8 --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/UNLICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to <http://unlicense.org/>
diff --git a/vendor/github.com/alecthomas/go-check-sumtype/check.go b/vendor/github.com/alecthomas/go-check-sumtype/check.go
new file mode 100644
index 0000000000..21d751af42
--- /dev/null
+++ b/vendor/github.com/alecthomas/go-check-sumtype/check.go
@@ -0,0 +1,184 @@
+package gochecksumtype
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"sort"
+	"strings"
+
+	"golang.org/x/tools/go/packages"
+)
+
+// inexhaustiveError is returned from check for each occurrence of inexhaustive
+// case analysis in a Go type switch statement.
+type inexhaustiveError struct {
+	Position token.Position
+	Def      sumTypeDef
+	Missing  []types.Object
+}
+
+func (e inexhaustiveError) Pos() token.Position { return e.Position }
+func (e inexhaustiveError) Error() string {
+	return fmt.Sprintf(
+		"%s: exhaustiveness check failed for sum type %q (from %s): missing cases for %s",
+		e.Pos(), e.Def.Decl.TypeName, e.Def.Decl.Pos, strings.Join(e.Names(), ", "))
+}
+
+// Names returns a sorted list of names corresponding to the missing variant
+// cases.
+func (e inexhaustiveError) Names() []string {
+	var list []string
+	for _, o := range e.Missing {
+		list = append(list, o.Name())
+	}
+	sort.Strings(list)
+	return list
+}
+
+// check does exhaustiveness checking for the given sum type definitions in the
+// given package. Every instance of inexhaustive case analysis is returned.
+func check(pkg *packages.Package, defs []sumTypeDef) []error {
+	var errs []error
+	for _, astfile := range pkg.Syntax {
+		ast.Inspect(astfile, func(n ast.Node) bool {
+			swtch, ok := n.(*ast.TypeSwitchStmt)
+			if !ok {
+				return true
+			}
+			if err := checkSwitch(pkg, defs, swtch); err != nil {
+				errs = append(errs, err)
+			}
+			return true
+		})
+	}
+	return errs
+}
+
+// checkSwitch performs an exhaustiveness check on the given type switch
+// statement. If the type switch is used on a sum type and does not cover
+// all variants of that sum type, then an error is returned indicating which
+// variants were missed.
+//
+// Note that if the type switch contains a non-panicking default case, then
+// exhaustiveness checks are disabled.
+func checkSwitch(
+	pkg *packages.Package,
+	defs []sumTypeDef,
+	swtch *ast.TypeSwitchStmt,
+) error {
+	def, missing := missingVariantsInSwitch(pkg, defs, swtch)
+	if len(missing) > 0 {
+		return inexhaustiveError{
+			Position: pkg.Fset.Position(swtch.Pos()),
+			Def:      *def,
+			Missing:  missing,
+		}
+	}
+	return nil
+}
+
+// missingVariantsInSwitch returns a list of missing variants corresponding to
+// the given switch statement. The corresponding sum type definition is also
+// returned. (If no sum type definition could be found, then no exhaustiveness
+// checks are performed, and therefore, no missing variants are returned.)
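`check` and `checkSwitch` above are normally reached through this package's `Run` entry point, added later in this diff in run.go. A hedged driver sketch follows; the exact `packages.Config` mode flags are an assumption, and any load that populates `Syntax`, `TypesInfo`, `Types`, and `Fset` on each package should work:

```go
package main

import (
	"fmt"
	"os"

	gochecksumtype "github.com/alecthomas/go-check-sumtype"
	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedSyntax |
			packages.NeedTypes | packages.NeedTypesInfo | packages.NeedImports | packages.NeedDeps,
	}
	pkgs, err := packages.Load(cfg, os.Args[1:]...)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Run aggregates declaration/definition errors plus the
	// inexhaustiveness errors produced by check.
	for _, e := range gochecksumtype.Run(pkgs) {
		fmt.Fprintln(os.Stderr, e)
	}
}
```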
+func missingVariantsInSwitch( + pkg *packages.Package, + defs []sumTypeDef, + swtch *ast.TypeSwitchStmt, +) (*sumTypeDef, []types.Object) { + asserted := findTypeAssertExpr(swtch) + ty := pkg.TypesInfo.TypeOf(asserted) + def := findDef(defs, ty) + if def == nil { + // We couldn't find a corresponding sum type, so there's + // nothing we can do to check it. + return nil, nil + } + variantExprs, hasDefault := switchVariants(swtch) + if hasDefault && !defaultClauseAlwaysPanics(swtch) { + // A catch-all case defeats all exhaustiveness checks. + return def, nil + } + var variantTypes []types.Type + for _, expr := range variantExprs { + variantTypes = append(variantTypes, pkg.TypesInfo.TypeOf(expr)) + } + return def, def.missing(variantTypes) +} + +// switchVariants returns all case expressions found in a type switch. This +// includes expressions from cases that have a list of expressions. +func switchVariants(swtch *ast.TypeSwitchStmt) (exprs []ast.Expr, hasDefault bool) { + for _, stmt := range swtch.Body.List { + clause := stmt.(*ast.CaseClause) + if clause.List == nil { + hasDefault = true + } else { + exprs = append(exprs, clause.List...) + } + } + return +} + +// defaultClauseAlwaysPanics returns true if the given switch statement has a +// default clause that always panics. Note that this is done on a best-effort +// basis. While there will never be any false positives, there may be false +// negatives. +// +// If the given switch statement has no default clause, then this function +// panics. +func defaultClauseAlwaysPanics(swtch *ast.TypeSwitchStmt) bool { + var clause *ast.CaseClause + for _, stmt := range swtch.Body.List { + c := stmt.(*ast.CaseClause) + if c.List == nil { + clause = c + break + } + } + if clause == nil { + panic("switch statement has no default clause") + } + if len(clause.Body) != 1 { + return false + } + exprStmt, ok := clause.Body[0].(*ast.ExprStmt) + if !ok { + return false + } + callExpr, ok := exprStmt.X.(*ast.CallExpr) + if !ok { + return false + } + fun, ok := callExpr.Fun.(*ast.Ident) + if !ok { + return false + } + return fun.Name == "panic" +} + +// findTypeAssertExpr extracts the expression that is being type asserted from a +// type swtich statement. +func findTypeAssertExpr(swtch *ast.TypeSwitchStmt) ast.Expr { + var expr ast.Expr + if assign, ok := swtch.Assign.(*ast.AssignStmt); ok { + expr = assign.Rhs[0] + } else { + expr = swtch.Assign.(*ast.ExprStmt).X + } + return expr.(*ast.TypeAssertExpr).X +} + +// findDef returns the sum type definition corresponding to the given type. If +// no such sum type definition exists, then nil is returned. +func findDef(defs []sumTypeDef, needle types.Type) *sumTypeDef { + for i := range defs { + def := &defs[i] + if types.Identical(needle.Underlying(), def.Ty) { + return def + } + } + return nil +} diff --git a/vendor/github.com/alecthomas/go-check-sumtype/decl.go b/vendor/github.com/alecthomas/go-check-sumtype/decl.go new file mode 100644 index 0000000000..9dec9eefd5 --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/decl.go @@ -0,0 +1,69 @@ +package gochecksumtype + +import ( + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/packages" +) + +// sumTypeDecl is a declaration of a sum type in a Go source file. +type sumTypeDecl struct { + // The package path that contains this decl. + Package *packages.Package + // The type named by this decl. + TypeName string + // Position where the declaration was found. 
+ Pos token.Position +} + +// Location returns a short string describing where this declaration was found. +func (d sumTypeDecl) Location() string { + return d.Pos.String() +} + +// findSumTypeDecls searches every package given for sum type declarations of +// the form `sumtype:decl`. +func findSumTypeDecls(pkgs []*packages.Package) ([]sumTypeDecl, error) { + var decls []sumTypeDecl + var retErr error + for _, pkg := range pkgs { + for _, file := range pkg.Syntax { + ast.Inspect(file, func(node ast.Node) bool { + if node == nil { + return true + } + decl, ok := node.(*ast.GenDecl) + if !ok || decl.Doc == nil { + return true + } + var tspec *ast.TypeSpec + for _, spec := range decl.Specs { + ts, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + tspec = ts + } + for _, line := range decl.Doc.List { + if !strings.HasPrefix(line.Text, "//sumtype:decl") { + continue + } + pos := pkg.Fset.Position(decl.Pos()) + if tspec == nil { + retErr = notFoundError{Decl: sumTypeDecl{Package: pkg, Pos: pos}} + return false + } + pos = pkg.Fset.Position(tspec.Pos()) + decl := sumTypeDecl{Package: pkg, TypeName: tspec.Name.Name, Pos: pos} + debugf("found sum type decl: %s.%s", decl.Package.PkgPath, decl.TypeName) + decls = append(decls, decl) + break + } + return true + }) + } + } + return decls, retErr +} diff --git a/vendor/github.com/alecthomas/go-check-sumtype/def.go b/vendor/github.com/alecthomas/go-check-sumtype/def.go new file mode 100644 index 0000000000..24729ac01b --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/def.go @@ -0,0 +1,173 @@ +package gochecksumtype + +import ( + "flag" + "fmt" + "go/token" + "go/types" + "log" +) + +var debug = flag.Bool("debug", false, "enable debug logging") + +func debugf(format string, args ...interface{}) { + if *debug { + log.Printf(format, args...) + } +} + +// Error as returned by Run() +type Error interface { + error + Pos() token.Position +} + +// unsealedError corresponds to a declared sum type whose interface is not +// sealed. A sealed interface requires at least one unexported method. +type unsealedError struct { + Decl sumTypeDecl +} + +func (e unsealedError) Pos() token.Position { return e.Decl.Pos } +func (e unsealedError) Error() string { + return fmt.Sprintf( + "%s: interface '%s' is not sealed "+ + "(sealing requires at least one unexported method)", + e.Decl.Location(), e.Decl.TypeName) +} + +// notFoundError corresponds to a declared sum type whose type definition +// could not be found in the same Go package. +type notFoundError struct { + Decl sumTypeDecl +} + +func (e notFoundError) Pos() token.Position { return e.Decl.Pos } +func (e notFoundError) Error() string { + return fmt.Sprintf("%s: type '%s' is not defined", e.Decl.Location(), e.Decl.TypeName) +} + +// notInterfaceError corresponds to a declared sum type that does not +// correspond to an interface. +type notInterfaceError struct { + Decl sumTypeDecl +} + +func (e notInterfaceError) Pos() token.Position { return e.Decl.Pos } +func (e notInterfaceError) Error() string { + return fmt.Sprintf("%s: type '%s' is not an interface", e.Decl.Location(), e.Decl.TypeName) +} + +// sumTypeDef corresponds to the definition of a Go interface that is +// interpreted as a sum type. Its variants are determined by finding all types +// that implement said interface in the same package. 
+type sumTypeDef struct { + Decl sumTypeDecl + Ty *types.Interface + Variants []types.Object +} + +// findSumTypeDefs attempts to find a Go type definition for each of the given +// sum type declarations. If no such sum type definition could be found for +// any of the given declarations, then an error is returned. +func findSumTypeDefs(decls []sumTypeDecl) ([]sumTypeDef, []error) { + var defs []sumTypeDef + var errs []error + for _, decl := range decls { + def, err := newSumTypeDef(decl.Package.Types, decl) + if err != nil { + errs = append(errs, err) + continue + } + if def == nil { + errs = append(errs, notFoundError{decl}) + continue + } + defs = append(defs, *def) + } + return defs, errs +} + +// newSumTypeDef attempts to extract a sum type definition from a single +// package. If no such type corresponds to the given decl, then this function +// returns a nil def and a nil error. +// +// If the decl corresponds to a type that isn't an interface containing at +// least one unexported method, then this returns an error. +func newSumTypeDef(pkg *types.Package, decl sumTypeDecl) (*sumTypeDef, error) { + obj := pkg.Scope().Lookup(decl.TypeName) + if obj == nil { + return nil, nil + } + iface, ok := obj.Type().Underlying().(*types.Interface) + if !ok { + return nil, notInterfaceError{decl} + } + hasUnexported := false + for i := 0; i < iface.NumMethods(); i++ { + if !iface.Method(i).Exported() { + hasUnexported = true + break + } + } + if !hasUnexported { + return nil, unsealedError{decl} + } + def := &sumTypeDef{ + Decl: decl, + Ty: iface, + } + debugf("searching for variants of %s.%s\n", pkg.Path(), decl.TypeName) + for _, name := range pkg.Scope().Names() { + obj, ok := pkg.Scope().Lookup(name).(*types.TypeName) + if !ok { + continue + } + ty := obj.Type() + if types.Identical(ty.Underlying(), iface) { + continue + } + // Skip generic types. + if named, ok := ty.(*types.Named); ok && named.TypeParams() != nil { + continue + } + if types.Implements(ty, iface) || types.Implements(types.NewPointer(ty), iface) { + debugf(" found variant: %s.%s\n", pkg.Path(), obj.Name()) + def.Variants = append(def.Variants, obj) + } + } + return def, nil +} + +func (def *sumTypeDef) String() string { + return def.Decl.TypeName +} + +// missing returns a list of variants in this sum type that are not in the +// given list of types. +func (def *sumTypeDef) missing(tys []types.Type) []types.Object { + // TODO(ag): This is O(n^2). Fix that. /shrug + var missing []types.Object + for _, v := range def.Variants { + found := false + varty := indirect(v.Type()) + for _, ty := range tys { + ty = indirect(ty) + if types.Identical(varty, ty) { + found = true + } + } + if !found { + missing = append(missing, v) + } + } + return missing +} + +// indirect dereferences through an arbitrary number of pointer types. +func indirect(ty types.Type) types.Type { + if ty, ok := ty.(*types.Pointer); ok { + return indirect(ty.Elem()) + } + return ty +} diff --git a/vendor/github.com/alecthomas/go-check-sumtype/doc.go b/vendor/github.com/alecthomas/go-check-sumtype/doc.go new file mode 100644 index 0000000000..2b6e86764e --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/doc.go @@ -0,0 +1,53 @@ +/* +sumtype takes a list of Go package paths or files and looks for sum type +declarations in each package/file provided. Exhaustiveness checks are then +performed for each use of a declared sum type in a type switch statement. 
+Namely, sumtype will report an error for any type switch statement that +either lacks a default clause or does not account for all possible variants. + +Declarations are provided in comments like so: + + //sumtype:decl + type MySumType interface { ... } + +MySumType must be *sealed*. That is, part of its interface definition contains +an unexported method. + +sumtype will produce an error if any of the above is not true. + +For valid declarations, sumtype will look for all occurrences in which a +value of type MySumType participates in a type switch statement. In those +occurrences, it will attempt to detect whether the type switch is exhaustive +or not. If it's not, sumtype will report an error. For example: + + $ cat mysumtype.go + package gochecksumtype + + //sumtype:decl + type MySumType interface { + sealed() + } + + type VariantA struct{} + + func (a *VariantA) sealed() {} + + type VariantB struct{} + + func (b *VariantB) sealed() {} + + func main() { + switch MySumType(nil).(type) { + case *VariantA: + } + } + $ sumtype mysumtype.go + mysumtype.go:18:2: exhaustiveness check failed for sum type 'MySumType': missing cases for VariantB + +Adding either a default clause or a clause to handle *VariantB will cause +exhaustive checks to pass. + +As a special case, if the type switch statement contains a default clause +that always panics, then exhaustiveness checks are still performed. +*/ +package gochecksumtype diff --git a/vendor/github.com/alecthomas/go-check-sumtype/run.go b/vendor/github.com/alecthomas/go-check-sumtype/run.go new file mode 100644 index 0000000000..fdcb643c5d --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/run.go @@ -0,0 +1,26 @@ +package gochecksumtype + +import "golang.org/x/tools/go/packages" + +// Run sumtype checking on the given packages. +func Run(pkgs []*packages.Package) []error { + var errs []error + + decls, err := findSumTypeDecls(pkgs) + if err != nil { + return []error{err} + } + + defs, defErrs := findSumTypeDefs(decls) + errs = append(errs, defErrs...) + if len(defs) == 0 { + return errs + } + + for _, pkg := range pkgs { + if pkgErrs := check(pkg, defs); pkgErrs != nil { + errs = append(errs, pkgErrs...) + } + } + return errs +} diff --git a/vendor/github.com/alexkohler/nakedret/v2/.gitignore b/vendor/github.com/alexkohler/nakedret/v2/.gitignore new file mode 100644 index 0000000000..b4822913a0 --- /dev/null +++ b/vendor/github.com/alexkohler/nakedret/v2/.gitignore @@ -0,0 +1,8 @@ +# editor specific +.vscode + +# binary +/nakedret + +# usage video for docs +.github/images diff --git a/vendor/github.com/alexkohler/nakedret/v2/LICENSE b/vendor/github.com/alexkohler/nakedret/v2/LICENSE new file mode 100644 index 0000000000..9310fbcffb --- /dev/null +++ b/vendor/github.com/alexkohler/nakedret/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Alex Kohler + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/alexkohler/nakedret/v2/README.md b/vendor/github.com/alexkohler/nakedret/v2/README.md new file mode 100644 index 0000000000..e30a0cde76 --- /dev/null +++ b/vendor/github.com/alexkohler/nakedret/v2/README.md @@ -0,0 +1,125 @@ +# nakedret + +nakedret is a Go static analysis tool to find naked returns in functions greater than a specified function length. + +## Installation +Install Nakedret via go install: + +```cmd +go install github.com/alexkohler/nakedret/cmd/nakedret@latest +``` + +If you have not already added your `GOPATH/bin` directory to your `PATH` environment variable then you will need to do so. + +Windows (cmd): +```cmd +set PATH=%PATH%;C:\your\GOPATH\bin +``` + +Bash (you can verify a path has been set): +```Bash +# Check if nakedret is on PATH +which nakedret +export PATH=$PATH:/your/GOPATH/bin #to set path if it does not exist +``` + +## Usage + +Similar to other Go static anaylsis tools (such as `golint`, `go vet`), nakedret can be invoked with one or more filenames, directories, or packages named by its import path. Nakedret also supports the `...` wildcard. + + nakedret [flags] files/directories/packages + +Currently, the only flag supported is -l, which is an optional numeric flag to specify the maximum length a function can be (in terms of line length). If not specified, it defaults to 5. + +It can also be run using `go vet`: + +```shell +go vet -vettool=$(which nakedret) ./... +``` + +## Purpose + +As noted in Go's [Code Review comments](https://github.com/golang/go/wiki/CodeReviewComments#named-result-parameters): + +> Naked returns are okay if the function is a handful of lines. Once it's a medium sized function, be explicit with your return +> values. Corollary: it's not worth it to name result parameters just because it enables you to use naked returns. Clarity of docs is always more important than saving a line or two in your function. + +This tool aims to catch naked returns on non-trivial functions. + +## Example + +Let's take the `types` package in the Go source as an example: + +```Bash +$ nakedret -l 25 types/ +types/check.go:245 checkFiles naked returns on 26 line function +types/typexpr.go:443 collectParams naked returns on 53 line function +types/stmt.go:275 caseTypes naked returns on 27 line function +types/lookup.go:275 MissingMethod naked returns on 39 line function +``` + +Below is one of the not so intuitive uses of naked returns in `types/lookup.go` found by nakedret (nakedret will return the line number of the last naked return in the function): + + +```Go +func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType bool) { + // fast path for common case + if T.Empty() { + return + } + + // TODO(gri) Consider using method sets here. Might be more efficient. 
+ + if ityp, _ := V.Underlying().(*Interface); ityp != nil { + // TODO(gri) allMethods is sorted - can do this more efficiently + for _, m := range T.allMethods { + _, obj := lookupMethod(ityp.allMethods, m.pkg, m.name) + switch { + case obj == nil: + if static { + return m, false + } + case !Identical(obj.Type(), m.typ): + return m, true + } + } + return + } + + // A concrete type implements T if it implements all methods of T. + for _, m := range T.allMethods { + obj, _, _ := lookupFieldOrMethod(V, false, m.pkg, m.name) + + f, _ := obj.(*Func) + if f == nil { + return m, false + } + + if !Identical(f.typ, m.typ) { + return m, true + } + } + + return +} +``` + +## TODO + +- Unit tests (may require some refactoring to do correctly) +- supporting toggling of `build.Context.UseAllFiles` may be useful for some. +- Configuration on whether or not to run on test files +- Vim quickfix format? + + +## Contributing + +Pull requests welcome! + + +## Other static analysis tools + +If you've enjoyed nakedret, take a look at my other static anaylsis tools! + +- [unimport](https://github.com/alexkohler/unimport) - Finds unnecessary import aliases +- [prealloc](https://github.com/alexkohler/prealloc) - Finds slice declarations that could potentially be preallocated. diff --git a/vendor/github.com/alexkohler/nakedret/v2/import.go b/vendor/github.com/alexkohler/nakedret/v2/import.go new file mode 100644 index 0000000000..dea8423336 --- /dev/null +++ b/vendor/github.com/alexkohler/nakedret/v2/import.go @@ -0,0 +1,310 @@ +package nakedret + +/* + +This file holds a direct copy of the import path matching code of +https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be +replaced when https://golang.org/issue/8768 is resolved. + +It has been updated to follow upstream changes in a few ways. + +*/ + +import ( + "fmt" + "go/build" + "log" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" +) + +var buildContext = build.Default + +var ( + goroot = filepath.Clean(runtime.GOROOT()) + gorootSrc = filepath.Join(goroot, "src") +) + +// importPathsNoDotExpansion returns the import paths to use for the given +// command line, but it does no ... expansion. +func importPathsNoDotExpansion(args []string) []string { + if len(args) == 0 { + return []string{"."} + } + var out []string + for _, a := range args { + // Arguments are supposed to be import paths, but + // as a courtesy to Windows developers, rewrite \ to / + // in command-line arguments. Handles .\... and so on. + if filepath.Separator == '\\' { + a = strings.Replace(a, `\`, `/`, -1) + } + + // Put argument in canonical form, but preserve leading ./. + if strings.HasPrefix(a, "./") { + a = "./" + path.Clean(a) + if a == "./." { + a = "." + } + } else { + a = path.Clean(a) + } + if a == "all" || a == "std" { + out = append(out, allPackages(a)...) + continue + } + out = append(out, a) + } + return out +} + +// importPaths returns the import paths to use for the given command line. +func importPaths(args []string) []string { + args = importPathsNoDotExpansion(args) + var out []string + for _, a := range args { + if strings.Contains(a, "...") { + if build.IsLocalImport(a) { + out = append(out, allPackagesInFS(a)...) + } else { + out = append(out, allPackages(a)...) + } + continue + } + out = append(out, a) + } + return out +} + +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. 
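To make the limited glob concrete before the implementation that follows: `...` is the only wildcard, and a trailing `/...` also matches the bare prefix, mirroring the `go` tool's path patterns. A hedged illustration; `matchPattern` is unexported, so a snippet like this would have to live in the package's own tests, and the paths are made up:

```go
package nakedret

import "testing"

func TestMatchPattern(t *testing.T) {
	match := matchPattern("github.com/foo/...")
	cases := map[string]bool{
		"github.com/foo":     true,  // special case: "foo/..." matches "foo" itself
		"github.com/foo/bar": true,  // "..." matches any string
		"github.com/foobar":  false, // no path boundary after "foo"
	}
	for name, want := range cases {
		if got := match(name); got != want {
			t.Errorf("match(%q) = %v, want %v", name, got, want)
		}
	}
}
```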
+func matchPattern(pattern string) func(name string) bool { + re := regexp.QuoteMeta(pattern) + re = strings.Replace(re, `\.\.\.`, `.*`, -1) + // Special case: foo/... matches foo too. + if strings.HasSuffix(re, `/.*`) { + re = re[:len(re)-len(`/.*`)] + `(/.*)?` + } + reg := regexp.MustCompile(`^` + re + `$`) + return func(name string) bool { + return reg.MatchString(name) + } +} + +// hasPathPrefix reports whether the path s begins with the +// elements in prefix. +func hasPathPrefix(s, prefix string) bool { + switch { + default: + return false + case len(s) == len(prefix): + return s == prefix + case len(s) > len(prefix): + if prefix != "" && prefix[len(prefix)-1] == '/' { + return strings.HasPrefix(s, prefix) + } + return s[len(prefix)] == '/' && s[:len(prefix)] == prefix + } +} + +// treeCanMatchPattern(pattern)(name) reports whether +// name or children of name can possibly match pattern. +// Pattern is the same limited glob accepted by matchPattern. +func treeCanMatchPattern(pattern string) func(name string) bool { + wildCard := false + if i := strings.Index(pattern, "..."); i >= 0 { + wildCard = true + pattern = pattern[:i] + } + return func(name string) bool { + return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || + wildCard && strings.HasPrefix(name, pattern) + } +} + +// allPackages returns all the packages that can be found +// under the $GOPATH directories and $GOROOT matching pattern. +// The pattern is either "all" (all packages), "std" (standard packages) +// or a path including "...". +func allPackages(pattern string) []string { + pkgs := matchPackages(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +func matchPackages(pattern string) []string { + match := func(string) bool { return true } + treeCanMatch := func(string) bool { return true } + if pattern != "all" && pattern != "std" { + match = matchPattern(pattern) + treeCanMatch = treeCanMatchPattern(pattern) + } + + have := map[string]bool{ + "builtin": true, // ignore pseudo-package that exists only for documentation + } + if !buildContext.CgoEnabled { + have["runtime/cgo"] = true // ignore during walk + } + var pkgs []string + + // Commands + cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator) + filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == cmd { + return nil + } + name := path[len(cmd):] + if !treeCanMatch(name) { + return filepath.SkipDir + } + // Commands are all in cmd/, not in subdirectories. + if strings.Contains(name, string(filepath.Separator)) { + return filepath.SkipDir + } + + // We use, e.g., cmd/gofmt as the pseudo import path for gofmt. + name = "cmd/" + name + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = buildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + + for _, src := range buildContext.SrcDirs() { + if (pattern == "std" || pattern == "cmd") && src != gorootSrc { + continue + } + src = filepath.Clean(src) + string(filepath.Separator) + root := src + if pattern == "cmd" { + root += "cmd" + string(filepath.Separator) + } + filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == src { + return nil + } + + // Avoid .foo, _foo, testdata and vendor directory trees. 
+ _, elem := filepath.Split(path) + if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" || elem == "vendor" { + return filepath.SkipDir + } + + name := filepath.ToSlash(path[len(src):]) + if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") { + // The name "std" is only the standard library. + // If the name is cmd, it's the root of the command tree. + return filepath.SkipDir + } + if !treeCanMatch(name) { + return filepath.SkipDir + } + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = buildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); noGo { + return nil + } + } + pkgs = append(pkgs, name) + return nil + }) + } + return pkgs +} + +// allPackagesInFS is like allPackages but is passed a pattern +// beginning ./ or ../, meaning it should scan the tree rooted +// at the given directory. There are ... in the pattern too. +func allPackagesInFS(pattern string) []string { + pkgs := matchPackagesInFS(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +func matchPackagesInFS(pattern string) []string { + // Find directory to begin the scan. + // Could be smarter but this one optimization + // is enough for now, since ... is usually at the + // end of a path. + i := strings.Index(pattern, "...") + dir, _ := path.Split(pattern[:i]) + + // pattern begins with ./ or ../. + // path.Clean will discard the ./ but not the ../. + // We need to preserve the ./ for pattern matching + // and in the returned import paths. + prefix := "" + if strings.HasPrefix(pattern, "./") { + prefix = "./" + } + match := matchPattern(pattern) + + var pkgs []string + filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() { + return nil + } + if path == dir { + // filepath.Walk starts at dir and recurses. For the recursive case, + // the path is the result of filepath.Join, which calls filepath.Clean. + // The initial case is not Cleaned, though, so we do this explicitly. + // + // This converts a path like "./io/" to "io". Without this step, running + // "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io + // package, because prepending the prefix "./" to the unclean path would + // result in "././io", and match("././io") returns false. + path = filepath.Clean(path) + } + + // Avoid .foo, _foo, testdata and vendor directory trees, but do not avoid "." or "..". + _, elem := filepath.Split(path) + dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." 
+ if dot || strings.HasPrefix(elem, "_") || elem == "testdata" || elem == "vendor" { + return filepath.SkipDir + } + + name := prefix + filepath.ToSlash(path) + if !match(name) { + return nil + } + if _, err = build.ImportDir(path, 0); err != nil { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + return pkgs +} diff --git a/vendor/github.com/alexkohler/nakedret/v2/nakedret.go b/vendor/github.com/alexkohler/nakedret/v2/nakedret.go new file mode 100644 index 0000000000..f78bb8cb6c --- /dev/null +++ b/vendor/github.com/alexkohler/nakedret/v2/nakedret.go @@ -0,0 +1,309 @@ +package nakedret + +import ( + "bytes" + "errors" + "flag" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/printer" + "go/token" + "log" + "os" + "path/filepath" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const pwd = "./" + +func NakedReturnAnalyzer(defaultLines uint) *analysis.Analyzer { + nakedRet := &NakedReturnRunner{} + flags := flag.NewFlagSet("nakedret", flag.ExitOnError) + flags.UintVar(&nakedRet.MaxLength, "l", defaultLines, "maximum number of lines for a naked return function") + var analyzer = &analysis.Analyzer{ + Name: "nakedret", + Doc: "Checks that functions with naked returns are not longer than a maximum size (can be zero).", + Run: nakedRet.run, + Flags: *flags, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } + return analyzer +} + +type NakedReturnRunner struct { + MaxLength uint +} + +func (n *NakedReturnRunner) run(pass *analysis.Pass) (any, error) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ // filter needed nodes: visit only them + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + (*ast.ReturnStmt)(nil), + } + retVis := &returnsVisitor{ + pass: pass, + f: pass.Fset, + maxLength: n.MaxLength, + } + inspector.Nodes(nodeFilter, retVis.NodesVisit) + return nil, nil +} + +type returnsVisitor struct { + pass *analysis.Pass + f *token.FileSet + maxLength uint + + // functions contains funcInfo for each nested function definition encountered while visiting the AST. 
+ functions []funcInfo +} + +type funcInfo struct { + // Details of the function we're currently dealing with + funcType *ast.FuncType + funcName string + funcLength int + reportNaked bool +} + +func checkNakedReturns(args []string, maxLength *uint, setExitStatus bool) error { + + fset := token.NewFileSet() + + files, err := parseInput(args, fset) + if err != nil { + return fmt.Errorf("could not parse input: %v", err) + } + + if maxLength == nil { + return errors.New("max length nil") + } + + analyzer := NakedReturnAnalyzer(*maxLength) + pass := &analysis.Pass{ + Analyzer: analyzer, + Fset: fset, + Files: files, + Report: func(d analysis.Diagnostic) { + log.Printf("%s:%d: %s", fset.Position(d.Pos).Filename, fset.Position(d.Pos).Line, d.Message) + }, + ResultOf: map[*analysis.Analyzer]any{}, + } + result, err := inspect.Analyzer.Run(pass) + if err != nil { + return err + } + pass.ResultOf[inspect.Analyzer] = result + + _, err = analyzer.Run(pass) + if err != nil { + return err + } + + return nil +} + +func parseInput(args []string, fset *token.FileSet) ([]*ast.File, error) { + var directoryList []string + var fileMode bool + files := make([]*ast.File, 0) + + if len(args) == 0 { + directoryList = append(directoryList, pwd) + } else { + for _, arg := range args { + if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) { + + for _, dirname := range allPackagesInFS(arg) { + directoryList = append(directoryList, dirname) + } + + } else if isDir(arg) { + directoryList = append(directoryList, arg) + + } else if exists(arg) { + if strings.HasSuffix(arg, ".go") { + fileMode = true + f, err := parser.ParseFile(fset, arg, nil, 0) + if err != nil { + return nil, err + } + files = append(files, f) + } else { + return nil, fmt.Errorf("invalid file %v specified", arg) + } + } else { + + // TODO clean this up a bit + imPaths := importPaths([]string{arg}) + for _, importPath := range imPaths { + pkg, err := build.Import(importPath, ".", 0) + if err != nil { + return nil, err + } + var stringFiles []string + stringFiles = append(stringFiles, pkg.GoFiles...) + // files = append(files, pkg.CgoFiles...) + stringFiles = append(stringFiles, pkg.TestGoFiles...) + if pkg.Dir != "." 
{
+					for i, f := range stringFiles {
+						stringFiles[i] = filepath.Join(pkg.Dir, f)
+					}
+				}
+
+				fileMode = true
+				for _, stringFile := range stringFiles {
+					f, err := parser.ParseFile(fset, stringFile, nil, 0)
+					if err != nil {
+						return nil, err
+					}
+					files = append(files, f)
+				}
+
+			}
+		}
+	}
+	}
+
+	// if we're not in file mode, then we need to grab each and every package in each directory
+	// we can to grab all the files
+	if !fileMode {
+		for _, fpath := range directoryList {
+			pkgs, err := parser.ParseDir(fset, fpath, nil, 0)
+			if err != nil {
+				return nil, err
+			}
+
+			for _, pkg := range pkgs {
+				for _, f := range pkg.Files {
+					files = append(files, f)
+				}
+			}
+		}
+	}
+
+	return files, nil
+}
+
+func isDir(filename string) bool {
+	fi, err := os.Stat(filename)
+	return err == nil && fi.IsDir()
+}
+
+func exists(filename string) bool {
+	_, err := os.Stat(filename)
+	return err == nil
+}
+
+func hasNamedReturns(funcType *ast.FuncType) bool {
+	if funcType == nil || funcType.Results == nil {
+		return false
+	}
+	for _, field := range funcType.Results.List {
+		for _, ident := range field.Names {
+			if ident != nil {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func nestedFuncName(functions []funcInfo) string {
+	var names []string
+	for _, f := range functions {
+		names = append(names, f.funcName)
+	}
+	return strings.Join(names, ".")
+}
+
+func nakedReturnFix(s *ast.ReturnStmt, funcType *ast.FuncType) *ast.ReturnStmt {
+	var nameExprs []ast.Expr
+	for _, result := range funcType.Results.List {
+		for _, ident := range result.Names {
+			if ident != nil {
+				nameExprs = append(nameExprs, ident)
+			}
+		}
+	}
+	var sFix = *s
+	sFix.Results = nameExprs
+	return &sFix
+}
+
+func (v *returnsVisitor) NodesVisit(node ast.Node, push bool) bool {
+	var (
+		funcType *ast.FuncType
+		funcName string
+	)
+	switch s := node.(type) {
+	case *ast.FuncDecl:
+		// We've found a function
+		funcType = s.Type
+		funcName = s.Name.Name
+	case *ast.FuncLit:
+		// We've found a function literal
+		funcType = s.Type
+		file := v.f.File(s.Pos())
+		funcName = fmt.Sprintf("<func():%v>", file.Position(s.Pos()).Line)
+	case *ast.ReturnStmt:
+		// We've found a possibly naked return statement
+		fun := v.functions[len(v.functions)-1]
+		funName := nestedFuncName(v.functions)
+		if fun.reportNaked && len(s.Results) == 0 && push {
+			sFix := nakedReturnFix(s, fun.funcType)
+			b := &bytes.Buffer{}
+			err := printer.Fprint(b, v.f, sFix)
+			if err != nil {
+				log.Printf("failed to format named return fix: %s", err)
+			}
+			v.pass.Report(analysis.Diagnostic{
+				Pos:     s.Pos(),
+				End:     s.End(),
+				Message: fmt.Sprintf("naked return in func `%s` with %d lines of code", funName, fun.funcLength),
+				SuggestedFixes: []analysis.SuggestedFix{{
+					Message: "explicit return statement",
+					TextEdits: []analysis.TextEdit{{
+						Pos:     s.Pos(),
+						End:     s.End(),
+						NewText: b.Bytes()}},
+				}},
+			})
+		}
+	}
+
+	if !push {
+		if funcType == nil {
+			return false
+		}
+		// Pop function info
+		v.functions = v.functions[:len(v.functions)-1]
+		return false
+	}
+
+	if push && funcType != nil {
+		// Push function info to track returns for this function
+		file := v.f.File(node.Pos())
+		length := file.Position(node.End()).Line - file.Position(node.Pos()).Line
+		if length == 0 {
+			// consider functions that finish on the same line as they start as single line functions, not zero lines!
+ length = 1 + } + v.functions = append(v.functions, funcInfo{ + funcType: funcType, + funcName: funcName, + funcLength: length, + reportNaked: uint(length) > v.maxLength && hasNamedReturns(funcType), + }) + } + + return true +} diff --git a/vendor/github.com/alingse/asasalint/.gitignore b/vendor/github.com/alingse/asasalint/.gitignore new file mode 100644 index 0000000000..d0fc531c8c --- /dev/null +++ b/vendor/github.com/alingse/asasalint/.gitignore @@ -0,0 +1,18 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ +.vscode + +asasalint diff --git a/vendor/github.com/alingse/asasalint/.goreleaser.yml b/vendor/github.com/alingse/asasalint/.goreleaser.yml new file mode 100644 index 0000000000..e45d5860d6 --- /dev/null +++ b/vendor/github.com/alingse/asasalint/.goreleaser.yml @@ -0,0 +1,72 @@ +--- +project_name: asasalint + +release: + github: + owner: alingse + name: asasalint + +builds: + - binary: asasalint + goos: + - darwin + - windows + - linux + - freebsd + goarch: + - amd64 + - arm64 + - arm + - 386 + - ppc64le + - s390x + - mips64 + - mips64le + - riscv64 + goarm: + - 6 + - 7 + gomips: + - hardfloat + env: + - CGO_ENABLED=0 + ignore: + - goos: darwin + goarch: 386 + - goos: freebsd + goarch: arm64 + main: ./cmd/asasalint/ + flags: + - -trimpath + ldflags: -s -w -X main.version={{.Version}} -X main.commit={{.ShortCommit}} -X main.date={{.Date}} + +archives: + - format: tar.gz + wrap_in_directory: true + format_overrides: + - goos: windows + format: zip + name_template: '{{ .ProjectName }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' + files: + - LICENSE + - README.md + +snapshot: + name_template: SNAPSHOT-{{ .Commit }} + +checksum: + name_template: '{{ .ProjectName }}-{{ .Version }}-checksums.txt' + +changelog: + sort: asc + filters: + exclude: + - '(?i)^docs?:' + - '(?i)^docs\([^:]+\):' + - '(?i)^docs\[[^:]+\]:' + - '^tests?:' + - '(?i)^dev:' + - '^build\(deps\): bump .* in /docs \(#\d+\)' + - '^build\(deps\): bump .* in /\.github/peril \(#\d+\)' + - Merge pull request + - Merge branch diff --git a/vendor/github.com/alingse/asasalint/LICENSE b/vendor/github.com/alingse/asasalint/LICENSE new file mode 100644 index 0000000000..a7c39f2e3d --- /dev/null +++ b/vendor/github.com/alingse/asasalint/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 alingse + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/alingse/asasalint/Makefile b/vendor/github.com/alingse/asasalint/Makefile new file mode 100644 index 0000000000..12f8ba9283 --- /dev/null +++ b/vendor/github.com/alingse/asasalint/Makefile @@ -0,0 +1,15 @@ +.PHONY: clean check test build + +default: clean check test build + +clean: + rm -rf dist/ cover.out + +test: clean + go test -v -cover ./... + +check: + golangci-lint run + +build: + go build -ldflags "-s -w" -trimpath ./cmd/asasalint/ diff --git a/vendor/github.com/alingse/asasalint/README.md b/vendor/github.com/alingse/asasalint/README.md new file mode 100644 index 0000000000..3fa7e65b34 --- /dev/null +++ b/vendor/github.com/alingse/asasalint/README.md @@ -0,0 +1,76 @@ +# asasalint +Golang linter that reports passing a []any slice as any in a variadic function + + +## Install + +```sh +go install github.com/alingse/asasalint/cmd/asasalint@latest +``` + +## Usage + +```sh +asasalint ./... +``` + +ignore some funcs where this usage is by design + +```sh +asasalint -e append,Append ./... +``` + +## Why + +two kinds of unexpected usage that `go build` still accepts + +```Go +package main + +import "fmt" + +func A(args ...any) int { + return len(args) +} + +func B(args ...any) int { + return A(args) +} + +func main() { + + // 1 + fmt.Println(B(1, 2, 3, 4)) +} +``` + + + +```Go +package main + +import "fmt" + +func errMsg(msg string, args ...any) string { + return fmt.Sprintf(msg, args...) +} + +func Err(msg string, args ...any) string { + return errMsg(msg, args) +} + +func main() { + + // p1 [hello world] p2 %!s(MISSING) + fmt.Println(Err("p1 %s p2 %s", "hello", "world")) +} +``` + + + +## TODO + +1. add to golangci-lint +2. provide a suggested edit +3. add `append` to the default excludes? +4.
ignore pattern `fn(a, b, []any{1,2,3})`, because `[]any{1,2,3}` is most likely by design diff --git a/vendor/github.com/alingse/asasalint/asasalint.go b/vendor/github.com/alingse/asasalint/asasalint.go new file mode 100644 index 0000000000..f34516a465 --- /dev/null +++ b/vendor/github.com/alingse/asasalint/asasalint.go @@ -0,0 +1,166 @@ +package asasalint + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "go/types" + "log" + "regexp" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const BuiltinExclusions = `^(fmt|log|logger|t|)\.(Print|Fprint|Sprint|Fatal|Panic|Error|Warn|Warning|Info|Debug|Log)(|f|ln)$` + +type LinterSetting struct { + Exclude []string + NoBuiltinExclusions bool + IgnoreTest bool +} + +func NewAnalyzer(setting LinterSetting) (*analysis.Analyzer, error) { + a, err := newAnalyzer(setting) + if err != nil { + return nil, err + } + + return &analysis.Analyzer{ + Name: "asasalint", + Doc: "check for pass []any as any in variadic func(...any)", + Run: a.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + }, nil +} + +type analyzer struct { + excludes []*regexp.Regexp + setting LinterSetting +} + +func newAnalyzer(setting LinterSetting) (*analyzer, error) { + a := &analyzer{ + setting: setting, + } + + if !a.setting.NoBuiltinExclusions { + a.excludes = append(a.excludes, regexp.MustCompile(BuiltinExclusions)) + } + + for _, exclude := range a.setting.Exclude { + if exclude != "" { + exp, err := regexp.Compile(exclude) + if err != nil { + return nil, err + } + + a.excludes = append(a.excludes, exp) + } + } + + return a, nil +} + +func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { + inspectorInfo := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{(*ast.CallExpr)(nil)} + + inspectorInfo.Preorder(nodeFilter, a.AsCheckVisitor(pass)) + return nil, nil +} + +func (a *analyzer) AsCheckVisitor(pass *analysis.Pass) func(ast.Node) { + return func(n ast.Node) { + if a.setting.IgnoreTest { + pos := pass.Fset.Position(n.Pos()) + if strings.HasSuffix(pos.Filename, "_test.go") { + return + } + } + + caller, ok := n.(*ast.CallExpr) + if !ok { + return + } + if caller.Ellipsis != token.NoPos { + return + } + if len(caller.Args) == 0 { + return + } + + fnName, err := getFuncName(pass.Fset, caller) + if err != nil { + log.Println(err) + return + } + + for _, exclude := range a.excludes { + if exclude.MatchString(fnName) { + return + } + } + + fnType := pass.TypesInfo.TypeOf(caller.Fun) + if !isSliceAnyVariadicFuncType(fnType) { + return + } + + fnSign := fnType.(*types.Signature) + if len(caller.Args) != fnSign.Params().Len() { + return + } + + lastArg := caller.Args[len(caller.Args)-1] + argType := pass.TypesInfo.TypeOf(lastArg) + if !isSliceAnyType(argType) { + return + } + node := lastArg + + d := analysis.Diagnostic{ + Pos: node.Pos(), + End: node.End(), + Message: fmt.Sprintf("pass []any as any to func %s %s", fnName, fnType.String()), + Category: "asasalint", + } + pass.Report(d) + } +} + +func getFuncName(fset *token.FileSet, caller *ast.CallExpr) (string, error) { + buf := new(bytes.Buffer) + if err := printer.Fprint(buf, fset, caller.Fun); err != nil { + return "", fmt.Errorf("unable to print node at %s: %w", fset.Position(caller.Fun.Pos()), err) + } + + return buf.String(), nil +} + +func isSliceAnyVariadicFuncType(typ types.Type) (r bool) { + fnSign, ok := typ.(*types.Signature) + if !ok || !fnSign.Variadic() { +
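// not a variadic function signature, so it cannot be a func(...any) candidate +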
return false + } + + params := fnSign.Params() + lastParam := params.At(params.Len() - 1) + return isSliceAnyType(lastParam.Type()) +} + +func isSliceAnyType(typ types.Type) (r bool) { + sliceType, ok := typ.(*types.Slice) + if !ok { + return + } + elemType, ok := sliceType.Elem().(*types.Interface) + if !ok { + return + } + return elemType.NumMethods() == 0 +} diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE deleted file mode 100644 index 2125378860..0000000000 --- a/vendor/github.com/apparentlymart/go-cidr/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Martin Atkins - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go deleted file mode 100644 index 20823af041..0000000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ /dev/null @@ -1,236 +0,0 @@ -// Package cidr is a collection of assorted utilities for computing -// network and host addresses within network ranges. -// -// It expects a CIDR-type address structure where addresses are divided into -// some number of prefix bits representing the network and then the remaining -// suffix bits represent the host. -// -// For example, it can help to calculate addresses for sub-networks of a -// parent network, or to calculate host addresses within a particular prefix. -// -// At present this package is prioritizing simplicity of implementation and -// de-prioritizing speed and memory usage. Thus caution is advised before -// using this package in performance-critical applications or hot codepaths. -// Patches to improve the speed and memory usage may be accepted as long as -// they do not result in a significant increase in code complexity. -package cidr - -import ( - "fmt" - "math/big" - "net" -) - -// Subnet takes a parent CIDR range and creates a subnet within it -// with the given number of additional prefix bits and the given -// network number. -// -// For example, 10.3.0.0/16, extended by 8 bits, with a network number -// of 5, becomes 10.3.5.0/24 . -func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { - return SubnetBig(base, newBits, big.NewInt(int64(num))) -} - -// SubnetBig takes a parent CIDR range and creates a subnet within it with the -// given number of additional prefix bits and the given network number. It -// differs from Subnet in that it takes a *big.Int for the num, instead of an int. 
-// -// For example, 10.3.0.0/16, extended by 8 bits, with a network number of 5, -// becomes 10.3.5.0/24 . -func SubnetBig(base *net.IPNet, newBits int, num *big.Int) (*net.IPNet, error) { - ip := base.IP - mask := base.Mask - - parentLen, addrLen := mask.Size() - newPrefixLen := parentLen + newBits - - if newPrefixLen > addrLen { - return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits) - } - - maxNetNum := uint64(1<<uint64(newBits)) - 1 - if num.Uint64() > maxNetNum { - return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num) - } - - return &net.IPNet{ - IP: insertNumIntoIP(ip, num, newPrefixLen), - Mask: net.CIDRMask(newPrefixLen, addrLen), - }, nil -} - -// Host takes a parent CIDR range and turns it into a host IP address with the -// given host number. -// -// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2. -func Host(base *net.IPNet, num int) (net.IP, error) { - return HostBig(base, big.NewInt(int64(num))) -} - -// HostBig takes a parent CIDR range and turns it into a host IP address with -// the given host number. It differs from Host in that it takes a *big.Int for -// the num, instead of an int. -// -// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2. -func HostBig(base *net.IPNet, num *big.Int) (net.IP, error) { - ip := base.IP - mask := base.Mask - - parentLen, addrLen := mask.Size() - hostLen := addrLen - parentLen - - maxHostNum := big.NewInt(int64(1)) - maxHostNum.Lsh(maxHostNum, uint(hostLen)) - maxHostNum.Sub(maxHostNum, big.NewInt(1)) - - numUint64 := big.NewInt(int64(num.Uint64())) - if num.Cmp(big.NewInt(0)) == -1 { - numUint64.Neg(num) - numUint64.Sub(numUint64, big.NewInt(int64(1))) - num.Sub(maxHostNum, numUint64) - } - - if numUint64.Cmp(maxHostNum) == 1 { - return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) - } - var bitlength int - if ip.To4() != nil { - bitlength = 32 - } else { - bitlength = 128 - } - return insertNumIntoIP(ip, num, bitlength), nil -} - -// AddressRange returns the first and last addresses in the given CIDR range. -func AddressRange(network *net.IPNet) (net.IP, net.IP) { - // the first IP is easy - firstIP := network.IP - - // the last IP is the network address OR NOT the mask address - prefixLen, bits := network.Mask.Size() - if prefixLen == bits { - // Easy! - // But make sure that our two slices are distinct, since they - // would be in all other cases. - lastIP := make([]byte, len(firstIP)) - copy(lastIP, firstIP) - return firstIP, lastIP - } - - firstIPInt, bits := ipToInt(firstIP) - hostLen := uint(bits) - uint(prefixLen) - lastIPInt := big.NewInt(1) - lastIPInt.Lsh(lastIPInt, hostLen) - lastIPInt.Sub(lastIPInt, big.NewInt(1)) - lastIPInt.Or(lastIPInt, firstIPInt) - - return firstIP, intToIP(lastIPInt, bits) -} - -// AddressCount returns the number of distinct host addresses within the given -// CIDR range. -// -// Since the result is a uint64, this function returns meaningful information -// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65.
-func AddressCount(network *net.IPNet) uint64 { - prefixLen, bits := network.Mask.Size() - return 1 << (uint64(bits) - uint64(prefixLen)) -} - -//VerifyNoOverlap takes a list subnets and supernet (CIDRBlock) and verifies -//none of the subnets overlap and all subnets are in the supernet -//it returns an error if any of those conditions are not satisfied -func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { - firstLastIP := make([][]net.IP, len(subnets)) - for i, s := range subnets { - first, last := AddressRange(s) - firstLastIP[i] = []net.IP{first, last} - } - for i, s := range subnets { - if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { - return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) - } - for j := 0; j < len(subnets); j++ { - if i == j { - continue - } - - first := firstLastIP[j][0] - last := firstLastIP[j][1] - if s.Contains(first) || s.Contains(last) { - return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String()) - } - } - } - return nil -} - -// PreviousSubnet returns the subnet of the desired mask in the IP space -// just lower than the start of IPNet provided. If the IP space rolls over -// then the second return value is true -func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { - startIP := checkIPv4(network.IP) - previousIP := make(net.IP, len(startIP)) - copy(previousIP, startIP) - cMask := net.CIDRMask(prefixLen, 8*len(previousIP)) - previousIP = Dec(previousIP) - previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask} - if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) { - return previous, true - } - return previous, false -} - -// NextSubnet returns the next available subnet of the desired mask size -// starting for the maximum IP of the offset subnet -// If the IP exceeds the maxium IP then the second return value is true -func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { - _, currentLast := AddressRange(network) - mask := net.CIDRMask(prefixLen, 8*len(currentLast)) - currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask} - _, last := AddressRange(currentSubnet) - last = Inc(last) - next := &net.IPNet{IP: last.Mask(mask), Mask: mask} - if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) { - return next, true - } - return next, false -} - -//Inc increases the IP by one this returns a new []byte for the IP -func Inc(IP net.IP) net.IP { - IP = checkIPv4(IP) - incIP := make([]byte, len(IP)) - copy(incIP, IP) - for j := len(incIP) - 1; j >= 0; j-- { - incIP[j]++ - if incIP[j] > 0 { - break - } - } - return incIP -} - -//Dec decreases the IP by one this returns a new []byte for the IP -func Dec(IP net.IP) net.IP { - IP = checkIPv4(IP) - decIP := make([]byte, len(IP)) - copy(decIP, IP) - decIP = checkIPv4(decIP) - for j := len(decIP) - 1; j >= 0; j-- { - decIP[j]-- - if decIP[j] < 255 { - break - } - } - return decIP -} - -func checkIPv4(ip net.IP) net.IP { - // Go for some reason allocs IPv6len for IPv4 so we have to correct it - if v4 := ip.To4(); v4 != nil { - return v4 - } - return ip -} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go deleted file mode 100644 index e5e6a2cf91..0000000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go +++ /dev/null @@ -1,37 +0,0 @@ -package cidr - -import ( - "fmt" - "math/big" - "net" -) - -func ipToInt(ip net.IP) (*big.Int, int) { - val := &big.Int{} - 
val.SetBytes([]byte(ip)) - if len(ip) == net.IPv4len { - return val, 32 - } else if len(ip) == net.IPv6len { - return val, 128 - } else { - panic(fmt.Errorf("Unsupported address length %d", len(ip))) - } -} - -func intToIP(ipInt *big.Int, bits int) net.IP { - ipBytes := ipInt.Bytes() - ret := make([]byte, bits/8) - // Pack our IP bytes into the end of the return array, - // since big.Int.Bytes() removes front zero padding. - for i := 1; i <= len(ipBytes); i++ { - ret[len(ret)-i] = ipBytes[len(ipBytes)-i] - } - return net.IP(ret) -} - -func insertNumIntoIP(ip net.IP, bigNum *big.Int, prefixLen int) net.IP { - ipInt, totalBits := ipToInt(ip) - bigNum.Lsh(bigNum, uint(totalBits-prefixLen)) - ipInt.Or(ipInt, bigNum) - return intToIP(ipInt, totalBits) -} diff --git a/vendor/github.com/apparentlymart/go-textseg/v12/textseg/emoji_table.rl b/vendor/github.com/apparentlymart/go-textseg/v12/textseg/emoji_table.rl deleted file mode 100644 index 1c0749c9cb..0000000000 --- a/vendor/github.com/apparentlymart/go-textseg/v12/textseg/emoji_table.rl +++ /dev/null @@ -1,290 +0,0 @@ -# The following Ragel file was autogenerated with unicode2ragel.rb -# from: https://www.unicode.org/Public/emoji/12.0/emoji-data.txt -# -# It defines ["Extended_Pictographic"]. -# -# To use this, make sure that your alphtype is set to byte, -# and that your input is in utf8. - -%%{ - machine Emoji; - - Extended_Pictographic = - 0xC2 0xA9 #1.1 [1] (©️) copyright - | 0xC2 0xAE #1.1 [1] (®️) registered - | 0xE2 0x80 0xBC #1.1 [1] (‼️) double exclamation mark - | 0xE2 0x81 0x89 #3.0 [1] (⁉️) exclamation question mark - | 0xE2 0x84 0xA2 #1.1 [1] (™️) trade mark - | 0xE2 0x84 0xB9 #3.0 [1] (ℹ️) information - | 0xE2 0x86 0x94..0x99 #1.1 [6] (↔️..↙️) left-right arrow..down... - | 0xE2 0x86 0xA9..0xAA #1.1 [2] (↩️..↪️) right arrow curving le... - | 0xE2 0x8C 0x9A..0x9B #1.1 [2] (⌚..⌛) watch..hourglass done - | 0xE2 0x8C 0xA8 #1.1 [1] (⌨️) keyboard - | 0xE2 0x8E 0x88 #3.0 [1] (⎈) HELM SYMBOL - | 0xE2 0x8F 0x8F #4.0 [1] (⏏️) eject button - | 0xE2 0x8F 0xA9..0xB3 #6.0 [11] (⏩..⏳) fast-forward button..hou... - | 0xE2 0x8F 0xB8..0xBA #7.0 [3] (⏸️..⏺️) pause button..record b... - | 0xE2 0x93 0x82 #1.1 [1] (Ⓜ️) circled M - | 0xE2 0x96 0xAA..0xAB #1.1 [2] (▪️..▫️) black small square..wh... - | 0xE2 0x96 0xB6 #1.1 [1] (▶️) play button - | 0xE2 0x97 0x80 #1.1 [1] (◀️) reverse button - | 0xE2 0x97 0xBB..0xBE #3.2 [4] (◻️..◾) white medium square..bl... - | 0xE2 0x98 0x80..0x85 #1.1 [6] (☀️..★) sun..BLACK STAR - | 0xE2 0x98 0x87..0x92 #1.1 [12] (☇..☒) LIGHTNING..BALLOT BOX WI... - | 0xE2 0x98 0x94..0x95 #4.0 [2] (☔..☕) umbrella with rain drops... - | 0xE2 0x98 0x96..0x97 #3.2 [2] (☖..☗) WHITE SHOGI PIECE..BLACK... - | 0xE2 0x98 0x98 #4.1 [1] (☘️) shamrock - | 0xE2 0x98 0x99 #3.0 [1] (☙) REVERSED ROTATED FLORAL ... - | 0xE2 0x98 0x9A..0xFF #1.1 [86] (☚..♯) BLACK LEFT POINTING INDE... - | 0xE2 0x99 0x00..0xAF # - | 0xE2 0x99 0xB0..0xB1 #3.0 [2] (♰..♱) WEST SYRIAC CROSS..EAST ... - | 0xE2 0x99 0xB2..0xBD #3.2 [12] (♲..♽) UNIVERSAL RECYCLING SYMB... - | 0xE2 0x99 0xBE..0xBF #4.1 [2] (♾️..♿) infinity..wheelchair sy... - | 0xE2 0x9A 0x80..0x85 #3.2 [6] (⚀..⚅) DIE FACE-1..DIE FACE-6 - | 0xE2 0x9A 0x90..0x91 #4.0 [2] (⚐..⚑) WHITE FLAG..BLACK FLAG - | 0xE2 0x9A 0x92..0x9C #4.1 [11] (⚒️..⚜️) hammer and pick..fleur... - | 0xE2 0x9A 0x9D #5.1 [1] (⚝) OUTLINED WHITE STAR - | 0xE2 0x9A 0x9E..0x9F #5.2 [2] (⚞..⚟) THREE LINES CONVERGING R... 
- | 0xE2 0x9A 0xA0..0xA1 #4.0 [2] (⚠️..⚡) warning..high voltage - | 0xE2 0x9A 0xA2..0xB1 #4.1 [16] (⚢..⚱️) DOUBLED FEMALE SIGN..fu... - | 0xE2 0x9A 0xB2 #5.0 [1] (⚲) NEUTER - | 0xE2 0x9A 0xB3..0xBC #5.1 [10] (⚳..⚼) CERES..SESQUIQUADRATE - | 0xE2 0x9A 0xBD..0xBF #5.2 [3] (⚽..⚿) soccer ball..SQUARED KEY - | 0xE2 0x9B 0x80..0x83 #5.1 [4] (⛀..⛃) WHITE DRAUGHTS MAN..BLAC... - | 0xE2 0x9B 0x84..0x8D #5.2 [10] (⛄..⛍) snowman without snow..DI... - | 0xE2 0x9B 0x8E #6.0 [1] (⛎) Ophiuchus - | 0xE2 0x9B 0x8F..0xA1 #5.2 [19] (⛏️..⛡) pick..RESTRICTED LEFT E... - | 0xE2 0x9B 0xA2 #6.0 [1] (⛢) ASTRONOMICAL SYMBOL FOR ... - | 0xE2 0x9B 0xA3 #5.2 [1] (⛣) HEAVY CIRCLE WITH STROKE... - | 0xE2 0x9B 0xA4..0xA7 #6.0 [4] (⛤..⛧) PENTAGRAM..INVERTED PENT... - | 0xE2 0x9B 0xA8..0xBF #5.2 [24] (⛨..⛿) BLACK CROSS ON SHIELD..W... - | 0xE2 0x9C 0x80 #7.0 [1] (✀) BLACK SAFETY SCISSORS - | 0xE2 0x9C 0x81..0x84 #1.1 [4] (✁..✄) UPPER BLADE SCISSORS..WH... - | 0xE2 0x9C 0x85 #6.0 [1] (✅) check mark button - | 0xE2 0x9C 0x88..0x89 #1.1 [2] (✈️..✉️) airplane..envelope - | 0xE2 0x9C 0x8A..0x8B #6.0 [2] (✊..✋) raised fist..raised hand - | 0xE2 0x9C 0x8C..0x92 #1.1 [7] (✌️..✒️) victory hand..black nib - | 0xE2 0x9C 0x94 #1.1 [1] (✔️) check mark - | 0xE2 0x9C 0x96 #1.1 [1] (✖️) multiplication sign - | 0xE2 0x9C 0x9D #1.1 [1] (✝️) latin cross - | 0xE2 0x9C 0xA1 #1.1 [1] (✡️) star of David - | 0xE2 0x9C 0xA8 #6.0 [1] (✨) sparkles - | 0xE2 0x9C 0xB3..0xB4 #1.1 [2] (✳️..✴️) eight-spoked asterisk.... - | 0xE2 0x9D 0x84 #1.1 [1] (❄️) snowflake - | 0xE2 0x9D 0x87 #1.1 [1] (❇️) sparkle - | 0xE2 0x9D 0x8C #6.0 [1] (❌) cross mark - | 0xE2 0x9D 0x8E #6.0 [1] (❎) cross mark button - | 0xE2 0x9D 0x93..0x95 #6.0 [3] (❓..❕) question mark..white exc... - | 0xE2 0x9D 0x97 #5.2 [1] (❗) exclamation mark - | 0xE2 0x9D 0xA3..0xA7 #1.1 [5] (❣️..❧) heart exclamation..ROTA... - | 0xE2 0x9E 0x95..0x97 #6.0 [3] (➕..➗) plus sign..division sign - | 0xE2 0x9E 0xA1 #1.1 [1] (➡️) right arrow - | 0xE2 0x9E 0xB0 #6.0 [1] (➰) curly loop - | 0xE2 0x9E 0xBF #6.0 [1] (➿) double curly loop - | 0xE2 0xA4 0xB4..0xB5 #3.2 [2] (⤴️..⤵️) right arrow curving up... - | 0xE2 0xAC 0x85..0x87 #4.0 [3] (⬅️..⬇️) left arrow..down arrow - | 0xE2 0xAC 0x9B..0x9C #5.1 [2] (⬛..⬜) black large square..whit... - | 0xE2 0xAD 0x90 #5.1 [1] (⭐) star - | 0xE2 0xAD 0x95 #5.2 [1] (⭕) hollow red circle - | 0xE3 0x80 0xB0 #1.1 [1] (〰️) wavy dash - | 0xE3 0x80 0xBD #3.2 [1] (〽️) part alternation mark - | 0xE3 0x8A 0x97 #1.1 [1] (㊗️) Japanese “congratulatio... - | 0xE3 0x8A 0x99 #1.1 [1] (㊙️) Japanese “secret” button - | 0xF0 0x9F 0x80 0x80..0xAB #5.1 [44] (🀀..🀫) MAHJONG TILE EAST WIN... - | 0xF0 0x9F 0x80 0xAC..0xAF #NA [4] (🀬..🀯) ...... - | 0xF0 0x9F 0x83 0x81..0x8F #6.0 [15] (🃁..🃏) PLAYING CARD ACE OF D... - | 0xF0 0x9F 0x83 0x90 #NA [1] (🃐) - | 0xF0 0x9F 0x83 0x91..0x9F #6.0 [15] (🃑..🃟) PLAYING CARD ACE OF C... - | 0xF0 0x9F 0x83 0xA0..0xB5 #7.0 [22] (🃠..🃵) PLAYING CARD FOOL..PL... - | 0xF0 0x9F 0x83 0xB6..0xBF #NA [10] (🃶..🃿) ................... 
- | 0xF0 0x9F 0x8A..0x8A 0x00..0xFF # - | 0xF0 0x9F 0x8B 0x00..0xBF # - | 0xF0 0x9F 0x8C 0x80..0xA0 #6.0 [33] (🌀..🌠) cyclone..shooting star - | 0xF0 0x9F 0x8C 0xA1..0xAC #7.0 [12] (🌡️..🌬️) thermometer..wind face - | 0xF0 0x9F 0x8C 0xAD..0xAF #8.0 [3] (🌭..🌯) hot dog..burrito - | 0xF0 0x9F 0x8C 0xB0..0xB5 #6.0 [6] (🌰..🌵) chestnut..cactus - | 0xF0 0x9F 0x8C 0xB6 #7.0 [1] (🌶️) hot pepper - | 0xF0 0x9F 0x8C 0xB7..0xFF #6.0 [70] (🌷..🍼) tulip..baby bottle - | 0xF0 0x9F 0x8D 0x00..0xBC # - | 0xF0 0x9F 0x8D 0xBD #7.0 [1] (🍽️) fork and knife with plate - | 0xF0 0x9F 0x8D 0xBE..0xBF #8.0 [2] (🍾..🍿) bottle with popping c... - | 0xF0 0x9F 0x8E 0x80..0x93 #6.0 [20] (🎀..🎓) ribbon..graduation cap - | 0xF0 0x9F 0x8E 0x94..0x9F #7.0 [12] (🎔..🎟️) HEART WITH TIP ON TH... - | 0xF0 0x9F 0x8E 0xA0..0xFF #6.0 [37] (🎠..🏄) carousel horse..perso... - | 0xF0 0x9F 0x8F 0x00..0x84 # - | 0xF0 0x9F 0x8F 0x85 #7.0 [1] (🏅) sports medal - | 0xF0 0x9F 0x8F 0x86..0x8A #6.0 [5] (🏆..🏊) trophy..person swimming - | 0xF0 0x9F 0x8F 0x8B..0x8E #7.0 [4] (🏋️..🏎️) person lifting weig... - | 0xF0 0x9F 0x8F 0x8F..0x93 #8.0 [5] (🏏..🏓) cricket game..ping pong - | 0xF0 0x9F 0x8F 0x94..0x9F #7.0 [12] (🏔️..🏟️) snow-capped mountai... - | 0xF0 0x9F 0x8F 0xA0..0xB0 #6.0 [17] (🏠..🏰) house..castle - | 0xF0 0x9F 0x8F 0xB1..0xB7 #7.0 [7] (🏱..🏷️) WHITE PENNANT..label - | 0xF0 0x9F 0x8F 0xB8..0xBA #8.0 [3] (🏸..🏺) badminton..amphora - | 0xF0 0x9F 0x90 0x80..0xBE #6.0 [63] (🐀..🐾) rat..paw prints - | 0xF0 0x9F 0x90 0xBF #7.0 [1] (🐿️) chipmunk - | 0xF0 0x9F 0x91 0x80 #6.0 [1] (👀) eyes - | 0xF0 0x9F 0x91 0x81 #7.0 [1] (👁️) eye - | 0xF0 0x9F 0x91 0x82..0xFF #6.0[182] (👂..📷) ear..camera - | 0xF0 0x9F 0x92..0x92 0x00..0xFF # - | 0xF0 0x9F 0x93 0x00..0xB7 # - | 0xF0 0x9F 0x93 0xB8 #7.0 [1] (📸) camera with flash - | 0xF0 0x9F 0x93 0xB9..0xBC #6.0 [4] (📹..📼) video camera..videoca... - | 0xF0 0x9F 0x93 0xBD..0xBE #7.0 [2] (📽️..📾) film projector..PORT... - | 0xF0 0x9F 0x93 0xBF #8.0 [1] (📿) prayer beads - | 0xF0 0x9F 0x94 0x80..0xBD #6.0 [62] (🔀..🔽) shuffle tracks button... - | 0xF0 0x9F 0x95 0x86..0x8A #7.0 [5] (🕆..🕊️) WHITE LATIN CROSS..dove - | 0xF0 0x9F 0x95 0x8B..0x8F #8.0 [5] (🕋..🕏) kaaba..BOWL OF HYGIEIA - | 0xF0 0x9F 0x95 0x90..0xA7 #6.0 [24] (🕐..🕧) one o’clock..twelve-t... - | 0xF0 0x9F 0x95 0xA8..0xB9 #7.0 [18] (🕨..🕹️) RIGHT SPEAKER..joystick - | 0xF0 0x9F 0x95 0xBA #9.0 [1] (🕺) man dancing - | 0xF0 0x9F 0x95 0xBB..0xFF #7.0 [41] (🕻..🖣) LEFT HAND TELEPHONE R... - | 0xF0 0x9F 0x96 0x00..0xA3 # - | 0xF0 0x9F 0x96 0xA4 #9.0 [1] (🖤) black heart - | 0xF0 0x9F 0x96 0xA5..0xFF #7.0 [86] (🖥️..🗺️) desktop computer..w... - | 0xF0 0x9F 0x97 0x00..0xBA # - | 0xF0 0x9F 0x97 0xBB..0xBF #6.0 [5] (🗻..🗿) mount fuji..moai - | 0xF0 0x9F 0x98 0x80 #6.1 [1] (😀) grinning face - | 0xF0 0x9F 0x98 0x81..0x90 #6.0 [16] (😁..😐) beaming face with smi... - | 0xF0 0x9F 0x98 0x91 #6.1 [1] (😑) expressionless face - | 0xF0 0x9F 0x98 0x92..0x94 #6.0 [3] (😒..😔) unamused face..pensiv... - | 0xF0 0x9F 0x98 0x95 #6.1 [1] (😕) confused face - | 0xF0 0x9F 0x98 0x96 #6.0 [1] (😖) confounded face - | 0xF0 0x9F 0x98 0x97 #6.1 [1] (😗) kissing face - | 0xF0 0x9F 0x98 0x98 #6.0 [1] (😘) face blowing a kiss - | 0xF0 0x9F 0x98 0x99 #6.1 [1] (😙) kissing face with smilin... - | 0xF0 0x9F 0x98 0x9A #6.0 [1] (😚) kissing face with closed... - | 0xF0 0x9F 0x98 0x9B #6.1 [1] (😛) face with tongue - | 0xF0 0x9F 0x98 0x9C..0x9E #6.0 [3] (😜..😞) winking face with ton... - | 0xF0 0x9F 0x98 0x9F #6.1 [1] (😟) worried face - | 0xF0 0x9F 0x98 0xA0..0xA5 #6.0 [6] (😠..😥) angry face..sad but r... 
- | 0xF0 0x9F 0x98 0xA6..0xA7 #6.1 [2] (😦..😧) frowning face with op... - | 0xF0 0x9F 0x98 0xA8..0xAB #6.0 [4] (😨..😫) fearful face..tired face - | 0xF0 0x9F 0x98 0xAC #6.1 [1] (😬) grimacing face - | 0xF0 0x9F 0x98 0xAD #6.0 [1] (😭) loudly crying face - | 0xF0 0x9F 0x98 0xAE..0xAF #6.1 [2] (😮..😯) face with open mouth.... - | 0xF0 0x9F 0x98 0xB0..0xB3 #6.0 [4] (😰..😳) anxious face with swe... - | 0xF0 0x9F 0x98 0xB4 #6.1 [1] (😴) sleeping face - | 0xF0 0x9F 0x98 0xB5..0xFF #6.0 [12] (😵..🙀) dizzy face..weary cat - | 0xF0 0x9F 0x99 0x00..0x80 # - | 0xF0 0x9F 0x99 0x81..0x82 #7.0 [2] (🙁..🙂) slightly frowning fac... - | 0xF0 0x9F 0x99 0x83..0x84 #8.0 [2] (🙃..🙄) upside-down face..fac... - | 0xF0 0x9F 0x99 0x85..0x8F #6.0 [11] (🙅..🙏) person gesturing NO..... - | 0xF0 0x9F 0x9A 0x80..0xFF #6.0 [70] (🚀..🛅) rocket..left luggage - | 0xF0 0x9F 0x9B 0x00..0x85 # - | 0xF0 0x9F 0x9B 0x86..0x8F #7.0 [10] (🛆..🛏️) TRIANGLE WITH ROUNDE... - | 0xF0 0x9F 0x9B 0x90 #8.0 [1] (🛐) place of worship - | 0xF0 0x9F 0x9B 0x91..0x92 #9.0 [2] (🛑..🛒) stop sign..shopping cart - | 0xF0 0x9F 0x9B 0x93..0x94 #10.0 [2] (🛓..🛔) STUPA..PAGODA - | 0xF0 0x9F 0x9B 0x95 #12.0 [1] (🛕) hindu temple - | 0xF0 0x9F 0x9B 0x96..0x9F #NA [10] (🛖..🛟) ...................... - | 0xF0 0x9F 0xA4 0x8D..0x8F #12.0 [3] (🤍..🤏) white heart..pinchin... - | 0xF0 0x9F 0xA4 0x90..0x98 #8.0 [9] (🤐..🤘) zipper-mouth face..si... - | 0xF0 0x9F 0xA4 0x99..0x9E #9.0 [6] (🤙..🤞) call me hand..crossed... - | 0xF0 0x9F 0xA4 0x9F #10.0 [1] (🤟) love-you gesture - | 0xF0 0x9F 0xA4 0xA0..0xA7 #9.0 [8] (🤠..🤧) cowboy hat face..snee... - | 0xF0 0x9F 0xA4 0xA8..0xAF #10.0 [8] (🤨..🤯) face with raised eye... - | 0xF0 0x9F 0xA4 0xB0 #9.0 [1] (🤰) pregnant woman - | 0xF0 0x9F 0xA4 0xB1..0xB2 #10.0 [2] (🤱..🤲) breast-feeding..palm... - | 0xF0 0x9F 0xA4 0xB3..0xBA #9.0 [8] (🤳..🤺) selfie..person fencing - | 0xF0 0x9F 0xA4 0xBC..0xBE #9.0 [3] (🤼..🤾) people wrestling..per... - | 0xF0 0x9F 0xA4 0xBF #12.0 [1] (🤿) diving mask - | 0xF0 0x9F 0xA5 0x80..0x85 #9.0 [6] (🥀..🥅) wilted flower..goal net - | 0xF0 0x9F 0xA5 0x87..0x8B #9.0 [5] (🥇..🥋) 1st place medal..mart... - | 0xF0 0x9F 0xA5 0x8C #10.0 [1] (🥌) curling stone - | 0xF0 0x9F 0xA5 0x8D..0x8F #11.0 [3] (🥍..🥏) lacrosse..flying disc - | 0xF0 0x9F 0xA5 0x90..0x9E #9.0 [15] (🥐..🥞) croissant..pancakes - | 0xF0 0x9F 0xA5 0x9F..0xAB #10.0 [13] (🥟..🥫) dumpling..canned food - | 0xF0 0x9F 0xA5 0xAC..0xB0 #11.0 [5] (🥬..🥰) leafy green..smiling... - | 0xF0 0x9F 0xA5 0xB1 #12.0 [1] (🥱) yawning face - | 0xF0 0x9F 0xA5 0xB2 #NA [1] (🥲) - | 0xF0 0x9F 0xA5 0xB3..0xB6 #11.0 [4] (🥳..🥶) partying face..cold ... - | 0xF0 0x9F 0xA5 0xB7..0xB9 #NA [3] (🥷..🥹) ..................... 
- | 0xF0 0x9F 0xAB..0xBE 0x00..0xFF # - | 0xF0 0x9F 0xBF 0x00..0xBD # - ; - -}%% diff --git a/vendor/github.com/apparentlymart/go-textseg/v12/textseg/generate.go b/vendor/github.com/apparentlymart/go-textseg/v12/textseg/generate.go deleted file mode 100644 index 9df1263484..0000000000 --- a/vendor/github.com/apparentlymart/go-textseg/v12/textseg/generate.go +++ /dev/null @@ -1,8 +0,0 @@ -package textseg - -//go:generate go run make_tables.go -output tables.go -//go:generate go run make_test_tables.go -output tables_test.go -//go:generate ruby unicode2ragel.rb --url=https://www.unicode.org/Public/12.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,ZWJ" -o grapheme_clusters_table.rl -//go:generate ruby unicode2ragel.rb --url=https://www.unicode.org/Public/emoji/12.0/emoji-data.txt -m Emoji -p "Extended_Pictographic" -o emoji_table.rl -//go:generate ragel -Z grapheme_clusters.rl -//go:generate gofmt -w grapheme_clusters.go diff --git a/vendor/github.com/apparentlymart/go-textseg/v12/textseg/grapheme_clusters.go b/vendor/github.com/apparentlymart/go-textseg/v12/textseg/grapheme_clusters.go deleted file mode 100644 index c389827fed..0000000000 --- a/vendor/github.com/apparentlymart/go-textseg/v12/textseg/grapheme_clusters.go +++ /dev/null @@ -1,4078 +0,0 @@ -//line grapheme_clusters.rl:1 -package textseg - -import ( - "errors" - "unicode/utf8" -) - -// Generated from grapheme_clusters.rl. DO NOT EDIT - -//line grapheme_clusters.go:13 -var _graphclust_actions []byte = []byte{ - 0, 1, 0, 1, 4, 1, 10, 1, 11, - 1, 12, 1, 13, 1, 14, 1, 15, - 1, 16, 1, 17, 1, 18, 1, 19, - 1, 20, 1, 21, 1, 22, 2, 1, - 8, 2, 1, 9, 2, 2, 3, 2, - 5, 1, 3, 0, 1, 9, 3, 5, - 0, 1, 3, 5, 1, 6, 3, 5, - 1, 7, -} - -var _graphclust_key_offsets []int16 = []int16{ - 0, 0, 1, 3, 5, 7, 10, 15, - 17, 20, 28, 31, 33, 35, 38, 68, - 76, 78, 82, 85, 90, 95, 107, 119, - 127, 132, 142, 145, 152, 156, 164, 174, - 180, 188, 190, 198, 201, 203, 206, 208, - 215, 217, 225, 226, 247, 251, 257, 262, - 264, 268, 272, 274, 278, 280, 283, 287, - 289, 296, 298, 302, 306, 310, 312, 314, - 322, 326, 331, 333, 335, 337, 338, 340, - 342, 344, 346, 361, 365, 367, 369, 374, - 378, 384, 386, 388, 392, 396, 398, 402, - 409, 414, 418, 421, 422, 426, 433, 440, - 441, 442, 444, 453, 455, 457, 459, 491, - 495, 497, 501, 505, 508, 512, 516, 519, - 521, 527, 540, 542, 545, 547, 549, 553, - 557, 559, 561, 563, 565, 570, 576, 579, - 581, 585, 589, 596, 599, 605, 607, 611, - 613, 615, 618, 622, 623, 625, 631, 637, - 643, 645, 649, 653, 658, 663, 673, 675, - 677, 679, 680, 682, 683, 689, 691, 693, - 693, 695, 702, 704, 706, 708, 711, 716, - 718, 721, 729, 732, 734, 736, 739, 769, - 777, 779, 783, 786, 791, 796, 808, 820, - 828, 833, 843, 846, 853, 857, 865, 875, - 881, 889, 891, 899, 902, 904, 907, 909, - 916, 918, 926, 927, 948, 952, 958, 963, - 965, 969, 973, 975, 979, 981, 984, 988, - 990, 997, 999, 1003, 1007, 1011, 1013, 1015, - 1023, 1027, 1032, 1034, 1036, 1060, 1063, 1064, - 1066, 1068, 1072, 1075, 1076, 1081, 1082, 1085, - 1088, 1094, 1096, 1100, 1102, 1113, 1122, 1127, - 1129, 1133, 1135, 1137, 1138, 1140, 1143, 1146, - 1148, 1150, 1165, 1169, 1171, 1173, 1178, 1182, - 1188, 1190, 1192, 1196, 1200, 1202, 1206, 1213, - 1218, 1222, 1225, 1226, 1230, 1237, 1244, 1245, - 1246, 1248, 1257, 1259, 1261, 1263, 1295, 1299, - 1301, 1305, 1309, 1312, 1316, 1320, 1323, 1325, - 1331, 1344, 1346, 1349, 1351, 1353, 1357, 1361, - 1363, 1365, 1367, 1369, 1374, 1380, 1383, 1385, - 
1389, 1393, 1400, 1403, 1409, 1411, 1415, 1417, - 1419, 1422, 1426, 1427, 1429, 1435, 1441, 1447, - 1449, 1453, 1457, 1462, 1467, 1477, 1479, 1481, - 1483, 1523, 1523, 1526, 1530, 1535, 1537, 1545, - 1547, 1549, 1551, 1553, 1555, 1557, 1559, 1563, - 1567, 1571, 1575, 1577, 1578, 1584, 1586, 1588, - 1590, 1597, 1598, 1600, 1605, 1607, 1609, 1611, - 1614, 1619, 1621, 1624, 1632, 1635, 1637, 1639, - 1642, 1672, 1680, 1682, 1686, 1689, 1694, 1699, - 1711, 1723, 1731, 1736, 1746, 1749, 1756, 1760, - 1768, 1778, 1784, 1792, 1794, 1802, 1805, 1807, - 1810, 1812, 1819, 1821, 1829, 1830, 1851, 1855, - 1861, 1866, 1868, 1872, 1876, 1878, 1882, 1884, - 1887, 1891, 1893, 1900, 1902, 1906, 1910, 1914, - 1916, 1918, 1926, 1930, 1935, 1937, 1939, 1941, - 1942, 1944, 1946, 1948, 1950, 1965, 1969, 1971, - 1973, 1978, 1982, 1988, 1990, 1992, 1996, 2000, - 2002, 2006, 2013, 2018, 2022, 2025, 2026, 2030, - 2037, 2044, 2045, 2046, 2048, 2057, 2059, 2061, - 2063, 2095, 2099, 2101, 2105, 2109, 2112, 2116, - 2120, 2123, 2125, 2131, 2144, 2146, 2149, 2151, - 2153, 2157, 2161, 2163, 2165, 2167, 2169, 2174, - 2180, 2183, 2185, 2189, 2193, 2200, 2203, 2209, - 2211, 2215, 2217, 2219, 2222, 2226, 2227, 2229, - 2235, 2241, 2247, 2249, 2253, 2257, 2262, 2267, - 2277, 2279, 2281, 2283, 2284, 2286, 2287, 2293, - 2295, 2297, 2297, 2299, 2305, 2307, 2309, 2311, - 2314, 2319, 2321, 2324, 2332, 2335, 2337, 2339, - 2342, 2372, 2380, 2382, 2386, 2389, 2394, 2399, - 2411, 2423, 2431, 2436, 2446, 2449, 2456, 2460, - 2468, 2478, 2484, 2492, 2494, 2502, 2505, 2507, - 2510, 2512, 2519, 2521, 2529, 2530, 2551, 2555, - 2561, 2566, 2568, 2572, 2576, 2578, 2582, 2584, - 2587, 2591, 2593, 2600, 2602, 2606, 2610, 2614, - 2616, 2618, 2626, 2630, 2635, 2637, 2639, 2663, - 2666, 2667, 2669, 2671, 2675, 2678, 2679, 2684, - 2685, 2688, 2691, 2697, 2699, 2703, 2705, 2716, - 2725, 2730, 2732, 2736, 2738, 2740, 2741, 2743, - 2746, 2749, 2751, 2753, 2768, 2772, 2774, 2776, - 2781, 2785, 2791, 2793, 2795, 2799, 2803, 2805, - 2809, 2816, 2821, 2825, 2828, 2829, 2833, 2840, - 2847, 2848, 2849, 2851, 2860, 2862, 2864, 2866, - 2898, 2902, 2904, 2908, 2912, 2915, 2919, 2923, - 2926, 2928, 2934, 2947, 2949, 2952, 2954, 2956, - 2960, 2964, 2966, 2968, 2970, 2972, 2977, 2983, - 2986, 2988, 2992, 2996, 3003, 3006, 3012, 3014, - 3018, 3020, 3022, 3025, 3029, 3030, 3032, 3038, - 3044, 3050, 3052, 3056, 3060, 3065, 3070, 3080, - 3082, 3084, 3086, 3126, 3126, 3129, 3133, 3138, - 3140, 3148, 3150, 3152, 3154, 3156, 3158, 3160, - 3162, 3166, 3170, 3174, 3178, 3180, 3181, 3187, - 3189, 3191, 3193, 3200, 3201, 3203, 3209, 3212, - 3215, 3219, 3222, 3225, 3232, 3234, 3258, 3260, - 3284, 3286, 3288, 3311, 3313, 3315, 3316, 3318, - 3320, 3322, 3328, 3330, 3362, 3366, 3371, 3394, - 3396, 3398, 3400, 3402, 3405, 3407, 3409, 3413, - 3413, 3469, 3525, 3556, 3561, 3565, 3587, 3596, - 3601, 3605, 3615, 3622, 3625, 3636, 3639, 3646, - 3652, 3656, 3662, 3679, 3694, 3703, 3709, 3719, - 3723, 3727, 3731, 3735, 3737, 3757, 3763, 3768, - 3770, 3772, 3775, 3777, 3779, 3783, 3839, 3895, - 3928, 3933, 3941, 3945, 3947, 3952, 3959, 3967, - 3970, 3973, 3979, 3982, 3988, 3991, 3994, 3998, - 4001, 4005, 4008, 4012, 4054, 4061, 4069, 4078, - 4082, 4089, 4091, 4093, 4103, 4107, 4111, 4115, - 4119, 4123, 4127, 4131, 4137, 4147, 4155, 4160, - 4163, 4167, 4169, 4172, 4177, 4179, 4182, 4185, - 4189, 4192, 4195, 4202, 4204, 4206, 4208, 4210, - 4213, 4218, 4220, 4223, 4231, 4234, 4236, 4238, - 4241, 4271, 4279, 4281, 4285, 4288, 4293, 4298, - 4310, 4322, 4330, 4335, 4345, 4348, 4355, 4359, - 
4367, 4377, 4383, 4391, 4393, 4401, 4404, 4406, - 4409, 4411, 4418, 4420, 4428, 4429, 4450, 4454, - 4460, 4465, 4467, 4471, 4475, 4477, 4481, 4483, - 4486, 4490, 4492, 4499, 4501, 4505, 4509, 4513, - 4515, 4517, 4525, 4529, 4534, 4536, 4538, 4562, - 4565, 4566, 4568, 4570, 4574, 4577, 4578, 4583, - 4584, 4587, 4590, 4596, 4598, 4602, 4604, 4615, - 4624, 4629, 4631, 4635, 4637, 4639, 4640, 4642, - 4645, 4648, 4650, 4652, 4667, 4671, 4673, 4675, - 4680, 4684, 4690, 4692, 4694, 4698, 4702, 4704, - 4708, 4715, 4720, 4724, 4727, 4728, 4732, 4739, - 4746, 4747, 4748, 4750, 4759, 4761, 4763, 4765, - 4797, 4801, 4803, 4807, 4811, 4814, 4818, 4822, - 4825, 4827, 4833, 4846, 4848, 4851, 4853, 4855, - 4859, 4863, 4865, 4867, 4869, 4871, 4876, 4882, - 4885, 4887, 4891, 4895, 4902, 4905, 4911, 4913, - 4917, 4919, 4921, 4924, 4928, 4929, 4931, 4937, - 4943, 4949, 4951, 4955, 4959, 4964, 4969, 4979, - 4981, 4983, 4985, 5025, 5025, 5028, 5032, 5037, - 5039, 5047, 5049, 5051, 5053, 5055, 5057, 5059, - 5061, 5065, 5069, 5073, 5077, 5079, 5080, 5086, - 5088, 5090, 5092, 5099, 5100, 5102, 5126, 5128, - 5152, 5154, 5156, 5179, 5181, 5183, 5184, 5186, - 5188, 5190, 5196, 5198, 5230, 5234, 5239, 5262, - 5264, 5266, 5268, 5270, 5273, 5275, 5277, 5281, - 5281, 5337, 5393, 5424, 5429, 5432, 5454, 5467, - 5469, 5471, 5473, 5476, 5481, 5483, 5486, 5494, - 5497, 5499, 5501, 5504, 5534, 5542, 5544, 5548, - 5551, 5556, 5561, 5573, 5585, 5593, 5598, 5608, - 5611, 5618, 5622, 5630, 5640, 5646, 5654, 5656, - 5664, 5667, 5669, 5672, 5674, 5681, 5683, 5691, - 5692, 5713, 5717, 5723, 5728, 5730, 5734, 5738, - 5740, 5744, 5746, 5749, 5753, 5755, 5762, 5764, - 5768, 5772, 5776, 5778, 5780, 5788, 5792, 5797, - 5799, 5801, 5803, 5804, 5806, 5808, 5810, 5812, - 5827, 5831, 5833, 5835, 5840, 5844, 5850, 5852, - 5854, 5858, 5862, 5864, 5868, 5875, 5880, 5884, - 5887, 5888, 5892, 5899, 5906, 5907, 5908, 5910, - 5919, 5921, 5923, 5925, 5957, 5961, 5963, 5967, - 5971, 5974, 5978, 5982, 5985, 5987, 5993, 6006, - 6008, 6011, 6013, 6015, 6019, 6023, 6025, 6027, - 6029, 6031, 6036, 6042, 6045, 6047, 6051, 6055, - 6062, 6065, 6071, 6073, 6077, 6079, 6081, 6084, - 6088, 6089, 6091, 6097, 6103, 6109, 6111, 6115, - 6119, 6124, 6129, 6139, 6141, 6143, 6145, 6146, - 6148, 6149, 6155, 6157, 6159, 6159, 6166, 6170, - 6180, 6187, 6190, 6201, 6204, 6211, 6217, 6221, - 6227, 6244, 6259, 6268, 6274, 6284, 6288, 6292, - 6296, 6300, 6302, 6322, 6328, 6333, 6335, 6337, - 6340, 6342, 6344, 6348, 6404, 6460, 6493, 6498, - 6506, 6510, 6513, 6520, 6527, 6535, 6538, 6541, - 6547, 6550, 6556, 6559, 6562, 6568, 6571, 6577, - 6580, 6586, 6628, 6635, 6643, 6652, 6656, 6658, - 6660, 6662, 6665, 6670, 6672, 6675, 6683, 6686, - 6688, 6690, 6693, 6723, 6731, 6733, 6737, 6740, - 6745, 6750, 6762, 6774, 6782, 6787, 6797, 6800, - 6807, 6811, 6819, 6829, 6835, 6843, 6845, 6853, - 6856, 6858, 6861, 6863, 6870, 6872, 6880, 6881, - 6902, 6906, 6912, 6917, 6919, 6923, 6927, 6929, - 6933, 6935, 6938, 6942, 6944, 6951, 6953, 6957, - 6961, 6965, 6967, 6969, 6977, 6981, 6986, 6988, - 6990, 7014, 7017, 7018, 7020, 7022, 7026, 7029, - 7030, 7035, 7036, 7039, 7042, 7048, 7050, 7054, - 7056, 7067, 7076, 7081, 7083, 7087, 7089, 7091, - 7092, 7094, 7097, 7100, 7102, 7104, 7119, 7123, - 7125, 7127, 7132, 7136, 7142, 7144, 7146, 7150, - 7154, 7156, 7160, 7167, 7172, 7176, 7179, 7180, - 7184, 7191, 7198, 7199, 7200, 7202, 7211, 7213, - 7215, 7217, 7249, 7253, 7255, 7259, 7263, 7266, - 7270, 7274, 7277, 7279, 7285, 7298, 7300, 7303, - 7305, 7307, 7311, 7315, 7317, 7319, 7321, 7323, - 
7328, 7334, 7337, 7339, 7343, 7347, 7354, 7357, - 7363, 7365, 7369, 7371, 7373, 7376, 7380, 7381, - 7383, 7389, 7395, 7401, 7403, 7407, 7411, 7416, - 7421, 7431, 7433, 7435, 7437, 7477, 7477, 7480, - 7484, 7489, 7491, 7499, 7501, 7503, 7505, 7507, - 7509, 7511, 7513, 7517, 7521, 7525, 7529, 7531, - 7532, 7538, 7540, 7542, 7544, 7551, 7552, 7554, - 7561, 7563, 7565, 7575, 7579, 7583, 7587, 7591, - 7595, 7599, 7603, 7609, 7619, 7627, 7632, 7635, - 7639, 7641, 7644, 7653, 7657, 7659, 7661, 7665, - 7665, 7695, 7715, 7735, 7756, 7779, 7799, 7819, - 7840, 7863, 7884, 7905, 7926, 7946, 7969, 7989, - 8010, 8031, 8052, 8073, 8093, 8113, 8133, -} - -var _graphclust_trans_keys []byte = []byte{ - 10, 0, 127, 176, 255, 131, 137, 191, - 145, 189, 135, 129, 130, 132, 133, 144, - 154, 176, 139, 159, 150, 156, 159, 164, - 167, 168, 170, 173, 145, 176, 255, 139, - 255, 166, 176, 189, 171, 179, 160, 161, - 163, 164, 165, 167, 169, 171, 173, 174, - 175, 176, 177, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, - 166, 170, 172, 178, 150, 153, 155, 163, - 165, 167, 169, 173, 153, 155, 147, 161, - 163, 255, 189, 132, 185, 144, 152, 161, - 164, 255, 188, 129, 131, 190, 255, 133, - 134, 137, 138, 142, 150, 152, 161, 164, - 189, 191, 255, 131, 134, 137, 138, 142, - 144, 146, 175, 178, 180, 182, 255, 134, - 138, 142, 161, 164, 185, 192, 255, 188, - 129, 131, 190, 191, 128, 132, 135, 136, - 139, 141, 150, 151, 162, 163, 130, 190, - 191, 151, 128, 130, 134, 136, 138, 141, - 128, 132, 190, 255, 133, 137, 142, 148, - 151, 161, 164, 255, 128, 132, 134, 136, - 138, 141, 149, 150, 162, 163, 128, 131, - 187, 188, 190, 255, 133, 137, 142, 150, - 152, 161, 164, 255, 130, 131, 138, 150, - 143, 148, 152, 159, 178, 179, 177, 179, - 186, 135, 142, 177, 179, 188, 136, 141, - 181, 183, 185, 152, 153, 190, 191, 177, - 191, 128, 132, 134, 135, 141, 151, 153, - 188, 134, 128, 129, 130, 141, 156, 157, - 158, 159, 160, 162, 164, 168, 169, 170, - 172, 173, 174, 175, 176, 179, 183, 173, - 183, 185, 190, 150, 153, 158, 160, 177, - 180, 130, 141, 157, 132, 134, 157, 159, - 146, 148, 178, 180, 146, 147, 178, 179, - 180, 255, 148, 156, 158, 255, 139, 141, - 169, 133, 134, 160, 171, 176, 187, 151, - 155, 160, 162, 191, 149, 158, 165, 188, - 176, 190, 128, 132, 180, 255, 133, 170, - 180, 255, 128, 130, 161, 173, 166, 179, - 164, 183, 173, 180, 144, 146, 148, 168, - 183, 185, 128, 185, 187, 191, 128, 131, - 179, 181, 183, 140, 141, 144, 176, 175, - 177, 191, 160, 191, 128, 130, 170, 175, - 153, 154, 153, 154, 155, 160, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, - 175, 175, 178, 180, 189, 158, 159, 176, - 177, 130, 134, 139, 163, 167, 128, 129, - 180, 255, 134, 159, 178, 190, 192, 255, - 166, 173, 135, 147, 128, 131, 179, 255, - 129, 164, 166, 255, 169, 182, 131, 188, - 140, 141, 176, 178, 180, 183, 184, 190, - 191, 129, 171, 175, 181, 182, 163, 170, - 172, 173, 172, 184, 190, 158, 128, 143, - 160, 175, 144, 145, 150, 155, 157, 158, - 159, 135, 139, 141, 168, 171, 180, 189, - 189, 160, 182, 186, 191, 129, 131, 133, - 134, 140, 143, 184, 186, 165, 166, 164, - 167, 134, 144, 128, 129, 130, 132, 133, - 134, 135, 136, 139, 140, 141, 144, 145, - 146, 147, 150, 151, 152, 153, 154, 156, - 160, 167, 168, 169, 170, 176, 178, 180, - 181, 182, 187, 128, 130, 184, 255, 135, - 190, 131, 175, 187, 255, 128, 130, 167, - 180, 179, 133, 134, 128, 130, 179, 255, - 129, 136, 141, 255, 190, 172, 183, 159, - 170, 128, 131, 187, 188, 190, 191, 151, - 128, 132, 135, 136, 139, 141, 162, 163, - 166, 172, 176, 180, 181, 191, 158, 128, - 134, 
176, 255, 132, 255, 175, 181, 184, - 255, 129, 155, 158, 255, 129, 255, 171, - 183, 157, 171, 172, 186, 164, 145, 151, - 154, 160, 129, 138, 179, 185, 187, 190, - 135, 145, 155, 138, 153, 175, 182, 184, - 191, 146, 167, 169, 182, 186, 177, 182, - 188, 189, 191, 255, 134, 136, 255, 138, - 142, 144, 145, 147, 151, 179, 182, 171, - 172, 189, 190, 176, 180, 176, 182, 143, - 145, 255, 136, 142, 147, 255, 178, 157, - 158, 133, 134, 137, 168, 169, 170, 165, - 169, 173, 178, 187, 255, 131, 132, 140, - 169, 174, 255, 130, 132, 128, 182, 187, - 255, 173, 180, 182, 255, 132, 155, 159, - 161, 175, 128, 132, 139, 163, 165, 128, - 134, 136, 152, 155, 161, 163, 164, 166, - 170, 172, 175, 144, 150, 132, 138, 143, - 187, 191, 160, 128, 129, 132, 135, 133, - 134, 160, 255, 192, 255, 128, 191, 169, - 173, 174, 128, 159, 160, 191, 128, 255, - 176, 255, 131, 137, 191, 145, 189, 135, - 129, 130, 132, 133, 144, 154, 176, 139, - 159, 150, 156, 159, 164, 167, 168, 170, - 173, 145, 176, 255, 139, 255, 166, 176, - 189, 171, 179, 160, 161, 163, 164, 165, - 167, 169, 171, 173, 174, 175, 176, 177, - 179, 180, 181, 182, 183, 184, 185, 186, - 187, 188, 189, 190, 191, 166, 170, 172, - 178, 150, 153, 155, 163, 165, 167, 169, - 173, 153, 155, 147, 161, 163, 255, 189, - 132, 185, 144, 152, 161, 164, 255, 188, - 129, 131, 190, 255, 133, 134, 137, 138, - 142, 150, 152, 161, 164, 189, 191, 255, - 131, 134, 137, 138, 142, 144, 146, 175, - 178, 180, 182, 255, 134, 138, 142, 161, - 164, 185, 192, 255, 188, 129, 131, 190, - 191, 128, 132, 135, 136, 139, 141, 150, - 151, 162, 163, 130, 190, 191, 151, 128, - 130, 134, 136, 138, 141, 128, 132, 190, - 255, 133, 137, 142, 148, 151, 161, 164, - 255, 128, 132, 134, 136, 138, 141, 149, - 150, 162, 163, 128, 131, 187, 188, 190, - 255, 133, 137, 142, 150, 152, 161, 164, - 255, 130, 131, 138, 150, 143, 148, 152, - 159, 178, 179, 177, 179, 186, 135, 142, - 177, 179, 188, 136, 141, 181, 183, 185, - 152, 153, 190, 191, 177, 191, 128, 132, - 134, 135, 141, 151, 153, 188, 134, 128, - 129, 130, 141, 156, 157, 158, 159, 160, - 162, 164, 168, 169, 170, 172, 173, 174, - 175, 176, 179, 183, 173, 183, 185, 190, - 150, 153, 158, 160, 177, 180, 130, 141, - 157, 132, 134, 157, 159, 146, 148, 178, - 180, 146, 147, 178, 179, 180, 255, 148, - 156, 158, 255, 139, 141, 169, 133, 134, - 160, 171, 176, 187, 151, 155, 160, 162, - 191, 149, 158, 165, 188, 176, 190, 128, - 132, 180, 255, 133, 170, 180, 255, 128, - 130, 161, 173, 166, 179, 164, 183, 173, - 180, 144, 146, 148, 168, 183, 185, 128, - 185, 187, 191, 128, 131, 179, 181, 183, - 140, 141, 169, 174, 128, 129, 131, 132, - 134, 140, 142, 143, 147, 150, 151, 152, - 153, 154, 155, 156, 157, 158, 164, 172, - 173, 179, 181, 183, 140, 141, 188, 137, - 144, 176, 162, 185, 148, 153, 169, 170, - 168, 154, 155, 136, 143, 169, 179, 184, - 186, 130, 182, 170, 171, 128, 187, 190, - 128, 133, 135, 146, 148, 255, 192, 255, - 128, 133, 144, 191, 128, 191, 148, 150, - 157, 161, 168, 128, 133, 136, 146, 179, - 180, 132, 135, 140, 142, 151, 147, 149, - 163, 167, 161, 176, 191, 149, 151, 180, - 181, 133, 135, 155, 156, 144, 149, 175, - 177, 191, 160, 191, 128, 130, 138, 189, - 170, 176, 153, 154, 151, 153, 153, 154, - 155, 160, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 171, 175, 175, 178, 180, - 189, 158, 159, 176, 177, 130, 134, 139, - 163, 167, 128, 129, 180, 255, 134, 159, - 178, 190, 192, 255, 166, 173, 135, 147, - 128, 131, 179, 255, 129, 164, 166, 255, - 169, 182, 131, 188, 140, 141, 176, 178, - 180, 183, 184, 190, 191, 129, 171, 175, - 181, 182, 163, 170, 172, 173, 
172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 180, 189, 189, 160, 182, 186, - 191, 129, 131, 133, 134, 140, 143, 184, - 186, 165, 166, 164, 167, 134, 144, 128, - 129, 130, 132, 133, 134, 135, 136, 139, - 140, 141, 144, 145, 146, 147, 150, 151, - 152, 153, 154, 156, 160, 167, 168, 169, - 170, 176, 178, 180, 181, 182, 187, 128, - 130, 184, 255, 135, 190, 131, 175, 187, - 255, 128, 130, 167, 180, 179, 133, 134, - 128, 130, 179, 255, 129, 136, 141, 255, - 190, 172, 183, 159, 170, 128, 131, 187, - 188, 190, 191, 151, 128, 132, 135, 136, - 139, 141, 162, 163, 166, 172, 176, 180, - 181, 191, 158, 128, 134, 176, 255, 132, - 255, 175, 181, 184, 255, 129, 155, 158, - 255, 129, 255, 171, 183, 157, 171, 172, - 186, 164, 145, 151, 154, 160, 129, 138, - 179, 185, 187, 190, 135, 145, 155, 138, - 153, 175, 182, 184, 191, 146, 167, 169, - 182, 186, 177, 182, 188, 189, 191, 255, - 134, 136, 255, 138, 142, 144, 145, 147, - 151, 179, 182, 171, 172, 189, 190, 176, - 180, 176, 182, 143, 145, 255, 136, 142, - 147, 255, 178, 157, 158, 133, 134, 137, - 168, 169, 170, 165, 169, 173, 178, 187, - 255, 131, 132, 140, 169, 174, 255, 130, - 132, 128, 182, 187, 255, 173, 180, 182, - 255, 132, 155, 159, 161, 175, 128, 132, - 139, 163, 165, 128, 134, 136, 152, 155, - 161, 163, 164, 166, 170, 172, 175, 144, - 150, 132, 138, 129, 130, 131, 132, 133, - 134, 135, 136, 137, 138, 139, 141, 143, - 144, 146, 147, 148, 149, 150, 151, 153, - 155, 157, 159, 160, 161, 162, 163, 164, - 165, 169, 191, 128, 154, 166, 167, 168, - 170, 171, 190, 175, 141, 143, 172, 177, - 190, 191, 142, 145, 154, 173, 255, 166, - 255, 154, 175, 129, 143, 178, 186, 188, - 191, 137, 255, 128, 189, 134, 255, 144, - 255, 180, 191, 149, 191, 140, 143, 136, - 143, 154, 159, 136, 143, 174, 255, 140, - 186, 188, 191, 128, 133, 135, 191, 190, - 255, 160, 128, 129, 132, 135, 133, 134, - 160, 255, 128, 130, 170, 175, 144, 145, - 150, 155, 157, 158, 159, 143, 187, 191, - 156, 128, 133, 134, 191, 128, 255, 176, - 255, 131, 137, 191, 145, 189, 135, 129, - 130, 132, 133, 144, 154, 176, 139, 159, - 150, 156, 159, 164, 167, 168, 170, 173, - 145, 176, 255, 139, 255, 166, 176, 189, - 171, 179, 160, 161, 163, 164, 165, 167, - 169, 171, 173, 174, 175, 176, 177, 179, - 180, 181, 182, 183, 184, 185, 186, 187, - 188, 189, 190, 191, 166, 170, 172, 178, - 150, 153, 155, 163, 165, 167, 169, 173, - 153, 155, 147, 161, 163, 255, 189, 132, - 185, 144, 152, 161, 164, 255, 188, 129, - 131, 190, 255, 133, 134, 137, 138, 142, - 150, 152, 161, 164, 189, 191, 255, 131, - 134, 137, 138, 142, 144, 146, 175, 178, - 180, 182, 255, 134, 138, 142, 161, 164, - 185, 192, 255, 188, 129, 131, 190, 191, - 128, 132, 135, 136, 139, 141, 150, 151, - 162, 163, 130, 190, 191, 151, 128, 130, - 134, 136, 138, 141, 128, 132, 190, 255, - 133, 137, 142, 148, 151, 161, 164, 255, - 128, 132, 134, 136, 138, 141, 149, 150, - 162, 163, 128, 131, 187, 188, 190, 255, - 133, 137, 142, 150, 152, 161, 164, 255, - 130, 131, 138, 150, 143, 148, 152, 159, - 178, 179, 177, 179, 186, 135, 142, 177, - 179, 188, 136, 141, 181, 183, 185, 152, - 153, 190, 191, 177, 191, 128, 132, 134, - 135, 141, 151, 153, 188, 134, 128, 129, - 130, 141, 156, 157, 158, 159, 160, 162, - 164, 168, 169, 170, 172, 173, 174, 175, - 176, 179, 183, 173, 183, 185, 190, 150, - 153, 158, 160, 177, 180, 130, 141, 157, - 132, 134, 157, 159, 146, 148, 178, 180, - 146, 147, 178, 179, 180, 255, 148, 156, - 158, 255, 139, 141, 169, 133, 134, 160, - 171, 176, 187, 151, 155, 160, 162, 191, - 149, 158, 165, 
188, 176, 190, 128, 132, - 180, 255, 133, 170, 180, 255, 128, 130, - 161, 173, 166, 179, 164, 183, 173, 180, - 144, 146, 148, 168, 183, 185, 128, 185, - 187, 191, 128, 131, 179, 181, 183, 140, - 141, 144, 176, 175, 177, 191, 160, 191, - 128, 130, 170, 175, 153, 154, 153, 154, - 155, 160, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 171, 175, 175, 178, 180, - 189, 158, 159, 176, 177, 130, 134, 139, - 163, 167, 128, 129, 180, 255, 134, 159, - 178, 190, 192, 255, 166, 173, 135, 147, - 128, 131, 179, 255, 129, 164, 166, 255, - 169, 182, 131, 188, 140, 141, 176, 178, - 180, 183, 184, 190, 191, 129, 171, 175, - 181, 182, 163, 170, 172, 173, 172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 180, 189, 189, 160, 182, 186, - 191, 129, 131, 133, 134, 140, 143, 184, - 186, 165, 166, 164, 167, 134, 144, 128, - 129, 130, 132, 133, 134, 135, 136, 139, - 140, 141, 144, 145, 146, 147, 150, 151, - 152, 153, 154, 156, 160, 167, 168, 169, - 170, 176, 178, 180, 181, 182, 187, 128, - 130, 184, 255, 135, 190, 131, 175, 187, - 255, 128, 130, 167, 180, 179, 133, 134, - 128, 130, 179, 255, 129, 136, 141, 255, - 190, 172, 183, 159, 170, 128, 131, 187, - 188, 190, 191, 151, 128, 132, 135, 136, - 139, 141, 162, 163, 166, 172, 176, 180, - 181, 191, 158, 128, 134, 176, 255, 132, - 255, 175, 181, 184, 255, 129, 155, 158, - 255, 129, 255, 171, 183, 157, 171, 172, - 186, 164, 145, 151, 154, 160, 129, 138, - 179, 185, 187, 190, 135, 145, 155, 138, - 153, 175, 182, 184, 191, 146, 167, 169, - 182, 186, 177, 182, 188, 189, 191, 255, - 134, 136, 255, 138, 142, 144, 145, 147, - 151, 179, 182, 171, 172, 189, 190, 176, - 180, 176, 182, 143, 145, 255, 136, 142, - 147, 255, 178, 157, 158, 133, 134, 137, - 168, 169, 170, 165, 169, 173, 178, 187, - 255, 131, 132, 140, 169, 174, 255, 130, - 132, 128, 182, 187, 255, 173, 180, 182, - 255, 132, 155, 159, 161, 175, 128, 132, - 139, 163, 165, 128, 134, 136, 152, 155, - 161, 163, 164, 166, 170, 172, 175, 144, - 150, 132, 138, 143, 187, 191, 160, 128, - 129, 132, 135, 133, 134, 160, 255, 192, - 255, 128, 191, 169, 174, 160, 172, 175, - 191, 128, 255, 176, 255, 131, 137, 191, - 145, 189, 135, 129, 130, 132, 133, 144, - 154, 176, 139, 159, 150, 156, 159, 164, - 167, 168, 170, 173, 145, 176, 255, 139, - 255, 166, 176, 189, 171, 179, 160, 161, - 163, 164, 165, 167, 169, 171, 173, 174, - 175, 176, 177, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, - 166, 170, 172, 178, 150, 153, 155, 163, - 165, 167, 169, 173, 153, 155, 147, 161, - 163, 255, 189, 132, 185, 144, 152, 161, - 164, 255, 188, 129, 131, 190, 255, 133, - 134, 137, 138, 142, 150, 152, 161, 164, - 189, 191, 255, 131, 134, 137, 138, 142, - 144, 146, 175, 178, 180, 182, 255, 134, - 138, 142, 161, 164, 185, 192, 255, 188, - 129, 131, 190, 191, 128, 132, 135, 136, - 139, 141, 150, 151, 162, 163, 130, 190, - 191, 151, 128, 130, 134, 136, 138, 141, - 128, 132, 190, 255, 133, 137, 142, 148, - 151, 161, 164, 255, 128, 132, 134, 136, - 138, 141, 149, 150, 162, 163, 128, 131, - 187, 188, 190, 255, 133, 137, 142, 150, - 152, 161, 164, 255, 130, 131, 138, 150, - 143, 148, 152, 159, 178, 179, 177, 179, - 186, 135, 142, 177, 179, 188, 136, 141, - 181, 183, 185, 152, 153, 190, 191, 177, - 191, 128, 132, 134, 135, 141, 151, 153, - 188, 134, 128, 129, 130, 141, 156, 157, - 158, 159, 160, 162, 164, 168, 169, 170, - 172, 173, 174, 175, 176, 179, 183, 173, - 183, 185, 190, 150, 153, 158, 160, 177, - 180, 130, 141, 157, 132, 134, 157, 159, - 146, 148, 178, 180, 146, 147, 178, 179, - 
180, 255, 148, 156, 158, 255, 139, 141, - 169, 133, 134, 160, 171, 176, 187, 151, - 155, 160, 162, 191, 149, 158, 165, 188, - 176, 190, 128, 132, 180, 255, 133, 170, - 180, 255, 128, 130, 161, 173, 166, 179, - 164, 183, 173, 180, 144, 146, 148, 168, - 183, 185, 128, 185, 187, 191, 128, 131, - 179, 181, 183, 140, 141, 169, 174, 128, - 129, 131, 132, 134, 140, 142, 143, 147, - 150, 151, 152, 153, 154, 155, 156, 157, - 158, 164, 172, 173, 179, 181, 183, 140, - 141, 188, 137, 144, 176, 162, 185, 148, - 153, 169, 170, 168, 154, 155, 136, 143, - 169, 179, 184, 186, 130, 182, 170, 171, - 128, 187, 190, 128, 133, 135, 146, 148, - 255, 192, 255, 128, 133, 144, 191, 128, - 191, 148, 150, 157, 161, 168, 128, 133, - 136, 146, 179, 180, 132, 135, 140, 142, - 151, 147, 149, 163, 167, 161, 176, 191, - 149, 151, 180, 181, 133, 135, 155, 156, - 144, 149, 175, 177, 191, 160, 191, 128, - 130, 138, 189, 170, 176, 153, 154, 151, - 153, 153, 154, 155, 160, 162, 163, 164, - 165, 166, 167, 168, 169, 170, 171, 175, - 175, 178, 180, 189, 158, 159, 176, 177, - 130, 134, 139, 163, 167, 128, 129, 180, - 255, 134, 159, 178, 190, 192, 255, 166, - 173, 135, 147, 128, 131, 179, 255, 129, - 164, 166, 255, 169, 182, 131, 188, 140, - 141, 176, 178, 180, 183, 184, 190, 191, - 129, 171, 175, 181, 182, 163, 170, 172, - 173, 172, 184, 190, 158, 128, 143, 160, - 175, 144, 145, 150, 155, 157, 158, 159, - 135, 139, 141, 168, 171, 180, 189, 189, - 160, 182, 186, 191, 129, 131, 133, 134, - 140, 143, 184, 186, 165, 166, 164, 167, - 134, 144, 128, 129, 130, 132, 133, 134, - 135, 136, 139, 140, 141, 144, 145, 146, - 147, 150, 151, 152, 153, 154, 156, 160, - 167, 168, 169, 170, 176, 178, 180, 181, - 182, 187, 128, 130, 184, 255, 135, 190, - 131, 175, 187, 255, 128, 130, 167, 180, - 179, 133, 134, 128, 130, 179, 255, 129, - 136, 141, 255, 190, 172, 183, 159, 170, - 128, 131, 187, 188, 190, 191, 151, 128, - 132, 135, 136, 139, 141, 162, 163, 166, - 172, 176, 180, 181, 191, 158, 128, 134, - 176, 255, 132, 255, 175, 181, 184, 255, - 129, 155, 158, 255, 129, 255, 171, 183, - 157, 171, 172, 186, 164, 145, 151, 154, - 160, 129, 138, 179, 185, 187, 190, 135, - 145, 155, 138, 153, 175, 182, 184, 191, - 146, 167, 169, 182, 186, 177, 182, 188, - 189, 191, 255, 134, 136, 255, 138, 142, - 144, 145, 147, 151, 179, 182, 171, 172, - 189, 190, 176, 180, 176, 182, 143, 145, - 255, 136, 142, 147, 255, 178, 157, 158, - 133, 134, 137, 168, 169, 170, 165, 169, - 173, 178, 187, 255, 131, 132, 140, 169, - 174, 255, 130, 132, 128, 182, 187, 255, - 173, 180, 182, 255, 132, 155, 159, 161, - 175, 128, 132, 139, 163, 165, 128, 134, - 136, 152, 155, 161, 163, 164, 166, 170, - 172, 175, 144, 150, 132, 138, 129, 130, - 131, 132, 133, 134, 135, 136, 137, 138, - 139, 141, 143, 144, 146, 147, 148, 149, - 150, 151, 153, 155, 157, 159, 160, 161, - 162, 163, 164, 165, 169, 191, 128, 154, - 166, 167, 168, 170, 171, 190, 175, 141, - 143, 172, 177, 190, 191, 142, 145, 154, - 173, 255, 166, 255, 154, 175, 129, 143, - 178, 186, 188, 191, 137, 255, 128, 189, - 134, 255, 144, 255, 180, 191, 149, 191, - 140, 143, 136, 143, 154, 159, 136, 143, - 174, 255, 140, 186, 188, 191, 128, 133, - 135, 191, 190, 255, 160, 128, 129, 132, - 135, 133, 134, 160, 255, 128, 130, 170, - 175, 144, 145, 150, 155, 157, 158, 159, - 143, 187, 191, 128, 133, 134, 155, 157, - 191, 157, 128, 191, 143, 128, 191, 163, - 181, 128, 191, 162, 128, 191, 142, 128, - 191, 132, 133, 134, 135, 160, 128, 191, - 128, 255, 128, 129, 130, 132, 133, 134, - 141, 156, 157, 158, 159, 160, 162, 164, - 168, 169, 170, 172, 173, 
-	[... several thousand lines of machine-generated state-machine byte values elided ...]
-}
-
-var _graphclust_single_lengths []byte = []byte{
-	[... generated data elided ...]
-}
-
-var _graphclust_range_lengths []byte = []byte{
-	[... generated data elided ...]
-}
-
-var _graphclust_index_offsets []int16 = []int16{
-	[... generated data elided ...]
-}
-
-var _graphclust_indicies []int16 = []int16{
-	[... generated data elided; the array continues ...]
- 761, 760, 907, 908, 909, 910, 911, 912, - 760, 761, 761, 761, 760, 760, 760, 760, - 761, 761, 760, 761, 761, 760, 760, 760, - 761, 761, 761, 761, 760, 913, 903, 914, - 915, 916, 760, 761, 761, 761, 761, 761, - 760, 761, 760, 761, 760, 761, 760, 918, - 828, 830, 919, 920, 921, 922, 923, 924, - 918, 828, 828, 828, 830, 918, 828, 925, - 926, 918, 828, 927, 828, 928, 929, 930, - 931, 932, 828, 933, 934, 828, 935, 917, - 830, 917, 918, 760, 761, 761, 761, 760, - 761, 761, 760, 761, 761, 761, 760, 760, - 761, 761, 761, 761, 761, 761, 760, 761, - 760, 761, 760, 761, 760, 760, 761, 761, - 760, 761, 760, 761, 760, 761, 761, 760, - 761, 761, 760, 761, 761, 760, 761, 761, - 760, 760, 761, 936, 760, 937, 828, 917, - 938, 918, 760, 761, 760, 939, 838, 760, - 761, 760, 857, 858, 859, 860, 861, 862, - 940, 760, 941, 760, 761, 760, 790, 791, - 792, 754, 755, 942, 793, 794, 795, 796, - 797, 798, 799, 800, 801, 802, 803, 804, - 805, 806, 807, 808, 809, 810, 760, 943, - 759, 790, 791, 792, 944, 756, 757, 793, - 794, 795, 796, 797, 798, 799, 800, 801, - 802, 803, 804, 805, 806, 807, 808, 809, - 810, 760, 943, 760, 945, 943, 790, 791, - 792, 946, 757, 793, 794, 795, 796, 797, - 798, 799, 800, 801, 802, 803, 804, 805, - 806, 807, 808, 809, 810, 760, 945, 760, - 146, 945, 947, 760, 945, 760, 948, 949, - 760, 943, 760, 760, 945, 760, 943, 760, - 943, 840, 841, 842, 843, 844, 845, 846, - 950, 848, 849, 850, 851, 852, 853, 854, - 952, 953, 954, 955, 956, 957, 952, 953, - 954, 955, 956, 957, 952, 951, 958, 760, - 761, 759, 760, 959, 959, 959, 945, 760, - 790, 791, 792, 944, 942, 793, 794, 795, - 796, 797, 798, 799, 800, 801, 802, 803, - 804, 805, 806, 807, 808, 809, 810, 760, - 948, 960, 760, 760, 943, 959, 959, 945, - 959, 959, 945, 959, 959, 959, 945, 959, - 959, 945, 959, 959, 945, 959, 959, 760, - 945, 945, 954, 955, 956, 957, 951, 952, - 954, 955, 956, 957, 951, 952, 954, 955, - 956, 957, 951, 952, 954, 955, 956, 957, - 951, 952, 954, 955, 956, 957, 951, 952, - 954, 955, 956, 957, 951, 952, 954, 955, - 956, 957, 951, 952, 954, 955, 956, 957, - 951, 952, 954, 955, 956, 957, 951, 952, - 953, 958, 955, 956, 957, 951, 952, 953, - 955, 956, 957, 951, 952, 953, 955, 956, - 957, 951, 952, 953, 955, 956, 957, 951, - 952, 953, 955, 956, 957, 951, 952, 953, - 955, 956, 957, 951, 952, 953, 955, 956, - 957, 951, 952, 953, 955, 956, 957, 951, - 952, 953, 955, 956, 957, 951, 952, 953, - 954, 958, 956, 957, 951, 952, 953, 954, - 956, 957, 951, 952, 953, 954, 956, 957, - 951, 952, 953, 954, 956, 957, 951, 952, - 953, 954, 956, 961, 960, 955, 760, 958, - 959, 760, 943, 945, 144, 3, 1, 962, - 963, 964, 965, 966, 967, 968, 969, 970, - 971, 972, 214, 973, 216, 974, 975, 976, - 977, 978, 979, 751, 1, 144, 980, 145, - 3, 144, 3, 144, 3, 1, 980, 981, - 981, 980, 980, 981, 980, 980, 981, 980, - 980, 980, 981, 980, 981, 980, 980, 981, - 980, 980, 980, 980, 981, 980, 980, 981, - 981, 980, 980, 981, 980, 980, 981, 982, - 983, 984, 985, 986, 988, 989, 990, 992, - 993, 994, 995, 996, 997, 998, 999, 1000, - 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, - 1009, 987, 991, 981, 980, 980, 980, 980, - 981, 980, 981, 980, 980, 981, 981, 981, - 980, 981, 981, 981, 980, 980, 980, 980, - 981, 981, 981, 981, 981, 981, 981, 980, - 981, 981, 981, 981, 981, 981, 980, 981, - 981, 981, 981, 981, 980, 980, 980, 980, - 981, 980, 980, 980, 980, 980, 981, 980, - 980, 981, 980, 980, 980, 980, 981, 980, - 980, 981, 981, 981, 981, 981, 981, 980, - 980, 980, 980, 980, 980, 981, 980, 980, - 980, 981, 981, 981, 981, 981, 981, 980, - 980, 981, 980, 980, 980, 
980, 980, 981, - 980, 980, 981, 980, 981, 980, 980, 981, - 980, 981, 980, 980, 980, 980, 980, 981, - 980, 981, 980, 980, 980, 980, 981, 980, - 981, 1010, 1011, 1012, 1013, 1014, 1015, 1016, - 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, - 1025, 1026, 1027, 1028, 1029, 1030, 981, 980, - 980, 981, 980, 980, 980, 981, 980, 980, - 980, 980, 981, 980, 981, 980, 980, 981, - 980, 980, 981, 980, 981, 981, 981, 980, - 980, 981, 980, 980, 981, 980, 980, 981, - 980, 981, 980, 980, 980, 980, 980, 981, - 980, 981, 980, 980, 981, 981, 981, 980, - 980, 980, 981, 980, 981, 980, 981, 980, - 980, 980, 980, 980, 981, 980, 980, 981, - 1031, 1032, 1033, 1034, 1035, 981, 980, 981, - 980, 981, 980, 981, 980, 981, 980, 981, - 1036, 1037, 981, 980, 981, 980, 981, 1038, - 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, - 1047, 1048, 1049, 1050, 1051, 1052, 981, 980, - 980, 981, 980, 981, 980, 981, 980, 980, - 980, 980, 981, 980, 980, 981, 981, 981, - 981, 980, 980, 981, 980, 981, 980, 980, - 981, 981, 981, 980, 980, 981, 980, 980, - 980, 981, 980, 980, 980, 980, 981, 980, - 980, 980, 981, 980, 980, 981, 1053, 1054, - 1039, 981, 980, 981, 980, 980, 981, 1055, - 1056, 1057, 1058, 1059, 1060, 1061, 981, 1062, - 1063, 1064, 1065, 1066, 1067, 1068, 981, 980, - 981, 980, 981, 980, 981, 980, 980, 980, - 980, 980, 981, 980, 981, 980, 981, 980, - 981, 1069, 1070, 1071, 1072, 1073, 1074, 1075, - 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, - 1084, 1085, 1082, 1086, 1087, 1088, 1089, 1090, - 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, - 1099, 981, 980, 980, 981, 981, 980, 981, - 981, 980, 980, 980, 981, 980, 980, 981, - 980, 980, 981, 981, 981, 980, 980, 980, - 981, 980, 981, 980, 980, 980, 981, 980, - 980, 980, 980, 980, 980, 980, 981, 980, - 981, 980, 980, 981, 980, 981, 981, 980, - 980, 980, 981, 981, 981, 980, 981, 980, - 980, 981, 980, 981, 980, 981, 980, 980, - 980, 981, 980, 980, 980, 981, 980, 980, - 981, 980, 981, 980, 980, 981, 980, 980, - 981, 980, 980, 980, 980, 981, 981, 981, - 980, 980, 980, 980, 981, 980, 981, 1100, - 1101, 1102, 1103, 981, 980, 981, 980, 981, - 980, 980, 981, 981, 981, 980, 1104, 981, - 980, 981, 1105, 1106, 1107, 1108, 1109, 1110, - 981, 980, 980, 980, 981, 981, 981, 981, - 980, 980, 981, 980, 980, 981, 981, 981, - 980, 980, 980, 980, 981, 1111, 1101, 1112, - 1113, 1114, 981, 980, 980, 980, 980, 980, - 981, 980, 981, 980, 981, 980, 981, 1115, - 981, 980, 981, 1116, 981, 1117, 1118, 1119, - 1121, 1120, 981, 980, 981, 981, 980, 980, - 145, 3, 144, 3, 1, 145, 145, 3, - 1, 3, 145, 3, 145, 3, 1, 145, - 3, 145, 3, 1, 145, 3, 1, 145, - 3, 145, 3, 145, 3, 1, 145, 3, - 1, 145, 3, 145, 3, 1, 145, 3, - 3, 145, 1, 3, 3, 145, 1, 145, - 3, 145, 1, 3, 3, 145, 145, 145, - 145, 3, 145, 3, 145, 3, 1, 145, - 145, 145, 145, 3, 3, 145, 3, 145, - 3, 1, 145, 145, 145, 3, 145, 3, - 1, 3, 145, 3, 1, 3, 145, 3, - 145, 3, 1, 145, 145, 3, 1, 1122, - 1123, 751, 1, 145, 145, 3, 1, 145, - 145, 3, 1, 751, 1, 1124, 1126, 1127, - 1128, 1129, 1130, 1131, 1126, 1127, 1128, 1129, - 1130, 1131, 1126, 751, 1125, 958, 1, 3, - 759, 3, 1, 943, 943, 943, 945, 1, - 943, 943, 945, 943, 943, 945, 943, 943, - 943, 945, 943, 943, 945, 943, 943, 945, - 943, 943, 1, 945, 1128, 1129, 1130, 1131, - 1125, 1126, 1128, 1129, 1130, 1131, 1125, 1126, - 1128, 1129, 1130, 1131, 1125, 1126, 1128, 1129, - 1130, 1131, 1125, 1126, 1128, 1129, 1130, 1131, - 1125, 1126, 1128, 1129, 1130, 1131, 1125, 1126, - 1128, 1129, 1130, 1131, 1125, 1126, 1128, 1129, - 1130, 1131, 1125, 1126, 1128, 1129, 1130, 1131, - 1125, 1126, 1127, 958, 1129, 1130, 1131, 
1125, - 1126, 1127, 1129, 1130, 1131, 1125, 1126, 1127, - 1129, 1130, 1131, 1125, 1126, 1127, 1129, 1130, - 1131, 1125, 1126, 1127, 1129, 1130, 1131, 1125, - 1126, 1127, 1129, 1130, 1131, 1125, 1126, 1127, - 1129, 1130, 1131, 1125, 1126, 1127, 1129, 1130, - 1131, 1125, 1126, 1127, 1129, 1130, 1131, 1125, - 1126, 1127, 1128, 958, 1130, 1131, 1125, 1126, - 1127, 1128, 1130, 1131, 1125, 1126, 1127, 1128, - 1130, 1131, 1125, 1126, 1127, 1128, 1130, 1131, - 1125, 1126, 1127, 1128, 1130, 1132, 1133, 1129, - 751, 1, 958, 943, 3, 943, 945, 3, - 945, 3, 1, 943, 1134, 1135, 751, 1, - 144, 3, 1, 3, 3, 144, 3, 1, - 1137, 1138, 1139, 1140, 1141, 1136, 1, 1142, - 1143, 1144, 1145, 1146, 1147, 751, 1, 328, - 3, 1, 328, 3, 1, 3, 328, 3, - 1, 328, 3, 1, 3, 328, 3, 1, - 328, 3, 1, 1148, 751, 1, 3, 144, - 3, 1, 1149, 751, 1, 3, 144, 3, - 1, 1150, 751, 1, 3, 144, 3, 1, - 304, 214, 216, 1151, 1152, 1153, 1154, 1155, - 1156, 304, 214, 214, 1157, 216, 304, 214, - 1158, 1159, 304, 214, 1160, 751, 1161, 751, - 1162, 1163, 1164, 1165, 1166, 1167, 214, 1168, - 303, 214, 216, 303, 304, 1, 145, 3, - 145, 3, 1, 3, 145, 3, 145, 1, - 145, 3, 145, 3, 145, 1, 1169, 1, - 145, 1170, 1171, 1170, 1171, 1171, 1170, 1171, - 1171, 1170, 1171, 1171, 1171, 1170, 1171, 1170, - 1171, 1171, 1170, 1171, 1171, 1171, 1171, 1170, - 1171, 1171, 1170, 1170, 1171, 1171, 1170, 1171, - 1171, 1170, 1172, 1173, 1174, 1175, 1176, 1178, - 1179, 1180, 1182, 1183, 1184, 1185, 1186, 1187, - 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, - 1196, 1197, 1198, 1199, 1177, 1181, 1170, 1171, - 1171, 1171, 1171, 1170, 1171, 1170, 1171, 1171, - 1170, 1170, 1170, 1171, 1170, 1170, 1170, 1171, - 1171, 1171, 1171, 1170, 1170, 1170, 1170, 1170, - 1170, 1170, 1171, 1170, 1170, 1170, 1170, 1170, - 1170, 1171, 1170, 1170, 1170, 1170, 1170, 1171, - 1171, 1171, 1171, 1170, 1171, 1171, 1171, 1171, - 1171, 1170, 1171, 1171, 1170, 1171, 1171, 1171, - 1171, 1170, 1171, 1171, 1170, 1170, 1170, 1170, - 1170, 1170, 1171, 1171, 1171, 1171, 1171, 1171, - 1170, 1171, 1171, 1171, 1170, 1170, 1170, 1170, - 1170, 1170, 1171, 1171, 1170, 1171, 1171, 1171, - 1171, 1171, 1170, 1171, 1171, 1170, 1171, 1170, - 1171, 1171, 1170, 1171, 1170, 1171, 1171, 1171, - 1171, 1171, 1170, 1171, 1170, 1171, 1171, 1171, - 1171, 1170, 1171, 1170, 1200, 1201, 1202, 1203, - 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, - 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, - 1220, 1170, 1171, 1171, 1170, 1171, 1171, 1171, - 1170, 1171, 1171, 1171, 1171, 1170, 1171, 1170, - 1171, 1171, 1170, 1171, 1171, 1170, 1171, 1170, - 1170, 1170, 1171, 1171, 1170, 1171, 1171, 1170, - 1171, 1171, 1170, 1171, 1170, 1171, 1171, 1171, - 1171, 1171, 1170, 1171, 1170, 1171, 1171, 1170, - 1170, 1170, 1171, 1171, 1171, 1170, 1171, 1170, - 1171, 1170, 1171, 1171, 1171, 1171, 1171, 1170, - 1171, 1171, 1170, 1221, 1222, 1223, 1224, 1225, - 1170, 1171, 1226, 1170, 1171, 1171, 1170, 1227, - 1228, 1222, 1229, 1230, 1231, 1232, 1233, 1234, - 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, - 1243, 1244, 1245, 1246, 1223, 1224, 1225, 1170, - 1171, 1226, 1171, 1170, 1171, 1170, 1171, 1170, - 1171, 1171, 1170, 1171, 1171, 1170, 1171, 1171, - 1170, 1171, 1170, 1171, 1171, 1171, 1170, 1171, - 1170, 1171, 1171, 1170, 1171, 1171, 1170, 1171, - 1171, 1171, 1170, 1170, 1171, 1171, 1171, 1170, - 1171, 1170, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1170, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1170, 1171, 1171, 1171, 1171, 1170, - 1171, 1170, 1171, 1171, 1170, 1171, 1171, 1170, - 1171, 1170, 1171, 1170, 1171, 1170, 1247, 1248, - 
1249, 1170, 1171, 1171, 1170, 1171, 1170, 1171, - 1171, 1170, 1250, 1251, 1252, 1253, 1254, 1255, - 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, - 1264, 1170, 1171, 1171, 1170, 1171, 1170, 1171, - 1170, 1171, 1171, 1171, 1171, 1170, 1171, 1171, - 1170, 1170, 1170, 1170, 1171, 1171, 1170, 1171, - 1170, 1171, 1171, 1170, 1170, 1170, 1171, 1171, - 1170, 1171, 1171, 1171, 1170, 1171, 1171, 1171, - 1171, 1170, 1171, 1171, 1171, 1170, 1171, 1171, - 1170, 1265, 1266, 1251, 1170, 1171, 1170, 1171, - 1171, 1170, 1267, 1268, 1269, 1270, 1271, 1272, - 1273, 1170, 1274, 1275, 1276, 1277, 1278, 1279, - 1280, 1170, 1171, 1170, 1171, 1170, 1171, 1170, - 1171, 1171, 1171, 1171, 1171, 1170, 1171, 1170, - 1171, 1170, 1171, 1170, 1281, 1282, 1283, 1284, - 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, - 1293, 1294, 1295, 1296, 1297, 1294, 1298, 1299, - 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, - 1308, 1309, 1310, 1311, 1170, 1171, 1171, 1170, - 1170, 1171, 1170, 1170, 1171, 1171, 1171, 1170, - 1171, 1171, 1170, 1171, 1171, 1170, 1170, 1170, - 1171, 1171, 1171, 1170, 1171, 1170, 1171, 1171, - 1171, 1170, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1170, 1171, 1170, 1171, 1171, 1170, 1171, - 1170, 1170, 1171, 1171, 1171, 1170, 1170, 1170, - 1171, 1170, 1171, 1171, 1170, 1171, 1170, 1171, - 1170, 1171, 1171, 1171, 1170, 1171, 1171, 1171, - 1170, 1171, 1171, 1170, 1171, 1170, 1171, 1171, - 1170, 1171, 1171, 1170, 1171, 1171, 1171, 1171, - 1170, 1170, 1170, 1171, 1171, 1171, 1171, 1170, - 1171, 1170, 1312, 1313, 1314, 1315, 1170, 1171, - 1170, 1171, 1170, 1171, 1171, 1170, 1170, 1170, - 1171, 1316, 1170, 1171, 1170, 1317, 1318, 1319, - 1320, 1321, 1322, 1170, 1171, 1171, 1171, 1170, - 1170, 1170, 1170, 1171, 1171, 1170, 1171, 1171, - 1170, 1170, 1170, 1171, 1171, 1171, 1171, 1170, - 1323, 1313, 1324, 1325, 1326, 1170, 1171, 1171, - 1171, 1171, 1171, 1170, 1171, 1170, 1171, 1170, - 1171, 1170, 1328, 1238, 1240, 1329, 1330, 1331, - 1332, 1333, 1334, 1328, 1238, 1238, 1238, 1240, - 1328, 1238, 1335, 1336, 1328, 1238, 1337, 1238, - 1338, 1339, 1340, 1341, 1342, 1238, 1343, 1344, - 1238, 1345, 1327, 1240, 1327, 1328, 1170, 1171, - 1171, 1171, 1170, 1171, 1171, 1170, 1171, 1171, - 1171, 1170, 1170, 1171, 1171, 1171, 1171, 1171, - 1171, 1170, 1171, 1170, 1171, 1170, 1171, 1170, - 1170, 1171, 1171, 1170, 1171, 1170, 1171, 1170, - 1171, 1171, 1170, 1171, 1171, 1170, 1171, 1171, - 1170, 1171, 1171, 1170, 1170, 1171, 1346, 1170, - 1347, 1238, 1327, 1348, 1328, 1170, 1171, 1170, - 1349, 1248, 1170, 1171, 1170, 1267, 1268, 1269, - 1270, 1271, 1272, 1350, 1170, 1351, 1170, 1171, - 1170, 1267, 1268, 1269, 1270, 1271, 1272, 1352, - 1170, 1353, 1351, 1170, 1171, 1170, 3, 145, - 145, 3, 145, 3, 145, 1, 3, 145, - 1, 3, 1, 145, 145, 3, 1, 3, - 145, 1, 3, 1, 145, 3, 145, 1, - 3, 145, 1, 3, 145, 3, 1, 3, - 145, 3, 145, 3, 1, 3, 145, 3, - 145, 1, 3, 3, 145, 1, 3, 145, - 1, 3, 1, 145, 1136, 1, 1354, 1136, - 1, 1355, 1356, 1357, 1358, 1357, 751, 1359, - 1, 144, 3, 1, 1, 144, 1, 144, - 3, 144, 1, 144, 1, 1361, 1360, 1364, - 1365, 1366, 1367, 1368, 1369, 1370, 1371, 1373, - 1374, 1375, 1376, 1377, 1378, 1380, 1360, 1, - 1363, 1372, 1379, 1, 1362, 141, 143, 1382, - 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, - 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, - 1399, 1381, 303, 324, 1401, 1402, 1403, 1404, - 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, - 1413, 1414, 1415, 1416, 1417, 1418, 1400, 1419, - 303, 324, 1401, 1402, 1403, 1404, 1405, 1406, - 1407, 1408, 1409, 1410, 1411, 1412, 1420, 1421, - 1415, 1416, 1422, 1418, 
1400, 1424, 1425, 1426, - 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, - 1435, 1436, 1437, 1439, 330, 652, 713, 1438, - 1423, 468, 470, 1440, 1441, 1442, 1443, 1444, - 1445, 1446, 1447, 1448, 1449, 1450, 1451, 1452, - 1453, 1454, 1455, 1456, 1457, 1423, 627, 648, - 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, - 1466, 1467, 1468, 1469, 1470, 1471, 1472, 1473, - 1474, 1475, 1423, 1476, 627, 648, 1458, 1459, - 1460, 1461, 1462, 1463, 1464, 1465, 1466, 1467, - 1468, 1469, 1477, 1478, 1472, 1473, 1479, 1475, - 1423, 627, 648, 1458, 1459, 1460, 1461, 1462, - 1463, 1464, 1465, 1466, 1467, 1468, 1480, 1470, - 1471, 1481, 1482, 1483, 1484, 1473, 1474, 1475, - 1423, 627, 648, 1458, 1459, 1460, 1461, 1462, - 1463, 1464, 1465, 1466, 1467, 1468, 1485, 1470, - 1471, 1472, 1486, 1473, 1474, 1475, 1423, 627, - 648, 1458, 1459, 1460, 1461, 1462, 1463, 1464, - 1465, 1466, 1467, 1468, 1487, 1470, 1471, 1472, - 1488, 1473, 1474, 1475, 1423, 627, 648, 1458, - 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1466, - 1467, 1468, 1489, 1470, 1471, 1472, 1490, 1473, - 1474, 1475, 1423, 627, 648, 1458, 1459, 1460, - 1461, 1462, 1463, 1464, 1465, 1466, 1467, 1468, - 1469, 1470, 1471, 1472, 1473, 1491, 1475, 1423, - 917, 938, 1493, 1494, 1495, 1496, 1497, 1498, - 1499, 1500, 1501, 1502, 1503, 1504, 1505, 1506, - 1507, 1508, 1509, 1510, 1511, 1512, 1513, 1492, - 917, 938, 1493, 1494, 1495, 1496, 1497, 1498, - 1499, 1500, 1501, 1502, 1503, 1514, 1505, 1506, - 1515, 1511, 1512, 1513, 1492, 1516, 917, 938, - 1493, 1494, 1495, 1496, 1497, 1498, 1499, 1500, - 1501, 1502, 1503, 1514, 1517, 1518, 1515, 1511, - 1519, 1513, 1492, 917, 938, 1493, 1494, 1495, - 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503, - 1520, 1505, 1506, 1515, 1521, 1511, 1512, 1513, - 1492, 917, 938, 1493, 1494, 1495, 1496, 1497, - 1498, 1499, 1500, 1501, 1502, 1503, 1522, 1505, - 1506, 1515, 1523, 1511, 1512, 1513, 1492, 917, - 938, 1493, 1494, 1495, 1496, 1497, 1498, 1499, - 1500, 1501, 1502, 1503, 1524, 1505, 1506, 1515, - 1525, 1511, 1512, 1513, 1492, 1119, 1121, 1527, - 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, - 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, - 1544, 1526, 1327, 1348, 1546, 1547, 1548, 1549, - 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557, - 1558, 1559, 1560, 1561, 1562, 1563, 1545, 1327, - 1348, 1546, 1547, 1548, 1549, 1550, 1551, 1552, - 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, - 1561, 1564, 1563, 1545, 1565, 1327, 1348, 1546, - 1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554, - 1555, 1556, 1557, 1566, 1567, 1560, 1561, 1568, - 1563, 1545, -} - -var _graphclust_trans_targs []int16 = []int16{ - 1528, 0, 1528, 1529, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, - 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 66, 67, 68, - 69, 70, 72, 73, 75, 76, 77, 78, - 79, 80, 81, 82, 83, 84, 85, 86, - 87, 88, 89, 91, 92, 94, 102, 134, - 139, 141, 148, 153, 95, 96, 97, 98, - 99, 100, 101, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, - 116, 117, 118, 119, 120, 121, 122, 123, - 124, 125, 126, 127, 128, 129, 130, 131, - 132, 133, 135, 136, 137, 138, 140, 142, - 143, 144, 145, 146, 147, 149, 150, 151, - 152, 154, 156, 157, 158, 2, 159, 3, - 1528, 1530, 1528, 1528, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 187, 188, 189, 190, 191, 192, 193, 194, - 195, 196, 197, 198, 199, 200, 201, 202, - 204, 205, 206, 207, 208, 209, 210, 211, - 212, 213, 214, 215, 216, 217, 218, 219, - 220, 221, 
222, 223, 224, 226, 231, 250, - 251, 252, 1531, 229, 230, 232, 233, 234, - 235, 236, 237, 238, 239, 240, 241, 242, - 243, 244, 245, 246, 247, 248, 249, 254, - 255, 256, 258, 259, 260, 261, 262, 263, - 264, 265, 266, 267, 268, 269, 270, 271, - 272, 274, 275, 277, 285, 317, 322, 324, - 331, 336, 278, 279, 280, 281, 282, 283, - 284, 286, 287, 288, 289, 290, 291, 292, - 293, 294, 295, 296, 297, 298, 299, 300, - 301, 302, 303, 304, 305, 306, 307, 308, - 309, 310, 311, 312, 313, 314, 315, 316, - 318, 319, 320, 321, 323, 325, 326, 327, - 328, 329, 330, 332, 333, 334, 335, 162, - 337, 338, 339, 340, 341, 342, 343, 344, - 345, 346, 347, 348, 349, 350, 351, 352, - 353, 354, 356, 357, 163, 359, 361, 362, - 1532, 1528, 1533, 377, 378, 379, 380, 381, - 382, 383, 384, 385, 386, 387, 388, 389, - 390, 391, 392, 393, 394, 395, 396, 397, - 398, 399, 400, 401, 402, 403, 404, 406, - 407, 408, 409, 410, 411, 412, 413, 414, - 415, 416, 417, 418, 419, 420, 421, 422, - 423, 424, 425, 426, 428, 429, 430, 431, - 432, 434, 435, 437, 438, 439, 440, 441, - 442, 443, 444, 445, 446, 447, 448, 449, - 450, 451, 453, 454, 456, 464, 496, 501, - 503, 510, 515, 457, 458, 459, 460, 461, - 462, 463, 465, 466, 467, 468, 469, 470, - 471, 472, 473, 474, 475, 476, 477, 478, - 479, 480, 481, 482, 483, 484, 485, 486, - 487, 488, 489, 490, 491, 492, 493, 494, - 495, 497, 498, 499, 500, 502, 504, 505, - 506, 507, 508, 509, 511, 512, 513, 514, - 516, 518, 519, 520, 364, 521, 365, 1534, - 537, 538, 539, 540, 541, 542, 543, 544, - 545, 546, 547, 548, 549, 550, 551, 552, - 553, 554, 555, 556, 557, 558, 559, 560, - 561, 562, 563, 564, 566, 567, 568, 569, - 570, 571, 572, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, - 586, 588, 593, 612, 613, 614, 1535, 591, - 592, 594, 595, 596, 597, 598, 599, 600, - 601, 602, 603, 604, 605, 606, 607, 608, - 609, 610, 611, 616, 617, 618, 620, 621, - 622, 623, 624, 625, 626, 627, 628, 629, - 630, 631, 632, 633, 634, 636, 637, 639, - 647, 679, 684, 686, 693, 698, 640, 641, - 642, 643, 644, 645, 646, 648, 649, 650, - 651, 652, 653, 654, 655, 656, 657, 658, - 659, 660, 661, 662, 663, 664, 665, 666, - 667, 668, 669, 670, 671, 672, 673, 674, - 675, 676, 677, 678, 680, 681, 682, 683, - 685, 687, 688, 689, 690, 691, 692, 694, - 695, 696, 697, 524, 699, 700, 701, 702, - 703, 704, 705, 706, 707, 708, 709, 710, - 711, 712, 713, 714, 715, 716, 718, 719, - 525, 721, 723, 724, 522, 729, 730, 732, - 734, 737, 740, 764, 1536, 746, 1537, 736, - 1538, 739, 742, 744, 745, 748, 749, 753, - 754, 755, 756, 757, 758, 759, 1539, 752, - 763, 766, 767, 768, 769, 770, 771, 772, - 773, 774, 775, 776, 777, 778, 779, 780, - 781, 782, 783, 785, 786, 789, 790, 791, - 792, 793, 794, 795, 796, 800, 801, 803, - 804, 787, 806, 813, 815, 817, 819, 807, - 808, 809, 810, 811, 812, 814, 816, 818, - 820, 821, 822, 823, 827, 828, 829, 830, - 831, 832, 833, 834, 835, 836, 837, 838, - 839, 840, 1540, 825, 826, 843, 844, 160, - 848, 849, 851, 1054, 1057, 1060, 1084, 1541, - 1528, 1542, 865, 866, 867, 868, 869, 870, - 871, 872, 873, 874, 875, 876, 877, 878, - 879, 880, 881, 882, 883, 884, 885, 886, - 887, 888, 889, 890, 891, 892, 894, 895, - 896, 897, 898, 899, 900, 901, 902, 903, - 904, 905, 906, 907, 908, 909, 910, 911, - 912, 913, 914, 916, 921, 940, 941, 942, - 1543, 919, 920, 922, 923, 924, 925, 926, - 927, 928, 929, 930, 931, 932, 933, 934, - 935, 936, 937, 938, 939, 944, 945, 946, - 948, 949, 950, 951, 952, 953, 954, 955, - 956, 957, 958, 959, 960, 961, 962, 964, - 965, 967, 975, 1007, 1012, 1014, 1021, 1026, - 968, 969, 
970, 971, 972, 973, 974, 976, - 977, 978, 979, 980, 981, 982, 983, 984, - 985, 986, 987, 988, 989, 990, 991, 992, - 993, 994, 995, 996, 997, 998, 999, 1000, - 1001, 1002, 1003, 1004, 1005, 1006, 1008, 1009, - 1010, 1011, 1013, 1015, 1016, 1017, 1018, 1019, - 1020, 1022, 1023, 1024, 1025, 852, 1027, 1028, - 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, - 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, - 1046, 1047, 853, 1049, 1051, 1052, 1066, 1544, - 1056, 1545, 1059, 1062, 1064, 1065, 1068, 1069, - 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1546, - 1072, 1083, 1086, 1245, 1246, 1247, 1248, 1249, - 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, - 1258, 1259, 1260, 1261, 1547, 1528, 1100, 1101, - 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, - 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, - 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, - 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134, - 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, - 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1151, - 1152, 1153, 1154, 1155, 1157, 1158, 1160, 1161, - 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, - 1170, 1171, 1172, 1173, 1174, 1176, 1177, 1179, - 1187, 1219, 1224, 1226, 1233, 1238, 1180, 1181, - 1182, 1183, 1184, 1185, 1186, 1188, 1189, 1190, - 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, - 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, - 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, - 1215, 1216, 1217, 1218, 1220, 1221, 1222, 1223, - 1225, 1227, 1228, 1229, 1230, 1231, 1232, 1234, - 1235, 1236, 1237, 1239, 1241, 1242, 1243, 1087, - 1244, 1088, 1263, 1264, 1267, 1268, 1269, 1270, - 1271, 1272, 1273, 1274, 1278, 1279, 1281, 1282, - 1265, 1284, 1291, 1293, 1295, 1297, 1285, 1286, - 1287, 1288, 1289, 1290, 1292, 1294, 1296, 1298, - 1299, 1300, 1301, 1506, 1507, 1508, 1509, 1510, - 1511, 1512, 1513, 1514, 1515, 1516, 1517, 1518, - 1519, 1548, 1528, 1549, 1315, 1316, 1317, 1318, - 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, - 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, - 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, - 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, - 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359, - 1360, 1361, 1362, 1363, 1364, 1366, 1371, 1390, - 1391, 1392, 1550, 1369, 1370, 1372, 1373, 1374, - 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, - 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1394, - 1395, 1396, 1398, 1399, 1400, 1401, 1402, 1403, - 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, - 1412, 1414, 1415, 1417, 1425, 1457, 1462, 1464, - 1471, 1476, 1418, 1419, 1420, 1421, 1422, 1423, - 1424, 1426, 1427, 1428, 1429, 1430, 1431, 1432, - 1433, 1434, 1435, 1436, 1437, 1438, 1439, 1440, - 1441, 1442, 1443, 1444, 1445, 1446, 1447, 1448, - 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, - 1458, 1459, 1460, 1461, 1463, 1465, 1466, 1467, - 1468, 1469, 1470, 1472, 1473, 1474, 1475, 1302, - 1477, 1478, 1479, 1480, 1481, 1482, 1483, 1484, - 1485, 1486, 1487, 1488, 1489, 1490, 1491, 1492, - 1493, 1494, 1496, 1497, 1303, 1499, 1501, 1502, - 1504, 1505, 1522, 1523, 1524, 1525, 1526, 1527, - 1528, 1, 1529, 160, 161, 363, 845, 846, - 847, 850, 1085, 1262, 1265, 1266, 1275, 1276, - 1277, 1280, 1283, 1520, 1521, 1528, 4, 5, - 6, 7, 8, 9, 10, 11, 12, 13, - 14, 43, 65, 71, 74, 90, 93, 155, - 1528, 164, 165, 166, 167, 168, 169, 170, - 171, 172, 173, 174, 203, 225, 358, 257, - 273, 360, 355, 227, 228, 253, 276, 1528, - 523, 725, 726, 727, 728, 731, 765, 784, - 788, 797, 798, 799, 802, 805, 841, 842, - 366, 367, 368, 369, 370, 371, 372, 373, - 374, 375, 376, 405, 427, 433, 436, 452, - 
455, 517, 526, 527, 528, 529, 530, 531, - 532, 533, 534, 535, 536, 565, 587, 720, - 619, 635, 722, 717, 589, 590, 615, 638, - 733, 747, 760, 761, 762, 735, 743, 738, - 741, 750, 751, 824, 1528, 854, 855, 856, - 857, 858, 859, 860, 861, 862, 863, 864, - 1053, 915, 1048, 1067, 1080, 1081, 1082, 963, - 1050, 1045, 893, 947, 917, 918, 943, 966, - 1055, 1063, 1058, 1061, 1070, 1071, 1528, 1089, - 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, - 1098, 1099, 1128, 1150, 1156, 1159, 1175, 1178, - 1240, 1528, 1304, 1305, 1306, 1307, 1308, 1309, - 1310, 1311, 1312, 1313, 1314, 1343, 1365, 1498, - 1397, 1413, 1503, 1495, 1500, 1367, 1368, 1393, - 1416, -} - -var _graphclust_trans_actions []byte = []byte{ - 31, 0, 27, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 34, 55, 29, 19, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 55, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 40, 25, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 40, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 40, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 0, 40, 0, - 40, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 40, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 51, - 17, 40, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 40, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 
0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 51, - 0, 51, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 40, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 21, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 40, 23, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 43, 1, 47, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 15, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 7, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 13, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 5, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 9, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 11, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, -} - -var _graphclust_to_state_actions []byte = []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 
0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 37, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, -} - -var _graphclust_from_state_actions []byte = []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 
0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, -} - -var _graphclust_eof_trans []int16 = []int16{ - 0, 0, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 0, 0, 147, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 147, 148, 147, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 147, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 0, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 
330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 330, 330, 330, - 330, 330, 330, 330, 330, 0, 0, 0, - 0, 0, 0, 147, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 147, 761, - 761, 147, 761, 761, 147, 
761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 761, - 761, 761, 761, 761, 761, 761, 761, 147, - 761, 761, 761, 761, 0, 0, 0, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 982, 982, 982, - 982, 982, 982, 982, 982, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 1171, 1171, 1171, 1171, 1171, 1171, - 1171, 1171, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1382, 1401, 1401, 1424, 1424, 1424, 1424, - 1424, 1424, 1424, 1424, 1424, 1493, 1493, 1493, - 1493, 1493, 1493, 1527, 1546, 1546, 1546, -} - -const graphclust_start int = 1528 -const graphclust_first_final int = 1528 -const graphclust_error int = 0 - -const graphclust_en_main int = 1528 - -//line grapheme_clusters.rl:14 - -var Error = errors.New("invalid UTF8 text") - -// ScanGraphemeClusters is a split function for bufio.Scanner that splits -// on grapheme cluster boundaries. 
-func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { - if len(data) == 0 { - return 0, nil, nil - } - - // Ragel state - cs := 0 // Current State - p := 0 // "Pointer" into data - pe := len(data) // End-of-data "pointer" - ts := 0 - te := 0 - act := 0 - eof := pe - - // Make Go compiler happy - _ = ts - _ = te - _ = act - _ = eof - - startPos := 0 - endPos := 0 - -//line grapheme_clusters.go:3787 - { - cs = graphclust_start - ts = 0 - te = 0 - act = 0 - } - -//line grapheme_clusters.go:3795 - { - var _klen int - var _trans int - var _acts int - var _nacts uint - var _keys int - if p == pe { - goto _test_eof - } - if cs == 0 { - goto _out - } - _resume: - _acts = int(_graphclust_from_state_actions[cs]) - _nacts = uint(_graphclust_actions[_acts]) - _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 4: -//line NONE:1 - ts = p - -//line grapheme_clusters.go:3818 - } - } - - _keys = int(_graphclust_key_offsets[cs]) - _trans = int(_graphclust_index_offsets[cs]) - - _klen = int(_graphclust_single_lengths[cs]) - if _klen > 0 { - _lower := int(_keys) - var _mid int - _upper := int(_keys + _klen - 1) - for { - if _upper < _lower { - break - } - - _mid = _lower + ((_upper - _lower) >> 1) - switch { - case data[p] < _graphclust_trans_keys[_mid]: - _upper = _mid - 1 - case data[p] > _graphclust_trans_keys[_mid]: - _lower = _mid + 1 - default: - _trans += int(_mid - int(_keys)) - goto _match - } - } - _keys += _klen - _trans += _klen - } - - _klen = int(_graphclust_range_lengths[cs]) - if _klen > 0 { - _lower := int(_keys) - var _mid int - _upper := int(_keys + (_klen << 1) - 2) - for { - if _upper < _lower { - break - } - - _mid = _lower + (((_upper - _lower) >> 1) & ^1) - switch { - case data[p] < _graphclust_trans_keys[_mid]: - _upper = _mid - 2 - case data[p] > _graphclust_trans_keys[_mid+1]: - _lower = _mid + 2 - default: - _trans += int((_mid - int(_keys)) >> 1) - goto _match - } - } - _trans += _klen - } - - _match: - _trans = int(_graphclust_indicies[_trans]) - _eof_trans: - cs = int(_graphclust_trans_targs[_trans]) - - if _graphclust_trans_actions[_trans] == 0 { - goto _again - } - - _acts = int(_graphclust_trans_actions[_trans]) - _nacts = uint(_graphclust_actions[_acts]) - _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 0: -//line grapheme_clusters.rl:47 - - startPos = p - - case 1: -//line grapheme_clusters.rl:51 - - endPos = p - - case 5: -//line NONE:1 - te = p + 1 - - case 6: -//line grapheme_clusters.rl:55 - act = 3 - case 7: -//line grapheme_clusters.rl:55 - act = 4 - case 8: -//line grapheme_clusters.rl:55 - te = p + 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 9: -//line grapheme_clusters.rl:55 - te = p + 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 10: -//line grapheme_clusters.rl:55 - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 11: -//line grapheme_clusters.rl:55 - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 12: -//line grapheme_clusters.rl:55 - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 13: -//line grapheme_clusters.rl:55 - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 14: -//line grapheme_clusters.rl:55 - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 15: -//line grapheme_clusters.rl:55 - te = p - p-- - { - return endPos + 1, data[startPos : 
endPos+1], nil - } - case 16: -//line grapheme_clusters.rl:55 - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 17: -//line grapheme_clusters.rl:55 - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 18: -//line grapheme_clusters.rl:55 - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 19: -//line grapheme_clusters.rl:55 - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 20: -//line grapheme_clusters.rl:55 - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 21: -//line grapheme_clusters.rl:55 - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 22: -//line NONE:1 - switch act { - case 0: - { - cs = 0 - goto _again - } - case 3: - { - p = (te) - 1 - - return endPos + 1, data[startPos : endPos+1], nil - } - case 4: - { - p = (te) - 1 - - return endPos + 1, data[startPos : endPos+1], nil - } - } - -//line grapheme_clusters.go:4017 - } - } - - _again: - _acts = int(_graphclust_to_state_actions[cs]) - _nacts = uint(_graphclust_actions[_acts]) - _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 2: -//line NONE:1 - ts = 0 - - case 3: -//line NONE:1 - act = 0 - -//line grapheme_clusters.go:4035 - } - } - - if cs == 0 { - goto _out - } - p++ - if p != pe { - goto _resume - } - _test_eof: - { - } - if p == eof { - if _graphclust_eof_trans[cs] > 0 { - _trans = int(_graphclust_eof_trans[cs] - 1) - goto _eof_trans - } - } - - _out: - { - } - } - -//line grapheme_clusters.rl:117 - - // If we fall out here then we were unable to complete a sequence. - // If we weren't able to complete a sequence then either we've - // reached the end of a partial buffer (so there's more data to come) - // or we have an isolated symbol that would normally be part of a - // grapheme cluster but has appeared in isolation here. - - if !atEOF { - // Request more - return 0, nil, nil - } - - // Just take the first UTF-8 sequence and return that. - _, seqLen := utf8.DecodeRune(data) - return seqLen, data[:seqLen], nil -} diff --git a/vendor/github.com/apparentlymart/go-textseg/v12/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/v12/textseg/grapheme_clusters_table.rl deleted file mode 100644 index 5e4b5881de..0000000000 --- a/vendor/github.com/apparentlymart/go-textseg/v12/textseg/grapheme_clusters_table.rl +++ /dev/null @@ -1,1589 +0,0 @@ -# The following Ragel file was autogenerated with unicode2ragel.rb -# from: https://www.unicode.org/Public/12.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -# -# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "ZWJ"]. -# -# To use this, make sure that your alphtype is set to byte, -# and that your input is in utf8. - -%%{ - machine GraphemeCluster; - - Prepend = - 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ... - | 0xDB 0x9D #Cf ARABIC END OF AYAH - | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK - | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH - | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH - | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN - | 0xF0 0x91 0x83 0x8D #Cf KAITHI NUMBER SIGN ABOVE - | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA... - | 0xF0 0x91 0xA8 0xBA #Lo ZANABAZAR SQUARE CLUSTER-INITIAL L... - | 0xF0 0x91 0xAA 0x84..0x89 #Lo [6] SOYOMBO SIGN JIHVAMULIYA..SOYOM... 
- | 0xF0 0x91 0xB5 0x86 #Lo MASARAM GONDI REPHA - ; - - CR = - 0x0D #Cc - ; - - LF = - 0x0A #Cc - ; - - Control = - 0x00..0x09 #Cc [10] .. - | 0x0B..0x0C #Cc [2] .. - | 0x0E..0x1F #Cc [18] .. - | 0x7F #Cc [33] .. - | 0xC2 0x80..0x9F # - | 0xC2 0xAD #Cf SOFT HYPHEN - | 0xD8 0x9C #Cf ARABIC LETTER MARK - | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR - | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE - | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ... - | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR - | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR - | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-... - | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS - | 0xE2 0x81 0xA5 #Cn - | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG... - | 0xEF 0xBB 0xBF #Cf ZERO WIDTH NO-BREAK SPACE - | 0xEF 0xBF 0xB0..0xB8 #Cn [9] .. - | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT... - | 0xF0 0x93 0x90 0xB0..0xB8 #Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JO... - | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP... - | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI... - | 0xF3 0xA0 0x80 0x80 #Cn - | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG - | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] .. - | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] .. - | 0xF3 0xA0 0x83 0x00..0xBF # - | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] .. -# -# This script uses the unicode spec to generate a Ragel state machine -# that recognizes unicode alphanumeric characters. It generates 5 -# character classes: uupper, ulower, ualpha, udigit, and ualnum. -# Currently supported encodings are UTF-8 [default] and UCS-4. -# -# Usage: unicode2ragel.rb [options] -# -e, --encoding [ucs4 | utf8] Data encoding -# -h, --help Show this message -# -# This script was originally written as part of the Ferret search -# engine library. -# -# Author: Rakan El-Khalil - -require 'optparse' -require 'open-uri' - -ENCODINGS = [ :utf8, :ucs4 ] -ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" } -DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt" -DEFAULT_MACHINE_NAME= "WChar" - -### -# Display vars & default option - -TOTAL_WIDTH = 80 -RANGE_WIDTH = 23 -@encoding = :utf8 -@chart_url = DEFAULT_CHART_URL -machine_name = DEFAULT_MACHINE_NAME -properties = [] -@output = $stdout - -### -# Option parsing - -cli_opts = OptionParser.new do |opts| - opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o| - @encoding = o.downcase.to_sym - end - opts.on("-h", "--help", "Show this message") do - puts opts - exit - end - opts.on("-u", "--url URL", "URL to process") do |o| - @chart_url = o - end - opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o| - machine_name = o - end - opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o| - properties = o - end - opts.on("-o", "--output FILE", "output file") do |o| - @output = File.new(o, "w+") - end -end - -cli_opts.parse(ARGV) -unless ENCODINGS.member? @encoding - puts "Invalid encoding: #{@encoding}" - puts cli_opts - exit -end - -## -# Downloads the document at url and yields every alpha line's hex -# range and description. - -def each_alpha( url, property ) - open( url ) do |file| - file.each_line do |line| - next if line =~ /^#/; - next if line !~ /; #{property} *#/; - - range, description = line.split(/;/) - range.strip! - description.gsub!(/.*#/, '').strip! - - if range =~ /\.\./ - start, stop = range.split '..' - else start = stop = range - end - - yield start.hex .. 
stop.hex, description - end - end -end - -### -# Formats to hex at minimum width - -def to_hex( n ) - r = "%0X" % n - r = "0#{r}" unless (r.length % 2).zero? - r -end - -### -# UCS4 is just a straight hex conversion of the unicode codepoint. - -def to_ucs4( range ) - rangestr = "0x" + to_hex(range.begin) - rangestr << "..0x" + to_hex(range.end) if range.begin != range.end - [ rangestr ] -end - -## -# 0x00 - 0x7f -> 0zzzzzzz[7] -# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] -# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] -# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] - -UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] - -def to_utf8_enc( n ) - r = 0 - if n <= 0x7f - r = n - elsif n <= 0x7ff - y = 0xc0 | (n >> 6) - z = 0x80 | (n & 0x3f) - r = y << 8 | z - elsif n <= 0xffff - x = 0xe0 | (n >> 12) - y = 0x80 | (n >> 6) & 0x3f - z = 0x80 | n & 0x3f - r = x << 16 | y << 8 | z - elsif n <= 0x10ffff - w = 0xf0 | (n >> 18) - x = 0x80 | (n >> 12) & 0x3f - y = 0x80 | (n >> 6) & 0x3f - z = 0x80 | n & 0x3f - r = w << 24 | x << 16 | y << 8 | z - end - - to_hex(r) -end - -def from_utf8_enc( n ) - n = n.hex - r = 0 - if n <= 0x7f - r = n - elsif n <= 0xdfff - y = (n >> 8) & 0x1f - z = n & 0x3f - r = y << 6 | z - elsif n <= 0xefffff - x = (n >> 16) & 0x0f - y = (n >> 8) & 0x3f - z = n & 0x3f - r = x << 10 | y << 6 | z - elsif n <= 0xf7ffffff - w = (n >> 24) & 0x07 - x = (n >> 16) & 0x3f - y = (n >> 8) & 0x3f - z = n & 0x3f - r = w << 18 | x << 12 | y << 6 | z - end - r -end - -### -# Given a range, splits it up into ranges that can be continuously -# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] -# This is not strictly needed since the current [5.1] unicode standard -# doesn't have ranges that straddle utf8 boundaries. This is included -# for completeness as there is no telling if that will ever change. - -def utf8_ranges( range ) - ranges = [] - UTF8_BOUNDARIES.each do |max| - if range.begin <= max - if range.end <= max - ranges << range - return ranges - end - - ranges << (range.begin .. max) - range = (max + 1) .. range.end - end - end - ranges -end - -def build_range( start, stop ) - size = start.size/2 - left = size - 1 - return [""] if size < 1 - - a = start[0..1] - b = stop[0..1] - - ### - # Shared prefix - - if a == b - return build_range(start[2..-1], stop[2..-1]).map do |elt| - "0x#{a} " + elt - end - end - - ### - # Unshared prefix, end of run - - return ["0x#{a}..0x#{b} "] if left.zero? - - ### - # Unshared prefix, not end of run - # Range can be 0x123456..0x56789A - # Which is equivalent to: - # 0x123456 .. 0x12FFFF - # 0x130000 .. 0x55FFFF - # 0x560000 .. 0x56789A - - ret = [] - ret << build_range(start, a + "FF" * left) - - ### - # Only generate middle range if need be. - - if a.hex+1 != b.hex - max = to_hex(b.hex - 1) - max = "FF" if b == "FF" - ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left - end - - ### - # Don't generate last range if it is covered by first range - - ret << build_range(b + "00" * left, stop) unless b == "FF" - ret.flatten! -end - -def to_utf8( range ) - utf8_ranges( range ).map do |r| - begin_enc = to_utf8_enc(r.begin) - end_enc = to_utf8_enc(r.end) - build_range begin_enc, end_enc - end.flatten! -end - -## -# Perform a 3-way comparison of the number of codepoints advertised by -# the unicode spec for the given range, the originally parsed range, -# and the resulting utf8 encoded range. 
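The `to_utf8_enc` helper above hand-encodes a codepoint into UTF-8 using the byte-count boundaries it lists (0x7f, 0x7ff, 0xffff, 0x10ffff). As an illustrative cross-check (a Go sketch, not part of the vendored code), the same bit layout can be written directly and verified against the standard library:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// encodeUTF8 mirrors the bit layout used by to_utf8_enc above:
// 1 to 4 bytes depending on which boundary the codepoint falls under.
func encodeUTF8(n rune) []byte {
	switch {
	case n <= 0x7F: // 0zzzzzzz
		return []byte{byte(n)}
	case n <= 0x7FF: // 110yyyyy 10zzzzzz
		return []byte{0xC0 | byte(n>>6), 0x80 | byte(n)&0x3F}
	case n <= 0xFFFF: // 1110xxxx 10yyyyyy 10zzzzzz
		return []byte{0xE0 | byte(n>>12), 0x80 | byte(n>>6)&0x3F, 0x80 | byte(n)&0x3F}
	default: // 11110www 10xxxxxx 10yyyyyy 10zzzzzz, up to 0x10FFFF
		return []byte{0xF0 | byte(n>>18), 0x80 | byte(n>>12)&0x3F, 0x80 | byte(n>>6)&0x3F, 0x80 | byte(n)&0x3F}
	}
}

func main() {
	// One codepoint from each encoded width: 1, 2, 3, and 4 bytes.
	for _, r := range []rune{'A', 'é', '€', '🦀'} {
		buf := make([]byte, utf8.UTFMax)
		want := buf[:utf8.EncodeRune(buf, r)]
		fmt.Printf("U+%04X: %x (stdlib: %x)\n", r, encodeUTF8(r), want)
	}
}
```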
- -def count_codepoints( code ) - code.split(' ').inject(1) do |acc, elt| - if elt =~ /0x(.+)\.\.0x(.+)/ - if @encoding == :utf8 - acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1) - else - acc * ($2.hex - $1.hex + 1) - end - else - acc - end - end -end - -def is_valid?( range, desc, codes ) - spec_count = 1 - spec_count = $1.to_i if desc =~ /\[(\d+)\]/ - range_count = range.end - range.begin + 1 - - sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) } - sum == spec_count and sum == range_count -end - -## -# Generate the state maching to stdout - -def generate_machine( name, property ) - pipe = " " - @output.puts " #{name} = " - each_alpha( @chart_url, property ) do |range, desc| - - codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range) - - #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless - # is_valid? range, desc, codes - - range_width = codes.map { |a| a.size }.max - range_width = RANGE_WIDTH if range_width < RANGE_WIDTH - - desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11 - desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH - - if desc.size > desc_width - desc = desc[0..desc_width - 4] + "..." - end - - codes.each_with_index do |r, idx| - desc = "" unless idx.zero? - code = "%-#{range_width}s" % r - @output.puts " #{pipe} #{code} ##{desc}" - pipe = "|" - end - end - @output.puts " ;" - @output.puts "" -end - -@output.puts <..<... - | 0xF0 0x9F 0x84 0x8D..0x8F #E0.0 [3] (🄍..🄏) CIRCLED ZERO WITH S... - | 0xF0 0x9F 0x84 0xAF #E0.0 [1] (🄯) COPYLEFT SYMBOL - | 0xF0 0x9F 0x85 0xAC..0xAF #E0.0 [4] (🅬..🅯) RAISED MR SIGN..CIR... - | 0xF0 0x9F 0x85 0xB0..0xB1 #E0.6 [2] (🅰️..🅱️) A button (blood t... - | 0xF0 0x9F 0x85 0xBE..0xBF #E0.6 [2] (🅾️..🅿️) O button (blood t... - | 0xF0 0x9F 0x86 0x8E #E0.6 [1] (🆎) AB button (blood type) - | 0xF0 0x9F 0x86 0x91..0x9A #E0.6 [10] (🆑..🆚) CL button..VS button - | 0xF0 0x9F 0x86 0xAD..0xFF #E0.0 [57] (🆭..🇥) MASK WORK SYMBOL..<... - | 0xF0 0x9F 0x87 0x00..0xA5 # - | 0xF0 0x9F 0x88 0x81..0x82 #E0.6 [2] (🈁..🈂️) Japanese “here” bu... - | 0xF0 0x9F 0x88 0x83..0x8F #E0.0 [13] (🈃..🈏) ..<... - | 0xF0 0x9F 0x88 0x9A #E0.6 [1] (🈚) Japanese “free of char... - | 0xF0 0x9F 0x88 0xAF #E0.6 [1] (🈯) Japanese “reserved” bu... - | 0xF0 0x9F 0x88 0xB2..0xBA #E0.6 [9] (🈲..🈺) Japanese “prohibite... - | 0xF0 0x9F 0x88 0xBC..0xBF #E0.0 [4] (🈼..🈿) ..<... - | 0xF0 0x9F 0x89 0x89..0x8F #E0.0 [7] (🉉..🉏) ..<... - | 0xF0 0x9F 0x89 0x90..0x91 #E0.6 [2] (🉐..🉑) Japanese “bargain” ... - | 0xF0 0x9F 0x89 0x92..0xFF #E0.0 [174] (🉒..🋿) ..<... - | 0xF0 0x9F 0x9B 0xA0..0xA5 #E0.7 [6] (🛠️..🛥️) hammer and wrench... - | 0xF0 0x9F 0x9B 0xA6..0xA8 #E0.0 [3] (🛦..🛨) UP-POINTING MILITAR... - | 0xF0 0x9F 0x9B 0xA9 #E0.7 [1] (🛩️) small airplane - | 0xF0 0x9F 0x9B 0xAA #E0.0 [1] (🛪) NORTHEAST-POINTING AIR... - | 0xF0 0x9F 0x9B 0xAB..0xAC #E1.0 [2] (🛫..🛬) airplane departure.... - | 0xF0 0x9F 0x9B 0xAD..0xAF #E0.0 [3] (🛭..🛯) ..<... - | 0xF0 0x9F 0x9B 0xB0 #E0.7 [1] (🛰️) satellite - | 0xF0 0x9F 0x9B 0xB1..0xB2 #E0.0 [2] (🛱..🛲) ONCOMING FIRE ENGIN... - | 0xF0 0x9F 0x9B 0xB3 #E0.7 [1] (🛳️) passenger ship - | 0xF0 0x9F 0x9B 0xB4..0xB6 #E3.0 [3] (🛴..🛶) kick scooter..canoe - | 0xF0 0x9F 0x9B 0xB7..0xB8 #E5.0 [2] (🛷..🛸) sled..flying saucer - | 0xF0 0x9F 0x9B 0xB9 #E11.0 [1] (🛹) skateboard - | 0xF0 0x9F 0x9B 0xBA #E12.0 [1] (🛺) auto rickshaw - | 0xF0 0x9F 0x9B 0xBB..0xBC #E13.0 [2] (🛻..🛼) pickup truck..rolle... - | 0xF0 0x9F 0x9B 0xBD..0xBF #E0.0 [3] (🛽..🛿) ..<... - | 0xF0 0x9F 0x9D 0xB4..0xBF #E0.0 [12] (🝴..🝿) ..<... 
- | 0xF0 0x9F 0x9F 0x95..0x9F #E0.0 [11] (🟕..🟟) CIRCLED TRIANGLE..<... - | 0xF0 0x9F 0x9F 0xA0..0xAB #E12.0 [12] (🟠..🟫) orange circle..brow... - | 0xF0 0x9F 0x9F 0xAC..0xBF #E0.0 [20] (🟬..🟿) ..<... - | 0xF0 0x9F 0xA0 0x8C..0x8F #E0.0 [4] (🠌..🠏) ..<... - | 0xF0 0x9F 0xA1 0x88..0x8F #E0.0 [8] (🡈..🡏) ..<... - | 0xF0 0x9F 0xA1 0x9A..0x9F #E0.0 [6] (🡚..🡟) ..<... - | 0xF0 0x9F 0xA2 0x88..0x8F #E0.0 [8] (🢈..🢏) ..<... - | 0xF0 0x9F 0xA2 0xAE..0xFF #E0.0 [82] (🢮..🣿) ..<... - | 0xF0 0x9F 0xA3 0x00..0xBF # - | 0xF0 0x9F 0xA4 0x8C #E13.0 [1] (🤌) pinched fingers - | 0xF0 0x9F 0xA4 0x8D..0x8F #E12.0 [3] (🤍..🤏) white heart..pinchi... - | 0xF0 0x9F 0xA4 0x90..0x98 #E1.0 [9] (🤐..🤘) zipper-mouth face..... - | 0xF0 0x9F 0xA4 0x99..0x9E #E3.0 [6] (🤙..🤞) call me hand..cross... - | 0xF0 0x9F 0xA4 0x9F #E5.0 [1] (🤟) love-you gesture - | 0xF0 0x9F 0xA4 0xA0..0xA7 #E3.0 [8] (🤠..🤧) cowboy hat face..sn... - | 0xF0 0x9F 0xA4 0xA8..0xAF #E5.0 [8] (🤨..🤯) face with raised ey... - | 0xF0 0x9F 0xA4 0xB0 #E3.0 [1] (🤰) pregnant woman - | 0xF0 0x9F 0xA4 0xB1..0xB2 #E5.0 [2] (🤱..🤲) breast-feeding..pal... - | 0xF0 0x9F 0xA4 0xB3..0xBA #E3.0 [8] (🤳..🤺) selfie..person fencing - | 0xF0 0x9F 0xA4 0xBC..0xBE #E3.0 [3] (🤼..🤾) people wrestling..p... - | 0xF0 0x9F 0xA4 0xBF #E12.0 [1] (🤿) diving mask - | 0xF0 0x9F 0xA5 0x80..0x85 #E3.0 [6] (🥀..🥅) wilted flower..goal... - | 0xF0 0x9F 0xA5 0x87..0x8B #E3.0 [5] (🥇..🥋) 1st place medal..ma... - | 0xF0 0x9F 0xA5 0x8C #E5.0 [1] (🥌) curling stone - | 0xF0 0x9F 0xA5 0x8D..0x8F #E11.0 [3] (🥍..🥏) lacrosse..flying disc - | 0xF0 0x9F 0xA5 0x90..0x9E #E3.0 [15] (🥐..🥞) croissant..pancakes - | 0xF0 0x9F 0xA5 0x9F..0xAB #E5.0 [13] (🥟..🥫) dumpling..canned food - | 0xF0 0x9F 0xA5 0xAC..0xB0 #E11.0 [5] (🥬..🥰) leafy green..smilin... - | 0xF0 0x9F 0xA5 0xB1 #E12.0 [1] (🥱) yawning face - | 0xF0 0x9F 0xA5 0xB2 #E13.0 [1] (🥲) smiling face with tear - | 0xF0 0x9F 0xA5 0xB3..0xB6 #E11.0 [4] (🥳..🥶) partying face..cold... - | 0xF0 0x9F 0xA5 0xB7..0xB8 #E13.0 [2] (🥷..🥸) ninja..disguised face - | 0xF0 0x9F 0xA5 0xB9 #E0.0 [1] (🥹) - | 0xF0 0x9F 0xA5 0xBA #E11.0 [1] (🥺) pleading face - | 0xF0 0x9F 0xA5 0xBB #E12.0 [1] (🥻) sari - | 0xF0 0x9F 0xA5 0xBC..0xBF #E11.0 [4] (🥼..🥿) lab coat..flat shoe - | 0xF0 0x9F 0xA6 0x80..0x84 #E1.0 [5] (🦀..🦄) crab..unicorn - | 0xF0 0x9F 0xA6 0x85..0x91 #E3.0 [13] (🦅..🦑) eagle..squid - | 0xF0 0x9F 0xA6 0x92..0x97 #E5.0 [6] (🦒..🦗) giraffe..cricket - | 0xF0 0x9F 0xA6 0x98..0xA2 #E11.0 [11] (🦘..🦢) kangaroo..swan - | 0xF0 0x9F 0xA6 0xA3..0xA4 #E13.0 [2] (🦣..🦤) mammoth..dodo - | 0xF0 0x9F 0xA6 0xA5..0xAA #E12.0 [6] (🦥..🦪) sloth..oyster - | 0xF0 0x9F 0xA6 0xAB..0xAD #E13.0 [3] (🦫..🦭) beaver..seal - | 0xF0 0x9F 0xA6 0xAE..0xAF #E12.0 [2] (🦮..🦯) guide dog..white cane - | 0xF0 0x9F 0xA6 0xB0..0xB9 #E11.0 [10] (🦰..🦹) red hair..supervillain - | 0xF0 0x9F 0xA6 0xBA..0xBF #E12.0 [6] (🦺..🦿) safety vest..mechan... - | 0xF0 0x9F 0xA7 0x80 #E1.0 [1] (🧀) cheese wedge - | 0xF0 0x9F 0xA7 0x81..0x82 #E11.0 [2] (🧁..🧂) cupcake..salt - | 0xF0 0x9F 0xA7 0x83..0x8A #E12.0 [8] (🧃..🧊) beverage box..ice - | 0xF0 0x9F 0xA7 0x8B #E13.0 [1] (🧋) bubble tea - | 0xF0 0x9F 0xA7 0x8C #E0.0 [1] (🧌) - | 0xF0 0x9F 0xA7 0x8D..0x8F #E12.0 [3] (🧍..🧏) person standing..de... - | 0xF0 0x9F 0xA7 0x90..0xA6 #E5.0 [23] (🧐..🧦) face with monocle..... - | 0xF0 0x9F 0xA7 0xA7..0xBF #E11.0 [25] (🧧..🧿) red envelope..nazar... - | 0xF0 0x9F 0xA8 0x80..0xFF #E0.0 [112] (🨀..🩯) NEUTRAL CHESS KING.... 
- | 0xF0 0x9F 0xA9 0x00..0xAF # - | 0xF0 0x9F 0xA9 0xB0..0xB3 #E12.0 [4] (🩰..🩳) ballet shoes..shorts - | 0xF0 0x9F 0xA9 0xB4 #E13.0 [1] (🩴) thong sandal - | 0xF0 0x9F 0xA9 0xB5..0xB7 #E0.0 [3] (🩵..🩷) ..<... - | 0xF0 0x9F 0xA9 0xB8..0xBA #E12.0 [3] (🩸..🩺) drop of blood..stet... - | 0xF0 0x9F 0xA9 0xBB..0xBF #E0.0 [5] (🩻..🩿) ..<... - | 0xF0 0x9F 0xAA 0x80..0x82 #E12.0 [3] (🪀..🪂) yo-yo..parachute - | 0xF0 0x9F 0xAA 0x83..0x86 #E13.0 [4] (🪃..🪆) boomerang..nesting ... - | 0xF0 0x9F 0xAA 0x87..0x8F #E0.0 [9] (🪇..🪏) ..<... - | 0xF0 0x9F 0xAA 0x90..0x95 #E12.0 [6] (🪐..🪕) ringed planet..banjo - | 0xF0 0x9F 0xAA 0x96..0xA8 #E13.0 [19] (🪖..🪨) military helmet..rock - | 0xF0 0x9F 0xAA 0xA9..0xAF #E0.0 [7] (🪩..🪯) ..<... - | 0xF0 0x9F 0xAA 0xB0..0xB6 #E13.0 [7] (🪰..🪶) fly..feather - | 0xF0 0x9F 0xAA 0xB7..0xBF #E0.0 [9] (🪷..🪿) ..<... - | 0xF0 0x9F 0xAB 0x80..0x82 #E13.0 [3] (🫀..🫂) anatomical heart..p... - | 0xF0 0x9F 0xAB 0x83..0x8F #E0.0 [13] (🫃..🫏) ..<... - | 0xF0 0x9F 0xAB 0x90..0x96 #E13.0 [7] (🫐..🫖) blueberries..teapot - | 0xF0 0x9F 0xAB 0x97..0xBF #E0.0 [41] (🫗..🫿) ..<... - | 0xF0 0x9F 0xB0 0x80..0xFF #E0.0[1022] (🰀..🿽) 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 4: -//line NONE:1 - ts = p - -//line grapheme_clusters.go:3878 - } - } - - _keys = int(_graphclust_key_offsets[cs]) - _trans = int(_graphclust_index_offsets[cs]) - - _klen = int(_graphclust_single_lengths[cs]) - if _klen > 0 { - _lower := int(_keys) - var _mid int - _upper := int(_keys + _klen - 1) - for { - if _upper < _lower { - break - } - - _mid = _lower + ((_upper - _lower) >> 1) - switch { - case data[p] < _graphclust_trans_keys[_mid]: - _upper = _mid - 1 - case data[p] > _graphclust_trans_keys[_mid]: - _lower = _mid + 1 - default: - _trans += int(_mid - int(_keys)) - goto _match - } - } - _keys += _klen - _trans += _klen - } - - _klen = int(_graphclust_range_lengths[cs]) - if _klen > 0 { - _lower := int(_keys) - var _mid int - _upper := int(_keys + (_klen << 1) - 2) - for { - if _upper < _lower { - break - } - - _mid = _lower + (((_upper - _lower) >> 1) & ^1) - switch { - case data[p] < _graphclust_trans_keys[_mid]: - _upper = _mid - 2 - case data[p] > _graphclust_trans_keys[_mid+1]: - _lower = _mid + 2 - default: - _trans += int((_mid - int(_keys)) >> 1) - goto _match - } - } - _trans += _klen - } - - _match: - _trans = int(_graphclust_indicies[_trans]) - _eof_trans: - cs = int(_graphclust_trans_targs[_trans]) - - if _graphclust_trans_actions[_trans] == 0 { - goto _again - } - - _acts = int(_graphclust_trans_actions[_trans]) - _nacts = uint(_graphclust_actions[_acts]) - _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 0: -//line grapheme_clusters.rl:47 - - startPos = p - - case 1: -//line grapheme_clusters.rl:51 - - endPos = p - - case 5: -//line NONE:1 - te = p + 1 - - case 6: -//line grapheme_clusters.rl:55 - act = 3 - case 7: -//line grapheme_clusters.rl:55 - act = 4 - case 8: -//line grapheme_clusters.rl:55 - te = p + 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 9: -//line grapheme_clusters.rl:55 - te = p + 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 10: -//line grapheme_clusters.rl:55 - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 11: -//line grapheme_clusters.rl:55 - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 12: -//line grapheme_clusters.rl:55 - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 13: 
-//line grapheme_clusters.rl:55 - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 14: -//line grapheme_clusters.rl:55 - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 15: -//line grapheme_clusters.rl:55 - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 16: -//line grapheme_clusters.rl:55 - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 17: -//line grapheme_clusters.rl:55 - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 18: -//line grapheme_clusters.rl:55 - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 19: -//line grapheme_clusters.rl:55 - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 20: -//line grapheme_clusters.rl:55 - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 21: -//line grapheme_clusters.rl:55 - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 22: -//line NONE:1 - switch act { - case 0: - { - cs = 0 - goto _again - } - case 3: - { - p = (te) - 1 - - return endPos + 1, data[startPos : endPos+1], nil - } - case 4: - { - p = (te) - 1 - - return endPos + 1, data[startPos : endPos+1], nil - } - } - -//line grapheme_clusters.go:4077 - } - } - - _again: - _acts = int(_graphclust_to_state_actions[cs]) - _nacts = uint(_graphclust_actions[_acts]) - _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 2: -//line NONE:1 - ts = 0 - - case 3: -//line NONE:1 - act = 0 - -//line grapheme_clusters.go:4095 - } - } - - if cs == 0 { - goto _out - } - p++ - if p != pe { - goto _resume - } - _test_eof: - { - } - if p == eof { - if _graphclust_eof_trans[cs] > 0 { - _trans = int(_graphclust_eof_trans[cs] - 1) - goto _eof_trans - } - } - - _out: - { - } - } - -//line grapheme_clusters.rl:117 - - // If we fall out here then we were unable to complete a sequence. - // If we weren't able to complete a sequence then either we've - // reached the end of a partial buffer (so there's more data to come) - // or we have an isolated symbol that would normally be part of a - // grapheme cluster but has appeared in isolation here. - - if !atEOF { - // Request more - return 0, nil, nil - } - - // Just take the first UTF-8 sequence and return that. - _, seqLen := utf8.DecodeRune(data) - return seqLen, data[:seqLen], nil -} diff --git a/vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters.rl deleted file mode 100644 index 737db18b29..0000000000 --- a/vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters.rl +++ /dev/null @@ -1,133 +0,0 @@ -package textseg - -import ( - "errors" - "unicode/utf8" -) - -// Generated from grapheme_clusters.rl. DO NOT EDIT -%%{ - # (except you are actually in grapheme_clusters.rl here, so edit away!) - - machine graphclust; - write data; -}%% - -var Error = errors.New("invalid UTF8 text") - -// ScanGraphemeClusters is a split function for bufio.Scanner that splits -// on grapheme cluster boundaries. 
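Note that ScanGraphemeClusters is shaped as a bufio.SplitFunc, which is how callers are expected to drive it: install it on a bufio.Scanner and read one cluster per Scan call. A minimal usage sketch against the v15 module path this change vendors; the input string is an arbitrary example:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/apparentlymart/go-textseg/v15/textseg"
)

func main() {
	// Each token is one user-perceived character, even when it spans
	// several runes: the flag below is two regional-indicator
	// codepoints but comes back as a single grapheme cluster.
	sc := bufio.NewScanner(strings.NewReader("héllo 🇬🇧"))
	sc.Split(textseg.ScanGraphemeClusters)
	for sc.Scan() {
		fmt.Printf("%q\n", sc.Text())
	}
}
```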
-func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { - if len(data) == 0 { - return 0, nil, nil - } - - // Ragel state - cs := 0 // Current State - p := 0 // "Pointer" into data - pe := len(data) // End-of-data "pointer" - ts := 0 - te := 0 - act := 0 - eof := pe - - // Make Go compiler happy - _ = ts - _ = te - _ = act - _ = eof - - startPos := 0 - endPos := 0 - - %%{ - include GraphemeCluster "grapheme_clusters_table.rl"; - include Emoji "emoji_table.rl"; - - action start { - startPos = p - } - - action end { - endPos = p - } - - action emit { - return endPos+1, data[startPos:endPos+1], nil - } - - ZWJGlue = ZWJ (Extended_Pictographic Extend*)?; - AnyExtender = Extend | ZWJGlue | SpacingMark; - Extension = AnyExtender*; - ReplacementChar = (0xEF 0xBF 0xBD); - - CRLFSeq = CR LF; - ControlSeq = Control | ReplacementChar; - HangulSeq = ( - L+ (((LV? V+ | LVT) T*)?|LV?) | - LV V* T* | - V+ T* | - LVT T* | - T+ - ) Extension; - EmojiSeq = Extended_Pictographic Extend* Extension; - ZWJSeq = ZWJ (ZWJ | Extend | SpacingMark)*; - EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension; - - UTF8Cont = 0x80 .. 0xBF; - AnyUTF8 = ( - 0x00..0x7F | - 0xC0..0xDF . UTF8Cont | - 0xE0..0xEF . UTF8Cont . UTF8Cont | - 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont - ); - - # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension - OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|Extended_Pictographic|ZWJ|Regional_Indicator|Prepend)) (Extend | ZWJ | SpacingMark)*; - - # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break - PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?; - - CRLFTok = CRLFSeq >start @end; - ControlTok = ControlSeq >start @end; - HangulTok = HangulSeq >start @end; - EmojiTok = EmojiSeq >start @end; - ZWJTok = ZWJSeq >start @end; - EmojiFlagTok = EmojiFlagSeq >start @end; - OtherTok = OtherSeq >start @end; - PrependTok = PrependSeq >start @end; - - main := |* - CRLFTok => emit; - ControlTok => emit; - HangulTok => emit; - EmojiTok => emit; - ZWJTok => emit; - EmojiFlagTok => emit; - PrependTok => emit; - OtherTok => emit; - - # any single valid UTF-8 character would also be valid per spec, - # but we'll handle that separately after the loop so we can deal - # with requesting more bytes if we're not at EOF. - *|; - - write init; - write exec; - }%% - - // If we fall out here then we were unable to complete a sequence. - // If we weren't able to complete a sequence then either we've - // reached the end of a partial buffer (so there's more data to come) - // or we have an isolated symbol that would normally be part of a - // grapheme cluster but has appeared in isolation here. - - if !atEOF { - // Request more - return 0, nil, nil - } - - // Just take the first UTF-8 sequence and return that. 
- _, seqLen := utf8.DecodeRune(data) - return seqLen, data[:seqLen], nil -} \ No newline at end of file diff --git a/vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters_table.rl deleted file mode 100644 index 803dca19c5..0000000000 --- a/vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters_table.rl +++ /dev/null @@ -1,1609 +0,0 @@ -# The following Ragel file was autogenerated with unicode2ragel.rb -# from: https://www.unicode.org/Public/13.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -# -# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "ZWJ"]. -# -# To use this, make sure that your alphtype is set to byte, -# and that your input is in utf8. - -%%{ - machine GraphemeCluster; - - Prepend = - 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ... - | 0xDB 0x9D #Cf ARABIC END OF AYAH - | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK - | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH - | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH - | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN - | 0xF0 0x91 0x83 0x8D #Cf KAITHI NUMBER SIGN ABOVE - | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA... - | 0xF0 0x91 0xA4 0xBF #Lo DIVES AKURU PREFIXED NASAL SIGN - | 0xF0 0x91 0xA5 0x81 #Lo DIVES AKURU INITIAL RA - | 0xF0 0x91 0xA8 0xBA #Lo ZANABAZAR SQUARE CLUSTER-INITIAL L... - | 0xF0 0x91 0xAA 0x84..0x89 #Lo [6] SOYOMBO SIGN JIHVAMULIYA..SOYOM... - | 0xF0 0x91 0xB5 0x86 #Lo MASARAM GONDI REPHA - ; - - CR = - 0x0D #Cc - ; - - LF = - 0x0A #Cc - ; - - Control = - 0x00..0x09 #Cc [10] .. - | 0x0B..0x0C #Cc [2] .. - | 0x0E..0x1F #Cc [18] .. - | 0x7F #Cc [33] .. - | 0xC2 0x80..0x9F # - | 0xC2 0xAD #Cf SOFT HYPHEN - | 0xD8 0x9C #Cf ARABIC LETTER MARK - | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR - | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE - | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ... - | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR - | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR - | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-... - | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS - | 0xE2 0x81 0xA5 #Cn - | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG... - | 0xEF 0xBB 0xBF #Cf ZERO WIDTH NO-BREAK SPACE - | 0xEF 0xBF 0xB0..0xB8 #Cn [9] .. - | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT... - | 0xF0 0x93 0x90 0xB0..0xB8 #Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JO... - | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP... - | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI... - | 0xF3 0xA0 0x80 0x80 #Cn - | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG - | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] .. - | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] .. - | 0xF3 0xA0 0x83 0x00..0xBF # - | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] ....<... + | 0xF0 0x9F 0x84 0x8D..0x8F #E0.0 [3] (🄍..🄏) CIRCLED ZERO WITH S... + | 0xF0 0x9F 0x84 0xAF #E0.0 [1] (🄯) COPYLEFT SYMBOL + | 0xF0 0x9F 0x85 0xAC..0xAF #E0.0 [4] (🅬..🅯) RAISED MR SIGN..CIR... + | 0xF0 0x9F 0x85 0xB0..0xB1 #E0.6 [2] (🅰️..🅱️) A button (blood t... + | 0xF0 0x9F 0x85 0xBE..0xBF #E0.6 [2] (🅾️..🅿️) O button (blood t... + | 0xF0 0x9F 0x86 0x8E #E0.6 [1] (🆎) AB button (blood type) + | 0xF0 0x9F 0x86 0x91..0x9A #E0.6 [10] (🆑..🆚) CL button..VS button + | 0xF0 0x9F 0x86 0xAD..0xFF #E0.0 [57] (🆭..🇥) MASK WORK SYMBOL..<... 
+ | 0xF0 0x9F 0x87 0x00..0xA5 # + | 0xF0 0x9F 0x88 0x81..0x82 #E0.6 [2] (🈁..🈂️) Japanese “here” bu... + | 0xF0 0x9F 0x88 0x83..0x8F #E0.0 [13] (🈃..🈏) ..<... + | 0xF0 0x9F 0x88 0x9A #E0.6 [1] (🈚) Japanese “free of char... + | 0xF0 0x9F 0x88 0xAF #E0.6 [1] (🈯) Japanese “reserved” bu... + | 0xF0 0x9F 0x88 0xB2..0xBA #E0.6 [9] (🈲..🈺) Japanese “prohibite... + | 0xF0 0x9F 0x88 0xBC..0xBF #E0.0 [4] (🈼..🈿) ..<... + | 0xF0 0x9F 0x89 0x89..0x8F #E0.0 [7] (🉉..🉏) ..<... + | 0xF0 0x9F 0x89 0x90..0x91 #E0.6 [2] (🉐..🉑) Japanese “bargain” ... + | 0xF0 0x9F 0x89 0x92..0xFF #E0.0 [174] (🉒..🋿) ..<... + | 0xF0 0x9F 0x9B 0x9C #E15.0 [1] (🛜) wireless + | 0xF0 0x9F 0x9B 0x9D..0x9F #E14.0 [3] (🛝..🛟) playground slide..r... + | 0xF0 0x9F 0x9B 0xA0..0xA5 #E0.7 [6] (🛠️..🛥️) hammer and wrench... + | 0xF0 0x9F 0x9B 0xA6..0xA8 #E0.0 [3] (🛦..🛨) UP-POINTING MILITAR... + | 0xF0 0x9F 0x9B 0xA9 #E0.7 [1] (🛩️) small airplane + | 0xF0 0x9F 0x9B 0xAA #E0.0 [1] (🛪) NORTHEAST-POINTING AIR... + | 0xF0 0x9F 0x9B 0xAB..0xAC #E1.0 [2] (🛫..🛬) airplane departure.... + | 0xF0 0x9F 0x9B 0xAD..0xAF #E0.0 [3] (🛭..🛯) ..<... + | 0xF0 0x9F 0x9B 0xB0 #E0.7 [1] (🛰️) satellite + | 0xF0 0x9F 0x9B 0xB1..0xB2 #E0.0 [2] (🛱..🛲) ONCOMING FIRE ENGIN... + | 0xF0 0x9F 0x9B 0xB3 #E0.7 [1] (🛳️) passenger ship + | 0xF0 0x9F 0x9B 0xB4..0xB6 #E3.0 [3] (🛴..🛶) kick scooter..canoe + | 0xF0 0x9F 0x9B 0xB7..0xB8 #E5.0 [2] (🛷..🛸) sled..flying saucer + | 0xF0 0x9F 0x9B 0xB9 #E11.0 [1] (🛹) skateboard + | 0xF0 0x9F 0x9B 0xBA #E12.0 [1] (🛺) auto rickshaw + | 0xF0 0x9F 0x9B 0xBB..0xBC #E13.0 [2] (🛻..🛼) pickup truck..rolle... + | 0xF0 0x9F 0x9B 0xBD..0xBF #E0.0 [3] (🛽..🛿) ..<... + | 0xF0 0x9F 0x9D 0xB4..0xBF #E0.0 [12] (🝴..🝿) LOT OF FORTUNE..ORCUS + | 0xF0 0x9F 0x9F 0x95..0x9F #E0.0 [11] (🟕..🟟) CIRCLED TRIANGLE..<... + | 0xF0 0x9F 0x9F 0xA0..0xAB #E12.0 [12] (🟠..🟫) orange circle..brow... + | 0xF0 0x9F 0x9F 0xAC..0xAF #E0.0 [4] (🟬..🟯) ..<... + | 0xF0 0x9F 0x9F 0xB0 #E14.0 [1] (🟰) heavy equals sign + | 0xF0 0x9F 0x9F 0xB1..0xBF #E0.0 [15] (🟱..🟿) ..<... + | 0xF0 0x9F 0xA0 0x8C..0x8F #E0.0 [4] (🠌..🠏) ..<... + | 0xF0 0x9F 0xA1 0x88..0x8F #E0.0 [8] (🡈..🡏) ..<... + | 0xF0 0x9F 0xA1 0x9A..0x9F #E0.0 [6] (🡚..🡟) ..<... + | 0xF0 0x9F 0xA2 0x88..0x8F #E0.0 [8] (🢈..🢏) ..<... + | 0xF0 0x9F 0xA2 0xAE..0xFF #E0.0 [82] (🢮..🣿) ..<... + | 0xF0 0x9F 0xA3 0x00..0xBF # + | 0xF0 0x9F 0xA4 0x8C #E13.0 [1] (🤌) pinched fingers + | 0xF0 0x9F 0xA4 0x8D..0x8F #E12.0 [3] (🤍..🤏) white heart..pinchi... + | 0xF0 0x9F 0xA4 0x90..0x98 #E1.0 [9] (🤐..🤘) zipper-mouth face..... + | 0xF0 0x9F 0xA4 0x99..0x9E #E3.0 [6] (🤙..🤞) call me hand..cross... + | 0xF0 0x9F 0xA4 0x9F #E5.0 [1] (🤟) love-you gesture + | 0xF0 0x9F 0xA4 0xA0..0xA7 #E3.0 [8] (🤠..🤧) cowboy hat face..sn... + | 0xF0 0x9F 0xA4 0xA8..0xAF #E5.0 [8] (🤨..🤯) face with raised ey... + | 0xF0 0x9F 0xA4 0xB0 #E3.0 [1] (🤰) pregnant woman + | 0xF0 0x9F 0xA4 0xB1..0xB2 #E5.0 [2] (🤱..🤲) breast-feeding..pal... + | 0xF0 0x9F 0xA4 0xB3..0xBA #E3.0 [8] (🤳..🤺) selfie..person fencing + | 0xF0 0x9F 0xA4 0xBC..0xBE #E3.0 [3] (🤼..🤾) people wrestling..p... + | 0xF0 0x9F 0xA4 0xBF #E12.0 [1] (🤿) diving mask + | 0xF0 0x9F 0xA5 0x80..0x85 #E3.0 [6] (🥀..🥅) wilted flower..goal... + | 0xF0 0x9F 0xA5 0x87..0x8B #E3.0 [5] (🥇..🥋) 1st place medal..ma... + | 0xF0 0x9F 0xA5 0x8C #E5.0 [1] (🥌) curling stone + | 0xF0 0x9F 0xA5 0x8D..0x8F #E11.0 [3] (🥍..🥏) lacrosse..flying disc + | 0xF0 0x9F 0xA5 0x90..0x9E #E3.0 [15] (🥐..🥞) croissant..pancakes + | 0xF0 0x9F 0xA5 0x9F..0xAB #E5.0 [13] (🥟..🥫) dumpling..canned food + | 0xF0 0x9F 0xA5 0xAC..0xB0 #E11.0 [5] (🥬..🥰) leafy green..smilin... 
+ | 0xF0 0x9F 0xA5 0xB1 #E12.0 [1] (🥱) yawning face + | 0xF0 0x9F 0xA5 0xB2 #E13.0 [1] (🥲) smiling face with tear + | 0xF0 0x9F 0xA5 0xB3..0xB6 #E11.0 [4] (🥳..🥶) partying face..cold... + | 0xF0 0x9F 0xA5 0xB7..0xB8 #E13.0 [2] (🥷..🥸) ninja..disguised face + | 0xF0 0x9F 0xA5 0xB9 #E14.0 [1] (🥹) face holding back tears + | 0xF0 0x9F 0xA5 0xBA #E11.0 [1] (🥺) pleading face + | 0xF0 0x9F 0xA5 0xBB #E12.0 [1] (🥻) sari + | 0xF0 0x9F 0xA5 0xBC..0xBF #E11.0 [4] (🥼..🥿) lab coat..flat shoe + | 0xF0 0x9F 0xA6 0x80..0x84 #E1.0 [5] (🦀..🦄) crab..unicorn + | 0xF0 0x9F 0xA6 0x85..0x91 #E3.0 [13] (🦅..🦑) eagle..squid + | 0xF0 0x9F 0xA6 0x92..0x97 #E5.0 [6] (🦒..🦗) giraffe..cricket + | 0xF0 0x9F 0xA6 0x98..0xA2 #E11.0 [11] (🦘..🦢) kangaroo..swan + | 0xF0 0x9F 0xA6 0xA3..0xA4 #E13.0 [2] (🦣..🦤) mammoth..dodo + | 0xF0 0x9F 0xA6 0xA5..0xAA #E12.0 [6] (🦥..🦪) sloth..oyster + | 0xF0 0x9F 0xA6 0xAB..0xAD #E13.0 [3] (🦫..🦭) beaver..seal + | 0xF0 0x9F 0xA6 0xAE..0xAF #E12.0 [2] (🦮..🦯) guide dog..white cane + | 0xF0 0x9F 0xA6 0xB0..0xB9 #E11.0 [10] (🦰..🦹) red hair..supervillain + | 0xF0 0x9F 0xA6 0xBA..0xBF #E12.0 [6] (🦺..🦿) safety vest..mechan... + | 0xF0 0x9F 0xA7 0x80 #E1.0 [1] (🧀) cheese wedge + | 0xF0 0x9F 0xA7 0x81..0x82 #E11.0 [2] (🧁..🧂) cupcake..salt + | 0xF0 0x9F 0xA7 0x83..0x8A #E12.0 [8] (🧃..🧊) beverage box..ice + | 0xF0 0x9F 0xA7 0x8B #E13.0 [1] (🧋) bubble tea + | 0xF0 0x9F 0xA7 0x8C #E14.0 [1] (🧌) troll + | 0xF0 0x9F 0xA7 0x8D..0x8F #E12.0 [3] (🧍..🧏) person standing..de... + | 0xF0 0x9F 0xA7 0x90..0xA6 #E5.0 [23] (🧐..🧦) face with monocle..... + | 0xF0 0x9F 0xA7 0xA7..0xBF #E11.0 [25] (🧧..🧿) red envelope..nazar... + | 0xF0 0x9F 0xA8 0x80..0xFF #E0.0 [112] (🨀..🩯) NEUTRAL CHESS KING.... + | 0xF0 0x9F 0xA9 0x00..0xAF # + | 0xF0 0x9F 0xA9 0xB0..0xB3 #E12.0 [4] (🩰..🩳) ballet shoes..shorts + | 0xF0 0x9F 0xA9 0xB4 #E13.0 [1] (🩴) thong sandal + | 0xF0 0x9F 0xA9 0xB5..0xB7 #E15.0 [3] (🩵..🩷) light blue heart..p... + | 0xF0 0x9F 0xA9 0xB8..0xBA #E12.0 [3] (🩸..🩺) drop of blood..stet... + | 0xF0 0x9F 0xA9 0xBB..0xBC #E14.0 [2] (🩻..🩼) x-ray..crutch + | 0xF0 0x9F 0xA9 0xBD..0xBF #E0.0 [3] (🩽..🩿) ..<... + | 0xF0 0x9F 0xAA 0x80..0x82 #E12.0 [3] (🪀..🪂) yo-yo..parachute + | 0xF0 0x9F 0xAA 0x83..0x86 #E13.0 [4] (🪃..🪆) boomerang..nesting ... + | 0xF0 0x9F 0xAA 0x87..0x88 #E15.0 [2] (🪇..🪈) maracas..flute + | 0xF0 0x9F 0xAA 0x89..0x8F #E0.0 [7] (🪉..🪏) ..<... + | 0xF0 0x9F 0xAA 0x90..0x95 #E12.0 [6] (🪐..🪕) ringed planet..banjo + | 0xF0 0x9F 0xAA 0x96..0xA8 #E13.0 [19] (🪖..🪨) military helmet..rock + | 0xF0 0x9F 0xAA 0xA9..0xAC #E14.0 [4] (🪩..🪬) mirror ball..hamsa + | 0xF0 0x9F 0xAA 0xAD..0xAF #E15.0 [3] (🪭..🪯) folding hand fan..k... + | 0xF0 0x9F 0xAA 0xB0..0xB6 #E13.0 [7] (🪰..🪶) fly..feather + | 0xF0 0x9F 0xAA 0xB7..0xBA #E14.0 [4] (🪷..🪺) lotus..nest with eggs + | 0xF0 0x9F 0xAA 0xBB..0xBD #E15.0 [3] (🪻..🪽) hyacinth..wing + | 0xF0 0x9F 0xAA 0xBE #E0.0 [1] (🪾) + | 0xF0 0x9F 0xAA 0xBF #E15.0 [1] (🪿) goose + | 0xF0 0x9F 0xAB 0x80..0x82 #E13.0 [3] (🫀..🫂) anatomical heart..p... + | 0xF0 0x9F 0xAB 0x83..0x85 #E14.0 [3] (🫃..🫅) pregnant man..perso... + | 0xF0 0x9F 0xAB 0x86..0x8D #E0.0 [8] (🫆..🫍) ..<... + | 0xF0 0x9F 0xAB 0x8E..0x8F #E15.0 [2] (🫎..🫏) moose..donkey + | 0xF0 0x9F 0xAB 0x90..0x96 #E13.0 [7] (🫐..🫖) blueberries..teapot + | 0xF0 0x9F 0xAB 0x97..0x99 #E14.0 [3] (🫗..🫙) pouring liquid..jar + | 0xF0 0x9F 0xAB 0x9A..0x9B #E15.0 [2] (🫚..🫛) ginger root..pea pod + | 0xF0 0x9F 0xAB 0x9C..0x9F #E0.0 [4] (🫜..🫟) ..<... 
+ | 0xF0 0x9F 0xAB 0xA0..0xA7 #E14.0 [8] (🫠..🫧) melting face..bubbles + | 0xF0 0x9F 0xAB 0xA8 #E15.0 [1] (🫨) shaking face + | 0xF0 0x9F 0xAB 0xA9..0xAF #E0.0 [7] (🫩..🫯) ..<... + | 0xF0 0x9F 0xAB 0xB0..0xB6 #E14.0 [7] (🫰..🫶) hand with index fin... + | 0xF0 0x9F 0xAB 0xB7..0xB8 #E15.0 [2] (🫷..🫸) leftwards pushing h... + | 0xF0 0x9F 0xAB 0xB9..0xBF #E0.0 [7] (🫹..🫿) ..<... + | 0xF0 0x9F 0xB0 0x80..0xFF #E0.0[1022] (🰀..🿽) 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 4: +//line NONE:1 + ts = p + +//line grapheme_clusters.go:4080 + } + } + + _keys = int(_graphclust_key_offsets[cs]) + _trans = int(_graphclust_index_offsets[cs]) + + _klen = int(_graphclust_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _graphclust_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_graphclust_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _graphclust_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_graphclust_indicies[_trans]) + _eof_trans: + cs = int(_graphclust_trans_targs[_trans]) + + if _graphclust_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_graphclust_trans_actions[_trans]) + _nacts = uint(_graphclust_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 0: +//line grapheme_clusters.rl:47 + + startPos = p + + case 1: +//line grapheme_clusters.rl:51 + + endPos = p + + case 5: +//line NONE:1 + te = p + 1 + + case 6: +//line grapheme_clusters.rl:55 + act = 3 + case 7: +//line grapheme_clusters.rl:55 + act = 4 + case 8: +//line grapheme_clusters.rl:55 + act = 8 + case 9: +//line grapheme_clusters.rl:55 + te = p + 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 10: +//line grapheme_clusters.rl:55 + te = p + 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 11: +//line grapheme_clusters.rl:55 + te = p + p-- + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 12: +//line grapheme_clusters.rl:55 + te = p + p-- + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 13: +//line grapheme_clusters.rl:55 + te = p + p-- + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 14: +//line grapheme_clusters.rl:55 + te = p + p-- + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 15: +//line grapheme_clusters.rl:55 + te = p + p-- + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 16: +//line grapheme_clusters.rl:55 + te = p + p-- + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 17: +//line grapheme_clusters.rl:55 + p = (te) - 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 18: +//line grapheme_clusters.rl:55 + p = (te) - 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 19: +//line grapheme_clusters.rl:55 + p = (te) - 1 + { + return endPos + 
1, data[startPos : endPos+1], nil + } + case 20: +//line grapheme_clusters.rl:55 + p = (te) - 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 21: +//line grapheme_clusters.rl:55 + p = (te) - 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 22: +//line grapheme_clusters.rl:55 + p = (te) - 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 23: +//line NONE:1 + switch act { + case 0: + { + cs = 0 + goto _again + } + case 3: + { + p = (te) - 1 + + return endPos + 1, data[startPos : endPos+1], nil + } + case 4: + { + p = (te) - 1 + + return endPos + 1, data[startPos : endPos+1], nil + } + case 8: + { + p = (te) - 1 + + return endPos + 1, data[startPos : endPos+1], nil + } + } + +//line grapheme_clusters.go:4287 + } + } + + _again: + _acts = int(_graphclust_to_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 2: +//line NONE:1 + ts = 0 + + case 3: +//line NONE:1 + act = 0 + +//line grapheme_clusters.go:4305 + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + if _graphclust_eof_trans[cs] > 0 { + _trans = int(_graphclust_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: + { + } + } + +//line grapheme_clusters.rl:117 + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. + _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/vendor/github.com/apparentlymart/go-textseg/v12/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.rl similarity index 100% rename from vendor/github.com/apparentlymart/go-textseg/v12/textseg/grapheme_clusters.rl rename to vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.rl diff --git a/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters_table.rl new file mode 100644 index 0000000000..3cff4291de --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters_table.rl @@ -0,0 +1,1637 @@ +# The following Ragel file was autogenerated with unicode2ragel.rb +# from: https://www.unicode.org/Public/15.0.0/ucd/auxiliary/GraphemeBreakProperty.txt +# +# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "ZWJ"]. +# +# To use this, make sure that your alphtype is set to byte, +# and that your input is in utf8. + +%%{ + machine GraphemeCluster; + + Prepend = + 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ... + | 0xDB 0x9D #Cf ARABIC END OF AYAH + | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK + | 0xE0 0xA2 0x90..0x91 #Cf [2] ARABIC POUND MARK ABOVE..ARABIC PI... + | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH + | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH + | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN + | 0xF0 0x91 0x83 0x8D #Cf KAITHI NUMBER SIGN ABOVE + | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA... 
+ | 0xF0 0x91 0xA4 0xBF #Lo DIVES AKURU PREFIXED NASAL SIGN + | 0xF0 0x91 0xA5 0x81 #Lo DIVES AKURU INITIAL RA + | 0xF0 0x91 0xA8 0xBA #Lo ZANABAZAR SQUARE CLUSTER-INITIAL L... + | 0xF0 0x91 0xAA 0x84..0x89 #Lo [6] SOYOMBO SIGN JIHVAMULIYA..SOYOM... + | 0xF0 0x91 0xB5 0x86 #Lo MASARAM GONDI REPHA + | 0xF0 0x91 0xBC 0x82 #Lo KAWI SIGN REPHA + ; + + CR = + 0x0D #Cc + ; + + LF = + 0x0A #Cc + ; + + Control = + 0x00..0x09 #Cc [10] .. + | 0x0B..0x0C #Cc [2] .. + | 0x0E..0x1F #Cc [18] .. + | 0x7F #Cc [33] .. + | 0xC2 0x80..0x9F # + | 0xC2 0xAD #Cf SOFT HYPHEN + | 0xD8 0x9C #Cf ARABIC LETTER MARK + | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR + | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE + | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ... + | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR + | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR + | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-... + | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS + | 0xE2 0x81 0xA5 #Cn + | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG... + | 0xEF 0xBB 0xBF #Cf ZERO WIDTH NO-BREAK SPACE + | 0xEF 0xBF 0xB0..0xB8 #Cn [9] .. + | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT... + | 0xF0 0x93 0x90 0xB0..0xBF #Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JO... + | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP... + | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI... + | 0xF3 0xA0 0x80 0x80 #Cn + | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG + | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] .. + | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] .. + | 0xF3 0xA0 0x83 0x00..0xBF # + | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] ..= label - }) - if idx < num && n.edges[idx].label == label { - n.edges[idx].node = node - return - } - panic("replacing missing edge") -} - -func (n *node) getEdge(label byte) *node { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - return n.edges[idx].node - } - return nil -} - -func (n *node) delEdge(label byte) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - copy(n.edges[idx:], n.edges[idx+1:]) - n.edges[len(n.edges)-1] = edge{} - n.edges = n.edges[:len(n.edges)-1] - } -} - -type edges []edge - -func (e edges) Len() int { - return len(e) -} - -func (e edges) Less(i, j int) bool { - return e[i].label < e[j].label -} - -func (e edges) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} - -func (e edges) Sort() { - sort.Sort(e) -} - -// Tree implements a radix tree. This can be treated as a -// Dictionary abstract data type. 
The main advantage over -// a standard hash map is prefix-based lookups and -// ordered iteration, -type Tree struct { - root *node - size int -} - -// New returns an empty Tree -func New() *Tree { - return NewFromMap(nil) -} - -// NewFromMap returns a new tree containing the keys -// from an existing map -func NewFromMap(m map[string]interface{}) *Tree { - t := &Tree{root: &node{}} - for k, v := range m { - t.Insert(k, v) - } - return t -} - -// Len is used to return the number of elements in the tree -func (t *Tree) Len() int { - return t.size -} - -// longestPrefix finds the length of the shared prefix -// of two strings -func longestPrefix(k1, k2 string) int { - max := len(k1) - if l := len(k2); l < max { - max = l - } - var i int - for i = 0; i < max; i++ { - if k1[i] != k2[i] { - break - } - } - return i -} - -// Insert is used to add a newentry or update -// an existing entry. Returns if updated. -func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { - var parent *node - n := t.root - search := s - for { - // Handle key exhaution - if len(search) == 0 { - if n.isLeaf() { - old := n.leaf.val - n.leaf.val = v - return old, true - } - - n.leaf = &leafNode{ - key: s, - val: v, - } - t.size++ - return nil, false - } - - // Look for the edge - parent = n - n = n.getEdge(search[0]) - - // No edge, create one - if n == nil { - e := edge{ - label: search[0], - node: &node{ - leaf: &leafNode{ - key: s, - val: v, - }, - prefix: search, - }, - } - parent.addEdge(e) - t.size++ - return nil, false - } - - // Determine longest prefix of the search key on match - commonPrefix := longestPrefix(search, n.prefix) - if commonPrefix == len(n.prefix) { - search = search[commonPrefix:] - continue - } - - // Split the node - t.size++ - child := &node{ - prefix: search[:commonPrefix], - } - parent.updateEdge(search[0], child) - - // Restore the existing node - child.addEdge(edge{ - label: n.prefix[commonPrefix], - node: n, - }) - n.prefix = n.prefix[commonPrefix:] - - // Create a new leaf node - leaf := &leafNode{ - key: s, - val: v, - } - - // If the new key is a subset, add to to this node - search = search[commonPrefix:] - if len(search) == 0 { - child.leaf = leaf - return nil, false - } - - // Create a new edge for the node - child.addEdge(edge{ - label: search[0], - node: &node{ - leaf: leaf, - prefix: search, - }, - }) - return nil, false - } -} - -// Delete is used to delete a key, returning the previous -// value and if it was deleted -func (t *Tree) Delete(s string) (interface{}, bool) { - var parent *node - var label byte - n := t.root - search := s - for { - // Check for key exhaution - if len(search) == 0 { - if !n.isLeaf() { - break - } - goto DELETE - } - - // Look for an edge - parent = n - label = search[0] - n = n.getEdge(label) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false - -DELETE: - // Delete the leaf - leaf := n.leaf - n.leaf = nil - t.size-- - - // Check if we should delete this node from the parent - if parent != nil && len(n.edges) == 0 { - parent.delEdge(label) - } - - // Check if we should merge this node - if n != t.root && len(n.edges) == 1 { - n.mergeChild() - } - - // Check if we should merge the parent's other child - if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { - parent.mergeChild() - } - - return leaf.val, true -} - -// DeletePrefix is used to delete the subtree under a prefix -// 
Returns how many nodes were deleted -// Use this to delete large subtrees efficiently -func (t *Tree) DeletePrefix(s string) int { - return t.deletePrefix(nil, t.root, s) -} - -// delete does a recursive deletion -func (t *Tree) deletePrefix(parent, n *node, prefix string) int { - // Check for key exhaustion - if len(prefix) == 0 { - // Remove the leaf node - subTreeSize := 0 - //recursively walk from all edges of the node to be deleted - recursiveWalk(n, func(s string, v interface{}) bool { - subTreeSize++ - return false - }) - if n.isLeaf() { - n.leaf = nil - } - n.edges = nil // deletes the entire subtree - - // Check if we should merge the parent's other child - if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { - parent.mergeChild() - } - t.size -= subTreeSize - return subTreeSize - } - - // Look for an edge - label := prefix[0] - child := n.getEdge(label) - if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) { - return 0 - } - - // Consume the search prefix - if len(child.prefix) > len(prefix) { - prefix = prefix[len(prefix):] - } else { - prefix = prefix[len(child.prefix):] - } - return t.deletePrefix(n, child, prefix) -} - -func (n *node) mergeChild() { - e := n.edges[0] - child := e.node - n.prefix = n.prefix + child.prefix - n.leaf = child.leaf - n.edges = child.edges -} - -// Get is used to lookup a specific key, returning -// the value and if it was found -func (t *Tree) Get(s string) (interface{}, bool) { - n := t.root - search := s - for { - // Check for key exhaution - if len(search) == 0 { - if n.isLeaf() { - return n.leaf.val, true - } - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false -} - -// LongestPrefix is like Get, but instead of an -// exact match, it will return the longest prefix match. 
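For orientation, the radix tree API in this hunk (Insert, Get, LongestPrefix, plus the ordered walks defined further down) composes as follows. This is a sketch assuming the upstream import path github.com/armon/go-radix for the vendored package:

```go
package main

import (
	"fmt"

	radix "github.com/armon/go-radix" // assumed upstream path of this vendored package
)

func main() {
	t := radix.New()
	t.Insert("foo", 1)
	t.Insert("foobar", 2)
	t.Insert("fizz", 3)

	v, ok := t.Get("foo") // exact lookup
	fmt.Println(v, ok)    // 1 true

	// Longest-prefix match: there is no "foozip" key, so it falls
	// back to the longest inserted key that prefixes it, "foo".
	k, v, _ := t.LongestPrefix("foozip")
	fmt.Println(k, v) // foo 1

	// Ordered, prefix-scoped iteration; returning true would abort.
	t.WalkPrefix("f", func(k string, v interface{}) bool {
		fmt.Println(k, v) // fizz 3, foo 1, foobar 2 (lexicographic)
		return false
	})
}
```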
-func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { - var last *leafNode - n := t.root - search := s - for { - // Look for a leaf node - if n.isLeaf() { - last = n.leaf - } - - // Check for key exhaution - if len(search) == 0 { - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - if last != nil { - return last.key, last.val, true - } - return "", nil, false -} - -// Minimum is used to return the minimum value in the tree -func (t *Tree) Minimum() (string, interface{}, bool) { - n := t.root - for { - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - if len(n.edges) > 0 { - n = n.edges[0].node - } else { - break - } - } - return "", nil, false -} - -// Maximum is used to return the maximum value in the tree -func (t *Tree) Maximum() (string, interface{}, bool) { - n := t.root - for { - if num := len(n.edges); num > 0 { - n = n.edges[num-1].node - continue - } - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - break - } - return "", nil, false -} - -// Walk is used to walk the tree -func (t *Tree) Walk(fn WalkFn) { - recursiveWalk(t.root, fn) -} - -// WalkPrefix is used to walk the tree under a prefix -func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { - n := t.root - search := prefix - for { - // Check for key exhaution - if len(search) == 0 { - recursiveWalk(n, fn) - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - - } else if strings.HasPrefix(n.prefix, search) { - // Child may be under our search prefix - recursiveWalk(n, fn) - return - } else { - break - } - } - -} - -// WalkPath is used to walk the tree, but only visiting nodes -// from the root down to a given leaf. Where WalkPrefix walks -// all the entries *under* the given prefix, this walks the -// entries *above* the given prefix. -func (t *Tree) WalkPath(path string, fn WalkFn) { - n := t.root - search := path - for { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return - } - - // Check for key exhaution - if len(search) == 0 { - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - return - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } -} - -// recursiveWalk is used to do a pre-order walk of a node -// recursively. 
Returns true if the walk should be aborted -func recursiveWalk(n *node, fn WalkFn) bool { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return true - } - - // Recurse on the children - for _, e := range n.edges { - if recursiveWalk(e.node, fn) { - return true - } - } - return false -} - -// ToMap is used to walk the tree and convert it into a map -func (t *Tree) ToMap() map[string]interface{} { - out := make(map[string]interface{}, t.size) - t.Walk(func(k string, v interface{}) bool { - out[k] = v - return false - }) - return out -} diff --git a/vendor/github.com/ashanbrown/forbidigo/forbidigo/config_options.go b/vendor/github.com/ashanbrown/forbidigo/forbidigo/config_options.go index a39f754f06..3f0ed6682a 100644 --- a/vendor/github.com/ashanbrown/forbidigo/forbidigo/config_options.go +++ b/vendor/github.com/ashanbrown/forbidigo/forbidigo/config_options.go @@ -2,6 +2,10 @@ package forbidigo // Code generated by github.com/launchdarkly/go-options. DO NOT EDIT. +import "fmt" + +import "github.com/google/go-cmp/cmp" + type ApplyOptionFunc func(c *config) error func (f ApplyOptionFunc) apply(c *config) error { @@ -28,18 +32,98 @@ type Option interface { apply(*config) error } +type optionExcludeGodocExamplesImpl struct { + o bool +} + +func (o optionExcludeGodocExamplesImpl) apply(c *config) error { + c.ExcludeGodocExamples = o.o + return nil +} + +func (o optionExcludeGodocExamplesImpl) Equal(v optionExcludeGodocExamplesImpl) bool { + switch { + case !cmp.Equal(o.o, v.o): + return false + } + return true +} + +func (o optionExcludeGodocExamplesImpl) String() string { + name := "OptionExcludeGodocExamples" + + // hack to avoid go vet error about passing a function to Sprintf + var value interface{} = o.o + return fmt.Sprintf("%s: %+v", name, value) +} + // OptionExcludeGodocExamples don't check inside Godoc examples (see https://blog.golang.org/examples) -func OptionExcludeGodocExamples(o bool) ApplyOptionFunc { - return func(c *config) error { - c.ExcludeGodocExamples = o - return nil +func OptionExcludeGodocExamples(o bool) Option { + return optionExcludeGodocExamplesImpl{ + o: o, + } +} + +type optionIgnorePermitDirectivesImpl struct { + o bool +} + +func (o optionIgnorePermitDirectivesImpl) apply(c *config) error { + c.IgnorePermitDirectives = o.o + return nil +} + +func (o optionIgnorePermitDirectivesImpl) Equal(v optionIgnorePermitDirectivesImpl) bool { + switch { + case !cmp.Equal(o.o, v.o): + return false } + return true +} + +func (o optionIgnorePermitDirectivesImpl) String() string { + name := "OptionIgnorePermitDirectives" + + // hack to avoid go vet error about passing a function to Sprintf + var value interface{} = o.o + return fmt.Sprintf("%s: %+v", name, value) } // OptionIgnorePermitDirectives don't check for `permit` directives(for example, in favor of `nolint`) -func OptionIgnorePermitDirectives(o bool) ApplyOptionFunc { - return func(c *config) error { - c.IgnorePermitDirectives = o - return nil +func OptionIgnorePermitDirectives(o bool) Option { + return optionIgnorePermitDirectivesImpl{ + o: o, + } +} + +type optionAnalyzeTypesImpl struct { + o bool +} + +func (o optionAnalyzeTypesImpl) apply(c *config) error { + c.AnalyzeTypes = o.o + return nil +} + +func (o optionAnalyzeTypesImpl) Equal(v optionAnalyzeTypesImpl) bool { + switch { + case !cmp.Equal(o.o, v.o): + return false + } + return true +} + +func (o optionAnalyzeTypesImpl) String() string { + name := "OptionAnalyzeTypes" + + // hack to avoid go vet error about passing a 
function to Sprintf + var value interface{} = o.o + return fmt.Sprintf("%s: %+v", name, value) +} + +// OptionAnalyzeTypes enable to match canonical names for types and interfaces using type info +func OptionAnalyzeTypes(o bool) Option { + return optionAnalyzeTypesImpl{ + o: o, } } diff --git a/vendor/github.com/ashanbrown/forbidigo/forbidigo/forbidigo.go b/vendor/github.com/ashanbrown/forbidigo/forbidigo/forbidigo.go index 17740faa7c..a7a3ab591e 100644 --- a/vendor/github.com/ashanbrown/forbidigo/forbidigo/forbidigo.go +++ b/vendor/github.com/ashanbrown/forbidigo/forbidigo/forbidigo.go @@ -1,4 +1,4 @@ -// forbidigo provides a linter for forbidding the use of specific identifiers +// Package forbidigo provides a linter for forbidding the use of specific identifiers package forbidigo import ( @@ -7,15 +7,15 @@ import ( "go/ast" "go/printer" "go/token" + "go/types" "log" "regexp" "strings" - - "github.com/pkg/errors" ) type Issue interface { Details() string + Pos() token.Pos Position() token.Position String() string } @@ -23,6 +23,7 @@ type Issue interface { type UsedIssue struct { identifier string pattern string + pos token.Pos position token.Position customMsg string } @@ -39,6 +40,10 @@ func (a UsedIssue) Position() token.Position { return a.position } +func (a UsedIssue) Pos() token.Pos { + return a.pos +} + func (a UsedIssue) String() string { return toString(a) } func toString(i UsedIssue) string { @@ -59,12 +64,13 @@ type config struct { // don't check inside Godoc examples (see https://blog.golang.org/examples) ExcludeGodocExamples bool `options:",true"` IgnorePermitDirectives bool // don't check for `permit` directives(for example, in favor of `nolint`) + AnalyzeTypes bool // enable to match canonical names for types and interfaces using type info } func NewLinter(patterns []string, options ...Option) (*Linter, error) { cfg, err := newConfig(options...) if err != nil { - return nil, errors.Wrapf(err, "failed to process options") + return nil, fmt.Errorf("failed to process options: %w", err) } if len(patterns) == 0 { @@ -91,19 +97,43 @@ type visitor struct { linter *Linter comments []*ast.CommentGroup - fset *token.FileSet - issues []Issue + runConfig RunConfig + issues []Issue } +// Deprecated: Run was the original entrypoint before RunWithConfig was introduced to support +// additional match patterns that need additional information. func (l *Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { - var issues []Issue //nolint:prealloc // we don't know how many there will be + return l.RunWithConfig(RunConfig{Fset: fset}, nodes...) +} + +// RunConfig provides information that the linter needs for different kinds +// of match patterns. Ideally, all fields should get set. More fields may get +// added in the future as needed. +type RunConfig struct { + // FSet is required. + Fset *token.FileSet + + // TypesInfo is needed for expanding source code expressions. + // Nil disables that step, i.e. patterns match the literal source code. + TypesInfo *types.Info + + // DebugLog is used to print debug messages. May be nil. 
+ DebugLog func(format string, args ...interface{}) +} + +func (l *Linter) RunWithConfig(config RunConfig, nodes ...ast.Node) ([]Issue, error) { + if config.DebugLog == nil { + config.DebugLog = func(format string, args ...interface{}) {} + } + var issues []Issue for _, node := range nodes { var comments []*ast.CommentGroup isTestFile := false isWholeFileExample := false if file, ok := node.(*ast.File); ok { comments = file.Comments - fileName := fset.Position(file.Pos()).Filename + fileName := config.Fset.Position(file.Pos()).Filename isTestFile = strings.HasSuffix(fileName, "_test.go") // From https://blog.golang.org/examples, a "whole file example" is: @@ -139,7 +169,7 @@ func (l *Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { cfg: l.cfg, isTestFile: isTestFile, linter: l, - fset: fset, + runConfig: config, comments: comments, } ast.Walk(&visitor, node) @@ -156,41 +186,210 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { if isGodocExample && v.cfg.ExcludeGodocExamples { return nil } - return v + ast.Walk(v, node.Type) + if node.Body != nil { + ast.Walk(v, node.Body) + } + return nil + // Ignore constant and type names + case *ast.ValueSpec: + // Look at only type and values for const and variable specs, and not names + if node.Type != nil { + ast.Walk(v, node.Type) + } + if node.Values != nil { + for _, x := range node.Values { + ast.Walk(v, x) + } + } + return nil + // Ignore import alias names + case *ast.ImportSpec: + return nil + // Ignore type names + case *ast.TypeSpec: + // Look at only type parameters for type spec + if node.TypeParams != nil { + ast.Walk(v, node.TypeParams) + } + ast.Walk(v, node.Type) + return nil + // Ignore field names + case *ast.Field: + if node.Type != nil { + ast.Walk(v, node.Type) + } + return nil + // The following two are handled below. case *ast.SelectorExpr: case *ast.Ident: + // Everything else isn't. default: return v } + + // The text as it appears in the source is always used because issues + // use that. It's used for matching unless usage of type information + // is enabled. + srcText := v.textFor(node) + matchTexts, pkgText := v.expandMatchText(node, srcText) + v.runConfig.DebugLog("%s: match %v, package %q", v.runConfig.Fset.Position(node.Pos()), matchTexts, pkgText) for _, p := range v.linter.patterns { - if p.pattern.MatchString(v.textFor(node)) && !v.permit(node) { + if p.matches(matchTexts) && + (p.Package == "" || p.pkgRe.MatchString(pkgText)) && + !v.permit(node) { v.issues = append(v.issues, UsedIssue{ - identifier: v.textFor(node), - pattern: p.pattern.String(), - position: v.fset.Position(node.Pos()), - customMsg: p.msg, + identifier: srcText, // Always report the expression as it appears in the source code. + pattern: p.re.String(), + pos: node.Pos(), + position: v.runConfig.Fset.Position(node.Pos()), + customMsg: p.Msg, }) } } + + // descend into the left-side of selectors + if selector, isSelector := node.(*ast.SelectorExpr); isSelector { + if _, leftSideIsIdentifier := selector.X.(*ast.Ident); !leftSideIsIdentifier { + return v + } + } + return nil } +// textFor returns the expression as it appears in the source code (for +// example, <importname>.<function name>).
func (v *visitor) textFor(node ast.Node) string { buf := new(bytes.Buffer) - if err := printer.Fprint(buf, v.fset, node); err != nil { - log.Fatalf("ERROR: unable to print node at %s: %s", v.fset.Position(node.Pos()), err) + if err := printer.Fprint(buf, v.runConfig.Fset, node); err != nil { + log.Fatalf("ERROR: unable to print node at %s: %s", v.runConfig.Fset.Position(node.Pos()), err) } return buf.String() } +// expandMatchText expands the selector in a selector expression to the full package +// name and (for variables) the type: +// +// - example.com/some/pkg.Function +// - example.com/some/pkg.CustomType.Method +// +// It updates the text to match against and fills the package string if possible, +// otherwise it just returns. +func (v *visitor) expandMatchText(node ast.Node, srcText string) (matchTexts []string, pkgText string) { + // The text to match against is the literal source code if we cannot + // come up with something different. + matchTexts = []string{srcText} + + if !v.cfg.AnalyzeTypes || v.runConfig.TypesInfo == nil { + return matchTexts, pkgText + } + + location := v.runConfig.Fset.Position(node.Pos()) + + switch node := node.(type) { + case *ast.Ident: + if object, ok := v.runConfig.TypesInfo.Uses[node]; !ok { + // No information about the identifier. Should + // not happen, but perhaps there were compile + // errors? + v.runConfig.DebugLog("%s: unknown identifier %q", location, srcText) + } else if pkg := object.Pkg(); pkg != nil { + pkgText = pkg.Path() + // if this is a method, don't include the package name + isMethod := false + if signature, ok := object.Type().(*types.Signature); ok && signature.Recv() != nil { + isMethod = true + } + v.runConfig.DebugLog("%s: identifier: %q -> %q in package %q", location, srcText, matchTexts, pkgText) + // match either with or without package name + if !isMethod { + matchTexts = []string{pkg.Name() + "." + srcText, srcText} + } + } else { + v.runConfig.DebugLog("%s: identifier: %q -> %q without package", location, srcText, matchTexts) + } + case *ast.SelectorExpr: + selector := node.X + field := node.Sel.Name + + // If we are lucky, the entire selector expression has a known + // type. We don't care about the value. + selectorText := v.textFor(node) + if typeAndValue, ok := v.runConfig.TypesInfo.Types[selector]; ok { + m, p, ok := typeNameWithPackage(typeAndValue.Type) + if !ok { + v.runConfig.DebugLog("%s: selector %q with supported type %T", location, selectorText, typeAndValue.Type) + } + matchTexts = []string{m + "." + field} + pkgText = p + v.runConfig.DebugLog("%s: selector %q with supported type %q: %q -> %q, package %q", location, selectorText, typeAndValue.Type.String(), srcText, matchTexts, pkgText) + } + // Some expressions need special treatment. + switch selector := selector.(type) { + case *ast.Ident: + if object, hasUses := v.runConfig.TypesInfo.Uses[selector]; hasUses { + switch object := object.(type) { + case *types.PkgName: + pkgText = object.Imported().Path() + matchTexts = []string{object.Imported().Name() + "." + field} + v.runConfig.DebugLog("%s: selector %q is package: %q -> %q, package %q", location, selectorText, srcText, matchTexts, pkgText) + case *types.Var: + if typeName, packageName, ok := typeNameWithPackage(object.Type()); ok { + matchTexts = []string{typeName + "." 
+ field} + pkgText = packageName + v.runConfig.DebugLog("%s: selector %q is variable of type %q: %q -> %q, package %q", location, selectorText, object.Type().String(), srcText, matchTexts, pkgText) + } else { + v.runConfig.DebugLog("%s: selector %q is variable with unsupported type %T", location, selectorText, object.Type()) + } + default: + // Something else? + v.runConfig.DebugLog("%s: selector %q is identifier with unsupported type %T", location, selectorText, object) + } + } else { + // No information about the identifier. Should + // not happen, but perhaps there were compile + // errors? + v.runConfig.DebugLog("%s: unknown selector identifier %q", location, selectorText) + } + default: + v.runConfig.DebugLog("%s: selector %q of unsupported type %T", location, selectorText, selector) + } + default: + v.runConfig.DebugLog("%s: unsupported type %T", location, node) + } + return matchTexts, pkgText +} + +// typeNameWithPackage tries to determine `<package name>.<type name>` and the full +// package path. This only needs to work for types of a selector in a selector +// expression. +func typeNameWithPackage(t types.Type) (typeName, packagePath string, ok bool) { + if ptr, ok := t.(*types.Pointer); ok { + t = ptr.Elem() + } + + switch t := t.(type) { + case *types.Named: + obj := t.Obj() + pkg := obj.Pkg() + if pkg == nil { + return "", "", false + } + return pkg.Name() + "." + obj.Name(), pkg.Path(), true + default: + return "", "", false + } +} + func (v *visitor) permit(node ast.Node) bool { if v.cfg.IgnorePermitDirectives { return false } - nodePos := v.fset.Position(node.Pos()) - var nolint = regexp.MustCompile(fmt.Sprintf(`^//\s?permit:%s\b`, regexp.QuoteMeta(v.textFor(node)))) + nodePos := v.runConfig.Fset.Position(node.Pos()) + nolint := regexp.MustCompile(fmt.Sprintf(`^//\s?permit:%s\b`, regexp.QuoteMeta(v.textFor(node)))) for _, c := range v.comments { - commentPos := v.fset.Position(c.Pos()) + commentPos := v.runConfig.Fset.Position(c.Pos()) if commentPos.Line == nodePos.Line && len(c.List) > 0 && nolint.MatchString(c.List[0].Text) { return true } diff --git a/vendor/github.com/ashanbrown/forbidigo/forbidigo/patterns.go b/vendor/github.com/ashanbrown/forbidigo/forbidigo/patterns.go index c236488225..2692dcd24f 100644 --- a/vendor/github.com/ashanbrown/forbidigo/forbidigo/patterns.go +++ b/vendor/github.com/ashanbrown/forbidigo/forbidigo/patterns.go @@ -5,39 +5,123 @@ import ( "regexp" "regexp/syntax" "strings" + + "gopkg.in/yaml.v2" ) +// pattern matches code that is not supposed to be used. type pattern struct { - pattern *regexp.Regexp - msg string + re, pkgRe *regexp.Regexp + + // Pattern is the regular expression string that is used for matching. + // It gets matched against the literal source code text or the expanded + // text, depending on the mode in which the analyzer runs. + Pattern string `yaml:"p"` + + // Package is a regular expression for the full package path of + // an imported item. Ignored unless the analyzer is configured to + // determine that information. + Package string `yaml:"pkg,omitempty"` + + // Msg gets printed in addition to the normal message if a match is + // found. + Msg string `yaml:"msg,omitempty"` +} + +// A yamlPattern pattern in a YAML string may be represented either by a string +// (the traditional regular expression syntax) or a struct (for more complex +// patterns). +type yamlPattern pattern + +func (p *yamlPattern) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Try struct first.
It's unlikely that a regular expression string + // is valid YAML for a struct. + var ptrn pattern + if err := unmarshal(&ptrn); err != nil { + errStr := err.Error() + // Didn't work, try plain string. + var ptrn string + if err := unmarshal(&ptrn); err != nil { + return fmt.Errorf("pattern is neither a regular expression string (%s) nor a Pattern struct (%s)", err.Error(), errStr) + } + p.Pattern = ptrn + } else { + *p = yamlPattern(ptrn) + } + return ((*pattern)(p)).validate() } +var _ yaml.Unmarshaler = &yamlPattern{} + +// parse accepts a regular expression or, if the string starts with { or contains a line break, a +// JSON or YAML representation of a Pattern. func parse(ptrn string) (*pattern, error) { - ptrnRe, err := regexp.Compile(ptrn) + pattern := &pattern{} + + if strings.HasPrefix(strings.TrimSpace(ptrn), "{") || + strings.Contains(ptrn, "\n") { + // Embedded JSON or YAML. We can decode both with the YAML decoder. + if err := yaml.UnmarshalStrict([]byte(ptrn), pattern); err != nil { + return nil, fmt.Errorf("parsing as JSON or YAML failed: %v", err) + } + } else { + pattern.Pattern = ptrn + } + + if err := pattern.validate(); err != nil { + return nil, err + } + return pattern, nil +} + +func (p *pattern) validate() error { + ptrnRe, err := regexp.Compile(p.Pattern) if err != nil { - return nil, fmt.Errorf("unable to compile pattern `%s`: %s", ptrn, err) + return fmt.Errorf("unable to compile source code pattern `%s`: %s", p.Pattern, err) } - re, err := syntax.Parse(ptrn, syntax.Perl) + re, err := syntax.Parse(p.Pattern, syntax.Perl) if err != nil { - return nil, fmt.Errorf("unable to parse pattern `%s`: %s", ptrn, err) + return fmt.Errorf("unable to parse source code pattern `%s`: %s", p.Pattern, err) } msg := extractComment(re) - return &pattern{pattern: ptrnRe, msg: msg}, nil + if msg != "" { + p.Msg = msg + } + p.re = ptrnRe + + if p.Package != "" { + pkgRe, err := regexp.Compile(p.Package) + if err != nil { + return fmt.Errorf("unable to compile package pattern `%s`: %s", p.Package, err) + } + p.pkgRe = pkgRe + } + + return nil +} + +func (p *pattern) matches(matchTexts []string) bool { + for _, text := range matchTexts { + if p.re.MatchString(text) { + return true + } + } + return false } // Traverse the leaf submatches in the regex tree and extract a comment, if any // is present. func extractComment(re *syntax.Regexp) string { for _, sub := range re.Sub { + subStr := sub.String() + if strings.HasPrefix(subStr, "#") { + return strings.TrimSpace(strings.TrimPrefix(sub.String(), "#")) + } if len(sub.Sub) > 0 { if comment := extractComment(sub); comment != "" { return comment } } - subStr := sub.String() - if strings.HasPrefix(subStr, "#") { - return strings.TrimSpace(strings.TrimPrefix(subStr, "#")) - } } return "" } diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
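The forbidigo API updated earlier in this diff (NewLinter plus the new RunWithConfig) accepts either plain regular-expression patterns or, when a pattern string starts with "{" or contains a newline, a YAML/JSON form of the pattern struct with keys p, pkg, and msg. A minimal, self-contained sketch of driving that API directly follows; the file name, source snippet, and pattern strings here are illustrative assumptions, not part of this change:

    package main

    import (
    	"fmt"
    	"go/parser"
    	"go/token"
    	"log"

    	"github.com/ashanbrown/forbidigo/forbidigo"
    )

    func main() {
    	src := `package demo

    import "fmt"

    func demo() { fmt.Println("hi") }
    `

    	// One plain regexp pattern and one YAML pattern struct (keys p, msg).
    	linter, err := forbidigo.NewLinter([]string{
    		`^fmt\.Println$`,
    		`{p: ^fmt\.Printf$, msg: use the project logger instead}`,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}

    	fset := token.NewFileSet()
    	file, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Without TypesInfo, patterns match the literal source text.
    	issues, err := linter.RunWithConfig(forbidigo.RunConfig{Fset: fset}, file)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, issue := range issues {
    		fmt.Println(issue) // flags the fmt.Println call in src
    	}
    }

Note that the pkg key of a pattern only takes effect when OptionAnalyzeTypes(true) is passed to NewLinter and RunConfig.TypesInfo is populated from type-checking; otherwise matching stays purely textual.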
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt deleted file mode 100644 index 899129ecc4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go deleted file mode 100644 index 1c49674290..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go +++ /dev/null @@ -1,93 +0,0 @@ -// Package arn provides a parser for interacting with Amazon Resource Names. -package arn - -import ( - "errors" - "strings" -) - -const ( - arnDelimiter = ":" - arnSections = 6 - arnPrefix = "arn:" - - // zero-indexed - sectionPartition = 1 - sectionService = 2 - sectionRegion = 3 - sectionAccountID = 4 - sectionResource = 5 - - // errors - invalidPrefix = "arn: invalid prefix" - invalidSections = "arn: not enough sections" -) - -// ARN captures the individual fields of an Amazon Resource Name. -// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. -type ARN struct { - // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in - // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China - // (Beijing) region is "aws-cn". - Partition string - - // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of - // namespaces, see - // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. - Service string - - // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this - // component might be omitted. - Region string - - // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the - // ARNs for some resources don't require an account number, so this component might be omitted. - AccountID string - - // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource — - // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the - // resource name itself. Some services allows paths for resource names, as described in - // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. - Resource string -} - -// Parse parses an ARN into its constituent parts. 
-// -// Some example ARNs: -// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment -// arn:aws:iam::123456789012:user/David -// arn:aws:rds:eu-west-1:123456789012:db:mysql-db -// arn:aws:s3:::my_corporate_bucket/exampleobject.png -func Parse(arn string) (ARN, error) { - if !strings.HasPrefix(arn, arnPrefix) { - return ARN{}, errors.New(invalidPrefix) - } - sections := strings.SplitN(arn, arnDelimiter, arnSections) - if len(sections) != arnSections { - return ARN{}, errors.New(invalidSections) - } - return ARN{ - Partition: sections[sectionPartition], - Service: sections[sectionService], - Region: sections[sectionRegion], - AccountID: sections[sectionAccountID], - Resource: sections[sectionResource], - }, nil -} - -// IsARN returns whether the given string is an ARN by looking for -// whether the string starts with "arn:" and contains the correct number -// of sections delimited by colons(:). -func IsARN(arn string) bool { - return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 -} - -// String returns the canonical representation of the ARN -func (arn ARN) String() string { - return arnPrefix + - arn.Partition + arnDelimiter + - arn.Service + arnDelimiter + - arn.Region + arnDelimiter + - arn.AccountID + arnDelimiter + - arn.Resource -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go deleted file mode 100644 index 99849c0e19..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ /dev/null @@ -1,164 +0,0 @@ -// Package awserr represents API error interface accessors for the SDK. -package awserr - -// An Error wraps lower level errors with code, message and an original error. -// The underlying concrete error type may also satisfy other interfaces which -// can be to used to obtain more specific information about the error. -// -// Calling Error() or String() will always include the full information about -// an error based on its underlying type. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Get error details -// log.Println("Error:", awsErr.Code(), awsErr.Message()) -// -// // Prints out full error message, including original error if there was one. -// log.Println("Error:", awsErr.Error()) -// -// // Get original error -// if origErr := awsErr.OrigErr(); origErr != nil { -// // operate on original error. -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type Error interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErr() error -} - -// BatchError is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Deprecated: Replaced with BatchedErrors. Only defined for backwards -// compatibility. -type BatchError interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. 
- OrigErrs() []error -} - -// BatchedErrors is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Replaces BatchError -type BatchedErrors interface { - // Satisfy the base Error interface. - Error - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// New returns an Error object described by the code, message, and origErr. -// -// If origErr satisfies the Error interface it will not be wrapped within a new -// Error object and will instead be returned. -func New(code, message string, origErr error) Error { - var errs []error - if origErr != nil { - errs = append(errs, origErr) - } - return newBaseError(code, message, errs) -} - -// NewBatchError returns an BatchedErrors with a collection of errors as an -// array of errors. -func NewBatchError(code, message string, errs []error) BatchedErrors { - return newBaseError(code, message, errs) -} - -// A RequestFailure is an interface to extract request failure information from -// an Error such as the request ID of the failed request returned by a service. -// RequestFailures may not always have a requestID value if the request failed -// prior to reaching the service such as a connection error. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if reqerr, ok := err.(RequestFailure); ok { -// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) -// } else { -// log.Println("Error:", err.Error()) -// } -// } -// -// Combined with awserr.Error: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Generic AWS Error with Code, Message, and original error (if any) -// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) -// -// if reqErr, ok := err.(awserr.RequestFailure); ok { -// // A service error occurred -// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type RequestFailure interface { - Error - - // The status code of the HTTP response. - StatusCode() int - - // The request ID returned by the service for a request failure. This will - // be empty if no request ID is available such as the request failed due - // to a connection error. - RequestID() string -} - -// NewRequestFailure returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { - return newRequestError(err, statusCode, reqID) -} - -// UnmarshalError provides the interface for the SDK failing to unmarshal data. -type UnmarshalError interface { - awsError - Bytes() []byte -} - -// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding -// the bytes that fail to unmarshal to the error. 
-func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { - return &unmarshalError{ - awsError: New("UnmarshalError", msg, err), - bytes: bytes, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go deleted file mode 100644 index 9cf7eaf400..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ /dev/null @@ -1,221 +0,0 @@ -package awserr - -import ( - "encoding/hex" - "fmt" -) - -// SprintError returns a string of the formatted error code. -// -// Both extra and origErr are optional. If they are included their lines -// will be added, but if they are not included their lines will be ignored. -func SprintError(code, message, extra string, origErr error) string { - msg := fmt.Sprintf("%s: %s", code, message) - if extra != "" { - msg = fmt.Sprintf("%s\n\t%s", msg, extra) - } - if origErr != nil { - msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) - } - return msg -} - -// A baseError wraps the code and message which defines an error. It also -// can be used to wrap an original error object. -// -// Should be used as the root for errors satisfying the awserr.Error. Also -// for any error which does not fit into a specific error wrapper type. -type baseError struct { - // Classification of error - code string - - // Detailed information about error - message string - - // Optional original error this error is based off of. Allows building - // chained errors. - errs []error -} - -// newBaseError returns an error object for the code, message, and errors. -// -// code is a short no whitespace phrase depicting the classification of -// the error that is being created. -// -// message is the free flow string containing detailed information about the -// error. -// -// origErrs is the error objects which will be nested under the new errors to -// be returned. -func newBaseError(code, message string, origErrs []error) *baseError { - b := &baseError{ - code: code, - message: message, - errs: origErrs, - } - - return b -} - -// Error returns the string representation of the error. -// -// See ErrorWithExtra for formatting. -// -// Satisfies the error interface. -func (b baseError) Error() string { - size := len(b.errs) - if size > 0 { - return SprintError(b.code, b.message, "", errorList(b.errs)) - } - - return SprintError(b.code, b.message, "", nil) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (b baseError) String() string { - return b.Error() -} - -// Code returns the short phrase depicting the classification of the error. -func (b baseError) Code() string { - return b.code -} - -// Message returns the error details message. -func (b baseError) Message() string { - return b.message -} - -// OrigErr returns the original error if one was set. Nil is returned if no -// error was set. This only returns the first element in the list. If the full -// list is needed, use BatchedErrors. -func (b baseError) OrigErr() error { - switch len(b.errs) { - case 0: - return nil - case 1: - return b.errs[0] - default: - if err, ok := b.errs[0].(Error); ok { - return NewBatchError(err.Code(), err.Message(), b.errs[1:]) - } - return NewBatchError("BatchedErrors", - "multiple errors occurred", b.errs) - } -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. 
-func (b baseError) OrigErrs() []error { - return b.errs -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError Error - -// A requestError wraps a request or service error. -// -// Composed of baseError for code, message, and original error. -type requestError struct { - awsError - statusCode int - requestID string - bytes []byte -} - -// newRequestError returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -// -// Also wraps original errors via the baseError. -func newRequestError(err Error, statusCode int, requestID string) *requestError { - return &requestError{ - awsError: err, - statusCode: statusCode, - requestID: requestID, - } -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (r requestError) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s", - r.statusCode, r.requestID) - return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (r requestError) String() string { - return r.Error() -} - -// StatusCode returns the wrapped status code for the error -func (r requestError) StatusCode() int { - return r.statusCode -} - -// RequestID returns the wrapped requestID -func (r requestError) RequestID() string { - return r.requestID -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (r requestError) OrigErrs() []error { - if b, ok := r.awsError.(BatchedErrors); ok { - return b.OrigErrs() - } - return []error{r.OrigErr()} -} - -type unmarshalError struct { - awsError - bytes []byte -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (e unmarshalError) Error() string { - extra := hex.Dump(e.bytes) - return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (e unmarshalError) String() string { - return e.Error() -} - -// Bytes returns the bytes that failed to unmarshal. -func (e unmarshalError) Bytes() []byte { - return e.bytes -} - -// An error list that satisfies the golang interface -type errorList []error - -// Error returns the string representation of the error. -// -// Satisfies the error interface. -func (e errorList) Error() string { - msg := "" - // How do we want to handle the array size being zero - if size := len(e); size > 0 { - for i := 0; i < size; i++ { - msg += e[i].Error() - // We check the next index to see if it is within the slice. - // If it is, then we append a newline. We do this, because unit tests - // could be broken with the additional '\n' - if i+1 < size { - msg += "\n" - } - } - } - return msg -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go deleted file mode 100644 index 1a3d106d5c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go +++ /dev/null @@ -1,108 +0,0 @@ -package awsutil - -import ( - "io" - "reflect" - "time" -) - -// Copy deeply copies a src structure to dst. 
Useful for copying request and -// response structures. -// -// Can copy between structs of different type, but will only copy fields which -// are assignable, and exist in both structs. Fields which are not assignable, -// or do not exist in both structs are ignored. -func Copy(dst, src interface{}) { - dstval := reflect.ValueOf(dst) - if !dstval.IsValid() { - panic("Copy dst cannot be nil") - } - - rcopy(dstval, reflect.ValueOf(src), true) -} - -// CopyOf returns a copy of src while also allocating the memory for dst. -// src must be a pointer type or this operation will fail. -func CopyOf(src interface{}) (dst interface{}) { - dsti := reflect.New(reflect.TypeOf(src).Elem()) - dst = dsti.Interface() - rcopy(dsti, reflect.ValueOf(src), true) - return -} - -// rcopy performs a recursive copy of values from the source to destination. -// -// root is used to skip certain aspects of the copy which are not valid -// for the root node of a object. -func rcopy(dst, src reflect.Value, root bool) { - if !src.IsValid() { - return - } - - switch src.Kind() { - case reflect.Ptr: - if _, ok := src.Interface().(io.Reader); ok { - if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { - dst.Elem().Set(src) - } else if dst.CanSet() { - dst.Set(src) - } - } else { - e := src.Type().Elem() - if dst.CanSet() && !src.IsNil() { - if _, ok := src.Interface().(*time.Time); !ok { - dst.Set(reflect.New(e)) - } else { - tempValue := reflect.New(e) - tempValue.Elem().Set(src.Elem()) - // Sets time.Time's unexported values - dst.Set(tempValue) - } - } - if src.Elem().IsValid() { - // Keep the current root state since the depth hasn't changed - rcopy(dst.Elem(), src.Elem(), root) - } - } - case reflect.Struct: - t := dst.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - srcVal := src.FieldByName(name) - dstVal := dst.FieldByName(name) - if srcVal.IsValid() && dstVal.CanSet() { - rcopy(dstVal, srcVal, false) - } - } - case reflect.Slice: - if src.IsNil() { - break - } - - s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - dst.Set(s) - for i := 0; i < src.Len(); i++ { - rcopy(dst.Index(i), src.Index(i), false) - } - case reflect.Map: - if src.IsNil() { - break - } - - s := reflect.MakeMap(src.Type()) - dst.Set(s) - for _, k := range src.MapKeys() { - v := src.MapIndex(k) - v2 := reflect.New(v.Type()).Elem() - rcopy(v2, v, false) - dst.SetMapIndex(k, v2) - } - default: - // Assign the value if possible. If its not assignable, the value would - // need to be converted and the impact of that may be unexpected, or is - // not compatible with the dst type. - if src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go deleted file mode 100644 index 142a7a01c5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go +++ /dev/null @@ -1,27 +0,0 @@ -package awsutil - -import ( - "reflect" -) - -// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. -// In addition to this, this method will also dereference the input values if -// possible so the DeepEqual performed will not fail if one parameter is a -// pointer and the other is not. -// -// DeepEqual will not perform indirection of nested values of the input parameters. 
-func DeepEqual(a, b interface{}) bool { - ra := reflect.Indirect(reflect.ValueOf(a)) - rb := reflect.Indirect(reflect.ValueOf(b)) - - if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type they are equal - // If they are of different types they are not equal - return reflect.TypeOf(a) == reflect.TypeOf(b) - } else if raValid != rbValid { - // Both values must be valid to be equal - return false - } - - return reflect.DeepEqual(ra.Interface(), rb.Interface()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go deleted file mode 100644 index a4eb6a7f43..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ /dev/null @@ -1,221 +0,0 @@ -package awsutil - -import ( - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/jmespath/go-jmespath" -) - -var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) - -// rValuesAtPath returns a slice of values found in value v. The values -// in v are explored recursively so all nested values are collected. -func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.EqualFold(name, c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. 
- value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, valItem := range values { - value := reflect.Indirect(valItem) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. -func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - rvals := rValuesAtPath(i, path, true, false, v == nil) - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - dstVal.Set(srcVal) - } - -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go deleted file mode 100644 index 11d4240d61..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ /dev/null @@ -1,123 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. 
-func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. -func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - strtype := v.Type().String() - if strtype == "time.Time" { - fmt.Fprintf(buf, "%s", v.Interface()) - break - } else if strings.HasPrefix(strtype, "io.") { - buf.WriteString("<buffer>") - break - } - - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - ft, ok := v.Type().FieldByName(n) - if !ok { - panic(fmt.Sprintf("expected to find field %v on type %v, but was not found", n, v.Type())) - } - - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - - if tag := ft.Tag.Get("sensitive"); tag == "true" { - buf.WriteString("<sensitive>") - } else { - prettify(val, indent+2, buf) - } - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - strtype := v.Type().String() - if strtype == "[]uint8" { - fmt.Fprintf(buf, "<binary> len %d", v.Len()) - break - } - - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - prettify(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - prettify(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - if !v.IsValid() { - fmt.Fprint(buf, "<invalid value>") - return - } - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - case io.ReadSeeker, io.Reader: - format = "buffer(%p)" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go deleted file mode 100644 index 3f7cffd957..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ /dev/null @@ -1,90 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "reflect" - "strings" -) - -// StringValue returns the string representation of a value. -// -// Deprecated: Use Prettify instead.
-func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - for i := 0; i < v.Type().NumField(); i++ { - ft := v.Type().Field(i) - fv := v.Field(i) - - if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { - continue // ignore unexported fields - } - if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { - continue // ignore unset fields - } - - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(ft.Name + ": ") - - if tag := ft.Tag.Get("sensitive"); tag == "true" { - buf.WriteString("<sensitive>") - } else { - stringValue(fv, indent+2, buf) - } - - buf.WriteString(",\n") - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go deleted file mode 100644 index b147f103ce..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ /dev/null @@ -1,94 +0,0 @@ -package client - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides configuration to a service client instance. -type Config struct { - Config *aws.Config - Handlers request.Handlers - PartitionID string - Endpoint string - SigningRegion string - SigningName string - ResolvedRegion string - - // States that the signing name did not come from a modeled source but - // was derived based on other data. Used by service client constructors - // to determine if the signin name can be overridden based on metadata the - // service has. - SigningNameDerived bool -} - -// ConfigProvider provides a generic way for a service client to receive -// the ClientConfig without circular dependencies. -type ConfigProvider interface { - ClientConfig(serviceName string, cfgs ...*aws.Config) Config -} - -// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not -// resolve the endpoint automatically. The service client's endpoint must be -// provided via the aws.Config.Endpoint field. -type ConfigNoResolveEndpointProvider interface { - ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config -} - -// A Client implements the base client request and response handling -// used by all service clients.
-type Client struct { - request.Retryer - metadata.ClientInfo - - Config aws.Config - Handlers request.Handlers -} - -// New will return a pointer to a new initialized service client. -func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { - svc := &Client{ - Config: cfg, - ClientInfo: info, - Handlers: handlers.Copy(), - } - - switch retryer, ok := cfg.Retryer.(request.Retryer); { - case ok: - svc.Retryer = retryer - case cfg.Retryer != nil && cfg.Logger != nil: - s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) - cfg.Logger.Log(s) - fallthrough - default: - maxRetries := aws.IntValue(cfg.MaxRetries) - if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = DefaultRetryerMaxNumRetries - } - svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} - } - - svc.AddDebugHandlers() - - for _, option := range options { - option(svc) - } - - return svc -} - -// NewRequest returns a new Request pointer for the service API -// operation and parameters. -func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { - return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) -} - -// AddDebugHandlers injects debug logging handlers into the service to log request -// debug information. -func (c *Client) AddDebugHandlers() { - c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) - c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go deleted file mode 100644 index 9f6af19dd4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ /dev/null @@ -1,177 +0,0 @@ -package client - -import ( - "math" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkrand" -) - -// DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, you can implement the -// request.Retryer interface. -// -type DefaultRetryer struct { - // Num max Retries is the number of max retries that will be performed. - // By default, this is zero. - NumMaxRetries int - - // MinRetryDelay is the minimum retry delay after which retry will be performed. - // If not set, the value is 0ns. - MinRetryDelay time.Duration - - // MinThrottleRetryDelay is the minimum retry delay when throttled. - // If not set, the value is 0ns. - MinThrottleDelay time.Duration - - // MaxRetryDelay is the maximum retry delay before which retry must be performed. - // If not set, the value is 0ns. - MaxRetryDelay time.Duration - - // MaxThrottleDelay is the maximum retry delay when throttled. - // If not set, the value is 0ns. 
- MaxThrottleDelay time.Duration -} - -const ( - // DefaultRetryerMaxNumRetries sets maximum number of retries - DefaultRetryerMaxNumRetries = 3 - - // DefaultRetryerMinRetryDelay sets minimum retry delay - DefaultRetryerMinRetryDelay = 30 * time.Millisecond - - // DefaultRetryerMinThrottleDelay sets minimum delay when throttled - DefaultRetryerMinThrottleDelay = 500 * time.Millisecond - - // DefaultRetryerMaxRetryDelay sets maximum retry delay - DefaultRetryerMaxRetryDelay = 300 * time.Second - - // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled - DefaultRetryerMaxThrottleDelay = 300 * time.Second -) - -// MaxRetries returns the maximum number of retries the service will use to make -// an individual API request. -func (d DefaultRetryer) MaxRetries() int { - return d.NumMaxRetries -} - -// setRetryerDefaults sets the default values of the retryer if not set -func (d *DefaultRetryer) setRetryerDefaults() { - if d.MinRetryDelay == 0 { - d.MinRetryDelay = DefaultRetryerMinRetryDelay - } - if d.MaxRetryDelay == 0 { - d.MaxRetryDelay = DefaultRetryerMaxRetryDelay - } - if d.MinThrottleDelay == 0 { - d.MinThrottleDelay = DefaultRetryerMinThrottleDelay - } - if d.MaxThrottleDelay == 0 { - d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay - } -} - -// RetryRules returns the delay duration before retrying this request again -func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { - - // if number of max retries is zero, no retries will be performed. - if d.NumMaxRetries == 0 { - return 0 - } - - // Sets default value for retryer members - d.setRetryerDefaults() - - // minDelay is the minimum retryer delay - minDelay := d.MinRetryDelay - - var initialDelay time.Duration - - isThrottle := r.IsErrorThrottle() - if isThrottle { - if delay, ok := getRetryAfterDelay(r); ok { - initialDelay = delay - } - minDelay = d.MinThrottleDelay - } - - retryCount := r.RetryCount - - // maxDelay is the maximum retryer delay - maxDelay := d.MaxRetryDelay - - if isThrottle { - maxDelay = d.MaxThrottleDelay - } - - var delay time.Duration - - // Logic to cap the retry count based on the minDelay provided - actualRetryCount := int(math.Log2(float64(minDelay))) + 1 - if actualRetryCount < 63-retryCount { - delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay) - if delay > maxDelay { - delay = getJitterDelay(maxDelay / 2) - } - } else { - delay = getJitterDelay(maxDelay / 2) - } - return delay + initialDelay -} - -// getJitterDelay returns a jittered delay for retry -func getJitterDelay(duration time.Duration) time.Duration { - return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration)) -} - -// ShouldRetry returns true if the request should be retried. -func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { - - // ShouldRetry returns false if number of max retries is 0.
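The deleted `RetryRules` above combines three ingredients: a jittered base delay (`getJitterDelay` returns a value in `[d, 2d)`), exponential growth by shifting on the retry count, and a cap at the configured maximum, with a server-supplied `Retry-After` added on throttles. A standalone sketch of that scheme (function names are ours; the constants mirror the defaults above):

```go
package main

import (
	"fmt"
	"math/rand"
	"net/http"
	"strconv"
	"time"
)

const (
	minDelay = 30 * time.Millisecond // DefaultRetryerMinRetryDelay
	maxDelay = 300 * time.Second     // DefaultRetryerMaxRetryDelay
)

// jitter returns a duration in [d, 2d), like getJitterDelay above.
func jitter(d time.Duration) time.Duration {
	return time.Duration(rand.Int63n(int64(d)) + int64(d))
}

// backoff computes the pause before retry number retryCount, honoring an
// explicit Retry-After header (RFC 7231) on 429/503 responses when present.
func backoff(retryCount int, resp *http.Response) time.Duration {
	if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) {
		if secs, err := strconv.Atoi(resp.Header.Get("Retry-After")); err == nil {
			return time.Duration(secs) * time.Second
		}
	}
	// Jittered base delay grown exponentially, capped at maxDelay.
	d := time.Duration(1<<uint(retryCount)) * jitter(minDelay)
	if d > maxDelay {
		d = jitter(maxDelay / 2)
	}
	return d
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Printf("retry %d: wait %v\n", i, backoff(i, nil))
	}
}
```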
- if d.NumMaxRetries == 0 { - return false - } - - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable != nil { - return *r.Retryable - } - return r.IsErrorRetryable() || r.IsErrorThrottle() -} - -// This will look in the Retry-After header, RFC 7231, for how long -// it will wait before attempting another request -func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { - if !canUseRetryAfterHeader(r) { - return 0, false - } - - delayStr := r.HTTPResponse.Header.Get("Retry-After") - if len(delayStr) == 0 { - return 0, false - } - - delay, err := strconv.Atoi(delayStr) - if err != nil { - return 0, false - } - - return time.Duration(delay) * time.Second, true -} - -// Will look at the status code to see if the retry header pertains to -// the status code. -func canUseRetryAfterHeader(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 503: - default: - return false - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go deleted file mode 100644 index 5ac5c24a1b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ /dev/null @@ -1,206 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http/httputil" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ----[ REQUEST DUMP ERROR ]----------------------------- -%s -------------------------------------------------------` - -type logWriter struct { - // Logger is what we will use to log the payload of a response. - Logger aws.Logger - // buf stores the contents of what has been read - buf *bytes.Buffer -} - -func (logger *logWriter) Write(b []byte) (int, error) { - return logger.buf.Write(b) -} - -type teeReaderCloser struct { - // io.Reader will be a tee reader that is used during logging. - // This structure will read from a body and write the contents to a logger. - io.Reader - // Source is used just to close when we are done reading. - Source io.ReadCloser -} - -func (reader *teeReaderCloser) Close() error { - return reader.Source.Close() -} - -// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent -// to a service. Will include the HTTP request body if the LogLevel of the -// request matches LogDebugWithHTTPBody. -var LogHTTPRequestHandler = request.NamedHandler{ - Name: "awssdk.client.LogRequest", - Fn: logRequest, -} - -func logRequest(r *request.Request) { - if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { - return - } - - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - bodySeekable := aws.IsReaderSeekable(r.Body) - - b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - if logBody { - if !bodySeekable { - r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) - } - // Reset the request body because dumpRequest will re-wrap the - // r.HTTPRequest's Body as a NoOpCloser and will not be reset after - // read by the HTTP client reader. 
- if err := r.Error; err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} - -// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent -// to a service. Will only log the HTTP request's headers. The request payload -// will not be read. -var LogHTTPRequestHeaderHandler = request.NamedHandler{ - Name: "awssdk.client.LogRequestHeader", - Fn: logRequestHeader, -} - -func logRequestHeader(r *request.Request) { - if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { - return - } - - b, err := httputil.DumpRequestOut(r.HTTPRequest, false) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -const logRespErrMsg = `DEBUG ERROR: Response %s/%s: ----[ RESPONSE DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -// LogHTTPResponseHandler is a SDK request handler to log the HTTP response -// received from a service. Will include the HTTP response body if the LogLevel -// of the request matches LogDebugWithHTTPBody. -var LogHTTPResponseHandler = request.NamedHandler{ - Name: "awssdk.client.LogResponse", - Fn: logResponse, -} - -func logResponse(r *request.Request) { - if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { - return - } - - lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} - - if r.HTTPResponse == nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) - return - } - - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - if logBody { - r.HTTPResponse.Body = &teeReaderCloser{ - Reader: io.TeeReader(r.HTTPResponse.Body, lw), - Source: r.HTTPResponse.Body, - } - } - - handlerFn := func(req *request.Request) { - b, err := httputil.DumpResponse(req.HTTPResponse, false) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - - lw.Logger.Log(fmt.Sprintf(logRespMsg, - req.ClientInfo.ServiceName, req.Operation.Name, string(b))) - - if logBody { - b, err := ioutil.ReadAll(lw.buf) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - - lw.Logger.Log(string(b)) - } - } - - const handlerName = "awsdk.client.LogResponse.ResponseBody" - - r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) - r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) -} - -// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP -// response received from a service. Will only log the HTTP response's headers. -// The response payload will not be read. 
-var LogHTTPResponseHeaderHandler = request.NamedHandler{ - Name: "awssdk.client.LogResponseHeader", - Fn: logResponseHeader, -} - -func logResponseHeader(r *request.Request) { - if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { - return - } - - b, err := httputil.DumpResponse(r.HTTPResponse, false) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go deleted file mode 100644 index a7530ebb38..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ /dev/null @@ -1,15 +0,0 @@ -package metadata - -// ClientInfo wraps immutable data from the client.Client structure. -type ClientInfo struct { - ServiceName string - ServiceID string - APIVersion string - PartitionID string - Endpoint string - SigningName string - SigningRegion string - JSONVersion string - TargetPrefix string - ResolvedRegion string -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go deleted file mode 100644 index 881d575f01..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// NoOpRetryer provides a retryer that performs no retries. -// It should be used when we do not want retries to be performed. -type NoOpRetryer struct{} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API; For NoOpRetryer the MaxRetries will always be zero. -func (d NoOpRetryer) MaxRetries() int { - return 0 -} - -// ShouldRetry will always return false for NoOpRetryer, as it should never retry. -func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { - return false -} - -// RetryRules returns the delay duration before retrying this request again; -// since NoOpRetryer does not retry, RetryRules always returns 0. -func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { - return 0 -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go deleted file mode 100644 index 4818ea427e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ /dev/null @@ -1,631 +0,0 @@ -package aws - -import ( - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" -) - -// UseServiceDefaultRetries instructs the config to use the service's own -// default number of retries. This will be the default action if -// Config.MaxRetries is nil also. -const UseServiceDefaultRetries = -1 - -// RequestRetryer is an alias for a type that implements the request.Retryer -// interface. -type RequestRetryer interface{} - -// A Config provides service configuration for service clients. By default, -// all clients will use the defaults.DefaultConfig structure. -// -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(&aws.Config{ -// MaxRetries: aws.Int(3), -// })) -// -// // Create S3 service client with a specific Region. 
-// svc := s3.New(sess, &aws.Config{ -// Region: aws.String("us-west-2"), -// }) -type Config struct { - // Enables verbose error printing of all credential chain errors. - // Should be used when wanting to see all errors while attempting to - // retrieve credentials. - CredentialsChainVerboseErrors *bool - - // The credentials object to use when signing requests. Defaults to a - // chain of credential providers to search for credentials in environment - // variables, shared credential file, and EC2 Instance Roles. - Credentials *credentials.Credentials - - // An optional endpoint URL (hostname only or fully qualified URI) - // that overrides the default generated endpoint for a client. Set this - // to `nil` or to `""` to use the default generated endpoint. - // - // Note: You must still provide a `Region` value when specifying an - // endpoint for a client. - Endpoint *string - - // The resolver to use for looking up endpoints for AWS service clients - // to use based on region. - EndpointResolver endpoints.Resolver - - // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call - // ShouldRetry regardless of whether or not request.Retryable is set. - // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck - // is not set, then ShouldRetry will only be called if request.Retryable is nil. - // Proper handling of the request.Retryable field is important when setting this field. - EnforceShouldRetryCheck *bool - - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS - // Regions and Endpoints. - Region *string - - // Set this to `true` to disable SSL when sending requests. Defaults - // to `false`. - DisableSSL *bool - - // The HTTP client to use when sending requests. Defaults to - // `http.DefaultClient`. - HTTPClient *http.Client - - // An integer value representing the logging level. The default log level - // is zero (LogOff), which represents no logging. To enable logging set - // to a LogLevel Value. - LogLevel *LogLevelType - - // The logger writer interface to write logging messages to. Defaults to - // standard out. - Logger Logger - - // The maximum number of times that a request will be retried for failures. - // Defaults to -1, which defers the max retry setting to the service - // specific configuration. - MaxRetries *int - - // Retryer guides how HTTP requests should be retried in case of - // recoverable failures. - // - // When nil or the value does not implement the request.Retryer interface, - // the client.DefaultRetryer will be used. - // - // When both Retryer and MaxRetries are non-nil, the former is used and - // the latter ignored. - // - // To set the Retryer field in a type-safe manner and with chaining, use - // the request.WithRetryer helper function: - // - // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) - // - Retryer RequestRetryer - - // Disables semantic parameter validation, which validates input for - // missing required fields and/or other semantic request input errors. - DisableParamValidation *bool - - // Disables the computation of request and response checksums, e.g., - // CRC32 checksums in Amazon DynamoDB.
- DisableComputeChecksums *bool - - // Set this to `true` to force the request to use path-style addressing, - // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client - // will use virtual hosted bucket addressing when possible - // (`http://BUCKET.s3.amazonaws.com/KEY`). - // - // Note: This configuration option is specific to the Amazon S3 service. - // - // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // for Amazon S3: Virtual Hosting of Buckets - S3ForcePathStyle *bool - - // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` - // header to PUT requests over 2MB of content. 100-Continue instructs the - // HTTP client not to send the body until the service responds with a - // `continue` status. This is useful to prevent sending the request body - // until after the request is authenticated, and validated. - // - // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html - // - // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s - // `ExpectContinueTimeout` for information on adjusting the continue wait - // timeout. https://golang.org/pkg/net/http/#Transport - // - // You should use this flag to disable 100-Continue if you experience issues - // with proxies or third party S3 compatible services. - S3Disable100Continue *bool - - // Set this to `true` to enable the S3 Accelerate feature. All operations - // compatible with S3 Accelerate will use the accelerate endpoint for - // requests. Requests not compatible will fall back to normal S3 requests. - // - // The bucket must be enabled for accelerate to be used with an S3 client with - // accelerate enabled. If the bucket is not enabled for accelerate an error - // will be returned. The bucket name must be DNS compatible to also work - // with accelerate. - S3UseAccelerate *bool - - // S3DisableContentMD5Validation config option is temporarily disabled - // for S3 GetObject API calls, #1837. - // - // Set this to `true` to disable the S3 service client from automatically - // adding the ContentMD5 to S3 Object Put and Upload API calls. This option - // will also disable the SDK from performing object ContentMD5 validation - // on GetObject API calls. - S3DisableContentMD5Validation *bool - - // Set this to `true` to have the S3 service client use the region specified - // in the ARN, when an ARN is provided as an argument to a bucket parameter. - S3UseARNRegion *bool - - // Set this to `true` to enable the SDK to unmarshal API response header maps to - // normalized lower case map keys. - // - // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case - // Metadata member's map keys. The value of the header in the map is unaffected. - // - // The AWS SDK for Go v2 uses lower case header maps by default. The v1 - // SDK provides this opt-in for this option, for backwards compatibility. - LowerCaseHeaderMaps *bool - - // Set this to `true` to disable the EC2Metadata client from overriding the - // default http.Client's Timeout. This is helpful if you do not want the - // EC2Metadata client to create a new http.Client. This option is only - // meaningful if you're not already using a custom HTTP client with the - // SDK. Enabled by default. - // - // Must be set and provided to the session.NewSession() in order to disable - // the EC2Metadata overriding the timeout for default credentials chain.
- // - // Example: - // sess := session.Must(session.NewSession(aws.NewConfig() - // .WithEC2MetadataDisableTimeoutOverride(true))) - // - // svc := s3.New(sess) - // - EC2MetadataDisableTimeoutOverride *bool - - // Instructs the endpoint to be generated for a service client to - // be the dual stack endpoint. The dual stack endpoint will support - // both IPv4 and IPv6 addressing. - // - // Setting this for a service which does not support dual stack will fail - // to make requests. It is not recommended to set this value on the session - // as it will apply to all service clients created with the session, even - // services which don't support dual stack endpoints. - // - // If the Endpoint config value is also provided the UseDualStack flag - // will be ignored. - // - // Only supported with: - // - // sess := session.Must(session.NewSession()) - // - // svc := s3.New(sess, &aws.Config{ - // UseDualStack: aws.Bool(true), - // }) - // - // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility. - // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients - // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher - // precedence than this option. - UseDualStack *bool - - // Sets the resolver to resolve a dual-stack endpoint for the service. - UseDualStackEndpoint endpoints.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. - UseFIPSEndpoint endpoints.FIPSEndpointState - - // SleepDelay is an override for the func the SDK will call when sleeping - // during the lifecycle of a request. Specifically this will be used for - // request delays. This value should only be used for testing. To adjust - // the delay of a request see the aws/client.DefaultRetryer and - // aws/request.Retryer. - // - // SleepDelay will prevent any Context from being used for canceling retry - // delay of an API operation. It is recommended to not use SleepDelay at all - // and specify a Retryer instead. - SleepDelay func(time.Duration) - - // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. - // Will default to false. This would only be used for empty directory names in s3 requests. - // - // Example: - // sess := session.Must(session.NewSession(&aws.Config{ - // DisableRestProtocolURICleaning: aws.Bool(true), - // })) - // - // svc := s3.New(sess) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("//foo//bar//moo"), - // }) - DisableRestProtocolURICleaning *bool - - // EnableEndpointDiscovery will allow for endpoint discovery on operations that - // have the definition in its model. By default, endpoint discovery is off. - // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. - // - // Example: - // sess := session.Must(session.NewSession(&aws.Config{ - // EnableEndpointDiscovery: aws.Bool(true), - // })) - // - // svc := s3.New(sess) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("/foo/bar/moo"), - // }) - EnableEndpointDiscovery *bool - - // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing - // request endpoint hosts with modeled information. - // - // Disabling this feature is useful when you want to use local endpoints - // for testing that do not support the modeled host prefix pattern.
- DisableEndpointHostPrefix *bool - - // STSRegionalEndpoint will enable regional or legacy endpoint resolving - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint -} - -// NewConfig returns a new Config pointer that can be chained with builder -// methods to set multiple configuration values inline without using pointers. -// -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(aws.NewConfig(). -// WithMaxRetries(3), -// )) -// -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, aws.NewConfig(). -// WithRegion("us-west-2"), -// ) -func NewConfig() *Config { - return &Config{} -} - -// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning -// a Config pointer. -func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { - c.CredentialsChainVerboseErrors = &verboseErrs - return c -} - -// WithCredentials sets a config Credentials value returning a Config pointer -// for chaining. -func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { - c.Credentials = creds - return c -} - -// WithEndpoint sets a config Endpoint value returning a Config pointer for -// chaining. -func (c *Config) WithEndpoint(endpoint string) *Config { - c.Endpoint = &endpoint - return c -} - -// WithEndpointResolver sets a config EndpointResolver value returning a -// Config pointer for chaining. -func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { - c.EndpointResolver = resolver - return c -} - -// WithRegion sets a config Region value returning a Config pointer for -// chaining. -func (c *Config) WithRegion(region string) *Config { - c.Region = &region - return c -} - -// WithDisableSSL sets a config DisableSSL value returning a Config pointer -// for chaining. -func (c *Config) WithDisableSSL(disable bool) *Config { - c.DisableSSL = &disable - return c -} - -// WithHTTPClient sets a config HTTPClient value returning a Config pointer -// for chaining. -func (c *Config) WithHTTPClient(client *http.Client) *Config { - c.HTTPClient = client - return c -} - -// WithMaxRetries sets a config MaxRetries value returning a Config pointer -// for chaining. -func (c *Config) WithMaxRetries(max int) *Config { - c.MaxRetries = &max - return c -} - -// WithDisableParamValidation sets a config DisableParamValidation value -// returning a Config pointer for chaining. -func (c *Config) WithDisableParamValidation(disable bool) *Config { - c.DisableParamValidation = &disable - return c -} - -// WithDisableComputeChecksums sets a config DisableComputeChecksums value -// returning a Config pointer for chaining. -func (c *Config) WithDisableComputeChecksums(disable bool) *Config { - c.DisableComputeChecksums = &disable - return c -} - -// WithLogLevel sets a config LogLevel value returning a Config pointer for -// chaining. -func (c *Config) WithLogLevel(level LogLevelType) *Config { - c.LogLevel = &level - return c -} - -// WithLogger sets a config Logger value returning a Config pointer for -// chaining. -func (c *Config) WithLogger(logger Logger) *Config { - c.Logger = logger - return c -} - -// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config -// pointer for chaining.
-func (c *Config) WithS3ForcePathStyle(force bool) *Config { - c.S3ForcePathStyle = &force - return c -} - -// WithS3Disable100Continue sets a config S3Disable100Continue value returning -// a Config pointer for chaining. -func (c *Config) WithS3Disable100Continue(disable bool) *Config { - c.S3Disable100Continue = &disable - return c -} - -// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config -// pointer for chaining. -func (c *Config) WithS3UseAccelerate(enable bool) *Config { - c.S3UseAccelerate = &enable - return c - -} - -// WithS3DisableContentMD5Validation sets a config -// S3DisableContentMD5Validation value returning a Config pointer for chaining. -func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { - c.S3DisableContentMD5Validation = &enable - return c - -} - -// WithS3UseARNRegion sets a config S3UseARNRegion value and -// returning a Config pointer for chaining -func (c *Config) WithS3UseARNRegion(enable bool) *Config { - c.S3UseARNRegion = &enable - return c -} - -// WithUseDualStack sets a config UseDualStack value returning a Config -// pointer for chaining. -func (c *Config) WithUseDualStack(enable bool) *Config { - c.UseDualStack = &enable - return c -} - -// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value -// returning a Config pointer for chaining. -func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { - c.EC2MetadataDisableTimeoutOverride = &enable - return c -} - -// WithSleepDelay overrides the function used to sleep while waiting for the -// next retry. Defaults to time.Sleep. -func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { - c.SleepDelay = fn - return c -} - -// WithEndpointDiscovery will set whether or not to use endpoint discovery. -func (c *Config) WithEndpointDiscovery(t bool) *Config { - c.EnableEndpointDiscovery = &t - return c -} - -// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix -// when making requests. -func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { - c.DisableEndpointHostPrefix = &t - return c -} - -// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag -// when resolving the endpoint for a service -func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { - c.STSRegionalEndpoint = sre - return c -} - -// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag -// when resolving the endpoint for a service -func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { - c.S3UsEast1RegionalEndpoint = sre - return c -} - -// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value -// returning a Config pointer for chaining. -func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config { - c.LowerCaseHeaderMaps = &t - return c -} - -// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value -// returning a Config pointer for chaining. -func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config { - c.DisableRestProtocolURICleaning = &t - return c -} - -// MergeIn merges the passed in configs into the existing config object. 
-func (c *Config) MergeIn(cfgs ...*Config) { - for _, other := range cfgs { - mergeInConfig(c, other) - } -} - -func mergeInConfig(dst *Config, other *Config) { - if other == nil { - return - } - - if other.CredentialsChainVerboseErrors != nil { - dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors - } - - if other.Credentials != nil { - dst.Credentials = other.Credentials - } - - if other.Endpoint != nil { - dst.Endpoint = other.Endpoint - } - - if other.EndpointResolver != nil { - dst.EndpointResolver = other.EndpointResolver - } - - if other.Region != nil { - dst.Region = other.Region - } - - if other.DisableSSL != nil { - dst.DisableSSL = other.DisableSSL - } - - if other.HTTPClient != nil { - dst.HTTPClient = other.HTTPClient - } - - if other.LogLevel != nil { - dst.LogLevel = other.LogLevel - } - - if other.Logger != nil { - dst.Logger = other.Logger - } - - if other.MaxRetries != nil { - dst.MaxRetries = other.MaxRetries - } - - if other.Retryer != nil { - dst.Retryer = other.Retryer - } - - if other.DisableParamValidation != nil { - dst.DisableParamValidation = other.DisableParamValidation - } - - if other.DisableComputeChecksums != nil { - dst.DisableComputeChecksums = other.DisableComputeChecksums - } - - if other.S3ForcePathStyle != nil { - dst.S3ForcePathStyle = other.S3ForcePathStyle - } - - if other.S3Disable100Continue != nil { - dst.S3Disable100Continue = other.S3Disable100Continue - } - - if other.S3UseAccelerate != nil { - dst.S3UseAccelerate = other.S3UseAccelerate - } - - if other.S3DisableContentMD5Validation != nil { - dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation - } - - if other.S3UseARNRegion != nil { - dst.S3UseARNRegion = other.S3UseARNRegion - } - - if other.UseDualStack != nil { - dst.UseDualStack = other.UseDualStack - } - - if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { - dst.UseDualStackEndpoint = other.UseDualStackEndpoint - } - - if other.EC2MetadataDisableTimeoutOverride != nil { - dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride - } - - if other.SleepDelay != nil { - dst.SleepDelay = other.SleepDelay - } - - if other.DisableRestProtocolURICleaning != nil { - dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning - } - - if other.EnforceShouldRetryCheck != nil { - dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck - } - - if other.EnableEndpointDiscovery != nil { - dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery - } - - if other.DisableEndpointHostPrefix != nil { - dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix - } - - if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { - dst.STSRegionalEndpoint = other.STSRegionalEndpoint - } - - if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { - dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint - } - - if other.LowerCaseHeaderMaps != nil { - dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps - } - - if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { - dst.UseDualStackEndpoint = other.UseDualStackEndpoint - } - - if other.UseFIPSEndpoint != endpoints.FIPSEndpointStateUnset { - dst.UseFIPSEndpoint = other.UseFIPSEndpoint - } -} - -// Copy will return a shallow copy of the Config object. If any additional -// configurations are provided they will be merged into the new config returned. 
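`mergeInConfig` above only copies fields that the override explicitly set (non-nil pointers), which is what makes layered configuration work; `Copy`, directly below, builds on the same mechanism. A small usage sketch against `github.com/aws/aws-sdk-go/aws` v1 as it stood before this change removed it:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Only fields explicitly set on the override (non-nil pointers) win.
	base := aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(3)
	override := aws.NewConfig().WithMaxRetries(10) // Region left nil

	base.MergeIn(override)
	fmt.Println(aws.StringValue(base.Region), aws.IntValue(base.MaxRetries))
	// us-west-2 10
}
```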
-func (c *Config) Copy(cfgs ...*Config) *Config { - dst := &Config{} - dst.MergeIn(c) - - for _, cfg := range cfgs { - dst.MergeIn(cfg) - } - - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go deleted file mode 100644 index 89aad2c677..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build !go1.9 -// +build !go1.9 - -package aws - -import "time" - -// Context is a copy of the Go v1.7 stdlib's context.Context interface. -// It is represented as a SDK interface to enable you to use the "WithContext" -// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - Value(key interface{}) interface{} -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go deleted file mode 100644 index 6ee9ddd18b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build go1.9 -// +build go1.9 - -package aws - -import "context" - -// Context is an alias of the Go stdlib's context.Context interface. -// It can be used within the SDK's API operation "WithContext" methods. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go deleted file mode 100644 index 3132181904..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build !go1.7 -// +build !go1.7 - -package aws - -import ( - "github.com/aws/aws-sdk-go/internal/context" -) - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts.
-func BackgroundContext() Context { - return context.BackgroundCtx -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go deleted file mode 100644 index 9975d561bb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build go1.7 -// +build go1.7 - -package aws - -import "context" - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func BackgroundContext() Context { - return context.Background() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go deleted file mode 100644 index 304fd15612..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go +++ /dev/null @@ -1,24 +0,0 @@ -package aws - -import ( - "time" -) - -// SleepWithContext will wait for the timer duration to expire or the context -// to be canceled, whichever happens first. If the context is canceled the -// Context's error will be returned. -// -// Expects Context to always return a non-nil error if the Done channel is closed. -func SleepWithContext(ctx Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go deleted file mode 100644 index 4e076c1837..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ /dev/null @@ -1,918 +0,0 @@ -package aws - -import "time" - -// String returns a pointer to the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. -func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to the bool value passed in.
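The deleted `SleepWithContext` above is the idiomatic way to make a retry delay cancelable: race a timer against `ctx.Done()`. A runnable usage sketch, stdlib only:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// sleepWithContext mirrors the deleted helper: wait for the timer or the
// context, whichever finishes first.
func sleepWithContext(ctx context.Context, d time.Duration) error {
	t := time.NewTimer(d)
	defer t.Stop()
	select {
	case <-t.C:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// The one-second sleep is cut short by the 50ms deadline.
	fmt.Println(sleepWithContext(ctx, time.Second)) // context deadline exceeded
}
```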
-func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. -func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. -func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint returns a pointer to the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintValue returns the value of the uint pointer passed in or -// 0 if the pointer is nil. 
-func UintValue(v *uint) uint { - if v != nil { - return *v - } - return 0 -} - -// UintSlice converts a slice of uint values into a slice of -// uint pointers -func UintSlice(src []uint) []*uint { - dst := make([]*uint, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// UintValueSlice converts a slice of uint pointers into a slice of -// uint values -func UintValueSlice(src []*uint) []uint { - dst := make([]uint, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// UintMap converts a string map of uint values into a string -// map of uint pointers -func UintMap(src map[string]uint) map[string]*uint { - dst := make(map[string]*uint) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// UintValueMap converts a string map of uint pointers into a string -// map of uint values -func UintValueMap(src map[string]*uint) map[string]uint { - dst := make(map[string]uint) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int8 returns a pointer to the int8 value passed in. -func Int8(v int8) *int8 { - return &v -} - -// Int8Value returns the value of the int8 pointer passed in or -// 0 if the pointer is nil. -func Int8Value(v *int8) int8 { - if v != nil { - return *v - } - return 0 -} - -// Int8Slice converts a slice of int8 values into a slice of -// int8 pointers -func Int8Slice(src []int8) []*int8 { - dst := make([]*int8, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int8ValueSlice converts a slice of int8 pointers into a slice of -// int8 values -func Int8ValueSlice(src []*int8) []int8 { - dst := make([]int8, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int8Map converts a string map of int8 values into a string -// map of int8 pointers -func Int8Map(src map[string]int8) map[string]*int8 { - dst := make(map[string]*int8) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int8ValueMap converts a string map of int8 pointers into a string -// map of int8 values -func Int8ValueMap(src map[string]*int8) map[string]int8 { - dst := make(map[string]int8) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int16 returns a pointer to the int16 value passed in. -func Int16(v int16) *int16 { - return &v -} - -// Int16Value returns the value of the int16 pointer passed in or -// 0 if the pointer is nil.
-func Int16Value(v *int16) int16 { - if v != nil { - return *v - } - return 0 -} - -// Int16Slice converts a slice of int16 values into a slice of -// int16 pointers -func Int16Slice(src []int16) []*int16 { - dst := make([]*int16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int16ValueSlice converts a slice of int16 pointers into a slice of -// int16 values -func Int16ValueSlice(src []*int16) []int16 { - dst := make([]int16, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int16Map converts a string map of int16 values into a string -// map of int16 pointers -func Int16Map(src map[string]int16) map[string]*int16 { - dst := make(map[string]*int16) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int16ValueMap converts a string map of int16 pointers into a string -// map of int16 values -func Int16ValueMap(src map[string]*int16) map[string]int16 { - dst := make(map[string]int16) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int32 returns a pointer to the int32 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Value returns the value of the int32 pointer passed in or -// 0 if the pointer is nil. -func Int32Value(v *int32) int32 { - if v != nil { - return *v - } - return 0 -} - -// Int32Slice converts a slice of int32 values into a slice of -// int32 pointers -func Int32Slice(src []int32) []*int32 { - dst := make([]*int32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int32ValueSlice converts a slice of int32 pointers into a slice of -// int32 values -func Int32ValueSlice(src []*int32) []int32 { - dst := make([]int32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int32Map converts a string map of int32 values into a string -// map of int32 pointers -func Int32Map(src map[string]int32) map[string]*int32 { - dst := make(map[string]*int32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int32ValueMap converts a string map of int32 pointers into a string -// map of int32 values -func Int32ValueMap(src map[string]*int32) map[string]int32 { - dst := make(map[string]int32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. 
-func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint8 returns a pointer to the uint8 value passed in. -func Uint8(v uint8) *uint8 { - return &v -} - -// Uint8Value returns the value of the uint8 pointer passed in or -// 0 if the pointer is nil. -func Uint8Value(v *uint8) uint8 { - if v != nil { - return *v - } - return 0 -} - -// Uint8Slice converts a slice of uint8 values into a slice of -// uint8 pointers -func Uint8Slice(src []uint8) []*uint8 { - dst := make([]*uint8, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint8ValueSlice converts a slice of uint8 pointers into a slice of -// uint8 values -func Uint8ValueSlice(src []*uint8) []uint8 { - dst := make([]uint8, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint8Map converts a string map of uint8 values into a string -// map of uint8 pointers -func Uint8Map(src map[string]uint8) map[string]*uint8 { - dst := make(map[string]*uint8) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint8ValueMap converts a string map of uint8 pointers into a string -// map of uint8 values -func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { - dst := make(map[string]uint8) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint16 returns a pointer to the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return &v -} - -// Uint16Value returns the value of the uint16 pointer passed in or -// 0 if the pointer is nil. 
-func Uint16Value(v *uint16) uint16 { - if v != nil { - return *v - } - return 0 -} - -// Uint16Slice converts a slice of uint16 values into a slice of -// uint16 pointers -func Uint16Slice(src []uint16) []*uint16 { - dst := make([]*uint16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint16ValueSlice converts a slice of uint16 pointers into a slice of -// uint16 values -func Uint16ValueSlice(src []*uint16) []uint16 { - dst := make([]uint16, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint16Map converts a string map of uint16 values into a string -// map of uint16 pointers -func Uint16Map(src map[string]uint16) map[string]*uint16 { - dst := make(map[string]*uint16) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint16ValueMap converts a string map of uint16 pointers into a string -// map of uint16 values -func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { - dst := make(map[string]uint16) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint32 returns a pointer to the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Value returns the value of the uint32 pointer passed in or -// 0 if the pointer is nil. -func Uint32Value(v *uint32) uint32 { - if v != nil { - return *v - } - return 0 -} - -// Uint32Slice converts a slice of uint32 values into a slice of -// uint32 pointers -func Uint32Slice(src []uint32) []*uint32 { - dst := make([]*uint32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint32ValueSlice converts a slice of uint32 pointers into a slice of -// uint32 values -func Uint32ValueSlice(src []*uint32) []uint32 { - dst := make([]uint32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint32Map converts a string map of uint32 values into a string -// map of uint32 pointers -func Uint32Map(src map[string]uint32) map[string]*uint32 { - dst := make(map[string]*uint32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint32ValueMap converts a string map of uint32 pointers into a string -// map of uint32 values -func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { - dst := make(map[string]uint32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint64 returns a pointer to the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Value returns the value of the uint64 pointer passed in or -// 0 if the pointer is nil. 
-func Uint64Value(v *uint64) uint64 { - if v != nil { - return *v - } - return 0 -} - -// Uint64Slice converts a slice of uint64 values into a slice of -// uint64 pointers -func Uint64Slice(src []uint64) []*uint64 { - dst := make([]*uint64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint64ValueSlice converts a slice of uint64 pointers into a slice of -// uint64 values -func Uint64ValueSlice(src []*uint64) []uint64 { - dst := make([]uint64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint64Map converts a string map of uint64 values into a string -// map of uint64 pointers -func Uint64Map(src map[string]uint64) map[string]*uint64 { - dst := make(map[string]*uint64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint64ValueMap converts a string map of uint64 pointers into a string -// map of uint64 values -func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { - dst := make(map[string]uint64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float32 returns a pointer to the float32 value passed in. -func Float32(v float32) *float32 { - return &v -} - -// Float32Value returns the value of the float32 pointer passed in or -// 0 if the pointer is nil. -func Float32Value(v *float32) float32 { - if v != nil { - return *v - } - return 0 -} - -// Float32Slice converts a slice of float32 values into a slice of -// float32 pointers -func Float32Slice(src []float32) []*float32 { - dst := make([]*float32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float32ValueSlice converts a slice of float32 pointers into a slice of -// float32 values -func Float32ValueSlice(src []*float32) []float32 { - dst := make([]float32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float32Map converts a string map of float32 values into a string -// map of float32 pointers -func Float32Map(src map[string]float32) map[string]*float32 { - dst := make(map[string]*float32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float32ValueMap converts a string map of float32 pointers into a string -// map of float32 values -func Float32ValueMap(src map[string]*float32) map[string]float32 { - dst := make(map[string]float32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. 
-func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. -func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// SecondsTimeValue converts an int64 pointer to a time.Time value -// representing seconds since Epoch or time.Time{} if the pointer is nil. -func SecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix((*v / 1000), 0) - } - return time.Time{} -} - -// MillisecondsTimeValue converts an int64 pointer to a time.Time value -// representing milliseconds since Epoch or time.Time{} if the pointer is nil. -func MillisecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix(0, (*v * 1000000)) - } - return time.Time{} -} - -// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". -// The result is undefined if the Unix time cannot be represented by an int64; -// this includes calling TimeUnixMilli on a zero Time. -// -// This utility is useful for service APIs such as CloudWatch Logs which require -// their unix time values to be in milliseconds. -// -// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
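As an aside, the long run of per-type pointer helpers above (`String`/`StringValue`, `Int`/`IntValue`, and so on) predates Go generics; on Go 1.18+ the whole family collapses to a single pair. For reference only, not part of this change:

```go
package ptr

// To returns a pointer to v; Value dereferences p, yielding the zero
// value when p is nil. Together they subsume every helper above.
func To[T any](v T) *T { return &v }

func Value[T any](p *T) T {
	if p != nil {
		return *p
	}
	var zero T
	return zero
}
```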
-func TimeUnixMilli(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go deleted file mode 100644 index 36a915efea..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ /dev/null @@ -1,232 +0,0 @@ -package corehandlers - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Interface for matching types which also have a Len method. -type lener interface { - Len() int -} - -// BuildContentLengthHandler builds the content length of a request based on the body, -// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable -// to determine request body length and no "Content-Length" was specified it will panic. -// -// The Content-Length will only be added to the request if the length of the body -// is greater than 0. If the body is empty or the current `Content-Length` -// header is <= 0, the header will also be stripped. -var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { - var length int64 - - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ = strconv.ParseInt(slength, 10, 64) - } else { - if r.Body != nil { - var err error - length, err = aws.SeekerLen(r.Body) - if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) - return - } - } - } - - if length > 0 { - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) - } else { - r.HTTPRequest.ContentLength = 0 - r.HTTPRequest.Header.Del("Content-Length") - } -}} - -var reStatusCode = regexp.MustCompile(`^(\d{3})`) - -// ValidateReqSigHandler is a request handler to ensure that the request's -// signature doesn't expire before it is sent. This can happen when a request -// is built and signed significantly before it is sent. Or significant delays -// occur when retrying requests that would cause the signature to expire. 
-var ValidateReqSigHandler = request.NamedHandler{ - Name: "core.ValidateReqSigHandler", - Fn: func(r *request.Request) { - // Unsigned requests are not signed - if r.Config.Credentials == credentials.AnonymousCredentials { - return - } - - signedTime := r.Time - if !r.LastSignedAt.IsZero() { - signedTime = r.LastSignedAt - } - - // 5 minutes to allow for some clock skew/delays in transmission. - // Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(5 * time.Minute).After(time.Now()) { - return - } - - fmt.Println("request expired, resigning") - r.Sign() - }, -} - -// SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{ - Name: "core.SendHandler", - Fn: func(r *request.Request) { - sender := sendFollowRedirects - if r.DisableFollowRedirects { - sender = sendWithoutFollowRedirects - } - - if request.NoBody == r.HTTPRequest.Body { - // Strip off the request body if the NoBody reader was used as a - // place holder for a request body. This prevents the SDK from - // making requests with a request body when it would be invalid - // to do so. - // - // Use a shallow copy of the http.Request to ensure the race condition - // of transport on Body will not trigger - reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest - reqCopy.Body = nil - r.HTTPRequest = &reqCopy - defer func() { - r.HTTPRequest = reqOrig - }() - } - - var err error - r.HTTPResponse, err = sender(r) - if err != nil { - handleSendError(r, err) - } - }, -} - -func sendFollowRedirects(r *request.Request) (*http.Response, error) { - return r.Config.HTTPClient.Do(r.HTTPRequest) -} - -func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { - transport := r.Config.HTTPClient.Transport - if transport == nil { - transport = http.DefaultTransport - } - - return transport.RoundTrip(r.HTTPRequest) -} - -func handleSendError(r *request.Request, err error) { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() - } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other URL redirect errors will - // comeback in a similar method. - if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } - } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. - r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - } - // Catch all request errors, and let the default retrier determine - // if the error is retryable. - r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) - - // Override the error with a context canceled error, if that was canceled. - ctx := r.Context() - select { - case <-ctx.Done(): - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", ctx.Err()) - r.Retryable = aws.Bool(false) - default: - } -} - -// ValidateResponseHandler is a request handler to validate service response. 
-var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { - if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { - // this may be replaced by an UnmarshalError handler - r.Error = awserr.New("UnknownError", "unknown error", r.Error) - } -}} - -// AfterRetryHandler performs final checks to determine if the request should -// be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{ - Name: "core.AfterRetryHandler", - Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } - - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) - - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } - - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } - - r.RetryCount++ - r.Error = nil - } - }} - -// ValidateEndpointHandler is a request handler to validate a request had the -// appropriate Region and Endpoint set. Will set r.Error if the endpoint or -// region is not valid. -var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { - if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { - r.Error = aws.ErrMissingRegion - } else if r.ClientInfo.Endpoint == "" { - // Was any endpoint provided by the user, or one was derived by the - // SDK's endpoint resolver? - r.Error = aws.ErrMissingEndpoint - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go deleted file mode 100644 index 7d50b1557c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go +++ /dev/null @@ -1,17 +0,0 @@ -package corehandlers - -import "github.com/aws/aws-sdk-go/aws/request" - -// ValidateParametersHandler is a request handler to validate the input parameters. -// Validating parameters only has meaning if done prior to the request being sent. -var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { - if !r.ParamsFilled() { - return - } - - if v, ok := r.Params.(request.Validator); ok { - if err := v.Validate(); err != nil { - r.Error = err - } - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go deleted file mode 100644 index ab69c7a6f3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ /dev/null @@ -1,37 +0,0 @@ -package corehandlers - -import ( - "os" - "runtime" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version -// to the user agent. 
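The core handlers above (content length, send, response validation, retry, endpoint and parameter validation) all hang off per-request handler lists, and application code can splice its own steps into the same lists. A sketch assuming the usual session setup; the handler name and log line are invented for the example:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Splice a custom named step into the same Send list the core
        // SendHandler runs in; "example.LogRequestHandler" is made up.
        sess.Handlers.Send.PushFrontNamed(request.NamedHandler{
            Name: "example.LogRequestHandler",
            Fn: func(r *request.Request) {
                log.Printf("sending %s %s", r.HTTPRequest.Method, r.HTTPRequest.URL)
            },
        })
    }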
-var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - -const execEnvVar = `AWS_EXECUTION_ENV` -const execEnvUAKey = `exec-env` - -// AddHostExecEnvUserAgentHander is a request handler appending the SDK's -// execution environment to the user agent. -// -// If the environment variable AWS_EXECUTION_ENV is set, its value will be -// appended to the user agent string. -var AddHostExecEnvUserAgentHander = request.NamedHandler{ - Name: "core.AddHostExecEnvUserAgentHander", - Fn: func(r *request.Request) { - v := os.Getenv(execEnvVar) - if len(v) == 0 { - return - } - - request.AddToUserAgent(r, execEnvUAKey+"/"+v) - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go deleted file mode 100644 index 3ad1e798df..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ /dev/null @@ -1,100 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var ( - // ErrNoValidProvidersFoundInChain Is returned when there are no valid - // providers in the ChainProvider. - // - // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true. - ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. - For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, - nil) -) - -// A ChainProvider will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. -// -// The ChainProvider provides a way of chaining multiple providers together -// which will pick the first available using priority order of the Providers -// in the list. -// -// If none of the Providers retrieve valid credentials Value, ChainProvider's -// Retrieve() will return the error ErrNoValidProvidersFoundInChain. -// -// If a Provider is found which returns valid credentials Value ChainProvider -// will cache that Provider for all calls to IsExpired(), until Retrieve is -// called again. -// -// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. -// In this example EnvProvider will first check if any credentials are available -// via the environment variables. If there are none ChainProvider will check -// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider -// does not return any credentials ChainProvider will return the error -// ErrNoValidProvidersFoundInChain -// -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvProvider{}, -// &ec2rolecreds.EC2RoleProvider{ -// Client: ec2metadata.New(sess), -// }, -// }) -// -// // Usage of ChainCredentials with aws.Config -// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: creds, -// }))) -// -type ChainProvider struct { - Providers []Provider - curr Provider - VerboseErrors bool -} - -// NewChainCredentials returns a pointer to a new Credentials object -// wrapping a chain of providers. -func NewChainCredentials(providers []Provider) *Credentials { - return NewCredentials(&ChainProvider{ - Providers: append([]Provider{}, providers...), - }) -} - -// Retrieve returns the credentials value or error if no provider returned -// without error. 
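The user-agent handlers at the top of this hunk are built with request.MakeAddToUserAgentHandler, and application code could extend the agent string the same way. A sketch; the product name and version are placeholders:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Append "my-app/1.2.3" to the User-Agent of every request
        // built from this session, mirroring SDKVersionUserAgentHandler.
        sess.Handlers.Build.PushBackNamed(request.NamedHandler{
            Name: "example.UserAgentHandler",
            Fn:   request.MakeAddToUserAgentHandler("my-app", "1.2.3"),
        })
    }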
-// -// If a provider is found it will be cached and any calls to IsExpired() -// will return the expired state of the cached provider. -func (c *ChainProvider) Retrieve() (Value, error) { - var errs []error - for _, p := range c.Providers { - creds, err := p.Retrieve() - if err == nil { - c.curr = p - return creds, nil - } - errs = append(errs, err) - } - c.curr = nil - - var err error - err = ErrNoValidProvidersFoundInChain - if c.VerboseErrors { - err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) - } - return Value{}, err -} - -// IsExpired will returned the expired state of the currently cached provider -// if there is one. If there is no current provider, true will be returned. -func (c *ChainProvider) IsExpired() bool { - if c.curr != nil { - return c.curr.IsExpired() - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go deleted file mode 100644 index 6e3406b1f7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build !go1.7 -// +build !go1.7 - -package credentials - -import ( - "github.com/aws/aws-sdk-go/internal/context" -) - -// backgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func backgroundContext() Context { - return context.BackgroundCtx -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go deleted file mode 100644 index a68df0ee73..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build go1.7 -// +build go1.7 - -package credentials - -import "context" - -// backgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func backgroundContext() Context { - return context.Background() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go deleted file mode 100644 index 0345fab2d9..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build !go1.9 -// +build !go1.9 - -package credentials - -import "time" - -// Context is an copy of the Go v1.7 stdlib's context.Context interface. -// It is represented as a SDK interface to enable you to use the "WithContext" -// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. 
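The ChainProvider above caches the first provider that succeeds and only aggregates the individual errors when VerboseErrors is set; NewChainCredentials does not expose that flag, so verbose behavior means building the chain by hand. A sketch; the concrete providers chosen here are just examples:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Same shape as NewChainCredentials builds, but with
        // VerboseErrors enabled so a failed lookup reports every
        // provider's error instead of the deprecated generic message.
        creds := credentials.NewCredentials(&credentials.ChainProvider{
            Providers: []credentials.Provider{
                &credentials.EnvProvider{},
                &credentials.SharedCredentialsProvider{},
            },
            VerboseErrors: true,
        })

        if _, err := creds.Get(); err != nil {
            fmt.Println("no provider succeeded:", err)
        }
    }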
-// -// This type, aws.Context, and context.Context are equivalent. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - Value(key interface{}) interface{} -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go deleted file mode 100644 index 79018aba73..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build go1.9 -// +build go1.9 - -package credentials - -import "context" - -// Context is an alias of the Go stdlib's context.Context interface. -// It can be used within the SDK's API operation "WithContext" methods. -// -// This type, aws.Context, and context.Context are equivalent. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go deleted file mode 100644 index a880a3de8f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ /dev/null @@ -1,383 +0,0 @@ -// Package credentials provides credential retrieval and management -// -// The Credentials is the primary method of getting access to and managing -// credentials Values. Using dependency injection retrieval of the credential -// values is handled by a object which satisfies the Provider interface. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials Value have expired. -// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := credentials.NewEnvCredentials() -// -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. 
-// -// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. -// -// -// Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. To use a custom Provider just -// create a type which satisfies the Provider interface and pass it to the -// NewCredentials method. -// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := credentials.NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() -// -package credentials - -import ( - "fmt" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/sync/singleflight" -) - -// AnonymousCredentials is an empty Credential object that can be used as -// dummy placeholder credentials for requests that do not need signed. -// -// This Credentials can be used to configure a service to not sign requests -// when making service API calls. For example, when accessing public -// s3 buckets. -// -// svc := s3.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: credentials.AnonymousCredentials, -// }))) -// // Access public S3 buckets. -var AnonymousCredentials = NewStaticCredentials("", "", "") - -// A Value is the AWS credentials value for individual credential fields. -type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Provider used to get credentials - ProviderName string -} - -// HasKeys returns if the credentials Value has both AccessKeyID and -// SecretAccessKey value set. -func (v Value) HasKeys() bool { - return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 -} - -// A Provider is the interface for any component which will provide credentials -// Value. A provider is required to manage its own Expired state, and what to -// be expired means. -// -// The Provider should not need to implement its own mutexes, because -// that will be managed by Credentials. -type Provider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Value, error) - - // IsExpired returns if the credentials are no longer valid, and need - // to be retrieved. - IsExpired() bool -} - -// ProviderWithContext is a Provider that can retrieve credentials with a Context -type ProviderWithContext interface { - Provider - - RetrieveWithContext(Context) (Value, error) -} - -// An Expirer is an interface that Providers can implement to expose the expiration -// time, if known. If the Provider cannot accurately provide this info, -// it should not implement this interface. -type Expirer interface { - // The time at which the credentials are no longer valid - ExpiresAt() time.Time -} - -// An ErrorProvider is a stub credentials provider that always returns an error -// this is used by the SDK when construction a known provider is not possible -// due to an error. -type ErrorProvider struct { - // The error to be returned from Retrieve - Err error - - // The provider name to set on the Retrieved returned Value - ProviderName string -} - -// Retrieve will always return the error that the ErrorProvider was created with. 
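The package documentation above sketches a custom Provider but leaves the method bodies empty. One possible runnable version of that sketch; the provider name and the MYAPP_* variables are invented for the example:

    package main

    import (
        "os"

        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    // envPairProvider is a hypothetical custom Provider reading a static
    // key pair from application-specific environment variables.
    type envPairProvider struct{}

    func (p *envPairProvider) Retrieve() (credentials.Value, error) {
        id, secret := os.Getenv("MYAPP_KEY_ID"), os.Getenv("MYAPP_SECRET")
        if id == "" || secret == "" {
            return credentials.Value{ProviderName: "envPairProvider"},
                awserr.New("MyAppCredsNotFound", "MYAPP_KEY_ID or MYAPP_SECRET unset", nil)
        }
        return credentials.Value{
            AccessKeyID:     id,
            SecretAccessKey: secret,
            ProviderName:    "envPairProvider",
        }, nil
    }

    // IsExpired always reports false: these credentials are static.
    func (p *envPairProvider) IsExpired() bool { return false }

    func main() {
        creds := credentials.NewCredentials(&envPairProvider{})
        _, _ = creds.Get()
    }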
-func (p ErrorProvider) Retrieve() (Value, error) { - return Value{ProviderName: p.ProviderName}, p.Err -} - -// IsExpired will always return not expired. -func (p ErrorProvider) IsExpired() bool { - return false -} - -// A Expiry provides shared expiration logic to be used by credentials -// providers to implement expiry functionality. -// -// The best method to use this struct is as an anonymous field within the -// provider's struct. -// -// Example: -// type EC2RoleProvider struct { -// Expiry -// ... -// } -type Expiry struct { - // The date/time when to expire on - expiration time.Time - - // If set will be used by IsExpired to determine the current time. - // Defaults to time.Now if CurrentTime is not set. Available for testing - // to be able to mock out the current time. - CurrentTime func() time.Time -} - -// SetExpiration sets the expiration IsExpired will check when called. -// -// If window is greater than 0 the expiration time will be reduced by the -// window value. -// -// Using a window is helpful to trigger credentials to expire sooner than -// the expiration time given to ensure no requests are made with expired -// tokens. -func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - // Passed in expirations should have the monotonic clock values stripped. - // This ensures time comparisons will be based on wall-time. - e.expiration = expiration.Round(0) - if window > 0 { - e.expiration = e.expiration.Add(-window) - } -} - -// IsExpired returns if the credentials are expired. -func (e *Expiry) IsExpired() bool { - curTime := e.CurrentTime - if curTime == nil { - curTime = time.Now - } - return e.expiration.Before(curTime()) -} - -// ExpiresAt returns the expiration time of the credential -func (e *Expiry) ExpiresAt() time.Time { - return e.expiration -} - -// A Credentials provides concurrency safe retrieval of AWS credentials Value. -// Credentials will cache the credentials value until they expire. Once the value -// expires the next Get will attempt to retrieve valid credentials. -// -// Credentials is safe to use across multiple goroutines and will manage the -// synchronous state so the Providers do not need to implement their own -// synchronization. -// -// The first Credentials.Get() will always call Provider.Retrieve() to get the -// first instance of the credentials Value. All calls to Get() after that -// will return the cached credentials Value until IsExpired() returns true. -type Credentials struct { - sf singleflight.Group - - m sync.RWMutex - creds Value - provider Provider -} - -// NewCredentials returns a pointer to a new Credentials with the provider set. -func NewCredentials(provider Provider) *Credentials { - c := &Credentials{ - provider: provider, - } - return c -} - -// GetWithContext returns the credentials value, or error if the credentials -// Value failed to be retrieved. Will return early if the passed in context is -// canceled. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -// -// Passed in Context is equivalent to aws.Context, and context.Context. -func (c *Credentials) GetWithContext(ctx Context) (Value, error) { - // Check if credentials are cached, and not expired. 
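The Expiry docs above recommend anonymous embedding, so a provider inherits IsExpired and ExpiresAt and only has to call SetExpiration. A sketch with placeholder credential values and an assumed 15-minute token lifetime:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    // rotatingProvider embeds Expiry as an anonymous field; IsExpired
    // and ExpiresAt come from the embedded type.
    type rotatingProvider struct {
        credentials.Expiry
    }

    // Retrieve pretends to mint a 15-minute key pair; the one-minute
    // window passed to SetExpiration makes IsExpired trip early, so
    // in-flight requests never race a just-expired token.
    func (p *rotatingProvider) Retrieve() (credentials.Value, error) {
        p.SetExpiration(time.Now().Add(15*time.Minute), time.Minute)
        return credentials.Value{
            AccessKeyID:     "AKIDEXAMPLE",
            SecretAccessKey: "secret",
            ProviderName:    "rotatingProvider",
        }, nil
    }

    func main() {
        creds := credentials.NewCredentials(&rotatingProvider{})
        _, _ = creds.Get()
    }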
- select { - case curCreds, ok := <-c.asyncIsExpired(): - // ok will only be true, of the credentials were not expired. ok will - // be false and have no value if the credentials are expired. - if ok { - return curCreds, nil - } - case <-ctx.Done(): - return Value{}, awserr.New("RequestCanceled", - "request context canceled", ctx.Err()) - } - - // Cannot pass context down to the actual retrieve, because the first - // context would cancel the whole group when there is not direct - // association of items in the group. - resCh := c.sf.DoChan("", func() (interface{}, error) { - return c.singleRetrieve(&suppressedContext{ctx}) - }) - select { - case res := <-resCh: - return res.Val.(Value), res.Err - case <-ctx.Done(): - return Value{}, awserr.New("RequestCanceled", - "request context canceled", ctx.Err()) - } -} - -func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) { - c.m.Lock() - defer c.m.Unlock() - - if curCreds := c.creds; !c.isExpiredLocked(curCreds) { - return curCreds, nil - } - - var creds Value - var err error - if p, ok := c.provider.(ProviderWithContext); ok { - creds, err = p.RetrieveWithContext(ctx) - } else { - creds, err = c.provider.Retrieve() - } - if err == nil { - c.creds = creds - } - - return creds, err -} - -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - return c.GetWithContext(backgroundContext()) -} - -// Expire expires the credentials and forces them to be retrieved on the -// next call to Get(). -// -// This will override the Provider's expired state, and force Credentials -// to call the Provider's Retrieve(). -func (c *Credentials) Expire() { - c.m.Lock() - defer c.m.Unlock() - - c.creds = Value{} -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be retrieved. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. -func (c *Credentials) IsExpired() bool { - c.m.RLock() - defer c.m.RUnlock() - - return c.isExpiredLocked(c.creds) -} - -// asyncIsExpired returns a channel of credentials Value. If the channel is -// closed the credentials are expired and credentials value are not empty. -func (c *Credentials) asyncIsExpired() <-chan Value { - ch := make(chan Value, 1) - go func() { - c.m.RLock() - defer c.m.RUnlock() - - if curCreds := c.creds; !c.isExpiredLocked(curCreds) { - ch <- curCreds - } - - close(ch) - }() - - return ch -} - -// isExpiredLocked helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpiredLocked(creds interface{}) bool { - return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() -} - -// ExpiresAt provides access to the functionality of the Expirer interface of -// the underlying Provider, if it supports that interface. Otherwise, it returns -// an error. 
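Taken together, Get, Expire, and GetWithContext above give the documented cache-until-expired behavior. A short usage sketch with placeholder static keys:

    package main

    import (
        "context"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        creds := credentials.NewStaticCredentials("AKIDEXAMPLE", "secret", "")

        // The first Get always hits the provider; later calls are
        // served from the cache until IsExpired reports true.
        _, _ = creds.Get()

        // Force the next Get to refresh, as the Expire docs describe.
        creds.Expire()

        // GetWithContext gives up cleanly if the context is canceled
        // while a refresh is still in flight.
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()
        _, _ = creds.GetWithContext(ctx)
    }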
-func (c *Credentials) ExpiresAt() (time.Time, error) { - c.m.RLock() - defer c.m.RUnlock() - - expirer, ok := c.provider.(Expirer) - if !ok { - return time.Time{}, awserr.New("ProviderNotExpirer", - fmt.Sprintf("provider %s does not support ExpiresAt()", - c.creds.ProviderName), - nil) - } - if c.creds == (Value{}) { - // set expiration time to the distant past - return time.Time{}, nil - } - return expirer.ExpiresAt(), nil -} - -type suppressedContext struct { - Context -} - -func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { - return time.Time{}, false -} - -func (s *suppressedContext) Done() <-chan struct{} { - return nil -} - -func (s *suppressedContext) Err() error { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go deleted file mode 100644 index 92af5b7250..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ /dev/null @@ -1,188 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkuri" -) - -// ProviderName provides a name of EC2Role provider -const ProviderName = "EC2RoleProvider" - -// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// Example how to configure the EC2RoleProvider with custom http Client, Endpoint -// or ExpiryWindow -// -// p := &ec2rolecreds.EC2RoleProvider{ -// // Pass in a custom timeout to be used when requesting -// // IAM EC2 Role credentials. -// Client: ec2metadata.New(sess, aws.Config{ -// HTTPClient: &http.Client{Timeout: 10 * time.Second}, -// }), -// -// // Do not use early expiry of credentials. If a non zero value is -// // specified the credentials will be expired early -// ExpiryWindow: 0, -// } -type EC2RoleProvider struct { - credentials.Expiry - - // Required EC2Metadata client to use when connecting to EC2 metadata service. - Client *ec2metadata.EC2Metadata - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. -// The ConfigProvider is satisfied by the session.Session type. -func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: ec2metadata.New(c), - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 -// metadata service. 
-func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: client, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - return m.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - credsList, err := requestCredList(ctx, m.Client) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - if len(credsList) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) - } - credsName := credsList[0] - - roleCreds, err := requestCred(ctx, m.Client, credsName) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - ProviderName: ProviderName, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "iam/security-credentials/" - -// requestCredList requests a list of credentials from the EC2 service. -// If there are no credentials, or there is an error making or receiving the request -func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath) - if err != nil { - return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) - } - - credsList := []string{} - s := bufio.NewScanner(strings.NewReader(resp)) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, awserr.New(request.ErrCodeSerialization, - "failed to read EC2 instance role from metadata service", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. 
-func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) - if err != nil { - return ec2RoleCredRespBody{}, - awserr.New("EC2RoleRequestError", - fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), - err) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - awserr.New(request.ErrCodeSerialization, - fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), - err) - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. - return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) - } - - return respCreds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go deleted file mode 100644 index 785f30d8e6..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ /dev/null @@ -1,210 +0,0 @@ -// Package endpointcreds provides support for retrieving credentials from an -// arbitrary HTTP endpoint. -// -// The credentials endpoint Provider can receive both static and refreshable -// credentials that will expire. Credentials are static when an "Expiration" -// value is not provided in the endpoint's response. -// -// Static credentials will never expire once they have been retrieved. The format -// of the static credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// } -// -// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration -// value in the response. The format of the refreshable credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// "Token" : "AQoDY....=", -// "Expiration" : "2016-02-25T06:03:31Z" -// } -// -// Errors should be returned in the following format and only returned with 400 -// or 500 HTTP status codes. -// { -// "code": "ErrorCode", -// "message": "Helpful error message." -// } -package endpointcreds - -import ( - "encoding/json" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" -) - -// ProviderName is the name of the credentials provider. -const ProviderName = `CredentialsEndpointProvider` - -// Provider satisfies the credentials.Provider interface, and is a client to -// retrieve credentials from an arbitrary endpoint. -type Provider struct { - staticCreds bool - credentials.Expiry - - // Requires a AWS Client to make HTTP requests to the endpoint with. - // the Endpoint the request will be made to is provided by the aws.Config's - // Endpoint value. - Client *client.Client - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. 
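The ec2rolecreds constructors above accept option functions, so the ExpiryWindow advice from the provider docs can be applied at construction time. A sketch; the 30-second window is an arbitrary example value:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Refresh instance-role credentials 30 seconds before they
        // actually expire, per the ExpiryWindow field's guidance.
        creds := ec2rolecreds.NewCredentials(sess, func(p *ec2rolecreds.EC2RoleProvider) {
            p.ExpiryWindow = 30 * time.Second
        })
        _, _ = creds.Get()
    }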
- // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // Optional authorization token value if set will be used as the value of - // the Authorization header of the endpoint credential request. - AuthorizationToken string -} - -// NewProviderClient returns a credentials Provider for retrieving AWS credentials -// from arbitrary endpoint. -func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { - p := &Provider{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: "CredentialsEndpoint", - Endpoint: endpoint, - }, - handlers, - ), - } - - p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) - p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) - p.Client.Handlers.Validate.Clear() - p.Client.Handlers.Validate.PushBack(validateEndpointHandler) - - for _, option := range options { - option(p) - } - - return p -} - -// NewCredentialsClient returns a pointer to a new Credentials object -// wrapping the endpoint credentials Provider. -func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { - return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) -} - -// IsExpired returns true if the credentials retrieved are expired, or not yet -// retrieved. -func (p *Provider) IsExpired() bool { - if p.staticCreds { - return false - } - return p.Expiry.IsExpired() -} - -// Retrieve will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. -func (p *Provider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. 
-func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - resp, err := p.getCredentials(ctx) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, - awserr.New("CredentialsEndpointError", "failed to load credentials", err) - } - - if resp.Expiration != nil { - p.SetExpiration(*resp.Expiration, p.ExpiryWindow) - } else { - p.staticCreds = true - } - - return credentials.Value{ - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.Token, - ProviderName: ProviderName, - }, nil -} - -type getCredentialsOutput struct { - Expiration *time.Time - AccessKeyID string - SecretAccessKey string - Token string -} - -type errorOutput struct { - Code string `json:"code"` - Message string `json:"message"` -} - -func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { - op := &request.Operation{ - Name: "GetCredentials", - HTTPMethod: "GET", - } - - out := &getCredentialsOutput{} - req := p.Client.NewRequest(op, nil, out) - req.SetContext(ctx) - req.HTTPRequest.Header.Set("Accept", "application/json") - if authToken := p.AuthorizationToken; len(authToken) != 0 { - req.HTTPRequest.Header.Set("Authorization", authToken) - } - - return out, req.Send() -} - -func validateEndpointHandler(r *request.Request) { - if len(r.ClientInfo.Endpoint) == 0 { - r.Error = aws.ErrMissingEndpoint - } -} - -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - out := r.Data.(*getCredentialsOutput) - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, - "failed to decode endpoint credentials", - err, - ) - } -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - var errOut errorOutput - err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to decode error message", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - // Response body format is not consistent between metadata endpoints. - // Grab the error message as a string and include that as the source error - r.Error = awserr.New(errOut.Code, errOut.Message, nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go deleted file mode 100644 index 54c5cf7333..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ /dev/null @@ -1,74 +0,0 @@ -package credentials - -import ( - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// EnvProviderName provides a name of Env provider -const EnvProviderName = "EnvProvider" - -var ( - // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be - // found in the process's environment. - ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) - - // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key - // can't be found in the process's environment. - ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) -) - -// A EnvProvider retrieves credentials from the environment variables of the -// running process. Environment credentials never expire. 
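Wiring the endpoint provider above up by hand mirrors what NewCredentialsClient does internally. A sketch; the endpoint URL and authorization token are placeholders, and the endpoint is assumed to answer with the JSON shapes from the package documentation:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Reuse the session's config and handler lists for the
        // credential fetch, and send a bearer token with each request.
        p := endpointcreds.NewProviderClient(*sess.Config, sess.Handlers,
            "http://127.0.0.1:8080/creds",
            func(p *endpointcreds.Provider) {
                p.AuthorizationToken = "example-token"
            })
        creds := credentials.NewCredentials(p)
        _, _ = creds.Get()
    }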
-// -// Environment variables used: -// -// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY -// -// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY -type EnvProvider struct { - retrieved bool -} - -// NewEnvCredentials returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvCredentials() *Credentials { - return NewCredentials(&EnvProvider{}) -} - -// Retrieve retrieves the keys from the environment. -func (e *EnvProvider) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("AWS_ACCESS_KEY_ID") - if id == "" { - id = os.Getenv("AWS_ACCESS_KEY") - } - - secret := os.Getenv("AWS_SECRET_ACCESS_KEY") - if secret == "" { - secret = os.Getenv("AWS_SECRET_KEY") - } - - if id == "" { - return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound - } - - if secret == "" { - return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: os.Getenv("AWS_SESSION_TOKEN"), - ProviderName: EnvProviderName, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. -func (e *EnvProvider) IsExpired() bool { - return !e.retrieved -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini deleted file mode 100644 index 7fc91d9d20..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go deleted file mode 100644 index e624836002..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ /dev/null @@ -1,426 +0,0 @@ -/* -Package processcreds is a credential Provider to retrieve `credential_process` -credentials. - -WARNING: The following describes a method of sourcing credentials from an external -process. This can potentially be dangerous, so proceed with caution. Other -credential providers should be preferred if at all possible. If using this -option, you should make sure that the config file is as locked down as possible -using security best practices for your operating system. - -You can use credentials from a `credential_process` in a variety of ways. - -One way is to setup your shared config file, located in the default -location, with the `credential_process` key and the command you want to be -called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable -(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. - - [default] - credential_process = /command/to/call - -Creating a new session will use the credential process to retrieve credentials. -NOTE: If there are credentials in the profile you are using, the credential -process will not be used. - - // Initialize a session to load credentials. - sess, _ := session.NewSession(&aws.Config{ - Region: aws.String("us-east-1")}, - ) - - // Create S3 service client to use the credentials. 
- svc := s3.New(sess) - -Another way to use the `credential_process` method is by using -`credentials.NewCredentials()` and providing a command to be executed to -retrieve credentials: - - // Create credentials using the ProcessProvider. - creds := processcreds.NewCredentials("/path/to/command") - - // Create service client value configured for credentials. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -You can set a non-default timeout for the `credential_process` with another -constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To -set a one minute timeout: - - // Create credentials using the ProcessProvider. - creds := processcreds.NewCredentialsTimeout( - "/path/to/command", - time.Duration(500) * time.Millisecond) - -If you need more control, you can set any configurable options in the -credentials using one or more option functions. For example, you can set a two -minute timeout, a credential duration of 60 minutes, and a maximum stdout -buffer size of 2k. - - creds := processcreds.NewCredentials( - "/path/to/command", - func(opt *ProcessProvider) { - opt.Timeout = time.Duration(2) * time.Minute - opt.Duration = time.Duration(60) * time.Minute - opt.MaxBufSize = 2048 - }) - -You can also use your own `exec.Cmd`: - - // Create an exec.Cmd - myCommand := exec.Command("/path/to/command") - - // Create credentials using your exec.Cmd and custom timeout - creds := processcreds.NewCredentialsCommand( - myCommand, - func(opt *processcreds.ProcessProvider) { - opt.Timeout = time.Duration(1) * time.Second - }) -*/ -package processcreds - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "runtime" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -const ( - // ProviderName is the name this credentials provider will label any - // returned credentials Value with. 
- ProviderName = `ProcessProvider` - - // ErrCodeProcessProviderParse error parsing process output - ErrCodeProcessProviderParse = "ProcessProviderParseError" - - // ErrCodeProcessProviderVersion version error in output - ErrCodeProcessProviderVersion = "ProcessProviderVersionError" - - // ErrCodeProcessProviderRequired required attribute missing in output - ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" - - // ErrCodeProcessProviderExecution execution of command failed - ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" - - // errMsgProcessProviderTimeout process took longer than allowed - errMsgProcessProviderTimeout = "credential process timed out" - - // errMsgProcessProviderProcess process error - errMsgProcessProviderProcess = "error in credential_process" - - // errMsgProcessProviderParse problem parsing output - errMsgProcessProviderParse = "parse failed of credential_process output" - - // errMsgProcessProviderVersion version error in output - errMsgProcessProviderVersion = "wrong version in process output (not 1)" - - // errMsgProcessProviderMissKey missing access key id in output - errMsgProcessProviderMissKey = "missing AccessKeyId in process output" - - // errMsgProcessProviderMissSecret missing secret access key in output - errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" - - // errMsgProcessProviderPrepareCmd prepare of command failed - errMsgProcessProviderPrepareCmd = "failed to prepare command" - - // errMsgProcessProviderEmptyCmd command must not be empty - errMsgProcessProviderEmptyCmd = "command must not be empty" - - // errMsgProcessProviderPipe failed to initialize pipe - errMsgProcessProviderPipe = "failed to initialize pipe" - - // DefaultDuration is the default amount of time in minutes that the - // credentials will be valid for. - DefaultDuration = time.Duration(15) * time.Minute - - // DefaultBufSize limits buffer size from growing to an enormous - // amount due to a faulty process. - DefaultBufSize = int(8 * sdkio.KibiByte) - - // DefaultTimeout default limit on time a process can run. - DefaultTimeout = time.Duration(1) * time.Minute -) - -// ProcessProvider satisfies the credentials.Provider interface, and is a -// client to retrieve credentials from a process. -type ProcessProvider struct { - staticCreds bool - credentials.Expiry - originalCommand []string - - // Expiry duration of the credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // A string representing an os command that should return a JSON with - // credential information. - command *exec.Cmd - - // MaxBufSize limits memory usage from growing to an enormous - // amount due to a faulty process. - MaxBufSize int - - // Timeout limits the time a process can run. - Timeout time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping the -// ProcessProvider. The credentials will expire every 15 minutes by default.
-func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { - p := &ProcessProvider{ - command: exec.Command(command), - Duration: DefaultDuration, - Timeout: DefaultTimeout, - MaxBufSize: DefaultBufSize, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsTimeout returns a pointer to a new Credentials object with -// the specified command and timeout, and default duration and max buffer size. -func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { - p := NewCredentials(command, func(opt *ProcessProvider) { - opt.Timeout = timeout - }) - - return p -} - -// NewCredentialsCommand returns a pointer to a new Credentials object with -// the specified command, and default timeout, duration and max buffer size. -func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { - p := &ProcessProvider{ - command: command, - Duration: DefaultDuration, - Timeout: DefaultTimeout, - MaxBufSize: DefaultBufSize, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -type credentialProcessResponse struct { - Version int - AccessKeyID string `json:"AccessKeyId"` - SecretAccessKey string - SessionToken string - Expiration *time.Time -} - -// Retrieve executes the 'credential_process' and returns the credentials. -func (p *ProcessProvider) Retrieve() (credentials.Value, error) { - out, err := p.executeCredentialProcess() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - // Serialize and validate response - resp := &credentialProcessResponse{} - if err = json.Unmarshal(out, resp); err != nil { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderParse, - fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), - err) - } - - if resp.Version != 1 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderVersion, - errMsgProcessProviderVersion, - nil) - } - - if len(resp.AccessKeyID) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderRequired, - errMsgProcessProviderMissKey, - nil) - } - - if len(resp.SecretAccessKey) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderRequired, - errMsgProcessProviderMissSecret, - nil) - } - - // Handle expiration - p.staticCreds = resp.Expiration == nil - if resp.Expiration != nil { - p.SetExpiration(*resp.Expiration, p.ExpiryWindow) - } - - return credentials.Value{ - ProviderName: ProviderName, - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.SessionToken, - }, nil -} - -// IsExpired returns true if the credentials retrieved are expired, or not yet -// retrieved. -func (p *ProcessProvider) IsExpired() bool { - if p.staticCreds { - return false - } - return p.Expiry.IsExpired() -} - -// prepareCommand prepares the command to be executed. 
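The credentialProcessResponse struct above fixes the contract a credential_process must satisfy: Version must be 1, AccessKeyId and SecretAccessKey are required, and a missing Expiration marks the credentials static. A minimal conforming process, written in Go for illustration with placeholder values:

    package main

    import (
        "encoding/json"
        "os"
        "time"
    )

    func main() {
        exp := time.Now().Add(15 * time.Minute).UTC()

        // Print the JSON shape credentialProcessResponse unmarshals;
        // leaving out Expiration would make the result static instead.
        _ = json.NewEncoder(os.Stdout).Encode(map[string]interface{}{
            "Version":         1,
            "AccessKeyId":     "AKIDEXAMPLE",
            "SecretAccessKey": "secret",
            "SessionToken":    "token",
            "Expiration":      exp.Format(time.RFC3339),
        })
    }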
-func (p *ProcessProvider) prepareCommand() error { - - var cmdArgs []string - if runtime.GOOS == "windows" { - cmdArgs = []string{"cmd.exe", "/C"} - } else { - cmdArgs = []string{"sh", "-c"} - } - - if len(p.originalCommand) == 0 { - p.originalCommand = make([]string, len(p.command.Args)) - copy(p.originalCommand, p.command.Args) - - // check for empty command because it succeeds - if len(strings.TrimSpace(p.originalCommand[0])) < 1 { - return awserr.New( - ErrCodeProcessProviderExecution, - fmt.Sprintf( - "%s: %s", - errMsgProcessProviderPrepareCmd, - errMsgProcessProviderEmptyCmd), - nil) - } - } - - cmdArgs = append(cmdArgs, p.originalCommand...) - p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) - p.command.Env = os.Environ() - - return nil -} - -// executeCredentialProcess starts the credential process on the OS and -// returns the results or an error. -func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { - - if err := p.prepareCommand(); err != nil { - return nil, err - } - - // Setup the pipes - outReadPipe, outWritePipe, err := os.Pipe() - if err != nil { - return nil, awserr.New( - ErrCodeProcessProviderExecution, - errMsgProcessProviderPipe, - err) - } - - p.command.Stderr = os.Stderr // display stderr on console for MFA - p.command.Stdout = outWritePipe // get creds json on process's stdout - p.command.Stdin = os.Stdin // enable stdin for MFA - - output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) - - stdoutCh := make(chan error, 1) - go readInput( - io.LimitReader(outReadPipe, int64(p.MaxBufSize)), - output, - stdoutCh) - - execCh := make(chan error, 1) - go executeCommand(*p.command, execCh) - - finished := false - var errors []error - for !finished { - select { - case readError := <-stdoutCh: - errors = appendError(errors, readError) - finished = true - case execError := <-execCh: - err := outWritePipe.Close() - errors = appendError(errors, err) - errors = appendError(errors, execError) - if errors != nil { - return output.Bytes(), awserr.NewBatchError( - ErrCodeProcessProviderExecution, - errMsgProcessProviderProcess, - errors) - } - case <-time.After(p.Timeout): - finished = true - return output.Bytes(), awserr.NewBatchError( - ErrCodeProcessProviderExecution, - errMsgProcessProviderTimeout, - errors) // errors can be nil - } - } - - out := output.Bytes() - - if runtime.GOOS == "windows" { - // windows adds slashes to quotes - out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) - } - - return out, nil -} - -// appendError conveniently checks for nil before appending slice -func appendError(errors []error, err error) []error { - if err != nil { - return append(errors, err) - } - return errors -} - -func executeCommand(cmd exec.Cmd, exec chan error) { - // Start the command - err := cmd.Start() - if err == nil { - err = cmd.Wait() - } - - exec <- err -} - -func readInput(r io.Reader, w io.Writer, read chan error) { - tee := io.TeeReader(r, w) - - _, err := ioutil.ReadAll(tee) - - if err == io.EOF { - err = nil - } - - read <- err // will only arrive here when write end of pipe is closed -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go deleted file mode 100644 index 22b5c5d9f3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ /dev/null @@ -1,151 +0,0 @@ -package credentials - -import ( - "fmt" - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" - 
"github.com/aws/aws-sdk-go/internal/ini" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredsProviderName provides a name of SharedCreds provider -const SharedCredsProviderName = "SharedCredentialsProvider" - -var ( - // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. - ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) -) - -// A SharedCredentialsProvider retrieves access key pair (access key ID, -// secret access key, and session token if present) credentials from the current -// user's home directory, and keeps track if those credentials are expired. -// -// Profile ini file example: $HOME/.aws/credentials -type SharedCredentialsProvider struct { - // Path to the shared credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Filename string - - // AWS Profile to extract credentials from the shared credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - Profile string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewSharedCredentials returns a pointer to a new Credentials object -// wrapping the Profile file provider. -func NewSharedCredentials(filename, profile string) *Credentials { - return NewCredentials(&SharedCredentialsProvider{ - Filename: filename, - Profile: profile, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *SharedCredentialsProvider) Retrieve() (Value, error) { - p.retrieved = false - - filename, err := p.filename() - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - creds, err := loadProfile(filename, p.profile()) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - p.retrieved = true - return creds, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *SharedCredentialsProvider) IsExpired() bool { - return !p.retrieved -} - -// loadProfiles loads from the file pointed to by shared credentials filename for profile. -// The credentials retrieved from the profile will be returned or error. Error will be -// returned if it fails to read from the file, or the data is invalid. 
-func loadProfile(filename, profile string) (Value, error) {
-	config, err := ini.OpenFile(filename)
-	if err != nil {
-		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
-	}
-
-	iniProfile, ok := config.GetSection(profile)
-	if !ok {
-		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
-	}
-
-	id := iniProfile.String("aws_access_key_id")
-	if len(id) == 0 {
-		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
-			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
-			nil)
-	}
-
-	secret := iniProfile.String("aws_secret_access_key")
-	if len(secret) == 0 {
-		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
-			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
-			nil)
-	}
-
-	// Default to empty string if not found
-	token := iniProfile.String("aws_session_token")
-
-	return Value{
-		AccessKeyID:     id,
-		SecretAccessKey: secret,
-		SessionToken:    token,
-		ProviderName:    SharedCredsProviderName,
-	}, nil
-}
-
-// filename returns the filename to use to read AWS shared credentials.
-//
-// Will return an error if the user's home directory path cannot be found.
-func (p *SharedCredentialsProvider) filename() (string, error) {
-	if len(p.Filename) != 0 {
-		return p.Filename, nil
-	}
-
-	if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
-		return p.Filename, nil
-	}
-
-	if home := shareddefaults.UserHomeDir(); len(home) == 0 {
-		// Backwards compatibility of the home directory not found error being
-		// returned. This error is too verbose; a failure when opening the file
-		// would have been a better error to return.
-		return "", ErrSharedCredentialsHomeNotFound
-	}
-
-	p.Filename = shareddefaults.SharedCredentialsFilename()
-
-	return p.Filename, nil
-}
-
-// profile returns the AWS shared credentials profile. If empty will read
-// environment variable "AWS_PROFILE". If that is not set profile will
-// return "default".
-func (p *SharedCredentialsProvider) profile() string {
-	if p.Profile == "" {
-		p.Profile = os.Getenv("AWS_PROFILE")
-	}
-	if p.Profile == "" {
-		p.Profile = "default"
-	}
-
-	return p.Profile
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
deleted file mode 100644
index 18c940ab3c..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
-//
-// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
-// expects that you have already performed the SSO login flow using the AWS CLI's "aws sso login" command, or by
-// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
-// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed, an error will be returned.
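-//
-// The login flow itself can be performed with the AWS CLI, for example
-// (the profile name is illustrative):
-//
-//	$ aws sso login --profile devsso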
-//
-// Loading AWS SSO credentials with the AWS shared configuration file
-//
-// You can configure AWS SSO credentials from the AWS shared configuration file by
-// specifying the required keys in the profile:
-//
-//	sso_account_id
-//	sso_region
-//	sso_role_name
-//	sso_start_url
-//
-// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that define the target
-// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
-// provided, or an error will be returned.
-//
-//	[profile devsso]
-//	sso_start_url = https://my-sso-portal.awsapps.com/start
-//	sso_role_name = SSOReadOnlyRole
-//	sso_region = us-east-1
-//	sso_account_id = 123456789012
-//
-// Using the session package, you can load the AWS SDK shared configuration, and specify that this profile be used to
-// retrieve credentials. For example:
-//
-//	sess, err := session.NewSessionWithOptions(session.Options{
-//		SharedConfigState: session.SharedConfigEnable,
-//		Profile:           "devsso",
-//	})
-//	if err != nil {
-//		return err
-//	}
-//
-// Programmatically loading AWS SSO credentials directly
-//
-// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
-// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
-//
-//	svc := sso.New(sess, &aws.Config{
-//		Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region
-//	})
-//
-//	provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start")
-//
-//	credentials, err := provider.Get()
-//	if err != nil {
-//		return err
-//	}
-//
-// Additional Resources
-//
-// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
-//
-// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
-package ssocreds
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go
deleted file mode 100644
index d4df39a7a2..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package ssocreds
-
-import "os"
-
-func getHomeDirectory() string {
-	return os.Getenv("HOME")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go
deleted file mode 100644
index eb48f61e5b..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package ssocreds
-
-import "os"
-
-func getHomeDirectory() string {
-	return os.Getenv("USERPROFILE")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
deleted file mode 100644
index 6eda2a5557..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package ssocreds
-
-import (
-	"crypto/sha1"
-	"encoding/hex"
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"path/filepath"
-	"strings"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/client"
-	"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/sso" - "github.com/aws/aws-sdk-go/service/sso/ssoiface" -) - -// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid. -// To refresh the SSO session run aws sso login with the corresponding profile. -const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken" - -const invalidTokenMessage = "the SSO session has expired or is invalid" - -func init() { - nowTime = time.Now - defaultCacheLocation = defaultCacheLocationImpl -} - -var nowTime func() time.Time - -// ProviderName is the name of the provider used to specify the source of credentials. -const ProviderName = "SSOProvider" - -var defaultCacheLocation func() string - -func defaultCacheLocationImpl() string { - return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache") -} - -// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token. -type Provider struct { - credentials.Expiry - - // The Client which is configured for the AWS Region where the AWS SSO user portal is located. - Client ssoiface.SSOAPI - - // The AWS account that is assigned to the user. - AccountID string - - // The role name that is assigned to the user. - RoleName string - - // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. - StartURL string -} - -// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured -// for the AWS Region where the AWS SSO user portal is located. -func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { - return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...) -} - -// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured -// for the AWS Region where the AWS SSO user portal is located. -func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { - p := &Provider{ - Client: client, - AccountID: accountID, - RoleName: roleName, - StartURL: startURL, - } - - for _, fn := range optFns { - fn(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal -// by exchanging the accessToken present in ~/.aws/sso/cache. -func (p *Provider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal -// by exchanging the accessToken present in ~/.aws/sso/cache. 
-func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - tokenFile, err := loadTokenFile(p.StartURL) - if err != nil { - return credentials.Value{}, err - } - - output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{ - AccessToken: &tokenFile.AccessToken, - AccountId: &p.AccountID, - RoleName: &p.RoleName, - }) - if err != nil { - return credentials.Value{}, err - } - - expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC() - p.SetExpiration(expireTime, 0) - - return credentials.Value{ - AccessKeyID: aws.StringValue(output.RoleCredentials.AccessKeyId), - SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey), - SessionToken: aws.StringValue(output.RoleCredentials.SessionToken), - ProviderName: ProviderName, - }, nil -} - -func getCacheFileName(url string) (string, error) { - hash := sha1.New() - _, err := hash.Write([]byte(url)) - if err != nil { - return "", err - } - return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil -} - -type rfc3339 time.Time - -func (r *rfc3339) UnmarshalJSON(bytes []byte) error { - var value string - - if err := json.Unmarshal(bytes, &value); err != nil { - return err - } - - parse, err := time.Parse(time.RFC3339, value) - if err != nil { - return fmt.Errorf("expected RFC3339 timestamp: %v", err) - } - - *r = rfc3339(parse) - - return nil -} - -type token struct { - AccessToken string `json:"accessToken"` - ExpiresAt rfc3339 `json:"expiresAt"` - Region string `json:"region,omitempty"` - StartURL string `json:"startUrl,omitempty"` -} - -func (t token) Expired() bool { - return nowTime().Round(0).After(time.Time(t.ExpiresAt)) -} - -func loadTokenFile(startURL string) (t token, err error) { - key, err := getCacheFileName(startURL) - if err != nil { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) - } - - fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key)) - if err != nil { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) - } - - if err := json.Unmarshal(fileBytes, &t); err != nil { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) - } - - if len(t.AccessToken) == 0 { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) - } - - if t.Expired() { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) - } - - return t, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go deleted file mode 100644 index cbba1e3d56..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ /dev/null @@ -1,57 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// StaticProviderName provides a name of Static provider -const StaticProviderName = "StaticProvider" - -var ( - // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) -) - -// A StaticProvider is a set of credentials which are set programmatically, -// and will never expire. -type StaticProvider struct { - Value -} - -// NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. 
Token is only required
-// for temporary security credentials retrieved via STS, otherwise an empty
-// string can be passed for this parameter.
-func NewStaticCredentials(id, secret, token string) *Credentials {
-	return NewCredentials(&StaticProvider{Value: Value{
-		AccessKeyID:     id,
-		SecretAccessKey: secret,
-		SessionToken:    token,
-	}})
-}
-
-// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
-// wrapping the static credentials value provider. Same as NewStaticCredentials,
-// but takes the creds Value instead of individual fields.
-func NewStaticCredentialsFromCreds(creds Value) *Credentials {
-	return NewCredentials(&StaticProvider{Value: creds})
-}
-
-// Retrieve returns the credentials or error if the credentials are invalid.
-func (s *StaticProvider) Retrieve() (Value, error) {
-	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
-		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
-	}
-
-	if len(s.Value.ProviderName) == 0 {
-		s.Value.ProviderName = StaticProviderName
-	}
-	return s.Value, nil
-}
-
-// IsExpired returns if the credentials are expired.
-//
-// For StaticProvider, the credentials never expire.
-func (s *StaticProvider) IsExpired() bool {
-	return false
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
deleted file mode 100644
index 260a37cbba..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
-Package stscreds provides credential Providers for retrieving STS AWS credentials.
-
-STS provides multiple ways to retrieve credentials which can be used when making
-future AWS service API operation calls.
-
-The SDK will ensure that per instance of credentials.Credentials all requests
-to refresh the credentials will be synchronized. However, the SDK is unable to
-ensure synchronous usage of the AssumeRoleProvider if the value is shared
-between multiple Credentials, Sessions or service clients.
-
-Assume Role
-
-To assume an IAM role using STS with the SDK you can create a new Credentials
-with the SDK's stscreds package.
-
-	// Initial credentials loaded from SDK's default credential chain. Such as
-	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
-	// Role. These credentials will be used to make the STS Assume Role API call.
-	sess := session.Must(session.NewSession())
-
-	// Create the credentials from AssumeRoleProvider to assume the role
-	// referenced by the "myRoleARN" ARN.
-	creds := stscreds.NewCredentials(sess, "myRoleArn")
-
-	// Create service client value configured for credentials
-	// from assumed role.
-	svc := s3.New(sess, &aws.Config{Credentials: creds})
-
-Assume Role with static MFA Token
-
-To assume an IAM role with a MFA token you can either specify a MFA token code
-directly or provide a function to prompt the user each time the credentials
-need to refresh the role's credentials. Specifying the TokenCode should be used
-for short-lived operations that will not need to be refreshed, and when you do
-not need direct control over how the user provides their MFA token.
-
-With TokenCode the AssumeRoleProvider will not be able to refresh the role's
-credentials.
-
-	// Create the credentials from AssumeRoleProvider to assume the role
-	// referenced by the "myRoleARN" ARN using the MFA token code provided.
-	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
-		p.SerialNumber = aws.String("myTokenSerialNumber")
-		p.TokenCode = aws.String("00000000")
-	})
-
-	// Create service client value configured for credentials
-	// from assumed role.
-	svc := s3.New(sess, &aws.Config{Credentials: creds})
-
-Assume Role with MFA Token Provider
-
-To assume an IAM role with MFA for longer running tasks where the credentials
-may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
-will allow the credential provider to prompt for a new MFA token code when the
-role's credentials need to be refreshed.
-
-The StdinTokenProvider function is available to prompt on stdin to retrieve
-the MFA token code from the user. You can also implement custom prompts by
-satisfying the TokenProvider function signature.
-
-Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
-have undesirable results as the StdinTokenProvider will not be synchronized. A
-single Credentials with an AssumeRoleProvider can be shared safely.
-
-	// Create the credentials from AssumeRoleProvider to assume the role
-	// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
-	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
-		p.SerialNumber = aws.String("myTokenSerialNumber")
-		p.TokenProvider = stscreds.StdinTokenProvider
-	})
-
-	// Create service client value configured for credentials
-	// from assumed role.
-	svc := s3.New(sess, &aws.Config{Credentials: creds})
-
-*/
-package stscreds
-
-import (
-	"fmt"
-	"os"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/client"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/internal/sdkrand"
-	"github.com/aws/aws-sdk-go/service/sts"
-)
-
-// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
-// An error is returned if reading from stdin fails.
-//
-// Use this function to read MFA tokens from stdin. The function makes no attempt
-// to make atomic prompts from stdin across multiple goroutines.
-//
-// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
-// have undesirable results as the StdinTokenProvider will not be synchronized. A
-// single Credentials with an AssumeRoleProvider can be shared safely.
-//
-// Will wait forever until something is provided on stdin.
-func StdinTokenProvider() (string, error) {
-	var v string
-	fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
-	_, err := fmt.Scanln(&v)
-
-	return v, err
-}
-
-// ProviderName provides a name of AssumeRole provider
-const ProviderName = "AssumeRoleProvider"
-
-// AssumeRoler represents the minimal subset of the STS client API used by this provider.
-type AssumeRoler interface {
-	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
-}
-
-type assumeRolerWithContext interface {
-	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
-}
-
-// DefaultDuration is the default amount of time that the credentials
-// will be valid for.
-var DefaultDuration = time.Duration(15) * time.Minute
-
-// AssumeRoleProvider retrieves temporary credentials from the STS service, and
-// keeps track of their expiration time.
-//
-// This credential provider will be used by the SDK's default credential chain
-// when shared configuration is enabled, and the shared config or shared credentials
-// file configures assume role. See Session docs for how to do this.
-//
-// AssumeRoleProvider does not provide any synchronization and it is not safe
-// to share this value across multiple Credentials, Sessions, or service clients
-// without also sharing the same Credentials instance.
-type AssumeRoleProvider struct {
-	credentials.Expiry
-
-	// STS client to make assume role request with.
-	Client AssumeRoler
-
-	// Role to be assumed.
-	RoleARN string
-
-	// Session name, if you wish to reuse the credentials elsewhere.
-	RoleSessionName string
-
-	// Optional, you can pass tag key-value pairs to your session. These tags are called session tags.
-	Tags []*sts.Tag
-
-	// A list of keys for session tags that you want to set as transitive.
-	// If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain.
-	TransitiveTagKeys []*string
-
-	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
-	Duration time.Duration
-
-	// Optional ExternalID to pass along, defaults to nil if not set.
-	ExternalID *string
-
-	// The policy plain text must be 2048 bytes or shorter. However, an internal
-	// conversion compresses it into a packed binary format with a separate limit.
-	// The PackedPolicySize response element indicates by percentage how close to
-	// the upper size limit the policy is, with 100% equaling the maximum allowed
-	// size.
-	Policy *string
-
-	// The ARNs of IAM managed policies you want to use as managed session policies.
-	// The policies must exist in the same account as the role.
-	//
-	// This parameter is optional. You can provide up to 10 managed policy ARNs.
-	// However, the plain text that you use for both inline and managed session
-	// policies can't exceed 2,048 characters.
-	//
-	// An AWS conversion compresses the passed session policies and session tags
-	// into a packed binary format that has a separate limit. Your request can fail
-	// for this limit even if your plain text meets the other requirements. The
-	// PackedPolicySize response element indicates by percentage how close the policies
-	// and tags for your request are to the upper size limit.
-	//
-	// Passing policies to this operation returns new temporary credentials. The
-	// resulting session's permissions are the intersection of the role's identity-based
-	// policy and the session policies. You can use the role's temporary credentials
-	// in subsequent AWS API calls to access resources in the account that owns
-	// the role. You cannot use session policies to grant more permissions than
-	// those allowed by the identity-based policy of the role that is being assumed.
-	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-	// in the IAM User Guide.
-	PolicyArns []*sts.PolicyDescriptorType
-
-	// The identification number of the MFA device that is associated with the user
-	// who is making the AssumeRole call. Specify this value if the trust policy
-	// of the role being assumed includes a condition that requires MFA authentication.
-	// The value is either the serial number for a hardware device (such as GAHT12345678)
-	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
-	SerialNumber *string
-
-	// The value provided by the MFA device, if the trust policy of the role being
-	// assumed requires MFA (that is, if the policy includes a condition that tests
-	// for MFA). If the role being assumed requires MFA and if the TokenCode value
-	// is missing or expired, the AssumeRole call returns an "access denied" error.
-	//
-	// If SerialNumber is set and neither TokenCode nor TokenProvider are also
-	// set an error will be returned.
-	TokenCode *string
-
-	// Async method of providing MFA token code for assuming an IAM role with MFA.
-	// The value returned by the function will be used as the TokenCode in the Retrieve
-	// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
-	//
-	// This token provider will be called whenever the assumed role's
-	// credentials need to be refreshed when SerialNumber is also set and
-	// TokenCode is not set.
-	//
-	// If both TokenCode and TokenProvider are set, TokenProvider will be used and
-	// TokenCode is ignored.
-	TokenProvider func() (string, error)
-
-	// ExpiryWindow will allow the credentials to trigger refreshing prior to
-	// the credentials actually expiring. This is beneficial so race conditions
-	// with expiring credentials do not cause requests to fail unexpectedly
-	// due to ExpiredTokenException exceptions.
-	//
-	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
-	// 10 seconds before the credentials are actually expired.
-	//
-	// If ExpiryWindow is 0 or less it will be ignored.
-	ExpiryWindow time.Duration
-
-	// MaxJitterFrac reduces the effective Duration of each credential requested
-	// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
-	// have a value between 0 and 1. Any other value may lead to unexpected
-	// behavior. With a MaxJitterFrac value of 0 (the default), no jitter will
-	// be used.
-	//
-	// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
-	// AssumeRole call will be made with an arbitrary Duration between 27m and
-	// 30m.
-	//
-	// MaxJitterFrac should not be negative.
-	MaxJitterFrac float64
-}
-
-// NewCredentials returns a pointer to a new Credentials value wrapping the
-// AssumeRoleProvider. The credentials will expire every 15 minutes and the
-// role will be named after a nanosecond timestamp of this operation. The
-// Credentials value will attempt to refresh the credentials using the provider
-// when Credentials.Get is called, if the cached credentials are expiring.
-//
-// Takes a Config provider to create the STS client. The ConfigProvider is
-// satisfied by the session.Session type.
-//
-// It is safe to share the returned Credentials with multiple Sessions and
-// service clients. All access to the credentials and refreshing them
-// will be synchronized.
-func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
-	p := &AssumeRoleProvider{
-		Client:   sts.New(c),
-		RoleARN:  roleARN,
-		Duration: DefaultDuration,
-	}
-
-	for _, option := range options {
-		option(p)
-	}
-
-	return credentials.NewCredentials(p)
-}
-
-// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the
-// AssumeRoleProvider. The credentials will expire every 15 minutes and the
-// role will be named after a nanosecond timestamp of this operation. The
-// Credentials value will attempt to refresh the credentials using the provider
-// when Credentials.Get is called, if the cached credentials are expiring.
-// -// Takes an AssumeRoler which can be satisfied by the STS client. -// -// It is safe to share the returned Credentials with multiple Sessions and -// service clients. All access to the credentials and refreshing them -// will be synchronized. -func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { - p := &AssumeRoleProvider{ - Client: svc, - RoleARN: roleARN, - Duration: DefaultDuration, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - // Apply defaults where parameters are not set. - if p.RoleSessionName == "" { - // Try to work out a role name that will hopefully end up unique. - p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) - } - if p.Duration == 0 { - // Expire as often as AWS permits. - p.Duration = DefaultDuration - } - jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) - input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), - RoleArn: aws.String(p.RoleARN), - RoleSessionName: aws.String(p.RoleSessionName), - ExternalId: p.ExternalID, - Tags: p.Tags, - PolicyArns: p.PolicyArns, - TransitiveTagKeys: p.TransitiveTagKeys, - } - if p.Policy != nil { - input.Policy = p.Policy - } - if p.SerialNumber != nil { - if p.TokenCode != nil { - input.SerialNumber = p.SerialNumber - input.TokenCode = p.TokenCode - } else if p.TokenProvider != nil { - input.SerialNumber = p.SerialNumber - code, err := p.TokenProvider() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - input.TokenCode = aws.String(code) - } else { - return credentials.Value{ProviderName: ProviderName}, - awserr.New("AssumeRoleTokenNotAvailable", - "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) - } - } - - var roleOutput *sts.AssumeRoleOutput - var err error - - if c, ok := p.Client.(assumeRolerWithContext); ok { - roleOutput, err = c.AssumeRoleWithContext(ctx, input) - } else { - roleOutput, err = p.Client.AssumeRole(input) - } - - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - // We will proactively generate new credentials before they expire. 
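-	// For example, with an ExpiryWindow of 10s and STS returning an expiry
-	// of 12:00:00, IsExpired() starts reporting true at 11:59:50
-	// (illustrative times).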
-	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
-
-	return credentials.Value{
-		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
-		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
-		SessionToken:    *roleOutput.Credentials.SessionToken,
-		ProviderName:    ProviderName,
-	}, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
deleted file mode 100644
index 19ad619aa3..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package stscreds
-
-import (
-	"fmt"
-	"io/ioutil"
-	"strconv"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/client"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/service/sts"
-	"github.com/aws/aws-sdk-go/service/sts/stsiface"
-)
-
-const (
-	// ErrCodeWebIdentity will be used as an error code when constructing
-	// a new error to be returned during session creation or retrieval.
-	ErrCodeWebIdentity = "WebIdentityErr"
-
-	// WebIdentityProviderName is the web identity provider name
-	WebIdentityProviderName = "WebIdentityCredentials"
-)
-
-// now is used to return a time.Time object representing
-// the current time. This can be used to easily test and
-// compare test values.
-var now = time.Now
-
-// TokenFetcher should return WebIdentity token bytes or an error
-type TokenFetcher interface {
-	FetchToken(credentials.Context) ([]byte, error)
-}
-
-// FetchTokenPath is a path to a WebIdentity token file
-type FetchTokenPath string
-
-// FetchToken returns a token by reading from the filesystem
-func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) {
-	data, err := ioutil.ReadFile(string(f))
-	if err != nil {
-		errMsg := fmt.Sprintf("unable to read file at %s", f)
-		return nil, awserr.New(ErrCodeWebIdentity, errMsg, err)
-	}
-	return data, nil
-}
-
-// WebIdentityRoleProvider is used to retrieve credentials using
-// an OIDC token.
-type WebIdentityRoleProvider struct {
-	credentials.Expiry
-
-	// The policy ARNs to use with the web identity assumed role.
-	PolicyArns []*sts.PolicyDescriptorType
-
-	// Duration the STS credentials will be valid for. Truncated to seconds.
-	// If unset, the assumed role will use AssumeRoleWithWebIdentity's default
-	// expiry duration. See
-	// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity
-	// for more information.
-	Duration time.Duration
-
-	// The amount of time the credentials will be refreshed before they expire.
-	// This is useful to refresh credentials before they expire, reducing the
-	// risk of using credentials as they expire. If unset, will default to no
-	// expiry window.
-	ExpiryWindow time.Duration
-
-	client stsiface.STSAPI
-
-	tokenFetcher    TokenFetcher
-	roleARN         string
-	roleSessionName string
-}
-
-// NewWebIdentityCredentials will return a new set of credentials with a given
-// configuration, role arn, and token file path.
-//
-// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
-// functional options, and wrap with credentials.NewCredentials helper.
-func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
-	svc := sts.New(c)
-	p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
-	return credentials.NewCredentials(p)
-}
-
-// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
-// provided stsiface.STSAPI
-//
-// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
-// functional options.
-func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
-	return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, FetchTokenPath(path))
-}
-
-// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the
-// provided stsiface.STSAPI and a TokenFetcher
-//
-// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
-// functional options.
-func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider {
-	return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, tokenFetcher)
-}
-
-// NewWebIdentityRoleProviderWithOptions will return an initialized
-// WebIdentityRoleProvider with the provided stsiface.STSAPI, role ARN, and a
-// TokenFetcher. Additional options can be provided as functional options.
-//
-// TokenFetcher is the implementation that will retrieve the JWT token used to
-// assume the role. Use the provided FetchTokenPath implementation to
-// retrieve the JWT token using a file system path.
-func NewWebIdentityRoleProviderWithOptions(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher, optFns ...func(*WebIdentityRoleProvider)) *WebIdentityRoleProvider {
-	p := WebIdentityRoleProvider{
-		client:          svc,
-		tokenFetcher:    tokenFetcher,
-		roleARN:         roleARN,
-		roleSessionName: roleSessionName,
-	}
-
-	for _, fn := range optFns {
-		fn(&p)
-	}
-
-	return &p
-}
-
-// Retrieve will attempt to assume a role using the token located at the
-// 'WebIdentityTokenFilePath' destination; if that is empty, an error will
-// be returned.
-func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
-	return p.RetrieveWithContext(aws.BackgroundContext())
-}
-
-// RetrieveWithContext will attempt to assume a role using the token located
-// at the 'WebIdentityTokenFilePath' destination; if that is empty, an error
-// will be returned.
-func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
-	b, err := p.tokenFetcher.FetchToken(ctx)
-	if err != nil {
-		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err)
-	}
-
-	sessionName := p.roleSessionName
-	if len(sessionName) == 0 {
-		// session name is used to uniquely identify a session. This simply
-		// uses unix time in nanoseconds to uniquely identify sessions.
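-		// e.g. sessionName == "1593034678912345678" (an illustrative
-		// UnixNano timestamp)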
-		sessionName = strconv.FormatInt(now().UnixNano(), 10)
-	}
-
-	var duration *int64
-	if p.Duration != 0 {
-		duration = aws.Int64(int64(p.Duration / time.Second))
-	}
-
-	req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
-		PolicyArns:       p.PolicyArns,
-		RoleArn:          &p.roleARN,
-		RoleSessionName:  &sessionName,
-		WebIdentityToken: aws.String(string(b)),
-		DurationSeconds:  duration,
-	})
-
-	req.SetContext(ctx)
-
-	// InvalidIdentityToken error is a temporary error that can occur
-	// when assuming a role with a JWT web identity token.
-	req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
-	if err := req.Send(); err != nil {
-		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
-	}
-
-	p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
-
-	value := credentials.Value{
-		AccessKeyID:     aws.StringValue(resp.Credentials.AccessKeyId),
-		SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
-		SessionToken:    aws.StringValue(resp.Credentials.SessionToken),
-		ProviderName:    WebIdentityProviderName,
-	}
-	return value, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
deleted file mode 100644
index 25a66d1dda..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Package csm provides the Client Side Monitoring (CSM) client which enables
-// sending metrics via UDP connection to the CSM agent. This package provides
-// control options, and configuration for the CSM client. The client can be
-// controlled manually, or automatically via the SDK's Session configuration.
-//
-// Enabling CSM client via SDK's Session configuration
-//
-// The CSM client can be enabled automatically via SDK's Session configuration.
-// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
-// environment variable is set to a non-empty value.
-//
-// The configuration options for the CSM client via the SDK's session
-// configuration are:
-//
-//	* AWS_CSM_PORT=<port number>
-//	  The port number the CSM agent will receive metrics on.
-//
-//	* AWS_CSM_HOST=<hostname or ip>
-//	  The hostname, or IP address the CSM agent will receive metrics on.
-//	  Without port number.
-//
-// Manually enabling the CSM client
-//
-// The CSM client can be started, paused, and resumed manually. The Start
-// function will enable the CSM client to publish metrics to the CSM agent. It
-// is safe to call Start concurrently, but if Start is called additional times
-// with different ClientID or address it will panic.
-//
-//	r, err := csm.Start("clientID", ":31000")
-//	if err != nil {
-//		panic(fmt.Errorf("failed starting CSM: %v", err))
-//	}
-//
-// When controlling the CSM client manually, you must also inject its request
-// handlers into the SDK's Session configuration for the SDK's API clients to
-// publish metrics.
-//
-//	sess, err := session.NewSession(&aws.Config{})
-//	if err != nil {
-//		panic(fmt.Errorf("failed loading session: %v", err))
-//	}
-//
-//	// Add CSM client's metric publishing request handlers to the SDK's
-//	// Session Configuration.
-//	r.InjectHandlers(&sess.Handlers)
-//
-// Controlling CSM client
-//
-// Once the CSM client has been enabled the Get function will return a Reporter
-// value that you can use to pause and resume the metrics published to the CSM
-// agent. If the Get function is called before the reporter is enabled with the
-// Start function or via the SDK's Session configuration, nil will be returned.
-//
-// The Pause method can be called to stop the CSM client publishing metrics to
-// the CSM agent. The Continue method will resume metric publishing.
-//
-//	// Get the CSM client Reporter.
-//	r := csm.Get()
-//
-//	// Will pause monitoring
-//	r.Pause()
-//	resp, err = client.GetObject(&s3.GetObjectInput{
-//		Bucket: aws.String("bucket"),
-//		Key:    aws.String("key"),
-//	})
-//
-//	// Resume monitoring
-//	r.Continue()
-package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
deleted file mode 100644
index 4b19e2800e..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package csm
-
-import (
-	"fmt"
-	"strings"
-	"sync"
-)
-
-var (
-	lock sync.Mutex
-)
-
-const (
-	// DefaultPort is used when no port is specified.
-	DefaultPort = "31000"
-
-	// DefaultHost is the host that will be used when none is specified.
-	DefaultHost = "127.0.0.1"
-)
-
-// AddressWithDefaults returns a CSM address built from the host and port
-// values. If the host or port is not set, default values will be used
-// instead. If host is "localhost" it will be replaced with "127.0.0.1".
-func AddressWithDefaults(host, port string) string {
-	if len(host) == 0 || strings.EqualFold(host, "localhost") {
-		host = DefaultHost
-	}
-
-	if len(port) == 0 {
-		port = DefaultPort
-	}
-
-	// Only an IPv6 host can contain a colon
-	if strings.Contains(host, ":") {
-		return "[" + host + "]:" + port
-	}
-
-	return host + ":" + port
-}
-
-// Start will start a long-running goroutine to capture
-// client side metrics. Calling Start multiple times will only
-// start the metric listener once, and it will panic if a different
-// client ID or port is passed in.
-//
-//	r, err := csm.Start("clientID", "127.0.0.1:31000")
-//	if err != nil {
-//		panic(fmt.Errorf("expected no error, but received %v", err))
-//	}
-//	sess := session.NewSession()
-//	r.InjectHandlers(sess.Handlers)
-//
-//	svc := s3.New(sess)
-//	out, err := svc.GetObject(&s3.GetObjectInput{
-//		Bucket: aws.String("bucket"),
-//		Key:    aws.String("key"),
-//	})
-func Start(clientID string, url string) (*Reporter, error) {
-	lock.Lock()
-	defer lock.Unlock()
-
-	if sender == nil {
-		sender = newReporter(clientID, url)
-	} else {
-		if sender.clientID != clientID {
-			panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
-		}
-
-		if sender.url != url {
-			panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
-		}
-	}
-
-	if err := connect(url); err != nil {
-		sender = nil
-		return nil, err
-	}
-
-	return sender, nil
-}
-
-// Get will return a reporter if one exists; if one does not exist, nil will
-// be returned.
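-// Get takes the same package-level lock as Start, so it is safe to call
-// concurrently with Start from multiple goroutines.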
-func Get() *Reporter { - lock.Lock() - defer lock.Unlock() - - return sender -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go deleted file mode 100644 index 5bacc791a1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go +++ /dev/null @@ -1,109 +0,0 @@ -package csm - -import ( - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" -) - -type metricTime time.Time - -func (t metricTime) MarshalJSON() ([]byte, error) { - ns := time.Duration(time.Time(t).UnixNano()) - return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil -} - -type metric struct { - ClientID *string `json:"ClientId,omitempty"` - API *string `json:"Api,omitempty"` - Service *string `json:"Service,omitempty"` - Timestamp *metricTime `json:"Timestamp,omitempty"` - Type *string `json:"Type,omitempty"` - Version *int `json:"Version,omitempty"` - - AttemptCount *int `json:"AttemptCount,omitempty"` - Latency *int `json:"Latency,omitempty"` - - Fqdn *string `json:"Fqdn,omitempty"` - UserAgent *string `json:"UserAgent,omitempty"` - AttemptLatency *int `json:"AttemptLatency,omitempty"` - - SessionToken *string `json:"SessionToken,omitempty"` - Region *string `json:"Region,omitempty"` - AccessKey *string `json:"AccessKey,omitempty"` - HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` - XAmzID2 *string `json:"XAmzId2,omitempty"` - XAmzRequestID *string `json:"XAmznRequestId,omitempty"` - - AWSException *string `json:"AwsException,omitempty"` - AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` - SDKException *string `json:"SdkException,omitempty"` - SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` - - FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` - FinalAWSException *string `json:"FinalAwsException,omitempty"` - FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` - FinalSDKException *string `json:"FinalSdkException,omitempty"` - FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` - - DestinationIP *string `json:"DestinationIp,omitempty"` - ConnectionReused *int `json:"ConnectionReused,omitempty"` - - AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` - ConnectLatency *int `json:"ConnectLatency,omitempty"` - RequestLatency *int `json:"RequestLatency,omitempty"` - DNSLatency *int `json:"DnsLatency,omitempty"` - TCPLatency *int `json:"TcpLatency,omitempty"` - SSLLatency *int `json:"SslLatency,omitempty"` - - MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` -} - -func (m *metric) TruncateFields() { - m.ClientID = truncateString(m.ClientID, 255) - m.UserAgent = truncateString(m.UserAgent, 256) - - m.AWSException = truncateString(m.AWSException, 128) - m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) - - m.SDKException = truncateString(m.SDKException, 128) - m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) - - m.FinalAWSException = truncateString(m.FinalAWSException, 128) - m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) - - m.FinalSDKException = truncateString(m.FinalSDKException, 128) - m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) -} - -func truncateString(v *string, l int) *string { - if v != nil && len(*v) > l { - nv := (*v)[:l] - return &nv - } - - return v -} - -func (m *metric) SetException(e metricException) { - switch te := e.(type) { - case awsException: - m.AWSException = 
aws.String(te.exception) - m.AWSExceptionMessage = aws.String(te.message) - case sdkException: - m.SDKException = aws.String(te.exception) - m.SDKExceptionMessage = aws.String(te.message) - } -} - -func (m *metric) SetFinalException(e metricException) { - switch te := e.(type) { - case awsException: - m.FinalAWSException = aws.String(te.exception) - m.FinalAWSExceptionMessage = aws.String(te.message) - case sdkException: - m.FinalSDKException = aws.String(te.exception) - m.FinalSDKExceptionMessage = aws.String(te.message) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go deleted file mode 100644 index 82a3e345e9..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go +++ /dev/null @@ -1,55 +0,0 @@ -package csm - -import ( - "sync/atomic" -) - -const ( - runningEnum = iota - pausedEnum -) - -var ( - // MetricsChannelSize of metrics to hold in the channel - MetricsChannelSize = 100 -) - -type metricChan struct { - ch chan metric - paused *int64 -} - -func newMetricChan(size int) metricChan { - return metricChan{ - ch: make(chan metric, size), - paused: new(int64), - } -} - -func (ch *metricChan) Pause() { - atomic.StoreInt64(ch.paused, pausedEnum) -} - -func (ch *metricChan) Continue() { - atomic.StoreInt64(ch.paused, runningEnum) -} - -func (ch *metricChan) IsPaused() bool { - v := atomic.LoadInt64(ch.paused) - return v == pausedEnum -} - -// Push will push metrics to the metric channel if the channel -// is not paused -func (ch *metricChan) Push(m metric) bool { - if ch.IsPaused() { - return false - } - - select { - case ch.ch <- m: - return true - default: - return false - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go deleted file mode 100644 index 54a99280ce..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go +++ /dev/null @@ -1,26 +0,0 @@ -package csm - -type metricException interface { - Exception() string - Message() string -} - -type requestException struct { - exception string - message string -} - -func (e requestException) Exception() string { - return e.exception -} -func (e requestException) Message() string { - return e.message -} - -type awsException struct { - requestException -} - -type sdkException struct { - requestException -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go deleted file mode 100644 index 835bcd49cb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ /dev/null @@ -1,264 +0,0 @@ -package csm - -import ( - "encoding/json" - "net" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Reporter will gather metrics of API requests made and -// send those metrics to the CSM endpoint. 
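-// Metrics are buffered on an internal channel; once Start has established
-// the UDP connection, each metric is serialized as JSON and written to the
-// CSM agent (see newReporter, connect, and start below).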
-type Reporter struct { - clientID string - url string - conn net.Conn - metricsCh metricChan - done chan struct{} -} - -var ( - sender *Reporter -) - -func connect(url string) error { - const network = "udp" - if err := sender.connect(network, url); err != nil { - return err - } - - if sender.done == nil { - sender.done = make(chan struct{}) - go sender.start() - } - - return nil -} - -func newReporter(clientID, url string) *Reporter { - return &Reporter{ - clientID: clientID, - url: url, - metricsCh: newMetricChan(MetricsChannelSize), - } -} - -func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { - if rep == nil { - return - } - - now := time.Now() - creds, _ := r.Config.Credentials.Get() - - m := metric{ - ClientID: aws.String(rep.clientID), - API: aws.String(r.Operation.Name), - Service: aws.String(r.ClientInfo.ServiceID), - Timestamp: (*metricTime)(&now), - UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), - Region: r.Config.Region, - Type: aws.String("ApiCallAttempt"), - Version: aws.Int(1), - - XAmzRequestID: aws.String(r.RequestID), - - AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), - AccessKey: aws.String(creds.AccessKeyID), - } - - if r.HTTPResponse != nil { - m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) - } - - if r.Error != nil { - if awserr, ok := r.Error.(awserr.Error); ok { - m.SetException(getMetricException(awserr)) - } - } - - m.TruncateFields() - rep.metricsCh.Push(m) -} - -func getMetricException(err awserr.Error) metricException { - msg := err.Error() - code := err.Code() - - switch code { - case request.ErrCodeRequestError, - request.ErrCodeSerialization, - request.CanceledErrorCode: - return sdkException{ - requestException{exception: code, message: msg}, - } - default: - return awsException{ - requestException{exception: code, message: msg}, - } - } -} - -func (rep *Reporter) sendAPICallMetric(r *request.Request) { - if rep == nil { - return - } - - now := time.Now() - m := metric{ - ClientID: aws.String(rep.clientID), - API: aws.String(r.Operation.Name), - Service: aws.String(r.ClientInfo.ServiceID), - Timestamp: (*metricTime)(&now), - UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), - Type: aws.String("ApiCall"), - AttemptCount: aws.Int(r.RetryCount + 1), - Region: r.Config.Region, - Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), - XAmzRequestID: aws.String(r.RequestID), - MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), - } - - if r.HTTPResponse != nil { - m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) - } - - if r.Error != nil { - if awserr, ok := r.Error.(awserr.Error); ok { - m.SetFinalException(getMetricException(awserr)) - } - } - - m.TruncateFields() - - // TODO: Probably want to figure something out for logging dropped - // metrics - rep.metricsCh.Push(m) -} - -func (rep *Reporter) connect(network, url string) error { - if rep.conn != nil { - rep.conn.Close() - } - - conn, err := net.Dial(network, url) - if err != nil { - return awserr.New("UDPError", "Could not connect", err) - } - - rep.conn = conn - - return nil -} - -func (rep *Reporter) close() { - if rep.done != nil { - close(rep.done) - } - - rep.metricsCh.Pause() -} - -func (rep *Reporter) start() { - defer func() { - rep.metricsCh.Pause() - }() - - for { - select { - case <-rep.done: - rep.done = nil - return - case m := <-rep.metricsCh.ch: - // TODO: What to do with this error? 
Probably should just log - b, err := json.Marshal(m) - if err != nil { - continue - } - - rep.conn.Write(b) - } - } -} - -// Pause will pause the metric channel preventing any new metrics from being -// added. It is safe to call concurrently with other calls to Pause, but if -// called concurently with Continue can lead to unexpected state. -func (rep *Reporter) Pause() { - lock.Lock() - defer lock.Unlock() - - if rep == nil { - return - } - - rep.close() -} - -// Continue will reopen the metric channel and allow for monitoring to be -// resumed. It is safe to call concurrently with other calls to Continue, but -// if called concurently with Pause can lead to unexpected state. -func (rep *Reporter) Continue() { - lock.Lock() - defer lock.Unlock() - if rep == nil { - return - } - - if !rep.metricsCh.IsPaused() { - return - } - - rep.metricsCh.Continue() -} - -// Client side metric handler names -const ( - APICallMetricHandlerName = "awscsm.SendAPICallMetric" - APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" -) - -// InjectHandlers will will enable client side metrics and inject the proper -// handlers to handle how metrics are sent. -// -// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers -// multiple times may lead to unexpected behavior, (e.g. duplicate metrics). -// -// // Start must be called in order to inject the correct handlers -// r, err := csm.Start("clientID", "127.0.0.1:8094") -// if err != nil { -// panic(fmt.Errorf("expected no error, but received %v", err)) -// } -// -// sess := session.NewSession() -// r.InjectHandlers(&sess.Handlers) -// -// // create a new service client with our client side metric session -// svc := s3.New(sess) -func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { - if rep == nil { - return - } - - handlers.Complete.PushFrontNamed(request.NamedHandler{ - Name: APICallMetricHandlerName, - Fn: rep.sendAPICallMetric, - }) - - handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ - Name: APICallAttemptMetricHandlerName, - Fn: rep.sendAPICallAttemptMetric, - }) -} - -// boolIntValue return 1 for true and 0 for false. -func boolIntValue(b bool) int { - if b { - return 1 - } - - return 0 -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go deleted file mode 100644 index 23bb639e01..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ /dev/null @@ -1,207 +0,0 @@ -// Package defaults is a collection of helpers to retrieve the SDK's default -// configuration and handlers. -// -// Generally this package shouldn't be used directly, but session.Session -// instead. This package is useful when you need to reset the defaults -// of a session or service client to the SDK defaults before setting -// additional parameters. -package defaults - -import ( - "fmt" - "net" - "net/http" - "net/url" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// A Defaults provides a collection of default values for SDK clients. 
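The `Reporter.start` loop deleted above drains the metric channel, JSON-encodes each metric, and fires it as a single UDP datagram at the CSM agent. A standalone sketch of that emit path; the address comes from the `InjectHandlers` doc example above, and the struct is a stand-in, not the SDK's metric type:

```go
package main

import (
	"encoding/json"
	"log"
	"net"
)

// sample is a placeholder payload, not the SDK's metric type.
type sample struct {
	API     string `json:"Api"`
	Service string `json:"Service"`
	Type    string `json:"Type"`
}

func main() {
	// Address taken from the InjectHandlers doc example above.
	conn, err := net.Dial("udp", "127.0.0.1:8094")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	b, err := json.Marshal(sample{API: "GetObject", Service: "S3", Type: "ApiCall"})
	if err != nil {
		log.Fatal(err)
	}
	// One datagram per metric; UDP is fire-and-forget, which is why the
	// reporter can afford to ignore an absent listener.
	if _, err := conn.Write(b); err != nil {
		log.Println("write failed:", err)
	}
}
```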
-type Defaults struct { - Config *aws.Config - Handlers request.Handlers -} - -// Get returns the SDK's default values with Config and handlers pre-configured. -func Get() Defaults { - cfg := Config() - handlers := Handlers() - cfg.Credentials = CredChain(cfg, handlers) - - return Defaults{ - Config: cfg, - Handlers: handlers, - } -} - -// Config returns the default configuration without credentials. -// To retrieve a config with credentials also included use -// `defaults.Get().Config` instead. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the configuration of an -// existing service client or session. -func Config() *aws.Config { - return aws.NewConfig(). - WithCredentials(credentials.AnonymousCredentials). - WithRegion(os.Getenv("AWS_REGION")). - WithHTTPClient(http.DefaultClient). - WithMaxRetries(aws.UseServiceDefaultRetries). - WithLogger(aws.NewDefaultLogger()). - WithLogLevel(aws.LogOff). - WithEndpointResolver(endpoints.DefaultResolver()) -} - -// Handlers returns the default request handlers. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the request handlers of an -// existing service client or session. -func Handlers() request.Handlers { - var handlers request.Handlers - - handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) - handlers.Validate.AfterEachFn = request.HandlerListStopOnError - handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) - handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) - handlers.Build.AfterEachFn = request.HandlerListStopOnError - handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) - handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) - handlers.Send.PushBackNamed(corehandlers.SendHandler) - handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) - handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) - - return handlers -} - -// CredChain returns the default credential chain. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the credentials of an -// existing service client or session's Config. -func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { - return credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: CredProviders(cfg, handlers), - }) -} - -// CredProviders returns the slice of providers used in -// the default credential chain. -// -// For applications that need to use some other provider (for example use -// different environment variables for legacy reasons) but still fall back -// on the default chain of providers. This allows that default chaint to be -// automatically updated -func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider { - return []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - RemoteCredProvider(*cfg, handlers), - } -} - -const ( - httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" - httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" -) - -// RemoteCredProvider returns a credentials provider for the default remote -// endpoints such as EC2 or ECS Roles. 
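`CredProviders` above is exported precisely so applications can keep the default env file, shared file, and remote (EC2/ECS) fallback order while adding their own provider in front. A hedged sketch of that use case; the "AKID"/"SECRET" values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	def := defaults.Get()

	// Put a custom provider first; the defaults remain as fallbacks.
	providers := append(
		[]credentials.Provider{&credentials.StaticProvider{
			Value: credentials.Value{AccessKeyID: "AKID", SecretAccessKey: "SECRET"},
		}},
		defaults.CredProviders(def.Config, def.Handlers)...,
	)

	creds := credentials.NewChainCredentials(providers)
	v, err := creds.Get()
	fmt.Println(v.ProviderName, err)
}
```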
-func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { - return localHTTPCredProvider(cfg, handlers, u) - } - - if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { - u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) - return httpCredProvider(cfg, handlers, u) - } - - return ec2RoleProvider(cfg, handlers) -} - -var lookupHostFn = net.LookupHost - -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil - } - - // Host is not an ip, perform lookup - addrs, err := lookupHostFn(host) - if err != nil { - return false, err - } - for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { - return false, nil - } - } - - return true, nil -} - -func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { - var errMsg string - - parsed, err := url.Parse(u) - if err != nil { - errMsg = fmt.Sprintf("invalid URL, %v", err) - } else { - host := aws.URLHostname(parsed) - if len(host) == 0 { - errMsg = "unable to parse host from local HTTP cred provider URL" - } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { - errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) - } else if !isLoopback { - errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) - } - } - - if len(errMsg) > 0 { - if cfg.Logger != nil { - cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) - } - return credentials.ErrorProvider{ - Err: awserr.New("CredentialsEndpointError", errMsg, err), - ProviderName: endpointcreds.ProviderName, - } - } - - return httpCredProvider(cfg, handlers, u) -} - -func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { - return endpointcreds.NewProviderClient(cfg, handlers, u, - func(p *endpointcreds.Provider) { - p.ExpiryWindow = 5 * time.Minute - p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) - }, - ) -} - -func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - resolver := cfg.EndpointResolver - if resolver == nil { - resolver = endpoints.DefaultResolver() - } - - e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") - return &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), - ExpiryWindow: 5 * time.Minute, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go deleted file mode 100644 index ca0ee1dcc7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go +++ /dev/null @@ -1,27 +0,0 @@ -package defaults - -import ( - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return shareddefaults.SharedCredentialsFilename() -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. 
-// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return shareddefaults.SharedConfigFilename() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go deleted file mode 100644 index 4fcb616184..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/doc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Package aws provides the core SDK's utilities and shared types. Use this package's -// utilities to simplify setting and reading API operations parameters. -// -// Value and Pointer Conversion Utilities -// -// This package includes a helper conversion utility for each scalar type the SDK's -// API use. These utilities make getting a pointer of the scalar, and dereferencing -// a pointer easier. -// -// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. -// The Pointer to value will safely dereference the pointer and return its value. -// If the pointer was nil, the scalar's zero value will be returned. -// -// The value to pointer functions will be named after the scalar type. So get a -// *string from a string value use the "String" function. This makes it easy to -// to get pointer of a literal string value, because getting the address of a -// literal requires assigning the value to a variable first. -// -// var strPtr *string -// -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str -// -// // With the SDK's conversion functions -// strPtr = aws.String("my string") -// -// // Convert *string to string value -// str = aws.StringValue(strPtr) -// -// In addition to scalars the aws package also includes conversion utilities for -// map and slice for commonly types used in API parameters. The map and slice -// conversion functions use similar naming pattern as the scalar conversion -// functions. -// -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} -// -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) -// -// // Convert []*string to []string -// strs = aws.StringValueSlice(strPtrs) -// -// SDK Default HTTP Client -// -// The SDK will use the http.DefaultClient if a HTTP client is not provided to -// the SDK's Session, or service client constructor. This means that if the -// http.DefaultClient is modified by other components of your application the -// modifications will be picked up by the SDK as well. -// -// In some cases this might be intended, but it is a better practice to create -// a custom HTTP Client to share explicitly through your application. You can -// configure the SDK to use the custom HTTP Client by setting the HTTPClient -// value of the SDK's Config type when creating a Session or service client. -package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go deleted file mode 100644 index 69fa63dc08..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ /dev/null @@ -1,250 +0,0 @@ -package ec2metadata - -import ( - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkuri" -) - -// getToken uses the duration to return a token for EC2 metadata service, -// or an error if the request failed. 
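`getToken`, whose doc comment ends above, performs the IMDSv2 handshake: PUT `/latest/api/token` with a TTL header, then present the returned token on metadata reads. A standalone sketch with plain `net/http`; the header names match the `ttlHeader`/`tokenHeader` constants in service.go below, and the calls only succeed on an actual EC2 instance:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	base := "http://169.254.169.254" // link-local IMDS address on EC2
	client := &http.Client{Timeout: time.Second}

	// Step 1: PUT a token request; the TTL header caps the token lifetime.
	req, _ := http.NewRequest(http.MethodPut, base+"/latest/api/token", nil)
	req.Header.Set("x-aws-ec2-metadata-token-ttl-seconds", "21600")
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("IMDS unreachable:", err)
		return
	}
	token, _ := io.ReadAll(resp.Body)
	resp.Body.Close()

	// Step 2: present the token on every metadata read.
	req, _ = http.NewRequest(http.MethodGet, base+"/latest/meta-data/instance-id", nil)
	req.Header.Set("x-aws-ec2-metadata-token", string(token))
	resp, err = client.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	id, _ := io.ReadAll(resp.Body)
	fmt.Println("instance-id:", string(id))
}
```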
-func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { - op := &request.Operation{ - Name: "GetToken", - HTTPMethod: "PUT", - HTTPPath: "/latest/api/token", - } - - var output tokenOutput - req := c.NewRequest(op, nil, &output) - req.SetContext(ctx) - - // remove the fetch token handler from the request handlers to avoid infinite recursion - req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) - - // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. - req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) - - ttl := strconv.FormatInt(int64(duration/time.Second), 10) - req.HTTPRequest.Header.Set(ttlHeader, ttl) - - err := req.Send() - - // Errors with bad request status should be returned. - if err != nil { - err = awserr.NewRequestFailure( - awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), - req.HTTPResponse.StatusCode, req.RequestID) - } - - return output, err -} - -// GetMetadata uses the path provided to request information from the EC2 -// instance metadata service. The content will be returned as a string, or -// error if the request failed. -func (c *EC2Metadata) GetMetadata(p string) (string, error) { - return c.GetMetadataWithContext(aws.BackgroundContext(), p) -} - -// GetMetadataWithContext uses the path provided to request information from the EC2 -// instance metadata service. The content will be returned as a string, or -// error if the request failed. -func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { - op := &request.Operation{ - Name: "GetMetadata", - HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/latest/meta-data", p), - } - output := &metadataOutput{} - - req := c.NewRequest(op, nil, output) - - req.SetContext(ctx) - - err := req.Send() - return output.Content, err -} - -// GetUserData returns the userdata that was configured for the service. If -// there is no user-data setup for the EC2 instance a "NotFoundError" error -// code will be returned. -func (c *EC2Metadata) GetUserData() (string, error) { - return c.GetUserDataWithContext(aws.BackgroundContext()) -} - -// GetUserDataWithContext returns the userdata that was configured for the service. If -// there is no user-data setup for the EC2 instance a "NotFoundError" error -// code will be returned. -func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { - op := &request.Operation{ - Name: "GetUserData", - HTTPMethod: "GET", - HTTPPath: "/latest/user-data", - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - req.SetContext(ctx) - - err := req.Send() - return output.Content, err -} - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *EC2Metadata) GetDynamicData(p string) (string, error) { - return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) -} - -// GetDynamicDataWithContext uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. 
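A brief usage sketch of the accessors above, assuming a standard session; `GetMetadata` joins its argument onto `/latest/meta-data`, and `GetUserData` yields a "NotFoundError"-coded error when the instance has no user data:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	// Joined onto /latest/meta-data by GetMetadata.
	id, err := svc.GetMetadata("instance-id")
	if err != nil {
		fmt.Println("metadata unavailable (not on EC2?):", err)
		return
	}
	fmt.Println("instance-id:", id)

	// Errors with a "NotFoundError" code when no user data is configured.
	if userData, err := svc.GetUserData(); err == nil {
		fmt.Println("user-data:", userData)
	}
}
```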
-func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { - op := &request.Operation{ - Name: "GetDynamicData", - HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/latest/dynamic", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - req.SetContext(ctx) - - err := req.Send() - return output.Content, err -} - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) -} - -// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") - if err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 instance identity document", err) - } - - doc := EC2InstanceIdentityDocument{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New(request.ErrCodeSerialization, - "failed to decode EC2 instance identity document", err) - } - - return doc, nil -} - -// IAMInfo retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - return c.IAMInfoWithContext(aws.BackgroundContext()) -} - -// IAMInfoWithContext retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { - resp, err := c.GetMetadataWithContext(ctx, "iam/info") - if err != nil { - return EC2IAMInfo{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 IAM info", err) - } - - info := EC2IAMInfo{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { - return EC2IAMInfo{}, - awserr.New(request.ErrCodeSerialization, - "failed to decode EC2 IAM info", err) - } - - if info.Code != "Success" { - errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) - return EC2IAMInfo{}, - awserr.New("EC2MetadataError", errMsg, nil) - } - - return info, nil -} - -// Region returns the region the instance is running in. -func (c *EC2Metadata) Region() (string, error) { - return c.RegionWithContext(aws.BackgroundContext()) -} - -// RegionWithContext returns the region the instance is running in. -func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { - ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) - if err != nil { - return "", err - } - // extract region from the ec2InstanceIdentityDocument - region := ec2InstanceIdentityDocument.Region - if len(region) == 0 { - return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) - } - // returns region - return region, nil -} - -// Available returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. 
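`Region()` above is just `GetInstanceIdentityDocument()` plus a field read; the identity document also carries the account ID, availability zone, and instance type. A short sketch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	svc := ec2metadata.New(session.Must(session.NewSession()))

	doc, err := svc.GetInstanceIdentityDocument()
	if err != nil {
		fmt.Println("not on EC2 or IMDS unreachable:", err)
		return
	}
	// Region() returns doc.Region with an extra empty-string check.
	fmt.Printf("region=%s account=%s az=%s type=%s\n",
		doc.Region, doc.AccountID, doc.AvailabilityZone, doc.InstanceType)
}
```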
-func (c *EC2Metadata) Available() bool { - return c.AvailableWithContext(aws.BackgroundContext()) -} - -// AvailableWithContext returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. -func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { - if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { - return false - } - - return true -} - -// An EC2IAMInfo provides the shape for unmarshaling -// an IAM info from the metadata API -type EC2IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} - -// An EC2InstanceIdentityDocument provides the shape for unmarshaling -// an instance identity document -type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - MarketplaceProductCodes []string `json:"marketplaceProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go deleted file mode 100644 index df63bade10..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ /dev/null @@ -1,245 +0,0 @@ -// Package ec2metadata provides the client for making API calls to the -// EC2 Metadata service. -// -// This package's client can be disabled completely by setting the environment -// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to -// true instructs the SDK to disable the EC2 Metadata client. The client cannot -// be used while the environment variable is set to true, (case insensitive). -// -// The endpoint of the EC2 IMDS client can be configured via the environment -// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a -// Session. See aws/session#Options.EC2IMDSEndpoint for more details. -package ec2metadata - -import ( - "bytes" - "io" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/request" -) - -const ( - // ServiceName is the name of the service. - ServiceName = "ec2metadata" - disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" - - // Headers for Token and TTL - ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" - tokenHeader = "x-aws-ec2-metadata-token" - - // Named Handler constants - fetchTokenHandlerName = "FetchTokenHandler" - unmarshalMetadataHandlerName = "unmarshalMetadataHandler" - unmarshalTokenHandlerName = "unmarshalTokenHandler" - enableTokenProviderHandlerName = "enableTokenProviderHandler" - - // TTL constants - defaultTTL = 21600 * time.Second - ttlExpirationWindow = 30 * time.Second -) - -// A EC2Metadata is an EC2 Metadata service Client. 
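The `AWS_EC2_METADATA_DISABLED` kill switch described in the package comment is checked at client construction, so it must be set before `New`/`NewClient` runs; afterwards every call fails fast with a canceled-request error instead of dialing the metadata address. A sketch:

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Must be set before the client is built: NewClient swaps the send
	// handler for a stub when it sees this variable (case-insensitive "true").
	os.Setenv("AWS_EC2_METADATA_DISABLED", "true")

	svc := ec2metadata.New(session.Must(session.NewSession()))
	fmt.Println(svc.Available()) // false: requests error out without touching the network
}
```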
-type EC2Metadata struct { - *client.Client -} - -// New creates a new instance of the EC2Metadata client with a session. -// This client is safe to use across multiple goroutines. -// -// -// Example: -// // Create a EC2Metadata client from just a session. -// svc := ec2metadata.New(mySession) -// -// // Create a EC2Metadata client with additional configuration -// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { - c := p.ClientConfig(ServiceName, cfgs...) - return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) -} - -// NewClient returns a new EC2Metadata client. Should be used to create -// a client when not using a session. Generally using just New with a session -// is preferred. -// -// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS -// client is able to communicate with the EC2 IMDS API. -// -// If an unmodified HTTP client is provided from the stdlib default, or no client -// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. -// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. -func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { - if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { - // If the http client is unmodified and this feature is not disabled - // set custom timeouts for EC2Metadata requests. - cfg.HTTPClient = &http.Client{ - // use a shorter timeout than default because the metadata - // service is local if it is running, and to fail faster - // if not running on an ec2 instance. - Timeout: 1 * time.Second, - } - // max number of retries on the client operation - cfg.MaxRetries = aws.Int(2) - } - - if u, err := url.Parse(endpoint); err == nil { - // Remove path from the endpoint since it will be added by requests. - // This is an artifact of the SDK adding `/latest` to the endpoint for - // EC2 IMDS, but this is now moved to the operation definition. - u.Path = "" - u.RawPath = "" - endpoint = u.String() - } - - svc := &EC2Metadata{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceName, - Endpoint: endpoint, - APIVersion: "latest", - }, - handlers, - ), - } - - // token provider instance - tp := newTokenProvider(svc, defaultTTL) - - // NamedHandler for fetching token - svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ - Name: fetchTokenHandlerName, - Fn: tp.fetchTokenHandler, - }) - // NamedHandler for enabling token provider - svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ - Name: enableTokenProviderHandlerName, - Fn: tp.enableTokenProviderHandler, - }) - - svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) - svc.Handlers.UnmarshalError.PushBack(unmarshalError) - svc.Handlers.Validate.Clear() - svc.Handlers.Validate.PushBack(validateEndpointHandler) - - // Disable the EC2 Metadata service if the environment variable is set. - // This short-circuits the service's functionality to always fail to send - // requests. 
- if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { - svc.Handlers.Send.SwapNamed(request.NamedHandler{ - Name: corehandlers.SendHandler.Name, - Fn: func(r *request.Request) { - r.HTTPResponse = &http.Response{ - Header: http.Header{}, - } - r.Error = awserr.New( - request.CanceledErrorCode, - "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", - nil) - }, - }) - } - - // Add additional options to the service config - for _, option := range opts { - option(svc.Client) - } - return svc -} - -func httpClientZero(c *http.Client) bool { - return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) -} - -type metadataOutput struct { - Content string -} - -type tokenOutput struct { - Token string - TTL time.Duration -} - -// unmarshal token handler is used to parse the response of a getToken operation -var unmarshalTokenHandler = request.NamedHandler{ - Name: unmarshalTokenHandlerName, - Fn: func(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, - "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - - v := r.HTTPResponse.Header.Get(ttlHeader) - data, ok := r.Data.(*tokenOutput) - if !ok { - return - } - - data.Token = b.String() - // TTL is in seconds - i, err := strconv.ParseInt(v, 10, 64) - if err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, - "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - t := time.Duration(i) * time.Second - data.TTL = t - }, -} - -var unmarshalHandler = request.NamedHandler{ - Name: unmarshalMetadataHandlerName, - Fn: func(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, - "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } - }, -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), - r.HTTPResponse.StatusCode, r.RequestID) - return - } - - // Response body format is not consistent between metadata endpoints. 
- // Grab the error message as a string and include that as the source error - r.Error = awserr.NewRequestFailure( - awserr.New("EC2MetadataError", "failed to make EC2Metadata request\n"+b.String(), nil), - r.HTTPResponse.StatusCode, r.RequestID) -} - -func validateEndpointHandler(r *request.Request) { - if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go deleted file mode 100644 index 4b29f190bf..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go +++ /dev/null @@ -1,93 +0,0 @@ -package ec2metadata - -import ( - "net/http" - "sync/atomic" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A tokenProvider struct provides access to EC2Metadata client -// and atomic instance of a token, along with configuredTTL for it. -// tokenProvider also provides an atomic flag to disable the -// fetch token operation. -// The disabled member will use 0 as false, and 1 as true. -type tokenProvider struct { - client *EC2Metadata - token atomic.Value - configuredTTL time.Duration - disabled uint32 -} - -// A ec2Token struct helps use of token in EC2 Metadata service ops -type ec2Token struct { - token string - credentials.Expiry -} - -// newTokenProvider provides a pointer to a tokenProvider instance -func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { - return &tokenProvider{client: c, configuredTTL: duration} -} - -// fetchTokenHandler fetches token for EC2Metadata service client by default. -func (t *tokenProvider) fetchTokenHandler(r *request.Request) { - - // short-circuits to insecure data flow if tokenProvider is disabled. - if v := atomic.LoadUint32(&t.disabled); v == 1 { - return - } - - if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { - r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) - return - } - - output, err := t.client.getToken(r.Context(), t.configuredTTL) - - if err != nil { - - // change the disabled flag on token provider to true, - // when error is request timeout error. - if requestFailureError, ok := err.(awserr.RequestFailure); ok { - switch requestFailureError.StatusCode() { - case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: - atomic.StoreUint32(&t.disabled, 1) - case http.StatusBadRequest: - r.Error = requestFailureError - } - - // Check if request timed out while waiting for response - if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { - if e.Code() == request.ErrCodeRequestError { - atomic.StoreUint32(&t.disabled, 1) - } - } - } - return - } - - newToken := ec2Token{ - token: output.Token, - } - newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) - t.token.Store(newToken) - - // Inject token header to the request. 
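`tokenProvider` below caches the IMDSv2 token in an `atomic.Value` and refreshes it only once it nears expiry; `ttlExpirationWindow` trims 30s off the TTL so a token never lapses mid-request. A generic sketch of that cache shape, not the SDK's exact implementation:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type cachedToken struct {
	value   string
	expires time.Time
}

type tokenCache struct {
	cur atomic.Value // holds a cachedToken
}

// get returns the cached token, calling fetch only when the token is within
// 30s of expiring (mirroring ttlExpirationWindow).
func (c *tokenCache) get(fetch func() (string, time.Duration, error)) (string, error) {
	if t, ok := c.cur.Load().(cachedToken); ok &&
		time.Now().Before(t.expires.Add(-30*time.Second)) {
		return t.value, nil
	}
	v, ttl, err := fetch()
	if err != nil {
		return "", err
	}
	c.cur.Store(cachedToken{value: v, expires: time.Now().Add(ttl)})
	return v, nil
}

func main() {
	var c tokenCache
	tok, _ := c.get(func() (string, time.Duration, error) {
		return "tok-1", time.Minute, nil
	})
	fmt.Println(tok) // fetched once, then served from cache until ~30s before expiry
}
```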
- if ec2Token, ok := t.token.Load().(ec2Token); ok { - r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) - } -} - -// enableTokenProviderHandler enables the token provider -func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { - // If the error code status is 401, we enable the token provider - if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && - e.StatusCode() == http.StatusUnauthorized { - t.token.Store(ec2Token{}) - atomic.StoreUint32(&t.disabled, 0) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go deleted file mode 100644 index cad3b9a488..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ /dev/null @@ -1,193 +0,0 @@ -package endpoints - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -type modelDefinition map[string]json.RawMessage - -// A DecodeModelOptions are the options for how the endpoints model definition -// are decoded. -type DecodeModelOptions struct { - SkipCustomizations bool -} - -// Set combines all of the option functions together. -func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// DecodeModel unmarshals a Regions and Endpoint model definition file into -// a endpoint Resolver. If the file format is not supported, or an error occurs -// when unmarshaling the model an error will be returned. -// -// Casting the return value of this func to a EnumPartitions will -// allow you to get a list of the partitions in the order the endpoints -// will be resolved in. -// -// resolver, err := endpoints.DecodeModel(reader) -// -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// for _, p := range partitions { -// // ... inspect partitions -// } -func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { - var opts DecodeModelOptions - opts.Set(optFns...) - - // Get the version of the partition file to determine what - // unmarshaling model to use. - modelDef := modelDefinition{} - if err := json.NewDecoder(r).Decode(&modelDef); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - var version string - if b, ok := modelDef["version"]; ok { - version = string(b) - } else { - return nil, newDecodeModelError("endpoints version not found in model", nil) - } - - if version == "3" { - return decodeV3Endpoints(modelDef, opts) - } - - return nil, newDecodeModelError( - fmt.Sprintf("endpoints version %s, not supported", version), nil) -} - -func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { - b, ok := modelDef["partitions"] - if !ok { - return nil, newDecodeModelError("endpoints model missing partitions", nil) - } - - ps := partitions{} - if err := json.Unmarshal(b, &ps); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - if opts.SkipCustomizations { - return ps, nil - } - - // Customization - for i := 0; i < len(ps); i++ { - p := &ps[i] - custRegionalS3(p) - custRmIotDataService(p) - custFixAppAutoscalingChina(p) - custFixAppAutoscalingUsGov(p) - } - - return ps, nil -} - -func custRegionalS3(p *partition) { - if p.ID != "aws" { - return - } - - service, ok := p.Services["s3"] - if !ok { - return - } - - const awsGlobal = "aws-global" - const usEast1 = "us-east-1" - - // If global endpoint already exists no customization needed. 
- if _, ok := service.Endpoints[endpointKey{Region: awsGlobal}]; ok { - return - } - - service.PartitionEndpoint = awsGlobal - if _, ok := service.Endpoints[endpointKey{Region: usEast1}]; !ok { - service.Endpoints[endpointKey{Region: usEast1}] = endpoint{} - } - service.Endpoints[endpointKey{Region: awsGlobal}] = endpoint{ - Hostname: "s3.amazonaws.com", - CredentialScope: credentialScope{ - Region: usEast1, - }, - } - - p.Services["s3"] = service -} - -func custRmIotDataService(p *partition) { - delete(p.Services, "data.iot") -} - -func custFixAppAutoscalingChina(p *partition) { - if p.ID != "aws-cn" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - const expectHostname = `autoscaling.{region}.amazonaws.com` - serviceDefault := s.Defaults[defaultKey{}] - if e, a := expectHostname, serviceDefault.Hostname; e != a { - fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) - return - } - serviceDefault.Hostname = expectHostname + ".cn" - s.Defaults[defaultKey{}] = serviceDefault - p.Services[serviceName] = s -} - -func custFixAppAutoscalingUsGov(p *partition) { - if p.ID != "aws-us-gov" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - serviceDefault := s.Defaults[defaultKey{}] - if a := serviceDefault.CredentialScope.Service; a != "" { - fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) - return - } - - if a := serviceDefault.Hostname; a != "" { - fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) - return - } - - serviceDefault.CredentialScope.Service = "application-autoscaling" - serviceDefault.Hostname = "autoscaling.{region}.amazonaws.com" - - if s.Defaults == nil { - s.Defaults = make(endpointDefaults) - } - - s.Defaults[defaultKey{}] = serviceDefault - - p.Services[serviceName] = s -} - -type decodeModelError struct { - awsError -} - -func newDecodeModelError(msg string, err error) decodeModelError { - return decodeModelError{ - awsError: awserr.New("DecodeEndpointsModelError", msg, err), - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go deleted file mode 100644 index 3a230db4f5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ /dev/null @@ -1,34364 +0,0 @@ -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - -// Partition identifiers -const ( - AwsPartitionID = "aws" // AWS Standard partition. - AwsCnPartitionID = "aws-cn" // AWS China partition. - AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. - AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. - AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. -) - -// AWS Standard partition's regions. -const ( - AfSouth1RegionID = "af-south-1" // Africa (Cape Town). - ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). - ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). - ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). - ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). - ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). - ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). 
- ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). - ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). - CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). - EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). - EuSouth1RegionID = "eu-south-1" // Europe (Milan). - EuWest1RegionID = "eu-west-1" // Europe (Ireland). - EuWest2RegionID = "eu-west-2" // Europe (London). - EuWest3RegionID = "eu-west-3" // Europe (Paris). - MeCentral1RegionID = "me-central-1" // Middle East (UAE). - MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). - SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). - UsEast1RegionID = "us-east-1" // US East (N. Virginia). - UsEast2RegionID = "us-east-2" // US East (Ohio). - UsWest1RegionID = "us-west-1" // US West (N. California). - UsWest2RegionID = "us-west-2" // US West (Oregon). -) - -// AWS China partition's regions. -const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). - CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). -) - -// AWS GovCloud (US) partition's regions. -const ( - UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). -) - -// AWS ISO (US) partition's regions. -const ( - UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. - UsIsoWest1RegionID = "us-iso-west-1" // US ISO WEST. -) - -// AWS ISOB (US) partition's regions. -const ( - UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). -) - -// DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). -// -// Use DefaultPartitions() to get the list of the default partitions. -func DefaultResolver() Resolver { - return defaultPartitions -} - -// DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). -// -// partitions := endpoints.DefaultPartitions -// for _, p := range partitions { -// // ... inspect partitions -// } -func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() -} - -var defaultPartitions = partitions{ - awsPartition, - awscnPartition, - awsusgovPartition, - awsisoPartition, - awsisobPartition, -} - -// AwsPartition returns the Resolver for AWS Standard. 
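All of this generated endpoint data feeds `Resolver.EndpointFor`, the same call `ec2RoleProvider` in defaults.go above uses to locate the IMDS endpoint. A short sketch of resolving an endpoint by hand:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	resolver := endpoints.DefaultResolver()

	// Resolve the S3 endpoint for us-west-2 from the bundled model.
	e, err := resolver.EndpointFor("s3", endpoints.UsWest2RegionID)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(e.URL, e.SigningRegion) // e.g. https://s3.us-west-2.amazonaws.com us-west-2
}
```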
-func AwsPartition() Partition { - return awsPartition.Partition() -} - -var awsPartition = partition{ - ID: "aws", - Name: "AWS Standard", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "af-south-1": region{ - Description: "Africa (Cape Town)", - }, - "ap-east-1": region{ - Description: "Asia Pacific (Hong Kong)", - }, - "ap-northeast-1": region{ - Description: "Asia Pacific (Tokyo)", - }, - "ap-northeast-2": region{ - Description: "Asia Pacific (Seoul)", - }, - "ap-northeast-3": region{ - Description: "Asia Pacific (Osaka)", - }, - "ap-south-1": region{ - Description: "Asia Pacific (Mumbai)", - }, - "ap-southeast-1": region{ - Description: "Asia Pacific (Singapore)", - }, - "ap-southeast-2": region{ - Description: "Asia Pacific (Sydney)", - }, - "ap-southeast-3": region{ - Description: "Asia Pacific (Jakarta)", - }, - "ca-central-1": region{ - Description: "Canada (Central)", - }, - "eu-central-1": region{ - Description: "Europe (Frankfurt)", - }, - "eu-north-1": region{ - Description: "Europe (Stockholm)", - }, - "eu-south-1": region{ - Description: "Europe (Milan)", - }, - "eu-west-1": region{ - Description: "Europe (Ireland)", - }, - "eu-west-2": region{ - Description: "Europe (London)", - }, - "eu-west-3": region{ - Description: "Europe (Paris)", - }, - "me-central-1": region{ - Description: "Middle East (UAE)", - }, - "me-south-1": region{ - Description: "Middle East (Bahrain)", - }, - "sa-east-1": region{ - Description: "South America (Sao Paulo)", - }, - "us-east-1": region{ - Description: "US East (N. Virginia)", - }, - "us-east-2": region{ - Description: "US East (Ohio)", - }, - "us-west-1": region{ - Description: "US West (N. 
California)", - }, - "us-west-2": region{ - Description: "US West (Oregon)", - }, - }, - Services: services{ - "a4b": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "access-analyzer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", - }, - }, - }, - "account": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: 
endpoint{ - Hostname: "account.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "acm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "acm-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "acm-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "acm-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "acm-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "acm-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "acm-pca": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - 
endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "acm-pca-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "acm-pca-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "acm-pca-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "acm-pca-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.us-west-2.amazonaws.com", - }, - }, - }, - "airflow": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: 
"us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "amplify": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "amplifybackend": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "amplifyuibuilder": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "api.detective": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", 
- }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "api.ecr": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "api.ecr.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "api.ecr.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.ecr.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "api.ecr.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "api.ecr.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, 
- }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "api.ecr.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "api.ecr.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "api.ecr.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "api.ecr.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "api.ecr.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "dkr-us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "api.ecr.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "api.ecr.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "api.ecr.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.ecr.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "api.ecr.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "api.ecr.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "fips-dkr-us-east-1", - }: endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-east-2", - }: endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-west-1", - }: endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-west-2", - }: endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "api.ecr.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "api.ecr.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "api.ecr.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.ecr.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "api.ecr.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "api.ecr.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.ecr.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "api.elastic-inference": service{ - Endpoints: serviceEndpoints{ - 
endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.elastic-inference.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "api.elastic-inference.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.elastic-inference.us-west-2.amazonaws.com", - }, - }, - }, - "api.fleethub.iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "api.iotdeviceadvisor": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.iotdeviceadvisor.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.iotdeviceadvisor.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.iotdeviceadvisor.us-east-1.amazonaws.com", 
- CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.iotdeviceadvisor.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "api.iotwireless": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.iotwireless.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.iotwireless.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.iotwireless.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "api.mediatailor": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "api.pricing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "api.sagemaker": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - 
Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "api.tunneling.iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - 
Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "apigateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apigateway-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "apigateway-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "apigateway-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "apigateway-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "apigateway-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "apigateway-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apigateway-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: 
endpoint{ - Hostname: "apigateway-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apigateway-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apigateway-fips.us-west-2.amazonaws.com", - }, - }, - }, - "app-integrations": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appconfig": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appconfigdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", 
- }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appflow": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "applicationinsights": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: 
"eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appmesh": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-east-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-northeast-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-northeast-2.api.aws", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-northeast-3.api.aws", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-southeast-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-southeast-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-southeast-3.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appmesh-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "appmesh-fips.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "appmesh-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.eu-central-1.api.aws", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.eu-north-1.api.aws", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.eu-south-1.api.aws", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - 
}: endpoint{ - Hostname: "appmesh.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.eu-west-2.api.aws", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.eu-west-3.api.aws", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.me-south-1.api.aws", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.sa-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "appmesh-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "appmesh-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "appmesh-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "appmesh-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "apprunner": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - 
Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "apprunner-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "apprunner-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "apprunner-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apprunner-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apprunner-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apprunner-fips.us-west-2.amazonaws.com", - }, - }, - }, - "appstream2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appstream2-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "appstream2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "appsync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: 
endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "aps": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "athena": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "athena-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "athena-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "athena-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "athena-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - 
endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-west-2.amazonaws.com", - }, - }, - }, - "auditmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "autoscaling-plans": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: 
"ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "backup": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "backup-gateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", 
- }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "batch": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fips.batch.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fips.batch.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fips.batch.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fips.batch.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-west-2.amazonaws.com", - }, - }, - }, - "billingconductor": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "billingconductor.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "braket": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - 
"budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "budgets.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cases": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cassandra": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cassandra-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cassandra-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cassandra-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cassandra-fips.us-west-2.amazonaws.com", - }, - }, - }, - "catalog.marketplace": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "ce.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "chime": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "chime.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloud9": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: 
"ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudcontrolapi": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: 
"us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", - }, - }, - }, - "clouddirectory": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudformation-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "cloudformation-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudformation-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "cloudformation-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudformation-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "cloudformation-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudformation-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: 
"cloudformation-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloudhsm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudsearch": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudtrail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: 
endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", - }, - }, - }, - "codeartifact": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "codebuild": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: 
endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codecommit": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: 
"us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: 
"us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codeguru-reviewer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "codepipeline": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "codepipeline-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "codepipeline-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "codepipeline-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "codepipeline-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: 
endpoint{ - Hostname: "codepipeline-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.us-west-2.amazonaws.com", - }, - }, - }, - "codestar": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "codestar-connections": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "codestar-notifications": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - 
"cognito-identity": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", - }, - }, - }, - "cognito-idp": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", - }, - }, - }, - "cognito-sync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "comprehend-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "comprehend-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "comprehend-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehend-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehend-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehend-fips.us-west-2.amazonaws.com", - }, - }, - }, - 
"comprehendmedical": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", - }, - }, - }, - "compute-optimizer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "compute-optimizer.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "compute-optimizer.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "compute-optimizer.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "compute-optimizer.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "compute-optimizer.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "compute-optimizer.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "compute-optimizer.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "compute-optimizer.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "compute-optimizer.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "compute-optimizer.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - 
Hostname: "compute-optimizer.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "compute-optimizer.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "compute-optimizer.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "compute-optimizer.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "compute-optimizer.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "compute-optimizer.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "compute-optimizer.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "compute-optimizer.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "compute-optimizer.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "compute-optimizer.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "compute-optimizer.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "config-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "config-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "config-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"fips-us-west-2", - }: endpoint{ - Hostname: "config-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config-fips.us-west-2.amazonaws.com", - }, - }, - }, - "connect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "connect-campaigns": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com", - }, - }, - }, - "contact-lens": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "controltower": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: 
"ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "controltower-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "controltower-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "controltower-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "controltower-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "controltower-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "controltower-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "controltower-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "controltower-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "cur": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "data-ats.iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "iotdata", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "data.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "data.iot-fips.us-east-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "data.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "data.iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "data.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "data.jobs.iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - 
Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "data.mediastore": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "databrew": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "databrew-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "databrew-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "databrew-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "databrew-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "databrew-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"databrew-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "databrew-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "databrew-fips.us-west-2.amazonaws.com", - }, - }, - }, - "dataexchange": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "datapipeline": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "datasync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "datasync-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "datasync-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "datasync-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "datasync-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "datasync-fips.us-west-2.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-west-2.amazonaws.com", - }, - }, - }, - "dax": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "devicefarm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "devops-guru": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "devops-guru-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "devops-guru-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "devops-guru-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "devops-guru-fips.us-east-1.amazonaws.com", - }, - 
endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "devops-guru-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "devops-guru-fips.us-west-2.amazonaws.com", - }, - }, - }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "directconnect-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "directconnect-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "directconnect-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "directconnect-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-west-2.amazonaws.com", - }, - }, - }, - "discovery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - 
}: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "dlm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "dms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "dms", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms-fips", - }: endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "dms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", 
- }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "dms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "dms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "docdb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "rds.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-west-2", - }, - }, - }, - }, - "drs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: 
endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-west-2.amazonaws.com", - }, - }, - }, - "dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "local", - }: endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: 
"us-west-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ebs-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ebs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ebs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ebs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ebs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-west-2.amazonaws.com", - }, - }, 
- }, - "ec2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ec2-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ec2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ec2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ec2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ec2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.sa-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"ec2-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ecs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ecs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ecs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ecs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "edge.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "eks": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: 
[]string{"http", "https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.{region}.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fips.eks.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fips.eks.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fips.eks.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fips.eks.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.us-west-2.amazonaws.com", - }, - }, - }, - "elasticache": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - 
endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "elasticache-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "elasticache-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "elasticache-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "elasticbeanstalk": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - 
Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", - }, - }, - }, - "elasticfilesystem": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", - }, - 
endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", - }, - endpointKey{ - Region: "fips-af-south-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-3", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-3", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: 
"elasticfilesystem-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-north-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-central-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-south-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", - }, - 
endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", - }, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", - }, - }, - }, - 
"elasticmapreduce": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", - }, - }, - }, - "elastictranscoder": service{ - Endpoints: serviceEndpoints{ - 
endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "email": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "email-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "email-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "email-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "email-fips.us-west-2.amazonaws.com", - }, - }, - }, - "emr-containers": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "emr-containers-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "emr-containers-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "emr-containers-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "emr-containers-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.us-west-2.amazonaws.com", - }, - }, - }, - "emr-serverless": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "emr-serverless-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "emr-serverless-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - 
endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-serverless-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-serverless-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", - }, - }, - }, - "entitlement.marketplace": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "es": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "es-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "es-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - 
Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "es-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "events-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "events-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "events-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "events-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events-fips.us-west-2.amazonaws.com", - }, - }, - }, - "evidently": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "evidently.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "evidently.ap-southeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "evidently.ap-southeast-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "evidently.eu-central-1.amazonaws.com", - }, 
- endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "evidently.eu-north-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "evidently.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "evidently.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "evidently.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "evidently.us-west-2.amazonaws.com", - }, - }, - }, - "finspace": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "finspace-api": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "firehose": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "firehose-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "firehose-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "firehose-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "firehose-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: 
"us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-west-2.amazonaws.com", - }, - }, - }, - "fms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.af-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-east-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-southeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-southeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-south-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-west-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-west-3.amazonaws.com", - }, - endpointKey{ - Region: "fips-af-south-1", - }: endpoint{ - Hostname: "fms-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-east-1", - }: endpoint{ - Hostname: "fms-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"fips-ap-northeast-1", - }: endpoint{ - Hostname: "fms-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: "fms-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: "fms-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "fms-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: endpoint{ - Hostname: "fms-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "fms-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: "fms-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-1", - }: endpoint{ - Hostname: "fms-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "fms-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "fms-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "fms-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-south-1", - }: endpoint{ - Hostname: "fms-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "fms-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: 
"me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.me-south-1.amazonaws.com", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.sa-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-west-2.amazonaws.com", - }, - }, - }, - "forecast": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "forecast-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "forecast-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "forecast-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecast-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecast-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecast-fips.us-west-2.amazonaws.com", - }, - }, - }, - "forecastquery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "forecastquery-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "forecastquery-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - 
}, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "forecastquery-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecastquery-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecastquery-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecastquery-fips.us-west-2.amazonaws.com", - }, - }, - }, - "frauddetector": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "fsx": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-ca-central-1", - }: endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-east-1", - }: endpoint{ - Hostname: "fsx-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-east-2", - }: endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-west-1", - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-west-2", - }: endpoint{ - Hostname: "fsx-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - 
Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fsx-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fsx-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "prod-ca-central-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-2.amazonaws.com", - }, - }, - }, - "gamelift": 
service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "gamesparks": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "geo": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "glacier-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "glacier-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: 
boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "glacier-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "glacier-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "glacier-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.us-west-2.amazonaws.com", - }, - }, - }, - "glue": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "glue-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "glue-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "glue-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "glue-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"glue-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-west-2.amazonaws.com", - }, - }, - }, - "grafana": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "grafana.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "grafana.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "grafana.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "grafana.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "grafana.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "grafana.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "grafana.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "grafana.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "grafana.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "grafana.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "groundstation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - 
endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "groundstation-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "groundstation-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "groundstation-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "groundstation-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "groundstation-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "groundstation-fips.us-west-2.amazonaws.com", - }, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "guardduty-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "guardduty-fips.us-east-2.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "guardduty-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "guardduty-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "health": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "health.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "global.health.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "health-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "health-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "healthlake": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "honeycode": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "iam.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global-fips", - }: endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam-fips", - }: endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "identity-chime": service{ 
- Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "identity-chime-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "identity-chime-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "identitystore": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", - }, - }, - }, - }, - "ingest.timestream": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "ingest-fips-us-east-1", - }: endpoint{ - Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "ingest-fips-us-east-2", - }: endpoint{ - Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "ingest-fips-us-west-2", - }: endpoint{ - Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "inspector": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "inspector-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: 
"inspector-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "inspector-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "inspector-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-west-2.amazonaws.com", - }, - }, - }, - "inspector2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: 
"iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "iotanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "iotevents": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotevents-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "iotevents-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "iotevents-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: 
"iotevents-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "iotevents-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotevents-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotevents-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotevents-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ioteventsdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "data.iotevents.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "data.iotevents.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "data.iotevents.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "data.iotevents.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "data.iotevents.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: 
endpoint{ - Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "data.iotevents.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "data.iotevents.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "data.iotevents.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "iotfleetwise": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "iotsecuredtunneling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: 
"api.tunneling.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "iotsitewise": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", - }, - }, - }, - "iotthingsgraph": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "iotthingsgraph", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - 
Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "iottwinmaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "iotwireless": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.iotwireless.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.iotwireless.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.iotwireless.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "ivs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "ivschat": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kafka": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - 
endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kafkaconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kendra": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "kendra-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "kendra-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "kendra-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-fips.us-west-2.amazonaws.com", - }, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - 
Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "kinesis-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "kinesis-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "kinesis-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "kinesis-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kinesis-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kinesis-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kinesis-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kinesis-fips.us-west-2.amazonaws.com", - }, - }, - }, - "kinesisanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kinesisvideo": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", 
- }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ProdFips", - }: endpoint{ - Hostname: "kms-fips.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.af-south-1.amazonaws.com", - }, - endpointKey{ - Region: "af-south-1-fips", - }: endpoint{ - Hostname: "kms-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-east-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-1-fips", - }: endpoint{ - Hostname: "kms-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2-fips", - }: endpoint{ - Hostname: "kms-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-northeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-3-fips", - }: endpoint{ - Hostname: "kms-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-south-1-fips", - }: endpoint{ - Hostname: "kms-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-south-2-fips", - }: endpoint{ - Hostname: "kms-fips.ap-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", 
- Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-southeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1-fips", - }: endpoint{ - Hostname: "kms-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-southeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-2-fips", - }: endpoint{ - Hostname: "kms-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-southeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-3-fips", - }: endpoint{ - Hostname: "kms-fips.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "kms-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1-fips", - }: endpoint{ - Hostname: "kms-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-2-fips", - }: endpoint{ - Hostname: "kms-fips.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.eu-north-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-north-1-fips", - }: endpoint{ - Hostname: "kms-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.eu-south-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-south-1-fips", - }: endpoint{ - Hostname: "kms-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-south-2-fips", - }: endpoint{ - Hostname: "kms-fips.eu-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1-fips", - }: endpoint{ - Hostname: "kms-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.eu-west-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-2-fips", - }: endpoint{ - Hostname: "kms-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.eu-west-3.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-3-fips", - }: endpoint{ - Hostname: "kms-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.me-central-1.amazonaws.com", - }, - endpointKey{ - Region: "me-central-1-fips", - }: endpoint{ - Hostname: "kms-fips.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.me-south-1.amazonaws.com", - }, - endpointKey{ - Region: "me-south-1-fips", - }: endpoint{ - Hostname: "kms-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.sa-east-1.amazonaws.com", - }, - endpointKey{ - Region: "sa-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "kms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "kms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "lakeformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - 
Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "lakeformation-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "lakeformation-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "lakeformation-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "lakeformation-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lakeformation-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lakeformation-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lakeformation-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lakeformation-fips.us-west-2.amazonaws.com", - }, - }, - }, - "lambda": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.ap-east-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.ap-northeast-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.ap-northeast-2.api.aws", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: dualStackVariant, - }: endpoint{ - 
Hostname: "lambda.ap-northeast-3.api.aws", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.ap-southeast-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.ap-southeast-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.ap-southeast-3.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.ca-central-1.api.aws", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.eu-central-1.api.aws", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.eu-north-1.api.aws", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.eu-south-1.api.aws", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.eu-west-2.api.aws", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.eu-west-3.api.aws", - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "lambda-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "lambda-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "lambda-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "lambda-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.me-south-1.api.aws", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.sa-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.us-east-1.api.aws", - }, - 
endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lambda-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lambda-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lambda-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lambda-fips.us-west-2.amazonaws.com", - }, - }, - }, - "license-manager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "license-manager-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "license-manager-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "license-manager-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "license-manager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - 
Hostname: "license-manager-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-fips.us-west-2.amazonaws.com", - }, - }, - }, - "license-manager-user-subscriptions": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", - }, - }, - }, - "lightsail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: 
"eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "logs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "logs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "logs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "logs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "logs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "logs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "logs-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "logs-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "logs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "lookoutequipment": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "lookoutmetrics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - 
endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "lookoutvision": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "m2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "machinelearning": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "macie": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", - }, - }, - }, - "macie2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "macie2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: 
"macie2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "macie2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "macie2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie2-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie2-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie2-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie2-fips.us-west-2.amazonaws.com", - }, - }, - }, - "managedblockchain": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "media-pipelines-chime": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "mediaconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: 
"eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mediaconvert": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", - }, - }, - }, - "medialive": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: 
endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "medialive-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "medialive-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "medialive-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "medialive-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "medialive-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "medialive-fips.us-west-2.amazonaws.com", - }, - }, - }, - "mediapackage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mediapackage-vod": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mediastore": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: 
endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "meetings-chime": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "memory-db": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "memory-db-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "messaging-chime": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "metering.marketplace": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - 
Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mgh": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mgn": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "migrationhub-orchestrator": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "migrationhub-strategy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: 
endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mobileanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "models-v2-lex": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "models-fips.lex.{region}.{dnsSuffix}", - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "models-fips.lex.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "models-fips.lex.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "models-fips.lex.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "models-fips.lex.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "monitoring": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - 
endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "monitoring-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "monitoring-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "monitoring-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "monitoring-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring-fips.us-west-2.amazonaws.com", - }, - }, - }, - "mq": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "mq-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "mq-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "mq-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "mq-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-west-2.amazonaws.com", - }, - }, - }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "sandbox", - }: endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "neptune": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "rds.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "rds.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "rds.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: 
"rds.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "rds.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "network-firewall": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "network-firewall-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "network-firewall-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "network-firewall-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "network-firewall-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - 
Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-west-2.amazonaws.com", - }, - }, - }, - "networkmanager": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "networkmanager.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "nimble": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "oidc": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "oidc.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "oidc.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "oidc.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "oidc.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "oidc.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "oidc.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "oidc.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "oidc.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "oidc.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "oidc.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "oidc.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "oidc.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "oidc.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "oidc.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: 
"me-south-1", - }: endpoint{ - Hostname: "oidc.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "oidc.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "oidc.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "oidc.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "oidc.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "opsworks": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "opsworks-cm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "organizations-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "fips-aws-global", - }: endpoint{ - Hostname: "organizations-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: 
endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "outposts-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "outposts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "outposts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "outposts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "outposts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-west-2.amazonaws.com", - }, - }, - }, - "participant.connect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "participant.connect-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "participant.connect-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ 
- Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "participant.connect-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "participant.connect-fips.us-west-2.amazonaws.com", - }, - }, - }, - "personalize": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "pi": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "pinpoint": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "pinpoint-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "pinpoint-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - 
Region: "fips-us-west-2", - }: endpoint{ - Hostname: "pinpoint-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "pinpoint.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "pinpoint-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "pinpoint.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "pinpoint-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "pinpoint.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "pinpoint-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "polly": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "polly-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "polly-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "polly-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "polly-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"polly-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-west-2.amazonaws.com", - }, - }, - }, - "portal.sso": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "portal.sso.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "portal.sso.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "portal.sso.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "portal.sso.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "portal.sso.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "portal.sso.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "portal.sso.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "portal.sso.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "portal.sso.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "portal.sso.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "portal.sso.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "portal.sso.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "portal.sso.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "portal.sso.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "portal.sso.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "portal.sso.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "portal.sso.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "portal.sso.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - 
Hostname: "portal.sso.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "profile": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "projects.iot1click": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "proton": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "qldb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "qldb-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "qldb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "qldb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "qldb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "qldb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "qldb-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "qldb-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - 
Hostname: "qldb-fips.us-west-2.amazonaws.com", - }, - }, - }, - "quicksight": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "api", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ram-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ram-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ram-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ram-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ram-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - 
}: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-west-2.amazonaws.com", - }, - }, - }, - "rbin": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "rbin-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "rbin-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "rbin-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "rbin-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "rbin-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-west-2.amazonaws.com", - }, - }, - }, - "rds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - 
[… vendored AWS endpoint metadata deleted (elided for length): the remainder of the "rds" serviceEndpoints table, then the complete tables for "rds-data", "redshift", "redshift-serverless", "rekognition", "resiliencehub", "resource-groups", "robomaker", "rolesanywhere", "route53", "route53-recovery-control-config", "route53domains", "route53resolver", "rum", "runtime-v2-lex", "runtime.lex", "runtime.sagemaker", "s3", "s3-control", "s3-outposts", "savingsplans", "schemas", "sdb", "secretsmanager", "securityhub", "serverlessrepo", "servicecatalog", and the opening of "servicecatalog-appregistry". Each table maps endpointKey{Region, Variant} keys to endpoint{Hostname, CredentialScope, SignatureVersions, Deprecated} values, including fipsVariant and dualStackVariant rows, global partition endpoints (route53, savingsplans, and s3's aws-global), and legacy region aliases such as "us-east-1-fips" marked Deprecated: boxedTrue.]
endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", - }, - }, - }, - "servicediscovery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "servicediscovery", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"servicediscovery-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "servicequotas": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "session.qldb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "session.qldb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "session.qldb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "session.qldb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "session.qldb-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: 
fipsVariant, - }: endpoint{ - Hostname: "session.qldb-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "session.qldb-fips.us-west-2.amazonaws.com", - }, - }, - }, - "shield": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "shield.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "shield-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "fips-aws-global", - }: endpoint{ - Hostname: "shield-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "sms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "sms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - 
Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-west-2.amazonaws.com", - }, - }, - }, - "sms-voice": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ap-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.eu-west-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.eu-west-3.amazonaws.com", - }, - endpointKey{ - Region: "fips-ap-northeast-1", - }: endpoint{ - Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: 
"snowball-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-3", - }: endpoint{ - Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: "snowball-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: endpoint{ - Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "snowball-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: "snowball-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "snowball-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "snowball-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "snowball-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "snowball-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "snowball-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "snowball-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "snowball-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "snowball-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.sa-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", 
- Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-west-2.amazonaws.com", - }, - }, - }, - "sns": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "sns-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "sns-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "sns-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "sns-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sns-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sns-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sns-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sns-fips.us-west-2.amazonaws.com", - }, - }, - }, - "sqs": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ 
- Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", - SSLCommonName: "queue.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ssm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - 
Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ssm-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ssm-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ssm-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ssm-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ssm-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ssm-incidents": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "sso": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - 
}: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "states-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "states-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "states-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "states-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states-fips.us-west-2.amazonaws.com", - }, - }, - }, - "storagegateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: 
"ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: 
endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "local", - }: endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "sts": service{ - PartitionEndpoint: "aws-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, 
- }: endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "support": service{ - PartitionEndpoint: "aws-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "support.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "supportapp": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "swf": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "swf-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "swf-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "swf-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "swf-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "swf-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "swf-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: 
"us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "swf-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "swf-fips.us-west-2.amazonaws.com", - }, - }, - }, - "synthetics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "synthetics-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "synthetics-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "synthetics-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "synthetics-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-west-2.amazonaws.com", - }, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", 
- }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "textract": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "textract-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "textract-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "textract-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "textract-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "textract-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-west-2.amazonaws.com", - }, - }, - }, - "transcribe": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: 
endpoint{ - Hostname: "fips.transcribe.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "fips.transcribe.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fips.transcribe.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fips.transcribe.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fips.transcribe.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fips.transcribe.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-west-2.amazonaws.com", - }, - }, - }, - "transcribestreaming": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "transcribestreaming-ca-central-1", - }: 
endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-fips-ca-central-1", - }: endpoint{ - Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-fips-us-east-1", - }: endpoint{ - Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-fips-us-east-2", - }: endpoint{ - Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-fips-us-west-2", - }: endpoint{ - Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "transfer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - 
endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "transfer-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "transfer-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "transfer-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "transfer-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "transfer-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.us-west-2.amazonaws.com", - }, - }, - }, - "translate": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "translate-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "translate-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: 
endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "voiceid": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{}, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "aws", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "aws-fips", - }: endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "waf.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global-fips", - }: endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "waf-regional": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "waf-regional.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "waf-regional.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - 
endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "waf-regional.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "waf-regional.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "waf-regional.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "waf-regional.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "waf-regional.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "waf-regional.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "waf-regional.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "waf-regional.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "waf-regional.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "waf-regional.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "waf-regional.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "waf-regional.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "waf-regional.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "waf-regional.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "fips-af-south-1", - }: endpoint{ - Hostname: "waf-regional-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-east-1", - }: endpoint{ - Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-1", - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-3", - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: 
endpoint{ - Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-3", - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-north-1", - }: endpoint{ - Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-1", - }: endpoint{ - Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-south-1", - }: endpoint{ - Hostname: "waf-regional-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "waf-regional-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "waf-regional-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "waf-regional-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "waf-regional-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "waf-regional.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: 
"sa-east-1", - }: endpoint{ - Hostname: "waf-regional.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "waf-regional.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "waf-regional.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "waf-regional.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "waf-regional.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "wafv2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "wafv2.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "wafv2.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "wafv2.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "wafv2.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "wafv2.ap-northeast-3.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "wafv2.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "wafv2.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "wafv2.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "wafv2.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "wafv2.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "wafv2.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "wafv2.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "wafv2.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "wafv2.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - Variant: 
fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "wafv2.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "wafv2.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "fips-af-south-1", - }: endpoint{ - Hostname: "wafv2-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-east-1", - }: endpoint{ - Hostname: "wafv2-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-1", - }: endpoint{ - Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-3", - }: endpoint{ - Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: "wafv2-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-3", - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "wafv2-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: "wafv2-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-north-1", - }: endpoint{ - Hostname: "wafv2-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-1", - }: endpoint{ - Hostname: "wafv2-fips.eu-south-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "wafv2-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "wafv2-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "wafv2-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-south-1", - }: endpoint{ - Hostname: "wafv2-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "wafv2-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "wafv2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "wafv2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "wafv2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "wafv2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "wafv2.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "wafv2.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "wafv2.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "wafv2.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "wafv2.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: 
"us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "wafv2.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "wellarchitected": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "wisdom": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{}, - }, - }, - "workdocs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "workdocs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "workdocs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workdocs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workdocs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "workmail": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - 
Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "workspaces-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "workspaces-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workspaces-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workspaces-fips.us-west-2.amazonaws.com", - }, - }, - }, - "workspaces-web": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "xray": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "xray-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - 
}, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "xray-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "xray-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "xray-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-west-2.amazonaws.com", - }, - }, - }, - }, -} - -// AwsCnPartition returns the Resolver for AWS China. -func AwsCnPartition() Partition { - return awscnPartition.Partition() -} - -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - DNSSuffix: "api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", - }, - "cn-northwest-1": region{ - Description: "China (Ningxia)", - }, - }, - Services: services{ - "access-analyzer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "account": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "account.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "acm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - 
endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "api.ecr": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "api.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "api.tunneling.iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "apigateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "appconfig": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "appconfigdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "applicationinsights": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "appmesh": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.cn-north-1.api.amazonwebservices.com.cn", - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.cn-northwest-1.api.amazonwebservices.com.cn", - }, - }, - }, - "appsync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "athena": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "autoscaling-plans": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "backup": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "batch": service{ - Endpoints: 
serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "budgets.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "cassandra": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "ce.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "cloudcontrolapi": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "cloudtrail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "codebuild": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "codecommit": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "codepipeline": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "cognito-identity": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - }, - }, - "compute-optimizer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "compute-optimizer.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "compute-optimizer.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "cur": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "data-ats.iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - 
Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "iotdata", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "data.jobs.iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "databrew": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "dax": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "dlm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "dms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "docdb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "rds.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "eks": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "elasticache": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "elasticbeanstalk": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "elasticfilesystem": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", - }, - endpointKey{ - Region: "fips-cn-north-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-cn-northwest-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "emr-containers": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "es": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "firehose": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "firehose.cn-north-1.api.amazonwebservices.com.cn", - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "firehose.cn-northwest-1.api.amazonwebservices.com.cn", - }, - }, - }, - "fms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "fsx": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "gamelift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "glue": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - 
Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "health": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "health.cn-northwest-1.amazonaws.com.cn", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "global.health.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "iam.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "iotanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - }, - }, - "iotevents": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - }, - }, - "ioteventsdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "iotsecuredtunneling": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "iotsitewise": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - }, - }, - "kafka": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "kinesisanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "lakeformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "lambda": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.cn-north-1.api.amazonwebservices.com.cn", - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - 
endpointKey{ - Region: "cn-northwest-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.cn-northwest-1.api.amazonwebservices.com.cn", - }, - }, - }, - "license-manager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "logs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "mediaconvert": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "memory-db": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "monitoring": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "mq": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "neptune": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "rds.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "rds.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "personalize": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - }, - }, - "pi": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "polly": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "rbin": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "rds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "resource-groups": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: 
"aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "route53.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "route53resolver": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "runtime.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "s3": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com.cn", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn", - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn", - }, - }, - }, - "s3-control": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "s3-control.cn-north-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.cn-north-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.cn-northwest-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "secretsmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "securityhub": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "serverlessrepo": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: 
endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "servicecatalog": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "servicediscovery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "sms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", - }, - endpointKey{ - Region: "fips-cn-north-1", - }: endpoint{ - Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-cn-northwest-1", - }: endpoint{ - Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "sns": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "ssm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "storagegateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "sts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-cn-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "support.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - 
}, - }, - "swf": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "synthetics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "transcribestreaming": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "transfer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "waf-regional": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "waf-regional.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "waf-regional.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - endpointKey{ - Region: "fips-cn-north-1", - }: endpoint{ - Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-cn-northwest-1", - }: endpoint{ - Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "wafv2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "wafv2.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "wafv2.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - 
Region: "cn-northwest-1", - }, - }, - endpointKey{ - Region: "fips-cn-north-1", - }: endpoint{ - Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-cn-northwest-1", - }: endpoint{ - Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "xray": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - }, -} - -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). -func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() -} - -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "us-gov-east-1": region{ - Description: "AWS GovCloud (US-East)", - }, - "us-gov-west-1": region{ - Description: "AWS GovCloud (US-West)", - }, - }, - Services: services{ - "access-analyzer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "acm": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "acm.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "acm.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "acm-pca": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - 
}: endpoint{ - Hostname: "acm-pca.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "acm-pca.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca.us-gov-west-1.amazonaws.com", - }, - }, - }, - "api.detective": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "api.ecr": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "dkr-us-gov-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-gov-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-gov-east-1", - }: endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-gov-west-1", - }: endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: 
"ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "api.ecr.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "api.ecr.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "api.sagemaker": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1-fips-secondary", - }: endpoint{ - Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1-secondary", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1-secondary", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "api.tunneling.iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "apigateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - 
"appconfig": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "appconfig.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "appconfig.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appconfig.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appconfig.us-gov-west-1.amazonaws.com", - }, - }, - }, - "appconfigdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "applicationinsights": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "applicationinsights.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "applicationinsights.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "appstream2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "athena": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "athena-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "athena-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"athena-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "autoscaling.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "autoscaling-plans": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "backup": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "backup-gateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "batch": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "batch.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "batch.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "batch.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "batch.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "batch.us-gov-west-1.amazonaws.com", - }, - }, - }, - "cassandra": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "cassandra.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "cassandra.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "cloudcontrolapi": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: 
endpoint{ - Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "clouddirectory": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "cloudformation.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "cloudformation.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "cloudhsm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "cloudtrail": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", - }, - }, - }, - "codebuild": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codecommit": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - 
endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codepipeline": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "cognito-identity": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "cognito-idp": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "comprehend": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "comprehendmedical": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "config": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "config.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "config.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "config.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config.us-gov-west-1.amazonaws.com", - }, - }, - }, - "connect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "controltower": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "data-ats.iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "iotdata", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "data.jobs.iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - 
Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "databrew": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "datasync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "directconnect.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "directconnect.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "dlm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "dms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "dms", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms-fips", - }: endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "dms.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"dms.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "docdb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "ds-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "ds-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "ec2.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.us-gov-east-1.api.aws", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "ec2.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.us-gov-west-1.api.aws", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - 
}, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "eks": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "eks.{region}.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "eks.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "eks.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "eks.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "eks.us-gov-west-1.amazonaws.com", - }, - }, - }, - "elasticache": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "elasticache.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "elasticache.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "elasticbeanstalk": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "elasticfilesystem": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, - "elasticmapreduce": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", - Protocols: []string{"https"}, - }, - }, - }, - "email": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "email-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"email-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "es": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "es-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "es-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "es-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "events.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "events.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events.us-gov-west-1.amazonaws.com", - }, - }, - }, - "firehose": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "fms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "fms-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "fms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: 
"us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "fsx": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-prod-us-gov-east-1", - }: endpoint{ - Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-gov-west-1", - }: endpoint{ - Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-gov-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-gov-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "glacier": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "glacier.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "glacier.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "glue": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "glue-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "glue-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"glue-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "dataplane-us-gov-east-1", - }: endpoint{ - Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "dataplane-us-gov-west-1", - }: endpoint{ - Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "greengrass.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "greengrass.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "guardduty.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "guardduty.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "health": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "health-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "health-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: 
boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "aws-us-gov-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "aws-us-gov-global-fips", - }: endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam-govcloud", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam-govcloud", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam-govcloud-fips", - }: endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "identitystore": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "identitystore.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "identitystore.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "identitystore.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "identitystore.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "identitystore.us-gov-west-1.amazonaws.com", - }, - }, - }, - "inspector": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - 
Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "iotevents": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "ioteventsdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "data.iotevents.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "iotsecuredtunneling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "iotsitewise": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "kafka": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: 
endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "kendra": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "kinesis.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "kinesis.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "kinesisanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ProdFips", - }: endpoint{ - Hostname: "kms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "lakeformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "lambda": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: 
"lambda-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.us-gov-east-1.api.aws", - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "lambda.us-gov-west-1.api.aws", - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "license-manager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "logs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "logs.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "logs.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "logs.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "logs.us-gov-west-1.amazonaws.com", - }, - }, - }, - "managedblockchain": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "mediaconvert": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "meetings-chime": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: 
endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "metering.marketplace": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "models-fips.lex.{region}.{dnsSuffix}", - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "monitoring": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "monitoring.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "monitoring.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring.us-gov-west-1.amazonaws.com", - }, - }, - }, - "mq": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "mq-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "mq-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "neptune": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - 
Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "network-firewall": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "networkmanager": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "networkmanager.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "oidc": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "oidc.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "oidc.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "aws-us-gov-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "fips-aws-us-gov-global", - }: endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "outposts.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "outposts.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "participant.connect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "participant.connect.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "pinpoint": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - 
CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "pinpoint.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "polly": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "polly-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "portal.sso": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "portal.sso.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "portal.sso.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "quicksight": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "api", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "ram.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "ram.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "rbin": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "rbin-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "rbin-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "rds": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds.us-gov-east-1", - }: endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"rds.us-gov-west-1", - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "redshift.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "redshift.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "rekognition": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rekognition-fips.us-gov-west-1", - }: endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-gov-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "resource-groups": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "resource-groups.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "resource-groups.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: 
fipsVariant, - }: endpoint{ - Hostname: "resource-groups.us-gov-west-1.amazonaws.com", - }, - }, - }, - "robomaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "aws-us-gov-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "fips-aws-us-gov-global", - }: endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "route53resolver": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.lex.{region}.{dnsSuffix}", - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "runtime.sagemaker": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime.sagemaker.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "s3": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "s3-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "s3-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - 
}, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "s3.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "s3-control.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "s3-control.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: 
endpoint{ - Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "s3-outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{}, - }, - }, - "secretsmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "securityhub": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "serverlessrepo": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - 
Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "servicecatalog": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "servicecatalog-appregistry": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "servicediscovery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "servicediscovery", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "servicequotas": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicequotas.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "servicequotas.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - 
Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "servicequotas.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicequotas.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicequotas.us-gov-west-1.amazonaws.com", - }, - }, - }, - "sms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "sms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "sms-voice": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "sns": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "sns.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "sns.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sns.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sns.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, - "sqs": service{ - Defaults: endpointDefaults{ - 
defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "sqs.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "sqs.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "sqs.us-gov-west-1.amazonaws.com", - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ssm": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "ssm.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "ssm.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm.us-gov-west-1.amazonaws.com", - }, - }, - }, - "sso": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "sso.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "sso.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "states-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "states.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states.us-gov-west-1.amazonaws.com", - }, - }, - }, - "storagegateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "streams.dynamodb.{region}.{dnsSuffix}", - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "sts": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "sts.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "sts.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "support": service{ - PartitionEndpoint: "aws-us-gov-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "swf": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "swf.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "swf.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "synthetics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "textract": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "textract-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "textract-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "transcribe": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - 
Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", - }, - }, - }, - "transcribestreaming": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "transfer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "translate": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "translate-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "translate-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "waf-regional": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "waf-regional.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "waf-regional.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "wafv2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - 
Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "wafv2.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "wafv2.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "wellarchitected": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "xray": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "xray-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "xray-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - }, -} - -// AwsIsoPartition returns the Resolver for AWS ISO (US). 
-func AwsIsoPartition() Partition { - return awsisoPartition.Partition() -} - -var awsisoPartition = partition{ - ID: "aws-iso", - Name: "AWS ISO (US)", - DNSSuffix: "c2s.ic.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "us-iso-east-1": region{ - Description: "US ISO East", - }, - "us-iso-west-1": region{ - Description: "US ISO WEST", - }, - }, - Services: services{ - "api.ecr": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{ - Hostname: "api.ecr.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - }, - }, - }, - "api.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "apigateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "appconfig": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "appconfigdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "autoscaling": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "cloudtrail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "datapipeline": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - 
"directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "dms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "dms", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms-fips", - }: endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-east-1-fips", - }: endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-iso-west-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1-fips", - }: endpoint{ - Hostname: "dms.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "dynamodb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "ec2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "eks": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "elasticache": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "elasticfilesystem": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", - }, - }, - }, - "elasticloadbalancing": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - 
Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "elasticmapreduce": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "es": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "firehose": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "glacier": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "health": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-global", - }: endpoint{ - Hostname: "iam.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ProdFips", - }: endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "lambda": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "license-manager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "logs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "medialive": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "mediapackage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "monitoring": service{ - Endpoints: serviceEndpoints{ - 
endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "rds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-global", - }: endpoint{ - Hostname: "route53.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "route53resolver": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "runtime.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "s3": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "secretsmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "sns": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "sqs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "ssm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "sts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-global", - }: endpoint{ - Hostname: "support.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "swf": service{ - 
Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "synthetics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "transcribestreaming": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "translate": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - }, -} - -// AwsIsoBPartition returns the Resolver for AWS ISOB (US). -func AwsIsoBPartition() Partition { - return awsisobPartition.Partition() -} - -var awsisobPartition = partition{ - ID: "aws-iso-b", - Name: "AWS ISOB (US)", - DNSSuffix: "sc2s.sgov.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "us-isob-east-1": region{ - Description: "US ISOB East (Ohio)", - }, - }, - Services: services{ - "api.ecr": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{ - Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "appconfig": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "appconfigdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "cloudtrail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, 
- }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "dms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "dms", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms-fips", - }: endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - }, - endpointKey{ - Region: "us-isob-east-1-fips", - }: endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "eks": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "elasticache": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "elasticfilesystem": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-isob-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", - }, - }, - }, - "elasticloadbalancing": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "elasticmapreduce": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "es": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - 
"glacier": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "health": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-b-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-b-global", - }: endpoint{ - Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ProdFips", - }: endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - }, - endpointKey{ - Region: "us-isob-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "lambda": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "license-manager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "logs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "monitoring": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "rds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "resource-groups": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-iso-b-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-b-global", - }: endpoint{ - Hostname: "route53.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "route53resolver": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "s3": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "sns": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", 
"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ssm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "sts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-b-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-b-global", - }: endpoint{ - Hostname: "support.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "swf": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "synthetics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go deleted file mode 100644 index ca8fc828e1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go +++ /dev/null @@ -1,141 +0,0 @@ -package endpoints - -// Service identifiers -// -// Deprecated: Use client package's EndpointsID value instead of these -// ServiceIDs. These IDs are not maintained, and are out of date. -const ( - A4bServiceID = "a4b" // A4b. - AcmServiceID = "acm" // Acm. - AcmPcaServiceID = "acm-pca" // AcmPca. - ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. - ApiPricingServiceID = "api.pricing" // ApiPricing. - ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. - ApigatewayServiceID = "apigateway" // Apigateway. - ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AppsyncServiceID = "appsync" // Appsync. - AthenaServiceID = "athena" // Athena. - AutoscalingServiceID = "autoscaling" // Autoscaling. - AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - CeServiceID = "ce" // Ce. - ChimeServiceID = "chime" // Chime. - Cloud9ServiceID = "cloud9" // Cloud9. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. - CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. 
- CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. - CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CodestarServiceID = "codestar" // Codestar. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ComprehendServiceID = "comprehend" // Comprehend. - ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DaxServiceID = "dax" // Dax. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. - Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. - ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. - EmailServiceID = "email" // Email. - EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. - EsServiceID = "es" // Es. - EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - FmsServiceID = "fms" // Fms. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - GlueServiceID = "glue" // Glue. - GreengrassServiceID = "greengrass" // Greengrass. - GuarddutyServiceID = "guardduty" // Guardduty. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - IotanalyticsServiceID = "iotanalytics" // Iotanalytics. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. - LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. - MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MediaconvertServiceID = "mediaconvert" // Mediaconvert. - MedialiveServiceID = "medialive" // Medialive. - MediapackageServiceID = "mediapackage" // Mediapackage. - MediastoreServiceID = "mediastore" // Mediastore. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MghServiceID = "mgh" // Mgh. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. - ModelsLexServiceID = "models.lex" // ModelsLex. - MonitoringServiceID = "monitoring" // Monitoring. - MturkRequesterServiceID = "mturk-requester" // MturkRequester. - NeptuneServiceID = "neptune" // Neptune. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. 
- OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. - RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - ResourceGroupsServiceID = "resource-groups" // ResourceGroups. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. - S3ServiceID = "s3" // S3. - S3ControlServiceID = "s3-control" // S3Control. - SagemakerServiceID = "api.sagemaker" // Sagemaker. - SdbServiceID = "sdb" // Sdb. - SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. - ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. - TaggingServiceID = "tagging" // Tagging. - TransferServiceID = "transfer" // Transfer. - TranslateServiceID = "translate" // Translate. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. - WorkdocsServiceID = "workdocs" // Workdocs. - WorkmailServiceID = "workmail" // Workmail. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go deleted file mode 100644 index 66dec6bebf..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ /dev/null @@ -1,65 +0,0 @@ -// Package endpoints provides the types and functionality for defining regions -// and endpoints, as well as querying those definitions. -// -// The SDK's Regions and Endpoints metadata is code generated into the endpoints -// package, and is accessible via the DefaultResolver function. This function -// returns a endpoint Resolver will search the metadata and build an associated -// endpoint if one is found. The default resolver will search all partitions -// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and -// AWS GovCloud (US) (aws-us-gov). -// . -// -// # Enumerating Regions and Endpoint Metadata -// -// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface -// will allow you to get access to the list of underlying Partitions with the -// Partitions method. This is helpful if you want to limit the SDK's endpoint -// resolving to a single partition, or enumerate regions, services, and endpoints -// in the partition. 
-// -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// -// for _, p := range partitions { -// fmt.Println("Regions for", p.ID()) -// for id, _ := range p.Regions() { -// fmt.Println("*", id) -// } -// -// fmt.Println("Services for", p.ID()) -// for id, _ := range p.Services() { -// fmt.Println("*", id) -// } -// } -// -// # Using Custom Endpoints -// -// The endpoints package also gives you the ability to use your own logic how -// endpoints are resolved. This is a great way to define a custom endpoint -// for select services, without passing that logic down through your code. -// -// If a type implements the Resolver interface it can be used to resolve -// endpoints. To use this with the SDK's Session and Config set the value -// of the type to the EndpointsResolver field of aws.Config when initializing -// the session, or service client. -// -// In addition the ResolverFunc is a wrapper for a func matching the signature -// of Resolver.EndpointFor, converting it to a type that satisfies the -// Resolver interface. -// -// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { -// if service == endpoints.S3ServiceID { -// return endpoints.ResolvedEndpoint{ -// URL: "s3.custom.endpoint.com", -// SigningRegion: "custom-signing-region", -// }, nil -// } -// -// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) -// } -// -// sess := session.Must(session.NewSession(&aws.Config{ -// Region: aws.String("us-west-2"), -// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), -// })) -package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go deleted file mode 100644 index a686a48fa2..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ /dev/null @@ -1,708 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// A Logger is a minimalistic interface for the SDK to log messages to. -type Logger interface { - Log(...interface{}) -} - -// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution -// behavior. -type DualStackEndpointState uint - -const ( - // DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint - // resolution. - DualStackEndpointStateUnset DualStackEndpointState = iota - - // DualStackEndpointStateEnabled enable dual-stack endpoint resolution for endpoints. - DualStackEndpointStateEnabled - - // DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints. - DualStackEndpointStateDisabled -) - -// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior. -type FIPSEndpointState uint - -const ( - // FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution. - FIPSEndpointStateUnset FIPSEndpointState = iota - - // FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints. - FIPSEndpointStateEnabled - - // FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints. - FIPSEndpointStateDisabled -) - -// Options provide the configuration needed to direct how the -// endpoints will be resolved. -type Options struct { - // DisableSSL forces the endpoint to be resolved as HTTP. - // instead of HTTPS if the service supports it. 
- DisableSSL bool - - // Sets the resolver to resolve the endpoint as a dualstack endpoint - // for the service. If dualstack support for a service is not known and - // StrictMatching is not enabled a dualstack endpoint for the service will - // be returned. This endpoint may not be valid. If StrictMatching is - // enabled only services that are known to support dualstack will return - // dualstack endpoints. - // - // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility. - // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients - // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher - // precedence then this option. - UseDualStack bool - - // Sets the resolver to resolve a dual-stack endpoint for the service. - UseDualStackEndpoint DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. - UseFIPSEndpoint FIPSEndpointState - - // Enables strict matching of services and regions resolved endpoints. - // If the partition doesn't enumerate the exact service and region an - // error will be returned. This option will prevent returning endpoints - // that look valid, but may not resolve to any real endpoint. - StrictMatching bool - - // Enables resolving a service endpoint based on the region provided if the - // service does not exist. The service endpoint ID will be used as the service - // domain name prefix. By default the endpoint resolver requires the service - // to be known when resolving endpoints. - // - // If resolving an endpoint on the partition list the provided region will - // be used to determine which partition's domain name pattern to the service - // endpoint ID with. If both the service and region are unknown and resolving - // the endpoint on partition list an UnknownEndpointError error will be returned. - // - // If resolving and endpoint on a partition specific resolver that partition's - // domain name pattern will be used with the service endpoint ID. If both - // region and service do not exist when resolving an endpoint on a specific - // partition the partition's domain pattern will be used to combine the - // endpoint and region together. - // - // This option is ignored if StrictMatching is enabled. - ResolveUnknownService bool - - // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) - EC2MetadataEndpointMode EC2IMDSEndpointModeState - - // STS Regional Endpoint flag helps with resolving the STS endpoint - STSRegionalEndpoint STSRegionalEndpoint - - // S3 Regional Endpoint flag helps with resolving the S3 endpoint - S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint - - // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority - // over the region name passed to the ResolveEndpoint call. - ResolvedRegion string - - // Logger is the logger that will be used to log messages. - Logger Logger - - // Determines whether logging of deprecated endpoints usage is enabled. 
- LogDeprecated bool -} - -func (o Options) getEndpointVariant(service string) (v endpointVariant) { - const s3 = "s3" - const s3Control = "s3-control" - - if (o.UseDualStackEndpoint == DualStackEndpointStateEnabled) || - ((service == s3 || service == s3Control) && (o.UseDualStackEndpoint == DualStackEndpointStateUnset && o.UseDualStack)) { - v |= dualStackVariant - } - if o.UseFIPSEndpoint == FIPSEndpointStateEnabled { - v |= fipsVariant - } - return v -} - -// EC2IMDSEndpointModeState is an enum configuration variable describing the client endpoint mode. -type EC2IMDSEndpointModeState uint - -// Enumeration values for EC2IMDSEndpointModeState -const ( - EC2IMDSEndpointModeStateUnset EC2IMDSEndpointModeState = iota - EC2IMDSEndpointModeStateIPv4 - EC2IMDSEndpointModeStateIPv6 -) - -// SetFromString sets the EC2IMDSEndpointModeState based on the provided string value. Unknown values will default to EC2IMDSEndpointModeStateUnset -func (e *EC2IMDSEndpointModeState) SetFromString(v string) error { - v = strings.TrimSpace(v) - - switch { - case len(v) == 0: - *e = EC2IMDSEndpointModeStateUnset - case strings.EqualFold(v, "IPv6"): - *e = EC2IMDSEndpointModeStateIPv6 - case strings.EqualFold(v, "IPv4"): - *e = EC2IMDSEndpointModeStateIPv4 - default: - return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4") - } - return nil -} - -// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint -// options. -type STSRegionalEndpoint int - -func (e STSRegionalEndpoint) String() string { - switch e { - case LegacySTSEndpoint: - return "legacy" - case RegionalSTSEndpoint: - return "regional" - case UnsetSTSEndpoint: - return "" - default: - return "unknown" - } -} - -const ( - - // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. - UnsetSTSEndpoint STSRegionalEndpoint = iota - - // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified - // to use legacy endpoints. - LegacySTSEndpoint - - // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified - // to use regional endpoints. - RegionalSTSEndpoint -) - -// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based -// on the input string provided in env config or shared config by the user. -// -// `legacy`, `regional` are the only case-insensitive valid strings for -// resolving the STS regional Endpoint flag. -func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { - switch { - case strings.EqualFold(s, "legacy"): - return LegacySTSEndpoint, nil - case strings.EqualFold(s, "regional"): - return RegionalSTSEndpoint, nil - default: - return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) - } -} - -// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 -// Regional Endpoint options. -type S3UsEast1RegionalEndpoint int - -func (e S3UsEast1RegionalEndpoint) String() string { - switch e { - case LegacyS3UsEast1Endpoint: - return "legacy" - case RegionalS3UsEast1Endpoint: - return "regional" - case UnsetS3UsEast1Endpoint: - return "" - default: - return "unknown" - } -} - -const ( - - // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not - // specified. - UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota - - // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is - // specified to use legacy endpoints. 
- LegacyS3UsEast1Endpoint
-
- // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
- // specified to use regional endpoints.
- RegionalS3UsEast1Endpoint
-)
-
-// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based
-// on the input string provided in env config or shared config by the user.
-//
-// `legacy`, `regional` are the only case-insensitive valid strings for
-// resolving the S3 regional Endpoint flag.
-func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) {
- switch {
- case strings.EqualFold(s, "legacy"):
- return LegacyS3UsEast1Endpoint, nil
- case strings.EqualFold(s, "regional"):
- return RegionalS3UsEast1Endpoint, nil
- default:
- return UnsetS3UsEast1Endpoint,
- fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s)
- }
-}
-
-// Set combines all of the option functions together.
-func (o *Options) Set(optFns ...func(*Options)) {
- for _, fn := range optFns {
- fn(o)
- }
-}
-
-// DisableSSLOption sets the DisableSSL option. Can be used as a functional
-// option when resolving endpoints.
-func DisableSSLOption(o *Options) {
- o.DisableSSL = true
-}
-
-// UseDualStackOption sets the UseDualStack option. Can be used as a functional
-// option when resolving endpoints.
-//
-// Deprecated: UseDualStackEndpointOption should be used to enable usage of a service's dual-stack endpoint.
-// When DualStackEndpointState is set to a non-zero value it takes higher precedence than this option.
-func UseDualStackOption(o *Options) {
- o.UseDualStack = true
-}
-
-// UseDualStackEndpointOption sets the UseDualStackEndpoint option to enabled. Can be used as a functional
-// option when resolving endpoints.
-func UseDualStackEndpointOption(o *Options) {
- o.UseDualStackEndpoint = DualStackEndpointStateEnabled
-}
-
-// UseFIPSEndpointOption sets the UseFIPSEndpoint option to enabled. Can be used as a functional
-// option when resolving endpoints.
-func UseFIPSEndpointOption(o *Options) {
- o.UseFIPSEndpoint = FIPSEndpointStateEnabled
-}
-
-// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
-// option when resolving endpoints.
-func StrictMatchingOption(o *Options) {
- o.StrictMatching = true
-}
-
-// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
-// as a functional option when resolving endpoints.
-func ResolveUnknownServiceOption(o *Options) {
- o.ResolveUnknownService = true
-}
-
-// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve
-// STS endpoints to their regional endpoint, instead of the global endpoint.
-func STSRegionalEndpointOption(o *Options) {
- o.STSRegionalEndpoint = RegionalSTSEndpoint
-}
-
-// A Resolver provides the interface for functionality to resolve endpoints.
-// The built-in Partition and DefaultResolver return values satisfy this interface.
-type Resolver interface {
- EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
-}
-
-// ResolverFunc is a helper utility that wraps a function so it satisfies the
-// Resolver interface. This is useful when you want to add additional endpoint
-// resolving logic, or stub out specific endpoints with custom values.
-type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
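For reference, the ResolverFunc wrapper and the functional option setters above compose at the call site. A minimal sketch, assuming the public endpoints package API; the mirror URL and the stub-then-fallback behavior are illustrative assumptions, not part of this change:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Stub out S3 with a custom endpoint and defer every other service to
	// the SDK's default resolver.
	custom := endpoints.ResolverFunc(func(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
		if service == "s3" {
			return endpoints.ResolvedEndpoint{
				URL:           "https://s3.mirror.example.com", // assumed placeholder URL
				SigningRegion: region,
			}, nil
		}
		return endpoints.DefaultResolver().EndpointFor(service, region, opts...)
	})

	// Option setters compose exactly as Options.Set applies them.
	ep, err := custom.EndpointFor("sts", "us-west-2", endpoints.UseFIPSEndpointOption)
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(ep.URL)
}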
-// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
-func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
- return fn(service, region, opts...)
-}
-
-var schemeRE = regexp.MustCompile("^([^:]+)://")
-
-// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
-// scheme. If disableSSL is true, HTTP will be used instead of the default HTTPS.
-//
-// If disableSSL is set, it will only set the URL's scheme if the URL does not
-// contain a scheme.
-func AddScheme(endpoint string, disableSSL bool) string {
- if !schemeRE.MatchString(endpoint) {
- scheme := "https"
- if disableSSL {
- scheme = "http"
- }
- endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
- }
-
- return endpoint
-}
-
-// EnumPartitions provides a way to retrieve the underlying partitions that
-// make up the SDK's default Resolver, or any resolver decoded from a model
-// file.
-//
-// Use this interface with DefaultResolver and DecodeModels to get the list of
-// Partitions.
-type EnumPartitions interface {
- Partitions() []Partition
-}
-
-// RegionsForService returns a map of regions for the partition and service.
-// If either the partition or service does not exist, false will be returned
-// as the second parameter.
-//
-// This example shows how to get the regions for DynamoDB in the AWS partition.
-//
-// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
-//
-// This is equivalent to using the partition directly.
-//
-// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
-func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
- for _, p := range ps {
- if p.ID() != partitionID {
- continue
- }
- if _, ok := p.p.Services[serviceID]; !(ok || serviceID == Ec2metadataServiceID) {
- break
- }
-
- s := Service{
- id: serviceID,
- p: p.p,
- }
- return s.Regions(), true
- }
-
- return map[string]Region{}, false
-}
-
-// PartitionForRegion returns the first partition which includes the region
-// passed in. This includes both known regions and regions which match
-// a pattern supported by the partition which may include regions that are
-// not explicitly known by the partition. Use the Regions method of the
-// returned Partition if explicit support is needed.
-func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
- for _, p := range ps {
- if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
- return p, true
- }
- }
-
- return Partition{}, false
-}
-
-// A Partition provides the ability to enumerate the partition's regions
-// and services.
-type Partition struct {
- id, dnsSuffix string
- p *partition
-}
-
-// DNSSuffix returns the base domain name of the partition.
-func (p Partition) DNSSuffix() string { return p.dnsSuffix }
-
-// ID returns the identifier of the partition.
-func (p Partition) ID() string { return p.id }
-
-// EndpointFor attempts to resolve the endpoint based on service and region.
-// See Options for information on configuring how the endpoint is resolved.
-//
-// If the service cannot be found in the metadata, the UnknownServiceError
-// error will be returned. This validation will occur regardless of whether
-// StrictMatching is enabled. To enable resolving unknown services, set the
-// "ResolveUnknownService" option to true. When StrictMatching is disabled,
-// this option allows the partition resolver to resolve an endpoint based on
-// the service endpoint ID provided.
-// -// When resolving endpoints you can choose to enable StrictMatching. This will -// require the provided service and region to be known by the partition. -// If the endpoint cannot be strictly resolved an error will be returned. This -// mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned may look valid but may not work. -// StrictMatching requires the SDK to be updated if you want to take advantage -// of new regions and services expansions. -// -// Errors that can be returned. -// - UnknownServiceError -// - UnknownEndpointError -func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return p.p.EndpointFor(service, region, opts...) -} - -// Regions returns a map of Regions indexed by their ID. This is useful for -// enumerating over the regions in a partition. -func (p Partition) Regions() map[string]Region { - rs := make(map[string]Region, len(p.p.Regions)) - for id, r := range p.p.Regions { - rs[id] = Region{ - id: id, - desc: r.Description, - p: p.p, - } - } - - return rs -} - -// Services returns a map of Service indexed by their ID. This is useful for -// enumerating over the services in a partition. -func (p Partition) Services() map[string]Service { - ss := make(map[string]Service, len(p.p.Services)) - - for id := range p.p.Services { - ss[id] = Service{ - id: id, - p: p.p, - } - } - - // Since we have removed the customization that injected this into the model - // we still need to pretend that this is a modeled service. - if _, ok := ss[Ec2metadataServiceID]; !ok { - ss[Ec2metadataServiceID] = Service{ - id: Ec2metadataServiceID, - p: p.p, - } - } - - return ss -} - -// A Region provides information about a region, and ability to resolve an -// endpoint from the context of a region, given a service. -type Region struct { - id, desc string - p *partition -} - -// ID returns the region's identifier. -func (r Region) ID() string { return r.id } - -// Description returns the region's description. The region description -// is free text, it can be empty, and it may change between SDK releases. -func (r Region) Description() string { return r.desc } - -// ResolveEndpoint resolves an endpoint from the context of the region given -// a service. See Partition.EndpointFor for usage and errors that can be returned. -func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return r.p.EndpointFor(service, r.id, opts...) -} - -// Services returns a list of all services that are known to be in this region. -func (r Region) Services() map[string]Service { - ss := map[string]Service{} - for id, s := range r.p.Services { - if _, ok := s.Endpoints[endpointKey{Region: r.id}]; ok { - ss[id] = Service{ - id: id, - p: r.p, - } - } - } - - return ss -} - -// A Service provides information about a service, and ability to resolve an -// endpoint from the context of a service, given a region. -type Service struct { - id string - p *partition -} - -// ID returns the identifier for the service. -func (s Service) ID() string { return s.id } - -// ResolveEndpoint resolves an endpoint from the context of a service given -// a region. See Partition.EndpointFor for usage and errors that can be returned. -func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return s.p.EndpointFor(s.id, region, opts...) -} - -// Regions returns a map of Regions that the service is present in. 
-//
-// A region is the AWS region the service exists in. Whereas an Endpoint is
-// a URL that can be resolved to an instance of a service.
-func (s Service) Regions() map[string]Region {
- rs := map[string]Region{}
-
- service, ok := s.p.Services[s.id]
-
- // Since the ec2metadata customization has been removed, we need to check
- // if it was defined in a non-standard endpoints.json file. If it's not,
- // then we can return the empty map as there are no regional endpoints for IMDS.
- // Otherwise, we need to iterate the non-standard model.
- if s.id == Ec2metadataServiceID && !ok {
- return rs
- }
-
- for id := range service.Endpoints {
- if id.Variant != 0 {
- continue
- }
- if r, ok := s.p.Regions[id.Region]; ok {
- rs[id.Region] = Region{
- id: id.Region,
- desc: r.Description,
- p: s.p,
- }
- }
- }
-
- return rs
-}
-
-// Endpoints returns a map of Endpoints indexed by their ID for all known
-// endpoints for a service.
-//
-// A region is the AWS region the service exists in. Whereas an Endpoint is
-// a URL that can be resolved to an instance of a service.
-func (s Service) Endpoints() map[string]Endpoint {
- es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints))
- for id := range s.p.Services[s.id].Endpoints {
- if id.Variant != 0 {
- continue
- }
- es[id.Region] = Endpoint{
- id: id.Region,
- serviceID: s.id,
- p: s.p,
- }
- }
-
- return es
-}
-
-// An Endpoint provides information about endpoints, and provides the ability
-// to resolve that endpoint for the service, and the region the endpoint
-// represents.
-type Endpoint struct {
- id string
- serviceID string
- p *partition
-}
-
-// ID returns the identifier for an endpoint.
-func (e Endpoint) ID() string { return e.id }
-
-// ServiceID returns the identifier the endpoint belongs to.
-func (e Endpoint) ServiceID() string { return e.serviceID }
-
-// ResolveEndpoint resolves an endpoint from the context of a service and
-// region the endpoint represents. See Partition.EndpointFor for usage and
-// errors that can be returned.
-func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
- return e.p.EndpointFor(e.serviceID, e.id, opts...)
-}
-
-// A ResolvedEndpoint is an endpoint that has been resolved based on a partition,
-// service, and region.
-type ResolvedEndpoint struct {
- // The endpoint URL
- URL string
-
- // The endpoint partition
- PartitionID string
-
- // The region that should be used for signing requests.
- SigningRegion string
-
- // The service name that should be used for signing requests.
- SigningName string
-
- // States that the signing name for this endpoint was derived from metadata
- // passed in, but was not explicitly modeled.
- SigningNameDerived bool
-
- // The signing method that should be used for signing requests.
- SigningMethod string
-}
-
-// So that the Error interface type can be included as an anonymous field
-// in the requestError struct and not conflict with the error.Error() method.
-type awsError awserr.Error
-
-// An EndpointNotFoundError is returned when in StrictMatching mode, and the
-// endpoint for the service and region cannot be found in any of the partitions.
-type EndpointNotFoundError struct {
- awsError
- Partition string
- Service string
- Region string
-}
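The Service and Endpoint accessors above support straightforward enumeration. A hedged sketch of walking every modeled S3 endpoint in the AWS partition, assuming the public endpoints package API:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Enumerate every modeled endpoint for S3 and resolve each one to a URL.
	s3 := endpoints.AwsPartition().Services()["s3"]
	for id, ep := range s3.Endpoints() {
		resolved, err := ep.ResolveEndpoint()
		if err != nil {
			continue // skip endpoints that need additional options
		}
		fmt.Printf("%s => %s (signing region %s)\n", id, resolved.URL, resolved.SigningRegion)
	}
}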
-// An UnknownServiceError is returned when the service does not resolve to an
-// endpoint. Includes a list of all known services for the partition. Returned
-// when a partition does not support the service.
-type UnknownServiceError struct {
- awsError
- Partition string
- Service string
- Known []string
-}
-
-// NewUnknownServiceError builds and returns UnknownServiceError.
-func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
- return UnknownServiceError{
- awsError: awserr.New("UnknownServiceError",
- "could not resolve endpoint for unknown service", nil),
- Partition: p,
- Service: s,
- Known: known,
- }
-}
-
-// Error returns the string representation of the error.
-func (e UnknownServiceError) Error() string {
- extra := fmt.Sprintf("partition: %q, service: %q",
- e.Partition, e.Service)
- if len(e.Known) > 0 {
- extra += fmt.Sprintf(", known: %v", e.Known)
- }
- return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
-}
-
-// String returns the string representation of the error.
-func (e UnknownServiceError) String() string {
- return e.Error()
-}
-
-// An UnknownEndpointError is returned when in StrictMatching mode and the
-// service is valid, but the region does not resolve to an endpoint. Includes
-// a list of all known endpoints for the service.
-type UnknownEndpointError struct {
- awsError
- Partition string
- Service string
- Region string
- Known []string
-}
-
-// NewUnknownEndpointError builds and returns UnknownEndpointError.
-func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
- return UnknownEndpointError{
- awsError: awserr.New("UnknownEndpointError",
- "could not resolve endpoint", nil),
- Partition: p,
- Service: s,
- Region: r,
- Known: known,
- }
-}
-
-// Error returns the string representation of the error.
-func (e UnknownEndpointError) Error() string {
- extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
- e.Partition, e.Service, e.Region)
- if len(e.Known) > 0 {
- extra += fmt.Sprintf(", known: %v", e.Known)
- }
- return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
-}
-
-// String returns the string representation of the error.
-func (e UnknownEndpointError) String() string {
- return e.Error()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
deleted file mode 100644
index df75e899ad..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package endpoints
-
-var legacyGlobalRegions = map[string]map[string]struct{}{
- "sts": {
- "ap-northeast-1": {},
- "ap-south-1": {},
- "ap-southeast-1": {},
- "ap-southeast-2": {},
- "ca-central-1": {},
- "eu-central-1": {},
- "eu-north-1": {},
- "eu-west-1": {},
- "eu-west-2": {},
- "eu-west-3": {},
- "sa-east-1": {},
- "us-east-1": {},
- "us-east-2": {},
- "us-west-1": {},
- "us-west-2": {},
- },
- "s3": {
- "us-east-1": {},
- },
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
deleted file mode 100644
index 89f6627dc6..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
+++ /dev/null
@@ -1,594 +0,0 @@
-package endpoints
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-const (
- ec2MetadataEndpointIPv6 = "http://[fd00:ec2::254]/latest"
- ec2MetadataEndpointIPv4 = "http://169.254.169.254/latest"
-)
-
-const dnsSuffixTemplateKey = "{dnsSuffix}"
-
-// defaultKey is a compound map key of a variant and other values.
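Both error types above are returned by value from EndpointFor, so callers can type-assert on them before the model types that follow come into play. A small sketch, assuming the public endpoints package API; the region name is deliberately bogus:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// With StrictMatching, an unmodeled region surfaces a typed error that
	// carries the list of endpoints the model does know about.
	_, err := endpoints.AwsPartition().EndpointFor(
		"s3", "mars-east-1", endpoints.StrictMatchingOption)
	if uee, ok := err.(endpoints.UnknownEndpointError); ok {
		fmt.Println("unknown endpoint; known regions:", uee.Known)
	}
}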
-type defaultKey struct { - Variant endpointVariant - ServiceVariant serviceVariant -} - -// endpointKey is a compound map key of a region and associated variant value. -type endpointKey struct { - Region string - Variant endpointVariant -} - -// endpointVariant is a bit field to describe the endpoints attributes. -type endpointVariant uint64 - -// serviceVariant is a bit field to describe the service endpoint attributes. -type serviceVariant uint64 - -const ( - // fipsVariant indicates that the endpoint is FIPS capable. - fipsVariant endpointVariant = 1 << (64 - 1 - iota) - - // dualStackVariant indicates that the endpoint is DualStack capable. - dualStackVariant -) - -var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) - -type partitions []partition - -func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - var opt Options - opt.Set(opts...) - - if len(opt.ResolvedRegion) > 0 { - region = opt.ResolvedRegion - } - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(service, region, opt) { - continue - } - - return ps[i].EndpointFor(service, region, opts...) - } - - // If loose matching fallback to first partition format to use - // when resolving the endpoint. - if !opt.StrictMatching && len(ps) > 0 { - return ps[0].EndpointFor(service, region, opts...) - } - - return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) -} - -// Partitions satisfies the EnumPartitions interface and returns a list -// of Partitions representing each partition represented in the SDK's -// endpoints model. -func (ps partitions) Partitions() []Partition { - parts := make([]Partition, 0, len(ps)) - for i := 0; i < len(ps); i++ { - parts = append(parts, ps[i].Partition()) - } - - return parts -} - -type endpointWithVariants struct { - endpoint - Variants []endpointWithTags `json:"variants"` -} - -type endpointWithTags struct { - endpoint - Tags []string `json:"tags"` -} - -type endpointDefaults map[defaultKey]endpoint - -func (p *endpointDefaults) UnmarshalJSON(data []byte) error { - if *p == nil { - *p = make(endpointDefaults) - } - - var e endpointWithVariants - if err := json.Unmarshal(data, &e); err != nil { - return err - } - - (*p)[defaultKey{Variant: 0}] = e.endpoint - - e.Hostname = "" - e.DNSSuffix = "" - - for _, variant := range e.Variants { - endpointVariant, unknown := parseVariantTags(variant.Tags) - if unknown { - continue - } - - var ve endpoint - ve.mergeIn(e.endpoint) - ve.mergeIn(variant.endpoint) - - (*p)[defaultKey{Variant: endpointVariant}] = ve - } - - return nil -} - -func parseVariantTags(tags []string) (ev endpointVariant, unknown bool) { - if len(tags) == 0 { - unknown = true - return - } - - for _, tag := range tags { - switch { - case strings.EqualFold("fips", tag): - ev |= fipsVariant - case strings.EqualFold("dualstack", tag): - ev |= dualStackVariant - default: - unknown = true - } - } - return ev, unknown -} - -type partition struct { - ID string `json:"partition"` - Name string `json:"partitionName"` - DNSSuffix string `json:"dnsSuffix"` - RegionRegex regionRegex `json:"regionRegex"` - Defaults endpointDefaults `json:"defaults"` - Regions regions `json:"regions"` - Services services `json:"services"` -} - -func (p partition) Partition() Partition { - return Partition{ - dnsSuffix: p.DNSSuffix, - id: p.ID, - p: &p, - } -} - -func (p partition) canResolveEndpoint(service, region string, options Options) bool { - s, hasService := 
p.Services[service] - _, hasEndpoint := s.Endpoints[endpointKey{ - Region: region, - Variant: options.getEndpointVariant(service), - }] - - if hasEndpoint && hasService { - return true - } - - if options.StrictMatching { - return false - } - - return p.RegionRegex.MatchString(region) -} - -func allowLegacyEmptyRegion(service string) bool { - legacy := map[string]struct{}{ - "budgets": {}, - "ce": {}, - "chime": {}, - "cloudfront": {}, - "ec2metadata": {}, - "iam": {}, - "importexport": {}, - "organizations": {}, - "route53": {}, - "sts": {}, - "support": {}, - "waf": {}, - } - - _, allowed := legacy[service] - return allowed -} - -func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { - var opt Options - opt.Set(opts...) - - if len(opt.ResolvedRegion) > 0 { - region = opt.ResolvedRegion - } - - s, hasService := p.Services[service] - - if service == Ec2metadataServiceID && !hasService { - endpoint := getEC2MetadataEndpoint(p.ID, service, opt.EC2MetadataEndpointMode) - return endpoint, nil - } - - if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { - // Only return error if the resolver will not fallback to creating - // endpoint based on service endpoint ID passed in. - return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) - } - - if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { - region = s.PartitionEndpoint - } - - if r, ok := isLegacyGlobalRegion(service, region, opt); ok { - region = r - } - - variant := opt.getEndpointVariant(service) - - endpoints := s.Endpoints - - serviceDefaults, hasServiceDefault := s.Defaults[defaultKey{Variant: variant}] - // If we searched for a variant which may have no explicit service defaults, - // then we need to inherit the standard service defaults except the hostname and dnsSuffix - if variant != 0 && !hasServiceDefault { - serviceDefaults = s.Defaults[defaultKey{}] - serviceDefaults.Hostname = "" - serviceDefaults.DNSSuffix = "" - } - - partitionDefaults, hasPartitionDefault := p.Defaults[defaultKey{Variant: variant}] - - var dnsSuffix string - if len(serviceDefaults.DNSSuffix) > 0 { - dnsSuffix = serviceDefaults.DNSSuffix - } else if variant == 0 { - // For legacy reasons the partition dnsSuffix is not in the defaults, so if we looked for - // a non-variant endpoint then we need to set the dnsSuffix. 
- dnsSuffix = p.DNSSuffix - } - - noDefaults := !hasServiceDefault && !hasPartitionDefault - - e, hasEndpoint := s.endpointForRegion(region, endpoints, variant) - if len(region) == 0 || (!hasEndpoint && (opt.StrictMatching || noDefaults)) { - return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(endpoints, variant)) - } - - defs := []endpoint{partitionDefaults, serviceDefaults} - - return e.resolve(service, p.ID, region, dnsSuffixTemplateKey, dnsSuffix, defs, opt) -} - -func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointModeState) ResolvedEndpoint { - switch mode { - case EC2IMDSEndpointModeStateIPv6: - return ResolvedEndpoint{ - URL: ec2MetadataEndpointIPv6, - PartitionID: partitionID, - SigningRegion: "aws-global", - SigningName: service, - SigningNameDerived: true, - SigningMethod: "v4", - } - case EC2IMDSEndpointModeStateIPv4: - fallthrough - default: - return ResolvedEndpoint{ - URL: ec2MetadataEndpointIPv4, - PartitionID: partitionID, - SigningRegion: "aws-global", - SigningName: service, - SigningNameDerived: true, - SigningMethod: "v4", - } - } -} - -func isLegacyGlobalRegion(service string, region string, opt Options) (string, bool) { - if opt.getEndpointVariant(service) != 0 { - return "", false - } - - const ( - sts = "sts" - s3 = "s3" - awsGlobal = "aws-global" - ) - - switch { - case service == sts && opt.STSRegionalEndpoint == RegionalSTSEndpoint: - return region, false - case service == s3 && opt.S3UsEast1RegionalEndpoint == RegionalS3UsEast1Endpoint: - return region, false - default: - if _, ok := legacyGlobalRegions[service][region]; ok { - return awsGlobal, true - } - } - - return region, false -} - -func serviceList(ss services) []string { - list := make([]string, 0, len(ss)) - for k := range ss { - list = append(list, k) - } - return list -} -func endpointList(es serviceEndpoints, variant endpointVariant) []string { - list := make([]string, 0, len(es)) - for k := range es { - if k.Variant != variant { - continue - } - list = append(list, k.Region) - } - return list -} - -type regionRegex struct { - *regexp.Regexp -} - -func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { - // Strip leading and trailing quotes - regex, err := strconv.Unquote(string(b)) - if err != nil { - return fmt.Errorf("unable to strip quotes from regex, %v", err) - } - - rr.Regexp, err = regexp.Compile(regex) - if err != nil { - return fmt.Errorf("unable to unmarshal region regex, %v", err) - } - return nil -} - -type regions map[string]region - -type region struct { - Description string `json:"description"` -} - -type services map[string]service - -type service struct { - PartitionEndpoint string `json:"partitionEndpoint"` - IsRegionalized boxedBool `json:"isRegionalized,omitempty"` - Defaults endpointDefaults `json:"defaults"` - Endpoints serviceEndpoints `json:"endpoints"` -} - -func (s *service) endpointForRegion(region string, endpoints serviceEndpoints, variant endpointVariant) (endpoint, bool) { - if e, ok := endpoints[endpointKey{Region: region, Variant: variant}]; ok { - return e, true - } - - if s.IsRegionalized == boxedFalse { - return endpoints[endpointKey{Region: s.PartitionEndpoint, Variant: variant}], region == s.PartitionEndpoint - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. 
- return endpoint{}, false -} - -type serviceEndpoints map[endpointKey]endpoint - -func (s *serviceEndpoints) UnmarshalJSON(data []byte) error { - if *s == nil { - *s = make(serviceEndpoints) - } - - var regionToEndpoint map[string]endpointWithVariants - - if err := json.Unmarshal(data, ®ionToEndpoint); err != nil { - return err - } - - for region, e := range regionToEndpoint { - (*s)[endpointKey{Region: region}] = e.endpoint - - e.Hostname = "" - e.DNSSuffix = "" - - for _, variant := range e.Variants { - endpointVariant, unknown := parseVariantTags(variant.Tags) - if unknown { - continue - } - - var ve endpoint - ve.mergeIn(e.endpoint) - ve.mergeIn(variant.endpoint) - - (*s)[endpointKey{Region: region, Variant: endpointVariant}] = ve - } - } - - return nil -} - -type endpoint struct { - Hostname string `json:"hostname"` - Protocols []string `json:"protocols"` - CredentialScope credentialScope `json:"credentialScope"` - - DNSSuffix string `json:"dnsSuffix"` - - // Signature Version not used - SignatureVersions []string `json:"signatureVersions"` - - // SSLCommonName not used. - SSLCommonName string `json:"sslCommonName"` - - Deprecated boxedBool `json:"deprecated"` -} - -// isZero returns whether the endpoint structure is an empty (zero) value. -func (e endpoint) isZero() bool { - switch { - case len(e.Hostname) != 0: - return false - case len(e.Protocols) != 0: - return false - case e.CredentialScope != (credentialScope{}): - return false - case len(e.SignatureVersions) != 0: - return false - case len(e.SSLCommonName) != 0: - return false - } - return true -} - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4", "v2"} -) - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} - -func (e endpoint) resolve(service, partitionID, region, dnsSuffixTemplateVariable, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { - var merged endpoint - for _, def := range defs { - merged.mergeIn(def) - } - merged.mergeIn(e) - e = merged - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - - hostname := e.Hostname - - if !validateInputRegion(region) { - return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") - } - - if len(merged.DNSSuffix) > 0 { - dnsSuffix = merged.DNSSuffix - } - - u := strings.Replace(hostname, "{service}", service, 1) - u = strings.Replace(u, "{region}", region, 1) - u = strings.Replace(u, dnsSuffixTemplateVariable, dnsSuffix, 1) - - scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) - u = fmt.Sprintf("%s://%s", scheme, u) - - if e.Deprecated == boxedTrue && opts.LogDeprecated && opts.Logger != nil { - opts.Logger.Log(fmt.Sprintf("endpoint identifier %q, url %q marked as deprecated", region, u)) - } - - return ResolvedEndpoint{ - URL: u, - PartitionID: partitionID, - SigningRegion: signingRegion, - SigningName: signingName, - SigningNameDerived: signingNameDerived, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - }, nil -} - -func getEndpointScheme(protocols []string, disableSSL bool) string { - if 
disableSSL { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func (e *endpoint) mergeIn(other endpoint) { - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SSLCommonName) > 0 { - e.SSLCommonName = other.SSLCommonName - } - if len(other.DNSSuffix) > 0 { - e.DNSSuffix = other.DNSSuffix - } - if other.Deprecated != boxedBoolUnset { - e.Deprecated = other.Deprecated - } -} - -type credentialScope struct { - Region string `json:"region"` - Service string `json:"service"` -} - -type boxedBool int - -func (b *boxedBool) UnmarshalJSON(buf []byte) error { - v, err := strconv.ParseBool(string(buf)) - if err != nil { - return err - } - - if v { - *b = boxedTrue - } else { - *b = boxedFalse - } - - return nil -} - -const ( - boxedBoolUnset boxedBool = iota - boxedFalse - boxedTrue -) - -func validateInputRegion(region string) bool { - return regionValidationRegex.MatchString(region) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go deleted file mode 100644 index 84922bca8a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ /dev/null @@ -1,412 +0,0 @@ -//go:build codegen -// +build codegen - -package endpoints - -import ( - "fmt" - "io" - "reflect" - "strings" - "text/template" - "unicode" -) - -// A CodeGenOptions are the options for code generating the endpoints into -// Go code from the endpoints model definition. -type CodeGenOptions struct { - // Options for how the model will be decoded. - DecodeModelOptions DecodeModelOptions - - // Disables code generation of the service endpoint prefix IDs defined in - // the model. - DisableGenerateServiceIDs bool -} - -// Set combines all of the option functions together -func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// CodeGenModel given a endpoints model file will decode it and attempt to -// generate Go code from the model definition. Error will be returned if -// the code is unable to be generated, or decoded. -func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { - var opts CodeGenOptions - opts.Set(optFns...) 
- - resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { - *d = opts.DecodeModelOptions - }) - if err != nil { - return err - } - - v := struct { - Resolver - CodeGenOptions - }{ - Resolver: resolver, - CodeGenOptions: opts, - } - - tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { - return fmt.Errorf("failed to execute template, %v", err) - } - - return nil -} - -func toSymbol(v string) string { - out := []rune{} - for _, c := range strings.Title(v) { - if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { - continue - } - - out = append(out, c) - } - - return string(out) -} - -func quoteString(v string) string { - return fmt.Sprintf("%q", v) -} - -func regionConstName(p, r string) string { - return toSymbol(p) + toSymbol(r) -} - -func partitionGetter(id string) string { - return fmt.Sprintf("%sPartition", toSymbol(id)) -} - -func partitionVarName(id string) string { - return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) -} - -func listPartitionNames(ps partitions) string { - names := []string{} - switch len(ps) { - case 1: - return ps[0].Name - case 2: - return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) - default: - for i, p := range ps { - if i == len(ps)-1 { - names = append(names, "and "+p.Name) - } else { - names = append(names, p.Name) - } - } - return strings.Join(names, ", ") - } -} - -func boxedBoolIfSet(msg string, v boxedBool) string { - switch v { - case boxedTrue: - return fmt.Sprintf(msg, "boxedTrue") - case boxedFalse: - return fmt.Sprintf(msg, "boxedFalse") - default: - return "" - } -} - -func stringIfSet(msg, v string) string { - if len(v) == 0 { - return "" - } - - return fmt.Sprintf(msg, v) -} - -func stringSliceIfSet(msg string, vs []string) string { - if len(vs) == 0 { - return "" - } - - names := []string{} - for _, v := range vs { - names = append(names, `"`+v+`"`) - } - - return fmt.Sprintf(msg, strings.Join(names, ",")) -} - -func endpointIsSet(v endpoint) bool { - return !reflect.DeepEqual(v, endpoint{}) -} - -func serviceSet(ps partitions) map[string]struct{} { - set := map[string]struct{}{} - for _, p := range ps { - for id := range p.Services { - set[id] = struct{}{} - } - } - - return set -} - -func endpointVariantSetter(variant endpointVariant) (string, error) { - if variant == 0 { - return "0", nil - } - - if variant > (fipsVariant | dualStackVariant) { - return "", fmt.Errorf("unknown endpoint variant") - } - - var symbols []string - if variant&fipsVariant != 0 { - symbols = append(symbols, "fipsVariant") - } - if variant&dualStackVariant != 0 { - symbols = append(symbols, "dualStackVariant") - } - v := strings.Join(symbols, "|") - - return v, nil -} - -func endpointKeySetter(e endpointKey) (string, error) { - var sb strings.Builder - sb.WriteString("endpointKey{\n") - sb.WriteString(fmt.Sprintf("Region: %q,\n", e.Region)) - if e.Variant != 0 { - variantSetter, err := endpointVariantSetter(e.Variant) - if err != nil { - return "", err - } - sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) - } - sb.WriteString("}") - return sb.String(), nil -} - -func defaultKeySetter(e defaultKey) (string, error) { - var sb strings.Builder - sb.WriteString("defaultKey{\n") - if e.Variant != 0 { - variantSetter, err := endpointVariantSetter(e.Variant) - if err != nil { - return "", err - } - sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) - } - sb.WriteString("}") - return sb.String(), nil -} - -var funcMap = 
template.FuncMap{ - "ToSymbol": toSymbol, - "QuoteString": quoteString, - "RegionConst": regionConstName, - "PartitionGetter": partitionGetter, - "PartitionVarName": partitionVarName, - "ListPartitionNames": listPartitionNames, - "BoxedBoolIfSet": boxedBoolIfSet, - "StringIfSet": stringIfSet, - "StringSliceIfSet": stringSliceIfSet, - "EndpointIsSet": endpointIsSet, - "ServicesSet": serviceSet, - "EndpointVariantSetter": endpointVariantSetter, - "EndpointKeySetter": endpointKeySetter, - "DefaultKeySetter": defaultKeySetter, -} - -const v3Tmpl = ` -{{ define "defaults" -}} -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - - {{ template "partition consts" $.Resolver }} - - {{ range $_, $partition := $.Resolver }} - {{ template "partition region consts" $partition }} - {{ end }} - - {{ if not $.DisableGenerateServiceIDs -}} - {{ template "service consts" $.Resolver }} - {{- end }} - - {{ template "endpoint resolvers" $.Resolver }} -{{- end }} - -{{ define "partition consts" }} - // Partition identifiers - const ( - {{ range $_, $p := . -}} - {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. - {{ end -}} - ) -{{- end }} - -{{ define "partition region consts" }} - // {{ .Name }} partition's regions. - const ( - {{ range $id, $region := .Regions -}} - {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. - {{ end -}} - ) -{{- end }} - -{{ define "service consts" }} - // Service identifiers - const ( - {{ $serviceSet := ServicesSet . -}} - {{ range $id, $_ := $serviceSet -}} - {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. - {{ end -}} - ) -{{- end }} - -{{ define "endpoint resolvers" }} - // DefaultResolver returns an Endpoint resolver that will be able - // to resolve endpoints for: {{ ListPartitionNames . }}. - // - // Use DefaultPartitions() to get the list of the default partitions. - func DefaultResolver() Resolver { - return defaultPartitions - } - - // DefaultPartitions returns a list of the partitions the SDK is bundled - // with. The available partitions are: {{ ListPartitionNames . }}. - // - // partitions := endpoints.DefaultPartitions - // for _, p := range partitions { - // // ... inspect partitions - // } - func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() - } - - var defaultPartitions = partitions{ - {{ range $_, $partition := . -}} - {{ PartitionVarName $partition.ID }}, - {{ end }} - } - - {{ range $_, $partition := . -}} - {{ $name := PartitionGetter $partition.ID -}} - // {{ $name }} returns the Resolver for {{ $partition.Name }}. - func {{ $name }}() Partition { - return {{ PartitionVarName $partition.ID }}.Partition() - } - var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} - {{ end }} -{{ end }} - -{{ define "default partitions" }} - func DefaultPartitions() []Partition { - return []partition{ - {{ range $_, $partition := . 
-}} - // {{ ToSymbol $partition.ID}}Partition(), - {{ end }} - } - } -{{ end }} - -{{ define "gocode Partition" -}} -partition{ - {{ StringIfSet "ID: %q,\n" .ID -}} - {{ StringIfSet "Name: %q,\n" .Name -}} - {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} - RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, - {{ if (gt (len .Defaults) 0) -}} - Defaults: {{ template "gocode Defaults" .Defaults -}}, - {{ end -}} - Regions: {{ template "gocode Regions" .Regions }}, - Services: {{ template "gocode Services" .Services }}, -} -{{- end }} - -{{ define "gocode RegionRegex" -}} -regionRegex{ - Regexp: func() *regexp.Regexp{ - reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) - return reg - }(), -} -{{- end }} - -{{ define "gocode Regions" -}} -regions{ - {{ range $id, $region := . -}} - "{{ $id }}": {{ template "gocode Region" $region }}, - {{ end -}} -} -{{- end }} - -{{ define "gocode Region" -}} -region{ - {{ StringIfSet "Description: %q,\n" .Description -}} -} -{{- end }} - -{{ define "gocode Services" -}} -services{ - {{ range $id, $service := . -}} - "{{ $id }}": {{ template "gocode Service" $service }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Service" -}} -service{ - {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} - {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} - {{ if (gt (len .Defaults) 0) -}} - Defaults: {{ template "gocode Defaults" .Defaults -}}, - {{ end -}} - {{ if .Endpoints -}} - Endpoints: {{ template "gocode Endpoints" .Endpoints }}, - {{- end }} -} -{{- end }} - -{{ define "gocode Defaults" -}} -endpointDefaults{ - {{ range $id, $endpoint := . -}} - {{ DefaultKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Endpoints" -}} -serviceEndpoints{ - {{ range $id, $endpoint := . -}} - {{ EndpointKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Endpoint" -}} -endpoint{ - {{ StringIfSet "Hostname: %q,\n" .Hostname -}} - {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} - {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} - {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} - {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} - {{ if or .CredentialScope.Region .CredentialScope.Service -}} - CredentialScope: credentialScope{ - {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} - {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} - }, - {{- end }} - {{ BoxedBoolIfSet "Deprecated: %s,\n" .Deprecated -}} -} -{{- end }} -` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go deleted file mode 100644 index fa06f7a8f8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -package aws - -import "github.com/aws/aws-sdk-go/aws/awserr" - -var ( - // ErrMissingRegion is an error that is returned if region configuration is - // not found. - ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) - - // ErrMissingEndpoint is an error that is returned if an endpoint cannot be - // resolved for a service. 
- ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
-)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
deleted file mode 100644
index 91a6f277a7..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package aws
-
-// JSONValue is a representation of a grab bag type that will be marshaled
-// into a JSON string. This type can be used just like any other map.
-//
-// Example:
-//
-// values := aws.JSONValue{
-// "Foo": "Bar",
-// }
-// values["Baz"] = "Qux"
-type JSONValue map[string]interface{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
deleted file mode 100644
index 49674cc79e..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package aws
-
-import (
- "log"
- "os"
-)
-
-// A LogLevelType defines the level logging should be performed at. Used to instruct
-// the SDK which statements should be logged.
-type LogLevelType uint
-
-// LogLevel returns the pointer to a LogLevel. Should be used to work around
-// not being able to take the address of a non-composite literal.
-func LogLevel(l LogLevelType) *LogLevelType {
- return &l
-}
-
-// Value returns the LogLevel value or the default value LogOff if the LogLevel
-// is nil. Safe to use on nil value LogLevelTypes.
-func (l *LogLevelType) Value() LogLevelType {
- if l != nil {
- return *l
- }
- return LogOff
-}
-
-// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
-// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
-// LogLevel is nil, will default to LogOff comparison.
-func (l *LogLevelType) Matches(v LogLevelType) bool {
- c := l.Value()
- return c&v == v
-}
-
-// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
-// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
-// to LogOff comparison.
-func (l *LogLevelType) AtLeast(v LogLevelType) bool {
- c := l.Value()
- return c >= v
-}
-
-const (
- // LogOff states that no logging should be performed by the SDK. This is the
- // default state of the SDK, and should be used to disable all logging.
- LogOff LogLevelType = iota * 0x1000
-
- // LogDebug states that debug output should be logged by the SDK. This should
- // be used to inspect requests made and responses received.
- LogDebug
-)
-
-// Debug Logging Sub Levels
-const (
- // LogDebugWithSigning states that the SDK should log request signing and
- // presigning events. This should be used to log the signing details of
- // requests for debugging. Will also enable LogDebug.
- LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
-
- // LogDebugWithHTTPBody states the SDK should log HTTP request and response
- // HTTP bodies in addition to the headers and path. This should be used to
- // see the body content of requests and responses made while using the SDK.
- // Will also enable LogDebug.
- LogDebugWithHTTPBody
-
- // LogDebugWithRequestRetries states the SDK should log when service requests will
- // be retried. This should be used when you want to log when service
- // requests are being retried. Will also enable LogDebug.
- LogDebugWithRequestRetries
-
- // LogDebugWithRequestErrors states the SDK should log when service requests fail
- // to build, send, validate, or unmarshal.
- LogDebugWithRequestErrors
-
- // LogDebugWithEventStreamBody states the SDK should log EventStream
- // request and response bodies. This should be used to log the EventStream
- // wire unmarshaled message content of requests and responses made while
- // using the SDK. Will also enable LogDebug.
- LogDebugWithEventStreamBody
-
- // LogDebugWithDeprecated states the SDK should log details about deprecated functionality.
- LogDebugWithDeprecated
-)
-
-// A Logger is a minimalistic interface for the SDK to log messages to. Should
-// be used to provide custom logging writers for the SDK to use.
-type Logger interface {
- Log(...interface{})
-}
-
-// A LoggerFunc is a convenience type to convert a function taking a variadic
-// list of arguments and wrap it so the Logger interface can be used.
-//
-// Example:
-// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
-// fmt.Fprintln(os.Stdout, args...)
-// })})
-type LoggerFunc func(...interface{})
-
-// Log calls the wrapped function with the arguments provided
-func (f LoggerFunc) Log(args ...interface{}) {
- f(args...)
-}
-
-// NewDefaultLogger returns a Logger which will write log messages to stdout, and
-// uses the same formatting runes as the stdlib log.Logger
-func NewDefaultLogger() Logger {
- return &defaultLogger{
- logger: log.New(os.Stdout, "", log.LstdFlags),
- }
-}
-
-// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
-type defaultLogger struct {
- logger *log.Logger
-}
-
-// Log logs the parameters to the stdlib logger. See log.Println.
-func (l defaultLogger) Log(args ...interface{}) {
- l.logger.Println(args...)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
deleted file mode 100644
index 2ba3c56c11..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package request
-
-import (
- "strings"
-)
-
-func isErrConnectionReset(err error) bool {
- if strings.Contains(err.Error(), "read: connection reset") {
- return false
- }
-
- if strings.Contains(err.Error(), "use of closed network connection") ||
- strings.Contains(err.Error(), "connection reset") ||
- strings.Contains(err.Error(), "broken pipe") {
- return true
- }
-
- return false
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
deleted file mode 100644
index 9556332b65..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package request
-
-import (
- "fmt"
- "strings"
-)
-
-// A Handlers provides a collection of request handlers for various
-// stages of handling requests.
-type Handlers struct {
- Validate HandlerList
- Build HandlerList
- BuildStream HandlerList
- Sign HandlerList
- Send HandlerList
- ValidateResponse HandlerList
- Unmarshal HandlerList
- UnmarshalStream HandlerList
- UnmarshalMeta HandlerList
- UnmarshalError HandlerList
- Retry HandlerList
- AfterRetry HandlerList
- CompleteAttempt HandlerList
- Complete HandlerList
-}
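The Logger, LoggerFunc, and LogLevelType pieces above combine in client configuration. A minimal sketch, assuming the usual session package; the print-based sink is a stand-in for a real logging library:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Route SDK debug output, including HTTP bodies, through a custom sink.
	sess, err := session.NewSession(&aws.Config{
		LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
		Logger: aws.LoggerFunc(func(args ...interface{}) {
			fmt.Println(args...) // stand-in for a real logging library
		}),
	})
	if err != nil {
		panic(err)
	}
	_ = sess // pass sess to service clients as usual
}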
-// Copy returns a copy of this handler's lists.
-func (h *Handlers) Copy() Handlers {
- return Handlers{
- Validate: h.Validate.copy(),
- Build: h.Build.copy(),
- BuildStream: h.BuildStream.copy(),
- Sign: h.Sign.copy(),
- Send: h.Send.copy(),
- ValidateResponse: h.ValidateResponse.copy(),
- Unmarshal: h.Unmarshal.copy(),
- UnmarshalStream: h.UnmarshalStream.copy(),
- UnmarshalError: h.UnmarshalError.copy(),
- UnmarshalMeta: h.UnmarshalMeta.copy(),
- Retry: h.Retry.copy(),
- AfterRetry: h.AfterRetry.copy(),
- CompleteAttempt: h.CompleteAttempt.copy(),
- Complete: h.Complete.copy(),
- }
-}
-
-// Clear removes callback functions for all handlers.
-func (h *Handlers) Clear() {
- h.Validate.Clear()
- h.Build.Clear()
- h.BuildStream.Clear()
- h.Send.Clear()
- h.Sign.Clear()
- h.Unmarshal.Clear()
- h.UnmarshalStream.Clear()
- h.UnmarshalMeta.Clear()
- h.UnmarshalError.Clear()
- h.ValidateResponse.Clear()
- h.Retry.Clear()
- h.AfterRetry.Clear()
- h.CompleteAttempt.Clear()
- h.Complete.Clear()
-}
-
-// IsEmpty reports whether there are no handlers in any of the handler lists.
-func (h *Handlers) IsEmpty() bool {
- if h.Validate.Len() != 0 {
- return false
- }
- if h.Build.Len() != 0 {
- return false
- }
- if h.BuildStream.Len() != 0 {
- return false
- }
- if h.Send.Len() != 0 {
- return false
- }
- if h.Sign.Len() != 0 {
- return false
- }
- if h.Unmarshal.Len() != 0 {
- return false
- }
- if h.UnmarshalStream.Len() != 0 {
- return false
- }
- if h.UnmarshalMeta.Len() != 0 {
- return false
- }
- if h.UnmarshalError.Len() != 0 {
- return false
- }
- if h.ValidateResponse.Len() != 0 {
- return false
- }
- if h.Retry.Len() != 0 {
- return false
- }
- if h.AfterRetry.Len() != 0 {
- return false
- }
- if h.CompleteAttempt.Len() != 0 {
- return false
- }
- if h.Complete.Len() != 0 {
- return false
- }
-
- return true
-}
-
-// A HandlerListRunItem represents an entry in the HandlerList which
-// is being run.
-type HandlerListRunItem struct {
- Index int
- Handler NamedHandler
- Request *Request
-}
-
-// A HandlerList manages zero or more handlers in a list.
-type HandlerList struct {
- list []NamedHandler
-
- // Called after each request handler in the list is called. If set,
- // and the func returns true, the HandlerList will continue to iterate
- // over the request handlers. If false is returned the HandlerList
- // will stop iterating.
- //
- // Should be used if extra logic is to be performed between each handler
- // in the list. This can be used to terminate a list's iteration
- // based on a condition, such as stopping on error with HandlerListStopOnError,
- // or for logging with HandlerListLogItem.
- AfterEachFn func(item HandlerListRunItem) bool
-}
-
-// A NamedHandler is a struct that contains a name and function callback.
-type NamedHandler struct {
- Name string
- Fn func(*Request)
-}
-
-// copy creates a copy of the handler list.
-func (l *HandlerList) copy() HandlerList {
- n := HandlerList{
- AfterEachFn: l.AfterEachFn,
- }
- if len(l.list) == 0 {
- return n
- }
-
- n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
- return n
-}
-
-// Clear clears the handler list.
-func (l *HandlerList) Clear() {
- l.list = l.list[0:0]
-}
-
-// Len returns the number of handlers in the list.
-func (l *HandlerList) Len() int {
- return len(l.list)
-}
-
-// PushBack pushes handler f to the back of the handler list.
-func (l *HandlerList) PushBack(f func(*Request)) {
- l.PushBackNamed(NamedHandler{"__anonymous", f})
-}
-
-// PushBackNamed pushes named handler f to the back of the handler list.
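Named handlers exist so that hooks can be located, swapped, or removed after registration, via PushBackNamed and the related methods defined next. A hedged sketch; the name "example.AuditHandler" is a made-up label, not a real SDK handler name:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Register a hook under a stable name so it can be removed later.
	audit := request.NamedHandler{
		Name: "example.AuditHandler", // assumed label
		Fn: func(r *request.Request) {
			fmt.Println("sending:", r.Operation.Name)
		},
	}
	sess.Handlers.Send.PushFrontNamed(audit)

	// ...and remove it again by that same name.
	sess.Handlers.Send.RemoveByName("example.AuditHandler")
}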
-func (l *HandlerList) PushBackNamed(n NamedHandler) { - if cap(l.list) == 0 { - l.list = make([]NamedHandler, 0, 5) - } - l.list = append(l.list, n) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.PushFrontNamed(NamedHandler{"__anonymous", f}) -} - -// PushFrontNamed pushes named handler f to the front of the handler list. -func (l *HandlerList) PushFrontNamed(n NamedHandler) { - if cap(l.list) == len(l.list) { - // Allocating new list required - l.list = append([]NamedHandler{n}, l.list...) - } else { - // Enough room to prepend into list. - l.list = append(l.list, NamedHandler{}) - copy(l.list[1:], l.list) - l.list[0] = n - } -} - -// Remove removes a NamedHandler n -func (l *HandlerList) Remove(n NamedHandler) { - l.RemoveByName(n.Name) -} - -// RemoveByName removes a NamedHandler by name. -func (l *HandlerList) RemoveByName(name string) { - for i := 0; i < len(l.list); i++ { - m := l.list[i] - if m.Name == name { - // Shift array preventing creating new arrays - copy(l.list[i:], l.list[i+1:]) - l.list[len(l.list)-1] = NamedHandler{} - l.list = l.list[:len(l.list)-1] - - // decrement list so next check to length is correct - i-- - } - } -} - -// SwapNamed will swap out any existing handlers with the same name as the -// passed in NamedHandler returning true if handlers were swapped. False is -// returned otherwise. -func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == n.Name { - l.list[i].Fn = n.Fn - swapped = true - } - } - - return swapped -} - -// Swap will swap out all handlers matching the name passed in. The matched -// handlers will be swapped in. True is returned if the handlers were swapped. -func (l *HandlerList) Swap(name string, replace NamedHandler) bool { - var swapped bool - - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == name { - l.list[i] = replace - swapped = true - } - } - - return swapped -} - -// SetBackNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the end of the list. -func (l *HandlerList) SetBackNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushBackNamed(n) - } -} - -// SetFrontNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the beginning of -// the list. -func (l *HandlerList) SetFrontNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushFrontNamed(n) - } -} - -// Run executes all handlers in the list with a given request object. -func (l *HandlerList) Run(r *Request) { - for i, h := range l.list { - h.Fn(r) - item := HandlerListRunItem{ - Index: i, Handler: h, Request: r, - } - if l.AfterEachFn != nil && !l.AfterEachFn(item) { - return - } - } -} - -// HandlerListLogItem logs the request handler and the state of the -// request's Error value. Always returns true to continue iterating -// request handlers in a HandlerList. -func HandlerListLogItem(item HandlerListRunItem) bool { - if item.Request.Config.Logger == nil { - return true - } - item.Request.Config.Logger.Log("DEBUG: RequestHandler", - item.Index, item.Handler.Name, item.Request.Error) - - return true -} - -// HandlerListStopOnError returns false to stop the HandlerList iterating -// over request handlers if Request.Error is not nil. True otherwise -// to continue iterating. 
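AfterEachFn is the hook behind helpers like HandlerListStopOnError, defined just below. A small sketch, assuming the public request package API, of a list that logs each step and stops on the first recorded error:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var l request.HandlerList

	// Log every handler as it runs and stop on the first recorded error.
	l.AfterEachFn = func(item request.HandlerListRunItem) bool {
		fmt.Println("ran handler", item.Index, item.Handler.Name)
		return item.Request.Error == nil
	}

	l.PushBack(func(r *request.Request) { /* do work; may set r.Error */ })
	l.Run(&request.Request{})
}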
-func HandlerListStopOnError(item HandlerListRunItem) bool { - return item.Request.Error == nil -} - -// WithAppendUserAgent will add a string to the user agent prefixed with a -// single white space. -func WithAppendUserAgent(s string) Option { - return func(r *Request) { - r.Handlers.Build.PushBack(func(r2 *Request) { - AddToUserAgent(r, s) - }) - } -} - -// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request -// header. If the extra parameters are provided they will be added as metadata to the -// name/version pair resulting in the following format. -// "name/version (extra0; extra1; ...)" -// The user agent part will be concatenated with this current request's user agent string. -func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { - ua := fmt.Sprintf("%s/%s", name, version) - if len(extra) > 0 { - ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) - } - return func(r *Request) { - AddToUserAgent(r, ua) - } -} - -// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. -// The input string will be concatenated with the current request's user agent string. -func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { - return func(r *Request) { - AddToUserAgent(r, s) - } -} - -// WithSetRequestHeaders updates the operation request's HTTP header to contain -// the header key value pairs provided. If the header key already exists in the -// request's HTTP header set, the existing value(s) will be replaced. -// -// Header keys added will be added as canonical format with title casing -// applied via http.Header.Set method. -func WithSetRequestHeaders(h map[string]string) Option { - return withRequestHeader(h).SetRequestHeaders -} - -type withRequestHeader map[string]string - -func (h withRequestHeader) SetRequestHeaders(r *Request) { - for k, v := range h { - r.HTTPRequest.Header.Set(k, v) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go deleted file mode 100644 index 79f79602b0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ /dev/null @@ -1,24 +0,0 @@ -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := new(http.Request) - *req = *r - req.URL = &url.URL{} - *req.URL = *r.URL - req.Body = body - - req.Header = http.Header{} - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go deleted file mode 100644 index 9370fa50c3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ /dev/null @@ -1,65 +0,0 @@ -package request - -import ( - "io" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// offsetReader is a thread-safe io.ReadCloser to prevent racing -// with retrying requests -type offsetReader struct { - buf io.ReadSeeker - lock sync.Mutex - closed bool -} - -func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { - reader := &offsetReader{} - _, err := buf.Seek(offset, sdkio.SeekStart) - if err != nil { - return nil, err - } - - reader.buf = buf - return reader, nil -} - -// Close will close the instance of the offset reader's access to -// the underlying io.ReadSeeker. 
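Tying together the user-agent and request-header option helpers defined above: a hedged sketch of stamping a client's requests, assuming an S3 client; the handler name and trace header are illustrative assumptions:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Stamp every request from this client with a product marker.
	svc.Handlers.Build.PushBackNamed(request.NamedHandler{
		Name: "example.UserAgent", // assumed label
		Fn:   request.MakeAddToUserAgentHandler("my-tool", "1.2.3"),
	})

	// Or set extra headers on a single call via a request Option.
	_, _ = svc.ListBucketsWithContext(context.Background(), &s3.ListBucketsInput{},
		request.WithSetRequestHeaders(map[string]string{"X-Example-Trace": "abc123"}))
}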
-func (o *offsetReader) Close() error { - o.lock.Lock() - defer o.lock.Unlock() - o.closed = true - return nil -} - -// Read is a thread-safe read of the underlying io.ReadSeeker -func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.Lock() - defer o.lock.Unlock() - - if o.closed { - return 0, io.EOF - } - - return o.buf.Read(p) -} - -// Seek is a thread-safe seeking operation. -func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { - o.lock.Lock() - defer o.lock.Unlock() - - return o.buf.Seek(offset, whence) -} - -// CloseAndCopy will return a new offsetReader with a copy of the old buffer -// and close the old buffer. -func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { - if err := o.Close(); err != nil { - return nil, err - } - return newOffsetReader(o.buf, offset) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go deleted file mode 100644 index 636d9ec943..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ /dev/null @@ -1,722 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -const ( - // ErrCodeSerialization is the serialization error code that is received - // during protocol unmarshaling. - ErrCodeSerialization = "SerializationError" - - // ErrCodeRead is an error that is returned during HTTP reads. - ErrCodeRead = "ReadError" - - // ErrCodeResponseTimeout is the connection timeout error that is received - // during body reads. - ErrCodeResponseTimeout = "ResponseTimeout" - - // ErrCodeInvalidPresignExpire is returned when the expire time provided to - // presign is invalid - ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" - - // CanceledErrorCode is the error code that will be returned by an - // API request that was canceled. Requests given a aws.Context may - // return this error when canceled. - CanceledErrorCode = "RequestCanceled" - - // ErrCodeRequestError is an error preventing the SDK from continuing to - // process the request. - ErrCodeRequestError = "RequestError" -) - -// A Request is the service request to be made. -type Request struct { - Config aws.Config - ClientInfo metadata.ClientInfo - Handlers Handlers - - Retryer - AttemptTime time.Time - Time time.Time - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - streamingBody io.ReadCloser - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time - DisableFollowRedirects bool - - // Additional API error codes that should be retried. IsErrorRetryable - // will consider these codes in addition to its built in cases. - RetryErrorCodes []string - - // Additional API error codes that should be retried with throttle backoff - // delay. IsErrorThrottle will consider these codes in addition to its - // built in cases. - ThrottleErrorCodes []string - - // A value greater than 0 instructs the request to be signed as Presigned URL - // You should not set this field directly. Instead use Request's - // Presign or PresignRequest methods. 
- ExpireTime time.Duration - - context aws.Context - - built bool - - // Need to persist an intermediate body between the input Body and HTTP - // request body because the HTTP Client's transport can maintain a reference - // to the HTTP request's body after the client has returned. This value is - // safe to use concurrently and wrap the input Body for each HTTP request. - safeBody *offsetReader -} - -// An Operation is the service API operation to be made. -type Operation struct { - Name string - HTTPMethod string - HTTPPath string - *Paginator - - BeforePresignFn func(r *Request) error -} - -// New returns a new Request pointer for the service API operation and -// parameters. -// -// A Retryer should be provided to direct how the request is retried. If -// Retryer is nil, a default no retry value will be used. You can use -// NoOpRetryer in the Client package to disable retry behavior directly. -// -// Params is any value of input parameters to be the request payload. -// Data is pointer value to an object which the request's response -// payload will be deserialized to. -func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, - retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { - - if retryer == nil { - retryer = noOpRetryer{} - } - - method := operation.HTTPMethod - if method == "" { - method = "POST" - } - - httpReq, _ := http.NewRequest(method, "", nil) - - var err error - httpReq.URL, err = url.Parse(clientInfo.Endpoint) - if err != nil { - httpReq.URL = &url.URL{} - err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) - } - - if len(operation.HTTPPath) != 0 { - opHTTPPath := operation.HTTPPath - var opQueryString string - if idx := strings.Index(opHTTPPath, "?"); idx >= 0 { - opQueryString = opHTTPPath[idx+1:] - opHTTPPath = opHTTPPath[:idx] - } - - if strings.HasSuffix(httpReq.URL.Path, "/") && strings.HasPrefix(opHTTPPath, "/") { - opHTTPPath = opHTTPPath[1:] - } - httpReq.URL.Path += opHTTPPath - httpReq.URL.RawQuery = opQueryString - } - - r := &Request{ - Config: cfg, - ClientInfo: clientInfo, - Handlers: handlers.Copy(), - - Retryer: retryer, - Time: time.Now(), - ExpireTime: 0, - Operation: operation, - HTTPRequest: httpReq, - Body: nil, - Params: params, - Error: err, - Data: data, - } - r.SetBufferBody([]byte{}) - - return r -} - -// A Option is a functional option that can augment or modify a request when -// using a WithContext API operation method. -type Option func(*Request) - -// WithGetResponseHeader builds a request Option which will retrieve a single -// header value from the HTTP Response. If there are multiple values for the -// header key use WithGetResponseHeaders instead to access the http.Header -// map directly. The passed in val pointer must be non-nil. -// -// This Option can be used multiple times with a single API operation. -// -// var id2, versionID string -// svc.PutObjectWithContext(ctx, params, -// request.WithGetResponseHeader("x-amz-id-2", &id2), -// request.WithGetResponseHeader("x-amz-version-id", &versionID), -// ) -func WithGetResponseHeader(key string, val *string) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *val = req.HTTPResponse.Header.Get(key) - }) - } -} - -// WithGetResponseHeaders builds a request Option which will retrieve the -// headers from the HTTP response and assign them to the passed in headers -// variable. The passed in headers pointer must be non-nil. 
-// -// var headers http.Header -// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) -func WithGetResponseHeaders(headers *http.Header) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *headers = req.HTTPResponse.Header - }) - } -} - -// WithLogLevel is a request option that will set the request to use a specific -// log level when the request is made. -// -// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody)) -func WithLogLevel(l aws.LogLevelType) Option { - return func(r *Request) { - r.Config.LogLevel = aws.LogLevel(l) - } -} - -// ApplyOptions will apply each option to the request calling them in the order -// they were provided. -func (r *Request) ApplyOptions(opts ...Option) { - for _, opt := range opts { - opt(r) - } -} - -// Context always returns a non-nil context. If the Request does not have a -// context, aws.BackgroundContext will be returned. -func (r *Request) Context() aws.Context { - if r.context != nil { - return r.context - } - return aws.BackgroundContext() -} - -// SetContext adds a Context to the current request that can be used to cancel -// an in-flight request. The Context value must not be nil, or this method will -// panic. -// -// Unlike http.Request.WithContext, SetContext does not return a copy of the -// Request. It is not safe to use a single Request value for multiple -// requests. A new Request should be created for each API operation request. -// -// Go 1.6 and below: -// The http.Request's Cancel field will be set to the Done() value of -// the context. This will overwrite the Cancel field's value. -// -// Go 1.7 and above: -// The http.Request.WithContext will be used to set the context on the underlying -// http.Request. This will create a shallow copy of the http.Request. The SDK -// may create sub contexts in the future for nested requests such as retries. -func (r *Request) SetContext(ctx aws.Context) { - if ctx == nil { - panic("context cannot be nil") - } - setRequestContext(r, ctx) -} - -// WillRetry returns whether the request can be retried. -func (r *Request) WillRetry() bool { - if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { - return false - } - return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() -} - -func fmtAttemptCount(retryCount, maxRetries int) string { - return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) -} - -// ParamsFilled returns if the request's parameters have been populated -// and the parameters are valid. False is returned if no parameters are -// provided or invalid. -func (r *Request) ParamsFilled() bool { - return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() -} - -// DataFilled returns true if the request's data for response deserialization -// target has been set and is valid. False is returned if data is not -// set, or is invalid. -func (r *Request) DataFilled() bool { - return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() -} - -// SetBufferBody will set the request's body bytes that will be sent to -// the service API. -func (r *Request) SetBufferBody(buf []byte) { - r.SetReaderBody(bytes.NewReader(buf)) -} - -// SetStringBody sets the body of the request to be backed by a string. -func (r *Request) SetStringBody(s string) { - r.SetReaderBody(strings.NewReader(s)) -} - -// SetReaderBody will set the request's body reader.
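A short in-package sketch of the body setters, assuming a hypothetical openStream source; aws.ReadSeekCloser is the SDK wrapper that SetStreamingBody below also relies on:

    req.SetStringBody(`{"name":"demo"}`)  // strings.Reader: seekable, retries can rewind
    req.SetBufferBody([]byte{0x1f, 0x8b}) // bytes.Reader: seekable as well

    // A plain io.ReadCloser is not seekable on its own; the wrapper lets
    // SetReaderBody record BodyStart, though rewinding on retry still needs
    // the underlying value to support seeking.
    var stream io.ReadCloser = openStream() // hypothetical network stream
    req.SetReaderBody(aws.ReadSeekCloser(stream))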
-func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.Body = reader - - if aws.IsReaderSeekable(reader) { - var err error - // Get the Bodies current offset so retries will start from the same - // initial position. - r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) - if err != nil { - r.Error = awserr.New(ErrCodeSerialization, - "failed to determine start of request body", err) - return - } - } - r.ResetBody() -} - -// SetStreamingBody set the reader to be used for the request that will stream -// bytes to the server. Request's Body must not be set to any reader. -func (r *Request) SetStreamingBody(reader io.ReadCloser) { - r.streamingBody = reader - r.SetReaderBody(aws.ReadSeekCloser(reader)) -} - -// Presign returns the request's signed URL. Error will be returned -// if the signing fails. The expire parameter is only used for presigned Amazon -// S3 API requests. All other AWS services will use a fixed expiration -// time of 15 minutes. -// -// It is invalid to create a presigned URL with a expire duration 0 or less. An -// error is returned if expire duration is 0 or less. -func (r *Request) Presign(expire time.Duration) (string, error) { - r = r.copy() - - // Presign requires all headers be hoisted. There is no way to retrieve - // the signed headers not hoisted without this. Making the presigned URL - // useless. - r.NotHoist = false - - u, _, err := getPresignedURL(r, expire) - return u, err -} - -// PresignRequest behaves just like presign, with the addition of returning a -// set of headers that were signed. The expire parameter is only used for -// presigned Amazon S3 API requests. All other AWS services will use a fixed -// expiration time of 15 minutes. -// -// It is invalid to create a presigned URL with a expire duration 0 or less. An -// error is returned if expire duration is 0 or less. -// -// Returns the URL string for the API operation with signature in the query string, -// and the HTTP headers that were included in the signature. These headers must -// be included in any HTTP request made with the presigned URL. -// -// To prevent hoisting any headers to the query string set NotHoist to true on -// this Request value prior to calling PresignRequest. -func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { - r = r.copy() - return getPresignedURL(r, expire) -} - -// IsPresigned returns true if the request represents a presigned API url. -func (r *Request) IsPresigned() bool { - return r.ExpireTime != 0 -} - -func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { - if expire <= 0 { - return "", nil, awserr.New( - ErrCodeInvalidPresignExpire, - "presigned URL requires an expire duration greater than 0", - nil, - ) - } - - r.ExpireTime = expire - - if r.Operation.BeforePresignFn != nil { - if err := r.Operation.BeforePresignFn(r); err != nil { - return "", nil, err - } - } - - if err := r.Sign(); err != nil { - return "", nil, err - } - - return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil -} - -const ( - notRetrying = "not retrying" -) - -func debugLogReqError(r *Request, stage, retryStr string, err error) { - if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { - return - } - - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", - stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) -} - -// Build will build the request's object so it can be signed and sent -// to the service. Build will also validate all the request's parameters. 
-// Any additional build Handlers set on this request will be run -// in the order they were set. -// -// The request will only be built once. Multiple calls to build will have -// no effect. -// -// If any Validate or Build errors occur the build will stop and the error -// which occurred will be returned. -func (r *Request) Build() error { - if !r.built { - r.Handlers.Validate.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Request", notRetrying, r.Error) - return r.Error - } - r.Handlers.Build.Run(r) - if r.Error != nil { - debugLogReqError(r, "Build Request", notRetrying, r.Error) - return r.Error - } - r.built = true - } - - return r.Error -} - -// Sign will sign the request, returning error if errors are encountered. -// -// Sign will build the request prior to signing. All Sign Handlers will -// be executed in the order they were set. -func (r *Request) Sign() error { - r.Build() - if r.Error != nil { - debugLogReqError(r, "Build Request", notRetrying, r.Error) - return r.Error - } - - SanitizeHostForHeader(r.HTTPRequest) - - r.Handlers.Sign.Run(r) - return r.Error -} - -func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { - if r.streamingBody != nil { - return r.streamingBody, nil - } - - if r.safeBody != nil { - r.safeBody.Close() - } - - r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, - "failed to get next request body reader", err) - } - - // Go 1.8 tightened and clarified the rules code needs to use when building - // requests with the http package. Go 1.8 removed the automatic detection - // of if the Request.Body was empty, or actually had bytes in it. The SDK - // always sets the Request.Body even if it is empty and should not actually - // be sent. This is incorrect. - // - // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http - // client that the request really should be sent without a body. The - // Request.Body cannot be set to nil, which is preferable, because the - // field is exported and could introduce nil pointer dereferences for users - // of the SDK if they used that field. - // - // Related golang/go#18257 - l, err := aws.SeekerLen(r.Body) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, - "failed to compute request body size", err) - } - - if l == 0 { - body = NoBody - } else if l > 0 { - body = r.safeBody - } else { - // Hack to prevent sending bodies for methods where the body - // should be ignored by the server. Sending bodies on these - // methods without an associated ContentLength will cause the - // request to socket timeout because the server does not handle - // Transfer-Encoding: chunked bodies for these methods. - // - // This would only happen if a aws.ReaderSeekerCloser was used with - // a io.Reader that was not also an io.Seeker, or did not implement - // Len() method. - switch r.Operation.HTTPMethod { - case "GET", "HEAD", "DELETE": - body = NoBody - default: - body = r.safeBody - } - } - - return body, nil -} - -// GetBody will return an io.ReadSeeker of the Request's underlying -// input body with a concurrency safe wrapper. -func (r *Request) GetBody() io.ReadSeeker { - return r.safeBody -} - -// Send will send the request, returning error if errors are encountered. -// -// Send will sign the request prior to sending. All Send Handlers will -// be executed in the order they were set. -// -// Canceling a request is non-deterministic. 
If a request has been canceled, -// then the transport will choose, randomly, one of the state channels during -// reads or getting the connection. -// -// readLoop() and getConn(req *Request, cm connectMethod) -// https://github.com/golang/go/blob/master/src/net/http/transport.go -// -// Send will not close the request.Request's body. -func (r *Request) Send() error { - defer func() { - // Ensure a non-nil HTTPResponse parameter is set to ensure handlers - // checking for HTTPResponse values, don't fail. - if r.HTTPResponse == nil { - r.HTTPResponse = &http.Response{ - Header: http.Header{}, - Body: ioutil.NopCloser(&bytes.Buffer{}), - } - } - // Regardless of success or failure of the request trigger the Complete - // request handlers. - r.Handlers.Complete.Run(r) - }() - - if err := r.Error; err != nil { - return err - } - - for { - r.Error = nil - r.AttemptTime = time.Now() - - if err := r.Sign(); err != nil { - debugLogReqError(r, "Sign Request", notRetrying, err) - return err - } - - if err := r.sendRequest(); err == nil { - return nil - } - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - - if r.Error != nil || !aws.BoolValue(r.Retryable) { - return r.Error - } - - if err := r.prepareRetry(); err != nil { - r.Error = err - return err - } - } -} - -func (r *Request) prepareRetry() error { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() - if err := r.Error; err != nil { - return awserr.New(ErrCodeSerialization, - "failed to prepare body for retry", err) - - } - - // Closing response body to ensure that no response body is leaked - // between retry attempts. - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } - - return nil -} - -func (r *Request) sendRequest() (sendErr error) { - defer r.Handlers.CompleteAttempt.Run(r) - - r.Retryable = nil - r.Handlers.Send.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - debugLogReqError(r, "Validate Response", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - return nil -} - -// copy will copy a request which will allow for local manipulation of the -// request. -func (r *Request) copy() *Request { - req := &Request{} - *req = *r - req.Handlers = r.Handlers.Copy() - op := *r.Operation - req.Operation = &op - return req -} - -// AddToUserAgent adds the string to the end of the request's current user agent. 
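Tying the pieces together, a hedged sketch of driving a Request by hand; cfg, info, handlers, retryer, ctx, and GetWidgetOutput are placeholders, while New, Operation, SetContext, and Send are the APIs defined in this file:

    op := &Operation{Name: "GetWidget", HTTPMethod: "GET", HTTPPath: "/widgets/{id}"}
    out := &GetWidgetOutput{} // hypothetical deserialization target

    req := New(cfg, info, handlers, retryer, op, nil, out)
    req.SetContext(ctx)

    // Send signs, sends, and retries per the Retryer; the Complete handlers
    // run regardless of success or failure.
    if err := req.Send(); err != nil {
        return err
    }
    _ = out // populated by the Unmarshal handlers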
-func AddToUserAgent(r *Request, s string) { - curUA := r.HTTPRequest.Header.Get("User-Agent") - if len(curUA) > 0 { - s = curUA + " " + s - } - r.HTTPRequest.Header.Set("User-Agent", s) -} - -// SanitizeHostForHeader removes default port from host and updates request.Host -func SanitizeHostForHeader(r *http.Request) { - host := getHost(r) - port := portOnly(host) - if port != "" && isDefaultPort(r.URL.Scheme, port) { - r.Host = stripPort(host) - } -} - -// Returns host from request -func getHost(r *http.Request) string { - if r.Host != "" { - return r.Host - } - - if r.URL == nil { - return "" - } - - return r.URL.Host -} - -// Hostname returns u.Host, without any port number. -// -// If Host is an IPv6 literal with a port number, Hostname returns the -// IPv6 literal without the square brackets. IPv6 literals may include -// a zone identifier. -// -// Copied from the Go 1.8 standard library (net/url) -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} - -// Port returns the port part of u.Host, without the leading colon. -// If u.Host doesn't contain a port, Port returns an empty string. -// -// Copied from the Go 1.8 standard library (net/url) -func portOnly(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return "" - } - if i := strings.Index(hostport, "]:"); i != -1 { - return hostport[i+len("]:"):] - } - if strings.Contains(hostport, "]") { - return "" - } - return hostport[colon+len(":"):] -} - -// Returns true if the specified URI is using the standard port -// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) -func isDefaultPort(scheme, port string) bool { - if port == "" { - return true - } - - lowerCaseScheme := strings.ToLower(scheme) - if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go deleted file mode 100644 index 5921b8ff2a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build !go1.8 -// +build !go1.8 - -package request - -import "io" - -// NoBody is an io.ReadCloser with no bytes. Read always returns EOF -// and Close always returns nil. It can be used in an outgoing client -// request to explicitly signal that a request has zero bytes. -// An alternative, however, is to simply set Request.Body to nil. -// -// Copy of Go 1.8 NoBody type from net/http/http.go -type noBody struct{} - -func (noBody) Read([]byte) (int, error) { return 0, io.EOF } -func (noBody) Close() error { return nil } -func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } - -// NoBody is an empty reader that will trigger the Go HTTP client to not include -// and body in the HTTP request. -var NoBody = noBody{} - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. 
SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = err - return - } - - r.HTTPRequest.Body = body -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go deleted file mode 100644 index ea643c9c44..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ /dev/null @@ -1,37 +0,0 @@ -//go:build go1.8 -// +build go1.8 - -package request - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// NoBody is an http.NoBody reader instructing the Go HTTP client to not include -// a body in the HTTP request. -var NoBody = http.NoBody - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -// -// Will also set Go 1.8's http.Request.GetBody member to allow retrying -// PUT/POST redirects. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = awserr.New(ErrCodeSerialization, - "failed to reset request body", err) - return - } - - r.HTTPRequest.Body = body - r.HTTPRequest.GetBody = r.getNextRequestBody -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go deleted file mode 100644 index d8c5053025..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build go1.7 -// +build go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest = r.HTTPRequest.WithContext(ctx) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go deleted file mode 100644 index 49a243ef2d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build !go1.7 -// +build !go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Assigns the context's Done channel to the http.Request's Cancel field.
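A small sketch of the cancellation path these build-tagged files implement, assuming a hypothetical in-flight req; a standard context.Context satisfies aws.Context here:

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    req.SetContext(ctx) // on Go 1.7+ this shallow-copies HTTPRequest via WithContext

    if err := req.Send(); err != nil {
        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == CanceledErrorCode {
            // the deadline, or an explicit cancel(), aborted the request
        }
    }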
-func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest.Cancel = ctx.Done() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go deleted file mode 100644 index 64784e16f3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ /dev/null @@ -1,266 +0,0 @@ -package request - -import ( - "reflect" - "sync/atomic" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// A Pagination provides paginating of SDK API operations which are paginatable. -// Generally you should not use this type directly, but use the "Pages" API -// operations method to automatically perform pagination for you. Such as, -// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. -// -// Pagination differs from a Paginator type in that pagination is the type that -// does the pagination between API operations, and Paginator defines the -// configuration that will be used per page request. -// -// for p.Next() { -// data := p.Page().(*s3.ListObjectsOutput) -// // process the page's data -// // ... -// // break out of loop to stop fetching additional pages -// } -// -// return p.Err() -// -// See service client API operation Pages methods for examples how the SDK will -// use the Pagination type. -type Pagination struct { - // Function to return a Request value for each pagination request. - // Any configuration or handlers that need to be applied to the request - // prior to getting the next page should be done here before the request - // returned. - // - // NewRequest should always be built from the same API operations. It is - // undefined if different API operations are returned on subsequent calls. - NewRequest func() (*Request, error) - // EndPageOnSameToken, when enabled, will allow the paginator to stop on - // token that are the same as its previous tokens. - EndPageOnSameToken bool - - started bool - prevTokens []interface{} - nextTokens []interface{} - - err error - curPage interface{} -} - -// HasNextPage will return true if Pagination is able to determine that the API -// operation has additional pages. False will be returned if there are no more -// pages remaining. -// -// Will always return true if Next has not been called yet. -func (p *Pagination) HasNextPage() bool { - if !p.started { - return true - } - - hasNextPage := len(p.nextTokens) != 0 - if p.EndPageOnSameToken { - return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) - } - return hasNextPage -} - -// Err returns the error Pagination encountered when retrieving the next page. -func (p *Pagination) Err() error { - return p.err -} - -// Page returns the current page. Page should only be called after a successful -// call to Next. It is undefined what Page will return if Page is called after -// Next returns false. -func (p *Pagination) Page() interface{} { - return p.curPage -} - -// Next will attempt to retrieve the next page for the API operation. When a page -// is retrieved true will be returned. If the page cannot be retrieved, or there -// are no more pages false will be returned. -// -// Use the Page method to retrieve the current page data. The data will need -// to be cast to the API operation's output type. -// -// Use the Err method to determine if an error occurred if Page returns false. 
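Fleshing out the loop from the doc comment above, a sketch assuming an S3-style paginated operation; svc, ctx, and the s3 input/output types come from a service package, not this one:

    p := Pagination{
        NewRequest: func() (*Request, error) {
            req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{Bucket: aws.String("b")})
            req.SetContext(ctx)
            return req, nil
        },
    }

    for p.Next() {
        page := p.Page().(*s3.ListObjectsOutput)
        fmt.Println("objects on page:", len(page.Contents))
    }
    return p.Err()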
-func (p *Pagination) Next() bool { - if !p.HasNextPage() { - return false - } - - req, err := p.NewRequest() - if err != nil { - p.err = err - return false - } - - if p.started { - for i, intok := range req.Operation.InputTokens { - awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) - } - } - p.started = true - - err = req.Send() - if err != nil { - p.err = err - return false - } - - p.prevTokens = p.nextTokens - p.nextTokens = req.nextPageTokens() - p.curPage = req.Data - - return true -} - -// A Paginator is the configuration data that defines how an API operation -// should be paginated. This type is used by the API service models to define -// the generated pagination config for service APIs. -// -// The Pagination type is what provides iterating between pages of an API. It -// is only used to store the token metadata the SDK should use for performing -// pagination. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - -// nextPageTokens returns the tokens to use when asking for the next page of data. -func (r *Request) nextPageTokens() []interface{} { - if r.Operation.Paginator == nil { - return nil - } - if r.Operation.TruncationToken != "" { - tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) - if len(tr) == 0 { - return nil - } - - switch v := tr[0].(type) { - case *bool: - if !aws.BoolValue(v) { - return nil - } - case bool: - if !v { - return nil - } - } - } - - tokens := []interface{}{} - tokenAdded := false - for _, outToken := range r.Operation.OutputTokens { - vs, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(vs) == 0 { - tokens = append(tokens, nil) - continue - } - v := vs[0] - - switch tv := v.(type) { - case *string: - if len(aws.StringValue(tv)) == 0 { - tokens = append(tokens, nil) - continue - } - case string: - if len(tv) == 0 { - tokens = append(tokens, nil) - continue - } - } - - tokenAdded = true - tokens = append(tokens, v) - } - if !tokenAdded { - return nil - } - - return tokens -} - -// Ensure a deprecated item is only logged once instead of each time its used. -func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { - if logger == nil { - return - } - if atomic.CompareAndSwapInt32(flag, 0, 1) { - logger.Log(msg) - } -} - -var ( - logDeprecatedHasNextPage int32 - logDeprecatedNextPage int32 - logDeprecatedEachPage int32 -) - -// HasNextPage returns true if this request has more pages of data available. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) HasNextPage() bool { - logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, - "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") - - return len(r.nextPageTokens()) > 0 -} - -// NextPage returns a new Request that can be executed to return the next -// page of result data. Call .Send() on this request to execute it. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) NextPage() *Request { - logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, - "Request.NextPage deprecated. 
Use Pagination type for configurable pagination of API operations") - - tokens := r.nextPageTokens() - if len(tokens) == 0 { - return nil - } - - data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() - nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) - for i, intok := range nr.Operation.InputTokens { - awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) - } - return nr -} - -// EachPage iterates over each page of a paginated request object. The fn -// parameter should be a function with the following sample signature: -// -// func(page *T, lastPage bool) bool { -// return true // return false to stop iterating -// } -// -// Where "T" is the structure type matching the output structure of the given -// operation. For example, a request object generated by -// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput -// as the structure "T". The lastPage value represents whether the page is -// the last page of data or not. The return value of this function should -// return true to keep iterating or false to stop. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { - logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, - "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations") - - for page := r; page != nil; page = page.NextPage() { - if err := page.Send(); err != nil { - return err - } - if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { - return page.Error - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go deleted file mode 100644 index 3f0001f918..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ /dev/null @@ -1,309 +0,0 @@ -package request - -import ( - "net" - "net/url" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Retryer provides the interface drive the SDK's request retry behavior. The -// Retryer implementation is responsible for implementing exponential backoff, -// and determine if a request API error should be retried. -// -// client.DefaultRetryer is the SDK's default implementation of the Retryer. It -// uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to -// determine if the request is retried. -type Retryer interface { - // RetryRules return the retry delay that should be used by the SDK before - // making another request attempt for the failed request. - RetryRules(*Request) time.Duration - - // ShouldRetry returns if the failed request is retryable. - // - // Implementations may consider request attempt count when determining if a - // request is retryable, but the SDK will use MaxRetries to limit the - // number of attempts a request are made. - ShouldRetry(*Request) bool - - // MaxRetries is the number of times a request may be retried before - // failing. - MaxRetries() int -} - -// WithRetryer sets a Retryer value to the given Config returning the Config -// value for chaining. The value must not be nil. -func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { - if retryer == nil { - if cfg.Logger != nil { - cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. 
Replacing with retry disabled Retryer.") - } - retryer = noOpRetryer{} - } - cfg.Retryer = retryer - return cfg - -} - -// noOpRetryer is a internal no op retryer used when a request is created -// without a retryer. -// -// Provides a retryer that performs no retries. -// It should be used when we do not want retries to be performed. -type noOpRetryer struct{} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API; For NoOpRetryer the MaxRetries will always be zero. -func (d noOpRetryer) MaxRetries() int { - return 0 -} - -// ShouldRetry will always return false for NoOpRetryer, as it should never retry. -func (d noOpRetryer) ShouldRetry(_ *Request) bool { - return false -} - -// RetryRules returns the delay duration before retrying this request again; -// since NoOpRetryer does not retry, RetryRules always returns 0. -func (d noOpRetryer) RetryRules(_ *Request) time.Duration { - return 0 -} - -// retryableCodes is a collection of service response codes which are retry-able -// without any further action. -var retryableCodes = map[string]struct{}{ - ErrCodeRequestError: {}, - "RequestTimeout": {}, - ErrCodeResponseTimeout: {}, - "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout -} - -var throttleCodes = map[string]struct{}{ - "ProvisionedThroughputExceededException": {}, - "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "RequestThrottledException": {}, - "TooManyRequestsException": {}, // Lambda functions - "PriorRequestNotComplete": {}, // Route53 - "TransactionInProgressException": {}, - "EC2ThrottledException": {}, // EC2 -} - -// credsExpiredCodes is a collection of error codes which signify the credentials -// need to be refreshed. Expired tokens require refreshing of credentials, and -// resigning before the request can be retried. -var credsExpiredCodes = map[string]struct{}{ - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "RequestExpired": {}, // EC2 Only -} - -func isCodeThrottle(code string) bool { - _, ok := throttleCodes[code] - return ok -} - -func isCodeRetryable(code string) bool { - if _, ok := retryableCodes[code]; ok { - return true - } - - return isCodeExpiredCreds(code) -} - -func isCodeExpiredCreds(code string) bool { - _, ok := credsExpiredCodes[code] - return ok -} - -var validParentCodes = map[string]struct{}{ - ErrCodeSerialization: {}, - ErrCodeRead: {}, -} - -func isNestedErrorRetryable(parentErr awserr.Error) bool { - if parentErr == nil { - return false - } - - if _, ok := validParentCodes[parentErr.Code()]; !ok { - return false - } - - err := parentErr.OrigErr() - if err == nil { - return false - } - - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) - } - - if t, ok := err.(temporary); ok { - return t.Temporary() || isErrConnectionReset(err) - } - - return isErrConnectionReset(err) -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if error is nil. 
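A quick in-package sketch of how these tables classify errors; the error messages are made up, the codes are the ones registered above:

    throttled := awserr.New("ThrottlingException", "rate exceeded", nil)
    expired := awserr.New("ExpiredToken", "security token expired", nil)

    IsErrorThrottle(throttled) // true: listed in throttleCodes
    IsErrorRetryable(expired)  // true: expired credentials are refreshed and retried
    IsErrorRetryable(awserr.New(CanceledErrorCode, "canceled", nil)) // false: never retried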
-func IsErrorRetryable(err error) bool { - if err == nil { - return false - } - return shouldRetryError(err) -} - -type temporary interface { - Temporary() bool -} - -func shouldRetryError(origErr error) bool { - switch err := origErr.(type) { - case awserr.Error: - if err.Code() == CanceledErrorCode { - return false - } - if isNestedErrorRetryable(err) { - return true - } - - origErr := err.OrigErr() - var shouldRetry bool - if origErr != nil { - shouldRetry = shouldRetryError(origErr) - if err.Code() == ErrCodeRequestError && !shouldRetry { - return false - } - } - if isCodeRetryable(err.Code()) { - return true - } - return shouldRetry - - case *url.Error: - if strings.Contains(err.Error(), "connection refused") { - // Refused connections should be retried as the service may not yet - // be running on the port. Go TCP dial considers refused - // connections as not temporary. - return true - } - // *url.Error only implements Temporary after golang 1.6 but since - // url.Error only wraps the error: - return shouldRetryError(err.Err) - - case temporary: - if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { - return true - } - // If the error is temporary, we want to allow continuation of the - // retry process - return err.Temporary() || isErrConnectionReset(origErr) - - case nil: - // `awserr.Error.OrigErr()` can be nil, meaning there was an error but - // because we don't know the cause, it is marked as retryable. See - // TestRequest4xxUnretryable for an example. - return true - - default: - switch err.Error() { - case "net/http: request canceled", - "net/http: request canceled while waiting for connection": - // known 1.5 error case when an http request is cancelled - return false - } - // here we don't know the error; so we allow a retry. - return true - } -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if error is nil. -func IsErrorThrottle(err error) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - return isCodeThrottle(aerr.Code()) - } - return false -} - -// IsErrorExpiredCreds returns whether the error code is a credential expiry -// error. Returns false if error is nil. -func IsErrorExpiredCreds(err error) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - return isCodeExpiredCreds(aerr.Code()) - } - return false -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorRetryable -func (r *Request) IsErrorRetryable() bool { - if isErrCode(r.Error, r.RetryErrorCodes) { - return true - } - - // HTTP response status code 501 should not be retried. - // 501 represents Not Implemented which means the request method is not - // supported by the server and cannot be handled. - if r.HTTPResponse != nil { - // HTTP response status code 500 represents internal server error and - // should be retried without any throttle. - if r.HTTPResponse.StatusCode == 500 { - return true - } - } - return IsErrorRetryable(r.Error) -} - -// IsErrorThrottle returns whether the error is to be throttled based on its -// code. Returns false if the request has no Error set. 
-// -// Alias for the utility function IsErrorThrottle -func (r *Request) IsErrorThrottle() bool { - if isErrCode(r.Error, r.ThrottleErrorCodes) { - return true - } - - if r.HTTPResponse != nil { - switch r.HTTPResponse.StatusCode { - case - 429, // error caused due to too many requests - 502, // Bad Gateway error should be throttled - 503, // caused when service is unavailable - 504: // error occurred due to gateway timeout - return true - } - } - - return IsErrorThrottle(r.Error) -} - -func isErrCode(err error, codes []string) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - for _, code := range codes { - if code == aerr.Code() { - return true - } - } - } - - return false -} - -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorExpiredCreds -func (r *Request) IsErrorExpired() bool { - return IsErrorExpiredCreds(r.Error) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go deleted file mode 100644 index 09a44eb987..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go +++ /dev/null @@ -1,94 +0,0 @@ -package request - -import ( - "io" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var timeoutErr = awserr.New( - ErrCodeResponseTimeout, - "read on body has reached the timeout limit", - nil, -) - -type readResult struct { - n int - err error -} - -// timeoutReadCloser will handle body reads that take too long. -// We will return a ErrReadTimeout error if a timeout occurs. -type timeoutReadCloser struct { - reader io.ReadCloser - duration time.Duration -} - -// Read will spin off a goroutine to call the reader's Read method. We will -// select on the timer's channel or the read's channel. Whoever completes first -// will be returned. -func (r *timeoutReadCloser) Read(b []byte) (int, error) { - timer := time.NewTimer(r.duration) - c := make(chan readResult, 1) - - go func() { - n, err := r.reader.Read(b) - timer.Stop() - c <- readResult{n: n, err: err} - }() - - select { - case data := <-c: - return data.n, data.err - case <-timer.C: - return 0, timeoutErr - } -} - -func (r *timeoutReadCloser) Close() error { - return r.reader.Close() -} - -const ( - // HandlerResponseTimeout is what we use to signify the name of the - // response timeout handler. - HandlerResponseTimeout = "ResponseTimeoutHandler" -) - -// adaptToResponseTimeoutError is a handler that will replace any top level error -// to a ErrCodeResponseTimeout, if its child is that. -func adaptToResponseTimeoutError(req *Request) { - if err, ok := req.Error.(awserr.Error); ok { - aerr, ok := err.OrigErr().(awserr.Error) - if ok && aerr.Code() == ErrCodeResponseTimeout { - req.Error = aerr - } - } -} - -// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. -// This will allow for per read timeouts. If a timeout occurred, we will return the -// ErrCodeResponseTimeout. -// -// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) -func WithResponseReadTimeout(duration time.Duration) Option { - return func(r *Request) { - - var timeoutHandler = NamedHandler{ - HandlerResponseTimeout, - func(req *Request) { - req.HTTPResponse.Body = &timeoutReadCloser{ - reader: req.HTTPResponse.Body, - duration: duration, - } - }} - - // remove the handler so we are not stomping over any new durations. 
- r.Handlers.Send.RemoveByName(HandlerResponseTimeout) - r.Handlers.Send.PushBackNamed(timeoutHandler) - - r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) - r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go deleted file mode 100644 index 8630683f31..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ /dev/null @@ -1,286 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // InvalidParameterErrCode is the error code for invalid parameters errors - InvalidParameterErrCode = "InvalidParameter" - // ParamRequiredErrCode is the error code for required parameter errors - ParamRequiredErrCode = "ParamRequiredError" - // ParamMinValueErrCode is the error code for fields with too low of a - // number value. - ParamMinValueErrCode = "ParamMinValueError" - // ParamMinLenErrCode is the error code for fields without enough elements. - ParamMinLenErrCode = "ParamMinLenError" - // ParamMaxLenErrCode is the error code for value being too long. - ParamMaxLenErrCode = "ParamMaxLenError" - - // ParamFormatErrCode is the error code for a field with invalid - // format or characters. - ParamFormatErrCode = "ParamFormatInvalidError" -) - -// Validator provides a way for types to perform validation logic on their -// input values that external code can use to determine if a type's values -// are valid. -type Validator interface { - Validate() error -} - -// An ErrInvalidParams provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type ErrInvalidParams struct { - // Context is the base context of the invalid parameter group. - Context string - errs []ErrInvalidParam -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *ErrInvalidParams) Add(err ErrInvalidParam) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another ErrInvalidParams -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e ErrInvalidParams) Len() int { - return len(e.errs) -} - -// Code returns the code of the error -func (e ErrInvalidParams) Code() string { - return InvalidParameterErrCode -} - -// Message returns the message of the error -func (e ErrInvalidParams) Message() string { - return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) -} - -// Error returns the string formatted form of the invalid parameters. 
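Generated service models consume these types roughly as in the following sketch; PutWidgetInput and its Name field are hypothetical:

    func (p *PutWidgetInput) Validate() error {
        invalidParams := ErrInvalidParams{Context: "PutWidgetInput"}
        if p.Name == nil {
            invalidParams.Add(NewErrParamRequired("Name"))
        }
        if p.Name != nil && len(*p.Name) < 1 {
            invalidParams.Add(NewErrParamMinLen("Name", 1))
        }
        if invalidParams.Len() > 0 {
            return invalidParams // "InvalidParameter: 1 validation error(s) found."
        }
        return nil
    }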
-func (e ErrInvalidParams) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Message()) - } - - return w.String() -} - -// OrigErr returns the invalid parameters as a awserr.BatchedErrors value -func (e ErrInvalidParams) OrigErr() error { - return awserr.NewBatchError( - InvalidParameterErrCode, e.Message(), e.OrigErrs()) -} - -// OrigErrs returns a slice of the invalid parameters -func (e ErrInvalidParams) OrigErrs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An ErrInvalidParam represents an invalid parameter error type. -type ErrInvalidParam interface { - awserr.Error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type errInvalidParam struct { - context string - nestedContext string - field string - code string - msg string -} - -// Code returns the error code for the type of invalid parameter. -func (e *errInvalidParam) Code() string { - return e.code -} - -// Message returns the reason the parameter was invalid, and its context. -func (e *errInvalidParam) Message() string { - return fmt.Sprintf("%s, %s.", e.msg, e.Field()) -} - -// Error returns the string version of the invalid parameter error. -func (e *errInvalidParam) Error() string { - return fmt.Sprintf("%s: %s", e.code, e.Message()) -} - -// OrigErr returns nil, Implemented for awserr.Error interface. -func (e *errInvalidParam) OrigErr() error { - return nil -} - -// Field Returns the field and context the error occurred. -func (e *errInvalidParam) Field() string { - field := e.context - if len(field) > 0 { - field += "." - } - if len(e.nestedContext) > 0 { - field += fmt.Sprintf("%s.", e.nestedContext) - } - field += e.field - - return field -} - -// SetContext updates the base context of the error. -func (e *errInvalidParam) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. -func (e *errInvalidParam) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - } else { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - } - -} - -// An ErrParamRequired represents an required parameter error. -type ErrParamRequired struct { - errInvalidParam -} - -// NewErrParamRequired creates a new required parameter error. -func NewErrParamRequired(field string) *ErrParamRequired { - return &ErrParamRequired{ - errInvalidParam{ - code: ParamRequiredErrCode, - field: field, - msg: fmt.Sprintf("missing required field"), - }, - } -} - -// An ErrParamMinValue represents a minimum value parameter error. -type ErrParamMinValue struct { - errInvalidParam - min float64 -} - -// NewErrParamMinValue creates a new minimum value parameter error. -func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { - return &ErrParamMinValue{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field value of %v", min), - }, - min: min, - } -} - -// MinValue returns the field's require minimum value. -// -// float64 is returned for both int and float min values. -func (e *ErrParamMinValue) MinValue() float64 { - return e.min -} - -// An ErrParamMinLen represents a minimum length parameter error. 
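Nested structures chain their validations through AddNested, producing dotted paths such as PutWidgetInput.Config.Region; a sketch with a hypothetical Config field:

    if p.Config != nil {
        if err := p.Config.Validate(); err != nil {
            // re-root the child's errors under "Config" before merging
            invalidParams.AddNested("Config", err.(ErrInvalidParams))
        }
    }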
-type ErrParamMinLen struct { - errInvalidParam - min int -} - -// NewErrParamMinLen creates a new minimum length parameter error. -func NewErrParamMinLen(field string, min int) *ErrParamMinLen { - return &ErrParamMinLen{ - errInvalidParam: errInvalidParam{ - code: ParamMinLenErrCode, - field: field, - msg: fmt.Sprintf("minimum field size of %v", min), - }, - min: min, - } -} - -// MinLen returns the field's required minimum length. -func (e *ErrParamMinLen) MinLen() int { - return e.min -} - -// An ErrParamMaxLen represents a maximum length parameter error. -type ErrParamMaxLen struct { - errInvalidParam - max int -} - -// NewErrParamMaxLen creates a new maximum length parameter error. -func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { - return &ErrParamMaxLen{ - errInvalidParam: errInvalidParam{ - code: ParamMaxLenErrCode, - field: field, - msg: fmt.Sprintf("maximum size of %v, %v", max, value), - }, - max: max, - } -} - -// MaxLen returns the field's required maximum length. -func (e *ErrParamMaxLen) MaxLen() int { - return e.max -} - -// An ErrParamFormat represents an invalid format parameter error. -type ErrParamFormat struct { - errInvalidParam - format string -} - -// NewErrParamFormat creates a new invalid format parameter error. -func NewErrParamFormat(field string, format, value string) *ErrParamFormat { - return &ErrParamFormat{ - errInvalidParam: errInvalidParam{ - code: ParamFormatErrCode, - field: field, - msg: fmt.Sprintf("format %v, %v", format, value), - }, - format: format, - } -} - -// Format returns the field's required format. -func (e *ErrParamFormat) Format() string { - return e.format -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go deleted file mode 100644 index 4601f883cc..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ /dev/null @@ -1,295 +0,0 @@ -package request - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when -// the waiter's max attempts have been exhausted. -const WaiterResourceNotReadyErrorCode = "ResourceNotReady" - -// A WaiterOption is a function that will update the Waiter value's fields to -// configure the waiter. -type WaiterOption func(*Waiter) - -// WithWaiterMaxAttempts returns a WaiterOption that sets the maximum number of -// times the waiter should attempt to check the resource for the target state. -func WithWaiterMaxAttempts(max int) WaiterOption { - return func(w *Waiter) { - w.MaxAttempts = max - } -} - -// WaiterDelay will return a delay the waiter should pause between attempts to -// check the resource state. The passed in attempt is the number of times the -// Waiter has checked the resource state. -type WaiterDelay func(attempt int) time.Duration - -// ConstantWaiterDelay returns a WaiterDelay that will always return a constant -// delay the waiter should use between attempts. It ignores the number of -// attempts made. -func ConstantWaiterDelay(delay time.Duration) WaiterDelay { - return func(attempt int) time.Duration { - return delay - } -} - -// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
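A sketch wiring the options above into the Waiter type defined just below; the waiter name, acceptor codes, and newDescribeWidgetRequest constructor are hypothetical:

    w := Waiter{
        Name: "WaitUntilWidgetReady",
        Acceptors: []WaiterAcceptor{
            {State: SuccessWaiterState, Matcher: StatusWaiterMatch, Expected: 200},
            {State: FailureWaiterState, Matcher: ErrorWaiterMatch, Expected: "WidgetDeleted"},
        },
        NewRequest: func(opts []Option) (*Request, error) {
            req := newDescribeWidgetRequest() // hypothetical request constructor
            req.ApplyOptions(opts...)
            return req, nil
        },
    }
    w.ApplyOptions(
        WithWaiterMaxAttempts(40),
        WithWaiterDelay(ConstantWaiterDelay(15*time.Second)),
    )
    err := w.WaitWithContext(aws.BackgroundContext())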
-func WithWaiterDelay(delayer WaiterDelay) WaiterOption { - return func(w *Waiter) { - w.Delay = delayer - } -} - -// WithWaiterLogger returns a waiter option to set the logger a waiter -// should use to log warnings and errors to. -func WithWaiterLogger(logger aws.Logger) WaiterOption { - return func(w *Waiter) { - w.Logger = logger - } -} - -// WithWaiterRequestOptions returns a waiter option setting the request -// options for each request the waiter makes. Appends to waiter's request -// options already set. -func WithWaiterRequestOptions(opts ...Option) WaiterOption { - return func(w *Waiter) { - w.RequestOptions = append(w.RequestOptions, opts...) - } -} - -// A Waiter provides the functionality to perform a blocking call which will -// wait for a resource state to be satisfied by a service. -// -// This type should not be used directly. The API operations provided in the -// service packages prefixed with "WaitUntil" should be used instead. -type Waiter struct { - Name string - Acceptors []WaiterAcceptor - Logger aws.Logger - - MaxAttempts int - Delay WaiterDelay - - RequestOptions []Option - NewRequest func([]Option) (*Request, error) - SleepWithContext func(aws.Context, time.Duration) error -} - -// ApplyOptions updates the waiter with the list of waiter options provided. -func (w *Waiter) ApplyOptions(opts ...WaiterOption) { - for _, fn := range opts { - fn(w) - } -} - -// WaiterState are states the waiter uses based on WaiterAcceptor definitions -// to identify if the resource state the waiter is waiting on has occurred. -type WaiterState int - -// String returns the string representation of the waiter state. -func (s WaiterState) String() string { - switch s { - case SuccessWaiterState: - return "success" - case FailureWaiterState: - return "failure" - case RetryWaiterState: - return "retry" - default: - return "unknown waiter state" - } -} - -// States the waiter acceptors will use to identify target resource states. -const ( - SuccessWaiterState WaiterState = iota // waiter successful - FailureWaiterState // waiter failed - RetryWaiterState // waiter needs to be retried -) - -// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor -// definition's Expected attribute. -type WaiterMatchMode int - -// Modes the waiter will use when inspecting API response to identify target -// resource states. -const ( - PathAllWaiterMatch WaiterMatchMode = iota // match on all paths - PathWaiterMatch // match on specific path - PathAnyWaiterMatch // match on any path - PathListWaiterMatch // match on list of paths - StatusWaiterMatch // match on status code - ErrorWaiterMatch // match on error -) - -// String returns the string representation of the waiter match mode. -func (m WaiterMatchMode) String() string { - switch m { - case PathAllWaiterMatch: - return "pathAll" - case PathWaiterMatch: - return "path" - case PathAnyWaiterMatch: - return "pathAny" - case PathListWaiterMatch: - return "pathList" - case StatusWaiterMatch: - return "status" - case ErrorWaiterMatch: - return "error" - default: - return "unknown waiter match mode" - } -} - -// WaitWithContext will make requests for the API operation using NewRequest to -// build API requests. The request's response will be compared against the -// Waiter's Acceptors to determine the successful state of the resource the -// waiter is inspecting. -// -// The passed in context must not be nil. If it is nil a panic will occur. The -// Context will be used to cancel the waiter's pending requests and retry delays. 
-// Use aws.BackgroundContext if no context is available. -// -// The waiter will continue until the target state defined by the Acceptors, -// or the max attempts expires. -// -// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's -// retryer ShouldRetry returns false. This normally will happen when the max -// wait attempts expires. -func (w Waiter) WaitWithContext(ctx aws.Context) error { - - for attempt := 1; ; attempt++ { - req, err := w.NewRequest(w.RequestOptions) - if err != nil { - waiterLogf(w.Logger, "unable to create request %v", err) - return err - } - req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) - err = req.Send() - - // See if any of the acceptors match the request's response, or error - for _, a := range w.Acceptors { - if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { - return matchErr - } - } - - // The Waiter should only check the resource state MaxAttempts times - // This is here instead of in the for loop above to prevent delaying - // unnecessary when the waiter will not retry. - if attempt == w.MaxAttempts { - break - } - - // Delay to wait before inspecting the resource again - delay := w.Delay(attempt) - if sleepFn := req.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(delay) - } else { - sleepCtxFn := w.SleepWithContext - if sleepCtxFn == nil { - sleepCtxFn = aws.SleepWithContext - } - - if err := sleepCtxFn(ctx, delay); err != nil { - return awserr.New(CanceledErrorCode, "waiter context canceled", err) - } - } - } - - return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) -} - -// A WaiterAcceptor provides the information needed to wait for an API operation -// to complete. -type WaiterAcceptor struct { - State WaiterState - Matcher WaiterMatchMode - Argument string - Expected interface{} -} - -// match returns if the acceptor found a match with the passed in request -// or error. True is returned if the acceptor made a match, error is returned -// if there was an error attempting to perform the match. -func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { - result := false - var vals []interface{} - - switch a.Matcher { - case PathAllWaiterMatch, PathWaiterMatch: - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case PathAnyWaiterMatch: - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case PathListWaiterMatch: - // ignored matcher - case StatusWaiterMatch: - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case ErrorWaiterMatch: - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", - name, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. 
- return false, nil - } - - switch a.State { - case SuccessWaiterState: - // waiter completed - return true, nil - case FailureWaiterState: - // Waiter failure state triggered - return true, awserr.New(WaiterResourceNotReadyErrorCode, - "failed waiting for successful resource state", err) - case RetryWaiterState: - // clear the error and retry the operation - return false, nil - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", - name, a.State) - return false, nil - } -} - -func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { - if logger != nil { - logger.Log(fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go deleted file mode 100644 index 1d3f4c3adc..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ /dev/null @@ -1,303 +0,0 @@ -package session - -import ( - "fmt" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/processcreds" - "github.com/aws/aws-sdk-go/aws/credentials/ssocreds" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" - "github.com/aws/aws-sdk-go/service/sts" -) - -// CredentialsProviderOptions specifies additional options for configuring -// credentials providers. -type CredentialsProviderOptions struct { - // WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider, - // such as setting its ExpiryWindow. - WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider) -} - -func resolveCredentials(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (*credentials.Credentials, error) { - - switch { - case len(sessOpts.Profile) != 0: - // User explicitly provided an Profile in the session's configuration - // so load that profile from shared config first. - // Github(aws/aws-sdk-go#2727) - return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) - - case envCfg.Creds.HasKeys(): - // Environment credentials - return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil - - case len(envCfg.WebIdentityTokenFilePath) != 0: - // Web identity token from environment, RoleARN required to also be - // set. - return assumeWebIdentity(cfg, handlers, - envCfg.WebIdentityTokenFilePath, - envCfg.RoleARN, - envCfg.RoleSessionName, - sessOpts.CredentialsProviderOptions, - ) - - default: - // Fallback to the "default" credential resolution chain. - return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) - } -} - -// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but -// 'AWS_ROLE_ARN' was not set. -var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) - -// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but -// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. 
-var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) - -func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, - filepath string, - roleARN, sessionName string, - credOptions *CredentialsProviderOptions, -) (*credentials.Credentials, error) { - - if len(filepath) == 0 { - return nil, WebIdentityEmptyTokenFilePathErr - } - - if len(roleARN) == 0 { - return nil, WebIdentityEmptyRoleARNErr - } - - svc := sts.New(&Session{ - Config: cfg, - Handlers: handlers.Copy(), - }) - - var optFns []func(*stscreds.WebIdentityRoleProvider) - if credOptions != nil && credOptions.WebIdentityRoleProviderOptions != nil { - optFns = append(optFns, credOptions.WebIdentityRoleProviderOptions) - } - - p := stscreds.NewWebIdentityRoleProviderWithOptions(svc, roleARN, sessionName, stscreds.FetchTokenPath(filepath), optFns...) - return credentials.NewCredentials(p), nil -} - -func resolveCredsFromProfile(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (creds *credentials.Credentials, err error) { - - switch { - case sharedCfg.SourceProfile != nil: - // Assume IAM role with credentials source from a different profile. - creds, err = resolveCredsFromProfile(cfg, envCfg, - *sharedCfg.SourceProfile, handlers, sessOpts, - ) - - case sharedCfg.Creds.HasKeys(): - // Static Credentials from Shared Config/Credentials file. - creds = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - - case len(sharedCfg.CredentialSource) != 0: - creds, err = resolveCredsFromSource(cfg, envCfg, - sharedCfg, handlers, sessOpts, - ) - - case len(sharedCfg.WebIdentityTokenFile) != 0: - // Credentials from Assume Web Identity token require an IAM Role, and - // that roll will be assumed. May be wrapped with another assume role - // via SourceProfile. - return assumeWebIdentity(cfg, handlers, - sharedCfg.WebIdentityTokenFile, - sharedCfg.RoleARN, - sharedCfg.RoleSessionName, - sessOpts.CredentialsProviderOptions, - ) - - case sharedCfg.hasSSOConfiguration(): - creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers) - - case len(sharedCfg.CredentialProcess) != 0: - // Get credentials from CredentialProcess - creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) - - default: - // Fallback to default credentials provider, include mock errors for - // the credential chain so user can identify why credentials failed to - // be retrieved. 
- creds = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{ - Err: awserr.New("EnvAccessKeyNotFound", - "failed to find credentials in the environment.", nil), - }, - &credProviderError{ - Err: awserr.New("SharedCredsLoad", - fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), - }, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) - } - if err != nil { - return nil, err - } - - if len(sharedCfg.RoleARN) > 0 { - cfgCp := *cfg - cfgCp.Credentials = creds - return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) - } - - return creds, nil -} - -func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) (*credentials.Credentials, error) { - if err := sharedCfg.validateSSOConfiguration(); err != nil { - return nil, err - } - - cfgCopy := cfg.Copy() - cfgCopy.Region = &sharedCfg.SSORegion - - return ssocreds.NewCredentials( - &Session{ - Config: cfgCopy, - Handlers: handlers.Copy(), - }, - sharedCfg.SSOAccountID, - sharedCfg.SSORoleName, - sharedCfg.SSOStartURL, - ), nil -} - -// valid credential source values -const ( - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" -) - -func resolveCredsFromSource(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (creds *credentials.Credentials, err error) { - - switch sharedCfg.CredentialSource { - case credSourceEc2Metadata: - p := defaults.RemoteCredProvider(*cfg, handlers) - creds = credentials.NewCredentials(p) - - case credSourceEnvironment: - creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) - - case credSourceECSContainer: - if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { - return nil, ErrSharedConfigECSContainerEnvVarEmpty - } - - p := defaults.RemoteCredProvider(*cfg, handlers) - creds = credentials.NewCredentials(p) - - default: - return nil, ErrSharedConfigInvalidCredSource - } - - return creds, nil -} - -func credsFromAssumeRole(cfg aws.Config, - handlers request.Handlers, - sharedCfg sharedConfig, - sessOpts Options, -) (*credentials.Credentials, error) { - - if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return nil, AssumeRoleTokenProviderNotSetError{} - } - - return stscreds.NewCredentials( - &Session{ - Config: &cfg, - Handlers: handlers.Copy(), - }, - sharedCfg.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.RoleSessionName - - if sessOpts.AssumeRoleDuration == 0 && - sharedCfg.AssumeRoleDuration != nil && - *sharedCfg.AssumeRoleDuration/time.Minute > 15 { - opt.Duration = *sharedCfg.AssumeRoleDuration - } else if sessOpts.AssumeRoleDuration != 0 { - opt.Duration = sessOpts.AssumeRoleDuration - } - - // Assume role with external ID - if len(sharedCfg.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ), nil -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a -// session when the MFAToken option is not set when shared config is configured -// load assume a role with an MFA token. 
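The AssumeRoleTokenProviderNotSetError declared next is what surfaces when a profile with mfa_serial is loaded and no token provider was supplied; the fix, as the session package's own docs suggest, is to wire a provider into the session options:

    import (
        "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    // Prompt on stdin for the MFA code whenever the assumed role's
    // credentials need to be refreshed.
    var sess = session.Must(session.NewSessionWithOptions(session.Options{
        SharedConfigState:       session.SharedConfigEnable,
        AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
    }))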
-type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. -func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -// OrigErr is the underlying error that caused the failure. -func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error -} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go deleted file mode 100644 index 4390ad52f4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build go1.13 -// +build go1.13 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCustomTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go deleted file mode 100644 index 668565bea0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build !go1.13 && go1.7 -// +build !go1.13,go1.7 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCustomTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go deleted file mode 100644 index e101aa6b6c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build !go1.6 && go1.5 -// +build !go1.6,go1.5 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. 
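The getCustomTransport variants below exist to support this CA-bundle override; callers normally reach it through AWS_CA_BUNDLE or the CustomCABundle session option. A sketch with an illustrative bundle path; a bundle that fails to parse surfaces as a LoadCustomCABundleError at session creation rather than on the first request:

    import (
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func newSessionWithPrivateCA() (*session.Session, error) {
        // Any io.Reader yielding PEM certificates works here.
        bundle, err := os.Open("/etc/pki/private-ca-bundle.pem")
        if err != nil {
            return nil, err
        }
        defer bundle.Close()

        // Replaces the Transport's root CAs for every client built from
        // this session.
        return session.NewSessionWithOptions(session.Options{
            CustomCABundle: bundle,
        })
    }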
-func getCustomTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go deleted file mode 100644 index b5fcbe0d1e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !go1.7 && go1.6 -// +build !go1.7,go1.6 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCustomTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go deleted file mode 100644 index ff3cc012ae..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ /dev/null @@ -1,367 +0,0 @@ -/* -Package session provides configuration for the SDK's service clients. Sessions -can be shared across service clients that share the same base configuration. - -Sessions are safe to use concurrently as long as the Session is not being -modified. Sessions should be cached when possible, because creating a new -Session will load all configuration values from the environment, and config -files each time the Session is created. Sharing the Session value across all of -your service clients will ensure the configuration is loaded the fewest number -of times possible. - -Sessions options from Shared Config - -By default NewSession will only load credentials from the shared credentials -file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is -set to a truthy value the Session will be created from the configuration -values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. Using the NewSessionWithOptions with -SharedConfigState set to SharedConfigEnable will create the session as if the -AWS_SDK_LOAD_CONFIG environment variable was set. - -Credential and config loading order - -The Session will attempt to load configuration and credentials from the -environment, configuration files, and other credential sources. The order -configuration is loaded in is: - - * Environment Variables - * Shared Credentials file - * Shared Configuration file (if SharedConfig is enabled) - * EC2 Instance Metadata (credentials only) - -The Environment variables for credentials will have precedence over shared -config even if SharedConfig is enabled. To override this behavior, and use -shared config credentials instead specify the session.Options.Profile, (e.g. -when using credential_source=Environment to assume a role). - - sess, err := session.NewSessionWithOptions(session.Options{ - Profile: "myProfile", - }) - -Creating Sessions - -Creating a Session without additional options will load credentials region, and -profile loaded from the environment and shared config automatically. See, -"Environment Variables" section for information on environment variables used -by Session. 
- - // Create Session - sess, err := session.NewSession() - - -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded, config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. - - // Create a Session with a custom region - sess, err := session.NewSession(&aws.Config{ - Region: aws.String("us-west-2"), - }) - -Use NewSessionWithOptions to provide additional configuration driving how the -Session's configuration will be loaded. Such as, specifying shared config -profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). - - // Equivalent to session.NewSession() - sess, err := session.NewSessionWithOptions(session.Options{ - // Options - }) - - sess, err := session.NewSessionWithOptions(session.Options{ - // Specify profile to load for the session's config - Profile: "profile_name", - - // Provide SDK Config options, such as Region. - Config: aws.Config{ - Region: aws.String("us-west-2"), - }, - - // Force enable Shared Config support - SharedConfigState: session.SharedConfigEnable, - }) - -Adding Handlers - -You can add handlers to a session to decorate API operation, (e.g. adding HTTP -headers). All clients that use the Session receive a copy of the Session's -handlers. For example, the following request handler added to the Session logs -every requests made. - - // Create a session, and add additional handlers for all service - // clients created with the Session to inherit. Adds logging handler. - sess := session.Must(session.NewSession()) - - sess.Handlers.Send.PushFront(func(r *request.Request) { - // Log every request made and its payload - logger.Printf("Request: %s/%s, Params: %s", - r.ClientInfo.ServiceName, r.Operation, r.Params) - }) - -Shared Config Fields - -By default the SDK will only load the shared credentials file's -(~/.aws/credentials) credentials values, and all other config is provided by -the environment variables, SDK defaults, and user provided aws.Config values. - -If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable -option is used to create the Session the full shared config values will be -loaded. This includes credentials, region, and support for assume role. In -addition the Session will load its configuration from both the shared config -file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both -files have the same format. - -If both config files are present the configuration from both files will be -read. The Session will be created from configuration values from the shared -credentials file (~/.aws/credentials) over those in the shared config file -(~/.aws/config). - -Credentials are the values the SDK uses to authenticating requests with AWS -Services. When specified in a file, both aws_access_key_id and -aws_secret_access_key must be provided together in the same file to be -considered valid. They will be ignored if both are not present. -aws_session_token is an optional field that can be provided in addition to the -other two fields. - - aws_access_key_id = AKID - aws_secret_access_key = SECRET - aws_session_token = TOKEN - - ; region only supported if SharedConfigEnabled. - region = us-east-1 - -Assume Role configuration - -The role_arn field allows you to configure the SDK to assume an IAM role using -a set of credentials from another source. Such as when paired with static -credentials, "profile_source", "credential_process", or "credential_source" -fields. 
If "role_arn" is provided, a source of credentials must also be -specified, such as "source_profile", "credential_source", or -"credential_process". - - role_arn = arn:aws:iam:::role/ - source_profile = profile_with_creds - external_id = 1234 - mfa_serial = - role_session_name = session_name - - -The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you -must also set the Session Option.AssumeRoleTokenProvider. The Session will fail -to load if the AssumeRoleTokenProvider is not specified. - - sess := session.Must(session.NewSessionWithOptions(session.Options{ - AssumeRoleTokenProvider: stscreds.StdinTokenProvider, - })) - -To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider -documentation. - -Environment Variables - -When a Session is created several environment variables can be set to adjust -how the SDK functions, and what configuration data it loads when creating -Sessions. All environment values are optional, but some values like credentials -require multiple of the values to set or the partial values will be ignored. -All environment variable values are strings unless otherwise noted. - -Environment configuration values. If set both Access Key ID and Secret Access -Key must be provided. Session Token and optionally also be provided, but is -not required. - - # Access Key ID - AWS_ACCESS_KEY_ID=AKID - AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - - # Secret Access Key - AWS_SECRET_ACCESS_KEY=SECRET - AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - - # Session Token - AWS_SESSION_TOKEN=TOKEN - -Region value will instruct the SDK where to make service API requests to. If is -not provided in the environment the region must be provided before a service -client request is made. - - AWS_REGION=us-east-1 - - # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, - # and AWS_REGION is not also set. - AWS_DEFAULT_REGION=us-east-1 - -Profile name the SDK should load use when loading shared config from the -configuration files. If not provided "default" will be used as the profile name. - - AWS_PROFILE=my_profile - - # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, - # and AWS_PROFILE is not also set. - AWS_DEFAULT_PROFILE=my_profile - -SDK load config instructs the SDK to load the shared config in addition to -shared credentials. This also expands the configuration loaded so the shared -credentials will have parity with the shared config file. This also enables -Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE -env values as well. - - AWS_SDK_LOAD_CONFIG=1 - -Custom Shared Config and Credential Files - -Shared credentials file path can be set to instruct the SDK to use an alternative -file for the shared credentials. If not set the file will be loaded from -$HOME/.aws/credentials on Linux/Unix based systems, and -%USERPROFILE%\.aws\credentials on Windows. - - AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - -Shared config file path can be set to instruct the SDK to use an alternative -file for the shared config. If not set the file will be loaded from -$HOME/.aws/config on Linux/Unix based systems, and -%USERPROFILE%\.aws\config on Windows. - - AWS_CONFIG_FILE=$HOME/my_shared_config - -Custom CA Bundle - -Path to a custom Credentials Authority (CA) bundle PEM file that the SDK -will use instead of the default system's root CA bundle. Use this only -if you want to replace the CA bundle the SDK uses for TLS requests. 
- - AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - -Enabling this option will attempt to merge the Transport into the SDK's HTTP -client. If the client's Transport is not a http.Transport an error will be -returned. If the Transport's TLS config is set this option will cause the SDK -to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file -contains multiple certificates all of them will be loaded. - -The Session option CustomCABundle is also available when creating sessions -to also enable this feature. CustomCABundle session option field has priority -over the AWS_CA_BUNDLE environment variable, and will be used if both are set. - -Setting a custom HTTPClient in the aws.Config options will override this setting. -To use this option and custom HTTP client, the HTTP client needs to be provided -when creating the session. Not the service client. - -Custom Client TLS Certificate - -The SDK supports the environment and session option being configured with -Client TLS certificates that are sent as a part of the client's TLS handshake -for client authentication. If used, both Cert and Key values are required. If -one is missing, or either fail to load the contents of the file an error will -be returned. - -HTTP Client's Transport concrete implementation must be a http.Transport -or creating the session will fail. - - AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key - AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert - -This can also be configured via the session.Options ClientTLSCert and ClientTLSKey. - - sess, err := session.NewSessionWithOptions(session.Options{ - ClientTLSCert: myCertFile, - ClientTLSKey: myKeyFile, - }) - -Custom EC2 IMDS Endpoint - -The endpoint of the EC2 IMDS client can be configured via the environment -variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a -Session. See Options.EC2IMDSEndpoint for more details. - - AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 - -If using an URL with an IPv6 address literal, the IPv6 address -component must be enclosed in square brackets. - - AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] - -The custom EC2 IMDS endpoint can also be specified via the Session options. - - sess, err := session.NewSessionWithOptions(session.Options{ - EC2MetadataEndpoint: "http://[::1]", - }) - -FIPS and DualStack Endpoints - -The SDK can be configured to resolve an endpoint with certain capabilities such as FIPS and DualStack. - -You can configure a FIPS endpoint using an environment variable, shared config ($HOME/.aws/config), -or programmatically. - -To configure a FIPS endpoint set the environment variable set the AWS_USE_FIPS_ENDPOINT to true or false to enable -or disable FIPS endpoint resolution. - - AWS_USE_FIPS_ENDPOINT=true - -To configure a FIPS endpoint using shared config, set use_fips_endpoint to true or false to enable -or disable FIPS endpoint resolution. 
- - [profile myprofile] - region=us-west-2 - use_fips_endpoint=true - -To configure a FIPS endpoint programmatically - - // Option 1: Configure it on a session for all clients - sess, err := session.NewSessionWithOptions(session.Options{ - UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled, - }) - if err != nil { - // handle error - } - - client := s3.New(sess) - - // Option 2: Configure it per client - sess, err := session.NewSession() - if err != nil { - // handle error - } - - client := s3.New(sess, &aws.Config{ - UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled, - }) - -You can configure a DualStack endpoint using an environment variable, shared config ($HOME/.aws/config), -or programmatically. - -To configure a DualStack endpoint set the environment variable set the AWS_USE_DUALSTACK_ENDPOINT to true or false to -enable or disable DualStack endpoint resolution. - - AWS_USE_DUALSTACK_ENDPOINT=true - -To configure a DualStack endpoint using shared config, set use_dualstack_endpoint to true or false to enable -or disable DualStack endpoint resolution. - - [profile myprofile] - region=us-west-2 - use_dualstack_endpoint=true - -To configure a DualStack endpoint programmatically - - // Option 1: Configure it on a session for all clients - sess, err := session.NewSessionWithOptions(session.Options{ - UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, - }) - if err != nil { - // handle error - } - - client := s3.New(sess) - - // Option 2: Configure it per client - sess, err := session.NewSession() - if err != nil { - // handle error - } - - client := s3.New(sess, &aws.Config{ - UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, - }) -*/ -package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go deleted file mode 100644 index d6fa24776c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ /dev/null @@ -1,471 +0,0 @@ -package session - -import ( - "fmt" - "os" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/endpoints" -) - -// EnvProviderName provides a name of the provider when config is loaded from environment. -const EnvProviderName = "EnvConfigCredentials" - -// envConfig is a collection of environment values the SDK will read -// setup config from. All environment values are optional. But some values -// such as credentials require multiple values to be complete or the values -// will be ignored. -type envConfig struct { - // Environment configuration values. If set both Access Key ID and Secret Access - // Key must be provided. Session Token and optionally also be provided, but is - // not required. - // - // # Access Key ID - // AWS_ACCESS_KEY_ID=AKID - // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - // - // # Secret Access Key - // AWS_SECRET_ACCESS_KEY=SECRET - // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - // - // # Session Token - // AWS_SESSION_TOKEN=TOKEN - Creds credentials.Value - - // Region value will instruct the SDK where to make service API requests to. If is - // not provided in the environment the region must be provided before a service - // client request is made. - // - // AWS_REGION=us-east-1 - // - // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, - // # and AWS_REGION is not also set. 
- // AWS_DEFAULT_REGION=us-east-1 - Region string - - // Profile name the SDK should load use when loading shared configuration from the - // shared configuration files. If not provided "default" will be used as the - // profile name. - // - // AWS_PROFILE=my_profile - // - // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, - // # and AWS_PROFILE is not also set. - // AWS_DEFAULT_PROFILE=my_profile - Profile string - - // SDK load config instructs the SDK to load the shared config in addition to - // shared credentials. This also expands the configuration loaded from the shared - // credentials to have parity with the shared config file. This also enables - // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE - // env values as well. - // - // AWS_SDK_LOAD_CONFIG=1 - EnableSharedConfig bool - - // Shared credentials file path can be set to instruct the SDK to use an alternate - // file for the shared credentials. If not set the file will be loaded from - // $HOME/.aws/credentials on Linux/Unix based systems, and - // %USERPROFILE%\.aws\credentials on Windows. - // - // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - SharedCredentialsFile string - - // Shared config file path can be set to instruct the SDK to use an alternate - // file for the shared config. If not set the file will be loaded from - // $HOME/.aws/config on Linux/Unix based systems, and - // %USERPROFILE%\.aws\config on Windows. - // - // AWS_CONFIG_FILE=$HOME/my_shared_config - SharedConfigFile string - - // Sets the path to a custom Credentials Authority (CA) Bundle PEM file - // that the SDK will use instead of the system's root CA bundle. - // Only use this if you want to configure the SDK to use a custom set - // of CAs. - // - // Enabling this option will attempt to merge the Transport - // into the SDK's HTTP client. If the client's Transport is - // not a http.Transport an error will be returned. If the - // Transport's TLS config is set this option will cause the - // SDK to overwrite the Transport's TLS config's RootCAs value. - // - // Setting a custom HTTPClient in the aws.Config options will override this setting. - // To use this option and custom HTTP client, the HTTP client needs to be provided - // when creating the session. Not the service client. - // - // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - CustomCABundle string - - // Sets the TLC client certificate that should be used by the SDK's HTTP transport - // when making requests. The certificate must be paired with a TLS client key file. - // - // AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert - ClientTLSCert string - - // Sets the TLC client key that should be used by the SDK's HTTP transport - // when making requests. The key must be paired with a TLS client certificate file. - // - // AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key - ClientTLSKey string - - csmEnabled string - CSMEnabled *bool - CSMPort string - CSMHost string - CSMClientID string - - // Enables endpoint discovery via environment variables. - // - // AWS_ENABLE_ENDPOINT_DISCOVERY=true - EnableEndpointDiscovery *bool - enableEndpointDiscovery string - - // Specifies the WebIdentity token the SDK should use to assume a role - // with. - // - // AWS_WEB_IDENTITY_TOKEN_FILE=file_path - WebIdentityTokenFilePath string - - // Specifies the IAM role arn to use when assuming an role. - // - // AWS_ROLE_ARN=role_arn - RoleARN string - - // Specifies the IAM role session name to use when assuming a role. 
- // - // AWS_ROLE_SESSION_NAME=session_name - RoleSessionName string - - // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint - // for a service. - // - // AWS_STS_REGIONAL_ENDPOINTS=regional - // This can take value as `regional` or `legacy` - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // Specifies the S3 Regional Endpoint flag for the SDK to resolve the - // endpoint for a service. - // - // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional - // This can take value as `regional` or `legacy` - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. - // - // AWS_S3_USE_ARN_REGION=true - S3UseARNRegion bool - - // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] - EC2IMDSEndpoint string - - // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 - EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState - - // Specifies that SDK clients must resolve a dual-stack endpoint for - // services. - // - // AWS_USE_DUALSTACK_ENDPOINT=true - UseDualStackEndpoint endpoints.DualStackEndpointState - - // Specifies that SDK clients must resolve a FIPS endpoint for - // services. - // - // AWS_USE_FIPS_ENDPOINT=true - UseFIPSEndpoint endpoints.FIPSEndpointState -} - -var ( - csmEnabledEnvKey = []string{ - "AWS_CSM_ENABLED", - } - csmHostEnvKey = []string{ - "AWS_CSM_HOST", - } - csmPortEnvKey = []string{ - "AWS_CSM_PORT", - } - csmClientIDEnvKey = []string{ - "AWS_CSM_CLIENT_ID", - } - credAccessEnvKey = []string{ - "AWS_ACCESS_KEY_ID", - "AWS_ACCESS_KEY", - } - credSecretEnvKey = []string{ - "AWS_SECRET_ACCESS_KEY", - "AWS_SECRET_KEY", - } - credSessionEnvKey = []string{ - "AWS_SESSION_TOKEN", - } - - enableEndpointDiscoveryEnvKey = []string{ - "AWS_ENABLE_ENDPOINT_DISCOVERY", - } - - regionEnvKeys = []string{ - "AWS_REGION", - "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set - } - profileEnvKeys = []string{ - "AWS_PROFILE", - "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set - } - sharedCredsFileEnvKey = []string{ - "AWS_SHARED_CREDENTIALS_FILE", - } - sharedConfigFileEnvKey = []string{ - "AWS_CONFIG_FILE", - } - webIdentityTokenFilePathEnvKey = []string{ - "AWS_WEB_IDENTITY_TOKEN_FILE", - } - roleARNEnvKey = []string{ - "AWS_ROLE_ARN", - } - roleSessionNameEnvKey = []string{ - "AWS_ROLE_SESSION_NAME", - } - stsRegionalEndpointKey = []string{ - "AWS_STS_REGIONAL_ENDPOINTS", - } - s3UsEast1RegionalEndpoint = []string{ - "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", - } - s3UseARNRegionEnvKey = []string{ - "AWS_S3_USE_ARN_REGION", - } - ec2IMDSEndpointEnvKey = []string{ - "AWS_EC2_METADATA_SERVICE_ENDPOINT", - } - ec2IMDSEndpointModeEnvKey = []string{ - "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE", - } - useCABundleKey = []string{ - "AWS_CA_BUNDLE", - } - useClientTLSCert = []string{ - "AWS_SDK_GO_CLIENT_TLS_CERT", - } - useClientTLSKey = []string{ - "AWS_SDK_GO_CLIENT_TLS_KEY", - } - awsUseDualStackEndpoint = []string{ - "AWS_USE_DUALSTACK_ENDPOINT", - } - awsUseFIPSEndpoint = []string{ - "AWS_USE_FIPS_ENDPOINT", - } -) - -// loadEnvConfig retrieves the SDK's environment configuration. -// See `envConfig` for the values that will be retrieved. 
-// -// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value -// the shared SDK config will be loaded in addition to the SDK's specific -// configuration values. -func loadEnvConfig() (envConfig, error) { - enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) - return envConfigLoad(enableSharedConfig) -} - -// loadEnvSharedConfig retrieves the SDK's environment configuration, and the -// SDK shared config. See `envConfig` for the values that will be retrieved. -// -// Loads the shared configuration in addition to the SDK's specific configuration. -// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` -// environment variable is set. -func loadSharedEnvConfig() (envConfig, error) { - return envConfigLoad(true) -} - -func envConfigLoad(enableSharedConfig bool) (envConfig, error) { - cfg := envConfig{} - - cfg.EnableSharedConfig = enableSharedConfig - - // Static environment credentials - var creds credentials.Value - setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&creds.SessionToken, credSessionEnvKey) - if creds.HasKeys() { - // Require logical grouping of credentials - creds.ProviderName = EnvProviderName - cfg.Creds = creds - } - - // Role Metadata - setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) - setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) - - // Web identity environment variables - setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) - - // CSM environment variables - setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) - setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) - setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) - setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - - if len(cfg.csmEnabled) != 0 { - v, _ := strconv.ParseBool(cfg.csmEnabled) - cfg.CSMEnabled = &v - } - - regionKeys := regionEnvKeys - profileKeys := profileEnvKeys - if !cfg.EnableSharedConfig { - regionKeys = regionKeys[:1] - profileKeys = profileKeys[:1] - } - - setFromEnvVal(&cfg.Region, regionKeys) - setFromEnvVal(&cfg.Profile, profileKeys) - - // endpoint discovery is in reference to it being enabled. 
- setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) - if len(cfg.enableEndpointDiscovery) > 0 { - cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") - } - - setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) - setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) - - if len(cfg.SharedCredentialsFile) == 0 { - cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() - } - if len(cfg.SharedConfigFile) == 0 { - cfg.SharedConfigFile = defaults.SharedConfigFilename() - } - - setFromEnvVal(&cfg.CustomCABundle, useCABundleKey) - setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert) - setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey) - - var err error - // STS Regional Endpoint variable - for _, k := range stsRegionalEndpointKey { - if v := os.Getenv(k); len(v) != 0 { - cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) - if err != nil { - return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) - } - } - } - - // S3 Regional Endpoint variable - for _, k := range s3UsEast1RegionalEndpoint { - if v := os.Getenv(k); len(v) != 0 { - cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) - if err != nil { - return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) - } - } - } - - var s3UseARNRegion string - setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) - if len(s3UseARNRegion) != 0 { - switch { - case strings.EqualFold(s3UseARNRegion, "false"): - cfg.S3UseARNRegion = false - case strings.EqualFold(s3UseARNRegion, "true"): - cfg.S3UseARNRegion = true - default: - return envConfig{}, fmt.Errorf( - "invalid value for environment variable, %s=%s, need true or false", - s3UseARNRegionEnvKey[0], s3UseARNRegion) - } - } - - setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey) - if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil { - return envConfig{}, err - } - - if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil { - return cfg, err - } - - if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, awsUseFIPSEndpoint); err != nil { - return cfg, err - } - - return cfg, nil -} - -func setFromEnvVal(dst *string, keys []string) { - for _, k := range keys { - if v := os.Getenv(k); len(v) != 0 { - *dst = v - break - } - } -} - -func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - if err := mode.SetFromString(value); err != nil { - return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err) - } - return nil - } - return nil -} - -func setUseDualStackEndpointFromEnvVal(dst *endpoints.DualStackEndpointState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case strings.EqualFold(value, "true"): - *dst = endpoints.DualStackEndpointStateEnabled - case strings.EqualFold(value, "false"): - *dst = endpoints.DualStackEndpointStateDisabled - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false", - k, value) - } - } - return nil -} - -func setUseFIPSEndpointFromEnvVal(dst *endpoints.FIPSEndpointState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case 
strings.EqualFold(value, "true"): - *dst = endpoints.FIPSEndpointStateEnabled - case strings.EqualFold(value, "false"): - *dst = endpoints.FIPSEndpointStateDisabled - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false", - k, value) - } - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go deleted file mode 100644 index 4293dbe10b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ /dev/null @@ -1,997 +0,0 @@ -package session - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/csm" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" -) - -const ( - // ErrCodeSharedConfig represents an error that occurs in the shared - // configuration logic - ErrCodeSharedConfig = "SharedConfigErr" - - // ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle. - ErrCodeLoadCustomCABundle = "LoadCustomCABundleError" - - // ErrCodeLoadClientTLSCert error code for unable to load client TLS - // certificate or key - ErrCodeLoadClientTLSCert = "LoadClientTLSCertError" -) - -// ErrSharedConfigSourceCollision will be returned if a section contains both -// source_profile and credential_source -var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil) - -// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment -// variables are empty and Environment was set as the credential source -var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) - -// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided -var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) - -// A Session provides a central location to create service clients from and -// store configurations and request handlers for those services. -// -// Sessions are safe to create service clients concurrently, but it is not safe -// to mutate the Session concurrently. -// -// The Session satisfies the service client's client.ConfigProvider. -type Session struct { - Config *aws.Config - Handlers request.Handlers - - options Options -} - -// New creates a new instance of the handlers merging in the provided configs -// on top of the SDK's default configurations. Once the Session is created it -// can be mutated to modify the Config or Handlers. The Session is safe to be -// read concurrently, but it should not be written to concurrently. -// -// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New -// method could now encounter an error when loading the configuration. 
When -// The environment variable is set, and an error occurs, New will return a -// session that will fail all requests reporting the error that occurred while -// loading the session. Use NewSession to get the error when creating the -// session. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded, in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. -// -// Deprecated: Use NewSession functions to create sessions instead. NewSession -// has the same functionality as New except an error can be returned when the -// func is called instead of waiting to receive an error until a request is made. -func New(cfgs ...*aws.Config) *Session { - // load initial config from environment - envCfg, envErr := loadEnvConfig() - - if envCfg.EnableSharedConfig { - var cfg aws.Config - cfg.MergeIn(cfgs...) - s, err := NewSessionWithOptions(Options{ - Config: cfg, - SharedConfigState: SharedConfigEnable, - }) - if err != nil { - // Old session.New expected all errors to be discovered when - // a request is made, and would report the errors then. This - // needs to be replicated if an error occurs while creating - // the session. - msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + - "Use session.NewSession to handle errors occurring during session creation." - - // Session creation failed, need to report the error and prevent - // any requests from succeeding. - s = &Session{Config: defaults.Config()} - s.logDeprecatedNewSessionError(msg, err, cfgs) - } - - return s - } - - s := deprecatedNewSession(envCfg, cfgs...) - if envErr != nil { - msg := "failed to load env config" - s.logDeprecatedNewSessionError(msg, envErr, cfgs) - } - - if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { - if l := s.Config.Logger; l != nil { - l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) - } - } else if csmCfg.Enabled { - err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) - if err != nil { - msg := "failed to enable CSM" - s.logDeprecatedNewSessionError(msg, err, cfgs) - } - } - - return s -} - -// NewSession returns a new Session created from SDK defaults, config files, -// environment, and user provided config files. Once the Session is created -// it can be mutated to modify the Config or Handlers. The Session is safe to -// be read concurrently, but it should not be written to concurrently. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. Enabling the Shared Config will also allow the Session -// to be built with retrieving credentials with AssumeRole set in the config. -// -// See the NewSessionWithOptions func for information on how to override or -// control through code how the Session will be created, such as specifying the -// config profile, and controlling if shared config is enabled or not. -func NewSession(cfgs ...*aws.Config) (*Session, error) { - opts := Options{} - opts.Config.MergeIn(cfgs...) 
- - return NewSessionWithOptions(opts) -} - -// SharedConfigState provides the ability to optionally override the state -// of the session's creation based on the shared config being enabled or -// disabled. -type SharedConfigState int - -const ( - // SharedConfigStateFromEnv does not override any state of the - // AWS_SDK_LOAD_CONFIG env var. It is the default value of the - // SharedConfigState type. - SharedConfigStateFromEnv SharedConfigState = iota - - // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value - // and disables the shared config functionality. - SharedConfigDisable - - // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value - // and enables the shared config functionality. - SharedConfigEnable -) - -// Options provides the means to control how a Session is created and what -// configuration values will be loaded. -// -type Options struct { - // Provides config values for the SDK to use when creating service clients - // and making API requests to services. Any value set in this field - // will override the associated value provided by the SDK defaults, - // environment or config files where relevant. - // - // If not set, configuration values from SDK defaults, environment, - // config will be used. - Config aws.Config - - // Overrides the config profile the Session should be created from. If not - // set the value of the environment variable will be loaded (AWS_PROFILE, - // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). - // - // If not set and environment variables are not set the "default" - // (DefaultSharedConfigProfile) will be used as the profile to load the - // session config from. - Profile string - - // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG - // environment variable. By default a Session will be created using the - // value provided by the AWS_SDK_LOAD_CONFIG environment variable. - // - // Setting this value to SharedConfigEnable or SharedConfigDisable - // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable - // and enable or disable the shared config functionality. - SharedConfigState SharedConfigState - - // Ordered list of files the session will load configuration from. - // It will override the environment variables AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE. - SharedConfigFiles []string - - // When the SDK's shared config is configured to assume a role with MFA - // this option is required in order to provide the mechanism that will - // retrieve the MFA token. There is no default value for this field. If - // it is not set an error will be returned when creating the session. - // - // This token provider will be called whenever the assumed role's - // credentials need to be refreshed. Within the context of service clients - // all sharing the same session the SDK will ensure calls to the token - // provider are atomic. When sharing a token provider across multiple - // sessions additional synchronization logic is needed to ensure the - // token providers do not introduce race conditions. It is recommended to - // share the session where possible. - // - // stscreds.StdinTokenProvider is a basic implementation that will prompt - // from stdin for the MFA token code. - // - // This field is only used if the shared configuration is enabled, and - // the config enables assume role with MFA via the mfa_serial field.
-	AssumeRoleTokenProvider func() (string, error)
-
-	// When the SDK's shared config is configured to assume a role this option
-	// may be provided to set the expiry duration of the STS credentials.
-	// Defaults to 15 minutes if not set as documented in the
-	// stscreds.AssumeRoleProvider.
-	AssumeRoleDuration time.Duration
-
-	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
-	// the SDK will use instead of the default system's root CA bundle. Use this
-	// only if you want to replace the CA bundle the SDK uses for TLS requests.
-	//
-	// HTTP Client's Transport concrete implementation must be an http.Transport
-	// or creating the session will fail.
-	//
-	// If the Transport's TLS config is set this option will cause the SDK
-	// to overwrite the Transport's TLS config's RootCAs value. If the CA
-	// bundle reader contains multiple certificates all of them will be loaded.
-	//
-	// Can also be specified via the environment variable:
-	//
-	//	AWS_CA_BUNDLE=$HOME/ca_bundle
-	//
-	// Can also be specified via the shared config field:
-	//
-	//	ca_bundle = $HOME/ca_bundle
-	CustomCABundle io.Reader
-
-	// Reader for the TLS client certificate that should be used by the SDK's
-	// HTTP transport when making requests. The certificate must be paired with
-	// a TLS client key file. Will be ignored if both are not provided.
-	//
-	// HTTP Client's Transport concrete implementation must be an http.Transport
-	// or creating the session will fail.
-	//
-	// Can also be specified via the environment variable:
-	//
-	//	AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
-	ClientTLSCert io.Reader
-
-	// Reader for the TLS client key that should be used by the SDK's HTTP
-	// transport when making requests. The key must be paired with a TLS client
-	// certificate file. Will be ignored if both are not provided. A short
-	// sketch of wiring these reader fields up follows below.
-	//
-	// HTTP Client's Transport concrete implementation must be an http.Transport
-	// or creating the session will fail.
-	//
-	// Can also be specified via the environment variable:
-	//
-	//	AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
-	ClientTLSKey io.Reader
-
-	// The handlers that the session and all API clients will be created with.
-	// This must be a complete set of handlers. Use the defaults.Handlers()
-	// function to initialize this value before changing the handlers to be
-	// used by the SDK.
-	Handlers request.Handlers
-
-	// Allows specifying a custom endpoint to be used by the EC2 IMDS client
-	// when making requests to the EC2 IMDS API. The endpoint value should
-	// include the URI scheme. If the scheme is not present it will be defaulted to http.
-	//
-	// If unset, the EC2 IMDS client will use its default endpoint.
-	//
-	// Can also be specified via the environment variable,
-	// AWS_EC2_METADATA_SERVICE_ENDPOINT.
-	//
-	//	AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
-	//
-	// If using a URL with an IPv6 address literal, the IPv6 address
-	// component must be enclosed in square brackets.
-	//
-	//	AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
-	EC2IMDSEndpoint string
-
-	// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
-	//
-	//	AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
-	EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
-
-	// Specifies options for creating credential providers.
-	// These are only used if the aws.Config does not already
-	// include credentials.
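// For illustration only, a minimal sketch of supplying the reader fields
// referenced above (file paths hypothetical; imports os and aws/session;
// error handling elided):
//
//	caPEM, _ := os.Open("/path/to/ca_bundle.pem")
//	cert, _ := os.Open("/path/to/client_cert.pem")
//	key, _ := os.Open("/path/to/client_key.pem")
//
//	sess := session.Must(session.NewSessionWithOptions(session.Options{
//		CustomCABundle: caPEM,
//		ClientTLSCert:  cert,
//		ClientTLSKey:   key,
//	}))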
- CredentialsProviderOptions *CredentialsProviderOptions -} - -// NewSessionWithOptions returns a new Session created from SDK defaults, config files, -// environment, and user provided config files. This func uses the Options -// values to configure how the Session is created. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. Enabling the Shared Config will also allow the Session -// to be built with retrieving credentials with AssumeRole set in the config. -// -// // Equivalent to session.New -// sess := session.Must(session.NewSessionWithOptions(session.Options{})) -// -// // Specify profile to load for the session's config -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Profile: "profile_name", -// })) -// -// // Specify profile for config and region for requests -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Config: aws.Config{Region: aws.String("us-east-1")}, -// Profile: "profile_name", -// })) -// -// // Force enable Shared Config support -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// SharedConfigState: session.SharedConfigEnable, -// })) -func NewSessionWithOptions(opts Options) (*Session, error) { - var envCfg envConfig - var err error - if opts.SharedConfigState == SharedConfigEnable { - envCfg, err = loadSharedEnvConfig() - if err != nil { - return nil, fmt.Errorf("failed to load shared config, %v", err) - } - } else { - envCfg, err = loadEnvConfig() - if err != nil { - return nil, fmt.Errorf("failed to load environment config, %v", err) - } - } - - if len(opts.Profile) != 0 { - envCfg.Profile = opts.Profile - } - - switch opts.SharedConfigState { - case SharedConfigDisable: - envCfg.EnableSharedConfig = false - case SharedConfigEnable: - envCfg.EnableSharedConfig = true - } - - return newSession(opts, envCfg, &opts.Config) -} - -// Must is a helper function to ensure the Session is valid and there was no -// error when calling a NewSession function. -// -// This helper is intended to be used in variable initialization to load the -// Session and configuration at startup. Such as: -// -// var sess = session.Must(session.NewSession()) -func Must(sess *Session, err error) *Session { - if err != nil { - panic(err) - } - - return sess -} - -// Wraps the endpoint resolver with a resolver that will return a custom -// endpoint for EC2 IMDS. -func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string, mode endpoints.EC2IMDSEndpointModeState) endpoints.Resolver { - return endpoints.ResolverFunc( - func(service, region string, opts ...func(*endpoints.Options)) ( - endpoints.ResolvedEndpoint, error, - ) { - if service == ec2MetadataServiceID && len(endpoint) > 0 { - return endpoints.ResolvedEndpoint{ - URL: endpoint, - SigningName: ec2MetadataServiceID, - SigningRegion: region, - }, nil - } else if service == ec2MetadataServiceID { - opts = append(opts, func(o *endpoints.Options) { - o.EC2MetadataEndpointMode = mode - }) - } - return resolver.EndpointFor(service, region, opts...) 
-	})
-}
-
-func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session {
-	cfg := defaults.Config()
-	handlers := defaults.Handlers()
-
-	// Apply the passed in configs so the configuration can be applied to the
-	// default credential chain
-	cfg.MergeIn(cfgs...)
-	if cfg.EndpointResolver == nil {
-		// An endpoint resolver is required for a session to be able to provide
-		// endpoints for service client configurations.
-		cfg.EndpointResolver = endpoints.DefaultResolver()
-	}
-
-	if !(len(envCfg.EC2IMDSEndpoint) == 0 && envCfg.EC2IMDSEndpointMode == endpoints.EC2IMDSEndpointModeStateUnset) {
-		cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint, envCfg.EC2IMDSEndpointMode)
-	}
-
-	cfg.Credentials = defaults.CredChain(cfg, handlers)
-
-	// Reapply any passed in configs to override credentials if set
-	cfg.MergeIn(cfgs...)
-
-	s := &Session{
-		Config:   cfg,
-		Handlers: handlers,
-		options: Options{
-			EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint,
-		},
-	}
-
-	initHandlers(s)
-	return s
-}
-
-func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error {
-	if logger != nil {
-		logger.Log("Enabling CSM")
-	}
-
-	r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port))
-	if err != nil {
-		return err
-	}
-	r.InjectHandlers(handlers)
-
-	return nil
-}
-
-func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
-	cfg := defaults.Config()
-
-	handlers := opts.Handlers
-	if handlers.IsEmpty() {
-		handlers = defaults.Handlers()
-	}
-
-	// Get a merged version of the user provided config to determine if
-	// credentials were provided.
-	userCfg := &aws.Config{}
-	userCfg.MergeIn(cfgs...)
-	cfg.MergeIn(userCfg)
-
-	// Config files are loaded in order, with later files overwriting
-	// values from earlier files.
-	var cfgFiles []string
-	if opts.SharedConfigFiles != nil {
-		cfgFiles = opts.SharedConfigFiles
-	} else {
-		cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
-		if !envCfg.EnableSharedConfig {
-			// The shared config file (~/.aws/config) is only loaded if instructed
-			// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
-			cfgFiles = cfgFiles[1:]
-		}
-	}
-
-	// Load additional config from file(s)
-	sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
-	if err != nil {
-		if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) {
-			// Special case where the user has not explicitly specified an AWS_PROFILE,
-			// or session.Options.Profile, shared config is not enabled, and the
-			// environment has credentials, allow the shared config file to fail to
-			// load since the user has already provided credentials, and nothing else
-			// is required to be read from the file.
-			// Github(aws/aws-sdk-go#2455)
-		} else if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
-			return nil, err
-		}
-	}
-
-	if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
-		return nil, err
-	}
-
-	if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil {
-		return nil, err
-	}
-
-	s := &Session{
-		Config:   cfg,
-		Handlers: handlers,
-		options:  opts,
-	}
-
-	initHandlers(s)
-
-	if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil {
-		if l := s.Config.Logger; l != nil {
-			l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
-		}
-	} else if csmCfg.Enabled {
-		err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return s, nil
-}
-
-type csmConfig struct {
-	Enabled  bool
-	Host     string
-	Port     string
-	ClientID string
-}
-
-var csmProfileName = "aws_csm"
-
-func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
-	if envCfg.CSMEnabled != nil {
-		if *envCfg.CSMEnabled {
-			return csmConfig{
-				Enabled:  true,
-				ClientID: envCfg.CSMClientID,
-				Host:     envCfg.CSMHost,
-				Port:     envCfg.CSMPort,
-			}, nil
-		}
-		return csmConfig{}, nil
-	}
-
-	sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false)
-	if err != nil {
-		if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
-			return csmConfig{}, err
-		}
-	}
-	if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled {
-		return csmConfig{
-			Enabled:  true,
-			ClientID: sharedCfg.CSMClientID,
-			Host:     sharedCfg.CSMHost,
-			Port:     sharedCfg.CSMPort,
-		}, nil
-	}
-
-	return csmConfig{}, nil
-}
-
-func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
-	// The CA bundle can be specified in both the environment variable and
-	// the shared config file.
-	var caBundleFilename = envCfg.CustomCABundle
-	if len(caBundleFilename) == 0 {
-		caBundleFilename = sharedCfg.CustomCABundle
-	}
-
-	// Only use the environment value if the session option is not provided.
-	customTLSOptions := map[string]struct {
-		filename string
-		field    *io.Reader
-		errCode  string
-	}{
-		"custom CA bundle PEM":   {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle},
-		"custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert},
-		"custom client TLS key":  {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert},
-	}
-	for name, v := range customTLSOptions {
-		if len(v.filename) != 0 && *v.field == nil {
-			f, err := os.Open(v.filename)
-			if err != nil {
-				return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err)
-			}
-			defer f.Close()
-			*v.field = f
-		}
-	}
-
-	// Setup HTTP client with custom cert bundle if enabled
-	if opts.CustomCABundle != nil {
-		if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil {
-			return err
-		}
-	}
-
-	// Setup HTTP client TLS certificate and key for client TLS authentication.
-	if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil {
-		if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil {
-			return err
-		}
-	} else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil {
-		// Do nothing if neither value is available.
-
-	} else {
-		return awserr.New(ErrCodeLoadClientTLSCert,
-			fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided",
-				opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil)
-	}
-
-	return nil
-}
-
-func getHTTPTransport(client *http.Client) (*http.Transport, error) {
-	var t *http.Transport
-	switch v := client.Transport.(type) {
-	case *http.Transport:
-		t = v
-	default:
-		if client.Transport != nil {
-			return nil, fmt.Errorf("unsupported transport, %T", client.Transport)
-		}
-	}
-	if t == nil {
-		// A nil transport implies `http.DefaultTransport` should be used.
-		// Since the SDK cannot modify or copy the `DefaultTransport`, a
-		// custom transport with equivalent values is the next closest
-		// behavior.
-		t = getCustomTransport()
-	}
-
-	return t, nil
-}
-
-func loadCustomCABundle(client *http.Client, bundle io.Reader) error {
-	t, err := getHTTPTransport(client)
-	if err != nil {
-		return awserr.New(ErrCodeLoadCustomCABundle,
-			"unable to load custom CA bundle, HTTPClient's transport unsupported type", err)
-	}
-
-	p, err := loadCertPool(bundle)
-	if err != nil {
-		return err
-	}
-	if t.TLSClientConfig == nil {
-		t.TLSClientConfig = &tls.Config{}
-	}
-	t.TLSClientConfig.RootCAs = p
-
-	client.Transport = t
-
-	return nil
-}
-
-func loadCertPool(r io.Reader) (*x509.CertPool, error) {
-	b, err := ioutil.ReadAll(r)
-	if err != nil {
-		return nil, awserr.New(ErrCodeLoadCustomCABundle,
-			"failed to read custom CA bundle PEM file", err)
-	}
-
-	p := x509.NewCertPool()
-	if !p.AppendCertsFromPEM(b) {
-		return nil, awserr.New(ErrCodeLoadCustomCABundle,
-			"failed to load custom CA bundle PEM file", err)
-	}
-
-	return p, nil
-}
-
-func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error {
-	t, err := getHTTPTransport(client)
-	if err != nil {
-		return awserr.New(ErrCodeLoadClientTLSCert,
-			"unable to get usable HTTP transport from client", err)
-	}
-
-	cert, err := ioutil.ReadAll(certFile)
-	if err != nil {
-		return awserr.New(ErrCodeLoadClientTLSCert,
-			"unable to read client TLS cert file", err)
-	}
-
-	key, err := ioutil.ReadAll(keyFile)
-	if err != nil {
-		return awserr.New(ErrCodeLoadClientTLSCert,
-			"unable to read client TLS key file", err)
-	}
-
-	clientCert, err := tls.X509KeyPair(cert, key)
-	if err != nil {
-		return awserr.New(ErrCodeLoadClientTLSCert,
-			"unable to load x509 key pair from client cert", err)
-	}
-
-	tlsCfg := t.TLSClientConfig
-	if tlsCfg == nil {
-		tlsCfg = &tls.Config{}
-	}
-
-	tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert)
-
-	t.TLSClientConfig = tlsCfg
-	client.Transport = t
-
-	return nil
-}
-
-func mergeConfigSrcs(cfg, userCfg *aws.Config,
-	envCfg envConfig, sharedCfg sharedConfig,
-	handlers request.Handlers,
-	sessOpts Options,
-) error {
-
-	// Region if not already set by user
-	if len(aws.StringValue(cfg.Region)) == 0 {
-		if len(envCfg.Region) > 0 {
-			cfg.WithRegion(envCfg.Region)
-		} else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
-			cfg.WithRegion(sharedCfg.Region)
-		}
-	}
-
-	if cfg.EnableEndpointDiscovery == nil {
-		if envCfg.EnableEndpointDiscovery != nil {
-			cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
-		} else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
-			cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
-		}
-	}
-
-	// Regional Endpoint flag for STS endpoint resolving
-	mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{
-		userCfg.STSRegionalEndpoint,
-		envCfg.STSRegionalEndpoint,
-		sharedCfg.STSRegionalEndpoint,
-
endpoints.LegacySTSEndpoint, - }) - - // Regional Endpoint flag for S3 endpoint resolving - mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ - userCfg.S3UsEast1RegionalEndpoint, - envCfg.S3UsEast1RegionalEndpoint, - sharedCfg.S3UsEast1RegionalEndpoint, - endpoints.LegacyS3UsEast1Endpoint, - }) - - var ec2IMDSEndpoint string - for _, v := range []string{ - sessOpts.EC2IMDSEndpoint, - envCfg.EC2IMDSEndpoint, - sharedCfg.EC2IMDSEndpoint, - } { - if len(v) != 0 { - ec2IMDSEndpoint = v - break - } - } - - var endpointMode endpoints.EC2IMDSEndpointModeState - for _, v := range []endpoints.EC2IMDSEndpointModeState{ - sessOpts.EC2IMDSEndpointMode, - envCfg.EC2IMDSEndpointMode, - sharedCfg.EC2IMDSEndpointMode, - } { - if v != endpoints.EC2IMDSEndpointModeStateUnset { - endpointMode = v - break - } - } - - if len(ec2IMDSEndpoint) != 0 || endpointMode != endpoints.EC2IMDSEndpointModeStateUnset { - cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode) - } - - // Configure credentials if not already set by the user when creating the - // Session. - if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) - if err != nil { - return err - } - cfg.Credentials = creds - } - - cfg.S3UseARNRegion = userCfg.S3UseARNRegion - if cfg.S3UseARNRegion == nil { - cfg.S3UseARNRegion = &envCfg.S3UseARNRegion - } - if cfg.S3UseARNRegion == nil { - cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion - } - - for _, v := range []endpoints.DualStackEndpointState{userCfg.UseDualStackEndpoint, envCfg.UseDualStackEndpoint, sharedCfg.UseDualStackEndpoint} { - if v != endpoints.DualStackEndpointStateUnset { - cfg.UseDualStackEndpoint = v - break - } - } - - for _, v := range []endpoints.FIPSEndpointState{userCfg.UseFIPSEndpoint, envCfg.UseFIPSEndpoint, sharedCfg.UseFIPSEndpoint} { - if v != endpoints.FIPSEndpointStateUnset { - cfg.UseFIPSEndpoint = v - break - } - } - - return nil -} - -func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { - for _, v := range values { - if v != endpoints.UnsetSTSEndpoint { - cfg.STSRegionalEndpoint = v - break - } - } -} - -func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { - for _, v := range values { - if v != endpoints.UnsetS3UsEast1Endpoint { - cfg.S3UsEast1RegionalEndpoint = v - break - } - } -} - -func initHandlers(s *Session) { - // Add the Validate parameter handler if it is not disabled. - s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) - if !aws.BoolValue(s.Config.DisableParamValidation) { - s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) - } -} - -// Copy creates and returns a copy of the current Session, copying the config -// and handlers. If any additional configs are provided they will be merged -// on top of the Session's copied config. -// -// // Create a copy of the current Session, configured for the us-west-2 region. -// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) -func (s *Session) Copy(cfgs ...*aws.Config) *Session { - newSession := &Session{ - Config: s.Config.Copy(cfgs...), - Handlers: s.Handlers.Copy(), - options: s.options, - } - - initHandlers(newSession) - - return newSession -} - -// ClientConfig satisfies the client.ConfigProvider interface and is used to -// configure the service client instances. 
Passing the Session to the service -// client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) - - resolvedRegion := normalizeRegion(s.Config) - - region := aws.StringValue(s.Config.Region) - resolved, err := s.resolveEndpoint(service, region, resolvedRegion, s.Config) - if err != nil { - s.Handlers.Validate.PushBack(func(r *request.Request) { - if len(r.ClientInfo.Endpoint) != 0 { - // Error occurred while resolving endpoint, but the request - // being invoked has had an endpoint specified after the client - // was created. - return - } - r.Error = err - }) - } - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - PartitionID: resolved.PartitionID, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningNameDerived: resolved.SigningNameDerived, - SigningName: resolved.SigningName, - ResolvedRegion: resolvedRegion, - } -} - -const ec2MetadataServiceID = "ec2metadata" - -func (s *Session) resolveEndpoint(service, region, resolvedRegion string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { - - if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { - return endpoints.ResolvedEndpoint{ - URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), - SigningRegion: region, - }, nil - } - - resolved, err := cfg.EndpointResolver.EndpointFor(service, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) - - opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) - opt.UseDualStackEndpoint = cfg.UseDualStackEndpoint - - opt.UseFIPSEndpoint = cfg.UseFIPSEndpoint - - // Support for STSRegionalEndpoint where the STSRegionalEndpoint is - // provided in envConfig or sharedConfig with envConfig getting - // precedence. - opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint - - // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is - // provided in envConfig or sharedConfig with envConfig getting - // precedence. - opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. - opt.ResolveUnknownService = true - - opt.ResolvedRegion = resolvedRegion - - opt.Logger = cfg.Logger - opt.LogDeprecated = cfg.LogLevel.Matches(aws.LogDebugWithDeprecated) - }, - ) - if err != nil { - return endpoints.ResolvedEndpoint{}, err - } - - return resolved, nil -} - -// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception -// that the EndpointResolver will not be used to resolve the endpoint. The only -// endpoint set must come from the aws.Config.Endpoint field. -func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) 
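// For illustration only (not part of this file), a sketch of how a service
// client consumes the Session's ClientConfig described above (assumes the
// aws, session, and service/s3 packages; region hypothetical):
//
//	sess := session.Must(session.NewSession())
//	svc := s3.New(sess, aws.NewConfig().WithRegion("us-west-2"))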
-
-	resolvedRegion := normalizeRegion(s.Config)
-
-	var resolved endpoints.ResolvedEndpoint
-	if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
-		resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
-		resolved.SigningRegion = aws.StringValue(s.Config.Region)
-	}
-
-	return client.Config{
-		Config:             s.Config,
-		Handlers:           s.Handlers,
-		Endpoint:           resolved.URL,
-		SigningRegion:      resolved.SigningRegion,
-		SigningNameDerived: resolved.SigningNameDerived,
-		SigningName:        resolved.SigningName,
-		ResolvedRegion:     resolvedRegion,
-	}
-}
-
-// logDeprecatedNewSessionError logs the session creation error and installs
-// a request validation handler that fails all requests with that error.
-func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) {
-	// Session creation failed, need to report the error and prevent
-	// any requests from succeeding.
-	s.Config.MergeIn(cfgs...)
-	s.Config.Logger.Log("ERROR:", msg, "Error:", err)
-	s.Handlers.Validate.PushBack(func(r *request.Request) {
-		r.Error = err
-	})
-}
-
-// normalizeRegion resolves/normalizes the configured region (converting
-// pseudo-FIPS regions), modifies the provided config with the equivalent
-// options for resolution, and returns the resolved region name.
-func normalizeRegion(cfg *aws.Config) (resolved string) {
-	const fipsInfix = "-fips-"
-	const fipsPrefix = "fips-"
-	const fipsSuffix = "-fips"
-
-	region := aws.StringValue(cfg.Region)
-
-	if strings.Contains(region, fipsInfix) ||
-		strings.Contains(region, fipsPrefix) ||
-		strings.Contains(region, fipsSuffix) {
-		resolved = strings.Replace(strings.Replace(strings.Replace(
-			region, fipsInfix, "-", -1), fipsPrefix, "", -1), fipsSuffix, "", -1)
-		cfg.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
-	}
-
-	return resolved
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
deleted file mode 100644
index 424c82b4d3..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ /dev/null
@@ -1,729 +0,0 @@
-package session
-
-import (
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/endpoints"
-	"github.com/aws/aws-sdk-go/internal/ini"
-)
-
-const (
-	// Static Credentials group
-	accessKeyIDKey  = `aws_access_key_id`     // group required
-	secretAccessKey = `aws_secret_access_key` // group required
-	sessionTokenKey = `aws_session_token`     // optional
-
-	// Assume Role Credentials group
-	roleArnKey             = `role_arn`          // group required
-	sourceProfileKey       = `source_profile`    // group required (or credential_source)
-	credentialSourceKey    = `credential_source` // group required (or source_profile)
-	externalIDKey          = `external_id`       // optional
-	mfaSerialKey           = `mfa_serial`        // optional
-	roleSessionNameKey     = `role_session_name` // optional
-	roleDurationSecondsKey = "duration_seconds"  // optional
-
-	// AWS Single Sign-On (AWS SSO) group
-	ssoAccountIDKey = "sso_account_id"
-	ssoRegionKey    = "sso_region"
-	ssoRoleNameKey  = "sso_role_name"
-	ssoStartURL     = "sso_start_url"
-
-	// CSM options
-	csmEnabledKey  = `csm_enabled`
-	csmHostKey     = `csm_host`
-	csmPortKey     = `csm_port`
-	csmClientIDKey = `csm_client_id`
-
-	// Additional Config fields
-	regionKey = `region`
-
-	// custom CA Bundle filename
-	customCABundleKey = `ca_bundle`
-
-	// endpoint discovery group
-	enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
-
-	// External Credential Process
-	credentialProcessKey =
`credential_process` // optional - - // Web Identity Token File - webIdentityTokenFileKey = `web_identity_token_file` // optional - - // Additional config fields for regional or legacy endpoints - stsRegionalEndpointSharedKey = `sts_regional_endpoints` - - // Additional config fields for regional or legacy endpoints - s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` - - // DefaultSharedConfigProfile is the default profile to be used when - // loading configuration from the config files if another profile name - // is not provided. - DefaultSharedConfigProfile = `default` - - // S3 ARN Region Usage - s3UseARNRegionKey = "s3_use_arn_region" - - // EC2 IMDS Endpoint Mode - ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode" - - // EC2 IMDS Endpoint - ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" - - // Use DualStack Endpoint Resolution - useDualStackEndpoint = "use_dualstack_endpoint" - - // Use FIPS Endpoint Resolution - useFIPSEndpointKey = "use_fips_endpoint" -) - -// sharedConfig represents the configuration fields of the SDK config files. -type sharedConfig struct { - Profile string - - // Credentials values from the config file. Both aws_access_key_id and - // aws_secret_access_key must be provided together in the same file to be - // considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of - // the other two fields are also provided. - // - // aws_access_key_id - // aws_secret_access_key - // aws_session_token - Creds credentials.Value - - CredentialSource string - CredentialProcess string - WebIdentityTokenFile string - - SSOAccountID string - SSORegion string - SSORoleName string - SSOStartURL string - - RoleARN string - RoleSessionName string - ExternalID string - MFASerial string - AssumeRoleDuration *time.Duration - - SourceProfileName string - SourceProfile *sharedConfig - - // Region is the region the SDK should use for looking up AWS service - // endpoints and signing requests. - // - // region - Region string - - // CustomCABundle is the file path to a PEM file the SDK will read and - // use to configure the HTTP transport with additional CA certs that are - // not present in the platforms default CA store. - // - // This value will be ignored if the file does not exist. - // - // ca_bundle - CustomCABundle string - - // EnableEndpointDiscovery can be enabled in the shared config by setting - // endpoint_discovery_enabled to true - // - // endpoint_discovery_enabled = true - EnableEndpointDiscovery *bool - - // CSM Options - CSMEnabled *bool - CSMHost string - CSMPort string - CSMClientID string - - // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service - // - // sts_regional_endpoints = regional - // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service - // - // s3_us_east_1_regional_endpoint = regional - // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. 
- // - // s3_use_arn_region=true - S3UseARNRegion bool - - // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) - // - // ec2_metadata_service_endpoint_mode=IPv6 - EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState - - // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. - // - // ec2_metadata_service_endpoint=http://fd00:ec2::254 - EC2IMDSEndpoint string - - // Specifies that SDK clients must resolve a dual-stack endpoint for - // services. - // - // use_dualstack_endpoint=true - UseDualStackEndpoint endpoints.DualStackEndpointState - - // Specifies that SDK clients must resolve a FIPS endpoint for - // services. - // - // use_fips_endpoint=true - UseFIPSEndpoint endpoints.FIPSEndpointState -} - -type sharedConfigFile struct { - Filename string - IniData ini.Sections -} - -// loadSharedConfig retrieves the configuration from the list of files using -// the profile provided. The order the files are listed will determine -// precedence. Values in subsequent files will overwrite values defined in -// earlier files. -// -// For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of -// A's. -// -// See sharedConfig.setFromFile for information how the config files -// will be loaded. -func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { - if len(profile) == 0 { - profile = DefaultSharedConfigProfile - } - - files, err := loadSharedConfigIniFiles(filenames) - if err != nil { - return sharedConfig{}, err - } - - cfg := sharedConfig{} - profiles := map[string]struct{}{} - if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { - return sharedConfig{}, err - } - - return cfg, nil -} - -func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { - files := make([]sharedConfigFile, 0, len(filenames)) - - for _, filename := range filenames { - sections, err := ini.OpenFile(filename) - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { - // Skip files which can't be opened and read for whatever reason - continue - } else if err != nil { - return nil, SharedConfigLoadError{Filename: filename, Err: err} - } - - files = append(files, sharedConfigFile{ - Filename: filename, IniData: sections, - }) - } - - return files, nil -} - -func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { - cfg.Profile = profile - - // Trim files from the list that don't exist. - var skippedFiles int - var profileNotFoundErr error - for _, f := range files { - if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); ok { - // Ignore profiles not defined in individual files. - profileNotFoundErr = err - skippedFiles++ - continue - } - return err - } - } - if skippedFiles == len(files) { - // If all files were skipped because the profile is not found, return - // the original profile not found error. - return profileNotFoundErr - } - - if _, ok := profiles[profile]; ok { - // if this is the second instance of the profile the Assume Role - // options must be cleared because they are only valid for the - // first reference of a profile. The self linked instance of the - // profile only have credential provider options. 
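// For illustration only, a sketch of the source_profile chaining handled
// below, as it would appear in a shared config file (profile names and
// ARN hypothetical):
//
//	[profile dev]
//	role_arn       = arn:aws:iam::123456789012:role/dev-role
//	source_profile = base
//
//	[profile base]
//	aws_access_key_id     = AKID
//	aws_secret_access_key = SECRET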
-		cfg.clearAssumeRoleOptions()
-	} else {
-		// First time a profile has been seen. It must either be assume role
-		// credentials or SSO. Assert that if the credential type requires a
-		// role ARN, the ARN is also set, or validate that the SSO
-		// configuration is complete.
-		if err := cfg.validateCredentialsConfig(profile); err != nil {
-			return err
-		}
-	}
-	profiles[profile] = struct{}{}
-
-	if err := cfg.validateCredentialType(); err != nil {
-		return err
-	}
-
-	// Link source profiles for assume roles
-	if len(cfg.SourceProfileName) != 0 {
-		// Profiles linked via source_profile ignore credential provider
-		// options, the source profile must provide the credentials.
-		cfg.clearCredentialOptions()
-
-		srcCfg := &sharedConfig{}
-		err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
-		if err != nil {
-			// A SourceProfile that doesn't exist is an error in the configuration.
-			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
-				err = SharedConfigAssumeRoleError{
-					RoleARN:       cfg.RoleARN,
-					SourceProfile: cfg.SourceProfileName,
-				}
-			}
-			return err
-		}
-
-		if !srcCfg.hasCredentials() {
-			return SharedConfigAssumeRoleError{
-				RoleARN:       cfg.RoleARN,
-				SourceProfile: cfg.SourceProfileName,
-			}
-		}
-
-		cfg.SourceProfile = srcCfg
-	}
-
-	return nil
-}
-
-// setFromIniFile loads the configuration from the file using the profile
-// provided. A sharedConfig pointer type value is used so that multiple config
-// file loadings can be chained.
-//
-// Only loads complete logically grouped values, and will not set fields in cfg
-// for incomplete grouped values in the config. Such as credentials. For
-// example if a config file only includes aws_access_key_id but no
-// aws_secret_access_key the aws_access_key_id will be ignored.
-func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error {
-	section, ok := file.IniData.GetSection(profile)
-	if !ok {
-		// Fall back to the alternate profile name form: "profile <name>"
-		section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
-		if !ok {
-			return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
-		}
-	}
-
-	if exOpts {
-		// Assume Role Parameters
-		updateString(&cfg.RoleARN, section, roleArnKey)
-		updateString(&cfg.ExternalID, section, externalIDKey)
-		updateString(&cfg.MFASerial, section, mfaSerialKey)
-		updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
-		updateString(&cfg.SourceProfileName, section, sourceProfileKey)
-		updateString(&cfg.CredentialSource, section, credentialSourceKey)
-		updateString(&cfg.Region, section, regionKey)
-		updateString(&cfg.CustomCABundle, section, customCABundleKey)
-
-		if section.Has(roleDurationSecondsKey) {
-			d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
-			cfg.AssumeRoleDuration = &d
-		}
-
-		if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 {
-			sre, err := endpoints.GetSTSRegionalEndpoint(v)
-			if err != nil {
-				return fmt.Errorf("failed to load %s from shared config, %s, %v",
-					stsRegionalEndpointSharedKey, file.Filename, err)
-			}
-			cfg.STSRegionalEndpoint = sre
-		}
-
-		if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 {
-			sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v)
-			if err != nil {
-				return fmt.Errorf("failed to load %s from shared config, %s, %v",
-					s3UsEast1RegionalSharedKey, file.Filename, err)
-			}
-			cfg.S3UsEast1RegionalEndpoint = sre
-		}
-
-		// AWS Single Sign-On (AWS SSO)
-		updateString(&cfg.SSOAccountID, section, ssoAccountIDKey)
-		updateString(&cfg.SSORegion,
section, ssoRegionKey) - updateString(&cfg.SSORoleName, section, ssoRoleNameKey) - updateString(&cfg.SSOStartURL, section, ssoStartURL) - - if err := updateEC2MetadataServiceEndpointMode(&cfg.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %s, %v", - ec2MetadataServiceEndpointModeKey, file.Filename, err) - } - updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) - - updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint) - - updateUseFIPSEndpoint(&cfg.UseFIPSEndpoint, section, useFIPSEndpointKey) - } - - updateString(&cfg.CredentialProcess, section, credentialProcessKey) - updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) - - // Shared Credentials - creds := credentials.Value{ - AccessKeyID: section.String(accessKeyIDKey), - SecretAccessKey: section.String(secretAccessKey), - SessionToken: section.String(sessionTokenKey), - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), - } - if creds.HasKeys() { - cfg.Creds = creds - } - - // Endpoint discovery - updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) - - // CSM options - updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) - updateString(&cfg.CSMHost, section, csmHostKey) - updateString(&cfg.CSMPort, section, csmPortKey) - updateString(&cfg.CSMClientID, section, csmClientIDKey) - - updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) - - return nil -} - -func updateEC2MetadataServiceEndpointMode(endpointMode *endpoints.EC2IMDSEndpointModeState, section ini.Section, key string) error { - if !section.Has(key) { - return nil - } - value := section.String(key) - return endpointMode.SetFromString(value) -} - -func (cfg *sharedConfig) validateCredentialsConfig(profile string) error { - if err := cfg.validateCredentialsRequireARN(profile); err != nil { - return err - } - - return nil -} - -func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { - var credSource string - - switch { - case len(cfg.SourceProfileName) != 0: - credSource = sourceProfileKey - case len(cfg.CredentialSource) != 0: - credSource = credentialSourceKey - case len(cfg.WebIdentityTokenFile) != 0: - credSource = webIdentityTokenFileKey - } - - if len(credSource) != 0 && len(cfg.RoleARN) == 0 { - return CredentialRequiresARNError{ - Type: credSource, - Profile: profile, - } - } - - return nil -} - -func (cfg *sharedConfig) validateCredentialType() error { - // Only one or no credential type can be defined. 
- if !oneOrNone( - len(cfg.SourceProfileName) != 0, - len(cfg.CredentialSource) != 0, - len(cfg.CredentialProcess) != 0, - len(cfg.WebIdentityTokenFile) != 0, - ) { - return ErrSharedConfigSourceCollision - } - - return nil -} - -func (cfg *sharedConfig) validateSSOConfiguration() error { - if !cfg.hasSSOConfiguration() { - return nil - } - - var missing []string - if len(cfg.SSOAccountID) == 0 { - missing = append(missing, ssoAccountIDKey) - } - - if len(cfg.SSORegion) == 0 { - missing = append(missing, ssoRegionKey) - } - - if len(cfg.SSORoleName) == 0 { - missing = append(missing, ssoRoleNameKey) - } - - if len(cfg.SSOStartURL) == 0 { - missing = append(missing, ssoStartURL) - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - cfg.Profile, strings.Join(missing, ", ")) - } - - return nil -} - -func (cfg *sharedConfig) hasCredentials() bool { - switch { - case len(cfg.SourceProfileName) != 0: - case len(cfg.CredentialSource) != 0: - case len(cfg.CredentialProcess) != 0: - case len(cfg.WebIdentityTokenFile) != 0: - case cfg.hasSSOConfiguration(): - case cfg.Creds.HasKeys(): - default: - return false - } - - return true -} - -func (cfg *sharedConfig) clearCredentialOptions() { - cfg.CredentialSource = "" - cfg.CredentialProcess = "" - cfg.WebIdentityTokenFile = "" - cfg.Creds = credentials.Value{} - cfg.SSOAccountID = "" - cfg.SSORegion = "" - cfg.SSORoleName = "" - cfg.SSOStartURL = "" -} - -func (cfg *sharedConfig) clearAssumeRoleOptions() { - cfg.RoleARN = "" - cfg.ExternalID = "" - cfg.MFASerial = "" - cfg.RoleSessionName = "" - cfg.SourceProfileName = "" -} - -func (cfg *sharedConfig) hasSSOConfiguration() bool { - switch { - case len(cfg.SSOAccountID) != 0: - case len(cfg.SSORegion) != 0: - case len(cfg.SSORoleName) != 0: - case len(cfg.SSOStartURL) != 0: - default: - return false - } - return true -} - -func oneOrNone(bs ...bool) bool { - var count int - - for _, b := range bs { - if b { - count++ - if count > 1 { - return false - } - } - } - - return true -} - -// updateString will only update the dst with the value in the section key, key -// is present in the section. -func updateString(dst *string, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.String(key) -} - -// updateBool will only update the dst with the value in the section key, key -// is present in the section. -func updateBool(dst *bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.Bool(key) -} - -// updateBoolPtr will only update the dst with the value in the section key, -// key is present in the section. -func updateBoolPtr(dst **bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = new(bool) - **dst = section.Bool(key) -} - -// SharedConfigLoadError is an error for the shared config file failed to load. -type SharedConfigLoadError struct { - Filename string - Err error -} - -// Code is the short id of the error. -func (e SharedConfigLoadError) Code() string { - return "SharedConfigLoadError" -} - -// Message is the description of the error -func (e SharedConfigLoadError) Message() string { - return fmt.Sprintf("failed to load config file, %s", e.Filename) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigLoadError) OrigErr() error { - return e.Err -} - -// Error satisfies the error interface. 
-func (e SharedConfigLoadError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", e.Err) -} - -// SharedConfigProfileNotExistsError is an error for the shared config when -// the profile was not find in the config file. -type SharedConfigProfileNotExistsError struct { - Profile string - Err error -} - -// Code is the short id of the error. -func (e SharedConfigProfileNotExistsError) Code() string { - return "SharedConfigProfileNotExistsError" -} - -// Message is the description of the error -func (e SharedConfigProfileNotExistsError) Message() string { - return fmt.Sprintf("failed to get profile, %s", e.Profile) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigProfileNotExistsError) OrigErr() error { - return e.Err -} - -// Error satisfies the error interface. -func (e SharedConfigProfileNotExistsError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", e.Err) -} - -// SharedConfigAssumeRoleError is an error for the shared config when the -// profile contains assume role information, but that information is invalid -// or not complete. -type SharedConfigAssumeRoleError struct { - RoleARN string - SourceProfile string -} - -// Code is the short id of the error. -func (e SharedConfigAssumeRoleError) Code() string { - return "SharedConfigAssumeRoleError" -} - -// Message is the description of the error -func (e SharedConfigAssumeRoleError) Message() string { - return fmt.Sprintf( - "failed to load assume role for %s, source profile %s has no shared credentials", - e.RoleARN, e.SourceProfile, - ) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigAssumeRoleError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e SharedConfigAssumeRoleError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -// CredentialRequiresARNError provides the error for shared config credentials -// that are incorrectly configured in the shared config or credentials file. -type CredentialRequiresARNError struct { - // type of credentials that were configured. - Type string - - // Profile name the credentials were in. - Profile string -} - -// Code is the short id of the error. -func (e CredentialRequiresARNError) Code() string { - return "CredentialRequiresARNError" -} - -// Message is the description of the error -func (e CredentialRequiresARNError) Message() string { - return fmt.Sprintf( - "credential type %s requires role_arn, profile %s", - e.Type, e.Profile, - ) -} - -// OrigErr is the underlying error that caused the failure. -func (e CredentialRequiresARNError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e CredentialRequiresARNError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. -func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - if section.Bool(key) { - *dst = endpoints.DualStackEndpointStateEnabled - } else { - *dst = endpoints.DualStackEndpointStateDisabled - } - - return -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. 
-func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - if section.Bool(key) { - *dst = endpoints.FIPSEndpointStateEnabled - } else { - *dst = endpoints.FIPSEndpointStateDisabled - } - - return -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go deleted file mode 100644 index 9937538317..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go +++ /dev/null @@ -1,81 +0,0 @@ -package v4 - -import ( - "github.com/aws/aws-sdk-go/internal/strings" -) - -// validator houses a set of rule needed for validation of a -// string value -type rules []rule - -// rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that rule -type rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// mapRule generic rule for maps -type mapRule map[string]struct{} - -// IsValid for the map rule satisfies whether it exists in the map -func (m mapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// allowList is a generic rule for allow listing -type allowList struct { - rule -} - -// IsValid for allow list checks if the value is within the allow list -func (w allowList) IsValid(value string) bool { - return w.rule.IsValid(value) -} - -// excludeList is a generic rule for exclude listing -type excludeList struct { - rule -} - -// IsValid for exclude list checks if the value is within the exclude list -func (b excludeList) IsValid(value string) bool { - return !b.rule.IsValid(value) -} - -type patterns []string - -// IsValid for patterns checks each pattern and returns if a match has -// been found -func (p patterns) IsValid(value string) bool { - for _, pattern := range p { - if strings.HasPrefixFold(value, pattern) { - return true - } - } - return false -} - -// inclusiveRules rules allow for rules to depend on one another -type inclusiveRules []rule - -// IsValid will return true if all rules are true -func (r inclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go deleted file mode 100644 index 6aa2ed241b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go +++ /dev/null @@ -1,7 +0,0 @@ -package v4 - -// WithUnsignedPayload will enable and set the UnsignedPayload field to -// true of the signer. 
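// For illustration only, a sketch of using the functional option described
// above with the package's NewSigner (creds is an assumed
// *credentials.Credentials value):
//
//	signer := v4.NewSigner(creds, v4.WithUnsignedPayload)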
-func WithUnsignedPayload(v4 *Signer) { - v4.UnsignedPayload = true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go deleted file mode 100644 index cf672b6ac4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !go1.7 -// +build !go1.7 - -package v4 - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws" -) - -func requestContext(r *http.Request) aws.Context { - return aws.BackgroundContext() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go deleted file mode 100644 index 21fe74e6fa..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build go1.7 -// +build go1.7 - -package v4 - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws" -) - -func requestContext(r *http.Request) aws.Context { - return r.Context() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go deleted file mode 100644 index 02cbd97e23..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go +++ /dev/null @@ -1,63 +0,0 @@ -package v4 - -import ( - "encoding/hex" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" -) - -type credentialValueProvider interface { - Get() (credentials.Value, error) -} - -// StreamSigner implements signing of event stream encoded payloads -type StreamSigner struct { - region string - service string - - credentials credentialValueProvider - - prevSig []byte -} - -// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages -func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { - return &StreamSigner{ - region: region, - service: service, - credentials: credentials, - prevSig: seedSignature, - } -} - -// GetSignature takes an event stream encoded headers and payload and returns a signature -func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { - credValue, err := s.credentials.Get() - if err != nil { - return nil, err - } - - sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) - - keyPath := buildSigningScope(s.region, s.service, date) - - stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) - - signature := hmacSHA256(sigKey, []byte(stringToSign)) - s.prevSig = signature - - return signature, nil -} - -func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { - return strings.Join([]string{ - "AWS4-HMAC-SHA256-PAYLOAD", - formatTime(date), - scope, - hex.EncodeToString(prevSig), - hex.EncodeToString(hashSHA256(headers)), - hex.EncodeToString(hashSHA256(payload)), - }, "\n") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go deleted file mode 100644 index 7711ec7377..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build go1.5 -// +build go1.5 - -package v4 - -import ( - "net/url" - "strings" -) - -func getURIPath(u *url.URL) string { - var uri string - - if len(u.Opaque) > 0 { - uri = "/" + 
strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
-	} else {
-		uri = u.EscapedPath()
-	}
-
-	if len(uri) == 0 {
-		uri = "/"
-	}
-
-	return uri
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
deleted file mode 100644
index 4d78162c03..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ /dev/null
@@ -1,854 +0,0 @@
-// Package v4 implements signing for AWS V4 signer
-//
-// Provides request signing for requests that need to be signed with
-// AWS V4 Signatures.
-//
-// Standalone Signer
-//
-// Generally using the signer outside of the SDK should not require any additional
-// logic when using Go v1.5 or higher. The signer does this by taking advantage
-// of the URL.EscapedPath method. If your request URI requires additional escaping
-// you may need to use the URL.Opaque to define what the raw URI should be sent
-// to the service as.
-//
-// The signer will first check the URL.Opaque field, and use its value if set.
-// The signer does require the URL.Opaque field to be set in the form of:
-//
-//	"//<hostname>/<path>"
-//
-//	// e.g.
-//	"//example.com/some/path"
-//
-// The leading "//" and hostname are required or the URL.Opaque escaping will
-// not work correctly.
-//
-// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
-// method and use the returned value. If you're using Go v1.4 you must set
-// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
-// Go v1.5 the signer will fallback to URL.Path.
-//
-// AWS v4 signature validation requires that the canonical string's URI path
-// element must be the URI escaped form of the HTTP request's path.
-// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
-//
-// The Go HTTP client will perform escaping automatically on the request. Some
-// of this escaping may cause signature validation errors because the HTTP
-// request differs from the URI path or query the signature was generated for.
-// https://golang.org/pkg/net/url/#URL.EscapedPath
-//
-// Because of this, when using the signer outside of the SDK it is recommended
-// to explicitly escape the request prior to signing, which helps prevent
-// signature validation errors. This can be done by setting the URL.Opaque
-// or URL.RawPath. The SDK will use URL.Opaque first and then
-// call URL.EscapedPath() if Opaque is not set.
-//
-// If signing a request intended for an HTTP2 server, and you're using Go 1.6.2
-// through 1.7.4, you should use URL.RawPath as the pre-escaped form of the
-// request URL. https://github.com/golang/go/issues/16847 points to a bug in
-// Go pre-1.8 that fails to make HTTP2 requests using an absolute URL in the
-// HTTP message. URL.Opaque generally will force Go to make requests with
-// absolute URLs. URL.RawPath does not do this, but RawPath must be a valid
-// escaping of Path or url.EscapedPath will ignore the RawPath escaping.
-//
-// Test `TestStandaloneSign` provides a complete example of using the signer
-// outside of the SDK and pre-escaping the URI path. A short sketch of the
-// URL.Opaque approach follows below.
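// For illustration only, a minimal sketch of pre-escaping via URL.Opaque as
// described above (host and path hypothetical):
//
//	req, _ := http.NewRequest(http.MethodGet, "https://example.com/some/path", nil)
//	// Pre-escaped form the canonical string should be built from.
//	req.URL.Opaque = "//example.com/some/p%20ath"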
-package v4 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkio" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -const ( - authorizationHeader = "Authorization" - authHeaderSignatureElem = "Signature=" - signatureQueryKey = "X-Amz-Signature" - - authHeaderPrefix = "AWS4-HMAC-SHA256" - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" - awsV4Request = "aws4_request" - - // emptyStringSHA256 is a SHA256 of an empty string - emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` -) - -var ignoredHeaders = rules{ - excludeList{ - mapRule{ - authorizationHeader: struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, - }, - }, -} - -// requiredSignedHeaders is a allow list for build canonical headers. -var requiredSignedHeaders = rules{ - allowList{ - mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Tagging": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - "X-Amz-Content-Sha256": struct{}{}, - }, - }, - patterns{"X-Amz-Meta-"}, - patterns{"X-Amz-Object-Lock-"}, -} - -// allowedHoisting is a allow list for build query headers. The boolean value -// represents whether or not it is a pattern. -var allowedQueryHoisting = inclusiveRules{ - excludeList{requiredSignedHeaders}, - patterns{"X-Amz-"}, -} - -// Signer applies AWS v4 signing to given request. Use this to sign requests -// that need to be signed with AWS V4 Signatures. -type Signer struct { - // The authentication credentials the request will be signed against. - // This value must be set to sign requests. - Credentials *credentials.Credentials - - // Sets the log level the signer should use when reporting information to - // the logger. If the logger is nil nothing will be logged. 
See - // aws.LogLevelType for more information on available logging levels - // - // By default nothing will be logged. - Debug aws.LogLevelType - - // The logger loging information will be written to. If there the logger - // is nil, nothing will be logged. - Logger aws.Logger - - // Disables the Signer's moving HTTP header key/value pairs from the HTTP - // request header to the request's query string. This is most commonly used - // with pre-signed requests preventing headers from being added to the - // request's query string. - DisableHeaderHoisting bool - - // Disables the automatic escaping of the URI path of the request for the - // siganture's canonical string's path. For services that do not need additional - // escaping then use this to disable the signer escaping the path. - // - // S3 is an example of a service that does not need additional escaping. - // - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - DisableURIPathEscaping bool - - // Disables the automatical setting of the HTTP request's Body field with the - // io.ReadSeeker passed in to the signer. This is useful if you're using a - // custom wrapper around the body for the io.ReadSeeker and want to preserve - // the Body value on the Request.Body. - // - // This does run the risk of signing a request with a body that will not be - // sent in the request. Need to ensure that the underlying data of the Body - // values are the same. - DisableRequestBodyOverwrite bool - - // currentTimeFn returns the time value which represents the current time. - // This value should only be used for testing. If it is nil the default - // time.Now will be used. - currentTimeFn func() time.Time - - // UnsignedPayload will prevent signing of the payload. This will only - // work for services that have support for this. - UnsignedPayload bool -} - -// NewSigner returns a Signer pointer configured with the credentials and optional -// option values provided. If not options are provided the Signer will use its -// default configuration. -func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { - v4 := &Signer{ - Credentials: credentials, - } - - for _, option := range options { - option(v4) - } - - return v4 -} - -type signingCtx struct { - ServiceName string - Region string - Request *http.Request - Body io.ReadSeeker - Query url.Values - Time time.Time - ExpireTime time.Duration - SignedHeaderVals http.Header - - DisableURIPathEscaping bool - - credValues credentials.Value - isPresign bool - unsignedPayload bool - - bodyDigest string - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string -} - -// Sign signs AWS v4 requests with the provided body, service name, region the -// request is made to, and time the request is signed at. The signTime allows -// you to specify that a request is signed for the future, and cannot be -// used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. Generally for signed requests this value -// is not needed as the full request context will be captured by the http.Request -// value. It is included for reference though. -// -// Sign will set the request's Body to be the `body` parameter passed in. If -// the body is not already an io.ReadCloser, it will be wrapped within one. 
If -// a `nil` body parameter passed to Sign, the request's Body field will be -// also set to nil. Its important to note that this functionality will not -// change the request's ContentLength of the request. -// -// Sign differs from Presign in that it will sign the request using HTTP -// header values. This type of signing is intended for http.Request values that -// will not be shared, or are shared in a way the header values on the request -// will not be lost. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, 0, false, signTime) -} - -// Presign signs AWS v4 requests with the provided body, service name, region -// the request is made to, and time the request is signed at. The signTime -// allows you to specify that a request is signed for the future, and cannot -// be used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. For presigned requests these headers -// and their values must be included on the HTTP request when it is made. This -// is helpful to know what header values need to be shared with the party the -// presigned request will be distributed to. -// -// Presign differs from Sign in that it will sign the request using query string -// instead of header values. This allows you to share the Presigned Request's -// URL with third parties, or distribute it throughout your system with minimal -// dependencies. -// -// Presign also takes an exp value which is the duration the -// signed request will be valid after the signing time. This is allows you to -// set when the request will expire. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -// -// Presigning a S3 request will not compute the body's SHA256 hash by default. -// This is done due to the general use case for S3 presigned URLs is to share -// PUT/GET capabilities. If you would like to include the body's SHA256 in the -// presigned request's signature you can set the "X-Amz-Content-Sha256" -// HTTP header and that will be included in the request's signature. 
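Presign's contract (query-string auth rather than headers, plus an expiry window) is easy to misuse, so a short sketch may help. It reuses the signer setup from the sketch above; the bucket and key are illustrative:

```go
// A nil body is permitted; for S3 presigning the payload hash defaults to
// UNSIGNED-PAYLOAD unless X-Amz-Content-Sha256 is set explicitly.
req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/some/key", nil)

if _, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now()); err != nil {
	panic(err)
}

shareableURL := req.URL.String() // carries X-Amz-Signature; valid for 15 minutes
_ = shareableURL
```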
-func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, exp, true, signTime) -} - -func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { - currentTimeFn := v4.currentTimeFn - if currentTimeFn == nil { - currentTimeFn = time.Now - } - - ctx := &signingCtx{ - Request: r, - Body: body, - Query: r.URL.Query(), - Time: signTime, - ExpireTime: exp, - isPresign: isPresign, - ServiceName: service, - Region: region, - DisableURIPathEscaping: v4.DisableURIPathEscaping, - unsignedPayload: v4.UnsignedPayload, - } - - for key := range ctx.Query { - sort.Strings(ctx.Query[key]) - } - - if ctx.isRequestSigned() { - ctx.Time = currentTimeFn() - ctx.handlePresignRemoval() - } - - var err error - ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r)) - if err != nil { - return http.Header{}, err - } - - ctx.sanitizeHostForHeader() - ctx.assignAmzQueryValues() - if err := ctx.build(v4.DisableHeaderHoisting); err != nil { - return nil, err - } - - // If the request is not presigned the body should be attached to it. This - // prevents the confusion of wanting to send a signed request without - // the body the request was signed for attached. - if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { - var reader io.ReadCloser - if body != nil { - var ok bool - if reader, ok = body.(io.ReadCloser); !ok { - reader = ioutil.NopCloser(body) - } - } - r.Body = reader - } - - if v4.Debug.Matches(aws.LogDebugWithSigning) { - v4.logSigningInfo(ctx) - } - - return ctx.SignedHeaderVals, nil -} - -func (ctx *signingCtx) sanitizeHostForHeader() { - request.SanitizeHostForHeader(ctx.Request) -} - -func (ctx *signingCtx) handlePresignRemoval() { - if !ctx.isPresign { - return - } - - // The credentials have expired for this request. The current signing - // is invalid, and needs to be request because the request will fail. - ctx.removePresign() - - // Update the request's query string to ensure the values stays in - // sync in the case retrieving the new credentials fails. - ctx.Request.URL.RawQuery = ctx.Query.Encode() -} - -func (ctx *signingCtx) assignAmzQueryValues() { - if ctx.isPresign { - ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) - if ctx.credValues.SessionToken != "" { - ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } else { - ctx.Query.Del("X-Amz-Security-Token") - } - - return - } - - if ctx.credValues.SessionToken != "" { - ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } -} - -// SignRequestHandler is a named request handler the SDK will use to sign -// service client request with using the V4 signature. -var SignRequestHandler = request.NamedHandler{ - Name: "v4.SignRequestHandler", Fn: SignSDKRequest, -} - -// SignSDKRequest signs an AWS request with the V4 signature. This -// request handler should only be used with the SDK's built in service client's -// API operation requests. -// -// This function should not be used on its own, but in conjunction with -// an AWS service client's API operation call. To sign a standalone request -// not created by a service client's API operation method use the "Sign" or -// "Presign" functions of the "Signer" type. 
-// -// If the credentials of the request's config are set to -// credentials.AnonymousCredentials the request will not be signed. -func SignSDKRequest(req *request.Request) { - SignSDKRequestWithCurrentTime(req, time.Now) -} - -// BuildNamedHandler will build a generic handler for signing. -func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { - return request.NamedHandler{ - Name: name, - Fn: func(req *request.Request) { - SignSDKRequestWithCurrentTime(req, time.Now, opts...) - }, - } -} - -// SignSDKRequestWithCurrentTime will sign the SDK's request using the time -// function passed in. Behaves the same as SignSDKRequest with the exception -// the request is signed with the value returned by the current time function. -func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. - if req.Config.Credentials == credentials.AnonymousCredentials { - return - } - - region := req.ClientInfo.SigningRegion - if region == "" { - region = aws.StringValue(req.Config.Region) - } - - name := req.ClientInfo.SigningName - if name == "" { - name = req.ClientInfo.ServiceName - } - - v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { - v4.Debug = req.Config.LogLevel.Value() - v4.Logger = req.Config.Logger - v4.DisableHeaderHoisting = req.NotHoist - v4.currentTimeFn = curTimeFn - if name == "s3" { - // S3 service should not have any escaping applied - v4.DisableURIPathEscaping = true - } - // Prevents setting the HTTPRequest's Body. Since the Body could be - // wrapped in a custom io.Closer that we do not want to be stompped - // on top of by the signer. 
- v4.DisableRequestBodyOverwrite = true - }) - - for _, opt := range opts { - opt(v4) - } - - curTime := curTimeFn() - signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, req.ExpireTime > 0, curTime, - ) - if err != nil { - req.Error = err - req.SignedHeaderVals = nil - return - } - - req.SignedHeaderVals = signedHeaders - req.LastSignedAt = curTime -} - -const logSignInfoMsg = `DEBUG: Request Signature: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *Signer) logSigningInfo(ctx *signingCtx) { - signedURLMsg := "" - if ctx.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (ctx *signingCtx) build(disableHeaderHoisting bool) error { - ctx.buildTime() // no depends - ctx.buildCredentialString() // no depends - - if err := ctx.buildBodyDigest(); err != nil { - return err - } - - unsignedHeaders := ctx.Request.Header - if ctx.isPresign { - if !disableHeaderHoisting { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends - for k := range urlValues { - ctx.Query[k] = urlValues[k] - } - } - } - - ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) - ctx.buildCanonicalString() // depends on canon headers / signed headers - ctx.buildStringToSign() // depends on canon string - ctx.buildSignature() // depends on string to sign - - if ctx.isPresign { - ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, - "SignedHeaders=" + ctx.signedHeaders, - authHeaderSignatureElem + ctx.signature, - } - ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) - } - - return nil -} - -// GetSignedRequestSignature attempts to extract the signature of the request. -// Returning an error if the request is unsigned, or unable to extract the -// signature. 
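GetSignedRequestSignature, defined just below, is mostly useful in tests that compare an expected signature against what Sign or Presign produced. A sketch, where signedReq stands in for any previously signed *http.Request:

```go
sig, err := v4.GetSignedRequestSignature(signedReq)
if err != nil {
	// the request was unsigned, or its signature could not be extracted
	panic(err)
}
fmt.Printf("signature: %x\n", sig) // raw bytes, hex-decoded from header or query
```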
-func GetSignedRequestSignature(r *http.Request) ([]byte, error) { - - if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { - ps := strings.Split(auth, ", ") - for _, p := range ps { - if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { - sig := p[len(authHeaderSignatureElem):] - if len(sig) == 0 { - return nil, fmt.Errorf("invalid request signature authorization header") - } - return hex.DecodeString(sig) - } - } - } - - if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { - return hex.DecodeString(sig) - } - - return nil, fmt.Errorf("request not signed") -} - -func (ctx *signingCtx) buildTime() { - if ctx.isPresign { - duration := int64(ctx.ExpireTime / time.Second) - ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) - ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) - } -} - -func (ctx *signingCtx) buildCredentialString() { - ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) - - if ctx.isPresign { - ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) - } -} - -func buildQuery(r rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} -func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string - headers = append(headers, "host") - for k, v := range header { - if !r.IsValid(k) { - continue // ignored header - } - if ctx.SignedHeaderVals == nil { - ctx.SignedHeaderVals = make(http.Header) - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { - // include additional values - ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
- continue - } - - headers = append(headers, lowerCaseKey) - ctx.SignedHeaderVals[lowerCaseKey] = v - } - sort.Strings(headers) - - ctx.signedHeaders = strings.Join(headers, ";") - - if ctx.isPresign { - ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) - } - - headerItems := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - if ctx.Request.Host != "" { - headerItems[i] = "host:" + ctx.Request.Host - } else { - headerItems[i] = "host:" + ctx.Request.URL.Host - } - } else { - headerValues := make([]string, len(ctx.SignedHeaderVals[k])) - for i, v := range ctx.SignedHeaderVals[k] { - headerValues[i] = strings.TrimSpace(v) - } - headerItems[i] = k + ":" + - strings.Join(headerValues, ",") - } - } - stripExcessSpaces(headerItems) - ctx.canonicalHeaders = strings.Join(headerItems, "\n") -} - -func (ctx *signingCtx) buildCanonicalString() { - ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) - - uri := getURIPath(ctx.Request.URL) - - if !ctx.DisableURIPathEscaping { - uri = rest.EscapePath(uri, false) - } - - ctx.canonicalString = strings.Join([]string{ - ctx.Request.Method, - uri, - ctx.Request.URL.RawQuery, - ctx.canonicalHeaders + "\n", - ctx.signedHeaders, - ctx.bodyDigest, - }, "\n") -} - -func (ctx *signingCtx) buildStringToSign() { - ctx.stringToSign = strings.Join([]string{ - authHeaderPrefix, - formatTime(ctx.Time), - ctx.credentialString, - hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), - }, "\n") -} - -func (ctx *signingCtx) buildSignature() { - creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) - signature := hmacSHA256(creds, []byte(ctx.stringToSign)) - ctx.signature = hex.EncodeToString(signature) -} - -func (ctx *signingCtx) buildBodyDigest() error { - hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - includeSHA256Header := ctx.unsignedPayload || - ctx.ServiceName == "s3" || - ctx.ServiceName == "s3-object-lambda" || - ctx.ServiceName == "glacier" - - s3Presign := ctx.isPresign && - (ctx.ServiceName == "s3" || - ctx.ServiceName == "s3-object-lambda") - - if ctx.unsignedPayload || s3Presign { - hash = "UNSIGNED-PAYLOAD" - includeSHA256Header = !s3Presign - } else if ctx.Body == nil { - hash = emptyStringSHA256 - } else { - if !aws.IsReaderSeekable(ctx.Body) { - return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) - } - hashBytes, err := makeSha256Reader(ctx.Body) - if err != nil { - return err - } - hash = hex.EncodeToString(hashBytes) - } - - if includeSHA256Header { - ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) - } - } - ctx.bodyDigest = hash - - return nil -} - -// isRequestSigned returns if the request is currently signed or presigned -func (ctx *signingCtx) isRequestSigned() bool { - if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { - return true - } - if ctx.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. 
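buildStringToSign and buildSignature above compose the final HMAC chain; the helpers they call (hmacSHA256, deriveSigningKey, the time formatters) follow in the next hunk. Below is a self-contained sketch of that derivation with throwaway inputs; the canonical request is a placeholder string, not a real one:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret, date, region, service := "EXAMPLESECRET", "20240101", "us-east-1", "s3"

	// Derive the signing key by chaining HMACs over date, region,
	// service, and the fixed "aws4_request" terminator.
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(date))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte(service))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	// The string to sign bundles algorithm, timestamp, credential scope,
	// and the SHA-256 of the canonical request (placeholder here).
	canonicalHash := sha256.Sum256([]byte("<canonical request>"))
	stringToSign := "AWS4-HMAC-SHA256\n" +
		date + "T000000Z\n" +
		date + "/" + region + "/" + service + "/aws4_request\n" +
		hex.EncodeToString(canonicalHash[:])

	fmt.Println(hex.EncodeToString(hmacSHA256(kSigning, []byte(stringToSign))))
}
```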
-func (ctx *signingCtx) removePresign() { - ctx.Query.Del("X-Amz-Algorithm") - ctx.Query.Del("X-Amz-Signature") - ctx.Query.Del("X-Amz-Security-Token") - ctx.Query.Del("X-Amz-Date") - ctx.Query.Del("X-Amz-Expires") - ctx.Query.Del("X-Amz-Credential") - ctx.Query.Del("X-Amz-SignedHeaders") -} - -func hmacSHA256(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func hashSHA256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { - hash := sha256.New() - start, err := reader.Seek(0, sdkio.SeekCurrent) - if err != nil { - return nil, err - } - defer func() { - // ensure error is return if unable to seek back to start of payload. - _, err = reader.Seek(start, sdkio.SeekStart) - }() - - // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies - // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. - size, err := aws.SeekerLen(reader) - if err != nil { - io.Copy(hash, reader) - } else { - io.CopyN(hash, reader, size) - } - - return hash.Sum(nil), nil -} - -const doubleSpace = " " - -// stripExcessSpaces will rewrite the passed in slice's string values to not -// contain multiple side-by-side spaces. -func stripExcessSpaces(vals []string) { - var j, k, l, m, spaces int - for i, str := range vals { - // Trim trailing spaces - for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { - } - - // Trim leading spaces - for k = 0; k < j && str[k] == ' '; k++ { - } - str = str[k : j+1] - - // Strip multiple spaces. - j = strings.Index(str, doubleSpace) - if j < 0 { - vals[i] = str - continue - } - - buf := []byte(str) - for k, m, l = j, j, len(buf); k < l; k++ { - if buf[k] == ' ' { - if spaces == 0 { - // First space. - buf[m] = buf[k] - m++ - } - spaces++ - } else { - // End of multiple spaces. - spaces = 0 - buf[m] = buf[k] - m++ - } - } - - vals[i] = string(buf[:m]) - } -} - -func buildSigningScope(region, service string, dt time.Time) string { - return strings.Join([]string{ - formatShortTime(dt), - region, - service, - awsV4Request, - }, "/") -} - -func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { - kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) - kRegion := hmacSHA256(kDate, []byte(region)) - kService := hmacSHA256(kRegion, []byte(service)) - signingKey := hmacSHA256(kService, []byte(awsV4Request)) - return signingKey -} - -func formatShortTime(dt time.Time) string { - return dt.UTC().Format(shortTimeFormat) -} - -func formatTime(dt time.Time) string { - return dt.UTC().Format(timeFormat) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go deleted file mode 100644 index 98751ee84f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ /dev/null @@ -1,264 +0,0 @@ -package aws - -import ( - "io" - "strings" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the -// SDK to accept an io.Reader that is not also an io.Seeker for unsigned -// streaming payload API operations. -// -// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API -// operation's input will prevent that operation being retried in the case of -// network errors, and cause operation requests to fail if the operation -// requires payload signing. 
-// -// Note: If using With S3 PutObject to stream an object upload The SDK's S3 -// Upload manager (s3manager.Uploader) provides support for streaming with the -// ability to retry network errors. -func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { - return ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// IsReaderSeekable returns if the underlying reader type can be seeked. A -// io.Reader might not actually be seekable if it is the ReaderSeekerCloser -// type. -func IsReaderSeekable(r io.Reader) bool { - switch v := r.(type) { - case ReaderSeekerCloser: - return v.IsSeeker() - case *ReaderSeekerCloser: - return v.IsSeeker() - case io.ReadSeeker: - return true - default: - return false - } -} - -// Read reads from the reader up to size of p. The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be -// returned. -// -// Performs the same functionality as io.Reader Read -func (r ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// IsSeeker returns if the underlying reader is also a seeker. -func (r ReaderSeekerCloser) IsSeeker() bool { - _, ok := r.r.(io.Seeker) - return ok -} - -// HasLen returns the length of the underlying reader if the value implements -// the Len() int method. -func (r ReaderSeekerCloser) HasLen() (int, bool) { - type lenner interface { - Len() int - } - - if lr, ok := r.r.(lenner); ok { - return lr.Len(), true - } - - return 0, false -} - -// GetLen returns the length of the bytes remaining in the underlying reader. -// Checks first for Len(), then io.Seeker to determine the size of the -// underlying reader. -// -// Will return -1 if the length cannot be determined. -func (r ReaderSeekerCloser) GetLen() (int64, error) { - if l, ok := r.HasLen(); ok { - return int64(l), nil - } - - if s, ok := r.r.(io.Seeker); ok { - return seekerLen(s) - } - - return -1, nil -} - -// SeekerLen attempts to get the number of bytes remaining at the seeker's -// current position. Returns the number of bytes remaining or error. -func SeekerLen(s io.Seeker) (int64, error) { - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. 
- switch v := s.(type) { - case ReaderSeekerCloser: - return v.GetLen() - case *ReaderSeekerCloser: - return v.GetLen() - } - - return seekerLen(s) -} - -func seekerLen(s io.Seeker) (int64, error) { - curOffset, err := s.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - endOffset, err := s.Seek(0, sdkio.SeekEnd) - if err != nil { - return 0, err - } - - _, err = s.Seek(curOffset, sdkio.SeekStart) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} - -// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer -// in memory. Safe to use concurrently. -type WriteAtBuffer struct { - buf []byte - m sync.Mutex - - // GrowthCoeff defines the growth rate of the internal buffer. By - // default, the growth rate is 1, where expanding the internal - // buffer will allocate only enough capacity to fit the new expected - // length. - GrowthCoeff float64 -} - -// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer -// provided by buf. -func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { - return &WriteAtBuffer{buf: buf} -} - -// WriteAt writes a slice of bytes to a buffer starting at the position provided -// The number of bytes written will be returned, or error. Can overwrite previous -// written slices if the write ats overlap. -func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { - pLen := len(p) - expLen := pos + int64(pLen) - b.m.Lock() - defer b.m.Unlock() - if int64(len(b.buf)) < expLen { - if int64(cap(b.buf)) < expLen { - if b.GrowthCoeff < 1 { - b.GrowthCoeff = 1 - } - newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) - copy(newBuf, b.buf) - b.buf = newBuf - } - b.buf = b.buf[:expLen] - } - copy(b.buf[pos:], p) - return pLen, nil -} - -// Bytes returns a slice of bytes written to the buffer. -func (b *WriteAtBuffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.buf -} - -// MultiCloser is a utility to close multiple io.Closers within a single -// statement. -type MultiCloser []io.Closer - -// Close closes all of the io.Closers making up the MultiClosers. Any -// errors that occur while closing will be returned in the order they -// occur. -func (m MultiCloser) Close() error { - var errs errors - for _, c := range m { - err := c.Close() - if err != nil { - errs = append(errs, err) - } - } - if len(errs) != 0 { - return errs - } - - return nil -} - -type errors []error - -func (es errors) Error() string { - var parts []string - for _, e := range es { - parts = append(parts, e.Error()) - } - - return strings.Join(parts, "\n") -} - -// CopySeekableBody copies the seekable body to an io.Writer -func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { - curPos, err := src.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - // copy errors may be assumed to be from the body. - n, err := io.Copy(dst, src) - if err != nil { - return n, err - } - - // seek back to the first position after reading to reset - // the body for transmission. 
- _, err = src.Seek(curPos, sdkio.SeekStart) - if err != nil { - return n, err - } - - return n, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go deleted file mode 100644 index fed561bd59..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build go1.8 -// +build go1.8 - -package aws - -import "net/url" - -// URLHostname will extract the Hostname without port from the URL value. -// -// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. -func URLHostname(url *url.URL) string { - return url.Hostname() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go deleted file mode 100644 index 95282db03b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build !go1.8 -// +build !go1.8 - -package aws - -import ( - "net/url" - "strings" -) - -// URLHostname will extract the Hostname without port from the URL value. -// -// Copy of Go 1.8's net/url#URL.Hostname functionality. -func URLHostname(url *url.URL) string { - return stripPort(url.Host) - -} - -// stripPort is copy of Go 1.8 url#URL.Hostname functionality. -// https://golang.org/src/net/url/url.go -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go deleted file mode 100644 index bc123f30cd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package aws provides core functionality for making requests to AWS services. -package aws - -// SDKName is the name of this AWS SDK -const SDKName = "aws-sdk-go" - -// SDKVersion is the version of this SDK -const SDKVersion = "1.44.122" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go deleted file mode 100644 index 365345353e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go +++ /dev/null @@ -1,41 +0,0 @@ -//go:build !go1.7 -// +build !go1.7 - -package context - -import "time" - -// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to -// provide a 1.6 and 1.5 safe version of context that is compatible with Go -// 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case BackgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -// BackgroundCtx is the common base context. 
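Stepping back to WriteAtBuffer (two hunks up): it exists so concurrent downloaders such as s3manager.Downloader can write ranged chunks into memory out of order. A sketch of that contract; the positions and payloads are arbitrary:

```go
buf := aws.NewWriteAtBuffer(nil)

// WriteAt calls may arrive out of order; the buffer grows to fit
// pos+len(p) and is safe for concurrent use.
buf.WriteAt([]byte("world"), 6)
buf.WriteAt([]byte("hello "), 0)

fmt.Printf("%s\n", buf.Bytes()) // "hello world"
```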
-var BackgroundCtx = new(emptyCtx) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go deleted file mode 100644 index e83a99886b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go +++ /dev/null @@ -1,120 +0,0 @@ -package ini - -// ASTKind represents different states in the parse table -// and the type of AST that is being constructed -type ASTKind int - -// ASTKind* is used in the parse table to transition between -// the different states -const ( - ASTKindNone = ASTKind(iota) - ASTKindStart - ASTKindExpr - ASTKindEqualExpr - ASTKindStatement - ASTKindSkipStatement - ASTKindExprStatement - ASTKindSectionStatement - ASTKindNestedSectionStatement - ASTKindCompletedNestedSectionStatement - ASTKindCommentStatement - ASTKindCompletedSectionStatement -) - -func (k ASTKind) String() string { - switch k { - case ASTKindNone: - return "none" - case ASTKindStart: - return "start" - case ASTKindExpr: - return "expr" - case ASTKindStatement: - return "stmt" - case ASTKindSectionStatement: - return "section_stmt" - case ASTKindExprStatement: - return "expr_stmt" - case ASTKindCommentStatement: - return "comment" - case ASTKindNestedSectionStatement: - return "nested_section_stmt" - case ASTKindCompletedSectionStatement: - return "completed_stmt" - case ASTKindSkipStatement: - return "skip" - default: - return "" - } -} - -// AST interface allows us to determine what kind of node we -// are on and casting may not need to be necessary. -// -// The root is always the first node in Children -type AST struct { - Kind ASTKind - Root Token - RootToken bool - Children []AST -} - -func newAST(kind ASTKind, root AST, children ...AST) AST { - return AST{ - Kind: kind, - Children: append([]AST{root}, children...), - } -} - -func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { - return AST{ - Kind: kind, - Root: root, - RootToken: true, - Children: children, - } -} - -// AppendChild will append to the list of children an AST has. -func (a *AST) AppendChild(child AST) { - a.Children = append(a.Children, child) -} - -// GetRoot will return the root AST which can be the first entry -// in the children list or a token. -func (a *AST) GetRoot() AST { - if a.RootToken { - return *a - } - - if len(a.Children) == 0 { - return AST{} - } - - return a.Children[0] -} - -// GetChildren will return the current AST's list of children -func (a *AST) GetChildren() []AST { - if len(a.Children) == 0 { - return []AST{} - } - - if a.RootToken { - return a.Children - } - - return a.Children[1:] -} - -// SetChildren will set and override all children of the AST. -func (a *AST) SetChildren(children []AST) { - if a.RootToken { - a.Children = children - } else { - a.Children = append(a.Children[:1], children...) - } -} - -// Start is used to indicate the starting state of the parse table. 
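The Root/RootToken split above is subtle: when RootToken is false, Children[0] holds the root and GetChildren skips it; when true, Root is a token and all of Children are real children. A sketch of the equal-expression shape the parser builds, usable only as a scratch test inside this internal package since the constructors are unexported:

```go
key := newToken(TokenLit, []rune("region"), StringType)
op := newToken(TokenOp, []rune("="), NoneType)
val := newToken(TokenLit, []rune("us-west-2"), StringType)

// The '=' token becomes the root token; the key expression is child 0.
expr := newEqualExpr(newExpression(key), op)
expr.AppendChild(newExpression(val)) // value expression becomes child 1

fmt.Println(EqualExprKey(expr)) // "region"
```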
-var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go deleted file mode 100644 index 0895d53cbe..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go +++ /dev/null @@ -1,11 +0,0 @@ -package ini - -var commaRunes = []rune(",") - -func isComma(b rune) bool { - return b == ',' -} - -func newCommaToken() Token { - return newToken(TokenComma, commaRunes, NoneType) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go deleted file mode 100644 index 0b76999ba1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// isComment will return whether or not the next byte(s) is a -// comment. -func isComment(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case ';': - return true - case '#': - return true - } - - return false -} - -// newCommentToken will create a comment token and -// return how many bytes were read. -func newCommentToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if b[i] == '\n' { - break - } - - if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { - break - } - } - - return newToken(TokenComment, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go deleted file mode 100644 index 1e55bbd07b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package ini is an LL(1) parser for configuration files. -// -// Example: -// sections, err := ini.OpenFile("/path/to/file") -// if err != nil { -// panic(err) -// } -// -// profile := "foo" -// section, ok := sections.GetSection(profile) -// if !ok { -// fmt.Printf("section %q could not be found", profile) -// } -// -// Below is the BNF that describes this parser -// Grammar: -// stmt -> section | stmt' -// stmt' -> epsilon | expr -// expr -> value (stmt)* | equal_expr (stmt)* -// equal_expr -> value ( ':' | '=' ) equal_expr' -// equal_expr' -> number | string | quoted_string -// quoted_string -> " quoted_string' -// quoted_string' -> string quoted_string_end -// quoted_string_end -> " -// -// section -> [ section' -// section' -> section_value section_close -// section_value -> number | string_subset | boolean | quoted_string_subset -// quoted_string_subset -> " quoted_string_subset' -// quoted_string_subset' -> string_subset quoted_string_end -// quoted_string_subset -> " -// section_close -> ] -// -// value -> number | string_subset | boolean -// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ? -// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ? 
-// -// SkipState will skip (NL WS)+ -// -// comment -> # comment' | ; comment' -// comment' -> epsilon | value -package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go deleted file mode 100644 index 04345a54c2..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go +++ /dev/null @@ -1,4 +0,0 @@ -package ini - -// emptyToken is used to satisfy the Token interface -var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go deleted file mode 100644 index 91ba2a59dd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -// newExpression will return an expression AST. -// Expr represents an expression -// -// grammar: -// expr -> string | number -func newExpression(tok Token) AST { - return newASTWithRootToken(ASTKindExpr, tok) -} - -func newEqualExpr(left AST, tok Token) AST { - return newASTWithRootToken(ASTKindEqualExpr, tok, left) -} - -// EqualExprKey will return a LHS value in the equal expr -func EqualExprKey(ast AST) string { - children := ast.GetChildren() - if len(children) == 0 || ast.Kind != ASTKindEqualExpr { - return "" - } - - return string(children[0].Root.Raw()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go deleted file mode 100644 index 6e545b63bc..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -package ini - -import ( - "bytes" -) - -func Fuzz(data []byte) int { - b := bytes.NewReader(data) - - if _, err := Parse(b); err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go deleted file mode 100644 index 3b0ca7afe3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go +++ /dev/null @@ -1,51 +0,0 @@ -package ini - -import ( - "io" - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// OpenFile takes a path to a given file, and will open and parse -// that file. -func OpenFile(path string) (Sections, error) { - f, err := os.Open(path) - if err != nil { - return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) - } - defer f.Close() - - return Parse(f) -} - -// Parse will parse the given file using the shared config -// visitor. -func Parse(f io.Reader) (Sections, error) { - tree, err := ParseAST(f) - if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor() - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} - -// ParseBytes will parse the given bytes and return the parsed sections. 
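For orientation, this is roughly how the SDK's shared-config loader drives this package. The package is internal to aws-sdk-go, so the snippet below can only run from inside the SDK tree; the section accessors follow the package doc's example and the visitor types defined elsewhere in this package:

```go
src := []byte("[profile dev]\nregion = us-west-2\noutput = json\n")

sections, err := ini.ParseBytes(src)
if err != nil {
	panic(err)
}

if section, ok := sections.GetSection("profile dev"); ok {
	fmt.Println(section.String("region")) // us-west-2
}
```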
-func ParseBytes(b []byte) (Sections, error) { - tree, err := ParseASTBytes(b) - if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor() - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go deleted file mode 100644 index 582c024ad1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go +++ /dev/null @@ -1,165 +0,0 @@ -package ini - -import ( - "bytes" - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // ErrCodeUnableToReadFile is used when a file is failed to be - // opened or read from. - ErrCodeUnableToReadFile = "FailedRead" -) - -// TokenType represents the various different tokens types -type TokenType int - -func (t TokenType) String() string { - switch t { - case TokenNone: - return "none" - case TokenLit: - return "literal" - case TokenSep: - return "sep" - case TokenOp: - return "op" - case TokenWS: - return "ws" - case TokenNL: - return "newline" - case TokenComment: - return "comment" - case TokenComma: - return "comma" - default: - return "" - } -} - -// TokenType enums -const ( - TokenNone = TokenType(iota) - TokenLit - TokenSep - TokenComma - TokenOp - TokenWS - TokenNL - TokenComment -) - -type iniLexer struct{} - -// Tokenize will return a list of tokens during lexical analysis of the -// io.Reader. -func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) - } - - return l.tokenize(b) -} - -func (l *iniLexer) tokenize(b []byte) ([]Token, error) { - runes := bytes.Runes(b) - var err error - n := 0 - tokenAmount := countTokens(runes) - tokens := make([]Token, tokenAmount) - count := 0 - - for len(runes) > 0 && count < tokenAmount { - switch { - case isWhitespace(runes[0]): - tokens[count], n, err = newWSToken(runes) - case isComma(runes[0]): - tokens[count], n = newCommaToken(), 1 - case isComment(runes): - tokens[count], n, err = newCommentToken(runes) - case isNewline(runes): - tokens[count], n, err = newNewlineToken(runes) - case isSep(runes): - tokens[count], n, err = newSepToken(runes) - case isOp(runes): - tokens[count], n, err = newOpToken(runes) - default: - tokens[count], n, err = newLitToken(runes) - } - - if err != nil { - return nil, err - } - - count++ - - runes = runes[n:] - } - - return tokens[:count], nil -} - -func countTokens(runes []rune) int { - count, n := 0, 0 - var err error - - for len(runes) > 0 { - switch { - case isWhitespace(runes[0]): - _, n, err = newWSToken(runes) - case isComma(runes[0]): - _, n = newCommaToken(), 1 - case isComment(runes): - _, n, err = newCommentToken(runes) - case isNewline(runes): - _, n, err = newNewlineToken(runes) - case isSep(runes): - _, n, err = newSepToken(runes) - case isOp(runes): - _, n, err = newOpToken(runes) - default: - _, n, err = newLitToken(runes) - } - - if err != nil { - return 0 - } - - count++ - runes = runes[n:] - } - - return count + 1 -} - -// Token indicates a metadata about a given value. 
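tokenize above makes two passes: countTokens sizes the token slice, then the same classification runs again to materialize tokens. A scratch-test sketch of the stream produced for a single assignment line, assuming the classifications shown above (Token's Raw and Type accessors appear in the next hunk):

```go
lex := iniLexer{}
tokens, err := lex.tokenize([]byte("region = us-west-2\n"))
if err != nil {
	panic(err)
}
for _, tok := range tokens {
	fmt.Printf("%s(%q) ", tok.Type(), string(tok.Raw()))
}
// Expected shape: literal("region") ws(" ") op("=") ws(" ")
// literal("us-west-2") newline("\n")
```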
-type Token struct { - t TokenType - ValueType ValueType - base int - raw []rune -} - -var emptyValue = Value{} - -func newToken(t TokenType, raw []rune, v ValueType) Token { - return Token{ - t: t, - raw: raw, - ValueType: v, - } -} - -// Raw return the raw runes that were consumed -func (tok Token) Raw() []rune { - return tok.raw -} - -// Type returns the token type -func (tok Token) Type() TokenType { - return tok.t -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go deleted file mode 100644 index 0ba319491c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ /dev/null @@ -1,350 +0,0 @@ -package ini - -import ( - "fmt" - "io" -) - -// ParseState represents the current state of the parser. -type ParseState uint - -// State enums for the parse table -const ( - InvalidState ParseState = iota - // stmt -> value stmt' - StatementState - // stmt' -> MarkComplete | op stmt - StatementPrimeState - // value -> number | string | boolean | quoted_string - ValueState - // section -> [ section' - OpenScopeState - // section' -> value section_close - SectionState - // section_close -> ] - CloseScopeState - // SkipState will skip (NL WS)+ - SkipState - // SkipTokenState will skip any token and push the previous - // state onto the stack. - SkipTokenState - // comment -> # comment' | ; comment' - // comment' -> MarkComplete | value - CommentState - // MarkComplete state will complete statements and move that - // to the completed AST list - MarkCompleteState - // TerminalState signifies that the tokens have been fully parsed - TerminalState -) - -// parseTable is a state machine to dictate the grammar above. -var parseTable = map[ASTKind]map[TokenType]ParseState{ - ASTKindStart: { - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: TerminalState, - }, - ASTKindCommentStatement: { - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExpr: { - TokenOp: StatementPrimeState, - TokenLit: ValueState, - TokenSep: OpenScopeState, - TokenWS: ValueState, - TokenNL: SkipState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindEqualExpr: { - TokenLit: ValueState, - TokenSep: ValueState, - TokenOp: ValueState, - TokenWS: SkipTokenState, - TokenNL: SkipState, - TokenNone: SkipState, - }, - ASTKindStatement: { - TokenLit: SectionState, - TokenSep: CloseScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExprStatement: { - TokenLit: ValueState, - TokenSep: ValueState, - TokenOp: ValueState, - TokenWS: ValueState, - TokenNL: MarkCompleteState, - TokenComment: CommentState, - TokenNone: TerminalState, - TokenComma: SkipState, - }, - ASTKindSectionStatement: { - TokenLit: SectionState, - TokenOp: SectionState, - TokenSep: CloseScopeState, - TokenWS: SectionState, - TokenNL: SkipTokenState, - }, - ASTKindCompletedSectionStatement: { - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindSkipStatement: { - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: 
CommentState, - TokenNone: TerminalState, - }, -} - -// ParseAST will parse input from an io.Reader using -// an LL(1) parser. -func ParseAST(r io.Reader) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.Tokenize(r) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -// ParseASTBytes will parse input from a byte slice using -// an LL(1) parser. -func ParseASTBytes(b []byte) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.tokenize(b) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -func parse(tokens []Token) ([]AST, error) { - start := Start - stack := newParseStack(3, len(tokens)) - - stack.Push(start) - s := newSkipper() - -loop: - for stack.Len() > 0 { - k := stack.Pop() - - var tok Token - if len(tokens) == 0 { - // this occurs when all the tokens have been processed - // but reduction of what's left on the stack needs to - // occur. - tok = emptyToken - } else { - tok = tokens[0] - } - - step := parseTable[k.Kind][tok.Type()] - if s.ShouldSkip(tok) { - // being in a skip state with no tokens will break out of - // the parse loop since there is nothing left to process. - if len(tokens) == 0 { - break loop - } - // if should skip is true, we skip the tokens until should skip is set to false. - step = SkipTokenState - } - - switch step { - case TerminalState: - // Finished parsing. Push what should be the last - // statement to the stack. If there is anything left - // on the stack, an error in parsing has occurred. - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - break loop - case SkipTokenState: - // When skipping a token, the previous state was popped off the stack. - // To maintain the correct state, the previous state will be pushed - // onto the stack. - stack.Push(k) - case StatementState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - expr := newExpression(tok) - stack.Push(expr) - case StatementPrimeState: - if tok.Type() != TokenOp { - stack.MarkComplete(k) - continue - } - - if k.Kind != ASTKindExpr { - return nil, NewParseError( - fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), - ) - } - - k = trimSpaces(k) - expr := newEqualExpr(k, tok) - stack.Push(expr) - case ValueState: - // ValueState requires the previous state to either be an equal expression - // or an expression statement. - switch k.Kind { - case ASTKindEqualExpr: - // assigning a value to some key - k.AppendChild(newExpression(tok)) - stack.Push(newExprStatement(k)) - case ASTKindExpr: - k.Root.raw = append(k.Root.raw, tok.Raw()...) - stack.Push(k) - case ASTKindExprStatement: - root := k.GetRoot() - children := root.GetChildren() - if len(children) == 0 { - return nil, NewParseError( - fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), - ) - } - - rhs := children[len(children)-1] - - if rhs.Root.ValueType != QuotedStringType { - rhs.Root.ValueType = StringType - rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
- - } - - children[len(children)-1] = rhs - root.SetChildren(children) - - stack.Push(k) - } - case OpenScopeState: - if !runeCompare(tok.Raw(), openBrace) { - return nil, NewParseError("expected '['") - } - // If OpenScopeState is not at the start, we must mark the previous ast as complete - // - // for example: if previous ast was a skip statement; - // we should mark it as complete before we create a new statement - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - stmt := newStatement() - stack.Push(stmt) - case CloseScopeState: - if !runeCompare(tok.Raw(), closeBrace) { - return nil, NewParseError("expected ']'") - } - - k = trimSpaces(k) - stack.Push(newCompletedSectionStatement(k)) - case SectionState: - var stmt AST - - switch k.Kind { - case ASTKindStatement: - // If there are multiple literals inside of a scope declaration, - // then the current token's raw value will be appended to the Name. - // - // This handles cases like [ profile default ] - // - // k will represent a SectionStatement with the children representing - // the label of the section - stmt = newSectionStatement(tok) - case ASTKindSectionStatement: - k.Root.raw = append(k.Root.raw, tok.Raw()...) - stmt = k - default: - return nil, NewParseError( - fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), - ) - } - - stack.Push(stmt) - case MarkCompleteState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - if stack.Len() == 0 { - stack.Push(start) - } - case SkipState: - stack.Push(newSkipStatement(k)) - s.Skip() - case CommentState: - if k.Kind == ASTKindStart { - stack.Push(k) - } else { - stack.MarkComplete(k) - } - - stmt := newCommentStatement(tok) - stack.Push(stmt) - default: - return nil, NewParseError( - fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", - k, tok.Type())) - } - - if len(tokens) > 0 { - tokens = tokens[1:] - } - } - - // this occurs when a statement has not been completed - if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) - } - - // returns a sublist which excludes the start symbol - return stack.List(), nil -} - -// trimSpaces will trim spaces on the left and right hand side of -// the literal. 
-func trimSpaces(k AST) AST { - // trim left hand side of spaces - for i := 0; i < len(k.Root.raw); i++ { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[1:] - i-- - } - - // trim right hand side of spaces - for i := len(k.Root.raw) - 1; i >= 0; i-- { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] - } - - return k -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go deleted file mode 100644 index 34a481afbd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go +++ /dev/null @@ -1,340 +0,0 @@ -package ini - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -var ( - runesTrue = []rune("true") - runesFalse = []rune("false") -) - -var literalValues = [][]rune{ - runesTrue, - runesFalse, -} - -func isBoolValue(b []rune) bool { - for _, lv := range literalValues { - if isCaselessLitValue(lv, b) { - return true - } - } - return false -} - -func isLitValue(want, have []rune) bool { - if len(have) < len(want) { - return false - } - - for i := 0; i < len(want); i++ { - if want[i] != have[i] { - return false - } - } - - return true -} - -// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency. -func isCaselessLitValue(want, have []rune) bool { - if len(have) < len(want) { - return false - } - - for i := 0; i < len(want); i++ { - if want[i] != unicode.ToLower(have[i]) { - return false - } - } - - return true -} - -// isNumberValue will return whether not the leading characters in -// a byte slice is a number. A number is delimited by whitespace or -// the newline token. -// -// A number is defined to be in a binary, octal, decimal (int | float), hex format, -// or in scientific notation. 
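The number grammar described above spans several bases plus scientific notation. Below are a few literals it should classify, per that doc comment; the expected results are an editor's reading of the grammar, not verified output:

```go
for _, s := range []string{"42", "-7", "3.14", "1e10", "0x2A", "0b101", "hello"} {
	fmt.Printf("%-6s => %v\n", s, isNumberValue([]rune(s)))
}
// All but "hello" should report true.
```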
-func isNumberValue(b []rune) bool { - negativeIndex := 0 - helper := numberHelper{} - needDigit := false - - for i := 0; i < len(b); i++ { - negativeIndex++ - - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return false - } - helper.Determine(b[i]) - needDigit = true - continue - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return false - } - negativeIndex = 0 - needDigit = true - continue - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - needDigit = true - if i == 0 { - return false - } - - fallthrough - case '.': - if err := helper.Determine(b[i]); err != nil { - return false - } - needDigit = true - continue - } - - if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { - return !needDigit - } - - if !helper.CorrectByte(b[i]) { - return false - } - needDigit = false - } - - return !needDigit -} - -func isValid(b []rune) (bool, int, error) { - if len(b) == 0 { - // TODO: should probably return an error - return false, 0, nil - } - - return isValidRune(b[0]), 1, nil -} - -func isValidRune(r rune) bool { - return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' -} - -// ValueType is an enum that will signify what type -// the Value is -type ValueType int - -func (v ValueType) String() string { - switch v { - case NoneType: - return "NONE" - case DecimalType: - return "FLOAT" - case IntegerType: - return "INT" - case StringType: - return "STRING" - case BoolType: - return "BOOL" - } - - return "" -} - -// ValueType enums -const ( - NoneType = ValueType(iota) - DecimalType - IntegerType - StringType - QuotedStringType - BoolType -) - -// Value is a union container -type Value struct { - Type ValueType - raw []rune - - integer int64 - decimal float64 - boolean bool - str string -} - -func newValue(t ValueType, base int, raw []rune) (Value, error) { - v := Value{ - Type: t, - raw: raw, - } - var err error - - switch t { - case DecimalType: - v.decimal, err = strconv.ParseFloat(string(raw), 64) - case IntegerType: - if base != 10 { - raw = raw[2:] - } - - v.integer, err = strconv.ParseInt(string(raw), base, 64) - case StringType: - v.str = string(raw) - case QuotedStringType: - v.str = string(raw[1 : len(raw)-1]) - case BoolType: - v.boolean = isCaselessLitValue(runesTrue, v.raw) - } - - // issue 2253 - // - // if the value trying to be parsed is too large, then we will use - // the 'StringType' and raw value instead. - if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { - v.Type = StringType - v.str = string(raw) - err = nil - } - - return v, err -} - -// Append will append values and change the type to a string -// type. -func (v *Value) Append(tok Token) { - r := tok.Raw() - if v.Type != QuotedStringType { - v.Type = StringType - r = tok.raw[1 : len(tok.raw)-1] - } - if tok.Type() != TokenLit { - v.raw = append(v.raw, tok.Raw()...) - } else { - v.raw = append(v.raw, r...) 
- } -} - -func (v Value) String() string { - switch v.Type { - case DecimalType: - return fmt.Sprintf("decimal: %f", v.decimal) - case IntegerType: - return fmt.Sprintf("integer: %d", v.integer) - case StringType: - return fmt.Sprintf("string: %s", string(v.raw)) - case QuotedStringType: - return fmt.Sprintf("quoted string: %s", string(v.raw)) - case BoolType: - return fmt.Sprintf("bool: %t", v.boolean) - default: - return "union not set" - } -} - -func newLitToken(b []rune) (Token, int, error) { - n := 0 - var err error - - token := Token{} - if b[0] == '"' { - n, err = getStringValue(b) - if err != nil { - return token, n, err - } - - token = newToken(TokenLit, b[:n], QuotedStringType) - } else if isNumberValue(b) { - var base int - base, n, err = getNumericalValue(b) - if err != nil { - return token, 0, err - } - - value := b[:n] - vType := IntegerType - if contains(value, '.') || hasExponent(value) { - vType = DecimalType - } - token = newToken(TokenLit, value, vType) - token.base = base - } else if isBoolValue(b) { - n, err = getBoolValue(b) - - token = newToken(TokenLit, b[:n], BoolType) - } else { - n, err = getValue(b) - token = newToken(TokenLit, b[:n], StringType) - } - - return token, n, err -} - -// IntValue returns an integer value -func (v Value) IntValue() int64 { - return v.integer -} - -// FloatValue returns a float value -func (v Value) FloatValue() float64 { - return v.decimal -} - -// BoolValue returns a bool value -func (v Value) BoolValue() bool { - return v.boolean -} - -func isTrimmable(r rune) bool { - switch r { - case '\n', ' ': - return true - } - return false -} - -// StringValue returns the string value -func (v Value) StringValue() string { - switch v.Type { - case StringType: - return strings.TrimFunc(string(v.raw), isTrimmable) - case QuotedStringType: - // preserve all characters in the quotes - return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) - default: - return strings.TrimFunc(string(v.raw), isTrimmable) - } -} - -func contains(runes []rune, c rune) bool { - for i := 0; i < len(runes); i++ { - if runes[i] == c { - return true - } - } - - return false -} - -func runeCompare(v1 []rune, v2 []rune) bool { - if len(v1) != len(v2) { - return false - } - - for i := 0; i < len(v1); i++ { - if v1[i] != v2[i] { - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go deleted file mode 100644 index e52ac399f1..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go +++ /dev/null @@ -1,30 +0,0 @@ -package ini - -func isNewline(b []rune) bool { - if len(b) == 0 { - return false - } - - if b[0] == '\n' { - return true - } - - if len(b) < 2 { - return false - } - - return b[0] == '\r' && b[1] == '\n' -} - -func newNewlineToken(b []rune) (Token, int, error) { - i := 1 - if b[0] == '\r' && isNewline(b[1:]) { - i++ - } - - if !isNewline([]rune(b[:i])) { - return emptyToken, 0, NewParseError("invalid new line token") - } - - return newToken(TokenNL, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go deleted file mode 100644 index a45c0bc566..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go +++ /dev/null @@ -1,152 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" - "strconv" -) - -const ( - none = numberFormat(iota) - binary - octal - decimal - hex - exponent -) - 
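The formats above map onto numeric bases further down in this file (numberHelper.Base reports 2, 8, 16, or 10), and newValue earlier in this diff strips the two-character prefix before calling strconv whenever the base is not 10. A standalone sketch of that convention, using nothing from this package:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	values := []struct {
		raw  string
		base int // what numberHelper.Base would report
	}{
		{"0b101", 2},
		{"0o17", 8},
		{"0x1A", 16},
		{"42", 10},
	}
	for _, v := range values {
		digits := v.raw
		if v.base != 10 {
			digits = digits[2:] // drop "0b"/"0o"/"0x", as newValue does
		}
		n, _ := strconv.ParseInt(digits, v.base, 64)
		fmt.Printf("%-6s (base %2d) -> %d\n", v.raw, v.base, n)
	}
}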
-type numberFormat int - -// numberHelper is used to dictate what format a number is in -// and what to do for negative values. Since -1e-4 is a valid -// number, we cannot just simply check for duplicate negatives. -type numberHelper struct { - numberFormat numberFormat - - negative bool - negativeExponent bool -} - -func (b numberHelper) Exists() bool { - return b.numberFormat != none -} - -func (b numberHelper) IsNegative() bool { - return b.negative || b.negativeExponent -} - -func (b *numberHelper) Determine(c rune) error { - if b.Exists() { - return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) - } - - switch c { - case 'b': - b.numberFormat = binary - case 'o': - b.numberFormat = octal - case 'x': - b.numberFormat = hex - case 'e', 'E': - b.numberFormat = exponent - case '-': - if b.numberFormat != exponent { - b.negative = true - } else { - b.negativeExponent = true - } - case '.': - b.numberFormat = decimal - default: - return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) - } - - return nil -} - -func (b numberHelper) CorrectByte(c rune) bool { - switch { - case b.numberFormat == binary: - if !isBinaryByte(c) { - return false - } - case b.numberFormat == octal: - if !isOctalByte(c) { - return false - } - case b.numberFormat == hex: - if !isHexByte(c) { - return false - } - case b.numberFormat == decimal: - if !isDigit(c) { - return false - } - case b.numberFormat == exponent: - if !isDigit(c) { - return false - } - case b.negativeExponent: - if !isDigit(c) { - return false - } - case b.negative: - if !isDigit(c) { - return false - } - default: - if !isDigit(c) { - return false - } - } - - return true -} - -func (b numberHelper) Base() int { - switch b.numberFormat { - case binary: - return 2 - case octal: - return 8 - case hex: - return 16 - default: - return 10 - } -} - -func (b numberHelper) String() string { - buf := bytes.Buffer{} - i := 0 - - switch b.numberFormat { - case binary: - i++ - buf.WriteString(strconv.Itoa(i) + ": binary format\n") - case octal: - i++ - buf.WriteString(strconv.Itoa(i) + ": octal format\n") - case hex: - i++ - buf.WriteString(strconv.Itoa(i) + ": hex format\n") - case exponent: - i++ - buf.WriteString(strconv.Itoa(i) + ": exponent format\n") - default: - i++ - buf.WriteString(strconv.Itoa(i) + ": integer format\n") - } - - if b.negative { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative format\n") - } - - if b.negativeExponent { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go deleted file mode 100644 index 8a84c7cbe0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go +++ /dev/null @@ -1,39 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - equalOp = []rune("=") - equalColonOp = []rune(":") -) - -func isOp(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '=': - return true - case ':': - return true - default: - return false - } -} - -func newOpToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '=': - tok = newToken(TokenOp, equalOp, NoneType) - case ':': - tok = newToken(TokenOp, equalColonOp, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go deleted file mode 100644 index 4572870193..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go +++ /dev/null @@ -1,43 +0,0 @@ -package ini - -import "fmt" - -const ( - // ErrCodeParseError is returned when a parsing error - // has occurred. - ErrCodeParseError = "INIParseError" -) - -// ParseError is an error which is returned during any part of -// the parsing process. -type ParseError struct { - msg string -} - -// NewParseError will return a new ParseError where message -// is the description of the error. -func NewParseError(message string) *ParseError { - return &ParseError{ - msg: message, - } -} - -// Code will return the ErrCodeParseError -func (err *ParseError) Code() string { - return ErrCodeParseError -} - -// Message returns the error's message -func (err *ParseError) Message() string { - return err.msg -} - -// OrigError return nothing since there will never be any -// original error. -func (err *ParseError) OrigError() error { - return nil -} - -func (err *ParseError) Error() string { - return fmt.Sprintf("%s: %s", err.Code(), err.Message()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go deleted file mode 100644 index 7f01cf7c70..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go +++ /dev/null @@ -1,60 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" -) - -// ParseStack is a stack that contains a container, the stack portion, -// and the list which is the list of ASTs that have been successfully -// parsed. -type ParseStack struct { - top int - container []AST - list []AST - index int -} - -func newParseStack(sizeContainer, sizeList int) ParseStack { - return ParseStack{ - container: make([]AST, sizeContainer), - list: make([]AST, sizeList), - } -} - -// Pop will return and truncate the last container element. 
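Push and Pop below treat the preallocated container as a fixed-capacity LIFO, while MarkComplete appends to the separate list of finished statements. A minimal standalone sketch of the same discipline, with plain ints in place of ASTs:

package main

import "fmt"

func main() {
	container := make([]int, 4) // preallocated, like newParseStack's container
	top := 0

	push := func(v int) { container[top] = v; top++ }
	pop := func() int { top--; return container[top] }

	push(1)
	push(2)
	fmt.Println(pop(), pop()) // 2 1: last in, first out
}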
-func (s *ParseStack) Pop() AST { - s.top-- - return s.container[s.top] -} - -// Push will add the new AST to the container -func (s *ParseStack) Push(ast AST) { - s.container[s.top] = ast - s.top++ -} - -// MarkComplete will append the AST to the list of completed statements -func (s *ParseStack) MarkComplete(ast AST) { - s.list[s.index] = ast - s.index++ -} - -// List will return the completed statements -func (s ParseStack) List() []AST { - return s.list[:s.index] -} - -// Len will return the length of the container -func (s *ParseStack) Len() int { - return s.top -} - -func (s ParseStack) String() string { - buf := bytes.Buffer{} - for i, node := range s.list { - buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go deleted file mode 100644 index f82095ba25..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go +++ /dev/null @@ -1,41 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - emptyRunes = []rune{} -) - -func isSep(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '[', ']': - return true - default: - return false - } -} - -var ( - openBrace = []rune("[") - closeBrace = []rune("]") -) - -func newSepToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '[': - tok = newToken(TokenSep, openBrace, NoneType) - case ']': - tok = newToken(TokenSep, closeBrace, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go deleted file mode 100644 index da7a4049cf..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go +++ /dev/null @@ -1,45 +0,0 @@ -package ini - -// skipper is used to skip certain blocks of an ini file. -// Currently skipper is used to skip nested blocks of ini -// files. See example below -// -// [ foo ] -// nested = ; this section will be skipped -// a=b -// c=d -// bar=baz ; this will be included -type skipper struct { - shouldSkip bool - TokenSet bool - prevTok Token -} - -func newSkipper() skipper { - return skipper{ - prevTok: emptyToken, - } -} - -func (s *skipper) ShouldSkip(tok Token) bool { - // should skip state will be modified only if previous token was new line (NL); - // and the current token is not WhiteSpace (WS). - if s.shouldSkip && - s.prevTok.Type() == TokenNL && - tok.Type() != TokenWS { - s.Continue() - return false - } - s.prevTok = tok - return s.shouldSkip -} - -func (s *skipper) Skip() { - s.shouldSkip = true -} - -func (s *skipper) Continue() { - s.shouldSkip = false - // empty token is assigned as we return to default state, when should skip is false - s.prevTok = emptyToken -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go deleted file mode 100644 index 18f3fe8931..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// Statement is an empty AST mostly used for transitioning states. 
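To make the skipper's behavior above concrete, here is its doc-comment example as a runnable snippet; the annotations restate what that comment and ShouldSkip say, and no claims are made beyond them:

package main

import "fmt"

// The nested block below is the example from the skipper's doc comment.
const skippedExample = `[ foo ]
nested = ; the indented lines below are skipped
	a=b
	c=d
bar=baz ; this assignment is kept
`

func main() {
	// ShouldSkip stays engaged until a newline is followed by a
	// non-whitespace token, so "a=b" and "c=d" never reach the visitor,
	// while "bar=baz" starts a fresh statement and is kept.
	fmt.Print(skippedExample)
}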
-func newStatement() AST { - return newAST(ASTKindStatement, AST{}) -} - -// SectionStatement represents a section AST -func newSectionStatement(tok Token) AST { - return newASTWithRootToken(ASTKindSectionStatement, tok) -} - -// ExprStatement represents a completed expression AST -func newExprStatement(ast AST) AST { - return newAST(ASTKindExprStatement, ast) -} - -// CommentStatement represents a comment in the ini definition. -// -// grammar: -// comment -> #comment' | ;comment' -// comment' -> epsilon | value -func newCommentStatement(tok Token) AST { - return newAST(ASTKindCommentStatement, newExpression(tok)) -} - -// CompletedSectionStatement represents a completed section -func newCompletedSectionStatement(ast AST) AST { - return newAST(ASTKindCompletedSectionStatement, ast) -} - -// SkipStatement is used to skip whole statements -func newSkipStatement(ast AST) AST { - return newAST(ASTKindSkipStatement, ast) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go deleted file mode 100644 index b5480fdeb3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go +++ /dev/null @@ -1,284 +0,0 @@ -package ini - -import ( - "fmt" -) - -// getStringValue will return a quoted string and the amount -// of bytes read -// -// an error will be returned if the string is not properly formatted -func getStringValue(b []rune) (int, error) { - if b[0] != '"' { - return 0, NewParseError("strings must start with '\"'") - } - - endQuote := false - i := 1 - - for ; i < len(b) && !endQuote; i++ { - if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { - endQuote = true - break - } else if escaped { - /*c, err := getEscapedByte(b[i]) - if err != nil { - return 0, err - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) 
- i--*/ - - continue - } - } - - if !endQuote { - return 0, NewParseError("missing '\"' in string value") - } - - return i + 1, nil -} - -// getBoolValue will return a boolean and the amount -// of bytes read -// -// an error will be returned if the boolean is not of a correct -// value -func getBoolValue(b []rune) (int, error) { - if len(b) < 4 { - return 0, NewParseError("invalid boolean value") - } - - n := 0 - for _, lv := range literalValues { - if len(lv) > len(b) { - continue - } - - if isCaselessLitValue(lv, b) { - n = len(lv) - } - } - - if n == 0 { - return 0, NewParseError("invalid boolean value") - } - - return n, nil -} - -// getNumericalValue will return a numerical string, the amount -// of bytes read, and the base of the number -// -// an error will be returned if the number is not of a correct -// value -func getNumericalValue(b []rune) (int, int, error) { - if !isDigit(b[0]) { - return 0, 0, NewParseError("invalid digit value") - } - - i := 0 - helper := numberHelper{} - -loop: - for negativeIndex := 0; i < len(b); i++ { - negativeIndex++ - - if !isDigit(b[i]) { - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return 0, 0, NewParseError("parse error '-'") - } - - n := getNegativeNumber(b[i:]) - i += (n - 1) - helper.Determine(b[i]) - continue - case '.': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - - negativeIndex = 0 - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - if i == 0 && b[i] != '0' { - return 0, 0, NewParseError("incorrect base format, expected leading '0'") - } - - if i != 1 { - return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) - } - - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - default: - if isWhitespace(b[i]) { - break loop - } - - if isNewline(b[i:]) { - break loop - } - - if !(helper.numberFormat == hex && isHexByte(b[i])) { - if i+2 < len(b) && !isNewline(b[i:i+2]) { - return 0, 0, NewParseError("invalid numerical character") - } else if !isNewline([]rune{b[i]}) { - return 0, 0, NewParseError("invalid numerical character") - } - - break loop - } - } - } - } - - return helper.Base(), i, nil -} - -// isDigit will return whether or not something is an integer -func isDigit(b rune) bool { - return b >= '0' && b <= '9' -} - -func hasExponent(v []rune) bool { - return contains(v, 'e') || contains(v, 'E') -} - -func isBinaryByte(b rune) bool { - switch b { - case '0', '1': - return true - default: - return false - } -} - -func isOctalByte(b rune) bool { - switch b { - case '0', '1', '2', '3', '4', '5', '6', '7': - return true - default: - return false - } -} - -func isHexByte(b rune) bool { - if isDigit(b) { - return true - } - return (b >= 'A' && b <= 'F') || - (b >= 'a' && b <= 'f') -} - -func getValue(b []rune) (int, error) { - i := 0 - - for i < len(b) { - if isNewline(b[i:]) { - break - } - - if isOp(b[i:]) { - break - } - - valid, n, err := isValid(b[i:]) - if err != nil { - return 0, err - } - - if !valid { - break - } - - i += n - } - - return i, nil -} - -// getNegativeNumber will return a negative number from a -// byte slice. This will iterate through all characters until -// a non-digit has been found. 
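getNegativeNumber below returns the index one past the leading digit run. A standalone mirror of its logic with sample inputs, to pin down the boundary behavior:

package main

import "fmt"

// negDigits mirrors getNegativeNumber: given input starting with '-', it
// returns how many leading characters make up the negative number.
func negDigits(s string) int {
	if len(s) == 0 || s[0] != '-' {
		return 0
	}
	i := 1
	for ; i < len(s); i++ {
		if s[i] < '0' || s[i] > '9' {
			return i
		}
	}
	return i
}

func main() {
	fmt.Println(negDigits("-123abc")) // 4: '-', '1', '2', '3'
	fmt.Println(negDigits("-42"))     // 3: the whole input is digits
	fmt.Println(negDigits("7"))       // 0: not a negative number
}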
-func getNegativeNumber(b []rune) int { - if b[0] != '-' { - return 0 - } - - i := 1 - for ; i < len(b); i++ { - if !isDigit(b[i]) { - return i - } - } - - return i -} - -// isEscaped will return whether or not the character is an escaped -// character. -func isEscaped(value []rune, b rune) bool { - if len(value) == 0 { - return false - } - - switch b { - case '\'': // single quote - case '"': // quote - case 'n': // newline - case 't': // tab - case '\\': // backslash - default: - return false - } - - return value[len(value)-1] == '\\' -} - -func getEscapedByte(b rune) (rune, error) { - switch b { - case '\'': // single quote - return '\'', nil - case '"': // quote - return '"', nil - case 'n': // newline - return '\n', nil - case 't': // table - return '\t', nil - case '\\': // backslash - return '\\', nil - default: - return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) - } -} - -func removeEscapedCharacters(b []rune) []rune { - for i := 0; i < len(b); i++ { - if isEscaped(b[:i], b[i]) { - c, err := getEscapedByte(b[i]) - if err != nil { - return b - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) - i-- - } - } - - return b -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go deleted file mode 100644 index 081cf43342..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go +++ /dev/null @@ -1,169 +0,0 @@ -package ini - -import ( - "fmt" - "sort" -) - -// Visitor is an interface used by walkers that will -// traverse an array of ASTs. -type Visitor interface { - VisitExpr(AST) error - VisitStatement(AST) error -} - -// DefaultVisitor is used to visit statements and expressions -// and ensure that they are both of the correct format. -// In addition, upon visiting this will build sections and populate -// the Sections field which can be used to retrieve profile -// configuration. -type DefaultVisitor struct { - scope string - Sections Sections -} - -// NewDefaultVisitor return a DefaultVisitor -func NewDefaultVisitor() *DefaultVisitor { - return &DefaultVisitor{ - Sections: Sections{ - container: map[string]Section{}, - }, - } -} - -// VisitExpr visits expressions... -func (v *DefaultVisitor) VisitExpr(expr AST) error { - t := v.Sections.container[v.scope] - if t.values == nil { - t.values = values{} - } - - switch expr.Kind { - case ASTKindExprStatement: - opExpr := expr.GetRoot() - switch opExpr.Kind { - case ASTKindEqualExpr: - children := opExpr.GetChildren() - if len(children) <= 1 { - return NewParseError("unexpected token type") - } - - rhs := children[1] - - // The right-hand value side the equality expression is allowed to contain '[', ']', ':', '=' in the values. - // If the token is not either a literal or one of the token types that identifies those four additional - // tokens then error. - if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) { - return NewParseError("unexpected token type") - } - - key := EqualExprKey(opExpr) - v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) - if err != nil { - return err - } - - t.values[key] = v - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - - v.Sections.container[v.scope] = t - return nil -} - -// VisitStatement visits statements... 
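The escape handling just above (isEscaped, getEscapedByte, removeEscapedCharacters) collapses a backslash followed by one of single quote, double quote, n, t, or backslash into the single escaped character when a quoted value is unwrapped. A standalone sketch, with the expected output computed by hand from those rules:

package main

import "fmt"

func main() {
	// Raw runes as they would appear inside a quoted INI value.
	in := `line1\nline2\t"quoted"`
	// What removeEscapedCharacters produces: \n and \t collapse, while the
	// unescaped quotes pass through untouched.
	out := "line1\nline2\t\"quoted\""
	fmt.Printf("%q -> %q\n", in, out)
}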
-func (v *DefaultVisitor) VisitStatement(stmt AST) error { - switch stmt.Kind { - case ASTKindCompletedSectionStatement: - child := stmt.GetRoot() - if child.Kind != ASTKindSectionStatement { - return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) - } - - name := string(child.Root.Raw()) - v.Sections.container[name] = Section{} - v.scope = name - default: - return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) - } - - return nil -} - -// Sections is a map of Section structures that represent -// a configuration. -type Sections struct { - container map[string]Section -} - -// GetSection will return section p. If section p does not exist, -// false will be returned in the second parameter. -func (t Sections) GetSection(p string) (Section, bool) { - v, ok := t.container[p] - return v, ok -} - -// values represents a map of union values. -type values map[string]Value - -// List will return a list of all sections that were successfully -// parsed. -func (t Sections) List() []string { - keys := make([]string, len(t.container)) - i := 0 - for k := range t.container { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -} - -// Section contains a name and values. This represent -// a sectioned entry in a configuration file. -type Section struct { - Name string - values values -} - -// Has will return whether or not an entry exists in a given section -func (t Section) Has(k string) bool { - _, ok := t.values[k] - return ok -} - -// ValueType will returned what type the union is set to. If -// k was not found, the NoneType will be returned. -func (t Section) ValueType(k string) (ValueType, bool) { - v, ok := t.values[k] - return v.Type, ok -} - -// Bool returns a bool value at k -func (t Section) Bool(k string) bool { - return t.values[k].BoolValue() -} - -// Int returns an integer value at k -func (t Section) Int(k string) int64 { - return t.values[k].IntValue() -} - -// Float64 returns a float value at k -func (t Section) Float64(k string) float64 { - return t.values[k].FloatValue() -} - -// String returns the string value at k -func (t Section) String(k string) string { - _, ok := t.values[k] - if !ok { - return "" - } - return t.values[k].StringValue() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go deleted file mode 100644 index 99915f7f77..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go +++ /dev/null @@ -1,25 +0,0 @@ -package ini - -// Walk will traverse the AST using the v, the Visitor. -func Walk(tree []AST, v Visitor) error { - for _, node := range tree { - switch node.Kind { - case ASTKindExpr, - ASTKindExprStatement: - - if err := v.VisitExpr(node); err != nil { - return err - } - case ASTKindStatement, - ASTKindCompletedSectionStatement, - ASTKindNestedSectionStatement, - ASTKindCompletedNestedSectionStatement: - - if err := v.VisitStatement(node); err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go deleted file mode 100644 index 7ffb4ae06f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -import ( - "unicode" -) - -// isWhitespace will return whether or not the character is -// a whitespace character. -// -// Whitespace is defined as a space or tab. 
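Taken together, the walker and visitor above turn a parsed tree into the Sections map, which is then read through the typed accessors. An in-package sketch; it assumes a tree []AST produced by this package's parser, and the "default" and "region" names are purely illustrative:

// lookupRegion shows how a caller inside this package would drain a parsed
// tree into sections and read a single key back out.
func lookupRegion(tree []AST) (string, error) {
	visitor := NewDefaultVisitor()
	if err := Walk(tree, visitor); err != nil {
		return "", err
	}
	section, ok := visitor.Sections.GetSection("default")
	if !ok {
		return "", nil
	}
	return section.String("region"), nil // "" when the key is absent
}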
-func isWhitespace(c rune) bool { - return unicode.IsSpace(c) && c != '\n' && c != '\r' -} - -func newWSToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if !isWhitespace(b[i]) { - break - } - } - - return newToken(TokenWS, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go deleted file mode 100644 index bf18031a38..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go +++ /dev/null @@ -1,50 +0,0 @@ -package arn - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws/arn" -) - -// AccessPointARN provides representation -type AccessPointARN struct { - arn.ARN - AccessPointName string -} - -// GetARN returns the base ARN for the Access Point resource -func (a AccessPointARN) GetARN() arn.ARN { - return a.ARN -} - -// ParseAccessPointResource attempts to parse the ARN's resource as an -// AccessPoint resource. -// -// Supported Access point resource format: -// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName} -// - example: arn.aws.s3.us-west-2.012345678901:accesspoint/myaccesspoint -// -func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) { - if len(a.Region) == 0 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "region not set"} - } - if len(a.AccountID) == 0 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"} - } - if len(resParts) == 0 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} - } - if len(resParts) > 1 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"} - } - - resID := resParts[0] - if len(strings.TrimSpace(resID)) == 0 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} - } - - return AccessPointARN{ - ARN: a, - AccessPointName: resID, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go deleted file mode 100644 index 216c4baabf..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go +++ /dev/null @@ -1,94 +0,0 @@ -package arn - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws/arn" -) - -var supportedServiceARN = []string{ - "s3", - "s3-outposts", - "s3-object-lambda", -} - -func isSupportedServiceARN(service string) bool { - for _, name := range supportedServiceARN { - if name == service { - return true - } - } - return false -} - -// Resource provides the interfaces abstracting ARNs of specific resource -// types. -type Resource interface { - GetARN() arn.ARN - String() string -} - -// ResourceParser provides the function for parsing an ARN's resource -// component into a typed resource. -type ResourceParser func(arn.ARN) (Resource, error) - -// ParseResource parses an AWS ARN into a typed resource for the S3 API. 
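For orientation, this is how the pieces of this file fit together on the documented access point example. An in-package sketch only: ParseResource below performs this dispatch generically, and SplitResource is defined a little further down in the same file:

func parseAccessPointExample() (AccessPointARN, error) {
	a, err := arn.Parse("arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint")
	if err != nil {
		return AccessPointARN{}, err
	}
	// Resource is "accesspoint/myaccesspoint"; SplitResource yields
	// ["accesspoint", "myaccesspoint"] and the type prefix is dropped.
	parts := SplitResource(a.Resource)
	return ParseAccessPointResource(a, parts[1:])
}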
-func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) { - a, err := arn.Parse(s) - if err != nil { - return nil, err - } - - if len(a.Partition) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "partition not set"} - } - - if !isSupportedServiceARN(a.Service) { - return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} - } - - if strings.HasPrefix(a.Region, "fips-") || strings.HasSuffix(a.Region, "-fips") { - return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"} - } - - if len(a.Resource) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "resource not set"} - } - - return resParser(a) -} - -// SplitResource splits the resource components by the ARN resource delimiters. -func SplitResource(v string) []string { - var parts []string - var offset int - - for offset <= len(v) { - idx := strings.IndexAny(v[offset:], "/:") - if idx < 0 { - parts = append(parts, v[offset:]) - break - } - parts = append(parts, v[offset:idx+offset]) - offset += idx + 1 - } - - return parts -} - -// IsARN returns whether the given string is an ARN -func IsARN(s string) bool { - return arn.IsARN(s) -} - -// InvalidARNError provides the error for an invalid ARN error. -type InvalidARNError struct { - ARN arn.ARN - Reason string -} - -// Error returns a string denoting the occurred InvalidARNError -func (e InvalidARNError) Error() string { - return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go deleted file mode 100644 index 1e10f8de00..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go +++ /dev/null @@ -1,126 +0,0 @@ -package arn - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws/arn" -) - -// OutpostARN interface that should be satisfied by outpost ARNs -type OutpostARN interface { - Resource - GetOutpostID() string -} - -// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format -// and return a specific OutpostARN type -// -// Currently supported outpost ARN formats: -// * Outpost AccessPoint ARN format: -// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} -// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint -// -// * Outpost Bucket ARN format: -// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName} -// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket -// -// Other outpost ARN formats may be supported and added in the future. -// -func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) { - if len(a.Region) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "region not set"} - } - - if len(a.AccountID) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "account-id not set"} - } - - // verify if outpost id is present and valid - if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} - } - - // verify possible resource type exists - if len(resParts) < 3 { - return nil, InvalidARNError{ - ARN: a, Reason: "incomplete outpost resource type. 
Expected bucket or access-point resource to be present", - } - } - - // Since we know this is a OutpostARN fetch outpostID - outpostID := strings.TrimSpace(resParts[0]) - - switch resParts[1] { - case "accesspoint": - accesspointARN, err := ParseAccessPointResource(a, resParts[2:]) - if err != nil { - return OutpostAccessPointARN{}, err - } - return OutpostAccessPointARN{ - AccessPointARN: accesspointARN, - OutpostID: outpostID, - }, nil - - case "bucket": - bucketName, err := parseBucketResource(a, resParts[2:]) - if err != nil { - return nil, err - } - return OutpostBucketARN{ - ARN: a, - BucketName: bucketName, - OutpostID: outpostID, - }, nil - - default: - return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"} - } -} - -// OutpostAccessPointARN represents outpost access point ARN. -type OutpostAccessPointARN struct { - AccessPointARN - OutpostID string -} - -// GetOutpostID returns the outpost id of outpost access point arn -func (o OutpostAccessPointARN) GetOutpostID() string { - return o.OutpostID -} - -// OutpostBucketARN represents the outpost bucket ARN. -type OutpostBucketARN struct { - arn.ARN - BucketName string - OutpostID string -} - -// GetOutpostID returns the outpost id of outpost bucket arn -func (o OutpostBucketARN) GetOutpostID() string { - return o.OutpostID -} - -// GetARN retrives the base ARN from outpost bucket ARN resource -func (o OutpostBucketARN) GetARN() arn.ARN { - return o.ARN -} - -// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the -// bucket resource id. -// -// parseBucketResource only parses the bucket resource id. -// -func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) { - if len(resParts) == 0 { - return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} - } - if len(resParts) > 1 { - return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"} - } - - bucketName = strings.TrimSpace(resParts[0]) - if len(bucketName) == 0 { - return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} - } - return bucketName, err -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go deleted file mode 100644 index 513154cc0e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go +++ /dev/null @@ -1,15 +0,0 @@ -package arn - -// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service -type S3ObjectLambdaARN interface { - Resource - - isS3ObjectLambdasARN() -} - -// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type -type S3ObjectLambdaAccessPointARN struct { - AccessPointARN -} - -func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go deleted file mode 100644 index 4290ff6760..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go +++ /dev/null @@ -1,202 +0,0 @@ -package s3shared - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/s3shared/arn" -) - -const ( - invalidARNErrorErrCode = "InvalidARNError" - configurationErrorErrCode = "ConfigurationError" -) - -// InvalidARNError denotes the error for Invalid ARN -type InvalidARNError struct { - message string - 
resource arn.Resource - origErr error -} - -// Error returns the InvalidARNError -func (e InvalidARNError) Error() string { - var extra string - if e.resource != nil { - extra = "ARN: " + e.resource.String() - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) -} - -// Code returns the invalid ARN error code -func (e InvalidARNError) Code() string { - return invalidARNErrorErrCode -} - -// Message returns the message for Invalid ARN error -func (e InvalidARNError) Message() string { - return e.message -} - -// OrigErr is the original error wrapped by Invalid ARN Error -func (e InvalidARNError) OrigErr() error { - return e.origErr -} - -// NewInvalidARNError denotes invalid arn error -func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError { - return InvalidARNError{ - message: "invalid ARN", - origErr: err, - resource: resource, - } -} - -// NewInvalidARNWithCustomEndpointError ARN not supported for custom clients endpoints -func NewInvalidARNWithCustomEndpointError(resource arn.Resource, err error) InvalidARNError { - return InvalidARNError{ - message: "resource ARN not supported with custom client endpoints", - origErr: err, - resource: resource, - } -} - -// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition -func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError { - return InvalidARNError{ - message: "resource ARN not supported for the target ARN partition", - origErr: err, - resource: resource, - } -} - -// NewInvalidARNWithFIPSError ARN not supported for FIPS region -// -// Deprecated: FIPS will not appear in the ARN region component. -func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError { - return InvalidARNError{ - message: "resource ARN not supported for FIPS region", - resource: resource, - origErr: err, - } -} - -// ConfigurationError is used to denote a client configuration error -type ConfigurationError struct { - message string - resource arn.Resource - clientPartitionID string - clientRegion string - origErr error -} - -// Error returns the Configuration error string -func (e ConfigurationError) Error() string { - extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", - e.resource, e.clientPartitionID, e.clientRegion) - - return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) -} - -// Code returns configuration error's error-code -func (e ConfigurationError) Code() string { - return configurationErrorErrCode -} - -// Message returns the configuration error message -func (e ConfigurationError) Message() string { - return e.message -} - -// OrigErr is the original error wrapped by Configuration Error -func (e ConfigurationError) OrigErr() error { - return e.origErr -} - -// NewClientPartitionMismatchError stub -func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client partition does not match provided ARN partition", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientRegionMismatchError denotes cross region access error -func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client region does not match provided ARN region", - origErr: err, - resource: resource, - clientPartitionID: 
clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewFailedToResolveEndpointError denotes endpoint resolving error -func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "endpoint resolver failed to find an endpoint for the provided ARN region", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access -func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client configured for fips but cross-region resource ARN provided", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewFIPSConfigurationError denotes a configuration error when a client or request is configured for FIPS -func NewFIPSConfigurationError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "use of ARN is not supported when client or request is configured for FIPS", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate -func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client configured for S3 Accelerate but is not supported with resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request -func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack -func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client configured for S3 Dual-stack but is not supported with resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go deleted file mode 100644 index ef43d6c589..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go +++ /dev/null @@ -1,45 +0,0 @@ -package s3shared - -import ( - "github.com/aws/aws-sdk-go/aws" - awsarn "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/s3shared/arn" -) - -// ResourceRequest represents the request and arn resource -type ResourceRequest struct { - Resource arn.Resource - Request 
*request.Request
-}
-
-// ARN returns the resource ARN
-func (r ResourceRequest) ARN() awsarn.ARN {
-	return r.Resource.GetARN()
-}
-
-// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set
-func (r ResourceRequest) AllowCrossRegion() bool {
-	return aws.BoolValue(r.Request.Config.S3UseARNRegion)
-}
-
-// IsCrossPartition returns true if client is configured for another partition than
-// the partition that resource ARN region resolves to.
-func (r ResourceRequest) IsCrossPartition() bool {
-	return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
-}
-
-// IsCrossRegion returns true if ARN region is different than client configured region
-func (r ResourceRequest) IsCrossRegion() bool {
-	return IsCrossRegion(r.Request, r.Resource.GetARN().Region)
-}
-
-// HasCustomEndpoint returns true if custom client endpoint is provided
-func (r ResourceRequest) HasCustomEndpoint() bool {
-	return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
-}
-
-// IsCrossRegion returns true if request signing region is not same as configured region
-func IsCrossRegion(req *request.Request, otherRegion string) bool {
-	return req.ClientInfo.SigningRegion != otherRegion
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go
deleted file mode 100644
index 0b9b0dfce0..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package s3err
-
-import (
-	"fmt"
-
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/request"
-)
-
-// RequestFailure provides additional S3 specific metadata for the request
-// failure.
-type RequestFailure struct {
-	awserr.RequestFailure
-
-	hostID string
-}
-
-// NewRequestFailure returns a request failure error decorated with S3
-// specific metadata.
-func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
-	return &RequestFailure{RequestFailure: err, hostID: hostID}
-}
-
-func (r RequestFailure) Error() string {
-	extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
-		r.StatusCode(), r.RequestID(), r.hostID)
-	return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
-}
-func (r RequestFailure) String() string {
-	return r.Error()
-}
-
-// HostID returns the HostID request response value.
-func (r RequestFailure) HostID() string {
-	return r.hostID
-}
-
-// RequestFailureWrapperHandler returns a handler to wrap an
-// awserr.RequestFailure with the S3 request ID 2 from the response.
-func RequestFailureWrapperHandler() request.NamedHandler {
-	return request.NamedHandler{
-		Name: "awssdk.s3.errorHandler",
-		Fn: func(req *request.Request) {
-			reqErr, ok := req.Error.(awserr.RequestFailure)
-			if !ok || reqErr == nil {
-				return
-			}
-
-			hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
-			if req.Error == nil {
-				return
-			}
-
-			req.Error = NewRequestFailure(reqErr, hostID)
-		},
-	}
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
deleted file mode 100644
index 6c443988bb..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package sdkio
-
-const (
-	// Byte is 8 bits
-	Byte int64 = 1
-	// KibiByte (KiB) is 1024 Bytes
-	KibiByte = Byte * 1024
-	// MebiByte (MiB) is 1024 KiB
-	MebiByte = KibiByte * 1024
-	// GibiByte (GiB) is 1024 MiB
-	GibiByte = MebiByte * 1024
-)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
deleted file mode 100644
index 037a998c4c..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !go1.7
-// +build !go1.7
-
-package sdkio
-
-// Copy of Go 1.7 io package's Seeker constants.
-const (
-	SeekStart   = 0 // seek relative to the origin of the file
-	SeekCurrent = 1 // seek relative to the current offset
-	SeekEnd     = 2 // seek relative to the end
-)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
deleted file mode 100644
index 65e7c60c4d..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build go1.7
-// +build go1.7
-
-package sdkio
-
-import "io"
-
-// Alias for Go 1.7 io package Seeker constants
-const (
-	SeekStart   = io.SeekStart   // seek relative to the origin of the file
-	SeekCurrent = io.SeekCurrent // seek relative to the current offset
-	SeekEnd     = io.SeekEnd     // seek relative to the end
-)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
deleted file mode 100644
index a845287832..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build go1.10
-// +build go1.10
-
-package sdkmath
-
-import "math"
-
-// Round returns the nearest integer, rounding half away from zero.
-//
-// Special cases are:
-//	Round(±0) = ±0
-//	Round(±Inf) = ±Inf
-//	Round(NaN) = NaN
-func Round(x float64) float64 {
-	return math.Round(x)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
deleted file mode 100644
index a3ae3e5dba..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
+++ /dev/null
@@ -1,57 +0,0 @@
-//go:build !go1.10
-// +build !go1.10
-
-package sdkmath
-
-import "math"
-
-// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
-// Go version prior to Go 1.10.
-const (
-	uvone    = 0x3FF0000000000000
-	mask     = 0x7FF
-	shift    = 64 - 11 - 1
-	bias     = 1023
-	signMask = 1 << 63
-	fracMask = 1<<shift - 1
-)
-
-// Round returns the nearest integer, rounding half away from zero.
-//
-// Special cases are:
-//	Round(±0) = ±0
-//	Round(±Inf) = ±Inf
-//	Round(NaN) = NaN
-func Round(x float64) float64 {
-	// Round is a faster implementation of:
-	//
-	// func Round(x float64) float64 {
-	//   t := Trunc(x)
-	//   if Abs(x-t) >= 0.5 {
-	//     return t + Copysign(1, x)
-	//   }
-	//   return t
-	// }
-	bits := math.Float64bits(x)
-	e := uint(bits>>shift) & mask
-	if e < bias {
-		// Round abs(x) < 1 including denormals.
- bits &= signMask // +-0 - if e == bias-1 { - bits |= uvone // +-1 - } - } else if e < bias+shift { - // Round any abs(x) >= 1 containing a fractional component [0,1). - // - // Numbers with larger exponents are returned unchanged since they - // must be either an integer, infinity, or NaN. - const half = 1 << (shift - 1) - e -= bias - bits += half >> e - bits &^= fracMask >> e - } - return math.Float64frombits(bits) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go deleted file mode 100644 index 0c9802d877..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go +++ /dev/null @@ -1,29 +0,0 @@ -package sdkrand - -import ( - "math/rand" - "sync" - "time" -) - -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// SeededRand is a new RNG using a thread safe implementation of rand.Source -var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go deleted file mode 100644 index 4bae66ceed..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build go1.6 -// +build go1.6 - -package sdkrand - -import "math/rand" - -// Read provides the stub for math.Rand.Read method support for go version's -// 1.6 and greater. -func Read(r *rand.Rand, p []byte) (int, error) { - return r.Read(p) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go deleted file mode 100644 index 3a6ab88251..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build !go1.6 -// +build !go1.6 - -package sdkrand - -import "math/rand" - -// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 -func Read(r *rand.Rand, p []byte) (n int, err error) { - // Copy of Go standard libraries math package's read function not added to - // standard library until Go 1.6. - var pos int8 - var val int64 - for n = 0; n < len(p); n++ { - if pos == 0 { - val = r.Int63() - pos = 7 - } - p[n] = byte(val) - val >>= 8 - pos-- - } - - return n, err -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go deleted file mode 100644 index 38ea61afea..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go +++ /dev/null @@ -1,23 +0,0 @@ -package sdkuri - -import ( - "path" - "strings" -) - -// PathJoin will join the elements of the path delimited by the "/" -// character. Similar to path.Join with the exception the trailing "/" -// character is preserved if present. -func PathJoin(elems ...string) string { - if len(elems) == 0 { - return "" - } - - hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") - str := path.Join(elems...) 
- if hasTrailing && str != "/" { - str += "/" - } - - return str -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go deleted file mode 100644 index 7da8a49ce5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go +++ /dev/null @@ -1,12 +0,0 @@ -package shareddefaults - -const ( - // ECSCredsProviderEnvVar is an environmental variable key used to - // determine which path needs to be hit. - ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" -) - -// ECSContainerCredentialsURI is the endpoint to retrieve container -// credentials. This can be overridden to test to ensure the credential process -// is behaving correctly. -var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go deleted file mode 100644 index 34fea49ca8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go +++ /dev/null @@ -1,46 +0,0 @@ -package shareddefaults - -import ( - "os/user" - "path/filepath" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "credentials") -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "config") -} - -// UserHomeDir returns the home directory for the user the process is -// running under. 
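The only difference between PathJoin above and the standard library is the preserved trailing slash, which matters for S3-style key prefixes. A quick standalone comparison:

package main

import (
	"fmt"
	"path"
)

func main() {
	// The standard library cleans away a trailing separator...
	fmt.Println(path.Join("bucket", "key/")) // bucket/key
	// ...whereas sdkuri.PathJoin("bucket", "key/") returns "bucket/key/".
}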
-func UserHomeDir() string { - var home string - - home = userHomeDir() - if len(home) > 0 { - return home - } - - currUser, _ := user.Current() - if currUser != nil { - home = currUser.HomeDir - } - - return home -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go deleted file mode 100644 index eb298ae0fc..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build !go1.12 -// +build !go1.12 - -package shareddefaults - -import ( - "os" - "runtime" -) - -func userHomeDir() string { - if runtime.GOOS == "windows" { // Windows - return os.Getenv("USERPROFILE") - } - - // *nix - return os.Getenv("HOME") -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go deleted file mode 100644 index 51541b5087..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build go1.12 -// +build go1.12 - -package shareddefaults - -import ( - "os" -) - -func userHomeDir() string { - home, _ := os.UserHomeDir() - return home -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go deleted file mode 100644 index d008ae27cb..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go +++ /dev/null @@ -1,11 +0,0 @@ -package strings - -import ( - "strings" -) - -// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, -// under Unicode case-folding. -func HasPrefixFold(s, prefix string) bool { - return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go deleted file mode 100644 index 14ad0c5891..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package singleflight provides a duplicate function call suppression -// mechanism. -package singleflight - -import "sync" - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - - // These fields are written once before the WaitGroup is done - // and are only read after the WaitGroup is done. - val interface{} - err error - - // forgotten indicates whether Forget was called with this call's key - // while the call was still in flight. - forgotten bool - - // These fields are read and written with the singleflight - // mutex held before the WaitGroup is done, and are read but - // not written after the WaitGroup is done. - dups int - chans []chan<- Result -} - -// Group represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Result holds the results of Do, so they can be passed -// on a channel. 
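Group.Do below is the whole point of the package: concurrent callers with the same key share one execution and one result. The vendored import path here is internal, so this standalone sketch uses golang.org/x/sync/singleflight, which shares the same Do signature and semantics:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var calls int32
	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, _, _ := g.Do("creds", func() (interface{}, error) {
				atomic.AddInt32(&calls, 1)
				return "token", nil
			})
			_ = v // every caller sees "token"
		}()
	}
	wg.Wait()
	// Overlapping callers are coalesced, so this prints fewer than 10
	// (exactly 1 when all ten goroutines are in flight at once).
	fmt.Println("executions:", atomic.LoadInt32(&calls))
}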
-type Result struct { - Val interface{} - Err error - Shared bool -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.mu.Unlock() - c.wg.Wait() - return c.val, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - g.doCall(c, key, fn) - return c.val, c.err, c.dups > 0 -} - -// DoChan is like Do but returns a channel that will receive the -// results when they are ready. -func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { - ch := make(chan Result, 1) - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - c.chans = append(c.chans, ch) - g.mu.Unlock() - return ch - } - c := &call{chans: []chan<- Result{ch}} - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - go g.doCall(c, key, fn) - - return ch -} - -// doCall handles the single call for a key. -func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { - c.val, c.err = fn() - c.wg.Done() - - g.mu.Lock() - if !c.forgotten { - delete(g.m, key) - } - for _, ch := range c.chans { - ch <- Result{c.val, c.err, c.dups > 0} - } - g.mu.Unlock() -} - -// Forget tells the singleflight to forget about a key. Future calls -// to Do for this key will call the function rather than waiting for -// an earlier call to complete. -func (g *Group) Forget(key string) { - g.mu.Lock() - if c, ok := g.m[key]; ok { - c.forgotten = true - } - delete(g.m, key) - g.mu.Unlock() -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go deleted file mode 100644 index e045f38d83..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go +++ /dev/null @@ -1,53 +0,0 @@ -package checksum - -import ( - "crypto/md5" - "encoding/base64" - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -const contentMD5Header = "Content-Md5" - -// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that -// require it. -func AddBodyContentMD5Handler(r *request.Request) { - // if Content-MD5 header is already present, return - if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 { - return - } - - // if S3DisableContentMD5Validation flag is set, return - if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { - return - } - - // if request is presigned, return - if r.IsPresigned() { - return - } - - // if body is not seekable, return - if !aws.IsReaderSeekable(r.Body) { - if r.Config.Logger != nil { - r.Config.Logger.Log(fmt.Sprintf( - "Unable to compute Content-MD5 for unseekable body, S3.%s", - r.Operation.Name)) - } - return - } - - h := md5.New() - - if _, err := aws.CopySeekableBody(h, r.Body); err != nil { - r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) - return - } - - // encode the md5 checksum in base64 and set the request header. 
- v := base64.StdEncoding.EncodeToString(h.Sum(nil)) - r.HTTPRequest.Header.Set(contentMD5Header, v) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go deleted file mode 100644 index 151054971a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go +++ /dev/null @@ -1,144 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "strconv" -) - -type decodedMessage struct { - rawMessage - Headers decodedHeaders `json:"headers"` -} -type jsonMessage struct { - Length json.Number `json:"total_length"` - HeadersLen json.Number `json:"headers_length"` - PreludeCRC json.Number `json:"prelude_crc"` - Headers decodedHeaders `json:"headers"` - Payload []byte `json:"payload"` - CRC json.Number `json:"message_crc"` -} - -func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { - var jsonMsg jsonMessage - if err = json.Unmarshal(b, &jsonMsg); err != nil { - return err - } - - d.Length, err = numAsUint32(jsonMsg.Length) - if err != nil { - return err - } - d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) - if err != nil { - return err - } - d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) - if err != nil { - return err - } - d.Headers = jsonMsg.Headers - d.Payload = jsonMsg.Payload - d.CRC, err = numAsUint32(jsonMsg.CRC) - if err != nil { - return err - } - - return nil -} - -func (d *decodedMessage) MarshalJSON() ([]byte, error) { - jsonMsg := jsonMessage{ - Length: json.Number(strconv.Itoa(int(d.Length))), - HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), - PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), - Headers: d.Headers, - Payload: d.Payload, - CRC: json.Number(strconv.Itoa(int(d.CRC))), - } - - return json.Marshal(jsonMsg) -} - -func numAsUint32(n json.Number) (uint32, error) { - v, err := n.Int64() - if err != nil { - return 0, fmt.Errorf("failed to get int64 json number, %v", err) - } - - return uint32(v), nil -} - -func (d decodedMessage) Message() Message { - return Message{ - Headers: Headers(d.Headers), - Payload: d.Payload, - } -} - -type decodedHeaders Headers - -func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { - var jsonHeaders []struct { - Name string `json:"name"` - Type valueType `json:"type"` - Value interface{} `json:"value"` - } - - decoder := json.NewDecoder(bytes.NewReader(b)) - decoder.UseNumber() - if err := decoder.Decode(&jsonHeaders); err != nil { - return err - } - - var headers Headers - for _, h := range jsonHeaders { - value, err := valueFromType(h.Type, h.Value) - if err != nil { - return err - } - headers.Set(h.Name, value) - } - *hs = decodedHeaders(headers) - - return nil -} - -func valueFromType(typ valueType, val interface{}) (Value, error) { - switch typ { - case trueValueType: - return BoolValue(true), nil - case falseValueType: - return BoolValue(false), nil - case int8ValueType: - v, err := val.(json.Number).Int64() - return Int8Value(int8(v)), err - case int16ValueType: - v, err := val.(json.Number).Int64() - return Int16Value(int16(v)), err - case int32ValueType: - v, err := val.(json.Number).Int64() - return Int32Value(int32(v)), err - case int64ValueType: - v, err := val.(json.Number).Int64() - return Int64Value(v), err - case bytesValueType: - v, err := base64.StdEncoding.DecodeString(val.(string)) - return BytesValue(v), err - case stringValueType: - v, err := base64.StdEncoding.DecodeString(val.(string)) - return 
StringValue(string(v)), err - case timestampValueType: - v, err := val.(json.Number).Int64() - return TimestampValue(timeFromEpochMilli(v)), err - case uuidValueType: - v, err := base64.StdEncoding.DecodeString(val.(string)) - var tv UUIDValue - copy(tv[:], v) - return tv, err - default: - panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go deleted file mode 100644 index 4743393918..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go +++ /dev/null @@ -1,216 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "hash/crc32" - "io" - - "github.com/aws/aws-sdk-go/aws" -) - -// Decoder provides decoding of an Event Stream messages. -type Decoder struct { - r io.Reader - logger aws.Logger -} - -// NewDecoder initializes and returns a Decoder for decoding event -// stream messages from the reader provided. -func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder { - d := &Decoder{ - r: r, - } - - for _, opt := range opts { - opt(d) - } - - return d -} - -// DecodeWithLogger adds a logger to be used by the decoder when decoding -// stream events. -func DecodeWithLogger(logger aws.Logger) func(*Decoder) { - return func(d *Decoder) { - d.logger = logger - } -} - -// Decode attempts to decode a single message from the event stream reader. -// Will return the event stream message, or error if Decode fails to read -// the message from the stream. -func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { - reader := d.r - if d.logger != nil { - debugMsgBuf := bytes.NewBuffer(nil) - reader = io.TeeReader(reader, debugMsgBuf) - defer func() { - logMessageDecode(d.logger, debugMsgBuf, m, err) - }() - } - - m, err = Decode(reader, payloadBuf) - - return m, err -} - -// Decode attempts to decode a single message from the event stream reader. -// Will return the event stream message, or error if Decode fails to read -// the message from the reader. 
-func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { - crc := crc32.New(crc32IEEETable) - hashReader := io.TeeReader(reader, crc) - - prelude, err := decodePrelude(hashReader, crc) - if err != nil { - return Message{}, err - } - - if prelude.HeadersLen > 0 { - lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) - m.Headers, err = decodeHeaders(lr) - if err != nil { - return Message{}, err - } - } - - if payloadLen := prelude.PayloadLen(); payloadLen > 0 { - buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) - if err != nil { - return Message{}, err - } - m.Payload = buf - } - - msgCRC := crc.Sum32() - if err := validateCRC(reader, msgCRC); err != nil { - return Message{}, err - } - - return m, nil -} - -func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { - w := bytes.NewBuffer(nil) - defer func() { logger.Log(w.String()) }() - - fmt.Fprintf(w, "Raw message:\n%s\n", - hex.Dump(msgBuf.Bytes())) - - if decodeErr != nil { - fmt.Fprintf(w, "Decode error: %v\n", decodeErr) - return - } - - rawMsg, err := msg.rawMessage() - if err != nil { - fmt.Fprintf(w, "failed to create raw message, %v\n", err) - return - } - - decodedMsg := decodedMessage{ - rawMessage: rawMsg, - Headers: decodedHeaders(msg.Headers), - } - - fmt.Fprintf(w, "Decoded message:\n") - encoder := json.NewEncoder(w) - if err := encoder.Encode(decodedMsg); err != nil { - fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) - } -} - -func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { - var p messagePrelude - - var err error - p.Length, err = decodeUint32(r) - if err != nil { - return messagePrelude{}, err - } - - p.HeadersLen, err = decodeUint32(r) - if err != nil { - return messagePrelude{}, err - } - - if err := p.ValidateLens(); err != nil { - return messagePrelude{}, err - } - - preludeCRC := crc.Sum32() - if err := validateCRC(r, preludeCRC); err != nil { - return messagePrelude{}, err - } - - p.PreludeCRC = preludeCRC - - return p, nil -} - -func decodePayload(buf []byte, r io.Reader) ([]byte, error) { - w := bytes.NewBuffer(buf[0:0]) - - _, err := io.Copy(w, r) - return w.Bytes(), err -} - -func decodeUint8(r io.Reader) (uint8, error) { - type byteReader interface { - ReadByte() (byte, error) - } - - if br, ok := r.(byteReader); ok { - v, err := br.ReadByte() - return uint8(v), err - } - - var b [1]byte - _, err := io.ReadFull(r, b[:]) - return uint8(b[0]), err -} -func decodeUint16(r io.Reader) (uint16, error) { - var b [2]byte - bs := b[:] - _, err := io.ReadFull(r, bs) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint16(bs), nil -} -func decodeUint32(r io.Reader) (uint32, error) { - var b [4]byte - bs := b[:] - _, err := io.ReadFull(r, bs) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint32(bs), nil -} -func decodeUint64(r io.Reader) (uint64, error) { - var b [8]byte - bs := b[:] - _, err := io.ReadFull(r, bs) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint64(bs), nil -} - -func validateCRC(r io.Reader, expect uint32) error { - msgCRC, err := decodeUint32(r) - if err != nil { - return err - } - - if msgCRC != expect { - return ChecksumError{} - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go deleted file mode 100644 index ffade3bc0c..0000000000 --- 
a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go +++ /dev/null @@ -1,162 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "hash/crc32" - "io" - - "github.com/aws/aws-sdk-go/aws" -) - -// Encoder provides EventStream message encoding. -type Encoder struct { - w io.Writer - logger aws.Logger - - headersBuf *bytes.Buffer -} - -// NewEncoder initializes and returns an Encoder to encode Event Stream -// messages to an io.Writer. -func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder { - e := &Encoder{ - w: w, - headersBuf: bytes.NewBuffer(nil), - } - - for _, opt := range opts { - opt(e) - } - - return e -} - -// EncodeWithLogger adds a logger to be used by the encode when decoding -// stream events. -func EncodeWithLogger(logger aws.Logger) func(*Encoder) { - return func(d *Encoder) { - d.logger = logger - } -} - -// Encode encodes a single EventStream message to the io.Writer the Encoder -// was created with. An error is returned if writing the message fails. -func (e *Encoder) Encode(msg Message) (err error) { - e.headersBuf.Reset() - - writer := e.w - if e.logger != nil { - encodeMsgBuf := bytes.NewBuffer(nil) - writer = io.MultiWriter(writer, encodeMsgBuf) - defer func() { - logMessageEncode(e.logger, encodeMsgBuf, msg, err) - }() - } - - if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { - return err - } - - crc := crc32.New(crc32IEEETable) - hashWriter := io.MultiWriter(writer, crc) - - headersLen := uint32(e.headersBuf.Len()) - payloadLen := uint32(len(msg.Payload)) - - if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { - return err - } - - if headersLen > 0 { - if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { - return err - } - } - - if payloadLen > 0 { - if _, err = hashWriter.Write(msg.Payload); err != nil { - return err - } - } - - msgCRC := crc.Sum32() - return binary.Write(writer, binary.BigEndian, msgCRC) -} - -func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { - w := bytes.NewBuffer(nil) - defer func() { logger.Log(w.String()) }() - - fmt.Fprintf(w, "Message to encode:\n") - encoder := json.NewEncoder(w) - if err := encoder.Encode(msg); err != nil { - fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) - } - - if encodeErr != nil { - fmt.Fprintf(w, "Encode error: %v\n", encodeErr) - return - } - - fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) -} - -func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { - p := messagePrelude{ - Length: minMsgLen + headersLen + payloadLen, - HeadersLen: headersLen, - } - if err := p.ValidateLens(); err != nil { - return err - } - - err := binaryWriteFields(w, binary.BigEndian, - p.Length, - p.HeadersLen, - ) - if err != nil { - return err - } - - p.PreludeCRC = crc.Sum32() - err = binary.Write(w, binary.BigEndian, p.PreludeCRC) - if err != nil { - return err - } - - return nil -} - -// EncodeHeaders writes the header values to the writer encoded in the event -// stream format. Returns an error if a header fails to encode. 
-func EncodeHeaders(w io.Writer, headers Headers) error { - for _, h := range headers { - hn := headerName{ - Len: uint8(len(h.Name)), - } - copy(hn.Name[:hn.Len], h.Name) - if err := hn.encode(w); err != nil { - return err - } - - if err := h.Value.encode(w); err != nil { - return err - } - } - - return nil -} - -func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { - for _, v := range vs { - if err := binary.Write(w, order, v); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go deleted file mode 100644 index 5481ef3079..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go +++ /dev/null @@ -1,23 +0,0 @@ -package eventstream - -import "fmt" - -// LengthError provides the error for items being larger than a maximum length. -type LengthError struct { - Part string - Want int - Have int - Value interface{} -} - -func (e LengthError) Error() string { - return fmt.Sprintf("%s length invalid, %d/%d, %v", - e.Part, e.Want, e.Have, e.Value) -} - -// ChecksumError provides the error for message checksum invalidation errors. -type ChecksumError struct{} - -func (e ChecksumError) Error() string { - return "message checksum mismatch" -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go deleted file mode 100644 index 0a63340e41..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go +++ /dev/null @@ -1,81 +0,0 @@ -package eventstreamapi - -import ( - "fmt" - "sync" -) - -// InputWriterCloseErrorCode is used to denote an error occurred -// while closing the event stream input writer. -const InputWriterCloseErrorCode = "EventStreamInputWriterCloseError" - -type messageError struct { - code string - msg string -} - -func (e messageError) Code() string { - return e.code -} - -func (e messageError) Message() string { - return e.msg -} - -func (e messageError) Error() string { - return fmt.Sprintf("%s: %s", e.code, e.msg) -} - -func (e messageError) OrigErr() error { - return nil -} - -// OnceError wraps the behavior of recording an error -// once and signal on a channel when this has occurred. -// Signaling is done by closing of the channel. -// -// Type is safe for concurrent usage. -type OnceError struct { - mu sync.RWMutex - err error - ch chan struct{} -} - -// NewOnceError return a new OnceError -func NewOnceError() *OnceError { - return &OnceError{ - ch: make(chan struct{}, 1), - } -} - -// Err acquires a read-lock and returns an -// error if one has been set. -func (e *OnceError) Err() error { - e.mu.RLock() - err := e.err - e.mu.RUnlock() - - return err -} - -// SetError acquires a write-lock and will set -// the underlying error value if one has not been set. -func (e *OnceError) SetError(err error) { - if err == nil { - return - } - - e.mu.Lock() - if e.err == nil { - e.err = err - close(e.ch) - } - e.mu.Unlock() -} - -// ErrorSet returns a channel that will be used to signal -// that an error has been set. This channel will be closed -// when the error value has been set for OnceError. 
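
Tying together the encode.go and decode.go files deleted above: an Encoder frames a Message with a length prelude and two CRC-32 checksums, and a Decoder verifies both while reading the frame back. A minimal round-trip sketch (illustrative only — eventstream is a private SDK package):

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	var buf bytes.Buffer

	// Build a message with one header and a payload, then encode it.
	msg := eventstream.Message{Payload: []byte(`{"ok":true}`)}
	msg.Headers.Set(":message-type", eventstream.StringValue("event"))

	if err := eventstream.NewEncoder(&buf).Encode(msg); err != nil {
		panic(err)
	}

	// Decode it back. Both the prelude CRC and the message CRC are checked
	// during Decode; a corrupted byte would surface as a ChecksumError.
	out, err := eventstream.NewDecoder(&buf).Decode(nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s %s\n", out.Headers.Get(":message-type"), out.Payload)
}
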
-func (e *OnceError) ErrorSet() <-chan struct{} { - return e.ch -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go deleted file mode 100644 index 0e4aa42f3e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go +++ /dev/null @@ -1,173 +0,0 @@ -package eventstreamapi - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/eventstream" -) - -// Unmarshaler provides the interface for unmarshaling a EventStream -// message into a SDK type. -type Unmarshaler interface { - UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error -} - -// EventReader provides reading from the EventStream of an reader. -type EventReader struct { - decoder *eventstream.Decoder - - unmarshalerForEventType func(string) (Unmarshaler, error) - payloadUnmarshaler protocol.PayloadUnmarshaler - - payloadBuf []byte -} - -// NewEventReader returns a EventReader built from the reader and unmarshaler -// provided. Use ReadStream method to start reading from the EventStream. -func NewEventReader( - decoder *eventstream.Decoder, - payloadUnmarshaler protocol.PayloadUnmarshaler, - unmarshalerForEventType func(string) (Unmarshaler, error), -) *EventReader { - return &EventReader{ - decoder: decoder, - payloadUnmarshaler: payloadUnmarshaler, - unmarshalerForEventType: unmarshalerForEventType, - payloadBuf: make([]byte, 10*1024), - } -} - -// ReadEvent attempts to read a message from the EventStream and return the -// unmarshaled event value that the message is for. -// -// For EventStream API errors check if the returned error satisfies the -// awserr.Error interface to get the error's Code and Message components. -// -// EventUnmarshalers called with EventStream messages must take copies of the -// message's Payload. The payload will is reused between events read. -func (r *EventReader) ReadEvent() (event interface{}, err error) { - msg, err := r.decoder.Decode(r.payloadBuf) - if err != nil { - return nil, err - } - defer func() { - // Reclaim payload buffer for next message read. - r.payloadBuf = msg.Payload[0:0] - }() - - typ, err := GetHeaderString(msg, MessageTypeHeader) - if err != nil { - return nil, err - } - - switch typ { - case EventMessageType: - return r.unmarshalEventMessage(msg) - case ExceptionMessageType: - return nil, r.unmarshalEventException(msg) - case ErrorMessageType: - return nil, r.unmarshalErrorMessage(msg) - default: - return nil, &UnknownMessageTypeError{ - Type: typ, Message: msg.Clone(), - } - } -} - -// UnknownMessageTypeError provides an error when a message is received from -// the stream, but the reader is unable to determine what kind of message it is. 
-type UnknownMessageTypeError struct { - Type string - Message eventstream.Message -} - -func (e *UnknownMessageTypeError) Error() string { - return "unknown eventstream message type, " + e.Type -} - -func (r *EventReader) unmarshalEventMessage( - msg eventstream.Message, -) (event interface{}, err error) { - eventType, err := GetHeaderString(msg, EventTypeHeader) - if err != nil { - return nil, err - } - - ev, err := r.unmarshalerForEventType(eventType) - if err != nil { - return nil, err - } - - err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) - if err != nil { - return nil, err - } - - return ev, nil -} - -func (r *EventReader) unmarshalEventException( - msg eventstream.Message, -) (err error) { - eventType, err := GetHeaderString(msg, ExceptionTypeHeader) - if err != nil { - return err - } - - ev, err := r.unmarshalerForEventType(eventType) - if err != nil { - return err - } - - err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) - if err != nil { - return err - } - - var ok bool - err, ok = ev.(error) - if !ok { - err = messageError{ - code: "SerializationError", - msg: fmt.Sprintf( - "event stream exception %s mapped to non-error %T, %v", - eventType, ev, ev, - ), - } - } - - return err -} - -func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) { - var msgErr messageError - - msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader) - if err != nil { - return err - } - - msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader) - if err != nil { - return err - } - - return msgErr -} - -// GetHeaderString returns the value of the header as a string. If the header -// is not set or the value is not a string an error will be returned. -func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { - headerVal := msg.Headers.Get(headerName) - if headerVal == nil { - return "", fmt.Errorf("error header %s not present", headerName) - } - - v, ok := headerVal.Get().(string) - if !ok { - return "", fmt.Errorf("error header value is not a string, %T", headerVal) - } - - return v, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go deleted file mode 100644 index e46b8acc20..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go +++ /dev/null @@ -1,23 +0,0 @@ -package eventstreamapi - -// EventStream headers with specific meaning to async API functionality. -const ( - ChunkSignatureHeader = `:chunk-signature` // chunk signature for message - DateHeader = `:date` // Date header for signature - - // Message header and values - MessageTypeHeader = `:message-type` // Identifies type of message. - EventMessageType = `event` - ErrorMessageType = `error` - ExceptionMessageType = `exception` - - // Message Events - EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". 
- - // Message Error - ErrorCodeHeader = `:error-code` - ErrorMessageHeader = `:error-message` - - // Message Exception - ExceptionTypeHeader = `:exception-type` -) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go deleted file mode 100644 index 3a7ba5cd57..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go +++ /dev/null @@ -1,123 +0,0 @@ -package eventstreamapi - -import ( - "bytes" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol/eventstream" -) - -var timeNow = time.Now - -// StreamSigner defines an interface for the implementation of signing of event stream payloads -type StreamSigner interface { - GetSignature(headers, payload []byte, date time.Time) ([]byte, error) -} - -// SignEncoder envelopes event stream messages -// into an event stream message payload with included -// signature headers using the provided signer and encoder. -type SignEncoder struct { - signer StreamSigner - encoder Encoder - bufEncoder *BufferEncoder - - closeErr error - closed bool -} - -// NewSignEncoder returns a new SignEncoder using the provided stream signer and -// event stream encoder. -func NewSignEncoder(signer StreamSigner, encoder Encoder) *SignEncoder { - // TODO: Need to pass down logging - - return &SignEncoder{ - signer: signer, - encoder: encoder, - bufEncoder: NewBufferEncoder(), - } -} - -// Close encodes a final event stream signing envelope with an empty event stream -// payload. This final end-frame is used to mark the conclusion of the stream. -func (s *SignEncoder) Close() error { - if s.closed { - return s.closeErr - } - - if err := s.encode([]byte{}); err != nil { - if strings.Contains(err.Error(), "on closed pipe") { - return nil - } - - s.closeErr = err - s.closed = true - return s.closeErr - } - - return nil -} - -// Encode takes the provided message and add envelopes the message -// with the required signature. -func (s *SignEncoder) Encode(msg eventstream.Message) error { - payload, err := s.bufEncoder.Encode(msg) - if err != nil { - return err - } - - return s.encode(payload) -} - -func (s SignEncoder) encode(payload []byte) error { - date := timeNow() - - var msg eventstream.Message - msg.Headers.Set(DateHeader, eventstream.TimestampValue(date)) - msg.Payload = payload - - var headers bytes.Buffer - if err := eventstream.EncodeHeaders(&headers, msg.Headers); err != nil { - return err - } - - sig, err := s.signer.GetSignature(headers.Bytes(), msg.Payload, date) - if err != nil { - return err - } - - msg.Headers.Set(ChunkSignatureHeader, eventstream.BytesValue(sig)) - - return s.encoder.Encode(msg) -} - -// BufferEncoder is a utility that provides a buffered -// event stream encoder -type BufferEncoder struct { - encoder Encoder - buffer *bytes.Buffer -} - -// NewBufferEncoder returns a new BufferEncoder initialized -// with a 1024 byte buffer. -func NewBufferEncoder() *BufferEncoder { - buf := bytes.NewBuffer(make([]byte, 1024)) - return &BufferEncoder{ - encoder: eventstream.NewEncoder(buf), - buffer: buf, - } -} - -// Encode returns the encoded message as a byte slice. -// The returned byte slice will be modified on the next encode call -// and should not be held onto. 
-func (e *BufferEncoder) Encode(msg eventstream.Message) ([]byte, error) { - e.buffer.Reset() - - if err := e.encoder.Encode(msg); err != nil { - return nil, err - } - - return e.buffer.Bytes(), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go deleted file mode 100644 index 433bb1630a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go +++ /dev/null @@ -1,129 +0,0 @@ -package eventstreamapi - -import ( - "fmt" - "io" - "sync" - - "github.com/aws/aws-sdk-go/aws" -) - -// StreamWriter provides concurrent safe writing to an event stream. -type StreamWriter struct { - eventWriter *EventWriter - stream chan eventWriteAsyncReport - - done chan struct{} - closeOnce sync.Once - err *OnceError - - streamCloser io.Closer -} - -// NewStreamWriter returns a StreamWriter for the event writer, and stream -// closer provided. -func NewStreamWriter(eventWriter *EventWriter, streamCloser io.Closer) *StreamWriter { - w := &StreamWriter{ - eventWriter: eventWriter, - streamCloser: streamCloser, - stream: make(chan eventWriteAsyncReport), - done: make(chan struct{}), - err: NewOnceError(), - } - go w.writeStream() - - return w -} - -// Close terminates the writers ability to write new events to the stream. Any -// future call to Send will fail with an error. -func (w *StreamWriter) Close() error { - w.closeOnce.Do(w.safeClose) - return w.Err() -} - -func (w *StreamWriter) safeClose() { - close(w.done) -} - -// ErrorSet returns a channel which will be closed -// if an error occurs. -func (w *StreamWriter) ErrorSet() <-chan struct{} { - return w.err.ErrorSet() -} - -// Err returns any error that occurred while attempting to write an event to the -// stream. -func (w *StreamWriter) Err() error { - return w.err.Err() -} - -// Send writes a single event to the stream returning an error if the write -// failed. -// -// Send may be called concurrently. Events will be written to the stream -// safely. 
-func (w *StreamWriter) Send(ctx aws.Context, event Marshaler) error { - if err := w.Err(); err != nil { - return err - } - - resultCh := make(chan error) - wrapped := eventWriteAsyncReport{ - Event: event, - Result: resultCh, - } - - select { - case w.stream <- wrapped: - case <-ctx.Done(): - return ctx.Err() - case <-w.done: - return fmt.Errorf("stream closed, unable to send event") - } - - select { - case err := <-resultCh: - return err - case <-ctx.Done(): - return ctx.Err() - case <-w.done: - return fmt.Errorf("stream closed, unable to send event") - } -} - -func (w *StreamWriter) writeStream() { - defer w.Close() - - for { - select { - case wrapper := <-w.stream: - err := w.eventWriter.WriteEvent(wrapper.Event) - wrapper.ReportResult(w.done, err) - if err != nil { - w.err.SetError(err) - return - } - - case <-w.done: - if err := w.streamCloser.Close(); err != nil { - w.err.SetError(err) - } - return - } - } -} - -type eventWriteAsyncReport struct { - Event Marshaler - Result chan<- error -} - -func (e eventWriteAsyncReport) ReportResult(cancel <-chan struct{}, err error) bool { - select { - case e.Result <- err: - return true - case <-cancel: - return false - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go deleted file mode 100644 index 4bf2b27b2b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -package eventstreamapi - -import "github.com/aws/aws-sdk-go/aws/request" - -// ApplyHTTPTransportFixes is a no-op for Go 1.18 and above. -func ApplyHTTPTransportFixes(r *request.Request) { -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go deleted file mode 100644 index 2ee2c36fd3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build !go1.18 -// +build !go1.18 - -package eventstreamapi - -import "github.com/aws/aws-sdk-go/aws/request" - -// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event -// stream functionality. Go 1.15 through 1.17 HTTP client could hang forever -// when an HTTP/2 connection failed with an non-200 status code and err. Using -// Expect 100-Continue, allows the HTTP client to gracefully handle the non-200 -// status code, and close the connection. -// -// This is a no-op for Go 1.18 and above. -func ApplyHTTPTransportFixes(r *request.Request) { - r.Handlers.Sign.PushBack(func(r *request.Request) { - r.HTTPRequest.Header.Set("Expect", "100-Continue") - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go deleted file mode 100644 index 7d7a793528..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go +++ /dev/null @@ -1,63 +0,0 @@ -package eventstreamapi - -import ( - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/eventstream" -) - -// Marshaler provides a marshaling interface for event types to event stream -// messages. 
-type Marshaler interface { - MarshalEvent(protocol.PayloadMarshaler) (eventstream.Message, error) -} - -// Encoder is an stream encoder that will encode an event stream message for -// the transport. -type Encoder interface { - Encode(eventstream.Message) error -} - -// EventWriter provides a wrapper around the underlying event stream encoder -// for an io.WriteCloser. -type EventWriter struct { - encoder Encoder - payloadMarshaler protocol.PayloadMarshaler - eventTypeFor func(Marshaler) (string, error) -} - -// NewEventWriter returns a new event stream writer, that will write to the -// writer provided. Use the WriteEvent method to write an event to the stream. -func NewEventWriter(encoder Encoder, pm protocol.PayloadMarshaler, eventTypeFor func(Marshaler) (string, error), -) *EventWriter { - return &EventWriter{ - encoder: encoder, - payloadMarshaler: pm, - eventTypeFor: eventTypeFor, - } -} - -// WriteEvent writes an event to the stream. Returns an error if the event -// fails to marshal into a message, or writing to the underlying writer fails. -func (w *EventWriter) WriteEvent(event Marshaler) error { - msg, err := w.marshal(event) - if err != nil { - return err - } - - return w.encoder.Encode(msg) -} - -func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { - eventType, err := w.eventTypeFor(event) - if err != nil { - return eventstream.Message{}, err - } - - msg, err := event.MarshalEvent(w.payloadMarshaler) - if err != nil { - return eventstream.Message{}, err - } - - msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) - return msg, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go deleted file mode 100644 index f6f8c5674e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go +++ /dev/null @@ -1,175 +0,0 @@ -package eventstream - -import ( - "encoding/binary" - "fmt" - "io" -) - -// Headers are a collection of EventStream header values. -type Headers []Header - -// Header is a single EventStream Key Value header pair. -type Header struct { - Name string - Value Value -} - -// Set associates the name with a value. If the header name already exists in -// the Headers the value will be replaced with the new one. -func (hs *Headers) Set(name string, value Value) { - var i int - for ; i < len(*hs); i++ { - if (*hs)[i].Name == name { - (*hs)[i].Value = value - return - } - } - - *hs = append(*hs, Header{ - Name: name, Value: value, - }) -} - -// Get returns the Value associated with the header. Nil is returned if the -// value does not exist. -func (hs Headers) Get(name string) Value { - for i := 0; i < len(hs); i++ { - if h := hs[i]; h.Name == name { - return h.Value - } - } - return nil -} - -// Del deletes the value in the Headers if it exists. 
-func (hs *Headers) Del(name string) { - for i := 0; i < len(*hs); i++ { - if (*hs)[i].Name == name { - copy((*hs)[i:], (*hs)[i+1:]) - (*hs) = (*hs)[:len(*hs)-1] - } - } -} - -// Clone returns a deep copy of the headers -func (hs Headers) Clone() Headers { - o := make(Headers, 0, len(hs)) - for _, h := range hs { - o.Set(h.Name, h.Value) - } - return o -} - -func decodeHeaders(r io.Reader) (Headers, error) { - hs := Headers{} - - for { - name, err := decodeHeaderName(r) - if err != nil { - if err == io.EOF { - // EOF while getting header name means no more headers - break - } - return nil, err - } - - value, err := decodeHeaderValue(r) - if err != nil { - return nil, err - } - - hs.Set(name, value) - } - - return hs, nil -} - -func decodeHeaderName(r io.Reader) (string, error) { - var n headerName - - var err error - n.Len, err = decodeUint8(r) - if err != nil { - return "", err - } - - name := n.Name[:n.Len] - if _, err := io.ReadFull(r, name); err != nil { - return "", err - } - - return string(name), nil -} - -func decodeHeaderValue(r io.Reader) (Value, error) { - var raw rawValue - - typ, err := decodeUint8(r) - if err != nil { - return nil, err - } - raw.Type = valueType(typ) - - var v Value - - switch raw.Type { - case trueValueType: - v = BoolValue(true) - case falseValueType: - v = BoolValue(false) - case int8ValueType: - var tv Int8Value - err = tv.decode(r) - v = tv - case int16ValueType: - var tv Int16Value - err = tv.decode(r) - v = tv - case int32ValueType: - var tv Int32Value - err = tv.decode(r) - v = tv - case int64ValueType: - var tv Int64Value - err = tv.decode(r) - v = tv - case bytesValueType: - var tv BytesValue - err = tv.decode(r) - v = tv - case stringValueType: - var tv StringValue - err = tv.decode(r) - v = tv - case timestampValueType: - var tv TimestampValue - err = tv.decode(r) - v = tv - case uuidValueType: - var tv UUIDValue - err = tv.decode(r) - v = tv - default: - panic(fmt.Sprintf("unknown value type %d", raw.Type)) - } - - // Error could be EOF, let caller deal with it - return v, err -} - -const maxHeaderNameLen = 255 - -type headerName struct { - Len uint8 - Name [maxHeaderNameLen]byte -} - -func (v headerName) encode(w io.Writer) error { - if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { - return err - } - - _, err := w.Write(v.Name[:v.Len]) - return err -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go deleted file mode 100644 index 9f509d8f6d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go +++ /dev/null @@ -1,506 +0,0 @@ -package eventstream - -import ( - "encoding/base64" - "encoding/binary" - "fmt" - "io" - "strconv" - "time" -) - -const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 - -// valueType is the EventStream header value type. 
-type valueType uint8 - -// Header value types -const ( - trueValueType valueType = iota - falseValueType - int8ValueType // Byte - int16ValueType // Short - int32ValueType // Integer - int64ValueType // Long - bytesValueType - stringValueType - timestampValueType - uuidValueType -) - -func (t valueType) String() string { - switch t { - case trueValueType: - return "bool" - case falseValueType: - return "bool" - case int8ValueType: - return "int8" - case int16ValueType: - return "int16" - case int32ValueType: - return "int32" - case int64ValueType: - return "int64" - case bytesValueType: - return "byte_array" - case stringValueType: - return "string" - case timestampValueType: - return "timestamp" - case uuidValueType: - return "uuid" - default: - return fmt.Sprintf("unknown value type %d", uint8(t)) - } -} - -type rawValue struct { - Type valueType - Len uint16 // Only set for variable length slices - Value []byte // byte representation of value, BigEndian encoding. -} - -func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { - return binaryWriteFields(w, binary.BigEndian, - r.Type, - v, - ) -} - -func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { - binary.Write(w, binary.BigEndian, r.Type) - - _, err := w.Write(v) - return err -} - -func (r rawValue) encodeBytes(w io.Writer, v []byte) error { - if len(v) > maxHeaderValueLen { - return LengthError{ - Part: "header value", - Want: maxHeaderValueLen, Have: len(v), - Value: v, - } - } - r.Len = uint16(len(v)) - - err := binaryWriteFields(w, binary.BigEndian, - r.Type, - r.Len, - ) - if err != nil { - return err - } - - _, err = w.Write(v) - return err -} - -func (r rawValue) encodeString(w io.Writer, v string) error { - if len(v) > maxHeaderValueLen { - return LengthError{ - Part: "header value", - Want: maxHeaderValueLen, Have: len(v), - Value: v, - } - } - r.Len = uint16(len(v)) - - type stringWriter interface { - WriteString(string) (int, error) - } - - err := binaryWriteFields(w, binary.BigEndian, - r.Type, - r.Len, - ) - if err != nil { - return err - } - - if sw, ok := w.(stringWriter); ok { - _, err = sw.WriteString(v) - } else { - _, err = w.Write([]byte(v)) - } - - return err -} - -func decodeFixedBytesValue(r io.Reader, buf []byte) error { - _, err := io.ReadFull(r, buf) - return err -} - -func decodeBytesValue(r io.Reader) ([]byte, error) { - var raw rawValue - var err error - raw.Len, err = decodeUint16(r) - if err != nil { - return nil, err - } - - buf := make([]byte, raw.Len) - _, err = io.ReadFull(r, buf) - if err != nil { - return nil, err - } - - return buf, nil -} - -func decodeStringValue(r io.Reader) (string, error) { - v, err := decodeBytesValue(r) - return string(v), err -} - -// Value represents the abstract header value. -type Value interface { - Get() interface{} - String() string - valueType() valueType - encode(io.Writer) error -} - -// An BoolValue provides eventstream encoding, and representation -// of a Go bool value. -type BoolValue bool - -// Get returns the underlying type -func (v BoolValue) Get() interface{} { - return bool(v) -} - -// valueType returns the EventStream header value type value. -func (v BoolValue) valueType() valueType { - if v { - return trueValueType - } - return falseValueType -} - -func (v BoolValue) String() string { - return strconv.FormatBool(bool(v)) -} - -// encode encodes the BoolValue into an eventstream binary value -// representation. 
-func (v BoolValue) encode(w io.Writer) error { - return binary.Write(w, binary.BigEndian, v.valueType()) -} - -// An Int8Value provides eventstream encoding, and representation of a Go -// int8 value. -type Int8Value int8 - -// Get returns the underlying value. -func (v Int8Value) Get() interface{} { - return int8(v) -} - -// valueType returns the EventStream header value type value. -func (Int8Value) valueType() valueType { - return int8ValueType -} - -func (v Int8Value) String() string { - return fmt.Sprintf("0x%02x", int8(v)) -} - -// encode encodes the Int8Value into an eventstream binary value -// representation. -func (v Int8Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeScalar(w, v) -} - -func (v *Int8Value) decode(r io.Reader) error { - n, err := decodeUint8(r) - if err != nil { - return err - } - - *v = Int8Value(n) - return nil -} - -// An Int16Value provides eventstream encoding, and representation of a Go -// int16 value. -type Int16Value int16 - -// Get returns the underlying value. -func (v Int16Value) Get() interface{} { - return int16(v) -} - -// valueType returns the EventStream header value type value. -func (Int16Value) valueType() valueType { - return int16ValueType -} - -func (v Int16Value) String() string { - return fmt.Sprintf("0x%04x", int16(v)) -} - -// encode encodes the Int16Value into an eventstream binary value -// representation. -func (v Int16Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - return raw.encodeScalar(w, v) -} - -func (v *Int16Value) decode(r io.Reader) error { - n, err := decodeUint16(r) - if err != nil { - return err - } - - *v = Int16Value(n) - return nil -} - -// An Int32Value provides eventstream encoding, and representation of a Go -// int32 value. -type Int32Value int32 - -// Get returns the underlying value. -func (v Int32Value) Get() interface{} { - return int32(v) -} - -// valueType returns the EventStream header value type value. -func (Int32Value) valueType() valueType { - return int32ValueType -} - -func (v Int32Value) String() string { - return fmt.Sprintf("0x%08x", int32(v)) -} - -// encode encodes the Int32Value into an eventstream binary value -// representation. -func (v Int32Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - return raw.encodeScalar(w, v) -} - -func (v *Int32Value) decode(r io.Reader) error { - n, err := decodeUint32(r) - if err != nil { - return err - } - - *v = Int32Value(n) - return nil -} - -// An Int64Value provides eventstream encoding, and representation of a Go -// int64 value. -type Int64Value int64 - -// Get returns the underlying value. -func (v Int64Value) Get() interface{} { - return int64(v) -} - -// valueType returns the EventStream header value type value. -func (Int64Value) valueType() valueType { - return int64ValueType -} - -func (v Int64Value) String() string { - return fmt.Sprintf("0x%016x", int64(v)) -} - -// encode encodes the Int64Value into an eventstream binary value -// representation. -func (v Int64Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - return raw.encodeScalar(w, v) -} - -func (v *Int64Value) decode(r io.Reader) error { - n, err := decodeUint64(r) - if err != nil { - return err - } - - *v = Int64Value(n) - return nil -} - -// An BytesValue provides eventstream encoding, and representation of a Go -// byte slice. -type BytesValue []byte - -// Get returns the underlying value. 
-func (v BytesValue) Get() interface{} { - return []byte(v) -} - -// valueType returns the EventStream header value type value. -func (BytesValue) valueType() valueType { - return bytesValueType -} - -func (v BytesValue) String() string { - return base64.StdEncoding.EncodeToString([]byte(v)) -} - -// encode encodes the BytesValue into an eventstream binary value -// representation. -func (v BytesValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeBytes(w, []byte(v)) -} - -func (v *BytesValue) decode(r io.Reader) error { - buf, err := decodeBytesValue(r) - if err != nil { - return err - } - - *v = BytesValue(buf) - return nil -} - -// An StringValue provides eventstream encoding, and representation of a Go -// string. -type StringValue string - -// Get returns the underlying value. -func (v StringValue) Get() interface{} { - return string(v) -} - -// valueType returns the EventStream header value type value. -func (StringValue) valueType() valueType { - return stringValueType -} - -func (v StringValue) String() string { - return string(v) -} - -// encode encodes the StringValue into an eventstream binary value -// representation. -func (v StringValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeString(w, string(v)) -} - -func (v *StringValue) decode(r io.Reader) error { - s, err := decodeStringValue(r) - if err != nil { - return err - } - - *v = StringValue(s) - return nil -} - -// An TimestampValue provides eventstream encoding, and representation of a Go -// timestamp. -type TimestampValue time.Time - -// Get returns the underlying value. -func (v TimestampValue) Get() interface{} { - return time.Time(v) -} - -// valueType returns the EventStream header value type value. -func (TimestampValue) valueType() valueType { - return timestampValueType -} - -func (v TimestampValue) epochMilli() int64 { - nano := time.Time(v).UnixNano() - msec := nano / int64(time.Millisecond) - return msec -} - -func (v TimestampValue) String() string { - msec := v.epochMilli() - return strconv.FormatInt(msec, 10) -} - -// encode encodes the TimestampValue into an eventstream binary value -// representation. -func (v TimestampValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - msec := v.epochMilli() - return raw.encodeScalar(w, msec) -} - -func (v *TimestampValue) decode(r io.Reader) error { - n, err := decodeUint64(r) - if err != nil { - return err - } - - *v = TimestampValue(timeFromEpochMilli(int64(n))) - return nil -} - -// MarshalJSON implements the json.Marshaler interface -func (v TimestampValue) MarshalJSON() ([]byte, error) { - return []byte(v.String()), nil -} - -func timeFromEpochMilli(t int64) time.Time { - secs := t / 1e3 - msec := t % 1e3 - return time.Unix(secs, msec*int64(time.Millisecond)).UTC() -} - -// An UUIDValue provides eventstream encoding, and representation of a UUID -// value. -type UUIDValue [16]byte - -// Get returns the underlying value. -func (v UUIDValue) Get() interface{} { - return v[:] -} - -// valueType returns the EventStream header value type value. -func (UUIDValue) valueType() valueType { - return uuidValueType -} - -func (v UUIDValue) String() string { - return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:]) -} - -// encode encodes the UUIDValue into an eventstream binary value -// representation. 
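
The TimestampValue type above round-trips a time.Time through milliseconds since the Unix epoch, dropping sub-millisecond precision. The same arithmetic in plain, self-contained Go (timeFromEpochMilli is unexported, so it is inlined here):

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2024, time.January, 2, 3, 4, 5, 600*int(time.Millisecond), time.UTC)

	// Encode: nanoseconds -> whole milliseconds (sub-millisecond precision is dropped).
	msec := t.UnixNano() / int64(time.Millisecond)

	// Decode: split milliseconds into seconds plus remainder, mirroring timeFromEpochMilli.
	back := time.Unix(msec/1e3, (msec%1e3)*int64(time.Millisecond)).UTC()

	fmt.Println(msec, back) // 1704164645600 2024-01-02 03:04:05.6 +0000 UTC
}
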
-func (v UUIDValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeFixedSlice(w, v[:]) -} - -func (v *UUIDValue) decode(r io.Reader) error { - tv := (*v)[:] - return decodeFixedBytesValue(r, tv) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go deleted file mode 100644 index f7427da039..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go +++ /dev/null @@ -1,117 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/binary" - "hash/crc32" -) - -const preludeLen = 8 -const preludeCRCLen = 4 -const msgCRCLen = 4 -const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen -const maxPayloadLen = 1024 * 1024 * 16 // 16MB -const maxHeadersLen = 1024 * 128 // 128KB -const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen - -var crc32IEEETable = crc32.MakeTable(crc32.IEEE) - -// A Message provides the eventstream message representation. -type Message struct { - Headers Headers - Payload []byte -} - -func (m *Message) rawMessage() (rawMessage, error) { - var raw rawMessage - - if len(m.Headers) > 0 { - var headers bytes.Buffer - if err := EncodeHeaders(&headers, m.Headers); err != nil { - return rawMessage{}, err - } - raw.Headers = headers.Bytes() - raw.HeadersLen = uint32(len(raw.Headers)) - } - - raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen - - hash := crc32.New(crc32IEEETable) - binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) - raw.PreludeCRC = hash.Sum32() - - binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) - - if raw.HeadersLen > 0 { - hash.Write(raw.Headers) - } - - // Read payload bytes and update hash for it as well. - if len(m.Payload) > 0 { - raw.Payload = m.Payload - hash.Write(raw.Payload) - } - - raw.CRC = hash.Sum32() - - return raw, nil -} - -// Clone returns a deep copy of the message. -func (m Message) Clone() Message { - var payload []byte - if m.Payload != nil { - payload = make([]byte, len(m.Payload)) - copy(payload, m.Payload) - } - - return Message{ - Headers: m.Headers.Clone(), - Payload: payload, - } -} - -type messagePrelude struct { - Length uint32 - HeadersLen uint32 - PreludeCRC uint32 -} - -func (p messagePrelude) PayloadLen() uint32 { - return p.Length - p.HeadersLen - minMsgLen -} - -func (p messagePrelude) ValidateLens() error { - if p.Length == 0 || p.Length > maxMsgLen { - return LengthError{ - Part: "message prelude", - Want: maxMsgLen, - Have: int(p.Length), - } - } - if p.HeadersLen > maxHeadersLen { - return LengthError{ - Part: "message headers", - Want: maxHeadersLen, - Have: int(p.HeadersLen), - } - } - if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { - return LengthError{ - Part: "message payload", - Want: maxPayloadLen, - Have: int(payloadLen), - } - } - - return nil -} - -type rawMessage struct { - messagePrelude - - Headers []byte - Payload []byte - - CRC uint32 -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go deleted file mode 100644 index 1f1d27aea4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go +++ /dev/null @@ -1,104 +0,0 @@ -package protocol - -import ( - "github.com/aws/aws-sdk-go/aws/request" - "net" - "strconv" - "strings" -) - -// ValidateEndpointHostHandler is a request handler that will validate the -// request endpoint's hosts is a valid RFC 3986 host. 
-var ValidateEndpointHostHandler = request.NamedHandler{ - Name: "awssdk.protocol.ValidateEndpointHostHandler", - Fn: func(r *request.Request) { - err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) - if err != nil { - r.Error = err - } - }, -} - -// ValidateEndpointHost validates that the host string passed in is a valid RFC -// 3986 host. Returns error if the host is not valid. -func ValidateEndpointHost(opName, host string) error { - paramErrs := request.ErrInvalidParams{Context: opName} - - var hostname string - var port string - var err error - - if strings.Contains(host, ":") { - hostname, port, err = net.SplitHostPort(host) - - if err != nil { - paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host)) - } - - if !ValidPortNumber(port) { - paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port)) - } - } else { - hostname = host - } - - labels := strings.Split(hostname, ".") - for i, label := range labels { - if i == len(labels)-1 && len(label) == 0 { - // Allow trailing dot for FQDN hosts. - continue - } - - if !ValidHostLabel(label) { - paramErrs.Add(request.NewErrParamFormat( - "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) - } - } - - if len(hostname) == 0 { - paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1)) - } - - if len(hostname) > 255 { - paramErrs.Add(request.NewErrParamMaxLen( - "endpoint host", 255, host, - )) - } - - if paramErrs.Len() > 0 { - return paramErrs - } - return nil -} - -// ValidHostLabel returns if the label is a valid RFC 3986 host label. -func ValidHostLabel(label string) bool { - if l := len(label); l == 0 || l > 63 { - return false - } - for _, r := range label { - switch { - case r >= '0' && r <= '9': - case r >= 'A' && r <= 'Z': - case r >= 'a' && r <= 'z': - case r == '-': - default: - return false - } - } - - return true -} - -// ValidPortNumber return if the port is valid RFC 3986 port -func ValidPortNumber(port string) bool { - i, err := strconv.Atoi(port) - if err != nil { - return false - } - - if i < 0 || i > 65535 { - return false - } - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go deleted file mode 100644 index 915b0fcafd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go +++ /dev/null @@ -1,54 +0,0 @@ -package protocol - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// HostPrefixHandlerName is the handler name for the host prefix request -// handler. -const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" - -// NewHostPrefixHandler constructs a build handler -func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { - builder := HostPrefixBuilder{ - Prefix: prefix, - LabelsFn: labelsFn, - } - - return request.NamedHandler{ - Name: HostPrefixHandlerName, - Fn: builder.Build, - } -} - -// HostPrefixBuilder provides the request handler to expand and prepend -// the host prefix into the operation's request endpoint host. -type HostPrefixBuilder struct { - Prefix string - LabelsFn func() map[string]string -} - -// Build updates the passed in Request with the HostPrefix template expanded. 
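
The host.go helpers removed above implement RFC 3986 host validation. A quick sketch of their behavior (illustrative only — the protocol package is private to the SDK):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	// Each dot-separated label must match [a-zA-Z0-9-]{1,63}.
	fmt.Println(protocol.ValidHostLabel("my-bucket"))   // true
	fmt.Println(protocol.ValidHostLabel("under_score")) // false: '_' is not allowed

	// Ports must parse as an integer in [0, 65535].
	fmt.Println(protocol.ValidPortNumber("8443"))  // true
	fmt.Println(protocol.ValidPortNumber("99999")) // false

	// Whole-host check, including an optional :port; a trailing dot (FQDN) is accepted.
	fmt.Println(protocol.ValidateEndpointHost("ListBuckets", "s3.us-east-1.amazonaws.com.")) // <nil>
}
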
-func (h HostPrefixBuilder) Build(r *request.Request) { - if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { - return - } - - var labels map[string]string - if h.LabelsFn != nil { - labels = h.LabelsFn() - } - - prefix := h.Prefix - for name, value := range labels { - prefix = strings.Replace(prefix, "{"+name+"}", value, -1) - } - - r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host - if len(r.HTTPRequest.Host) > 0 { - r.HTTPRequest.Host = prefix + r.HTTPRequest.Host - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go deleted file mode 100644 index 53831dff98..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go +++ /dev/null @@ -1,75 +0,0 @@ -package protocol - -import ( - "crypto/rand" - "fmt" - "reflect" -) - -// RandReader is the random reader the protocol package will use to read -// random bytes from. This is exported for testing, and should not be used. -var RandReader = rand.Reader - -const idempotencyTokenFillTag = `idempotencyToken` - -// CanSetIdempotencyToken returns true if the struct field should be -// automatically populated with a Idempotency token. -// -// Only *string and string type fields that are tagged with idempotencyToken -// which are not already set can be auto filled. -func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { - switch u := v.Interface().(type) { - // To auto fill an Idempotency token the field must be a string, - // tagged for auto fill, and have a zero value. - case *string: - return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - case string: - return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - } - - return false -} - -// GetIdempotencyToken returns a randomly generated idempotency token. -func GetIdempotencyToken() string { - b := make([]byte, 16) - RandReader.Read(b) - - return UUIDVersion4(b) -} - -// SetIdempotencyToken will set the value provided with a Idempotency Token. -// Given that the value can be set. Will panic if value is not setable. -func SetIdempotencyToken(v reflect.Value) { - if v.Kind() == reflect.Ptr { - if v.IsNil() && v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = reflect.Indirect(v) - - if !v.CanSet() { - panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) - } - - b := make([]byte, 16) - _, err := rand.Read(b) - if err != nil { - // TODO handle error - return - } - - v.Set(reflect.ValueOf(UUIDVersion4(b))) -} - -// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided -func UUIDVersion4(u []byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - // 13th character is "4" - u[6] = (u[6] | 0x40) & 0x4F - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] | 0x80) & 0xBF - - return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go deleted file mode 100644 index 2aec80661a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ /dev/null @@ -1,298 +0,0 @@ -// Package jsonutil provides JSON serialization of AWS requests and responses. 
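
idempotency.go, also deleted above, formats 16 random bytes as a version-4 UUID for auto-filled idempotency tokens. A short sketch (illustrative only — protocol is a private SDK package; note that UUIDVersion4 mutates the slice it is given):

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	// Random token, already formatted as a version-4 UUID.
	fmt.Println(protocol.GetIdempotencyToken())

	// The same formatting applied to bytes we control: UUIDVersion4 forces
	// the version nibble to 4 and the variant bits to 10xx before printing.
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	fmt.Println(protocol.UUIDVersion4(b)) // e.g. 3F2504E0-4F89-41D3-9A0C-0305E82C3301
}
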
-package jsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/private/protocol" -) - -var timeType = reflect.ValueOf(time.Time{}).Type() -var byteSliceType = reflect.ValueOf([]byte{}).Type() - -// BuildJSON builds a JSON string for a given object v. -func BuildJSON(v interface{}) ([]byte, error) { - var buf bytes.Buffer - - err := buildAny(reflect.ValueOf(v), &buf, "") - return buf.Bytes(), err -} - -func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - origVal := value - value = reflect.Indirect(value) - if !value.IsValid() { - return nil - } - - vtype := value.Type() - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if value.Type() != timeType { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return buildStruct(value, buf, tag) - case "list": - return buildList(value, buf, tag) - case "map": - return buildMap(value, buf, tag) - default: - return buildScalar(origVal, buf, tag) - } -} - -func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - if !value.IsValid() && tag.Get("type") != "structure" { - return nil - } - } - - buf.WriteByte('{') - defer buf.WriteString("}") - - if !value.IsValid() { - return nil - } - - t := value.Type() - first := true - for i := 0; i < t.NumField(); i++ { - member := value.Field(i) - - // This allocates the most memory. - // Additionally, we cannot skip nil fields due to - // idempotency auto filling. 
- field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("json") == "-" { - continue - } - if field.Tag.Get("location") != "" { - continue // ignore non-body elements - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(member, field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(&token) - } - - if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { - continue // ignore unset fields - } - - if first { - first = false - } else { - buf.WriteByte(',') - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - writeString(name, buf) - buf.WriteString(`:`) - - err := buildAny(member, buf, field.Tag) - if err != nil { - return err - } - - } - - return nil -} - -func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("[") - - for i := 0; i < value.Len(); i++ { - buildAny(value.Index(i), buf, "") - - if i < value.Len()-1 { - buf.WriteString(",") - } - } - - buf.WriteString("]") - - return nil -} - -type sortedValues []reflect.Value - -func (sv sortedValues) Len() int { return len(sv) } -func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } - -func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("{") - - sv := sortedValues(value.MapKeys()) - sort.Sort(sv) - - for i, k := range sv { - if i > 0 { - buf.WriteByte(',') - } - - writeString(k.String(), buf) - buf.WriteString(`:`) - - buildAny(value.MapIndex(k), buf, "") - } - - buf.WriteString("}") - - return nil -} - -func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - // prevents allocation on the heap. - scratch := [64]byte{} - switch value := reflect.Indirect(v); value.Kind() { - case reflect.String: - writeString(value.String(), buf) - case reflect.Bool: - if value.Bool() { - buf.WriteString("true") - } else { - buf.WriteString("false") - } - case reflect.Int64: - buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) - case reflect.Float64: - f := value.Float() - if math.IsInf(f, 0) || math.IsNaN(f) { - return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} - } - buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) - default: - switch converted := value.Interface().(type) { - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.UnixTimeFormatName - } - - ts := protocol.FormatTime(format, converted) - if format != protocol.UnixTimeFormatName { - ts = `"` + ts + `"` - } - - buf.WriteString(ts) - case []byte: - if !value.IsNil() { - buf.WriteByte('"') - if len(converted) < 1024 { - // for small buffers, using Encode directly is much faster. - dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) - base64.StdEncoding.Encode(dst, converted) - buf.Write(dst) - } else { - // for large buffers, avoid unnecessary extra temporary - // buffer space. 
-				enc := base64.NewEncoder(base64.StdEncoding, buf)
-				enc.Write(converted)
-				enc.Close()
-			}
-			buf.WriteByte('"')
-		}
-	case aws.JSONValue:
-		str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape)
-		if err != nil {
-			return fmt.Errorf("unable to encode JSONValue, %v", err)
-		}
-		buf.WriteString(str)
-	default:
-		return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
-	}
-	}
-	return nil
-}
-
-var hex = "0123456789abcdef"
-
-func writeString(s string, buf *bytes.Buffer) {
-	buf.WriteByte('"')
-	for i := 0; i < len(s); i++ {
-		if s[i] == '"' {
-			buf.WriteString(`\"`)
-		} else if s[i] == '\\' {
-			buf.WriteString(`\\`)
-		} else if s[i] == '\b' {
-			buf.WriteString(`\b`)
-		} else if s[i] == '\f' {
-			buf.WriteString(`\f`)
-		} else if s[i] == '\r' {
-			buf.WriteString(`\r`)
-		} else if s[i] == '\t' {
-			buf.WriteString(`\t`)
-		} else if s[i] == '\n' {
-			buf.WriteString(`\n`)
-		} else if s[i] < 32 {
-			buf.WriteString("\\u00")
-			buf.WriteByte(hex[s[i]>>4])
-			buf.WriteByte(hex[s[i]&0xF])
-		} else {
-			buf.WriteByte(s[i])
-		}
-	}
-	buf.WriteByte('"')
-}
-
-// Returns the reflection element of a value, if it is a pointer.
-func elemOf(value reflect.Value) reflect.Value {
-	for value.Kind() == reflect.Ptr {
-		value = value.Elem()
-	}
-	return value
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
deleted file mode 100644
index 8b2c9bbeba..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
+++ /dev/null
@@ -1,304 +0,0 @@
-package jsonutil
-
-import (
-	"bytes"
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"io"
-	"math/big"
-	"reflect"
-	"strings"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/private/protocol"
-)
-
-var millisecondsFloat = new(big.Float).SetInt64(1e3)
-
-// UnmarshalJSONError unmarshals the reader's JSON document into the passed in
-// type. The value to unmarshal the JSON document into must be a pointer to
-// the type.
-func UnmarshalJSONError(v interface{}, stream io.Reader) error {
-	var errBuf bytes.Buffer
-	body := io.TeeReader(stream, &errBuf)
-
-	err := json.NewDecoder(body).Decode(v)
-	if err != nil {
-		msg := "failed decoding error message"
-		if err == io.EOF {
-			msg = "error message missing"
-			err = nil
-		}
-		return awserr.NewUnmarshalError(err, msg, errBuf.Bytes())
-	}
-
-	return nil
-}
-
-// UnmarshalJSON reads a stream and unmarshals the results into object v.
-func UnmarshalJSON(v interface{}, stream io.Reader) error {
-	var out interface{}
-
-	decoder := json.NewDecoder(stream)
-	decoder.UseNumber()
-	err := decoder.Decode(&out)
-	if err == io.EOF {
-		return nil
-	} else if err != nil {
-		return err
-	}
-
-	return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "")
-}
-
-// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into
-// the object v. Ignores casing for structure members.
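UnmarshalJSON above decodes into interface{} with decoder.UseNumber() so numeric values are kept as json.Number strings instead of being forced through float64 immediately. A short standalone demonstration of why that matters:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	const doc = `{"n": 9007199254740993}` // 2^53+1: not exactly representable as float64

	var lossy map[string]interface{}
	json.Unmarshal([]byte(doc), &lossy) // default decoding: numbers become float64
	fmt.Printf("%.0f\n", lossy["n"])    // 9007199254740992 (off by one)

	var exact map[string]interface{}
	dec := json.NewDecoder(strings.NewReader(doc))
	dec.UseNumber() // numbers stay as json.Number strings
	dec.Decode(&exact)
	n, _ := exact["n"].(json.Number).Int64()
	fmt.Println(n) // 9007199254740993
}
```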
-func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { - var out interface{} - - decoder := json.NewDecoder(stream) - decoder.UseNumber() - err := decoder.Decode(&out) - if err == io.EOF { - return nil - } else if err != nil { - return err - } - - return unmarshaler{ - caseInsensitive: true, - }.unmarshalAny(reflect.ValueOf(v), out, "") -} - -type unmarshaler struct { - caseInsensitive bool -} - -func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { - vtype := value.Type() - if vtype.Kind() == reflect.Ptr { - vtype = vtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := value.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return u.unmarshalStruct(value, data, tag) - case "list": - return u.unmarshalList(value, data, tag) - case "map": - return u.unmarshalMap(value, data, tag) - default: - return u.unmarshalScalar(value, data, tag) - } -} - -func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a structure (%#v)", data) - } - - t := value.Type() - if value.Kind() == reflect.Ptr { - if value.IsNil() { // create the structure if it's nil - s := reflect.New(value.Type().Elem()) - value.Set(s) - value = s - } - - value = value.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if field.PkgPath != "" { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if u.caseInsensitive { - if _, ok := mapData[name]; !ok { - // Fallback to uncased name search if the exact name didn't match. 
- for kn, v := range mapData { - if strings.EqualFold(kn, name) { - mapData[name] = v - } - } - } - } - - member := value.FieldByIndex(field.Index) - err := u.unmarshalAny(member, mapData[name], field.Tag) - if err != nil { - return err - } - } - return nil -} - -func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - listData, ok := data.([]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a list (%#v)", data) - } - - if value.IsNil() { - l := len(listData) - value.Set(reflect.MakeSlice(value.Type(), l, l)) - } - - for i, c := range listData { - err := u.unmarshalAny(value.Index(i), c, "") - if err != nil { - return err - } - } - - return nil -} - -func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a map (%#v)", data) - } - - if value.IsNil() { - value.Set(reflect.MakeMap(value.Type())) - } - - for k, v := range mapData { - kvalue := reflect.ValueOf(k) - vvalue := reflect.New(value.Type().Elem()).Elem() - - u.unmarshalAny(vvalue, v, "") - value.SetMapIndex(kvalue, vvalue) - } - - return nil -} - -func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { - - switch d := data.(type) { - case nil: - return nil // nothing to do here - case string: - switch value.Interface().(type) { - case *string: - value.Set(reflect.ValueOf(&d)) - case []byte: - b, err := base64.StdEncoding.DecodeString(d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(b)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - t, err := protocol.ParseTime(format, d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - // No need to use escaping as the value is a non-quoted string. 
- v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) - if err != nil { - return err - } - value.Set(reflect.ValueOf(v)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - case json.Number: - switch value.Interface().(type) { - case *int64: - // Retain the old behavior where we would just truncate the float64 - // calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt - f, err := d.Float64() - if err != nil { - return err - } - di := int64(f) - value.Set(reflect.ValueOf(&di)) - case *float64: - f, err := d.Float64() - if err != nil { - return err - } - value.Set(reflect.ValueOf(&f)) - case *time.Time: - float, ok := new(big.Float).SetString(d.String()) - if !ok { - return fmt.Errorf("unsupported float time representation: %v", d.String()) - } - float = float.Mul(float, millisecondsFloat) - ms, _ := float.Int64() - t := time.Unix(0, ms*1e6).UTC() - value.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - case bool: - switch value.Interface().(type) { - case *bool: - value.Set(reflect.ValueOf(&d)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - default: - return fmt.Errorf("unsupported JSON value (%v)", data) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go deleted file mode 100644 index d9aa271148..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go +++ /dev/null @@ -1,87 +0,0 @@ -// Package jsonrpc provides JSON RPC utilities for serialization of AWS -// requests and responses. -package jsonrpc - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -var emptyJSON = []byte("{}") - -// BuildHandler is a named request handler for building jsonrpc protocol -// requests -var BuildHandler = request.NamedHandler{ - Name: "awssdk.jsonrpc.Build", - Fn: Build, -} - -// UnmarshalHandler is a named request handler for unmarshaling jsonrpc -// protocol requests -var UnmarshalHandler = request.NamedHandler{ - Name: "awssdk.jsonrpc.Unmarshal", - Fn: Unmarshal, -} - -// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc -// protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{ - Name: "awssdk.jsonrpc.UnmarshalMeta", - Fn: UnmarshalMeta, -} - -// Build builds a JSON payload for a JSON RPC request. -func Build(req *request.Request) { - var buf []byte - var err error - if req.ParamsFilled() { - buf, err = jsonutil.BuildJSON(req.Params) - if err != nil { - req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err) - return - } - } else { - buf = emptyJSON - } - - // Always serialize the body, don't suppress it. - req.SetBufferBody(buf) - - if req.ClientInfo.TargetPrefix != "" { - target := req.ClientInfo.TargetPrefix + "." 
+ req.Operation.Name
-		req.HTTPRequest.Header.Add("X-Amz-Target", target)
-	}
-
-	// Only set the content type if one is not already specified and a
-	// JSONVersion is specified.
-	if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 {
-		jsonVersion := req.ClientInfo.JSONVersion
-		req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion)
-	}
-}
-
-// Unmarshal unmarshals a response for a JSON RPC service.
-func Unmarshal(req *request.Request) {
-	defer req.HTTPResponse.Body.Close()
-	if req.DataFilled() {
-		err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
-		if err != nil {
-			req.Error = awserr.NewRequestFailure(
-				awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err),
-				req.HTTPResponse.StatusCode,
-				req.RequestID,
-			)
-		}
-	}
-	return
-}
-
-// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
-func UnmarshalMeta(req *request.Request) {
-	rest.UnmarshalMeta(req)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
deleted file mode 100644
index c0c52e2db0..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package jsonrpc
-
-import (
-	"bytes"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"strings"
-
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/private/protocol"
-	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
-)
-
-// UnmarshalTypedError provides unmarshaling of API response errors for both
-// typed and untyped errors.
-type UnmarshalTypedError struct {
-	exceptions map[string]func(protocol.ResponseMetadata) error
-}
-
-// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
-// set of exception names to the error unmarshalers
-func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
-	return &UnmarshalTypedError{
-		exceptions: exceptions,
-	}
-}
-
-// UnmarshalError attempts to unmarshal the HTTP response error as a known
-// error type. If unable to unmarshal the error type, the generic SDK error
-// type will be used.
-func (u *UnmarshalTypedError) UnmarshalError(
-	resp *http.Response,
-	respMeta protocol.ResponseMetadata,
-) (error, error) {
-
-	var buf bytes.Buffer
-	var jsonErr jsonErrorResponse
-	teeReader := io.TeeReader(resp.Body, &buf)
-	err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
-	if err != nil {
-		return nil, err
-	}
-	body := ioutil.NopCloser(&buf)
-
-	// Code may be separated by hash(#), with the last element being the code
-	// used by the SDK.
-	codeParts := strings.SplitN(jsonErr.Code, "#", 2)
-	code := codeParts[len(codeParts)-1]
-	msg := jsonErr.Message
-
-	if fn, ok := u.exceptions[code]; ok {
-		// If the exception code is known, use the associated constructor to get
-		// a value for the exception that the JSON body can be unmarshaled into.
-		v := fn(respMeta)
-		err := jsonutil.UnmarshalJSONCaseInsensitive(v, body)
-		if err != nil {
-			return nil, err
-		}
-
-		return v, nil
-	}
-
-	// fallback to unmodeled generic exceptions
-	return awserr.NewRequestFailure(
-		awserr.New(code, msg, nil),
-		respMeta.StatusCode,
-		respMeta.RequestID,
-	), nil
-}
-
-// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc
-// protocol request errors
-var UnmarshalErrorHandler = request.NamedHandler{
-	Name: "awssdk.jsonrpc.UnmarshalError",
-	Fn:   UnmarshalError,
-}
-
-// UnmarshalError unmarshals an error response for a JSON RPC service.
-func UnmarshalError(req *request.Request) {
-	defer req.HTTPResponse.Body.Close()
-
-	var jsonErr jsonErrorResponse
-	err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body)
-	if err != nil {
-		req.Error = awserr.NewRequestFailure(
-			awserr.New(request.ErrCodeSerialization,
-				"failed to unmarshal error message", err),
-			req.HTTPResponse.StatusCode,
-			req.RequestID,
-		)
-		return
-	}
-
-	codes := strings.SplitN(jsonErr.Code, "#", 2)
-	req.Error = awserr.NewRequestFailure(
-		awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
-		req.HTTPResponse.StatusCode,
-		req.RequestID,
-	)
-}
-
-type jsonErrorResponse struct {
-	Code    string `json:"__type"`
-	Message string `json:"message"`
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
deleted file mode 100644
index 776d110184..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package protocol
-
-import (
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"strconv"
-
-	"github.com/aws/aws-sdk-go/aws"
-)
-
-// EscapeMode is the mode that should be used for escaping a value
-type EscapeMode uint
-
-// The modes for escaping a value before it is marshaled, and unmarshaled.
-const (
-	NoEscape EscapeMode = iota
-	Base64Escape
-	QuotedEscape
-)
-
-// EncodeJSONValue marshals the value into a JSON string, and optionally base64
-// encodes the string before returning it.
-//
-// Will panic if the escape mode is unknown.
-func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
-	b, err := json.Marshal(v)
-	if err != nil {
-		return "", err
-	}
-
-	switch escape {
-	case NoEscape:
-		return string(b), nil
-	case Base64Escape:
-		return base64.StdEncoding.EncodeToString(b), nil
-	case QuotedEscape:
-		return strconv.Quote(string(b)), nil
-	}
-
-	panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
-}
-
-// DecodeJSONValue will attempt to decode the string input as a JSONValue.
-// Optionally base64 decodes the value before JSON unmarshaling.
-//
-// Will panic if the escape mode is unknown.
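The three escape modes above reduce to json.Marshal plus an optional base64 or quoted-string wrapping. A rough standard-library equivalent, for illustration only:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	v := map[string]interface{}{"k": "v"}
	b, _ := json.Marshal(v) // error ignored for brevity

	fmt.Println(string(b))                            // NoEscape:     {"k":"v"}
	fmt.Println(base64.StdEncoding.EncodeToString(b)) // Base64Escape: eyJrIjoidiJ9
	fmt.Println(strconv.Quote(string(b)))             // QuotedEscape: "{\"k\":\"v\"}"
}
```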
-func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
-	var b []byte
-	var err error
-
-	switch escape {
-	case NoEscape:
-		b = []byte(v)
-	case Base64Escape:
-		b, err = base64.StdEncoding.DecodeString(v)
-	case QuotedEscape:
-		var u string
-		u, err = strconv.Unquote(v)
-		b = []byte(u)
-	default:
-		panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	m := aws.JSONValue{}
-	err = json.Unmarshal(b, &m)
-	if err != nil {
-		return nil, err
-	}
-
-	return m, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
deleted file mode 100644
index 0ea0647a57..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package protocol
-
-import (
-	"io"
-	"io/ioutil"
-	"net/http"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/client/metadata"
-	"github.com/aws/aws-sdk-go/aws/request"
-)
-
-// PayloadUnmarshaler provides the interface for unmarshaling a payload's
-// reader into an SDK shape.
-type PayloadUnmarshaler interface {
-	UnmarshalPayload(io.Reader, interface{}) error
-}
-
-// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
-// HandlerList. This provides the support for unmarshaling a payload reader to
-// a shape without needing an SDK request first.
-type HandlerPayloadUnmarshal struct {
-	Unmarshalers request.HandlerList
-}
-
-// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
-// the Unmarshalers HandlerList provided. Returns an error if unmarshaling
-// fails.
-func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
-	req := &request.Request{
-		HTTPRequest: &http.Request{},
-		HTTPResponse: &http.Response{
-			StatusCode: 200,
-			Header:     http.Header{},
-			Body:       ioutil.NopCloser(r),
-		},
-		Data: v,
-	}
-
-	h.Unmarshalers.Run(req)
-
-	return req.Error
-}
-
-// PayloadMarshaler provides the interface for marshaling an SDK shape into an
-// io.Writer.
-type PayloadMarshaler interface {
-	MarshalPayload(io.Writer, interface{}) error
-}
-
-// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
-// This provides support for marshaling an SDK shape into an io.Writer without
-// needing an SDK request first.
-type HandlerPayloadMarshal struct {
-	Marshalers request.HandlerList
-}
-
-// MarshalPayload marshals the SDK shape into the io.Writer using the
-// Marshalers HandlerList provided. Returns an error if marshaling
-// fails.
-func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { - req := request.New( - aws.Config{}, - metadata.ClientInfo{}, - request.Handlers{}, - nil, - &request.Operation{HTTPMethod: "PUT"}, - v, - nil, - ) - - h.Marshalers.Run(req) - - if req.Error != nil { - return req.Error - } - - io.Copy(w, req.GetBody()) - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go deleted file mode 100644 index 9d521dcb95..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go +++ /dev/null @@ -1,49 +0,0 @@ -package protocol - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// RequireHTTPMinProtocol request handler is used to enforce that -// the target endpoint supports the given major and minor HTTP protocol version. -type RequireHTTPMinProtocol struct { - Major, Minor int -} - -// Handler will mark the request.Request with an error if the -// target endpoint did not connect with the required HTTP protocol -// major and minor version. -func (p RequireHTTPMinProtocol) Handler(r *request.Request) { - if r.Error != nil || r.HTTPResponse == nil { - return - } - - if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { - r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) - } - - if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { - r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) - } -} - -// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint -// did not match the required HTTP major and minor protocol version. -const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" - -func newMinHTTPProtoError(major, minor int, r *request.Request) error { - return awserr.NewRequestFailure( - awserr.New("MinimumHTTPProtocolError", - fmt.Sprintf( - "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", - major, minor, r.HTTPResponse.Proto, - ), - nil, - ), - r.HTTPResponse.StatusCode, r.RequestID, - ) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go deleted file mode 100644 index d40346a779..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ /dev/null @@ -1,36 +0,0 @@ -// Package query provides serialization of AWS query requests, and responses. -package query - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go - -import ( - "net/url" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" -) - -// BuildHandler is a named request handler for building query protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} - -// Build builds a request for an AWS Query service. 
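The query protocol's Build handler always injects the operation's Action and Version into a form-encoded body (POST), or into the raw query string for presigned GETs. That wire format can be reproduced with net/url alone; the operation name, version, and member key below are illustrative, not taken from this diff:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	body := url.Values{
		"Action":  {"DescribeRegions"}, // hypothetical operation name
		"Version": {"2016-11-15"},      // hypothetical API version
	}
	body.Set("RegionNames.member.1", "us-east-1")

	// Encode() is the POST body for a normal request,
	// or URL.RawQuery for a presigned GET.
	fmt.Println(body.Encode())
	// Action=DescribeRegions&RegionNames.member.1=us-east-1&Version=2016-11-15
}
```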
-func Build(r *request.Request) { - body := url.Values{ - "Action": {r.Operation.Name}, - "Version": {r.ClientInfo.APIVersion}, - } - if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) - return - } - - if !r.IsPresigned() { - r.HTTPRequest.Method = "POST" - r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - r.SetBufferBody([]byte(body.Encode())) - } else { // This is a pre-signed request - r.HTTPRequest.Method = "GET" - r.HTTPRequest.URL.RawQuery = body.Encode() - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go deleted file mode 100644 index 75866d0121..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ /dev/null @@ -1,246 +0,0 @@ -package queryutil - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// Parse parses an object i and fills a url.Values object. The isEC2 flag -// indicates if this is the EC2 Query sub-protocol. -func Parse(body url.Values, i interface{}, isEC2 bool) error { - q := queryParser{isEC2: isEC2} - return q.parseValue(body, reflect.ValueOf(i), "", "") -} - -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -type queryParser struct { - isEC2 bool -} - -func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - value = elemOf(value) - - // no need to handle zero values - if !value.IsValid() { - return nil - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - return q.parseStruct(v, value, prefix) - case "list": - return q.parseList(v, value, prefix, tag) - case "map": - return q.parseMap(v, value, prefix, tag) - default: - return q.parseScalar(v, value, prefix, tag) - } -} - -func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { - if !value.IsValid() { - return nil - } - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - elemValue := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - elemValue = reflect.ValueOf(token) - } - - var name string - if q.isEC2 { - name = field.Tag.Get("queryName") - } - if name == "" { - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if name != "" && q.isEC2 { - name = strings.ToUpper(name[0:1]) + name[1:] - } - } - if name == "" { - name = field.Name - } - - if prefix != "" { - name = prefix + "." 
+ name - } - - if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - if _, ok := value.Interface().([]byte); ok { - return q.parseScalar(v, value, prefix, tag) - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - if listName := tag.Get("locationNameList"); listName == "" { - prefix += ".member" - } else { - prefix += "." + listName - } - } - - for i := 0; i < value.Len(); i++ { - slicePrefix := prefix - if slicePrefix == "" { - slicePrefix = strconv.Itoa(i + 1) - } else { - slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) - } - if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - prefix += ".entry" - } - - // sort keys for improved serialization consistency. - // this is not strictly necessary for protocol support. - mapKeyValues := value.MapKeys() - mapKeys := map[string]reflect.Value{} - mapKeyNames := make([]string, len(mapKeyValues)) - for i, mapKey := range mapKeyValues { - name := mapKey.String() - mapKeys[name] = mapKey - mapKeyNames[i] = name - } - sort.Strings(mapKeyNames) - - for i, mapKeyName := range mapKeyNames { - mapKey := mapKeys[mapKeyName] - mapValue := value.MapIndex(mapKey) - - kname := tag.Get("locationNameKey") - if kname == "" { - kname = "key" - } - vname := tag.Get("locationNameValue") - if vname == "" { - vname = "value" - } - - // serialize key - var keyName string - if prefix == "" { - keyName = strconv.Itoa(i+1) + "." + kname - } else { - keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname - } - - if err := q.parseValue(v, mapKey, keyName, ""); err != nil { - return err - } - - // serialize value - var valueName string - if prefix == "" { - valueName = strconv.Itoa(i+1) + "." + vname - } else { - valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname - } - - if err := q.parseValue(v, mapValue, valueName, ""); err != nil { - return err - } - } - - return nil -} - -func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { - switch value := r.Interface().(type) { - case string: - v.Set(name, value) - case []byte: - if !r.IsNil() { - v.Set(name, base64.StdEncoding.EncodeToString(value)) - } - case bool: - v.Set(name, strconv.FormatBool(value)) - case int64: - v.Set(name, strconv.FormatInt(value, 10)) - case int: - v.Set(name, strconv.Itoa(value)) - case float64: - v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) - case float32: - v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) - case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - v.Set(name, protocol.FormatTime(format, value)) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go deleted file mode 100644 index 9231e95d16..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ /dev/null @@ -1,39 +0,0 @@ -package query - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go - -import ( - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// UnmarshalHandler is a named request handler for unmarshaling query protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals a response for an AWS Query service. -func Unmarshal(r *request.Request) { - defer r.HTTPResponse.Body.Close() - if r.DataFilled() { - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - } -} - -// UnmarshalMeta unmarshals header response values for an AWS Query service. 
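parseMap above sorts map keys and emits entry.N.key / entry.N.value pairs (or locationNameKey/locationNameValue overrides) so serialization is deterministic. A stripped-down sketch of that naming scheme for a simple string map, with an invented prefix for illustration:

```go
package main

import (
	"fmt"
	"net/url"
	"sort"
)

// flattenMap mimics the deleted parseMap naming scheme for a
// map[string]string under a non-flattened prefix (illustrative only).
func flattenMap(v url.Values, prefix string, m map[string]string) {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys) // sorted for deterministic output, as in parseMap

	for i, k := range keys {
		n := fmt.Sprintf("%s.entry.%d", prefix, i+1)
		v.Set(n+".key", k)
		v.Set(n+".value", m[k])
	}
}

func main() {
	v := url.Values{}
	flattenMap(v, "Attributes", map[string]string{"b": "2", "a": "1"})
	fmt.Println(v.Encode())
	// Attributes.entry.1.key=a&Attributes.entry.1.value=1&Attributes.entry.2.key=b&Attributes.entry.2.value=2
}
```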
-func UnmarshalMeta(r *request.Request) {
-	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
deleted file mode 100644
index 2c0cbba909..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package query
-
-import (
-	"encoding/xml"
-	"fmt"
-	"strings"
-
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
-)
-
-// UnmarshalErrorHandler is a named request handler to unmarshal request errors
-var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
-
-type xmlErrorResponse struct {
-	Code      string `xml:"Error>Code"`
-	Message   string `xml:"Error>Message"`
-	RequestID string `xml:"RequestId"`
-}
-
-type xmlResponseError struct {
-	xmlErrorResponse
-}
-
-func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
-	const svcUnavailableTagName = "ServiceUnavailableException"
-	const errorResponseTagName = "ErrorResponse"
-
-	switch start.Name.Local {
-	case svcUnavailableTagName:
-		e.Code = svcUnavailableTagName
-		e.Message = "service is unavailable"
-		return d.Skip()
-
-	case errorResponseTagName:
-		return d.DecodeElement(&e.xmlErrorResponse, &start)
-
-	default:
-		return fmt.Errorf("unknown error response tag, %v", start)
-	}
-}
-
-// UnmarshalError unmarshals an error response for an AWS Query service.
-func UnmarshalError(r *request.Request) {
-	defer r.HTTPResponse.Body.Close()
-
-	var respErr xmlResponseError
-	err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
-	if err != nil {
-		r.Error = awserr.NewRequestFailure(
-			awserr.New(request.ErrCodeSerialization,
-				"failed to unmarshal error message", err),
-			r.HTTPResponse.StatusCode,
-			r.RequestID,
-		)
-		return
-	}
-
-	reqID := respErr.RequestID
-	if len(reqID) == 0 {
-		reqID = r.RequestID
-	}
-
-	r.Error = awserr.NewRequestFailure(
-		awserr.New(strings.TrimSpace(respErr.Code), strings.TrimSpace(respErr.Message), nil),
-		r.HTTPResponse.StatusCode,
-		reqID,
-	)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
deleted file mode 100644
index 63f66af2c6..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
+++ /dev/null
@@ -1,333 +0,0 @@
-// Package rest provides RESTful serialization of AWS requests and responses.
-package rest
-
-import (
-	"bytes"
-	"encoding/base64"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"path"
-	"reflect"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/private/protocol"
-)
-
-// Whether the byte value can be sent without escaping in AWS URLs
-var noEscape [256]bool
-
-var errValueNotSet = fmt.Errorf("value not set")
-
-var byteSliceType = reflect.TypeOf([]byte{})
-
-func init() {
-	for i := 0; i < len(noEscape); i++ {
-		// AWS expects every character except these to be escaped
-		noEscape[i] = (i >= 'A' && i <= 'Z') ||
-			(i >= 'a' && i <= 'z') ||
-			(i >= '0' && i <= '9') ||
-			i == '-' ||
-			i == '.'
|| - i == '_' || - i == '~' - } -} - -// BuildHandler is a named request handler for building rest protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} - -// Build builds the REST component of a service request. -func Build(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, false) - buildBody(r, v) - } -} - -// BuildAsGET builds the REST component of a service request with the ability to hoist -// data from the body. -func BuildAsGET(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, true) - buildBody(r, v) - } -} - -func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { - query := r.HTTPRequest.URL.Query() - - // Setup the raw path to match the base path pattern. This is needed - // so that when the path is mutated a custom escaped version can be - // stored in RawPath that will be used by the Go client. - r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path - - for i := 0; i < v.NumField(); i++ { - m := v.Field(i) - if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - field := v.Type().Field(i) - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - if kind := m.Kind(); kind == reflect.Ptr { - m = m.Elem() - } else if kind == reflect.Interface { - if !m.Elem().IsValid() { - continue - } - } - if !m.IsValid() { - continue - } - if field.Tag.Get("ignore") != "" { - continue - } - - // Support the ability to customize values to be marshaled as a - // blob even though they were modeled as a string. Required for S3 - // API operations like SSECustomerKey is modeled as string but - // required to be base64 encoded in request. 
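buildLocationElements above routes each exported struct field by its location tag (header, headers, uri, querystring), falling back to the query string for hoisted GETs and to the body otherwise. A stripped-down sketch of reading those tags with reflect; the Input type and its tags are invented for illustration:

```go
package main

import (
	"fmt"
	"reflect"
)

// Input is a hypothetical shape using the same tag conventions as the
// deleted rest package.
type Input struct {
	Bucket *string `location:"uri" locationName:"Bucket"`
	ACL    *string `location:"header" locationName:"x-amz-acl"`
	Limit  *int64  `location:"querystring" locationName:"max-keys"`
}

func main() {
	t := reflect.TypeOf(Input{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fmt.Printf("%-6s -> location=%q name=%q\n",
			f.Name, f.Tag.Get("location"), f.Tag.Get("locationName"))
	}
}
```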
- if field.Tag.Get("marshal-as") == "blob" { - m = m.Convert(byteSliceType) - } - - var err error - switch field.Tag.Get("location") { - case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) - case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) - case "uri": - err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) - case "querystring": - err = buildQueryString(query, m, name, field.Tag) - default: - if buildGETQuery { - err = buildQueryString(query, m, name, field.Tag) - } - } - r.Error = err - } - if r.Error != nil { - return - } - } - - r.HTTPRequest.URL.RawQuery = query.Encode() - if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { - cleanPath(r.HTTPRequest.URL) - } -} - -func buildBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := reflect.Indirect(v.FieldByName(payloadName)) - if payload.IsValid() && payload.Interface() != nil { - switch reader := payload.Interface().(type) { - case io.ReadSeeker: - r.SetReaderBody(reader) - case []byte: - r.SetBufferBody(reader) - case string: - r.SetStringBody(reader) - default: - r.Error = awserr.New(request.ErrCodeSerialization, - "failed to encode REST request", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } -} - -func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { - str, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - - name = strings.TrimSpace(name) - str = strings.TrimSpace(str) - - header.Add(name, str) - - return nil -} - -func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { - prefix := tag.Get("locationName") - for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key), tag) - if err == errValueNotSet { - continue - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - - } - keyStr := strings.TrimSpace(key.String()) - str = strings.TrimSpace(str) - - header.Add(prefix+keyStr, str) - } - return nil -} - -func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { - value, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - - u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) - u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) - - u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) - u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) - - return nil -} - -func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { - switch value := v.Interface().(type) { - case []*string: - for _, item := range value { - query.Add(name, *item) - } - case map[string]*string: - for key, item := range value { - query.Add(key, *item) - } - case map[string][]*string: - for key, items := range value { - for _, item := range items { - query.Add(key, *item) - } - } - default: - str, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else 
if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - query.Set(name, str) - } - - return nil -} - -func cleanPath(u *url.URL) { - hasSlash := strings.HasSuffix(u.Path, "/") - - // clean up path, removing duplicate `/` - u.Path = path.Clean(u.Path) - u.RawPath = path.Clean(u.RawPath) - - if hasSlash && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - u.RawPath += "/" - } -} - -// EscapePath escapes part of a URL path in Amazon style -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { - v = reflect.Indirect(v) - if !v.IsValid() { - return "", errValueNotSet - } - - switch value := v.Interface().(type) { - case string: - if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { - value = base64.StdEncoding.EncodeToString([]byte(value)) - } - str = value - case []*string: - if tag.Get("location") != "header" || tag.Get("enum") == "" { - return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) - } - buff := &bytes.Buffer{} - for i, sv := range value { - if sv == nil || len(*sv) == 0 { - continue - } - if i != 0 { - buff.WriteRune(',') - } - item := *sv - if strings.Index(item, `,`) != -1 || strings.Index(item, `"`) != -1 { - item = strconv.Quote(item) - } - buff.WriteString(item) - } - str = string(buff.Bytes()) - case []byte: - str = base64.StdEncoding.EncodeToString(value) - case bool: - str = strconv.FormatBool(value) - case int64: - str = strconv.FormatInt(value, 10) - case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.RFC822TimeFormatName - if tag.Get("location") == "querystring" { - format = protocol.ISO8601TimeFormatName - } - } - str = protocol.FormatTime(format, value) - case aws.JSONValue: - if len(value) == 0 { - return "", errValueNotSet - } - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - str, err = protocol.EncodeJSONValue(value, escaping) - if err != nil { - return "", fmt.Errorf("unable to encode JSONValue, %v", err) - } - default: - err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) - return "", err - } - - return str, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go deleted file mode 100644 index b54c99edae..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go +++ /dev/null @@ -1,54 +0,0 @@ -package rest - -import "reflect" - -// PayloadMember returns the payload field member of i if there is one, or nil. 
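EscapePath above percent-encodes every byte outside the unreserved set, with encodeSep controlling whether `/` is escaped too (needed when a path segment itself contains a slash). The same loop works standalone:

```go
package main

import (
	"bytes"
	"fmt"
)

// escapePath mirrors the deleted EscapePath: unreserved bytes pass through,
// everything else becomes %XX; encodeSep controls whether '/' is escaped.
func escapePath(path string, encodeSep bool) string {
	var buf bytes.Buffer
	for i := 0; i < len(path); i++ {
		c := path[i]
		unreserved := (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') ||
			(c >= '0' && c <= '9') || c == '-' || c == '.' || c == '_' || c == '~'
		if unreserved || (c == '/' && !encodeSep) {
			buf.WriteByte(c)
		} else {
			fmt.Fprintf(&buf, "%%%02X", c)
		}
	}
	return buf.String()
}

func main() {
	fmt.Println(escapePath("photos/2024 report.pdf", false)) // photos/2024%20report.pdf
	fmt.Println(escapePath("photos/2024 report.pdf", true))  // photos%2F2024%20report.pdf
}
```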
-func PayloadMember(i interface{}) interface{} { - if i == nil { - return nil - } - - v := reflect.ValueOf(i).Elem() - if !v.IsValid() { - return nil - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - field, _ := v.Type().FieldByName(payloadName) - if field.Tag.Get("type") != "structure" { - return nil - } - - payload := v.FieldByName(payloadName) - if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { - return payload.Interface() - } - } - } - return nil -} - -const nopayloadPayloadType = "nopayload" - -// PayloadType returns the type of a payload field member of i if there is one, -// or "". -func PayloadType(i interface{}) string { - v := reflect.Indirect(reflect.ValueOf(i)) - if !v.IsValid() { - return "" - } - - if field, ok := v.Type().FieldByName("_"); ok { - if noPayload := field.Tag.Get(nopayloadPayloadType); noPayload != "" { - return nopayloadPayloadType - } - - if payloadName := field.Tag.Get("payload"); payloadName != "" { - if member, ok := v.Type().FieldByName(payloadName); ok { - return member.Tag.Get("type") - } - } - } - - return "" -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go deleted file mode 100644 index cdef403e21..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ /dev/null @@ -1,264 +0,0 @@ -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - awsStrings "github.com/aws/aws-sdk-go/internal/strings" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals the REST component of a response in a REST service. -func Unmarshal(r *request.Request) { - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - if err := unmarshalBody(r, v); err != nil { - r.Error = err - } - } -} - -// UnmarshalMeta unmarshals the REST metadata of a response in a REST service -func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") - if r.RequestID == "" { - // Alternative version of request id in the header - r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") - } - if r.DataFilled() { - if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { - r.Error = err - } - } -} - -// UnmarshalResponse attempts to unmarshal the REST response headers to -// the data type passed in. The type must be a pointer. An error is returned -// with any error unmarshaling the response into the target datatype. 
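PayloadType and PayloadMember above locate the payload by reading struct tags off the unexported "_" marker field. A small sketch of that lookup; the Output shape is invented for illustration:

```go
package main

import (
	"fmt"
	"reflect"
)

// Output is a hypothetical shape: the "_" field's payload tag names the
// member that carries the HTTP body, as in the deleted rest package.
type Output struct {
	_    struct{} `payload:"Body"`
	Body []byte   `type:"blob"`
}

func main() {
	t := reflect.TypeOf(Output{})
	if marker, ok := t.FieldByName("_"); ok {
		name := marker.Tag.Get("payload")
		member, _ := t.FieldByName(name)
		fmt.Printf("payload member %q has type tag %q\n", name, member.Tag.Get("type"))
		// payload member "Body" has type tag "blob"
	}
}
```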
-func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { - v := reflect.Indirect(reflect.ValueOf(data)) - return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) -} - -func unmarshalBody(r *request.Request, v reflect.Value) error { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := v.FieldByName(payloadName) - if payload.IsValid() { - switch payload.Interface().(type) { - case []byte: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - payload.Set(reflect.ValueOf(b)) - - case *string: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - str := string(b) - payload.Set(reflect.ValueOf(&str)) - - default: - switch payload.Type().String() { - case "io.ReadCloser": - payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) - - case "io.ReadSeeker": - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, - "failed to read response body", err) - } - payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) - - default: - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() - return awserr.New(request.ErrCodeSerialization, - "failed to decode REST response", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } - } - - return nil -} - -func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { - for i := 0; i < v.NumField(); i++ { - m, field := v.Field(i), v.Type().Field(i) - if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - - switch field.Tag.Get("location") { - case "statusCode": - unmarshalStatusCode(m, resp.StatusCode) - - case "header": - err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - case "headers": - prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - } - } - } - - return nil -} - -func unmarshalStatusCode(v reflect.Value, statusCode int) { - if !v.IsValid() { - return - } - - switch v.Interface().(type) { - case *int64: - s := int64(statusCode) - v.Set(reflect.ValueOf(&s)) - } -} - -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { - if len(headers) == 0 { - return nil - } - switch r.Interface().(type) { - case map[string]*string: // we only support string map value types - out := map[string]*string{} - for k, v := range headers { - if awsStrings.HasPrefixFold(k, prefix) { - if normalize == true { - k = strings.ToLower(k) - } else { - k = http.CanonicalHeaderKey(k) - } - out[k[len(prefix):]] = &v[0] - } - } - if len(out) != 0 { - r.Set(reflect.ValueOf(out)) - } - - } - return nil -} - -func unmarshalHeader(v reflect.Value, header string, tag 
reflect.StructTag) error { - switch tag.Get("type") { - case "jsonvalue": - if len(header) == 0 { - return nil - } - case "blob": - if len(header) == 0 { - return nil - } - default: - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil - } - } - - switch v.Interface().(type) { - case *string: - if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { - b, err := base64.StdEncoding.DecodeString(header) - if err != nil { - return fmt.Errorf("failed to decode JSONValue, %v", err) - } - header = string(b) - } - v.Set(reflect.ValueOf(&header)) - case []byte: - b, err := base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(b)) - case *bool: - b, err := strconv.ParseBool(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *int64: - i, err := strconv.ParseInt(header, 10, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&i)) - case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&f)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.RFC822TimeFormatName - } - t, err := protocol.ParseTime(format, header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - m, err := protocol.DecodeJSONValue(header, escaping) - if err != nil { - return err - } - v.Set(reflect.ValueOf(m)) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return err - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go deleted file mode 100644 index 2e0e205af3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go +++ /dev/null @@ -1,59 +0,0 @@ -// Package restjson provides RESTful JSON serialization of AWS -// requests and responses. -package restjson - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go - -import ( - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -// BuildHandler is a named request handler for building restjson protocol -// requests -var BuildHandler = request.NamedHandler{ - Name: "awssdk.restjson.Build", - Fn: Build, -} - -// UnmarshalHandler is a named request handler for unmarshaling restjson -// protocol requests -var UnmarshalHandler = request.NamedHandler{ - Name: "awssdk.restjson.Unmarshal", - Fn: Unmarshal, -} - -// UnmarshalMetaHandler is a named request handler for unmarshaling restjson -// protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{ - Name: "awssdk.restjson.UnmarshalMeta", - Fn: UnmarshalMeta, -} - -// Build builds a request for the REST JSON protocol. 
-func Build(r *request.Request) { - rest.Build(r) - - if t := rest.PayloadType(r.Params); t == "structure" || t == "" { - if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 { - r.HTTPRequest.Header.Set("Content-Type", "application/json") - } - jsonrpc.Build(r) - } -} - -// Unmarshal unmarshals a response body for the REST JSON protocol. -func Unmarshal(r *request.Request) { - if t := rest.PayloadType(r.Data); t == "structure" || t == "" { - jsonrpc.Unmarshal(r) - } else { - rest.Unmarshal(r) - } -} - -// UnmarshalMeta unmarshals response headers for the REST JSON protocol. -func UnmarshalMeta(r *request.Request) { - rest.UnmarshalMeta(r) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go deleted file mode 100644 index d756d8cc52..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go +++ /dev/null @@ -1,134 +0,0 @@ -package restjson - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -const ( - errorTypeHeader = "X-Amzn-Errortype" - errorMessageHeader = "X-Amzn-Errormessage" -) - -// UnmarshalTypedError provides unmarshaling of API response errors -// for both typed and untyped errors. -type UnmarshalTypedError struct { - exceptions map[string]func(protocol.ResponseMetadata) error } - -// NewUnmarshalTypedError returns an UnmarshalTypedError initialized with the -// given mapping of exception names to error unmarshalers -func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError { - return &UnmarshalTypedError{ - exceptions: exceptions, - } -} - -// UnmarshalError attempts to unmarshal the HTTP response error as a known -// error type. If unable to unmarshal the error type, the generic SDK error -// type will be used. -func (u *UnmarshalTypedError) UnmarshalError( - resp *http.Response, - respMeta protocol.ResponseMetadata, -) (error, error) { - - code := resp.Header.Get(errorTypeHeader) - msg := resp.Header.Get(errorMessageHeader) - - body := resp.Body - if len(code) == 0 { - // If unable to get the code from the HTTP headers, the JSON message - // has to be parsed to determine what kind of exception this will be. - var buf bytes.Buffer - var jsonErr jsonErrorResponse - teeReader := io.TeeReader(resp.Body, &buf) - err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) - if err != nil { - return nil, err - } - - body = ioutil.NopCloser(&buf) - code = jsonErr.Code - msg = jsonErr.Message - } - - // If the code has colon separators, remove them so it can be compared - // against modeled exception names. - code = strings.SplitN(code, ":", 2)[0] - - if fn, ok := u.exceptions[code]; ok { - // If the exception code is known, use the associated constructor to get a value - // for the exception that the JSON body can be unmarshaled into.
- v := fn(respMeta) - if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil { - return nil, err - } - - if err := rest.UnmarshalResponse(resp, v, true); err != nil { - return nil, err - } - - return v, nil - } - - // fallback to unmodeled generic exceptions - return awserr.NewRequestFailure( - awserr.New(code, msg, nil), - respMeta.StatusCode, - respMeta.RequestID, - ), nil -} - -// UnmarshalErrorHandler is a named request handler for unmarshaling restjson -// protocol request errors -var UnmarshalErrorHandler = request.NamedHandler{ - Name: "awssdk.restjson.UnmarshalError", - Fn: UnmarshalError, -} - -// UnmarshalError unmarshals a response error for the REST JSON protocol. -func UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - var jsonErr jsonErrorResponse - err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal response error", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - code := r.HTTPResponse.Header.Get(errorTypeHeader) - if code == "" { - code = jsonErr.Code - } - msg := r.HTTPResponse.Header.Get(errorMessageHeader) - if msg == "" { - msg = jsonErr.Message - } - - code = strings.SplitN(code, ":", 2)[0] - r.Error = awserr.NewRequestFailure( - awserr.New(code, jsonErr.Message, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) -} - -type jsonErrorResponse struct { - Code string `json:"code"` - Message string `json:"message"` -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go deleted file mode 100644 index b1ae364871..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ /dev/null @@ -1,79 +0,0 @@ -// Package restxml provides RESTful XML serialization of AWS -// requests and responses. -package restxml - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go - -import ( - "bytes" - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/protocol/rest" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// BuildHandler is a named request handler for building restxml protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} - -// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} - -// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} - -// Build builds a request payload for the REST XML protocol. 
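Two details in the typed-error path just removed are worth seeing in isolation: the error code can arrive in the `X-Amzn-Errortype` header or in the JSON body, and the body is read through an `io.TeeReader` so a typed unmarshaler can still consume it afterwards. A condensed sketch using only the standard library; the header names are the constants defined above, the response payload is invented:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
)

type jsonError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}

// errorCode resolves the error type the way UnmarshalTypedError does: prefer
// the X-Amzn-Errortype header, fall back to the JSON body, and strip any
// ":"-suffixed details so the code matches modeled exception names. The tee
// keeps a copy of whatever was read so a typed unmarshaler can re-read it.
func errorCode(resp *http.Response) (code, msg string, body io.Reader, err error) {
	code = resp.Header.Get("X-Amzn-Errortype")
	msg = resp.Header.Get("X-Amzn-Errormessage")
	body = resp.Body

	if code == "" {
		var buf bytes.Buffer
		var je jsonError
		if err = json.NewDecoder(io.TeeReader(resp.Body, &buf)).Decode(&je); err != nil {
			return "", "", nil, err
		}
		code, msg, body = je.Code, je.Message, &buf
	}
	// "ThrottlingException:Retryable" -> "ThrottlingException"
	code = strings.SplitN(code, ":", 2)[0]
	return code, msg, body, nil
}

func main() {
	resp := &http.Response{
		Header: http.Header{},
		Body:   io.NopCloser(strings.NewReader(`{"code":"ThrottlingException:Retryable","message":"slow down"}`)),
	}
	code, msg, _, err := errorCode(resp)
	if err != nil {
		panic(err)
	}
	fmt.Println(code, "-", msg) // ThrottlingException - slow down
}
```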
-func Build(r *request.Request) { - rest.Build(r) - - if t := rest.PayloadType(r.Params); t == "structure" || t == "" { - var buf bytes.Buffer - err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to encode rest XML request", err), - 0, - r.RequestID, - ) - return - } - r.SetBufferBody(buf.Bytes()) - } -} - -// Unmarshal unmarshals a payload response for the REST XML protocol. -func Unmarshal(r *request.Request) { - if t := rest.PayloadType(r.Data); t == "structure" || t == "" { - defer r.HTTPResponse.Body.Close() - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, "") - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to decode REST XML response", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - } else { - rest.Unmarshal(r) - } -} - -// UnmarshalMeta unmarshals response headers for the REST XML protocol. -func UnmarshalMeta(r *request.Request) { - rest.UnmarshalMeta(r) -} - -// UnmarshalError unmarshals a response error for the REST XML protocol. -func UnmarshalError(r *request.Request) { - query.UnmarshalError(r) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go deleted file mode 100644 index d9a4e76493..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ /dev/null @@ -1,134 +0,0 @@ -package protocol - -import ( - "bytes" - "fmt" - "math" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/internal/sdkmath" -) - -// Names of time formats supported by the SDK -const ( - RFC822TimeFormatName = "rfc822" - ISO8601TimeFormatName = "iso8601" - UnixTimeFormatName = "unixTimestamp" ) - -// Time formats supported by the SDK -// Output time is intended to not contain decimals -const ( - // RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT - RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" - rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT" - rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT" - - // This format is used for output time without seconds precision - RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" - - // RFC3339, a subset of the ISO8601 timestamp format. e.g. 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" - iso8601TimeFormatNoZ = "2006-01-02T15:04:05.999999999" - - // This format is used for output time with fractional second precision up to milliseconds - ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z" -) - -// IsKnownTimestampFormat returns if the timestamp format name -// is known to the SDK's protocols. -func IsKnownTimestampFormat(name string) bool { - switch name { - case RFC822TimeFormatName: - fallthrough - case ISO8601TimeFormatName: - fallthrough - case UnixTimeFormatName: - return true - default: - return false - } -} - -// FormatTime returns a string value of the time.
-func FormatTime(name string, t time.Time) string { - t = t.UTC().Truncate(time.Millisecond) - - switch name { - case RFC822TimeFormatName: - return t.Format(RFC822OutputTimeFormat) - case ISO8601TimeFormatName: - return t.Format(ISO8601OutputTimeFormat) - case UnixTimeFormatName: - ms := t.UnixNano() / int64(time.Millisecond) - return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64) - default: - panic("unknown timestamp format name, " + name) - } -} - -// ParseTime attempts to parse the time given the format. Returns -// the time if it was able to be parsed, and fails otherwise. -func ParseTime(formatName, value string) (time.Time, error) { - switch formatName { - case RFC822TimeFormatName: // Smithy HTTPDate format - return tryParse(value, - RFC822TimeFormat, - rfc822TimeFormatSingleDigitDay, - rfc822TimeFormatSingleDigitDayTwoDigitYear, - time.RFC850, - time.ANSIC, - ) - case ISO8601TimeFormatName: // Smithy DateTime format - return tryParse(value, - ISO8601TimeFormat, - iso8601TimeFormatNoZ, - time.RFC3339Nano, - time.RFC3339, - ) - case UnixTimeFormatName: - v, err := strconv.ParseFloat(value, 64) - _, dec := math.Modf(v) - dec = sdkmath.Round(dec*1e3) / 1e3 // Rounds 0.1229999 to 0.123 - if err != nil { - return time.Time{}, err - } - return time.Unix(int64(v), int64(dec*(1e9))), nil - default: - panic("unknown timestamp format name, " + formatName) - } -} - -func tryParse(v string, formats ...string) (time.Time, error) { - var errs parseErrors - for _, f := range formats { - t, err := time.Parse(f, v) - if err != nil { - errs = append(errs, parseError{ - Format: f, - Err: err, - }) - continue - } - return t, nil - } - - return time.Time{}, fmt.Errorf("unable to parse time string, %v", errs) -} - -type parseErrors []parseError - -func (es parseErrors) Error() string { - var s bytes.Buffer - for _, e := range es { - fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err) - } - - return "parse errors:" + s.String() -} - -type parseError struct { - Format string - Err error -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go deleted file mode 100644 index f614ef898b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go +++ /dev/null @@ -1,27 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body -var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} - -// UnmarshalDiscardBody is a request handler to empty and close a response's body. -func UnmarshalDiscardBody(r *request.Request) { - if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { - return - } - - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() -} - -// ResponseMetadata provides the SDK response metadata attributes.
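The format constants and the `tryParse` fallback above can be exercised with the standard library alone. A small sketch, assuming the same layouts the deleted timestamp.go declares; note how the `_2` layout is what rescues single-digit-day HTTP dates:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// firstParse mirrors tryParse: attempt each layout in order and return the
// first success.
func firstParse(value string, layouts ...string) (time.Time, error) {
	for _, l := range layouts {
		if t, err := time.Parse(l, value); err == nil {
			return t, nil
		}
	}
	return time.Time{}, fmt.Errorf("unable to parse time string %q", value)
}

func main() {
	t := time.Date(2014, time.April, 29, 18, 30, 38, 123e6, time.UTC)

	// The three named output formats from timestamp.go.
	fmt.Println(t.Format("Mon, 02 Jan 2006 15:04:05 GMT"))         // rfc822
	fmt.Println(t.Format("2006-01-02T15:04:05.999999999Z"))        // iso8601
	ms := t.UnixNano() / int64(time.Millisecond)
	fmt.Println(strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64)) // unixTimestamp

	// Single-digit days show up space-padded in the wild ("Tue,  1 Apr").
	// The plain "2" layout rejects that input; the "_2" variant accepts it.
	got, err := firstParse("Tue,  1 Apr 2014 18:30:38 GMT",
		"Mon, 2 Jan 2006 15:04:05 GMT",
		"Mon, _2 Jan 2006 15:04:05 GMT",
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.UTC())
}
```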
-type ResponseMetadata struct { - StatusCode int - RequestID string -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go deleted file mode 100644 index cc857f136c..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go +++ /dev/null @@ -1,65 +0,0 @@ -package protocol - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalErrorHandler provides unmarshaling of API response errors for -// both typed and untyped errors. -type UnmarshalErrorHandler struct { - unmarshaler ErrorUnmarshaler -} - -// ErrorUnmarshaler is an abstract interface for concrete implementations to -// unmarshal protocol specific response errors. -type ErrorUnmarshaler interface { - UnmarshalError(*http.Response, ResponseMetadata) (error, error) -} - -// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler -// initialized with the given ErrorUnmarshaler implementation -func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { - return &UnmarshalErrorHandler{ - unmarshaler: unmarshaler, - } -} - -// UnmarshalErrorHandlerName is the name of the named handler. -const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" - -// NamedHandler returns a NamedHandler for the unmarshaler using the set of -// errors the unmarshaler was initialized for. -func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { - return request.NamedHandler{ - Name: UnmarshalErrorHandlerName, - Fn: u.UnmarshalError, - } -} - -// UnmarshalError will attempt to unmarshal the API response's error message -// into either a generic SDK error type, or a typed error corresponding to the -// error's exception name. -func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - respMeta := ResponseMetadata{ - StatusCode: r.HTTPResponse.StatusCode, - RequestID: r.RequestID, - } - - v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal response error", err), - respMeta.StatusCode, - respMeta.RequestID, - ) - return - } - - r.Error = v -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go deleted file mode 100644 index 2fbb93ae76..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ /dev/null @@ -1,317 +0,0 @@ -// Package xmlutil provides XML serialization of AWS requests and responses. -package xmlutil - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// BuildXML will serialize params into an xml.Encoder. Error will be returned -// if the serialization of any of the params or nested values fails.
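The discard handler in unmarshal.go above exists for connection reuse: a response body that is not read to EOF and closed forces net/http to drop its keep-alive connection. The same pattern in isolation (the URL is a placeholder):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// drain mirrors UnmarshalDiscardBody: even when a response carries no payload
// the SDK reads it to EOF and closes it, so net/http can return the
// keep-alive connection to the pool instead of tearing it down.
func drain(resp *http.Response) {
	if resp == nil || resp.Body == nil {
		return
	}
	io.Copy(io.Discard, resp.Body)
	resp.Body.Close()
}

func main() {
	resp, err := http.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	drain(resp)
	fmt.Println(resp.Status)
}
```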
-func BuildXML(params interface{}, e *xml.Encoder) error { - return buildXML(params, e, false) -} - -func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { - b := xmlBuilder{encoder: e, namespaces: map[string]string{}} - root := NewXMLElement(xml.Name{}) - if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { - return err - } - for _, c := range root.Children { - for _, v := range c { - return StructToXML(e, v, sorted) - } - } - return nil -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -// A xmlBuilder serializes values from Go code to XML -type xmlBuilder struct { - encoder *xml.Encoder - namespaces map[string]string -} - -// buildValue generic XMLNode builder for any type. Will build value for their specific type -// struct, list, map, scalar. -// -// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If -// type is not provided reflect will be used to determine the value's type. -func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - value = elemOf(value) - if !value.IsValid() { // no need to handle zero values - return nil - } else if tag.Get("location") != "" { // don't handle non-body location values - return nil - } - - xml := tag.Get("xml") - if len(xml) != 0 { - name := strings.SplitAfterN(xml, ",", 2)[0] - if name == "-" { - return nil - } - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := value.Type().FieldByName("_"); ok { - tag = tag + reflect.StructTag(" ") + field.Tag - } - return b.buildStruct(value, current, tag) - case "list": - return b.buildList(value, current, tag) - case "map": - return b.buildMap(value, current, tag) - default: - return b.buildScalar(value, current, tag) - } -} - -// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested -// types are converted to XMLNodes also. 
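buildValue's dispatch is easier to see without the XML machinery around it: an explicit `type` tag wins, otherwise the shape is inferred from the value's reflect.Kind. A stripped-down sketch, with a hypothetical `Rule` type:

```go
package main

import (
	"fmt"
	"reflect"
)

// kindOf reproduces the default dispatch in buildValue above: when a field
// carries no explicit `type` tag, the shape is inferred from its Kind.
func kindOf(v interface{}, tag reflect.StructTag) string {
	if t := tag.Get("type"); t != "" {
		return t
	}
	value := reflect.ValueOf(v)
	for value.Kind() == reflect.Ptr {
		value = value.Elem()
	}
	switch value.Kind() {
	case reflect.Struct:
		return "structure"
	case reflect.Slice:
		return "list"
	case reflect.Map:
		return "map"
	default:
		return "scalar"
	}
}

func main() {
	type Rule struct{ Prefix *string }
	fmt.Println(kindOf(Rule{}, ""))              // structure
	fmt.Println(kindOf([]Rule{}, ""))            // list
	fmt.Println(kindOf(map[string]string{}, "")) // map
	fmt.Println(kindOf("abc", ""))               // scalar
	fmt.Println(kindOf([]byte{}, `type:"blob"`)) // blob: explicit tag wins
}
```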
-func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - - // there is an xmlNamespace associated with this struct - if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { - ns := xml.Attr{ - Name: xml.Name{Local: "xmlns"}, - Value: uri, - } - if prefix != "" { - b.namespaces[prefix] = uri // register the namespace - ns.Name.Local = "xmlns:" + prefix - } - - child.Attr = append(child.Attr, ns) - } - - var payloadFields, nonPayloadFields int - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - member := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - mTag := field.Tag - if mTag.Get("location") != "" { // skip non-body members - nonPayloadFields++ - continue - } - payloadFields++ - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(token) - } - - memberName := mTag.Get("locationName") - if memberName == "" { - memberName = field.Name - mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) - } - if err := b.buildValue(member, child, mTag); err != nil { - return err - } - } - - // Only case where the child shape is not added is if the shape only contains - // non-payload fields, e.g headers/query. - if !(payloadFields == 0 && nonPayloadFields > 0) { - current.AddChild(child) - } - - return nil -} - -// buildList adds the value's list items to the current XMLNode as children nodes. All -// nested values in the list are converted to XMLNodes also. -func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted lists - return nil - } - - // check for unflattened list member - flattened := tag.Get("flattened") != "" - - xname := xml.Name{Local: tag.Get("locationName")} - if flattened { - for i := 0; i < value.Len(); i++ { - child := NewXMLElement(xname) - current.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } else { - list := NewXMLElement(xname) - current.AddChild(list) - - for i := 0; i < value.Len(); i++ { - iname := tag.Get("locationNameList") - if iname == "" { - iname = "member" - } - - child := NewXMLElement(xml.Name{Local: iname}) - list.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } - - return nil -} - -// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All -// nested values in the map are converted to XMLNodes also. 
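buildList's two branches correspond to the two wire shapes XML lists take: the default form wraps items in a container element with `member` children, while a `flattened` list repeats its location name directly. For illustration, stdlib `encoding/xml` tags can reproduce both shapes (the `Config`/`Grants`/`Grant` names are invented; the SDK drives this from its own locationName/locationNameList tags, not `xml:` tags):

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Wrapped models the default list shape: a <Grants> wrapper with <member>
// children, as buildList produces when `flattened` is absent.
type Wrapped struct {
	XMLName xml.Name `xml:"Config"`
	Grants  struct {
		Member []string `xml:"member"`
	} `xml:"Grants"`
}

// Flattened models the flattened shape: the element name repeats per item.
type Flattened struct {
	XMLName xml.Name `xml:"Config"`
	Grants  []string `xml:"Grant"`
}

func main() {
	var w Wrapped
	w.Grants.Member = []string{"a", "b"}
	out, err := xml.Marshal(w)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// <Config><Grants><member>a</member><member>b</member></Grants></Config>

	f := Flattened{Grants: []string{"a", "b"}}
	out, err = xml.Marshal(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// <Config><Grant>a</Grant><Grant>b</Grant></Config>
}
```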
-// -// Error will be returned if it is unable to build the map's values into XMLNodes -func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted maps - return nil - } - - maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - current.AddChild(maproot) - current = maproot - - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - // sorting is not required for compliance, but it makes testing easier - keys := make([]string, value.Len()) - for i, k := range value.MapKeys() { - keys[i] = k.String() - } - sort.Strings(keys) - - for _, k := range keys { - v := value.MapIndex(reflect.ValueOf(k)) - - mapcur := current - if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps - child := NewXMLElement(xml.Name{Local: "entry"}) - mapcur.AddChild(child) - mapcur = child - } - - kchild := NewXMLElement(xml.Name{Local: kname}) - kchild.Text = k - vchild := NewXMLElement(xml.Name{Local: vname}) - mapcur.AddChild(kchild) - mapcur.AddChild(vchild) - - if err := b.buildValue(v, vchild, ""); err != nil { - return err - } - } - - return nil -} - -// buildScalar will convert the value into a string and append it as an attribute or child -// of the current XMLNode. -// -// The value will be added as an attribute if tag contains an "xmlAttribute" attribute value. -// -// Error will be returned if the value type is unsupported. -func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - var str string - switch converted := value.Interface().(type) { - case string: - str = converted - case []byte: - if !value.IsNil() { - str = base64.StdEncoding.EncodeToString(converted) - } - case bool: - str = strconv.FormatBool(converted) - case int64: - str = strconv.FormatInt(converted, 10) - case int: - str = strconv.Itoa(converted) - case float64: - str = strconv.FormatFloat(converted, 'f', -1, 64) - case float32: - str = strconv.FormatFloat(float64(converted), 'f', -1, 32) - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - str = protocol.FormatTime(format, converted) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", - tag.Get("locationName"), value.Interface(), value.Type().Name()) - } - - xname := xml.Name{Local: tag.Get("locationName")} - if tag.Get("xmlAttribute") != "" { // put into current node's attribute list - attr := xml.Attr{Name: xname, Value: str} - current.Attr = append(current.Attr, attr) - } else if len(xname.Local) == 0 { - current.Text = str - } else { // regular text node - current.AddChild(&XMLNode{Name: xname, Text: str}) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go deleted file mode 100644 index c1a511851f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go +++ /dev/null @@ -1,32 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "strings" -) - -type xmlAttrSlice []xml.Attr - -func (x xmlAttrSlice) Len() int { - return len(x) -} - -func (x xmlAttrSlice) Less(i, j int) bool { - spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space - localI, localJ := x[i].Name.Local, x[j].Name.Local - valueI, valueJ := x[i].Value, x[j].Value - - spaceCmp := strings.Compare(spaceI, spaceJ) -
localCmp := strings.Compare(localI, localJ) - valueCmp := strings.Compare(valueI, valueJ) - - if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { - return true - } - - return false -} - -func (x xmlAttrSlice) Swap(i, j int) { - x[i], x[j] = x[j], x[i] -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go deleted file mode 100644 index 107c053f8a..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ /dev/null @@ -1,299 +0,0 @@ -package xmlutil - -import ( - "bytes" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalXMLError unmarshals the XML error from the stream into the value -// type specified. The value must be a pointer. If the message fails to -// unmarshal, the message content will be included in the returned error as a -// awserr.UnmarshalError. -func UnmarshalXMLError(v interface{}, stream io.Reader) error { - var errBuf bytes.Buffer - body := io.TeeReader(stream, &errBuf) - - err := xml.NewDecoder(body).Decode(v) - if err != nil && err != io.EOF { - return awserr.NewUnmarshalError(err, - "failed to unmarshal error message", errBuf.Bytes()) - } - - return nil -} - -// UnmarshalXML deserializes an xml.Decoder into the container v. V -// needs to match the shape of the XML expected to be decoded. -// If the shape doesn't match unmarshaling will fail. -func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, err := XMLToStruct(d, nil) - if err != nil { - return err - } - if n.Children != nil { - for _, root := range n.Children { - for _, c := range root { - if wrappedChild, ok := c.Children[wrapper]; ok { - c = wrappedChild[0] // pull out wrapped element - } - - err = parse(reflect.ValueOf(v), c, "") - if err != nil { - if err == io.EOF { - return nil - } - return err - } - } - } - return nil - } - return nil -} - -// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect -// will be used to determine the type from r. -func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - xml := tag.Get("xml") - if len(xml) != 0 { - name := strings.SplitAfterN(xml, ",", 2)[0] - if name == "-" { - return nil - } - } - - rtype := r.Type() - if rtype.Kind() == reflect.Ptr { - rtype = rtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch rtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := r.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := r.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := rtype.FieldByName("_"); ok { - tag = field.Tag - } - return parseStruct(r, node, tag) - case "list": - return parseList(r, node, tag) - case "map": - return parseMap(r, node, tag) - default: - return parseScalar(r, node, tag) - } -} - -// parseStruct deserializes a structure and its fields from an XMLNode. Any nested -// types in the structure will also be deserialized. 
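sort.go's xmlAttrSlice is a three-key lexicographic comparison (namespace, then local name, then value) written against sort.Interface so encoded output is deterministic. With a modern standard library the same ordering can be expressed with sort.Slice:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"sort"
)

func main() {
	attrs := []xml.Attr{
		{Name: xml.Name{Local: "b"}, Value: "2"},
		{Name: xml.Name{Space: "xmlns", Local: "a"}, Value: "3"},
		{Name: xml.Name{Local: "a"}, Value: "1"},
	}
	// Same ordering as xmlAttrSlice.Less: namespace, local name, value.
	sort.Slice(attrs, func(i, j int) bool {
		if attrs[i].Name.Space != attrs[j].Name.Space {
			return attrs[i].Name.Space < attrs[j].Name.Space
		}
		if attrs[i].Name.Local != attrs[j].Name.Local {
			return attrs[i].Name.Local < attrs[j].Name.Local
		}
		return attrs[i].Value < attrs[j].Value
	})
	for _, a := range attrs {
		fmt.Printf("%s:%s=%s\n", a.Name.Space, a.Name.Local, a.Value)
	}
	// :a=1
	// :b=2
	// xmlns:a=3
}
```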
-func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - if r.Kind() == reflect.Ptr { - if r.IsNil() { // create the structure if it's nil - s := reflect.New(r.Type().Elem()) - r.Set(s) - r = s - } - - r = r.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return parseStruct(r.FieldByName(payload), node, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if c := field.Name[0:1]; strings.ToLower(c) == c { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - // try to find the field by name in elements - elems := node.Children[name] - - if elems == nil { // try to find the field in attributes - if val, ok := node.findElem(name); ok { - elems = []*XMLNode{{Text: val}} - } - } - - member := r.FieldByName(field.Name) - for _, elem := range elems { - err := parse(member, elem, field.Tag) - if err != nil { - return err - } - } - } - return nil -} - -// parseList deserializes a list of values from an XML node. Each list entry -// will also be deserialized. -func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - - if tag.Get("flattened") == "" { // look at all item entries - mname := "member" - if name := tag.Get("locationNameList"); name != "" { - mname = name - } - - if Children, ok := node.Children[mname]; ok { - if r.IsNil() { - r.Set(reflect.MakeSlice(t, len(Children), len(Children))) - } - - for i, c := range Children { - err := parse(r.Index(i), c, "") - if err != nil { - return err - } - } - } - } else { // flattened list means this is a single element - if r.IsNil() { - r.Set(reflect.MakeSlice(t, 0, 0)) - } - - childR := reflect.Zero(t.Elem()) - r.Set(reflect.Append(r, childR)) - err := parse(r.Index(r.Len()-1), node, "") - if err != nil { - return err - } - } - - return nil -} - -// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode -// will also be deserialized as map entries. -func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - if r.IsNil() { - r.Set(reflect.MakeMap(r.Type())) - } - - if tag.Get("flattened") == "" { // look at all child entries - for _, entry := range node.Children["entry"] { - parseMapEntry(r, entry, tag) - } - } else { // this element is itself an entry - parseMapEntry(r, node, tag) - } - - return nil -} - -// parseMapEntry deserializes a map entry from an XML node. -func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - keys, ok := node.Children[kname] - values := node.Children[vname] - if ok { - for i, key := range keys { - keyR := reflect.ValueOf(key.Text) - value := values[i] - valueR := reflect.New(r.Type().Elem()).Elem() - - parse(valueR, value, "") - r.SetMapIndex(keyR, valueR) - } - } - return nil -} - -// parseScalar deserializes an XMLNode value into a concrete type based on the -// interface type of r. -// -// Error is returned if the deserialization fails due to invalid type conversion, -// or unsupported interface type.
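parseMap and parseMapEntry above walk `<entry><key/><value/>` children, the default element names unless locationNameKey/locationNameValue override them. Decoding that shape with plain `encoding/xml` shows the structure; the `Tagging`/`TagSet` document here is a made-up example, not the SDK's own code path:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// entry mirrors the <entry><key/><value/> shape parseMapEntry consumes.
type entry struct {
	Key   string `xml:"key"`
	Value string `xml:"value"`
}

type tagSet struct {
	Entries []entry `xml:"TagSet>entry"`
}

func main() {
	doc := `<Tagging><TagSet>
	  <entry><key>env</key><value>prod</value></entry>
	  <entry><key>team</key><value>infra</value></entry>
	</TagSet></Tagging>`

	var ts tagSet
	if err := xml.Unmarshal([]byte(doc), &ts); err != nil {
		panic(err)
	}
	// Rebuild the Go map the same way the SDK's parseMapEntry does,
	// one SetMapIndex per key/value pair.
	m := map[string]string{}
	for _, e := range ts.Entries {
		m[e.Key] = e.Value
	}
	fmt.Println(m) // map[env:prod team:infra]
}
```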
-func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - switch r.Interface().(type) { - case *string: - r.Set(reflect.ValueOf(&node.Text)) - return nil - case []byte: - b, err := base64.StdEncoding.DecodeString(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(b)) - case *bool: - v, err := strconv.ParseBool(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *int64: - v, err := strconv.ParseInt(node.Text, 10, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *float64: - v, err := strconv.ParseFloat(node.Text, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - t, err := protocol.ParseTime(format, node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go deleted file mode 100644 index c85b79fddd..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ /dev/null @@ -1,173 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "fmt" - "io" - "sort" -) - -// An XMLNode contains the values to be encoded or decoded. -type XMLNode struct { - Name xml.Name `json:",omitempty"` - Children map[string][]*XMLNode `json:",omitempty"` - Text string `json:",omitempty"` - Attr []xml.Attr `json:",omitempty"` - - namespaces map[string]string - parent *XMLNode -} - -// textEncoder is a string type alias that implements the TextMarshaler interface. -// This alias type is used to ensure that the line feed (\n) (U+000A) is escaped. -type textEncoder string - -func (t textEncoder) MarshalText() ([]byte, error) { - return []byte(t), nil -} - -// NewXMLElement returns a pointer to a new XMLNode initialized to default values. -func NewXMLElement(name xml.Name) *XMLNode { - return &XMLNode{ - Name: name, - Children: map[string][]*XMLNode{}, - Attr: []xml.Attr{}, - } -} - -// AddChild adds child to the XMLNode. -func (n *XMLNode) AddChild(child *XMLNode) { - child.parent = n - if _, ok := n.Children[child.Name.Local]; !ok { - n.Children[child.Name.Local] = []*XMLNode{} - } - n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) -} - -// XMLToStruct converts an xml.Decoder stream to XMLNode with nested values.
-func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { - out := &XMLNode{} - for { - tok, err := d.Token() - if err != nil { - if err == io.EOF { - break - } else { - return out, err - } - } - - if tok == nil { - break - } - - switch typed := tok.(type) { - case xml.CharData: - out.Text = string(typed.Copy()) - case xml.StartElement: - el := typed.Copy() - out.Attr = el.Attr - if out.Children == nil { - out.Children = map[string][]*XMLNode{} - } - - name := typed.Name.Local - slice := out.Children[name] - if slice == nil { - slice = []*XMLNode{} - } - node, e := XMLToStruct(d, &el) - out.findNamespaces() - if e != nil { - return out, e - } - node.Name = typed.Name - node.findNamespaces() - tempOut := *out - // Save into a temp variable, simply because out gets squashed during - // loop iterations - node.parent = &tempOut - slice = append(slice, node) - out.Children[name] = slice - case xml.EndElement: - if s != nil && s.Name.Local == typed.Name.Local { // matching end token - return out, nil - } - out = &XMLNode{} - } - } - return out, nil -} - -func (n *XMLNode) findNamespaces() { - ns := map[string]string{} - for _, a := range n.Attr { - if a.Name.Space == "xmlns" { - ns[a.Value] = a.Name.Local - } - } - - n.namespaces = ns -} - -func (n *XMLNode) findElem(name string) (string, bool) { - for node := n; node != nil; node = node.parent { - for _, a := range node.Attr { - namespace := a.Name.Space - if v, ok := node.namespaces[namespace]; ok { - namespace = v - } - if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { - return a.Value, true - } - } - } - return "", false -} - -// StructToXML writes an XMLNode to a xml.Encoder as tokens. -func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - // Sort Attributes - attrs := node.Attr - if sorted { - sortedAttrs := make([]xml.Attr, len(attrs)) - for _, k := range node.Attr { - sortedAttrs = append(sortedAttrs, k) - } - sort.Sort(xmlAttrSlice(sortedAttrs)) - attrs = sortedAttrs - } - - startElement := xml.StartElement{Name: node.Name, Attr: attrs} - - if node.Text != "" { - e.EncodeElement(textEncoder(node.Text), startElement) - return e.Flush() - } - - e.EncodeToken(startElement) - - if sorted { - sortedNames := []string{} - for k := range node.Children { - sortedNames = append(sortedNames, k) - } - sort.Strings(sortedNames) - - for _, k := range sortedNames { - for _, v := range node.Children[k] { - StructToXML(e, v, sorted) - } - } - } else { - for _, c := range node.Children { - for _, v := range c { - StructToXML(e, v, sorted) - } - } - } - - e.EncodeToken(startElement.End()) - - return e.Flush() -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go deleted file mode 100644 index a9c653a00d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ /dev/null @@ -1,42113 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
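XMLToStruct above is built around the classic `xml.Decoder` token loop: pull tokens one at a time, recurse on each StartElement, and return on the matching EndElement. The skeleton of that loop on a toy document:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

func main() {
	doc := `<ListBucketResult><Name>my-bucket</Name><KeyCount>2</KeyCount></ListBucketResult>`
	d := xml.NewDecoder(strings.NewReader(doc))
	depth := 0
	for {
		tok, err := d.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		switch t := tok.(type) {
		case xml.StartElement:
			// XMLToStruct recurses here, building a child XMLNode.
			fmt.Printf("%*sstart %s\n", depth*2, "", t.Name.Local)
			depth++
		case xml.CharData:
			if s := strings.TrimSpace(string(t)); s != "" {
				fmt.Printf("%*stext  %q\n", depth*2, "", s)
			}
		case xml.EndElement:
			// XMLToStruct returns here when the end matches its start tag.
			depth--
			fmt.Printf("%*send   %s\n", depth*2, "", t.Name.Local)
		}
	}
}
```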
- -package s3 - -import ( - "bytes" - "fmt" - "io" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/internal/s3shared/arn" - "github.com/aws/aws-sdk-go/private/checksum" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/eventstream" - "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" - "github.com/aws/aws-sdk-go/private/protocol/rest" - "github.com/aws/aws-sdk-go/private/protocol/restxml" -) - -const opAbortMultipartUpload = "AbortMultipartUpload" - -// AbortMultipartUploadRequest generates a "aws/request.Request" representing the -// client's request for the AbortMultipartUpload operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AbortMultipartUpload for more information on using the AbortMultipartUpload -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the AbortMultipartUploadRequest method. -// req, resp := client.AbortMultipartUploadRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload -func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { - op := &request.Operation{ - Name: opAbortMultipartUpload, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &AbortMultipartUploadInput{} - } - - output = &AbortMultipartUploadOutput{} - req = c.newRequest(op, input, output) - return -} - -// AbortMultipartUpload API operation for Amazon Simple Storage Service. -// -// This action aborts a multipart upload. After a multipart upload is aborted, -// no additional parts can be uploaded using that upload ID. The storage consumed -// by any previously uploaded parts will be freed. However, if any part uploads -// are currently in progress, those part uploads might or might not succeed. -// As a result, it might be necessary to abort a given multipart upload multiple -// times in order to completely free all storage consumed by all parts. -// -// To verify that all parts have been removed, so you don't get charged for -// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// action and ensure that the parts list is empty. -// -// For information about permissions required to use the multipart upload, see -// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). 
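The AbortMultipartUpload doc above recommends following an abort with ListParts to confirm no billable parts remain. A sketch of that sequence against the v1 SDK whose vendored copy this diff removes; the bucket, key, and upload ID are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	// Abort the in-progress upload; in-flight UploadPart calls may still land.
	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("big/object.bin"),
		UploadId: aws.String("EXAMPLE-UPLOAD-ID"),
	})
	if err != nil {
		panic(err)
	}

	// Verify the parts list is empty, as the documentation advises.
	parts, err := svc.ListParts(&s3.ListPartsInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("big/object.bin"),
		UploadId: aws.String("EXAMPLE-UPLOAD-ID"),
	})
	if err != nil {
		// A NoSuchUpload error here also confirms the upload is gone.
		fmt.Println("list parts:", err)
		return
	}
	fmt.Println("remaining parts:", len(parts.Parts))
}
```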
-// -// The following operations are related to AbortMultipartUpload: -// -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation AbortMultipartUpload for usage and error information. -// -// Returned Error Codes: -// - ErrCodeNoSuchUpload "NoSuchUpload" -// The specified multipart upload does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload -func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { - req, out := c.AbortMultipartUploadRequest(input) - return out, req.Send() -} - -// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of -// the ability to pass a context and additional request options. -// -// See AbortMultipartUpload for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { - req, out := c.AbortMultipartUploadRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCompleteMultipartUpload = "CompleteMultipartUpload" - -// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the -// client's request for the CompleteMultipartUpload operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the CompleteMultipartUploadRequest method. 
-// req, resp := client.CompleteMultipartUploadRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload -func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { - op := &request.Operation{ - Name: opCompleteMultipartUpload, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &CompleteMultipartUploadInput{} - } - - output = &CompleteMultipartUploadOutput{} - req = c.newRequest(op, input, output) - return -} - -// CompleteMultipartUpload API operation for Amazon Simple Storage Service. -// -// Completes a multipart upload by assembling previously uploaded parts. -// -// You first initiate the multipart upload and then upload all parts using the -// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// operation. After successfully uploading all relevant parts of an upload, -// you call this action to complete the upload. Upon receiving this request, -// Amazon S3 concatenates all the parts in ascending order by part number to -// create a new object. In the Complete Multipart Upload request, you must provide -// the parts list. You must ensure that the parts list is complete. This action -// concatenates the parts that you provide in the list. For each part in the -// list, you must provide the part number and the ETag value, returned after -// that part was uploaded. -// -// Processing of a Complete Multipart Upload request could take several minutes -// to complete. After Amazon S3 begins processing the request, it sends an HTTP -// response header that specifies a 200 OK response. While processing is in -// progress, Amazon S3 periodically sends white space characters to keep the -// connection from timing out. Because a request could fail after the initial -// 200 OK response has been sent, it is important that you check the response -// body to determine whether the request succeeded. -// -// Note that if CompleteMultipartUpload fails, applications should be prepared -// to retry the failed requests. For more information, see Amazon S3 Error Best -// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). -// -// You cannot use Content-Type: application/x-www-form-urlencoded with Complete -// Multipart Upload requests. Also, if you do not provide a Content-Type header, -// CompleteMultipartUpload returns a 200 OK response. -// -// For more information about multipart uploads, see Uploading Objects Using -// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). -// -// For information about permissions required to use the multipart upload API, -// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). -// -// CompleteMultipartUpload has the following special errors: -// -// - Error code: EntityTooSmall Description: Your proposed upload is smaller -// than the minimum allowed object size. Each part must be at least 5 MB -// in size, except the last part. 400 Bad Request -// -// - Error code: InvalidPart Description: One or more of the specified parts -// could not be found. The part might not have been uploaded, or the specified -// entity tag might not have matched the part's entity tag. 
400 Bad Request -// -// - Error code: InvalidPartOrder Description: The list of parts was not -// in ascending order. The parts list must be specified in order by part -// number. 400 Bad Request -// -// - Error code: NoSuchUpload Description: The specified multipart upload -// does not exist. The upload ID might be invalid, or the multipart upload -// might have been aborted or completed. 404 Not Found -// -// The following operations are related to CompleteMultipartUpload: -// -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation CompleteMultipartUpload for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload -func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { - req, out := c.CompleteMultipartUploadRequest(input) - return out, req.Send() -} - -// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of -// the ability to pass a context and additional request options. -// -// See CompleteMultipartUpload for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { - req, out := c.CompleteMultipartUploadRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCopyObject = "CopyObject" - -// CopyObjectRequest generates a "aws/request.Request" representing the -// client's request for the CopyObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CopyObject for more information on using the CopyObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the CopyObjectRequest method. 
-// req, resp := client.CopyObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject -func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { - op := &request.Operation{ - Name: opCopyObject, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &CopyObjectInput{} - } - - output = &CopyObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// CopyObject API operation for Amazon Simple Storage Service. -// -// Creates a copy of an object that is already stored in Amazon S3. -// -// You can store individual objects of up to 5 TB in Amazon S3. You create a -// copy of your object up to 5 GB in size in a single atomic action using this -// API. However, to copy an object greater than 5 GB, you must use the multipart -// upload Upload Part - Copy (UploadPartCopy) API. For more information, see -// Copy Object Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). -// -// All copy requests must be authenticated. Additionally, you must have read -// access to the source object and write access to the destination bucket. For -// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). -// Both the Region that you want to copy the object from and the Region that -// you want to copy the object to must be enabled for your account. -// -// A copy request might return an error when Amazon S3 receives the copy request -// or while Amazon S3 is copying the files. If the error occurs before the copy -// action starts, you receive a standard Amazon S3 error. If the error occurs -// during the copy operation, the error response is embedded in the 200 OK response. -// This means that a 200 OK response can contain either a success or an error. -// Design your application to parse the contents of the response and handle -// it appropriately. -// -// If the copy is successful, you receive a response with information about -// the copied object. -// -// If the request is an HTTP 1.1 request, the response is chunk encoded. If -// it were not, it would not contain the content-length, and you would need -// to read the entire body. -// -// The copy request charge is based on the storage class and Region that you -// specify for the destination object. For pricing information, see Amazon S3 -// pricing (http://aws.amazon.com/s3/pricing/). -// -// Amazon S3 transfer acceleration does not support cross-Region copies. If -// you request a cross-Region copy using a transfer acceleration endpoint, you -// get a 400 Bad Request error. For more information, see Transfer Acceleration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). -// -// # Metadata -// -// When copying an object, you can preserve all metadata (default) or specify -// new metadata. However, the ACL is not preserved and is set to private for -// the user making the request. To override the default ACL setting, specify -// a new ACL when generating a copy request. For more information, see Using -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). 
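The Metadata section above surfaces as the MetadataDirective field on CopyObjectInput: COPY (the default) preserves the source object's metadata, REPLACE swaps in whatever the request carries. A hedged sketch against the v1 SDK, with invented bucket and key names:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	// CopySource is "source-bucket/key" (URL-encoded). REPLACE means the
	// Metadata map below is stored instead of the source object's metadata.
	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:            aws.String("dest-bucket"),
		Key:               aws.String("reports/2024.csv"),
		CopySource:        aws.String("source-bucket/reports/2024.csv"),
		MetadataDirective: aws.String(s3.MetadataDirectiveReplace),
		Metadata:          map[string]*string{"owner": aws.String("analytics")},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("etag:", aws.StringValue(out.CopyObjectResult.ETag))
}
```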
-// -// To specify whether you want the object metadata copied from the source object -// or replaced with metadata provided in the request, you can optionally add -// the x-amz-metadata-directive header. When you grant permissions, you can -// use the s3:x-amz-metadata-directive condition key to enforce certain metadata -// behavior when objects are uploaded. For more information, see Specifying -// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) -// in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition -// keys, see Actions, Resources, and Condition Keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). -// -// x-amz-copy-source-if Headers -// -// To only copy an object under certain conditions, such as whether the Etag -// matches or whether the object was modified before or after a specified date, -// use the following request parameters: -// -// - x-amz-copy-source-if-match -// -// - x-amz-copy-source-if-none-match -// -// - x-amz-copy-source-if-unmodified-since -// -// - x-amz-copy-source-if-modified-since -// -// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since -// headers are present in the request and evaluate as follows, Amazon S3 returns -// 200 OK and copies the data: -// -// - x-amz-copy-source-if-match condition evaluates to true -// -// - x-amz-copy-source-if-unmodified-since condition evaluates to false -// -// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since -// headers are present in the request and evaluate as follows, Amazon S3 returns -// the 412 Precondition Failed response code: -// -// - x-amz-copy-source-if-none-match condition evaluates to false -// -// - x-amz-copy-source-if-modified-since condition evaluates to true -// -// All headers with the x-amz- prefix, including x-amz-copy-source, must be -// signed. -// -// # Server-side encryption -// -// When you perform a CopyObject operation, you can optionally use the appropriate -// encryption-related headers to encrypt the object using server-side encryption -// with Amazon Web Services managed encryption keys (SSE-S3 or SSE-KMS) or a -// customer-provided encryption key. With server-side encryption, Amazon S3 -// encrypts your data as it writes it to disks in its data centers and decrypts -// the data when you access it. For more information about server-side encryption, -// see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). -// -// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the -// object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon S3 User Guide. -// -// # Access Control List (ACL)-Specific Request Headers -// -// When copying an object, you can optionally use headers to grant ACL-based -// permissions. By default, all objects are private. Only the owner has full -// access control. When adding a new object, you can grant permissions to individual -// Amazon Web Services accounts or to predefined groups defined by Amazon S3. -// These permissions are then added to the ACL on the object. For more information, -// see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). 
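The x-amz-copy-source-if-* headers described above map onto optional CopyObjectInput fields; when the preconditions fail as the truth table says, S3 answers 412 Precondition Failed. A sketch (the ETag, names, and the "PreconditionFailed" error-code string are illustrative assumptions):

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	// Copy only if the source still has this ETag and has not been modified
	// since the cutoff: the x-amz-copy-source-if-match and
	// x-amz-copy-source-if-unmodified-since pair from the doc above.
	cutoff := time.Date(2024, time.January, 1, 0, 0, 0, 0, time.UTC)
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:                      aws.String("dest-bucket"),
		Key:                         aws.String("snapshot.json"),
		CopySource:                  aws.String("source-bucket/snapshot.json"),
		CopySourceIfMatch:           aws.String(`"9b2cf535f27731c974343645a3985328"`),
		CopySourceIfUnmodifiedSince: aws.Time(cutoff),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "PreconditionFailed" {
		fmt.Println("source changed since", cutoff)
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Println("copied")
}
```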
-// -// If the bucket that you're copying objects to uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. -// Buckets that use this setting only accept PUT requests that don't specify -// an ACL or PUT requests that specify bucket owner full control ACLs, such -// as the bucket-owner-full-control canned ACL or an equivalent form of this -// ACL expressed in the XML format. -// -// For more information, see Controlling ownership of objects and disabling -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. -// -// If your bucket uses the bucket owner enforced setting for Object Ownership, -// all objects written to the bucket by any account will be owned by the bucket -// owner. -// -// # Checksums -// -// When copying an object, if it has a checksum, that checksum will be copied -// to the new object by default. When you copy the object over, you may optionally -// specify a different checksum algorithm to use with the x-amz-checksum-algorithm -// header. -// -// # Storage Class Options -// -// You can use the CopyObject action to change the storage class of an object -// that is already stored in Amazon S3 using the StorageClass parameter. For -// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) -// in the Amazon S3 User Guide. -// -// # Versioning -// -// By default, x-amz-copy-source identifies the current version of an object -// to copy. If the current version is a delete marker, Amazon S3 behaves as -// if the object was deleted. To copy a different version, use the versionId -// subresource. -// -// If you enable versioning on the target bucket, Amazon S3 generates a unique -// version ID for the object being copied. This version ID is different from -// the version ID of the source object. Amazon S3 returns the version ID of -// the copied object in the x-amz-version-id response header in the response. -// -// If you do not enable versioning or suspend it on the target bucket, the version -// ID that Amazon S3 generates is always null. -// -// If the source object's storage class is GLACIER, you must restore a copy -// of this object before you can use it as a source object for the copy operation. -// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). -// -// The following operations are related to CopyObject: -// -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation CopyObject for usage and error information. -// -// Returned Error Codes: -// - ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" -// The source object of the COPY action is not in the active tier and is only -// stored in Amazon S3 Glacier. 
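As the error-handling note above says, callers distinguish service errors with a runtime type assertion on awserr.Error and its Code and Message methods. A minimal sketch; the helper name is hypothetical:

import (
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// classifyCopyError inspects an error returned by CopyObject and reports
// whether the source must first be restored from the archive tier.
func classifyCopyError(err error) string {
	if aerr, ok := err.(awserr.Error); ok {
		if aerr.Code() == s3.ErrCodeObjectNotInActiveTierError {
			return "source object is archived; restore it before copying"
		}
		return aerr.Code() + ": " + aerr.Message()
	}
	return err.Error()
}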
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject -func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { - req, out := c.CopyObjectRequest(input) - return out, req.Send() -} - -// CopyObjectWithContext is the same as CopyObject with the addition of -// the ability to pass a context and additional request options. -// -// See CopyObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { - req, out := c.CopyObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateBucket = "CreateBucket" - -// CreateBucketRequest generates a "aws/request.Request" representing the -// client's request for the CreateBucket operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateBucket for more information on using the CreateBucket -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the CreateBucketRequest method. -// req, resp := client.CreateBucketRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket -func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { - op := &request.Operation{ - Name: opCreateBucket, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &CreateBucketInput{} - } - - output = &CreateBucketOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateBucket API operation for Amazon Simple Storage Service. -// -// Creates a new S3 bucket. To create a bucket, you must register with Amazon -// S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. -// Anonymous requests are never allowed to create buckets. By creating the bucket, -// you become the bucket owner. -// -// Not every string is an acceptable bucket name. For information about bucket -// naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). -// -// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). -// -// By default, the bucket is created in the US East (N. Virginia) Region. You -// can optionally specify a Region in the request body. You might choose a Region -// to optimize latency, minimize costs, or address regulatory requirements. -// For example, if you reside in Europe, you will probably find it advantageous -// to create buckets in the Europe (Ireland) Region. 
For more information, see -// Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). -// -// If you send your create bucket request to the s3.amazonaws.com endpoint, -// the request goes to the us-east-1 Region. Accordingly, the signature calculations -// in Signature Version 4 must use us-east-1 as the Region, even if the location -// constraint in the request specifies another Region where the bucket is to -// be created. If you create a bucket in a Region other than US East (N. Virginia), -// your application must be able to handle a 307 redirect. For more information, -// see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). -// -// Access control lists (ACLs) -// -// When creating a bucket using this operation, you can optionally configure -// the bucket ACL to specify the accounts or groups that should be granted specific -// permissions on the bucket. -// -// If your CreateBucket request sets bucket owner enforced for S3 Object Ownership -// and specifies a bucket ACL that provides access to an external Amazon Web -// Services account, your request fails with a 400 error and returns the InvalidBucketAclWithObjectOwnership -// error code. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. -// -// There are two ways to grant the appropriate permissions using the request -// headers. -// -// - Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports -// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a -// predefined set of grantees and permissions. For more information, see -// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// - Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, -// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control -// headers. These headers map to the set of permissions Amazon S3 supports -// in an ACL. For more information, see Access control list (ACL) overview -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html). -// You specify each grantee as a type=value pair, where the type is one of -// the following: id – if the value specified is the canonical user ID -// of an Amazon Web Services account uri – if you are granting permissions -// to a predefined group emailAddress – if the value specified is the email -// address of an Amazon Web Services account Using email addresses to specify -// a grantee is only supported in the following Amazon Web Services Regions: -// US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific -// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) -// South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-read header grants the Amazon Web Services accounts identified -// by account IDs permissions to read object data and its metadata: x-amz-grant-read: -// id="11112222333", id="444455556666" -// -// You can use either a canned ACL or specify access permissions explicitly. -// You cannot do both.
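A minimal sketch of the canned-ACL path described above, assuming this vendored v1 client; the helper name, bucket name, and Region are placeholders. For us-east-1 the CreateBucketConfiguration block would be omitted rather than set.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// createPrivateBucket creates a bucket with the "private" canned ACL in an
// explicit Region, then blocks until the bucket is visible to later calls.
func createPrivateBucket(svc *s3.S3, name, region string) error {
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(name),
		ACL:    aws.String(s3.BucketCannedACLPrivate),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String(region),
		},
	})
	if err != nil {
		return err
	}
	return svc.WaitUntilBucketExists(&s3.HeadBucketInput{Bucket: aws.String(name)})
}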
-// -// # Permissions -// -// In addition to s3:CreateBucket, the following permissions are required when -// your CreateBucket includes specific headers: -// -// - ACLs - If your CreateBucket request specifies ACL permissions and the -// ACL is public-read, public-read-write, authenticated-read, or if you specify -// access permissions explicitly through any other ACL, both s3:CreateBucket -// and s3:PutBucketAcl permissions are needed. If the ACL in the CreateBucket -// request is private or doesn't specify any ACLs, only s3:CreateBucket permission -// is needed. -// -// - Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket -// request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning -// permissions are required. -// -// - S3 Object Ownership - If your CreateBucket request includes the -// x-amz-object-ownership header, s3:PutBucketOwnershipControls permission -// is required. -// -// The following operations are related to CreateBucket: -// -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation CreateBucket for usage and error information. -// -// Returned Error Codes: -// -// - ErrCodeBucketAlreadyExists "BucketAlreadyExists" -// The requested bucket name is not available. The bucket namespace is shared -// by all users of the system. Select a different name and try again. -// -// - ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" -// The bucket you tried to create already exists, and you own it. Amazon S3 -// returns this error in all Amazon Web Services Regions except in the North -// Virginia Region. For legacy compatibility, if you re-create an existing bucket -// that you already own in the North Virginia Region, Amazon S3 returns 200 -// OK and resets the bucket access control lists (ACLs). -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket -func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { - req, out := c.CreateBucketRequest(input) - return out, req.Send() -} - -// CreateBucketWithContext is the same as CreateBucket with the addition of -// the ability to pass a context and additional request options. -// -// See CreateBucket for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { - req, out := c.CreateBucketRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateMultipartUpload = "CreateMultipartUpload" - -// CreateMultipartUploadRequest generates a "aws/request.Request" representing the -// client's request for the CreateMultipartUpload operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully.
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateMultipartUpload for more information on using the CreateMultipartUpload -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the CreateMultipartUploadRequest method. -// req, resp := client.CreateMultipartUploadRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload -func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { - op := &request.Operation{ - Name: opCreateMultipartUpload, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?uploads", - } - - if input == nil { - input = &CreateMultipartUploadInput{} - } - - output = &CreateMultipartUploadOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateMultipartUpload API operation for Amazon Simple Storage Service. -// -// This action initiates a multipart upload and returns an upload ID. This upload -// ID is used to associate all of the parts in the specific multipart upload. -// You specify this upload ID in each of your subsequent upload part requests -// (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). -// You also include this upload ID in the final request to either complete or -// abort the multipart upload request. -// -// For more information about multipart uploads, see Multipart Upload Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). -// -// If you have configured a lifecycle rule to abort incomplete multipart uploads, -// the upload must complete within the number of days specified in the bucket -// lifecycle configuration. Otherwise, the incomplete multipart upload becomes -// eligible for an abort action and Amazon S3 aborts the multipart upload. For -// more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). -// -// For information about the permissions required to use the multipart upload -// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). -// -// For request signing, multipart upload is just a series of regular requests. -// You initiate a multipart upload, send one or more requests to upload parts, -// and then complete the multipart upload process. You sign each request individually. -// There is nothing special about signing multipart upload requests. For more -// information about signing, see Authenticating Requests (Amazon Web Services -// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). -// -// After you initiate a multipart upload and upload one or more parts, to stop -// being charged for storing the uploaded parts, you must either complete or -// abort the multipart upload. Amazon S3 frees up the space used to store the -// parts and stops charging you for storing them only after you either complete -// or abort a multipart upload.
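The initiate/upload/complete sequence just described, sketched for a single part against this vendored v1 client; the helper name is hypothetical, and the abort path reflects the charging note above. Real uploads loop over parts, each at least 5 MiB except the last.

import (
	"bytes"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func uploadOnePart(svc *s3.S3, bucket, key string, data []byte) error {
	// Step 1: initiate and obtain the upload ID that ties the parts together.
	mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	// Step 2: upload each part, recording the returned ETag.
	part, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     aws.String(bucket),
		Key:        aws.String(key),
		UploadId:   mpu.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader(data),
	})
	if err != nil {
		// Abort so the stored parts stop accruing storage charges.
		svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
			Bucket: aws.String(bucket), Key: aws.String(key), UploadId: mpu.UploadId,
		})
		return err
	}
	// Step 3: complete, listing every part number and ETag.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: mpu.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
		},
	})
	return err
}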
-// -// You can optionally request server-side encryption. For server-side encryption, -// Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts it when you access it. You can provide your own encryption key, -// or use Amazon Web Services KMS keys or Amazon S3-managed encryption keys. -// If you choose to provide your own encryption key, the request headers you -// provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// requests must match the headers you used in the request to initiate the upload -// by using CreateMultipartUpload. -// -// To perform a multipart upload with encryption using an Amazon Web Services -// KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* -// actions on the key. These permissions are required because Amazon S3 must -// decrypt and read data from the encrypted file parts before it completes the -// multipart upload. For more information, see Multipart upload API and permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// in the Amazon S3 User Guide. -// -// If your Identity and Access Management (IAM) user or role is in the same -// Amazon Web Services account as the KMS key, then you must have these permissions -// on the key policy. If your IAM user or role belongs to a different account -// than the key, then you must have the permissions on both the key policy and -// your IAM user or role. -// -// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). -// -// # Access Permissions -// -// When uploading an object, you can optionally specify the accounts or groups -// that should be granted specific permissions on the new object. There are -// two ways to grant the permissions using the request headers: -// -// - Specify a canned ACL with the x-amz-acl request header. For more information, -// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// - Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, -// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters -// map to the set of permissions that Amazon S3 supports in an ACL. For more -// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// -// You can use either a canned ACL or specify access permissions explicitly. -// You cannot do both. -// -// # Server-Side-Encryption-Specific Request Headers -// -// You can optionally tell Amazon S3 to encrypt data at rest using server-side -// encryption. Server-side encryption is for data encryption at rest. Amazon -// S3 encrypts your data as it writes it to disks in its data centers and decrypts -// it when you access it. The option you use depends on whether you want to -// use Amazon Web Services managed encryption keys or provide your own encryption -// key. -// -// - Use encryption keys managed by Amazon S3 or customer managed key stored -// in Amazon Web Services Key Management Service (Amazon Web Services KMS) -// – If you want Amazon Web Services to manage the keys used to encrypt -// data, specify the following headers in the request.
x-amz-server-side-encryption -// x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context -// If you specify x-amz-server-side-encryption:aws:kms, but don't provide -// x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon -// Web Services managed key in Amazon Web Services KMS to protect the data. -// All GET and PUT requests for an object protected by Amazon Web Services -// KMS fail if you don't make them with SSL or by using SigV4. For more information -// about server-side encryption with KMS key (SSE-KMS), see Protecting Data -// Using Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// - Use customer-provided encryption keys – If you want to manage your -// own encryption keys, provide all the following headers in the request. -// x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key -// x-amz-server-side-encryption-customer-key-MD5 For more information about -// server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using -// Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// # Access-Control-List (ACL)-Specific Request Headers -// -// You also can use the following access control–related headers with this -// operation. By default, all objects are private. Only the owner has full access -// control. When adding a new object, you can grant permissions to individual -// Amazon Web Services accounts or to predefined groups defined by Amazon S3. -// These permissions are then added to the access control list (ACL) on the -// object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). -// With this operation, you can grant access permissions using one of the following -// two methods: -// -// - Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined -// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees -// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// - Specify access permissions explicitly — To explicitly grant access -// permissions to specific Amazon Web Services accounts or groups, use the -// following headers. Each header maps to specific permissions that Amazon -// S3 supports in an ACL. For more information, see Access Control List (ACL) -// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// In the header, you specify a list of grantees who get the specific permission. -// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write -// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You -// specify each grantee as a type=value pair, where the type is one of the -// following: id – if the value specified is the canonical user ID of an -// Amazon Web Services account uri – if you are granting permissions to -// a predefined group emailAddress – if the value specified is the email -// address of an Amazon Web Services account Using email addresses to specify -// a grantee is only supported in the following Amazon Web Services Regions: -// US East (N. Virginia) US West (N. 
California) US West (Oregon) Asia Pacific -// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) -// South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-read header grants the Amazon Web Services accounts identified -// by account IDs permissions to read object data and its metadata: x-amz-grant-read: -// id="11112222333", id="444455556666" -// -// The following operations are related to CreateMultipartUpload: -// -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation CreateMultipartUpload for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload -func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { - req, out := c.CreateMultipartUploadRequest(input) - return out, req.Send() -} - -// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of -// the ability to pass a context and additional request options. -// -// See CreateMultipartUpload for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { - req, out := c.CreateMultipartUploadRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucket = "DeleteBucket" - -// DeleteBucketRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucket operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucket for more information on using the DeleteBucket -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBucketRequest method. 
-// req, resp := client.DeleteBucketRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket -func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { - op := &request.Operation{ - Name: opDeleteBucket, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &DeleteBucketInput{} - } - - output = &DeleteBucketOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucket API operation for Amazon Simple Storage Service. -// -// Deletes the S3 bucket. All objects (including all object versions and delete -// markers) in the bucket must be deleted before the bucket itself can be deleted. -// -// Related Resources -// -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucket for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket -func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { - req, out := c.DeleteBucketRequest(input) - return out, req.Send() -} - -// DeleteBucketWithContext is the same as DeleteBucket with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucket for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { - req, out := c.DeleteBucketRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" - -// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 
-// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration -func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { - op := &request.Operation{ - Name: opDeleteBucketAnalyticsConfiguration, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?analytics", - } - - if input == nil { - input = &DeleteBucketAnalyticsConfigurationInput{} - } - - output = &DeleteBucketAnalyticsConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. -// -// Deletes an analytics configuration for the bucket (specified by the analytics -// configuration ID). -// -// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics -// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). -// -// The following operations are related to DeleteBucketAnalyticsConfiguration: -// -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketAnalyticsConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration -func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { - req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) - return out, req.Send() -} - -// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { - req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketCors = "DeleteBucketCors" - -// DeleteBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketCors operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketCors for more information on using the DeleteBucketCors -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBucketCorsRequest method. -// req, resp := client.DeleteBucketCorsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors -func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { - op := &request.Operation{ - Name: opDeleteBucketCors, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &DeleteBucketCorsInput{} - } - - output = &DeleteBucketCorsOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketCors API operation for Amazon Simple Storage Service. -// -// Deletes the cors configuration information set for the bucket. -// -// To use this operation, you must have permission to perform the s3:PutBucketCORS -// action. The bucket owner has this permission by default and can grant this -// permission to others. -// -// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. -// -// Related Resources: -// -// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) -// -// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketCors for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors -func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { - req, out := c.DeleteBucketCorsRequest(input) - return out, req.Send() -} - -// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketCors for details on how to use this API operation. 
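A one-call sketch of the DeleteBucketCors operation above; the helper and bucket names are hypothetical, and the caller needs s3:PutBucketCORS as the comment notes.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// clearCORS removes the bucket's cors configuration.
func clearCORS(svc *s3.S3, bucket string) error {
	_, err := svc.DeleteBucketCors(&s3.DeleteBucketCorsInput{
		Bucket: aws.String(bucket),
	})
	return err
}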
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { - req, out := c.DeleteBucketCorsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketEncryption = "DeleteBucketEncryption" - -// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketEncryption operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBucketEncryptionRequest method. -// req, resp := client.DeleteBucketEncryptionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption -func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { - op := &request.Operation{ - Name: opDeleteBucketEncryption, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?encryption", - } - - if input == nil { - input = &DeleteBucketEncryptionInput{} - } - - output = &DeleteBucketEncryptionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketEncryption API operation for Amazon Simple Storage Service. -// -// This implementation of the DELETE action removes default encryption from -// the bucket. For information about the Amazon S3 default encryption feature, -// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon S3 User Guide. -// -// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. -// -// Related Resources -// -// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) -// -// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketEncryption for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption -func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { - req, out := c.DeleteBucketEncryptionRequest(input) - return out, req.Send() -} - -// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketEncryption for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { - req, out := c.DeleteBucketEncryptionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketIntelligentTieringConfiguration = "DeleteBucketIntelligentTieringConfiguration" - -// DeleteBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketIntelligentTieringConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketIntelligentTieringConfiguration for more information on using the DeleteBucketIntelligentTieringConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBucketIntelligentTieringConfigurationRequest method. -// req, resp := client.DeleteBucketIntelligentTieringConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration -func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBucketIntelligentTieringConfigurationInput) (req *request.Request, output *DeleteBucketIntelligentTieringConfigurationOutput) { - op := &request.Operation{ - Name: opDeleteBucketIntelligentTieringConfiguration, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?intelligent-tiering", - } - - if input == nil { - input = &DeleteBucketIntelligentTieringConfigurationInput{} - } - - output = &DeleteBucketIntelligentTieringConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. -// -// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. 
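For the DeleteBucketEncryption operation documented just above, a sketch of its WithContext variant with a deadline, matching the context convention these comments repeat; the helper name, bucket, and timeout are placeholders.

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// dropDefaultEncryption removes the bucket's default-encryption setting,
// cancelling the request if it exceeds the ten-second deadline.
func dropDefaultEncryption(svc *s3.S3, bucket string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	_, err := svc.DeleteBucketEncryptionWithContext(ctx, &s3.DeleteBucketEncryptionInput{
		Bucket: aws.String(bucket),
	})
	return err
}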
-// -// The S3 Intelligent-Tiering storage class is designed to optimize storage -// costs by automatically moving data to the most cost-effective storage access -// tier, without performance impact or operational overhead. S3 Intelligent-Tiering -// delivers automatic cost savings in three low latency and high throughput -// access tiers. To get the lowest storage cost on data that can be accessed -// in minutes to hours, you can choose to activate additional archiving capabilities. -// -// The S3 Intelligent-Tiering storage class is the ideal storage class for data -// with unknown, changing, or unpredictable access patterns, independent of -// object size or retention period. If the size of an object is less than 128 -// KB, it is not monitored and not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the Frequent Access tier rates -// in the S3 Intelligent-Tiering storage class. -// -// For more information, see Storage class for automatically optimizing frequently -// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). -// -// Operations related to DeleteBucketIntelligentTieringConfiguration include: -// -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) -// -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketIntelligentTieringConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration -func (c *S3) DeleteBucketIntelligentTieringConfiguration(input *DeleteBucketIntelligentTieringConfigurationInput) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { - req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) - return out, req.Send() -} - -// DeleteBucketIntelligentTieringConfigurationWithContext is the same as DeleteBucketIntelligentTieringConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketIntelligentTieringConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *DeleteBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { - req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" - -// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. -// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration -func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { - op := &request.Operation{ - Name: opDeleteBucketInventoryConfiguration, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?inventory", - } - - if input == nil { - input = &DeleteBucketInventoryConfigurationInput{} - } - - output = &DeleteBucketInventoryConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. -// -// Deletes an inventory configuration (identified by the inventory ID) from -// the bucket. -// -// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). -// -// Operations related to DeleteBucketInventoryConfiguration include: -// -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) -// -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketInventoryConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration -func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { - req, out := c.DeleteBucketInventoryConfigurationRequest(input) - return out, req.Send() -} - -// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketInventoryConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { - req, out := c.DeleteBucketInventoryConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketLifecycle = "DeleteBucketLifecycle" - -// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketLifecycle operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBucketLifecycleRequest method. -// req, resp := client.DeleteBucketLifecycleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle -func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { - op := &request.Operation{ - Name: opDeleteBucketLifecycle, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &DeleteBucketLifecycleInput{} - } - - output = &DeleteBucketLifecycleOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. -// -// Deletes the lifecycle configuration from the specified bucket. Amazon S3 -// removes all the lifecycle configuration rules in the lifecycle subresource -// associated with the bucket. Your objects never expire, and Amazon S3 no longer -// automatically deletes any objects on the basis of rules contained in the -// deleted lifecycle configuration. 
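A minimal sketch of the lifecycle deletion summarized above, with hypothetical helper and bucket names; see the propagation caveat that follows before assuming the rules stop applying instantly.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// clearLifecycle deletes every lifecycle rule on the bucket, so Amazon S3
// stops expiring or transitioning objects based on the removed rules.
func clearLifecycle(svc *s3.S3, bucket string) error {
	_, err := svc.DeleteBucketLifecycle(&s3.DeleteBucketLifecycleInput{
		Bucket: aws.String(bucket),
	})
	return err
}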
-// -// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration -// action. By default, the bucket owner has this permission and the bucket owner -// can grant this permission to others. -// -// There is usually some time lag before lifecycle configuration deletion is -// fully propagated to all the Amazon S3 systems. -// -// For more information about the object expiration, see Elements to Describe -// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). -// -// Related actions include: -// -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketLifecycle for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle -func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { - req, out := c.DeleteBucketLifecycleRequest(input) - return out, req.Send() -} - -// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketLifecycle for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { - req, out := c.DeleteBucketLifecycleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" - -// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. 
-// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration -func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { - op := &request.Operation{ - Name: opDeleteBucketMetricsConfiguration, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?metrics", - } - - if input == nil { - input = &DeleteBucketMetricsConfigurationInput{} - } - - output = &DeleteBucketMetricsConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. -// -// Deletes a metrics configuration for the Amazon CloudWatch request metrics -// (specified by the metrics configuration ID) from the bucket. Note that this -// doesn't include the daily storage metrics. -// -// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// For information about CloudWatch request metrics for Amazon S3, see Monitoring -// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). -// -// The following operations are related to DeleteBucketMetricsConfiguration: -// -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// -// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketMetricsConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration -func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { - req, out := c.DeleteBucketMetricsConfigurationRequest(input) - return out, req.Send() -} - -// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketMetricsConfiguration for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { - req, out := c.DeleteBucketMetricsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketOwnershipControls = "DeleteBucketOwnershipControls" - -// DeleteBucketOwnershipControlsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketOwnershipControls operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketOwnershipControls for more information on using the DeleteBucketOwnershipControls -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBucketOwnershipControlsRequest method. -// req, resp := client.DeleteBucketOwnershipControlsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls -func (c *S3) DeleteBucketOwnershipControlsRequest(input *DeleteBucketOwnershipControlsInput) (req *request.Request, output *DeleteBucketOwnershipControlsOutput) { - op := &request.Operation{ - Name: opDeleteBucketOwnershipControls, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?ownershipControls", - } - - if input == nil { - input = &DeleteBucketOwnershipControlsInput{} - } - - output = &DeleteBucketOwnershipControlsOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketOwnershipControls API operation for Amazon Simple Storage Service. -// -// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, -// you must have the s3:PutBucketOwnershipControls permission. For more information -// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// -// For information about Amazon S3 Object Ownership, see Using Object Ownership -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). -// -// The following operations are related to DeleteBucketOwnershipControls: -// -// - GetBucketOwnershipControls -// -// - PutBucketOwnershipControls -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketOwnershipControls for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls -func (c *S3) DeleteBucketOwnershipControls(input *DeleteBucketOwnershipControlsInput) (*DeleteBucketOwnershipControlsOutput, error) { - req, out := c.DeleteBucketOwnershipControlsRequest(input) - return out, req.Send() -} - -// DeleteBucketOwnershipControlsWithContext is the same as DeleteBucketOwnershipControls with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketOwnershipControls for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketOwnershipControlsWithContext(ctx aws.Context, input *DeleteBucketOwnershipControlsInput, opts ...request.Option) (*DeleteBucketOwnershipControlsOutput, error) { - req, out := c.DeleteBucketOwnershipControlsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketPolicy = "DeleteBucketPolicy" - -// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketPolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBucketPolicyRequest method. -// req, resp := client.DeleteBucketPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy -func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { - op := &request.Operation{ - Name: opDeleteBucketPolicy, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &DeleteBucketPolicyInput{} - } - - output = &DeleteBucketPolicyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketPolicy API operation for Amazon Simple Storage Service. -// -// This implementation of the DELETE action uses the policy subresource to delete -// the policy of a specified bucket. If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must have the DeleteBucketPolicy permissions on the specified -// bucket and belong to the bucket owner's account to use this operation. -// -// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 -// Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. 
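-//
-// A minimal illustrative sketch of telling these two failures apart, assuming
-// an existing *S3 client named client and a placeholder bucket name; an
-// awserr.RequestFailure exposes the HTTP status code of the response:
-//
-//    _, err := client.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
-//        Bucket: aws.String("example-bucket"), // placeholder
-//    })
-//    if reqErr, ok := err.(awserr.RequestFailure); ok {
-//        switch reqErr.StatusCode() {
-//        case 403: // missing DeleteBucketPolicy permission
-//        case 405: // caller is outside the bucket owner's account
-//        }
-//    }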
-//
-// As a security precaution, the root user of the Amazon Web Services account
-// that owns a bucket can always use this operation, even if the policy explicitly
-// denies the root user the ability to perform this action.
-//
-// For more information about bucket policies, see Using Bucket Policies and
-// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
-//
-// The following operations are related to DeleteBucketPolicy:
-//
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-//
-// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation DeleteBucketPolicy for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
-func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
- req, out := c.DeleteBucketPolicyRequest(input)
- return out, req.Send()
-}
-
-// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteBucketPolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) {
- req, out := c.DeleteBucketPolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDeleteBucketReplication = "DeleteBucketReplication"
-
-// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the
-// client's request for the DeleteBucketReplication operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DeleteBucketReplication for more information on using the DeleteBucketReplication
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DeleteBucketReplicationRequest method.
-// req, resp := client.DeleteBucketReplicationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
-func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) {
- op := &request.Operation{
- Name: opDeleteBucketReplication,
- HTTPMethod: "DELETE",
- HTTPPath: "/{Bucket}?replication",
- }
-
- if input == nil {
- input = &DeleteBucketReplicationInput{}
- }
-
- output = &DeleteBucketReplicationOutput{}
- req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
- return
-}
-
-// DeleteBucketReplication API operation for Amazon Simple Storage Service.
-//
-// Deletes the replication configuration from the bucket.
-//
-// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
-// action. The bucket owner has this permission by default and can grant it
-// to others. For more information about permissions, see Permissions Related
-// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
-//
-// It can take a while for the deletion of a replication configuration to fully
-// propagate.
-//
-// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
-// in the Amazon S3 User Guide.
-//
-// The following operations are related to DeleteBucketReplication:
-//
-// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
-//
-// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation DeleteBucketReplication for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
-func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
- req, out := c.DeleteBucketReplicationRequest(input)
- return out, req.Send()
-}
-
-// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteBucketReplication for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) {
- req, out := c.DeleteBucketReplicationRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send() -} - -const opDeleteBucketTagging = "DeleteBucketTagging" - -// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketTagging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketTagging for more information on using the DeleteBucketTagging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBucketTaggingRequest method. -// req, resp := client.DeleteBucketTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging -func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { - op := &request.Operation{ - Name: opDeleteBucketTagging, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &DeleteBucketTaggingInput{} - } - - output = &DeleteBucketTaggingOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketTagging API operation for Amazon Simple Storage Service. -// -// Deletes the tags from the bucket. -// -// To use this operation, you must have permission to perform the s3:PutBucketTagging -// action. By default, the bucket owner has this permission and can grant this -// permission to others. -// -// The following operations are related to DeleteBucketTagging: -// -// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) -// -// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging -func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { - req, out := c.DeleteBucketTaggingRequest(input) - return out, req.Send() -} - -// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { - req, out := c.DeleteBucketTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBucketWebsite = "DeleteBucketWebsite" - -// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketWebsite operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBucketWebsiteRequest method. -// req, resp := client.DeleteBucketWebsiteRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite -func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { - op := &request.Operation{ - Name: opDeleteBucketWebsite, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &DeleteBucketWebsiteInput{} - } - - output = &DeleteBucketWebsiteOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteBucketWebsite API operation for Amazon Simple Storage Service. -// -// This action removes the website configuration for a bucket. Amazon S3 returns -// a 200 OK response upon successfully deleting a website configuration on the -// specified bucket. You will get a 200 OK response if the website configuration -// you are trying to delete does not exist on the bucket. Amazon S3 returns -// a 404 response if the bucket specified in the request does not exist. -// -// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, -// only the bucket owner can delete the website configuration attached to a -// bucket. However, bucket owners can grant other users permission to delete -// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite -// permission. -// -// For more information about hosting websites, see Hosting Websites on Amazon -// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). -// -// The following operations are related to DeleteBucketWebsite: -// -// - GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) -// -// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteBucketWebsite for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite -func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { - req, out := c.DeleteBucketWebsiteRequest(input) - return out, req.Send() -} - -// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBucketWebsite for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { - req, out := c.DeleteBucketWebsiteRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteObject = "DeleteObject" - -// DeleteObjectRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteObject for more information on using the DeleteObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteObjectRequest method. -// req, resp := client.DeleteObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject -func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { - op := &request.Operation{ - Name: opDeleteObject, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &DeleteObjectInput{} - } - - output = &DeleteObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// DeleteObject API operation for Amazon Simple Storage Service. -// -// Removes the null version (if there is one) of an object and inserts a delete -// marker, which becomes the latest version of the object. If there isn't a -// null version, Amazon S3 does not remove any objects but will still respond -// that the command was successful. -// -// To remove a specific version, you must be the bucket owner and you must use -// the version Id subresource. Using this subresource permanently deletes the -// version. If the object deleted is a delete marker, Amazon S3 sets the response -// header, x-amz-delete-marker, to true. -// -// If the object you want to delete is in a bucket where the bucket versioning -// configuration is MFA Delete enabled, you must include the x-amz-mfa request -// header in the DELETE versionId request. Requests that include x-amz-mfa must -// use HTTPS. -// -// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). 
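-//
-// A minimal illustrative sketch of deleting a specific object version from
-// such a bucket, assuming an existing *S3 client named client; every input
-// value is a placeholder, and the MFA value concatenates the authentication
-// device's serial number, a space, and its displayed code:
-//
-//    _, err := client.DeleteObject(&s3.DeleteObjectInput{
-//        Bucket:    aws.String("example-bucket"),
-//        Key:       aws.String("example-key"),
-//        VersionId: aws.String("example-version-id"),
-//        MFA:       aws.String("arn:aws:iam::123456789012:mfa/user 123456"),
-//    })
-//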
-// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). -// -// You can delete objects by explicitly calling DELETE Object or configure its -// lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) -// to enable Amazon S3 to remove them for you. If you want to block users or -// accounts from removing or deleting objects from your bucket, you must deny -// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration -// actions. -// -// The following action is related to DeleteObject: -// -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteObject for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject -func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { - req, out := c.DeleteObjectRequest(input) - return out, req.Send() -} - -// DeleteObjectWithContext is the same as DeleteObject with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { - req, out := c.DeleteObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteObjectTagging = "DeleteObjectTagging" - -// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObjectTagging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteObjectTagging for more information on using the DeleteObjectTagging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteObjectTaggingRequest method. 
-// req, resp := client.DeleteObjectTaggingRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
-func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
- op := &request.Operation{
- Name: opDeleteObjectTagging,
- HTTPMethod: "DELETE",
- HTTPPath: "/{Bucket}/{Key+}?tagging",
- }
-
- if input == nil {
- input = &DeleteObjectTaggingInput{}
- }
-
- output = &DeleteObjectTaggingOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DeleteObjectTagging API operation for Amazon Simple Storage Service.
-//
-// Removes the entire tag set from the specified object. For more information
-// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
-//
-// To use this operation, you must have permission to perform the s3:DeleteObjectTagging
-// action.
-//
-// To delete tags of a specific object version, add the versionId query parameter
-// in the request. You will need permission for the s3:DeleteObjectVersionTagging
-// action.
-//
-// The following operations are related to DeleteObjectTagging:
-//
-// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
-//
-// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation DeleteObjectTagging for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
-func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
- req, out := c.DeleteObjectTaggingRequest(input)
- return out, req.Send()
-}
-
-// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteObjectTagging for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) {
- req, out := c.DeleteObjectTaggingRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDeleteObjects = "DeleteObjects"
-
-// DeleteObjectsRequest generates a "aws/request.Request" representing the
-// client's request for the DeleteObjects operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DeleteObjects for more information on using the DeleteObjects
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DeleteObjectsRequest method.
-// req, resp := client.DeleteObjectsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
-func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) {
- op := &request.Operation{
- Name: opDeleteObjects,
- HTTPMethod: "POST",
- HTTPPath: "/{Bucket}?delete",
- }
-
- if input == nil {
- input = &DeleteObjectsInput{}
- }
-
- output = &DeleteObjectsOutput{}
- req = c.newRequest(op, input, output)
- req.Handlers.Build.PushBackNamed(request.NamedHandler{
- Name: "contentMd5Handler",
- Fn: checksum.AddBodyContentMD5Handler,
- })
- return
-}
-
-// DeleteObjects API operation for Amazon Simple Storage Service.
-//
-// This action enables you to delete multiple objects from a bucket using a
-// single HTTP request. If you know the object keys that you want to delete,
-// then this action provides a suitable alternative to sending individual delete
-// requests, reducing per-request overhead.
-//
-// The request contains a list of up to 1000 keys that you want to delete. In
-// the XML, you provide the object key names, and optionally, version IDs if
-// you want to delete a specific version of the object from a versioning-enabled
-// bucket. For each key, Amazon S3 performs a delete action and returns the
-// result of that delete, success or failure, in the response. Note that if
-// the object specified in the request is not found, Amazon S3 returns the result
-// as deleted.
-//
-// The action supports two modes for the response: verbose and quiet. By default,
-// the action uses verbose mode in which the response includes the result of
-// deletion of each key in your request. In quiet mode the response includes
-// only keys where the delete action encountered an error. For a successful
-// deletion, the action does not return any information about the delete in
-// the response body.
-//
-// When performing this action on an MFA Delete enabled bucket with a request
-// that attempts to delete any versioned objects, you must include an MFA token.
-// If you do not provide one, the entire request will fail, even if there are
-// non-versioned objects you are trying to delete. If you provide an invalid
-// token, whether there are versioned keys in the request or not, the entire
-// Multi-Object Delete request will fail. For information about MFA Delete,
-// see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete).
-//
-// Finally, the Content-MD5 header is required for all Multi-Object Delete requests.
-// Amazon S3 uses the header value to ensure that your request body has not
-// been altered in transit.
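-//
-// A minimal illustrative sketch of a quiet-mode Multi-Object Delete, assuming
-// an existing *S3 client named client and placeholder names; the Content-MD5
-// header is added automatically by the contentMd5Handler registered above:
-//
-//    _, err := client.DeleteObjects(&s3.DeleteObjectsInput{
-//        Bucket: aws.String("example-bucket"),
-//        Delete: &s3.Delete{
-//            Objects: []*s3.ObjectIdentifier{
-//                {Key: aws.String("key-1")},
-//                {Key: aws.String("key-2"), VersionId: aws.String("example-version-id")},
-//            },
-//            Quiet: aws.Bool(true), // respond only with keys whose delete failed
-//        },
-//    })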
-// -// The following operations are related to DeleteObjects: -// -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeleteObjects for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects -func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { - req, out := c.DeleteObjectsRequest(input) - return out, req.Send() -} - -// DeleteObjectsWithContext is the same as DeleteObjects with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteObjects for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { - req, out := c.DeleteObjectsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeletePublicAccessBlock = "DeletePublicAccessBlock" - -// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the -// client's request for the DeletePublicAccessBlock operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeletePublicAccessBlockRequest method. 
-// req, resp := client.DeletePublicAccessBlockRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock -func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { - op := &request.Operation{ - Name: opDeletePublicAccessBlock, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?publicAccessBlock", - } - - if input == nil { - input = &DeletePublicAccessBlockInput{} - } - - output = &DeletePublicAccessBlockOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeletePublicAccessBlock API operation for Amazon Simple Storage Service. -// -// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use -// this operation, you must have the s3:PutBucketPublicAccessBlock permission. -// For more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// The following operations are related to DeletePublicAccessBlock: -// -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// -// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation DeletePublicAccessBlock for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock -func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { - req, out := c.DeletePublicAccessBlockRequest(input) - return out, req.Send() -} - -// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of -// the ability to pass a context and additional request options. -// -// See DeletePublicAccessBlock for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { - req, out := c.DeletePublicAccessBlockRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" - -// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAccelerateConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. -// req, resp := client.GetBucketAccelerateConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration -func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketAccelerateConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?accelerate", - } - - if input == nil { - input = &GetBucketAccelerateConfigurationInput{} - } - - output = &GetBucketAccelerateConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. -// -// This implementation of the GET action uses the accelerate subresource to -// return the Transfer Acceleration state of a bucket, which is either Enabled -// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that -// enables you to perform faster data transfers to and from Amazon S3. -// -// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. -// -// You set the Transfer Acceleration state of an existing bucket to Enabled -// or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) -// operation. -// -// A GET accelerate request does not return a state value for a bucket that -// has no transfer acceleration state. A bucket has no Transfer Acceleration -// state if a state has never been set on the bucket. -// -// For more information about transfer acceleration, see Transfer Acceleration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon S3 User Guide. 
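-//
-// A minimal illustrative sketch of reading the Transfer Acceleration state,
-// assuming an existing *S3 client named client and a placeholder bucket name;
-// Status is nil when no state has ever been set on the bucket:
-//
-//    out, err := client.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
-//        Bucket: aws.String("example-bucket"), // placeholder
-//    })
-//    if err == nil && out.Status != nil {
-//        fmt.Println(aws.StringValue(out.Status)) // "Enabled" or "Suspended"
-//    }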
-// -// Related Resources -// -// - PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketAccelerateConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration -func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { - req, out := c.GetBucketAccelerateConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketAccelerateConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { - req, out := c.GetBucketAccelerateConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketAcl = "GetBucketAcl" - -// GetBucketAclRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAcl operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketAcl for more information on using the GetBucketAcl -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketAclRequest method. -// req, resp := client.GetBucketAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl -func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { - op := &request.Operation{ - Name: opGetBucketAcl, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?acl", - } - - if input == nil { - input = &GetBucketAclInput{} - } - - output = &GetBucketAclOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketAcl API operation for Amazon Simple Storage Service. -// -// This implementation of the GET action uses the acl subresource to return -// the access control list (ACL) of a bucket. To use GET to return the ACL of -// the bucket, you must have READ_ACP access to the bucket. 
If READ_ACP permission -// is granted to the anonymous user, you can return the ACL of the bucket without -// using an authorization header. -// -// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, -// requests to read ACLs are still supported and return the bucket-owner-full-control -// ACL with the owner being the account that created the bucket. For more information, -// see Controlling object ownership and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. -// -// Related Resources -// -// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketAcl for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl -func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { - req, out := c.GetBucketAclRequest(input) - return out, req.Send() -} - -// GetBucketAclWithContext is the same as GetBucketAcl with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketAcl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { - req, out := c.GetBucketAclRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" - -// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 
-// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration -func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketAnalyticsConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?analytics", - } - - if input == nil { - input = &GetBucketAnalyticsConfigurationInput{} - } - - output = &GetBucketAnalyticsConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. -// -// This implementation of the GET action returns an analytics configuration -// (identified by the analytics configuration ID) from the bucket. -// -// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. -// -// For information about Amazon S3 analytics feature, see Amazon S3 Analytics -// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// in the Amazon S3 User Guide. -// -// Related Resources -// -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketAnalyticsConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration -func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { - req, out := c.GetBucketAnalyticsConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketAnalyticsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { - req, out := c.GetBucketAnalyticsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketCors = "GetBucketCors" - -// GetBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketCors operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketCors for more information on using the GetBucketCors -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketCorsRequest method. -// req, resp := client.GetBucketCorsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors -func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { - op := &request.Operation{ - Name: opGetBucketCors, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &GetBucketCorsInput{} - } - - output = &GetBucketCorsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketCors API operation for Amazon Simple Storage Service. -// -// Returns the Cross-Origin Resource Sharing (CORS) configuration information -// set for the bucket. -// -// To use this operation, you must have permission to perform the s3:GetBucketCORS -// action. By default, the bucket owner has this permission and can grant it -// to others. -// -// For more information about CORS, see Enabling Cross-Origin Resource Sharing -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). -// -// The following operations are related to GetBucketCors: -// -// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) -// -// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketCors for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors -func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { - req, out := c.GetBucketCorsRequest(input) - return out, req.Send() -} - -// GetBucketCorsWithContext is the same as GetBucketCors with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketCors for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { - req, out := c.GetBucketCorsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketEncryption = "GetBucketEncryption" - -// GetBucketEncryptionRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketEncryption operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketEncryption for more information on using the GetBucketEncryption -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketEncryptionRequest method. -// req, resp := client.GetBucketEncryptionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption -func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { - op := &request.Operation{ - Name: opGetBucketEncryption, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?encryption", - } - - if input == nil { - input = &GetBucketEncryptionInput{} - } - - output = &GetBucketEncryptionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketEncryption API operation for Amazon Simple Storage Service. -// -// Returns the default encryption configuration for an Amazon S3 bucket. If -// the bucket does not have a default encryption configuration, GetBucketEncryption -// returns ServerSideEncryptionConfigurationNotFoundError. -// -// For information about the Amazon S3 default encryption feature, see Amazon -// S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). -// -// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// The following operations are related to GetBucketEncryption: -// -// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) -// -// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
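The ServerSideEncryptionConfigurationNotFoundError case called out above arrives as an awserr.Error, so it can be separated from hard failures. A sketch under the same assumptions, additionally importing "github.com/aws/aws-sdk-go/aws/awserr"; the helper name is hypothetical, and the error code is compared as a string because the generated s3 package exports no constant for it.

func hasDefaultEncryption(svc *s3.S3, bucket string) (bool, error) {
	_, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String(bucket),
	})
	if aerr, ok := err.(awserr.Error); ok &&
		aerr.Code() == "ServerSideEncryptionConfigurationNotFoundError" {
		return false, nil // bucket simply has no default encryption
	}
	return err == nil, err
}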
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketEncryption for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption -func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { - req, out := c.GetBucketEncryptionRequest(input) - return out, req.Send() -} - -// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketEncryption for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { - req, out := c.GetBucketEncryptionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketIntelligentTieringConfiguration = "GetBucketIntelligentTieringConfiguration" - -// GetBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketIntelligentTieringConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketIntelligentTieringConfiguration for more information on using the GetBucketIntelligentTieringConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketIntelligentTieringConfigurationRequest method. -// req, resp := client.GetBucketIntelligentTieringConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration -func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketIntelligentTieringConfigurationInput) (req *request.Request, output *GetBucketIntelligentTieringConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketIntelligentTieringConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?intelligent-tiering", - } - - if input == nil { - input = &GetBucketIntelligentTieringConfigurationInput{} - } - - output = &GetBucketIntelligentTieringConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. -// -// Gets the S3 Intelligent-Tiering configuration from the specified bucket. -// -// The S3 Intelligent-Tiering storage class is designed to optimize storage -// costs by automatically moving data to the most cost-effective storage access -// tier, without performance impact or operational overhead. 
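Every *WithContext variant in this file shares the contract repeated above: the context must be non-nil and drives request cancellation. A deadline-bounded call might look like the following sketch, which additionally assumes "context" and "time" are imported; the helper name is hypothetical.

func encryptionWithTimeout(svc *s3.S3, bucket string) (*s3.GetBucketEncryptionOutput, error) {
	// A nil context would panic, per the documentation above.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return svc.GetBucketEncryptionWithContext(ctx, &s3.GetBucketEncryptionInput{
		Bucket: aws.String(bucket),
	})
}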
S3 Intelligent-Tiering -// delivers automatic cost savings in three low latency and high throughput -// access tiers. To get the lowest storage cost on data that can be accessed -// in minutes to hours, you can choose to activate additional archiving capabilities. -// -// The S3 Intelligent-Tiering storage class is the ideal storage class for data -// with unknown, changing, or unpredictable access patterns, independent of -// object size or retention period. If the size of an object is less than 128 -// KB, it is not monitored and not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the Frequent Access tier rates -// in the S3 Intelligent-Tiering storage class. -// -// For more information, see Storage class for automatically optimizing frequently -// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). -// -// Operations related to GetBucketIntelligentTieringConfiguration include: -// -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketIntelligentTieringConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration -func (c *S3) GetBucketIntelligentTieringConfiguration(input *GetBucketIntelligentTieringConfigurationInput) (*GetBucketIntelligentTieringConfigurationOutput, error) { - req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketIntelligentTieringConfigurationWithContext is the same as GetBucketIntelligentTieringConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketIntelligentTieringConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *GetBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*GetBucketIntelligentTieringConfigurationOutput, error) { - req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" - -// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketInventoryConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
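Reading one Intelligent-Tiering configuration back follows the same Bucket-plus-Id shape as the analytics call. A sketch under the same assumptions (helper name and arguments hypothetical):

func showTieringArchiveRules(svc *s3.S3, bucket, id string) error {
	out, err := svc.GetBucketIntelligentTieringConfiguration(&s3.GetBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return err
	}
	// Each tiering entry pairs an access tier with its day threshold.
	for _, t := range out.IntelligentTieringConfiguration.Tierings {
		fmt.Println(aws.StringValue(t.AccessTier), aws.Int64Value(t.Days))
	}
	return nil
}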
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketInventoryConfigurationRequest method. -// req, resp := client.GetBucketInventoryConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration -func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketInventoryConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?inventory", - } - - if input == nil { - input = &GetBucketInventoryConfigurationInput{} - } - - output = &GetBucketInventoryConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. -// -// Returns an inventory configuration (identified by the inventory configuration -// ID) from the bucket. -// -// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration -// action. The bucket owner has this permission by default and can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). -// -// The following operations are related to GetBucketInventoryConfiguration: -// -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) -// -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketInventoryConfiguration for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration -func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { - req, out := c.GetBucketInventoryConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketInventoryConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { - req, out := c.GetBucketInventoryConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketLifecycle = "GetBucketLifecycle" - -// GetBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLifecycle operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketLifecycle for more information on using the GetBucketLifecycle -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketLifecycleRequest method. -// req, resp := client.GetBucketLifecycleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle -// -// Deprecated: GetBucketLifecycle has been deprecated -func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") - } - op := &request.Operation{ - Name: opGetBucketLifecycle, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &GetBucketLifecycleInput{} - } - - output = &GetBucketLifecycleOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketLifecycle API operation for Amazon Simple Storage Service. -// -// For an updated version of this API, see GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html). -// If you configured a bucket lifecycle using the filter element, you should -// see the updated version of this topic. This topic is provided for backward -// compatibility. -// -// Returns the lifecycle configuration information set on the bucket. For information -// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). 
-// -// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// GetBucketLifecycle has the following special error: -// -// - Error code: NoSuchLifecycleConfiguration Description: The lifecycle -// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault -// Code Prefix: Client -// -// The following operations are related to GetBucketLifecycle: -// -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// -// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// -// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLifecycle for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle -// -// Deprecated: GetBucketLifecycle has been deprecated -func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { - req, out := c.GetBucketLifecycleRequest(input) - return out, req.Send() -} - -// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketLifecycle for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -// -// Deprecated: GetBucketLifecycleWithContext has been deprecated -func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { - req, out := c.GetBucketLifecycleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" - -// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLifecycleConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. -// req, resp := client.GetBucketLifecycleConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration -func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketLifecycleConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &GetBucketLifecycleConfigurationInput{} - } - - output = &GetBucketLifecycleConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. -// -// Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, or a combination of both. -// Accordingly, this section describes the latest API. The response describes -// the new filter element that you can use to specify a filter to select a subset -// of objects to which the rule applies. If you are using a previous version -// of the lifecycle configuration, it still works. For the earlier action, see -// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). -// -// Returns the lifecycle configuration information set on the bucket. For information -// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). -// -// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration -// action. The bucket owner has this permission, by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// GetBucketLifecycleConfiguration has the following special error: -// -// - Error code: NoSuchLifecycleConfiguration Description: The lifecycle -// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault -// Code Prefix: Client -// -// The following operations are related to GetBucketLifecycleConfiguration: -// -// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) -// -// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// -// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLifecycleConfiguration for usage and error information. 
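Because GetBucketLifecycle above is deprecated in favor of this filter-aware variant, new code would call GetBucketLifecycleConfiguration and treat the NoSuchLifecycleConfiguration special error documented for both as "no rules set". A sketch, again assuming the awserr import and comparing the code as a string (no exported constant exists for it):

func lifecycleRuleCount(svc *s3.S3, bucket string) (int, error) {
	out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchLifecycleConfiguration" {
		return 0, nil // documented 404 special error: nothing configured
	}
	if err != nil {
		return 0, err
	}
	return len(out.Rules), nil
}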
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration -func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { - req, out := c.GetBucketLifecycleConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketLifecycleConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { - req, out := c.GetBucketLifecycleConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketLocation = "GetBucketLocation" - -// GetBucketLocationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLocation operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketLocation for more information on using the GetBucketLocation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketLocationRequest method. -// req, resp := client.GetBucketLocationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation -func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { - op := &request.Operation{ - Name: opGetBucketLocation, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?location", - } - - if input == nil { - input = &GetBucketLocationInput{} - } - - output = &GetBucketLocationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketLocation API operation for Amazon Simple Storage Service. -// -// Returns the Region the bucket resides in. You set the bucket's Region using -// the LocationConstraint request parameter in a CreateBucket request. For more -// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). -// -// To use this implementation of the operation, you must be the bucket owner. -// -// To use this API against an access point, provide the alias of the access -// point in place of the bucket name. -// -// The following operations are related to GetBucketLocation: -// -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// Returns awserr.Error for service API and SDK errors. 
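GetBucketLocation returns the raw LocationConstraint, which is empty for us-east-1 and "EU" for legacy eu-west-1 buckets; the SDK ships a NormalizeBucketLocation helper for exactly this. A sketch (helper name hypothetical):

func bucketRegion(svc *s3.S3, bucket string) (string, error) {
	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err
	}
	// Maps "" -> us-east-1 and "EU" -> eu-west-1.
	return s3.NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)), nil
}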
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLocation for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation -func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { - req, out := c.GetBucketLocationRequest(input) - return out, req.Send() -} - -// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketLocation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { - req, out := c.GetBucketLocationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketLogging = "GetBucketLogging" - -// GetBucketLoggingRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLogging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketLogging for more information on using the GetBucketLogging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketLoggingRequest method. -// req, resp := client.GetBucketLoggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging -func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { - op := &request.Operation{ - Name: opGetBucketLogging, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?logging", - } - - if input == nil { - input = &GetBucketLoggingInput{} - } - - output = &GetBucketLoggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketLogging API operation for Amazon Simple Storage Service. -// -// Returns the logging status of a bucket and the permissions users have to -// view and modify that status. To use GET, you must be the bucket owner. -// -// The following operations are related to GetBucketLogging: -// -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// - PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
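A nil LoggingEnabled in the GetBucketLogging response is how the API says access logging is switched off, so callers should check it before dereferencing. A sketch under the same assumptions:

func loggingTarget(svc *s3.S3, bucket string) (string, error) {
	out, err := svc.GetBucketLogging(&s3.GetBucketLoggingInput{
		Bucket: aws.String(bucket),
	})
	if err != nil || out.LoggingEnabled == nil {
		return "", err // nil LoggingEnabled: access logging disabled
	}
	return aws.StringValue(out.LoggingEnabled.TargetBucket), nil
}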
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketLogging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging -func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { - req, out := c.GetBucketLoggingRequest(input) - return out, req.Send() -} - -// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketLogging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { - req, out := c.GetBucketLoggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" - -// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketMetricsConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketMetricsConfigurationRequest method. -// req, resp := client.GetBucketMetricsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration -func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketMetricsConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?metrics", - } - - if input == nil { - input = &GetBucketMetricsConfigurationInput{} - } - - output = &GetBucketMetricsConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. -// -// Gets a metrics configuration (specified by the metrics configuration ID) -// from the bucket. Note that this doesn't include the daily storage metrics. -// -// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. 
For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// For information about CloudWatch request metrics for Amazon S3, see Monitoring -// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). -// -// The following operations are related to GetBucketMetricsConfiguration: -// -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) -// -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// -// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketMetricsConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration -func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { - req, out := c.GetBucketMetricsConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketMetricsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { - req, out := c.GetBucketMetricsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketNotification = "GetBucketNotification" - -// GetBucketNotificationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketNotification operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketNotification for more information on using the GetBucketNotification -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
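Reading a CloudWatch request-metrics configuration back (daily storage metrics are excluded, as noted above) is another Bucket-plus-Id call. A sketch; a nil Filter means the configuration covers the whole bucket.

func metricsFilterPrefix(svc *s3.S3, bucket, id string) (string, error) {
	out, err := svc.GetBucketMetricsConfiguration(&s3.GetBucketMetricsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return "", err
	}
	cfg := out.MetricsConfiguration
	if cfg.Filter == nil || cfg.Filter.Prefix == nil {
		return "", nil // no filter: metrics apply to the whole bucket
	}
	return aws.StringValue(cfg.Filter.Prefix), nil
}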
-// -// // Example sending a request using the GetBucketNotificationRequest method. -// req, resp := client.GetBucketNotificationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification -// -// Deprecated: GetBucketNotification has been deprecated -func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") - } - op := &request.Operation{ - Name: opGetBucketNotification, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &GetBucketNotificationConfigurationRequest{} - } - - output = &NotificationConfigurationDeprecated{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketNotification API operation for Amazon Simple Storage Service. -// -// No longer used, see GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketNotification for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification -// -// Deprecated: GetBucketNotification has been deprecated -func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { - req, out := c.GetBucketNotificationRequest(input) - return out, req.Send() -} - -// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketNotification for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -// -// Deprecated: GetBucketNotificationWithContext has been deprecated -func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { - req, out := c.GetBucketNotificationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" - -// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketNotificationConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketNotificationConfigurationRequest method. -// req, resp := client.GetBucketNotificationConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration -func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { - op := &request.Operation{ - Name: opGetBucketNotificationConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &GetBucketNotificationConfigurationRequest{} - } - - output = &NotificationConfiguration{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. -// -// Returns the notification configuration of a bucket. -// -// If notifications are not enabled on the bucket, the action returns an empty -// NotificationConfiguration element. -// -// By default, you must be the bucket owner to read the notification configuration -// of a bucket. However, the bucket owner can use a bucket policy to grant permission -// to other users to read this configuration with the s3:GetBucketNotification -// permission. -// -// For more information about setting and reading the notification configuration -// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). -// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). -// -// The following action is related to GetBucketNotification: -// -// - PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketNotificationConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration -func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { - req, out := c.GetBucketNotificationConfigurationRequest(input) - return out, req.Send() -} - -// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketNotificationConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
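Unusually, the input type here is named GetBucketNotificationConfigurationRequest rather than the usual ...Input, and an unconfigured bucket comes back as an empty NotificationConfiguration element rather than an error. A sketch that counts the configured targets:

func notificationTargetCount(svc *s3.S3, bucket string) (int, error) {
	out, err := svc.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return 0, err
	}
	// Empty slices simply mean notifications are not enabled.
	return len(out.TopicConfigurations) +
		len(out.QueueConfigurations) +
		len(out.LambdaFunctionConfigurations), nil
}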
-func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { - req, out := c.GetBucketNotificationConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketOwnershipControls = "GetBucketOwnershipControls" - -// GetBucketOwnershipControlsRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketOwnershipControls operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketOwnershipControls for more information on using the GetBucketOwnershipControls -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketOwnershipControlsRequest method. -// req, resp := client.GetBucketOwnershipControlsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls -func (c *S3) GetBucketOwnershipControlsRequest(input *GetBucketOwnershipControlsInput) (req *request.Request, output *GetBucketOwnershipControlsOutput) { - op := &request.Operation{ - Name: opGetBucketOwnershipControls, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?ownershipControls", - } - - if input == nil { - input = &GetBucketOwnershipControlsInput{} - } - - output = &GetBucketOwnershipControlsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketOwnershipControls API operation for Amazon Simple Storage Service. -// -// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, -// you must have the s3:GetBucketOwnershipControls permission. For more information -// about Amazon S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html). -// -// For information about Amazon S3 Object Ownership, see Using Object Ownership -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html). -// -// The following operations are related to GetBucketOwnershipControls: -// -// - PutBucketOwnershipControls -// -// - DeleteBucketOwnershipControls -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketOwnershipControls for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls -func (c *S3) GetBucketOwnershipControls(input *GetBucketOwnershipControlsInput) (*GetBucketOwnershipControlsOutput, error) { - req, out := c.GetBucketOwnershipControlsRequest(input) - return out, req.Send() -} - -// GetBucketOwnershipControlsWithContext is the same as GetBucketOwnershipControls with the addition of -// the ability to pass a context and additional request options. 
-// -// See GetBucketOwnershipControls for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketOwnershipControlsWithContext(ctx aws.Context, input *GetBucketOwnershipControlsInput, opts ...request.Option) (*GetBucketOwnershipControlsOutput, error) { - req, out := c.GetBucketOwnershipControlsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketPolicy = "GetBucketPolicy" - -// GetBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketPolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketPolicy for more information on using the GetBucketPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketPolicyRequest method. -// req, resp := client.GetBucketPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy -func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { - op := &request.Operation{ - Name: opGetBucketPolicy, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &GetBucketPolicyInput{} - } - - output = &GetBucketPolicyOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketPolicy API operation for Amazon Simple Storage Service. -// -// Returns the policy of a specified bucket. If you are using an identity other -// than the root user of the Amazon Web Services account that owns the bucket, -// the calling identity must have the GetBucketPolicy permissions on the specified -// bucket and belong to the bucket owner's account in order to use this operation. -// -// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access -// Denied error. If you have the correct permissions, but you're not using an -// identity that belongs to the bucket owner's account, Amazon S3 returns a -// 405 Method Not Allowed error. -// -// As a security precaution, the root user of the Amazon Web Services account -// that owns a bucket can always use this operation, even if the policy explicitly -// denies the root user the ability to perform this action. -// -// For more information about bucket policies, see Using Bucket Policies and -// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). -// -// The following action is related to GetBucketPolicy: -// -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
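The policy itself comes back as a pointer to a JSON string, so the 403 Access Denied and 405 Method Not Allowed cases described above surface as errors before Policy is ever set. A sketch:

func bucketPolicyJSON(svc *s3.S3, bucket string) (string, error) {
	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err // e.g. 403 Access Denied or 405 Method Not Allowed
	}
	return aws.StringValue(out.Policy), nil
}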
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketPolicy for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy -func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { - req, out := c.GetBucketPolicyRequest(input) - return out, req.Send() -} - -// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { - req, out := c.GetBucketPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketPolicyStatus = "GetBucketPolicyStatus" - -// GetBucketPolicyStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketPolicyStatus operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketPolicyStatus for more information on using the GetBucketPolicyStatus -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketPolicyStatusRequest method. -// req, resp := client.GetBucketPolicyStatusRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus -func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (req *request.Request, output *GetBucketPolicyStatusOutput) { - op := &request.Operation{ - Name: opGetBucketPolicyStatus, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?policyStatus", - } - - if input == nil { - input = &GetBucketPolicyStatusInput{} - } - - output = &GetBucketPolicyStatusOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketPolicyStatus API operation for Amazon Simple Storage Service. -// -// Retrieves the policy status for an Amazon S3 bucket, indicating whether the -// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus -// permission. For more information about Amazon S3 permissions, see Specifying -// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// -// For more information about when Amazon S3 considers a bucket public, see -// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). 
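GetBucketPolicyStatus reduces the "is this bucket public?" question documented above to a single boolean. A sketch:

func isBucketPublic(svc *s3.S3, bucket string) (bool, error) {
	out, err := svc.GetBucketPolicyStatus(&s3.GetBucketPolicyStatusInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return false, err
	}
	return aws.BoolValue(out.PolicyStatus.IsPublic), nil
}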
-// -// The following operations are related to GetBucketPolicyStatus: -// -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketPolicyStatus for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus -func (c *S3) GetBucketPolicyStatus(input *GetBucketPolicyStatusInput) (*GetBucketPolicyStatusOutput, error) { - req, out := c.GetBucketPolicyStatusRequest(input) - return out, req.Send() -} - -// GetBucketPolicyStatusWithContext is the same as GetBucketPolicyStatus with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketPolicyStatus for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketPolicyStatusWithContext(ctx aws.Context, input *GetBucketPolicyStatusInput, opts ...request.Option) (*GetBucketPolicyStatusOutput, error) { - req, out := c.GetBucketPolicyStatusRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketReplication = "GetBucketReplication" - -// GetBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketReplication operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketReplication for more information on using the GetBucketReplication -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketReplicationRequest method. 
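Replication reads are eventually consistent: as the GetBucketReplication documentation that follows warns, a GET shortly after a PUT or DELETE of the configuration can return stale data, so callers typically just report what is currently visible. A sketch mapping rule IDs to their status:

func replicationRuleStatuses(svc *s3.S3, bucket string) (map[string]string, error) {
	out, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return nil, err
	}
	statuses := make(map[string]string)
	for _, rule := range out.ReplicationConfiguration.Rules {
		statuses[aws.StringValue(rule.ID)] = aws.StringValue(rule.Status)
	}
	return statuses, nil
}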
-// req, resp := client.GetBucketReplicationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
-func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
-	op := &request.Operation{
-		Name:       opGetBucketReplication,
-		HTTPMethod: "GET",
-		HTTPPath:   "/{Bucket}?replication",
-	}
-
-	if input == nil {
-		input = &GetBucketReplicationInput{}
-	}
-
-	output = &GetBucketReplicationOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// GetBucketReplication API operation for Amazon Simple Storage Service.
-//
-// Returns the replication configuration of a bucket.
-//
-// It can take a while for a replication configuration put or delete to propagate
-// to all Amazon S3 systems. Therefore, a get request soon after a put or delete
-// can return an outdated result.
-//
-// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
-// in the Amazon S3 User Guide.
-//
-// This action requires permissions for the s3:GetReplicationConfiguration action.
-// For more information about permissions, see Using Bucket Policies and User
-// Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
-//
-// If you include the Filter element in a replication configuration, you must
-// also include the DeleteMarkerReplication and Priority elements. The response
-// also returns those elements.
-//
-// For information about GetBucketReplication errors, see List of replication-related
-// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList).
-//
-// The following operations are related to GetBucketReplication:
-//
-// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
-//
-// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation GetBucketReplication for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
-func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
-	req, out := c.GetBucketReplicationRequest(input)
-	return out, req.Send()
-}
-
-// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetBucketReplication for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) {
-	req, out := c.GetBucketReplicationRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
- return out, req.Send() -} - -const opGetBucketRequestPayment = "GetBucketRequestPayment" - -// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketRequestPayment operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketRequestPaymentRequest method. -// req, resp := client.GetBucketRequestPaymentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment -func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { - op := &request.Operation{ - Name: opGetBucketRequestPayment, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?requestPayment", - } - - if input == nil { - input = &GetBucketRequestPaymentInput{} - } - - output = &GetBucketRequestPaymentOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketRequestPayment API operation for Amazon Simple Storage Service. -// -// Returns the request payment configuration of a bucket. To use this version -// of the operation, you must be the bucket owner. For more information, see -// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). -// -// The following operations are related to GetBucketRequestPayment: -// -// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketRequestPayment for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment -func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { - req, out := c.GetBucketRequestPaymentRequest(input) - return out, req.Send() -} - -// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketRequestPayment for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { - req, out := c.GetBucketRequestPaymentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opGetBucketTagging = "GetBucketTagging" - -// GetBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketTagging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketTagging for more information on using the GetBucketTagging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketTaggingRequest method. -// req, resp := client.GetBucketTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging -func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { - op := &request.Operation{ - Name: opGetBucketTagging, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &GetBucketTaggingInput{} - } - - output = &GetBucketTaggingOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketTagging API operation for Amazon Simple Storage Service. -// -// Returns the tag set associated with the bucket. -// -// To use this operation, you must have permission to perform the s3:GetBucketTagging -// action. By default, the bucket owner has this permission and can grant this -// permission to others. -// -// GetBucketTagging has the following special error: -// -// - Error code: NoSuchTagSet Description: There is no tag set associated -// with the bucket. -// -// The following operations are related to GetBucketTagging: -// -// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) -// -// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging -func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { - req, out := c.GetBucketTaggingRequest(input) - return out, req.Send() -} - -// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { - req, out := c.GetBucketTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetBucketVersioning = "GetBucketVersioning" - -// GetBucketVersioningRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketVersioning operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetBucketVersioning for more information on using the GetBucketVersioning -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetBucketVersioningRequest method. -// req, resp := client.GetBucketVersioningRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning -func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { - op := &request.Operation{ - Name: opGetBucketVersioning, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?versioning", - } - - if input == nil { - input = &GetBucketVersioningInput{} - } - - output = &GetBucketVersioningOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetBucketVersioning API operation for Amazon Simple Storage Service. -// -// Returns the versioning state of a bucket. -// -// To retrieve the versioning state of a bucket, you must be the bucket owner. -// -// This implementation also returns the MFA Delete status of the versioning -// state. If the MFA Delete status is enabled, the bucket owner must use an -// authentication device to change the versioning state of the bucket. -// -// The following operations are related to GetBucketVersioning: -// -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetBucketVersioning for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning -func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { - req, out := c.GetBucketVersioningRequest(input) - return out, req.Send() -} - -// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketVersioning for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) {
-	req, out := c.GetBucketVersioningRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opGetBucketWebsite = "GetBucketWebsite"
-
-// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
-// client's request for the GetBucketWebsite operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetBucketWebsite for more information on using the GetBucketWebsite
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetBucketWebsiteRequest method.
-// req, resp := client.GetBucketWebsiteRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
-func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
-	op := &request.Operation{
-		Name:       opGetBucketWebsite,
-		HTTPMethod: "GET",
-		HTTPPath:   "/{Bucket}?website",
-	}
-
-	if input == nil {
-		input = &GetBucketWebsiteInput{}
-	}
-
-	output = &GetBucketWebsiteOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// GetBucketWebsite API operation for Amazon Simple Storage Service.
-//
-// Returns the website configuration for a bucket. To host a website on Amazon
-// S3, you can configure a bucket as a website by adding a website configuration.
-// For more information about hosting websites, see Hosting Websites on Amazon
-// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
-//
-// This GET action requires the S3:GetBucketWebsite permission. By default,
-// only the bucket owner can read the bucket website configuration. However,
-// bucket owners can allow other users to read the website configuration by
-// writing a bucket policy granting them the S3:GetBucketWebsite permission.
-//
-// The following operations are related to GetBucketWebsite:
-//
-// - DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html)
-//
-// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation GetBucketWebsite for usage and error information.
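-//
-// A minimal sketch of reading a bucket's website configuration, assuming
-// client is an *s3.S3 (the bucket name is illustrative):
-//
-//	out, err := client.GetBucketWebsite(&s3.GetBucketWebsiteInput{
-//		Bucket: aws.String("examplebucket"),
-//	})
-//	if err == nil && out.IndexDocument != nil {
-//		fmt.Println(aws.StringValue(out.IndexDocument.Suffix))
-//	}
-//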
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite -func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { - req, out := c.GetBucketWebsiteRequest(input) - return out, req.Send() -} - -// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of -// the ability to pass a context and additional request options. -// -// See GetBucketWebsite for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { - req, out := c.GetBucketWebsiteRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObject = "GetObject" - -// GetObjectRequest generates a "aws/request.Request" representing the -// client's request for the GetObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObject for more information on using the GetObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetObjectRequest method. -// req, resp := client.GetObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject -func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { - op := &request.Operation{ - Name: opGetObject, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &GetObjectInput{} - } - - output = &GetObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObject API operation for Amazon Simple Storage Service. -// -// Retrieves objects from Amazon S3. To use GET, you must have READ access to -// the object. If you grant READ access to the anonymous user, you can return -// the object without using an authorization header. -// -// An Amazon S3 bucket has no directory hierarchy such as you would find in -// a typical computer file system. You can, however, create a logical hierarchy -// by using object key names that imply a folder structure. For example, instead -// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. -// -// To get an object from such a logical hierarchy, specify the full key name -// for the object in the GET operation. For a virtual hosted-style request example, -// if you have the object photos/2006/February/sample.jpg, specify the resource -// as /photos/2006/February/sample.jpg. For a path-style request example, if -// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, -// specify the resource as /examplebucket/photos/2006/February/sample.jpg. 
For
-// more information about request types, see HTTP Host Header Bucket Specification
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket).
-//
-// For more information about returning the ACL of an object, see GetObjectAcl
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html).
-//
-// If the object you are retrieving is stored in the S3 Glacier or S3 Glacier
-// Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering
-// Deep Archive tiers, before you can retrieve the object you must first restore
-// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
-// Otherwise, this action returns an InvalidObjectStateError error. For information
-// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html).
-//
-// Encryption request headers, like x-amz-server-side-encryption, should not
-// be sent for GET requests if your object uses server-side encryption with
-// KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption
-// keys (SSE-S3). If your object does use these types of keys, you’ll get
-// an HTTP 400 Bad Request error.
-//
-// If you encrypt an object by using server-side encryption with customer-provided
-// encryption keys (SSE-C) when you store the object in Amazon S3, then when
-// you GET the object, you must use the following headers:
-//
-// - x-amz-server-side-encryption-customer-algorithm
-//
-// - x-amz-server-side-encryption-customer-key
-//
-// - x-amz-server-side-encryption-customer-key-MD5
-//
-// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
-// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
-//
-// Assuming you have the relevant permission to read object tags, the response
-// also returns the x-amz-tagging-count header that provides the count of tags
-// associated with the object. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
-// to retrieve the tag set associated with an object.
-//
-// # Permissions
-//
-// You need the relevant read object (or version) permission for this operation.
-// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
-// If the object you request does not exist, the error Amazon S3 returns depends
-// on whether you also have the s3:ListBucket permission.
-//
-// - If you have the s3:ListBucket permission on the bucket, Amazon S3 will
-// return an HTTP status code 404 ("no such key") error.
-//
-// - If you don’t have the s3:ListBucket permission, Amazon S3 will return
-// an HTTP status code 403 ("access denied") error.
-//
-// # Versioning
-//
-// By default, the GET action returns the current version of an object. To return
-// a different version, use the versionId subresource.
-//
-// - If you supply a versionId, you need the s3:GetObjectVersion permission
-// to access a specific version of an object. If you request a specific version,
-// you do not need to have the s3:GetObject permission.
-//
-// - If the current version of the object is a delete marker, Amazon S3 behaves
-// as if the object was deleted and includes x-amz-delete-marker: true in
-// the response.
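-//
-// A minimal sketch of requesting a specific version, assuming client is an
-// *s3.S3 (the bucket, key, and version ID are illustrative):
-//
-//	out, err := client.GetObject(&s3.GetObjectInput{
-//		Bucket:    aws.String("examplebucket"),
-//		Key:       aws.String("photos/2006/February/sample.jpg"),
-//		VersionId: aws.String("some-version-id"),
-//	})
-//	if err == nil {
-//		defer out.Body.Close() // Body is a streaming io.ReadCloser
-//	}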
-//
-// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html).
-//
-// # Overriding Response Header Values
-//
-// There are times when you want to override certain response header values
-// in a GET response. For example, you might override the Content-Disposition
-// response header value in your GET request.
-//
-// You can override values for a set of response headers using the following
-// query parameters. These response header values are sent only on a successful
-// request, that is, when status code 200 OK is returned. The set of headers
-// you can override using these parameters is a subset of the headers that Amazon
-// S3 accepts when you create an object. The response headers that you can override
-// for the GET response are Content-Type, Content-Language, Expires, Cache-Control,
-// Content-Disposition, and Content-Encoding. To override these header values
-// in the GET response, you use the following request parameters.
-//
-// You must sign the request, either using an Authorization header or a presigned
-// URL, when using these parameters. They cannot be used with an unsigned (anonymous)
-// request.
-//
-// - response-content-type
-//
-// - response-content-language
-//
-// - response-expires
-//
-// - response-cache-control
-//
-// - response-content-disposition
-//
-// - response-content-encoding
-//
-// # Additional Considerations about Request Headers
-//
-// If both the If-Match and If-Unmodified-Since headers are present in the
-// request, and the If-Match condition evaluates to true while the If-Unmodified-Since
-// condition evaluates to false, then S3 returns 200 OK and the data requested.
-//
-// If both the If-None-Match and If-Modified-Since headers are present in
-// the request, and the If-None-Match condition evaluates to false while the
-// If-Modified-Since condition evaluates to true, then S3 returns a 304 Not
-// Modified response code.
-//
-// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
-//
-// The following operations are related to GetObject:
-//
-// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
-//
-// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation GetObject for usage and error information.
-//
-// Returned Error Codes:
-//
-// - ErrCodeNoSuchKey "NoSuchKey"
-// The specified key does not exist.
-//
-// - ErrCodeInvalidObjectState "InvalidObjectState"
-// Object is archived and inaccessible until restored.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
-func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
-	req, out := c.GetObjectRequest(input)
-	return out, req.Send()
-}
-
-// GetObjectWithContext is the same as GetObject with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetObject for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests.
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { - req, out := c.GetObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectAcl = "GetObjectAcl" - -// GetObjectAclRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectAcl operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectAcl for more information on using the GetObjectAcl -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetObjectAclRequest method. -// req, resp := client.GetObjectAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl -func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { - op := &request.Operation{ - Name: opGetObjectAcl, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?acl", - } - - if input == nil { - input = &GetObjectAclInput{} - } - - output = &GetObjectAclOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectAcl API operation for Amazon Simple Storage Service. -// -// Returns the access control list (ACL) of an object. To use this operation, -// you must have s3:GetObjectAcl permissions or READ_ACP access to the object. -// For more information, see Mapping of ACL permissions and access policy permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) -// in the Amazon S3 User Guide -// -// This action is not supported by Amazon S3 on Outposts. -// -// # Versioning -// -// By default, GET returns ACL information about the current version of an object. -// To return ACL information about a different version, use the versionId subresource. -// -// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, -// requests to read ACLs are still supported and return the bucket-owner-full-control -// ACL with the owner being the account that created the bucket. For more information, -// see Controlling object ownership and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. -// -// The following operations are related to GetObjectAcl: -// -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) -// -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
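-//
-// A minimal sketch of listing the grants on an object's current version,
-// assuming client is an *s3.S3 (the bucket and key are illustrative; add
-// VersionId to read the ACL of a specific version):
-//
-//	out, err := client.GetObjectAcl(&s3.GetObjectAclInput{
-//		Bucket: aws.String("examplebucket"),
-//		Key:    aws.String("photos/2006/February/sample.jpg"),
-//	})
-//	if err == nil {
-//		for _, grant := range out.Grants {
-//			fmt.Println(aws.StringValue(grant.Permission))
-//		}
-//	}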
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectAcl for usage and error information. -// -// Returned Error Codes: -// - ErrCodeNoSuchKey "NoSuchKey" -// The specified key does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl -func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { - req, out := c.GetObjectAclRequest(input) - return out, req.Send() -} - -// GetObjectAclWithContext is the same as GetObjectAcl with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectAcl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { - req, out := c.GetObjectAclRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectAttributes = "GetObjectAttributes" - -// GetObjectAttributesRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectAttributes operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectAttributes for more information on using the GetObjectAttributes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetObjectAttributesRequest method. -// req, resp := client.GetObjectAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes -func (c *S3) GetObjectAttributesRequest(input *GetObjectAttributesInput) (req *request.Request, output *GetObjectAttributesOutput) { - op := &request.Operation{ - Name: opGetObjectAttributes, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?attributes", - } - - if input == nil { - input = &GetObjectAttributesInput{} - } - - output = &GetObjectAttributesOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectAttributes API operation for Amazon Simple Storage Service. -// -// Retrieves all the metadata from an object without returning the object itself. -// This action is useful if you're interested only in an object's metadata. -// To use GetObjectAttributes, you must have READ access to the object. -// -// GetObjectAttributes combines the functionality of GetObjectAcl, GetObjectLegalHold, -// GetObjectLockConfiguration, GetObjectRetention, GetObjectTagging, HeadObject, -// and ListParts. All of the data returned with each of those individual calls -// can be returned with a single call to GetObjectAttributes. 
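-//
-// A minimal sketch of fetching a few attributes in one round trip, assuming
-// client is an *s3.S3 (the bucket and key are illustrative):
-//
-//	out, err := client.GetObjectAttributes(&s3.GetObjectAttributesInput{
-//		Bucket: aws.String("examplebucket"),
-//		Key:    aws.String("photos/2006/February/sample.jpg"),
-//		ObjectAttributes: []*string{
-//			aws.String(s3.ObjectAttributesEtag),
-//			aws.String(s3.ObjectAttributesObjectSize),
-//		},
-//	})
-//	if err == nil {
-//		fmt.Println(aws.Int64Value(out.ObjectSize))
-//	}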
-// -// If you encrypt an object by using server-side encryption with customer-provided -// encryption keys (SSE-C) when you store the object in Amazon S3, then when -// you retrieve the metadata from the object, you must use the following headers: -// -// - x-amz-server-side-encryption-customer-algorithm -// -// - x-amz-server-side-encryption-customer-key -// -// - x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided -// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. -// -// - Encryption request headers, such as x-amz-server-side-encryption, should -// not be sent for GET requests if your object uses server-side encryption -// with Amazon Web Services KMS keys stored in Amazon Web Services Key Management -// Service (SSE-KMS) or server-side encryption with Amazon S3 managed encryption -// keys (SSE-S3). If your object does use these types of keys, you'll get -// an HTTP 400 Bad Request error. -// -// - The last modified property in this case is the creation date of the -// object. -// -// Consider the following when using request headers: -// -// - If both of the If-Match and If-Unmodified-Since headers are present -// in the request as follows, then Amazon S3 returns the HTTP status code -// 200 OK and the data requested: If-Match condition evaluates to true. If-Unmodified-Since -// condition evaluates to false. -// -// - If both of the If-None-Match and If-Modified-Since headers are present -// in the request as follows, then Amazon S3 returns the HTTP status code -// 304 Not Modified: If-None-Match condition evaluates to false. If-Modified-Since -// condition evaluates to true. -// -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). -// -// # Permissions -// -// The permissions that you need to use this operation depend on whether the -// bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion -// and s3:GetObjectVersionAttributes permissions for this operation. If the -// bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes -// permissions. For more information, see Specifying Permissions in a Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. If the object that you request does not exist, -// the error Amazon S3 returns depends on whether you also have the s3:ListBucket -// permission. -// -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns -// an HTTP status code 404 Not Found ("no such key") error. -// -// - If you don't have the s3:ListBucket permission, Amazon S3 returns an -// HTTP status code 403 Forbidden ("access denied") error. 
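-//
-// A minimal sketch of telling these two failures apart with the awserr
-// package, reusing an input like the one sketched above (the name input is
-// illustrative); the raw "AccessDenied" code for the 403 case is an
-// assumption, while the 404 case surfaces as ErrCodeNoSuchKey:
-//
-//	if _, err := client.GetObjectAttributes(input); err != nil {
-//		if aerr, ok := err.(awserr.Error); ok {
-//			switch aerr.Code() {
-//			case s3.ErrCodeNoSuchKey:
-//				// 404: the caller holds s3:ListBucket
-//			case "AccessDenied":
-//				// 403: the caller lacks s3:ListBucket
-//			}
-//		}
-//	}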
-// -// The following actions are related to GetObjectAttributes: -// -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) -// -// - GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) -// -// - GetObjectLockConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) -// -// - GetObjectRetention (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) -// -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// -// - HeadObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) -// -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectAttributes for usage and error information. -// -// Returned Error Codes: -// - ErrCodeNoSuchKey "NoSuchKey" -// The specified key does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes -func (c *S3) GetObjectAttributes(input *GetObjectAttributesInput) (*GetObjectAttributesOutput, error) { - req, out := c.GetObjectAttributesRequest(input) - return out, req.Send() -} - -// GetObjectAttributesWithContext is the same as GetObjectAttributes with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectAttributes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectAttributesWithContext(ctx aws.Context, input *GetObjectAttributesInput, opts ...request.Option) (*GetObjectAttributesOutput, error) { - req, out := c.GetObjectAttributesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectLegalHold = "GetObjectLegalHold" - -// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectLegalHold operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectLegalHold for more information on using the GetObjectLegalHold -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetObjectLegalHoldRequest method. 
-// req, resp := client.GetObjectLegalHoldRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold -func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { - op := &request.Operation{ - Name: opGetObjectLegalHold, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?legal-hold", - } - - if input == nil { - input = &GetObjectLegalHoldInput{} - } - - output = &GetObjectLegalHoldOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectLegalHold API operation for Amazon Simple Storage Service. -// -// Gets an object's current legal hold status. For more information, see Locking -// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). -// -// This action is not supported by Amazon S3 on Outposts. -// -// The following action is related to GetObjectLegalHold: -// -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectLegalHold for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold -func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { - req, out := c.GetObjectLegalHoldRequest(input) - return out, req.Send() -} - -// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectLegalHold for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { - req, out := c.GetObjectLegalHoldRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectLockConfiguration = "GetObjectLockConfiguration" - -// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectLockConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetObjectLockConfigurationRequest method. 
-// req, resp := client.GetObjectLockConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration -func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { - op := &request.Operation{ - Name: opGetObjectLockConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?object-lock", - } - - if input == nil { - input = &GetObjectLockConfigurationInput{} - } - - output = &GetObjectLockConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. -// -// Gets the Object Lock configuration for a bucket. The rule specified in the -// Object Lock configuration will be applied by default to every new object -// placed in the specified bucket. For more information, see Locking Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). -// -// The following action is related to GetObjectLockConfiguration: -// -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectLockConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration -func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { - req, out := c.GetObjectLockConfigurationRequest(input) - return out, req.Send() -} - -// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectLockConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { - req, out := c.GetObjectLockConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectRetention = "GetObjectRetention" - -// GetObjectRetentionRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectRetention operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectRetention for more information on using the GetObjectRetention -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// // Example sending a request using the GetObjectRetentionRequest method. -// req, resp := client.GetObjectRetentionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention -func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { - op := &request.Operation{ - Name: opGetObjectRetention, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?retention", - } - - if input == nil { - input = &GetObjectRetentionInput{} - } - - output = &GetObjectRetentionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetObjectRetention API operation for Amazon Simple Storage Service. -// -// Retrieves an object's retention settings. For more information, see Locking -// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). -// -// This action is not supported by Amazon S3 on Outposts. -// -// The following action is related to GetObjectRetention: -// -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectRetention for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention -func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { - req, out := c.GetObjectRetentionRequest(input) - return out, req.Send() -} - -// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of -// the ability to pass a context and additional request options. -// -// See GetObjectRetention for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { - req, out := c.GetObjectRetentionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetObjectTagging = "GetObjectTagging" - -// GetObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTagging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetObjectTagging for more information on using the GetObjectTagging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetObjectTaggingRequest method. 
-// req, resp := client.GetObjectTaggingRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
-func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) {
-	op := &request.Operation{
-		Name:       opGetObjectTagging,
-		HTTPMethod: "GET",
-		HTTPPath:   "/{Bucket}/{Key+}?tagging",
-	}
-
-	if input == nil {
-		input = &GetObjectTaggingInput{}
-	}
-
-	output = &GetObjectTaggingOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// GetObjectTagging API operation for Amazon Simple Storage Service.
-//
-// Returns the tag-set of an object. You send the GET request against the tagging
-// subresource associated with the object.
-//
-// To use this operation, you must have permission to perform the s3:GetObjectTagging
-// action. By default, the GET action returns information about the current version
-// of an object. For a versioned bucket, you can have multiple versions of an
-// object in your bucket. To retrieve tags of any other version, use the versionId
-// query parameter. You also need permission for the s3:GetObjectVersionTagging
-// action.
-//
-// By default, the bucket owner has this permission and can grant this permission
-// to others.
-//
-// For information about the Amazon S3 object tagging feature, see Object Tagging
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
-//
-// The following actions are related to GetObjectTagging:
-//
-// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html)
-//
-// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
-//
-// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation GetObjectTagging for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
-func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
-	req, out := c.GetObjectTaggingRequest(input)
-	return out, req.Send()
-}
-
-// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetObjectTagging for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) {
-	req, out := c.GetObjectTaggingRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opGetObjectTorrent = "GetObjectTorrent"
-
-// GetObjectTorrentRequest generates a "aws/request.Request" representing the
-// client's request for the GetObjectTorrent operation.
The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetObjectTorrent for more information on using the GetObjectTorrent
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetObjectTorrentRequest method.
-// req, resp := client.GetObjectTorrentRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
-func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) {
-	op := &request.Operation{
-		Name:       opGetObjectTorrent,
-		HTTPMethod: "GET",
-		HTTPPath:   "/{Bucket}/{Key+}?torrent",
-	}
-
-	if input == nil {
-		input = &GetObjectTorrentInput{}
-	}
-
-	output = &GetObjectTorrentOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// GetObjectTorrent API operation for Amazon Simple Storage Service.
-//
-// Returns torrent files from a bucket. BitTorrent can save you bandwidth when
-// you're distributing large files. For more information about BitTorrent, see
-// Using BitTorrent with Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html).
-//
-// You can get a torrent only for objects that are less than 5 GB in size, and
-// that are not encrypted using server-side encryption with a customer-provided
-// encryption key.
-//
-// To use GET, you must have READ access to the object.
-//
-// This action is not supported by Amazon S3 on Outposts.
-//
-// The following action is related to GetObjectTorrent:
-//
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation GetObjectTorrent for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
-func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) {
-	req, out := c.GetObjectTorrentRequest(input)
-	return out, req.Send()
-}
-
-// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetObjectTorrent for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) {
-	req, out := c.GetObjectTorrentRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
- return out, req.Send() -} - -const opGetPublicAccessBlock = "GetPublicAccessBlock" - -// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the -// client's request for the GetPublicAccessBlock operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetPublicAccessBlockRequest method. -// req, resp := client.GetPublicAccessBlockRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock -func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { - op := &request.Operation{ - Name: opGetPublicAccessBlock, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?publicAccessBlock", - } - - if input == nil { - input = &GetPublicAccessBlockInput{} - } - - output = &GetPublicAccessBlockOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetPublicAccessBlock API operation for Amazon Simple Storage Service. -// -// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To -// use this operation, you must have the s3:GetBucketPublicAccessBlock permission. -// For more information about Amazon S3 permissions, see Specifying Permissions -// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// -// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket -// or an object, it checks the PublicAccessBlock configuration for both the -// bucket (or the bucket that contains the object) and the bucket owner's account. -// If the PublicAccessBlock settings are different between the bucket and the -// account, Amazon S3 uses the most restrictive combination of the bucket-level -// and account-level settings. -// -// For more information about when Amazon S3 considers a bucket or an object -// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). -// -// The following operations are related to GetPublicAccessBlock: -// -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
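// A minimal sketch of the error handling described above, assuming an
// authenticated session and imports of the aws, awserr, fmt, session, and s3
// packages; the bucket name is a placeholder:
//
//    svc := s3.New(session.Must(session.NewSession()))
//    out, err := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
//        Bucket: aws.String("my-bucket"),
//    })
//    if err != nil {
//        // Runtime type assertion on awserr.Error; S3 answers with this
//        // error code when the bucket has no PublicAccessBlock configuration.
//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchPublicAccessBlockConfiguration" {
//            fmt.Println("no PublicAccessBlock configuration set")
//        }
//    } else if cfg := out.PublicAccessBlockConfiguration; cfg != nil {
//        fmt.Println("BlockPublicAcls:", aws.BoolValue(cfg.BlockPublicAcls))
//    }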
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetPublicAccessBlock for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock -func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { - req, out := c.GetPublicAccessBlockRequest(input) - return out, req.Send() -} - -// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of -// the ability to pass a context and additional request options. -// -// See GetPublicAccessBlock for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { - req, out := c.GetPublicAccessBlockRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opHeadBucket = "HeadBucket" - -// HeadBucketRequest generates a "aws/request.Request" representing the -// client's request for the HeadBucket operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See HeadBucket for more information on using the HeadBucket -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the HeadBucketRequest method. -// req, resp := client.HeadBucketRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket -func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { - op := &request.Operation{ - Name: opHeadBucket, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &HeadBucketInput{} - } - - output = &HeadBucketOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// HeadBucket API operation for Amazon Simple Storage Service. -// -// This action is useful to determine if a bucket exists and you have permission -// to access it. The action returns a 200 OK if the bucket exists and you have -// permission to access it. -// -// If the bucket does not exist or you do not have permission to access it, -// the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A -// message body is not included, so you cannot determine the exception beyond -// these error codes. -// -// To use this operation, you must have permissions to perform the s3:ListBucket -// action. The bucket owner has this permission by default and can grant this -// permission to others. 
For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// To use this API against an access point, you must provide the alias of the -// access point in place of the bucket name or specify the access point ARN. -// When using the access point ARN, you must direct requests to the access point -// hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. -// When using the Amazon Web Services SDKs, you provide the ARN in place of -// the bucket name. For more information see, Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation HeadBucket for usage and error information. -// -// Returned Error Codes: -// - ErrCodeNoSuchBucket "NoSuchBucket" -// The specified bucket does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket -func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { - req, out := c.HeadBucketRequest(input) - return out, req.Send() -} - -// HeadBucketWithContext is the same as HeadBucket with the addition of -// the ability to pass a context and additional request options. -// -// See HeadBucket for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { - req, out := c.HeadBucketRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opHeadObject = "HeadObject" - -// HeadObjectRequest generates a "aws/request.Request" representing the -// client's request for the HeadObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See HeadObject for more information on using the HeadObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the HeadObjectRequest method. 
-// req, resp := client.HeadObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject -func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { - op := &request.Operation{ - Name: opHeadObject, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &HeadObjectInput{} - } - - output = &HeadObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// HeadObject API operation for Amazon Simple Storage Service. -// -// The HEAD action retrieves metadata from an object without returning the object -// itself. This action is useful if you're only interested in an object's metadata. -// To use HEAD, you must have READ access to the object. -// -// A HEAD request has the same options as a GET action on an object. The response -// is identical to the GET response except that there is no response body. Because -// of this, if the HEAD request generates an error, it returns a generic 404 -// Not Found or 403 Forbidden code. It is not possible to retrieve the exact -// exception beyond these error codes. -// -// If you encrypt an object by using server-side encryption with customer-provided -// encryption keys (SSE-C) when you store the object in Amazon S3, then when -// you retrieve the metadata from the object, you must use the following headers: -// -// - x-amz-server-side-encryption-customer-algorithm -// -// - x-amz-server-side-encryption-customer-key -// -// - x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided -// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). -// -// - Encryption request headers, like x-amz-server-side-encryption, should -// not be sent for GET requests if your object uses server-side encryption -// with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed -// encryption keys (SSE-S3). If your object does use these types of keys, -// you’ll get an HTTP 400 BadRequest error. -// -// - The last modified property in this case is the creation date of the -// object. -// -// Request headers are limited to 8 KB in size. For more information, see Common -// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). -// -// Consider the following when using request headers: -// -// - Consideration 1 – If both of the If-Match and If-Unmodified-Since -// headers are present in the request as follows: If-Match condition evaluates -// to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon -// S3 returns 200 OK and the data requested. -// -// - Consideration 2 – If both of the If-None-Match and If-Modified-Since -// headers are present in the request as follows: If-None-Match condition -// evaluates to false, and; If-Modified-Since condition evaluates to true; -// Then Amazon S3 returns the 304 Not Modified response code. -// -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). -// -// # Permissions -// -// You need the relevant read object (or version) permission for this operation. -// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). 
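// An illustrative sketch of retrieving metadata for an SSE-C encrypted object
// with the three customer-key headers listed earlier; svc, the bucket and key
// names, and the secretKey/secretKeyMD5 variables are placeholders:
//
//    out, err := svc.HeadObject(&s3.HeadObjectInput{
//        Bucket:               aws.String("my-bucket"),
//        Key:                  aws.String("my-key"),
//        SSECustomerAlgorithm: aws.String("AES256"),
//        SSECustomerKey:       aws.String(secretKey),    // hypothetical variable holding the key material
//        SSECustomerKeyMD5:    aws.String(secretKeyMD5), // hypothetical base64 MD5 digest of the key
//    })
//    if err == nil {
//        fmt.Println("content length:", aws.Int64Value(out.ContentLength))
//    }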
-// If the object you request does not exist, the error Amazon S3 returns depends -// on whether you also have the s3:ListBucket permission. -// -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns -// an HTTP status code 404 ("no such key") error. -// -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns -// an HTTP status code 403 ("access denied") error. -// -// The following actions are related to HeadObject: -// -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// -// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses -// for more information on returned errors. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation HeadObject for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject -func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { - req, out := c.HeadObjectRequest(input) - return out, req.Send() -} - -// HeadObjectWithContext is the same as HeadObject with the addition of -// the ability to pass a context and additional request options. -// -// See HeadObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { - req, out := c.HeadObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" - -// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. 
-// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations -func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { - op := &request.Operation{ - Name: opListBucketAnalyticsConfigurations, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?analytics", - } - - if input == nil { - input = &ListBucketAnalyticsConfigurationsInput{} - } - - output = &ListBucketAnalyticsConfigurationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. -// -// Lists the analytics configurations for the bucket. You can have up to 1,000 -// analytics configurations per bucket. -// -// This action supports list pagination and does not return more than 100 configurations -// at a time. You should always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. -// If there are more configurations to list, IsTruncated is set to true, and -// there will be a value in NextContinuationToken. You use the NextContinuationToken -// value to continue the pagination of the list by passing the value in continuation-token -// in the request to GET the next page. -// -// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// For information about Amazon S3 analytics feature, see Amazon S3 Analytics -// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). -// -// The following operations are related to ListBucketAnalyticsConfigurations: -// -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketAnalyticsConfigurations for usage and error information. 
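// A sketch of the continuation-token loop described above (bucket name is a
// placeholder; the SDK generates no Pages helper for this operation, so the
// loop is written out by hand):
//
//    input := &s3.ListBucketAnalyticsConfigurationsInput{
//        Bucket: aws.String("my-bucket"),
//    }
//    for {
//        out, err := svc.ListBucketAnalyticsConfigurations(input)
//        if err != nil {
//            log.Fatal(err) // assumes the log package
//        }
//        for _, cfg := range out.AnalyticsConfigurationList {
//            fmt.Println(aws.StringValue(cfg.Id))
//        }
//        if !aws.BoolValue(out.IsTruncated) {
//            break
//        }
//        // Feed NextContinuationToken back as continuation-token for the next page.
//        input.ContinuationToken = out.NextContinuationToken
//    }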
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations -func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { - req, out := c.ListBucketAnalyticsConfigurationsRequest(input) - return out, req.Send() -} - -// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of -// the ability to pass a context and additional request options. -// -// See ListBucketAnalyticsConfigurations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { - req, out := c.ListBucketAnalyticsConfigurationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBucketIntelligentTieringConfigurations = "ListBucketIntelligentTieringConfigurations" - -// ListBucketIntelligentTieringConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketIntelligentTieringConfigurations operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListBucketIntelligentTieringConfigurations for more information on using the ListBucketIntelligentTieringConfigurations -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListBucketIntelligentTieringConfigurationsRequest method. -// req, resp := client.ListBucketIntelligentTieringConfigurationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations -func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucketIntelligentTieringConfigurationsInput) (req *request.Request, output *ListBucketIntelligentTieringConfigurationsOutput) { - op := &request.Operation{ - Name: opListBucketIntelligentTieringConfigurations, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?intelligent-tiering", - } - - if input == nil { - input = &ListBucketIntelligentTieringConfigurationsInput{} - } - - output = &ListBucketIntelligentTieringConfigurationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBucketIntelligentTieringConfigurations API operation for Amazon Simple Storage Service. -// -// Lists the S3 Intelligent-Tiering configuration from the specified bucket. -// -// The S3 Intelligent-Tiering storage class is designed to optimize storage -// costs by automatically moving data to the most cost-effective storage access -// tier, without performance impact or operational overhead. 
S3 Intelligent-Tiering -// delivers automatic cost savings in three low latency and high throughput -// access tiers. To get the lowest storage cost on data that can be accessed -// in minutes to hours, you can choose to activate additional archiving capabilities. -// -// The S3 Intelligent-Tiering storage class is the ideal storage class for data -// with unknown, changing, or unpredictable access patterns, independent of -// object size or retention period. If the size of an object is less than 128 -// KB, it is not monitored and not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the Frequent Access tier rates -// in the S3 Intelligent-Tiering storage class. -// -// For more information, see Storage class for automatically optimizing frequently -// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). -// -// Operations related to ListBucketIntelligentTieringConfigurations include: -// -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketIntelligentTieringConfigurations for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations -func (c *S3) ListBucketIntelligentTieringConfigurations(input *ListBucketIntelligentTieringConfigurationsInput) (*ListBucketIntelligentTieringConfigurationsOutput, error) { - req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) - return out, req.Send() -} - -// ListBucketIntelligentTieringConfigurationsWithContext is the same as ListBucketIntelligentTieringConfigurations with the addition of -// the ability to pass a context and additional request options. -// -// See ListBucketIntelligentTieringConfigurations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketIntelligentTieringConfigurationsWithContext(ctx aws.Context, input *ListBucketIntelligentTieringConfigurationsInput, opts ...request.Option) (*ListBucketIntelligentTieringConfigurationsOutput, error) { - req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" - -// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketInventoryConfigurations operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListBucketInventoryConfigurationsRequest method. -// req, resp := client.ListBucketInventoryConfigurationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations -func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { - op := &request.Operation{ - Name: opListBucketInventoryConfigurations, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?inventory", - } - - if input == nil { - input = &ListBucketInventoryConfigurationsInput{} - } - - output = &ListBucketInventoryConfigurationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. -// -// Returns a list of inventory configurations for the bucket. You can have up -// to 1,000 inventory configurations per bucket. -// -// This action supports list pagination and does not return more than 100 configurations -// at a time. Always check the IsTruncated element in the response. If there -// are no more configurations to list, IsTruncated is set to false. If there -// are more configurations to list, IsTruncated is set to true, and there is -// a value in NextContinuationToken. You use the NextContinuationToken value -// to continue the pagination of the list by passing the value in continuation-token -// in the request to GET the next page. -// -// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). -// -// The following operations are related to ListBucketInventoryConfigurations: -// -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) -// -// Returns awserr.Error for service API and SDK errors.
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketInventoryConfigurations for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations -func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { - req, out := c.ListBucketInventoryConfigurationsRequest(input) - return out, req.Send() -} - -// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of -// the ability to pass a context and additional request options. -// -// See ListBucketInventoryConfigurations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { - req, out := c.ListBucketInventoryConfigurationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" - -// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketMetricsConfigurations operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. -// req, resp := client.ListBucketMetricsConfigurationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations -func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { - op := &request.Operation{ - Name: opListBucketMetricsConfigurations, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?metrics", - } - - if input == nil { - input = &ListBucketMetricsConfigurationsInput{} - } - - output = &ListBucketMetricsConfigurationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. -// -// Lists the metrics configurations for the bucket. The metrics configurations -// are only for the request metrics of the bucket and do not provide information -// on daily storage metrics. You can have up to 1,000 configurations per bucket. 
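// A minimal sketch of listing the request-metrics configurations (bucket name
// is a placeholder; svc is an *s3.S3 client as in the sketches above):
//
//    out, err := svc.ListBucketMetricsConfigurations(&s3.ListBucketMetricsConfigurationsInput{
//        Bucket: aws.String("my-bucket"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    for _, cfg := range out.MetricsConfigurationList {
//        fmt.Println(aws.StringValue(cfg.Id))
//    }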
-// -// This action supports list pagination and does not return more than 100 configurations -// at a time. Always check the IsTruncated element in the response. If there -// are no more configurations to list, IsTruncated is set to false. If there -// are more configurations to list, IsTruncated is set to true, and there is -// a value in NextContinuationToken. You use the NextContinuationToken value -// to continue the pagination of the list by passing the value in continuation-token -// in the request to GET the next page. -// -// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// For more information about metrics configurations and CloudWatch request -// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). -// -// The following operations are related to ListBucketMetricsConfigurations: -// -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketMetricsConfigurations for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations -func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { - req, out := c.ListBucketMetricsConfigurationsRequest(input) - return out, req.Send() -} - -// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of -// the ability to pass a context and additional request options. -// -// See ListBucketMetricsConfigurations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { - req, out := c.ListBucketMetricsConfigurationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBuckets = "ListBuckets" - -// ListBucketsRequest generates a "aws/request.Request" representing the -// client's request for the ListBuckets operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListBuckets for more information on using the ListBuckets -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListBucketsRequest method. -// req, resp := client.ListBucketsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets -func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { - op := &request.Operation{ - Name: opListBuckets, - HTTPMethod: "GET", - HTTPPath: "/", - } - - if input == nil { - input = &ListBucketsInput{} - } - - output = &ListBucketsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListBuckets API operation for Amazon Simple Storage Service. -// -// Returns a list of all buckets owned by the authenticated sender of the request. -// To use this operation, you must have the s3:ListAllMyBuckets permission. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBuckets for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets -func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { - req, out := c.ListBucketsRequest(input) - return out, req.Send() -} - -// ListBucketsWithContext is the same as ListBuckets with the addition of -// the ability to pass a context and additional request options. -// -// See ListBuckets for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { - req, out := c.ListBucketsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListMultipartUploads = "ListMultipartUploads" - -// ListMultipartUploadsRequest generates a "aws/request.Request" representing the -// client's request for the ListMultipartUploads operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListMultipartUploads for more information on using the ListMultipartUploads -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// // Example sending a request using the ListMultipartUploadsRequest method. -// req, resp := client.ListMultipartUploadsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads -func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { - op := &request.Operation{ - Name: opListMultipartUploads, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?uploads", - Paginator: &request.Paginator{ - InputTokens: []string{"KeyMarker", "UploadIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, - LimitToken: "MaxUploads", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListMultipartUploadsInput{} - } - - output = &ListMultipartUploadsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListMultipartUploads API operation for Amazon Simple Storage Service. -// -// This action lists in-progress multipart uploads. An in-progress multipart -// upload is a multipart upload that has been initiated using the Initiate Multipart -// Upload request, but has not yet been completed or aborted. -// -// This action returns at most 1,000 multipart uploads in the response. 1,000 -// multipart uploads is the maximum number of uploads a response can include, -// which is also the default value. You can further limit the number of uploads -// in a response by specifying the max-uploads parameter in the request. If -// additional multipart uploads satisfy the list criteria, the response will -// contain an IsTruncated element with the value true. To list the additional -// multipart uploads, use the key-marker and upload-id-marker request parameters. -// -// In the response, the uploads are sorted by key. If your application has initiated -// more than one multipart upload using the same object key, then uploads in -// the response are first sorted by key. Additionally, uploads are sorted in -// ascending order within each key by the upload initiation time. -// -// For more information on multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). -// -// For information on permissions required to use the multipart upload API, -// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). -// -// The following operations are related to ListMultipartUploads: -// -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListMultipartUploads for usage and error information.
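// A sketch of one common use of this listing: aborting in-progress uploads
// older than a cutoff via the Pages helper defined below. The bucket name,
// cutoff, and time import are placeholders/assumptions; AbortMultipartUpload
// is among the related actions listed above:
//
//    cutoff := time.Now().Add(-24 * time.Hour)
//    err := svc.ListMultipartUploadsPages(
//        &s3.ListMultipartUploadsInput{Bucket: aws.String("my-bucket")},
//        func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
//            for _, u := range page.Uploads {
//                if aws.TimeValue(u.Initiated).Before(cutoff) {
//                    _, _ = svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
//                        Bucket:   aws.String("my-bucket"),
//                        Key:      u.Key,
//                        UploadId: u.UploadId,
//                    })
//                }
//            }
//            return true // keep walking every page
//        })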
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads -func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { - req, out := c.ListMultipartUploadsRequest(input) - return out, req.Send() -} - -// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of -// the ability to pass a context and additional request options. -// -// See ListMultipartUploads for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { - req, out := c.ListMultipartUploadsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListMultipartUploads method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListMultipartUploads operation. -// pageNum := 0 -// err := client.ListMultipartUploadsPages(params, -// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { - return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListMultipartUploadsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListMultipartUploadsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListObjectVersions = "ListObjectVersions" - -// ListObjectVersionsRequest generates a "aws/request.Request" representing the -// client's request for the ListObjectVersions operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
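// A sketch of walking every version and delete marker in a bucket with the
// Pages helper defined further below (bucket name is a placeholder):
//
//    err := svc.ListObjectVersionsPages(
//        &s3.ListObjectVersionsInput{Bucket: aws.String("my-bucket")},
//        func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
//            for _, v := range page.Versions {
//                fmt.Printf("%s version %s\n", aws.StringValue(v.Key), aws.StringValue(v.VersionId))
//            }
//            for _, m := range page.DeleteMarkers {
//                fmt.Printf("%s delete marker %s\n", aws.StringValue(m.Key), aws.StringValue(m.VersionId))
//            }
//            return true
//        })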
-// -// See ListObjectVersions for more information on using the ListObjectVersions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListObjectVersionsRequest method. -// req, resp := client.ListObjectVersionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions -func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { - op := &request.Operation{ - Name: opListObjectVersions, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?versions", - Paginator: &request.Paginator{ - InputTokens: []string{"KeyMarker", "VersionIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, - LimitToken: "MaxKeys", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListObjectVersionsInput{} - } - - output = &ListObjectVersionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListObjectVersions API operation for Amazon Simple Storage Service. -// -// Returns metadata about all versions of the objects in a bucket. You can also -// use request parameters as selection criteria to return metadata about a subset -// of all the object versions. -// -// To use this operation, you must have permissions to perform the s3:ListBucketVersions -// action. Be aware of the name difference. -// -// A 200 OK response can contain valid or invalid XML. Make sure to design your -// application to parse the contents of the response and handle it appropriately. -// -// To use this operation, you must have READ access to the bucket. -// -// This action is not supported by Amazon S3 on Outposts. -// -// The following operations are related to ListObjectVersions: -// -// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListObjectVersions for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions -func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { - req, out := c.ListObjectVersionsRequest(input) - return out, req.Send() -} - -// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of -// the ability to pass a context and additional request options. -// -// See ListObjectVersions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { - req, out := c.ListObjectVersionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListObjectVersions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListObjectVersions operation. -// pageNum := 0 -// err := client.ListObjectVersionsPages(params, -// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { - return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListObjectVersionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListObjectVersionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListObjects = "ListObjects" - -// ListObjectsRequest generates a "aws/request.Request" representing the -// client's request for the ListObjects operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListObjects for more information on using the ListObjects -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListObjectsRequest method. 
-// req, resp := client.ListObjectsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects -func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { - op := &request.Operation{ - Name: opListObjects, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker || Contents[-1].Key"}, - LimitToken: "MaxKeys", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListObjectsInput{} - } - - output = &ListObjectsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListObjects API operation for Amazon Simple Storage Service. -// -// Returns some or all (up to 1,000) of the objects in a bucket. You can use -// the request parameters as selection criteria to return a subset of the objects -// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure -// to design your application to parse the contents of the response and handle -// it appropriately. -// -// This action has been revised. We recommend that you use the newer version, -// ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), -// when developing applications. For backward compatibility, Amazon S3 continues -// to support ListObjects. -// -// The following operations are related to ListObjects: -// -// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListObjects for usage and error information. -// -// Returned Error Codes: -// - ErrCodeNoSuchBucket "NoSuchBucket" -// The specified bucket does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects -func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { - req, out := c.ListObjectsRequest(input) - return out, req.Send() -} - -// ListObjectsWithContext is the same as ListObjects with the addition of -// the ability to pass a context and additional request options. -// -// See ListObjects for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { - req, out := c.ListObjectsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListObjectsPages iterates over the pages of a ListObjects operation, -// calling the "fn" function with the response data for each page. 
To stop -// iterating, return false from the fn function. -// -// See ListObjects method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListObjects operation. -// pageNum := 0 -// err := client.ListObjectsPages(params, -// func(page *s3.ListObjectsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { - return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListObjectsPagesWithContext same as ListObjectsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListObjectsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListObjectsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListObjectsV2 = "ListObjectsV2" - -// ListObjectsV2Request generates a "aws/request.Request" representing the -// client's request for the ListObjectsV2 operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListObjectsV2 for more information on using the ListObjectsV2 -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListObjectsV2Request method. -// req, resp := client.ListObjectsV2Request(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 -func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { - op := &request.Operation{ - Name: opListObjectsV2, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?list-type=2", - Paginator: &request.Paginator{ - InputTokens: []string{"ContinuationToken"}, - OutputTokens: []string{"NextContinuationToken"}, - LimitToken: "MaxKeys", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListObjectsV2Input{} - } - - output = &ListObjectsV2Output{} - req = c.newRequest(op, input, output) - return -} - -// ListObjectsV2 API operation for Amazon Simple Storage Service. -// -// Returns some or all (up to 1,000) of the objects in a bucket with each request. -// You can use the request parameters as selection criteria to return a subset -// of the objects in a bucket. A 200 OK response can contain valid or invalid -// XML. 
Make sure to design your application to parse the contents of the response -// and handle it appropriately. Objects are returned sorted in an ascending -// order of the respective key names in the list. For more information about -// listing objects, see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) -// -// To use this operation, you must have READ access to the bucket. -// -// To use this action in an Identity and Access Management (IAM) policy, you -// must have permissions to perform the s3:ListBucket action. The bucket owner -// has this permission by default and can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// This section describes the latest revision of this action. We recommend that -// you use this revised API for application development. For backward compatibility, -// Amazon S3 continues to support the prior version of this API, ListObjects -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). -// -// To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). -// -// The following operations are related to ListObjectsV2: -// -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListObjectsV2 for usage and error information. -// -// Returned Error Codes: -// - ErrCodeNoSuchBucket "NoSuchBucket" -// The specified bucket does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 -func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { - req, out := c.ListObjectsV2Request(input) - return out, req.Send() -} - -// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of -// the ability to pass a context and additional request options. -// -// See ListObjectsV2 for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { - req, out := c.ListObjectsV2Request(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. 
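Since the comments above recommend ListObjectsV2 over ListObjects and its paginator is driven by ContinuationToken/NextContinuationToken, a short sketch of the `Pages` helper may help; it assumes the `client` from the earlier sketch, and the bucket and prefix are placeholders.

```go
// listAllKeys walks every page (up to 1,000 keys each) under a prefix.
// Returning false from the callback stops the iteration early, exactly as
// the removed doc comments describe.
func listAllKeys(client *s3.S3, bucket, prefix string) ([]string, error) {
	var keys []string
	err := client.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket: aws.String(bucket),
		Prefix: aws.String(prefix),
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			keys = append(keys, aws.StringValue(obj.Key))
		}
		return true // keep paginating until lastPage
	})
	return keys, err
}
```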
-// -// See ListObjectsV2 method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListObjectsV2 operation. -// pageNum := 0 -// err := client.ListObjectsV2Pages(params, -// func(page *s3.ListObjectsV2Output, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { - return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListObjectsV2Input - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListObjectsV2Request(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListParts = "ListParts" - -// ListPartsRequest generates a "aws/request.Request" representing the -// client's request for the ListParts operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListParts for more information on using the ListParts -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListPartsRequest method. -// req, resp := client.ListPartsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts -func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { - op := &request.Operation{ - Name: opListParts, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", - Paginator: &request.Paginator{ - InputTokens: []string{"PartNumberMarker"}, - OutputTokens: []string{"NextPartNumberMarker"}, - LimitToken: "MaxParts", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListPartsInput{} - } - - output = &ListPartsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListParts API operation for Amazon Simple Storage Service. -// -// Lists the parts that have been uploaded for a specific multipart upload. -// This operation must include the upload ID, which you obtain by sending the -// initiate multipart upload request (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). 
-// This request returns a maximum of 1,000 uploaded parts. The default number -// of parts returned is 1,000 parts. You can restrict the number of parts returned -// by specifying the max-parts request parameter. If your multipart upload consists -// of more than 1,000 parts, the response returns an IsTruncated field with -// the value of true, and a NextPartNumberMarker element. In subsequent ListParts -// requests you can include the part-number-marker query string parameter and -// set its value to the NextPartNumberMarker field value from the previous response. -// -// If the upload was created using a checksum algorithm, you will need to have -// permission to the kms:Decrypt action for the request to succeed. -// -// For more information on multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). -// -// For information on permissions required to use the multipart upload API, -// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). -// -// The following operations are related to ListParts: -// -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListParts for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts -func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { - req, out := c.ListPartsRequest(input) - return out, req.Send() -} - -// ListPartsWithContext is the same as ListParts with the addition of -// the ability to pass a context and additional request options. -// -// See ListParts for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { - req, out := c.ListPartsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListPartsPages iterates over the pages of a ListParts operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListParts method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListParts operation. 
-// pageNum := 0 -// err := client.ListPartsPages(params, -// func(page *s3.ListPartsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { - return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListPartsPagesWithContext same as ListPartsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListPartsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListPartsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" - -// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAccelerateConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. -// req, resp := client.PutBucketAccelerateConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration -func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketAccelerateConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?accelerate", - } - - if input == nil { - input = &PutBucketAccelerateConfigurationInput{} - } - - output = &PutBucketAccelerateConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. -// -// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer -// Acceleration is a bucket-level feature that enables you to perform faster -// data transfers to Amazon S3. -// -// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration -// action. The bucket owner has this permission by default. 
The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// The Transfer Acceleration state of a bucket can be set to one of the following -// two values: -// -// - Enabled – Enables accelerated data transfers to the bucket. -// -// - Suspended – Disables accelerated data transfers to the bucket. -// -// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// action returns the transfer acceleration state of a bucket. -// -// After setting the Transfer Acceleration state of a bucket to Enabled, it -// might take up to thirty minutes before the data transfer rates to the bucket -// increase. -// -// The name of the bucket used for Transfer Acceleration must be DNS-compliant -// and must not contain periods ("."). -// -// For more information about transfer acceleration, see Transfer Acceleration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). -// -// The following operations are related to PutBucketAccelerateConfiguration: -// -// - GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketAccelerateConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration -func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { - req, out := c.PutBucketAccelerateConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketAccelerateConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { - req, out := c.PutBucketAccelerateConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketAcl = "PutBucketAcl" - -// PutBucketAclRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAcl operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
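Stepping back to PutBucketAccelerateConfiguration just above: its documentation names exactly two valid states, Enabled and Suspended. A minimal sketch of toggling between them, again assuming the `client` from the first example; the bucket name is a placeholder and, per the comments, must be DNS-compliant with no periods.

```go
// setTransferAcceleration switches the bucket between the two documented
// Transfer Acceleration states.
func setTransferAcceleration(client *s3.S3, enable bool) error {
	status := s3.BucketAccelerateStatusSuspended
	if enable {
		status = s3.BucketAccelerateStatusEnabled
	}
	_, err := client.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
		Bucket:                  aws.String("example-bucket"), // DNS-compliant, no periods
		AccelerateConfiguration: &s3.AccelerateConfiguration{Status: aws.String(status)},
	})
	return err
}
```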
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketAcl for more information on using the PutBucketAcl -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketAclRequest method. -// req, resp := client.PutBucketAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl -func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { - op := &request.Operation{ - Name: opPutBucketAcl, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?acl", - } - - if input == nil { - input = &PutBucketAclInput{} - } - - output = &PutBucketAclOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketAcl API operation for Amazon Simple Storage Service. -// -// Sets the permissions on an existing bucket using access control lists (ACL). -// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). -// To set the ACL of a bucket, you must have WRITE_ACP permission. -// -// You can use one of the following two ways to set a bucket's permissions: -// -// - Specify the ACL in the request body -// -// - Specify permissions using request headers -// -// You cannot specify access permission using both the body and the request -// headers. -// -// Depending on your application needs, you may choose to set the ACL on a bucket -// using either the request body or the headers. For example, if you have an -// existing application that updates a bucket ACL using the request body, then -// you can continue to use that approach. -// -// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, -// ACLs are disabled and no longer affect permissions. You must use policies -// to grant access to your bucket and the objects in it. Requests to set ACLs -// or update ACLs fail and return the AccessControlListNotSupported error code. -// Requests to read ACLs are still supported. For more information, see Controlling -// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. -// -// # Access Permissions -// -// You can set access permissions using one of the following methods: -// -// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports -// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a -// predefined set of grantees and permissions. Specify the canned ACL name -// as the value of x-amz-acl. If you use this header, you cannot use other -// access control-specific headers in your request. For more information, -// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// - Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, -// x-amz-grant-write-acp, and x-amz-grant-full-control headers. 
When using
-// these headers, you specify explicit access permissions and grantees (Amazon
-// Web Services accounts or Amazon S3 groups) who will receive the permission.
-// If you use these ACL-specific headers, you cannot use the x-amz-acl header
-// to set a canned ACL. These parameters map to the set of permissions that
-// Amazon S3 supports in an ACL. For more information, see Access Control
-// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
-// You specify each grantee as a type=value pair, where the type is one of
-// the following: id – if the value specified is the canonical user ID
-// of an Amazon Web Services account uri – if you are granting permissions
-// to a predefined group emailAddress – if the value specified is the email
-// address of an Amazon Web Services account Using email addresses to specify
-// a grantee is only supported in the following Amazon Web Services Regions:
-// US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
-// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
-// South America (São Paulo) For a list of all the Amazon S3 supported Regions
-// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
-// in the Amazon Web Services General Reference. For example, the following
-// x-amz-grant-write header grants create, overwrite, and delete objects
-// permission to LogDelivery group predefined by Amazon S3 and two Amazon
-// Web Services accounts identified by their email addresses. x-amz-grant-write:
-// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333",
-// id="555566667777"
-//
-// You can use either a canned ACL or specify access permissions explicitly.
-// You cannot do both.
-//
-// # Grantee Values
-//
-// You can specify the person (grantee) to whom you're assigning access rights
-// (using request elements) in the following ways:
-//
-// - By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-// xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName></Grantee>
-// DisplayName is optional and ignored in the request
-//
-// - By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-// xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
-//
-// - By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-// xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
-// The grantee is resolved to the CanonicalUser and, in a response to a GET
-// Object acl request, appears as the CanonicalUser. Using email addresses
-// to specify a grantee is only supported in the following Amazon Web Services
-// Regions: US East (N. Virginia) US West (N. California) US West (Oregon)
-// Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe
-// (Ireland) South America (São Paulo) For a list of all the Amazon S3 supported
-// Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
-// in the Amazon Web Services General Reference.
-//
-// Related Resources
-//
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-//
-// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
-//
-// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutBucketAcl for usage and error information.
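To make the canned-ACL versus explicit-grant distinction above concrete, here is a sketch of both mutually exclusive forms, assuming the `client` from the first example; the bucket name and canonical user ID are placeholders.

```go
// setBucketACL demonstrates the two documented styles. The first call uses a
// canned ACL (the x-amz-acl header); the second uses explicit x-amz-grant-*
// grants, which cannot be combined with a canned ACL in the same request.
func setBucketACL(client *s3.S3) error {
	if _, err := client.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String("example-bucket"),
		ACL:    aws.String(s3.BucketCannedACLPublicRead),
	}); err != nil {
		return err
	}
	_, err := client.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket:           aws.String("example-bucket"),
		GrantRead:        aws.String(`uri="http://acs.amazonaws.com/groups/global/AuthenticatedUsers"`),
		GrantFullControl: aws.String(`id="CANONICAL_USER_ID"`), // placeholder canonical user ID
	})
	return err
}
```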
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl -func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { - req, out := c.PutBucketAclRequest(input) - return out, req.Send() -} - -// PutBucketAclWithContext is the same as PutBucketAcl with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketAcl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { - req, out := c.PutBucketAclRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" - -// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. -// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration -func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketAnalyticsConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?analytics", - } - - if input == nil { - input = &PutBucketAnalyticsConfigurationInput{} - } - - output = &PutBucketAnalyticsConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. -// -// Sets an analytics configuration for the bucket (specified by the analytics -// configuration ID). You can have up to 1,000 analytics configurations per -// bucket. -// -// You can choose to have storage class analysis export analysis reports sent -// to a comma-separated values (CSV) flat file. See the DataExport request element. -// Reports are updated daily and are based on the object filters that you configure. -// When selecting data export, you specify a destination bucket and an optional -// destination prefix where the file is written. You can export the data to -// a destination bucket in a different account. 
However, the destination bucket -// must be in the same Region as the bucket that you are making the PUT analytics -// configuration to. For more information, see Amazon S3 Analytics – Storage -// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). -// -// You must create a bucket policy on the destination bucket where the exported -// file is written to grant permissions to Amazon S3 to write objects to the -// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory -// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). -// -// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// Special Errors -// -// - HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid -// argument. -// -// - HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause: -// You are attempting to create a new configuration but have already reached -// the 1,000-configuration limit. -// -// - HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not -// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration -// bucket permission to set the configuration on the bucket. -// -// Related Resources -// -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketAnalyticsConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration -func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { - req, out := c.PutBucketAnalyticsConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketAnalyticsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
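Before the function bodies continue below, a sketch of the storage-class-analysis export just described, which maps onto a deeply nested configuration struct; both bucket names and the prefix are placeholders, and the destination must be given as an ARN in the same Region, as the comments note.

```go
// putAnalyticsConfig configures a daily CSV export of storage class analysis
// to a same-Region destination bucket.
func putAnalyticsConfig(client *s3.S3) error {
	_, err := client.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("report-1"),
		AnalyticsConfiguration: &s3.AnalyticsConfiguration{
			Id: aws.String("report-1"),
			StorageClassAnalysis: &s3.StorageClassAnalysis{
				DataExport: &s3.StorageClassAnalysisDataExport{
					OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
					Destination: &s3.AnalyticsExportDestination{
						S3BucketDestination: &s3.AnalyticsS3BucketDestination{
							Bucket: aws.String("arn:aws:s3:::example-analytics-bucket"),
							Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
							Prefix: aws.String("storage-class-analysis/"),
						},
					},
				},
			},
		},
	})
	return err
}
```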
-func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { - req, out := c.PutBucketAnalyticsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketCors = "PutBucketCors" - -// PutBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketCors operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketCors for more information on using the PutBucketCors -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketCorsRequest method. -// req, resp := client.PutBucketCorsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors -func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { - op := &request.Operation{ - Name: opPutBucketCors, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &PutBucketCorsInput{} - } - - output = &PutBucketCorsOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketCors API operation for Amazon Simple Storage Service. -// -// Sets the cors configuration for your bucket. If the configuration exists, -// Amazon S3 replaces it. -// -// To use this operation, you must be allowed to perform the s3:PutBucketCORS -// action. By default, the bucket owner has this permission and can grant it -// to others. -// -// You set this configuration on a bucket so that the bucket can service cross-origin -// requests. For example, you might want to enable a request whose origin is -// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com -// by using the browser's XMLHttpRequest capability. -// -// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors -// subresource to the bucket. The cors subresource is an XML document in which -// you configure rules that identify origins and the HTTP methods that can be -// executed on your bucket. The document is limited to 64 KB in size. -// -// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) -// against a bucket, it evaluates the cors configuration on the bucket and uses -// the first CORSRule rule that matches the incoming browser request to enable -// a cross-origin request. For a rule to match, the following conditions must -// be met: -// -// - The request's Origin header must match AllowedOrigin elements. 
-// -// - The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method -// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod -// elements. -// -// - Every header specified in the Access-Control-Request-Headers request -// header of a pre-flight request must match an AllowedHeader element. -// -// For more information about CORS, go to Enabling Cross-Origin Resource Sharing -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon -// S3 User Guide. -// -// Related Resources -// -// - GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) -// -// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) -// -// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketCors for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors -func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { - req, out := c.PutBucketCorsRequest(input) - return out, req.Send() -} - -// PutBucketCorsWithContext is the same as PutBucketCors with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketCors for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { - req, out := c.PutBucketCorsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketEncryption = "PutBucketEncryption" - -// PutBucketEncryptionRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketEncryption operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketEncryption for more information on using the PutBucketEncryption -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketEncryptionRequest method. 
-// req, resp := client.PutBucketEncryptionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption -func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { - op := &request.Operation{ - Name: opPutBucketEncryption, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?encryption", - } - - if input == nil { - input = &PutBucketEncryptionInput{} - } - - output = &PutBucketEncryptionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketEncryption API operation for Amazon Simple Storage Service. -// -// This action uses the encryption subresource to configure default encryption -// and Amazon S3 Bucket Key for an existing bucket. -// -// Default encryption for a bucket can use server-side encryption with Amazon -// S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). If you specify -// default encryption using SSE-KMS, you can also configure Amazon S3 Bucket -// Key. When the default encryption is SSE-KMS, if you upload an object to the -// bucket and do not specify the KMS key to use for encryption, Amazon S3 uses -// the default Amazon Web Services managed KMS key for your account. For information -// about default encryption, see Amazon S3 default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see -// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon S3 User Guide. -// -// This action requires Amazon Web Services Signature Version 4. For more information, -// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). -// -// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration -// action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. -// -// Related Resources -// -// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) -// -// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketEncryption for usage and error information. 
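As a concrete instance of the default-encryption setup described above, a sketch enabling SSE-KMS with an S3 Bucket Key, again assuming the `client` from the first example; the bucket name and key alias are placeholders, and omitting KMSMasterKeyID falls back to the Amazon Web Services managed key, as the comments note.

```go
// putDefaultEncryption sets SSE-KMS as the bucket's default encryption and
// turns on S3 Bucket Keys to reduce KMS request costs.
func putDefaultEncryption(client *s3.S3) error {
	_, err := client.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String("alias/example-key"), // placeholder; omit to use the AWS managed key
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	return err
}
```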
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption -func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { - req, out := c.PutBucketEncryptionRequest(input) - return out, req.Send() -} - -// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketEncryption for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { - req, out := c.PutBucketEncryptionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketIntelligentTieringConfiguration = "PutBucketIntelligentTieringConfiguration" - -// PutBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketIntelligentTieringConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketIntelligentTieringConfiguration for more information on using the PutBucketIntelligentTieringConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketIntelligentTieringConfigurationRequest method. -// req, resp := client.PutBucketIntelligentTieringConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration -func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketIntelligentTieringConfigurationInput) (req *request.Request, output *PutBucketIntelligentTieringConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketIntelligentTieringConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?intelligent-tiering", - } - - if input == nil { - input = &PutBucketIntelligentTieringConfigurationInput{} - } - - output = &PutBucketIntelligentTieringConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. -// -// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You -// can have up to 1,000 S3 Intelligent-Tiering configurations per bucket. -// -// The S3 Intelligent-Tiering storage class is designed to optimize storage -// costs by automatically moving data to the most cost-effective storage access -// tier, without performance impact or operational overhead. 
S3 Intelligent-Tiering -// delivers automatic cost savings in three low latency and high throughput -// access tiers. To get the lowest storage cost on data that can be accessed -// in minutes to hours, you can choose to activate additional archiving capabilities. -// -// The S3 Intelligent-Tiering storage class is the ideal storage class for data -// with unknown, changing, or unpredictable access patterns, independent of -// object size or retention period. If the size of an object is less than 128 -// KB, it is not monitored and not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the Frequent Access tier rates -// in the S3 Intelligent-Tiering storage class. -// -// For more information, see Storage class for automatically optimizing frequently -// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). -// -// Operations related to PutBucketIntelligentTieringConfiguration include: -// -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) -// -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) -// -// You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically -// move objects stored in the S3 Intelligent-Tiering storage class to the Archive -// Access or Deep Archive Access tier. -// -// Special Errors -// -// - HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument -// -// - HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are -// attempting to create a new configuration but have already reached the -// 1,000-configuration limit. -// -// - HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner -// of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration -// bucket permission to set the configuration on the bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketIntelligentTieringConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration -func (c *S3) PutBucketIntelligentTieringConfiguration(input *PutBucketIntelligentTieringConfigurationInput) (*PutBucketIntelligentTieringConfigurationOutput, error) { - req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketIntelligentTieringConfigurationWithContext is the same as PutBucketIntelligentTieringConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketIntelligentTieringConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
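The optional archive tiers mentioned above are enabled through Tierings entries. A sketch with the `client` from the first example; the bucket and configuration ID are placeholders, and the day values shown are the documented minimums for each tier (90 for Archive Access, 180 for Deep Archive Access).

```go
// putTieringConfig opts eligible objects into the archive access tiers after
// the given number of days without access.
func putTieringConfig(client *s3.S3) error {
	_, err := client.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("archive-config"),
		IntelligentTieringConfiguration: &s3.IntelligentTieringConfiguration{
			Id:     aws.String("archive-config"),
			Status: aws.String(s3.IntelligentTieringStatusEnabled),
			Tierings: []*s3.Tiering{
				{AccessTier: aws.String(s3.IntelligentTieringAccessTierArchiveAccess), Days: aws.Int64(90)},
				{AccessTier: aws.String(s3.IntelligentTieringAccessTierDeepArchiveAccess), Days: aws.Int64(180)},
			},
		},
	})
	return err
}
```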
-func (c *S3) PutBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *PutBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*PutBucketIntelligentTieringConfigurationOutput, error) { - req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" - -// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketInventoryConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketInventoryConfigurationRequest method. -// req, resp := client.PutBucketInventoryConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration -func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketInventoryConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?inventory", - } - - if input == nil { - input = &PutBucketInventoryConfigurationInput{} - } - - output = &PutBucketInventoryConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. -// -// This implementation of the PUT action adds an inventory configuration (identified -// by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations -// per bucket. -// -// Amazon S3 inventory generates inventories of the objects in the bucket on -// a daily or weekly basis, and the results are published to a flat file. The -// bucket that is inventoried is called the source bucket, and the bucket where -// the inventory flat file is stored is called the destination bucket. The destination -// bucket must be in the same Amazon Web Services Region as the source bucket. -// -// When you configure an inventory for a source bucket, you specify the destination -// bucket where you want the inventory to be stored, and whether to generate -// the inventory daily or weekly. You can also configure what object metadata -// to include and whether to inventory all object versions or only current versions. -// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// in the Amazon S3 User Guide. -// -// You must create a bucket policy on the destination bucket to grant permissions -// to Amazon S3 to write objects to the bucket in the defined location. 
For -// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage -// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). -// -// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration -// action. The bucket owner has this permission by default and can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. -// -// Special Errors -// -// - HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument -// -// - HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are -// attempting to create a new configuration but have already reached the -// 1,000-configuration limit. -// -// - HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner -// of the specified bucket, or you do not have the s3:PutInventoryConfiguration -// bucket permission to set the configuration on the bucket. -// -// Related Resources -// -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketInventoryConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration -func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { - req, out := c.PutBucketInventoryConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketInventoryConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { - req, out := c.PutBucketInventoryConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketLifecycle = "PutBucketLifecycle" - -// PutBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLifecycle operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketLifecycle for more information on using the PutBucketLifecycle -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketLifecycleRequest method. -// req, resp := client.PutBucketLifecycleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle -// -// Deprecated: PutBucketLifecycle has been deprecated -func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") - } - op := &request.Operation{ - Name: opPutBucketLifecycle, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &PutBucketLifecycleInput{} - } - - output = &PutBucketLifecycleOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketLifecycle API operation for Amazon Simple Storage Service. -// -// For an updated version of this API, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html). -// This version has been deprecated. Existing lifecycle configurations will -// work. For new lifecycle configurations, use the updated API. -// -// Creates a new lifecycle configuration for the bucket or replaces an existing -// lifecycle configuration. For information about lifecycle configuration, see -// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. -// -// By default, all Amazon S3 resources, including buckets, objects, and related -// subresources (for example, lifecycle configuration and website configuration) -// are private. Only the resource owner, the Amazon Web Services account that -// created the resource, can access it. The resource owner can optionally grant -// access permissions to others by writing an access policy. For this operation, -// users must get the s3:PutLifecycleConfiguration permission. -// -// You can also explicitly deny permissions. Explicit denial also supersedes -// any other permissions. If you want to prevent users or accounts from removing -// or deleting objects from your bucket, you must deny them permissions for -// the following actions: -// -// - s3:DeleteObject -// -// - s3:DeleteObjectVersion -// -// - s3:PutLifecycleConfiguration -// -// For more information about permissions, see Managing Access Permissions to -// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. 
-// -// For more examples of transitioning objects to storage classes such as STANDARD_IA -// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). -// -// Related Resources -// -// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)(Deprecated) -// -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// -// - RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) -// -// - By default, a resource owner—in this case, a bucket owner, which is -// the Amazon Web Services account that created the bucket—can perform -// any of the operations. A resource owner can also grant others permission -// to perform the operation. For more information, see the following topics -// in the Amazon S3 User Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLifecycle for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle -// -// Deprecated: PutBucketLifecycle has been deprecated -func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { - req, out := c.PutBucketLifecycleRequest(input) - return out, req.Send() -} - -// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketLifecycle for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -// -// Deprecated: PutBucketLifecycleWithContext has been deprecated -func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { - req, out := c.PutBucketLifecycleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" - -// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLifecycleConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. -// req, resp := client.PutBucketLifecycleConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration -func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketLifecycleConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &PutBucketLifecycleConfigurationInput{} - } - - output = &PutBucketLifecycleConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. -// -// Creates a new lifecycle configuration for the bucket or replaces an existing -// lifecycle configuration. Keep in mind that this will overwrite an existing -// lifecycle configuration, so if you want to retain any configuration details, -// they must be included in the new lifecycle configuration. For information -// about lifecycle configuration, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). -// -// Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, or a combination of both. -// Accordingly, this section describes the latest API. The previous version -// of the API supported filtering based only on an object key name prefix, which -// is supported for backward compatibility. For the related API description, -// see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html). -// -// # Rules -// -// You specify the lifecycle configuration in your request body. The lifecycle -// configuration is specified as XML consisting of one or more rules. An Amazon -// S3 Lifecycle configuration can have up to 1,000 rules. This limit is not -// adjustable. Each rule consists of the following: -// -// - Filter identifying a subset of objects to which the rule applies. The -// filter can be based on a key name prefix, object tags, or a combination -// of both. -// -// - Status whether the rule is in effect. -// -// - One or more lifecycle transition and expiration actions that you want -// Amazon S3 to perform on the objects identified by the filter. If the state -// of your bucket is versioning-enabled or versioning-suspended, you can -// have many versions of the same object (one current version and zero or -// more noncurrent versions). Amazon S3 provides predefined actions that -// you can specify for current and noncurrent object versions. -// -// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). 
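To make the rule elements above concrete, here is a hedged sketch of one filter-based rule with a transition and an expiration, using the v1 types (bucket name, prefix, and day counts are illustrative assumptions, not values from the source):

	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("archive-reports"),
				Status: aws.String("Enabled"),
				// Filter identifies the subset of objects the rule applies to.
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("reports/")},
				Transitions: []*s3.Transition{{
					Days:         aws.Int64(90),
					StorageClass: aws.String(s3.TransitionStorageClassStandardIa),
				}},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
			}},
		},
	})

Because the call is a full replacement, any rule omitted here would be dropped from the bucket's configuration.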
-// -// # Permissions -// -// By default, all Amazon S3 resources are private, including buckets, objects, -// and related subresources (for example, lifecycle configuration and website -// configuration). Only the resource owner (that is, the Amazon Web Services -// account that created it) can access the resource. The resource owner can -// optionally grant access permissions to others by writing an access policy. -// For this operation, a user must get the s3:PutLifecycleConfiguration permission. -// -// You can also explicitly deny permissions. Explicit deny also supersedes any -// other permissions. If you want to block users or accounts from removing or -// deleting objects from your bucket, you must deny them permissions for the -// following actions: -// -// - s3:DeleteObject -// -// - s3:DeleteObjectVersion -// -// - s3:PutLifecycleConfiguration -// -// For more information about permissions, see Managing Access Permissions to -// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// The following are related to PutBucketLifecycleConfiguration: -// -// - Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) -// -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// -// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLifecycleConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration -func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { - req, out := c.PutBucketLifecycleConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketLifecycleConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { - req, out := c.PutBucketLifecycleConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketLogging = "PutBucketLogging" - -// PutBucketLoggingRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLogging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-//
-// See PutBucketLogging for more information on using the PutBucketLogging
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the PutBucketLoggingRequest method.
-// req, resp := client.PutBucketLoggingRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
-func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketLogging,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?logging",
-	}
-
-	if input == nil {
-		input = &PutBucketLoggingInput{}
-	}
-
-	output = &PutBucketLoggingOutput{}
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
-	req.Handlers.Build.PushBackNamed(request.NamedHandler{
-		Name: "contentMd5Handler",
-		Fn:   checksum.AddBodyContentMD5Handler,
-	})
-	return
-}
-
-// PutBucketLogging API operation for Amazon Simple Storage Service.
-//
-// Sets the logging parameters for a bucket and specifies permissions for who
-// can view and modify the logging parameters. All logs are saved to buckets
-// in the same Amazon Web Services Region as the source bucket. To set the logging
-// status of a bucket, you must be the bucket owner.
-//
-// The bucket owner is automatically granted FULL_CONTROL to all logs. You use
-// the Grantee request element to grant access to other people. The Permissions
-// request element specifies the kind of access the grantee has to the logs.
-//
-// If the target bucket for log delivery uses the bucket owner enforced setting
-// for S3 Object Ownership, you can't use the Grantee request element to grant
-// access to others. Permissions can only be granted using policies. For more
-// information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general)
-// in the Amazon S3 User Guide.
-//
-// # Grantee Values
-//
-// You can specify the person (grantee) to whom you're assigning access rights
-// (using request elements) in the following ways:
-//
-// - By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-// xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName></Grantee>
-// DisplayName is optional and ignored in the request.
-//
-// - By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-// xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
-// The grantee is resolved to the CanonicalUser and, in a response to a GET
-// Object acl request, appears as the CanonicalUser.
-//
-// - By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-// xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
-//
-// To enable logging, you use LoggingEnabled and its children request elements.
-// To disable logging, you use an empty BucketLoggingStatus request element:
-//
-// <BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
-//
-// For more information about server access logging, see Server Access Logging
-// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) in
-// the Amazon S3 User Guide.
-//
-// For more information about creating a bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html).
-// For more information about returning the logging status of a bucket, see
-// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html).
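For example, enabling access logging to a second bucket might look like this sketch (both bucket names and the prefix are placeholders, assuming the usual aws, session, and s3 imports):

	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("example-source-bucket"),
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"),
				TargetPrefix: aws.String("access-logs/"),
			},
		},
	})
	// Sending an empty BucketLoggingStatus instead disables logging.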
-// -// The following operations are related to PutBucketLogging: -// -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// - GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLogging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging -func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { - req, out := c.PutBucketLoggingRequest(input) - return out, req.Send() -} - -// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketLogging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { - req, out := c.PutBucketLoggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" - -// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketMetricsConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketMetricsConfigurationRequest method. 
-// req, resp := client.PutBucketMetricsConfigurationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
-func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketMetricsConfiguration,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?metrics",
-	}
-
-	if input == nil {
-		input = &PutBucketMetricsConfigurationInput{}
-	}
-
-	output = &PutBucketMetricsConfigurationOutput{}
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
-	return
-}
-
-// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
-//
-// Sets a metrics configuration (specified by the metrics configuration ID)
-// for the bucket. You can have up to 1,000 metrics configurations per bucket.
-// If you're updating an existing metrics configuration, note that this is a
-// full replacement of the existing metrics configuration. If you don't include
-// the elements you want to keep, they are erased.
-//
-// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration
-// action. The bucket owner has this permission by default. The bucket owner
-// can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
-//
-// For information about CloudWatch request metrics for Amazon S3, see Monitoring
-// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
-//
-// The following operations are related to PutBucketMetricsConfiguration:
-//
-// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
-//
-// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
-//
-// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
-//
-// PutBucketMetricsConfiguration has the following special error:
-//
-// - Error code: TooManyConfigurations Description: You are attempting to
-// create a new configuration but have already reached the 1,000-configuration
-// limit. HTTP Status Code: HTTP 400 Bad Request
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutBucketMetricsConfiguration for usage and error information.
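A short sketch of a prefix-filtered metrics configuration (the bucket name, configuration ID, and prefix are invented for illustration):

	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("docs-requests"), // metrics configuration ID
		MetricsConfiguration: &s3.MetricsConfiguration{
			Id:     aws.String("docs-requests"),
			Filter: &s3.MetricsFilter{Prefix: aws.String("docs/")},
		},
	})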
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration -func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { - req, out := c.PutBucketMetricsConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketMetricsConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { - req, out := c.PutBucketMetricsConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketNotification = "PutBucketNotification" - -// PutBucketNotificationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketNotification operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketNotification for more information on using the PutBucketNotification -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketNotificationRequest method. -// req, resp := client.PutBucketNotificationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification -// -// Deprecated: PutBucketNotification has been deprecated -func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") - } - op := &request.Operation{ - Name: opPutBucketNotification, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &PutBucketNotificationInput{} - } - - output = &PutBucketNotificationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketNotification API operation for Amazon Simple Storage Service. -// -// No longer used, see the PutBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html) -// operation. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketNotification for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification -// -// Deprecated: PutBucketNotification has been deprecated -func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { - req, out := c.PutBucketNotificationRequest(input) - return out, req.Send() -} - -// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketNotification for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -// -// Deprecated: PutBucketNotificationWithContext has been deprecated -func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { - req, out := c.PutBucketNotificationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" - -// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketNotificationConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketNotificationConfigurationRequest method. -// req, resp := client.PutBucketNotificationConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration -func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketNotificationConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &PutBucketNotificationConfigurationInput{} - } - - output = &PutBucketNotificationConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. -// -// Enables notifications of specified events for a bucket. 
For more information
-// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
-//
-// Using this API, you can replace an existing notification configuration. The
-// configuration is an XML file that defines the event types that you want Amazon
-// S3 to publish and the destination where you want Amazon S3 to publish an
-// event notification when it detects an event of the specified type.
-//
-// By default, your bucket has no event notifications configured. That is, the
-// notification configuration will be an empty NotificationConfiguration.
-//
-// <NotificationConfiguration>
-//
-// </NotificationConfiguration>
-//
-// This action replaces the existing notification configuration with the configuration
-// you include in the request body.
-//
-// After Amazon S3 receives this request, it first verifies that any Amazon
-// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon
-// SQS) destination exists, and that the bucket owner has permission to publish
-// to it by sending a test notification. In the case of Lambda destinations,
-// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission
-// to invoke the function from the Amazon S3 bucket. For more information, see
-// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
-//
-// You can disable notifications by adding the empty NotificationConfiguration
-// element.
-//
-// For more information about the number of event notification configurations
-// that you can create per bucket, see Amazon S3 service quotas (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3)
-// in Amazon Web Services General Reference.
-//
-// By default, only the bucket owner can configure notifications on a bucket.
-// However, bucket owners can use a bucket policy to grant permission to other
-// users to set this configuration with s3:PutBucketNotification permission.
-//
-// The PUT notification is an atomic operation. For example, suppose your notification
-// configuration includes SNS topic, SQS queue, and Lambda function configurations.
-// When you send a PUT request with this configuration, Amazon S3 sends test
-// messages to your SNS topic. If the message fails, the entire PUT action will
-// fail, and Amazon S3 will not add the configuration to your bucket.
-//
-// # Responses
-//
-// If the configuration in the request body includes only one TopicConfiguration
-// specifying only the s3:ReducedRedundancyLostObject event type, the response
-// will also include the x-amz-sns-test-message-id header containing the message
-// ID of the test notification sent to the topic.
-//
-// The following action is related to PutBucketNotificationConfiguration:
-//
-// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation PutBucketNotificationConfiguration for usage and error information.
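As one possible shape of such a configuration, the sketch below wires a single SQS destination to all object-created events (the bucket name and queue ARN are placeholders):

	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"),
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
				Events:   []*string{aws.String(s3.EventS3ObjectCreated)}, // "s3:ObjectCreated:*"
			}},
		},
	})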
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration -func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { - req, out := c.PutBucketNotificationConfigurationRequest(input) - return out, req.Send() -} - -// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketNotificationConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { - req, out := c.PutBucketNotificationConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketOwnershipControls = "PutBucketOwnershipControls" - -// PutBucketOwnershipControlsRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketOwnershipControls operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketOwnershipControls for more information on using the PutBucketOwnershipControls -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketOwnershipControlsRequest method. -// req, resp := client.PutBucketOwnershipControlsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls -func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControlsInput) (req *request.Request, output *PutBucketOwnershipControlsOutput) { - op := &request.Operation{ - Name: opPutBucketOwnershipControls, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?ownershipControls", - } - - if input == nil { - input = &PutBucketOwnershipControlsInput{} - } - - output = &PutBucketOwnershipControlsOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketOwnershipControls API operation for Amazon Simple Storage Service. -// -// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this -// operation, you must have the s3:PutBucketOwnershipControls permission. For -// more information about Amazon S3 permissions, see Specifying permissions -// in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html). 
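A brief sketch, assuming the BucketOwnerEnforced ownership setting and a placeholder bucket name:

	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String("example-bucket"),
		OwnershipControls: &s3.OwnershipControls{
			Rules: []*s3.OwnershipControlsRule{{
				// Disables ACLs; the bucket owner owns every object.
				ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced),
			}},
		},
	})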
-// -// For information about Amazon S3 Object Ownership, see Using object ownership -// (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html). -// -// The following operations are related to PutBucketOwnershipControls: -// -// - GetBucketOwnershipControls -// -// - DeleteBucketOwnershipControls -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketOwnershipControls for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls -func (c *S3) PutBucketOwnershipControls(input *PutBucketOwnershipControlsInput) (*PutBucketOwnershipControlsOutput, error) { - req, out := c.PutBucketOwnershipControlsRequest(input) - return out, req.Send() -} - -// PutBucketOwnershipControlsWithContext is the same as PutBucketOwnershipControls with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketOwnershipControls for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketOwnershipControlsWithContext(ctx aws.Context, input *PutBucketOwnershipControlsInput, opts ...request.Option) (*PutBucketOwnershipControlsOutput, error) { - req, out := c.PutBucketOwnershipControlsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketPolicy = "PutBucketPolicy" - -// PutBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketPolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketPolicy for more information on using the PutBucketPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketPolicyRequest method. 
-// req, resp := client.PutBucketPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy -func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { - op := &request.Operation{ - Name: opPutBucketPolicy, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &PutBucketPolicyInput{} - } - - output = &PutBucketPolicyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketPolicy API operation for Amazon Simple Storage Service. -// -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using -// an identity other than the root user of the Amazon Web Services account that -// owns the bucket, the calling identity must have the PutBucketPolicy permissions -// on the specified bucket and belong to the bucket owner's account in order -// to use this operation. -// -// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access -// Denied error. If you have the correct permissions, but you're not using an -// identity that belongs to the bucket owner's account, Amazon S3 returns a -// 405 Method Not Allowed error. -// -// As a security precaution, the root user of the Amazon Web Services account -// that owns a bucket can always use this operation, even if the policy explicitly -// denies the root user the ability to perform this action. -// -// For more information, see Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html). -// -// The following operations are related to PutBucketPolicy: -// -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketPolicy for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy -func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { - req, out := c.PutBucketPolicyRequest(input) - return out, req.Send() -} - -// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { - req, out := c.PutBucketPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opPutBucketReplication = "PutBucketReplication" - -// PutBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketReplication operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketReplication for more information on using the PutBucketReplication -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketReplicationRequest method. -// req, resp := client.PutBucketReplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication -func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { - op := &request.Operation{ - Name: opPutBucketReplication, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?replication", - } - - if input == nil { - input = &PutBucketReplicationInput{} - } - - output = &PutBucketReplicationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketReplication API operation for Amazon Simple Storage Service. -// -// Creates a replication configuration or replaces an existing one. For more -// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. -// -// Specify the replication configuration in the request body. In the replication -// configuration, you provide the name of the destination bucket or buckets -// where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 -// can assume to replicate objects on your behalf, and other relevant information. -// -// A replication configuration must include at least one rule, and can contain -// a maximum of 1,000. Each rule identifies a subset of objects to replicate -// by filtering the objects in the source bucket. To choose additional subsets -// of objects to replicate, add a rule for each subset. -// -// To specify a subset of the objects in the source bucket to apply a replication -// rule to, add the Filter element as a child of the Rule element. You can filter -// objects based on an object key prefix, one or more object tags, or both. -// When you add the Filter element in the configuration, you must also add the -// following elements: DeleteMarkerReplication, Status, and Priority. -// -// If you are using an earlier version of the replication configuration, Amazon -// S3 handles replication of delete markers differently. For more information, -// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). 
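A sketch of one filter-based rule carrying the DeleteMarkerReplication, Status, and Priority elements mentioned above (the role ARN, bucket names, and prefix are placeholder values, not from the source):

	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("example-source-bucket"),
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			// IAM role that Amazon S3 assumes to replicate on your behalf.
			Role: aws.String("arn:aws:iam::123456789012:role/example-replication-role"),
			Rules: []*s3.ReplicationRule{{
				Status:   aws.String(s3.ReplicationRuleStatusEnabled),
				Priority: aws.Int64(1),
				Filter:   &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
				DeleteMarkerReplication: &s3.DeleteMarkerReplication{
					Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
				},
				Destination: &s3.Destination{
					Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
				},
			}},
		},
	})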
-// -// For information about enabling versioning on a bucket, see Using Versioning -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). -// -// # Handling Replication of Encrypted Objects -// -// By default, Amazon S3 doesn't replicate objects that are stored at rest using -// server-side encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted -// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, -// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about -// replication configuration, see Replicating Objects Created with SSE Using -// KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). -// -// For information on PutBucketReplication errors, see List of replication-related -// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) -// -// # Permissions -// -// To create a PutBucketReplication request, you must have s3:PutReplicationConfiguration -// permissions for the bucket. -// -// By default, a resource owner, in this case the Amazon Web Services account -// that created the bucket, can perform this operation. The resource owner can -// also grant others permissions to perform the operation. For more information -// about permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// To perform this operation, the user or role performing the action must have -// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) -// permission. -// -// The following operations are related to PutBucketReplication: -// -// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) -// -// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketReplication for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication -func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { - req, out := c.PutBucketReplicationRequest(input) - return out, req.Send() -} - -// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketReplication for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { - req, out := c.PutBucketReplicationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opPutBucketRequestPayment = "PutBucketRequestPayment" - -// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketRequestPayment operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketRequestPaymentRequest method. -// req, resp := client.PutBucketRequestPaymentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment -func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { - op := &request.Operation{ - Name: opPutBucketRequestPayment, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?requestPayment", - } - - if input == nil { - input = &PutBucketRequestPaymentInput{} - } - - output = &PutBucketRequestPaymentOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketRequestPayment API operation for Amazon Simple Storage Service. -// -// Sets the request payment configuration for a bucket. By default, the bucket -// owner pays for downloads from the bucket. This configuration parameter enables -// the bucket owner (only) to specify that the person requesting the download -// will be charged for the download. For more information, see Requester Pays -// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). -// -// The following operations are related to PutBucketRequestPayment: -// -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// - GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketRequestPayment for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment -func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { - req, out := c.PutBucketRequestPaymentRequest(input) - return out, req.Send() -} - -// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketRequestPayment for details on how to use this API operation. 
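For example, a context-aware call might look like this sketch (the bucket name is a placeholder; context and time imports are assumed alongside the usual aws, session, and s3 ones):

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketRequestPaymentWithContext(ctx, &s3.PutBucketRequestPaymentInput{
		Bucket: aws.String("example-bucket"),
		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
			Payer: aws.String(s3.PayerRequester), // requester, not bucket owner, pays
		},
	})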
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { - req, out := c.PutBucketRequestPaymentRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketTagging = "PutBucketTagging" - -// PutBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketTagging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketTagging for more information on using the PutBucketTagging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketTaggingRequest method. -// req, resp := client.PutBucketTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging -func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { - op := &request.Operation{ - Name: opPutBucketTagging, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &PutBucketTaggingInput{} - } - - output = &PutBucketTaggingOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketTagging API operation for Amazon Simple Storage Service. -// -// Sets the tags for a bucket. -// -// Use tags to organize your Amazon Web Services bill to reflect your own cost -// structure. To do this, sign up to get your Amazon Web Services account bill -// with tag key values included. Then, to see the cost of combined resources, -// organize your billing information according to resources with the same tag -// key values. For example, you can tag several resources with a specific application -// name, and then organize your billing information to see the total cost of -// that application across several services. For more information, see Cost -// Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) -// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). -// -// When this operation sets the tags for a bucket, it will overwrite any current -// tags the bucket already has. You cannot use this operation to add tags to -// an existing list of tags. -// -// To use this operation, you must have permissions to perform the s3:PutBucketTagging -// action. 
The bucket owner has this permission by default and can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// -// PutBucketTagging has the following special errors: -// -// - Error code: InvalidTagError Description: The tag provided was not a -// valid tag. This error can occur if the tag did not pass input validation. -// For information about tag restrictions, see User-Defined Tag Restrictions -// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) -// and Amazon Web Services-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). -// -// - Error code: MalformedXMLError Description: The XML provided does not -// match the schema. -// -// - Error code: OperationAbortedError Description: A conflicting conditional -// action is currently in progress against this resource. Please try again. -// -// - Error code: InternalError Description: The service was unable to apply -// the provided tag to the bucket. -// -// The following operations are related to PutBucketTagging: -// -// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) -// -// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging -func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { - req, out := c.PutBucketTaggingRequest(input) - return out, req.Send() -} - -// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { - req, out := c.PutBucketTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketVersioning = "PutBucketVersioning" - -// PutBucketVersioningRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketVersioning operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See PutBucketVersioning for more information on using the PutBucketVersioning -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketVersioningRequest method. -// req, resp := client.PutBucketVersioningRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning -func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { - op := &request.Operation{ - Name: opPutBucketVersioning, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?versioning", - } - - if input == nil { - input = &PutBucketVersioningInput{} - } - - output = &PutBucketVersioningOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketVersioning API operation for Amazon Simple Storage Service. -// -// Sets the versioning state of an existing bucket. -// -// You can set the versioning state with one of the following values: -// -// Enabled—Enables versioning for the objects in the bucket. All objects added -// to the bucket receive a unique version ID. -// -// Suspended—Disables versioning for the objects in the bucket. All objects -// added to the bucket receive the version ID null. -// -// If the versioning state has never been set on a bucket, it has no versioning -// state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) -// request does not return a versioning state value. -// -// In order to enable MFA Delete, you must be the bucket owner. If you are the -// bucket owner and want to enable MFA Delete in the bucket versioning configuration, -// you must include the x-amz-mfa request header and the Status and the MfaDelete -// request elements in a request to set the versioning state of the bucket. -// -// If you have an object expiration lifecycle policy in your non-versioned bucket -// and you want to maintain the same permanent delete behavior when you enable -// versioning, you must add a noncurrent expiration policy. The noncurrent expiration -// lifecycle policy will manage the deletes of the noncurrent object versions -// in the version-enabled bucket. (A version-enabled bucket maintains one current -// and zero or more noncurrent object versions.) For more information, see Lifecycle -// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). -// -// Related Resources -// -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// -// - GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
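-//
-// A hedged example of setting the Enabled state described above (svc is an
-// assumed *s3.S3 client; the bucket name is illustrative):
-//
-//	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
-//		Bucket: aws.String("example-bucket"), // illustrative name
-//		VersioningConfiguration: &s3.VersioningConfiguration{
-//			// BucketVersioningStatusSuspended would suspend versioning instead
-//			Status: aws.String(s3.BucketVersioningStatusEnabled),
-//		},
-//	})
-//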
-// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketVersioning for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning -func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { - req, out := c.PutBucketVersioningRequest(input) - return out, req.Send() -} - -// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketVersioning for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { - req, out := c.PutBucketVersioningRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutBucketWebsite = "PutBucketWebsite" - -// PutBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketWebsite operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutBucketWebsite for more information on using the PutBucketWebsite -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutBucketWebsiteRequest method. -// req, resp := client.PutBucketWebsiteRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite -func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { - op := &request.Operation{ - Name: opPutBucketWebsite, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &PutBucketWebsiteInput{} - } - - output = &PutBucketWebsiteOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutBucketWebsite API operation for Amazon Simple Storage Service. -// -// Sets the configuration of the website that is specified in the website subresource. -// To configure a bucket as a website, you can add this subresource on the bucket -// with website configuration information such as the file name of the index -// document and any redirect rules. For more information, see Hosting Websites -// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). -// -// This PUT action requires the S3:PutBucketWebsite permission. 
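-//
-// A minimal website configuration, sketched under the assumption of an
-// existing *s3.S3 client svc (bucket and document names are illustrative):
-//
-//	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
-//		Bucket: aws.String("example-bucket"),
-//		WebsiteConfiguration: &s3.WebsiteConfiguration{
-//			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
-//			ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
-//		},
-//	})
-//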
By default, -// only the bucket owner can configure the website attached to a bucket; however, -// bucket owners can allow other users to set the website configuration by writing -// a bucket policy that grants them the S3:PutBucketWebsite permission. -// -// To redirect all website requests sent to the bucket's website endpoint, you -// add a website configuration with the following elements. Because all requests -// are sent to another website, you don't need to provide index document name -// for the bucket. -// -// - WebsiteConfiguration -// -// - RedirectAllRequestsTo -// -// - HostName -// -// - Protocol -// -// If you want granular control over redirects, you can use the following elements -// to add routing rules that describe conditions for redirecting requests and -// information about the redirect destination. In this case, the website configuration -// must provide an index document for the bucket, because some requests might -// not be redirected. -// -// - WebsiteConfiguration -// -// - IndexDocument -// -// - Suffix -// -// - ErrorDocument -// -// - Key -// -// - RoutingRules -// -// - RoutingRule -// -// - Condition -// -// - HttpErrorCodeReturnedEquals -// -// - KeyPrefixEquals -// -// - Redirect -// -// - Protocol -// -// - HostName -// -// - ReplaceKeyPrefixWith -// -// - ReplaceKeyWith -// -// - HttpRedirectCode -// -// Amazon S3 has a limitation of 50 routing rules per website configuration. -// If you require more than 50 routing rules, you can use object redirect. For -// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) -// in the Amazon S3 User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketWebsite for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite -func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { - req, out := c.PutBucketWebsiteRequest(input) - return out, req.Send() -} - -// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of -// the ability to pass a context and additional request options. -// -// See PutBucketWebsite for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { - req, out := c.PutBucketWebsiteRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObject = "PutObject" - -// PutObjectRequest generates a "aws/request.Request" representing the -// client's request for the PutObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See PutObject for more information on using the PutObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutObjectRequest method. -// req, resp := client.PutObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject -func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { - op := &request.Operation{ - Name: opPutObject, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &PutObjectInput{} - } - - output = &PutObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutObject API operation for Amazon Simple Storage Service. -// -// Adds an object to a bucket. You must have WRITE permissions on a bucket to -// add an object to it. -// -// Amazon S3 never adds partial objects; if you receive a success response, -// Amazon S3 added the entire object to the bucket. -// -// Amazon S3 is a distributed system. If it receives multiple write requests -// for the same object simultaneously, it overwrites all but the last object -// written. Amazon S3 does not provide object locking; if you need this, make -// sure to build it into your application layer or use versioning instead. -// -// To ensure that data is not corrupted traversing the network, use the Content-MD5 -// header. When you use this header, Amazon S3 checks the object against the -// provided MD5 value and, if they do not match, returns an error. Additionally, -// you can calculate the MD5 while putting an object to Amazon S3 and compare -// the returned ETag to the calculated MD5 value. -// -// - To successfully complete the PutObject request, you must have the s3:PutObject -// in your IAM permissions. -// -// - To successfully change the objects acl of your PutObject request, you -// must have the s3:PutObjectAcl in your IAM permissions. -// -// - The Content-MD5 header is required for any request to upload an object -// with a retention period configured using Amazon S3 Object Lock. For more -// information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) -// in the Amazon S3 User Guide. -// -// # Server-side Encryption -// -// You can optionally request server-side encryption. With server-side encryption, -// Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts the data when you access it. You have the option to provide -// your own encryption key or use Amazon Web Services managed encryption keys -// (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). -// -// If you request server-side encryption using Amazon Web Services Key Management -// Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For -// more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon S3 User Guide. -// -// # Access Control List (ACL)-Specific Request Headers -// -// You can use headers to grant ACL- based permissions. By default, all objects -// are private. 
Only the owner has full access control. When adding a new object, -// you can grant permissions to individual Amazon Web Services accounts or to -// predefined groups defined by Amazon S3. These permissions are then added -// to the ACL on the object. For more information, see Access Control List (ACL) -// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). -// -// If the bucket that you're uploading objects to uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. -// Buckets that use this setting only accept PUT requests that don't specify -// an ACL or PUT requests that specify bucket owner full control ACLs, such -// as the bucket-owner-full-control canned ACL or an equivalent form of this -// ACL expressed in the XML format. PUT requests that contain other ACLs (for -// example, custom grants to certain Amazon Web Services accounts) fail and -// return a 400 error with the error code AccessControlListNotSupported. -// -// For more information, see Controlling ownership of objects and disabling -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. -// -// If your bucket uses the bucket owner enforced setting for Object Ownership, -// all objects written to the bucket by any account will be owned by the bucket -// owner. -// -// # Storage Class Options -// -// By default, Amazon S3 uses the STANDARD Storage Class to store newly created -// objects. The STANDARD storage class provides high durability and high availability. -// Depending on performance needs, you can specify a different Storage Class. -// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, -// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) -// in the Amazon S3 User Guide. -// -// # Versioning -// -// If you enable versioning for a bucket, Amazon S3 automatically generates -// a unique version ID for the object being stored. Amazon S3 returns this ID -// in the response. When you enable versioning for a bucket, if Amazon S3 receives -// multiple write requests for the same object simultaneously, it stores all -// of the objects. -// -// For more information about versioning, see Adding Objects to Versioning Enabled -// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). -// For information about returning the versioning state of a bucket, see GetBucketVersioning -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). -// -// Related Resources -// -// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObject for usage and error information. 
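-//
-// A short, hedged upload sketch (svc is an assumed *s3.S3 client; bucket,
-// key, and body are illustrative; Body must satisfy io.ReadSeeker in this
-// SDK, which bytes.NewReader does):
-//
-//	_, err := svc.PutObjectWithContext(aws.BackgroundContext(), &s3.PutObjectInput{
-//		Bucket:               aws.String("example-bucket"),
-//		Key:                  aws.String("example-key"),
-//		Body:                 bytes.NewReader([]byte("hello world")),
-//		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256), // optional SSE-S3
-//	})
-//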
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject -func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) - return out, req.Send() -} - -// PutObjectWithContext is the same as PutObject with the addition of -// the ability to pass a context and additional request options. -// -// See PutObject for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObjectAcl = "PutObjectAcl" - -// PutObjectAclRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectAcl operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutObjectAcl for more information on using the PutObjectAcl -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutObjectAclRequest method. -// req, resp := client.PutObjectAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl -func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { - op := &request.Operation{ - Name: opPutObjectAcl, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?acl", - } - - if input == nil { - input = &PutObjectAclInput{} - } - - output = &PutObjectAclOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutObjectAcl API operation for Amazon Simple Storage Service. -// -// Uses the acl subresource to set the access control list (ACL) permissions -// for a new or existing object in an S3 bucket. You must have WRITE_ACP permission -// to set the ACL of an object. For more information, see What permissions can -// I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) -// in the Amazon S3 User Guide. -// -// This action is not supported by Amazon S3 on Outposts. -// -// Depending on your application needs, you can choose to set the ACL on an -// object using either the request body or the headers. For example, if you -// have an existing application that updates a bucket ACL using the request -// body, you can continue to use that approach. For more information, see Access -// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// in the Amazon S3 User Guide. 
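-//
-// For instance, setting a canned ACL via the x-amz-acl header could be
-// sketched as follows (svc is an assumed *s3.S3 client; bucket and key
-// names are illustrative):
-//
-//	_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
-//		Bucket: aws.String("example-bucket"),
-//		Key:    aws.String("example-key"),
-//		ACL:    aws.String(s3.ObjectCannedACLPublicRead), // a canned ACL
-//	})
-//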
-//
-// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
-// ACLs are disabled and no longer affect permissions. You must use policies
-// to grant access to your bucket and the objects in it. Requests to set ACLs
-// or update ACLs fail and return the AccessControlListNotSupported error code.
-// Requests to read ACLs are still supported. For more information, see Controlling
-// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
-// in the Amazon S3 User Guide.
-//
-// # Access Permissions
-//
-// You can set access permissions using one of the following methods:
-//
-// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports
-// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
-// predefined set of grantees and permissions. Specify the canned ACL name
-// as the value of x-amz-acl. If you use this header, you cannot use other
-// access control-specific headers in your request. For more information,
-// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
-//
-// - Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
-// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using
-// these headers, you specify explicit access permissions and grantees (Amazon
-// Web Services accounts or Amazon S3 groups) who will receive the permission.
-// If you use these ACL-specific headers, you cannot use the x-amz-acl header
-// to set a canned ACL. These parameters map to the set of permissions that
-// Amazon S3 supports in an ACL. For more information, see Access Control
-// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
-// You specify each grantee as a type=value pair, where the type is one of
-// the following: id – if the value specified is the canonical user ID
-// of an Amazon Web Services account uri – if you are granting permissions
-// to a predefined group emailAddress – if the value specified is the email
-// address of an Amazon Web Services account Using email addresses to specify
-// a grantee is only supported in the following Amazon Web Services Regions:
-// US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
-// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
-// South America (São Paulo) For a list of all the Amazon S3 supported Regions
-// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
-// in the Amazon Web Services General Reference. For example, the following
-// x-amz-grant-read header grants list objects permission to the two Amazon
-// Web Services accounts identified by their email addresses. x-amz-grant-read:
-// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"
-//
-// You can use either a canned ACL or specify access permissions explicitly.
-// You cannot do both.
-//
-// # Grantee Values
-//
-// You can specify the person (grantee) to whom you're assigning access rights
-// (using request elements) in the following ways:
-//
-// - By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
-// DisplayName is optional and ignored in the request.
-//
-// - By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
-//
-// - By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
-// The grantee is resolved to the CanonicalUser and, in a response to a GET
-// Object acl request, appears as the CanonicalUser.
Using email addresses -// to specify a grantee is only supported in the following Amazon Web Services -// Regions: US East (N. Virginia) US West (N. California) US West (Oregon) -// Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe -// (Ireland) South America (São Paulo) For a list of all the Amazon S3 supported -// Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. -// -// # Versioning -// -// The ACL of an object is set at the object version level. By default, PUT -// sets the ACL of the current version of an object. To set the ACL of a different -// version, use the versionId subresource. -// -// Related Resources -// -// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectAcl for usage and error information. -// -// Returned Error Codes: -// - ErrCodeNoSuchKey "NoSuchKey" -// The specified key does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl -func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { - req, out := c.PutObjectAclRequest(input) - return out, req.Send() -} - -// PutObjectAclWithContext is the same as PutObjectAcl with the addition of -// the ability to pass a context and additional request options. -// -// See PutObjectAcl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { - req, out := c.PutObjectAclRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObjectLegalHold = "PutObjectLegalHold" - -// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectLegalHold operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutObjectLegalHold for more information on using the PutObjectLegalHold -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutObjectLegalHoldRequest method. 
-// req, resp := client.PutObjectLegalHoldRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold -func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { - op := &request.Operation{ - Name: opPutObjectLegalHold, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?legal-hold", - } - - if input == nil { - input = &PutObjectLegalHoldInput{} - } - - output = &PutObjectLegalHoldOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutObjectLegalHold API operation for Amazon Simple Storage Service. -// -// Applies a legal hold configuration to the specified object. For more information, -// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). -// -// This action is not supported by Amazon S3 on Outposts. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectLegalHold for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold -func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { - req, out := c.PutObjectLegalHoldRequest(input) - return out, req.Send() -} - -// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of -// the ability to pass a context and additional request options. -// -// See PutObjectLegalHold for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { - req, out := c.PutObjectLegalHoldRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObjectLockConfiguration = "PutObjectLockConfiguration" - -// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectLockConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutObjectLockConfigurationRequest method. 
-// req, resp := client.PutObjectLockConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration -func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { - op := &request.Operation{ - Name: opPutObjectLockConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?object-lock", - } - - if input == nil { - input = &PutObjectLockConfigurationInput{} - } - - output = &PutObjectLockConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. -// -// Places an Object Lock configuration on the specified bucket. The rule specified -// in the Object Lock configuration will be applied by default to every new -// object placed in the specified bucket. For more information, see Locking -// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). -// -// - The DefaultRetention settings require both a mode and a period. -// -// - The DefaultRetention period can be either Days or Years but you must -// select one. You cannot specify Days and Years at the same time. -// -// - You can only enable Object Lock for new buckets. If you want to turn -// on Object Lock for an existing bucket, contact Amazon Web Services Support. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectLockConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration -func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { - req, out := c.PutObjectLockConfigurationRequest(input) - return out, req.Send() -} - -// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutObjectLockConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { - req, out := c.PutObjectLockConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObjectRetention = "PutObjectRetention" - -// PutObjectRetentionRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectRetention operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See PutObjectRetention for more information on using the PutObjectRetention -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutObjectRetentionRequest method. -// req, resp := client.PutObjectRetentionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention -func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { - op := &request.Operation{ - Name: opPutObjectRetention, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?retention", - } - - if input == nil { - input = &PutObjectRetentionInput{} - } - - output = &PutObjectRetentionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutObjectRetention API operation for Amazon Simple Storage Service. -// -// Places an Object Retention configuration on an object. For more information, -// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). -// Users or accounts require the s3:PutObjectRetention permission in order to -// place an Object Retention configuration on objects. Bypassing a Governance -// Retention configuration requires the s3:BypassGovernanceRetention permission. -// -// This action is not supported by Amazon S3 on Outposts. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectRetention for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention -func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { - req, out := c.PutObjectRetentionRequest(input) - return out, req.Send() -} - -// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of -// the ability to pass a context and additional request options. -// -// See PutObjectRetention for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { - req, out := c.PutObjectRetentionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutObjectTagging = "PutObjectTagging" - -// PutObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectTagging operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutObjectTagging for more information on using the PutObjectTagging -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutObjectTaggingRequest method. -// req, resp := client.PutObjectTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { - op := &request.Operation{ - Name: opPutObjectTagging, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?tagging", - } - - if input == nil { - input = &PutObjectTaggingInput{} - } - - output = &PutObjectTaggingOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutObjectTagging API operation for Amazon Simple Storage Service. -// -// Sets the supplied tag-set to an object that already exists in a bucket. -// -// A tag is a key-value pair. You can associate tags with an object by sending -// a PUT request against the tagging subresource that is associated with the -// object. You can retrieve tags by sending a GET request. For more information, -// see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). -// -// For tagging-related restrictions related to characters and encodings, see -// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). -// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. -// -// To use this operation, you must have permission to perform the s3:PutObjectTagging -// action. By default, the bucket owner has this permission and can grant this -// permission to others. -// -// To put tags of any other version, use the versionId query parameter. You -// also need permission for the s3:PutObjectVersionTagging action. -// -// For information about the Amazon S3 object tagging feature, see Object Tagging -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). -// -// Special Errors -// -// - Code: InvalidTagError Cause: The tag provided was not a valid tag. This -// error can occur if the tag did not pass input validation. For more information, -// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). -// -// - Code: MalformedXMLError Cause: The XML provided does not match the schema. -// -// - Code: OperationAbortedError Cause: A conflicting conditional action -// is currently in progress against this resource. Please try again. -// -// - Code: InternalError Cause: The service was unable to apply the provided -// tag to the object. -// -// Related Resources -// -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// -// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectTagging for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) - return out, req.Send() -} - -// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of -// the ability to pass a context and additional request options. -// -// See PutObjectTagging for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutPublicAccessBlock = "PutPublicAccessBlock" - -// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the -// client's request for the PutPublicAccessBlock operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutPublicAccessBlockRequest method. -// req, resp := client.PutPublicAccessBlockRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock -func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { - op := &request.Operation{ - Name: opPutPublicAccessBlock, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?publicAccessBlock", - } - - if input == nil { - input = &PutPublicAccessBlockInput{} - } - - output = &PutPublicAccessBlockOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - req.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "contentMd5Handler", - Fn: checksum.AddBodyContentMD5Handler, - }) - return -} - -// PutPublicAccessBlock API operation for Amazon Simple Storage Service. -// -// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 -// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock -// permission. For more information about Amazon S3 permissions, see Specifying -// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). 
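-//
-// Sketched, hedged example of blocking all four categories of public access
-// on a bucket, assuming an existing *s3.S3 client svc (bucket name is
-// illustrative):
-//
-//	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
-//		Bucket: aws.String("example-bucket"),
-//		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
-//			BlockPublicAcls:       aws.Bool(true),
-//			BlockPublicPolicy:     aws.Bool(true),
-//			IgnorePublicAcls:      aws.Bool(true),
-//			RestrictPublicBuckets: aws.Bool(true),
-//		},
-//	})
-//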
-// -// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket -// or an object, it checks the PublicAccessBlock configuration for both the -// bucket (or the bucket that contains the object) and the bucket owner's account. -// If the PublicAccessBlock configurations are different between the bucket -// and the account, Amazon S3 uses the most restrictive combination of the bucket-level -// and account-level settings. -// -// For more information about when Amazon S3 considers a bucket or an object -// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). -// -// Related Resources -// -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) -// -// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) -// -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutPublicAccessBlock for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock -func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { - req, out := c.PutPublicAccessBlockRequest(input) - return out, req.Send() -} - -// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of -// the ability to pass a context and additional request options. -// -// See PutPublicAccessBlock for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { - req, out := c.PutPublicAccessBlockRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRestoreObject = "RestoreObject" - -// RestoreObjectRequest generates a "aws/request.Request" representing the -// client's request for the RestoreObject operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RestoreObject for more information on using the RestoreObject -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the RestoreObjectRequest method. 
-// req, resp := client.RestoreObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { - op := &request.Operation{ - Name: opRestoreObject, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?restore", - } - - if input == nil { - input = &RestoreObjectInput{} - } - - output = &RestoreObjectOutput{} - req = c.newRequest(op, input, output) - return -} - -// RestoreObject API operation for Amazon Simple Storage Service. -// -// # Restores an archived copy of an object back into Amazon S3 -// -// This action is not supported by Amazon S3 on Outposts. -// -// This action performs the following types of requests: -// -// - select - Perform a select query on an archived object -// -// - restore an archive - Restore an archived object -// -// To use this operation, you must have permissions to perform the s3:RestoreObject -// action. The bucket owner has this permission by default and can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. -// -// # Querying Archives with Select Requests -// -// You use a select type of request to perform SQL queries on archived objects. -// The archived objects that are being queried by the select request must be -// formatted as uncompressed comma-separated values (CSV) files. You can run -// queries and custom analytics on your archived data without having to restore -// your data to a hotter Amazon S3 tier. For an overview about select requests, -// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon S3 User Guide. -// -// When making a select request, do the following: -// -// - Define an output location for the select query's output. This must be -// an Amazon S3 bucket in the same Amazon Web Services Region as the bucket -// that contains the archive object that is being queried. The Amazon Web -// Services account that initiates the job must have permissions to write -// to the S3 bucket. You can specify the storage class and encryption for -// the output objects stored in the bucket. For more information about output, -// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon S3 User Guide. For more information about the S3 structure -// in the request body, see the following: PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon S3 User Guide -// -// - Define the SQL expression for the SELECT type of restoration for your -// query in the request body's SelectParameters structure. You can use expressions -// like the following examples. 
The following expression returns all records -// from the specified object. SELECT * FROM Object Assuming that you are -// not using any headers for data stored in the object, you can specify columns -// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > -// 100 If you have headers and you set the fileHeaderInfo in the CSV structure -// in the request body to USE, you can specify headers in the query. (If -// you set the fileHeaderInfo field to IGNORE, the first row is skipped for -// the query.) You cannot mix ordinal positions with header column names. -// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s -// -// For more information about using SQL with S3 Glacier Select restore, see -// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon S3 User Guide. -// -// When making a select request, you can also do the following: -// -// - To expedite your queries, specify the Expedited tier. For more information -// about tiers, see "Restoring Archives," later in this topic. -// -// - Specify details about the data serialization format of both the input -// object that is being queried and the serialization of the CSV-encoded -// query results. -// -// The following are additional important facts about the select feature: -// -// - The output results are new Amazon S3 objects. Unlike archive retrievals, -// they are stored until explicitly deleted-manually or through a lifecycle -// policy. -// -// - You can issue more than one select request on the same Amazon S3 object. -// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests. -// -// - Amazon S3 accepts a select request even if the object has already been -// restored. A select request doesn’t return error response 409. -// -// # Restoring objects -// -// Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage -// class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep -// Archive tiers are not accessible in real time. For objects in Archive Access -// or Deep Archive Access tiers you must first initiate a restore request, and -// then wait until the object is moved into the Frequent Access tier. For objects -// in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate -// a restore request, and then wait until a temporary copy of the object is -// available. To access an archived object, you must restore the object for -// the duration (number of days) that you specify. -// -// To restore a specific object version, you can provide a version ID. If you -// don't provide a version ID, Amazon S3 restores the current version. -// -// When restoring an archived object (or using a select request), you can specify -// one of the following data access tier options in the Tier element of the -// request body: -// -// - Expedited - Expedited retrievals allow you to quickly access your data -// stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive -// tier when occasional urgent requests for a subset of archives are required. -// For all but the largest archived objects (250 MB+), data accessed using -// Expedited retrievals is typically made available within 1–5 minutes. -// Provisioned capacity ensures that retrieval capacity for Expedited retrievals -// is available when you need it. 
-//
-// - Standard - Standard retrievals allow you to access any of your archived
-// objects within several hours. This is the default option for retrieval
-// requests that do not specify the retrieval option. Standard retrievals
-// typically finish within 3–5 hours for objects stored in the S3 Glacier
-// storage class or S3 Intelligent-Tiering Archive tier. They typically finish
-// within 12 hours for objects stored in the S3 Glacier Deep Archive storage
-// class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals
-// are free for objects stored in S3 Intelligent-Tiering.
-//
-// - Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier,
-// enabling you to retrieve large amounts, even petabytes, of data inexpensively.
-// Bulk retrievals typically finish within 5–12 hours for objects stored
-// in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier.
-// They typically finish within 48 hours for objects stored in the S3 Glacier
-// Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
-// Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.
-//
-// For more information about archive retrieval options and provisioned capacity
-// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
-// in the Amazon S3 User Guide.
-//
-// You can use Amazon S3 restore speed upgrade to change the restore speed to
-// a faster speed while it is in progress. For more information, see Upgrading
-// the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
-// in the Amazon S3 User Guide.
-//
-// To get the status of object restoration, you can send a HEAD request. Operations
-// return the x-amz-restore header, which provides information about the restoration
-// status, in the response. You can use Amazon S3 event notifications to notify
-// you when a restore is initiated or completed. For more information, see Configuring
-// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-// in the Amazon S3 User Guide.
-//
-// After restoring an archived object, you can update the restoration period
-// by reissuing the request with a new period. Amazon S3 updates the restoration
-// period relative to the current time and charges only for the request; there
-// are no data transfer charges. You cannot update the restoration period when
-// Amazon S3 is actively processing your current restore request for the object.
-//
-// If your bucket has a lifecycle configuration with a rule that includes an
-// expiration action, the object expiration overrides the life span that you
-// specify in a restore request. For example, if you restore an object copy
-// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes
-// the object in 3 days. For more information about lifecycle configuration,
-// see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
-// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
-// in the Amazon S3 User Guide.
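-//
-// As an editor's illustrative sketch (not part of the generated API
-// documentation), initiating a two-day Bulk restore with this package might
-// look like the following; the bucket and key names are hypothetical:
-//
-//	svc := s3.New(session.Must(session.NewSession()))
-//	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
-//	    Bucket: aws.String("my-bucket"),    // hypothetical bucket
-//	    Key:    aws.String("archived-key"), // hypothetical key
-//	    RestoreRequest: &s3.RestoreRequest{
-//	        Days:                 aws.Int64(2),
-//	        GlacierJobParameters: &s3.GlacierJobParameters{Tier: aws.String(s3.TierBulk)},
-//	    },
-//	})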
-//
-// # Responses
-//
-// A successful action returns either the 200 OK or 202 Accepted status code.
-//
-// - If the object was not previously restored, Amazon S3 returns 202
-// Accepted in the response.
-//
-// - If the object was previously restored, Amazon S3 returns 200 OK in the
-// response.
-//
-// Special Errors
-//
-// - Code: RestoreAlreadyInProgress Cause: Object restore is already in progress.
-// (This error does not apply to SELECT type requests.) HTTP Status Code:
-// 409 Conflict SOAP Fault Code Prefix: Client
-//
-// - Code: GlacierExpeditedRetrievalNotAvailable Cause: Expedited retrievals
-// are currently not available. Try again later. (Returned if there is insufficient
-// capacity to process the Expedited request. This error applies only to
-// Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP
-// Status Code: 503 SOAP Fault Code Prefix: N/A
-//
-// Related Resources
-//
-// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
-//
-// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
-//
-// - SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
-// in the Amazon S3 User Guide
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation RestoreObject for usage and error information.
-//
-// Returned Error Codes:
-// - ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
-// This action is not allowed against this storage tier.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
-func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
-	req, out := c.RestoreObjectRequest(input)
-	return out, req.Send()
-}
-
-// RestoreObjectWithContext is the same as RestoreObject with the addition of
-// the ability to pass a context and additional request options.
-//
-// See RestoreObject for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) {
-	req, out := c.RestoreObjectRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opSelectObjectContent = "SelectObjectContent"
-
-// SelectObjectContentRequest generates a "aws/request.Request" representing the
-// client's request for the SelectObjectContent operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use the "Send" method on the returned Request to send the API call to the service.
-// The "output" return value is not valid until after Send returns without error.
-//
-// See SelectObjectContent for more information on using the SelectObjectContent
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-//	// Example sending a request using the SelectObjectContentRequest method.
-//	req, resp := client.SelectObjectContentRequest(params)
-//
-//	err := req.Send()
-//	if err == nil { // resp is now filled
-//	    fmt.Println(resp)
-//	}
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent
-func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) {
-	op := &request.Operation{
-		Name:       opSelectObjectContent,
-		HTTPMethod: "POST",
-		HTTPPath:   "/{Bucket}/{Key+}?select&select-type=2",
-	}
-
-	if input == nil {
-		input = &SelectObjectContentInput{}
-	}
-
-	output = &SelectObjectContentOutput{}
-	req = c.newRequest(op, input, output)
-
-	es := NewSelectObjectContentEventStream()
-	req.Handlers.Unmarshal.PushBack(es.setStreamCloser)
-	output.EventStream = es
-
-	req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBack(es.runOutputStream)
-	req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose)
-	return
-}
-
-// SelectObjectContent API operation for Amazon Simple Storage Service.
-//
-// This action filters the contents of an Amazon S3 object based on a simple
-// structured query language (SQL) statement. In the request, along with the
-// SQL expression, you must also specify a data serialization format (JSON,
-// CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse
-// object data into records, and returns only records that match the specified
-// SQL expression. You must also specify the data serialization format for the
-// response.
-//
-// This action is not supported by Amazon S3 on Outposts.
-//
-// For more information about Amazon S3 Select, see Selecting Content from Objects
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html)
-// and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html)
-// in the Amazon S3 User Guide.
-//
-// For more information about using SQL with Amazon S3 Select, see SQL Reference
-// for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
-// in the Amazon S3 User Guide.
-//
-// # Permissions
-//
-// You must have s3:GetObject permission for this operation. Amazon S3 Select
-// does not support anonymous access. For more information about permissions,
-// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-// in the Amazon S3 User Guide.
-//
-// # Object Data Formats
-//
-// You can use Amazon S3 Select to query objects that have the following format
-// properties:
-//
-// - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
-//
-// - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
-//
-// - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2.
-// GZIP and BZIP2 are the only compression formats that Amazon S3 Select
-// supports for CSV and JSON files. Amazon S3 Select supports columnar compression
-// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object
-// compression for Parquet objects.
-//
-// - Server-side encryption - Amazon S3 Select supports querying objects
-// that are protected with server-side encryption. For objects that are encrypted
-// with customer-provided encryption keys (SSE-C), you must use HTTPS, and
-// you must use the headers that are documented in GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html).
-// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
-// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
-// in the Amazon S3 User Guide. For objects that are encrypted with Amazon
-// S3 managed encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS),
-// server-side encryption is handled transparently, so you don't need to
-// specify anything. For more information about server-side encryption, including
-// SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
-// in the Amazon S3 User Guide.
-//
-// # Working with the Response Body
-//
-// Because the response size is unknown, Amazon S3 Select streams the response
-// as a series of messages and includes a Transfer-Encoding header with chunked
-// as its value in the response. For more information, see Appendix: SelectObjectContent
-// Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html).
-//
-// # GetObject Support
-//
-// The SelectObjectContent action does not support the following GetObject functionality.
-// For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html).
-//
-// - Range: Although you can specify a scan range for an Amazon S3 Select
-// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange)
-// in the request parameters), you cannot specify the range of bytes of an
-// object to return.
-//
-// - GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes: You cannot
-// specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes.
-// For more information about storage classes, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro)
-// in the Amazon S3 User Guide.
-//
-// # Special Errors
-//
-// For a list of special errors for this operation, see List of SELECT Object
-// Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList).
-//
-// Related Resources
-//
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-//
-// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
-//
-// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation SelectObjectContent for usage and error information.
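-//
-// As an editor's illustrative sketch (not part of the generated API
-// documentation), running a CSV query and draining the resulting event stream
-// might look like the following; the bucket and key names are hypothetical:
-//
-//	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
-//	    Bucket:              aws.String("my-bucket"), // hypothetical bucket
-//	    Key:                 aws.String("data.csv"),  // hypothetical key
-//	    Expression:          aws.String("SELECT * FROM S3Object s"),
-//	    ExpressionType:      aws.String(s3.ExpressionTypeSql),
-//	    InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
-//	    OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
-//	})
-//	if err != nil {
-//	    return err
-//	}
-//	defer out.EventStream.Close()
-//	for event := range out.EventStream.Events() {
-//	    if records, ok := event.(*s3.RecordsEvent); ok {
-//	        fmt.Printf("%s", records.Payload) // query results arrive as Records events
-//	    }
-//	}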
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent
-func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) {
-	req, out := c.SelectObjectContentRequest(input)
-	return out, req.Send()
-}
-
-// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of
-// the ability to pass a context and additional request options.
-//
-// See SelectObjectContent for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) {
-	req, out := c.SelectObjectContentRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-var _ awserr.Error
-
-// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent.
-//
-// For testing and mocking the event stream, this type should be initialized via
-// the NewSelectObjectContentEventStream constructor function, using the functional
-// options to pass in nested mock behavior.
-type SelectObjectContentEventStream struct {
-
-	// Reader is the EventStream reader for the SelectObjectContentEventStream
-	// events. This value is automatically set by the SDK when the API call is made.
-	// Use this member when unit testing your code with the SDK to mock out the
-	// EventStream Reader.
-	//
-	// Must not be nil.
-	Reader SelectObjectContentEventStreamReader
-
-	outputReader io.ReadCloser
-
-	// StreamCloser is the io.Closer for the EventStream connection. For HTTP
-	// EventStream this is the response Body. The stream will be closed when
-	// the Close method of the EventStream is called.
-	StreamCloser io.Closer
-
-	done      chan struct{}
-	closeOnce sync.Once
-	err       *eventstreamapi.OnceError
-}
-
-// NewSelectObjectContentEventStream initializes a SelectObjectContentEventStream.
-// This function should only be used for testing and mocking the SelectObjectContentEventStream
-// stream within your application.
-//
-// The Reader member must be set before reading events from the stream.
-//
-// The StreamCloser member should be set to the underlying io.Closer,
-// (e.g. http.Response.Body), that will be closed when the stream Close method
-// is called.
-//
-//	es := NewSelectObjectContentEventStream(func(o *SelectObjectContentEventStream) {
-//	    o.Reader = myMockStreamReader
-//	    o.StreamCloser = myMockStreamCloser
-//	})
-func NewSelectObjectContentEventStream(opts ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream {
-	es := &SelectObjectContentEventStream{
-		done: make(chan struct{}),
-		err:  eventstreamapi.NewOnceError(),
-	}
-
-	for _, fn := range opts {
-		fn(es)
-	}
-
-	return es
-}
-
-func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) {
-	es.StreamCloser = r.HTTPResponse.Body
-}
-
-func (es *SelectObjectContentEventStream) runOnStreamPartClose(r *request.Request) {
-	if es.done == nil {
-		return
-	}
-	go es.waitStreamPartClose()
-}
-
-func (es *SelectObjectContentEventStream) waitStreamPartClose() {
-	var outputErrCh <-chan struct{}
-	if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok {
-		outputErrCh = v.ErrorSet()
-	}
-	var outputClosedCh <-chan struct{}
-	if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok {
-		outputClosedCh = v.Closed()
-	}
-
-	select {
-	case <-es.done:
-	case <-outputErrCh:
-		es.err.SetError(es.Reader.Err())
-		es.Close()
-	case <-outputClosedCh:
-		if err := es.Reader.Err(); err != nil {
-			es.err.SetError(err)
-		}
-		es.Close()
-	}
-}
-
-// Events returns a channel to read events from.
-//
-// These events are:
-//
-// - ContinuationEvent
-// - EndEvent
-// - ProgressEvent
-// - RecordsEvent
-// - StatsEvent
-// - SelectObjectContentEventStreamUnknownEvent
-func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
-	return es.Reader.Events()
-}
-
-func (es *SelectObjectContentEventStream) runOutputStream(r *request.Request) {
-	var opts []func(*eventstream.Decoder)
-	if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) {
-		opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger))
-	}
-
-	unmarshalerForEvent := unmarshalerForSelectObjectContentEventStreamEvent{
-		metadata: protocol.ResponseMetadata{
-			StatusCode: r.HTTPResponse.StatusCode,
-			RequestID:  r.RequestID,
-		},
-	}.UnmarshalerForEventName
-
-	decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...)
-	eventReader := eventstreamapi.NewEventReader(decoder,
-		protocol.HandlerPayloadUnmarshal{
-			Unmarshalers: r.Handlers.UnmarshalStream,
-		},
-		unmarshalerForEvent,
-	)
-
-	es.outputReader = r.HTTPResponse.Body
-	es.Reader = newReadSelectObjectContentEventStream(eventReader)
-}
-
-// Close closes the stream. Close must be called when done using the stream API.
-// Not calling Close may result in resource leaks.
-//
-// You can use the closing of the Reader's Events channel to terminate your
-// application's read from the API's stream.
-func (es *SelectObjectContentEventStream) Close() (err error) {
-	es.closeOnce.Do(es.safeClose)
-	return es.Err()
-}
-
-func (es *SelectObjectContentEventStream) safeClose() {
-	if es.done != nil {
-		close(es.done)
-	}
-
-	es.Reader.Close()
-	if es.outputReader != nil {
-		es.outputReader.Close()
-	}
-
-	es.StreamCloser.Close()
-}
-
-// Err returns any error that occurred while reading or writing EventStream
-// Events from the service API's response. Returns nil if there were no errors.
-func (es *SelectObjectContentEventStream) Err() error {
-	if err := es.err.Err(); err != nil {
-		return err
-	}
-	if err := es.Reader.Err(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-const opUploadPart = "UploadPart"
-
-// UploadPartRequest generates a "aws/request.Request" representing the
-// client's request for the UploadPart operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use the "Send" method on the returned Request to send the API call to the service.
-// The "output" return value is not valid until after Send returns without error.
-//
-// See UploadPart for more information on using the UploadPart
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-//	// Example sending a request using the UploadPartRequest method.
-//	req, resp := client.UploadPartRequest(params)
-//
-//	err := req.Send()
-//	if err == nil { // resp is now filled
-//	    fmt.Println(resp)
-//	}
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
-func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
-	op := &request.Operation{
-		Name:       opUploadPart,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}/{Key+}",
-	}
-
-	if input == nil {
-		input = &UploadPartInput{}
-	}
-
-	output = &UploadPartOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// UploadPart API operation for Amazon Simple Storage Service.
-//
-// Uploads a part in a multipart upload.
-//
-// In this operation, you provide part data in your request. However, you have
-// an option to specify your existing Amazon S3 object as a data source for
-// the part you are uploading. To upload a part from an existing object, you
-// use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
-// operation.
-//
-// You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html))
-// before you can upload any part. In response to your initiate request, Amazon
-// S3 returns an upload ID, a unique identifier, that you must include in your
-// upload part request.
-//
-// Part numbers can be any number from 1 to 10,000, inclusive. A part number
-// uniquely identifies a part and also defines its position within the object
-// being created. If you upload a new part using the same part number that was
-// used with a previous part, the previously uploaded part is overwritten.
-//
-// For information about maximum and minimum part sizes and other multipart
-// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
-// in the Amazon S3 User Guide.
-//
-// To ensure that data is not corrupted when traversing the network, specify
-// the Content-MD5 header in the upload part request. Amazon S3 checks the part
-// data against the provided MD5 value. If they do not match, Amazon S3 returns
-// an error.
-//
-// If the upload request is signed with Signature Version 4, then Amazon Web
-// Services S3 uses the x-amz-content-sha256 header as a checksum instead of
-// Content-MD5. For more information, see Authenticating Requests: Using the
-// Authorization Header (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html).
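-//
-// As an editor's illustrative sketch (not part of the generated API
-// documentation), uploading one part of an existing multipart upload might
-// look like the following; uploadID and partData are hypothetical values
-// obtained elsewhere:
-//
-//	resp, err := svc.UploadPart(&s3.UploadPartInput{
-//	    Bucket:     aws.String("my-bucket"), // hypothetical bucket
-//	    Key:        aws.String("my-key"),    // hypothetical key
-//	    UploadId:   aws.String(uploadID),    // returned by CreateMultipartUpload
-//	    PartNumber: aws.Int64(1),
-//	    Body:       bytes.NewReader(partData),
-//	})
-//	// On success, record resp.ETag; CompleteMultipartUpload needs it later.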
-//
-// Note: After you initiate multipart upload and upload one or more parts, you
-// must either complete or abort multipart upload in order to stop getting charged
-// for storage of the uploaded parts. Only after you either complete or abort
-// the multipart upload does Amazon S3 free up the parts storage and stop charging
-// you for it.
-//
-// For more information on multipart uploads, go to Multipart Upload Overview
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the
-// Amazon S3 User Guide.
-//
-// For information on the permissions required to use the multipart upload API,
-// go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-// in the Amazon S3 User Guide.
-//
-// You can optionally request server-side encryption where Amazon S3 encrypts
-// your data as it writes it to disks in its data centers and decrypts it for
-// you when you access it. You have the option of providing your own encryption
-// key, or you can use the Amazon Web Services managed encryption keys. If you
-// choose to provide your own encryption key, the request headers you provide
-// in the request must match the headers you used in the request to initiate
-// the upload by using CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
-// For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
-// in the Amazon S3 User Guide.
-//
-// Server-side encryption is supported by the S3 Multipart Upload actions. Unless
-// you are using a customer-provided encryption key, you don't need to specify
-// the encryption parameters in each UploadPart request. Instead, you only need
-// to specify the server-side encryption parameters in the initial Initiate
-// Multipart request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
-//
-// If you requested server-side encryption using a customer-provided encryption
-// key in your initiate multipart upload request, you must provide identical
-// encryption information in each part upload using the following headers:
-//
-// - x-amz-server-side-encryption-customer-algorithm
-//
-// - x-amz-server-side-encryption-customer-key
-//
-// - x-amz-server-side-encryption-customer-key-MD5
-//
-// Special Errors
-//
-// - Code: NoSuchUpload Cause: The specified multipart upload does not exist.
-// The upload ID might be invalid, or the multipart upload might have been
-// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code
-// Prefix: Client
-//
-// Related Resources
-//
-// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-//
-// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
-//
-// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
-//
-// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-//
-// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation UploadPart for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
-func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
-	req, out := c.UploadPartRequest(input)
-	return out, req.Send()
-}
-
-// UploadPartWithContext is the same as UploadPart with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UploadPart for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
-	req, out := c.UploadPartRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opUploadPartCopy = "UploadPartCopy"
-
-// UploadPartCopyRequest generates a "aws/request.Request" representing the
-// client's request for the UploadPartCopy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use the "Send" method on the returned Request to send the API call to the service.
-// The "output" return value is not valid until after Send returns without error.
-//
-// See UploadPartCopy for more information on using the UploadPartCopy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-//	// Example sending a request using the UploadPartCopyRequest method.
-//	req, resp := client.UploadPartCopyRequest(params)
-//
-//	err := req.Send()
-//	if err == nil { // resp is now filled
-//	    fmt.Println(resp)
-//	}
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
-func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
-	op := &request.Operation{
-		Name:       opUploadPartCopy,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}/{Key+}",
-	}
-
-	if input == nil {
-		input = &UploadPartCopyInput{}
-	}
-
-	output = &UploadPartCopyOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// UploadPartCopy API operation for Amazon Simple Storage Service.
-//
-// Uploads a part by copying data from an existing object as the data source.
You -// specify the data source by adding the request header x-amz-copy-source in -// your request and a byte range by adding the request header x-amz-copy-source-range -// in your request. -// -// For information about maximum and minimum part sizes and other multipart -// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) -// in the Amazon S3 User Guide. -// -// Instead of using an existing object as part data, you might use the UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action -// and provide data in your request. -// -// You must initiate a multipart upload before you can upload any part. In response -// to your initiate request. Amazon S3 returns a unique identifier, the upload -// ID, that you must include in your upload part request. -// -// For more information about using the UploadPartCopy operation, see the following: -// -// - For conceptual information about multipart uploads, see Uploading Objects -// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. -// -// - For information about permissions required to use the multipart upload -// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. -// -// - For information about copying objects using a single atomic action vs. -// a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) -// in the Amazon S3 User Guide. -// -// - For information about using server-side encryption with customer-provided -// encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). -// -// Note the following additional considerations about the request headers x-amz-copy-source-if-match, -// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and -// x-amz-copy-source-if-modified-since: -// -// - Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since -// headers are present in the request as follows: x-amz-copy-source-if-match -// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since -// condition evaluates to false; Amazon S3 returns 200 OK and copies the -// data. -// -// - Consideration 2 - If both of the x-amz-copy-source-if-none-match and -// x-amz-copy-source-if-modified-since headers are present in the request -// as follows: x-amz-copy-source-if-none-match condition evaluates to false, -// and; x-amz-copy-source-if-modified-since condition evaluates to true; -// Amazon S3 returns 412 Precondition Failed response code. -// -// # Versioning -// -// If your bucket has versioning enabled, you could have multiple versions of -// the same object. By default, x-amz-copy-source identifies the current version -// of the object to copy. If the current version is a delete marker and you -// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 -// error, because the object does not exist. If you specify versionId in the -// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns -// an HTTP 400 error, because you are not allowed to specify a delete marker -// as a version for the x-amz-copy-source. 
-//
-// You can optionally specify a specific version of the source object to copy
-// by adding the versionId subresource as shown in the following example:
-//
-//	x-amz-copy-source: /bucket/object?versionId=version id
-//
-// Special Errors
-//
-// - Code: NoSuchUpload Cause: The specified multipart upload does not exist.
-// The upload ID might be invalid, or the multipart upload might have been
-// aborted or completed. HTTP Status Code: 404 Not Found
-//
-// - Code: InvalidRequest Cause: The specified copy source is not supported
-// as a byte-range copy source. HTTP Status Code: 400 Bad Request
-//
-// Related Resources
-//
-// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-//
-// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-//
-// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
-//
-// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
-//
-// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-//
-// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation UploadPartCopy for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
-func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
-	req, out := c.UploadPartCopyRequest(input)
-	return out, req.Send()
-}
-
-// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UploadPartCopy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) {
-	req, out := c.UploadPartCopyRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opWriteGetObjectResponse = "WriteGetObjectResponse"
-
-// WriteGetObjectResponseRequest generates a "aws/request.Request" representing the
-// client's request for the WriteGetObjectResponse operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use the "Send" method on the returned Request to send the API call to the service.
-// The "output" return value is not valid until after Send returns without error.
-//
-// See WriteGetObjectResponse for more information on using the WriteGetObjectResponse
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle, such as custom headers or retry logic.
-//
-//	// Example sending a request using the WriteGetObjectResponseRequest method.
-//	req, resp := client.WriteGetObjectResponseRequest(params)
-//
-//	err := req.Send()
-//	if err == nil { // resp is now filled
-//	    fmt.Println(resp)
-//	}
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse
-func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) (req *request.Request, output *WriteGetObjectResponseOutput) {
-	op := &request.Operation{
-		Name:       opWriteGetObjectResponse,
-		HTTPMethod: "POST",
-		HTTPPath:   "/WriteGetObjectResponse",
-	}
-
-	if input == nil {
-		input = &WriteGetObjectResponseInput{}
-	}
-
-	output = &WriteGetObjectResponseOutput{}
-	req = c.newRequest(op, input, output)
-	req.Handlers.Sign.Remove(v4.SignRequestHandler)
-	handler := v4.BuildNamedHandler("v4.CustomSignerHandler", v4.WithUnsignedPayload)
-	req.Handlers.Sign.PushFrontNamed(handler)
-	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
-	req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{RequestRoute}.", input.hostLabels))
-	req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler)
-	return
-}
-
-// WriteGetObjectResponse API operation for Amazon Simple Storage Service.
-//
-// Passes transformed objects to a GetObject operation when using Object Lambda
-// access points. For information about Object Lambda access points, see Transforming
-// objects with Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html)
-// in the Amazon S3 User Guide.
-//
-// This operation supports metadata that can be returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html),
-// in addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage.
-// The GetObject response metadata is supported so that the WriteGetObjectResponse
-// caller, typically a Lambda function, can provide the same metadata when
-// it internally invokes GetObject. When WriteGetObjectResponse is called by
-// a customer-owned Lambda function, the metadata returned to the end user GetObject
-// call might differ from what Amazon S3 would normally return.
-//
-// You can include any number of metadata headers. When including a metadata
-// header, it should be prefaced with x-amz-meta. For example, x-amz-meta-my-custom-header:
-// MyCustomValue. The primary use case for this is to forward GetObject metadata.
-//
-// Amazon Web Services provides some prebuilt Lambda functions that you can
-// use with S3 Object Lambda to detect and redact personally identifiable information
-// (PII) and decompress S3 objects. These Lambda functions are available in
-// the Amazon Web Services Serverless Application Repository, and can be selected
-// through the Amazon Web Services Management Console when you create your Object
-// Lambda access point.
-//
-// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend,
-// a natural language processing (NLP) service using machine learning to find
-// insights and relationships in text. It automatically detects personally identifiable
-// information (PII) such as names, addresses, dates, credit card numbers, and
-// social security numbers from documents in your Amazon S3 bucket.
-//
-// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a
-// natural language processing (NLP) service using machine learning to find
-// insights and relationships in text. It automatically redacts personally identifiable
-// information (PII) such as names, addresses, dates, credit card numbers, and
-// social security numbers from documents in your Amazon S3 bucket.
-//
-// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression
-// is equipped to decompress objects stored in S3 in one of six compressed file
-// formats: bzip2, gzip, snappy, zlib, zstandard, and ZIP.
-//
-// For information on how to view and use these functions, see Using Amazon
-// Web Services built Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html)
-// in the Amazon S3 User Guide.
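-//
-// As an editor's illustrative sketch (not part of the generated API
-// documentation), an Object Lambda handler might hand back a transformed body
-// like the following; outputRoute, outputToken, and transformed are
-// hypothetical values taken from the handler's event:
-//
-//	_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
-//	    RequestRoute: aws.String(outputRoute), // hypothetical, from the event
-//	    RequestToken: aws.String(outputToken), // hypothetical, from the event
-//	    StatusCode:   aws.Int64(200),
-//	    Body:         aws.ReadSeekCloser(strings.NewReader(transformed)),
-//	})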
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon Simple Storage Service's
-// API operation WriteGetObjectResponse for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse
-func (c *S3) WriteGetObjectResponse(input *WriteGetObjectResponseInput) (*WriteGetObjectResponseOutput, error) {
-	req, out := c.WriteGetObjectResponseRequest(input)
-	return out, req.Send()
-}
-
-// WriteGetObjectResponseWithContext is the same as WriteGetObjectResponse with the addition of
-// the ability to pass a context and additional request options.
-//
-// See WriteGetObjectResponse for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *S3) WriteGetObjectResponseWithContext(ctx aws.Context, input *WriteGetObjectResponseInput, opts ...request.Option) (*WriteGetObjectResponseOutput, error) {
-	req, out := c.WriteGetObjectResponseRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-// Specifies the days since the initiation of an incomplete multipart upload
-// that Amazon S3 will wait before permanently removing all parts of the upload.
-// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
-// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
-// in the Amazon S3 User Guide.
-type AbortIncompleteMultipartUpload struct {
-	_ struct{} `type:"structure"`
-
-	// Specifies the number of days after which Amazon S3 aborts an incomplete multipart
-	// upload.
-	DaysAfterInitiation *int64 `type:"integer"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AbortIncompleteMultipartUpload) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AbortIncompleteMultipartUpload) GoString() string {
-	return s.String()
-}
-
-// SetDaysAfterInitiation sets the DaysAfterInitiation field's value.
-func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { - s.DaysAfterInitiation = &v - return s -} - -type AbortMultipartUploadInput struct { - _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` - - // The bucket name to which the upload was taking place. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Key of the object for which the multipart upload was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Upload ID that identifies the multipart upload. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AbortMultipartUploadInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s AbortMultipartUploadInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AbortMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { - s.Bucket = &v - return s -} - -func (s *AbortMultipartUploadInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *AbortMultipartUploadInput) SetExpectedBucketOwner(v string) *AbortMultipartUploadInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { - s.RequestPayer = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { - s.UploadId = &v - return s -} - -func (s *AbortMultipartUploadInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *AbortMultipartUploadInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s AbortMultipartUploadInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type AbortMultipartUploadOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AbortMultipartUploadOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s AbortMultipartUploadOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { - s.RequestCharged = &v - return s -} - -// Configures the transfer acceleration state for an Amazon S3 bucket. For more -// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon S3 User Guide. -type AccelerateConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies the transfer acceleration status of the bucket. - Status *string `type:"string" enum:"BucketAccelerateStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AccelerateConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AccelerateConfiguration) GoString() string { - return s.String() -} - -// SetStatus sets the Status field's value. -func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { - s.Status = &v - return s -} - -// Contains the elements that set the ACL permissions for an object per grantee. -type AccessControlPolicy struct { - _ struct{} `type:"structure"` - - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - - // Container for the bucket owner's display name and ID. - Owner *Owner `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AccessControlPolicy) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AccessControlPolicy) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AccessControlPolicy) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} - if s.Grants != nil { - for i, v := range s.Grants { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrants sets the Grants field's value. -func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { - s.Grants = v - return s -} - -// SetOwner sets the Owner field's value. -func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { - s.Owner = v - return s -} - -// A container for information about access control for replicas. 
-type AccessControlTranslation struct {
-	_ struct{} `type:"structure"`
-
-	// Specifies the replica ownership. For default and valid values, see PUT bucket
-	// replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
-	// in the Amazon S3 API Reference.
-	//
-	// Owner is a required field
-	Owner *string `type:"string" required:"true" enum:"OwnerOverride"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AccessControlTranslation) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AccessControlTranslation) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AccessControlTranslation) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"}
-	if s.Owner == nil {
-		invalidParams.Add(request.NewErrParamRequired("Owner"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetOwner sets the Owner field's value.
-func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation {
-	s.Owner = &v
-	return s
-}
-
-// A conjunction (logical AND) of predicates, which is used in evaluating an
-// analytics filter. The operator must have at least two predicates in any combination,
-// and an object must match all of the predicates for the filter to apply.
-type AnalyticsAndOperator struct {
-	_ struct{} `type:"structure"`
-
-	// The prefix to use when evaluating an AND predicate: The prefix that an object
-	// must have to be included in the analytics results.
-	Prefix *string `type:"string"`
-
-	// The list of tags to use when evaluating an AND predicate.
-	Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AnalyticsAndOperator) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AnalyticsAndOperator) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AnalyticsAndOperator) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"}
-	if s.Tags != nil {
-		for i, v := range s.Tags {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetPrefix sets the Prefix field's value.
-func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { - s.Tags = v - return s -} - -// Specifies the configuration and any analyses for the analytics filter of -// an Amazon S3 bucket. -type AnalyticsConfiguration struct { - _ struct{} `type:"structure"` - - // The filter used to describe a set of objects for analyses. A filter must - // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). - // If no filter is provided, all objects will be considered in any analysis. - Filter *AnalyticsFilter `type:"structure"` - - // The ID that identifies the analytics configuration. - // - // Id is a required field - Id *string `type:"string" required:"true"` - - // Contains data related to access patterns to be collected and made available - // to analyze the tradeoffs between different storage classes. - // - // StorageClassAnalysis is a required field - StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AnalyticsConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AnalyticsConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.StorageClassAnalysis == nil { - invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - if s.StorageClassAnalysis != nil { - if err := s.StorageClassAnalysis.Validate(); err != nil { - invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFilter sets the Filter field's value. -func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { - s.Id = &v - return s -} - -// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. -func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { - s.StorageClassAnalysis = v - return s -} - -// Where to publish the analytics results. -type AnalyticsExportDestination struct { - _ struct{} `type:"structure"` - - // A destination signifying output to an S3 bucket. - // - // S3BucketDestination is a required field - S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AnalyticsExportDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AnalyticsExportDestination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsExportDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} - if s.S3BucketDestination == nil { - invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) - } - if s.S3BucketDestination != nil { - if err := s.S3BucketDestination.Validate(); err != nil { - invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetS3BucketDestination sets the S3BucketDestination field's value. -func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { - s.S3BucketDestination = v - return s -} - -// The filter used to describe a set of objects for analyses. A filter must -// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). -// If no filter is provided, all objects will be considered in any analysis. -type AnalyticsFilter struct { - _ struct{} `type:"structure"` - - // A conjunction (logical AND) of predicates, which is used in evaluating an - // analytics filter. The operator must have at least two predicates. - And *AnalyticsAndOperator `type:"structure"` - - // The prefix to use when evaluating an analytics filter. - Prefix *string `type:"string"` - - // The tag to use when evaluating an analytics filter. - Tag *Tag `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AnalyticsFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AnalyticsFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnd sets the And field's value. -func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { - s.And = v - return s -} - -// SetPrefix sets the Prefix field's value. 
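Putting the analytics types together: a hedged sketch of a complete configuration using the single-prefix form of the filter (exactly one of Prefix, Tag, or And, per the comment above). The ID and prefix are placeholders, and the empty StorageClassAnalysis simply satisfies the required member without exporting results:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	cfg := (&s3.AnalyticsConfiguration{}).
		SetId("logs-analysis"). // placeholder configuration ID
		SetFilter((&s3.AnalyticsFilter{}).SetPrefix("logs/")).
		SetStorageClassAnalysis(&s3.StorageClassAnalysis{})

	// Validate checks the required Id and StorageClassAnalysis members and
	// recurses into the filter.
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid configuration:", err)
	}
}
```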
-func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { - s.Prefix = &v - return s -} - -// SetTag sets the Tag field's value. -func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { - s.Tag = v - return s -} - -// Contains information about where to publish the analytics results. -type AnalyticsS3BucketDestination struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the bucket to which data is exported. - // - // Bucket is a required field - Bucket *string `type:"string" required:"true"` - - // The account ID that owns the destination S3 bucket. If no account ID is provided, - // the owner is not validated before exporting data. - // - // Although this value is optional, we strongly recommend that you set it to - // help prevent problems if the destination bucket ownership changes. - BucketAccountId *string `type:"string"` - - // Specifies the file format used when exporting data to Amazon S3. - // - // Format is a required field - Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` - - // The prefix to use when exporting data. The prefix is prepended to all results. - Prefix *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AnalyticsS3BucketDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AnalyticsS3BucketDestination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AnalyticsS3BucketDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { - s.Bucket = &v - return s -} - -func (s *AnalyticsS3BucketDestination) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBucketAccountId sets the BucketAccountId field's value. -func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { - s.BucketAccountId = &v - return s -} - -// SetFormat sets the Format field's value. -func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { - s.Format = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { - s.Prefix = &v - return s -} - -// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name -// is globally unique, and the namespace is shared by all Amazon Web Services -// accounts. -type Bucket struct { - _ struct{} `type:"structure"` - - // Date the bucket was created. This date can change when making changes to - // your bucket, such as editing its bucket policy. 
- CreationDate *time.Time `type:"timestamp"` - - // The name of the bucket. - Name *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Bucket) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Bucket) GoString() string { - return s.String() -} - -// SetCreationDate sets the CreationDate field's value. -func (s *Bucket) SetCreationDate(v time.Time) *Bucket { - s.CreationDate = &v - return s -} - -// SetName sets the Name field's value. -func (s *Bucket) SetName(v string) *Bucket { - s.Name = &v - return s -} - -// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. -// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. -type BucketLifecycleConfiguration struct { - _ struct{} `type:"structure"` - - // A lifecycle rule for individual objects in an Amazon S3 bucket. - // - // Rules is a required field - Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BucketLifecycleConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BucketLifecycleConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BucketLifecycleConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRules sets the Rules field's value. -func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { - s.Rules = v - return s -} - -// Container for logging status information. -type BucketLoggingStatus struct { - _ struct{} `type:"structure"` - - // Describes where logs are stored and the prefix that Amazon S3 assigns to - // all log object keys for a bucket. For more information, see PUT Bucket logging - // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon S3 API Reference. - LoggingEnabled *LoggingEnabled `type:"structure"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BucketLoggingStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BucketLoggingStatus) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BucketLoggingStatus) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} - if s.LoggingEnabled != nil { - if err := s.LoggingEnabled.Validate(); err != nil { - invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLoggingEnabled sets the LoggingEnabled field's value. -func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { - s.LoggingEnabled = v - return s -} - -// Describes the cross-origin access configuration for objects in an Amazon -// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon -// S3 User Guide. -type CORSConfiguration struct { - _ struct{} `type:"structure"` - - // A set of origins and methods (cross-origin access that you want to allow). - // You can add up to 100 rules to the configuration. - // - // CORSRules is a required field - CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CORSConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CORSConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CORSConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} - if s.CORSRules == nil { - invalidParams.Add(request.NewErrParamRequired("CORSRules")) - } - if s.CORSRules != nil { - for i, v := range s.CORSRules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCORSRules sets the CORSRules field's value. -func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { - s.CORSRules = v - return s -} - -// Specifies a cross-origin access rule for an Amazon S3 bucket. -type CORSRule struct { - _ struct{} `type:"structure"` - - // Headers that are specified in the Access-Control-Request-Headers header. - // These headers are allowed in a preflight OPTIONS request. 
In response to - // any preflight OPTIONS request, Amazon S3 returns any requested headers that - // are allowed. - AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` - - // An HTTP method that you allow the origin to execute. Valid values are GET, - // PUT, HEAD, POST, and DELETE. - // - // AllowedMethods is a required field - AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` - - // One or more origins you want customers to be able to access the bucket from. - // - // AllowedOrigins is a required field - AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` - - // One or more headers in the response that you want customers to be able to - // access from their applications (for example, from a JavaScript XMLHttpRequest - // object). - ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` - - // The time in seconds that your browser is to cache the preflight response - // for the specified resource. - MaxAgeSeconds *int64 `type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CORSRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CORSRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CORSRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CORSRule"} - if s.AllowedMethods == nil { - invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) - } - if s.AllowedOrigins == nil { - invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAllowedHeaders sets the AllowedHeaders field's value. -func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { - s.AllowedHeaders = v - return s -} - -// SetAllowedMethods sets the AllowedMethods field's value. -func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { - s.AllowedMethods = v - return s -} - -// SetAllowedOrigins sets the AllowedOrigins field's value. -func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { - s.AllowedOrigins = v - return s -} - -// SetExposeHeaders sets the ExposeHeaders field's value. -func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { - s.ExposeHeaders = v - return s -} - -// SetID sets the ID field's value. -func (s *CORSRule) SetID(v string) *CORSRule { - s.ID = &v - return s -} - -// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. -func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { - s.MaxAgeSeconds = &v - return s -} - -// Describes how an uncompressed comma-separated values (CSV)-formatted input -// object is formatted. -type CSVInput struct { - _ struct{} `type:"structure"` - - // Specifies that CSV field values may contain quoted record delimiters and - // such records should be allowed. 
Default value is FALSE. Setting this value
- // to TRUE may lower performance.
- AllowQuotedRecordDelimiter *bool `type:"boolean"`
-
- // A single character used to indicate that a row should be ignored when the
- // character is present at the start of that row. You can specify any character
- // to indicate a comment line.
- Comments *string `type:"string"`
-
- // A single character used to separate individual fields in a record. You can
- // specify an arbitrary delimiter.
- FieldDelimiter *string `type:"string"`
-
- // Describes the first line of input. Valid values are:
- //
- // * NONE: First line is not a header.
- //
- // * IGNORE: First line is a header, but you can't use the header values
- // to indicate the column in an expression. You can use column position (such
- // as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s).
- //
- // * USE: First line is a header, and you can use the header value to identify
- // a column in an expression (SELECT "name" FROM OBJECT).
- FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"`
-
- // A single character used for escaping when the field delimiter is part of
- // the value. For example, if the value is a, b, Amazon S3 wraps this field
- // value in quotation marks, as follows: " a , b ".
- //
- // Type: String
- //
- // Default: "
- //
- // Ancestors: CSV
- QuoteCharacter *string `type:"string"`
-
- // A single character used for escaping the quotation mark character inside
- // an already escaped value. For example, the value """ a , b """ is parsed
- // as " a , b ".
- QuoteEscapeCharacter *string `type:"string"`
-
- // A single character used to separate individual records in the input. Instead
- // of the default value, you can specify an arbitrary delimiter.
- RecordDelimiter *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CSVInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CSVInput) GoString() string {
- return s.String()
-}
-
-// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value.
-func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput {
- s.AllowQuotedRecordDelimiter = &v
- return s
-}
-
-// SetComments sets the Comments field's value.
-func (s *CSVInput) SetComments(v string) *CSVInput {
- s.Comments = &v
- return s
-}
-
-// SetFieldDelimiter sets the FieldDelimiter field's value.
-func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput {
- s.FieldDelimiter = &v
- return s
-}
-
-// SetFileHeaderInfo sets the FileHeaderInfo field's value.
-func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput {
- s.FileHeaderInfo = &v
- return s
-}
-
-// SetQuoteCharacter sets the QuoteCharacter field's value.
-func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput {
- s.QuoteCharacter = &v
- return s
-}
-
-// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value.
-func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput {
- s.QuoteEscapeCharacter = &v
- return s
-}
-
-// SetRecordDelimiter sets the RecordDelimiter field's value.
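As a sketch of how these knobs combine in practice, here is a CSVInput wired into the InputSerialization half of an S3 Select request; s3.FileHeaderInfoUse is the generated constant for the USE header mode, and the delimiters shown are just the conventional CSV defaults:

```go
package main

import (
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	csv := (&s3.CSVInput{}).
		SetFileHeaderInfo(s3.FileHeaderInfoUse). // first line is a usable header
		SetFieldDelimiter(",").
		SetRecordDelimiter("\n").
		SetComments("#") // skip rows that start with '#'

	// The CSVInput slots into the serialization member of a Select request.
	_ = &s3.InputSerialization{CSV: csv}
}
```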
-func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { - s.RecordDelimiter = &v - return s -} - -// Describes how uncompressed comma-separated values (CSV)-formatted results -// are formatted. -type CSVOutput struct { - _ struct{} `type:"structure"` - - // The value used to separate individual fields in a record. You can specify - // an arbitrary delimiter. - FieldDelimiter *string `type:"string"` - - // A single character used for escaping when the field delimiter is part of - // the value. For example, if the value is a, b, Amazon S3 wraps this field - // value in quotation marks, as follows: " a , b ". - QuoteCharacter *string `type:"string"` - - // The single character used for escaping the quote character inside an already - // escaped value. - QuoteEscapeCharacter *string `type:"string"` - - // Indicates whether to use quotation marks around output fields. - // - // * ALWAYS: Always use quotation marks for output fields. - // - // * ASNEEDED: Use quotation marks for output fields when needed. - QuoteFields *string `type:"string" enum:"QuoteFields"` - - // A single character used to separate individual records in the output. Instead - // of the default value, you can specify an arbitrary delimiter. - RecordDelimiter *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CSVOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CSVOutput) GoString() string { - return s.String() -} - -// SetFieldDelimiter sets the FieldDelimiter field's value. -func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { - s.FieldDelimiter = &v - return s -} - -// SetQuoteCharacter sets the QuoteCharacter field's value. -func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { - s.QuoteCharacter = &v - return s -} - -// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. -func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { - s.QuoteEscapeCharacter = &v - return s -} - -// SetQuoteFields sets the QuoteFields field's value. -func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { - s.QuoteFields = &v - return s -} - -// SetRecordDelimiter sets the RecordDelimiter field's value. -func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { - s.RecordDelimiter = &v - return s -} - -// Contains all the possible checksum or digest values for an object. -type Checksum struct { - _ struct{} `type:"structure"` - - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `type:"string"` - - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. 
With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `type:"string"` - - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `type:"string"` - - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA256 *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Checksum) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Checksum) GoString() string { - return s.String() -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *Checksum) SetChecksumCRC32(v string) *Checksum { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. -func (s *Checksum) SetChecksumCRC32C(v string) *Checksum { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *Checksum) SetChecksumSHA1(v string) *Checksum { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *Checksum) SetChecksumSHA256(v string) *Checksum { - s.ChecksumSHA256 = &v - return s -} - -// Container for specifying the Lambda notification configuration. -type CloudFunctionConfiguration struct { - _ struct{} `type:"structure"` - - // Lambda cloud function ARN that Amazon S3 can invoke when it detects events - // of the specified type. - CloudFunction *string `type:"string"` - - // The bucket event for which to send notifications. - // - // Deprecated: Event has been deprecated - Event *string `deprecated:"true" type:"string" enum:"Event"` - - // Bucket events for which to send notifications. - Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` - - // An optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. 
- Id *string `type:"string"` - - // The role supporting the invocation of the Lambda function - InvocationRole *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudFunctionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudFunctionConfiguration) GoString() string { - return s.String() -} - -// SetCloudFunction sets the CloudFunction field's value. -func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { - s.CloudFunction = &v - return s -} - -// SetEvent sets the Event field's value. -func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { - s.Event = &v - return s -} - -// SetEvents sets the Events field's value. -func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { - s.Events = v - return s -} - -// SetId sets the Id field's value. -func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { - s.Id = &v - return s -} - -// SetInvocationRole sets the InvocationRole field's value. -func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { - s.InvocationRole = &v - return s -} - -// Container for all (if there are any) keys between Prefix and the next occurrence -// of the string specified by a delimiter. CommonPrefixes lists keys that act -// like subdirectories in the directory specified by Prefix. For example, if -// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, -// the common prefix is notes/summer/. -type CommonPrefix struct { - _ struct{} `type:"structure"` - - // Container for the specified common prefix. - Prefix *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CommonPrefix) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CommonPrefix) GoString() string { - return s.String() -} - -// SetPrefix sets the Prefix field's value. -func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { - s.Prefix = &v - return s -} - -type CompleteMultipartUploadInput struct { - _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` - - // Name of the bucket to which the multipart upload was initiated. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
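The notes/summer/ example in the CommonPrefix comment above corresponds directly to a delimited list call. A sketch (the bucket name is a placeholder, and session setup is the default-credential shortcut):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket:    aws.String("example-bucket"), // placeholder
		Prefix:    aws.String("notes/"),
		Delimiter: aws.String("/"),
	})
	if err != nil {
		fmt.Println("list failed:", err)
		return
	}

	// Each CommonPrefix is one "subdirectory" rolled up by the delimiter,
	// e.g. notes/summer/.
	for _, cp := range out.CommonPrefixes {
		fmt.Println(aws.StringValue(cp.Prefix))
	}
}
```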
- // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 32-bit CRC32C checksum of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Object key for which the multipart upload was initiated. 
- // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // The container for the multipart upload request information. - MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The server-side encryption (SSE) algorithm used to encrypt the object. This - // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // The server-side encryption (SSE) customer managed key. This parameter is - // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. - // - // SSECustomerKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by CompleteMultipartUploadInput's - // String and GoString methods. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // The MD5 server-side encryption (SSE) customer managed key. This parameter - // is needed only when the object was created using a checksum algorithm. For - // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // ID for the initiated multipart upload. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CompleteMultipartUploadInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CompleteMultipartUploadInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
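A quick sketch of what this validation looks like from the caller's side (the implementation follows below): an empty input fails fast with one aggregated error naming all three missing required members, before any request is signed or sent:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	input := &s3.CompleteMultipartUploadInput{} // Bucket, Key, UploadId all nil

	// Prints a single ErrInvalidParams error listing every missing field,
	// rather than stopping at the first one.
	if err := input.Validate(); err != nil {
		fmt.Println(err)
	}
}
```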
-func (s *CompleteMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { - s.Bucket = &v - return s -} - -func (s *CompleteMultipartUploadInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *CompleteMultipartUploadInput) SetChecksumCRC32(v string) *CompleteMultipartUploadInput { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. -func (s *CompleteMultipartUploadInput) SetChecksumCRC32C(v string) *CompleteMultipartUploadInput { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *CompleteMultipartUploadInput) SetChecksumSHA1(v string) *CompleteMultipartUploadInput { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *CompleteMultipartUploadInput) SetChecksumSHA256(v string) *CompleteMultipartUploadInput { - s.ChecksumSHA256 = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *CompleteMultipartUploadInput) SetExpectedBucketOwner(v string) *CompleteMultipartUploadInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { - s.Key = &v - return s -} - -// SetMultipartUpload sets the MultipartUpload field's value. -func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { - s.MultipartUpload = v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CompleteMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CompleteMultipartUploadInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *CompleteMultipartUploadInput) SetSSECustomerKey(v string) *CompleteMultipartUploadInput { - s.SSECustomerKey = &v - return s -} - -func (s *CompleteMultipartUploadInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CompleteMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CompleteMultipartUploadInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetUploadId sets the UploadId field's value. 
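Tying the setters together, a hedged end-to-end sketch: the upload ID would come from a prior CreateMultipartUpload call and the part list from the corresponding UploadPart responses, both outside this snippet; the bucket and key are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func completeUpload(svc *s3.S3, uploadID string, parts []*s3.CompletedPart) error {
	input := (&s3.CompleteMultipartUploadInput{}).
		SetBucket("example-bucket").      // placeholder
		SetKey("backups/archive.tar.gz"). // placeholder
		SetUploadId(uploadID).
		SetMultipartUpload(&s3.CompletedMultipartUpload{
			Parts: parts, // ordered by ascending PartNumber
		})

	out, err := svc.CompleteMultipartUpload(input)
	if err != nil {
		return err
	}
	fmt.Println("completed object ETag:", aws.StringValue(out.ETag))
	return nil
}

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// uploadID and parts would come from CreateMultipartUpload / UploadPart.
	_ = completeUpload(svc, "example-upload-id", nil)
}
```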
-func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { - s.UploadId = &v - return s -} - -func (s *CompleteMultipartUploadInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *CompleteMultipartUploadInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s CompleteMultipartUploadInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type CompleteMultipartUploadOutput struct { - _ struct{} `type:"structure"` - - // The name of the bucket that contains the newly created object. Does not return - // the access point ARN or access point alias if used. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - Bucket *string `type:"string"` - - // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `type:"string"` - - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `type:"string"` - - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `type:"string"` - - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA256 *string `type:"string"` - - // Entity tag that identifies the newly created object's data. Objects with - // different object data will have different entity tags. The entity tag is - // an opaque string. The entity tag may or may not be an MD5 digest of the object - // data. If the entity tag is not an MD5 digest of the object data, it will - // contain one or more nonhexadecimal characters and/or will consist of less - // than 32 or more than 32 hexadecimal digits. For more information about how - // the entity tag is calculated, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ETag *string `type:"string"` - - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // The object key of the newly created object. - Key *string `min:"1" type:"string"` - - // The URI that identifies the newly created object. - Location *string `type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. - // - // SSEKMSKeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by CompleteMultipartUploadOutput's - // String and GoString methods. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // If you specified server-side encryption either with an Amazon S3-managed - // encryption key or an Amazon Web Services KMS key in your initiate multipart - // upload request, the response includes this header. It confirms the encryption - // algorithm that Amazon S3 used to encrypt the object. 
- ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version ID of the newly created object, in case the bucket has versioning - // turned on. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CompleteMultipartUploadOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CompleteMultipartUploadOutput) GoString() string { - return s.String() -} - -// SetBucket sets the Bucket field's value. -func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { - s.Bucket = &v - return s -} - -func (s *CompleteMultipartUploadOutput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. -func (s *CompleteMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CompleteMultipartUploadOutput { - s.BucketKeyEnabled = &v - return s -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *CompleteMultipartUploadOutput) SetChecksumCRC32(v string) *CompleteMultipartUploadOutput { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. -func (s *CompleteMultipartUploadOutput) SetChecksumCRC32C(v string) *CompleteMultipartUploadOutput { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *CompleteMultipartUploadOutput) SetChecksumSHA1(v string) *CompleteMultipartUploadOutput { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *CompleteMultipartUploadOutput) SetChecksumSHA256(v string) *CompleteMultipartUploadOutput { - s.ChecksumSHA256 = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { - s.ETag = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { - s.Expiration = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { - s.Key = &v - return s -} - -// SetLocation sets the Location field's value. -func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { - s.Location = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { - s.RequestCharged = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. 
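The ETag caveat above (an opaque tag that may or may not be an MD5) can be made concrete with a small heuristic. This is an assumption about common ETag shapes, not an SDK API: multipart ETags conventionally carry a "-partCount" suffix and therefore fail a strict 32-hex-digit check:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// looksLikeSinglePartMD5 is a heuristic (not an SDK API): it reports whether
// an ETag has the exact shape of an MD5 digest, i.e. 32 hexadecimal digits.
// Multipart-style ETags such as "9b2cf535f27731c974343645a3985328-5" fail it.
func looksLikeSinglePartMD5(etag string) bool {
	t := strings.Trim(etag, `"`)
	if len(t) != 32 {
		return false
	}
	_, err := hex.DecodeString(t)
	return err == nil
}

func main() {
	fmt.Println(looksLikeSinglePartMD5(`"9b2cf535f27731c974343645a3985328"`))   // true
	fmt.Println(looksLikeSinglePartMD5(`"9b2cf535f27731c974343645a3985328-5"`)) // false
}
```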
-func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { - s.ServerSideEncryption = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput { - s.VersionId = &v - return s -} - -// The container for the completed multipart upload details. -type CompletedMultipartUpload struct { - _ struct{} `type:"structure"` - - // Array of CompletedPart data types. - // - // If you do not supply a valid Part with your request, the service sends back - // an HTTP 400 response. - Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CompletedMultipartUpload) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CompletedMultipartUpload) GoString() string { - return s.String() -} - -// SetParts sets the Parts field's value. -func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload { - s.Parts = v - return s -} - -// Details of the parts that were uploaded. -type CompletedPart struct { - _ struct{} `type:"structure"` - - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `type:"string"` - - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `type:"string"` - - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `type:"string"` - - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. 
For more information about how
-	// checksums are calculated with multipart uploads, see Checking object integrity
-	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
-	// in the Amazon S3 User Guide.
-	ChecksumSHA256 *string `type:"string"`
-
-	// Entity tag returned when the part was uploaded.
-	ETag *string `type:"string"`
-
-	// Part number that identifies the part. This is a positive integer between
-	// 1 and 10,000.
-	PartNumber *int64 `type:"integer"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CompletedPart) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CompletedPart) GoString() string {
-	return s.String()
-}
-
-// SetChecksumCRC32 sets the ChecksumCRC32 field's value.
-func (s *CompletedPart) SetChecksumCRC32(v string) *CompletedPart {
-	s.ChecksumCRC32 = &v
-	return s
-}
-
-// SetChecksumCRC32C sets the ChecksumCRC32C field's value.
-func (s *CompletedPart) SetChecksumCRC32C(v string) *CompletedPart {
-	s.ChecksumCRC32C = &v
-	return s
-}
-
-// SetChecksumSHA1 sets the ChecksumSHA1 field's value.
-func (s *CompletedPart) SetChecksumSHA1(v string) *CompletedPart {
-	s.ChecksumSHA1 = &v
-	return s
-}
-
-// SetChecksumSHA256 sets the ChecksumSHA256 field's value.
-func (s *CompletedPart) SetChecksumSHA256(v string) *CompletedPart {
-	s.ChecksumSHA256 = &v
-	return s
-}
-
-// SetETag sets the ETag field's value.
-func (s *CompletedPart) SetETag(v string) *CompletedPart {
-	s.ETag = &v
-	return s
-}
-
-// SetPartNumber sets the PartNumber field's value.
-func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
-	s.PartNumber = &v
-	return s
-}
-
-// A container for describing a condition that must be met for the specified
-// redirect to apply. For example, 1. If the request is for pages in the /docs
-// folder, redirect to the /documents folder. 2. If the request results in an
-// HTTP 4xx error, redirect the request to another host where you might process
-// the error.
-type Condition struct {
-	_ struct{} `type:"structure"`
-
-	// The HTTP error code when the redirect is applied. In the event of an error,
-	// if the error code equals this value, then the specified redirect is applied.
-	// Required when parent element Condition is specified and sibling KeyPrefixEquals
-	// is not specified. If both are specified, then both must be true for the redirect
-	// to be applied.
-	HttpErrorCodeReturnedEquals *string `type:"string"`
-
-	// The object key name prefix when the redirect is applied. For example, to
-	// redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
-	// To redirect requests for all pages with the prefix docs/, the key prefix will
-	// be /docs, which identifies all objects in the docs/ folder. Required when
-	// the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
-	// is not specified. If both conditions are specified, both must be true for
-	// the redirect to be applied.
-	//
-	// Replacement must be made for object keys containing special characters (such
-	// as carriage returns) when using XML requests. For more information, see XML
-	// related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
-	KeyPrefixEquals *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Condition) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Condition) GoString() string {
-	return s.String()
-}
-
-// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
-func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
-	s.HttpErrorCodeReturnedEquals = &v
-	return s
-}
-
-// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
-func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
-	s.KeyPrefixEquals = &v
-	return s
-}
-
-type ContinuationEvent struct {
-	_ struct{} `locationName:"ContinuationEvent" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ContinuationEvent) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ContinuationEvent) GoString() string {
-	return s.String()
-}
-
-// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events.
-func (s *ContinuationEvent) eventSelectObjectContentEventStream() {}
-
-// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value.
-// This method is only used internally within the SDK's EventStream handling.
-func (s *ContinuationEvent) UnmarshalEvent(
-	payloadUnmarshaler protocol.PayloadUnmarshaler,
-	msg eventstream.Message,
-) error {
-	return nil
-}
-
-// MarshalEvent marshals the type into a stream event value. This method
-// should only be used internally within the SDK's EventStream handling.
-func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
-	msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
-	return msg, err
-}
-
-type CopyObjectInput struct {
-	_ struct{} `locationName:"CopyObjectRequest" type:"structure"`
-
-	// The canned ACL to apply to the object.
-	//
-	// This action is not supported by Amazon S3 on Outposts.
-	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
-
-	// The name of the destination bucket.
-	//
-	// When using this action with an access point, you must direct requests to
-	// the access point hostname.
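// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the original vendored file.
// The Condition type above is used inside website routing rules. A minimal
// sketch, assuming an *s3.S3 client `svc` and a hypothetical bucket name,
// that redirects keys under docs/ to documents/; the CopyObjectInput
// documentation resumes below.]
//
//	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
//		Bucket: aws.String("example-website-bucket"),
//		WebsiteConfiguration: &s3.WebsiteConfiguration{
//			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
//			RoutingRules: []*s3.RoutingRule{{
//				Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
//				Redirect:  &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
//			}},
//		},
//	})
// ---------------------------------------------------------------------------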
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header - // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with - // SSE-KMS. - // - // Specifying this header with a COPY action doesn’t affect bucket-level settings - // for S3 Bucket Key. - BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Indicates the algorithm you want Amazon S3 to use to create the checksum - // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // Specifies the source object for the copy operation. You specify the value - // in one of two formats, depending on whether you want to access the source - // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): - // - // * For objects not accessed through an access point, specify the name of - // the source bucket and the key of the source object, separated by a slash - // (/). For example, to copy the object reports/january.pdf from the bucket - // awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value - // must be URL-encoded. 
-	//
-	//    * For objects accessed through access points, specify the Amazon Resource
-	//    Name (ARN) of the object as accessed through the access point, in the
-	//    format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>.
-	//    For example, to copy the object reports/january.pdf through access point
-	//    my-access-point owned by account 123456789012 in Region us-west-2, use
-	//    the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
-	//    The value must be URL encoded. Amazon S3 supports copy operations using
-	//    access points only when the source and destination buckets are in the
-	//    same Amazon Web Services Region. Alternatively, for objects accessed through
-	//    Amazon S3 on Outposts, specify the ARN of the object as accessed in the
-	//    format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
-	//    For example, to copy the object reports/january.pdf through outpost my-outpost
-	//    owned by account 123456789012 in Region us-west-2, use the URL encoding
-	//    of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
-	//    The value must be URL-encoded.
-	//
-	// To copy a specific version of an object, append ?versionId=<version-id> to
-	// the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
-	// If you don't specify a version ID, Amazon S3 copies the latest version of
-	// the source object.
-	//
-	// CopySource is a required field
-	CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
-
-	// Copies the object if its entity tag (ETag) matches the specified tag.
-	CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
-
-	// Copies the object if it has been modified since the specified time.
-	CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
-
-	// Copies the object if its entity tag (ETag) is different than the specified
-	// ETag.
-	CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
-
-	// Copies the object if it hasn't been modified since the specified time.
-	CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
-
-	// Specifies the algorithm to use when decrypting the source object (for example,
-	// AES256).
-	CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
-
-	// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
-	// the source object. The encryption key provided in this header must be one
-	// that was used when the source object was created.
-	//
-	// CopySourceSSECustomerKey is a sensitive parameter and its value will be
-	// replaced with "sensitive" in string returned by CopyObjectInput's
-	// String and GoString methods.
-	CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
-
-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure that the
-	// encryption key was transmitted without error.
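// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the original vendored file.
// A minimal CopyObject call showing the plain bucket/key CopySource format
// described above, assuming an *s3.S3 client `svc`; bucket and key names are
// hypothetical, and net/url is used for the required URL-encoding.]
//
//	src := url.QueryEscape("awsexamplebucket/reports/january.pdf")
//	// To copy a specific version instead, append "?versionId=<version-id>".
//	_, err := svc.CopyObject(&s3.CopyObjectInput{
//		Bucket:     aws.String("example-destination-bucket"),
//		Key:        aws.String("reports/january.pdf"),
//		CopySource: aws.String(src),
//	})
// ---------------------------------------------------------------------------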
- CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - - // The account ID of the expected destination bucket owner. If the destination - // bucket is owned by a different account, the request fails with the HTTP status - // code 403 Forbidden (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The account ID of the expected source bucket owner. If the source bucket - // is owned by a different account, the request fails with the HTTP status code - // 403 Forbidden (access denied). - ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - // - // This action is not supported by Amazon S3 on Outposts. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - // - // This action is not supported by Amazon S3 on Outposts. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - // - // This action is not supported by Amazon S3 on Outposts. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. - // - // This action is not supported by Amazon S3 on Outposts. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // The key of the destination object. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Specifies whether the metadata is copied from the source object or replaced - // with metadata provided in the request. - MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` - - // Specifies whether you want to apply a legal hold to the copied object. - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // The Object Lock mode that you want to apply to the copied object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when you want the copied object's Object Lock to expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. 
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-	// Specifies the algorithm to use when encrypting the object (for example,
-	// AES256).
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-	// data. This value is used to store the object and then it is discarded; Amazon
-	// S3 does not store the encryption key. The key must be appropriate for use
-	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
-	// header.
-	//
-	// SSECustomerKey is a sensitive parameter and its value will be
-	// replaced with "sensitive" in string returned by CopyObjectInput's
-	// String and GoString methods.
-	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
-
-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure that the
-	// encryption key was transmitted without error.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// Specifies the Amazon Web Services KMS Encryption Context to use for object
-	// encryption. The value of this header is a base64-encoded UTF-8 string holding
-	// JSON with the encryption context key-value pairs.
-	//
-	// SSEKMSEncryptionContext is a sensitive parameter and its value will be
-	// replaced with "sensitive" in string returned by CopyObjectInput's
-	// String and GoString methods.
-	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
-
-	// Specifies the Amazon Web Services KMS key ID to use for object encryption.
-	// All GET and PUT requests for an object protected by Amazon Web Services KMS
-	// will fail if not made via SSL or using SigV4. For information about configuring
-	// using any of the officially supported Amazon Web Services SDKs and Amazon
-	// Web Services CLI, see Specifying the Signature Version in Request Authentication
-	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
-	// in the Amazon S3 User Guide.
-	//
-	// SSEKMSKeyId is a sensitive parameter and its value will be
-	// replaced with "sensitive" in string returned by CopyObjectInput's
-	// String and GoString methods.
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
-
-	// The server-side encryption algorithm used when storing this object in Amazon
-	// S3 (for example, AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-
-	// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
-	// objects. The STANDARD storage class provides high durability and high availability.
-	// Depending on performance needs, you can specify a different Storage Class.
-	// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
-	// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
-	// in the Amazon S3 User Guide.
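// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the original vendored file.
// Copying an object with SSE-KMS and a non-default storage class, combining
// the ServerSideEncryption, SSEKMSKeyId, and StorageClass members documented
// here; `svc`, the bucket names, and kmsKeyARN are hypothetical.]
//
//	_, err := svc.CopyObject(&s3.CopyObjectInput{
//		Bucket:               aws.String("example-destination-bucket"),
//		Key:                  aws.String("example-key"),
//		CopySource:           aws.String(url.QueryEscape("example-source-bucket/example-key")),
//		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
//		SSEKMSKeyId:          aws.String(kmsKeyARN),
//		StorageClass:         aws.String(s3.StorageClassStandardIa),
//	})
// ---------------------------------------------------------------------------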
-	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
-
-	// The tag-set for the destination object. This value must be used in
-	// conjunction with the TaggingDirective. The tag-set must be encoded as URL
-	// Query parameters.
-	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
-
-	// Specifies whether the object tag-set is copied from the source object or
-	// replaced with the tag-set provided in the request.
-	TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
-
-	// If the bucket is configured as a website, redirects requests for this object
-	// to another object in the same bucket or to an external URL. Amazon S3 stores
-	// the value of this header in the object metadata.
-	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CopyObjectInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CopyObjectInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CopyObjectInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-	if s.CopySource == nil {
-		invalidParams.Add(request.NewErrParamRequired("CopySource"))
-	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetACL sets the ACL field's value.
-func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput {
-	s.ACL = &v
-	return s
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *CopyObjectInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetBucketKeyEnabled sets the BucketKeyEnabled field's value.
-func (s *CopyObjectInput) SetBucketKeyEnabled(v bool) *CopyObjectInput {
-	s.BucketKeyEnabled = &v
-	return s
-}
-
-// SetCacheControl sets the CacheControl field's value.
-func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput {
-	s.CacheControl = &v
-	return s
-}
-
-// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value.
-func (s *CopyObjectInput) SetChecksumAlgorithm(v string) *CopyObjectInput {
-	s.ChecksumAlgorithm = &v
-	return s
-}
-
-// SetContentDisposition sets the ContentDisposition field's value.
-func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput {
-	s.ContentDisposition = &v
-	return s
-}
-
-// SetContentEncoding sets the ContentEncoding field's value.
-func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { - s.ContentLanguage = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { - s.ContentType = &v - return s -} - -// SetCopySource sets the CopySource field's value. -func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { - s.CopySource = &v - return s -} - -// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. -func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { - s.CopySourceIfMatch = &v - return s -} - -// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. -func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { - s.CopySourceIfModifiedSince = &v - return s -} - -// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. -func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { - s.CopySourceIfNoneMatch = &v - return s -} - -// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. -func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { - s.CopySourceIfUnmodifiedSince = &v - return s -} - -// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. -func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { - s.CopySourceSSECustomerAlgorithm = &v - return s -} - -// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. -func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { - s.CopySourceSSECustomerKey = &v - return s -} - -func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { - if s.CopySourceSSECustomerKey == nil { - return v - } - return *s.CopySourceSSECustomerKey -} - -// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. -func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { - s.CopySourceSSECustomerKeyMD5 = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *CopyObjectInput) SetExpectedBucketOwner(v string) *CopyObjectInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. -func (s *CopyObjectInput) SetExpectedSourceBucketOwner(v string) *CopyObjectInput { - s.ExpectedSourceBucketOwner = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { - s.Expires = &v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. 
-func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { - s.GrantWriteACP = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { - s.Key = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { - s.Metadata = v - return s -} - -// SetMetadataDirective sets the MetadataDirective field's value. -func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { - s.MetadataDirective = &v - return s -} - -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { - s.ObjectLockRetainUntilDate = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { - s.SSECustomerKey = &v - return s -} - -func (s *CopyObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { - s.StorageClass = &v - return s -} - -// SetTagging sets the Tagging field's value. -func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { - s.Tagging = &v - return s -} - -// SetTaggingDirective sets the TaggingDirective field's value. -func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { - s.TaggingDirective = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. 
-func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { - s.WebsiteRedirectLocation = &v - return s -} - -func (s *CopyObjectInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *CopyObjectInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s CopyObjectInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type CopyObjectOutput struct { - _ struct{} `type:"structure" payload:"CopyObjectResult"` - - // Indicates whether the copied object uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - - // Container for all response elements. - CopyObjectResult *CopyObjectResult `type:"structure"` - - // Version of the copied object in the destination bucket. - CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` - - // If the object expiration is configured, the response includes this header. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the Amazon Web Services KMS Encryption Context to use - // for object encryption. The value of this header is a base64-encoded UTF-8 - // string holding JSON with the encryption context key-value pairs. - // - // SSEKMSEncryptionContext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by CopyObjectOutput's - // String and GoString methods. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. - // - // SSEKMSKeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by CopyObjectOutput's - // String and GoString methods. 
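// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the original vendored file.
// Reading a few CopyObjectOutput members after a successful copy; note that
// sensitive members such as SSEKMSKeyId are redacted by String()/GoString().
// Assumes an *s3.S3 client `svc`, a populated `input`, and the standard log
// package.]
//
//	out, err := svc.CopyObject(input)
//	if err == nil && out.CopyObjectResult != nil {
//		log.Printf("copied: etag=%s sourceVersion=%s",
//			aws.StringValue(out.CopyObjectResult.ETag),
//			aws.StringValue(out.CopySourceVersionId))
//	}
// ---------------------------------------------------------------------------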
- SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version ID of the newly created copy. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CopyObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CopyObjectOutput) GoString() string { - return s.String() -} - -// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. -func (s *CopyObjectOutput) SetBucketKeyEnabled(v bool) *CopyObjectOutput { - s.BucketKeyEnabled = &v - return s -} - -// SetCopyObjectResult sets the CopyObjectResult field's value. -func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { - s.CopyObjectResult = v - return s -} - -// SetCopySourceVersionId sets the CopySourceVersionId field's value. -func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { - s.CopySourceVersionId = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { - s.Expiration = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { - s.ServerSideEncryption = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { - s.VersionId = &v - return s -} - -// Container for all response elements. -type CopyObjectResult struct { - _ struct{} `type:"structure"` - - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `type:"string"` - - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `type:"string"` - - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `type:"string"` - - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA256 *string `type:"string"` - - // Returns the ETag of the new object. The ETag reflects only changes to the - // contents of an object, not its metadata. - ETag *string `type:"string"` - - // Creation date of the object. - LastModified *time.Time `type:"timestamp"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CopyObjectResult) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CopyObjectResult) GoString() string { - return s.String() -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *CopyObjectResult) SetChecksumCRC32(v string) *CopyObjectResult { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. -func (s *CopyObjectResult) SetChecksumCRC32C(v string) *CopyObjectResult { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *CopyObjectResult) SetChecksumSHA1(v string) *CopyObjectResult { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *CopyObjectResult) SetChecksumSHA256(v string) *CopyObjectResult { - s.ChecksumSHA256 = &v - return s -} - -// SetETag sets the ETag field's value. 
-func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { - s.LastModified = &v - return s -} - -// Container for all response elements. -type CopyPartResult struct { - _ struct{} `type:"structure"` - - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `type:"string"` - - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `type:"string"` - - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `type:"string"` - - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA256 *string `type:"string"` - - // Entity tag of the object. - ETag *string `type:"string"` - - // Date and time at which the object was uploaded. - LastModified *time.Time `type:"timestamp"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CopyPartResult) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CopyPartResult) GoString() string { - return s.String() -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *CopyPartResult) SetChecksumCRC32(v string) *CopyPartResult { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. 
-func (s *CopyPartResult) SetChecksumCRC32C(v string) *CopyPartResult { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *CopyPartResult) SetChecksumSHA1(v string) *CopyPartResult { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *CopyPartResult) SetChecksumSHA256(v string) *CopyPartResult { - s.ChecksumSHA256 = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *CopyPartResult) SetETag(v string) *CopyPartResult { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { - s.LastModified = &v - return s -} - -// The configuration information for the bucket. -type CreateBucketConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies the Region where the bucket will be created. If you don't specify - // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). - LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateBucketConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateBucketConfiguration) GoString() string { - return s.String() -} - -// SetLocationConstraint sets the LocationConstraint field's value. -func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { - s.LocationConstraint = &v - return s -} - -type CreateBucketInput struct { - _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` - - // The canned ACL to apply to the bucket. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - - // The name of the bucket to create. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The configuration information for the bucket. - CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create new objects in the bucket. - // - // For the bucket and object owners of existing objects, also allows deletions - // and overwrites of those objects. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. 
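// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the original vendored file.
// Creating a bucket outside us-east-1 requires the LocationConstraint
// documented above; when creating in us-east-1, omit CreateBucketConfiguration
// entirely. `svc` and the bucket name are hypothetical.]
//
//	_, err := svc.CreateBucket(&s3.CreateBucketInput{
//		Bucket: aws.String("example-new-bucket"),
//		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
//			LocationConstraint: aws.String("us-west-2"),
//		},
//	})
// ---------------------------------------------------------------------------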
- GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // Specifies whether you want S3 Object Lock to be enabled for the new bucket. - ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` - - // The container element for object ownership for a bucket's ownership controls. - // - // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to - // the bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. - // - // ObjectWriter - The uploading account will own the object if the object is - // uploaded with the bucket-owner-full-control canned ACL. - // - // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer - // affect permissions. The bucket owner automatically owns and has full control - // over every object in the bucket. The bucket only accepts PUT requests that - // don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control - // canned ACL or an equivalent form of this ACL expressed in the XML format. - ObjectOwnership *string `location:"header" locationName:"x-amz-object-ownership" type:"string" enum:"ObjectOwnership"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateBucketInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateBucketInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { - s.ACL = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { - s.Bucket = &v - return s -} - -func (s *CreateBucketInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. -func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { - s.CreateBucketConfiguration = v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. 
-func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput {
-	s.GrantReadACP = &v
-	return s
-}
-
-// SetGrantWrite sets the GrantWrite field's value.
-func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput {
-	s.GrantWrite = &v
-	return s
-}
-
-// SetGrantWriteACP sets the GrantWriteACP field's value.
-func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput {
-	s.GrantWriteACP = &v
-	return s
-}
-
-// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value.
-func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput {
-	s.ObjectLockEnabledForBucket = &v
-	return s
-}
-
-// SetObjectOwnership sets the ObjectOwnership field's value.
-func (s *CreateBucketInput) SetObjectOwnership(v string) *CreateBucketInput {
-	s.ObjectOwnership = &v
-	return s
-}
-
-type CreateBucketOutput struct {
-	_ struct{} `type:"structure"`
-
-	// A forward slash followed by the name of the bucket.
-	Location *string `location:"header" locationName:"Location" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateBucketOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateBucketOutput) GoString() string {
-	return s.String()
-}
-
-// SetLocation sets the Location field's value.
-func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
-	s.Location = &v
-	return s
-}
-
-type CreateMultipartUploadInput struct {
-	_ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"`
-
-	// The canned ACL to apply to the object.
-	//
-	// This action is not supported by Amazon S3 on Outposts.
-	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
-
-	// The name of the bucket to which to initiate the upload.
-	//
-	// When using this action with an access point, you must direct requests to
-	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
-	// When using this action with an access point through the Amazon Web Services
-	// SDKs, you provide the access point ARN in place of the bucket name. For more
-	// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
-	// in the Amazon S3 User Guide.
-	//
-	// When using this action with Amazon S3 on Outposts, you must direct requests
-	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
-	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
-	// using this action with S3 on Outposts through the Amazon Web Services SDKs,
-	// you provide the Outposts bucket ARN in place of the bucket name. For more
-	// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
-	// in the Amazon S3 User Guide.
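// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the original vendored file.
// CreateBucket can return before the new bucket is visible everywhere, so
// callers typically follow it with the SDK's waiter; `svc` and the bucket
// name are hypothetical. The CreateMultipartUploadInput documentation
// continues below.]
//
//	if err == nil {
//		err = svc.WaitUntilBucketExists(&s3.HeadBucketInput{
//			Bucket: aws.String("example-new-bucket"),
//		})
//	}
// ---------------------------------------------------------------------------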
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header - // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with - // SSE-KMS. - // - // Specifying this header with an object action doesn’t affect bucket-level - // settings for S3 Bucket Key. - BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Indicates the algorithm you want Amazon S3 to use to create the checksum - // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - // - // This action is not supported by Amazon S3 on Outposts. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - // - // This action is not supported by Amazon S3 on Outposts. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - // - // This action is not supported by Amazon S3 on Outposts. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. - // - // This action is not supported by Amazon S3 on Outposts. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // Object key for which the multipart upload is to be initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. 
- Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
-
- // Specifies whether you want to apply a legal hold to the uploaded object.
- ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
-
- // Specifies the Object Lock mode that you want to apply to the uploaded object.
- ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
-
- // Specifies the date and time when you want the Object Lock to expire.
- ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
-
- // Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
- // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide.
- RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
- // Specifies the algorithm to use when encrypting the object (for example,
- // AES256).
- SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
- // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
- // data. This value is used to store the object and then it is discarded; Amazon
- // S3 does not store the encryption key. The key must be appropriate for use
- // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
- // header.
- //
- // SSECustomerKey is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateMultipartUploadInput's
- // String and GoString methods.
- SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
-
- // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure that the
- // encryption key was transmitted without error.
- SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
- // Specifies the Amazon Web Services KMS Encryption Context to use for object
- // encryption. The value of this header is a base64-encoded UTF-8 string holding
- // JSON with the encryption context key-value pairs.
- //
- // SSEKMSEncryptionContext is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateMultipartUploadInput's
- // String and GoString methods.
- SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
-
- // Specifies the ID of the symmetric customer managed key to use for object
- // encryption. All GET and PUT requests for an object protected by Amazon Web
- // Services KMS will fail if not made via SSL or using SigV4.
For information - // about configuring using any of the officially supported Amazon Web Services - // SDKs and Amazon Web Services CLI, see Specifying the Signature Version in - // Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 User Guide. - // - // SSEKMSKeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by CreateMultipartUploadInput's - // String and GoString methods. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // By default, Amazon S3 uses the STANDARD Storage Class to store newly created - // objects. The STANDARD storage class provides high durability and high availability. - // Depending on performance needs, you can specify a different Storage Class. - // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, - // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // The tag-set for the object. The tag-set must be encoded as URL Query parameters. - Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateMultipartUploadInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateMultipartUploadInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { - s.ACL = &v - return s -} - -// SetBucket sets the Bucket field's value. 
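The removed code above shows the two conventions this generated API follows: every input member gets a chainable Set* helper that returns the receiver, and Validate collects missing required members into a request.ErrInvalidParams before any network call is made. A minimal sketch of how a caller combines the two, assuming aws-sdk-go v1 and hypothetical bucket and key names:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func main() {
		// Chain the generated setters; each returns *CreateMultipartUploadInput.
		input := (&s3.CreateMultipartUploadInput{}).
			SetBucket("example-bucket").
			SetKey("logs/archive.tar.gz").
			SetContentType("application/gzip")

		// Validate reports missing required members (Bucket, Key) locally,
		// before any request is sent.
		if err := input.Validate(); err != nil {
			fmt.Println("invalid input:", err)
			return
		}

		svc := s3.New(session.Must(session.NewSession()))
		out, err := svc.CreateMultipartUpload(input)
		if err != nil {
			fmt.Println("create failed:", err)
			return
		}
		fmt.Println("upload id:", *out.UploadId)
	}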
-func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { - s.Bucket = &v - return s -} - -func (s *CreateMultipartUploadInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. -func (s *CreateMultipartUploadInput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadInput { - s.BucketKeyEnabled = &v - return s -} - -// SetCacheControl sets the CacheControl field's value. -func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { - s.CacheControl = &v - return s -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *CreateMultipartUploadInput) SetChecksumAlgorithm(v string) *CreateMultipartUploadInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. -func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { - s.ContentLanguage = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { - s.ContentType = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *CreateMultipartUploadInput) SetExpectedBucketOwner(v string) *CreateMultipartUploadInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { - s.Expires = &v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { - s.GrantWriteACP = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { - s.Key = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { - s.Metadata = v - return s -} - -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. 
-func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput { - s.ObjectLockRetainUntilDate = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { - s.SSECustomerKey = &v - return s -} - -func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { - s.StorageClass = &v - return s -} - -// SetTagging sets the Tagging field's value. -func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { - s.Tagging = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { - s.WebsiteRedirectLocation = &v - return s -} - -func (s *CreateMultipartUploadInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *CreateMultipartUploadInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. 
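The SSECustomerKey and SSECustomerKeyMD5 members documented above implement SSE-C: the caller supplies the raw encryption key, and S3 verifies a base64-encoded MD5 digest of that key for transport integrity. A sketch of deriving the digest by hand, assuming aws-sdk-go v1; the key literal is a placeholder, and the marshal-as:"blob" tag means the SDK base64-encodes the raw key header itself:

	package main

	import (
		"crypto/md5"
		"encoding/base64"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func main() {
		key := []byte("0123456789abcdef0123456789abcdef") // 32 bytes for AES256
		digest := md5.Sum(key)

		input := &s3.CreateMultipartUploadInput{
			Bucket:               aws.String("example-bucket"),
			Key:                  aws.String("example-object"),
			SSECustomerAlgorithm: aws.String("AES256"),
			// Raw key; the generated marshaling layer base64-encodes it into
			// the x-amz-server-side-encryption-customer-key header.
			SSECustomerKey: aws.String(string(key)),
			// Base64 of the 128-bit MD5 digest, per the field docs above.
			SSECustomerKeyMD5: aws.String(base64.StdEncoding.EncodeToString(digest[:])),
		}
		_ = input
	}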
-func (s CreateMultipartUploadInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type CreateMultipartUploadOutput struct { - _ struct{} `type:"structure"` - - // If the bucket has a lifecycle rule configured with an action to abort incomplete - // multipart uploads and the prefix in the lifecycle rule matches the object - // name in the request, the response includes this header. The header indicates - // when the initiated multipart upload becomes eligible for an abort operation. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). - // - // The response also includes the x-amz-abort-rule-id header that provides the - // ID of the lifecycle configuration rule that defines this action. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - - // This header is returned along with the x-amz-abort-date header. It identifies - // the applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. - AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` - - // The name of the bucket to which the multipart upload was initiated. Does - // not return the access point ARN or access point alias if used. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - Bucket *string `locationName:"Bucket" type:"string"` - - // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // Object key for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. 
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the Amazon Web Services KMS Encryption Context to use - // for object encryption. The value of this header is a base64-encoded UTF-8 - // string holding JSON with the encryption context key-value pairs. - // - // SSEKMSEncryptionContext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's - // String and GoString methods. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. - // - // SSEKMSKeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's - // String and GoString methods. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // ID for the initiated multipart upload. - UploadId *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateMultipartUploadOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateMultipartUploadOutput) GoString() string { - return s.String() -} - -// SetAbortDate sets the AbortDate field's value. -func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { - s.AbortDate = &v - return s -} - -// SetAbortRuleId sets the AbortRuleId field's value. -func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { - s.AbortRuleId = &v - return s -} - -// SetBucket sets the Bucket field's value. 
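The member of CreateMultipartUploadOutput most callers need is UploadId, which threads through every subsequent UploadPart, CompleteMultipartUpload, or AbortMultipartUpload call; AbortDate only appears when a bucket lifecycle rule matches the key. A sketch under the same aws-sdk-go v1 assumptions as the earlier examples:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func main() {
		svc := s3.New(session.Must(session.NewSession()))
		out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
			Bucket: aws.String("example-bucket"),
			Key:    aws.String("example-object"),
		})
		if err != nil {
			fmt.Println("create failed:", err)
			return
		}
		// Set only when a lifecycle rule on the bucket matches this key; it
		// marks when the incomplete upload becomes eligible for abort.
		if out.AbortDate != nil {
			fmt.Println("lifecycle may abort after:", *out.AbortDate)
		}

		// Clean up the upload this sketch just opened.
		_, err = svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
			Bucket:   aws.String("example-bucket"),
			Key:      aws.String("example-object"),
			UploadId: out.UploadId,
		})
		fmt.Println("abort err:", err)
	}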
-func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { - s.Bucket = &v - return s -} - -func (s *CreateMultipartUploadOutput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. -func (s *CreateMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadOutput { - s.BucketKeyEnabled = &v - return s -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *CreateMultipartUploadOutput) SetChecksumAlgorithm(v string) *CreateMultipartUploadOutput { - s.ChecksumAlgorithm = &v - return s -} - -// SetKey sets the Key field's value. -func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { - s.Key = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { - s.ServerSideEncryption = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { - s.UploadId = &v - return s -} - -// The container element for specifying the default Object Lock retention settings -// for new objects placed in the specified bucket. -// -// - The DefaultRetention settings require both a mode and a period. -// -// - The DefaultRetention period can be either Days or Years but you must -// select one. You cannot specify Days and Years at the same time. -type DefaultRetention struct { - _ struct{} `type:"structure"` - - // The number of days that you want to specify for the default retention period. - // Must be used with Mode. - Days *int64 `type:"integer"` - - // The default Object Lock retention mode you want to apply to new objects placed - // in the specified bucket. Must be used with either Days or Years. - Mode *string `type:"string" enum:"ObjectLockRetentionMode"` - - // The number of years that you want to specify for the default retention period. - // Must be used with Mode. - Years *int64 `type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
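DefaultRetention, defined just above, is only meaningful inside an Object Lock configuration: Mode is required, plus exactly one of Days or Years. A sketch of applying a 30-day GOVERNANCE default, assuming aws-sdk-go v1, a hypothetical bucket name, and a bucket that was created with Object Lock enabled:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func main() {
		svc := s3.New(session.Must(session.NewSession()))
		_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
			Bucket: aws.String("example-locked-bucket"),
			ObjectLockConfiguration: &s3.ObjectLockConfiguration{
				ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
				Rule: &s3.ObjectLockRule{
					DefaultRetention: &s3.DefaultRetention{
						Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
						// Days and Years are mutually exclusive.
						Days: aws.Int64(30),
					},
				},
			},
		})
		fmt.Println(err)
	}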
-func (s DefaultRetention) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DefaultRetention) GoString() string { - return s.String() -} - -// SetDays sets the Days field's value. -func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { - s.Days = &v - return s -} - -// SetMode sets the Mode field's value. -func (s *DefaultRetention) SetMode(v string) *DefaultRetention { - s.Mode = &v - return s -} - -// SetYears sets the Years field's value. -func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { - s.Years = &v - return s -} - -// Container for the objects to delete. -type Delete struct { - _ struct{} `type:"structure"` - - // The objects to delete. - // - // Objects is a required field - Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` - - // Element to enable quiet mode for the request. When you add this element, - // you must set its value to true. - Quiet *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Delete) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Delete) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Delete) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Delete"} - if s.Objects == nil { - invalidParams.Add(request.NewErrParamRequired("Objects")) - } - if s.Objects != nil { - for i, v := range s.Objects { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetObjects sets the Objects field's value. -func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { - s.Objects = v - return s -} - -// SetQuiet sets the Quiet field's value. -func (s *Delete) SetQuiet(v bool) *Delete { - s.Quiet = &v - return s -} - -type DeleteBucketAnalyticsConfigurationInput struct { - _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` - - // The name of the bucket from which an analytics configuration is deleted. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The ID that identifies the analytics configuration. 
- // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketAnalyticsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketAnalyticsConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetId sets the Id field's value. -func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { - s.Id = &v - return s -} - -func (s *DeleteBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeleteBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketAnalyticsConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
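The Delete container defined above (Objects plus the optional Quiet flag) is the request body of the DeleteObjects batch call. A sketch that removes two keys and suppresses the per-key success entries, assuming aws-sdk-go v1 and hypothetical names:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func main() {
		svc := s3.New(session.Must(session.NewSession()))
		out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
			Bucket: aws.String("example-bucket"),
			Delete: &s3.Delete{
				Objects: []*s3.ObjectIdentifier{
					{Key: aws.String("tmp/a.txt")},
					{Key: aws.String("tmp/b.txt")},
				},
				// Quiet mode: the response reports only keys that failed.
				Quiet: aws.Bool(true),
			},
		})
		if err != nil {
			fmt.Println("batch delete failed:", err)
			return
		}
		for _, e := range out.Errors {
			fmt.Printf("could not delete %s: %s\n",
				aws.StringValue(e.Key), aws.StringValue(e.Message))
		}
	}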
-func (s DeleteBucketAnalyticsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { - return s.String() -} - -type DeleteBucketCorsInput struct { - _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` - - // Specifies the bucket whose cors configuration is being deleted. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketCorsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketCorsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketCorsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteBucketCorsInput) SetExpectedBucketOwner(v string) *DeleteBucketCorsInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketCorsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. 
-func (s DeleteBucketCorsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketCorsOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketCorsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketCorsOutput) GoString() string { - return s.String() -} - -type DeleteBucketEncryptionInput struct { - _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` - - // The name of the bucket containing the server-side encryption configuration - // to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketEncryptionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketEncryptionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketEncryptionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketEncryptionInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
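Every Delete* input in this file repeats the same ExpectedBucketOwner contract: when the header is set and the bucket belongs to a different account, S3 rejects the call with 403 Forbidden. A sketch of detecting that case through awserr, assuming aws-sdk-go v1; the account ID and bucket are placeholders:

	package main

	import (
		"fmt"
		"net/http"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/awserr"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func main() {
		svc := s3.New(session.Must(session.NewSession()))
		_, err := svc.DeleteBucketCors(&s3.DeleteBucketCorsInput{
			Bucket:              aws.String("example-bucket"),
			ExpectedBucketOwner: aws.String("111122223333"),
		})
		// A RequestFailure carries the HTTP status of the rejected call.
		if reqErr, ok := err.(awserr.RequestFailure); ok &&
			reqErr.StatusCode() == http.StatusForbidden {
			fmt.Println("bucket is not owned by the expected account")
			return
		}
		fmt.Println(err)
	}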
-func (s *DeleteBucketEncryptionInput) SetExpectedBucketOwner(v string) *DeleteBucketEncryptionInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeleteBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketEncryptionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketEncryptionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketEncryptionOutput) GoString() string { - return s.String() -} - -type DeleteBucketInput struct { - _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` - - // Specifies the bucket being deleted. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteBucketInput) SetExpectedBucketOwner(v string) *DeleteBucketInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeleteBucketInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketIntelligentTieringConfigurationInput struct { - _ struct{} `locationName:"DeleteBucketIntelligentTieringConfigurationRequest" type:"structure"` - - // The name of the Amazon S3 bucket whose configuration you want to modify or - // retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the S3 Intelligent-Tiering configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketIntelligentTieringConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketIntelligentTieringConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketIntelligentTieringConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketIntelligentTieringConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketIntelligentTieringConfigurationInput) SetBucket(v string) *DeleteBucketIntelligentTieringConfigurationInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketIntelligentTieringConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetId sets the Id field's value. 
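The hasEndpointARN, getEndpointARN, and updateArnableField trio repeated on each input is what lets callers pass an access point ARN in the Bucket member: the SDK detects the ARN, resolves the matching endpoint, and backfills the parsed resource name into a copy of the input, as the doc comments above describe. The detection step is the public arn helper; a sketch, assuming aws-sdk-go v1 and a made-up access point ARN:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/arn"
	)

	func main() {
		bucket := "arn:aws:s3:us-west-2:111122223333:accesspoint/example-ap"

		// The same check hasEndpointARN performs on the Bucket member.
		fmt.Println(arn.IsARN(bucket)) // true

		parsed, err := arn.Parse(bucket)
		if err != nil {
			panic(err)
		}
		fmt.Println(parsed.Service, parsed.Resource) // s3 accesspoint/example-ap
	}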
-func (s *DeleteBucketIntelligentTieringConfigurationInput) SetId(v string) *DeleteBucketIntelligentTieringConfigurationInput { - s.Id = &v - return s -} - -func (s *DeleteBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeleteBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketIntelligentTieringConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketIntelligentTieringConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketIntelligentTieringConfigurationOutput) GoString() string { - return s.String() -} - -type DeleteBucketInventoryConfigurationInput struct { - _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` - - // The name of the bucket containing the inventory configuration to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketInventoryConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketInventoryConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteBucketInventoryConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketInventoryConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetId sets the Id field's value. -func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { - s.Id = &v - return s -} - -func (s *DeleteBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeleteBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketInventoryConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketInventoryConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketInventoryConfigurationOutput) GoString() string { - return s.String() -} - -type DeleteBucketLifecycleInput struct { - _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` - - // The bucket name of the lifecycle to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). 
- ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketLifecycleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketLifecycleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketLifecycleInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteBucketLifecycleInput) SetExpectedBucketOwner(v string) *DeleteBucketLifecycleInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeleteBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketLifecycleOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
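DeleteBucketLifecycle, like the other bucket-subresource deletes in this file, takes only the bucket (plus the optional owner check) and returns an empty output structure. Note that it removes the entire lifecycle configuration; dropping a single rule means re-putting a trimmed configuration. A sketch, assuming aws-sdk-go v1 and a hypothetical bucket name:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func main() {
		svc := s3.New(session.Must(session.NewSession()))
		// Deletes every lifecycle rule configured on the bucket.
		_, err := svc.DeleteBucketLifecycle(&s3.DeleteBucketLifecycleInput{
			Bucket: aws.String("example-bucket"),
		})
		fmt.Println(err)
	}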
-func (s DeleteBucketLifecycleOutput) GoString() string { - return s.String() -} - -type DeleteBucketMetricsConfigurationInput struct { - _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` - - // The name of the bucket containing the metrics configuration to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketMetricsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketMetricsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketMetricsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketMetricsConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetId sets the Id field's value. -func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { - s.Id = &v - return s -} - -func (s *DeleteBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. 
-// Note that original input is not modified. -func (s DeleteBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketMetricsConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketMetricsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketMetricsConfigurationOutput) GoString() string { - return s.String() -} - -type DeleteBucketOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketOutput) GoString() string { - return s.String() -} - -type DeleteBucketOwnershipControlsInput struct { - _ struct{} `locationName:"DeleteBucketOwnershipControlsRequest" type:"structure"` - - // The Amazon S3 bucket whose OwnershipControls you want to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketOwnershipControlsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketOwnershipControlsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteBucketOwnershipControlsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketOwnershipControlsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketOwnershipControlsInput) SetBucket(v string) *DeleteBucketOwnershipControlsInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketOwnershipControlsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *DeleteBucketOwnershipControlsInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *DeleteBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketOwnershipControlsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeleteBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketOwnershipControlsOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketOwnershipControlsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketOwnershipControlsOutput) GoString() string { - return s.String() -} - -type DeleteBucketPolicyInput struct { - _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s DeleteBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketPolicyInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteBucketPolicyInput) SetExpectedBucketOwner(v string) *DeleteBucketPolicyInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeleteBucketPolicyInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketPolicyOutput) GoString() string { - return s.String() -} - -type DeleteBucketReplicationInput struct { - _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). 
- ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketReplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketReplicationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketReplicationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteBucketReplicationInput) SetExpectedBucketOwner(v string) *DeleteBucketReplicationInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeleteBucketReplicationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketReplicationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketReplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s DeleteBucketReplicationOutput) GoString() string { - return s.String() -} - -type DeleteBucketTaggingInput struct { - _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` - - // The bucket that has the tag set to be removed. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketTaggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteBucketTaggingInput) SetExpectedBucketOwner(v string) *DeleteBucketTaggingInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeleteBucketTaggingInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketTaggingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s DeleteBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketTaggingOutput) GoString() string { - return s.String() -} - -type DeleteBucketWebsiteInput struct { - _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` - - // The bucket name for which you want to remove the website configuration. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketWebsiteInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { - s.Bucket = &v - return s -} - -func (s *DeleteBucketWebsiteInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteBucketWebsiteInput) SetExpectedBucketOwner(v string) *DeleteBucketWebsiteInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. 
-func (s DeleteBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteBucketWebsiteOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBucketWebsiteOutput) GoString() string { - return s.String() -} - -// Information about the delete marker. -type DeleteMarkerEntry struct { - _ struct{} `type:"structure"` - - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` - - // The object key. - Key *string `min:"1" type:"string"` - - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp"` - - // The account that created the delete marker. - Owner *Owner `type:"structure"` - - // Version ID of an object. - VersionId *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteMarkerEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteMarkerEntry) GoString() string { - return s.String() -} - -// SetIsLatest sets the IsLatest field's value. -func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry { - s.IsLatest = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry { - s.Key = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry { - s.LastModified = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry { - s.Owner = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { - s.VersionId = &v - return s -} - -// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter -// in your replication configuration, you must also include a DeleteMarkerReplication -// element. If your Filter includes a Tag element, the DeleteMarkerReplication -// Status must be set to Disabled, because Amazon S3 does not support replicating -// delete markers for tag-based rules. For an example configuration, see Basic -// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
-// -// For more information about delete marker replication, see Basic Rule Configuration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). -// -// If you are using an earlier version of the replication configuration, Amazon -// S3 handles replication of delete markers differently. For more information, -// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). -type DeleteMarkerReplication struct { - _ struct{} `type:"structure"` - - // Indicates whether to replicate delete markers. - Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteMarkerReplication) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteMarkerReplication) GoString() string { - return s.String() -} - -// SetStatus sets the Status field's value. -func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { - s.Status = &v - return s -} - -type DeleteObjectInput struct { - _ struct{} `locationName:"DeleteObjectRequest" type:"structure"` - - // The name of the bucket containing the object. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates whether S3 Object Lock should bypass Governance-mode restrictions - // to process this operation. To use this header, you must have the s3:BypassGovernanceRetention - // permission. - BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied).
- ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Key name of the object to delete. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. Required to - // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { - s.Bucket = &v - return s -} - -func (s *DeleteObjectInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. -func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput { - s.BypassGovernanceRetention = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteObjectInput) SetExpectedBucketOwner(v string) *DeleteObjectInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { - s.Key = &v - return s -} - -// SetMFA sets the MFA field's value. 
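The MFA and VersionId fields documented above work together: permanently deleting a specific version from a bucket with MFA delete enabled requires the x-amz-mfa header, formatted as the device serial number, a space, and the current token. A hedged sketch; the bucket, key, version ID, and MFA values are placeholders, and the session assumes ambient credentials:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Deleting a concrete version (not just adding a delete marker) from a
	// bucket with MFA delete enabled requires the "SERIAL TOKEN" pair.
	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String("example-versioned-bucket"),                   // placeholder
		Key:       aws.String("logs/2023-01-01.txt"),                        // placeholder
		VersionId: aws.String("3HL4kqtJlcpXroDTDmJ-rmSpXd3dIbrHY"),          // placeholder
		MFA:       aws.String("arn:aws:iam::123456789012:mfa/user 123456"), // placeholder serial + token
	})
	if err != nil {
		fmt.Println("delete failed:", err)
		return
	}
	fmt.Println("delete marker:", aws.BoolValue(out.DeleteMarker))
}
```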
-func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { - s.MFA = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { - s.VersionId = &v - return s -} - -func (s *DeleteObjectInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteObjectInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeleteObjectInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteObjectOutput struct { - _ struct{} `type:"structure"` - - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Returns the version ID of the delete marker created as a result of the DELETE - // operation. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteObjectOutput) GoString() string { - return s.String() -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { - s.DeleteMarker = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { - s.RequestCharged = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { - s.VersionId = &v - return s -} - -type DeleteObjectTaggingInput struct { - _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` - - // The bucket name containing the objects from which to remove the tags. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The key that identifies the object in the bucket from which to remove all - // tags. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // The versionId of the object that the tag-set will be removed from. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteObjectTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteObjectTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { - s.Bucket = &v - return s -} - -func (s *DeleteObjectTaggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
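As the Bucket documentation repeats for each of these shapes, the field also accepts an access point ARN in place of a bucket name; the hasEndpointARN/getEndpointARN helpers are what detect that and re-route the request to the access point hostname. A small sketch with a hypothetical ARN and key:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Passing an access point ARN as Bucket; the SDK derives the
	// AccessPointName-AccountId.s3-accesspoint.Region hostname from it.
	out, err := svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"), // placeholder ARN
		Key:    aws.String("reports/summary.csv"),                                 // placeholder key
	})
	if err != nil {
		fmt.Println("remove tags failed:", err)
		return
	}
	fmt.Println("tag-set removed from version:", aws.StringValue(out.VersionId))
}
```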
-func (s *DeleteObjectTaggingInput) SetExpectedBucketOwner(v string) *DeleteObjectTaggingInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { - s.Key = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { - s.VersionId = &v - return s -} - -func (s *DeleteObjectTaggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeleteObjectTaggingInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteObjectTaggingOutput struct { - _ struct{} `type:"structure"` - - // The versionId of the object the tag-set was removed from. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteObjectTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteObjectTaggingOutput) GoString() string { - return s.String() -} - -// SetVersionId sets the VersionId field's value. -func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { - s.VersionId = &v - return s -} - -type DeleteObjectsInput struct { - _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` - - // The bucket name containing the objects to delete. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies whether you want to delete this object even if it has a Governance-type - // Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention - // permission. - BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // This checksum algorithm must be the same for all parts and it must match the checksum - // value supplied in the CreateMultipartUpload request. - // - // The AWS SDK for Go v1 does not support automatically computing the request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // Container for the request. - // - // Delete is a required field - Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. Required to - // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` -} - -// String returns the string representation.
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteObjectsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteObjectsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteObjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Delete == nil { - invalidParams.Add(request.NewErrParamRequired("Delete")) - } - if s.Delete != nil { - if err := s.Delete.Validate(); err != nil { - invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { - s.Bucket = &v - return s -} - -func (s *DeleteObjectsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. -func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { - s.BypassGovernanceRetention = &v - return s -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *DeleteObjectsInput) SetChecksumAlgorithm(v string) *DeleteObjectsInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetDelete sets the Delete field's value. -func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { - s.Delete = v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeleteObjectsInput) SetExpectedBucketOwner(v string) *DeleteObjectsInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetMFA sets the MFA field's value. -func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { - s.MFA = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { - s.RequestPayer = &v - return s -} - -func (s *DeleteObjectsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeleteObjectsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. 
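DeleteObjectsInput batches keys inside the Delete container; setting Quiet suppresses per-key success entries in the response, and, per the ChecksumAlgorithm comment above, this SDK computes the required Content-MD5 of the XML payload itself. A sketch of a quiet batch delete with placeholder bucket and keys:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Quiet mode: the response only lists keys that failed to delete.
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("tmp/a.txt")}, // placeholder keys
				{Key: aws.String("tmp/b.txt")},
			},
			Quiet: aws.Bool(true),
		},
	})
	if err != nil {
		fmt.Println("batch delete failed:", err)
		return
	}
	fmt.Printf("deleted %d, errors %d\n", len(out.Deleted), len(out.Errors))
}
```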
-func (s DeleteObjectsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeleteObjectsOutput struct { - _ struct{} `type:"structure"` - - // Container element for a successful delete. It identifies the object that - // was successfully deleted. - Deleted []*DeletedObject `type:"list" flattened:"true"` - - // Container for a failed delete action that describes the object that Amazon - // S3 attempted to delete and the error it encountered. - Errors []*Error `locationName:"Error" type:"list" flattened:"true"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteObjectsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteObjectsOutput) GoString() string { - return s.String() -} - -// SetDeleted sets the Deleted field's value. -func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { - s.Deleted = v - return s -} - -// SetErrors sets the Errors field's value. -func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { - s.Errors = v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { - s.RequestCharged = &v - return s -} - -type DeletePublicAccessBlockInput struct { - _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` - - // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletePublicAccessBlockInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletePublicAccessBlockInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeletePublicAccessBlockInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput { - s.Bucket = &v - return s -} - -func (s *DeletePublicAccessBlockInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *DeletePublicAccessBlockInput) SetExpectedBucketOwner(v string) *DeletePublicAccessBlockInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s DeletePublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type DeletePublicAccessBlockOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletePublicAccessBlockOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletePublicAccessBlockOutput) GoString() string { - return s.String() -} - -// Information about the deleted object. -type DeletedObject struct { - _ struct{} `type:"structure"` - - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. In a simple DELETE, this header indicates - // whether (true) or not (false) a delete marker was created. - DeleteMarker *bool `type:"boolean"` - - // The version ID of the delete marker created as a result of the DELETE operation. - // If you delete a specific object version, the value returned by this header - // is the version ID of the object version deleted. - DeleteMarkerVersionId *string `type:"string"` - - // The name of the deleted object. - Key *string `min:"1" type:"string"` - - // The version ID of the deleted object. - VersionId *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletedObject) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletedObject) GoString() string { - return s.String() -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject { - s.DeleteMarker = &v - return s -} - -// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value. -func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject { - s.DeleteMarkerVersionId = &v - return s -} - -// SetKey sets the Key field's value. -func (s *DeletedObject) SetKey(v string) *DeletedObject { - s.Key = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *DeletedObject) SetVersionId(v string) *DeletedObject { - s.VersionId = &v - return s -} - -// Specifies information about where to publish analysis or configuration results -// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). -type Destination struct { - _ struct{} `type:"structure"` - - // Specify this only in a cross-account scenario (where the source and destination - // bucket owners are not the same), and only if you want to change replica ownership - // to the Amazon Web Services account that owns the destination bucket. If this - // is not specified in the replication configuration, the replicas are owned - // by the same Amazon Web Services account that owns the source object. - AccessControlTranslation *AccessControlTranslation `type:"structure"` - - // Destination bucket owner account ID. In a cross-account scenario, if you - // direct Amazon S3 to change replica ownership to the Amazon Web Services account - // that owns the destination bucket by specifying the AccessControlTranslation - // property, this is the account ID of the destination bucket owner. For more - // information, see Replication Additional Configuration: Changing the Replica - // Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) - // in the Amazon S3 User Guide. - Account *string `type:"string"` - - // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to - // store the results. - // - // Bucket is a required field - Bucket *string `type:"string" required:"true"` - - // A container that provides information about encryption. If SourceSelectionCriteria - // is specified, you must specify this element. - EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - - // A container specifying replication metrics-related settings enabling replication - // metrics and events. - Metrics *Metrics `type:"structure"` - - // A container specifying S3 Replication Time Control (S3 RTC), including whether - // S3 RTC is enabled and the time when all objects and operations on objects - // must be replicated. Must be specified together with a Metrics block. - ReplicationTime *ReplicationTime `type:"structure"` - - // The storage class to use when replicating objects, such as S3 Standard or - // reduced redundancy. By default, Amazon S3 uses the storage class of the source - // object to create the object replica.
- // - // For valid values, see the StorageClass element of the PUT Bucket replication - // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // action in the Amazon S3 API Reference. - StorageClass *string `type:"string" enum:"StorageClass"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Destination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Destination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Destination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Destination"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.AccessControlTranslation != nil { - if err := s.AccessControlTranslation.Validate(); err != nil { - invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) - } - } - if s.Metrics != nil { - if err := s.Metrics.Validate(); err != nil { - invalidParams.AddNested("Metrics", err.(request.ErrInvalidParams)) - } - } - if s.ReplicationTime != nil { - if err := s.ReplicationTime.Validate(); err != nil { - invalidParams.AddNested("ReplicationTime", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessControlTranslation sets the AccessControlTranslation field's value. -func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { - s.AccessControlTranslation = v - return s -} - -// SetAccount sets the Account field's value. -func (s *Destination) SetAccount(v string) *Destination { - s.Account = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *Destination) SetBucket(v string) *Destination { - s.Bucket = &v - return s -} - -func (s *Destination) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. -func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { - s.EncryptionConfiguration = v - return s -} - -// SetMetrics sets the Metrics field's value. -func (s *Destination) SetMetrics(v *Metrics) *Destination { - s.Metrics = v - return s -} - -// SetReplicationTime sets the ReplicationTime field's value. -func (s *Destination) SetReplicationTime(v *ReplicationTime) *Destination { - s.ReplicationTime = v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *Destination) SetStorageClass(v string) *Destination { - s.StorageClass = &v - return s -} - -// Contains the type of server-side encryption used. -type Encryption struct { - _ struct{} `type:"structure"` - - // The server-side encryption algorithm used when storing job results in Amazon - // S3 (for example, AES256, aws:kms). 
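Taken together, the Destination type and its chainable setters above describe the replica side of a replication rule. A minimal sketch of how a caller might assemble one against this v1 SDK surface; the destination bucket ARN, account ID, and the 15-minute RTC threshold are hypothetical values, not anything taken from this diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Hypothetical replica destination: cross-account, with owner override,
	// and S3 RTC enabled together with its required Metrics block.
	dest := (&s3.Destination{}).
		SetBucket("arn:aws:s3:::example-destination-bucket").
		SetAccount("111122223333").
		SetAccessControlTranslation((&s3.AccessControlTranslation{}).
			SetOwner(s3.OwnerOverrideDestination)).
		SetStorageClass(s3.StorageClassStandard).
		SetReplicationTime((&s3.ReplicationTime{}).
			SetStatus(s3.ReplicationTimeStatusEnabled).
			SetTime((&s3.ReplicationTimeValue{}).SetMinutes(15))).
		SetMetrics((&s3.Metrics{}).
			SetStatus(s3.MetricsStatusEnabled).
			SetEventThreshold((&s3.ReplicationTimeValue{}).SetMinutes(15)))

	// Validate enforces the required Bucket field plus the nested
	// AccessControlTranslation/Metrics/ReplicationTime constraints shown above.
	if err := dest.Validate(); err != nil {
		fmt.Println("invalid destination:", err)
		return
	}
	fmt.Println(dest)
}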
- // - // EncryptionType is a required field - EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` - - // If the encryption type is aws:kms, this optional value can be used to specify - // the encryption context for the restore results. - KMSContext *string `type:"string"` - - // If the encryption type is aws:kms, this optional value specifies the ID of - // the symmetric customer managed key to use for encryption of job results. - // Amazon S3 only supports symmetric keys. For more information, see Using symmetric - // and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. - // - // KMSKeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by Encryption's - // String and GoString methods. - KMSKeyId *string `type:"string" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Encryption) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Encryption) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Encryption) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Encryption"} - if s.EncryptionType == nil { - invalidParams.Add(request.NewErrParamRequired("EncryptionType")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionType sets the EncryptionType field's value. -func (s *Encryption) SetEncryptionType(v string) *Encryption { - s.EncryptionType = &v - return s -} - -// SetKMSContext sets the KMSContext field's value. -func (s *Encryption) SetKMSContext(v string) *Encryption { - s.KMSContext = &v - return s -} - -// SetKMSKeyId sets the KMSKeyId field's value. -func (s *Encryption) SetKMSKeyId(v string) *Encryption { - s.KMSKeyId = &v - return s -} - -// Specifies encryption-related information for an Amazon S3 bucket that is -// a destination for replicated objects. -type EncryptionConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web - // Services KMS key stored in Amazon Web Services Key Management Service (KMS) - // for the destination bucket. Amazon S3 uses this key to encrypt replica objects. - // Amazon S3 only supports symmetric, customer managed KMS keys. For more information, - // see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. - ReplicaKmsKeyID *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s EncryptionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EncryptionConfiguration) GoString() string { - return s.String() -} - -// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value. -func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration { - s.ReplicaKmsKeyID = &v - return s -} - -// A message that indicates the request is complete and no more messages will -// be sent. You should not assume that the request is complete until the client -// receives an EndEvent. -type EndEvent struct { - _ struct{} `locationName:"EndEvent" type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EndEvent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EndEvent) GoString() string { - return s.String() -} - -// The EndEvent is an event in the SelectObjectContentEventStream group of events. -func (s *EndEvent) eventSelectObjectContentEventStream() {} - -// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value. -// This method is only used internally within the SDK's EventStream handling. -func (s *EndEvent) UnmarshalEvent( - payloadUnmarshaler protocol.PayloadUnmarshaler, - msg eventstream.Message, -) error { - return nil -} - -// MarshalEvent marshals the type into a stream event value. This method -// should only be used internally within the SDK's EventStream handling. -func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { - msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) - return msg, err -} - -// Container for all error elements. -type Error struct { - _ struct{} `type:"structure"` - - // The error code is a string that uniquely identifies an error condition. It - // is meant to be read and understood by programs that detect and handle errors - // by type. - // - // Amazon S3 error codes - // - // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 - // Forbidden SOAP Fault Code Prefix: Client - // - // * Code: AccountProblem Description: There is a problem with your Amazon - // Web Services account that prevents the action from completing successfully. - // Contact Amazon Web Services Support for further assistance. HTTP Status - // Code: 403 Forbidden SOAP Fault Code Prefix: Client - // - // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource - // has been disabled. Contact Amazon Web Services Support for further assistance. - // HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client - // - // * Code: AmbiguousGrantByEmailAddress Description: The email address you - // provided is associated with more than one account.
HTTP Status Code: 400 - // Bad Request SOAP Fault Code Prefix: Client - // - // * Code: AuthorizationHeaderMalformed Description: The authorization header - // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status - // Code: N/A - // - // * Code: BadDigest Description: The Content-MD5 you specified did not match - // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: - // Client - // - // * Code: BucketAlreadyExists Description: The requested bucket name is - // not available. The bucket namespace is shared by all users of the system. - // Please select a different name and try again. HTTP Status Code: 409 Conflict - // SOAP Fault Code Prefix: Client - // - // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create - // already exists, and you own it. Amazon S3 returns this error in all Amazon - // Web Services Regions except in the North Virginia Region. For legacy compatibility, - // if you re-create an existing bucket that you already own in the North - // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access - // control lists (ACLs). Code: 409 Conflict (in all Regions except the North - // Virginia Region) SOAP Fault Code Prefix: Client - // - // * Code: BucketNotEmpty Description: The bucket you tried to delete is - // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client - // - // * Code: CredentialsNotSupported Description: This request does not support - // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: - // Client - // - // * Code: CrossLocationLoggingProhibited Description: Cross-location logging - // not allowed. Buckets in one geographic location cannot log information - // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP - // Fault Code Prefix: Client - // - // * Code: EntityTooSmall Description: Your proposed upload is smaller than - // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP - // Fault Code Prefix: Client - // - // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum - // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code - // Prefix: Client - // - // * Code: ExpiredToken Description: The provided token has expired. HTTP - // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: IllegalVersioningConfigurationException Description: Indicates - // that the versioning configuration specified in the request is invalid. - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: IncompleteBody Description: You did not provide the number of - // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 - // Bad Request SOAP Fault Code Prefix: Client - // - // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires - // exactly one file upload per request. HTTP Status Code: 400 Bad Request - // SOAP Fault Code Prefix: Client - // - // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum - // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: - // Client - // - // * Code: InternalError Description: We encountered an internal error. Please - // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code - // Prefix: Server - // - // * Code: InvalidAccessKeyId Description: The Amazon Web Services access - // key ID you provided does not exist in our records. 
HTTP Status Code: 403 - // Forbidden SOAP Fault Code Prefix: Client - // - // * Code: InvalidAddressingHeader Description: You must specify the Anonymous - // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client - // - // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: - // 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidBucketName Description: The specified bucket is not valid. - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidBucketState Description: The request is not valid with - // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault - // Code Prefix: Client - // - // * Code: InvalidDigest Description: The Content-MD5 you specified is not - // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidEncryptionAlgorithmError Description: The encryption request - // you specified is not valid. The valid value is AES256. HTTP Status Code: - // 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidLocationConstraint Description: The specified location - // constraint is not valid. For more information about Regions, see How to - // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidObjectState Description: The action is not valid for the - // current state of the object. HTTP Status Code: 403 Forbidden SOAP Fault - // Code Prefix: Client - // - // * Code: InvalidPart Description: One or more of the specified parts could - // not be found. The part might not have been uploaded, or the specified - // entity tag might not have matched the part's entity tag. HTTP Status Code: - // 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidPartOrder Description: The list of parts was not in ascending - // order. Parts list must be specified in order by part number. HTTP Status - // Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidPayer Description: All access to this object has been disabled. - // Please contact Amazon Web Services Support for further assistance. HTTP - // Status Code: 403 Forbidden SOAP Fault Code Prefix: Client - // - // * Code: InvalidPolicyDocument Description: The content of the form does - // not meet the conditions specified in the policy document. HTTP Status - // Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidRange Description: The requested range cannot be satisfied. - // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code - // Prefix: Client - // - // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP - // Status Code: 400 Bad Request Code: N/A - // - // * Code: InvalidRequest Description: SOAP requests must be made over an - // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: - // Client - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is - // not supported for buckets with non-DNS compliant names. HTTP Status Code: - // 400 Bad Request Code: N/A - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is - // not supported for buckets with periods (.) in their names. HTTP Status - // Code: 400 Bad Request Code: N/A - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint - // only supports virtual style requests. 
HTTP Status Code: 400 Bad Request - // Code: N/A - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not - // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled - // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is - // not supported on this bucket. Contact Amazon Web Services Support for - // more information. HTTP Status Code: 400 Bad Request Code: N/A - // - // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot - // be enabled on this bucket. Contact Amazon Web Services Support for more - // information. HTTP Status Code: 400 Bad Request Code: N/A - // - // * Code: InvalidSecurity Description: The provided security credentials - // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: - // Client - // - // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidStorageClass Description: The storage class you specified - // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: - // Client - // - // * Code: InvalidTargetBucketForLogging Description: The target bucket for - // logging does not exist, is not owned by you, or does not have the appropriate - // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP - // Fault Code Prefix: Client - // - // * Code: InvalidToken Description: The provided token is malformed or otherwise - // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP - // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: KeyTooLongError Description: Your key is too long. HTTP Status - // Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: MalformedACLError Description: The XML you provided was not well-formed - // or did not validate against our published schema. HTTP Status Code: 400 - // Bad Request SOAP Fault Code Prefix: Client - // - // * Code: MalformedPOSTRequest Description: The body of your POST request - // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request - // SOAP Fault Code Prefix: Client - // - // * Code: MalformedXML Description: This happens when the user sends malformed - // XML (XML that doesn't conform to the published XSD) for the configuration. - // The error message is, "The XML you provided was not well-formed or did - // not validate against our published schema." HTTP Status Code: 400 Bad - // Request SOAP Fault Code Prefix: Client - // - // * Code: MaxMessageLengthExceeded Description: Your request was too big. - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: MaxPostPreDataLengthExceededError Description: Your POST request - // fields preceding the upload file were too large. HTTP Status Code: 400 - // Bad Request SOAP Fault Code Prefix: Client - // - // * Code: MetadataTooLarge Description: Your metadata headers exceed the - // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP - // Fault Code Prefix: Client - // - // * Code: MethodNotAllowed Description: The specified method is not allowed - // against this resource. 
HTTP Status Code: 405 Method Not Allowed SOAP Fault - // Code Prefix: Client - // - // * Code: MissingAttachment Description: A SOAP attachment was expected, - // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client - // - // * Code: MissingContentLength Description: You must provide the Content-Length - // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: - // Client - // - // * Code: MissingRequestBodyError Description: This happens when the user - // sends an empty XML document as a request. The error message is, "Request - // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: - // Client - // - // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing - // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code - // Prefix: Client - // - // * Code: MissingSecurityHeader Description: Your request is missing a required - // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: NoLoggingStatusForKey Description: There is no such thing as a - // logging status subresource for a key. HTTP Status Code: 400 Bad Request - // SOAP Fault Code Prefix: Client - // - // * Code: NoSuchBucket Description: The specified bucket does not exist. - // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client - // - // * Code: NoSuchBucketPolicy Description: The specified bucket does not - // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code - // Prefix: Client - // - // * Code: NoSuchKey Description: The specified key does not exist. HTTP - // Status Code: 404 Not Found SOAP Fault Code Prefix: Client - // - // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration - // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: - // Client - // - // * Code: NoSuchUpload Description: The specified multipart upload does - // not exist. The upload ID might be invalid, or the multipart upload might - // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault - // Code Prefix: Client - // - // * Code: NoSuchVersion Description: Indicates that the version ID specified - // in the request does not match an existing version. HTTP Status Code: 404 - // Not Found SOAP Fault Code Prefix: Client - // - // * Code: NotImplemented Description: A header you provided implies functionality - // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault - // Code Prefix: Server - // - // * Code: NotSignedUp Description: Your account is not signed up for the - // Amazon S3 service. You must sign up before you can use Amazon S3. You - // can sign up at the following URL: Amazon S3 (http://aws.amazon.com/s3) - // HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client - // - // * Code: OperationAborted Description: A conflicting conditional action - // is currently in progress against this resource. Try again. HTTP Status - // Code: 409 Conflict SOAP Fault Code Prefix: Client - // - // * Code: PermanentRedirect Description: The bucket you are attempting to - // access must be addressed using the specified endpoint. Send all future - // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP - // Fault Code Prefix: Client - // - // * Code: PreconditionFailed Description: At least one of the preconditions - // you specified did not hold. HTTP Status Code: 412 Precondition Failed - // SOAP Fault Code Prefix: Client - // - // * Code: Redirect Description: Temporary redirect. 
HTTP Status Code: 307 - // Moved Temporarily SOAP Fault Code Prefix: Client - // - // * Code: RestoreAlreadyInProgress Description: Object restore is already - // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client - // - // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be - // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request - // SOAP Fault Code Prefix: Client - // - // * Code: RequestTimeout Description: Your socket connection to the server - // was not read from or written to within the timeout period. HTTP Status - // Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: RequestTimeTooSkewed Description: The difference between the request - // time and the server's time is too large. HTTP Status Code: 403 Forbidden - // SOAP Fault Code Prefix: Client - // - // * Code: RequestTorrentOfBucketError Description: Requesting the torrent - // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP - // Fault Code Prefix: Client - // - // * Code: SignatureDoesNotMatch Description: The request signature we calculated - // does not match the signature you provided. Check your Amazon Web Services - // secret access key and signing method. For more information, see REST Authentication - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) - // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) - // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client - // - // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP - // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server - // - // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: - // 503 Slow Down SOAP Fault Code Prefix: Server - // - // * Code: TemporaryRedirect Description: You are being redirected to the - // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP - // Fault Code Prefix: Client - // - // * Code: TokenRefreshRequired Description: The provided token must be refreshed. - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: TooManyBuckets Description: You have attempted to create more - // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code - // Prefix: Client - // - // * Code: UnexpectedContent Description: This request does not support content. - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - // - // * Code: UnresolvableGrantByEmailAddress Description: The email address - // you provided does not match any account on record. HTTP Status Code: 400 - // Bad Request SOAP Fault Code Prefix: Client - // - // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain - // the specified field name. If it is specified, check the order of the fields. - // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client - Code *string `type:"string"` - - // The error key. - Key *string `min:"1" type:"string"` - - // The error message contains a generic description of the error condition in - // English. It is intended for a human audience. Simple programs display the - // message directly to the end user if they encounter an error condition they - // don't know how or don't care to handle. Sophisticated programs with more - // exhaustive error handling and proper internationalization are more likely - // to ignore the error message. 
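The catalog of codes above is what client programs switch on; in the v1 SDK they surface through the awserr.Error interface, while this Error type itself appears in multi-object delete responses. A minimal sketch of handling two of the listed codes, assuming default session credentials; the bucket and key names are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("missing-key"),    // hypothetical
	})
	if err == nil {
		fmt.Println("object exists")
		return
	}
	// awserr.Error exposes the Code/Message pair documented above.
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case s3.ErrCodeNoSuchKey: // "NoSuchKey": 404, key absent
			fmt.Println("object not found")
		case s3.ErrCodeNoSuchBucket: // "NoSuchBucket": 404, bucket absent
			fmt.Println("bucket not found")
		default:
			fmt.Println("request failed:", aerr.Code(), aerr.Message())
		}
	}
}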
- Message *string `type:"string"` - - // The version ID of the error. - VersionId *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Error) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Error) GoString() string { - return s.String() -} - -// SetCode sets the Code field's value. -func (s *Error) SetCode(v string) *Error { - s.Code = &v - return s -} - -// SetKey sets the Key field's value. -func (s *Error) SetKey(v string) *Error { - s.Key = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *Error) SetMessage(v string) *Error { - s.Message = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *Error) SetVersionId(v string) *Error { - s.VersionId = &v - return s -} - -// The error information. -type ErrorDocument struct { - _ struct{} `type:"structure"` - - // The object key name to use when a 4XX class error occurs. - // - // Replacement must be made for object keys containing special characters (such - // as carriage returns) when using XML requests. For more information, see XML - // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ErrorDocument) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ErrorDocument) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ErrorDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *ErrorDocument) SetKey(v string) *ErrorDocument { - s.Key = &v - return s -} - -// A container for specifying the configuration for Amazon EventBridge. -type EventBridgeConfiguration struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s EventBridgeConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EventBridgeConfiguration) GoString() string { - return s.String() -} - -// Optional configuration to replicate existing source bucket objects. For more -// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) -// in the Amazon S3 User Guide. -type ExistingObjectReplication struct { - _ struct{} `type:"structure"` - - // Status is a required field - Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExistingObjectReplication) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExistingObjectReplication) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ExistingObjectReplication) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ExistingObjectReplication"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStatus sets the Status field's value. -func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplication { - s.Status = &v - return s -} - -// Specifies the Amazon S3 object key name to filter on and whether to filter -// on the suffix or prefix of the key name. -type FilterRule struct { - _ struct{} `type:"structure"` - - // The object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. The maximum length is 1,024 characters. Overlapping - // prefixes and suffixes are not supported. For more information, see Configuring - // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. - Name *string `type:"string" enum:"FilterRuleName"` - - // The value that the filter searches for in object key names. - Value *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s FilterRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s FilterRule) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. 
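FilterRule is the unit of an event-notification key filter. A short sketch of a prefix rule wired into the KeyFilter and NotificationConfigurationFilter types defined elsewhere in this same file; the "logs/" prefix is a hypothetical value:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Match only keys under "logs/"; FilterRuleNamePrefix is the SDK
	// constant for the "prefix" rule name.
	rule := (&s3.FilterRule{}).
		SetName(s3.FilterRuleNamePrefix).
		SetValue("logs/")

	filter := &s3.NotificationConfigurationFilter{
		Key: &s3.KeyFilter{
			FilterRules: []*s3.FilterRule{rule},
		},
	}
	fmt.Println(filter)
}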
-func (s *FilterRule) SetName(v string) *FilterRule { - s.Name = &v - return s -} - -// SetValue sets the Value field's value. -func (s *FilterRule) SetValue(v string) *FilterRule { - s.Value = &v - return s -} - -type GetBucketAccelerateConfigurationInput struct { - _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` - - // The name of the bucket for which the accelerate configuration is retrieved. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketAccelerateConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketAccelerateConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAccelerateConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { - s.Bucket = &v - return s -} - -func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *GetBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAccelerateConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. 
-func (s GetBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetBucketAccelerateConfigurationOutput struct { - _ struct{} `type:"structure"` - - // The accelerate configuration of the bucket. - Status *string `type:"string" enum:"BucketAccelerateStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketAccelerateConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketAccelerateConfigurationOutput) GoString() string { - return s.String() -} - -// SetStatus sets the Status field's value. -func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { - s.Status = &v - return s -} - -type GetBucketAclInput struct { - _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` - - // Specifies the S3 bucket whose ACL is being requested. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { - s.Bucket = &v - return s -} - -func (s *GetBucketAclInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
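Callers never touch the validation or ARN plumbing above directly; they just issue the request and read Status off the output. A minimal sketch of the accelerate getter, assuming default session credentials; the bucket name is hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical
	})
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	// Status is nil when acceleration has never been configured on the bucket.
	if out.Status != nil {
		fmt.Println("accelerate status:", *out.Status) // "Enabled" or "Suspended"
	}
}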
-func (s *GetBucketAclInput) SetExpectedBucketOwner(v string) *GetBucketAclInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketAclInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetBucketAclInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetBucketAclOutput struct { - _ struct{} `type:"structure"` - - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - - // Container for the bucket owner's display name and ID. - Owner *Owner `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketAclOutput) GoString() string { - return s.String() -} - -// SetGrants sets the Grants field's value. -func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { - s.Grants = v - return s -} - -// SetOwner sets the Owner field's value. -func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { - s.Owner = v - return s -} - -type GetBucketAnalyticsConfigurationInput struct { - _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` - - // The name of the bucket from which an analytics configuration is retrieved. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The ID that identifies the analytics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketAnalyticsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketAnalyticsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAnalyticsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { - s.Bucket = &v - return s -} - -func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *GetBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAnalyticsConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetId sets the Id field's value. -func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { - s.Id = &v - return s -} - -func (s *GetBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetBucketAnalyticsConfigurationOutput struct { - _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` - - // The configuration and any analyses for the analytics filter. - AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketAnalyticsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketAnalyticsConfigurationOutput) GoString() string { - return s.String() -} - -// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. 
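The Id querystring member is what distinguishes one analytics configuration from another on the same bucket, and ExpectedBucketOwner turns an ownership mismatch into a 403. A sketch of retrieving one configuration; the bucket name, configuration ID, and account ID are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketAnalyticsConfiguration(&s3.GetBucketAnalyticsConfigurationInput{
		Bucket:              aws.String("example-bucket"),       // hypothetical
		Id:                  aws.String("example-analytics-id"), // hypothetical
		ExpectedBucketOwner: aws.String("111122223333"),         // hypothetical; 403 if the owner differs
	})
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	if out.AnalyticsConfiguration != nil && out.AnalyticsConfiguration.Id != nil {
		fmt.Println("found configuration:", *out.AnalyticsConfiguration.Id)
	}
}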
-func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { - s.AnalyticsConfiguration = v - return s -} - -type GetBucketCorsInput struct { - _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` - - // The bucket name for which to get the cors configuration. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketCorsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketCorsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { - s.Bucket = &v - return s -} - -func (s *GetBucketCorsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *GetBucketCorsInput) SetExpectedBucketOwner(v string) *GetBucketCorsInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketCorsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetBucketCorsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetBucketCorsOutput struct { - _ struct{} `type:"structure"` - - // A set of origins and methods (cross-origin access that you want to allow). - // You can add up to 100 rules to the configuration. - CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketCorsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketCorsOutput) GoString() string { - return s.String() -} - -// SetCORSRules sets the CORSRules field's value. -func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { - s.CORSRules = v - return s -} - -type GetBucketEncryptionInput struct { - _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` - - // The name of the bucket from which the server-side encryption configuration - // is retrieved. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketEncryptionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketEncryptionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketEncryptionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { - s.Bucket = &v - return s -} - -func (s *GetBucketEncryptionInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *GetBucketEncryptionInput) SetExpectedBucketOwner(v string) *GetBucketEncryptionInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketEncryptionInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. 
This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetBucketEncryptionOutput struct { - _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` - - // Specifies the default server-side-encryption configuration. - ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketEncryptionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketEncryptionOutput) GoString() string { - return s.String() -} - -// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. -func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { - s.ServerSideEncryptionConfiguration = v - return s -} - -type GetBucketIntelligentTieringConfigurationInput struct { - _ struct{} `locationName:"GetBucketIntelligentTieringConfigurationRequest" type:"structure"` - - // The name of the Amazon S3 bucket whose configuration you want to modify or - // retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the S3 Intelligent-Tiering configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketIntelligentTieringConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketIntelligentTieringConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
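The encryption getter above pairs GetBucketEncryptionInput with a ServerSideEncryptionConfiguration payload on the output. A sketch of reading a bucket's default encryption rules back out, assuming default session credentials; the bucket name is hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // hypothetical
	})
	if err != nil {
		// Buckets with no default configuration historically returned
		// s3.ErrCodeServerSideEncryptionConfigurationNotFoundError here.
		fmt.Println("request failed:", err)
		return
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if def := rule.ApplyServerSideEncryptionByDefault; def != nil {
			fmt.Println("default algorithm:", aws.StringValue(def.SSEAlgorithm))
		}
	}
}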
-func (s *GetBucketIntelligentTieringConfigurationInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketIntelligentTieringConfigurationInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-	if s.Id == nil {
-		invalidParams.Add(request.NewErrParamRequired("Id"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketIntelligentTieringConfigurationInput) SetBucket(v string) *GetBucketIntelligentTieringConfigurationInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketIntelligentTieringConfigurationInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetId sets the Id field's value.
-func (s *GetBucketIntelligentTieringConfigurationInput) SetId(v string) *GetBucketIntelligentTieringConfigurationInput {
-	s.Id = &v
-	return s
-}
-
-func (s *GetBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketIntelligentTieringConfigurationOutput struct {
-	_ struct{} `type:"structure" payload:"IntelligentTieringConfiguration"`
-
-	// Container for S3 Intelligent-Tiering configuration.
-	IntelligentTieringConfiguration *IntelligentTieringConfiguration `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketIntelligentTieringConfigurationOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketIntelligentTieringConfigurationOutput) GoString() string {
-	return s.String()
-}
-
-// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value.
-func (s *GetBucketIntelligentTieringConfigurationOutput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *GetBucketIntelligentTieringConfigurationOutput {
-	s.IntelligentTieringConfiguration = v
-	return s
-}
-
-type GetBucketInventoryConfigurationInput struct {
-	_ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"`
-
-	// The name of the bucket containing the inventory configuration to retrieve.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-
-	// The ID used to identify the inventory configuration.
-	//
-	// Id is a required field
-	Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketInventoryConfigurationInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketInventoryConfigurationInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketInventoryConfigurationInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-	if s.Id == nil {
-		invalidParams.Add(request.NewErrParamRequired("Id"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketInventoryConfigurationInput {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-// SetId sets the Id field's value.
-func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput {
-	s.Id = &v
-	return s
-}
-
-func (s *GetBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketInventoryConfigurationOutput struct {
-	_ struct{} `type:"structure" payload:"InventoryConfiguration"`
-
-	// Specifies the inventory configuration.
-	InventoryConfiguration *InventoryConfiguration `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketInventoryConfigurationOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketInventoryConfigurationOutput) GoString() string {
-	return s.String()
-}
-
-// SetInventoryConfiguration sets the InventoryConfiguration field's value.
-func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput {
-	s.InventoryConfiguration = v
-	return s
-}
-
-type GetBucketLifecycleConfigurationInput struct {
-	_ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"`
-
-	// The name of the bucket for which to get the lifecycle information.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLifecycleConfigurationInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLifecycleConfigurationInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketLifecycleConfigurationInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleConfigurationInput {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketLifecycleConfigurationOutput struct {
-	_ struct{} `type:"structure"`
-
-	// Container for a lifecycle rule.
-	Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLifecycleConfigurationOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLifecycleConfigurationOutput) GoString() string {
-	return s.String()
-}
-
-// SetRules sets the Rules field's value.
-func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput {
-	s.Rules = v
-	return s
-}
-
-type GetBucketLifecycleInput struct {
-	_ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"`
-
-	// The name of the bucket for which to get the lifecycle information.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLifecycleInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLifecycleInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketLifecycleInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketLifecycleInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketLifecycleInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleInput {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketLifecycleInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketLifecycleInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketLifecycleOutput struct {
-	_ struct{} `type:"structure"`
-
-	// Container for a lifecycle rule.
-	Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLifecycleOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLifecycleOutput) GoString() string {
-	return s.String()
-}
-
-// SetRules sets the Rules field's value.
-func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput {
-	s.Rules = v
-	return s
-}
-
-type GetBucketLocationInput struct {
-	_ struct{} `locationName:"GetBucketLocationRequest" type:"structure"`
-
-	// The name of the bucket for which to get the location.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLocationInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLocationInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketLocationInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketLocationInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketLocationInput) SetExpectedBucketOwner(v string) *GetBucketLocationInput {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketLocationInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketLocationInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketLocationOutput struct {
-	_ struct{} `type:"structure"`
-
-	// Specifies the Region where the bucket resides. For a list of all the Amazon
-	// S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region).
-	// Buckets in Region us-east-1 have a LocationConstraint of null.
-	LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLocationOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLocationOutput) GoString() string {
-	return s.String()
-}
-
-// SetLocationConstraint sets the LocationConstraint field's value.
-func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput {
-	s.LocationConstraint = &v
-	return s
-}
-
-type GetBucketLoggingInput struct {
-	_ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"`
-
-	// The bucket name for which to get the logging information.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLoggingInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLoggingInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketLoggingInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketLoggingInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketLoggingInput) SetExpectedBucketOwner(v string) *GetBucketLoggingInput {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketLoggingInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketLoggingInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketLoggingOutput struct {
-	_ struct{} `type:"structure"`
-
-	// Describes where logs are stored and the prefix that Amazon S3 assigns to
-	// all log object keys for a bucket. For more information, see PUT Bucket logging
-	// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
-	// in the Amazon S3 API Reference.
-	LoggingEnabled *LoggingEnabled `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLoggingOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketLoggingOutput) GoString() string {
-	return s.String()
-}
-
-// SetLoggingEnabled sets the LoggingEnabled field's value.
-func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput {
-	s.LoggingEnabled = v
-	return s
-}
-
-type GetBucketMetricsConfigurationInput struct {
-	_ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"`
-
-	// The name of the bucket containing the metrics configuration to retrieve.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-
-	// The ID used to identify the metrics configuration.
-	//
-	// Id is a required field
-	Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketMetricsConfigurationInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketMetricsConfigurationInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketMetricsConfigurationInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-	if s.Id == nil {
-		invalidParams.Add(request.NewErrParamRequired("Id"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketMetricsConfigurationInput {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-// SetId sets the Id field's value.
-func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput {
-	s.Id = &v
-	return s
-}
-
-func (s *GetBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketMetricsConfigurationOutput struct {
-	_ struct{} `type:"structure" payload:"MetricsConfiguration"`
-
-	// Specifies the metrics configuration.
-	MetricsConfiguration *MetricsConfiguration `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketMetricsConfigurationOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketMetricsConfigurationOutput) GoString() string {
-	return s.String()
-}
-
-// SetMetricsConfiguration sets the MetricsConfiguration field's value.
-func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput {
-	s.MetricsConfiguration = v
-	return s
-}
-
-type GetBucketNotificationConfigurationRequest struct {
-	_ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"`
-
-	// The name of the bucket for which to get the notification configuration.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketNotificationConfigurationRequest) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketNotificationConfigurationRequest) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketNotificationConfigurationRequest) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketNotificationConfigurationRequest) SetExpectedBucketOwner(v string) *GetBucketNotificationConfigurationRequest {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketNotificationConfigurationRequest) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketOwnershipControlsInput struct {
-	_ struct{} `locationName:"GetBucketOwnershipControlsRequest" type:"structure"`
-
-	// The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketOwnershipControlsInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketOwnershipControlsInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketOwnershipControlsInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketOwnershipControlsInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketOwnershipControlsInput) SetBucket(v string) *GetBucketOwnershipControlsInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketOwnershipControlsInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *GetBucketOwnershipControlsInput {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-func (s *GetBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketOwnershipControlsInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketOwnershipControlsOutput struct {
-	_ struct{} `type:"structure" payload:"OwnershipControls"`
-
-	// The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter)
-	// currently in effect for this Amazon S3 bucket.
-	OwnershipControls *OwnershipControls `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketOwnershipControlsOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketOwnershipControlsOutput) GoString() string {
-	return s.String()
-}
-
-// SetOwnershipControls sets the OwnershipControls field's value.
-func (s *GetBucketOwnershipControlsOutput) SetOwnershipControls(v *OwnershipControls) *GetBucketOwnershipControlsOutput {
-	s.OwnershipControls = v
-	return s
-}
-
-type GetBucketPolicyInput struct {
-	_ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"`
-
-	// The bucket name for which to get the bucket policy.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketPolicyInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketPolicyInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketPolicyInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketPolicyInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketPolicyInput) SetExpectedBucketOwner(v string) *GetBucketPolicyInput {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketPolicyInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketPolicyInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketPolicyOutput struct {
-	_ struct{} `type:"structure" payload:"Policy"`
-
-	// The bucket policy as a JSON document.
-	Policy *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketPolicyOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketPolicyOutput) GoString() string {
-	return s.String()
-}
-
-// SetPolicy sets the Policy field's value.
-func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput {
-	s.Policy = &v
-	return s
-}
-
-type GetBucketPolicyStatusInput struct {
-	_ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"`
-
-	// The name of the Amazon S3 bucket whose policy status you want to retrieve.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketPolicyStatusInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketPolicyStatusInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketPolicyStatusInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyStatusInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketPolicyStatusInput) SetBucket(v string) *GetBucketPolicyStatusInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketPolicyStatusInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketPolicyStatusInput) SetExpectedBucketOwner(v string) *GetBucketPolicyStatusInput {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketPolicyStatusInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketPolicyStatusOutput struct {
-	_ struct{} `type:"structure" payload:"PolicyStatus"`
-
-	// The policy status for the specified bucket.
-	PolicyStatus *PolicyStatus `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketPolicyStatusOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketPolicyStatusOutput) GoString() string {
-	return s.String()
-}
-
-// SetPolicyStatus sets the PolicyStatus field's value.
-func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucketPolicyStatusOutput {
-	s.PolicyStatus = v
-	return s
-}
-
-type GetBucketReplicationInput struct {
-	_ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"`
-
-	// The bucket name for which to get the replication information.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketReplicationInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketReplicationInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketReplicationInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketReplicationInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketReplicationInput) SetExpectedBucketOwner(v string) *GetBucketReplicationInput {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketReplicationInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketReplicationInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketReplicationOutput struct {
-	_ struct{} `type:"structure" payload:"ReplicationConfiguration"`
-
-	// A container for replication rules. You can add up to 1,000 rules. The maximum
-	// size of a replication configuration is 2 MB.
-	ReplicationConfiguration *ReplicationConfiguration `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketReplicationOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketReplicationOutput) GoString() string {
-	return s.String()
-}
-
-// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
-func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput {
-	s.ReplicationConfiguration = v
-	return s
-}
-
-type GetBucketRequestPaymentInput struct {
-	_ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"`
-
-	// The name of the bucket for which to get the payment request configuration
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketRequestPaymentInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketRequestPaymentInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketRequestPaymentInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketRequestPaymentInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *GetBucketRequestPaymentInput {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketRequestPaymentOutput struct {
-	_ struct{} `type:"structure"`
-
-	// Specifies who pays for the download and request fees.
-	Payer *string `type:"string" enum:"Payer"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketRequestPaymentOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketRequestPaymentOutput) GoString() string {
-	return s.String()
-}
-
-// SetPayer sets the Payer field's value.
-func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput {
-	s.Payer = &v
-	return s
-}
-
-type GetBucketTaggingInput struct {
-	_ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"`
-
-	// The name of the bucket for which to get the tagging information.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketTaggingInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetBucketTaggingInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketTaggingInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *GetBucketTaggingInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetBucketTaggingInput) SetExpectedBucketOwner(v string) *GetBucketTaggingInput {
-	s.ExpectedBucketOwner = &v
-	return s
-}
-
-func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetBucketTaggingInput) hasEndpointARN() bool {
-	if s.Bucket == nil {
-		return false
-	}
-	return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetBucketTaggingInput) updateArnableField(v string) (interface{}, error) {
-	if s.Bucket == nil {
-		return nil, fmt.Errorf("member Bucket is nil")
-	}
-	s.Bucket = aws.String(v)
-	return &s, nil
-}
-
-type GetBucketTaggingOutput struct {
-	_ struct{} `type:"structure"`
-
-	// Contains the tag set.
- // - // TagSet is a required field - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketTaggingOutput) GoString() string { - return s.String() -} - -// SetTagSet sets the TagSet field's value. -func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { - s.TagSet = v - return s -} - -type GetBucketVersioningInput struct { - _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` - - // The name of the bucket for which to get the versioning information. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketVersioningInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketVersioningInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketVersioningInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { - s.Bucket = &v - return s -} - -func (s *GetBucketVersioningInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
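A hedged usage sketch for the tagging getter above, assuming a client built from the default session and a placeholder bucket name; a bucket with no tag set normally fails the call rather than returning an empty TagSet:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// TagSet is the required payload of GetBucketTaggingOutput.
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
}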
-func (s *GetBucketVersioningInput) SetExpectedBucketOwner(v string) *GetBucketVersioningInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketVersioningInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetBucketVersioningInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetBucketVersioningOutput struct { - _ struct{} `type:"structure"` - - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA - // delete. If the bucket has never been so configured, this element is not returned. - MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` - - // The versioning state of the bucket. - Status *string `type:"string" enum:"BucketVersioningStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketVersioningOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketVersioningOutput) GoString() string { - return s.String() -} - -// SetMFADelete sets the MFADelete field's value. -func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { - s.MFADelete = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { - s.Status = &v - return s -} - -type GetBucketWebsiteInput struct { - _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` - - // The bucket name for which to get the website configuration. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
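Both members of GetBucketVersioningOutput above are optional, so a bucket that has never had versioning configured returns neither Status nor MfaDelete, and the nil checks below matter. A minimal sketch under the same placeholder assumptions as before:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.Status != nil && *out.Status == s3.BucketVersioningStatusEnabled {
		fmt.Println("versioning enabled; MFA delete:", aws.StringValue(out.MFADelete))
	}
}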
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketWebsiteInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { - s.Bucket = &v - return s -} - -func (s *GetBucketWebsiteInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *GetBucketWebsiteInput) SetExpectedBucketOwner(v string) *GetBucketWebsiteInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetBucketWebsiteInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetBucketWebsiteOutput struct { - _ struct{} `type:"structure"` - - // The object key name of the website error document to use for 4XX class errors. - ErrorDocument *ErrorDocument `type:"structure"` - - // The name of the index document for the website (for example index.html). - IndexDocument *IndexDocument `type:"structure"` - - // Specifies the redirect behavior of all requests to a website endpoint of - // an Amazon S3 bucket. - RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` - - // Rules that define when a redirect is applied and the redirect behavior. - RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetBucketWebsiteOutput) GoString() string { - return s.String() -} - -// SetErrorDocument sets the ErrorDocument field's value. 
-func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { - s.ErrorDocument = v - return s -} - -// SetIndexDocument sets the IndexDocument field's value. -func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { - s.IndexDocument = v - return s -} - -// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. -func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { - s.RedirectAllRequestsTo = v - return s -} - -// SetRoutingRules sets the RoutingRules field's value. -func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { - s.RoutingRules = v - return s -} - -type GetObjectAclInput struct { - _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` - - // The bucket name that contains the object for which to get the ACL information. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The key of the object for which to get the ACL information. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetObjectAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { - s.Bucket = &v - return s -} - -func (s *GetObjectAclInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *GetObjectAclInput) SetExpectedBucketOwner(v string) *GetObjectAclInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { - s.VersionId = &v - return s -} - -func (s *GetObjectAclInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectAclInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetObjectAclInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetObjectAclOutput struct { - _ struct{} `type:"structure"` - - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - - // Container for the bucket owner's display name and ID. - Owner *Owner `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectAclOutput) GoString() string { - return s.String() -} - -// SetGrants sets the Grants field's value. 
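Validate above runs entirely client-side, before any request is signed or sent, and aggregates every violated constraint into one request.ErrInvalidParams. A sketch that needs no credentials at all:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	in := &s3.GetObjectAclInput{} // required Bucket and Key deliberately unset

	// Validate reports both missing parameters in a single error.
	if err := in.Validate(); err != nil {
		fmt.Println(err)
	}
}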
-func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { - s.Grants = v - return s -} - -// SetOwner sets the Owner field's value. -func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { - s.Owner = v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { - s.RequestCharged = &v - return s -} - -type GetObjectAttributesInput struct { - _ struct{} `locationName:"GetObjectAttributesRequest" type:"structure"` - - // The name of the bucket that contains the object. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The object key. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Sets the maximum number of parts to return. - MaxParts *int64 `location:"header" locationName:"x-amz-max-parts" type:"integer"` - - // An XML header that specifies the fields at the root level that you want returned - // in the response. Fields that you do not specify are not returned. - // - // ObjectAttributes is a required field - ObjectAttributes []*string `location:"header" locationName:"x-amz-object-attributes" type:"list" required:"true" enum:"ObjectAttributes"` - - // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. - PartNumberMarker *int64 `location:"header" locationName:"x-amz-part-number-marker" type:"integer"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. 
- RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use when encrypting the object (for example, AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm - // header. - // - // SSECustomerKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GetObjectAttributesInput's - // String and GoString methods. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // The version ID used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectAttributesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectAttributesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectAttributesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectAttributesInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.ObjectAttributes == nil { - invalidParams.Add(request.NewErrParamRequired("ObjectAttributes")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectAttributesInput) SetBucket(v string) *GetObjectAttributesInput { - s.Bucket = &v - return s -} - -func (s *GetObjectAttributesInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *GetObjectAttributesInput) SetExpectedBucketOwner(v string) *GetObjectAttributesInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. 
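The SSE-C trio above always travels together: the algorithm, the customer key (base64-encoded on the wire per the marshal-as:"blob" tag), and the key's MD5 digest, which S3 uses to confirm the key survived transit. A sketch that builds such an input and computes the digest explicitly; the key bytes and object names are placeholders:

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	key := bytes.Repeat([]byte("k"), 32) // placeholder 256-bit customer key
	sum := md5.Sum(key)

	_ = (&s3.GetObjectAttributesInput{}).
		SetBucket("example-bucket"). // placeholder
		SetKey("example-key").       // placeholder
		SetObjectAttributes(aws.StringSlice([]string{"ETag", "ObjectSize"})).
		SetSSECustomerAlgorithm("AES256").
		SetSSECustomerKey(string(key)).
		SetSSECustomerKeyMD5(base64.StdEncoding.EncodeToString(sum[:]))
}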
-func (s *GetObjectAttributesInput) SetKey(v string) *GetObjectAttributesInput { - s.Key = &v - return s -} - -// SetMaxParts sets the MaxParts field's value. -func (s *GetObjectAttributesInput) SetMaxParts(v int64) *GetObjectAttributesInput { - s.MaxParts = &v - return s -} - -// SetObjectAttributes sets the ObjectAttributes field's value. -func (s *GetObjectAttributesInput) SetObjectAttributes(v []*string) *GetObjectAttributesInput { - s.ObjectAttributes = v - return s -} - -// SetPartNumberMarker sets the PartNumberMarker field's value. -func (s *GetObjectAttributesInput) SetPartNumberMarker(v int64) *GetObjectAttributesInput { - s.PartNumberMarker = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectAttributesInput) SetRequestPayer(v string) *GetObjectAttributesInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectAttributesInput) SetSSECustomerAlgorithm(v string) *GetObjectAttributesInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *GetObjectAttributesInput) SetSSECustomerKey(v string) *GetObjectAttributesInput { - s.SSECustomerKey = &v - return s -} - -func (s *GetObjectAttributesInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *GetObjectAttributesInput) SetSSECustomerKeyMD5(v string) *GetObjectAttributesInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectAttributesInput) SetVersionId(v string) *GetObjectAttributesInput { - s.VersionId = &v - return s -} - -func (s *GetObjectAttributesInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectAttributesInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetObjectAttributesInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetObjectAttributesOutput struct { - _ struct{} `type:"structure"` - - // The checksum or digest of the object. - Checksum *Checksum `type:"structure"` - - // Specifies whether the object retrieved was (true) or was not (false) a delete - // marker. If false, this response header does not appear in the response. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL. - ETag *string `type:"string"` - - // The creation date of the object. - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` - - // A collection of parts associated with a multipart upload. - ObjectParts *GetObjectAttributesParts `type:"structure"` - - // The size of the object in bytes. 
- ObjectSize *int64 `type:"long"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Provides the storage class information of the object. Amazon S3 returns this - // header for all objects except for S3 Standard storage class objects. - // - // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). - StorageClass *string `type:"string" enum:"StorageClass"` - - // The version ID of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectAttributesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectAttributesOutput) GoString() string { - return s.String() -} - -// SetChecksum sets the Checksum field's value. -func (s *GetObjectAttributesOutput) SetChecksum(v *Checksum) *GetObjectAttributesOutput { - s.Checksum = v - return s -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *GetObjectAttributesOutput) SetDeleteMarker(v bool) *GetObjectAttributesOutput { - s.DeleteMarker = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *GetObjectAttributesOutput) SetETag(v string) *GetObjectAttributesOutput { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *GetObjectAttributesOutput) SetLastModified(v time.Time) *GetObjectAttributesOutput { - s.LastModified = &v - return s -} - -// SetObjectParts sets the ObjectParts field's value. -func (s *GetObjectAttributesOutput) SetObjectParts(v *GetObjectAttributesParts) *GetObjectAttributesOutput { - s.ObjectParts = v - return s -} - -// SetObjectSize sets the ObjectSize field's value. -func (s *GetObjectAttributesOutput) SetObjectSize(v int64) *GetObjectAttributesOutput { - s.ObjectSize = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *GetObjectAttributesOutput) SetRequestCharged(v string) *GetObjectAttributesOutput { - s.RequestCharged = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *GetObjectAttributesOutput) SetStorageClass(v string) *GetObjectAttributesOutput { - s.StorageClass = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectAttributesOutput) SetVersionId(v string) *GetObjectAttributesOutput { - s.VersionId = &v - return s -} - -// A collection of parts associated with a multipart upload. -type GetObjectAttributesParts struct { - _ struct{} `type:"structure"` - - // Indicates whether the returned list of parts is truncated. A value of true - // indicates that the list was truncated. A list can be truncated if the number - // of parts exceeds the limit returned in the MaxParts element. - IsTruncated *bool `type:"boolean"` - - // The maximum number of parts allowed in the response. 
- MaxParts *int64 `type:"integer"` - - // When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the PartNumberMarker request parameter in - // a subsequent request. - NextPartNumberMarker *int64 `type:"integer"` - - // The marker for the current part. - PartNumberMarker *int64 `type:"integer"` - - // A container for elements related to a particular part. A response can contain - // zero or more Parts elements. - Parts []*ObjectPart `locationName:"Part" type:"list" flattened:"true"` - - // The total number of parts. - TotalPartsCount *int64 `locationName:"PartsCount" type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectAttributesParts) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectAttributesParts) GoString() string { - return s.String() -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *GetObjectAttributesParts) SetIsTruncated(v bool) *GetObjectAttributesParts { - s.IsTruncated = &v - return s -} - -// SetMaxParts sets the MaxParts field's value. -func (s *GetObjectAttributesParts) SetMaxParts(v int64) *GetObjectAttributesParts { - s.MaxParts = &v - return s -} - -// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. -func (s *GetObjectAttributesParts) SetNextPartNumberMarker(v int64) *GetObjectAttributesParts { - s.NextPartNumberMarker = &v - return s -} - -// SetPartNumberMarker sets the PartNumberMarker field's value. -func (s *GetObjectAttributesParts) SetPartNumberMarker(v int64) *GetObjectAttributesParts { - s.PartNumberMarker = &v - return s -} - -// SetParts sets the Parts field's value. -func (s *GetObjectAttributesParts) SetParts(v []*ObjectPart) *GetObjectAttributesParts { - s.Parts = v - return s -} - -// SetTotalPartsCount sets the TotalPartsCount field's value. -func (s *GetObjectAttributesParts) SetTotalPartsCount(v int64) *GetObjectAttributesParts { - s.TotalPartsCount = &v - return s -} - -type GetObjectInput struct { - _ struct{} `locationName:"GetObjectRequest" type:"structure"` - - // The bucket name containing the object. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
- // When using this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts bucket ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
- //
- // Bucket is a required field
- Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
- // To retrieve the checksum, this mode must be enabled.
- //
- // The AWS SDK for Go v1 does not support automatic response payload checksum
- // validation. This feature is available in the AWS SDK for Go v2.
- ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"`
-
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
- ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-
- // Return the object only if its entity tag (ETag) is the same as the one specified;
- // otherwise, return a 412 (precondition failed) error.
- IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
-
- // Return the object only if it has been modified since the specified time;
- // otherwise, return a 304 (not modified) error.
- IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
-
- // Return the object only if its entity tag (ETag) is different from the one
- // specified; otherwise, return a 304 (not modified) error.
- IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
-
- // Return the object only if it has not been modified since the specified time;
- // otherwise, return a 412 (precondition failed) error.
- IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
-
- // Key of the object to get.
- //
- // Key is a required field
- Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
- // Part number of the object being read. This is a positive integer between
- // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified.
- // Useful for downloading just a part of an object.
- PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
-
- // Downloads the specified range bytes of an object. For more information about
- // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
- // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35).
- //
- // Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
- Range *string `location:"header" locationName:"Range" type:"string"`
-
- // Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
- // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide.
- RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
- // Sets the Cache-Control header of the response.
- ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
-
- // Sets the Content-Disposition header of the response.
- ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
-
- // Sets the Content-Encoding header of the response.
- ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
-
- // Sets the Content-Language header of the response.
- ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
-
- // Sets the Content-Type header of the response.
- ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
-
- // Sets the Expires header of the response.
- ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"`
-
- // Specifies the algorithm to use when decrypting the object (for example,
- // AES256).
- SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
- // Specifies the customer-provided encryption key for Amazon S3 used to encrypt
- // the data. This value is used to decrypt the object when recovering it and
- // must match the one used when storing the data. The key must be appropriate
- // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
- // header.
- //
- // SSECustomerKey is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by GetObjectInput's
- // String and GoString methods.
- SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
-
- // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure that the
- // encryption key was transmitted without error.
- SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
- // VersionId used to reference a specific version of the object.
- VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetObjectInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetObjectInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { - s.Bucket = &v - return s -} - -func (s *GetObjectInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumMode sets the ChecksumMode field's value. -func (s *GetObjectInput) SetChecksumMode(v string) *GetObjectInput { - s.ChecksumMode = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *GetObjectInput) SetExpectedBucketOwner(v string) *GetObjectInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetIfMatch sets the IfMatch field's value. -func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { - s.IfMatch = &v - return s -} - -// SetIfModifiedSince sets the IfModifiedSince field's value. -func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { - s.IfModifiedSince = &v - return s -} - -// SetIfNoneMatch sets the IfNoneMatch field's value. -func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { - s.IfNoneMatch = &v - return s -} - -// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. -func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { - s.IfUnmodifiedSince = &v - return s -} - -// SetKey sets the Key field's value. -func (s *GetObjectInput) SetKey(v string) *GetObjectInput { - s.Key = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { - s.PartNumber = &v - return s -} - -// SetRange sets the Range field's value. -func (s *GetObjectInput) SetRange(v string) *GetObjectInput { - s.Range = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { - s.RequestPayer = &v - return s -} - -// SetResponseCacheControl sets the ResponseCacheControl field's value. -func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { - s.ResponseCacheControl = &v - return s -} - -// SetResponseContentDisposition sets the ResponseContentDisposition field's value. -func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { - s.ResponseContentDisposition = &v - return s -} - -// SetResponseContentEncoding sets the ResponseContentEncoding field's value. -func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { - s.ResponseContentEncoding = &v - return s -} - -// SetResponseContentLanguage sets the ResponseContentLanguage field's value. -func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { - s.ResponseContentLanguage = &v - return s -} - -// SetResponseContentType sets the ResponseContentType field's value. -func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { - s.ResponseContentType = &v - return s -} - -// SetResponseExpires sets the ResponseExpires field's value. 
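Taken together, the conditional headers, Range, and the response-* overrides documented above cover most read paths. A combined sketch with placeholder names and ETag; note that in this SDK a 304 or 412 precondition result surfaces as an error rather than a populated output:

package main

import (
	"fmt"
	"io"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObject((&s3.GetObjectInput{}).
		SetBucket("example-bucket").                // placeholder
		SetKey("example-key").                      // placeholder
		SetRange("bytes=0-1023").                   // S3 honors a single range per GET
		SetIfNoneMatch(`"686897696a7c876b7e"`).     // placeholder ETag
		SetResponseContentType("application/json")) // override the reply's Content-Type
	if err != nil {
		log.Fatal(err) // 304 / 412 precondition outcomes land here
	}
	defer out.Body.Close()

	n, _ := io.Copy(io.Discard, out.Body)
	fmt.Println(n, "bytes;", aws.StringValue(out.ContentRange))
}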
-func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { - s.ResponseExpires = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { - s.SSECustomerKey = &v - return s -} - -func (s *GetObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { - s.VersionId = &v - return s -} - -func (s *GetObjectInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetObjectInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetObjectLegalHoldInput struct { - _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` - - // The bucket name containing the object whose legal hold status you want to - // retrieve. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The key name for the object whose legal hold status you want to retrieve. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. 
- // For information about downloading objects from Requester Pays buckets, see Downloading Objects
- // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide.
- RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
- // The version ID of the object whose legal hold status you want to retrieve.
- VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetObjectLegalHoldInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetObjectLegalHoldInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetObjectLegalHoldInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"}
- if s.Bucket == nil {
- invalidParams.Add(request.NewErrParamRequired("Bucket"))
- }
- if s.Bucket != nil && len(*s.Bucket) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
- }
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.Key != nil && len(*s.Key) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Key", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput {
- s.Bucket = &v
- return s
-}
-
-func (s *GetObjectLegalHoldInput) getBucket() (v string) {
- if s.Bucket == nil {
- return v
- }
- return *s.Bucket
-}
-
-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
-func (s *GetObjectLegalHoldInput) SetExpectedBucketOwner(v string) *GetObjectLegalHoldInput {
- s.ExpectedBucketOwner = &v
- return s
-}
-
-// SetKey sets the Key field's value.
-func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput {
- s.Key = &v
- return s
-}
-
-// SetRequestPayer sets the RequestPayer field's value.
-func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput {
- s.RequestPayer = &v
- return s
-}
-
-// SetVersionId sets the VersionId field's value.
-func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput {
- s.VersionId = &v
- return s
-}
-
-func (s *GetObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) {
- if s.Bucket == nil {
- return nil, fmt.Errorf("member Bucket is nil")
- }
- return parseEndpointARN(*s.Bucket)
-}
-
-func (s *GetObjectLegalHoldInput) hasEndpointARN() bool {
- if s.Bucket == nil {
- return false
- }
- return arn.IsARN(*s.Bucket)
-}
-
-// updateArnableField updates the value of the input field that
-// takes an ARN as an input. This method is useful to backfill
-// the parsed resource name from ARN into the input member.
-// It returns a pointer to a modified copy of input and an error.
-// Note that original input is not modified.
-func (s GetObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetObjectLegalHoldOutput struct { - _ struct{} `type:"structure" payload:"LegalHold"` - - // The current legal hold status for the specified object. - LegalHold *ObjectLockLegalHold `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectLegalHoldOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectLegalHoldOutput) GoString() string { - return s.String() -} - -// SetLegalHold sets the LegalHold field's value. -func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput { - s.LegalHold = v - return s -} - -type GetObjectLockConfigurationInput struct { - _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` - - // The bucket whose Object Lock configuration you want to retrieve. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectLockConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectLockConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
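A short usage sketch for the legal hold getter defined above, with placeholder names; LegalHold is nil-checked because the payload is optional:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObjectLegalHold(&s3.GetObjectLegalHoldInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.LegalHold != nil && aws.StringValue(out.LegalHold.Status) == s3.ObjectLockLegalHoldStatusOn {
		fmt.Println("object is under legal hold")
	}
}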
-func (s *GetObjectLockConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput { - s.Bucket = &v - return s -} - -func (s *GetObjectLockConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *GetObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *GetObjectLockConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetObjectLockConfigurationOutput struct { - _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` - - // The specified bucket's Object Lock configuration. - ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectLockConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectLockConfigurationOutput) GoString() string { - return s.String() -} - -// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. -func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput { - s.ObjectLockConfiguration = v - return s -} - -type GetObjectOutput struct { - _ struct{} `type:"structure" payload:"Body"` - - // Indicates that a range of bytes was specified. - AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` - - // Object data. - Body io.ReadCloser `type:"blob"` - - // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Amazon Web Services KMS (SSE-KMS). 
- BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` - - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` - - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` - - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // The portion of the object returned in the response. - ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` - - // A standard MIME type describing the format of the object data. 
- ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // An entity tag (ETag) is an opaque identifier assigned by a web server to - // a specific version of a resource found at a URL. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is URL-encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *string `location:"header" locationName:"Expires" type:"string"` - - // Creation date of the object. - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` - - // A map of metadata to store with the object in S3. - // - // By default unmarshaled keys are written as map keys in the following canonicalized format: - // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. - // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, - // you can create metadata whose values are not legal HTTP headers. - MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - - // Indicates whether this object has an active legal hold. This field is only - // returned if you have permission to view an object's legal hold status. - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // The Object Lock mode currently in place for this object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when this object's Object Lock will expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - - // The count of parts this object has. This value is only returned if you specify - // partNumber in your request and the object was uploaded as a multipart upload. - PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` - - // Amazon S3 can return this if your request involves a bucket that is either - // a source or destination in a replication rule. - ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` - - // If present, indicates that the requester was successfully charged for the - // request. 
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Provides information about object restoration action and expiration time - // of the restored object copy. - Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. - // - // SSEKMSKeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GetObjectOutput's - // String and GoString methods. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Provides storage class information of the object. Amazon S3 returns this - // header for all objects except for S3 Standard storage class objects. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // The number of tags, if any, on the object. - TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectOutput) GoString() string { - return s.String() -} - -// SetAcceptRanges sets the AcceptRanges field's value. -func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { - s.AcceptRanges = &v - return s -} - -// SetBody sets the Body field's value. 
-func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { - s.Body = v - return s -} - -// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. -func (s *GetObjectOutput) SetBucketKeyEnabled(v bool) *GetObjectOutput { - s.BucketKeyEnabled = &v - return s -} - -// SetCacheControl sets the CacheControl field's value. -func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { - s.CacheControl = &v - return s -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *GetObjectOutput) SetChecksumCRC32(v string) *GetObjectOutput { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. -func (s *GetObjectOutput) SetChecksumCRC32C(v string) *GetObjectOutput { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *GetObjectOutput) SetChecksumSHA1(v string) *GetObjectOutput { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *GetObjectOutput) SetChecksumSHA256(v string) *GetObjectOutput { - s.ChecksumSHA256 = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. -func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { - s.ContentLanguage = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { - s.ContentLength = &v - return s -} - -// SetContentRange sets the ContentRange field's value. -func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { - s.ContentRange = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { - s.ContentType = &v - return s -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { - s.DeleteMarker = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { - s.ETag = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { - s.Expiration = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { - s.Expires = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { - s.LastModified = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { - s.Metadata = v - return s -} - -// SetMissingMeta sets the MissingMeta field's value. -func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { - s.MissingMeta = &v - return s -} - -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. 
-func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput { - s.ObjectLockRetainUntilDate = &v - return s -} - -// SetPartsCount sets the PartsCount field's value. -func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { - s.PartsCount = &v - return s -} - -// SetReplicationStatus sets the ReplicationStatus field's value. -func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { - s.ReplicationStatus = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { - s.RequestCharged = &v - return s -} - -// SetRestore sets the Restore field's value. -func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { - s.Restore = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { - s.StorageClass = &v - return s -} - -// SetTagCount sets the TagCount field's value. -func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { - s.TagCount = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { - s.VersionId = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { - s.WebsiteRedirectLocation = &v - return s -} - -type GetObjectRetentionInput struct { - _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` - - // The bucket name containing the object whose retention settings you want to - // retrieve. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. 
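Since GetObjectOutput carries the object payload as a stream, the caller is responsible for draining and closing Body; a sketch of typical response handling, reusing the client from the first sketch (this one also needs the standard io import, and the names remain placeholders):

func readObject(svc *s3.S3, bucket, key string) ([]byte, error) {
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return nil, err
	}
	defer out.Body.Close()
	// Response headers such as Content-Length and ETag surface as the
	// typed fields documented above.
	log.Printf("reading %d bytes, etag %s",
		aws.Int64Value(out.ContentLength), aws.StringValue(out.ETag))
	return io.ReadAll(out.Body)
}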
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The key name for the object whose retention settings you want to retrieve. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The version ID for the object whose retention settings you want to retrieve. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectRetentionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectRetentionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectRetentionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput { - s.Bucket = &v - return s -} - -func (s *GetObjectRetentionInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *GetObjectRetentionInput) SetExpectedBucketOwner(v string) *GetObjectRetentionInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. 
-func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { - s.VersionId = &v - return s -} - -func (s *GetObjectRetentionInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectRetentionInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetObjectRetentionInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetObjectRetentionOutput struct { - _ struct{} `type:"structure" payload:"Retention"` - - // The container element for an object's retention settings. - Retention *ObjectLockRetention `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectRetentionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectRetentionOutput) GoString() string { - return s.String() -} - -// SetRetention sets the Retention field's value. -func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { - s.Retention = v - return s -} - -type GetObjectTaggingInput struct { - _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` - - // The bucket name containing the object for which to get the tagging information. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. 
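A corresponding sketch for GetObjectRetention, which returns the mode and retain-until date wrapped in the ObjectLockRetention container above:

func printRetention(svc *s3.S3, bucket, key string) error {
	out, err := svc.GetObjectRetention(&s3.GetObjectRetentionInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	if out.Retention != nil {
		fmt.Printf("retention %s until %s\n",
			aws.StringValue(out.Retention.Mode),
			aws.TimeValue(out.Retention.RetainUntilDate))
	}
	return nil
}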
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Object key for which to get the tagging information. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The versionId of the object for which to get the tagging information. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { - s.Bucket = &v - return s -} - -func (s *GetObjectTaggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *GetObjectTaggingInput) SetExpectedBucketOwner(v string) *GetObjectTaggingInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectTaggingInput) SetRequestPayer(v string) *GetObjectTaggingInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. 
-func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { - s.VersionId = &v - return s -} - -func (s *GetObjectTaggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectTaggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetObjectTaggingInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetObjectTaggingOutput struct { - _ struct{} `type:"structure"` - - // Contains the tag set. - // - // TagSet is a required field - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` - - // The versionId of the object for which you got the tagging information. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectTaggingOutput) GoString() string { - return s.String() -} - -// SetTagSet sets the TagSet field's value. -func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { - s.TagSet = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { - s.VersionId = &v - return s -} - -type GetObjectTorrentInput struct { - _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` - - // The name of the bucket containing the object for which to get the torrent - // files. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The object key for which to get the information. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. 
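GetObjectTagging returns the tag set directly rather than behind a payload wrapper; a sketch of listing an object's tags with the same placeholder client:

func printTags(svc *s3.S3, bucket, key string) error {
	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	// TagSet is required in the response, but each entry is a pointer.
	for _, t := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
	return nil
}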
- RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectTorrentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectTorrentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectTorrentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { - s.Bucket = &v - return s -} - -func (s *GetObjectTorrentInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *GetObjectTorrentInput) SetExpectedBucketOwner(v string) *GetObjectTorrentInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { - s.RequestPayer = &v - return s -} - -func (s *GetObjectTorrentInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetObjectTorrentInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetObjectTorrentInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetObjectTorrentOutput struct { - _ struct{} `type:"structure" payload:"Body"` - - // A Bencoded dictionary as defined by the BitTorrent specification - Body io.ReadCloser `type:"blob"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectTorrentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetObjectTorrentOutput) GoString() string { - return s.String() -} - -// SetBody sets the Body field's value. -func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { - s.Body = v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { - s.RequestCharged = &v - return s -} - -type GetPublicAccessBlockInput struct { - _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` - - // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you - // want to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPublicAccessBlockInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPublicAccessBlockInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetPublicAccessBlockInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput { - s.Bucket = &v - return s -} - -func (s *GetPublicAccessBlockInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
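Because GetObjectTorrentOutput is another streaming payload, it is consumed the same way as GetObjectOutput; a sketch that writes the bencoded dictionary to a local file (needs the standard io and os imports; the path is a placeholder):

func saveTorrent(svc *s3.S3, bucket, key, path string) error {
	out, err := svc.GetObjectTorrent(&s3.GetObjectTorrentInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, out.Body)
	return err
}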
-func (s *GetPublicAccessBlockInput) SetExpectedBucketOwner(v string) *GetPublicAccessBlockInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s GetPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type GetPublicAccessBlockOutput struct { - _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` - - // The PublicAccessBlock configuration currently in effect for this Amazon S3 - // bucket. - PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPublicAccessBlockOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPublicAccessBlockOutput) GoString() string { - return s.String() -} - -// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. -func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { - s.PublicAccessBlockConfiguration = v - return s -} - -// Container for S3 Glacier job parameters. -type GlacierJobParameters struct { - _ struct{} `type:"structure"` - - // Retrieval tier at which the restore will be processed. - // - // Tier is a required field - Tier *string `type:"string" required:"true" enum:"Tier"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlacierJobParameters) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlacierJobParameters) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
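A sketch for the PublicAccessBlock getter above; the four booleans come back as pointers, so the aws helpers unwrap them (the nil check is defensive, since S3 generally returns an error when no configuration exists):

func printPublicAccessBlock(svc *s3.S3, bucket string) error {
	out, err := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	if cfg := out.PublicAccessBlockConfiguration; cfg != nil {
		fmt.Println("BlockPublicAcls:", aws.BoolValue(cfg.BlockPublicAcls))
		fmt.Println("BlockPublicPolicy:", aws.BoolValue(cfg.BlockPublicPolicy))
		fmt.Println("IgnorePublicAcls:", aws.BoolValue(cfg.IgnorePublicAcls))
		fmt.Println("RestrictPublicBuckets:", aws.BoolValue(cfg.RestrictPublicBuckets))
	}
	return nil
}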
-func (s *GlacierJobParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} - if s.Tier == nil { - invalidParams.Add(request.NewErrParamRequired("Tier")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTier sets the Tier field's value. -func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { - s.Tier = &v - return s -} - -// Container for grant information. -type Grant struct { - _ struct{} `type:"structure"` - - // The person being granted permissions. - Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` - - // Specifies the permission given to the grantee. - Permission *string `type:"string" enum:"Permission"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Grant) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Grant) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Grant) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Grant"} - if s.Grantee != nil { - if err := s.Grantee.Validate(); err != nil { - invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantee sets the Grantee field's value. -func (s *Grant) SetGrantee(v *Grantee) *Grant { - s.Grantee = v - return s -} - -// SetPermission sets the Permission field's value. -func (s *Grant) SetPermission(v string) *Grant { - s.Permission = &v - return s -} - -// Container for the person being granted permissions. -type Grantee struct { - _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` - - // Screen name of the grantee. - DisplayName *string `type:"string"` - - // Email address of the grantee. - // - // Using email addresses to specify a grantee is only supported in the following - // Amazon Web Services Regions: - // - // * US East (N. Virginia) - // - // * US West (N. California) - // - // * US West (Oregon) - // - // * Asia Pacific (Singapore) - // - // * Asia Pacific (Sydney) - // - // * Asia Pacific (Tokyo) - // - // * Europe (Ireland) - // - // * South America (São Paulo) - // - // For a list of all the Amazon S3 supported Regions and endpoints, see Regions - // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. - EmailAddress *string `type:"string"` - - // The canonical user ID of the grantee. - ID *string `type:"string"` - - // Type of grantee - // - // Type is a required field - Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` - - // URI of the grantee group. - URI *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s Grantee) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Grantee) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Grantee) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Grantee"} - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDisplayName sets the DisplayName field's value. -func (s *Grantee) SetDisplayName(v string) *Grantee { - s.DisplayName = &v - return s -} - -// SetEmailAddress sets the EmailAddress field's value. -func (s *Grantee) SetEmailAddress(v string) *Grantee { - s.EmailAddress = &v - return s -} - -// SetID sets the ID field's value. -func (s *Grantee) SetID(v string) *Grantee { - s.ID = &v - return s -} - -// SetType sets the Type field's value. -func (s *Grantee) SetType(v string) *Grantee { - s.Type = &v - return s -} - -// SetURI sets the URI field's value. -func (s *Grantee) SetURI(v string) *Grantee { - s.URI = &v - return s -} - -type HeadBucketInput struct { - _ struct{} `locationName:"HeadBucketRequest" type:"structure"` - - // The bucket name. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s HeadBucketInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
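The Grant and Grantee structures above are typically built by hand when constructing an ACL request, and their Validate methods enforce exactly the required members shown in the generated validators; a sketch using a hypothetical canonical user ID:

func exampleGrant() (*s3.Grant, error) {
	g := &s3.Grant{
		Grantee: &s3.Grantee{
			Type: aws.String(s3.TypeCanonicalUser),
			ID:   aws.String("example-canonical-user-id"), // placeholder
		},
		Permission: aws.String(s3.PermissionRead),
	}
	// Grantee.Type is the only member Validate requires.
	if err := g.Validate(); err != nil {
		return nil, err
	}
	return g, nil
}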
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s HeadBucketInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *HeadBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { - s.Bucket = &v - return s -} - -func (s *HeadBucketInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *HeadBucketInput) SetExpectedBucketOwner(v string) *HeadBucketInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *HeadBucketInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s HeadBucketInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type HeadBucketOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s HeadBucketOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s HeadBucketOutput) GoString() string { - return s.String() -} - -type HeadObjectInput struct { - _ struct{} `locationName:"HeadObjectRequest" type:"structure"` - - // The name of the bucket containing the object. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. 
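HeadBucketOutput is empty, so the useful signal from HeadBucket is the HTTP status code; a common existence-check sketch, where the 404/403 reading is conventional v1 SDK error handling via the awserr package:

func bucketExists(svc *s3.S3, bucket string) (bool, error) {
	_, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: aws.String(bucket)})
	if err == nil {
		return true, nil
	}
	// 404 means the bucket does not exist; 403 means it exists but the
	// caller cannot access it; anything else is a real failure.
	if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 404 {
		return false, nil
	}
	return false, err
}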
- // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // To retrieve the checksum, this parameter must be enabled. - // - // In addition, if you enable ChecksumMode and the object is encrypted with - // Amazon Web Services Key Management Service (Amazon Web Services KMS), you - // must have permission to use the kms:Decrypt action for the request to succeed. - ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Return the object only if its entity tag (ETag) is the same as the one specified; - // otherwise, return a 412 (precondition failed) error. - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` - - // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error. - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` - - // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. - IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` - - // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` - - // The object key. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Part number of the object being read. This is a positive integer between - // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. - // Useful for querying the size of the part and the number of parts in this - // object. - PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` - - // Because HeadObject returns only the metadata for an object, this parameter - // has no effect. - Range *string `location:"header" locationName:"Range" type:"string"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. 
- RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm - // header. - // - // SSECustomerKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by HeadObjectInput's - // String and GoString methods. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s HeadObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s HeadObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *HeadObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { - s.Bucket = &v - return s -} - -func (s *HeadObjectInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumMode sets the ChecksumMode field's value. -func (s *HeadObjectInput) SetChecksumMode(v string) *HeadObjectInput { - s.ChecksumMode = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *HeadObjectInput) SetExpectedBucketOwner(v string) *HeadObjectInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetIfMatch sets the IfMatch field's value. 
-func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { - s.IfMatch = &v - return s -} - -// SetIfModifiedSince sets the IfModifiedSince field's value. -func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput { - s.IfModifiedSince = &v - return s -} - -// SetIfNoneMatch sets the IfNoneMatch field's value. -func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput { - s.IfNoneMatch = &v - return s -} - -// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. -func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { - s.IfUnmodifiedSince = &v - return s -} - -// SetKey sets the Key field's value. -func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { - s.Key = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { - s.PartNumber = &v - return s -} - -// SetRange sets the Range field's value. -func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { - s.Range = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { - s.SSECustomerKey = &v - return s -} - -func (s *HeadObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { - s.VersionId = &v - return s -} - -func (s *HeadObjectInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *HeadObjectInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s HeadObjectInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type HeadObjectOutput struct { - _ struct{} `type:"structure"` - - // Indicates that a range of bytes was specified. - AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` - - // The archive state of the head object. - ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` - - // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Amazon Web Services KMS (SSE-KMS). 
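// The generated setters above chain, and per the String documentation above
// fields tagged sensitive:"true" are redacted when printed, so an input
// holding a customer-provided encryption key can be logged without leaking
// it. A sketch with a hypothetical 32-byte AES256 key:
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	input := new(s3.HeadObjectInput).
		SetBucket("example-bucket").
		SetKey("private/object.bin").
		SetSSECustomerAlgorithm("AES256").
		SetSSECustomerKey("0123456789abcdef0123456789abcdef")

	// Per the String docs above, SSECustomerKey prints as a redacted
	// placeholder rather than the raw key material.
	fmt.Println(input)
}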
- BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` - - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` - - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` - - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. 
- // If false, this response header does not appear in the response.
- DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
-
- // An entity tag (ETag) is an opaque identifier assigned by a web server to
- // a specific version of a resource found at a URL.
- ETag *string `location:"header" locationName:"ETag" type:"string"`
-
- // If the object expiration is configured (see PUT Bucket lifecycle), the response
- // includes this header. It includes the expiry-date and rule-id key-value pairs
- // providing object expiration information. The value of the rule-id is URL-encoded.
- Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
-
- // The date and time at which the object is no longer cacheable.
- Expires *string `location:"header" locationName:"Expires" type:"string"`
-
- // Creation date of the object.
- LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
-
- // A map of metadata to store with the object in S3.
- //
- // By default unmarshaled keys are written as a map keys in following canonicalized format:
- // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase.
- // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase.
- Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
-
- // This is set to the number of metadata entries not returned in x-amz-meta
- // headers. This can happen if you create metadata using an API like SOAP that
- // supports more flexible metadata than the REST API. For example, using SOAP,
- // you can create metadata whose values are not legal HTTP headers.
- MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
-
- // Specifies whether a legal hold is in effect for this object. This header
- // is only returned if the requester has the s3:GetObjectLegalHold permission.
- // This header is not returned if the specified version of this object has never
- // had a legal hold applied. For more information about S3 Object Lock, see
- // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
- ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
-
- // The Object Lock mode, if any, that's in effect for this object. This header
- // is only returned if the requester has the s3:GetObjectRetention permission.
- // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
- ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
-
- // The date and time when the Object Lock retention period expires. This header
- // is only returned if the requester has the s3:GetObjectRetention permission.
- ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
-
- // The count of parts this object has. This value is only returned if you specify
- // partNumber in your request and the object was uploaded as a multipart upload.
- PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
-
- // Amazon S3 can return this header if your request involves a bucket that is
- // either a source or a destination in a replication rule.
- // - // In replication, you have a source bucket on which you configure replication - // and destination bucket or buckets where Amazon S3 stores object replicas. - // When you request an object (GetObject) or object metadata (HeadObject) from - // these buckets, Amazon S3 will return the x-amz-replication-status header - // in the response as follows: - // - // * If requesting an object from the source bucket, Amazon S3 will return - // the x-amz-replication-status header if the object in your request is eligible - // for replication. For example, suppose that in your replication configuration, - // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects - // with key prefix TaxDocs. Any objects you upload with this key name prefix, - // for example TaxDocs/document1.pdf, are eligible for replication. For any - // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status - // header with value PENDING, COMPLETED or FAILED indicating object replication - // status. - // - // * If requesting an object from a destination bucket, Amazon S3 will return - // the x-amz-replication-status header with value REPLICA if the object in - // your request is a replica that Amazon S3 created and there is no replica - // modification replication in progress. - // - // * When replicating objects to multiple destination buckets, the x-amz-replication-status - // header acts differently. The header of the source object will only return - // a value of COMPLETED when replication is successful to all destinations. - // The header will remain at value PENDING until replication has completed - // for all destinations. If one or more destinations fails replication the - // header will return FAILED. - // - // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). - ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If the object is an archived object (an object whose storage class is GLACIER), - // the response includes this header if either the archive restoration is in - // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) - // or an archive copy is already restored. - // - // If an archive copy is already restored, the header value indicates when Amazon - // S3 is scheduled to delete the object copy. For example: - // - // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 - // GMT" - // - // If the object restoration is in progress, the header returns the value ongoing-request="true". - // - // For more information about archiving objects, see Transitioning Objects: - // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). - Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. 
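// A sketch of reading the response fields documented above after a HeadObject
// call; the session setup and bucket/key names are hypothetical, and
// aws.StringValue dereferences the optional headers safely.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("archive/2012/data.csv"),
	})
	if err != nil {
		fmt.Println("head object failed:", err)
		return
	}

	// x-amz-replication-status: PENDING, COMPLETED, FAILED, or REPLICA,
	// with the multi-destination semantics described above.
	fmt.Println("replication:", aws.StringValue(out.ReplicationStatus))

	// x-amz-restore: ongoing-request="true" while a GLACIER restore runs,
	// or an expiry-date once the temporary copy is available.
	fmt.Println("restore:", aws.StringValue(out.Restore))
}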
- SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. - // - // SSEKMSKeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by HeadObjectOutput's - // String and GoString methods. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // If the object is stored using server-side encryption either with an Amazon - // Web Services KMS key or an Amazon S3-managed encryption key, the response - // includes this header with the value of the server-side encryption algorithm - // used when storing this object in Amazon S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Provides storage class information of the object. Amazon S3 returns this - // header for all objects except for S3 Standard storage class objects. - // - // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s HeadObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s HeadObjectOutput) GoString() string { - return s.String() -} - -// SetAcceptRanges sets the AcceptRanges field's value. -func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { - s.AcceptRanges = &v - return s -} - -// SetArchiveStatus sets the ArchiveStatus field's value. -func (s *HeadObjectOutput) SetArchiveStatus(v string) *HeadObjectOutput { - s.ArchiveStatus = &v - return s -} - -// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. -func (s *HeadObjectOutput) SetBucketKeyEnabled(v bool) *HeadObjectOutput { - s.BucketKeyEnabled = &v - return s -} - -// SetCacheControl sets the CacheControl field's value. 
-func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { - s.CacheControl = &v - return s -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *HeadObjectOutput) SetChecksumCRC32(v string) *HeadObjectOutput { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. -func (s *HeadObjectOutput) SetChecksumCRC32C(v string) *HeadObjectOutput { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *HeadObjectOutput) SetChecksumSHA1(v string) *HeadObjectOutput { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *HeadObjectOutput) SetChecksumSHA256(v string) *HeadObjectOutput { - s.ChecksumSHA256 = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. -func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { - s.ContentLanguage = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { - s.ContentLength = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { - s.ContentType = &v - return s -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { - s.DeleteMarker = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { - s.ETag = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { - s.Expiration = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { - s.Expires = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { - s.LastModified = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { - s.Metadata = v - return s -} - -// SetMissingMeta sets the MissingMeta field's value. -func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { - s.MissingMeta = &v - return s -} - -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput { - s.ObjectLockRetainUntilDate = &v - return s -} - -// SetPartsCount sets the PartsCount field's value. 
-func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { - s.PartsCount = &v - return s -} - -// SetReplicationStatus sets the ReplicationStatus field's value. -func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { - s.ReplicationStatus = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { - s.RequestCharged = &v - return s -} - -// SetRestore sets the Restore field's value. -func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { - s.Restore = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { - s.StorageClass = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { - s.VersionId = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { - s.WebsiteRedirectLocation = &v - return s -} - -// Container for the Suffix element. -type IndexDocument struct { - _ struct{} `type:"structure"` - - // A suffix that is appended to a request that is for a directory on the website - // endpoint (for example,if the suffix is index.html and you make a request - // to samplebucket/images/ the data that is returned will be for the object - // with the key name images/index.html) The suffix must not be empty and must - // not include a slash character. - // - // Replacement must be made for object keys containing special characters (such - // as carriage returns) when using XML requests. For more information, see XML - // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). - // - // Suffix is a required field - Suffix *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IndexDocument) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IndexDocument) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
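// IndexDocument is consumed by the bucket-website APIs; a sketch of wiring
// the Suffix described above into PutBucketWebsite (the bucket name is
// hypothetical):
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Requests for a directory such as images/ are served images/index.html;
	// the suffix must be non-empty and must not contain a slash.
	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-website-bucket"),
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
		},
	})
	if err != nil {
		fmt.Println("put bucket website failed:", err)
	}
}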
-func (s *IndexDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} - if s.Suffix == nil { - invalidParams.Add(request.NewErrParamRequired("Suffix")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSuffix sets the Suffix field's value. -func (s *IndexDocument) SetSuffix(v string) *IndexDocument { - s.Suffix = &v - return s -} - -// Container element that identifies who initiated the multipart upload. -type Initiator struct { - _ struct{} `type:"structure"` - - // Name of the Principal. - DisplayName *string `type:"string"` - - // If the principal is an Amazon Web Services account, it provides the Canonical - // User ID. If the principal is an IAM User, it provides a user ARN value. - ID *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Initiator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Initiator) GoString() string { - return s.String() -} - -// SetDisplayName sets the DisplayName field's value. -func (s *Initiator) SetDisplayName(v string) *Initiator { - s.DisplayName = &v - return s -} - -// SetID sets the ID field's value. -func (s *Initiator) SetID(v string) *Initiator { - s.ID = &v - return s -} - -// Describes the serialization format of the object. -type InputSerialization struct { - _ struct{} `type:"structure"` - - // Describes the serialization of a CSV-encoded object. - CSV *CSVInput `type:"structure"` - - // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default - // Value: NONE. - CompressionType *string `type:"string" enum:"CompressionType"` - - // Specifies JSON as object's input serialization format. - JSON *JSONInput `type:"structure"` - - // Specifies Parquet as object's input serialization format. - Parquet *ParquetInput `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InputSerialization) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InputSerialization) GoString() string { - return s.String() -} - -// SetCSV sets the CSV field's value. -func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { - s.CSV = v - return s -} - -// SetCompressionType sets the CompressionType field's value. -func (s *InputSerialization) SetCompressionType(v string) *InputSerialization { - s.CompressionType = &v - return s -} - -// SetJSON sets the JSON field's value. -func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization { - s.JSON = v - return s -} - -// SetParquet sets the Parquet field's value. 
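// InputSerialization is consumed by SelectObjectContent; a sketch that scans
// a GZIP-compressed CSV object and asks for newline-delimited JSON back
// (bucket, key, and query are hypothetical):
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),
		Key:            aws.String("logs/2024-01.csv.gz"),
		Expression:     aws.String("SELECT s._1 FROM S3Object s"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			CompressionType: aws.String(s3.CompressionTypeGzip),
			CSV:             &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoNone)},
		},
		OutputSerialization: &s3.OutputSerialization{
			JSON: &s3.JSONOutput{RecordDelimiter: aws.String("\n")},
		},
	})
	if err != nil {
		fmt.Println("select failed:", err)
		return
	}
	defer out.EventStream.Close()

	// Records arrive as an event stream; a real caller would range over
	// out.EventStream.Events() here.
	fmt.Println("select started")
}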
-func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { - s.Parquet = v - return s -} - -// A container for specifying S3 Intelligent-Tiering filters. The filters determine -// the subset of objects to which the rule applies. -type IntelligentTieringAndOperator struct { - _ struct{} `type:"structure"` - - // An object key name prefix that identifies the subset of objects to which - // the configuration applies. - Prefix *string `type:"string"` - - // All of these tags must exist in the object's tag set in order for the configuration - // to apply. - Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IntelligentTieringAndOperator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IntelligentTieringAndOperator) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *IntelligentTieringAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. -func (s *IntelligentTieringAndOperator) SetPrefix(v string) *IntelligentTieringAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *IntelligentTieringAndOperator) SetTags(v []*Tag) *IntelligentTieringAndOperator { - s.Tags = v - return s -} - -// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. -// -// For information about the S3 Intelligent-Tiering storage class, see Storage -// class for automatically optimizing frequently and infrequently accessed objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). -type IntelligentTieringConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies a bucket filter. The configuration only includes objects that meet - // the filter's criteria. - Filter *IntelligentTieringFilter `type:"structure"` - - // The ID used to identify the S3 Intelligent-Tiering configuration. - // - // Id is a required field - Id *string `type:"string" required:"true"` - - // Specifies the status of the configuration. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"IntelligentTieringStatus"` - - // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. - // - // Tierings is a required field - Tierings []*Tiering `locationName:"Tiering" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
-// The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s IntelligentTieringConfiguration) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s IntelligentTieringConfiguration) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *IntelligentTieringConfiguration) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringConfiguration"}
- if s.Id == nil {
- invalidParams.Add(request.NewErrParamRequired("Id"))
- }
- if s.Status == nil {
- invalidParams.Add(request.NewErrParamRequired("Status"))
- }
- if s.Tierings == nil {
- invalidParams.Add(request.NewErrParamRequired("Tierings"))
- }
- if s.Filter != nil {
- if err := s.Filter.Validate(); err != nil {
- invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
- }
- }
- if s.Tierings != nil {
- for i, v := range s.Tierings {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tierings", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetFilter sets the Filter field's value.
-func (s *IntelligentTieringConfiguration) SetFilter(v *IntelligentTieringFilter) *IntelligentTieringConfiguration {
- s.Filter = v
- return s
-}
-
-// SetId sets the Id field's value.
-func (s *IntelligentTieringConfiguration) SetId(v string) *IntelligentTieringConfiguration {
- s.Id = &v
- return s
-}
-
-// SetStatus sets the Status field's value.
-func (s *IntelligentTieringConfiguration) SetStatus(v string) *IntelligentTieringConfiguration {
- s.Status = &v
- return s
-}
-
-// SetTierings sets the Tierings field's value.
-func (s *IntelligentTieringConfiguration) SetTierings(v []*Tiering) *IntelligentTieringConfiguration {
- s.Tierings = v
- return s
-}
-
-// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration
-// applies to.
-type IntelligentTieringFilter struct {
- _ struct{} `type:"structure"`
-
- // A conjunction (logical AND) of predicates, which is used in evaluating a
- // metrics filter. The operator must have at least two predicates, and an object
- // must match all of the predicates in order for the filter to apply.
- And *IntelligentTieringAndOperator `type:"structure"`
-
- // An object key name prefix that identifies the subset of objects to which
- // the rule applies.
- //
- // Replacement must be made for object keys containing special characters (such
- // as carriage returns) when using XML requests. For more information, see XML
- // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
- Prefix *string `type:"string"`
-
- // A container of a key value name pair.
- Tag *Tag `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
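// A sketch of assembling the configuration above and applying it with
// PutBucketIntelligentTieringConfiguration; the bucket name, filter prefix,
// and 90-day archive tier are hypothetical choices:
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	cfg := &s3.IntelligentTieringConfiguration{
		Id:     aws.String("archive-old-logs"),
		Status: aws.String(s3.IntelligentTieringStatusEnabled),
		// Only objects under logs/ are covered by this configuration.
		Filter: &s3.IntelligentTieringFilter{Prefix: aws.String("logs/")},
		Tierings: []*s3.Tiering{{
			AccessTier: aws.String(s3.IntelligentTieringAccessTierArchiveAccess),
			Days:       aws.Int64(90),
		}},
	}

	_, err := svc.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket:                          aws.String("example-bucket"),
		Id:                              cfg.Id,
		IntelligentTieringConfiguration: cfg,
	})
	if err != nil {
		fmt.Println("put intelligent-tiering configuration failed:", err)
	}
}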
-func (s IntelligentTieringFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IntelligentTieringFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *IntelligentTieringFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnd sets the And field's value. -func (s *IntelligentTieringFilter) SetAnd(v *IntelligentTieringAndOperator) *IntelligentTieringFilter { - s.And = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *IntelligentTieringFilter) SetPrefix(v string) *IntelligentTieringFilter { - s.Prefix = &v - return s -} - -// SetTag sets the Tag field's value. -func (s *IntelligentTieringFilter) SetTag(v *Tag) *IntelligentTieringFilter { - s.Tag = v - return s -} - -// Specifies the inventory configuration for an Amazon S3 bucket. For more information, -// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) -// in the Amazon S3 API Reference. -type InventoryConfiguration struct { - _ struct{} `type:"structure"` - - // Contains information about where to publish the inventory results. - // - // Destination is a required field - Destination *InventoryDestination `type:"structure" required:"true"` - - // Specifies an inventory filter. The inventory only includes objects that meet - // the filter's criteria. - Filter *InventoryFilter `type:"structure"` - - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `type:"string" required:"true"` - - // Object versions to include in the inventory list. If set to All, the list - // includes all the object versions, which adds the version-related fields VersionId, - // IsLatest, and DeleteMarker to the list. If set to Current, the list does - // not contain these version-related fields. - // - // IncludedObjectVersions is a required field - IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` - - // Specifies whether the inventory is enabled or disabled. If set to True, an - // inventory list is generated. If set to False, no inventory list is generated. - // - // IsEnabled is a required field - IsEnabled *bool `type:"boolean" required:"true"` - - // Contains the optional fields that are included in the inventory results. - OptionalFields []*string `locationNameList:"Field" type:"list" enum:"InventoryOptionalField"` - - // Specifies the schedule for generating inventory results. - // - // Schedule is a required field - Schedule *InventorySchedule `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
-// The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InventoryConfiguration) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InventoryConfiguration) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *InventoryConfiguration) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"}
- if s.Destination == nil {
- invalidParams.Add(request.NewErrParamRequired("Destination"))
- }
- if s.Id == nil {
- invalidParams.Add(request.NewErrParamRequired("Id"))
- }
- if s.IncludedObjectVersions == nil {
- invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions"))
- }
- if s.IsEnabled == nil {
- invalidParams.Add(request.NewErrParamRequired("IsEnabled"))
- }
- if s.Schedule == nil {
- invalidParams.Add(request.NewErrParamRequired("Schedule"))
- }
- if s.Destination != nil {
- if err := s.Destination.Validate(); err != nil {
- invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
- }
- }
- if s.Filter != nil {
- if err := s.Filter.Validate(); err != nil {
- invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
- }
- }
- if s.Schedule != nil {
- if err := s.Schedule.Validate(); err != nil {
- invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetDestination sets the Destination field's value.
-func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration {
- s.Destination = v
- return s
-}
-
-// SetFilter sets the Filter field's value.
-func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration {
- s.Filter = v
- return s
-}
-
-// SetId sets the Id field's value.
-func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration {
- s.Id = &v
- return s
-}
-
-// SetIncludedObjectVersions sets the IncludedObjectVersions field's value.
-func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration {
- s.IncludedObjectVersions = &v
- return s
-}
-
-// SetIsEnabled sets the IsEnabled field's value.
-func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration {
- s.IsEnabled = &v
- return s
-}
-
-// SetOptionalFields sets the OptionalFields field's value.
-func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration {
- s.OptionalFields = v
- return s
-}
-
-// SetSchedule sets the Schedule field's value.
-func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration {
- s.Schedule = v
- return s
-}
-
-// Specifies the inventory configuration for an Amazon S3 bucket.
-type InventoryDestination struct {
- _ struct{} `type:"structure"`
-
- // Contains the bucket name, file format, bucket owner (optional), and prefix
- // (optional) where inventory results are published.
- //
- // S3BucketDestination is a required field
- S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output.
-// The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InventoryDestination) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InventoryDestination) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *InventoryDestination) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"}
- if s.S3BucketDestination == nil {
- invalidParams.Add(request.NewErrParamRequired("S3BucketDestination"))
- }
- if s.S3BucketDestination != nil {
- if err := s.S3BucketDestination.Validate(); err != nil {
- invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetS3BucketDestination sets the S3BucketDestination field's value.
-func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination {
- s.S3BucketDestination = v
- return s
-}
-
-// Contains the type of server-side encryption used to encrypt the inventory
-// results.
-type InventoryEncryption struct {
- _ struct{} `type:"structure"`
-
- // Specifies the use of SSE-KMS to encrypt delivered inventory reports.
- SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"`
-
- // Specifies the use of SSE-S3 to encrypt delivered inventory reports.
- SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InventoryEncryption) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InventoryEncryption) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *InventoryEncryption) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"}
- if s.SSEKMS != nil {
- if err := s.SSEKMS.Validate(); err != nil {
- invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetSSEKMS sets the SSEKMS field's value.
-func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption {
- s.SSEKMS = v
- return s
-}
-
-// SetSSES3 sets the SSES3 field's value.
-func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption {
- s.SSES3 = v
- return s
-}
-
-// Specifies an inventory filter. The inventory only includes objects that meet
-// the filter's criteria.
-type InventoryFilter struct {
- _ struct{} `type:"structure"`
-
- // The prefix that an object must have to be included in the inventory results.
- //
- // Prefix is a required field
- Prefix *string `type:"string" required:"true"`
-}
-
-// String returns the string representation.
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InventoryFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InventoryFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventoryFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. -func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { - s.Prefix = &v - return s -} - -// Contains the bucket name, file format, bucket owner (optional), and prefix -// (optional) where inventory results are published. -type InventoryS3BucketDestination struct { - _ struct{} `type:"structure"` - - // The account ID that owns the destination S3 bucket. If no account ID is provided, - // the owner is not validated before exporting data. - // - // Although this value is optional, we strongly recommend that you set it to - // help prevent problems if the destination bucket ownership changes. - AccountId *string `type:"string"` - - // The Amazon Resource Name (ARN) of the bucket where inventory results will - // be published. - // - // Bucket is a required field - Bucket *string `type:"string" required:"true"` - - // Contains the type of server-side encryption used to encrypt the inventory - // results. - Encryption *InventoryEncryption `type:"structure"` - - // Specifies the output format of the inventory results. - // - // Format is a required field - Format *string `type:"string" required:"true" enum:"InventoryFormat"` - - // The prefix that is prepended to all inventory results. - Prefix *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InventoryS3BucketDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InventoryS3BucketDestination) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
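// A sketch that ties the inventory types above together and applies them with
// PutBucketInventoryConfiguration; the bucket names, account ID, and daily
// CSV report are hypothetical:
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	cfg := &s3.InventoryConfiguration{
		Id:                     aws.String("daily-csv"),
		IsEnabled:              aws.Bool(true),
		IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
		Schedule:               &s3.InventorySchedule{Frequency: aws.String(s3.InventoryFrequencyDaily)},
		// Only objects under data/ are listed in the report.
		Filter: &s3.InventoryFilter{Prefix: aws.String("data/")},
		Destination: &s3.InventoryDestination{
			S3BucketDestination: &s3.InventoryS3BucketDestination{
				// The destination is addressed by ARN, and AccountId guards
				// against the destination bucket changing hands.
				Bucket:    aws.String("arn:aws:s3:::example-inventory-dest"),
				AccountId: aws.String("111111111111"),
				Format:    aws.String(s3.InventoryFormatCsv),
				Prefix:    aws.String("reports/"),
			},
		},
	}

	_, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
		Bucket:                 aws.String("example-bucket"),
		Id:                     cfg.Id,
		InventoryConfiguration: cfg,
	})
	if err != nil {
		fmt.Println("put inventory configuration failed:", err)
	}
}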
-func (s *InventoryS3BucketDestination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - if s.Encryption != nil { - if err := s.Encryption.Validate(); err != nil { - invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { - s.AccountId = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { - s.Bucket = &v - return s -} - -func (s *InventoryS3BucketDestination) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetEncryption sets the Encryption field's value. -func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { - s.Encryption = v - return s -} - -// SetFormat sets the Format field's value. -func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { - s.Format = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { - s.Prefix = &v - return s -} - -// Specifies the schedule for generating inventory results. -type InventorySchedule struct { - _ struct{} `type:"structure"` - - // Specifies how frequently inventory results are produced. - // - // Frequency is a required field - Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InventorySchedule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InventorySchedule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InventorySchedule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} - if s.Frequency == nil { - invalidParams.Add(request.NewErrParamRequired("Frequency")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFrequency sets the Frequency field's value. -func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { - s.Frequency = &v - return s -} - -// Specifies JSON as object's input serialization format. -type JSONInput struct { - _ struct{} `type:"structure"` - - // The type of JSON. Valid values: Document, Lines. - Type *string `type:"string" enum:"JSONType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s JSONInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s JSONInput) GoString() string { - return s.String() -} - -// SetType sets the Type field's value. -func (s *JSONInput) SetType(v string) *JSONInput { - s.Type = &v - return s -} - -// Specifies JSON as request's output serialization format. -type JSONOutput struct { - _ struct{} `type:"structure"` - - // The value used to separate individual records in the output. If no value - // is specified, Amazon S3 uses a newline character ('\n'). - RecordDelimiter *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s JSONOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s JSONOutput) GoString() string { - return s.String() -} - -// SetRecordDelimiter sets the RecordDelimiter field's value. -func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { - s.RecordDelimiter = &v - return s -} - -// A container for object key name prefix and suffix filtering rules. -type KeyFilter struct { - _ struct{} `type:"structure"` - - // A list of containers for the key-value pair that defines the criteria for - // the filter rule. - FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyFilter) GoString() string { - return s.String() -} - -// SetFilterRules sets the FilterRules field's value. -func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { - s.FilterRules = v - return s -} - -// A container for specifying the configuration for Lambda notifications. -type LambdaFunctionConfiguration struct { - _ struct{} `type:"structure"` - - // The Amazon S3 bucket event for which to invoke the Lambda function. For more - // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. - // - // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` - - // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. 
- Filter *NotificationConfigurationFilter `type:"structure"` - - // An optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes - // when the specified event type occurs. - // - // LambdaFunctionArn is a required field - LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LambdaFunctionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LambdaFunctionConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LambdaFunctionConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) - } - if s.LambdaFunctionArn == nil { - invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEvents sets the Events field's value. -func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { - s.Events = v - return s -} - -// SetFilter sets the Filter field's value. -func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { - s.Id = &v - return s -} - -// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. -func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { - s.LambdaFunctionArn = &v - return s -} - -// Container for lifecycle rules. You can add as many as 1000 rules. -type LifecycleConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies lifecycle configuration rules for an Amazon S3 bucket. - // - // Rules is a required field - Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LifecycleConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LifecycleConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
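// A sketch combining LambdaFunctionConfiguration with the key filtering types
// above via PutBucketNotificationConfiguration; the bucket name and function
// ARN are hypothetical:
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"),
		NotificationConfiguration: &s3.NotificationConfiguration{
			LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{{
				// Id is optional; Amazon S3 assigns one when omitted.
				LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:111111111111:function:on-upload"),
				Events:            []*string{aws.String(s3.EventS3ObjectCreated)},
				// Invoke only for .jpg objects under images/.
				Filter: &s3.NotificationConfigurationFilter{
					Key: &s3.KeyFilter{FilterRules: []*s3.FilterRule{
						{Name: aws.String(s3.FilterRuleNamePrefix), Value: aws.String("images/")},
						{Name: aws.String(s3.FilterRuleNameSuffix), Value: aws.String(".jpg")},
					}},
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("put notification configuration failed:", err)
	}
}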
-func (s *LifecycleConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRules sets the Rules field's value. -func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { - s.Rules = v - return s -} - -// Container for the expiration for the lifecycle of the object. -type LifecycleExpiration struct { - _ struct{} `type:"structure"` - - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. - Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. - Days *int64 `type:"integer"` - - // Indicates whether Amazon S3 will remove a delete marker with no noncurrent - // versions. If set to true, the delete marker will be expired; if set to false - // the policy takes no action. This cannot be specified with Days or Date in - // a Lifecycle Expiration Policy. - ExpiredObjectDeleteMarker *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LifecycleExpiration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LifecycleExpiration) GoString() string { - return s.String() -} - -// SetDate sets the Date field's value. -func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { - s.Date = &v - return s -} - -// SetDays sets the Days field's value. -func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { - s.Days = &v - return s -} - -// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. -func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { - s.ExpiredObjectDeleteMarker = &v - return s -} - -// A lifecycle rule for individual objects in an Amazon S3 bucket. -type LifecycleRule struct { - _ struct{} `type:"structure"` - - // Specifies the days since the initiation of an incomplete multipart upload - // that Amazon S3 will wait before permanently removing all parts of the upload. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon S3 User Guide. - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - - // Specifies the expiration for the lifecycle of the object in the form of date, - // days and, whether the object has a delete marker. 
- Expiration *LifecycleExpiration `type:"structure"` - - // The Filter is used to identify objects that a Lifecycle Rule applies to. - // A Filter must have exactly one of Prefix, Tag, or And specified. Filter is - // required if the LifecycleRule does not contain a Prefix element. - Filter *LifecycleRuleFilter `type:"structure"` - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` - - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. - NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` - - // Specifies the transition rule for the lifecycle rule that describes when - // noncurrent objects transition to a specific storage class. If your bucket - // is versioning-enabled (or versioning is suspended), you can set this action - // to request that Amazon S3 transition noncurrent object versions to a specific - // storage class at a set period in the object's lifetime. - NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` - - // Prefix identifying one or more objects to which the rule applies. This is - // no longer used; use Filter instead. - // - // Replacement must be made for object keys containing special characters (such - // as carriage returns) when using XML requests. For more information, see XML - // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). - // - // Deprecated: Prefix has been deprecated - Prefix *string `deprecated:"true" type:"string"` - - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - - // Specifies when an Amazon S3 object transitions to a specified storage class. - Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LifecycleRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LifecycleRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *LifecycleRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. -func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { - s.AbortIncompleteMultipartUpload = v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { - s.Expiration = v - return s -} - -// SetFilter sets the Filter field's value. -func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { - s.Filter = v - return s -} - -// SetID sets the ID field's value. -func (s *LifecycleRule) SetID(v string) *LifecycleRule { - s.ID = &v - return s -} - -// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. -func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { - s.NoncurrentVersionExpiration = v - return s -} - -// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. -func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { - s.NoncurrentVersionTransitions = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { - s.Prefix = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { - s.Status = &v - return s -} - -// SetTransitions sets the Transitions field's value. -func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { - s.Transitions = v - return s -} - -// This is used in a Lifecycle Rule Filter to apply a logical AND to two or -// more predicates. The Lifecycle Rule will apply to any object matching all -// of the predicates configured inside the And operator. -type LifecycleRuleAndOperator struct { - _ struct{} `type:"structure"` - - // Minimum object size to which the rule applies. - ObjectSizeGreaterThan *int64 `type:"long"` - - // Maximum object size to which the rule applies. - ObjectSizeLessThan *int64 `type:"long"` - - // Prefix identifying one or more objects to which the rule applies. - Prefix *string `type:"string"` - - // All of these tags must exist in the object's tag set in order for the rule - // to apply. - Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LifecycleRuleAndOperator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s LifecycleRuleAndOperator) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleRuleAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetObjectSizeGreaterThan sets the ObjectSizeGreaterThan field's value. -func (s *LifecycleRuleAndOperator) SetObjectSizeGreaterThan(v int64) *LifecycleRuleAndOperator { - s.ObjectSizeGreaterThan = &v - return s -} - -// SetObjectSizeLessThan sets the ObjectSizeLessThan field's value. -func (s *LifecycleRuleAndOperator) SetObjectSizeLessThan(v int64) *LifecycleRuleAndOperator { - s.ObjectSizeLessThan = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { - s.Tags = v - return s -} - -// The Filter is used to identify objects that a Lifecycle Rule applies to. -// A Filter must have exactly one of Prefix, Tag, or And specified. -type LifecycleRuleFilter struct { - _ struct{} `type:"structure"` - - // This is used in a Lifecycle Rule Filter to apply a logical AND to two or - // more predicates. The Lifecycle Rule will apply to any object matching all - // of the predicates configured inside the And operator. - And *LifecycleRuleAndOperator `type:"structure"` - - // Minimum object size to which the rule applies. - ObjectSizeGreaterThan *int64 `type:"long"` - - // Maximum object size to which the rule applies. - ObjectSizeLessThan *int64 `type:"long"` - - // Prefix identifying one or more objects to which the rule applies. - // - // Replacement must be made for object keys containing special characters (such - // as carriage returns) when using XML requests. For more information, see XML - // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). - Prefix *string `type:"string"` - - // This tag must exist in the object's tag set in order for the rule to apply. - Tag *Tag `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LifecycleRuleFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LifecycleRuleFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *LifecycleRuleFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnd sets the And field's value. -func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { - s.And = v - return s -} - -// SetObjectSizeGreaterThan sets the ObjectSizeGreaterThan field's value. -func (s *LifecycleRuleFilter) SetObjectSizeGreaterThan(v int64) *LifecycleRuleFilter { - s.ObjectSizeGreaterThan = &v - return s -} - -// SetObjectSizeLessThan sets the ObjectSizeLessThan field's value. -func (s *LifecycleRuleFilter) SetObjectSizeLessThan(v int64) *LifecycleRuleFilter { - s.ObjectSizeLessThan = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { - s.Prefix = &v - return s -} - -// SetTag sets the Tag field's value. -func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { - s.Tag = v - return s -} - -type ListBucketAnalyticsConfigurationsInput struct { - _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"` - - // The name of the bucket from which analytics configurations are retrieved. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ContinuationToken that represents a placeholder from where this request - // should begin. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketAnalyticsConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketAnalyticsConfigurationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
-func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { - s.Bucket = &v - return s -} - -func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { - s.ContinuationToken = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *ListBucketAnalyticsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketAnalyticsConfigurationsInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s ListBucketAnalyticsConfigurationsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type ListBucketAnalyticsConfigurationsOutput struct { - _ struct{} `type:"structure"` - - // The list of analytics configurations for a bucket. - AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` - - // The marker that is used as a starting point for this analytics configuration - // list response. This value is present if it was sent in the request. - ContinuationToken *string `type:"string"` - - // Indicates whether the returned list of analytics configurations is complete. - // A value of true indicates that the list is not complete and the NextContinuationToken - // will be provided for a subsequent request. - IsTruncated *bool `type:"boolean"` - - // NextContinuationToken is sent when isTruncated is true, which indicates that - // there are more analytics configurations to list. The next request must include - // this NextContinuationToken. The token is obfuscated and is not a usable value. - NextContinuationToken *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketAnalyticsConfigurationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { - return s.String() -} - -// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. 
-func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { - s.AnalyticsConfigurationList = v - return s -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { - s.ContinuationToken = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { - s.IsTruncated = &v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { - s.NextContinuationToken = &v - return s -} - -type ListBucketIntelligentTieringConfigurationsInput struct { - _ struct{} `locationName:"ListBucketIntelligentTieringConfigurationsRequest" type:"structure"` - - // The name of the Amazon S3 bucket whose configuration you want to modify or - // retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ContinuationToken that represents a placeholder from where this request - // should begin. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketIntelligentTieringConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketIntelligentTieringConfigurationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketIntelligentTieringConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketIntelligentTieringConfigurationsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListBucketIntelligentTieringConfigurationsInput) SetBucket(v string) *ListBucketIntelligentTieringConfigurationsInput { - s.Bucket = &v - return s -} - -func (s *ListBucketIntelligentTieringConfigurationsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetContinuationToken sets the ContinuationToken field's value. 
-func (s *ListBucketIntelligentTieringConfigurationsInput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsInput { - s.ContinuationToken = &v - return s -} - -func (s *ListBucketIntelligentTieringConfigurationsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListBucketIntelligentTieringConfigurationsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s ListBucketIntelligentTieringConfigurationsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type ListBucketIntelligentTieringConfigurationsOutput struct { - _ struct{} `type:"structure"` - - // The ContinuationToken that represents a placeholder from where this request - // should begin. - ContinuationToken *string `type:"string"` - - // The list of S3 Intelligent-Tiering configurations for a bucket. - IntelligentTieringConfigurationList []*IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"list" flattened:"true"` - - // Indicates whether the returned list of analytics configurations is complete. - // A value of true indicates that the list is not complete and the NextContinuationToken - // will be provided for a subsequent request. - IsTruncated *bool `type:"boolean"` - - // The marker used to continue this inventory configuration listing. Use the - // NextContinuationToken from this response to continue the listing in a subsequent - // request. The continuation token is an opaque value that Amazon S3 understands. - NextContinuationToken *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketIntelligentTieringConfigurationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketIntelligentTieringConfigurationsOutput) GoString() string { - return s.String() -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketIntelligentTieringConfigurationsOutput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { - s.ContinuationToken = &v - return s -} - -// SetIntelligentTieringConfigurationList sets the IntelligentTieringConfigurationList field's value. -func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIntelligentTieringConfigurationList(v []*IntelligentTieringConfiguration) *ListBucketIntelligentTieringConfigurationsOutput { - s.IntelligentTieringConfigurationList = v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. 
-func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIsTruncated(v bool) *ListBucketIntelligentTieringConfigurationsOutput { - s.IsTruncated = &v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketIntelligentTieringConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { - s.NextContinuationToken = &v - return s -} - -type ListBucketInventoryConfigurationsInput struct { - _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` - - // The name of the bucket containing the inventory configurations to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The marker used to continue an inventory configuration listing that has been - // truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value - // that Amazon S3 understands. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketInventoryConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketInventoryConfigurationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketInventoryConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { - s.Bucket = &v - return s -} - -func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { - s.ContinuationToken = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
-func (s *ListBucketInventoryConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketInventoryConfigurationsInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s ListBucketInventoryConfigurationsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type ListBucketInventoryConfigurationsOutput struct { - _ struct{} `type:"structure"` - - // If sent in the request, the marker that is used as a starting point for this - // inventory configuration list response. - ContinuationToken *string `type:"string"` - - // The list of inventory configurations for a bucket. - InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` - - // Tells whether the returned list of inventory configurations is complete. - // A value of true indicates that the list is not complete and the NextContinuationToken - // is provided for a subsequent request. - IsTruncated *bool `type:"boolean"` - - // The marker used to continue this inventory configuration listing. Use the - // NextContinuationToken from this response to continue the listing in a subsequent - // request. The continuation token is an opaque value that Amazon S3 understands. - NextContinuationToken *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketInventoryConfigurationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketInventoryConfigurationsOutput) GoString() string { - return s.String() -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { - s.ContinuationToken = &v - return s -} - -// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { - s.InventoryConfigurationList = v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. 
-func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { - s.IsTruncated = &v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { - s.NextContinuationToken = &v - return s -} - -type ListBucketMetricsConfigurationsInput struct { - _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"` - - // The name of the bucket containing the metrics configurations to retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The marker that is used to continue a metrics configuration listing that - // has been truncated. Use the NextContinuationToken from a previously truncated - // list response to continue the listing. The continuation token is an opaque - // value that Amazon S3 understands. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketMetricsConfigurationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketMetricsConfigurationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListBucketMetricsConfigurationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { - s.Bucket = &v - return s -} - -func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { - s.ContinuationToken = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
-func (s *ListBucketMetricsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketMetricsConfigurationsInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s ListBucketMetricsConfigurationsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type ListBucketMetricsConfigurationsOutput struct { - _ struct{} `type:"structure"` - - // The marker that is used as a starting point for this metrics configuration - // list response. This value is present if it was sent in the request. - ContinuationToken *string `type:"string"` - - // Indicates whether the returned list of metrics configurations is complete. - // A value of true indicates that the list is not complete and the NextContinuationToken - // will be provided for a subsequent request. - IsTruncated *bool `type:"boolean"` - - // The list of metrics configurations for a bucket. - MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"` - - // The marker used to continue a metrics configuration listing that has been - // truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value - // that Amazon S3 understands. - NextContinuationToken *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketMetricsConfigurationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketMetricsConfigurationsOutput) GoString() string { - return s.String() -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { - s.ContinuationToken = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput { - s.IsTruncated = &v - return s -} - -// SetMetricsConfigurationList sets the MetricsConfigurationList field's value. 
-func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput { - s.MetricsConfigurationList = v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { - s.NextContinuationToken = &v - return s -} - -type ListBucketsInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketsInput) GoString() string { - return s.String() -} - -type ListBucketsOutput struct { - _ struct{} `type:"structure"` - - // The list of buckets owned by the requester. - Buckets []*Bucket `locationNameList:"Bucket" type:"list"` - - // The owner of the buckets listed. - Owner *Owner `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListBucketsOutput) GoString() string { - return s.String() -} - -// SetBuckets sets the Buckets field's value. -func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { - s.Buckets = v - return s -} - -// SetOwner sets the Owner field's value. -func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { - s.Owner = v - return s -} - -type ListMultipartUploadsInput struct { - _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` - - // The name of the bucket to which the multipart upload was initiated. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Character you use to group keys. - // - // All keys that contain the same string between the prefix, if specified, and - // the first occurrence of the delimiter after the prefix are grouped under - // a single result element, CommonPrefixes. If you don't specify the prefix - // parameter, then the substring starts at the beginning of the key. The keys - // that are grouped under CommonPrefixes result element are not returned elsewhere - // in the response. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Together with upload-id-marker, this parameter specifies the multipart upload - // after which listing should begin. - // - // If upload-id-marker is not specified, only the keys lexicographically greater - // than the specified key-marker will be included in the list. - // - // If upload-id-marker is specified, any multipart uploads for a key equal to - // the key-marker might also be included, provided those multipart uploads have - // upload IDs lexicographically greater than the specified upload-id-marker. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of multipart uploads, from 1 to 1,000, to return - // in the response body. 1,000 is the maximum number of uploads that can be - // returned in a response. - MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` - - // Lists in-progress uploads only for those keys that begin with the specified - // prefix. You can use prefixes to separate a bucket into different grouping - // of keys. (You can think of using prefix to make groups in the same way you'd - // use a folder in a file system.) - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Together with key-marker, specifies the multipart upload after which listing - // should begin. If key-marker is not specified, the upload-id-marker parameter - // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker - // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker. - UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListMultipartUploadsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListMultipartUploadsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListMultipartUploadsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { - s.Bucket = &v - return s -} - -func (s *ListMultipartUploadsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { - s.EncodingType = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *ListMultipartUploadsInput) SetExpectedBucketOwner(v string) *ListMultipartUploadsInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { - s.KeyMarker = &v - return s -} - -// SetMaxUploads sets the MaxUploads field's value. -func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { - s.MaxUploads = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { - s.Prefix = &v - return s -} - -// SetUploadIdMarker sets the UploadIdMarker field's value. -func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { - s.UploadIdMarker = &v - return s -} - -func (s *ListMultipartUploadsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListMultipartUploadsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. 
-func (s ListMultipartUploadsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type ListMultipartUploadsOutput struct { - _ struct{} `type:"structure"` - - // The name of the bucket to which the multipart upload was initiated. Does - // not return the access point ARN or access point alias if used. - Bucket *string `type:"string"` - - // If you specify a delimiter in the request, then the result returns each distinct - // key prefix containing the delimiter in a CommonPrefixes element. The distinct - // key prefixes are returned in the Prefix child element. - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - // Contains the delimiter you specified in the request. If you don't specify - // a delimiter in your request, this element is absent from the response. - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - // - // If you specify encoding-type request parameter, Amazon S3 includes this element - // in the response, and returns encoded key name values in the following response - // elements: - // - // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. - EncodingType *string `type:"string" enum:"EncodingType"` - - // Indicates whether the returned list of multipart uploads is truncated. A - // value of true indicates that the list was truncated. The list can be truncated - // if the number of multipart uploads exceeds the limit allowed or specified - // by max uploads. - IsTruncated *bool `type:"boolean"` - - // The key at or after which the listing began. - KeyMarker *string `type:"string"` - - // Maximum number of multipart uploads that could have been included in the - // response. - MaxUploads *int64 `type:"integer"` - - // When a list is truncated, this element specifies the value that should be - // used for the key-marker request parameter in a subsequent request. - NextKeyMarker *string `type:"string"` - - // When a list is truncated, this element specifies the value that should be - // used for the upload-id-marker request parameter in a subsequent request. - NextUploadIdMarker *string `type:"string"` - - // When a prefix is provided in the request, this field contains the specified - // prefix. The result contains only keys starting with the specified prefix. - Prefix *string `type:"string"` - - // Upload ID after which listing began. - UploadIdMarker *string `type:"string"` - - // Container for elements related to a particular multipart upload. A response - // can contain zero or more Upload elements. - Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListMultipartUploadsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListMultipartUploadsOutput) GoString() string { - return s.String() -} - -// SetBucket sets the Bucket field's value. 
-func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { - s.Bucket = &v - return s -} - -func (s *ListMultipartUploadsOutput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { - s.CommonPrefixes = v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { - s.EncodingType = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { - s.IsTruncated = &v - return s -} - -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { - s.KeyMarker = &v - return s -} - -// SetMaxUploads sets the MaxUploads field's value. -func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { - s.MaxUploads = &v - return s -} - -// SetNextKeyMarker sets the NextKeyMarker field's value. -func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { - s.NextKeyMarker = &v - return s -} - -// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. -func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { - s.NextUploadIdMarker = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { - s.Prefix = &v - return s -} - -// SetUploadIdMarker sets the UploadIdMarker field's value. -func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { - s.UploadIdMarker = &v - return s -} - -// SetUploads sets the Uploads field's value. -func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { - s.Uploads = v - return s -} - -type ListObjectVersionsInput struct { - _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` - - // The bucket name that contains the objects. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // A delimiter is a character that you specify to group keys. All keys that - // contain the same string between the prefix and the first occurrence of the - // delimiter are grouped under a single result element in CommonPrefixes. These - // groups are counted as one result against the max-keys limitation. These keys - // are not returned elsewhere in the response. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. 
- EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Specifies the key to start with when listing objects in a bucket. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of keys returned in the response. By default the - // action returns up to 1,000 key names. The response might contain fewer keys - // but will never contain more. If additional keys satisfy the search criteria, - // but were not returned because max-keys was exceeded, the response contains - // true. To return the additional keys, see key-marker - // and version-id-marker. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Use this parameter to select only those keys that begin with the specified - // prefix. You can use prefixes to separate a bucket into different groupings - // of keys. (You can think of using prefix to make groups in the same way you'd - // use a folder in a file system.) You can use prefix with delimiter to roll - // up numerous objects into a single result under CommonPrefixes. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Specifies the object version you want to start listing from. - VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListObjectVersionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListObjectVersionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListObjectVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { - s.Bucket = &v - return s -} - -func (s *ListObjectVersionsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { - s.EncodingType = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
-func (s *ListObjectVersionsInput) SetExpectedBucketOwner(v string) *ListObjectVersionsInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { - s.KeyMarker = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { - s.MaxKeys = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { - s.Prefix = &v - return s -} - -// SetVersionIdMarker sets the VersionIdMarker field's value. -func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { - s.VersionIdMarker = &v - return s -} - -func (s *ListObjectVersionsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListObjectVersionsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s ListObjectVersionsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type ListObjectVersionsOutput struct { - _ struct{} `type:"structure"` - - // All of the keys rolled up into a common prefix count as a single return when - // calculating the number of returns. - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - // Container for an object that is a delete marker. - DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` - - // The delimiter grouping the included keys. A delimiter is a character that - // you specify to group keys. All keys that contain the same string between - // the prefix and the first occurrence of the delimiter are grouped under a - // single result element in CommonPrefixes. These groups are counted as one - // result against the max-keys limitation. These keys are not returned elsewhere - // in the response. - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object key names in the XML response. - // - // If you specify encoding-type request parameter, Amazon S3 includes this element - // in the response, and returns encoded key name values in the following response - // elements: - // - // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. - EncodingType *string `type:"string" enum:"EncodingType"` - - // A flag that indicates whether Amazon S3 returned all of the results that - // satisfied the search criteria. If your results were truncated, you can make - // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker - // response parameters as a starting place in another request to return the - // rest of the results. - IsTruncated *bool `type:"boolean"` - - // Marks the last key returned in a truncated response. - KeyMarker *string `type:"string"` - - // Specifies the maximum number of objects to return. - MaxKeys *int64 `type:"integer"` - - // The bucket name. 
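The getEndpointARN/hasEndpointARN/updateArnableField trio shown here is the hook that lets the Bucket member carry an S3 access point ARN instead of a bucket name. A hedged sketch of what that looks like at the call site; the account, region, and access point name are invented, and the SDK's endpoint handling (not shown in this diff hunk) rewrites the request based on these helpers:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"), // expected to match the ARN's region
	})))
	// A hypothetical access point ARN passed where a bucket name is expected;
	// hasEndpointARN reports true for this input, and the request is routed
	// to the access point endpoint rather than a bucket endpoint.
	out, err := svc.ListObjectVersions(&s3.ListObjectVersionsInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("versions returned:", len(out.Versions))
}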
- Name *string `type:"string"` - - // When the number of responses exceeds the value of MaxKeys, NextKeyMarker - // specifies the first key not returned that satisfies the search criteria. - // Use this value for the key-marker request parameter in a subsequent request. - NextKeyMarker *string `type:"string"` - - // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker - // specifies the first object version not returned that satisfies the search - // criteria. Use this value for the version-id-marker request parameter in a - // subsequent request. - NextVersionIdMarker *string `type:"string"` - - // Selects objects that start with the value supplied by this parameter. - Prefix *string `type:"string"` - - // Marks the last version of the key returned in a truncated response. - VersionIdMarker *string `type:"string"` - - // Container for version information. - Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListObjectVersionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListObjectVersionsOutput) GoString() string { - return s.String() -} - -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { - s.CommonPrefixes = v - return s -} - -// SetDeleteMarkers sets the DeleteMarkers field's value. -func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { - s.DeleteMarkers = v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { - s.EncodingType = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { - s.IsTruncated = &v - return s -} - -// SetKeyMarker sets the KeyMarker field's value. -func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { - s.KeyMarker = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { - s.MaxKeys = &v - return s -} - -// SetName sets the Name field's value. -func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { - s.Name = &v - return s -} - -// SetNextKeyMarker sets the NextKeyMarker field's value. -func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { - s.NextKeyMarker = &v - return s -} - -// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. -func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { - s.NextVersionIdMarker = &v - return s -} - -// SetPrefix sets the Prefix field's value. 
-func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { - s.Prefix = &v - return s -} - -// SetVersionIdMarker sets the VersionIdMarker field's value. -func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { - s.VersionIdMarker = &v - return s -} - -// SetVersions sets the Versions field's value. -func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { - s.Versions = v - return s -} - -type ListObjectsInput struct { - _ struct{} `locationName:"ListObjectsRequest" type:"structure"` - - // The name of the bucket containing the objects. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. Marker can be any key in the bucket. - Marker *string `location:"querystring" locationName:"marker" type:"string"` - - // Sets the maximum number of keys returned in the response. By default the - // action returns up to 1,000 key names. The response might contain fewer keys - // but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Limits the response to keys that begin with the specified prefix. 
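KeyMarker/VersionIdMarker on the input and NextKeyMarker/NextVersionIdMarker on the output form the resume cursor for ListObjectVersions. A minimal manual-pagination sketch, assuming a placeholder bucket:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	in := &s3.ListObjectVersionsInput{Bucket: aws.String("example-bucket")} // placeholder
	for {
		out, err := svc.ListObjectVersions(in)
		if err != nil {
			log.Fatal(err)
		}
		for _, v := range out.Versions {
			fmt.Println(aws.StringValue(v.Key), aws.StringValue(v.VersionId))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		// Resume exactly where the truncated page left off.
		in.KeyMarker = out.NextKeyMarker
		in.VersionIdMarker = out.NextVersionIdMarker
	}
}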
- Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // list objects request. Bucket owners need not specify this parameter in their - // requests. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListObjectsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListObjectsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListObjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { - s.Bucket = &v - return s -} - -func (s *ListObjectsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { - s.EncodingType = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *ListObjectsInput) SetExpectedBucketOwner(v string) *ListObjectsInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { - s.Marker = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { - s.MaxKeys = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { - s.Prefix = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { - s.RequestPayer = &v - return s -} - -func (s *ListObjectsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListObjectsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. 
-func (s ListObjectsInput) updateArnableField(v string) (interface{}, error) {
-    if s.Bucket == nil {
-        return nil, fmt.Errorf("member Bucket is nil")
-    }
-    s.Bucket = aws.String(v)
-    return &s, nil
-}
-
-type ListObjectsOutput struct {
-    _ struct{} `type:"structure"`
-
-    // All of the keys (up to 1,000) rolled up in a common prefix count as a single
-    // return when calculating the number of returns.
-    //
-    // A response can contain CommonPrefixes only if you specify a delimiter.
-    //
-    // CommonPrefixes contains all (if there are any) keys between Prefix and the
-    // next occurrence of the string specified by the delimiter.
-    //
-    // CommonPrefixes lists keys that act like subdirectories in the directory specified
-    // by Prefix.
-    //
-    // For example, if the prefix is notes/ and the delimiter is a slash (/) as
-    // in notes/summer/july, the common prefix is notes/summer/. All of the keys
-    // that roll up into a common prefix count as a single return when calculating
-    // the number of returns.
-    CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
-
-    // Metadata about each object returned.
-    Contents []*Object `type:"list" flattened:"true"`
-
-    // Causes keys that contain the same string between the prefix and the first
-    // occurrence of the delimiter to be rolled up into a single result element
-    // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere
-    // in the response. Each rolled-up result counts as only one return against
-    // the MaxKeys value.
-    Delimiter *string `type:"string"`
-
-    // Encoding type used by Amazon S3 to encode object keys in the response.
-    EncodingType *string `type:"string" enum:"EncodingType"`
-
-    // A flag that indicates whether Amazon S3 returned all of the results that
-    // satisfied the search criteria.
-    IsTruncated *bool `type:"boolean"`
-
-    // Indicates where in the bucket listing begins. Marker is included in the response
-    // if it was sent with the request.
-    Marker *string `type:"string"`
-
-    // The maximum number of keys returned in the response body.
-    MaxKeys *int64 `type:"integer"`
-
-    // The bucket name.
-    Name *string `type:"string"`
-
-    // When the response is truncated (the IsTruncated element value in the response
-    // is true), you can use the key name in this field as the marker in the subsequent
-    // request to get the next set of objects. Amazon S3 lists objects in alphabetical
-    // order. Note: this element is returned only if you have the delimiter request
-    // parameter specified. If the response does not include the NextMarker and it
-    // is truncated, you can use the value of the last Key in the response as the
-    // marker in the subsequent request to get the next set of object keys.
-    NextMarker *string `type:"string"`
-
-    // Keys that begin with the indicated prefix.
-    Prefix *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListObjectsOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListObjectsOutput) GoString() string { - return s.String() -} - -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { - s.CommonPrefixes = v - return s -} - -// SetContents sets the Contents field's value. -func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { - s.Contents = v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { - s.EncodingType = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { - s.IsTruncated = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { - s.Marker = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { - s.MaxKeys = &v - return s -} - -// SetName sets the Name field's value. -func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { - s.Name = &v - return s -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { - s.NextMarker = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { - s.Prefix = &v - return s -} - -type ListObjectsV2Input struct { - _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` - - // Bucket name to list. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key. - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - - // A delimiter is a character you use to group keys. 
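The NextMarker caveat above (it is only returned when a delimiter is specified; otherwise fall back to the last returned Key) is easy to get wrong. A sketch of v1 ListObjects pagination that honors it; the bucket name is a placeholder, and the SDK's ListObjectsPages helper encodes the same fallback internally:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	in := &s3.ListObjectsInput{Bucket: aws.String("example-bucket")} // placeholder
	for {
		out, err := svc.ListObjects(in)
		if err != nil {
			log.Fatal(err)
		}
		for _, o := range out.Contents {
			fmt.Println(aws.StringValue(o.Key), aws.Int64Value(o.Size))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		// NextMarker is present only when a delimiter was sent; otherwise the
		// last returned Key is the marker for the next page.
		if out.NextMarker != nil {
			in.Marker = out.NextMarker
		} else if n := len(out.Contents); n > 0 {
			in.Marker = out.Contents[n-1].Key
		} else {
			break
		}
	}
}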
- Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The owner field is not present in listV2 by default, if you want to return - // owner field with each key in the result then set the fetch owner field to - // true. - FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` - - // Sets the maximum number of keys returned in the response. By default the - // action returns up to 1,000 key names. The response might contain fewer keys - // but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // list objects request in V2 style. Bucket owners need not specify this parameter - // in their requests. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket. - StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListObjectsV2Input) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListObjectsV2Input) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListObjectsV2Input) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { - s.Bucket = &v - return s -} - -func (s *ListObjectsV2Input) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { - s.ContinuationToken = &v - return s -} - -// SetDelimiter sets the Delimiter field's value. 
-func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { - s.EncodingType = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *ListObjectsV2Input) SetExpectedBucketOwner(v string) *ListObjectsV2Input { - s.ExpectedBucketOwner = &v - return s -} - -// SetFetchOwner sets the FetchOwner field's value. -func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { - s.FetchOwner = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { - s.MaxKeys = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { - s.Prefix = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { - s.RequestPayer = &v - return s -} - -// SetStartAfter sets the StartAfter field's value. -func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { - s.StartAfter = &v - return s -} - -func (s *ListObjectsV2Input) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListObjectsV2Input) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type ListObjectsV2Output struct { - _ struct{} `type:"structure"` - - // All of the keys (up to 1,000) rolled up into a common prefix count as a single - // return when calculating the number of returns. - // - // A response can contain CommonPrefixes only if you specify a delimiter. - // - // CommonPrefixes contains all (if there are any) keys between Prefix and the - // next occurrence of the string specified by a delimiter. - // - // CommonPrefixes lists keys that act like subdirectories in the directory specified - // by Prefix. - // - // For example, if the prefix is notes/ and the delimiter is a slash (/) as - // in notes/summer/july, the common prefix is notes/summer/. All of the keys - // that roll up into a common prefix count as a single return when calculating - // the number of returns. - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - // Metadata about each object returned. - Contents []*Object `type:"list" flattened:"true"` - - // If ContinuationToken was sent with the request, it is included in the response. - ContinuationToken *string `type:"string"` - - // Causes keys that contain the same string between the prefix and the first - // occurrence of the delimiter to be rolled up into a single result element - // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere - // in the response. 
-    // Each rolled-up result counts as only one return against the MaxKeys value.
-    Delimiter *string `type:"string"`
-
-    // Encoding type used by Amazon S3 to encode object key names in the XML response.
-    //
-    // If you specify the encoding-type request parameter, Amazon S3 includes this
-    // element in the response, and returns encoded key name values in the following
-    // response elements:
-    //
-    // Delimiter, Prefix, Key, and StartAfter.
-    EncodingType *string `type:"string" enum:"EncodingType"`
-
-    // Set to false if all of the results were returned. Set to true if more keys
-    // are available to return. If the number of results exceeds that specified
-    // by MaxKeys, all of the results might not be returned.
-    IsTruncated *bool `type:"boolean"`
-
-    // KeyCount is the number of keys returned with this request. KeyCount will
-    // always be less than or equal to the MaxKeys field. For example, if you ask
-    // for 50 keys, your result will include 50 keys or fewer.
-    KeyCount *int64 `type:"integer"`
-
-    // Sets the maximum number of keys returned in the response. By default the
-    // action returns up to 1,000 key names. The response might contain fewer keys
-    // but will never contain more.
-    MaxKeys *int64 `type:"integer"`
-
-    // The bucket name.
-    //
-    // When using this action with an access point, you must direct requests to
-    // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
-    // When using this action with an access point through the Amazon Web Services
-    // SDKs, you provide the access point ARN in place of the bucket name. For more
-    // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
-    // in the Amazon S3 User Guide.
-    //
-    // When using this action with Amazon S3 on Outposts, you must direct requests
-    // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
-    // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
-    // using this action with S3 on Outposts through the Amazon Web Services SDKs,
-    // you provide the Outposts bucket ARN in place of the bucket name. For more
-    // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
-    // in the Amazon S3 User Guide.
-    Name *string `type:"string"`
-
-    // NextContinuationToken is sent when IsTruncated is true, which means there
-    // are more keys in the bucket that can be listed. Subsequent list requests to
-    // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken
-    // is obfuscated and is not a real key.
-    NextContinuationToken *string `type:"string"`
-
-    // Keys that begin with the indicated prefix.
-    Prefix *string `type:"string"`
-
-    // If StartAfter was sent with the request, it is included in the response.
-    StartAfter *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListObjectsV2Output) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output.
The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListObjectsV2Output) GoString() string { - return s.String() -} - -// SetCommonPrefixes sets the CommonPrefixes field's value. -func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output { - s.CommonPrefixes = v - return s -} - -// SetContents sets the Contents field's value. -func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { - s.Contents = v - return s -} - -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { - s.ContinuationToken = &v - return s -} - -// SetDelimiter sets the Delimiter field's value. -func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { - s.Delimiter = &v - return s -} - -// SetEncodingType sets the EncodingType field's value. -func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { - s.EncodingType = &v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { - s.IsTruncated = &v - return s -} - -// SetKeyCount sets the KeyCount field's value. -func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { - s.KeyCount = &v - return s -} - -// SetMaxKeys sets the MaxKeys field's value. -func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { - s.MaxKeys = &v - return s -} - -// SetName sets the Name field's value. -func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { - s.Name = &v - return s -} - -// SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { - s.NextContinuationToken = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { - s.Prefix = &v - return s -} - -// SetStartAfter sets the StartAfter field's value. -func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { - s.StartAfter = &v - return s -} - -type ListPartsInput struct { - _ struct{} `locationName:"ListPartsRequest" type:"structure"` - - // The name of the bucket to which the parts are being uploaded. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. 
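Unlike the v1 marker scheme, ListObjectsV2 pages on an opaque ContinuationToken/NextContinuationToken pair. A minimal sketch with the Pages helper; bucket, prefix, and page size are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// The helper copies NextContinuationToken into ContinuationToken between
	// pages; FetchOwner opts in to Owner data, which V2 omits by default.
	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket:     aws.String("example-bucket"), // placeholder
		Prefix:     aws.String("documents/"),     // placeholder
		MaxKeys:    aws.Int64(100),
		FetchOwner: aws.Bool(true),
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		fmt.Println("keys in page:", aws.Int64Value(page.KeyCount))
		return true // continue until IsTruncated is false
	})
	if err != nil {
		log.Fatal(err)
	}
}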
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Object key for which the multipart upload was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Sets the maximum number of parts to return. - MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` - - // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. - PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The server-side encryption (SSE) algorithm used to encrypt the object. This - // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // The server-side encryption (SSE) customer managed key. This parameter is - // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. - // - // SSECustomerKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by ListPartsInput's - // String and GoString methods. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // The MD5 server-side encryption (SSE) customer managed key. This parameter - // is needed only when the object was created using a checksum algorithm. For - // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Upload ID identifying the multipart upload whose parts are being listed. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListPartsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListPartsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListPartsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { - s.Bucket = &v - return s -} - -func (s *ListPartsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *ListPartsInput) SetExpectedBucketOwner(v string) *ListPartsInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *ListPartsInput) SetKey(v string) *ListPartsInput { - s.Key = &v - return s -} - -// SetMaxParts sets the MaxParts field's value. -func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { - s.MaxParts = &v - return s -} - -// SetPartNumberMarker sets the PartNumberMarker field's value. -func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { - s.PartNumberMarker = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *ListPartsInput) SetSSECustomerAlgorithm(v string) *ListPartsInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *ListPartsInput) SetSSECustomerKey(v string) *ListPartsInput { - s.SSECustomerKey = &v - return s -} - -func (s *ListPartsInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *ListPartsInput) SetSSECustomerKeyMD5(v string) *ListPartsInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { - s.UploadId = &v - return s -} - -func (s *ListPartsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *ListPartsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. 
This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s ListPartsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type ListPartsOutput struct { - _ struct{} `type:"structure"` - - // If the bucket has a lifecycle rule configured with an action to abort incomplete - // multipart uploads and the prefix in the lifecycle rule matches the object - // name in the request, then the response includes this header indicating when - // the initiated multipart upload will become eligible for abort operation. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). - // - // The response will also include the x-amz-abort-rule-id header that will provide - // the ID of the lifecycle configuration rule that defines this action. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - - // This header is returned along with the x-amz-abort-date header. It identifies - // applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. - AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` - - // The name of the bucket to which the multipart upload was initiated. Does - // not return the access point ARN or access point alias if used. - Bucket *string `type:"string"` - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm *string `type:"string" enum:"ChecksumAlgorithm"` - - // Container element that identifies who initiated the multipart upload. If - // the initiator is an Amazon Web Services account, this element provides the - // same information as the Owner element. If the initiator is an IAM User, this - // element provides the user ARN and display name. - Initiator *Initiator `type:"structure"` - - // Indicates whether the returned list of parts is truncated. A true value indicates - // that the list was truncated. A list can be truncated if the number of parts - // exceeds the limit returned in the MaxParts element. - IsTruncated *bool `type:"boolean"` - - // Object key for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` - - // Maximum number of parts that were allowed in the response. - MaxParts *int64 `type:"integer"` - - // When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the part-number-marker request parameter - // in a subsequent request. - NextPartNumberMarker *int64 `type:"integer"` - - // Container element that identifies the object owner, after the object is created. - // If multipart upload is initiated by an IAM user, this element provides the - // parent account ID and display name. - Owner *Owner `type:"structure"` - - // When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the part-number-marker request parameter - // in a subsequent request. - PartNumberMarker *int64 `type:"integer"` - - // Container for elements related to a particular part. A response can contain - // zero or more Part elements. 
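ListParts pages through an in-progress multipart upload using PartNumberMarker and NextPartNumberMarker. A short sketch, assuming a placeholder bucket and key and an upload ID previously returned by CreateMultipartUpload:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	err := svc.ListPartsPages(&s3.ListPartsInput{
		Bucket:   aws.String("example-bucket"),      // placeholder
		Key:      aws.String("backups/archive.tar"), // placeholder
		UploadId: aws.String("example-upload-id"),   // from CreateMultipartUpload
	}, func(page *s3.ListPartsOutput, lastPage bool) bool {
		for _, p := range page.Parts {
			fmt.Println(aws.Int64Value(p.PartNumber), aws.StringValue(p.ETag))
		}
		return true // the helper resends NextPartNumberMarker as part-number-marker
	})
	if err != nil {
		log.Fatal(err)
	}
}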
- Parts []*Part `locationName:"Part" type:"list" flattened:"true"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded - // object. - StorageClass *string `type:"string" enum:"StorageClass"` - - // Upload ID identifying the multipart upload whose parts are being listed. - UploadId *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListPartsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListPartsOutput) GoString() string { - return s.String() -} - -// SetAbortDate sets the AbortDate field's value. -func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { - s.AbortDate = &v - return s -} - -// SetAbortRuleId sets the AbortRuleId field's value. -func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { - s.AbortRuleId = &v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { - s.Bucket = &v - return s -} - -func (s *ListPartsOutput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *ListPartsOutput) SetChecksumAlgorithm(v string) *ListPartsOutput { - s.ChecksumAlgorithm = &v - return s -} - -// SetInitiator sets the Initiator field's value. -func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { - s.Initiator = v - return s -} - -// SetIsTruncated sets the IsTruncated field's value. -func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { - s.IsTruncated = &v - return s -} - -// SetKey sets the Key field's value. -func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { - s.Key = &v - return s -} - -// SetMaxParts sets the MaxParts field's value. -func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { - s.MaxParts = &v - return s -} - -// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. -func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { - s.NextPartNumberMarker = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { - s.Owner = v - return s -} - -// SetPartNumberMarker sets the PartNumberMarker field's value. -func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { - s.PartNumberMarker = &v - return s -} - -// SetParts sets the Parts field's value. -func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { - s.Parts = v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { - s.RequestCharged = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. 
-func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { - s.StorageClass = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { - s.UploadId = &v - return s -} - -// Describes an Amazon S3 location that will receive the results of the restore -// request. -type Location struct { - _ struct{} `type:"structure"` - - // A list of grants that control access to the staged results. - AccessControlList []*Grant `locationNameList:"Grant" type:"list"` - - // The name of the bucket where the restore results will be placed. - // - // BucketName is a required field - BucketName *string `type:"string" required:"true"` - - // The canned ACL to apply to the restore results. - CannedACL *string `type:"string" enum:"ObjectCannedACL"` - - // Contains the type of server-side encryption used. - Encryption *Encryption `type:"structure"` - - // The prefix that is prepended to the restore results for this request. - // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` - - // The class of storage used to store the restore results. - StorageClass *string `type:"string" enum:"StorageClass"` - - // The tag-set that is applied to the restore results. - Tagging *Tagging `type:"structure"` - - // A list of metadata to store with the restore results in S3. - UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Location) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Location) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Location) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Location"} - if s.BucketName == nil { - invalidParams.Add(request.NewErrParamRequired("BucketName")) - } - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } - if s.AccessControlList != nil { - for i, v := range s.AccessControlList { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Encryption != nil { - if err := s.Encryption.Validate(); err != nil { - invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) - } - } - if s.Tagging != nil { - if err := s.Tagging.Validate(); err != nil { - invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessControlList sets the AccessControlList field's value. -func (s *Location) SetAccessControlList(v []*Grant) *Location { - s.AccessControlList = v - return s -} - -// SetBucketName sets the BucketName field's value. -func (s *Location) SetBucketName(v string) *Location { - s.BucketName = &v - return s -} - -// SetCannedACL sets the CannedACL field's value. 
-func (s *Location) SetCannedACL(v string) *Location { - s.CannedACL = &v - return s -} - -// SetEncryption sets the Encryption field's value. -func (s *Location) SetEncryption(v *Encryption) *Location { - s.Encryption = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *Location) SetPrefix(v string) *Location { - s.Prefix = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *Location) SetStorageClass(v string) *Location { - s.StorageClass = &v - return s -} - -// SetTagging sets the Tagging field's value. -func (s *Location) SetTagging(v *Tagging) *Location { - s.Tagging = v - return s -} - -// SetUserMetadata sets the UserMetadata field's value. -func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { - s.UserMetadata = v - return s -} - -// Describes where logs are stored and the prefix that Amazon S3 assigns to -// all log object keys for a bucket. For more information, see PUT Bucket logging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) -// in the Amazon S3 API Reference. -type LoggingEnabled struct { - _ struct{} `type:"structure"` - - // Specifies the bucket where you want Amazon S3 to store server access logs. - // You can have your logs delivered to any bucket that you own, including the - // same bucket that is being logged. You can also configure multiple buckets - // to deliver their logs to the same target bucket. In this case, you should - // choose a different TargetPrefix for each source bucket so that the delivered - // log files can be distinguished by key. - // - // TargetBucket is a required field - TargetBucket *string `type:"string" required:"true"` - - // Container for granting information. - // - // Buckets that use the bucket owner enforced setting for Object Ownership don't - // support target grants. For more information, see Permissions for server access - // log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) - // in the Amazon S3 User Guide. - TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` - - // A prefix for all log object keys. If you store log files from multiple Amazon - // S3 buckets in a single bucket, you can use a prefix to distinguish which - // log files came from which bucket. - // - // TargetPrefix is a required field - TargetPrefix *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LoggingEnabled) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LoggingEnabled) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
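LoggingEnabled is the payload of the PUT Bucket logging call referenced above, with TargetBucket and TargetPrefix as its two required members. A hedged sketch of wiring it into PutBucketLogging; both bucket names and the prefix are placeholders, and a distinct TargetPrefix per source bucket keeps delivered log keys distinguishable:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// TargetBucket and TargetPrefix are the two members that
	// LoggingEnabled.Validate requires.
	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("example-source-bucket"), // placeholder
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"),          // placeholder
				TargetPrefix: aws.String("logs/example-source-bucket/"), // placeholder
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}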
-func (s *LoggingEnabled) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} - if s.TargetBucket == nil { - invalidParams.Add(request.NewErrParamRequired("TargetBucket")) - } - if s.TargetPrefix == nil { - invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) - } - if s.TargetGrants != nil { - for i, v := range s.TargetGrants { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTargetBucket sets the TargetBucket field's value. -func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { - s.TargetBucket = &v - return s -} - -// SetTargetGrants sets the TargetGrants field's value. -func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { - s.TargetGrants = v - return s -} - -// SetTargetPrefix sets the TargetPrefix field's value. -func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { - s.TargetPrefix = &v - return s -} - -// A metadata key-value pair to store with an object. -type MetadataEntry struct { - _ struct{} `type:"structure"` - - // Name of the Object. - Name *string `type:"string"` - - // Value of the Object. - Value *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MetadataEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MetadataEntry) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *MetadataEntry) SetName(v string) *MetadataEntry { - s.Name = &v - return s -} - -// SetValue sets the Value field's value. -func (s *MetadataEntry) SetValue(v string) *MetadataEntry { - s.Value = &v - return s -} - -// A container specifying replication metrics-related settings enabling replication -// metrics and events. -type Metrics struct { - _ struct{} `type:"structure"` - - // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold - // event. - EventThreshold *ReplicationTimeValue `type:"structure"` - - // Specifies whether the replication metrics are enabled. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"MetricsStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Metrics) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Metrics) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
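// Illustrative only: Metrics is consumed inside a replication rule's
// Destination. A sketch of enabling replication metrics with a 15-minute
// missed-threshold event, assuming the aws-sdk-go v1 aws and s3 packages.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	m := &s3.Metrics{
		Status: aws.String(s3.MetricsStatusEnabled), // required
		EventThreshold: &s3.ReplicationTimeValue{
			Minutes: aws.Int64(15),
		},
	}
	// Validate rejects a Metrics value whose required Status is unset.
	if err := m.Validate(); err != nil {
		fmt.Println("invalid metrics:", err)
		return
	}
	fmt.Println(m)
}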
-func (s *Metrics) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Metrics"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEventThreshold sets the EventThreshold field's value. -func (s *Metrics) SetEventThreshold(v *ReplicationTimeValue) *Metrics { - s.EventThreshold = v - return s -} - -// SetStatus sets the Status field's value. -func (s *Metrics) SetStatus(v string) *Metrics { - s.Status = &v - return s -} - -// A conjunction (logical AND) of predicates, which is used in evaluating a -// metrics filter. The operator must have at least two predicates, and an object -// must match all of the predicates in order for the filter to apply. -type MetricsAndOperator struct { - _ struct{} `type:"structure"` - - // The access point ARN used when evaluating an AND predicate. - AccessPointArn *string `type:"string"` - - // The prefix used when evaluating an AND predicate. - Prefix *string `type:"string"` - - // The list of tags used when evaluating an AND predicate. - Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MetricsAndOperator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MetricsAndOperator) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MetricsAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessPointArn sets the AccessPointArn field's value. -func (s *MetricsAndOperator) SetAccessPointArn(v string) *MetricsAndOperator { - s.AccessPointArn = &v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { - s.Tags = v - return s -} - -// Specifies a metrics configuration for the CloudWatch request metrics (specified -// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating -// an existing metrics configuration, note that this is a full replacement of -// the existing metrics configuration. If you don't include the elements you -// want to keep, they are erased. For more information, see PutBucketMetricsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html). -type MetricsConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies a metrics configuration filter. 
The metrics configuration will - // only include objects that meet the filter's criteria. A filter must be a - // prefix, an object tag, an access point ARN, or a conjunction (MetricsAndOperator). - Filter *MetricsFilter `type:"structure"` - - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MetricsConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MetricsConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MetricsConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFilter sets the Filter field's value. -func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { - s.Id = &v - return s -} - -// Specifies a metrics configuration filter. The metrics configuration only -// includes objects that meet the filter's criteria. A filter must be a prefix, -// an object tag, an access point ARN, or a conjunction (MetricsAndOperator). -// For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html). -type MetricsFilter struct { - _ struct{} `type:"structure"` - - // The access point ARN used when evaluating a metrics filter. - AccessPointArn *string `type:"string"` - - // A conjunction (logical AND) of predicates, which is used in evaluating a - // metrics filter. The operator must have at least two predicates, and an object - // must match all of the predicates in order for the filter to apply. - And *MetricsAndOperator `type:"structure"` - - // The prefix used when evaluating a metrics filter. - Prefix *string `type:"string"` - - // The tag used when evaluating a metrics filter. - Tag *Tag `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MetricsFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
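// A sketch of publishing a metrics configuration whose filter is a
// prefix-plus-tag conjunction, matching the MetricsAndOperator semantics
// described above; names are placeholders and the aws-sdk-go v1 packages
// are assumed.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("docs-metrics"),
		MetricsConfiguration: &s3.MetricsConfiguration{
			Id: aws.String("docs-metrics"), // required
			Filter: &s3.MetricsFilter{
				// An And operator is used because this filter combines two
				// predicates; a single prefix or tag would be set directly.
				And: &s3.MetricsAndOperator{
					Prefix: aws.String("documents/"),
					Tags: []*s3.Tag{
						{Key: aws.String("team"), Value: aws.String("docs")},
					},
				},
			},
		},
	})
	if err != nil {
		fmt.Println("put metrics configuration failed:", err)
	}
}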
-func (s MetricsFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MetricsFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessPointArn sets the AccessPointArn field's value. -func (s *MetricsFilter) SetAccessPointArn(v string) *MetricsFilter { - s.AccessPointArn = &v - return s -} - -// SetAnd sets the And field's value. -func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { - s.And = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { - s.Prefix = &v - return s -} - -// SetTag sets the Tag field's value. -func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { - s.Tag = v - return s -} - -// Container for the MultipartUpload for the Amazon S3 object. -type MultipartUpload struct { - _ struct{} `type:"structure"` - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm *string `type:"string" enum:"ChecksumAlgorithm"` - - // Date and time at which the multipart upload was initiated. - Initiated *time.Time `type:"timestamp"` - - // Identifies who initiated the multipart upload. - Initiator *Initiator `type:"structure"` - - // Key of the object for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` - - // Specifies the owner of the object that is part of the multipart upload. - Owner *Owner `type:"structure"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` - - // Upload ID that identifies the multipart upload. - UploadId *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MultipartUpload) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MultipartUpload) GoString() string { - return s.String() -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *MultipartUpload) SetChecksumAlgorithm(v string) *MultipartUpload { - s.ChecksumAlgorithm = &v - return s -} - -// SetInitiated sets the Initiated field's value. -func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { - s.Initiated = &v - return s -} - -// SetInitiator sets the Initiator field's value. -func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { - s.Initiator = v - return s -} - -// SetKey sets the Key field's value. -func (s *MultipartUpload) SetKey(v string) *MultipartUpload { - s.Key = &v - return s -} - -// SetOwner sets the Owner field's value. 
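// A sketch of listing in-progress multipart uploads; each MultipartUpload
// carries the Key/UploadId pair needed to later complete or abort the
// upload. The bucket name is a placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.ListMultipartUploads(&s3.ListMultipartUploadsInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		fmt.Println("list multipart uploads failed:", err)
		return
	}
	for _, u := range out.Uploads {
		fmt.Printf("key=%s uploadId=%s initiated=%s\n",
			aws.StringValue(u.Key), aws.StringValue(u.UploadId),
			aws.TimeValue(u.Initiated))
	}
}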
-func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { - s.Owner = v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { - s.StorageClass = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { - s.UploadId = &v - return s -} - -// Specifies when noncurrent object versions expire. Upon expiration, Amazon -// S3 permanently deletes the noncurrent object versions. You set this lifecycle -// configuration action on a bucket that has versioning enabled (or suspended) -// to request that Amazon S3 delete noncurrent object versions at a specific -// period in the object's lifetime. -type NoncurrentVersionExpiration struct { - _ struct{} `type:"structure"` - - // Specifies how many noncurrent versions Amazon S3 will retain. If there are - // this many more recent noncurrent versions, Amazon S3 will take the associated - // action. For more information about noncurrent versions, see Lifecycle configuration - // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) - // in the Amazon S3 User Guide. - NewerNoncurrentVersions *int64 `type:"integer"` - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. The value must be a non-zero positive integer. - // For information about the noncurrent days calculations, see How Amazon S3 - // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon S3 User Guide. - NoncurrentDays *int64 `type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NoncurrentVersionExpiration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NoncurrentVersionExpiration) GoString() string { - return s.String() -} - -// SetNewerNoncurrentVersions sets the NewerNoncurrentVersions field's value. -func (s *NoncurrentVersionExpiration) SetNewerNoncurrentVersions(v int64) *NoncurrentVersionExpiration { - s.NewerNoncurrentVersions = &v - return s -} - -// SetNoncurrentDays sets the NoncurrentDays field's value. -func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { - s.NoncurrentDays = &v - return s -} - -// Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, -// GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled -// (or versioning is suspended), you can set this action to request that Amazon -// S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, -// INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at -// a specific period in the object's lifetime. -type NoncurrentVersionTransition struct { - _ struct{} `type:"structure"` - - // Specifies how many noncurrent versions Amazon S3 will retain. 
If there are - // this many more recent noncurrent versions, Amazon S3 will take the associated - // action. For more information about noncurrent versions, see Lifecycle configuration - // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) - // in the Amazon S3 User Guide. - NewerNoncurrentVersions *int64 `type:"integer"` - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon S3 User Guide. - NoncurrentDays *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"TransitionStorageClass"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NoncurrentVersionTransition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NoncurrentVersionTransition) GoString() string { - return s.String() -} - -// SetNewerNoncurrentVersions sets the NewerNoncurrentVersions field's value. -func (s *NoncurrentVersionTransition) SetNewerNoncurrentVersions(v int64) *NoncurrentVersionTransition { - s.NewerNoncurrentVersions = &v - return s -} - -// SetNoncurrentDays sets the NoncurrentDays field's value. -func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { - s.NoncurrentDays = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { - s.StorageClass = &v - return s -} - -// A container for specifying the notification configuration of the bucket. -// If this element is empty, notifications are turned off for the bucket. -type NotificationConfiguration struct { - _ struct{} `type:"structure"` - - // Enables delivery of events to Amazon EventBridge. - EventBridgeConfiguration *EventBridgeConfiguration `type:"structure"` - - // Describes the Lambda functions to invoke and the events for which to invoke - // them. - LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` - - // The Amazon Simple Queue Service queues to publish messages to and the events - // for which to publish messages. - QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` - - // The topic to which notifications are sent and the events for which notifications - // are generated. - TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
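// A sketch of the lifecycle rule these noncurrent-version types plug into:
// retain the three newest noncurrent versions and expire the rest 30 days
// after they become noncurrent. Identifiers are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-versioned-bucket"),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("expire-noncurrent"),
				Status: aws.String(s3.ExpirationStatusEnabled),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")}, // whole bucket
				NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
					NewerNoncurrentVersions: aws.Int64(3),
					NoncurrentDays:          aws.Int64(30),
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("put lifecycle configuration failed:", err)
	}
}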
-func (s NotificationConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotificationConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *NotificationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} - if s.LambdaFunctionConfigurations != nil { - for i, v := range s.LambdaFunctionConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - if s.QueueConfigurations != nil { - for i, v := range s.QueueConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - if s.TopicConfigurations != nil { - for i, v := range s.TopicConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEventBridgeConfiguration sets the EventBridgeConfiguration field's value. -func (s *NotificationConfiguration) SetEventBridgeConfiguration(v *EventBridgeConfiguration) *NotificationConfiguration { - s.EventBridgeConfiguration = v - return s -} - -// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. -func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { - s.LambdaFunctionConfigurations = v - return s -} - -// SetQueueConfigurations sets the QueueConfigurations field's value. -func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { - s.QueueConfigurations = v - return s -} - -// SetTopicConfigurations sets the TopicConfigurations field's value. -func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { - s.TopicConfigurations = v - return s -} - -type NotificationConfigurationDeprecated struct { - _ struct{} `type:"structure"` - - // Container for specifying the Lambda notification configuration. - CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` - - // This data type is deprecated. This data type specifies the configuration - // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue - // when Amazon S3 detects specified events. - QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` - - // This data type is deprecated. A container for specifying the configuration - // for publication of messages to an Amazon Simple Notification Service (Amazon - // SNS) topic when Amazon S3 detects specified events. - TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotificationConfigurationDeprecated) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotificationConfigurationDeprecated) GoString() string { - return s.String() -} - -// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. -func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { - s.CloudFunctionConfiguration = v - return s -} - -// SetQueueConfiguration sets the QueueConfiguration field's value. -func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { - s.QueueConfiguration = v - return s -} - -// SetTopicConfiguration sets the TopicConfiguration field's value. -func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { - s.TopicConfiguration = v - return s -} - -// Specifies object key name filtering rules. For information about key name -// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon S3 User Guide. -type NotificationConfigurationFilter struct { - _ struct{} `type:"structure"` - - // A container for object key name prefix and suffix filtering rules. - Key *KeyFilter `locationName:"S3Key" type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotificationConfigurationFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotificationConfigurationFilter) GoString() string { - return s.String() -} - -// SetKey sets the Key field's value. -func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { - s.Key = v - return s -} - -// An object consists of data and its descriptive metadata. -type Object struct { - _ struct{} `type:"structure"` - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm []*string `type:"list" flattened:"true" enum:"ChecksumAlgorithm"` - - // The entity tag is a hash of the object. The ETag reflects changes only to - // the contents of an object, not its metadata. The ETag may or may not be an - // MD5 digest of the object data. Whether or not it is depends on how the object - // was created and how it is encrypted as described below: - // - // * Objects created by the PUT Object, POST Object, or Copy operation, or - // through the Amazon Web Services Management Console, and are encrypted - // by SSE-S3 or plaintext, have ETags that are an MD5 digest of their object - // data. 
- // - // * Objects created by the PUT Object, POST Object, or Copy operation, or - // through the Amazon Web Services Management Console, and are encrypted - // by SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their object - // data. - // - // * If an object is created by either the Multipart Upload or Part Copy - // operation, the ETag is not an MD5 digest, regardless of the method of - // encryption. If an object is larger than 16 MB, the Amazon Web Services - // Management Console will upload or copy that object as a Multipart Upload, - // and therefore the ETag will not be an MD5 digest. - ETag *string `type:"string"` - - // The name that you assign to an object. You use the object key to retrieve - // the object. - Key *string `min:"1" type:"string"` - - // Creation date of the object. - LastModified *time.Time `type:"timestamp"` - - // The owner of the object - Owner *Owner `type:"structure"` - - // Size in bytes of the object - Size *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectStorageClass"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Object) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Object) GoString() string { - return s.String() -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *Object) SetChecksumAlgorithm(v []*string) *Object { - s.ChecksumAlgorithm = v - return s -} - -// SetETag sets the ETag field's value. -func (s *Object) SetETag(v string) *Object { - s.ETag = &v - return s -} - -// SetKey sets the Key field's value. -func (s *Object) SetKey(v string) *Object { - s.Key = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *Object) SetLastModified(v time.Time) *Object { - s.LastModified = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *Object) SetOwner(v *Owner) *Object { - s.Owner = v - return s -} - -// SetSize sets the Size field's value. -func (s *Object) SetSize(v int64) *Object { - s.Size = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *Object) SetStorageClass(v string) *Object { - s.StorageClass = &v - return s -} - -// Object Identifier is unique value to identify objects. -type ObjectIdentifier struct { - _ struct{} `type:"structure"` - - // Key name of the object. - // - // Replacement must be made for object keys containing special characters (such - // as carriage returns) when using XML requests. For more information, see XML - // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // VersionId for the specific version of the object to delete. - VersionId *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ObjectIdentifier) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ObjectIdentifier) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ObjectIdentifier) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetKey sets the Key field's value.
-func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier {
-	s.Key = &v
-	return s
-}
-
-// SetVersionId sets the VersionId field's value.
-func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
-	s.VersionId = &v
-	return s
-}
-
-// The container element for Object Lock configuration parameters.
-type ObjectLockConfiguration struct {
-	_ struct{} `type:"structure"`
-
-	// Indicates whether this bucket has an Object Lock configuration enabled. Enable
-	// ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket.
-	ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"`
-
-	// Specifies the Object Lock rule for the specified object. Enable this rule
-	// when you apply ObjectLockConfiguration to a bucket. Bucket settings
-	// require both a mode and a period. The period can be either Days or Years
-	// but you must select one. You cannot specify Days and Years at the same time.
-	Rule *ObjectLockRule `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ObjectLockConfiguration) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ObjectLockConfiguration) GoString() string {
-	return s.String()
-}
-
-// SetObjectLockEnabled sets the ObjectLockEnabled field's value.
-func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration {
-	s.ObjectLockEnabled = &v
-	return s
-}
-
-// SetRule sets the Rule field's value.
-func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration {
-	s.Rule = v
-	return s
-}
-
-// A legal hold configuration for an object.
-type ObjectLockLegalHold struct {
-	_ struct{} `type:"structure"`
-
-	// Indicates whether the specified object has a legal hold in place.
-	Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
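// A sketch of the batch delete that ObjectIdentifier feeds: DeleteObjects
// accepts up to 1,000 Key/VersionId pairs per call. Names and the version
// ID are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"),
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("reports/2023.csv")},
				{
					Key:       aws.String("reports/2022.csv"),
					VersionId: aws.String("exampleVersionId"),
				},
			},
			Quiet: aws.Bool(true), // report only failures
		},
	})
	if err != nil {
		fmt.Println("delete objects failed:", err)
		return
	}
	for _, e := range out.Errors {
		fmt.Printf("failed to delete %s: %s\n",
			aws.StringValue(e.Key), aws.StringValue(e.Message))
	}
}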
-func (s ObjectLockLegalHold) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ObjectLockLegalHold) GoString() string { - return s.String() -} - -// SetStatus sets the Status field's value. -func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { - s.Status = &v - return s -} - -// A Retention configuration for an object. -type ObjectLockRetention struct { - _ struct{} `type:"structure"` - - // Indicates the Retention mode for the specified object. - Mode *string `type:"string" enum:"ObjectLockRetentionMode"` - - // The date on which this Object Lock Retention will expire. - RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ObjectLockRetention) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ObjectLockRetention) GoString() string { - return s.String() -} - -// SetMode sets the Mode field's value. -func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { - s.Mode = &v - return s -} - -// SetRetainUntilDate sets the RetainUntilDate field's value. -func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { - s.RetainUntilDate = &v - return s -} - -// The container element for an Object Lock rule. -type ObjectLockRule struct { - _ struct{} `type:"structure"` - - // The default Object Lock retention mode and period that you want to apply - // to new objects placed in the specified bucket. Bucket settings require both - // a mode and a period. The period can be either Days or Years but you must - // select one. You cannot specify Days and Years at the same time. - DefaultRetention *DefaultRetention `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ObjectLockRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ObjectLockRule) GoString() string { - return s.String() -} - -// SetDefaultRetention sets the DefaultRetention field's value. -func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { - s.DefaultRetention = v - return s -} - -// A container for elements related to an individual part. -type ObjectPart struct { - _ struct{} `type:"structure"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. 
This header specifies - // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `type:"string"` - - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `type:"string"` - - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `type:"string"` - - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA256 *string `type:"string"` - - // The part number identifying the part. This value is a positive integer between - // 1 and 10,000. - PartNumber *int64 `type:"integer"` - - // The size of the uploaded part in bytes. - Size *int64 `type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ObjectPart) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ObjectPart) GoString() string { - return s.String() -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *ObjectPart) SetChecksumCRC32(v string) *ObjectPart { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. -func (s *ObjectPart) SetChecksumCRC32C(v string) *ObjectPart { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *ObjectPart) SetChecksumSHA1(v string) *ObjectPart { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *ObjectPart) SetChecksumSHA256(v string) *ObjectPart { - s.ChecksumSHA256 = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *ObjectPart) SetPartNumber(v int64) *ObjectPart { - s.PartNumber = &v - return s -} - -// SetSize sets the Size field's value. 
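// A sketch of where ObjectPart surfaces in practice: GetObjectAttributes
// with the ObjectParts attribute returns per-part sizes and checksums for
// a multipart object. This assumes an aws-sdk-go v1 release that includes
// GetObjectAttributes and the checksum fields; names are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
		Bucket:           aws.String("example-bucket"),
		Key:              aws.String("big-object.bin"),
		ObjectAttributes: []*string{aws.String(s3.ObjectAttributesObjectParts)},
	})
	if err != nil {
		fmt.Println("get object attributes failed:", err)
		return
	}
	if out.ObjectParts != nil {
		for _, p := range out.ObjectParts.Parts {
			fmt.Printf("part %d: %d bytes, crc32c=%s\n",
				aws.Int64Value(p.PartNumber), aws.Int64Value(p.Size),
				aws.StringValue(p.ChecksumCRC32C))
		}
	}
}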
-func (s *ObjectPart) SetSize(v int64) *ObjectPart { - s.Size = &v - return s -} - -// The version of an object. -type ObjectVersion struct { - _ struct{} `type:"structure"` - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm []*string `type:"list" flattened:"true" enum:"ChecksumAlgorithm"` - - // The entity tag is an MD5 hash of that version of the object. - ETag *string `type:"string"` - - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` - - // The object key. - Key *string `min:"1" type:"string"` - - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp"` - - // Specifies the owner of the object. - Owner *Owner `type:"structure"` - - // Size in bytes of the object. - Size *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` - - // Version ID of an object. - VersionId *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ObjectVersion) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ObjectVersion) GoString() string { - return s.String() -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *ObjectVersion) SetChecksumAlgorithm(v []*string) *ObjectVersion { - s.ChecksumAlgorithm = v - return s -} - -// SetETag sets the ETag field's value. -func (s *ObjectVersion) SetETag(v string) *ObjectVersion { - s.ETag = &v - return s -} - -// SetIsLatest sets the IsLatest field's value. -func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { - s.IsLatest = &v - return s -} - -// SetKey sets the Key field's value. -func (s *ObjectVersion) SetKey(v string) *ObjectVersion { - s.Key = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { - s.LastModified = &v - return s -} - -// SetOwner sets the Owner field's value. -func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { - s.Owner = v - return s -} - -// SetSize sets the Size field's value. -func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { - s.Size = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { - s.StorageClass = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { - s.VersionId = &v - return s -} - -// Describes the location where the restore job's output is stored. -type OutputLocation struct { - _ struct{} `type:"structure"` - - // Describes an S3 location that will receive the results of the restore request. - S3 *Location `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s OutputLocation) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s OutputLocation) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *OutputLocation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} - if s.S3 != nil { - if err := s.S3.Validate(); err != nil { - invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetS3 sets the S3 field's value. -func (s *OutputLocation) SetS3(v *Location) *OutputLocation { - s.S3 = v - return s -} - -// Describes how results of the Select job are serialized. -type OutputSerialization struct { - _ struct{} `type:"structure"` - - // Describes the serialization of CSV-encoded Select results. - CSV *CSVOutput `type:"structure"` - - // Specifies JSON as request's output serialization format. - JSON *JSONOutput `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s OutputSerialization) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s OutputSerialization) GoString() string { - return s.String() -} - -// SetCSV sets the CSV field's value. -func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { - s.CSV = v - return s -} - -// SetJSON sets the JSON field's value. -func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { - s.JSON = v - return s -} - -// Container for the owner's display name and ID. -type Owner struct { - _ struct{} `type:"structure"` - - // Container for the display name of the owner. - DisplayName *string `type:"string"` - - // Container for the ID of the owner. - ID *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Owner) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Owner) GoString() string { - return s.String() -} - -// SetDisplayName sets the DisplayName field's value. -func (s *Owner) SetDisplayName(v string) *Owner { - s.DisplayName = &v - return s -} - -// SetID sets the ID field's value. -func (s *Owner) SetID(v string) *Owner { - s.ID = &v - return s -} - -// The container element for a bucket's ownership controls. 
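// A sketch pairing OutputSerialization with S3 Select: CSV input, JSON
// records out. The query, bucket, and key are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),
		Key:            aws.String("data.csv"),
		Expression:     aws.String("SELECT s.name FROM S3Object s"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{
			JSON: &s3.JSONOutput{RecordDelimiter: aws.String("\n")},
		},
	})
	if err != nil {
		fmt.Println("select failed:", err)
		return
	}
	defer out.EventStream.Close()
	// Matching records arrive as RecordsEvent payloads on the event stream.
	for ev := range out.EventStream.Events() {
		if r, ok := ev.(*s3.RecordsEvent); ok {
			fmt.Print(string(r.Payload))
		}
	}
}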
-type OwnershipControls struct { - _ struct{} `type:"structure"` - - // The container element for an ownership control rule. - // - // Rules is a required field - Rules []*OwnershipControlsRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s OwnershipControls) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s OwnershipControls) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *OwnershipControls) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OwnershipControls"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRules sets the Rules field's value. -func (s *OwnershipControls) SetRules(v []*OwnershipControlsRule) *OwnershipControls { - s.Rules = v - return s -} - -// The container element for an ownership control rule. -type OwnershipControlsRule struct { - _ struct{} `type:"structure"` - - // The container element for object ownership for a bucket's ownership controls. - // - // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to - // the bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. - // - // ObjectWriter - The uploading account will own the object if the object is - // uploaded with the bucket-owner-full-control canned ACL. - // - // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer - // affect permissions. The bucket owner automatically owns and has full control - // over every object in the bucket. The bucket only accepts PUT requests that - // don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control - // canned ACL or an equivalent form of this ACL expressed in the XML format. - // - // ObjectOwnership is a required field - ObjectOwnership *string `type:"string" required:"true" enum:"ObjectOwnership"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s OwnershipControlsRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s OwnershipControlsRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
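// A sketch of applying the ownership controls described above; this one
// disables ACLs entirely via BucketOwnerEnforced. The bucket name is a
// placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String("example-bucket"),
		OwnershipControls: &s3.OwnershipControls{
			Rules: []*s3.OwnershipControlsRule{{ // Rules is required
				ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced),
			}},
		},
	})
	if err != nil {
		fmt.Println("put ownership controls failed:", err)
	}
}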
-func (s *OwnershipControlsRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OwnershipControlsRule"} - if s.ObjectOwnership == nil { - invalidParams.Add(request.NewErrParamRequired("ObjectOwnership")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetObjectOwnership sets the ObjectOwnership field's value. -func (s *OwnershipControlsRule) SetObjectOwnership(v string) *OwnershipControlsRule { - s.ObjectOwnership = &v - return s -} - -// Container for Parquet. -type ParquetInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ParquetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ParquetInput) GoString() string { - return s.String() -} - -// Container for elements related to a part. -type Part struct { - _ struct{} `type:"structure"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `type:"string"` - - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `type:"string"` - - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumSHA256 *string `type:"string"` - - // Entity tag returned when the part was uploaded. - ETag *string `type:"string"` - - // Date and time at which the part was uploaded. - LastModified *time.Time `type:"timestamp"` - - // Part number identifying the part. This is a positive integer between 1 and - // 10,000. 
- PartNumber *int64 `type:"integer"` - - // Size in bytes of the uploaded part data. - Size *int64 `type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Part) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Part) GoString() string { - return s.String() -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *Part) SetChecksumCRC32(v string) *Part { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. -func (s *Part) SetChecksumCRC32C(v string) *Part { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *Part) SetChecksumSHA1(v string) *Part { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *Part) SetChecksumSHA256(v string) *Part { - s.ChecksumSHA256 = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *Part) SetETag(v string) *Part { - s.ETag = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *Part) SetLastModified(v time.Time) *Part { - s.LastModified = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *Part) SetPartNumber(v int64) *Part { - s.PartNumber = &v - return s -} - -// SetSize sets the Size field's value. -func (s *Part) SetSize(v int64) *Part { - s.Size = &v - return s -} - -// The container element for a bucket's policy status. -type PolicyStatus struct { - _ struct{} `type:"structure"` - - // The policy status for this bucket. TRUE indicates that this bucket is public. - // FALSE indicates that the bucket is not public. - IsPublic *bool `locationName:"IsPublic" type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PolicyStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PolicyStatus) GoString() string { - return s.String() -} - -// SetIsPublic sets the IsPublic field's value. -func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { - s.IsPublic = &v - return s -} - -// This data type contains information about progress of an operation. -type Progress struct { - _ struct{} `type:"structure"` - - // The current number of uncompressed object bytes processed. - BytesProcessed *int64 `type:"long"` - - // The current number of bytes of records payload data returned. - BytesReturned *int64 `type:"long"` - - // The current number of object bytes scanned. - BytesScanned *int64 `type:"long"` -} - -// String returns the string representation. 
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Progress) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Progress) GoString() string {
-	return s.String()
-}
-
-// SetBytesProcessed sets the BytesProcessed field's value.
-func (s *Progress) SetBytesProcessed(v int64) *Progress {
-	s.BytesProcessed = &v
-	return s
-}
-
-// SetBytesReturned sets the BytesReturned field's value.
-func (s *Progress) SetBytesReturned(v int64) *Progress {
-	s.BytesReturned = &v
-	return s
-}
-
-// SetBytesScanned sets the BytesScanned field's value.
-func (s *Progress) SetBytesScanned(v int64) *Progress {
-	s.BytesScanned = &v
-	return s
-}
-
-// This data type contains information about the progress event of an operation.
-type ProgressEvent struct {
-	_ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"`
-
-	// The Progress event details.
-	Details *Progress `locationName:"Details" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProgressEvent) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProgressEvent) GoString() string {
-	return s.String()
-}
-
-// SetDetails sets the Details field's value.
-func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent {
-	s.Details = v
-	return s
-}
-
-// The ProgressEvent is an event in the SelectObjectContentEventStream group of events.
-func (s *ProgressEvent) eventSelectObjectContentEventStream() {}
-
-// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value.
-// This method is only used internally within the SDK's EventStream handling.
-func (s *ProgressEvent) UnmarshalEvent(
-	payloadUnmarshaler protocol.PayloadUnmarshaler,
-	msg eventstream.Message,
-) error {
-	if err := payloadUnmarshaler.UnmarshalPayload(
-		bytes.NewReader(msg.Payload), s,
-	); err != nil {
-		return err
-	}
-	return nil
-}
-
-// MarshalEvent marshals the type into a stream event value. This method
-// should only be used internally within the SDK's EventStream handling.
-func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
-	msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
-	var buf bytes.Buffer
-	if err = pm.MarshalPayload(&buf, s); err != nil {
-		return eventstream.Message{}, err
-	}
-	msg.Payload = buf.Bytes()
-	return msg, err
-}
-
-// The PublicAccessBlock configuration that you want to apply to this Amazon
-// S3 bucket. You can enable the configuration options in any combination.
For -// more information about when Amazon S3 considers a bucket or object public, -// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// in the Amazon S3 User Guide. -type PublicAccessBlockConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies whether Amazon S3 should block public access control lists (ACLs) - // for this bucket and objects in this bucket. Setting this element to TRUE - // causes the following behavior: - // - // * PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is - // public. - // - // * PUT Object calls fail if the request includes a public ACL. - // - // * PUT Bucket calls fail if the request includes a public ACL. - // - // Enabling this setting doesn't affect existing policies or ACLs. - BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` - - // Specifies whether Amazon S3 should block public bucket policies for this - // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to - // PUT Bucket policy if the specified bucket policy allows public access. - // - // Enabling this setting doesn't affect existing bucket policies. - BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"` - - // Specifies whether Amazon S3 should ignore public ACLs for this bucket and - // objects in this bucket. Setting this element to TRUE causes Amazon S3 to - // ignore all public ACLs on this bucket and objects in this bucket. - // - // Enabling this setting doesn't affect the persistence of any existing ACLs - // and doesn't prevent new public ACLs from being set. - IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` - - // Specifies whether Amazon S3 should restrict public bucket policies for this - // bucket. Setting this element to TRUE restricts access to this bucket to only - // Amazon Web Service principals and authorized users within this account if - // the bucket has a public policy. - // - // Enabling this setting doesn't affect previously stored bucket policies, except - // that public and cross-account access within any public bucket policy, including - // non-public delegation to specific accounts, is blocked. - RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublicAccessBlockConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PublicAccessBlockConfiguration) GoString() string { - return s.String() -} - -// SetBlockPublicAcls sets the BlockPublicAcls field's value. -func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration { - s.BlockPublicAcls = &v - return s -} - -// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. -func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration { - s.BlockPublicPolicy = &v - return s -} - -// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. 
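// A usage sketch: the chaining setters defined for this type make it easy to
// assemble a full public-access block and apply it with PutPublicAccessBlock.
// The region and bucket name below are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Enable all four protections; each setter returns the receiver, so the
	// calls chain.
	cfg := (&s3.PublicAccessBlockConfiguration{}).
		SetBlockPublicAcls(true).
		SetBlockPublicPolicy(true).
		SetIgnorePublicAcls(true).
		SetRestrictPublicBuckets(true)

	if _, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket:                         aws.String("example-bucket"),
		PublicAccessBlockConfiguration: cfg,
	}); err != nil {
		log.Fatal(err)
	}
}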
-func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration { - s.IgnorePublicAcls = &v - return s -} - -// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value. -func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration { - s.RestrictPublicBuckets = &v - return s -} - -type PutBucketAccelerateConfigurationInput struct { - _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` - - // Container for setting the transfer acceleration state. - // - // AccelerateConfiguration is a required field - AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The name of the bucket for which the accelerate configuration is set. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketAccelerateConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketAccelerateConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
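// Another sketch, under the same assumptions as the previous example (same
// imports; svc is a configured *s3.S3 client; the helper name and bucket are
// placeholders, not SDK identifiers):
func exampleEnableTransferAcceleration(svc *s3.S3) error {
	// "Enabled" is exposed by the SDK as the BucketAccelerateStatusEnabled enum value.
	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("example-bucket"),
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(s3.BucketAccelerateStatusEnabled),
		},
	})
	return err
}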
-func (s *PutBucketAccelerateConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} - if s.AccelerateConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) - } - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. -func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { - s.AccelerateConfiguration = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketAccelerateConfigurationInput) SetChecksumAlgorithm(v string) *PutBucketAccelerateConfigurationInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAccelerateConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketAccelerateConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketAccelerateConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketAccelerateConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketAclInput struct { - _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` - - // The canned ACL to apply to the bucket. 
- ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - - // Contains the elements that set the ACL permissions for an object per grantee. - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The bucket to which to apply the ACL. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create new objects in the bucket. - // - // For the bucket and object owners of existing objects, also allows deletions - // and overwrites of those objects. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.AccessControlPolicy != nil { - if err := s.AccessControlPolicy.Validate(); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { - s.ACL = &v - return s -} - -// SetAccessControlPolicy sets the AccessControlPolicy field's value. -func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { - s.AccessControlPolicy = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { - s.Bucket = &v - return s -} - -func (s *PutBucketAclInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketAclInput) SetChecksumAlgorithm(v string) *PutBucketAclInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketAclInput) SetExpectedBucketOwner(v string) *PutBucketAclInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWrite sets the GrantWrite field's value. -func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { - s.GrantWrite = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { - s.GrantWriteACP = &v - return s -} - -func (s *PutBucketAclInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketAclInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. 
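// A sketch of the simplest use of this input, applying a canned ACL rather
// than an explicit grant list (helper name and bucket are placeholders):
func exampleSetCannedBucketACL(svc *s3.S3) error {
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String("example-bucket"),
		ACL:    aws.String(s3.BucketCannedACLPrivate), // "private"
	})
	return err
}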
-func (s PutBucketAclInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketAclOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketAclOutput) GoString() string { - return s.String() -} - -type PutBucketAnalyticsConfigurationInput struct { - _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` - - // The configuration and any analyses for the analytics filter. - // - // AnalyticsConfiguration is a required field - AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The name of the bucket to which an analytics configuration is stored. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The ID that identifies the analytics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketAnalyticsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketAnalyticsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketAnalyticsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} - if s.AnalyticsConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) - } - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.AnalyticsConfiguration != nil { - if err := s.AnalyticsConfiguration.Validate(); err != nil { - invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { - s.AnalyticsConfiguration = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAnalyticsConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetId sets the Id field's value. -func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { - s.Id = &v - return s -} - -func (s *PutBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketAnalyticsConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketAnalyticsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
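// Sketch: storing a minimal analytics configuration. Note that the Id appears
// both on the input and inside the configuration, and StorageClassAnalysis is
// required but may be left empty when no data export is wanted. All names are
// placeholders.
func examplePutAnalyticsConfig(svc *s3.S3) error {
	_, err := svc.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("all-objects"),
		AnalyticsConfiguration: &s3.AnalyticsConfiguration{
			Id:                   aws.String("all-objects"),
			StorageClassAnalysis: &s3.StorageClassAnalysis{},
		},
	})
	return err
}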
-func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
-	return s.String()
-}
-
-type PutBucketCorsInput struct {
-	_ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"`
-
-	// Specifies the bucket impacted by the CORS configuration.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// Describes the cross-origin access configuration for objects in an Amazon
-	// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
-	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
-	// S3 User Guide.
-	//
-	// CORSConfiguration is a required field
-	CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
-
-	// Indicates the algorithm used to create the checksum for the object when using
-	// the SDK. This header will not provide any additional functionality if not
-	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
-	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
-	// the HTTP status code 400 Bad Request. For more information, see Checking
-	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
-	// in the Amazon S3 User Guide.
-	//
-	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
-	// parameter.
-	//
-	// The AWS SDK for Go v1 does not support automatic computing request payload
-	// checksum. This feature is available in the AWS SDK for Go v2. If a value
-	// is specified for this parameter, the matching algorithm's checksum member
-	// must be populated with the algorithm's checksum of the request payload.
-	//
-	// The SDK will automatically compute the Content-MD5 checksum for this operation.
-	// The AWS SDK for Go v2 allows you to configure alternative checksum algorithm
-	// to be used.
-	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
-
-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
-	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutBucketCorsInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutBucketCorsInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
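// Sketch: a single CORS rule. AllowedMethods and AllowedOrigins are the two
// required members of a CORSRule; the origin and bucket below are placeholders.
func examplePutBucketCors(svc *s3.S3) error {
	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"),
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedMethods: []*string{aws.String("GET")},
				AllowedOrigins: []*string{aws.String("https://example.com")},
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	return err
}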
-func (s *PutBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.CORSConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) - } - if s.CORSConfiguration != nil { - if err := s.CORSConfiguration.Validate(); err != nil { - invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { - s.Bucket = &v - return s -} - -func (s *PutBucketCorsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetCORSConfiguration sets the CORSConfiguration field's value. -func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { - s.CORSConfiguration = v - return s -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketCorsInput) SetChecksumAlgorithm(v string) *PutBucketCorsInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketCorsInput) SetExpectedBucketOwner(v string) *PutBucketCorsInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketCorsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketCorsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketCorsOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketCorsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketCorsOutput) GoString() string { - return s.String() -} - -type PutBucketEncryptionInput struct { - _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` - - // Specifies default encryption for a bucket using server-side encryption with - // Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). 
For information - // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket - // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Specifies the default server-side-encryption configuration. - // - // ServerSideEncryptionConfiguration is a required field - ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketEncryptionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketEncryptionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketEncryptionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.ServerSideEncryptionConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) - } - if s.ServerSideEncryptionConfiguration != nil { - if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { - invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { - s.Bucket = &v - return s -} - -func (s *PutBucketEncryptionInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketEncryptionInput) SetChecksumAlgorithm(v string) *PutBucketEncryptionInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketEncryptionInput) SetExpectedBucketOwner(v string) *PutBucketEncryptionInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. -func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { - s.ServerSideEncryptionConfiguration = v - return s -} - -func (s *PutBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketEncryptionInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketEncryptionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketEncryptionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
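// Sketch: default bucket encryption with SSE-S3 (AES256); for SSE-KMS, swap
// in the aws:kms algorithm and set KMSMasterKeyID on the same rule. Bucket
// name and helper name are placeholders.
func examplePutDefaultEncryption(svc *s3.S3) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
				},
			}},
		},
	})
	return err
}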
-func (s PutBucketEncryptionOutput) GoString() string { - return s.String() -} - -type PutBucketIntelligentTieringConfigurationInput struct { - _ struct{} `locationName:"PutBucketIntelligentTieringConfigurationRequest" type:"structure" payload:"IntelligentTieringConfiguration"` - - // The name of the Amazon S3 bucket whose configuration you want to modify or - // retrieve. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The ID used to identify the S3 Intelligent-Tiering configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - - // Container for S3 Intelligent-Tiering configuration. - // - // IntelligentTieringConfiguration is a required field - IntelligentTieringConfiguration *IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketIntelligentTieringConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketIntelligentTieringConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketIntelligentTieringConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketIntelligentTieringConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.IntelligentTieringConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("IntelligentTieringConfiguration")) - } - if s.IntelligentTieringConfiguration != nil { - if err := s.IntelligentTieringConfiguration.Validate(); err != nil { - invalidParams.AddNested("IntelligentTieringConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketIntelligentTieringConfigurationInput) SetBucket(v string) *PutBucketIntelligentTieringConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketIntelligentTieringConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetId sets the Id field's value. -func (s *PutBucketIntelligentTieringConfigurationInput) SetId(v string) *PutBucketIntelligentTieringConfigurationInput { - s.Id = &v - return s -} - -// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. 
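// Sketch: an Intelligent-Tiering configuration that moves objects to the
// Archive Access tier after 90 days of no access. The ids and bucket are
// placeholders; note the Id is repeated on the input and the configuration.
func examplePutIntelligentTiering(svc *s3.S3) error {
	_, err := svc.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("archive-cold-data"),
		IntelligentTieringConfiguration: &s3.IntelligentTieringConfiguration{
			Id:     aws.String("archive-cold-data"),
			Status: aws.String(s3.IntelligentTieringStatusEnabled),
			Tierings: []*s3.Tiering{{
				AccessTier: aws.String(s3.IntelligentTieringAccessTierArchiveAccess),
				Days:       aws.Int64(90),
			}},
		},
	})
	return err
}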
-func (s *PutBucketIntelligentTieringConfigurationInput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *PutBucketIntelligentTieringConfigurationInput { - s.IntelligentTieringConfiguration = v - return s -} - -func (s *PutBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketIntelligentTieringConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketIntelligentTieringConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketIntelligentTieringConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketInventoryConfigurationInput struct { - _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` - - // The name of the bucket where the inventory configuration will be stored. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - - // Specifies the inventory configuration. - // - // InventoryConfiguration is a required field - InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketInventoryConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketInventoryConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketInventoryConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.InventoryConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) - } - if s.InventoryConfiguration != nil { - if err := s.InventoryConfiguration.Validate(); err != nil { - invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketInventoryConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetId sets the Id field's value. -func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { - s.Id = &v - return s -} - -// SetInventoryConfiguration sets the InventoryConfiguration field's value. -func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { - s.InventoryConfiguration = v - return s -} - -func (s *PutBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketInventoryConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
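// Sketch: a weekly CSV inventory delivered to a second bucket. Note the
// destination bucket is given as an ARN, unlike the source bucket; all names
// below are placeholders.
func examplePutInventoryConfig(svc *s3.S3) error {
	_, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("weekly-inventory"),
		InventoryConfiguration: &s3.InventoryConfiguration{
			Id:                     aws.String("weekly-inventory"),
			IsEnabled:              aws.Bool(true),
			IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
			Schedule:               &s3.InventorySchedule{Frequency: aws.String(s3.InventoryFrequencyWeekly)},
			Destination: &s3.InventoryDestination{
				S3BucketDestination: &s3.InventoryS3BucketDestination{
					Bucket: aws.String("arn:aws:s3:::example-inventory-dest"),
					Format: aws.String(s3.InventoryFormatCsv),
				},
			},
		},
	})
	return err
}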
-func (s PutBucketInventoryConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketInventoryConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketLifecycleConfigurationInput struct { - _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` - - // The name of the bucket for which to set the configuration. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Container for lifecycle rules. You can add as many as 1,000 rules. - LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketLifecycleConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketLifecycleConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketLifecycleConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.LifecycleConfiguration != nil { - if err := s.LifecycleConfiguration.Validate(); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketLifecycleConfigurationInput) SetChecksumAlgorithm(v string) *PutBucketLifecycleConfigurationInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. -func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { - s.LifecycleConfiguration = v - return s -} - -func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketLifecycleConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketLifecycleConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
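// Sketch: one lifecycle rule that expires objects under a prefix after 30
// days (rule id, prefix, and bucket are placeholders):
func examplePutLifecycleConfig(svc *s3.S3) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-tmp"),
				Status:     aws.String(s3.ExpirationStatusEnabled), // "Enabled"
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("tmp/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
			}},
		},
	})
	return err
}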
-func (s PutBucketLifecycleConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketLifecycleInput struct { - _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Container for lifecycle rules. You can add as many as 1000 rules. - LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketLifecycleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketLifecycleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.LifecycleConfiguration != nil { - if err := s.LifecycleConfiguration.Validate(); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { - s.Bucket = &v - return s -} - -func (s *PutBucketLifecycleInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketLifecycleInput) SetChecksumAlgorithm(v string) *PutBucketLifecycleInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketLifecycleInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. -func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { - s.LifecycleConfiguration = v - return s -} - -func (s *PutBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketLifecycleInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketLifecycleOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketLifecycleOutput) GoString() string { - return s.String() -} - -type PutBucketLoggingInput struct { - _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` - - // The name of the bucket for which to set the logging parameters. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for logging status information. 
- // - // BucketLoggingStatus is a required field - BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketLoggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketLoggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketLoggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.BucketLoggingStatus == nil { - invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) - } - if s.BucketLoggingStatus != nil { - if err := s.BucketLoggingStatus.Validate(); err != nil { - invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
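
// Usage sketch for PutBucketLoggingInput (illustrative only; assumes svc
// *s3.S3 and the aws / s3 imports; names are hypothetical). Sending an empty
// BucketLoggingStatus instead would disable access logging for the bucket:
//
//    _, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
//        Bucket: aws.String("example-bucket"),
//        BucketLoggingStatus: &s3.BucketLoggingStatus{
//            LoggingEnabled: &s3.LoggingEnabled{
//                TargetBucket: aws.String("example-log-bucket"),
//                TargetPrefix: aws.String("access-logs/"),
//            },
//        },
//    })
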
-func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { - s.Bucket = &v - return s -} - -func (s *PutBucketLoggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. -func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { - s.BucketLoggingStatus = v - return s -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketLoggingInput) SetChecksumAlgorithm(v string) *PutBucketLoggingInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketLoggingInput) SetExpectedBucketOwner(v string) *PutBucketLoggingInput { - s.ExpectedBucketOwner = &v - return s -} - -func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketLoggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketLoggingInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketLoggingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketLoggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketLoggingOutput) GoString() string { - return s.String() -} - -type PutBucketMetricsConfigurationInput struct { - _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` - - // The name of the bucket for which the metrics configuration is set. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The ID used to identify the metrics configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - - // Specifies the metrics configuration. 
- // - // MetricsConfiguration is a required field - MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketMetricsConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketMetricsConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketMetricsConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.MetricsConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) - } - if s.MetricsConfiguration != nil { - if err := s.MetricsConfiguration.Validate(); err != nil { - invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketMetricsConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetId sets the Id field's value. -func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { - s.Id = &v - return s -} - -// SetMetricsConfiguration sets the MetricsConfiguration field's value. -func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { - s.MetricsConfiguration = v - return s -} - -func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. 
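
// A sketch of what the copy semantics described above mean in practice
// (illustrative only; assumes imports "fmt", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/aws/arn", and the s3 package; the access-point
// ARN is a placeholder). Because updateArnableField below takes a value
// receiver, the endpoint-resolution code mutates a copy of the input and the
// caller's struct is left untouched:
//
//    in := &s3.PutBucketMetricsConfigurationInput{
//        Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/example"),
//    }
//    fmt.Println(arn.IsARN(*in.Bucket)) // true — the same check hasEndpointARN
//                                       // performs; the SDK then resolves the
//                                       // endpoint from the ARN and backfills
//                                       // the parsed resource name into a
//                                       // *copy* of the input, so *in.Bucket
//                                       // still holds the full ARN afterwards.
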
-func (s PutBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketMetricsConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketMetricsConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketMetricsConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketNotificationConfigurationInput struct { - _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` - - // The name of the bucket. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // A container for specifying the notification configuration of the bucket. - // If this element is empty, notifications are turned off for the bucket. - // - // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True - // or false value. - SkipDestinationValidation *bool `location:"header" locationName:"x-amz-skip-destination-validation" type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketNotificationConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketNotificationConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
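
// Usage sketch (illustrative only; assumes svc *s3.S3 and the aws / s3
// imports). As the field documentation above notes, an empty
// NotificationConfiguration turns all notifications off for the bucket:
//
//    _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
//        Bucket:                    aws.String("example-bucket"),
//        NotificationConfiguration: &s3.NotificationConfiguration{},
//        // Skip the SNS/SQS/Lambda destination checks; only meaningful when
//        // destinations are actually configured.
//        SkipDestinationValidation: aws.Bool(true),
//    })
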
-func (s *PutBucketNotificationConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.NotificationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) - } - if s.NotificationConfiguration != nil { - if err := s.NotificationConfiguration.Validate(); err != nil { - invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketNotificationConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetNotificationConfiguration sets the NotificationConfiguration field's value. -func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { - s.NotificationConfiguration = v - return s -} - -// SetSkipDestinationValidation sets the SkipDestinationValidation field's value. -func (s *PutBucketNotificationConfigurationInput) SetSkipDestinationValidation(v bool) *PutBucketNotificationConfigurationInput { - s.SkipDestinationValidation = &v - return s -} - -func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketNotificationConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketNotificationConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketNotificationConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s PutBucketNotificationConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketNotificationInput struct { - _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` - - // The name of the bucket. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The container for the configuration. - // - // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketNotificationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketNotificationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.NotificationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketNotificationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketNotificationInput) SetChecksumAlgorithm(v string) *PutBucketNotificationInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketNotificationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetNotificationConfiguration sets the NotificationConfiguration field's value. -func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { - s.NotificationConfiguration = v - return s -} - -func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketNotificationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketNotificationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketNotificationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketNotificationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketNotificationOutput) GoString() string { - return s.String() -} - -type PutBucketOwnershipControlsInput struct { - _ struct{} `locationName:"PutBucketOwnershipControlsRequest" type:"structure" payload:"OwnershipControls"` - - // The name of the Amazon S3 bucket whose OwnershipControls you want to set. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) - // that you want to apply to this Amazon S3 bucket. - // - // OwnershipControls is a required field - OwnershipControls *OwnershipControls `locationName:"OwnershipControls" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketOwnershipControlsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketOwnershipControlsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketOwnershipControlsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketOwnershipControlsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.OwnershipControls == nil { - invalidParams.Add(request.NewErrParamRequired("OwnershipControls")) - } - if s.OwnershipControls != nil { - if err := s.OwnershipControls.Validate(); err != nil { - invalidParams.AddNested("OwnershipControls", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketOwnershipControlsInput) SetBucket(v string) *PutBucketOwnershipControlsInput { - s.Bucket = &v - return s -} - -func (s *PutBucketOwnershipControlsInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *PutBucketOwnershipControlsInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetOwnershipControls sets the OwnershipControls field's value. -func (s *PutBucketOwnershipControlsInput) SetOwnershipControls(v *OwnershipControls) *PutBucketOwnershipControlsInput { - s.OwnershipControls = v - return s -} - -func (s *PutBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketOwnershipControlsInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. 
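
// Usage sketch for PutBucketOwnershipControlsInput (illustrative only; assumes
// svc *s3.S3 and the aws / s3 imports, and that the BucketOwnerEnforced
// ownership mode and its generated constant are available in this SDK
// version):
//
//    _, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
//        Bucket: aws.String("example-bucket"),
//        OwnershipControls: &s3.OwnershipControls{
//            Rules: []*s3.OwnershipControlsRule{{
//                ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced),
//            }},
//        },
//    })
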
-func (s PutBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketOwnershipControlsOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketOwnershipControlsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketOwnershipControlsOutput) GoString() string { - return s.String() -} - -type PutBucketPolicyInput struct { - _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` - - // The name of the bucket. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // Set this parameter to true to confirm that you want to remove your permissions - // to change this bucket policy in the future. - ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The bucket policy as a JSON document. - // - // Policy is a required field - Policy *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s PutBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Policy == nil { - invalidParams.Add(request.NewErrParamRequired("Policy")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { - s.Bucket = &v - return s -} - -func (s *PutBucketPolicyInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketPolicyInput) SetChecksumAlgorithm(v string) *PutBucketPolicyInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. -func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { - s.ConfirmRemoveSelfBucketAccess = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketPolicyInput) SetExpectedBucketOwner(v string) *PutBucketPolicyInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { - s.Policy = &v - return s -} - -func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketPolicyInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketPolicyInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
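
// Usage sketch for PutBucketPolicyInput (illustrative only; assumes svc *s3.S3
// and the aws / s3 imports; the account ID and bucket are placeholders). The
// generated setters chain, and ConfirmRemoveSelfBucketAccess is deliberately
// left unset here — per its documentation above, setting it to true removes
// the caller's own permission to change the policy later:
//
//    policy := `{
//      "Version": "2012-10-17",
//      "Statement": [{
//        "Effect":    "Allow",
//        "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
//        "Action":    "s3:GetObject",
//        "Resource":  "arn:aws:s3:::example-bucket/*"
//      }]
//    }`
//    input := &s3.PutBucketPolicyInput{}
//    input.SetBucket("example-bucket").SetPolicy(policy)
//    _, err := svc.PutBucketPolicy(input)
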
-func (s PutBucketPolicyOutput) GoString() string { - return s.String() -} - -type PutBucketReplicationInput struct { - _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` - - // The name of the bucket - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // A container for replication rules. You can add up to 1,000 rules. The maximum - // size of a replication configuration is 2 MB. - // - // ReplicationConfiguration is a required field - ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // A token to allow Object Lock to be enabled for an existing bucket. - Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketReplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketReplicationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.ReplicationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) - } - if s.ReplicationConfiguration != nil { - if err := s.ReplicationConfiguration.Validate(); err != nil { - invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { - s.Bucket = &v - return s -} - -func (s *PutBucketReplicationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketReplicationInput) SetChecksumAlgorithm(v string) *PutBucketReplicationInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketReplicationInput) SetExpectedBucketOwner(v string) *PutBucketReplicationInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetReplicationConfiguration sets the ReplicationConfiguration field's value. -func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { - s.ReplicationConfiguration = v - return s -} - -// SetToken sets the Token field's value. -func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { - s.Token = &v - return s -} - -func (s *PutBucketReplicationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketReplicationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketReplicationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketReplicationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketReplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
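
// Usage sketch for PutBucketReplicationInput (illustrative only; assumes svc
// *s3.S3 and the aws / s3 imports; the role and bucket ARNs are placeholders).
// This shows one minimal rule shape that the client-side Validate above
// accepts — real configurations typically need versioning enabled on both
// buckets, and newer-style rules use Filter/Priority instead of the legacy
// Prefix field:
//
//    _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
//        Bucket: aws.String("example-source-bucket"),
//        ReplicationConfiguration: &s3.ReplicationConfiguration{
//            Role: aws.String("arn:aws:iam::123456789012:role/example-replication"),
//            Rules: []*s3.ReplicationRule{{
//                Status: aws.String(s3.ReplicationRuleStatusEnabled),
//                Prefix: aws.String(""), // replicate everything
//                Destination: &s3.Destination{
//                    Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
//                },
//            }},
//        },
//    })
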
-func (s PutBucketReplicationOutput) GoString() string { - return s.String() -} - -type PutBucketRequestPaymentInput struct { - _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Container for Payer. - // - // RequestPaymentConfiguration is a required field - RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketRequestPaymentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketRequestPaymentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
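
// Usage sketch for PutBucketRequestPaymentInput (illustrative only; assumes
// svc *s3.S3 and the aws / s3 imports). Requester Pays shifts data-transfer
// costs onto the caller of subsequent requests against the bucket:
//
//    _, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
//        Bucket: aws.String("example-bucket"),
//        RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
//            Payer: aws.String(s3.PayerRequester),
//        },
//    })
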
-func (s *PutBucketRequestPaymentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.RequestPaymentConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) - } - if s.RequestPaymentConfiguration != nil { - if err := s.RequestPaymentConfiguration.Validate(); err != nil { - invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { - s.Bucket = &v - return s -} - -func (s *PutBucketRequestPaymentInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketRequestPaymentInput) SetChecksumAlgorithm(v string) *PutBucketRequestPaymentInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *PutBucketRequestPaymentInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. -func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { - s.RequestPaymentConfiguration = v - return s -} - -func (s *PutBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketRequestPaymentOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketRequestPaymentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketRequestPaymentOutput) GoString() string { - return s.String() -} - -type PutBucketTaggingInput struct { - _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` - - // The bucket name. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Container for the TagSet and Tag elements. - // - // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Tagging == nil { - invalidParams.Add(request.NewErrParamRequired("Tagging")) - } - if s.Tagging != nil { - if err := s.Tagging.Validate(); err != nil { - invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. 
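
// Usage sketch for PutBucketTaggingInput (illustrative only; assumes svc
// *s3.S3 and the aws / s3 imports; tag keys and values are made up). Note that
// this call replaces the bucket's entire tag set rather than merging into it:
//
//    _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
//        Bucket: aws.String("example-bucket"),
//        Tagging: &s3.Tagging{
//            TagSet: []*s3.Tag{
//                {Key: aws.String("team"), Value: aws.String("platform")},
//                {Key: aws.String("environment"), Value: aws.String("production")},
//            },
//        },
//    })
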
-func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { - s.Bucket = &v - return s -} - -func (s *PutBucketTaggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketTaggingInput) SetChecksumAlgorithm(v string) *PutBucketTaggingInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketTaggingInput) SetExpectedBucketOwner(v string) *PutBucketTaggingInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetTagging sets the Tagging field's value. -func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { - s.Tagging = v - return s -} - -func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketTaggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketTaggingInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketTaggingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketTaggingOutput) GoString() string { - return s.String() -} - -type PutBucketVersioningInput struct { - _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. 
If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // Container for setting the versioning state. - // - // VersioningConfiguration is a required field - VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketVersioningInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketVersioningInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketVersioningInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.VersioningConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { - s.Bucket = &v - return s -} - -func (s *PutBucketVersioningInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketVersioningInput) SetChecksumAlgorithm(v string) *PutBucketVersioningInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketVersioningInput) SetExpectedBucketOwner(v string) *PutBucketVersioningInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetMFA sets the MFA field's value. -func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { - s.MFA = &v - return s -} - -// SetVersioningConfiguration sets the VersioningConfiguration field's value. 
-func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { - s.VersioningConfiguration = v - return s -} - -func (s *PutBucketVersioningInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketVersioningInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutBucketVersioningInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketVersioningOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketVersioningOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketVersioningOutput) GoString() string { - return s.String() -} - -type PutBucketWebsiteInput struct { - _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` - - // The bucket name. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). 
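The versioning input above is used the same way; a minimal sketch that enables versioning (bucket name is a placeholder):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Enable versioning; suspending works the same way with
	// s3.BucketVersioningStatusSuspended. The MFA header is only
	// needed when the bucket has MFA delete enabled.
	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String("my-bucket"), // placeholder
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: aws.String(s3.BucketVersioningStatusEnabled),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}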
- ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Container for the request. - // - // WebsiteConfiguration is a required field - WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketWebsiteInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.WebsiteConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) - } - if s.WebsiteConfiguration != nil { - if err := s.WebsiteConfiguration.Validate(); err != nil { - invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { - s.Bucket = &v - return s -} - -func (s *PutBucketWebsiteInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutBucketWebsiteInput) SetChecksumAlgorithm(v string) *PutBucketWebsiteInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutBucketWebsiteInput) SetExpectedBucketOwner(v string) *PutBucketWebsiteInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. -func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { - s.WebsiteConfiguration = v - return s -} - -func (s *PutBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutBucketWebsiteInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. 
-func (s PutBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutBucketWebsiteOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutBucketWebsiteOutput) GoString() string { - return s.String() -} - -type PutObjectAclInput struct { - _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` - - // The canned ACL to apply to the object. For more information, see Canned ACL - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // Contains the elements that set the ACL permissions for an object per grantee. - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The bucket name that contains the object to which you want to attach the - // ACL. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. 
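A minimal sketch of the website input just removed, wiring an index and error document; bucket and document names are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Serve index.html at the root and error.html for errors.
	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("my-bucket"),
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}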
- ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - // - // This action is not supported by Amazon S3 on Outposts. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - // - // This action is not supported by Amazon S3 on Outposts. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - // - // This action is not supported by Amazon S3 on Outposts. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create new objects in the bucket. - // - // For the bucket and object owners of existing objects, also allows deletions - // and overwrites of those objects. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - // - // This action is not supported by Amazon S3 on Outposts. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // Key for which the PUT action was initiated. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. 
- VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.AccessControlPolicy != nil { - if err := s.AccessControlPolicy.Validate(); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { - s.ACL = &v - return s -} - -// SetAccessControlPolicy sets the AccessControlPolicy field's value. -func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { - s.AccessControlPolicy = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { - s.Bucket = &v - return s -} - -func (s *PutObjectAclInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutObjectAclInput) SetChecksumAlgorithm(v string) *PutObjectAclInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutObjectAclInput) SetExpectedBucketOwner(v string) *PutObjectAclInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWrite sets the GrantWrite field's value. -func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { - s.GrantWrite = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { - s.GrantWriteACP = &v - return s -} - -// SetKey sets the Key field's value. 
-func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { - s.VersionId = &v - return s -} - -func (s *PutObjectAclInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutObjectAclInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutObjectAclInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutObjectAclOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectAclOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { - s.RequestCharged = &v - return s -} - -type PutObjectInput struct { - _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` - - // The canned ACL to apply to the object. For more information, see Canned ACL - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). - // - // This action is not supported by Amazon S3 on Outposts. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // Object data. - Body io.ReadSeeker `type:"blob"` - - // The bucket name to which the PUT action was initiated. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. 
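A minimal sketch for the object ACL input above, using a canned ACL rather than a full AccessControlPolicy (the two forms are mutually exclusive); all names are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Apply the canned public-read ACL to a single object.
	_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("reports/2023.csv"),
		ACL:    aws.String(s3.ObjectCannedACLPublicRead),
	})
	if err != nil {
		log.Fatal(err)
	}
}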
- // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header - // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with - // SSE-KMS. - // - // Specifying this header with a PUT action doesn’t affect bucket-level settings - // for S3 Bucket Key. - BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - - // Can be used to specify caching behavior along the request/reply chain. For - // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 32-bit CRC32C checksum of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. 
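As the doc comments note, SDK v1 does not compute payload checksums itself, so the checksum member matching ChecksumAlgorithm must be filled in by the caller. A minimal CRC32 sketch, with placeholder bucket and key names:

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/binary"
	"hash/crc32"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	body := []byte("hello world")

	// S3 expects the base64 encoding of the big-endian CRC32 value.
	sum := crc32.ChecksumIEEE(body)
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, sum)
	crc := base64.StdEncoding.EncodeToString(buf)

	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Populate ChecksumCRC32 by hand to match the declared algorithm.
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:            aws.String("my-bucket"),
		Key:               aws.String("hello.txt"),
		Body:              bytes.NewReader(body),
		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmCrc32),
		ChecksumCRC32:     aws.String(crc),
	})
	if err != nil {
		log.Fatal(err)
	}
}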
- ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` - - // Specifies presentational information for the object. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // The base64-encoded 128-bit MD5 digest of the message (without the headers) - // according to RFC 1864. This header can be used as a message integrity check - // to verify that the data is the same data that was originally sent. Although - // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end - // integrity check. For more information about REST request authentication, - // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). - ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - - // A standard MIME type describing the format of the contents. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). 
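The ContentMD5 member described above takes the base64-encoded (not hex) MD5 digest of the payload, per RFC 1864; a minimal sketch with placeholder names:

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	body := []byte("hello world")

	// Content-MD5 is the base64 encoding of the raw 128-bit digest.
	sum := md5.Sum(body)
	contentMD5 := base64.StdEncoding.EncodeToString(sum[:])

	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:      aws.String("my-bucket"), // placeholder
		Key:         aws.String("hello.txt"), // placeholder
		Body:        bytes.NewReader(body),
		ContentMD5:  aws.String(contentMD5),
		ContentType: aws.String("text/plain"),
	})
	if err != nil {
		log.Fatal(err)
	}
}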
- ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The date and time at which the object is no longer cacheable. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - // - // This action is not supported by Amazon S3 on Outposts. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - // - // This action is not supported by Amazon S3 on Outposts. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - // - // This action is not supported by Amazon S3 on Outposts. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. - // - // This action is not supported by Amazon S3 on Outposts. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - // Object key for which the PUT action was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Specifies whether a legal hold will be applied to this object. For more information - // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // The Object Lock mode that you want to apply to this object. - ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when you want this object's Object Lock to expire. Must - // be formatted as a timestamp parameter. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm - // header.
- // - // SSECustomerKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by PutObjectInput's - // String and GoString methods. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the Amazon Web Services KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. - // - // SSEKMSEncryptionContext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by PutObjectInput's - // String and GoString methods. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - - // If x-amz-server-side-encryption is present and has the value of aws:kms, - // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used - // for the object. If you specify x-amz-server-side-encryption:aws:kms, but - // do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses - // the Amazon Web Services managed key to protect the data. If the KMS key does - // not exist in the same account issuing the command, you must use the full - // ARN and not just the ID. - // - // SSEKMSKeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by PutObjectInput's - // String and GoString methods. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // By default, Amazon S3 uses the STANDARD Storage Class to store newly created - // objects. The STANDARD storage class provides high durability and high availability. - // Depending on performance needs, you can specify a different Storage Class. - // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, - // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // The tag-set for the object. The tag-set must be encoded as URL Query parameters. - // (For example, "Key1=Value1") - Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. For information about object - // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
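A minimal SSE-KMS sketch for the encryption members described above; the bucket, key, and KMS key ID are placeholders, and with SSEKMSKeyId omitted S3 falls back to the AWS managed key:

package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Request SSE-KMS with a specific customer managed key.
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:               aws.String("my-bucket"),
		Key:                  aws.String("secret.txt"),
		Body:                 bytes.NewReader([]byte("classified")),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:          aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		BucketKeyEnabled:     aws.Bool(true), // reduce per-object KMS calls
	})
	if err != nil {
		log.Fatal(err)
	}
}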
- // - // In the following example, the request header sets the redirect to an object - // (anotherPage.html) in the same bucket: - // - // x-amz-website-redirect-location: /anotherPage.html - // - // In the following example, the request header sets the object redirect to - // another website: - // - // x-amz-website-redirect-location: http://www.example.com/ - // - // For more information about website hosting in Amazon S3, see Hosting Websites - // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) - // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetACL sets the ACL field's value. -func (s *PutObjectInput) SetACL(v string) *PutObjectInput { - s.ACL = &v - return s -} - -// SetBody sets the Body field's value. -func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { - s.Body = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { - s.Bucket = &v - return s -} - -func (s *PutObjectInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. -func (s *PutObjectInput) SetBucketKeyEnabled(v bool) *PutObjectInput { - s.BucketKeyEnabled = &v - return s -} - -// SetCacheControl sets the CacheControl field's value. -func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { - s.CacheControl = &v - return s -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutObjectInput) SetChecksumAlgorithm(v string) *PutObjectInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *PutObjectInput) SetChecksumCRC32(v string) *PutObjectInput { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. -func (s *PutObjectInput) SetChecksumCRC32C(v string) *PutObjectInput { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. 
-func (s *PutObjectInput) SetChecksumSHA1(v string) *PutObjectInput { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *PutObjectInput) SetChecksumSHA256(v string) *PutObjectInput { - s.ChecksumSHA256 = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. -func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { - s.ContentLanguage = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { - s.ContentLength = &v - return s -} - -// SetContentMD5 sets the ContentMD5 field's value. -func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { - s.ContentMD5 = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { - s.ContentType = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutObjectInput) SetExpectedBucketOwner(v string) *PutObjectInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { - s.Expires = &v - return s -} - -// SetGrantFullControl sets the GrantFullControl field's value. -func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { - s.GrantFullControl = &v - return s -} - -// SetGrantRead sets the GrantRead field's value. -func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { - s.GrantRead = &v - return s -} - -// SetGrantReadACP sets the GrantReadACP field's value. -func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { - s.GrantReadACP = &v - return s -} - -// SetGrantWriteACP sets the GrantWriteACP field's value. -func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { - s.GrantWriteACP = &v - return s -} - -// SetKey sets the Key field's value. -func (s *PutObjectInput) SetKey(v string) *PutObjectInput { - s.Key = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { - s.Metadata = v - return s -} - -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { - s.ObjectLockRetainUntilDate = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
-func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { - s.SSECustomerKey = &v - return s -} - -func (s *PutObjectInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { - s.ServerSideEncryption = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { - s.StorageClass = &v - return s -} - -// SetTagging sets the Tagging field's value. -func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { - s.Tagging = &v - return s -} - -// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. -func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { - s.WebsiteRedirectLocation = &v - return s -} - -func (s *PutObjectInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutObjectInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutObjectInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutObjectLegalHoldInput struct { - _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` - - // The bucket name containing the object that you want to place a legal hold - // on. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. 
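Rounding out PutObjectInput: the Tagging member expects URL-query encoding, which url.Values produces directly, and Metadata keys are sent as x-amz-meta-* headers. A minimal sketch with placeholder names:

package main

import (
	"log"
	"net/url"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Encode the object tag-set as URL query parameters,
	// e.g. "env=prod&team=storage".
	tags := url.Values{}
	tags.Set("env", "prod")
	tags.Set("team", "storage")

	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:  aws.String("my-bucket"),
		Key:     aws.String("tagged.txt"),
		Body:    strings.NewReader("tagged payload"), // io.ReadSeeker
		Tagging: aws.String(tags.Encode()),
		Metadata: map[string]*string{
			"origin": aws.String("example"), // sent as x-amz-meta-origin
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}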
This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The key name for the object that you want to place a legal hold on. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Container element for the legal hold configuration you want to apply to the - // specified object. - LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The version ID of the object that you want to place a legal hold on. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectLegalHoldInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectLegalHoldInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutObjectLegalHoldInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput { - s.Bucket = &v - return s -} - -func (s *PutObjectLegalHoldInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutObjectLegalHoldInput) SetChecksumAlgorithm(v string) *PutObjectLegalHoldInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutObjectLegalHoldInput) SetExpectedBucketOwner(v string) *PutObjectLegalHoldInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { - s.Key = &v - return s -} - -// SetLegalHold sets the LegalHold field's value. -func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput { - s.LegalHold = v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput { - s.RequestPayer = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput { - s.VersionId = &v - return s -} - -func (s *PutObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutObjectLegalHoldInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutObjectLegalHoldOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectLegalHoldOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectLegalHoldOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput { - s.RequestCharged = &v - return s -} - -type PutObjectLockConfigurationInput struct { - _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` - - // The bucket whose Object Lock configuration you want to create or replace. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The Object Lock configuration that you want to apply to the specified bucket. - ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // A token to allow Object Lock to be enabled for an existing bucket. - Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectLockConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectLockConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectLockConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput { - s.Bucket = &v - return s -} - -func (s *PutObjectLockConfigurationInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutObjectLockConfigurationInput) SetChecksumAlgorithm(v string) *PutObjectLockConfigurationInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *PutObjectLockConfigurationInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. -func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput { - s.ObjectLockConfiguration = v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput { - s.RequestPayer = &v - return s -} - -// SetToken sets the Token field's value. -func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput { - s.Token = &v - return s -} - -func (s *PutObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutObjectLockConfigurationOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. 
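The Object Lock configuration shapes above are easiest to read alongside a call site. A minimal sketch, assuming the AWS SDK for Go v1 (github.com/aws/aws-sdk-go) and hypothetical bucket names; the Token member is only needed when enabling Object Lock on a pre-existing bucket, so it is omitted here:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Apply a default 30-day GOVERNANCE retention to every new object
	// version written to the bucket.
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-lock-bucket"), // hypothetical
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}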
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectLockConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectLockConfigurationOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput { - s.RequestCharged = &v - return s -} - -type PutObjectOutput struct { - _ struct{} `type:"structure"` - - // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` - - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` - - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` - - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. 
- ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` - - // Entity tag for the uploaded object. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the expiration is configured for the object (see PutBucketLifecycleConfiguration - // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), - // the response includes this header. It includes the expiry-date and rule-id - // key-value pairs that provide information about object expiration. The value - // of the rule-id is URL-encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the Amazon Web Services KMS Encryption Context to use - // for object encryption. The value of this header is a base64-encoded UTF-8 - // string holding JSON with the encryption context key-value pairs. - // - // SSEKMSEncryptionContext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by PutObjectOutput's - // String and GoString methods. - SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - - // If x-amz-server-side-encryption is present and has the value of aws:kms, - // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. - // - // SSEKMSKeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by PutObjectOutput's - // String and GoString methods. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // If you specified server-side encryption either with an Amazon Web Services - // KMS key or Amazon S3-managed encryption key in your PUT request, the response - // includes this header. It confirms the encryption algorithm that Amazon S3 - // used to encrypt the object. - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
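Every member of PutObjectOutput is response-only, so a call site makes the request/response relationship clearer. A minimal sketch, assuming the AWS SDK for Go v1 and hypothetical bucket/key names; as the doc comments above note, the v1 SDK does not compute flexible checksums for you, so the SHA-256 is supplied pre-computed:

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	body := []byte("hello, world")
	sum := sha256.Sum256(body)

	out, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:            aws.String("example-bucket"),      // hypothetical
		Key:               aws.String("greetings/hello.txt"), // hypothetical
		Body:              bytes.NewReader(body),
		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmSha256),
		ChecksumSHA256:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Response-only members echo what the service stored.
	fmt.Println("ETag:", aws.StringValue(out.ETag))
	fmt.Println("SHA256:", aws.StringValue(out.ChecksumSHA256))
	fmt.Println("VersionId:", aws.StringValue(out.VersionId))
}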
-func (s PutObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectOutput) GoString() string { - return s.String() -} - -// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. -func (s *PutObjectOutput) SetBucketKeyEnabled(v bool) *PutObjectOutput { - s.BucketKeyEnabled = &v - return s -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *PutObjectOutput) SetChecksumCRC32(v string) *PutObjectOutput { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. -func (s *PutObjectOutput) SetChecksumCRC32C(v string) *PutObjectOutput { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *PutObjectOutput) SetChecksumSHA1(v string) *PutObjectOutput { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *PutObjectOutput) SetChecksumSHA256(v string) *PutObjectOutput { - s.ChecksumSHA256 = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { - s.ETag = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { - s.Expiration = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. -func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { - s.SSEKMSEncryptionContext = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { - s.ServerSideEncryption = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { - s.VersionId = &v - return s -} - -type PutObjectRetentionInput struct { - _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` - - // The bucket name that contains the object you want to apply this Object Retention - // configuration to. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. 
For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates whether this action should bypass Governance-mode restrictions. - BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The key name for the object that you want to apply this Object Retention - // configuration to. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // The container element for the Object Retention configuration. - Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The version ID for the object that you want to apply this Object Retention - // configuration to. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
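A rough usage sketch for the input shape above, assuming the AWS SDK for Go v1; the bucket and key are hypothetical, and the bucket must have been created with Object Lock enabled:

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
		Bucket: aws.String("example-lock-bucket"), // hypothetical
		Key:    aws.String("records/report.pdf"),  // hypothetical
		Retention: &s3.ObjectLockRetention{
			Mode:            aws.String(s3.ObjectLockRetentionModeGovernance),
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
		},
		// BypassGovernanceRetention is only needed to shorten or remove an
		// existing GOVERNANCE-mode retention, not to set a new one.
	})
	if err != nil {
		log.Fatal(err)
	}
}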
-func (s PutObjectRetentionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectRetentionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectRetentionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput { - s.Bucket = &v - return s -} - -func (s *PutObjectRetentionInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. -func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput { - s.BypassGovernanceRetention = &v - return s -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutObjectRetentionInput) SetChecksumAlgorithm(v string) *PutObjectRetentionInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutObjectRetentionInput) SetExpectedBucketOwner(v string) *PutObjectRetentionInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput { - s.RequestPayer = &v - return s -} - -// SetRetention sets the Retention field's value. -func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput { - s.Retention = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { - s.VersionId = &v - return s -} - -func (s *PutObjectRetentionInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutObjectRetentionInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. 
-func (s PutObjectRetentionInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutObjectRetentionOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectRetentionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectRetentionOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput { - s.RequestCharged = &v - return s -} - -type PutObjectTaggingInput struct { - _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` - - // The bucket name containing the object. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. 
- // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Name of the object key. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Container for the TagSet and Tag elements - // - // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The versionId of the object that the tag-set will be added to. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Tagging == nil { - invalidParams.Add(request.NewErrParamRequired("Tagging")) - } - if s.Tagging != nil { - if err := s.Tagging.Validate(); err != nil { - invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { - s.Bucket = &v - return s -} - -func (s *PutObjectTaggingInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutObjectTaggingInput) SetChecksumAlgorithm(v string) *PutObjectTaggingInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutObjectTaggingInput) SetExpectedBucketOwner(v string) *PutObjectTaggingInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *PutObjectTaggingInput) SetRequestPayer(v string) *PutObjectTaggingInput { - s.RequestPayer = &v - return s -} - -// SetTagging sets the Tagging field's value. -func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { - s.Tagging = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { - s.VersionId = &v - return s -} - -func (s *PutObjectTaggingInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutObjectTaggingInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutObjectTaggingInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutObjectTaggingOutput struct { - _ struct{} `type:"structure"` - - // The versionId of the object the tag-set was added to. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
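As a usage sketch for the tagging shapes above (AWS SDK for Go v1; names are hypothetical placeholders), keep in mind that PutObjectTagging replaces the object's entire tag set rather than merging into it:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("example-bucket"),      // hypothetical
		Key:    aws.String("greetings/hello.txt"), // hypothetical
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("project"), Value: aws.String("demo")},
				{Key: aws.String("owner"), Value: aws.String("platform-team")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// On versioned buckets the response reports which version was tagged.
	fmt.Println("tagged version:", aws.StringValue(out.VersionId))
}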
-func (s PutObjectTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutObjectTaggingOutput) GoString() string { - return s.String() -} - -// SetVersionId sets the VersionId field's value. -func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { - s.VersionId = &v - return s -} - -type PutPublicAccessBlockInput struct { - _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` - - // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you - // want to set. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // The AWS SDK for Go v1 does not support automatic computing request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - // - // The SDK will automatically compute the Content-MD5 checksum for this operation. - // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm - // to be used. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The PublicAccessBlock configuration that you want to apply to this Amazon - // S3 bucket. You can enable the configuration options in any combination. For - // more information about when Amazon S3 considers a bucket or object public, - // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) - // in the Amazon S3 User Guide. - // - // PublicAccessBlockConfiguration is a required field - PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
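A minimal sketch of applying the PublicAccessBlock configuration described above, assuming the AWS SDK for Go v1 and a hypothetical bucket name:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// The four options are independent booleans; enabling all of them is
	// the most restrictive (and most common) combination.
	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}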
-func (s PutPublicAccessBlockInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutPublicAccessBlockInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutPublicAccessBlockInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.PublicAccessBlockConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput { - s.Bucket = &v - return s -} - -func (s *PutPublicAccessBlockInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *PutPublicAccessBlockInput) SetChecksumAlgorithm(v string) *PutPublicAccessBlockInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *PutPublicAccessBlockInput) SetExpectedBucketOwner(v string) *PutPublicAccessBlockInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. -func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput { - s.PublicAccessBlockConfiguration = v - return s -} - -func (s *PutPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s PutPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type PutPublicAccessBlockOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutPublicAccessBlockOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutPublicAccessBlockOutput) GoString() string { - return s.String() -} - -// Specifies the configuration for publishing messages to an Amazon Simple Queue -// Service (Amazon SQS) queue when Amazon S3 detects specified events. -type QueueConfiguration struct { - _ struct{} `type:"structure"` - - // A collection of bucket events for which to send notifications - // - // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` - - // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. - Filter *NotificationConfigurationFilter `type:"structure"` - - // An optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 - // publishes a message when it detects events of the specified type. - // - // QueueArn is a required field - QueueArn *string `locationName:"Queue" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueueConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueueConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *QueueConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) - } - if s.QueueArn == nil { - invalidParams.Add(request.NewErrParamRequired("QueueArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEvents sets the Events field's value. -func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { - s.Events = v - return s -} - -// SetFilter sets the Filter field's value. -func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { - s.Id = &v - return s -} - -// SetQueueArn sets the QueueArn field's value. -func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { - s.QueueArn = &v - return s -} - -// This data type is deprecated. Use QueueConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_QueueConfiguration.html) -// for the same purposes. This data type specifies the configuration for publishing -// messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon -// S3 detects specified events. -type QueueConfigurationDeprecated struct { - _ struct{} `type:"structure"` - - // The bucket event for which to send notifications. 
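Before the deprecated variant below, a sketch of the current QueueConfiguration in use via PutBucketNotificationConfiguration (AWS SDK for Go v1; the bucket name and queue ARN are hypothetical, and the SQS queue policy must already permit s3.amazonaws.com to send messages):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				// Id is optional; Amazon S3 assigns one if omitted.
				QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"), // hypothetical
				Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
				Filter: &s3.NotificationConfigurationFilter{
					Key: &s3.KeyFilter{FilterRules: []*s3.FilterRule{
						{Name: aws.String(s3.FilterRuleNamePrefix), Value: aws.String("uploads/")},
					}},
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}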
- // - // Deprecated: Event has been deprecated - Event *string `deprecated:"true" type:"string" enum:"Event"` - - // A collection of bucket events for which to send notifications. - Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` - - // An optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 - // publishes a message when it detects events of the specified type. - Queue *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueueConfigurationDeprecated) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueueConfigurationDeprecated) GoString() string { - return s.String() -} - -// SetEvent sets the Event field's value. -func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated { - s.Event = &v - return s -} - -// SetEvents sets the Events field's value. -func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated { - s.Events = v - return s -} - -// SetId sets the Id field's value. -func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated { - s.Id = &v - return s -} - -// SetQueue sets the Queue field's value. -func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated { - s.Queue = &v - return s -} - -// The container for the records event. -type RecordsEvent struct { - _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"` - - // The byte array of partial, one or more result records. - // Payload is automatically base64 encoded/decoded by the SDK. - Payload []byte `type:"blob"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RecordsEvent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RecordsEvent) GoString() string { - return s.String() -} - -// SetPayload sets the Payload field's value. -func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent { - s.Payload = v - return s -} - -// The RecordsEvent is an event in the SelectObjectContentEventStream group of events. -func (s *RecordsEvent) eventSelectObjectContentEventStream() {} - -// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value. -// This method is only used internally within the SDK's EventStream handling.
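Callers do not unmarshal RecordsEvent by hand; values arrive through the SelectObjectContent event stream. A rough consumption sketch (AWS SDK for Go v1; the bucket, key, and query are hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),    // hypothetical
		Key:            aws.String("data/records.csv"),  // hypothetical
		Expression:     aws.String("SELECT s.name FROM S3Object s"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.EventStream.Close()

	// RecordsEvent payloads are partial result frames; concatenated in
	// order they form the full query output.
	for event := range resp.EventStream.Events() {
		if records, ok := event.(*s3.RecordsEvent); ok {
			fmt.Print(string(records.Payload))
		}
	}
	if err := resp.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}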
-func (s *RecordsEvent) UnmarshalEvent( - payloadUnmarshaler protocol.PayloadUnmarshaler, - msg eventstream.Message, -) error { - s.Payload = make([]byte, len(msg.Payload)) - copy(s.Payload, msg.Payload) - return nil -} - -// MarshalEvent marshals the type into a stream event value. This method -// should only be used internally within the SDK's EventStream handling. -func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { - msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) - msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream")) - msg.Payload = s.Payload - return msg, err -} - -// Specifies how requests are redirected. In the event of an error, you can -// specify a different error code to return. -type Redirect struct { - _ struct{} `type:"structure"` - - // The host name to use in the redirect request. - HostName *string `type:"string"` - - // The HTTP redirect code to use on the response. Not required if one of the - // siblings is present. - HttpRedirectCode *string `type:"string"` - - // Protocol to use when redirecting requests. The default is the protocol that - // is used in the original request. - Protocol *string `type:"string" enum:"Protocol"` - - // The object key prefix to use in the redirect request. For example, to redirect - // requests for all pages with prefix docs/ (objects in the docs/ folder) to - // documents/, you can set a condition block with KeyPrefixEquals set to docs/ - // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required - // if one of the siblings is present. Can be present only if ReplaceKeyWith - // is not provided. - // - // Replacement must be made for object keys containing special characters (such - // as carriage returns) when using XML requests. For more information, see XML - // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). - ReplaceKeyPrefixWith *string `type:"string"` - - // The specific object key to use in the redirect request. For example, redirect - // request to error.html. Not required if one of the siblings is present. Can - // be present only if ReplaceKeyPrefixWith is not provided. - // - // Replacement must be made for object keys containing special characters (such - // as carriage returns) when using XML requests. For more information, see XML - // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). - ReplaceKeyWith *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Redirect) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Redirect) GoString() string { - return s.String() -} - -// SetHostName sets the HostName field's value. -func (s *Redirect) SetHostName(v string) *Redirect { - s.HostName = &v - return s -} - -// SetHttpRedirectCode sets the HttpRedirectCode field's value.
-func (s *Redirect) SetHttpRedirectCode(v string) *Redirect { - s.HttpRedirectCode = &v - return s -} - -// SetProtocol sets the Protocol field's value. -func (s *Redirect) SetProtocol(v string) *Redirect { - s.Protocol = &v - return s -} - -// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value. -func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect { - s.ReplaceKeyPrefixWith = &v - return s -} - -// SetReplaceKeyWith sets the ReplaceKeyWith field's value. -func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { - s.ReplaceKeyWith = &v - return s -} - -// Specifies the redirect behavior of all requests to a website endpoint of -// an Amazon S3 bucket. -type RedirectAllRequestsTo struct { - _ struct{} `type:"structure"` - - // Name of the host where requests are redirected. - // - // HostName is a required field - HostName *string `type:"string" required:"true"` - - // Protocol to use when redirecting requests. The default is the protocol that - // is used in the original request. - Protocol *string `type:"string" enum:"Protocol"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RedirectAllRequestsTo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RedirectAllRequestsTo) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RedirectAllRequestsTo) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} - if s.HostName == nil { - invalidParams.Add(request.NewErrParamRequired("HostName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetHostName sets the HostName field's value. -func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { - s.HostName = &v - return s -} - -// SetProtocol sets the Protocol field's value. -func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { - s.Protocol = &v - return s -} - -// A filter that you can specify for selection for modifications on replicas. -// Amazon S3 doesn't replicate replica modifications by default. In the latest -// version of replication configuration (when Filter is specified), you can -// specify this element and set the status to Enabled to replicate modifications -// on replicas. -// -// If you don't specify the Filter element, Amazon S3 assumes that the replication -// configuration is the earlier version, V1. In the earlier version, this element -// is not allowed. -type ReplicaModifications struct { - _ struct{} `type:"structure"` - - // Specifies whether Amazon S3 replicates modifications on replicas. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ReplicaModificationsStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
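Both redirect shapes are applied through a bucket website configuration. A sketch of the whole-bucket variant (AWS SDK for Go v1; the bucket and host names are hypothetical); note that RedirectAllRequestsTo cannot be combined with IndexDocument, ErrorDocument, or RoutingRules:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Redirect every request on the bucket's website endpoint to another host.
	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{
				HostName: aws.String("docs.example.com"), // hypothetical
				Protocol: aws.String(s3.ProtocolHttps),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}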
-func (s ReplicaModifications) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaModifications) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicaModifications) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicaModifications"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStatus sets the Status field's value. -func (s *ReplicaModifications) SetStatus(v string) *ReplicaModifications { - s.Status = &v - return s -} - -// A container for replication rules. You can add up to 1,000 rules. The maximum -// size of a replication configuration is 2 MB. -type ReplicationConfiguration struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) - // role that Amazon S3 assumes when replicating objects. For more information, - // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) - // in the Amazon S3 User Guide. - // - // Role is a required field - Role *string `type:"string" required:"true"` - - // A container for one or more replication rules. A replication configuration - // must have at least one rule and can contain a maximum of 1,000 rules. - // - // Rules is a required field - Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} - if s.Role == nil { - invalidParams.Add(request.NewErrParamRequired("Role")) - } - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRole sets the Role field's value. -func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { - s.Role = &v - return s -} - -// SetRules sets the Rules field's value. 
-func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration { - s.Rules = v - return s -} - -// Specifies which Amazon S3 objects to replicate and where to store the replicas. -type ReplicationRule struct { - _ struct{} `type:"structure"` - - // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter - // in your replication configuration, you must also include a DeleteMarkerReplication - // element. If your Filter includes a Tag element, the DeleteMarkerReplication - // Status must be set to Disabled, because Amazon S3 does not support replicating - // delete markers for tag-based rules. For an example configuration, see Basic - // Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). - // - // For more information about delete marker replication, see Basic Rule Configuration - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). - // - // If you are using an earlier version of the replication configuration, Amazon - // S3 handles replication of delete markers differently. For more information, - // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). - DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` - - // A container for information about the replication destination and its configurations - // including enabling the S3 Replication Time Control (S3 RTC). - // - // Destination is a required field - Destination *Destination `type:"structure" required:"true"` - - // Optional configuration to replicate existing source bucket objects. For more - // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) - // in the Amazon S3 User Guide. - ExistingObjectReplication *ExistingObjectReplication `type:"structure"` - - // A filter that identifies the subset of objects to which the replication rule - // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. - Filter *ReplicationRuleFilter `type:"structure"` - - // A unique identifier for the rule. The maximum value is 255 characters. - ID *string `type:"string"` - - // An object key name prefix that identifies the object or objects to which - // the rule applies. The maximum prefix length is 1,024 characters. To include - // all objects in a bucket, specify an empty string. - // - // Replacement must be made for object keys containing special characters (such - // as carriage returns) when using XML requests. For more information, see XML - // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). - // - // Deprecated: Prefix has been deprecated - Prefix *string `deprecated:"true" type:"string"` - - // The priority indicates which rule has precedence whenever two or more replication - // rules conflict. Amazon S3 will attempt to replicate objects according to - // all replication rules. However, if there are two or more rules with the same - // destination bucket, then objects will be replicated according to the rule - // with the highest priority. The higher the number, the higher the priority. - // - // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) - // in the Amazon S3 User Guide. 
- Priority *int64 `type:"integer"` - - // A container that describes additional filters for identifying the source - // objects that you want to replicate. You can choose to enable or disable the - // replication of these objects. Currently, Amazon S3 supports only the filter - // that you can specify for objects created with server-side encryption using - // a customer managed key stored in Amazon Web Services Key Management Service - // (SSE-KMS). - SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` - - // Specifies whether the rule is enabled. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} - if s.Destination == nil { - invalidParams.Add(request.NewErrParamRequired("Destination")) - } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - if s.Destination != nil { - if err := s.Destination.Validate(); err != nil { - invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) - } - } - if s.ExistingObjectReplication != nil { - if err := s.ExistingObjectReplication.Validate(); err != nil { - invalidParams.AddNested("ExistingObjectReplication", err.(request.ErrInvalidParams)) - } - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - if s.SourceSelectionCriteria != nil { - if err := s.SourceSelectionCriteria.Validate(); err != nil { - invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value. -func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule { - s.DeleteMarkerReplication = v - return s -} - -// SetDestination sets the Destination field's value. -func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { - s.Destination = v - return s -} - -// SetExistingObjectReplication sets the ExistingObjectReplication field's value. -func (s *ReplicationRule) SetExistingObjectReplication(v *ExistingObjectReplication) *ReplicationRule { - s.ExistingObjectReplication = v - return s -} - -// SetFilter sets the Filter field's value. -func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { - s.Filter = v - return s -} - -// SetID sets the ID field's value. -func (s *ReplicationRule) SetID(v string) *ReplicationRule { - s.ID = &v - return s -} - -// SetPrefix sets the Prefix field's value. 
-func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { - s.Prefix = &v - return s -} - -// SetPriority sets the Priority field's value. -func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule { - s.Priority = &v - return s -} - -// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. -func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { - s.SourceSelectionCriteria = v - return s -} - -// SetStatus sets the Status field's value. -func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { - s.Status = &v - return s -} - -// A container for specifying rule filters. The filters determine the subset -// of objects to which the rule applies. This element is required only if you -// specify more than one filter. -// -// For example: -// -// - If you specify both a Prefix and a Tag filter, wrap these filters in -// an And tag. -// -// - If you specify a filter based on multiple tags, wrap the Tag elements -// in an And tag. -type ReplicationRuleAndOperator struct { - _ struct{} `type:"structure"` - - // An object key name prefix that identifies the subset of objects to which - // the rule applies. - Prefix *string `type:"string"` - - // An array of tags containing key and value pairs. - Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationRuleAndOperator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationRuleAndOperator) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationRuleAndOperator) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"} - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPrefix sets the Prefix field's value. -func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator { - s.Prefix = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator { - s.Tags = v - return s -} - -// A filter that identifies the subset of objects to which the replication rule -// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. -type ReplicationRuleFilter struct { - _ struct{} `type:"structure"` - - // A container for specifying rule filters. The filters determine the subset - // of objects to which the rule applies. This element is required only if you - // specify more than one filter. For example: - // - // * If you specify both a Prefix and a Tag filter, wrap these filters in - // an And tag. - // - // * If you specify a filter based on multiple tags, wrap the Tag elements - // in an And tag. 
- And *ReplicationRuleAndOperator `type:"structure"` - - // An object key name prefix that identifies the subset of objects to which - // the rule applies. - // - // Replacement must be made for object keys containing special characters (such - // as carriage returns) when using XML requests. For more information, see XML - // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). - Prefix *string `type:"string"` - - // A container for specifying a tag key and value. - // - // The rule applies only to objects that have the tag in their tag set. - Tag *Tag `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationRuleFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationRuleFilter) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationRuleFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"} - if s.And != nil { - if err := s.And.Validate(); err != nil { - invalidParams.AddNested("And", err.(request.ErrInvalidParams)) - } - } - if s.Tag != nil { - if err := s.Tag.Validate(); err != nil { - invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAnd sets the And field's value. -func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter { - s.And = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter { - s.Prefix = &v - return s -} - -// SetTag sets the Tag field's value. -func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { - s.Tag = v - return s -} - -// A container specifying S3 Replication Time Control (S3 RTC) related information, -// including whether S3 RTC is enabled and the time when all objects and operations -// on objects must be replicated. Must be specified together with a Metrics -// block. -type ReplicationTime struct { - _ struct{} `type:"structure"` - - // Specifies whether the replication time is enabled. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ReplicationTimeStatus"` - - // A container specifying the time by which replication should be complete for - // all objects and operations on objects. - // - // Time is a required field - Time *ReplicationTimeValue `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationTime) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationTime) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationTime) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationTime"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - if s.Time == nil { - invalidParams.Add(request.NewErrParamRequired("Time")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStatus sets the Status field's value. -func (s *ReplicationTime) SetStatus(v string) *ReplicationTime { - s.Status = &v - return s -} - -// SetTime sets the Time field's value. -func (s *ReplicationTime) SetTime(v *ReplicationTimeValue) *ReplicationTime { - s.Time = v - return s -} - -// A container specifying the time value for S3 Replication Time Control (S3 -// RTC) and replication metrics EventThreshold. -type ReplicationTimeValue struct { - _ struct{} `type:"structure"` - - // Contains an integer specifying time in minutes. - // - // Valid value: 15 - Minutes *int64 `type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationTimeValue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationTimeValue) GoString() string { - return s.String() -} - -// SetMinutes sets the Minutes field's value. -func (s *ReplicationTimeValue) SetMinutes(v int64) *ReplicationTimeValue { - s.Minutes = &v - return s -} - -// Container for Payer. -type RequestPaymentConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies who pays for the download and request fees. - // - // Payer is a required field - Payer *string `type:"string" required:"true" enum:"Payer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RequestPaymentConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RequestPaymentConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RequestPaymentConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} - if s.Payer == nil { - invalidParams.Add(request.NewErrParamRequired("Payer")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPayer sets the Payer field's value. 
-func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration {
-	s.Payer = &v
-	return s
-}
-
-// Container for specifying if periodic QueryProgress messages should be sent.
-type RequestProgress struct {
-	_ struct{} `type:"structure"`
-
-	// Specifies whether periodic QueryProgress frames should be sent. Valid values:
-	// TRUE, FALSE. Default value: FALSE.
-	Enabled *bool `type:"boolean"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RequestProgress) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RequestProgress) GoString() string {
-	return s.String()
-}
-
-// SetEnabled sets the Enabled field's value.
-func (s *RequestProgress) SetEnabled(v bool) *RequestProgress {
-	s.Enabled = &v
-	return s
-}
-
-type RestoreObjectInput struct {
-	_ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"`
-
-	// The bucket name containing the object to restore.
-	//
-	// When using this action with an access point, you must direct requests to
-	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
-	// When using this action with an access point through the Amazon Web Services
-	// SDKs, you provide the access point ARN in place of the bucket name. For more
-	// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
-	// in the Amazon S3 User Guide.
-	//
-	// When using this action with Amazon S3 on Outposts, you must direct requests
-	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
-	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
-	// using this action with S3 on Outposts through the Amazon Web Services SDKs,
-	// you provide the Outposts bucket ARN in place of the bucket name. For more
-	// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
-	// in the Amazon S3 User Guide.
-	//
-	// Bucket is a required field
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// Indicates the algorithm used to create the checksum for the object when using
-	// the SDK. This header will not provide any additional functionality if not
-	// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
-	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
-	// the HTTP status code 400 Bad Request. For more information, see Checking
-	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
-	// in the Amazon S3 User Guide.
-	//
-	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
-	// parameter.
-	//
-	// The AWS SDK for Go v1 does not support automatically computing the request
-	// payload checksum. This feature is available in the AWS SDK for Go v2.
If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Object key for which the action was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Container for restore job parameters. - RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RestoreObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RestoreObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RestoreObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.RestoreRequest != nil { - if err := s.RestoreRequest.Validate(); err != nil { - invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { - s.Bucket = &v - return s -} - -func (s *RestoreObjectInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. 
-func (s *RestoreObjectInput) SetChecksumAlgorithm(v string) *RestoreObjectInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *RestoreObjectInput) SetExpectedBucketOwner(v string) *RestoreObjectInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { - s.RequestPayer = &v - return s -} - -// SetRestoreRequest sets the RestoreRequest field's value. -func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { - s.RestoreRequest = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { - s.VersionId = &v - return s -} - -func (s *RestoreObjectInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *RestoreObjectInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s RestoreObjectInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type RestoreObjectOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Indicates the path in the provided S3 output location where Select results - // will be restored to. - RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RestoreObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RestoreObjectOutput) GoString() string { - return s.String() -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { - s.RequestCharged = &v - return s -} - -// SetRestoreOutputPath sets the RestoreOutputPath field's value. -func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { - s.RestoreOutputPath = &v - return s -} - -// Container for restore job parameters. -type RestoreRequest struct { - _ struct{} `type:"structure"` - - // Lifetime of the active copy in days. 
Do not use with restores that specify - // OutputLocation. - // - // The Days element is required for regular restores, and must not be provided - // for select requests. - Days *int64 `type:"integer"` - - // The optional description for the job. - Description *string `type:"string"` - - // S3 Glacier related parameters pertaining to this job. Do not use with restores - // that specify OutputLocation. - GlacierJobParameters *GlacierJobParameters `type:"structure"` - - // Describes the location where the restore job's output is stored. - OutputLocation *OutputLocation `type:"structure"` - - // Describes the parameters for Select job types. - SelectParameters *SelectParameters `type:"structure"` - - // Retrieval tier at which the restore will be processed. - Tier *string `type:"string" enum:"Tier"` - - // Type of restore request. - Type *string `type:"string" enum:"RestoreRequestType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RestoreRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RestoreRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RestoreRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} - if s.GlacierJobParameters != nil { - if err := s.GlacierJobParameters.Validate(); err != nil { - invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) - } - } - if s.OutputLocation != nil { - if err := s.OutputLocation.Validate(); err != nil { - invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) - } - } - if s.SelectParameters != nil { - if err := s.SelectParameters.Validate(); err != nil { - invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDays sets the Days field's value. -func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { - s.Days = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { - s.Description = &v - return s -} - -// SetGlacierJobParameters sets the GlacierJobParameters field's value. -func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { - s.GlacierJobParameters = v - return s -} - -// SetOutputLocation sets the OutputLocation field's value. -func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { - s.OutputLocation = v - return s -} - -// SetSelectParameters sets the SelectParameters field's value. -func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { - s.SelectParameters = v - return s -} - -// SetTier sets the Tier field's value. -func (s *RestoreRequest) SetTier(v string) *RestoreRequest { - s.Tier = &v - return s -} - -// SetType sets the Type field's value. 
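Tying the RestoreRequest fields above together: a minimal sketch of a plain archive retrieval via RestoreObject, using Days plus GlacierJobParameters and leaving OutputLocation, SelectParameters, and Type unset (bucket name, key, and session setup are illustrative placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Keep the restored copy active for 7 days and use the Standard tier.
	restore := (&s3.RestoreRequest{}).
		SetDays(7).
		SetGlacierJobParameters((&s3.GlacierJobParameters{}).SetTier(s3.TierStandard))

	if _, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket:         aws.String("example-archive-bucket"),
		Key:            aws.String("example/object.bin"),
		RestoreRequest: restore,
	}); err != nil {
		log.Fatal(err)
	}
}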
-func (s *RestoreRequest) SetType(v string) *RestoreRequest { - s.Type = &v - return s -} - -// Specifies the redirect behavior and when a redirect is applied. For more -// information about routing rules, see Configuring advanced conditional redirects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) -// in the Amazon S3 User Guide. -type RoutingRule struct { - _ struct{} `type:"structure"` - - // A container for describing a condition that must be met for the specified - // redirect to apply. For example, 1. If request is for pages in the /docs folder, - // redirect to the /documents folder. 2. If request results in HTTP error 4xx, - // redirect request to another host where you might process the error. - Condition *Condition `type:"structure"` - - // Container for redirect information. You can redirect requests to another - // host, to another page, or with another protocol. In the event of an error, - // you can specify a different error code to return. - // - // Redirect is a required field - Redirect *Redirect `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RoutingRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RoutingRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RoutingRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} - if s.Redirect == nil { - invalidParams.Add(request.NewErrParamRequired("Redirect")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCondition sets the Condition field's value. -func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { - s.Condition = v - return s -} - -// SetRedirect sets the Redirect field's value. -func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { - s.Redirect = v - return s -} - -// Specifies lifecycle rules for an Amazon S3 bucket. For more information, -// see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) -// in the Amazon S3 API Reference. For examples, see Put Bucket Lifecycle Configuration -// Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples). -type Rule struct { - _ struct{} `type:"structure"` - - // Specifies the days since the initiation of an incomplete multipart upload - // that Amazon S3 will wait before permanently removing all parts of the upload. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon S3 User Guide. - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - - // Specifies the expiration for the lifecycle of the object. 
- Expiration *LifecycleExpiration `type:"structure"` - - // Unique identifier for the rule. The value can't be longer than 255 characters. - ID *string `type:"string"` - - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. - NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` - - // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, - // GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled - // (or versioning is suspended), you can set this action to request that Amazon - // S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, - // INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at - // a specific period in the object's lifetime. - NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` - - // Object key prefix that identifies one or more objects to which this rule - // applies. - // - // Replacement must be made for object keys containing special characters (such - // as carriage returns) when using XML requests. For more information, see XML - // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). - // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` - - // If Enabled, the rule is currently being applied. If Disabled, the rule is - // not currently being applied. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - - // Specifies when an object transitions to a specified storage class. For more - // information about Amazon S3 lifecycle configuration rules, see Transitioning - // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) - // in the Amazon S3 User Guide. - Transition *Transition `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Rule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Rule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Rule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Rule"} - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. 
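Since this Rule type is the older, prefix-based lifecycle rule, a minimal sketch of how it would be populated and applied through PutBucketLifecycle may help reviewers (bucket name, prefix, and retention periods are illustrative placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Prefix and Status are the two required fields checked by Rule.Validate.
	rule := (&s3.Rule{}).
		SetID("archive-logs").
		SetPrefix("logs/").
		SetStatus(s3.ExpirationStatusEnabled).
		SetTransition((&s3.Transition{}).SetDays(30).SetStorageClass(s3.TransitionStorageClassGlacier)).
		SetExpiration((&s3.LifecycleExpiration{}).SetDays(365))

	if _, err := svc.PutBucketLifecycle(&s3.PutBucketLifecycleInput{
		Bucket:                 aws.String("example-log-bucket"),
		LifecycleConfiguration: &s3.LifecycleConfiguration{Rules: []*s3.Rule{rule}},
	}); err != nil {
		log.Fatal(err)
	}
}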
-func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { - s.AbortIncompleteMultipartUpload = v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { - s.Expiration = v - return s -} - -// SetID sets the ID field's value. -func (s *Rule) SetID(v string) *Rule { - s.ID = &v - return s -} - -// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. -func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { - s.NoncurrentVersionExpiration = v - return s -} - -// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. -func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { - s.NoncurrentVersionTransition = v - return s -} - -// SetPrefix sets the Prefix field's value. -func (s *Rule) SetPrefix(v string) *Rule { - s.Prefix = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *Rule) SetStatus(v string) *Rule { - s.Status = &v - return s -} - -// SetTransition sets the Transition field's value. -func (s *Rule) SetTransition(v *Transition) *Rule { - s.Transition = v - return s -} - -// Specifies the use of SSE-KMS to encrypt delivered inventory reports. -type SSEKMS struct { - _ struct{} `locationName:"SSE-KMS" type:"structure"` - - // Specifies the ID of the Amazon Web Services Key Management Service (Amazon - // Web Services KMS) symmetric customer managed key to use for encrypting inventory - // reports. - // - // KeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by SSEKMS's - // String and GoString methods. - // - // KeyId is a required field - KeyId *string `type:"string" required:"true" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SSEKMS) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SSEKMS) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SSEKMS) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SSEKMS"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *SSEKMS) SetKeyId(v string) *SSEKMS { - s.KeyId = &v - return s -} - -// Specifies the use of SSE-S3 to encrypt delivered inventory reports. -type SSES3 struct { - _ struct{} `locationName:"SSE-S3" type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SSES3) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SSES3) GoString() string {
-	return s.String()
-}
-
-// Specifies the byte range of the object to get the records from. A record
-// is processed when its first byte is contained by the range. This parameter
-// is optional, but when specified, it must not be empty. See RFC 2616, Section
-// 14.35.1 about how to specify the start and end of the range.
-type ScanRange struct {
-	_ struct{} `type:"structure"`
-
-	// Specifies the end of the byte range. This parameter is optional. Valid values:
-	// non-negative integers. The default value is one less than the size of the
-	// object being queried. If only the End parameter is supplied, it is interpreted
-	// to mean scan the last N bytes of the file. For example, <scanrange><end>50</end></scanrange>
-	// means scan the last 50 bytes.
-	End *int64 `type:"long"`
-
-	// Specifies the start of the byte range. This parameter is optional. Valid
-	// values: non-negative integers. The default value is 0. If only start is supplied,
-	// it means scan from that point to the end of the file. For example, <scanrange><start>50</start></scanrange>
-	// means scan from byte 50 until the end of the file.
-	Start *int64 `type:"long"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScanRange) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScanRange) GoString() string {
-	return s.String()
-}
-
-// SetEnd sets the End field's value.
-func (s *ScanRange) SetEnd(v int64) *ScanRange {
-	s.End = &v
-	return s
-}
-
-// SetStart sets the Start field's value.
-func (s *ScanRange) SetStart(v int64) *ScanRange {
-	s.Start = &v
-	return s
-}
-
-// SelectObjectContentEventStreamEvent groups together all EventStream
-// event writes for SelectObjectContentEventStream.
-//
-// These events are:
-//
-//   - ContinuationEvent
-//   - EndEvent
-//   - ProgressEvent
-//   - RecordsEvent
-//   - StatsEvent
-type SelectObjectContentEventStreamEvent interface {
-	eventSelectObjectContentEventStream()
-	eventstreamapi.Marshaler
-	eventstreamapi.Unmarshaler
-}
-
-// SelectObjectContentEventStreamReader provides the interface for reading from the stream. The
-// default implementation for this interface will be SelectObjectContentEventStreamData.
-//
-// The reader's Close method must allow multiple concurrent calls.
-//
-// These events are:
-//
-//   - ContinuationEvent
-//   - EndEvent
-//   - ProgressEvent
-//   - RecordsEvent
-//   - StatsEvent
-//   - SelectObjectContentEventStreamUnknownEvent
-type SelectObjectContentEventStreamReader interface {
-	// Returns a channel of events as they are read from the event stream.
-	Events() <-chan SelectObjectContentEventStreamEvent
-
-	// Close will stop the reader reading events from the stream.
-	Close() error
-
-	// Returns any error that has occurred while reading from the event stream.
-	Err() error
-}
-
-type readSelectObjectContentEventStream struct {
-	eventReader *eventstreamapi.EventReader
-	stream      chan SelectObjectContentEventStreamEvent
-	err         *eventstreamapi.OnceError
-
-	done      chan struct{}
-	closeOnce sync.Once
-}
-
-func newReadSelectObjectContentEventStream(eventReader *eventstreamapi.EventReader) *readSelectObjectContentEventStream {
-	r := &readSelectObjectContentEventStream{
-		eventReader: eventReader,
-		stream:      make(chan SelectObjectContentEventStreamEvent),
-		done:        make(chan struct{}),
-		err:         eventstreamapi.NewOnceError(),
-	}
-	go r.readEventStream()
-
-	return r
-}
-
-// Close will close the underlying event stream reader.
-func (r *readSelectObjectContentEventStream) Close() error {
-	r.closeOnce.Do(r.safeClose)
-	return r.Err()
-}
-
-func (r *readSelectObjectContentEventStream) ErrorSet() <-chan struct{} {
-	return r.err.ErrorSet()
-}
-
-func (r *readSelectObjectContentEventStream) Closed() <-chan struct{} {
-	return r.done
-}
-
-func (r *readSelectObjectContentEventStream) safeClose() {
-	close(r.done)
-}
-
-func (r *readSelectObjectContentEventStream) Err() error {
-	return r.err.Err()
-}
-
-func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
-	return r.stream
-}
-
-func (r *readSelectObjectContentEventStream) readEventStream() {
-	defer r.Close()
-	defer close(r.stream)
-
-	for {
-		event, err := r.eventReader.ReadEvent()
-		if err != nil {
-			if err == io.EOF {
-				return
-			}
-			select {
-			case <-r.done:
-				// If closed already ignore the error
-				return
-			default:
-			}
-			if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok {
-				continue
-			}
-			r.err.SetError(err)
-			return
-		}
-
-		select {
-		case r.stream <- event.(SelectObjectContentEventStreamEvent):
-		case <-r.done:
-			return
-		}
-	}
-}
-
-type unmarshalerForSelectObjectContentEventStreamEvent struct {
-	metadata protocol.ResponseMetadata
-}
-
-func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) {
-	switch eventType {
-	case "Cont":
-		return &ContinuationEvent{}, nil
-	case "End":
-		return &EndEvent{}, nil
-	case "Progress":
-		return &ProgressEvent{}, nil
-	case "Records":
-		return &RecordsEvent{}, nil
-	case "Stats":
-		return &StatsEvent{}, nil
-	default:
-		return &SelectObjectContentEventStreamUnknownEvent{Type: eventType}, nil
-	}
-}
-
-// SelectObjectContentEventStreamUnknownEvent provides a failsafe event for the
-// SelectObjectContentEventStream group of events when an unknown event is received.
-type SelectObjectContentEventStreamUnknownEvent struct {
-	Type    string
-	Message eventstream.Message
-}
-
-// The SelectObjectContentEventStreamUnknownEvent is an event in the SelectObjectContentEventStream
-// group of events.
-func (s *SelectObjectContentEventStreamUnknownEvent) eventSelectObjectContentEventStream() {}
-
-// MarshalEvent marshals the type into a stream event value. This method
-// should only be used internally within the SDK's EventStream handling.
-func (e *SelectObjectContentEventStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) (
-	msg eventstream.Message, err error,
-) {
-	return e.Message.Clone(), nil
-}
-
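In application code the channel-based reader above is consumed through the output's GetStream accessor rather than directly. A minimal sketch of the intended pattern (the function name and package layout are illustrative, not part of this SDK):

package example

import "github.com/aws/aws-sdk-go/service/s3"

// drainSelectStream ranges over Events(), switches on the concrete event
// type, and checks the stream's Err after the channel closes.
func drainSelectStream(out *s3.SelectObjectContentOutput) ([]byte, error) {
	stream := out.GetStream()
	defer stream.Close()

	var records []byte
	for event := range stream.Events() {
		switch e := event.(type) {
		case *s3.RecordsEvent:
			records = append(records, e.Payload...)
		case *s3.EndEvent:
			// The End event signals a successful, complete response.
		}
	}
	if err := stream.Err(); err != nil {
		return nil, err
	}
	return records, nil
}

-// UnmarshalEvent unmarshals the EventStream Message into the SelectObjectContentEventStreamData value.
-// This method is only used internally within the SDK's EventStream handling.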
-func (e *SelectObjectContentEventStreamUnknownEvent) UnmarshalEvent( - payloadUnmarshaler protocol.PayloadUnmarshaler, - msg eventstream.Message, -) error { - e.Message = msg.Clone() - return nil -} - -// Request to filter the contents of an Amazon S3 object based on a simple Structured -// Query Language (SQL) statement. In the request, along with the SQL expression, -// you must specify a data serialization format (JSON or CSV) of the object. -// Amazon S3 uses this to parse object data into records. It returns only records -// that match the specified SQL expression. You must also specify the data serialization -// format for the response. For more information, see S3Select API Documentation -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). -type SelectObjectContentInput struct { - _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - - // The S3 bucket. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The expression that is used to query the object. - // - // Expression is a required field - Expression *string `type:"string" required:"true"` - - // The type of the provided expression (for example, SQL). - // - // ExpressionType is a required field - ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` - - // Describes the format of the data in the object that is being queried. - // - // InputSerialization is a required field - InputSerialization *InputSerialization `type:"structure" required:"true"` - - // The object key. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Describes the format of the data that you want Amazon S3 to return in response. - // - // OutputSerialization is a required field - OutputSerialization *OutputSerialization `type:"structure" required:"true"` - - // Specifies if periodic request progress information should be enabled. - RequestProgress *RequestProgress `type:"structure"` - - // The server-side encryption (SSE) algorithm used to encrypt the object. This - // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // The server-side encryption (SSE) customer managed key. This parameter is - // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. - // - // SSECustomerKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by SelectObjectContentInput's - // String and GoString methods. 
-	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
-
-	// The MD5 server-side encryption (SSE) customer managed key. This parameter
-	// is needed only when the object was created using a checksum algorithm. For
-	// more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
-	// in the Amazon S3 User Guide.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// Specifies the byte range of the object to get the records from. A record
-	// is processed when its first byte is contained by the range. This parameter
-	// is optional, but when specified, it must not be empty. See RFC 2616, Section
-	// 14.35.1 about how to specify the start and end of the range.
-	//
-	// ScanRange may be used in the following ways:
-	//
-	//    * <scanrange><start>50</start><end>100</end></scanrange> - process only
-	//    the records starting between the bytes 50 and 100 (inclusive, counting
-	//    from zero)
-	//
-	//    * <scanrange><start>50</start></scanrange> - process only the records
-	//    starting after the byte 50
-	//
-	//    * <scanrange><end>50</end></scanrange> - process only the records within
-	//    the last 50 bytes of the file.
-	ScanRange *ScanRange `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SelectObjectContentInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SelectObjectContentInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *SelectObjectContentInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Bucket != nil && len(*s.Bucket) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
-	}
-	if s.Expression == nil {
-		invalidParams.Add(request.NewErrParamRequired("Expression"))
-	}
-	if s.ExpressionType == nil {
-		invalidParams.Add(request.NewErrParamRequired("ExpressionType"))
-	}
-	if s.InputSerialization == nil {
-		invalidParams.Add(request.NewErrParamRequired("InputSerialization"))
-	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-	if s.OutputSerialization == nil {
-		invalidParams.Add(request.NewErrParamRequired("OutputSerialization"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBucket sets the Bucket field's value.
-func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput {
-	s.Bucket = &v
-	return s
-}
-
-func (s *SelectObjectContentInput) getBucket() (v string) {
-	if s.Bucket == nil {
-		return v
-	}
-	return *s.Bucket
-}
-
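Putting the input together: a minimal sketch of a SelectObjectContent call that uses the ScanRange semantics documented above (the bucket, key, query, and CSV details are illustrative placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	input := (&s3.SelectObjectContentInput{}).
		SetBucket("example-data-bucket").
		SetKey("example/data.csv").
		SetExpression("SELECT * FROM S3Object s").
		SetExpressionType(s3.ExpressionTypeSql).
		SetInputSerialization(&s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		}).
		SetOutputSerialization(&s3.OutputSerialization{
			CSV: &s3.CSVOutput{},
		}).
		// Restrict the scan to the first 1 MiB, per the ScanRange rules above.
		SetScanRange((&s3.ScanRange{}).SetStart(0).SetEnd(1024 * 1024))

	out, err := svc.SelectObjectContent(input)
	if err != nil {
		log.Fatal(err)
	}
	defer out.GetStream().Close()
	for range out.GetStream().Events() {
		// See the drainSelectStream sketch earlier for full event handling.
	}
}

-// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.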
-func (s *SelectObjectContentInput) SetExpectedBucketOwner(v string) *SelectObjectContentInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetExpression sets the Expression field's value. -func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput { - s.Expression = &v - return s -} - -// SetExpressionType sets the ExpressionType field's value. -func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput { - s.ExpressionType = &v - return s -} - -// SetInputSerialization sets the InputSerialization field's value. -func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput { - s.InputSerialization = v - return s -} - -// SetKey sets the Key field's value. -func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput { - s.Key = &v - return s -} - -// SetOutputSerialization sets the OutputSerialization field's value. -func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { - s.OutputSerialization = v - return s -} - -// SetRequestProgress sets the RequestProgress field's value. -func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { - s.RequestProgress = v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { - s.SSECustomerKey = &v - return s -} - -func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetScanRange sets the ScanRange field's value. -func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { - s.ScanRange = v - return s -} - -func (s *SelectObjectContentInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *SelectObjectContentInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s SelectObjectContentInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type SelectObjectContentOutput struct { - _ struct{} `type:"structure" payload:"Payload"` - - EventStream *SelectObjectContentEventStream -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s SelectObjectContentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SelectObjectContentOutput) GoString() string { - return s.String() -} - -func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { - s.EventStream = v - return s -} -func (s *SelectObjectContentOutput) GetEventStream() *SelectObjectContentEventStream { - return s.EventStream -} - -// GetStream returns the type to interact with the event stream. -func (s *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { - return s.EventStream -} - -// Describes the parameters for Select job types. -type SelectParameters struct { - _ struct{} `type:"structure"` - - // The expression that is used to query the object. - // - // Expression is a required field - Expression *string `type:"string" required:"true"` - - // The type of the provided expression (for example, SQL). - // - // ExpressionType is a required field - ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` - - // Describes the serialization format of the object. - // - // InputSerialization is a required field - InputSerialization *InputSerialization `type:"structure" required:"true"` - - // Describes how the results of the Select job are serialized. - // - // OutputSerialization is a required field - OutputSerialization *OutputSerialization `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SelectParameters) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SelectParameters) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SelectParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} - if s.Expression == nil { - invalidParams.Add(request.NewErrParamRequired("Expression")) - } - if s.ExpressionType == nil { - invalidParams.Add(request.NewErrParamRequired("ExpressionType")) - } - if s.InputSerialization == nil { - invalidParams.Add(request.NewErrParamRequired("InputSerialization")) - } - if s.OutputSerialization == nil { - invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetExpression sets the Expression field's value. -func (s *SelectParameters) SetExpression(v string) *SelectParameters { - s.Expression = &v - return s -} - -// SetExpressionType sets the ExpressionType field's value. -func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { - s.ExpressionType = &v - return s -} - -// SetInputSerialization sets the InputSerialization field's value. 
-func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { - s.InputSerialization = v - return s -} - -// SetOutputSerialization sets the OutputSerialization field's value. -func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { - s.OutputSerialization = v - return s -} - -// Describes the default server-side encryption to apply to new objects in the -// bucket. If a PUT Object request doesn't specify any server-side encryption, -// this default encryption will be applied. If you don't specify a customer -// managed key at configuration, Amazon S3 automatically creates an Amazon Web -// Services KMS key in your Amazon Web Services account the first time that -// you add an object encrypted with SSE-KMS to a bucket. By default, Amazon -// S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) -// in the Amazon S3 API Reference. -type ServerSideEncryptionByDefault struct { - _ struct{} `type:"structure"` - - // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services - // KMS key ID to use for the default encryption. This parameter is allowed if - // and only if SSEAlgorithm is set to aws:kms. - // - // You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. - // However, if you are using encryption with cross-account or Amazon Web Services - // service operations you must use a fully qualified KMS key ARN. For more information, - // see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // Amazon S3 only supports symmetric KMS keys and not asymmetric KMS keys. For - // more information, see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. - // - // KMSMasterKeyID is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by ServerSideEncryptionByDefault's - // String and GoString methods. - KMSMasterKeyID *string `type:"string" sensitive:"true"` - - // Server-side encryption algorithm to use for the default encryption. - // - // SSEAlgorithm is a required field - SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ServerSideEncryptionByDefault) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ServerSideEncryptionByDefault) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ServerSideEncryptionByDefault) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} - if s.SSEAlgorithm == nil { - invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. -func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { - s.KMSMasterKeyID = &v - return s -} - -// SetSSEAlgorithm sets the SSEAlgorithm field's value. -func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { - s.SSEAlgorithm = &v - return s -} - -// Specifies the default server-side-encryption configuration. -type ServerSideEncryptionConfiguration struct { - _ struct{} `type:"structure"` - - // Container for information about a particular server-side encryption configuration - // rule. - // - // Rules is a required field - Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ServerSideEncryptionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ServerSideEncryptionConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ServerSideEncryptionConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRules sets the Rules field's value. -func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { - s.Rules = v - return s -} - -// Specifies the default server-side encryption configuration. -type ServerSideEncryptionRule struct { - _ struct{} `type:"structure"` - - // Specifies the default server-side encryption to apply to new objects in the - // bucket. If a PUT Object request doesn't specify any server-side encryption, - // this default encryption will be applied. - ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` - - // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side - // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects - // are not affected. Setting the BucketKeyEnabled element to true causes Amazon - // S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. - // - // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) - // in the Amazon S3 User Guide. 
- BucketKeyEnabled *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ServerSideEncryptionRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ServerSideEncryptionRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ServerSideEncryptionRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} - if s.ApplyServerSideEncryptionByDefault != nil { - if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { - invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. -func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { - s.ApplyServerSideEncryptionByDefault = v - return s -} - -// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. -func (s *ServerSideEncryptionRule) SetBucketKeyEnabled(v bool) *ServerSideEncryptionRule { - s.BucketKeyEnabled = &v - return s -} - -// A container that describes additional filters for identifying the source -// objects that you want to replicate. You can choose to enable or disable the -// replication of these objects. Currently, Amazon S3 supports only the filter -// that you can specify for objects created with server-side encryption using -// a customer managed key stored in Amazon Web Services Key Management Service -// (SSE-KMS). -type SourceSelectionCriteria struct { - _ struct{} `type:"structure"` - - // A filter that you can specify for selections for modifications on replicas. - // Amazon S3 doesn't replicate replica modifications by default. In the latest - // version of replication configuration (when Filter is specified), you can - // specify this element and set the status to Enabled to replicate modifications - // on replicas. - // - // If you don't specify the Filter element, Amazon S3 assumes that the replication - // configuration is the earlier version, V1. In the earlier version, this element - // is not allowed - ReplicaModifications *ReplicaModifications `type:"structure"` - - // A container for filter information for the selection of Amazon S3 objects - // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria - // in the replication configuration, this element is required. - SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SourceSelectionCriteria) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
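ServerSideEncryptionByDefault, ServerSideEncryptionRule, and ServerSideEncryptionConfiguration together form the payload of PutBucketEncryption. A hedged sketch; the bucket name is a placeholder and the KMS key ARN reuses the example from the field documentation above:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// putDefaultEncryption enables default SSE-KMS (with an S3 Bucket Key)
// for new objects written to the bucket.
func putDefaultEncryption(svc *s3.S3) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	return err
}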
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SourceSelectionCriteria) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SourceSelectionCriteria) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} - if s.ReplicaModifications != nil { - if err := s.ReplicaModifications.Validate(); err != nil { - invalidParams.AddNested("ReplicaModifications", err.(request.ErrInvalidParams)) - } - } - if s.SseKmsEncryptedObjects != nil { - if err := s.SseKmsEncryptedObjects.Validate(); err != nil { - invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetReplicaModifications sets the ReplicaModifications field's value. -func (s *SourceSelectionCriteria) SetReplicaModifications(v *ReplicaModifications) *SourceSelectionCriteria { - s.ReplicaModifications = v - return s -} - -// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. -func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { - s.SseKmsEncryptedObjects = v - return s -} - -// A container for filter information for the selection of S3 objects encrypted -// with Amazon Web Services KMS. -type SseKmsEncryptedObjects struct { - _ struct{} `type:"structure"` - - // Specifies whether Amazon S3 replicates objects created with server-side encryption - // using an Amazon Web Services KMS key stored in Amazon Web Services Key Management - // Service. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SseKmsEncryptedObjects) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SseKmsEncryptedObjects) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SseKmsEncryptedObjects) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStatus sets the Status field's value. -func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { - s.Status = &v - return s -} - -// Container for the stats details. -type Stats struct { - _ struct{} `type:"structure"` - - // The total number of uncompressed object bytes processed. - BytesProcessed *int64 `type:"long"` - - // The total number of bytes of records payload data returned. - BytesReturned *int64 `type:"long"` - - // The total number of object bytes scanned. 
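SourceSelectionCriteria and SseKmsEncryptedObjects slot into the Filter-based (V2) replication schema. A fragment only, assuming the enclosing ReplicationRule and its IAM role are configured elsewhere:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// kmsSourceSelection opts SSE-KMS objects and replica modifications
// into replication; both are off unless explicitly enabled.
func kmsSourceSelection() *s3.SourceSelectionCriteria {
	return &s3.SourceSelectionCriteria{
		ReplicaModifications: &s3.ReplicaModifications{
			Status: aws.String(s3.ReplicaModificationsStatusEnabled),
		},
		SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{
			Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled),
		},
	}
}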
- BytesScanned *int64 `type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Stats) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Stats) GoString() string { - return s.String() -} - -// SetBytesProcessed sets the BytesProcessed field's value. -func (s *Stats) SetBytesProcessed(v int64) *Stats { - s.BytesProcessed = &v - return s -} - -// SetBytesReturned sets the BytesReturned field's value. -func (s *Stats) SetBytesReturned(v int64) *Stats { - s.BytesReturned = &v - return s -} - -// SetBytesScanned sets the BytesScanned field's value. -func (s *Stats) SetBytesScanned(v int64) *Stats { - s.BytesScanned = &v - return s -} - -// Container for the Stats Event. -type StatsEvent struct { - _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"` - - // The Stats event details. - Details *Stats `locationName:"Details" type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StatsEvent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StatsEvent) GoString() string { - return s.String() -} - -// SetDetails sets the Details field's value. -func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent { - s.Details = v - return s -} - -// The StatsEvent is an event in the SelectObjectContentEventStream group of events. -func (s *StatsEvent) eventSelectObjectContentEventStream() {} - -// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value. -// This method is only used internally within the SDK's EventStream handling. -func (s *StatsEvent) UnmarshalEvent( - payloadUnmarshaler protocol.PayloadUnmarshaler, - msg eventstream.Message, -) error { - if err := payloadUnmarshaler.UnmarshalPayload( - bytes.NewReader(msg.Payload), s, - ); err != nil { - return err - } - return nil -} - -// MarshalEvent marshals the type into a stream event value. This method -// should only be used internally within the SDK's EventStream handling. -func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { - msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) - var buf bytes.Buffer - if err = pm.MarshalPayload(&buf, s); err != nil { - return eventstream.Message{}, err - } - msg.Payload = buf.Bytes() - return msg, err -} - -// Specifies data related to access patterns to be collected and made available -// to analyze the tradeoffs between different storage classes for an Amazon -// S3 bucket.
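Stats and StatsEvent surface at the tail of the Select event stream. A sketch of draining that stream, reusing the request builder sketched earlier; svc is assumed to be a configured *s3.S3 client:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

// drainSelectStream prints result records, then the terminal statistics.
func drainSelectStream(svc *s3.S3, input *s3.SelectObjectContentInput) error {
	resp, err := svc.SelectObjectContent(input)
	if err != nil {
		return err
	}
	stream := resp.GetStream()
	defer stream.Close()

	for event := range stream.Events() {
		switch e := event.(type) {
		case *s3.RecordsEvent:
			fmt.Printf("%s", e.Payload) // raw serialized rows
		case *s3.StatsEvent:
			fmt.Printf("scanned=%d processed=%d returned=%d\n",
				*e.Details.BytesScanned, *e.Details.BytesProcessed, *e.Details.BytesReturned)
		}
	}
	return stream.Err()
}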
-type StorageClassAnalysis struct { - _ struct{} `type:"structure"` - - // Specifies how data related to the storage class analysis for an Amazon S3 - // bucket should be exported. - DataExport *StorageClassAnalysisDataExport `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StorageClassAnalysis) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StorageClassAnalysis) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StorageClassAnalysis) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"} - if s.DataExport != nil { - if err := s.DataExport.Validate(); err != nil { - invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDataExport sets the DataExport field's value. -func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis { - s.DataExport = v - return s -} - -// Container for data related to the storage class analysis for an Amazon S3 -// bucket for export. -type StorageClassAnalysisDataExport struct { - _ struct{} `type:"structure"` - - // The place to store the data for an analysis. - // - // Destination is a required field - Destination *AnalyticsExportDestination `type:"structure" required:"true"` - - // The version of the output schema to use when exporting data. Must be V_1. - // - // OutputSchemaVersion is a required field - OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StorageClassAnalysisDataExport) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StorageClassAnalysisDataExport) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StorageClassAnalysisDataExport) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"} - if s.Destination == nil { - invalidParams.Add(request.NewErrParamRequired("Destination")) - } - if s.OutputSchemaVersion == nil { - invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion")) - } - if s.Destination != nil { - if err := s.Destination.Validate(); err != nil { - invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDestination sets the Destination field's value. 
-func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport { - s.Destination = v - return s -} - -// SetOutputSchemaVersion sets the OutputSchemaVersion field's value. -func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport { - s.OutputSchemaVersion = &v - return s -} - -// A container of a key value name pair. -type Tag struct { - _ struct{} `type:"structure"` - - // Name of the object key. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // Value of the tag. - // - // Value is a required field - Value *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} - -// Container for TagSet elements. -type Tagging struct { - _ struct{} `type:"structure"` - - // A collection for a set of tags - // - // TagSet is a required field - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tagging) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tagging) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
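Tag and Tagging form the request body of the object-tagging calls; an object carries a single tag set, so PutObjectTagging replaces whatever was there. A sketch with placeholder names:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// tagObject replaces the full tag set on a single object.
func tagObject(svc *s3.S3) error {
	_, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("example-bucket"),      // placeholder
		Key:    aws.String("reports/january.pdf"), // placeholder
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("team"), Value: aws.String("platform")},
				{Key: aws.String("env"), Value: aws.String("dev")},
			},
		},
	})
	return err
}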
-func (s *Tagging) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tagging"} - if s.TagSet == nil { - invalidParams.Add(request.NewErrParamRequired("TagSet")) - } - if s.TagSet != nil { - for i, v := range s.TagSet { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTagSet sets the TagSet field's value. -func (s *Tagging) SetTagSet(v []*Tag) *Tagging { - s.TagSet = v - return s -} - -// Container for granting information. -// -// Buckets that use the bucket owner enforced setting for Object Ownership don't -// support target grants. For more information, see Permissions server access -// log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) -// in the Amazon S3 User Guide. -type TargetGrant struct { - _ struct{} `type:"structure"` - - // Container for the person being granted permissions. - Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` - - // Logging permissions assigned to the grantee for the bucket. - Permission *string `type:"string" enum:"BucketLogsPermission"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TargetGrant) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TargetGrant) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TargetGrant) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} - if s.Grantee != nil { - if err := s.Grantee.Validate(); err != nil { - invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantee sets the Grantee field's value. -func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { - s.Grantee = v - return s -} - -// SetPermission sets the Permission field's value. -func (s *TargetGrant) SetPermission(v string) *TargetGrant { - s.Permission = &v - return s -} - -// The S3 Intelligent-Tiering storage class is designed to optimize storage -// costs by automatically moving data to the most cost-effective storage access -// tier, without additional operational overhead. -type Tiering struct { - _ struct{} `type:"structure"` - - // S3 Intelligent-Tiering access tier. See Storage class for automatically optimizing - // frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) - // for a list of access tiers in the S3 Intelligent-Tiering storage class. 
- // - // AccessTier is a required field - AccessTier *string `type:"string" required:"true" enum:"IntelligentTieringAccessTier"` - - // The number of consecutive days of no access after which an object will be - // eligible to be transitioned to the corresponding tier. The minimum number - // of days specified for Archive Access tier must be at least 90 days and Deep - // Archive Access tier must be at least 180 days. The maximum can be up to 2 - // years (730 days). - // - // Days is a required field - Days *int64 `type:"integer" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tiering) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tiering) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tiering) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tiering"} - if s.AccessTier == nil { - invalidParams.Add(request.NewErrParamRequired("AccessTier")) - } - if s.Days == nil { - invalidParams.Add(request.NewErrParamRequired("Days")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessTier sets the AccessTier field's value. -func (s *Tiering) SetAccessTier(v string) *Tiering { - s.AccessTier = &v - return s -} - -// SetDays sets the Days field's value. -func (s *Tiering) SetDays(v int64) *Tiering { - s.Days = &v - return s -} - -// A container for specifying the configuration for publication of messages -// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 -// detects specified events. -type TopicConfiguration struct { - _ struct{} `type:"structure"` - - // The Amazon S3 bucket event about which to send notifications. For more information, - // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. - // - // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` - - // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. - Filter *NotificationConfigurationFilter `type:"structure"` - - // An optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 - // publishes a message when it detects events of the specified type. - // - // TopicArn is a required field - TopicArn *string `locationName:"Topic" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
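A Tiering entry never travels alone; it hangs off an IntelligentTieringConfiguration keyed by Id. A sketch that archives objects after the 90-day minimum noted above; bucket and configuration names are placeholders:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// archiveColdObjects moves objects with no access for 90 days into the
// Intelligent-Tiering Archive Access tier.
func archiveColdObjects(svc *s3.S3) error {
	_, err := svc.PutBucketIntelligentTieringConfiguration(
		&s3.PutBucketIntelligentTieringConfigurationInput{
			Bucket: aws.String("example-bucket"), // placeholder
			Id:     aws.String("archive-cold"),   // placeholder
			IntelligentTieringConfiguration: &s3.IntelligentTieringConfiguration{
				Id:     aws.String("archive-cold"),
				Status: aws.String(s3.IntelligentTieringStatusEnabled),
				Tierings: []*s3.Tiering{{
					AccessTier: aws.String(s3.IntelligentTieringAccessTierArchiveAccess),
					Days:       aws.Int64(90),
				}},
			},
		})
	return err
}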
-func (s TopicConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TopicConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TopicConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) - } - if s.TopicArn == nil { - invalidParams.Add(request.NewErrParamRequired("TopicArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEvents sets the Events field's value. -func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration { - s.Events = v - return s -} - -// SetFilter sets the Filter field's value. -func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration { - s.Filter = v - return s -} - -// SetId sets the Id field's value. -func (s *TopicConfiguration) SetId(v string) *TopicConfiguration { - s.Id = &v - return s -} - -// SetTopicArn sets the TopicArn field's value. -func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { - s.TopicArn = &v - return s -} - -// A container for specifying the configuration for publication of messages -// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 -// detects specified events. This data type is deprecated. Use TopicConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_TopicConfiguration.html) -// instead. -type TopicConfigurationDeprecated struct { - _ struct{} `type:"structure"` - - // Bucket event for which to send notifications. - // - // Deprecated: Event has been deprecated - Event *string `deprecated:"true" type:"string" enum:"Event"` - - // A collection of events related to objects - Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` - - // An optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // Amazon SNS topic to which Amazon S3 will publish a message to report the - // specified events for the bucket. - Topic *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TopicConfigurationDeprecated) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TopicConfigurationDeprecated) GoString() string { - return s.String() -} - -// SetEvent sets the Event field's value. -func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated { - s.Event = &v - return s -} - -// SetEvents sets the Events field's value. 
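A TopicConfiguration is installed through PutBucketNotificationConfiguration, and S3 checks at write time that it may publish to the topic. Sketch, with placeholder bucket and topic ARN:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// notifyOnCreate sends every object-created event to an SNS topic.
func notifyOnCreate(svc *s3.S3) error {
	_, err := svc.PutBucketNotificationConfiguration(
		&s3.PutBucketNotificationConfigurationInput{
			Bucket: aws.String("example-bucket"), // placeholder
			NotificationConfiguration: &s3.NotificationConfiguration{
				TopicConfigurations: []*s3.TopicConfiguration{{
					TopicArn: aws.String("arn:aws:sns:us-west-2:123456789012:example-topic"), // placeholder
					Events:   []*string{aws.String(s3.EventS3ObjectCreated)},                 // "s3:ObjectCreated:*"
				}},
			},
		})
	return err
}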
-func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated { - s.Events = v - return s -} - -// SetId sets the Id field's value. -func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated { - s.Id = &v - return s -} - -// SetTopic sets the Topic field's value. -func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated { - s.Topic = &v - return s -} - -// Specifies when an object transitions to a specified storage class. For more -// information about Amazon S3 lifecycle configuration rules, see Transitioning -// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) -// in the Amazon S3 User Guide. -type Transition struct { - _ struct{} `type:"structure"` - - // Indicates when objects are transitioned to the specified storage class. The - // date value must be in ISO 8601 format. The time is always midnight UTC. - Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Indicates the number of days after creation when objects are transitioned - // to the specified storage class. The value must be a positive integer. - Days *int64 `type:"integer"` - - // The storage class to which you want the object to transition. - StorageClass *string `type:"string" enum:"TransitionStorageClass"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Transition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Transition) GoString() string { - return s.String() -} - -// SetDate sets the Date field's value. -func (s *Transition) SetDate(v time.Time) *Transition { - s.Date = &v - return s -} - -// SetDays sets the Days field's value. -func (s *Transition) SetDays(v int64) *Transition { - s.Days = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *Transition) SetStorageClass(v string) *Transition { - s.StorageClass = &v - return s -} - -type UploadPartCopyInput struct { - _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` - - // The bucket name. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. 
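The Transition type above feeds the lifecycle APIs. A sketch that moves objects under a prefix to GLACIER after 30 days; all names are placeholders:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// glacierAfter30Days ages logs/ objects into the GLACIER storage class.
func glacierAfter30Days(svc *s3.S3) error {
	_, err := svc.PutBucketLifecycleConfiguration(
		&s3.PutBucketLifecycleConfigurationInput{
			Bucket: aws.String("example-bucket"), // placeholder
			LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
				Rules: []*s3.LifecycleRule{{
					ID:     aws.String("archive-logs"),
					Status: aws.String(s3.ExpirationStatusEnabled), // "Enabled"
					Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
					Transitions: []*s3.Transition{{
						Days:         aws.Int64(30),
						StorageClass: aws.String(s3.TransitionStorageClassGlacier),
					}},
				}},
			},
		})
	return err
}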
For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies the source object for the copy operation. You specify the value - // in one of two formats, depending on whether you want to access the source - // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): - // - // * For objects not accessed through an access point, specify the name of - // the source bucket and key of the source object, separated by a slash (/). - // For example, to copy the object reports/january.pdf from the bucket awsexamplebucket, - // use awsexamplebucket/reports/january.pdf. The value must be URL-encoded. - // - // * For objects accessed through access points, specify the Amazon Resource - // Name (ARN) of the object as accessed through the access point, in the - // format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>. - // For example, to copy the object reports/january.pdf through access point - // my-access-point owned by account 123456789012 in Region us-west-2, use - // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. - // The value must be URL-encoded. Amazon S3 supports copy operations using - // access points only when the source and destination buckets are in the - // same Amazon Web Services Region. Alternatively, for objects accessed through - // Amazon S3 on Outposts, specify the ARN of the object as accessed in the - // format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>. - // For example, to copy the object reports/january.pdf through outpost my-outpost - // owned by account 123456789012 in Region us-west-2, use the URL encoding - // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. - // The value must be URL-encoded. - // - // To copy a specific version of an object, append ?versionId=<version-id> to - // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). - // If you don't specify a version ID, Amazon S3 copies the latest version of - // the source object. - // - // CopySource is a required field - CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` - - // Copies the object if its entity tag (ETag) matches the specified tag. - CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` - - // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` - - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. - CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` - - // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` - - // The range of bytes to copy from the source object. The range value must use - // the form bytes=first-last, where the first and last are the zero-based byte - // offsets to copy.
For example, bytes=0-9 indicates that you want to copy the - // first 10 bytes of the source. You can copy a range only if the source object - // is greater than 5 MB. - CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` - - // Specifies the algorithm to use when decrypting the source object (for example, - // AES256). - CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one - // that was used when the source object was created. - // - // CopySourceSSECustomerKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by UploadPartCopyInput's - // String and GoString methods. - CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - - // The account ID of the expected destination bucket owner. If the destination - // bucket is owned by a different account, the request fails with the HTTP status - // code 403 Forbidden (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // The account ID of the expected source bucket owner. If the source bucket - // is owned by a different account, the request fails with the HTTP status code - // 403 Forbidden (access denied). - ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` - - // Object key for which the multipart upload was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Part number of part being copied. This is a positive integer between 1 and - // 10,000. - // - // PartNumber is a required field - PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key.
The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm - // header. This must be the same encryption key specified in the initiate multipart - // upload request. - // - // SSECustomerKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by UploadPartCopyInput's - // String and GoString methods. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Upload ID identifying the multipart upload whose part is being copied. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UploadPartCopyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UploadPartCopyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UploadPartCopyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.CopySource == nil { - invalidParams.Add(request.NewErrParamRequired("CopySource")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.PartNumber == nil { - invalidParams.Add(request.NewErrParamRequired("PartNumber")) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { - s.Bucket = &v - return s -} - -func (s *UploadPartCopyInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetCopySource sets the CopySource field's value. -func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { - s.CopySource = &v - return s -} - -// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. -func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { - s.CopySourceIfMatch = &v - return s -} - -// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. 
-func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { - s.CopySourceIfModifiedSince = &v - return s -} - -// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. -func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { - s.CopySourceIfNoneMatch = &v - return s -} - -// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. -func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { - s.CopySourceIfUnmodifiedSince = &v - return s -} - -// SetCopySourceRange sets the CopySourceRange field's value. -func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { - s.CopySourceRange = &v - return s -} - -// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. -func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { - s.CopySourceSSECustomerAlgorithm = &v - return s -} - -// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. -func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { - s.CopySourceSSECustomerKey = &v - return s -} - -func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { - if s.CopySourceSSECustomerKey == nil { - return v - } - return *s.CopySourceSSECustomerKey -} - -// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. -func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { - s.CopySourceSSECustomerKeyMD5 = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *UploadPartCopyInput) SetExpectedBucketOwner(v string) *UploadPartCopyInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. -func (s *UploadPartCopyInput) SetExpectedSourceBucketOwner(v string) *UploadPartCopyInput { - s.ExpectedSourceBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { - s.Key = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { - s.PartNumber = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { - s.SSECustomerKey = &v - return s -} - -func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetUploadId sets the UploadId field's value. 
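These setters compose UploadPartCopy, the server-side sibling of UploadPart. A sketch that copies the first 5 MiB of a source object as part 1; all identifiers are placeholders:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// copyFirstPart copies a byte range of an existing object into part 1 of
// a multipart upload; uploadID comes from CreateMultipartUpload.
func copyFirstPart(svc *s3.S3, uploadID string) (*s3.UploadPartCopyOutput, error) {
	return svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:          aws.String("example-dest-bucket"), // placeholder
		Key:             aws.String("big-object"),          // placeholder
		UploadId:        aws.String(uploadID),
		PartNumber:      aws.Int64(1),
		CopySource:      aws.String("example-src-bucket/reports/january.pdf"), // must be URL-encoded
		CopySourceRange: aws.String("bytes=0-5242879"),                        // first 5 MiB, zero-based inclusive
	})
}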
-func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { - s.UploadId = &v - return s -} - -func (s *UploadPartCopyInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *UploadPartCopyInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s UploadPartCopyInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type UploadPartCopyOutput struct { - _ struct{} `type:"structure" payload:"CopyPartResult"` - - // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - - // Container for all response elements. - CopyPartResult *CopyPartResult `type:"structure"` - - // The version of the source object that was copied, if you have enabled versioning - // on the source bucket. - CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. - // - // SSEKMSKeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by UploadPartCopyOutput's - // String and GoString methods. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s UploadPartCopyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UploadPartCopyOutput) GoString() string { - return s.String() -} - -// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. -func (s *UploadPartCopyOutput) SetBucketKeyEnabled(v bool) *UploadPartCopyOutput { - s.BucketKeyEnabled = &v - return s -} - -// SetCopyPartResult sets the CopyPartResult field's value. -func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { - s.CopyPartResult = v - return s -} - -// SetCopySourceVersionId sets the CopySourceVersionId field's value. -func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { - s.CopySourceVersionId = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { - s.ServerSideEncryption = &v - return s -} - -type UploadPartInput struct { - _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` - - // Object data. - Body io.ReadSeeker `type:"blob"` - - // The name of the bucket to which the multipart upload was initiated. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. 
- // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. - // - // This checksum algorithm must be the same for all parts and it must match the checksum - // value supplied in the CreateMultipartUpload request. - // - // The AWS SDK for Go v1 does not support automatically computing the request payload - // checksum. This feature is available in the AWS SDK for Go v2. If a value - // is specified for this parameter, the matching algorithm's checksum member - // must be populated with the algorithm's checksum of the request payload. - ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 32-bit CRC32C checksum of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies - // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, - // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` - - // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically.
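// Editor's aside (illustrative sketch only; not part of the vendored SDK file).
// As the ChecksumAlgorithm note above says, the v1 SDK does not compute request
// payload checksums, so the caller supplies the digest alongside the algorithm.
// Bucket and key names here are hypothetical placeholders.
//
// Assumed imports: "bytes", "crypto/sha256", "encoding/base64",
// "github.com/aws/aws-sdk-go/service/s3".
func exampleUploadPartWithChecksum(svc *s3.S3, uploadID string, part []byte) error {
	sum := sha256.Sum256(part) // pre-compute the digest of this part's payload
	_, err := svc.UploadPart(new(s3.UploadPartInput).
		SetBucket("example-bucket").
		SetKey("example-key").
		SetUploadId(uploadID).
		SetPartNumber(1).
		SetChecksumAlgorithm(s3.ChecksumAlgorithmSha256). // enum value declared later in this file
		SetChecksumSHA256(base64.StdEncoding.EncodeToString(sum[:])).
		SetBody(bytes.NewReader(part))) // bytes.Reader satisfies io.ReadSeeker
	return err
}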
- ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI. This parameter is required - // if object lock parameters are specified. - ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). - ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - - // Object key for which the multipart upload was initiated. - // - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Part number of part being uploaded. This is a positive integer between 1 - // and 10,000. - // - // PartNumber is a required field - PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm - // header. This must be the same encryption key specified in the initiate multipart - // upload request. - // - // SSECustomerKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by UploadPartInput's - // String and GoString methods. - SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Upload ID identifying the multipart upload whose part is being uploaded. - // - // UploadId is a required field - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UploadPartInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation.
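// Editor's aside (illustrative sketch only; not part of the vendored SDK file).
// The three SSE-C members above travel together: the algorithm, the raw key
// (base64-encoded on the wire per the marshal-as:"blob" tag), and the
// base64-encoded MD5 of the raw key that the field docs require. A small
// hypothetical helper, with the key supplied by the caller:
//
// Assumed imports: "crypto/md5", "encoding/base64",
// "github.com/aws/aws-sdk-go/service/s3".
func withSSECustomerKey(in *s3.UploadPartInput, rawKey []byte) *s3.UploadPartInput {
	digest := md5.Sum(rawKey) // 128-bit MD5 of the raw key, per RFC 1321
	return in.
		SetSSECustomerAlgorithm("AES256").
		SetSSECustomerKey(string(rawKey)).
		SetSSECustomerKeyMD5(base64.StdEncoding.EncodeToString(digest[:]))
}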
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UploadPartInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UploadPartInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.PartNumber == nil { - invalidParams.Add(request.NewErrParamRequired("PartNumber")) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBody sets the Body field's value. -func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { - s.Body = v - return s -} - -// SetBucket sets the Bucket field's value. -func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { - s.Bucket = &v - return s -} - -func (s *UploadPartInput) getBucket() (v string) { - if s.Bucket == nil { - return v - } - return *s.Bucket -} - -// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. -func (s *UploadPartInput) SetChecksumAlgorithm(v string) *UploadPartInput { - s.ChecksumAlgorithm = &v - return s -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *UploadPartInput) SetChecksumCRC32(v string) *UploadPartInput { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. -func (s *UploadPartInput) SetChecksumCRC32C(v string) *UploadPartInput { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *UploadPartInput) SetChecksumSHA1(v string) *UploadPartInput { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *UploadPartInput) SetChecksumSHA256(v string) *UploadPartInput { - s.ChecksumSHA256 = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { - s.ContentLength = &v - return s -} - -// SetContentMD5 sets the ContentMD5 field's value. -func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { - s.ContentMD5 = &v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *UploadPartInput) SetExpectedBucketOwner(v string) *UploadPartInput { - s.ExpectedBucketOwner = &v - return s -} - -// SetKey sets the Key field's value. -func (s *UploadPartInput) SetKey(v string) *UploadPartInput { - s.Key = &v - return s -} - -// SetPartNumber sets the PartNumber field's value. -func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { - s.PartNumber = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { - s.RequestPayer = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
-func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKey sets the SSECustomerKey field's value. -func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { - s.SSECustomerKey = &v - return s -} - -func (s *UploadPartInput) getSSECustomerKey() (v string) { - if s.SSECustomerKey == nil { - return v - } - return *s.SSECustomerKey -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetUploadId sets the UploadId field's value. -func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { - s.UploadId = &v - return s -} - -func (s *UploadPartInput) getEndpointARN() (arn.Resource, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - return parseEndpointARN(*s.Bucket) -} - -func (s *UploadPartInput) hasEndpointARN() bool { - if s.Bucket == nil { - return false - } - return arn.IsARN(*s.Bucket) -} - -// updateArnableField updates the value of the input field that -// takes an ARN as an input. This method is useful to backfill -// the parsed resource name from ARN into the input member. -// It returns a pointer to a modified copy of input and an error. -// Note that original input is not modified. -func (s UploadPartInput) updateArnableField(v string) (interface{}, error) { - if s.Bucket == nil { - return nil, fmt.Errorf("member Bucket is nil") - } - s.Bucket = aws.String(v) - return &s, nil -} - -type UploadPartOutput struct { - _ struct{} `type:"structure"` - - // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` - - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` - - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. 
- ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` - - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. - ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` - - // Entity tag for the uploaded object. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for the - // object. - // - // SSEKMSKeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by UploadPartOutput's - // String and GoString methods. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UploadPartOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UploadPartOutput) GoString() string { - return s.String() -} - -// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. -func (s *UploadPartOutput) SetBucketKeyEnabled(v bool) *UploadPartOutput { - s.BucketKeyEnabled = &v - return s -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *UploadPartOutput) SetChecksumCRC32(v string) *UploadPartOutput { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value.
-func (s *UploadPartOutput) SetChecksumCRC32C(v string) *UploadPartOutput { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *UploadPartOutput) SetChecksumSHA1(v string) *UploadPartOutput { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *UploadPartOutput) SetChecksumSHA256(v string) *UploadPartOutput { - s.ChecksumSHA256 = &v - return s -} - -// SetETag sets the ETag field's value. -func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { - s.ETag = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput { - s.RequestCharged = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { - s.ServerSideEncryption = &v - return s -} - -// Describes the versioning state of an Amazon S3 bucket. For more information, -// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) -// in the Amazon S3 API Reference. -type VersioningConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA - // delete. If the bucket has never been so configured, this element is not returned. - MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` - - // The versioning state of the bucket. - Status *string `type:"string" enum:"BucketVersioningStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VersioningConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VersioningConfiguration) GoString() string { - return s.String() -} - -// SetMFADelete sets the MFADelete field's value. -func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { - s.MFADelete = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { - s.Status = &v - return s -} - -// Specifies website configuration parameters for an Amazon S3 bucket. -type WebsiteConfiguration struct { - _ struct{} `type:"structure"` - - // The name of the error document for the website. 
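// Editor's aside (illustrative sketch only; not part of the vendored SDK file).
// VersioningConfiguration above is the payload of PutBucketVersioning; the
// status constant is the BucketVersioningStatusEnabled value declared later
// in this file. The bucket name is supplied by the caller.
//
// Assumed import: "github.com/aws/aws-sdk-go/service/s3".
func exampleEnableVersioning(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketVersioning(new(s3.PutBucketVersioningInput).
		SetBucket(bucket).
		SetVersioningConfiguration(new(s3.VersioningConfiguration).
			SetStatus(s3.BucketVersioningStatusEnabled))) // "Enabled"
	return err
}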
- ErrorDocument *ErrorDocument `type:"structure"` - - // The name of the index document for the website. - IndexDocument *IndexDocument `type:"structure"` - - // The redirect behavior for every request to this bucket's website endpoint. - // - // If you specify this property, you can't specify any other property. - RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` - - // Rules that define when a redirect is applied and the redirect behavior. - RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WebsiteConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WebsiteConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *WebsiteConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} - if s.ErrorDocument != nil { - if err := s.ErrorDocument.Validate(); err != nil { - invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) - } - } - if s.IndexDocument != nil { - if err := s.IndexDocument.Validate(); err != nil { - invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) - } - } - if s.RedirectAllRequestsTo != nil { - if err := s.RedirectAllRequestsTo.Validate(); err != nil { - invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) - } - } - if s.RoutingRules != nil { - for i, v := range s.RoutingRules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetErrorDocument sets the ErrorDocument field's value. -func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration { - s.ErrorDocument = v - return s -} - -// SetIndexDocument sets the IndexDocument field's value. -func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration { - s.IndexDocument = v - return s -} - -// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. -func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration { - s.RedirectAllRequestsTo = v - return s -} - -// SetRoutingRules sets the RoutingRules field's value. -func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration { - s.RoutingRules = v - return s -} - -type WriteGetObjectResponseInput struct { - _ struct{} `locationName:"WriteGetObjectResponseRequest" type:"structure" payload:"Body"` - - // Indicates that a range of bytes was specified. - AcceptRanges *string `location:"header" locationName:"x-amz-fwd-header-accept-ranges" type:"string"` - - // The object data. - // - // To use a non-seekable io.Reader for this request wrap the io.Reader with - // "aws.ReadSeekCloser". The SDK will not retry request errors for non-seekable - // readers.
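// Editor's aside (illustrative sketch only; not part of the vendored SDK file).
// WebsiteConfiguration above is the payload of PutBucketWebsite; the
// RedirectAllRequestsTo docs note that it excludes every other property, so
// this sketch sets only the index and error documents. Document names are
// hypothetical placeholders.
//
// Assumed import: "github.com/aws/aws-sdk-go/service/s3".
func exampleConfigureWebsite(svc *s3.S3, bucket string) error {
	cfg := new(s3.WebsiteConfiguration).
		SetIndexDocument(new(s3.IndexDocument).SetSuffix("index.html")).
		SetErrorDocument(new(s3.ErrorDocument).SetKey("error.html"))
	_, err := svc.PutBucketWebsite(new(s3.PutBucketWebsiteInput).
		SetBucket(bucket).
		SetWebsiteConfiguration(cfg))
	return err
}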
This will allow the SDK to send the reader's payload as chunked - // transfer encoding. - Body io.ReadSeeker `type:"blob"` - - // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for - // server-side encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"x-amz-fwd-header-Cache-Control" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the base64-encoded, - // 32-bit CRC32 checksum of the object returned by the Object Lambda function. - // This may not match the checksum for the object stored in Amazon S3. Amazon - // S3 will perform validation of the checksum values only when the original - // GetObject request required checksum validation. For more information about - // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // Only one checksum header can be specified at a time. If you supply multiple - // checksum headers, this request will fail. - ChecksumCRC32 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-crc32" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the base64-encoded, - // 32-bit CRC32C checksum of the object returned by the Object Lambda function. - // This may not match the checksum for the object stored in Amazon S3. Amazon - // S3 will perform validation of the checksum values only when the original - // GetObject request required checksum validation. For more information about - // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // Only one checksum header can be specified at a time. If you supply multiple - // checksum headers, this request will fail. - ChecksumCRC32C *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-crc32c" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the base64-encoded, - // 160-bit SHA-1 digest of the object returned by the Object Lambda function. - // This may not match the checksum for the object stored in Amazon S3. Amazon - // S3 will perform validation of the checksum values only when the original - // GetObject request required checksum validation. For more information about - // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // Only one checksum header can be specified at a time. If you supply multiple - // checksum headers, this request will fail. - ChecksumSHA1 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-sha1" type:"string"` - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the base64-encoded, - // 256-bit SHA-256 digest of the object returned by the Object Lambda function. 
- // This may not match the checksum for the object stored in Amazon S3. Amazon - // S3 will perform validation of the checksum values only when the original - // GetObject request required checksum validation. For more information about - // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. - // - // Only one checksum header can be specified at a time. If you supply multiple - // checksum headers, this request will fail. - ChecksumSHA256 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-sha256" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"x-amz-fwd-header-Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"x-amz-fwd-header-Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"x-amz-fwd-header-Content-Language" type:"string"` - - // The size of the content body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // The portion of the object returned in the response. - ContentRange *string `location:"header" locationName:"x-amz-fwd-header-Content-Range" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"x-amz-fwd-header-Content-Type" type:"string"` - - // Specifies whether an object stored in Amazon S3 is (true) or is not (false) - // a delete marker. - DeleteMarker *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-delete-marker" type:"boolean"` - - // An opaque identifier assigned by a web server to a specific version of a - // resource found at a URL. - ETag *string `location:"header" locationName:"x-amz-fwd-header-ETag" type:"string"` - - // A string that uniquely identifies an error condition. Returned in the <Code> - // tag of the error XML response for a corresponding GetObject call. Cannot - // be used with a successful StatusCode header or when the transformed object - // is provided in the body. All error codes from S3 are sentence-cased. The - // regular expression (regex) value is "^[A-Z][a-zA-Z]+$". - ErrorCode *string `location:"header" locationName:"x-amz-fwd-error-code" type:"string"` - - // Contains a generic description of the error condition. Returned in the <Message> - // tag of the error XML response for a corresponding GetObject call. Cannot - // be used with a successful StatusCode header or when the transformed object - // is provided in the body. - ErrorMessage *string `location:"header" locationName:"x-amz-fwd-error-message" type:"string"` - - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // that provide the object expiration information. The value of the rule-id - // is URL-encoded. - Expiration *string `location:"header" locationName:"x-amz-fwd-header-x-amz-expiration" type:"string"` - - // The date and time at which the object is no longer cacheable.
- Expires *time.Time `location:"header" locationName:"x-amz-fwd-header-Expires" type:"timestamp"` - - // The date and time that the object was last modified. - LastModified *time.Time `location:"header" locationName:"x-amz-fwd-header-Last-Modified" type:"timestamp"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Set to the number of metadata entries not returned in x-amz-meta headers. - // This can happen if you create metadata using an API like SOAP that supports - // more flexible metadata than the REST API. For example, using SOAP, you can - // create metadata whose values are not legal HTTP headers. - MissingMeta *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-missing-meta" type:"integer"` - - // Indicates whether an object stored in Amazon S3 has an active legal hold. - ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - - // Indicates whether an object stored in Amazon S3 has Object Lock enabled. - // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html). - ObjectLockMode *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - - // The date and time when Object Lock is configured to expire. - ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - - // The count of parts this object has. - PartsCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-mp-parts-count" type:"integer"` - - // Indicates if the request involves a bucket that is either a source or destination - // in a Replication rule. For more information about S3 Replication, see Replication - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html). - ReplicationStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-replication-status" type:"string" enum:"ReplicationStatus"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-fwd-header-x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Route prefix to the HTTP URL generated. - // - // RequestRoute is a required field - RequestRoute *string `location:"header" locationName:"x-amz-request-route" type:"string" required:"true"` - - // A single use encrypted token that maps WriteGetObjectResponse to the end - // user GetObject request. - // - // RequestToken is a required field - RequestToken *string `location:"header" locationName:"x-amz-request-token" type:"string" required:"true"` - - // Provides information about object restoration operation and expiration time - // of the restored object copy. - Restore *string `location:"header" locationName:"x-amz-fwd-header-x-amz-restore" type:"string"` - - // Encryption algorithm used if server-side encryption with a customer-provided - // encryption key was specified for the object stored in Amazon S3. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 - // to encrypt data stored in S3.
For more information, see Protecting data using - // server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html). - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for the - // object stored in Amazon S3. - // - // SSEKMSKeyId is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by WriteGetObjectResponseInput's - // String and GoString methods. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - - // The server-side encryption algorithm used when storing the requested object in - // Amazon S3 (for example, AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // The integer status code for an HTTP response of a corresponding GetObject - // request. - // - // Status Codes - // - // * 200 - OK - // - // * 206 - Partial Content - // - // * 304 - Not Modified - // - // * 400 - Bad Request - // - // * 401 - Unauthorized - // - // * 403 - Forbidden - // - // * 404 - Not Found - // - // * 405 - Method Not Allowed - // - // * 409 - Conflict - // - // * 411 - Length Required - // - // * 412 - Precondition Failed - // - // * 416 - Range Not Satisfiable - // - // * 500 - Internal Server Error - // - // * 503 - Service Unavailable - StatusCode *int64 `location:"header" locationName:"x-amz-fwd-status" type:"integer"` - - // Provides storage class information of the object. Amazon S3 returns this - // header for all objects except for S3 Standard storage class objects. - // - // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). - StorageClass *string `location:"header" locationName:"x-amz-fwd-header-x-amz-storage-class" type:"string" enum:"StorageClass"` - - // The number of tags, if any, on the object. - TagCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-tagging-count" type:"integer"` - - // An ID used to reference a specific version of the object. - VersionId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-version-id" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WriteGetObjectResponseInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WriteGetObjectResponseInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid.
-func (s *WriteGetObjectResponseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WriteGetObjectResponseInput"} - if s.RequestRoute == nil { - invalidParams.Add(request.NewErrParamRequired("RequestRoute")) - } - if s.RequestRoute != nil && len(*s.RequestRoute) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RequestRoute", 1)) - } - if s.RequestToken == nil { - invalidParams.Add(request.NewErrParamRequired("RequestToken")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAcceptRanges sets the AcceptRanges field's value. -func (s *WriteGetObjectResponseInput) SetAcceptRanges(v string) *WriteGetObjectResponseInput { - s.AcceptRanges = &v - return s -} - -// SetBody sets the Body field's value. -func (s *WriteGetObjectResponseInput) SetBody(v io.ReadSeeker) *WriteGetObjectResponseInput { - s.Body = v - return s -} - -// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. -func (s *WriteGetObjectResponseInput) SetBucketKeyEnabled(v bool) *WriteGetObjectResponseInput { - s.BucketKeyEnabled = &v - return s -} - -// SetCacheControl sets the CacheControl field's value. -func (s *WriteGetObjectResponseInput) SetCacheControl(v string) *WriteGetObjectResponseInput { - s.CacheControl = &v - return s -} - -// SetChecksumCRC32 sets the ChecksumCRC32 field's value. -func (s *WriteGetObjectResponseInput) SetChecksumCRC32(v string) *WriteGetObjectResponseInput { - s.ChecksumCRC32 = &v - return s -} - -// SetChecksumCRC32C sets the ChecksumCRC32C field's value. -func (s *WriteGetObjectResponseInput) SetChecksumCRC32C(v string) *WriteGetObjectResponseInput { - s.ChecksumCRC32C = &v - return s -} - -// SetChecksumSHA1 sets the ChecksumSHA1 field's value. -func (s *WriteGetObjectResponseInput) SetChecksumSHA1(v string) *WriteGetObjectResponseInput { - s.ChecksumSHA1 = &v - return s -} - -// SetChecksumSHA256 sets the ChecksumSHA256 field's value. -func (s *WriteGetObjectResponseInput) SetChecksumSHA256(v string) *WriteGetObjectResponseInput { - s.ChecksumSHA256 = &v - return s -} - -// SetContentDisposition sets the ContentDisposition field's value. -func (s *WriteGetObjectResponseInput) SetContentDisposition(v string) *WriteGetObjectResponseInput { - s.ContentDisposition = &v - return s -} - -// SetContentEncoding sets the ContentEncoding field's value. -func (s *WriteGetObjectResponseInput) SetContentEncoding(v string) *WriteGetObjectResponseInput { - s.ContentEncoding = &v - return s -} - -// SetContentLanguage sets the ContentLanguage field's value. -func (s *WriteGetObjectResponseInput) SetContentLanguage(v string) *WriteGetObjectResponseInput { - s.ContentLanguage = &v - return s -} - -// SetContentLength sets the ContentLength field's value. -func (s *WriteGetObjectResponseInput) SetContentLength(v int64) *WriteGetObjectResponseInput { - s.ContentLength = &v - return s -} - -// SetContentRange sets the ContentRange field's value. -func (s *WriteGetObjectResponseInput) SetContentRange(v string) *WriteGetObjectResponseInput { - s.ContentRange = &v - return s -} - -// SetContentType sets the ContentType field's value. -func (s *WriteGetObjectResponseInput) SetContentType(v string) *WriteGetObjectResponseInput { - s.ContentType = &v - return s -} - -// SetDeleteMarker sets the DeleteMarker field's value. -func (s *WriteGetObjectResponseInput) SetDeleteMarker(v bool) *WriteGetObjectResponseInput { - s.DeleteMarker = &v - return s -} - -// SetETag sets the ETag field's value. 
-func (s *WriteGetObjectResponseInput) SetETag(v string) *WriteGetObjectResponseInput { - s.ETag = &v - return s -} - -// SetErrorCode sets the ErrorCode field's value. -func (s *WriteGetObjectResponseInput) SetErrorCode(v string) *WriteGetObjectResponseInput { - s.ErrorCode = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *WriteGetObjectResponseInput) SetErrorMessage(v string) *WriteGetObjectResponseInput { - s.ErrorMessage = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *WriteGetObjectResponseInput) SetExpiration(v string) *WriteGetObjectResponseInput { - s.Expiration = &v - return s -} - -// SetExpires sets the Expires field's value. -func (s *WriteGetObjectResponseInput) SetExpires(v time.Time) *WriteGetObjectResponseInput { - s.Expires = &v - return s -} - -// SetLastModified sets the LastModified field's value. -func (s *WriteGetObjectResponseInput) SetLastModified(v time.Time) *WriteGetObjectResponseInput { - s.LastModified = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *WriteGetObjectResponseInput) SetMetadata(v map[string]*string) *WriteGetObjectResponseInput { - s.Metadata = v - return s -} - -// SetMissingMeta sets the MissingMeta field's value. -func (s *WriteGetObjectResponseInput) SetMissingMeta(v int64) *WriteGetObjectResponseInput { - s.MissingMeta = &v - return s -} - -// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. -func (s *WriteGetObjectResponseInput) SetObjectLockLegalHoldStatus(v string) *WriteGetObjectResponseInput { - s.ObjectLockLegalHoldStatus = &v - return s -} - -// SetObjectLockMode sets the ObjectLockMode field's value. -func (s *WriteGetObjectResponseInput) SetObjectLockMode(v string) *WriteGetObjectResponseInput { - s.ObjectLockMode = &v - return s -} - -// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. -func (s *WriteGetObjectResponseInput) SetObjectLockRetainUntilDate(v time.Time) *WriteGetObjectResponseInput { - s.ObjectLockRetainUntilDate = &v - return s -} - -// SetPartsCount sets the PartsCount field's value. -func (s *WriteGetObjectResponseInput) SetPartsCount(v int64) *WriteGetObjectResponseInput { - s.PartsCount = &v - return s -} - -// SetReplicationStatus sets the ReplicationStatus field's value. -func (s *WriteGetObjectResponseInput) SetReplicationStatus(v string) *WriteGetObjectResponseInput { - s.ReplicationStatus = &v - return s -} - -// SetRequestCharged sets the RequestCharged field's value. -func (s *WriteGetObjectResponseInput) SetRequestCharged(v string) *WriteGetObjectResponseInput { - s.RequestCharged = &v - return s -} - -// SetRequestRoute sets the RequestRoute field's value. -func (s *WriteGetObjectResponseInput) SetRequestRoute(v string) *WriteGetObjectResponseInput { - s.RequestRoute = &v - return s -} - -// SetRequestToken sets the RequestToken field's value. -func (s *WriteGetObjectResponseInput) SetRequestToken(v string) *WriteGetObjectResponseInput { - s.RequestToken = &v - return s -} - -// SetRestore sets the Restore field's value. -func (s *WriteGetObjectResponseInput) SetRestore(v string) *WriteGetObjectResponseInput { - s.Restore = &v - return s -} - -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *WriteGetObjectResponseInput) SetSSECustomerAlgorithm(v string) *WriteGetObjectResponseInput { - s.SSECustomerAlgorithm = &v - return s -} - -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
-func (s *WriteGetObjectResponseInput) SetSSECustomerKeyMD5(v string) *WriteGetObjectResponseInput { - s.SSECustomerKeyMD5 = &v - return s -} - -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. -func (s *WriteGetObjectResponseInput) SetSSEKMSKeyId(v string) *WriteGetObjectResponseInput { - s.SSEKMSKeyId = &v - return s -} - -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *WriteGetObjectResponseInput) SetServerSideEncryption(v string) *WriteGetObjectResponseInput { - s.ServerSideEncryption = &v - return s -} - -// SetStatusCode sets the StatusCode field's value. -func (s *WriteGetObjectResponseInput) SetStatusCode(v int64) *WriteGetObjectResponseInput { - s.StatusCode = &v - return s -} - -// SetStorageClass sets the StorageClass field's value. -func (s *WriteGetObjectResponseInput) SetStorageClass(v string) *WriteGetObjectResponseInput { - s.StorageClass = &v - return s -} - -// SetTagCount sets the TagCount field's value. -func (s *WriteGetObjectResponseInput) SetTagCount(v int64) *WriteGetObjectResponseInput { - s.TagCount = &v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *WriteGetObjectResponseInput) SetVersionId(v string) *WriteGetObjectResponseInput { - s.VersionId = &v - return s -} - -func (s *WriteGetObjectResponseInput) hostLabels() map[string]string { - return map[string]string{ - "RequestRoute": aws.StringValue(s.RequestRoute), - } -} - -type WriteGetObjectResponseOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WriteGetObjectResponseOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
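// Editor's aside (illustrative sketch only; not part of the vendored SDK file).
// RequestRoute and RequestToken above arrive in the S3 Object Lambda event; a
// handler forwards them, plus the transformed payload, back through
// WriteGetObjectResponse. Parameter names here are hypothetical; *strings.Reader
// is already seekable, so no aws.ReadSeekCloser wrapper is needed (see the
// Body docs above for non-seekable readers).
//
// Assumed imports: "strings", "github.com/aws/aws-sdk-go/service/s3".
func exampleRespond(svc *s3.S3, outputRoute, outputToken, transformed string) error {
	_, err := svc.WriteGetObjectResponse(new(s3.WriteGetObjectResponseInput).
		SetRequestRoute(outputRoute). // routed as a host label, see hostLabels above
		SetRequestToken(outputToken). // single-use token from the event
		SetStatusCode(200).           // one of the status codes listed above
		SetBody(strings.NewReader(transformed)))
	return err
}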
-func (s WriteGetObjectResponseOutput) GoString() string { - return s.String() -} - -const ( - // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value - AnalyticsS3ExportFileFormatCsv = "CSV" -) - -// AnalyticsS3ExportFileFormat_Values returns all elements of the AnalyticsS3ExportFileFormat enum -func AnalyticsS3ExportFileFormat_Values() []string { - return []string{ - AnalyticsS3ExportFileFormatCsv, - } -} - -const ( - // ArchiveStatusArchiveAccess is a ArchiveStatus enum value - ArchiveStatusArchiveAccess = "ARCHIVE_ACCESS" - - // ArchiveStatusDeepArchiveAccess is a ArchiveStatus enum value - ArchiveStatusDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" -) - -// ArchiveStatus_Values returns all elements of the ArchiveStatus enum -func ArchiveStatus_Values() []string { - return []string{ - ArchiveStatusArchiveAccess, - ArchiveStatusDeepArchiveAccess, - } -} - -const ( - // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value - BucketAccelerateStatusEnabled = "Enabled" - - // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value - BucketAccelerateStatusSuspended = "Suspended" -) - -// BucketAccelerateStatus_Values returns all elements of the BucketAccelerateStatus enum -func BucketAccelerateStatus_Values() []string { - return []string{ - BucketAccelerateStatusEnabled, - BucketAccelerateStatusSuspended, - } -} - -const ( - // BucketCannedACLPrivate is a BucketCannedACL enum value - BucketCannedACLPrivate = "private" - - // BucketCannedACLPublicRead is a BucketCannedACL enum value - BucketCannedACLPublicRead = "public-read" - - // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value - BucketCannedACLPublicReadWrite = "public-read-write" - - // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value - BucketCannedACLAuthenticatedRead = "authenticated-read" -) - -// BucketCannedACL_Values returns all elements of the BucketCannedACL enum -func BucketCannedACL_Values() []string { - return []string{ - BucketCannedACLPrivate, - BucketCannedACLPublicRead, - BucketCannedACLPublicReadWrite, - BucketCannedACLAuthenticatedRead, - } -} - -const ( - // BucketLocationConstraintAfSouth1 is a BucketLocationConstraint enum value - BucketLocationConstraintAfSouth1 = "af-south-1" - - // BucketLocationConstraintApEast1 is a BucketLocationConstraint enum value - BucketLocationConstraintApEast1 = "ap-east-1" - - // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value - BucketLocationConstraintApNortheast1 = "ap-northeast-1" - - // BucketLocationConstraintApNortheast2 is a BucketLocationConstraint enum value - BucketLocationConstraintApNortheast2 = "ap-northeast-2" - - // BucketLocationConstraintApNortheast3 is a BucketLocationConstraint enum value - BucketLocationConstraintApNortheast3 = "ap-northeast-3" - - // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value - BucketLocationConstraintApSouth1 = "ap-south-1" - - // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value - BucketLocationConstraintApSoutheast1 = "ap-southeast-1" - - // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value - BucketLocationConstraintApSoutheast2 = "ap-southeast-2" - - // BucketLocationConstraintApSoutheast3 is a BucketLocationConstraint enum value - BucketLocationConstraintApSoutheast3 = "ap-southeast-3" - - // BucketLocationConstraintCaCentral1 is a BucketLocationConstraint enum value - BucketLocationConstraintCaCentral1 = "ca-central-1" - - // 
BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value - BucketLocationConstraintCnNorth1 = "cn-north-1" - - // BucketLocationConstraintCnNorthwest1 is a BucketLocationConstraint enum value - BucketLocationConstraintCnNorthwest1 = "cn-northwest-1" - - // BucketLocationConstraintEu is a BucketLocationConstraint enum value - BucketLocationConstraintEu = "EU" - - // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value - BucketLocationConstraintEuCentral1 = "eu-central-1" - - // BucketLocationConstraintEuNorth1 is a BucketLocationConstraint enum value - BucketLocationConstraintEuNorth1 = "eu-north-1" - - // BucketLocationConstraintEuSouth1 is a BucketLocationConstraint enum value - BucketLocationConstraintEuSouth1 = "eu-south-1" - - // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value - BucketLocationConstraintEuWest1 = "eu-west-1" - - // BucketLocationConstraintEuWest2 is a BucketLocationConstraint enum value - BucketLocationConstraintEuWest2 = "eu-west-2" - - // BucketLocationConstraintEuWest3 is a BucketLocationConstraint enum value - BucketLocationConstraintEuWest3 = "eu-west-3" - - // BucketLocationConstraintMeSouth1 is a BucketLocationConstraint enum value - BucketLocationConstraintMeSouth1 = "me-south-1" - - // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value - BucketLocationConstraintSaEast1 = "sa-east-1" - - // BucketLocationConstraintUsEast2 is a BucketLocationConstraint enum value - BucketLocationConstraintUsEast2 = "us-east-2" - - // BucketLocationConstraintUsGovEast1 is a BucketLocationConstraint enum value - BucketLocationConstraintUsGovEast1 = "us-gov-east-1" - - // BucketLocationConstraintUsGovWest1 is a BucketLocationConstraint enum value - BucketLocationConstraintUsGovWest1 = "us-gov-west-1" - - // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value - BucketLocationConstraintUsWest1 = "us-west-1" - - // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value - BucketLocationConstraintUsWest2 = "us-west-2" -) - -// BucketLocationConstraint_Values returns all elements of the BucketLocationConstraint enum -func BucketLocationConstraint_Values() []string { - return []string{ - BucketLocationConstraintAfSouth1, - BucketLocationConstraintApEast1, - BucketLocationConstraintApNortheast1, - BucketLocationConstraintApNortheast2, - BucketLocationConstraintApNortheast3, - BucketLocationConstraintApSouth1, - BucketLocationConstraintApSoutheast1, - BucketLocationConstraintApSoutheast2, - BucketLocationConstraintApSoutheast3, - BucketLocationConstraintCaCentral1, - BucketLocationConstraintCnNorth1, - BucketLocationConstraintCnNorthwest1, - BucketLocationConstraintEu, - BucketLocationConstraintEuCentral1, - BucketLocationConstraintEuNorth1, - BucketLocationConstraintEuSouth1, - BucketLocationConstraintEuWest1, - BucketLocationConstraintEuWest2, - BucketLocationConstraintEuWest3, - BucketLocationConstraintMeSouth1, - BucketLocationConstraintSaEast1, - BucketLocationConstraintUsEast2, - BucketLocationConstraintUsGovEast1, - BucketLocationConstraintUsGovWest1, - BucketLocationConstraintUsWest1, - BucketLocationConstraintUsWest2, - } -} - -const ( - // BucketLogsPermissionFullControl is a BucketLogsPermission enum value - BucketLogsPermissionFullControl = "FULL_CONTROL" - - // BucketLogsPermissionRead is a BucketLogsPermission enum value - BucketLogsPermissionRead = "READ" - - // BucketLogsPermissionWrite is a BucketLogsPermission enum value - BucketLogsPermissionWrite = 
"WRITE" -) - -// BucketLogsPermission_Values returns all elements of the BucketLogsPermission enum -func BucketLogsPermission_Values() []string { - return []string{ - BucketLogsPermissionFullControl, - BucketLogsPermissionRead, - BucketLogsPermissionWrite, - } -} - -const ( - // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value - BucketVersioningStatusEnabled = "Enabled" - - // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value - BucketVersioningStatusSuspended = "Suspended" -) - -// BucketVersioningStatus_Values returns all elements of the BucketVersioningStatus enum -func BucketVersioningStatus_Values() []string { - return []string{ - BucketVersioningStatusEnabled, - BucketVersioningStatusSuspended, - } -} - -const ( - // ChecksumAlgorithmCrc32 is a ChecksumAlgorithm enum value - ChecksumAlgorithmCrc32 = "CRC32" - - // ChecksumAlgorithmCrc32c is a ChecksumAlgorithm enum value - ChecksumAlgorithmCrc32c = "CRC32C" - - // ChecksumAlgorithmSha1 is a ChecksumAlgorithm enum value - ChecksumAlgorithmSha1 = "SHA1" - - // ChecksumAlgorithmSha256 is a ChecksumAlgorithm enum value - ChecksumAlgorithmSha256 = "SHA256" -) - -// ChecksumAlgorithm_Values returns all elements of the ChecksumAlgorithm enum -func ChecksumAlgorithm_Values() []string { - return []string{ - ChecksumAlgorithmCrc32, - ChecksumAlgorithmCrc32c, - ChecksumAlgorithmSha1, - ChecksumAlgorithmSha256, - } -} - -const ( - // ChecksumModeEnabled is a ChecksumMode enum value - ChecksumModeEnabled = "ENABLED" -) - -// ChecksumMode_Values returns all elements of the ChecksumMode enum -func ChecksumMode_Values() []string { - return []string{ - ChecksumModeEnabled, - } -} - -const ( - // CompressionTypeNone is a CompressionType enum value - CompressionTypeNone = "NONE" - - // CompressionTypeGzip is a CompressionType enum value - CompressionTypeGzip = "GZIP" - - // CompressionTypeBzip2 is a CompressionType enum value - CompressionTypeBzip2 = "BZIP2" -) - -// CompressionType_Values returns all elements of the CompressionType enum -func CompressionType_Values() []string { - return []string{ - CompressionTypeNone, - CompressionTypeGzip, - CompressionTypeBzip2, - } -} - -const ( - // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value - DeleteMarkerReplicationStatusEnabled = "Enabled" - - // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value - DeleteMarkerReplicationStatusDisabled = "Disabled" -) - -// DeleteMarkerReplicationStatus_Values returns all elements of the DeleteMarkerReplicationStatus enum -func DeleteMarkerReplicationStatus_Values() []string { - return []string{ - DeleteMarkerReplicationStatusEnabled, - DeleteMarkerReplicationStatusDisabled, - } -} - -// Requests Amazon S3 to encode the object keys in the response and specifies -// the encoding method to use. An object key may contain any Unicode character; -// however, XML 1.0 parser cannot parse some characters, such as characters -// with an ASCII value from 0 to 10. For characters that are not supported in -// XML 1.0, you can add this parameter to request that Amazon S3 encode the -// keys in the response. -const ( - // EncodingTypeUrl is a EncodingType enum value - EncodingTypeUrl = "url" -) - -// EncodingType_Values returns all elements of the EncodingType enum -func EncodingType_Values() []string { - return []string{ - EncodingTypeUrl, - } -} - -// The bucket event for which to send notifications. 
-const ( - // EventS3ReducedRedundancyLostObject is a Event enum value - EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" - - // EventS3ObjectCreated is a Event enum value - EventS3ObjectCreated = "s3:ObjectCreated:*" - - // EventS3ObjectCreatedPut is a Event enum value - EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" - - // EventS3ObjectCreatedPost is a Event enum value - EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" - - // EventS3ObjectCreatedCopy is a Event enum value - EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" - - // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value - EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" - - // EventS3ObjectRemoved is a Event enum value - EventS3ObjectRemoved = "s3:ObjectRemoved:*" - - // EventS3ObjectRemovedDelete is a Event enum value - EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" - - // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value - EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" - - // EventS3ObjectRestore is a Event enum value - EventS3ObjectRestore = "s3:ObjectRestore:*" - - // EventS3ObjectRestorePost is a Event enum value - EventS3ObjectRestorePost = "s3:ObjectRestore:Post" - - // EventS3ObjectRestoreCompleted is a Event enum value - EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" - - // EventS3Replication is a Event enum value - EventS3Replication = "s3:Replication:*" - - // EventS3ReplicationOperationFailedReplication is a Event enum value - EventS3ReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication" - - // EventS3ReplicationOperationNotTracked is a Event enum value - EventS3ReplicationOperationNotTracked = "s3:Replication:OperationNotTracked" - - // EventS3ReplicationOperationMissedThreshold is a Event enum value - EventS3ReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold" - - // EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value - EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold" - - // EventS3ObjectRestoreDelete is a Event enum value - EventS3ObjectRestoreDelete = "s3:ObjectRestore:Delete" - - // EventS3LifecycleTransition is a Event enum value - EventS3LifecycleTransition = "s3:LifecycleTransition" - - // EventS3IntelligentTiering is a Event enum value - EventS3IntelligentTiering = "s3:IntelligentTiering" - - // EventS3ObjectAclPut is a Event enum value - EventS3ObjectAclPut = "s3:ObjectAcl:Put" - - // EventS3LifecycleExpiration is a Event enum value - EventS3LifecycleExpiration = "s3:LifecycleExpiration:*" - - // EventS3LifecycleExpirationDelete is a Event enum value - EventS3LifecycleExpirationDelete = "s3:LifecycleExpiration:Delete" - - // EventS3LifecycleExpirationDeleteMarkerCreated is a Event enum value - EventS3LifecycleExpirationDeleteMarkerCreated = "s3:LifecycleExpiration:DeleteMarkerCreated" - - // EventS3ObjectTagging is a Event enum value - EventS3ObjectTagging = "s3:ObjectTagging:*" - - // EventS3ObjectTaggingPut is a Event enum value - EventS3ObjectTaggingPut = "s3:ObjectTagging:Put" - - // EventS3ObjectTaggingDelete is a Event enum value - EventS3ObjectTaggingDelete = "s3:ObjectTagging:Delete" -) - -// Event_Values returns all elements of the Event enum -func Event_Values() []string { - return []string{ - EventS3ReducedRedundancyLostObject, - EventS3ObjectCreated, - EventS3ObjectCreatedPut, - EventS3ObjectCreatedPost, - 
EventS3ObjectCreatedCopy, - EventS3ObjectCreatedCompleteMultipartUpload, - EventS3ObjectRemoved, - EventS3ObjectRemovedDelete, - EventS3ObjectRemovedDeleteMarkerCreated, - EventS3ObjectRestore, - EventS3ObjectRestorePost, - EventS3ObjectRestoreCompleted, - EventS3Replication, - EventS3ReplicationOperationFailedReplication, - EventS3ReplicationOperationNotTracked, - EventS3ReplicationOperationMissedThreshold, - EventS3ReplicationOperationReplicatedAfterThreshold, - EventS3ObjectRestoreDelete, - EventS3LifecycleTransition, - EventS3IntelligentTiering, - EventS3ObjectAclPut, - EventS3LifecycleExpiration, - EventS3LifecycleExpirationDelete, - EventS3LifecycleExpirationDeleteMarkerCreated, - EventS3ObjectTagging, - EventS3ObjectTaggingPut, - EventS3ObjectTaggingDelete, - } -} - -const ( - // ExistingObjectReplicationStatusEnabled is a ExistingObjectReplicationStatus enum value - ExistingObjectReplicationStatusEnabled = "Enabled" - - // ExistingObjectReplicationStatusDisabled is a ExistingObjectReplicationStatus enum value - ExistingObjectReplicationStatusDisabled = "Disabled" -) - -// ExistingObjectReplicationStatus_Values returns all elements of the ExistingObjectReplicationStatus enum -func ExistingObjectReplicationStatus_Values() []string { - return []string{ - ExistingObjectReplicationStatusEnabled, - ExistingObjectReplicationStatusDisabled, - } -} - -const ( - // ExpirationStatusEnabled is a ExpirationStatus enum value - ExpirationStatusEnabled = "Enabled" - - // ExpirationStatusDisabled is a ExpirationStatus enum value - ExpirationStatusDisabled = "Disabled" -) - -// ExpirationStatus_Values returns all elements of the ExpirationStatus enum -func ExpirationStatus_Values() []string { - return []string{ - ExpirationStatusEnabled, - ExpirationStatusDisabled, - } -} - -const ( - // ExpressionTypeSql is a ExpressionType enum value - ExpressionTypeSql = "SQL" -) - -// ExpressionType_Values returns all elements of the ExpressionType enum -func ExpressionType_Values() []string { - return []string{ - ExpressionTypeSql, - } -} - -const ( - // FileHeaderInfoUse is a FileHeaderInfo enum value - FileHeaderInfoUse = "USE" - - // FileHeaderInfoIgnore is a FileHeaderInfo enum value - FileHeaderInfoIgnore = "IGNORE" - - // FileHeaderInfoNone is a FileHeaderInfo enum value - FileHeaderInfoNone = "NONE" -) - -// FileHeaderInfo_Values returns all elements of the FileHeaderInfo enum -func FileHeaderInfo_Values() []string { - return []string{ - FileHeaderInfoUse, - FileHeaderInfoIgnore, - FileHeaderInfoNone, - } -} - -const ( - // FilterRuleNamePrefix is a FilterRuleName enum value - FilterRuleNamePrefix = "prefix" - - // FilterRuleNameSuffix is a FilterRuleName enum value - FilterRuleNameSuffix = "suffix" -) - -// FilterRuleName_Values returns all elements of the FilterRuleName enum -func FilterRuleName_Values() []string { - return []string{ - FilterRuleNamePrefix, - FilterRuleNameSuffix, - } -} - -const ( - // IntelligentTieringAccessTierArchiveAccess is a IntelligentTieringAccessTier enum value - IntelligentTieringAccessTierArchiveAccess = "ARCHIVE_ACCESS" - - // IntelligentTieringAccessTierDeepArchiveAccess is a IntelligentTieringAccessTier enum value - IntelligentTieringAccessTierDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" -) - -// IntelligentTieringAccessTier_Values returns all elements of the IntelligentTieringAccessTier enum -func IntelligentTieringAccessTier_Values() []string { - return []string{ - IntelligentTieringAccessTierArchiveAccess, - IntelligentTieringAccessTierDeepArchiveAccess, - } -} - -const 
( - // IntelligentTieringStatusEnabled is a IntelligentTieringStatus enum value - IntelligentTieringStatusEnabled = "Enabled" - - // IntelligentTieringStatusDisabled is a IntelligentTieringStatus enum value - IntelligentTieringStatusDisabled = "Disabled" -) - -// IntelligentTieringStatus_Values returns all elements of the IntelligentTieringStatus enum -func IntelligentTieringStatus_Values() []string { - return []string{ - IntelligentTieringStatusEnabled, - IntelligentTieringStatusDisabled, - } -} - -const ( - // InventoryFormatCsv is a InventoryFormat enum value - InventoryFormatCsv = "CSV" - - // InventoryFormatOrc is a InventoryFormat enum value - InventoryFormatOrc = "ORC" - - // InventoryFormatParquet is a InventoryFormat enum value - InventoryFormatParquet = "Parquet" -) - -// InventoryFormat_Values returns all elements of the InventoryFormat enum -func InventoryFormat_Values() []string { - return []string{ - InventoryFormatCsv, - InventoryFormatOrc, - InventoryFormatParquet, - } -} - -const ( - // InventoryFrequencyDaily is a InventoryFrequency enum value - InventoryFrequencyDaily = "Daily" - - // InventoryFrequencyWeekly is a InventoryFrequency enum value - InventoryFrequencyWeekly = "Weekly" -) - -// InventoryFrequency_Values returns all elements of the InventoryFrequency enum -func InventoryFrequency_Values() []string { - return []string{ - InventoryFrequencyDaily, - InventoryFrequencyWeekly, - } -} - -const ( - // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value - InventoryIncludedObjectVersionsAll = "All" - - // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value - InventoryIncludedObjectVersionsCurrent = "Current" -) - -// InventoryIncludedObjectVersions_Values returns all elements of the InventoryIncludedObjectVersions enum -func InventoryIncludedObjectVersions_Values() []string { - return []string{ - InventoryIncludedObjectVersionsAll, - InventoryIncludedObjectVersionsCurrent, - } -} - -const ( - // InventoryOptionalFieldSize is a InventoryOptionalField enum value - InventoryOptionalFieldSize = "Size" - - // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value - InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" - - // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value - InventoryOptionalFieldStorageClass = "StorageClass" - - // InventoryOptionalFieldEtag is a InventoryOptionalField enum value - InventoryOptionalFieldEtag = "ETag" - - // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value - InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded" - - // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value - InventoryOptionalFieldReplicationStatus = "ReplicationStatus" - - // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value - InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" - - // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value - InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" - - // InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value - InventoryOptionalFieldObjectLockMode = "ObjectLockMode" - - // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value - InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" - - // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value - 
InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" - - // InventoryOptionalFieldBucketKeyStatus is a InventoryOptionalField enum value - InventoryOptionalFieldBucketKeyStatus = "BucketKeyStatus" - - // InventoryOptionalFieldChecksumAlgorithm is a InventoryOptionalField enum value - InventoryOptionalFieldChecksumAlgorithm = "ChecksumAlgorithm" -) - -// InventoryOptionalField_Values returns all elements of the InventoryOptionalField enum -func InventoryOptionalField_Values() []string { - return []string{ - InventoryOptionalFieldSize, - InventoryOptionalFieldLastModifiedDate, - InventoryOptionalFieldStorageClass, - InventoryOptionalFieldEtag, - InventoryOptionalFieldIsMultipartUploaded, - InventoryOptionalFieldReplicationStatus, - InventoryOptionalFieldEncryptionStatus, - InventoryOptionalFieldObjectLockRetainUntilDate, - InventoryOptionalFieldObjectLockMode, - InventoryOptionalFieldObjectLockLegalHoldStatus, - InventoryOptionalFieldIntelligentTieringAccessTier, - InventoryOptionalFieldBucketKeyStatus, - InventoryOptionalFieldChecksumAlgorithm, - } -} - -const ( - // JSONTypeDocument is a JSONType enum value - JSONTypeDocument = "DOCUMENT" - - // JSONTypeLines is a JSONType enum value - JSONTypeLines = "LINES" -) - -// JSONType_Values returns all elements of the JSONType enum -func JSONType_Values() []string { - return []string{ - JSONTypeDocument, - JSONTypeLines, - } -} - -const ( - // MFADeleteEnabled is a MFADelete enum value - MFADeleteEnabled = "Enabled" - - // MFADeleteDisabled is a MFADelete enum value - MFADeleteDisabled = "Disabled" -) - -// MFADelete_Values returns all elements of the MFADelete enum -func MFADelete_Values() []string { - return []string{ - MFADeleteEnabled, - MFADeleteDisabled, - } -} - -const ( - // MFADeleteStatusEnabled is a MFADeleteStatus enum value - MFADeleteStatusEnabled = "Enabled" - - // MFADeleteStatusDisabled is a MFADeleteStatus enum value - MFADeleteStatusDisabled = "Disabled" -) - -// MFADeleteStatus_Values returns all elements of the MFADeleteStatus enum -func MFADeleteStatus_Values() []string { - return []string{ - MFADeleteStatusEnabled, - MFADeleteStatusDisabled, - } -} - -const ( - // MetadataDirectiveCopy is a MetadataDirective enum value - MetadataDirectiveCopy = "COPY" - - // MetadataDirectiveReplace is a MetadataDirective enum value - MetadataDirectiveReplace = "REPLACE" -) - -// MetadataDirective_Values returns all elements of the MetadataDirective enum -func MetadataDirective_Values() []string { - return []string{ - MetadataDirectiveCopy, - MetadataDirectiveReplace, - } -} - -const ( - // MetricsStatusEnabled is a MetricsStatus enum value - MetricsStatusEnabled = "Enabled" - - // MetricsStatusDisabled is a MetricsStatus enum value - MetricsStatusDisabled = "Disabled" -) - -// MetricsStatus_Values returns all elements of the MetricsStatus enum -func MetricsStatus_Values() []string { - return []string{ - MetricsStatusEnabled, - MetricsStatusDisabled, - } -} - -const ( - // ObjectAttributesEtag is a ObjectAttributes enum value - ObjectAttributesEtag = "ETag" - - // ObjectAttributesChecksum is a ObjectAttributes enum value - ObjectAttributesChecksum = "Checksum" - - // ObjectAttributesObjectParts is a ObjectAttributes enum value - ObjectAttributesObjectParts = "ObjectParts" - - // ObjectAttributesStorageClass is a ObjectAttributes enum value - ObjectAttributesStorageClass = "StorageClass" - - // ObjectAttributesObjectSize is a ObjectAttributes enum value - ObjectAttributesObjectSize = "ObjectSize" -) - -// 
ObjectAttributes_Values returns all elements of the ObjectAttributes enum -func ObjectAttributes_Values() []string { - return []string{ - ObjectAttributesEtag, - ObjectAttributesChecksum, - ObjectAttributesObjectParts, - ObjectAttributesStorageClass, - ObjectAttributesObjectSize, - } -} - -const ( - // ObjectCannedACLPrivate is a ObjectCannedACL enum value - ObjectCannedACLPrivate = "private" - - // ObjectCannedACLPublicRead is a ObjectCannedACL enum value - ObjectCannedACLPublicRead = "public-read" - - // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value - ObjectCannedACLPublicReadWrite = "public-read-write" - - // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value - ObjectCannedACLAuthenticatedRead = "authenticated-read" - - // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value - ObjectCannedACLAwsExecRead = "aws-exec-read" - - // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value - ObjectCannedACLBucketOwnerRead = "bucket-owner-read" - - // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value - ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" -) - -// ObjectCannedACL_Values returns all elements of the ObjectCannedACL enum -func ObjectCannedACL_Values() []string { - return []string{ - ObjectCannedACLPrivate, - ObjectCannedACLPublicRead, - ObjectCannedACLPublicReadWrite, - ObjectCannedACLAuthenticatedRead, - ObjectCannedACLAwsExecRead, - ObjectCannedACLBucketOwnerRead, - ObjectCannedACLBucketOwnerFullControl, - } -} - -const ( - // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value - ObjectLockEnabledEnabled = "Enabled" -) - -// ObjectLockEnabled_Values returns all elements of the ObjectLockEnabled enum -func ObjectLockEnabled_Values() []string { - return []string{ - ObjectLockEnabledEnabled, - } -} - -const ( - // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value - ObjectLockLegalHoldStatusOn = "ON" - - // ObjectLockLegalHoldStatusOff is a ObjectLockLegalHoldStatus enum value - ObjectLockLegalHoldStatusOff = "OFF" -) - -// ObjectLockLegalHoldStatus_Values returns all elements of the ObjectLockLegalHoldStatus enum -func ObjectLockLegalHoldStatus_Values() []string { - return []string{ - ObjectLockLegalHoldStatusOn, - ObjectLockLegalHoldStatusOff, - } -} - -const ( - // ObjectLockModeGovernance is a ObjectLockMode enum value - ObjectLockModeGovernance = "GOVERNANCE" - - // ObjectLockModeCompliance is a ObjectLockMode enum value - ObjectLockModeCompliance = "COMPLIANCE" -) - -// ObjectLockMode_Values returns all elements of the ObjectLockMode enum -func ObjectLockMode_Values() []string { - return []string{ - ObjectLockModeGovernance, - ObjectLockModeCompliance, - } -} - -const ( - // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value - ObjectLockRetentionModeGovernance = "GOVERNANCE" - - // ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value - ObjectLockRetentionModeCompliance = "COMPLIANCE" -) - -// ObjectLockRetentionMode_Values returns all elements of the ObjectLockRetentionMode enum -func ObjectLockRetentionMode_Values() []string { - return []string{ - ObjectLockRetentionModeGovernance, - ObjectLockRetentionModeCompliance, - } -} - -// The container element for object ownership for a bucket's ownership controls. -// -// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to -// the bucket owner if the objects are uploaded with the bucket-owner-full-control -// canned ACL. 
-// -// ObjectWriter - The uploading account will own the object if the object is -// uploaded with the bucket-owner-full-control canned ACL. -// -// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer -// affect permissions. The bucket owner automatically owns and has full control -// over every object in the bucket. The bucket only accepts PUT requests that -// don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control -// canned ACL or an equivalent form of this ACL expressed in the XML format. -const ( - // ObjectOwnershipBucketOwnerPreferred is a ObjectOwnership enum value - ObjectOwnershipBucketOwnerPreferred = "BucketOwnerPreferred" - - // ObjectOwnershipObjectWriter is a ObjectOwnership enum value - ObjectOwnershipObjectWriter = "ObjectWriter" - - // ObjectOwnershipBucketOwnerEnforced is a ObjectOwnership enum value - ObjectOwnershipBucketOwnerEnforced = "BucketOwnerEnforced" -) - -// ObjectOwnership_Values returns all elements of the ObjectOwnership enum -func ObjectOwnership_Values() []string { - return []string{ - ObjectOwnershipBucketOwnerPreferred, - ObjectOwnershipObjectWriter, - ObjectOwnershipBucketOwnerEnforced, - } -} - -const ( - // ObjectStorageClassStandard is a ObjectStorageClass enum value - ObjectStorageClassStandard = "STANDARD" - - // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value - ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" - - // ObjectStorageClassGlacier is a ObjectStorageClass enum value - ObjectStorageClassGlacier = "GLACIER" - - // ObjectStorageClassStandardIa is a ObjectStorageClass enum value - ObjectStorageClassStandardIa = "STANDARD_IA" - - // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value - ObjectStorageClassOnezoneIa = "ONEZONE_IA" - - // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value - ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" - - // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value - ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" - - // ObjectStorageClassOutposts is a ObjectStorageClass enum value - ObjectStorageClassOutposts = "OUTPOSTS" - - // ObjectStorageClassGlacierIr is a ObjectStorageClass enum value - ObjectStorageClassGlacierIr = "GLACIER_IR" -) - -// ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum -func ObjectStorageClass_Values() []string { - return []string{ - ObjectStorageClassStandard, - ObjectStorageClassReducedRedundancy, - ObjectStorageClassGlacier, - ObjectStorageClassStandardIa, - ObjectStorageClassOnezoneIa, - ObjectStorageClassIntelligentTiering, - ObjectStorageClassDeepArchive, - ObjectStorageClassOutposts, - ObjectStorageClassGlacierIr, - } -} - -const ( - // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value - ObjectVersionStorageClassStandard = "STANDARD" -) - -// ObjectVersionStorageClass_Values returns all elements of the ObjectVersionStorageClass enum -func ObjectVersionStorageClass_Values() []string { - return []string{ - ObjectVersionStorageClassStandard, - } -} - -const ( - // OwnerOverrideDestination is a OwnerOverride enum value - OwnerOverrideDestination = "Destination" -) - -// OwnerOverride_Values returns all elements of the OwnerOverride enum -func OwnerOverride_Values() []string { - return []string{ - OwnerOverrideDestination, - } -} - -const ( - // PayerRequester is a Payer enum value - PayerRequester = "Requester" - - // PayerBucketOwner is a Payer enum value - PayerBucketOwner = 
"BucketOwner" -) - -// Payer_Values returns all elements of the Payer enum -func Payer_Values() []string { - return []string{ - PayerRequester, - PayerBucketOwner, - } -} - -const ( - // PermissionFullControl is a Permission enum value - PermissionFullControl = "FULL_CONTROL" - - // PermissionWrite is a Permission enum value - PermissionWrite = "WRITE" - - // PermissionWriteAcp is a Permission enum value - PermissionWriteAcp = "WRITE_ACP" - - // PermissionRead is a Permission enum value - PermissionRead = "READ" - - // PermissionReadAcp is a Permission enum value - PermissionReadAcp = "READ_ACP" -) - -// Permission_Values returns all elements of the Permission enum -func Permission_Values() []string { - return []string{ - PermissionFullControl, - PermissionWrite, - PermissionWriteAcp, - PermissionRead, - PermissionReadAcp, - } -} - -const ( - // ProtocolHttp is a Protocol enum value - ProtocolHttp = "http" - - // ProtocolHttps is a Protocol enum value - ProtocolHttps = "https" -) - -// Protocol_Values returns all elements of the Protocol enum -func Protocol_Values() []string { - return []string{ - ProtocolHttp, - ProtocolHttps, - } -} - -const ( - // QuoteFieldsAlways is a QuoteFields enum value - QuoteFieldsAlways = "ALWAYS" - - // QuoteFieldsAsneeded is a QuoteFields enum value - QuoteFieldsAsneeded = "ASNEEDED" -) - -// QuoteFields_Values returns all elements of the QuoteFields enum -func QuoteFields_Values() []string { - return []string{ - QuoteFieldsAlways, - QuoteFieldsAsneeded, - } -} - -const ( - // ReplicaModificationsStatusEnabled is a ReplicaModificationsStatus enum value - ReplicaModificationsStatusEnabled = "Enabled" - - // ReplicaModificationsStatusDisabled is a ReplicaModificationsStatus enum value - ReplicaModificationsStatusDisabled = "Disabled" -) - -// ReplicaModificationsStatus_Values returns all elements of the ReplicaModificationsStatus enum -func ReplicaModificationsStatus_Values() []string { - return []string{ - ReplicaModificationsStatusEnabled, - ReplicaModificationsStatusDisabled, - } -} - -const ( - // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value - ReplicationRuleStatusEnabled = "Enabled" - - // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value - ReplicationRuleStatusDisabled = "Disabled" -) - -// ReplicationRuleStatus_Values returns all elements of the ReplicationRuleStatus enum -func ReplicationRuleStatus_Values() []string { - return []string{ - ReplicationRuleStatusEnabled, - ReplicationRuleStatusDisabled, - } -} - -const ( - // ReplicationStatusComplete is a ReplicationStatus enum value - ReplicationStatusComplete = "COMPLETE" - - // ReplicationStatusPending is a ReplicationStatus enum value - ReplicationStatusPending = "PENDING" - - // ReplicationStatusFailed is a ReplicationStatus enum value - ReplicationStatusFailed = "FAILED" - - // ReplicationStatusReplica is a ReplicationStatus enum value - ReplicationStatusReplica = "REPLICA" -) - -// ReplicationStatus_Values returns all elements of the ReplicationStatus enum -func ReplicationStatus_Values() []string { - return []string{ - ReplicationStatusComplete, - ReplicationStatusPending, - ReplicationStatusFailed, - ReplicationStatusReplica, - } -} - -const ( - // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum value - ReplicationTimeStatusEnabled = "Enabled" - - // ReplicationTimeStatusDisabled is a ReplicationTimeStatus enum value - ReplicationTimeStatusDisabled = "Disabled" -) - -// ReplicationTimeStatus_Values returns all elements of the 
ReplicationTimeStatus enum -func ReplicationTimeStatus_Values() []string { - return []string{ - ReplicationTimeStatusEnabled, - ReplicationTimeStatusDisabled, - } -} - -// If present, indicates that the requester was successfully charged for the -// request. -const ( - // RequestChargedRequester is a RequestCharged enum value - RequestChargedRequester = "requester" -) - -// RequestCharged_Values returns all elements of the RequestCharged enum -func RequestCharged_Values() []string { - return []string{ - RequestChargedRequester, - } -} - -// Confirms that the requester knows that they will be charged for the request. -// Bucket owners need not specify this parameter in their requests. For information -// about downloading objects from Requester Pays buckets, see Downloading Objects -// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) -// in the Amazon S3 User Guide. -const ( - // RequestPayerRequester is a RequestPayer enum value - RequestPayerRequester = "requester" -) - -// RequestPayer_Values returns all elements of the RequestPayer enum -func RequestPayer_Values() []string { - return []string{ - RequestPayerRequester, - } -} - -const ( - // RestoreRequestTypeSelect is a RestoreRequestType enum value - RestoreRequestTypeSelect = "SELECT" -) - -// RestoreRequestType_Values returns all elements of the RestoreRequestType enum -func RestoreRequestType_Values() []string { - return []string{ - RestoreRequestTypeSelect, - } -} - -const ( - // ServerSideEncryptionAes256 is a ServerSideEncryption enum value - ServerSideEncryptionAes256 = "AES256" - - // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value - ServerSideEncryptionAwsKms = "aws:kms" -) - -// ServerSideEncryption_Values returns all elements of the ServerSideEncryption enum -func ServerSideEncryption_Values() []string { - return []string{ - ServerSideEncryptionAes256, - ServerSideEncryptionAwsKms, - } -} - -const ( - // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value - SseKmsEncryptedObjectsStatusEnabled = "Enabled" - - // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value - SseKmsEncryptedObjectsStatusDisabled = "Disabled" -) - -// SseKmsEncryptedObjectsStatus_Values returns all elements of the SseKmsEncryptedObjectsStatus enum -func SseKmsEncryptedObjectsStatus_Values() []string { - return []string{ - SseKmsEncryptedObjectsStatusEnabled, - SseKmsEncryptedObjectsStatusDisabled, - } -} - -const ( - // StorageClassStandard is a StorageClass enum value - StorageClassStandard = "STANDARD" - - // StorageClassReducedRedundancy is a StorageClass enum value - StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" - - // StorageClassStandardIa is a StorageClass enum value - StorageClassStandardIa = "STANDARD_IA" - - // StorageClassOnezoneIa is a StorageClass enum value - StorageClassOnezoneIa = "ONEZONE_IA" - - // StorageClassIntelligentTiering is a StorageClass enum value - StorageClassIntelligentTiering = "INTELLIGENT_TIERING" - - // StorageClassGlacier is a StorageClass enum value - StorageClassGlacier = "GLACIER" - - // StorageClassDeepArchive is a StorageClass enum value - StorageClassDeepArchive = "DEEP_ARCHIVE" - - // StorageClassOutposts is a StorageClass enum value - StorageClassOutposts = "OUTPOSTS" - - // StorageClassGlacierIr is a StorageClass enum value - StorageClassGlacierIr = "GLACIER_IR" -) - -// StorageClass_Values returns all elements of the StorageClass enum -func StorageClass_Values() 
[]string { - return []string{ - StorageClassStandard, - StorageClassReducedRedundancy, - StorageClassStandardIa, - StorageClassOnezoneIa, - StorageClassIntelligentTiering, - StorageClassGlacier, - StorageClassDeepArchive, - StorageClassOutposts, - StorageClassGlacierIr, - } -} - -const ( - // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value - StorageClassAnalysisSchemaVersionV1 = "V_1" -) - -// StorageClassAnalysisSchemaVersion_Values returns all elements of the StorageClassAnalysisSchemaVersion enum -func StorageClassAnalysisSchemaVersion_Values() []string { - return []string{ - StorageClassAnalysisSchemaVersionV1, - } -} - -const ( - // TaggingDirectiveCopy is a TaggingDirective enum value - TaggingDirectiveCopy = "COPY" - - // TaggingDirectiveReplace is a TaggingDirective enum value - TaggingDirectiveReplace = "REPLACE" -) - -// TaggingDirective_Values returns all elements of the TaggingDirective enum -func TaggingDirective_Values() []string { - return []string{ - TaggingDirectiveCopy, - TaggingDirectiveReplace, - } -} - -const ( - // TierStandard is a Tier enum value - TierStandard = "Standard" - - // TierBulk is a Tier enum value - TierBulk = "Bulk" - - // TierExpedited is a Tier enum value - TierExpedited = "Expedited" -) - -// Tier_Values returns all elements of the Tier enum -func Tier_Values() []string { - return []string{ - TierStandard, - TierBulk, - TierExpedited, - } -} - -const ( - // TransitionStorageClassGlacier is a TransitionStorageClass enum value - TransitionStorageClassGlacier = "GLACIER" - - // TransitionStorageClassStandardIa is a TransitionStorageClass enum value - TransitionStorageClassStandardIa = "STANDARD_IA" - - // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value - TransitionStorageClassOnezoneIa = "ONEZONE_IA" - - // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value - TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" - - // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value - TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" - - // TransitionStorageClassGlacierIr is a TransitionStorageClass enum value - TransitionStorageClassGlacierIr = "GLACIER_IR" -) - -// TransitionStorageClass_Values returns all elements of the TransitionStorageClass enum -func TransitionStorageClass_Values() []string { - return []string{ - TransitionStorageClassGlacier, - TransitionStorageClassStandardIa, - TransitionStorageClassOnezoneIa, - TransitionStorageClassIntelligentTiering, - TransitionStorageClassDeepArchive, - TransitionStorageClassGlacierIr, - } -} - -const ( - // TypeCanonicalUser is a Type enum value - TypeCanonicalUser = "CanonicalUser" - - // TypeAmazonCustomerByEmail is a Type enum value - TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" - - // TypeGroup is a Type enum value - TypeGroup = "Group" -) - -// Type_Values returns all elements of the Type enum -func Type_Values() []string { - return []string{ - TypeCanonicalUser, - TypeAmazonCustomerByEmail, - TypeGroup, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go deleted file mode 100644 index 407f06b6ed..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go +++ /dev/null @@ -1,202 +0,0 @@ -package s3 - -import ( - "bytes" - "crypto/md5" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "fmt" - "hash" - "io" - - "github.com/aws/aws-sdk-go/aws" - 
"github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -const ( - contentMD5Header = "Content-Md5" - contentSha256Header = "X-Amz-Content-Sha256" - amzTeHeader = "X-Amz-Te" - amzTxEncodingHeader = "X-Amz-Transfer-Encoding" - - appendMD5TxEncoding = "append-md5" -) - -// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the -// request. If the body is not seekable or S3DisableContentMD5Validation set -// this handler will be ignored. -func computeBodyHashes(r *request.Request) { - if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { - return - } - if r.IsPresigned() { - return - } - if r.Error != nil || !aws.IsReaderSeekable(r.Body) { - return - } - - var md5Hash, sha256Hash hash.Hash - hashers := make([]io.Writer, 0, 2) - - // Determine upfront which hashes can be set without overriding user - // provide header data. - if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 { - md5Hash = md5.New() - hashers = append(hashers, md5Hash) - } - - if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 { - sha256Hash = sha256.New() - hashers = append(hashers, sha256Hash) - } - - // Create the destination writer based on the hashes that are not already - // provided by the user. - var dst io.Writer - switch len(hashers) { - case 0: - return - case 1: - dst = hashers[0] - default: - dst = io.MultiWriter(hashers...) - } - - if _, err := aws.CopySeekableBody(dst, r.Body); err != nil { - r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) - return - } - - // For the hashes created, set the associated headers that the user did not - // already provide. - if md5Hash != nil { - sum := make([]byte, md5.Size) - encoded := make([]byte, md5Base64EncLen) - - base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0])) - r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)} - } - - if sha256Hash != nil { - encoded := make([]byte, sha256HexEncLen) - sum := make([]byte, sha256.Size) - - hex.Encode(encoded, sha256Hash.Sum(sum[0:0])) - r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)} - } -} - -const ( - md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen - sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen -) - -// Adds the x-amz-te: append_md5 header to the request. This requests the service -// responds with a trailing MD5 checksum. -// -// Will not ask for append MD5 if disabled, the request is presigned or, -// or the API operation does not support content MD5 validation. -func askForTxEncodingAppendMD5(r *request.Request) { - if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { - return - } - if r.IsPresigned() { - return - } - r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding) -} - -func useMD5ValidationReader(r *request.Request) { - if r.Error != nil { - return - } - - if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding { - return - } - - var bodyReader *io.ReadCloser - var contentLen int64 - switch tv := r.Data.(type) { - case *GetObjectOutput: - bodyReader = &tv.Body - contentLen = aws.Int64Value(tv.ContentLength) - // Update ContentLength hiden the trailing MD5 checksum. 
- tv.ContentLength = aws.Int64(contentLen - md5.Size) - tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range")) - default: - r.Error = awserr.New("ChecksumValidationError", - fmt.Sprintf("%s: %s header received on unsupported API, %s", - amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name, - ), nil) - return - } - - if contentLen < md5.Size { - r.Error = awserr.New("ChecksumValidationError", - fmt.Sprintf("invalid Content-Length %d for %s %s", - contentLen, appendMD5TxEncoding, amzTxEncodingHeader, - ), nil) - return - } - - // Wrap and swap the response body reader with the validation reader. - *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) -} - -type md5ValidationReader struct { - rawReader io.ReadCloser - payload io.Reader - hash hash.Hash - - payloadLen int64 - read int64 -} - -func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { - h := md5.New() - return &md5ValidationReader{ - rawReader: reader, - payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), - hash: h, - payloadLen: payloadLen, - } -} - -func (v *md5ValidationReader) Read(p []byte) (n int, err error) { - n, err = v.payload.Read(p) - if err != nil && err != io.EOF { - return n, err - } - - v.read += int64(n) - - if err == io.EOF { - if v.read != v.payloadLen { - return n, io.ErrUnexpectedEOF - } - expectSum := make([]byte, md5.Size) - actualSum := make([]byte, md5.Size) - if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { - return n, sumReadErr - } - actualSum = v.hash.Sum(actualSum[0:0]) - if !bytes.Equal(expectSum, actualSum) { - return n, awserr.New("InvalidChecksum", - fmt.Sprintf("expected MD5 checksum %s, got %s", - hex.EncodeToString(expectSum), - hex.EncodeToString(actualSum), - ), - nil) - } - } - - return n, err -} - -func (v *md5ValidationReader) Close() error { - return v.rawReader.Close() -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go deleted file mode 100644 index 20828387ea..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ /dev/null @@ -1,107 +0,0 @@ -package s3 - -import ( - "io/ioutil" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) - -// NormalizeBucketLocation is a utility function which will update the -// passed in value to always be a region ID. Generally this would be used -// with GetBucketLocation API operation. -// -// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". -// -// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html -// for more information on the values that can be returned. -func NormalizeBucketLocation(loc string) string { - switch loc { - case "": - loc = "us-east-1" - case "EU": - loc = "eu-west-1" - } - - return loc -} - -// NormalizeBucketLocationHandler is a request handler which will update the -// GetBucketLocation's result LocationConstraint value to always be a region ID. -// -// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". -// -// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html -// for more information on the values that can be returned. 
-// -// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ -// Bucket: aws.String(bucket), -// }) -// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) -// err := req.Send() -var NormalizeBucketLocationHandler = request.NamedHandler{ - Name: "awssdk.s3.NormalizeBucketLocation", - Fn: func(req *request.Request) { - if req.Error != nil { - return - } - - out := req.Data.(*GetBucketLocationOutput) - loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) - out.LocationConstraint = aws.String(loc) - }, -} - -// WithNormalizeBucketLocation is a request option which will update the -// GetBucketLocation's result LocationConstraint value to always be a region ID. -// -// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". -// -// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html -// for more information on the values that can be returned. -// -// result, err := svc.GetBucketLocationWithContext(ctx, -// &s3.GetBucketLocationInput{ -// Bucket: aws.String(bucket), -// }, -// s3.WithNormalizeBucketLocation, -// ) -func WithNormalizeBucketLocation(r *request.Request) { - r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) -} - -func buildGetBucketLocation(r *request.Request) { - if r.DataFilled() { - out := r.Data.(*GetBucketLocationOutput) - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, - "failed reading response body", err) - return - } - - match := reBucketLocation.FindSubmatch(b) - if len(match) > 1 { - loc := string(match[1]) - out.LocationConstraint = aws.String(loc) - } - } -} - -func populateLocationConstraint(r *request.Request) { - if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { - in := r.Params.(*CreateBucketInput) - if in.CreateBucketConfiguration == nil { - r.Params = awsutil.CopyOf(r.Params) - in = r.Params.(*CreateBucketInput) - in.CreateBucketConfiguration = &CreateBucketConfiguration{ - LocationConstraint: r.Config.Region, - } - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go deleted file mode 100644 index 229606b708..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ /dev/null @@ -1,89 +0,0 @@ -package s3 - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/s3shared/arn" - "github.com/aws/aws-sdk-go/internal/s3shared/s3err" -) - -func init() { - initClient = defaultInitClientFn - initRequest = defaultInitRequestFn -} - -func defaultInitClientFn(c *client.Client) { - if c.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateUnset { - if aws.BoolValue(c.Config.UseDualStack) { - c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled - } else { - c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateDisabled - } - } - - // Support building custom endpoints based on config - c.Handlers.Build.PushFront(endpointHandler) - - // Require SSL when using SSE keys - c.Handlers.Validate.PushBack(validateSSERequiresSSL) - c.Handlers.Build.PushBack(computeSSEKeyMD5) - c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) - - // S3 uses custom error unmarshaling logic - c.Handlers.UnmarshalError.Clear() - c.Handlers.UnmarshalError.PushBack(unmarshalError) - 
c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler()) -} - -func defaultInitRequestFn(r *request.Request) { - // Add request handlers for specific platforms. - // e.g. 100-continue support for PUT requests using Go 1.6 - platformRequestHandlers(r) - - switch r.Operation.Name { - case opGetBucketLocation: - // GetBucketLocation has custom parsing logic - r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) - case opCreateBucket: - // Auto-populate LocationConstraint with current region - r.Handlers.Validate.PushFront(populateLocationConstraint) - case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: - r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarshalError) - r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler()) - case opPutObject, opUploadPart: - r.Handlers.Build.PushBack(computeBodyHashes) - // Disabled until #1837 root issue is resolved. - // case opGetObject: - // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) - // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) - case opWriteGetObjectResponse: - r.Handlers.Build.PushFront(buildWriteGetObjectResponseEndpoint) - } -} - -// bucketGetter is an accessor interface to grab the "Bucket" field from -// an S3 type. -type bucketGetter interface { - getBucket() string -} - -// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey" -// field from an S3 type. -type sseCustomerKeyGetter interface { - getSSECustomerKey() string -} - -// copySourceSSECustomerKeyGetter is an accessor interface to grab the -// "CopySourceSSECustomerKey" field from an S3 type. -type copySourceSSECustomerKeyGetter interface { - getCopySourceSSECustomerKey() string -} - -// endpointARNGetter is an accessor interface to grab the -// the field corresponding to an endpoint ARN input. -type endpointARNGetter interface { - getEndpointARN() (arn.Resource, error) - hasEndpointARN() bool -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go deleted file mode 100644 index c148f757ee..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package s3 provides the client and types for making API -// requests to Amazon Simple Storage Service. -// -// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service. -// -// See s3 package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/ -// -// # Using the Client -// -// To contact Amazon Simple Storage Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Amazon Simple Storage Service client S3 for more -// information on creating client for this service. 
-// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
-package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
deleted file mode 100644
index 2e8244f8f6..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Upload Managers
-//
-// The s3manager package's Uploader provides concurrent upload of content to S3
-// by taking advantage of S3's Multipart APIs. The Uploader also supports both
-// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker
-// for optimizations if the Body satisfies that type. Once the Uploader instance
-// is created you can call Upload concurrently from multiple goroutines safely.
-//
-//	// The session the S3 Uploader will use
-//	sess := session.Must(session.NewSession())
-//
-//	// Create an uploader with the session and default options
-//	uploader := s3manager.NewUploader(sess)
-//
-//	f, err := os.Open(filename)
-//	if err != nil {
-//		return fmt.Errorf("failed to open file %q, %v", filename, err)
-//	}
-//
-//	// Upload the file to S3.
-//	result, err := uploader.Upload(&s3manager.UploadInput{
-//		Bucket: aws.String(myBucket),
-//		Key:    aws.String(myString),
-//		Body:   f,
-//	})
-//	if err != nil {
-//		return fmt.Errorf("failed to upload file, %v", err)
-//	}
-//	fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
-//
-// See the s3manager package's Uploader type documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
-//
-// # Download Manager
-//
-// The s3manager package's Downloader provides concurrent downloading of Objects
-// from S3. The Downloader will write S3 Object content with an io.WriterAt.
-// Once the Downloader instance is created you can call Download concurrently from
-// multiple goroutines safely.
-//
-//	// The session the S3 Downloader will use
-//	sess := session.Must(session.NewSession())
-//
-//	// Create a downloader with the session and default options
-//	downloader := s3manager.NewDownloader(sess)
-//
-//	// Create a file to write the S3 Object contents to.
-//	f, err := os.Create(filename)
-//	if err != nil {
-//		return fmt.Errorf("failed to create file %q, %v", filename, err)
-//	}
-//
-//	// Write the contents of S3 Object to the file
-//	n, err := downloader.Download(f, &s3.GetObjectInput{
-//		Bucket: aws.String(myBucket),
-//		Key:    aws.String(myString),
-//	})
-//	if err != nil {
-//		return fmt.Errorf("failed to download file, %v", err)
-//	}
-//	fmt.Printf("file downloaded, %d bytes\n", n)
-//
-// See the s3manager package's Downloader type documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
-//
-// # Automatic URI cleaning
-//
-// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname)
-// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct
-// used by the service client.
-//
-//	svc := s3.New(sess, &aws.Config{
-//		DisableRestProtocolURICleaning: aws.Bool(true),
-//	})
-//	out, err := svc.GetObject(&s3.GetObjectInput{
-//		Bucket: aws.String("bucketname"),
-//		Key:    aws.String("//foo//bar//moo"),
-//	})
-//
-// # Get Bucket Region
-//
-// GetBucketRegion will attempt to get the region for a bucket using a region
-// hint to determine which AWS partition to perform the query on. Use this utility
-// to determine the region a bucket is in.
-//
-//	sess := session.Must(session.NewSession())
-//
-//	bucket := "my-bucket"
-//	region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
-//	if err != nil {
-//		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
-//			fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
-//		}
-//		return err
-//	}
-//	fmt.Printf("Bucket %s is in %s region\n", bucket, region)
-//
-// See the s3manager package's GetBucketRegion function documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
-//
-// # S3 Crypto Client
-//
-// The s3crypto package provides the tools to upload and download encrypted
-// content from S3. The Encryption and Decryption clients can be used concurrently
-// once the client is created.
-//
-// See the s3crypto package documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
-package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
deleted file mode 100644
index 71b4386926..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
+++ /dev/null
@@ -1,298 +0,0 @@
-package s3
-
-import (
-	"fmt"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/endpoints"
-	"net/url"
-	"strings"
-
-	"github.com/aws/aws-sdk-go/aws"
-	awsarn "github.com/aws/aws-sdk-go/aws/arn"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/internal/s3shared"
-	"github.com/aws/aws-sdk-go/internal/s3shared/arn"
-)
-
-const (
-	s3Namespace              = "s3"
-	s3AccessPointNamespace   = "s3-accesspoint"
-	s3ObjectsLambdaNamespace = "s3-object-lambda"
-	s3OutpostsNamespace      = "s3-outposts"
-)
-
-// Used by shapes with members decorated as endpoint ARN.
-func parseEndpointARN(v string) (arn.Resource, error) {
-	return arn.ParseResource(v, accessPointResourceParser)
-}
-
-func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
-	resParts := arn.SplitResource(a.Resource)
-	switch resParts[0] {
-	case "accesspoint":
-		switch a.Service {
-		case s3Namespace:
-			return arn.ParseAccessPointResource(a, resParts[1:])
-		case s3ObjectsLambdaNamespace:
-			return parseS3ObjectLambdaAccessPointResource(a, resParts)
-		default:
-			return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)}
-		}
-	case "outpost":
-		if a.Service != "s3-outposts" {
-			return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
-		}
-		return parseOutpostAccessPointResource(a, resParts[1:])
-	default:
-		return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
-	}
-}
-
-// parseOutpostAccessPointResource attempts to parse the ARN's resource as an
-// outpost access-point resource.
-// -// Supported Outpost AccessPoint ARN format: -// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} -// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint -func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) { - // outpost accesspoint arn is only valid if service is s3-outposts - if a.Service != "s3-outposts" { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} - } - - if len(resParts) == 0 { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} - } - - if len(resParts) < 3 { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ - ARN: a, Reason: "access-point resource not set in Outpost ARN", - } - } - - resID := strings.TrimSpace(resParts[0]) - if len(resID) == 0 { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} - } - - var outpostAccessPointARN = arn.OutpostAccessPointARN{} - switch resParts[1] { - case "accesspoint": - accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:]) - if err != nil { - return arn.OutpostAccessPointARN{}, err - } - // set access-point arn - outpostAccessPointARN.AccessPointARN = accessPointARN - default: - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"} - } - - // set outpost id - outpostAccessPointARN.OutpostID = resID - return outpostAccessPointARN, nil -} - -func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) { - if a.Service != s3ObjectsLambdaNamespace { - return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)} - } - - accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:]) - if err != nil { - return arn.S3ObjectLambdaAccessPointARN{}, err - } - - if len(accessPointARN.Region) == 0 { - return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)} - } - - return arn.S3ObjectLambdaAccessPointARN{ - AccessPointARN: accessPointARN, - }, nil -} - -func endpointHandler(req *request.Request) { - endpoint, ok := req.Params.(endpointARNGetter) - if !ok || !endpoint.hasEndpointARN() { - updateBucketEndpointFromParams(req) - return - } - - resource, err := endpoint.getEndpointARN() - if err != nil { - req.Error = s3shared.NewInvalidARNError(nil, err) - return - } - - resReq := s3shared.ResourceRequest{ - Resource: resource, - Request: req, - } - - if len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() { - req.Error = s3shared.NewClientPartitionMismatchError(resource, - req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) - return - } - - if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { - req.Error = s3shared.NewClientRegionMismatchError(resource, - req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) - return - } - - switch tv := resource.(type) { - case arn.AccessPointARN: - err = updateRequestAccessPointEndpoint(req, tv) - if err != nil { - req.Error = err - } - case arn.S3ObjectLambdaAccessPointARN: - err = updateRequestS3ObjectLambdaAccessPointEndpoint(req, tv) - if err != nil { - req.Error = err - } - case arn.OutpostAccessPointARN: - // 
outposts does not support FIPS regions - if req.Config.UseFIPSEndpoint == endpoints.FIPSEndpointStateEnabled { - req.Error = s3shared.NewFIPSConfigurationError(resource, req.ClientInfo.PartitionID, - aws.StringValue(req.Config.Region), nil) - return - } - - err = updateRequestOutpostAccessPointEndpoint(req, tv) - if err != nil { - req.Error = err - } - default: - req.Error = s3shared.NewInvalidARNError(resource, nil) - } -} - -func updateBucketEndpointFromParams(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucket name was not provided - // if this is an input validation error the validation handler - // will report it. - return - } - updateEndpointForS3Config(r, bucket) -} - -func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error { - // Accelerate not supported - if aws.BoolValue(req.Config.S3UseAccelerate) { - return s3shared.NewClientConfiguredForAccelerateError(accessPoint, - req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) - } - - // Ignore the disable host prefix for access points - req.Config.DisableEndpointHostPrefix = aws.Bool(false) - - if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil { - return err - } - - removeBucketFromPath(req.HTTPRequest.URL) - - return nil -} - -func updateRequestS3ObjectLambdaAccessPointEndpoint(req *request.Request, accessPoint arn.S3ObjectLambdaAccessPointARN) error { - // DualStack not supported - if isUseDualStackEndpoint(req) { - return s3shared.NewClientConfiguredForDualStackError(accessPoint, - req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) - } - - // Accelerate not supported - if aws.BoolValue(req.Config.S3UseAccelerate) { - return s3shared.NewClientConfiguredForAccelerateError(accessPoint, - req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) - } - - // Ignore the disable host prefix for access points - req.Config.DisableEndpointHostPrefix = aws.Bool(false) - - if err := s3ObjectLambdaAccessPointEndpointBuilder(accessPoint).build(req); err != nil { - return err - } - - removeBucketFromPath(req.HTTPRequest.URL) - - return nil -} - -func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error { - // Accelerate not supported - if aws.BoolValue(req.Config.S3UseAccelerate) { - return s3shared.NewClientConfiguredForAccelerateError(accessPoint, - req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) - } - - // Dualstack not supported - if isUseDualStackEndpoint(req) { - return s3shared.NewClientConfiguredForDualStackError(accessPoint, - req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) - } - - // Ignore the disable host prefix for access points - req.Config.DisableEndpointHostPrefix = aws.Bool(false) - - if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil { - return err - } - - removeBucketFromPath(req.HTTPRequest.URL) - return nil -} - -func removeBucketFromPath(u *url.URL) { - u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) - if u.Path == "" { - u.Path = "/" - } -} - -func buildWriteGetObjectResponseEndpoint(req *request.Request) { - // DualStack not supported - if isUseDualStackEndpoint(req) { - req.Error = awserr.New("ConfigurationError", "client configured for dualstack but not supported for operation", nil) - return - } - - // Accelerate not supported - if aws.BoolValue(req.Config.S3UseAccelerate) { - req.Error = 
awserr.New("ConfigurationError", "client configured for accelerate but not supported for operation", nil) - return - } - - signingName := s3ObjectsLambdaNamespace - signingRegion := req.ClientInfo.SigningRegion - - if !hasCustomEndpoint(req) { - endpoint, err := resolveRegionalEndpoint(req, aws.StringValue(req.Config.Region), req.ClientInfo.ResolvedRegion, EndpointsID) - if err != nil { - req.Error = awserr.New(request.ErrCodeSerialization, "failed to resolve endpoint", err) - return - } - signingRegion = endpoint.SigningRegion - - if err = updateRequestEndpoint(req, endpoint.URL); err != nil { - req.Error = err - return - } - updateS3HostPrefixForS3ObjectLambda(req) - } - - redirectSigner(req, signingName, signingRegion) -} - -func isUseDualStackEndpoint(req *request.Request) bool { - if req.Config.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { - return req.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateEnabled - } - return aws.BoolValue(req.Config.UseDualStack) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go deleted file mode 100644 index 7ae18ef548..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go +++ /dev/null @@ -1,239 +0,0 @@ -package s3 - -import ( - "net/url" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/s3shared" - "github.com/aws/aws-sdk-go/internal/s3shared/arn" - "github.com/aws/aws-sdk-go/private/protocol" -) - -const ( - accessPointPrefixLabel = "accesspoint" - accountIDPrefixLabel = "accountID" - accessPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." - - outpostPrefixLabel = "outpost" - outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}." -) - -// hasCustomEndpoint returns true if endpoint is a custom endpoint -func hasCustomEndpoint(r *request.Request) bool { - return len(aws.StringValue(r.Config.Endpoint)) > 0 -} - -// accessPointEndpointBuilder represents the endpoint builder for access point arn -type accessPointEndpointBuilder arn.AccessPointARN - -// build builds the endpoint for corresponding access point arn -// -// For building an endpoint from access point arn, format used is: -// - Access point endpoint format : {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix} -// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com -// -// Access Point Endpoint requests are signed using "s3" as signing name. 
-func (a accessPointEndpointBuilder) build(req *request.Request) error { - resolveService := arn.AccessPointARN(a).Service - resolveRegion := arn.AccessPointARN(a).Region - - endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", resolveService) - if err != nil { - return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a), - req.ClientInfo.PartitionID, resolveRegion, err) - } - - endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) - - if !hasCustomEndpoint(req) { - if err = updateRequestEndpoint(req, endpoint.URL); err != nil { - return err - } - - // dual stack provided by endpoint resolver - updateS3HostForS3AccessPoint(req) - } - - protocol.HostPrefixBuilder{ - Prefix: accessPointPrefixTemplate, - LabelsFn: a.hostPrefixLabelValues, - }.Build(req) - - // signer redirection - redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) - - err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) - if err != nil { - return s3shared.NewInvalidARNError(arn.AccessPointARN(a), err) - } - - return nil -} - -func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { - return map[string]string{ - accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, - accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, - } -} - -// s3ObjectLambdaAccessPointEndpointBuilder represents the endpoint builder for an s3 object lambda access point arn -type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN - -// build builds the endpoint for corresponding access point arn -// -// For building an endpoint from access point arn, format used is: -// - Access point endpoint format : {accesspointName}-{accountId}.s3-object-lambda.{region}.{dnsSuffix} -// - example : myaccesspoint-012345678901.s3-object-lambda.us-west-2.amazonaws.com -// -// Access Point Endpoint requests are signed using "s3-object-lambda" as signing name. -func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error { - resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region - - endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", EndpointsID) - if err != nil { - return s3shared.NewFailedToResolveEndpointError(arn.S3ObjectLambdaAccessPointARN(a), - req.ClientInfo.PartitionID, resolveRegion, err) - } - - endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) - - endpoint.SigningName = s3ObjectsLambdaNamespace - - if !hasCustomEndpoint(req) { - if err = updateRequestEndpoint(req, endpoint.URL); err != nil { - return err - } - - updateS3HostPrefixForS3ObjectLambda(req) - } - - protocol.HostPrefixBuilder{ - Prefix: accessPointPrefixTemplate, - LabelsFn: a.hostPrefixLabelValues, - }.Build(req) - - // signer redirection - redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) - - err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) - if err != nil { - return s3shared.NewInvalidARNError(arn.S3ObjectLambdaAccessPointARN(a), err) - } - - return nil -} - -func (a s3ObjectLambdaAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { - return map[string]string{ - accessPointPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccessPointName, - accountIDPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccountID, - } -} - -// outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn. 
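These builders all hand a {label}-style template such as accessPointPrefixTemplate to protocol.HostPrefixBuilder; a rough stdlib-only approximation of that substitution step (the helper name expandHostPrefix is invented for this sketch):

package main

import (
	"fmt"
	"strings"
)

// expandHostPrefix substitutes {label} placeholders in a host-prefix
// template and prepends the result to an already-resolved host.
func expandHostPrefix(template, host string, labels map[string]string) string {
	pairs := make([]string, 0, len(labels)*2)
	for label, value := range labels {
		pairs = append(pairs, "{"+label+"}", value)
	}
	return strings.NewReplacer(pairs...).Replace(template) + host
}

func main() {
	const accessPointPrefixTemplate = "{accesspoint}-{accountID}."
	fmt.Println(expandHostPrefix(accessPointPrefixTemplate,
		"s3-accesspoint.us-west-2.amazonaws.com",
		map[string]string{
			"accesspoint": "myaccesspoint",
			"accountID":   "012345678901",
		}))
	// Output: myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com
}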
-type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN - -// build builds an endpoint corresponding to the outpost access point arn. -// -// For building an endpoint from outpost access point arn, format used is: -// - Outpost access point endpoint format : {accesspointName}-{accountId}.{outpostId}.s3-outposts.{region}.{dnsSuffix} -// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com -// -// Outpost AccessPoint Endpoint requests are signed using "s3-outposts" as signing name. -func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { - resolveRegion := o.Region - resolveService := o.Service - - endpointsID := resolveService - if resolveService == s3OutpostsNamespace { - endpointsID = "s3" - } - - endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", endpointsID) - if err != nil { - return s3shared.NewFailedToResolveEndpointError(o, - req.ClientInfo.PartitionID, resolveRegion, err) - } - - endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) - - if !hasCustomEndpoint(req) { - if err = updateRequestEndpoint(req, endpoint.URL); err != nil { - return err - } - updateHostPrefix(req, endpointsID, resolveService) - } - - protocol.HostPrefixBuilder{ - Prefix: outpostAccessPointPrefixTemplate, - LabelsFn: o.hostPrefixLabelValues, - }.Build(req) - - // set the signing region and name to the names resolved from the ARN - redirectSigner(req, resolveService, resolveRegion) - - err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) - if err != nil { - return s3shared.NewInvalidARNError(o, err) - } - - return nil -} - -func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { - return map[string]string{ - accessPointPrefixLabel: o.AccessPointName, - accountIDPrefixLabel: o.AccountID, - outpostPrefixLabel: o.OutpostID, - } -} - -func resolveRegionalEndpoint(r *request.Request, region, resolvedRegion, endpointsID string) (endpoints.ResolvedEndpoint, error) { - return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) { - opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) - opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) - opts.UseDualStackEndpoint = r.Config.UseDualStackEndpoint - opts.UseFIPSEndpoint = r.Config.UseFIPSEndpoint - opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint - opts.ResolvedRegion = resolvedRegion - opts.Logger = r.Config.Logger - opts.LogDeprecated = r.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) - }) -} - -func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { - r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) - if err != nil { - return awserr.New(request.ErrCodeSerialization, - "failed to parse endpoint URL", err) - } - - return nil -} - -// redirectSigner sets the signing name and signing region for a request -func redirectSigner(req *request.Request, signingName string, signingRegion string) { - req.ClientInfo.SigningName = signingName - req.ClientInfo.SigningRegion = signingRegion -} - -func updateS3HostForS3AccessPoint(req *request.Request) { - updateHostPrefix(req, "s3", s3AccessPointNamespace) -} - -func updateS3HostPrefixForS3ObjectLambda(req *request.Request) { - updateHostPrefix(req, "s3", s3ObjectsLambdaNamespace) -} - -func updateHostPrefix(req *request.Request, oldEndpointPrefix, newEndpointPrefix string) { - host := req.HTTPRequest.URL.Host - if strings.HasPrefix(host, oldEndpointPrefix) { - //
replace the service host label oldEndpointPrefix with newEndpointPrefix - req.HTTPRequest.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):] - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go deleted file mode 100644 index cd6a2e8ae4..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ /dev/null @@ -1,60 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package s3 - -const ( - - // ErrCodeBucketAlreadyExists for service response error code - // "BucketAlreadyExists". - // - // The requested bucket name is not available. The bucket namespace is shared - // by all users of the system. Select a different name and try again. - ErrCodeBucketAlreadyExists = "BucketAlreadyExists" - - // ErrCodeBucketAlreadyOwnedByYou for service response error code - // "BucketAlreadyOwnedByYou". - // - // The bucket you tried to create already exists, and you own it. Amazon S3 - // returns this error in all Amazon Web Services Regions except in the North - // Virginia Region. For legacy compatibility, if you re-create an existing bucket - // that you already own in the North Virginia Region, Amazon S3 returns 200 - // OK and resets the bucket access control lists (ACLs). - ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" - - // ErrCodeInvalidObjectState for service response error code - // "InvalidObjectState". - // - // Object is archived and inaccessible until restored. - ErrCodeInvalidObjectState = "InvalidObjectState" - - // ErrCodeNoSuchBucket for service response error code - // "NoSuchBucket". - // - // The specified bucket does not exist. - ErrCodeNoSuchBucket = "NoSuchBucket" - - // ErrCodeNoSuchKey for service response error code - // "NoSuchKey". - // - // The specified key does not exist. - ErrCodeNoSuchKey = "NoSuchKey" - - // ErrCodeNoSuchUpload for service response error code - // "NoSuchUpload". - // - // The specified multipart upload does not exist. - ErrCodeNoSuchUpload = "NoSuchUpload" - - // ErrCodeObjectAlreadyInActiveTierError for service response error code - // "ObjectAlreadyInActiveTierError". - // - // This action is not allowed against this storage tier. - ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" - - // ErrCodeObjectNotInActiveTierError for service response error code - // "ObjectNotInActiveTierError". - // - // The source object of the COPY action is not in the active tier and is only - // stored in Amazon S3 Glacier. - ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go deleted file mode 100644 index 81cdec1ae7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go +++ /dev/null @@ -1,136 +0,0 @@ -package s3 - -import ( - "fmt" - "net/url" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// an operationBlacklist is a list of operation names with which a -// request handler should not be executed. -type operationBlacklist []string - -// Continue will return true if the Request's operation name is not -// in the blacklist. False otherwise.
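On the consumer side, the ErrCode constants removed in errors.go above are usually matched via awserr.Error; a brief sketch of that pattern (the bucket and key names are placeholders, and ambient AWS credentials and region are assumed):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("my-bucket"),      // placeholder bucket
		Key:    aws.String("missing-object"), // placeholder key
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case s3.ErrCodeNoSuchBucket, s3.ErrCodeNoSuchKey:
			fmt.Println("not found:", aerr.Message())
		default:
			fmt.Println("other service/SDK error:", aerr)
		}
	}
}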
-func (b operationBlacklist) Continue(r *request.Request) bool { - for i := 0; i < len(b); i++ { - if b[i] == r.Operation.Name { - return false - } - } - return true -} - -var accelerateOpBlacklist = operationBlacklist{ - opListBuckets, opCreateBucket, opDeleteBucket, -} - -// Automatically add the bucket name to the endpoint domain -// if possible. This style of bucket is valid for all bucket names which are -// DNS compatible and do not contain "." -func updateEndpointForS3Config(r *request.Request, bucketName string) { - forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) - accelerate := aws.BoolValue(r.Config.S3UseAccelerate) - - if accelerate && accelerateOpBlacklist.Continue(r) { - if forceHostStyle { - if r.Config.Logger != nil { - r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") - } - } - updateEndpointForAccelerate(r, bucketName) - } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { - updateEndpointForHostStyle(r, bucketName) - } -} - -func updateEndpointForHostStyle(r *request.Request, bucketName string) { - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { - // bucket name must be valid to put into the host - return - } - - moveBucketToHost(r.HTTPRequest.URL, bucketName) -} - -var ( - accelElem = []byte("s3-accelerate.dualstack.") -) - -func updateEndpointForAccelerate(r *request.Request, bucketName string) { - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { - r.Error = awserr.New("InvalidParameterException", - fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName), - nil) - return - } - - parts := strings.Split(r.HTTPRequest.URL.Host, ".") - if len(parts) < 3 { - r.Error = awserr.New("InvalidParameterExecption", - fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", - r.HTTPRequest.URL.Host), nil) - return - } - - if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") { - parts[0] = "s3-accelerate" - } - for i := 1; i+1 < len(parts); i++ { - if parts[i] == aws.StringValue(r.Config.Region) { - parts = append(parts[:i], parts[i+1:]...) - break - } - } - - r.HTTPRequest.URL.Host = strings.Join(parts, ".") - - moveBucketToHost(r.HTTPRequest.URL, bucketName) -} - -// Attempts to retrieve the bucket name from the request input parameters. -// If no bucket is found, or the field is empty "", false will be returned. -func bucketNameFromReqParams(params interface{}) (string, bool) { - if iface, ok := params.(bucketGetter); ok { - b := iface.getBucket() - return b, len(b) > 0 - } - - return "", false -} - -// hostCompatibleBucketName returns true if the request should -// put the bucket in the host. This is false if S3ForcePathStyle is -// explicitly set or if the bucket is not DNS compatible. -func hostCompatibleBucketName(u *url.URL, bucket string) bool { - // Bucket might be DNS compatible but dots in the hostname will fail - // certificate validation, so do not use host-style. - if u.Scheme == "https" && strings.Contains(bucket, ".") { - return false - } - - // if the bucket is DNS compatible - return dnsCompatibleBucketName(bucket) -} - -var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) -var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) - -// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. -// Buckets created outside of the classic region MUST be DNS compatible. 
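The DNS-compatibility check described above is easy to exercise in isolation; a runnable restatement of the same rules with a few sample inputs:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same patterns as the deleted helper: a DNS-compatible label, plus a
// dotted-quad matcher used to reject names that look like IP addresses.
var (
	reDomain    = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
	reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
)

func dnsCompatible(bucket string) bool {
	return reDomain.MatchString(bucket) &&
		!reIPAddress.MatchString(bucket) &&
		!strings.Contains(bucket, "..")
}

func main() {
	for _, b := range []string{"my-bucket", "My_Bucket", "192.168.0.1", "a..b"} {
		fmt.Printf("%-12s dnsCompatible=%v\n", b, dnsCompatible(b))
	}
	// Only my-bucket passes; the others fail on case/underscore,
	// IP-address form, and the empty ".." label respectively.
}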
-func dnsCompatibleBucketName(bucket string) bool { - return reDomain.MatchString(bucket) && - !reIPAddress.MatchString(bucket) && - !strings.Contains(bucket, "..") -} - -// moveBucketToHost moves the bucket name from the URI path to URL host. -func moveBucketToHost(u *url.URL, bucket string) { - u.Host = bucket + "." + u.Host - removeBucketFromPath(u) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go deleted file mode 100644 index 308b7d473e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !go1.6 -// +build !go1.6 - -package s3 - -import "github.com/aws/aws-sdk-go/aws/request" - -func platformRequestHandlers(r *request.Request) { -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go deleted file mode 100644 index 339019d321..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build go1.6 -// +build go1.6 - -package s3 - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -func platformRequestHandlers(r *request.Request) { - if r.Operation.HTTPMethod == "PUT" { - // 100-Continue should only be used on put requests. - r.Handlers.Sign.PushBack(add100Continue) - } -} - -func add100Continue(r *request.Request) { - if aws.BoolValue(r.Config.S3Disable100Continue) { - return - } - if r.HTTPRequest.ContentLength < 1024*1024*2 { - // Ignore requests smaller than 2MB. This helps prevent delaying - // requests unnecessarily. - return - } - - r.HTTPRequest.Header.Set("Expect", "100-Continue") -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go deleted file mode 100644 index 3e75d0e942..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ /dev/null @@ -1,108 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package s3 - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/restxml" -) - -// S3 provides the API operation methods for making requests to -// Amazon Simple Storage Service. See this package's package overview docs -// for details on the service. -// -// S3 methods are safe to use concurrently. It is not safe to -// mutate any of the struct's properties though. -type S3 struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "s3" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "S3" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the S3 client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// -// mySession := session.Must(session.NewSession()) -// -// // Create a S3 client from just a session.
-// svc := s3.New(mySession) -// -// // Create a S3 client with additional configuration -// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { - c := p.ClientConfig(EndpointsID, cfgs...) - if c.SigningNameDerived || len(c.SigningName) == 0 { - c.SigningName = "s3" - } - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *S3 { - svc := &S3{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2006-03-01", - ResolvedRegion: resolvedRegion, - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) { - s.DisableURIPathEscaping = true - })) - svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) - - svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler) - svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a S3 operation and runs any -// custom request initialization. 
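A runnable expansion of the constructor example in the comment above; the ListBuckets call is only there to show the client in use (ambient AWS credentials are assumed):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Build a session and an S3 client with an explicit region,
	// following the documented constructor.
	sess := session.Must(session.NewSession())
	svc := s3.New(sess, aws.NewConfig().WithRegion("us-west-2"))

	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		fmt.Println("ListBuckets failed:", err)
		return
	}
	for _, b := range out.Buckets {
		fmt.Println(aws.StringValue(b.Name))
	}
}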
-func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go deleted file mode 100644 index 57a0bd92ca..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ /dev/null @@ -1,84 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) - -func validateSSERequiresSSL(r *request.Request) { - if r.HTTPRequest.URL.Scheme == "https" { - return - } - - if iface, ok := r.Params.(sseCustomerKeyGetter); ok { - if len(iface.getSSECustomerKey()) > 0 { - r.Error = errSSERequiresSSL - return - } - } - - if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { - if len(iface.getCopySourceSSECustomerKey()) > 0 { - r.Error = errSSERequiresSSL - return - } - } -} - -const ( - sseKeyHeader = "x-amz-server-side-encryption-customer-key" - sseKeyMD5Header = sseKeyHeader + "-md5" -) - -func computeSSEKeyMD5(r *request.Request) { - var key string - if g, ok := r.Params.(sseCustomerKeyGetter); ok { - key = g.getSSECustomerKey() - } - - computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) -} - -const ( - copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" - copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" -) - -func computeCopySourceSSEKeyMD5(r *request.Request) { - var key string - if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { - key = g.getCopySourceSSECustomerKey() - } - - computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) -} - -func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { - if len(key) == 0 { - // Backwards compatibility where the user just set the header value instead - // of using the API parameter, or set the header value for an - // operation without the parameters modeled. - key = r.Header.Get(keyHeader) - if len(key) == 0 { - return - } - - // For backwards compatibility, the header's value is not base64 encoded, - // and needs to be encoded and updated by the SDK's customizations. - b64Key := base64.StdEncoding.EncodeToString([]byte(key)) - r.Header.Set(keyHeader, b64Key) - } - - // Only update Key's MD5 if not already set.
- if len(r.Header.Get(keyMD5Header)) == 0 { - sum := md5.Sum([]byte(key)) - keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) - r.Header.Set(keyMD5Header, keyMD5) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go deleted file mode 100644 index 096adc091d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ /dev/null @@ -1,47 +0,0 @@ -package s3 - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -func copyMultipartStatusOKUnmarshalError(r *request.Request) { - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "unable to read response body", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - // Note, some middleware later in the stack like restxml.Unmarshal expect a valid, non-closed Body - // even in case of an error, so we replace it with an empty Reader. - r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(nil)) - return - } - - body := bytes.NewReader(b) - r.HTTPResponse.Body = ioutil.NopCloser(body) - defer body.Seek(0, sdkio.SeekStart) - - unmarshalError(r) - if err, ok := r.Error.(awserr.Error); ok && err != nil { - if err.Code() == request.ErrCodeSerialization && - err.OrigErr() != io.EOF { - r.Error = nil - return - } - // if empty payload - if err.OrigErr() == io.EOF { - r.HTTPResponse.StatusCode = http.StatusInternalServerError - } else { - r.HTTPResponse.StatusCode = http.StatusServiceUnavailable - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go deleted file mode 100644 index 6eecf66910..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ /dev/null @@ -1,114 +0,0 @@ -package s3 - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -type xmlErrorResponse struct { - XMLName xml.Name `xml:"Error"` - Code string `xml:"Code"` - Message string `xml:"Message"` -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) - - // Bucket exists in a different region, and request needs - // to be made to the correct region. 
- if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { - msg := fmt.Sprintf( - "incorrect region, the bucket is not in '%s' region at endpoint '%s'", - aws.StringValue(r.Config.Region), - aws.StringValue(r.Config.Endpoint), - ) - if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 { - msg += fmt.Sprintf(", bucket is in '%s' region", v) - } - r.Error = awserr.NewRequestFailure( - awserr.New("BucketRegionError", msg, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - // Attempt to parse error from body if it is known - var errResp xmlErrorResponse - var err error - if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 { - err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body) - } else { - err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) - } - - if err != nil { - var errorMsg string - if err == io.EOF { - errorMsg = "empty response payload" - } else { - errorMsg = "failed to unmarshal error message" - } - - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - errorMsg, err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - // Fallback to status code converted to message if still no error code - if len(errResp.Code) == 0 { - statusText := http.StatusText(r.HTTPResponse.StatusCode) - errResp.Code = strings.Replace(statusText, " ", "", -1) - errResp.Message = statusText - } - - r.Error = awserr.NewRequestFailure( - awserr.New(errResp.Code, errResp.Message, err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) -} - -// A RequestFailure provides access to the S3 Request ID and Host ID values -// returned from API operation errors. Getting the error as a string will -// return the formatted error with the same information as awserr.RequestFailure, -// while also adding the HostID value from the response. -type RequestFailure interface { - awserr.RequestFailure - - // Host ID is the S3 Host ID needed for debugging and contacting support - HostID() string -} - -// s3unmarshalXMLError is an s3-specific xml error unmarshaler -// for 200 OK errors and response payloads. -// This function differs from the xmlUtil.UnmarshalXMLError -// func. It does not ignore the EOF error and passes it up. -// Related to bug fix for `s3 200 OK response with empty payload` -func s3unmarshalXMLError(v interface{}, stream io.Reader) error { - var errBuf bytes.Buffer - body := io.TeeReader(stream, &errBuf) - - err := xml.NewDecoder(body).Decode(v) - if err != nil && err != io.EOF { - return awserr.NewUnmarshalError(err, - "failed to unmarshal error message", errBuf.Bytes()) - } - - return err -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go deleted file mode 100644 index 2596c694b5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go +++ /dev/null @@ -1,214 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package s3 - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// WaitUntilBucketExists uses the Amazon S3 API operation -// HeadBucket to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { - return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists.
-// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilBucketExists", - MaxAttempts: 20, - Delay: request.ConstantWaiterDelay(5 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 200, - }, - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 301, - }, - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 403, - }, - { - State: request.RetryWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 404, - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *HeadBucketInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.HeadBucketRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} - -// WaitUntilBucketNotExists uses the Amazon S3 API operation -// HeadBucket to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { - return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilBucketNotExists", - MaxAttempts: 20, - Delay: request.ConstantWaiterDelay(5 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 404, - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *HeadBucketInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.HeadBucketRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} - -// WaitUntilObjectExists uses the Amazon S3 API operation -// HeadObject to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { - return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. 
-// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilObjectExists", - MaxAttempts: 20, - Delay: request.ConstantWaiterDelay(5 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 200, - }, - { - State: request.RetryWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 404, - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *HeadObjectInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.HeadObjectRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} - -// WaitUntilObjectNotExists uses the Amazon S3 API operation -// HeadObject to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { - return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilObjectNotExists", - MaxAttempts: 20, - Delay: request.ConstantWaiterDelay(5 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.StatusWaiterMatch, - Expected: 404, - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *HeadObjectInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.HeadObjectRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go deleted file mode 100644 index b8f590f71d..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go +++ /dev/null @@ -1,1367 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package sso - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/restjson" -) - -const opGetRoleCredentials = "GetRoleCredentials" - -// GetRoleCredentialsRequest generates a "aws/request.Request" representing the -// client's request for the GetRoleCredentials operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetRoleCredentials for more information on using the GetRoleCredentials -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetRoleCredentialsRequest method. -// req, resp := client.GetRoleCredentialsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials -func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) { - op := &request.Operation{ - Name: opGetRoleCredentials, - HTTPMethod: "GET", - HTTPPath: "/federation/credentials", - } - - if input == nil { - input = &GetRoleCredentialsInput{} - } - - output = &GetRoleCredentialsOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// GetRoleCredentials API operation for AWS Single Sign-On. -// -// Returns the STS short-term credentials for a given role name that is assigned -// to the user. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Single Sign-On's -// API operation GetRoleCredentials for usage and error information. -// -// Returned Error Types: -// -// - InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. -// -// - UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. -// -// - TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. -// -// - ResourceNotFoundException -// The specified resource doesn't exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials -func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) { - req, out := c.GetRoleCredentialsRequest(input) - return out, req.Send() -} - -// GetRoleCredentialsWithContext is the same as GetRoleCredentials with the addition of -// the ability to pass a context and additional request options. -// -// See GetRoleCredentials for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SSO) GetRoleCredentialsWithContext(ctx aws.Context, input *GetRoleCredentialsInput, opts ...request.Option) (*GetRoleCredentialsOutput, error) { - req, out := c.GetRoleCredentialsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListAccountRoles = "ListAccountRoles" - -// ListAccountRolesRequest generates a "aws/request.Request" representing the -// client's request for the ListAccountRoles operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListAccountRoles for more information on using the ListAccountRoles -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListAccountRolesRequest method. -// req, resp := client.ListAccountRolesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles -func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) { - op := &request.Operation{ - Name: opListAccountRoles, - HTTPMethod: "GET", - HTTPPath: "/assignment/roles", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListAccountRolesInput{} - } - - output = &ListAccountRolesOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// ListAccountRoles API operation for AWS Single Sign-On. -// -// Lists all roles that are assigned to the user for a given AWS account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Single Sign-On's -// API operation ListAccountRoles for usage and error information. -// -// Returned Error Types: -// -// - InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. -// -// - UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. -// -// - TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. -// -// - ResourceNotFoundException -// The specified resource doesn't exist. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles -func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) { - req, out := c.ListAccountRolesRequest(input) - return out, req.Send() -} - -// ListAccountRolesWithContext is the same as ListAccountRoles with the addition of -// the ability to pass a context and additional request options. -// -// See ListAccountRoles for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRolesInput, opts ...request.Option) (*ListAccountRolesOutput, error) { - req, out := c.ListAccountRolesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListAccountRolesPages iterates over the pages of a ListAccountRoles operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListAccountRoles method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListAccountRoles operation. -// pageNum := 0 -// err := client.ListAccountRolesPages(params, -// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error { - return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListAccountRolesPagesWithContext same as ListAccountRolesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SSO) ListAccountRolesPagesWithContext(ctx aws.Context, input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListAccountRolesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListAccountRolesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListAccountRolesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListAccounts = "ListAccounts" - -// ListAccountsRequest generates a "aws/request.Request" representing the -// client's request for the ListAccounts operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListAccounts for more information on using the ListAccounts -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListAccountsRequest method. -// req, resp := client.ListAccountsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts -func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) { - op := &request.Operation{ - Name: opListAccounts, - HTTPMethod: "GET", - HTTPPath: "/assignment/accounts", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListAccountsInput{} - } - - output = &ListAccountsOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// ListAccounts API operation for AWS Single Sign-On. -// -// Lists all AWS accounts assigned to the user. These AWS accounts are assigned -// by the administrator of the account. For more information, see Assign User -// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) -// in the IAM Identity Center User Guide. This operation returns a paginated -// response. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Single Sign-On's -// API operation ListAccounts for usage and error information. -// -// Returned Error Types: -// -// - InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. -// -// - UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. -// -// - TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. -// -// - ResourceNotFoundException -// The specified resource doesn't exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts -func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { - req, out := c.ListAccountsRequest(input) - return out, req.Send() -} - -// ListAccountsWithContext is the same as ListAccounts with the addition of -// the ability to pass a context and additional request options. -// -// See ListAccounts for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, opts ...request.Option) (*ListAccountsOutput, error) { - req, out := c.ListAccountsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListAccountsPages iterates over the pages of a ListAccounts operation, -// calling the "fn" function with the response data for each page. 
To stop -// iterating, return false from the fn function. -// -// See ListAccounts method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListAccounts operation. -// pageNum := 0 -// err := client.ListAccountsPages(params, -// func(page *sso.ListAccountsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error { - return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListAccountsPagesWithContext same as ListAccountsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SSO) ListAccountsPagesWithContext(ctx aws.Context, input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListAccountsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListAccountsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opLogout = "Logout" - -// LogoutRequest generates a "aws/request.Request" representing the -// client's request for the Logout operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Logout for more information on using the Logout -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the LogoutRequest method. -// req, resp := client.LogoutRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout -func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) { - op := &request.Operation{ - Name: opLogout, - HTTPMethod: "POST", - HTTPPath: "/logout", - } - - if input == nil { - input = &LogoutInput{} - } - - output = &LogoutOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// Logout API operation for AWS Single Sign-On. -// -// Removes the locally stored SSO tokens from the client-side cache and sends -// an API call to the IAM Identity Center service to invalidate the corresponding -// server-side IAM Identity Center sign in session. 
-// -// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM -// Identity Center sign in session is used to obtain an IAM session, as specified -// in the corresponding IAM Identity Center permission set. More specifically, -// IAM Identity Center assumes an IAM role in the target account on behalf of -// the user, and the corresponding temporary AWS credentials are returned to -// the client. -// -// After user logout, any existing IAM role sessions that were created by using -// IAM Identity Center permission sets continue based on the duration configured -// in the permission set. For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) -// in the IAM Identity Center User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Single Sign-On's -// API operation Logout for usage and error information. -// -// Returned Error Types: -// -// - InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. -// -// - UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. -// -// - TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout -func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) { - req, out := c.LogoutRequest(input) - return out, req.Send() -} - -// LogoutWithContext is the same as Logout with the addition of -// the ability to pass a context and additional request options. -// -// See Logout for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...request.Option) (*LogoutOutput, error) { - req, out := c.LogoutRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Provides information about your AWS account. -type AccountInfo struct { - _ struct{} `type:"structure"` - - // The identifier of the AWS account that is assigned to the user. - AccountId *string `locationName:"accountId" type:"string"` - - // The display name of the AWS account that is assigned to the user. - AccountName *string `locationName:"accountName" type:"string"` - - // The email address of the AWS account that is assigned to the user. - EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AccountInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s AccountInfo) GoString() string { - return s.String() -} - -// SetAccountId sets the AccountId field's value. -func (s *AccountInfo) SetAccountId(v string) *AccountInfo { - s.AccountId = &v - return s -} - -// SetAccountName sets the AccountName field's value. -func (s *AccountInfo) SetAccountName(v string) *AccountInfo { - s.AccountName = &v - return s -} - -// SetEmailAddress sets the EmailAddress field's value. -func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo { - s.EmailAddress = &v - return s -} - -type GetRoleCredentialsInput struct { - _ struct{} `type:"structure" nopayload:"true"` - - // The token issued by the CreateToken API call. For more information, see CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. - // - // AccessToken is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GetRoleCredentialsInput's - // String and GoString methods. - // - // AccessToken is a required field - AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` - - // The identifier for the AWS account that is assigned to the user. - // - // AccountId is a required field - AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` - - // The friendly name of the role that is assigned to the user. - // - // RoleName is a required field - RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetRoleCredentialsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetRoleCredentialsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetRoleCredentialsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetRoleCredentialsInput"} - if s.AccessToken == nil { - invalidParams.Add(request.NewErrParamRequired("AccessToken")) - } - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.RoleName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessToken sets the AccessToken field's value. -func (s *GetRoleCredentialsInput) SetAccessToken(v string) *GetRoleCredentialsInput { - s.AccessToken = &v - return s -} - -// SetAccountId sets the AccountId field's value. -func (s *GetRoleCredentialsInput) SetAccountId(v string) *GetRoleCredentialsInput { - s.AccountId = &v - return s -} - -// SetRoleName sets the RoleName field's value. 
-func (s *GetRoleCredentialsInput) SetRoleName(v string) *GetRoleCredentialsInput { - s.RoleName = &v - return s -} - -type GetRoleCredentialsOutput struct { - _ struct{} `type:"structure"` - - // The credentials for the role that is assigned to the user. - RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetRoleCredentialsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetRoleCredentialsOutput) GoString() string { - return s.String() -} - -// SetRoleCredentials sets the RoleCredentials field's value. -func (s *GetRoleCredentialsOutput) SetRoleCredentials(v *RoleCredentials) *GetRoleCredentialsOutput { - s.RoleCredentials = v - return s -} - -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. -type InvalidRequestException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidRequestException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidRequestException) GoString() string { - return s.String() -} - -func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { - return &InvalidRequestException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidRequestException) Code() string { - return "InvalidRequestException" -} - -// Message returns the exception's message. -func (s *InvalidRequestException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidRequestException) OrigErr() error { - return nil -} - -func (s *InvalidRequestException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidRequestException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidRequestException) RequestID() string { - return s.RespMetadata.RequestID -} - -type ListAccountRolesInput struct { - _ struct{} `type:"structure" nopayload:"true"` - - // The token issued by the CreateToken API call. 
For more information, see CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. - // - // AccessToken is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by ListAccountRolesInput's - // String and GoString methods. - // - // AccessToken is a required field - AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` - - // The identifier for the AWS account that is assigned to the user. - // - // AccountId is a required field - AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` - - // The number of items that clients can request per page. - MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` - - // The page token from the previous response output when you request subsequent - // pages. - NextToken *string `location:"querystring" locationName:"next_token" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountRolesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountRolesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListAccountRolesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListAccountRolesInput"} - if s.AccessToken == nil { - invalidParams.Add(request.NewErrParamRequired("AccessToken")) - } - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessToken sets the AccessToken field's value. -func (s *ListAccountRolesInput) SetAccessToken(v string) *ListAccountRolesInput { - s.AccessToken = &v - return s -} - -// SetAccountId sets the AccountId field's value. -func (s *ListAccountRolesInput) SetAccountId(v string) *ListAccountRolesInput { - s.AccountId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListAccountRolesInput) SetMaxResults(v int64) *ListAccountRolesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListAccountRolesInput) SetNextToken(v string) *ListAccountRolesInput { - s.NextToken = &v - return s -} - -type ListAccountRolesOutput struct { - _ struct{} `type:"structure"` - - // The page token client that is used to retrieve the list of accounts. - NextToken *string `locationName:"nextToken" type:"string"` - - // A paginated response with the list of roles and the next token if more results - // are available. - RoleList []*RoleInfo `locationName:"roleList" type:"list"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountRolesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountRolesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListAccountRolesOutput) SetNextToken(v string) *ListAccountRolesOutput { - s.NextToken = &v - return s -} - -// SetRoleList sets the RoleList field's value. -func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOutput { - s.RoleList = v - return s -} - -type ListAccountsInput struct { - _ struct{} `type:"structure" nopayload:"true"` - - // The token issued by the CreateToken API call. For more information, see CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. - // - // AccessToken is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by ListAccountsInput's - // String and GoString methods. - // - // AccessToken is a required field - AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` - - // This is the number of items clients can request per page. - MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` - - // (Optional) When requesting subsequent pages, this is the page token from - // the previous response output. - NextToken *string `location:"querystring" locationName:"next_token" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListAccountsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListAccountsInput"} - if s.AccessToken == nil { - invalidParams.Add(request.NewErrParamRequired("AccessToken")) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessToken sets the AccessToken field's value. -func (s *ListAccountsInput) SetAccessToken(v string) *ListAccountsInput { - s.AccessToken = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListAccountsInput) SetMaxResults(v int64) *ListAccountsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *ListAccountsInput) SetNextToken(v string) *ListAccountsInput { - s.NextToken = &v - return s -} - -type ListAccountsOutput struct { - _ struct{} `type:"structure"` - - // A paginated response with the list of account information and the next token - // if more results are available. - AccountList []*AccountInfo `locationName:"accountList" type:"list"` - - // The page token client that is used to retrieve the list of accounts. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountsOutput) GoString() string { - return s.String() -} - -// SetAccountList sets the AccountList field's value. -func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput { - s.AccountList = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput { - s.NextToken = &v - return s -} - -type LogoutInput struct { - _ struct{} `type:"structure" nopayload:"true"` - - // The token issued by the CreateToken API call. For more information, see CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. - // - // AccessToken is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by LogoutInput's - // String and GoString methods. - // - // AccessToken is a required field - AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LogoutInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LogoutInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LogoutInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LogoutInput"} - if s.AccessToken == nil { - invalidParams.Add(request.NewErrParamRequired("AccessToken")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessToken sets the AccessToken field's value. -func (s *LogoutInput) SetAccessToken(v string) *LogoutInput { - s.AccessToken = &v - return s -} - -type LogoutOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LogoutOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LogoutOutput) GoString() string { - return s.String() -} - -// The specified resource doesn't exist. -type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResourceNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResourceNotFoundException) GoString() string { - return s.String() -} - -func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { - return &ResourceNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ResourceNotFoundException) Code() string { - return "ResourceNotFoundException" -} - -// Message returns the exception's message. -func (s *ResourceNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ResourceNotFoundException) OrigErr() error { - return nil -} - -func (s *ResourceNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ResourceNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ResourceNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Provides information about the role credentials that are assigned to the -// user. -type RoleCredentials struct { - _ struct{} `type:"structure"` - - // The identifier used for the temporary security credentials. For more information, - // see Using Temporary Security Credentials to Request Access to AWS Resources - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. - AccessKeyId *string `locationName:"accessKeyId" type:"string"` - - // The date on which temporary security credentials expire. - Expiration *int64 `locationName:"expiration" type:"long"` - - // The key that is used to sign the request. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. 
- // - // SecretAccessKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by RoleCredentials's - // String and GoString methods. - SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` - - // The token used for temporary credentials. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. - // - // SessionToken is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by RoleCredentials's - // String and GoString methods. - SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RoleCredentials) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RoleCredentials) GoString() string { - return s.String() -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *RoleCredentials) SetAccessKeyId(v string) *RoleCredentials { - s.AccessKeyId = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *RoleCredentials) SetExpiration(v int64) *RoleCredentials { - s.Expiration = &v - return s -} - -// SetSecretAccessKey sets the SecretAccessKey field's value. -func (s *RoleCredentials) SetSecretAccessKey(v string) *RoleCredentials { - s.SecretAccessKey = &v - return s -} - -// SetSessionToken sets the SessionToken field's value. -func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials { - s.SessionToken = &v - return s -} - -// Provides information about the role that is assigned to the user. -type RoleInfo struct { - _ struct{} `type:"structure"` - - // The identifier of the AWS account assigned to the user. - AccountId *string `locationName:"accountId" type:"string"` - - // The friendly name of the role that is assigned to the user. - RoleName *string `locationName:"roleName" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RoleInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RoleInfo) GoString() string { - return s.String() -} - -// SetAccountId sets the AccountId field's value. -func (s *RoleInfo) SetAccountId(v string) *RoleInfo { - s.AccountId = &v - return s -} - -// SetRoleName sets the RoleName field's value. -func (s *RoleInfo) SetRoleName(v string) *RoleInfo { - s.RoleName = &v - return s -} - -// Indicates that the request is being made too frequently and is more than -// what the server can handle. 
-type TooManyRequestsException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TooManyRequestsException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TooManyRequestsException) GoString() string { - return s.String() -} - -func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { - return &TooManyRequestsException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *TooManyRequestsException) Code() string { - return "TooManyRequestsException" -} - -// Message returns the exception's message. -func (s *TooManyRequestsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *TooManyRequestsException) OrigErr() error { - return nil -} - -func (s *TooManyRequestsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *TooManyRequestsException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *TooManyRequestsException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. -type UnauthorizedException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnauthorizedException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnauthorizedException) GoString() string { - return s.String() -} - -func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { - return &UnauthorizedException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *UnauthorizedException) Code() string { - return "UnauthorizedException" -} - -// Message returns the exception's message. -func (s *UnauthorizedException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s *UnauthorizedException) OrigErr() error {
-	return nil
-}
-
-func (s *UnauthorizedException) Error() string {
-	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *UnauthorizedException) StatusCode() int {
-	return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *UnauthorizedException) RequestID() string {
-	return s.RespMetadata.RequestID
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
deleted file mode 100644
index 15e61a3228..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-// Package sso provides the client and types for making API
-// requests to AWS Single Sign-On.
-//
-// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web
-// service that makes it easy for you to assign user access to IAM Identity
-// Center resources such as the AWS access portal. Users can get AWS account
-// applications and roles assigned to them and get federated into the application.
-//
-// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces
-// will continue to retain their original name for backward compatibility purposes.
-// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed).
-//
-// This reference guide describes the IAM Identity Center Portal operations
-// that you can call programmatically and includes detailed information on data
-// types and errors.
-//
-// AWS provides SDKs that consist of libraries and sample code for various programming
-// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
-// provide a convenient way to create programmatic access to IAM Identity Center
-// and other AWS services. For more information about the AWS SDKs, including
-// how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
-//
-// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
-//
-// See sso package documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
-//
-// # Using the Client
-//
-// To contact AWS Single Sign-On with the SDK, use the New function to create
-// a new service client. With that client you can make API requests to the service.
-// These clients are safe to use concurrently.
-//
-// See the SDK's documentation for more information on how to use the SDK.
-// https://docs.aws.amazon.com/sdk-for-go/api/
-//
-// See aws.Config documentation for more information on configuring SDK clients.
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
-//
-// See the AWS Single Sign-On client SSO for more
-// information on creating a client for this service.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New
-package sso
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
deleted file mode 100644
index 77a6792e35..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package sso
-
-import (
-	"github.com/aws/aws-sdk-go/private/protocol"
-)
-
-const (
-
-	// ErrCodeInvalidRequestException for service response error code
-	// "InvalidRequestException".
-	//
-	// Indicates that a problem occurred with the input to the request. For example,
-	// a required parameter might be missing or out of range.
-	ErrCodeInvalidRequestException = "InvalidRequestException"
-
-	// ErrCodeResourceNotFoundException for service response error code
-	// "ResourceNotFoundException".
-	//
-	// The specified resource doesn't exist.
-	ErrCodeResourceNotFoundException = "ResourceNotFoundException"
-
-	// ErrCodeTooManyRequestsException for service response error code
-	// "TooManyRequestsException".
-	//
-	// Indicates that the request is being made too frequently and is more than
-	// what the server can handle.
-	ErrCodeTooManyRequestsException = "TooManyRequestsException"
-
-	// ErrCodeUnauthorizedException for service response error code
-	// "UnauthorizedException".
-	//
-	// Indicates that the request is not authorized. This can happen due to an invalid
-	// access token in the request.
-	ErrCodeUnauthorizedException = "UnauthorizedException"
-)
-
-var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
-	"InvalidRequestException":   newErrorInvalidRequestException,
-	"ResourceNotFoundException": newErrorResourceNotFoundException,
-	"TooManyRequestsException":  newErrorTooManyRequestsException,
-	"UnauthorizedException":     newErrorUnauthorizedException,
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
deleted file mode 100644
index 7094cfe413..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package sso
-
-import (
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/client"
-	"github.com/aws/aws-sdk-go/aws/client/metadata"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/aws/signer/v4"
-	"github.com/aws/aws-sdk-go/private/protocol"
-	"github.com/aws/aws-sdk-go/private/protocol/restjson"
-)
-
-// SSO provides the API operation methods for making requests to
-// AWS Single Sign-On. See this package's package overview docs
-// for details on the service.
-//
-// SSO methods are safe to use concurrently. It is not safe to
-// mutate any of the struct's properties, though.
-type SSO struct {
-	*client.Client
-}
-
-// Used for custom client initialization logic
-var initClient func(*client.Client)
-
-// Used for custom request initialization logic
-var initRequest func(*request.Request)
-
-// Service information constants
-const (
-	ServiceName = "SSO"        // Name of service.
-	EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
-	ServiceID   = "SSO"        // ServiceID is a unique identifier of a specific service.
-)
-
-// New creates a new instance of the SSO client with a session.
-// If additional configuration is needed for the client instance use the optional
-// aws.Config parameter to add your extra config.
-//
-// Example:
-//
-//	mySession := session.Must(session.NewSession())
-//
-//	// Create a SSO client from just a session.
-//	svc := sso.New(mySession)
-//
-//	// Create a SSO client with additional configuration
-//	svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
-func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
-	c := p.ClientConfig(EndpointsID, cfgs...)
-	if c.SigningNameDerived || len(c.SigningName) == 0 {
-		c.SigningName = "awsssoportal"
-	}
-	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
-}
-
-// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSO {
-	svc := &SSO{
-		Client: client.New(
-			cfg,
-			metadata.ClientInfo{
-				ServiceName:    ServiceName,
-				ServiceID:      ServiceID,
-				SigningName:    signingName,
-				SigningRegion:  signingRegion,
-				PartitionID:    partitionID,
-				Endpoint:       endpoint,
-				APIVersion:     "2019-06-10",
-				ResolvedRegion: resolvedRegion,
-			},
-			handlers,
-		),
-	}
-
-	// Handlers
-	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
-	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
-	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
-	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
-	svc.Handlers.UnmarshalError.PushBackNamed(
-		protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
-	)
-
-	// Run custom client initialization if present
-	if initClient != nil {
-		initClient(svc.Client)
-	}
-
-	return svc
-}
-
-// newRequest creates a new request for a SSO operation and runs any
-// custom request initialization.
-func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request {
-	req := c.NewRequest(op, params, data)
-
-	// Run custom request initialization if present
-	if initRequest != nil {
-		initRequest(req)
-	}
-
-	return req
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
deleted file mode 100644
index 818cab7cda..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client
-// for testing your code.
-//
-// It is important to note that this interface will have breaking changes
-// when the service model is updated and adds new API operations, paginators,
-// and waiters.
-package ssoiface
-
-import (
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/service/sso"
-)
-
-// SSOAPI provides an interface to enable mocking the
-// sso.SSO service client's API operations,
-// paginators, and waiters. This makes unit testing your code that calls out
-// to the SDK's service client easier.
-//
-// The best way to use this interface is so the SDK's service client's calls
-// can be stubbed out for unit testing your code with the SDK without needing
-// to inject custom request handlers into the SDK's request pipeline.
-//
-//	// myFunc uses an SDK service client to make a request to
-//	// AWS Single Sign-On.
-//	func myFunc(svc ssoiface.SSOAPI) bool {
-//	    // Make svc.GetRoleCredentials request
-//	}
-//
-//	func main() {
-//	    sess := session.New()
-//	    svc := sso.New(sess)
-//
-//	    myFunc(svc)
-//	}
-//
-// In your _test.go file:
-//
-//	// Define a mock struct to be used in your unit tests of myFunc.
-//	type mockSSOClient struct {
-//	    ssoiface.SSOAPI
-//	}
-//	func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
-//	    // mock response/functionality
-//	}
-//
-//	func TestMyFunc(t *testing.T) {
-//	    // Setup Test
-//	    mockSvc := &mockSSOClient{}
-//
-//	    myFunc(mockSvc)
-//
-//	    // Verify myFunc's functionality
-//	}
-//
-// It is important to note that this interface will have breaking changes
-// when the service model is updated and adds new API operations, paginators,
-// and waiters. It's suggested to use the pattern above for testing, or to use
-// tooling to generate mocks to satisfy the interfaces.
-type SSOAPI interface {
-	GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error)
-	GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error)
-	GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput)
-
-	ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error)
-	ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error)
-	ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput)
-
-	ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error
-	ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error
-
-	ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error)
-	ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error)
-	ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput)
-
-	ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error
-	ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error
-
-	Logout(*sso.LogoutInput) (*sso.LogoutOutput, error)
-	LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error)
-	LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput)
-}
-
-var _ SSOAPI = (*sso.SSO)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
deleted file mode 100644
index 2b7e675ab8..0000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ /dev/null
@@ -1,3441 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package sts
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awsutil"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/request"
-)
-
-const opAssumeRole = "AssumeRole"
-
-// AssumeRoleRequest generates a "aws/request.Request" representing the
-// client's request for the AssumeRole operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See AssumeRole for more information on using the AssumeRole
-// API call, and error handling.
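The embedding pattern described in the removed ssoiface docs above, written out as compilable test code; a sketch in which the canned response values are assumptions:

package example

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sso"
	"github.com/aws/aws-sdk-go/service/sso/ssoiface"
)

// mockSSOClient embeds SSOAPI so only the methods a test needs are stubbed;
// calling any other method panics, which surfaces missing stubs immediately.
type mockSSOClient struct {
	ssoiface.SSOAPI
}

func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
	return &sso.GetRoleCredentialsOutput{
		RoleCredentials: &sso.RoleCredentials{
			AccessKeyId: aws.String("AKIDEXAMPLE"), // canned value for the test
		},
	}, nil
}

func TestGetRoleCredentials(t *testing.T) {
	svc := &mockSSOClient{}
	out, err := svc.GetRoleCredentials(&sso.GetRoleCredentialsInput{})
	if err != nil || out.RoleCredentials == nil {
		t.Fatal("expected canned credentials")
	}
}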
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the AssumeRoleRequest method. -// req, resp := client.AssumeRoleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole -func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { - op := &request.Operation{ - Name: opAssumeRole, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleInput{} - } - - output = &AssumeRoleOutput{} - req = c.newRequest(op, input, output) - return -} - -// AssumeRole API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials that you can use to access -// Amazon Web Services resources that you might not normally have access to. -// These temporary credentials consist of an access key ID, a secret access -// key, and a security token. Typically, you use AssumeRole within your account -// or for cross-account access. For a comparison of AssumeRole with other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// # Permissions -// -// The temporary security credentials created by AssumeRole can be used to make -// API calls to any Amazon Web Services service with the following exception: -// You cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken -// API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. -// -// When you create a role, you create two policies: A role trust policy that -// specifies who can assume the role and a permissions policy that specifies -// what can be done with the role. You specify the trusted principal who is -// allowed to assume the role in the role trust policy. -// -// To assume a role from a different account, your Amazon Web Services account -// must be trusted by the role. The trust relationship is defined in the role's -// trust policy when the role is created. 
That trust policy states which accounts
-// are allowed to delegate that access to users in the account.
-//
-// A user who wants to access a role in a different account must also have permissions
-// that are delegated from the user account administrator. The administrator
-// must attach a policy that allows the user to call AssumeRole for the ARN
-// of the role in the other account.
-//
-// To allow a user to assume a role in the same account, you can do either of
-// the following:
-//
-//   - Attach a policy to the user that allows the user to call AssumeRole
-//     (as long as the role's trust policy trusts the account).
-//
-//   - Add the user as a principal directly in the role's trust policy.
-//
-// You can do either because the role’s trust policy acts as an IAM resource-based
-// policy. When a resource-based policy grants access to a principal in the
-// same account, no additional identity-based policy is required. For more information
-// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
-// in the IAM User Guide.
-//
-// # Tags
-//
-// (Optional) You can pass tag key-value pairs to your session. These tags are
-// called session tags. For more information about session tags, see Passing
-// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-//
-// An administrator must grant you the permissions necessary to pass session
-// tags. The administrator can also create granular permissions to allow you
-// to pass only specific session tags. For more information, see Tutorial: Using
-// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide.
-//
-// You can set the session tags as transitive. Transitive tags persist during
-// role chaining. For more information, see Chaining Roles with Session Tags
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
-// in the IAM User Guide.
-//
-// # Using MFA with AssumeRole
-//
-// (Optional) You can include multi-factor authentication (MFA) information
-// when you call AssumeRole. This is useful for cross-account scenarios to ensure
-// that the user that assumes the role has been authenticated with an Amazon
-// Web Services MFA device. In that scenario, the trust policy of the role being
-// assumed includes a condition that tests for MFA authentication. If the caller
-// does not include valid MFA information, the request to assume the role is
-// denied. The condition in a trust policy that tests for MFA authentication
-// might look like the following example.
-//
-// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
-//
-// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
-// in the IAM User Guide.
-//
-// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
-// parameters. The SerialNumber value identifies the user's hardware or virtual
-// MFA device. The TokenCode is the time-based one-time password (TOTP) that
-// the MFA device produces.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
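An MFA-protected AssumeRole call matching the trust-policy condition above might look like this; a sketch in which the role ARN, MFA serial number, and token code are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		RoleSessionName: aws.String("example-session"),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"), // placeholder
		TokenCode:       aws.String("123456"), // current TOTP from the MFA device
	})
	if err != nil {
		log.Fatal(err)
	}
	// out.Credentials holds the temporary AccessKeyId, SecretAccessKey, and SessionToken.
	log.Println(aws.StringValue(out.Credentials.AccessKeyId))
}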
-// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRole for usage and error information. -// -// Returned Error Codes: -// -// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// - ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// - ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole -func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { - req, out := c.AssumeRoleRequest(input) - return out, req.Send() -} - -// AssumeRoleWithContext is the same as AssumeRole with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRole for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { - req, out := c.AssumeRoleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opAssumeRoleWithSAML = "AssumeRoleWithSAML" - -// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRoleWithSAML operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the AssumeRoleWithSAMLRequest method. -// req, resp := client.AssumeRoleWithSAMLRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML -func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { - op := &request.Operation{ - Name: opAssumeRoleWithSAML, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithSAMLInput{} - } - - output = &AssumeRoleWithSAMLOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// AssumeRoleWithSAML API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials for users who have been authenticated -// via a SAML authentication response. This operation provides a mechanism for -// tying an enterprise identity store or directory to role-based Amazon Web -// Services access without user-specific credentials or configuration. For a -// comparison of AssumeRoleWithSAML with the other API operations that produce -// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The temporary security credentials returned by this operation consist of -// an access key ID, a secret access key, and a security token. Applications -// can use these temporary security credentials to sign calls to Amazon Web -// Services services. -// -// # Session Duration -// -// By default, the temporary security credentials created by AssumeRoleWithSAML -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. Your role session lasts for the -// duration that you specify, or until the time specified in the SAML authentication -// response's SessionNotOnOrAfter value, whichever is shorter. You can provide -// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session -// duration setting for the role. This setting can have a value from 1 hour -// to 12 hours. To learn how to view the maximum value for your role, see View -// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. 
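A sketch of requesting an extended SAML session via DurationSeconds; the ARNs and assertion here are placeholders, and the duration must stay within the role's configured maximum:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func assumeWithSAML(sess *session.Session, samlAssertion string) (*sts.AssumeRoleWithSAMLOutput, error) {
	svc := sts.New(sess)
	// The request is unsigned; the SAML assertion itself authenticates the caller.
	return svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		PrincipalArn:    aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"), // placeholder
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),         // placeholder
		SAMLAssertion:   aws.String(samlAssertion), // base64-encoded response from the IdP
		DurationSeconds: aws.Int64(4 * 3600),       // 4 hours; capped by the role's maximum session duration
	})
}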
-// -// Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) -// limits your CLI or Amazon Web Services API role session to a maximum of one -// hour. When you use the AssumeRole API operation to assume a role, you can -// specify the duration of your role session with the DurationSeconds parameter. -// You can specify a parameter value of up to 43200 seconds (12 hours), depending -// on the maximum session duration setting for your role. However, if you assume -// a role using role chaining and provide a DurationSeconds parameter value -// greater than one hour, the operation fails. -// -// # Permissions -// -// The temporary security credentials created by AssumeRoleWithSAML can be used -// to make API calls to any Amazon Web Services service with the following exception: -// you cannot call the STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. -// -// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services -// security credentials. The identity of the caller is validated by using keys -// in the metadata document that is uploaded for the SAML provider entity for -// your identity provider. -// -// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. -// The entry includes the value in the NameID element of the SAML assertion. -// We recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the persistent -// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). -// -// # Tags -// -// (Optional) You can configure your IdP to pass attributes into your SAML assertion -// as session tags. Each session tag consists of a key name and an associated -// value. For more information about session tags, see Passing Session Tags -// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You can pass up to 50 session tags. The plaintext session tag keys can’t -// exceed 128 characters and the values can’t exceed 256 characters. For these -// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. 
-// -// An Amazon Web Services conversion compresses the passed session policies -// and session tags into a packed binary format that has a separate limit. Your -// request can fail for this limit even if your plaintext meets the other requirements. -// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. -// -// You can pass a session tag with the same key as a tag that is attached to -// the role. When you do, session tags override the role's tags with the same -// key. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// # SAML Configuration -// -// Before your application can call AssumeRoleWithSAML, you must configure your -// SAML identity provider (IdP) to issue the claims required by Amazon Web Services. -// Additionally, you must use Identity and Access Management (IAM) to create -// a SAML provider entity in your Amazon Web Services account that represents -// your identity provider. You must also create an IAM role that specifies this -// SAML provider in its trust policy. -// -// For more information, see the following resources: -// -// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// -// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. -// -// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// -// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRoleWithSAML for usage and error information. -// -// Returned Error Codes: -// -// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. 
-// For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-//
-// You could receive this error even though you meet other defined session policy
-// and session tag limits. For more information, see IAM and STS Entity Character
-// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
-// in the IAM User Guide.
-//
-// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
-// The identity provider (IdP) reported that authentication failed. This might
-// be because the claim is invalid.
-//
-// If this error is returned for the AssumeRoleWithWebIdentity operation, it
-// can also mean that the claim has expired or has been explicitly revoked.
-//
-// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
-// The web identity token that was passed could not be validated by Amazon Web
-// Services. Get a new identity token from the identity provider and then retry
-// the request.
-//
-// - ErrCodeExpiredTokenException "ExpiredTokenException"
-// The web identity token that was passed is expired or is not valid. Get a
-// new identity token from the identity provider and then retry the request.
-//
-// - ErrCodeRegionDisabledException "RegionDisabledException"
-// STS is not activated in the requested region for the account that is being
-// asked to generate credentials. The account administrator must use the IAM
-// console to activate STS in that region. For more information, see Activating
-// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
-func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
-	req, out := c.AssumeRoleWithSAMLRequest(input)
-	return out, req.Send()
-}
-
-// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
-// the ability to pass a context and additional request options.
-//
-// See AssumeRoleWithSAML for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
-	req, out := c.AssumeRoleWithSAMLRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
-
-// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
-// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
-// API call, and error handling.
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. -// req, resp := client.AssumeRoleWithWebIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity -func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { - op := &request.Operation{ - Name: opAssumeRoleWithWebIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithWebIdentityInput{} - } - - output = &AssumeRoleWithWebIdentityOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials for users who have been authenticated -// in a mobile or web application with a web identity provider. Example providers -// include the OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID -// Connect-compatible identity provider such as Google or Amazon Cognito federated -// identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html). -// -// For mobile applications, we recommend that you use Amazon Cognito. You can -// use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide -// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android -// Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely identify -// a user. You can also supply the user with a consistent identity throughout -// the lifetime of an application. -// -// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito -// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the Amazon Web Services SDK for iOS Developer Guide. -// -// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web -// Services security credentials. Therefore, you can distribute an application -// (for example, on mobile devices) that requests temporary security credentials -// without including long-term Amazon Web Services credentials in the application. -// You also don't need to deploy server-based proxy services that use long-term -// Amazon Web Services credentials. Instead, the identity of the caller is validated -// by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity -// with the other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The temporary security credentials returned by this API consist of an access -// key ID, a secret access key, and a security token. 
-// Applications can use these
-// temporary security credentials to sign calls to Amazon Web Services service
-// API operations.
-//
-// # Session Duration
-//
-// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
-// last for one hour. However, you can use the optional DurationSeconds parameter
-// to specify the duration of your session. You can provide a value from 900
-// seconds (15 minutes) up to the maximum session duration setting for the role.
-// This setting can have a value from 1 hour to 12 hours. To learn how to view
-// the maximum value for your role, see View the Maximum Session Duration Setting
-// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
-// in the IAM User Guide. The maximum session duration limit applies when you
-// use the AssumeRole* API operations or the assume-role* CLI commands. However
-// the limit does not apply when you use those operations to create a console
-// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
-// in the IAM User Guide.
-//
-// # Permissions
-//
-// The temporary security credentials created by AssumeRoleWithWebIdentity can
-// be used to make API calls to any Amazon Web Services service with the following
-// exception: you cannot call the STS GetFederationToken or GetSessionToken
-// API operations.
-//
-// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plaintext that you use for both inline
-// and managed session policies can't exceed 2,048 characters. Passing policies
-// to this operation returns new temporary credentials. The resulting session's
-// permissions are the intersection of the role's identity-based policy and
-// the session policies. You can use the role's temporary credentials in subsequent
-// Amazon Web Services API calls to access resources in the account that owns
-// the role. You cannot use session policies to grant more permissions than
-// those allowed by the identity-based policy of the role that is being assumed.
-// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide.
-//
-// # Tags
-//
-// (Optional) You can configure your IdP to pass attributes into your web identity
-// token as session tags. Each session tag consists of a key name and an associated
-// value. For more information about session tags, see Passing Session Tags
-// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-//
-// You can pass up to 50 session tags. The plaintext session tag keys can’t
-// exceed 128 characters and the values can’t exceed 256 characters. For these
-// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
-// in the IAM User Guide.
-//
-// An Amazon Web Services conversion compresses the passed session policies
-// and session tags into a packed binary format that has a separate limit. Your
-// request can fail for this limit even if your plaintext meets the other requirements.
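A corresponding sketch for AssumeRoleWithWebIdentity, again reusing the svc client from the first sketch; the role ARN is a placeholder and oidcToken stands for a JWT obtained from the identity provider:

    // No Amazon Web Services credentials are needed here: the request is sent
    // with anonymous credentials and the web identity token proves identity.
    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
        RoleArn:          aws.String("arn:aws:iam::123456789012:role/WebAppRole"), // placeholder
        RoleSessionName:  aws.String("app-user-42"),                               // logged to CloudTrail; avoid PII
        WebIdentityToken: aws.String(oidcToken),
        DurationSeconds:  aws.Int64(900), // 15 minutes, the minimum
    })
    if err != nil {
        log.Fatal(err)
    }
    creds := out.Credentials
    fmt.Println(aws.StringValue(creds.AccessKeyId), aws.TimeValue(creds.Expiration))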
-// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. -// -// You can pass a session tag with the same key as a tag that is attached to -// the role. When you do, the session tag overrides the role tag with the same -// key. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// # Identities -// -// Before your application can call AssumeRoleWithWebIdentity, you must have -// an identity token from a supported identity provider and create a role that -// the application can assume. The role that your application assumes must trust -// the identity provider that is associated with the identity token. In other -// words, the identity provider must be specified in the role's trust policy. -// -// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail -// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided web identity token. We recommend that you avoid using any -// personally identifiable information (PII) in this field. For example, you -// could instead use a GUID or a pairwise identifier, as suggested in the OIDC -// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). -// -// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity -// API, see the following resources: -// -// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). -// Walk through the process of authenticating through Login with Amazon, -// Facebook, or Google, getting temporary security credentials, and then -// using those credentials to make a request to Amazon Web Services. -// -// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). -// These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. -// -// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). -// This article discusses web identity federation and shows an example of -// how to use web identity federation to get access to content in Amazon -// S3. -// -// Returns awserr.Error for service API and SDK errors. 
-// Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation AssumeRoleWithWebIdentity for usage and error information.
-//
-// Returned Error Codes:
-//
-// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
-// The request was rejected because the policy document was malformed. The error
-// message describes the specific error.
-//
-// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
-// The request was rejected because the total packed size of the session policies
-// and session tags combined was too large. An Amazon Web Services conversion
-// compresses the session policy document, session policy ARNs, and session
-// tags into a packed binary format that has a separate limit. The error message
-// indicates by percentage how close the policies and tags are to the upper
-// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-//
-// You could receive this error even though you meet other defined session policy
-// and session tag limits. For more information, see IAM and STS Entity Character
-// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
-// in the IAM User Guide.
-//
-// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
-// The identity provider (IdP) reported that authentication failed. This might
-// be because the claim is invalid.
-//
-// If this error is returned for the AssumeRoleWithWebIdentity operation, it
-// can also mean that the claim has expired or has been explicitly revoked.
-//
-// - ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
-// The request could not be fulfilled because the identity provider (IDP) that
-// was asked to verify the incoming identity token could not be reached. This
-// is often a transient error caused by network conditions. Retry the request
-// a limited number of times so that you don't exceed the request rate. If the
-// error persists, the identity provider might be down or not responding.
-//
-// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
-// The web identity token that was passed could not be validated by Amazon Web
-// Services. Get a new identity token from the identity provider and then retry
-// the request.
-//
-// - ErrCodeExpiredTokenException "ExpiredTokenException"
-// The web identity token that was passed is expired or is not valid. Get a
-// new identity token from the identity provider and then retry the request.
-//
-// - ErrCodeRegionDisabledException "RegionDisabledException"
-// STS is not activated in the requested region for the account that is being
-// asked to generate credentials. The account administrator must use the IAM
-// console to activate STS in that region. For more information, see Activating
-// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
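The error codes listed above come back as awserr.Error values (import github.com/aws/aws-sdk-go/aws/awserr), so the retry guidance translates into the runtime type assertion the docs describe, roughly as follows, with input built as in the previous sketch:

    if _, err := svc.AssumeRoleWithWebIdentity(input); err != nil {
        if aerr, ok := err.(awserr.Error); ok {
            switch aerr.Code() {
            case sts.ErrCodeInvalidIdentityTokenException,
                sts.ErrCodeExpiredTokenException:
                // Fetch a fresh token from the identity provider, then retry.
            case sts.ErrCodeIDPCommunicationErrorException:
                // Usually transient: retry a bounded number of times with backoff.
            default:
                log.Printf("sts: %s: %s", aerr.Code(), aerr.Message())
            }
        }
    }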
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity -func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { - req, out := c.AssumeRoleWithWebIdentityRequest(input) - return out, req.Send() -} - -// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRoleWithWebIdentity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { - req, out := c.AssumeRoleWithWebIdentityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" - -// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the -// client's request for the DecodeAuthorizationMessage operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DecodeAuthorizationMessageRequest method. -// req, resp := client.DecodeAuthorizationMessageRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage -func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { - op := &request.Operation{ - Name: opDecodeAuthorizationMessage, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DecodeAuthorizationMessageInput{} - } - - output = &DecodeAuthorizationMessageOutput{} - req = c.newRequest(op, input, output) - return -} - -// DecodeAuthorizationMessage API operation for AWS Security Token Service. -// -// Decodes additional information about the authorization status of a request -// from an encoded message returned in response to an Amazon Web Services request. -// -// For example, if a user is not authorized to perform an operation that he -// or she has requested, the request returns a Client.UnauthorizedOperation -// response (an HTTP 403 response). Some Amazon Web Services operations additionally -// return an encoded message that can provide details about this authorization -// failure. -// -// Only certain Amazon Web Services operations return an encoded authorization -// message. 
-// The documentation for an individual operation indicates whether
-// that operation returns an encoded message in addition to returning an HTTP
-// code.
-//
-// The message is encoded because the details of the authorization status can
-// contain privileged information that the user who requested the operation
-// should not see. To decode an authorization status message, a user must be
-// granted permissions through an IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
-// to request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage)
-// action.
-//
-// The decoded message includes the following type of information:
-//
-// - Whether the request was denied due to an explicit deny or due to the
-// absence of an explicit allow. For more information, see Determining Whether
-// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
-// in the IAM User Guide.
-//
-// - The principal who made the request.
-//
-// - The requested action.
-//
-// - The requested resource.
-//
-// - The values of condition keys in the context of the user's request.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation DecodeAuthorizationMessage for usage and error information.
-//
-// Returned Error Codes:
-// - ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
-// The error returned if the message passed to DecodeAuthorizationMessage was
-// invalid. This can happen if the token contains invalid characters, such as
-// linebreaks.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
-func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
-	req, out := c.DecodeAuthorizationMessageRequest(input)
-	return out, req.Send()
-}
-
-// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DecodeAuthorizationMessage for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
-	req, out := c.DecodeAuthorizationMessageRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opGetAccessKeyInfo = "GetAccessKeyInfo"
-
-// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the
-// client's request for the GetAccessKeyInfo operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
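In practice, the encoded message is copied out of the failing request's Client.UnauthorizedOperation error and handed to this operation by a caller who holds sts:DecodeAuthorizationMessage. A short sketch, again reusing svc, with encoded standing in for that opaque string:

    out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
        EncodedMessage: aws.String(encoded),
    })
    if err != nil {
        // InvalidAuthorizationMessageException if the message was truncated or mangled.
        log.Fatal(err)
    }
    // A JSON document naming the denied action, the principal, the resource,
    // and the condition-key values in effect.
    fmt.Println(aws.StringValue(out.DecodedMessage))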
-// -// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetAccessKeyInfoRequest method. -// req, resp := client.GetAccessKeyInfoRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo -func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { - op := &request.Operation{ - Name: opGetAccessKeyInfo, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetAccessKeyInfoInput{} - } - - output = &GetAccessKeyInfoOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetAccessKeyInfo API operation for AWS Security Token Service. -// -// Returns the account identifier for the specified access key ID. -// -// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) -// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). -// For more information about access keys, see Managing Access Keys for IAM -// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) -// in the IAM User Guide. -// -// When you pass an access key ID to this operation, it returns the ID of the -// Amazon Web Services account to which the keys belong. Access key IDs beginning -// with AKIA are long-term credentials for an IAM user or the Amazon Web Services -// account root user. Access key IDs beginning with ASIA are temporary credentials -// that are created using STS operations. If the account in the response belongs -// to you, you can sign in as the root user and review your root user access -// keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) -// to learn which IAM user owns the keys. To learn who requested the temporary -// credentials for an ASIA access key, view the STS events in your CloudTrail -// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) -// in the IAM User Guide. -// -// This operation does not indicate the state of the access key. The key might -// be active, inactive, or deleted. Active keys might not have permissions to -// perform an operation. Providing a deleted access key might return an error -// that the key doesn't exist. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetAccessKeyInfo for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo -func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { - req, out := c.GetAccessKeyInfoRequest(input) - return out, req.Send() -} - -// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of -// the ability to pass a context and additional request options. -// -// See GetAccessKeyInfo for details on how to use this API operation. 
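A sketch of GetAccessKeyInfo, reusing svc and AWS's documentation example key ID; note that the response carries only the owning account, not the key's state:

    out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
        AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // AKIA = long-term key, ASIA = temporary key
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.Account)) // the account that owns the key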
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { - req, out := c.GetAccessKeyInfoRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetCallerIdentity = "GetCallerIdentity" - -// GetCallerIdentityRequest generates a "aws/request.Request" representing the -// client's request for the GetCallerIdentity operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCallerIdentity for more information on using the GetCallerIdentity -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetCallerIdentityRequest method. -// req, resp := client.GetCallerIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { - op := &request.Operation{ - Name: opGetCallerIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCallerIdentityInput{} - } - - output = &GetCallerIdentityOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCallerIdentity API operation for AWS Security Token Service. -// -// Returns details about the IAM user or role whose credentials are used to -// call the operation. -// -// No permissions are required to perform this operation. If an administrator -// adds a policy to your IAM user or role that explicitly denies access to the -// sts:GetCallerIdentity action, you can still perform this operation. Permissions -// are not required because the same information is returned when an IAM user -// or role is denied access. To view an example response, see I Am Not Authorized -// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetCallerIdentity for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { - req, out := c.GetCallerIdentityRequest(input) - return out, req.Send() -} - -// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of -// the ability to pass a context and additional request options. 
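The WithContext variants accept a context.Context (aws.Context is an alias for it in recent v1 SDKs), which makes GetCallerIdentity a cheap credential sanity check with a deadline. A sketch reusing svc, with the standard library's context and time packages imported:

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    // Succeeds for any valid credentials; even an explicit deny on
    // sts:GetCallerIdentity does not block the call.
    out, err := svc.GetCallerIdentityWithContext(ctx, &sts.GetCallerIdentityInput{})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.Account), aws.StringValue(out.Arn), aws.StringValue(out.UserId))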
-// -// See GetCallerIdentity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { - req, out := c.GetCallerIdentityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetFederationToken = "GetFederationToken" - -// GetFederationTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetFederationToken operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetFederationToken for more information on using the GetFederationToken -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetFederationTokenRequest method. -// req, resp := client.GetFederationTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken -func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { - op := &request.Operation{ - Name: opGetFederationToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetFederationTokenInput{} - } - - output = &GetFederationTokenOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetFederationToken API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) for a federated user. -// A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. You must -// call the GetFederationToken operation using the long-term security credentials -// of an IAM user. As a result, this call is appropriate in contexts where those -// credentials can be safely stored, usually in a server-based application. -// For a comparison of GetFederationToken with the other API operations that -// produce temporary credentials, see Requesting Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// You can create a mobile-based or browser-based app that can authenticate -// users using a web identity provider like Login with Amazon, Facebook, Google, -// or an OpenID Connect-compatible identity provider. In this case, we recommend -// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. 
-// For more information, see Federation Through a Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. -// -// You can also call GetFederationToken using the security credentials of an -// Amazon Web Services account root user, but we do not recommend it. Instead, -// we recommend that you create an IAM user for the purpose of the proxy application. -// Then attach a policy to the IAM user that limits federated users to only -// the actions and resources that they need to access. For more information, -// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// -// # Session duration -// -// The temporary credentials are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials obtained -// by using the Amazon Web Services account root user credentials have a maximum -// duration of 3,600 seconds (1 hour). -// -// # Permissions -// -// You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service except the following: -// -// - You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. -// -// - You cannot call any STS operations except GetCallerIdentity. -// -// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. -// -// Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. When you pass -// session policies, the session permissions are the intersection of the IAM -// user policies and the session policies that you pass. This gives you a way -// to further restrict the permissions for a federated user. You cannot use -// session policies to grant more permissions than those that are defined in -// the permissions policy of the IAM user. For more information, see Session -// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. For information about using GetFederationToken to -// create temporary security credentials, see GetFederationToken—Federation -// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). -// -// You can use the credentials to access a resource that has a resource-based -// policy. If that policy specifically references the federated user session -// in the Principal element of the policy, the session has the permissions allowed -// by the policy. These permissions are granted in addition to the permissions -// granted by the session policies. -// -// # Tags -// -// (Optional) You can pass tag key-value pairs to your session. These are called -// session tags. For more information about session tags, see Passing Session -// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. 
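Because a federated session starts with no permissions at all, GetFederationToken is only useful with a session policy attached. A hedged sketch for the proxy-application case, reusing svc; sessionPolicyJSON stands for an illustrative policy document and the caller is assumed to be an IAM user with long-term credentials:

    out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
        Name: aws.String("marketing-user"), // becomes part of the federated user's ARN
        // Required in practice: without a session policy the federated
        // session has no permissions.
        Policy:          aws.String(sessionPolicyJSON),
        DurationSeconds: aws.Int64(43200), // the 12-hour default; the range is 15 minutes to 36 hours
        Tags: []*sts.Tag{
            {Key: aws.String("Department"), Value: aws.String("Marketing")},
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.FederatedUser.Arn))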
-// -// You can create a mobile-based or browser-based app that can authenticate -// users using a web identity provider like Login with Amazon, Facebook, Google, -// or an OpenID Connect-compatible identity provider. In this case, we recommend -// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. -// For more information, see Federation Through a Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// Tag key–value pairs are not case sensitive, but case is preserved. This -// means that you cannot have separate Department and department tag keys. Assume -// that the user that you are federating has the Department=Marketing tag and -// you pass the department=engineering session tag. Department and department -// are not saved as separate tags, and the session tag passed in the request -// takes precedence over the user tag. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetFederationToken for usage and error information. -// -// Returned Error Codes: -// -// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// - ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken -func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { - req, out := c.GetFederationTokenRequest(input) - return out, req.Send() -} - -// GetFederationTokenWithContext is the same as GetFederationToken with the addition of -// the ability to pass a context and additional request options. -// -// See GetFederationToken for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { - req, out := c.GetFederationTokenRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSessionToken = "GetSessionToken" - -// GetSessionTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetSessionToken operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSessionToken for more information on using the GetSessionToken -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetSessionTokenRequest method. -// req, resp := client.GetSessionTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken -func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { - op := &request.Operation{ - Name: opGetSessionToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSessionTokenInput{} - } - - output = &GetSessionTokenOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSessionToken API operation for AWS Security Token Service. -// -// Returns a set of temporary credentials for an Amazon Web Services account -// or IAM user. The credentials consist of an access key ID, a secret access -// key, and a security token. Typically, you use GetSessionToken if you want -// to use MFA to protect programmatic calls to specific Amazon Web Services -// API operations like Amazon EC2 StopInstances. MFA-enabled IAM users would -// need to call GetSessionToken and submit an MFA code that is associated with -// their MFA device. Using the temporary security credentials that are returned -// from the call, IAM users can then make programmatic calls to API operations -// that require MFA authentication. If you do not supply a correct MFA code, -// then the API returns an access denied error. 
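The MFA flow described below reduces to three input fields. A sketch reusing svc, with a placeholder virtual MFA device ARN and a code read from the device:

    out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/alice"), // placeholder virtual MFA device
        TokenCode:       aws.String("123456"),                              // six digits from the device
        DurationSeconds: aws.Int64(3600),
    })
    if err != nil {
        log.Fatal(err) // a wrong or expired TokenCode surfaces as an access denied error
    }
    // The caller's own permissions, now MFA-qualified, behind temporary credentials.
    fmt.Println(aws.StringValue(out.Credentials.SessionToken))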
-// For a comparison of GetSessionToken
-// with the other API operations that produce temporary credentials, see Requesting
-// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide.
-//
-// No permissions are required for users to perform this operation. The purpose
-// of the sts:GetSessionToken operation is to authenticate the user using MFA.
-// You cannot use policies to control authentication operations. For more information,
-// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
-// in the IAM User Guide.
-//
-// # Session Duration
-//
-// The GetSessionToken operation must be called by using the long-term Amazon
-// Web Services security credentials of the Amazon Web Services account root
-// user or an IAM user. Credentials that are created by IAM users are valid
-// for the duration that you specify. This duration can range from 900 seconds
-// (15 minutes) up to a maximum of 129,600 seconds (36 hours), with a default
-// of 43,200 seconds (12 hours). Credentials based on account credentials can
-// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a
-// default of 1 hour.
-//
-// # Permissions
-//
-// The temporary security credentials created by GetSessionToken can be used
-// to make API calls to any Amazon Web Services service with the following exceptions:
-//
-// - You cannot call any IAM API operations unless MFA authentication information
-// is included in the request.
-//
-// - You cannot call any STS API except AssumeRole or GetCallerIdentity.
-//
-// We recommend that you do not call GetSessionToken with Amazon Web Services
-// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
-// by creating one or more IAM users, giving them the necessary permissions,
-// and using IAM users for everyday interaction with Amazon Web Services.
-//
-// The credentials that are returned by GetSessionToken are based on permissions
-// associated with the user whose credentials were used to call the operation.
-// If GetSessionToken is called using Amazon Web Services account root user
-// credentials, the temporary credentials have root user permissions. Similarly,
-// if GetSessionToken is called using the credentials of an IAM user, the temporary
-// credentials have the same permissions as the IAM user.
-//
-// For more information about using GetSessionToken to create temporary credentials,
-// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
-// in the IAM User Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation GetSessionToken for usage and error information.
-//
-// Returned Error Codes:
-// - ErrCodeRegionDisabledException "RegionDisabledException"
-// STS is not activated in the requested region for the account that is being
-// asked to generate credentials.
-// The account administrator must use the IAM
-// console to activate STS in that region. For more information, see Activating
-// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
-func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
-	req, out := c.GetSessionTokenRequest(input)
-	return out, req.Send()
-}
-
-// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetSessionToken for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
-	req, out := c.GetSessionTokenRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-type AssumeRoleInput struct {
-	_ struct{} `type:"structure"`
-
-	// The duration, in seconds, of the role session. The value specified can range
-	// from 900 seconds (15 minutes) up to the maximum session duration set for
-	// the role. The maximum session duration setting can have a value from 1 hour
-	// to 12 hours. If you specify a value higher than this setting or the administrator
-	// setting (whichever is lower), the operation fails. For example, if you specify
-	// a session duration of 12 hours, but your administrator set the maximum session
-	// duration to 6 hours, your operation fails.
-	//
-	// Role chaining limits your Amazon Web Services CLI or Amazon Web Services
-	// API role session to a maximum of one hour. When you use the AssumeRole API
-	// operation to assume a role, you can specify the duration of your role session
-	// with the DurationSeconds parameter. You can specify a parameter value of
-	// up to 43200 seconds (12 hours), depending on the maximum session duration
-	// setting for your role. However, if you assume a role using role chaining
-	// and provide a DurationSeconds parameter value greater than one hour, the
-	// operation fails. To learn how to view the maximum value for your role, see
-	// View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
-	// in the IAM User Guide.
-	//
-	// By default, the value is set to 3600 seconds.
-	//
-	// The DurationSeconds parameter is separate from the duration of a console
-	// session that you might request using the returned credentials. The request
-	// to the federation endpoint for a console sign-in token takes a SessionDuration
-	// parameter that specifies the maximum length of the console session. For more
-	// information, see Creating a URL that Enables Federated Users to Access the
-	// Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
-	// in the IAM User Guide.
- DurationSeconds *int64 `min:"900" type:"integer"` - - // A unique identifier that might be required when you assume a role in another - // account. If the administrator of the account to which the role belongs provided - // you with an external ID, then provide that value in the ExternalId parameter. - // This value can be any string, such as a passphrase or account number. A cross-account - // role is usually set up to trust everyone in an account. Therefore, the administrator - // of the trusting account might send an external ID to the administrator of - // the trusted account. That way, only someone with the ID can assume the role, - // rather than everyone in the account. For more information about the external - // ID, see How to Use an External ID When Granting Access to Your Amazon Web - // Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - ExternalId *string `min:"2" type:"string"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent Amazon Web Services API calls - // to access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plaintext that you use for both inline and managed session policies - // can't exceed 2,048 characters. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. 
- // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent Amazon Web Services API calls to access resources in the account - // that owns the role. You cannot use session policies to grant more permissions - // than those allowed by the identity-based policy of the role that is being - // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The Amazon Resource Name (ARN) of the role to assume. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. - // - // Use the role session name to uniquely identify a session when the same role - // is assumed by different principals or for different reasons. In cross-account - // scenarios, the role session name is visible to, and can be logged by the - // account that owns the role. The role session name is also used in the ARN - // of the assumed role principal. This means that subsequent cross-account API - // requests that use the temporary security credentials will expose the role - // session name to the external account in their CloudTrail logs. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SerialNumber *string `min:"9" type:"string"` - - // The source identity specified by the principal that is calling the AssumeRole - // operation. - // - // You can require users to specify a source identity when they assume a role. - // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition - // key to further control access to Amazon Web Services resources based on the - // value of source identity. 
For more information about using source identity, - // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@-. You cannot - // use a value that begins with the text aws:. This prefix is reserved for Amazon - // Web Services internal use. - SourceIdentity *string `min:"2" type:"string"` - - // A list of session tags that you want to pass. Each session tag consists of - // a key name and an associated value. For more information about session tags, - // see Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plaintext - // session tag keys can’t exceed 128 characters, and the values can’t exceed - // 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the role. When you do, session tags override a role tag with the same - // key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This - // means that you cannot have separate Department and department tag keys. Assume - // that the role has the Department=Marketing tag and you pass the department=engineering - // session tag. Department and department are not saved as separate tags, and - // the session tag passed in the request takes precedence over the role tag. - // - // Additionally, if you used temporary credentials to perform this operation, - // the new session inherits any transitive session tags from the calling session. - // If you pass a session tag with the same key as an inherited tag, the operation - // fails. To view the inherited tags for a session, see the CloudTrail logs. - // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) - // in the IAM User Guide. - Tags []*Tag `type:"list"` - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA. (In other words, if the policy includes a condition - // that tests for MFA). If the role being assumed requires MFA and if the TokenCode - // value is missing or expired, the AssumeRole call returns an "access denied" - // error. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` - - // A list of keys for session tags that you want to set as transitive. 
If you - // set a tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. - // - // This parameter is optional. When you set session tags as transitive, the - // session policy and session tags packed binary limit is not affected. - // - // If you choose not to specify a transitive tag key, then no tags are passed - // from this session to any subsequent sessions. - TransitiveTagKeys []*string `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.ExternalId != nil && len(*s.ExternalId) < 2 { - invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.RoleSessionName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) - } - if s.SourceIdentity != nil && len(*s.SourceIdentity) < 2 { - invalidParams.Add(request.NewErrParamMinLen("SourceIdentity", 2)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { - s.DurationSeconds = &v - return s -} - -// SetExternalId sets the ExternalId field's value. 
-func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { - s.ExternalId = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { - s.PolicyArns = v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { - s.RoleArn = &v - return s -} - -// SetRoleSessionName sets the RoleSessionName field's value. -func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { - s.RoleSessionName = &v - return s -} - -// SetSerialNumber sets the SerialNumber field's value. -func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { - s.SerialNumber = &v - return s -} - -// SetSourceIdentity sets the SourceIdentity field's value. -func (s *AssumeRoleInput) SetSourceIdentity(v string) *AssumeRoleInput { - s.SourceIdentity = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { - s.Tags = v - return s -} - -// SetTokenCode sets the TokenCode field's value. -func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { - s.TokenCode = &v - return s -} - -// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. -func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { - s.TransitiveTagKeys = v - return s -} - -// Contains the response to a successful AssumeRole request, including temporary -// Amazon Web Services credentials that can be used to make Amazon Web Services -// requests. -type AssumeRoleOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The source identity specified by the principal that is calling the AssumeRole - // operation. - // - // You can require users to specify a source identity when they assume a role. - // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition - // key to further control access to Amazon Web Services resources based on the - // value of source identity. 
For more information about using source identity, - // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SourceIdentity *string `min:"2" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { - s.AssumedRoleUser = v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { - s.Credentials = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { - s.PackedPolicySize = &v - return s -} - -// SetSourceIdentity sets the SourceIdentity field's value. -func (s *AssumeRoleOutput) SetSourceIdentity(v string) *AssumeRoleOutput { - s.SourceIdentity = &v - return s -} - -type AssumeRoleWithSAMLInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. Your role session lasts for - // the duration that you specify for the DurationSeconds parameter, or until - // the time specified in the SAML authentication response's SessionNotOnOrAfter - // value, whichever is shorter. You can provide a DurationSeconds value from - // 900 seconds (15 minutes) up to the maximum session duration setting for the - // role. This setting can have a value from 1 hour to 12 hours. If you specify - // a value higher than this setting, the operation fails. For example, if you - // specify a session duration of 12 hours, but your administrator set the maximum - // session duration to 6 hours, your operation fails. To learn how to view the - // maximum value for your role, see View the Maximum Session Duration Setting - // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. 
For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent Amazon Web Services API calls - // to access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plaintext that you use for both inline and managed session policies - // can't exceed 2,048 characters. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent Amazon Web Services API calls to access resources in the account - // that owns the role. You cannot use session policies to grant more permissions - // than those allowed by the identity-based policy of the role that is being - // assumed. 
For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes - // the IdP. - // - // PrincipalArn is a required field - PrincipalArn *string `min:"20" type:"string" required:"true"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // The base64 encoded SAML authentication response provided by the IdP. - // - // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the IAM User Guide. - // - // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithSAMLInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithSAMLInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleWithSAMLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PrincipalArn == nil { - invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) - } - if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.SAMLAssertion == nil { - invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) - } - if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { - invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { - s.DurationSeconds = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. 
-func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { - s.PolicyArns = v - return s -} - -// SetPrincipalArn sets the PrincipalArn field's value. -func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { - s.PrincipalArn = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { - s.RoleArn = &v - return s -} - -// SetSAMLAssertion sets the SAMLAssertion field's value. -func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { - s.SAMLAssertion = &v - return s -} - -// Contains the response to a successful AssumeRoleWithSAML request, including -// temporary Amazon Web Services credentials that can be used to make Amazon -// Web Services requests. -type AssumeRoleWithSAMLOutput struct { - _ struct{} `type:"structure"` - - // The identifiers for the temporary security credentials that the operation - // returns. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The value of the Recipient attribute of the SubjectConfirmationData element - // of the SAML assertion. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // The value of the Issuer element of the SAML assertion. - Issuer *string `type:"string"` - - // A hash value based on the concatenation of the following: - // - // * The Issuer response value. - // - // * The Amazon Web Services account ID. - // - // * The friendly name (the last part of the ARN) of the SAML provider in - // IAM. - // - // The combination of NameQualifier and Subject can be used to uniquely identify - // a federated user. - // - // The following pseudocode shows how the hash value is calculated: - // - // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" - // ) ) - NameQualifier *string `type:"string"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The value in the SourceIdentity attribute in the SAML assertion. - // - // You can require users to set a source identity value when they assume a role. - // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. That way, actions that are taken with the role are associated with - // that user. After the source identity is set, the value cannot be changed. - // It is present in the request for all actions that are taken by the role and - // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. You can configure your SAML identity provider to use an attribute - // associated with your users, like user name or email, as the source identity - // when calling AssumeRoleWithSAML. You do this by adding an attribute to the - // SAML assertion. 
For more information about using source identity, see Monitor - // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SourceIdentity *string `min:"2" type:"string"` - - // The value of the NameID element in the Subject element of the SAML assertion. - Subject *string `type:"string"` - - // The format of the name ID, as defined by the Format attribute in the NameID - // element of the SAML assertion. Typical examples of the format are transient - // or persistent. - // - // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, - // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient - // is returned as transient. If the format includes any other prefix, the format - // is returned with no modifications. - SubjectType *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithSAMLOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithSAMLOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { - s.AssumedRoleUser = v - return s -} - -// SetAudience sets the Audience field's value. -func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { - s.Audience = &v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { - s.Credentials = v - return s -} - -// SetIssuer sets the Issuer field's value. -func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { - s.Issuer = &v - return s -} - -// SetNameQualifier sets the NameQualifier field's value. -func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { - s.NameQualifier = &v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { - s.PackedPolicySize = &v - return s -} - -// SetSourceIdentity sets the SourceIdentity field's value. -func (s *AssumeRoleWithSAMLOutput) SetSourceIdentity(v string) *AssumeRoleWithSAMLOutput { - s.SourceIdentity = &v - return s -} - -// SetSubject sets the Subject field's value. -func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { - s.Subject = &v - return s -} - -// SetSubjectType sets the SubjectType field's value. 
-func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { - s.SubjectType = &v - return s -} - -type AssumeRoleWithWebIdentityInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a - // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent Amazon Web Services API calls - // to access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. 
- // However, the plaintext that you use for both inline and managed session policies - // can't exceed 2,048 characters. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent Amazon Web Services API calls to access resources in the account - // that owns the role. You cannot use session policies to grant more permissions - // than those allowed by the identity-based policy of the role that is being - // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The fully qualified host component of the domain name of the OAuth 2.0 identity - // provider. Do not specify this value for an OpenID Connect identity provider. - // - // Currently www.amazon.com and graph.facebook.com are the only supported identity - // providers for OAuth 2.0 access tokens. Do not include URL schemes and port - // numbers. - // - // Do not specify this value for OpenID Connect ID tokens. - ProviderId *string `min:"4" type:"string"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. Typically, you pass the name - // or identifier that is associated with the user who is using your application. - // That way, the temporary security credentials that your application will use - // are associated with that user. This session name is included as part of the - // ARN and assumed role ID in the AssumedRoleUser response element. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The OAuth 2.0 access token or OpenID Connect ID token that is provided by - // the identity provider. Your application must get this token by authenticating - // the user who is using your application with a web identity provider before - // the application makes an AssumeRoleWithWebIdentity call. - // - // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithWebIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithWebIdentityInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleWithWebIdentityInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.ProviderId != nil && len(*s.ProviderId) < 4 { - invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.RoleSessionName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.WebIdentityToken == nil { - invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) - } - if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { - invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { - s.DurationSeconds = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { - s.PolicyArns = v - return s -} - -// SetProviderId sets the ProviderId field's value. -func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { - s.ProviderId = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { - s.RoleArn = &v - return s -} - -// SetRoleSessionName sets the RoleSessionName field's value. -func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { - s.RoleSessionName = &v - return s -} - -// SetWebIdentityToken sets the WebIdentityToken field's value. 
-func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { - s.WebIdentityToken = &v - return s -} - -// Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary Amazon Web Services credentials that can be used to make -// Amazon Web Services requests. -type AssumeRoleWithWebIdentityOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The intended audience (also known as client ID) of the web identity token. - // This is traditionally the client identifier issued to the application that - // requested the web identity token. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The issuing authority of the web identity token presented. For OpenID Connect - // ID tokens, this contains the value of the iss field. For OAuth 2.0 access - // tokens, this contains the value of the ProviderId parameter that was passed - // in the AssumeRoleWithWebIdentity request. - Provider *string `type:"string"` - - // The value of the source identity that is returned in the JSON web token (JWT) - // from the identity provider. - // - // You can require users to set a source identity value when they assume a role. - // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. That way, actions that are taken with the role are associated with - // that user. After the source identity is set, the value cannot be changed. - // It is present in the request for all actions that are taken by the role and - // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. You can configure your identity provider to use an attribute associated - // with your users, like user name or email, as the source identity when calling - // AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web - // token. To learn more about OIDC tokens and claims, see Using Tokens with - // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) - // in the Amazon Cognito Developer Guide. For more information about using source - // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. 
- // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SourceIdentity *string `min:"2" type:"string"` - - // The unique user identifier that is returned by the identity provider. This - // identifier is associated with the WebIdentityToken that was submitted with - // the AssumeRoleWithWebIdentity call. The identifier is typically unique to - // the user and the application that acquired the WebIdentityToken (pairwise - // identifier). For OpenID Connect ID tokens, this field contains the value - // returned by the identity provider as the token's sub (Subject) claim. - SubjectFromWebIdentityToken *string `min:"6" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithWebIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithWebIdentityOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { - s.AssumedRoleUser = v - return s -} - -// SetAudience sets the Audience field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { - s.Audience = &v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { - s.Credentials = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { - s.PackedPolicySize = &v - return s -} - -// SetProvider sets the Provider field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { - s.Provider = &v - return s -} - -// SetSourceIdentity sets the SourceIdentity field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetSourceIdentity(v string) *AssumeRoleWithWebIdentityOutput { - s.SourceIdentity = &v - return s -} - -// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { - s.SubjectFromWebIdentityToken = &v - return s -} - -// The identifiers for the temporary security credentials that the operation -// returns. -type AssumedRoleUser struct { - _ struct{} `type:"structure"` - - // The ARN of the temporary security credentials that are returned from the - // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. 
- // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // A unique identifier that contains the role ID and the role session name of - // the role that is being assumed. The role ID is generated by Amazon Web Services - // when the role is created. - // - // AssumedRoleId is a required field - AssumedRoleId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumedRoleUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumedRoleUser) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { - s.Arn = &v - return s -} - -// SetAssumedRoleId sets the AssumedRoleId field's value. -func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { - s.AssumedRoleId = &v - return s -} - -// Amazon Web Services credentials for API authentication. -type Credentials struct { - _ struct{} `type:"structure"` - - // The access key ID that identifies the temporary security credentials. - // - // AccessKeyId is a required field - AccessKeyId *string `min:"16" type:"string" required:"true"` - - // The date on which the current credentials expire. - // - // Expiration is a required field - Expiration *time.Time `type:"timestamp" required:"true"` - - // The secret access key that can be used to sign requests. - // - // SecretAccessKey is a required field - SecretAccessKey *string `type:"string" required:"true"` - - // The token that users must pass to the service API to use the temporary credentials. - // - // SessionToken is a required field - SessionToken *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Credentials) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Credentials) GoString() string { - return s.String() -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *Credentials) SetAccessKeyId(v string) *Credentials { - s.AccessKeyId = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *Credentials) SetExpiration(v time.Time) *Credentials { - s.Expiration = &v - return s -} - -// SetSecretAccessKey sets the SecretAccessKey field's value. -func (s *Credentials) SetSecretAccessKey(v string) *Credentials { - s.SecretAccessKey = &v - return s -} - -// SetSessionToken sets the SessionToken field's value. 
-func (s *Credentials) SetSessionToken(v string) *Credentials { - s.SessionToken = &v - return s -} - -type DecodeAuthorizationMessageInput struct { - _ struct{} `type:"structure"` - - // The encoded message that was returned with the response. - // - // EncodedMessage is a required field - EncodedMessage *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DecodeAuthorizationMessageInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DecodeAuthorizationMessageInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DecodeAuthorizationMessageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} - if s.EncodedMessage == nil { - invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) - } - if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncodedMessage sets the EncodedMessage field's value. -func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { - s.EncodedMessage = &v - return s -} - -// A document that contains additional information about the authorization status -// of a request from an encoded message that is returned in response to an Amazon -// Web Services request. -type DecodeAuthorizationMessageOutput struct { - _ struct{} `type:"structure"` - - // The API returns a response with the decoded message. - DecodedMessage *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DecodeAuthorizationMessageOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DecodeAuthorizationMessageOutput) GoString() string { - return s.String() -} - -// SetDecodedMessage sets the DecodedMessage field's value. -func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { - s.DecodedMessage = &v - return s -} - -// Identifiers for the federated user that is associated with the credentials. -type FederatedUser struct { - _ struct{} `type:"structure"` - - // The ARN that specifies the federated user that is associated with the credentials. - // For more information about ARNs and how to use them in policies, see IAM - // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. 
- // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // The string that identifies the federated user associated with the credentials, - // similar to the unique ID of an IAM user. - // - // FederatedUserId is a required field - FederatedUserId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s FederatedUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s FederatedUser) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *FederatedUser) SetArn(v string) *FederatedUser { - s.Arn = &v - return s -} - -// SetFederatedUserId sets the FederatedUserId field's value. -func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { - s.FederatedUserId = &v - return s -} - -type GetAccessKeyInfoInput struct { - _ struct{} `type:"structure"` - - // The identifier of an access key. - // - // This parameter allows (through its regex pattern) a string of characters - // that can consist of any upper- or lowercase letter or digit. - // - // AccessKeyId is a required field - AccessKeyId *string `min:"16" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetAccessKeyInfoInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetAccessKeyInfoInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetAccessKeyInfoInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} - if s.AccessKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) - } - if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { - invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { - s.AccessKeyId = &v - return s -} - -type GetAccessKeyInfoOutput struct { - _ struct{} `type:"structure"` - - // The number used to identify the Amazon Web Services account. - Account *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s GetAccessKeyInfoOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetAccessKeyInfoOutput) GoString() string { - return s.String() -} - -// SetAccount sets the Account field's value. -func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { - s.Account = &v - return s -} - -type GetCallerIdentityInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCallerIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCallerIdentityInput) GoString() string { - return s.String() -} - -// Contains the response to a successful GetCallerIdentity request, including -// information about the entity making the request. -type GetCallerIdentityOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Web Services account ID number of the account that owns or contains - // the calling entity. - Account *string `type:"string"` - - // The Amazon Web Services ARN associated with the calling entity. - Arn *string `min:"20" type:"string"` - - // The unique identifier of the calling entity. The exact value depends on the - // type of entity that is making the call. The values returned are those listed - // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) - // found on the Policy Variables reference page in the IAM User Guide. - UserId *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCallerIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCallerIdentityOutput) GoString() string { - return s.String() -} - -// SetAccount sets the Account field's value. -func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { - s.Account = &v - return s -} - -// SetArn sets the Arn field's value. -func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { - s.Arn = &v - return s -} - -// SetUserId sets the UserId field's value. -func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { - s.UserId = &v - return s -} - -type GetFederationTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the session should last. 
Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds - // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using Amazon Web Services account root user credentials are restricted to - // a maximum of 3,600 seconds (one hour). If the specified duration is longer - // than one hour, the session obtained by using root user credentials defaults - // to one hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The name of the federated user. The name is used as an identifier for the - // temporary security credentials (such as Bob). For example, you can reference - // the federated user name in a resource-based policy, such as in an Amazon - // S3 bucket policy. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // Name is a required field - Name *string `min:"2" type:"string" required:"true"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection - // of the IAM user policies and the session policies that you pass. This gives - // you a way to further restrict the permissions for a federated user. You cannot - // use session policies to grant more permissions than those that are defined - // in the permissions policy of the IAM user. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The resulting credentials can be used to access a resource that has a resource-based - // policy. If that policy specifically references the federated user session - // in the Principal element of the policy, the session has the permissions allowed - // by the policy. These permissions are granted in addition to the permissions - // that are granted by the session policies. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as a managed session policy. 
The policies must exist in the same account - // as the IAM user that is requesting federated access. - // - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. The plaintext that you use for both inline - // and managed session policies can't exceed 2,048 characters. You can provide - // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection - // of the IAM user policies and the session policies that you pass. This gives - // you a way to further restrict the permissions for a federated user. You cannot - // use session policies to grant more permissions than those that are defined - // in the permissions policy of the IAM user. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The resulting credentials can be used to access a resource that has a resource-based - // policy. If that policy specifically references the federated user session - // in the Principal element of the policy, the session has the permissions allowed - // by the policy. These permissions are granted in addition to the permissions - // that are granted by the session policies. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // A list of session tags. Each session tag consists of a key name and an associated - // value. For more information about session tags, see Passing Session Tags - // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plaintext - // session tag keys can’t exceed 128 characters and the values can’t exceed - // 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. 
- // - // You can pass a session tag with the same key as a tag that is already attached - // to the user you are federating. When you do, session tags override a user - // tag with the same key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This - // means that you cannot have separate Department and department tag keys. Assume - // that the role has the Department=Marketing tag and you pass the department=engineering - // session tag. Department and department are not saved as separate tags, and - // the session tag passed in the request takes precedence over the role tag. - Tags []*Tag `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetFederationTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetFederationTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetFederationTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { - s.DurationSeconds = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { - s.Name = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { - s.PolicyArns = v - return s -} - -// SetTags sets the Tags field's value. -func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { - s.Tags = v - return s -} - -// Contains the response to a successful GetFederationToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon -// Web Services requests. 
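Pulling the GetFederationTokenInput pieces above together: a sketch of minting federated credentials with an inline session policy and one session tag. The name, duration, policy JSON, and tag are illustrative; as the docs above note, the session policy can only narrow the calling IAM user's permissions, never broaden them.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        input := (&sts.GetFederationTokenInput{}).
            SetName("Bob").           // 2+ chars of upper/lowercase alphanumerics plus _=,.@-
            SetDurationSeconds(3600). // 900..129600 seconds for IAM-user sessions
            SetPolicy(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`).
            SetTags([]*sts.Tag{(&sts.Tag{}).SetKey("Department").SetValue("Marketing")})

        // Catches the modeled constraints (Name required, DurationSeconds >= 900, ...)
        // before the request is signed and sent.
        if err := input.Validate(); err != nil {
            fmt.Println("invalid input:", err)
            return
        }

        out, err := svc.GetFederationToken(input)
        if err != nil {
            fmt.Println("GetFederationToken failed:", err)
            return
        }
        fmt.Println("federated user ARN:", aws.StringValue(out.FederatedUser.Arn))
    }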
-type GetFederationTokenOutput struct { - _ struct{} `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // Identifiers for the federated user associated with the credentials (such - // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You - // can use the federated user's ARN in your resource-based policies, such as - // an Amazon S3 bucket policy. - FederatedUser *FederatedUser `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetFederationTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetFederationTokenOutput) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value. -func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { - s.Credentials = v - return s -} - -// SetFederatedUser sets the FederatedUser field's value. -func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { - s.FederatedUser = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { - s.PackedPolicySize = &v - return s -} - -type GetSessionTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 - // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions - // for Amazon Web Services account owners are restricted to a maximum of 3,600 - // seconds (one hour). If the duration is longer than one hour, the session - // for Amazon Web Services account owners defaults to one hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The identification number of the MFA device that is associated with the IAM - // user who is making the GetSessionToken call. Specify this value if the IAM - // user has a policy that requires MFA authentication. The value is either the - // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource - // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // You can find the device for an IAM user by going to the Amazon Web Services - // Management Console and viewing the user's security credentials. 
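A sketch of the MFA flow these GetSessionToken fields describe; the serial number and token code are placeholders, and the Credentials.Expiration field is assumed from the Credentials shape defined earlier in this file:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
            DurationSeconds: aws.Int64(3600),
            SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // virtual-device ARN form from the docs above
            TokenCode:       aws.String("123456"),                             // six digits from the MFA device
        })
        if err != nil {
            fmt.Println("GetSessionToken failed:", err)
            return
        }
        fmt.Println("temporary credentials expire at:", aws.TimeValue(out.Credentials.Expiration))
    }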
- // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - SerialNumber *string `min:"9" type:"string"` - - // The value provided by the MFA device, if MFA is required. If any policy requires - // the IAM user to submit an MFA code, specify this value. If MFA authentication - // is required, the user must provide a code when requesting a set of temporary - // security credentials. A user who fails to provide the code receives an "access - // denied" response when requesting resources that require MFA authentication. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSessionTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { - s.DurationSeconds = &v - return s -} - -// SetSerialNumber sets the SerialNumber field's value. -func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { - s.SerialNumber = &v - return s -} - -// SetTokenCode sets the TokenCode field's value. -func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { - s.TokenCode = &v - return s -} - -// Contains the response to a successful GetSessionToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon -// Web Services requests. -type GetSessionTokenOutput struct { - _ struct{} `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionTokenOutput) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value. -func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { - s.Credentials = v - return s -} - -// A reference to the IAM managed policy that is passed as a session policy -// for a role session or a federated user session. -type PolicyDescriptorType struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session - // policy for the role. For more information about ARNs, see Amazon Resource - // Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. - Arn *string `locationName:"arn" min:"20" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PolicyDescriptorType) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PolicyDescriptorType) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PolicyDescriptorType) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} - if s.Arn != nil && len(*s.Arn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetArn sets the Arn field's value. -func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { - s.Arn = &v - return s -} - -// You can pass custom key-value pair attributes when you assume a role or federate -// a user. These are called session tags. You can then use the session tags -// to control access to resources. For more information, see Tagging Amazon -// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -type Tag struct { - _ struct{} `type:"structure"` - - // The key for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag keys can’t - // exceed 128 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // The value for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag values can’t - // exceed 256 characters. 
For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // Value is a required field - Value *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go deleted file mode 100644 index d5307fcaa0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go +++ /dev/null @@ -1,11 +0,0 @@ -package sts - -import "github.com/aws/aws-sdk-go/aws/request" - -func init() { - initRequest = customizeRequest -} - -func customizeRequest(r *request.Request) { - r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go deleted file mode 100644 index c40f5a2a52..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package sts provides the client and types for making API -// requests to AWS Security Token Service. -// -// Security Token Service (STS) enables you to request temporary, limited-privilege -// credentials for Identity and Access Management (IAM) users or for users that -// you authenticate (federated users). This guide provides descriptions of the -// STS API. For more information about using this service, see Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). -// -// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. -// -// See sts package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ -// -// # Using the Client -// -// To contact AWS Security Token Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. 
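To make the package overview concrete: a minimal sketch of constructing the client as described and issuing the simplest call, GetCallerIdentity, which takes no required inputs; region and credentials are assumed to come from the environment or shared config.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        // One client per service; safe to share across goroutines.
        svc := sts.New(session.Must(session.NewSession()))

        out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
        if err != nil {
            fmt.Println("GetCallerIdentity failed:", err)
            return
        }
        fmt.Printf("account=%s arn=%s userid=%s\n",
            aws.StringValue(out.Account), aws.StringValue(out.Arn), aws.StringValue(out.UserId))
    }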
-// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Security Token Service client STS for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New -package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go deleted file mode 100644 index b680bbd5d7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ /dev/null @@ -1,84 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -const ( - - // ErrCodeExpiredTokenException for service response error code - // "ExpiredTokenException". - // - // The web identity token that was passed is expired or is not valid. Get a - // new identity token from the identity provider and then retry the request. - ErrCodeExpiredTokenException = "ExpiredTokenException" - - // ErrCodeIDPCommunicationErrorException for service response error code - // "IDPCommunicationError". - // - // The request could not be fulfilled because the identity provider (IDP) that - // was asked to verify the incoming identity token could not be reached. This - // is often a transient error caused by network conditions. Retry the request - // a limited number of times so that you don't exceed the request rate. If the - // error persists, the identity provider might be down or not responding. - ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" - - // ErrCodeIDPRejectedClaimException for service response error code - // "IDPRejectedClaim". - // - // The identity provider (IdP) reported that authentication failed. This might - // be because the claim is invalid. - // - // If this error is returned for the AssumeRoleWithWebIdentity operation, it - // can also mean that the claim has expired or has been explicitly revoked. - ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" - - // ErrCodeInvalidAuthorizationMessageException for service response error code - // "InvalidAuthorizationMessageException". - // - // The error returned if the message passed to DecodeAuthorizationMessage was - // invalid. This can happen if the token contains invalid characters, such as - // linebreaks. - ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" - - // ErrCodeInvalidIdentityTokenException for service response error code - // "InvalidIdentityToken". - // - // The web identity token that was passed could not be validated by Amazon Web - // Services. Get a new identity token from the identity provider and then retry - // the request. - ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" - - // ErrCodeMalformedPolicyDocumentException for service response error code - // "MalformedPolicyDocument". - // - // The request was rejected because the policy document was malformed. The error - // message describes the specific error. - ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" - - // ErrCodePackedPolicyTooLargeException for service response error code - // "PackedPolicyTooLarge". - // - // The request was rejected because the total packed size of the session policies - // and session tags combined was too large. 
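These constants are compared against the Code() of an awserr.Error returned by the client; a sketch of singling out the transient IDPCommunicationError described above for retry (the error here is simulated with awserr.New for illustration):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    // isTransientIDPError reports whether err carries the retriable
    // IDPCommunicationError code described above.
    func isTransientIDPError(err error) bool {
        if aerr, ok := err.(awserr.Error); ok {
            return aerr.Code() == sts.ErrCodeIDPCommunicationErrorException
        }
        return false
    }

    func main() {
        // Simulate the service error; real calls return this from the client.
        var err error = awserr.New(sts.ErrCodeIDPCommunicationErrorException, "IdP unreachable", nil)
        fmt.Println("retry?", isTransientIDPError(err))
    }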
An Amazon Web Services conversion - compresses the session policy document, session policy ARNs, and session - tags into a packed binary format that has a separate limit. The error message - indicates by percentage how close the policies and tags are to the upper - size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // You could receive this error even though you meet other defined session policy - // and session tag limits. For more information, see IAM and STS Entity Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" - - // ErrCodeRegionDisabledException for service response error code - // "RegionDisabledException". - // - // STS is not activated in the requested region for the account that is being - // asked to generate credentials. The account administrator must use the IAM - // console to activate STS in that region. For more information, see Activating - // and Deactivating Amazon Web Services STS in an Amazon Web Services Region - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) - // in the IAM User Guide. - ErrCodeRegionDisabledException = "RegionDisabledException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go deleted file mode 100644 index 12327d0533..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ /dev/null @@ -1,104 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/query" -) - -// STS provides the API operation methods for making requests to -// AWS Security Token Service. See this package's package overview docs -// for details on the service. -// -// STS methods are safe to use concurrently. It is not safe to -// mutate any of the struct's properties though. -type STS struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "sts" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "STS" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the STS client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// -// mySession := session.Must(session.NewSession()) -// -// // Create a STS client from just a session. -// svc := sts.New(mySession) -// -// // Create a STS client with additional configuration -// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { - c := p.ClientConfig(EndpointsID, cfgs...)
- if c.SigningNameDerived || len(c.SigningName) == 0 { - c.SigningName = EndpointsID - // No Fallback - } - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *STS { - svc := &STS{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2011-06-15", - ResolvedRegion: resolvedRegion, - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(query.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a STS operation and runs any -// custom request initialization. -func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go deleted file mode 100644 index bf06b2e7d0..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go +++ /dev/null @@ -1,96 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package stsiface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/sts" -) - -// STSAPI provides an interface to enable mocking the -// sts.STS service client's API operations, -// paginators, and waiters. This makes unit testing your code that calls out -// to the SDK's service clients easier. -// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // AWS Security Token Service. -// func myFunc(svc stsiface.STSAPI) bool { -// // Make svc.AssumeRole request -// } -// -// func main() { -// sess := session.New() -// svc := sts.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc.
-// type mockSTSClient struct { -// stsiface.STSAPI -// } -// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSTSClient{} -// -// myFunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. It's suggested to use the pattern above for testing, or using -// tooling to generate mocks to satisfy the interfaces. -type STSAPI interface { - AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) - AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) - AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) - - AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) - AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) - AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) - - AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) - AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) - AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) - - DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) - DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) - DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) - - GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) - GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) - GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) - - GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) - GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) - GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) - - GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) - GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) - GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) - - GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) - GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) - GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) -} - -var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/bgentry/go-netrc/LICENSE b/vendor/github.com/bgentry/go-netrc/LICENSE deleted file mode 100644 index aade9a58b1..0000000000 ---
a/vendor/github.com/bgentry/go-netrc/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Original version Copyright © 2010 Fazlul Shahriar . Newer -portions Copyright © 2014 Blake Gentry . - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/bgentry/go-netrc/netrc/netrc.go b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go deleted file mode 100644 index ea49987c08..0000000000 --- a/vendor/github.com/bgentry/go-netrc/netrc/netrc.go +++ /dev/null @@ -1,510 +0,0 @@ -package netrc - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -type tkType int - -const ( - tkMachine tkType = iota - tkDefault - tkLogin - tkPassword - tkAccount - tkMacdef - tkComment - tkWhitespace -) - -var keywords = map[string]tkType{ - "machine": tkMachine, - "default": tkDefault, - "login": tkLogin, - "password": tkPassword, - "account": tkAccount, - "macdef": tkMacdef, - "#": tkComment, -} - -type Netrc struct { - tokens []*token - machines []*Machine - macros Macros - updateLock sync.Mutex -} - -// FindMachine returns the Machine in n named by name. If a machine named by -// name exists, it is returned. If no Machine with name name is found and there -// is a ``default'' machine, the ``default'' machine is returned. Otherwise, nil -// is returned. -func (n *Netrc) FindMachine(name string) (m *Machine) { - // TODO(bgentry): not safe for concurrency - var def *Machine - for _, m = range n.machines { - if m.Name == name { - return m - } - if m.IsDefault() { - def = m - } - } - if def == nil { - return nil - } - return def -} - -// MarshalText implements the encoding.TextMarshaler interface to encode a -// Netrc into text format. -func (n *Netrc) MarshalText() (text []byte, err error) { - // TODO(bgentry): not safe for concurrency - for i := range n.tokens { - switch n.tokens[i].kind { - case tkComment, tkDefault, tkWhitespace: // always append these types - text = append(text, n.tokens[i].rawkind...) - default: - if n.tokens[i].value != "" { // skip empty-value tokens - text = append(text, n.tokens[i].rawkind...) - } - } - if n.tokens[i].kind == tkMacdef { - text = append(text, ' ') - text = append(text, n.tokens[i].macroName...) - } - text = append(text, n.tokens[i].rawvalue...) 
- } - return -} - -func (n *Netrc) NewMachine(name, login, password, account string) *Machine { - n.updateLock.Lock() - defer n.updateLock.Unlock() - - prefix := "\n" - if len(n.tokens) == 0 { - prefix = "" - } - m := &Machine{ - Name: name, - Login: login, - Password: password, - Account: account, - - nametoken: &token{ - kind: tkMachine, - rawkind: []byte(prefix + "machine"), - value: name, - rawvalue: []byte(" " + name), - }, - logintoken: &token{ - kind: tkLogin, - rawkind: []byte("\n\tlogin"), - value: login, - rawvalue: []byte(" " + login), - }, - passtoken: &token{ - kind: tkPassword, - rawkind: []byte("\n\tpassword"), - value: password, - rawvalue: []byte(" " + password), - }, - accounttoken: &token{ - kind: tkAccount, - rawkind: []byte("\n\taccount"), - value: account, - rawvalue: []byte(" " + account), - }, - } - n.insertMachineTokensBeforeDefault(m) - for i := range n.machines { - if n.machines[i].IsDefault() { - n.machines = append(append(n.machines[:i], m), n.machines[i:]...) - return m - } - } - n.machines = append(n.machines, m) - return m -} - -func (n *Netrc) insertMachineTokensBeforeDefault(m *Machine) { - newtokens := []*token{m.nametoken} - if m.logintoken.value != "" { - newtokens = append(newtokens, m.logintoken) - } - if m.passtoken.value != "" { - newtokens = append(newtokens, m.passtoken) - } - if m.accounttoken.value != "" { - newtokens = append(newtokens, m.accounttoken) - } - for i := range n.tokens { - if n.tokens[i].kind == tkDefault { - // found the default, now insert tokens before it - n.tokens = append(n.tokens[:i], append(newtokens, n.tokens[i:]...)...) - return - } - } - // didn't find a default, just add the newtokens to the end - n.tokens = append(n.tokens, newtokens...) - return -} - -func (n *Netrc) RemoveMachine(name string) { - n.updateLock.Lock() - defer n.updateLock.Unlock() - - for i := range n.machines { - if n.machines[i] != nil && n.machines[i].Name == name { - m := n.machines[i] - for _, t := range []*token{ - m.nametoken, m.logintoken, m.passtoken, m.accounttoken, - } { - n.removeToken(t) - } - n.machines = append(n.machines[:i], n.machines[i+1:]...) - return - } - } -} - -func (n *Netrc) removeToken(t *token) { - if t != nil { - for i := range n.tokens { - if n.tokens[i] == t { - n.tokens = append(n.tokens[:i], n.tokens[i+1:]...) - return - } - } - } -} - -// Machine contains information about a remote machine. -type Machine struct { - Name string - Login string - Password string - Account string - - nametoken *token - logintoken *token - passtoken *token - accounttoken *token -} - -// IsDefault returns true if the machine is a "default" token, denoted by an -// empty name. -func (m *Machine) IsDefault() bool { - return m.Name == "" -} - -// UpdatePassword sets the password for the Machine m. -func (m *Machine) UpdatePassword(newpass string) { - m.Password = newpass - updateTokenValue(m.passtoken, newpass) -} - -// UpdateLogin sets the login for the Machine m. -func (m *Machine) UpdateLogin(newlogin string) { - m.Login = newlogin - updateTokenValue(m.logintoken, newlogin) -} - -// UpdateAccount sets the account for the Machine m.
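The Machine helpers above combine with ParseFile, FindMachine, and MarshalText (defined elsewhere in this file) for a read-modify-write of a netrc file; a sketch with placeholder path, hostname, and credentials:

    package main

    import (
        "fmt"
        "io/ioutil"

        "github.com/bgentry/go-netrc/netrc"
    )

    func main() {
        const path = "/home/user/.netrc" // placeholder

        n, err := netrc.ParseFile(path)
        if err != nil {
            fmt.Println("parse failed:", err)
            return
        }

        // FindMachine falls back to the ``default'' machine and may return nil.
        m := n.FindMachine("api.example.com")
        if m == nil {
            m = n.NewMachine("api.example.com", "user", "new-password", "")
        } else {
            m.UpdatePassword("new-password") // also rewrites the underlying token text
        }

        text, err := n.MarshalText()
        if err != nil {
            fmt.Println("marshal failed:", err)
            return
        }
        _ = ioutil.WriteFile(path, text, 0600)
    }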
-func (m *Machine) UpdateAccount(newaccount string) { - m.Account = newaccount - updateTokenValue(m.accounttoken, newaccount) -} - -func updateTokenValue(t *token, value string) { - oldvalue := t.value - t.value = value - newraw := make([]byte, len(t.rawvalue)) - copy(newraw, t.rawvalue) - t.rawvalue = append( - bytes.TrimSuffix(newraw, []byte(oldvalue)), - []byte(value)..., - ) -} - -// Macros contains all the macro definitions in a netrc file. -type Macros map[string]string - -type token struct { - kind tkType - macroName string - value string - rawkind []byte - rawvalue []byte -} - -// Error represents a netrc file parse error. -type Error struct { - LineNum int // Line number - Msg string // Error message -} - -// Error returns a string representation of error e. -func (e *Error) Error() string { - return fmt.Sprintf("line %d: %s", e.LineNum, e.Msg) -} - -func (e *Error) BadDefaultOrder() bool { - return e.Msg == errBadDefaultOrder -} - -const errBadDefaultOrder = "default token must appear after all machine tokens" - -// scanLinesKeepPrefix is a split function for a Scanner that returns each line -// of text. The returned token may include newlines if they are before the -// first non-space character. The returned line may be empty. The end-of-line -// marker is one optional carriage return followed by one mandatory newline. In -// regular expression notation, it is `\r?\n`. The last non-empty line of -// input will be returned even if it has no newline. -func scanLinesKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - // Skip leading spaces. - start := 0 - for width := 0; start < len(data); start += width { - var r rune - r, width = utf8.DecodeRune(data[start:]) - if !unicode.IsSpace(r) { - break - } - } - if i := bytes.IndexByte(data[start:], '\n'); i >= 0 { - // We have a full newline-terminated line. - return start + i, data[0 : start+i], nil - } - // If we're at EOF, we have a final, non-terminated line. Return it. - if atEOF { - return len(data), data, nil - } - // Request more data. - return 0, nil, nil -} - -// scanTokensKeepPrefix is a split function for a Scanner that returns each -// space-separated word of text, with prefixing spaces included. It will never -// return an empty string. The definition of space is set by unicode.IsSpace. -// -// Adapted from bufio.ScanWords(). -func scanTokensKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) { - // Skip leading spaces. - start := 0 - for width := 0; start < len(data); start += width { - var r rune - r, width = utf8.DecodeRune(data[start:]) - if !unicode.IsSpace(r) { - break - } - } - if atEOF && len(data) == 0 || start == len(data) { - return len(data), data, nil - } - if len(data) > start && data[start] == '#' { - return scanLinesKeepPrefix(data, atEOF) - } - // Scan until space, marking end of word. - for width, i := 0, start; i < len(data); i += width { - var r rune - r, width = utf8.DecodeRune(data[i:]) - if unicode.IsSpace(r) { - return i, data[:i], nil - } - } - // If we're at EOF, we have a final, non-empty, non-terminated word. Return it. - if atEOF && len(data) > start { - return len(data), data, nil - } - // Request more data.
- return 0, nil, nil -} - -func newToken(rawb []byte) (*token, error) { - _, tkind, err := bufio.ScanWords(rawb, true) - if err != nil { - return nil, err - } - var ok bool - t := token{rawkind: rawb} - t.kind, ok = keywords[string(tkind)] - if !ok { - trimmed := strings.TrimSpace(string(tkind)) - if trimmed == "" { - t.kind = tkWhitespace // whitespace-only, should happen only at EOF - return &t, nil - } - if strings.HasPrefix(trimmed, "#") { - t.kind = tkComment // this is a comment - return &t, nil - } - return &t, fmt.Errorf("keyword expected; got " + string(tkind)) - } - return &t, nil -} - -func scanValue(scanner *bufio.Scanner, pos int) ([]byte, string, int, error) { - if scanner.Scan() { - raw := scanner.Bytes() - pos += bytes.Count(raw, []byte{'\n'}) - return raw, strings.TrimSpace(string(raw)), pos, nil - } - if err := scanner.Err(); err != nil { - return nil, "", pos, &Error{pos, err.Error()} - } - return nil, "", pos, nil -} - -func parse(r io.Reader, pos int) (*Netrc, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - nrc := Netrc{machines: make([]*Machine, 0, 20), macros: make(Macros, 10)} - - defaultSeen := false - var currentMacro *token - var m *Machine - var t *token - scanner := bufio.NewScanner(bytes.NewReader(b)) - scanner.Split(scanTokensKeepPrefix) - - for scanner.Scan() { - rawb := scanner.Bytes() - if len(rawb) == 0 { - break - } - pos += bytes.Count(rawb, []byte{'\n'}) - t, err = newToken(rawb) - if err != nil { - if currentMacro == nil { - return nil, &Error{pos, err.Error()} - } - currentMacro.rawvalue = append(currentMacro.rawvalue, rawb...) - continue - } - - if currentMacro != nil && bytes.Contains(rawb, []byte{'\n', '\n'}) { - // if macro rawvalue + rawb would contain \n\n, then macro def is over - currentMacro.value = strings.TrimLeft(string(currentMacro.rawvalue), "\r\n") - nrc.macros[currentMacro.macroName] = currentMacro.value - currentMacro = nil - } - - switch t.kind { - case tkMacdef: - if _, t.macroName, pos, err = scanValue(scanner, pos); err != nil { - return nil, &Error{pos, err.Error()} - } - currentMacro = t - case tkDefault: - if defaultSeen { - return nil, &Error{pos, "multiple default token"} - } - if m != nil { - nrc.machines, m = append(nrc.machines, m), nil - } - m = new(Machine) - m.Name = "" - defaultSeen = true - case tkMachine: - if defaultSeen { - return nil, &Error{pos, errBadDefaultOrder} - } - if m != nil { - nrc.machines, m = append(nrc.machines, m), nil - } - m = new(Machine) - if t.rawvalue, m.Name, pos, err = scanValue(scanner, pos); err != nil { - return nil, &Error{pos, err.Error()} - } - t.value = m.Name - m.nametoken = t - case tkLogin: - if m == nil || m.Login != "" { - return nil, &Error{pos, "unexpected token login "} - } - if t.rawvalue, m.Login, pos, err = scanValue(scanner, pos); err != nil { - return nil, &Error{pos, err.Error()} - } - t.value = m.Login - m.logintoken = t - case tkPassword: - if m == nil || m.Password != "" { - return nil, &Error{pos, "unexpected token password"} - } - if t.rawvalue, m.Password, pos, err = scanValue(scanner, pos); err != nil { - return nil, &Error{pos, err.Error()} - } - t.value = m.Password - m.passtoken = t - case tkAccount: - if m == nil || m.Account != "" { - return nil, &Error{pos, "unexpected token account"} - } - if t.rawvalue, m.Account, pos, err = scanValue(scanner, pos); err != nil { - return nil, &Error{pos, err.Error()} - } - t.value = m.Account - m.accounttoken = t - } - - nrc.tokens = append(nrc.tokens, t) - } - - if err := scanner.Err(); 
err != nil { - return nil, err - } - - if m != nil { - nrc.machines, m = append(nrc.machines, m), nil - } - return &nrc, nil -} - -// ParseFile opens the file at filename and then passes its io.Reader to -// Parse(). -func ParseFile(filename string) (*Netrc, error) { - fd, err := os.Open(filename) - if err != nil { - return nil, err - } - defer fd.Close() - return Parse(fd) -} - -// Parse parses from the Reader r as a netrc file and returns the set of -// machine information and macros defined in it. The ``default'' machine, -// which is intended to be used when no machine name matches, is identified -// by an empty machine name. There can be only one ``default'' machine. -// -// If there is a parsing error, an Error is returned. -func Parse(r io.Reader) (*Netrc, error) { - return parse(r, 1) -} - -// FindMachine parses the netrc file identified by filename and returns the -// Machine named by name. If a problem occurs parsing the file at filename, an -// error is returned. If a machine named by name exists, it is returned. If no -// Machine with name name is found and there is a ``default'' machine, the -// ``default'' machine is returned. Otherwise, nil is returned. -func FindMachine(filename, name string) (m *Machine, err error) { - n, err := ParseFile(filename) - if err != nil { - return nil, err - } - return n.FindMachine(name), nil -} diff --git a/vendor/github.com/bgentry/speakeasy/.gitignore b/vendor/github.com/bgentry/speakeasy/.gitignore deleted file mode 100644 index 9e1311461e..0000000000 --- a/vendor/github.com/bgentry/speakeasy/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -example/example -example/example.exe diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE b/vendor/github.com/bgentry/speakeasy/LICENSE deleted file mode 100644 index 37d60fc354..0000000000 --- a/vendor/github.com/bgentry/speakeasy/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -MIT License - -Copyright (c) 2017 Blake Gentry - -This license applies to the non-Windows portions of this library. The Windows -portion maintains its own Apache 2.0 license. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS deleted file mode 100644 index ff177f6124..0000000000 --- a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions.
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [2013] [the CloudFoundry Authors] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
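For reference, the `netrc` API removed at the top of this hunk (`Parse`, `ParseFile`, `FindMachine`) is self-describing from its doc comments. A minimal sketch of a typical call site follows, assuming the upstream `github.com/bgentry/go-netrc/netrc` import path rather than the deleted vendor copy; the hostname and `.netrc` location are illustrative only.

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/bgentry/go-netrc/netrc" // assumed upstream path of the removed vendor copy
)

func main() {
	home, err := os.UserHomeDir()
	if err != nil {
		log.Fatal(err)
	}

	// FindMachine falls back to the ``default'' machine (empty Name) when no
	// entry matches, and returns nil when there is no default entry either.
	machine, err := netrc.FindMachine(filepath.Join(home, ".netrc"), "api.github.com")
	if err != nil {
		log.Fatal(err) // parse problems come back as an Error carrying a line position
	}
	if machine == nil {
		fmt.Println("no matching machine and no default entry")
		return
	}

	fmt.Printf("login %q found for machine %q\n", machine.Login, machine.Name)
}
```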
diff --git a/vendor/github.com/bgentry/speakeasy/Readme.md b/vendor/github.com/bgentry/speakeasy/Readme.md deleted file mode 100644 index fceda7518c..0000000000 --- a/vendor/github.com/bgentry/speakeasy/Readme.md +++ /dev/null @@ -1,30 +0,0 @@ -# Speakeasy - -This package provides cross-platform Go (#golang) helpers for taking user input -from the terminal while not echoing the input back (similar to `getpasswd`). The -package uses syscalls to avoid any dependence on cgo, and is therefore -compatible with cross-compiling. - -[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc] - -## Unicode - -Multi-byte unicode characters work successfully on Mac OS X. On Windows, -however, this may be problematic (as is UTF in general on Windows). Other -platforms have not been tested. - -## License - -The code herein was not written by me, but was compiled from two separate open -source packages. Unix portions were imported from [gopass][gopass], while -Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s -[Windows terminal helpers][cf-ui-windows]. - -The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly -from the source (though I attempted to fill in the correct owner in the -boilerplate copyright notice). - -[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI" -[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers" -[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org" -[gopass]: https://code.google.com/p/gopass "gopass" diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy.go b/vendor/github.com/bgentry/speakeasy/speakeasy.go deleted file mode 100644 index 71c1dd1b96..0000000000 --- a/vendor/github.com/bgentry/speakeasy/speakeasy.go +++ /dev/null @@ -1,49 +0,0 @@ -package speakeasy - -import ( - "fmt" - "io" - "os" - "strings" -) - -// Ask the user to enter a password with input hidden. prompt is a string to -// display before the user's input. Returns the provided password, or an error -// if the command failed. -func Ask(prompt string) (password string, err error) { - return FAsk(os.Stdout, prompt) -} - -// FAsk is the same as Ask, except it is possible to specify the file to write -// the prompt to. If 'nil' is passed as the writer, no prompt will be written. -func FAsk(wr io.Writer, prompt string) (password string, err error) { - if wr != nil && prompt != "" { - fmt.Fprint(wr, prompt) // Display the prompt. - } - password, err = getPassword() - - // Carriage return after the user input. 
- if wr != nil { - fmt.Fprintln(wr, "") - } - return -} - -func readline() (value string, err error) { - var valb []byte - var n int - b := make([]byte, 1) - for { - // read one byte at a time so we don't accidentally read extra bytes - n, err = os.Stdin.Read(b) - if err != nil && err != io.EOF { - return "", err - } - if n == 0 || b[0] == '\n' { - break - } - valb = append(valb, b[0]) - } - - return strings.TrimSuffix(string(valb), "\r"), nil -} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go deleted file mode 100644 index d99fda1919..0000000000 --- a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go +++ /dev/null @@ -1,93 +0,0 @@ -// based on https://code.google.com/p/gopass -// Author: johnsiilver@gmail.com (John Doak) -// -// Original code is based on code by RogerV in the golang-nuts thread: -// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247 - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package speakeasy - -import ( - "fmt" - "os" - "os/signal" - "strings" - "syscall" -) - -const sttyArg0 = "/bin/stty" - -var ( - sttyArgvEOff = []string{"stty", "-echo"} - sttyArgvEOn = []string{"stty", "echo"} -) - -// getPassword gets input hidden from the terminal from a user. This is -// accomplished by turning off terminal echo, reading input from the user and -// finally turning on terminal echo. -func getPassword() (password string, err error) { - sig := make(chan os.Signal, 10) - brk := make(chan bool) - - // File descriptors for stdin, stdout, and stderr. - fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()} - - // Setup notifications of termination signals to channel sig, create a process to - // watch for these signals so we can turn back on echo if need be. - signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, - syscall.SIGTERM) - go catchSignal(fd, sig, brk) - - // Turn off the terminal echo. - pid, err := echoOff(fd) - if err != nil { - return "", err - } - - // Turn on the terminal echo and stop listening for signals. - defer signal.Stop(sig) - defer close(brk) - defer echoOn(fd) - - syscall.Wait4(pid, nil, 0, nil) - - line, err := readline() - if err == nil { - password = strings.TrimSpace(line) - } else { - err = fmt.Errorf("failed during password entry: %s", err) - } - - return password, err -} - -// echoOff turns off the terminal echo. -func echoOff(fd []uintptr) (int, error) { - pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd}) - if err != nil { - return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err) - } - return pid, nil -} - -// echoOn turns back on the terminal echo. -func echoOn(fd []uintptr) { - // Turn on the terminal echo. - pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd}) - if e == nil { - syscall.Wait4(pid, nil, 0, nil) - } -} - -// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn -// terminal echo back on before the program ends. Otherwise the user is left -// with echo off on their terminal. 
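The removed `speakeasy` package boils down to a one-call API built on the `getPassword`/`readline` helpers above. A minimal sketch of the typical call site, using the import path the vendor directory implies:

```go
package main

import (
	"fmt"
	"log"

	"github.com/bgentry/speakeasy"
)

func main() {
	// Ask prints the prompt, disables terminal echo (via stty on Unix, or
	// SetConsoleMode on Windows), reads one line, then restores echo.
	password, err := speakeasy.Ask("Password: ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("read %d characters\n", len(password))
}
```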
-func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) { - select { - case <-sig: - echoOn(fd) - os.Exit(-1) - case <-brk: - } -} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go deleted file mode 100644 index c2093a8091..0000000000 --- a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build windows - -package speakeasy - -import ( - "syscall" -) - -// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT: -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx -const ENABLE_ECHO_INPUT = 0x0004 - -func getPassword() (password string, err error) { - var oldMode uint32 - - err = syscall.GetConsoleMode(syscall.Stdin, &oldMode) - if err != nil { - return - } - - var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT) - - err = setConsoleMode(syscall.Stdin, newMode) - defer setConsoleMode(syscall.Stdin, oldMode) - if err != nil { - return - } - - return readline() -} - -func setConsoleMode(console syscall.Handle, mode uint32) (err error) { - dll := syscall.MustLoadDLL("kernel32") - proc := dll.MustFindProc("SetConsoleMode") - r, _, err := proc.Call(uintptr(console), uintptr(mode)) - - if r == 0 { - return err - } - return nil -} diff --git a/vendor/github.com/bkielbasa/cyclop/pkg/analyzer/analyzer.go b/vendor/github.com/bkielbasa/cyclop/pkg/analyzer/analyzer.go index 9b2801352d..eaf408d6f3 100644 --- a/vendor/github.com/bkielbasa/cyclop/pkg/analyzer/analyzer.go +++ b/vendor/github.com/bkielbasa/cyclop/pkg/analyzer/analyzer.go @@ -9,13 +9,22 @@ import ( "golang.org/x/tools/go/analysis" ) +//nolint:gochecknoglobals +var flagSet flag.FlagSet + +//nolint:gochecknoglobals var ( - flagSet flag.FlagSet + maxComplexity int + packageAverage float64 + skipTests bool ) -var maxComplexity int -var packageAverage float64 -var skipTests bool +//nolint:gochecknoinits +func init() { + flagSet.IntVar(&maxComplexity, "maxComplexity", 10, "max complexity the function can have") + flagSet.Float64Var(&packageAverage, "packageAverage", 0, "max average complexity in package") + flagSet.BoolVar(&skipTests, "skipTests", false, "should the linter execute on test files as well") +} func NewAnalyzer() *analysis.Analyzer { return &analysis.Analyzer{ @@ -26,12 +35,6 @@ func NewAnalyzer() *analysis.Analyzer { } } -func init() { - flagSet.IntVar(&maxComplexity, "maxComplexity", 10, "max complexity the function can have") - flagSet.Float64Var(&packageAverage, "packageAverage", 0, "max avarage complexity in package") - flagSet.BoolVar(&skipTests, "skipTests", false, "should the linter execute on test files as well") -} - func run(pass *analysis.Pass) (interface{}, error) { var sum, count float64 var pkgName string @@ -70,7 +73,7 @@ func run(pass *analysis.Pass) (interface{}, error) { if packageAverage > 0 { avg := sum / count if avg > packageAverage { - pass.Reportf(pkgPos, "the avarage complexity for the package %s is %f, max is %f", pkgName, avg, packageAverage) + pass.Reportf(pkgPos, "the average complexity for the package %s is %f, max is %f", pkgName, avg, packageAverage) } } diff --git a/vendor/github.com/blizzy78/varnamelen/.editorconfig b/vendor/github.com/blizzy78/varnamelen/.editorconfig new file mode 100644 index 0000000000..7b64615455 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/.editorconfig @@ -0,0 +1,13 @@ +root = true + +[**] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +indent_style = tab +indent_size = 
4 +trim_trailing_whitespace = true + +[**/*.yml] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/blizzy78/varnamelen/.gitignore b/vendor/github.com/blizzy78/varnamelen/.gitignore new file mode 100644 index 0000000000..1a4a71669f --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/.gitignore @@ -0,0 +1 @@ +/cmd/__debug_bin diff --git a/vendor/github.com/blizzy78/varnamelen/.golangci.yml b/vendor/github.com/blizzy78/varnamelen/.golangci.yml new file mode 100644 index 0000000000..566572363c --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/.golangci.yml @@ -0,0 +1,70 @@ +# https://github.com/golangci/golangci-lint/issues/456#issuecomment-617470264 +issues: + exclude-use-default: false + exclude: + # errcheck: Almost all programs ignore errors on these functions and in most cases it's ok + - Error return value of .((os\.)?std(out|err)\..*|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + # golint: False positive when tests are defined in package 'test' + - func name will be used as test\.Test.* by other packages, and that stutters; consider calling this + # gosec: Duplicated errcheck checks + - G104 + # gosec: Too many issues in popular repos + - (Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less) + # gosec: False positive is triggered by 'src, err := ioutil.ReadFile(filename)' + - Potential file inclusion via variable + +linters: + enable: + - asciicheck + - bodyclose + - cyclop + - durationcheck + - errname + - errorlint + - exportloopref + - forcetypeassert + - gocognit + - gocritic + - goerr113 + - gofmt + - goprintffuncname + - gosec + - ifshort + - nakedret + - nestif + - nilerr + - noctx + - nolintlint + - prealloc + - predeclared + - promlinter + - revive + - rowserrcheck + - sqlclosecheck + - stylecheck + - thelper + - tparallel + - unconvert + - unparam + - varnamelen + - wastedassign + - wrapcheck + - wsl + +linters-settings: + gocognit: + min-complexity: 15 + nakedret: + max-func-lines: 0 + nolintlint: + allow-unused: false + allow-leading-space: false + require-explanation: true + require-specific: true + unused: + go: 1.16 + varnamelen: + check-return: true + ignore-type-assert-ok: true + ignore-map-index-ok: true + ignore-chan-recv-ok: true diff --git a/vendor/github.com/blizzy78/varnamelen/LICENSE b/vendor/github.com/blizzy78/varnamelen/LICENSE new file mode 100644 index 0000000000..c45156a2c3 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/LICENSE @@ -0,0 +1,18 @@ +Copyright 2021-2022 Maik Schreiber + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/blizzy78/varnamelen/README.md b/vendor/github.com/blizzy78/varnamelen/README.md new file mode 100644 index 0000000000..02131ff298 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/README.md @@ -0,0 +1,155 @@ +[![GoDoc](https://pkg.go.dev/badge/github.com/blizzy78/varnamelen)](https://pkg.go.dev/github.com/blizzy78/varnamelen) + + +varnamelen +========== + +A Go Analyzer that checks that the length of a variable's name matches its usage scope: + +Variables with short names can be hard to use if the variable is used over a longer span of lines of code. +A longer variable name may be easier to comprehend. + +The analyzer also checks method receiver names, named return values, and type parameter names. + +Arbitrary declarations such as `f *foo` can be ignored, as well as idiomatic `ok` variables. +Conventional Go parameters such as `ctx context.Context` or `t *testing.T` will always be ignored. + +**Example output** + +``` +test.go:4:2: variable name 'x' is too short for the scope of its usage (varnamelen) + x := 123 + ^ +test.go:6:2: variable name 'i' is too short for the scope of its usage (varnamelen) + i := 10 + ^ +``` + + +golangci-lint Integration +------------------------- + +varnamelen is integrated into [golangci-lint] (though it may not always be the most recent version.) + +Example configuration for golangci-lint: + +```yaml +linters-settings: + varnamelen: + # The longest distance, in source lines, that is being considered a "small scope." (defaults to 5) + # Variables used in at most this many lines will be ignored. + max-distance: 5 + # The minimum length of a variable's name that is considered "long." (defaults to 3) + # Variable names that are at least this long will be ignored. + min-name-length: 3 + # Check method receiver. (defaults to false) + check-receiver: false + # Check named return values. (defaults to false) + check-return: false + # Check type parameters. (defaults to false) + check-type-param: false + # Ignore "ok" variables that hold the bool return value of a type assertion. (defaults to false) + ignore-type-assert-ok: false + # Ignore "ok" variables that hold the bool return value of a map index. (defaults to false) + ignore-map-index-ok: false + # Ignore "ok" variables that hold the bool return value of a channel receive. (defaults to false) + ignore-chan-recv-ok: false + # Optional list of variable names that should be ignored completely. (defaults to empty list) + ignore-names: + - err + # Optional list of variable declarations that should be ignored completely. (defaults to empty list) + # Entries must be in one of the following forms (see below for examples): + # - for variables, parameters, or named return values: + # - + # - * + # - for type parameters: + # - + # - for constants: + # - const + ignore-decls: + - c echo.Context + - t testing.T + - f *foo.Bar + - e error + - i int + - const C + - T any +``` + + +Standalone Usage +---------------- + +The `cmd/` folder provides a standalone command line utility. 
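The README's note that conventional Go parameters are always ignored is worth seeing concretely. A hypothetical `_test.go` file, assuming the default settings described above:

```go
package demo

import "testing"

// `t` is a one-letter name used well past the default max-distance of 5
// lines, but `t *testing.T` matches one of the analyzer's conventional
// declarations, so varnamelen never reports it.
func TestConventionalNames(t *testing.T) {
	t.Log("step 1")
	t.Log("step 2")
	t.Log("step 3")
	t.Log("step 4")
	t.Log("step 5")
	t.Log("step 6") // sixth line after the declaration: distance > 5
}
```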
You can build it like this: + +``` +go build -o varnamelen ./cmd/ +``` + +**Usage** + +``` +varnamelen: checks that the length of a variable's name matches its scope + +Usage: varnamelen [-flag] [package] + +A variable with a short name can be hard to use if the variable is used +over a longer span of lines of code. A longer variable name may be easier +to comprehend. + +Flags: + -V print version and exit + -all + no effect (deprecated) + -c int + display offending line with this many lines of context (default -1) + -checkReceiver + check method receiver names + -checkReturn + check named return values + -checkTypeParam + check type parameter names + -cpuprofile string + write CPU profile to this file + -debug string + debug flags, any subset of "fpstv" + -fix + apply all suggested fixes + -flags + print analyzer flags in JSON + -ignoreChanRecvOk + ignore 'ok' variables that hold the bool return value of a channel receive + -ignoreDecls value + comma-separated list of ignored variable declarations + -ignoreMapIndexOk + ignore 'ok' variables that hold the bool return value of a map index + -ignoreNames value + comma-separated list of ignored variable names + -ignoreTypeAssertOk + ignore 'ok' variables that hold the bool return value of a type assertion + -json + emit JSON output + -maxDistance int + maximum number of lines of variable usage scope considered 'short' (default 5) + -memprofile string + write memory profile to this file + -minNameLength int + minimum length of variable name considered 'long' (default 3) + -source + no effect (deprecated) + -tags string + no effect (deprecated) + -trace string + write trace log to this file + -v no effect (deprecated) +``` + + +License +------- + +This package is licensed under the MIT license. + + + +[golangci-lint]: https://github.com/golangci/golangci-lint diff --git a/vendor/github.com/blizzy78/varnamelen/doc.go b/vendor/github.com/blizzy78/varnamelen/doc.go new file mode 100644 index 0000000000..d63c71cfb7 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/doc.go @@ -0,0 +1,3 @@ +// Package varnamelen implements an analyzer checking that the length of a variable's name +// matches its usage scope. +package varnamelen diff --git a/vendor/github.com/blizzy78/varnamelen/flags.go b/vendor/github.com/blizzy78/varnamelen/flags.go new file mode 100644 index 0000000000..ee80774f96 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/flags.go @@ -0,0 +1,109 @@ +package varnamelen + +import "strings" + +// stringsValue is the value of a list-of-strings flag. +type stringsValue struct { + Values []string +} + +// declarationsValue is the value of a list-of-declarations flag. +type declarationsValue struct { + Values []declaration +} + +// Set implements Value. +func (sv *stringsValue) Set(values string) error { + if strings.TrimSpace(values) == "" { + sv.Values = nil + return nil + } + + parts := strings.Split(values, ",") + + sv.Values = make([]string, len(parts)) + + for i, part := range parts { + sv.Values[i] = strings.TrimSpace(part) + } + + return nil +} + +// String implements Value. +func (sv *stringsValue) String() string { + return strings.Join(sv.Values, ",") +} + +// contains returns true if sv contains s. +func (sv *stringsValue) contains(s string) bool { + for _, v := range sv.Values { + if v == s { + return true + } + } + + return false +} + +// Set implements Value. 
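Putting the flag listing above together, a hypothetical standalone invocation that loosens the distance threshold and skips a couple of common names (flag names taken from the help text above; the package pattern is the usual go/analysis one):

```
./varnamelen -maxDistance 10 -minNameLength 2 -ignoreNames err,ok ./...
```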
+func (dv *declarationsValue) Set(values string) error { + if strings.TrimSpace(values) == "" { + dv.Values = nil + return nil + } + + parts := strings.Split(values, ",") + + dv.Values = make([]declaration, len(parts)) + + for idx, part := range parts { + dv.Values[idx] = parseDeclaration(strings.TrimSpace(part)) + } + + return nil +} + +// String implements Value. +func (dv *declarationsValue) String() string { + parts := make([]string, len(dv.Values)) + + for idx, val := range dv.Values { + parts[idx] = val.name + " " + val.typ + } + + return strings.Join(parts, ",") +} + +// matchVariable returns true if vari matches any of the declarations in dv. +func (dv *declarationsValue) matchVariable(vari variable) bool { + for _, decl := range dv.Values { + if vari.match(decl) { + return true + } + } + + return false +} + +// matchParameter returns true if param matches any of the declarations in dv. +func (dv *declarationsValue) matchParameter(param parameter) bool { + for _, decl := range dv.Values { + if param.match(decl) { + return true + } + } + + return false +} + +// matchTypeParameter returns true if param matches any of the declarations in dv. +func (dv *declarationsValue) matchTypeParameter(param typeParam) bool { + for _, decl := range dv.Values { + if param.match(decl) { + return true + } + } + + return false +} diff --git a/vendor/github.com/blizzy78/varnamelen/typeparam.go b/vendor/github.com/blizzy78/varnamelen/typeparam.go new file mode 100644 index 0000000000..a1f3de99a3 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/typeparam.go @@ -0,0 +1,35 @@ +//go:build go1.18 +// +build go1.18 + +package varnamelen + +import "go/ast" + +// isTypeParam returns true if field is a type parameter of any of the given funcs. +func isTypeParam(field *ast.Field, funcs []*ast.FuncDecl, funcLits []*ast.FuncLit) bool { //nolint:gocognit // it's not that complicated + for _, f := range funcs { + if f.Type.TypeParams == nil { + continue + } + + for _, p := range f.Type.TypeParams.List { + if p == field { + return true + } + } + } + + for _, f := range funcLits { + if f.Type.TypeParams == nil { + continue + } + + for _, p := range f.Type.TypeParams.List { + if p == field { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/blizzy78/varnamelen/typeparam_go1.16.go b/vendor/github.com/blizzy78/varnamelen/typeparam_go1.16.go new file mode 100644 index 0000000000..7856651b90 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/typeparam_go1.16.go @@ -0,0 +1,11 @@ +//go:build (go1.16 && !go1.18) || (go1.17 && !go1.18) +// +build go1.16,!go1.18 go1.17,!go1.18 + +package varnamelen + +import "go/ast" + +// isTypeParam returns true if field is a type parameter of any of the given funcs. +func isTypeParam(_ *ast.Field, _ []*ast.FuncDecl, _ []*ast.FuncLit) bool { + return false +} diff --git a/vendor/github.com/blizzy78/varnamelen/varnamelen.code-workspace b/vendor/github.com/blizzy78/varnamelen/varnamelen.code-workspace new file mode 100644 index 0000000000..68c485c968 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/varnamelen.code-workspace @@ -0,0 +1,13 @@ +{ + "folders": [ + { + "path": "."
+ } + ], + "extensions": { + "recommendations": [ + "EditorConfig.EditorConfig", + "golang.go" + ] + } +} diff --git a/vendor/github.com/blizzy78/varnamelen/varnamelen.go b/vendor/github.com/blizzy78/varnamelen/varnamelen.go new file mode 100644 index 0000000000..a5b9603114 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/varnamelen.go @@ -0,0 +1,891 @@ +package varnamelen + +import ( + "go/ast" + "go/token" + "go/types" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +// varNameLen is an analyzer that checks that the length of a variable's name matches its usage scope. +// It will create a report for a variable's assignment if that variable has a short name, but its +// usage scope is not considered "small." +type varNameLen struct { + // maxDistance is the longest distance, in source lines, that is being considered a "small scope." + maxDistance int + + // minNameLength is the minimum length of a variable's name that is considered "long." + minNameLength int + + // ignoreNames is an optional list of variable names that should be ignored completely. + ignoreNames stringsValue + + // checkReceiver determines whether method receivers should be checked. + checkReceiver bool + + // checkReturn determines whether named return values should be checked. + checkReturn bool + + // ignoreTypeAssertOk determines whether "ok" variables that hold the bool return value of a type assertion should be ignored. + ignoreTypeAssertOk bool + + // ignoreMapIndexOk determines whether "ok" variables that hold the bool return value of a map index should be ignored. + ignoreMapIndexOk bool + + // ignoreChannelReceiveOk determines whether "ok" variables that hold the bool return value of a channel receive should be ignored. + ignoreChannelReceiveOk bool + + // ignoreDeclarations is an optional list of variable declarations that should be ignored completely. + ignoreDeclarations declarationsValue + + // checkTypeParameters determines whether type parameters should be checked. + checkTypeParameters bool +} + +// variable represents a declared variable. +type variable struct { + // name is the name of the variable. + name string + + // constant is true if the variable is actually a constant. + constant bool + + // typ is the type of the variable. + typ string + + // assign is the assign statement that declares the variable. + assign *ast.AssignStmt + + // valueSpec is the value specification that declares the variable. + valueSpec *ast.ValueSpec +} + +// parameter represents a declared function or method parameter. +type parameter struct { + // name is the name of the parameter. + name string + + // typ is the type of the parameter. + typ string + + // field is the declaration of the parameter. + field *ast.Field +} + +// typeParam represents a declared type parameter. +type typeParam struct { + // name is the name of the type parameter. + name string + + // typ is the type of the type parameter. + typ string + + // field is the field that declares the type parameter. + field *ast.Field +} + +// declaration is a variable declaration. +type declaration struct { + // name is the name of the variable. + name string + + // constant is true if the variable is actually a constant. + constant bool + + // typ is the type of the variable. Not used for constants. + typ string +} + +// importDeclaration is an import declaration. 
+type importDeclaration struct { + // name is the short name or alias for the imported package. This is either the package's default name, + // or the alias specified in the import statement. + // Not used if self is true. + name string + + // path is the full path to the imported package. + path string + + // self is true when this is an implicit import declaration for the current package. + self bool +} + +const ( + // defaultMaxDistance is the default value for the maximum distance between the declaration of a variable and its usage + // that is considered a "small scope." + defaultMaxDistance = 5 + + // defaultMinNameLength is the default value for the minimum length of a variable's name that is considered "long." + defaultMinNameLength = 3 +) + +// conventionalDecls is a list of conventional variable declarations. +var conventionalDecls = []declaration{ + parseDeclaration("ctx context.Context"), + + parseDeclaration("b *testing.B"), + parseDeclaration("f *testing.F"), + parseDeclaration("m *testing.M"), + parseDeclaration("pb *testing.PB"), + parseDeclaration("t *testing.T"), + parseDeclaration("tb testing.TB"), +} + +// NewAnalyzer returns a new analyzer. +func NewAnalyzer() *analysis.Analyzer { + vnl := varNameLen{ + maxDistance: defaultMaxDistance, + minNameLength: defaultMinNameLength, + ignoreNames: stringsValue{}, + ignoreDeclarations: declarationsValue{}, + } + + analyzer := analysis.Analyzer{ + Name: "varnamelen", + Doc: "checks that the length of a variable's name matches its scope\n\n" + + "A variable with a short name can be hard to use if the variable is used\n" + + "over a longer span of lines of code. A longer variable name may be easier\n" + + "to comprehend.", + + Run: func(pass *analysis.Pass) (interface{}, error) { + (&vnl).run(pass) + return nil, nil + }, + + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + } + + analyzer.Flags.IntVar(&vnl.maxDistance, "maxDistance", defaultMaxDistance, "maximum number of lines of variable usage scope considered 'short'") + analyzer.Flags.IntVar(&vnl.minNameLength, "minNameLength", defaultMinNameLength, "minimum length of variable name considered 'long'") + analyzer.Flags.Var(&vnl.ignoreNames, "ignoreNames", "comma-separated list of ignored variable names") + analyzer.Flags.BoolVar(&vnl.checkReceiver, "checkReceiver", false, "check method receivers") + analyzer.Flags.BoolVar(&vnl.checkReturn, "checkReturn", false, "check named return values") + analyzer.Flags.BoolVar(&vnl.ignoreTypeAssertOk, "ignoreTypeAssertOk", false, "ignore 'ok' variables that hold the bool return value of a type assertion") + analyzer.Flags.BoolVar(&vnl.ignoreMapIndexOk, "ignoreMapIndexOk", false, "ignore 'ok' variables that hold the bool return value of a map index") + analyzer.Flags.BoolVar(&vnl.ignoreChannelReceiveOk, "ignoreChanRecvOk", false, "ignore 'ok' variables that hold the bool return value of a channel receive") + analyzer.Flags.Var(&vnl.ignoreDeclarations, "ignoreDecls", "comma-separated list of ignored variable declarations") + analyzer.Flags.BoolVar(&vnl.checkTypeParameters, "checkTypeParam", false, "check type parameters") + + return &analyzer +} + +// Run applies v to a package, according to pass. +func (v *varNameLen) run(pass *analysis.Pass) { + varToDist, paramToDist, returnToDist, typeParamToDist := v.distances(pass) + + v.checkVariables(pass, varToDist) + v.checkParams(pass, paramToDist) + v.checkReturns(pass, returnToDist) + v.checkTypeParams(pass, typeParamToDist) +} + +// checkVariables applies v to variables in varToDist. 
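Since `NewAnalyzer` above returns a plain `*analysis.Analyzer`, it can be driven outside golangci-lint by the standard go/analysis runners. A minimal sketch, as a hypothetical `main` package that is not part of this diff:

```go
package main

import (
	"github.com/blizzy78/varnamelen"

	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	// singlechecker supplies flag parsing, package loading, and diagnostic
	// printing around a single *analysis.Analyzer.
	singlechecker.Main(varnamelen.NewAnalyzer())
}
```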
+func (v *varNameLen) checkVariables(pass *analysis.Pass, varToDist map[variable]int) { //nolint:gocognit // it's not that complicated + for variable, dist := range varToDist { + if v.ignoreNames.contains(variable.name) { + continue + } + + if v.ignoreDeclarations.matchVariable(variable) { + continue + } + + if v.checkNameAndDistance(variable.name, dist) { + continue + } + + if v.checkTypeAssertOk(variable) { + continue + } + + if v.checkMapIndexOk(variable) { + continue + } + + if v.checkChannelReceiveOk(variable) { + continue + } + + if variable.isConventional() { + continue + } + + if variable.assign != nil { + pass.Reportf(variable.assign.Pos(), "%s name '%s' is too short for the scope of its usage", variable.kindName(), variable.name) + continue + } + + pass.Reportf(variable.valueSpec.Pos(), "%s name '%s' is too short for the scope of its usage", variable.kindName(), variable.name) + } +} + +// checkParams applies v to parameters in paramToDist. +func (v *varNameLen) checkParams(pass *analysis.Pass, paramToDist map[parameter]int) { + for param, dist := range paramToDist { + if v.ignoreNames.contains(param.name) { + continue + } + + if v.ignoreDeclarations.matchParameter(param) { + continue + } + + if v.checkNameAndDistance(param.name, dist) { + continue + } + + if param.isConventional() { + continue + } + + pass.Reportf(param.field.Pos(), "parameter name '%s' is too short for the scope of its usage", param.name) + } +} + +// checkReturns applies v to named return values in returnToDist. +func (v *varNameLen) checkReturns(pass *analysis.Pass, returnToDist map[parameter]int) { + for returnValue, dist := range returnToDist { + if v.ignoreNames.contains(returnValue.name) { + continue + } + + if v.ignoreDeclarations.matchParameter(returnValue) { + continue + } + + if v.checkNameAndDistance(returnValue.name, dist) { + continue + } + + pass.Reportf(returnValue.field.Pos(), "return value name '%s' is too short for the scope of its usage", returnValue.name) + } +} + +// checkTypeParams applies v to type parameters in paramToDist. +func (v *varNameLen) checkTypeParams(pass *analysis.Pass, paramToDist map[typeParam]int) { + for param, dist := range paramToDist { + if v.ignoreNames.contains(param.name) { + continue + } + + if v.ignoreDeclarations.matchTypeParameter(param) { + continue + } + + if v.checkNameAndDistance(param.name, dist) { + continue + } + + pass.Reportf(param.field.Pos(), "type parameter name '%s' is too short for the scope of its usage", param.name) + } +} + +// checkNameAndDistance returns true if name or dist are considered "short". +func (v *varNameLen) checkNameAndDistance(name string, dist int) bool { + if len(name) >= v.minNameLength { + return true + } + + if dist <= v.maxDistance { + return true + } + + return false +} + +// checkTypeAssertOk returns true if "ok" variables that hold the bool return value of a type assertion +// should be ignored, and if vari is such a variable. +func (v *varNameLen) checkTypeAssertOk(vari variable) bool { + return v.ignoreTypeAssertOk && vari.isTypeAssertOk() +} + +// checkMapIndexOk returns true if "ok" variables that hold the bool return value of a map index +// should be ignored, and if vari is such a variable. +func (v *varNameLen) checkMapIndexOk(vari variable) bool { + return v.ignoreMapIndexOk && vari.isMapIndexOk() +} + +// checkChannelReceiveOk returns true if "ok" variables that hold the bool return value of a channel receive +// should be ignored, and if vari is such a variable. 
+func (v *varNameLen) checkChannelReceiveOk(vari variable) bool { + return v.ignoreChannelReceiveOk && vari.isChannelReceiveOk() +} + +// distances returns maps of variables, parameters, return values, and type parameters mapping to their longest usage distances. +func (v *varNameLen) distances(pass *analysis.Pass) (map[variable]int, map[parameter]int, map[parameter]int, map[typeParam]int) { + assignIdents, valueSpecIdents, paramIdents, returnIdents, typeParamIdents, imports, switches := v.identsAndImports(pass) + + varToDist := map[variable]int{} + + for _, ident := range assignIdents { + assign := ident.Obj.Decl.(*ast.AssignStmt) //nolint:forcetypeassert // check is done in identsAndImports + + var typ string + if isTypeSwitchAssign(assign, switches) { + typ = "" + } else { + typ = shortTypeName(pass.TypesInfo.TypeOf(ident), imports) + } + + variable := variable{ + name: ident.Name, + typ: typ, + assign: assign, + } + + useLine := pass.Fset.Position(ident.NamePos).Line + declLine := pass.Fset.Position(assign.Pos()).Line + varToDist[variable] = useLine - declLine + } + + for _, ident := range valueSpecIdents { + valueSpec := ident.Obj.Decl.(*ast.ValueSpec) //nolint:forcetypeassert // check is done in identsAndImports + + variable := variable{ + name: ident.Name, + constant: ident.Obj.Kind == ast.Con, + typ: shortTypeName(pass.TypesInfo.TypeOf(ident), imports), + valueSpec: valueSpec, + } + + useLine := pass.Fset.Position(ident.NamePos).Line + declLine := pass.Fset.Position(valueSpec.Pos()).Line + varToDist[variable] = useLine - declLine + } + + paramToDist := map[parameter]int{} + + for _, ident := range paramIdents { + field := ident.Obj.Decl.(*ast.Field) //nolint:forcetypeassert // check is done in identsAndImports + + param := parameter{ + name: ident.Name, + typ: shortTypeName(pass.TypesInfo.TypeOf(field.Type), imports), + field: field, + } + + useLine := pass.Fset.Position(ident.NamePos).Line + declLine := pass.Fset.Position(field.Pos()).Line + paramToDist[param] = useLine - declLine + } + + returnToDist := map[parameter]int{} + + for _, ident := range returnIdents { + field := ident.Obj.Decl.(*ast.Field) //nolint:forcetypeassert // check is done in identsAndImports + + param := parameter{ + name: ident.Name, + typ: shortTypeName(pass.TypesInfo.TypeOf(ident), imports), + field: field, + } + + useLine := pass.Fset.Position(ident.NamePos).Line + declLine := pass.Fset.Position(field.Pos()).Line + returnToDist[param] = useLine - declLine + } + + typeParamToDist := map[typeParam]int{} + + for _, ident := range typeParamIdents { + field := ident.Obj.Decl.(*ast.Field) //nolint:forcetypeassert // check is done in identsAndImports + + param := typeParam{ + name: ident.Name, + typ: shortTypeName(pass.TypesInfo.TypeOf(field.Type), imports), + field: field, + } + + useLine := pass.Fset.Position(ident.NamePos).Line + declLine := pass.Fset.Position(field.Pos()).Line + typeParamToDist[param] = useLine - declLine + } + + return varToDist, paramToDist, returnToDist, typeParamToDist +} + +// identsAndImports returns Idents referencing assign statements, value specifications, parameters, +// return values, and type parameters, respectively, as well as import declarations, and type switch statements. 
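The distance bookkeeping in `distances` above is plain line arithmetic: each identifier contributes `useLine - declLine`, and the recorded distance per variable is what `checkNameAndDistance` compares against `maxDistance`. A small sketch of code that trips the default thresholds (`maxDistance` 5, `minNameLength` 3):

```go
package main

import "fmt"

func main() {
	n := 1 // declared here: len("n") == 1 < minNameLength (3)

	fmt.Println("padding")
	fmt.Println("padding")
	fmt.Println("padding")
	fmt.Println("padding")
	fmt.Println("padding")

	// Last use, well past the default max-distance of 5 lines, so the
	// analyzer reports: variable name 'n' is too short for the scope of its usage
	fmt.Println(n)
}
```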
+func (v *varNameLen) identsAndImports(pass *analysis.Pass) ([]*ast.Ident, []*ast.Ident, []*ast.Ident, []*ast.Ident, //nolint:gocognit,cyclop // this is complex stuff + []*ast.Ident, []importDeclaration, []*ast.TypeSwitchStmt) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) //nolint:forcetypeassert // inspect.Analyzer always returns *inspector.Inspector + + filter := []ast.Node{ + (*ast.ImportSpec)(nil), + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + (*ast.CompositeLit)(nil), + (*ast.TypeSwitchStmt)(nil), + (*ast.Ident)(nil), + } + + assignIdents := []*ast.Ident{} + valueSpecIdents := []*ast.Ident{} + paramIdents := []*ast.Ident{} + returnIdents := []*ast.Ident{} + typeParamIdents := []*ast.Ident{} + imports := []importDeclaration{} + switches := []*ast.TypeSwitchStmt{} + + funcs := []*ast.FuncDecl{} + methods := []*ast.FuncDecl{} + funcLits := []*ast.FuncLit{} + compositeLits := []*ast.CompositeLit{} + + inspector.Preorder(filter, func(node ast.Node) { + switch node2 := node.(type) { + case *ast.ImportSpec: + decl, ok := importSpecToDecl(node2, pass.Pkg.Imports()) + if !ok { + return + } + + imports = append(imports, decl) + + case *ast.FuncDecl: + funcs = append(funcs, node2) + + if node2.Recv == nil { + return + } + + methods = append(methods, node2) + + case *ast.FuncLit: + funcLits = append(funcLits, node2) + + case *ast.CompositeLit: + compositeLits = append(compositeLits, node2) + + case *ast.TypeSwitchStmt: + switches = append(switches, node2) + + case *ast.Ident: + if node2.Obj == nil { + return + } + + if isCompositeLitKey(node2, compositeLits) { + return + } + + switch objDecl := node2.Obj.Decl.(type) { + case *ast.AssignStmt: + assignIdents = append(assignIdents, node2) + + case *ast.ValueSpec: + valueSpecIdents = append(valueSpecIdents, node2) + + case *ast.Field: + switch { + case isReceiver(objDecl, methods): + if !v.checkReceiver { + return + } + + paramIdents = append(paramIdents, node2) + + case isReturn(objDecl, funcs, funcLits): + if !v.checkReturn { + return + } + + returnIdents = append(returnIdents, node2) + + case isTypeParam(objDecl, funcs, funcLits): + if !v.checkTypeParameters { + return + } + + typeParamIdents = append(typeParamIdents, node2) + + case isParam(objDecl, funcs, funcLits, methods): + paramIdents = append(paramIdents, node2) + } + } + } + }) + + imports = append(imports, importDeclaration{ + path: pass.Pkg.Path(), + self: true, + }) + + sort.Slice(imports, func(a, b int) bool { + // reversed: longest path first + return len(imports[a].path) > len(imports[b].path) + }) + + return assignIdents, valueSpecIdents, paramIdents, returnIdents, typeParamIdents, imports, switches +} + +func importSpecToDecl(spec *ast.ImportSpec, imports []*types.Package) (importDeclaration, bool) { + path := strings.TrimSuffix(strings.TrimPrefix(spec.Path.Value, "\""), "\"") + + if spec.Name != nil { + return importDeclaration{ + name: spec.Name.Name, + path: path, + }, true + } + + for _, imp := range imports { + if imp.Path() == path { + return importDeclaration{ + name: imp.Name(), + path: path, + }, true + } + } + + return importDeclaration{}, false +} + +// isTypeAssertOk returns true if v is an "ok" variable that holds the bool return value of a type assertion. 
+func (v variable) isTypeAssertOk() bool { + if v.name != "ok" { + return false + } + + if v.assign == nil { + return false + } + + if len(v.assign.Lhs) != 2 { + return false + } + + ident, ok := v.assign.Lhs[1].(*ast.Ident) + if !ok { + return false + } + + if ident.Name != "ok" { + return false + } + + if len(v.assign.Rhs) != 1 { + return false + } + + if _, ok := v.assign.Rhs[0].(*ast.TypeAssertExpr); !ok { + return false + } + + return true +} + +// isMapIndexOk returns true if v is an "ok" variable that holds the bool return value of a map index. +func (v variable) isMapIndexOk() bool { + if v.name != "ok" { + return false + } + + if v.assign == nil { + return false + } + + if len(v.assign.Lhs) != 2 { + return false + } + + ident, ok := v.assign.Lhs[1].(*ast.Ident) + if !ok { + return false + } + + if ident.Name != "ok" { + return false + } + + if len(v.assign.Rhs) != 1 { + return false + } + + if _, ok := v.assign.Rhs[0].(*ast.IndexExpr); !ok { + return false + } + + return true +} + +// isChannelReceiveOk returns true if v is an "ok" variable that holds the bool return value of a channel receive. +func (v variable) isChannelReceiveOk() bool { + if v.name != "ok" { + return false + } + + if v.assign == nil { + return false + } + + if len(v.assign.Lhs) != 2 { + return false + } + + ident, ok := v.assign.Lhs[1].(*ast.Ident) + if !ok { + return false + } + + if ident.Name != "ok" { + return false + } + + if len(v.assign.Rhs) != 1 { + return false + } + + unary, ok := v.assign.Rhs[0].(*ast.UnaryExpr) + if !ok { + return false + } + + if unary.Op != token.ARROW { + return false + } + + return true +} + +// isConventional returns true if v matches a conventional Go variable/parameter name and type, +// such as "ctx context.Context" or "t *testing.T". +func (v variable) isConventional() bool { + for _, decl := range conventionalDecls { + if v.match(decl) { + return true + } + } + + return false +} + +// match returns true if v matches decl. +func (v variable) match(decl declaration) bool { + if v.name != decl.name { + return false + } + + if v.constant != decl.constant { + return false + } + + if v.constant { + return true + } + + if v.typ == "" { + return false + } + + return decl.matchType(v.typ) +} + +// kindName returns "constant" if v.constant==true, else "variable". +func (v variable) kindName() string { + if v.constant { + return "constant" + } + + return "variable" +} + +// isReceiver returns true if field is a receiver parameter of any of the given methods. +func isReceiver(field *ast.Field, methods []*ast.FuncDecl) bool { + for _, m := range methods { + for _, recv := range m.Recv.List { + if recv == field { + return true + } + } + } + + return false +} + +// isReturn returns true if field is a return value of any of the given funcs. +func isReturn(field *ast.Field, funcs []*ast.FuncDecl, funcLits []*ast.FuncLit) bool { //nolint:gocognit // it's not that complicated + for _, f := range funcs { + if f.Type.Results == nil { + continue + } + + for _, r := range f.Type.Results.List { + if r == field { + return true + } + } + } + + for _, f := range funcLits { + if f.Type.Results == nil { + continue + } + + for _, r := range f.Type.Results.List { + if r == field { + return true + } + } + } + + return false +} + +// isParam returns true if field is a parameter of any of the given funcs. 
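The three `is...Ok` predicates above recognize the language's three two-value `ok` assignment forms. A small illustration of the exact shapes they match, with the flag that exempts each:

```go
package main

import "fmt"

func main() {
	var val interface{} = "hello"
	index := map[string]int{"a": 1}
	ch := make(chan int, 1)
	ch <- 42

	s, ok := val.(string) // RHS is an *ast.TypeAssertExpr: exempted by -ignoreTypeAssertOk
	fmt.Println(s, ok)

	n, ok := index["a"] // RHS is an *ast.IndexExpr: exempted by -ignoreMapIndexOk
	fmt.Println(n, ok)

	v, ok := <-ch // RHS is an *ast.UnaryExpr with token.ARROW: exempted by -ignoreChanRecvOk
	fmt.Println(v, ok)
}
```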
+func isParam(field *ast.Field, funcs []*ast.FuncDecl, funcLits []*ast.FuncLit, methods []*ast.FuncDecl) bool { //nolint:gocognit,cyclop // it's not that complicated + for _, f := range funcs { + if f.Type.Params == nil { + continue + } + + for _, p := range f.Type.Params.List { + if p == field { + return true + } + } + } + + for _, f := range funcLits { + if f.Type.Params == nil { + continue + } + + for _, p := range f.Type.Params.List { + if p == field { + return true + } + } + } + + for _, m := range methods { + if m.Type.Params == nil { + continue + } + + for _, p := range m.Type.Params.List { + if p == field { + return true + } + } + } + + return false +} + +// isCompositeLitKey returns true if ident is a key of any of the given composite literals. +func isCompositeLitKey(ident *ast.Ident, compositeLits []*ast.CompositeLit) bool { + for _, cl := range compositeLits { + if _, ok := cl.Type.(*ast.MapType); ok { + continue + } + + for _, kvExpr := range cl.Elts { + kv, ok := kvExpr.(*ast.KeyValueExpr) + if !ok { + continue + } + + if kv.Key == ident { + return true + } + } + } + + return false +} + +// isTypeSwitchAssign returns true if assign is an assign statement of any of the given type switch statements. +func isTypeSwitchAssign(assign *ast.AssignStmt, switches []*ast.TypeSwitchStmt) bool { + for _, s := range switches { + if s.Assign == assign { + return true + } + } + + return false +} + +// isConventional returns true if v matches a conventional Go variable/parameter name and type, +// such as "ctx context.Context" or "t *testing.T". +func (p parameter) isConventional() bool { + for _, decl := range conventionalDecls { + if p.match(decl) { + return true + } + } + + return false +} + +// match returns whether p matches decl. +func (p parameter) match(decl declaration) bool { + if p.name != decl.name { + return false + } + + return decl.matchType(p.typ) +} + +// match returns whether p matches decl. +func (p typeParam) match(decl declaration) bool { + if p.name != decl.name { + return false + } + + return decl.matchType(p.typ) +} + +// parseDeclaration parses and returns a variable declaration parsed from decl. +func parseDeclaration(decl string) declaration { + if strings.HasPrefix(decl, "const ") { + return declaration{ + name: strings.TrimPrefix(decl, "const "), + constant: true, + } + } + + parts := strings.SplitN(decl, " ", 2) + + return declaration{ + name: parts[0], + typ: parts[1], + } +} + +// matchType returns true if typ matches d.typ. +func (d declaration) matchType(typ string) bool { + return d.typ == typ +} + +// shortTypeName returns the short name of typ, with respect to imports. +// For example, if package github.com/matryer/is is imported with alias "x", +// and typ represents []*github.com/matryer/is.I, shortTypeName will return "[]*x.I". +// For imports without aliases, the package's default name will be used. +func shortTypeName(typ types.Type, imports []importDeclaration) string { + if typ == nil { + return "" + } + + typStr := typ.String() + + for _, imp := range imports { + prefix := imp.path + "." + + replace := "" + if !imp.self { + replace = imp.name + "." 
+ } + + typStr = strings.ReplaceAll(typStr, prefix, replace) + } + + return typStr +} diff --git a/vendor/github.com/bombsimon/wsl/v3/.travis.yml b/vendor/github.com/bombsimon/wsl/v3/.travis.yml deleted file mode 100644 index 5e2e26ed1c..0000000000 --- a/vendor/github.com/bombsimon/wsl/v3/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -language: go - -go: - - 1.13.x - - 1.12.x - - 1.11.x - -env: - global: - - GO111MODULE=on - -install: - - go get -v golang.org/x/tools/cmd/cover github.com/mattn/goveralls - -script: - - go test -v -covermode=count -coverprofile=coverage.out - -after_script: - - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci - -notifications: - email: false - -# vim: set ts=2 sw=2 et: diff --git a/vendor/github.com/bombsimon/wsl/v3/README.md b/vendor/github.com/bombsimon/wsl/v3/README.md deleted file mode 100644 index 9812f94a79..0000000000 --- a/vendor/github.com/bombsimon/wsl/v3/README.md +++ /dev/null @@ -1,126 +0,0 @@ -# WSL - Whitespace Linter - -[![forthebadge](https://forthebadge.com/images/badges/made-with-go.svg)](https://forthebadge.com) -[![forthebadge](https://forthebadge.com/images/badges/built-with-love.svg)](https://forthebadge.com) - -[![Build Status](https://travis-ci.org/bombsimon/wsl.svg?branch=master)](https://travis-ci.org/bombsimon/wsl) -[![Coverage Status](https://coveralls.io/repos/github/bombsimon/wsl/badge.svg?branch=master)](https://coveralls.io/github/bombsimon/wsl?branch=master) -WSL is a linter that enforces a very **non-scientific** vision of how to make -code more readable by enforcing empty lines at the right places. - -I think too much code out there is too cuddly and a bit too warm for its own -good, making it harder for other people to read and understand. The linter will -warn about newlines in and around blocks, in the beginning of files and other -places in the code. - -**I know this linter is aggressive** and a lot of projects I've tested it on -have failed miserably. For this linter to be useful at all I want to be open to -new ideas, configurations and discussions! Also note that some of the warnings -might be bugs or unintentional false positives so I would love an -[issue](https://github.com/bombsimon/wsl/issues/new) to fix, discuss, change or -make something configurable! - -## Installation - -### By `go get` (local installation) - -You can do that by using: - -```sh -go get -u github.com/bombsimon/wsl/cmd/... -``` - -### By golangci-lint (CI automation) - -`wsl` is already integrated with -[golangci-lint](https://github.com/golangci/golangci-lint). Please refer to the -instructions there. - -## Usage - -How to use depends on how you install `wsl`. - -### With local binary - -The general command format for `wsl` is: - -```sh -$ wsl [flags] [files...] -$ wsl [flags] - -# Examples - -$ wsl ./main.go -$ wsl --no-test ./main.go -$ wsl --allow-cuddle-declarations ./main.go -$ wsl --no-test --allow-cuddle-declaration ./main.go -$ wsl --no-test --allow-trailing-comment ./myProject/... -``` - -The "..." wildcard is not used like other `go` commands but instead can only -be applied to a relative or absolute path. - -By default, the linter will run on `./...` which means all go files in the -current path and all subsequent paths, including test files. To disable linting -test files, use `-n` or `--no-test`.
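The cuddling rules listed in the checklist below are easiest to see on a concrete snippet. A minimal sketch of two of them in action, assuming default settings:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Allowed: the `if` is cuddled with the assignment it checks
	// ("if statements that check an error must be cuddled with the
	// statement that assigned the error").
	file, err := os.Open("config.yml")
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer file.Close()

	// Flagged by wsl: `count` is not used by the `if` below it, which
	// violates "if statements should only be cuddled with assignments used
	// in the if statement itself"; a blank line between the two fixes it.
	count := 0
	if file.Name() == "" {
		return
	}

	fmt.Println(count)
}
```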
- -### By `golangci-lint` (CI automation) - -The recommended command is: - -```sh -golangci-lint run --disable-all --enable wsl -``` - -For more information, please refer to -[golangci-lint](https://github.com/golangci/golangci-lint)'s documentation. - -## Issues and configuration - -The linter suppers a few ways to configure it to satisfy more than one kind of -code style. These settings could be set either with flags or with YAML -configuration if used via `golangci-lint`. - -The supported configuration can be found [in the documentation](doc/configuration.md). - -Below are the available checklist for any hit from `wsl`. If you do not see any, -feel free to raise an [issue](https://github.com/bombsimon/wsl/issues/new). - -> **Note**: this linter doesn't take in consideration the issues that will be -> fixed with `go fmt -s` so ensure that the code is properly formatted before -> use. - -* [Anonymous switch statements should never be cuddled](doc/rules.md#anonymous-switch-statements-should-never-be-cuddled) -* [Append only allowed to cuddle with appended value](doc/rules.md#append-only-allowed-to-cuddle-with-appended-value) -* [Assignments should only be cuddled with other assignments](doc/rules.md#assignments-should-only-be-cuddled-with-other-assignments) -* [Block should not end with a whitespace (or comment)](doc/rules.md#block-should-not-end-with-a-whitespace-or-comment) -* [Block should not start with a whitespace](doc/rules.md#block-should-not-start-with-a-whitespace) -* [Case block should end with newline at this size](doc/rules.md#case-block-should-end-with-newline-at-this-size) -* [Branch statements should not be cuddled if block has more than two lines](doc/rules.md#branch-statements-should-not-be-cuddled-if-block-has-more-than-two-lines) -* [Declarations should never be cuddled](doc/rules.md#declarations-should-never-be-cuddled) -* [Defer statements should only be cuddled with expressions on same variable](doc/rules.md#defer-statements-should-only-be-cuddled-with-expressions-on-same-variable) -* [Expressions should not be cuddled with blocks](doc/rules.md#expressions-should-not-be-cuddled-with-blocks) -* [Expressions should not be cuddled with declarations or returns](doc/rules.md#expressions-should-not-be-cuddled-with-declarations-or-returns) -* [For statement without condition should never be cuddled](doc/rules.md#for-statement-without-condition-should-never-be-cuddled) -* [For statements should only be cuddled with assignments used in the iteration](doc/rules.md#for-statements-should-only-be-cuddled-with-assignments-used-in-the-iteration) -* [Go statements can only invoke functions assigned on line above](doc/rules.md#go-statements-can-only-invoke-functions-assigned-on-line-above) -* [If statements should only be cuddled with assignments](doc/rules.md#if-statements-should-only-be-cuddled-with-assignments) -* [If statements should only be cuddled with assignments used in the if - statement - itself](doc/rules.md#if-statements-should-only-be-cuddled-with-assignments-used-in-the-if-statement-itself) -* [If statements that check an error must be cuddled with the statement that assigned the error](doc/rules.md#if-statements-that-check-an-error-must-be-cuddled-with-the-statement-that-assigned-the-error) -* [Only cuddled expressions if assigning variable or using from line - above](doc/rules.md#only-cuddled-expressions-if-assigning-variable-or-using-from-line-above) -* [Only one cuddle assignment allowed before defer 
statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-defer-statement) -* [Only one cuddle assginment allowed before for statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-for-statement) -* [Only one cuddle assignment allowed before go statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-go-statement) -* [Only one cuddle assignment allowed before if statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-if-statement) -* [Only one cuddle assignment allowed before range statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-range-statement) -* [Only one cuddle assignment allowed before switch statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-switch-statement) -* [Only one cuddle assignment allowed before type switch statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-type-switch-statement) -* [Ranges should only be cuddled with assignments used in the iteration](doc/rules.md#ranges-should-only-be-cuddled-with-assignments-used-in-the-iteration) -* [Return statements should not be cuddled if block has more than two lines](doc/rules.md#return-statements-should-not-be-cuddled-if-block-has-more-than-two-lines) -* [Short declarations should cuddle only with other short declarations](doc/rules.md#short-declaration-should-cuddle-only-with-other-short-declarations) -* [Switch statements should only be cuddled with variables switched](doc/rules.md#switch-statements-should-only-be-cuddled-with-variables-switched) -* [Type switch statements should only be cuddled with variables switched](doc/rules.md#type-switch-statements-should-only-be-cuddled-with-variables-switched) diff --git a/vendor/github.com/bombsimon/wsl/v3/wsl.go b/vendor/github.com/bombsimon/wsl/v3/wsl.go deleted file mode 100644 index 313b527872..0000000000 --- a/vendor/github.com/bombsimon/wsl/v3/wsl.go +++ /dev/null @@ -1,1247 +0,0 @@ -package wsl - -import ( - "fmt" - "go/ast" - "go/parser" - "go/token" - "io/ioutil" - "reflect" - "strings" -) - -// Error reason strings -const ( - reasonMustCuddleErrCheck = "if statements that check an error must be cuddled with the statement that assigned the error" - reasonOnlyCuddleIfWithAssign = "if statements should only be cuddled with assignments" - reasonOnlyOneCuddle = "only one cuddle assignment allowed before if statement" - reasonOnlyCuddleWithUsedAssign = "if statements should only be cuddled with assignments used in the if statement itself" - reasonOnlyCuddle2LineReturn = "return statements should not be cuddled if block has more than two lines" - reasonMultiLineBranchCuddle = "branch statements should not be cuddled if block has more than two lines" - reasonAppendCuddledWithoutUse = "append only allowed to cuddle with appended value" - reasonAssignsCuddleAssign = "assignments should only be cuddled with other assignments" - reasonNeverCuddleDeclare = "declarations should never be cuddled" - reasonExpressionCuddledWithDeclOrRet = "expressions should not be cuddled with declarations or returns" - reasonExpressionCuddledWithBlock = "expressions should not be cuddled with blocks" - reasonExprCuddlingNonAssignedVar = "only cuddled expressions if assigning variable or using from line above" - reasonOneCuddleBeforeRange = "only one cuddle assignment allowed before range statement" - reasonRangeCuddledWithoutUse = "ranges should only be cuddled with assignments used in the iteration" - reasonOneCuddleBeforeDefer = "only one cuddle assignment allowed before defer statement" - reasonDeferCuddledWithOtherVar = 
"defer statements should only be cuddled with expressions on same variable" - reasonForWithoutCondition = "for statement without condition should never be cuddled" - reasonForWithMoreThanOneCuddle = "only one cuddle assignment allowed before for statement" - reasonForCuddledAssignWithoutUse = "for statements should only be cuddled with assignments used in the iteration" - reasonOneCuddleBeforeGo = "only one cuddle assignment allowed before go statement" - reasonGoFuncWithoutAssign = "go statements can only invoke functions assigned on line above" - reasonSwitchManyCuddles = "only one cuddle assignment allowed before switch statement" - reasonAnonSwitchCuddled = "anonymous switch statements should never be cuddled" - reasonSwitchCuddledWithoutUse = "switch statements should only be cuddled with variables switched" - reasonTypeSwitchTooCuddled = "only one cuddle assignment allowed before type switch statement" - reasonTypeSwitchCuddledWithoutUse = "type switch statements should only be cuddled with variables switched" - reasonBlockStartsWithWS = "block should not start with a whitespace" - reasonBlockEndsWithWS = "block should not end with a whitespace (or comment)" - reasonCaseBlockTooCuddly = "case block should end with newline at this size" - reasonShortDeclNotExclusive = "short declaration should cuddle only with other short declarations" -) - -// Warning strings -const ( - warnTypeNotImplement = "type not implemented" - warnStmtNotImplemented = "stmt type not implemented" - warnBodyStmtTypeNotImplemented = "body statement type not implemented " - warnWSNodeTypeNotImplemented = "whitespace node type not implemented " - warnUnknownLHS = "UNKNOWN LHS" - warnUnknownRHS = "UNKNOWN RHS" -) - -type Configuration struct { - // StrictAppend will do strict checking when assigning from append (x = - // append(x, y)). If this is set to true the append call must append either - // a variable assigned, called or used on the line above. Example on not - // allowed when this is true: - // - // x := []string{} - // y := "not going in X" - // x = append(x, "not y") // This is not allowed with StrictAppend - // z := "going in X" - // - // x = append(x, z) // This is allowed with StrictAppend - // - // m := transform(z) - // x = append(x, z) // So is this because Z is used above. - StrictAppend bool - - // AllowAssignAndCallCuddle allows assignments to be cuddled with variables - // used in calls on line above and calls to be cuddled with assignments of - // variables used in call on line above. - // Example supported with this set to true: - // - // x.Call() - // x = Assign() - // x.AnotherCall() - // x = AnotherAssign() - AllowAssignAndCallCuddle bool - - // AllowAssignAndCallCuddle allows assignments to be cuddled with anything. - // Example supported with this set to true: - // if x == 1 { - // x = 0 - // } - // z := x + 2 - // fmt.Println("x") - // y := "x" - AllowAssignAndAnythingCuddle bool - - // AllowMultiLineAssignCuddle allows cuddling to assignments even if they - // span over multiple lines. This defaults to true which allows the - // following example: - // - // err := function( - // "multiple", "lines", - // ) - // if err != nil { - // // ... - // } - AllowMultiLineAssignCuddle bool - - // If the number of lines in a case block is equal to or lager than this - // number, the case *must* end white a newline. - ForceCaseTrailingWhitespaceLimit int - - // AllowTrailingComment will allow blocks to end with comments. 
- AllowTrailingComment bool - - // AllowSeparatedLeadingComment will allow multiple comments in the - // beginning of a block separated with newline. Example: - // func () { - // // Comment one - // - // // Comment two - // fmt.Println("x") - // } - AllowSeparatedLeadingComment bool - - // AllowCuddleDeclaration will allow multiple var/declaration statements to - // be cuddled. This defaults to false but setting it to true will enable the - // following example: - // var foo bool - // var err error - AllowCuddleDeclaration bool - - // AllowCuddleWithCalls is a list of call idents that everything can be - // cuddled with. Defaults to calls looking like locks to support a flow like - // this: - // - // mu.Lock() - // allow := thisAssignment - AllowCuddleWithCalls []string - - // AllowCuddleWithRHS is a list of right hand side variables that is allowed - // to be cuddled with anything. Defaults to assignments or calls looking - // like unlocks to support a flow like this: - // - // allow := thisAssignment() - // mu.Unlock() - AllowCuddleWithRHS []string - - // ForceCuddleErrCheckAndAssign will cause an error when an If statement that - // checks an error variable doesn't cuddle with the assignment of that variable. - // This defaults to false but setting it to true will cause the following - // to generate an error: - // - // err := ProduceError() - // - // if err != nil { - // return err - // } - ForceCuddleErrCheckAndAssign bool - - // When ForceCuddleErrCheckAndAssign is enabled this is a list of names - // used for error variables to check for in the conditional. - // Defaults to just "err" - ErrorVariableNames []string - - // ForceExclusiveShortDeclarations will cause an error if a short declaration - // (:=) cuddles with anything other than another short declaration. For example - // - // a := 2 - // b := 3 - // - // is allowed, but - // - // a := 2 - // b = 3 - // - // is not allowed. This logic overrides ForceCuddleErrCheckAndAssign among others. - ForceExclusiveShortDeclarations bool -} - -// DefaultConfig returns default configuration -func DefaultConfig() Configuration { - return Configuration{ - StrictAppend: true, - AllowAssignAndCallCuddle: true, - AllowAssignAndAnythingCuddle: false, - AllowMultiLineAssignCuddle: true, - AllowTrailingComment: false, - AllowSeparatedLeadingComment: false, - ForceCuddleErrCheckAndAssign: false, - ForceExclusiveShortDeclarations: false, - ForceCaseTrailingWhitespaceLimit: 0, - AllowCuddleWithCalls: []string{"Lock", "RLock"}, - AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, - ErrorVariableNames: []string{"err"}, - } -} - -// Result represents the result of one error. -type Result struct { - FileName string - LineNumber int - Position token.Position - Reason string -} - -// String returns the filename, line number and reason of a Result. -func (r *Result) String() string { - return fmt.Sprintf("%s:%d: %s", r.FileName, r.LineNumber, r.Reason) -} - -type Processor struct { - config Configuration - result []Result - warnings []string - fileSet *token.FileSet - file *ast.File -} - -// NewProcessor will create a Processor. -func NewProcessorWithConfig(cfg Configuration) *Processor { - return &Processor{ - result: []Result{}, - config: cfg, - } -} - -// NewProcessor will create a Processor. -func NewProcessor() *Processor { - return NewProcessorWithConfig(DefaultConfig()) -} - -// ProcessFiles takes a string slice with file names (full paths) and lints -// them. 
-// nolint: gocritic -func (p *Processor) ProcessFiles(filenames []string) ([]Result, []string) { - for _, filename := range filenames { - data, err := ioutil.ReadFile(filename) - if err != nil { - panic(err) - } - - p.process(filename, data) - } - - return p.result, p.warnings -} - -func (p *Processor) process(filename string, data []byte) { - fileSet := token.NewFileSet() - file, err := parser.ParseFile(fileSet, filename, data, parser.ParseComments) - - // If the file is not parsable let's add a syntax error and move on. - if err != nil { - p.result = append(p.result, Result{ - FileName: filename, - LineNumber: 0, - Reason: fmt.Sprintf("invalid syntax, file cannot be linted (%s)", err.Error()), - }) - - return - } - - p.fileSet = fileSet - p.file = file - - for _, d := range p.file.Decls { - switch v := d.(type) { - case *ast.FuncDecl: - p.parseBlockBody(v.Name, v.Body) - case *ast.GenDecl: - // `go fmt` will handle proper spacing for GenDecl such as imports, - // constants etc. - default: - p.addWarning(warnTypeNotImplement, d.Pos(), v) - } - } -} - -// parseBlockBody will parse any kind of block statements such as switch cases -// and if statements. A list of Result is returned. -func (p *Processor) parseBlockBody(ident *ast.Ident, block *ast.BlockStmt) { - // Nothing to do if there's no value. - if reflect.ValueOf(block).IsNil() { - return - } - - // Start by finding leading and trailing whitespaces. - p.findLeadingAndTrailingWhitespaces(ident, block, nil) - - // Parse the block body contents. - p.parseBlockStatements(block.List) -} - -// parseBlockStatements will parse all the statements found in the body of a -// node. A list of Result is returned. -// nolint: gocognit -func (p *Processor) parseBlockStatements(statements []ast.Stmt) { - for i, stmt := range statements { - // Start by checking if this statement is another block (other than if, - // for and range). This could be assignment to a function, defer or go - // call with an inline function or similar. If this is found we start by - // parsing this body block before moving on. - for _, stmtBlocks := range p.findBlockStmt(stmt) { - p.parseBlockBody(nil, stmtBlocks) - } - - firstBodyStatement := p.firstBodyStatement(i, statements) - - // First statement, nothing to do. - if i == 0 { - continue - } - - previousStatement := statements[i-1] - previousStatementIsMultiline := p.nodeStart(previousStatement) != p.nodeEnd(previousStatement) - cuddledWithLastStmt := p.nodeEnd(previousStatement) == p.nodeStart(stmt)-1 - - // If we're not cuddled and we don't need to enforce err-check cuddling - // then we can bail out here - if !cuddledWithLastStmt && !p.config.ForceCuddleErrCheckAndAssign { - continue - } - - // We don't force error cuddling for multilines. (#86) - if p.config.ForceCuddleErrCheckAndAssign && previousStatementIsMultiline && !cuddledWithLastStmt { - continue - } - - // Extract assigned variables on the line above - // which is the only thing we allow cuddling with. If the assignment is - // made over multiple lines we should not allow cuddling. - var assignedOnLineAbove []string - - // We want to keep track of what was called on the line above to support - // special handling of things such as mutexes. - var calledOnLineAbove []string - - // Check if the previous statement spans over multiple lines. 
- var cuddledWithMultiLineAssignment = cuddledWithLastStmt && p.nodeStart(previousStatement) != p.nodeStart(stmt)-1 - - // Ensure previous line is not a multi line assignment and if not get - // rightAndLeftHandSide assigned variables. - if !cuddledWithMultiLineAssignment { - assignedOnLineAbove = p.findLHS(previousStatement) - calledOnLineAbove = p.findRHS(previousStatement) - } - - // If previous assignment is multi line and we allow it, fetch - // assignments (but only assignments). - if cuddledWithMultiLineAssignment && p.config.AllowMultiLineAssignCuddle { - if _, ok := previousStatement.(*ast.AssignStmt); ok { - assignedOnLineAbove = p.findLHS(previousStatement) - } - } - - // We could potentially have a block which require us to check the first - // argument before ruling out an allowed cuddle. - var calledOrAssignedFirstInBlock []string - - if firstBodyStatement != nil { - calledOrAssignedFirstInBlock = append(p.findLHS(firstBodyStatement), p.findRHS(firstBodyStatement)...) - } - - var ( - leftHandSide = p.findLHS(stmt) - rightHandSide = p.findRHS(stmt) - rightAndLeftHandSide = append(leftHandSide, rightHandSide...) - calledOrAssignedOnLineAbove = append(calledOnLineAbove, assignedOnLineAbove...) - ) - - // If we called some kind of lock on the line above we allow cuddling - // anything. - if atLeastOneInListsMatch(calledOnLineAbove, p.config.AllowCuddleWithCalls) { - continue - } - - // If we call some kind of unlock on this line we allow cuddling with - // anything. - if atLeastOneInListsMatch(rightHandSide, p.config.AllowCuddleWithRHS) { - continue - } - - moreThanOneStatementAbove := func() bool { - if i < 2 { - return false - } - - statementBeforePreviousStatement := statements[i-2] - - return p.nodeStart(previousStatement)-1 == p.nodeEnd(statementBeforePreviousStatement) - } - - isLastStatementInBlockOfOnlyTwoLines := func() bool { - // If we're the last statement, check if there's no more than two - // lines from the starting statement and the end of this statement. - // This is to support short return functions such as: - // func (t *Typ) X() { - // t.X = true - // return t - // } - // nolint: gocritic - if i == len(statements)-1 && i == 1 { - if p.nodeEnd(stmt)-p.nodeStart(previousStatement) <= 2 { - return true - } - } - - return false - } - - // If it's a short declaration we should not cuddle with anything else - // if ForceExclusiveShortDeclarations is set on; either this or the - // previous statement could be the short decl, so we'll find out which - // it was and use *that* statement's position - if p.config.ForceExclusiveShortDeclarations && cuddledWithLastStmt { - if p.isShortDecl(stmt) && !p.isShortDecl(previousStatement) { - p.addError(stmt.Pos(), reasonShortDeclNotExclusive) - } else if p.isShortDecl(previousStatement) && !p.isShortDecl(stmt) { - p.addError(previousStatement.Pos(), reasonShortDeclNotExclusive) - } - } - - // If it's not an if statement and we're not cuddled move on. The only - // reason we need to keep going for if statements is to check if we - // should be cuddled with an error check. 
- if _, ok := stmt.(*ast.IfStmt); !ok { - if !cuddledWithLastStmt { - continue - } - } - - switch t := stmt.(type) { - case *ast.IfStmt: - checkingErrInitializedInline := func() bool { - if t.Init == nil { - return false - } - - // Variables were initialized inline in the if statement - // Let's make sure it's the err just to be safe - return atLeastOneInListsMatch(p.findLHS(t.Init), p.config.ErrorVariableNames) - } - - if !cuddledWithLastStmt { - checkingErr := atLeastOneInListsMatch(rightAndLeftHandSide, p.config.ErrorVariableNames) - if checkingErr { - // We only want to enforce cuddling error checks if the - // error was assigned on the line above. See - // https://github.com/bombsimon/wsl/issues/78. - // This is needed since `assignedOnLineAbove` is not - // actually just assignments but everything from LHS in the - // previous statement. This means that if previous line was - // `if err ...`, `err` will now be in the list - // `assignedOnLineAbove`. - if _, ok := previousStatement.(*ast.AssignStmt); !ok { - continue - } - - if checkingErrInitializedInline() { - continue - } - - if atLeastOneInListsMatch(assignedOnLineAbove, p.config.ErrorVariableNames) { - p.addError(t.Pos(), reasonMustCuddleErrCheck) - } - } - - continue - } - - if len(assignedOnLineAbove) == 0 { - p.addError(t.Pos(), reasonOnlyCuddleIfWithAssign) - continue - } - - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonOnlyOneCuddle) - continue - } - - if atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { - continue - } - - if atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { - continue - } - - p.addError(t.Pos(), reasonOnlyCuddleWithUsedAssign) - case *ast.ReturnStmt: - if isLastStatementInBlockOfOnlyTwoLines() { - continue - } - - p.addError(t.Pos(), reasonOnlyCuddle2LineReturn) - case *ast.BranchStmt: - if isLastStatementInBlockOfOnlyTwoLines() { - continue - } - - p.addError(t.Pos(), reasonMultiLineBranchCuddle) - case *ast.AssignStmt: - // append is usually an assignment but should not be allowed to be - // cuddled with anything not appended. - if len(rightHandSide) > 0 && rightHandSide[len(rightHandSide)-1] == "append" { - if p.config.StrictAppend { - if !atLeastOneInListsMatch(calledOrAssignedOnLineAbove, rightHandSide) { - p.addError(t.Pos(), reasonAppendCuddledWithoutUse) - } - } - - continue - } - - if _, ok := previousStatement.(*ast.AssignStmt); ok { - continue - } - - if p.config.AllowAssignAndAnythingCuddle { - continue - } - - if _, ok := previousStatement.(*ast.DeclStmt); ok && p.config.AllowCuddleDeclaration { - continue - } - - // If the assignment is from a type or variable called on the line - // above we can allow it by setting AllowAssignAndCallCuddle to - // true. 
- // Example (x is used): - // x.function() - // a.Field = x.anotherFunction() - if p.config.AllowAssignAndCallCuddle { - if atLeastOneInListsMatch(calledOrAssignedOnLineAbove, rightAndLeftHandSide) { - continue - } - } - - p.addError(t.Pos(), reasonAssignsCuddleAssign) - case *ast.DeclStmt: - if !p.config.AllowCuddleDeclaration { - p.addError(t.Pos(), reasonNeverCuddleDeclare) - } - case *ast.ExprStmt: - switch previousStatement.(type) { - case *ast.DeclStmt, *ast.ReturnStmt: - if p.config.AllowAssignAndCallCuddle && p.config.AllowCuddleDeclaration { - continue - } - - p.addError(t.Pos(), reasonExpressionCuddledWithDeclOrRet) - case *ast.IfStmt, *ast.RangeStmt, *ast.SwitchStmt: - p.addError(t.Pos(), reasonExpressionCuddledWithBlock) - } - - // If the expression is called on a type or variable used or - // assigned on the line we can allow it by setting - // AllowAssignAndCallCuddle to true. - // Example of allowed cuddled (x is used): - // a.Field = x.func() - // x.function() - if p.config.AllowAssignAndCallCuddle { - if atLeastOneInListsMatch(calledOrAssignedOnLineAbove, rightAndLeftHandSide) { - continue - } - } - - // If we assigned variables on the line above but didn't use them in - // this expression there should probably be a newline between them. - if len(assignedOnLineAbove) > 0 && !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { - p.addError(t.Pos(), reasonExprCuddlingNonAssignedVar) - } - case *ast.RangeStmt: - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonOneCuddleBeforeRange) - continue - } - - if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { - if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { - p.addError(t.Pos(), reasonRangeCuddledWithoutUse) - } - } - case *ast.DeferStmt: - if _, ok := previousStatement.(*ast.DeferStmt); ok { - // We may cuddle multiple defers to group logic. - continue - } - - // Special treatment of deferring body closes after error checking - // according to best practices. See - // https://github.com/bombsimon/wsl/issues/31 which links to - // discussion about error handling after HTTP requests. This is hard - // coded and very specific but for now this is to be seen as a - // special case. What this does is that it *only* allows a defer - // statement with `Close` on the right hand side to be cuddled with - // an if-statement to support this: - // resp, err := client.Do(req) - // if err != nil { - // return err - // } - // defer resp.Body.Close() - if _, ok := previousStatement.(*ast.IfStmt); ok { - if atLeastOneInListsMatch(rightHandSide, []string{"Close"}) { - continue - } - } - - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonOneCuddleBeforeDefer) - - continue - } - - // Be extra nice with RHS, it's common to use this for locks: - // m.Lock() - // defer m.Unlock() - previousRHS := p.findRHS(previousStatement) - if atLeastOneInListsMatch(rightHandSide, previousRHS) { - continue - } - - // Allow use to cuddled defer func literals with usages on line - // abouve. Example: - // b := getB() - // defer func() { - // makesSenseToUse(b) - // }() - if c, ok := t.Call.Fun.(*ast.FuncLit); ok { - funcLitFirstStmt := append(p.findLHS(c.Body), p.findRHS(c.Body)...) 
- - if atLeastOneInListsMatch(assignedOnLineAbove, funcLitFirstStmt) { - continue - } - } - - if atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { - continue - } - - if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { - p.addError(t.Pos(), reasonDeferCuddledWithOtherVar) - } - case *ast.ForStmt: - if len(rightAndLeftHandSide) == 0 { - p.addError(t.Pos(), reasonForWithoutCondition) - - continue - } - - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonForWithMoreThanOneCuddle) - - continue - } - - // The same rule applies for ranges as for if statements, see - // comments regarding variable usages on the line before or as the - // first line in the block for details. - if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { - if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { - p.addError(t.Pos(), reasonForCuddledAssignWithoutUse) - } - } - case *ast.GoStmt: - if _, ok := previousStatement.(*ast.GoStmt); ok { - continue - } - - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonOneCuddleBeforeGo) - - continue - } - - if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { - p.addError(t.Pos(), reasonGoFuncWithoutAssign) - } - case *ast.SwitchStmt: - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonSwitchManyCuddles) - - continue - } - - if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { - if len(rightAndLeftHandSide) == 0 { - p.addError(t.Pos(), reasonAnonSwitchCuddled) - } else { - p.addError(t.Pos(), reasonSwitchCuddledWithoutUse) - } - } - case *ast.TypeSwitchStmt: - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonTypeSwitchTooCuddled) - - continue - } - - // Allowed to type assert on variable assigned on line above. - if !atLeastOneInListsMatch(rightHandSide, assignedOnLineAbove) { - // Allow type assertion on variables used in the first case - // immediately. - if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { - p.addError(t.Pos(), reasonTypeSwitchCuddledWithoutUse) - } - } - case *ast.CaseClause, *ast.CommClause: - // Case clauses will be checked by not allowing leading ot trailing - // whitespaces within the block. There's nothing in the case itself - // that may be cuddled. - default: - p.addWarning(warnStmtNotImplemented, t.Pos(), t) - } - } -} - -// firstBodyStatement returns the first statement inside a body block. This is -// because variables may be cuddled with conditions or statements if it's used -// directly as the first argument inside a body. -// The body will then be parsed as a *ast.BlockStmt (regular block) or as a list -// of []ast.Stmt (case block). -func (p *Processor) firstBodyStatement(i int, allStmt []ast.Stmt) ast.Node { - stmt := allStmt[i] - - // Start by checking if the statement has a body (probably if-statement, - // a range, switch case or similar. Whenever a body is found we start by - // parsing it before moving on in the AST. - statementBody := reflect.Indirect(reflect.ValueOf(stmt)).FieldByName("Body") - - // Some cases allow cuddling depending on the first statement in a body - // of a block or case. If possible extract the first statement. 
- var firstBodyStatement ast.Node - - if !statementBody.IsValid() { - return firstBodyStatement - } - - switch statementBodyContent := statementBody.Interface().(type) { - case *ast.BlockStmt: - if len(statementBodyContent.List) > 0 { - firstBodyStatement = statementBodyContent.List[0] - - // If the first body statement is a *ast.CaseClause we're - // actually interested in the **next** body to know what's - // inside the first case. - if x, ok := firstBodyStatement.(*ast.CaseClause); ok { - if len(x.Body) > 0 { - firstBodyStatement = x.Body[0] - } - } - } - - p.parseBlockBody(nil, statementBodyContent) - case []ast.Stmt: - // The Body field for an *ast.CaseClause or *ast.CommClause is of type - // []ast.Stmt. We must check leading and trailing whitespaces and then - // pass the statements to parseBlockStatements to parse it's content. - var nextStatement ast.Node - - // Check if there's more statements (potential cases) after the - // current one. - if len(allStmt)-1 > i { - nextStatement = allStmt[i+1] - } - - p.findLeadingAndTrailingWhitespaces(nil, stmt, nextStatement) - p.parseBlockStatements(statementBodyContent) - default: - p.addWarning( - warnBodyStmtTypeNotImplemented, - stmt.Pos(), statementBodyContent, - ) - } - - return firstBodyStatement -} - -func (p *Processor) findLHS(node ast.Node) []string { - var lhs []string - - if node == nil { - return lhs - } - - switch t := node.(type) { - case *ast.BasicLit, *ast.FuncLit, *ast.SelectStmt, - *ast.LabeledStmt, *ast.ForStmt, *ast.SwitchStmt, - *ast.ReturnStmt, *ast.GoStmt, *ast.CaseClause, - *ast.CommClause, *ast.CallExpr, *ast.UnaryExpr, - *ast.BranchStmt, *ast.TypeSpec, *ast.ChanType, - *ast.DeferStmt, *ast.TypeAssertExpr, *ast.RangeStmt: - // Nothing to add to LHS - case *ast.IncDecStmt: - return p.findLHS(t.X) - case *ast.Ident: - return []string{t.Name} - case *ast.AssignStmt: - for _, v := range t.Lhs { - lhs = append(lhs, p.findLHS(v)...) - } - case *ast.GenDecl: - for _, v := range t.Specs { - lhs = append(lhs, p.findLHS(v)...) - } - case *ast.ValueSpec: - for _, v := range t.Names { - lhs = append(lhs, p.findLHS(v)...) - } - case *ast.BlockStmt: - for _, v := range t.List { - lhs = append(lhs, p.findLHS(v)...) - } - case *ast.BinaryExpr: - return append( - p.findLHS(t.X), - p.findLHS(t.Y)..., - ) - case *ast.DeclStmt: - return p.findLHS(t.Decl) - case *ast.IfStmt: - return p.findLHS(t.Cond) - case *ast.TypeSwitchStmt: - return p.findLHS(t.Assign) - case *ast.SendStmt: - return p.findLHS(t.Chan) - default: - if x, ok := maybeX(t); ok { - return p.findLHS(x) - } - - p.addWarning(warnUnknownLHS, t.Pos(), t) - } - - return lhs -} - -func (p *Processor) findRHS(node ast.Node) []string { - var rhs []string - - if node == nil { - return rhs - } - - switch t := node.(type) { - case *ast.BasicLit, *ast.SelectStmt, *ast.ChanType, - *ast.LabeledStmt, *ast.DeclStmt, *ast.BranchStmt, - *ast.TypeSpec, *ast.ArrayType, *ast.CaseClause, - *ast.CommClause, *ast.KeyValueExpr, *ast.MapType, - *ast.FuncLit: - // Nothing to add to RHS - case *ast.Ident: - return []string{t.Name} - case *ast.SelectorExpr: - // TODO: Should this be RHS? - // t.X is needed for defer as of now and t.Sel needed for special - // functions such as Lock() - rhs = p.findRHS(t.X) - rhs = append(rhs, p.findRHS(t.Sel)...) - case *ast.AssignStmt: - for _, v := range t.Rhs { - rhs = append(rhs, p.findRHS(v)...) - } - case *ast.CallExpr: - for _, v := range t.Args { - rhs = append(rhs, p.findRHS(v)...) - } - - rhs = append(rhs, p.findRHS(t.Fun)...) 
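To make the identifier matching concrete, here is a small sketch of my own (not upstream code) showing the AST shapes the `*ast.AssignStmt`, `*ast.CallExpr` and `*ast.SelectorExpr` cases above walk, and the identifier lists the cuddle checks later compare with `atLeastOneInListsMatch`:

```go
package sketch

import (
	"fmt"
	"go/ast"
	"go/parser"
)

// Given `resp, err := client.Do(req)`, findLHS collects the assigned names
// ("resp", "err") while findRHS collects the call's arguments before the
// called selector ("req", "client", "Do"). This demo parses just the call.
func demo() {
	expr, err := parser.ParseExpr(`client.Do(req)`)
	if err != nil {
		panic(err)
	}

	call := expr.(*ast.CallExpr)
	sel := call.Fun.(*ast.SelectorExpr)

	fmt.Println(call.Args[0].(*ast.Ident).Name) // req
	fmt.Println(sel.X.(*ast.Ident).Name)        // client
	fmt.Println(sel.Sel.Name)                   // Do
}
```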
- case *ast.CompositeLit: - for _, v := range t.Elts { - rhs = append(rhs, p.findRHS(v)...) - } - case *ast.IfStmt: - rhs = append(rhs, p.findRHS(t.Cond)...) - rhs = append(rhs, p.findRHS(t.Init)...) - case *ast.BinaryExpr: - return append( - p.findRHS(t.X), - p.findRHS(t.Y)..., - ) - case *ast.TypeSwitchStmt: - return p.findRHS(t.Assign) - case *ast.ReturnStmt: - for _, v := range t.Results { - rhs = append(rhs, p.findRHS(v)...) - } - case *ast.BlockStmt: - for _, v := range t.List { - rhs = append(rhs, p.findRHS(v)...) - } - case *ast.SwitchStmt: - return p.findRHS(t.Tag) - case *ast.GoStmt: - return p.findRHS(t.Call) - case *ast.ForStmt: - return p.findRHS(t.Cond) - case *ast.DeferStmt: - return p.findRHS(t.Call) - case *ast.SendStmt: - return p.findLHS(t.Value) - case *ast.IndexExpr: - rhs = append(rhs, p.findRHS(t.Index)...) - rhs = append(rhs, p.findRHS(t.X)...) - case *ast.SliceExpr: - rhs = append(rhs, p.findRHS(t.X)...) - rhs = append(rhs, p.findRHS(t.Low)...) - rhs = append(rhs, p.findRHS(t.High)...) - default: - if x, ok := maybeX(t); ok { - return p.findRHS(x) - } - - p.addWarning(warnUnknownRHS, t.Pos(), t) - } - - return rhs -} - -func (p *Processor) isShortDecl(node ast.Node) bool { - if t, ok := node.(*ast.AssignStmt); ok { - return t.Tok == token.DEFINE - } - - return false -} - -func (p *Processor) findBlockStmt(node ast.Node) []*ast.BlockStmt { - var blocks []*ast.BlockStmt - - switch t := node.(type) { - case *ast.AssignStmt: - for _, x := range t.Rhs { - blocks = append(blocks, p.findBlockStmt(x)...) - } - case *ast.CallExpr: - blocks = append(blocks, p.findBlockStmt(t.Fun)...) - case *ast.FuncLit: - blocks = append(blocks, t.Body) - case *ast.ExprStmt: - blocks = append(blocks, p.findBlockStmt(t.X)...) - case *ast.ReturnStmt: - for _, x := range t.Results { - blocks = append(blocks, p.findBlockStmt(x)...) - } - case *ast.DeferStmt: - blocks = append(blocks, p.findBlockStmt(t.Call)...) - case *ast.GoStmt: - blocks = append(blocks, p.findBlockStmt(t.Call)...) - } - - return blocks -} - -// maybeX extracts the X field from an AST node and returns it with a true value -// if it exists. If the node doesn't have an X field nil and false is returned. -// Known fields with X that are handled: -// IndexExpr, ExprStmt, SelectorExpr, StarExpr, ParentExpr, TypeAssertExpr, -// RangeStmt, UnaryExpr, ParenExpr, SliceExpr, IncDecStmt. -func maybeX(node interface{}) (ast.Node, bool) { - maybeHasX := reflect.Indirect(reflect.ValueOf(node)).FieldByName("X") - if !maybeHasX.IsValid() { - return nil, false - } - - n, ok := maybeHasX.Interface().(ast.Node) - if !ok { - return nil, false - } - - return n, true -} - -func atLeastOneInListsMatch(listOne, listTwo []string) bool { - sliceToMap := func(s []string) map[string]struct{} { - m := map[string]struct{}{} - - for _, v := range s { - m[v] = struct{}{} - } - - return m - } - - m1 := sliceToMap(listOne) - m2 := sliceToMap(listTwo) - - for k1 := range m1 { - if _, ok := m2[k1]; ok { - return true - } - } - - for k2 := range m2 { - if _, ok := m1[k2]; ok { - return true - } - } - - return false -} - -// findLeadingAndTrailingWhitespaces will find leading and trailing whitespaces -// in a node. The method takes comments in consideration which will make the -// parser more gentle. 
-// nolint: gocognit -func (p *Processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, nextStatement ast.Node) { - var ( - allowedLinesBeforeFirstStatement = 1 - commentMap = ast.NewCommentMap(p.fileSet, stmt, p.file.Comments) - blockStatements []ast.Stmt - blockStartLine int - blockEndLine int - blockStartPos token.Pos - blockEndPos token.Pos - ) - - // Depending on the block type, get the statements in the block and where - // the block starts (and ends). - switch t := stmt.(type) { - case *ast.BlockStmt: - blockStatements = t.List - blockStartPos = t.Lbrace - blockEndPos = t.Rbrace - case *ast.CaseClause: - blockStatements = t.Body - blockStartPos = t.Colon - case *ast.CommClause: - blockStatements = t.Body - blockStartPos = t.Colon - default: - p.addWarning(warnWSNodeTypeNotImplemented, stmt.Pos(), stmt) - - return - } - - // Ignore empty blocks even if they have newlines or just comments. - if len(blockStatements) < 1 { - return - } - - blockStartLine = p.fileSet.Position(blockStartPos).Line - blockEndLine = p.fileSet.Position(blockEndPos).Line - - // No whitespace possible if LBrace and RBrace is on the same line. - if blockStartLine == blockEndLine { - return - } - - var ( - firstStatement = blockStatements[0] - lastStatement = blockStatements[len(blockStatements)-1] - seenCommentGroups = 0 - ) - - // Get the comment related to the first statement, we do allow commends in - // the beginning of a block before the first statement. - if c, ok := commentMap[firstStatement]; ok { - for _, commentGroup := range c { - // If the comment group is on the same line as the block start - // (LBrace) we should not consider it. - if p.nodeStart(commentGroup) == blockStartLine { - continue - } - - // We only care about comments before our statement from the comment - // map. As soon as we hit comments after our statement let's break - // out! - if commentGroup.Pos() > firstStatement.Pos() { - break - } - - // We store number of seen comment groups because we allow multiple - // groups with a newline between them; but if the first one has WS - // before it, we're not going to count it to force an error. - if p.config.AllowSeparatedLeadingComment { - cg := p.fileSet.Position(commentGroup.Pos()).Line - - if seenCommentGroups > 0 || cg == blockStartLine+1 { - seenCommentGroups++ - } - } else { - seenCommentGroups++ - } - - // Support both /* multiline */ and //single line comments - for _, c := range commentGroup.List { - allowedLinesBeforeFirstStatement += len(strings.Split(c.Text, "\n")) - } - } - } - - // If we allow separated comments, allow for a space after each group - if p.config.AllowSeparatedLeadingComment { - if seenCommentGroups > 1 { - allowedLinesBeforeFirstStatement += seenCommentGroups - 1 - } else if seenCommentGroups == 1 { - allowedLinesBeforeFirstStatement += 1 - } - } - - // And now if the first statement is passed the number of allowed lines, - // then we had extra WS, possibly before the first comment group. - if p.nodeStart(firstStatement) > blockStartLine+allowedLinesBeforeFirstStatement { - p.addError( - blockStartPos, - reasonBlockStartsWithWS, - ) - } - - // If the blockEndLine is not 0 we're a regular block (not case). 
- if blockEndLine != 0 { - if p.config.AllowTrailingComment { - if lastComment, ok := commentMap[lastStatement]; ok { - var ( - lastCommentGroup = lastComment[len(lastComment)-1] - lastCommentLine = lastCommentGroup.List[len(lastCommentGroup.List)-1] - countNewlines = 0 - ) - - countNewlines += len(strings.Split(lastCommentLine.Text, "\n")) - - // No newlines between trailing comments and end of block. - if p.nodeStart(lastCommentLine)+countNewlines != blockEndLine-1 { - return - } - } - } - - if p.nodeEnd(lastStatement) != blockEndLine-1 && !isExampleFunc(ident) { - p.addError(blockEndPos, reasonBlockEndsWithWS) - } - - return - } - - // If we don't have any nextStatement the trailing whitespace will be - // handled when parsing the switch. If we do have a next statement we can - // see where it starts by getting it's colon position. We set the end of the - // current case to the position of the next case. - switch n := nextStatement.(type) { - case *ast.CaseClause: - blockEndPos = n.Case - case *ast.CommClause: - blockEndPos = n.Case - default: - // No more cases - return - } - - blockEndLine = p.fileSet.Position(blockEndPos).Line - 1 - - var ( - blockSize = blockEndLine - blockStartLine - caseTrailingCommentLines int - ) - - // TODO: I don't know what comments are bound to in cases. For regular - // blocks the last comment is bound to the last statement but for cases - // they are bound to the case clause expression. This will however get us all - // comments and depending on the case expression this gets tricky. - // - // To handle this I get the comment map from the current statement (the case - // itself) and iterate through all groups and all comment within all groups. - // I then get the comments after the last statement but before the next case - // clause and just map each line of comment that way. - for _, commentGroups := range commentMap { - for _, commentGroup := range commentGroups { - for _, comment := range commentGroup.List { - commentLine := p.fileSet.Position(comment.Pos()).Line - - // Ignore comments before the last statement. - if commentLine <= p.nodeStart(lastStatement) { - continue - } - - // Ignore comments after the end of this case. - if commentLine > blockEndLine { - continue - } - - // This allows /* multiline */ comments with newlines as well - // as regular (//) ones - caseTrailingCommentLines += len(strings.Split(comment.Text, "\n")) - } - } - } - - hasTrailingWhitespace := p.nodeEnd(lastStatement)+caseTrailingCommentLines != blockEndLine - - // If the force trailing limit is configured and we don't end with a newline. - if p.config.ForceCaseTrailingWhitespaceLimit > 0 && !hasTrailingWhitespace { - // Check if the block size is too big to miss the newline. 
- if blockSize >= p.config.ForceCaseTrailingWhitespaceLimit { - p.addError(lastStatement.Pos(), reasonCaseBlockTooCuddly) - } - } -} - -func isExampleFunc(ident *ast.Ident) bool { - return ident != nil && strings.HasPrefix(ident.Name, "Example") -} - -func (p *Processor) nodeStart(node ast.Node) int { - return p.fileSet.Position(node.Pos()).Line -} - -func (p *Processor) nodeEnd(node ast.Node) int { - var line = p.fileSet.Position(node.End()).Line - - if isEmptyLabeledStmt(node) { - return p.fileSet.Position(node.Pos()).Line - } - - return line -} - -func isEmptyLabeledStmt(node ast.Node) bool { - v, ok := node.(*ast.LabeledStmt) - if !ok { - return false - } - - _, empty := v.Stmt.(*ast.EmptyStmt) - - return empty -} - -// Add an error for the file and line number for the current token.Pos with the -// given reason. -func (p *Processor) addError(pos token.Pos, reason string) { - position := p.fileSet.Position(pos) - - p.result = append(p.result, Result{ - FileName: position.Filename, - LineNumber: position.Line, - Position: position, - Reason: reason, - }) -} - -func (p *Processor) addWarning(w string, pos token.Pos, t interface{}) { - position := p.fileSet.Position(pos) - - p.warnings = append(p.warnings, - fmt.Sprintf("%s:%d: %s (%T)", position.Filename, position.Line, w, t), - ) -} diff --git a/vendor/github.com/bombsimon/wsl/v3/.gitignore b/vendor/github.com/bombsimon/wsl/v4/.gitignore similarity index 100% rename from vendor/github.com/bombsimon/wsl/v3/.gitignore rename to vendor/github.com/bombsimon/wsl/v4/.gitignore diff --git a/vendor/github.com/bombsimon/wsl/v4/.golangci.yml b/vendor/github.com/bombsimon/wsl/v4/.golangci.yml new file mode 100644 index 0000000000..543012008f --- /dev/null +++ b/vendor/github.com/bombsimon/wsl/v4/.golangci.yml @@ -0,0 +1,81 @@ +--- +run: + deadline: 1m + issues-exit-code: 1 + tests: true + skip-dirs: + - vendor$ + +output: + format: colored-line-number + print-issued-lines: false + +linters-settings: + gocognit: + min-complexity: 10 + + depguard: + list-type: blacklist + include-go-root: false + packages: + - github.com/davecgh/go-spew/spew + + misspell: + locale: US + + gocritic: + # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint run` + # to see all tags and checks. Empty list by default. See + # https://github.com/go-critic/go-critic#usage -> section "Tags". 
+    enabled-tags:
+      - diagnostic
+      - experimental
+      - opinionated
+      - performance
+      - style
+
+linters:
+  enable-all: true
+  disable:
+    - cyclop
+    - deadcode
+    - depguard
+    - dupl
+    - dupword
+    - exhaustivestruct
+    - exhaustruct
+    - forbidigo
+    - funlen
+    - gci
+    - gocognit
+    - gocyclo
+    - godox
+    - golint
+    - gomnd
+    - ifshort
+    - interfacer
+    - lll
+    - maintidx
+    - maligned
+    - nakedret
+    - nestif
+    - nlreturn
+    - nosnakecase
+    - paralleltest
+    - prealloc
+    - rowserrcheck
+    - scopelint
+    - structcheck
+    - testpackage
+    - varcheck
+    - varnamelen
+    - wastedassign
+  fast: false
+
+
+issues:
+  exclude-use-default: true
+  max-issues-per-linter: 0
+  max-same-issues: 0
+
+# vim: set sw=2 ts=2 et:
diff --git a/vendor/github.com/bombsimon/wsl/v3/LICENSE b/vendor/github.com/bombsimon/wsl/v4/LICENSE
similarity index 100%
rename from vendor/github.com/bombsimon/wsl/v3/LICENSE
rename to vendor/github.com/bombsimon/wsl/v4/LICENSE
diff --git a/vendor/github.com/bombsimon/wsl/v4/README.md b/vendor/github.com/bombsimon/wsl/v4/README.md
new file mode 100644
index 0000000000..0bcf01d96a
--- /dev/null
+++ b/vendor/github.com/bombsimon/wsl/v4/README.md
@@ -0,0 +1,98 @@
+# wsl - Whitespace Linter
+
+[![forthebadge](https://forthebadge.com/images/badges/made-with-go.svg)](https://forthebadge.com)
+[![forthebadge](https://forthebadge.com/images/badges/built-with-love.svg)](https://forthebadge.com)
+
+[![GitHub Actions](https://github.com/bombsimon/wsl/actions/workflows/go.yml/badge.svg)](https://github.com/bombsimon/wsl/actions/workflows/go.yml)
+[![Coverage Status](https://coveralls.io/repos/github/bombsimon/wsl/badge.svg?branch=master)](https://coveralls.io/github/bombsimon/wsl?branch=master)
+
+`wsl` is a linter that enforces a very **non-scientific** vision of how to make
+code more readable by enforcing empty lines at the right places.
+
+**This linter is aggressive** and a lot of projects I've tested it on have
+failed miserably. For this linter to be useful at all I want to be open to new
+ideas, configurations and discussions! Also note that some of the warnings might
+be bugs or unintentional false positives, so I would love an
+[issue](https://github.com/bombsimon/wsl/issues/new) to fix, discuss, change or
+make something configurable!
+
+## Installation
+
+```sh
+# Latest release
+go install github.com/bombsimon/wsl/v4/cmd/wsl
+
+# Main branch
+go install github.com/bombsimon/wsl/v4/cmd/wsl@master
+```
+
+## Usage
+
+> **Note**: This linter provides a fixer that can fix most issues with the
+> `--fix` flag. However, currently `golangci-lint` [does not support suggested
+> fixes](https://github.com/golangci/golangci-lint/issues/1779) so the `--fix`
+> flag in `golangci-lint` will **not** work.
+
+`wsl` uses the [analysis](https://pkg.go.dev/golang.org/x/tools/go/analysis)
+package, meaning it will operate on package level with the default analysis flags
+and way of working.
+
+```sh
+wsl --help
+wsl [flags]
+
+wsl --allow-cuddle-declarations --fix ./...
+```
+
+`wsl` is also integrated into [`golangci-lint`](https://golangci-lint.run):
+
+```sh
+golangci-lint run --no-config --disable-all --enable wsl
+```
+
+## Issues and configuration
+
+The linter supports a few ways to configure it to satisfy more than one kind of
+code style. These settings could be set either with flags or with YAML
+configuration if used via `golangci-lint`.
+
+The supported configuration can be found [in the
+documentation](doc/configuration.md).
+
+Below is the available checklist for any hit from `wsl`. If you do not see
+yours, feel free to raise an [issue](https://github.com/bombsimon/wsl/issues/new).
+
+> **Note**: this linter doesn't take into consideration the issues that will be
+> fixed with `go fmt -s` so ensure that the code is properly formatted before
+> use.
+
+* [Anonymous switch statements should never be cuddled](doc/rules.md#anonymous-switch-statements-should-never-be-cuddled)
+* [Append only allowed to cuddle with appended value](doc/rules.md#append-only-allowed-to-cuddle-with-appended-value)
+* [Assignments should only be cuddled with other assignments](doc/rules.md#assignments-should-only-be-cuddled-with-other-assignments)
+* [Block should not end with a whitespace (or comment)](doc/rules.md#block-should-not-end-with-a-whitespace-or-comment)
+* [Block should not start with a whitespace](doc/rules.md#block-should-not-start-with-a-whitespace)
+* [Case block should end with newline at this size](doc/rules.md#case-block-should-end-with-newline-at-this-size)
+* [Branch statements should not be cuddled if block has more than two lines](doc/rules.md#branch-statements-should-not-be-cuddled-if-block-has-more-than-two-lines)
+* [Declarations should never be cuddled](doc/rules.md#declarations-should-never-be-cuddled)
+* [Defer statements should only be cuddled with expressions on same variable](doc/rules.md#defer-statements-should-only-be-cuddled-with-expressions-on-same-variable)
+* [Expressions should not be cuddled with blocks](doc/rules.md#expressions-should-not-be-cuddled-with-blocks)
+* [Expressions should not be cuddled with declarations or returns](doc/rules.md#expressions-should-not-be-cuddled-with-declarations-or-returns)
+* [For statement without condition should never be cuddled](doc/rules.md#for-statement-without-condition-should-never-be-cuddled)
+* [For statements should only be cuddled with assignments used in the iteration](doc/rules.md#for-statements-should-only-be-cuddled-with-assignments-used-in-the-iteration)
+* [Go statements can only invoke functions assigned on line above](doc/rules.md#go-statements-can-only-invoke-functions-assigned-on-line-above)
+* [If statements should only be cuddled with assignments](doc/rules.md#if-statements-should-only-be-cuddled-with-assignments)
+* [If statements should only be cuddled with assignments used in the if statement itself](doc/rules.md#if-statements-should-only-be-cuddled-with-assignments-used-in-the-if-statement-itself)
+* [If statements that check an error must be cuddled with the statement that assigned the error](doc/rules.md#if-statements-that-check-an-error-must-be-cuddled-with-the-statement-that-assigned-the-error)
+* [Only cuddled expressions if assigning variable or using from line above](doc/rules.md#only-cuddled-expressions-if-assigning-variable-or-using-from-line-above)
+* [Only one cuddle assignment allowed before defer statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-defer-statement)
+* [Only one cuddle assignment allowed before for statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-for-statement)
+* [Only one cuddle assignment allowed before go statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-go-statement)
+* [Only one cuddle assignment allowed before if statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-if-statement)
+* [Only one cuddle assignment allowed before range statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-range-statement)
+* [Only one cuddle assignment allowed before switch
statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-switch-statement) +* [Only one cuddle assignment allowed before type switch statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-type-switch-statement) +* [Ranges should only be cuddled with assignments used in the iteration](doc/rules.md#ranges-should-only-be-cuddled-with-assignments-used-in-the-iteration) +* [Return statements should not be cuddled if block has more than two lines](doc/rules.md#return-statements-should-not-be-cuddled-if-block-has-more-than-two-lines) +* [Short declarations should cuddle only with other short declarations](doc/rules.md#short-declaration-should-cuddle-only-with-other-short-declarations) +* [Switch statements should only be cuddled with variables switched](doc/rules.md#switch-statements-should-only-be-cuddled-with-variables-switched) +* [Type switch statements should only be cuddled with variables switched](doc/rules.md#type-switch-statements-should-only-be-cuddled-with-variables-switched) diff --git a/vendor/github.com/bombsimon/wsl/v4/analyzer.go b/vendor/github.com/bombsimon/wsl/v4/analyzer.go new file mode 100644 index 0000000000..b8eac15875 --- /dev/null +++ b/vendor/github.com/bombsimon/wsl/v4/analyzer.go @@ -0,0 +1,141 @@ +package wsl + +import ( + "flag" + "strings" + + "golang.org/x/tools/go/analysis" +) + +func NewAnalyzer(config *Configuration) *analysis.Analyzer { + wa := &wslAnalyzer{config: config} + + return &analysis.Analyzer{ + Name: "wsl", + Doc: "add or remove empty lines", + Flags: wa.flags(), + Run: wa.run, + RunDespiteErrors: true, + } +} + +func defaultConfig() *Configuration { + return &Configuration{ + AllowAssignAndAnythingCuddle: false, + AllowAssignAndCallCuddle: true, + AllowCuddleDeclaration: false, + AllowMultiLineAssignCuddle: true, + AllowSeparatedLeadingComment: false, + AllowTrailingComment: false, + ForceCuddleErrCheckAndAssign: false, + ForceExclusiveShortDeclarations: false, + StrictAppend: true, + AllowCuddleWithCalls: []string{"Lock", "RLock"}, + AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, + ErrorVariableNames: []string{"err"}, + ForceCaseTrailingWhitespaceLimit: 0, + } +} + +// wslAnalyzer is a wrapper around the configuration which is used to be able to +// set the configuration when creating the analyzer and later be able to update +// flags and running method. +type wslAnalyzer struct { + config *Configuration +} + +func (wa *wslAnalyzer) flags() flag.FlagSet { + flags := flag.NewFlagSet("", flag.ExitOnError) + + // If we have a configuration set we're not running from the command line so + // we don't use any flags. 
+	if wa.config != nil {
+		return *flags
+	}
+
+	wa.config = defaultConfig()
+
+	flags.BoolVar(&wa.config.AllowAssignAndAnythingCuddle, "allow-assign-and-anything", false, "Allow assignments and anything to be cuddled")
+	flags.BoolVar(&wa.config.AllowAssignAndCallCuddle, "allow-assign-and-call", true, "Allow assignments and calls to be cuddled (if using same variable/type)")
+	flags.BoolVar(&wa.config.AllowCuddleDeclaration, "allow-cuddle-declarations", false, "Allow declarations to be cuddled")
+	flags.BoolVar(&wa.config.AllowMultiLineAssignCuddle, "allow-multi-line-assign", true, "Allow cuddling with multi line assignments")
+	flags.BoolVar(&wa.config.AllowSeparatedLeadingComment, "allow-separated-leading-comment", false, "Allow empty newlines in leading comments")
+	flags.BoolVar(&wa.config.AllowTrailingComment, "allow-trailing-comment", false, "Allow blocks to end with a comment")
+	flags.BoolVar(&wa.config.ForceCuddleErrCheckAndAssign, "force-err-cuddling", false, "Force cuddling of error checks with error var assignment")
+	flags.BoolVar(&wa.config.ForceExclusiveShortDeclarations, "force-short-decl-cuddling", false, "Force short declarations to cuddle by themselves")
+	flags.BoolVar(&wa.config.StrictAppend, "strict-append", true, "Strict rules for append")
+	flags.IntVar(&wa.config.ForceCaseTrailingWhitespaceLimit, "force-case-trailing-whitespace", 0, "Force newlines for case blocks > this number.")
+
+	flags.Var(&multiStringValue{slicePtr: &wa.config.AllowCuddleWithCalls}, "allow-cuddle-with-calls", "Comma separated list of idents that can have cuddles after")
+	flags.Var(&multiStringValue{slicePtr: &wa.config.AllowCuddleWithRHS}, "allow-cuddle-with-rhs", "Comma separated list of idents that can have cuddles before")
+	flags.Var(&multiStringValue{slicePtr: &wa.config.ErrorVariableNames}, "error-variable-names", "Comma separated list of error variable names")
+
+	return *flags
+}
+
+func (wa *wslAnalyzer) run(pass *analysis.Pass) (interface{}, error) {
+	for _, file := range pass.Files {
+		filename := pass.Fset.PositionFor(file.Pos(), false).Filename
+		if !strings.HasSuffix(filename, ".go") {
+			continue
+		}
+
+		processor := newProcessorWithConfig(file, pass.Fset, wa.config)
+		processor.parseAST()
+
+		for pos, fix := range processor.result {
+			textEdits := []analysis.TextEdit{}
+			for _, f := range fix.fixRanges {
+				textEdits = append(textEdits, analysis.TextEdit{
+					Pos:     f.fixRangeStart,
+					End:     f.fixRangeEnd,
+					NewText: []byte("\n"),
+				})
+			}
+
+			pass.Report(analysis.Diagnostic{
+				Pos:      pos,
+				Category: "whitespace",
+				Message:  fix.reason,
+				SuggestedFixes: []analysis.SuggestedFix{
+					{
+						TextEdits: textEdits,
+					},
+				},
+			})
+		}
+	}
+
+	//nolint:nilnil // A pass doesn't need to return anything.
+	return nil, nil
+}
+
+// multiStringValue is a flag that supports multiple values. It's implemented to
+// contain a pointer to a string slice that will be overwritten when the flag's
+// `Set` method is called.
+type multiStringValue struct {
+	slicePtr *[]string
+}
+
+// Set implements the flag.Value interface and will overwrite the pointer to the
+// slice with a new pointer after splitting the flag by comma.
+func (m *multiStringValue) Set(value string) error {
+	s := []string{}
+
+	for _, v := range strings.Split(value, ",") {
+		s = append(s, strings.TrimSpace(v))
+	}
+
+	*m.slicePtr = s
+
+	return nil
+}
+
+// String implements the flag.Value interface.
+func (m *multiStringValue) String() string { + if m.slicePtr == nil { + return "" + } + + return strings.Join(*m.slicePtr, ", ") +} diff --git a/vendor/github.com/bombsimon/wsl/v4/wsl.go b/vendor/github.com/bombsimon/wsl/v4/wsl.go new file mode 100644 index 0000000000..6fd33335a1 --- /dev/null +++ b/vendor/github.com/bombsimon/wsl/v4/wsl.go @@ -0,0 +1,1411 @@ +package wsl + +import ( + "fmt" + "go/ast" + "go/token" + "reflect" + "sort" + "strings" +) + +// Error reason strings. +const ( + reasonAnonSwitchCuddled = "anonymous switch statements should never be cuddled" + reasonAppendCuddledWithoutUse = "append only allowed to cuddle with appended value" + reasonAssignsCuddleAssign = "assignments should only be cuddled with other assignments" + reasonBlockEndsWithWS = "block should not end with a whitespace (or comment)" + reasonBlockStartsWithWS = "block should not start with a whitespace" + reasonCaseBlockTooCuddly = "case block should end with newline at this size" + reasonDeferCuddledWithOtherVar = "defer statements should only be cuddled with expressions on same variable" + reasonExprCuddlingNonAssignedVar = "only cuddled expressions if assigning variable or using from line above" + reasonExpressionCuddledWithBlock = "expressions should not be cuddled with blocks" + reasonExpressionCuddledWithDeclOrRet = "expressions should not be cuddled with declarations or returns" + reasonForCuddledAssignWithoutUse = "for statements should only be cuddled with assignments used in the iteration" + reasonForWithoutCondition = "for statement without condition should never be cuddled" + reasonGoFuncWithoutAssign = "go statements can only invoke functions assigned on line above" + reasonMultiLineBranchCuddle = "branch statements should not be cuddled if block has more than two lines" + reasonMustCuddleErrCheck = "if statements that check an error must be cuddled with the statement that assigned the error" + reasonNeverCuddleDeclare = "declarations should never be cuddled" + reasonOnlyCuddle2LineReturn = "return statements should not be cuddled if block has more than two lines" + reasonOnlyCuddleIfWithAssign = "if statements should only be cuddled with assignments" + reasonOnlyCuddleWithUsedAssign = "if statements should only be cuddled with assignments used in the if statement itself" + reasonOnlyOneCuddleBeforeDefer = "only one cuddle assignment allowed before defer statement" + reasonOnlyOneCuddleBeforeFor = "only one cuddle assignment allowed before for statement" + reasonOnlyOneCuddleBeforeGo = "only one cuddle assignment allowed before go statement" + reasonOnlyOneCuddleBeforeIf = "only one cuddle assignment allowed before if statement" + reasonOnlyOneCuddleBeforeRange = "only one cuddle assignment allowed before range statement" + reasonOnlyOneCuddleBeforeSwitch = "only one cuddle assignment allowed before switch statement" + reasonOnlyOneCuddleBeforeTypeSwitch = "only one cuddle assignment allowed before type switch statement" + reasonRangeCuddledWithoutUse = "ranges should only be cuddled with assignments used in the iteration" + reasonShortDeclNotExclusive = "short declaration should cuddle only with other short declarations" + reasonSwitchCuddledWithoutUse = "switch statements should only be cuddled with variables switched" + reasonTypeSwitchCuddledWithoutUse = "type switch statements should only be cuddled with variables switched" +) + +// Warning strings. 
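+// These strings are collected via addWarning when the processor encounters an
+// AST node it does not know how to handle; they are kept in p.warnings rather
+// than reported as diagnostics.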
+const (
+	warnTypeNotImplement           = "type not implemented"
+	warnStmtNotImplemented         = "stmt type not implemented"
+	warnBodyStmtTypeNotImplemented = "body statement type not implemented "
+	warnWSNodeTypeNotImplemented   = "whitespace node type not implemented "
+	warnUnknownLHS                 = "UNKNOWN LHS"
+	warnUnknownRHS                 = "UNKNOWN RHS"
+)
+
+// Configuration represents configurable settings for the linter.
+type Configuration struct {
+	// StrictAppend will do strict checking when assigning from append (x =
+	// append(x, y)). If this is set to true the append call must append either
+	// a variable assigned, called or used on the line above. Example of what
+	// is not allowed when this is true:
+	//
+	//  x := []string{}
+	//  y := "not going in X"
+	//  x = append(x, "not y") // This is not allowed with StrictAppend
+	//  z := "going in X"
+	//
+	//  x = append(x, z) // This is allowed with StrictAppend
+	//
+	//  m := transform(z)
+	//  x = append(x, z) // So is this because z is used above.
+	StrictAppend bool
+
+	// AllowAssignAndCallCuddle allows assignments to be cuddled with variables
+	// used in calls on line above and calls to be cuddled with assignments of
+	// variables used in call on line above.
+	// Example supported with this set to true:
+	//
+	//  x.Call()
+	//  x = Assign()
+	//  x.AnotherCall()
+	//  x = AnotherAssign()
+	AllowAssignAndCallCuddle bool
+
+	// AllowAssignAndAnythingCuddle allows assignments to be cuddled with anything.
+	// Example supported with this set to true:
+	//  if x == 1 {
+	//      x = 0
+	//  }
+	//  z := x + 2
+	//  fmt.Println("x")
+	//  y := "x"
+	AllowAssignAndAnythingCuddle bool
+
+	// AllowMultiLineAssignCuddle allows cuddling to assignments even if they
+	// span over multiple lines. This defaults to true which allows the
+	// following example:
+	//
+	//  err := function(
+	//      "multiple", "lines",
+	//  )
+	//  if err != nil {
+	//      // ...
+	//  }
+	AllowMultiLineAssignCuddle bool
+
+	// If the number of lines in a case block is equal to or larger than this
+	// number, the case *must* end with a newline.
+	ForceCaseTrailingWhitespaceLimit int
+
+	// AllowTrailingComment will allow blocks to end with comments.
+	AllowTrailingComment bool
+
+	// AllowSeparatedLeadingComment will allow multiple comments in the
+	// beginning of a block separated with newline. Example:
+	//  func () {
+	//      // Comment one
+	//
+	//      // Comment two
+	//      fmt.Println("x")
+	//  }
+	AllowSeparatedLeadingComment bool
+
+	// AllowCuddleDeclaration will allow multiple var/declaration statements to
+	// be cuddled. This defaults to false but setting it to true will enable the
+	// following example:
+	//  var foo bool
+	//  var err error
+	AllowCuddleDeclaration bool
+
+	// AllowCuddleWithCalls is a list of call idents that everything can be
+	// cuddled with. Defaults to calls looking like locks to support a flow like
+	// this:
+	//
+	//  mu.Lock()
+	//  allow := thisAssignment
+	AllowCuddleWithCalls []string
+
+	// AllowCuddleWithRHS is a list of right hand side variables that are allowed
+	// to be cuddled with anything. Defaults to assignments or calls looking
+	// like unlocks to support a flow like this:
+	//
+	//  allow := thisAssignment()
+	//  mu.Unlock()
+	AllowCuddleWithRHS []string
+
+	// ForceCuddleErrCheckAndAssign will cause an error when an If statement that
+	// checks an error variable doesn't cuddle with the assignment of that variable.
+ // This defaults to false but setting it to true will cause the following + // to generate an error: + // + // err := ProduceError() + // + // if err != nil { + // return err + // } + ForceCuddleErrCheckAndAssign bool + + // When ForceCuddleErrCheckAndAssign is enabled this is a list of names + // used for error variables to check for in the conditional. + // Defaults to just "err" + ErrorVariableNames []string + + // ForceExclusiveShortDeclarations will cause an error if a short declaration + // (:=) cuddles with anything other than another short declaration. For example + // + // a := 2 + // b := 3 + // + // is allowed, but + // + // a := 2 + // b = 3 + // + // is not allowed. This logic overrides ForceCuddleErrCheckAndAssign among others. + ForceExclusiveShortDeclarations bool +} + +// fix is a range to fixup. +type fix struct { + fixRangeStart token.Pos + fixRangeEnd token.Pos +} + +// result represents the result of one error. +type result struct { + fixRanges []fix + reason string +} + +// processor is the type that keeps track of the file and fileset and holds the +// results from parsing the AST. +type processor struct { + config *Configuration + file *ast.File + fileSet *token.FileSet + result map[token.Pos]result + warnings []string +} + +// newProcessorWithConfig will create a Processor with the passed configuration. +func newProcessorWithConfig(file *ast.File, fileSet *token.FileSet, cfg *Configuration) *processor { + return &processor{ + config: cfg, + file: file, + fileSet: fileSet, + result: make(map[token.Pos]result), + } +} + +// parseAST will parse the AST attached to the Processor instance. +func (p *processor) parseAST() { + for _, d := range p.file.Decls { + switch v := d.(type) { + case *ast.FuncDecl: + p.parseBlockBody(v.Name, v.Body) + case *ast.GenDecl: + // `go fmt` will handle proper spacing for GenDecl such as imports, + // constants etc. + default: + p.addWarning(warnTypeNotImplement, d.Pos(), v) + } + } +} + +// parseBlockBody will parse any kind of block statements such as switch cases +// and if statements. A list of Result is returned. +func (p *processor) parseBlockBody(ident *ast.Ident, block *ast.BlockStmt) { + // Nothing to do if there's no value. + if reflect.ValueOf(block).IsNil() { + return + } + + // Start by finding leading and trailing whitespaces. + p.findLeadingAndTrailingWhitespaces(ident, block, nil) + + // Parse the block body contents. + p.parseBlockStatements(block.List) +} + +// parseBlockStatements will parse all the statements found in the body of a +// node. A list of Result is returned. +func (p *processor) parseBlockStatements(statements []ast.Stmt) { + for i, stmt := range statements { + // Start by checking if this statement is another block (other than if, + // for and range). This could be assignment to a function, defer or go + // call with an inline function or similar. If this is found we start by + // parsing this body block before moving on. + for _, stmtBlocks := range p.findBlockStmt(stmt) { + p.parseBlockBody(nil, stmtBlocks) + } + + firstBodyStatement := p.firstBodyStatement(i, statements) + + // First statement, nothing to do. 
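+		// A statement can only be cuddled with the statement directly
+		// above it, so the first statement in a block has nothing to
+		// compare against.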
+ if i == 0 { + continue + } + + previousStatement := statements[i-1] + previousStatementIsMultiline := p.nodeStart(previousStatement) != p.nodeEnd(previousStatement) + cuddledWithLastStmt := p.nodeEnd(previousStatement) == p.nodeStart(stmt)-1 + + // If we're not cuddled and we don't need to enforce err-check cuddling + // then we can bail out here + if !cuddledWithLastStmt && !p.config.ForceCuddleErrCheckAndAssign { + continue + } + + // We don't force error cuddling for multilines. (#86) + if p.config.ForceCuddleErrCheckAndAssign && previousStatementIsMultiline && !cuddledWithLastStmt { + continue + } + + // Extract assigned variables on the line above + // which is the only thing we allow cuddling with. If the assignment is + // made over multiple lines we should not allow cuddling. + var assignedOnLineAbove []string + + // We want to keep track of what was called on the line above to support + // special handling of things such as mutexes. + var calledOnLineAbove []string + + // Check if the previous statement spans over multiple lines. + cuddledWithMultiLineAssignment := cuddledWithLastStmt && p.nodeStart(previousStatement) != p.nodeStart(stmt)-1 + + // Ensure previous line is not a multi line assignment and if not get + // rightAndLeftHandSide assigned variables. + if !cuddledWithMultiLineAssignment { + assignedOnLineAbove = p.findLHS(previousStatement) + calledOnLineAbove = p.findRHS(previousStatement) + } + + // If previous assignment is multi line and we allow it, fetch + // assignments (but only assignments). + if cuddledWithMultiLineAssignment && p.config.AllowMultiLineAssignCuddle { + if _, ok := previousStatement.(*ast.AssignStmt); ok { + assignedOnLineAbove = p.findLHS(previousStatement) + } + } + + // We could potentially have a block which require us to check the first + // argument before ruling out an allowed cuddle. + var calledOrAssignedFirstInBlock []string + + if firstBodyStatement != nil { + calledOrAssignedFirstInBlock = append(p.findLHS(firstBodyStatement), p.findRHS(firstBodyStatement)...) + } + + var ( + leftHandSide = p.findLHS(stmt) + rightHandSide = p.findRHS(stmt) + rightAndLeftHandSide = append(leftHandSide, rightHandSide...) + calledOrAssignedOnLineAbove = append(calledOnLineAbove, assignedOnLineAbove...) + ) + + // If we called some kind of lock on the line above we allow cuddling + // anything. + if atLeastOneInListsMatch(calledOnLineAbove, p.config.AllowCuddleWithCalls) { + continue + } + + // If we call some kind of unlock on this line we allow cuddling with + // anything. + if atLeastOneInListsMatch(rightHandSide, p.config.AllowCuddleWithRHS) { + continue + } + + nStatementsBefore := func(n int) bool { + if i < n { + return false + } + + for j := 1; j < n; j++ { + s1 := statements[i-j] + s2 := statements[i-(j+1)] + + if p.nodeStart(s1)-1 != p.nodeEnd(s2) { + return false + } + } + + return true + } + + nStatementsAfter := func(n int) bool { + if len(statements)-1 < i+n { + return false + } + + for j := 0; j < n; j++ { + s1 := statements[i+j] + s2 := statements[i+j+1] + + if p.nodeEnd(s1)+1 != p.nodeStart(s2) { + return false + } + } + + return true + } + + isLastStatementInBlockOfOnlyTwoLines := func() bool { + // If we're the last statement, check if there's no more than two + // lines from the starting statement and the end of this statement. 
+ // This is to support short return functions such as: + // func (t *Typ) X() { + // t.X = true + // return t + // } + if len(statements) == 2 && i == 1 { + if p.nodeEnd(stmt)-p.nodeStart(previousStatement) <= 2 { + return true + } + } + + return false + } + + // If it's a short declaration we should not cuddle with anything else + // if ForceExclusiveShortDeclarations is set on; either this or the + // previous statement could be the short decl, so we'll find out which + // it was and use *that* statement's position + if p.config.ForceExclusiveShortDeclarations && cuddledWithLastStmt { + if p.isShortDecl(stmt) && !p.isShortDecl(previousStatement) { + var reportNode ast.Node = previousStatement + + cm := ast.NewCommentMap(p.fileSet, stmt, p.file.Comments) + if cg, ok := cm[stmt]; ok && len(cg) > 0 { + for _, c := range cg { + if c.Pos() > previousStatement.End() && c.End() < stmt.Pos() { + reportNode = c + } + } + } + + p.addErrorRange( + stmt.Pos(), + reportNode.End(), + reportNode.End(), + reasonShortDeclNotExclusive, + ) + } else if p.isShortDecl(previousStatement) && !p.isShortDecl(stmt) { + p.addErrorRange( + previousStatement.Pos(), + stmt.Pos(), + stmt.Pos(), + reasonShortDeclNotExclusive, + ) + } + } + + // If it's not an if statement and we're not cuddled move on. The only + // reason we need to keep going for if statements is to check if we + // should be cuddled with an error check. + if _, ok := stmt.(*ast.IfStmt); !ok { + if !cuddledWithLastStmt { + continue + } + } + + reportNewlineTwoLinesAbove := func(n1, n2 ast.Node, reason string) { + if atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) || + atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + // If both the assignment on the line above _and_ the assignment + // two lines above is part of line or first in block, add the + // newline as if non were. + _, isAssignmentTwoLinesAbove := statements[i-2].(*ast.AssignStmt) + assignedTwoLinesAbove := p.findLHS(statements[i-2]) + + if isAssignmentTwoLinesAbove && + (atLeastOneInListsMatch(rightAndLeftHandSide, assignedTwoLinesAbove) || + atLeastOneInListsMatch(assignedTwoLinesAbove, calledOrAssignedFirstInBlock)) { + p.addWhitespaceBeforeError(n1, reason) + } else { + // If the variable on the line above is allowed to be + // cuddled, break two lines above so we keep the proper + // cuddling. + p.addErrorRange(n1.Pos(), n2.Pos(), n2.Pos(), reason) + } + } else { + // If not, break here so we separate the cuddled variable. + p.addWhitespaceBeforeError(n1, reason) + } + } + + switch t := stmt.(type) { + case *ast.IfStmt: + checkingErrInitializedInline := func() bool { + if t.Init == nil { + return false + } + + // Variables were initialized inline in the if statement + // Let's make sure it's the err just to be safe + return atLeastOneInListsMatch(p.findLHS(t.Init), p.config.ErrorVariableNames) + } + + if !cuddledWithLastStmt { + checkingErr := atLeastOneInListsMatch(rightAndLeftHandSide, p.config.ErrorVariableNames) + if checkingErr { + // We only want to enforce cuddling error checks if the + // error was assigned on the line above. See + // https://github.com/bombsimon/wsl/issues/78. + // This is needed since `assignedOnLineAbove` is not + // actually just assignments but everything from LHS in the + // previous statement. This means that if previous line was + // `if err ...`, `err` will now be in the list + // `assignedOnLineAbove`. 
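+					//
+					// The shape enforced here is (sketch):
+					//
+					//  err := DoSomething()
+					//  if err != nil {
+					//      return err
+					//  }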
+ if _, ok := previousStatement.(*ast.AssignStmt); !ok { + continue + } + + if checkingErrInitializedInline() { + continue + } + + if atLeastOneInListsMatch(assignedOnLineAbove, p.config.ErrorVariableNames) { + p.addErrorRange( + stmt.Pos(), + previousStatement.End(), + stmt.Pos(), + reasonMustCuddleErrCheck, + ) + } + } + + continue + } + + if len(assignedOnLineAbove) == 0 { + p.addWhitespaceBeforeError(t, reasonOnlyCuddleIfWithAssign) + continue + } + + if nStatementsBefore(2) { + reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeIf) + continue + } + + if atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { + continue + } + + if atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + continue + } + + p.addWhitespaceBeforeError(t, reasonOnlyCuddleWithUsedAssign) + case *ast.ReturnStmt: + if isLastStatementInBlockOfOnlyTwoLines() { + continue + } + + p.addWhitespaceBeforeError(t, reasonOnlyCuddle2LineReturn) + case *ast.BranchStmt: + if isLastStatementInBlockOfOnlyTwoLines() { + continue + } + + p.addWhitespaceBeforeError(t, reasonMultiLineBranchCuddle) + case *ast.AssignStmt: + // append is usually an assignment but should not be allowed to be + // cuddled with anything not appended. + if len(rightHandSide) > 0 && rightHandSide[len(rightHandSide)-1] == "append" { + if p.config.StrictAppend { + if !atLeastOneInListsMatch(calledOrAssignedOnLineAbove, rightHandSide) { + p.addWhitespaceBeforeError(t, reasonAppendCuddledWithoutUse) + } + } + + continue + } + + switch previousStatement.(type) { + case *ast.AssignStmt, *ast.IncDecStmt: + continue + } + + if p.config.AllowAssignAndAnythingCuddle { + continue + } + + if _, ok := previousStatement.(*ast.DeclStmt); ok && p.config.AllowCuddleDeclaration { + continue + } + + // If the assignment is from a type or variable called on the line + // above we can allow it by setting AllowAssignAndCallCuddle to + // true. + // Example (x is used): + // x.function() + // a.Field = x.anotherFunction() + if p.config.AllowAssignAndCallCuddle { + if atLeastOneInListsMatch(calledOrAssignedOnLineAbove, rightAndLeftHandSide) { + continue + } + } + + p.addWhitespaceBeforeError(t, reasonAssignsCuddleAssign) + case *ast.IncDecStmt: + switch previousStatement.(type) { + case *ast.AssignStmt, *ast.IncDecStmt: + continue + } + + p.addWhitespaceBeforeError(t, reasonAssignsCuddleAssign) + + case *ast.DeclStmt: + if !p.config.AllowCuddleDeclaration { + p.addWhitespaceBeforeError(t, reasonNeverCuddleDeclare) + } + case *ast.ExprStmt: + switch previousStatement.(type) { + case *ast.DeclStmt, *ast.ReturnStmt: + if p.config.AllowAssignAndCallCuddle && p.config.AllowCuddleDeclaration { + continue + } + + p.addWhitespaceBeforeError(t, reasonExpressionCuddledWithDeclOrRet) + case *ast.IfStmt, *ast.RangeStmt, *ast.SwitchStmt: + p.addWhitespaceBeforeError(t, reasonExpressionCuddledWithBlock) + } + + // If the expression is called on a type or variable used or + // assigned on the line we can allow it by setting + // AllowAssignAndCallCuddle to true. + // Example of allowed cuddled (x is used): + // a.Field = x.func() + // x.function() + if p.config.AllowAssignAndCallCuddle { + if atLeastOneInListsMatch(calledOrAssignedOnLineAbove, rightAndLeftHandSide) { + continue + } + } + + // If we assigned variables on the line above but didn't use them in + // this expression there should probably be a newline between them. 
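+			//
+			// E.g. (sketch):
+			//
+			//  x := GetX()
+			//  fmt.Println("unrelated") // wants a newline after the assignment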
+ if len(assignedOnLineAbove) > 0 && !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { + p.addWhitespaceBeforeError(t, reasonExprCuddlingNonAssignedVar) + } + case *ast.RangeStmt: + if nStatementsBefore(2) { + reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeRange) + continue + } + + if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { + if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + p.addWhitespaceBeforeError(t, reasonRangeCuddledWithoutUse) + } + } + case *ast.DeferStmt: + if _, ok := previousStatement.(*ast.DeferStmt); ok { + // We may cuddle multiple defers to group logic. + continue + } + + if nStatementsBefore(2) { + // We allow cuddling defer if the defer references something + // used two lines above. + // There are several reasons to why we do this. + // Originally there was a special use case only for "Close" + // + // https://github.com/bombsimon/wsl/issues/31 which links to + // resp, err := client.Do(req) + // if err != nil { + // return err + // } + // defer resp.Body.Close() + // + // After a discussion in a followup issue it makes sense to not + // only hard code `Close` but for anything that's referenced two + // statements above. + // + // https://github.com/bombsimon/wsl/issues/85 + // db, err := OpenDB() + // require.NoError(t, err) + // defer db.Close() + // + // All of this is only allowed if there's exactly three cuddled + // statements, otherwise the regular rules apply. + if !nStatementsBefore(3) && !nStatementsAfter(1) { + variablesTwoLinesAbove := append(p.findLHS(statements[i-2]), p.findRHS(statements[i-2])...) + if atLeastOneInListsMatch(rightHandSide, variablesTwoLinesAbove) { + continue + } + } + + reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeDefer) + + continue + } + + // Be extra nice with RHS, it's common to use this for locks: + // m.Lock() + // defer m.Unlock() + previousRHS := p.findRHS(previousStatement) + if atLeastOneInListsMatch(rightHandSide, previousRHS) { + continue + } + + // Allow use to cuddled defer func literals with usages on line + // above. Example: + // b := getB() + // defer func() { + // makesSenseToUse(b) + // }() + if c, ok := t.Call.Fun.(*ast.FuncLit); ok { + funcLitFirstStmt := append(p.findLHS(c.Body), p.findRHS(c.Body)...) + + if atLeastOneInListsMatch(assignedOnLineAbove, funcLitFirstStmt) { + continue + } + } + + if atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + continue + } + + if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { + p.addWhitespaceBeforeError(t, reasonDeferCuddledWithOtherVar) + } + case *ast.ForStmt: + if len(rightAndLeftHandSide) == 0 { + p.addWhitespaceBeforeError(t, reasonForWithoutCondition) + continue + } + + if nStatementsBefore(2) { + reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeFor) + continue + } + + // The same rule applies for ranges as for if statements, see + // comments regarding variable usages on the line before or as the + // first line in the block for details. 
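+			//
+			// Allowed (sketch), since i is used in the iteration:
+			//
+			//  i := 0
+			//  for i < max {
+			//      i++
+			//  }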
+			if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) {
+				if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) {
+					p.addWhitespaceBeforeError(t, reasonForCuddledAssignWithoutUse)
+				}
+			}
+		case *ast.GoStmt:
+			if _, ok := previousStatement.(*ast.GoStmt); ok {
+				continue
+			}
+
+			if nStatementsBefore(2) {
+				reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeGo)
+				continue
+			}
+
+			if c, ok := t.Call.Fun.(*ast.SelectorExpr); ok {
+				goCallArgs := append(p.findLHS(c.X), p.findRHS(c.X)...)
+
+				if atLeastOneInListsMatch(calledOnLineAbove, goCallArgs) {
+					continue
+				}
+			}
+
+			if c, ok := t.Call.Fun.(*ast.FuncLit); ok {
+				goCallArgs := append(p.findLHS(c.Body), p.findRHS(c.Body)...)
+
+				if atLeastOneInListsMatch(assignedOnLineAbove, goCallArgs) {
+					continue
+				}
+			}
+
+			if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) {
+				p.addWhitespaceBeforeError(t, reasonGoFuncWithoutAssign)
+			}
+		case *ast.SwitchStmt:
+			if nStatementsBefore(2) {
+				reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeSwitch)
+				continue
+			}
+
+			if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) {
+				if len(rightAndLeftHandSide) == 0 {
+					p.addWhitespaceBeforeError(t, reasonAnonSwitchCuddled)
+				} else {
+					p.addWhitespaceBeforeError(t, reasonSwitchCuddledWithoutUse)
+				}
+			}
+		case *ast.TypeSwitchStmt:
+			if nStatementsBefore(2) {
+				reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeTypeSwitch)
+				continue
+			}
+
+			// Allowed to type assert on variable assigned on line above.
+			if !atLeastOneInListsMatch(rightHandSide, assignedOnLineAbove) {
+				// Allow type assertion on variables used in the first case
+				// immediately.
+				if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) {
+					p.addWhitespaceBeforeError(t, reasonTypeSwitchCuddledWithoutUse)
+				}
+			}
+		case *ast.CaseClause, *ast.CommClause:
+			// Case clauses will be checked by not allowing leading or trailing
+			// whitespaces within the block. There's nothing in the case itself
+			// that may be cuddled.
+		default:
+			p.addWarning(warnStmtNotImplemented, t.Pos(), t)
+		}
+	}
+}
+
+// firstBodyStatement returns the first statement inside a body block. This is
+// because variables may be cuddled with conditions or statements if they're used
+// directly as the first argument inside a body.
+// The body will then be parsed as a *ast.BlockStmt (regular block) or as a list
+// of []ast.Stmt (case block).
+func (p *processor) firstBodyStatement(i int, allStmt []ast.Stmt) ast.Node {
+	stmt := allStmt[i]
+
+	// Start by checking if the statement has a body (probably an if statement,
+	// a range, switch case or similar). Whenever a body is found we start by
+	// parsing it before moving on in the AST.
+	statementBody := reflect.Indirect(reflect.ValueOf(stmt)).FieldByName("Body")
+
+	// Some cases allow cuddling depending on the first statement in a body
+	// of a block or case. If possible extract the first statement.
+	var firstBodyStatement ast.Node
+
+	if !statementBody.IsValid() {
+		return firstBodyStatement
+	}
+
+	switch statementBodyContent := statementBody.Interface().(type) {
+	case *ast.BlockStmt:
+		if len(statementBodyContent.List) > 0 {
+			firstBodyStatement = statementBodyContent.List[0]
+
+			// If the first body statement is a *ast.CaseClause we're
+			// actually interested in the **next** body to know what's
+			// inside the first case.
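+			//
+			// E.g. (sketch) for a switch cuddled with its assignment the
+			// block starts with a case clause and we want the first
+			// statement of that case:
+			//
+			//  v := getV()
+			//  switch v {
+			//  case 1:
+			//      use(v) // <-- this statement
+			//  }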
+ if x, ok := firstBodyStatement.(*ast.CaseClause); ok { + if len(x.Body) > 0 { + firstBodyStatement = x.Body[0] + } + } + } + + // If statement bodies will be parsed already when finding block bodies. + // The reason is because if/else-if/else chains is nested in the AST + // where the else bit is a part of the if statement. Since if statements + // is the only statement that can be chained like this we exclude it + // from parsing it again here. + if _, ok := stmt.(*ast.IfStmt); !ok { + p.parseBlockBody(nil, statementBodyContent) + } + case []ast.Stmt: + // The Body field for an *ast.CaseClause or *ast.CommClause is of type + // []ast.Stmt. We must check leading and trailing whitespaces and then + // pass the statements to parseBlockStatements to parse it's content. + var nextStatement ast.Node + + // Check if there's more statements (potential cases) after the + // current one. + if len(allStmt)-1 > i { + nextStatement = allStmt[i+1] + } + + p.findLeadingAndTrailingWhitespaces(nil, stmt, nextStatement) + p.parseBlockStatements(statementBodyContent) + default: + p.addWarning( + warnBodyStmtTypeNotImplemented, + stmt.Pos(), statementBodyContent, + ) + } + + return firstBodyStatement +} + +func (p *processor) findLHS(node ast.Node) []string { + var lhs []string + + if node == nil { + return lhs + } + + switch t := node.(type) { + case *ast.BasicLit, *ast.FuncLit, *ast.SelectStmt, + *ast.LabeledStmt, *ast.ForStmt, *ast.SwitchStmt, + *ast.ReturnStmt, *ast.GoStmt, *ast.CaseClause, + *ast.CommClause, *ast.CallExpr, *ast.UnaryExpr, + *ast.BranchStmt, *ast.TypeSpec, *ast.ChanType, + *ast.DeferStmt, *ast.TypeAssertExpr, *ast.RangeStmt: + // Nothing to add to LHS + case *ast.IncDecStmt: + return p.findLHS(t.X) + case *ast.Ident: + return []string{t.Name} + case *ast.AssignStmt: + for _, v := range t.Lhs { + lhs = append(lhs, p.findLHS(v)...) + } + case *ast.GenDecl: + for _, v := range t.Specs { + lhs = append(lhs, p.findLHS(v)...) + } + case *ast.ValueSpec: + for _, v := range t.Names { + lhs = append(lhs, p.findLHS(v)...) + } + case *ast.BlockStmt: + for _, v := range t.List { + lhs = append(lhs, p.findLHS(v)...) + } + case *ast.BinaryExpr: + return append( + p.findLHS(t.X), + p.findLHS(t.Y)..., + ) + case *ast.DeclStmt: + return p.findLHS(t.Decl) + case *ast.IfStmt: + return p.findLHS(t.Cond) + case *ast.TypeSwitchStmt: + return p.findLHS(t.Assign) + case *ast.SendStmt: + return p.findLHS(t.Chan) + default: + if x, ok := maybeX(t); ok { + return p.findLHS(x) + } + + p.addWarning(warnUnknownLHS, t.Pos(), t) + } + + return lhs +} + +func (p *processor) findRHS(node ast.Node) []string { + var rhs []string + + if node == nil { + return rhs + } + + switch t := node.(type) { + case *ast.BasicLit, *ast.SelectStmt, *ast.ChanType, + *ast.LabeledStmt, *ast.DeclStmt, *ast.BranchStmt, + *ast.TypeSpec, *ast.ArrayType, *ast.CaseClause, + *ast.CommClause, *ast.MapType, *ast.FuncLit: + // Nothing to add to RHS + case *ast.Ident: + return []string{t.Name} + case *ast.SelectorExpr: + // TODO: Should this be RHS? + // t.X is needed for defer as of now and t.Sel needed for special + // functions such as Lock() + rhs = p.findRHS(t.X) + rhs = append(rhs, p.findRHS(t.Sel)...) + case *ast.AssignStmt: + for _, v := range t.Rhs { + rhs = append(rhs, p.findRHS(v)...) + } + case *ast.CallExpr: + for _, v := range t.Args { + rhs = append(rhs, p.findRHS(v)...) + } + + rhs = append(rhs, p.findRHS(t.Fun)...) + case *ast.CompositeLit: + for _, v := range t.Elts { + rhs = append(rhs, p.findRHS(v)...) 
+ } + case *ast.IfStmt: + rhs = append(rhs, p.findRHS(t.Cond)...) + rhs = append(rhs, p.findRHS(t.Init)...) + case *ast.BinaryExpr: + return append( + p.findRHS(t.X), + p.findRHS(t.Y)..., + ) + case *ast.TypeSwitchStmt: + return p.findRHS(t.Assign) + case *ast.ReturnStmt: + for _, v := range t.Results { + rhs = append(rhs, p.findRHS(v)...) + } + case *ast.BlockStmt: + for _, v := range t.List { + rhs = append(rhs, p.findRHS(v)...) + } + case *ast.SwitchStmt: + return p.findRHS(t.Tag) + case *ast.GoStmt: + return p.findRHS(t.Call) + case *ast.ForStmt: + return p.findRHS(t.Cond) + case *ast.DeferStmt: + return p.findRHS(t.Call) + case *ast.SendStmt: + return p.findLHS(t.Value) + case *ast.IndexExpr: + rhs = append(rhs, p.findRHS(t.Index)...) + rhs = append(rhs, p.findRHS(t.X)...) + case *ast.SliceExpr: + rhs = append(rhs, p.findRHS(t.X)...) + rhs = append(rhs, p.findRHS(t.Low)...) + rhs = append(rhs, p.findRHS(t.High)...) + case *ast.KeyValueExpr: + rhs = p.findRHS(t.Key) + rhs = append(rhs, p.findRHS(t.Value)...) + default: + if x, ok := maybeX(t); ok { + return p.findRHS(x) + } + + p.addWarning(warnUnknownRHS, t.Pos(), t) + } + + return rhs +} + +func (p *processor) isShortDecl(node ast.Node) bool { + if t, ok := node.(*ast.AssignStmt); ok { + return t.Tok == token.DEFINE + } + + return false +} + +func (p *processor) findBlockStmt(node ast.Node) []*ast.BlockStmt { + var blocks []*ast.BlockStmt + + switch t := node.(type) { + case *ast.BlockStmt: + return []*ast.BlockStmt{t} + case *ast.AssignStmt: + for _, x := range t.Rhs { + blocks = append(blocks, p.findBlockStmt(x)...) + } + case *ast.CallExpr: + blocks = append(blocks, p.findBlockStmt(t.Fun)...) + + for _, x := range t.Args { + blocks = append(blocks, p.findBlockStmt(x)...) + } + case *ast.FuncLit: + blocks = append(blocks, t.Body) + case *ast.ExprStmt: + blocks = append(blocks, p.findBlockStmt(t.X)...) + case *ast.ReturnStmt: + for _, x := range t.Results { + blocks = append(blocks, p.findBlockStmt(x)...) + } + case *ast.DeferStmt: + blocks = append(blocks, p.findBlockStmt(t.Call)...) + case *ast.GoStmt: + blocks = append(blocks, p.findBlockStmt(t.Call)...) + case *ast.IfStmt: + blocks = append([]*ast.BlockStmt{t.Body}, p.findBlockStmt(t.Else)...) + } + + return blocks +} + +// maybeX extracts the X field from an AST node and returns it with a true value +// if it exists. If the node doesn't have an X field nil and false is returned. +// Known fields with X that are handled: +// IndexExpr, ExprStmt, SelectorExpr, StarExpr, ParentExpr, TypeAssertExpr, +// RangeStmt, UnaryExpr, ParenExpr, SliceExpr, IncDecStmt. +func maybeX(node interface{}) (ast.Node, bool) { + maybeHasX := reflect.Indirect(reflect.ValueOf(node)).FieldByName("X") + if !maybeHasX.IsValid() { + return nil, false + } + + n, ok := maybeHasX.Interface().(ast.Node) + if !ok { + return nil, false + } + + return n, true +} + +func atLeastOneInListsMatch(listOne, listTwo []string) bool { + sliceToMap := func(s []string) map[string]struct{} { + m := map[string]struct{}{} + + for _, v := range s { + m[v] = struct{}{} + } + + return m + } + + m1 := sliceToMap(listOne) + m2 := sliceToMap(listTwo) + + for k1 := range m1 { + if _, ok := m2[k1]; ok { + return true + } + } + + for k2 := range m2 { + if _, ok := m1[k2]; ok { + return true + } + } + + return false +} + +// findLeadingAndTrailingWhitespaces will find leading and trailing whitespaces +// in a node. The method takes comments in consideration which will make the +// parser more gentle. 
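+//
+// E.g. both of these sketches are reported:
+//
+//  if x {
+//
+//      foo() // block starts with a whitespace
+//  }
+//
+//  if y {
+//      bar()
+//
+//  } // block ends with a whitespace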
+func (p *processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, nextStatement ast.Node) { + var ( + commentMap = ast.NewCommentMap(p.fileSet, stmt, p.file.Comments) + blockStatements []ast.Stmt + blockStartLine int + blockEndLine int + blockStartPos token.Pos + blockEndPos token.Pos + isCase bool + ) + + // Depending on the block type, get the statements in the block and where + // the block starts (and ends). + switch t := stmt.(type) { + case *ast.BlockStmt: + blockStatements = t.List + blockStartPos = t.Lbrace + blockEndPos = t.Rbrace + case *ast.CaseClause: + blockStatements = t.Body + blockStartPos = t.Colon + isCase = true + case *ast.CommClause: + blockStatements = t.Body + blockStartPos = t.Colon + isCase = true + default: + p.addWarning(warnWSNodeTypeNotImplemented, stmt.Pos(), stmt) + + return + } + + // Ignore empty blocks even if they have newlines or just comments. + if len(blockStatements) < 1 { + return + } + + blockStartLine = p.fileSet.PositionFor(blockStartPos, false).Line + blockEndLine = p.fileSet.PositionFor(blockEndPos, false).Line + + // No whitespace possible if LBrace and RBrace is on the same line. + if blockStartLine == blockEndLine { + return + } + + var ( + firstStatement = blockStatements[0] + lastStatement = blockStatements[len(blockStatements)-1] + ) + + // Get the comment related to the first statement, we do allow comments in + // the beginning of a block before the first statement. + var ( + openingNodePos = blockStartPos + 1 + lastLeadingComment ast.Node + ) + + var ( + firstStatementCommentGroups []*ast.CommentGroup + lastStatementCommentGroups []*ast.CommentGroup + ) + + if cg, ok := commentMap[firstStatement]; ok && !isCase { + firstStatementCommentGroups = cg + } else { + // TODO: Just like with trailing whitespaces comments in a case block is + // tied to the last token of the first statement. For now we iterate over + // all comments in the stmt and grab those that's after colon and before + // first statement. + for _, cg := range commentMap { + if len(cg) < 1 { + continue + } + + // If we have comments and the last comment ends before the first + // statement and the node is after the colon, this must be the node + // mapped to comments. + for _, c := range cg { + if c.End() < firstStatement.Pos() && c.Pos() > blockStartPos { + firstStatementCommentGroups = append(firstStatementCommentGroups, c) + } + } + + // And same if we have comments where the first comment is after the + // last statement but before the next statement (next case). As with + // the other things, if there is not next statement it's no next + // case and the logic will be handled when parsing the block. + if nextStatement == nil { + continue + } + + for _, c := range cg { + if c.Pos() > lastStatement.End() && c.End() < nextStatement.Pos() { + lastStatementCommentGroups = append(lastStatementCommentGroups, c) + } + } + } + + // Since the comments come from a map they might not be ordered meaning + // that the last and first comment groups can be in the wrong order. We + // fix this by sorting all comments by pos after adding them all to the + // slice. 
+ sort.Slice(firstStatementCommentGroups, func(i, j int) bool { + return firstStatementCommentGroups[i].Pos() < firstStatementCommentGroups[j].Pos() + }) + + sort.Slice(lastStatementCommentGroups, func(i, j int) bool { + return lastStatementCommentGroups[i].Pos() < lastStatementCommentGroups[j].Pos() + }) + } + + for _, commentGroup := range firstStatementCommentGroups { + // If the comment group is on the same line as the block start + // (LBrace) we should not consider it. + if p.nodeEnd(commentGroup) == blockStartLine { + openingNodePos = commentGroup.End() + continue + } + + // We only care about comments before our statement from the comment + // map. As soon as we hit comments after our statement let's break + // out! + if commentGroup.Pos() > firstStatement.Pos() { + break + } + + // We never allow leading whitespace for the first comment. + if lastLeadingComment == nil && p.nodeStart(commentGroup)-1 != blockStartLine { + p.addErrorRange( + openingNodePos, + openingNodePos, + commentGroup.Pos(), + reasonBlockStartsWithWS, + ) + } + + // If lastLeadingComment is set this is not the first comment so we + // should remove whitespace between them if we don't explicitly + // allow it. + if lastLeadingComment != nil && !p.config.AllowSeparatedLeadingComment { + if p.nodeStart(commentGroup)+1 != p.nodeEnd(lastLeadingComment) { + p.addErrorRange( + openingNodePos, + lastLeadingComment.End(), + commentGroup.Pos(), + reasonBlockStartsWithWS, + ) + } + } + + lastLeadingComment = commentGroup + } + + lastNodePos := openingNodePos + if lastLeadingComment != nil { + lastNodePos = lastLeadingComment.End() + blockStartLine = p.nodeEnd(lastLeadingComment) + } + + // Check if we have a whitespace between the last node which can be the + // Lbrace, a comment on the same line or the last comment if we have + // comments inside the actual block and the first statement. This is never + // allowed. + if p.nodeStart(firstStatement)-1 != blockStartLine { + p.addErrorRange( + openingNodePos, + lastNodePos, + firstStatement.Pos(), + reasonBlockStartsWithWS, + ) + } + + // If the blockEndLine is not 0 we're a regular block (not case). + if blockEndLine != 0 { + // We don't want to reject example functions since they have to end with + // a comment. + if isExampleFunc(ident) { + return + } + + var ( + lastNode ast.Node = lastStatement + trailingComments []ast.Node + ) + + // Check if we have an comments _after_ the last statement and update + // the last node if so. + if c, ok := commentMap[lastStatement]; ok { + lastComment := c[len(c)-1] + if lastComment.Pos() > lastStatement.End() && lastComment.Pos() < stmt.End() { + lastNode = lastComment + } + } + + // TODO: This should be improved. + // The trailing comments are mapped to the last statement item which can + // be anything depending on what the last statement is. + // In `fmt.Println("hello")`, trailing comments will be mapped to + // `*ast.BasicLit` for the "hello" string. + // A short term improvement can be to cache this but for now we naively + // iterate over all items when we check a block. + for _, commentGroups := range commentMap { + for _, commentGroup := range commentGroups { + if commentGroup.Pos() < lastNode.End() || commentGroup.End() > stmt.End() { + continue + } + + trailingComments = append(trailingComments, commentGroup) + } + } + + // TODO: Should this be relaxed? + // Given the old code we only allowed trailing newline if it was + // directly tied to the last statement so for backwards compatibility + // we'll do the same. 
This means we fail all but the last whitespace + // even when allowing trailing comments. + for _, comment := range trailingComments { + if p.nodeStart(comment)-p.nodeEnd(lastNode) > 1 { + p.addErrorRange( + blockEndPos, + lastNode.End(), + comment.Pos(), + reasonBlockEndsWithWS, + ) + } + + lastNode = comment + } + + if !p.config.AllowTrailingComment && p.nodeEnd(stmt)-1 != p.nodeEnd(lastStatement) { + p.addErrorRange( + blockEndPos, + lastNode.End(), + stmt.End()-1, + reasonBlockEndsWithWS, + ) + } + + return + } + + // Nothing to do if we're not looking for enforced newline. + if p.config.ForceCaseTrailingWhitespaceLimit == 0 { + return + } + + // If we don't have any nextStatement the trailing whitespace will be + // handled when parsing the switch. If we do have a next statement we can + // see where it starts by getting it's colon position. We set the end of the + // current case to the position of the next case. + switch nextStatement.(type) { + case *ast.CaseClause, *ast.CommClause: + default: + // No more cases + return + } + + var closingNode ast.Node = lastStatement + for _, commentGroup := range lastStatementCommentGroups { + // TODO: In future versions we might want to close the gaps between + // comments. However this is not currently reported in v3 so we + // won't add this for now. + // if p.nodeStart(commentGroup)-1 != p.nodeEnd(closingNode) {} + closingNode = commentGroup + } + + totalRowsInCase := p.nodeEnd(closingNode) - blockStartLine + if totalRowsInCase < p.config.ForceCaseTrailingWhitespaceLimit { + return + } + + if p.nodeEnd(closingNode)+1 == p.nodeStart(nextStatement) { + p.addErrorRange( + closingNode.Pos(), + closingNode.End(), + closingNode.End(), + reasonCaseBlockTooCuddly, + ) + } +} + +func isExampleFunc(ident *ast.Ident) bool { + return ident != nil && strings.HasPrefix(ident.Name, "Example") +} + +func (p *processor) nodeStart(node ast.Node) int { + return p.fileSet.PositionFor(node.Pos(), false).Line +} + +func (p *processor) nodeEnd(node ast.Node) int { + line := p.fileSet.PositionFor(node.End(), false).Line + + if isEmptyLabeledStmt(node) { + return p.fileSet.PositionFor(node.Pos(), false).Line + } + + return line +} + +func isEmptyLabeledStmt(node ast.Node) bool { + v, ok := node.(*ast.LabeledStmt) + if !ok { + return false + } + + _, empty := v.Stmt.(*ast.EmptyStmt) + + return empty +} + +func (p *processor) addWhitespaceBeforeError(node ast.Node, reason string) { + p.addErrorRange(node.Pos(), node.Pos(), node.Pos(), reason) +} + +func (p *processor) addErrorRange(reportAt, start, end token.Pos, reason string) { + report, ok := p.result[reportAt] + if !ok { + report = result{ + reason: reason, + fixRanges: []fix{}, + } + } + + report.fixRanges = append(report.fixRanges, fix{ + fixRangeStart: start, + fixRangeEnd: end, + }) + + p.result[reportAt] = report +} + +func (p *processor) addWarning(w string, pos token.Pos, t interface{}) { + position := p.fileSet.PositionFor(pos, false) + + p.warnings = append(p.warnings, + fmt.Sprintf("%s:%d: %s (%T)", position.Filename, position.Line, w, t), + ) +} diff --git a/vendor/github.com/breml/bidichk/LICENSE b/vendor/github.com/breml/bidichk/LICENSE new file mode 100644 index 0000000000..47a8419ce9 --- /dev/null +++ b/vendor/github.com/breml/bidichk/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Lucas Bremgartner + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without 
restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/breml/bidichk/pkg/bidichk/bidichk.go b/vendor/github.com/breml/bidichk/pkg/bidichk/bidichk.go new file mode 100644 index 0000000000..f1bf20faba --- /dev/null +++ b/vendor/github.com/breml/bidichk/pkg/bidichk/bidichk.go @@ -0,0 +1,180 @@ +package bidichk + +import ( + "bytes" + "flag" + "fmt" + "go/token" + "os" + "sort" + "strings" + "unicode/utf8" + + "golang.org/x/tools/go/analysis" +) + +const ( + doc = "bidichk detects dangerous unicode character sequences" + disallowedDoc = `comma separated list of disallowed runes (full name or short name) + +Supported runes + +LEFT-TO-RIGHT-EMBEDDING, LRE (u+202A) +RIGHT-TO-LEFT-EMBEDDING, RLE (u+202B) +POP-DIRECTIONAL-FORMATTING, PDF (u+202C) +LEFT-TO-RIGHT-OVERRIDE, LRO (u+202D) +RIGHT-TO-LEFT-OVERRIDE, RLO (u+202E) +LEFT-TO-RIGHT-ISOLATE, LRI (u+2066) +RIGHT-TO-LEFT-ISOLATE, RLI (u+2067) +FIRST-STRONG-ISOLATE, FSI (u+2068) +POP-DIRECTIONAL-ISOLATE, PDI (u+2069) +` +) + +type disallowedRunes map[string]rune + +func (m disallowedRunes) String() string { + ss := make([]string, 0, len(m)) + for s := range m { + ss = append(ss, s) + } + sort.Strings(ss) + return strings.Join(ss, ",") +} + +func (m disallowedRunes) Set(s string) error { + ss := strings.FieldsFunc(s, func(c rune) bool { return c == ',' }) + if len(ss) == 0 { + return nil + } + + for k := range m { + delete(m, k) + } + + for _, v := range ss { + switch v { + case runeShortNameLRE, runeShortNameRLE, runeShortNamePDF, + runeShortNameLRO, runeShortNameRLO, runeShortNameLRI, + runeShortNameRLI, runeShortNameFSI, runeShortNamePDI: + v = shortNameLookup[v] + fallthrough + case runeNameLRE, runeNameRLE, runeNamePDF, + runeNameLRO, runeNameRLO, runeNameLRI, + runeNameRLI, runeNameFSI, runeNamePDI: + m[v] = runeLookup[v] + default: + return fmt.Errorf("unknown check name %q (see help for full list)", v) + } + } + return nil +} + +const ( + runeNameLRE = "LEFT-TO-RIGHT-EMBEDDING" + runeNameRLE = "RIGHT-TO-LEFT-EMBEDDING" + runeNamePDF = "POP-DIRECTIONAL-FORMATTING" + runeNameLRO = "LEFT-TO-RIGHT-OVERRIDE" + runeNameRLO = "RIGHT-TO-LEFT-OVERRIDE" + runeNameLRI = "LEFT-TO-RIGHT-ISOLATE" + runeNameRLI = "RIGHT-TO-LEFT-ISOLATE" + runeNameFSI = "FIRST-STRONG-ISOLATE" + runeNamePDI = "POP-DIRECTIONAL-ISOLATE" + + runeShortNameLRE = "LRE" // LEFT-TO-RIGHT-EMBEDDING + runeShortNameRLE = "RLE" // RIGHT-TO-LEFT-EMBEDDING + runeShortNamePDF = "PDF" // POP-DIRECTIONAL-FORMATTING + runeShortNameLRO = "LRO" // LEFT-TO-RIGHT-OVERRIDE + runeShortNameRLO = "RLO" // RIGHT-TO-LEFT-OVERRIDE + runeShortNameLRI = "LRI" // LEFT-TO-RIGHT-ISOLATE + runeShortNameRLI = "RLI" // RIGHT-TO-LEFT-ISOLATE + runeShortNameFSI = "FSI" // FIRST-STRONG-ISOLATE + 
runeShortNamePDI = "PDI" // POP-DIRECTIONAL-ISOLATE +) + +var runeLookup = map[string]rune{ + runeNameLRE: '\u202A', // LEFT-TO-RIGHT-EMBEDDING + runeNameRLE: '\u202B', // RIGHT-TO-LEFT-EMBEDDING + runeNamePDF: '\u202C', // POP-DIRECTIONAL-FORMATTING + runeNameLRO: '\u202D', // LEFT-TO-RIGHT-OVERRIDE + runeNameRLO: '\u202E', // RIGHT-TO-LEFT-OVERRIDE + runeNameLRI: '\u2066', // LEFT-TO-RIGHT-ISOLATE + runeNameRLI: '\u2067', // RIGHT-TO-LEFT-ISOLATE + runeNameFSI: '\u2068', // FIRST-STRONG-ISOLATE + runeNamePDI: '\u2069', // POP-DIRECTIONAL-ISOLATE +} + +var shortNameLookup = map[string]string{ + runeShortNameLRE: runeNameLRE, + runeShortNameRLE: runeNameRLE, + runeShortNamePDF: runeNamePDF, + runeShortNameLRO: runeNameLRO, + runeShortNameRLO: runeNameRLO, + runeShortNameLRI: runeNameLRI, + runeShortNameRLI: runeNameRLI, + runeShortNameFSI: runeNameFSI, + runeShortNamePDI: runeNamePDI, +} + +type bidichk struct { + disallowedRunes disallowedRunes +} + +// NewAnalyzer return a new bidichk analyzer. +func NewAnalyzer() *analysis.Analyzer { + bidichk := bidichk{} + bidichk.disallowedRunes = make(map[string]rune, len(runeLookup)) + for k, v := range runeLookup { + bidichk.disallowedRunes[k] = v + } + + a := &analysis.Analyzer{ + Name: "bidichk", + Doc: doc, + Run: bidichk.run, + } + + a.Flags.Init("bidichk", flag.ExitOnError) + a.Flags.Var(&bidichk.disallowedRunes, "disallowed-runes", disallowedDoc) + a.Flags.Var(versionFlag{}, "V", "print version and exit") + + return a +} + +func (b bidichk) run(pass *analysis.Pass) (interface{}, error) { + var err error + + pass.Fset.Iterate(func(f *token.File) bool { + if strings.HasPrefix(f.Name(), "$GOROOT") { + return true + } + + return b.check(f.Name(), f.Pos(0), pass) == nil + }) + + return nil, err +} + +func (b bidichk) check(filename string, pos token.Pos, pass *analysis.Pass) error { + body, err := os.ReadFile(filename) + if err != nil { + return err + } + + for name, r := range b.disallowedRunes { + start := 0 + for { + idx := bytes.IndexRune(body[start:], r) + if idx == -1 { + break + } + start += idx + + pass.Reportf(pos+token.Pos(start), "found dangerous unicode character sequence %s", name) + + start += utf8.RuneLen(r) + } + } + + return nil +} diff --git a/vendor/github.com/breml/bidichk/pkg/bidichk/version.go b/vendor/github.com/breml/bidichk/pkg/bidichk/version.go new file mode 100644 index 0000000000..4cfc57dd1e --- /dev/null +++ b/vendor/github.com/breml/bidichk/pkg/bidichk/version.go @@ -0,0 +1,19 @@ +package bidichk + +import ( + "fmt" + "os" +) + +var Version = "bidichk version dev" + +type versionFlag struct{} + +func (versionFlag) IsBoolFlag() bool { return true } +func (versionFlag) Get() interface{} { return nil } +func (versionFlag) String() string { return "" } +func (versionFlag) Set(s string) error { + fmt.Println(Version) + os.Exit(0) + return nil +} diff --git a/vendor/github.com/breml/errchkjson/.gitignore b/vendor/github.com/breml/errchkjson/.gitignore new file mode 100644 index 0000000000..0362de3016 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/.gitignore @@ -0,0 +1,29 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +/errchkjson +/cmd/errchkjson/errchkjson + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +coverage.html + +# Log files +*.log + +# Env files +.env + +# Exclude todo +TODO.md + +# Exclude IDE settings +.idea/ +*.iml +.vscode/ diff --git a/vendor/github.com/breml/errchkjson/.goreleaser.yml 
b/vendor/github.com/breml/errchkjson/.goreleaser.yml new file mode 100644 index 0000000000..a05c172cb6 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/.goreleaser.yml @@ -0,0 +1,34 @@ +# This is an example .goreleaser.yml file with some sane defaults. +# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + # You may remove this if you don't use go modules. + - go mod tidy +builds: + - main: ./cmd/errchkjson + binary: errchkjson + env: + - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin +archives: + - name_template: >- + {{- .Binary }}_ + {{- .Version }}_ + {{- title .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} + {{- if .Arm }}v{{ .Arm }}{{ end -}} +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + skip: true +release: + github: + owner: breml + name: errchkjson +gomod: + proxy: true diff --git a/vendor/github.com/breml/errchkjson/LICENSE b/vendor/github.com/breml/errchkjson/LICENSE new file mode 100644 index 0000000000..08db5cb6fc --- /dev/null +++ b/vendor/github.com/breml/errchkjson/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Lucas Bremgartner + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/breml/errchkjson/README.md b/vendor/github.com/breml/errchkjson/README.md new file mode 100644 index 0000000000..1979597387 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/README.md @@ -0,0 +1,131 @@ +# errchkjson + +[![Test Status](https://github.com/breml/errchkjson/actions/workflows/ci.yml/badge.svg)](https://github.com/breml/errchkjson/actions/workflows/ci.yml) [![Go Report Card](https://goreportcard.com/badge/github.com/breml/errchkjson)](https://goreportcard.com/report/github.com/breml/errchkjson) [![License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE) + +Checks types passed to the json encoding functions. Reports unsupported types and reports occurrences where the check for the returned error can be omitted. + +Consider this [http.Handler](https://pkg.go.dev/net/http#Handler): + +```Go +func JSONHelloWorld(w http.ResponseWriter, r *http.Request) { + response := struct { + Message string + Code int + }{ + Message: "Hello World", + Code: 200, + } + + body, err := json.Marshal(response) + if err != nil { + panic(err) // unreachable, because json encoding of a struct with just a string and an int will never return an error. 
+	}
+
+	w.Write(body)
+}
+```
+
+Because the `panic` can never happen, one might refactor the code like this:
+
+```Go
+func JSONHelloWorld(w http.ResponseWriter, r *http.Request) {
+	response := struct {
+		Message string
+		Code    int
+	}{
+		Message: "Hello World",
+		Code:    200,
+	}
+
+	body, _ := json.Marshal(response)
+
+	w.Write(body)
+}
+```
+
+This is fine as long as the struct is not altered in a way that could lead
+to `json.Marshal` returning an error.
+
+`errchkjson` allows you to lint your code such that the error check on `json.Marshal`
+shown above can be omitted while still staying safe, because as soon as an unsafe
+type is added to the response type, the linter will warn you.
+
+## Installation
+
+Download `errchkjson` from the [releases](https://github.com/breml/errchkjson/releases) or get the latest version from source with:
+
+```shell
+go get github.com/breml/errchkjson/cmd/errchkjson
+```
+
+## Usage
+
+### Shell
+
+Check everything:
+
+```shell
+errchkjson ./...
+```
+
+`errchkjson` also recognizes the following command-line options:
+
+The `-omit-safe` flag disables checking for safe returns of errors from `json.Marshal`.
+
+## Types
+
+### Safe
+
+The following types are safe to use with [json encoding functions](https://pkg.go.dev/encoding/json), that is, the encoding to JSON cannot fail:
+
+Safe basic types:
+
+* `bool`
+* `int`, `int8`, `int16`, `int32`, `int64`, `uint`, `uint8`, `uint16`, `uint32`, `uint64`, `uintptr`
+* `string`
+* Pointer type of the above listed basic types
+
+Composed types (struct, map, slice, array) are safe if the type of the value is
+safe. For structs, only exported fields are relevant. For maps, the key needs to be either an integer type or a string.
+
+### Unsafe
+
+The following types are unsafe to use with [json encoding functions](https://pkg.go.dev/encoding/json), that is, the encoding to JSON can fail (return an error):
+
+Unsafe basic types:
+
+* `float32`, `float64`
+* `interface{}`
+* Pointer type of the above listed basic types
+
+Any composed types (struct, map, slice, array) containing an unsafe basic type.
+
+Any type that implements the `json.Marshaler` or `encoding.TextMarshaler` interface (e.g. `json.Number`) is also considered unsafe.
+
+### Forbidden
+
+Forbidden basic types:
+
+* `complex64`, `complex128`
+* `chan`
+* `func`
+* `unsafe.Pointer`
+
+Any composed types (struct, map, slice, array) containing a forbidden basic type. Any map
+using a key with a forbidden type (`bool`, `float32`, `float64`, `struct`).
+
+## Accepted edge case
+
+For `encoding/json.MarshalIndent`, there is a (pathological) edge case where this
+function could [return an error](https://cs.opensource.google/go/go/+/refs/tags/go1.18:src/encoding/json/scanner.go;drc=refs%2Ftags%2Fgo1.18;l=181) for an otherwise safe argument if the argument has
+a nesting depth larger than [`10000`](https://cs.opensource.google/go/go/+/refs/tags/go1.18:src/encoding/json/scanner.go;drc=refs%2Ftags%2Fgo1.18;l=144) (as of Go 1.18).
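+
+A contrived sketch of that edge case (illustrative only; the `Node` type and the
+depth are assumptions based on the scanner limit linked above, as of Go 1.18):
+
+```Go
+package main
+
+import "encoding/json"
+
+// Node is JSON-safe: a struct with a single exported pointer field.
+type Node struct {
+	Child *Node `json:",omitempty"`
+}
+
+func main() {
+	// Build a chain nested deeper than the scanner's limit of 10000.
+	root := &Node{}
+	cur := root
+	for i := 0; i < 10001; i++ {
+		cur.Child = &Node{}
+		cur = cur.Child
+	}
+
+	_, err := json.Marshal(root) // nil error: the type itself is safe
+	if err == nil {
+		_, err = json.MarshalIndent(root, "", "  ") // non-nil error: depth > 10000
+	}
+	_ = err
+}
+```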
+
+## Bugs found during development
+
+During the development of `errchkjson`, the following issues in package `encoding/json` of the Go standard library have been found, and PRs have been merged:
+
+* [Issue #34154: encoding/json: string option (struct tag) on string field with SetEscapeHTML(false) escapes anyway](https://github.com/golang/go/issues/34154)
+* [PR #34127: encoding/json: fix and optimize marshal for quoted string](https://github.com/golang/go/pull/34127)
+* [Issue #34268: encoding/json: wrong encoding for json.Number field with string option (struct tag)](https://github.com/golang/go/issues/34268)
+* [PR #34269: encoding/json: make Number with the ,string option marshal with quotes](https://github.com/golang/go/pull/34269)
+* [PR #34272: encoding/json: validate strings when decoding into Number](https://github.com/golang/go/pull/34272)
diff --git a/vendor/github.com/breml/errchkjson/errchkjson.go b/vendor/github.com/breml/errchkjson/errchkjson.go
new file mode 100644
index 0000000000..4a23929cf2
--- /dev/null
+++ b/vendor/github.com/breml/errchkjson/errchkjson.go
@@ -0,0 +1,348 @@
+// Package errchkjson defines an Analyzer that finds places where it is
+// safe to omit checking the error returned from json.Marshal.
+package errchkjson
+
+import (
+	"flag"
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"reflect"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/types/typeutil"
+)
+
+type errchkjson struct {
+	omitSafe         bool // -omit-safe flag
+	reportNoExported bool // -report-no-exported flag
+}
+
+// NewAnalyzer returns a new errchkjson analyzer.
+func NewAnalyzer() *analysis.Analyzer {
+	errchkjson := &errchkjson{}
+
+	a := &analysis.Analyzer{
+		Name: "errchkjson",
+		Doc:  "Checks types passed to the json encoding functions. Reports unsupported types and reports occasions where the check for the returned error can be omitted.",
+		Run:  errchkjson.run,
+	}
+
+	a.Flags.Init("errchkjson", flag.ExitOnError)
+	a.Flags.BoolVar(&errchkjson.omitSafe, "omit-safe", false, "if omit-safe is true, checking of safe returns is omitted")
+	a.Flags.BoolVar(&errchkjson.reportNoExported, "report-no-exported", false, "if report-no-exported is true, encoding a struct without exported fields is reported as issue")
+	a.Flags.Var(versionFlag{}, "V", "print version and exit")
+
+	return a
+}
+
+func (e *errchkjson) run(pass *analysis.Pass) (interface{}, error) {
+	for _, file := range pass.Files {
+		ast.Inspect(file, func(n ast.Node) bool {
+			if n == nil {
+				return true
+			}
+
+			// if the error is returned, it is the caller's responsibility to check
+			// the return value.
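+			// For example, `return json.Marshal(v)` is not flagged here, since the error escapes to the caller.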
+			if _, ok := n.(*ast.ReturnStmt); ok {
+				return false
+			}
+
+			ce, ok := n.(*ast.CallExpr)
+			if ok {
+				fn, _ := typeutil.Callee(pass.TypesInfo, ce).(*types.Func)
+				if fn == nil {
+					return true
+				}
+
+				switch fn.FullName() {
+				case "encoding/json.Marshal", "encoding/json.MarshalIndent":
+					e.handleJSONMarshal(pass, ce, fn.FullName(), blankIdentifier, e.omitSafe)
+				case "(*encoding/json.Encoder).Encode":
+					e.handleJSONMarshal(pass, ce, fn.FullName(), blankIdentifier, true)
+				default:
+					e.inspectArgs(pass, ce.Args)
+				}
+				return false
+			}
+
+			as, ok := n.(*ast.AssignStmt)
+			if !ok {
+				return true
+			}
+
+			ce, ok = as.Rhs[0].(*ast.CallExpr)
+			if !ok {
+				return true
+			}
+
+			fn, _ := typeutil.Callee(pass.TypesInfo, ce).(*types.Func)
+			if fn == nil {
+				return true
+			}
+
+			switch fn.FullName() {
+			case "encoding/json.Marshal", "encoding/json.MarshalIndent":
+				e.handleJSONMarshal(pass, ce, fn.FullName(), evaluateMarshalErrorTarget(as.Lhs[1]), e.omitSafe)
+			case "(*encoding/json.Encoder).Encode":
+				e.handleJSONMarshal(pass, ce, fn.FullName(), evaluateMarshalErrorTarget(as.Lhs[0]), true)
+			default:
+				return true
+			}
+			return false
+		})
+	}
+
+	return nil, nil
+}
+
+func evaluateMarshalErrorTarget(n ast.Expr) marshalErrorTarget {
+	if errIdent, ok := n.(*ast.Ident); ok {
+		if errIdent.Name == "_" {
+			return blankIdentifier
+		}
+	}
+	return variableAssignment
+}
+
+type marshalErrorTarget int
+
+const (
+	blankIdentifier    = iota // the returned error from the JSON marshal function is assigned to the blank identifier "_".
+	variableAssignment        // the returned error from the JSON marshal function is assigned to a variable.
+	functionArgument          // the returned error from the JSON marshal function is passed to another function as an argument.
+)
+
+func (e *errchkjson) handleJSONMarshal(pass *analysis.Pass, ce *ast.CallExpr, fnName string, errorTarget marshalErrorTarget, omitSafe bool) {
+	t := pass.TypesInfo.TypeOf(ce.Args[0])
+	if t == nil {
+		// Not sure if this is possible at all
+		if errorTarget == blankIdentifier {
+			pass.Reportf(ce.Pos(), "Type of argument to `%s` could not be evaluated and error return value is not checked", fnName)
+		}
+		return
+	}
+
+	if _, ok := t.(*types.Pointer); ok {
+		t = t.(*types.Pointer).Elem()
+	}
+
+	err := e.jsonSafe(t, 0, map[types.Type]struct{}{})
+	if err != nil {
+		if _, ok := err.(unsupported); ok {
+			pass.Reportf(ce.Pos(), "`%s` for %v", fnName, err)
+			return
+		}
+		if _, ok := err.(noexported); ok {
+			pass.Reportf(ce.Pos(), "Error argument passed to `%s` does not contain any exported field", fnName)
+		}
+		// Only care about unsafe types if they are assigned to the blank identifier.
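+		// When the error is assigned to a real variable, the caller is presumably checking it, so only the blank-identifier case is reported below.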
+ if errorTarget == blankIdentifier { + pass.Reportf(ce.Pos(), "Error return value of `%s` is not checked: %v", fnName, err) + } + } + if err == nil && errorTarget == variableAssignment && !omitSafe { + pass.Reportf(ce.Pos(), "Error return value of `%s` is checked but passed argument is safe", fnName) + } + // Report an error, if err for json.Marshal is not checked and safe types are omitted + if err == nil && errorTarget == blankIdentifier && omitSafe { + pass.Reportf(ce.Pos(), "Error return value of `%s` is not checked", fnName) + } +} + +const ( + allowedBasicTypes = types.IsBoolean | types.IsInteger | types.IsString + allowedMapKeyBasicTypes = types.IsInteger | types.IsString + unsupportedBasicTypes = types.IsComplex +) + +func (e *errchkjson) jsonSafe(t types.Type, level int, seenTypes map[types.Type]struct{}) error { + if _, ok := seenTypes[t]; ok { + return nil + } + + if types.Implements(t, textMarshalerInterface()) || types.Implements(t, jsonMarshalerInterface()) { + return fmt.Errorf("unsafe type `%s` found", t.String()) + } + + switch ut := t.Underlying().(type) { + case *types.Basic: + if ut.Info()&allowedBasicTypes > 0 { // bool, int-family, string + if ut.Info()&types.IsString > 0 && t.String() == "encoding/json.Number" { + return fmt.Errorf("unsafe type `%s` found", t.String()) + } + return nil + } + if ut.Info()&unsupportedBasicTypes > 0 { // complex64, complex128 + return newUnsupportedError(fmt.Errorf("unsupported type `%s` found", ut.String())) + } + switch ut.Kind() { + case types.UntypedNil: + return nil + case types.UnsafePointer: + return newUnsupportedError(fmt.Errorf("unsupported type `%s` found", ut.String())) + default: + // E.g. float32, float64 + return fmt.Errorf("unsafe type `%s` found", ut.String()) + } + + case *types.Array: + err := e.jsonSafe(ut.Elem(), level+1, seenTypes) + if err != nil { + return err + } + return nil + + case *types.Slice: + err := e.jsonSafe(ut.Elem(), level+1, seenTypes) + if err != nil { + return err + } + return nil + + case *types.Struct: + seenTypes[t] = struct{}{} + exported := 0 + for i := 0; i < ut.NumFields(); i++ { + if !ut.Field(i).Exported() { + // Unexported fields can be ignored + continue + } + if tag, ok := reflect.StructTag(ut.Tag(i)).Lookup("json"); ok { + if tag == "-" { + // Fields omitted in json can be ignored + continue + } + } + err := e.jsonSafe(ut.Field(i).Type(), level+1, seenTypes) + if err != nil { + return err + } + exported++ + } + if e.reportNoExported && level == 0 && exported == 0 { + return newNoexportedError(fmt.Errorf("struct does not export any field")) + } + return nil + + case *types.Pointer: + err := e.jsonSafe(ut.Elem(), level+1, seenTypes) + if err != nil { + return err + } + return nil + + case *types.Map: + err := jsonSafeMapKey(ut.Key()) + if err != nil { + return err + } + err = e.jsonSafe(ut.Elem(), level+1, seenTypes) + if err != nil { + return err + } + return nil + + case *types.Chan, *types.Signature: + // Types that are not supported for encoding to json: + return newUnsupportedError(fmt.Errorf("unsupported type `%s` found", ut.String())) + + default: + // Types that are not supported for encoding to json or are not completely safe, like: interfaces + return fmt.Errorf("unsafe type `%s` found", t.String()) + } +} + +func jsonSafeMapKey(t types.Type) error { + if types.Implements(t, textMarshalerInterface()) || types.Implements(t, jsonMarshalerInterface()) { + return fmt.Errorf("unsafe type `%s` as map key found", t.String()) + } + switch ut := t.Underlying().(type) { + case 
*types.Basic: + if ut.Info()&types.IsString > 0 && t.String() == "encoding/json.Number" { + return fmt.Errorf("unsafe type `%s` as map key found", t.String()) + } + if ut.Info()&allowedMapKeyBasicTypes > 0 { // bool, int-family, string + return nil + } + // E.g. bool, float32, float64, complex64, complex128 + return newUnsupportedError(fmt.Errorf("unsupported type `%s` as map key found", t.String())) + case *types.Interface: + return fmt.Errorf("unsafe type `%s` as map key found", t.String()) + default: + // E.g. struct composed solely of basic types, that are comparable + return newUnsupportedError(fmt.Errorf("unsupported type `%s` as map key found", t.String())) + } +} + +func (e *errchkjson) inspectArgs(pass *analysis.Pass, args []ast.Expr) { + for _, a := range args { + ast.Inspect(a, func(n ast.Node) bool { + if n == nil { + return true + } + + ce, ok := n.(*ast.CallExpr) + if !ok { + return false + } + + fn, _ := typeutil.Callee(pass.TypesInfo, ce).(*types.Func) + if fn == nil { + return true + } + + switch fn.FullName() { + case "encoding/json.Marshal", "encoding/json.MarshalIndent": + e.handleJSONMarshal(pass, ce, fn.FullName(), functionArgument, e.omitSafe) + case "(*encoding/json.Encoder).Encode": + e.handleJSONMarshal(pass, ce, fn.FullName(), functionArgument, true) + default: + e.inspectArgs(pass, ce.Args) + } + return false + }) + } +} + +// Construct *types.Interface for interface encoding.TextMarshaler +// +// type TextMarshaler interface { +// MarshalText() (text []byte, err error) +// } +func textMarshalerInterface() *types.Interface { + textMarshalerInterface := types.NewInterfaceType([]*types.Func{ + types.NewFunc(token.NoPos, nil, "MarshalText", types.NewSignatureType( + nil, nil, nil, nil, types.NewTuple( + types.NewVar(token.NoPos, nil, "text", + types.NewSlice( + types.Universe.Lookup("byte").Type())), + types.NewVar(token.NoPos, nil, "err", types.Universe.Lookup("error").Type())), + false)), + }, nil) + textMarshalerInterface.Complete() + + return textMarshalerInterface +} + +// Construct *types.Interface for interface json.Marshaler +// +// type Marshaler interface { +// MarshalJSON() ([]byte, error) +// } +func jsonMarshalerInterface() *types.Interface { + textMarshalerInterface := types.NewInterfaceType([]*types.Func{ + types.NewFunc(token.NoPos, nil, "MarshalJSON", types.NewSignatureType( + nil, nil, nil, nil, types.NewTuple( + types.NewVar(token.NoPos, nil, "", + types.NewSlice( + types.Universe.Lookup("byte").Type())), + types.NewVar(token.NoPos, nil, "", types.Universe.Lookup("error").Type())), + false)), + }, nil) + textMarshalerInterface.Complete() + + return textMarshalerInterface +} diff --git a/vendor/github.com/breml/errchkjson/noexported_error.go b/vendor/github.com/breml/errchkjson/noexported_error.go new file mode 100644 index 0000000000..07b7a07d26 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/noexported_error.go @@ -0,0 +1,23 @@ +package errchkjson + +type noexported interface { + noexported() +} + +var _ noexported = noexportedError{} + +type noexportedError struct { + err error +} + +func newNoexportedError(err error) error { + return noexportedError{ + err: err, + } +} + +func (u noexportedError) noexported() {} + +func (u noexportedError) Error() string { + return u.err.Error() +} diff --git a/vendor/github.com/breml/errchkjson/unsupported_error.go b/vendor/github.com/breml/errchkjson/unsupported_error.go new file mode 100644 index 0000000000..1a38c3f532 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/unsupported_error.go @@ 
-0,0 +1,23 @@ +package errchkjson + +type unsupported interface { + unsupported() +} + +var _ unsupported = unsupportedError{} + +type unsupportedError struct { + err error +} + +func newUnsupportedError(err error) error { + return unsupportedError{ + err: err, + } +} + +func (u unsupportedError) unsupported() {} + +func (u unsupportedError) Error() string { + return u.err.Error() +} diff --git a/vendor/github.com/breml/errchkjson/version.go b/vendor/github.com/breml/errchkjson/version.go new file mode 100644 index 0000000000..77d8ef8bb0 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/version.go @@ -0,0 +1,19 @@ +package errchkjson + +import ( + "fmt" + "os" +) + +var Version = "errchkjson version dev" + +type versionFlag struct{} + +func (versionFlag) IsBoolFlag() bool { return true } +func (versionFlag) Get() interface{} { return nil } +func (versionFlag) String() string { return "" } +func (versionFlag) Set(s string) error { + fmt.Println(Version) + os.Exit(0) + return nil +} diff --git a/vendor/github.com/butuzov/ireturn/LICENSE b/vendor/github.com/butuzov/ireturn/LICENSE new file mode 100644 index 0000000000..a9752e9726 --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Oleg Butuzov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go b/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go new file mode 100644 index 0000000000..f68170fb31 --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go @@ -0,0 +1,277 @@ +package analyzer + +import ( + "flag" + "go/ast" + gotypes "go/types" + "runtime" + "strings" + "sync" + + "github.com/butuzov/ireturn/analyzer/internal/config" + "github.com/butuzov/ireturn/analyzer/internal/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const name string = "ireturn" // linter name + +type validator interface { + IsValid(types.IFace) bool +} + +type analyzer struct { + once sync.Once + mu sync.RWMutex + handler validator + err error + diabledNolint bool + + found []analysis.Diagnostic +} + +func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { + // 00. Part 1. Handling Configuration Only Once. + a.once.Do(func() { a.readConfiguration(&pass.Analyzer.Flags) }) + + // 00. Part 2. Handling Errors + if a.err != nil { + return nil, a.err + } + + ins, _ := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // 00. 
does file have dot-imported standard packages? + dotImportedStd := make(map[string]struct{}) + ins.Preorder([]ast.Node{(*ast.ImportSpec)(nil)}, func(node ast.Node) { + i, _ := node.(*ast.ImportSpec) + if i.Name != nil && i.Name.Name == "." { + dotImportedStd[strings.Trim(i.Path.Value, `"`)] = struct{}{} + } + }) + + // 01. Running Inspection. + ins.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(node ast.Node) { + // 001. Casting to funcdecl + f, _ := node.(*ast.FuncDecl) + + // 002. Does it return any results ? + if f.Type == nil || f.Type.Results == nil { + return + } + + // 003. Is it allowed to be checked? + if !a.diabledNolint && hasDisallowDirective(f.Doc) { + return + } + + seen := make(map[string]bool, 4) + + // 004. Filtering Results. + for _, issue := range filterInterfaces(pass, f.Type, dotImportedStd) { + if a.handler.IsValid(issue) { + continue + } + + issue.Enrich(f) + + key := issue.HashString() + + if ok := seen[key]; ok { + continue + } + seen[key] = true + + a.addDiagnostic(issue.ExportDiagnostic()) + } + }) + + // 02. Printing reports. + a.mu.RLock() + defer a.mu.RUnlock() + for i := range a.found { + pass.Report(a.found[i]) + } + + return nil, nil +} + +func (a *analyzer) addDiagnostic(d analysis.Diagnostic) { + a.mu.Lock() + defer a.mu.Unlock() + + a.found = append(a.found, d) +} + +func (a *analyzer) readConfiguration(fs *flag.FlagSet) { + cnf, err := config.New(fs) + if err != nil { + a.err = err + return + } + + // First: checking nonolint directive + val := fs.Lookup("nonolint") + if val != nil { + a.diabledNolint = fs.Lookup("nonolint").Value.String() == "true" + } + + // Second: validators implementation next + if validatorImpl, ok := cnf.(validator); ok { + a.handler = validatorImpl + return + } + + a.handler = config.DefaultValidatorConfig() +} + +func NewAnalyzer() *analysis.Analyzer { + a := analyzer{} //nolint: exhaustivestruct + + return &analysis.Analyzer{ + Name: name, + Doc: "Accept Interfaces, Return Concrete Types", + Run: a.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: flags(), + } +} + +func flags() flag.FlagSet { + set := flag.NewFlagSet("", flag.PanicOnError) + set.String("allow", "", "accept-list of the comma-separated interfaces") + set.String("reject", "", "reject-list of the comma-separated interfaces") + set.Bool("nonolint", false, "disable nolint checks") + return *set +} + +func filterInterfaces(p *analysis.Pass, ft *ast.FuncType, di map[string]struct{}) []types.IFace { + var results []types.IFace + + if ft.Results == nil { // this can't happen, but double checking. + return results + } + + for _, el := range ft.Results.List { + switch v := el.Type.(type) { + // ----- empty or anonymous interfaces + case *ast.InterfaceType: + if len(v.Methods.List) == 0 { + results = append(results, types.NewIssue("interface{}", types.EmptyInterface)) + continue + } + + results = append(results, types.NewIssue("anonymous interface", types.AnonInterface)) + + // ------ Errors and interfaces from same package + case *ast.Ident: + + t1 := p.TypesInfo.TypeOf(el.Type) + val, ok := t1.Underlying().(*gotypes.Interface) + if !ok { + continue + } + + var ( + name = t1.String() + isNamed = strings.Contains(name, ".") + isEmpty = val.Empty() + ) + + // catching any + if isEmpty && name == "any" { + results = append(results, types.NewIssue(name, types.EmptyInterface)) + continue + } + + // NOTE: FIXED! 
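+				// `error` is itself an interface, but it gets its own category so it can be allowed independently of other named interfaces.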
+ if name == "error" { + results = append(results, types.NewIssue(name, types.ErrorInterface)) + continue + } + + if !isNamed { + + typeParams := val.String() + prefix, suffix := "interface{", "}" + if strings.HasPrefix(typeParams, prefix) { // nolint: gosimple + typeParams = typeParams[len(prefix):] + } + if strings.HasSuffix(typeParams, suffix) { + typeParams = typeParams[:len(typeParams)-1] + } + + goVersion := runtime.Version() + if strings.HasPrefix(goVersion, "go1.18") || strings.HasPrefix(goVersion, "go1.19") { + typeParams = strings.ReplaceAll(typeParams, "|", " | ") + } + + results = append(results, types.IFace{ + Name: name, + Type: types.Generic, + OfType: typeParams, + }) + continue + } + + // is it dot-imported package? + // handling cases when stdlib package imported via "." dot-import + if len(di) > 0 { + pkgName := stdPkgInterface(name) + if _, ok := di[pkgName]; ok { + results = append(results, types.NewIssue(name, types.NamedStdInterface)) + + continue + } + } + + results = append(results, types.NewIssue(name, types.NamedInterface)) + + // ------- standard library and 3rd party interfaces + case *ast.SelectorExpr: + + t1 := p.TypesInfo.TypeOf(el.Type) + if !gotypes.IsInterface(t1.Underlying()) { + continue + } + + word := t1.String() + if isStdPkgInterface(word) { + results = append(results, types.NewIssue(word, types.NamedStdInterface)) + continue + } + + results = append(results, types.NewIssue(word, types.NamedInterface)) + } + } + + return results +} + +// stdPkgInterface will return package name if tis std lib package +// or empty string on fail. +func stdPkgInterface(named string) string { + // find last "." index. + idx := strings.LastIndex(named, ".") + if idx == -1 { + return "" + } + + return stdPkg(named[0:idx]) +} + +// isStdPkgInterface will run small checks against pkg to find out if named +// interface we looking on - comes from a standard library or not. +func isStdPkgInterface(namedInterface string) bool { + return stdPkgInterface(namedInterface) != "" +} + +func stdPkg(pkg string) string { + if _, ok := std[pkg]; ok { + return pkg + } + + return "" +} diff --git a/vendor/github.com/butuzov/ireturn/analyzer/disallow.go b/vendor/github.com/butuzov/ireturn/analyzer/disallow.go new file mode 100644 index 0000000000..36b6fcb4f3 --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/disallow.go @@ -0,0 +1,45 @@ +package analyzer + +import ( + "go/ast" + "strings" +) + +const nolintPrefix = "//nolint" + +func hasDisallowDirective(cg *ast.CommentGroup) bool { + if cg == nil { + return false + } + + return directiveFound(cg) +} + +func directiveFound(cg *ast.CommentGroup) bool { + for i := len(cg.List) - 1; i >= 0; i-- { + comment := cg.List[i] + if !strings.HasPrefix(comment.Text, nolintPrefix) { + continue + } + + startingIdx := len(nolintPrefix) + for { + idx := strings.Index(comment.Text[startingIdx:], name) + if idx == -1 { + break + } + + if len(comment.Text[startingIdx+idx:]) == len(name) { + return true + } + + c := comment.Text[startingIdx+idx+len(name)] + if c == '.' 
|| c == ',' || c == ' ' || c == ' ' { + return true + } + startingIdx += idx + 1 + } + } + + return false +} diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go new file mode 100644 index 0000000000..6a294ca35f --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go @@ -0,0 +1,17 @@ +package config + +import "github.com/butuzov/ireturn/analyzer/internal/types" + +// allowConfig specifies a list of interfaces (keywords, patters and regular expressions) +// that are allowed by ireturn as valid to return, any non listed interface are rejected. +type allowConfig struct { + *defaultConfig +} + +func allowAll(patterns []string) *allowConfig { + return &allowConfig{&defaultConfig{List: patterns}} +} + +func (ac *allowConfig) IsValid(i types.IFace) bool { + return ac.Has(i) +} diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/config.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/config.go new file mode 100644 index 0000000000..46c73170ae --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/config.go @@ -0,0 +1,66 @@ +package config + +import ( + "regexp" + "sync" + + "github.com/butuzov/ireturn/analyzer/internal/types" +) + +// defaultConfig is core of the validation, ... +// todo(butuzov): write proper intro... + +type defaultConfig struct { + List []string + + // private fields (for search optimization look ups) + once sync.Once + quick uint8 + list []*regexp.Regexp +} + +func (config *defaultConfig) Has(i types.IFace) bool { + config.once.Do(config.compileList) + + if config.quick&uint8(i.Type) > 0 { + return true + } + + // not a named interface (because error, interface{}, anon interface has keywords.) + if i.Type&types.NamedInterface == 0 && i.Type&types.NamedStdInterface == 0 { + return false + } + + for _, re := range config.list { + if re.MatchString(i.Name) { + return true + } + } + + return false +} + +// compileList will transform text list into a bitmask for quick searches and +// slice of regular expressions for quick searches. +func (config *defaultConfig) compileList() { + for _, str := range config.List { + switch str { + case types.NameError: + config.quick |= uint8(types.ErrorInterface) + case types.NameEmpty: + config.quick |= uint8(types.EmptyInterface) + case types.NameAnon: + config.quick |= uint8(types.AnonInterface) + case types.NameStdLib: + config.quick |= uint8(types.NamedStdInterface) + case types.NameGeneric: + config.quick |= uint8(types.Generic) + } + + // allow to parse regular expressions + // todo(butuzov): how can we log error in golangci-lint? 
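+		// Patterns that fail to compile are silently dropped from the list.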
+		if re, err := regexp.Compile(str); err == nil {
+			config.list = append(config.list, re)
+		}
+	}
+}
diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go
new file mode 100644
index 0000000000..6aa04e52e8
--- /dev/null
+++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go
@@ -0,0 +1,74 @@
+package config
+
+import (
+	"errors"
+	"flag"
+	"strings"
+
+	"github.com/butuzov/ireturn/analyzer/internal/types"
+)
+
+var ErrCollisionOfInterests = errors.New("can't have both `-allow` and `-reject` specified at the same time")
+
+// nolint: exhaustivestruct
+func DefaultValidatorConfig() *allowConfig {
+	return allowAll([]string{
+		types.NameEmpty,  // "empty": empty interfaces (interface{})
+		types.NameError,  // "error": for all errors
+		types.NameAnon,   // "anon": for all empty interfaces with methods (interface {Method()})
+		types.NameStdLib, // "std": for all standard library packages
+	})
+}
+
+// New is a factory function that returns an allowConfig or a rejectConfig,
+// depending on the provided arguments.
+func New(fs *flag.FlagSet) (interface{}, error) {
+	var (
+		allowList  = toSlice(getFlagVal(fs, "allow"))
+		rejectList = toSlice(getFlagVal(fs, "reject"))
+	)
+
+	// can't have both at the same time.
+	if len(allowList) != 0 && len(rejectList) != 0 {
+		return nil, ErrCollisionOfInterests
+	}
+
+	switch {
+	case len(allowList) > 0:
+		return allowAll(allowList), nil
+	case len(rejectList) > 0:
+		return rejectAll(rejectList), nil
+	}
+
+	// neither may be set; in that case the defaults are used.
+	return nil, nil
+}
+
+// Both constants are used to clean up items provided in a comma-separated list.
+const (
+	SepTab   string = "\t"
+	SepSpace string = " "
+)
+
+func toSlice(s string) []string {
+	var results []string
+
+	for _, pattern := range strings.Split(s, ",") {
+		pattern = strings.Trim(pattern, SepTab+SepSpace)
+		if pattern != "" {
+			results = append(results, pattern)
+		}
+	}
+
+	return results
+}
+
+func getFlagVal(fs *flag.FlagSet, name string) string {
+	flg := fs.Lookup(name)
+
+	if flg == nil {
+		return ""
+	}
+
+	return flg.Value.String()
+}
diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go
new file mode 100644
index 0000000000..bef6913bb8
--- /dev/null
+++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go
@@ -0,0 +1,17 @@
+package config
+
+import "github.com/butuzov/ireturn/analyzer/internal/types"
+
+// rejectConfig specifies a list of interfaces (keywords, patterns and regular expressions)
+// that are rejected by ireturn as valid to return; any interface not listed is allowed.
+type rejectConfig struct {
+	*defaultConfig
+}
+
+func rejectAll(patterns []string) *rejectConfig {
+	return &rejectConfig{&defaultConfig{List: patterns}}
+}
+
+func (rc *rejectConfig) IsValid(i types.IFace) bool {
+	return !rc.Has(i)
+}
diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go
new file mode 100644
index 0000000000..5e576374d5
--- /dev/null
+++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go
@@ -0,0 +1,54 @@
+package types
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+
+	"golang.org/x/tools/go/analysis"
+)
+
+type IFace struct {
+	Name string // Interface name
+	Type IType  // Type of the interface
+
+	Pos      token.Pos // Token position
+	FuncName string    //
+	OfType   string
+}
+
+func NewIssue(name string, interfaceType IType) IFace {
+	return IFace{
+		Name: name,
+		// Pos: pos,
+		Type: interfaceType,
+	}
+}
+
+func (i *IFace) Enrich(f *ast.FuncDecl) {
+	i.FuncName = f.Name.Name
+	i.Pos = f.Pos()
+}
+
+func (i IFace) String() string {
+	if i.Type != Generic {
+		return fmt.Sprintf("%s returns interface (%s)", i.FuncName, i.Name)
+	}
+
+	if i.OfType != "" {
+		return fmt.Sprintf("%s returns generic interface (%s) of type param %s", i.FuncName, i.Name, i.OfType)
+	}
+
+	return fmt.Sprintf("%s returns generic interface (%s)", i.FuncName, i.Name)
+}
+
+func (i IFace) HashString() string {
+	return fmt.Sprintf("%v-%s", i.Pos, i.String())
+}
+
+func (i IFace) ExportDiagnostic() analysis.Diagnostic {
+	return analysis.Diagnostic{ //nolint: exhaustivestruct
+		Pos:     i.Pos,
+		Message: i.String(),
+	}
+}
diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/types/names.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/names.go
new file mode 100644
index 0000000000..1092c9667c
--- /dev/null
+++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/names.go
@@ -0,0 +1,9 @@
+package types
+
+const (
+	NameEmpty   = "empty"
+	NameAnon    = "anon"
+	NameError   = "error"
+	NameStdLib  = "stdlib"
+	NameGeneric = "generic"
+)
diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/types/types.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/types.go
new file mode 100644
index 0000000000..5c0bd74077
--- /dev/null
+++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/types.go
@@ -0,0 +1,12 @@
+package types
+
+type IType uint8
+
+const (
+	EmptyInterface    IType = 1 << iota // ref as empty
+	AnonInterface                      // ref as anon
+	ErrorInterface                     // ref as error
+	NamedInterface                     // ref as named
+	NamedStdInterface                  // ref as named stdlib
+	Generic                            // ref as generic type parameter
+)
diff --git a/vendor/github.com/butuzov/ireturn/analyzer/std.go b/vendor/github.com/butuzov/ireturn/analyzer/std.go
new file mode 100644
index 0000000000..cac4646126
--- /dev/null
+++ b/vendor/github.com/butuzov/ireturn/analyzer/std.go
@@ -0,0 +1,203 @@
+// Code generated using std.sh; DO NOT EDIT.
+
+// We will ignore the fact that some of the packages
+// were removed from the stdlib.
+ +package analyzer + +var std = map[string]struct{}{ + // added in Go v1.2 in compare to v1.1 (docker image) + "archive/tar": {}, + "archive/zip": {}, + "bufio": {}, + "bytes": {}, + "cmd/cgo": {}, + "cmd/fix": {}, + "cmd/go": {}, + "cmd/gofmt": {}, + "cmd/yacc": {}, + "compress/bzip2": {}, + "compress/flate": {}, + "compress/gzip": {}, + "compress/lzw": {}, + "compress/zlib": {}, + "container/heap": {}, + "container/list": {}, + "container/ring": {}, + "crypto": {}, + "crypto/aes": {}, + "crypto/cipher": {}, + "crypto/des": {}, + "crypto/dsa": {}, + "crypto/ecdsa": {}, + "crypto/elliptic": {}, + "crypto/hmac": {}, + "crypto/md5": {}, + "crypto/rand": {}, + "crypto/rc4": {}, + "crypto/rsa": {}, + "crypto/sha1": {}, + "crypto/sha256": {}, + "crypto/sha512": {}, + "crypto/subtle": {}, + "crypto/tls": {}, + "crypto/x509": {}, + "crypto/x509/pkix": {}, + "database/sql": {}, + "database/sql/driver": {}, + "debug/dwarf": {}, + "debug/elf": {}, + "debug/gosym": {}, + "debug/macho": {}, + "debug/pe": {}, + "encoding": {}, + "encoding/ascii85": {}, + "encoding/asn1": {}, + "encoding/base32": {}, + "encoding/base64": {}, + "encoding/binary": {}, + "encoding/csv": {}, + "encoding/gob": {}, + "encoding/hex": {}, + "encoding/json": {}, + "encoding/pem": {}, + "encoding/xml": {}, + "errors": {}, + "expvar": {}, + "flag": {}, + "fmt": {}, + "go/ast": {}, + "go/build": {}, + "go/doc": {}, + "go/format": {}, + "go/parser": {}, + "go/printer": {}, + "go/scanner": {}, + "go/token": {}, + "hash": {}, + "hash/adler32": {}, + "hash/crc32": {}, + "hash/crc64": {}, + "hash/fnv": {}, + "html": {}, + "html/template": {}, + "image": {}, + "image/color": {}, + "image/color/palette": {}, + "image/draw": {}, + "image/gif": {}, + "image/jpeg": {}, + "image/png": {}, + "index/suffixarray": {}, + "io": {}, + "io/ioutil": {}, + "log": {}, + "log/syslog": {}, + "math": {}, + "math/big": {}, + "math/cmplx": {}, + "math/rand": {}, + "mime": {}, + "mime/multipart": {}, + "net": {}, + "net/http": {}, + "net/http/cgi": {}, + "net/http/cookiejar": {}, + "net/http/fcgi": {}, + "net/http/httptest": {}, + "net/http/httputil": {}, + "net/http/pprof": {}, + "net/mail": {}, + "net/rpc": {}, + "net/rpc/jsonrpc": {}, + "net/smtp": {}, + "net/textproto": {}, + "net/url": {}, + "os": {}, + "os/exec": {}, + "os/signal": {}, + "os/user": {}, + "path": {}, + "path/filepath": {}, + "reflect": {}, + "regexp": {}, + "regexp/syntax": {}, + "runtime": {}, + "runtime/cgo": {}, + "runtime/debug": {}, + "runtime/pprof": {}, + "runtime/race": {}, + "sort": {}, + "strconv": {}, + "strings": {}, + "sync": {}, + "sync/atomic": {}, + "syscall": {}, + "testing": {}, + "testing/iotest": {}, + "testing/quick": {}, + "text/scanner": {}, + "text/tabwriter": {}, + "text/template": {}, + "text/template/parse": {}, + "time": {}, + "unicode": {}, + "unicode/utf16": {}, + "unicode/utf8": {}, + "unsafe": {}, + // added in Go v1.3 in compare to v1.2 (docker image) + "cmd/addr2line": {}, + "cmd/nm": {}, + "cmd/objdump": {}, + "cmd/pack": {}, + "debug/plan9obj": {}, + // added in Go v1.4 in compare to v1.3 (docker image) + "cmd/pprof": {}, + // added in Go v1.5 in compare to v1.4 (docker image) + "go/constant": {}, + "go/importer": {}, + "go/types": {}, + "mime/quotedprintable": {}, + "runtime/trace": {}, + // added in Go v1.6 in compare to v1.5 (docker image) + // added in Go v1.7 in compare to v1.6 (docker image) + "context": {}, + "net/http/httptrace": {}, + // added in Go v1.8 in compare to v1.7 (docker image) + "plugin": {}, + // added in Go v1.9 in compare to 
v1.8 (docker image)
+	"math/bits": {},
+	// added in Go v1.10 in compare to v1.9 (docker image)
+	// added in Go v1.11 in compare to v1.10 (docker image)
+	// added in Go v1.12 in compare to v1.11 (docker image)
+	// added in Go v1.13 in compare to v1.12 (docker image)
+	"crypto/ed25519": {},
+	// added in Go v1.14 in compare to v1.13 (docker image)
+	"hash/maphash": {},
+	// added in Go v1.15 in compare to v1.14 (docker image)
+	"time/tzdata": {},
+	// added in Go v1.16 in compare to v1.15 (docker image)
+	"embed":               {},
+	"go/build/constraint": {},
+	"io/fs":               {},
+	"runtime/metrics":     {},
+	"testing/fstest":      {},
+	// added in Go v1.17 in compare to v1.16 (docker image)
+	// added in Go v1.18 in compare to v1.17 (docker image)
+	"debug/buildinfo": {},
+	"net/netip":       {},
+	// added in Go v1.19 in compare to v1.18 (docker image)
+	"go/doc/comment": {},
+	// added in Go v1.20 in compare to v1.19 (docker image)
+	"crypto/ecdh":      {},
+	"runtime/coverage": {},
+	// added in Go v1.21 in compare to v1.20 (docker image)
+	"cmp":              {},
+	"log/slog":         {},
+	"maps":             {},
+	"slices":           {},
+	"testing/slogtest": {},
+	// added in Go v1.22 in compare to v1.21 (docker image)
+	"go/version":   {},
+	"math/rand/v2": {},
+}
diff --git a/vendor/github.com/butuzov/mirror/.act b/vendor/github.com/butuzov/mirror/.act
new file mode 100644
index 0000000000..8182d703ae
--- /dev/null
+++ b/vendor/github.com/butuzov/mirror/.act
@@ -0,0 +1,2 @@
+--platform ubuntu-latest=butuzov/act-go:latest
+--env DRY_RUN=1
diff --git a/vendor/github.com/butuzov/mirror/.editorconfig b/vendor/github.com/butuzov/mirror/.editorconfig
new file mode 100644
index 0000000000..4d9c20d8d9
--- /dev/null
+++ b/vendor/github.com/butuzov/mirror/.editorconfig
@@ -0,0 +1,28 @@
+# top-most EditorConfig file
+root = true
+
+
+[*]
+end_of_line = lf                 # Unix-style newlines
+charset = utf-8
+
+indent_style = space             # default indentation - spaces
+indent_size = 4                  # default indentation - size
+
+insert_final_newline = true      # new line at the end of file
+trim_trailing_whitespace = true  # no extra spaces at the end of lines
+
+[*.{go,gohtml,gotpl}]            # Go
+indent_style = tab
+indent_size = 2
+
+[{Makefile,makefile}]            # Make
+indent_style = tab
+
+[*.md]                           # Markdown
+trim_trailing_whitespace = true
+max_line_length = 100
+insert_final_newline = true
+indent_size = 2
+
+
diff --git a/vendor/github.com/butuzov/mirror/.gitignore b/vendor/github.com/butuzov/mirror/.gitignore
new file mode 100644
index 0000000000..109f33b98e
--- /dev/null
+++ b/vendor/github.com/butuzov/mirror/.gitignore
@@ -0,0 +1,11 @@
+# artifacts
+coverage.cov
+bin/*
+dist/*
+tmp/*
+out*
+sandbox*
+demo*
+.task*
+.ipynb*
+.jupyter*
diff --git a/vendor/github.com/butuzov/mirror/.goreleaser.yaml b/vendor/github.com/butuzov/mirror/.goreleaser.yaml
new file mode 100644
index 0000000000..11749ed2b3
--- /dev/null
+++ b/vendor/github.com/butuzov/mirror/.goreleaser.yaml
@@ -0,0 +1,61 @@
+---
+project_name: mirror
+
+builds:
+  - binary: mirror
+    env:
+      - CGO_ENABLED=0
+    goos:
+      - darwin
+      - linux
+      - windows
+    goarch:
+      - amd64
+      - 386
+      - arm64
+      - arm
+    goarm:
+      - 6
+    ignore:
+      - goos: windows
+        goarm: 6
+      - goos: windows
+        goarch: arm64
+      - goos: linux
+        goarm: 6
+      - goos: darwin
+        goarch: 386
+    main: ./cmd/mirror/
+    flags:
+      - -trimpath
+    ldflags: -s -w
+
+checksum:
+  name_template: 'checksums.txt'
+
+changelog:
+  sort: asc
+  filters:
+    exclude:
+      - '(?i)^docs?:'
+      - '(?i)^docs\([^:]+\):'
+      - '(?i)^docs\[[^:]+\]:'
+      - '^tests?:'
+      - '(?i)^dev:'
+      - Merge pull request
+      - Merge branch
+
+archives:
+  - name_template:
'{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}{{ if .Mips }}_{{ .Mips }}{{ end }}'
+    replacements:
+      darwin: darwin
+      linux: linux
+      windows: windows
+      386: i386
+      amd64: x86_64
+    format_overrides:
+      - goos: windows
+        format: zip
+    files:
+      - LICENSE
+      - readme.md
diff --git a/vendor/github.com/butuzov/mirror/LICENSE b/vendor/github.com/butuzov/mirror/LICENSE
new file mode 100644
index 0000000000..a9752e9726
--- /dev/null
+++ b/vendor/github.com/butuzov/mirror/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Oleg Butuzov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md b/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md
new file mode 100644
index 0000000000..776816e514
--- /dev/null
+++ b/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md
@@ -0,0 +1,150 @@
+| `string`-based call | `[]byte`-based mirror |
+| --- | --- |
+| `func (b *bufio.Writer) WriteString(s string) (int, error)` | `func (b *bufio.Writer) Write(p []byte) (int, error)`<br>`func (b *bufio.Writer) WriteRune(r rune) (int, error)` |
+| `func (b *bytes.Buffer) WriteString(s string) (int, error)` | `func (b *bytes.Buffer) Write(p []byte) (int, error)`<br>`func (b *bytes.Buffer) WriteRune(r rune) (int, error)` |
+| `func strings.Compare(a, b string) int` | `func bytes.Compare(a, b []byte) int` |
+| `func strings.Contains(s, substr string) bool` | `func bytes.Contains(b, subslice []byte) bool` |
+| `func strings.ContainsAny(s, chars string) bool` | `func bytes.ContainsAny(b []byte, chars string) bool` |
+| `func strings.ContainsRune(s string, r rune) bool` | `func bytes.ContainsRune(b []byte, r rune) bool` |
+| `func strings.Count(s, substr string) int` | `func bytes.Count(s, sep []byte) int` |
+| `func strings.EqualFold(s, t string) bool` | `func bytes.EqualFold(s, t []byte) bool` |
+| `func strings.HasPrefix(s, prefix string) bool` | `func bytes.HasPrefix(s, prefix []byte) bool` |
+| `func strings.HasSuffix(s, suffix string) bool` | `func bytes.HasSuffix(s, suffix []byte) bool` |
+| `func strings.Index(s, substr string) int` | `func bytes.Index(s, sep []byte) int` |
+| `func strings.IndexAny(s, chars string) int` | `func bytes.IndexAny(s []byte, chars string) int` |
+| `func strings.IndexByte(s string, c byte) int` | `func bytes.IndexByte(b []byte, c byte) int` |
+| `func strings.IndexFunc(s string, f func(rune) bool) int` | `func bytes.IndexFunc(s []byte, f func(r rune) bool) int` |
+| `func strings.IndexRune(s string, r rune) int` | `func bytes.IndexRune(s []byte, r rune) int` |
+| `func strings.LastIndex(s, sep string) int` | `func bytes.LastIndex(s, sep []byte) int` |
+| `func strings.LastIndexAny(s, chars string) int` | `func bytes.LastIndexAny(s []byte, chars string) int` |
+| `func strings.LastIndexByte(s string, c byte) int` | `func bytes.LastIndexByte(s []byte, c byte) int` |
+| `func strings.LastIndexFunc(s string, f func(rune) bool) int` | `func bytes.LastIndexFunc(s []byte, f func(r rune) bool) int` |
+| `func bytes.NewBufferString(s string) *bytes.Buffer` | `func bytes.NewBuffer(buf []byte) *bytes.Buffer` |
+| `func (h *hash/maphash.Hash) WriteString(s string) (int, error)` | `func (h *hash/maphash.Hash) Write(b []byte) (int, error)` |
+| `func (rw *net/http/httptest.ResponseRecorder) WriteString(str string) (int, error)` | `func (rw *net/http/httptest.ResponseRecorder) Write(buf []byte) (int, error)` |
+| `func (f *os.File) WriteString(s string) (n int, err error)` | `func (f *os.File) Write(b []byte) (n int, err error)` |
+| `func regexp.MatchString(pattern string, s string) (bool, error)` | `func regexp.Match(pattern string, b []byte) (bool, error)` |
+| `func (re *regexp.Regexp) FindAllStringIndex(s string, n int) [][]int` | `func (re *regexp.Regexp) FindAllIndex(b []byte, n int) [][]int` |
+| `func (re *regexp.Regexp) FindAllStringSubmatch(s string, n int) [][]string` | `func (re *regexp.Regexp) FindAllSubmatch(b []byte, n int) [][][]byte` |
+| `func (re *regexp.Regexp) FindStringIndex(s string) (loc []int)` | `func (re *regexp.Regexp) FindIndex(b []byte) (loc []int)` |
+| `func (re *regexp.Regexp) FindStringSubmatchIndex(s string) []int` | `func (re *regexp.Regexp) FindSubmatchIndex(b []byte) []int` |
+| `func (re *regexp.Regexp) MatchString(s string) bool` | `func (re *regexp.Regexp) Match(b []byte) bool` |
+| `func (b *strings.Builder) WriteString(s string) (int, error)` | `func (b *strings.Builder) Write(p []byte) (int, error)`<br>`func (b *strings.Builder) WriteRune(r rune) (int, error)` |
+| `func utf8.ValidString(s string) bool` | `func utf8.Valid(p []byte) bool` |
+| `func utf8.FullRuneInString(s string) bool` | `func utf8.FullRune(p []byte) bool` |
+| `func utf8.RuneCountInString(s string) (n int)` | `func utf8.RuneCount(p []byte) int` |
+| `func utf8.DecodeLastRuneInString(s string) (rune, int)` | `func utf8.DecodeLastRune(p []byte) (rune, int)` |
+| `func utf8.DecodeRuneInString(s string) (rune, int)` | `func utf8.DecodeRune(p []byte) (rune, int)` |
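Each row pairs a string-based call with its `[]byte` mirror; the linter flags code that converts a value just to reach the wrong side of the pair. A minimal illustrative sketch of the pattern (not taken from the vendored sources):

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	s := "hello"

	// mirror would flag this call: the []byte conversion is wasted,
	// because bytes.Buffer has a WriteString mirror.
	buf.Write([]byte(s))

	// suggested form: same effect, no conversion.
	buf.WriteString(s)

	fmt.Println(buf.String())
}
```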
diff --git a/vendor/github.com/butuzov/mirror/Makefile b/vendor/github.com/butuzov/mirror/Makefile new file mode 100644 index 0000000000..b4b952b012 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/Makefile @@ -0,0 +1,58 @@ +# --- Required ---------------------------------------------------------------- +export PATH := $(PWD)/bin:$(PATH) # ./bin to $PATH +export SHELL := bash # Default Shell + +GOPKGS := $(shell go list ./... | grep -vE "(cmd|sandbox|testdata)" | tr -s '\n' ',' | sed 's/.\{1\}$$//' ) + + +build: + @ go build -trimpath -ldflags="-w -s" \ + -o bin/mirror ./cmd/mirror/ + +build-race: + @ go build -race -trimpath -ldflags="-w -s" \ + -o bin/mirror ./cmd/mirror/ + +tests: + go test -v -count=1 -race \ + -failfast \ + -parallel=2 \ + -timeout=1m \ + -covermode=atomic \ + -coverpkg=$(GOPKGS) -coverprofile=coverage.cov ./... + +tests-summary: + go test -v -count=1 -race \ + -failfast \ + -parallel=2 \ + -timeout=1m \ + -covermode=atomic \ + -coverpkg=$(GOPKGS) -coverprofile=coverage.cov --json ./... | tparse -all + +test-generate: + go run ./cmd/internal/generate-tests/ "$(PWD)/testdata" + +lints: + golangci-lint run --no-config ./... -D deadcode --skip-dirs "^(cmd|sandbox|testdata)" + + +cover: + go tool cover -html=coverage.cov + +install: + go install -trimpath -v -ldflags="-w -s" \ + ./cmd/mirror + +funcs: + echo "" > "out/results.txt" + go list std | grep -v "vendor" | grep -v "internal" | \ + xargs -I {} sh -c 'go doc -all {} > out/$(basename {}).txt' + +bin/goreleaser: + @curl -Ls https://github.com/goreleaser/goreleaser/releases/download/v1.17.2/goreleaser_Darwin_all.tar.gz | tar -zOxf - goreleaser > ./bin/goreleaser + chmod 0755 ./bin/goreleaser + +test-release: bin/goreleaser + goreleaser release --help + goreleaser release -f .goreleaser.yaml \ + --skip-validate --skip-publish --clean diff --git a/vendor/github.com/butuzov/mirror/Taskfile.yml b/vendor/github.com/butuzov/mirror/Taskfile.yml new file mode 100644 index 0000000000..26c9ba2571 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/Taskfile.yml @@ -0,0 +1,28 @@ +version: '3' + +tasks: + default: + sources: + - "./**/*.go" + method: timestamp + cmds: + - clear + - make build + - make build-race + - task: lints + # - make test-generate + - task: tests + - cmd: go run ./cmd/mirror/ --with-tests --with-debug ./sandbox + ignore_error: true + + testcase: go test -v -failfast -count=1 -run "TestAll/{{ .Case }}" ./... 
+
+  tests:
+    cmds:
+      - cmd: make tests
+        ignore_error: true
+
+  lints:
+    cmds:
+      - cmd: make lints
+        ignore_error: true
diff --git a/vendor/github.com/butuzov/mirror/analyzer.go b/vendor/github.com/butuzov/mirror/analyzer.go
new file mode 100644
index 0000000000..13ded46c6d
--- /dev/null
+++ b/vendor/github.com/butuzov/mirror/analyzer.go
@@ -0,0 +1,144 @@
+package mirror
+
+import (
+	"flag"
+	"go/ast"
+	"strings"
+
+	"github.com/butuzov/mirror/internal/checker"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+func NewAnalyzer() *analysis.Analyzer {
+	flags := flags()
+
+	return &analysis.Analyzer{
+		Name: "mirror",
+		Doc:  "reports wrong mirror patterns of bytes/strings usage",
+		Run:  run,
+		Requires: []*analysis.Analyzer{
+			inspect.Analyzer,
+		},
+		Flags: flags,
+	}
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	withTests := pass.Analyzer.Flags.Lookup("with-tests").Value.String() == "true"
+	// --- Reporting violations via issues ---------------------------------------
+	for _, violation := range Run(pass, withTests) {
+		pass.Report(violation.Diagnostic(pass.Fset))
+	}
+
+	return nil, nil
+}
+
+func Run(pass *analysis.Pass, withTests bool) []*checker.Violation {
+	violations := []*checker.Violation{}
+	// --- Setup -----------------------------------------------------------------
+
+	check := checker.New(
+		BytesFunctions, BytesBufferMethods,
+		RegexpFunctions, RegexpRegexpMethods,
+		StringFunctions, StringsBuilderMethods,
+		BufioMethods, HTTPTestMethods,
+		OsFileMethods, MaphashMethods,
+		UTF8Functions,
+	)
+
+	check.Type = checker.WrapType(pass.TypesInfo)
+	check.Print = checker.WrapPrint(pass.Fset)
+
+	ins, _ := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	imports := checker.Load(pass.Fset, ins)
+
+	// --- Preorder Checker ------------------------------------------------------
+	ins.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
+		callExpr := n.(*ast.CallExpr)
+		fileName := pass.Fset.Position(callExpr.Pos()).Filename
+
+		if !withTests && strings.HasSuffix(fileName, "_test.go") {
+			return
+		}
+
+		// -------------------------------------------------------------------------
+		switch expr := callExpr.Fun.(type) {
+		// NOTE(butuzov): Regular calls (`*ast.SelectorExpr`) like strings.HasPrefix
+		//                or re.Match are handled by this check
+		case *ast.SelectorExpr:
+
+			x, ok := expr.X.(*ast.Ident)
+			if !ok {
+				return
+			}
+
+			// TODO(butuzov): Add check for the ast.ParenExpr in e.Fun so we can
+			//                target the constructions like this (and other calls)
+			// -----------------------------------------------------------------------
+			// Example:
+			//       (&maphash.Hash{}).Write([]byte("foobar"))
+			// -----------------------------------------------------------------------
+
+			// Case 1: Is this a function call?
+			pkgName, name := x.Name, expr.Sel.Name
+			if pkg, ok := imports.Lookup(fileName, pkgName); ok {
+				if v := check.Match(pkg, name); v != nil {
+					if args, found := check.Handle(v, callExpr); found {
+						violations = append(violations, v.With(check.Print(expr.X), callExpr, args))
+					}
+					return
+				}
+			}
+
+			// Case 2: Is this a method call?
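+			// Resolve the receiver's type so methods such as (*bytes.Buffer).Write can be matched by their struct name.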
+ tv := pass.TypesInfo.Types[expr.X] + if !tv.IsValue() || tv.Type == nil { + return + } + + pkgStruct, name := cleanAsterisk(tv.Type.String()), expr.Sel.Name + for _, v := range check.Matches(pkgStruct, name) { + if v == nil { + continue + } + + if args, found := check.Handle(v, callExpr); found { + violations = append(violations, v.With(check.Print(expr.X), callExpr, args)) + return + } + } + + case *ast.Ident: + // NOTE(butuzov): Special case of "." imported packages, only functions. + + if pkg, ok := imports.Lookup(fileName, "."); ok { + if v := check.Match(pkg, expr.Name); v != nil { + if args, found := check.Handle(v, callExpr); found { + violations = append(violations, v.With(nil, callExpr, args)) + } + return + } + } + } + }) + + return violations +} + +func flags() flag.FlagSet { + set := flag.NewFlagSet("", flag.PanicOnError) + set.Bool("with-tests", false, "do not skip tests in reports") + set.Bool("with-debug", false, "debug linter run (development only)") + return *set +} + +func cleanAsterisk(s string) string { + if strings.HasPrefix(s, "*") { + return s[1:] + } + + return s +} diff --git a/vendor/github.com/butuzov/mirror/checkers_bufio.go b/vendor/github.com/butuzov/mirror/checkers_bufio.go new file mode 100644 index 0000000000..292ed269aa --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_bufio.go @@ -0,0 +1,56 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var BufioMethods = []checker.Violation{ + { // (*bufio.Writer).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "bufio", + Struct: "Writer", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `b := bufio.Writer{}`, + Pattern: `Write($0)`, + Returns: 2, + }, + }, + { // (*bufio.Writer).WriteString + Type: checker.Method, + Targets: checker.Strings, + Package: "bufio", + Struct: "Writer", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `b := bufio.Writer{}`, + Pattern: `WriteString($0)`, + Returns: 2, + }, + }, + { // (*bufio.Writer).WriteString -> (*bufio.Writer).WriteRune + Targets: checker.Strings, + Type: checker.Method, + Package: "bufio", + Struct: "Writer", + Caller: "WriteString", + Args: []int{0}, + ArgsType: checker.Rune, + AltCaller: "WriteRune", + }, + // { // (*bufio.Writer).WriteString -> (*bufio.Writer).WriteByte + // Targets: checker.Strings, + // Type: checker.Method, + // Package: "strings", + // Struct: "Builder", + // Caller: "WriteString", + // Args: []int{0}, + // ArgsType: checker.Byte, + // AltCaller: "WriteByte", // byte + // }, +} diff --git a/vendor/github.com/butuzov/mirror/checkers_bytes.go b/vendor/github.com/butuzov/mirror/checkers_bytes.go new file mode 100644 index 0000000000..c490a3784e --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_bytes.go @@ -0,0 +1,326 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var ( + BytesFunctions = []checker.Violation{ + { // bytes.NewBuffer + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "NewBuffer", + Args: []int{0}, + AltCaller: "NewBufferString", + + Generate: &checker.Generate{ + Pattern: `NewBuffer($0)`, + Returns: 1, + }, + }, + { // bytes.NewBufferString + Targets: checker.Strings, + Type: checker.Function, + Package: "bytes", + Caller: "NewBufferString", + Args: []int{0}, + AltCaller: "NewBuffer", + + Generate: &checker.Generate{ + Pattern: `NewBufferString($0)`, + Returns: 1, + }, + }, + 
{ // bytes.Compare: + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "Compare", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "Compare", + + Generate: &checker.Generate{ + Pattern: `Compare($0, $1)`, + Returns: 1, + }, + }, + { // bytes.Contains: + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "Contains", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "Contains", + + Generate: &checker.Generate{ + Pattern: `Contains($0, $1)`, + Returns: 1, + }, + }, + { // bytes.ContainsAny + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "ContainsAny", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "ContainsAny", + + Generate: &checker.Generate{ + Pattern: `ContainsAny($0, "f")`, + Returns: 1, + }, + }, + { // bytes.ContainsRune + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "ContainsRune", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "ContainsRune", + + Generate: &checker.Generate{ + Pattern: `ContainsRune($0, 'ф')`, + Returns: 1, + }, + }, + { // bytes.Count + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "Count", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "Count", + + Generate: &checker.Generate{ + Pattern: `Count($0, $1)`, + Returns: 1, + }, + }, + { // bytes.EqualFold + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "EqualFold", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "EqualFold", + + Generate: &checker.Generate{ + Pattern: `EqualFold($0, $1)`, + Returns: 1, + }, + }, + + { // bytes.HasPrefix + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "HasPrefix", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "HasPrefix", + + Generate: &checker.Generate{ + Pattern: `HasPrefix($0, $1)`, + Returns: 1, + }, + }, + { // bytes.HasSuffix + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "HasSuffix", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "HasSuffix", + + Generate: &checker.Generate{ + Pattern: `HasSuffix($0, $1)`, + Returns: 1, + }, + }, + { // bytes.Index + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "Index", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "Index", + + Generate: &checker.Generate{ + Pattern: `Index($0, $1)`, + Returns: 1, + }, + }, + { // bytes.IndexAny + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "IndexAny", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "IndexAny", + + Generate: &checker.Generate{ + Pattern: `IndexAny($0, "f")`, + Returns: 1, + }, + }, + { // bytes.IndexByte + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "IndexByte", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "IndexByte", + + Generate: &checker.Generate{ + Pattern: `IndexByte($0, 'f')`, + Returns: 1, + }, + }, + { // bytes.IndexFunc + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "IndexFunc", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "IndexFunc", + + Generate: &checker.Generate{ + Pattern: `IndexFunc($0, func(rune) bool {return true })`, + Returns: 1, + }, + }, + { // bytes.IndexRune + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "IndexRune", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "IndexRune", + + Generate: 
&checker.Generate{ + Pattern: `IndexRune($0, rune('ф'))`, + Returns: 1, + }, + }, + { // bytes.LastIndex + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "LastIndex", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "LastIndex", + + Generate: &checker.Generate{ + Pattern: `LastIndex($0, $1)`, + Returns: 1, + }, + }, + { // bytes.LastIndexAny + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "LastIndexAny", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "LastIndexAny", + + Generate: &checker.Generate{ + Pattern: `LastIndexAny($0, "ф")`, + Returns: 1, + }, + }, + { // bytes.LastIndexByte + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "LastIndexByte", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "LastIndexByte", + + Generate: &checker.Generate{ + Pattern: `LastIndexByte($0, 'f')`, + Returns: 1, + }, + }, + { // bytes.LastIndexFunc + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "LastIndexFunc", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "LastIndexFunc", + + Generate: &checker.Generate{ + Pattern: `LastIndexFunc($0, func(rune) bool {return true })`, + Returns: 1, + }, + }, + } + + BytesBufferMethods = []checker.Violation{ + { // (*bytes.Buffer).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "bytes", + Struct: "Buffer", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `bb := bytes.Buffer{}`, + Pattern: `Write($0)`, + Returns: 2, + }, + }, + { // (*bytes.Buffer).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "bytes", + Struct: "Buffer", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `bb := bytes.Buffer{}`, + Pattern: `WriteString($0)`, + Returns: 2, + }, + }, + { // (*bytes.Buffer).WriteString -> (*bytes.Buffer).WriteRune + Targets: checker.Strings, + Type: checker.Method, + Package: "bytes", + Struct: "Buffer", + Caller: "WriteString", + Args: []int{0}, + ArgsType: checker.Rune, + AltCaller: "WriteRune", + }, + // { // (*bytes.Buffer).WriteString -> (*bytes.Buffer).WriteByte + // Targets: checker.Strings, + // Type: checker.Method, + // Package: "bytes", + // Struct: "Buffer", + // Caller: "WriteString", + // Args: []int{0}, + // ArgsType: checker.Byte, + // AltCaller: "WriteByte", + // }, + } +) diff --git a/vendor/github.com/butuzov/mirror/checkers_httptest.go b/vendor/github.com/butuzov/mirror/checkers_httptest.go new file mode 100644 index 0000000000..ae67509300 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_httptest.go @@ -0,0 +1,36 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var HTTPTestMethods = []checker.Violation{ + { // (*net/http/httptest.ResponseRecorder).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "net/http/httptest", + Struct: "ResponseRecorder", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `h := httptest.ResponseRecorder{}`, + Pattern: `Write($0)`, + Returns: 2, + }, + }, + { // (*net/http/httptest.ResponseRecorder).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "net/http/httptest", + Struct: "ResponseRecorder", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `h := httptest.ResponseRecorder{}`, + 
Pattern: `WriteString($0)`, + Returns: 2, + }, + }, +} diff --git a/vendor/github.com/butuzov/mirror/checkers_maphash.go b/vendor/github.com/butuzov/mirror/checkers_maphash.go new file mode 100644 index 0000000000..4d184d2a95 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_maphash.go @@ -0,0 +1,36 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var MaphashMethods = []checker.Violation{ + { // (*hash/maphash).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "hash/maphash", + Struct: "Hash", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `h := maphash.Hash{}`, + Pattern: `Write($0)`, + Returns: 2, + }, + }, + { // (*hash/maphash).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "hash/maphash", + Struct: "Hash", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `h := maphash.Hash{}`, + Pattern: `WriteString($0)`, + Returns: 2, + }, + }, +} diff --git a/vendor/github.com/butuzov/mirror/checkers_os.go b/vendor/github.com/butuzov/mirror/checkers_os.go new file mode 100644 index 0000000000..09f5a18e58 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_os.go @@ -0,0 +1,36 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var OsFileMethods = []checker.Violation{ + { // (*os.File).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "os", + Struct: "File", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `f := &os.File{}`, + Pattern: `Write($0)`, + Returns: 2, + }, + }, + { // (*os.File).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "os", + Struct: "File", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `f := &os.File{}`, + Pattern: `WriteString($0)`, + Returns: 2, + }, + }, +} diff --git a/vendor/github.com/butuzov/mirror/checkers_regexp.go b/vendor/github.com/butuzov/mirror/checkers_regexp.go new file mode 100644 index 0000000000..17175e0286 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_regexp.go @@ -0,0 +1,187 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var ( + RegexpFunctions = []checker.Violation{ + { // regexp.Match + Targets: checker.Bytes, + Type: checker.Function, + Package: "regexp", + Caller: "Match", + Args: []int{1}, + AltCaller: "MatchString", + + Generate: &checker.Generate{ + Pattern: `Match("foo", $0)`, + Returns: 2, + }, + }, + { // regexp.MatchString + Targets: checker.Strings, + Type: checker.Function, + Package: "regexp", + Caller: "MatchString", + Args: []int{1}, + AltCaller: "Match", + + Generate: &checker.Generate{ + Pattern: `MatchString("foo", $0)`, + Returns: 2, + }, + }, + } + + RegexpRegexpMethods = []checker.Violation{ + { // (*regexp.Regexp).Match + Targets: checker.Bytes, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "Match", + Args: []int{0}, + AltCaller: "MatchString", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `Match($0)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).MatchString + Targets: checker.Strings, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "MatchString", + Args: []int{0}, + AltCaller: "Match", + + Generate: &checker.Generate{ + PreCondition: `re := 
regexp.MustCompile(".*")`, + Pattern: `MatchString($0)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindAllIndex + Targets: checker.Bytes, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindAllIndex", + Args: []int{0}, + AltCaller: "FindAllStringIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindAllIndex($0, 1)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindAllStringIndex + Targets: checker.Strings, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindAllStringIndex", + Args: []int{0}, + AltCaller: "FindAllIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindAllStringIndex($0, 1)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindAllSubmatchIndex + Targets: checker.Bytes, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindAllSubmatchIndex", + Args: []int{0}, + AltCaller: "FindAllStringSubmatchIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindAllSubmatchIndex($0, 1)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindAllStringSubmatchIndex + Targets: checker.Strings, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindAllStringSubmatchIndex", + Args: []int{0}, + AltCaller: "FindAllSubmatchIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindAllStringSubmatchIndex($0, 1)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindIndex + Targets: checker.Bytes, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindIndex", + Args: []int{0}, + AltCaller: "FindStringIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindIndex($0)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindStringIndex + Targets: checker.Strings, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindStringIndex", + Args: []int{0}, + AltCaller: "FindIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindStringIndex($0)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindSubmatchIndex + Targets: checker.Bytes, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindSubmatchIndex", + Args: []int{0}, + AltCaller: "FindStringSubmatchIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindSubmatchIndex($0)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindStringSubmatchIndex + Targets: checker.Strings, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindStringSubmatchIndex", + Args: []int{0}, + AltCaller: "FindSubmatchIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindStringSubmatchIndex($0)`, + Returns: 1, + }, + }, + } +) diff --git a/vendor/github.com/butuzov/mirror/checkers_strings.go b/vendor/github.com/butuzov/mirror/checkers_strings.go new file mode 100644 index 0000000000..ead7e9cc7e --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_strings.go @@ -0,0 +1,299 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var ( + StringFunctions = []checker.Violation{ + { // strings.Compare + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "Compare", + Args: []int{0, 1}, + AltPackage: "bytes", + 
AltCaller: "Compare", + + Generate: &checker.Generate{ + Pattern: `Compare($0,$1)`, + Returns: 1, + }, + }, + { // strings.Contains + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "Contains", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "Contains", + + Generate: &checker.Generate{ + Pattern: `Contains($0,$1)`, + Returns: 1, + }, + }, + { // strings.ContainsAny + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "ContainsAny", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "ContainsAny", + + Generate: &checker.Generate{ + Pattern: `ContainsAny($0,"foobar")`, + Returns: 1, + }, + }, + { // strings.ContainsRune + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "ContainsRune", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "ContainsRune", + + Generate: &checker.Generate{ + Pattern: `ContainsRune($0,'ф')`, + Returns: 1, + }, + }, + { // strings.Count + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "Count", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "Count", + + Generate: &checker.Generate{ + Pattern: `Count($0, $1)`, + Returns: 1, + }, + }, + { // strings.EqualFold + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "EqualFold", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "EqualFold", + + Generate: &checker.Generate{ + Pattern: `EqualFold($0,$1)`, + Returns: 1, + }, + }, + { // strings.HasPrefix + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "HasPrefix", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "HasPrefix", + + Generate: &checker.Generate{ + Pattern: `HasPrefix($0,$1)`, + Returns: 1, + }, + }, + { // strings.HasSuffix + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "HasSuffix", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "HasSuffix", + + Generate: &checker.Generate{ + Pattern: `HasSuffix($0,$1)`, + Returns: 1, + }, + }, + { // strings.Index + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "Index", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "Index", + + Generate: &checker.Generate{ + Pattern: `Index($0,$1)`, + Returns: 1, + }, + }, + { // strings.IndexAny + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "IndexAny", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "IndexAny", + + Generate: &checker.Generate{ + Pattern: `IndexAny($0, "f")`, + Returns: 1, + }, + }, + { // strings.IndexByte + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "IndexByte", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "IndexByte", + + Generate: &checker.Generate{ + Pattern: `IndexByte($0, byte('f'))`, + Returns: 1, + }, + }, + { // strings.IndexFunc + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "IndexFunc", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "IndexFunc", + + Generate: &checker.Generate{ + Pattern: `IndexFunc($0,func(r rune) bool { return true })`, + Returns: 1, + }, + }, + { // strings.IndexRune + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "IndexRune", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "IndexRune", + + Generate: &checker.Generate{ + Pattern: `IndexRune($0, rune('ф'))`, + Returns: 1, + }, + }, + { // strings.LastIndex + Targets: 
checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "LastIndex", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "LastIndex", + + Generate: &checker.Generate{ + Pattern: `LastIndex($0,$1)`, + Returns: 1, + }, + }, + { // strings.LastIndexAny + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "LastIndexAny", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "LastIndexAny", + + Generate: &checker.Generate{ + Pattern: `LastIndexAny($0,"f")`, + Returns: 1, + }, + }, + { // strings.LastIndexByte + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "LastIndexByte", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "LastIndexByte", + + Generate: &checker.Generate{ + Pattern: `LastIndexByte($0, byte('f'))`, + Returns: 1, + }, + }, + { // strings.LastIndexFunc + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "LastIndexFunc", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "LastIndexFunc", + + Generate: &checker.Generate{ + Pattern: `LastIndexFunc($0, func(r rune) bool { return true })`, + Returns: 1, + }, + }, + } + + StringsBuilderMethods = []checker.Violation{ + { // (*strings.Builder).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "strings", + Struct: "Builder", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `builder := strings.Builder{}`, + Pattern: `Write($0)`, + Returns: 2, + }, + }, + { // (*strings.Builder).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "strings", + Struct: "Builder", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `builder := strings.Builder{}`, + Pattern: `WriteString($0)`, + Returns: 2, + }, + }, + { // (*strings.Builder).WriteString -> (*strings.Builder).WriteRune + Targets: checker.Strings, + Type: checker.Method, + Package: "strings", + Struct: "Builder", + Caller: "WriteString", + Args: []int{0}, + ArgsType: checker.Rune, + AltCaller: "WriteRune", + }, + // { // (*strings.Builder).WriteString -> (*strings.Builder).WriteByte + // Targets: checker.Strings, + // Type: checker.Method, + // Package: "strings", + // Struct: "Builder", + // Caller: "WriteString", + // Args: []int{0}, + // ArgsType: checker.Byte, + // AltCaller: "WriteByte", // byte + // }, + } +) diff --git a/vendor/github.com/butuzov/mirror/checkers_utf8.go b/vendor/github.com/butuzov/mirror/checkers_utf8.go new file mode 100644 index 0000000000..e7c4d5ba4d --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_utf8.go @@ -0,0 +1,138 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var UTF8Functions = []checker.Violation{ + { // utf8.Valid + Type: checker.Function, + Targets: checker.Bytes, + Package: "unicode/utf8", + Caller: "Valid", + Args: []int{0}, + AltCaller: "ValidString", + + Generate: &checker.Generate{ + Pattern: `Valid($0)`, + Returns: 1, + }, + }, + { // utf8.ValidString + Targets: checker.Strings, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "ValidString", + Args: []int{0}, + AltCaller: "Valid", + + Generate: &checker.Generate{ + Pattern: `ValidString($0)`, + Returns: 1, + }, + }, + { // utf8.FullRune + Targets: checker.Bytes, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "FullRune", + Args: []int{0}, + AltCaller: "FullRuneInString", + + Generate: &checker.Generate{ + Pattern: `FullRune($0)`, + 
Returns: 1, + }, + }, + { // utf8.FullRuneInString + Targets: checker.Strings, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "FullRuneInString", + Args: []int{0}, + AltCaller: "FullRune", + + Generate: &checker.Generate{ + Pattern: `FullRuneInString($0)`, + Returns: 1, + }, + }, + + { // utf8.RuneCount + Targets: checker.Bytes, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "RuneCount", + Args: []int{0}, + AltCaller: "RuneCountInString", + + Generate: &checker.Generate{ + Pattern: `RuneCount($0)`, + Returns: 1, + }, + }, + { // utf8.RuneCountInString + Targets: checker.Strings, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "RuneCountInString", + Args: []int{0}, + AltCaller: "RuneCount", + + Generate: &checker.Generate{ + Pattern: `RuneCountInString($0)`, + Returns: 1, + }, + }, + + { // utf8.DecodeLastRune + Targets: checker.Bytes, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "DecodeLastRune", + Args: []int{0}, + AltCaller: "DecodeLastRuneInString", + + Generate: &checker.Generate{ + Pattern: `DecodeLastRune($0)`, + Returns: 2, + }, + }, + { // utf8.DecodeLastRuneInString + Targets: checker.Strings, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "DecodeLastRuneInString", + Args: []int{0}, + AltCaller: "DecodeLastRune", + + Generate: &checker.Generate{ + Pattern: `DecodeLastRuneInString($0)`, + Returns: 2, + }, + }, + { // utf8.DecodeRune + Targets: checker.Bytes, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "DecodeRune", + Args: []int{0}, + AltCaller: "DecodeRuneInString", + + Generate: &checker.Generate{ + Pattern: `DecodeRune($0)`, + Returns: 2, + }, + }, + { // utf8.DecodeRuneInString + Targets: checker.Strings, + Type: checker.Function, + Package: "unicode/utf8", + Args: []int{0}, + Caller: "DecodeRuneInString", + AltCaller: "DecodeRune", + + Generate: &checker.Generate{ + Pattern: `DecodeRuneInString($0)`, + Returns: 2, + }, + }, +} diff --git a/vendor/github.com/butuzov/mirror/internal/checker/checker.go b/vendor/github.com/butuzov/mirror/internal/checker/checker.go new file mode 100644 index 0000000000..c1a9416314 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/internal/checker/checker.go @@ -0,0 +1,147 @@ +package checker + +import ( + "bytes" + "go/ast" + "go/printer" + "go/token" + "go/types" + "strings" ) + +// Checker will perform standard checks on a package and its methods. +type Checker struct { + Violations []Violation // List of available violations + Packages map[string][]int // Storing indexes of Violations per pkg or pkg.Struct + Type func(ast.Expr) string // Type Checker closure. + Print func(ast.Node) []byte // String representation of the expression. +} + +func New(violations ...[]Violation) Checker { + c := Checker{ + Packages: make(map[string][]int), + } + + for i := range violations { + c.register(violations[i]) + } + + return c +} + +// Match will check the available violations we got from checks against +// the `name` caller from package `pkgName`. +func (c *Checker) Match(pkgName, name string) *Violation { + for _, v := range c.Matches(pkgName, name) { + return v + } + + return nil +} + +// Matches does the same thing as Match but returns a slice of violations; +// the only callers that require this are bytes.Buffer and strings.Builder, +// and it is only used when matching methods in the analyzer.
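+// For example, given the violation tables registered above, +// c.Match("regexp", "Match") returns the regexp.Match -> MatchString violation, +// while c.Matches("bytes.Buffer", "WriteString") returns both the WriteString -> Write +// and the WriteString -> WriteRune alternatives.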
+func (c *Checker) Matches(pkgName, name string) []*Violation { + var matches []*Violation + checkStruct := strings.Contains(pkgName, ".") + + for _, idx := range c.Packages[pkgName] { + if c.Violations[idx].Caller == name { + if checkStruct == (len(c.Violations[idx].Struct) == 0) { + continue + } + + // copy violation + v := c.Violations[idx] + matches = append(matches, &v) + } + } + + return matches +} + +func (c *Checker) Handle(v *Violation, ce *ast.CallExpr) (map[int]ast.Expr, bool) { + m := map[int]ast.Expr{} + + // We are going to check each of the elements marked for checking, in order + // to find a call that violates our rules. + for _, i := range v.Args { + if i >= len(ce.Args) { + continue + } + + call, ok := ce.Args[i].(*ast.CallExpr) + if !ok { + continue + } + + // is it a conversion call + if !c.callConverts(call) { + continue + } + + // the call somehow has no arguments + if len(call.Args) == 0 { + continue + } + + // wrong argument type + if normalType(c.Type(call.Args[0])) != v.getArgType() { + continue + } + + m[i] = call.Args[0] + } + + return m, len(m) == len(v.Args) +} + +func (c *Checker) callConverts(ce *ast.CallExpr) bool { + switch ce.Fun.(type) { + case *ast.ArrayType, *ast.Ident: + res := c.Type(ce.Fun) + return res == "[]byte" || res == "string" + } + + return false +} + +// register violations. +func (c *Checker) register(violations []Violation) { + for _, v := range violations { // nolint: gocritic + c.Violations = append(c.Violations, v) + if len(v.Struct) > 0 { + c.registerIdxPer(v.Package + "." + v.Struct) + } + c.registerIdxPer(v.Package) + } +} + +// registerIdxPer will register the last added violation element +// under the pkg string. +func (c *Checker) registerIdxPer(pkg string) { + c.Packages[pkg] = append(c.Packages[pkg], len(c.Violations)-1) +} + +func WrapType(info *types.Info) func(node ast.Expr) string { + return func(node ast.Expr) string { + if t := info.TypeOf(node); t != nil { + return t.String() + } + + if tv, ok := info.Types[node]; ok { + return tv.Type.Underlying().String() + } + + return "" + } +} + +func WrapPrint(fSet *token.FileSet) func(ast.Node) []byte { + return func(node ast.Node) []byte { + var buf bytes.Buffer + printer.Fprint(&buf, fSet, node) + return buf.Bytes() + } +} diff --git a/vendor/github.com/butuzov/mirror/internal/checker/imports.go b/vendor/github.com/butuzov/mirror/internal/checker/imports.go new file mode 100644 index 0000000000..4015de5970 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/internal/checker/imports.go @@ -0,0 +1,89 @@ +package checker + +import ( + "go/ast" + "go/token" + "path" + "sort" + "strings" + "sync" + + "golang.org/x/tools/go/ast/inspector" +) + +// Imports represents an imported package in a lookup-friendly way... +// +// examples: +// import . "bytes" -> checker.Import{Pkg:"bytes", Name:"."} +// import name "bytes" -> checker.Import{Pkg:"bytes", Name:"name"} +type Import struct { + Pkg string // package name + Name string // alias +} + +type Imports map[string][]Import + +// Imports entries are kept sorted, but if a file has fewer than +// `sortLowerLimit` entries we skip this step, as it is not going to +// be worth the effort.
+const sortLowerLimit int = 13 + +// Package-level lock to prevent import map corruption. +var lock sync.RWMutex + +func Load(fs *token.FileSet, ins *inspector.Inspector) Imports { + lock.Lock() + defer lock.Unlock() + + imports := make(Imports) + + // Populate imports map + ins.Preorder([]ast.Node{(*ast.ImportSpec)(nil)}, func(node ast.Node) { + importSpec, _ := node.(*ast.ImportSpec) + + var ( + key = fs.Position(node.Pos()).Filename + pkg = strings.Trim(importSpec.Path.Value, `"`) + name = importSpec.Name.String() + ) + + if importSpec.Name == nil { + name = path.Base(pkg) // note: we need only the basename of the package + } + + imports[key] = append(imports[key], Import{ + Pkg: pkg, + Name: name, + }) + }) + + imports.sort() + + return imports +} + +// sort sorts the imports for each of the files being checked. +func (i *Imports) sort() { + for k := range *i { + if len((*i)[k]) < sortLowerLimit { + continue + } + + k := k + sort.Slice((*i)[k], func(left, right int) bool { + return (*i)[k][left].Name < (*i)[k][right].Name + }) + } +} + +func (i Imports) Lookup(file, pkg string) (string, bool) { + if _, ok := i[file]; ok { + for idx := range i[file] { + if i[file][idx].Name == pkg { + return i[file][idx].Pkg, true + } + } + } + + return "", false +} diff --git a/vendor/github.com/butuzov/mirror/internal/checker/violation.go b/vendor/github.com/butuzov/mirror/internal/checker/violation.go new file mode 100644 index 0000000000..375d3c8e65 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/internal/checker/violation.go @@ -0,0 +1,208 @@ +package checker + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "path" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// Type of violation: can be a method or a function +type ViolationType int + +const ( + Function ViolationType = iota + 1 + Method +) + +const ( + Strings string = "string" + Bytes string = "[]byte" + Byte string = "byte" + Rune string = "rune" + UntypedRune string = "untyped rune" +) + +// Violation describes the message we are going to give for a particular code violation +type Violation struct { + Type ViolationType // + Args []int // Indexes of the arguments that need to be checked + ArgsType string + + Targets string + Package string + AltPackage string + Struct string + Caller string + AltCaller string + + // --- tests generation information + Generate *Generate + + // --- suggestion-related info about the violation of rules. + base []byte // receiver of the method or pkg name + callExpr *ast.CallExpr // actual call expression, to extract arguments + arguments map[int]ast.Expr // fixed arguments +} + +// Tests (generation) related struct.
+type Generate struct { + PreCondition string // Precondition we want to be generated + Pattern string // Generate pattern (for the `want` message) + Returns int // Expected to return n elements +} + +func (v *Violation) With(base []byte, e *ast.CallExpr, args map[int]ast.Expr) *Violation { + v.base = base + v.callExpr = e + v.arguments = args + + return v +} + +func (v *Violation) getArgType() string { + if v.ArgsType != "" { + return v.ArgsType + } + + if v.Targets == Strings { + return Bytes + } + + return Strings +} + +func (v *Violation) Message() string { + if v.Type == Method { + return fmt.Sprintf("avoid allocations with (*%s.%s).%s", + path.Base(v.Package), v.Struct, v.AltCaller) + } + + pkg := v.Package + if len(v.AltPackage) > 0 { + pkg = v.AltPackage + } + + return fmt.Sprintf("avoid allocations with %s.%s", path.Base(pkg), v.AltCaller) +} + +func (v *Violation) suggest(fSet *token.FileSet) []byte { + var buf bytes.Buffer + + if len(v.base) > 0 { + buf.Write(v.base) + buf.WriteString(".") + } + + buf.WriteString(v.AltCaller) + buf.WriteByte('(') + for idx := range v.callExpr.Args { + if arg, ok := v.arguments[idx]; ok { + printer.Fprint(&buf, fSet, arg) + } else { + printer.Fprint(&buf, fSet, v.callExpr.Args[idx]) + } + + if idx != len(v.callExpr.Args)-1 { + buf.WriteString(", ") + } + } + buf.WriteByte(')') + + return buf.Bytes() +} + +func (v *Violation) Diagnostic(fSet *token.FileSet) analysis.Diagnostic { + diagnostic := analysis.Diagnostic{ + Pos: v.callExpr.Pos(), + End: v.callExpr.Pos(), + Message: v.Message(), + } + + var buf bytes.Buffer + printer.Fprint(&buf, fSet, v.callExpr) + noNl := bytes.IndexByte(buf.Bytes(), '\n') < 0 + + // Struct based fix. + if v.Type == Method && noNl { + diagnostic.SuggestedFixes = []analysis.SuggestedFix{{ + Message: "Fix Issue With", + TextEdits: []analysis.TextEdit{{ + Pos: v.callExpr.Pos(), End: v.callExpr.End(), NewText: v.suggest(fSet), + }}, + }} + } + + if v.AltPackage == "" { + v.AltPackage = v.Package + } + + // Hooray! we don't need to change the package and redo imports. + if v.Type == Function && v.AltPackage == v.Package && noNl { + diagnostic.SuggestedFixes = []analysis.SuggestedFix{{ + Message: "Fix Issue With", + TextEdits: []analysis.TextEdit{{ + Pos: v.callExpr.Pos(), End: v.callExpr.End(), NewText: v.suggest(fSet), + }}, + }} + } + + // do not change + + return diagnostic +} + +type GolangIssue struct { + Start token.Position + End token.Position + Message string + InlineFix string + Original string +} + +// Issue is intended to be used only with golangci-lint, but you can use it +// alongside Diagnostic if you wish. +func (v *Violation) Issue(fSet *token.FileSet) GolangIssue { + issue := GolangIssue{ + Start: fSet.Position(v.callExpr.Pos()), + End: fSet.Position(v.callExpr.End()), + Message: v.Message(), + } + + // original expression (useful for debug & required for replace) + var buf bytes.Buffer + printer.Fprint(&buf, fSet, v.callExpr) + issue.Original = buf.String() + + noNl := strings.IndexByte(issue.Original, '\n') < 0 + + if v.Type == Method && noNl { + fix := v.suggest(fSet) + issue.InlineFix = string(fix) + } + + if v.AltPackage == "" { + v.AltPackage = v.Package + } + + // Hooray! we don't need to change the package and redo imports. + if v.Type == Function && v.AltPackage == v.Package && noNl { + fix := v.suggest(fSet) + issue.InlineFix = string(fix) + } + + return issue +} + +// normalType normalizes input types (mostly typed and untyped runes).
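+// e.g. normalType("untyped rune") == "rune"; any other type string is returned unchanged.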
+func normalType(s string) string { + if s == UntypedRune { + return Rune + } + return s +} diff --git a/vendor/github.com/butuzov/mirror/readme.md b/vendor/github.com/butuzov/mirror/readme.md new file mode 100644 index 0000000000..fcfd1de11a --- /dev/null +++ b/vendor/github.com/butuzov/mirror/readme.md @@ -0,0 +1,60 @@ +# `mirror` [![Code Coverage](https://coveralls.io/repos/github/butuzov/mirror/badge.svg?branch=main)](https://coveralls.io/github/butuzov/mirror?branch=main) [![build status](https://github.com/butuzov/mirror/actions/workflows/main.yaml/badge.svg?branch=main)]() + +`mirror` suggests the use of alternative functions/methods in order to gain performance boosts by avoiding unnecessary `[]byte/string` conversion calls. See [MIRROR_FUNCS.md](MIRROR_FUNCS.md) for the list of mirror functions you can use in Go's stdlib. + +## 🇺🇦 PLEASE HELP ME 🇺🇦 +Fundraiser for a scout drone **DJI Matrice 30T** for my squad (Ukrainian Forces). See more details at [butuzov/README.md](https://github.com/butuzov/butuzov/) + +## Linter Use Cases + +### `github.com/argoproj/argo-cd` + +```go +// Before +func IsValidHostname(hostname string, fqdn bool) bool { + if !fqdn { + return validHostNameRegexp.Match([]byte(hostname)) || validIPv6Regexp.Match([]byte(hostname)) + } else { + return validFQDNRegexp.Match([]byte(hostname)) + } +} + +// After: with the alternative method (and the now-redundant `else` case dropped) +func IsValidHostname(hostname string, fqdn bool) bool { + if !fqdn { + return validHostNameRegexp.MatchString(hostname) || validIPv6Regexp.MatchString(hostname) + } + + return validFQDNRegexp.MatchString(hostname) +} +``` + +## Install + +``` +go install github.com/butuzov/mirror/cmd/mirror@latest +``` + +## How to use + +You can run `mirror` with [`go vet`](https://pkg.go.dev/cmd/vet): + +``` +go vet -vettool=$(which mirror) ./... +# github.com/jcmoraisjr/haproxy-ingress/pkg/common/net/ssl +pkg/common/net/ssl/ssl.go:64:11: avoid allocations with (*os.File).WriteString +pkg/common/net/ssl/ssl.go:161:12: avoid allocations with (*os.File).WriteString +pkg/common/net/ssl/ssl.go:166:3: avoid allocations with (*os.File).WriteString +``` + +It can also be called directly: +``` +mirror ./... +# https://github.com/cosmtrek/air +/air/runner/util.go:149:6: avoid allocations with (*regexp.Regexp).MatchString +/air/runner/util.go:173:14: avoid allocations with (*os.File).WriteString +``` + +## Command line + +- You can add checks for `_test.go` files with the CLI option `--with-tests` diff --git a/vendor/github.com/catenacyber/perfsprint/LICENSE b/vendor/github.com/catenacyber/perfsprint/LICENSE new file mode 100644 index 0000000000..14c2b9e737 --- /dev/null +++ b/vendor/github.com/catenacyber/perfsprint/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Catena cyber + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go b/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go new file mode 100644 index 0000000000..543b4bdbc7 --- /dev/null +++ b/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go @@ -0,0 +1,603 @@ +package analyzer + +import ( + "bytes" + "go/ast" + "go/format" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + + "golang.org/x/tools/go/analysis" +) + +type perfSprint struct { + intConv bool + errError bool + errorf bool + sprintf1 bool + fiximports bool + strconcat bool +} + +func newPerfSprint() *perfSprint { + return &perfSprint{ + intConv: true, + errError: false, + errorf: true, + sprintf1: true, + fiximports: true, + strconcat: true, + } +} + +func New() *analysis.Analyzer { + n := newPerfSprint() + r := &analysis.Analyzer{ + Name: "perfsprint", + Doc: "Checks that fmt.Sprintf can be replaced with a faster alternative.", + Run: n.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } + r.Flags.BoolVar(&n.intConv, "int-conversion", true, "optimizes even if it requires an int or uint type cast") + r.Flags.BoolVar(&n.errError, "err-error", false, "optimizes into err.Error() even if it is only equivalent for non-nil errors") + r.Flags.BoolVar(&n.errorf, "errorf", true, "optimizes fmt.Errorf") + r.Flags.BoolVar(&n.sprintf1, "sprintf1", true, "optimizes fmt.Sprintf with only one argument") + r.Flags.BoolVar(&n.fiximports, "fiximports", true, "fix needed imports from other fixes") + r.Flags.BoolVar(&n.strconcat, "strconcat", true, "optimizes into strings concatenation") + return r +} + +// true if verb is a format string that could be replaced with concatenation. 
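+// For example: "%s-suffix" and "prefix-%s" are concatable, while "%s%s", +// "mid%sdle", and formats containing "%[1]s" more than once are not.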
+func isConcatable(verb string) bool { + hasPrefix := + (strings.HasPrefix(verb, "%s") && !strings.Contains(verb, "%[1]s")) || + (strings.HasPrefix(verb, "%[1]s") && !strings.Contains(verb, "%s")) + hasSuffix := + (strings.HasSuffix(verb, "%s") && !strings.Contains(verb, "%[1]s")) || + (strings.HasSuffix(verb, "%[1]s") && !strings.Contains(verb, "%s")) + + if strings.Count(verb, "%[1]s") > 1 { + return false + } + return (hasPrefix || hasSuffix) && !(hasPrefix && hasSuffix) +} + +func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { + var fmtSprintObj, fmtSprintfObj, fmtErrorfObj types.Object + for _, pkg := range pass.Pkg.Imports() { + if pkg.Path() == "fmt" { + fmtSprintObj = pkg.Scope().Lookup("Sprint") + fmtSprintfObj = pkg.Scope().Lookup("Sprintf") + fmtErrorfObj = pkg.Scope().Lookup("Errorf") + } + } + if fmtSprintfObj == nil && fmtSprintObj == nil && fmtErrorfObj == nil { + return nil, nil + } + removedFmtUsages := make(map[string]int) + neededPackages := make(map[string]map[string]bool) + + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + insp.Preorder(nodeFilter, func(node ast.Node) { + call := node.(*ast.CallExpr) + called, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return + } + calledObj := pass.TypesInfo.ObjectOf(called.Sel) + + var ( + fn string + verb string + value ast.Expr + err error + ) + switch { + case calledObj == fmtErrorfObj && len(call.Args) == 1: + if n.errorf { + fn = "fmt.Errorf" + verb = "%s" + value = call.Args[0] + } else { + return + } + + case calledObj == fmtSprintObj && len(call.Args) == 1: + fn = "fmt.Sprint" + verb = "%v" + value = call.Args[0] + + case calledObj == fmtSprintfObj && len(call.Args) == 1: + if n.sprintf1 { + fn = "fmt.Sprintf" + verb = "%s" + value = call.Args[0] + } else { + return + } + + case calledObj == fmtSprintfObj && len(call.Args) == 2: + verbLit, ok := call.Args[0].(*ast.BasicLit) + if !ok { + return + } + verb, err = strconv.Unquote(verbLit.Value) + if err != nil { + // Probably unreachable. 
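+ // (for code that compiles, the literal here must be a well-formed + // string constant, and strconv.Unquote succeeds on those)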
+ return + } + // one single explicit arg is simplified + if strings.HasPrefix(verb, "%[1]") { + verb = "%" + verb[4:] + } + + fn = "fmt.Sprintf" + value = call.Args[1] + + default: + return + } + + switch verb { + default: + if fn == "fmt.Sprintf" && isConcatable(verb) && n.strconcat { + break + } + return + case "%d", "%v", "%x", "%t", "%s": + } + + valueType := pass.TypesInfo.TypeOf(value) + a, isArray := valueType.(*types.Array) + s, isSlice := valueType.(*types.Slice) + + var d *analysis.Diagnostic + switch { + case isBasicType(valueType, types.String) && oneOf(verb, "%v", "%s"): + fname := pass.Fset.File(call.Pos()).Name() + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + removedFmtUsages[fname]++ + if fn == "fmt.Errorf" { + neededPackages[fname]["errors"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with errors.New", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use errors.New", + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("errors.New("), + }}, + }, + }, + } + } else { + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with just using the string", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Just use string value", + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: call.End(), + NewText: []byte(formatNode(pass.Fset, value)), + }}, + }, + }, + } + } + case types.Implements(valueType, errIface) && oneOf(verb, "%v", "%s") && n.errError: + // known false positive if this error is nil + // fmt.Sprint(nil) does not panic like nil.Error() does + errMethodCall := formatNode(pass.Fset, value) + ".Error()" + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with " + errMethodCall, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use " + errMethodCall, + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: call.End(), + NewText: []byte(errMethodCall), + }}, + }, + }, + } + + case isBasicType(valueType, types.Bool) && oneOf(verb, "%v", "%t"): + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["strconv"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster strconv.FormatBool", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use strconv.FormatBool", + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("strconv.FormatBool("), + }}, + }, + }, + } + + case isArray && isBasicType(a.Elem(), types.Uint8) && oneOf(verb, "%x"): + if _, ok := value.(*ast.Ident); !ok { + // Doesn't support array literals. 
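+ // (the suggested fix appends [:], and slicing an array requires an + // addressable operand, which a composite literal is not)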
+ return + } + + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["encoding/hex"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster hex.EncodeToString", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use hex.EncodeToString", + TextEdits: []analysis.TextEdit{ + { + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("hex.EncodeToString("), + }, + { + Pos: value.End(), + End: value.End(), + NewText: []byte("[:]"), + }, + }, + }, + }, + } + case isSlice && isBasicType(s.Elem(), types.Uint8) && oneOf(verb, "%x"): + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["encoding/hex"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster hex.EncodeToString", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use hex.EncodeToString", + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("hex.EncodeToString("), + }}, + }, + }, + } + + case isBasicType(valueType, types.Int8, types.Int16, types.Int32) && oneOf(verb, "%v", "%d") && n.intConv: + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["strconv"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster strconv.Itoa", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use strconv.Itoa", + TextEdits: []analysis.TextEdit{ + { + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("strconv.Itoa(int("), + }, + { + Pos: value.End(), + End: value.End(), + NewText: []byte(")"), + }, + }, + }, + }, + } + case isBasicType(valueType, types.Int) && oneOf(verb, "%v", "%d"): + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["strconv"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster strconv.Itoa", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use strconv.Itoa", + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("strconv.Itoa("), + }}, + }, + }, + } + case isBasicType(valueType, types.Int64) && oneOf(verb, "%v", "%d"): + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["strconv"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster strconv.FormatInt", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use strconv.FormatInt", + TextEdits: []analysis.TextEdit{ + { + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("strconv.FormatInt("), + }, + { + Pos: value.End(), + End: value.End(), + NewText: []byte(", 10"), + }, + }, + }, + }, + } + + case isBasicType(valueType, types.Uint8, types.Uint16, types.Uint32, types.Uint) && oneOf(verb, "%v", "%d", "%x") && n.intConv: + base := []byte("), 10") + if verb == 
"%x" { + base = []byte("), 16") + } + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["strconv"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster strconv.FormatUint", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use strconv.FormatUint", + TextEdits: []analysis.TextEdit{ + { + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("strconv.FormatUint(uint64("), + }, + { + Pos: value.End(), + End: value.End(), + NewText: base, + }, + }, + }, + }, + } + case isBasicType(valueType, types.Uint64) && oneOf(verb, "%v", "%d", "%x"): + base := []byte(", 10") + if verb == "%x" { + base = []byte(", 16") + } + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["strconv"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster strconv.FormatUint", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use strconv.FormatUint", + TextEdits: []analysis.TextEdit{ + { + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("strconv.FormatUint("), + }, + { + Pos: value.End(), + End: value.End(), + NewText: base, + }, + }, + }, + }, + } + case isBasicType(valueType, types.String) && fn == "fmt.Sprintf" && isConcatable(verb): + var fix string + if strings.HasSuffix(verb, "%s") { + fix = strconv.Quote(verb[:len(verb)-2]) + "+" + formatNode(pass.Fset, value) + } else if strings.HasSuffix(verb, "%[1]s") { + fix = strconv.Quote(verb[:len(verb)-5]) + "+" + formatNode(pass.Fset, value) + } else if strings.HasPrefix(verb, "%s") { + fix = formatNode(pass.Fset, value) + "+" + strconv.Quote(verb[2:]) + } else { + fix = formatNode(pass.Fset, value) + "+" + strconv.Quote(verb[5:]) + } + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with string concatenation", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use string concatenation", + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: call.End(), + NewText: []byte(fix), + }}, + }, + }, + } + } + + if d != nil { + pass.Report(*d) + } + }) + + if len(removedFmtUsages) > 0 && n.fiximports { + for _, pkg := range pass.Pkg.Imports() { + if pkg.Path() == "fmt" { + insp = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter = []ast.Node{ + (*ast.SelectorExpr)(nil), + } + insp.Preorder(nodeFilter, func(node ast.Node) { + selec := node.(*ast.SelectorExpr) + selecok, ok := selec.X.(*ast.Ident) + if ok { + pkgname, ok := pass.TypesInfo.ObjectOf(selecok).(*types.PkgName) + if ok && pkgname.Name() == pkg.Name() { + fname := pass.Fset.File(pkgname.Pos()).Name() + removedFmtUsages[fname]-- + } + } + }) + } else if pkg.Path() == "errors" || pkg.Path() == "strconv" || pkg.Path() == "encoding/hex" { + insp = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter = []ast.Node{ + (*ast.ImportSpec)(nil), + } + insp.Preorder(nodeFilter, func(node ast.Node) { + gd := node.(*ast.ImportSpec) + if gd.Path.Value == strconv.Quote(pkg.Path()) { + fname := pass.Fset.File(gd.Pos()).Name() + _, ok := neededPackages[fname] + if ok { + delete(neededPackages[fname], pkg.Path()) + } + } + }) + } + } + insp = 
pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter = []ast.Node{ + (*ast.ImportSpec)(nil), + } + insp.Preorder(nodeFilter, func(node ast.Node) { + gd := node.(*ast.ImportSpec) + if gd.Path.Value == `"fmt"` { + fix := "" + fname := pass.Fset.File(gd.Pos()).Name() + if removedFmtUsages[fname] < 0 { + fix += `"fmt"` + if len(neededPackages[fname]) == 0 { + return + } + } + keys := make([]string, 0, len(neededPackages[fname])) + for k := range neededPackages[fname] { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fix = fix + "\n\t\"" + k + `"` + } + pass.Report(analysis.Diagnostic{ + Pos: gd.Pos(), + End: gd.End(), + Message: "Fix imports", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Fix imports", + TextEdits: []analysis.TextEdit{{ + Pos: gd.Pos(), + End: gd.End(), + NewText: []byte(fix), + }}, + }, + }}) + } + }) + } + + return nil, nil +} + +var errIface = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + +func isBasicType(lhs types.Type, expected ...types.BasicKind) bool { + for _, rhs := range expected { + if types.Identical(lhs, types.Typ[rhs]) { + return true + } + } + return false +} + +func formatNode(fset *token.FileSet, node ast.Node) string { + buf := new(bytes.Buffer) + if err := format.Node(buf, fset, node); err != nil { + return "" + } + return buf.String() +} + +func oneOf[T comparable](v T, expected ...T) bool { + for _, rhs := range expected { + if v == rhs { + return true + } + } + return false +} diff --git a/vendor/github.com/ccojocar/zxcvbn-go/.gitignore b/vendor/github.com/ccojocar/zxcvbn-go/.gitignore new file mode 100644 index 0000000000..e032cc2fcb --- /dev/null +++ b/vendor/github.com/ccojocar/zxcvbn-go/.gitignore @@ -0,0 +1,5 @@ +zxcvbn +debug.test + +# SBOMs generated during CI +/bom.json diff --git a/vendor/github.com/ccojocar/zxcvbn-go/.golangci.yml b/vendor/github.com/ccojocar/zxcvbn-go/.golangci.yml new file mode 100644 index 0000000000..b54f70092e --- /dev/null +++ b/vendor/github.com/ccojocar/zxcvbn-go/.golangci.yml @@ -0,0 +1,39 @@ +linters: + enable: + - asciicheck + - bodyclose + - dogsled + - durationcheck + - errcheck + - errorlint + - exportloopref + - gci + - ginkgolinter + - gofmt + - gofumpt + - goimports + - gosimple + - govet + - importas + - ineffassign + - megacheck + - misspell + - nakedret + - nolintlint + - revive + - staticcheck + - typecheck + - unconvert + - unparam + - unused + - wastedassign + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/ccojocar) + +run: + timeout: 5m diff --git a/vendor/github.com/ccojocar/zxcvbn-go/.goreleaser.yml b/vendor/github.com/ccojocar/zxcvbn-go/.goreleaser.yml new file mode 100644 index 0000000000..2386aeee52 --- /dev/null +++ b/vendor/github.com/ccojocar/zxcvbn-go/.goreleaser.yml @@ -0,0 +1,27 @@ +--- +project_name: zxcvbn-go + +release: + extra_files: + - glob: ./bom.json + github: + owner: ccojocar + name: zxcvbn-go + +builds: + - main: ./testapp/ + binary: zxcvbn-go + goos: + - darwin + - linux + - windows + goarch: + - amd64 + - arm64 + - s390x + ldflags: -X main.Version={{.Version}} -X main.GitTag={{.Tag}} -X main.BuildDate={{.Date}} + env: + - CGO_ENABLED=0 + +gomod: + proxy: true diff --git a/vendor/github.com/nbutton23/zxcvbn-go/LICENSE.txt b/vendor/github.com/ccojocar/zxcvbn-go/LICENSE.txt similarity index 100% rename from vendor/github.com/nbutton23/zxcvbn-go/LICENSE.txt rename to vendor/github.com/ccojocar/zxcvbn-go/LICENSE.txt diff --git 
a/vendor/github.com/ccojocar/zxcvbn-go/Makefile b/vendor/github.com/ccojocar/zxcvbn-go/Makefile new file mode 100644 index 0000000000..0690f37538 --- /dev/null +++ b/vendor/github.com/ccojocar/zxcvbn-go/Makefile @@ -0,0 +1,61 @@ +GIT_TAG?= $(shell git describe --always --tags) +BIN = zxcvbn-go +FMT_CMD = $(gofmt -s -l -w $(find . -type f -name '*.go' -not -path './vendor/*') | tee /dev/stderr) +IMAGE_REPO = ccojocar +DATE_FMT=+%Y-%m-%d +ifdef SOURCE_DATE_EPOCH + BUILD_DATE ?= $(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "$(DATE_FMT)" 2>/dev/null || date -u -r "$(SOURCE_DATE_EPOCH)" "$(DATE_FMT)" 2>/dev/null || date -u "$(DATE_FMT)") +else + BUILD_DATE ?= $(shell date "$(DATE_FMT)") +endif +BUILDFLAGS := "-w -s -X 'main.Version=$(GIT_TAG)' -X 'main.GitTag=$(GIT_TAG)' -X 'main.BuildDate=$(BUILD_DATE)'" +CGO_ENABLED = 0 +GO := GO111MODULE=on go +GO_NOMOD :=GO111MODULE=off go +GOPATH ?= $(shell $(GO) env GOPATH) +GOBIN ?= $(GOPATH)/bin +GO_MINOR_VERSION = $(shell $(GO) version | cut -c 14- | cut -d' ' -f1 | cut -d'.' -f2) +GOVULN_MIN_VERSION = 17 +GO_VERSION = 1.20 + +default: + $(MAKE) test + +install-govulncheck: + @if [ $(GO_MINOR_VERSION) -gt $(GOVULN_MIN_VERSION) ]; then \ + go install golang.org/x/vuln/cmd/govulncheck@latest; \ + fi + +test-all: fmt vet lint sec govulncheck test + +test: + go test -v ./... + +fmt: + @echo "FORMATTING" + @FORMATTED=`$(GO) fmt ./...` + @([ ! -z "$(FORMATTED)" ] && printf "Fixed unformatted files:\n$(FORMATTED)") || true + +vet: + @echo "VETTING" + $(GO) vet ./... + +lint: + @echo "LINTING: golangci-lint" + golangci-lint run + +sec: + @echo "SECURITY SCANNING" + gosec ./... + +govulncheck: install-govulncheck + @echo "CHECKING VULNERABILITIES" + @if [ $(GO_MINOR_VERSION) -gt $(GOVULN_MIN_VERSION) ]; then \ + govulncheck ./...; \ + fi + +clean: + rm -rf build vendor dist coverage.txt + rm -f release image $(BIN) + +.PHONY: test test-all fmt vet govulncheck clean diff --git a/vendor/github.com/nbutton23/zxcvbn-go/README.md b/vendor/github.com/ccojocar/zxcvbn-go/README.md similarity index 100% rename from vendor/github.com/nbutton23/zxcvbn-go/README.md rename to vendor/github.com/ccojocar/zxcvbn-go/README.md diff --git a/vendor/github.com/nbutton23/zxcvbn-go/adjacency/adjcmartix.go b/vendor/github.com/ccojocar/zxcvbn-go/adjacency/adjcmartix.go similarity index 82% rename from vendor/github.com/nbutton23/zxcvbn-go/adjacency/adjcmartix.go rename to vendor/github.com/ccojocar/zxcvbn-go/adjacency/adjcmartix.go index 66ad30b822..34526685cc 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/adjacency/adjcmartix.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/adjacency/adjcmartix.go @@ -4,7 +4,7 @@ import ( "encoding/json" "log" - "github.com/nbutton23/zxcvbn-go/data" + "github.com/ccojocar/zxcvbn-go/data" ) // Graph holds information about different graphs @@ -25,7 +25,7 @@ func init() { GraphMap["l33t"] = BuildLeet() } -//BuildQwerty builds the Qwerty Graph +// BuildQwerty builds the Qwerty Graph func BuildQwerty() Graph { data, err := data.Asset("data/Qwerty.json") if err != nil { @@ -34,7 +34,7 @@ func BuildQwerty() Graph { return getAdjancencyGraphFromFile(data, "qwerty") } -//BuildDvorak builds the Dvorak Graph +// BuildDvorak builds the Dvorak Graph func BuildDvorak() Graph { data, err := data.Asset("data/Dvorak.json") if err != nil { @@ -43,7 +43,7 @@ func BuildDvorak() Graph { return getAdjancencyGraphFromFile(data, "dvorak") } -//BuildKeypad builds the Keypad Graph +// BuildKeypad builds the Keypad Graph func BuildKeypad() Graph { data, err := 
data.Asset("data/Keypad.json") if err != nil { @@ -52,7 +52,7 @@ func BuildKeypad() Graph { return getAdjancencyGraphFromFile(data, "keypad") } -//BuildMacKeypad builds the Mac Keypad Graph +// BuildMacKeypad builds the Mac Keypad Graph func BuildMacKeypad() Graph { data, err := data.Asset("data/MacKeypad.json") if err != nil { @@ -61,7 +61,7 @@ func BuildMacKeypad() Graph { return getAdjancencyGraphFromFile(data, "mac_keypad") } -//BuildLeet builds the L33T Graph +// BuildLeet builds the L33T Graph func BuildLeet() Graph { data, err := data.Asset("data/L33t.json") if err != nil { @@ -71,7 +71,6 @@ func BuildLeet() Graph { } func getAdjancencyGraphFromFile(data []byte, name string) Graph { - var graph Graph err := json.Unmarshal(data, &graph) if err != nil { @@ -82,9 +81,9 @@ func getAdjancencyGraphFromFile(data []byte, name string) Graph { } // CalculateAvgDegree calclates the average degree between nodes in the graph -//on qwerty, 'g' has degree 6, being adjacent to 'ftyhbv'. '\' has degree 1. -//this calculates the average over all keys. -//TODO double check that i ported this correctly scoring.coffee ln 5 +// on qwerty, 'g' has degree 6, being adjacent to 'ftyhbv'. '\' has degree 1. +// this calculates the average over all keys. +// TODO double check that i ported this correctly scoring.coffee ln 5 func (adjGrp Graph) CalculateAvgDegree() float64 { if adjGrp.averageDegree != float64(0) { return adjGrp.averageDegree @@ -92,14 +91,12 @@ func (adjGrp Graph) CalculateAvgDegree() float64 { var avg float64 var count float64 for _, value := range adjGrp.Graph { - for _, char := range value { if len(char) != 0 || char != " " { avg += float64(len(char)) count++ } } - } adjGrp.averageDegree = avg / count diff --git a/vendor/github.com/nbutton23/zxcvbn-go/data/bindata.go b/vendor/github.com/ccojocar/zxcvbn-go/data/bindata.go similarity index 99% rename from vendor/github.com/nbutton23/zxcvbn-go/data/bindata.go rename to vendor/github.com/ccojocar/zxcvbn-go/data/bindata.go index f3a0c010ca..3db0f1b100 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/data/bindata.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/data/bindata.go @@ -33,7 +33,7 @@ func bindataRead(data []byte, name string) ([]byte, error) { } var buf bytes.Buffer - _, err = io.Copy(&buf, gz) + _, err = io.Copy(&buf, gz) // #nosec clErr := gz.Close() if err != nil { @@ -345,11 +345,13 @@ var _bindata = map[string]func() (*asset, error){ // directory embedded in the file by go-bindata. // For example if you run go-bindata on data/... 
and data contains the // following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png +// +// data/ +// foo.txt +// img/ +// a.png +// b.png +// // then AssetDir("data") would return []string{"foo.txt", "img"} // AssetDir("data/img") would return []string{"a.png", "b.png"} // AssetDir("foo.txt") and AssetDir("notexist") would return an error diff --git a/vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go b/vendor/github.com/ccojocar/zxcvbn-go/entropy/entropyCalculator.go similarity index 77% rename from vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go rename to vendor/github.com/ccojocar/zxcvbn-go/entropy/entropyCalculator.go index 8f57ea0a47..80432572bd 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/entropy/entropyCalculator.go @@ -1,12 +1,13 @@ package entropy import ( - "github.com/nbutton23/zxcvbn-go/adjacency" - "github.com/nbutton23/zxcvbn-go/match" - "github.com/nbutton23/zxcvbn-go/utils/math" "math" "regexp" "unicode" + + "github.com/ccojocar/zxcvbn-go/adjacency" + "github.com/ccojocar/zxcvbn-go/match" + zxcvbnmath "github.com/ccojocar/zxcvbn-go/utils/math" ) const ( @@ -27,7 +28,7 @@ var ( func DictionaryEntropy(match match.Match, rank float64) float64 { baseEntropy := math.Log2(rank) upperCaseEntropy := extraUpperCaseEntropy(match) - //TODO: L33t + // TODO: L33t return baseEntropy + upperCaseEntropy } @@ -46,18 +47,18 @@ func extraUpperCaseEntropy(match match.Match) float64 { return float64(0) } - //a capitalized word is the most common capitalization scheme, - //so it only doubles the search space (uncapitalized + capitalized): 1 extra bit of entropy. - //allcaps and end-capitalized are common enough too, underestimate as 1 extra bit to be safe. + // a capitalized word is the most common capitalization scheme, + // so it only doubles the search space (uncapitalized + capitalized): 1 extra bit of entropy. + // allcaps and end-capitalized are common enough too, underestimate as 1 extra bit to be safe. for _, matcher := range []*regexp.Regexp{startUpperRx, endUpperRx, allUpperRx} { if matcher.MatchString(word) { return float64(1) } } - //Otherwise calculate the number of ways to capitalize U+L uppercase+lowercase letters with U uppercase letters or - //less. Or, if there's more uppercase than lower (for e.g. PASSwORD), the number of ways to lowercase U+L letters - //with L lowercase letters or less. + // Otherwise calculate the number of ways to capitalize U+L uppercase+lowercase letters with U uppercase letters or + // less. Or, if there's more uppercase than lower (for e.g. PASSwORD), the number of ways to lowercase U+L letters + // with L lowercase letters or less. 
countUpper, countLower := float64(0), float64(0) for _, char := range word { @@ -71,21 +72,21 @@ func extraUpperCaseEntropy(match match.Match) float64 { var possibililities float64 for i := float64(0); i <= math.Min(countUpper, countLower); i++ { - possibililities += float64(zxcvbnmath.NChoseK(totalLenght, i)) + possibililities += zxcvbnmath.NChoseK(totalLenght, i) } if possibililities < 1 { return float64(1) } - return float64(math.Log2(possibililities)) + return (math.Log2(possibililities)) } // SpatialEntropy calculates the entropy for spatial matches func SpatialEntropy(match match.Match, turns int, shiftCount int) float64 { var s, d float64 if match.DictionaryName == "qwerty" || match.DictionaryName == "dvorak" { - //todo: verify qwerty and dvorak have the same length and degree + // todo: verify qwerty and dvorak have the same length and degree s = float64(len(adjacency.BuildQwerty().Graph)) d = adjacency.BuildQwerty().CalculateAvgDegree() } else { @@ -97,8 +98,8 @@ func SpatialEntropy(match match.Match, turns int, shiftCount int) float64 { length := float64(len(match.Token)) - //TODO: Should this be <= or just < ? - //Estimate the number of possible patterns w/ length L or less with t turns or less + // TODO: Should this be <= or just < ? + // Estimate the number of possible patterns w/ length L or less with t turns or less for i := float64(2); i <= length+1; i++ { possibleTurns := math.Min(float64(turns), i-1) for j := float64(1); j <= possibleTurns+1; j++ { @@ -108,8 +109,8 @@ func SpatialEntropy(match match.Match, turns int, shiftCount int) float64 { } entropy := math.Log2(possibilities) - //add extra entropu for shifted keys. ( % instead of 5 A instead of a) - //Math is similar to extra entropy for uppercase letters in dictionary matches. + // add extra entropu for shifted keys. ( % instead of 5 A instead of a) + // Math is similar to extra entropy for uppercase letters in dictionary matches. if S := float64(shiftCount); S > float64(0) { possibilities = float64(0) @@ -134,7 +135,7 @@ func RepeatEntropy(match match.Match) float64 { } // CalcBruteForceCardinality calculates the brute force cardinality -//TODO: Validate against python +// TODO: Validate against python func CalcBruteForceCardinality(password string) float64 { lower, upper, digits, symbols := float64(0), float64(0), float64(0), float64(0) @@ -157,12 +158,12 @@ func CalcBruteForceCardinality(password string) float64 { // SequenceEntropy calculates the entropy for sequences such as 4567 or cdef func SequenceEntropy(match match.Match, dictionaryLength int, ascending bool) float64 { firstChar := match.Token[0] - baseEntropy := float64(0) + var baseEntropy float64 if string(firstChar) == "a" || string(firstChar) == "1" { baseEntropy = float64(0) } else { baseEntropy = math.Log2(float64(dictionaryLength)) - //TODO: should this be just the first or any char? + // TODO: should this be just the first or any char? if unicode.IsUpper(rune(firstChar)) { baseEntropy++ } @@ -183,7 +184,7 @@ func ExtraLeetEntropy(match match.Match, password string) float64 { if string(char) != string(match.Token[index]) { subsitutions++ } else { - //TODO: Make this only true for 1337 chars that are not subs? + // TODO: Make this only true for 1337 chars that are not subs? 
unsub++ } } @@ -210,7 +211,7 @@ func DateEntropy(dateMatch match.DateMatch) float64 { } if dateMatch.Separator != "" { - entropy += 2 //add two bits for separator selection [/,-,.,etc] + entropy += 2 // add two bits for separator selection [/,-,.,etc] } return entropy } diff --git a/vendor/github.com/nbutton23/zxcvbn-go/frequency/frequency.go b/vendor/github.com/ccojocar/zxcvbn-go/frequency/frequency.go similarity index 96% rename from vendor/github.com/nbutton23/zxcvbn-go/frequency/frequency.go rename to vendor/github.com/ccojocar/zxcvbn-go/frequency/frequency.go index d056e4d4e6..4f51369e1f 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/frequency/frequency.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/frequency/frequency.go @@ -4,7 +4,7 @@ import ( "encoding/json" "log" - "github.com/nbutton23/zxcvbn-go/data" + "github.com/ccojocar/zxcvbn-go/data" ) // List holds a frequency list @@ -28,8 +28,8 @@ func init() { Lists["Surname"] = getStringListFromAsset(surnameFilePath, "Surname") Lists["English"] = getStringListFromAsset(englishFilePath, "English") Lists["Passwords"] = getStringListFromAsset(passwordsFilePath, "Passwords") - } + func getAsset(name string) []byte { data, err := data.Asset(name) if err != nil { @@ -38,8 +38,8 @@ func getAsset(name string) []byte { return data } -func getStringListFromAsset(data []byte, name string) List { +func getStringListFromAsset(data []byte, name string) List { var tempList List err := json.Unmarshal(data, &tempList) if err != nil { diff --git a/vendor/github.com/ccojocar/zxcvbn-go/match/match.go b/vendor/github.com/ccojocar/zxcvbn-go/match/match.go new file mode 100644 index 0000000000..da3e894ece --- /dev/null +++ b/vendor/github.com/ccojocar/zxcvbn-go/match/match.go @@ -0,0 +1,45 @@ +package match + +// Matches is an alies for []Match used for sorting +type Matches []Match + +func (s Matches) Len() int { + return len(s) +} + +func (s Matches) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Matches) Less(i, j int) bool { + if s[i].I < s[j].I { + return true + } else if s[i].I == s[j].I { + return s[i].J < s[j].J + } + return false +} + +// Match represents different matches +type Match struct { + Pattern string + I, J int + Token string + DictionaryName string + Entropy float64 +} + +// DateMatch is specifilly a match for type date +type DateMatch struct { + Pattern string + I, J int + Token string + Separator string + Day, Month, Year int64 +} + +// Matcher are a func and ID that can be used to match different passwords +type Matcher struct { + MatchingFunc func(password string) []Match + ID string +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/dateMatchers.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/dateMatchers.go similarity index 93% rename from vendor/github.com/nbutton23/zxcvbn-go/matching/dateMatchers.go rename to vendor/github.com/ccojocar/zxcvbn-go/matching/dateMatchers.go index 8dfdf2410b..fd7f383320 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/dateMatchers.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/dateMatchers.go @@ -5,8 +5,8 @@ import ( "strconv" "strings" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" ) const ( @@ -20,12 +20,12 @@ var ( dateWithOutSepMatch = regexp.MustCompile(`\d{4,8}`) ) -//FilterDateSepMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +// FilterDateSepMatcher can be pass to zxcvbn-go.PasswordStrength to skip that 
matcher func FilterDateSepMatcher(m match.Matcher) bool { return m.ID == dateSepMatcherName } -//FilterDateWithoutSepMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +// FilterDateWithoutSepMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher func FilterDateWithoutSepMatcher(m match.Matcher) bool { return m.ID == dateWithOutSepMatcherName } @@ -64,8 +64,8 @@ func dateSepMatcher(password string) []match.Match { return matches } -func dateSepMatchHelper(password string) []match.DateMatch { +func dateSepMatchHelper(password string) []match.DateMatch { var matches []match.DateMatch for _, v := range dateRxYearSuffix.FindAllString(password, len(password)) { @@ -101,7 +101,6 @@ func dateSepMatchHelper(password string) []match.DateMatch { } } return out - } type dateMatchCandidate struct { @@ -136,7 +135,7 @@ func dateWithoutSepMatch(password string) []match.Match { return matches } -//TODO Has issues with 6 digit dates +// TODO Has issues with 6 digit dates func dateWithoutSepMatchHelper(password string) (matches []match.DateMatch) { for _, v := range dateWithOutSepMatch.FindAllString(password, len(password)) { i := strings.Index(password, v) @@ -146,17 +145,17 @@ func dateWithoutSepMatchHelper(password string) (matches []match.DateMatch) { var candidatesRoundOne []dateMatchCandidate if length <= 6 { - //2-digit year prefix + // 2-digit year prefix candidatesRoundOne = append(candidatesRoundOne, buildDateMatchCandidate(v[2:], v[0:2], i, j)) - //2-digityear suffix + // 2-digityear suffix candidatesRoundOne = append(candidatesRoundOne, buildDateMatchCandidate(v[0:lastIndex-2], v[lastIndex-2:], i, j)) } if length >= 6 { - //4-digit year prefix + // 4-digit year prefix candidatesRoundOne = append(candidatesRoundOne, buildDateMatchCandidate(v[4:], v[0:4], i, j)) - //4-digit year sufix + // 4-digit year sufix candidatesRoundOne = append(candidatesRoundOne, buildDateMatchCandidate(v[0:lastIndex-3], v[lastIndex-3:], i, j)) } @@ -179,7 +178,6 @@ func dateWithoutSepMatchHelper(password string) (matches []match.DateMatch) { } intMonth, err := strconv.ParseInt(candidate.Month, 10, 16) - if err != nil { continue } @@ -204,6 +202,5 @@ func buildDateMatchCandidate(dayMonth, year string, i, j int) dateMatchCandidate } func buildDateMatchCandidateTwo(day, month string, year string, i, j int) dateMatchCandidateTwo { - return dateMatchCandidateTwo{Day: day, Month: month, Year: year, I: i, J: j} } diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/dictionaryMatch.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/dictionaryMatch.go similarity index 89% rename from vendor/github.com/nbutton23/zxcvbn-go/matching/dictionaryMatch.go rename to vendor/github.com/ccojocar/zxcvbn-go/matching/dictionaryMatch.go index 4ddb2c3b01..d0d4501880 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/dictionaryMatch.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/dictionaryMatch.go @@ -3,8 +3,8 @@ package matching import ( "strings" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" ) func buildDictMatcher(dictName string, rankedDict map[string]int) func(password string) []match.Match { @@ -15,7 +15,6 @@ func buildDictMatcher(dictName string, rankedDict map[string]int) func(password } return matches } - } func dictionaryMatch(password string, dictionaryName string, rankedDict map[string]int) []match.Match { @@ -29,7 +28,8 @@ func dictionaryMatch(password 
string, dictionaryName string, rankedDict map[stri for j := i; j < length; j++ { word := pwLowerRunes[i : j+1] if val, ok := rankedDict[string(word)]; ok { - matchDic := match.Match{Pattern: "dictionary", + matchDic := match.Match{ + Pattern: "dictionary", DictionaryName: dictionaryName, I: i, J: j, @@ -46,7 +46,6 @@ func dictionaryMatch(password string, dictionaryName string, rankedDict map[stri } func buildRankedDict(unrankedList []string) map[string]int { - result := make(map[string]int) for i, v := range unrankedList { diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/leet.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/leet.go similarity index 95% rename from vendor/github.com/nbutton23/zxcvbn-go/matching/leet.go rename to vendor/github.com/ccojocar/zxcvbn-go/matching/leet.go index 610f1973fc..1f303aa6ea 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/leet.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/leet.go @@ -3,14 +3,14 @@ package matching import ( "strings" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" ) // L33TMatcherName id const L33TMatcherName = "l33t" -//FilterL33tMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +// FilterL33tMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher func FilterL33tMatcher(m match.Matcher) bool { return m.ID == L33TMatcherName } @@ -105,7 +105,7 @@ func createListOfMapsWithoutConflicts(table map[string][]string) []map[string][] return result } -// This function retrieves the list of values that appear for one or more keys. This is usefull to +// This function retrieves the list of values that appear for one or more keys. This is useful to // know which l33t chars can represent more than one letter. func retrieveConflictsListFromTable(table map[string][]string) []string { result := []string{} @@ -128,7 +128,7 @@ func retrieveConflictsListFromTable(table map[string][]string) []string { } // This function aims to create different maps for a given char if this char represents a conflict. -// If the specified char is not a conflit one, the same map will be returned. In scenarios which +// If the specified char is not a conflict one, the same map will be returned. In scenarios which // the provided char can not be found on map, an empty list will be returned. This function was // designed to be used on conflicts situations. func createDifferentMapsForLeetChar(table map[string][]string, leetChar string) []map[string][]string { @@ -158,7 +158,7 @@ func retrieveListOfKeysWithSpecificValueFromTable(table map[string][]string, val return result } -// This function returns a lsit of substitution map from a given table. Each map in the result will +// This function returns a list of substitution map from a given table. Each map in the result will // provide only one representation for each value. As an example, if the provided map contains the // values "@" and "4" in the possibilities to represent "a", two maps will be created where one // will contain "a" mapping to "@" and the other one will provide "a" mapping to "4". 
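Before the next file, the substitution-map expansion those leet.go comments describe is worth picturing concretely. A toy sketch under the same idea (simplified: it skips the conflict handling for l33t chars that can stand for several letters, and none of these names come from the vendored code):

```go
package main

import "fmt"

// expand enumerates substitution maps in which every letter keeps exactly one
// l33t representation, e.g. {"a": {"@", "4"}} yields {"a":"@"} and {"a":"4"}.
func expand(table map[string][]string) []map[string]string {
	result := []map[string]string{{}}
	for letter, subs := range table {
		var next []map[string]string
		for _, m := range result {
			for _, s := range subs {
				clone := make(map[string]string, len(m)+1)
				for k, v := range m {
					clone[k] = v
				}
				clone[letter] = s
				next = append(next, clone)
			}
		}
		result = next
	}
	return result
}

func main() {
	table := map[string][]string{"a": {"@", "4"}, "e": {"3"}}
	for _, m := range expand(table) {
		fmt.Println(m) // map[a:@ e:3] and map[a:4 e:3], in either order
	}
}
```

With `"@"` and `"4"` both listed for `"a"`, two maps come out, one per representation, exactly as the comment above describes.
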
diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/matching.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/matching.go similarity index 87% rename from vendor/github.com/nbutton23/zxcvbn-go/matching/matching.go rename to vendor/github.com/ccojocar/zxcvbn-go/matching/matching.go index 4577db8a4f..c6948067bc 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/matching.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/matching.go @@ -3,9 +3,9 @@ package matching import ( "sort" - "github.com/nbutton23/zxcvbn-go/adjacency" - "github.com/nbutton23/zxcvbn-go/frequency" - "github.com/nbutton23/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/adjacency" + "github.com/ccojocar/zxcvbn-go/frequency" + "github.com/ccojocar/zxcvbn-go/match" ) var ( @@ -23,8 +23,7 @@ func init() { // Omnimatch runs all matchers against the password func Omnimatch(password string, userInputs []string, filters ...func(match.Matcher) bool) (matches []match.Match) { - - //Can I run into the issue where nil is not equal to nil? + // Can I run into the issue where nil is not equal to nil? if dictionaryMatchers == nil || adjacencyGraphs == nil { loadFrequencyList() } @@ -51,7 +50,6 @@ func Omnimatch(password string, userInputs []string, filters ...func(match.Match } func loadFrequencyList() { - for n, list := range frequency.Lists { dictionaryMatchers = append(dictionaryMatchers, match.Matcher{MatchingFunc: buildDictMatcher(n, buildRankedDict(list.List)), ID: n}) } @@ -63,8 +61,8 @@ func loadFrequencyList() { adjacencyGraphs = append(adjacencyGraphs, adjacency.GraphMap["keypad"]) adjacencyGraphs = append(adjacencyGraphs, adjacency.GraphMap["macKeypad"]) - //l33tFilePath, _ := filepath.Abs("adjacency/L33t.json") - //L33T_TABLE = adjacency.GetAdjancencyGraphFromFile(l33tFilePath, "l33t") + // l33tFilePath, _ := filepath.Abs("adjacency/L33t.json") + // L33T_TABLE = adjacency.GetAdjancencyGraphFromFile(l33tFilePath, "l33t") sequences = make(map[string]string) sequences["lower"] = "abcdefghijklmnopqrstuvwxyz" @@ -78,5 +76,4 @@ func loadFrequencyList() { matchers = append(matchers, match.Matcher{MatchingFunc: l33tMatch, ID: L33TMatcherName}) matchers = append(matchers, match.Matcher{MatchingFunc: dateSepMatcher, ID: dateSepMatcherName}) matchers = append(matchers, match.Matcher{MatchingFunc: dateWithoutSepMatch, ID: dateWithOutSepMatcherName}) - } diff --git a/vendor/github.com/ccojocar/zxcvbn-go/matching/repeatMatch.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/repeatMatch.go new file mode 100644 index 0000000000..d52ba4254b --- /dev/null +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/repeatMatch.go @@ -0,0 +1,68 @@ +package matching + +import ( + "strings" + + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" +) + +const repeatMatcherName = "REPEAT" + +// FilterRepeatMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +func FilterRepeatMatcher(m match.Matcher) bool { + return m.ID == repeatMatcherName +} + +func repeatMatch(password string) []match.Match { + var matches []match.Match + + // Loop through password. 
if current == prev currentStreak++ else if currentStreak > 2 {buildMatch; currentStreak = 1} prev = current + var current, prev string + currentStreak := 1 + var i int + var char rune + for i, char = range password { + current = string(char) + if i == 0 { + prev = current + continue + } + + if strings.EqualFold(current, prev) { + currentStreak++ + } else if currentStreak > 2 { + iPos := i - currentStreak + jPos := i - 1 + matchRepeat := match.Match{ + Pattern: "repeat", + I: iPos, + J: jPos, + Token: password[iPos : jPos+1], + DictionaryName: prev, + } + matchRepeat.Entropy = entropy.RepeatEntropy(matchRepeat) + matches = append(matches, matchRepeat) + currentStreak = 1 + } else { + currentStreak = 1 + } + + prev = current + } + + if currentStreak > 2 { + iPos := i - currentStreak + 1 + jPos := i + matchRepeat := match.Match{ + Pattern: "repeat", + I: iPos, + J: jPos, + Token: password[iPos : jPos+1], + DictionaryName: prev, + } + matchRepeat.Entropy = entropy.RepeatEntropy(matchRepeat) + matches = append(matches, matchRepeat) + } + return matches +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/sequenceMatch.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/sequenceMatch.go similarity index 88% rename from vendor/github.com/nbutton23/zxcvbn-go/matching/sequenceMatch.go rename to vendor/github.com/ccojocar/zxcvbn-go/matching/sequenceMatch.go index e0ed052293..6971945838 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/sequenceMatch.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/sequenceMatch.go @@ -3,13 +3,13 @@ package matching import ( "strings" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" ) const sequenceMatcherName = "SEQ" -//FilterSequenceMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +// FilterSequenceMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher func FilterSequenceMatcher(m match.Matcher) bool { return m.ID == sequenceMatcherName } @@ -64,10 +64,8 @@ func sequenceMatch(password string) []match.Match { matches = append(matches, matchSequence) } break - } else { - j++ } - + j++ } } i = j diff --git a/vendor/github.com/ccojocar/zxcvbn-go/matching/spatialMatch.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/spatialMatch.go new file mode 100644 index 0000000000..101ccea5e5 --- /dev/null +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/spatialMatch.go @@ -0,0 +1,87 @@ +package matching + +import ( + "strings" + + "github.com/ccojocar/zxcvbn-go/adjacency" + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" +) + +const spatialMatcherName = "SPATIAL" + +// FilterSpatialMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +func FilterSpatialMatcher(m match.Matcher) bool { + return m.ID == spatialMatcherName +} + +func spatialMatch(password string) (matches []match.Match) { + for _, graph := range adjacencyGraphs { + if graph.Graph != nil { + matches = append(matches, spatialMatchHelper(password, graph)...) + } + } + return matches +} + +func spatialMatchHelper(password string, graph adjacency.Graph) (matches []match.Match) { + for i := 0; i < len(password)-1; { + j := i + 1 + lastDirection := -99 // an int that it should never be! + turns := 0 + shiftedCount := 0 + + for { + prevChar := password[j-1] + found := false + var foundDirection int + curDirection := -1 + // My graphs seem to be wrong. . . 
and where the hell is qwerty + adjacents := graph.Graph[string(prevChar)] + // Consider growing pattern by one character if j hasn't gone over the edge + if j < len(password) { + curChar := password[j] + for _, adj := range adjacents { + curDirection++ + + if strings.Contains(adj, string(curChar)) { + found = true + foundDirection = curDirection + + if strings.Index(adj, string(curChar)) == 1 { + // index 1 in the adjacency means the key is shifted, 0 means unshifted: A vs a, % vs 5, etc. + // for example, 'q' is adjacent to the entry '2@'. @ is shifted w/ index 1, 2 is unshifted. + shiftedCount++ + } + + if lastDirection != foundDirection { + // adding a turn is correct even in the initial case when last_direction is null: + // every spatial pattern starts with a turn. + turns++ + lastDirection = foundDirection + } + break + } + } + } + + // if the current pattern continued, extend j and try to grow again + if found { + j++ + } else { + // otherwise push the pattern discovered so far, if any... + // don't consider length 1 or 2 chains. + if j-i > 2 { + matchSpc := match.Match{Pattern: "spatial", I: i, J: j - 1, Token: password[i:j], DictionaryName: graph.Name} + matchSpc.Entropy = entropy.SpatialEntropy(matchSpc, turns, shiftedCount) + matches = append(matches, matchSpc) + } + //. . . and then start a new search from the rest of the password + i = j + break + } + } + + } + return matches +} diff --git a/vendor/github.com/ccojocar/zxcvbn-go/renovate.json b/vendor/github.com/ccojocar/zxcvbn-go/renovate.json new file mode 100644 index 0000000000..58ee1e0ea8 --- /dev/null +++ b/vendor/github.com/ccojocar/zxcvbn-go/renovate.json @@ -0,0 +1,25 @@ +{ + "dependencyDashboard": true, + "dependencyDashboardTitle" : "Renovate(bot) : dependency dashboard", + "vulnerabilityAlerts": { + "enabled": true + }, + "extends": [ + ":preserveSemverRanges", + "group:all", + "schedule:weekly" + ], + "lockFileMaintenance": { + "commitMessageAction": "Update", + "enabled": true, + "extends": [ + "group:all", + "schedule:weekly" + ] + }, + "postUpdateOptions": [ + "gomodTidy", + "gomodUpdateImportPaths" + ], + "separateMajorMinor": false +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go b/vendor/github.com/ccojocar/zxcvbn-go/scoring/scoring.go similarity index 84% rename from vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go rename to vendor/github.com/ccojocar/zxcvbn-go/scoring/scoring.go index 4f68a6dca6..f25606a8d6 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/scoring/scoring.go @@ -2,11 +2,12 @@ package scoring import ( "fmt" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" - "github.com/nbutton23/zxcvbn-go/utils/math" "math" "sort" + + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" + zxcvbnmath "github.com/ccojocar/zxcvbn-go/utils/math" ) const ( @@ -15,7 +16,7 @@ const ( //adjust for your site accordingly if you use another hash function, possibly by //several orders of magnitude! singleGuess float64 = 0.010 - numAttackers float64 = 100 //Cores used to make guesses + numAttackers float64 = 100 // Cores used to make guesses secondsPerGuess float64 = singleGuess / numAttackers ) @@ -33,11 +34,11 @@ type MinEntropyMatch struct { /* MinimumEntropyMatchSequence returns the minimum entropy - Takes a list of overlapping matches, returns the non-overlapping sublist with - minimum entropy. O(nm) dp alg for length-n password with m candidate matches. 
+ Takes a list of overlapping matches, returns the non-overlapping sublist with + minimum entropy. O(nm) dp alg for length-n password with m candidate matches. */ func MinimumEntropyMatchSequence(password string, matches []match.Match) MinEntropyMatch { - bruteforceCardinality := float64(entropy.CalcBruteForceCardinality(password)) + bruteforceCardinality := entropy.CalcBruteForceCardinality(password) upToK := make([]float64, len(password)) backPointers := make([]match.Match, len(password)) @@ -50,7 +51,7 @@ func MinimumEntropyMatchSequence(password string, matches []match.Match) MinEntr } i, j := match.I, match.J - //see if best entropy up to i-1 + entropy of match is less that current min at j + // see if best entropy up to i-1 + entropy of match is less that current min at j upTo := get(upToK, i-1) candidateEntropy := upTo + match.Entropy @@ -62,7 +63,7 @@ func MinimumEntropyMatchSequence(password string, matches []match.Match) MinEntr } } - //walk backwards and decode the best sequence + // walk backwards and decode the best sequence var matchSequence []match.Match passwordLen := len(password) passwordLen-- @@ -80,12 +81,13 @@ func MinimumEntropyMatchSequence(password string, matches []match.Match) MinEntr sort.Sort(match.Matches(matchSequence)) makeBruteForceMatch := func(i, j int) match.Match { - return match.Match{Pattern: "bruteforce", + return match.Match{ + Pattern: "bruteforce", I: i, J: j, Token: password[i : j+1], - Entropy: math.Log2(math.Pow(bruteforceCardinality, float64(j-i)))} - + Entropy: math.Log2(math.Pow(bruteforceCardinality, float64(j-i))), + } } k := 0 @@ -110,14 +112,16 @@ func MinimumEntropyMatchSequence(password string, matches []match.Match) MinEntr } crackTime := roundToXDigits(entropyToCrackTime(minEntropy), 3) - return MinEntropyMatch{Password: password, + return MinEntropyMatch{ + Password: password, Entropy: roundToXDigits(minEntropy, 3), MatchSequence: matchSequenceCopy, CrackTime: crackTime, CrackTimeDisplay: displayTime(crackTime), - Score: crackTimeToScore(crackTime)} - + Score: crackTimeToScore(crackTime), + } } + func get(a []float64, i int) float64 { if i < 0 || i >= len(a) { return float64(0) @@ -157,9 +161,8 @@ func displayTime(seconds float64) string { return fmt.Sprintf(formater, (1 + math.Ceil(seconds/month)), "months") } else if seconds < century { return fmt.Sprintf(formater, (1 + math.Ceil(seconds/century)), "years") - } else { - return "centuries" } + return "centuries" } func crackTimeToScore(seconds float64) int { diff --git a/vendor/github.com/nbutton23/zxcvbn-go/utils/math/mathutils.go b/vendor/github.com/ccojocar/zxcvbn-go/utils/math/mathutils.go similarity index 100% rename from vendor/github.com/nbutton23/zxcvbn-go/utils/math/mathutils.go rename to vendor/github.com/ccojocar/zxcvbn-go/utils/math/mathutils.go diff --git a/vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go b/vendor/github.com/ccojocar/zxcvbn-go/zxcvbn.go similarity index 76% rename from vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go rename to vendor/github.com/ccojocar/zxcvbn-go/zxcvbn.go index 9c34b1c8c0..f3dc19e4c5 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/zxcvbn.go @@ -3,10 +3,10 @@ package zxcvbn import ( "time" - "github.com/nbutton23/zxcvbn-go/match" - "github.com/nbutton23/zxcvbn-go/matching" - "github.com/nbutton23/zxcvbn-go/scoring" - "github.com/nbutton23/zxcvbn-go/utils/math" + "github.com/ccojocar/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/matching" + "github.com/ccojocar/zxcvbn-go/scoring" 
+ zxcvbnmath "github.com/ccojocar/zxcvbn-go/utils/math" ) // PasswordStrength takes a password, userInputs and optional filters and returns a MinEntropyMatch diff --git a/vendor/github.com/chavacava/garif/enums.go b/vendor/github.com/chavacava/garif/enums.go new file mode 100644 index 0000000000..dea2daf131 --- /dev/null +++ b/vendor/github.com/chavacava/garif/enums.go @@ -0,0 +1,41 @@ +package garif + +type ResultKind string + +// declare JSON values +const ( + _pass ResultKind = "pass" + _open ResultKind = "open" + _informational ResultKind = "informational" + _notApplicable ResultKind = "notApplicable" + _review ResultKind = "review" + _fail ResultKind = "fail" +) + +// create public visible constants with a namespace as enums +const ( + ResultKind_Pass ResultKind = _pass + ResultKind_Open ResultKind = _open + ResultKind_Informational ResultKind = _informational + ResultKind_NotApplicable ResultKind = _notApplicable + ResultKind_Review ResultKind = _review + ResultKind_Fail ResultKind = _fail +) + +type ResultLevel string + +// declare JSON values +const ( + _warning ResultLevel = "warning" + _error ResultLevel = "error" + _note ResultLevel = "note" + _none ResultLevel = "none" +) + +// create public visible constants with a namespace as enums +const ( + ResultLevel_Warning ResultLevel = _warning + ResultLevel_Error ResultLevel = _error + ResultLevel_Note ResultLevel = _note + ResultLevel_None ResultLevel = _none +) diff --git a/vendor/github.com/chavacava/garif/models.go b/vendor/github.com/chavacava/garif/models.go index 3668436a3c..f16a86136e 100644 --- a/vendor/github.com/chavacava/garif/models.go +++ b/vendor/github.com/chavacava/garif/models.go @@ -935,10 +935,10 @@ type Result struct { HostedViewerUri string `json:"hostedViewerUri,omitempty"` // A value that categorizes results by evaluation state. - Kind interface{} `json:"kind,omitempty"` + Kind ResultKind `json:"kind,omitempty"` // A value specifying the severity level of the result. - Level interface{} `json:"level,omitempty"` + Level ResultLevel `json:"level,omitempty"` // The set of locations where the result was detected. Specify only one location unless the problem indicated by the result can only be corrected by making a change at every specified location. 
Locations []*Location `json:"locations,omitempty"` diff --git a/vendor/github.com/ckaznocha/intrange/.gitignore b/vendor/github.com/ckaznocha/intrange/.gitignore new file mode 100644 index 0000000000..eb97422caf --- /dev/null +++ b/vendor/github.com/ckaznocha/intrange/.gitignore @@ -0,0 +1 @@ +go.work.sum diff --git a/vendor/github.com/ckaznocha/intrange/.golangci.yml b/vendor/github.com/ckaznocha/intrange/.golangci.yml new file mode 100644 index 0000000000..2ad830d1b2 --- /dev/null +++ b/vendor/github.com/ckaznocha/intrange/.golangci.yml @@ -0,0 +1,99 @@ +linters-settings: + gci: + local-prefixes: github.com/ckaznocha/intrange + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + goimports: + local-prefixes: github.com/ckaznocha/intrange + golint: + min-confidence: 0 + govet: + check-shadowing: true + enable: + - asmdecl + - assign + - atomic + - atomicalign + - bools + - buildtag + - cgocall + - composite + - copylock + - deepequalerrors + - errorsas + - fieldalignment + - findcall + - framepointer + - httpresponse + - ifaceassert + - loopclosure + - lostcancel + - nilfunc + - nilness + - printf + - shadow + - shift + - sortslice + - stdmethods + - stringintconv + - structtag + - testinggoroutine + - tests + - unmarshal + - unreachable + - unsafeptr + - unusedresult + misspell: + locale: US +linters: + disable-all: true + enable: + - asciicheck + - dupl + - errcheck + - errorlint + - exportloopref + - gci + - gochecknoinits + - goconst + - gocritic + - godot + - godox + - goerr113 + - gofmt + - gofumpt + - goimports + - gomnd + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - lll + - misspell + - nakedret + - nestif + - nilerr + - nlreturn + - noctx + - nolintlint + - prealloc + - predeclared + - revive + - rowserrcheck + - staticcheck + - stylecheck + - typecheck + - unconvert + - unused + - wastedassign + - whitespace + - wsl +run: + skip-dirs: + - testdata/ diff --git a/vendor/github.com/ckaznocha/intrange/LICENSE b/vendor/github.com/ckaznocha/intrange/LICENSE new file mode 100644 index 0000000000..b68bde54b5 --- /dev/null +++ b/vendor/github.com/ckaznocha/intrange/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2024 Clifton Kaznocha + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
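The README and analyzer sources vendored next show what intrange reports; just as important is what it deliberately skips. Judging from the `bodyChecker` logic in intrange.go below, a counting loop whose body writes to the index is left alone, since a range-over-int rewrite would change its behavior. An illustrative sketch (not part of the vendored files):

```go
package main

import "fmt"

func main() {
	// Reported: i starts at 0, increments by one, and the body never writes
	// to it, so `for i := range 10` is an equivalent Go 1.22+ form.
	for i := 0; i < 10; i++ {
		fmt.Println(i)
	}

	// Not reported: the body assigns to i, so bodyChecker marks the loop as
	// modified and the analyzer emits no diagnostic.
	for i := 0; i < 10; i++ {
		if i == 3 {
			i = 7 // skip ahead
		}
		fmt.Println(i)
	}
}
```
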
diff --git a/vendor/github.com/ckaznocha/intrange/README.md b/vendor/github.com/ckaznocha/intrange/README.md new file mode 100644 index 0000000000..05d78b63a9 --- /dev/null +++ b/vendor/github.com/ckaznocha/intrange/README.md @@ -0,0 +1,54 @@ +# intrange + +[![Build Status](https://github.com/ckaznocha/intrange/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/ckaznocha/intrange/actions/workflows/ci.yml) +[![Release](http://img.shields.io/github/release/ckaznocha/intrange.svg)](https://github.com/ckaznocha/intrange/releases/latest) +[![GoDoc](https://godoc.org/github.com/ckaznocha/intrange?status.svg)](https://godoc.org/github.com/ckaznocha/intrange) + +intrange is a program for checking for loops that could use the [Go 1.22](https://go.dev/ref/spec#Go_1.22) integer +range feature. + +## Installation + +```bash +go install github.com/ckaznocha/intrange/cmd/intrange@latest +``` + +## Usage + +```bash +go vet -vettool=$(which intrange) ./... +``` + +## Example + +```go +package main + +import "fmt" + +func main() { + for i := 0; i < 10; i++ { + fmt.Println(i) + } +} +``` + +Running `intrange` on the above code will produce the following output: + +```bash +main.go:5:2: for loop can be changed to use an integer range (Go 1.22+) +``` + +The loop can be rewritten as: + +```go +package main + +import "fmt" + +func main() { + for i := range 10 { + fmt.Println(i) + } +} +``` diff --git a/vendor/github.com/ckaznocha/intrange/SECURITY.md b/vendor/github.com/ckaznocha/intrange/SECURITY.md new file mode 100644 index 0000000000..e2c44c4e21 --- /dev/null +++ b/vendor/github.com/ckaznocha/intrange/SECURITY.md @@ -0,0 +1,5 @@ +# Security Policy + +## Reporting a Vulnerability + +Please open a [github issue](https://github.com/ckaznocha/intrange/issues) diff --git a/vendor/github.com/ckaznocha/intrange/go.work b/vendor/github.com/ckaznocha/intrange/go.work new file mode 100644 index 0000000000..f41a04a2fb --- /dev/null +++ b/vendor/github.com/ckaznocha/intrange/go.work @@ -0,0 +1,6 @@ +go 1.22.0 + +use ( + . 
+ ./testdata +) diff --git a/vendor/github.com/ckaznocha/intrange/intrange.go b/vendor/github.com/ckaznocha/intrange/intrange.go new file mode 100644 index 0000000000..0a7ae3d95f --- /dev/null +++ b/vendor/github.com/ckaznocha/intrange/intrange.go @@ -0,0 +1,300 @@ +package intrange + +import ( + "errors" + "fmt" + "go/ast" + "go/token" + "strconv" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var ( + Analyzer = &analysis.Analyzer{ + Name: "intrange", + Doc: "intrange is a linter to find places where for loops could make use of an integer range.", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } + + errFailedAnalysis = errors.New("failed analysis") +) + +const msg = "for loop can be changed to use an integer range (Go 1.22+)" + +func run(pass *analysis.Pass) (any, error) { + result, ok := pass.ResultOf[inspect.Analyzer] + if !ok { + return nil, fmt.Errorf( + "%w: %s", + errFailedAnalysis, + inspect.Analyzer.Name, + ) + } + + resultInspector, ok := result.(*inspector.Inspector) + if !ok { + return nil, fmt.Errorf( + "%w: %s", + errFailedAnalysis, + inspect.Analyzer.Name, + ) + } + + resultInspector.Preorder([]ast.Node{(*ast.ForStmt)(nil)}, check(pass)) + + return nil, nil +} + +func check(pass *analysis.Pass) func(node ast.Node) { + return func(node ast.Node) { + forStmt, ok := node.(*ast.ForStmt) + if !ok { + return + } + + if forStmt.Init == nil || forStmt.Cond == nil || forStmt.Post == nil { + return + } + + // i := 0;; + init, ok := forStmt.Init.(*ast.AssignStmt) + if !ok { + return + } + + if len(init.Lhs) != 1 || len(init.Rhs) != 1 { + return + } + + initIdent, ok := init.Lhs[0].(*ast.Ident) + if !ok { + return + } + + if !compareNumberLit(init.Rhs[0], 0) { + return + } + + cond, ok := forStmt.Cond.(*ast.BinaryExpr) + if !ok { + return + } + + switch cond.Op { + case token.LSS: // ;i < x; + x, ok := cond.X.(*ast.Ident) + if !ok { + return + } + + if x.Name != initIdent.Name { + return + } + case token.GTR: // ;x > i; + y, ok := cond.Y.(*ast.Ident) + if !ok { + return + } + + if y.Name != initIdent.Name { + return + } + default: + return + } + + switch post := forStmt.Post.(type) { + case *ast.IncDecStmt: // ;;i++ + if post.Tok != token.INC { + return + } + + ident, ok := post.X.(*ast.Ident) + if !ok { + return + } + + if ident.Name != initIdent.Name { + return + } + case *ast.AssignStmt: + switch post.Tok { + case token.ADD_ASSIGN: // ;;i += 1 + if len(post.Lhs) != 1 { + return + } + + ident, ok := post.Lhs[0].(*ast.Ident) + if !ok { + return + } + + if ident.Name != initIdent.Name { + return + } + + if len(post.Rhs) != 1 { + return + } + + if !compareNumberLit(post.Rhs[0], 1) { + return + } + case token.ASSIGN: // ;;i = i + 1 && ;;i = 1 + i + if len(post.Lhs) != 1 || len(post.Rhs) != 1 { + return + } + + ident, ok := post.Lhs[0].(*ast.Ident) + if !ok { + return + } + + if ident.Name != initIdent.Name { + return + } + + bin, ok := post.Rhs[0].(*ast.BinaryExpr) + if !ok { + return + } + + if bin.Op != token.ADD { + return + } + + switch x := bin.X.(type) { + case *ast.Ident: // ;;i = i + 1 + if x.Name != initIdent.Name { + return + } + + if !compareNumberLit(bin.Y, 1) { + return + } + case *ast.BasicLit: // ;;i = 1 + i + if !compareNumberLit(x, 1) { + return + } + + ident, ok := bin.Y.(*ast.Ident) + if !ok { + return + } + + if ident.Name != initIdent.Name { + return + } + default: + return + } + default: + return + } + default: + return + } + + bc := &bodyChecker{ + initIdent: 
initIdent, + } + + ast.Inspect(forStmt.Body, bc.check) + + if bc.modified { + return + } + + pass.Report(analysis.Diagnostic{ + Pos: forStmt.Pos(), + Message: msg, + }) + } +} + +type bodyChecker struct { + initIdent *ast.Ident + modified bool +} + +func (b *bodyChecker) check(n ast.Node) bool { + switch stmt := n.(type) { + case *ast.AssignStmt: + for _, lhs := range stmt.Lhs { + ident, ok := lhs.(*ast.Ident) + if !ok { + continue + } + + if b.initIdent.Name == ident.Name { + b.modified = true + + return false + } + } + case *ast.IncDecStmt: + ident, ok := stmt.X.(*ast.Ident) + if !ok { + return true + } + + if b.initIdent.Name == ident.Name { + b.modified = true + + return false + } + } + + return true +} + +func compareNumberLit(exp ast.Expr, val int) bool { + switch lit := exp.(type) { + case *ast.BasicLit: + if lit.Kind != token.INT { + return false + } + + n := strconv.Itoa(val) + + switch lit.Value { + case n, "0x" + n, "0X" + n: + return true + default: + return false + } + case *ast.CallExpr: + switch fun := lit.Fun.(type) { + case *ast.Ident: + switch fun.Name { + case + "int", + "int8", + "int16", + "int32", + "int64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64": + default: + return false + } + default: + return false + } + + if len(lit.Args) != 1 { + return false + } + + return compareNumberLit(lit.Args[0], val) + default: + return false + } +} diff --git a/vendor/github.com/cloudflare/circl/LICENSE b/vendor/github.com/cloudflare/circl/LICENSE new file mode 100644 index 0000000000..67edaa90a0 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/LICENSE @@ -0,0 +1,57 @@ +Copyright (c) 2019 Cloudflare. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Cloudflare nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +======================================================================== + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve.go new file mode 100644 index 0000000000..f9057c2b86 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve.go @@ -0,0 +1,96 @@ +package x25519 + +import ( + fp "github.com/cloudflare/circl/math/fp25519" +) + +// ladderJoye calculates a fixed-point multiplication with the generator point. +// The algorithm is the right-to-left Joye's ladder as described +// in "How to precompute a ladder" in SAC'2017. +func ladderJoye(k *Key) { + w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved. + fp.SetOne(&w[1]) // x1 = 1 + fp.SetOne(&w[2]) // z1 = 1 + w[3] = fp.Elt{ // x2 = G-S + 0xbd, 0xaa, 0x2f, 0xc8, 0xfe, 0xe1, 0x94, 0x7e, + 0xf8, 0xed, 0xb2, 0x14, 0xae, 0x95, 0xf0, 0xbb, + 0xe2, 0x48, 0x5d, 0x23, 0xb9, 0xa0, 0xc7, 0xad, + 0x34, 0xab, 0x7c, 0xe2, 0xee, 0xcd, 0xae, 0x1e, + } + fp.SetOne(&w[4]) // z2 = 1 + + const n = 255 + const h = 3 + swap := uint(1) + for s := 0; s < n-h; s++ { + i := (s + h) / 8 + j := (s + h) % 8 + bit := uint((k[i] >> uint(j)) & 1) + copy(w[0][:], tableGenerator[s*Size:(s+1)*Size]) + diffAdd(&w, swap^bit) + swap = bit + } + for s := 0; s < h; s++ { + double(&w[1], &w[2]) + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +// ladderMontgomery calculates a generic scalar point multiplication +// The algorithm implemented is the left-to-right Montgomery's ladder. +func ladderMontgomery(k, xP *Key) { + w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved. 
+ w[0] = *(*fp.Elt)(xP) // x1 = xP + fp.SetOne(&w[1]) // x2 = 1 + w[3] = *(*fp.Elt)(xP) // x3 = xP + fp.SetOne(&w[4]) // z3 = 1 + + move := uint(0) + for s := 255 - 1; s >= 0; s-- { + i := s / 8 + j := s % 8 + bit := uint((k[i] >> uint(j)) & 1) + ladderStep(&w, move^bit) + move = bit + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +func toAffine(k *[fp.Size]byte, x, z *fp.Elt) { + fp.Inv(z, z) + fp.Mul(x, x, z) + _ = fp.ToBytes(k[:], x) +} + +var lowOrderPoints = [5]fp.Elt{ + { /* (0,_,1) point of order 2 on Curve25519 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (1,_,1) point of order 4 on Curve25519 */ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (x,_,1) first point of order 8 on Curve25519 */ + 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, + 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, + 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, + 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00, + }, + { /* (x,_,1) second point of order 8 on Curve25519 */ + 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24, + 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b, + 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86, + 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57, + }, + { /* (-1,_,1) a point of order 4 on the twist of Curve25519 */ + 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, + }, +} diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go new file mode 100644 index 0000000000..8a3d54c570 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go @@ -0,0 +1,30 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package x25519 + +import ( + fp "github.com/cloudflare/circl/math/fp25519" + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func double(x, z *fp.Elt) { doubleAmd64(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) } + +//go:noescape +func ladderStepAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func diffAddAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func doubleAmd64(x, z *fp.Elt) + +//go:noescape +func mulA24Amd64(z, x *fp.Elt) diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h new file mode 100644 index 0000000000..8c1ae4d0fb --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h @@ -0,0 +1,111 @@ +#define ladderStepLeg \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulLeg(b0,x2,z3) \ + integerMulLeg(b1,x3,z2) \ + reduceFromDoubleLeg(t0,b0) \ + reduceFromDoubleLeg(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrLeg(b0,t0) \ + integerSqrLeg(b1,t1) \ + reduceFromDoubleLeg(x3,b0) \ + reduceFromDoubleLeg(z3,b1) \ + integerMulLeg(b0,x1,z3) \ + reduceFromDoubleLeg(z3,b0) \ + integerSqrLeg(b0,x2) \ + integerSqrLeg(b1,z2) \ + reduceFromDoubleLeg(x2,b0) \ + 
reduceFromDoubleLeg(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z2) \ + integerMulLeg(b0,x2,z2) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) + +#define ladderStepBmi2Adx \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulAdx(b0,x2,z3) \ + integerMulAdx(b1,x3,z2) \ + reduceFromDoubleAdx(t0,b0) \ + reduceFromDoubleAdx(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrAdx(b0,t0) \ + integerSqrAdx(b1,t1) \ + reduceFromDoubleAdx(x3,b0) \ + reduceFromDoubleAdx(z3,b1) \ + integerMulAdx(b0,x1,z3) \ + reduceFromDoubleAdx(z3,b0) \ + integerSqrAdx(b0,x2) \ + integerSqrAdx(b1,z2) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z2) \ + integerMulAdx(b0,x2,z2) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) + +#define difAddLeg \ + addSub(x1,z1) \ + integerMulLeg(b0,z1,ui) \ + reduceFromDoubleLeg(z1,b0) \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + integerMulLeg(b0,x1,z2) \ + integerMulLeg(b1,z1,x2) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define difAddBmi2Adx \ + addSub(x1,z1) \ + integerMulAdx(b0,z1,ui) \ + reduceFromDoubleAdx(z1,b0) \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + integerMulAdx(b0,x1,z2) \ + integerMulAdx(b1,z1,x2) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) + +#define doubleLeg \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z1) \ + integerMulLeg(b0,x1,z1) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define doubleBmi2Adx \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z1) \ + integerMulAdx(b0,x1,z1) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s new file mode 100644 index 0000000000..b7723185b6 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s @@ -0,0 +1,156 @@ +// +build amd64 + +#include "textflag.h" + +// Depends on circl/math/fp25519 package +#include "../../math/fp25519/fp_amd64.h" +#include "curve_amd64.h" + +// CTE_A24 is (A+2)/4 from Curve25519 +#define CTE_A24 121666 + +#define Size 32 + +// multiplyA24Leg multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R13, FLAGS +// Instr: x86_64, cmov +#define multiplyA24Leg(z,x) \ + MOVL $CTE_A24, AX; MULQ 0+x; MOVQ AX, R8; MOVQ DX, R9; \ + MOVL $CTE_A24, AX; MULQ 8+x; MOVQ AX, R12; MOVQ DX, R10; \ + MOVL $CTE_A24, AX; MULQ 16+x; MOVQ AX, R13; MOVQ DX, R11; \ + MOVL $CTE_A24, AX; MULQ 24+x; \ + ADDQ R12, R9; \ + ADCQ R13, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + MOVL $38, AX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \ + IMULQ AX, DX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ $0, R11; MOVQ R11, 24+z; \ + MOVQ $0, DX; \ + CMOVQCS AX, DX; \ + ADDQ DX, R8; MOVQ 
R8, 0+z; + +// multiplyA24Adx multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R12, FLAGS +// Instr: x86_64, cmov, bmi2 +#define multiplyA24Adx(z,x) \ + MOVQ $CTE_A24, DX; \ + MULXQ 0+x, R8, R10; \ + MULXQ 8+x, R9, R11; ADDQ R10, R9; \ + MULXQ 16+x, R10, AX; ADCQ R11, R10; \ + MULXQ 24+x, R11, R12; ADCQ AX, R11; \ + ;;;;;;;;;;;;;;;;;;;;; ADCQ $0, R12; \ + MOVL $38, DX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \ + IMULQ DX, R12; \ + ADDQ R12, R8; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ $0, R11; MOVQ R11, 24+z; \ + MOVQ $0, R12; \ + CMOVQCS DX, R12; \ + ADDQ R12, R8; MOVQ R8, 0+z; + +#define mulA24Legacy \ + multiplyA24Leg(0(DI),0(SI)) +#define mulA24Bmi2Adx \ + multiplyA24Adx(0(DI),0(SI)) + +// func mulA24Amd64(z, x *fp255.Elt) +TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx) + + +// func ladderStepAmd64(w *[5]fp255.Elt, b uint) +// ladderStepAmd64 calculates a point addition and doubling as follows: +// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-). +// work = (x1,x2,z2,x3,z3) are five fp255.Elt of 32 bytes. +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·ladderStepAmd64(SB),NOSPLIT,$192-16 + // Parameters + #define regWork DI + #define regMove SI + #define x1 0*Size(regWork) + #define x2 1*Size(regWork) + #define z2 2*Size(regWork) + #define x3 3*Size(regWork) + #define z3 4*Size(regWork) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regMove + CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx) + #undef regWork + #undef regMove + #undef x1 + #undef x2 + #undef z2 + #undef x3 + #undef z3 + #undef t0 + #undef t1 + #undef b0 + #undef b1 + +// func diffAddAmd64(w *[5]fp255.Elt, b uint) +// diffAddAmd64 calculates a differential point addition using a precomputed point. +// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2) +// w = (mu,x1,z1,x2,z2) are five fp.Elt, and +// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·diffAddAmd64(SB),NOSPLIT,$128-16 + // Parameters + #define regWork DI + #define regSwap SI + #define ui 0*Size(regWork) + #define x1 1*Size(regWork) + #define z1 2*Size(regWork) + #define x2 3*Size(regWork) + #define z2 4*Size(regWork) + // Local variables + #define b0 0*Size(SP) + #define b1 2*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regSwap + cswap(x1,x2,regSwap) + cswap(z1,z2,regSwap) + CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx) + #undef regWork + #undef regSwap + #undef ui + #undef x1 + #undef z1 + #undef x2 + #undef z2 + #undef b0 + #undef b1 + +// func doubleAmd64(x, z *fp255.Elt) +// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1). +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. 
+TEXT ·doubleAmd64(SB),NOSPLIT,$192-16 + // Parameters + #define x1 0(DI) + #define z1 0(SI) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ x+0(FP), DI + MOVQ z+8(FP), SI + CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx) + #undef x1 + #undef z1 + #undef t0 + #undef t1 + #undef b0 + #undef b1 diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go new file mode 100644 index 0000000000..dae67ea37d --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go @@ -0,0 +1,85 @@ +package x25519 + +import ( + "encoding/binary" + "math/bits" + + fp "github.com/cloudflare/circl/math/fp25519" +) + +func doubleGeneric(x, z *fp.Elt) { + t0, t1 := &fp.Elt{}, &fp.Elt{} + fp.AddSub(x, z) + fp.Sqr(x, x) + fp.Sqr(z, z) + fp.Sub(t0, x, z) + mulA24Generic(t1, t0) + fp.Add(t1, t1, z) + fp.Mul(x, x, z) + fp.Mul(z, t0, t1) +} + +func diffAddGeneric(w *[5]fp.Elt, b uint) { + mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4] + fp.Cswap(x1, x2, b) + fp.Cswap(z1, z2, b) + fp.AddSub(x1, z1) + fp.Mul(z1, z1, mu) + fp.AddSub(x1, z1) + fp.Sqr(x1, x1) + fp.Sqr(z1, z1) + fp.Mul(x1, x1, z2) + fp.Mul(z1, z1, x2) +} + +func ladderStepGeneric(w *[5]fp.Elt, b uint) { + x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4] + t0 := &fp.Elt{} + t1 := &fp.Elt{} + fp.AddSub(x2, z2) + fp.AddSub(x3, z3) + fp.Mul(t0, x2, z3) + fp.Mul(t1, x3, z2) + fp.AddSub(t0, t1) + fp.Cmov(x2, x3, b) + fp.Cmov(z2, z3, b) + fp.Sqr(x3, t0) + fp.Sqr(z3, t1) + fp.Mul(z3, x1, z3) + fp.Sqr(x2, x2) + fp.Sqr(z2, z2) + fp.Sub(t0, x2, z2) + mulA24Generic(t1, t0) + fp.Add(t1, t1, z2) + fp.Mul(x2, x2, z2) + fp.Mul(z2, t0, t1) +} + +func mulA24Generic(z, x *fp.Elt) { + const A24 = 121666 + const n = 8 + var xx [4]uint64 + for i := range xx { + xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n]) + } + + h0, l0 := bits.Mul64(xx[0], A24) + h1, l1 := bits.Mul64(xx[1], A24) + h2, l2 := bits.Mul64(xx[2], A24) + h3, l3 := bits.Mul64(xx[3], A24) + + var c3 uint64 + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + _, l4 = bits.Mul64(l4, 38) + l0, c0 = bits.Add64(l0, l4, 0) + xx[1], c1 = bits.Add64(l1, 0, c0) + xx[2], c2 = bits.Add64(l2, 0, c1) + xx[3], c3 = bits.Add64(l3, 0, c2) + xx[0], _ = bits.Add64(l0, (-c3)&38, 0) + for i := range xx { + binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i]) + } +} diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go new file mode 100644 index 0000000000..07fab97d2a --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go @@ -0,0 +1,11 @@ +//go:build !amd64 || purego +// +build !amd64 purego + +package x25519 + +import fp "github.com/cloudflare/circl/math/fp25519" + +func double(x, z *fp.Elt) { doubleGeneric(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) } diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/doc.go b/vendor/github.com/cloudflare/circl/dh/x25519/doc.go new file mode 100644 index 0000000000..3ce102d145 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/doc.go @@ -0,0 +1,19 @@ +/* +Package x25519 provides Diffie-Hellman functions as specified in RFC-7748. + +Validation of public keys. 
+
+The Diffie-Hellman function, as described in RFC-7748 [1], works for any
+public key. However, if a different protocol requires contributory
+behaviour [2,3], then the public keys must be validated against low-order
+points [3,4]. To do that, the Shared function performs this validation
+internally and returns false when the public key is invalid (i.e., it
+is a low-order point).
+
+References:
+ - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt)
+ - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html)
+ - [3] Bernstein (https://cr.yp.to/ecdh.html#validate)
+ - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526)
+*/
+package x25519
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/key.go b/vendor/github.com/cloudflare/circl/dh/x25519/key.go
new file mode 100644
index 0000000000..c76f72ac7f
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x25519/key.go
@@ -0,0 +1,47 @@
+package x25519
+
+import (
+ "crypto/subtle"
+
+ fp "github.com/cloudflare/circl/math/fp25519"
+)
+
+// Size is the length in bytes of an X25519 key.
+const Size = 32
+
+// Key represents an X25519 key.
+type Key [Size]byte
+
+func (k *Key) clamp(in *Key) *Key {
+ *k = *in
+ k[0] &= 248
+ k[31] = (k[31] & 127) | 64
+ return k
+}
+
+// isValidPubKey verifies if the public key is not a low-order point.
+func (k *Key) isValidPubKey() bool {
+ fp.Modp((*fp.Elt)(k))
+ var isLowOrder int
+ for _, P := range lowOrderPoints {
+  isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:])
+ }
+ return isLowOrder == 0
+}
+
+// KeyGen obtains a public key given a secret key.
+func KeyGen(public, secret *Key) {
+ ladderJoye(public.clamp(secret))
+}
+
+// Shared calculates Alice's shared key from Alice's secret key and Bob's
+// public key, returning true on success. A failure occurs when the public
+// key is a low-order point; in that case the shared key is all zeros and
+// the function returns false.
+func Shared(shared, secret, public *Key) bool {
+ validPk := *public
+ validPk[31] &= (1 << (255 % 8)) - 1
+ ok := validPk.isValidPubKey()
+ ladderMontgomery(shared.clamp(secret), &validPk)
+ return ok
+}
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/table.go b/vendor/github.com/cloudflare/circl/dh/x25519/table.go
new file mode 100644
index 0000000000..28c8c4ac03
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x25519/table.go
@@ -0,0 +1,268 @@
+package x25519
+
+import "github.com/cloudflare/circl/math/fp25519"
+
+// tableGenerator contains the set of points:
+//
+// t[i] = (xi+1)/(xi-1),
+//
+// where (xi,yi) = 2^i*G and G is the generator point.
+// Size = (256)*(256/8) = 8192 bytes.
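+// Each step s of the right-to-left Joye ladder (ladderJoye) loads entry s as
+// the precomputed difference point (mu) consumed by diffAdd.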
+var tableGenerator = [256 * fp25519.Size]byte{ + /* (2^ 0)P */ 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f, + /* (2^ 1)P */ 0x96, 0xfe, 0xaa, 0x16, 0xf4, 0x20, 0x82, 0x6b, 0x34, 0x6a, 0x56, 0x4f, 0x2b, 0xeb, 0xeb, 0x82, 0x0f, 0x95, 0xa5, 0x75, 0xb0, 0xa5, 0xa9, 0xd5, 0xf4, 0x88, 0x24, 0x4b, 0xcf, 0xb2, 0x42, 0x51, + /* (2^ 2)P */ 0x0c, 0x68, 0x69, 0x00, 0x75, 0xbc, 0xae, 0x6a, 0x41, 0x9c, 0xf9, 0xa0, 0x20, 0x78, 0xcf, 0x89, 0xf4, 0xd0, 0x56, 0x3b, 0x18, 0xd9, 0x58, 0x2a, 0xa4, 0x11, 0x60, 0xe3, 0x80, 0xca, 0x5a, 0x4b, + /* (2^ 3)P */ 0x5d, 0x74, 0x29, 0x8c, 0x34, 0x32, 0x91, 0x32, 0xd7, 0x2f, 0x64, 0xe1, 0x16, 0xe6, 0xa2, 0xf4, 0x34, 0xbc, 0x67, 0xff, 0x03, 0xbb, 0x45, 0x1e, 0x4a, 0x9b, 0x2a, 0xf4, 0xd0, 0x12, 0x69, 0x30, + /* (2^ 4)P */ 0x54, 0x71, 0xaf, 0xe6, 0x07, 0x65, 0x88, 0xff, 0x2f, 0xc8, 0xee, 0xdf, 0x13, 0x0e, 0xf5, 0x04, 0xce, 0xb5, 0xba, 0x2a, 0xe8, 0x2f, 0x51, 0xaa, 0x22, 0xf2, 0xd5, 0x68, 0x1a, 0x25, 0x4e, 0x17, + /* (2^ 5)P */ 0x98, 0x88, 0x02, 0x82, 0x0d, 0x70, 0x96, 0xcf, 0xc5, 0x02, 0x2c, 0x0a, 0x37, 0xe3, 0x43, 0x17, 0xaa, 0x6e, 0xe8, 0xb4, 0x98, 0xec, 0x9e, 0x37, 0x2e, 0x48, 0xe0, 0x51, 0x8a, 0x88, 0x59, 0x0c, + /* (2^ 6)P */ 0x89, 0xd1, 0xb5, 0x99, 0xd6, 0xf1, 0xcb, 0xfb, 0x84, 0xdc, 0x9f, 0x8e, 0xd5, 0xf0, 0xae, 0xac, 0x14, 0x76, 0x1f, 0x23, 0x06, 0x0d, 0xc2, 0xc1, 0x72, 0xf9, 0x74, 0xa2, 0x8d, 0x21, 0x38, 0x29, + /* (2^ 7)P */ 0x18, 0x7f, 0x1d, 0xff, 0xbe, 0x49, 0xaf, 0xf6, 0xc2, 0xc9, 0x7a, 0x38, 0x22, 0x1c, 0x54, 0xcc, 0x6b, 0xc5, 0x15, 0x40, 0xef, 0xc9, 0xfc, 0x96, 0xa9, 0x13, 0x09, 0x69, 0x7c, 0x62, 0xc1, 0x69, + /* (2^ 8)P */ 0x0e, 0xdb, 0x33, 0x47, 0x2f, 0xfd, 0x86, 0x7a, 0xe9, 0x7d, 0x08, 0x9e, 0xf2, 0xc4, 0xb8, 0xfd, 0x29, 0xa2, 0xa2, 0x8e, 0x1a, 0x4b, 0x5e, 0x09, 0x79, 0x7a, 0xb3, 0x29, 0xc8, 0xa7, 0xd7, 0x1a, + /* (2^ 9)P */ 0xc0, 0xa0, 0x7e, 0xd1, 0xca, 0x89, 0x2d, 0x34, 0x51, 0x20, 0xed, 0xcc, 0xa6, 0xdd, 0xbe, 0x67, 0x74, 0x2f, 0xb4, 0x2b, 0xbf, 0x31, 0xca, 0x19, 0xbb, 0xac, 0x80, 0x49, 0xc8, 0xb4, 0xf7, 0x3d, + /* (2^ 10)P */ 0x83, 0xd8, 0x0a, 0xc8, 0x4d, 0x44, 0xc6, 0xa8, 0x85, 0xab, 0xe3, 0x66, 0x03, 0x44, 0x1e, 0xb9, 0xd8, 0xf6, 0x64, 0x01, 0xa0, 0xcd, 0x15, 0xc2, 0x68, 0xe6, 0x47, 0xf2, 0x6e, 0x7c, 0x86, 0x3d, + /* (2^ 11)P */ 0x8c, 0x65, 0x3e, 0xcc, 0x2b, 0x58, 0xdd, 0xc7, 0x28, 0x55, 0x0e, 0xee, 0x48, 0x47, 0x2c, 0xfd, 0x71, 0x4f, 0x9f, 0xcc, 0x95, 0x9b, 0xfd, 0xa0, 0xdf, 0x5d, 0x67, 0xb0, 0x71, 0xd8, 0x29, 0x75, + /* (2^ 12)P */ 0x78, 0xbd, 0x3c, 0x2d, 0xb4, 0x68, 0xf5, 0xb8, 0x82, 0xda, 0xf3, 0x91, 0x1b, 0x01, 0x33, 0x12, 0x62, 0x3b, 0x7c, 0x4a, 0xcd, 0x6c, 0xce, 0x2d, 0x03, 0x86, 0x49, 0x9e, 0x8e, 0xfc, 0xe7, 0x75, + /* (2^ 13)P */ 0xec, 0xb6, 0xd0, 0xfc, 0xf1, 0x13, 0x4f, 0x2f, 0x45, 0x7a, 0xff, 0x29, 0x1f, 0xca, 0xa8, 0xf1, 0x9b, 0xe2, 0x81, 0x29, 0xa7, 0xc1, 0x49, 0xc2, 0x6a, 0xb5, 0x83, 0x8c, 0xbb, 0x0d, 0xbe, 0x6e, + /* (2^ 14)P */ 0x22, 0xb2, 0x0b, 0x17, 0x8d, 0xfa, 0x14, 0x71, 0x5f, 0x93, 0x93, 0xbf, 0xd5, 0xdc, 0xa2, 0x65, 0x9a, 0x97, 0x9c, 0xb5, 0x68, 0x1f, 0xc4, 0xbd, 0x89, 0x92, 0xce, 0xa2, 0x79, 0xef, 0x0e, 0x2f, + /* (2^ 15)P */ 0xce, 0x37, 0x3c, 0x08, 0x0c, 0xbf, 0xec, 0x42, 0x22, 0x63, 0x49, 0xec, 0x09, 0xbc, 0x30, 0x29, 0x0d, 0xac, 0xfe, 0x9c, 0xc1, 0xb0, 0x94, 0xf2, 0x80, 0xbb, 0xfa, 0xed, 0x4b, 0xaa, 0x80, 0x37, + /* (2^ 16)P */ 0x29, 0xd9, 0xea, 0x7c, 0x3e, 0x7d, 0xc1, 0x56, 0xc5, 0x22, 0x57, 0x2e, 0xeb, 0x4b, 0xcb, 0xe7, 0x5a, 0xe1, 0xbf, 0x2d, 0x73, 0x31, 0xe9, 0x0c, 0xf8, 0x52, 
0x10, 0x62, 0xc7, 0x83, 0xb8, 0x41, + /* (2^ 17)P */ 0x50, 0x53, 0xd2, 0xc3, 0xa0, 0x5c, 0xf7, 0xdb, 0x51, 0xe3, 0xb1, 0x6e, 0x08, 0xbe, 0x36, 0x29, 0x12, 0xb2, 0xa9, 0xb4, 0x3c, 0xe0, 0x36, 0xc9, 0xaa, 0x25, 0x22, 0x32, 0x82, 0xbf, 0x45, 0x1d, + /* (2^ 18)P */ 0xc5, 0x4c, 0x02, 0x6a, 0x03, 0xb1, 0x1a, 0xe8, 0x72, 0x9a, 0x4c, 0x30, 0x1c, 0x20, 0x12, 0xe2, 0xfc, 0xb1, 0x32, 0x68, 0xba, 0x3f, 0xd7, 0xc5, 0x81, 0x95, 0x83, 0x4d, 0x5a, 0xdb, 0xff, 0x20, + /* (2^ 19)P */ 0xad, 0x0f, 0x5d, 0xbe, 0x67, 0xd3, 0x83, 0xa2, 0x75, 0x44, 0x16, 0x8b, 0xca, 0x25, 0x2b, 0x6c, 0x2e, 0xf2, 0xaa, 0x7c, 0x46, 0x35, 0x49, 0x9d, 0x49, 0xff, 0x85, 0xee, 0x8e, 0x40, 0x66, 0x51, + /* (2^ 20)P */ 0x61, 0xe3, 0xb4, 0xfa, 0xa2, 0xba, 0x67, 0x3c, 0xef, 0x5c, 0xf3, 0x7e, 0xc6, 0x33, 0xe4, 0xb3, 0x1c, 0x9b, 0x15, 0x41, 0x92, 0x72, 0x59, 0x52, 0x33, 0xab, 0xb0, 0xd5, 0x92, 0x18, 0x62, 0x6a, + /* (2^ 21)P */ 0xcb, 0xcd, 0x55, 0x75, 0x38, 0x4a, 0xb7, 0x20, 0x3f, 0x92, 0x08, 0x12, 0x0e, 0xa1, 0x2a, 0x53, 0xd1, 0x1d, 0x28, 0x62, 0x77, 0x7b, 0xa1, 0xea, 0xbf, 0x44, 0x5c, 0xf0, 0x43, 0x34, 0xab, 0x61, + /* (2^ 22)P */ 0xf8, 0xde, 0x24, 0x23, 0x42, 0x6c, 0x7a, 0x25, 0x7f, 0xcf, 0xe3, 0x17, 0x10, 0x6c, 0x1c, 0x13, 0x57, 0xa2, 0x30, 0xf6, 0x39, 0x87, 0x75, 0x23, 0x80, 0x85, 0xa7, 0x01, 0x7a, 0x40, 0x5a, 0x29, + /* (2^ 23)P */ 0xd9, 0xa8, 0x5d, 0x6d, 0x24, 0x43, 0xc4, 0xf8, 0x5d, 0xfa, 0x52, 0x0c, 0x45, 0x75, 0xd7, 0x19, 0x3d, 0xf8, 0x1b, 0x73, 0x92, 0xfc, 0xfc, 0x2a, 0x00, 0x47, 0x2b, 0x1b, 0xe8, 0xc8, 0x10, 0x7d, + /* (2^ 24)P */ 0x0b, 0xa2, 0xba, 0x70, 0x1f, 0x27, 0xe0, 0xc8, 0x57, 0x39, 0xa6, 0x7c, 0x86, 0x48, 0x37, 0x99, 0xbb, 0xd4, 0x7e, 0xcb, 0xb3, 0xef, 0x12, 0x54, 0x75, 0x29, 0xe6, 0x73, 0x61, 0xd3, 0x96, 0x31, + /* (2^ 25)P */ 0xfc, 0xdf, 0xc7, 0x41, 0xd1, 0xca, 0x5b, 0xde, 0x48, 0xc8, 0x95, 0xb3, 0xd2, 0x8c, 0xcc, 0x47, 0xcb, 0xf3, 0x1a, 0xe1, 0x42, 0xd9, 0x4c, 0xa3, 0xc2, 0xce, 0x4e, 0xd0, 0xf2, 0xdb, 0x56, 0x02, + /* (2^ 26)P */ 0x7f, 0x66, 0x0e, 0x4b, 0xe9, 0xb7, 0x5a, 0x87, 0x10, 0x0d, 0x85, 0xc0, 0x83, 0xdd, 0xd4, 0xca, 0x9f, 0xc7, 0x72, 0x4e, 0x8f, 0x2e, 0xf1, 0x47, 0x9b, 0xb1, 0x85, 0x8c, 0xbb, 0x87, 0x1a, 0x5f, + /* (2^ 27)P */ 0xb8, 0x51, 0x7f, 0x43, 0xb6, 0xd0, 0xe9, 0x7a, 0x65, 0x90, 0x87, 0x18, 0x55, 0xce, 0xc7, 0x12, 0xee, 0x7a, 0xf7, 0x5c, 0xfe, 0x09, 0xde, 0x2a, 0x27, 0x56, 0x2c, 0x7d, 0x2f, 0x5a, 0xa0, 0x23, + /* (2^ 28)P */ 0x9a, 0x16, 0x7c, 0xf1, 0x28, 0xe1, 0x08, 0x59, 0x2d, 0x85, 0xd0, 0x8a, 0xdd, 0x98, 0x74, 0xf7, 0x64, 0x2f, 0x10, 0xab, 0xce, 0xc4, 0xb4, 0x74, 0x45, 0x98, 0x13, 0x10, 0xdd, 0xba, 0x3a, 0x18, + /* (2^ 29)P */ 0xac, 0xaa, 0x92, 0xaa, 0x8d, 0xba, 0x65, 0xb1, 0x05, 0x67, 0x38, 0x99, 0x95, 0xef, 0xc5, 0xd5, 0xd1, 0x40, 0xfc, 0xf8, 0x0c, 0x8f, 0x2f, 0xbe, 0x14, 0x45, 0x20, 0xee, 0x35, 0xe6, 0x01, 0x27, + /* (2^ 30)P */ 0x14, 0x65, 0x15, 0x20, 0x00, 0xa8, 0x9f, 0x62, 0xce, 0xc1, 0xa8, 0x64, 0x87, 0x86, 0x23, 0xf2, 0x0e, 0x06, 0x3f, 0x0b, 0xff, 0x4f, 0x89, 0x5b, 0xfa, 0xa3, 0x08, 0xf7, 0x4c, 0x94, 0xd9, 0x60, + /* (2^ 31)P */ 0x1f, 0x20, 0x7a, 0x1c, 0x1a, 0x00, 0xea, 0xae, 0x63, 0xce, 0xe2, 0x3e, 0x63, 0x6a, 0xf1, 0xeb, 0xe1, 0x07, 0x7a, 0x4c, 0x59, 0x09, 0x77, 0x6f, 0xcb, 0x08, 0x02, 0x0d, 0x15, 0x58, 0xb9, 0x79, + /* (2^ 32)P */ 0xe7, 0x10, 0xd4, 0x01, 0x53, 0x5e, 0xb5, 0x24, 0x4d, 0xc8, 0xfd, 0xf3, 0xdf, 0x4e, 0xa3, 0xe3, 0xd8, 0x32, 0x40, 0x90, 0xe4, 0x68, 0x87, 0xd8, 0xec, 0xae, 0x3a, 0x7b, 0x42, 0x84, 0x13, 0x13, + /* (2^ 33)P */ 0x14, 0x4f, 0x23, 0x86, 0x12, 0xe5, 0x05, 0x84, 0x29, 0xc5, 0xb4, 0xad, 0x39, 0x47, 0xdc, 0x14, 0xfd, 0x4f, 0x63, 0x50, 0xb2, 0xb5, 0xa2, 0xb8, 0x93, 0xff, 
0xa7, 0xd8, 0x4a, 0xa9, 0xe2, 0x2f, + /* (2^ 34)P */ 0xdd, 0xfa, 0x43, 0xe8, 0xef, 0x57, 0x5c, 0xec, 0x18, 0x99, 0xbb, 0xf0, 0x40, 0xce, 0x43, 0x28, 0x05, 0x63, 0x3d, 0xcf, 0xd6, 0x61, 0xb5, 0xa4, 0x7e, 0x77, 0xfb, 0xe8, 0xbd, 0x29, 0x36, 0x74, + /* (2^ 35)P */ 0x8f, 0x73, 0xaf, 0xbb, 0x46, 0xdd, 0x3e, 0x34, 0x51, 0xa6, 0x01, 0xb1, 0x28, 0x18, 0x98, 0xed, 0x7a, 0x79, 0x2c, 0x88, 0x0b, 0x76, 0x01, 0xa4, 0x30, 0x87, 0xc8, 0x8d, 0xe2, 0x23, 0xc2, 0x1f, + /* (2^ 36)P */ 0x0e, 0xba, 0x0f, 0xfc, 0x91, 0x4e, 0x60, 0x48, 0xa4, 0x6f, 0x2c, 0x05, 0x8f, 0xf7, 0x37, 0xb6, 0x9c, 0x23, 0xe9, 0x09, 0x3d, 0xac, 0xcc, 0x91, 0x7c, 0x68, 0x7a, 0x43, 0xd4, 0xee, 0xf7, 0x23, + /* (2^ 37)P */ 0x00, 0xd8, 0x9b, 0x8d, 0x11, 0xb1, 0x73, 0x51, 0xa7, 0xd4, 0x89, 0x31, 0xb6, 0x41, 0xd6, 0x29, 0x86, 0xc5, 0xbb, 0x88, 0x79, 0x17, 0xbf, 0xfd, 0xf5, 0x1d, 0xd8, 0xca, 0x4f, 0x89, 0x59, 0x29, + /* (2^ 38)P */ 0x99, 0xc8, 0xbb, 0xb4, 0xf3, 0x8e, 0xbc, 0xae, 0xb9, 0x92, 0x69, 0xb2, 0x5a, 0x99, 0x48, 0x41, 0xfb, 0x2c, 0xf9, 0x34, 0x01, 0x0b, 0xe2, 0x24, 0xe8, 0xde, 0x05, 0x4a, 0x89, 0x58, 0xd1, 0x40, + /* (2^ 39)P */ 0xf6, 0x76, 0xaf, 0x85, 0x11, 0x0b, 0xb0, 0x46, 0x79, 0x7a, 0x18, 0x73, 0x78, 0xc7, 0xba, 0x26, 0x5f, 0xff, 0x8f, 0xab, 0x95, 0xbf, 0xc0, 0x3d, 0xd7, 0x24, 0x55, 0x94, 0xd8, 0x8b, 0x60, 0x2a, + /* (2^ 40)P */ 0x02, 0x63, 0x44, 0xbd, 0x88, 0x95, 0x44, 0x26, 0x9c, 0x43, 0x88, 0x03, 0x1c, 0xc2, 0x4b, 0x7c, 0xb2, 0x11, 0xbd, 0x83, 0xf3, 0xa4, 0x98, 0x8e, 0xb9, 0x76, 0xd8, 0xc9, 0x7b, 0x8d, 0x21, 0x26, + /* (2^ 41)P */ 0x8a, 0x17, 0x7c, 0x99, 0x42, 0x15, 0x08, 0xe3, 0x6f, 0x60, 0xb6, 0x6f, 0xa8, 0x29, 0x2d, 0x3c, 0x74, 0x93, 0x27, 0xfa, 0x36, 0x77, 0x21, 0x5c, 0xfa, 0xb1, 0xfe, 0x4a, 0x73, 0x05, 0xde, 0x7d, + /* (2^ 42)P */ 0xab, 0x2b, 0xd4, 0x06, 0x39, 0x0e, 0xf1, 0x3b, 0x9c, 0x64, 0x80, 0x19, 0x3e, 0x80, 0xf7, 0xe4, 0x7a, 0xbf, 0x95, 0x95, 0xf8, 0x3b, 0x05, 0xe6, 0x30, 0x55, 0x24, 0xda, 0x38, 0xaf, 0x4f, 0x39, + /* (2^ 43)P */ 0xf4, 0x28, 0x69, 0x89, 0x58, 0xfb, 0x8e, 0x7a, 0x3c, 0x11, 0x6a, 0xcc, 0xe9, 0x78, 0xc7, 0xfb, 0x6f, 0x59, 0xaf, 0x30, 0xe3, 0x0c, 0x67, 0x72, 0xf7, 0x6c, 0x3d, 0x1d, 0xa8, 0x22, 0xf2, 0x48, + /* (2^ 44)P */ 0xa7, 0xca, 0x72, 0x0d, 0x41, 0xce, 0x1f, 0xf0, 0x95, 0x55, 0x3b, 0x21, 0xc7, 0xec, 0x20, 0x5a, 0x83, 0x14, 0xfa, 0xc1, 0x65, 0x11, 0xc2, 0x7b, 0x41, 0xa7, 0xa8, 0x1d, 0xe3, 0x9a, 0xf8, 0x07, + /* (2^ 45)P */ 0xf9, 0x0f, 0x83, 0xc6, 0xb4, 0xc2, 0xd2, 0x05, 0x93, 0x62, 0x31, 0xc6, 0x0f, 0x33, 0x3e, 0xd4, 0x04, 0xa9, 0xd3, 0x96, 0x0a, 0x59, 0xa5, 0xa5, 0xb6, 0x33, 0x53, 0xa6, 0x91, 0xdb, 0x5e, 0x70, + /* (2^ 46)P */ 0xf7, 0xa5, 0xb9, 0x0b, 0x5e, 0xe1, 0x8e, 0x04, 0x5d, 0xaf, 0x0a, 0x9e, 0xca, 0xcf, 0x40, 0x32, 0x0b, 0xa4, 0xc4, 0xed, 0xce, 0x71, 0x4b, 0x8f, 0x6d, 0x4a, 0x54, 0xde, 0xa3, 0x0d, 0x1c, 0x62, + /* (2^ 47)P */ 0x91, 0x40, 0x8c, 0xa0, 0x36, 0x28, 0x87, 0x92, 0x45, 0x14, 0xc9, 0x10, 0xb0, 0x75, 0x83, 0xce, 0x94, 0x63, 0x27, 0x4f, 0x52, 0xeb, 0x72, 0x8a, 0x35, 0x36, 0xc8, 0x7e, 0xfa, 0xfc, 0x67, 0x26, + /* (2^ 48)P */ 0x2a, 0x75, 0xe8, 0x45, 0x33, 0x17, 0x4c, 0x7f, 0xa5, 0x79, 0x70, 0xee, 0xfe, 0x47, 0x1b, 0x06, 0x34, 0xff, 0x86, 0x9f, 0xfa, 0x9a, 0xdd, 0x25, 0x9c, 0xc8, 0x5d, 0x42, 0xf5, 0xce, 0x80, 0x37, + /* (2^ 49)P */ 0xe9, 0xb4, 0x3b, 0x51, 0x5a, 0x03, 0x46, 0x1a, 0xda, 0x5a, 0x57, 0xac, 0x79, 0xf3, 0x1e, 0x3e, 0x50, 0x4b, 0xa2, 0x5f, 0x1c, 0x5f, 0x8c, 0xc7, 0x22, 0x9f, 0xfd, 0x34, 0x76, 0x96, 0x1a, 0x32, + /* (2^ 50)P */ 0xfa, 0x27, 0x6e, 0x82, 0xb8, 0x07, 0x67, 0x94, 0xd0, 0x6f, 0x50, 0x4c, 0xd6, 0x84, 0xca, 0x3d, 0x36, 0x14, 0xe9, 0x75, 0x80, 0x21, 0x89, 0xc1, 0x84, 0x84, 
0x3b, 0x9b, 0x16, 0x84, 0x92, 0x6d, + /* (2^ 51)P */ 0xdf, 0x2d, 0x3f, 0x38, 0x40, 0xe8, 0x67, 0x3a, 0x75, 0x9b, 0x4f, 0x0c, 0xa3, 0xc9, 0xee, 0x33, 0x47, 0xef, 0x83, 0xa7, 0x6f, 0xc8, 0xc7, 0x3e, 0xc4, 0xfb, 0xc9, 0xba, 0x9f, 0x44, 0xec, 0x26, + /* (2^ 52)P */ 0x7d, 0x9e, 0x9b, 0xa0, 0xcb, 0x38, 0x0f, 0x5c, 0x8c, 0x47, 0xa3, 0x62, 0xc7, 0x8c, 0x16, 0x81, 0x1c, 0x12, 0xfc, 0x06, 0xd3, 0xb0, 0x23, 0x3e, 0xdd, 0xdc, 0xef, 0xa5, 0xa0, 0x8a, 0x23, 0x5a, + /* (2^ 53)P */ 0xff, 0x43, 0xea, 0xc4, 0x21, 0x61, 0xa2, 0x1b, 0xb5, 0x32, 0x88, 0x7c, 0x7f, 0xc7, 0xf8, 0x36, 0x9a, 0xf9, 0xdc, 0x0a, 0x0b, 0xea, 0xfb, 0x88, 0xf9, 0xeb, 0x5b, 0xc2, 0x8e, 0x93, 0xa9, 0x5c, + /* (2^ 54)P */ 0xa0, 0xcd, 0xfc, 0x51, 0x5e, 0x6a, 0x43, 0xd5, 0x3b, 0x89, 0xcd, 0xc2, 0x97, 0x47, 0xbc, 0x1d, 0x08, 0x4a, 0x22, 0xd3, 0x65, 0x6a, 0x34, 0x19, 0x66, 0xf4, 0x9a, 0x9b, 0xe4, 0x34, 0x50, 0x0f, + /* (2^ 55)P */ 0x6e, 0xb9, 0xe0, 0xa1, 0x67, 0x39, 0x3c, 0xf2, 0x88, 0x4d, 0x7a, 0x86, 0xfa, 0x08, 0x8b, 0xe5, 0x79, 0x16, 0x34, 0xa7, 0xc6, 0xab, 0x2f, 0xfb, 0x46, 0x69, 0x02, 0xb6, 0x1e, 0x38, 0x75, 0x2a, + /* (2^ 56)P */ 0xac, 0x20, 0x94, 0xc1, 0xe4, 0x3b, 0x0a, 0xc8, 0xdc, 0xb6, 0xf2, 0x81, 0xc6, 0xf6, 0xb1, 0x66, 0x88, 0x33, 0xe9, 0x61, 0x67, 0x03, 0xf7, 0x7c, 0xc4, 0xa4, 0x60, 0xa6, 0xd8, 0xbb, 0xab, 0x25, + /* (2^ 57)P */ 0x98, 0x51, 0xfd, 0x14, 0xba, 0x12, 0xea, 0x91, 0xa9, 0xff, 0x3c, 0x4a, 0xfc, 0x50, 0x49, 0x68, 0x28, 0xad, 0xf5, 0x30, 0x21, 0x84, 0x26, 0xf8, 0x41, 0xa4, 0x01, 0x53, 0xf7, 0x88, 0xa9, 0x3e, + /* (2^ 58)P */ 0x6f, 0x8c, 0x5f, 0x69, 0x9a, 0x10, 0x78, 0xc9, 0xf3, 0xc3, 0x30, 0x05, 0x4a, 0xeb, 0x46, 0x17, 0x95, 0x99, 0x45, 0xb4, 0x77, 0x6d, 0x4d, 0x44, 0xc7, 0x5c, 0x4e, 0x05, 0x8c, 0x2b, 0x95, 0x75, + /* (2^ 59)P */ 0xaa, 0xd6, 0xf4, 0x15, 0x79, 0x3f, 0x70, 0xa3, 0xd8, 0x47, 0x26, 0x2f, 0x20, 0x46, 0xc3, 0x66, 0x4b, 0x64, 0x1d, 0x81, 0xdf, 0x69, 0x14, 0xd0, 0x1f, 0xd7, 0xa5, 0x81, 0x7d, 0xa4, 0xfe, 0x77, + /* (2^ 60)P */ 0x81, 0xa3, 0x7c, 0xf5, 0x9e, 0x52, 0xe9, 0xc5, 0x1a, 0x88, 0x2f, 0xce, 0xb9, 0xb4, 0xee, 0x6e, 0xd6, 0x9b, 0x00, 0xe8, 0x28, 0x1a, 0xe9, 0xb6, 0xec, 0x3f, 0xfc, 0x9a, 0x3e, 0xbe, 0x80, 0x4b, + /* (2^ 61)P */ 0xc5, 0xd2, 0xae, 0x26, 0xc5, 0x73, 0x37, 0x7e, 0x9d, 0xa4, 0xc9, 0x53, 0xb4, 0xfc, 0x4a, 0x1b, 0x4d, 0xb2, 0xff, 0xba, 0xd7, 0xbd, 0x20, 0xa9, 0x0e, 0x40, 0x2d, 0x12, 0x9f, 0x69, 0x54, 0x7c, + /* (2^ 62)P */ 0xc8, 0x4b, 0xa9, 0x4f, 0xe1, 0xc8, 0x46, 0xef, 0x5e, 0xed, 0x52, 0x29, 0xce, 0x74, 0xb0, 0xe0, 0xd5, 0x85, 0xd8, 0xdb, 0xe1, 0x50, 0xa4, 0xbe, 0x2c, 0x71, 0x0f, 0x32, 0x49, 0x86, 0xb6, 0x61, + /* (2^ 63)P */ 0xd1, 0xbd, 0xcc, 0x09, 0x73, 0x5f, 0x48, 0x8a, 0x2d, 0x1a, 0x4d, 0x7d, 0x0d, 0x32, 0x06, 0xbd, 0xf4, 0xbe, 0x2d, 0x32, 0x73, 0x29, 0x23, 0x25, 0x70, 0xf7, 0x17, 0x8c, 0x75, 0xc4, 0x5d, 0x44, + /* (2^ 64)P */ 0x3c, 0x93, 0xc8, 0x7c, 0x17, 0x34, 0x04, 0xdb, 0x9f, 0x05, 0xea, 0x75, 0x21, 0xe8, 0x6f, 0xed, 0x34, 0xdb, 0x53, 0xc0, 0xfd, 0xbe, 0xfe, 0x1e, 0x99, 0xaf, 0x5d, 0xc6, 0x67, 0xe8, 0xdb, 0x4a, + /* (2^ 65)P */ 0xdf, 0x09, 0x06, 0xa9, 0xa2, 0x71, 0xcd, 0x3a, 0x50, 0x40, 0xd0, 0x6d, 0x85, 0x91, 0xe9, 0xe5, 0x3c, 0xc2, 0x57, 0x81, 0x68, 0x9b, 0xc6, 0x1e, 0x4d, 0xfe, 0x5c, 0x88, 0xf6, 0x27, 0x74, 0x69, + /* (2^ 66)P */ 0x51, 0xa8, 0xe1, 0x65, 0x9b, 0x7b, 0xbe, 0xd7, 0xdd, 0x36, 0xc5, 0x22, 0xd5, 0x28, 0x3d, 0xa0, 0x45, 0xb6, 0xd2, 0x8f, 0x65, 0x9d, 0x39, 0x28, 0xe1, 0x41, 0x26, 0x7c, 0xe1, 0xb7, 0xe5, 0x49, + /* (2^ 67)P */ 0xa4, 0x57, 0x04, 0x70, 0x98, 0x3a, 0x8c, 0x6f, 0x78, 0x67, 0xbb, 0x5e, 0xa2, 0xf0, 0x78, 0x50, 0x0f, 0x96, 0x82, 0xc3, 0xcb, 0x3c, 0x3c, 0xd1, 0xb1, 0x84, 
0xdf, 0xa7, 0x58, 0x32, 0x00, 0x2e, + /* (2^ 68)P */ 0x1c, 0x6a, 0x29, 0xe6, 0x9b, 0xf3, 0xd1, 0x8a, 0xb2, 0xbf, 0x5f, 0x2a, 0x65, 0xaa, 0xee, 0xc1, 0xcb, 0xf3, 0x26, 0xfd, 0x73, 0x06, 0xee, 0x33, 0xcc, 0x2c, 0x9d, 0xa6, 0x73, 0x61, 0x25, 0x59, + /* (2^ 69)P */ 0x41, 0xfc, 0x18, 0x4e, 0xaa, 0x07, 0xea, 0x41, 0x1e, 0xa5, 0x87, 0x7c, 0x52, 0x19, 0xfc, 0xd9, 0x6f, 0xca, 0x31, 0x58, 0x80, 0xcb, 0xaa, 0xbd, 0x4f, 0x69, 0x16, 0xc9, 0x2d, 0x65, 0x5b, 0x44, + /* (2^ 70)P */ 0x15, 0x23, 0x17, 0xf2, 0xa7, 0xa3, 0x92, 0xce, 0x64, 0x99, 0x1b, 0xe1, 0x2d, 0x28, 0xdc, 0x1e, 0x4a, 0x31, 0x4c, 0xe0, 0xaf, 0x3a, 0x82, 0xa1, 0x86, 0xf5, 0x7c, 0x43, 0x94, 0x2d, 0x0a, 0x79, + /* (2^ 71)P */ 0x09, 0xe0, 0xf6, 0x93, 0xfb, 0x47, 0xc4, 0x71, 0x76, 0x52, 0x84, 0x22, 0x67, 0xa5, 0x22, 0x89, 0x69, 0x51, 0x4f, 0x20, 0x3b, 0x90, 0x70, 0xbf, 0xfe, 0x19, 0xa3, 0x1b, 0x89, 0x89, 0x7a, 0x2f, + /* (2^ 72)P */ 0x0c, 0x14, 0xe2, 0x77, 0xb5, 0x8e, 0xa0, 0x02, 0xf4, 0xdc, 0x7b, 0x42, 0xd4, 0x4e, 0x9a, 0xed, 0xd1, 0x3c, 0x32, 0xe4, 0x44, 0xec, 0x53, 0x52, 0x5b, 0x35, 0xe9, 0x14, 0x3c, 0x36, 0x88, 0x3e, + /* (2^ 73)P */ 0x8c, 0x0b, 0x11, 0x77, 0x42, 0xc1, 0x66, 0xaa, 0x90, 0x33, 0xa2, 0x10, 0x16, 0x39, 0xe0, 0x1a, 0xa2, 0xc2, 0x3f, 0xc9, 0x12, 0xbd, 0x30, 0x20, 0xab, 0xc7, 0x55, 0x95, 0x57, 0x41, 0xe1, 0x3e, + /* (2^ 74)P */ 0x41, 0x7d, 0x6e, 0x6d, 0x3a, 0xde, 0x14, 0x92, 0xfe, 0x7e, 0xf1, 0x07, 0x86, 0xd8, 0xcd, 0x3c, 0x17, 0x12, 0xe1, 0xf8, 0x88, 0x12, 0x4f, 0x67, 0xd0, 0x93, 0x9f, 0x32, 0x0f, 0x25, 0x82, 0x56, + /* (2^ 75)P */ 0x6e, 0x39, 0x2e, 0x6d, 0x13, 0x0b, 0xf0, 0x6c, 0xbf, 0xde, 0x14, 0x10, 0x6f, 0xf8, 0x4c, 0x6e, 0x83, 0x4e, 0xcc, 0xbf, 0xb5, 0xb1, 0x30, 0x59, 0xb6, 0x16, 0xba, 0x8a, 0xb4, 0x69, 0x70, 0x04, + /* (2^ 76)P */ 0x93, 0x07, 0xb2, 0x69, 0xab, 0xe4, 0x4c, 0x0d, 0x9e, 0xfb, 0xd0, 0x97, 0x1a, 0xb9, 0x4d, 0xb2, 0x1d, 0xd0, 0x00, 0x4e, 0xf5, 0x50, 0xfa, 0xcd, 0xb5, 0xdd, 0x8b, 0x36, 0x85, 0x10, 0x1b, 0x22, + /* (2^ 77)P */ 0xd2, 0xd8, 0xe3, 0xb1, 0x68, 0x94, 0xe5, 0xe7, 0x93, 0x2f, 0x12, 0xbd, 0x63, 0x65, 0xc5, 0x53, 0x09, 0x3f, 0x66, 0xe0, 0x03, 0xa9, 0xe8, 0xee, 0x42, 0x3d, 0xbe, 0xcb, 0x62, 0xa6, 0xef, 0x61, + /* (2^ 78)P */ 0x2a, 0xab, 0x6e, 0xde, 0xdd, 0xdd, 0xf8, 0x2c, 0x31, 0xf2, 0x35, 0x14, 0xd5, 0x0a, 0xf8, 0x9b, 0x73, 0x49, 0xf0, 0xc9, 0xce, 0xda, 0xea, 0x5d, 0x27, 0x9b, 0xd2, 0x41, 0x5d, 0x5b, 0x27, 0x29, + /* (2^ 79)P */ 0x4f, 0xf1, 0xeb, 0x95, 0x08, 0x0f, 0xde, 0xcf, 0xa7, 0x05, 0x49, 0x05, 0x6b, 0xb9, 0xaa, 0xb9, 0xfd, 0x20, 0xc4, 0xa1, 0xd9, 0x0d, 0xe8, 0xca, 0xc7, 0xbb, 0x73, 0x16, 0x2f, 0xbf, 0x63, 0x0a, + /* (2^ 80)P */ 0x8c, 0xbc, 0x8f, 0x95, 0x11, 0x6e, 0x2f, 0x09, 0xad, 0x2f, 0x82, 0x04, 0xe8, 0x81, 0x2a, 0x67, 0x17, 0x25, 0xd5, 0x60, 0x15, 0x35, 0xc8, 0xca, 0xf8, 0x92, 0xf1, 0xc8, 0x22, 0x77, 0x3f, 0x6f, + /* (2^ 81)P */ 0xb7, 0x94, 0xe8, 0xc2, 0xcc, 0x90, 0xba, 0xf8, 0x0d, 0x9f, 0xff, 0x38, 0xa4, 0x57, 0x75, 0x2c, 0x59, 0x23, 0xe5, 0x5a, 0x85, 0x1d, 0x4d, 0x89, 0x69, 0x3d, 0x74, 0x7b, 0x15, 0x22, 0xe1, 0x68, + /* (2^ 82)P */ 0xf3, 0x19, 0xb9, 0xcf, 0x70, 0x55, 0x7e, 0xd8, 0xb9, 0x8d, 0x79, 0x95, 0xcd, 0xde, 0x2c, 0x3f, 0xce, 0xa2, 0xc0, 0x10, 0x47, 0x15, 0x21, 0x21, 0xb2, 0xc5, 0x6d, 0x24, 0x15, 0xa1, 0x66, 0x3c, + /* (2^ 83)P */ 0x72, 0xcb, 0x4e, 0x29, 0x62, 0xc5, 0xed, 0xcb, 0x16, 0x0b, 0x28, 0x6a, 0xc3, 0x43, 0x71, 0xba, 0x67, 0x8b, 0x07, 0xd4, 0xef, 0xc2, 0x10, 0x96, 0x1e, 0x4b, 0x6a, 0x94, 0x5d, 0x73, 0x44, 0x61, + /* (2^ 84)P */ 0x50, 0x33, 0x5b, 0xd7, 0x1e, 0x11, 0x6f, 0x53, 0x1b, 0xd8, 0x41, 0x20, 0x8c, 0xdb, 0x11, 0x02, 0x3c, 0x41, 0x10, 0x0e, 0x00, 0xb1, 0x3c, 0xf9, 0x76, 0x88, 
0x9e, 0x03, 0x3c, 0xfd, 0x9d, 0x14, + /* (2^ 85)P */ 0x5b, 0x15, 0x63, 0x6b, 0xe4, 0xdd, 0x79, 0xd4, 0x76, 0x79, 0x83, 0x3c, 0xe9, 0x15, 0x6e, 0xb6, 0x38, 0xe0, 0x13, 0x1f, 0x3b, 0xe4, 0xfd, 0xda, 0x35, 0x0b, 0x4b, 0x2e, 0x1a, 0xda, 0xaf, 0x5f, + /* (2^ 86)P */ 0x81, 0x75, 0x19, 0x17, 0xdf, 0xbb, 0x00, 0x36, 0xc2, 0xd2, 0x3c, 0xbe, 0x0b, 0x05, 0x72, 0x39, 0x86, 0xbe, 0xd5, 0xbd, 0x6d, 0x90, 0x38, 0x59, 0x0f, 0x86, 0x9b, 0x3f, 0xe4, 0xe5, 0xfc, 0x34, + /* (2^ 87)P */ 0x02, 0x4d, 0xd1, 0x42, 0xcd, 0xa4, 0xa8, 0x75, 0x65, 0xdf, 0x41, 0x34, 0xc5, 0xab, 0x8d, 0x82, 0xd3, 0x31, 0xe1, 0xd2, 0xed, 0xab, 0xdc, 0x33, 0x5f, 0xd2, 0x14, 0xb8, 0x6f, 0xd7, 0xba, 0x3e, + /* (2^ 88)P */ 0x0f, 0xe1, 0x70, 0x6f, 0x56, 0x6f, 0x90, 0xd4, 0x5a, 0x0f, 0x69, 0x51, 0xaa, 0xf7, 0x12, 0x5d, 0xf2, 0xfc, 0xce, 0x76, 0x6e, 0xb1, 0xad, 0x45, 0x99, 0x29, 0x23, 0xad, 0xae, 0x68, 0xf7, 0x01, + /* (2^ 89)P */ 0xbd, 0xfe, 0x48, 0x62, 0x7b, 0xc7, 0x6c, 0x2b, 0xfd, 0xaf, 0x3a, 0xec, 0x28, 0x06, 0xd3, 0x3c, 0x6a, 0x48, 0xef, 0xd4, 0x80, 0x0b, 0x1c, 0xce, 0x23, 0x6c, 0xf6, 0xa6, 0x2e, 0xff, 0x3b, 0x4c, + /* (2^ 90)P */ 0x5f, 0xeb, 0xea, 0x4a, 0x09, 0xc4, 0x2e, 0x3f, 0xa7, 0x2c, 0x37, 0x6e, 0x28, 0x9b, 0xb1, 0x61, 0x1d, 0x70, 0x2a, 0xde, 0x66, 0xa9, 0xef, 0x5e, 0xef, 0xe3, 0x55, 0xde, 0x65, 0x05, 0xb2, 0x23, + /* (2^ 91)P */ 0x57, 0x85, 0xd5, 0x79, 0x52, 0xca, 0x01, 0xe3, 0x4f, 0x87, 0xc2, 0x27, 0xce, 0xd4, 0xb2, 0x07, 0x67, 0x1d, 0xcf, 0x9d, 0x8a, 0xcd, 0x32, 0xa5, 0x56, 0xff, 0x2b, 0x3f, 0xe2, 0xfe, 0x52, 0x2a, + /* (2^ 92)P */ 0x3d, 0x66, 0xd8, 0x7c, 0xb3, 0xef, 0x24, 0x86, 0x94, 0x75, 0xbd, 0xff, 0x20, 0xac, 0xc7, 0xbb, 0x45, 0x74, 0xd3, 0x82, 0x9c, 0x5e, 0xb8, 0x57, 0x66, 0xec, 0xa6, 0x86, 0xcb, 0x52, 0x30, 0x7b, + /* (2^ 93)P */ 0x1e, 0xe9, 0x25, 0x25, 0xad, 0xf0, 0x82, 0x34, 0xa0, 0xdc, 0x8e, 0xd2, 0x43, 0x80, 0xb6, 0x2c, 0x3a, 0x00, 0x1b, 0x2e, 0x05, 0x6d, 0x4f, 0xaf, 0x0a, 0x1b, 0x78, 0x29, 0x25, 0x8c, 0x5f, 0x18, + /* (2^ 94)P */ 0xd6, 0xe0, 0x0c, 0xd8, 0x5b, 0xde, 0x41, 0xaa, 0xd6, 0xe9, 0x53, 0x68, 0x41, 0xb2, 0x07, 0x94, 0x3a, 0x4c, 0x7f, 0x35, 0x6e, 0xc3, 0x3e, 0x56, 0xce, 0x7b, 0x29, 0x0e, 0xdd, 0xb8, 0xc4, 0x4c, + /* (2^ 95)P */ 0x0e, 0x73, 0xb8, 0xff, 0x52, 0x1a, 0xfc, 0xa2, 0x37, 0x8e, 0x05, 0x67, 0x6e, 0xf1, 0x11, 0x18, 0xe1, 0x4e, 0xdf, 0xcd, 0x66, 0xa3, 0xf9, 0x10, 0x99, 0xf0, 0xb9, 0xa0, 0xc4, 0xa0, 0xf4, 0x72, + /* (2^ 96)P */ 0xa7, 0x4e, 0x3f, 0x66, 0x6f, 0xc0, 0x16, 0x8c, 0xba, 0x0f, 0x97, 0x4e, 0xf7, 0x3a, 0x3b, 0x69, 0x45, 0xc3, 0x9e, 0xd6, 0xf1, 0xe7, 0x02, 0x21, 0x89, 0x80, 0x8a, 0x96, 0xbc, 0x3c, 0xa5, 0x0b, + /* (2^ 97)P */ 0x37, 0x55, 0xa1, 0xfe, 0xc7, 0x9d, 0x3d, 0xca, 0x93, 0x64, 0x53, 0x51, 0xbb, 0x24, 0x68, 0x4c, 0xb1, 0x06, 0x40, 0x84, 0x14, 0x63, 0x88, 0xb9, 0x60, 0xcc, 0x54, 0xb4, 0x2a, 0xa7, 0xd2, 0x40, + /* (2^ 98)P */ 0x75, 0x09, 0x57, 0x12, 0xb7, 0xa1, 0x36, 0x59, 0x57, 0xa6, 0xbd, 0xde, 0x48, 0xd6, 0xb9, 0x91, 0xea, 0x30, 0x43, 0xb6, 0x4b, 0x09, 0x44, 0x33, 0xd0, 0x51, 0xee, 0x12, 0x0d, 0xa1, 0x6b, 0x00, + /* (2^ 99)P */ 0x58, 0x5d, 0xde, 0xf5, 0x68, 0x84, 0x22, 0x19, 0xb0, 0x05, 0xcc, 0x38, 0x4c, 0x2f, 0xb1, 0x0e, 0x90, 0x19, 0x60, 0xd5, 0x9d, 0x9f, 0x03, 0xa1, 0x0b, 0x0e, 0xff, 0x4f, 0xce, 0xd4, 0x02, 0x45, + /* (2^100)P */ 0x89, 0xc1, 0x37, 0x68, 0x10, 0x54, 0x20, 0xeb, 0x3c, 0xb9, 0xd3, 0x6d, 0x4c, 0x54, 0xf6, 0xd0, 0x4f, 0xd7, 0x16, 0xc4, 0x64, 0x70, 0x72, 0x40, 0xf0, 0x2e, 0x50, 0x4b, 0x11, 0xc6, 0x15, 0x6e, + /* (2^101)P */ 0x6b, 0xa7, 0xb1, 0xcf, 0x98, 0xa3, 0xf2, 0x4d, 0xb1, 0xf6, 0xf2, 0x19, 0x74, 0x6c, 0x25, 0x11, 0x43, 0x60, 0x6e, 0x06, 0x62, 0x79, 0x49, 0x4a, 0x44, 0x5b, 
0x35, 0x41, 0xab, 0x3a, 0x5b, 0x70, + /* (2^102)P */ 0xd8, 0xb1, 0x97, 0xd7, 0x36, 0xf5, 0x5e, 0x36, 0xdb, 0xf0, 0xdd, 0x22, 0xd6, 0x6b, 0x07, 0x00, 0x88, 0x5a, 0x57, 0xe0, 0xb0, 0x33, 0xbf, 0x3b, 0x4d, 0xca, 0xe4, 0xc8, 0x05, 0xaa, 0x77, 0x37, + /* (2^103)P */ 0x5f, 0xdb, 0x78, 0x55, 0xc8, 0x45, 0x27, 0x39, 0xe2, 0x5a, 0xae, 0xdb, 0x49, 0x41, 0xda, 0x6f, 0x67, 0x98, 0xdc, 0x8a, 0x0b, 0xb0, 0xf0, 0xb1, 0xa3, 0x1d, 0x6f, 0xd3, 0x37, 0x34, 0x96, 0x09, + /* (2^104)P */ 0x53, 0x38, 0xdc, 0xa5, 0x90, 0x4e, 0x82, 0x7e, 0xbd, 0x5c, 0x13, 0x1f, 0x64, 0xf6, 0xb5, 0xcc, 0xcc, 0x8f, 0xce, 0x87, 0x6c, 0xd8, 0x36, 0x67, 0x9f, 0x24, 0x04, 0x66, 0xe2, 0x3c, 0x5f, 0x62, + /* (2^105)P */ 0x3f, 0xf6, 0x02, 0x95, 0x05, 0xc8, 0x8a, 0xaf, 0x69, 0x14, 0x35, 0x2e, 0x0a, 0xe7, 0x05, 0x0c, 0x05, 0x63, 0x4b, 0x76, 0x9c, 0x2e, 0x29, 0x35, 0xc3, 0x3a, 0xe2, 0xc7, 0x60, 0x43, 0x39, 0x1a, + /* (2^106)P */ 0x64, 0x32, 0x18, 0x51, 0x32, 0xd5, 0xc6, 0xd5, 0x4f, 0xb7, 0xc2, 0x43, 0xbd, 0x5a, 0x06, 0x62, 0x9b, 0x3f, 0x97, 0x3b, 0xd0, 0xf5, 0xfb, 0xb5, 0x5e, 0x6e, 0x20, 0x61, 0x36, 0xda, 0xa3, 0x13, + /* (2^107)P */ 0xe5, 0x94, 0x5d, 0x72, 0x37, 0x58, 0xbd, 0xc6, 0xc5, 0x16, 0x50, 0x20, 0x12, 0x09, 0xe3, 0x18, 0x68, 0x3c, 0x03, 0x70, 0x15, 0xce, 0x88, 0x20, 0x87, 0x79, 0x83, 0x5c, 0x49, 0x1f, 0xba, 0x7f, + /* (2^108)P */ 0x9d, 0x07, 0xf9, 0xf2, 0x23, 0x74, 0x8c, 0x5a, 0xc5, 0x3f, 0x02, 0x34, 0x7b, 0x15, 0x35, 0x17, 0x51, 0xb3, 0xfa, 0xd2, 0x9a, 0xb4, 0xf9, 0xe4, 0x3c, 0xe3, 0x78, 0xc8, 0x72, 0xff, 0x91, 0x66, + /* (2^109)P */ 0x3e, 0xff, 0x5e, 0xdc, 0xde, 0x2a, 0x2c, 0x12, 0xf4, 0x6c, 0x95, 0xd8, 0xf1, 0x4b, 0xdd, 0xf8, 0xda, 0x5b, 0x9e, 0x9e, 0x5d, 0x20, 0x86, 0xeb, 0x43, 0xc7, 0x75, 0xd9, 0xb9, 0x92, 0x9b, 0x04, + /* (2^110)P */ 0x5a, 0xc0, 0xf6, 0xb0, 0x30, 0x97, 0x37, 0xa5, 0x53, 0xa5, 0xf3, 0xc6, 0xac, 0xff, 0xa0, 0x72, 0x6d, 0xcd, 0x0d, 0xb2, 0x34, 0x2c, 0x03, 0xb0, 0x4a, 0x16, 0xd5, 0x88, 0xbc, 0x9d, 0x0e, 0x47, + /* (2^111)P */ 0x47, 0xc0, 0x37, 0xa2, 0x0c, 0xf1, 0x9c, 0xb1, 0xa2, 0x81, 0x6c, 0x1f, 0x71, 0x66, 0x54, 0xb6, 0x43, 0x0b, 0xd8, 0x6d, 0xd1, 0x1b, 0x32, 0xb3, 0x8e, 0xbe, 0x5f, 0x0c, 0x60, 0x4f, 0xc1, 0x48, + /* (2^112)P */ 0x03, 0xc8, 0xa6, 0x4a, 0x26, 0x1c, 0x45, 0x66, 0xa6, 0x7d, 0xfa, 0xa4, 0x04, 0x39, 0x6e, 0xb6, 0x95, 0x83, 0x12, 0xb3, 0xb0, 0x19, 0x5f, 0xd4, 0x10, 0xbc, 0xc9, 0xc3, 0x27, 0x26, 0x60, 0x31, + /* (2^113)P */ 0x0d, 0xe1, 0xe4, 0x32, 0x48, 0xdc, 0x20, 0x31, 0xf7, 0x17, 0xc7, 0x56, 0x67, 0xc4, 0x20, 0xeb, 0x94, 0x02, 0x28, 0x67, 0x3f, 0x2e, 0xf5, 0x00, 0x09, 0xc5, 0x30, 0x47, 0xc1, 0x4f, 0x6d, 0x56, + /* (2^114)P */ 0x06, 0x72, 0x83, 0xfd, 0x40, 0x5d, 0x3a, 0x7e, 0x7a, 0x54, 0x59, 0x71, 0xdc, 0x26, 0xe9, 0xc1, 0x95, 0x60, 0x8d, 0xa6, 0xfb, 0x30, 0x67, 0x21, 0xa7, 0xce, 0x69, 0x3f, 0x84, 0xc3, 0xe8, 0x22, + /* (2^115)P */ 0x2b, 0x4b, 0x0e, 0x93, 0xe8, 0x74, 0xd0, 0x33, 0x16, 0x58, 0xd1, 0x84, 0x0e, 0x35, 0xe4, 0xb6, 0x65, 0x23, 0xba, 0xd6, 0x6a, 0xc2, 0x34, 0x55, 0xf3, 0xf3, 0xf1, 0x89, 0x2f, 0xc1, 0x73, 0x77, + /* (2^116)P */ 0xaa, 0x62, 0x79, 0xa5, 0x4d, 0x40, 0xba, 0x8c, 0x56, 0xce, 0x99, 0x19, 0xa8, 0x97, 0x98, 0x5b, 0xfc, 0x92, 0x16, 0x12, 0x2f, 0x86, 0x8e, 0x50, 0x91, 0xc2, 0x93, 0xa0, 0x7f, 0x90, 0x81, 0x3a, + /* (2^117)P */ 0x10, 0xa5, 0x25, 0x47, 0xff, 0xd0, 0xde, 0x0d, 0x03, 0xc5, 0x3f, 0x67, 0x10, 0xcc, 0xd8, 0x10, 0x89, 0x4e, 0x1f, 0x9f, 0x1c, 0x15, 0x9d, 0x5b, 0x4c, 0xa4, 0x09, 0xcb, 0xd5, 0xc1, 0xa5, 0x32, + /* (2^118)P */ 0xfb, 0x41, 0x05, 0xb9, 0x42, 0xa4, 0x0a, 0x1e, 0xdb, 0x85, 0xb4, 0xc1, 0x7c, 0xeb, 0x85, 0x5f, 0xe5, 0xf2, 0x9d, 0x8a, 0xce, 0x95, 0xe5, 0xbe, 0x36, 0x22, 
0x42, 0x22, 0xc7, 0x96, 0xe4, 0x25, + /* (2^119)P */ 0xb9, 0xe5, 0x0f, 0xcd, 0x46, 0x3c, 0xdf, 0x5e, 0x88, 0x33, 0xa4, 0xd2, 0x7e, 0x5a, 0xe7, 0x34, 0x52, 0xe3, 0x61, 0xd7, 0x11, 0xde, 0x88, 0xe4, 0x5c, 0x54, 0x85, 0xa0, 0x01, 0x8a, 0x87, 0x0e, + /* (2^120)P */ 0x04, 0xbb, 0x21, 0xe0, 0x77, 0x3c, 0x49, 0xba, 0x9a, 0x89, 0xdf, 0xc7, 0x43, 0x18, 0x4d, 0x2b, 0x67, 0x0d, 0xe8, 0x7a, 0x48, 0x7a, 0xa3, 0x9e, 0x94, 0x17, 0xe4, 0x11, 0x80, 0x95, 0xa9, 0x67, + /* (2^121)P */ 0x65, 0xb0, 0x97, 0x66, 0x1a, 0x05, 0x58, 0x4b, 0xd4, 0xa6, 0x6b, 0x8d, 0x7d, 0x3f, 0xe3, 0x47, 0xc1, 0x46, 0xca, 0x83, 0xd4, 0xa8, 0x4d, 0xbb, 0x0d, 0xdb, 0xc2, 0x81, 0xa1, 0xca, 0xbe, 0x68, + /* (2^122)P */ 0xa5, 0x9a, 0x98, 0x0b, 0xe9, 0x80, 0x89, 0x8d, 0x9b, 0xc9, 0x93, 0x2c, 0x4a, 0xb1, 0x5e, 0xf9, 0xa2, 0x73, 0x6e, 0x79, 0xc4, 0xc7, 0xc6, 0x51, 0x69, 0xb5, 0xef, 0xb5, 0x63, 0x83, 0x22, 0x6e, + /* (2^123)P */ 0xc8, 0x24, 0xd6, 0x2d, 0xb0, 0xc0, 0xbb, 0xc6, 0xee, 0x70, 0x81, 0xec, 0x7d, 0xb4, 0x7e, 0x77, 0xa9, 0xaf, 0xcf, 0x04, 0xa0, 0x15, 0xde, 0x3c, 0x9b, 0xbf, 0x60, 0x71, 0x08, 0xbc, 0xc6, 0x1d, + /* (2^124)P */ 0x02, 0x40, 0xc3, 0xee, 0x43, 0xe0, 0x07, 0x2e, 0x7f, 0xdc, 0x68, 0x7a, 0x67, 0xfc, 0xe9, 0x18, 0x9a, 0x5b, 0xd1, 0x8b, 0x18, 0x03, 0xda, 0xd8, 0x53, 0x82, 0x56, 0x00, 0xbb, 0xc3, 0xfb, 0x48, + /* (2^125)P */ 0xe1, 0x4c, 0x65, 0xfb, 0x4c, 0x7d, 0x54, 0x57, 0xad, 0xe2, 0x58, 0xa0, 0x82, 0x5b, 0x56, 0xd3, 0x78, 0x44, 0x15, 0xbf, 0x0b, 0xaf, 0x3e, 0xf6, 0x18, 0xbb, 0xdf, 0x14, 0xf1, 0x1e, 0x53, 0x47, + /* (2^126)P */ 0x87, 0xc5, 0x78, 0x42, 0x0a, 0x63, 0xec, 0xe1, 0xf3, 0x83, 0x8e, 0xca, 0x46, 0xd5, 0x07, 0x55, 0x2b, 0x0c, 0xdc, 0x3a, 0xc6, 0x35, 0xe1, 0x85, 0x4e, 0x84, 0x82, 0x56, 0xa8, 0xef, 0xa7, 0x0a, + /* (2^127)P */ 0x15, 0xf6, 0xe1, 0xb3, 0xa8, 0x1b, 0x69, 0x72, 0xfa, 0x3f, 0xbe, 0x1f, 0x70, 0xe9, 0xb4, 0x32, 0x68, 0x78, 0xbb, 0x39, 0x2e, 0xd9, 0xb6, 0x97, 0xe8, 0x39, 0x2e, 0xa0, 0xde, 0x53, 0xfe, 0x2c, + /* (2^128)P */ 0xb0, 0x52, 0xcd, 0x85, 0xcd, 0x92, 0x73, 0x68, 0x31, 0x98, 0xe2, 0x10, 0xc9, 0x66, 0xff, 0x27, 0x06, 0x2d, 0x83, 0xa9, 0x56, 0x45, 0x13, 0x97, 0xa0, 0xf8, 0x84, 0x0a, 0x36, 0xb0, 0x9b, 0x26, + /* (2^129)P */ 0x5c, 0xf8, 0x43, 0x76, 0x45, 0x55, 0x6e, 0x70, 0x1b, 0x7d, 0x59, 0x9b, 0x8c, 0xa4, 0x34, 0x37, 0x72, 0xa4, 0xef, 0xc6, 0xe8, 0x91, 0xee, 0x7a, 0xe0, 0xd9, 0xa9, 0x98, 0xc1, 0xab, 0xd6, 0x5c, + /* (2^130)P */ 0x1a, 0xe4, 0x3c, 0xcb, 0x06, 0xde, 0x04, 0x0e, 0x38, 0xe1, 0x02, 0x34, 0x89, 0xeb, 0xc6, 0xd8, 0x72, 0x37, 0x6e, 0x68, 0xbb, 0x59, 0x46, 0x90, 0xc8, 0xa8, 0x6b, 0x74, 0x71, 0xc3, 0x15, 0x72, + /* (2^131)P */ 0xd9, 0xa2, 0xe4, 0xea, 0x7e, 0xa9, 0x12, 0xfd, 0xc5, 0xf2, 0x94, 0x63, 0x51, 0xb7, 0x14, 0x95, 0x94, 0xf2, 0x08, 0x92, 0x80, 0xd5, 0x6f, 0x26, 0xb9, 0x26, 0x9a, 0x61, 0x85, 0x70, 0x84, 0x5c, + /* (2^132)P */ 0xea, 0x94, 0xd6, 0xfe, 0x10, 0x54, 0x98, 0x52, 0x54, 0xd2, 0x2e, 0x4a, 0x93, 0x5b, 0x90, 0x3c, 0x67, 0xe4, 0x3b, 0x2d, 0x69, 0x47, 0xbb, 0x10, 0xe1, 0xe9, 0xe5, 0x69, 0x2d, 0x3d, 0x3b, 0x06, + /* (2^133)P */ 0xeb, 0x7d, 0xa5, 0xdd, 0xee, 0x26, 0x27, 0x47, 0x91, 0x18, 0xf4, 0x10, 0xae, 0xc4, 0xb6, 0xef, 0x14, 0x76, 0x30, 0x7b, 0x91, 0x41, 0x16, 0x2b, 0x7c, 0x5b, 0xf4, 0xc4, 0x4f, 0x55, 0x7c, 0x11, + /* (2^134)P */ 0x12, 0x88, 0x9d, 0x8f, 0x11, 0xf3, 0x7c, 0xc0, 0x39, 0x79, 0x01, 0x50, 0x20, 0xd8, 0xdb, 0x01, 0x27, 0x28, 0x1b, 0x17, 0xf4, 0x03, 0xe8, 0xd7, 0xea, 0x25, 0xd2, 0x87, 0x74, 0xe8, 0x15, 0x10, + /* (2^135)P */ 0x4d, 0xcc, 0x3a, 0xd2, 0xfe, 0xe3, 0x8d, 0xc5, 0x2d, 0xbe, 0xa7, 0x94, 0xc2, 0x91, 0xdb, 0x50, 0x57, 0xf4, 0x9c, 0x1c, 0x3d, 0xd4, 0x94, 0x0b, 0x4a, 0x52, 
0x37, 0x6e, 0xfa, 0x40, 0x16, 0x6b, + /* (2^136)P */ 0x09, 0x0d, 0xda, 0x5f, 0x6c, 0x34, 0x2f, 0x69, 0x51, 0x31, 0x4d, 0xfa, 0x59, 0x1c, 0x0b, 0x20, 0x96, 0xa2, 0x77, 0x07, 0x76, 0x6f, 0xc4, 0xb8, 0xcf, 0xfb, 0xfd, 0x3f, 0x5f, 0x39, 0x38, 0x4b, + /* (2^137)P */ 0x71, 0xd6, 0x54, 0xbe, 0x00, 0x5e, 0xd2, 0x18, 0xa6, 0xab, 0xc8, 0xbe, 0x82, 0x05, 0xd5, 0x60, 0x82, 0xb9, 0x78, 0x3b, 0x26, 0x8f, 0xad, 0x87, 0x32, 0x04, 0xda, 0x9c, 0x4e, 0xf6, 0xfd, 0x50, + /* (2^138)P */ 0xf0, 0xdc, 0x78, 0xc5, 0xaa, 0x67, 0xf5, 0x90, 0x3b, 0x13, 0xa3, 0xf2, 0x0e, 0x9b, 0x1e, 0xef, 0x71, 0xde, 0xd9, 0x42, 0x92, 0xba, 0xeb, 0x0e, 0xc7, 0x01, 0x31, 0xf0, 0x9b, 0x3c, 0x47, 0x15, + /* (2^139)P */ 0x95, 0x80, 0xb7, 0x56, 0xae, 0xe8, 0x77, 0x7c, 0x8e, 0x07, 0x6f, 0x6e, 0x66, 0xe7, 0x78, 0xb6, 0x1f, 0xba, 0x48, 0x53, 0x61, 0xb9, 0xa0, 0x2d, 0x0b, 0x3f, 0x73, 0xff, 0xc1, 0x31, 0xf9, 0x7c, + /* (2^140)P */ 0x6c, 0x36, 0x0a, 0x0a, 0xf5, 0x57, 0xb3, 0x26, 0x32, 0xd7, 0x87, 0x2b, 0xf4, 0x8c, 0x70, 0xe9, 0xc0, 0xb2, 0x1c, 0xf9, 0xa5, 0xee, 0x3a, 0xc1, 0x4c, 0xbb, 0x43, 0x11, 0x99, 0x0c, 0xd9, 0x35, + /* (2^141)P */ 0xdc, 0xd9, 0xa0, 0xa9, 0x04, 0xc4, 0xc1, 0x47, 0x51, 0xd2, 0x72, 0x19, 0x45, 0x58, 0x9e, 0x65, 0x31, 0x8c, 0xb3, 0x73, 0xc4, 0xa8, 0x75, 0x38, 0x24, 0x1f, 0x56, 0x79, 0xd3, 0x9e, 0xbd, 0x1f, + /* (2^142)P */ 0x8d, 0xc2, 0x1e, 0xd4, 0x6f, 0xbc, 0xfa, 0x11, 0xca, 0x2d, 0x2a, 0xcd, 0xe3, 0xdf, 0xf8, 0x7e, 0x95, 0x45, 0x40, 0x8c, 0x5d, 0x3b, 0xe7, 0x72, 0x27, 0x2f, 0xb7, 0x54, 0x49, 0xfa, 0x35, 0x61, + /* (2^143)P */ 0x9c, 0xb6, 0x24, 0xde, 0xa2, 0x32, 0xfc, 0xcc, 0x88, 0x5d, 0x09, 0x1f, 0x8c, 0x69, 0x55, 0x3f, 0x29, 0xf9, 0xc3, 0x5a, 0xed, 0x50, 0x33, 0xbe, 0xeb, 0x7e, 0x47, 0xca, 0x06, 0xf8, 0x9b, 0x5e, + /* (2^144)P */ 0x68, 0x9f, 0x30, 0x3c, 0xb6, 0x8f, 0xce, 0xe9, 0xf4, 0xf9, 0xe1, 0x65, 0x35, 0xf6, 0x76, 0x53, 0xf1, 0x93, 0x63, 0x5a, 0xb3, 0xcf, 0xaf, 0xd1, 0x06, 0x35, 0x62, 0xe5, 0xed, 0xa1, 0x32, 0x66, + /* (2^145)P */ 0x4c, 0xed, 0x2d, 0x0c, 0x39, 0x6c, 0x7d, 0x0b, 0x1f, 0xcb, 0x04, 0xdf, 0x81, 0x32, 0xcb, 0x56, 0xc7, 0xc3, 0xec, 0x49, 0x12, 0x5a, 0x30, 0x66, 0x2a, 0xa7, 0x8c, 0xa3, 0x60, 0x8b, 0x58, 0x5d, + /* (2^146)P */ 0x2d, 0xf4, 0xe5, 0xe8, 0x78, 0xbf, 0xec, 0xa6, 0xec, 0x3e, 0x8a, 0x3c, 0x4b, 0xb4, 0xee, 0x86, 0x04, 0x16, 0xd2, 0xfb, 0x48, 0x9c, 0x21, 0xec, 0x31, 0x67, 0xc3, 0x17, 0xf5, 0x1a, 0xaf, 0x1a, + /* (2^147)P */ 0xe7, 0xbd, 0x69, 0x67, 0x83, 0xa2, 0x06, 0xc3, 0xdb, 0x2a, 0x1e, 0x2b, 0x62, 0x80, 0x82, 0x20, 0xa6, 0x94, 0xff, 0xfb, 0x1f, 0xf5, 0x27, 0x80, 0x6b, 0xf2, 0x24, 0x11, 0xce, 0xa1, 0xcf, 0x76, + /* (2^148)P */ 0xb6, 0xab, 0x22, 0x24, 0x56, 0x00, 0xeb, 0x18, 0xc3, 0x29, 0x8c, 0x8f, 0xd5, 0xc4, 0x77, 0xf3, 0x1a, 0x56, 0x31, 0xf5, 0x07, 0xc2, 0xbb, 0x4d, 0x27, 0x8a, 0x12, 0x82, 0xf0, 0xb7, 0x53, 0x02, + /* (2^149)P */ 0xe0, 0x17, 0x2c, 0xb6, 0x1c, 0x09, 0x1f, 0x3d, 0xa9, 0x28, 0x46, 0xd6, 0xab, 0xe1, 0x60, 0x48, 0x53, 0x42, 0x9d, 0x30, 0x36, 0x74, 0xd1, 0x52, 0x76, 0xe5, 0xfa, 0x3e, 0xe1, 0x97, 0x6f, 0x35, + /* (2^150)P */ 0x5b, 0x53, 0x50, 0xa1, 0x1a, 0xe1, 0x51, 0xd3, 0xcc, 0x78, 0xd8, 0x1d, 0xbb, 0x45, 0x6b, 0x3e, 0x98, 0x2c, 0xd9, 0xbe, 0x28, 0x61, 0x77, 0x0c, 0xb8, 0x85, 0x28, 0x03, 0x93, 0xae, 0x34, 0x1d, + /* (2^151)P */ 0xc3, 0xa4, 0x5b, 0xa8, 0x8c, 0x48, 0xa0, 0x4b, 0xce, 0xe6, 0x9c, 0x3c, 0xc3, 0x48, 0x53, 0x98, 0x70, 0xa7, 0xbd, 0x97, 0x6f, 0x4c, 0x12, 0x66, 0x4a, 0x12, 0x54, 0x06, 0x29, 0xa0, 0x81, 0x0f, + /* (2^152)P */ 0xfd, 0x86, 0x9b, 0x56, 0xa6, 0x9c, 0xd0, 0x9e, 0x2d, 0x9a, 0xaf, 0x18, 0xfd, 0x09, 0x10, 0x81, 0x0a, 0xc2, 0xd8, 0x93, 0x3f, 0xd0, 0x08, 0xff, 0x6b, 0xf2, 
0xae, 0x9f, 0x19, 0x48, 0xa1, 0x52, + /* (2^153)P */ 0x73, 0x1b, 0x8d, 0x2d, 0xdc, 0xf9, 0x03, 0x3e, 0x70, 0x1a, 0x96, 0x73, 0x18, 0x80, 0x05, 0x42, 0x70, 0x59, 0xa3, 0x41, 0xf0, 0x87, 0xd9, 0xc0, 0x49, 0xd5, 0xc0, 0xa1, 0x15, 0x1f, 0xaa, 0x07, + /* (2^154)P */ 0x24, 0x72, 0xd2, 0x8c, 0xe0, 0x6c, 0xd4, 0xdf, 0x39, 0x42, 0x4e, 0x93, 0x4f, 0x02, 0x0a, 0x6d, 0x59, 0x7b, 0x89, 0x99, 0x63, 0x7a, 0x8a, 0x80, 0xa2, 0x95, 0x3d, 0xe1, 0xe9, 0x56, 0x45, 0x0a, + /* (2^155)P */ 0x45, 0x30, 0xc1, 0xe9, 0x1f, 0x99, 0x1a, 0xd2, 0xb8, 0x51, 0x77, 0xfe, 0x48, 0x85, 0x0e, 0x9b, 0x35, 0x00, 0xf3, 0x4b, 0xcb, 0x43, 0xa6, 0x5d, 0x21, 0xf7, 0x40, 0x39, 0xd6, 0x28, 0xdb, 0x77, + /* (2^156)P */ 0x11, 0x90, 0xdc, 0x4a, 0x61, 0xeb, 0x5e, 0xfc, 0xeb, 0x11, 0xc4, 0xe8, 0x9a, 0x41, 0x29, 0x52, 0x74, 0xcf, 0x1d, 0x7d, 0x78, 0xe7, 0xc3, 0x9e, 0xb5, 0x4c, 0x6e, 0x21, 0x3e, 0x05, 0x0d, 0x34, + /* (2^157)P */ 0xb4, 0xf2, 0x8d, 0xb4, 0x39, 0xaf, 0xc7, 0xca, 0x94, 0x0a, 0xa1, 0x71, 0x28, 0xec, 0xfa, 0xc0, 0xed, 0x75, 0xa5, 0x5c, 0x24, 0x69, 0x0a, 0x14, 0x4c, 0x3a, 0x27, 0x34, 0x71, 0xc3, 0xf1, 0x0c, + /* (2^158)P */ 0xa5, 0xb8, 0x24, 0xc2, 0x6a, 0x30, 0xee, 0xc8, 0xb0, 0x30, 0x49, 0xcb, 0x7c, 0xee, 0xea, 0x57, 0x4f, 0xe7, 0xcb, 0xaa, 0xbd, 0x06, 0xe8, 0xa1, 0x7d, 0x65, 0xeb, 0x2e, 0x74, 0x62, 0x9a, 0x7d, + /* (2^159)P */ 0x30, 0x48, 0x6c, 0x54, 0xef, 0xb6, 0xb6, 0x9e, 0x2e, 0x6e, 0xb3, 0xdd, 0x1f, 0xca, 0x5c, 0x88, 0x05, 0x71, 0x0d, 0xef, 0x83, 0xf3, 0xb9, 0xe6, 0x12, 0x04, 0x2e, 0x9d, 0xef, 0x4f, 0x65, 0x58, + /* (2^160)P */ 0x26, 0x8e, 0x0e, 0xbe, 0xff, 0xc4, 0x05, 0xa9, 0x6e, 0x81, 0x31, 0x9b, 0xdf, 0xe5, 0x2d, 0x94, 0xe1, 0x88, 0x2e, 0x80, 0x3f, 0x72, 0x7d, 0x49, 0x8d, 0x40, 0x2f, 0x60, 0xea, 0x4d, 0x68, 0x30, + /* (2^161)P */ 0x34, 0xcb, 0xe6, 0xa3, 0x78, 0xa2, 0xe5, 0x21, 0xc4, 0x1d, 0x15, 0x5b, 0x6f, 0x6e, 0xfb, 0xae, 0x15, 0xca, 0x77, 0x9d, 0x04, 0x8e, 0x0b, 0xb3, 0x81, 0x89, 0xb9, 0x53, 0xcf, 0xc9, 0xc3, 0x28, + /* (2^162)P */ 0x2a, 0xdd, 0x6c, 0x55, 0x21, 0xb7, 0x7f, 0x28, 0x74, 0x22, 0x02, 0x97, 0xa8, 0x7c, 0x31, 0x0d, 0x58, 0x32, 0x54, 0x3a, 0x42, 0xc7, 0x68, 0x74, 0x2f, 0x64, 0xb5, 0x4e, 0x46, 0x11, 0x7f, 0x4a, + /* (2^163)P */ 0xa6, 0x3a, 0x19, 0x4d, 0x77, 0xa4, 0x37, 0xa2, 0xa1, 0x29, 0x21, 0xa9, 0x6e, 0x98, 0x65, 0xd8, 0x88, 0x1a, 0x7c, 0xf8, 0xec, 0x15, 0xc5, 0x24, 0xeb, 0xf5, 0x39, 0x5f, 0x57, 0x03, 0x40, 0x60, + /* (2^164)P */ 0x27, 0x9b, 0x0a, 0x57, 0x89, 0xf1, 0xb9, 0x47, 0x78, 0x4b, 0x5e, 0x46, 0xde, 0xce, 0x98, 0x2b, 0x20, 0x5c, 0xb8, 0xdb, 0x51, 0xf5, 0x6d, 0x02, 0x01, 0x19, 0xe2, 0x47, 0x10, 0xd9, 0xfc, 0x74, + /* (2^165)P */ 0xa3, 0xbf, 0xc1, 0x23, 0x0a, 0xa9, 0xe2, 0x13, 0xf6, 0x19, 0x85, 0x47, 0x4e, 0x07, 0xb0, 0x0c, 0x44, 0xcf, 0xf6, 0x3a, 0xbe, 0xcb, 0xf1, 0x5f, 0xbe, 0x2d, 0x81, 0xbe, 0x38, 0x54, 0xfe, 0x67, + /* (2^166)P */ 0xb0, 0x05, 0x0f, 0xa4, 0x4f, 0xf6, 0x3c, 0xd1, 0x87, 0x37, 0x28, 0x32, 0x2f, 0xfb, 0x4d, 0x05, 0xea, 0x2a, 0x0d, 0x7f, 0x5b, 0x91, 0x73, 0x41, 0x4e, 0x0d, 0x61, 0x1f, 0x4f, 0x14, 0x2f, 0x48, + /* (2^167)P */ 0x34, 0x82, 0x7f, 0xb4, 0x01, 0x02, 0x21, 0xf6, 0x90, 0xb9, 0x70, 0x9e, 0x92, 0xe1, 0x0a, 0x5d, 0x7c, 0x56, 0x49, 0xb0, 0x55, 0xf4, 0xd7, 0xdc, 0x01, 0x6f, 0x91, 0xf0, 0xf1, 0xd0, 0x93, 0x7e, + /* (2^168)P */ 0xfa, 0xb4, 0x7d, 0x8a, 0xf1, 0xcb, 0x79, 0xdd, 0x2f, 0xc6, 0x74, 0x6f, 0xbf, 0x91, 0x83, 0xbe, 0xbd, 0x91, 0x82, 0x4b, 0xd1, 0x45, 0x71, 0x02, 0x05, 0x17, 0xbf, 0x2c, 0xea, 0x73, 0x5a, 0x58, + /* (2^169)P */ 0xb2, 0x0d, 0x8a, 0x92, 0x3e, 0xa0, 0x5c, 0x48, 0xe7, 0x57, 0x28, 0x74, 0xa5, 0x01, 0xfc, 0x10, 0xa7, 0x51, 0xd5, 0xd6, 0xdb, 0x2e, 0x48, 0x2f, 0x8a, 0xdb, 
0x8f, 0x04, 0xb5, 0x33, 0x04, 0x0f, + /* (2^170)P */ 0x47, 0x62, 0xdc, 0xd7, 0x8d, 0x2e, 0xda, 0x60, 0x9a, 0x81, 0xd4, 0x8c, 0xd3, 0xc9, 0xb4, 0x88, 0x97, 0x66, 0xf6, 0x01, 0xc0, 0x3a, 0x03, 0x13, 0x75, 0x7d, 0x36, 0x3b, 0xfe, 0x24, 0x3b, 0x27, + /* (2^171)P */ 0xd4, 0xb9, 0xb3, 0x31, 0x6a, 0xf6, 0xe8, 0xc6, 0xd5, 0x49, 0xdf, 0x94, 0xa4, 0x14, 0x15, 0x28, 0xa7, 0x3d, 0xb2, 0xc8, 0xdf, 0x6f, 0x72, 0xd1, 0x48, 0xe5, 0xde, 0x03, 0xd1, 0xe7, 0x3a, 0x4b, + /* (2^172)P */ 0x7e, 0x9d, 0x4b, 0xce, 0x19, 0x6e, 0x25, 0xc6, 0x1c, 0xc6, 0xe3, 0x86, 0xf1, 0x5c, 0x5c, 0xff, 0x45, 0xc1, 0x8e, 0x4b, 0xa3, 0x3c, 0xc6, 0xac, 0x74, 0x65, 0xe6, 0xfe, 0x88, 0x18, 0x62, 0x74, + /* (2^173)P */ 0x1e, 0x0a, 0x29, 0x45, 0x96, 0x40, 0x6f, 0x95, 0x2e, 0x96, 0x3a, 0x26, 0xe3, 0xf8, 0x0b, 0xef, 0x7b, 0x64, 0xc2, 0x5e, 0xeb, 0x50, 0x6a, 0xed, 0x02, 0x75, 0xca, 0x9d, 0x3a, 0x28, 0x94, 0x06, + /* (2^174)P */ 0xd1, 0xdc, 0xa2, 0x43, 0x36, 0x96, 0x9b, 0x76, 0x53, 0x53, 0xfc, 0x09, 0xea, 0xc8, 0xb7, 0x42, 0xab, 0x7e, 0x39, 0x13, 0xee, 0x2a, 0x00, 0x4f, 0x3a, 0xd6, 0xb7, 0x19, 0x2c, 0x5e, 0x00, 0x63, + /* (2^175)P */ 0xea, 0x3b, 0x02, 0x63, 0xda, 0x36, 0x67, 0xca, 0xb7, 0x99, 0x2a, 0xb1, 0x6d, 0x7f, 0x6c, 0x96, 0xe1, 0xc5, 0x37, 0xc5, 0x90, 0x93, 0xe0, 0xac, 0xee, 0x89, 0xaa, 0xa1, 0x63, 0x60, 0x69, 0x0b, + /* (2^176)P */ 0xe5, 0x56, 0x8c, 0x28, 0x97, 0x3e, 0xb0, 0xeb, 0xe8, 0x8b, 0x8c, 0x93, 0x9f, 0x9f, 0x2a, 0x43, 0x71, 0x7f, 0x71, 0x5b, 0x3d, 0xa9, 0xa5, 0xa6, 0x97, 0x9d, 0x8f, 0xe1, 0xc3, 0xb4, 0x5f, 0x1a, + /* (2^177)P */ 0xce, 0xcd, 0x60, 0x1c, 0xad, 0xe7, 0x94, 0x1c, 0xa0, 0xc4, 0x02, 0xfc, 0x43, 0x2a, 0x20, 0xee, 0x20, 0x6a, 0xc4, 0x67, 0xd8, 0xe4, 0xaf, 0x8d, 0x58, 0x7b, 0xc2, 0x8a, 0x3c, 0x26, 0x10, 0x0a, + /* (2^178)P */ 0x4a, 0x2a, 0x43, 0xe4, 0xdf, 0xa9, 0xde, 0xd0, 0xc5, 0x77, 0x92, 0xbe, 0x7b, 0xf8, 0x6a, 0x85, 0x1a, 0xc7, 0x12, 0xc2, 0xac, 0x72, 0x84, 0xce, 0x91, 0x1e, 0xbb, 0x9b, 0x6d, 0x1b, 0x15, 0x6f, + /* (2^179)P */ 0x6a, 0xd5, 0xee, 0x7c, 0x52, 0x6c, 0x77, 0x26, 0xec, 0xfa, 0xf8, 0xfb, 0xb7, 0x1c, 0x21, 0x7d, 0xcc, 0x09, 0x46, 0xfd, 0xa6, 0x66, 0xae, 0x37, 0x42, 0x0c, 0x77, 0xd2, 0x02, 0xb7, 0x81, 0x1f, + /* (2^180)P */ 0x92, 0x83, 0xc5, 0xea, 0x57, 0xb0, 0xb0, 0x2f, 0x9d, 0x4e, 0x74, 0x29, 0xfe, 0x89, 0xdd, 0xe1, 0xf8, 0xb4, 0xbe, 0x17, 0xeb, 0xf8, 0x64, 0xc9, 0x1e, 0xd4, 0xa2, 0xc9, 0x73, 0x10, 0x57, 0x29, + /* (2^181)P */ 0x54, 0xe2, 0xc0, 0x81, 0x89, 0xa1, 0x48, 0xa9, 0x30, 0x28, 0xb2, 0x65, 0x9b, 0x36, 0xf6, 0x2d, 0xc6, 0xd3, 0xcf, 0x5f, 0xd7, 0xb2, 0x3e, 0xa3, 0x1f, 0xa0, 0x99, 0x41, 0xec, 0xd6, 0x8c, 0x07, + /* (2^182)P */ 0x2f, 0x0d, 0x90, 0xad, 0x41, 0x4a, 0x58, 0x4a, 0x52, 0x4c, 0xc7, 0xe2, 0x78, 0x2b, 0x14, 0x32, 0x78, 0xc9, 0x31, 0x84, 0x33, 0xe8, 0xc4, 0x68, 0xc2, 0x9f, 0x68, 0x08, 0x90, 0xea, 0x69, 0x7f, + /* (2^183)P */ 0x65, 0x82, 0xa3, 0x46, 0x1e, 0xc8, 0xf2, 0x52, 0xfd, 0x32, 0xa8, 0x04, 0x2d, 0x07, 0x78, 0xfd, 0x94, 0x9e, 0x35, 0x25, 0xfa, 0xd5, 0xd7, 0x8c, 0xd2, 0x29, 0xcc, 0x54, 0x74, 0x1b, 0xe7, 0x4d, + /* (2^184)P */ 0xc9, 0x6a, 0xda, 0x1e, 0xad, 0x60, 0xeb, 0x42, 0x3a, 0x9c, 0xc0, 0xdb, 0xdf, 0x37, 0xad, 0x0a, 0x91, 0xc1, 0x3c, 0xe3, 0x71, 0x4b, 0x00, 0x81, 0x3c, 0x80, 0x22, 0x51, 0x34, 0xbe, 0xe6, 0x44, + /* (2^185)P */ 0xdb, 0x20, 0x19, 0xba, 0x88, 0x83, 0xfe, 0x03, 0x08, 0xb0, 0x0d, 0x15, 0x32, 0x7c, 0xd5, 0xf5, 0x29, 0x0c, 0xf6, 0x1a, 0x28, 0xc4, 0xc8, 0x49, 0xee, 0x1a, 0x70, 0xde, 0x18, 0xb5, 0xed, 0x21, + /* (2^186)P */ 0x99, 0xdc, 0x06, 0x8f, 0x41, 0x3e, 0xb6, 0x7f, 0xb8, 0xd7, 0x66, 0xc1, 0x99, 0x0d, 0x46, 0xa4, 0x83, 0x0a, 0x52, 0xce, 0x48, 0x52, 0xdd, 0x24, 0x58, 0x83, 
0x92, 0x2b, 0x71, 0xad, 0xc3, 0x5e, + /* (2^187)P */ 0x0f, 0x93, 0x17, 0xbd, 0x5f, 0x2a, 0x02, 0x15, 0xe3, 0x70, 0x25, 0xd8, 0x77, 0x4a, 0xf6, 0xa4, 0x12, 0x37, 0x78, 0x15, 0x69, 0x8d, 0xbc, 0x12, 0xbb, 0x0a, 0x62, 0xfc, 0xc0, 0x94, 0x81, 0x49, + /* (2^188)P */ 0x82, 0x6c, 0x68, 0x55, 0xd2, 0xd9, 0xa2, 0x38, 0xf0, 0x21, 0x3e, 0x19, 0xd9, 0x6b, 0x5c, 0x78, 0x84, 0x54, 0x4a, 0xb2, 0x1a, 0xc8, 0xd5, 0xe4, 0x89, 0x09, 0xe2, 0xb2, 0x60, 0x78, 0x30, 0x56, + /* (2^189)P */ 0xc4, 0x74, 0x4d, 0x8b, 0xf7, 0x55, 0x9d, 0x42, 0x31, 0x01, 0x35, 0x43, 0x46, 0x83, 0xf1, 0x22, 0xff, 0x1f, 0xc7, 0x98, 0x45, 0xc2, 0x60, 0x1e, 0xef, 0x83, 0x99, 0x97, 0x14, 0xf0, 0xf2, 0x59, + /* (2^190)P */ 0x44, 0x4a, 0x49, 0xeb, 0x56, 0x7d, 0xa4, 0x46, 0x8e, 0xa1, 0x36, 0xd6, 0x54, 0xa8, 0x22, 0x3e, 0x3b, 0x1c, 0x49, 0x74, 0x52, 0xe1, 0x46, 0xb3, 0xe7, 0xcd, 0x90, 0x53, 0x4e, 0xfd, 0xea, 0x2c, + /* (2^191)P */ 0x75, 0x66, 0x0d, 0xbe, 0x38, 0x85, 0x8a, 0xba, 0x23, 0x8e, 0x81, 0x50, 0xbb, 0x74, 0x90, 0x4b, 0xc3, 0x04, 0xd3, 0x85, 0x90, 0xb8, 0xda, 0xcb, 0xc4, 0x92, 0x61, 0xe5, 0xe0, 0x4f, 0xa2, 0x61, + /* (2^192)P */ 0xcb, 0x5b, 0x52, 0xdb, 0xe6, 0x15, 0x76, 0xcb, 0xca, 0xe4, 0x67, 0xa5, 0x35, 0x8c, 0x7d, 0xdd, 0x69, 0xdd, 0xfc, 0xca, 0x3a, 0x15, 0xb4, 0xe6, 0x66, 0x97, 0x3c, 0x7f, 0x09, 0x8e, 0x66, 0x2d, + /* (2^193)P */ 0xf0, 0x5e, 0xe5, 0x5c, 0x26, 0x7e, 0x7e, 0xa5, 0x67, 0xb9, 0xd4, 0x7c, 0x52, 0x4e, 0x9f, 0x5d, 0xe5, 0xd1, 0x2f, 0x49, 0x06, 0x36, 0xc8, 0xfb, 0xae, 0xf7, 0xc3, 0xb7, 0xbe, 0x52, 0x0d, 0x09, + /* (2^194)P */ 0x7c, 0x4d, 0x7b, 0x1e, 0x5a, 0x51, 0xb9, 0x09, 0xc0, 0x44, 0xda, 0x99, 0x25, 0x6a, 0x26, 0x1f, 0x04, 0x55, 0xc5, 0xe2, 0x48, 0x95, 0xc4, 0xa1, 0xcc, 0x15, 0x6f, 0x12, 0x87, 0x42, 0xf0, 0x7e, + /* (2^195)P */ 0x15, 0xef, 0x30, 0xbd, 0x9d, 0x65, 0xd1, 0xfe, 0x7b, 0x27, 0xe0, 0xc4, 0xee, 0xb9, 0x4a, 0x8b, 0x91, 0x32, 0xdf, 0xa5, 0x36, 0x62, 0x4d, 0x88, 0x88, 0xf7, 0x5c, 0xbf, 0xa6, 0x6e, 0xd9, 0x1f, + /* (2^196)P */ 0x9a, 0x0d, 0x19, 0x1f, 0x98, 0x61, 0xa1, 0x42, 0xc1, 0x52, 0x60, 0x7e, 0x50, 0x49, 0xd8, 0x61, 0xd5, 0x2c, 0x5a, 0x28, 0xbf, 0x13, 0xe1, 0x9f, 0xd8, 0x85, 0xad, 0xdb, 0x76, 0xd6, 0x22, 0x7c, + /* (2^197)P */ 0x7d, 0xd2, 0xfb, 0x2b, 0xed, 0x70, 0xe7, 0x82, 0xa5, 0xf5, 0x96, 0xe9, 0xec, 0xb2, 0x05, 0x4c, 0x50, 0x01, 0x90, 0xb0, 0xc2, 0xa9, 0x40, 0xcd, 0x64, 0xbf, 0xd9, 0x13, 0x92, 0x31, 0x95, 0x58, + /* (2^198)P */ 0x08, 0x2e, 0xea, 0x3f, 0x70, 0x5d, 0xcc, 0xe7, 0x8c, 0x18, 0xe2, 0x58, 0x12, 0x49, 0x0c, 0xb5, 0xf0, 0x5b, 0x20, 0x48, 0xaa, 0x0b, 0xe3, 0xcc, 0x62, 0x2d, 0xa3, 0xcf, 0x9c, 0x65, 0x7c, 0x53, + /* (2^199)P */ 0x88, 0xc0, 0xcf, 0x98, 0x3a, 0x62, 0xb6, 0x37, 0xa4, 0xac, 0xd6, 0xa4, 0x1f, 0xed, 0x9b, 0xfe, 0xb0, 0xd1, 0xa8, 0x56, 0x8e, 0x9b, 0xd2, 0x04, 0x75, 0x95, 0x51, 0x0b, 0xc4, 0x71, 0x5f, 0x72, + /* (2^200)P */ 0xe6, 0x9c, 0x33, 0xd0, 0x9c, 0xf8, 0xc7, 0x28, 0x8b, 0xc1, 0xdd, 0x69, 0x44, 0xb1, 0x67, 0x83, 0x2c, 0x65, 0xa1, 0xa6, 0x83, 0xda, 0x3a, 0x88, 0x17, 0x6c, 0x4d, 0x03, 0x74, 0x19, 0x5f, 0x58, + /* (2^201)P */ 0x88, 0x91, 0xb1, 0xf1, 0x66, 0xb2, 0xcf, 0x89, 0x17, 0x52, 0xc3, 0xe7, 0x63, 0x48, 0x3b, 0xe6, 0x6a, 0x52, 0xc0, 0xb4, 0xa6, 0x9d, 0x8c, 0xd8, 0x35, 0x46, 0x95, 0xf0, 0x9d, 0x5c, 0x03, 0x3e, + /* (2^202)P */ 0x9d, 0xde, 0x45, 0xfb, 0x12, 0x54, 0x9d, 0xdd, 0x0d, 0xf4, 0xcf, 0xe4, 0x32, 0x45, 0x68, 0xdd, 0x1c, 0x67, 0x1d, 0x15, 0x9b, 0x99, 0x5c, 0x4b, 0x90, 0xf6, 0xe7, 0x11, 0xc8, 0x2c, 0x8c, 0x2d, + /* (2^203)P */ 0x40, 0x5d, 0x05, 0x90, 0x1d, 0xbe, 0x54, 0x7f, 0x40, 0xaf, 0x4a, 0x46, 0xdf, 0xc5, 0x64, 0xa4, 0xbe, 0x17, 0xe9, 0xf0, 0x24, 0x96, 0x97, 0x33, 0x30, 0x6b, 
0x35, 0x27, 0xc5, 0x8d, 0x01, 0x2c, + /* (2^204)P */ 0xd4, 0xb3, 0x30, 0xe3, 0x24, 0x50, 0x41, 0xa5, 0xd3, 0x52, 0x16, 0x69, 0x96, 0x3d, 0xff, 0x73, 0xf1, 0x59, 0x9b, 0xef, 0xc4, 0x42, 0xec, 0x94, 0x5a, 0x8e, 0xd0, 0x18, 0x16, 0x20, 0x47, 0x07, + /* (2^205)P */ 0x53, 0x1c, 0x41, 0xca, 0x8a, 0xa4, 0x6c, 0x4d, 0x19, 0x61, 0xa6, 0xcf, 0x2f, 0x5f, 0x41, 0x66, 0xff, 0x27, 0xe2, 0x51, 0x00, 0xd4, 0x4d, 0x9c, 0xeb, 0xf7, 0x02, 0x9a, 0xc0, 0x0b, 0x81, 0x59, + /* (2^206)P */ 0x1d, 0x10, 0xdc, 0xb3, 0x71, 0xb1, 0x7e, 0x2a, 0x8e, 0xf6, 0xfe, 0x9f, 0xb9, 0x5a, 0x1c, 0x44, 0xea, 0x59, 0xb3, 0x93, 0x9b, 0x5c, 0x02, 0x32, 0x2f, 0x11, 0x9d, 0x1e, 0xa7, 0xe0, 0x8c, 0x5e, + /* (2^207)P */ 0xfd, 0x03, 0x95, 0x42, 0x92, 0xcb, 0xcc, 0xbf, 0x55, 0x5d, 0x09, 0x2f, 0x75, 0xba, 0x71, 0xd2, 0x1e, 0x09, 0x2d, 0x97, 0x5e, 0xad, 0x5e, 0x34, 0xba, 0x03, 0x31, 0xa8, 0x11, 0xdf, 0xc8, 0x18, + /* (2^208)P */ 0x4c, 0x0f, 0xed, 0x9a, 0x9a, 0x94, 0xcd, 0x90, 0x7e, 0xe3, 0x60, 0x66, 0xcb, 0xf4, 0xd1, 0xc5, 0x0b, 0x2e, 0xc5, 0x56, 0x2d, 0xc5, 0xca, 0xb8, 0x0d, 0x8e, 0x80, 0xc5, 0x00, 0xe4, 0x42, 0x6e, + /* (2^209)P */ 0x23, 0xfd, 0xae, 0xee, 0x66, 0x69, 0xb4, 0xa3, 0xca, 0xcd, 0x9e, 0xe3, 0x0b, 0x1f, 0x4f, 0x0c, 0x1d, 0xa5, 0x83, 0xd6, 0xc9, 0xc8, 0x9d, 0x18, 0x1b, 0x35, 0x09, 0x4c, 0x05, 0x7f, 0xf2, 0x51, + /* (2^210)P */ 0x82, 0x06, 0x32, 0x2a, 0xcd, 0x7c, 0x48, 0x4c, 0x96, 0x1c, 0xdf, 0xb3, 0x5b, 0xa9, 0x7e, 0x58, 0xe8, 0xb8, 0x5c, 0x55, 0x9e, 0xf7, 0xcc, 0xc8, 0x3d, 0xd7, 0x06, 0xa2, 0x29, 0xc8, 0x7d, 0x54, + /* (2^211)P */ 0x06, 0x9b, 0xc3, 0x80, 0xcd, 0xa6, 0x22, 0xb8, 0xc6, 0xd4, 0x00, 0x20, 0x73, 0x54, 0x6d, 0xe9, 0x4d, 0x3b, 0x46, 0x91, 0x6f, 0x5b, 0x53, 0x28, 0x1d, 0x6e, 0x48, 0xe2, 0x60, 0x46, 0x8f, 0x22, + /* (2^212)P */ 0xbf, 0x3a, 0x8d, 0xde, 0x38, 0x95, 0x79, 0x98, 0x6e, 0xca, 0xeb, 0x45, 0x00, 0x33, 0xd8, 0x8c, 0x38, 0xe7, 0x21, 0x82, 0x00, 0x2a, 0x95, 0x79, 0xbb, 0xd2, 0x5c, 0x53, 0xa7, 0xe1, 0x22, 0x43, + /* (2^213)P */ 0x1c, 0x80, 0xd1, 0x19, 0x18, 0xc1, 0x14, 0xb1, 0xc7, 0x5e, 0x3f, 0x4f, 0xd8, 0xe4, 0x16, 0x20, 0x4c, 0x0f, 0x26, 0x09, 0xf4, 0x2d, 0x0e, 0xdd, 0x66, 0x72, 0x5f, 0xae, 0xc0, 0x62, 0xc3, 0x5e, + /* (2^214)P */ 0xee, 0xb4, 0xb2, 0xb8, 0x18, 0x2b, 0x46, 0xc0, 0xfb, 0x1a, 0x4d, 0x27, 0x50, 0xd9, 0xc8, 0x7c, 0xd2, 0x02, 0x6b, 0x43, 0x05, 0x71, 0x5f, 0xf2, 0xd3, 0xcc, 0xf9, 0xbf, 0xdc, 0xf8, 0xbb, 0x43, + /* (2^215)P */ 0xdf, 0xe9, 0x39, 0xa0, 0x67, 0x17, 0xad, 0xb6, 0x83, 0x35, 0x9d, 0xf6, 0xa8, 0x4d, 0x71, 0xb0, 0xf5, 0x31, 0x29, 0xb4, 0x18, 0xfa, 0x55, 0x5e, 0x61, 0x09, 0xc6, 0x33, 0x8f, 0x55, 0xd5, 0x4e, + /* (2^216)P */ 0xdd, 0xa5, 0x47, 0xc6, 0x01, 0x79, 0xe3, 0x1f, 0x57, 0xd3, 0x81, 0x80, 0x1f, 0xdf, 0x3d, 0x59, 0xa6, 0xd7, 0x3f, 0x81, 0xfd, 0xa4, 0x49, 0x02, 0x61, 0xaf, 0x9c, 0x4e, 0x27, 0xca, 0xac, 0x69, + /* (2^217)P */ 0xc9, 0x21, 0x07, 0x33, 0xea, 0xa3, 0x7b, 0x04, 0xa0, 0x1e, 0x7e, 0x0e, 0xc2, 0x3f, 0x42, 0x83, 0x60, 0x4a, 0x31, 0x01, 0xaf, 0xc0, 0xf4, 0x1d, 0x27, 0x95, 0x28, 0x89, 0xab, 0x2d, 0xa6, 0x09, + /* (2^218)P */ 0x00, 0xcb, 0xc6, 0x9c, 0xa4, 0x25, 0xb3, 0xa5, 0xb6, 0x6c, 0xb5, 0x54, 0xc6, 0x5d, 0x4b, 0xe9, 0xa0, 0x94, 0xc9, 0xad, 0x79, 0x87, 0xe2, 0x3b, 0xad, 0x4a, 0x3a, 0xba, 0xf8, 0xe8, 0x96, 0x42, + /* (2^219)P */ 0xab, 0x1e, 0x45, 0x1e, 0x76, 0x89, 0x86, 0x32, 0x4a, 0x59, 0x59, 0xff, 0x8b, 0x59, 0x4d, 0x2e, 0x4a, 0x08, 0xa7, 0xd7, 0x53, 0x68, 0xb9, 0x49, 0xa8, 0x20, 0x14, 0x60, 0x19, 0xa3, 0x80, 0x49, + /* (2^220)P */ 0x42, 0x2c, 0x55, 0x2f, 0xe1, 0xb9, 0x65, 0x95, 0x96, 0xfe, 0x00, 0x71, 0xdb, 0x18, 0x53, 0x8a, 0xd7, 0xd0, 0xad, 0x43, 0x4d, 0x0b, 0xc9, 0x05, 0xda, 0x4e, 
0x5d, 0x6a, 0xd6, 0x4c, 0x8b, 0x53, + /* (2^221)P */ 0x9f, 0x03, 0x9f, 0xe8, 0xc3, 0x4f, 0xe9, 0xf4, 0x45, 0x80, 0x61, 0x6f, 0xf2, 0x9a, 0x2c, 0x59, 0x50, 0x95, 0x4b, 0xfd, 0xb5, 0x6e, 0xa3, 0x08, 0x19, 0x14, 0xed, 0xc2, 0xf6, 0xfa, 0xff, 0x25, + /* (2^222)P */ 0x54, 0xd3, 0x79, 0xcc, 0x59, 0x44, 0x43, 0x34, 0x6b, 0x47, 0xd5, 0xb1, 0xb4, 0xbf, 0xec, 0xee, 0x99, 0x5d, 0x61, 0x61, 0xa0, 0x34, 0xeb, 0xdd, 0x73, 0xb7, 0x64, 0xeb, 0xcc, 0xce, 0x29, 0x51, + /* (2^223)P */ 0x20, 0x35, 0x99, 0x94, 0x58, 0x21, 0x43, 0xee, 0x3b, 0x0b, 0x4c, 0xf1, 0x7c, 0x9c, 0x2f, 0x77, 0xd5, 0xda, 0xbe, 0x06, 0xe3, 0xfc, 0xe2, 0xd2, 0x97, 0x6a, 0xf0, 0x46, 0xb5, 0x42, 0x5f, 0x71, + /* (2^224)P */ 0x1a, 0x5f, 0x5b, 0xda, 0xce, 0xcd, 0x4e, 0x43, 0xa9, 0x41, 0x97, 0xa4, 0x15, 0x71, 0xa1, 0x0d, 0x2e, 0xad, 0xed, 0x73, 0x7c, 0xd7, 0x0b, 0x68, 0x41, 0x90, 0xdd, 0x4e, 0x35, 0x02, 0x7c, 0x48, + /* (2^225)P */ 0xc4, 0xd9, 0x0e, 0xa7, 0xf3, 0xef, 0xef, 0xb8, 0x02, 0xe3, 0x57, 0xe8, 0xa3, 0x2a, 0xa3, 0x56, 0xa0, 0xa5, 0xa2, 0x48, 0xbd, 0x68, 0x3a, 0xdf, 0x44, 0xc4, 0x76, 0x31, 0xb7, 0x50, 0xf6, 0x07, + /* (2^226)P */ 0xb1, 0xcc, 0xe0, 0x26, 0x16, 0x9b, 0x8b, 0xe3, 0x36, 0xfb, 0x09, 0x8b, 0xc1, 0x53, 0xe0, 0x79, 0x64, 0x49, 0xf9, 0xc9, 0x19, 0x03, 0xd9, 0x56, 0xc4, 0xf5, 0x9f, 0xac, 0xe7, 0x41, 0xa9, 0x1c, + /* (2^227)P */ 0xbb, 0xa0, 0x2f, 0x16, 0x29, 0xdf, 0xc4, 0x49, 0x05, 0x33, 0xb3, 0x82, 0x32, 0xcf, 0x88, 0x84, 0x7d, 0x43, 0xbb, 0xca, 0x14, 0xda, 0xdf, 0x95, 0x86, 0xad, 0xd5, 0x64, 0x82, 0xf7, 0x91, 0x33, + /* (2^228)P */ 0x5d, 0x09, 0xb5, 0xe2, 0x6a, 0xe0, 0x9a, 0x72, 0x46, 0xa9, 0x59, 0x32, 0xd7, 0x58, 0x8a, 0xd5, 0xed, 0x21, 0x39, 0xd1, 0x62, 0x42, 0x83, 0xe9, 0x92, 0xb5, 0x4b, 0xa5, 0xfa, 0xda, 0xfe, 0x27, + /* (2^229)P */ 0xbb, 0x48, 0xad, 0x29, 0xb8, 0xc5, 0x9d, 0xa9, 0x60, 0xe2, 0x9e, 0x49, 0x42, 0x57, 0x02, 0x5f, 0xfd, 0x13, 0x75, 0x5d, 0xcd, 0x8e, 0x2c, 0x80, 0x38, 0xd9, 0x6d, 0x3f, 0xef, 0xb3, 0xce, 0x78, + /* (2^230)P */ 0x94, 0x5d, 0x13, 0x8a, 0x4f, 0xf4, 0x42, 0xc3, 0xa3, 0xdd, 0x8c, 0x82, 0x44, 0xdb, 0x9e, 0x7b, 0xe7, 0xcf, 0x37, 0x05, 0x1a, 0xd1, 0x36, 0x94, 0xc8, 0xb4, 0x1a, 0xec, 0x64, 0xb1, 0x64, 0x50, + /* (2^231)P */ 0xfc, 0xb2, 0x7e, 0xd3, 0xcf, 0xec, 0x20, 0x70, 0xfc, 0x25, 0x0d, 0xd9, 0x3e, 0xea, 0x31, 0x1f, 0x34, 0xbb, 0xa1, 0xdf, 0x7b, 0x0d, 0x93, 0x1b, 0x44, 0x30, 0x11, 0x48, 0x7a, 0x46, 0x44, 0x53, + /* (2^232)P */ 0xfb, 0x6d, 0x5e, 0xf2, 0x70, 0x31, 0x07, 0x70, 0xc8, 0x4c, 0x11, 0x50, 0x1a, 0xdc, 0x85, 0xe3, 0x00, 0x4f, 0xfc, 0xc8, 0x8a, 0x69, 0x48, 0x23, 0xd8, 0x40, 0xdd, 0x84, 0x52, 0xa5, 0x77, 0x2a, + /* (2^233)P */ 0xe4, 0x6c, 0x8c, 0xc9, 0xe0, 0xaf, 0x06, 0xfe, 0xe4, 0xd6, 0xdf, 0xdd, 0x96, 0xdf, 0x35, 0xc2, 0xd3, 0x1e, 0xbf, 0x33, 0x1e, 0xd0, 0x28, 0x14, 0xaf, 0xbd, 0x00, 0x93, 0xec, 0x68, 0x57, 0x78, + /* (2^234)P */ 0x3b, 0xb6, 0xde, 0x91, 0x7a, 0xe5, 0x02, 0x97, 0x80, 0x8b, 0xce, 0xe5, 0xbf, 0xb8, 0xbd, 0x61, 0xac, 0x58, 0x1d, 0x3d, 0x6f, 0x42, 0x5b, 0x64, 0xbc, 0x57, 0xa5, 0x27, 0x22, 0xa8, 0x04, 0x48, + /* (2^235)P */ 0x01, 0x26, 0x4d, 0xb4, 0x8a, 0x04, 0x57, 0x8e, 0x35, 0x69, 0x3a, 0x4b, 0x1a, 0x50, 0xd6, 0x68, 0x93, 0xc2, 0xe1, 0xf9, 0xc3, 0x9e, 0x9c, 0xc3, 0xe2, 0x63, 0xde, 0xd4, 0x57, 0xf2, 0x72, 0x41, + /* (2^236)P */ 0x01, 0x64, 0x0c, 0x33, 0x50, 0xb4, 0x68, 0xd3, 0x91, 0x23, 0x8f, 0x41, 0x17, 0x30, 0x0d, 0x04, 0x0d, 0xd9, 0xb7, 0x90, 0x60, 0xbb, 0x34, 0x2c, 0x1f, 0xd5, 0xdf, 0x8f, 0x22, 0x49, 0xf6, 0x16, + /* (2^237)P */ 0xf5, 0x8e, 0x92, 0x2b, 0x8e, 0x81, 0xa6, 0xbe, 0x72, 0x1e, 0xc1, 0xcd, 0x91, 0xcf, 0x8c, 0xe2, 0xcd, 0x36, 0x7a, 0xe7, 0x68, 0xaa, 0x4a, 0x59, 0x0f, 0xfd, 
0x7f, 0x6c, 0x80, 0x34, 0x30, 0x31, + /* (2^238)P */ 0x65, 0xbd, 0x49, 0x22, 0xac, 0x27, 0x9d, 0x8a, 0x12, 0x95, 0x8e, 0x01, 0x64, 0xb4, 0xa3, 0x19, 0xc7, 0x7e, 0xb3, 0x52, 0xf3, 0xcf, 0x6c, 0xc2, 0x21, 0x7b, 0x79, 0x1d, 0x34, 0x68, 0x6f, 0x05, + /* (2^239)P */ 0x27, 0x23, 0xfd, 0x7e, 0x75, 0xd6, 0x79, 0x5e, 0x15, 0xfe, 0x3a, 0x55, 0xb6, 0xbc, 0xbd, 0xfa, 0x60, 0x5a, 0xaf, 0x6e, 0x2c, 0x22, 0xe7, 0xd3, 0x3b, 0x74, 0xae, 0x4d, 0x6d, 0xc7, 0x46, 0x70, + /* (2^240)P */ 0x55, 0x4a, 0x8d, 0xb1, 0x72, 0xe8, 0x0b, 0x66, 0x96, 0x14, 0x4e, 0x57, 0x18, 0x25, 0x99, 0x19, 0xbb, 0xdc, 0x2b, 0x30, 0x3a, 0x05, 0x03, 0xc1, 0x8e, 0x8e, 0x21, 0x0b, 0x80, 0xe9, 0xd8, 0x3e, + /* (2^241)P */ 0x3e, 0xe0, 0x75, 0xfa, 0x39, 0x92, 0x0b, 0x7b, 0x83, 0xc0, 0x33, 0x46, 0x68, 0xfb, 0xe9, 0xef, 0x93, 0x77, 0x1a, 0x39, 0xbe, 0x5f, 0xa3, 0x98, 0x34, 0xfe, 0xd0, 0xe2, 0x0f, 0x51, 0x65, 0x60, + /* (2^242)P */ 0x0c, 0xad, 0xab, 0x48, 0x85, 0x66, 0xcb, 0x55, 0x27, 0xe5, 0x87, 0xda, 0x48, 0x45, 0x58, 0xb4, 0xdd, 0xc1, 0x07, 0x01, 0xea, 0xec, 0x43, 0x2c, 0x35, 0xde, 0x72, 0x93, 0x80, 0x28, 0x60, 0x52, + /* (2^243)P */ 0x1f, 0x3b, 0x21, 0xf9, 0x6a, 0xc5, 0x15, 0x34, 0xdb, 0x98, 0x7e, 0x01, 0x4d, 0x1a, 0xee, 0x5b, 0x9b, 0x70, 0xcf, 0xb5, 0x05, 0xb1, 0xf6, 0x13, 0xb6, 0x9a, 0xb2, 0x82, 0x34, 0x0e, 0xf2, 0x5f, + /* (2^244)P */ 0x90, 0x6c, 0x2e, 0xcc, 0x75, 0x9c, 0xa2, 0x0a, 0x06, 0xe2, 0x70, 0x3a, 0xca, 0x73, 0x7d, 0xfc, 0x15, 0xc5, 0xb5, 0xc4, 0x8f, 0xc3, 0x9f, 0x89, 0x07, 0xc2, 0xff, 0x24, 0xb1, 0x86, 0x03, 0x25, + /* (2^245)P */ 0x56, 0x2b, 0x3d, 0xae, 0xd5, 0x28, 0xea, 0x54, 0xce, 0x60, 0xde, 0xd6, 0x9d, 0x14, 0x13, 0x99, 0xc1, 0xd6, 0x06, 0x8f, 0xc5, 0x4f, 0x69, 0x16, 0xc7, 0x8f, 0x01, 0xeb, 0x75, 0x39, 0xb2, 0x46, + /* (2^246)P */ 0xe2, 0xb4, 0xb7, 0xb4, 0x0f, 0x6a, 0x0a, 0x47, 0xde, 0x53, 0x72, 0x8f, 0x5a, 0x47, 0x92, 0x5d, 0xdb, 0x3a, 0xbd, 0x2f, 0xb5, 0xe5, 0xee, 0xab, 0x68, 0x69, 0x80, 0xa0, 0x01, 0x08, 0xa2, 0x7f, + /* (2^247)P */ 0xd2, 0x14, 0x77, 0x9f, 0xf1, 0xfa, 0xf3, 0x76, 0xc3, 0x60, 0x46, 0x2f, 0xc1, 0x40, 0xe8, 0xb3, 0x4e, 0x74, 0x12, 0xf2, 0x8d, 0xcd, 0xb4, 0x0f, 0xd2, 0x2d, 0x3a, 0x1d, 0x25, 0x5a, 0x06, 0x4b, + /* (2^248)P */ 0x4a, 0xcd, 0x77, 0x3d, 0x38, 0xde, 0xeb, 0x5c, 0xb1, 0x9c, 0x2c, 0x88, 0xdf, 0x39, 0xdf, 0x6a, 0x59, 0xf7, 0x9a, 0xb0, 0x2e, 0x24, 0xdd, 0xa2, 0x22, 0x64, 0x5f, 0x0e, 0xe5, 0xc0, 0x47, 0x31, + /* (2^249)P */ 0xdb, 0x50, 0x13, 0x1d, 0x10, 0xa5, 0x4c, 0x16, 0x62, 0xc9, 0x3f, 0xc3, 0x79, 0x34, 0xd1, 0xf8, 0x08, 0xda, 0xe5, 0x13, 0x4d, 0xce, 0x40, 0xe6, 0xba, 0xf8, 0x61, 0x50, 0xc4, 0xe0, 0xde, 0x4b, + /* (2^250)P */ 0xc9, 0xb1, 0xed, 0xa4, 0xc1, 0x6d, 0xc4, 0xd7, 0x8a, 0xd9, 0x7f, 0x43, 0xb6, 0xd7, 0x14, 0x55, 0x0b, 0xc0, 0xa1, 0xb2, 0x6b, 0x2f, 0x94, 0x58, 0x0e, 0x71, 0x70, 0x1d, 0xab, 0xb2, 0xff, 0x2d, + /* (2^251)P */ 0x68, 0x6d, 0x8b, 0xc1, 0x2f, 0xcf, 0xdf, 0xcc, 0x67, 0x61, 0x80, 0xb7, 0xa8, 0xcb, 0xeb, 0xa8, 0xe3, 0x37, 0x29, 0x5e, 0xf9, 0x97, 0x06, 0x98, 0x8c, 0x6e, 0x12, 0xd0, 0x1c, 0xba, 0xfb, 0x02, + /* (2^252)P */ 0x65, 0x45, 0xff, 0xad, 0x60, 0xc3, 0x98, 0xcb, 0x19, 0x15, 0xdb, 0x4b, 0xd2, 0x01, 0x71, 0x44, 0xd5, 0x15, 0xfb, 0x75, 0x74, 0xc8, 0xc4, 0x98, 0x7d, 0xa2, 0x22, 0x6e, 0x6d, 0xc7, 0xf8, 0x05, + /* (2^253)P */ 0x94, 0xf4, 0xb9, 0xfe, 0xdf, 0xe5, 0x69, 0xab, 0x75, 0x6b, 0x40, 0x18, 0x9d, 0xc7, 0x09, 0xae, 0x1d, 0x2d, 0xa4, 0x94, 0xfb, 0x45, 0x9b, 0x19, 0x84, 0xfa, 0x2a, 0xae, 0xeb, 0x0a, 0x71, 0x79, + /* (2^254)P */ 0xdf, 0xd2, 0x34, 0xf3, 0xa7, 0xed, 0xad, 0xa6, 0xb4, 0x57, 0x2a, 0xaf, 0x51, 0x9c, 0xde, 0x7b, 0xa8, 0xea, 0xdc, 0x86, 0x4f, 0xc6, 0x8f, 0xa9, 0x7b, 0xd0, 
0x0e, 0xc2, 0x35, 0x03, 0xbe, 0x6b, + /* (2^255)P */ 0x44, 0x43, 0x98, 0x53, 0xbe, 0xdc, 0x7f, 0x66, 0xa8, 0x49, 0x59, 0x00, 0x1c, 0xbc, 0x72, 0x07, 0x8e, 0xd6, 0xbe, 0x4e, 0x9f, 0xa4, 0x07, 0xba, 0xbf, 0x30, 0xdf, 0xba, 0x85, 0xb0, 0xa7, 0x1f, +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve.go b/vendor/github.com/cloudflare/circl/dh/x448/curve.go new file mode 100644 index 0000000000..d59564e4b4 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve.go @@ -0,0 +1,104 @@ +package x448 + +import ( + fp "github.com/cloudflare/circl/math/fp448" +) + +// ladderJoye calculates a fixed-point multiplication with the generator point. +// The algorithm is the right-to-left Joye's ladder as described +// in "How to precompute a ladder" in SAC'2017. +func ladderJoye(k *Key) { + w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved. + w[1] = fp.Elt{ // x1 = S + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } + fp.SetOne(&w[2]) // z1 = 1 + w[3] = fp.Elt{ // x2 = G-S + 0x20, 0x27, 0x9d, 0xc9, 0x7d, 0x19, 0xb1, 0xac, + 0xf8, 0xba, 0x69, 0x1c, 0xff, 0x33, 0xac, 0x23, + 0x51, 0x1b, 0xce, 0x3a, 0x64, 0x65, 0xbd, 0xf1, + 0x23, 0xf8, 0xc1, 0x84, 0x9d, 0x45, 0x54, 0x29, + 0x67, 0xb9, 0x81, 0x1c, 0x03, 0xd1, 0xcd, 0xda, + 0x7b, 0xeb, 0xff, 0x1a, 0x88, 0x03, 0xcf, 0x3a, + 0x42, 0x44, 0x32, 0x01, 0x25, 0xb7, 0xfa, 0xf0, + } + fp.SetOne(&w[4]) // z2 = 1 + + const n = 448 + const h = 2 + swap := uint(1) + for s := 0; s < n-h; s++ { + i := (s + h) / 8 + j := (s + h) % 8 + bit := uint((k[i] >> uint(j)) & 1) + copy(w[0][:], tableGenerator[s*Size:(s+1)*Size]) + diffAdd(&w, swap^bit) + swap = bit + } + for s := 0; s < h; s++ { + double(&w[1], &w[2]) + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +// ladderMontgomery calculates a generic scalar point multiplication +// The algorithm implemented is the left-to-right Montgomery's ladder. +func ladderMontgomery(k, xP *Key) { + w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved. 
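+ // w[2] (z2) is left at zero: (x2,z2) = (1,0) encodes the point at
+ // infinity, the initial accumulator of the Montgomery ladder.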
+ w[0] = *(*fp.Elt)(xP) // x1 = xP + fp.SetOne(&w[1]) // x2 = 1 + w[3] = *(*fp.Elt)(xP) // x3 = xP + fp.SetOne(&w[4]) // z3 = 1 + + move := uint(0) + for s := 448 - 1; s >= 0; s-- { + i := s / 8 + j := s % 8 + bit := uint((k[i] >> uint(j)) & 1) + ladderStep(&w, move^bit) + move = bit + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +func toAffine(k *[fp.Size]byte, x, z *fp.Elt) { + fp.Inv(z, z) + fp.Mul(x, x, z) + _ = fp.ToBytes(k[:], x) +} + +var lowOrderPoints = [3]fp.Elt{ + { /* (0,_,1) point of order 2 on Curve448 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (1,_,1) a point of order 4 on the twist of Curve448 */ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (-1,_,1) point of order 4 on Curve448 */ + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }, +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go new file mode 100644 index 0000000000..a062266613 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go @@ -0,0 +1,30 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package x448 + +import ( + fp "github.com/cloudflare/circl/math/fp448" + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func double(x, z *fp.Elt) { doubleAmd64(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) } + +//go:noescape +func doubleAmd64(x, z *fp.Elt) + +//go:noescape +func diffAddAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func ladderStepAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func mulA24Amd64(z, x *fp.Elt) diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h new file mode 100644 index 0000000000..8c1ae4d0fb --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h @@ -0,0 +1,111 @@ +#define ladderStepLeg \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulLeg(b0,x2,z3) \ + integerMulLeg(b1,x3,z2) \ + reduceFromDoubleLeg(t0,b0) \ + reduceFromDoubleLeg(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrLeg(b0,t0) \ + integerSqrLeg(b1,t1) \ + reduceFromDoubleLeg(x3,b0) \ + reduceFromDoubleLeg(z3,b1) \ + integerMulLeg(b0,x1,z3) \ + reduceFromDoubleLeg(z3,b0) \ + integerSqrLeg(b0,x2) \ + integerSqrLeg(b1,z2) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z2) \ + 
integerMulLeg(b0,x2,z2) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) + +#define ladderStepBmi2Adx \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulAdx(b0,x2,z3) \ + integerMulAdx(b1,x3,z2) \ + reduceFromDoubleAdx(t0,b0) \ + reduceFromDoubleAdx(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrAdx(b0,t0) \ + integerSqrAdx(b1,t1) \ + reduceFromDoubleAdx(x3,b0) \ + reduceFromDoubleAdx(z3,b1) \ + integerMulAdx(b0,x1,z3) \ + reduceFromDoubleAdx(z3,b0) \ + integerSqrAdx(b0,x2) \ + integerSqrAdx(b1,z2) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z2) \ + integerMulAdx(b0,x2,z2) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) + +#define difAddLeg \ + addSub(x1,z1) \ + integerMulLeg(b0,z1,ui) \ + reduceFromDoubleLeg(z1,b0) \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + integerMulLeg(b0,x1,z2) \ + integerMulLeg(b1,z1,x2) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define difAddBmi2Adx \ + addSub(x1,z1) \ + integerMulAdx(b0,z1,ui) \ + reduceFromDoubleAdx(z1,b0) \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + integerMulAdx(b0,x1,z2) \ + integerMulAdx(b1,z1,x2) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) + +#define doubleLeg \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z1) \ + integerMulLeg(b0,x1,z1) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define doubleBmi2Adx \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z1) \ + integerMulAdx(b0,x1,z1) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s new file mode 100644 index 0000000000..810aa9e648 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s @@ -0,0 +1,193 @@ +// +build amd64 + +#include "textflag.h" + +// Depends on circl/math/fp448 package +#include "../../math/fp448/fp_amd64.h" +#include "curve_amd64.h" + +// CTE_A24 is (A+2)/4 from Curve448 +#define CTE_A24 39082 + +#define Size 56 + +// multiplyA24Leg multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, cmov, adx +#define multiplyA24Leg(z,x) \ + MOVQ $CTE_A24, R15; \ + MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ + MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ + MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ + MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; MOVQ $0, DX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + 
ADCQ $0, R14; \ + ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + MOVQ R8, 0+z; \ + MOVQ R9, 8+z; \ + MOVQ R10, 16+z; \ + MOVQ R11, 24+z; \ + MOVQ R12, 32+z; \ + MOVQ R13, 40+z; \ + MOVQ R14, 48+z; + +// multiplyA24Adx multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R14, FLAGS +// Instr: x86_64, bmi2 +#define multiplyA24Adx(z,x) \ + MOVQ $CTE_A24, DX; \ + MULXQ 0+x, R8, R9; \ + MULXQ 8+x, AX, R10; ADDQ AX, R9; \ + MULXQ 16+x, AX, R11; ADCQ AX, R10; \ + MULXQ 24+x, AX, R12; ADCQ AX, R11; \ + MULXQ 32+x, AX, R13; ADCQ AX, R12; \ + MULXQ 40+x, AX, R14; ADCQ AX, R13; \ + MULXQ 48+x, AX, DX; ADCQ AX, R14; \ + ;;;;;;;;;;;;;;;;;;;; ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; MOVQ $0, DX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + MOVQ R8, 0+z; \ + MOVQ R9, 8+z; \ + MOVQ R10, 16+z; \ + MOVQ R11, 24+z; \ + MOVQ R12, 32+z; \ + MOVQ R13, 40+z; \ + MOVQ R14, 48+z; + +#define mulA24Legacy \ + multiplyA24Leg(0(DI),0(SI)) +#define mulA24Bmi2Adx \ + multiplyA24Adx(0(DI),0(SI)) + +// func mulA24Amd64(z, x *fp448.Elt) +TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx) + +// func ladderStepAmd64(w *[5]fp448.Elt, b uint) +// ladderStepAmd64 calculates a point addition and doubling as follows: +// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-). +// w = {x1,x2,z2,x3,z3} are five fp448.Elt of 56 bytes. +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·ladderStepAmd64(SB),NOSPLIT,$336-16 + // Parameters + #define regWork DI + #define regMove SI + #define x1 0*Size(regWork) + #define x2 1*Size(regWork) + #define z2 2*Size(regWork) + #define x3 3*Size(regWork) + #define z3 4*Size(regWork) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regMove + CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx) + #undef regWork + #undef regMove + #undef x1 + #undef x2 + #undef z2 + #undef x3 + #undef z3 + #undef t0 + #undef t1 + #undef b0 + #undef b1 + +// func diffAddAmd64(work *[5]fp.Elt, swap uint) +// diffAddAmd64 calculates a differential point addition using a precomputed point. +// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2) +// work = {mu,x1,z1,x2,z2} are five fp448.Elt of 56 bytes, and +// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +// This is Equation 7 at https://eprint.iacr.org/2017/264.
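Both ladder kernels above lean on one constant-time selection idiom: widen the secret bit into an all-zeros or all-ones mask and XOR-blend the two operands, so the executed instruction stream never depends on the bit. A minimal, self-contained Go sketch of that idiom (ctSwap and eltSize are hypothetical names for illustration only; the vendored code does this with the cswap/cselect macros and the fp448 helpers, not with this function):

package main

import "fmt"

const eltSize = 56 // byte length of an fp448 element

// ctSwap exchanges x and y when bit == 1 and leaves them unchanged when
// bit == 0, executing the same instructions in both cases: the bit is
// widened into a 0x00 or 0xff mask that gates an XOR blend.
func ctSwap(x, y *[eltSize]byte, bit uint) {
	mask := byte(0) - byte(bit&1) // 0x00 if bit == 0, 0xff if bit == 1
	for i := range x {
		t := mask & (x[i] ^ y[i])
		x[i] ^= t
		y[i] ^= t
	}
}

func main() {
	var a, b [eltSize]byte
	a[0], b[0] = 1, 2
	ctSwap(&a, &b, 1)
	fmt.Println(a[0], b[0]) // prints: 2 1
}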
+TEXT ·diffAddAmd64(SB),NOSPLIT,$224-16 + // Parameters + #define regWork DI + #define regSwap SI + #define ui 0*Size(regWork) + #define x1 1*Size(regWork) + #define z1 2*Size(regWork) + #define x2 3*Size(regWork) + #define z2 4*Size(regWork) + // Local variables + #define b0 0*Size(SP) + #define b1 2*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regSwap + cswap(x1,x2,regSwap) + cswap(z1,z2,regSwap) + CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx) + #undef regWork + #undef regSwap + #undef ui + #undef x1 + #undef z1 + #undef x2 + #undef z2 + #undef b0 + #undef b1 + +// func doubleAmd64(x, z *fp448.Elt) +// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1). +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·doubleAmd64(SB),NOSPLIT,$336-16 + // Parameters + #define x1 0(DI) + #define z1 0(SI) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ x+0(FP), DI + MOVQ z+8(FP), SI + CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx) + #undef x1 + #undef z1 + #undef t0 + #undef t1 + #undef b0 + #undef b1 diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go new file mode 100644 index 0000000000..b0b65ccf7e --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go @@ -0,0 +1,100 @@ +package x448 + +import ( + "encoding/binary" + "math/bits" + + "github.com/cloudflare/circl/math/fp448" +) + +func doubleGeneric(x, z *fp448.Elt) { + t0, t1 := &fp448.Elt{}, &fp448.Elt{} + fp448.AddSub(x, z) + fp448.Sqr(x, x) + fp448.Sqr(z, z) + fp448.Sub(t0, x, z) + mulA24Generic(t1, t0) + fp448.Add(t1, t1, z) + fp448.Mul(x, x, z) + fp448.Mul(z, t0, t1) +} + +func diffAddGeneric(w *[5]fp448.Elt, b uint) { + mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4] + fp448.Cswap(x1, x2, b) + fp448.Cswap(z1, z2, b) + fp448.AddSub(x1, z1) + fp448.Mul(z1, z1, mu) + fp448.AddSub(x1, z1) + fp448.Sqr(x1, x1) + fp448.Sqr(z1, z1) + fp448.Mul(x1, x1, z2) + fp448.Mul(z1, z1, x2) +} + +func ladderStepGeneric(w *[5]fp448.Elt, b uint) { + x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4] + t0 := &fp448.Elt{} + t1 := &fp448.Elt{} + fp448.AddSub(x2, z2) + fp448.AddSub(x3, z3) + fp448.Mul(t0, x2, z3) + fp448.Mul(t1, x3, z2) + fp448.AddSub(t0, t1) + fp448.Cmov(x2, x3, b) + fp448.Cmov(z2, z3, b) + fp448.Sqr(x3, t0) + fp448.Sqr(z3, t1) + fp448.Mul(z3, x1, z3) + fp448.Sqr(x2, x2) + fp448.Sqr(z2, z2) + fp448.Sub(t0, x2, z2) + mulA24Generic(t1, t0) + fp448.Add(t1, t1, z2) + fp448.Mul(x2, x2, z2) + fp448.Mul(z2, t0, t1) +} + +func mulA24Generic(z, x *fp448.Elt) { + const A24 = 39082 + const n = 8 + var xx [7]uint64 + for i := range xx { + xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n]) + } + h0, l0 := bits.Mul64(xx[0], A24) + h1, l1 := bits.Mul64(xx[1], A24) + h2, l2 := bits.Mul64(xx[2], A24) + h3, l3 := bits.Mul64(xx[3], A24) + h4, l4 := bits.Mul64(xx[4], A24) + h5, l5 := bits.Mul64(xx[5], A24) + h6, l6 := bits.Mul64(xx[6], A24) + + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, c3 := bits.Add64(h3, l4, c2) + l5, c4 := bits.Add64(h4, l5, c3) + l6, c5 := bits.Add64(h5, l6, c4) + l7, _ := bits.Add64(h6, 0, c5) + + l0, c0 = bits.Add64(l0, l7, 0) + l1, c1 = bits.Add64(l1, 0, c0) + l2, c2 = bits.Add64(l2, 0, c1) + l3, c3 = bits.Add64(l3, l7<<32, c2) + l4, c4 = bits.Add64(l4, 0, c3) + l5, c5 = bits.Add64(l5, 0, c4) + l6, l7 
= bits.Add64(l6, 0, c5) + + xx[0], c0 = bits.Add64(l0, l7, 0) + xx[1], c1 = bits.Add64(l1, 0, c0) + xx[2], c2 = bits.Add64(l2, 0, c1) + xx[3], c3 = bits.Add64(l3, l7<<32, c2) + xx[4], c4 = bits.Add64(l4, 0, c3) + xx[5], c5 = bits.Add64(l5, 0, c4) + xx[6], _ = bits.Add64(l6, 0, c5) + + for i := range xx { + binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i]) + } +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go new file mode 100644 index 0000000000..3755b7c83b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go @@ -0,0 +1,11 @@ +//go:build !amd64 || purego +// +build !amd64 purego + +package x448 + +import fp "github.com/cloudflare/circl/math/fp448" + +func double(x, z *fp.Elt) { doubleGeneric(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) } diff --git a/vendor/github.com/cloudflare/circl/dh/x448/doc.go b/vendor/github.com/cloudflare/circl/dh/x448/doc.go new file mode 100644 index 0000000000..c02904feda --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/doc.go @@ -0,0 +1,19 @@ +/* +Package x448 provides Diffie-Hellman functions as specified in RFC-7748. + +Validation of public keys. + +The Diffie-Hellman function, as described in RFC-7748 [1], works for any +public key. However, if a different protocol requires contributory +behaviour [2,3], then the public keys must be validated against low-order +points [3,4]. To do that, the Shared function performs this validation +internally and returns false when the public key is invalid (i.e., it +is a low-order point). + +References: + - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt) + - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html) + - [3] Bernstein (https://cr.yp.to/ecdh.html#validate) + - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526) +*/ +package x448 diff --git a/vendor/github.com/cloudflare/circl/dh/x448/key.go b/vendor/github.com/cloudflare/circl/dh/x448/key.go new file mode 100644 index 0000000000..2fdde51168 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/key.go @@ -0,0 +1,46 @@ +package x448 + +import ( + "crypto/subtle" + + fp "github.com/cloudflare/circl/math/fp448" +) + +// Size is the length in bytes of a X448 key. +const Size = 56 + +// Key represents a X448 key. +type Key [Size]byte + +func (k *Key) clamp(in *Key) *Key { + *k = *in + k[0] &= 252 + k[55] |= 128 + return k +} + +// isValidPubKey verifies if the public key is not a low-order point. +func (k *Key) isValidPubKey() bool { + fp.Modp((*fp.Elt)(k)) + var isLowOrder int + for _, P := range lowOrderPoints { + isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:]) + } + return isLowOrder == 0 +} + +// KeyGen obtains a public key given a secret key. +func KeyGen(public, secret *Key) { + ladderJoye(public.clamp(secret)) +} + +// Shared calculates Alice's shared key from Alice's secret key and Bob's +// public key returning true on success. A failure case happens when the public +// key is a low-order point, thus the shared key is all-zeros and the function +// returns false. 
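Shared, defined next, completes the package's public API. As a minimal sketch of how the pieces compose into an RFC 7748 key agreement, the following program assumes only the KeyGen/Shared signatures shown here (import path as vendored; error handling kept minimal):

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/circl/dh/x448"
)

func main() {
	var aliceSecret, alicePublic, bobSecret, bobPublic x448.Key

	// Secrets are 56 uniformly random bytes; KeyGen clamps a copy
	// internally, so the originals are left untouched.
	if _, err := rand.Read(aliceSecret[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bobSecret[:]); err != nil {
		panic(err)
	}

	x448.KeyGen(&alicePublic, &aliceSecret)
	x448.KeyGen(&bobPublic, &bobSecret)

	// Shared reports false if the peer's public key is a low-order point.
	var k1, k2 x448.Key
	ok1 := x448.Shared(&k1, &aliceSecret, &bobPublic)
	ok2 := x448.Shared(&k2, &bobSecret, &alicePublic)

	fmt.Println(ok1 && ok2 && bytes.Equal(k1[:], k2[:])) // prints: true
}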
+func Shared(shared, secret, public *Key) bool { + validPk := *public + ok := validPk.isValidPubKey() + ladderMontgomery(shared.clamp(secret), &validPk) + return ok +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/table.go b/vendor/github.com/cloudflare/circl/dh/x448/table.go new file mode 100644 index 0000000000..eef53c30f8 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/table.go @@ -0,0 +1,460 @@ +package x448 + +import fp "github.com/cloudflare/circl/math/fp448" + +// tableGenerator contains the set of points: +// +// t[i] = (xi+1)/(xi-1), +// +// where (xi,yi) = 2^iG and G is the generator point +// Size = (448)*(448/8) = 25088 bytes. +var tableGenerator = [448 * fp.Size]byte{ + /* (2^ 0)P */ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, + /* (2^ 1)P */ 0x37, 0xfa, 0xaa, 0x0d, 0x86, 0xa6, 0x24, 0xe9, 0x6c, 0x95, 0x08, 0x34, 0xba, 0x1a, 0x81, 0x3a, 0xae, 0x01, 0xa5, 0xa7, 0x05, 0x85, 0x96, 0x00, 0x06, 0x5a, 0xd7, 0xff, 0xee, 0x8e, 0x8f, 0x94, 0xd2, 0xdc, 0xd7, 0xfc, 0xe7, 0xe5, 0x99, 0x1d, 0x05, 0x46, 0x43, 0xe8, 0xbc, 0x12, 0xb7, 0xeb, 0x30, 0x5e, 0x7a, 0x85, 0x68, 0xed, 0x9d, 0x28, + /* (2^ 2)P */ 0xf1, 0x7d, 0x08, 0x2b, 0x32, 0x4a, 0x62, 0x80, 0x36, 0xe7, 0xa4, 0x76, 0x5a, 0x2a, 0x1e, 0xf7, 0x9e, 0x3c, 0x40, 0x46, 0x9a, 0x1b, 0x61, 0xc1, 0xbf, 0x1a, 0x1b, 0xae, 0x91, 0x80, 0xa3, 0x76, 0x6c, 0xd4, 0x8f, 0xa4, 0xee, 0x26, 0x39, 0x23, 0xa4, 0x80, 0xf4, 0x66, 0x92, 0xe4, 0xe1, 0x18, 0x76, 0xc5, 0xe2, 0x19, 0x87, 0xd5, 0xc3, 0xe8, + /* (2^ 3)P */ 0xfb, 0xc9, 0xf0, 0x07, 0xf2, 0x93, 0xd8, 0x50, 0x36, 0xed, 0xfb, 0xbd, 0xb2, 0xd3, 0xfc, 0xdf, 0xd5, 0x2a, 0x6e, 0x26, 0x09, 0xce, 0xd4, 0x07, 0x64, 0x9f, 0x40, 0x74, 0xad, 0x98, 0x2f, 0x1c, 0xb6, 0xdc, 0x2d, 0x42, 0xff, 0xbf, 0x97, 0xd8, 0xdb, 0xef, 0x99, 0xca, 0x73, 0x99, 0x1a, 0x04, 0x3b, 0x56, 0x2c, 0x1f, 0x87, 0x9d, 0x9f, 0x03, + /* (2^ 4)P */ 0x4c, 0x35, 0x97, 0xf7, 0x81, 0x2c, 0x84, 0xa6, 0xe0, 0xcb, 0xce, 0x37, 0x4c, 0x21, 0x1c, 0x67, 0xfa, 0xab, 0x18, 0x4d, 0xef, 0xd0, 0xf0, 0x44, 0xa9, 0xfb, 0xc0, 0x8e, 0xda, 0x57, 0xa1, 0xd8, 0xeb, 0x87, 0xf4, 0x17, 0xea, 0x66, 0x0f, 0x16, 0xea, 0xcd, 0x5f, 0x3e, 0x88, 0xea, 0x09, 0x68, 0x40, 0xdf, 0x43, 0xcc, 0x54, 0x61, 0x58, 0xaa, + /* (2^ 5)P */ 0x8d, 0xe7, 0x59, 0xd7, 0x5e, 0x63, 0x37, 0xa7, 0x3f, 0xd1, 0x49, 0x85, 0x01, 0xdd, 0x5e, 0xb3, 0xe6, 0x29, 0xcb, 0x25, 0x93, 0xdd, 0x08, 0x96, 0x83, 0x52, 0x76, 0x85, 0xf5, 0x5d, 0x02, 0xbf, 0xe9, 0x6d, 0x15, 0x27, 0xc1, 0x09, 0xd1, 0x14, 0x4d, 0x6e, 0xe8, 0xaf, 0x59, 0x58, 0x34, 0x9d, 0x2a, 0x99, 0x85, 0x26, 0xbe, 0x4b, 0x1e, 0xb9, + /* (2^ 6)P */ 0x8d, 0xce, 0x94, 0xe2, 0x18, 0x56, 0x0d, 0x82, 0x8e, 0xdf, 0x85, 0x01, 0x8f, 0x93, 0x3c, 0xc6, 0xbd, 0x61, 0xfb, 0xf4, 0x22, 0xc5, 0x16, 0x87, 0xd1, 0xb1, 0x9e, 0x09, 0xc5, 0x83, 0x2e, 0x4a, 0x07, 0x88, 0xee, 0xe0, 0x29, 0x8d, 0x2e, 0x1f, 0x88, 0xad, 0xfd, 0x18, 0x93, 0xb7, 0xed, 0x42, 0x86, 0x78, 0xf0, 0xb8, 0x70, 0xbe, 0x01, 0x67, + /* (2^ 7)P */ 0xdf, 0x62, 0x2d, 0x94, 0xc7, 0x35, 0x23, 0xda, 0x27, 0xbb, 0x2b, 0xdb, 0x30, 0x80, 0x68, 0x16, 0xa3, 0xae, 0xd7, 0xd2, 0xa7, 0x7c, 0xbf, 0x6a, 0x1d, 0x83, 0xde, 0x96, 0x0a, 0x43, 0xb6, 0x30, 0x37, 0xd6, 0xee, 0x63, 0x59, 0x9a, 0xbf, 0xa3, 0x30, 0x6c, 0xaf, 0x0c, 0xee, 0x3d, 0xcb, 0x35, 0x4b, 0x55, 0x5f, 0x84, 0x85, 0xcb, 0x4f, 0x1e, + /* (2^ 8)P */ 0x9d, 
0x04, 0x68, 0x89, 0xa4, 0xa9, 0x0d, 0x87, 0xc1, 0x70, 0xf1, 0xeb, 0xfb, 0x47, 0x0a, 0xf0, 0xde, 0x67, 0xb7, 0x94, 0xcd, 0x36, 0x43, 0xa5, 0x49, 0x43, 0x67, 0xc3, 0xee, 0x3c, 0x6b, 0xec, 0xd0, 0x1a, 0xf4, 0xad, 0xef, 0x06, 0x4a, 0xe8, 0x46, 0x24, 0xd7, 0x93, 0xbf, 0xf0, 0xe3, 0x81, 0x61, 0xec, 0xea, 0x64, 0xfe, 0x67, 0xeb, 0xc7, + /* (2^ 9)P */ 0x95, 0x45, 0x79, 0xcf, 0x2c, 0xfd, 0x9b, 0xfe, 0x84, 0x46, 0x4b, 0x8f, 0xa1, 0xcf, 0xc3, 0x04, 0x94, 0x78, 0xdb, 0xc9, 0xa6, 0x01, 0x75, 0xa4, 0xb4, 0x93, 0x72, 0x43, 0xa7, 0x7d, 0xda, 0x31, 0x38, 0x54, 0xab, 0x4e, 0x3f, 0x89, 0xa6, 0xab, 0x57, 0xc0, 0x16, 0x65, 0xdb, 0x92, 0x96, 0xe4, 0xc8, 0xae, 0xe7, 0x4c, 0x7a, 0xeb, 0xbb, 0x5a, + /* (2^ 10)P */ 0xbe, 0xfe, 0x86, 0xc3, 0x97, 0xe0, 0x6a, 0x18, 0x20, 0x21, 0xca, 0x22, 0x55, 0xa1, 0xeb, 0xf5, 0x74, 0xe5, 0xc9, 0x59, 0xa7, 0x92, 0x65, 0x15, 0x08, 0x71, 0xd1, 0x09, 0x7e, 0x83, 0xfc, 0xbc, 0x5a, 0x93, 0x38, 0x0d, 0x43, 0x42, 0xfd, 0x76, 0x30, 0xe8, 0x63, 0x60, 0x09, 0x8d, 0x6c, 0xd3, 0xf8, 0x56, 0x3d, 0x68, 0x47, 0xab, 0xa0, 0x1d, + /* (2^ 11)P */ 0x38, 0x50, 0x1c, 0xb1, 0xac, 0x88, 0x8f, 0x38, 0xe3, 0x69, 0xe6, 0xfc, 0x4f, 0x8f, 0xe1, 0x9b, 0xb1, 0x1a, 0x09, 0x39, 0x19, 0xdf, 0xcd, 0x98, 0x7b, 0x64, 0x42, 0xf6, 0x11, 0xea, 0xc7, 0xe8, 0x92, 0x65, 0x00, 0x2c, 0x75, 0xb5, 0x94, 0x1e, 0x5b, 0xa6, 0x66, 0x81, 0x77, 0xf3, 0x39, 0x94, 0xac, 0xbd, 0xe4, 0x2a, 0x66, 0x84, 0x9c, 0x60, + /* (2^ 12)P */ 0xb5, 0xb6, 0xd9, 0x03, 0x67, 0xa4, 0xa8, 0x0a, 0x4a, 0x2b, 0x9d, 0xfa, 0x13, 0xe1, 0x99, 0x25, 0x4a, 0x5c, 0x67, 0xb9, 0xb2, 0xb7, 0xdd, 0x1e, 0xaf, 0xeb, 0x63, 0x41, 0xb6, 0xb9, 0xa0, 0x87, 0x0a, 0xe0, 0x06, 0x07, 0xaa, 0x97, 0xf8, 0xf9, 0x38, 0x4f, 0xdf, 0x0c, 0x40, 0x7c, 0xc3, 0x98, 0xa9, 0x74, 0xf1, 0x5d, 0xda, 0xd1, 0xc0, 0x0a, + /* (2^ 13)P */ 0xf2, 0x0a, 0xab, 0xab, 0x94, 0x50, 0xf0, 0xa3, 0x6f, 0xc6, 0x66, 0xba, 0xa6, 0xdc, 0x44, 0xdd, 0xd6, 0x08, 0xf4, 0xd3, 0xed, 0xb1, 0x40, 0x93, 0xee, 0xf6, 0xb8, 0x8e, 0xb4, 0x7c, 0xb9, 0x82, 0xc9, 0x9d, 0x45, 0x3b, 0x8e, 0x10, 0xcb, 0x70, 0x1e, 0xba, 0x3c, 0x62, 0x50, 0xda, 0xa9, 0x93, 0xb5, 0xd7, 0xd0, 0x6f, 0x29, 0x52, 0x95, 0xae, + /* (2^ 14)P */ 0x14, 0x68, 0x69, 0x23, 0xa8, 0x44, 0x87, 0x9e, 0x22, 0x91, 0xe8, 0x92, 0xdf, 0xf7, 0xae, 0xba, 0x1c, 0x96, 0xe1, 0xc3, 0x94, 0xed, 0x6c, 0x95, 0xae, 0x96, 0xa7, 0x15, 0x9f, 0xf1, 0x17, 0x11, 0x92, 0x42, 0xd5, 0xcd, 0x18, 0xe7, 0xa9, 0xb5, 0x2f, 0xcd, 0xde, 0x6c, 0xc9, 0x7d, 0xfc, 0x7e, 0xbd, 0x7f, 0x10, 0x3d, 0x01, 0x00, 0x8d, 0x95, + /* (2^ 15)P */ 0x3b, 0x76, 0x72, 0xae, 0xaf, 0x84, 0xf2, 0xf7, 0xd1, 0x6d, 0x13, 0x9c, 0x47, 0xe1, 0xb7, 0xa3, 0x19, 0x16, 0xee, 0x75, 0x45, 0xf6, 0x1a, 0x7b, 0x78, 0x49, 0x79, 0x05, 0x86, 0xf0, 0x7f, 0x9f, 0xfc, 0xc4, 0xbd, 0x86, 0xf3, 0x41, 0xa7, 0xfe, 0x01, 0xd5, 0x67, 0x16, 0x10, 0x5b, 0xa5, 0x16, 0xf3, 0x7f, 0x60, 0xce, 0xd2, 0x0c, 0x8e, 0x4b, + /* (2^ 16)P */ 0x4a, 0x07, 0x99, 0x4a, 0x0f, 0x74, 0x91, 0x14, 0x68, 0xb9, 0x48, 0xb7, 0x44, 0x77, 0x9b, 0x4a, 0xe0, 0x68, 0x0e, 0x43, 0x4d, 0x98, 0x98, 0xbf, 0xa8, 0x3a, 0xb7, 0x6d, 0x2a, 0x9a, 0x77, 0x5f, 0x62, 0xf5, 0x6b, 0x4a, 0xb7, 0x7d, 0xe5, 0x09, 0x6b, 0xc0, 0x8b, 0x9c, 0x88, 0x37, 0x33, 0xf2, 0x41, 0xac, 0x22, 0x1f, 0xcf, 0x3b, 0x82, 0x34, + /* (2^ 17)P */ 0x00, 0xc3, 0x78, 0x42, 0x32, 0x2e, 0xdc, 0xda, 0xb1, 0x96, 0x21, 0xa4, 0xe4, 0xbb, 0xe9, 0x9d, 0xbb, 0x0f, 0x93, 0xed, 0x26, 0x3d, 0xb5, 0xdb, 0x94, 0x31, 0x37, 0x07, 0xa2, 0xb2, 0xd5, 0x99, 0x0d, 0x93, 0xe1, 0xce, 0x3f, 0x0b, 0x96, 0x82, 0x47, 0xfe, 0x60, 0x6f, 0x8f, 0x61, 0x88, 0xd7, 0x05, 0x95, 0x0b, 0x46, 0x06, 0xb7, 0x32, 0x06, + /* (2^ 18)P */ 0x44, 0xf5, 0x34, 0xdf, 0x2f, 
0x9c, 0x5d, 0x9f, 0x53, 0x5c, 0x42, 0x8f, 0xc9, 0xdc, 0xd8, 0x40, 0xa2, 0xe7, 0x6a, 0x4a, 0x05, 0xf7, 0x86, 0x77, 0x2b, 0xae, 0x37, 0xed, 0x48, 0xfb, 0xf7, 0x62, 0x7c, 0x17, 0x59, 0x92, 0x41, 0x61, 0x93, 0x38, 0x30, 0xd1, 0xef, 0x54, 0x54, 0x03, 0x17, 0x57, 0x91, 0x15, 0x11, 0x33, 0xb5, 0xfa, 0xfb, 0x17, + /* (2^ 19)P */ 0x29, 0xbb, 0xd4, 0xb4, 0x9c, 0xf1, 0x72, 0x94, 0xce, 0x6a, 0x29, 0xa8, 0x89, 0x18, 0x19, 0xf7, 0xb7, 0xcc, 0xee, 0x9a, 0x02, 0xe3, 0xc0, 0xb1, 0xe0, 0xee, 0x83, 0x78, 0xb4, 0x9e, 0x07, 0x87, 0xdf, 0xb0, 0x82, 0x26, 0x4e, 0xa4, 0x0c, 0x33, 0xaf, 0x40, 0x59, 0xb6, 0xdd, 0x52, 0x45, 0xf0, 0xb4, 0xf6, 0xe8, 0x4e, 0x4e, 0x79, 0x1a, 0x5d, + /* (2^ 20)P */ 0x27, 0x33, 0x4d, 0x4c, 0x6b, 0x4f, 0x75, 0xb1, 0xbc, 0x1f, 0xab, 0x5b, 0x2b, 0xf0, 0x1c, 0x57, 0x86, 0xdd, 0xfd, 0x60, 0xb0, 0x8c, 0xe7, 0x9a, 0xe5, 0x5c, 0xeb, 0x11, 0x3a, 0xda, 0x22, 0x25, 0x99, 0x06, 0x8d, 0xf4, 0xaf, 0x29, 0x7a, 0xc9, 0xe5, 0xd2, 0x16, 0x9e, 0xd4, 0x63, 0x1d, 0x64, 0xa6, 0x47, 0x96, 0x37, 0x6f, 0x93, 0x2c, 0xcc, + /* (2^ 21)P */ 0xc1, 0x94, 0x74, 0x86, 0x75, 0xf2, 0x91, 0x58, 0x23, 0x85, 0x63, 0x76, 0x54, 0xc7, 0xb4, 0x8c, 0xbc, 0x4e, 0xc4, 0xa7, 0xba, 0xa0, 0x55, 0x26, 0x71, 0xd5, 0x33, 0x72, 0xc9, 0xad, 0x1e, 0xf9, 0x5d, 0x78, 0x70, 0x93, 0x4e, 0x85, 0xfc, 0x39, 0x06, 0x73, 0x76, 0xff, 0xe8, 0x64, 0x69, 0x42, 0x45, 0xb2, 0x69, 0xb5, 0x32, 0xe7, 0x2c, 0xde, + /* (2^ 22)P */ 0xde, 0x16, 0xd8, 0x33, 0x49, 0x32, 0xe9, 0x0e, 0x3a, 0x60, 0xee, 0x2e, 0x24, 0x75, 0xe3, 0x9c, 0x92, 0x07, 0xdb, 0xad, 0x92, 0xf5, 0x11, 0xdf, 0xdb, 0xb0, 0x17, 0x5c, 0xd6, 0x1a, 0x70, 0x00, 0xb7, 0xe2, 0x18, 0xec, 0xdc, 0xc2, 0x02, 0x93, 0xb3, 0xc8, 0x3f, 0x4f, 0x1b, 0x96, 0xe6, 0x33, 0x8c, 0xfb, 0xcc, 0xa5, 0x4e, 0xe8, 0xe7, 0x11, + /* (2^ 23)P */ 0x05, 0x7a, 0x74, 0x52, 0xf8, 0xdf, 0x0d, 0x7c, 0x6a, 0x1a, 0x4e, 0x9a, 0x02, 0x1d, 0xae, 0x77, 0xf8, 0x8e, 0xf9, 0xa2, 0x38, 0x54, 0x50, 0xb2, 0x2c, 0x08, 0x9d, 0x9b, 0x9f, 0xfb, 0x2b, 0x06, 0xde, 0x9d, 0xc2, 0x03, 0x0b, 0x22, 0x2b, 0x10, 0x5b, 0x3a, 0x73, 0x29, 0x8e, 0x3e, 0x37, 0x08, 0x2c, 0x3b, 0xf8, 0x80, 0xc1, 0x66, 0x1e, 0x98, + /* (2^ 24)P */ 0xd8, 0xd6, 0x3e, 0xcd, 0x63, 0x8c, 0x2b, 0x41, 0x81, 0xc0, 0x0c, 0x06, 0x87, 0xd6, 0xe7, 0x92, 0xfe, 0xf1, 0x0c, 0x4a, 0x84, 0x5b, 0xaf, 0x40, 0x53, 0x6f, 0x60, 0xd6, 0x6b, 0x76, 0x4b, 0xc2, 0xad, 0xc9, 0xb6, 0xb6, 0x6a, 0xa2, 0xb3, 0xf5, 0xf5, 0xc2, 0x55, 0x83, 0xb2, 0xd3, 0xe9, 0x41, 0x6c, 0x63, 0x51, 0xb8, 0x81, 0x74, 0xc8, 0x2c, + /* (2^ 25)P */ 0xb2, 0xaf, 0x1c, 0xee, 0x07, 0xb0, 0x58, 0xa8, 0x2c, 0x6a, 0xc9, 0x2d, 0x62, 0x28, 0x75, 0x0c, 0x40, 0xb6, 0x11, 0x33, 0x96, 0x80, 0x28, 0x6d, 0xd5, 0x9e, 0x87, 0x90, 0x01, 0x66, 0x1d, 0x1c, 0xf8, 0xb4, 0x92, 0xac, 0x38, 0x18, 0x05, 0xc2, 0x4c, 0x4b, 0x54, 0x7d, 0x80, 0x46, 0x87, 0x2d, 0x99, 0x8e, 0x70, 0x80, 0x69, 0x71, 0x8b, 0xed, + /* (2^ 26)P */ 0x37, 0xa7, 0x6b, 0x71, 0x36, 0x75, 0x8e, 0xff, 0x0f, 0x42, 0xda, 0x5a, 0x46, 0xa6, 0x97, 0x79, 0x7e, 0x30, 0xb3, 0x8f, 0xc7, 0x3a, 0xa0, 0xcb, 0x1d, 0x9c, 0x78, 0x77, 0x36, 0xc2, 0xe7, 0xf4, 0x2f, 0x29, 0x07, 0xb1, 0x07, 0xfd, 0xed, 0x1b, 0x39, 0x77, 0x06, 0x38, 0x77, 0x0f, 0x50, 0x31, 0x12, 0xbf, 0x92, 0xbf, 0x72, 0x79, 0x54, 0xa9, + /* (2^ 27)P */ 0xbd, 0x4d, 0x46, 0x6b, 0x1a, 0x80, 0x46, 0x2d, 0xed, 0xfd, 0x64, 0x6d, 0x94, 0xbc, 0x4a, 0x6e, 0x0c, 0x12, 0xf6, 0x12, 0xab, 0x54, 0x88, 0xd3, 0x85, 0xac, 0x51, 0xae, 0x6f, 0xca, 0xc4, 0xb7, 0xec, 0x22, 0x54, 0x6d, 0x80, 0xb2, 0x1c, 0x63, 0x33, 0x76, 0x6b, 0x8e, 0x6d, 0x59, 0xcd, 0x73, 0x92, 0x5f, 0xff, 0xad, 0x10, 0x35, 0x70, 0x5f, + /* (2^ 28)P */ 0xb3, 0x84, 0xde, 0xc8, 0x04, 0x43, 0x63, 0xfa, 0x29, 
0xd9, 0xf0, 0x69, 0x65, 0x5a, 0x0c, 0xe8, 0x2e, 0x0b, 0xfe, 0xb0, 0x7a, 0x42, 0xb3, 0xc3, 0xfc, 0xe6, 0xb8, 0x92, 0x29, 0xae, 0xed, 0xec, 0xd5, 0xe8, 0x4a, 0xa1, 0xbd, 0x3b, 0xd3, 0xc0, 0x07, 0xab, 0x65, 0x65, 0x35, 0x9a, 0xa6, 0x5e, 0x78, 0x18, 0x76, 0x1c, 0x15, 0x49, 0xe6, 0x75, + /* (2^ 29)P */ 0x45, 0xb3, 0x92, 0xa9, 0xc3, 0xb8, 0x11, 0x68, 0x64, 0x3a, 0x83, 0x5d, 0xa8, 0x94, 0x6a, 0x9d, 0xaa, 0x27, 0x9f, 0x98, 0x5d, 0xc0, 0x29, 0xf0, 0xc0, 0x4b, 0x14, 0x3c, 0x05, 0xe7, 0xf8, 0xbd, 0x38, 0x22, 0x96, 0x75, 0x65, 0x5e, 0x0d, 0x3f, 0xbb, 0x6f, 0xe8, 0x3f, 0x96, 0x76, 0x9f, 0xba, 0xd9, 0x44, 0x92, 0x96, 0x22, 0xe7, 0x52, 0xe7, + /* (2^ 30)P */ 0xf4, 0xa3, 0x95, 0x90, 0x47, 0xdf, 0x7d, 0xdc, 0xf4, 0x13, 0x87, 0x67, 0x7d, 0x4f, 0x9d, 0xa0, 0x00, 0x46, 0x72, 0x08, 0xc3, 0xa2, 0x7a, 0x3e, 0xe7, 0x6d, 0x52, 0x7c, 0x11, 0x36, 0x50, 0x83, 0x89, 0x64, 0xcb, 0x1f, 0x08, 0x83, 0x46, 0xcb, 0xac, 0xa6, 0xd8, 0x9c, 0x1b, 0xe8, 0x05, 0x47, 0xc7, 0x26, 0x06, 0x83, 0x39, 0xe9, 0xb1, 0x1c, + /* (2^ 31)P */ 0x11, 0xe8, 0xc8, 0x42, 0xbf, 0x30, 0x9c, 0xa3, 0xf1, 0x85, 0x96, 0x95, 0x4f, 0x4f, 0x52, 0xa2, 0xf5, 0x8b, 0x68, 0x24, 0x16, 0xac, 0x9b, 0xa9, 0x27, 0x28, 0x0e, 0x84, 0x03, 0x46, 0x22, 0x5f, 0xf7, 0x0d, 0xa6, 0x85, 0x88, 0xc1, 0x45, 0x4b, 0x85, 0x1a, 0x10, 0x7f, 0xc9, 0x94, 0x20, 0xb0, 0x04, 0x28, 0x12, 0x30, 0xb9, 0xe6, 0x40, 0x6b, + /* (2^ 32)P */ 0xac, 0x1b, 0x57, 0xb6, 0x42, 0xdb, 0x81, 0x8d, 0x76, 0xfd, 0x9b, 0x1c, 0x29, 0x30, 0xd5, 0x3a, 0xcc, 0x53, 0xd9, 0x26, 0x7a, 0x0f, 0x9c, 0x2e, 0x79, 0xf5, 0x62, 0xeb, 0x61, 0x9d, 0x9b, 0x80, 0x39, 0xcd, 0x60, 0x2e, 0x1f, 0x08, 0x22, 0xbc, 0x19, 0xb3, 0x2a, 0x43, 0x44, 0xf2, 0x4e, 0x66, 0xf4, 0x36, 0xa6, 0xa7, 0xbc, 0xa4, 0x15, 0x7e, + /* (2^ 33)P */ 0xc1, 0x90, 0x8a, 0xde, 0xff, 0x78, 0xc3, 0x73, 0x16, 0xee, 0x76, 0xa0, 0x84, 0x60, 0x8d, 0xe6, 0x82, 0x0f, 0xde, 0x4e, 0xc5, 0x99, 0x34, 0x06, 0x90, 0x44, 0x55, 0xf8, 0x91, 0xd8, 0xe1, 0xe4, 0x2c, 0x8a, 0xde, 0x94, 0x1e, 0x78, 0x25, 0x3d, 0xfd, 0xd8, 0x59, 0x7d, 0xaf, 0x6e, 0xbe, 0x96, 0xbe, 0x3c, 0x16, 0x23, 0x0f, 0x4c, 0xa4, 0x28, + /* (2^ 34)P */ 0xba, 0x11, 0x35, 0x57, 0x03, 0xb6, 0xf4, 0x24, 0x89, 0xb8, 0x5a, 0x0d, 0x50, 0x9c, 0xaa, 0x51, 0x7f, 0xa4, 0x0e, 0xfc, 0x71, 0xb3, 0x3b, 0xf1, 0x96, 0x50, 0x23, 0x15, 0xf5, 0xf5, 0xd4, 0x23, 0xdc, 0x8b, 0x26, 0x9e, 0xae, 0xb7, 0x50, 0xcd, 0xc4, 0x25, 0xf6, 0x75, 0x40, 0x9c, 0x37, 0x79, 0x33, 0x60, 0xd4, 0x4b, 0x13, 0x32, 0xee, 0xe2, + /* (2^ 35)P */ 0x43, 0xb8, 0x56, 0x59, 0xf0, 0x68, 0x23, 0xb3, 0xea, 0x70, 0x58, 0x4c, 0x1e, 0x5a, 0x16, 0x54, 0x03, 0xb2, 0xf4, 0x73, 0xb6, 0xd9, 0x5c, 0x9c, 0x6f, 0xcf, 0x82, 0x2e, 0x54, 0x15, 0x46, 0x2c, 0xa3, 0xda, 0x4e, 0x87, 0xf5, 0x2b, 0xba, 0x91, 0xa3, 0xa0, 0x89, 0xba, 0x48, 0x2b, 0xfa, 0x64, 0x02, 0x7f, 0x78, 0x03, 0xd1, 0xe8, 0x3b, 0xe9, + /* (2^ 36)P */ 0x15, 0xa4, 0x71, 0xd4, 0x0c, 0x24, 0xe9, 0x07, 0xa1, 0x43, 0xf4, 0x7f, 0xbb, 0xa2, 0xa6, 0x6b, 0xfa, 0xb7, 0xea, 0x58, 0xd1, 0x96, 0xb0, 0x24, 0x5c, 0xc7, 0x37, 0x4e, 0x60, 0x0f, 0x40, 0xf2, 0x2f, 0x44, 0x70, 0xea, 0x80, 0x63, 0xfe, 0xfc, 0x46, 0x59, 0x12, 0x27, 0xb5, 0x27, 0xfd, 0xb7, 0x73, 0x0b, 0xca, 0x8b, 0xc2, 0xd3, 0x71, 0x08, + /* (2^ 37)P */ 0x26, 0x0e, 0xd7, 0x52, 0x6f, 0xf1, 0xf2, 0x9d, 0xb8, 0x3d, 0xbd, 0xd4, 0x75, 0x97, 0xd8, 0xbf, 0xa8, 0x86, 0x96, 0xa5, 0x80, 0xa0, 0x45, 0x75, 0xf6, 0x77, 0x71, 0xdb, 0x77, 0x96, 0x55, 0x99, 0x31, 0xd0, 0x4f, 0x34, 0xf4, 0x35, 0x39, 0x41, 0xd3, 0x7d, 0xf7, 0xe2, 0x74, 0xde, 0xbe, 0x5b, 0x1f, 0x39, 0x10, 0x21, 0xa3, 0x4d, 0x3b, 0xc8, + /* (2^ 38)P */ 0x04, 0x00, 0x2a, 0x45, 0xb2, 0xaf, 0x9b, 0x18, 0x6a, 0xeb, 0x96, 0x28, 0xa4, 
0x77, 0xd0, 0x13, 0xcf, 0x17, 0x65, 0xe8, 0xc5, 0x81, 0x28, 0xad, 0x39, 0x7a, 0x0b, 0xaa, 0x55, 0x2b, 0xf3, 0xfc, 0x86, 0x40, 0xad, 0x0d, 0x1e, 0x28, 0xa2, 0x2d, 0xc5, 0xd6, 0x04, 0x15, 0xa2, 0x30, 0x3d, 0x12, 0x8e, 0xd6, 0xb5, 0xf7, 0x69, 0xbb, 0x84, 0x20, + /* (2^ 39)P */ 0xd7, 0x7a, 0x77, 0x2c, 0xfb, 0x81, 0x80, 0xe9, 0x1e, 0xc6, 0x36, 0x31, 0x79, 0xc3, 0x7c, 0xa9, 0x57, 0x6b, 0xb5, 0x70, 0xfb, 0xe4, 0xa1, 0xff, 0xfd, 0x21, 0xa5, 0x7c, 0xfa, 0x44, 0xba, 0x0d, 0x96, 0x3d, 0xc4, 0x5c, 0x39, 0x52, 0x87, 0xd7, 0x22, 0x0f, 0x52, 0x88, 0x91, 0x87, 0x96, 0xac, 0xfa, 0x3b, 0xdf, 0xdc, 0x83, 0x8c, 0x99, 0x29, + /* (2^ 40)P */ 0x98, 0x6b, 0x3a, 0x8d, 0x83, 0x17, 0xe1, 0x62, 0xd8, 0x80, 0x4c, 0x97, 0xce, 0x6b, 0xaa, 0x10, 0xa7, 0xc4, 0xe9, 0xeb, 0xa5, 0xfb, 0xc9, 0xdd, 0x2d, 0xeb, 0xfc, 0x9a, 0x71, 0xcd, 0x68, 0x6e, 0xc0, 0x35, 0x64, 0x62, 0x1b, 0x95, 0x12, 0xe8, 0x53, 0xec, 0xf0, 0xf4, 0x86, 0x86, 0x78, 0x18, 0xc4, 0xc6, 0xbc, 0x5a, 0x59, 0x8f, 0x7c, 0x7e, + /* (2^ 41)P */ 0x7f, 0xd7, 0x1e, 0xc5, 0x83, 0xdc, 0x1f, 0xbe, 0x0b, 0xcf, 0x2e, 0x01, 0x01, 0xed, 0xac, 0x17, 0x3b, 0xed, 0xa4, 0x30, 0x96, 0x0e, 0x14, 0x7e, 0x19, 0x2b, 0xa5, 0x67, 0x1e, 0xb3, 0x34, 0x03, 0xa8, 0xbb, 0x0a, 0x7d, 0x08, 0x2d, 0xd5, 0x53, 0x19, 0x6f, 0x13, 0xd5, 0xc0, 0x90, 0x8a, 0xcc, 0xc9, 0x5c, 0xab, 0x24, 0xd7, 0x03, 0xf6, 0x57, + /* (2^ 42)P */ 0x49, 0xcb, 0xb4, 0x96, 0x5f, 0xa6, 0xf8, 0x71, 0x6f, 0x59, 0xad, 0x05, 0x24, 0x2d, 0xaf, 0x67, 0xa8, 0xbe, 0x95, 0xdf, 0x0d, 0x28, 0x5a, 0x7f, 0x6e, 0x87, 0x8c, 0x6e, 0x67, 0x0c, 0xf4, 0xe0, 0x1c, 0x30, 0xc2, 0x66, 0xae, 0x20, 0xa1, 0x34, 0xec, 0x9c, 0xbc, 0xae, 0x3d, 0xa1, 0x28, 0x28, 0x95, 0x1d, 0xc9, 0x3a, 0xa8, 0xfd, 0xfc, 0xa1, + /* (2^ 43)P */ 0xe2, 0x2b, 0x9d, 0xed, 0x02, 0x99, 0x67, 0xbb, 0x2e, 0x16, 0x62, 0x05, 0x70, 0xc7, 0x27, 0xb9, 0x1c, 0x3f, 0xf2, 0x11, 0x01, 0xd8, 0x51, 0xa4, 0x18, 0x92, 0xa9, 0x5d, 0xfb, 0xa9, 0xe4, 0x42, 0xba, 0x38, 0x34, 0x1a, 0x4a, 0xc5, 0x6a, 0x37, 0xde, 0xa7, 0x0c, 0xb4, 0x7e, 0x7f, 0xde, 0xa6, 0xee, 0xcd, 0x55, 0x57, 0x05, 0x06, 0xfd, 0x5d, + /* (2^ 44)P */ 0x2f, 0x32, 0xcf, 0x2e, 0x2c, 0x7b, 0xbe, 0x9a, 0x0c, 0x57, 0x35, 0xf8, 0x87, 0xda, 0x9c, 0xec, 0x48, 0xf2, 0xbb, 0xe2, 0xda, 0x10, 0x58, 0x20, 0xc6, 0xd3, 0x87, 0xe9, 0xc7, 0x26, 0xd1, 0x9a, 0x46, 0x87, 0x90, 0xda, 0xdc, 0xde, 0xc3, 0xb3, 0xf2, 0xe8, 0x6f, 0x4a, 0xe6, 0xe8, 0x9d, 0x98, 0x36, 0x20, 0x03, 0x47, 0x15, 0x3f, 0x64, 0x59, + /* (2^ 45)P */ 0xd4, 0x71, 0x49, 0x0a, 0x67, 0x97, 0xaa, 0x3f, 0xf4, 0x1b, 0x3a, 0x6e, 0x5e, 0x17, 0xcc, 0x0a, 0x8f, 0x81, 0x6a, 0x41, 0x38, 0x77, 0x40, 0x8a, 0x11, 0x42, 0x62, 0xd2, 0x50, 0x32, 0x79, 0x78, 0x28, 0xc2, 0x2e, 0x10, 0x01, 0x94, 0x30, 0x4f, 0x7f, 0x18, 0x17, 0x56, 0x85, 0x4e, 0xad, 0xf7, 0xcb, 0x87, 0x3c, 0x3f, 0x50, 0x2c, 0xc0, 0xba, + /* (2^ 46)P */ 0xbc, 0x30, 0x8e, 0x65, 0x8e, 0x57, 0x5b, 0x38, 0x7a, 0xd4, 0x95, 0x52, 0x7a, 0x32, 0x59, 0x69, 0xcd, 0x9d, 0x47, 0x34, 0x5b, 0x55, 0xa5, 0x24, 0x60, 0xdd, 0xc0, 0xc1, 0x62, 0x73, 0x44, 0xae, 0x4c, 0x9c, 0x65, 0x55, 0x1b, 0x9d, 0x8a, 0x29, 0xb0, 0x1a, 0x52, 0xa8, 0xf1, 0xe6, 0x9a, 0xb3, 0xf6, 0xa3, 0xc9, 0x0a, 0x70, 0x7d, 0x0f, 0xee, + /* (2^ 47)P */ 0x77, 0xd3, 0xe5, 0x8e, 0xfa, 0x00, 0xeb, 0x1b, 0x7f, 0xdc, 0x68, 0x3f, 0x92, 0xbd, 0xb7, 0x0b, 0xb7, 0xb5, 0x24, 0xdf, 0xc5, 0x67, 0x53, 0xd4, 0x36, 0x79, 0xc4, 0x7b, 0x57, 0xbc, 0x99, 0x97, 0x60, 0xef, 0xe4, 0x01, 0xa1, 0xa7, 0xaa, 0x12, 0x36, 0x29, 0xb1, 0x03, 0xc2, 0x83, 0x1c, 0x2b, 0x83, 0xef, 0x2e, 0x2c, 0x23, 0x92, 0xfd, 0xd1, + /* (2^ 48)P */ 0x94, 0xef, 0x03, 0x59, 0xfa, 0x8a, 0x18, 0x76, 0xee, 0x58, 0x08, 0x4d, 0x44, 0xce, 0xf1, 0x52, 0x33, 
0x49, 0xf6, 0x69, 0x71, 0xe3, 0xa9, 0xbc, 0x86, 0xe3, 0x43, 0xde, 0x33, 0x7b, 0x90, 0x8b, 0x3e, 0x7d, 0xd5, 0x4a, 0xf0, 0x23, 0x99, 0xa6, 0xea, 0x5f, 0x08, 0xe5, 0xb9, 0x49, 0x8b, 0x0d, 0x6a, 0x21, 0xab, 0x07, 0x62, 0xcd, 0xc4, 0xbe, + /* (2^ 49)P */ 0x61, 0xbf, 0x70, 0x14, 0xfa, 0x4e, 0x9e, 0x7c, 0x0c, 0xf8, 0xb2, 0x48, 0x71, 0x62, 0x83, 0xd6, 0xd1, 0xdc, 0x9c, 0x29, 0x66, 0xb1, 0x34, 0x9c, 0x8d, 0xe6, 0x88, 0xaf, 0xbe, 0xdc, 0x4d, 0xeb, 0xb0, 0xe7, 0x28, 0xae, 0xb2, 0x05, 0x56, 0xc6, 0x0e, 0x10, 0x26, 0xab, 0x2c, 0x59, 0x72, 0x03, 0x66, 0xfe, 0x8f, 0x2c, 0x51, 0x2d, 0xdc, 0xae, + /* (2^ 50)P */ 0xdc, 0x63, 0xf1, 0x8b, 0x5c, 0x65, 0x0b, 0xf1, 0xa6, 0x22, 0xe2, 0xd9, 0xdb, 0x49, 0xb1, 0x3c, 0x47, 0xc2, 0xfe, 0xac, 0x86, 0x07, 0x52, 0xec, 0xb0, 0x08, 0x69, 0xfb, 0xd1, 0x06, 0xdc, 0x48, 0x5c, 0x3d, 0xb2, 0x4d, 0xb8, 0x1a, 0x4e, 0xda, 0xb9, 0xc1, 0x2b, 0xab, 0x4b, 0x62, 0x81, 0x21, 0x9a, 0xfc, 0x3d, 0x39, 0x83, 0x11, 0x36, 0xeb, + /* (2^ 51)P */ 0x94, 0xf3, 0x17, 0xef, 0xf9, 0x60, 0x54, 0xc3, 0xd7, 0x27, 0x35, 0xc5, 0x98, 0x5e, 0xf6, 0x63, 0x6c, 0xa0, 0x4a, 0xd3, 0xa3, 0x98, 0xd9, 0x42, 0xe3, 0xf1, 0xf8, 0x81, 0x96, 0xa9, 0xea, 0x6d, 0x4b, 0x8e, 0x33, 0xca, 0x94, 0x0d, 0xa0, 0xf7, 0xbb, 0x64, 0xa3, 0x36, 0x6f, 0xdc, 0x5a, 0x94, 0x42, 0xca, 0x06, 0xb2, 0x2b, 0x9a, 0x9f, 0x71, + /* (2^ 52)P */ 0xec, 0xdb, 0xa6, 0x1f, 0xdf, 0x15, 0x36, 0xa3, 0xda, 0x8a, 0x7a, 0xb6, 0xa7, 0xe3, 0xaf, 0x52, 0xe0, 0x8d, 0xe8, 0xf2, 0x44, 0x20, 0xeb, 0xa1, 0x20, 0xc4, 0x65, 0x3c, 0x7c, 0x6c, 0x49, 0xed, 0x2f, 0x66, 0x23, 0x68, 0x61, 0x91, 0x40, 0x9f, 0x50, 0x19, 0xd1, 0x84, 0xa7, 0xe2, 0xed, 0x34, 0x37, 0xe3, 0xe4, 0x11, 0x7f, 0x87, 0x55, 0x0f, + /* (2^ 53)P */ 0xb3, 0xa1, 0x0f, 0xb0, 0x48, 0xc0, 0x4d, 0x96, 0xa7, 0xcf, 0x5a, 0x81, 0xb8, 0x4a, 0x46, 0xef, 0x0a, 0xd3, 0x40, 0x7e, 0x02, 0xe3, 0x63, 0xaa, 0x50, 0xd1, 0x2a, 0x37, 0x22, 0x4a, 0x7f, 0x4f, 0xb6, 0xf9, 0x01, 0x82, 0x78, 0x3d, 0x93, 0x14, 0x11, 0x8a, 0x90, 0x60, 0xcd, 0x45, 0x4e, 0x7b, 0x42, 0xb9, 0x3e, 0x6e, 0x68, 0x1f, 0x36, 0x41, + /* (2^ 54)P */ 0x13, 0x73, 0x0e, 0x4f, 0x79, 0x93, 0x9e, 0x29, 0x70, 0x7b, 0x4a, 0x59, 0x1a, 0x9a, 0xf4, 0x55, 0x08, 0xf0, 0xdb, 0x17, 0x58, 0xec, 0x64, 0xad, 0x7f, 0x29, 0xeb, 0x3f, 0x85, 0x4e, 0x60, 0x28, 0x98, 0x1f, 0x73, 0x4e, 0xe6, 0xa8, 0xab, 0xd5, 0xd6, 0xfc, 0xa1, 0x36, 0x6d, 0x15, 0xc6, 0x13, 0x83, 0xa0, 0xc2, 0x6e, 0xd9, 0xdb, 0xc9, 0xcc, + /* (2^ 55)P */ 0xff, 0xd8, 0x52, 0xa3, 0xdc, 0x99, 0xcf, 0x3e, 0x19, 0xb3, 0x68, 0xd0, 0xb5, 0x0d, 0xb8, 0xee, 0x3f, 0xef, 0x6e, 0xc0, 0x38, 0x28, 0x44, 0x92, 0x78, 0x91, 0x1a, 0x08, 0x78, 0x6c, 0x65, 0x24, 0xf3, 0xa2, 0x3d, 0xf2, 0xe5, 0x79, 0x62, 0x69, 0x29, 0xf4, 0x22, 0xc5, 0xdb, 0x6a, 0xae, 0xf4, 0x44, 0xa3, 0x6f, 0xc7, 0x86, 0xab, 0xef, 0xef, + /* (2^ 56)P */ 0xbf, 0x54, 0x9a, 0x09, 0x5d, 0x17, 0xd0, 0xde, 0xfb, 0xf5, 0xca, 0xff, 0x13, 0x20, 0x88, 0x82, 0x3a, 0xe2, 0xd0, 0x3b, 0xfb, 0x05, 0x76, 0xd1, 0xc0, 0x02, 0x71, 0x3b, 0x94, 0xe8, 0xc9, 0x84, 0xcf, 0xa4, 0xe9, 0x28, 0x7b, 0xf5, 0x09, 0xc3, 0x2b, 0x22, 0x40, 0xf1, 0x68, 0x24, 0x24, 0x7d, 0x9f, 0x6e, 0xcd, 0xfe, 0xb0, 0x19, 0x61, 0xf5, + /* (2^ 57)P */ 0xe8, 0x63, 0x51, 0xb3, 0x95, 0x6b, 0x7b, 0x74, 0x92, 0x52, 0x45, 0xa4, 0xed, 0xea, 0x0e, 0x0d, 0x2b, 0x01, 0x1e, 0x2c, 0xbc, 0x91, 0x06, 0x69, 0xdb, 0x1f, 0xb5, 0x77, 0x1d, 0x56, 0xf5, 0xb4, 0x02, 0x80, 0x49, 0x56, 0x12, 0xce, 0x86, 0x05, 0xc9, 0xd9, 0xae, 0xf3, 0x6d, 0xe6, 0x3f, 0x40, 0x52, 0xe9, 0x49, 0x2b, 0x31, 0x06, 0x86, 0x14, + /* (2^ 58)P */ 0xf5, 0x09, 0x3b, 0xd2, 0xff, 0xdf, 0x11, 0xa5, 0x1c, 0x99, 0xe8, 0x1b, 0xa4, 0x2c, 0x7d, 0x8e, 0xc8, 0xf7, 0x03, 0x46, 0xfa, 
0xb6, 0xde, 0x73, 0x91, 0x7e, 0x5a, 0x7a, 0xd7, 0x9a, 0x5b, 0x80, 0x24, 0x62, 0x5e, 0x92, 0xf1, 0xa3, 0x45, 0xa3, 0x43, 0x92, 0x8a, 0x2a, 0x5b, 0x0c, 0xb4, 0xc8, 0xad, 0x1c, 0xb6, 0x6c, 0x5e, 0x81, 0x18, 0x91, + /* (2^ 59)P */ 0x96, 0xb3, 0xca, 0x2b, 0xe3, 0x7a, 0x59, 0x72, 0x17, 0x74, 0x29, 0x21, 0xe7, 0x78, 0x07, 0xad, 0xda, 0xb6, 0xcd, 0xf9, 0x27, 0x4d, 0xc8, 0xf2, 0x98, 0x22, 0xca, 0xf2, 0x33, 0x74, 0x7a, 0xdd, 0x1e, 0x71, 0xec, 0xe3, 0x3f, 0xe2, 0xa2, 0xd2, 0x38, 0x75, 0xb0, 0xd0, 0x0a, 0xcf, 0x7d, 0x36, 0xdc, 0x49, 0x38, 0x25, 0x34, 0x4f, 0x20, 0x9a, + /* (2^ 60)P */ 0x2b, 0x6e, 0x04, 0x0d, 0x4f, 0x3d, 0x3b, 0x24, 0xf6, 0x4e, 0x5e, 0x0a, 0xbd, 0x48, 0x96, 0xba, 0x81, 0x8f, 0x39, 0x82, 0x13, 0xe6, 0x72, 0xf3, 0x0f, 0xb6, 0x94, 0xf4, 0xc5, 0x90, 0x74, 0x91, 0xa8, 0xf2, 0xc9, 0xca, 0x9a, 0x4d, 0x98, 0xf2, 0xdf, 0x52, 0x4e, 0x97, 0x2f, 0xeb, 0x84, 0xd3, 0xaf, 0xc2, 0xcc, 0xfb, 0x4c, 0x26, 0x4b, 0xe4, + /* (2^ 61)P */ 0x12, 0x9e, 0xfb, 0x9d, 0x78, 0x79, 0x99, 0xdd, 0xb3, 0x0b, 0x2e, 0x56, 0x41, 0x8e, 0x3f, 0x39, 0xb8, 0x97, 0x89, 0x53, 0x9b, 0x8a, 0x3c, 0x40, 0x9d, 0xa4, 0x6c, 0x2e, 0x31, 0x71, 0xc6, 0x0a, 0x41, 0xd4, 0x95, 0x06, 0x5e, 0xc1, 0xab, 0xc2, 0x14, 0xc4, 0xc7, 0x15, 0x08, 0x3a, 0xad, 0x7a, 0xb4, 0x62, 0xa3, 0x0c, 0x90, 0xf4, 0x47, 0x08, + /* (2^ 62)P */ 0x7f, 0xec, 0x09, 0x82, 0xf5, 0x94, 0x09, 0x93, 0x32, 0xd3, 0xdc, 0x56, 0x80, 0x7b, 0x5b, 0x22, 0x80, 0x6a, 0x96, 0x72, 0xb1, 0xc2, 0xd9, 0xa1, 0x8b, 0x66, 0x42, 0x16, 0xe2, 0x07, 0xb3, 0x2d, 0xf1, 0x75, 0x35, 0x72, 0xc7, 0x98, 0xbe, 0x63, 0x3b, 0x20, 0x75, 0x05, 0xc1, 0x3e, 0x31, 0x5a, 0xf7, 0xaa, 0xae, 0x4b, 0xdb, 0x1d, 0xd0, 0x74, + /* (2^ 63)P */ 0x36, 0x5c, 0x74, 0xe6, 0x5d, 0x59, 0x3f, 0x15, 0x4b, 0x4d, 0x4e, 0x67, 0x41, 0xfe, 0x98, 0x1f, 0x49, 0x76, 0x91, 0x0f, 0x9b, 0xf4, 0xaf, 0x86, 0xaf, 0x66, 0x19, 0xed, 0x46, 0xf1, 0x05, 0x9a, 0xcc, 0xd1, 0x14, 0x1f, 0x82, 0x12, 0x8e, 0xe6, 0xf4, 0xc3, 0x42, 0x5c, 0x4e, 0x33, 0x93, 0xbe, 0x30, 0xe7, 0x64, 0xa9, 0x35, 0x00, 0x4d, 0xf9, + /* (2^ 64)P */ 0x1f, 0xc1, 0x1e, 0xb7, 0xe3, 0x7c, 0xfa, 0xa3, 0x6b, 0x76, 0xaf, 0x9c, 0x05, 0x85, 0x4a, 0xa9, 0xfb, 0xe3, 0x7e, 0xf2, 0x49, 0x56, 0xdc, 0x2f, 0x57, 0x10, 0xba, 0x37, 0xb2, 0x62, 0xf5, 0x6b, 0xe5, 0x8f, 0x0a, 0x87, 0xd1, 0x6a, 0xcb, 0x9d, 0x07, 0xd0, 0xf6, 0x38, 0x99, 0x2c, 0x61, 0x4a, 0x4e, 0xd8, 0xd2, 0x88, 0x29, 0x99, 0x11, 0x95, + /* (2^ 65)P */ 0x6f, 0xdc, 0xd5, 0xd6, 0xd6, 0xa7, 0x4c, 0x46, 0x93, 0x65, 0x62, 0x23, 0x95, 0x32, 0x9c, 0xde, 0x40, 0x41, 0x68, 0x2c, 0x18, 0x4e, 0x5a, 0x8c, 0xc0, 0xc5, 0xc5, 0xea, 0x5c, 0x45, 0x0f, 0x60, 0x78, 0x39, 0xb6, 0x36, 0x23, 0x12, 0xbc, 0x21, 0x9a, 0xf8, 0x91, 0xac, 0xc4, 0x70, 0xdf, 0x85, 0x8e, 0x3c, 0xec, 0x22, 0x04, 0x98, 0xa8, 0xaa, + /* (2^ 66)P */ 0xcc, 0x52, 0x10, 0x5b, 0x4b, 0x6c, 0xc5, 0xfa, 0x3e, 0xd4, 0xf8, 0x1c, 0x04, 0x14, 0x48, 0x33, 0xd9, 0xfc, 0x5f, 0xb0, 0xa5, 0x48, 0x8c, 0x45, 0x8a, 0xee, 0x3e, 0xa7, 0xc1, 0x2e, 0x34, 0xca, 0xf6, 0xc9, 0xeb, 0x10, 0xbb, 0xe1, 0x59, 0x84, 0x25, 0xe8, 0x81, 0x70, 0xc0, 0x09, 0x42, 0xa7, 0x3b, 0x0d, 0x33, 0x00, 0xb5, 0x77, 0xbe, 0x25, + /* (2^ 67)P */ 0xcd, 0x1f, 0xbc, 0x7d, 0xef, 0xe5, 0xca, 0x91, 0xaf, 0xa9, 0x59, 0x6a, 0x09, 0xca, 0xd6, 0x1b, 0x3d, 0x55, 0xde, 0xa2, 0x6a, 0x80, 0xd6, 0x95, 0x47, 0xe4, 0x5f, 0x68, 0x54, 0x08, 0xdf, 0x29, 0xba, 0x2a, 0x02, 0x84, 0xe8, 0xe9, 0x00, 0x77, 0x99, 0x36, 0x03, 0xf6, 0x4a, 0x3e, 0x21, 0x81, 0x7d, 0xb8, 0xa4, 0x8a, 0xa2, 0x05, 0xef, 0xbc, + /* (2^ 68)P */ 0x7c, 0x59, 0x5f, 0x66, 0xd9, 0xb7, 0x83, 0x43, 0x8a, 0xa1, 0x8d, 0x51, 0x70, 0xba, 0xf2, 0x9b, 0x95, 0xc0, 0x4b, 0x4c, 0xa0, 0x14, 0xd3, 0xa4, 0x5d, 
0x4a, 0x37, 0x36, 0x97, 0x31, 0x1e, 0x12, 0xe7, 0xbb, 0x08, 0x67, 0xa5, 0x23, 0xd7, 0xfb, 0x97, 0xd8, 0x6a, 0x03, 0xb1, 0xf8, 0x7f, 0xda, 0x58, 0xd9, 0x3f, 0x73, 0x4a, 0x53, 0xe1, 0x7b, + /* (2^ 69)P */ 0x55, 0x83, 0x98, 0x78, 0x6c, 0x56, 0x5e, 0xed, 0xf7, 0x23, 0x3e, 0x4c, 0x7d, 0x09, 0x2d, 0x09, 0x9c, 0x58, 0x8b, 0x32, 0xca, 0xfe, 0xbf, 0x47, 0x03, 0xeb, 0x4d, 0xe7, 0xeb, 0x9c, 0x83, 0x05, 0x68, 0xaa, 0x80, 0x89, 0x44, 0xf9, 0xd4, 0xdc, 0xdb, 0xb1, 0xdb, 0x77, 0xac, 0xf9, 0x2a, 0xae, 0x35, 0xac, 0x74, 0xb5, 0x95, 0x62, 0x18, 0x85, + /* (2^ 70)P */ 0xab, 0x82, 0x7e, 0x10, 0xd7, 0xe6, 0x57, 0xd1, 0x66, 0x12, 0x31, 0x9c, 0x9c, 0xa6, 0x27, 0x59, 0x71, 0x2e, 0xeb, 0xa0, 0x68, 0xc5, 0x87, 0x51, 0xf4, 0xca, 0x3f, 0x98, 0x56, 0xb0, 0x89, 0xb1, 0xc7, 0x7b, 0x46, 0xb3, 0xae, 0x36, 0xf2, 0xee, 0x15, 0x1a, 0x60, 0xf4, 0x50, 0x76, 0x4f, 0xc4, 0x53, 0x0d, 0x36, 0x4d, 0x31, 0xb1, 0x20, 0x51, + /* (2^ 71)P */ 0xf7, 0x1d, 0x8c, 0x1b, 0x5e, 0xe5, 0x02, 0x6f, 0xc5, 0xa5, 0xe0, 0x5f, 0xc6, 0xb6, 0x63, 0x43, 0xaf, 0x3c, 0x19, 0x6c, 0xf4, 0xaf, 0xa4, 0x33, 0xb1, 0x0a, 0x37, 0x3d, 0xd9, 0x4d, 0xe2, 0x29, 0x24, 0x26, 0x94, 0x7c, 0x02, 0xe4, 0xe2, 0xf2, 0xbe, 0xbd, 0xac, 0x1b, 0x48, 0xb8, 0xdd, 0xe9, 0x0d, 0x9a, 0x50, 0x1a, 0x98, 0x71, 0x6e, 0xdc, + /* (2^ 72)P */ 0x9f, 0x40, 0xb1, 0xb3, 0x66, 0x28, 0x6c, 0xfe, 0xa6, 0x7d, 0xf8, 0x3e, 0xb8, 0xf3, 0xde, 0x52, 0x76, 0x52, 0xa3, 0x92, 0x98, 0x23, 0xab, 0x4f, 0x88, 0x97, 0xfc, 0x22, 0xe1, 0x6b, 0x67, 0xcd, 0x13, 0x95, 0xda, 0x65, 0xdd, 0x3b, 0x67, 0x3f, 0x5f, 0x4c, 0xf2, 0x8a, 0xad, 0x98, 0xa7, 0x94, 0x24, 0x45, 0x87, 0x11, 0x7c, 0x75, 0x79, 0x85, + /* (2^ 73)P */ 0x70, 0xbf, 0xf9, 0x3b, 0xa9, 0x44, 0x57, 0x72, 0x96, 0xc9, 0xa4, 0x98, 0x65, 0xbf, 0x87, 0xb3, 0x3a, 0x39, 0x12, 0xde, 0xe5, 0x39, 0x01, 0x4f, 0xf7, 0xc0, 0x71, 0x52, 0x36, 0x85, 0xb3, 0x18, 0xf8, 0x14, 0xc0, 0x6d, 0xae, 0x9e, 0x4f, 0xb0, 0x72, 0x87, 0xac, 0x5c, 0xd1, 0x6c, 0x41, 0x6c, 0x90, 0x9d, 0x22, 0x81, 0xe4, 0x2b, 0xea, 0xe5, + /* (2^ 74)P */ 0xfc, 0xea, 0x1a, 0x65, 0xd9, 0x49, 0x6a, 0x39, 0xb5, 0x96, 0x72, 0x7b, 0x32, 0xf1, 0xd0, 0xe9, 0x45, 0xd9, 0x31, 0x55, 0xc7, 0x34, 0xe9, 0x5a, 0xec, 0x73, 0x0b, 0x03, 0xc4, 0xb3, 0xe6, 0xc9, 0x5e, 0x0a, 0x17, 0xfe, 0x53, 0x66, 0x7f, 0x21, 0x18, 0x74, 0x54, 0x1b, 0xc9, 0x49, 0x16, 0xd2, 0x48, 0xaf, 0x5b, 0x47, 0x7b, 0xeb, 0xaa, 0xc9, + /* (2^ 75)P */ 0x47, 0x04, 0xf5, 0x5a, 0x87, 0x77, 0x9e, 0x21, 0x34, 0x4e, 0x83, 0x88, 0xaf, 0x02, 0x1d, 0xb0, 0x5a, 0x1d, 0x1d, 0x7d, 0x8d, 0x2c, 0xd3, 0x8d, 0x63, 0xa9, 0x45, 0xfb, 0x15, 0x6d, 0x86, 0x45, 0xcd, 0x38, 0x0e, 0xf7, 0x37, 0x79, 0xed, 0x6d, 0x5a, 0xbc, 0x32, 0xcc, 0x66, 0xf1, 0x3a, 0xb2, 0x87, 0x6f, 0x70, 0x71, 0xd9, 0xf2, 0xfa, 0x7b, + /* (2^ 76)P */ 0x68, 0x07, 0xdc, 0x61, 0x40, 0xe4, 0xec, 0x32, 0xc8, 0xbe, 0x66, 0x30, 0x54, 0x80, 0xfd, 0x13, 0x7a, 0xef, 0xae, 0xed, 0x2e, 0x00, 0x6d, 0x3f, 0xbd, 0xfc, 0x91, 0x24, 0x53, 0x7f, 0x63, 0x9d, 0x2e, 0xe3, 0x76, 0xe0, 0xf3, 0xe1, 0x8f, 0x7a, 0xc4, 0x77, 0x0c, 0x91, 0xc0, 0xc2, 0x18, 0x6b, 0x04, 0xad, 0xb6, 0x70, 0x9a, 0x64, 0xc5, 0x82, + /* (2^ 77)P */ 0x7f, 0xea, 0x13, 0xd8, 0x9e, 0xfc, 0x5b, 0x06, 0xb5, 0x4f, 0xda, 0x38, 0xe0, 0x9c, 0xd2, 0x3a, 0xc1, 0x1c, 0x62, 0x70, 0x7f, 0xc6, 0x24, 0x0a, 0x47, 0x04, 0x01, 0xc4, 0x55, 0x09, 0xd1, 0x7a, 0x07, 0xba, 0xa3, 0x80, 0x4f, 0xc1, 0x65, 0x36, 0x6d, 0xc0, 0x10, 0xcf, 0x94, 0xa9, 0xa2, 0x01, 0x44, 0xd1, 0xf9, 0x1c, 0x4c, 0xfb, 0xf8, 0x99, + /* (2^ 78)P */ 0x6c, 0xb9, 0x6b, 0xee, 0x43, 0x5b, 0xb9, 0xbb, 0xee, 0x2e, 0x52, 0xc1, 0xc6, 0xb9, 0x61, 0xd2, 0x93, 0xa5, 0xaf, 0x52, 0xf4, 0xa4, 0x1a, 0x51, 0x61, 0xa7, 0xcb, 0x9e, 0xbb, 
0x56, 0x65, 0xe2, 0xbf, 0x75, 0xb9, 0x9c, 0x50, 0x96, 0x60, 0x81, 0x74, 0x47, 0xc0, 0x04, 0x88, 0x71, 0x76, 0x39, 0x9a, 0xa7, 0xb1, 0x4e, 0x43, 0x15, 0xe0, 0xbb, + /* (2^ 79)P */ 0xbb, 0xce, 0xe2, 0xbb, 0xf9, 0x17, 0x0f, 0x82, 0x40, 0xad, 0x73, 0xe3, 0xeb, 0x3b, 0x06, 0x1a, 0xcf, 0x8e, 0x6e, 0x28, 0xb8, 0x26, 0xd9, 0x5b, 0xb7, 0xb3, 0xcf, 0xb4, 0x6a, 0x1c, 0xbf, 0x7f, 0xb8, 0xb5, 0x79, 0xcf, 0x45, 0x68, 0x7d, 0xc5, 0xeb, 0xf3, 0xbe, 0x39, 0x40, 0xfc, 0x07, 0x90, 0x7a, 0x62, 0xad, 0x86, 0x08, 0x71, 0x25, 0xe1, + /* (2^ 80)P */ 0x9b, 0x46, 0xac, 0xef, 0xc1, 0x4e, 0xa1, 0x97, 0x95, 0x76, 0xf9, 0x1b, 0xc2, 0xb2, 0x6a, 0x41, 0xea, 0x80, 0x3d, 0xe9, 0x08, 0x52, 0x5a, 0xe3, 0xf2, 0x08, 0xc5, 0xea, 0x39, 0x3f, 0x44, 0x71, 0x4d, 0xea, 0x0d, 0x05, 0x23, 0xe4, 0x2e, 0x3c, 0x89, 0xfe, 0x12, 0x8a, 0x95, 0x42, 0x0a, 0x68, 0xea, 0x5a, 0x28, 0x06, 0x9e, 0xe3, 0x5f, 0xe0, + /* (2^ 81)P */ 0x00, 0x61, 0x6c, 0x98, 0x9b, 0xe7, 0xb9, 0x06, 0x1c, 0xc5, 0x1b, 0xed, 0xbe, 0xc8, 0xb3, 0xea, 0x87, 0xf0, 0xc4, 0x24, 0x7d, 0xbb, 0x5d, 0xa4, 0x1d, 0x7a, 0x16, 0x00, 0x55, 0x94, 0x67, 0x78, 0xbd, 0x58, 0x02, 0x82, 0x90, 0x53, 0x76, 0xd4, 0x72, 0x99, 0x51, 0x6f, 0x7b, 0xcf, 0x80, 0x30, 0x31, 0x3b, 0x01, 0xc7, 0xc1, 0xef, 0xe6, 0x42, + /* (2^ 82)P */ 0xe2, 0x35, 0xaf, 0x4b, 0x79, 0xc6, 0x12, 0x24, 0x99, 0xc0, 0x68, 0xb0, 0x43, 0x3e, 0xe5, 0xef, 0xe2, 0x29, 0xea, 0xb8, 0xb3, 0xbc, 0x6a, 0x53, 0x2c, 0x69, 0x18, 0x5a, 0xf9, 0x15, 0xae, 0x66, 0x58, 0x18, 0xd3, 0x2d, 0x4b, 0x00, 0xfd, 0x84, 0xab, 0x4f, 0xae, 0x70, 0x6b, 0x9e, 0x9a, 0xdf, 0x83, 0xfd, 0x2e, 0x3c, 0xcf, 0xf8, 0x88, 0x5b, + /* (2^ 83)P */ 0xa4, 0x90, 0x31, 0x85, 0x13, 0xcd, 0xdf, 0x64, 0xc9, 0xa1, 0x0b, 0xe7, 0xb6, 0x73, 0x8a, 0x1b, 0x22, 0x78, 0x4c, 0xd4, 0xae, 0x48, 0x18, 0x00, 0x00, 0xa8, 0x9f, 0x06, 0xf9, 0xfb, 0x2d, 0xc3, 0xb1, 0x2a, 0xbc, 0x13, 0x99, 0x57, 0xaf, 0xf0, 0x8d, 0x61, 0x54, 0x29, 0xd5, 0xf2, 0x72, 0x00, 0x96, 0xd1, 0x85, 0x12, 0x8a, 0xf0, 0x23, 0xfb, + /* (2^ 84)P */ 0x69, 0xc7, 0xdb, 0xd9, 0x92, 0x75, 0x08, 0x9b, 0xeb, 0xa5, 0x93, 0xd1, 0x1a, 0xf4, 0xf5, 0xaf, 0xe6, 0xc4, 0x4a, 0x0d, 0x35, 0x26, 0x39, 0x9d, 0xd3, 0x17, 0x3e, 0xae, 0x2d, 0xbf, 0x73, 0x9f, 0xb7, 0x74, 0x91, 0xd1, 0xd8, 0x5c, 0x14, 0xf9, 0x75, 0xdf, 0xeb, 0xc2, 0x22, 0xd8, 0x14, 0x8d, 0x86, 0x23, 0x4d, 0xd1, 0x2d, 0xdb, 0x6b, 0x42, + /* (2^ 85)P */ 0x8c, 0xda, 0xc6, 0xf8, 0x71, 0xba, 0x2b, 0x06, 0x78, 0xae, 0xcc, 0x3a, 0xe3, 0xe3, 0xa1, 0x8b, 0xe2, 0x34, 0x6d, 0x28, 0x9e, 0x46, 0x13, 0x4d, 0x9e, 0xa6, 0x73, 0x49, 0x65, 0x79, 0x88, 0xb9, 0x3a, 0xd1, 0x6d, 0x2f, 0x48, 0x2b, 0x0a, 0x7f, 0x58, 0x20, 0x37, 0xf4, 0x0e, 0xbb, 0x4a, 0x95, 0x58, 0x0c, 0x88, 0x30, 0xc4, 0x74, 0xdd, 0xfd, + /* (2^ 86)P */ 0x6d, 0x13, 0x4e, 0x89, 0x2d, 0xa9, 0xa3, 0xed, 0x09, 0xe3, 0x0e, 0x71, 0x3e, 0x4a, 0xab, 0x90, 0xde, 0x03, 0xeb, 0x56, 0x46, 0x60, 0x06, 0xf5, 0x71, 0xe5, 0xee, 0x9b, 0xef, 0xff, 0xc4, 0x2c, 0x9f, 0x37, 0x48, 0x45, 0x94, 0x12, 0x41, 0x81, 0x15, 0x70, 0x91, 0x99, 0x5e, 0x56, 0x6b, 0xf4, 0xa6, 0xc9, 0xf5, 0x69, 0x9d, 0x78, 0x37, 0x57, + /* (2^ 87)P */ 0xf3, 0x51, 0x57, 0x7e, 0x43, 0x6f, 0xc6, 0x67, 0x59, 0x0c, 0xcf, 0x94, 0xe6, 0x3d, 0xb5, 0x07, 0xc9, 0x77, 0x48, 0xc9, 0x68, 0x0d, 0x98, 0x36, 0x62, 0x35, 0x38, 0x1c, 0xf5, 0xc5, 0xec, 0x66, 0x78, 0xfe, 0x47, 0xab, 0x26, 0xd6, 0x44, 0xb6, 0x06, 0x0f, 0x89, 0xe3, 0x19, 0x40, 0x1a, 0xe7, 0xd8, 0x65, 0x55, 0xf7, 0x1a, 0xfc, 0xa3, 0x0e, + /* (2^ 88)P */ 0x0e, 0x30, 0xa6, 0xb7, 0x58, 0x60, 0x62, 0x2a, 0x6c, 0x13, 0xa8, 0x14, 0x9b, 0xb8, 0xf2, 0x70, 0xd8, 0xb1, 0x71, 0x88, 0x8c, 0x18, 0x31, 0x25, 0x93, 0x90, 0xb4, 0xc7, 0x49, 0xd8, 0xd4, 0xdb, 0x1e, 
0x1e, 0x7f, 0xaa, 0xba, 0xc9, 0xf2, 0x5d, 0xa9, 0x3a, 0x43, 0xb4, 0x5c, 0xee, 0x7b, 0xc7, 0x97, 0xb7, 0x66, 0xd7, 0x23, 0xd9, 0x22, 0x59, + /* (2^ 89)P */ 0x28, 0x19, 0xa6, 0xf9, 0x89, 0x20, 0x78, 0xd4, 0x6d, 0xcb, 0x79, 0x8f, 0x61, 0x6f, 0xb2, 0x5c, 0x4f, 0xa6, 0x54, 0x84, 0x95, 0x24, 0x36, 0x64, 0xcb, 0x39, 0xe7, 0x8f, 0x97, 0x9c, 0x5c, 0x3c, 0xfb, 0x51, 0x11, 0x01, 0x17, 0xdb, 0xc9, 0x9b, 0x51, 0x03, 0x9a, 0xe9, 0xe5, 0x24, 0x1e, 0xf5, 0xda, 0xe0, 0x48, 0x02, 0x23, 0xd0, 0x2c, 0x81, + /* (2^ 90)P */ 0x42, 0x1b, 0xe4, 0x91, 0x85, 0x2a, 0x0c, 0xd2, 0x28, 0x66, 0x57, 0x9e, 0x33, 0x8d, 0x25, 0x71, 0x10, 0x65, 0x76, 0xa2, 0x8c, 0x21, 0x86, 0x81, 0x15, 0xc2, 0x27, 0xeb, 0x54, 0x2d, 0x4f, 0x6c, 0xe6, 0xd6, 0x24, 0x9c, 0x1a, 0x12, 0xb8, 0x81, 0xe2, 0x0a, 0xf3, 0xd3, 0xf0, 0xd3, 0xe1, 0x74, 0x1f, 0x9b, 0x11, 0x47, 0xd0, 0xcf, 0xb6, 0x54, + /* (2^ 91)P */ 0x26, 0x45, 0xa2, 0x10, 0xd4, 0x2d, 0xae, 0xc0, 0xb0, 0xe8, 0x86, 0xb3, 0xc7, 0xea, 0x70, 0x87, 0x61, 0xb5, 0xa5, 0x55, 0xbe, 0x88, 0x1d, 0x7a, 0xd9, 0x6f, 0xeb, 0x83, 0xe2, 0x44, 0x7f, 0x98, 0x04, 0xd6, 0x50, 0x9d, 0xa7, 0x86, 0x66, 0x09, 0x63, 0xe1, 0xed, 0x72, 0xb1, 0xe4, 0x1d, 0x3a, 0xfd, 0x47, 0xce, 0x1c, 0xaa, 0x3b, 0x8f, 0x1b, + /* (2^ 92)P */ 0xf4, 0x3c, 0x4a, 0xb6, 0xc2, 0x9c, 0xe0, 0x2e, 0xb7, 0x38, 0xea, 0x61, 0x35, 0x97, 0x10, 0x90, 0xae, 0x22, 0x48, 0xb3, 0xa9, 0xc6, 0x7a, 0xbb, 0x23, 0xf2, 0xf8, 0x1b, 0xa7, 0xa1, 0x79, 0xcc, 0xc4, 0xf8, 0x08, 0x76, 0x8a, 0x5a, 0x1c, 0x1b, 0xc5, 0x33, 0x91, 0xa9, 0xb8, 0xb9, 0xd3, 0xf8, 0x49, 0xcd, 0xe5, 0x82, 0x43, 0xf7, 0xca, 0x68, + /* (2^ 93)P */ 0x38, 0xba, 0xae, 0x44, 0xfe, 0x57, 0x64, 0x56, 0x7c, 0x0e, 0x9c, 0xca, 0xff, 0xa9, 0x82, 0xbb, 0x38, 0x4a, 0xa7, 0xf7, 0x47, 0xab, 0xbe, 0x6d, 0x23, 0x0b, 0x8a, 0xed, 0xc2, 0xb9, 0x8f, 0xf1, 0xec, 0x91, 0x44, 0x73, 0x64, 0xba, 0xd5, 0x8f, 0x37, 0x38, 0x0d, 0xd5, 0xf8, 0x73, 0x57, 0xb6, 0xc2, 0x45, 0xdc, 0x25, 0xb2, 0xb6, 0xea, 0xd9, + /* (2^ 94)P */ 0xbf, 0xe9, 0x1a, 0x40, 0x4d, 0xcc, 0xe6, 0x1d, 0x70, 0x1a, 0x65, 0xcc, 0x34, 0x2c, 0x37, 0x2c, 0x2d, 0x6b, 0x6d, 0xe5, 0x2f, 0x19, 0x9e, 0xe4, 0xe1, 0xaa, 0xd4, 0xab, 0x54, 0xf4, 0xa8, 0xe4, 0x69, 0x2d, 0x8e, 0x4d, 0xd7, 0xac, 0xb0, 0x5b, 0xfe, 0xe3, 0x26, 0x07, 0xc3, 0xf8, 0x1b, 0x43, 0xa8, 0x1d, 0x64, 0xa5, 0x25, 0x88, 0xbb, 0x77, + /* (2^ 95)P */ 0x92, 0xcd, 0x6e, 0xa0, 0x79, 0x04, 0x18, 0xf4, 0x11, 0x58, 0x48, 0xb5, 0x3c, 0x7b, 0xd1, 0xcc, 0xd3, 0x14, 0x2c, 0xa0, 0xdd, 0x04, 0x44, 0x11, 0xb3, 0x6d, 0x2f, 0x0d, 0xf5, 0x2a, 0x75, 0x5d, 0x1d, 0xda, 0x86, 0x8d, 0x7d, 0x6b, 0x32, 0x68, 0xb6, 0x6c, 0x64, 0x9e, 0xde, 0x80, 0x88, 0xce, 0x08, 0xbf, 0x0b, 0xe5, 0x8e, 0x4f, 0x1d, 0xfb, + /* (2^ 96)P */ 0xaf, 0xe8, 0x85, 0xbf, 0x7f, 0x37, 0x8d, 0x66, 0x7c, 0xd5, 0xd3, 0x96, 0xa5, 0x81, 0x67, 0x95, 0xff, 0x48, 0xde, 0xde, 0xd7, 0x7a, 0x46, 0x34, 0xb1, 0x13, 0x70, 0x29, 0xed, 0x87, 0x90, 0xb0, 0x40, 0x2c, 0xa6, 0x43, 0x6e, 0xb6, 0xbc, 0x48, 0x8a, 0xc1, 0xae, 0xb8, 0xd4, 0xe2, 0xc0, 0x32, 0xb2, 0xa6, 0x2a, 0x8f, 0xb5, 0x16, 0x9e, 0xc3, + /* (2^ 97)P */ 0xff, 0x4d, 0xd2, 0xd6, 0x74, 0xef, 0x2c, 0x96, 0xc1, 0x11, 0xa8, 0xb8, 0xfe, 0x94, 0x87, 0x3e, 0xa0, 0xfb, 0x57, 0xa3, 0xfc, 0x7a, 0x7e, 0x6a, 0x59, 0x6c, 0x54, 0xbb, 0xbb, 0xa2, 0x25, 0x38, 0x1b, 0xdf, 0x5d, 0x7b, 0x94, 0x14, 0xde, 0x07, 0x6e, 0xd3, 0xab, 0x02, 0x26, 0x74, 0x16, 0x12, 0xdf, 0x2e, 0x2a, 0xa7, 0xb0, 0xe8, 0x29, 0xc0, + /* (2^ 98)P */ 0x6a, 0x38, 0x0b, 0xd3, 0xba, 0x45, 0x23, 0xe0, 0x04, 0x3b, 0x83, 0x39, 0xc5, 0x11, 0xe6, 0xcf, 0x39, 0x0a, 0xb3, 0xb0, 0x3b, 0x27, 0x29, 0x63, 0x1c, 0xf3, 0x00, 0xe6, 0xd2, 0x55, 0x21, 0x1f, 0x84, 0x97, 0x9f, 0x01, 0x49, 
0x43, 0x30, 0x5f, 0xe0, 0x1d, 0x24, 0xc4, 0x4e, 0xa0, 0x2b, 0x0b, 0x12, 0x55, 0xc3, 0x27, 0xae, 0x08, 0x83, 0x7c, + /* (2^ 99)P */ 0x5d, 0x1a, 0xb7, 0xa9, 0xf5, 0xfd, 0xec, 0xad, 0xb7, 0x87, 0x02, 0x5f, 0x0d, 0x30, 0x4d, 0xe2, 0x65, 0x87, 0xa4, 0x41, 0x45, 0x1d, 0x67, 0xe0, 0x30, 0x5c, 0x13, 0x87, 0xf6, 0x2e, 0x08, 0xc1, 0xc7, 0x12, 0x45, 0xc8, 0x9b, 0xad, 0xb8, 0xd5, 0x57, 0xbb, 0x5c, 0x48, 0x3a, 0xe1, 0x91, 0x5e, 0xf6, 0x4d, 0x8a, 0x63, 0x75, 0x69, 0x0c, 0x01, + /* (2^100)P */ 0x8f, 0x53, 0x2d, 0xa0, 0x71, 0x3d, 0xfc, 0x45, 0x10, 0x96, 0xcf, 0x56, 0xf9, 0xbb, 0x40, 0x3c, 0x86, 0x52, 0x76, 0xbe, 0x84, 0xf9, 0xa6, 0x9d, 0x3d, 0x27, 0xbe, 0xb4, 0x00, 0x49, 0x94, 0xf5, 0x5d, 0xe1, 0x62, 0x85, 0x66, 0xe5, 0xb8, 0x20, 0x2c, 0x09, 0x7d, 0x9d, 0x3d, 0x6e, 0x74, 0x39, 0xab, 0xad, 0xa0, 0x90, 0x97, 0x5f, 0xbb, 0xa7, + /* (2^101)P */ 0xdb, 0x2d, 0x99, 0x08, 0x16, 0x46, 0x83, 0x7a, 0xa8, 0xea, 0x3d, 0x28, 0x5b, 0x49, 0xfc, 0xb9, 0x6d, 0x00, 0x9e, 0x54, 0x4f, 0x47, 0x64, 0x9b, 0x58, 0x4d, 0x07, 0x0c, 0x6f, 0x29, 0x56, 0x0b, 0x00, 0x14, 0x85, 0x96, 0x41, 0x04, 0xb9, 0x5c, 0xa4, 0xf6, 0x16, 0x73, 0x6a, 0xc7, 0x62, 0x0c, 0x65, 0x2f, 0x93, 0xbf, 0xf7, 0xb9, 0xb7, 0xf1, + /* (2^102)P */ 0xeb, 0x6d, 0xb3, 0x46, 0x32, 0xd2, 0xcb, 0x08, 0x94, 0x14, 0xbf, 0x3f, 0xc5, 0xcb, 0x5f, 0x9f, 0x8a, 0x89, 0x0c, 0x1b, 0x45, 0xad, 0x4c, 0x50, 0xb4, 0xe1, 0xa0, 0x6b, 0x11, 0x92, 0xaf, 0x1f, 0x00, 0xcc, 0xe5, 0x13, 0x7e, 0xe4, 0x2e, 0xa0, 0x57, 0xf3, 0xa7, 0x84, 0x79, 0x7a, 0xc2, 0xb7, 0xb7, 0xfc, 0x5d, 0xa5, 0xa9, 0x64, 0xcc, 0xd8, + /* (2^103)P */ 0xa9, 0xc4, 0x12, 0x8b, 0x34, 0x78, 0x3e, 0x38, 0xfd, 0x3f, 0x87, 0xfa, 0x88, 0x94, 0xd5, 0xd9, 0x7f, 0xeb, 0x58, 0xff, 0xb9, 0x45, 0xdb, 0xa1, 0xed, 0x22, 0x28, 0x1d, 0x00, 0x6d, 0x79, 0x85, 0x7a, 0x75, 0x5d, 0xf0, 0xb1, 0x9e, 0x47, 0x28, 0x8c, 0x62, 0xdf, 0xfb, 0x4c, 0x7b, 0xc5, 0x1a, 0x42, 0x95, 0xef, 0x9a, 0xb7, 0x27, 0x7e, 0xda, + /* (2^104)P */ 0xca, 0xd5, 0xc0, 0x17, 0xa1, 0x66, 0x79, 0x9c, 0x2a, 0xb7, 0x0a, 0xfe, 0x62, 0xe4, 0x26, 0x78, 0x90, 0xa7, 0xcb, 0xb0, 0x4f, 0x6d, 0xf9, 0x8f, 0xf7, 0x7d, 0xac, 0xb8, 0x78, 0x1f, 0x41, 0xea, 0x97, 0x1e, 0x62, 0x97, 0x43, 0x80, 0x58, 0x80, 0xb6, 0x69, 0x7d, 0xee, 0x16, 0xd2, 0xa1, 0x81, 0xd7, 0xb1, 0x27, 0x03, 0x48, 0xda, 0xab, 0xec, + /* (2^105)P */ 0x5b, 0xed, 0x40, 0x8e, 0x8c, 0xc1, 0x66, 0x90, 0x7f, 0x0c, 0xb2, 0xfc, 0xbd, 0x16, 0xac, 0x7d, 0x4c, 0x6a, 0xf9, 0xae, 0xe7, 0x4e, 0x11, 0x12, 0xe9, 0xbe, 0x17, 0x09, 0xc6, 0xc1, 0x5e, 0xb5, 0x7b, 0x50, 0x5c, 0x27, 0xfb, 0x80, 0xab, 0x01, 0xfa, 0x5b, 0x9b, 0x75, 0x16, 0x6e, 0xb2, 0x5c, 0x8c, 0x2f, 0xa5, 0x6a, 0x1a, 0x68, 0xa6, 0x90, + /* (2^106)P */ 0x75, 0xfe, 0xb6, 0x96, 0x96, 0x87, 0x4c, 0x12, 0xa9, 0xd1, 0xd8, 0x03, 0xa3, 0xc1, 0x15, 0x96, 0xe8, 0xa0, 0x75, 0x82, 0xa0, 0x6d, 0xea, 0x54, 0xdc, 0x5f, 0x0d, 0x7e, 0xf6, 0x70, 0xb5, 0xdc, 0x7a, 0xf6, 0xc4, 0xd4, 0x21, 0x49, 0xf5, 0xd4, 0x14, 0x6d, 0x48, 0x1d, 0x7c, 0x99, 0x42, 0xdf, 0x78, 0x6b, 0x9d, 0xb9, 0x30, 0x3c, 0xd0, 0x29, + /* (2^107)P */ 0x85, 0xd6, 0xd8, 0xf3, 0x91, 0x74, 0xdd, 0xbd, 0x72, 0x96, 0x10, 0xe4, 0x76, 0x02, 0x5a, 0x72, 0x67, 0xd3, 0x17, 0x72, 0x14, 0x9a, 0x20, 0x5b, 0x0f, 0x8d, 0xed, 0x6d, 0x4e, 0xe3, 0xd9, 0x82, 0xc2, 0x99, 0xee, 0x39, 0x61, 0x69, 0x8a, 0x24, 0x01, 0x92, 0x15, 0xe7, 0xfc, 0xf9, 0x4d, 0xac, 0xf1, 0x30, 0x49, 0x01, 0x0b, 0x6e, 0x0f, 0x20, + /* (2^108)P */ 0xd8, 0x25, 0x94, 0x5e, 0x43, 0x29, 0xf5, 0xcc, 0xe8, 0xe3, 0x55, 0x41, 0x3c, 0x9f, 0x58, 0x5b, 0x00, 0xeb, 0xc5, 0xdf, 0xcf, 0xfb, 0xfd, 0x6e, 0x92, 0xec, 0x99, 0x30, 0xd6, 0x05, 0xdd, 0x80, 0x7a, 0x5d, 0x6d, 0x16, 0x85, 0xd8, 0x9d, 0x43, 0x65, 
0xd8, 0x2c, 0x33, 0x2f, 0x5c, 0x41, 0xea, 0xb7, 0x95, 0x77, 0xf2, 0x9e, 0x59, 0x09, 0xe8, + /* (2^109)P */ 0x00, 0xa0, 0x03, 0x80, 0xcd, 0x60, 0xe5, 0x17, 0xd4, 0x15, 0x99, 0xdd, 0x4f, 0xbf, 0x66, 0xb8, 0xc0, 0xf5, 0xf9, 0xfc, 0x6d, 0x42, 0x18, 0x34, 0x1c, 0x7d, 0x5b, 0xb5, 0x09, 0xd0, 0x99, 0x57, 0x81, 0x0b, 0x62, 0xb3, 0xa2, 0xf9, 0x0b, 0xae, 0x95, 0xb8, 0xc2, 0x3b, 0x0d, 0x5b, 0x00, 0xf1, 0xed, 0xbc, 0x05, 0x9d, 0x61, 0xbc, 0x73, 0x9d, + /* (2^110)P */ 0xd4, 0xdb, 0x29, 0xe5, 0x85, 0xe9, 0xc6, 0x89, 0x2a, 0xa8, 0x54, 0xab, 0xb3, 0x7f, 0x88, 0xc0, 0x4d, 0xe0, 0xd1, 0x74, 0x6e, 0xa3, 0xa7, 0x39, 0xd5, 0xcc, 0xa1, 0x8a, 0xcb, 0x5b, 0x34, 0xad, 0x92, 0xb4, 0xd8, 0xd5, 0x17, 0xf6, 0x77, 0x18, 0x9e, 0xaf, 0x45, 0x3b, 0x03, 0xe2, 0xf8, 0x52, 0x60, 0xdc, 0x15, 0x20, 0x9e, 0xdf, 0xd8, 0x5d, + /* (2^111)P */ 0x02, 0xc1, 0xac, 0x1a, 0x15, 0x8e, 0x6c, 0xf5, 0x1e, 0x1e, 0xba, 0x7e, 0xc2, 0xda, 0x7d, 0x02, 0xda, 0x43, 0xae, 0x04, 0x70, 0x28, 0x54, 0x78, 0x94, 0xf5, 0x4f, 0x07, 0x84, 0x8f, 0xed, 0xaa, 0xc0, 0xb8, 0xcd, 0x7f, 0x7e, 0x33, 0xa3, 0xbe, 0x21, 0x29, 0xc8, 0x56, 0x34, 0xc0, 0x76, 0x87, 0x8f, 0xc7, 0x73, 0x58, 0x90, 0x16, 0xfc, 0xd6, + /* (2^112)P */ 0xb8, 0x3f, 0xe1, 0xdf, 0x3a, 0x91, 0x25, 0x0c, 0xf6, 0x47, 0xa8, 0x89, 0xc4, 0xc6, 0x61, 0xec, 0x86, 0x2c, 0xfd, 0xbe, 0xa4, 0x6f, 0xc2, 0xd4, 0x46, 0x19, 0x70, 0x5d, 0x09, 0x02, 0x86, 0xd3, 0x4b, 0xe9, 0x16, 0x7b, 0xf0, 0x0d, 0x6c, 0xff, 0x91, 0x05, 0xbf, 0x55, 0xb4, 0x00, 0x8d, 0xe5, 0x6d, 0x68, 0x20, 0x90, 0x12, 0xb5, 0x5c, 0x32, + /* (2^113)P */ 0x80, 0x45, 0xc8, 0x51, 0x87, 0xba, 0x1c, 0x5c, 0xcf, 0x5f, 0x4b, 0x3c, 0x9e, 0x3b, 0x36, 0xd2, 0x26, 0xa2, 0x7f, 0xab, 0xb7, 0xbf, 0xda, 0x68, 0x23, 0x8f, 0xc3, 0xa0, 0xfd, 0xad, 0xf1, 0x56, 0x3b, 0xd0, 0x75, 0x2b, 0x44, 0x61, 0xd8, 0xf4, 0xf1, 0x05, 0x49, 0x53, 0x07, 0xee, 0x47, 0xef, 0xc0, 0x7c, 0x9d, 0xe4, 0x15, 0x88, 0xc5, 0x47, + /* (2^114)P */ 0x2d, 0xb5, 0x09, 0x80, 0xb9, 0xd3, 0xd8, 0xfe, 0x4c, 0xd2, 0xa6, 0x6e, 0xd3, 0x75, 0xcf, 0xb0, 0x99, 0xcb, 0x50, 0x8d, 0xe9, 0x67, 0x9b, 0x20, 0xe8, 0x57, 0xd8, 0x14, 0x85, 0x73, 0x6a, 0x74, 0xe0, 0x99, 0xf0, 0x6b, 0x6e, 0x59, 0x30, 0x31, 0x33, 0x96, 0x5f, 0xa1, 0x0c, 0x1b, 0xf4, 0xca, 0x09, 0xe1, 0x9b, 0xb5, 0xcf, 0x6d, 0x0b, 0xeb, + /* (2^115)P */ 0x1a, 0xde, 0x50, 0xa9, 0xac, 0x3e, 0x10, 0x43, 0x4f, 0x82, 0x4f, 0xc0, 0xfe, 0x3f, 0x33, 0xd2, 0x64, 0x86, 0x50, 0xa9, 0x51, 0x76, 0x5e, 0x50, 0x97, 0x6c, 0x73, 0x8d, 0x77, 0xa3, 0x75, 0x03, 0xbc, 0xc9, 0xfb, 0x50, 0xd9, 0x6d, 0x16, 0xad, 0x5d, 0x32, 0x3d, 0xac, 0x44, 0xdf, 0x51, 0xf7, 0x19, 0xd4, 0x0b, 0x57, 0x78, 0x0b, 0x81, 0x4e, + /* (2^116)P */ 0x32, 0x24, 0xf1, 0x6c, 0x55, 0x62, 0x1d, 0xb3, 0x1f, 0xda, 0xfa, 0x6a, 0x8f, 0x98, 0x01, 0x16, 0xde, 0x44, 0x50, 0x0d, 0x2e, 0x6c, 0x0b, 0xa2, 0xd3, 0x74, 0x0e, 0xa9, 0xbf, 0x8d, 0xa9, 0xc8, 0xc8, 0x2f, 0x62, 0xc1, 0x35, 0x5e, 0xfd, 0x3a, 0xb3, 0x83, 0x2d, 0xee, 0x4e, 0xfd, 0x5c, 0x5e, 0xad, 0x85, 0xa5, 0x10, 0xb5, 0x4f, 0x34, 0xa7, + /* (2^117)P */ 0xd1, 0x58, 0x6f, 0xe6, 0x54, 0x2c, 0xc2, 0xcd, 0xcf, 0x83, 0xdc, 0x88, 0x0c, 0xb9, 0xb4, 0x62, 0x18, 0x89, 0x65, 0x28, 0xe9, 0x72, 0x4b, 0x65, 0xcf, 0xd6, 0x90, 0x88, 0xd7, 0x76, 0x17, 0x4f, 0x74, 0x64, 0x1e, 0xcb, 0xd3, 0xf5, 0x4b, 0xaa, 0x2e, 0x4d, 0x2d, 0x7c, 0x13, 0x1f, 0xfd, 0xd9, 0x60, 0x83, 0x7e, 0xda, 0x64, 0x1c, 0xdc, 0x9f, + /* (2^118)P */ 0xad, 0xef, 0xac, 0x1b, 0xc1, 0x30, 0x5a, 0x15, 0xc9, 0x1f, 0xac, 0xf1, 0xca, 0x44, 0x95, 0x95, 0xea, 0xf2, 0x22, 0xe7, 0x8d, 0x25, 0xf0, 0xff, 0xd8, 0x71, 0xf7, 0xf8, 0x8f, 0x8f, 0xcd, 0xf4, 0x1e, 0xfe, 0x6c, 0x68, 0x04, 0xb8, 0x78, 0xa1, 0x5f, 0xa6, 0x5d, 0x5e, 0xf9, 
0x8d, 0xea, 0x80, 0xcb, 0xf3, 0x17, 0xa6, 0x03, 0xc9, 0x38, 0xd5, + /* (2^119)P */ 0x79, 0x14, 0x31, 0xc3, 0x38, 0xe5, 0xaa, 0xbf, 0x17, 0xa3, 0x04, 0x4e, 0x80, 0x59, 0x9c, 0x9f, 0x19, 0x39, 0xe4, 0x2d, 0x23, 0x54, 0x4a, 0x7f, 0x3e, 0xf3, 0xd9, 0xc7, 0xba, 0x6c, 0x8f, 0x6b, 0xfa, 0x34, 0xb5, 0x23, 0x17, 0x1d, 0xff, 0x1d, 0xea, 0x1f, 0xd7, 0xba, 0x61, 0xb2, 0xe0, 0x38, 0x6a, 0xe9, 0xcf, 0x48, 0x5d, 0x6a, 0x10, 0x9c, + /* (2^120)P */ 0xc8, 0xbb, 0x13, 0x1c, 0x3f, 0x3c, 0x34, 0xfd, 0xac, 0x37, 0x52, 0x44, 0x25, 0xa8, 0xde, 0x1d, 0x63, 0xf4, 0x81, 0x9a, 0xbe, 0x0b, 0x74, 0x2e, 0xc8, 0x51, 0x16, 0xd3, 0xac, 0x4a, 0xaf, 0xe2, 0x5f, 0x3a, 0x89, 0x32, 0xd1, 0x9b, 0x7c, 0x90, 0x0d, 0xac, 0xdc, 0x8b, 0x73, 0x45, 0x45, 0x97, 0xb1, 0x90, 0x2c, 0x1b, 0x31, 0xca, 0xb1, 0x94, + /* (2^121)P */ 0x07, 0x28, 0xdd, 0x10, 0x14, 0xa5, 0x95, 0x7e, 0xf3, 0xe4, 0xd4, 0x14, 0xb4, 0x7e, 0x76, 0xdb, 0x42, 0xd6, 0x94, 0x3e, 0xeb, 0x44, 0x64, 0x88, 0x0d, 0xec, 0xc1, 0x21, 0xf0, 0x79, 0xe0, 0x83, 0x67, 0x55, 0x53, 0xc2, 0xf6, 0xc5, 0xc5, 0x89, 0x39, 0xe8, 0x42, 0xd0, 0x17, 0xbd, 0xff, 0x35, 0x59, 0x0e, 0xc3, 0x06, 0x86, 0xd4, 0x64, 0xcf, + /* (2^122)P */ 0x91, 0xa8, 0xdb, 0x57, 0x9b, 0xe2, 0x96, 0x31, 0x10, 0x6e, 0xd7, 0x9a, 0x97, 0xb3, 0xab, 0xb5, 0x15, 0x66, 0xbe, 0xcc, 0x6d, 0x9a, 0xac, 0x06, 0xb3, 0x0d, 0xaa, 0x4b, 0x9c, 0x96, 0x79, 0x6c, 0x34, 0xee, 0x9e, 0x53, 0x4d, 0x6e, 0xbd, 0x88, 0x02, 0xbf, 0x50, 0x54, 0x12, 0x5d, 0x01, 0x02, 0x46, 0xc6, 0x74, 0x02, 0x8c, 0x24, 0xae, 0xb1, + /* (2^123)P */ 0xf5, 0x22, 0xea, 0xac, 0x7d, 0x9c, 0x33, 0x8a, 0xa5, 0x36, 0x79, 0x6a, 0x4f, 0xa4, 0xdc, 0xa5, 0x73, 0x64, 0xc4, 0x6f, 0x43, 0x02, 0x3b, 0x94, 0x66, 0xd2, 0x4b, 0x4f, 0xf6, 0x45, 0x33, 0x5d, 0x10, 0x33, 0x18, 0x1e, 0xa3, 0xfc, 0xf7, 0xd2, 0xb8, 0xc8, 0xa7, 0xe0, 0x76, 0x8a, 0xcd, 0xff, 0x4f, 0x99, 0x34, 0x47, 0x84, 0x91, 0x96, 0x9f, + /* (2^124)P */ 0x8a, 0x48, 0x3b, 0x48, 0x4a, 0xbc, 0xac, 0xe2, 0x80, 0xd6, 0xd2, 0x35, 0xde, 0xd0, 0x56, 0x42, 0x33, 0xb3, 0x56, 0x5a, 0xcd, 0xb8, 0x3d, 0xb5, 0x25, 0xc1, 0xed, 0xff, 0x87, 0x0b, 0x79, 0xff, 0xf2, 0x62, 0xe1, 0x76, 0xc6, 0xa2, 0x0f, 0xa8, 0x9b, 0x0d, 0xcc, 0x3f, 0x3d, 0x35, 0x27, 0x8d, 0x0b, 0x74, 0xb0, 0xc3, 0x78, 0x8c, 0xcc, 0xc8, + /* (2^125)P */ 0xfc, 0x9a, 0x0c, 0xa8, 0x49, 0x42, 0xb8, 0xdf, 0xcf, 0xb3, 0x19, 0xa6, 0x64, 0x57, 0xfe, 0xe8, 0xf8, 0xa6, 0x4b, 0x86, 0xa1, 0xd5, 0x83, 0x7f, 0x14, 0x99, 0x18, 0x0c, 0x7d, 0x5b, 0xf7, 0x3d, 0xf9, 0x4b, 0x79, 0xb1, 0x86, 0x30, 0xb4, 0x5e, 0x6a, 0xe8, 0x9d, 0xfa, 0x8a, 0x41, 0xc4, 0x30, 0xfc, 0x56, 0x74, 0x14, 0x42, 0xc8, 0x96, 0x0e, + /* (2^126)P */ 0xdf, 0x66, 0xec, 0xbc, 0x44, 0xdb, 0x19, 0xce, 0xd4, 0xb5, 0x49, 0x40, 0x07, 0x49, 0xe0, 0x3a, 0x61, 0x10, 0xfb, 0x7d, 0xba, 0xb1, 0xe0, 0x28, 0x5b, 0x99, 0x59, 0x96, 0xa2, 0xee, 0xe0, 0x23, 0x37, 0x39, 0x1f, 0xe6, 0x57, 0x9f, 0xf8, 0xf8, 0xdc, 0x74, 0xf6, 0x8f, 0x4f, 0x5e, 0x51, 0xa4, 0x12, 0xac, 0xbe, 0xe4, 0xf3, 0xd1, 0xf0, 0x24, + /* (2^127)P */ 0x1e, 0x3e, 0x9a, 0x5f, 0xdf, 0x9f, 0xd6, 0x4e, 0x8a, 0x28, 0xc3, 0xcd, 0x96, 0x9d, 0x57, 0xc7, 0x61, 0x81, 0x90, 0xff, 0xae, 0xb1, 0x4f, 0xc2, 0x96, 0x8b, 0x1a, 0x18, 0xf4, 0x50, 0xcb, 0x31, 0xe1, 0x57, 0xf4, 0x90, 0xa8, 0xea, 0xac, 0xe7, 0x61, 0x98, 0xb6, 0x15, 0xc1, 0x7b, 0x29, 0xa4, 0xc3, 0x18, 0xef, 0xb9, 0xd8, 0xdf, 0xf6, 0xac, + /* (2^128)P */ 0xca, 0xa8, 0x6c, 0xf1, 0xb4, 0xca, 0xfe, 0x31, 0xee, 0x48, 0x38, 0x8b, 0x0e, 0xbb, 0x7a, 0x30, 0xaa, 0xf9, 0xee, 0x27, 0x53, 0x24, 0xdc, 0x2e, 0x15, 0xa6, 0x48, 0x8f, 0xa0, 0x7e, 0xf1, 0xdc, 0x93, 0x87, 0x39, 0xeb, 0x7f, 0x38, 0x92, 0x92, 0x4c, 0x29, 0xe9, 0x57, 0xd8, 0x59, 0xfc, 0xe9, 0x9c, 
0x44, 0xc0, 0x65, 0xcf, 0xac, 0x4b, 0xdc, + /* (2^129)P */ 0xa3, 0xd0, 0x37, 0x8f, 0x86, 0x2f, 0xc6, 0x47, 0x55, 0x46, 0x65, 0x26, 0x4b, 0x91, 0xe2, 0x18, 0x5c, 0x4f, 0x23, 0xc1, 0x37, 0x29, 0xb9, 0xc1, 0x27, 0xc5, 0x3c, 0xbf, 0x7e, 0x23, 0xdb, 0x73, 0x99, 0xbd, 0x1b, 0xb2, 0x31, 0x68, 0x3a, 0xad, 0xb7, 0xb0, 0x10, 0xc5, 0xe5, 0x11, 0x51, 0xba, 0xa7, 0x60, 0x66, 0x54, 0xf0, 0x08, 0xd7, 0x69, + /* (2^130)P */ 0x89, 0x41, 0x79, 0xcc, 0xeb, 0x0a, 0xf5, 0x4b, 0xa3, 0x4c, 0xce, 0x52, 0xb0, 0xa7, 0xe4, 0x41, 0x75, 0x7d, 0x04, 0xbb, 0x09, 0x4c, 0x50, 0x9f, 0xdf, 0xea, 0x74, 0x61, 0x02, 0xad, 0xb4, 0x9d, 0xb7, 0x05, 0xb9, 0xea, 0xeb, 0x91, 0x35, 0xe7, 0x49, 0xea, 0xd3, 0x4f, 0x3c, 0x60, 0x21, 0x7a, 0xde, 0xc7, 0xe2, 0x5a, 0xee, 0x8e, 0x93, 0xc7, + /* (2^131)P */ 0x00, 0xe8, 0xed, 0xd0, 0xb3, 0x0d, 0xaf, 0xb2, 0xde, 0x2c, 0xf6, 0x00, 0xe2, 0xea, 0x6d, 0xf8, 0x0e, 0xd9, 0x67, 0x59, 0xa9, 0x50, 0xbb, 0x17, 0x8f, 0xff, 0xb1, 0x9f, 0x17, 0xb6, 0xf2, 0xb5, 0xba, 0x80, 0xf7, 0x0f, 0xba, 0xd5, 0x09, 0x43, 0xaa, 0x4e, 0x3a, 0x67, 0x6a, 0x89, 0x9b, 0x18, 0x65, 0x35, 0xf8, 0x3a, 0x49, 0x91, 0x30, 0x51, + /* (2^132)P */ 0x8d, 0x25, 0xe9, 0x0e, 0x7d, 0x50, 0x76, 0xe4, 0x58, 0x7e, 0xb9, 0x33, 0xe6, 0x65, 0x90, 0xc2, 0x50, 0x9d, 0x50, 0x2e, 0x11, 0xad, 0xd5, 0x43, 0x52, 0x32, 0x41, 0x4f, 0x7b, 0xb6, 0xa0, 0xec, 0x81, 0x75, 0x36, 0x7c, 0x77, 0x85, 0x59, 0x70, 0xe4, 0xf9, 0xef, 0x66, 0x8d, 0x35, 0xc8, 0x2a, 0x6e, 0x5b, 0xc6, 0x0d, 0x0b, 0x29, 0x60, 0x68, + /* (2^133)P */ 0xf8, 0xce, 0xb0, 0x3a, 0x56, 0x7d, 0x51, 0x9a, 0x25, 0x73, 0xea, 0xdd, 0xe4, 0xe0, 0x0e, 0xf0, 0x07, 0xc0, 0x31, 0x00, 0x73, 0x35, 0xd0, 0x39, 0xc4, 0x9b, 0xb7, 0x95, 0xe0, 0x62, 0x70, 0x36, 0x0b, 0xcb, 0xa0, 0x42, 0xde, 0x51, 0xcf, 0x41, 0xe0, 0xb8, 0xb4, 0xc0, 0xe5, 0x46, 0x99, 0x9f, 0x02, 0x7f, 0x14, 0x8c, 0xc1, 0x4e, 0xef, 0xe8, + /* (2^134)P */ 0x10, 0x01, 0x57, 0x0a, 0xbe, 0x8b, 0x18, 0xc8, 0xca, 0x00, 0x28, 0x77, 0x4a, 0x9a, 0xc7, 0x55, 0x2a, 0xcc, 0x0c, 0x7b, 0xb9, 0xe9, 0xc8, 0x97, 0x7c, 0x02, 0xe3, 0x09, 0x2f, 0x62, 0x30, 0xb8, 0x40, 0x09, 0x65, 0xe9, 0x55, 0x63, 0xb5, 0x07, 0xca, 0x9f, 0x00, 0xdf, 0x9d, 0x5c, 0xc7, 0xee, 0x57, 0xa5, 0x90, 0x15, 0x1e, 0x22, 0xa0, 0x12, + /* (2^135)P */ 0x71, 0x2d, 0xc9, 0xef, 0x27, 0xb9, 0xd8, 0x12, 0x43, 0x6b, 0xa8, 0xce, 0x3b, 0x6d, 0x6e, 0x91, 0x43, 0x23, 0xbc, 0x32, 0xb3, 0xbf, 0xe1, 0xc7, 0x39, 0xcf, 0x7c, 0x42, 0x4c, 0xb1, 0x30, 0xe2, 0xdd, 0x69, 0x06, 0xe5, 0xea, 0xf0, 0x2a, 0x16, 0x50, 0x71, 0xca, 0x92, 0xdf, 0xc1, 0xcc, 0xec, 0xe6, 0x54, 0x07, 0xf3, 0x18, 0x8d, 0xd8, 0x29, + /* (2^136)P */ 0x98, 0x51, 0x48, 0x8f, 0xfa, 0x2e, 0x5e, 0x67, 0xb0, 0xc6, 0x17, 0x12, 0xb6, 0x7d, 0xc9, 0xad, 0x81, 0x11, 0xad, 0x0c, 0x1c, 0x2d, 0x45, 0xdf, 0xac, 0x66, 0xbd, 0x08, 0x6f, 0x7c, 0xc7, 0x06, 0x6e, 0x19, 0x08, 0x39, 0x64, 0xd7, 0xe4, 0xd1, 0x11, 0x5f, 0x1c, 0xf4, 0x67, 0xc3, 0x88, 0x6a, 0xe6, 0x07, 0xa3, 0x83, 0xd7, 0xfd, 0x2a, 0xf9, + /* (2^137)P */ 0x87, 0xed, 0xeb, 0xd9, 0xdf, 0xff, 0x43, 0x8b, 0xaa, 0x20, 0x58, 0xb0, 0xb4, 0x6b, 0x14, 0xb8, 0x02, 0xc5, 0x40, 0x20, 0x22, 0xbb, 0xf7, 0xb4, 0xf3, 0x05, 0x1e, 0x4d, 0x94, 0xff, 0xe3, 0xc5, 0x22, 0x82, 0xfe, 0xaf, 0x90, 0x42, 0x98, 0x6b, 0x76, 0x8b, 0x3e, 0x89, 0x3f, 0x42, 0x2a, 0xa7, 0x26, 0x00, 0xda, 0x5c, 0xa2, 0x2b, 0xec, 0xdd, + /* (2^138)P */ 0x5c, 0x21, 0x16, 0x0d, 0x46, 0xb8, 0xd0, 0xa7, 0x88, 0xe7, 0x25, 0xcb, 0x3e, 0x50, 0x73, 0x61, 0xe7, 0xaf, 0x5a, 0x3f, 0x47, 0x8b, 0x3d, 0x97, 0x79, 0x2c, 0xe6, 0x6d, 0x95, 0x74, 0x65, 0x70, 0x36, 0xfd, 0xd1, 0x9e, 0x13, 0x18, 0x63, 0xb1, 0x2d, 0x0b, 0xb5, 0x36, 0x3e, 0xe7, 0x35, 0x42, 0x3b, 0xe6, 0x1f, 0x4d, 0x9d, 
0x59, 0xa2, 0x43, + /* (2^139)P */ 0x8c, 0x0c, 0x7c, 0x24, 0x9e, 0xe0, 0xf8, 0x05, 0x1c, 0x9e, 0x1f, 0x31, 0xc0, 0x70, 0xb3, 0xfb, 0x4e, 0xf8, 0x0a, 0x57, 0xb7, 0x49, 0xb5, 0x73, 0xa1, 0x5f, 0x9b, 0x6a, 0x07, 0x6c, 0x87, 0x71, 0x87, 0xd4, 0xbe, 0x98, 0x1e, 0x98, 0xee, 0x52, 0xc1, 0x7b, 0x95, 0x0f, 0x28, 0x32, 0x36, 0x28, 0xd0, 0x3a, 0x0f, 0x7d, 0x2a, 0xa9, 0x62, 0xb9, + /* (2^140)P */ 0x97, 0xe6, 0x18, 0x77, 0xf9, 0x34, 0xac, 0xbc, 0xe0, 0x62, 0x9f, 0x42, 0xde, 0xbd, 0x2f, 0xf7, 0x1f, 0xb7, 0x14, 0x52, 0x8a, 0x79, 0xb2, 0x3f, 0xd2, 0x95, 0x71, 0x01, 0xe8, 0xaf, 0x8c, 0xa4, 0xa4, 0xa7, 0x27, 0xf3, 0x5c, 0xdf, 0x3e, 0x57, 0x7a, 0xf1, 0x76, 0x49, 0xe6, 0x42, 0x3f, 0x8f, 0x1e, 0x63, 0x4a, 0x65, 0xb5, 0x41, 0xf5, 0x02, + /* (2^141)P */ 0x72, 0x85, 0xc5, 0x0b, 0xe1, 0x47, 0x64, 0x02, 0xc5, 0x4d, 0x81, 0x69, 0xb2, 0xcf, 0x0f, 0x6c, 0xd4, 0x6d, 0xd0, 0xc7, 0xb4, 0x1c, 0xd0, 0x32, 0x59, 0x89, 0xe2, 0xe0, 0x96, 0x8b, 0x12, 0x98, 0xbf, 0x63, 0x7a, 0x4c, 0x76, 0x7e, 0x58, 0x17, 0x8f, 0x5b, 0x0a, 0x59, 0x65, 0x75, 0xbc, 0x61, 0x1f, 0xbe, 0xc5, 0x6e, 0x0a, 0x57, 0x52, 0x70, + /* (2^142)P */ 0x92, 0x1c, 0x77, 0xbb, 0x62, 0x02, 0x6c, 0x25, 0x9c, 0x66, 0x07, 0x83, 0xab, 0xcc, 0x80, 0x5d, 0xd2, 0x76, 0x0c, 0xa4, 0xc5, 0xb4, 0x8a, 0x68, 0x23, 0x31, 0x32, 0x29, 0x8a, 0x47, 0x92, 0x12, 0x80, 0xb3, 0xfa, 0x18, 0xe4, 0x8d, 0xc0, 0x4d, 0xfe, 0x97, 0x5f, 0x72, 0x41, 0xb5, 0x5c, 0x7a, 0xbd, 0xf0, 0xcf, 0x5e, 0x97, 0xaa, 0x64, 0x32, + /* (2^143)P */ 0x35, 0x3f, 0x75, 0xc1, 0x7a, 0x75, 0x7e, 0xa9, 0xc6, 0x0b, 0x4e, 0x32, 0x62, 0xec, 0xe3, 0x5c, 0xfb, 0x01, 0x43, 0xb6, 0xd4, 0x5b, 0x75, 0xd2, 0xee, 0x7f, 0x5d, 0x23, 0x2b, 0xb3, 0x54, 0x34, 0x4c, 0xd3, 0xb4, 0x32, 0x84, 0x81, 0xb5, 0x09, 0x76, 0x19, 0xda, 0x58, 0xda, 0x7c, 0xdb, 0x2e, 0xdd, 0x4c, 0x8e, 0xdd, 0x5d, 0x89, 0x10, 0x10, + /* (2^144)P */ 0x57, 0x25, 0x6a, 0x08, 0x37, 0x92, 0xa8, 0xdf, 0x24, 0xef, 0x8f, 0x33, 0x34, 0x52, 0xa4, 0x4c, 0xf0, 0x77, 0x9f, 0x69, 0x77, 0xd5, 0x8f, 0xd2, 0x9a, 0xb3, 0xb6, 0x1d, 0x2d, 0xa6, 0xf7, 0x1f, 0xda, 0xd7, 0xcb, 0x75, 0x11, 0xc3, 0x6b, 0xc0, 0x38, 0xb1, 0xd5, 0x2d, 0x96, 0x84, 0x16, 0xfa, 0x26, 0xb9, 0xcc, 0x3f, 0x16, 0x47, 0x23, 0x74, + /* (2^145)P */ 0x9b, 0x61, 0x2a, 0x1c, 0xdd, 0x39, 0xa5, 0xfa, 0x1c, 0x7d, 0x63, 0x50, 0xca, 0xe6, 0x9d, 0xfa, 0xb7, 0xc4, 0x4c, 0x6a, 0x97, 0x5f, 0x36, 0x4e, 0x47, 0xdd, 0x17, 0xf7, 0xf9, 0x19, 0xce, 0x75, 0x17, 0xad, 0xce, 0x2a, 0xf3, 0xfe, 0x27, 0x8f, 0x3e, 0x48, 0xc0, 0x60, 0x87, 0x24, 0x19, 0xae, 0x59, 0xe4, 0x5a, 0x00, 0x2a, 0xba, 0xa2, 0x1f, + /* (2^146)P */ 0x26, 0x88, 0x42, 0x60, 0x9f, 0x6e, 0x2c, 0x7c, 0x39, 0x0f, 0x47, 0x6a, 0x0e, 0x02, 0xbb, 0x4b, 0x34, 0x29, 0x55, 0x18, 0x36, 0xcf, 0x3b, 0x47, 0xf1, 0x2e, 0xfc, 0x6e, 0x94, 0xff, 0xe8, 0x6b, 0x06, 0xd2, 0xba, 0x77, 0x5e, 0x60, 0xd7, 0x19, 0xef, 0x02, 0x9d, 0x3a, 0xc2, 0xb7, 0xa9, 0xd8, 0x57, 0xee, 0x7e, 0x2b, 0xf2, 0x6d, 0x28, 0xda, + /* (2^147)P */ 0xdf, 0xd9, 0x92, 0x11, 0x98, 0x23, 0xe2, 0x45, 0x2f, 0x74, 0x70, 0xee, 0x0e, 0x55, 0x65, 0x79, 0x86, 0x38, 0x17, 0x92, 0x85, 0x87, 0x99, 0x50, 0xd9, 0x7c, 0xdb, 0xa1, 0x10, 0xec, 0x30, 0xb7, 0x40, 0xa3, 0x23, 0x9b, 0x0e, 0x27, 0x49, 0x29, 0x03, 0x94, 0xff, 0x53, 0xdc, 0xd7, 0xed, 0x49, 0xa9, 0x5a, 0x3b, 0xee, 0xd7, 0xc7, 0x65, 0xaf, + /* (2^148)P */ 0xa0, 0xbd, 0xbe, 0x03, 0xee, 0x0c, 0xbe, 0x32, 0x00, 0x7b, 0x52, 0xcb, 0x92, 0x29, 0xbf, 0xa0, 0xc6, 0xd9, 0xd2, 0xd6, 0x15, 0xe8, 0x3a, 0x75, 0x61, 0x65, 0x56, 0xae, 0xad, 0x3c, 0x2a, 0x64, 0x14, 0x3f, 0x8e, 0xc1, 0x2d, 0x0c, 0x8d, 0x20, 0xdb, 0x58, 0x4b, 0xe5, 0x40, 0x15, 0x4b, 0xdc, 0xa8, 0xbd, 0xef, 0x08, 0xa7, 0xd1, 0xf4, 0xb0, + /* 
(2^149)P */ 0xa9, 0x0f, 0x05, 0x94, 0x66, 0xac, 0x1f, 0x65, 0x3f, 0xe1, 0xb8, 0xe1, 0x34, 0x5e, 0x1d, 0x8f, 0xe3, 0x93, 0x03, 0x15, 0xff, 0xb6, 0x65, 0xb6, 0x6e, 0xc0, 0x2f, 0xd4, 0x2e, 0xb9, 0x2c, 0x13, 0x3c, 0x99, 0x1c, 0xb5, 0x87, 0xba, 0x79, 0xcb, 0xf0, 0x18, 0x06, 0x86, 0x04, 0x14, 0x25, 0x09, 0xcd, 0x1c, 0x14, 0xda, 0x35, 0xd0, 0x38, 0x3b, + /* (2^150)P */ 0x1b, 0x04, 0xa3, 0x27, 0xb4, 0xd3, 0x37, 0x48, 0x1e, 0x8f, 0x69, 0xd3, 0x5a, 0x2f, 0x20, 0x02, 0x36, 0xbe, 0x06, 0x7b, 0x6b, 0x6c, 0x12, 0x5b, 0x80, 0x74, 0x44, 0xe6, 0xf8, 0xf5, 0x95, 0x59, 0x29, 0xab, 0x51, 0x47, 0x83, 0x28, 0xe0, 0xad, 0xde, 0xaa, 0xd3, 0xb1, 0x1a, 0xcb, 0xa3, 0xcd, 0x8b, 0x6a, 0xb1, 0xa7, 0x0a, 0xd1, 0xf9, 0xbe, + /* (2^151)P */ 0xce, 0x2f, 0x85, 0xca, 0x74, 0x6d, 0x49, 0xb8, 0xce, 0x80, 0x44, 0xe0, 0xda, 0x5b, 0xcf, 0x2f, 0x79, 0x74, 0xfe, 0xb4, 0x2c, 0x99, 0x20, 0x6e, 0x09, 0x04, 0xfb, 0x6d, 0x57, 0x5b, 0x95, 0x0c, 0x45, 0xda, 0x4f, 0x7f, 0x63, 0xcc, 0x85, 0x5a, 0x67, 0x50, 0x68, 0x71, 0xb4, 0x67, 0xb1, 0x2e, 0xc1, 0x1c, 0xdc, 0xff, 0x2a, 0x7c, 0x10, 0x5e, + /* (2^152)P */ 0xa6, 0xde, 0xf3, 0xd4, 0x22, 0x30, 0x24, 0x9e, 0x0b, 0x30, 0x54, 0x59, 0x7e, 0xa2, 0xeb, 0x89, 0x54, 0x65, 0x3e, 0x40, 0xd1, 0xde, 0xe6, 0xee, 0x4d, 0xbf, 0x5e, 0x40, 0x1d, 0xee, 0x4f, 0x68, 0xd9, 0xa7, 0x2f, 0xb3, 0x64, 0xb3, 0xf5, 0xc8, 0xd3, 0xaa, 0x70, 0x70, 0x3d, 0xef, 0xd3, 0x95, 0x54, 0xdb, 0x3e, 0x94, 0x95, 0x92, 0x1f, 0x45, + /* (2^153)P */ 0x22, 0x80, 0x1d, 0x9d, 0x96, 0xa5, 0x78, 0x6f, 0xe0, 0x1e, 0x1b, 0x66, 0x42, 0xc8, 0xae, 0x9e, 0x46, 0x45, 0x08, 0x41, 0xdf, 0x80, 0xae, 0x6f, 0xdb, 0x15, 0x5a, 0x21, 0x31, 0x7a, 0xd0, 0xf2, 0x54, 0x15, 0x88, 0xd3, 0x0f, 0x7f, 0x14, 0x5a, 0x14, 0x97, 0xab, 0xf4, 0x58, 0x6a, 0x9f, 0xea, 0x74, 0xe5, 0x6b, 0x90, 0x59, 0x2b, 0x48, 0xd9, + /* (2^154)P */ 0x12, 0x24, 0x04, 0xf5, 0x50, 0xc2, 0x8c, 0xb0, 0x7c, 0x46, 0x98, 0xd5, 0x24, 0xad, 0xf6, 0x72, 0xdc, 0x82, 0x1a, 0x60, 0xc1, 0xeb, 0x48, 0xef, 0x7f, 0x6e, 0xe6, 0xcc, 0xdb, 0x7b, 0xae, 0xbe, 0x5e, 0x1e, 0x5c, 0xe6, 0x0a, 0x70, 0xdf, 0xa4, 0xa3, 0x85, 0x1b, 0x1b, 0x7f, 0x72, 0xb9, 0x96, 0x6f, 0xdc, 0x03, 0x76, 0x66, 0xfb, 0xa0, 0x33, + /* (2^155)P */ 0x37, 0x40, 0xbb, 0xbc, 0x68, 0x58, 0x86, 0xca, 0xbb, 0xa5, 0x24, 0x76, 0x3d, 0x48, 0xd1, 0xad, 0xb4, 0xa8, 0xcf, 0xc3, 0xb6, 0xa8, 0xba, 0x1a, 0x3a, 0xbe, 0x33, 0x75, 0x04, 0x5c, 0x13, 0x8c, 0x0d, 0x70, 0x8d, 0xa6, 0x4e, 0x2a, 0xeb, 0x17, 0x3c, 0x22, 0xdd, 0x3e, 0x96, 0x40, 0x11, 0x9e, 0x4e, 0xae, 0x3d, 0xf8, 0x91, 0xd7, 0x50, 0xc8, + /* (2^156)P */ 0xd8, 0xca, 0xde, 0x19, 0xcf, 0x00, 0xe4, 0x73, 0x18, 0x7f, 0x9b, 0x9f, 0xf4, 0x5b, 0x49, 0x49, 0x99, 0xdc, 0xa4, 0x46, 0x21, 0xb5, 0xd7, 0x3e, 0xb7, 0x47, 0x1b, 0xa9, 0x9f, 0x4c, 0x69, 0x7d, 0xec, 0x33, 0xd6, 0x1c, 0x51, 0x7f, 0x47, 0x74, 0x7a, 0x6c, 0xf3, 0xd2, 0x2e, 0xbf, 0xdf, 0x6c, 0x9e, 0x77, 0x3b, 0x34, 0xf6, 0x73, 0x80, 0xed, + /* (2^157)P */ 0x16, 0xfb, 0x16, 0xc3, 0xc2, 0x83, 0xe4, 0xf4, 0x03, 0x7f, 0x52, 0xb0, 0x67, 0x51, 0x7b, 0x24, 0x5a, 0x51, 0xd3, 0xb6, 0x4e, 0x59, 0x76, 0xcd, 0x08, 0x7b, 0x1d, 0x7a, 0x9c, 0x65, 0xae, 0xce, 0xaa, 0xd2, 0x1c, 0x85, 0x66, 0x68, 0x06, 0x15, 0xa8, 0x06, 0xe6, 0x16, 0x37, 0xf4, 0x49, 0x9e, 0x0f, 0x50, 0x37, 0xb1, 0xb2, 0x93, 0x70, 0x43, + /* (2^158)P */ 0x18, 0x3a, 0x16, 0xe5, 0x8d, 0xc8, 0x35, 0xd6, 0x7b, 0x09, 0xec, 0x61, 0x5f, 0x5c, 0x2a, 0x19, 0x96, 0x2e, 0xc3, 0xfd, 0xab, 0xe6, 0x23, 0xae, 0xab, 0xc5, 0xcb, 0xb9, 0x7b, 0x2d, 0x34, 0x51, 0xb9, 0x41, 0x9e, 0x7d, 0xca, 0xda, 0x25, 0x45, 0x14, 0xb0, 0xc7, 0x4d, 0x26, 0x2b, 0xfe, 0x43, 0xb0, 0x21, 0x5e, 0xfa, 0xdc, 0x7c, 0xf9, 0x5a, + /* (2^159)P */ 0x94, 0xad, 
0x42, 0x17, 0xf5, 0xcd, 0x1c, 0x0d, 0xf6, 0x41, 0xd2, 0x55, 0xbb, 0x50, 0xf1, 0xc6, 0xbc, 0xa6, 0xc5, 0x3a, 0xfd, 0x9b, 0x75, 0x3e, 0xf6, 0x1a, 0xa7, 0xb2, 0x6e, 0x64, 0x12, 0xdc, 0x3c, 0xe5, 0xf6, 0xfc, 0x3b, 0xfa, 0x43, 0x81, 0xd4, 0xa5, 0xee, 0xf5, 0x9c, 0x47, 0x2f, 0xd0, 0x9c, 0xde, 0xa1, 0x48, 0x91, 0x9a, 0x34, 0xc1, + /* (2^160)P */ 0x37, 0x1b, 0xb3, 0x88, 0xc9, 0x98, 0x4e, 0xfb, 0x84, 0x4f, 0x2b, 0x0a, 0xb6, 0x8f, 0x35, 0x15, 0xcd, 0x61, 0x7a, 0x5f, 0x5c, 0xa0, 0xca, 0x23, 0xa0, 0x93, 0x1f, 0xcc, 0x3c, 0x39, 0x3a, 0x24, 0xa7, 0x49, 0xad, 0x8d, 0x59, 0xcc, 0x94, 0x5a, 0x16, 0xf5, 0x70, 0xe8, 0x52, 0x1e, 0xee, 0x20, 0x30, 0x17, 0x7e, 0xf0, 0x4c, 0x93, 0x06, 0x5a, + /* (2^161)P */ 0x81, 0xba, 0x3b, 0xd7, 0x3e, 0xb4, 0x32, 0x3a, 0x22, 0x39, 0x2a, 0xfc, 0x19, 0xd9, 0xd2, 0xf6, 0xc5, 0x79, 0x6c, 0x0e, 0xde, 0xda, 0x01, 0xff, 0x52, 0xfb, 0xb6, 0x95, 0x4e, 0x7a, 0x10, 0xb8, 0x06, 0x86, 0x3c, 0xcd, 0x56, 0xd6, 0x15, 0xbf, 0x6e, 0x3e, 0x4f, 0x35, 0x5e, 0xca, 0xbc, 0xa5, 0x95, 0xa2, 0xdf, 0x2d, 0x1d, 0xaf, 0x59, 0xf9, + /* (2^162)P */ 0x69, 0xe5, 0xe2, 0xfa, 0xc9, 0x7f, 0xdd, 0x09, 0xf5, 0x6b, 0x4e, 0x2e, 0xbe, 0xb4, 0xbf, 0x3e, 0xb2, 0xf2, 0x81, 0x30, 0xe1, 0x07, 0xa8, 0x0d, 0x2b, 0xd2, 0x5a, 0x55, 0xbe, 0x4b, 0x86, 0x5d, 0xb0, 0x5e, 0x7c, 0x8f, 0xc1, 0x3c, 0x81, 0x4c, 0xf7, 0x6d, 0x7d, 0xe6, 0x4f, 0x8a, 0x85, 0xc2, 0x2f, 0x28, 0xef, 0x8c, 0x69, 0xc2, 0xc2, 0x1a, + /* (2^163)P */ 0xd9, 0xe4, 0x0e, 0x1e, 0xc2, 0xf7, 0x2f, 0x9f, 0xa1, 0x40, 0xfe, 0x46, 0x16, 0xaf, 0x2e, 0xd1, 0xec, 0x15, 0x9b, 0x61, 0x92, 0xce, 0xfc, 0x10, 0x43, 0x1d, 0x00, 0xf6, 0xbe, 0x20, 0x80, 0x80, 0x6f, 0x3c, 0x16, 0x94, 0x59, 0xba, 0x03, 0x53, 0x6e, 0xb6, 0xdd, 0x25, 0x7b, 0x86, 0xbf, 0x96, 0xf4, 0x2f, 0xa1, 0x96, 0x8d, 0xf9, 0xb3, 0x29, + /* (2^164)P */ 0x3b, 0x04, 0x60, 0x6e, 0xce, 0xab, 0xd2, 0x63, 0x18, 0x53, 0x88, 0x16, 0x4a, 0x6a, 0xab, 0x72, 0x03, 0x68, 0xa5, 0xd4, 0x0d, 0xb2, 0x82, 0x81, 0x1f, 0x2b, 0x5c, 0x75, 0xe8, 0xd2, 0x1d, 0x7f, 0xe7, 0x1b, 0x35, 0x02, 0xde, 0xec, 0xbd, 0xcb, 0xc7, 0x01, 0xd3, 0x95, 0x61, 0xfe, 0xb2, 0x7a, 0x66, 0x09, 0x4c, 0x6d, 0xfd, 0x39, 0xf7, 0x52, + /* (2^165)P */ 0x42, 0xc1, 0x5f, 0xf8, 0x35, 0x52, 0xc1, 0xfe, 0xc5, 0x11, 0x80, 0x1c, 0x11, 0x46, 0x31, 0x11, 0xbe, 0xd0, 0xc4, 0xb6, 0x07, 0x13, 0x38, 0xa0, 0x8d, 0x65, 0xf0, 0x56, 0x9e, 0x16, 0xbf, 0x9d, 0xcd, 0x51, 0x34, 0xf9, 0x08, 0x48, 0x7b, 0x76, 0x0c, 0x7b, 0x30, 0x07, 0xa8, 0x76, 0xaf, 0xa3, 0x29, 0x38, 0xb0, 0x58, 0xde, 0x72, 0x4b, 0x45, + /* (2^166)P */ 0xd4, 0x16, 0xa7, 0xc0, 0xb4, 0x9f, 0xdf, 0x1a, 0x37, 0xc8, 0x35, 0xed, 0xc5, 0x85, 0x74, 0x64, 0x09, 0x22, 0xef, 0xe9, 0x0c, 0xaf, 0x12, 0x4c, 0x9e, 0xf8, 0x47, 0x56, 0xe0, 0x7f, 0x4e, 0x24, 0x6b, 0x0c, 0xe7, 0xad, 0xc6, 0x47, 0x1d, 0xa4, 0x0d, 0x86, 0x89, 0x65, 0xe8, 0x5f, 0x71, 0xc7, 0xe9, 0xcd, 0xec, 0x6c, 0x62, 0xc7, 0xe3, 0xb3, + /* (2^167)P */ 0xb5, 0xea, 0x86, 0xe3, 0x15, 0x18, 0x3f, 0x6d, 0x7b, 0x05, 0x95, 0x15, 0x53, 0x26, 0x1c, 0xeb, 0xbe, 0x7e, 0x16, 0x42, 0x4b, 0xa2, 0x3d, 0xdd, 0x0e, 0xff, 0xba, 0x67, 0xb5, 0xae, 0x7a, 0x17, 0xde, 0x23, 0xad, 0x14, 0xcc, 0xd7, 0xaf, 0x57, 0x01, 0xe0, 0xdd, 0x48, 0xdd, 0xd7, 0xe3, 0xdf, 0xe9, 0x2d, 0xda, 0x67, 0xa4, 0x9f, 0x29, 0x04, + /* (2^168)P */ 0x16, 0x53, 0xe6, 0x9c, 0x4e, 0xe5, 0x1e, 0x70, 0x81, 0x25, 0x02, 0x9b, 0x47, 0x6d, 0xd2, 0x08, 0x73, 0xbe, 0x0a, 0xf1, 0x7b, 0xeb, 0x24, 0xeb, 0x38, 0x23, 0x5c, 0xb6, 0x3e, 0xce, 0x1e, 0xe3, 0xbc, 0x82, 0x35, 0x1f, 0xaf, 0x3a, 0x3a, 0xe5, 0x4e, 0xc1, 0xca, 0xbf, 0x47, 0xb4, 0xbb, 0xbc, 0x5f, 0xea, 0xc6, 0xca, 0xf3, 0xa0, 0xa2, 0x73, + /* (2^169)P */ 0xef, 0xa4, 0x7a, 0x4e, 0xe4, 0xc7, 
0xb6, 0x43, 0x2e, 0xa5, 0xe4, 0xa5, 0xba, 0x1e, 0xa5, 0xfe, 0x9e, 0xce, 0xa9, 0x80, 0x04, 0xcb, 0x4f, 0xd8, 0x74, 0x05, 0x48, 0xfa, 0x99, 0x11, 0x5d, 0x97, 0x3b, 0x07, 0x0d, 0xdd, 0xe6, 0xb1, 0x74, 0x87, 0x1a, 0xd3, 0x26, 0xb7, 0x8f, 0xe1, 0x63, 0x3d, 0xec, 0x53, 0x93, 0xb0, 0x81, 0x78, 0x34, 0xa4, + /* (2^170)P */ 0xe1, 0xe7, 0xd4, 0x58, 0x9d, 0x0e, 0x8b, 0x65, 0x66, 0x37, 0x16, 0x48, 0x6f, 0xaa, 0x42, 0x37, 0x77, 0xad, 0xb1, 0x56, 0x48, 0xdf, 0x65, 0x36, 0x30, 0xb8, 0x00, 0x12, 0xd8, 0x32, 0x28, 0x7f, 0xc1, 0x71, 0xeb, 0x93, 0x0f, 0x48, 0x04, 0xe1, 0x5a, 0x6a, 0x96, 0xc1, 0xca, 0x89, 0x6d, 0x1b, 0x82, 0x4c, 0x18, 0x6d, 0x55, 0x4b, 0xea, 0xfd, + /* (2^171)P */ 0x62, 0x1a, 0x53, 0xb4, 0xb1, 0xbe, 0x6f, 0x15, 0x18, 0x88, 0xd4, 0x66, 0x61, 0xc7, 0x12, 0x69, 0x02, 0xbd, 0x03, 0x23, 0x2b, 0xef, 0xf9, 0x54, 0xa4, 0x85, 0xa8, 0xe3, 0xb7, 0xbd, 0xa9, 0xa3, 0xf3, 0x2a, 0xdd, 0xf1, 0xd4, 0x03, 0x0f, 0xa9, 0xa1, 0xd8, 0xa3, 0xcd, 0xb2, 0x71, 0x90, 0x4b, 0x35, 0x62, 0xf2, 0x2f, 0xce, 0x67, 0x1f, 0xaa, + /* (2^172)P */ 0x9e, 0x1e, 0xcd, 0x43, 0x7e, 0x87, 0x37, 0x94, 0x3a, 0x97, 0x4c, 0x7e, 0xee, 0xc9, 0x37, 0x85, 0xf1, 0xd9, 0x4f, 0xbf, 0xf9, 0x6f, 0x39, 0x9a, 0x39, 0x87, 0x2e, 0x25, 0x84, 0x42, 0xc3, 0x80, 0xcb, 0x07, 0x22, 0xae, 0x30, 0xd5, 0x50, 0xa1, 0x23, 0xcc, 0x31, 0x81, 0x9d, 0xf1, 0x30, 0xd9, 0x2b, 0x73, 0x41, 0x16, 0x50, 0xab, 0x2d, 0xa2, + /* (2^173)P */ 0xa4, 0x69, 0x4f, 0xa1, 0x4e, 0xb9, 0xbf, 0x14, 0xe8, 0x2b, 0x04, 0x93, 0xb7, 0x6e, 0x9f, 0x7d, 0x73, 0x0a, 0xc5, 0x14, 0xb8, 0xde, 0x8c, 0xc1, 0xfe, 0xc0, 0xa7, 0xa4, 0xcc, 0x42, 0x42, 0x81, 0x15, 0x65, 0x8a, 0x80, 0xb9, 0xde, 0x1f, 0x60, 0x33, 0x0e, 0xcb, 0xfc, 0xe0, 0xdb, 0x83, 0xa1, 0xe5, 0xd0, 0x16, 0x86, 0x2c, 0xe2, 0x87, 0xed, + /* (2^174)P */ 0x7a, 0xc0, 0xeb, 0x6b, 0xf6, 0x0d, 0x4c, 0x6d, 0x1e, 0xdb, 0xab, 0xe7, 0x19, 0x45, 0xc6, 0xe3, 0xb2, 0x06, 0xbb, 0xbc, 0x70, 0x99, 0x83, 0x33, 0xeb, 0x28, 0xc8, 0x77, 0xf6, 0x4d, 0x01, 0xb7, 0x59, 0xa0, 0xd2, 0xb3, 0x2a, 0x72, 0x30, 0xe7, 0x11, 0x39, 0xb6, 0x41, 0x29, 0x65, 0x5a, 0x14, 0xb9, 0x86, 0x08, 0xe0, 0x7d, 0x32, 0x8c, 0xf0, + /* (2^175)P */ 0x5c, 0x11, 0x30, 0x9e, 0x05, 0x27, 0xf5, 0x45, 0x0f, 0xb3, 0xc9, 0x75, 0xc3, 0xd7, 0xe1, 0x82, 0x3b, 0x8e, 0x87, 0x23, 0x00, 0x15, 0x19, 0x07, 0xd9, 0x21, 0x53, 0xc7, 0xf1, 0xa3, 0xbf, 0x70, 0x64, 0x15, 0x18, 0xca, 0x23, 0x9e, 0xd3, 0x08, 0xc3, 0x2a, 0x8b, 0xe5, 0x83, 0x04, 0x89, 0x14, 0xfd, 0x28, 0x25, 0x1c, 0xe3, 0x26, 0xa7, 0x22, + /* (2^176)P */ 0xdc, 0xd4, 0x75, 0x60, 0x99, 0x94, 0xea, 0x09, 0x8e, 0x8a, 0x3c, 0x1b, 0xf9, 0xbd, 0x33, 0x0d, 0x51, 0x3d, 0x12, 0x6f, 0x4e, 0x72, 0xe0, 0x17, 0x20, 0xe9, 0x75, 0xe6, 0x3a, 0xb2, 0x13, 0x83, 0x4e, 0x7a, 0x08, 0x9e, 0xd1, 0x04, 0x5f, 0x6b, 0x42, 0x0b, 0x76, 0x2a, 0x2d, 0x77, 0x53, 0x6c, 0x65, 0x6d, 0x8e, 0x25, 0x3c, 0xb6, 0x8b, 0x69, + /* (2^177)P */ 0xb9, 0x49, 0x28, 0xd0, 0xdc, 0x6c, 0x8f, 0x4c, 0xc9, 0x14, 0x8a, 0x38, 0xa3, 0xcb, 0xc4, 0x9d, 0x53, 0xcf, 0xe9, 0xe3, 0xcf, 0xe0, 0xb1, 0xf2, 0x1b, 0x4c, 0x7f, 0x83, 0x2a, 0x7a, 0xe9, 0x8b, 0x3b, 0x86, 0x61, 0x30, 0xe9, 0x99, 0xbd, 0xba, 0x19, 0x6e, 0x65, 0x2a, 0x12, 0x3e, 0x9c, 0xa8, 0xaf, 0xc3, 0xcf, 0xf8, 0x1f, 0x77, 0x86, 0xea, + /* (2^178)P */ 0x30, 0xde, 0xe7, 0xff, 0x54, 0xf7, 0xa2, 0x59, 0xf6, 0x0b, 0xfb, 0x7a, 0xf2, 0x39, 0xf0, 0xdb, 0x39, 0xbc, 0xf0, 0xfa, 0x60, 0xeb, 0x6b, 0x4f, 0x47, 0x17, 0xc8, 0x00, 0x65, 0x6d, 0x25, 0x1c, 0xd0, 0x48, 0x56, 0x53, 0x45, 0x11, 0x30, 0x02, 0x49, 0x20, 0x27, 0xac, 0xf2, 0x4c, 0xac, 0x64, 0x3d, 0x52, 0xb8, 0x89, 0xe0, 0x93, 0x16, 0x0f, + /* (2^179)P */ 0x84, 0x09, 0xba, 0x40, 0xb2, 0x2f, 0xa3, 0xa8, 0xc2, 0xba, 
0x46, 0x33, 0x05, 0x9d, 0x62, 0xad, 0xa1, 0x3c, 0x33, 0xef, 0x0d, 0xeb, 0xf0, 0x77, 0x11, 0x5a, 0xb0, 0x21, 0x9c, 0xdf, 0x55, 0x24, 0x25, 0x35, 0x51, 0x61, 0x92, 0xf0, 0xb1, 0xce, 0xf5, 0xd4, 0x7b, 0x6c, 0x21, 0x9d, 0x56, 0x52, 0xf8, 0xa1, 0x4c, 0xe9, 0x27, 0x55, 0xac, 0x91, + /* (2^180)P */ 0x03, 0x3e, 0x30, 0xd2, 0x0a, 0xfa, 0x7d, 0x82, 0x3d, 0x1f, 0x8b, 0xcb, 0xb6, 0x04, 0x5c, 0xcc, 0x8b, 0xda, 0xe2, 0x68, 0x74, 0x08, 0x8c, 0x44, 0x83, 0x57, 0x6d, 0x6f, 0x80, 0xb0, 0x7e, 0xa9, 0x82, 0x91, 0x7b, 0x4c, 0x37, 0x97, 0xd1, 0x63, 0xd1, 0xbd, 0x45, 0xe6, 0x8a, 0x86, 0xd6, 0x89, 0x54, 0xfd, 0xd2, 0xb1, 0xd7, 0x54, 0xad, 0xaf, + /* (2^181)P */ 0x8b, 0x33, 0x62, 0x49, 0x9f, 0x63, 0xf9, 0x87, 0x42, 0x58, 0xbf, 0xb3, 0xe6, 0x68, 0x02, 0x60, 0x5c, 0x76, 0x62, 0xf7, 0x61, 0xd7, 0x36, 0x31, 0xf7, 0x9c, 0xb5, 0xe5, 0x13, 0x6c, 0xea, 0x78, 0xae, 0xcf, 0xde, 0xbf, 0xb6, 0xeb, 0x4f, 0xc8, 0x2a, 0xb4, 0x9a, 0x9f, 0xf3, 0xd1, 0x6a, 0xec, 0x0c, 0xbd, 0x85, 0x98, 0x40, 0x06, 0x1c, 0x2a, + /* (2^182)P */ 0x74, 0x3b, 0xe7, 0x81, 0xd5, 0xae, 0x54, 0x56, 0x03, 0xe8, 0x97, 0x16, 0x76, 0xcf, 0x24, 0x96, 0x96, 0x5b, 0xcc, 0x09, 0xab, 0x23, 0x6f, 0x54, 0xae, 0x8f, 0xe4, 0x12, 0xcb, 0xfd, 0xbc, 0xac, 0x93, 0x45, 0x3d, 0x68, 0x08, 0x22, 0x59, 0xc6, 0xf0, 0x47, 0x19, 0x8c, 0x79, 0x93, 0x1e, 0x0e, 0x30, 0xb0, 0x94, 0xfb, 0x17, 0x1d, 0x5a, 0x12, + /* (2^183)P */ 0x85, 0xff, 0x40, 0x18, 0x85, 0xff, 0x44, 0x37, 0x69, 0x23, 0x4d, 0x34, 0xe1, 0xeb, 0xa3, 0x1b, 0x55, 0x40, 0xc1, 0x64, 0xf4, 0xd4, 0x13, 0x0a, 0x9f, 0xb9, 0x19, 0xfc, 0x88, 0x7d, 0xc0, 0x72, 0xcf, 0x69, 0x2f, 0xd2, 0x0c, 0x82, 0x0f, 0xda, 0x08, 0xba, 0x0f, 0xaa, 0x3b, 0xe9, 0xe5, 0x83, 0x7a, 0x06, 0xe8, 0x1b, 0x38, 0x43, 0xc3, 0x54, + /* (2^184)P */ 0x14, 0xaa, 0xb3, 0x6e, 0xe6, 0x28, 0xee, 0xc5, 0x22, 0x6c, 0x7c, 0xf9, 0xa8, 0x71, 0xcc, 0xfe, 0x68, 0x7e, 0xd3, 0xb8, 0x37, 0x96, 0xca, 0x0b, 0xd9, 0xb6, 0x06, 0xa9, 0xf6, 0x71, 0xe8, 0x31, 0xf7, 0xd8, 0xf1, 0x5d, 0xab, 0xb9, 0xf0, 0x5c, 0x98, 0xcf, 0x22, 0xa2, 0x2a, 0xf6, 0xd0, 0x59, 0xf0, 0x9d, 0xd9, 0x6a, 0x4f, 0x59, 0x57, 0xad, + /* (2^185)P */ 0xd7, 0x2b, 0x3d, 0x38, 0x4c, 0x2e, 0x23, 0x4d, 0x49, 0xa2, 0x62, 0x62, 0xf9, 0x0f, 0xde, 0x08, 0xf3, 0x86, 0x71, 0xb6, 0xc7, 0xf9, 0x85, 0x9c, 0x33, 0xa1, 0xcf, 0x16, 0xaa, 0x60, 0xb9, 0xb7, 0xea, 0xed, 0x01, 0x1c, 0x59, 0xdb, 0x3f, 0x3f, 0x97, 0x2e, 0xf0, 0x09, 0x9f, 0x10, 0x85, 0x5f, 0x53, 0x39, 0xf3, 0x13, 0x40, 0x56, 0x95, 0xf9, + /* (2^186)P */ 0xb4, 0xe3, 0xda, 0xc6, 0x1f, 0x78, 0x8e, 0xac, 0xd4, 0x20, 0x1d, 0xa0, 0xbf, 0x4c, 0x09, 0x16, 0xa7, 0x30, 0xb5, 0x8d, 0x9e, 0xa1, 0x5f, 0x6d, 0x52, 0xf4, 0x71, 0xb6, 0x32, 0x2d, 0x21, 0x51, 0xc6, 0xfc, 0x2f, 0x08, 0xf4, 0x13, 0x6c, 0x55, 0xba, 0x72, 0x81, 0x24, 0x49, 0x0e, 0x4f, 0x06, 0x36, 0x39, 0x6a, 0xc5, 0x81, 0xfc, 0xeb, 0xb2, + /* (2^187)P */ 0x7d, 0x8d, 0xc8, 0x6c, 0xea, 0xb4, 0xb9, 0xe8, 0x40, 0xc9, 0x69, 0xc9, 0x30, 0x05, 0xfd, 0x34, 0x46, 0xfd, 0x94, 0x05, 0x16, 0xf5, 0x4b, 0x13, 0x3d, 0x24, 0x1a, 0xd6, 0x64, 0x2b, 0x9c, 0xe2, 0xa5, 0xd9, 0x98, 0xe0, 0xe8, 0xf4, 0xbc, 0x2c, 0xbd, 0xa2, 0x56, 0xe3, 0x9e, 0x14, 0xdb, 0xbf, 0x05, 0xbf, 0x9a, 0x13, 0x5d, 0xf7, 0x91, 0xa3, + /* (2^188)P */ 0x8b, 0xcb, 0x27, 0xf3, 0x15, 0x26, 0x05, 0x40, 0x0f, 0xa6, 0x15, 0x13, 0x71, 0x95, 0xa2, 0xc6, 0x38, 0x04, 0x67, 0xf8, 0x9a, 0x83, 0x06, 0xaa, 0x25, 0x36, 0x72, 0x01, 0x6f, 0x74, 0x5f, 0xe5, 0x6e, 0x44, 0x99, 0xce, 0x13, 0xbc, 0x82, 0xc2, 0x0d, 0xa4, 0x98, 0x50, 0x38, 0xf3, 0xa2, 0xc5, 0xe5, 0x24, 0x1f, 0x6f, 0x56, 0x3e, 0x07, 0xb2, + /* (2^189)P */ 0xbd, 0x0f, 0x32, 0x60, 0x07, 0xb1, 0xd7, 0x0b, 0x11, 0x07, 0x57, 0x02, 0x89, 0xe8, 
0x8b, 0xe8, 0x5a, 0x1f, 0xee, 0x54, 0x6b, 0xff, 0xb3, 0x04, 0x07, 0x57, 0x13, 0x0b, 0x94, 0xa8, 0x4d, 0x81, 0xe2, 0x17, 0x16, 0x45, 0xd4, 0x4b, 0xf7, 0x7e, 0x64, 0x66, 0x20, 0xe8, 0x0b, 0x26, 0xfd, 0xa9, 0x8a, 0x47, 0x52, 0x89, 0x14, 0xd0, 0xd1, 0xa1, + /* (2^190)P */ 0xdc, 0x03, 0xe6, 0x20, 0x44, 0x47, 0x8f, 0x04, 0x16, 0x24, 0x22, 0xc1, 0x55, 0x5c, 0xbe, 0x43, 0xc3, 0x92, 0xc5, 0x54, 0x3d, 0x5d, 0xd1, 0x05, 0x9c, 0xc6, 0x7c, 0xbf, 0x23, 0x84, 0x1a, 0xba, 0x4f, 0x1f, 0xfc, 0xa1, 0xae, 0x1a, 0x64, 0x02, 0x51, 0xf1, 0xcb, 0x7a, 0x20, 0xce, 0xb2, 0x34, 0x3c, 0xca, 0xe0, 0xe4, 0xba, 0x22, 0xd4, 0x7b, + /* (2^191)P */ 0xca, 0xfd, 0xca, 0xd7, 0xde, 0x61, 0xae, 0xf0, 0x79, 0x0c, 0x20, 0xab, 0xbc, 0x6f, 0x4d, 0x61, 0xf0, 0xc7, 0x9c, 0x8d, 0x4b, 0x52, 0xf3, 0xb9, 0x48, 0x63, 0x0b, 0xb6, 0xd2, 0x25, 0x9a, 0x96, 0x72, 0xc1, 0x6b, 0x0c, 0xb5, 0xfb, 0x71, 0xaa, 0xad, 0x47, 0x5b, 0xe7, 0xc0, 0x0a, 0x55, 0xb2, 0xd4, 0x16, 0x2f, 0xb1, 0x01, 0xfd, 0xce, 0x27, + /* (2^192)P */ 0x64, 0x11, 0x4b, 0xab, 0x57, 0x09, 0xc6, 0x49, 0x4a, 0x37, 0xc3, 0x36, 0xc4, 0x7b, 0x81, 0x1f, 0x42, 0xed, 0xbb, 0xe0, 0xa0, 0x8d, 0x51, 0xe6, 0xca, 0x8b, 0xb9, 0xcd, 0x99, 0x2d, 0x91, 0x53, 0xa9, 0x47, 0xcb, 0x32, 0xc7, 0xa4, 0x92, 0xec, 0x46, 0x74, 0x44, 0x6d, 0x71, 0x9f, 0x6d, 0x0c, 0x69, 0xa4, 0xf8, 0xbe, 0x9f, 0x7f, 0xa0, 0xd7, + /* (2^193)P */ 0x5f, 0x33, 0xb6, 0x91, 0xc8, 0xa5, 0x3f, 0x5d, 0x7f, 0x38, 0x6e, 0x74, 0x20, 0x4a, 0xd6, 0x2b, 0x98, 0x2a, 0x41, 0x4b, 0x83, 0x64, 0x0b, 0x92, 0x7a, 0x06, 0x1e, 0xc6, 0x2c, 0xf6, 0xe4, 0x91, 0xe5, 0xb1, 0x2e, 0x6e, 0x4e, 0xa8, 0xc8, 0x14, 0x32, 0x57, 0x44, 0x1c, 0xe4, 0xb9, 0x7f, 0x54, 0x51, 0x08, 0x81, 0xaa, 0x4e, 0xce, 0xa1, 0x5d, + /* (2^194)P */ 0x5c, 0xd5, 0x9b, 0x5e, 0x7c, 0xb5, 0xb1, 0x52, 0x73, 0x00, 0x41, 0x56, 0x79, 0x08, 0x7e, 0x07, 0x28, 0x06, 0xa6, 0xfb, 0x7f, 0x69, 0xbd, 0x7a, 0x3c, 0xae, 0x9f, 0x39, 0xbb, 0x54, 0xa2, 0x79, 0xb9, 0x0e, 0x7f, 0xbb, 0xe0, 0xe6, 0xb7, 0x27, 0x64, 0x38, 0x45, 0xdb, 0x84, 0xe4, 0x61, 0x72, 0x3f, 0xe2, 0x24, 0xfe, 0x7a, 0x31, 0x9a, 0xc9, + /* (2^195)P */ 0xa1, 0xd2, 0xa4, 0xee, 0x24, 0x96, 0xe5, 0x5b, 0x79, 0x78, 0x3c, 0x7b, 0x82, 0x3b, 0x8b, 0x58, 0x0b, 0xa3, 0x63, 0x2d, 0xbc, 0x75, 0x46, 0xe8, 0x83, 0x1a, 0xc0, 0x2a, 0x92, 0x61, 0xa8, 0x75, 0x37, 0x3c, 0xbf, 0x0f, 0xef, 0x8f, 0x6c, 0x97, 0x75, 0x10, 0x05, 0x7a, 0xde, 0x23, 0xe8, 0x2a, 0x35, 0xeb, 0x41, 0x64, 0x7d, 0xcf, 0xe0, 0x52, + /* (2^196)P */ 0x4a, 0xd0, 0x49, 0x93, 0xae, 0xf3, 0x24, 0x8c, 0xe1, 0x09, 0x98, 0x45, 0xd8, 0xb9, 0xfe, 0x8e, 0x8c, 0xa8, 0x2c, 0xc9, 0x9f, 0xce, 0x01, 0xdc, 0x38, 0x11, 0xab, 0x85, 0xb9, 0xe8, 0x00, 0x51, 0xfd, 0x82, 0xe1, 0x9b, 0x4e, 0xfc, 0xb5, 0x2a, 0x0f, 0x8b, 0xda, 0x4e, 0x02, 0xca, 0xcc, 0xe3, 0x91, 0xc4, 0xe0, 0xcf, 0x7b, 0xd6, 0xe6, 0x6a, + /* (2^197)P */ 0xfe, 0x11, 0xd7, 0xaa, 0xe3, 0x0c, 0x52, 0x2e, 0x04, 0xe0, 0xe0, 0x61, 0xc8, 0x05, 0xd7, 0x31, 0x4c, 0xc3, 0x9b, 0x2d, 0xce, 0x59, 0xbe, 0x12, 0xb7, 0x30, 0x21, 0xfc, 0x81, 0xb8, 0x5e, 0x57, 0x73, 0xd0, 0xad, 0x8e, 0x9e, 0xe4, 0xeb, 0xcd, 0xcf, 0xd2, 0x0f, 0x01, 0x35, 0x16, 0xed, 0x7a, 0x43, 0x8e, 0x42, 0xdc, 0xea, 0x4c, 0xa8, 0x7c, + /* (2^198)P */ 0x37, 0x26, 0xcc, 0x76, 0x0b, 0xe5, 0x76, 0xdd, 0x3e, 0x19, 0x3c, 0xc4, 0x6c, 0x7f, 0xd0, 0x03, 0xc1, 0xb8, 0x59, 0x82, 0xca, 0x36, 0xc1, 0xe4, 0xc8, 0xb2, 0x83, 0x69, 0x9c, 0xc5, 0x9d, 0x12, 0x82, 0x1c, 0xea, 0xb2, 0x84, 0x9f, 0xf3, 0x52, 0x6b, 0xbb, 0xd8, 0x81, 0x56, 0x83, 0x04, 0x66, 0x05, 0x22, 0x49, 0x37, 0x93, 0xb1, 0xfd, 0xd5, + /* (2^199)P */ 0xaf, 0x96, 0xbf, 0x03, 0xbe, 0xe6, 0x5d, 0x78, 0x19, 0xba, 0x37, 0x46, 0x0a, 0x2b, 0x52, 0x7c, 0xd8, 0x51, 
0x9e, 0x3d, 0x29, 0x42, 0xdb, 0x0e, 0x31, 0x20, 0x94, 0xf8, 0x43, 0x9a, 0x2d, 0x22, 0xd3, 0xe3, 0xa1, 0x79, 0x68, 0xfb, 0x2d, 0x7e, 0xd6, 0x79, 0xda, 0x0b, 0xc6, 0x5b, 0x76, 0x68, 0xf0, 0xfe, 0x72, 0x59, 0xbb, 0xa1, 0x9c, 0x74, + /* (2^200)P */ 0x0a, 0xd9, 0xec, 0xc5, 0xbd, 0xf0, 0xda, 0xcf, 0x82, 0xab, 0x46, 0xc5, 0x32, 0x13, 0xdc, 0x5b, 0xac, 0xc3, 0x53, 0x9a, 0x7f, 0xef, 0xa5, 0x40, 0x5a, 0x1f, 0xc1, 0x12, 0x91, 0x54, 0x83, 0x6a, 0xb0, 0x9a, 0x85, 0x4d, 0xbf, 0x36, 0x8e, 0xd3, 0xa2, 0x2b, 0xe5, 0xd6, 0xc6, 0xe1, 0x58, 0x5b, 0x82, 0x9b, 0xc8, 0xf2, 0x03, 0xba, 0xf5, 0x92, + /* (2^201)P */ 0xfb, 0x21, 0x7e, 0xde, 0xe7, 0xb4, 0xc0, 0x56, 0x86, 0x3a, 0x5b, 0x78, 0xf8, 0xf0, 0xf4, 0xe7, 0x5c, 0x00, 0xd2, 0xd7, 0xd6, 0xf8, 0x75, 0x5e, 0x0f, 0x3e, 0xd1, 0x4b, 0x77, 0xd8, 0xad, 0xb0, 0xc9, 0x8b, 0x59, 0x7d, 0x30, 0x76, 0x64, 0x7a, 0x76, 0xd9, 0x51, 0x69, 0xfc, 0xbd, 0x8e, 0xb5, 0x55, 0xe0, 0xd2, 0x07, 0x15, 0xa9, 0xf7, 0xa4, + /* (2^202)P */ 0xaa, 0x2d, 0x2f, 0x2b, 0x3c, 0x15, 0xdd, 0xcd, 0xe9, 0x28, 0x82, 0x4f, 0xa2, 0xaa, 0x31, 0x48, 0xcc, 0xfa, 0x07, 0x73, 0x8a, 0x34, 0x74, 0x0d, 0xab, 0x1a, 0xca, 0xd2, 0xbf, 0x3a, 0xdb, 0x1a, 0x5f, 0x50, 0x62, 0xf4, 0x6b, 0x83, 0x38, 0x43, 0x96, 0xee, 0x6b, 0x39, 0x1e, 0xf0, 0x17, 0x80, 0x1e, 0x9b, 0xed, 0x2b, 0x2f, 0xcc, 0x65, 0xf7, + /* (2^203)P */ 0x03, 0xb3, 0x23, 0x9c, 0x0d, 0xd1, 0xeb, 0x7e, 0x34, 0x17, 0x8a, 0x4c, 0xde, 0x54, 0x39, 0xc4, 0x11, 0x82, 0xd3, 0xa4, 0x00, 0x32, 0x95, 0x9c, 0xa6, 0x64, 0x76, 0x6e, 0xd6, 0x53, 0x27, 0xb4, 0x6a, 0x14, 0x8c, 0x54, 0xf6, 0x58, 0x9e, 0x22, 0x4a, 0x55, 0x18, 0x77, 0xd0, 0x08, 0x6b, 0x19, 0x8a, 0xb5, 0xe7, 0x19, 0xb8, 0x60, 0x92, 0xb1, + /* (2^204)P */ 0x66, 0xec, 0xf3, 0x12, 0xde, 0x67, 0x7f, 0xd4, 0x5b, 0xf6, 0x70, 0x64, 0x0a, 0xb5, 0xc2, 0xf9, 0xb3, 0x64, 0xab, 0x56, 0x46, 0xc7, 0x93, 0xc2, 0x8b, 0x2d, 0xd0, 0xd6, 0x39, 0x3b, 0x1f, 0xcd, 0xb3, 0xac, 0xcc, 0x2c, 0x27, 0x6a, 0xbc, 0xb3, 0x4b, 0xa8, 0x3c, 0x69, 0x20, 0xe2, 0x18, 0x35, 0x17, 0xe1, 0x8a, 0xd3, 0x11, 0x74, 0xaa, 0x4d, + /* (2^205)P */ 0x96, 0xc4, 0x16, 0x7e, 0xfd, 0xf5, 0xd0, 0x7d, 0x1f, 0x32, 0x1b, 0xdb, 0xa6, 0xfd, 0x51, 0x75, 0x4d, 0xd7, 0x00, 0xe5, 0x7f, 0x58, 0x5b, 0xeb, 0x4b, 0x6a, 0x78, 0xfe, 0xe5, 0xd6, 0x8f, 0x99, 0x17, 0xca, 0x96, 0x45, 0xf7, 0x52, 0xdf, 0x84, 0x06, 0x77, 0xb9, 0x05, 0x63, 0x5d, 0xe9, 0x91, 0xb1, 0x4b, 0x82, 0x5a, 0xdb, 0xd7, 0xca, 0x69, + /* (2^206)P */ 0x02, 0xd3, 0x38, 0x38, 0x87, 0xea, 0xbd, 0x9f, 0x11, 0xca, 0xf3, 0x21, 0xf1, 0x9b, 0x35, 0x97, 0x98, 0xff, 0x8e, 0x6d, 0x3d, 0xd6, 0xb2, 0xfa, 0x68, 0xcb, 0x7e, 0x62, 0x85, 0xbb, 0xc7, 0x5d, 0xee, 0x32, 0x30, 0x2e, 0x71, 0x96, 0x63, 0x43, 0x98, 0xc4, 0xa7, 0xde, 0x60, 0xb2, 0xd9, 0x43, 0x4a, 0xfa, 0x97, 0x2d, 0x5f, 0x21, 0xd4, 0xfe, + /* (2^207)P */ 0x3b, 0x20, 0x29, 0x07, 0x07, 0xb5, 0x78, 0xc3, 0xc7, 0xab, 0x56, 0xba, 0x40, 0xde, 0x1d, 0xcf, 0xc3, 0x00, 0x56, 0x21, 0x0c, 0xc8, 0x42, 0xd9, 0x0e, 0xcd, 0x02, 0x7c, 0x07, 0xb9, 0x11, 0xd7, 0x96, 0xaf, 0xff, 0xad, 0xc5, 0xba, 0x30, 0x6d, 0x82, 0x3a, 0xbf, 0xef, 0x7b, 0xf7, 0x0a, 0x74, 0xbd, 0x31, 0x0c, 0xe4, 0xec, 0x1a, 0xe5, 0xc5, + /* (2^208)P */ 0xcc, 0xf2, 0x28, 0x16, 0x12, 0xbf, 0xef, 0x85, 0xbc, 0xf7, 0xcb, 0x9f, 0xdb, 0xa8, 0xb2, 0x49, 0x53, 0x48, 0xa8, 0x24, 0xa8, 0x68, 0x8d, 0xbb, 0x21, 0x0a, 0x5a, 0xbd, 0xb2, 0x91, 0x61, 0x47, 0xc4, 0x43, 0x08, 0xa6, 0x19, 0xef, 0x8e, 0x88, 0x39, 0xc6, 0x33, 0x30, 0xf3, 0x0e, 0xc5, 0x92, 0x66, 0xd6, 0xfe, 0xc5, 0x12, 0xd9, 0x4c, 0x2d, + /* (2^209)P */ 0x30, 0x34, 0x07, 0xbf, 0x9c, 0x5a, 0x4e, 0x65, 0xf1, 0x39, 0x35, 0x38, 0xae, 0x7b, 0x55, 0xac, 0x6a, 0x92, 0x24, 0x7e, 0x50, 0xd3, 
0xba, 0x78, 0x51, 0xfe, 0x4d, 0x32, 0x05, 0x11, 0xf5, 0x52, 0xf1, 0x31, 0x45, 0x39, 0x98, 0x7b, 0x28, 0x56, 0xc3, 0x5d, 0x4f, 0x07, 0x6f, 0x84, 0xb8, 0x1a, 0x58, 0x0b, 0xc4, 0x7c, 0xc4, 0x8d, 0x32, 0x8e, + /* (2^210)P */ 0x7e, 0xaf, 0x98, 0xce, 0xc5, 0x2b, 0x9d, 0xf6, 0xfa, 0x2c, 0xb6, 0x2a, 0x5a, 0x1d, 0xc0, 0x24, 0x8d, 0xa4, 0xce, 0xb1, 0x12, 0x01, 0xf9, 0x79, 0xc6, 0x79, 0x38, 0x0c, 0xd4, 0x07, 0xc9, 0xf7, 0x37, 0xa1, 0x0b, 0xfe, 0x72, 0xec, 0x5d, 0xd6, 0xb0, 0x1c, 0x70, 0xbe, 0x70, 0x01, 0x13, 0xe0, 0x86, 0x95, 0xc7, 0x2e, 0x12, 0x3b, 0xe6, 0xa6, + /* (2^211)P */ 0x24, 0x82, 0x67, 0xe0, 0x14, 0x7b, 0x56, 0x08, 0x38, 0x44, 0xdb, 0xa0, 0x3a, 0x05, 0x47, 0xb2, 0xc0, 0xac, 0xd1, 0xcc, 0x3f, 0x82, 0xb8, 0x8a, 0x88, 0xbc, 0xf5, 0x33, 0xa1, 0x35, 0x0f, 0xf6, 0xe2, 0xef, 0x6c, 0xf7, 0x37, 0x9e, 0xe8, 0x10, 0xca, 0xb0, 0x8e, 0x80, 0x86, 0x00, 0x23, 0xd0, 0x4a, 0x76, 0x9f, 0xf7, 0x2c, 0x52, 0x15, 0x0e, + /* (2^212)P */ 0x5e, 0x49, 0xe1, 0x2c, 0x9a, 0x01, 0x76, 0xa6, 0xb3, 0x07, 0x5b, 0xa4, 0x07, 0xef, 0x1d, 0xc3, 0x6a, 0xbb, 0x64, 0xbe, 0x71, 0x15, 0x6e, 0x32, 0x31, 0x46, 0x9a, 0x9e, 0x8f, 0x45, 0x73, 0xce, 0x0b, 0x94, 0x1a, 0x52, 0x07, 0xf4, 0x50, 0x30, 0x49, 0x53, 0x50, 0xfb, 0x71, 0x1f, 0x5a, 0x03, 0xa9, 0x76, 0xf2, 0x8f, 0x42, 0xff, 0xed, 0xed, + /* (2^213)P */ 0xed, 0x08, 0xdb, 0x91, 0x1c, 0xee, 0xa2, 0xb4, 0x47, 0xa2, 0xfa, 0xcb, 0x03, 0xd1, 0xff, 0x8c, 0xad, 0x64, 0x50, 0x61, 0xcd, 0xfc, 0x88, 0xa0, 0x31, 0x95, 0x30, 0xb9, 0x58, 0xdd, 0xd7, 0x43, 0xe4, 0x46, 0xc2, 0x16, 0xd9, 0x72, 0x4a, 0x56, 0x51, 0x70, 0x85, 0xf1, 0xa1, 0x80, 0x40, 0xd5, 0xba, 0x67, 0x81, 0xda, 0xcd, 0x03, 0xea, 0x51, + /* (2^214)P */ 0x42, 0x50, 0xf0, 0xef, 0x37, 0x61, 0x72, 0x85, 0xe1, 0xf1, 0xff, 0x6f, 0x3d, 0xe8, 0x7b, 0x21, 0x5c, 0xe5, 0x50, 0x03, 0xde, 0x00, 0xc1, 0xf7, 0x3a, 0x55, 0x12, 0x1c, 0x9e, 0x1e, 0xce, 0xd1, 0x2f, 0xaf, 0x05, 0x70, 0x5b, 0x47, 0xf2, 0x04, 0x7a, 0x89, 0xbc, 0x78, 0xa6, 0x65, 0x6c, 0xaa, 0x3c, 0xa2, 0x3c, 0x8b, 0x5c, 0xa9, 0x22, 0x48, + /* (2^215)P */ 0x7e, 0x8c, 0x8f, 0x2f, 0x60, 0xe3, 0x5a, 0x94, 0xd4, 0xce, 0xdd, 0x9d, 0x83, 0x3b, 0x77, 0x78, 0x43, 0x1d, 0xfd, 0x8f, 0xc8, 0xe8, 0x02, 0x90, 0xab, 0xf6, 0xc9, 0xfc, 0xf1, 0x63, 0xaa, 0x5f, 0x42, 0xf1, 0x78, 0x34, 0x64, 0x16, 0x75, 0x9c, 0x7d, 0xd0, 0xe4, 0x74, 0x5a, 0xa8, 0xfb, 0xcb, 0xac, 0x20, 0xa3, 0xc2, 0xa6, 0x20, 0xf8, 0x1b, + /* (2^216)P */ 0x00, 0x4f, 0x1e, 0x56, 0xb5, 0x34, 0xb2, 0x87, 0x31, 0xe5, 0xee, 0x8d, 0xf1, 0x41, 0x67, 0xb7, 0x67, 0x3a, 0x54, 0x86, 0x5c, 0xf0, 0x0b, 0x37, 0x2f, 0x1b, 0x92, 0x5d, 0x58, 0x93, 0xdc, 0xd8, 0x58, 0xcc, 0x9e, 0x67, 0xd0, 0x97, 0x3a, 0xaf, 0x49, 0x39, 0x2d, 0x3b, 0xd8, 0x98, 0xfb, 0x76, 0x6b, 0xe7, 0xaf, 0xc3, 0x45, 0x44, 0x53, 0x94, + /* (2^217)P */ 0x30, 0xbd, 0x90, 0x75, 0xd3, 0xbd, 0x3b, 0x58, 0x27, 0x14, 0x9f, 0x6b, 0xd4, 0x31, 0x99, 0xcd, 0xde, 0x3a, 0x21, 0x1e, 0xb4, 0x02, 0xe4, 0x33, 0x04, 0x02, 0xb0, 0x50, 0x66, 0x68, 0x90, 0xdd, 0x7b, 0x69, 0x31, 0xd9, 0xcf, 0x68, 0x73, 0xf1, 0x60, 0xdd, 0xc8, 0x1d, 0x5d, 0xe3, 0xd6, 0x5b, 0x2a, 0xa4, 0xea, 0xc4, 0x3f, 0x08, 0xcd, 0x9c, + /* (2^218)P */ 0x6b, 0x1a, 0xbf, 0x55, 0xc1, 0x1b, 0x0c, 0x05, 0x09, 0xdf, 0xf5, 0x5e, 0xa3, 0x77, 0x95, 0xe9, 0xdf, 0x19, 0xdd, 0xc7, 0x94, 0xcb, 0x06, 0x73, 0xd0, 0x88, 0x02, 0x33, 0x94, 0xca, 0x7a, 0x2f, 0x8e, 0x3d, 0x72, 0x61, 0x2d, 0x4d, 0xa6, 0x61, 0x1f, 0x32, 0x5e, 0x87, 0x53, 0x36, 0x11, 0x15, 0x20, 0xb3, 0x5a, 0x57, 0x51, 0x93, 0x20, 0xd8, + /* (2^219)P */ 0xb7, 0x56, 0xf4, 0xab, 0x7d, 0x0c, 0xfb, 0x99, 0x1a, 0x30, 0x29, 0xb0, 0x75, 0x2a, 0xf8, 0x53, 0x71, 0x23, 0xbd, 0xa7, 0xd8, 0x0a, 0xe2, 0x27, 0x65, 0xe9, 
0x74, 0x26, 0x98, 0x4a, 0x69, 0x19, 0xb2, 0x4d, 0x0a, 0x17, 0x98, 0xb2, 0xa9, 0x57, 0x4e, 0xf6, 0x86, 0xc8, 0x01, 0xa4, 0xc6, 0x98, 0xad, 0x5a, 0x90, 0x2c, 0x05, 0x46, 0x64, 0xb7, + /* (2^220)P */ 0x7b, 0x91, 0xdf, 0xfc, 0xf8, 0x1c, 0x8c, 0x15, 0x9e, 0xf7, 0xd5, 0xa8, 0xe8, 0xe7, 0xe3, 0xa3, 0xb0, 0x04, 0x74, 0xfa, 0x78, 0xfb, 0x26, 0xbf, 0x67, 0x42, 0xf9, 0x8c, 0x9b, 0xb4, 0x69, 0x5b, 0x02, 0x13, 0x6d, 0x09, 0x6c, 0xd6, 0x99, 0x61, 0x7b, 0x89, 0x4a, 0x67, 0x75, 0xa3, 0x98, 0x13, 0x23, 0x1d, 0x18, 0x24, 0x0e, 0xef, 0x41, 0x79, + /* (2^221)P */ 0x86, 0x33, 0xab, 0x08, 0xcb, 0xbf, 0x1e, 0x76, 0x3c, 0x0b, 0xbd, 0x30, 0xdb, 0xe9, 0xa3, 0x35, 0x87, 0x1b, 0xe9, 0x07, 0x00, 0x66, 0x7f, 0x3b, 0x35, 0x0c, 0x8a, 0x3f, 0x61, 0xbc, 0xe0, 0xae, 0xf6, 0xcc, 0x54, 0xe1, 0x72, 0x36, 0x2d, 0xee, 0x93, 0x24, 0xf8, 0xd7, 0xc5, 0xf9, 0xcb, 0xb0, 0xe5, 0x88, 0x0d, 0x23, 0x4b, 0x76, 0x15, 0xa2, + /* (2^222)P */ 0x37, 0xdb, 0x83, 0xd5, 0x6d, 0x06, 0x24, 0x37, 0x1b, 0x15, 0x85, 0x15, 0xe2, 0xc0, 0x4e, 0x02, 0xa9, 0x6d, 0x0a, 0x3a, 0x94, 0x4a, 0x6f, 0x49, 0x00, 0x01, 0x72, 0xbb, 0x60, 0x14, 0x35, 0xae, 0xb4, 0xc6, 0x01, 0x0a, 0x00, 0x9e, 0xc3, 0x58, 0xc5, 0xd1, 0x5e, 0x30, 0x73, 0x96, 0x24, 0x85, 0x9d, 0xf0, 0xf9, 0xec, 0x09, 0xd3, 0xe7, 0x70, + /* (2^223)P */ 0xf3, 0xbd, 0x96, 0x87, 0xe9, 0x71, 0xbd, 0xd6, 0xa2, 0x45, 0xeb, 0x0a, 0xcd, 0x2c, 0xf1, 0x72, 0xa6, 0x31, 0xa9, 0x6f, 0x09, 0xa1, 0x5e, 0xdd, 0xc8, 0x8d, 0x0d, 0xbc, 0x5a, 0x8d, 0xb1, 0x2c, 0x9a, 0xcc, 0x37, 0x74, 0xc2, 0xa9, 0x4e, 0xd6, 0xc0, 0x3c, 0xa0, 0x23, 0xb0, 0xa0, 0x77, 0x14, 0x80, 0x45, 0x71, 0x6a, 0x2d, 0x41, 0xc3, 0x82, + /* (2^224)P */ 0x37, 0x44, 0xec, 0x8a, 0x3e, 0xc1, 0x0c, 0xa9, 0x12, 0x9c, 0x08, 0x88, 0xcb, 0xd9, 0xf8, 0xba, 0x00, 0xd6, 0xc3, 0xdf, 0xef, 0x7a, 0x44, 0x7e, 0x25, 0x69, 0xc9, 0xc1, 0x46, 0xe5, 0x20, 0x9e, 0xcc, 0x0b, 0x05, 0x3e, 0xf4, 0x78, 0x43, 0x0c, 0xa6, 0x2f, 0xc1, 0xfa, 0x70, 0xb2, 0x3c, 0x31, 0x7a, 0x63, 0x58, 0xab, 0x17, 0xcf, 0x4c, 0x4f, + /* (2^225)P */ 0x2b, 0x08, 0x31, 0x59, 0x75, 0x8b, 0xec, 0x0a, 0xa9, 0x79, 0x70, 0xdd, 0xf1, 0x11, 0xc3, 0x11, 0x1f, 0xab, 0x37, 0xaa, 0x26, 0xea, 0x53, 0xc4, 0x79, 0xa7, 0x91, 0x00, 0xaa, 0x08, 0x42, 0xeb, 0x8b, 0x8b, 0xe8, 0xc3, 0x2f, 0xb8, 0x78, 0x90, 0x38, 0x0e, 0x8a, 0x42, 0x0c, 0x0f, 0xbf, 0x3e, 0xf8, 0xd8, 0x07, 0xcf, 0x6a, 0x34, 0xc9, 0xfa, + /* (2^226)P */ 0x11, 0xe0, 0x76, 0x4d, 0x23, 0xc5, 0xa6, 0xcc, 0x9f, 0x9a, 0x2a, 0xde, 0x3a, 0xb5, 0x92, 0x39, 0x19, 0x8a, 0xf1, 0x8d, 0xf9, 0x4d, 0xc9, 0xb4, 0x39, 0x9f, 0x57, 0xd8, 0x72, 0xab, 0x1d, 0x61, 0x6a, 0xb2, 0xff, 0x52, 0xba, 0x54, 0x0e, 0xfb, 0x83, 0x30, 0x8a, 0xf7, 0x3b, 0xf4, 0xd8, 0xae, 0x1a, 0x94, 0x3a, 0xec, 0x63, 0xfe, 0x6e, 0x7c, + /* (2^227)P */ 0xdc, 0x70, 0x8e, 0x55, 0x44, 0xbf, 0xd2, 0x6a, 0xa0, 0x14, 0x61, 0x89, 0xd5, 0x55, 0x45, 0x3c, 0xf6, 0x40, 0x0d, 0x83, 0x85, 0x44, 0xb4, 0x62, 0x56, 0xfe, 0x60, 0xd7, 0x07, 0x1d, 0x47, 0x30, 0x3b, 0x73, 0xa4, 0xb5, 0xb7, 0xea, 0xac, 0xda, 0xf1, 0x17, 0xaa, 0x60, 0xdf, 0xe9, 0x84, 0xda, 0x31, 0x32, 0x61, 0xbf, 0xd0, 0x7e, 0x8a, 0x02, + /* (2^228)P */ 0xb9, 0x51, 0xb3, 0x89, 0x21, 0x5d, 0xa2, 0xfe, 0x79, 0x2a, 0xb3, 0x2a, 0x3b, 0xe6, 0x6f, 0x2b, 0x22, 0x03, 0xea, 0x7b, 0x1f, 0xaf, 0x85, 0xc3, 0x38, 0x55, 0x5b, 0x8e, 0xb4, 0xaa, 0x77, 0xfe, 0x03, 0x6e, 0xda, 0x91, 0x24, 0x0c, 0x48, 0x39, 0x27, 0x43, 0x16, 0xd2, 0x0a, 0x0d, 0x43, 0xa3, 0x0e, 0xca, 0x45, 0xd1, 0x7f, 0xf5, 0xd3, 0x16, + /* (2^229)P */ 0x3d, 0x32, 0x9b, 0x38, 0xf8, 0x06, 0x93, 0x78, 0x5b, 0x50, 0x2b, 0x06, 0xd8, 0x66, 0xfe, 0xab, 0x9b, 0x58, 0xc7, 0xd1, 0x4d, 0xd5, 0xf8, 0x3b, 0x10, 0x7e, 0x85, 0xde, 0x58, 0x4e, 
0xdf, 0x53, 0xd9, 0x58, 0xe0, 0x15, 0x81, 0x9f, 0x1a, 0x78, 0xfc, 0x9f, 0x10, 0xc2, 0x23, 0xd6, 0x78, 0xd1, 0x9d, 0xd2, 0xd5, 0x1c, 0x53, 0xe2, 0xc9, 0x76, + /* (2^230)P */ 0x98, 0x1e, 0x38, 0x7b, 0x71, 0x18, 0x4b, 0x15, 0xaf, 0xa1, 0xa6, 0x98, 0xcb, 0x26, 0xa3, 0xc8, 0x07, 0x46, 0xda, 0x3b, 0x70, 0x65, 0xec, 0x7a, 0x2b, 0x34, 0x94, 0xa8, 0xb6, 0x14, 0xf8, 0x1a, 0xce, 0xf7, 0xc8, 0x60, 0xf3, 0x88, 0xf4, 0x33, 0x60, 0x7b, 0xd1, 0x02, 0xe7, 0xda, 0x00, 0x4a, 0xea, 0xd2, 0xfd, 0x88, 0xd2, 0x99, 0x28, 0xf3, + /* (2^231)P */ 0x28, 0x24, 0x1d, 0x26, 0xc2, 0xeb, 0x8b, 0x3b, 0xb4, 0x6b, 0xbe, 0x6b, 0x77, 0xff, 0xf3, 0x21, 0x3b, 0x26, 0x6a, 0x8c, 0x8e, 0x2a, 0x44, 0xa8, 0x01, 0x2b, 0x71, 0xea, 0x64, 0x30, 0xfd, 0xfd, 0x95, 0xcb, 0x39, 0x38, 0x48, 0xfa, 0x96, 0x97, 0x8c, 0x2f, 0x33, 0xca, 0x03, 0xe6, 0xd7, 0x94, 0x55, 0x6c, 0xc3, 0xb3, 0xa8, 0xf7, 0xae, 0x8c, + /* (2^232)P */ 0xea, 0x62, 0x8a, 0xb4, 0xeb, 0x74, 0xf7, 0xb8, 0xae, 0xc5, 0x20, 0x71, 0x06, 0xd6, 0x7c, 0x62, 0x9b, 0x69, 0x74, 0xef, 0xa7, 0x6d, 0xd6, 0x8c, 0x37, 0xb9, 0xbf, 0xcf, 0xeb, 0xe4, 0x2f, 0x04, 0x02, 0x21, 0x7d, 0x75, 0x6b, 0x92, 0x48, 0xf8, 0x70, 0xad, 0x69, 0xe2, 0xea, 0x0e, 0x88, 0x67, 0x72, 0xcc, 0x2d, 0x10, 0xce, 0x2d, 0xcf, 0x65, + /* (2^233)P */ 0x49, 0xf3, 0x57, 0x64, 0xe5, 0x5c, 0xc5, 0x65, 0x49, 0x97, 0xc4, 0x8a, 0xcc, 0xa9, 0xca, 0x94, 0x7b, 0x86, 0x88, 0xb6, 0x51, 0x27, 0x69, 0xa5, 0x0f, 0x8b, 0x06, 0x59, 0xa0, 0x94, 0xef, 0x63, 0x1a, 0x01, 0x9e, 0x4f, 0xd2, 0x5a, 0x93, 0xc0, 0x7c, 0xe6, 0x61, 0x77, 0xb6, 0xf5, 0x40, 0xd9, 0x98, 0x43, 0x5b, 0x56, 0x68, 0xe9, 0x37, 0x8f, + /* (2^234)P */ 0xee, 0x87, 0xd2, 0x05, 0x1b, 0x39, 0x89, 0x10, 0x07, 0x6d, 0xe8, 0xfd, 0x8b, 0x4d, 0xb2, 0xa7, 0x7b, 0x1e, 0xa0, 0x6c, 0x0d, 0x3d, 0x3d, 0x49, 0xba, 0x61, 0x36, 0x1f, 0xc2, 0x84, 0x4a, 0xcc, 0x87, 0xa9, 0x1b, 0x23, 0x04, 0xe2, 0x3e, 0x97, 0xe1, 0xdb, 0xd5, 0x5a, 0xe8, 0x41, 0x6b, 0xe5, 0x5a, 0xa1, 0x99, 0xe5, 0x7b, 0xa7, 0xe0, 0x3b, + /* (2^235)P */ 0xea, 0xa3, 0x6a, 0xdd, 0x77, 0x7f, 0x77, 0x41, 0xc5, 0x6a, 0xe4, 0xaf, 0x11, 0x5f, 0x88, 0xa5, 0x10, 0xee, 0xd0, 0x8c, 0x0c, 0xb4, 0xa5, 0x2a, 0xd0, 0xd8, 0x1d, 0x47, 0x06, 0xc0, 0xd5, 0xce, 0x51, 0x54, 0x9b, 0x2b, 0xe6, 0x2f, 0xe7, 0xe7, 0x31, 0x5f, 0x5c, 0x23, 0x81, 0x3e, 0x03, 0x93, 0xaa, 0x2d, 0x71, 0x84, 0xa0, 0x89, 0x32, 0xa6, + /* (2^236)P */ 0x55, 0xa3, 0x13, 0x92, 0x4e, 0x93, 0x7d, 0xec, 0xca, 0x57, 0xfb, 0x37, 0xae, 0xd2, 0x18, 0x2e, 0x54, 0x05, 0x6c, 0xd1, 0x28, 0xca, 0x90, 0x40, 0x82, 0x2e, 0x79, 0xc6, 0x5a, 0xc7, 0xdd, 0x84, 0x93, 0xdf, 0x15, 0xb8, 0x1f, 0xb1, 0xf9, 0xaf, 0x2c, 0xe5, 0x32, 0xcd, 0xc2, 0x99, 0x6d, 0xac, 0x85, 0x5c, 0x63, 0xd3, 0xe2, 0xff, 0x24, 0xda, + /* (2^237)P */ 0x2d, 0x8d, 0xfd, 0x65, 0xcc, 0xe5, 0x02, 0xa0, 0xe5, 0xb9, 0xec, 0x59, 0x09, 0x50, 0x27, 0xb7, 0x3d, 0x2a, 0x79, 0xb2, 0x76, 0x5d, 0x64, 0x95, 0xf8, 0xc5, 0xaf, 0x8a, 0x62, 0x11, 0x5c, 0x56, 0x1c, 0x05, 0x64, 0x9e, 0x5e, 0xbd, 0x54, 0x04, 0xe6, 0x9e, 0xab, 0xe6, 0x22, 0x7e, 0x42, 0x54, 0xb5, 0xa5, 0xd0, 0x8d, 0x28, 0x6b, 0x0f, 0x0b, + /* (2^238)P */ 0x2d, 0xb2, 0x8c, 0x59, 0x10, 0x37, 0x84, 0x3b, 0x9b, 0x65, 0x1b, 0x0f, 0x10, 0xf9, 0xea, 0x60, 0x1b, 0x02, 0xf5, 0xee, 0x8b, 0xe6, 0x32, 0x7d, 0x10, 0x7f, 0x5f, 0x8c, 0x72, 0x09, 0x4e, 0x1f, 0x29, 0xff, 0x65, 0xcb, 0x3e, 0x3a, 0xd2, 0x96, 0x50, 0x1e, 0xea, 0x64, 0x99, 0xb5, 0x4c, 0x7a, 0x69, 0xb8, 0x95, 0xae, 0x48, 0xc0, 0x7c, 0xb1, + /* (2^239)P */ 0xcd, 0x7c, 0x4f, 0x3e, 0xea, 0xf3, 0x90, 0xcb, 0x12, 0x76, 0xd1, 0x17, 0xdc, 0x0d, 0x13, 0x0f, 0xfd, 0x4d, 0xb5, 0x1f, 0xe4, 0xdd, 0xf2, 0x4d, 0x58, 0xea, 0xa5, 0x66, 0x92, 0xcf, 0xe5, 0x54, 0xea, 0x9b, 
0x35, 0x83, 0x1a, 0x44, 0x8e, 0x62, 0x73, 0x45, 0x98, 0xa3, 0x89, 0x95, 0x52, 0x93, 0x1a, 0x8d, 0x63, 0x0f, 0xc2, 0x57, 0x3c, 0xb1, + /* (2^240)P */ 0x72, 0xb4, 0xdf, 0x51, 0xb7, 0xf6, 0x52, 0xa2, 0x14, 0x56, 0xe5, 0x0a, 0x2e, 0x75, 0x81, 0x02, 0xee, 0x93, 0x48, 0x0a, 0x92, 0x4e, 0x0c, 0x0f, 0xdf, 0x09, 0x89, 0x99, 0xf6, 0xf9, 0x22, 0xa2, 0x32, 0xf8, 0xb0, 0x76, 0x0c, 0xb2, 0x4d, 0x6e, 0xbe, 0x83, 0x35, 0x61, 0x44, 0xd2, 0x58, 0xc7, 0xdd, 0x14, 0xcf, 0xc3, 0x4b, 0x7c, 0x07, 0xee, + /* (2^241)P */ 0x8b, 0x03, 0xee, 0xcb, 0xa7, 0x2e, 0x28, 0xbd, 0x97, 0xd1, 0x4c, 0x2b, 0xd1, 0x92, 0x67, 0x5b, 0x5a, 0x12, 0xbf, 0x29, 0x17, 0xfc, 0x50, 0x09, 0x74, 0x76, 0xa2, 0xd4, 0x82, 0xfd, 0x2c, 0x0c, 0x90, 0xf7, 0xe7, 0xe5, 0x9a, 0x2c, 0x16, 0x40, 0xb9, 0x6c, 0xd9, 0xe0, 0x22, 0x9e, 0xf8, 0xdd, 0x73, 0xe4, 0x7b, 0x9e, 0xbe, 0x4f, 0x66, 0x22, + /* (2^242)P */ 0xa4, 0x10, 0xbe, 0xb8, 0x83, 0x3a, 0x77, 0x8e, 0xea, 0x0a, 0xc4, 0x97, 0x3e, 0xb6, 0x6c, 0x81, 0xd7, 0x65, 0xd9, 0xf7, 0xae, 0xe6, 0xbe, 0xab, 0x59, 0x81, 0x29, 0x4b, 0xff, 0xe1, 0x0f, 0xc3, 0x2b, 0xad, 0x4b, 0xef, 0xc4, 0x50, 0x9f, 0x88, 0x31, 0xf2, 0xde, 0x80, 0xd6, 0xf4, 0x20, 0x9c, 0x77, 0x9b, 0xbe, 0xbe, 0x08, 0xf5, 0xf0, 0x95, + /* (2^243)P */ 0x0e, 0x7c, 0x7b, 0x7c, 0xb3, 0xd8, 0x83, 0xfc, 0x8c, 0x75, 0x51, 0x74, 0x1b, 0xe1, 0x6d, 0x11, 0x05, 0x46, 0x24, 0x0d, 0xa4, 0x2b, 0x32, 0xfd, 0x2c, 0x4e, 0x21, 0xdf, 0x39, 0x6b, 0x96, 0xfc, 0xff, 0x92, 0xfc, 0x35, 0x0d, 0x9a, 0x4b, 0xc0, 0x70, 0x46, 0x32, 0x7d, 0xc0, 0xc4, 0x04, 0xe0, 0x2d, 0x83, 0xa7, 0x00, 0xc7, 0xcb, 0xb4, 0x8f, + /* (2^244)P */ 0xa9, 0x5a, 0x7f, 0x0e, 0xdd, 0x2c, 0x85, 0xaa, 0x4d, 0xac, 0xde, 0xb3, 0xb6, 0xaf, 0xe6, 0xd1, 0x06, 0x7b, 0x2c, 0xa4, 0x01, 0x19, 0x22, 0x7d, 0x78, 0xf0, 0x3a, 0xea, 0x89, 0xfe, 0x21, 0x61, 0x6d, 0xb8, 0xfe, 0xa5, 0x2a, 0xab, 0x0d, 0x7b, 0x51, 0x39, 0xb6, 0xde, 0xbc, 0xf0, 0xc5, 0x48, 0xd7, 0x09, 0x82, 0x6e, 0x66, 0x75, 0xc5, 0xcd, + /* (2^245)P */ 0xee, 0xdf, 0x2b, 0x6c, 0xa8, 0xde, 0x61, 0xe1, 0x27, 0xfa, 0x2a, 0x0f, 0x68, 0xe7, 0x7a, 0x9b, 0x13, 0xe9, 0x56, 0xd2, 0x1c, 0x3d, 0x2f, 0x3c, 0x7a, 0xf6, 0x6f, 0x45, 0xee, 0xe8, 0xf4, 0xa0, 0xa6, 0xe8, 0xa5, 0x27, 0xee, 0xf2, 0x85, 0xa9, 0xd5, 0x0e, 0xa9, 0x26, 0x60, 0xfe, 0xee, 0xc7, 0x59, 0x99, 0x5e, 0xa3, 0xdf, 0x23, 0x36, 0xd5, + /* (2^246)P */ 0x15, 0x66, 0x6f, 0xd5, 0x78, 0xa4, 0x0a, 0xf7, 0xb1, 0xe8, 0x75, 0x6b, 0x48, 0x7d, 0xa6, 0x4d, 0x3d, 0x36, 0x9b, 0xc7, 0xcc, 0x68, 0x9a, 0xfe, 0x2f, 0x39, 0x2a, 0x51, 0x31, 0x39, 0x7d, 0x73, 0x6f, 0xc8, 0x74, 0x72, 0x6f, 0x6e, 0xda, 0x5f, 0xad, 0x48, 0xc8, 0x40, 0xe1, 0x06, 0x01, 0x36, 0xa1, 0x88, 0xc8, 0x99, 0x9c, 0xd1, 0x11, 0x8f, + /* (2^247)P */ 0xab, 0xc5, 0xcb, 0xcf, 0xbd, 0x73, 0x21, 0xd0, 0x82, 0xb1, 0x2e, 0x2d, 0xd4, 0x36, 0x1b, 0xed, 0xa9, 0x8a, 0x26, 0x79, 0xc4, 0x17, 0xae, 0xe5, 0x09, 0x0a, 0x0c, 0xa4, 0x21, 0xa0, 0x6e, 0xdd, 0x62, 0x8e, 0x44, 0x62, 0xcc, 0x50, 0xff, 0x93, 0xb3, 0x9a, 0x72, 0x8c, 0x3f, 0xa1, 0xa6, 0x4d, 0x87, 0xd5, 0x1c, 0x5a, 0xc0, 0x0b, 0x1a, 0xd6, + /* (2^248)P */ 0x67, 0x36, 0x6a, 0x1f, 0x96, 0xe5, 0x80, 0x20, 0xa9, 0xe8, 0x0b, 0x0e, 0x21, 0x29, 0x3f, 0xc8, 0x0a, 0x6d, 0x27, 0x47, 0xca, 0xd9, 0x05, 0x55, 0xbf, 0x11, 0xcf, 0x31, 0x7a, 0x37, 0xc7, 0x90, 0xa9, 0xf4, 0x07, 0x5e, 0xd5, 0xc3, 0x92, 0xaa, 0x95, 0xc8, 0x23, 0x2a, 0x53, 0x45, 0xe3, 0x3a, 0x24, 0xe9, 0x67, 0x97, 0x3a, 0x82, 0xf9, 0xa6, + /* (2^249)P */ 0x92, 0x9e, 0x6d, 0x82, 0x67, 0xe9, 0xf9, 0x17, 0x96, 0x2c, 0xa7, 0xd3, 0x89, 0xf9, 0xdb, 0xd8, 0x20, 0xc6, 0x2e, 0xec, 0x4a, 0x76, 0x64, 0xbf, 0x27, 0x40, 0xe2, 0xb4, 0xdf, 0x1f, 0xa0, 0xef, 0x07, 0x80, 0xfb, 0x8e, 0x12, 0xf8, 
0xb8, 0xe1, 0xc6, 0xdf, 0x7c, 0x69, 0x35, 0x5a, 0xe1, 0x8e, 0x5d, 0x69, 0x84, 0x56, 0xb6, 0x31, 0x1c, 0x0b, + /* (2^250)P */ 0xd6, 0x94, 0x5c, 0xef, 0xbb, 0x46, 0x45, 0x44, 0x5b, 0xa1, 0xae, 0x03, 0x65, 0xdd, 0xb5, 0x66, 0x88, 0x35, 0x29, 0x95, 0x16, 0x54, 0xa6, 0xf5, 0xc9, 0x78, 0x34, 0xe6, 0x0f, 0xc4, 0x2b, 0x5b, 0x79, 0x51, 0x68, 0x48, 0x3a, 0x26, 0x87, 0x05, 0x70, 0xaf, 0x8b, 0xa6, 0xc7, 0x2e, 0xb3, 0xa9, 0x10, 0x01, 0xb0, 0xb9, 0x31, 0xfd, 0xdc, 0x80, + /* (2^251)P */ 0x25, 0xf2, 0xad, 0xd6, 0x75, 0xa3, 0x04, 0x05, 0x64, 0x8a, 0x97, 0x60, 0x27, 0x2a, 0xe5, 0x6d, 0xb0, 0x73, 0xf4, 0x07, 0x2a, 0x9d, 0xe9, 0x46, 0xb4, 0x1c, 0x51, 0xf8, 0x63, 0x98, 0x7e, 0xe5, 0x13, 0x51, 0xed, 0x98, 0x65, 0x98, 0x4f, 0x8f, 0xe7, 0x7e, 0x72, 0xd7, 0x64, 0x11, 0x2f, 0xcd, 0x12, 0xf8, 0xc4, 0x63, 0x52, 0x0f, 0x7f, 0xc4, + /* (2^252)P */ 0x5c, 0xd9, 0x85, 0x63, 0xc7, 0x8a, 0x65, 0x9a, 0x25, 0x83, 0x31, 0x73, 0x49, 0xf0, 0x93, 0x96, 0x70, 0x67, 0x6d, 0xb1, 0xff, 0x95, 0x54, 0xe4, 0xf8, 0x15, 0x6c, 0x5f, 0xbd, 0xf6, 0x0f, 0x38, 0x7b, 0x68, 0x7d, 0xd9, 0x3d, 0xf0, 0xa9, 0xa0, 0xe4, 0xd1, 0xb6, 0x34, 0x6d, 0x14, 0x16, 0xc2, 0x4c, 0x30, 0x0e, 0x67, 0xd3, 0xbe, 0x2e, 0xc0, + /* (2^253)P */ 0x06, 0x6b, 0x52, 0xc8, 0x14, 0xcd, 0xae, 0x03, 0x93, 0xea, 0xc1, 0xf2, 0xf6, 0x8b, 0xc5, 0xb6, 0xdc, 0x82, 0x42, 0x29, 0x94, 0xe0, 0x25, 0x6c, 0x3f, 0x9f, 0x5d, 0xe4, 0x96, 0xf6, 0x8e, 0x3f, 0xf9, 0x72, 0xc4, 0x77, 0x60, 0x8b, 0xa4, 0xf9, 0xa8, 0xc3, 0x0a, 0x81, 0xb1, 0x97, 0x70, 0x18, 0xab, 0xea, 0x37, 0x8a, 0x08, 0xc7, 0xe2, 0x95, + /* (2^254)P */ 0x94, 0x49, 0xd9, 0x5f, 0x76, 0x72, 0x82, 0xad, 0x2d, 0x50, 0x1a, 0x7a, 0x5b, 0xe6, 0x95, 0x1e, 0x95, 0x65, 0x87, 0x1c, 0x52, 0xd7, 0x44, 0xe6, 0x9b, 0x56, 0xcd, 0x6f, 0x05, 0xff, 0x67, 0xc5, 0xdb, 0xa2, 0xac, 0xe4, 0xa2, 0x28, 0x63, 0x5f, 0xfb, 0x0c, 0x3b, 0xf1, 0x87, 0xc3, 0x36, 0x78, 0x3f, 0x77, 0xfa, 0x50, 0x85, 0xf9, 0xd7, 0x82, + /* (2^255)P */ 0x64, 0xc0, 0xe0, 0xd8, 0x2d, 0xed, 0xcb, 0x6a, 0xfd, 0xcd, 0xbc, 0x7e, 0x9f, 0xc8, 0x85, 0xe9, 0xc1, 0x7c, 0x0f, 0xe5, 0x18, 0xea, 0xd4, 0x51, 0xad, 0x59, 0x13, 0x75, 0xd9, 0x3d, 0xd4, 0x8a, 0xb2, 0xbe, 0x78, 0x52, 0x2b, 0x52, 0x94, 0x37, 0x41, 0xd6, 0xb4, 0xb6, 0x45, 0x20, 0x76, 0xe0, 0x1f, 0x31, 0xdb, 0xb1, 0xa1, 0x43, 0xf0, 0x18, + /* (2^256)P */ 0x74, 0xa9, 0xa4, 0xa9, 0xdd, 0x6e, 0x3e, 0x68, 0xe5, 0xc3, 0x2e, 0x92, 0x17, 0xa4, 0xcb, 0x80, 0xb1, 0xf0, 0x06, 0x93, 0xef, 0xe6, 0x00, 0xe6, 0x3b, 0xb1, 0x32, 0x65, 0x7b, 0x83, 0xb6, 0x8a, 0x49, 0x1b, 0x14, 0x89, 0xee, 0xba, 0xf5, 0x6a, 0x8d, 0x36, 0xef, 0xb0, 0xd8, 0xb2, 0x16, 0x99, 0x17, 0x35, 0x02, 0x16, 0x55, 0x58, 0xdd, 0x82, + /* (2^257)P */ 0x36, 0x95, 0xe8, 0xf4, 0x36, 0x42, 0xbb, 0xc5, 0x3e, 0xfa, 0x30, 0x84, 0x9e, 0x59, 0xfd, 0xd2, 0x95, 0x42, 0xf8, 0x64, 0xd9, 0xb9, 0x0e, 0x9f, 0xfa, 0xd0, 0x7b, 0x20, 0x31, 0x77, 0x48, 0x29, 0x4d, 0xd0, 0x32, 0x57, 0x56, 0x30, 0xa6, 0x17, 0x53, 0x04, 0xbf, 0x08, 0x28, 0xec, 0xb8, 0x46, 0xc1, 0x03, 0x89, 0xdc, 0xed, 0xa0, 0x35, 0x53, + /* (2^258)P */ 0xc5, 0x7f, 0x9e, 0xd8, 0xc5, 0xba, 0x5f, 0x68, 0xc8, 0x23, 0x75, 0xea, 0x0d, 0xd9, 0x5a, 0xfd, 0x61, 0x1a, 0xa3, 0x2e, 0x45, 0x63, 0x14, 0x55, 0x86, 0x21, 0x29, 0xbe, 0xef, 0x5e, 0x50, 0xe5, 0x18, 0x59, 0xe7, 0xe3, 0xce, 0x4d, 0x8c, 0x15, 0x8f, 0x89, 0x66, 0x44, 0x52, 0x3d, 0xfa, 0xc7, 0x9a, 0x59, 0x90, 0x8e, 0xc0, 0x06, 0x3f, 0xc9, + /* (2^259)P */ 0x8e, 0x04, 0xd9, 0x16, 0x50, 0x1d, 0x8c, 0x9f, 0xd5, 0xe3, 0xce, 0xfd, 0x47, 0x04, 0x27, 0x4d, 0xc2, 0xfa, 0x71, 0xd9, 0x0b, 0xb8, 0x65, 0xf4, 0x11, 0xf3, 0x08, 0xee, 0x81, 0xc8, 0x67, 0x99, 0x0b, 0x8d, 0x77, 0xa3, 0x4f, 0xb5, 0x9b, 0xdb, 0x26, 0xf1, 
0x97, 0xeb, 0x04, 0x54, 0xeb, 0x80, 0x08, 0x1d, 0x1d, 0xf6, 0x3d, 0x1f, 0x5a, 0xb8, + /* (2^260)P */ 0xb7, 0x9c, 0x9d, 0xee, 0xb9, 0x5c, 0xad, 0x0d, 0x9e, 0xfd, 0x60, 0x3c, 0x27, 0x4e, 0xa2, 0x95, 0xfb, 0x64, 0x7e, 0x79, 0x64, 0x87, 0x10, 0xb4, 0x73, 0xe0, 0x9d, 0x46, 0x4d, 0x3d, 0xee, 0x83, 0xe4, 0x16, 0x88, 0x97, 0xe6, 0x4d, 0xba, 0x70, 0xb6, 0x96, 0x7b, 0xff, 0x4b, 0xc8, 0xcf, 0x72, 0x83, 0x3e, 0x5b, 0x24, 0x2e, 0x57, 0xf1, 0x82, + /* (2^261)P */ 0x30, 0x71, 0x40, 0x51, 0x4f, 0x44, 0xbb, 0xc7, 0xf0, 0x54, 0x6e, 0x9d, 0xeb, 0x15, 0xad, 0xf8, 0x61, 0x43, 0x5a, 0xef, 0xc0, 0xb1, 0x57, 0xae, 0x03, 0x40, 0xe8, 0x68, 0x6f, 0x03, 0x20, 0x4f, 0x8a, 0x51, 0x2a, 0x9e, 0xd2, 0x45, 0xaf, 0xb4, 0xf5, 0xd4, 0x95, 0x7f, 0x3d, 0x3d, 0xb7, 0xb6, 0x28, 0xc5, 0x08, 0x8b, 0x44, 0xd6, 0x3f, 0xe7, + /* (2^262)P */ 0xa9, 0x52, 0x04, 0x67, 0xcb, 0x20, 0x63, 0xf8, 0x18, 0x01, 0x44, 0x21, 0x6a, 0x8a, 0x83, 0x48, 0xd4, 0xaf, 0x23, 0x0f, 0x35, 0x8d, 0xe5, 0x5a, 0xc4, 0x7c, 0x55, 0x46, 0x19, 0x5f, 0x35, 0xe0, 0x5d, 0x97, 0x4c, 0x2d, 0x04, 0xed, 0x59, 0xd4, 0xb0, 0xb2, 0xc6, 0xe3, 0x51, 0xe1, 0x38, 0xc6, 0x30, 0x49, 0x8f, 0xae, 0x61, 0x64, 0xce, 0xa8, + /* (2^263)P */ 0x9b, 0x64, 0x83, 0x3c, 0xd3, 0xdf, 0xb9, 0x27, 0xe7, 0x5b, 0x7f, 0xeb, 0xf3, 0x26, 0xcf, 0xb1, 0x8f, 0xaf, 0x26, 0xc8, 0x48, 0xce, 0xa1, 0xac, 0x7d, 0x10, 0x34, 0x28, 0xe1, 0x1f, 0x69, 0x03, 0x64, 0x77, 0x61, 0xdd, 0x4a, 0x9b, 0x18, 0x47, 0xf8, 0xca, 0x63, 0xc9, 0x03, 0x2d, 0x20, 0x2a, 0x69, 0x6e, 0x42, 0xd0, 0xe7, 0xaa, 0xb5, 0xf3, + /* (2^264)P */ 0xea, 0x31, 0x0c, 0x57, 0x0f, 0x3e, 0xe3, 0x35, 0xd8, 0x30, 0xa5, 0x6f, 0xdd, 0x95, 0x43, 0xc6, 0x66, 0x07, 0x4f, 0x34, 0xc3, 0x7e, 0x04, 0x10, 0x2d, 0xc4, 0x1c, 0x94, 0x52, 0x2e, 0x5b, 0x9a, 0x65, 0x2f, 0x91, 0xaa, 0x4f, 0x3c, 0xdc, 0x23, 0x18, 0xe1, 0x4f, 0x85, 0xcd, 0xf4, 0x8c, 0x51, 0xf7, 0xab, 0x4f, 0xdc, 0x15, 0x5c, 0x9e, 0xc5, + /* (2^265)P */ 0x54, 0x57, 0x23, 0x17, 0xe7, 0x82, 0x2f, 0x04, 0x7d, 0xfe, 0xe7, 0x1f, 0xa2, 0x57, 0x79, 0xe9, 0x58, 0x9b, 0xbe, 0xc6, 0x16, 0x4a, 0x17, 0x50, 0x90, 0x4a, 0x34, 0x70, 0x87, 0x37, 0x01, 0x26, 0xd8, 0xa3, 0x5f, 0x07, 0x7c, 0xd0, 0x7d, 0x05, 0x8a, 0x93, 0x51, 0x2f, 0x99, 0xea, 0xcf, 0x00, 0xd8, 0xc7, 0xe6, 0x9b, 0x8c, 0x62, 0x45, 0x87, + /* (2^266)P */ 0xc3, 0xfd, 0x29, 0x66, 0xe7, 0x30, 0x29, 0x77, 0xe0, 0x0d, 0x63, 0x5b, 0xe6, 0x90, 0x1a, 0x1e, 0x99, 0xc2, 0xa7, 0xab, 0xff, 0xa7, 0xbd, 0x79, 0x01, 0x97, 0xfd, 0x27, 0x1b, 0x43, 0x2b, 0xe6, 0xfe, 0x5e, 0xf1, 0xb9, 0x35, 0x38, 0x08, 0x25, 0x55, 0x90, 0x68, 0x2e, 0xc3, 0x67, 0x39, 0x9f, 0x2b, 0x2c, 0x70, 0x48, 0x8c, 0x47, 0xee, 0x56, + /* (2^267)P */ 0xf7, 0x32, 0x70, 0xb5, 0xe6, 0x42, 0xfd, 0x0a, 0x39, 0x9b, 0x07, 0xfe, 0x0e, 0xf4, 0x47, 0xba, 0x6a, 0x3f, 0xf5, 0x2c, 0x15, 0xf3, 0x60, 0x3f, 0xb1, 0x83, 0x7b, 0x2e, 0x34, 0x58, 0x1a, 0x6e, 0x4a, 0x49, 0x05, 0x45, 0xca, 0xdb, 0x00, 0x01, 0x0c, 0x42, 0x5e, 0x60, 0x40, 0x5f, 0xd9, 0xc7, 0x3a, 0x9e, 0x1c, 0x8d, 0xab, 0x11, 0x55, 0x65, + /* (2^268)P */ 0x87, 0x40, 0xb7, 0x0d, 0xaa, 0x34, 0x89, 0x90, 0x75, 0x6d, 0xa2, 0xfe, 0x3b, 0x6d, 0x5c, 0x39, 0x98, 0x10, 0x9e, 0x15, 0xc5, 0x35, 0xa2, 0x27, 0x23, 0x0a, 0x2d, 0x60, 0xe2, 0xa8, 0x7f, 0x3e, 0x77, 0x8f, 0xcc, 0x44, 0xcc, 0x30, 0x28, 0xe2, 0xf0, 0x04, 0x8c, 0xee, 0xe4, 0x5f, 0x68, 0x8c, 0xdf, 0x70, 0xbf, 0x31, 0xee, 0x2a, 0xfc, 0xce, + /* (2^269)P */ 0x92, 0xf2, 0xa0, 0xd9, 0x58, 0x3b, 0x7c, 0x1a, 0x99, 0x46, 0x59, 0x54, 0x60, 0x06, 0x8d, 0x5e, 0xf0, 0x22, 0xa1, 0xed, 0x92, 0x8a, 0x4d, 0x76, 0x95, 0x05, 0x0b, 0xff, 0xfc, 0x9a, 0xd1, 0xcc, 0x05, 0xb9, 0x5e, 0x99, 0xe8, 0x2a, 0x76, 0x7b, 0xfd, 0xa6, 0xe2, 0xd1, 0x1a, 0xd6, 
0x76, 0x9f, 0x2f, 0x0e, 0xd1, 0xa8, 0x77, 0x5a, 0x40, 0x5a, + /* (2^270)P */ 0xff, 0xf9, 0x3f, 0xa9, 0xa6, 0x6c, 0x6d, 0x03, 0x8b, 0xa7, 0x10, 0x5d, 0x3f, 0xec, 0x3e, 0x1c, 0x0b, 0x6b, 0xa2, 0x6a, 0x22, 0xa9, 0x28, 0xd0, 0x66, 0xc9, 0xc2, 0x3d, 0x47, 0x20, 0x7d, 0xa6, 0x1d, 0xd8, 0x25, 0xb5, 0xf2, 0xf9, 0x70, 0x19, 0x6b, 0xf8, 0x43, 0x36, 0xc5, 0x1f, 0xe4, 0x5a, 0x4c, 0x13, 0xe4, 0x6d, 0x08, 0x0b, 0x1d, 0xb1, + /* (2^271)P */ 0x3f, 0x20, 0x9b, 0xfb, 0xec, 0x7d, 0x31, 0xc5, 0xfc, 0x88, 0x0b, 0x30, 0xed, 0x36, 0xc0, 0x63, 0xb1, 0x7d, 0x10, 0xda, 0xb6, 0x2e, 0xad, 0xf3, 0xec, 0x94, 0xe7, 0xec, 0xb5, 0x9c, 0xfe, 0xf5, 0x35, 0xf0, 0xa2, 0x2d, 0x7f, 0xca, 0x6b, 0x67, 0x1a, 0xf6, 0xb3, 0xda, 0x09, 0x2a, 0xaa, 0xdf, 0xb1, 0xca, 0x9b, 0xfb, 0xeb, 0xb3, 0xcd, 0xc0, + /* (2^272)P */ 0xcd, 0x4d, 0x89, 0x00, 0xa4, 0x3b, 0x48, 0xf0, 0x76, 0x91, 0x35, 0xa5, 0xf8, 0xc9, 0xb6, 0x46, 0xbc, 0xf6, 0x9a, 0x45, 0x47, 0x17, 0x96, 0x80, 0x5b, 0x3a, 0x28, 0x33, 0xf9, 0x5a, 0xef, 0x43, 0x07, 0xfe, 0x3b, 0xf4, 0x8e, 0x19, 0xce, 0xd2, 0x94, 0x4b, 0x6d, 0x8e, 0x67, 0x20, 0xc7, 0x4f, 0x2f, 0x59, 0x8e, 0xe1, 0xa1, 0xa9, 0xf9, 0x0e, + /* (2^273)P */ 0xdc, 0x7b, 0xb5, 0x50, 0x2e, 0xe9, 0x7e, 0x8b, 0x78, 0xa1, 0x38, 0x96, 0x22, 0xc3, 0x61, 0x67, 0x6d, 0xc8, 0x58, 0xed, 0x41, 0x1d, 0x5d, 0x86, 0x98, 0x7f, 0x2f, 0x1b, 0x8d, 0x3e, 0xaa, 0xc1, 0xd2, 0x0a, 0xf3, 0xbf, 0x95, 0x04, 0xf3, 0x10, 0x3c, 0x2b, 0x7f, 0x90, 0x46, 0x04, 0xaa, 0x6a, 0xa9, 0x35, 0x76, 0xac, 0x49, 0xb5, 0x00, 0x45, + /* (2^274)P */ 0xb1, 0x93, 0x79, 0x84, 0x4a, 0x2a, 0x30, 0x78, 0x16, 0xaa, 0xc5, 0x74, 0x06, 0xce, 0xa5, 0xa7, 0x32, 0x86, 0xe0, 0xf9, 0x10, 0xd2, 0x58, 0x76, 0xfb, 0x66, 0x49, 0x76, 0x3a, 0x90, 0xba, 0xb5, 0xcc, 0x99, 0xcd, 0x09, 0xc1, 0x9a, 0x74, 0x23, 0xdf, 0x0c, 0xfe, 0x99, 0x52, 0x80, 0xa3, 0x7c, 0x1c, 0x71, 0x5f, 0x2c, 0x49, 0x57, 0xf4, 0xf9, + /* (2^275)P */ 0x6d, 0xbf, 0x52, 0xe6, 0x25, 0x98, 0xed, 0xcf, 0xe3, 0xbc, 0x08, 0xa2, 0x1a, 0x90, 0xae, 0xa0, 0xbf, 0x07, 0x15, 0xad, 0x0a, 0x9f, 0x3e, 0x47, 0x44, 0xc2, 0x10, 0x46, 0xa6, 0x7a, 0x9e, 0x2f, 0x57, 0xbc, 0xe2, 0xf0, 0x1d, 0xd6, 0x9a, 0x06, 0xed, 0xfc, 0x54, 0x95, 0x92, 0x15, 0xa2, 0xf7, 0x8d, 0x6b, 0xef, 0xb2, 0x05, 0xed, 0x5c, 0x63, + /* (2^276)P */ 0xbc, 0x0b, 0x27, 0x3a, 0x3a, 0xf8, 0xe1, 0x48, 0x02, 0x7e, 0x27, 0xe6, 0x81, 0x62, 0x07, 0x73, 0x74, 0xe5, 0x52, 0xd7, 0xf8, 0x26, 0xca, 0x93, 0x4d, 0x3e, 0x9b, 0x55, 0x09, 0x8e, 0xe3, 0xd7, 0xa6, 0xe3, 0xb6, 0x2a, 0xa9, 0xb3, 0xb0, 0xa0, 0x8c, 0x01, 0xbb, 0x07, 0x90, 0x78, 0x6d, 0x6d, 0xe9, 0xf0, 0x7a, 0x90, 0xbd, 0xdc, 0x0c, 0x36, + /* (2^277)P */ 0x7f, 0x20, 0x12, 0x0f, 0x40, 0x00, 0x53, 0xd8, 0x0c, 0x27, 0x47, 0x47, 0x22, 0x80, 0xfb, 0x62, 0xe4, 0xa7, 0xf7, 0xbd, 0x42, 0xa5, 0xc3, 0x2b, 0xb2, 0x7f, 0x50, 0xcc, 0xe2, 0xfb, 0xd5, 0xc0, 0x63, 0xdd, 0x24, 0x5f, 0x7c, 0x08, 0x91, 0xbf, 0x6e, 0x47, 0x44, 0xd4, 0x6a, 0xc0, 0xc3, 0x09, 0x39, 0x27, 0xdd, 0xc7, 0xca, 0x06, 0x29, 0x55, + /* (2^278)P */ 0x76, 0x28, 0x58, 0xb0, 0xd2, 0xf3, 0x0f, 0x04, 0xe9, 0xc9, 0xab, 0x66, 0x5b, 0x75, 0x51, 0xdc, 0xe5, 0x8f, 0xe8, 0x1f, 0xdb, 0x03, 0x0f, 0xb0, 0x7d, 0xf9, 0x20, 0x64, 0x89, 0xe9, 0xdc, 0xe6, 0x24, 0xc3, 0xd5, 0xd2, 0x41, 0xa6, 0xe4, 0xe3, 0xc4, 0x79, 0x7c, 0x0f, 0xa1, 0x61, 0x2f, 0xda, 0xa4, 0xc9, 0xfd, 0xad, 0x5c, 0x65, 0x6a, 0xf3, + /* (2^279)P */ 0xd5, 0xab, 0x72, 0x7a, 0x3b, 0x59, 0xea, 0xcf, 0xd5, 0x17, 0xd2, 0xb2, 0x5f, 0x2d, 0xab, 0xad, 0x9e, 0x88, 0x64, 0x55, 0x96, 0x6e, 0xf3, 0x44, 0xa9, 0x11, 0xf5, 0xf8, 0x3a, 0xf1, 0xcd, 0x79, 0x4c, 0x99, 0x6d, 0x23, 0x6a, 0xa0, 0xc2, 0x1a, 0x19, 0x45, 0xb5, 0xd8, 0x95, 0x2f, 0x49, 0xe9, 0x46, 0x39, 
0x26, 0x60, 0x04, 0x15, 0x8b, 0xcc, + /* (2^280)P */ 0x66, 0x0c, 0xf0, 0x54, 0x41, 0x02, 0x91, 0xab, 0xe5, 0x85, 0x8a, 0x44, 0xa6, 0x34, 0x96, 0x32, 0xc0, 0xdf, 0x6c, 0x41, 0x39, 0xd4, 0xc6, 0xe1, 0xe3, 0x81, 0xb0, 0x4c, 0x34, 0x4f, 0xe5, 0xf4, 0x35, 0x46, 0x1f, 0xeb, 0x75, 0xfd, 0x43, 0x37, 0x50, 0x99, 0xab, 0xad, 0xb7, 0x8c, 0xa1, 0x57, 0xcb, 0xe6, 0xce, 0x16, 0x2e, 0x85, 0xcc, 0xf9, + /* (2^281)P */ 0x63, 0xd1, 0x3f, 0x9e, 0xa2, 0x17, 0x2e, 0x1d, 0x3e, 0xce, 0x48, 0x2d, 0xbb, 0x8f, 0x69, 0xc9, 0xa6, 0x3d, 0x4e, 0xfe, 0x09, 0x56, 0xb3, 0x02, 0x5f, 0x99, 0x97, 0x0c, 0x54, 0xda, 0x32, 0x97, 0x9b, 0xf4, 0x95, 0xf1, 0xad, 0xe3, 0x2b, 0x04, 0xa7, 0x9b, 0x3f, 0xbb, 0xe7, 0x87, 0x2e, 0x1f, 0x8b, 0x4b, 0x7a, 0xa4, 0x43, 0x0c, 0x0f, 0x35, + /* (2^282)P */ 0x05, 0xdc, 0xe0, 0x2c, 0xa1, 0xc1, 0xd0, 0xf1, 0x1f, 0x4e, 0xc0, 0x6c, 0x35, 0x7b, 0xca, 0x8f, 0x8b, 0x02, 0xb1, 0xf7, 0xd6, 0x2e, 0xe7, 0x93, 0x80, 0x85, 0x18, 0x88, 0x19, 0xb9, 0xb4, 0x4a, 0xbc, 0xeb, 0x5a, 0x78, 0x38, 0xed, 0xc6, 0x27, 0x2a, 0x74, 0x76, 0xf0, 0x1b, 0x79, 0x92, 0x2f, 0xd2, 0x81, 0x98, 0xdf, 0xa9, 0x50, 0x19, 0xeb, + /* (2^283)P */ 0xb5, 0xe7, 0xb4, 0x11, 0x3a, 0x81, 0xb6, 0xb4, 0xf8, 0xa2, 0xb3, 0x6c, 0xfc, 0x9d, 0xe0, 0xc0, 0xe0, 0x59, 0x7f, 0x05, 0x37, 0xef, 0x2c, 0xa9, 0x3a, 0x24, 0xac, 0x7b, 0x25, 0xa0, 0x55, 0xd2, 0x44, 0x82, 0x82, 0x6e, 0x64, 0xa3, 0x58, 0xc8, 0x67, 0xae, 0x26, 0xa7, 0x0f, 0x42, 0x63, 0xe1, 0x93, 0x01, 0x52, 0x19, 0xaf, 0x49, 0x3e, 0x33, + /* (2^284)P */ 0x05, 0x85, 0xe6, 0x66, 0xaf, 0x5f, 0xdf, 0xbf, 0x9d, 0x24, 0x62, 0x60, 0x90, 0xe2, 0x4c, 0x7d, 0x4e, 0xc3, 0x74, 0x5d, 0x4f, 0x53, 0xf3, 0x63, 0x13, 0xf4, 0x74, 0x28, 0x6b, 0x7d, 0x57, 0x0c, 0x9d, 0x84, 0xa7, 0x1a, 0xff, 0xa0, 0x79, 0xdf, 0xfc, 0x65, 0x98, 0x8e, 0x22, 0x0d, 0x62, 0x7e, 0xf2, 0x34, 0x60, 0x83, 0x05, 0x14, 0xb1, 0xc1, + /* (2^285)P */ 0x64, 0x22, 0xcc, 0xdf, 0x5c, 0xbc, 0x88, 0x68, 0x4c, 0xd9, 0xbc, 0x0e, 0xc9, 0x8b, 0xb4, 0x23, 0x52, 0xad, 0xb0, 0xb3, 0xf1, 0x17, 0xd8, 0x15, 0x04, 0x6b, 0x99, 0xf0, 0xc4, 0x7d, 0x48, 0x22, 0x4a, 0xf8, 0x6f, 0xaa, 0x88, 0x0d, 0xc5, 0x5e, 0xa9, 0x1c, 0x61, 0x3d, 0x95, 0xa9, 0x7b, 0x6a, 0x79, 0x33, 0x0a, 0x2b, 0x99, 0xe3, 0x4e, 0x48, + /* (2^286)P */ 0x6b, 0x9b, 0x6a, 0x2a, 0xf1, 0x60, 0x31, 0xb4, 0x73, 0xd1, 0x87, 0x45, 0x9c, 0x15, 0x58, 0x4b, 0x91, 0x6d, 0x94, 0x1c, 0x41, 0x11, 0x4a, 0x83, 0xec, 0xaf, 0x65, 0xbc, 0x34, 0xaa, 0x26, 0xe2, 0xaf, 0xed, 0x46, 0x05, 0x4e, 0xdb, 0xc6, 0x4e, 0x10, 0x28, 0x4e, 0x72, 0xe5, 0x31, 0xa3, 0x20, 0xd7, 0xb1, 0x96, 0x64, 0xf6, 0xce, 0x08, 0x08, + /* (2^287)P */ 0x16, 0xa9, 0x5c, 0x9f, 0x9a, 0xb4, 0xb8, 0xc8, 0x32, 0x78, 0xc0, 0x3a, 0xd9, 0x5f, 0x94, 0xac, 0x3a, 0x42, 0x1f, 0x43, 0xd6, 0x80, 0x47, 0x2c, 0xdc, 0x76, 0x27, 0xfa, 0x50, 0xe5, 0xa1, 0xe4, 0xc3, 0xcb, 0x61, 0x31, 0xe1, 0x2e, 0xde, 0x81, 0x3b, 0x77, 0x1c, 0x39, 0x3c, 0xdb, 0xda, 0x87, 0x4b, 0x84, 0x12, 0xeb, 0xdd, 0x54, 0xbf, 0xe7, + /* (2^288)P */ 0xbf, 0xcb, 0x73, 0x21, 0x3d, 0x7e, 0x13, 0x8c, 0xa6, 0x34, 0x21, 0x2b, 0xa5, 0xe4, 0x9f, 0x8e, 0x9c, 0x01, 0x9c, 0x43, 0xd9, 0xc7, 0xb9, 0xf1, 0xbe, 0x7f, 0x45, 0x51, 0x97, 0xa1, 0x8e, 0x01, 0xf8, 0xbd, 0xd2, 0xbf, 0x81, 0x3a, 0x8b, 0xab, 0xe4, 0x89, 0xb7, 0xbd, 0xf2, 0xcd, 0xa9, 0x8a, 0x8a, 0xde, 0xfb, 0x8a, 0x55, 0x12, 0x7b, 0x17, + /* (2^289)P */ 0x1b, 0x95, 0x58, 0x4d, 0xe6, 0x51, 0x31, 0x52, 0x1c, 0xd8, 0x15, 0x84, 0xb1, 0x0d, 0x36, 0x25, 0x88, 0x91, 0x46, 0x71, 0x42, 0x56, 0xe2, 0x90, 0x08, 0x9e, 0x77, 0x1b, 0xee, 0x22, 0x3f, 0xec, 0xee, 0x8c, 0x7b, 0x2e, 0x79, 0xc4, 0x6c, 0x07, 0xa1, 0x7e, 0x52, 0xf5, 0x26, 0x5c, 0x84, 0x2a, 0x50, 0x6e, 0x82, 0xb3, 0x76, 0xda, 
0x35, 0x16, + /* (2^290)P */ 0x0a, 0x6f, 0x99, 0x87, 0xc0, 0x7d, 0x8a, 0xb2, 0xca, 0xae, 0xe8, 0x65, 0x98, 0x0f, 0xb3, 0x44, 0xe1, 0xdc, 0x52, 0x79, 0x75, 0xec, 0x8f, 0x95, 0x87, 0x45, 0xd1, 0x32, 0x18, 0x55, 0x15, 0xce, 0x64, 0x9b, 0x08, 0x4f, 0x2c, 0xea, 0xba, 0x1c, 0x57, 0x06, 0x63, 0xc8, 0xb1, 0xfd, 0xc5, 0x67, 0xe7, 0x1f, 0x87, 0x9e, 0xde, 0x72, 0x7d, 0xec, + /* (2^291)P */ 0x36, 0x8b, 0x4d, 0x2c, 0xc2, 0x46, 0xe8, 0x96, 0xac, 0x0b, 0x8c, 0xc5, 0x09, 0x10, 0xfc, 0xf2, 0xda, 0xea, 0x22, 0xb2, 0xd3, 0x89, 0xeb, 0xb2, 0x85, 0x0f, 0xff, 0x59, 0x50, 0x2c, 0x99, 0x5a, 0x1f, 0xec, 0x2a, 0x6f, 0xec, 0xcf, 0xe9, 0xce, 0x12, 0x6b, 0x19, 0xd8, 0xde, 0x9b, 0xce, 0x0e, 0x6a, 0xaa, 0xe1, 0x32, 0xea, 0x4c, 0xfe, 0x92, + /* (2^292)P */ 0x5f, 0x17, 0x70, 0x53, 0x26, 0x03, 0x0b, 0xab, 0xd1, 0xc1, 0x42, 0x0b, 0xab, 0x2b, 0x3d, 0x31, 0xa4, 0xd5, 0x2b, 0x5e, 0x00, 0xd5, 0x9a, 0x22, 0x34, 0xe0, 0x53, 0x3f, 0x59, 0x7f, 0x2c, 0x6d, 0x72, 0x9a, 0xa4, 0xbe, 0x3d, 0x42, 0x05, 0x1b, 0xf2, 0x7f, 0x88, 0x56, 0xd1, 0x7c, 0x7d, 0x6b, 0x9f, 0x43, 0xfe, 0x65, 0x19, 0xae, 0x9c, 0x4c, + /* (2^293)P */ 0xf3, 0x7c, 0x20, 0xa9, 0xfc, 0xf2, 0xf2, 0x3b, 0x3c, 0x57, 0x41, 0x94, 0xe5, 0xcc, 0x6a, 0x37, 0x5d, 0x09, 0xf2, 0xab, 0xc2, 0xca, 0x60, 0x38, 0x6b, 0x7a, 0xe1, 0x78, 0x2b, 0xc1, 0x1d, 0xe8, 0xfd, 0xbc, 0x3d, 0x5c, 0xa2, 0xdb, 0x49, 0x20, 0x79, 0xe6, 0x1b, 0x9b, 0x65, 0xd9, 0x6d, 0xec, 0x57, 0x1d, 0xd2, 0xe9, 0x90, 0xeb, 0x43, 0x7b, + /* (2^294)P */ 0x2a, 0x8b, 0x2e, 0x19, 0x18, 0x10, 0xb8, 0x83, 0xe7, 0x7d, 0x2d, 0x9a, 0x3a, 0xe5, 0xd1, 0xe4, 0x7c, 0x38, 0xe5, 0x59, 0x2a, 0x6e, 0xd9, 0x01, 0x29, 0x3d, 0x23, 0xf7, 0x52, 0xba, 0x61, 0x04, 0x9a, 0xde, 0xc4, 0x31, 0x50, 0xeb, 0x1b, 0xaa, 0xde, 0x39, 0x58, 0xd8, 0x1b, 0x1e, 0xfc, 0x57, 0x9a, 0x28, 0x43, 0x9e, 0x97, 0x5e, 0xaa, 0xa3, + /* (2^295)P */ 0x97, 0x0a, 0x74, 0xc4, 0x39, 0x99, 0x6b, 0x40, 0xc7, 0x3e, 0x8c, 0xa7, 0xb1, 0x4e, 0x9a, 0x59, 0x6e, 0x1c, 0xfe, 0xfc, 0x2a, 0x5e, 0x73, 0x2b, 0x8c, 0xa9, 0x71, 0xf5, 0xda, 0x6b, 0x15, 0xab, 0xf7, 0xbe, 0x2a, 0x44, 0x5f, 0xba, 0xae, 0x67, 0x93, 0xc5, 0x86, 0xc1, 0xb8, 0xdf, 0xdc, 0xcb, 0xd7, 0xff, 0xb1, 0x71, 0x7c, 0x6f, 0x88, 0xf8, + /* (2^296)P */ 0x3f, 0x89, 0xb1, 0xbf, 0x24, 0x16, 0xac, 0x56, 0xfe, 0xdf, 0x94, 0x71, 0xbf, 0xd6, 0x57, 0x0c, 0xb4, 0x77, 0x37, 0xaa, 0x2a, 0x70, 0x76, 0x49, 0xaf, 0x0c, 0x97, 0x8e, 0x78, 0x2a, 0x67, 0xc9, 0x3b, 0x3d, 0x5b, 0x01, 0x2f, 0xda, 0xd5, 0xa8, 0xde, 0x02, 0xa9, 0xac, 0x76, 0x00, 0x0b, 0x46, 0xc6, 0x2d, 0xdc, 0x08, 0xf4, 0x10, 0x2c, 0xbe, + /* (2^297)P */ 0xcb, 0x07, 0xf9, 0x91, 0xc6, 0xd5, 0x3e, 0x54, 0x63, 0xae, 0xfc, 0x10, 0xbe, 0x3a, 0x20, 0x73, 0x4e, 0x65, 0x0e, 0x2d, 0x86, 0x77, 0x83, 0x9d, 0xe2, 0x0a, 0xe9, 0xac, 0x22, 0x52, 0x76, 0xd4, 0x6e, 0xfa, 0xe0, 0x09, 0xef, 0x78, 0x82, 0x9f, 0x26, 0xf9, 0x06, 0xb5, 0xe7, 0x05, 0x0e, 0xf2, 0x46, 0x72, 0x93, 0xd3, 0x24, 0xbd, 0x87, 0x60, + /* (2^298)P */ 0x14, 0x55, 0x84, 0x7b, 0x6c, 0x60, 0x80, 0x73, 0x8c, 0xbe, 0x2d, 0xd6, 0x69, 0xd6, 0x17, 0x26, 0x44, 0x9f, 0x88, 0xa2, 0x39, 0x7c, 0x89, 0xbc, 0x6d, 0x9e, 0x46, 0xb6, 0x68, 0x66, 0xea, 0xdc, 0x31, 0xd6, 0x21, 0x51, 0x9f, 0x28, 0x28, 0xaf, 0x9e, 0x47, 0x2c, 0x4c, 0x8f, 0xf3, 0xaf, 0x1f, 0xe4, 0xab, 0xac, 0xe9, 0x0c, 0x91, 0x3a, 0x61, + /* (2^299)P */ 0xb0, 0x37, 0x55, 0x4b, 0xe9, 0xc3, 0xb1, 0xce, 0x42, 0xe6, 0xc5, 0x11, 0x7f, 0x2c, 0x11, 0xfc, 0x4e, 0x71, 0x17, 0x00, 0x74, 0x7f, 0xbf, 0x07, 0x4d, 0xfd, 0x40, 0xb2, 0x87, 0xb0, 0xef, 0x1f, 0x35, 0x2c, 0x2d, 0xd7, 0xe1, 0xe4, 0xad, 0x0e, 0x7f, 0x63, 0x66, 0x62, 0x23, 0x41, 0xf6, 0xc1, 0x14, 0xa6, 0xd7, 0xa9, 0x11, 0x56, 0x9d, 0x1b, + /* 
(2^300)P */ 0x02, 0x82, 0x42, 0x18, 0x4f, 0x1b, 0xc9, 0x5d, 0x78, 0x5f, 0xee, 0xed, 0x01, 0x49, 0x8f, 0xf2, 0xa0, 0xe2, 0x6e, 0xbb, 0x6b, 0x04, 0x8d, 0xb2, 0x41, 0xae, 0xc8, 0x1b, 0x59, 0x34, 0xb8, 0x2a, 0xdb, 0x1f, 0xd2, 0x52, 0xdf, 0x3f, 0x35, 0x00, 0x8b, 0x61, 0xbc, 0x97, 0xa0, 0xc4, 0x77, 0xd1, 0xe4, 0x2c, 0x59, 0x68, 0xff, 0x30, 0xf2, 0xe2, + /* (2^301)P */ 0x79, 0x08, 0xb1, 0xdb, 0x55, 0xae, 0xd0, 0xed, 0xda, 0xa0, 0xec, 0x6c, 0xae, 0x68, 0xf2, 0x0b, 0x61, 0xb3, 0xf5, 0x21, 0x69, 0x87, 0x0b, 0x03, 0xea, 0x8a, 0x15, 0xd9, 0x7e, 0xca, 0xf7, 0xcd, 0xf3, 0x33, 0xb3, 0x4c, 0x5b, 0x23, 0x4e, 0x6f, 0x90, 0xad, 0x91, 0x4b, 0x4f, 0x46, 0x37, 0xe5, 0xe8, 0xb7, 0xeb, 0xd5, 0xca, 0x34, 0x4e, 0x23, + /* (2^302)P */ 0x09, 0x02, 0xdd, 0xfd, 0x70, 0xac, 0x56, 0x80, 0x36, 0x5e, 0x49, 0xd0, 0x3f, 0xc2, 0xe0, 0xba, 0x46, 0x7f, 0x5c, 0xf7, 0xc5, 0xbd, 0xd5, 0x55, 0x7d, 0x3f, 0xd5, 0x7d, 0x06, 0xdf, 0x27, 0x20, 0x4f, 0xe9, 0x30, 0xec, 0x1b, 0xa0, 0x0c, 0xd4, 0x2c, 0xe1, 0x2b, 0x65, 0x73, 0xea, 0x75, 0x35, 0xe8, 0xe6, 0x56, 0xd6, 0x07, 0x15, 0x99, 0xdf, + /* (2^303)P */ 0x4e, 0x10, 0xb7, 0xd0, 0x63, 0x8c, 0xcf, 0x16, 0x00, 0x7c, 0x58, 0xdf, 0x86, 0xdc, 0x4e, 0xca, 0x9c, 0x40, 0x5a, 0x42, 0xfd, 0xec, 0x98, 0xa4, 0x42, 0x53, 0xae, 0x16, 0x9d, 0xfd, 0x75, 0x5a, 0x12, 0x56, 0x1e, 0xc6, 0x57, 0xcc, 0x79, 0x27, 0x96, 0x00, 0xcf, 0x80, 0x4f, 0x8a, 0x36, 0x5c, 0xbb, 0xe9, 0x12, 0xdb, 0xb6, 0x2b, 0xad, 0x96, + /* (2^304)P */ 0x92, 0x32, 0x1f, 0xfd, 0xc6, 0x02, 0x94, 0x08, 0x1b, 0x60, 0x6a, 0x9f, 0x8b, 0xd6, 0xc8, 0xad, 0xd5, 0x1b, 0x27, 0x4e, 0xa4, 0x4d, 0x4a, 0x00, 0x10, 0x5f, 0x86, 0x11, 0xf5, 0xe3, 0x14, 0x32, 0x43, 0xee, 0xb9, 0xc7, 0xab, 0xf4, 0x6f, 0xe5, 0x66, 0x0c, 0x06, 0x0d, 0x96, 0x79, 0x28, 0xaf, 0x45, 0x2b, 0x56, 0xbe, 0xe4, 0x4a, 0x52, 0xd6, + /* (2^305)P */ 0x15, 0x16, 0x69, 0xef, 0x60, 0xca, 0x82, 0x25, 0x0f, 0xc6, 0x30, 0xa0, 0x0a, 0xd1, 0x83, 0x29, 0xcd, 0xb6, 0x89, 0x6c, 0xf5, 0xb2, 0x08, 0x38, 0xe6, 0xca, 0x6b, 0x19, 0x93, 0xc6, 0x5f, 0x75, 0x8e, 0x60, 0x34, 0x23, 0xc4, 0x13, 0x17, 0x69, 0x55, 0xcc, 0x72, 0x9c, 0x2b, 0x6c, 0x80, 0xf4, 0x4b, 0x8b, 0xb6, 0x97, 0x65, 0x07, 0xb6, 0xfb, + /* (2^306)P */ 0x01, 0x99, 0x74, 0x28, 0xa6, 0x67, 0xa3, 0xe5, 0x25, 0xfb, 0xdf, 0x82, 0x93, 0xe7, 0x35, 0x74, 0xce, 0xe3, 0x15, 0x1c, 0x1d, 0x79, 0x52, 0x84, 0x08, 0x04, 0x2f, 0x5c, 0xb8, 0xcd, 0x7f, 0x89, 0xb0, 0x39, 0x93, 0x63, 0xc9, 0x5d, 0x06, 0x01, 0x59, 0xf7, 0x7e, 0xf1, 0x4c, 0x3d, 0x12, 0x8d, 0x69, 0x1d, 0xb7, 0x21, 0x5e, 0x88, 0x82, 0xa2, + /* (2^307)P */ 0x8e, 0x69, 0xaf, 0x9a, 0x41, 0x0d, 0x9d, 0xcf, 0x8e, 0x8d, 0x5c, 0x51, 0x6e, 0xde, 0x0e, 0x48, 0x23, 0x89, 0xe5, 0x37, 0x80, 0xd6, 0x9d, 0x72, 0x32, 0x26, 0x38, 0x2d, 0x63, 0xa0, 0xfa, 0xd3, 0x40, 0xc0, 0x8c, 0x68, 0x6f, 0x2b, 0x1e, 0x9a, 0x39, 0x51, 0x78, 0x74, 0x9a, 0x7b, 0x4a, 0x8f, 0x0c, 0xa0, 0x88, 0x60, 0xa5, 0x21, 0xcd, 0xc7, + /* (2^308)P */ 0x3a, 0x7f, 0x73, 0x14, 0xbf, 0x89, 0x6a, 0x4c, 0x09, 0x5d, 0xf2, 0x93, 0x20, 0x2d, 0xc4, 0x29, 0x86, 0x06, 0x95, 0xab, 0x22, 0x76, 0x4c, 0x54, 0xe1, 0x7e, 0x80, 0x6d, 0xab, 0x29, 0x61, 0x87, 0x77, 0xf6, 0xc0, 0x3e, 0xda, 0xab, 0x65, 0x7e, 0x39, 0x12, 0xa1, 0x6b, 0x42, 0xf7, 0xc5, 0x97, 0x77, 0xec, 0x6f, 0x22, 0xbe, 0x44, 0xc7, 0x03, + /* (2^309)P */ 0xa5, 0x23, 0x90, 0x41, 0xa3, 0xc5, 0x3e, 0xe0, 0xa5, 0x32, 0x49, 0x1f, 0x39, 0x78, 0xb1, 0xd8, 0x24, 0xea, 0xd4, 0x87, 0x53, 0x42, 0x51, 0xf4, 0xd9, 0x46, 0x25, 0x2f, 0x62, 0xa9, 0x90, 0x9a, 0x4a, 0x25, 0x8a, 0xd2, 0x10, 0xe7, 0x3c, 0xbc, 0x58, 0x8d, 0x16, 0x14, 0x96, 0xa4, 0x6f, 0xf8, 0x12, 0x69, 0x91, 0x73, 0xe2, 0xfa, 0xf4, 0x57, + /* (2^310)P */ 0x51, 0x45, 
0x3f, 0x96, 0xdc, 0x97, 0x38, 0xa6, 0x01, 0x63, 0x09, 0xea, 0xc2, 0x13, 0x30, 0xb0, 0x00, 0xb8, 0x0a, 0xce, 0xd1, 0x8f, 0x3e, 0x69, 0x62, 0x46, 0x33, 0x9c, 0xbf, 0x4b, 0xcb, 0x0c, 0x90, 0x1c, 0x45, 0xcf, 0x37, 0x5b, 0xf7, 0x4b, 0x5e, 0x95, 0xc3, 0x28, 0x9f, 0x08, 0x83, 0x53, 0x74, 0xab, 0x0c, 0xb4, 0xc0, 0xa1, 0xbc, 0x89, + /* (2^311)P */ 0x06, 0xb1, 0x51, 0x15, 0x65, 0x60, 0x21, 0x17, 0x7a, 0x20, 0x65, 0xee, 0x12, 0x35, 0x4d, 0x46, 0xf4, 0xf8, 0xd0, 0xb1, 0xca, 0x09, 0x30, 0x08, 0x89, 0x23, 0x3b, 0xe7, 0xab, 0x8b, 0x77, 0xa6, 0xad, 0x25, 0xdd, 0xea, 0x3c, 0x7d, 0xa5, 0x24, 0xb3, 0xe8, 0xfa, 0xfb, 0xc9, 0xf2, 0x71, 0xe9, 0xfa, 0xf2, 0xdc, 0x54, 0xdd, 0x55, 0x2e, 0x2f, + /* (2^312)P */ 0x7f, 0x96, 0x96, 0xfb, 0x52, 0x86, 0xcf, 0xea, 0x62, 0x18, 0xf1, 0x53, 0x1f, 0x61, 0x2a, 0x9f, 0x8c, 0x51, 0xca, 0x2c, 0xde, 0x6d, 0xce, 0xab, 0x58, 0x32, 0x0b, 0x33, 0x9b, 0x99, 0xb4, 0x5c, 0x88, 0x2a, 0x76, 0xcc, 0x3e, 0x54, 0x1e, 0x9d, 0xa2, 0x89, 0xe4, 0x19, 0xba, 0x80, 0xc8, 0x39, 0x32, 0x7f, 0x0f, 0xc7, 0x84, 0xbb, 0x43, 0x56, + /* (2^313)P */ 0x9b, 0x07, 0xb4, 0x42, 0xa9, 0xa0, 0x78, 0x4f, 0x28, 0x70, 0x2b, 0x7e, 0x61, 0xe0, 0xdd, 0x02, 0x98, 0xfc, 0xed, 0x31, 0x80, 0xf1, 0x15, 0x52, 0x89, 0x23, 0xcd, 0x5d, 0x2b, 0xc5, 0x19, 0x32, 0xfb, 0x70, 0x50, 0x7a, 0x97, 0x6b, 0x42, 0xdb, 0xca, 0xdb, 0xc4, 0x59, 0x99, 0xe0, 0x12, 0x1f, 0x17, 0xba, 0x8b, 0xf0, 0xc4, 0x38, 0x5d, 0x27, + /* (2^314)P */ 0x29, 0x1d, 0xdc, 0x2b, 0xf6, 0x5b, 0x04, 0x61, 0x36, 0x76, 0xa0, 0x56, 0x36, 0x6e, 0xd7, 0x24, 0x4d, 0xe7, 0xef, 0x44, 0xd2, 0xd5, 0x07, 0xcd, 0xc4, 0x9d, 0x80, 0x48, 0xc3, 0x38, 0xcf, 0xd8, 0xa3, 0xdd, 0xb2, 0x5e, 0xb5, 0x70, 0x15, 0xbb, 0x36, 0x85, 0x8a, 0xd7, 0xfb, 0x56, 0x94, 0x73, 0x9c, 0x81, 0xbe, 0xb1, 0x44, 0x28, 0xf1, 0x37, + /* (2^315)P */ 0xbf, 0xcf, 0x5c, 0xd2, 0xe2, 0xea, 0xc2, 0xcd, 0x70, 0x7a, 0x9d, 0xcb, 0x81, 0xc1, 0xe9, 0xf1, 0x56, 0x71, 0x52, 0xf7, 0x1b, 0x87, 0xc6, 0xd8, 0xcc, 0xb2, 0x69, 0xf3, 0xb0, 0xbd, 0xba, 0x83, 0x12, 0x26, 0xc4, 0xce, 0x72, 0xde, 0x3b, 0x21, 0x28, 0x9e, 0x5a, 0x94, 0xf5, 0x04, 0xa3, 0xc8, 0x0f, 0x5e, 0xbc, 0x71, 0xf9, 0x0d, 0xce, 0xf5, + /* (2^316)P */ 0x93, 0x97, 0x00, 0x85, 0xf4, 0xb4, 0x40, 0xec, 0xd9, 0x2b, 0x6c, 0xd6, 0x63, 0x9e, 0x93, 0x0a, 0x5a, 0xf4, 0xa7, 0x9a, 0xe3, 0x3c, 0xf0, 0x55, 0xd1, 0x96, 0x6c, 0xf5, 0x2a, 0xce, 0xd7, 0x95, 0x72, 0xbf, 0xc5, 0x0c, 0xce, 0x79, 0xa2, 0x0a, 0x78, 0xe0, 0x72, 0xd0, 0x66, 0x28, 0x05, 0x75, 0xd3, 0x23, 0x09, 0x91, 0xed, 0x7e, 0xc4, 0xbc, + /* (2^317)P */ 0x77, 0xc2, 0x9a, 0xf7, 0xa6, 0xe6, 0x18, 0xb4, 0xe7, 0xf6, 0xda, 0xec, 0x44, 0x6d, 0xfb, 0x08, 0xee, 0x65, 0xa8, 0x92, 0x85, 0x1f, 0xba, 0x38, 0x93, 0x20, 0x5c, 0x4d, 0xd2, 0x18, 0x0f, 0x24, 0xbe, 0x1a, 0x96, 0x44, 0x7d, 0xeb, 0xb3, 0xda, 0x95, 0xf4, 0xaf, 0x6c, 0x06, 0x0f, 0x47, 0x37, 0xc8, 0x77, 0x63, 0xe1, 0x29, 0xef, 0xff, 0xa5, + /* (2^318)P */ 0x16, 0x12, 0xd9, 0x47, 0x90, 0x22, 0x9b, 0x05, 0xf2, 0xa5, 0x9a, 0xae, 0x83, 0x98, 0xb5, 0xac, 0xab, 0x29, 0xaa, 0xdc, 0x5f, 0xde, 0xcd, 0xf7, 0x42, 0xad, 0x3b, 0x96, 0xd6, 0x3e, 0x6e, 0x52, 0x47, 0xb1, 0xab, 0x51, 0xde, 0x49, 0x7c, 0x87, 0x8d, 0x86, 0xe2, 0x70, 0x13, 0x21, 0x51, 0x1c, 0x0c, 0x25, 0xc1, 0xb0, 0xe6, 0x19, 0xcf, 0x12, + /* (2^319)P */ 0xf0, 0xbc, 0x97, 0x8f, 0x4b, 0x2f, 0xd1, 0x1f, 0x8c, 0x57, 0xed, 0x3c, 0xf4, 0x26, 0x19, 0xbb, 0x60, 0xca, 0x24, 0xc5, 0xd9, 0x97, 0xe2, 0x5f, 0x76, 0x49, 0x39, 0x7e, 0x2d, 0x12, 0x21, 0x98, 0xda, 0xe6, 0xdb, 0xd2, 0xd8, 0x9f, 0x18, 0xd8, 0x83, 0x6c, 0xba, 0x89, 0x8d, 0x29, 0xfa, 0x46, 0x33, 0x8c, 0x28, 0xdf, 0x6a, 0xb3, 0x69, 0x28, + /* (2^320)P */ 0x86, 0x17, 0xbc, 0xd6, 0x7c, 0xba, 
0x1e, 0x83, 0xbb, 0x84, 0xb5, 0x8c, 0xad, 0xdf, 0xa1, 0x24, 0x81, 0x70, 0x40, 0x0f, 0xad, 0xad, 0x3b, 0x23, 0xd0, 0x93, 0xa0, 0x49, 0x5c, 0x4b, 0x51, 0xbe, 0x20, 0x49, 0x4e, 0xda, 0x2d, 0xd3, 0xad, 0x1b, 0x74, 0x08, 0x41, 0xf0, 0xef, 0x19, 0xe9, 0x45, 0x5d, 0x02, 0xae, 0x26, 0x25, 0xd9, 0xd1, 0xc2, + /* (2^321)P */ 0x48, 0x81, 0x3e, 0xb2, 0x83, 0xf8, 0x4d, 0xb3, 0xd0, 0x4c, 0x75, 0xb3, 0xa0, 0x52, 0x26, 0xf2, 0xaf, 0x5d, 0x36, 0x70, 0x72, 0xd6, 0xb7, 0x88, 0x08, 0x69, 0xbd, 0x15, 0x25, 0xb1, 0x45, 0x1b, 0xb7, 0x0b, 0x5f, 0x71, 0x5d, 0x83, 0x49, 0xb9, 0x84, 0x3b, 0x7c, 0xc1, 0x50, 0x93, 0x05, 0x53, 0xe0, 0x61, 0xea, 0xc1, 0xef, 0xdb, 0x82, 0x97, + /* (2^322)P */ 0x00, 0xd5, 0xc3, 0x3a, 0x4d, 0x8a, 0x23, 0x7a, 0xef, 0xff, 0x37, 0xef, 0xf3, 0xbc, 0xa9, 0xb6, 0xae, 0xd7, 0x3a, 0x7b, 0xfd, 0x3e, 0x8e, 0x9b, 0xab, 0x44, 0x54, 0x60, 0x28, 0x6c, 0xbf, 0x15, 0x24, 0x4a, 0x56, 0x60, 0x7f, 0xa9, 0x7a, 0x28, 0x59, 0x2c, 0x8a, 0xd1, 0x7d, 0x6b, 0x00, 0xfd, 0xa5, 0xad, 0xbc, 0x19, 0x3f, 0xcb, 0x73, 0xe0, + /* (2^323)P */ 0xcf, 0x9e, 0x66, 0x06, 0x4d, 0x2b, 0xf5, 0x9c, 0xc2, 0x9d, 0x9e, 0xed, 0x5a, 0x5c, 0x2d, 0x00, 0xbf, 0x29, 0x90, 0x88, 0xe4, 0x5d, 0xfd, 0xe2, 0xf0, 0x38, 0xec, 0x4d, 0x26, 0xea, 0x54, 0xf0, 0x3c, 0x84, 0x10, 0x6a, 0xf9, 0x66, 0x9c, 0xe7, 0x21, 0xfd, 0x0f, 0xc7, 0x13, 0x50, 0x81, 0xb6, 0x50, 0xf9, 0x04, 0x7f, 0xa4, 0x37, 0x85, 0x14, + /* (2^324)P */ 0xdb, 0x87, 0x49, 0xc7, 0xa8, 0x39, 0x0c, 0x32, 0x98, 0x0c, 0xb9, 0x1a, 0x1b, 0x4d, 0xe0, 0x8a, 0x9a, 0x8e, 0x8f, 0xab, 0x5a, 0x17, 0x3d, 0x04, 0x21, 0xce, 0x3e, 0x2c, 0xf9, 0xa3, 0x97, 0xe4, 0x77, 0x95, 0x0e, 0xb6, 0xa5, 0x15, 0xad, 0x3a, 0x1e, 0x46, 0x53, 0x17, 0x09, 0x83, 0x71, 0x4e, 0x86, 0x38, 0xd5, 0x23, 0x44, 0x16, 0x8d, 0xc8, + /* (2^325)P */ 0x05, 0x5e, 0x99, 0x08, 0xbb, 0xc3, 0xc0, 0xb7, 0x6c, 0x12, 0xf2, 0xf3, 0xf4, 0x7c, 0x6a, 0x4d, 0x9e, 0xeb, 0x3d, 0xb9, 0x63, 0x94, 0xce, 0x81, 0xd8, 0x11, 0xcb, 0x55, 0x69, 0x4a, 0x20, 0x0b, 0x4c, 0x2e, 0x14, 0xb8, 0xd4, 0x6a, 0x7c, 0xf0, 0xed, 0xfc, 0x8f, 0xef, 0xa0, 0xeb, 0x6c, 0x01, 0xe2, 0xdc, 0x10, 0x22, 0xa2, 0x01, 0x85, 0x64, + /* (2^326)P */ 0x58, 0xe1, 0x9c, 0x27, 0x55, 0xc6, 0x25, 0xa6, 0x7d, 0x67, 0x88, 0x65, 0x99, 0x6c, 0xcb, 0xdb, 0x27, 0x4f, 0x44, 0x29, 0xf5, 0x4a, 0x23, 0x10, 0xbc, 0x03, 0x3f, 0x36, 0x1e, 0xef, 0xb0, 0xba, 0x75, 0xe8, 0x74, 0x5f, 0x69, 0x3e, 0x26, 0x40, 0xb4, 0x2f, 0xdc, 0x43, 0xbf, 0xa1, 0x8b, 0xbd, 0xca, 0x6e, 0xc1, 0x6e, 0x21, 0x79, 0xa0, 0xd0, + /* (2^327)P */ 0x78, 0x93, 0x4a, 0x2d, 0x22, 0x6e, 0x6e, 0x7d, 0x74, 0xd2, 0x66, 0x58, 0xce, 0x7b, 0x1d, 0x97, 0xb1, 0xf2, 0xda, 0x1c, 0x79, 0xfb, 0xba, 0xd1, 0xc0, 0xc5, 0x6e, 0xc9, 0x11, 0x89, 0xd2, 0x41, 0x8d, 0x70, 0xb9, 0xcc, 0xea, 0x6a, 0xb3, 0x45, 0xb6, 0x05, 0x2e, 0xf2, 0x17, 0xf1, 0x27, 0xb8, 0xed, 0x06, 0x1f, 0xdb, 0x9d, 0x1f, 0x69, 0x28, + /* (2^328)P */ 0x93, 0x12, 0xa8, 0x11, 0xe1, 0x92, 0x30, 0x8d, 0xac, 0xe1, 0x1c, 0x60, 0x7c, 0xed, 0x2d, 0x2e, 0xd3, 0x03, 0x5c, 0x9c, 0xc5, 0xbd, 0x64, 0x4a, 0x8c, 0xba, 0x76, 0xfe, 0xc6, 0xc1, 0xea, 0xc2, 0x4f, 0xbe, 0x70, 0x3d, 0x64, 0xcf, 0x8e, 0x18, 0xcb, 0xcd, 0x57, 0xa7, 0xf7, 0x36, 0xa9, 0x6b, 0x3e, 0xb8, 0x69, 0xee, 0x47, 0xa2, 0x7e, 0xb2, + /* (2^329)P */ 0x96, 0xaf, 0x3a, 0xf5, 0xed, 0xcd, 0xaf, 0xf7, 0x82, 0xaf, 0x59, 0x62, 0x0b, 0x36, 0x85, 0xf9, 0xaf, 0xd6, 0x38, 0xff, 0x87, 0x2e, 0x1d, 0x6c, 0x8b, 0xaf, 0x3b, 0xdf, 0x28, 0xa2, 0xd6, 0x4d, 0x80, 0x92, 0xc3, 0x0f, 0x34, 0xa8, 0xae, 0x69, 0x5d, 0x7b, 0x9d, 0xbc, 0xf5, 0xfd, 0x1d, 0xb1, 0x96, 0x55, 0x86, 0xe1, 0x5c, 0xb6, 0xac, 0xb9, + /* (2^330)P */ 0x50, 0x9e, 0x37, 0x28, 0x7d, 0xa8, 0x33, 0x63, 0xda, 0x3f, 
0x20, 0x98, 0x0e, 0x09, 0xa8, 0x77, 0x3b, 0x7a, 0xfc, 0x16, 0x85, 0x44, 0x64, 0x77, 0x65, 0x68, 0x92, 0x41, 0xc6, 0x1f, 0xdf, 0x27, 0xf9, 0xec, 0xa0, 0x61, 0x22, 0xea, 0x19, 0xe7, 0x75, 0x8b, 0x4e, 0xe5, 0x0f, 0xb7, 0xf7, 0xd2, 0x53, 0xf4, 0xdd, 0x4a, 0xaa, 0x78, 0x40, 0xb7, + /* (2^331)P */ 0xd4, 0x89, 0xe3, 0x79, 0xba, 0xb6, 0xc3, 0xda, 0xe6, 0x78, 0x65, 0x7d, 0x6e, 0x22, 0x62, 0xb1, 0x3d, 0xea, 0x90, 0x84, 0x30, 0x5e, 0xd4, 0x39, 0x84, 0x78, 0xd9, 0x75, 0xd6, 0xce, 0x2a, 0x11, 0x29, 0x69, 0xa4, 0x5e, 0xaa, 0x2a, 0x98, 0x5a, 0xe5, 0x91, 0x8f, 0xb2, 0xfb, 0xda, 0x97, 0xe8, 0x83, 0x6f, 0x04, 0xb9, 0x5d, 0xaf, 0xe1, 0x9b, + /* (2^332)P */ 0x8b, 0xe4, 0xe1, 0x48, 0x9c, 0xc4, 0x83, 0x89, 0xdf, 0x65, 0xd3, 0x35, 0x55, 0x13, 0xf4, 0x1f, 0x36, 0x92, 0x33, 0x38, 0xcb, 0xed, 0x15, 0xe6, 0x60, 0x2d, 0x25, 0xf5, 0x36, 0x60, 0x3a, 0x37, 0x9b, 0x71, 0x9d, 0x42, 0xb0, 0x14, 0xc8, 0xba, 0x62, 0xa3, 0x49, 0xb0, 0x88, 0xc1, 0x72, 0x73, 0xdd, 0x62, 0x40, 0xa9, 0x62, 0x88, 0x99, 0xca, + /* (2^333)P */ 0x47, 0x7b, 0xea, 0xda, 0x46, 0x2f, 0x45, 0xc6, 0xe3, 0xb4, 0x4d, 0x8d, 0xac, 0x0b, 0x54, 0x22, 0x06, 0x31, 0x16, 0x66, 0x3e, 0xe4, 0x38, 0x12, 0xcd, 0xf3, 0xe7, 0x99, 0x37, 0xd9, 0x62, 0x24, 0x4b, 0x05, 0xf2, 0x58, 0xe6, 0x29, 0x4b, 0x0d, 0xf6, 0xc1, 0xba, 0xa0, 0x1e, 0x0f, 0xcb, 0x1f, 0xc6, 0x2b, 0x19, 0xfc, 0x82, 0x01, 0xd0, 0x86, + /* (2^334)P */ 0xa2, 0xae, 0x77, 0x20, 0xfb, 0xa8, 0x18, 0xb4, 0x61, 0xef, 0xe8, 0x52, 0x79, 0xbb, 0x86, 0x90, 0x5d, 0x2e, 0x76, 0xed, 0x66, 0x60, 0x5d, 0x00, 0xb5, 0xa4, 0x00, 0x40, 0x89, 0xec, 0xd1, 0xd2, 0x0d, 0x26, 0xb9, 0x30, 0xb2, 0xd2, 0xb8, 0xe8, 0x0e, 0x56, 0xf9, 0x67, 0x94, 0x2e, 0x62, 0xe1, 0x79, 0x48, 0x2b, 0xa9, 0xfa, 0xea, 0xdb, 0x28, + /* (2^335)P */ 0x35, 0xf1, 0xb0, 0x43, 0xbd, 0x27, 0xef, 0x18, 0x44, 0xa2, 0x04, 0xb4, 0x69, 0xa1, 0x97, 0x1f, 0x8c, 0x04, 0x82, 0x9b, 0x00, 0x6d, 0xf8, 0xbf, 0x7d, 0xc1, 0x5b, 0xab, 0xe8, 0xb2, 0x34, 0xbd, 0xaf, 0x7f, 0xb2, 0x0d, 0xf3, 0xed, 0xfc, 0x5b, 0x50, 0xee, 0xe7, 0x4a, 0x20, 0xd9, 0xf5, 0xc6, 0x9a, 0x97, 0x6d, 0x07, 0x2f, 0xb9, 0x31, 0x02, + /* (2^336)P */ 0xf9, 0x54, 0x4a, 0xc5, 0x61, 0x7e, 0x1d, 0xa6, 0x0e, 0x1a, 0xa8, 0xd3, 0x8c, 0x36, 0x7d, 0xf1, 0x06, 0xb1, 0xac, 0x93, 0xcd, 0xe9, 0x8f, 0x61, 0x6c, 0x5d, 0x03, 0x23, 0xdf, 0x85, 0x53, 0x39, 0x63, 0x5e, 0xeb, 0xf3, 0xd3, 0xd3, 0x75, 0x97, 0x9b, 0x62, 0x9b, 0x01, 0xb3, 0x19, 0xd8, 0x2b, 0x36, 0xf2, 0x2c, 0x2c, 0x6f, 0x36, 0xc6, 0x3c, + /* (2^337)P */ 0x05, 0x74, 0x43, 0x10, 0xb6, 0xb0, 0xf8, 0xbf, 0x02, 0x46, 0x9a, 0xee, 0xc1, 0xaf, 0xc1, 0xe5, 0x5a, 0x2e, 0xbb, 0xe1, 0xdc, 0xc6, 0xce, 0x51, 0x29, 0x50, 0xbf, 0x1b, 0xde, 0xff, 0xba, 0x4d, 0x8d, 0x8b, 0x7e, 0xe7, 0xbd, 0x5b, 0x8f, 0xbe, 0xe3, 0x75, 0x71, 0xff, 0x37, 0x05, 0x5a, 0x10, 0xeb, 0x54, 0x7e, 0x44, 0x72, 0x2c, 0xd4, 0xfc, + /* (2^338)P */ 0x03, 0x12, 0x1c, 0xb2, 0x08, 0x90, 0xa1, 0x2d, 0x50, 0xa0, 0xad, 0x7f, 0x8d, 0xa6, 0x97, 0xc1, 0xbd, 0xdc, 0xc3, 0xa7, 0xad, 0x31, 0xdf, 0xb8, 0x03, 0x84, 0xc3, 0xb9, 0x29, 0x3d, 0x92, 0x2e, 0xc3, 0x90, 0x07, 0xe8, 0xa7, 0xc7, 0xbc, 0x61, 0xe9, 0x3e, 0xa0, 0x35, 0xda, 0x1d, 0xab, 0x48, 0xfe, 0x50, 0xc9, 0x25, 0x59, 0x23, 0x69, 0x3f, + /* (2^339)P */ 0x8e, 0x91, 0xab, 0x6b, 0x91, 0x4f, 0x89, 0x76, 0x67, 0xad, 0xb2, 0x65, 0x9d, 0xad, 0x02, 0x36, 0xdc, 0xac, 0x96, 0x93, 0x97, 0x21, 0x14, 0xd0, 0xe8, 0x11, 0x60, 0x1e, 0xeb, 0x96, 0x06, 0xf2, 0x53, 0xf2, 0x6d, 0xb7, 0x93, 0x6f, 0x26, 0x91, 0x23, 0xe3, 0x34, 0x04, 0x92, 0x91, 0x37, 0x08, 0x50, 0xd6, 0x28, 0x09, 0x27, 0xa1, 0x0c, 0x00, + /* (2^340)P */ 0x1f, 0xbb, 0x21, 0x26, 0x33, 0xcb, 0xa4, 0xd1, 0xee, 0x85, 0xf9, 0xd9, 0x3c, 0x90, 
0xc3, 0xd1, 0x26, 0xa2, 0x25, 0x93, 0x43, 0x61, 0xed, 0x91, 0x6e, 0x54, 0x03, 0x2e, 0x42, 0x9d, 0xf7, 0xa6, 0x02, 0x0f, 0x2f, 0x9c, 0x7a, 0x8d, 0x12, 0xc2, 0x18, 0xfc, 0x41, 0xff, 0x85, 0x26, 0x1a, 0x44, 0x55, 0x0b, 0x89, 0xab, 0x6f, 0x62, 0x33, 0x8c, + /* (2^341)P */ 0xe0, 0x3c, 0x5d, 0x70, 0x64, 0x87, 0x81, 0x35, 0xf2, 0x37, 0xa6, 0x24, 0x3e, 0xe0, 0x62, 0xd5, 0x71, 0xe7, 0x93, 0xfb, 0xac, 0xc3, 0xe7, 0xc7, 0x04, 0xe2, 0x70, 0xd3, 0x29, 0x5b, 0x21, 0xbf, 0xf4, 0x26, 0x5d, 0xf3, 0x95, 0xb4, 0x2a, 0x6a, 0x07, 0x55, 0xa6, 0x4b, 0x3b, 0x15, 0xf2, 0x25, 0x8a, 0x95, 0x3f, 0x63, 0x2f, 0x7a, 0x23, 0x96, + /* (2^342)P */ 0x0d, 0x3d, 0xd9, 0x13, 0xa7, 0xb3, 0x5e, 0x67, 0xf7, 0x02, 0x23, 0xee, 0x84, 0xff, 0x99, 0xda, 0xb9, 0x53, 0xf8, 0xf0, 0x0e, 0x39, 0x2f, 0x3c, 0x64, 0x34, 0xe3, 0x09, 0xfd, 0x2b, 0x33, 0xc7, 0xfe, 0x62, 0x2b, 0x84, 0xdf, 0x2b, 0xd2, 0x7c, 0x26, 0x01, 0x70, 0x66, 0x5b, 0x85, 0xc2, 0xbe, 0x88, 0x37, 0xf1, 0x30, 0xac, 0xb8, 0x76, 0xa3, + /* (2^343)P */ 0x6e, 0x01, 0xf0, 0x55, 0x35, 0xe4, 0xbd, 0x43, 0x62, 0x9d, 0xd6, 0x11, 0xef, 0x6f, 0xb8, 0x8c, 0xaa, 0x98, 0x87, 0xc6, 0x6d, 0xc4, 0xcc, 0x74, 0x92, 0x53, 0x4a, 0xdf, 0xe4, 0x08, 0x89, 0x17, 0xd0, 0x0f, 0xf4, 0x00, 0x60, 0x78, 0x08, 0x44, 0xb5, 0xda, 0x18, 0xed, 0x98, 0xc8, 0x61, 0x3d, 0x39, 0xdb, 0xcf, 0x1d, 0x49, 0x40, 0x65, 0x75, + /* (2^344)P */ 0x8e, 0x10, 0xae, 0x5f, 0x06, 0xd2, 0x95, 0xfd, 0x20, 0x16, 0x49, 0x5b, 0x57, 0xbe, 0x22, 0x8b, 0x43, 0xfb, 0xe6, 0xcc, 0x26, 0xa5, 0x5d, 0xd3, 0x68, 0xc5, 0xf9, 0x5a, 0x86, 0x24, 0x87, 0x27, 0x05, 0xfd, 0xe2, 0xff, 0xb3, 0xa3, 0x7b, 0x37, 0x59, 0xc5, 0x4e, 0x14, 0x94, 0xf9, 0x3b, 0xcb, 0x7c, 0xed, 0xca, 0x1d, 0xb2, 0xac, 0x05, 0x4a, + /* (2^345)P */ 0xf4, 0xd1, 0x81, 0xeb, 0x89, 0xbf, 0xfe, 0x1e, 0x41, 0x92, 0x29, 0xee, 0xe1, 0x43, 0xf5, 0x86, 0x1d, 0x2f, 0xbb, 0x1e, 0x84, 0x5d, 0x7b, 0x8d, 0xd5, 0xda, 0xee, 0x1e, 0x8a, 0xd0, 0x27, 0xf2, 0x60, 0x51, 0x59, 0x82, 0xf4, 0x84, 0x2b, 0x5b, 0x14, 0x2d, 0x81, 0x82, 0x3e, 0x2b, 0xb4, 0x6d, 0x51, 0x4f, 0xc5, 0xcb, 0xbf, 0x74, 0xe3, 0xb4, + /* (2^346)P */ 0x19, 0x2f, 0x22, 0xb3, 0x04, 0x5f, 0x81, 0xca, 0x05, 0x60, 0xb9, 0xaa, 0xee, 0x0e, 0x2f, 0x48, 0x38, 0xf9, 0x91, 0xb4, 0x66, 0xe4, 0x57, 0x28, 0x54, 0x10, 0xe9, 0x61, 0x9d, 0xd4, 0x90, 0x75, 0xb1, 0x39, 0x23, 0xb6, 0xfc, 0x82, 0xe0, 0xfa, 0xbb, 0x5c, 0x6e, 0xc3, 0x44, 0x13, 0x00, 0x83, 0x55, 0x9e, 0x8e, 0x10, 0x61, 0x81, 0x91, 0x04, + /* (2^347)P */ 0x5f, 0x2a, 0xd7, 0x81, 0xd9, 0x9c, 0xbb, 0x79, 0xbc, 0x62, 0x56, 0x98, 0x03, 0x5a, 0x18, 0x85, 0x2a, 0x9c, 0xd0, 0xfb, 0xd2, 0xb1, 0xaf, 0xef, 0x0d, 0x24, 0xc5, 0xfa, 0x39, 0xbb, 0x6b, 0xed, 0xa4, 0xdf, 0xe4, 0x87, 0xcd, 0x41, 0xd3, 0x72, 0x32, 0xc6, 0x28, 0x21, 0xb1, 0xba, 0x8b, 0xa3, 0x91, 0x79, 0x76, 0x22, 0x25, 0x10, 0x61, 0xd1, + /* (2^348)P */ 0x73, 0xb5, 0x32, 0x97, 0xdd, 0xeb, 0xdd, 0x22, 0x22, 0xf1, 0x33, 0x3c, 0x77, 0x56, 0x7d, 0x6b, 0x48, 0x2b, 0x05, 0x81, 0x03, 0x03, 0x91, 0x9a, 0xe3, 0x5e, 0xd4, 0xee, 0x3f, 0xf8, 0xbb, 0x50, 0x21, 0x32, 0x4c, 0x4a, 0x58, 0x49, 0xde, 0x0c, 0xde, 0x30, 0x82, 0x3d, 0x92, 0xf0, 0x6c, 0xcc, 0x32, 0x3e, 0xd2, 0x78, 0x8a, 0x6e, 0x2c, 0xd0, + /* (2^349)P */ 0xf0, 0xf7, 0xa1, 0x0b, 0xc1, 0x74, 0x85, 0xa8, 0xe9, 0xdd, 0x48, 0xa1, 0xc0, 0x16, 0xd8, 0x2b, 0x61, 0x08, 0xc2, 0x2b, 0x30, 0x26, 0x79, 0xce, 0x9e, 0xfd, 0x39, 0xd7, 0x81, 0xa4, 0x63, 0x8c, 0xd5, 0x74, 0xa0, 0x88, 0xfa, 0x03, 0x30, 0xe9, 0x7f, 0x2b, 0xc6, 0x02, 0xc9, 0x5e, 0xe4, 0xd5, 0x4d, 0x92, 0xd0, 0xf6, 0xf2, 0x5b, 0x79, 0x08, + /* (2^350)P */ 0x34, 0x89, 0x81, 0x43, 0xd1, 0x94, 0x2c, 0x10, 0x54, 0x9b, 0xa0, 0xe5, 0x44, 0xe8, 0xc2, 0x2f, 0x3e, 0x0e, 
0x74, 0xae, 0xba, 0xe2, 0xac, 0x85, 0x6b, 0xd3, 0x5c, 0x97, 0xf7, 0x90, 0xf1, 0x12, 0xc0, 0x03, 0xc8, 0x1f, 0x37, 0x72, 0x8c, 0x9b, 0x9c, 0x17, 0x96, 0x9d, 0xc7, 0xbf, 0xa3, 0x3f, 0x44, 0x3d, 0x87, 0x81, 0xbd, 0x81, 0xa6, 0x5f, + /* (2^351)P */ 0xe4, 0xff, 0x78, 0x62, 0x82, 0x5b, 0x76, 0x58, 0xf5, 0x5b, 0xa6, 0xc4, 0x53, 0x11, 0x3b, 0x7b, 0xaa, 0x67, 0xf8, 0xea, 0x3b, 0x5d, 0x9a, 0x2e, 0x04, 0xeb, 0x4a, 0x24, 0xfb, 0x56, 0xf0, 0xa8, 0xd4, 0x14, 0xed, 0x0f, 0xfd, 0xc5, 0x26, 0x17, 0x2a, 0xf0, 0xb9, 0x13, 0x8c, 0xbd, 0x65, 0x14, 0x24, 0x95, 0x27, 0x12, 0x63, 0x2a, 0x09, 0x18, + /* (2^352)P */ 0xe1, 0x5c, 0xe7, 0xe0, 0x00, 0x6a, 0x96, 0xf2, 0x49, 0x6a, 0x39, 0xa5, 0xe0, 0x17, 0x79, 0x4a, 0x63, 0x07, 0x62, 0x09, 0x61, 0x1b, 0x6e, 0xa9, 0xb5, 0x62, 0xb7, 0xde, 0xdf, 0x80, 0x4c, 0x5a, 0x99, 0x73, 0x59, 0x9d, 0xfb, 0xb1, 0x5e, 0xbe, 0xb8, 0xb7, 0x63, 0x93, 0xe8, 0xad, 0x5e, 0x1f, 0xae, 0x59, 0x1c, 0xcd, 0xb4, 0xc2, 0xb3, 0x8a, + /* (2^353)P */ 0x78, 0x53, 0xa1, 0x4c, 0x70, 0x9c, 0x63, 0x7e, 0xb3, 0x12, 0x40, 0x5f, 0xbb, 0x23, 0xa7, 0xf7, 0x77, 0x96, 0x5b, 0x4d, 0x91, 0x10, 0x52, 0x85, 0x9e, 0xa5, 0x38, 0x0b, 0xfd, 0x25, 0x01, 0x4b, 0xfa, 0x4d, 0xd3, 0x3f, 0x78, 0x74, 0x42, 0xff, 0x62, 0x2d, 0x27, 0xdc, 0x9d, 0xd1, 0x29, 0x76, 0x2e, 0x78, 0xb3, 0x35, 0xfa, 0x15, 0xd5, 0x38, + /* (2^354)P */ 0x8b, 0xc7, 0x43, 0xce, 0xf0, 0x5e, 0xf1, 0x0d, 0x02, 0x38, 0xe8, 0x82, 0xc9, 0x25, 0xad, 0x2d, 0x27, 0xa4, 0x54, 0x18, 0xb2, 0x30, 0x73, 0xa4, 0x41, 0x08, 0xe4, 0x86, 0xe6, 0x8c, 0xe9, 0x2a, 0x34, 0xb3, 0xd6, 0x61, 0x8f, 0x66, 0x26, 0x08, 0xb6, 0x06, 0x33, 0xaa, 0x12, 0xac, 0x72, 0xec, 0x2e, 0x52, 0xa3, 0x25, 0x3e, 0xd7, 0x62, 0xe8, + /* (2^355)P */ 0xc4, 0xbb, 0x89, 0xc8, 0x40, 0xcc, 0x84, 0xec, 0x4a, 0xd9, 0xc4, 0x55, 0x78, 0x00, 0xcf, 0xd8, 0xe9, 0x24, 0x59, 0xdc, 0x5e, 0xf0, 0x66, 0xa1, 0x83, 0xae, 0x97, 0x18, 0xc5, 0x54, 0x27, 0xa2, 0x21, 0x52, 0x03, 0x31, 0x5b, 0x11, 0x67, 0xf6, 0x12, 0x00, 0x87, 0x2f, 0xff, 0x59, 0x70, 0x8f, 0x6d, 0x71, 0xab, 0xab, 0x24, 0xb8, 0xba, 0x35, + /* (2^356)P */ 0x69, 0x43, 0xa7, 0x14, 0x06, 0x96, 0xe9, 0xc2, 0xe3, 0x2b, 0x45, 0x22, 0xc0, 0xd0, 0x2f, 0x34, 0xd1, 0x01, 0x99, 0xfc, 0x99, 0x38, 0xa1, 0x25, 0x2e, 0x59, 0x6c, 0x27, 0xc9, 0xeb, 0x7b, 0xdc, 0x4e, 0x26, 0x68, 0xba, 0xfa, 0xec, 0x02, 0x05, 0x64, 0x80, 0x30, 0x20, 0x5c, 0x26, 0x7f, 0xaf, 0x95, 0x17, 0x3d, 0x5c, 0x9e, 0x96, 0x96, 0xaf, + /* (2^357)P */ 0xa6, 0xba, 0x21, 0x29, 0x32, 0xe2, 0x98, 0xde, 0x9b, 0x6d, 0x0b, 0x44, 0x91, 0xa8, 0x3e, 0xd4, 0xb8, 0x04, 0x6c, 0xf6, 0x04, 0x39, 0xbd, 0x52, 0x05, 0x15, 0x27, 0x78, 0x8e, 0x55, 0xac, 0x79, 0xc5, 0xe6, 0x00, 0x7f, 0x90, 0xa2, 0xdd, 0x07, 0x13, 0xe0, 0x24, 0x70, 0x5c, 0x0f, 0x4d, 0xa9, 0xf9, 0xae, 0xcb, 0x34, 0x10, 0x9d, 0x89, 0x9d, + /* (2^358)P */ 0x12, 0xe0, 0xb3, 0x9f, 0xc4, 0x96, 0x1d, 0xcf, 0xed, 0x99, 0x64, 0x28, 0x8d, 0xc7, 0x31, 0x82, 0xee, 0x5e, 0x75, 0x48, 0xff, 0x3a, 0xf2, 0x09, 0x34, 0x03, 0x93, 0x52, 0x19, 0xb2, 0xc5, 0x81, 0x93, 0x45, 0x5e, 0x59, 0x21, 0x2b, 0xec, 0x89, 0xba, 0x36, 0x6e, 0xf9, 0x82, 0x75, 0x7e, 0x82, 0x3f, 0xaa, 0xe2, 0xe3, 0x3b, 0x94, 0xfd, 0x98, + /* (2^359)P */ 0x7c, 0xdb, 0x75, 0x31, 0x61, 0xfb, 0x15, 0x28, 0x94, 0xd7, 0xc3, 0x5a, 0xa9, 0xa1, 0x0a, 0x66, 0x0f, 0x2b, 0x13, 0x3e, 0x42, 0xb5, 0x28, 0x3a, 0xca, 0x83, 0xf3, 0x61, 0x22, 0xf4, 0x40, 0xc5, 0xdf, 0xe7, 0x31, 0x9f, 0x7e, 0x51, 0x75, 0x06, 0x9d, 0x51, 0xc8, 0xe7, 0x9f, 0xc3, 0x71, 0x4f, 0x3d, 0x5b, 0xfb, 0xe9, 0x8e, 0x08, 0x40, 0x8e, + /* (2^360)P */ 0xf7, 0x31, 0xad, 0x50, 0x5d, 0x25, 0x93, 0x73, 0x68, 0xf6, 0x7c, 0x89, 0x5a, 0x3d, 0x9f, 0x9b, 0x05, 0x82, 0xe7, 0x70, 0x4b, 0x19, 
0xaa, 0xcf, 0xff, 0xde, 0x50, 0x8f, 0x2f, 0x69, 0xd3, 0xf0, 0x99, 0x51, 0x6b, 0x9d, 0xb6, 0x56, 0x6f, 0xf8, 0x4c, 0x74, 0x8b, 0x4c, 0x91, 0xf9, 0xa9, 0xb1, 0x3e, 0x07, 0xdf, 0x0b, 0x27, 0x8a, 0xb1, 0xed, + /* (2^361)P */ 0xfb, 0x67, 0xd9, 0x48, 0xd2, 0xe4, 0x44, 0x9b, 0x43, 0x15, 0x8a, 0xeb, 0x00, 0x53, 0xad, 0x25, 0xc7, 0x7e, 0x19, 0x30, 0x87, 0xb7, 0xd5, 0x5f, 0x04, 0xf8, 0xaa, 0xdd, 0x57, 0xae, 0x34, 0x75, 0xe2, 0x84, 0x4b, 0x54, 0x60, 0x37, 0x95, 0xe4, 0xd3, 0xec, 0xac, 0xef, 0x47, 0x31, 0xa3, 0xc8, 0x31, 0x22, 0xdb, 0x26, 0xe7, 0x6a, 0xb5, 0xad, + /* (2^362)P */ 0x44, 0x09, 0x5c, 0x95, 0xe4, 0x72, 0x3c, 0x1a, 0xd1, 0xac, 0x42, 0x51, 0x99, 0x6f, 0xfa, 0x1f, 0xf2, 0x22, 0xbe, 0xff, 0x7b, 0x66, 0xf5, 0x6c, 0xb3, 0x66, 0xc7, 0x4d, 0x78, 0x31, 0x83, 0x80, 0xf5, 0x41, 0xe9, 0x7f, 0xbe, 0xf7, 0x23, 0x49, 0x6b, 0x84, 0x4e, 0x7e, 0x47, 0x07, 0x6e, 0x74, 0xdf, 0xe5, 0x9d, 0x9e, 0x56, 0x2a, 0xc0, 0xbc, + /* (2^363)P */ 0xac, 0x10, 0x80, 0x8c, 0x7c, 0xfa, 0x83, 0xdf, 0xb3, 0xd0, 0xc4, 0xbe, 0xfb, 0x9f, 0xac, 0xc9, 0xc3, 0x40, 0x95, 0x0b, 0x09, 0x23, 0xda, 0x63, 0x67, 0xcf, 0xe7, 0x9f, 0x7d, 0x7b, 0x6b, 0xe2, 0xe6, 0x6d, 0xdb, 0x87, 0x9e, 0xa6, 0xff, 0x6d, 0xab, 0xbd, 0xfb, 0x54, 0x84, 0x68, 0xcf, 0x89, 0xf1, 0xd0, 0xe2, 0x85, 0x61, 0xdc, 0x22, 0xd1, + /* (2^364)P */ 0xa8, 0x48, 0xfb, 0x8c, 0x6a, 0x63, 0x01, 0x72, 0x43, 0x43, 0xeb, 0x21, 0xa3, 0x00, 0x8a, 0xc0, 0x87, 0x51, 0x9e, 0x86, 0x75, 0x16, 0x79, 0xf9, 0x6b, 0x11, 0x80, 0x62, 0xc2, 0x9d, 0xb8, 0x8c, 0x30, 0x8e, 0x8d, 0x03, 0x52, 0x7e, 0x31, 0x59, 0x38, 0xf9, 0x25, 0xc7, 0x0f, 0xc7, 0xa8, 0x2b, 0x5c, 0x80, 0xfa, 0x90, 0xa2, 0x63, 0xca, 0xe7, + /* (2^365)P */ 0xf1, 0x5d, 0xb5, 0xd9, 0x20, 0x10, 0x7d, 0x0f, 0xc5, 0x50, 0x46, 0x07, 0xff, 0x02, 0x75, 0x2b, 0x4a, 0xf3, 0x39, 0x91, 0x72, 0xb7, 0xd5, 0xcc, 0x38, 0xb8, 0xe7, 0x36, 0x26, 0x5e, 0x11, 0x97, 0x25, 0xfb, 0x49, 0x68, 0xdc, 0xb4, 0x46, 0x87, 0x5c, 0xc2, 0x7f, 0xaa, 0x7d, 0x36, 0x23, 0xa6, 0xc6, 0x53, 0xec, 0xbc, 0x57, 0x47, 0xc1, 0x2b, + /* (2^366)P */ 0x25, 0x5d, 0x7d, 0x95, 0xda, 0x0b, 0x8f, 0x78, 0x1e, 0x19, 0x09, 0xfa, 0x67, 0xe0, 0xa0, 0x17, 0x24, 0x76, 0x6c, 0x30, 0x1f, 0x62, 0x3d, 0xbe, 0x45, 0x70, 0xcc, 0xb6, 0x1e, 0x68, 0x06, 0x25, 0x68, 0x16, 0x1a, 0x33, 0x3f, 0x90, 0xc7, 0x78, 0x2d, 0x98, 0x3c, 0x2f, 0xb9, 0x2d, 0x94, 0x0b, 0xfb, 0x49, 0x56, 0x30, 0xd7, 0xc1, 0xe6, 0x48, + /* (2^367)P */ 0x7a, 0xd1, 0xe0, 0x8e, 0x67, 0xfc, 0x0b, 0x50, 0x1f, 0x84, 0x98, 0xfa, 0xaf, 0xae, 0x2e, 0x31, 0x27, 0xcf, 0x3f, 0xf2, 0x6e, 0x8d, 0x81, 0x8f, 0xd2, 0x5f, 0xde, 0xd3, 0x5e, 0xe9, 0xe7, 0x13, 0x48, 0x83, 0x5a, 0x4e, 0x84, 0xd1, 0x58, 0xcf, 0x6b, 0x84, 0xdf, 0x13, 0x1d, 0x91, 0x85, 0xe8, 0xcb, 0x29, 0x79, 0xd2, 0xca, 0xac, 0x6a, 0x93, + /* (2^368)P */ 0x53, 0x82, 0xce, 0x61, 0x96, 0x88, 0x6f, 0xe1, 0x4a, 0x4c, 0x1e, 0x30, 0x73, 0xe8, 0x74, 0xde, 0x40, 0x2b, 0xe0, 0xc4, 0xb5, 0xd8, 0x7c, 0x15, 0xe7, 0xe1, 0xb1, 0xe0, 0xd6, 0x88, 0xb1, 0x6a, 0x57, 0x19, 0x6a, 0x22, 0x66, 0x57, 0xf6, 0x8d, 0xfd, 0xc0, 0xf2, 0xa3, 0x03, 0x56, 0xfb, 0x2e, 0x75, 0x5e, 0xc7, 0x8e, 0x22, 0x96, 0x5c, 0x06, + /* (2^369)P */ 0x98, 0x7e, 0xbf, 0x3e, 0xbf, 0x24, 0x9d, 0x15, 0xd3, 0xf6, 0xd3, 0xd2, 0xf0, 0x11, 0xf2, 0xdb, 0x36, 0x23, 0x38, 0xf7, 0x1d, 0x71, 0x20, 0xd2, 0x54, 0x7f, 0x1e, 0x24, 0x8f, 0xe2, 0xaa, 0xf7, 0x3f, 0x6b, 0x41, 0x4e, 0xdc, 0x0e, 0xec, 0xe8, 0x35, 0x0a, 0x08, 0x6d, 0x89, 0x5b, 0x32, 0x91, 0x01, 0xb6, 0xe0, 0x2c, 0xc6, 0xa1, 0xbe, 0xb4, + /* (2^370)P */ 0x29, 0xf2, 0x1e, 0x1c, 0xdc, 0x68, 0x8a, 0x43, 0x87, 0x2c, 0x48, 0xb3, 0x9e, 0xed, 0xd2, 0x82, 0x46, 0xac, 0x2f, 0xef, 0x93, 0x34, 0x37, 0xca, 0x64, 0x8d, 
0xc9, 0x06, 0x90, 0xbb, 0x78, 0x0a, 0x3c, 0x4c, 0xcf, 0x35, 0x7a, 0x0f, 0xf7, 0xa7, 0xf4, 0x2f, 0x45, 0x69, 0x3f, 0xa9, 0x5d, 0xce, 0x7b, 0x8a, 0x84, 0xc3, 0xae, 0xf4, 0xda, 0xd5, + /* (2^371)P */ 0xca, 0xba, 0x95, 0x43, 0x05, 0x7b, 0x06, 0xd9, 0x5c, 0x0a, 0x18, 0x5f, 0x6a, 0x6a, 0xce, 0xc0, 0x3d, 0x95, 0x51, 0x0e, 0x1a, 0xbe, 0x85, 0x7a, 0xf2, 0x69, 0xec, 0xc0, 0x8c, 0xca, 0xa3, 0x32, 0x0a, 0x76, 0x50, 0xc6, 0x76, 0x61, 0x00, 0x89, 0xbf, 0x6e, 0x0f, 0x48, 0x90, 0x31, 0x93, 0xec, 0x34, 0x70, 0xf0, 0xc3, 0x8d, 0xf0, 0x0f, 0xb5, + /* (2^372)P */ 0xbe, 0x23, 0xe2, 0x18, 0x99, 0xf1, 0xed, 0x8a, 0xf6, 0xc9, 0xac, 0xb8, 0x1e, 0x9a, 0x3c, 0x15, 0xae, 0xd7, 0x6d, 0xb3, 0x04, 0xee, 0x5b, 0x0d, 0x1e, 0x79, 0xb7, 0xf9, 0xf9, 0x8d, 0xad, 0xf9, 0x8f, 0x5a, 0x6a, 0x7b, 0xd7, 0x9b, 0xca, 0x62, 0xfe, 0x9c, 0xc0, 0x6f, 0x6d, 0x9d, 0x76, 0xa3, 0x69, 0xb9, 0x4c, 0xa1, 0xc4, 0x0c, 0x76, 0xaa, + /* (2^373)P */ 0x1c, 0x06, 0xfe, 0x3f, 0x45, 0x70, 0xcd, 0x97, 0xa9, 0xa2, 0xb1, 0xd3, 0xf2, 0xa5, 0x0c, 0x49, 0x2c, 0x75, 0x73, 0x1f, 0xcf, 0x00, 0xaf, 0xd5, 0x2e, 0xde, 0x0d, 0x8f, 0x8f, 0x7c, 0xc4, 0x58, 0xce, 0xd4, 0xf6, 0x24, 0x19, 0x2e, 0xd8, 0xc5, 0x1d, 0x1a, 0x3f, 0xb8, 0x4f, 0xbc, 0x7d, 0xbd, 0x68, 0xe3, 0x81, 0x98, 0x1b, 0xa8, 0xc9, 0xd9, + /* (2^374)P */ 0x39, 0x95, 0x78, 0x24, 0x6c, 0x38, 0xe4, 0xe7, 0xd0, 0x8d, 0xb9, 0x38, 0x71, 0x5e, 0xc1, 0x62, 0x80, 0xcc, 0xcb, 0x8c, 0x97, 0xca, 0xf8, 0xb9, 0xd9, 0x9c, 0xce, 0x72, 0x7b, 0x70, 0xee, 0x5f, 0xea, 0xa2, 0xdf, 0xa9, 0x14, 0x10, 0xf9, 0x6e, 0x59, 0x9f, 0x9c, 0xe0, 0x0c, 0xb2, 0x07, 0x97, 0xcd, 0xd2, 0x89, 0x16, 0xfd, 0x9c, 0xa8, 0xa5, + /* (2^375)P */ 0x5a, 0x61, 0xf1, 0x59, 0x7c, 0x38, 0xda, 0xe2, 0x85, 0x99, 0x68, 0xe9, 0xc9, 0xf7, 0x32, 0x7e, 0xc4, 0xca, 0xb7, 0x11, 0x08, 0x69, 0x2b, 0x66, 0x02, 0xf7, 0x2e, 0x18, 0xc3, 0x8e, 0xe1, 0xf9, 0xc5, 0x19, 0x9a, 0x0a, 0x9c, 0x07, 0xba, 0xc7, 0x9c, 0x03, 0x34, 0x89, 0x99, 0x67, 0x0b, 0x16, 0x4b, 0x07, 0x36, 0x16, 0x36, 0x2c, 0xe2, 0xa1, + /* (2^376)P */ 0x70, 0x10, 0x91, 0x27, 0xa8, 0x24, 0x8e, 0x29, 0x04, 0x6f, 0x79, 0x1f, 0xd3, 0xa5, 0x68, 0xd3, 0x0b, 0x7d, 0x56, 0x4d, 0x14, 0x57, 0x7b, 0x2e, 0x00, 0x9f, 0x9a, 0xfd, 0x6c, 0x63, 0x18, 0x81, 0xdb, 0x9d, 0xb7, 0xd7, 0xa4, 0x1e, 0xe8, 0x40, 0xf1, 0x4c, 0xa3, 0x01, 0xd5, 0x4b, 0x75, 0xea, 0xdd, 0x97, 0xfd, 0x5b, 0xb2, 0x66, 0x6a, 0x24, + /* (2^377)P */ 0x72, 0x11, 0xfe, 0x73, 0x1b, 0xd3, 0xea, 0x7f, 0x93, 0x15, 0x15, 0x05, 0xfe, 0x40, 0xe8, 0x28, 0xd8, 0x50, 0x47, 0x66, 0xfa, 0xb7, 0xb5, 0x04, 0xba, 0x35, 0x1e, 0x32, 0x9f, 0x5f, 0x32, 0xba, 0x3d, 0xd1, 0xed, 0x9a, 0x76, 0xca, 0xa3, 0x3e, 0x77, 0xd8, 0xd8, 0x7c, 0x5f, 0x68, 0x42, 0xb5, 0x86, 0x7f, 0x3b, 0xc9, 0xc1, 0x89, 0x64, 0xda, + /* (2^378)P */ 0xd5, 0xd4, 0x17, 0x31, 0xfc, 0x6a, 0xfd, 0xb8, 0xe8, 0xe5, 0x3e, 0x39, 0x06, 0xe4, 0xd1, 0x90, 0x2a, 0xca, 0xf6, 0x54, 0x6c, 0x1b, 0x2f, 0x49, 0x97, 0xb1, 0x2a, 0x82, 0x43, 0x3d, 0x1f, 0x8b, 0xe2, 0x47, 0xc5, 0x24, 0xa8, 0xd5, 0x53, 0x29, 0x7d, 0xc6, 0x87, 0xa6, 0x25, 0x3a, 0x64, 0xdd, 0x71, 0x08, 0x9e, 0xcd, 0xe9, 0x45, 0xc7, 0xba, + /* (2^379)P */ 0x37, 0x72, 0x6d, 0x13, 0x7a, 0x8d, 0x04, 0x31, 0xe6, 0xe3, 0x9e, 0x36, 0x71, 0x3e, 0xc0, 0x1e, 0xe3, 0x71, 0xd3, 0x49, 0x4e, 0x4a, 0x36, 0x42, 0x68, 0x68, 0x61, 0xc7, 0x3c, 0xdb, 0x81, 0x49, 0xf7, 0x91, 0x4d, 0xea, 0x4c, 0x4f, 0x98, 0xc6, 0x7e, 0x60, 0x84, 0x4b, 0x6a, 0x37, 0xbb, 0x52, 0xf7, 0xce, 0x02, 0xe4, 0xad, 0xd1, 0x3c, 0xa7, + /* (2^380)P */ 0x51, 0x06, 0x2d, 0xf8, 0x08, 0xe8, 0xf1, 0x0c, 0xe5, 0xa9, 0xac, 0x29, 0x73, 0x3b, 0xed, 0x98, 0x5f, 0x55, 0x08, 0x38, 0x51, 0x44, 0x36, 0x5d, 0xea, 0xc3, 0xb8, 0x0e, 0xa0, 0x4f, 
0xd2, 0x79, 0xe9, 0x98, 0xc3, 0xf5, 0x00, 0xb9, 0x26, 0x27, 0x42, 0xa8, 0x07, 0xc1, 0x12, 0x31, 0xc1, 0xc3, 0x3c, 0x3b, 0x7a, 0x72, 0x97, 0xc2, 0x70, 0x3a, + /* (2^381)P */ 0xf4, 0xb2, 0xba, 0x32, 0xbc, 0xa9, 0x2f, 0x87, 0xc7, 0x3c, 0x45, 0xcd, 0xae, 0xe2, 0x13, 0x6d, 0x3a, 0xf2, 0xf5, 0x66, 0x97, 0x29, 0xaf, 0x53, 0x9f, 0xda, 0xea, 0x14, 0xdf, 0x04, 0x98, 0x19, 0x95, 0x9e, 0x2a, 0x00, 0x5c, 0x9d, 0x1d, 0xf0, 0x39, 0x23, 0xff, 0xfc, 0xca, 0x36, 0xb7, 0xde, 0xdf, 0x37, 0x78, 0x52, 0x21, 0xfa, 0x19, 0x10, + /* (2^382)P */ 0x50, 0x20, 0x73, 0x74, 0x62, 0x21, 0xf2, 0xf7, 0x9b, 0x66, 0x85, 0x34, 0x74, 0xd4, 0x9d, 0x60, 0xd7, 0xbc, 0xc8, 0x46, 0x3b, 0xb8, 0x80, 0x42, 0x15, 0x0a, 0x6c, 0x35, 0x1a, 0x69, 0xf0, 0x1d, 0x4b, 0x29, 0x54, 0x5a, 0x9a, 0x48, 0xec, 0x9f, 0x37, 0x74, 0x91, 0xd0, 0xd1, 0x9e, 0x00, 0xc2, 0x76, 0x56, 0xd6, 0xa0, 0x15, 0x14, 0x83, 0x59, + /* (2^383)P */ 0xc2, 0xf8, 0x22, 0x20, 0x23, 0x07, 0xbd, 0x1d, 0x6f, 0x1e, 0x8c, 0x56, 0x06, 0x6a, 0x4b, 0x9f, 0xe2, 0xa9, 0x92, 0x46, 0x4b, 0x46, 0x59, 0xd7, 0xe1, 0xda, 0x14, 0x98, 0x07, 0x65, 0x7e, 0x28, 0x20, 0xf2, 0x9d, 0x4f, 0x36, 0x5c, 0x92, 0xe0, 0x9d, 0xfe, 0x3e, 0xda, 0xe4, 0x47, 0x19, 0x3c, 0x00, 0x7f, 0x22, 0xf2, 0x9e, 0x51, 0xae, 0x4d, + /* (2^384)P */ 0xbe, 0x8c, 0x1b, 0x10, 0xb6, 0xad, 0xcc, 0xcc, 0xd8, 0x5e, 0x21, 0xa6, 0xfb, 0xf1, 0xf6, 0xbd, 0x0a, 0x24, 0x67, 0xb4, 0x57, 0x7a, 0xbc, 0xe8, 0xe9, 0xff, 0xee, 0x0a, 0x1f, 0xee, 0xbd, 0xc8, 0x44, 0xed, 0x2b, 0xbb, 0x55, 0x1f, 0xdd, 0x7c, 0xb3, 0xeb, 0x3f, 0x63, 0xa1, 0x28, 0x91, 0x21, 0xab, 0x71, 0xc6, 0x4c, 0xd0, 0xe9, 0xb0, 0x21, + /* (2^385)P */ 0xad, 0xc9, 0x77, 0x2b, 0xee, 0x89, 0xa4, 0x7b, 0xfd, 0xf9, 0xf6, 0x14, 0xe4, 0xed, 0x1a, 0x16, 0x9b, 0x78, 0x41, 0x43, 0xa8, 0x83, 0x72, 0x06, 0x2e, 0x7c, 0xdf, 0xeb, 0x7e, 0xdd, 0xd7, 0x8b, 0xea, 0x9a, 0x2b, 0x03, 0xba, 0x57, 0xf3, 0xf1, 0xd9, 0xe5, 0x09, 0xc5, 0x98, 0x61, 0x1c, 0x51, 0x6d, 0x5d, 0x6e, 0xfb, 0x5e, 0x95, 0x9f, 0xb5, + /* (2^386)P */ 0x23, 0xe2, 0x1e, 0x95, 0xa3, 0x5e, 0x42, 0x10, 0xc7, 0xc3, 0x70, 0xbf, 0x4b, 0x6b, 0x83, 0x36, 0x93, 0xb7, 0x68, 0x47, 0x88, 0x3a, 0x10, 0x88, 0x48, 0x7f, 0x8c, 0xae, 0x54, 0x10, 0x02, 0xa4, 0x52, 0x8f, 0x8d, 0xf7, 0x26, 0x4f, 0x50, 0xc3, 0x6a, 0xe2, 0x4e, 0x3b, 0x4c, 0xb9, 0x8a, 0x14, 0x15, 0x6d, 0x21, 0x29, 0xb3, 0x6e, 0x4e, 0xd0, + /* (2^387)P */ 0x4c, 0x8a, 0x18, 0x3f, 0xb7, 0x20, 0xfd, 0x3e, 0x54, 0xca, 0x68, 0x3c, 0xea, 0x6f, 0xf4, 0x6b, 0xa2, 0xbd, 0x01, 0xbd, 0xfe, 0x08, 0xa8, 0xd8, 0xc2, 0x20, 0x36, 0x05, 0xcd, 0xe9, 0xf3, 0x9e, 0xfa, 0x85, 0x66, 0x8f, 0x4b, 0x1d, 0x8c, 0x64, 0x4f, 0xb8, 0xc6, 0x0f, 0x5b, 0x57, 0xd8, 0x24, 0x19, 0x5a, 0x14, 0x4b, 0x92, 0xd3, 0x96, 0xbc, + /* (2^388)P */ 0xa9, 0x3f, 0xc9, 0x6c, 0xca, 0x64, 0x1e, 0x6f, 0xdf, 0x65, 0x7f, 0x9a, 0x47, 0x6b, 0x8a, 0x60, 0x31, 0xa6, 0x06, 0xac, 0x69, 0x30, 0xe6, 0xea, 0x63, 0x42, 0x26, 0x5f, 0xdb, 0xd0, 0xf2, 0x8e, 0x34, 0x0a, 0x3a, 0xeb, 0xf3, 0x79, 0xc8, 0xb7, 0x60, 0x56, 0x5c, 0x37, 0x95, 0x71, 0xf8, 0x7f, 0x49, 0x3e, 0x9e, 0x01, 0x26, 0x1e, 0x80, 0x9f, + /* (2^389)P */ 0xf8, 0x16, 0x9a, 0xaa, 0xb0, 0x28, 0xb5, 0x8e, 0xd0, 0x60, 0xe5, 0x26, 0xa9, 0x47, 0xc4, 0x5c, 0xa9, 0x39, 0xfe, 0x0a, 0xd8, 0x07, 0x2b, 0xb3, 0xce, 0xf1, 0xea, 0x1a, 0xf4, 0x7b, 0x98, 0x31, 0x3d, 0x13, 0x29, 0x80, 0xe8, 0x0d, 0xcf, 0x56, 0x39, 0x86, 0x50, 0x0c, 0xb3, 0x18, 0xf4, 0xc5, 0xca, 0xf2, 0x6f, 0xcd, 0x8d, 0xd5, 0x02, 0xb0, + /* (2^390)P */ 0xbf, 0x39, 0x3f, 0xac, 0x6d, 0x1a, 0x6a, 0xe4, 0x42, 0x24, 0xd6, 0x41, 0x9d, 0xb9, 0x5b, 0x46, 0x73, 0x93, 0x76, 0xaa, 0xb7, 0x37, 0x36, 0xa6, 0x09, 0xe5, 0x04, 0x3b, 0x66, 0xc4, 0x29, 0x3e, 0x41, 0xc2, 
0xcb, 0xe5, 0x17, 0xd7, 0x34, 0x67, 0x1d, 0x2c, 0x12, 0xec, 0x24, 0x7a, 0x40, 0xa2, 0x45, 0x41, 0xf0, 0x75, 0xed, 0x43, 0x30, 0xc9, + /* (2^391)P */ 0x80, 0xf6, 0x47, 0x5b, 0xad, 0x54, 0x02, 0xbc, 0xdd, 0xa4, 0xb2, 0xd7, 0x42, 0x95, 0xf2, 0x0d, 0x1b, 0xef, 0x37, 0xa7, 0xb4, 0x34, 0x04, 0x08, 0x71, 0x1b, 0xd3, 0xdf, 0xa1, 0xf0, 0x2b, 0xfa, 0xc0, 0x1f, 0xf3, 0x44, 0xb5, 0xc6, 0x47, 0x3d, 0x65, 0x67, 0x45, 0x4d, 0x2f, 0xde, 0x52, 0x73, 0xfc, 0x30, 0x01, 0x6b, 0xc1, 0x03, 0xd8, 0xd7, + /* (2^392)P */ 0x1c, 0x67, 0x55, 0x3e, 0x01, 0x17, 0x0f, 0x3e, 0xe5, 0x34, 0x58, 0xfc, 0xcb, 0x71, 0x24, 0x74, 0x5d, 0x36, 0x1e, 0x89, 0x2a, 0x63, 0xf8, 0xf8, 0x9f, 0x50, 0x9f, 0x32, 0x92, 0x29, 0xd8, 0x1a, 0xec, 0x76, 0x57, 0x6c, 0x67, 0x12, 0x6a, 0x6e, 0xef, 0x97, 0x1f, 0xc3, 0x77, 0x60, 0x3c, 0x22, 0xcb, 0xc7, 0x04, 0x1a, 0x89, 0x2d, 0x10, 0xa6, + /* (2^393)P */ 0x12, 0xf5, 0xa9, 0x26, 0x16, 0xd9, 0x3c, 0x65, 0x5d, 0x83, 0xab, 0xd1, 0x70, 0x6b, 0x1c, 0xdb, 0xe7, 0x86, 0x0d, 0xfb, 0xe7, 0xf8, 0x2a, 0x58, 0x6e, 0x7a, 0x66, 0x13, 0x53, 0x3a, 0x6f, 0x8d, 0x43, 0x5f, 0x14, 0x23, 0x14, 0xff, 0x3d, 0x52, 0x7f, 0xee, 0xbd, 0x7a, 0x34, 0x8b, 0x35, 0x24, 0xc3, 0x7a, 0xdb, 0xcf, 0x22, 0x74, 0x9a, 0x8f, + /* (2^394)P */ 0xdb, 0x20, 0xfc, 0xe5, 0x39, 0x4e, 0x7d, 0x78, 0xee, 0x0b, 0xbf, 0x1d, 0x80, 0xd4, 0x05, 0x4f, 0xb9, 0xd7, 0x4e, 0x94, 0x88, 0x9a, 0x50, 0x78, 0x1a, 0x70, 0x8c, 0xcc, 0x25, 0xb6, 0x61, 0x09, 0xdc, 0x7b, 0xea, 0x3f, 0x7f, 0xea, 0x2a, 0x0d, 0x47, 0x1c, 0x8e, 0xa6, 0x5b, 0xd2, 0xa3, 0x61, 0x93, 0x3c, 0x68, 0x9f, 0x8b, 0xea, 0xb0, 0xcb, + /* (2^395)P */ 0xff, 0x54, 0x02, 0x19, 0xae, 0x8b, 0x4c, 0x2c, 0x3a, 0xe0, 0xe4, 0xac, 0x87, 0xf7, 0x51, 0x45, 0x41, 0x43, 0xdc, 0xaa, 0xcd, 0xcb, 0xdc, 0x40, 0xe3, 0x44, 0x3b, 0x1d, 0x9e, 0x3d, 0xb9, 0x82, 0xcc, 0x7a, 0xc5, 0x12, 0xf8, 0x1e, 0xdd, 0xdb, 0x8d, 0xb0, 0x2a, 0xe8, 0xe6, 0x6c, 0x94, 0x3b, 0xb7, 0x2d, 0xba, 0x79, 0x3b, 0xb5, 0x86, 0xfb, + /* (2^396)P */ 0x82, 0x88, 0x13, 0xdd, 0x6c, 0xcd, 0x85, 0x2b, 0x90, 0x86, 0xb7, 0xac, 0x16, 0xa6, 0x6e, 0x6a, 0x94, 0xd8, 0x1e, 0x4e, 0x41, 0x0f, 0xce, 0x81, 0x6a, 0xa8, 0x26, 0x56, 0x43, 0x52, 0x52, 0xe6, 0xff, 0x88, 0xcf, 0x47, 0x05, 0x1d, 0xff, 0xf3, 0xa0, 0x10, 0xb2, 0x97, 0x87, 0xeb, 0x47, 0xbb, 0xfa, 0x1f, 0xe8, 0x4c, 0xce, 0xc4, 0xcd, 0x93, + /* (2^397)P */ 0xf4, 0x11, 0xf5, 0x8d, 0x89, 0x29, 0x79, 0xb3, 0x59, 0x0b, 0x29, 0x7d, 0x9c, 0x12, 0x4a, 0x65, 0x72, 0x3a, 0xf9, 0xec, 0x37, 0x18, 0x86, 0xef, 0x44, 0x07, 0x25, 0x74, 0x76, 0x53, 0xed, 0x51, 0x01, 0xc6, 0x28, 0xc5, 0xc3, 0x4a, 0x0f, 0x99, 0xec, 0xc8, 0x40, 0x5a, 0x83, 0x30, 0x79, 0xa2, 0x3e, 0x63, 0x09, 0x2d, 0x6f, 0x23, 0x54, 0x1c, + /* (2^398)P */ 0x5c, 0x6f, 0x3b, 0x1c, 0x30, 0x77, 0x7e, 0x87, 0x66, 0x83, 0x2e, 0x7e, 0x85, 0x50, 0xfd, 0xa0, 0x7a, 0xc2, 0xf5, 0x0f, 0xc1, 0x64, 0xe7, 0x0b, 0xbd, 0x59, 0xa7, 0xe7, 0x65, 0x53, 0xc3, 0xf5, 0x55, 0x5b, 0xe1, 0x82, 0x30, 0x5a, 0x61, 0xcd, 0xa0, 0x89, 0x32, 0xdb, 0x87, 0xfc, 0x21, 0x8a, 0xab, 0x6d, 0x82, 0xa8, 0x42, 0x81, 0x4f, 0xf2, + /* (2^399)P */ 0xb3, 0xeb, 0x88, 0x18, 0xf6, 0x56, 0x96, 0xbf, 0xba, 0x5d, 0x71, 0xa1, 0x5a, 0xd1, 0x04, 0x7b, 0xd5, 0x46, 0x01, 0x74, 0xfe, 0x15, 0x25, 0xb7, 0xff, 0x0c, 0x24, 0x47, 0xac, 0xfd, 0xab, 0x47, 0x32, 0xe1, 0x6a, 0x4e, 0xca, 0xcf, 0x7f, 0xdd, 0xf8, 0xd2, 0x4b, 0x3b, 0xf5, 0x17, 0xba, 0xba, 0x8b, 0xa1, 0xec, 0x28, 0x3f, 0x97, 0xab, 0x2a, + /* (2^400)P */ 0x51, 0x38, 0xc9, 0x5e, 0xc6, 0xb3, 0x64, 0xf2, 0x24, 0x4d, 0x04, 0x7d, 0xc8, 0x39, 0x0c, 0x4a, 0xc9, 0x73, 0x74, 0x1b, 0x5c, 0xb2, 0xc5, 0x41, 0x62, 0xa0, 0x4c, 0x6d, 0x8d, 0x91, 0x9a, 0x7b, 0x88, 0xab, 0x9c, 0x7e, 0x23, 0xdb, 
0x6f, 0xb5, 0x72, 0xd6, 0x47, 0x40, 0xef, 0x22, 0x58, 0x62, 0x19, 0x6c, 0x38, 0xba, 0x5b, 0x00, 0x30, 0x9f, + /* (2^401)P */ 0x65, 0xbb, 0x3b, 0x9b, 0xe9, 0xae, 0xbf, 0xbe, 0xe4, 0x13, 0x95, 0xf3, 0xe3, 0x77, 0xcb, 0xe4, 0x9a, 0x22, 0xb5, 0x4a, 0x08, 0x9d, 0xb3, 0x9e, 0x27, 0xe0, 0x15, 0x6c, 0x9f, 0x7e, 0x9a, 0x5e, 0x15, 0x45, 0x25, 0x8d, 0x01, 0x0a, 0xd2, 0x2b, 0xbd, 0x48, 0x06, 0x0d, 0x18, 0x97, 0x4b, 0xdc, 0xbc, 0xf0, 0xcd, 0xb2, 0x52, 0x3c, 0xac, 0xf5, + /* (2^402)P */ 0x3e, 0xed, 0x47, 0x6b, 0x5c, 0xf6, 0x76, 0xd0, 0xe9, 0x15, 0xa3, 0xcb, 0x36, 0x00, 0x21, 0xa3, 0x79, 0x20, 0xa5, 0x3e, 0x88, 0x03, 0xcb, 0x7e, 0x63, 0xbb, 0xed, 0xa9, 0x13, 0x35, 0x16, 0xaf, 0x2e, 0xb4, 0x70, 0x14, 0x93, 0xfb, 0xc4, 0x9b, 0xd8, 0xb1, 0xbe, 0x43, 0xd1, 0x85, 0xb8, 0x97, 0xef, 0xea, 0x88, 0xa1, 0x25, 0x52, 0x62, 0x75, + /* (2^403)P */ 0x8e, 0x4f, 0xaa, 0x23, 0x62, 0x7e, 0x2b, 0x37, 0x89, 0x00, 0x11, 0x30, 0xc5, 0x33, 0x4a, 0x89, 0x8a, 0xe2, 0xfc, 0x5c, 0x6a, 0x75, 0xe5, 0xf7, 0x02, 0x4a, 0x9b, 0xf7, 0xb5, 0x6a, 0x85, 0x31, 0xd3, 0x5a, 0xcf, 0xc3, 0xf8, 0xde, 0x2f, 0xcf, 0xb5, 0x24, 0xf4, 0xe3, 0xa1, 0xad, 0x42, 0xae, 0x09, 0xb9, 0x2e, 0x04, 0x2d, 0x01, 0x22, 0x3f, + /* (2^404)P */ 0x41, 0x16, 0xfb, 0x7d, 0x50, 0xfd, 0xb5, 0xba, 0x88, 0x24, 0xba, 0xfd, 0x3d, 0xb2, 0x90, 0x15, 0xb7, 0xfa, 0xa2, 0xe1, 0x4c, 0x7d, 0xb9, 0xc6, 0xff, 0x81, 0x57, 0xb6, 0xc2, 0x9e, 0xcb, 0xc4, 0x35, 0xbd, 0x01, 0xb7, 0xaa, 0xce, 0xd0, 0xe9, 0xb5, 0xd6, 0x72, 0xbf, 0xd2, 0xee, 0xc7, 0xac, 0x94, 0xff, 0x29, 0x57, 0x02, 0x49, 0x09, 0xad, + /* (2^405)P */ 0x27, 0xa5, 0x78, 0x1b, 0xbf, 0x6b, 0xaf, 0x0b, 0x8c, 0xd9, 0xa8, 0x37, 0xb0, 0x67, 0x18, 0xb6, 0xc7, 0x05, 0x8a, 0x67, 0x03, 0x30, 0x62, 0x6e, 0x56, 0x82, 0xa9, 0x54, 0x3e, 0x0c, 0x4e, 0x07, 0xe1, 0x5a, 0x38, 0xed, 0xfa, 0xc8, 0x55, 0x6b, 0x08, 0xa3, 0x6b, 0x64, 0x2a, 0x15, 0xd6, 0x39, 0x6f, 0x47, 0x99, 0x42, 0x3f, 0x33, 0x84, 0x8f, + /* (2^406)P */ 0xbc, 0x45, 0x29, 0x81, 0x0e, 0xa4, 0xc5, 0x72, 0x3a, 0x10, 0xe1, 0xc4, 0x1e, 0xda, 0xc3, 0xfe, 0xb0, 0xce, 0xd2, 0x13, 0x34, 0x67, 0x21, 0xc6, 0x7e, 0xf9, 0x8c, 0xff, 0x39, 0x50, 0xae, 0x92, 0x60, 0x35, 0x2f, 0x8b, 0x6e, 0xc9, 0xc1, 0x27, 0x3a, 0x94, 0x66, 0x3e, 0x26, 0x84, 0x93, 0xc8, 0x6c, 0xcf, 0xd2, 0x03, 0xa1, 0x10, 0xcf, 0xb7, + /* (2^407)P */ 0x64, 0xda, 0x19, 0xf6, 0xc5, 0x73, 0x17, 0x44, 0x88, 0x81, 0x07, 0x0d, 0x34, 0xb2, 0x75, 0xf9, 0xd9, 0xe2, 0xe0, 0x8b, 0x71, 0xcf, 0x72, 0x34, 0x83, 0xb4, 0xce, 0xfc, 0xd7, 0x29, 0x09, 0x5a, 0x98, 0xbf, 0x14, 0xac, 0x77, 0x55, 0x38, 0x47, 0x5b, 0x0f, 0x40, 0x24, 0xe5, 0xa5, 0xa6, 0xac, 0x2d, 0xa6, 0xff, 0x9c, 0x73, 0xfe, 0x5c, 0x7e, + /* (2^408)P */ 0x1e, 0x33, 0xcc, 0x68, 0xb2, 0xbc, 0x8c, 0x93, 0xaf, 0xcc, 0x38, 0xf8, 0xd9, 0x16, 0x72, 0x50, 0xac, 0xd9, 0xb5, 0x0b, 0x9a, 0xbe, 0x46, 0x7a, 0xf1, 0xee, 0xf1, 0xad, 0xec, 0x5b, 0x59, 0x27, 0x9c, 0x05, 0xa3, 0x87, 0xe0, 0x37, 0x2c, 0x83, 0xce, 0xb3, 0x65, 0x09, 0x8e, 0xc3, 0x9c, 0xbf, 0x6a, 0xa2, 0x00, 0xcc, 0x12, 0x36, 0xc5, 0x95, + /* (2^409)P */ 0x36, 0x11, 0x02, 0x14, 0x9c, 0x3c, 0xeb, 0x2f, 0x23, 0x5b, 0x6b, 0x2b, 0x08, 0x54, 0x53, 0xac, 0xb2, 0xa3, 0xe0, 0x26, 0x62, 0x3c, 0xe4, 0xe1, 0x81, 0xee, 0x13, 0x3e, 0xa4, 0x97, 0xef, 0xf9, 0x92, 0x27, 0x01, 0xce, 0x54, 0x8b, 0x3e, 0x31, 0xbe, 0xa7, 0x88, 0xcf, 0x47, 0x99, 0x3c, 0x10, 0x6f, 0x60, 0xb3, 0x06, 0x4e, 0xee, 0x1b, 0xf0, + /* (2^410)P */ 0x59, 0x49, 0x66, 0xcf, 0x22, 0xe6, 0xf6, 0x73, 0xfe, 0xa3, 0x1c, 0x09, 0xfa, 0x5f, 0x65, 0xa8, 0xf0, 0x82, 0xc2, 0xef, 0x16, 0x63, 0x6e, 0x79, 0x69, 0x51, 0x39, 0x07, 0x65, 0xc4, 0x81, 0xec, 0x73, 0x0f, 0x15, 0x93, 0xe1, 0x30, 0x33, 0xe9, 0x37, 0x86, 
0x42, 0x4c, 0x1f, 0x9b, 0xad, 0xee, 0x3f, 0xf1, 0x2a, 0x8e, 0x6a, 0xa3, 0xc8, 0x35, + /* (2^411)P */ 0x1e, 0x49, 0xf1, 0xdd, 0xd2, 0x9c, 0x8e, 0x78, 0xb2, 0x06, 0xe4, 0x6a, 0xab, 0x3a, 0xdc, 0xcd, 0xf4, 0xeb, 0xe1, 0xe7, 0x2f, 0xaa, 0xeb, 0x40, 0x31, 0x9f, 0xb9, 0xab, 0x13, 0xa9, 0x78, 0xbf, 0x38, 0x89, 0x0e, 0x85, 0x14, 0x8b, 0x46, 0x76, 0x14, 0xda, 0xcf, 0x33, 0xc8, 0x79, 0xd3, 0xd5, 0xa3, 0x6a, 0x69, 0x45, 0x70, 0x34, 0xc3, 0xe9, + /* (2^412)P */ 0x5e, 0xe7, 0x78, 0xe9, 0x24, 0xcc, 0xe9, 0xf4, 0xc8, 0x6b, 0xe0, 0xfb, 0x3a, 0xbe, 0xcc, 0x42, 0x4a, 0x00, 0x22, 0xf8, 0xe6, 0x32, 0xbe, 0x6d, 0x18, 0x55, 0x60, 0xe9, 0x72, 0x69, 0x50, 0x56, 0xca, 0x04, 0x18, 0x38, 0xa1, 0xee, 0xd8, 0x38, 0x3c, 0xa7, 0x70, 0xe2, 0xb9, 0x4c, 0xa0, 0xc8, 0x89, 0x72, 0xcf, 0x49, 0x7f, 0xdf, 0xbc, 0x67, + /* (2^413)P */ 0x1d, 0x17, 0xcb, 0x0b, 0xbd, 0xb2, 0x36, 0xe3, 0xa8, 0x99, 0x31, 0xb6, 0x26, 0x9c, 0x0c, 0x74, 0xaf, 0x4d, 0x24, 0x61, 0xcf, 0x31, 0x7b, 0xed, 0xdd, 0xc3, 0xf6, 0x32, 0x70, 0xfe, 0x17, 0xf6, 0x51, 0x37, 0x65, 0xce, 0x5d, 0xaf, 0xa5, 0x2f, 0x2a, 0xfe, 0x00, 0x71, 0x7c, 0x50, 0xbe, 0x21, 0xc7, 0xed, 0xc6, 0xfc, 0x67, 0xcf, 0x9c, 0xdd, + /* (2^414)P */ 0x26, 0x3e, 0xf8, 0xbb, 0xd0, 0xb1, 0x01, 0xd8, 0xeb, 0x0b, 0x62, 0x87, 0x35, 0x4c, 0xde, 0xca, 0x99, 0x9c, 0x6d, 0xf7, 0xb6, 0xf0, 0x57, 0x0a, 0x52, 0x29, 0x6a, 0x3f, 0x26, 0x31, 0x04, 0x07, 0x2a, 0xc9, 0xfa, 0x9b, 0x0e, 0x62, 0x8e, 0x72, 0xf2, 0xad, 0xce, 0xb6, 0x35, 0x7a, 0xc1, 0xae, 0x35, 0xc7, 0xa3, 0x14, 0xcf, 0x0c, 0x28, 0xb7, + /* (2^415)P */ 0xa6, 0xf1, 0x32, 0x3a, 0x20, 0xd2, 0x24, 0x97, 0xcf, 0x5d, 0x37, 0x99, 0xaf, 0x33, 0x7a, 0x5b, 0x7a, 0xcc, 0x4e, 0x41, 0x38, 0xb1, 0x4e, 0xad, 0xc9, 0xd9, 0x71, 0x7e, 0xb2, 0xf5, 0xd5, 0x01, 0x6c, 0x4d, 0xfd, 0xa1, 0xda, 0x03, 0x38, 0x9b, 0x3d, 0x92, 0x92, 0xf2, 0xca, 0xbf, 0x1f, 0x24, 0xa4, 0xbb, 0x30, 0x6a, 0x74, 0x56, 0xc8, 0xce, + /* (2^416)P */ 0x27, 0xf4, 0xed, 0xc9, 0xc3, 0xb1, 0x79, 0x85, 0xbe, 0xf6, 0xeb, 0xf3, 0x55, 0xc7, 0xaa, 0xa6, 0xe9, 0x07, 0x5d, 0xf4, 0xeb, 0xa6, 0x81, 0xe3, 0x0e, 0xcf, 0xa3, 0xc1, 0xef, 0xe7, 0x34, 0xb2, 0x03, 0x73, 0x8a, 0x91, 0xf1, 0xad, 0x05, 0xc7, 0x0b, 0x43, 0x99, 0x12, 0x31, 0xc8, 0xc7, 0xc5, 0xa4, 0x3d, 0xcd, 0xe5, 0x4e, 0x6d, 0x24, 0xdd, + /* (2^417)P */ 0x61, 0x54, 0xd0, 0x95, 0x2c, 0x45, 0x75, 0xac, 0xb5, 0x1a, 0x9d, 0x11, 0xeb, 0xed, 0x6b, 0x57, 0xa3, 0xe6, 0xcd, 0x77, 0xd4, 0x83, 0x8e, 0x39, 0xf1, 0x0f, 0x98, 0xcb, 0x40, 0x02, 0x6e, 0x10, 0x82, 0x9e, 0xb4, 0x93, 0x76, 0xd7, 0x97, 0xa3, 0x53, 0x12, 0x86, 0xc6, 0x15, 0x78, 0x73, 0x93, 0xe7, 0x7f, 0xcf, 0x1f, 0xbf, 0xcd, 0xd2, 0x7a, + /* (2^418)P */ 0xc2, 0x21, 0xdc, 0xd5, 0x69, 0xff, 0xca, 0x49, 0x3a, 0xe1, 0xc3, 0x69, 0x41, 0x56, 0xc1, 0x76, 0x63, 0x24, 0xbd, 0x64, 0x1b, 0x3d, 0x92, 0xf9, 0x13, 0x04, 0x25, 0xeb, 0x27, 0xa6, 0xef, 0x39, 0x3a, 0x80, 0xe0, 0xf8, 0x27, 0xee, 0xc9, 0x49, 0x77, 0xef, 0x3f, 0x29, 0x3d, 0x5e, 0xe6, 0x66, 0x83, 0xd1, 0xf6, 0xfe, 0x9d, 0xbc, 0xf1, 0x96, + /* (2^419)P */ 0x6b, 0xc6, 0x99, 0x26, 0x3c, 0xf3, 0x63, 0xf9, 0xc7, 0x29, 0x8c, 0x52, 0x62, 0x2d, 0xdc, 0x8a, 0x66, 0xce, 0x2c, 0xa7, 0xe4, 0xf0, 0xd7, 0x37, 0x17, 0x1e, 0xe4, 0xa3, 0x53, 0x7b, 0x29, 0x8e, 0x60, 0x99, 0xf9, 0x0c, 0x7c, 0x6f, 0xa2, 0xcc, 0x9f, 0x80, 0xdd, 0x5e, 0x46, 0xaa, 0x0d, 0x6c, 0xc9, 0x6c, 0xf7, 0x78, 0x5b, 0x38, 0xe3, 0x24, + /* (2^420)P */ 0x4b, 0x75, 0x6a, 0x2f, 0x08, 0xe1, 0x72, 0x76, 0xab, 0x82, 0x96, 0xdf, 0x3b, 0x1f, 0x9b, 0xd8, 0xed, 0xdb, 0xcd, 0x15, 0x09, 0x5a, 0x1e, 0xb7, 0xc5, 0x26, 0x72, 0x07, 0x0c, 0x50, 0xcd, 0x3b, 0x4d, 0x3f, 0xa2, 0x67, 0xc2, 0x02, 0x61, 0x2e, 0x68, 0xe9, 0x6f, 0xf0, 0x21, 0x2a, 
0xa7, 0x3b, 0x88, 0x04, 0x11, 0x64, 0x49, 0x0d, 0xb4, 0x46, + /* (2^421)P */ 0x63, 0x85, 0xf3, 0xc5, 0x2b, 0x5a, 0x9f, 0xf0, 0x17, 0xcb, 0x45, 0x0a, 0xf3, 0x6e, 0x7e, 0xb0, 0x7c, 0xbc, 0xf0, 0x4f, 0x3a, 0xb0, 0xbc, 0x36, 0x36, 0x52, 0x51, 0xcb, 0xfe, 0x9a, 0xcb, 0xe8, 0x7e, 0x4b, 0x06, 0x7f, 0xaa, 0x35, 0xc8, 0x0e, 0x7a, 0x30, 0xa3, 0xb1, 0x09, 0xbb, 0x86, 0x4c, 0xbe, 0xb8, 0xbd, 0xe0, 0x32, 0xa5, 0xd4, 0xf7, + /* (2^422)P */ 0x7d, 0x50, 0x37, 0x68, 0x4e, 0x22, 0xb2, 0x2c, 0xd5, 0x0f, 0x2b, 0x6d, 0xb1, 0x51, 0xf2, 0x82, 0xe9, 0x98, 0x7c, 0x50, 0xc7, 0x96, 0x7e, 0x0e, 0xdc, 0xb1, 0x0e, 0xb2, 0x63, 0x8c, 0x30, 0x37, 0x72, 0x21, 0x9c, 0x61, 0xc2, 0xa7, 0x33, 0xd9, 0xb2, 0x63, 0x93, 0xd1, 0x6b, 0x6a, 0x73, 0xa5, 0x58, 0x80, 0xff, 0x04, 0xc7, 0x83, 0x21, 0x29, + /* (2^423)P */ 0x29, 0x04, 0xbc, 0x99, 0x39, 0xc9, 0x58, 0xc9, 0x6b, 0x17, 0xe8, 0x90, 0xb3, 0xe6, 0xa9, 0xb6, 0x28, 0x9b, 0xcb, 0x3b, 0x28, 0x90, 0x68, 0x71, 0xff, 0xcf, 0x08, 0x78, 0xc9, 0x8d, 0xa8, 0x4e, 0x43, 0xd1, 0x1c, 0x9e, 0xa4, 0xe3, 0xdf, 0xbf, 0x92, 0xf4, 0xf9, 0x41, 0xba, 0x4d, 0x1c, 0xf9, 0xdd, 0x74, 0x76, 0x1c, 0x6e, 0x3e, 0x94, 0x87, + /* (2^424)P */ 0xe4, 0xda, 0xc5, 0xd7, 0xfb, 0x87, 0xc5, 0x4d, 0x6b, 0x19, 0xaa, 0xb9, 0xbc, 0x8c, 0xf2, 0x8a, 0xd8, 0x5d, 0xdb, 0x4d, 0xef, 0xa6, 0xf2, 0x65, 0xf1, 0x22, 0x9c, 0xf1, 0x46, 0x30, 0x71, 0x7c, 0xe4, 0x53, 0x8e, 0x55, 0x2e, 0x9c, 0x9a, 0x31, 0x2a, 0xc3, 0xab, 0x0f, 0xde, 0xe4, 0xbe, 0xd8, 0x96, 0x50, 0x6e, 0x0c, 0x54, 0x49, 0xe6, 0xec, + /* (2^425)P */ 0x3c, 0x1d, 0x5a, 0xa5, 0xda, 0xad, 0xdd, 0xc2, 0xae, 0xac, 0x6f, 0x86, 0x75, 0x31, 0x91, 0x64, 0x45, 0x9d, 0xa4, 0xf0, 0x81, 0xf1, 0x0e, 0xba, 0x74, 0xaf, 0x7b, 0xcd, 0x6f, 0xfe, 0xac, 0x4e, 0xdb, 0x4e, 0x45, 0x35, 0x36, 0xc5, 0xc0, 0x6c, 0x3d, 0x64, 0xf4, 0xd8, 0x07, 0x62, 0xd1, 0xec, 0xf3, 0xfc, 0x93, 0xc9, 0x28, 0x0c, 0x2c, 0xf3, + /* (2^426)P */ 0x0c, 0x69, 0x2b, 0x5c, 0xb6, 0x41, 0x69, 0xf1, 0xa4, 0xf1, 0x5b, 0x75, 0x4c, 0x42, 0x8b, 0x47, 0xeb, 0x69, 0xfb, 0xa8, 0xe6, 0xf9, 0x7b, 0x48, 0x50, 0xaf, 0xd3, 0xda, 0xb2, 0x35, 0x10, 0xb5, 0x5b, 0x40, 0x90, 0x39, 0xc9, 0x07, 0x06, 0x73, 0x26, 0x20, 0x95, 0x01, 0xa4, 0x2d, 0xf0, 0xe7, 0x2e, 0x00, 0x7d, 0x41, 0x09, 0x68, 0x13, 0xc4, + /* (2^427)P */ 0xbe, 0x38, 0x78, 0xcf, 0xc9, 0x4f, 0x36, 0xca, 0x09, 0x61, 0x31, 0x3c, 0x57, 0x2e, 0xec, 0x17, 0xa4, 0x7d, 0x19, 0x2b, 0x9b, 0x5b, 0xbe, 0x8f, 0xd6, 0xc5, 0x2f, 0x86, 0xf2, 0x64, 0x76, 0x17, 0x00, 0x6e, 0x1a, 0x8c, 0x67, 0x1b, 0x68, 0xeb, 0x15, 0xa2, 0xd6, 0x09, 0x91, 0xdd, 0x23, 0x0d, 0x98, 0xb2, 0x10, 0x19, 0x55, 0x9b, 0x63, 0xf2, + /* (2^428)P */ 0x51, 0x1f, 0x93, 0xea, 0x2a, 0x3a, 0xfa, 0x41, 0xc0, 0x57, 0xfb, 0x74, 0xa6, 0x65, 0x09, 0x56, 0x14, 0xb6, 0x12, 0xaa, 0xb3, 0x1a, 0x8d, 0x3b, 0x76, 0x91, 0x7a, 0x23, 0x56, 0x9c, 0x6a, 0xc0, 0xe0, 0x3c, 0x3f, 0xb5, 0x1a, 0xf4, 0x57, 0x71, 0x93, 0x2b, 0xb1, 0xa7, 0x70, 0x57, 0x22, 0x80, 0xf5, 0xb8, 0x07, 0x77, 0x87, 0x0c, 0xbe, 0x83, + /* (2^429)P */ 0x07, 0x9b, 0x0e, 0x52, 0x38, 0x63, 0x13, 0x86, 0x6a, 0xa6, 0xb4, 0xd2, 0x60, 0x68, 0x9a, 0x99, 0x82, 0x0a, 0x04, 0x5f, 0x89, 0x7a, 0x1a, 0x2a, 0xae, 0x2d, 0x35, 0x0c, 0x1e, 0xad, 0xef, 0x4f, 0x9a, 0xfc, 0xc8, 0xd9, 0xcf, 0x9d, 0x48, 0x71, 0xa5, 0x55, 0x79, 0x73, 0x39, 0x1b, 0xd8, 0x73, 0xec, 0x9b, 0x03, 0x16, 0xd8, 0x82, 0xf7, 0x67, + /* (2^430)P */ 0x52, 0x67, 0x42, 0x21, 0xc9, 0x40, 0x78, 0x82, 0x2b, 0x95, 0x2d, 0x20, 0x92, 0xd1, 0xe2, 0x61, 0x25, 0xb0, 0xc6, 0x9c, 0x20, 0x59, 0x8e, 0x28, 0x6f, 0xf3, 0xfd, 0xd3, 0xc1, 0x32, 0x43, 0xc9, 0xa6, 0x08, 0x7a, 0x77, 0x9c, 0x4c, 0x8c, 0x33, 0x71, 0x13, 0x69, 0xe3, 0x52, 0x30, 0xa7, 0xf5, 0x07, 0x67, 
0xac, 0xad, 0x46, 0x8a, 0x26, 0x25, + /* (2^431)P */ 0xda, 0x86, 0xc4, 0xa2, 0x71, 0x56, 0xdd, 0xd2, 0x48, 0xd3, 0xde, 0x42, 0x63, 0x01, 0xa7, 0x2c, 0x92, 0x83, 0x6f, 0x2e, 0xd8, 0x1e, 0x3f, 0xc1, 0xc5, 0x42, 0x4e, 0x34, 0x19, 0x54, 0x6e, 0x35, 0x2c, 0x51, 0x2e, 0xfd, 0x0f, 0x9a, 0x45, 0x66, 0x5e, 0x4a, 0x83, 0xda, 0x0a, 0x53, 0x68, 0x63, 0xfa, 0xce, 0x47, 0x20, 0xd3, 0x34, 0xba, 0x0d, + /* (2^432)P */ 0xd0, 0xe9, 0x64, 0xa4, 0x61, 0x4b, 0x86, 0xe5, 0x93, 0x6f, 0xda, 0x0e, 0x31, 0x7e, 0x6e, 0xe3, 0xc6, 0x73, 0xd8, 0xa3, 0x08, 0x57, 0x52, 0xcd, 0x51, 0x63, 0x1d, 0x9f, 0x93, 0x00, 0x62, 0x91, 0x26, 0x21, 0xa7, 0xdd, 0x25, 0x0f, 0x09, 0x0d, 0x35, 0xad, 0xcf, 0x11, 0x8e, 0x6e, 0xe8, 0xae, 0x1d, 0x95, 0xcb, 0x88, 0xf8, 0x70, 0x7b, 0x91, + /* (2^433)P */ 0x0c, 0x19, 0x5c, 0xd9, 0x8d, 0xda, 0x9d, 0x2c, 0x90, 0x54, 0x65, 0xe8, 0xb6, 0x35, 0x50, 0xae, 0xea, 0xae, 0x43, 0xb7, 0x1e, 0x99, 0x8b, 0x4c, 0x36, 0x4e, 0xe4, 0x1e, 0xc4, 0x64, 0x43, 0xb6, 0xeb, 0xd4, 0xe9, 0x60, 0x22, 0xee, 0xcf, 0xb8, 0x52, 0x1b, 0xf0, 0x04, 0xce, 0xbc, 0x2b, 0xf0, 0xbe, 0xcd, 0x44, 0x74, 0x1e, 0x1f, 0x63, 0xf9, + /* (2^434)P */ 0xe1, 0x3f, 0x95, 0x94, 0xb2, 0xb6, 0x31, 0xa9, 0x1b, 0xdb, 0xfd, 0x0e, 0xdb, 0xdd, 0x1a, 0x22, 0x78, 0x60, 0x9f, 0x75, 0x5f, 0x93, 0x06, 0x0c, 0xd8, 0xbb, 0xa2, 0x85, 0x2b, 0x5e, 0xc0, 0x9b, 0xa8, 0x5d, 0xaf, 0x93, 0x91, 0x91, 0x47, 0x41, 0x1a, 0xfc, 0xb4, 0x51, 0x85, 0xad, 0x69, 0x4d, 0x73, 0x69, 0xd5, 0x4e, 0x82, 0xfb, 0x66, 0xcb, + /* (2^435)P */ 0x7c, 0xbe, 0xc7, 0x51, 0xc4, 0x74, 0x6e, 0xab, 0xfd, 0x41, 0x4f, 0x76, 0x4f, 0x24, 0x03, 0xd6, 0x2a, 0xb7, 0x42, 0xb4, 0xda, 0x41, 0x2c, 0x82, 0x48, 0x4c, 0x7f, 0x6f, 0x25, 0x5d, 0x36, 0xd4, 0x69, 0xf5, 0xef, 0x02, 0x81, 0xea, 0x6f, 0x19, 0x69, 0xe8, 0x6f, 0x5b, 0x2f, 0x14, 0x0e, 0x6f, 0x89, 0xb4, 0xb5, 0xd8, 0xae, 0xef, 0x7b, 0x87, + /* (2^436)P */ 0xe9, 0x91, 0xa0, 0x8b, 0xc9, 0xe0, 0x01, 0x90, 0x37, 0xc1, 0x6f, 0xdc, 0x5e, 0xf7, 0xbf, 0x43, 0x00, 0xaa, 0x10, 0x76, 0x76, 0x18, 0x6e, 0x19, 0x1e, 0x94, 0x50, 0x11, 0x0a, 0xd1, 0xe2, 0xdb, 0x08, 0x21, 0xa0, 0x1f, 0xdb, 0x54, 0xfe, 0xea, 0x6e, 0xa3, 0x68, 0x56, 0x87, 0x0b, 0x22, 0x4e, 0x66, 0xf3, 0x82, 0x82, 0x00, 0xcd, 0xd4, 0x12, + /* (2^437)P */ 0x25, 0x8e, 0x24, 0x77, 0x64, 0x4c, 0xe0, 0xf8, 0x18, 0xc0, 0xdc, 0xc7, 0x1b, 0x35, 0x65, 0xde, 0x67, 0x41, 0x5e, 0x6f, 0x90, 0x82, 0xa7, 0x2e, 0x6d, 0xf1, 0x47, 0xb4, 0x92, 0x9c, 0xfd, 0x6a, 0x9a, 0x41, 0x36, 0x20, 0x24, 0x58, 0xc3, 0x59, 0x07, 0x9a, 0xfa, 0x9f, 0x03, 0xcb, 0xc7, 0x69, 0x37, 0x60, 0xe1, 0xab, 0x13, 0x72, 0xee, 0xa2, + /* (2^438)P */ 0x74, 0x78, 0xfb, 0x13, 0xcb, 0x8e, 0x37, 0x1a, 0xf6, 0x1d, 0x17, 0x83, 0x06, 0xd4, 0x27, 0x06, 0x21, 0xe8, 0xda, 0xdf, 0x6b, 0xf3, 0x83, 0x6b, 0x34, 0x8a, 0x8c, 0xee, 0x01, 0x05, 0x5b, 0xed, 0xd3, 0x1b, 0xc9, 0x64, 0x83, 0xc9, 0x49, 0xc2, 0x57, 0x1b, 0xdd, 0xcf, 0xf1, 0x9d, 0x63, 0xee, 0x1c, 0x0d, 0xa0, 0x0a, 0x73, 0x1f, 0x5b, 0x32, + /* (2^439)P */ 0x29, 0xce, 0x1e, 0xc0, 0x6a, 0xf5, 0xeb, 0x99, 0x5a, 0x39, 0x23, 0xe9, 0xdd, 0xac, 0x44, 0x88, 0xbc, 0x80, 0x22, 0xde, 0x2c, 0xcb, 0xa8, 0x3b, 0xff, 0xf7, 0x6f, 0xc7, 0x71, 0x72, 0xa8, 0xa3, 0xf6, 0x4d, 0xc6, 0x75, 0xda, 0x80, 0xdc, 0xd9, 0x30, 0xd9, 0x07, 0x50, 0x5a, 0x54, 0x7d, 0xda, 0x39, 0x6f, 0x78, 0x94, 0xbf, 0x25, 0x98, 0xdc, + /* (2^440)P */ 0x01, 0x26, 0x62, 0x44, 0xfb, 0x0f, 0x11, 0x72, 0x73, 0x0a, 0x16, 0xc7, 0x16, 0x9c, 0x9b, 0x37, 0xd8, 0xff, 0x4f, 0xfe, 0x57, 0xdb, 0xae, 0xef, 0x7d, 0x94, 0x30, 0x04, 0x70, 0x83, 0xde, 0x3c, 0xd4, 0xb5, 0x70, 0xda, 0xa7, 0x55, 0xc8, 0x19, 0xe1, 0x36, 0x15, 0x61, 0xe7, 0x3b, 0x7d, 0x85, 0xbb, 0xf3, 0x42, 0x5a, 0x94, 0xf4, 
0x53, 0x2a, + /* (2^441)P */ 0x14, 0x60, 0xa6, 0x0b, 0x83, 0xe1, 0x23, 0x77, 0xc0, 0xce, 0x50, 0xed, 0x35, 0x8d, 0x98, 0x99, 0x7d, 0xf5, 0x8d, 0xce, 0x94, 0x25, 0xc8, 0x0f, 0x6d, 0xfa, 0x4a, 0xa4, 0x3a, 0x1f, 0x66, 0xfb, 0x5a, 0x64, 0xaf, 0x8b, 0x54, 0x54, 0x44, 0x3f, 0x5b, 0x88, 0x61, 0xe4, 0x48, 0x45, 0x26, 0x20, 0xbe, 0x0d, 0x06, 0xbb, 0x65, 0x59, 0xe1, 0x36, + /* (2^442)P */ 0xb7, 0x98, 0xce, 0xa3, 0xe3, 0xee, 0x11, 0x1b, 0x9e, 0x24, 0x59, 0x75, 0x31, 0x37, 0x44, 0x6f, 0x6b, 0x9e, 0xec, 0xb7, 0x44, 0x01, 0x7e, 0xab, 0xbb, 0x69, 0x5d, 0x11, 0xb0, 0x30, 0x64, 0xea, 0x91, 0xb4, 0x7a, 0x8c, 0x02, 0x4c, 0xb9, 0x10, 0xa7, 0xc7, 0x79, 0xe6, 0xdc, 0x77, 0xe3, 0xc8, 0xef, 0x3e, 0xf9, 0x38, 0x81, 0xce, 0x9a, 0xb2, + /* (2^443)P */ 0x91, 0x12, 0x76, 0xd0, 0x10, 0xb4, 0xaf, 0xe1, 0x89, 0x3a, 0x93, 0x6b, 0x5c, 0x19, 0x5f, 0x24, 0xed, 0x04, 0x92, 0xc7, 0xf0, 0x00, 0x08, 0xc1, 0x92, 0xff, 0x90, 0xdb, 0xb2, 0xbf, 0xdf, 0x49, 0xcd, 0xbd, 0x5c, 0x6e, 0xbf, 0x16, 0xbb, 0x61, 0xf9, 0x20, 0x33, 0x35, 0x93, 0x11, 0xbc, 0x59, 0x69, 0xce, 0x18, 0x9f, 0xf8, 0x7b, 0xa1, 0x6e, + /* (2^444)P */ 0xa1, 0xf4, 0xaf, 0xad, 0xf8, 0xe6, 0x99, 0xd2, 0xa1, 0x4d, 0xde, 0x56, 0xc9, 0x7b, 0x0b, 0x11, 0x3e, 0xbf, 0x89, 0x1a, 0x9a, 0x90, 0xe5, 0xe2, 0xa6, 0x37, 0x88, 0xa1, 0x68, 0x59, 0xae, 0x8c, 0xec, 0x02, 0x14, 0x8d, 0xb7, 0x2e, 0x25, 0x75, 0x7f, 0x76, 0x1a, 0xd3, 0x4d, 0xad, 0x8a, 0x00, 0x6c, 0x96, 0x49, 0xa4, 0xc3, 0x2e, 0x5c, 0x7b, + /* (2^445)P */ 0x26, 0x53, 0xf7, 0xda, 0xa8, 0x01, 0x14, 0xb1, 0x63, 0xe3, 0xc3, 0x89, 0x88, 0xb0, 0x85, 0x40, 0x2b, 0x26, 0x9a, 0x10, 0x1a, 0x70, 0x33, 0xf4, 0x50, 0x9d, 0x4d, 0xd8, 0x64, 0xc6, 0x0f, 0xe1, 0x17, 0xc8, 0x10, 0x4b, 0xfc, 0xa0, 0xc9, 0xba, 0x2c, 0x98, 0x09, 0xf5, 0x84, 0xb6, 0x7c, 0x4e, 0xa3, 0xe3, 0x81, 0x1b, 0x32, 0x60, 0x02, 0xdd, + /* (2^446)P */ 0xa3, 0xe5, 0x86, 0xd4, 0x43, 0xa8, 0xd1, 0x98, 0x9d, 0x9d, 0xdb, 0x04, 0xcf, 0x6e, 0x35, 0x05, 0x30, 0x53, 0x3b, 0xbc, 0x90, 0x00, 0x4a, 0xc5, 0x40, 0x2a, 0x0f, 0xde, 0x1a, 0xd7, 0x36, 0x27, 0x44, 0x62, 0xa6, 0xac, 0x9d, 0xd2, 0x70, 0x69, 0x14, 0x39, 0x9b, 0xd1, 0xc3, 0x0a, 0x3a, 0x82, 0x0e, 0xf1, 0x94, 0xd7, 0x42, 0x94, 0xd5, 0x7d, + /* (2^447)P */ 0x04, 0xc0, 0x6e, 0x12, 0x90, 0x70, 0xf9, 0xdf, 0xf7, 0xc9, 0x86, 0xc0, 0xe6, 0x92, 0x8b, 0x0a, 0xa1, 0xc1, 0x3b, 0xcc, 0x33, 0xb7, 0xf0, 0xeb, 0x51, 0x50, 0x80, 0x20, 0x69, 0x1c, 0x4f, 0x89, 0x05, 0x1e, 0xe4, 0x7a, 0x0a, 0xc2, 0xf0, 0xf5, 0x78, 0x91, 0x76, 0x34, 0x45, 0xdc, 0x24, 0x53, 0x24, 0x98, 0xe2, 0x73, 0x6f, 0xe6, 0x46, 0x67, +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go new file mode 100644 index 0000000000..b6b236e5d3 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go @@ -0,0 +1,71 @@ +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +var ( + // genX is the x-coordinate of the generator of Goldilocks curve. + genX = fp.Elt{ + 0x5e, 0xc0, 0x0c, 0xc7, 0x2b, 0xa8, 0x26, 0x26, + 0x8e, 0x93, 0x00, 0x8b, 0xe1, 0x80, 0x3b, 0x43, + 0x11, 0x65, 0xb6, 0x2a, 0xf7, 0x1a, 0xae, 0x12, + 0x64, 0xa4, 0xd3, 0xa3, 0x24, 0xe3, 0x6d, 0xea, + 0x67, 0x17, 0x0f, 0x47, 0x70, 0x65, 0x14, 0x9e, + 0xda, 0x36, 0xbf, 0x22, 0xa6, 0x15, 0x1d, 0x22, + 0xed, 0x0d, 0xed, 0x6b, 0xc6, 0x70, 0x19, 0x4f, + } + // genY is the y-coordinate of the generator of Goldilocks curve. 
+ genY = fp.Elt{ + 0x14, 0xfa, 0x30, 0xf2, 0x5b, 0x79, 0x08, 0x98, + 0xad, 0xc8, 0xd7, 0x4e, 0x2c, 0x13, 0xbd, 0xfd, + 0xc4, 0x39, 0x7c, 0xe6, 0x1c, 0xff, 0xd3, 0x3a, + 0xd7, 0xc2, 0xa0, 0x05, 0x1e, 0x9c, 0x78, 0x87, + 0x40, 0x98, 0xa3, 0x6c, 0x73, 0x73, 0xea, 0x4b, + 0x62, 0xc7, 0xc9, 0x56, 0x37, 0x20, 0x76, 0x88, + 0x24, 0xbc, 0xb6, 0x6e, 0x71, 0x46, 0x3f, 0x69, + } + // paramD is -39081 in Fp. + paramD = fp.Elt{ + 0x56, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } + // order is 2^446-0x8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d, + // which is the number of points in the prime subgroup. + order = Scalar{ + 0xf3, 0x44, 0x58, 0xab, 0x92, 0xc2, 0x78, 0x23, + 0x55, 0x8f, 0xc5, 0x8d, 0x72, 0xc2, 0x6c, 0x21, + 0x90, 0x36, 0xd6, 0xae, 0x49, 0xdb, 0x4e, 0xc4, + 0xe9, 0x23, 0xca, 0x7c, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, + } + // residue448 is 2^448 mod order. + residue448 = [4]uint64{ + 0x721cf5b5529eec34, 0x7a4cf635c8e9c2ab, 0xeec492d944a725bf, 0x20cd77058, + } + // invFour is 1/4 mod order. + invFour = Scalar{ + 0x3d, 0x11, 0xd6, 0xaa, 0xa4, 0x30, 0xde, 0x48, + 0xd5, 0x63, 0x71, 0xa3, 0x9c, 0x30, 0x5b, 0x08, + 0xa4, 0x8d, 0xb5, 0x6b, 0xd2, 0xb6, 0x13, 0x71, + 0xfa, 0x88, 0x32, 0xdf, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, + } + // paramDTwist is -39082 in Fp. The D parameter of the twist curve. + paramDTwist = fp.Elt{ + 0x55, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } +) diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go new file mode 100644 index 0000000000..5a939100d2 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go @@ -0,0 +1,80 @@ +// Package goldilocks provides elliptic curve operations over the goldilocks curve. +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +// Curve is the Goldilocks curve x^2+y^2=z^2-39081x^2y^2. +type Curve struct{} + +// Identity returns the identity point. +func (Curve) Identity() *Point { + return &Point{ + y: fp.One(), + z: fp.One(), + } +} + +// IsOnCurve returns true if the point lies on the curve. 
+func (Curve) IsOnCurve(P *Point) bool { + x2, y2, t, t2, z2 := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{} + rhs, lhs := &fp.Elt{}, &fp.Elt{} + fp.Mul(t, &P.ta, &P.tb) // t = ta*tb + fp.Sqr(x2, &P.x) // x^2 + fp.Sqr(y2, &P.y) // y^2 + fp.Sqr(z2, &P.z) // z^2 + fp.Sqr(t2, t) // t^2 + fp.Add(lhs, x2, y2) // x^2 + y^2 + fp.Mul(rhs, t2, &paramD) // dt^2 + fp.Add(rhs, rhs, z2) // z^2 + dt^2 + fp.Sub(lhs, lhs, rhs) // x^2 + y^2 - (z^2 + dt^2) + eq0 := fp.IsZero(lhs) + + fp.Mul(lhs, &P.x, &P.y) // xy + fp.Mul(rhs, t, &P.z) // tz + fp.Sub(lhs, lhs, rhs) // xy - tz + eq1 := fp.IsZero(lhs) + return eq0 && eq1 +} + +// Generator returns the generator point. +func (Curve) Generator() *Point { + return &Point{ + x: genX, + y: genY, + z: fp.One(), + ta: genX, + tb: genY, + } +} + +// Order returns the number of points in the prime subgroup. +func (Curve) Order() Scalar { return order } + +// Double returns 2P. +func (Curve) Double(P *Point) *Point { R := *P; R.Double(); return &R } + +// Add returns P+Q. +func (Curve) Add(P, Q *Point) *Point { R := *P; R.Add(Q); return &R } + +// ScalarMult returns kP. This function runs in constant time. +func (e Curve) ScalarMult(k *Scalar, P *Point) *Point { + k4 := &Scalar{} + k4.divBy4(k) + return e.pull(twistCurve{}.ScalarMult(k4, e.push(P))) +} + +// ScalarBaseMult returns kG where G is the generator point. This function runs in constant time. +func (e Curve) ScalarBaseMult(k *Scalar) *Point { + k4 := &Scalar{} + k4.divBy4(k) + return e.pull(twistCurve{}.ScalarBaseMult(k4)) +} + +// CombinedMult returns mG+nP, where G is the generator point. This function is non-constant time. +func (e Curve) CombinedMult(m, n *Scalar, P *Point) *Point { + m4 := &Scalar{} + n4 := &Scalar{} + m4.divBy4(m) + n4.divBy4(n) + return e.pull(twistCurve{}.CombinedMult(m4, n4, twistCurve{}.pull(P))) +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go new file mode 100644 index 0000000000..b1daab851c --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go @@ -0,0 +1,52 @@ +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +func (Curve) pull(P *twistPoint) *Point { return twistCurve{}.push(P) } +func (twistCurve) pull(P *Point) *twistPoint { return Curve{}.push(P) } + +// push sends a point on the Goldilocks curve to a point on the twist curve. +func (Curve) push(P *Point) *twistPoint { + Q := &twistPoint{} + Px, Py, Pz := &P.x, &P.y, &P.z + a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + *d = *a // D = A + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, a) // (x+y)^2-A + fp.Sub(e, e, b) // E = (x+y)^2-A-B + fp.Add(h, b, d) // H = B+D + fp.Sub(g, b, d) // G = B-D + fp.Sub(f, c, h) // F = C-H + fp.Mul(&Q.z, f, g) // Z = F * G + fp.Mul(&Q.x, e, f) // X = E * F + fp.Mul(&Q.y, g, h) // Y = G * H, T = E * H + return Q +} + +// push sends a point on the twist curve to a point on the Goldilocks curve.
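// Curve.ScalarMult above works through the 4-degree isogeny: push and pull
// map between the Goldilocks curve and its twist, and their composition is
// multiplication by 4 on the prime-order subgroup, which the initial divBy4
// (multiplication by 1/4 mod order) cancels. A standalone usage sketch of
// the public API shown above (illustrative only; k = 5 is an arbitrary
// example scalar):
package main

import (
	"fmt"

	"github.com/cloudflare/circl/ecc/goldilocks"
)

func main() {
	var k goldilocks.Scalar
	k.FromBytes([]byte{0x05}) // k = 5

	G := goldilocks.Curve{}.Generator()
	P := goldilocks.Curve{}.ScalarMult(&k, G)  // constant time, arbitrary point
	Q := goldilocks.Curve{}.ScalarBaseMult(&k) // constant time, fixed base

	fmt.Println(P.IsEqual(Q)) // true: both compute 5G
}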
+func (twistCurve) push(P *twistPoint) *Point { + Q := &Point{} + Px, Py, Pz := &P.x, &P.y, &P.z + a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + fp.Neg(d, a) // D = -A + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, a) // (x+y)^2-A + fp.Sub(e, e, b) // E = (x+y)^2-A-B + fp.Add(h, b, d) // H = B+D + fp.Sub(g, b, d) // G = B-D + fp.Sub(f, c, h) // F = C-H + fp.Mul(&Q.z, f, g) // Z = F * G + fp.Mul(&Q.x, e, f) // X = E * F + fp.Mul(&Q.y, g, h) // Y = G * H, T = E * H + return Q +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go new file mode 100644 index 0000000000..11f73de054 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go @@ -0,0 +1,171 @@ +package goldilocks + +import ( + "errors" + "fmt" + + fp "github.com/cloudflare/circl/math/fp448" +) + +// Point is a point on the Goldilocks Curve. +type Point struct{ x, y, z, ta, tb fp.Elt } + +func (P Point) String() string { + return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb) +} + +// FromAffine creates a point from affine coordinates. +func FromAffine(x, y *fp.Elt) (*Point, error) { + P := &Point{ + x: *x, + y: *y, + z: fp.One(), + ta: *x, + tb: *y, + } + if !(Curve{}).IsOnCurve(P) { + return P, errors.New("point not on curve") + } + return P, nil +} + +// isLessThan returns true if 0 <= x < y, and assumes that slices are of the +// same length and are interpreted in little-endian order. +func isLessThan(x, y []byte) bool { + i := len(x) - 1 + for i > 0 && x[i] == y[i] { + i-- + } + return x[i] < y[i] +} + +// FromBytes returns a point from the input buffer. +func FromBytes(in []byte) (*Point, error) { + if len(in) < fp.Size+1 { + return nil, errors.New("wrong input length") + } + err := errors.New("invalid decoding") + P := &Point{} + signX := in[fp.Size] >> 7 + copy(P.y[:], in[:fp.Size]) + p := fp.P() + if !isLessThan(P.y[:], p[:]) { + return nil, err + } + + u, v := &fp.Elt{}, &fp.Elt{} + one := fp.One() + fp.Sqr(u, &P.y) // u = y^2 + fp.Mul(v, u, &paramD) // v = dy^2 + fp.Sub(u, u, &one) // u = y^2-1 + fp.Sub(v, v, &one) // v = dy^2-1 + isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v) + if !isQR { + return nil, err + } + fp.Modp(&P.x) // x = x mod p + if fp.IsZero(&P.x) && signX == 1 { + return nil, err + } + if signX != (P.x[0] & 1) { + fp.Neg(&P.x, &P.x) + } + P.ta = P.x + P.tb = P.y + P.z = fp.One() + return P, nil +} + +// IsIdentity returns true if P is the identity Point. +func (P *Point) IsIdentity() bool { + return fp.IsZero(&P.x) && !fp.IsZero(&P.y) && !fp.IsZero(&P.z) && P.y == P.z +} + +// IsEqual returns true if P is equivalent to Q. +func (P *Point) IsEqual(Q *Point) bool { + l, r := &fp.Elt{}, &fp.Elt{} + fp.Mul(l, &P.x, &Q.z) + fp.Mul(r, &Q.x, &P.z) + fp.Sub(l, l, r) + b := fp.IsZero(l) + fp.Mul(l, &P.y, &Q.z) + fp.Mul(r, &Q.y, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + fp.Mul(l, &P.ta, &P.tb) + fp.Mul(l, l, &Q.z) + fp.Mul(r, &Q.ta, &Q.tb) + fp.Mul(r, r, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + return b +} + +// Neg obtains the inverse of the Point. +func (P *Point) Neg() { fp.Neg(&P.x, &P.x); fp.Neg(&P.ta, &P.ta) } + +// ToAffine returns the x,y affine coordinates of P.
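// A worked restatement of the decompression in FromBytes above: starting
// from the affine equation x^2 + y^2 = 1 + d*x^2*y^2, solving for x^2 gives
//
//	x^2 = (y^2 - 1) / (d*y^2 - 1)
//
// which is exactly u/v in the code; fp.InvSqrt computes sqrt(u/v) fused with
// the division, and the serialized sign bit then selects between +x and -x.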
+func (P *Point) ToAffine() (x, y fp.Elt) { + fp.Inv(&P.z, &P.z) // 1/z + fp.Mul(&P.x, &P.x, &P.z) // x/z + fp.Mul(&P.y, &P.y, &P.z) // y/z + fp.Modp(&P.x) + fp.Modp(&P.y) + fp.SetOne(&P.z) + P.ta = P.x + P.tb = P.y + return P.x, P.y +} + +// ToBytes stores P into a slice of bytes. +func (P *Point) ToBytes(out []byte) error { + if len(out) < fp.Size+1 { + return errors.New("invalid decoding") + } + x, y := P.ToAffine() + out[fp.Size] = (x[0] & 1) << 7 + return fp.ToBytes(out[:fp.Size], &y) +} + +// MarshalBinary encodes the receiver into a binary form and returns the result. +func (P *Point) MarshalBinary() (data []byte, err error) { + data = make([]byte, fp.Size+1) + err = P.ToBytes(data[:fp.Size+1]) + return data, err +} + +// UnmarshalBinary must be able to decode the form generated by MarshalBinary. +func (P *Point) UnmarshalBinary(data []byte) error { Q, err := FromBytes(data); *P = *Q; return err } + +// Double sets P = 2P. +func (P *Point) Double() { P.Add(P) } + +// Add sets P = P+Q. +func (P *Point) Add(Q *Point) { + // This is formula (5) from "Twisted Edwards Curves Revisited" by + // Hisil H., Wong K.KH., Carter G., Dawson E. (2008) + // https://doi.org/10.1007/978-3-540-89255-7_20 + x1, y1, z1, ta1, tb1 := &P.x, &P.y, &P.z, &P.ta, &P.tb + x2, y2, z2, ta2, tb2 := &Q.x, &Q.y, &Q.z, &Q.ta, &Q.tb + x3, y3, z3, E, H := &P.x, &P.y, &P.z, &P.ta, &P.tb + A, B, C, D := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{} + t1, t2, F, G := C, D, &fp.Elt{}, &fp.Elt{} + fp.Mul(t1, ta1, tb1) // t1 = ta1*tb1 + fp.Mul(t2, ta2, tb2) // t2 = ta2*tb2 + fp.Mul(A, x1, x2) // A = x1*x2 + fp.Mul(B, y1, y2) // B = y1*y2 + fp.Mul(C, t1, t2) // t1*t2 + fp.Mul(C, C, &paramD) // C = d*t1*t2 + fp.Mul(D, z1, z2) // D = z1*z2 + fp.Add(F, x1, y1) // x1+y1 + fp.Add(E, x2, y2) // x2+y2 + fp.Mul(E, E, F) // (x1+y1)*(x2+y2) + fp.Sub(E, E, A) // (x1+y1)*(x2+y2)-A + fp.Sub(E, E, B) // E = (x1+y1)*(x2+y2)-A-B + fp.Sub(F, D, C) // F = D-C + fp.Add(G, D, C) // G = D+C + fp.Sub(H, B, A) // H = B-A + fp.Mul(z3, F, G) // Z = F * G + fp.Mul(x3, E, F) // X = E * F + fp.Mul(y3, G, H) // Y = G * H, T = E * H +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go new file mode 100644 index 0000000000..f98117b252 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go @@ -0,0 +1,203 @@ +package goldilocks + +import ( + "encoding/binary" + "math/bits" +) + +// ScalarSize is the size (in bytes) of scalars. +const ScalarSize = 56 // 448 / 8 + +// _N is the number of 64-bit words to store scalars. +const _N = 7 // 448 / 64 + +// Scalar represents a positive integer stored in little-endian order.
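// The encoding implemented by ToBytes/FromBytes is standard Edwards point
// compression: 56 bytes of y plus the parity of x in the top bit of the
// 57th byte. A standalone round-trip sketch against the API above
// (illustrative only):
package main

import (
	"fmt"

	"github.com/cloudflare/circl/ecc/goldilocks"
)

func main() {
	P := goldilocks.Curve{}.Generator()

	buf, err := P.MarshalBinary() // 57 bytes: y || sign(x)
	if err != nil {
		panic(err)
	}

	Q := &goldilocks.Point{}
	if err := Q.UnmarshalBinary(buf); err != nil {
		panic(err)
	}
	fmt.Println(P.IsEqual(Q)) // true
}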
+type Scalar [ScalarSize]byte + +type scalar64 [_N]uint64 + +func (z *scalar64) fromScalar(x *Scalar) { + z[0] = binary.LittleEndian.Uint64(x[0*8 : 1*8]) + z[1] = binary.LittleEndian.Uint64(x[1*8 : 2*8]) + z[2] = binary.LittleEndian.Uint64(x[2*8 : 3*8]) + z[3] = binary.LittleEndian.Uint64(x[3*8 : 4*8]) + z[4] = binary.LittleEndian.Uint64(x[4*8 : 5*8]) + z[5] = binary.LittleEndian.Uint64(x[5*8 : 6*8]) + z[6] = binary.LittleEndian.Uint64(x[6*8 : 7*8]) +} + +func (z *scalar64) toScalar(x *Scalar) { + binary.LittleEndian.PutUint64(x[0*8:1*8], z[0]) + binary.LittleEndian.PutUint64(x[1*8:2*8], z[1]) + binary.LittleEndian.PutUint64(x[2*8:3*8], z[2]) + binary.LittleEndian.PutUint64(x[3*8:4*8], z[3]) + binary.LittleEndian.PutUint64(x[4*8:5*8], z[4]) + binary.LittleEndian.PutUint64(x[5*8:6*8], z[5]) + binary.LittleEndian.PutUint64(x[6*8:7*8], z[6]) +} + +// add calculates z = x + y. Assumes len(z) >= max(len(x),len(y)). +func add(z, x, y []uint64) uint64 { + l, L, zz := len(x), len(y), y + if l > L { + l, L, zz = L, l, x + } + c := uint64(0) + for i := 0; i < l; i++ { + z[i], c = bits.Add64(x[i], y[i], c) + } + for i := l; i < L; i++ { + z[i], c = bits.Add64(zz[i], 0, c) + } + return c +} + +// sub calculates z = x - y. Assumes len(z) >= max(len(x),len(y)). +func sub(z, x, y []uint64) uint64 { + l, L, zz := len(x), len(y), y + if l > L { + l, L, zz = L, l, x + } + c := uint64(0) + for i := 0; i < l; i++ { + z[i], c = bits.Sub64(x[i], y[i], c) + } + for i := l; i < L; i++ { + z[i], c = bits.Sub64(zz[i], 0, c) + } + return c +} + +// mulWord calculates z = x * y. Assumes len(z) >= len(x)+1. +func mulWord(z, x []uint64, y uint64) { + for i := range z { + z[i] = 0 + } + carry := uint64(0) + for i := range x { + hi, lo := bits.Mul64(x[i], y) + lo, cc := bits.Add64(lo, z[i], 0) + hi, _ = bits.Add64(hi, 0, cc) + z[i], cc = bits.Add64(lo, carry, 0) + carry, _ = bits.Add64(hi, 0, cc) + } + z[len(x)] = carry +} + +// Cmov moves x into z if b=1. +func (z *scalar64) Cmov(b uint64, x *scalar64) { + m := uint64(0) - b + for i := range z { + z[i] = (z[i] &^ m) | (x[i] & m) + } +} + +// leftShift shifts the words of z to the left, returning the most significant word. +func (z *scalar64) leftShift(low uint64) uint64 { + high := z[_N-1] + for i := _N - 1; i > 0; i-- { + z[i] = z[i-1] + } + z[0] = low + return high +} + +// reduceOneWord calculates z = z + 2^448*x such that the result fits in a Scalar. +func (z *scalar64) reduceOneWord(x uint64) { + prod := (&scalar64{})[:] + mulWord(prod, residue448[:], x) + cc := add(z[:], z[:], prod) + mulWord(prod, residue448[:], cc) + add(z[:], z[:], prod) +} + +// modOrder reduces z mod order. +func (z *scalar64) modOrder() { + var o64, x scalar64 + o64.fromScalar(&order) + // Performs: while (z >= order) { z = z-order } + // At most 8 (eight) iterations reduce 3 bits by subtracting. + for i := 0; i < 8; i++ { + c := sub(x[:], z[:], o64[:]) // (c || x) = z-order + z.Cmov(1-c, &x) // if c == 0 { z = x } + } +} + +// FromBytes stores z = x mod order, where x is a number stored in little-endian order. +func (z *Scalar) FromBytes(x []byte) { + n := len(x) + nCeil := (n + 7) >> 3 + for i := range z { + z[i] = 0 + } + if nCeil < _N { + copy(z[:], x) + return + } + copy(z[:], x[8*(nCeil-_N):]) + var z64 scalar64 + z64.fromScalar(z) + for i := nCeil - _N - 1; i >= 0; i-- { + low := binary.LittleEndian.Uint64(x[8*i:]) + high := z64.leftShift(low) + z64.reduceOneWord(high) + } + z64.modOrder() + z64.toScalar(z) +} + +// divBy4 calculates z = x/4 mod order.
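// reduceOneWord above is the core of Mul and FromBytes: an overflow word w
// with weight 2^448 is folded back into seven words by adding w*residue448
// instead, using 2^448 == residue448 (mod order). A big.Int model of that
// identity (an illustrative, standalone sketch with arbitrary example
// values; not part of the vendored package):
package main

import (
	"fmt"
	"math/big"
)

func main() {
	c, _ := new(big.Int).SetString(
		"8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d", 16)
	order := new(big.Int).Lsh(big.NewInt(1), 446)
	order.Sub(order, c) // order = 2^446 - c

	z := big.NewInt(12345)
	w := new(big.Int).SetUint64(0xdeadbeef)

	// Left side: the exact value z + 2^448*w.
	lhs := new(big.Int).Add(z, new(big.Int).Lsh(w, 448))

	// Right side: what reduceOneWord computes, z + residue448*w.
	residue448 := new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 448), order)
	rhs := new(big.Int).Add(z, new(big.Int).Mul(residue448, w))

	// Both sides agree modulo the group order.
	fmt.Println(lhs.Mod(lhs, order).Cmp(rhs.Mod(rhs, order)) == 0) // true
}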
+func (z *Scalar) divBy4(x *Scalar) { z.Mul(x, &invFour) } + +// Red reduces z mod order. +func (z *Scalar) Red() { var t scalar64; t.fromScalar(z); t.modOrder(); t.toScalar(z) } + +// Neg calculates z = -z mod order. +func (z *Scalar) Neg() { z.Sub(&order, z) } + +// Add calculates z = x+y mod order. +func (z *Scalar) Add(x, y *Scalar) { + var z64, x64, y64, t scalar64 + x64.fromScalar(x) + y64.fromScalar(y) + c := add(z64[:], x64[:], y64[:]) + add(t[:], z64[:], residue448[:]) + z64.Cmov(c, &t) + z64.modOrder() + z64.toScalar(z) +} + +// Sub calculates z = x-y mod order. +func (z *Scalar) Sub(x, y *Scalar) { + var z64, x64, y64, t scalar64 + x64.fromScalar(x) + y64.fromScalar(y) + c := sub(z64[:], x64[:], y64[:]) + sub(t[:], z64[:], residue448[:]) + z64.Cmov(c, &t) + z64.modOrder() + z64.toScalar(z) +} + +// Mul calculates z = x*y mod order. +func (z *Scalar) Mul(x, y *Scalar) { + var z64, x64, y64 scalar64 + prod := (&[_N + 1]uint64{})[:] + x64.fromScalar(x) + y64.fromScalar(y) + mulWord(prod, x64[:], y64[_N-1]) + copy(z64[:], prod[:_N]) + z64.reduceOneWord(prod[_N]) + for i := _N - 2; i >= 0; i-- { + h := z64.leftShift(0) + z64.reduceOneWord(h) + mulWord(prod, x64[:], y64[i]) + c := add(z64[:], z64[:], prod[:_N]) + z64.reduceOneWord(prod[_N] + c) + } + z64.modOrder() + z64.toScalar(z) +} + +// IsZero returns true if z=0. +func (z *Scalar) IsZero() bool { z.Red(); return *z == Scalar{} } diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go new file mode 100644 index 0000000000..83d7cdadd3 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go @@ -0,0 +1,138 @@ +package goldilocks + +import ( + "crypto/subtle" + "math/bits" + + "github.com/cloudflare/circl/internal/conv" + "github.com/cloudflare/circl/math" + fp "github.com/cloudflare/circl/math/fp448" +) + +// twistCurve is -x^2+y^2=1-39082x^2y^2 and is 4-isogenous to Goldilocks. +type twistCurve struct{} + +// Identity returns the identity point. +func (twistCurve) Identity() *twistPoint { + return &twistPoint{ + y: fp.One(), + z: fp.One(), + } +} + +// subYDiv16 updates x = (x - y) / 16. +func subYDiv16(x *scalar64, y int64) { + s := uint64(y >> 63) + x0, b0 := bits.Sub64((*x)[0], uint64(y), 0) + x1, b1 := bits.Sub64((*x)[1], s, b0) + x2, b2 := bits.Sub64((*x)[2], s, b1) + x3, b3 := bits.Sub64((*x)[3], s, b2) + x4, b4 := bits.Sub64((*x)[4], s, b3) + x5, b5 := bits.Sub64((*x)[5], s, b4) + x6, _ := bits.Sub64((*x)[6], s, b5) + x[0] = (x0 >> 4) | (x1 << 60) + x[1] = (x1 >> 4) | (x2 << 60) + x[2] = (x2 >> 4) | (x3 << 60) + x[3] = (x3 >> 4) | (x4 << 60) + x[4] = (x4 >> 4) | (x5 << 60) + x[5] = (x5 >> 4) | (x6 << 60) + x[6] = (x6 >> 4) +} + +func recodeScalar(d *[113]int8, k *Scalar) { + var k64 scalar64 + k64.fromScalar(k) + for i := 0; i < 112; i++ { + d[i] = int8((k64[0] & 0x1f) - 16) + subYDiv16(&k64, int64(d[i])) + } + d[112] = int8(k64[0]) +}
 + +// ScalarMult returns kP.
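// recodeScalar (defined above, used by ScalarMult below) rewrites an odd
// scalar in signed radix-16: each digit d[i] = (k mod 32) - 16 is odd and
// lies in [-15, 15], and the update k = (k - d[i])/16 preserves
// k = sum d[i]*16^i over 113 digits, so only the odd multiples 1P..15P are
// needed per window (the 8-entry table TabP). A big.Int model of the
// recoding (an illustrative, standalone sketch; not part of the package):
package main

import (
	"fmt"
	"math/big"
)

// recodeModel mirrors recodeScalar; it assumes k is odd, which ScalarMult
// guarantees by conditionally negating the scalar beforehand.
func recodeModel(k *big.Int) [113]int64 {
	var d [113]int64
	t := new(big.Int).Set(k)
	mask := big.NewInt(31)
	for i := 0; i < 112; i++ {
		d[i] = new(big.Int).And(t, mask).Int64() - 16 // odd digit in [-15, 15]
		t.Sub(t, big.NewInt(d[i]))
		t.Rsh(t, 4) // t = (t - d[i]) / 16
	}
	d[112] = t.Int64() // small top digit left over
	return d
}

func main() {
	k := big.NewInt(0x12345) // small odd example scalar
	d := recodeModel(k)

	// Reconstruct sum d[i]*16^i by Horner's rule to check the invariant.
	sum := new(big.Int)
	for i := 112; i >= 0; i-- {
		sum.Lsh(sum, 4)
		sum.Add(sum, big.NewInt(d[i]))
	}
	fmt.Println(sum.Cmp(k) == 0) // true
}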
+func (e twistCurve) ScalarMult(k *Scalar, P *twistPoint) *twistPoint { + var TabP [8]preTwistPointProy + var S preTwistPointProy + var d [113]int8 + + var isZero int + if k.IsZero() { + isZero = 1 + } + subtle.ConstantTimeCopy(isZero, k[:], order[:]) + + minusK := *k + isEven := 1 - int(k[0]&0x1) + minusK.Neg() + subtle.ConstantTimeCopy(isEven, k[:], minusK[:]) + recodeScalar(&d, k) + + P.oddMultiples(TabP[:]) + Q := e.Identity() + for i := 112; i >= 0; i-- { + Q.Double() + Q.Double() + Q.Double() + Q.Double() + mask := d[i] >> 7 + absDi := (d[i] + mask) ^ mask + inx := int32((absDi - 1) >> 1) + sig := int((d[i] >> 7) & 0x1) + for j := range TabP { + S.cmov(&TabP[j], uint(subtle.ConstantTimeEq(inx, int32(j)))) + } + S.cneg(sig) + Q.mixAdd(&S) + } + Q.cneg(uint(isEven)) + return Q +} + +const ( + omegaFix = 7 + omegaVar = 5 +) + +// CombinedMult returns mG+nP. +func (e twistCurve) CombinedMult(m, n *Scalar, P *twistPoint) *twistPoint { + nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m[:]), omegaFix) + nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n[:]), omegaVar) + + if len(nafFix) > len(nafVar) { + nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...) + } else if len(nafFix) < len(nafVar) { + nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...) + } + + var TabQ [1 << (omegaVar - 2)]preTwistPointProy + P.oddMultiples(TabQ[:]) + Q := e.Identity() + for i := len(nafFix) - 1; i >= 0; i-- { + Q.Double() + // Generator point + if nafFix[i] != 0 { + idxM := absolute(nafFix[i]) >> 1 + R := tabVerif[idxM] + if nafFix[i] < 0 { + R.neg() + } + Q.mixAddZ1(&R) + } + // Variable input point + if nafVar[i] != 0 { + idxN := absolute(nafVar[i]) >> 1 + S := TabQ[idxN] + if nafVar[i] < 0 { + S.neg() + } + Q.mixAdd(&S) + } + } + return Q +} + +// absolute always returns a non-negative value. +func absolute(x int32) int32 { + mask := x >> 31 + return (x + mask) ^ mask +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go new file mode 100644 index 0000000000..c55db77b06 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go @@ -0,0 +1,135 @@ +package goldilocks + +import ( + "fmt" + + fp "github.com/cloudflare/circl/math/fp448" +) + +type twistPoint struct{ x, y, z, ta, tb fp.Elt } + +type preTwistPointAffine struct{ addYX, subYX, dt2 fp.Elt } + +type preTwistPointProy struct { + preTwistPointAffine + z2 fp.Elt +} + +func (P *twistPoint) String() string { + return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb) +} + +// cneg conditionally negates the point if b=1. +func (P *twistPoint) cneg(b uint) { + t := &fp.Elt{} + fp.Neg(t, &P.x) + fp.Cmov(&P.x, t, b) + fp.Neg(t, &P.ta) + fp.Cmov(&P.ta, t, b) +} + +// Double updates P with 2P. +func (P *twistPoint) Double() { + // This is formula (7) from "Twisted Edwards Curves Revisited" by + // Hisil H., Wong K.KH., Carter G., Dawson E.
(2008) + // https://doi.org/10.1007/978-3-540-89255-7_20 + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + fp.Add(h, a, b) // H = A+B + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, h) // E = (x+y)^2-A-B + fp.Sub(g, b, a) // G = B-A + fp.Sub(f, c, g) // F = C-G + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +// mixAddZ1 calculates P = P+Q, where Q is a precomputed point with Z_Q = 1. +func (P *twistPoint) mixAddZ1(Q *preTwistPointAffine) { + fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 (z2=1) + P.coreAddition(Q) +} + +// coreAddition calculates P=P+Q for curves with A=-1. +func (P *twistPoint) coreAddition(Q *preTwistPointAffine) { + // This is the formula following (5) from "Twisted Edwards Curves Revisited" by + // Hisil H., Wong K.KH., Carter G., Dawson E. (2008) + // https://doi.org/10.1007/978-3-540-89255-7_20 + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2 + a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb + fp.Mul(c, Pta, Ptb) // t1 = ta*tb + fp.Sub(h, Py, Px) // y1-x1 + fp.Add(b, Py, Px) // y1+x1 + fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2) + fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2) + fp.Mul(c, c, dt2) // C = 2*D*t1*t2 + fp.Sub(e, b, a) // E = B-A + fp.Add(h, b, a) // H = B+A + fp.Sub(f, d, c) // F = D-C + fp.Add(g, d, c) // G = D+C + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +func (P *preTwistPointAffine) neg() { + P.addYX, P.subYX = P.subYX, P.addYX + fp.Neg(&P.dt2, &P.dt2) +} + +func (P *preTwistPointAffine) cneg(b int) { + t := &fp.Elt{} + fp.Cswap(&P.addYX, &P.subYX, uint(b)) + fp.Neg(t, &P.dt2) + fp.Cmov(&P.dt2, t, uint(b)) +} + +func (P *preTwistPointAffine) cmov(Q *preTwistPointAffine, b uint) { + fp.Cmov(&P.addYX, &Q.addYX, b) + fp.Cmov(&P.subYX, &Q.subYX, b) + fp.Cmov(&P.dt2, &Q.dt2, b) +} + +// mixAdd calculates P = P+Q, where Q is a precomputed point with Z_Q != 1. +func (P *twistPoint) mixAdd(Q *preTwistPointProy) { + fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2 + P.coreAddition(&Q.preTwistPointAffine) +} + +// oddMultiples calculates T[i] = (2*i+1)P for 0 <= i < len(T). +func (P *twistPoint) oddMultiples(T []preTwistPointProy) { + if n := len(T); n > 0 { + T[0].FromTwistPoint(P) + _2P := *P + _2P.Double() + R := &preTwistPointProy{} + R.FromTwistPoint(&_2P) + for i := 1; i < n; i++ { + P.mixAdd(R) + T[i].FromTwistPoint(P) + } + } +} + +// cmov conditionally moves Q into P if b=1. +func (P *preTwistPointProy) cmov(Q *preTwistPointProy, b uint) { + P.preTwistPointAffine.cmov(&Q.preTwistPointAffine, b) + fp.Cmov(&P.z2, &Q.z2, b) +} + +// FromTwistPoint precomputes some coordinates of Q for mixed addition.
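// In affine coordinates, the unified a = -1 addition law that coreAddition
// evaluates reads (a sketch of the underlying math, with d' = -39082 the
// twist parameter):
//
//	x3 = (x1*y2 + y1*x2) / (1 + d'*x1*x2*y1*y2)
//	y3 = (y1*y2 + x1*x2) / (1 - d'*x1*x2*y1*y2)
//
// Storing (y+x, y-x, 2*d'*t) per precomputed point, as FromTwistPoint does
// below, lets coreAddition evaluate this with seven field multiplications
// and no inversions (mixAdd spends one more multiplication on z1*z2).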
+func (P *preTwistPointProy) FromTwistPoint(Q *twistPoint) { + fp.Add(&P.addYX, &Q.y, &Q.x) // addYX = X + Y + fp.Sub(&P.subYX, &Q.y, &Q.x) // subYX = Y - X + fp.Mul(&P.dt2, &Q.ta, &Q.tb) // T = ta*tb + fp.Mul(&P.dt2, &P.dt2, ¶mDTwist) // D*T + fp.Add(&P.dt2, &P.dt2, &P.dt2) // dt2 = 2*D*T + fp.Add(&P.z2, &Q.z, &Q.z) // z2 = 2*Z +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go new file mode 100644 index 0000000000..ed432e02c7 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go @@ -0,0 +1,216 @@ +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +var tabFixMult = [fxV][fx2w1]preTwistPointAffine{ + { + { + addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55}, + }, + { + addYX: fp.Elt{0xca, 0xd8, 0x7d, 0x86, 0x1a, 0xef, 0xad, 0x11, 0xe3, 0x27, 0x41, 0x7e, 0x7f, 0x3e, 0xa9, 0xd2, 0xb5, 0x4e, 0x50, 0xe0, 0x77, 0x91, 0xc2, 0x13, 0x52, 0x73, 0x41, 0x09, 0xa6, 0x57, 0x9a, 0xc8, 0xa8, 0x90, 0x9d, 0x26, 0x14, 0xbb, 0xa1, 0x2a, 0xf7, 0x45, 0x43, 0x4e, 0xea, 0x35, 0x62, 0xe1, 0x08, 0x85, 0x46, 0xb8, 0x24, 0x05, 0x2d, 0xab}, + subYX: fp.Elt{0x9b, 0xe6, 0xd3, 0xe5, 0xfe, 0x50, 0x36, 0x3c, 0x3c, 0x6d, 0x74, 0x1d, 0x74, 0xc0, 0xde, 0x5b, 0x45, 0x27, 0xe5, 0x12, 0xee, 0x63, 0x35, 0x6b, 0x13, 0xe2, 0x41, 0x6b, 0x3a, 0x05, 0x2b, 0xb1, 0x89, 0x26, 0xb6, 0xc6, 0xd1, 0x84, 0xff, 0x0e, 0x9b, 0xa3, 0xfb, 0x21, 0x36, 0x6b, 0x01, 0xf7, 0x9f, 0x7c, 0xeb, 0xf5, 0x18, 0x7a, 0x2a, 0x70}, + dt2: fp.Elt{0x09, 0xad, 0x99, 0x1a, 0x38, 0xd3, 0xdf, 0x22, 0x37, 0x32, 0x61, 0x8b, 0xf3, 0x19, 0x48, 0x08, 0xe8, 0x49, 0xb6, 0x4a, 0xa7, 0xed, 0xa4, 0xa2, 0xee, 0x86, 0xd7, 0x31, 0x5e, 0xce, 0x95, 0x76, 0x86, 0x42, 0x1c, 0x9d, 0x07, 0x14, 0x8c, 0x34, 0x18, 0x9c, 0x6d, 0x3a, 0xdf, 0xa9, 0xe8, 0x36, 0x7e, 0xe4, 0x95, 0xbe, 0xb5, 0x09, 0xf8, 0x9c}, + }, + { + addYX: fp.Elt{0x51, 0xdb, 0x49, 0xa8, 0x9f, 0xe3, 0xd7, 0xec, 0x0d, 0x0f, 0x49, 0xe8, 0xb6, 0xc5, 0x0f, 0x5a, 0x1c, 0xce, 0x54, 0x0d, 0xb1, 0x8d, 0x5b, 0xbf, 0xf4, 0xaa, 0x34, 0x77, 0xc4, 0x5d, 0x59, 0xb6, 0xc5, 0x0e, 0x5a, 0xd8, 0x5b, 0x30, 0xc2, 0x1d, 0xec, 0x85, 0x1c, 0x42, 0xbe, 0x24, 0x2e, 0x50, 0x55, 0x44, 0xb2, 0x3a, 0x01, 0xaa, 0x98, 0xfb}, + subYX: fp.Elt{0xe7, 0x29, 0xb7, 0xd0, 0xaa, 0x4f, 0x32, 0x53, 0x56, 0xde, 0xbc, 0xd1, 0x92, 0x5d, 0x19, 0xbe, 0xa3, 0xe3, 0x75, 0x48, 0xe0, 0x7a, 0x1b, 0x54, 0x7a, 0xb7, 0x41, 0x77, 0x84, 0x38, 0xdd, 0x14, 0x9f, 0xca, 0x3f, 0xa3, 0xc8, 0xa7, 0x04, 0x70, 0xf1, 0x4d, 0x3d, 0xb3, 0x84, 0x79, 0xcb, 
0xdb, 0xe4, 0xc5, 0x42, 0x9b, 0x57, 0x19, 0xf1, 0x2d}, + dt2: fp.Elt{0x20, 0xb4, 0x94, 0x9e, 0xdf, 0x31, 0x44, 0x0b, 0xc9, 0x7b, 0x75, 0x40, 0x9d, 0xd1, 0x96, 0x39, 0x70, 0x71, 0x15, 0xc8, 0x93, 0xd5, 0xc5, 0xe5, 0xba, 0xfe, 0xee, 0x08, 0x6a, 0x98, 0x0a, 0x1b, 0xb2, 0xaa, 0x3a, 0xf4, 0xa4, 0x79, 0xf9, 0x8e, 0x4d, 0x65, 0x10, 0x9b, 0x3a, 0x6e, 0x7c, 0x87, 0x94, 0x92, 0x11, 0x65, 0xbf, 0x1a, 0x09, 0xde}, + }, + { + addYX: fp.Elt{0xf3, 0x84, 0x76, 0x77, 0xa5, 0x6b, 0x27, 0x3b, 0x83, 0x3d, 0xdf, 0xa0, 0xeb, 0x32, 0x6d, 0x58, 0x81, 0x57, 0x64, 0xc2, 0x21, 0x7c, 0x9b, 0xea, 0xe6, 0xb0, 0x93, 0xf9, 0xe7, 0xc3, 0xed, 0x5a, 0x8e, 0xe2, 0xb4, 0x72, 0x76, 0x66, 0x0f, 0x22, 0x29, 0x94, 0x3e, 0x63, 0x48, 0x5e, 0x80, 0xcb, 0xac, 0xfa, 0x95, 0xb6, 0x4b, 0xc4, 0x95, 0x33}, + subYX: fp.Elt{0x0c, 0x55, 0xd1, 0x5e, 0x5f, 0xbf, 0xbf, 0xe2, 0x4c, 0xfc, 0x37, 0x4a, 0xc4, 0xb1, 0xf4, 0x83, 0x61, 0x93, 0x60, 0x8e, 0x9f, 0x31, 0xf0, 0xa0, 0x41, 0xff, 0x1d, 0xe2, 0x7f, 0xca, 0x40, 0xd6, 0x88, 0xe8, 0x91, 0x61, 0xe2, 0x11, 0x18, 0x83, 0xf3, 0x25, 0x2f, 0x3f, 0x49, 0x40, 0xd4, 0x83, 0xe2, 0xd7, 0x74, 0x6a, 0x16, 0x86, 0x4e, 0xab}, + dt2: fp.Elt{0xdd, 0x58, 0x65, 0xd8, 0x9f, 0xdd, 0x70, 0x7f, 0x0f, 0xec, 0xbd, 0x5c, 0x5c, 0x9b, 0x7e, 0x1b, 0x9f, 0x79, 0x36, 0x1f, 0xfd, 0x79, 0x10, 0x1c, 0x52, 0xf3, 0x22, 0xa4, 0x1f, 0x71, 0x6e, 0x63, 0x14, 0xf4, 0xa7, 0x3e, 0xbe, 0xad, 0x43, 0x30, 0x38, 0x8c, 0x29, 0xc6, 0xcf, 0x50, 0x75, 0x21, 0xe5, 0x78, 0xfd, 0xb0, 0x9a, 0xc4, 0x6d, 0xd4}, + }, + }, + { + { + addYX: fp.Elt{0x7a, 0xa1, 0x38, 0xa6, 0xfd, 0x0e, 0x96, 0xd5, 0x26, 0x76, 0x86, 0x70, 0x80, 0x30, 0xa6, 0x67, 0xeb, 0xf4, 0x39, 0xdb, 0x22, 0xf5, 0x9f, 0x98, 0xe4, 0xb5, 0x3a, 0x0c, 0x59, 0xbf, 0x85, 0xc6, 0xf0, 0x0b, 0x1c, 0x41, 0x38, 0x09, 0x01, 0xdb, 0xd6, 0x3c, 0xb7, 0xf1, 0x08, 0x6b, 0x4b, 0x9e, 0x63, 0x53, 0x83, 0xd3, 0xab, 0xa3, 0x72, 0x0d}, + subYX: fp.Elt{0x84, 0x68, 0x25, 0xe8, 0xe9, 0x8f, 0x91, 0xbf, 0xf7, 0xa4, 0x30, 0xae, 0xea, 0x9f, 0xdd, 0x56, 0x64, 0x09, 0xc9, 0x54, 0x68, 0x4e, 0x33, 0xc5, 0x6f, 0x7b, 0x2d, 0x52, 0x2e, 0x42, 0xbe, 0xbe, 0xf5, 0x64, 0xbf, 0x77, 0x54, 0xdf, 0xb0, 0x10, 0xd2, 0x16, 0x5d, 0xce, 0xaf, 0x9f, 0xfb, 0xa3, 0x63, 0x50, 0xcb, 0xc0, 0xd0, 0x88, 0x44, 0xa3}, + dt2: fp.Elt{0xc3, 0x8b, 0xa5, 0xf1, 0x44, 0xe4, 0x41, 0xcd, 0x75, 0xe3, 0x17, 0x69, 0x5b, 0xb9, 0xbb, 0xee, 0x82, 0xbb, 0xce, 0x57, 0xdf, 0x2a, 0x9c, 0x12, 0xab, 0x66, 0x08, 0x68, 0x05, 0x1b, 0x87, 0xee, 0x5d, 0x1e, 0x18, 0x14, 0x22, 0x4b, 0x99, 0x61, 0x75, 0x28, 0xe7, 0x65, 0x1c, 0x36, 0xb6, 0x18, 0x09, 0xa8, 0xdf, 0xef, 0x30, 0x35, 0xbc, 0x58}, + }, + { + addYX: fp.Elt{0xc5, 0xd3, 0x0e, 0x6f, 0xaf, 0x06, 0x69, 0xc4, 0x07, 0x9e, 0x58, 0x6e, 0x3f, 0x49, 0xd9, 0x0a, 0x3c, 0x2c, 0x37, 0xcd, 0x27, 0x4d, 0x87, 0x91, 0x7a, 0xb0, 0x28, 0xad, 0x2f, 0x68, 0x92, 0x05, 0x97, 0xf1, 0x30, 0x5f, 0x4c, 0x10, 0x20, 0x30, 0xd3, 0x08, 0x3f, 0xc1, 0xc6, 0xb7, 0xb5, 0xd1, 0x71, 0x7b, 0xa8, 0x0a, 0xd8, 0xf5, 0x17, 0xcf}, + subYX: fp.Elt{0x64, 0xd4, 0x8f, 0x91, 0x40, 0xab, 0x6e, 0x1a, 0x62, 0x83, 0xdc, 0xd7, 0x30, 0x1a, 0x4a, 0x2a, 0x4c, 0x54, 0x86, 0x19, 0x81, 0x5d, 0x04, 0x52, 0xa3, 0xca, 0x82, 0x38, 0xdc, 0x1e, 0xf0, 0x7a, 0x78, 0x76, 0x49, 0x4f, 0x71, 0xc4, 0x74, 0x2f, 0xf0, 0x5b, 0x2e, 0x5e, 0xac, 0xef, 0x17, 0xe4, 0x8e, 0x6e, 0xed, 0x43, 0x23, 0x61, 0x99, 0x49}, + dt2: fp.Elt{0x64, 0x90, 0x72, 0x76, 0xf8, 0x2c, 0x7d, 0x57, 0xf9, 0x30, 0x5e, 0x7a, 0x10, 0x74, 0x19, 0x39, 0xd9, 0xaf, 0x0a, 0xf1, 0x43, 0xed, 0x88, 0x9c, 0x8b, 0xdc, 0x9b, 0x1c, 0x90, 0xe7, 0xf7, 0xa3, 0xa5, 0x0d, 0xc6, 0xbc, 0x30, 0xfb, 0x91, 0x1a, 0x51, 0xba, 0x2d, 0xbe, 0x89, 0xdf, 
0x1d, 0xdc, 0x53, 0xa8, 0x82, 0x8a, 0xd3, 0x8d, 0x16, 0x68}, + }, + { + addYX: fp.Elt{0xef, 0x5c, 0xe3, 0x74, 0xbf, 0x13, 0x4a, 0xbf, 0x66, 0x73, 0x64, 0xb7, 0xd4, 0xce, 0x98, 0x82, 0x05, 0xfa, 0x98, 0x0c, 0x0a, 0xae, 0xe5, 0x6b, 0x9f, 0xac, 0xbb, 0x6e, 0x1f, 0xcf, 0xff, 0xa6, 0x71, 0x9a, 0xa8, 0x7a, 0x9e, 0x64, 0x1f, 0x20, 0x4a, 0x61, 0xa2, 0xd6, 0x50, 0xe3, 0xba, 0x81, 0x0c, 0x50, 0x59, 0x69, 0x59, 0x15, 0x55, 0xdb}, + subYX: fp.Elt{0xe8, 0x77, 0x4d, 0xe8, 0x66, 0x3d, 0xc1, 0x00, 0x3c, 0xf2, 0x25, 0x00, 0xdc, 0xb2, 0xe5, 0x9b, 0x12, 0x89, 0xf3, 0xd6, 0xea, 0x85, 0x60, 0xfe, 0x67, 0x91, 0xfd, 0x04, 0x7c, 0xe0, 0xf1, 0x86, 0x06, 0x11, 0x66, 0xee, 0xd4, 0xd5, 0xbe, 0x3b, 0x0f, 0xe3, 0x59, 0xb3, 0x4f, 0x00, 0xb6, 0xce, 0x80, 0xc1, 0x61, 0xf7, 0xaf, 0x04, 0x6a, 0x3c}, + dt2: fp.Elt{0x00, 0xd7, 0x32, 0x93, 0x67, 0x70, 0x6f, 0xd7, 0x69, 0xab, 0xb1, 0xd3, 0xdc, 0xd6, 0xa8, 0xdd, 0x35, 0x25, 0xca, 0xd3, 0x8a, 0x6d, 0xce, 0xfb, 0xfd, 0x2b, 0x83, 0xf0, 0xd4, 0xac, 0x66, 0xfb, 0x72, 0x87, 0x7e, 0x55, 0xb7, 0x91, 0x58, 0x10, 0xc3, 0x11, 0x7e, 0x15, 0xfe, 0x7c, 0x55, 0x90, 0xa3, 0x9e, 0xed, 0x9a, 0x7f, 0xa7, 0xb7, 0xeb}, + }, + { + addYX: fp.Elt{0x25, 0x0f, 0xc2, 0x09, 0x9c, 0x10, 0xc8, 0x7c, 0x93, 0xa7, 0xbe, 0xe9, 0x26, 0x25, 0x7c, 0x21, 0xfe, 0xe7, 0x5f, 0x3c, 0x02, 0x83, 0xa7, 0x9e, 0xdf, 0xc0, 0x94, 0x2b, 0x7d, 0x1a, 0xd0, 0x1d, 0xcc, 0x2e, 0x7d, 0xd4, 0x85, 0xe7, 0xc1, 0x15, 0x66, 0xd6, 0xd6, 0x32, 0xb8, 0xf7, 0x63, 0xaa, 0x3b, 0xa5, 0xea, 0x49, 0xad, 0x88, 0x9b, 0x66}, + subYX: fp.Elt{0x09, 0x97, 0x79, 0x36, 0x41, 0x56, 0x9b, 0xdf, 0x15, 0xd8, 0x43, 0x28, 0x17, 0x5b, 0x96, 0xc9, 0xcf, 0x39, 0x1f, 0x13, 0xf7, 0x4d, 0x1d, 0x1f, 0xda, 0x51, 0x56, 0xe7, 0x0a, 0x5a, 0x65, 0xb6, 0x2a, 0x87, 0x49, 0x86, 0xc2, 0x2b, 0xcd, 0xfe, 0x07, 0xf6, 0x4c, 0xe2, 0x1d, 0x9b, 0xd8, 0x82, 0x09, 0x5b, 0x11, 0x10, 0x62, 0x56, 0x89, 0xbd}, + dt2: fp.Elt{0xd9, 0x15, 0x73, 0xf2, 0x96, 0x35, 0x53, 0xb0, 0xe7, 0xa8, 0x0b, 0x93, 0x35, 0x0b, 0x3a, 0x00, 0xf5, 0x18, 0xb1, 0xc3, 0x12, 0x3f, 0x91, 0x17, 0xc1, 0x4c, 0x15, 0x5a, 0x86, 0x92, 0x11, 0xbd, 0x44, 0x40, 0x5a, 0x7b, 0x15, 0x89, 0xba, 0xc1, 0xc1, 0xbc, 0x43, 0x45, 0xe6, 0x52, 0x02, 0x73, 0x0a, 0xd0, 0x2a, 0x19, 0xda, 0x47, 0xa8, 0xff}, + }, + }, +} + +// tabVerif contains the odd multiples of P. The entry T[i] = (2i+1)P, where +// P = phi(G) and G is the generator of the Goldilocks curve, and phi is a +// 4-degree isogeny. 
+var tabVerif = [1 << (omegaFix - 2)]preTwistPointAffine{ + { /* 1P*/ + addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55}, + }, + { /* 3P*/ + addYX: fp.Elt{0xd1, 0xe9, 0xa8, 0x33, 0x20, 0x76, 0x18, 0x08, 0x45, 0x2a, 0xc9, 0x67, 0x2a, 0xc3, 0x15, 0x24, 0xf9, 0x74, 0x21, 0x30, 0x99, 0x59, 0x8b, 0xb2, 0xf0, 0xa4, 0x07, 0xe2, 0x6a, 0x36, 0x8d, 0xd9, 0xd2, 0x4a, 0x7f, 0x73, 0x50, 0x39, 0x3d, 0xaa, 0xa7, 0x51, 0x73, 0x0d, 0x2b, 0x8b, 0x96, 0x47, 0xac, 0x3c, 0x5d, 0xaa, 0x39, 0x9c, 0xcf, 0xd5}, + subYX: fp.Elt{0x6b, 0x11, 0x5d, 0x1a, 0xf9, 0x41, 0x9d, 0xc5, 0x30, 0x3e, 0xad, 0x25, 0x2c, 0x04, 0x45, 0xea, 0xcc, 0x67, 0x07, 0x85, 0xe9, 0xda, 0x0e, 0xb5, 0x40, 0xb7, 0x32, 0xb4, 0x49, 0xdd, 0xff, 0xaa, 0xfc, 0xbb, 0x19, 0xca, 0x8b, 0x79, 0x2b, 0x8f, 0x8d, 0x00, 0x33, 0xc2, 0xad, 0xe9, 0xd3, 0x12, 0xa8, 0xaa, 0x87, 0x62, 0xad, 0x2d, 0xff, 0xa4}, + dt2: fp.Elt{0xb0, 0xaf, 0x3b, 0xea, 0xf0, 0x42, 0x0b, 0x5e, 0x88, 0xd3, 0x98, 0x08, 0x87, 0x59, 0x72, 0x0a, 0xc2, 0xdf, 0xcb, 0x7f, 0x59, 0xb5, 0x4c, 0x63, 0x68, 0xe8, 0x41, 0x38, 0x67, 0x4f, 0xe9, 0xc6, 0xb2, 0x6b, 0x08, 0xa7, 0xf7, 0x0e, 0xcd, 0xea, 0xca, 0x3d, 0xaf, 0x8e, 0xda, 0x4b, 0x2e, 0xd2, 0x88, 0x64, 0x8d, 0xc5, 0x5f, 0x76, 0x0f, 0x3d}, + }, + { /* 5P*/ + addYX: fp.Elt{0xe5, 0x65, 0xc9, 0xe2, 0x75, 0xf0, 0x7d, 0x1a, 0xba, 0xa4, 0x40, 0x4b, 0x93, 0x12, 0xa2, 0x80, 0x95, 0x0d, 0x03, 0x93, 0xe8, 0xa5, 0x4d, 0xe2, 0x3d, 0x81, 0xf5, 0xce, 0xd4, 0x2d, 0x25, 0x59, 0x16, 0x5c, 0xe7, 0xda, 0xc7, 0x45, 0xd2, 0x7e, 0x2c, 0x38, 0xd4, 0x37, 0x64, 0xb2, 0xc2, 0x28, 0xc5, 0x72, 0x16, 0x32, 0x45, 0x36, 0x6f, 0x9f}, + subYX: fp.Elt{0x09, 0xf4, 0x7e, 0xbd, 0x89, 0xdb, 0x19, 0x58, 0xe1, 0x08, 0x00, 0x8a, 0xf4, 0x5f, 0x2a, 0x32, 0x40, 0xf0, 0x2c, 0x3f, 0x5d, 0xe4, 0xfc, 0x89, 0x11, 0x24, 0xb4, 0x2f, 0x97, 0xad, 0xac, 0x8f, 0x19, 0xab, 0xfa, 0x12, 0xe5, 0xf9, 0x50, 0x4e, 0x50, 0x6f, 0x32, 0x30, 0x88, 0xa6, 0xe5, 0x48, 0x28, 0xa2, 0x1b, 0x9f, 0xcd, 0xe2, 0x43, 0x38}, + dt2: fp.Elt{0xa9, 0xcc, 0x53, 0x39, 0x86, 0x02, 0x60, 0x75, 0x34, 0x99, 0x57, 0xbd, 0xfc, 0x5a, 0x8e, 0xce, 0x5e, 0x98, 0x22, 0xd0, 0xa5, 0x24, 0xff, 0x90, 0x28, 0x9f, 0x58, 0xf3, 0x39, 0xe9, 0xba, 0x36, 0x23, 0xfb, 0x7f, 0x41, 0xcc, 0x2b, 0x5a, 0x25, 0x3f, 0x4c, 0x2a, 0xf1, 0x52, 0x6f, 0x2f, 0x07, 0xe3, 0x88, 0x81, 0x77, 0xdd, 0x7c, 0x88, 0x82}, + }, + { /* 7P*/ + addYX: fp.Elt{0xf7, 0xee, 0x88, 0xfd, 0x3a, 0xbf, 0x7e, 0x28, 0x39, 0x23, 0x79, 0xe6, 0x5c, 0x56, 0xcb, 0xb5, 0x48, 0x6a, 0x80, 0x6d, 0x37, 0x60, 0x6c, 0x10, 0x35, 0x49, 0x4b, 0x46, 0x60, 0xd4, 0x79, 0xd4, 0x53, 0xd3, 0x67, 0x88, 0xd0, 0x41, 0xd5, 0x43, 0x85, 
0xc8, 0x71, 0xe3, 0x1c, 0xb6, 0xda, 0x22, 0x64, 0x8f, 0x80, 0xac, 0xad, 0x7d, 0xd5, 0x82}, + subYX: fp.Elt{0x92, 0x40, 0xc1, 0x83, 0x21, 0x9b, 0xd5, 0x7d, 0x3f, 0x29, 0xb6, 0x26, 0xef, 0x12, 0xb9, 0x27, 0x39, 0x42, 0x37, 0x97, 0x09, 0x9a, 0x08, 0xe1, 0x68, 0xb6, 0x7a, 0x3f, 0x9f, 0x45, 0xf8, 0x37, 0x19, 0x83, 0x97, 0xe6, 0x73, 0x30, 0x32, 0x35, 0xcf, 0xae, 0x5c, 0x12, 0x68, 0xdf, 0x6e, 0x2b, 0xde, 0x83, 0xa0, 0x44, 0x74, 0x2e, 0x4a, 0xe9}, + dt2: fp.Elt{0xcb, 0x22, 0x0a, 0xda, 0x6b, 0xc1, 0x8a, 0x29, 0xa1, 0xac, 0x8b, 0x5b, 0x8b, 0x32, 0x20, 0xf2, 0x21, 0xae, 0x0c, 0x43, 0xc4, 0xd7, 0x19, 0x37, 0x3d, 0x79, 0x25, 0x98, 0x6c, 0x9c, 0x22, 0x31, 0x2a, 0x55, 0x9f, 0xda, 0x5e, 0xa8, 0x13, 0xdb, 0x8e, 0x2e, 0x16, 0x39, 0xf4, 0x91, 0x6f, 0xec, 0x71, 0x71, 0xc9, 0x10, 0xf2, 0xa4, 0x8f, 0x11}, + }, + { /* 9P*/ + addYX: fp.Elt{0x85, 0xdd, 0x37, 0x62, 0x74, 0x8e, 0x33, 0x5b, 0x25, 0x12, 0x1b, 0xe7, 0xdf, 0x47, 0xe5, 0x12, 0xfd, 0x3a, 0x3a, 0xf5, 0x5d, 0x4c, 0xa2, 0x29, 0x3c, 0x5c, 0x2f, 0xee, 0x18, 0x19, 0x0a, 0x2b, 0xef, 0x67, 0x50, 0x7a, 0x0d, 0x29, 0xae, 0x55, 0x82, 0xcd, 0xd6, 0x41, 0x90, 0xb4, 0x13, 0x31, 0x5d, 0x11, 0xb8, 0xaa, 0x12, 0x86, 0x08, 0xac}, + subYX: fp.Elt{0xcc, 0x37, 0x8d, 0x83, 0x5f, 0xfd, 0xde, 0xd5, 0xf7, 0xf1, 0xae, 0x0a, 0xa7, 0x0b, 0xeb, 0x6d, 0x19, 0x8a, 0xb6, 0x1a, 0x59, 0xd8, 0xff, 0x3c, 0xbc, 0xbc, 0xef, 0x9c, 0xda, 0x7b, 0x75, 0x12, 0xaf, 0x80, 0x8f, 0x2c, 0x3c, 0xaa, 0x0b, 0x17, 0x86, 0x36, 0x78, 0x18, 0xc8, 0x8a, 0xf6, 0xb8, 0x2c, 0x2f, 0x57, 0x2c, 0x62, 0x57, 0xf6, 0x90}, + dt2: fp.Elt{0x83, 0xbc, 0xa2, 0x07, 0xa5, 0x38, 0x96, 0xea, 0xfe, 0x11, 0x46, 0x1d, 0x3b, 0xcd, 0x42, 0xc5, 0xee, 0x67, 0x04, 0x72, 0x08, 0xd8, 0xd9, 0x96, 0x07, 0xf7, 0xac, 0xc3, 0x64, 0xf1, 0x98, 0x2c, 0x55, 0xd7, 0x7d, 0xc8, 0x6c, 0xbd, 0x2c, 0xff, 0x15, 0xd6, 0x6e, 0xb8, 0x17, 0x8e, 0xa8, 0x27, 0x66, 0xb1, 0x73, 0x79, 0x96, 0xff, 0x29, 0x10}, + }, + { /* 11P*/ + addYX: fp.Elt{0x76, 0xcb, 0x9b, 0x0c, 0x5b, 0xfe, 0xe1, 0x2a, 0xdd, 0x6f, 0x6c, 0xdd, 0x6f, 0xb4, 0xc0, 0xc2, 0x1b, 0x4b, 0x38, 0xe8, 0x66, 0x8c, 0x1e, 0x31, 0x63, 0xb9, 0x94, 0xcd, 0xc3, 0x8c, 0x44, 0x25, 0x7b, 0xd5, 0x39, 0x80, 0xfc, 0x01, 0xaa, 0xf7, 0x2a, 0x61, 0x8a, 0x25, 0xd2, 0x5f, 0xc5, 0x66, 0x38, 0xa4, 0x17, 0xcf, 0x3e, 0x11, 0x0f, 0xa3}, + subYX: fp.Elt{0xe0, 0xb6, 0xd1, 0x9c, 0x71, 0x49, 0x2e, 0x7b, 0xde, 0x00, 0xda, 0x6b, 0xf1, 0xec, 0xe6, 0x7a, 0x15, 0x38, 0x71, 0xe9, 0x7b, 0xdb, 0xf8, 0x98, 0xc0, 0x91, 0x2e, 0x53, 0xee, 0x92, 0x87, 0x25, 0xc9, 0xb0, 0xbb, 0x33, 0x15, 0x46, 0x7f, 0xfd, 0x4f, 0x8b, 0x77, 0x05, 0x96, 0xb6, 0xe2, 0x08, 0xdb, 0x0d, 0x09, 0xee, 0x5b, 0xd1, 0x2a, 0x63}, + dt2: fp.Elt{0x8f, 0x7b, 0x57, 0x8c, 0xbf, 0x06, 0x0d, 0x43, 0x21, 0x92, 0x94, 0x2d, 0x6a, 0x38, 0x07, 0x0f, 0xa0, 0xf1, 0xe3, 0xd8, 0x2a, 0xbf, 0x46, 0xc6, 0x9e, 0x1f, 0x8f, 0x2b, 0x46, 0x84, 0x0b, 0x74, 0xed, 0xff, 0xf8, 0xa5, 0x94, 0xae, 0xf1, 0x67, 0xb1, 0x9b, 0xdd, 0x4a, 0xd0, 0xdb, 0xc2, 0xb5, 0x58, 0x49, 0x0c, 0xa9, 0x1d, 0x7d, 0xa9, 0xd3}, + }, + { /* 13P*/ + addYX: fp.Elt{0x73, 0x84, 0x2e, 0x31, 0x1f, 0xdc, 0xed, 0x9f, 0x74, 0xfa, 0xe0, 0x35, 0xb1, 0x85, 0x6a, 0x8d, 0x86, 0xd0, 0xff, 0xd6, 0x08, 0x43, 0x73, 0x1a, 0xd5, 0xf8, 0x43, 0xd4, 0xb3, 0xe5, 0x3f, 0xa8, 0x84, 0x17, 0x59, 0x65, 0x4e, 0xe6, 0xee, 0x54, 0x9c, 0xda, 0x5e, 0x7e, 0x98, 0x29, 0x6d, 0x73, 0x34, 0x1f, 0x99, 0x80, 0x54, 0x54, 0x81, 0x0b}, + subYX: fp.Elt{0xb1, 0xe5, 0xbb, 0x80, 0x22, 0x9c, 0x81, 0x6d, 0xaf, 0x27, 0x65, 0x6f, 0x7e, 0x9c, 0xb6, 0x8d, 0x35, 0x5c, 0x2e, 0x20, 0x48, 0x7a, 0x28, 0xf0, 0x97, 0xfe, 0xb7, 0x71, 0xce, 0xd6, 0xad, 0x3a, 0x81, 0xf6, 0x74, 0x5e, 0xf3, 
0xfd, 0x1b, 0xd4, 0x1e, 0x7c, 0xc2, 0xb7, 0xc8, 0xa6, 0xc9, 0x89, 0x03, 0x47, 0xec, 0x24, 0xd6, 0x0e, 0xec, 0x9c}, + dt2: fp.Elt{0x91, 0x0a, 0x43, 0x34, 0x20, 0xc2, 0x64, 0xf7, 0x4e, 0x48, 0xc8, 0xd2, 0x95, 0x83, 0xd1, 0xa4, 0xfb, 0x4e, 0x41, 0x3b, 0x0d, 0xd5, 0x07, 0xd9, 0xf1, 0x13, 0x16, 0x78, 0x54, 0x57, 0xd0, 0xf1, 0x4f, 0x20, 0xac, 0xcf, 0x9c, 0x3b, 0x33, 0x0b, 0x99, 0x54, 0xc3, 0x7f, 0x3e, 0x57, 0x26, 0x86, 0xd5, 0xa5, 0x2b, 0x8d, 0xe3, 0x19, 0x36, 0xf7}, + }, + { /* 15P*/ + addYX: fp.Elt{0x23, 0x69, 0x47, 0x14, 0xf9, 0x9a, 0x50, 0xff, 0x64, 0xd1, 0x50, 0x35, 0xc3, 0x11, 0xd3, 0x19, 0xcf, 0x87, 0xda, 0x30, 0x0b, 0x50, 0xda, 0xc0, 0xe0, 0x25, 0x00, 0xe5, 0x68, 0x93, 0x04, 0xc2, 0xaf, 0xbd, 0x2f, 0x36, 0x5f, 0x47, 0x96, 0x10, 0xa8, 0xbd, 0xe4, 0x88, 0xac, 0x80, 0x52, 0x61, 0x73, 0xe9, 0x63, 0xdd, 0x99, 0xad, 0x20, 0x5b}, + subYX: fp.Elt{0x1b, 0x5e, 0xa2, 0x2a, 0x25, 0x0f, 0x86, 0xc0, 0xb1, 0x2e, 0x0c, 0x13, 0x40, 0x8d, 0xf0, 0xe6, 0x00, 0x55, 0x08, 0xc5, 0x7d, 0xf4, 0xc9, 0x31, 0x25, 0x3a, 0x99, 0x69, 0xdd, 0x67, 0x63, 0x9a, 0xd6, 0x89, 0x2e, 0xa1, 0x19, 0xca, 0x2c, 0xd9, 0x59, 0x5f, 0x5d, 0xc3, 0x6e, 0x62, 0x36, 0x12, 0x59, 0x15, 0xe1, 0xdc, 0xa4, 0xad, 0xc9, 0xd0}, + dt2: fp.Elt{0xbc, 0xea, 0xfc, 0xaf, 0x66, 0x23, 0xb7, 0x39, 0x6b, 0x2a, 0x96, 0xa8, 0x54, 0x43, 0xe9, 0xaa, 0x32, 0x40, 0x63, 0x92, 0x5e, 0xdf, 0x35, 0xc2, 0x9f, 0x24, 0x0c, 0xed, 0xfc, 0xde, 0x73, 0x8f, 0xa7, 0xd5, 0xa3, 0x2b, 0x18, 0x1f, 0xb0, 0xf8, 0xeb, 0x55, 0xd9, 0xc3, 0xfd, 0x28, 0x7c, 0x4f, 0xce, 0x0d, 0xf7, 0xae, 0xc2, 0x83, 0xc3, 0x78}, + }, + { /* 17P*/ + addYX: fp.Elt{0x71, 0xe6, 0x60, 0x93, 0x37, 0xdb, 0x01, 0xa5, 0x4c, 0xba, 0xe8, 0x8e, 0xd5, 0xf9, 0xd3, 0x98, 0xe5, 0xeb, 0xab, 0x3a, 0x15, 0x8b, 0x35, 0x60, 0xbe, 0xe5, 0x9c, 0x2d, 0x10, 0x9b, 0x2e, 0xcf, 0x65, 0x64, 0xea, 0x8f, 0x72, 0xce, 0xf5, 0x18, 0xe5, 0xe2, 0xf0, 0x0e, 0xae, 0x04, 0xec, 0xa0, 0x20, 0x65, 0x63, 0x07, 0xb1, 0x9f, 0x03, 0x97}, + subYX: fp.Elt{0x9e, 0x41, 0x64, 0x30, 0x95, 0x7f, 0x3a, 0x89, 0x7b, 0x0a, 0x79, 0x59, 0x23, 0x9a, 0x3b, 0xfe, 0xa4, 0x13, 0x08, 0xb2, 0x2e, 0x04, 0x50, 0x10, 0x30, 0xcd, 0x2e, 0xa4, 0x91, 0x71, 0x50, 0x36, 0x4a, 0x02, 0xf4, 0x8d, 0xa3, 0x36, 0x1b, 0xf4, 0x52, 0xba, 0x15, 0x04, 0x8b, 0x80, 0x25, 0xd9, 0xae, 0x67, 0x20, 0xd9, 0x88, 0x8f, 0x97, 0xa6}, + dt2: fp.Elt{0xb5, 0xe7, 0x46, 0xbd, 0x55, 0x23, 0xa0, 0x68, 0xc0, 0x12, 0xd9, 0xf1, 0x0a, 0x75, 0xe2, 0xda, 0xf4, 0x6b, 0xca, 0x14, 0xe4, 0x9f, 0x0f, 0xb5, 0x3c, 0xa6, 0xa5, 0xa2, 0x63, 0x94, 0xd1, 0x1c, 0x39, 0x58, 0x57, 0x02, 0x27, 0x98, 0xb6, 0x47, 0xc6, 0x61, 0x4b, 0x5c, 0xab, 0x6f, 0x2d, 0xab, 0xe3, 0xc1, 0x69, 0xf9, 0x12, 0xb0, 0xc8, 0xd5}, + }, + { /* 19P*/ + addYX: fp.Elt{0x19, 0x7d, 0xd5, 0xac, 0x79, 0xa2, 0x82, 0x9b, 0x28, 0x31, 0x22, 0xc0, 0x73, 0x02, 0x76, 0x17, 0x10, 0x70, 0x79, 0x57, 0xc9, 0x84, 0x62, 0x8e, 0x04, 0x04, 0x61, 0x67, 0x08, 0x48, 0xb4, 0x4b, 0xde, 0x53, 0x8c, 0xff, 0x36, 0x1b, 0x62, 0x86, 0x5d, 0xe1, 0x9b, 0xb1, 0xe5, 0xe8, 0x44, 0x64, 0xa1, 0x68, 0x3f, 0xa8, 0x45, 0x52, 0x91, 0xed}, + subYX: fp.Elt{0x42, 0x1a, 0x36, 0x1f, 0x90, 0x15, 0x24, 0x8d, 0x24, 0x80, 0xe6, 0xfe, 0x1e, 0xf0, 0xad, 0xaf, 0x6a, 0x93, 0xf0, 0xa6, 0x0d, 0x5d, 0xea, 0xf6, 0x62, 0x96, 0x7a, 0x05, 0x76, 0x85, 0x74, 0x32, 0xc7, 0xc8, 0x64, 0x53, 0x62, 0xe7, 0x54, 0x84, 0xe0, 0x40, 0x66, 0x19, 0x70, 0x40, 0x95, 0x35, 0x68, 0x64, 0x43, 0xcd, 0xba, 0x29, 0x32, 0xa8}, + dt2: fp.Elt{0x3e, 0xf6, 0xd6, 0xe4, 0x99, 0xeb, 0x20, 0x66, 0x08, 0x2e, 0x26, 0x64, 0xd7, 0x76, 0xf3, 0xb4, 0xc5, 0xa4, 0x35, 0x92, 0xd2, 0x99, 0x70, 0x5a, 0x1a, 0xe9, 0xe9, 0x3d, 0x3b, 0xe1, 0xcd, 0x0e, 0xee, 
0x24, 0x13, 0x03, 0x22, 0xd6, 0xd6, 0x72, 0x08, 0x2b, 0xde, 0xfd, 0x93, 0xed, 0x0c, 0x7f, 0x5e, 0x31, 0x22, 0x4d, 0x80, 0x78, 0xc0, 0x48}, + }, + { /* 21P*/ + addYX: fp.Elt{0x8f, 0x72, 0xd2, 0x9e, 0xc4, 0xcd, 0x2c, 0xbf, 0xa8, 0xd3, 0x24, 0x62, 0x28, 0xee, 0x39, 0x0a, 0x19, 0x3a, 0x58, 0xff, 0x21, 0x2e, 0x69, 0x6c, 0x6e, 0x18, 0xd0, 0xcd, 0x61, 0xc1, 0x18, 0x02, 0x5a, 0xe9, 0xe3, 0xef, 0x1f, 0x8e, 0x10, 0xe8, 0x90, 0x2b, 0x48, 0xcd, 0xee, 0x38, 0xbd, 0x3a, 0xca, 0xbc, 0x2d, 0xe2, 0x3a, 0x03, 0x71, 0x02}, + subYX: fp.Elt{0xf8, 0xa4, 0x32, 0x26, 0x66, 0xaf, 0x3b, 0x53, 0xe7, 0xb0, 0x91, 0x92, 0xf5, 0x3c, 0x74, 0xce, 0xf2, 0xdd, 0x68, 0xa9, 0xf4, 0xcd, 0x5f, 0x60, 0xab, 0x71, 0xdf, 0xcd, 0x5c, 0x5d, 0x51, 0x72, 0x3a, 0x96, 0xea, 0xd6, 0xde, 0x54, 0x8e, 0x55, 0x4c, 0x08, 0x4c, 0x60, 0xdd, 0x34, 0xa9, 0x6f, 0xf3, 0x04, 0x02, 0xa8, 0xa6, 0x4e, 0x4d, 0x62}, + dt2: fp.Elt{0x76, 0x4a, 0xae, 0x38, 0x62, 0x69, 0x72, 0xdc, 0xe8, 0x43, 0xbe, 0x1d, 0x61, 0xde, 0x31, 0xc3, 0x42, 0x8f, 0x33, 0x9d, 0xca, 0xc7, 0x9c, 0xec, 0x6a, 0xe2, 0xaa, 0x01, 0x49, 0x78, 0x8d, 0x72, 0x4f, 0x38, 0xea, 0x52, 0xc2, 0xd3, 0xc9, 0x39, 0x71, 0xba, 0xb9, 0x09, 0x9b, 0xa3, 0x7f, 0x45, 0x43, 0x65, 0x36, 0x29, 0xca, 0xe7, 0x5c, 0x5f}, + }, + { /* 23P*/ + addYX: fp.Elt{0x89, 0x42, 0x35, 0x48, 0x6d, 0x74, 0xe5, 0x1f, 0xc3, 0xdd, 0x28, 0x5b, 0x84, 0x41, 0x33, 0x9f, 0x42, 0xf3, 0x1d, 0x5d, 0x15, 0x6d, 0x76, 0x33, 0x36, 0xaf, 0xe9, 0xdd, 0xfa, 0x63, 0x4f, 0x7a, 0x9c, 0xeb, 0x1c, 0x4f, 0x34, 0x65, 0x07, 0x54, 0xbb, 0x4c, 0x8b, 0x62, 0x9d, 0xd0, 0x06, 0x99, 0xb3, 0xe9, 0xda, 0x85, 0x19, 0xb0, 0x3d, 0x3c}, + subYX: fp.Elt{0xbb, 0x99, 0xf6, 0xbf, 0xaf, 0x2c, 0x22, 0x0d, 0x7a, 0xaa, 0x98, 0x6f, 0x01, 0x82, 0x99, 0xcf, 0x88, 0xbd, 0x0e, 0x3a, 0x89, 0xe0, 0x9c, 0x8c, 0x17, 0x20, 0xc4, 0xe0, 0xcf, 0x43, 0x7a, 0xef, 0x0d, 0x9f, 0x87, 0xd4, 0xfb, 0xf2, 0x96, 0xb8, 0x03, 0xe8, 0xcb, 0x5c, 0xec, 0x65, 0x5f, 0x49, 0xa4, 0x7c, 0x85, 0xb4, 0xf6, 0xc7, 0xdb, 0xa3}, + dt2: fp.Elt{0x11, 0xf3, 0x32, 0xa3, 0xa7, 0xb2, 0x7d, 0x51, 0x82, 0x44, 0xeb, 0xa2, 0x7d, 0x72, 0xcb, 0xc6, 0xf6, 0xc7, 0xb2, 0x38, 0x0e, 0x0f, 0x4f, 0x29, 0x00, 0xe4, 0x5b, 0x94, 0x46, 0x86, 0x66, 0xa1, 0x83, 0xb3, 0xeb, 0x15, 0xb6, 0x31, 0x50, 0x28, 0xeb, 0xed, 0x0d, 0x32, 0x39, 0xe9, 0x23, 0x81, 0x99, 0x3e, 0xff, 0x17, 0x4c, 0x11, 0x43, 0xd1}, + }, + { /* 25P*/ + addYX: fp.Elt{0xce, 0xe7, 0xf8, 0x94, 0x8f, 0x96, 0xf8, 0x96, 0xe6, 0x72, 0x20, 0x44, 0x2c, 0xa7, 0xfc, 0xba, 0xc8, 0xe1, 0xbb, 0xc9, 0x16, 0x85, 0xcd, 0x0b, 0xe5, 0xb5, 0x5a, 0x7f, 0x51, 0x43, 0x63, 0x8b, 0x23, 0x8e, 0x1d, 0x31, 0xff, 0x46, 0x02, 0x66, 0xcc, 0x9e, 0x4d, 0xa2, 0xca, 0xe2, 0xc7, 0xfd, 0x22, 0xb1, 0xdb, 0xdf, 0x6f, 0xe6, 0xa5, 0x82}, + subYX: fp.Elt{0xd0, 0xf5, 0x65, 0x40, 0xec, 0x8e, 0x65, 0x42, 0x78, 0xc1, 0x65, 0xe4, 0x10, 0xc8, 0x0b, 0x1b, 0xdd, 0x96, 0x68, 0xce, 0xee, 0x45, 0x55, 0xd8, 0x6e, 0xd3, 0xe6, 0x77, 0x19, 0xae, 0xc2, 0x8d, 0x8d, 0x3e, 0x14, 0x3f, 0x6d, 0x00, 0x2f, 0x9b, 0xd1, 0x26, 0x60, 0x28, 0x0f, 0x3a, 0x47, 0xb3, 0xe6, 0x68, 0x28, 0x24, 0x25, 0xca, 0xc8, 0x06}, + dt2: fp.Elt{0x54, 0xbb, 0x60, 0x92, 0xdb, 0x8f, 0x0f, 0x38, 0xe0, 0xe6, 0xe4, 0xc9, 0xcc, 0x14, 0x62, 0x01, 0xc4, 0x2b, 0x0f, 0xcf, 0xed, 0x7d, 0x8e, 0xa4, 0xd9, 0x73, 0x0b, 0xba, 0x0c, 0xaf, 0x0c, 0xf9, 0xe2, 0xeb, 0x29, 0x2a, 0x53, 0xdf, 0x2c, 0x5a, 0xfa, 0x8f, 0xc1, 0x01, 0xd7, 0xb1, 0x45, 0x73, 0x92, 0x32, 0x83, 0x85, 0x12, 0x74, 0x89, 0x44}, + }, + { /* 27P*/ + addYX: fp.Elt{0x0b, 0x73, 0x3c, 0xc2, 0xb1, 0x2e, 0xe1, 0xa7, 0xf5, 0xc9, 0x7a, 0xfb, 0x3d, 0x2d, 0xac, 0x59, 0xdb, 0xfa, 0x36, 0x11, 0xd1, 0x13, 0x04, 0x51, 0x1d, 0xab, 
0x9b, 0x6b, 0x93, 0xfe, 0xda, 0xb0, 0x8e, 0xb4, 0x79, 0x11, 0x21, 0x0f, 0x65, 0xb9, 0xbb, 0x79, 0x96, 0x2a, 0xfd, 0x30, 0xe0, 0xb4, 0x2d, 0x9a, 0x55, 0x25, 0x5d, 0xd4, 0xad, 0x2a}, + subYX: fp.Elt{0x9e, 0xc5, 0x04, 0xfe, 0xec, 0x3c, 0x64, 0x1c, 0xed, 0x95, 0xed, 0xae, 0xaf, 0x5c, 0x6e, 0x08, 0x9e, 0x02, 0x29, 0x59, 0x7e, 0x5f, 0xc4, 0x9a, 0xd5, 0x32, 0x72, 0x86, 0xe1, 0x4e, 0x3c, 0xce, 0x99, 0x69, 0x3b, 0xc4, 0xdd, 0x4d, 0xb7, 0xbb, 0xda, 0x3b, 0x1a, 0x99, 0xaa, 0x62, 0x15, 0xc1, 0xf0, 0xb6, 0x6c, 0xec, 0x56, 0xc1, 0xff, 0x0c}, + dt2: fp.Elt{0x2f, 0xf1, 0x3f, 0x7a, 0x2d, 0x56, 0x19, 0x7f, 0xea, 0xbe, 0x59, 0x2e, 0x13, 0x67, 0x81, 0xfb, 0xdb, 0xc8, 0xa3, 0x1d, 0xd5, 0xe9, 0x13, 0x8b, 0x29, 0xdf, 0xcf, 0x9f, 0xe7, 0xd9, 0x0b, 0x70, 0xd3, 0x15, 0x57, 0x4a, 0xe9, 0x50, 0x12, 0x1b, 0x81, 0x4b, 0x98, 0x98, 0xa8, 0x31, 0x1d, 0x27, 0x47, 0x38, 0xed, 0x57, 0x99, 0x26, 0xb2, 0xee}, + }, + { /* 29P*/ + addYX: fp.Elt{0x1c, 0xb2, 0xb2, 0x67, 0x3b, 0x8b, 0x3d, 0x5a, 0x30, 0x7e, 0x38, 0x7e, 0x3c, 0x3d, 0x28, 0x56, 0x59, 0xd8, 0x87, 0x53, 0x8b, 0xe6, 0x6c, 0x5d, 0xe5, 0x0a, 0x33, 0x10, 0xce, 0xa2, 0x17, 0x0d, 0xe8, 0x76, 0xee, 0x68, 0xa8, 0x72, 0x54, 0xbd, 0xa6, 0x24, 0x94, 0x6e, 0x77, 0xc7, 0x53, 0xb7, 0x89, 0x1c, 0x7a, 0xe9, 0x78, 0x9a, 0x74, 0x5f}, + subYX: fp.Elt{0x76, 0x96, 0x1c, 0xcf, 0x08, 0x55, 0xd8, 0x1e, 0x0d, 0xa3, 0x59, 0x95, 0x32, 0xf4, 0xc2, 0x8e, 0x84, 0x5e, 0x4b, 0x04, 0xda, 0x71, 0xc9, 0x78, 0x52, 0xde, 0x14, 0xb4, 0x31, 0xf4, 0xd4, 0xb8, 0x58, 0xc5, 0x20, 0xe8, 0xdd, 0x15, 0xb5, 0xee, 0xea, 0x61, 0xe0, 0xf5, 0xd6, 0xae, 0x55, 0x59, 0x05, 0x3e, 0xaf, 0x74, 0xac, 0x1f, 0x17, 0x82}, + dt2: fp.Elt{0x59, 0x24, 0xcd, 0xfc, 0x11, 0x7e, 0x85, 0x18, 0x3d, 0x69, 0xf7, 0x71, 0x31, 0x66, 0x98, 0x42, 0x95, 0x00, 0x8c, 0xb2, 0xae, 0x39, 0x7e, 0x85, 0xd6, 0xb0, 0x02, 0xec, 0xce, 0xfc, 0x25, 0xb2, 0xe3, 0x99, 0x8e, 0x5b, 0x61, 0x96, 0x2e, 0x6d, 0x96, 0x57, 0x71, 0xa5, 0x93, 0x41, 0x0e, 0x6f, 0xfd, 0x0a, 0xbf, 0xa9, 0xf7, 0x56, 0xa9, 0x3e}, + }, + { /* 31P*/ + addYX: fp.Elt{0xa2, 0x2e, 0x0c, 0x17, 0x4d, 0xcc, 0x85, 0x2c, 0x18, 0xa0, 0xd2, 0x08, 0xba, 0x11, 0xfa, 0x47, 0x71, 0x86, 0xaf, 0x36, 0x6a, 0xd7, 0xfe, 0xb9, 0xb0, 0x2f, 0x89, 0x98, 0x49, 0x69, 0xf8, 0x6a, 0xad, 0x27, 0x5e, 0x0a, 0x22, 0x60, 0x5e, 0x5d, 0xca, 0x06, 0x51, 0x27, 0x99, 0x29, 0x85, 0x68, 0x98, 0xe1, 0xc4, 0x21, 0x50, 0xa0, 0xe9, 0xc1}, + subYX: fp.Elt{0x4d, 0x70, 0xee, 0x91, 0x92, 0x3f, 0xb7, 0xd3, 0x1d, 0xdb, 0x8d, 0x6e, 0x16, 0xf5, 0x65, 0x7d, 0x5f, 0xb5, 0x6c, 0x59, 0x26, 0x70, 0x4b, 0xf2, 0xfc, 0xe7, 0xdf, 0x86, 0xfe, 0xa5, 0xa7, 0xa6, 0x5d, 0xfb, 0x06, 0xe9, 0xf9, 0xcc, 0xc0, 0x37, 0xcc, 0xd8, 0x09, 0x04, 0xd2, 0xa5, 0x1d, 0xd7, 0xb7, 0xce, 0x92, 0xac, 0x3c, 0xad, 0xfb, 0xae}, + dt2: fp.Elt{0x17, 0xa3, 0x9a, 0xc7, 0x86, 0x2a, 0x51, 0xf7, 0x96, 0x79, 0x49, 0x22, 0x2e, 0x5a, 0x01, 0x5c, 0xb5, 0x95, 0xd4, 0xe8, 0xcb, 0x00, 0xca, 0x2d, 0x55, 0xb6, 0x34, 0x36, 0x0b, 0x65, 0x46, 0xf0, 0x49, 0xfc, 0x87, 0x86, 0xe5, 0xc3, 0x15, 0xdb, 0x32, 0xcd, 0xf2, 0xd3, 0x82, 0x4c, 0xe6, 0x61, 0x8a, 0xaf, 0xd4, 0x9e, 0x0f, 0x5a, 0xf2, 0x81}, + }, + { /* 33P*/ + addYX: fp.Elt{0x88, 0x10, 0xc0, 0xcb, 0xf5, 0x77, 0xae, 0xa5, 0xbe, 0xf6, 0xcd, 0x2e, 0x8b, 0x7e, 0xbd, 0x79, 0x62, 0x4a, 0xeb, 0x69, 0xc3, 0x28, 0xaa, 0x72, 0x87, 0xa9, 0x25, 0x87, 0x46, 0xea, 0x0e, 0x62, 0xa3, 0x6a, 0x1a, 0xe2, 0xba, 0xdc, 0x81, 0x10, 0x33, 0x01, 0xf6, 0x16, 0x89, 0x80, 0xc6, 0xcd, 0xdb, 0xdc, 0xba, 0x0e, 0x09, 0x4a, 0x35, 0x4a}, + subYX: fp.Elt{0x86, 0xb2, 0x2b, 0xd0, 0xb8, 0x4a, 0x6d, 0x66, 0x7b, 0x32, 0xdf, 0x3b, 0x1a, 0x19, 0x1f, 0x63, 0xee, 0x1f, 0x3d, 0x1c, 0x5c, 0x14, 
0x60, 0x5b, 0x72, 0x49, 0x07, 0xb1, 0x0d, 0x72, 0xc6, 0x35, 0xf0, 0xbc, 0x5e, 0xda, 0x80, 0x6b, 0x64, 0x5b, 0xe5, 0x34, 0x54, 0x39, 0xdd, 0xe6, 0x3c, 0xcb, 0xe5, 0x29, 0x32, 0x06, 0xc6, 0xb1, 0x96, 0x34}, + dt2: fp.Elt{0x85, 0x86, 0xf5, 0x84, 0x86, 0xe6, 0x77, 0x8a, 0x71, 0x85, 0x0c, 0x4f, 0x81, 0x5b, 0x29, 0x06, 0xb5, 0x2e, 0x26, 0x71, 0x07, 0x78, 0x07, 0xae, 0xbc, 0x95, 0x46, 0xc3, 0x65, 0xac, 0xe3, 0x76, 0x51, 0x7d, 0xd4, 0x85, 0x31, 0xe3, 0x43, 0xf3, 0x1b, 0x7c, 0xf7, 0x6b, 0x2c, 0xf8, 0x1c, 0xbb, 0x8d, 0xca, 0xab, 0x4b, 0xba, 0x7f, 0xa4, 0xe2}, + }, + { /* 35P*/ + addYX: fp.Elt{0x1a, 0xee, 0xe7, 0xa4, 0x8a, 0x9d, 0x53, 0x80, 0xc6, 0xb8, 0x4e, 0xdc, 0x89, 0xe0, 0xc4, 0x2b, 0x60, 0x52, 0x6f, 0xec, 0x81, 0xd2, 0x55, 0x6b, 0x1b, 0x6f, 0x17, 0x67, 0x8e, 0x42, 0x26, 0x4c, 0x65, 0x23, 0x29, 0xc6, 0x7b, 0xcd, 0x9f, 0xad, 0x4b, 0x42, 0xd3, 0x0c, 0x75, 0xc3, 0x8a, 0xf5, 0xbe, 0x9e, 0x55, 0xf7, 0x47, 0x5d, 0xbd, 0x3a}, + subYX: fp.Elt{0x0d, 0xa8, 0x3b, 0xf9, 0xc7, 0x7e, 0xc6, 0x86, 0x94, 0xc0, 0x01, 0xff, 0x27, 0xce, 0x43, 0xac, 0xe5, 0xe1, 0xd2, 0x8d, 0xc1, 0x22, 0x31, 0xbe, 0xe1, 0xaf, 0xf9, 0x4a, 0x78, 0xa1, 0x0c, 0xaa, 0xd4, 0x80, 0xe4, 0x09, 0x8d, 0xfb, 0x1d, 0x52, 0xc8, 0x60, 0x2d, 0xf2, 0xa2, 0x89, 0x02, 0x56, 0x3d, 0x56, 0x27, 0x85, 0xc7, 0xf0, 0x2b, 0x9a}, + dt2: fp.Elt{0x62, 0x7c, 0xc7, 0x6b, 0x2c, 0x9d, 0x0a, 0x7c, 0xe5, 0x50, 0x3c, 0xe6, 0x87, 0x1c, 0x82, 0x30, 0x67, 0x3c, 0x39, 0xb6, 0xa0, 0x31, 0xfb, 0x03, 0x7b, 0xa1, 0x58, 0xdf, 0x12, 0x76, 0x5d, 0x5d, 0x0a, 0x8f, 0x9b, 0x37, 0x32, 0xc3, 0x60, 0x33, 0xea, 0x9f, 0x0a, 0x99, 0xfa, 0x20, 0xd0, 0x33, 0x21, 0xc3, 0x94, 0xd4, 0x86, 0x49, 0x7c, 0x4e}, + }, + { /* 37P*/ + addYX: fp.Elt{0xc7, 0x0c, 0x71, 0xfe, 0x55, 0xd1, 0x95, 0x8f, 0x43, 0xbb, 0x6b, 0x74, 0x30, 0xbd, 0xe8, 0x6f, 0x1c, 0x1b, 0x06, 0x62, 0xf5, 0xfc, 0x65, 0xa0, 0xeb, 0x81, 0x12, 0xc9, 0x64, 0x66, 0x61, 0xde, 0xf3, 0x6d, 0xd4, 0xae, 0x8e, 0xb1, 0x72, 0xe0, 0xcd, 0x37, 0x01, 0x28, 0x52, 0xd7, 0x39, 0x46, 0x0c, 0x55, 0xcf, 0x47, 0x70, 0xef, 0xa1, 0x17}, + subYX: fp.Elt{0x8d, 0x58, 0xde, 0x83, 0x88, 0x16, 0x0e, 0x12, 0x42, 0x03, 0x50, 0x60, 0x4b, 0xdf, 0xbf, 0x95, 0xcc, 0x7d, 0x18, 0x17, 0x7e, 0x31, 0x5d, 0x8a, 0x66, 0xc1, 0xcf, 0x14, 0xea, 0xf4, 0xf4, 0xe5, 0x63, 0x2d, 0x32, 0x86, 0x9b, 0xed, 0x1f, 0x4f, 0x03, 0xaf, 0x33, 0x92, 0xcb, 0xaf, 0x9c, 0x05, 0x0d, 0x47, 0x1b, 0x42, 0xba, 0x13, 0x22, 0x98}, + dt2: fp.Elt{0xb5, 0x48, 0xeb, 0x7d, 0x3d, 0x10, 0x9f, 0x59, 0xde, 0xf8, 0x1c, 0x4f, 0x7d, 0x9d, 0x40, 0x4d, 0x9e, 0x13, 0x24, 0xb5, 0x21, 0x09, 0xb7, 0xee, 0x98, 0x5c, 0x56, 0xbc, 0x5e, 0x2b, 0x78, 0x38, 0x06, 0xac, 0xe3, 0xe0, 0xfa, 0x2e, 0xde, 0x4f, 0xd2, 0xb3, 0xfb, 0x2d, 0x71, 0x84, 0xd1, 0x9d, 0x12, 0x5b, 0x35, 0xc8, 0x03, 0x68, 0x67, 0xc7}, + }, + { /* 39P*/ + addYX: fp.Elt{0xb6, 0x65, 0xfb, 0xa7, 0x06, 0x35, 0xbb, 0xe0, 0x31, 0x8d, 0x91, 0x40, 0x98, 0xab, 0x30, 0xe4, 0xca, 0x12, 0x59, 0x89, 0xed, 0x65, 0x5d, 0x7f, 0xae, 0x69, 0xa0, 0xa4, 0xfa, 0x78, 0xb4, 0xf7, 0xed, 0xae, 0x86, 0x78, 0x79, 0x64, 0x24, 0xa6, 0xd4, 0xe1, 0xf6, 0xd3, 0xa0, 0x89, 0xba, 0x20, 0xf4, 0x54, 0x0d, 0x8f, 0xdb, 0x1a, 0x79, 0xdb}, + subYX: fp.Elt{0xe1, 0x82, 0x0c, 0x4d, 0xde, 0x9f, 0x40, 0xf0, 0xc1, 0xbd, 0x8b, 0xd3, 0x24, 0x03, 0xcd, 0xf2, 0x92, 0x7d, 0xe2, 0x68, 0x7f, 0xf1, 0xbe, 0x69, 0xde, 0x34, 0x67, 0x4c, 0x85, 0x3b, 0xec, 0x98, 0xcc, 0x4d, 0x3e, 0xc0, 0x96, 0x27, 0xe6, 0x75, 0xfc, 0xdf, 0x37, 0xc0, 0x1e, 0x27, 0xe0, 0xf6, 0xc2, 0xbd, 0xbc, 0x3d, 0x9b, 0x39, 0xdc, 0xe2}, + dt2: fp.Elt{0xd8, 0x29, 0xa7, 0x39, 0xe3, 0x9f, 0x2f, 0x0e, 0x4b, 0x24, 0x21, 0x70, 0xef, 0xfd, 0x91, 0xea, 0xbf, 0xe1, 
0x72, 0x90, 0xcc, 0xc9, 0x84, 0x0e, 0xad, 0xd5, 0xe6, 0xbb, 0xc5, 0x99, 0x7f, 0xa4, 0xf0, 0x2e, 0xcc, 0x95, 0x64, 0x27, 0x19, 0xd8, 0x4c, 0x27, 0x0d, 0xff, 0xb6, 0x29, 0xe2, 0x6c, 0xfa, 0xbb, 0x4d, 0x9c, 0xbb, 0xaf, 0xa5, 0xec}, + }, + { /* 41P*/ + addYX: fp.Elt{0xd6, 0x33, 0x3f, 0x9f, 0xcf, 0xfd, 0x4c, 0xd1, 0xfe, 0xe5, 0xeb, 0x64, 0x27, 0xae, 0x7a, 0xa2, 0x82, 0x50, 0x6d, 0xaa, 0xe3, 0x5d, 0xe2, 0x48, 0x60, 0xb3, 0x76, 0x04, 0xd9, 0x19, 0xa7, 0xa1, 0x73, 0x8d, 0x38, 0xa9, 0xaf, 0x45, 0xb5, 0xb2, 0x62, 0x9b, 0xf1, 0x35, 0x7b, 0x84, 0x66, 0xeb, 0x06, 0xef, 0xf1, 0xb2, 0x2d, 0x6a, 0x61, 0x15}, + subYX: fp.Elt{0x86, 0x50, 0x42, 0xf7, 0xda, 0x59, 0xb2, 0xcf, 0x0d, 0x3d, 0xee, 0x8e, 0x53, 0x5d, 0xf7, 0x9e, 0x6a, 0x26, 0x2d, 0xc7, 0x8c, 0x8e, 0x18, 0x50, 0x6d, 0xb7, 0x51, 0x4c, 0xa7, 0x52, 0x6e, 0x0e, 0x0a, 0x16, 0x74, 0xb2, 0x81, 0x8b, 0x56, 0x27, 0x22, 0x84, 0xf4, 0x56, 0xc5, 0x06, 0xe1, 0x8b, 0xca, 0x2d, 0xdb, 0x9a, 0xf6, 0x10, 0x9c, 0x51}, + dt2: fp.Elt{0x1f, 0x16, 0xa2, 0x78, 0x96, 0x1b, 0x85, 0x9c, 0x76, 0x49, 0xd4, 0x0f, 0xac, 0xb0, 0xf4, 0xd0, 0x06, 0x2c, 0x7e, 0x6d, 0x6e, 0x8e, 0xc7, 0x9f, 0x18, 0xad, 0xfc, 0x88, 0x0c, 0x0c, 0x09, 0x05, 0x05, 0xa0, 0x79, 0x72, 0x32, 0x72, 0x87, 0x0f, 0x49, 0x87, 0x0c, 0xb4, 0x12, 0xc2, 0x09, 0xf8, 0x9f, 0x30, 0x72, 0xa9, 0x47, 0x13, 0x93, 0x49}, + }, + { /* 43P*/ + addYX: fp.Elt{0xcc, 0xb1, 0x4c, 0xd3, 0xc0, 0x9e, 0x9e, 0x4d, 0x6d, 0x28, 0x0b, 0xa5, 0x94, 0xa7, 0x2e, 0xc2, 0xc7, 0xaf, 0x29, 0x73, 0xc9, 0x68, 0xea, 0x0f, 0x34, 0x37, 0x8d, 0x96, 0x8f, 0x3a, 0x3d, 0x73, 0x1e, 0x6d, 0x9f, 0xcf, 0x8d, 0x83, 0xb5, 0x71, 0xb9, 0xe1, 0x4b, 0x67, 0x71, 0xea, 0xcf, 0x56, 0xe5, 0xeb, 0x72, 0x15, 0x2f, 0x9e, 0xa8, 0xaa}, + subYX: fp.Elt{0xf4, 0x3e, 0x85, 0x1c, 0x1a, 0xef, 0x50, 0xd1, 0xb4, 0x20, 0xb2, 0x60, 0x05, 0x98, 0xfe, 0x47, 0x3b, 0xc1, 0x76, 0xca, 0x2c, 0x4e, 0x5a, 0x42, 0xa3, 0xf7, 0x20, 0xaa, 0x57, 0x39, 0xee, 0x34, 0x1f, 0xe1, 0x68, 0xd3, 0x7e, 0x06, 0xc4, 0x6c, 0xc7, 0x76, 0x2b, 0xe4, 0x1c, 0x48, 0x44, 0xe6, 0xe5, 0x44, 0x24, 0x8d, 0xb3, 0xb6, 0x88, 0x32}, + dt2: fp.Elt{0x18, 0xa7, 0xba, 0xd0, 0x44, 0x6f, 0x33, 0x31, 0x00, 0xf8, 0xf6, 0x12, 0xe3, 0xc5, 0xc7, 0xb5, 0x91, 0x9c, 0x91, 0xb5, 0x75, 0x18, 0x18, 0x8a, 0xab, 0xed, 0x24, 0x11, 0x2e, 0xce, 0x5a, 0x0f, 0x94, 0x5f, 0x2e, 0xca, 0xd3, 0x80, 0xea, 0xe5, 0x34, 0x96, 0x67, 0x8b, 0x6a, 0x26, 0x5e, 0xc8, 0x9d, 0x2c, 0x5e, 0x6c, 0xa2, 0x0c, 0xbf, 0xf0}, + }, + { /* 45P*/ + addYX: fp.Elt{0xb3, 0xbf, 0xa3, 0x85, 0xee, 0xf6, 0x58, 0x02, 0x78, 0xc4, 0x30, 0xd6, 0x57, 0x59, 0x8c, 0x88, 0x08, 0x7c, 0xbc, 0xbe, 0x0a, 0x74, 0xa9, 0xde, 0x69, 0xe7, 0x41, 0xd8, 0xbf, 0x66, 0x8d, 0x3d, 0x28, 0x00, 0x8c, 0x47, 0x65, 0x34, 0xfe, 0x86, 0x9e, 0x6a, 0xf2, 0x41, 0x6a, 0x94, 0xc4, 0x88, 0x75, 0x23, 0x0d, 0x52, 0x69, 0xee, 0x07, 0x89}, + subYX: fp.Elt{0x22, 0x3c, 0xa1, 0x70, 0x58, 0x97, 0x93, 0xbe, 0x59, 0xa8, 0x0b, 0x8a, 0x46, 0x2a, 0x38, 0x1e, 0x08, 0x6b, 0x61, 0x9f, 0xf2, 0x4a, 0x8b, 0x80, 0x68, 0x6e, 0xc8, 0x92, 0x60, 0xf3, 0xc9, 0x89, 0xb2, 0x6d, 0x63, 0xb0, 0xeb, 0x83, 0x15, 0x63, 0x0e, 0x64, 0xbb, 0xb8, 0xfe, 0xb4, 0x81, 0x90, 0x01, 0x28, 0x10, 0xb9, 0x74, 0x6e, 0xde, 0xa4}, + dt2: fp.Elt{0x1a, 0x23, 0x45, 0xa8, 0x6f, 0x4e, 0xa7, 0x4a, 0x0c, 0xeb, 0xb0, 0x43, 0xf9, 0xef, 0x99, 0x60, 0x5b, 0xdb, 0x66, 0xc0, 0x86, 0x71, 0x43, 0xb1, 0x22, 0x7b, 0x1c, 0xe7, 0x8d, 0x09, 0x1d, 0x83, 0x76, 0x9c, 0xd3, 0x5a, 0xdd, 0x42, 0xd9, 0x2f, 0x2d, 0xba, 0x7a, 0xc2, 0xd9, 0x6b, 0xd4, 0x7a, 0xf1, 0xd5, 0x5f, 0x6b, 0x85, 0xbf, 0x0b, 0xf1}, + }, + { /* 47P*/ + addYX: fp.Elt{0xb2, 0x83, 0xfa, 0x1f, 0xd2, 0xce, 0xb6, 0xf2, 0x2d, 0xea, 0x1b, 
0xe5, 0x29, 0xa5, 0x72, 0xf9, 0x25, 0x48, 0x4e, 0xf2, 0x50, 0x1b, 0x39, 0xda, 0x34, 0xc5, 0x16, 0x13, 0xb4, 0x0c, 0xa1, 0x00, 0x79, 0x7a, 0xf5, 0x8b, 0xf3, 0x70, 0x14, 0xb6, 0xfc, 0x9a, 0x47, 0x68, 0x1e, 0x42, 0x70, 0x64, 0x2a, 0x84, 0x3e, 0x3d, 0x20, 0x58, 0xf9, 0x6a}, + subYX: fp.Elt{0xd9, 0xee, 0xc0, 0xc4, 0xf5, 0xc2, 0x86, 0xaf, 0x45, 0xd2, 0xd2, 0x87, 0x1b, 0x64, 0xd5, 0xe0, 0x8c, 0x44, 0x00, 0x4f, 0x43, 0x89, 0x04, 0x48, 0x4a, 0x0b, 0xca, 0x94, 0x06, 0x2f, 0x23, 0x5b, 0x6c, 0x8d, 0x44, 0x66, 0x53, 0xf5, 0x5a, 0x20, 0x72, 0x28, 0x58, 0x84, 0xcc, 0x73, 0x22, 0x5e, 0xd1, 0x0b, 0x56, 0x5e, 0x6a, 0xa3, 0x11, 0x91}, + dt2: fp.Elt{0x6e, 0x9f, 0x88, 0xa8, 0x68, 0x2f, 0x12, 0x37, 0x88, 0xfc, 0x92, 0x8f, 0x24, 0xeb, 0x5b, 0x2a, 0x2a, 0xd0, 0x14, 0x40, 0x4c, 0xa9, 0xa4, 0x03, 0x0c, 0x45, 0x48, 0x13, 0xe8, 0xa6, 0x37, 0xab, 0xc0, 0x06, 0x38, 0x6c, 0x96, 0x73, 0x40, 0x6c, 0xc6, 0xea, 0x56, 0xc6, 0xe9, 0x1a, 0x69, 0xeb, 0x7a, 0xd1, 0x33, 0x69, 0x58, 0x2b, 0xea, 0x2f}, + }, + { /* 49P*/ + addYX: fp.Elt{0x58, 0xa8, 0x05, 0x41, 0x00, 0x9d, 0xaa, 0xd9, 0x98, 0xcf, 0xb9, 0x41, 0xb5, 0x4a, 0x8d, 0xe2, 0xe7, 0xc0, 0x72, 0xef, 0xc8, 0x28, 0x6b, 0x68, 0x9d, 0xc9, 0xdf, 0x05, 0x8b, 0xd0, 0x04, 0x74, 0x79, 0x45, 0x52, 0x05, 0xa3, 0x6e, 0x35, 0x3a, 0xe3, 0xef, 0xb2, 0xdc, 0x08, 0x6f, 0x4e, 0x76, 0x85, 0x67, 0xba, 0x23, 0x8f, 0xdd, 0xaf, 0x09}, + subYX: fp.Elt{0xb4, 0x38, 0xc8, 0xff, 0x4f, 0x65, 0x2a, 0x7e, 0xad, 0xb1, 0xc6, 0xb9, 0x3d, 0xd6, 0xf7, 0x14, 0xcf, 0xf6, 0x98, 0x75, 0xbb, 0x47, 0x83, 0x90, 0xe7, 0xe1, 0xf6, 0x14, 0x99, 0x7e, 0xfa, 0xe4, 0x77, 0x24, 0xe3, 0xe7, 0xf0, 0x1e, 0xdb, 0x27, 0x4e, 0x16, 0x04, 0xf2, 0x08, 0x52, 0xfc, 0xec, 0x55, 0xdb, 0x2e, 0x67, 0xe1, 0x94, 0x32, 0x89}, + dt2: fp.Elt{0x00, 0xad, 0x03, 0x35, 0x1a, 0xb1, 0x88, 0xf0, 0xc9, 0x11, 0xe4, 0x12, 0x52, 0x61, 0xfd, 0x8a, 0x1b, 0x6a, 0x0a, 0x4c, 0x42, 0x46, 0x22, 0x0e, 0xa5, 0xf9, 0xe2, 0x50, 0xf2, 0xb2, 0x1f, 0x20, 0x78, 0x10, 0xf6, 0xbf, 0x7f, 0x0c, 0x9c, 0xad, 0x40, 0x8b, 0x82, 0xd4, 0xba, 0x69, 0x09, 0xac, 0x4b, 0x6d, 0xc4, 0x49, 0x17, 0x81, 0x57, 0x3b}, + }, + { /* 51P*/ + addYX: fp.Elt{0x0d, 0xfe, 0xb4, 0x35, 0x11, 0xbd, 0x1d, 0x6b, 0xc2, 0xc5, 0x3b, 0xd2, 0x23, 0x2c, 0x72, 0xe3, 0x48, 0xb1, 0x48, 0x73, 0xfb, 0xa3, 0x21, 0x6e, 0xc0, 0x09, 0x69, 0xac, 0xe1, 0x60, 0xbc, 0x24, 0x03, 0x99, 0x63, 0x0a, 0x00, 0xf0, 0x75, 0xf6, 0x92, 0xc5, 0xd6, 0xdb, 0x51, 0xd4, 0x7d, 0xe6, 0xf4, 0x11, 0x79, 0xd7, 0xc3, 0xaf, 0x48, 0xd0}, + subYX: fp.Elt{0xf4, 0x4f, 0xaf, 0x31, 0xe3, 0x10, 0x89, 0x95, 0xf0, 0x8a, 0xf6, 0x31, 0x9f, 0x48, 0x02, 0xba, 0x42, 0x2b, 0x3c, 0x22, 0x8b, 0xcc, 0x12, 0x98, 0x6e, 0x7a, 0x64, 0x3a, 0xc4, 0xca, 0x32, 0x2a, 0x72, 0xf8, 0x2c, 0xcf, 0x78, 0x5e, 0x7a, 0x75, 0x6e, 0x72, 0x46, 0x48, 0x62, 0x28, 0xac, 0x58, 0x1a, 0xc6, 0x59, 0x88, 0x2a, 0x44, 0x9e, 0x83}, + dt2: fp.Elt{0xb3, 0xde, 0x36, 0xfd, 0xeb, 0x1b, 0xd4, 0x24, 0x1b, 0x08, 0x8c, 0xfe, 0xa9, 0x41, 0xa1, 0x64, 0xf2, 0x6d, 0xdb, 0xf9, 0x94, 0xae, 0x86, 0x71, 0xab, 0x10, 0xbf, 0xa3, 0xb2, 0xa0, 0xdf, 0x10, 0x8c, 0x74, 0xce, 0xb3, 0xfc, 0xdb, 0xba, 0x15, 0xf6, 0x91, 0x7a, 0x9c, 0x36, 0x1e, 0x45, 0x07, 0x3c, 0xec, 0x1a, 0x61, 0x26, 0x93, 0xe3, 0x50}, + }, + { /* 53P*/ + addYX: fp.Elt{0xc5, 0x50, 0xc5, 0x83, 0xb0, 0xbd, 0xd9, 0xf6, 0x6d, 0x15, 0x5e, 0xc1, 0x1a, 0x33, 0xa0, 0xce, 0x13, 0x70, 0x3b, 0xe1, 0x31, 0xc6, 0xc4, 0x02, 0xec, 0x8c, 0xd5, 0x9c, 0x97, 0xd3, 0x12, 0xc4, 0xa2, 0xf9, 0xd5, 0xfb, 0x22, 0x69, 0x94, 0x09, 0x2f, 0x59, 0xce, 0xdb, 0xf2, 0xf2, 0x00, 0xe0, 0xa9, 0x08, 0x44, 0x2e, 0x8b, 0x6b, 0xf5, 0xb3}, + subYX: fp.Elt{0x90, 0xdd, 0xec, 0xa2, 0x65, 0xb7, 0x61, 
0xbc, 0xaa, 0x70, 0xa2, 0x15, 0xd8, 0xb0, 0xf8, 0x8e, 0x23, 0x3d, 0x9f, 0x46, 0xa3, 0x29, 0x20, 0xd1, 0xa1, 0x15, 0x81, 0xc6, 0xb6, 0xde, 0xbe, 0x60, 0x63, 0x24, 0xac, 0x15, 0xfb, 0xeb, 0xd3, 0xea, 0x57, 0x13, 0x86, 0x38, 0x1e, 0x22, 0xf4, 0x8c, 0x5d, 0xaf, 0x1b, 0x27, 0x21, 0x4f, 0xa3, 0x63}, + dt2: fp.Elt{0x07, 0x15, 0x87, 0xc4, 0xfd, 0xa1, 0x97, 0x7a, 0x07, 0x1f, 0x56, 0xcc, 0xe3, 0x6a, 0x01, 0x90, 0xce, 0xf9, 0xfa, 0x50, 0xb2, 0xe0, 0x87, 0x8b, 0x6c, 0x63, 0x6c, 0xf6, 0x2a, 0x09, 0xef, 0xef, 0xd2, 0x31, 0x40, 0x25, 0xf6, 0x84, 0xcb, 0xe0, 0xc4, 0x23, 0xc1, 0xcb, 0xe2, 0x02, 0x83, 0x2d, 0xed, 0x74, 0x74, 0x8b, 0xf8, 0x7c, 0x81, 0x18}, + }, + { /* 55P*/ + addYX: fp.Elt{0x9e, 0xe5, 0x59, 0x95, 0x63, 0x2e, 0xac, 0x8b, 0x03, 0x3c, 0xc1, 0x8e, 0xe1, 0x5b, 0x56, 0x3c, 0x16, 0x41, 0xe4, 0xc2, 0x60, 0x0c, 0x6d, 0x65, 0x9f, 0xfc, 0x27, 0x68, 0x43, 0x44, 0x05, 0x12, 0x6c, 0xda, 0x04, 0xef, 0xcf, 0xcf, 0xdc, 0x0a, 0x1a, 0x7f, 0x12, 0xd3, 0xeb, 0x02, 0xb6, 0x04, 0xca, 0xd6, 0xcb, 0xf0, 0x22, 0xba, 0x35, 0x6d}, + subYX: fp.Elt{0x09, 0x6d, 0xf9, 0x64, 0x4c, 0xe6, 0x41, 0xff, 0x01, 0x4d, 0xce, 0x1e, 0xfa, 0x38, 0xa2, 0x25, 0x62, 0xff, 0x03, 0x39, 0x18, 0x91, 0xbb, 0x9d, 0xce, 0x02, 0xf0, 0xf1, 0x3c, 0x55, 0x18, 0xa9, 0xab, 0x4d, 0xd2, 0x35, 0xfd, 0x8d, 0xa9, 0xb2, 0xad, 0xb7, 0x06, 0x6e, 0xc6, 0x69, 0x49, 0xd6, 0x98, 0x98, 0x0b, 0x22, 0x81, 0x6b, 0xbd, 0xa0}, + dt2: fp.Elt{0x22, 0xf4, 0x85, 0x5d, 0x2b, 0xf1, 0x55, 0xa5, 0xd6, 0x27, 0x86, 0x57, 0x12, 0x1f, 0x16, 0x0a, 0x5a, 0x9b, 0xf2, 0x38, 0xb6, 0x28, 0xd8, 0x99, 0x0c, 0x89, 0x1d, 0x7f, 0xca, 0x21, 0x17, 0x1a, 0x0b, 0x02, 0x5f, 0x77, 0x2f, 0x73, 0x30, 0x7c, 0xc8, 0xd7, 0x2b, 0xcc, 0xe7, 0xf3, 0x21, 0xac, 0x53, 0xa7, 0x11, 0x5d, 0xd8, 0x1d, 0x9b, 0xf5}, + }, + { /* 57P*/ + addYX: fp.Elt{0x94, 0x63, 0x5d, 0xef, 0xfd, 0x6d, 0x25, 0x4e, 0x6d, 0x29, 0x03, 0xed, 0x24, 0x28, 0x27, 0x57, 0x47, 0x3e, 0x6a, 0x1a, 0xfe, 0x37, 0xee, 0x5f, 0x83, 0x29, 0x14, 0xfd, 0x78, 0x25, 0x8a, 0xe1, 0x02, 0x38, 0xd8, 0xca, 0x65, 0x55, 0x40, 0x7d, 0x48, 0x2c, 0x7c, 0x7e, 0x60, 0xb6, 0x0c, 0x6d, 0xf7, 0xe8, 0xb3, 0x62, 0x53, 0xd6, 0x9c, 0x2b}, + subYX: fp.Elt{0x47, 0x25, 0x70, 0x62, 0xf5, 0x65, 0x93, 0x62, 0x08, 0xac, 0x59, 0x66, 0xdb, 0x08, 0xd9, 0x1a, 0x19, 0xaf, 0xf4, 0xef, 0x02, 0xa2, 0x78, 0xa9, 0x55, 0x1c, 0xfa, 0x08, 0x11, 0xcb, 0xa3, 0x71, 0x74, 0xb1, 0x62, 0xe7, 0xc7, 0xf3, 0x5a, 0xb5, 0x8b, 0xd4, 0xf6, 0x10, 0x57, 0x79, 0x72, 0x2f, 0x13, 0x86, 0x7b, 0x44, 0x5f, 0x48, 0xfd, 0x88}, + dt2: fp.Elt{0x10, 0x02, 0xcd, 0x05, 0x9a, 0xc3, 0x32, 0x6d, 0x10, 0x3a, 0x74, 0xba, 0x06, 0xc4, 0x3b, 0x34, 0xbc, 0x36, 0xed, 0xa3, 0xba, 0x9a, 0xdb, 0x6d, 0xd4, 0x69, 0x99, 0x97, 0xd0, 0xe4, 0xdd, 0xf5, 0xd4, 0x7c, 0xd3, 0x4e, 0xab, 0xd1, 0x3b, 0xbb, 0xe9, 0xc7, 0x6a, 0x94, 0x25, 0x61, 0xf0, 0x06, 0xc5, 0x12, 0xa8, 0x86, 0xe5, 0x35, 0x46, 0xeb}, + }, + { /* 59P*/ + addYX: fp.Elt{0x9e, 0x95, 0x11, 0xc6, 0xc7, 0xe8, 0xee, 0x5a, 0x26, 0xa0, 0x72, 0x72, 0x59, 0x91, 0x59, 0x16, 0x49, 0x99, 0x7e, 0xbb, 0xd7, 0x15, 0xb4, 0xf2, 0x40, 0xf9, 0x5a, 0x4d, 0xc8, 0xa0, 0xe2, 0x34, 0x7b, 0x34, 0xf3, 0x99, 0xbf, 0xa9, 0xf3, 0x79, 0xc1, 0x1a, 0x0c, 0xf4, 0x86, 0x74, 0x4e, 0xcb, 0xbc, 0x90, 0xad, 0xb6, 0x51, 0x6d, 0xaa, 0x33}, + subYX: fp.Elt{0x9f, 0xd1, 0xc5, 0xa2, 0x6c, 0x24, 0x88, 0x15, 0x71, 0x68, 0xf6, 0x07, 0x45, 0x02, 0xc4, 0x73, 0x7e, 0x75, 0x87, 0xca, 0x7c, 0xf0, 0x92, 0x00, 0x75, 0xd6, 0x5a, 0xdd, 0xe0, 0x64, 0x16, 0x9d, 0x62, 0x80, 0x33, 0x9f, 0xf4, 0x8e, 0x1a, 0x15, 0x1c, 0xd3, 0x0f, 0x4d, 0x4f, 0x62, 0x2d, 0xd7, 0xa5, 0x77, 0xe3, 0xea, 0xf0, 0xfb, 0x1a, 0xdb}, + dt2: fp.Elt{0x6a, 0xa2, 0xb1, 
0xaa, 0xfb, 0x5a, 0x32, 0x4e, 0xff, 0x47, 0x06, 0xd5, 0x9a, 0x4f, 0xce, 0x83, 0x5b, 0x82, 0x34, 0x3e, 0x47, 0xb8, 0xf8, 0xe9, 0x7c, 0x67, 0x69, 0x8d, 0x9c, 0xb7, 0xde, 0x57, 0xf4, 0x88, 0x41, 0x56, 0x0c, 0x87, 0x1e, 0xc9, 0x2f, 0x54, 0xbf, 0x5c, 0x68, 0x2c, 0xd9, 0xc4, 0xef, 0x53, 0x73, 0x1e, 0xa6, 0x38, 0x02, 0x10}, + }, + { /* 61P*/ + addYX: fp.Elt{0x08, 0x80, 0x4a, 0xc9, 0xb7, 0xa8, 0x88, 0xd9, 0xfc, 0x6a, 0xc0, 0x3e, 0xc2, 0x33, 0x4d, 0x2b, 0x2a, 0xa3, 0x6d, 0x72, 0x3e, 0xdc, 0x34, 0x68, 0x08, 0xbf, 0x27, 0xef, 0xf4, 0xff, 0xe2, 0x0c, 0x31, 0x0c, 0xa2, 0x0a, 0x1f, 0x65, 0xc1, 0x4c, 0x61, 0xd3, 0x1b, 0xbc, 0x25, 0xb1, 0xd0, 0xd4, 0x89, 0xb2, 0x53, 0xfb, 0x43, 0xa5, 0xaf, 0x04}, + subYX: fp.Elt{0xe3, 0xe1, 0x37, 0xad, 0x58, 0xa9, 0x55, 0x81, 0xee, 0x64, 0x21, 0xb9, 0xf5, 0x4c, 0x35, 0xea, 0x4a, 0xd3, 0x26, 0xaa, 0x90, 0xd4, 0x60, 0x46, 0x09, 0x4b, 0x4a, 0x62, 0xf9, 0xcd, 0xe1, 0xee, 0xbb, 0xc2, 0x09, 0x0b, 0xb0, 0x96, 0x8e, 0x43, 0x77, 0xaf, 0x25, 0x20, 0x5e, 0x47, 0xe4, 0x1d, 0x50, 0x69, 0x74, 0x08, 0xd7, 0xb9, 0x90, 0x13}, + dt2: fp.Elt{0x51, 0x91, 0x95, 0x64, 0x03, 0x16, 0xfd, 0x6e, 0x26, 0x94, 0x6b, 0x61, 0xe7, 0xd9, 0xe0, 0x4a, 0x6d, 0x7c, 0xfa, 0xc0, 0xe2, 0x43, 0x23, 0x53, 0x70, 0xf5, 0x6f, 0x73, 0x8b, 0x81, 0xb0, 0x0c, 0xee, 0x2e, 0x46, 0xf2, 0x8d, 0xa6, 0xfb, 0xb5, 0x1c, 0x33, 0xbf, 0x90, 0x59, 0xc9, 0x7c, 0xb8, 0x6f, 0xad, 0x75, 0x02, 0x90, 0x8e, 0x59, 0x75}, + }, + { /* 63P*/ + addYX: fp.Elt{0x36, 0x4d, 0x77, 0x04, 0xb8, 0x7d, 0x4a, 0xd1, 0xc5, 0xbb, 0x7b, 0x50, 0x5f, 0x8d, 0x9d, 0x62, 0x0f, 0x66, 0x71, 0xec, 0x87, 0xc5, 0x80, 0x82, 0xc8, 0xf4, 0x6a, 0x94, 0x92, 0x5b, 0xb0, 0x16, 0x9b, 0xb2, 0xc9, 0x6f, 0x2b, 0x2d, 0xee, 0x95, 0x73, 0x2e, 0xc2, 0x1b, 0xc5, 0x55, 0x36, 0x86, 0x24, 0xf8, 0x20, 0x05, 0x0d, 0x93, 0xd7, 0x76}, + subYX: fp.Elt{0x7f, 0x01, 0xeb, 0x2e, 0x48, 0x4d, 0x1d, 0xf1, 0x06, 0x7e, 0x7c, 0x2a, 0x43, 0xbf, 0x28, 0xac, 0xe9, 0x58, 0x13, 0xc8, 0xbf, 0x8e, 0xc0, 0xef, 0xe8, 0x4f, 0x46, 0x8a, 0xe7, 0xc0, 0xf6, 0x0f, 0x0a, 0x03, 0x48, 0x91, 0x55, 0x39, 0x2a, 0xe3, 0xdc, 0xf6, 0x22, 0x9d, 0x4d, 0x71, 0x55, 0x68, 0x25, 0x6e, 0x95, 0x52, 0xee, 0x4c, 0xd9, 0x01}, + dt2: fp.Elt{0xac, 0x33, 0x3f, 0x7c, 0x27, 0x35, 0x15, 0x91, 0x33, 0x8d, 0xf9, 0xc4, 0xf4, 0xf3, 0x90, 0x09, 0x75, 0x69, 0x62, 0x9f, 0x61, 0x35, 0x83, 0x92, 0x04, 0xef, 0x96, 0x38, 0x80, 0x9e, 0x88, 0xb3, 0x67, 0x95, 0xbe, 0x79, 0x3c, 0x35, 0xd8, 0xdc, 0xb2, 0x3e, 0x2d, 0xe6, 0x46, 0xbe, 0x81, 0xf3, 0x32, 0x0e, 0x37, 0x23, 0x75, 0x2a, 0x3d, 0xa0}, + }, +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go new file mode 100644 index 0000000000..f6ac5edbbb --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go @@ -0,0 +1,62 @@ +package goldilocks + +import ( + "crypto/subtle" + + mlsb "github.com/cloudflare/circl/math/mlsbset" +) + +const ( + // MLSBRecoding parameters + fxT = 448 + fxV = 2 + fxW = 3 + fx2w1 = 1 << (uint(fxW) - 1) +) + +// ScalarBaseMult returns kG where G is the generator point. 
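+// The scalar is first conditioned in constant time: a zero scalar is replaced
+// by the group order (an equivalent representative), and an even scalar is
+// negated so that the MLSB-set recoding below always receives an odd value;
+// the final cneg undoes that negation on the resulting point.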
+func (e twistCurve) ScalarBaseMult(k *Scalar) *twistPoint { + m, err := mlsb.New(fxT, fxV, fxW) + if err != nil { + panic(err) + } + if m.IsExtended() { + panic("not extended") + } + + var isZero int + if k.IsZero() { + isZero = 1 + } + subtle.ConstantTimeCopy(isZero, k[:], order[:]) + + minusK := *k + isEven := 1 - int(k[0]&0x1) + minusK.Neg() + subtle.ConstantTimeCopy(isEven, k[:], minusK[:]) + c, err := m.Encode(k[:]) + if err != nil { + panic(err) + } + + gP := c.Exp(groupMLSB{}) + P := gP.(*twistPoint) + P.cneg(uint(isEven)) + return P +} + +type groupMLSB struct{} + +func (e groupMLSB) ExtendedEltP() mlsb.EltP { return nil } +func (e groupMLSB) Sqr(x mlsb.EltG) { x.(*twistPoint).Double() } +func (e groupMLSB) Mul(x mlsb.EltG, y mlsb.EltP) { x.(*twistPoint).mixAddZ1(y.(*preTwistPointAffine)) } +func (e groupMLSB) Identity() mlsb.EltG { return twistCurve{}.Identity() } +func (e groupMLSB) NewEltP() mlsb.EltP { return &preTwistPointAffine{} } +func (e groupMLSB) Lookup(a mlsb.EltP, v uint, s, u int32) { + Tabj := &tabFixMult[v] + P := a.(*preTwistPointAffine) + for k := range Tabj { + P.cmov(&Tabj[k], uint(subtle.ConstantTimeEq(int32(k), u))) + } + P.cneg(int(s >> 31)) +} diff --git a/vendor/github.com/cloudflare/circl/internal/conv/conv.go b/vendor/github.com/cloudflare/circl/internal/conv/conv.go new file mode 100644 index 0000000000..649a8e931d --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/conv/conv.go @@ -0,0 +1,140 @@ +package conv + +import ( + "encoding/binary" + "fmt" + "math/big" + "strings" +) + +// BytesLe2Hex returns an hexadecimal string of a number stored in a +// little-endian order slice x. +func BytesLe2Hex(x []byte) string { + b := &strings.Builder{} + b.Grow(2*len(x) + 2) + fmt.Fprint(b, "0x") + if len(x) == 0 { + fmt.Fprint(b, "00") + } + for i := len(x) - 1; i >= 0; i-- { + fmt.Fprintf(b, "%02x", x[i]) + } + return b.String() +} + +// BytesLe2BigInt converts a little-endian slice x into a big-endian +// math/big.Int. +func BytesLe2BigInt(x []byte) *big.Int { + n := len(x) + b := new(big.Int) + if len(x) > 0 { + y := make([]byte, n) + for i := 0; i < n; i++ { + y[n-1-i] = x[i] + } + b.SetBytes(y) + } + return b +} + +// BytesBe2Uint64Le converts a big-endian slice x to a little-endian slice of uint64. +func BytesBe2Uint64Le(x []byte) []uint64 { + l := len(x) + z := make([]uint64, (l+7)/8) + blocks := l / 8 + for i := 0; i < blocks; i++ { + z[i] = binary.BigEndian.Uint64(x[l-8*(i+1):]) + } + remBytes := l % 8 + for i := 0; i < remBytes; i++ { + z[blocks] |= uint64(x[l-1-8*blocks-i]) << uint(8*i) + } + return z +} + +// BigInt2BytesLe stores a positive big.Int number x into a little-endian slice z. +// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros). +// If x does not fit in the slice or is negative, z is not modified. +func BigInt2BytesLe(z []byte, x *big.Int) { + xLen := (x.BitLen() + 7) >> 3 + zLen := len(z) + if zLen >= xLen && x.Sign() >= 0 { + y := x.Bytes() + for i := 0; i < xLen; i++ { + z[i] = y[xLen-1-i] + } + for i := xLen; i < zLen; i++ { + z[i] = 0 + } + } +} + +// Uint64Le2BigInt converts a little-endian slice x into a big number. +func Uint64Le2BigInt(x []uint64) *big.Int { + n := len(x) + b := new(big.Int) + var bi big.Int + for i := n - 1; i >= 0; i-- { + bi.SetUint64(x[i]) + b.Lsh(b, 64) + b.Add(b, &bi) + } + return b +} + +// Uint64Le2BytesLe converts a little-endian slice x to a little-endian slice of bytes. 
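+// For example, []uint64{0x0807060504030201} maps to the byte sequence
+// 01 02 03 04 05 06 07 08.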
+func Uint64Le2BytesLe(x []uint64) []byte {
+	b := make([]byte, 8*len(x))
+	n := len(x)
+	for i := 0; i < n; i++ {
+		binary.LittleEndian.PutUint64(b[i*8:], x[i])
+	}
+	return b
+}
+
+// Uint64Le2BytesBe converts a little-endian slice x to a big-endian slice of bytes.
+func Uint64Le2BytesBe(x []uint64) []byte {
+	b := make([]byte, 8*len(x))
+	n := len(x)
+	for i := 0; i < n; i++ {
+		binary.BigEndian.PutUint64(b[i*8:], x[n-1-i])
+	}
+	return b
+}
+
+// Uint64Le2Hex returns a hexadecimal string of a number stored in a
+// little-endian order slice x.
+func Uint64Le2Hex(x []uint64) string {
+	b := new(strings.Builder)
+	b.Grow(16*len(x) + 2)
+	fmt.Fprint(b, "0x")
+	if len(x) == 0 {
+		fmt.Fprint(b, "00")
+	}
+	for i := len(x) - 1; i >= 0; i-- {
+		fmt.Fprintf(b, "%016x", x[i])
+	}
+	return b.String()
+}
+
+// BigInt2Uint64Le stores a positive big.Int number x into a little-endian slice z.
+// The slice is modified if the bitlength of x <= 64*len(z) (padding with zeros).
+// If x does not fit in the slice or is negative, z is not modified.
+func BigInt2Uint64Le(z []uint64, x *big.Int) {
+	xLen := (x.BitLen() + 63) >> 6 // number of 64-bit words
+	zLen := len(z)
+	if zLen >= xLen && x.Sign() > 0 {
+		var y, yi big.Int
+		y.Set(x)
+		two64 := big.NewInt(1)
+		two64.Lsh(two64, 64).Sub(two64, big.NewInt(1))
+		for i := 0; i < xLen; i++ {
+			yi.And(&y, two64)
+			z[i] = yi.Uint64()
+			y.Rsh(&y, 64)
+		}
+	}
+	for i := xLen; i < zLen; i++ {
+		z[i] = 0
+	}
+}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/doc.go b/vendor/github.com/cloudflare/circl/internal/sha3/doc.go
new file mode 100644
index 0000000000..7e02309070
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/internal/sha3/doc.go
@@ -0,0 +1,62 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sha3 implements the SHA-3 fixed-output-length hash functions and
+// the SHAKE variable-output-length hash functions defined by FIPS-202.
+//
+// Both types of hash function use the "sponge" construction and the Keccak
+// permutation. For a detailed specification see http://keccak.noekeon.org/
+//
+// # Guidance
+//
+// If you aren't sure what function you need, use SHAKE256 with at least 64
+// bytes of output. The SHAKE instances are faster than the SHA3 instances;
+// the latter have to allocate memory to conform to the hash.Hash interface.
+//
+// If you need a secret-key MAC (message authentication code), prepend the
+// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
+// output.
+//
+// # Security strengths
+//
+// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
+// strength against preimage attacks of x bits. Since they only produce "x"
+// bits of output, their collision-resistance is only "x/2" bits.
+//
+// The SHAKE-256 and -128 functions have a generic security strength of 256 and
+// 128 bits against all attacks, provided that at least 2x bits of their output
+// are used. Requesting more than 64 or 32 bytes of output, respectively, does
+// not increase the collision-resistance of the SHAKE functions.
+//
+// # The sponge construction
+//
+// A sponge builds a pseudo-random function from a public pseudo-random
+// permutation, by applying the permutation to a state of "rate + capacity"
+// bytes, but hiding "capacity" of the bytes.
+//
+// A sponge starts out with a zero state.
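+// (For SHA3-256, for instance, the rate is 136 bytes and the capacity
+// 64 bytes, giving the 256-bit generic security strength noted below.)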
To hash an input using a sponge, up +// to "rate" bytes of the input are XORed into the sponge's state. The sponge +// is then "full" and the permutation is applied to "empty" it. This process is +// repeated until all the input has been "absorbed". The input is then padded. +// The digest is "squeezed" from the sponge in the same way, except that output +// is copied out instead of input being XORed in. +// +// A sponge is parameterized by its generic security strength, which is equal +// to half its capacity; capacity + rate is equal to the permutation's width. +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. +// +// # Recommendations +// +// The SHAKE functions are recommended for most new uses. They can produce +// output of arbitrary length. SHAKE256, with an output length of at least +// 64 bytes, provides 256-bit security against all attacks. The Keccak team +// recommends it for most applications upgrading from SHA2-512. (NIST chose a +// much stronger, but much slower, sponge instance for SHA3-512.) +// +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. +// They produce output of the same length, with the same security strengths +// against all attacks. This means, in particular, that SHA3-256 only has +// 128-bit collision resistance, because its output length is 32 bytes. +package sha3 diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go b/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go new file mode 100644 index 0000000000..7d2365a76e --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file provides functions for creating instances of the SHA-3 +// and SHAKE hash functions, as well as utility functions for hashing +// bytes. + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. +func New224() State { + return State{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +func New256() State { + return State{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +// New384 creates a new SHA3-384 hash. +// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. +func New384() State { + return State{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. +func New512() State { + return State{rate: 72, outputLen: 64, dsbyte: 0x06} +} + +// Sum224 returns the SHA3-224 digest of the data. +func Sum224(data []byte) (digest [28]byte) { + h := New224() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum256 returns the SHA3-256 digest of the data. +func Sum256(data []byte) (digest [32]byte) { + h := New256() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum384 returns the SHA3-384 digest of the data. 
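+// For example:
+//
+//	digest := Sum384(message)
+//
+// computes the same 48-byte digest as writing message into New384 and
+// calling Sum.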
+func Sum384(data []byte) (digest [48]byte) { + h := New384() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum512 returns the SHA3-512 digest of the data. +func Sum512(data []byte) (digest [64]byte) { + h := New512() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go new file mode 100644 index 0000000000..1755fd1e6d --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go @@ -0,0 +1,391 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// KeccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +// If turbo is true, applies the 12-round variant instead of the +// regular 24-round variant. +// nolint:funlen +func KeccakF1600(a *[25]uint64, turbo bool) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + i := 0 + + if turbo { + i = 12 + } + + for ; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. + + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[12] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[18] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[24] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[16] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[22] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[3] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[1] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[7] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[19] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[11] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[23] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[4] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[2] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[8] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[14] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // 
Round 2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[7] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[23] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[14] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[11] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[2] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[18] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[6] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[22] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[4] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[1] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[8] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[24] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[12] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[3] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[19] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[22] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[8] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[19] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[1] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[12] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[23] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[16] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[2] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[24] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[5] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + 
a[2] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[6] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[3] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[14] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[7] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[18] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[4] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[2] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[3] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[4] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[6] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[7] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[8] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[11] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[12] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[14] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[16] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[18] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[19] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[22] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[23] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[24] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/rc.go b/vendor/github.com/cloudflare/circl/internal/sha3/rc.go new file mode 100644 index 0000000000..6a3df42f30 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/rc.go @@ -0,0 +1,29 @@ +package sha3 + +// RC stores the round constants for use in the ι step. 
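+// When the turbo (12-round) variant is selected, KeccakF1600 starts at round
+// index 12, so only the last twelve of these constants are used.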
+var RC = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go new file mode 100644 index 0000000000..a0df5aa6c5 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go @@ -0,0 +1,200 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +const ( + // maxRate is the maximum size of the internal buffer. SHAKE-256 + // currently needs the largest buffer. + maxRate = 168 +) + +func (d *State) buf() []byte { + return d.storage.asBytes()[d.bufo:d.bufe] +} + +type State struct { + // Generic sponge components. + a [25]uint64 // main state of the hash + rate int // the number of bytes of state to use + + bufo int // offset of buffer in storage + bufe int // end of buffer in storage + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + storage storageBuf + + // Specific to SHA-3 and SHAKE. + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing + turbo bool // Whether we're using 12 rounds instead of 24 +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *State) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *State) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the byte buffer, and setting Sponge.state to absorbing. +func (d *State) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.bufo = 0 + d.bufe = 0 +} + +func (d *State) clone() *State { + ret := *d + return &ret +} + +// permute applies the KeccakF-1600 permutation. 
It handles
+// any input-output buffering.
+func (d *State) permute() {
+	switch d.state {
+	case spongeAbsorbing:
+		// If we're absorbing, we need to xor the input into the state
+		// before applying the permutation.
+		xorIn(d, d.buf())
+		d.bufe = 0
+		d.bufo = 0
+		KeccakF1600(&d.a, d.turbo)
+	case spongeSqueezing:
+		// If we're squeezing, we need to apply the permutation before
+		// copying more output.
+		KeccakF1600(&d.a, d.turbo)
+		d.bufe = d.rate
+		d.bufo = 0
+		copyOut(d, d.buf())
+	}
+}
+
+// padAndPermute appends the domain separation bits in dsbyte, applies
+// the multi-bitrate 10..1 padding rule, and permutes the state.
+func (d *State) padAndPermute(dsbyte byte) {
+	// Pad with this instance's domain-separator bits. We know that there's
+	// at least one byte of space in d.buf() because, if it were full,
+	// permute would have been called to empty it. dsbyte also contains the
+	// first one bit for the padding. See the comment in the state struct.
+	zerosStart := d.bufe + 1
+	d.bufe = d.rate
+	buf := d.buf()
+	buf[zerosStart-1] = dsbyte
+	for i := zerosStart; i < d.rate; i++ {
+		buf[i] = 0
+	}
+	// This adds the final one bit for the padding. Because of the way that
+	// bits are numbered from the LSB upwards, the final bit is the MSB of
+	// the last byte.
+	buf[d.rate-1] ^= 0x80
+	// Apply the permutation
+	d.permute()
+	d.state = spongeSqueezing
+	d.bufe = d.rate
+	copyOut(d, buf)
+}
+
+// Write absorbs more data into the hash's state. It panics if more data
+// is written after output has been read from it.
+func (d *State) Write(p []byte) (written int, err error) {
+	if d.state != spongeAbsorbing {
+		panic("sha3: write to sponge after read")
+	}
+	written = len(p)
+
+	for len(p) > 0 {
+		bufl := d.bufe - d.bufo
+		if bufl == 0 && len(p) >= d.rate {
+			// The fast path; absorb a full "rate" bytes of input and apply the permutation.
+			xorIn(d, p[:d.rate])
+			p = p[d.rate:]
+			KeccakF1600(&d.a, d.turbo)
+		} else {
+			// The slow path; buffer the input until we can fill the sponge, and then xor it in.
+			todo := d.rate - bufl
+			if todo > len(p) {
+				todo = len(p)
+			}
+			d.bufe += todo
+			buf := d.buf()
+			copy(buf[bufl:], p[:todo])
+			p = p[todo:]
+
+			// If the sponge is full, apply the permutation.
+			if d.bufe == d.rate {
+				d.permute()
+			}
+		}
+	}
+
+	return written, nil
+}
+
+// Read squeezes an arbitrary number of bytes from the sponge.
+func (d *State) Read(out []byte) (n int, err error) {
+	// If we're still absorbing, pad and apply the permutation.
+	if d.state == spongeAbsorbing {
+		d.padAndPermute(d.dsbyte)
+	}
+
+	n = len(out)
+
+	// Now, do the squeezing.
+	for len(out) > 0 {
+		buf := d.buf()
+		n := copy(out, buf)
+		d.bufo += n
+		out = out[n:]
+
+		// Apply the permutation if we've squeezed the sponge dry.
+		if d.bufo == d.bufe {
+			d.permute()
+		}
+	}
+
+	return
+}
+
+// Sum applies padding to the hash state and then squeezes out the desired
+// number of output bytes.
+func (d *State) Sum(in []byte) []byte {
+	// Make a copy of the original hash so that caller can keep writing
+	// and summing.
+	dup := d.clone()
+	hash := make([]byte, dup.outputLen)
+	_, _ = dup.Read(hash)
+	return append(in, hash...)
+} + +func (d *State) IsAbsorbing() bool { + return d.state == spongeAbsorbing +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s b/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s new file mode 100644 index 0000000000..8a4458f63f --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!appengine + +#include "textflag.h" + +// func kimd(function code, chain *[200]byte, src []byte) +TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG src+16(FP), R2, R3 // R2=base, R3=len + +continue: + WORD $0xB93E0002 // KIMD --, R2 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET + +// func klmd(function code, chain *[200]byte, dst, src []byte) +TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 + // TODO: SHAKE support + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG dst+16(FP), R2, R3 // R2=base, R3=len + LMG src+40(FP), R4, R5 // R4=base, R5=len + +continue: + WORD $0xB93F0024 // KLMD R2, R4 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go new file mode 100644 index 0000000000..77817f758c --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file defines the ShakeHash interface, and provides +// functions for creating SHAKE and cSHAKE instances, as well as utility +// functions for hashing bytes to arbitrary-length output. +// +// +// SHAKE implementation is based on FIPS PUB 202 [1] +// cSHAKE implementations is based on NIST SP 800-185 [2] +// +// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// [2] https://doi.org/10.6028/NIST.SP.800-185 + +import ( + "io" +) + +// ShakeHash defines the interface to hash functions that +// support arbitrary-length output. +type ShakeHash interface { + // Write absorbs more data into the hash's state. It panics if input is + // written to it after output has been read from it. + io.Writer + + // Read reads more output from the hash; reading affects the hash's + // state. (ShakeHash.Read is thus very different from Hash.Sum) + // It never returns an error. + io.Reader + + // Clone returns a copy of the ShakeHash in its current state. + Clone() ShakeHash + + // Reset resets the ShakeHash to its initial state. + Reset() +} + +// Consts for configuring initial SHA-3 state +const ( + dsbyteShake = 0x1f + rate128 = 168 + rate256 = 136 +) + +// Clone returns copy of SHAKE context within its current state. +func (d *State) Clone() ShakeHash { + return d.clone() +} + +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +func NewShake128() State { + return State{rate: rate128, dsbyte: dsbyteShake} +} + +// NewTurboShake128 creates a new TurboSHAKE128 variable-output-length ShakeHash. 
+// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +// D is the domain separation byte and must be between 0x01 and 0x7f inclusive. +func NewTurboShake128(D byte) State { + if D == 0 || D > 0x7f { + panic("turboshake: D out of range") + } + return State{rate: rate128, dsbyte: D, turbo: true} +} + +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +func NewShake256() State { + return State{rate: rate256, dsbyte: dsbyteShake} +} + +// NewTurboShake256 creates a new TurboSHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +// D is the domain separation byte and must be between 0x01 and 0x7f inclusive. +func NewTurboShake256(D byte) State { + if D == 0 || D > 0x7f { + panic("turboshake: D out of range") + } + return State{rate: rate256, dsbyte: D, turbo: true} +} + +// ShakeSum128 writes an arbitrary-length digest of data into hash. +func ShakeSum128(hash, data []byte) { + h := NewShake128() + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +// ShakeSum256 writes an arbitrary-length digest of data into hash. +func ShakeSum256(hash, data []byte) { + h := NewShake256() + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +// TurboShakeSum128 writes an arbitrary-length digest of data into hash. +func TurboShakeSum128(hash, data []byte, D byte) { + h := NewTurboShake128(D) + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +// TurboShakeSum256 writes an arbitrary-length digest of data into hash. +func TurboShakeSum256(hash, data []byte, D byte) { + h := NewTurboShake256(D) + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +func (d *State) SwitchDS(D byte) { + d.dsbyte = D +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor.go new file mode 100644 index 0000000000..1e21337454 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor.go @@ -0,0 +1,15 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !386 && !ppc64le) || appengine +// +build !amd64,!386,!ppc64le appengine + +package sha3 + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate]byte + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(b) +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go new file mode 100644 index 0000000000..2b0c661790 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go @@ -0,0 +1,33 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 || appengine) && (!386 || appengine) && (!ppc64le || appengine) +// +build !amd64 appengine +// +build !386 appengine +// +build !ppc64le appengine + +package sha3 + +import "encoding/binary" + +// xorIn xors the bytes in buf into the state; it +// makes no non-portable assumptions about memory layout +// or alignment. 
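+// It is the portable fallback; on amd64, 386, and ppc64le builds (outside
+// appengine) the unsafe-based implementation in xor_unaligned.go is used
+// instead.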
+func xorIn(d *State, buf []byte) {
+	n := len(buf) / 8
+
+	for i := 0; i < n; i++ {
+		a := binary.LittleEndian.Uint64(buf)
+		d.a[i] ^= a
+		buf = buf[8:]
+	}
+}
+
+// copyOut copies uint64s to a byte buffer.
+func copyOut(d *State, b []byte) {
+	for i := 0; len(b) >= 8; i++ {
+		binary.LittleEndian.PutUint64(b, d.a[i])
+		b = b[8:]
+	}
+}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go
new file mode 100644
index 0000000000..052fc8d32d
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go
@@ -0,0 +1,61 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (amd64 || 386 || ppc64le) && !appengine
+// +build amd64 386 ppc64le
+// +build !appengine
+
+package sha3
+
+import "unsafe"
+
+// A storageBuf is an aligned array of maxRate bytes.
+type storageBuf [maxRate / 8]uint64
+
+func (b *storageBuf) asBytes() *[maxRate]byte {
+	return (*[maxRate]byte)(unsafe.Pointer(b))
+}
+
+// xorIn uses unaligned reads and writes to update d.a to contain d.a
+// XOR buf.
+func xorIn(d *State, buf []byte) {
+	n := len(buf)
+	bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8]
+	if n >= 72 {
+		d.a[0] ^= bw[0]
+		d.a[1] ^= bw[1]
+		d.a[2] ^= bw[2]
+		d.a[3] ^= bw[3]
+		d.a[4] ^= bw[4]
+		d.a[5] ^= bw[5]
+		d.a[6] ^= bw[6]
+		d.a[7] ^= bw[7]
+		d.a[8] ^= bw[8]
+	}
+	if n >= 104 {
+		d.a[9] ^= bw[9]
+		d.a[10] ^= bw[10]
+		d.a[11] ^= bw[11]
+		d.a[12] ^= bw[12]
+	}
+	if n >= 136 {
+		d.a[13] ^= bw[13]
+		d.a[14] ^= bw[14]
+		d.a[15] ^= bw[15]
+		d.a[16] ^= bw[16]
+	}
+	if n >= 144 {
+		d.a[17] ^= bw[17]
+	}
+	if n >= 168 {
+		d.a[18] ^= bw[18]
+		d.a[19] ^= bw[19]
+		d.a[20] ^= bw[20]
+	}
+}
+
+func copyOut(d *State, buf []byte) {
+	ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
+	copy(buf, ab[:])
+}
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp.go
new file mode 100644
index 0000000000..57a50ff5e9
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp.go
@@ -0,0 +1,205 @@
+// Package fp25519 provides prime field arithmetic over GF(2^255-19).
+package fp25519
+
+import (
+	"errors"
+
+	"github.com/cloudflare/circl/internal/conv"
+)
+
+// Size in bytes of an element.
+const Size = 32
+
+// Elt is a prime field element.
+type Elt [Size]byte
+
+func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) }
+
+// p is the prime modulus 2^255-19.
+var p = Elt{
+	0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
+}
+
+// P returns the prime modulus 2^255-19.
+func P() Elt { return p }
+
+// ToBytes stores in b the little-endian byte representation of x.
+func ToBytes(b []byte, x *Elt) error {
+	if len(b) != Size {
+		return errors.New("wrong size")
+	}
+	Modp(x)
+	copy(b, x[:])
+	return nil
+}
+
+// IsZero returns true if x is equal to 0.
+func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} }
+
+// SetOne assigns x=1.
+func SetOne(x *Elt) { *x = Elt{}; x[0] = 1 }
+
+// Neg calculates z = -x.
+func Neg(z, x *Elt) { Sub(z, &p, x) }
+
+// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue, which is
+// indicated by returning isQR = true.
Otherwise, when x/y is a quadratic +// non-residue, z will have an undetermined value and isQR = false. +func InvSqrt(z, x, y *Elt) (isQR bool) { + sqrtMinusOne := &Elt{ + 0xb0, 0xa0, 0x0e, 0x4a, 0x27, 0x1b, 0xee, 0xc4, + 0x78, 0xe4, 0x2f, 0xad, 0x06, 0x18, 0x43, 0x2f, + 0xa7, 0xd7, 0xfb, 0x3d, 0x99, 0x00, 0x4d, 0x2b, + 0x0b, 0xdf, 0xc1, 0x4f, 0x80, 0x24, 0x83, 0x2b, + } + t0, t1, t2, t3 := &Elt{}, &Elt{}, &Elt{}, &Elt{} + + Mul(t0, x, y) // t0 = u*v + Sqr(t1, y) // t1 = v^2 + Mul(t2, t0, t1) // t2 = u*v^3 + Sqr(t0, t1) // t0 = v^4 + Mul(t1, t0, t2) // t1 = u*v^7 + + var Tab [4]*Elt + Tab[0] = &Elt{} + Tab[1] = &Elt{} + Tab[2] = t3 + Tab[3] = t1 + + *Tab[0] = *t1 + Sqr(Tab[0], Tab[0]) + Sqr(Tab[1], Tab[0]) + Sqr(Tab[1], Tab[1]) + Mul(Tab[1], Tab[1], Tab[3]) + Mul(Tab[0], Tab[0], Tab[1]) + Sqr(Tab[0], Tab[0]) + Mul(Tab[0], Tab[0], Tab[1]) + Sqr(Tab[1], Tab[0]) + for i := 0; i < 4; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[0]) + Sqr(Tab[2], Tab[1]) + for i := 0; i < 4; i++ { + Sqr(Tab[2], Tab[2]) + } + Mul(Tab[2], Tab[2], Tab[0]) + Sqr(Tab[1], Tab[2]) + for i := 0; i < 14; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[2]) + Sqr(Tab[2], Tab[1]) + for i := 0; i < 29; i++ { + Sqr(Tab[2], Tab[2]) + } + Mul(Tab[2], Tab[2], Tab[1]) + Sqr(Tab[1], Tab[2]) + for i := 0; i < 59; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[2]) + for i := 0; i < 5; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[0]) + Sqr(Tab[2], Tab[1]) + for i := 0; i < 124; i++ { + Sqr(Tab[2], Tab[2]) + } + Mul(Tab[2], Tab[2], Tab[1]) + Sqr(Tab[2], Tab[2]) + Sqr(Tab[2], Tab[2]) + Mul(Tab[2], Tab[2], Tab[3]) + + Mul(z, t3, t2) // z = xy^(p+3)/8 = xy^3*(xy^7)^(p-5)/8 + // Checking whether y z^2 == x + Sqr(t0, z) // t0 = z^2 + Mul(t0, t0, y) // t0 = yz^2 + Sub(t1, t0, x) // t1 = t0-u + Add(t2, t0, x) // t2 = t0+u + if IsZero(t1) { + return true + } else if IsZero(t2) { + Mul(z, z, sqrtMinusOne) // z = z*sqrt(-1) + return true + } else { + return false + } +} + +// Inv calculates z = 1/x mod p. +func Inv(z, x *Elt) { + x0, x1, x2 := &Elt{}, &Elt{}, &Elt{} + Sqr(x1, x) + Sqr(x0, x1) + Sqr(x0, x0) + Mul(x0, x0, x) + Mul(z, x0, x1) + Sqr(x1, z) + Mul(x0, x0, x1) + Sqr(x1, x0) + for i := 0; i < 4; i++ { + Sqr(x1, x1) + } + Mul(x0, x0, x1) + Sqr(x1, x0) + for i := 0; i < 9; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, x0) + Sqr(x2, x1) + for i := 0; i < 19; i++ { + Sqr(x2, x2) + } + Mul(x2, x2, x1) + for i := 0; i < 10; i++ { + Sqr(x2, x2) + } + Mul(x2, x2, x0) + Sqr(x0, x2) + for i := 0; i < 49; i++ { + Sqr(x0, x0) + } + Mul(x0, x0, x2) + Sqr(x1, x0) + for i := 0; i < 99; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, x0) + for i := 0; i < 50; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, x2) + for i := 0; i < 5; i++ { + Sqr(x1, x1) + } + Mul(z, z, x1) +} + +// Cmov assigns y to x if n is 1. +func Cmov(x, y *Elt, n uint) { cmov(x, y, n) } + +// Cswap interchanges x and y if n is 1. +func Cswap(x, y *Elt, n uint) { cswap(x, y, n) } + +// Add calculates z = x+y mod p. +func Add(z, x, y *Elt) { add(z, x, y) } + +// Sub calculates z = x-y mod p. +func Sub(z, x, y *Elt) { sub(z, x, y) } + +// AddSub calculates (x,y) = (x+y mod p, x-y mod p). +func AddSub(x, y *Elt) { addsub(x, y) } + +// Mul calculates z = x*y mod p. +func Mul(z, x, y *Elt) { mul(z, x, y) } + +// Sqr calculates z = x^2 mod p. +func Sqr(z, x *Elt) { sqr(z, x) } + +// Modp ensures that z is between [0,p-1]. 
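+// IsZero and ToBytes above rely on it to canonicalize an element before
+// comparison or export.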
+func Modp(z *Elt) { modp(z) } diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go new file mode 100644 index 0000000000..057f0d2803 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go @@ -0,0 +1,45 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package fp25519 + +import ( + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) } +func add(z, x, y *Elt) { addAmd64(z, x, y) } +func sub(z, x, y *Elt) { subAmd64(z, x, y) } +func addsub(x, y *Elt) { addsubAmd64(x, y) } +func mul(z, x, y *Elt) { mulAmd64(z, x, y) } +func sqr(z, x *Elt) { sqrAmd64(z, x) } +func modp(z *Elt) { modpAmd64(z) } + +//go:noescape +func cmovAmd64(x, y *Elt, n uint) + +//go:noescape +func cswapAmd64(x, y *Elt, n uint) + +//go:noescape +func addAmd64(z, x, y *Elt) + +//go:noescape +func subAmd64(z, x, y *Elt) + +//go:noescape +func addsubAmd64(x, y *Elt) + +//go:noescape +func mulAmd64(z, x, y *Elt) + +//go:noescape +func sqrAmd64(z, x *Elt) + +//go:noescape +func modpAmd64(z *Elt) diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h new file mode 100644 index 0000000000..b884b584ab --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h @@ -0,0 +1,351 @@ +// This code was imported from https://github.com/armfazh/rfc7748_precomputed + +// CHECK_BMI2ADX triggers bmi2adx if supported, +// otherwise it fallbacks to legacy code. +#define CHECK_BMI2ADX(label, legacy, bmi2adx) \ + CMPB ·hasBmi2Adx(SB), $0 \ + JE label \ + bmi2adx \ + RET \ + label: \ + legacy \ + RET + +// cselect is a conditional move +// if b=1: it copies y into x; +// if b=0: x remains with the same value; +// if b<> 0,1: undefined. +// Uses: AX, DX, FLAGS +// Instr: x86_64, cmov +#define cselect(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \ + MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \ + MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \ + MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; + +// cswap is a conditional swap +// if b=1: x,y <- y,x; +// if b=0: x,y remain with the same values; +// if b<> 0,1: undefined. 
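+// The swap is branch-free: TESTQ sets the flags once and each limb pair is
+// exchanged with CMOVQNE, so the macro takes the same time whether or not
+// b is set.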
+// Uses: AX, DX, R8, FLAGS +// Instr: x86_64, cmov +#define cswap(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \ + MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \ + MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \ + MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; + +// additionLeg adds x and y and stores in z +// Uses: AX, DX, R8-R11, FLAGS +// Instr: x86_64, cmov +#define additionLeg(z,x,y) \ + MOVL $38, AX; \ + MOVL $0, DX; \ + MOVQ 0+x, R8; ADDQ 0+y, R8; \ + MOVQ 8+x, R9; ADCQ 8+y, R9; \ + MOVQ 16+x, R10; ADCQ 16+y, R10; \ + MOVQ 24+x, R11; ADCQ 24+y, R11; \ + CMOVQCS AX, DX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ $0, R11; MOVQ R11, 24+z; \ + MOVL $0, DX; \ + CMOVQCS AX, DX; \ + ADDQ DX, R8; MOVQ R8, 0+z; + +// additionAdx adds x and y and stores in z +// Uses: AX, DX, R8-R11, FLAGS +// Instr: x86_64, cmov, adx +#define additionAdx(z,x,y) \ + MOVL $38, AX; \ + XORL DX, DX; \ + MOVQ 0+x, R8; ADCXQ 0+y, R8; \ + MOVQ 8+x, R9; ADCXQ 8+y, R9; \ + MOVQ 16+x, R10; ADCXQ 16+y, R10; \ + MOVQ 24+x, R11; ADCXQ 24+y, R11; \ + CMOVQCS AX, DX ; \ + XORL AX, AX; \ + ADCXQ DX, R8; \ + ADCXQ AX, R9; MOVQ R9, 8+z; \ + ADCXQ AX, R10; MOVQ R10, 16+z; \ + ADCXQ AX, R11; MOVQ R11, 24+z; \ + MOVL $38, DX; \ + CMOVQCS DX, AX; \ + ADDQ AX, R8; MOVQ R8, 0+z; + +// subtraction subtracts y from x and stores in z +// Uses: AX, DX, R8-R11, FLAGS +// Instr: x86_64, cmov +#define subtraction(z,x,y) \ + MOVL $38, AX; \ + MOVQ 0+x, R8; SUBQ 0+y, R8; \ + MOVQ 8+x, R9; SBBQ 8+y, R9; \ + MOVQ 16+x, R10; SBBQ 16+y, R10; \ + MOVQ 24+x, R11; SBBQ 24+y, R11; \ + MOVL $0, DX; \ + CMOVQCS AX, DX; \ + SUBQ DX, R8; \ + SBBQ $0, R9; MOVQ R9, 8+z; \ + SBBQ $0, R10; MOVQ R10, 16+z; \ + SBBQ $0, R11; MOVQ R11, 24+z; \ + MOVL $0, DX; \ + CMOVQCS AX, DX; \ + SUBQ DX, R8; MOVQ R8, 0+z; + +// integerMulAdx multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerMulAdx(z,x,y) \ + MOVL $0,R15; \ + MOVQ 0+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R8; MOVQ AX, 0+z; \ + MULXQ 8+x, AX, R9; ADCXQ AX, R8; \ + MULXQ 16+x, AX, R10; ADCXQ AX, R9; \ + MULXQ 24+x, AX, R11; ADCXQ AX, R10; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; \ + MOVQ 8+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 8+z; \ + MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; \ + MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; \ + MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; \ + MOVQ 16+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R8; ADCXQ R12, AX; MOVQ AX, 16+z; \ + MULXQ 8+x, AX, R9; ADCXQ R13, R8; ADOXQ AX, R8; \ + MULXQ 16+x, AX, R10; ADCXQ R14, R9; ADOXQ AX, R9; \ + MULXQ 24+x, AX, R11; ADCXQ R15, R10; ADOXQ AX, R10; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; ADOXQ AX, R11; \ + MOVQ 24+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 24+z; \ + MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; MOVQ R12, 32+z; \ + MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; MOVQ R13, 40+z; \ + MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; MOVQ R14, 48+z; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; MOVQ R15, 56+z; + +// integerMulLeg multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define 
integerMulLeg(z,x,y) \ + MOVQ 0+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, 0+z; MOVQ DX, R15; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R13, R15; \ + ADCQ R14, R10; MOVQ R10, 16+z; \ + ADCQ AX, R11; MOVQ R11, 24+z; \ + ADCQ $0, DX; MOVQ DX, 32+z; \ + MOVQ 8+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R12, R15; MOVQ R15, 8+z; \ + ADCQ R13, R9; \ + ADCQ R14, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + ADCQ 16+z, R9; MOVQ R9, R15; \ + ADCQ 24+z, R10; MOVQ R10, 24+z; \ + ADCQ 32+z, R11; MOVQ R11, 32+z; \ + ADCQ $0, DX; MOVQ DX, 40+z; \ + MOVQ 16+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R12, R15; MOVQ R15, 16+z; \ + ADCQ R13, R9; \ + ADCQ R14, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + ADCQ 24+z, R9; MOVQ R9, R15; \ + ADCQ 32+z, R10; MOVQ R10, 32+z; \ + ADCQ 40+z, R11; MOVQ R11, 40+z; \ + ADCQ $0, DX; MOVQ DX, 48+z; \ + MOVQ 24+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R12, R15; MOVQ R15, 24+z; \ + ADCQ R13, R9; \ + ADCQ R14, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + ADCQ 32+z, R9; MOVQ R9, 32+z; \ + ADCQ 40+z, R10; MOVQ R10, 40+z; \ + ADCQ 48+z, R11; MOVQ R11, 48+z; \ + ADCQ $0, DX; MOVQ DX, 56+z; + +// integerSqrLeg squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define integerSqrLeg(z,x) \ + MOVQ 0+x, R8; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R9; MOVQ DX, R10; /* A[0]*A[1] */ \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; /* A[0]*A[2] */ \ + MOVQ 24+x, AX; MULQ R8; MOVQ AX, R15; MOVQ DX, R12; /* A[0]*A[3] */ \ + MOVQ 24+x, R8; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, CX; MOVQ DX, R13; /* A[3]*A[1] */ \ + MOVQ 16+x, AX; MULQ R8; /* A[3]*A[2] */ \ + \ + ADDQ R14, R10;\ + ADCQ R15, R11; MOVL $0, R15;\ + ADCQ CX, R12;\ + ADCQ AX, R13;\ + ADCQ $0, DX; MOVQ DX, R14;\ + MOVQ 8+x, AX; MULQ 16+x;\ + \ + ADDQ AX, R11;\ + ADCQ DX, R12;\ + ADCQ $0, R13;\ + ADCQ $0, R14;\ + ADCQ $0, R15;\ + \ + SHLQ $1, R14, R15; MOVQ R15, 56+z;\ + SHLQ $1, R13, R14; MOVQ R14, 48+z;\ + SHLQ $1, R12, R13; MOVQ R13, 40+z;\ + SHLQ $1, R11, R12; MOVQ R12, 32+z;\ + SHLQ $1, R10, R11; MOVQ R11, 24+z;\ + SHLQ $1, R9, R10; MOVQ R10, 16+z;\ + SHLQ $1, R9; MOVQ R9, 8+z;\ + \ + MOVQ 0+x,AX; MULQ AX; MOVQ AX, 0+z; MOVQ DX, R9;\ + MOVQ 8+x,AX; MULQ AX; MOVQ AX, R10; MOVQ DX, R11;\ + MOVQ 16+x,AX; MULQ AX; MOVQ AX, R12; MOVQ DX, R13;\ + MOVQ 24+x,AX; MULQ AX; MOVQ AX, R14; MOVQ DX, R15;\ + \ + ADDQ 8+z, R9; MOVQ R9, 8+z;\ + ADCQ 16+z, R10; MOVQ R10, 16+z;\ + ADCQ 24+z, R11; MOVQ R11, 24+z;\ + ADCQ 32+z, R12; MOVQ R12, 32+z;\ + ADCQ 40+z, R13; MOVQ R13, 40+z;\ + ADCQ 48+z, R14; MOVQ R14, 48+z;\ + ADCQ 56+z, R15; MOVQ R15, 56+z; + +// integerSqrAdx squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerSqrAdx(z,x) \ + MOVQ 0+x, DX; /* A[0] */ \ + MULXQ 8+x, R8, R14; /* A[1]*A[0] */ XORL R15, R15; \ + MULXQ 16+x, R9, R10; /* A[2]*A[0] */ ADCXQ R14, R9; \ + MULXQ 24+x, AX, CX; /* A[3]*A[0] */ ADCXQ AX, R10; \ + MOVQ 24+x, DX; /* A[3] */ \ + MULXQ 8+x, 
R11, R12; /* A[1]*A[3] */ ADCXQ CX, R11; \ + MULXQ 16+x, AX, R13; /* A[2]*A[3] */ ADCXQ AX, R12; \ + MOVQ 8+x, DX; /* A[1] */ ADCXQ R15, R13; \ + MULXQ 16+x, AX, CX; /* A[2]*A[1] */ MOVL $0, R14; \ + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADCXQ R15, R14; \ + XORL R15, R15; \ + ADOXQ AX, R10; ADCXQ R8, R8; \ + ADOXQ CX, R11; ADCXQ R9, R9; \ + ADOXQ R15, R12; ADCXQ R10, R10; \ + ADOXQ R15, R13; ADCXQ R11, R11; \ + ADOXQ R15, R14; ADCXQ R12, R12; \ + ;;;;;;;;;;;;;;; ADCXQ R13, R13; \ + ;;;;;;;;;;;;;;; ADCXQ R14, R14; \ + MOVQ 0+x, DX; MULXQ DX, AX, CX; /* A[0]^2 */ \ + ;;;;;;;;;;;;;;; MOVQ AX, 0+z; \ + ADDQ CX, R8; MOVQ R8, 8+z; \ + MOVQ 8+x, DX; MULXQ DX, AX, CX; /* A[1]^2 */ \ + ADCQ AX, R9; MOVQ R9, 16+z; \ + ADCQ CX, R10; MOVQ R10, 24+z; \ + MOVQ 16+x, DX; MULXQ DX, AX, CX; /* A[2]^2 */ \ + ADCQ AX, R11; MOVQ R11, 32+z; \ + ADCQ CX, R12; MOVQ R12, 40+z; \ + MOVQ 24+x, DX; MULXQ DX, AX, CX; /* A[3]^2 */ \ + ADCQ AX, R13; MOVQ R13, 48+z; \ + ADCQ CX, R14; MOVQ R14, 56+z; + +// reduceFromDouble finds z congruent to x modulo p such that 0> 63) + // PUT BIT 255 IN CARRY FLAG AND CLEAR + x3 &^= 1 << 63 + + x0, c0 := bits.Add64(x0, cx, 0) + x1, c1 := bits.Add64(x1, 0, c0) + x2, c2 := bits.Add64(x2, 0, c1) + x3, _ = bits.Add64(x3, 0, c2) + + // TEST FOR BIT 255 AGAIN; ONLY TRIGGERED ON OVERFLOW MODULO 2^255-19 + // cx = C[255] ? 0 : 19 + cx = uint64(19) &^ (-(x3 >> 63)) + // CLEAR BIT 255 + x3 &^= 1 << 63 + + x0, c0 = bits.Sub64(x0, cx, 0) + x1, c1 = bits.Sub64(x1, 0, c0) + x2, c2 = bits.Sub64(x2, 0, c1) + x3, _ = bits.Sub64(x3, 0, c2) + + binary.LittleEndian.PutUint64(x[0*8:1*8], x0) + binary.LittleEndian.PutUint64(x[1*8:2*8], x1) + binary.LittleEndian.PutUint64(x[2*8:3*8], x2) + binary.LittleEndian.PutUint64(x[3*8:4*8], x3) +} + +func red64(z *Elt, x0, x1, x2, x3, x4, x5, x6, x7 uint64) { + h0, l0 := bits.Mul64(x4, 38) + h1, l1 := bits.Mul64(x5, 38) + h2, l2 := bits.Mul64(x6, 38) + h3, l3 := bits.Mul64(x7, 38) + + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + + l0, c0 = bits.Add64(l0, x0, 0) + l1, c1 = bits.Add64(l1, x1, c0) + l2, c2 = bits.Add64(l2, x2, c1) + l3, c3 := bits.Add64(l3, x3, c2) + l4, _ = bits.Add64(l4, 0, c3) + + _, l4 = bits.Mul64(l4, 38) + l0, c0 = bits.Add64(l0, l4, 0) + z1, c1 := bits.Add64(l1, 0, c0) + z2, c2 := bits.Add64(l2, 0, c1) + z3, c3 := bits.Add64(l3, 0, c2) + z0, _ := bits.Add64(l0, (-c3)&38, 0) + + binary.LittleEndian.PutUint64(z[0*8:1*8], z0) + binary.LittleEndian.PutUint64(z[1*8:2*8], z1) + binary.LittleEndian.PutUint64(z[2*8:3*8], z2) + binary.LittleEndian.PutUint64(z[3*8:4*8], z3) +} diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go new file mode 100644 index 0000000000..26ca4d01b7 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go @@ -0,0 +1,13 @@ +//go:build !amd64 || purego +// +build !amd64 purego + +package fp25519 + +func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) } +func add(z, x, y *Elt) { addGeneric(z, x, y) } +func sub(z, x, y *Elt) { subGeneric(z, x, y) } +func addsub(x, y *Elt) { addsubGeneric(x, y) } +func mul(z, x, y *Elt) { mulGeneric(z, x, y) } +func sqr(z, x *Elt) { sqrGeneric(z, x) } +func modp(z *Elt) { modpGeneric(z) } diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp.go b/vendor/github.com/cloudflare/circl/math/fp448/fp.go new file mode 100644 index 
0000000000..a5e36600bb
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/fp448/fp.go
@@ -0,0 +1,164 @@
+// Package fp448 provides prime field arithmetic over GF(2^448-2^224-1).
+package fp448
+
+import (
+	"errors"
+
+	"github.com/cloudflare/circl/internal/conv"
+)
+
+// Size in bytes of an element.
+const Size = 56
+
+// Elt is a prime field element.
+type Elt [Size]byte
+
+func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) }
+
+// p is the prime modulus 2^448-2^224-1.
+var p = Elt{
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+}
+
+// P returns the prime modulus 2^448-2^224-1.
+func P() Elt { return p }
+
+// ToBytes stores in b the little-endian byte representation of x.
+func ToBytes(b []byte, x *Elt) error {
+	if len(b) != Size {
+		return errors.New("wrong size")
+	}
+	Modp(x)
+	copy(b, x[:])
+	return nil
+}
+
+// IsZero returns true if x is equal to 0.
+func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} }
+
+// IsOne returns true if x is equal to 1.
+func IsOne(x *Elt) bool { Modp(x); return *x == Elt{1} }
+
+// SetOne assigns x=1.
+func SetOne(x *Elt) { *x = Elt{1} }
+
+// One returns the 1 element.
+func One() (x Elt) { x = Elt{1}; return }
+
+// Neg calculates z = -x.
+func Neg(z, x *Elt) { Sub(z, &p, x) }
+
+// Modp reduces z so that it lies in the range [0, p-1].
+func Modp(z *Elt) { Sub(z, z, &p) }
+
+// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic residue. If so,
+// isQR = true; otherwise, isQR = false, since x/y is a quadratic non-residue,
+// and z = sqrt(-x/y).
+func InvSqrt(z, x, y *Elt) (isQR bool) {
+	// First note that x^(2(k+1)) = x^((p-1)/2) * x = legendre(x) * x,
+	// so that's x if x is a quadratic residue and -x otherwise.
+	// Next, y^(6k+3) = y^(4k+2) * y^(2k+1) = y^(p-1) * y^((p-1)/2) = legendre(y).
+	// So the z we compute satisfies z^2 y = x^(2(k+1)) y^(6k+3) = legendre(x)*legendre(y)*x.
+	// Thus if x and y are quadratic residues, then z is indeed sqrt(x/y).
+	t0, t1 := &Elt{}, &Elt{}
+	Mul(t0, x, y)         // x*y
+	Sqr(t1, y)            // y^2
+	Mul(t1, t0, t1)       // x*y^3
+	powPminus3div4(z, t1) // (x*y^3)^k
+	Mul(z, z, t0)         // z = x*y*(x*y^3)^k = x^(k+1) * y^(3k+1)
+
+	// Check if x/y is a quadratic residue
+	Sqr(t0, z)     // z^2
+	Mul(t0, t0, y) // y*z^2
+	Sub(t0, t0, x) // y*z^2-x
+	return IsZero(t0)
+}
+
+// Inv calculates z = 1/x mod p.
+func Inv(z, x *Elt) {
+	// Calculates z = x^(4k+1) = x^(p-3+1) = x^(p-2) = x^-1, where k = (p-3)/4.
+	t := &Elt{}
+	powPminus3div4(t, x) // t = x^k
+	Sqr(t, t)            // t = x^2k
+	Sqr(t, t)            // t = x^4k
+	Mul(z, t, x)         // z = x^(4k+1)
+}
+
+// powPminus3div4 calculates z = x^k mod p, where k = (p-3)/4.
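+// Here k = 2^446 - 2^222 - 1, and the fixed addition chain below first builds
+// x^(2^j - 1) for j = 3, 6, 12, 24, 27, 54, 108, 111, and 222, then assembles
+// the final exponent from those blocks.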
+func powPminus3div4(z, x *Elt) { + x0, x1 := &Elt{}, &Elt{} + Sqr(z, x) + Mul(z, z, x) + Sqr(x0, z) + Mul(x0, x0, x) + Sqr(z, x0) + Sqr(z, z) + Sqr(z, z) + Mul(z, z, x0) + Sqr(x1, z) + for i := 0; i < 5; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, z) + Sqr(z, x1) + for i := 0; i < 11; i++ { + Sqr(z, z) + } + Mul(z, z, x1) + Sqr(z, z) + Sqr(z, z) + Sqr(z, z) + Mul(z, z, x0) + Sqr(x1, z) + for i := 0; i < 26; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, z) + Sqr(z, x1) + for i := 0; i < 53; i++ { + Sqr(z, z) + } + Mul(z, z, x1) + Sqr(z, z) + Sqr(z, z) + Sqr(z, z) + Mul(z, z, x0) + Sqr(x1, z) + for i := 0; i < 110; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, z) + Sqr(z, x1) + Mul(z, z, x) + for i := 0; i < 223; i++ { + Sqr(z, z) + } + Mul(z, z, x1) +} + +// Cmov assigns y to x if n is 1. +func Cmov(x, y *Elt, n uint) { cmov(x, y, n) } + +// Cswap interchanges x and y if n is 1. +func Cswap(x, y *Elt, n uint) { cswap(x, y, n) } + +// Add calculates z = x+y mod p. +func Add(z, x, y *Elt) { add(z, x, y) } + +// Sub calculates z = x-y mod p. +func Sub(z, x, y *Elt) { sub(z, x, y) } + +// AddSub calculates (x,y) = (x+y mod p, x-y mod p). +func AddSub(x, y *Elt) { addsub(x, y) } + +// Mul calculates z = x*y mod p. +func Mul(z, x, y *Elt) { mul(z, x, y) } + +// Sqr calculates z = x^2 mod p. +func Sqr(z, x *Elt) { sqr(z, x) } diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go new file mode 100644 index 0000000000..6a12209a70 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go @@ -0,0 +1,43 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package fp448 + +import ( + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) } +func add(z, x, y *Elt) { addAmd64(z, x, y) } +func sub(z, x, y *Elt) { subAmd64(z, x, y) } +func addsub(x, y *Elt) { addsubAmd64(x, y) } +func mul(z, x, y *Elt) { mulAmd64(z, x, y) } +func sqr(z, x *Elt) { sqrAmd64(z, x) } + +/* Functions defined in fp_amd64.s */ + +//go:noescape +func cmovAmd64(x, y *Elt, n uint) + +//go:noescape +func cswapAmd64(x, y *Elt, n uint) + +//go:noescape +func addAmd64(z, x, y *Elt) + +//go:noescape +func subAmd64(z, x, y *Elt) + +//go:noescape +func addsubAmd64(x, y *Elt) + +//go:noescape +func mulAmd64(z, x, y *Elt) + +//go:noescape +func sqrAmd64(z, x *Elt) diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h new file mode 100644 index 0000000000..536fe5bdfe --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h @@ -0,0 +1,591 @@ +// This code was imported from https://github.com/armfazh/rfc7748_precomputed + +// CHECK_BMI2ADX triggers bmi2adx if supported, +// otherwise it fallbacks to legacy code. +#define CHECK_BMI2ADX(label, legacy, bmi2adx) \ + CMPB ·hasBmi2Adx(SB), $0 \ + JE label \ + bmi2adx \ + RET \ + label: \ + legacy \ + RET + +// cselect is a conditional move +// if b=1: it copies y into x; +// if b=0: x remains with the same value; +// if b<> 0,1: undefined. 
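+// (TESTQ b,b sets ZF=1 exactly when b is zero, so the CMOVQNE instructions
+// below copy y's limbs into x only when b is non-zero, without taking any
+// data-dependent branch.)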
+// Uses: AX, DX, FLAGS +// Instr: x86_64, cmov +#define cselect(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \ + MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \ + MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \ + MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; \ + MOVQ 32+x, AX; MOVQ 32+y, DX; CMOVQNE DX, AX; MOVQ AX, 32+x; \ + MOVQ 40+x, AX; MOVQ 40+y, DX; CMOVQNE DX, AX; MOVQ AX, 40+x; \ + MOVQ 48+x, AX; MOVQ 48+y, DX; CMOVQNE DX, AX; MOVQ AX, 48+x; + +// cswap is a conditional swap +// if b=1: x,y <- y,x; +// if b=0: x,y remain with the same values; +// if b<> 0,1: undefined. +// Uses: AX, DX, R8, FLAGS +// Instr: x86_64, cmov +#define cswap(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \ + MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \ + MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \ + MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; \ + MOVQ 32+x, AX; MOVQ AX, R8; MOVQ 32+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 32+x; MOVQ DX, 32+y; \ + MOVQ 40+x, AX; MOVQ AX, R8; MOVQ 40+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 40+x; MOVQ DX, 40+y; \ + MOVQ 48+x, AX; MOVQ AX, R8; MOVQ 48+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 48+x; MOVQ DX, 48+y; + +// additionLeg adds x and y and stores in z +// Uses: AX, DX, R8-R14, FLAGS +// Instr: x86_64 +#define additionLeg(z,x,y) \ + MOVQ 0+x, R8; ADDQ 0+y, R8; \ + MOVQ 8+x, R9; ADCQ 8+y, R9; \ + MOVQ 16+x, R10; ADCQ 16+y, R10; \ + MOVQ 24+x, R11; ADCQ 24+y, R11; \ + MOVQ 32+x, R12; ADCQ 32+y, R12; \ + MOVQ 40+x, R13; ADCQ 40+y, R13; \ + MOVQ 48+x, R14; ADCQ 48+y, R14; \ + MOVQ $0, AX; ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ $0, AX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ DX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ R8, 0+z; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ DX, R11; MOVQ R11, 24+z; \ + ADCQ $0, R12; MOVQ R12, 32+z; \ + ADCQ $0, R13; MOVQ R13, 40+z; \ + ADCQ $0, R14; MOVQ R14, 48+z; + + +// additionAdx adds x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, adx +#define additionAdx(z,x,y) \ + MOVL $32, R15; \ + XORL DX, DX; \ + MOVQ 0+x, R8; ADCXQ 0+y, R8; \ + MOVQ 8+x, R9; ADCXQ 8+y, R9; \ + MOVQ 16+x, R10; ADCXQ 16+y, R10; \ + MOVQ 24+x, R11; ADCXQ 24+y, R11; \ + MOVQ 32+x, R12; ADCXQ 32+y, R12; \ + MOVQ 40+x, R13; ADCXQ 40+y, R13; \ + MOVQ 48+x, R14; ADCXQ 48+y, R14; \ + ;;;;;;;;;;;;;;; ADCXQ DX, DX; \ + XORL AX, AX; \ + ADCXQ DX, R8; SHLXQ R15, DX, DX; \ + ADCXQ AX, R9; \ + ADCXQ AX, R10; \ + ADCXQ DX, R11; \ + ADCXQ AX, R12; \ + ADCXQ AX, R13; \ + ADCXQ AX, R14; \ + ADCXQ AX, AX; \ + XORL DX, DX; \ + ADCXQ AX, R8; MOVQ R8, 0+z; SHLXQ R15, AX, AX; \ + ADCXQ DX, R9; MOVQ R9, 8+z; \ + ADCXQ DX, R10; MOVQ R10, 16+z; \ + ADCXQ AX, R11; MOVQ R11, 24+z; \ + ADCXQ DX, R12; MOVQ R12, 32+z; \ + ADCXQ DX, R13; MOVQ R13, 40+z; \ + ADCXQ DX, R14; MOVQ R14, 48+z; + +// subtraction subtracts y from x and stores in z +// Uses: AX, DX, R8-R14, FLAGS +// Instr: x86_64 +#define subtraction(z,x,y) \ + MOVQ 0+x, R8; SUBQ 0+y, R8; \ + MOVQ 8+x, R9; SBBQ 8+y, R9; \ + MOVQ 16+x, R10; SBBQ 16+y, R10; \ + MOVQ 24+x, R11; SBBQ 24+y, R11; \ + MOVQ 
32+x, R12; SBBQ 32+y, R12; \ + MOVQ 40+x, R13; SBBQ 40+y, R13; \ + MOVQ 48+x, R14; SBBQ 48+y, R14; \ + MOVQ $0, AX; SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ $0, AX; \ + SBBQ $0, R9; \ + SBBQ $0, R10; \ + SBBQ DX, R11; \ + SBBQ $0, R12; \ + SBBQ $0, R13; \ + SBBQ $0, R14; \ + SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ R8, 0+z; \ + SBBQ $0, R9; MOVQ R9, 8+z; \ + SBBQ $0, R10; MOVQ R10, 16+z; \ + SBBQ DX, R11; MOVQ R11, 24+z; \ + SBBQ $0, R12; MOVQ R12, 32+z; \ + SBBQ $0, R13; MOVQ R13, 40+z; \ + SBBQ $0, R14; MOVQ R14, 48+z; + +// maddBmi2Adx multiplies x and y and accumulates in z +// Uses: AX, DX, R15, FLAGS +// Instr: x86_64, bmi2, adx +#define maddBmi2Adx(z,x,y,i,r0,r1,r2,r3,r4,r5,r6) \ + MOVQ i+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R8; ADOXQ AX, r0; ADCXQ R8, r1; MOVQ r0,i+z; \ + MULXQ 8+x, AX, r0; ADOXQ AX, r1; ADCXQ r0, r2; MOVQ $0, R8; \ + MULXQ 16+x, AX, r0; ADOXQ AX, r2; ADCXQ r0, r3; \ + MULXQ 24+x, AX, r0; ADOXQ AX, r3; ADCXQ r0, r4; \ + MULXQ 32+x, AX, r0; ADOXQ AX, r4; ADCXQ r0, r5; \ + MULXQ 40+x, AX, r0; ADOXQ AX, r5; ADCXQ r0, r6; \ + MULXQ 48+x, AX, r0; ADOXQ AX, r6; ADCXQ R8, r0; \ + ;;;;;;;;;;;;;;;;;;; ADOXQ R8, r0; + +// integerMulAdx multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerMulAdx(z,x,y) \ + MOVL $0,R15; \ + MOVQ 0+y, DX; XORL AX, AX; MOVQ $0, R8; \ + MULXQ 0+x, AX, R9; MOVQ AX, 0+z; \ + MULXQ 8+x, AX, R10; ADCXQ AX, R9; \ + MULXQ 16+x, AX, R11; ADCXQ AX, R10; \ + MULXQ 24+x, AX, R12; ADCXQ AX, R11; \ + MULXQ 32+x, AX, R13; ADCXQ AX, R12; \ + MULXQ 40+x, AX, R14; ADCXQ AX, R13; \ + MULXQ 48+x, AX, R15; ADCXQ AX, R14; \ + ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R15; \ + maddBmi2Adx(z,x,y, 8, R9,R10,R11,R12,R13,R14,R15) \ + maddBmi2Adx(z,x,y,16,R10,R11,R12,R13,R14,R15, R9) \ + maddBmi2Adx(z,x,y,24,R11,R12,R13,R14,R15, R9,R10) \ + maddBmi2Adx(z,x,y,32,R12,R13,R14,R15, R9,R10,R11) \ + maddBmi2Adx(z,x,y,40,R13,R14,R15, R9,R10,R11,R12) \ + maddBmi2Adx(z,x,y,48,R14,R15, R9,R10,R11,R12,R13) \ + MOVQ R15, 56+z; \ + MOVQ R9, 64+z; \ + MOVQ R10, 72+z; \ + MOVQ R11, 80+z; \ + MOVQ R12, 88+z; \ + MOVQ R13, 96+z; \ + MOVQ R14, 104+z; + +// maddLegacy multiplies x and y and accumulates in z +// Uses: AX, DX, R15, FLAGS +// Instr: x86_64 +#define maddLegacy(z,x,y,i) \ + MOVQ i+y, R15; \ + MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ + MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ + MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ + MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \ + ADDQ 0+i+z, R8; MOVQ R8, 0+i+z; \ + ADCQ 8+i+z, R9; MOVQ R9, 8+i+z; \ + ADCQ 16+i+z, R10; MOVQ R10, 16+i+z; \ + ADCQ 24+i+z, R11; MOVQ R11, 24+i+z; \ + ADCQ 32+i+z, R12; MOVQ R12, 32+i+z; \ + ADCQ 40+i+z, R13; MOVQ R13, 40+i+z; \ + ADCQ 48+i+z, R14; MOVQ R14, 48+i+z; \ + ADCQ $0, DX; MOVQ DX, 56+i+z; + +// integerMulLeg multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define integerMulLeg(z,x,y) \ + MOVQ 0+y, R15; \ + MOVQ 0+x, AX; MULQ R15; MOVQ AX, 0+z; ;;;;;;;;;;;; MOVQ DX, R8; \ + MOVQ 8+x, AX; MULQ R15; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \ + MOVQ 16+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; MOVQ R9, 16+z; \ + MOVQ 24+x, AX; MULQ R15; ADDQ AX, R10; ADCQ 
$0, DX; MOVQ DX, R11; MOVQ R10, 24+z; \ + MOVQ 32+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; MOVQ R11, 32+z; \ + MOVQ 40+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; MOVQ R12, 40+z; \ + MOVQ 48+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX,56+z; MOVQ R13, 48+z; \ + maddLegacy(z,x,y, 8) \ + maddLegacy(z,x,y,16) \ + maddLegacy(z,x,y,24) \ + maddLegacy(z,x,y,32) \ + maddLegacy(z,x,y,40) \ + maddLegacy(z,x,y,48) + +// integerSqrLeg squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define integerSqrLeg(z,x) \ + XORL R15, R15; \ + MOVQ 0+x, CX; \ + MOVQ CX, AX; MULQ CX; MOVQ AX, 0+z; MOVQ DX, R8; \ + ADDQ CX, CX; ADCQ $0, R15; \ + MOVQ 8+x, AX; MULQ CX; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \ + MOVQ 16+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ + MOVQ 24+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ + \ + MOVQ 8+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9,16+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 8+x, AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \ + ADDQ 8+x, CX; ADCQ $0, R15; \ + MOVQ 16+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 24+z; \ + MOVQ 24+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R9; \ + \ + MOVQ 16+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 32+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 16+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \ + ADDQ 16+x, CX; ADCQ $0, R15; \ + MOVQ 24+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 40+z; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX,R10; \ + \ + MOVQ 24+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 48+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 24+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \ + ADDQ 24+x, CX; ADCQ $0, R15; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; MOVQ R14, 56+z; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX,R11; \ + \ + MOVQ 32+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9, 64+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 32+x,AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \ + ADDQ 32+x, CX; ADCQ $0, R15; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 72+z; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, 
DX; MOVQ DX,R12; \ + \ + XORL R13, R13; \ + XORL R14, R14; \ + MOVQ 40+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 80+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 40+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \ + ADDQ 40+x, CX; ADCQ $0, R15; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 88+z; \ + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8, R13; ADCQ $0,R14; \ + \ + XORL R9, R9; \ + MOVQ 48+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 96+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 48+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \ + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8,R14; ADCQ $0, R9; MOVQ R14, 104+z; + + +// integerSqrAdx squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerSqrAdx(z,x) \ + XORL R15, R15; \ + MOVQ 0+x, DX; \ + ;;;;;;;;;;;;;; MULXQ DX, AX, R8; MOVQ AX, 0+z; \ + ADDQ DX, DX; ADCQ $0, R15; CLC; \ + MULXQ 8+x, AX, R9; ADCXQ AX, R8; MOVQ R8, 8+z; \ + MULXQ 16+x, AX, R10; ADCXQ AX, R9; MOVQ $0, R8;\ + MULXQ 24+x, AX, R11; ADCXQ AX, R10; \ + MULXQ 32+x, AX, R12; ADCXQ AX, R11; \ + MULXQ 40+x, AX, R13; ADCXQ AX, R12; \ + MULXQ 48+x, AX, R14; ADCXQ AX, R13; \ + ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R14; \ + \ + MOVQ 8+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 8+x, R8; \ + ADDQ AX, R9; MOVQ R9, 16+z; \ + ADCQ CX, R8; \ + ADCQ $0, R11; \ + ADDQ 8+x, DX; \ + ADCQ $0, R15; \ + XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \ + MULXQ 16+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 24+z; \ + MULXQ 24+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; MOVQ $0, R10; \ + MULXQ 32+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R10, R9; \ + \ + MOVQ 16+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 16+x, R8; \ + ADDQ AX, R11; MOVQ R11, 32+z; \ + ADCQ CX, R8; \ + ADCQ $0, R13; \ + ADDQ 16+x, DX; \ + ADCQ $0, R15; \ + XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \ + MULXQ 24+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 40+z; \ + MULXQ 32+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; MOVQ $0, R12; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R11,R10; \ + \ + MOVQ 24+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 24+x, R8; \ + ADDQ AX, R13; MOVQ R13, 48+z; \ + ADCQ CX, R8; \ + ADCQ $0, R9; \ + ADDQ 24+x, DX; \ + ADCQ $0, R15; \ + XORL R13, R13; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R14; \ + MULXQ 32+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; MOVQ R14, 56+z; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; MOVQ $0, R14; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R12,R11; \ + \ + MOVQ 32+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 32+x, R8; \ + ADDQ AX, R9; MOVQ R9, 64+z; \ + ADCQ CX, R8; \ + ADCQ $0, R11; \ + ADDQ 32+x, DX; \ + ADCQ $0, R15; \ + XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ 
R10, 72+z; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R13,R12; \ + \ + MOVQ 40+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 40+x, R8; \ + ADDQ AX, R11; MOVQ R11, 80+z; \ + ADCQ CX, R8; \ + ADCQ $0, R13; \ + ADDQ 40+x, DX; \ + ADCQ $0, R15; \ + XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 88+z; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R14,R13; \ + \ + MOVQ 48+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 48+x, R8; \ + XORL R10, R10; ;;;;;;;;;;;;;; ADOXQ CX, R14; \ + ;;;;;;;;;;;;;; ADCXQ AX, R13; ;;;;;;;;;;;;;; MOVQ R13, 96+z; \ + ;;;;;;;;;;;;;; ADCXQ R8, R14; MOVQ R14, 104+z; + +// reduceFromDoubleLeg finds a z=x modulo p such that z<2^448 and stores in z +// Uses: AX, R8-R15, FLAGS +// Instr: x86_64 +#define reduceFromDoubleLeg(z,x) \ + /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \ + /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \ + MOVQ 80+x,AX; MOVQ AX,R10; \ + MOVQ $0xFFFFFFFF00000000, R8; \ + ANDQ R8,R10; \ + \ + MOVQ $0,R14; \ + MOVQ 104+x,R13; SHLQ $1,R13,R14; \ + MOVQ 96+x,R12; SHLQ $1,R12,R13; \ + MOVQ 88+x,R11; SHLQ $1,R11,R12; \ + MOVQ 72+x, R9; SHLQ $1,R10,R11; \ + MOVQ 64+x, R8; SHLQ $1,R10; \ + MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \ + MOVQ 56+x,R15; \ + \ + ADDQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \ + ADCQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \ + ADCQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \ + ADCQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \ + ADCQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \ + ADCQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \ + ADCQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \ + ADCQ $0,R14; \ + /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \ + /* ( r9, r8, r15, r13, r12, r11, r10) */ \ + MOVQ R10, AX; \ + SHRQ $32,R11,R10; \ + SHRQ $32,R12,R11; \ + SHRQ $32,R13,R12; \ + SHRQ $32,R15,R13; \ + SHRQ $32, R8,R15; \ + SHRQ $32, R9, R8; \ + SHRQ $32, AX, R9; \ + \ + ADDQ 0+z,R10; \ + ADCQ 8+z,R11; \ + ADCQ 16+z,R12; \ + ADCQ 24+z,R13; \ + ADCQ 32+z,R15; \ + ADCQ 40+z, R8; \ + ADCQ 48+z, R9; \ + ADCQ $0,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32, AX; \ + ADDQ R14,R10; MOVQ $0,R14; \ + ADCQ $0,R11; \ + ADCQ $0,R12; \ + ADCQ AX,R13; \ + ADCQ $0,R15; \ + ADCQ $0, R8; \ + ADCQ $0, R9; \ + ADCQ $0,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32,AX; \ + ADDQ R14,R10; MOVQ R10, 0+z; \ + ADCQ $0,R11; MOVQ R11, 8+z; \ + ADCQ $0,R12; MOVQ R12,16+z; \ + ADCQ AX,R13; MOVQ R13,24+z; \ + ADCQ $0,R15; MOVQ R15,32+z; \ + ADCQ $0, R8; MOVQ R8,40+z; \ + ADCQ $0, R9; MOVQ R9,48+z; + +// reduceFromDoubleAdx finds a z=x modulo p such that z<2^448 and stores in z +// Uses: AX, R8-R15, FLAGS +// Instr: x86_64, adx +#define reduceFromDoubleAdx(z,x) \ + /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \ + /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \ + MOVQ 80+x,AX; MOVQ AX,R10; \ + MOVQ $0xFFFFFFFF00000000, R8; \ + ANDQ R8,R10; \ + \ + MOVQ $0,R14; \ + MOVQ 104+x,R13; SHLQ $1,R13,R14; \ + MOVQ 96+x,R12; SHLQ $1,R12,R13; \ + MOVQ 88+x,R11; SHLQ $1,R11,R12; \ + MOVQ 72+x, R9; SHLQ $1,R10,R11; \ + MOVQ 64+x, R8; SHLQ $1,R10; \ + MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \ + MOVQ 56+x,R15; \ + \ + XORL AX,AX; \ + ADCXQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \ + ADCXQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \ + ADCXQ 16+x, R9; MOVQ R9,16+z; 
MOVQ 72+x, R9; \ + ADCXQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \ + ADCXQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \ + ADCXQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \ + ADCXQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \ + ADCXQ AX,R14; \ + /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \ + /* ( r9, r8, r15, r13, r12, r11, r10) */ \ + MOVQ R10, AX; \ + SHRQ $32,R11,R10; \ + SHRQ $32,R12,R11; \ + SHRQ $32,R13,R12; \ + SHRQ $32,R15,R13; \ + SHRQ $32, R8,R15; \ + SHRQ $32, R9, R8; \ + SHRQ $32, AX, R9; \ + \ + XORL AX,AX; \ + ADCXQ 0+z,R10; \ + ADCXQ 8+z,R11; \ + ADCXQ 16+z,R12; \ + ADCXQ 24+z,R13; \ + ADCXQ 32+z,R15; \ + ADCXQ 40+z, R8; \ + ADCXQ 48+z, R9; \ + ADCXQ AX,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32, AX; \ + CLC; \ + ADCXQ R14,R10; MOVQ $0,R14; \ + ADCXQ R14,R11; \ + ADCXQ R14,R12; \ + ADCXQ AX,R13; \ + ADCXQ R14,R15; \ + ADCXQ R14, R8; \ + ADCXQ R14, R9; \ + ADCXQ R14,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32, AX; \ + CLC; \ + ADCXQ R14,R10; MOVQ R10, 0+z; MOVQ $0,R14; \ + ADCXQ R14,R11; MOVQ R11, 8+z; \ + ADCXQ R14,R12; MOVQ R12,16+z; \ + ADCXQ AX,R13; MOVQ R13,24+z; \ + ADCXQ R14,R15; MOVQ R15,32+z; \ + ADCXQ R14, R8; MOVQ R8,40+z; \ + ADCXQ R14, R9; MOVQ R9,48+z; + +// addSub calculates two operations: x,y = x+y,x-y +// Uses: AX, DX, R8-R15, FLAGS +#define addSub(x,y) \ + MOVQ 0+x, R8; ADDQ 0+y, R8; \ + MOVQ 8+x, R9; ADCQ 8+y, R9; \ + MOVQ 16+x, R10; ADCQ 16+y, R10; \ + MOVQ 24+x, R11; ADCQ 24+y, R11; \ + MOVQ 32+x, R12; ADCQ 32+y, R12; \ + MOVQ 40+x, R13; ADCQ 40+y, R13; \ + MOVQ 48+x, R14; ADCQ 48+y, R14; \ + MOVQ $0, AX; ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ $0, AX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ DX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ 0+x,AX; MOVQ R8, 0+x; MOVQ AX, R8; \ + ADCQ $0, R9; MOVQ 8+x,AX; MOVQ R9, 8+x; MOVQ AX, R9; \ + ADCQ $0, R10; MOVQ 16+x,AX; MOVQ R10, 16+x; MOVQ AX, R10; \ + ADCQ DX, R11; MOVQ 24+x,AX; MOVQ R11, 24+x; MOVQ AX, R11; \ + ADCQ $0, R12; MOVQ 32+x,AX; MOVQ R12, 32+x; MOVQ AX, R12; \ + ADCQ $0, R13; MOVQ 40+x,AX; MOVQ R13, 40+x; MOVQ AX, R13; \ + ADCQ $0, R14; MOVQ 48+x,AX; MOVQ R14, 48+x; MOVQ AX, R14; \ + SUBQ 0+y, R8; \ + SBBQ 8+y, R9; \ + SBBQ 16+y, R10; \ + SBBQ 24+y, R11; \ + SBBQ 32+y, R12; \ + SBBQ 40+y, R13; \ + SBBQ 48+y, R14; \ + MOVQ $0, AX; SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ $0, AX; \ + SBBQ $0, R9; \ + SBBQ $0, R10; \ + SBBQ DX, R11; \ + SBBQ $0, R12; \ + SBBQ $0, R13; \ + SBBQ $0, R14; \ + SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ R8, 0+y; \ + SBBQ $0, R9; MOVQ R9, 8+y; \ + SBBQ $0, R10; MOVQ R10, 16+y; \ + SBBQ DX, R11; MOVQ R11, 24+y; \ + SBBQ $0, R12; MOVQ R12, 32+y; \ + SBBQ $0, R13; MOVQ R13, 40+y; \ + SBBQ $0, R14; MOVQ R14, 48+y; diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s new file mode 100644 index 0000000000..435addf5e6 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s @@ -0,0 +1,74 @@ +// +build amd64 + +#include "textflag.h" +#include "fp_amd64.h" + +// func cmovAmd64(x, y *Elt, n uint) +TEXT ·cmovAmd64(SB),NOSPLIT,$0-24 + MOVQ x+0(FP), DI + MOVQ y+8(FP), SI + MOVQ n+16(FP), BX + cselect(0(DI),0(SI),BX) + RET + +// func cswapAmd64(x, y *Elt, n uint) +TEXT ·cswapAmd64(SB),NOSPLIT,$0-24 + MOVQ x+0(FP), DI + MOVQ y+8(FP), SI + MOVQ 
n+16(FP), BX + cswap(0(DI),0(SI),BX) + RET + +// func subAmd64(z, x, y *Elt) +TEXT ·subAmd64(SB),NOSPLIT,$0-24 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + MOVQ y+16(FP), BX + subtraction(0(DI),0(SI),0(BX)) + RET + +// func addsubAmd64(x, y *Elt) +TEXT ·addsubAmd64(SB),NOSPLIT,$0-16 + MOVQ x+0(FP), DI + MOVQ y+8(FP), SI + addSub(0(DI),0(SI)) + RET + +#define addLegacy \ + additionLeg(0(DI),0(SI),0(BX)) +#define addBmi2Adx \ + additionAdx(0(DI),0(SI),0(BX)) + +#define mulLegacy \ + integerMulLeg(0(SP),0(SI),0(BX)) \ + reduceFromDoubleLeg(0(DI),0(SP)) +#define mulBmi2Adx \ + integerMulAdx(0(SP),0(SI),0(BX)) \ + reduceFromDoubleAdx(0(DI),0(SP)) + +#define sqrLegacy \ + integerSqrLeg(0(SP),0(SI)) \ + reduceFromDoubleLeg(0(DI),0(SP)) +#define sqrBmi2Adx \ + integerSqrAdx(0(SP),0(SI)) \ + reduceFromDoubleAdx(0(DI),0(SP)) + +// func addAmd64(z, x, y *Elt) +TEXT ·addAmd64(SB),NOSPLIT,$0-24 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + MOVQ y+16(FP), BX + CHECK_BMI2ADX(LADD, addLegacy, addBmi2Adx) + +// func mulAmd64(z, x, y *Elt) +TEXT ·mulAmd64(SB),NOSPLIT,$112-24 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + MOVQ y+16(FP), BX + CHECK_BMI2ADX(LMUL, mulLegacy, mulBmi2Adx) + +// func sqrAmd64(z, x *Elt) +TEXT ·sqrAmd64(SB),NOSPLIT,$112-16 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + CHECK_BMI2ADX(LSQR, sqrLegacy, sqrBmi2Adx) diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go new file mode 100644 index 0000000000..47a0b63205 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go @@ -0,0 +1,339 @@ +package fp448 + +import ( + "encoding/binary" + "math/bits" +) + +func cmovGeneric(x, y *Elt, n uint) { + m := -uint64(n & 0x1) + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + x0 = (x0 &^ m) | (y0 & m) + x1 = (x1 &^ m) | (y1 & m) + x2 = (x2 &^ m) | (y2 & m) + x3 = (x3 &^ m) | (y3 & m) + x4 = (x4 &^ m) | (y4 & m) + x5 = (x5 &^ m) | (y5 & m) + x6 = (x6 &^ m) | (y6 & m) + + binary.LittleEndian.PutUint64(x[0*8:1*8], x0) + binary.LittleEndian.PutUint64(x[1*8:2*8], x1) + binary.LittleEndian.PutUint64(x[2*8:3*8], x2) + binary.LittleEndian.PutUint64(x[3*8:4*8], x3) + binary.LittleEndian.PutUint64(x[4*8:5*8], x4) + binary.LittleEndian.PutUint64(x[5*8:6*8], x5) + binary.LittleEndian.PutUint64(x[6*8:7*8], x6) +} + +func cswapGeneric(x, y *Elt, n uint) { + m := -uint64(n & 0x1) + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 
: 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + t0 := m & (x0 ^ y0) + t1 := m & (x1 ^ y1) + t2 := m & (x2 ^ y2) + t3 := m & (x3 ^ y3) + t4 := m & (x4 ^ y4) + t5 := m & (x5 ^ y5) + t6 := m & (x6 ^ y6) + x0 ^= t0 + x1 ^= t1 + x2 ^= t2 + x3 ^= t3 + x4 ^= t4 + x5 ^= t5 + x6 ^= t6 + y0 ^= t0 + y1 ^= t1 + y2 ^= t2 + y3 ^= t3 + y4 ^= t4 + y5 ^= t5 + y6 ^= t6 + + binary.LittleEndian.PutUint64(x[0*8:1*8], x0) + binary.LittleEndian.PutUint64(x[1*8:2*8], x1) + binary.LittleEndian.PutUint64(x[2*8:3*8], x2) + binary.LittleEndian.PutUint64(x[3*8:4*8], x3) + binary.LittleEndian.PutUint64(x[4*8:5*8], x4) + binary.LittleEndian.PutUint64(x[5*8:6*8], x5) + binary.LittleEndian.PutUint64(x[6*8:7*8], x6) + + binary.LittleEndian.PutUint64(y[0*8:1*8], y0) + binary.LittleEndian.PutUint64(y[1*8:2*8], y1) + binary.LittleEndian.PutUint64(y[2*8:3*8], y2) + binary.LittleEndian.PutUint64(y[3*8:4*8], y3) + binary.LittleEndian.PutUint64(y[4*8:5*8], y4) + binary.LittleEndian.PutUint64(y[5*8:6*8], y5) + binary.LittleEndian.PutUint64(y[6*8:7*8], y6) +} + +func addGeneric(z, x, y *Elt) { + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + z0, c0 := bits.Add64(x0, y0, 0) + z1, c1 := bits.Add64(x1, y1, c0) + z2, c2 := bits.Add64(x2, y2, c1) + z3, c3 := bits.Add64(x3, y3, c2) + z4, c4 := bits.Add64(x4, y4, c3) + z5, c5 := bits.Add64(x5, y5, c4) + z6, z7 := bits.Add64(x6, y6, c5) + + z0, c0 = bits.Add64(z0, z7, 0) + z1, c1 = bits.Add64(z1, 0, c0) + z2, c2 = bits.Add64(z2, 0, c1) + z3, c3 = bits.Add64(z3, z7<<32, c2) + z4, c4 = bits.Add64(z4, 0, c3) + z5, c5 = bits.Add64(z5, 0, c4) + z6, z7 = bits.Add64(z6, 0, c5) + + z0, c0 = bits.Add64(z0, z7, 0) + z1, c1 = bits.Add64(z1, 0, c0) + z2, c2 = bits.Add64(z2, 0, c1) + z3, c3 = bits.Add64(z3, z7<<32, c2) + z4, c4 = bits.Add64(z4, 0, c3) + z5, c5 = bits.Add64(z5, 0, c4) + z6, _ = bits.Add64(z6, 0, c5) + + binary.LittleEndian.PutUint64(z[0*8:1*8], z0) + binary.LittleEndian.PutUint64(z[1*8:2*8], z1) + binary.LittleEndian.PutUint64(z[2*8:3*8], z2) + binary.LittleEndian.PutUint64(z[3*8:4*8], z3) + binary.LittleEndian.PutUint64(z[4*8:5*8], z4) + binary.LittleEndian.PutUint64(z[5*8:6*8], z5) + binary.LittleEndian.PutUint64(z[6*8:7*8], z6) +} + +func subGeneric(z, x, y *Elt) { + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) 
+ y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + z0, c0 := bits.Sub64(x0, y0, 0) + z1, c1 := bits.Sub64(x1, y1, c0) + z2, c2 := bits.Sub64(x2, y2, c1) + z3, c3 := bits.Sub64(x3, y3, c2) + z4, c4 := bits.Sub64(x4, y4, c3) + z5, c5 := bits.Sub64(x5, y5, c4) + z6, z7 := bits.Sub64(x6, y6, c5) + + z0, c0 = bits.Sub64(z0, z7, 0) + z1, c1 = bits.Sub64(z1, 0, c0) + z2, c2 = bits.Sub64(z2, 0, c1) + z3, c3 = bits.Sub64(z3, z7<<32, c2) + z4, c4 = bits.Sub64(z4, 0, c3) + z5, c5 = bits.Sub64(z5, 0, c4) + z6, z7 = bits.Sub64(z6, 0, c5) + + z0, c0 = bits.Sub64(z0, z7, 0) + z1, c1 = bits.Sub64(z1, 0, c0) + z2, c2 = bits.Sub64(z2, 0, c1) + z3, c3 = bits.Sub64(z3, z7<<32, c2) + z4, c4 = bits.Sub64(z4, 0, c3) + z5, c5 = bits.Sub64(z5, 0, c4) + z6, _ = bits.Sub64(z6, 0, c5) + + binary.LittleEndian.PutUint64(z[0*8:1*8], z0) + binary.LittleEndian.PutUint64(z[1*8:2*8], z1) + binary.LittleEndian.PutUint64(z[2*8:3*8], z2) + binary.LittleEndian.PutUint64(z[3*8:4*8], z3) + binary.LittleEndian.PutUint64(z[4*8:5*8], z4) + binary.LittleEndian.PutUint64(z[5*8:6*8], z5) + binary.LittleEndian.PutUint64(z[6*8:7*8], z6) +} + +func addsubGeneric(x, y *Elt) { + z := &Elt{} + addGeneric(z, x, y) + subGeneric(y, x, y) + *x = *z +} + +func mulGeneric(z, x, y *Elt) { + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + yy := [7]uint64{y0, y1, y2, y3, y4, y5, y6} + zz := [7]uint64{} + + yi := yy[0] + h0, l0 := bits.Mul64(x0, yi) + h1, l1 := bits.Mul64(x1, yi) + h2, l2 := bits.Mul64(x2, yi) + h3, l3 := bits.Mul64(x3, yi) + h4, l4 := bits.Mul64(x4, yi) + h5, l5 := bits.Mul64(x5, yi) + h6, l6 := bits.Mul64(x6, yi) + + zz[0] = l0 + a0, c0 := bits.Add64(h0, l1, 0) + a1, c1 := bits.Add64(h1, l2, c0) + a2, c2 := bits.Add64(h2, l3, c1) + a3, c3 := bits.Add64(h3, l4, c2) + a4, c4 := bits.Add64(h4, l5, c3) + a5, c5 := bits.Add64(h5, l6, c4) + a6, _ := bits.Add64(h6, 0, c5) + + for i := 1; i < 7; i++ { + yi = yy[i] + h0, l0 = bits.Mul64(x0, yi) + h1, l1 = bits.Mul64(x1, yi) + h2, l2 = bits.Mul64(x2, yi) + h3, l3 = bits.Mul64(x3, yi) + h4, l4 = bits.Mul64(x4, yi) + h5, l5 = bits.Mul64(x5, yi) + h6, l6 = bits.Mul64(x6, yi) + + zz[i], c0 = bits.Add64(a0, l0, 0) + a0, c1 = bits.Add64(a1, l1, c0) + a1, c2 = bits.Add64(a2, l2, c1) + a2, c3 = bits.Add64(a3, l3, c2) + a3, c4 = bits.Add64(a4, l4, c3) + a4, c5 = bits.Add64(a5, l5, c4) + a5, a6 = bits.Add64(a6, l6, c5) + + a0, c0 = bits.Add64(a0, h0, 0) + a1, c1 = bits.Add64(a1, h1, c0) + a2, c2 = bits.Add64(a2, h2, c1) + a3, c3 = bits.Add64(a3, h3, c2) + a4, c4 = bits.Add64(a4, h4, c3) + a5, c5 = bits.Add64(a5, h5, c4) + a6, _ = bits.Add64(a6, h6, c5) + } + red64(z, &zz, &[7]uint64{a0, a1, a2, a3, a4, a5, a6}) +} + +func sqrGeneric(z, x *Elt) { mulGeneric(z, x, x) } + +func red64(z *Elt, l, h *[7]uint64) { + /* (2C13, 2C12, 2C11, 2C10|C10, C9, C8, C7) + (C6,...,C0) */ + h0 := h[0] + h1 := h[1] + h2 := h[2] + h3 := ((h[3] 
& (0xFFFFFFFF << 32)) << 1) | (h[3] & 0xFFFFFFFF)
+	h4 := (h[3] >> 63) | (h[4] << 1)
+	h5 := (h[4] >> 63) | (h[5] << 1)
+	h6 := (h[5] >> 63) | (h[6] << 1)
+	h7 := (h[6] >> 63)
+
+	l0, c0 := bits.Add64(h0, l[0], 0)
+	l1, c1 := bits.Add64(h1, l[1], c0)
+	l2, c2 := bits.Add64(h2, l[2], c1)
+	l3, c3 := bits.Add64(h3, l[3], c2)
+	l4, c4 := bits.Add64(h4, l[4], c3)
+	l5, c5 := bits.Add64(h5, l[5], c4)
+	l6, c6 := bits.Add64(h6, l[6], c5)
+	l7, _ := bits.Add64(h7, 0, c6)
+
+	/* (C10C9, C9C8,C8C7,C7C13,C13C12,C12C11,C11C10) + (C6,...,C0) */
+	h0 = (h[3] >> 32) | (h[4] << 32)
+	h1 = (h[4] >> 32) | (h[5] << 32)
+	h2 = (h[5] >> 32) | (h[6] << 32)
+	h3 = (h[6] >> 32) | (h[0] << 32)
+	h4 = (h[0] >> 32) | (h[1] << 32)
+	h5 = (h[1] >> 32) | (h[2] << 32)
+	h6 = (h[2] >> 32) | (h[3] << 32)
+
+	l0, c0 = bits.Add64(l0, h0, 0)
+	l1, c1 = bits.Add64(l1, h1, c0)
+	l2, c2 = bits.Add64(l2, h2, c1)
+	l3, c3 = bits.Add64(l3, h3, c2)
+	l4, c4 = bits.Add64(l4, h4, c3)
+	l5, c5 = bits.Add64(l5, h5, c4)
+	l6, c6 = bits.Add64(l6, h6, c5)
+	l7, _ = bits.Add64(l7, 0, c6)
+
+	/* (C7) + (C6,...,C0) */
+	l0, c0 = bits.Add64(l0, l7, 0)
+	l1, c1 = bits.Add64(l1, 0, c0)
+	l2, c2 = bits.Add64(l2, 0, c1)
+	l3, c3 = bits.Add64(l3, l7<<32, c2)
+	l4, c4 = bits.Add64(l4, 0, c3)
+	l5, c5 = bits.Add64(l5, 0, c4)
+	l6, l7 = bits.Add64(l6, 0, c5)
+
+	/* (C7) + (C6,...,C0) */
+	l0, c0 = bits.Add64(l0, l7, 0)
+	l1, c1 = bits.Add64(l1, 0, c0)
+	l2, c2 = bits.Add64(l2, 0, c1)
+	l3, c3 = bits.Add64(l3, l7<<32, c2)
+	l4, c4 = bits.Add64(l4, 0, c3)
+	l5, c5 = bits.Add64(l5, 0, c4)
+	l6, _ = bits.Add64(l6, 0, c5)
+
+	binary.LittleEndian.PutUint64(z[0*8:1*8], l0)
+	binary.LittleEndian.PutUint64(z[1*8:2*8], l1)
+	binary.LittleEndian.PutUint64(z[2*8:3*8], l2)
+	binary.LittleEndian.PutUint64(z[3*8:4*8], l3)
+	binary.LittleEndian.PutUint64(z[4*8:5*8], l4)
+	binary.LittleEndian.PutUint64(z[5*8:6*8], l5)
+	binary.LittleEndian.PutUint64(z[6*8:7*8], l6)
+}
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go
new file mode 100644
index 0000000000..a62225d296
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go
@@ -0,0 +1,12 @@
+//go:build !amd64 || purego
+// +build !amd64 purego
+
+package fp448
+
+func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) }
+func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) }
+func add(z, x, y *Elt) { addGeneric(z, x, y) }
+func sub(z, x, y *Elt) { subGeneric(z, x, y) }
+func addsub(x, y *Elt) { addsubGeneric(x, y) }
+func mul(z, x, y *Elt) { mulGeneric(z, x, y) }
+func sqr(z, x *Elt) { sqrGeneric(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go b/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go
new file mode 100644
index 0000000000..2d7afc8059
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go
@@ -0,0 +1,75 @@
+//go:build gofuzz
+// +build gofuzz
+
+// How to run the fuzzer:
+//
+// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz
+// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
+// $ go-fuzz-build -libfuzzer -func FuzzReduction -o lib.a
+// $ clang -fsanitize=fuzzer lib.a -o fu.exe
+// $ ./fu.exe
+package fp448
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math/big"
+
+	"github.com/cloudflare/circl/internal/conv"
+)
+
+// FuzzReduction is a fuzzer target for the red64 function, which reduces t
+// (112 bytes) to a number t' (56 bytes) congruent to t modulo p448.
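+//
+// The reference loop below relies on the identity 2^448 ≡ 2^224 + 1 (mod p):
+// writing t = hi*2^448 + lo gives t ≡ hi*(2^224+1) + lo (mod p), and the
+// substitution is repeated until t fits in 448 bits.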
+func FuzzReduction(data []byte) int {
+	if len(data) != 2*Size {
+		return -1
+	}
+	var got, want Elt
+	var lo, hi [7]uint64
+	a := data[:Size]
+	b := data[Size:]
+	lo[0] = binary.LittleEndian.Uint64(a[0*8 : 1*8])
+	lo[1] = binary.LittleEndian.Uint64(a[1*8 : 2*8])
+	lo[2] = binary.LittleEndian.Uint64(a[2*8 : 3*8])
+	lo[3] = binary.LittleEndian.Uint64(a[3*8 : 4*8])
+	lo[4] = binary.LittleEndian.Uint64(a[4*8 : 5*8])
+	lo[5] = binary.LittleEndian.Uint64(a[5*8 : 6*8])
+	lo[6] = binary.LittleEndian.Uint64(a[6*8 : 7*8])
+
+	hi[0] = binary.LittleEndian.Uint64(b[0*8 : 1*8])
+	hi[1] = binary.LittleEndian.Uint64(b[1*8 : 2*8])
+	hi[2] = binary.LittleEndian.Uint64(b[2*8 : 3*8])
+	hi[3] = binary.LittleEndian.Uint64(b[3*8 : 4*8])
+	hi[4] = binary.LittleEndian.Uint64(b[4*8 : 5*8])
+	hi[5] = binary.LittleEndian.Uint64(b[5*8 : 6*8])
+	hi[6] = binary.LittleEndian.Uint64(b[6*8 : 7*8])
+
+	red64(&got, &lo, &hi)
+
+	t := conv.BytesLe2BigInt(data[:2*Size])
+
+	two448 := big.NewInt(1)
+	two448.Lsh(two448, 448) // 2^448
+	mask448 := big.NewInt(1)
+	mask448.Sub(two448, mask448) // 2^448-1
+	two224plus1 := big.NewInt(1)
+	two224plus1.Lsh(two224plus1, 224)
+	two224plus1.Add(two224plus1, big.NewInt(1)) // 2^224+1
+
+	var loBig, hiBig big.Int
+	for t.Cmp(two448) >= 0 {
+		loBig.And(t, mask448)
+		hiBig.Rsh(t, 448)
+		t.Mul(&hiBig, two224plus1)
+		t.Add(t, &loBig)
+	}
+	conv.BigInt2BytesLe(want[:], t)
+
+	if got != want {
+		fmt.Printf("in: %v\n", conv.BytesLe2BigInt(data[:2*Size]))
+		fmt.Printf("got: %v\n", got)
+		fmt.Printf("want: %v\n", want)
+		panic("error found")
+	}
+	return 1
+}
diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go b/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go
new file mode 100644
index 0000000000..a43851b8bb
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go
@@ -0,0 +1,122 @@
+// Package mlsbset provides a constant-time exponentiation method with precomputation.
+//
+// References: "Efficient and secure algorithms for GLV-based scalar
+// multiplication and their implementation on GLV–GLS curves" by Faz-Hernandez et al.
+//  - https://doi.org/10.1007/s13389-014-0085-7
+//  - https://eprint.iacr.org/2013/158
+package mlsbset
+
+import (
+	"errors"
+	"fmt"
+	"math/big"
+
+	"github.com/cloudflare/circl/internal/conv"
+)
+
+// EltG is a group element.
+type EltG interface{}
+
+// EltP is a precomputed group element.
+type EltP interface{}
+
+// Group defines the operations required by the MLSBSet exponentiation method.
+type Group interface {
+	Identity() EltG                    // Returns the identity of the group.
+	Sqr(x EltG)                        // Calculates x = x^2.
+	Mul(x EltG, y EltP)                // Calculates x = x*y.
+	NewEltP() EltP                     // Returns an arbitrary precomputed element.
+	ExtendedEltP() EltP                // Returns the precomputed element x^(2^(w*d)).
+	Lookup(a EltP, v uint, s, u int32) // Sets a = s*T[v][u].
+}
+
+// Params contains the parameters of the encoding.
+type Params struct {
+	T uint // T is the maximum size (in bits) of exponents.
+	V uint // V is the number of tables.
+	W uint // W is the window size.
+	E uint // E is the number of digits per table.
+	D uint // D is the number of digits in total.
+	L uint // L is the length of the code.
+}
+
+// Encoder converts integers into valid powers.
+type Encoder struct{ p Params }
+
+// New produces an encoder of the MLSBSet algorithm.
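+// For example, New(256, 4, 4), i.e. t=256-bit exponents, v=4 tables, and
+// window size w=4, yields e = ceil(256/16) = 16 digits per table, d = 64
+// digits in total, and a code length of l = 256 bits.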
+func New(t, v, w uint) (Encoder, error) {
+	if !(t > 1 && v >= 1 && w >= 2) {
+		return Encoder{}, errors.New("t>1, v>=1, w>=2")
+	}
+	e := (t + w*v - 1) / (w * v)
+	d := e * v
+	l := d * w
+	return Encoder{Params{t, v, w, e, d, l}}, nil
+}
+
+// Encode converts an odd integer k into a valid power for exponentiation.
+func (m Encoder) Encode(k []byte) (*Power, error) {
+	if len(k) == 0 {
+		return nil, errors.New("empty slice")
+	}
+	if !(len(k) <= int(m.p.L+7)>>3) {
+		return nil, errors.New("k too big")
+	}
+	if k[0]%2 == 0 {
+		return nil, errors.New("k must be odd")
+	}
+	ap := int((m.p.L+7)/8) - len(k)
+	k = append(k, make([]byte, ap)...)
+	s := m.signs(k)
+	b := make([]int32, m.p.L-m.p.D)
+	c := conv.BytesLe2BigInt(k)
+	c.Rsh(c, m.p.D)
+	var bi big.Int
+	for i := m.p.D; i < m.p.L; i++ {
+		c0 := int32(c.Bit(0))
+		b[i-m.p.D] = s[i%m.p.D] * c0
+		bi.SetInt64(int64(b[i-m.p.D] >> 1))
+		c.Rsh(c, 1)
+		c.Sub(c, &bi)
+	}
+	carry := int(c.Int64())
+	return &Power{m, s, b, carry}, nil
+}
+
+// signs calculates the set of signs.
+func (m Encoder) signs(k []byte) []int32 {
+	s := make([]int32, m.p.D)
+	s[m.p.D-1] = 1
+	for i := uint(1); i < m.p.D; i++ {
+		ki := int32((k[i>>3] >> (i & 0x7)) & 0x1)
+		s[i-1] = 2*ki - 1
+	}
+	return s
+}
+
+// GetParams returns the complementary parameters of the encoding.
+func (m Encoder) GetParams() Params { return m.p }
+
+// tableSize returns the size of each table.
+func (m Encoder) tableSize() uint { return 1 << (m.p.W - 1) }
+
+// Elts returns the total number of elements that must be precomputed.
+func (m Encoder) Elts() uint { return m.p.V * m.tableSize() }
+
+// IsExtended returns true if the element x^(2^(wd)) must be calculated.
+func (m Encoder) IsExtended() bool { q := m.p.T / (m.p.V * m.p.W); return m.p.T == q*m.p.V*m.p.W }
+
+// Ops returns the number of squarings and multiplications executed during an exponentiation.
+func (m Encoder) Ops() (S uint, M uint) {
+	S = m.p.E
+	M = m.p.E * m.p.V
+	if m.IsExtended() {
+		M++
+	}
+	return
+}
+
+func (m Encoder) String() string {
+	return fmt.Sprintf("T: %v W: %v V: %v e: %v d: %v l: %v wv|t: %v",
+		m.p.T, m.p.W, m.p.V, m.p.E, m.p.D, m.p.L, m.IsExtended())
+}
diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/power.go b/vendor/github.com/cloudflare/circl/math/mlsbset/power.go
new file mode 100644
index 0000000000..3f214c3046
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/mlsbset/power.go
@@ -0,0 +1,64 @@
+package mlsbset
+
+import "fmt"
+
+// Power is a valid exponent produced by the MLSBSet encoding algorithm.
+type Power struct {
+	set Encoder // parameters of the code.
+	s   []int32 // set of signs.
+	b   []int32 // set of digits.
+	c   int     // carry is {0,1}.
+}
+
+// Exp calculates x^k, where x is a predetermined element of a group G.
+func (p *Power) Exp(G Group) EltG {
+	a, b := G.Identity(), G.NewEltP()
+	for e := int(p.set.p.E - 1); e >= 0; e-- {
+		G.Sqr(a)
+		for v := uint(0); v < p.set.p.V; v++ {
+			sgnElt, idElt := p.Digit(v, uint(e))
+			G.Lookup(b, v, sgnElt, idElt)
+			G.Mul(a, b)
+		}
+	}
+	if p.set.IsExtended() && p.c == 1 {
+		G.Mul(a, G.ExtendedEltP())
+	}
+	return a
+}
+
+// Digit returns the (v,e)-th digit and its sign.
+func (p *Power) Digit(v, e uint) (sgn, dig int32) {
+	sgn = p.bit(0, v, e)
+	dig = 0
+	for i := p.set.p.W - 1; i > 0; i-- {
+		dig = 2*dig + p.bit(i, v, e)
+	}
+	mask := dig >> 31
+	dig = (dig + mask) ^ mask
+	return sgn, dig
+}
+
+// bit returns the (w,v,e)-th bit of the code.
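+// The sign plane (w=0) is stored in p.s, indexed by E*v+e; the remaining
+// W-1 digit planes live in p.b, with plane w starting at offset D*(w-1).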
+func (p *Power) bit(w, v, e uint) int32 {
+	if !(w < p.set.p.W &&
+		v < p.set.p.V &&
+		e < p.set.p.E) {
+		panic(fmt.Errorf("indexes outside (%v,%v,%v)", w, v, e))
+	}
+	if w == 0 {
+		return p.s[p.set.p.E*v+e]
+	}
+	return p.b[p.set.p.D*(w-1)+p.set.p.E*v+e]
+}
+
+func (p *Power) String() string {
+	dig := ""
+	for j := uint(0); j < p.set.p.V; j++ {
+		for i := uint(0); i < p.set.p.E; i++ {
+			s, d := p.Digit(j, i)
+			dig += fmt.Sprintf("(%2v,%2v) = %+2v %+2v\n", j, i, s, d)
+		}
+	}
+	return fmt.Sprintf("len: %v\ncarry: %v\ndigits:\n%v", len(p.b)+len(p.s), p.c, dig)
+}
diff --git a/vendor/github.com/cloudflare/circl/math/primes.go b/vendor/github.com/cloudflare/circl/math/primes.go
new file mode 100644
index 0000000000..158fd83a7a
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/primes.go
@@ -0,0 +1,34 @@
+package math
+
+import (
+	"crypto/rand"
+	"io"
+	"math/big"
+)
+
+// IsSafePrime reports whether p is (probably) a safe prime.
+// A prime p=2*q+1 is a safe prime if both p and q are prime.
+// Note that ProbablyPrime is not suitable for judging primes
+// that an adversary may have crafted to fool the test.
+func IsSafePrime(p *big.Int) bool {
+	pdiv2 := new(big.Int).Rsh(p, 1)
+	return p.ProbablyPrime(20) && pdiv2.ProbablyPrime(20)
+}
+
+// SafePrime returns a number of the given bit length that is a safe prime with high probability.
+// The returned number p=2*q+1 is a safe prime if both p and q are prime.
+// SafePrime returns an error for any error reported by the random source or if bits is too small.
+func SafePrime(random io.Reader, bits int) (*big.Int, error) {
+	one := big.NewInt(1)
+	p := new(big.Int)
+	for {
+		q, err := rand.Prime(random, bits-1)
+		if err != nil {
+			return nil, err
+		}
+		p.Lsh(q, 1).Add(p, one)
+		if p.ProbablyPrime(20) {
+			return p, nil
+		}
+	}
+}
diff --git a/vendor/github.com/cloudflare/circl/math/wnaf.go b/vendor/github.com/cloudflare/circl/math/wnaf.go
new file mode 100644
index 0000000000..94a1ec5042
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/wnaf.go
@@ -0,0 +1,84 @@
+// Package math provides some utility functions for big integers.
+package math
+
+import "math/big"
+
+// SignedDigit obtains the signed-digit recoding of n and returns a list L of
+// digits such that n = sum( L[i]*2^(i*(w-1)) ), and each L[i] is an odd number
+// in the set {±1, ±3, ..., ±2^(w-1)-1}. The third parameter ensures that the
+// output has ceil(l/(w-1)) digits.
+//
+// Restrictions:
+//  - n is odd and n > 0.
+//  - 1 < w < 32.
+//  - l >= bit length of n.
+//
+// References:
+//  - Alg.6 in "Exponent Recoding and Regular Exponentiation Algorithms"
+//    by Joye-Tunstall. http://doi.org/10.1007/978-3-642-02384-2_21
+//  - Alg.6 in "Selecting Elliptic Curves for Cryptography: An Efficiency and
+//    Security Analysis" by Bos et al.
+//    http://doi.org/10.1007/s13389-015-0097-y
+func SignedDigit(n *big.Int, w, l uint) []int32 {
+	if n.Sign() <= 0 || n.Bit(0) == 0 {
+		panic("n must be non-zero, odd, and positive")
+	}
+	if w <= 1 || w >= 32 {
+		panic("Verify that 1 < w < 32")
+	}
+	if uint(n.BitLen()) > l {
+		panic("n is too big to fit in l digits")
+	}
+	lenN := (l + (w - 1) - 1) / (w - 1) // ceil(l/(w-1))
+	L := make([]int32, lenN+1)
+	var k, v big.Int
+	k.Set(n)
+
+	var i uint
+	for i = 0; i < lenN; i++ {
+		words := k.Bits()
+		value := int32(words[0] & ((1 << w) - 1))
+		value -= int32(1) << (w - 1)
+		L[i] = value
+		v.SetInt64(int64(value))
+		k.Sub(&k, &v)
+		k.Rsh(&k, w-1)
+	}
+	L[i] = int32(k.Int64())
+	return L
+}
+
+// OmegaNAF obtains the window-w Non-Adjacent Form of a positive number n,
+// for 1 < w < 32. The returned slice L holds n = sum( L[i]*2^i ).
+//
+// Reference:
+//  - Alg.9 "Efficient arithmetic on Koblitz curves" by Solinas.
+//    http://doi.org/10.1023/A:1008306223194
+func OmegaNAF(n *big.Int, w uint) (L []int32) {
+	if n.Sign() < 0 {
+		panic("n must be positive")
+	}
+	if w <= 1 || w >= 32 {
+		panic("Verify that 1 < w < 32")
+	}
+
+	L = make([]int32, n.BitLen()+1)
+	var k, v big.Int
+	k.Set(n)
+
+	i := 0
+	for ; k.Sign() > 0; i++ {
+		value := int32(0)
+		if k.Bit(0) == 1 {
+			words := k.Bits()
+			value = int32(words[0] & ((1 << w) - 1))
+			if value >= (int32(1) << (w - 1)) {
+				value -= int32(1) << w
+			}
+			v.SetInt64(int64(value))
+			k.Sub(&k, &v)
+		}
+		L[i] = value
+		k.Rsh(&k, 1)
+	}
+	return L[:i]
+}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go
new file mode 100644
index 0000000000..2c73c26fb1
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go
@@ -0,0 +1,453 @@
+// Package ed25519 implements the Ed25519 signature scheme as described in RFC-8032.
+//
+// This package provides optimized implementations of the three signature
+// variants while maintaining close compatibility with crypto/ed25519.
+//
+// | Scheme Name | Sign Function     | Verification  | Context           |
+// |-------------|-------------------|---------------|-------------------|
+// | Ed25519     | Sign              | Verify        | None              |
+// | Ed25519Ph   | SignPh            | VerifyPh      | Yes, can be empty |
+// | Ed25519Ctx  | SignWithCtx       | VerifyWithCtx | Yes, non-empty    |
+// | All above   | (PrivateKey).Sign | VerifyAny     | As above          |
+//
+// Specific functions for sign and verify are defined. A generic signing
+// function for all schemes is available through the crypto.Signer interface,
+// which is implemented by the PrivateKey type. A corresponding all-in-one
+// verification method is provided by the VerifyAny function.
+//
+// Signing with Ed25519Ph or Ed25519Ctx requires a context string for domain
+// separation. This parameter is passed using a SignerOptions struct defined
+// in this package. While Ed25519Ph accepts an empty context, Ed25519Ctx
+// enforces non-empty context strings.
+//
+// # Compatibility with crypto/ed25519
+//
+// These functions are compatible with the “Ed25519” function defined in
+// RFC-8032. However, unlike RFC 8032's formulation, this package's private
+// key representation includes a public key suffix to make multiple signing
+// operations with the same key more efficient. This package refers to the
+// RFC-8032 private key as the “seed”.
+//
+// References
+//
+//  - RFC-8032: https://rfc-editor.org/rfc/rfc8032.txt
+//  - Ed25519: https://ed25519.cr.yp.to/
+//  - EdDSA: High-speed high-security signatures.
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go
new file mode 100644
index 0000000000..2c73c26fb1
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go
@@ -0,0 +1,453 @@
+// Package ed25519 implements the Ed25519 signature scheme as described in RFC-8032.
+//
+// This package provides optimized implementations of the three signature
+// variants while maintaining close compatibility with crypto/ed25519.
+//
+// | Scheme Name | Sign Function     | Verification  | Context           |
+// |-------------|-------------------|---------------|-------------------|
+// | Ed25519     | Sign              | Verify        | None              |
+// | Ed25519Ph   | SignPh            | VerifyPh      | Yes, can be empty |
+// | Ed25519Ctx  | SignWithCtx       | VerifyWithCtx | Yes, non-empty    |
+// | All above   | (PrivateKey).Sign | VerifyAny     | As above          |
+//
+// Specific functions for sign and verify are defined. A generic signing
+// function for all schemes is available through the crypto.Signer interface,
+// which is implemented by the PrivateKey type. A corresponding all-in-one
+// verification method is provided by the VerifyAny function.
+//
+// Signing with Ed25519Ph or Ed25519Ctx requires a context string for domain
+// separation. This parameter is passed using a SignerOptions struct defined
+// in this package. While Ed25519Ph accepts an empty context, Ed25519Ctx
+// enforces non-empty context strings.
+//
+// # Compatibility with crypto/ed25519
+//
+// These functions are compatible with the “Ed25519” function defined in
+// RFC-8032. However, unlike RFC 8032's formulation, this package's private
+// key representation includes a public key suffix to make multiple signing
+// operations with the same key more efficient. This package refers to the
+// RFC-8032 private key as the “seed”.
+//
+// References
+//
+// - RFC-8032: https://rfc-editor.org/rfc/rfc8032.txt
+// - Ed25519: https://ed25519.cr.yp.to/
+// - EdDSA: High-speed high-security signatures. https://doi.org/10.1007/s13389-012-0027-1
+package ed25519
+
+import (
+	"bytes"
+	"crypto"
+	cryptoRand "crypto/rand"
+	"crypto/sha512"
+	"crypto/subtle"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/cloudflare/circl/sign"
+)
+
+const (
+	// ContextMaxSize is the maximum length (in bytes) allowed for context.
+	ContextMaxSize = 255
+	// PublicKeySize is the size, in bytes, of public keys as used in this package.
+	PublicKeySize = 32
+	// PrivateKeySize is the size, in bytes, of private keys as used in this package.
+	PrivateKeySize = 64
+	// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
+	SignatureSize = 64
+	// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
+	SeedSize = 32
+)
+
+const (
+	paramB = 256 / 8 // Size of keys in bytes.
+)
+
+// SignerOptions implements crypto.SignerOpts and augments with parameters
+// that are specific to the Ed25519 signature schemes.
+type SignerOptions struct {
+	// Hash must be crypto.Hash(0) for Ed25519/Ed25519ctx, or crypto.SHA512
+	// for Ed25519ph.
+	crypto.Hash
+
+	// Context is an optional domain separation string for Ed25519ph and
+	// mandatory for Ed25519ctx. Its length must be at most 255 bytes.
+	Context string
+
+	// Scheme is an identifier for choosing a signature scheme. The zero value
+	// is ED25519.
+	Scheme SchemeID
+}
+
+// SchemeID is an identifier for each signature scheme.
+type SchemeID uint
+
+const (
+	ED25519 SchemeID = iota
+	ED25519Ph
+	ED25519Ctx
+)
+
+// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
+type PrivateKey []byte
+
+// Equal reports whether priv and x have the same value.
+func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
+	xx, ok := x.(PrivateKey)
+	return ok && subtle.ConstantTimeCompare(priv, xx) == 1
+}
+
+// Public returns the PublicKey corresponding to priv.
+func (priv PrivateKey) Public() crypto.PublicKey {
+	publicKey := make(PublicKey, PublicKeySize)
+	copy(publicKey, priv[SeedSize:])
+	return publicKey
+}
+
+// Seed returns the private key seed corresponding to priv. It is provided for
+// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
+// in this package.
+func (priv PrivateKey) Seed() []byte {
+	seed := make([]byte, SeedSize)
+	copy(seed, priv[:SeedSize])
+	return seed
+}
+
+func (priv PrivateKey) Scheme() sign.Scheme { return sch }
+
+func (pub PublicKey) Scheme() sign.Scheme { return sch }
+
+func (priv PrivateKey) MarshalBinary() (data []byte, err error) {
+	privateKey := make(PrivateKey, PrivateKeySize)
+	copy(privateKey, priv)
+	return privateKey, nil
+}
+
+func (pub PublicKey) MarshalBinary() (data []byte, err error) {
+	publicKey := make(PublicKey, PublicKeySize)
+	copy(publicKey, pub)
+	return publicKey, nil
+}
+
+// Equal reports whether pub and x have the same value.
+func (pub PublicKey) Equal(x crypto.PublicKey) bool {
+	xx, ok := x.(PublicKey)
+	return ok && bytes.Equal(pub, xx)
+}
+
+// Sign creates a signature of a message with the private key.
+// This function is compatible with crypto/ed25519 and also supports the
+// three signature variants defined in RFC-8032, namely Ed25519 (or pure
+// EdDSA), Ed25519Ph, and Ed25519Ctx.
+// The opts.HashFunc() must return zero to specify either the Ed25519 or
+// Ed25519Ctx variant. This can be achieved by passing crypto.Hash(0) as the
+// value for opts.
+// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant.
+// This can be achieved by passing crypto.SHA512 as the value for opts. +// Use a SignerOptions struct (defined in this package) to pass a context +// string for signing. +func (priv PrivateKey) Sign( + rand io.Reader, + message []byte, + opts crypto.SignerOpts, +) (signature []byte, err error) { + var ctx string + var scheme SchemeID + if o, ok := opts.(SignerOptions); ok { + ctx = o.Context + scheme = o.Scheme + } + + switch true { + case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0): + return Sign(priv, message), nil + case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512: + return SignPh(priv, message, ctx), nil + case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0: + return SignWithCtx(priv, message, ctx), nil + default: + return nil, errors.New("ed25519: bad hash algorithm") + } +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + if rand == nil { + rand = cryptoRand.Reader + } + + seed := make([]byte, SeedSize) + if _, err := io.ReadFull(rand, seed); err != nil { + return nil, nil, err + } + + privateKey := NewKeyFromSeed(seed) + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + + return publicKey, privateKey, nil +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + privateKey := make(PrivateKey, PrivateKeySize) + newKeyFromSeed(privateKey, seed) + return privateKey +} + +func newKeyFromSeed(privateKey, seed []byte) { + if l := len(seed); l != SeedSize { + panic("ed25519: bad seed length: " + strconv.Itoa(l)) + } + var P pointR1 + k := sha512.Sum512(seed) + clamp(k[:]) + reduceModOrder(k[:paramB], false) + P.fixedMult(k[:paramB]) + copy(privateKey[:SeedSize], seed) + _ = P.ToBytes(privateKey[SeedSize:]) +} + +func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) { + if l := len(privateKey); l != PrivateKeySize { + panic("ed25519: bad private key length: " + strconv.Itoa(l)) + } + + H := sha512.New() + var PHM []byte + + if preHash { + _, _ = H.Write(message) + PHM = H.Sum(nil) + H.Reset() + } else { + PHM = message + } + + // 1. Hash the 32-byte private key using SHA-512. + _, _ = H.Write(privateKey[:SeedSize]) + h := H.Sum(nil) + clamp(h[:]) + prefix, s := h[paramB:], h[:paramB] + + // 2. Compute SHA-512(dom2(F, C) || prefix || PH(M)) + H.Reset() + + writeDom(H, ctx, preHash) + + _, _ = H.Write(prefix) + _, _ = H.Write(PHM) + r := H.Sum(nil) + reduceModOrder(r[:], true) + + // 3. Compute the point [r]B. + var P pointR1 + P.fixedMult(r[:paramB]) + R := (&[paramB]byte{})[:] + if err := P.ToBytes(R); err != nil { + panic(err) + } + + // 4. Compute SHA512(dom2(F, C) || R || A || PH(M)). + H.Reset() + + writeDom(H, ctx, preHash) + + _, _ = H.Write(R) + _, _ = H.Write(privateKey[SeedSize:]) + _, _ = H.Write(PHM) + hRAM := H.Sum(nil) + + reduceModOrder(hRAM[:], true) + + // 5. Compute S = (r + k * s) mod order. + S := (&[paramB]byte{})[:] + calculateS(S, r[:paramB], hRAM[:paramB], s) + + // 6. The signature is the concatenation of R and S. + copy(signature[:paramB], R[:]) + copy(signature[paramB:], S[:]) +} + +// Sign signs the message with privateKey and returns a signature. 
+// This function supports the signature variant defined in RFC-8032: Ed25519,
+// also known as the pure version of EdDSA.
+// It will panic if len(privateKey) is not PrivateKeySize.
+func Sign(privateKey PrivateKey, message []byte) []byte {
+	signature := make([]byte, SignatureSize)
+	signAll(signature, privateKey, message, []byte(""), false)
+	return signature
+}
+
+// SignPh creates a signature of a message with private key and context.
+// This function supports the signature variant defined in RFC-8032: Ed25519ph,
+// meaning it internally hashes the message using SHA-512, and optionally
+// accepts a context string.
+// It will panic if len(privateKey) is not PrivateKeySize.
+// A context string may be passed to this function; its length must be at most
+// ContextMaxSize=255 bytes, and it may be empty.
+func SignPh(privateKey PrivateKey, message []byte, ctx string) []byte {
+	if len(ctx) > ContextMaxSize {
+		panic(fmt.Errorf("ed25519: bad context length: %v", len(ctx)))
+	}
+
+	signature := make([]byte, SignatureSize)
+	signAll(signature, privateKey, message, []byte(ctx), true)
+	return signature
+}
+
+// SignWithCtx creates a signature of a message with private key and context.
+// This function supports the signature variant defined in RFC-8032: Ed25519ctx,
+// meaning it accepts a non-empty context string.
+// It will panic if len(privateKey) is not PrivateKeySize.
+// A context string must be passed to this function; its length must be at most
+// ContextMaxSize=255 bytes, and it cannot be empty.
+func SignWithCtx(privateKey PrivateKey, message []byte, ctx string) []byte {
+	if len(ctx) == 0 || len(ctx) > ContextMaxSize {
+		panic(fmt.Errorf("ed25519: bad context length: %v > %v", len(ctx), ContextMaxSize))
+	}
+
+	signature := make([]byte, SignatureSize)
+	signAll(signature, privateKey, message, []byte(ctx), false)
+	return signature
+}
+
+func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool {
+	if len(public) != PublicKeySize ||
+		len(signature) != SignatureSize ||
+		!isLessThanOrder(signature[paramB:]) {
+		return false
+	}
+
+	var P pointR1
+	if ok := P.FromBytes(public); !ok {
+		return false
+	}
+
+	H := sha512.New()
+	var PHM []byte
+
+	if preHash {
+		_, _ = H.Write(message)
+		PHM = H.Sum(nil)
+		H.Reset()
+	} else {
+		PHM = message
+	}
+
+	R := signature[:paramB]
+
+	writeDom(H, ctx, preHash)
+
+	_, _ = H.Write(R)
+	_, _ = H.Write(public)
+	_, _ = H.Write(PHM)
+	hRAM := H.Sum(nil)
+	reduceModOrder(hRAM[:], true)
+
+	var Q pointR1
+	encR := (&[paramB]byte{})[:]
+	P.neg()
+	Q.doubleMult(&P, signature[paramB:], hRAM[:paramB])
+	_ = Q.ToBytes(encR)
+	return bytes.Equal(R, encR)
+}
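A round-trip sketch of the API defined in this file, covering the pure and pre-hashed variants; it uses only names that appear in this hunk (the message and context are illustrative):

```go
package main

import (
	"crypto"
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/circl/sign/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("hello")

	// Pure Ed25519.
	sig := ed25519.Sign(priv, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true

	// Ed25519ph through the crypto.Signer interface; the message is
	// pre-hashed internally with SHA-512. The rand argument is unused
	// because EdDSA signing is deterministic.
	opts := ed25519.SignerOptions{Hash: crypto.SHA512, Scheme: ed25519.ED25519Ph, Context: "example"}
	sigPh, err := priv.Sign(nil, msg, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(ed25519.VerifyAny(pub, msg, sigPh, opts)) // true
}
```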
+// VerifyAny returns true if the signature is valid. Failure cases are invalid
+// signature, or when the public key cannot be decoded.
+// This function supports all three signature variants defined in RFC-8032,
+// namely Ed25519 (or pure EdDSA), Ed25519Ph, and Ed25519Ctx.
+// The opts.HashFunc() must return zero to specify either the Ed25519 or
+// Ed25519Ctx variant. This can be achieved by passing crypto.Hash(0) as the
+// value for opts.
+// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant.
+// This can be achieved by passing crypto.SHA512 as the value for opts.
+// Use a SignerOptions struct to pass the context string used at signing.
+func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool {
+	var ctx string
+	var scheme SchemeID
+	if o, ok := opts.(SignerOptions); ok {
+		ctx = o.Context
+		scheme = o.Scheme
+	}
+
+	switch true {
+	case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0):
+		return Verify(public, message, signature)
+	case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512:
+		return VerifyPh(public, message, signature, ctx)
+	case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0:
+		return VerifyWithCtx(public, message, signature, ctx)
+	default:
+		return false
+	}
+}
+
+// Verify returns true if the signature is valid. Failure cases are invalid
+// signature, or when the public key cannot be decoded.
+// This function supports the signature variant defined in RFC-8032: Ed25519,
+// also known as the pure version of EdDSA.
+func Verify(public PublicKey, message, signature []byte) bool {
+	return verify(public, message, signature, []byte(""), false)
+}
+
+// VerifyPh returns true if the signature is valid. Failure cases are invalid
+// signature, or when the public key cannot be decoded.
+// This function supports the signature variant defined in RFC-8032: Ed25519ph,
+// meaning it internally hashes the message using SHA-512.
+// A context string may be passed to this function; its length must be at most
+// 255 bytes, and it may be empty.
+func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool {
+	return verify(public, message, signature, []byte(ctx), true)
+}
+
+// VerifyWithCtx returns true if the signature is valid. Failure cases are invalid
+// signature, or when the public key cannot be decoded, or when context is
+// not provided.
+// This function supports the signature variant defined in RFC-8032: Ed25519ctx,
+// meaning it does not handle prehashed messages. A non-empty context string
+// must be provided, and its length must be at most 255 bytes.
+func VerifyWithCtx(public PublicKey, message, signature []byte, ctx string) bool {
+	if len(ctx) == 0 || len(ctx) > ContextMaxSize {
+		return false
+	}
+
+	return verify(public, message, signature, []byte(ctx), false)
+}
+
+func clamp(k []byte) {
+	k[0] &= 248
+	k[paramB-1] = (k[paramB-1] & 127) | 64
+}
+
+// isLessThanOrder returns true if 0 <= x < order.
+func isLessThanOrder(x []byte) bool {
+	i := len(order) - 1
+	for i > 0 && x[i] == order[i] {
+		i--
+	}
+	return x[i] < order[i]
+}
+
+func writeDom(h io.Writer, ctx []byte, preHash bool) {
+	dom2 := "SigEd25519 no Ed25519 collisions"
+
+	if len(ctx) > 0 {
+		_, _ = h.Write([]byte(dom2))
+		if preHash {
+			_, _ = h.Write([]byte{byte(0x01), byte(len(ctx))})
+		} else {
+			_, _ = h.Write([]byte{byte(0x00), byte(len(ctx))})
+		}
+		_, _ = h.Write(ctx)
+	} else if preHash {
+		_, _ = h.Write([]byte(dom2))
+		_, _ = h.Write([]byte{0x01, 0x00})
+	}
+}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go b/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go
new file mode 100644
index 0000000000..10efafdcaf
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go
@@ -0,0 +1,175 @@
+package ed25519
+
+import (
+	"encoding/binary"
+	"math/bits"
+)
+
+var order = [paramB]byte{
+	0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
+	0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
+}
+
+// isLessThan returns true if 0 <= x < y, assuming both slices have the same length.
+func isLessThan(x, y []byte) bool { + i := len(x) - 1 + for i > 0 && x[i] == y[i] { + i-- + } + return x[i] < y[i] +} + +// reduceModOrder calculates k = k mod order of the curve. +func reduceModOrder(k []byte, is512Bit bool) { + var X [((2 * paramB) * 8) / 64]uint64 + numWords := len(k) >> 3 + for i := 0; i < numWords; i++ { + X[i] = binary.LittleEndian.Uint64(k[i*8 : (i+1)*8]) + } + red512(&X, is512Bit) + for i := 0; i < numWords; i++ { + binary.LittleEndian.PutUint64(k[i*8:(i+1)*8], X[i]) + } +} + +// red512 calculates x = x mod Order of the curve. +func red512(x *[8]uint64, full bool) { + // Implementation of Algs.(14.47)+(14.52) of Handbook of Applied + // Cryptography, by A. Menezes, P. van Oorschot, and S. Vanstone. + const ( + ell0 = uint64(0x5812631a5cf5d3ed) + ell1 = uint64(0x14def9dea2f79cd6) + ell160 = uint64(0x812631a5cf5d3ed0) + ell161 = uint64(0x4def9dea2f79cd65) + ell162 = uint64(0x0000000000000001) + ) + + var c0, c1, c2, c3 uint64 + r0, r1, r2, r3, r4 := x[0], x[1], x[2], x[3], uint64(0) + + if full { + q0, q1, q2, q3 := x[4], x[5], x[6], x[7] + + for i := 0; i < 3; i++ { + h0, s0 := bits.Mul64(q0, ell160) + h1, s1 := bits.Mul64(q1, ell160) + h2, s2 := bits.Mul64(q2, ell160) + h3, s3 := bits.Mul64(q3, ell160) + + s1, c0 = bits.Add64(h0, s1, 0) + s2, c1 = bits.Add64(h1, s2, c0) + s3, c2 = bits.Add64(h2, s3, c1) + s4, _ := bits.Add64(h3, 0, c2) + + h0, l0 := bits.Mul64(q0, ell161) + h1, l1 := bits.Mul64(q1, ell161) + h2, l2 := bits.Mul64(q2, ell161) + h3, l3 := bits.Mul64(q3, ell161) + + l1, c0 = bits.Add64(h0, l1, 0) + l2, c1 = bits.Add64(h1, l2, c0) + l3, c2 = bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + + s1, c0 = bits.Add64(s1, l0, 0) + s2, c1 = bits.Add64(s2, l1, c0) + s3, c2 = bits.Add64(s3, l2, c1) + s4, c3 = bits.Add64(s4, l3, c2) + s5, s6 := bits.Add64(l4, 0, c3) + + s2, c0 = bits.Add64(s2, q0, 0) + s3, c1 = bits.Add64(s3, q1, c0) + s4, c2 = bits.Add64(s4, q2, c1) + s5, c3 = bits.Add64(s5, q3, c2) + s6, s7 := bits.Add64(s6, 0, c3) + + q := q0 | q1 | q2 | q3 + m := -((q | -q) >> 63) // if q=0 then m=0...0 else m=1..1 + s0 &= m + s1 &= m + s2 &= m + s3 &= m + q0, q1, q2, q3 = s4, s5, s6, s7 + + if (i+1)%2 == 0 { + r0, c0 = bits.Add64(r0, s0, 0) + r1, c1 = bits.Add64(r1, s1, c0) + r2, c2 = bits.Add64(r2, s2, c1) + r3, c3 = bits.Add64(r3, s3, c2) + r4, _ = bits.Add64(r4, 0, c3) + } else { + r0, c0 = bits.Sub64(r0, s0, 0) + r1, c1 = bits.Sub64(r1, s1, c0) + r2, c2 = bits.Sub64(r2, s2, c1) + r3, c3 = bits.Sub64(r3, s3, c2) + r4, _ = bits.Sub64(r4, 0, c3) + } + } + + m := -(r4 >> 63) + r0, c0 = bits.Add64(r0, m&ell160, 0) + r1, c1 = bits.Add64(r1, m&ell161, c0) + r2, c2 = bits.Add64(r2, m&ell162, c1) + r3, c3 = bits.Add64(r3, 0, c2) + r4, _ = bits.Add64(r4, m&1, c3) + x[4], x[5], x[6], x[7] = 0, 0, 0, 0 + } + + q0 := (r4 << 4) | (r3 >> 60) + r3 &= (uint64(1) << 60) - 1 + + h0, s0 := bits.Mul64(ell0, q0) + h1, s1 := bits.Mul64(ell1, q0) + s1, c0 = bits.Add64(h0, s1, 0) + s2, _ := bits.Add64(h1, 0, c0) + + r0, c0 = bits.Sub64(r0, s0, 0) + r1, c1 = bits.Sub64(r1, s1, c0) + r2, c2 = bits.Sub64(r2, s2, c1) + r3, _ = bits.Sub64(r3, 0, c2) + + x[0], x[1], x[2], x[3] = r0, r1, r2, r3 +} + +// calculateS performs s = r+k*a mod Order of the curve. 
+func calculateS(s, r, k, a []byte) {
+	K := [4]uint64{
+		binary.LittleEndian.Uint64(k[0*8 : 1*8]),
+		binary.LittleEndian.Uint64(k[1*8 : 2*8]),
+		binary.LittleEndian.Uint64(k[2*8 : 3*8]),
+		binary.LittleEndian.Uint64(k[3*8 : 4*8]),
+	}
+	S := [8]uint64{
+		binary.LittleEndian.Uint64(r[0*8 : 1*8]),
+		binary.LittleEndian.Uint64(r[1*8 : 2*8]),
+		binary.LittleEndian.Uint64(r[2*8 : 3*8]),
+		binary.LittleEndian.Uint64(r[3*8 : 4*8]),
+	}
+	var c3 uint64
+	for i := range K {
+		ai := binary.LittleEndian.Uint64(a[i*8 : (i+1)*8])
+
+		h0, l0 := bits.Mul64(K[0], ai)
+		h1, l1 := bits.Mul64(K[1], ai)
+		h2, l2 := bits.Mul64(K[2], ai)
+		h3, l3 := bits.Mul64(K[3], ai)
+
+		l1, c0 := bits.Add64(h0, l1, 0)
+		l2, c1 := bits.Add64(h1, l2, c0)
+		l3, c2 := bits.Add64(h2, l3, c1)
+		l4, _ := bits.Add64(h3, 0, c2)
+
+		S[i+0], c0 = bits.Add64(S[i+0], l0, 0)
+		S[i+1], c1 = bits.Add64(S[i+1], l1, c0)
+		S[i+2], c2 = bits.Add64(S[i+2], l2, c1)
+		S[i+3], c3 = bits.Add64(S[i+3], l3, c2)
+		S[i+4], _ = bits.Add64(S[i+4], l4, c3)
+	}
+	red512(&S, true)
+	binary.LittleEndian.PutUint64(s[0*8:1*8], S[0])
+	binary.LittleEndian.PutUint64(s[1*8:2*8], S[1])
+	binary.LittleEndian.PutUint64(s[2*8:3*8], S[2])
+	binary.LittleEndian.PutUint64(s[3*8:4*8], S[3])
+}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go b/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go
new file mode 100644
index 0000000000..3216aae303
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go
@@ -0,0 +1,180 @@
+package ed25519
+
+import (
+	"crypto/subtle"
+	"encoding/binary"
+	"math/bits"
+
+	"github.com/cloudflare/circl/internal/conv"
+	"github.com/cloudflare/circl/math"
+	fp "github.com/cloudflare/circl/math/fp25519"
+)
+
+var paramD = fp.Elt{
+	0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
+	0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
+	0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
+	0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52,
+}
+
+// mLSBRecoding parameters.
+const (
+	fxT        = 257
+	fxV        = 2
+	fxW        = 3
+	fx2w1      = 1 << (uint(fxW) - 1)
+	numWords64 = (paramB * 8 / 64)
+)
+
+// mLSBRecoding computes the odd-only modified LSB-set representation of a scalar.
+//
+// Reference:
+//
+// "Efficient and secure algorithms for GLV-based scalar multiplication and
+// their implementation on GLV–GLS curves" by Faz-Hernandez et al.
+// http://doi.org/10.1007/s13389-014-0085-7.
+func mLSBRecoding(L []int8, k []byte) {
+	const ee = (fxT + fxW*fxV - 1) / (fxW * fxV)
+	const dd = ee * fxV
+	const ll = dd * fxW
+	if len(L) == (ll + 1) {
+		var m [numWords64 + 1]uint64
+		for i := 0; i < numWords64; i++ {
+			m[i] = binary.LittleEndian.Uint64(k[8*i : 8*i+8])
+		}
+		condAddOrderN(&m)
+		L[dd-1] = 1
+		for i := 0; i < dd-1; i++ {
+			kip1 := (m[(i+1)/64] >> (uint(i+1) % 64)) & 0x1
+			L[i] = int8(kip1<<1) - 1
+		}
+		{ // right-shift by d
+			right := uint(dd % 64)
+			left := uint(64) - right
+			lim := ((numWords64+1)*64 - dd) / 64
+			j := dd / 64
+			for i := 0; i < lim; i++ {
+				m[i] = (m[i+j] >> right) | (m[i+j+1] << left)
+			}
+			m[lim] = m[lim+j] >> right
+		}
+		for i := dd; i < ll; i++ {
+			L[i] = L[i%dd] * int8(m[0]&0x1)
+			div2subY(m[:], int64(L[i]>>1), numWords64)
+		}
+		L[ll] = int8(m[0])
+	}
+}
+
+// absolute returns the absolute value of x, branchlessly.
+func absolute(x int32) int32 {
+	mask := x >> 31
+	return (x + mask) ^ mask
+}
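A standalone sketch of the two's-complement trick used by `absolute` above. For negative x the arithmetic shift makes mask all ones, so (x+mask)^mask equals (x-1) XOR -1, which is -x; for non-negative x, mask is zero and x passes through. Like the original, it would overflow at math.MinInt32, which cannot occur for the small recoded digits used here:

```go
package main

import "fmt"

// abs mirrors the branchless absolute() helper above.
func abs(x int32) int32 {
	mask := x >> 31 // arithmetic shift: 0 for x >= 0, -1 for x < 0
	return (x + mask) ^ mask
}

func main() {
	fmt.Println(abs(-21), abs(21)) // 21 21
}
```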
+// condAddOrderN updates x = x+order if x is even, otherwise x remains unchanged.
+func condAddOrderN(x *[numWords64 + 1]uint64) {
+	isOdd := (x[0] & 0x1) - 1
+	c := uint64(0)
+	for i := 0; i < numWords64; i++ {
+		orderWord := binary.LittleEndian.Uint64(order[8*i : 8*i+8])
+		o := isOdd & orderWord
+		x0, c0 := bits.Add64(x[i], o, c)
+		x[i] = x0
+		c = c0
+	}
+	x[numWords64], _ = bits.Add64(x[numWords64], 0, c)
+}
+
+// div2subY updates x = (x/2) - y.
+func div2subY(x []uint64, y int64, l int) {
+	s := uint64(y >> 63)
+	for i := 0; i < l-1; i++ {
+		x[i] = (x[i] >> 1) | (x[i+1] << 63)
+	}
+	x[l-1] = (x[l-1] >> 1)
+
+	b := uint64(0)
+	x0, b0 := bits.Sub64(x[0], uint64(y), b)
+	x[0] = x0
+	b = b0
+	for i := 1; i < l-1; i++ {
+		x0, b0 := bits.Sub64(x[i], s, b)
+		x[i] = x0
+		b = b0
+	}
+	x[l-1], _ = bits.Sub64(x[l-1], s, b)
+}
+
+func (P *pointR1) fixedMult(scalar []byte) {
+	if len(scalar) != paramB {
+		panic("wrong scalar size")
+	}
+	const ee = (fxT + fxW*fxV - 1) / (fxW * fxV)
+	const dd = ee * fxV
+	const ll = dd * fxW
+
+	L := make([]int8, ll+1)
+	mLSBRecoding(L[:], scalar)
+	S := &pointR3{}
+	P.SetIdentity()
+	for ii := ee - 1; ii >= 0; ii-- {
+		P.double()
+		for j := 0; j < fxV; j++ {
+			dig := L[fxW*dd-j*ee+ii-ee]
+			for i := (fxW-1)*dd - j*ee + ii - ee; i >= (2*dd - j*ee + ii - ee); i = i - dd {
+				dig = 2*dig + L[i]
+			}
+			idx := absolute(int32(dig))
+			sig := L[dd-j*ee+ii-ee]
+			Tabj := &tabSign[fxV-j-1]
+			for k := 0; k < fx2w1; k++ {
+				S.cmov(&Tabj[k], subtle.ConstantTimeEq(int32(k), idx))
+			}
+			S.cneg(subtle.ConstantTimeEq(int32(sig), -1))
+			P.mixAdd(S)
+		}
+	}
+}
+
+const (
+	omegaFix = 7
+	omegaVar = 5
+)
+
+// doubleMult calculates P = mG + nQ.
+func (P *pointR1) doubleMult(Q *pointR1, m, n []byte) {
+	nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m), omegaFix)
+	nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n), omegaVar)
+
+	if len(nafFix) > len(nafVar) {
+		nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...)
+	} else if len(nafFix) < len(nafVar) {
+		nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...)
+ } + + var TabQ [1 << (omegaVar - 2)]pointR2 + Q.oddMultiples(TabQ[:]) + P.SetIdentity() + for i := len(nafFix) - 1; i >= 0; i-- { + P.double() + // Generator point + if nafFix[i] != 0 { + idxM := absolute(nafFix[i]) >> 1 + R := tabVerif[idxM] + if nafFix[i] < 0 { + R.neg() + } + P.mixAdd(&R) + } + // Variable input point + if nafVar[i] != 0 { + idxN := absolute(nafVar[i]) >> 1 + S := TabQ[idxN] + if nafVar[i] < 0 { + S.neg() + } + P.add(&S) + } + } +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go new file mode 100644 index 0000000000..374a69503c --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go @@ -0,0 +1,195 @@ +package ed25519 + +import fp "github.com/cloudflare/circl/math/fp25519" + +type ( + pointR1 struct{ x, y, z, ta, tb fp.Elt } + pointR2 struct { + pointR3 + z2 fp.Elt + } +) +type pointR3 struct{ addYX, subYX, dt2 fp.Elt } + +func (P *pointR1) neg() { + fp.Neg(&P.x, &P.x) + fp.Neg(&P.ta, &P.ta) +} + +func (P *pointR1) SetIdentity() { + P.x = fp.Elt{} + fp.SetOne(&P.y) + fp.SetOne(&P.z) + P.ta = fp.Elt{} + P.tb = fp.Elt{} +} + +func (P *pointR1) toAffine() { + fp.Inv(&P.z, &P.z) + fp.Mul(&P.x, &P.x, &P.z) + fp.Mul(&P.y, &P.y, &P.z) + fp.Modp(&P.x) + fp.Modp(&P.y) + fp.SetOne(&P.z) + P.ta = P.x + P.tb = P.y +} + +func (P *pointR1) ToBytes(k []byte) error { + P.toAffine() + var x [fp.Size]byte + err := fp.ToBytes(k[:fp.Size], &P.y) + if err != nil { + return err + } + err = fp.ToBytes(x[:], &P.x) + if err != nil { + return err + } + b := x[0] & 1 + k[paramB-1] = k[paramB-1] | (b << 7) + return nil +} + +func (P *pointR1) FromBytes(k []byte) bool { + if len(k) != paramB { + panic("wrong size") + } + signX := k[paramB-1] >> 7 + copy(P.y[:], k[:fp.Size]) + P.y[fp.Size-1] &= 0x7F + p := fp.P() + if !isLessThan(P.y[:], p[:]) { + return false + } + + one, u, v := &fp.Elt{}, &fp.Elt{}, &fp.Elt{} + fp.SetOne(one) + fp.Sqr(u, &P.y) // u = y^2 + fp.Mul(v, u, ¶mD) // v = dy^2 + fp.Sub(u, u, one) // u = y^2-1 + fp.Add(v, v, one) // v = dy^2+1 + isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v) + if !isQR { + return false + } + fp.Modp(&P.x) // x = x mod p + if fp.IsZero(&P.x) && signX == 1 { + return false + } + if signX != (P.x[0] & 1) { + fp.Neg(&P.x, &P.x) + } + P.ta = P.x + P.tb = P.y + fp.SetOne(&P.z) + return true +} + +// double calculates 2P for curves with A=-1. +func (P *pointR1) double() { + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + fp.Add(h, a, b) // H = A+B + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, h) // E = (x+y)^2-A-B + fp.Sub(g, b, a) // G = B-A + fp.Sub(f, c, g) // F = C-G + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +func (P *pointR1) mixAdd(Q *pointR3) { + fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 + P.coreAddition(Q) +} + +func (P *pointR1) add(Q *pointR2) { + fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2 + P.coreAddition(&Q.pointR3) +} + +// coreAddition calculates P=P+Q for curves with A=-1. 
+func (P *pointR1) coreAddition(Q *pointR3) { + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2 + a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb + fp.Mul(c, Pta, Ptb) // t1 = ta*tb + fp.Sub(h, Py, Px) // y1-x1 + fp.Add(b, Py, Px) // y1+x1 + fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2) + fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2) + fp.Mul(c, c, dt2) // C = 2*D*t1*t2 + fp.Sub(e, b, a) // E = B-A + fp.Add(h, b, a) // H = B+A + fp.Sub(f, d, c) // F = D-C + fp.Add(g, d, c) // G = D+C + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +func (P *pointR1) oddMultiples(T []pointR2) { + var R pointR2 + n := len(T) + T[0].fromR1(P) + _2P := *P + _2P.double() + R.fromR1(&_2P) + for i := 1; i < n; i++ { + P.add(&R) + T[i].fromR1(P) + } +} + +func (P *pointR1) isEqual(Q *pointR1) bool { + l, r := &fp.Elt{}, &fp.Elt{} + fp.Mul(l, &P.x, &Q.z) + fp.Mul(r, &Q.x, &P.z) + fp.Sub(l, l, r) + b := fp.IsZero(l) + fp.Mul(l, &P.y, &Q.z) + fp.Mul(r, &Q.y, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + fp.Mul(l, &P.ta, &P.tb) + fp.Mul(l, l, &Q.z) + fp.Mul(r, &Q.ta, &Q.tb) + fp.Mul(r, r, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + return b +} + +func (P *pointR3) neg() { + P.addYX, P.subYX = P.subYX, P.addYX + fp.Neg(&P.dt2, &P.dt2) +} + +func (P *pointR2) fromR1(Q *pointR1) { + fp.Add(&P.addYX, &Q.y, &Q.x) + fp.Sub(&P.subYX, &Q.y, &Q.x) + fp.Mul(&P.dt2, &Q.ta, &Q.tb) + fp.Mul(&P.dt2, &P.dt2, ¶mD) + fp.Add(&P.dt2, &P.dt2, &P.dt2) + fp.Add(&P.z2, &Q.z, &Q.z) +} + +func (P *pointR3) cneg(b int) { + t := &fp.Elt{} + fp.Cswap(&P.addYX, &P.subYX, uint(b)) + fp.Neg(t, &P.dt2) + fp.Cmov(&P.dt2, t, uint(b)) +} + +func (P *pointR3) cmov(Q *pointR3, b int) { + fp.Cmov(&P.addYX, &Q.addYX, uint(b)) + fp.Cmov(&P.subYX, &Q.subYX, uint(b)) + fp.Cmov(&P.dt2, &Q.dt2, uint(b)) +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go new file mode 100644 index 0000000000..c3505b67ac --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go @@ -0,0 +1,9 @@ +//go:build go1.13 +// +build go1.13 + +package ed25519 + +import cryptoEd25519 "crypto/ed25519" + +// PublicKey is the type of Ed25519 public keys. +type PublicKey cryptoEd25519.PublicKey diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go new file mode 100644 index 0000000000..d57d86eff0 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go @@ -0,0 +1,7 @@ +//go:build !go1.13 +// +build !go1.13 + +package ed25519 + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go new file mode 100644 index 0000000000..e4520f5203 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go @@ -0,0 +1,87 @@ +package ed25519 + +import ( + "crypto/rand" + "encoding/asn1" + + "github.com/cloudflare/circl/sign" +) + +var sch sign.Scheme = &scheme{} + +// Scheme returns a signature interface. 
+func Scheme() sign.Scheme { return sch } + +type scheme struct{} + +func (*scheme) Name() string { return "Ed25519" } +func (*scheme) PublicKeySize() int { return PublicKeySize } +func (*scheme) PrivateKeySize() int { return PrivateKeySize } +func (*scheme) SignatureSize() int { return SignatureSize } +func (*scheme) SeedSize() int { return SeedSize } +func (*scheme) TLSIdentifier() uint { return 0x0807 } +func (*scheme) SupportsContext() bool { return false } +func (*scheme) Oid() asn1.ObjectIdentifier { + return asn1.ObjectIdentifier{1, 3, 101, 112} +} + +func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) { + return GenerateKey(rand.Reader) +} + +func (*scheme) Sign( + sk sign.PrivateKey, + message []byte, + opts *sign.SignatureOpts, +) []byte { + priv, ok := sk.(PrivateKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + if opts != nil && opts.Context != "" { + panic(sign.ErrContextNotSupported) + } + return Sign(priv, message) +} + +func (*scheme) Verify( + pk sign.PublicKey, + message, signature []byte, + opts *sign.SignatureOpts, +) bool { + pub, ok := pk.(PublicKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + if opts != nil { + if opts.Context != "" { + panic(sign.ErrContextNotSupported) + } + } + return Verify(pub, message, signature) +} + +func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) { + privateKey := NewKeyFromSeed(seed) + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + return publicKey, privateKey +} + +func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) { + if len(buf) < PublicKeySize { + return nil, sign.ErrPubKeySize + } + pub := make(PublicKey, PublicKeySize) + copy(pub, buf[:PublicKeySize]) + return pub, nil +} + +func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) { + if len(buf) < PrivateKeySize { + return nil, sign.ErrPrivKeySize + } + priv := make(PrivateKey, PrivateKeySize) + copy(priv, buf[:PrivateKeySize]) + return priv, nil +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go b/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go new file mode 100644 index 0000000000..8763b426fc --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go @@ -0,0 +1,213 @@ +package ed25519 + +import fp "github.com/cloudflare/circl/math/fp25519" + +var tabSign = [fxV][fx2w1]pointR3{ + { + pointR3{ + addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07}, + subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44}, + dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f}, + }, + { + addYX: fp.Elt{0x7c, 0xb0, 0x9e, 0xe6, 0xc5, 0xbf, 0xfa, 0x13, 0x8e, 0x0d, 0x22, 0xde, 0xc8, 0xd1, 0xce, 0x52, 0x02, 0xd5, 0x62, 0x31, 0x71, 0x0e, 0x8e, 0x9d, 0xb0, 0xd6, 0x00, 0xa5, 0x5a, 0x0e, 0xce, 0x72}, + subYX: fp.Elt{0x1a, 0x8e, 0x5c, 0xdc, 0xa4, 0xb3, 0x6c, 0x51, 0x18, 0xa0, 0x09, 0x80, 0x9a, 0x46, 0x33, 0xd5, 0xe0, 0x3c, 0x4d, 0x3b, 0xfc, 0x49, 0xa2, 0x43, 0x29, 0xe1, 0x29, 0xa9, 0x93, 0xea, 0x7c, 0x35}, + dt2: fp.Elt{0x08, 0x46, 0x6f, 0x68, 0x7f, 0x0b, 0x7c, 0x9e, 0xad, 0xba, 
0x07, 0x61, 0x74, 0x83, 0x2f, 0xfc, 0x26, 0xd6, 0x09, 0xb9, 0x00, 0x34, 0x36, 0x4f, 0x01, 0xf3, 0x48, 0xdb, 0x43, 0xba, 0x04, 0x44}, + }, + { + addYX: fp.Elt{0x4c, 0xda, 0x0d, 0x13, 0x66, 0xfd, 0x82, 0x84, 0x9f, 0x75, 0x5b, 0xa2, 0x17, 0xfe, 0x34, 0xbf, 0x1f, 0xcb, 0xba, 0x90, 0x55, 0x80, 0x83, 0xfd, 0x63, 0xb9, 0x18, 0xf8, 0x5b, 0x5d, 0x94, 0x1e}, + subYX: fp.Elt{0xb9, 0xdb, 0x6c, 0x04, 0x88, 0x22, 0xd8, 0x79, 0x83, 0x2f, 0x8d, 0x65, 0x6b, 0xd2, 0xab, 0x1b, 0xdd, 0x65, 0xe5, 0x93, 0x63, 0xf8, 0xa2, 0xd8, 0x3c, 0xf1, 0x4b, 0xc5, 0x99, 0xd1, 0xf2, 0x12}, + dt2: fp.Elt{0x05, 0x4c, 0xb8, 0x3b, 0xfe, 0xf5, 0x9f, 0x2e, 0xd1, 0xb2, 0xb8, 0xff, 0xfe, 0x6d, 0xd9, 0x37, 0xe0, 0xae, 0xb4, 0x5a, 0x51, 0x80, 0x7e, 0x9b, 0x1d, 0xd1, 0x8d, 0x8c, 0x56, 0xb1, 0x84, 0x35}, + }, + { + addYX: fp.Elt{0x39, 0x71, 0x43, 0x34, 0xe3, 0x42, 0x45, 0xa1, 0xf2, 0x68, 0x71, 0xa7, 0xe8, 0x23, 0xfd, 0x9f, 0x86, 0x48, 0xff, 0xe5, 0x96, 0x74, 0xcf, 0x05, 0x49, 0xe2, 0xb3, 0x6c, 0x17, 0x77, 0x2f, 0x6d}, + subYX: fp.Elt{0x73, 0x3f, 0xc1, 0xc7, 0x6a, 0x66, 0xa1, 0x20, 0xdd, 0x11, 0xfb, 0x7a, 0x6e, 0xa8, 0x51, 0xb8, 0x3f, 0x9d, 0xa2, 0x97, 0x84, 0xb5, 0xc7, 0x90, 0x7c, 0xab, 0x48, 0xd6, 0x84, 0xa3, 0xd5, 0x1a}, + dt2: fp.Elt{0x63, 0x27, 0x3c, 0x49, 0x4b, 0xfc, 0x22, 0xf2, 0x0b, 0x50, 0xc2, 0x0f, 0xb4, 0x1f, 0x31, 0x0c, 0x2f, 0x53, 0xab, 0xaa, 0x75, 0x6f, 0xe0, 0x69, 0x39, 0x56, 0xe0, 0x3b, 0xb7, 0xa8, 0xbf, 0x45}, + }, + }, + { + { + addYX: fp.Elt{0x00, 0x45, 0xd9, 0x0d, 0x58, 0x03, 0xfc, 0x29, 0x93, 0xec, 0xbb, 0x6f, 0xa4, 0x7a, 0xd2, 0xec, 0xf8, 0xa7, 0xe2, 0xc2, 0x5f, 0x15, 0x0a, 0x13, 0xd5, 0xa1, 0x06, 0xb7, 0x1a, 0x15, 0x6b, 0x41}, + subYX: fp.Elt{0x85, 0x8c, 0xb2, 0x17, 0xd6, 0x3b, 0x0a, 0xd3, 0xea, 0x3b, 0x77, 0x39, 0xb7, 0x77, 0xd3, 0xc5, 0xbf, 0x5c, 0x6a, 0x1e, 0x8c, 0xe7, 0xc6, 0xc6, 0xc4, 0xb7, 0x2a, 0x8b, 0xf7, 0xb8, 0x61, 0x0d}, + dt2: fp.Elt{0xb0, 0x36, 0xc1, 0xe9, 0xef, 0xd7, 0xa8, 0x56, 0x20, 0x4b, 0xe4, 0x58, 0xcd, 0xe5, 0x07, 0xbd, 0xab, 0xe0, 0x57, 0x1b, 0xda, 0x2f, 0xe6, 0xaf, 0xd2, 0xe8, 0x77, 0x42, 0xf7, 0x2a, 0x1a, 0x19}, + }, + { + addYX: fp.Elt{0x6a, 0x6d, 0x6d, 0xd1, 0xfa, 0xf5, 0x03, 0x30, 0xbd, 0x6d, 0xc2, 0xc8, 0xf5, 0x38, 0x80, 0x4f, 0xb2, 0xbe, 0xa1, 0x76, 0x50, 0x1a, 0x73, 0xf2, 0x78, 0x2b, 0x8e, 0x3a, 0x1e, 0x34, 0x47, 0x7b}, + subYX: fp.Elt{0xc3, 0x2c, 0x36, 0xdc, 0xc5, 0x45, 0xbc, 0xef, 0x1b, 0x64, 0xd6, 0x65, 0x28, 0xe9, 0xda, 0x84, 0x13, 0xbe, 0x27, 0x8e, 0x3f, 0x98, 0x2a, 0x37, 0xee, 0x78, 0x97, 0xd6, 0xc0, 0x6f, 0xb4, 0x53}, + dt2: fp.Elt{0x58, 0x5d, 0xa7, 0xa3, 0x68, 0xbb, 0x20, 0x30, 0x2e, 0x03, 0xe9, 0xb1, 0xd4, 0x90, 0x72, 0xe3, 0x71, 0xb2, 0x36, 0x3e, 0x73, 0xa0, 0x2e, 0x3d, 0xd1, 0x85, 0x33, 0x62, 0x4e, 0xa7, 0x7b, 0x31}, + }, + { + addYX: fp.Elt{0xbf, 0xc4, 0x38, 0x53, 0xfb, 0x68, 0xa9, 0x77, 0xce, 0x55, 0xf9, 0x05, 0xcb, 0xeb, 0xfb, 0x8c, 0x46, 0xc2, 0x32, 0x7c, 0xf0, 0xdb, 0xd7, 0x2c, 0x62, 0x8e, 0xdd, 0x54, 0x75, 0xcf, 0x3f, 0x33}, + subYX: fp.Elt{0x49, 0x50, 0x1f, 0x4e, 0x6e, 0x55, 0x55, 0xde, 0x8c, 0x4e, 0x77, 0x96, 0x38, 0x3b, 0xfe, 0xb6, 0x43, 0x3c, 0x86, 0x69, 0xc2, 0x72, 0x66, 0x1f, 0x6b, 0xf9, 0x87, 0xbc, 0x4f, 0x37, 0x3e, 0x3c}, + dt2: fp.Elt{0xd2, 0x2f, 0x06, 0x6b, 0x08, 0x07, 0x69, 0x77, 0xc0, 0x94, 0xcc, 0xae, 0x43, 0x00, 0x59, 0x6e, 0xa3, 0x63, 0xa8, 0xdd, 0xfa, 0x24, 0x18, 0xd0, 0x35, 0xc7, 0x78, 0xf7, 0x0d, 0xd4, 0x5a, 0x1e}, + }, + { + addYX: fp.Elt{0x45, 0xc1, 0x17, 0x51, 0xf8, 0xed, 0x7e, 0xc7, 0xa9, 0x1a, 0x11, 0x6e, 0x2d, 0xef, 0x0b, 0xd5, 0x3f, 0x98, 0xb0, 0xa3, 0x9d, 0x65, 0xf1, 0xcd, 0x53, 0x4a, 0x8a, 0x18, 0x70, 0x0a, 0x7f, 0x23}, + subYX: fp.Elt{0xdd, 
0xef, 0xbe, 0x3a, 0x31, 0xe0, 0xbc, 0xbe, 0x6d, 0x5d, 0x79, 0x87, 0xd6, 0xbe, 0x68, 0xe3, 0x59, 0x76, 0x8c, 0x86, 0x0e, 0x7a, 0x92, 0x13, 0x14, 0x8f, 0x67, 0xb3, 0xcb, 0x1a, 0x76, 0x76}, + dt2: fp.Elt{0x56, 0x7a, 0x1c, 0x9d, 0xca, 0x96, 0xf9, 0xf9, 0x03, 0x21, 0xd4, 0xe8, 0xb3, 0xd5, 0xe9, 0x52, 0xc8, 0x54, 0x1e, 0x1b, 0x13, 0xb6, 0xfd, 0x47, 0x7d, 0x02, 0x32, 0x33, 0x27, 0xe2, 0x1f, 0x19}, + }, + }, +} + +var tabVerif = [1 << (omegaFix - 2)]pointR3{ + { /* 1P */ + addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07}, + subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44}, + dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f}, + }, + { /* 3P */ + addYX: fp.Elt{0x30, 0x97, 0xee, 0x4c, 0xa8, 0xb0, 0x25, 0xaf, 0x8a, 0x4b, 0x86, 0xe8, 0x30, 0x84, 0x5a, 0x02, 0x32, 0x67, 0x01, 0x9f, 0x02, 0x50, 0x1b, 0xc1, 0xf4, 0xf8, 0x80, 0x9a, 0x1b, 0x4e, 0x16, 0x7a}, + subYX: fp.Elt{0x65, 0xd2, 0xfc, 0xa4, 0xe8, 0x1f, 0x61, 0x56, 0x7d, 0xba, 0xc1, 0xe5, 0xfd, 0x53, 0xd3, 0x3b, 0xbd, 0xd6, 0x4b, 0x21, 0x1a, 0xf3, 0x31, 0x81, 0x62, 0xda, 0x5b, 0x55, 0x87, 0x15, 0xb9, 0x2a}, + dt2: fp.Elt{0x89, 0xd8, 0xd0, 0x0d, 0x3f, 0x93, 0xae, 0x14, 0x62, 0xda, 0x35, 0x1c, 0x22, 0x23, 0x94, 0x58, 0x4c, 0xdb, 0xf2, 0x8c, 0x45, 0xe5, 0x70, 0xd1, 0xc6, 0xb4, 0xb9, 0x12, 0xaf, 0x26, 0x28, 0x5a}, + }, + { /* 5P */ + addYX: fp.Elt{0x33, 0xbb, 0xa5, 0x08, 0x44, 0xbc, 0x12, 0xa2, 0x02, 0xed, 0x5e, 0xc7, 0xc3, 0x48, 0x50, 0x8d, 0x44, 0xec, 0xbf, 0x5a, 0x0c, 0xeb, 0x1b, 0xdd, 0xeb, 0x06, 0xe2, 0x46, 0xf1, 0xcc, 0x45, 0x29}, + subYX: fp.Elt{0xba, 0xd6, 0x47, 0xa4, 0xc3, 0x82, 0x91, 0x7f, 0xb7, 0x29, 0x27, 0x4b, 0xd1, 0x14, 0x00, 0xd5, 0x87, 0xa0, 0x64, 0xb8, 0x1c, 0xf1, 0x3c, 0xe3, 0xf3, 0x55, 0x1b, 0xeb, 0x73, 0x7e, 0x4a, 0x15}, + dt2: fp.Elt{0x85, 0x82, 0x2a, 0x81, 0xf1, 0xdb, 0xbb, 0xbc, 0xfc, 0xd1, 0xbd, 0xd0, 0x07, 0x08, 0x0e, 0x27, 0x2d, 0xa7, 0xbd, 0x1b, 0x0b, 0x67, 0x1b, 0xb4, 0x9a, 0xb6, 0x3b, 0x6b, 0x69, 0xbe, 0xaa, 0x43}, + }, + { /* 7P */ + addYX: fp.Elt{0xbf, 0xa3, 0x4e, 0x94, 0xd0, 0x5c, 0x1a, 0x6b, 0xd2, 0xc0, 0x9d, 0xb3, 0x3a, 0x35, 0x70, 0x74, 0x49, 0x2e, 0x54, 0x28, 0x82, 0x52, 0xb2, 0x71, 0x7e, 0x92, 0x3c, 0x28, 0x69, 0xea, 0x1b, 0x46}, + subYX: fp.Elt{0xb1, 0x21, 0x32, 0xaa, 0x9a, 0x2c, 0x6f, 0xba, 0xa7, 0x23, 0xba, 0x3b, 0x53, 0x21, 0xa0, 0x6c, 0x3a, 0x2c, 0x19, 0x92, 0x4f, 0x76, 0xea, 0x9d, 0xe0, 0x17, 0x53, 0x2e, 0x5d, 0xdd, 0x6e, 0x1d}, + dt2: fp.Elt{0xa2, 0xb3, 0xb8, 0x01, 0xc8, 0x6d, 0x83, 0xf1, 0x9a, 0xa4, 0x3e, 0x05, 0x47, 0x5f, 0x03, 0xb3, 0xf3, 0xad, 0x77, 0x58, 0xba, 0x41, 0x9c, 0x52, 0xa7, 0x90, 0x0f, 0x6a, 0x1c, 0xbb, 0x9f, 0x7a}, + }, + { /* 9P */ + addYX: fp.Elt{0x2f, 0x63, 0xa8, 0xa6, 0x8a, 0x67, 0x2e, 0x9b, 0xc5, 0x46, 0xbc, 0x51, 0x6f, 0x9e, 0x50, 0xa6, 0xb5, 0xf5, 0x86, 0xc6, 0xc9, 0x33, 0xb2, 0xce, 0x59, 0x7f, 0xdd, 0x8a, 0x33, 0xed, 0xb9, 0x34}, + subYX: fp.Elt{0x64, 0x80, 0x9d, 0x03, 0x7e, 0x21, 0x6e, 0xf3, 0x9b, 0x41, 0x20, 0xf5, 0xb6, 0x81, 0xa0, 0x98, 0x44, 0xb0, 0x5e, 0xe7, 0x08, 0xc6, 0xcb, 0x96, 0x8f, 0x9c, 0xdc, 0xfa, 0x51, 0x5a, 0xc0, 0x49}, + dt2: fp.Elt{0x1b, 0xaf, 0x45, 0x90, 0xbf, 0xe8, 0xb4, 0x06, 0x2f, 0xd2, 0x19, 0xa7, 0xe8, 
0x83, 0xff, 0xe2, 0x16, 0xcf, 0xd4, 0x93, 0x29, 0xfc, 0xf6, 0xaa, 0x06, 0x8b, 0x00, 0x1b, 0x02, 0x72, 0xc1, 0x73}, + }, + { /* 11P */ + addYX: fp.Elt{0xde, 0x2a, 0x80, 0x8a, 0x84, 0x00, 0xbf, 0x2f, 0x27, 0x2e, 0x30, 0x02, 0xcf, 0xfe, 0xd9, 0xe5, 0x06, 0x34, 0x70, 0x17, 0x71, 0x84, 0x3e, 0x11, 0xaf, 0x8f, 0x6d, 0x54, 0xe2, 0xaa, 0x75, 0x42}, + subYX: fp.Elt{0x48, 0x43, 0x86, 0x49, 0x02, 0x5b, 0x5f, 0x31, 0x81, 0x83, 0x08, 0x77, 0x69, 0xb3, 0xd6, 0x3e, 0x95, 0xeb, 0x8d, 0x6a, 0x55, 0x75, 0xa0, 0xa3, 0x7f, 0xc7, 0xd5, 0x29, 0x80, 0x59, 0xab, 0x18}, + dt2: fp.Elt{0xe9, 0x89, 0x60, 0xfd, 0xc5, 0x2c, 0x2b, 0xd8, 0xa4, 0xe4, 0x82, 0x32, 0xa1, 0xb4, 0x1e, 0x03, 0x22, 0x86, 0x1a, 0xb5, 0x99, 0x11, 0x31, 0x44, 0x48, 0xf9, 0x3d, 0xb5, 0x22, 0x55, 0xc6, 0x3d}, + }, + { /* 13P */ + addYX: fp.Elt{0x6d, 0x7f, 0x00, 0xa2, 0x22, 0xc2, 0x70, 0xbf, 0xdb, 0xde, 0xbc, 0xb5, 0x9a, 0xb3, 0x84, 0xbf, 0x07, 0xba, 0x07, 0xfb, 0x12, 0x0e, 0x7a, 0x53, 0x41, 0xf2, 0x46, 0xc3, 0xee, 0xd7, 0x4f, 0x23}, + subYX: fp.Elt{0x93, 0xbf, 0x7f, 0x32, 0x3b, 0x01, 0x6f, 0x50, 0x6b, 0x6f, 0x77, 0x9b, 0xc9, 0xeb, 0xfc, 0xae, 0x68, 0x59, 0xad, 0xaa, 0x32, 0xb2, 0x12, 0x9d, 0xa7, 0x24, 0x60, 0x17, 0x2d, 0x88, 0x67, 0x02}, + dt2: fp.Elt{0x78, 0xa3, 0x2e, 0x73, 0x19, 0xa1, 0x60, 0x53, 0x71, 0xd4, 0x8d, 0xdf, 0xb1, 0xe6, 0x37, 0x24, 0x33, 0xe5, 0xa7, 0x91, 0xf8, 0x37, 0xef, 0xa2, 0x63, 0x78, 0x09, 0xaa, 0xfd, 0xa6, 0x7b, 0x49}, + }, + { /* 15P */ + addYX: fp.Elt{0xa0, 0xea, 0xcf, 0x13, 0x03, 0xcc, 0xce, 0x24, 0x6d, 0x24, 0x9c, 0x18, 0x8d, 0xc2, 0x48, 0x86, 0xd0, 0xd4, 0xf2, 0xc1, 0xfa, 0xbd, 0xbd, 0x2d, 0x2b, 0xe7, 0x2d, 0xf1, 0x17, 0x29, 0xe2, 0x61}, + subYX: fp.Elt{0x0b, 0xcf, 0x8c, 0x46, 0x86, 0xcd, 0x0b, 0x04, 0xd6, 0x10, 0x99, 0x2a, 0xa4, 0x9b, 0x82, 0xd3, 0x92, 0x51, 0xb2, 0x07, 0x08, 0x30, 0x08, 0x75, 0xbf, 0x5e, 0xd0, 0x18, 0x42, 0xcd, 0xb5, 0x43}, + dt2: fp.Elt{0x16, 0xb5, 0xd0, 0x9b, 0x2f, 0x76, 0x9a, 0x5d, 0xee, 0xde, 0x3f, 0x37, 0x4e, 0xaf, 0x38, 0xeb, 0x70, 0x42, 0xd6, 0x93, 0x7d, 0x5a, 0x2e, 0x03, 0x42, 0xd8, 0xe4, 0x0a, 0x21, 0x61, 0x1d, 0x51}, + }, + { /* 17P */ + addYX: fp.Elt{0x81, 0x9d, 0x0e, 0x95, 0xef, 0x76, 0xc6, 0x92, 0x4f, 0x04, 0xd7, 0xc0, 0xcd, 0x20, 0x46, 0xa5, 0x48, 0x12, 0x8f, 0x6f, 0x64, 0x36, 0x9b, 0xaa, 0xe3, 0x55, 0xb8, 0xdd, 0x24, 0x59, 0x32, 0x6d}, + subYX: fp.Elt{0x87, 0xde, 0x20, 0x44, 0x48, 0x86, 0x13, 0x08, 0xb4, 0xed, 0x92, 0xb5, 0x16, 0xf0, 0x1c, 0x8a, 0x25, 0x2d, 0x94, 0x29, 0x27, 0x4e, 0xfa, 0x39, 0x10, 0x28, 0x48, 0xe2, 0x6f, 0xfe, 0xa7, 0x71}, + dt2: fp.Elt{0x54, 0xc8, 0xc8, 0xa5, 0xb8, 0x82, 0x71, 0x6c, 0x03, 0x2a, 0x5f, 0xfe, 0x79, 0x14, 0xfd, 0x33, 0x0c, 0x8d, 0x77, 0x83, 0x18, 0x59, 0xcf, 0x72, 0xa9, 0xea, 0x9e, 0x55, 0xb6, 0xc4, 0x46, 0x47}, + }, + { /* 19P */ + addYX: fp.Elt{0x2b, 0x9a, 0xc6, 0x6d, 0x3c, 0x7b, 0x77, 0xd3, 0x17, 0xf6, 0x89, 0x6f, 0x27, 0xb2, 0xfa, 0xde, 0xb5, 0x16, 0x3a, 0xb5, 0xf7, 0x1c, 0x65, 0x45, 0xb7, 0x9f, 0xfe, 0x34, 0xde, 0x51, 0x9a, 0x5c}, + subYX: fp.Elt{0x47, 0x11, 0x74, 0x64, 0xc8, 0x46, 0x85, 0x34, 0x49, 0xc8, 0xfc, 0x0e, 0xdd, 0xae, 0x35, 0x7d, 0x32, 0xa3, 0x72, 0x06, 0x76, 0x9a, 0x93, 0xff, 0xd6, 0xe6, 0xb5, 0x7d, 0x49, 0x63, 0x96, 0x21}, + dt2: fp.Elt{0x67, 0x0e, 0xf1, 0x79, 0xcf, 0xf1, 0x10, 0xf5, 0x5b, 0x51, 0x58, 0xe6, 0xa1, 0xda, 0xdd, 0xff, 0x77, 0x22, 0x14, 0x10, 0x17, 0xa7, 0xc3, 0x09, 0xbb, 0x23, 0x82, 0x60, 0x3c, 0x50, 0x04, 0x48}, + }, + { /* 21P */ + addYX: fp.Elt{0xc7, 0x7f, 0xa3, 0x2c, 0xd0, 0x9e, 0x24, 0xc4, 0xab, 0xac, 0x15, 0xa6, 0xe3, 0xa0, 0x59, 0xa0, 0x23, 0x0e, 0x6e, 0xc9, 0xd7, 0x6e, 0xa9, 0x88, 0x6d, 0x69, 0x50, 0x16, 0xa5, 0x98, 
0x33, 0x55}, + subYX: fp.Elt{0x75, 0xd1, 0x36, 0x3a, 0xd2, 0x21, 0x68, 0x3b, 0x32, 0x9e, 0x9b, 0xe9, 0xa7, 0x0a, 0xb4, 0xbb, 0x47, 0x8a, 0x83, 0x20, 0xe4, 0x5c, 0x9e, 0x5d, 0x5e, 0x4c, 0xde, 0x58, 0x88, 0x09, 0x1e, 0x77}, + dt2: fp.Elt{0xdf, 0x1e, 0x45, 0x78, 0xd2, 0xf5, 0x12, 0x9a, 0xcb, 0x9c, 0x89, 0x85, 0x79, 0x5d, 0xda, 0x3a, 0x08, 0x95, 0xa5, 0x9f, 0x2d, 0x4a, 0x7f, 0x47, 0x11, 0xa6, 0xf5, 0x8f, 0xd6, 0xd1, 0x5e, 0x5a}, + }, + { /* 23P */ + addYX: fp.Elt{0x83, 0x0e, 0x15, 0xfe, 0x2a, 0x12, 0x95, 0x11, 0xd8, 0x35, 0x4b, 0x7e, 0x25, 0x9a, 0x20, 0xcf, 0x20, 0x1e, 0x71, 0x1e, 0x29, 0xf8, 0x87, 0x73, 0xf0, 0x92, 0xbf, 0xd8, 0x97, 0xb8, 0xac, 0x44}, + subYX: fp.Elt{0x59, 0x73, 0x52, 0x58, 0xc5, 0xe0, 0xe5, 0xba, 0x7e, 0x9d, 0xdb, 0xca, 0x19, 0x5c, 0x2e, 0x39, 0xe9, 0xab, 0x1c, 0xda, 0x1e, 0x3c, 0x65, 0x28, 0x44, 0xdc, 0xef, 0x5f, 0x13, 0x60, 0x9b, 0x01}, + dt2: fp.Elt{0x83, 0x4b, 0x13, 0x5e, 0x14, 0x68, 0x60, 0x1e, 0x16, 0x4c, 0x30, 0x24, 0x4f, 0xe6, 0xf5, 0xc4, 0xd7, 0x3e, 0x1a, 0xfc, 0xa8, 0x88, 0x6e, 0x50, 0x92, 0x2f, 0xad, 0xe6, 0xfd, 0x49, 0x0c, 0x15}, + }, + { /* 25P */ + addYX: fp.Elt{0x38, 0x11, 0x47, 0x09, 0x95, 0xf2, 0x7b, 0x8e, 0x51, 0xa6, 0x75, 0x4f, 0x39, 0xef, 0x6f, 0x5d, 0xad, 0x08, 0xa7, 0x25, 0xc4, 0x79, 0xaf, 0x10, 0x22, 0x99, 0xb9, 0x5b, 0x07, 0x5a, 0x2b, 0x6b}, + subYX: fp.Elt{0x68, 0xa8, 0xdc, 0x9c, 0x3c, 0x86, 0x49, 0xb8, 0xd0, 0x4a, 0x71, 0xb8, 0xdb, 0x44, 0x3f, 0xc8, 0x8d, 0x16, 0x36, 0x0c, 0x56, 0xe3, 0x3e, 0xfe, 0xc1, 0xfb, 0x05, 0x1e, 0x79, 0xd7, 0xa6, 0x78}, + dt2: fp.Elt{0x76, 0xb9, 0xa0, 0x47, 0x4b, 0x70, 0xbf, 0x58, 0xd5, 0x48, 0x17, 0x74, 0x55, 0xb3, 0x01, 0xa6, 0x90, 0xf5, 0x42, 0xd5, 0xb1, 0x1f, 0x2b, 0xaa, 0x00, 0x5d, 0xd5, 0x4a, 0xfc, 0x7f, 0x5c, 0x72}, + }, + { /* 27P */ + addYX: fp.Elt{0xb2, 0x99, 0xcf, 0xd1, 0x15, 0x67, 0x42, 0xe4, 0x34, 0x0d, 0xa2, 0x02, 0x11, 0xd5, 0x52, 0x73, 0x9f, 0x10, 0x12, 0x8b, 0x7b, 0x15, 0xd1, 0x23, 0xa3, 0xf3, 0xb1, 0x7c, 0x27, 0xc9, 0x4c, 0x79}, + subYX: fp.Elt{0xc0, 0x98, 0xd0, 0x1c, 0xf7, 0x2b, 0x80, 0x91, 0x66, 0x63, 0x5e, 0xed, 0xa4, 0x6c, 0x41, 0xfe, 0x4c, 0x99, 0x02, 0x49, 0x71, 0x5d, 0x58, 0xdf, 0xe7, 0xfa, 0x55, 0xf8, 0x25, 0x46, 0xd5, 0x4c}, + dt2: fp.Elt{0x53, 0x50, 0xac, 0xc2, 0x26, 0xc4, 0xf6, 0x4a, 0x58, 0x72, 0xf6, 0x32, 0xad, 0xed, 0x9a, 0xbc, 0x21, 0x10, 0x31, 0x0a, 0xf1, 0x32, 0xd0, 0x2a, 0x85, 0x8e, 0xcc, 0x6f, 0x7b, 0x35, 0x08, 0x70}, + }, + { /* 29P */ + addYX: fp.Elt{0x01, 0x3f, 0x77, 0x38, 0x27, 0x67, 0x88, 0x0b, 0xfb, 0xcc, 0xfb, 0x95, 0xfa, 0xc8, 0xcc, 0xb8, 0xb6, 0x29, 0xad, 0xb9, 0xa3, 0xd5, 0x2d, 0x8d, 0x6a, 0x0f, 0xad, 0x51, 0x98, 0x7e, 0xef, 0x06}, + subYX: fp.Elt{0x34, 0x4a, 0x58, 0x82, 0xbb, 0x9f, 0x1b, 0xd0, 0x2b, 0x79, 0xb4, 0xd2, 0x63, 0x64, 0xab, 0x47, 0x02, 0x62, 0x53, 0x48, 0x9c, 0x63, 0x31, 0xb6, 0x28, 0xd4, 0xd6, 0x69, 0x36, 0x2a, 0xa9, 0x13}, + dt2: fp.Elt{0xe5, 0x7d, 0x57, 0xc0, 0x1c, 0x77, 0x93, 0xca, 0x5c, 0xdc, 0x35, 0x50, 0x1e, 0xe4, 0x40, 0x75, 0x71, 0xe0, 0x02, 0xd8, 0x01, 0x0f, 0x68, 0x24, 0x6a, 0xf8, 0x2a, 0x8a, 0xdf, 0x6d, 0x29, 0x3c}, + }, + { /* 31P */ + addYX: fp.Elt{0x13, 0xa7, 0x14, 0xd9, 0xf9, 0x15, 0xad, 0xae, 0x12, 0xf9, 0x8f, 0x8c, 0xf9, 0x7b, 0x2f, 0xa9, 0x30, 0xd7, 0x53, 0x9f, 0x17, 0x23, 0xf8, 0xaf, 0xba, 0x77, 0x0c, 0x49, 0x93, 0xd3, 0x99, 0x7a}, + subYX: fp.Elt{0x41, 0x25, 0x1f, 0xbb, 0x2e, 0x4d, 0xeb, 0xfc, 0x1f, 0xb9, 0xad, 0x40, 0xc7, 0x10, 0x95, 0xb8, 0x05, 0xad, 0xa1, 0xd0, 0x7d, 0xa3, 0x71, 0xfc, 0x7b, 0x71, 0x47, 0x07, 0x70, 0x2c, 0x89, 0x0a}, + dt2: fp.Elt{0xe8, 0xa3, 0xbd, 0x36, 0x24, 0xed, 0x52, 0x8f, 0x94, 0x07, 0xe8, 0x57, 0x41, 0xc8, 0xa8, 0x77, 
0xe0, 0x9c, 0x2f, 0x26, 0x63, 0x65, 0xa9, 0xa5, 0xd2, 0xf7, 0x02, 0x83, 0xd2, 0x62, 0x67, 0x28}, + }, + { /* 33P */ + addYX: fp.Elt{0x25, 0x5b, 0xe3, 0x3c, 0x09, 0x36, 0x78, 0x4e, 0x97, 0xaa, 0x6b, 0xb2, 0x1d, 0x18, 0xe1, 0x82, 0x3f, 0xb8, 0xc7, 0xcb, 0xd3, 0x92, 0xc1, 0x0c, 0x3a, 0x9d, 0x9d, 0x6a, 0x04, 0xda, 0xf1, 0x32}, + subYX: fp.Elt{0xbd, 0xf5, 0x2e, 0xce, 0x2b, 0x8e, 0x55, 0x7c, 0x63, 0xbc, 0x47, 0x67, 0xb4, 0x6c, 0x98, 0xe4, 0xb8, 0x89, 0xbb, 0x3b, 0x9f, 0x17, 0x4a, 0x15, 0x7a, 0x76, 0xf1, 0xd6, 0xa3, 0xf2, 0x86, 0x76}, + dt2: fp.Elt{0x6a, 0x7c, 0x59, 0x6d, 0xa6, 0x12, 0x8d, 0xaa, 0x2b, 0x85, 0xd3, 0x04, 0x03, 0x93, 0x11, 0x8f, 0x22, 0xb0, 0x09, 0xc2, 0x73, 0xdc, 0x91, 0x3f, 0xa6, 0x28, 0xad, 0xa9, 0xf8, 0x05, 0x13, 0x56}, + }, + { /* 35P */ + addYX: fp.Elt{0xd1, 0xae, 0x92, 0xec, 0x8d, 0x97, 0x0c, 0x10, 0xe5, 0x73, 0x6d, 0x4d, 0x43, 0xd5, 0x43, 0xca, 0x48, 0xba, 0x47, 0xd8, 0x22, 0x1b, 0x13, 0x83, 0x2c, 0x4d, 0x5d, 0xe3, 0x53, 0xec, 0xaa}, + subYX: fp.Elt{0xd5, 0xc0, 0xb0, 0xe7, 0x28, 0xcc, 0x22, 0x67, 0x53, 0x5c, 0x07, 0xdb, 0xbb, 0xe9, 0x9d, 0x70, 0x61, 0x0a, 0x01, 0xd7, 0xa7, 0x8d, 0xf6, 0xca, 0x6c, 0xcc, 0x57, 0x2c, 0xef, 0x1a, 0x0a, 0x03}, + dt2: fp.Elt{0xaa, 0xd2, 0x3a, 0x00, 0x73, 0xf7, 0xb1, 0x7b, 0x08, 0x66, 0x21, 0x2b, 0x80, 0x29, 0x3f, 0x0b, 0x3e, 0xd2, 0x0e, 0x52, 0x86, 0xdc, 0x21, 0x78, 0x80, 0x54, 0x06, 0x24, 0x1c, 0x9c, 0xbe, 0x20}, + }, + { /* 37P */ + addYX: fp.Elt{0xa6, 0x73, 0x96, 0x24, 0xd8, 0x87, 0x53, 0xe1, 0x93, 0xe4, 0x46, 0xf5, 0x2d, 0xbc, 0x43, 0x59, 0xb5, 0x63, 0x6f, 0xc3, 0x81, 0x9a, 0x7f, 0x1c, 0xde, 0xc1, 0x0a, 0x1f, 0x36, 0xb3, 0x0a, 0x75}, + subYX: fp.Elt{0x60, 0x5e, 0x02, 0xe2, 0x4a, 0xe4, 0xe0, 0x20, 0x38, 0xb9, 0xdc, 0xcb, 0x2f, 0x3b, 0x3b, 0xb0, 0x1c, 0x0d, 0x5a, 0xf9, 0x9c, 0x63, 0x5d, 0x10, 0x11, 0xe3, 0x67, 0x50, 0x54, 0x4c, 0x76, 0x69}, + dt2: fp.Elt{0x37, 0x10, 0xf8, 0xa2, 0x83, 0x32, 0x8a, 0x1e, 0xf1, 0xcb, 0x7f, 0xbd, 0x23, 0xda, 0x2e, 0x6f, 0x63, 0x25, 0x2e, 0xac, 0x5b, 0xd1, 0x2f, 0xb7, 0x40, 0x50, 0x07, 0xb7, 0x3f, 0x6b, 0xf9, 0x54}, + }, + { /* 39P */ + addYX: fp.Elt{0x79, 0x92, 0x66, 0x29, 0x04, 0xf2, 0xad, 0x0f, 0x4a, 0x72, 0x7d, 0x7d, 0x04, 0xa2, 0xdd, 0x3a, 0xf1, 0x60, 0x57, 0x8c, 0x82, 0x94, 0x3d, 0x6f, 0x9e, 0x53, 0xb7, 0x2b, 0xc5, 0xe9, 0x7f, 0x3d}, + subYX: fp.Elt{0xcd, 0x1e, 0xb1, 0x16, 0xc6, 0xaf, 0x7d, 0x17, 0x79, 0x64, 0x57, 0xfa, 0x9c, 0x4b, 0x76, 0x89, 0x85, 0xe7, 0xec, 0xe6, 0x10, 0xa1, 0xa8, 0xb7, 0xf0, 0xdb, 0x85, 0xbe, 0x9f, 0x83, 0xe6, 0x78}, + dt2: fp.Elt{0x6b, 0x85, 0xb8, 0x37, 0xf7, 0x2d, 0x33, 0x70, 0x8a, 0x17, 0x1a, 0x04, 0x43, 0x5d, 0xd0, 0x75, 0x22, 0x9e, 0xe5, 0xa0, 0x4a, 0xf7, 0x0f, 0x32, 0x42, 0x82, 0x08, 0x50, 0xf3, 0x68, 0xf2, 0x70}, + }, + { /* 41P */ + addYX: fp.Elt{0x47, 0x5f, 0x80, 0xb1, 0x83, 0x45, 0x86, 0x66, 0x19, 0x7c, 0xdd, 0x60, 0xd1, 0xc5, 0x35, 0xf5, 0x06, 0xb0, 0x4c, 0x1e, 0xb7, 0x4e, 0x87, 0xe9, 0xd9, 0x89, 0xd8, 0xfa, 0x5c, 0x34, 0x0d, 0x7c}, + subYX: fp.Elt{0x55, 0xf3, 0xdc, 0x70, 0x20, 0x11, 0x24, 0x23, 0x17, 0xe1, 0xfc, 0xe7, 0x7e, 0xc9, 0x0c, 0x38, 0x98, 0xb6, 0x52, 0x35, 0xed, 0xde, 0x1d, 0xb3, 0xb9, 0xc4, 0xb8, 0x39, 0xc0, 0x56, 0x4e, 0x40}, + dt2: fp.Elt{0x8a, 0x33, 0x78, 0x8c, 0x4b, 0x1f, 0x1f, 0x59, 0xe1, 0xb5, 0xe0, 0x67, 0xb1, 0x6a, 0x36, 0xa0, 0x44, 0x3d, 0x5f, 0xb4, 0x52, 0x41, 0xbc, 0x5c, 0x77, 0xc7, 0xae, 0x2a, 0x76, 0x54, 0xd7, 0x20}, + }, + { /* 43P */ + addYX: fp.Elt{0x58, 0xb7, 0x3b, 0xc7, 0x6f, 0xc3, 0x8f, 0x5e, 0x9a, 0xbb, 0x3c, 0x36, 0xa5, 0x43, 0xe5, 0xac, 0x22, 0xc9, 0x3b, 0x90, 0x7d, 0x4a, 0x93, 0xa9, 0x62, 0xec, 0xce, 0xf3, 0x46, 0x1e, 0x8f, 0x2b}, + subYX: 
fp.Elt{0x43, 0xf5, 0xb9, 0x35, 0xb1, 0xfe, 0x74, 0x9d, 0x6c, 0x95, 0x8c, 0xde, 0xf1, 0x7d, 0xb3, 0x84, 0xa9, 0x8b, 0x13, 0x57, 0x07, 0x2b, 0x32, 0xe9, 0xe1, 0x4c, 0x0b, 0x79, 0xa8, 0xad, 0xb8, 0x38}, + dt2: fp.Elt{0x5d, 0xf9, 0x51, 0xdf, 0x9c, 0x4a, 0xc0, 0xb5, 0xac, 0xde, 0x1f, 0xcb, 0xae, 0x52, 0x39, 0x2b, 0xda, 0x66, 0x8b, 0x32, 0x8b, 0x6d, 0x10, 0x1d, 0x53, 0x19, 0xba, 0xce, 0x32, 0xeb, 0x9a, 0x04}, + }, + { /* 45P */ + addYX: fp.Elt{0x31, 0x79, 0xfc, 0x75, 0x0b, 0x7d, 0x50, 0xaa, 0xd3, 0x25, 0x67, 0x7a, 0x4b, 0x92, 0xef, 0x0f, 0x30, 0x39, 0x6b, 0x39, 0x2b, 0x54, 0x82, 0x1d, 0xfc, 0x74, 0xf6, 0x30, 0x75, 0xe1, 0x5e, 0x79}, + subYX: fp.Elt{0x7e, 0xfe, 0xdc, 0x63, 0x3c, 0x7d, 0x76, 0xd7, 0x40, 0x6e, 0x85, 0x97, 0x48, 0x59, 0x9c, 0x20, 0x13, 0x7c, 0x4f, 0xe1, 0x61, 0x68, 0x67, 0xb6, 0xfc, 0x25, 0xd6, 0xc8, 0xe0, 0x65, 0xc6, 0x51}, + dt2: fp.Elt{0x81, 0xbd, 0xec, 0x52, 0x0a, 0x5b, 0x4a, 0x25, 0xe7, 0xaf, 0x34, 0xe0, 0x6e, 0x1f, 0x41, 0x5d, 0x31, 0x4a, 0xee, 0xca, 0x0d, 0x4d, 0xa2, 0xe6, 0x77, 0x44, 0xc5, 0x9d, 0xf4, 0x9b, 0xd1, 0x6c}, + }, + { /* 47P */ + addYX: fp.Elt{0x86, 0xc3, 0xaf, 0x65, 0x21, 0x61, 0xfe, 0x1f, 0x10, 0x1b, 0xd5, 0xb8, 0x88, 0x2a, 0x2a, 0x08, 0xaa, 0x0b, 0x99, 0x20, 0x7e, 0x62, 0xf6, 0x76, 0xe7, 0x43, 0x9e, 0x42, 0xa7, 0xb3, 0x01, 0x5e}, + subYX: fp.Elt{0xa3, 0x9c, 0x17, 0x52, 0x90, 0x61, 0x87, 0x7e, 0x85, 0x9f, 0x2c, 0x0b, 0x06, 0x0a, 0x1d, 0x57, 0x1e, 0x71, 0x99, 0x84, 0xa8, 0xba, 0xa2, 0x80, 0x38, 0xe6, 0xb2, 0x40, 0xdb, 0xf3, 0x20, 0x75}, + dt2: fp.Elt{0xa1, 0x57, 0x93, 0xd3, 0xe3, 0x0b, 0xb5, 0x3d, 0xa5, 0x94, 0x9e, 0x59, 0xdd, 0x6c, 0x7b, 0x96, 0x6e, 0x1e, 0x31, 0xdf, 0x64, 0x9a, 0x30, 0x1a, 0x86, 0xc9, 0xf3, 0xce, 0x9c, 0x2c, 0x09, 0x71}, + }, + { /* 49P */ + addYX: fp.Elt{0xcf, 0x1d, 0x05, 0x74, 0xac, 0xd8, 0x6b, 0x85, 0x1e, 0xaa, 0xb7, 0x55, 0x08, 0xa4, 0xf6, 0x03, 0xeb, 0x3c, 0x74, 0xc9, 0xcb, 0xe7, 0x4a, 0x3a, 0xde, 0xab, 0x37, 0x71, 0xbb, 0xa5, 0x73, 0x41}, + subYX: fp.Elt{0x8c, 0x91, 0x64, 0x03, 0x3f, 0x52, 0xd8, 0x53, 0x1c, 0x6b, 0xab, 0x3f, 0xf4, 0x04, 0xb4, 0xa2, 0xa4, 0xe5, 0x81, 0x66, 0x9e, 0x4a, 0x0b, 0x08, 0xa7, 0x7b, 0x25, 0xd0, 0x03, 0x5b, 0xa1, 0x0e}, + dt2: fp.Elt{0x8a, 0x21, 0xf9, 0xf0, 0x31, 0x6e, 0xc5, 0x17, 0x08, 0x47, 0xfc, 0x1a, 0x2b, 0x6e, 0x69, 0x5a, 0x76, 0xf1, 0xb2, 0xf4, 0x68, 0x16, 0x93, 0xf7, 0x67, 0x3a, 0x4e, 0x4a, 0x61, 0x65, 0xc5, 0x5f}, + }, + { /* 51P */ + addYX: fp.Elt{0x8e, 0x98, 0x90, 0x77, 0xe6, 0xe1, 0x92, 0x48, 0x22, 0xd7, 0x5c, 0x1c, 0x0f, 0x95, 0xd5, 0x01, 0xed, 0x3e, 0x92, 0xe5, 0x9a, 0x81, 0xb0, 0xe3, 0x1b, 0x65, 0x46, 0x9d, 0x40, 0xc7, 0x14, 0x32}, + subYX: fp.Elt{0xe5, 0x7a, 0x6d, 0xc4, 0x0d, 0x57, 0x6e, 0x13, 0x8f, 0xdc, 0xf8, 0x54, 0xcc, 0xaa, 0xd0, 0x0f, 0x86, 0xad, 0x0d, 0x31, 0x03, 0x9f, 0x54, 0x59, 0xa1, 0x4a, 0x45, 0x4c, 0x41, 0x1c, 0x71, 0x62}, + dt2: fp.Elt{0x70, 0x17, 0x65, 0x06, 0x74, 0x82, 0x29, 0x13, 0x36, 0x94, 0x27, 0x8a, 0x66, 0xa0, 0xa4, 0x3b, 0x3c, 0x22, 0x5d, 0x18, 0xec, 0xb8, 0xb6, 0xd9, 0x3c, 0x83, 0xcb, 0x3e, 0x07, 0x94, 0xea, 0x5b}, + }, + { /* 53P */ + addYX: fp.Elt{0xf8, 0xd2, 0x43, 0xf3, 0x63, 0xce, 0x70, 0xb4, 0xf1, 0xe8, 0x43, 0x05, 0x8f, 0xba, 0x67, 0x00, 0x6f, 0x7b, 0x11, 0xa2, 0xa1, 0x51, 0xda, 0x35, 0x2f, 0xbd, 0xf1, 0x44, 0x59, 0x78, 0xd0, 0x4a}, + subYX: fp.Elt{0xe4, 0x9b, 0xc8, 0x12, 0x09, 0xbf, 0x1d, 0x64, 0x9c, 0x57, 0x6e, 0x7d, 0x31, 0x8b, 0xf3, 0xac, 0x65, 0xb0, 0x97, 0xf6, 0x02, 0x9e, 0xfe, 0xab, 0xec, 0x1e, 0xf6, 0x48, 0xc1, 0xd5, 0xac, 0x3a}, + dt2: fp.Elt{0x01, 0x83, 0x31, 0xc3, 0x34, 0x3b, 0x8e, 0x85, 0x26, 0x68, 0x31, 0x07, 0x47, 0xc0, 0x99, 0xdc, 0x8c, 0xa8, 0x9d, 0xd3, 
0x2e, 0x5b, 0x08, 0x34, 0x3d, 0x85, 0x02, 0xd9, 0xb1, 0x0c, 0xff, 0x3a}, + }, + { /* 55P */ + addYX: fp.Elt{0x05, 0x35, 0xc5, 0xf4, 0x0b, 0x43, 0x26, 0x92, 0x83, 0x22, 0x1f, 0x26, 0x13, 0x9c, 0xe4, 0x68, 0xc6, 0x27, 0xd3, 0x8f, 0x78, 0x33, 0xef, 0x09, 0x7f, 0x9e, 0xd9, 0x2b, 0x73, 0x9f, 0xcf, 0x2c}, + subYX: fp.Elt{0x5e, 0x40, 0x20, 0x3a, 0xeb, 0xc7, 0xc5, 0x87, 0xc9, 0x56, 0xad, 0xed, 0xef, 0x11, 0xe3, 0x8e, 0xf9, 0xd5, 0x29, 0xad, 0x48, 0x2e, 0x25, 0x29, 0x1d, 0x25, 0xcd, 0xf4, 0x86, 0x7e, 0x0e, 0x11}, + dt2: fp.Elt{0xe4, 0xf5, 0x03, 0xd6, 0x9e, 0xd8, 0xc0, 0x57, 0x0c, 0x20, 0xb0, 0xf0, 0x28, 0x86, 0x88, 0x12, 0xb7, 0x3b, 0x2e, 0xa0, 0x09, 0x27, 0x17, 0x53, 0x37, 0x3a, 0x69, 0xb9, 0xe0, 0x57, 0xc5, 0x05}, + }, + { /* 57P */ + addYX: fp.Elt{0xb0, 0x0e, 0xc2, 0x89, 0xb0, 0xbb, 0x76, 0xf7, 0x5c, 0xd8, 0x0f, 0xfa, 0xf6, 0x5b, 0xf8, 0x61, 0xfb, 0x21, 0x44, 0x63, 0x4e, 0x3f, 0xb9, 0xb6, 0x05, 0x12, 0x86, 0x41, 0x08, 0xef, 0x9f, 0x28}, + subYX: fp.Elt{0x6f, 0x7e, 0xc9, 0x1f, 0x31, 0xce, 0xf9, 0xd8, 0xae, 0xfd, 0xf9, 0x11, 0x30, 0x26, 0x3f, 0x7a, 0xdd, 0x25, 0xed, 0x8b, 0xa0, 0x7e, 0x5b, 0xe1, 0x5a, 0x87, 0xe9, 0x8f, 0x17, 0x4c, 0x15, 0x6e}, + dt2: fp.Elt{0xbf, 0x9a, 0xd6, 0xfe, 0x36, 0x63, 0x61, 0xcf, 0x4f, 0xc9, 0x35, 0x83, 0xe7, 0xe4, 0x16, 0x9b, 0xe7, 0x7f, 0x3a, 0x75, 0x65, 0x97, 0x78, 0x13, 0x19, 0xa3, 0x5c, 0xa9, 0x42, 0xf6, 0xfb, 0x6a}, + }, + { /* 59P */ + addYX: fp.Elt{0xcc, 0xa8, 0x13, 0xf9, 0x70, 0x50, 0xe5, 0x5d, 0x61, 0xf5, 0x0c, 0x2b, 0x7b, 0x16, 0x1d, 0x7d, 0x89, 0xd4, 0xea, 0x90, 0xb6, 0x56, 0x29, 0xda, 0xd9, 0x1e, 0x80, 0xdb, 0xce, 0x93, 0xc0, 0x12}, + subYX: fp.Elt{0xc1, 0xd2, 0xf5, 0x62, 0x0c, 0xde, 0xa8, 0x7d, 0x9a, 0x7b, 0x0e, 0xb0, 0xa4, 0x3d, 0xfc, 0x98, 0xe0, 0x70, 0xad, 0x0d, 0xda, 0x6a, 0xeb, 0x7d, 0xc4, 0x38, 0x50, 0xb9, 0x51, 0xb8, 0xb4, 0x0d}, + dt2: fp.Elt{0x0f, 0x19, 0xb8, 0x08, 0x93, 0x7f, 0x14, 0xfc, 0x10, 0xe3, 0x1a, 0xa1, 0xa0, 0x9d, 0x96, 0x06, 0xfd, 0xd7, 0xc7, 0xda, 0x72, 0x55, 0xe7, 0xce, 0xe6, 0x5c, 0x63, 0xc6, 0x99, 0x87, 0xaa, 0x33}, + }, + { /* 61P */ + addYX: fp.Elt{0xb1, 0x6c, 0x15, 0xfc, 0x88, 0xf5, 0x48, 0x83, 0x27, 0x6d, 0x0a, 0x1a, 0x9b, 0xba, 0xa2, 0x6d, 0xb6, 0x5a, 0xca, 0x87, 0x5c, 0x2d, 0x26, 0xe2, 0xa6, 0x89, 0xd5, 0xc8, 0xc1, 0xd0, 0x2c, 0x21}, + subYX: fp.Elt{0xf2, 0x5c, 0x08, 0xbd, 0x1e, 0xf5, 0x0f, 0xaf, 0x1f, 0x3f, 0xd3, 0x67, 0x89, 0x1a, 0xf5, 0x78, 0x3c, 0x03, 0x60, 0x50, 0xe1, 0xbf, 0xc2, 0x6e, 0x86, 0x1a, 0xe2, 0xe8, 0x29, 0x6f, 0x3c, 0x23}, + dt2: fp.Elt{0x81, 0xc7, 0x18, 0x7f, 0x10, 0xd5, 0xf4, 0xd2, 0x28, 0x9d, 0x7e, 0x52, 0xf2, 0xcd, 0x2e, 0x12, 0x41, 0x33, 0x3d, 0x3d, 0x2a, 0x86, 0x0a, 0xa7, 0xe3, 0x4c, 0x91, 0x11, 0x89, 0x77, 0xb7, 0x1d}, + }, + { /* 63P */ + addYX: fp.Elt{0xb6, 0x1a, 0x70, 0xdd, 0x69, 0x47, 0x39, 0xb3, 0xa5, 0x8d, 0xcf, 0x19, 0xd4, 0xde, 0xb8, 0xe2, 0x52, 0xc8, 0x2a, 0xfd, 0x61, 0x41, 0xdf, 0x15, 0xbe, 0x24, 0x7d, 0x01, 0x8a, 0xca, 0xe2, 0x7a}, + subYX: fp.Elt{0x6f, 0xc2, 0x6b, 0x7c, 0x39, 0x52, 0xf3, 0xdd, 0x13, 0x01, 0xd5, 0x53, 0xcc, 0xe2, 0x97, 0x7a, 0x30, 0xa3, 0x79, 0xbf, 0x3a, 0xf4, 0x74, 0x7c, 0xfc, 0xad, 0xe2, 0x26, 0xad, 0x97, 0xad, 0x31}, + dt2: fp.Elt{0x62, 0xb9, 0x20, 0x09, 0xed, 0x17, 0xe8, 0xb7, 0x9d, 0xda, 0x19, 0x3f, 0xcc, 0x18, 0x85, 0x1e, 0x64, 0x0a, 0x56, 0x25, 0x4f, 0xc1, 0x91, 0xe4, 0x83, 0x2c, 0x62, 0xa6, 0x53, 0xfc, 0xd1, 0x1e}, + }, +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go new file mode 100644 index 0000000000..324bd8f334 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go @@ 
-0,0 +1,411 @@ +// Package ed448 implements the Ed448 signature scheme as described in RFC-8032. +// +// This package implements two signature variants. +// +// | Scheme Name | Sign Function | Verification | Context | +// |-------------|-------------------|---------------|-------------------| +// | Ed448 | Sign | Verify | Yes, can be empty | +// | Ed448Ph | SignPh | VerifyPh | Yes, can be empty | +// | All above | (PrivateKey).Sign | VerifyAny | As above | +// +// Specific functions for sign and verify are defined. A generic signing +// function for all schemes is available through the crypto.Signer interface, +// which is implemented by the PrivateKey type. A corresponding all-in-one +// verification method is provided by the VerifyAny function. +// +// Both schemes require a context string for domain separation. This parameter +// is passed using a SignerOptions struct defined in this package. +// +// References: +// +// - RFC8032: https://rfc-editor.org/rfc/rfc8032.txt +// - EdDSA for more curves: https://eprint.iacr.org/2015/677 +// - High-speed high-security signatures: https://doi.org/10.1007/s13389-012-0027-1 +package ed448 + +import ( + "bytes" + "crypto" + cryptoRand "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "github.com/cloudflare/circl/ecc/goldilocks" + "github.com/cloudflare/circl/internal/sha3" + "github.com/cloudflare/circl/sign" +) + +const ( + // ContextMaxSize is the maximum length (in bytes) allowed for context. + ContextMaxSize = 255 + // PublicKeySize is the length in bytes of Ed448 public keys. + PublicKeySize = 57 + // PrivateKeySize is the length in bytes of Ed448 private keys. + PrivateKeySize = 114 + // SignatureSize is the length in bytes of signatures. + SignatureSize = 114 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 57 +) + +const ( + paramB = 456 / 8 // Size of keys in bytes. + hashSize = 2 * paramB // Size of the hash function's output. +) + +// SignerOptions implements crypto.SignerOpts and augments it with parameters +// that are specific to the Ed448 signature schemes. +type SignerOptions struct { + // Hash must be crypto.Hash(0) for both Ed448 and Ed448Ph. + crypto.Hash + + // Context is an optional domain separation string for signing. + // Its length must be less than or equal to 255 bytes. + Context string + + // Scheme is an identifier for choosing a signature scheme. + Scheme SchemeID +} + +// SchemeID is an identifier for each signature scheme. +type SchemeID uint + +const ( + ED448 SchemeID = iota + ED448Ph +) + +// PublicKey is the type of Ed448 public keys. +type PublicKey []byte + +// Equal reports whether pub and x have the same value. +func (pub PublicKey) Equal(x crypto.PublicKey) bool { + xx, ok := x.(PublicKey) + return ok && bytes.Equal(pub, xx) +} + +// PrivateKey is the type of Ed448 private keys. It implements crypto.Signer. +type PrivateKey []byte + +// Equal reports whether priv and x have the same value. +func (priv PrivateKey) Equal(x crypto.PrivateKey) bool { + xx, ok := x.(PrivateKey) + return ok && subtle.ConstantTimeCompare(priv, xx) == 1 +} + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make([]byte, PublicKeySize) + copy(publicKey, priv[SeedSize:]) + return PublicKey(publicKey) +} + +// Seed returns the private key seed corresponding to priv. It is provided for +// interoperability with RFC 8032.
RFC 8032's private keys correspond to seeds +// in this package. +func (priv PrivateKey) Seed() []byte { + seed := make([]byte, SeedSize) + copy(seed, priv[:SeedSize]) + return seed +} + +func (priv PrivateKey) Scheme() sign.Scheme { return sch } + +func (pub PublicKey) Scheme() sign.Scheme { return sch } + +func (priv PrivateKey) MarshalBinary() (data []byte, err error) { + privateKey := make(PrivateKey, PrivateKeySize) + copy(privateKey, priv) + return privateKey, nil +} + +func (pub PublicKey) MarshalBinary() (data []byte, err error) { + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, pub) + return publicKey, nil +} + +// Sign creates a signature of a message given a key pair. +// This function supports both signature variants defined in RFC-8032, +// namely Ed448 (or pure EdDSA) and Ed448Ph. +// The opts.HashFunc() must return zero; this can +// be achieved by passing crypto.Hash(0) as the value for opts. +// Use a SignerOptions struct with its Scheme field set to ED448Ph to select +// the pre-hashed variant. +// The struct can also optionally be used to pass a context string for signing. +func (priv PrivateKey) Sign( + rand io.Reader, + message []byte, + opts crypto.SignerOpts, +) (signature []byte, err error) { + var ctx string + var scheme SchemeID + + if o, ok := opts.(SignerOptions); ok { + ctx = o.Context + scheme = o.Scheme + } + + switch true { + case scheme == ED448 && opts.HashFunc() == crypto.Hash(0): + return Sign(priv, message, ctx), nil + case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0): + return SignPh(priv, message, ctx), nil + default: + return nil, errors.New("ed448: bad hash algorithm") + } +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + if rand == nil { + rand = cryptoRand.Reader + } + + seed := make(PrivateKey, SeedSize) + if _, err := io.ReadFull(rand, seed); err != nil { + return nil, nil, err + } + + privateKey := NewKeyFromSeed(seed) + publicKey := make([]byte, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + + return publicKey, privateKey, nil +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + privateKey := make([]byte, PrivateKeySize) + newKeyFromSeed(privateKey, seed) + return privateKey +} + +func newKeyFromSeed(privateKey, seed []byte) { + if l := len(seed); l != SeedSize { + panic("ed448: bad seed length: " + strconv.Itoa(l)) + } + + var h [hashSize]byte + H := sha3.NewShake256() + _, _ = H.Write(seed) + _, _ = H.Read(h[:]) + s := &goldilocks.Scalar{} + deriveSecretScalar(s, h[:paramB]) + + copy(privateKey[:SeedSize], seed) + _ = goldilocks.Curve{}.ScalarBaseMult(s).ToBytes(privateKey[SeedSize:]) +} + +func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) { + if len(ctx) > ContextMaxSize { + panic(fmt.Errorf("ed448: bad context length: " + strconv.Itoa(len(ctx)))) + } + + H := sha3.NewShake256() + var PHM []byte + + if preHash { + var h [64]byte + _, _ = H.Write(message) + _, _ = H.Read(h[:]) + PHM = h[:] + H.Reset() + } else { + PHM = message + } + + // 1. Hash the 57-byte private key using SHAKE256(x, 114).
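+ // The 114-byte digest is split in half: the low 57 bytes are clamped + // into the secret scalar s, and the high 57 bytes become the prefix + // that seeds the deterministic nonce r computed in step 2.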
+ var h [hashSize]byte + _, _ = H.Write(privateKey[:SeedSize]) + _, _ = H.Read(h[:]) + s := &goldilocks.Scalar{} + deriveSecretScalar(s, h[:paramB]) + prefix := h[paramB:] + + // 2. Compute SHAKE256(dom4(F, C) || prefix || PH(M), 114). + var rPM [hashSize]byte + H.Reset() + + writeDom(&H, ctx, preHash) + + _, _ = H.Write(prefix) + _, _ = H.Write(PHM) + _, _ = H.Read(rPM[:]) + + // 3. Compute the point [r]B. + r := &goldilocks.Scalar{} + r.FromBytes(rPM[:]) + R := (&[paramB]byte{})[:] + if err := (goldilocks.Curve{}.ScalarBaseMult(r).ToBytes(R)); err != nil { + panic(err) + } + // 4. Compute SHAKE256(dom4(F, C) || R || A || PH(M), 114). + var hRAM [hashSize]byte + H.Reset() + + writeDom(&H, ctx, preHash) + + _, _ = H.Write(R) + _, _ = H.Write(privateKey[SeedSize:]) + _, _ = H.Write(PHM) + _, _ = H.Read(hRAM[:]) + + // 5. Compute S = (r + k * s) mod order. + k := &goldilocks.Scalar{} + k.FromBytes(hRAM[:]) + S := &goldilocks.Scalar{} + S.Mul(k, s) + S.Add(S, r) + + // 6. The signature is the concatenation of R and S. + copy(signature[:paramB], R[:]) + copy(signature[paramB:], S[:]) +} + +// Sign signs the message with privateKey and returns a signature. +// This function supports the signature variant defined in RFC-8032: Ed448, +// also known as the pure version of EdDSA. +// It will panic if len(privateKey) is not PrivateKeySize. +func Sign(priv PrivateKey, message []byte, ctx string) []byte { + signature := make([]byte, SignatureSize) + signAll(signature, priv, message, []byte(ctx), false) + return signature +} + +// SignPh creates a signature of a message given a keypair. +// This function supports the signature variant defined in RFC-8032: Ed448ph, +// meaning it internally hashes the message using SHAKE-256. +// An optional context string of at most 255 bytes may be passed; it can be +// empty. +func SignPh(priv PrivateKey, message []byte, ctx string) []byte { + signature := make([]byte, SignatureSize) + signAll(signature, priv, message, []byte(ctx), true) + return signature +} + +func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool { + if len(public) != PublicKeySize || + len(signature) != SignatureSize || + len(ctx) > ContextMaxSize || + !isLessThanOrder(signature[paramB:]) { + return false + } + + P, err := goldilocks.FromBytes(public) + if err != nil { + return false + } + + H := sha3.NewShake256() + var PHM []byte + + if preHash { + var h [64]byte + _, _ = H.Write(message) + _, _ = H.Read(h[:]) + PHM = h[:] + H.Reset() + } else { + PHM = message + } + + var hRAM [hashSize]byte + R := signature[:paramB] + + writeDom(&H, ctx, preHash) + + _, _ = H.Write(R) + _, _ = H.Write(public) + _, _ = H.Write(PHM) + _, _ = H.Read(hRAM[:]) + + k := &goldilocks.Scalar{} + k.FromBytes(hRAM[:]) + S := &goldilocks.Scalar{} + S.FromBytes(signature[paramB:]) + + encR := (&[paramB]byte{})[:] + P.Neg() + _ = goldilocks.Curve{}.CombinedMult(S, k, P).ToBytes(encR) + return bytes.Equal(R, encR) +} + +// VerifyAny returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports both signature variants defined in RFC-8032, +// namely Ed448 (or pure EdDSA) and Ed448Ph. +// The opts.HashFunc() must return zero; this can be achieved by passing +// crypto.Hash(0) as the value for opts. +// Use a SignerOptions struct to pass the context string used when signing.
+func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool { + var ctx string + var scheme SchemeID + if o, ok := opts.(SignerOptions); ok { + ctx = o.Context + scheme = o.Scheme + } + + switch true { + case scheme == ED448 && opts.HashFunc() == crypto.Hash(0): + return Verify(public, message, signature, ctx) + case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0): + return VerifyPh(public, message, signature, ctx) + default: + return false + } +} + +// Verify returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports the signature variant defined in RFC-8032: Ed448, +// also known as the pure version of EdDSA. +func Verify(public PublicKey, message, signature []byte, ctx string) bool { + return verify(public, message, signature, []byte(ctx), false) +} + +// VerifyPh returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports the signature variant defined in RFC-8032: Ed448ph, +// meaning it internally hashes the message using SHAKE-256. +// An optional context string of at most 255 bytes may be passed; it can be +// empty. +func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool { + return verify(public, message, signature, []byte(ctx), true) +} + +func deriveSecretScalar(s *goldilocks.Scalar, h []byte) { + h[0] &= 0xFC // The two least significant bits of the first octet are cleared, + h[paramB-1] = 0x00 // all eight bits of the last octet are cleared, and + h[paramB-2] |= 0x80 // the highest bit of the second to last octet is set. + s.FromBytes(h[:paramB]) +} + +// isLessThanOrder returns true if 0 <= x < order and if the last byte of x is zero. +func isLessThanOrder(x []byte) bool { + order := goldilocks.Curve{}.Order() + i := len(order) - 1 + for i > 0 && x[i] == order[i] { + i-- + } + return x[paramB-1] == 0 && x[i] < order[i] +} + +func writeDom(h io.Writer, ctx []byte, preHash bool) { + dom4 := "SigEd448" + _, _ = h.Write([]byte(dom4)) + + if preHash { + _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))}) + } else { + _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))}) + } + _, _ = h.Write(ctx) +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go new file mode 100644 index 0000000000..22da8bc0a5 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go @@ -0,0 +1,87 @@ +package ed448 + +import ( + "crypto/rand" + "encoding/asn1" + + "github.com/cloudflare/circl/sign" +) + +var sch sign.Scheme = &scheme{} + +// Scheme returns a signature interface.
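+// A typical round trip through the generic interface looks like the following +// (illustrative sketch only; msg is an assumed placeholder): +// +//	pub, priv, _ := Scheme().GenerateKey() +//	sig := Scheme().Sign(priv, msg, nil) +//	ok := Scheme().Verify(pub, msg, sig, nil)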
+func Scheme() sign.Scheme { return sch } + +type scheme struct{} + +func (*scheme) Name() string { return "Ed448" } +func (*scheme) PublicKeySize() int { return PublicKeySize } +func (*scheme) PrivateKeySize() int { return PrivateKeySize } +func (*scheme) SignatureSize() int { return SignatureSize } +func (*scheme) SeedSize() int { return SeedSize } +func (*scheme) TLSIdentifier() uint { return 0x0808 } +func (*scheme) SupportsContext() bool { return true } +func (*scheme) Oid() asn1.ObjectIdentifier { + return asn1.ObjectIdentifier{1, 3, 101, 113} +} + +func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) { + return GenerateKey(rand.Reader) +} + +func (*scheme) Sign( + sk sign.PrivateKey, + message []byte, + opts *sign.SignatureOpts, +) []byte { + priv, ok := sk.(PrivateKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + ctx := "" + if opts != nil { + ctx = opts.Context + } + return Sign(priv, message, ctx) +} + +func (*scheme) Verify( + pk sign.PublicKey, + message, signature []byte, + opts *sign.SignatureOpts, +) bool { + pub, ok := pk.(PublicKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + ctx := "" + if opts != nil { + ctx = opts.Context + } + return Verify(pub, message, signature, ctx) +} + +func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) { + privateKey := NewKeyFromSeed(seed) + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + return publicKey, privateKey +} + +func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) { + if len(buf) < PublicKeySize { + return nil, sign.ErrPubKeySize + } + pub := make(PublicKey, PublicKeySize) + copy(pub, buf[:PublicKeySize]) + return pub, nil +} + +func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) { + if len(buf) < PrivateKeySize { + return nil, sign.ErrPrivKeySize + } + priv := make(PrivateKey, PrivateKeySize) + copy(priv, buf[:PrivateKeySize]) + return priv, nil +} diff --git a/vendor/github.com/cloudflare/circl/sign/sign.go b/vendor/github.com/cloudflare/circl/sign/sign.go new file mode 100644 index 0000000000..13b20fa4b0 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/sign.go @@ -0,0 +1,110 @@ +// Package sign provides unified interfaces for signature schemes. +// +// A register of schemes is available in the package +// +// github.com/cloudflare/circl/sign/schemes +package sign + +import ( + "crypto" + "encoding" + "errors" +) + +type SignatureOpts struct { + // If non-empty, includes the given context in the signature if supported + // and will cause an error during signing otherwise. + Context string +} + +// A public key is used to verify a signature set by the corresponding private +// key. +type PublicKey interface { + // Returns the signature scheme for this public key. + Scheme() Scheme + Equal(crypto.PublicKey) bool + encoding.BinaryMarshaler + crypto.PublicKey +} + +// A private key allows one to create signatures. +type PrivateKey interface { + // Returns the signature scheme for this private key. + Scheme() Scheme + Equal(crypto.PrivateKey) bool + // For compatibility with Go standard library + crypto.Signer + crypto.PrivateKey + encoding.BinaryMarshaler +} + +// A Scheme represents a specific instance of a signature scheme. +type Scheme interface { + // Name of the scheme. + Name() string + + // GenerateKey creates a new key-pair. + GenerateKey() (PublicKey, PrivateKey, error) + + // Creates a signature using the PrivateKey on the given message and + // returns the signature. 
opts are additional options which can be nil. + // + // Panics if key is nil or wrong type or opts context is not supported. + Sign(sk PrivateKey, message []byte, opts *SignatureOpts) []byte + + // Checks whether the given signature is a valid signature set by + // the private key corresponding to the given public key on the + // given message. opts are additional options which can be nil. + // + // Panics if key is nil or wrong type or opts context is not supported. + Verify(pk PublicKey, message []byte, signature []byte, opts *SignatureOpts) bool + + // Deterministically derives a keypair from a seed. If you're unsure, + // you're better off using GenerateKey(). + // + // Panics if seed is not of length SeedSize(). + DeriveKey(seed []byte) (PublicKey, PrivateKey) + + // Unmarshals a PublicKey from the provided buffer. + UnmarshalBinaryPublicKey([]byte) (PublicKey, error) + + // Unmarshals a PrivateKey from the provided buffer. + UnmarshalBinaryPrivateKey([]byte) (PrivateKey, error) + + // Size of binary marshalled public keys. + PublicKeySize() int + + // Size of binary marshalled private keys. + PrivateKeySize() int + + // Size of signatures. + SignatureSize() int + + // Size of seeds. + SeedSize() int + + // Returns whether contexts are supported. + SupportsContext() bool +} + +var ( + // ErrTypeMismatch is the error used if types of, for instance, private + // and public keys don't match. + ErrTypeMismatch = errors.New("types mismatch") + + // ErrSeedSize is the error used if the provided seed is of the wrong + // size. + ErrSeedSize = errors.New("wrong seed size") + + // ErrPubKeySize is the error used if the provided public key is of + // the wrong size. + ErrPubKeySize = errors.New("wrong size for public key") + + // ErrPrivKeySize is the error used if the provided private key is of + // the wrong size. + ErrPrivKeySize = errors.New("wrong size for private key") + + // ErrContextNotSupported is the error used if a context is not + // supported.
+ ErrContextNotSupported = errors.New("context not supported") +) diff --git a/vendor/github.com/curioswitch/go-reassign/.gitattributes b/vendor/github.com/curioswitch/go-reassign/.gitattributes new file mode 100644 index 0000000000..d020be8ea4 --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf + diff --git a/vendor/github.com/curioswitch/go-reassign/.gitignore b/vendor/github.com/curioswitch/go-reassign/.gitignore new file mode 100644 index 0000000000..59fa33613b --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/.gitignore @@ -0,0 +1,6 @@ +.idea +.VSCode +.envrc + +build +dist diff --git a/vendor/github.com/curioswitch/go-reassign/.golangci.yml b/vendor/github.com/curioswitch/go-reassign/.golangci.yml new file mode 100644 index 0000000000..e3bf79ae72 --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/.golangci.yml @@ -0,0 +1,38 @@ +linters: + enable: + - asasalint + - bidichk + - bodyclose + - decorder + - durationcheck + - errchkjson + - errname + - errorlint + - execinquery + - exhaustive + - exportloopref + - gocritic + - goerr113 + - gofmt + - goimports + - goprintffuncname + - gosec + - importas + - misspell + - nolintlint + - nosnakecase + - prealloc + - predeclared + - promlinter + - revive + - stylecheck + - tagliatelle + - tenv + - thelper + - unconvert + - usestdlibvars +issues: + exclude-rules: + - path: magefile\.go + linters: + - deadcode diff --git a/vendor/github.com/curioswitch/go-reassign/.goreleaser.yaml b/vendor/github.com/curioswitch/go-reassign/.goreleaser.yaml new file mode 100644 index 0000000000..25f2dc0c17 --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/.goreleaser.yaml @@ -0,0 +1,27 @@ +builds: + - main: ./cmd + env: + - CGO_ENABLED=0 + targets: + - linux_amd64 + - linux_arm64 + - darwin_amd64 + - darwin_arm64 + - windows_amd64 + - windows_arm64 +archives: + - format_overrides: + - goos: windows + format: zip +release: + mode: append +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/vendor/github.com/curioswitch/go-reassign/LICENSE b/vendor/github.com/curioswitch/go-reassign/LICENSE new file mode 100644 index 0000000000..9f18bde002 --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Choko (choko@curioswitch.org) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
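For orientation, a minimal sketch of how the vendored ed448 API above can be exercised end to end; the message payload and context string here are illustrative assumptions, not part of the diff:

```go
package main

import (
	"crypto"
	"fmt"

	"github.com/cloudflare/circl/sign/ed448"
)

func main() {
	// GenerateKey falls back to crypto/rand.Reader when rand is nil.
	pub, priv, err := ed448.GenerateKey(nil)
	if err != nil {
		panic(err)
	}

	msg := []byte("example message") // assumed payload

	// Pure Ed448 with an empty context string.
	sig := ed448.Sign(priv, msg, "")
	fmt.Println("pure:", ed448.Verify(pub, msg, sig, "")) // true

	// Ed448Ph via the crypto.Signer interface: the Scheme field selects the
	// pre-hashed variant and Hash must remain crypto.Hash(0).
	opts := ed448.SignerOptions{Hash: crypto.Hash(0), Scheme: ed448.ED448Ph, Context: "example"}
	phSig, err := priv.Sign(nil, msg, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println("ph:", ed448.VerifyAny(pub, msg, phSig, opts)) // true
}
```

Signing is deterministic, so the rand argument to priv.Sign is ignored; the nonce is derived from the private key and message inside signAll, as shown above.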
diff --git a/vendor/github.com/curioswitch/go-reassign/README.md b/vendor/github.com/curioswitch/go-reassign/README.md new file mode 100644 index 0000000000..ac9c131df2 --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/README.md @@ -0,0 +1,53 @@ +# reassign + +A linter that detects reassignment of top-level variables from another package. + +## Install + +```bash +go install github.com/curioswitch/go-reassign +``` + +## Usage + +```bash +reassign ./... +``` + +Use the `-pattern` flag to change which reassigned variables are matched. By default, only `EOF` and `Err*` variables are checked. + +```bash +reassign -pattern ".*" ./... +``` + +## Background + +Package variables are commonly used to define sentinel errors which callers can use with `errors.Is` to determine the +type of a returned `error`. Some examples exist in the standard [os](https://pkg.go.dev/os#pkg-variables) library. + +Unfortunately, as with any variable, these are mutable, and it is possible to write this very dangerous code. + +```go +package main +import "io" +func bad() { + // breaks file reading + io.EOF = nil +} +``` + +This caused a new pattern for [constant errors](https://dave.cheney.net/2016/04/07/constant-errors) +to gain popularity, but constant errors don't work well with improvements to the `errors` package in recent versions of Go and may +be considered non-idiomatic compared to normal `errors.New`. If we can catch reassignment of sentinel errors, we +gain much of the safety of constant errors. + +This linter catches reassignment of variables in other packages. By default it intends to apply to as many codebases +as possible and only checks a restricted set of variable names, `EOF` and `Err*`, to restrict itself to sentinel errors. +Package variable reassignment is generally confusing, though, and we recommend avoiding it for all variables, not just errors. +The `pattern` flag can be set to a regular expression to define what variables cannot be reassigned, and `.*` is +recommended if it works with your code. + +## Limitations + +If a variable shadows the name of an import, an assignment of a field in the variable will trigger the linter. Shadowing +can be confusing, so it's recommended to rename the variable. diff --git a/vendor/github.com/curioswitch/go-reassign/analyzer.go b/vendor/github.com/curioswitch/go-reassign/analyzer.go new file mode 100644 index 0000000000..48707adebe --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/analyzer.go @@ -0,0 +1,13 @@ +package reassign + +import ( + "github.com/curioswitch/go-reassign/internal/analyzer" + "golang.org/x/tools/go/analysis" +) + +const FlagPattern = analyzer.FlagPattern + +// NewAnalyzer returns an analyzer for checking that package variables are not reassigned.
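+// It can be run standalone with, for example, +// golang.org/x/tools/go/analysis/singlechecker: +// +//	singlechecker.Main(reassign.NewAnalyzer())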
+func NewAnalyzer() *analysis.Analyzer { + return analyzer.New() +} diff --git a/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go b/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go new file mode 100644 index 0000000000..e1b47d5b95 --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go @@ -0,0 +1,84 @@ +package analyzer + +import ( + "fmt" + "go/ast" + "go/types" + "regexp" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const FlagPattern = "pattern" + +func New() *analysis.Analyzer { + a := &analysis.Analyzer{ + Name: "reassign", + Doc: "Checks that package variables are not reassigned", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, + } + a.Flags.String(FlagPattern, `^(Err.*|EOF)$`, "Pattern to match package variables against to prevent reassignment") + return a +} + +func run(pass *analysis.Pass) (interface{}, error) { + checkRE, err := regexp.Compile(pass.Analyzer.Flags.Lookup(FlagPattern).Value.String()) + if err != nil { + return nil, fmt.Errorf("invalid pattern: %w", err) + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + inspect.Preorder([]ast.Node{(*ast.AssignStmt)(nil), (*ast.UnaryExpr)(nil)}, func(node ast.Node) { + switch node := node.(type) { + case *ast.AssignStmt: + for _, lhs := range node.Lhs { + reportImported(pass, lhs, checkRE, "reassigning") + } + default: + // TODO(chokoswitch): Consider handling operations other than assignment on globals, for example + // taking their address. + } + }) + return nil, nil +} + +func reportImported(pass *analysis.Pass, expr ast.Expr, checkRE *regexp.Regexp, prefix string) { + switch x := expr.(type) { + case *ast.SelectorExpr: + if !checkRE.MatchString(x.Sel.Name) { + return + } + + selectIdent, ok := x.X.(*ast.Ident) + if !ok { + return + } + + if selectObj, ok := pass.TypesInfo.Uses[selectIdent]; ok { + if pkg, ok := selectObj.(*types.PkgName); !ok || pkg.Imported() == pass.Pkg { + return + } + } + + pass.Reportf(expr.Pos(), "%s variable %s in other package %s", prefix, x.Sel.Name, selectIdent.Name) + + case *ast.Ident: + use, ok := pass.TypesInfo.Uses[x].(*types.Var) + if !ok { + return + } + + if use.Pkg() == pass.Pkg { + return + } + + if !checkRE.MatchString(x.Name) { + return + } + + pass.Reportf(expr.Pos(), "%s variable %s from other package %s", prefix, x.Name, use.Pkg().Path()) + } +} diff --git a/vendor/github.com/daixiang0/gci/pkg/config/config.go b/vendor/github.com/daixiang0/gci/pkg/config/config.go new file mode 100644 index 0000000000..51f6ccf3b7 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/config/config.go @@ -0,0 +1,90 @@ +package config + +import ( + "sort" + "strings" + + "gopkg.in/yaml.v3" + + "github.com/daixiang0/gci/pkg/section" +) + +var defaultOrder = map[string]int{ + section.StandardType: 0, + section.DefaultType: 1, + section.CustomType: 2, + section.BlankType: 3, + section.DotType: 4, + section.AliasType: 5, +} + +type BoolConfig struct { + NoInlineComments bool `yaml:"no-inlineComments"` + NoPrefixComments bool `yaml:"no-prefixComments"` + Debug bool `yaml:"-"` + SkipGenerated bool `yaml:"skipGenerated"` + SkipVendor bool `yaml:"skipVendor"` + CustomOrder bool `yaml:"customOrder"` +} + +type Config struct { + BoolConfig + Sections section.SectionList + SectionSeparators section.SectionList +} + +type YamlConfig struct { + Cfg BoolConfig `yaml:",inline"` + SectionStrings []string 
`yaml:"sections"` + SectionSeparatorStrings []string `yaml:"sectionseparators"` +} + +func (g YamlConfig) Parse() (*Config, error) { + var err error + + sections, err := section.Parse(g.SectionStrings) + if err != nil { + return nil, err + } + if sections == nil { + sections = section.DefaultSections() + } + + // if default order sorted sections + if !g.Cfg.CustomOrder { + sort.Slice(sections, func(i, j int) bool { + sectionI, sectionJ := sections[i].Type(), sections[j].Type() + + if strings.Compare(sectionI, sectionJ) == 0 { + return strings.Compare(sections[i].String(), sections[j].String()) < 0 + } + return defaultOrder[sectionI] < defaultOrder[sectionJ] + }) + } + + sectionSeparators, err := section.Parse(g.SectionSeparatorStrings) + if err != nil { + return nil, err + } + if sectionSeparators == nil { + sectionSeparators = section.DefaultSectionSeparators() + } + + return &Config{g.Cfg, sections, sectionSeparators}, nil +} + +func ParseConfig(in string) (*Config, error) { + config := YamlConfig{} + + err := yaml.Unmarshal([]byte(in), &config) + if err != nil { + return nil, err + } + + gciCfg, err := config.Parse() + if err != nil { + return nil, err + } + + return gciCfg, nil +} diff --git a/vendor/github.com/daixiang0/gci/pkg/format/format.go b/vendor/github.com/daixiang0/gci/pkg/format/format.go new file mode 100644 index 0000000000..062701d2e4 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/format/format.go @@ -0,0 +1,46 @@ +package format + +import ( + "fmt" + + "github.com/daixiang0/gci/pkg/config" + "github.com/daixiang0/gci/pkg/log" + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/section" + "github.com/daixiang0/gci/pkg/specificity" +) + +type Block struct { + Start, End int +} + +type resultMap map[string][]*Block + +func Format(data []*parse.GciImports, cfg *config.Config) (resultMap, error) { + result := make(resultMap, len(cfg.Sections)) + for _, d := range data { + // determine match specificity for every available section + var bestSection section.Section + var bestSectionSpecificity specificity.MatchSpecificity = specificity.MisMatch{} + for _, section := range cfg.Sections { + sectionSpecificity := section.MatchSpecificity(d) + if sectionSpecificity.IsMoreSpecific(specificity.MisMatch{}) && sectionSpecificity.Equal(bestSectionSpecificity) { + // specificity is identical + // return nil, section.EqualSpecificityMatchError{} + return nil, nil + } + if sectionSpecificity.IsMoreSpecific(bestSectionSpecificity) { + // better match found + bestSectionSpecificity = sectionSpecificity + bestSection = section + } + } + if bestSection == nil { + return nil, section.NoMatchingSectionForImportError{Imports: d} + } + log.L().Debug(fmt.Sprintf("Matched import %v to section %s", d, bestSection)) + result[bestSection.String()] = append(result[bestSection.String()], &Block{d.Start, d.End}) + } + + return result, nil +} diff --git a/vendor/github.com/daixiang0/gci/pkg/gci/gci.go b/vendor/github.com/daixiang0/gci/pkg/gci/gci.go index 7efa576ca9..163e95a861 100644 --- a/vendor/github.com/daixiang0/gci/pkg/gci/gci.go +++ b/vendor/github.com/daixiang0/gci/pkg/gci/gci.go @@ -2,382 +2,228 @@ package gci import ( "bytes" + "errors" "fmt" - "io" - "io/ioutil" + goFormat "go/format" "os" - "os/exec" - "path/filepath" - "sort" - "strings" + "sync" + + "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/myers" + "github.com/hexops/gotextdiff/span" + "golang.org/x/sync/errgroup" + + "github.com/daixiang0/gci/pkg/config" + 
"github.com/daixiang0/gci/pkg/format" + "github.com/daixiang0/gci/pkg/io" + "github.com/daixiang0/gci/pkg/log" + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/section" + "github.com/daixiang0/gci/pkg/utils" ) -const ( - // pkg type: standard, remote, local - standard int = iota - // 3rd-party packages - remote - local - - commentFlag = "//" -) - -var ( - importStartFlag = []byte(` -import ( -`) - importEndFlag = []byte(` -) -`) -) - -type FlagSet struct { - LocalFlag []string - DoWrite, DoDiff *bool +func LocalFlagsToSections(localFlags []string) section.SectionList { + sections := section.DefaultSections() + // Add all local arguments as ImportPrefix sections + // for _, l := range localFlags { + // sections = append(sections, section.Section{l, nil, nil}) + // } + return sections } -type pkg struct { - list map[int][]string - comment map[string]string - alias map[string]string -} - -// ParseLocalFlag takes a comma-separated list of -// package-name-prefixes (as passed to the "-local" flag), and splits -// it in to a list. This is different than strings.Split in that it -// handles the empty string and empty entries in the list. -func ParseLocalFlag(str string) []string { - return strings.FieldsFunc(str, func(c rune) bool { return c == ',' }) +func PrintFormattedFiles(paths []string, cfg config.Config) error { + return processStdInAndGoFilesInPaths(paths, cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { + fmt.Print(string(formattedFile)) + return nil + }) } -func newPkg(data [][]byte, localFlag []string) *pkg { - listMap := make(map[int][]string) - commentMap := make(map[string]string) - aliasMap := make(map[string]string) - p := &pkg{ - list: listMap, - comment: commentMap, - alias: aliasMap, - } - - formatData := make([]string, 0) - // remove all empty lines - for _, v := range data { - if len(v) > 0 { - formatData = append(formatData, strings.TrimSpace(string(v))) - } - } - - n := len(formatData) - for i := n - 1; i >= 0; i-- { - line := formatData[i] - - // check commentFlag: - // 1. one line commentFlag - // 2. 
commentFlag after import path - commentIndex := strings.Index(line, commentFlag) - if commentIndex == 0 { - // comment in the last line is useless, ignore it - if i+1 >= n { - continue - } - pkg, _, _ := getPkgInfo(formatData[i+1], strings.Index(formatData[i+1], commentFlag) >= 0) - p.comment[pkg] = line - continue - } else if commentIndex > 0 { - pkg, alias, comment := getPkgInfo(line, true) - if alias != "" { - p.alias[pkg] = alias - } - - p.comment[pkg] = comment - pkgType := getPkgType(pkg, localFlag) - p.list[pkgType] = append(p.list[pkgType], pkg) - continue +func WriteFormattedFiles(paths []string, cfg config.Config) error { + return processGoFilesInPaths(paths, cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { + if bytes.Equal(unmodifiedFile, formattedFile) { + log.L().Debug(fmt.Sprintf("Skipping correctly formatted File: %s", filePath)) + return nil } - - pkg, alias, _ := getPkgInfo(line, false) - - if alias != "" { - p.alias[pkg] = alias - } - - pkgType := getPkgType(pkg, localFlag) - p.list[pkgType] = append(p.list[pkgType], pkg) - } - - return p + log.L().Info(fmt.Sprintf("Writing formatted File: %s", filePath)) + return os.WriteFile(filePath, formattedFile, 0o644) + }) } -// fmt format import pkgs as expected -func (p *pkg) fmt() []byte { - ret := make([]string, 0, 100) - - for pkgType := range []int{standard, remote, local} { - sort.Strings(p.list[pkgType]) - for _, s := range p.list[pkgType] { - if p.comment[s] != "" { - l := fmt.Sprintf("%s%s%s%s", linebreak, indent, p.comment[s], linebreak) - ret = append(ret, l) - } - - if p.alias[s] != "" { - s = fmt.Sprintf("%s%s%s%s%s", indent, p.alias[s], blank, s, linebreak) - } else { - s = fmt.Sprintf("%s%s%s", indent, s, linebreak) - } - - ret = append(ret, s) +func ListUnFormattedFiles(paths []string, cfg config.Config) error { + return processGoFilesInPaths(paths, cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { + if bytes.Equal(unmodifiedFile, formattedFile) { + return nil } - - if len(p.list[pkgType]) > 0 { - ret = append(ret, linebreak) - } - } - if len(ret) > 0 && ret[len(ret)-1] == linebreak { - ret = ret[:len(ret)-1] - } - - // remove duplicate empty lines - s1 := fmt.Sprintf("%s%s%s%s", linebreak, linebreak, linebreak, indent) - s2 := fmt.Sprintf("%s%s%s", linebreak, linebreak, indent) - return []byte(strings.ReplaceAll(strings.Join(ret, ""), s1, s2)) + fmt.Println(filePath) + return nil + }) } -// getPkgInfo assume line is a import path, and return (path, alias, comment) -func getPkgInfo(line string, comment bool) (string, string, string) { - if comment { - s := strings.Split(line, commentFlag) - pkgArray := strings.Split(s[0], blank) - if len(pkgArray) > 1 { - return pkgArray[1], pkgArray[0], fmt.Sprintf("%s%s%s", commentFlag, blank, strings.TrimSpace(s[1])) - } else { - return strings.TrimSpace(pkgArray[0]), "", fmt.Sprintf("%s%s%s", commentFlag, blank, strings.TrimSpace(s[1])) - } - } else { - pkgArray := strings.Split(line, blank) - if len(pkgArray) > 1 { - return pkgArray[1], pkgArray[0], "" - } else { - return pkgArray[0], "", "" - } - } +func DiffFormattedFiles(paths []string, cfg config.Config) error { + return processStdInAndGoFilesInPaths(paths, cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { + fileURI := span.URIFromPath(filePath) + edits := myers.ComputeEdits(fileURI, string(unmodifiedFile), string(formattedFile)) + unifiedEdits := gotextdiff.ToUnified(filePath, filePath, string(unmodifiedFile), edits) + fmt.Printf("%v", unifiedEdits) + 
return nil + }) +} + +func DiffFormattedFilesToArray(paths []string, cfg config.Config, diffs *[]string, lock *sync.Mutex) error { + log.InitLogger() + defer log.L().Sync() + return processStdInAndGoFilesInPaths(paths, cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { + fileURI := span.URIFromPath(filePath) + edits := myers.ComputeEdits(fileURI, string(unmodifiedFile), string(formattedFile)) + unifiedEdits := gotextdiff.ToUnified(filePath, filePath, string(unmodifiedFile), edits) + lock.Lock() + *diffs = append(*diffs, fmt.Sprint(unifiedEdits)) + lock.Unlock() + return nil + }) } -func getPkgType(line string, localFlag []string) int { - pkgName := strings.Trim(line, "\"\\`") - - for _, localPkg := range localFlag { - if strings.HasPrefix(pkgName, localPkg) { - return local - } - } - - if isStandardPackage(pkgName) { - return standard - } +type fileFormattingFunc func(filePath string, unmodifiedFile, formattedFile []byte) error - return remote +func processStdInAndGoFilesInPaths(paths []string, cfg config.Config, fileFunc fileFormattingFunc) error { + return ProcessFiles(io.StdInGenerator.Combine(io.GoFilesInPathsGenerator(paths, cfg.SkipVendor)), cfg, fileFunc) } -const ( - blank = " " - indent = "\t" - linebreak = "\n" -) - -func diff(b1, b2 []byte, filename string) (data []byte, err error) { - f1, err := writeTempFile("", "gci", b1) - if err != nil { - return - } - defer os.Remove(f1) - - f2, err := writeTempFile("", "gci", b2) - if err != nil { - return - } - defer os.Remove(f2) - - cmd := "diff" - - data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput() - if len(data) > 0 { - // diff exits with a non-zero status when the files don't match. - // Ignore that failure as long as we get output. - return replaceTempFilename(data, filename) - } - return +func processGoFilesInPaths(paths []string, cfg config.Config, fileFunc fileFormattingFunc) error { + return ProcessFiles(io.GoFilesInPathsGenerator(paths, cfg.SkipVendor), cfg, fileFunc) } -func writeTempFile(dir, prefix string, data []byte) (string, error) { - file, err := ioutil.TempFile(dir, prefix) +func ProcessFiles(fileGenerator io.FileGeneratorFunc, cfg config.Config, fileFunc fileFormattingFunc) error { + var taskGroup errgroup.Group + files, err := fileGenerator() if err != nil { - return "", err - } - _, err = file.Write(data) - if err1 := file.Close(); err == nil { - err = err1 - } - if err != nil { - os.Remove(file.Name()) - return "", err - } - return file.Name(), nil -} - -// replaceTempFilename replaces temporary filenames in diff with actual one. -// -// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500 -// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500 -// ... -// -> -// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 -// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 -// ... -func replaceTempFilename(diff []byte, filename string) ([]byte, error) { - bs := bytes.SplitN(diff, []byte{'\n'}, 3) - if len(bs) < 3 { - return nil, fmt.Errorf("got unexpected diff for %s", filename) - } - // Preserve timestamps. - var t0, t1 []byte - if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { - t0 = bs[0][i:] + return err } - if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { - t1 = bs[1][i:] + for _, file := range files { + // run file processing in parallel + taskGroup.Go(processingFunc(file, cfg, fileFunc)) } - // Always print filepath with slash separator. 
- f := filepath.ToSlash(filename) - bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) - bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) - return bytes.Join(bs, []byte{'\n'}), nil + return taskGroup.Wait() } -func visitFile(set *FlagSet) filepath.WalkFunc { - return func(path string, f os.FileInfo, err error) error { - if err == nil && isGoFile(f) { - err = processFile(path, os.Stdout, set) +func processingFunc(file io.FileObj, cfg config.Config, formattingFunc fileFormattingFunc) func() error { + return func() error { + unmodifiedFile, formattedFile, err := LoadFormatGoFile(file, cfg) + if err != nil { + // if errors.Is(err, FileParsingError{}) { + // // do not process files that are improperly formatted + // return nil + // } + return err } - return err + return formattingFunc(file.Path(), unmodifiedFile, formattedFile) } } -func WalkDir(path string, set *FlagSet) error { - return filepath.Walk(path, visitFile(set)) -} - -func isGoFile(f os.FileInfo) bool { - // ignore non-Go files - name := f.Name() - return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") -} +func LoadFormatGoFile(file io.FileObj, cfg config.Config) (src, dist []byte, err error) { + src, err = file.Load() + log.L().Debug(fmt.Sprintf("Loaded File: %s", file.Path())) + if err != nil { + return nil, nil, err + } -func ProcessFile(filename string, out io.Writer, set *FlagSet) error { - return processFile(filename, out, set) + return LoadFormat(src, file.Path(), cfg) } -func processFile(filename string, out io.Writer, set *FlagSet) error { - var err error +func LoadFormat(in []byte, path string, cfg config.Config) (src, dist []byte, err error) { + src = in - f, err := os.Open(filename) - if err != nil { - return err + if cfg.SkipGenerated && parse.IsGeneratedFileByComment(string(src)) { + return src, src, nil } - defer f.Close() - src, err := ioutil.ReadAll(f) + imports, headEnd, tailStart, cStart, cEnd, err := parse.ParseFile(src, path) if err != nil { - return err + if errors.Is(err, parse.NoImportError{}) { + return src, src, nil + } + return nil, nil, err } - ori := make([]byte, len(src)) - copy(ori, src) - start := bytes.Index(src, importStartFlag) - // in case no importStartFlag or importStartFlag exist in the commentFlag - if start < 0 { - fmt.Printf("skip file %s since no import\n", filename) - return nil + // do not do format if only one import + if len(imports) <= 1 { + return src, src, nil } - end := bytes.Index(src[start:], importEndFlag) + start - ret := bytes.Split(src[start+len(importStartFlag):end], []byte(linebreak)) + result, err := format.Format(imports, &cfg) + if err != nil { + return nil, nil, err + } - p := newPkg(ret, set.LocalFlag) + firstWithIndex := true - res := append(src[:start+len(importStartFlag)], append(p.fmt(), src[end+1:]...)...) + var body []byte - if !bytes.Equal(ori, res) { - if *set.DoWrite { - // On Windows, we need to re-set the permissions from the file. See golang/go#38225. - var perms os.FileMode - if fi, err := os.Stat(filename); err == nil { - perms = fi.Mode() & os.ModePerm + // order by section list + for _, s := range cfg.Sections { + if len(result[s.String()]) > 0 { + if len(body) > 0 { + body = append(body, utils.Linebreak) } - err = ioutil.WriteFile(filename, res, perms) - if err != nil { - return err + for _, d := range result[s.String()] { + AddIndent(&body, &firstWithIndex) + body = append(body, src[d.Start:d.End]...) 
} } - if *set.DoDiff { - data, err := diff(ori, res, filename) - if err != nil { - return fmt.Errorf("failed to diff: %v", err) - } - fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) - if _, err := out.Write(data); err != nil { - return fmt.Errorf("failed to write: %v", err) - } - } - } - if !*set.DoWrite && !*set.DoDiff { - if _, err = out.Write(res); err != nil { - return fmt.Errorf("failed to write: %v", err) - } } - return err -} - -// Run return source and result in []byte if succeed -func Run(filename string, set *FlagSet) ([]byte, []byte, error) { - var err error + head := make([]byte, headEnd) + copy(head, src[:headEnd]) + tail := make([]byte, len(src)-tailStart) + copy(tail, src[tailStart:]) - f, err := os.Open(filename) - if err != nil { - return nil, nil, err + // ensure C + if cStart != 0 { + head = append(head, src[cStart:cEnd]...) + head = append(head, utils.Linebreak) } - defer f.Close() - src, err := ioutil.ReadAll(f) - if err != nil { - return nil, nil, err + // add beginning of import block + head = append(head, `import (`...) + head = append(head, utils.Linebreak) + // add end of import block + body = append(body, []byte{utils.RightParenthesis, utils.Linebreak}...) + + log.L().Debug(fmt.Sprintf("head:\n%s", head)) + log.L().Debug(fmt.Sprintf("body:\n%s", body)) + if len(tail) > 20 { + log.L().Debug(fmt.Sprintf("tail:\n%s", tail[:20])) + } else { + log.L().Debug(fmt.Sprintf("tail:\n%s", tail)) } - ori := make([]byte, len(src)) - copy(ori, src) - start := bytes.Index(src, importStartFlag) - // in case no importStartFlag or importStartFlag exist in the commentFlag - if start < 0 { - return nil, nil, nil + var totalLen int + slices := [][]byte{head, body, tail} + for _, s := range slices { + totalLen += len(s) } - end := bytes.Index(src[start:], importEndFlag) + start - - // in case import flags are part of a codegen template, or otherwise "wrong" - if start+len(importStartFlag) > end { - return nil, nil, nil + dist = make([]byte, totalLen) + var i int + for _, s := range slices { + i += copy(dist[i:], s) } - ret := bytes.Split(src[start+len(importStartFlag):end], []byte(linebreak)) + // remove ^M(\r\n) from Win to Unix + dist = bytes.ReplaceAll(dist, []byte{utils.WinLinebreak}, []byte{utils.Linebreak}) - p := newPkg(ret, set.LocalFlag) + log.L().Debug(fmt.Sprintf("raw:\n%s", dist)) + dist, err = goFormat.Source(dist) + if err != nil { + return nil, nil, err + } - res := append(src[:start+len(importStartFlag)], append(p.fmt(), src[end+1:]...)...) + return src, dist, nil +} - if bytes.Equal(ori, res) { - return ori, nil, nil +func AddIndent(in *[]byte, first *bool) { + if *first { + *first = false + return } - - return ori, res, nil + *in = append(*in, utils.Indent) } diff --git a/vendor/github.com/daixiang0/gci/pkg/gci/std.go b/vendor/github.com/daixiang0/gci/pkg/gci/std.go deleted file mode 100644 index ac96b55ab1..0000000000 --- a/vendor/github.com/daixiang0/gci/pkg/gci/std.go +++ /dev/null @@ -1,161 +0,0 @@ -package gci - -// Code generated based on go1.16beta1. DO NOT EDIT. 
- -var standardPackages = map[string]struct{}{ - "archive/tar": {}, - "archive/zip": {}, - "bufio": {}, - "bytes": {}, - "compress/bzip2": {}, - "compress/flate": {}, - "compress/gzip": {}, - "compress/lzw": {}, - "compress/zlib": {}, - "container/heap": {}, - "container/list": {}, - "container/ring": {}, - "context": {}, - "crypto": {}, - "crypto/aes": {}, - "crypto/cipher": {}, - "crypto/des": {}, - "crypto/dsa": {}, - "crypto/ecdsa": {}, - "crypto/ed25519": {}, - "crypto/elliptic": {}, - "crypto/hmac": {}, - "crypto/md5": {}, - "crypto/rand": {}, - "crypto/rc4": {}, - "crypto/rsa": {}, - "crypto/sha1": {}, - "crypto/sha256": {}, - "crypto/sha512": {}, - "crypto/subtle": {}, - "crypto/tls": {}, - "crypto/x509": {}, - "crypto/x509/pkix": {}, - "database/sql": {}, - "database/sql/driver": {}, - "debug/dwarf": {}, - "debug/elf": {}, - "debug/gosym": {}, - "debug/macho": {}, - "debug/pe": {}, - "debug/plan9obj": {}, - "embed": {}, - "encoding": {}, - "encoding/ascii85": {}, - "encoding/asn1": {}, - "encoding/base32": {}, - "encoding/base64": {}, - "encoding/binary": {}, - "encoding/csv": {}, - "encoding/gob": {}, - "encoding/hex": {}, - "encoding/json": {}, - "encoding/pem": {}, - "encoding/xml": {}, - "errors": {}, - "expvar": {}, - "flag": {}, - "fmt": {}, - "go/ast": {}, - "go/build": {}, - "go/constant": {}, - "go/doc": {}, - "go/format": {}, - "go/importer": {}, - "go/parser": {}, - "go/printer": {}, - "go/scanner": {}, - "go/token": {}, - "go/types": {}, - "hash": {}, - "hash/adler32": {}, - "hash/crc32": {}, - "hash/crc64": {}, - "hash/fnv": {}, - "hash/maphash": {}, - "html": {}, - "html/template": {}, - "image": {}, - "image/color": {}, - "image/color/palette": {}, - "image/draw": {}, - "image/gif": {}, - "image/jpeg": {}, - "image/png": {}, - "index/suffixarray": {}, - "io": {}, - "io/fs": {}, - "io/ioutil": {}, - "log": {}, - "log/syslog": {}, - "math": {}, - "math/big": {}, - "math/bits": {}, - "math/cmplx": {}, - "math/rand": {}, - "mime": {}, - "mime/multipart": {}, - "mime/quotedprintable": {}, - "net": {}, - "net/http": {}, - "net/http/cgi": {}, - "net/http/cookiejar": {}, - "net/http/fcgi": {}, - "net/http/httptest": {}, - "net/http/httptrace": {}, - "net/http/httputil": {}, - "net/http/pprof": {}, - "net/mail": {}, - "net/rpc": {}, - "net/rpc/jsonrpc": {}, - "net/smtp": {}, - "net/textproto": {}, - "net/url": {}, - "os": {}, - "os/exec": {}, - "os/signal": {}, - "os/user": {}, - "path": {}, - "path/filepath": {}, - "plugin": {}, - "reflect": {}, - "regexp": {}, - "regexp/syntax": {}, - "runtime": {}, - "runtime/cgo": {}, - "runtime/debug": {}, - "runtime/metrics": {}, - "runtime/pprof": {}, - "runtime/race": {}, - "runtime/trace": {}, - "sort": {}, - "strconv": {}, - "strings": {}, - "sync": {}, - "sync/atomic": {}, - "syscall": {}, - "testing": {}, - "testing/fstest": {}, - "testing/iotest": {}, - "testing/quick": {}, - "text/scanner": {}, - "text/tabwriter": {}, - "text/template": {}, - "text/template/parse": {}, - "time": {}, - "time/tzdata": {}, - "unicode": {}, - "unicode/utf16": {}, - "unicode/utf8": {}, - "unsafe": {}, -} - -func isStandardPackage(pkg string) bool { - _, ok := standardPackages[pkg] - return ok -} diff --git a/vendor/github.com/daixiang0/gci/pkg/gci/testdata.go b/vendor/github.com/daixiang0/gci/pkg/gci/testdata.go new file mode 100644 index 0000000000..4f60e88069 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/gci/testdata.go @@ -0,0 +1,1298 @@ +package gci + +type Cases struct { + name, config, in, out string +} + +var commonConfig = 
`sections: + - Standard + - Default + - Prefix(github.com/daixiang0) +` + +var testCases = []Cases{ + { + "already-good", + + commonConfig, + + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "blank-format", + + commonConfig, + + `package main +import ( + "fmt" + + // comment + g "github.com/golang" // comment + + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + // comment + g "github.com/golang" // comment + + "github.com/daixiang0/gci" +) +`, + }, + { + "cgo-block", + + commonConfig, + + `package main + +import ( + /* + #include "types.h" + */ + "C" +) +`, + `package main + +import ( + /* + #include "types.h" + */ + "C" +) +`, + }, + { + "cgo-block-after-import", + + commonConfig, + + `package main + +import ( + "fmt" + + "github.com/daixiang0/gci" + g "github.com/golang" +) + +// #cgo CFLAGS: -DPNG_DEBUG=1 +// #cgo amd64 386 CFLAGS: -DX86=1 +// #cgo LDFLAGS: -lpng +// #include +import "C" +`, + `package main + +// #cgo CFLAGS: -DPNG_DEBUG=1 +// #cgo amd64 386 CFLAGS: -DX86=1 +// #cgo LDFLAGS: -lpng +// #include +import "C" + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "cgo-block-before-import", + + commonConfig, + + `package main + +// #cgo CFLAGS: -DPNG_DEBUG=1 +// #cgo amd64 386 CFLAGS: -DX86=1 +// #cgo LDFLAGS: -lpng +// #include +import "C" + +import ( + "fmt" + + "github.com/daixiang0/gci" + + g "github.com/golang" +) +`, + `package main + +// #cgo CFLAGS: -DPNG_DEBUG=1 +// #cgo amd64 386 CFLAGS: -DX86=1 +// #cgo LDFLAGS: -lpng +// #include +import "C" + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "cgo-block-mixed", + + commonConfig, + + `package main + +import ( + /* #include "types.h" + */"C" +) +`, + `package main + +import ( + /* #include "types.h" + */"C" +) +`, + }, + { + "cgo-block-mixed-with-content", + + commonConfig, + + `package main + +import ( + /* #include "types.h" + #include "other.h" */"C" +) +`, + `package main + +import ( + /* #include "types.h" + #include "other.h" */"C" +) +`, + }, + { + "cgo-block-prefix", + + commonConfig, + + `package main + +import ( + /* #include "types.h" */ "C" +) +`, + `package main + +import ( + /* #include "types.h" */ "C" +) +`, + }, + { + "cgo-block-single-line", + + commonConfig, + + `package main + +import ( + /* #include "types.h" */ + "C" +) +`, + `package main + +import ( + /* #include "types.h" */ + "C" +) +`, + }, + { + "cgo-line", + + commonConfig, + + `package main + +import ( + // #include "types.h" + "C" +) +`, + `package main + +import ( + // #include "types.h" + "C" +) +`, + }, + { + "cgo-multiline", + + commonConfig, + + `package main + +import ( + // #include "types.h" + // #include "other.h" + "C" +) +`, + `package main + +import ( + // #include "types.h" + // #include "other.h" + "C" +) +`, + }, + { + "cgo-single", + + commonConfig, + + `package main + +import ( + "fmt" + + "github.com/daixiang0/gci" +) + +import "C" + +import "github.com/golang" + +import ( + "github.com/daixiang0/gci" +) +`, + `package main + +import "C" + +import ( + "fmt" + + "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "comment", + + commonConfig, + + `package main +import ( + //Do not forget to run Gci + "fmt" +) +`, + `package main +import ( + //Do not forget to run Gci + "fmt" +) +`, + }, + { + "comment-before-import", + + commonConfig, + 
+ `package main + +// comment +import ( + "fmt" + "os" + + "github.com/daixiang0/gci" +) +`, + `package main + +// comment +import ( + "fmt" + "os" + + "github.com/daixiang0/gci" +) +`, + }, + { + "comment-in-the-tail", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) + +type test int + +// test +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) + +type test int + +// test +`, + }, + { + "comment-top", + + commonConfig, + + `package main + +import ( + "os" // https://pkg.go.dev/os + // https://pkg.go.dev/fmt + "fmt" +) +`, + `package main + +import ( + // https://pkg.go.dev/fmt + "fmt" + "os" // https://pkg.go.dev/os +) +`, + }, + { + "comment-whithout-whitespace", + + commonConfig, + + `package proc + +import ( + "context"// no separating whitespace here //nolint:confusion +) +`, + `package proc + +import ( + "context"// no separating whitespace here //nolint:confusion +) +`, + }, + { + "comment-with-slashslash", + + commonConfig, + + `package main + +import ( + "fmt" // https://pkg.go.dev/fmt +) +`, + `package main + +import ( + "fmt" // https://pkg.go.dev/fmt +) +`, + }, + { + "custom-order", + + `customOrder: true +sections: + - Prefix(github.com/daixiang0) + - Default + - Standard +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/a" +) +`, + `package main + +import ( + "github.com/daixiang0/a" + + g "github.com/golang" + + "fmt" +) +`, + }, + { + "default-order", + + `sections: + - Standard + - Prefix(github.com/daixiang0) + - Default +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/a" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/a" +) +`, + }, + { + "dot-and-blank", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) + - Blank + - Dot +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + . "github.com/golang/dot" + _ "github.com/golang/blank" + + "github.com/daixiang0/a" + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" + . "github.com/daixiang0/gci/dot" + _ "github.com/daixiang0/gci/blank" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/a" + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" + + _ "github.com/daixiang0/gci/blank" + _ "github.com/golang/blank" + + . "github.com/daixiang0/gci/dot" + . 
"github.com/golang/dot" +) +`, + }, + { + "duplicate-imports", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + a "github.com/daixiang0/gci" + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + a "github.com/daixiang0/gci" +) +`, + }, + { + "grouped-multiple-custom", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0,gitlab.com/daixiang0,daixiang0) +`, + `package main + +import ( + "daixiang0/lib1" + "fmt" + "github.com/daixiang0/gci" + "gitlab.com/daixiang0/gci" + g "github.com/golang" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "daixiang0/lib1" + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" + "gitlab.com/daixiang0/gci" +) +`, + }, + { + "leading-comment", + + commonConfig, + + `package main + +import ( + // foo + "fmt" +) +`, + `package main + +import ( + // foo + "fmt" +) +`, + }, + { + "linebreak", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) +`, + `package main + +import ( + g "github.com/golang" + + "fmt" + + "github.com/daixiang0/gci" + +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "linebreak-no-custom", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) +`, + `package main + +import ( + g "github.com/golang" + + "fmt" + +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" +) +`, + }, + { + "mismatch-section", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) + - Prefix(github.com/daixiang0/gci) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "multiple-custom", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) + - Prefix(github.com/daixiang0/gci) + - Prefix(github.com/daixiang0/gci/subtest) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/a" + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/a" + + "github.com/daixiang0/gci" + + "github.com/daixiang0/gci/subtest" +) +`, + }, + { + "multiple-imports", + + commonConfig, + + `package main + +import "fmt" + +import "context" + +import ( + "os" + + "github.com/daixiang0/test" +) + +import "math" + + +// main +func main() { +} +`, + `package main + +import ( + "context" + "fmt" + "math" + "os" + + "github.com/daixiang0/test" +) + +// main +func main() { +} +`, + }, + { + "multiple-line-comment", + + commonConfig, + + `package proc + +import ( + "context" // in-line comment + "fmt" + "os" + + //nolint:depguard // A multi-line comment explaining why in + // this one case it's OK to use os/exec even though depguard + // is configured to force us to use dlib/exec instead. + "os/exec" + + "golang.org/x/sys/unix" + "github.com/local/dlib/dexec" +) +`, + `package proc + +import ( + "context" // in-line comment + "fmt" + "os" + //nolint:depguard // A multi-line comment explaining why in + // this one case it's OK to use os/exec even though depguard + // is configured to force us to use dlib/exec instead. 
+ "os/exec" + + "github.com/local/dlib/dexec" + "golang.org/x/sys/unix" +) +`, + }, + { + "nochar-after-import", + + commonConfig, + + `package main + +import ( + "fmt" +) +`, + `package main + +import ( + "fmt" +) +`, + }, + { + "no-format", + + commonConfig, + + `package main + +import( +"fmt" + +g "github.com/golang" + +"github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "nolint", + + commonConfig, + + `package main + +import ( + "fmt" + + "github.com/forbidden/pkg" //nolint:depguard + + _ "github.com/daixiang0/gci" //nolint:depguard +) +`, + `package main + +import ( + "fmt" + + "github.com/forbidden/pkg" //nolint:depguard + + _ "github.com/daixiang0/gci" //nolint:depguard +) +`, + }, + { + "number-in-alias", + + commonConfig, + + `package main + +import ( + "fmt" + + go_V1 "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + go_V1 "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "one-import", + + commonConfig, + + `package main +import ( + "fmt" +) + +func main() { +} +`, + `package main +import ( + "fmt" +) + +func main() { +} +`, + }, + { + "one-import-one-line", + + commonConfig, + + `package main + +import "fmt" + +func main() { +} +`, + `package main + +import "fmt" + +func main() { +} +`, + }, + { + "one-line-import-after-import", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) +`, + `package main + +import ( + "fmt" + "os" + + "github.com/daixiang0/test" +) + +import "context" +`, + `package main + +import ( + "context" + "fmt" + "os" + + "github.com/daixiang0/test" +) +`, + }, + { + "same-prefix-custom", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0/gci) + - Prefix(github.com/daixiang0/gci/subtest) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + + "github.com/daixiang0/gci/subtest" +) +`, + }, + { + "simple-case", + + commonConfig, + + `package main + +import ( + "golang.org/x/tools" + + "fmt" + + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + "golang.org/x/tools" + + "github.com/daixiang0/gci" +) +`, + }, + { + "whitespace-test", + + commonConfig, + + `package main + +import ( + "fmt" + "github.com/golang" // golang + alias "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + "github.com/golang" // golang + + alias "github.com/daixiang0/gci" +) +`, + }, + { + "with-above-comment-and-alias", + + commonConfig, + + `package main + +import ( + "fmt" + // golang + _ "github.com/golang" + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + // golang + _ "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "with-comment-and-alias", + + commonConfig, + + `package main + +import ( + "fmt" + _ "github.com/golang" // golang + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + _ "github.com/golang" // golang + + "github.com/daixiang0/gci" +) +`, + }, + { + "same-prefix-custom", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0/gci) + - Prefix(github.com/daixiang0/gci/subtest) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + g 
"github.com/golang" + + "github.com/daixiang0/gci" + + "github.com/daixiang0/gci/subtest" +) +`, + }, + { + "same-prefix-custom", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0/gci) + - Prefix(github.com/daixiang0/gci/subtest) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + + "github.com/daixiang0/gci/subtest" +) +`, + }, + { + "blank-in-config", + + `sections: + - Standard + - Default + - Prefix( github.com/daixiang0/gci, github.com/daixiang0/gci/subtest ) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + }, + { + "alias", + + `sections: + - Standard + - Default + - Alias +`, + `package main + +import ( + testing "github.com/daixiang0/test" + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" + + testing "github.com/daixiang0/test" + g "github.com/golang" +) +`, + }, + { + "no-trailing-newline", + + `sections: + - Standard +`, + `package main + +import ( + "net" + "fmt" +)`, + `package main + +import ( + "fmt" + "net" +) +`, + }, +} diff --git a/vendor/github.com/daixiang0/gci/pkg/io/file.go b/vendor/github.com/daixiang0/gci/pkg/io/file.go new file mode 100644 index 0000000000..79950792ca --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/io/file.go @@ -0,0 +1,64 @@ +package io + +import "io/ioutil" + +// FileObj allows mocking the access to files +type FileObj interface { + Load() ([]byte, error) + Path() string +} + +// File represents a file that can be loaded from the file system +type File struct { + FilePath string +} + +func (f File) Path() string { + return f.FilePath +} + +func (f File) Load() ([]byte, error) { + return ioutil.ReadFile(f.FilePath) +} + +// FileGeneratorFunc returns a list of files that can be loaded and processed +type FileGeneratorFunc func() ([]FileObj, error) + +func (a FileGeneratorFunc) Combine(b FileGeneratorFunc) FileGeneratorFunc { + return func() ([]FileObj, error) { + files, err := a() + if err != nil { + return nil, err + } + additionalFiles, err := b() + if err != nil { + return nil, err + } + files = append(files, additionalFiles...) 
+ return files, err + } +} + +func GoFilesInPathsGenerator(paths []string, skipVendor bool) FileGeneratorFunc { + checkFunc := isGoFile + if skipVendor { + checkFunc = checkChains(isGoFile, isOutsideVendorDir) + } + + return FilesInPathsGenerator(paths, checkFunc) +} + +func FilesInPathsGenerator(paths []string, fileCheckFun fileCheckFunction) FileGeneratorFunc { + return func() (foundFiles []FileObj, err error) { + for _, path := range paths { + files, err := FindFilesForPath(path, fileCheckFun) + if err != nil { + return nil, err + } + for _, filePath := range files { + foundFiles = append(foundFiles, File{filePath}) + } + } + return foundFiles, nil + } +} diff --git a/vendor/github.com/daixiang0/gci/pkg/io/search.go b/vendor/github.com/daixiang0/gci/pkg/io/search.go new file mode 100644 index 0000000000..cd821582e7 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/io/search.go @@ -0,0 +1,77 @@ +package io + +import ( + "io/fs" + "os" + "path/filepath" +) + +type fileCheckFunction func(path string, file os.FileInfo) bool + +func FindFilesForPath(path string, fileCheckFun fileCheckFunction) ([]string, error) { + switch entry, err := os.Stat(path); { + case err != nil: + return nil, err + case entry.IsDir(): + return findFilesForDirectory(path, fileCheckFun) + case fileCheckFun(path, entry): + return []string{filepath.Clean(path)}, nil + default: + return []string{}, nil + } +} + +func findFilesForDirectory(dirPath string, fileCheckFun fileCheckFunction) ([]string, error) { + var filePaths []string + err := filepath.WalkDir(dirPath, func(path string, entry fs.DirEntry, err error) error { + if err != nil { + return err + } + file, err := entry.Info() + if err != nil { + return err + } + if !entry.IsDir() && fileCheckFun(path, file) { + filePaths = append(filePaths, filepath.Clean(path)) + } + return nil + }) + if err != nil { + return nil, err + } + return filePaths, nil +} + +func isGoFile(_ string, file os.FileInfo) bool { + return !file.IsDir() && filepath.Ext(file.Name()) == ".go" +} + +func isOutsideVendorDir(path string, _ os.FileInfo) bool { + for { + base := filepath.Base(path) + if base == "vendor" { + return false + } + + prevPath := path + path = filepath.Dir(path) + + if prevPath == path { + break + } + } + + return true +} + +func checkChains(funcs ...fileCheckFunction) fileCheckFunction { + return func(path string, file os.FileInfo) bool { + for _, checkFunc := range funcs { + if !checkFunc(path, file) { + return false + } + } + + return true + } +} diff --git a/vendor/github.com/daixiang0/gci/pkg/io/stdin.go b/vendor/github.com/daixiang0/gci/pkg/io/stdin.go new file mode 100644 index 0000000000..ccab2844f4 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/io/stdin.go @@ -0,0 +1,27 @@ +package io + +import ( + "io/ioutil" + "os" +) + +type stdInFile struct{} + +func (s stdInFile) Load() ([]byte, error) { + return ioutil.ReadAll(os.Stdin) +} + +func (s stdInFile) Path() string { + return "StdIn" +} + +var StdInGenerator FileGeneratorFunc = func() ([]FileObj, error) { + stat, err := os.Stdin.Stat() + if err != nil { + return nil, err + } + if (stat.Mode() & os.ModeCharDevice) == 0 { + return []FileObj{stdInFile{}}, nil + } + return []FileObj{}, nil +} diff --git a/vendor/github.com/daixiang0/gci/pkg/log/log.go b/vendor/github.com/daixiang0/gci/pkg/log/log.go new file mode 100644 index 0000000000..ab33739ca3 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/log/log.go @@ -0,0 +1,50 @@ +package log + +import ( + "sync" + + "go.uber.org/zap" + 
"go.uber.org/zap/zapcore" +) + +// Use L to log with Zap +var logger *zap.Logger + +// Keep the config to reference the atomicLevel for changing levels +var logConfig zap.Config + +var doOnce sync.Once + +// InitLogger sets up the logger +func InitLogger() { + doOnce.Do(func() { + logConfig = zap.NewDevelopmentConfig() + + logConfig.EncoderConfig.TimeKey = "timestamp" + logConfig.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + logConfig.Level.SetLevel(zapcore.InfoLevel) + logConfig.OutputPaths = []string{"stderr"} + + var err error + logger, err = logConfig.Build() + if err != nil { + panic(err) + } + }) +} + +// SetLevel allows you to set the level of the default gci logger. +// This will not work if you replace the logger +func SetLevel(level zapcore.Level) { + logConfig.Level.SetLevel(level) +} + +// L returns the logger +func L() *zap.Logger { + return logger +} + +// SetLogger allows you to set the logger to whatever you want +func SetLogger(l *zap.Logger) { + logger = l +} diff --git a/vendor/github.com/daixiang0/gci/pkg/parse/parse.go b/vendor/github.com/daixiang0/gci/pkg/parse/parse.go new file mode 100644 index 0000000000..e8532f850d --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/parse/parse.go @@ -0,0 +1,200 @@ +package parse + +import ( + "go/ast" + "go/parser" + "go/token" + "sort" + "strings" +) + +const C = "\"C\"" + +type GciImports struct { + // original index of import group, include doc, name, path and comment + Start, End int + Name, Path string +} +type ImportList []*GciImports + +func (l ImportList) Len() int { + return len(l) +} + +func (l ImportList) Less(i, j int) bool { + if strings.Compare(l[i].Path, l[j].Path) == 0 { + return strings.Compare(l[i].Name, l[j].Name) < 0 + } + + return strings.Compare(l[i].Path, l[j].Path) < 0 +} + +func (l ImportList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } + +/* + * AST considers a import block as below: + * ``` + * Doc + * Name Path Comment + * ``` + * An example is like below: + * ``` + * // test + * test "fmt" // test + * ``` + * getImports return a import block with name, start and end index + */ +func getImports(imp *ast.ImportSpec) (start, end int, name string) { + if imp.Doc != nil { + // doc poc need minus one to get the first index of comment + start = int(imp.Doc.Pos()) - 1 + } else { + if imp.Name != nil { + // name pos need minus one too + start = int(imp.Name.Pos()) - 1 + } else { + // path pos start without quote, need minus one for it + start = int(imp.Path.Pos()) - 1 + } + } + + if imp.Name != nil { + name = imp.Name.Name + } + + if imp.Comment != nil { + end = int(imp.Comment.End()) + } else { + end = int(imp.Path.End()) + } + return +} + +func ParseFile(src []byte, filename string) (ImportList, int, int, int, int, error) { + fileSet := token.NewFileSet() + f, err := parser.ParseFile(fileSet, filename, src, parser.ParseComments) + if err != nil { + return nil, 0, 0, 0, 0, err + } + + if len(f.Imports) == 0 { + return nil, 0, 0, 0, 0, NoImportError{} + } + + var ( + // headEnd means the start of import block + headEnd int + // tailStart means the end + 1 of import block + tailStart int + // cStart means the start of C import block + cStart int + // cEnd means the end of C import block + cEnd int + data ImportList + ) + + for index, decl := range f.Decls { + switch decl.(type) { + // skip BadDecl and FuncDecl + case *ast.GenDecl: + genDecl := decl.(*ast.GenDecl) + + if genDecl.Tok == token.IMPORT { + // there are two cases, both end with linebreak: + // 1. + // import ( + // "xxxx" + // ) + // 2. 
+ // import "xxx" + if headEnd == 0 { + headEnd = int(decl.Pos()) - 1 + } + tailStart = int(decl.End()) + if tailStart > len(src) { + tailStart = len(src) + } + + for _, spec := range genDecl.Specs { + imp := spec.(*ast.ImportSpec) + // there are only one C import block + // ensure C import block is the first import block + if imp.Path.Value == C { + /* + common case: + + // #include + import "C" + + notice that decl.Pos() == genDecl.Pos() > genDecl.Doc.Pos() + */ + if genDecl.Doc != nil { + cStart = int(genDecl.Doc.Pos()) - 1 + // if C import block is the first, update headEnd + if index == 0 { + headEnd = cStart + } + } else { + /* + special case: + + import "C" + */ + cStart = int(decl.Pos()) - 1 + } + + cEnd = int(decl.End()) + + continue + } + + start, end, name := getImports(imp) + + data = append(data, &GciImports{ + Start: start, + End: end, + Name: name, + Path: strings.Trim(imp.Path.Value, `"`), + }) + } + } + } + } + + sort.Sort(data) + return data, headEnd, tailStart, cStart, cEnd, nil +} + +// IsGeneratedFileByComment reports whether the source file is generated code. +// Using a bit laxer rules than https://golang.org/s/generatedcode to +// match more generated code. +// Taken from https://github.com/golangci/golangci-lint. +func IsGeneratedFileByComment(in string) bool { + const ( + genCodeGenerated = "code generated" + genDoNotEdit = "do not edit" + genAutoFile = "autogenerated file" // easyjson + genAutoGenerated = "automatically generated" // genny + ) + + markers := []string{genCodeGenerated, genDoNotEdit, genAutoFile, genAutoGenerated} + in = strings.ToLower(in) + for _, marker := range markers { + if strings.Contains(in, marker) { + return true + } + } + + return false +} + +type NoImportError struct{} + +func (n NoImportError) Error() string { + return "No imports" +} + +func (i NoImportError) Is(err error) bool { + _, ok := err.(NoImportError) + return ok +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/alias.go b/vendor/github.com/daixiang0/gci/pkg/section/alias.go new file mode 100644 index 0000000000..423e96acf0 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/alias.go @@ -0,0 +1,25 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +type Alias struct{} + +const AliasType = "alias" + +func (b Alias) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + if spec.Name != "." 
&& spec.Name != "_" && spec.Name != "" { + return specificity.NameMatch{} + } + return specificity.MisMatch{} +} + +func (b Alias) String() string { + return AliasType +} + +func (b Alias) Type() string { + return AliasType +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/blank.go b/vendor/github.com/daixiang0/gci/pkg/section/blank.go new file mode 100644 index 0000000000..4a2741773d --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/blank.go @@ -0,0 +1,25 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +type Blank struct{} + +const BlankType = "blank" + +func (b Blank) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + if spec.Name == "_" { + return specificity.NameMatch{} + } + return specificity.MisMatch{} +} + +func (b Blank) String() string { + return BlankType +} + +func (b Blank) Type() string { + return BlankType +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/commentline.go b/vendor/github.com/daixiang0/gci/pkg/section/commentline.go new file mode 100644 index 0000000000..c3ddd08249 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/commentline.go @@ -0,0 +1,24 @@ +package section + +import ( + "fmt" + + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +type CommentLine struct { + Comment string +} + +func (c CommentLine) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + return specificity.MisMatch{} +} + +func (c CommentLine) String() string { + return fmt.Sprintf("commentline(%s)", c.Comment) +} + +func (c CommentLine) Type() string { + return "commentline" +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/default.go b/vendor/github.com/daixiang0/gci/pkg/section/default.go new file mode 100644 index 0000000000..3af07a0927 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/default.go @@ -0,0 +1,22 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +const DefaultType = "default" + +type Default struct{} + +func (d Default) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + return specificity.Default{} +} + +func (d Default) String() string { + return DefaultType +} + +func (d Default) Type() string { + return DefaultType +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/dot.go b/vendor/github.com/daixiang0/gci/pkg/section/dot.go new file mode 100644 index 0000000000..8112eeb1dc --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/dot.go @@ -0,0 +1,25 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +type Dot struct{} + +const DotType = "dot" + +func (d Dot) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + if spec.Name == "." 
{ + return specificity.NameMatch{} + } + return specificity.MisMatch{} +} + +func (d Dot) String() string { + return DotType +} + +func (d Dot) Type() string { + return DotType +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/errors.go b/vendor/github.com/daixiang0/gci/pkg/section/errors.go new file mode 100644 index 0000000000..0a12091356 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/errors.go @@ -0,0 +1,107 @@ +package section + +import ( + "errors" + "fmt" + + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/utils" +) + +type SectionParsingError struct { + error +} + +func (s SectionParsingError) Unwrap() error { + return s.error +} + +func (s SectionParsingError) Wrap(sectionStr string) error { + return fmt.Errorf("failed to parse section %q: %w", sectionStr, s) +} + +func (s SectionParsingError) Is(err error) bool { + _, ok := err.(SectionParsingError) + return ok +} + +var MissingParameterClosingBracketsError = fmt.Errorf("section parameter is missing closing %q", utils.RightParenthesis) + +var MoreThanOneOpeningQuotesError = fmt.Errorf("found more than one %q parameter start sequences", utils.RightParenthesis) + +var SectionTypeDoesNotAcceptParametersError = errors.New("section type does not accept a parameter") + +var SectionTypeDoesNotAcceptPrefixError = errors.New("section may not contain a Prefix") + +var SectionTypeDoesNotAcceptSuffixError = errors.New("section may not contain a Suffix") + +type EqualSpecificityMatchError struct { + Imports *parse.GciImports + SectionA, SectionB Section +} + +func (e EqualSpecificityMatchError) Error() string { + return fmt.Sprintf("Import %v matched section %s and %s equally", e.Imports, e.SectionA, e.SectionB) +} + +func (e EqualSpecificityMatchError) Is(err error) bool { + _, ok := err.(EqualSpecificityMatchError) + return ok +} + +type NoMatchingSectionForImportError struct { + Imports *parse.GciImports +} + +func (n NoMatchingSectionForImportError) Error() string { + return fmt.Sprintf("No section found for Import: %v", n.Imports) +} + +func (n NoMatchingSectionForImportError) Is(err error) bool { + _, ok := err.(NoMatchingSectionForImportError) + return ok +} + +type InvalidImportSplitError struct { + segments []string +} + +func (i InvalidImportSplitError) Error() string { + return fmt.Sprintf("separating the inline comment from the import yielded an invalid number of segments: %v", i.segments) +} + +func (i InvalidImportSplitError) Is(err error) bool { + _, ok := err.(InvalidImportSplitError) + return ok +} + +type InvalidAliasSplitError struct { + segments []string +} + +func (i InvalidAliasSplitError) Error() string { + return fmt.Sprintf("separating the alias from the path yielded an invalid number of segments: %v", i.segments) +} + +func (i InvalidAliasSplitError) Is(err error) bool { + _, ok := err.(InvalidAliasSplitError) + return ok +} + +var ( + MissingImportStatementError = FileParsingError{errors.New("no import statement present in File")} + ImportStatementNotClosedError = FileParsingError{errors.New("import statement not closed")} +) + +type FileParsingError struct { + error +} + +func (f FileParsingError) Unwrap() error { + return f.error +} + +func (f FileParsingError) Is(err error) bool { + _, ok := err.(FileParsingError) + return ok +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/newline.go b/vendor/github.com/daixiang0/gci/pkg/section/newline.go new file mode 100644 index 0000000000..4bff91b9d4 --- /dev/null +++ 
b/vendor/github.com/daixiang0/gci/pkg/section/newline.go @@ -0,0 +1,22 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +const newLineName = "newline" + +type NewLine struct{} + +func (n NewLine) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + return specificity.MisMatch{} +} + +func (n NewLine) String() string { + return newLineName +} + +func (n NewLine) Type() string { + return newLineName +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/parser.go b/vendor/github.com/daixiang0/gci/pkg/section/parser.go new file mode 100644 index 0000000000..38435f540e --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/parser.go @@ -0,0 +1,46 @@ +package section + +import ( + "errors" + "fmt" + "strings" +) + +func Parse(data []string) (SectionList, error) { + if len(data) == 0 { + return nil, nil + } + + var list SectionList + var errString string + for _, d := range data { + s := strings.ToLower(d) + if len(s) == 0 { + return nil, nil + } + + if s == "default" { + list = append(list, Default{}) + } else if s == "standard" { + list = append(list, Standard{}) + } else if s == "newline" { + list = append(list, NewLine{}) + } else if strings.HasPrefix(s, "prefix(") && len(d) > 8 { + list = append(list, Custom{d[7 : len(d)-1]}) + } else if strings.HasPrefix(s, "commentline(") && len(d) > 13 { + list = append(list, Custom{d[12 : len(d)-1]}) + } else if s == "dot" { + list = append(list, Dot{}) + } else if s == "blank" { + list = append(list, Blank{}) + } else if s == "alias" { + list = append(list, Alias{}) + } else { + errString += fmt.Sprintf(" %s", s) + } + } + if errString != "" { + return nil, errors.New(fmt.Sprintf("invalid params:%s", errString)) + } + return list, nil +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/prefix.go b/vendor/github.com/daixiang0/gci/pkg/section/prefix.go new file mode 100644 index 0000000000..30bdd8f4ea --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/prefix.go @@ -0,0 +1,38 @@ +package section + +import ( + "fmt" + "strings" + + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +type Custom struct { + Prefix string +} + +// CustomSeparator allows you to group multiple custom prefix together in the same section +// gci diff -s standard -s default -s prefix(github.com/company,gitlab.com/company,companysuffix) +const CustomSeparator = "," + +const CustomType = "custom" + +func (c Custom) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + for _, prefix := range strings.Split(c.Prefix, CustomSeparator) { + prefix = strings.TrimSpace(prefix) + if strings.HasPrefix(spec.Path, prefix) { + return specificity.Match{Length: len(prefix)} + } + } + + return specificity.MisMatch{} +} + +func (c Custom) String() string { + return fmt.Sprintf("prefix(%s)", c.Prefix) +} + +func (c Custom) Type() string { + return CustomType +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/section.go b/vendor/github.com/daixiang0/gci/pkg/section/section.go new file mode 100644 index 0000000000..cc0a43f2f1 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/section.go @@ -0,0 +1,36 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +// Section defines a part of the formatted output. 
+type Section interface { + // MatchSpecificity returns how well an Import matches to this Section + MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity + + // String Implements the stringer interface + String() string + + // return section type + Type() string +} + +type SectionList []Section + +func (list SectionList) String() []string { + var output []string + for _, section := range list { + output = append(output, section.String()) + } + return output +} + +func DefaultSections() SectionList { + return SectionList{Standard{}, Default{}} +} + +func DefaultSectionSeparators() SectionList { + return SectionList{NewLine{}} +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/standard.go b/vendor/github.com/daixiang0/gci/pkg/section/standard.go new file mode 100644 index 0000000000..26c7e9dc7d --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/standard.go @@ -0,0 +1,30 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +const StandardType = "standard" + +type Standard struct{} + +func (s Standard) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + if isStandard(spec.Path) { + return specificity.StandardMatch{} + } + return specificity.MisMatch{} +} + +func (s Standard) String() string { + return StandardType +} + +func (s Standard) Type() string { + return StandardType +} + +func isStandard(pkg string) bool { + _, ok := standardPackages[pkg] + return ok +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/standard_list.go b/vendor/github.com/daixiang0/gci/pkg/section/standard_list.go new file mode 100644 index 0000000000..551bba4285 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/standard_list.go @@ -0,0 +1,170 @@ +package section + +// Code generated based on go1.22.0 X:arenas. DO NOT EDIT. 
+ +var standardPackages = map[string]struct{}{ + "archive/tar": {}, + "archive/zip": {}, + "arena": {}, + "bufio": {}, + "bytes": {}, + "cmp": {}, + "compress/bzip2": {}, + "compress/flate": {}, + "compress/gzip": {}, + "compress/lzw": {}, + "compress/zlib": {}, + "container/heap": {}, + "container/list": {}, + "container/ring": {}, + "context": {}, + "crypto": {}, + "crypto/aes": {}, + "crypto/cipher": {}, + "crypto/des": {}, + "crypto/dsa": {}, + "crypto/ecdh": {}, + "crypto/ecdsa": {}, + "crypto/ed25519": {}, + "crypto/elliptic": {}, + "crypto/hmac": {}, + "crypto/md5": {}, + "crypto/rand": {}, + "crypto/rc4": {}, + "crypto/rsa": {}, + "crypto/sha1": {}, + "crypto/sha256": {}, + "crypto/sha512": {}, + "crypto/subtle": {}, + "crypto/tls": {}, + "crypto/x509": {}, + "crypto/x509/pkix": {}, + "database/sql": {}, + "database/sql/driver": {}, + "debug/buildinfo": {}, + "debug/dwarf": {}, + "debug/elf": {}, + "debug/gosym": {}, + "debug/macho": {}, + "debug/pe": {}, + "debug/plan9obj": {}, + "embed": {}, + "encoding": {}, + "encoding/ascii85": {}, + "encoding/asn1": {}, + "encoding/base32": {}, + "encoding/base64": {}, + "encoding/binary": {}, + "encoding/csv": {}, + "encoding/gob": {}, + "encoding/hex": {}, + "encoding/json": {}, + "encoding/pem": {}, + "encoding/xml": {}, + "errors": {}, + "expvar": {}, + "flag": {}, + "fmt": {}, + "go/ast": {}, + "go/build": {}, + "go/build/constraint": {}, + "go/constant": {}, + "go/doc": {}, + "go/doc/comment": {}, + "go/format": {}, + "go/importer": {}, + "go/parser": {}, + "go/printer": {}, + "go/scanner": {}, + "go/token": {}, + "go/types": {}, + "go/version": {}, + "hash": {}, + "hash/adler32": {}, + "hash/crc32": {}, + "hash/crc64": {}, + "hash/fnv": {}, + "hash/maphash": {}, + "html": {}, + "html/template": {}, + "image": {}, + "image/color": {}, + "image/color/palette": {}, + "image/draw": {}, + "image/gif": {}, + "image/jpeg": {}, + "image/png": {}, + "index/suffixarray": {}, + "io": {}, + "io/fs": {}, + "io/ioutil": {}, + "log": {}, + "log/slog": {}, + "log/syslog": {}, + "maps": {}, + "math": {}, + "math/big": {}, + "math/bits": {}, + "math/cmplx": {}, + "math/rand": {}, + "math/rand/v2": {}, + "mime": {}, + "mime/multipart": {}, + "mime/quotedprintable": {}, + "net": {}, + "net/http": {}, + "net/http/cgi": {}, + "net/http/cookiejar": {}, + "net/http/fcgi": {}, + "net/http/httptest": {}, + "net/http/httptrace": {}, + "net/http/httputil": {}, + "net/http/pprof": {}, + "net/mail": {}, + "net/netip": {}, + "net/rpc": {}, + "net/rpc/jsonrpc": {}, + "net/smtp": {}, + "net/textproto": {}, + "net/url": {}, + "os": {}, + "os/exec": {}, + "os/signal": {}, + "os/user": {}, + "path": {}, + "path/filepath": {}, + "plugin": {}, + "reflect": {}, + "regexp": {}, + "regexp/syntax": {}, + "runtime": {}, + "runtime/cgo": {}, + "runtime/coverage": {}, + "runtime/debug": {}, + "runtime/metrics": {}, + "runtime/pprof": {}, + "runtime/race": {}, + "runtime/trace": {}, + "slices": {}, + "sort": {}, + "strconv": {}, + "strings": {}, + "sync": {}, + "sync/atomic": {}, + "syscall": {}, + "testing": {}, + "testing/fstest": {}, + "testing/iotest": {}, + "testing/quick": {}, + "testing/slogtest": {}, + "text/scanner": {}, + "text/tabwriter": {}, + "text/template": {}, + "text/template/parse": {}, + "time": {}, + "time/tzdata": {}, + "unicode": {}, + "unicode/utf16": {}, + "unicode/utf8": {}, + "unsafe": {}, +} diff --git a/vendor/github.com/daixiang0/gci/pkg/specificity/default.go b/vendor/github.com/daixiang0/gci/pkg/specificity/default.go new file mode 100644 index 
0000000000..f7ae4b87bc --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/specificity/default.go @@ -0,0 +1,19 @@ +package specificity + +type Default struct{} + +func (d Default) IsMoreSpecific(than MatchSpecificity) bool { + return isMoreSpecific(d, than) +} + +func (d Default) Equal(to MatchSpecificity) bool { + return equalSpecificity(d, to) +} + +func (d Default) class() specificityClass { + return DefaultClass +} + +func (d Default) String() string { + return "Default" +} diff --git a/vendor/github.com/daixiang0/gci/pkg/specificity/match.go b/vendor/github.com/daixiang0/gci/pkg/specificity/match.go new file mode 100644 index 0000000000..f08d2b66bb --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/specificity/match.go @@ -0,0 +1,24 @@ +package specificity + +import "fmt" + +type Match struct { + Length int +} + +func (m Match) IsMoreSpecific(than MatchSpecificity) bool { + otherMatch, isMatch := than.(Match) + return isMoreSpecific(m, than) || (isMatch && m.Length > otherMatch.Length) +} + +func (m Match) Equal(to MatchSpecificity) bool { + return equalSpecificity(m, to) +} + +func (m Match) class() specificityClass { + return MatchClass +} + +func (m Match) String() string { + return fmt.Sprintf("Match(length: %d)", m.Length) +} diff --git a/vendor/github.com/daixiang0/gci/pkg/specificity/mismatch.go b/vendor/github.com/daixiang0/gci/pkg/specificity/mismatch.go new file mode 100644 index 0000000000..8e87111461 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/specificity/mismatch.go @@ -0,0 +1,19 @@ +package specificity + +type MisMatch struct{} + +func (m MisMatch) IsMoreSpecific(than MatchSpecificity) bool { + return isMoreSpecific(m, than) +} + +func (m MisMatch) Equal(to MatchSpecificity) bool { + return equalSpecificity(m, to) +} + +func (m MisMatch) class() specificityClass { + return MisMatchClass +} + +func (m MisMatch) String() string { + return "Mismatch" +} diff --git a/vendor/github.com/daixiang0/gci/pkg/specificity/name.go b/vendor/github.com/daixiang0/gci/pkg/specificity/name.go new file mode 100644 index 0000000000..1900a0ac5d --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/specificity/name.go @@ -0,0 +1,19 @@ +package specificity + +type NameMatch struct{} + +func (n NameMatch) IsMoreSpecific(than MatchSpecificity) bool { + return isMoreSpecific(n, than) +} + +func (n NameMatch) Equal(to MatchSpecificity) bool { + return equalSpecificity(n, to) +} + +func (n NameMatch) class() specificityClass { + return NameClass +} + +func (n NameMatch) String() string { + return "Name" +} diff --git a/vendor/github.com/daixiang0/gci/pkg/specificity/specificity.go b/vendor/github.com/daixiang0/gci/pkg/specificity/specificity.go new file mode 100644 index 0000000000..842da18579 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/specificity/specificity.go @@ -0,0 +1,27 @@ +package specificity + +type specificityClass int + +const ( + MisMatchClass = 0 + DefaultClass = 10 + StandardClass = 20 + MatchClass = 30 + NameClass = 40 +) + +// MatchSpecificity is used to determine which section matches an import best +type MatchSpecificity interface { + IsMoreSpecific(than MatchSpecificity) bool + Equal(to MatchSpecificity) bool + class() specificityClass +} + +func isMoreSpecific(this, than MatchSpecificity) bool { + return this.class() > than.class() +} + +func equalSpecificity(base, to MatchSpecificity) bool { + // m.class() == to.class() would not work for Match + return !base.IsMoreSpecific(to) && !to.IsMoreSpecific(base) +} diff --git 
a/vendor/github.com/daixiang0/gci/pkg/specificity/standard.go b/vendor/github.com/daixiang0/gci/pkg/specificity/standard.go new file mode 100644 index 0000000000..72ccaf7e1e --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/specificity/standard.go @@ -0,0 +1,19 @@ +package specificity + +type StandardMatch struct{} + +func (s StandardMatch) IsMoreSpecific(than MatchSpecificity) bool { + return isMoreSpecific(s, than) +} + +func (s StandardMatch) Equal(to MatchSpecificity) bool { + return equalSpecificity(s, to) +} + +func (s StandardMatch) class() specificityClass { + return StandardClass +} + +func (s StandardMatch) String() string { + return "Standard" +} diff --git a/vendor/github.com/daixiang0/gci/pkg/utils/constants.go b/vendor/github.com/daixiang0/gci/pkg/utils/constants.go new file mode 100644 index 0000000000..2fafbc32cc --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/utils/constants.go @@ -0,0 +1,12 @@ +package utils + +const ( + Indent = '\t' + Linebreak = '\n' + WinLinebreak = '\r' + + Colon = ":" + + LeftParenthesis = '(' + RightParenthesis = ')' +) diff --git a/vendor/github.com/denis-tingajkin/go-header/.gitignore b/vendor/github.com/denis-tingaikin/go-header/.gitignore similarity index 100% rename from vendor/github.com/denis-tingajkin/go-header/.gitignore rename to vendor/github.com/denis-tingaikin/go-header/.gitignore diff --git a/vendor/github.com/denis-tingajkin/go-header/.go-header.yml b/vendor/github.com/denis-tingaikin/go-header/.go-header.yml similarity index 89% rename from vendor/github.com/denis-tingajkin/go-header/.go-header.yml rename to vendor/github.com/denis-tingaikin/go-header/.go-header.yml index 446d7317ea..3aa6d060db 100644 --- a/vendor/github.com/denis-tingajkin/go-header/.go-header.yml +++ b/vendor/github.com/denis-tingaikin/go-header/.go-header.yml @@ -1,6 +1,6 @@ values: regexp: - copyright-holder: Copyright \(c\) {{year-range}} Denis Tingajkin + copyright-holder: Copyright \(c\) {{mod-year-range}} Denis Tingaikin template: | {{copyright-holder}} diff --git a/vendor/github.com/denis-tingajkin/go-header/LICENSE b/vendor/github.com/denis-tingaikin/go-header/LICENSE similarity index 100% rename from vendor/github.com/denis-tingajkin/go-header/LICENSE rename to vendor/github.com/denis-tingaikin/go-header/LICENSE diff --git a/vendor/github.com/denis-tingaikin/go-header/README.md b/vendor/github.com/denis-tingaikin/go-header/README.md new file mode 100644 index 0000000000..fcddad1fa1 --- /dev/null +++ b/vendor/github.com/denis-tingaikin/go-header/README.md @@ -0,0 +1,83 @@ +# go-header +[![ci](https://github.com/denis-tingaikin/go-header/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/denis-tingaikin/go-header/actions/workflows/ci.yml) + +Go source code linter providing checks for license headers. + +## Installation + +For installation you can simply use `go install`. + +```bash +go install github.com/denis-tingaikin/go-header/cmd/go-header +``` + +## Configuration + +To configure the `.go-header.yml` linter you simply need to fill in the following fields: + +```yaml +--- +template: # expects header template string. +template-path: # expects path to file with license header string. +values: # expects `const` or `regexp` node with values where values is a map string to string. + const: + key1: value1 # const value just checks equality. Note `key1` should be used in template string as {{ key1 }} or {{ KEY1 }}. + regexp: + key2: value2 # regexp value just checks regex match. The value should be a valid regexp pattern.
Note `key2` should be used in template string as {{ key2 }} or {{ KEY2 }}. +``` + +`values` can also be used recursively. Example: + +```yaml +values: + const: + key1: "value" + regexp: + key2: "{{key1}} value1" # Reads as regex pattern "value value1" +``` + +## Built-in values + +- **MOD-YEAR** - Returns the year when the file was modified. +- **MOD-YEAR-RANGE** - Returns a year-range where the range starts from the year when the file was modified. +- **YEAR** - Expects current year. Example header value: `2020`. Example of template using: `{{YEAR}}` or `{{year}}`. +- **YEAR-RANGE** - Expects any valid year interval or current year. Example header value: `2020` or `2000-2020`. Example of template using: `{{year-range}}` or `{{YEAR-RANGE}}`. + +## Execution + +The `go-header` linter expects file paths as input. If you want to run `go-header` only on changed files, you can use this command: + +```bash +go-header $(git diff --name-only | grep -E '.*\.go') +``` + +## Setup example + +### Step 1 + +Create the configuration file `.go-header.yml` in the root of the project. + +```yaml +--- +values: + const: + MY COMPANY: mycompany.com +template: | + {{ MY COMPANY }} + SPDX-License-Identifier: Apache-2.0 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +``` + +### Step 2 +You are ready! Execute `go-header ${PATH_TO_FILES}` from the root of the project. diff --git a/vendor/github.com/denis-tingaikin/go-header/analyzer.go b/vendor/github.com/denis-tingaikin/go-header/analyzer.go new file mode 100644 index 0000000000..c6b361f01d --- /dev/null +++ b/vendor/github.com/denis-tingaikin/go-header/analyzer.go @@ -0,0 +1,256 @@ +// Copyright (c) 2020-2024 Denis Tingaikin +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
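Before the analyzer source, a sketch of how the pieces of this package fit together when driven from Go instead of the CLI. This is illustrative only — the file name and config path are assumptions — and it uses the `Configuration`, `Option`, and `Target` types defined in the files that follow:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"

	goheader "github.com/denis-tingaikin/go-header"
)

func main() {
	// Load the template and values from the YAML configuration.
	cfg := &goheader.Configuration{}
	if err := cfg.Parse(".go-header.yml"); err != nil {
		panic(err)
	}
	values, err := cfg.GetValues()
	if err != nil {
		panic(err)
	}
	template, err := cfg.GetTemplate()
	if err != nil {
		panic(err)
	}

	// Build the analyzer and check a single file's header.
	a := goheader.New(goheader.WithTemplate(template), goheader.WithValues(values))
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "main.go", nil, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	if issue := a.Analyze(&goheader.Target{Path: "main.go", File: file}); issue != nil {
		fmt.Println(issue.Message())
	}
}
```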
+ +package goheader + +import ( + "fmt" + "go/ast" + "os" + "os/exec" + "strings" + "time" +) + +type Target struct { + Path string + File *ast.File +} + +const iso = "2006-01-02 15:04:05 -0700" + +func (t *Target) ModTime() (time.Time, error) { + diff, err := exec.Command("git", "diff", t.Path).CombinedOutput() + if err == nil && len(diff) == 0 { + line, err := exec.Command("git", "log", "-1", "--pretty=format:%cd", "--date=iso", "--", t.Path).CombinedOutput() + if err == nil { + return time.Parse(iso, string(line)) + } + } + info, err := os.Stat(t.Path) + if err != nil { + return time.Time{}, err + } + return info.ModTime(), nil +} + +type Analyzer struct { + values map[string]Value + template string +} + +func (a *Analyzer) processPerTargetValues(target *Target) error { + a.values["mod-year"] = a.values["year"] + a.values["mod-year-range"] = a.values["year-range"] + if t, err := target.ModTime(); err == nil { + a.values["mod-year"] = &ConstValue{RawValue: fmt.Sprint(t.Year())} + a.values["mod-year-range"] = &RegexpValue{RawValue: `((20\d\d\-{{mod-year}})|({{mod-year}}))`} + } + + for _, v := range a.values { + if err := v.Calculate(a.values); err != nil { + return err + } + } + return nil +} + +func (a *Analyzer) Analyze(target *Target) (i Issue) { + if a.template == "" { + return NewIssue("Missed template for check") + } + + if err := a.processPerTargetValues(target); err != nil { + return &issue{msg: err.Error()} + } + + file := target.File + var header string + var offset = Location{ + Position: 1, + } + if len(file.Comments) > 0 && file.Comments[0].Pos() < file.Package { + if strings.HasPrefix(file.Comments[0].List[0].Text, "/*") { + header = (&ast.CommentGroup{List: []*ast.Comment{file.Comments[0].List[0]}}).Text() + } else { + header = file.Comments[0].Text() + offset.Position += 3 + } + } + defer func() { + if i == nil { + return + } + fix, ok := a.generateFix(i, file, header) + if !ok { + return + } + i = NewIssueWithFix(i.Message(), i.Location(), fix) + }() + header = strings.TrimSpace(header) + if header == "" { + return NewIssue("Missed header for check") + } + s := NewReader(header) + s.SetOffset(offset) + t := NewReader(a.template) + for !s.Done() && !t.Done() { + templateCh := t.Peek() + if templateCh == '{' { + name := a.readField(t) + if a.values[name] == nil { + return NewIssue(fmt.Sprintf("Template has unknown value: %v", name)) + } + if i := a.values[name].Read(s); i != nil { + return i + } + continue + } + sourceCh := s.Peek() + if sourceCh != templateCh { + l := s.Location() + notNextLine := func(r rune) bool { + return r != '\n' + } + actual := s.ReadWhile(notNextLine) + expected := t.ReadWhile(notNextLine) + return NewIssueWithLocation(fmt.Sprintf("Actual: %v\nExpected:%v", actual, expected), l) + } + s.Next() + t.Next() + } + if !s.Done() { + l := s.Location() + return NewIssueWithLocation(fmt.Sprintf("Unexpected string: %v", s.Finish()), l) + } + if !t.Done() { + l := s.Location() + return NewIssueWithLocation(fmt.Sprintf("Missed string: %v", t.Finish()), l) + } + return nil +} + +func (a *Analyzer) readField(reader *Reader) string { + _ = reader.Next() + _ = reader.Next() + + r := reader.ReadWhile(func(r rune) bool { + return r != '}' + }) + + _ = reader.Next() + _ = reader.Next() + + return strings.ToLower(strings.TrimSpace(r)) +} + +func New(options ...Option) *Analyzer { + a := &Analyzer{values: make(map[string]Value)} + for _, o := range options { + o.apply(a) + } + return a +} + +func (a *Analyzer) generateFix(i Issue, file *ast.File, header string) (Fix, 
bool) { + var expect string + t := NewReader(a.template) + for !t.Done() { + ch := t.Peek() + if ch == '{' { + f := a.values[a.readField(t)] + if f == nil { + return Fix{}, false + } + if f.Calculate(a.values) != nil { + return Fix{}, false + } + expect += f.Get() + continue + } + + expect += string(ch) + t.Next() + } + + fix := Fix{Expected: strings.Split(expect, "\n")} + if !(len(file.Comments) > 0 && file.Comments[0].Pos() < file.Package) { + for i := range fix.Expected { + fix.Expected[i] = "// " + fix.Expected[i] + } + return fix, true + } + + actual := file.Comments[0].List[0].Text + if !strings.HasPrefix(actual, "/*") { + for i := range fix.Expected { + fix.Expected[i] = "// " + fix.Expected[i] + } + for _, c := range file.Comments[0].List { + fix.Actual = append(fix.Actual, c.Text) + } + i = NewIssueWithFix(i.Message(), i.Location(), fix) + return fix, true + } + + gets := func(i int, end bool) string { + if i < 0 { + return header + } + if end { + return header[i+1:] + } + return header[:i] + } + start := strings.Index(actual, gets(strings.IndexByte(header, '\n'), false)) + if start < 0 { + return Fix{}, false // Should be impossible + } + nl := strings.LastIndexByte(actual[:start], '\n') + if nl >= 0 { + fix.Actual = strings.Split(actual[:nl], "\n") + fix.Expected = append(fix.Actual, fix.Expected...) + actual = actual[nl+1:] + start -= nl + 1 + } + + prefix := actual[:start] + if nl < 0 { + fix.Expected[0] = prefix + fix.Expected[0] + } else { + n := len(fix.Actual) + for i := range fix.Expected[n:] { + fix.Expected[n+i] = prefix + fix.Expected[n+i] + } + } + + last := gets(strings.LastIndexByte(header, '\n'), true) + end := strings.Index(actual, last) + if end < 0 { + return Fix{}, false // Should be impossible + } + + trailing := actual[end+len(last):] + if i := strings.IndexRune(trailing, '\n'); i < 0 { + fix.Expected[len(fix.Expected)-1] += trailing + } else { + fix.Expected[len(fix.Expected)-1] += trailing[:i] + fix.Expected = append(fix.Expected, strings.Split(trailing[i+1:], "\n")...) + } + + fix.Actual = append(fix.Actual, strings.Split(actual, "\n")...) + return fix, true +} diff --git a/vendor/github.com/denis-tingaikin/go-header/config.go b/vendor/github.com/denis-tingaikin/go-header/config.go new file mode 100644 index 0000000000..c881b63acd --- /dev/null +++ b/vendor/github.com/denis-tingaikin/go-header/config.go @@ -0,0 +1,99 @@ +// Copyright (c) 2020-2024 Denis Tingaikin +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package goheader + +import ( + "errors" + "fmt" + "os" + "strings" + "time" + + "gopkg.in/yaml.v3" +) + +// Configuration represents go-header linter setup parameters +type Configuration struct { + // Values is map of values. Supports two types 'const` and `regexp`. Values can be used recursively. + Values map[string]map[string]string `yaml:"values"'` + // Template is template for checking. Uses values. + Template string `yaml:"template"` + // TemplatePath path to the template file. 
Useful if need to load the template from a specific file. + TemplatePath string `yaml:"template-path"` +} + +func (c *Configuration) builtInValues() map[string]Value { + var result = make(map[string]Value) + year := fmt.Sprint(time.Now().Year()) + result["year-range"] = &RegexpValue{ + RawValue: `((20\d\d\-{{YEAR}})|({{YEAR}}))`, + } + result["year"] = &ConstValue{ + RawValue: year, + } + return result +} + +func (c *Configuration) GetValues() (map[string]Value, error) { + var result = c.builtInValues() + createConst := func(raw string) Value { + return &ConstValue{RawValue: raw} + } + createRegexp := func(raw string) Value { + return &RegexpValue{RawValue: raw} + } + appendValues := func(m map[string]string, create func(string) Value) { + for k, v := range m { + key := strings.ToLower(k) + result[key] = create(v) + } + } + for k, v := range c.Values { + switch k { + case "const": + appendValues(v, createConst) + case "regexp": + appendValues(v, createRegexp) + default: + return nil, fmt.Errorf("unknown value type %v", k) + } + } + return result, nil +} + +func (c *Configuration) GetTemplate() (string, error) { + if c.Template != "" { + return c.Template, nil + } + if c.TemplatePath == "" { + return "", errors.New("template has not passed") + } + if b, err := os.ReadFile(c.TemplatePath); err != nil { + return "", err + } else { + c.Template = strings.TrimSpace(string(b)) + return c.Template, nil + } +} + +func (c *Configuration) Parse(p string) error { + b, err := os.ReadFile(p) + if err != nil { + return err + } + return yaml.Unmarshal(b, c) +} diff --git a/vendor/github.com/denis-tingaikin/go-header/issue.go b/vendor/github.com/denis-tingaikin/go-header/issue.go new file mode 100644 index 0000000000..e92279793c --- /dev/null +++ b/vendor/github.com/denis-tingaikin/go-header/issue.go @@ -0,0 +1,67 @@ +// Copyright (c) 2020-2024 Denis Tingaikin +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
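To make `GetValues` concrete, a small sketch (the company name and template are invented for illustration): keys are lower-cased, the `const`/`regexp` node picks the value kind, and the built-ins `year` and `year-range` are merged in automatically (the `mod-*` variants are filled in per target by the analyzer):

```go
package main

import goheader "github.com/denis-tingaikin/go-header"

func main() {
	cfg := &goheader.Configuration{
		Values: map[string]map[string]string{
			"const":  {"COMPANY": "Acme Inc."},
			"regexp": {"copyright": `Copyright \(c\) {{ year-range }} {{ company }}`},
		},
		Template: "{{ copyright }}",
	}
	// "COMPANY" is stored as "company", so the template reference
	// {{ company }} (or {{ COMPANY }}) resolves to the const value.
	values, err := cfg.GetValues()
	if err != nil {
		panic(err)
	}
	_ = values
}
```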
+ +package goheader + +type Issue interface { + Location() Location + Message() string + Fix() *Fix +} + +type issue struct { + msg string + location Location + fix *Fix +} + +type Fix struct { + Actual []string + Expected []string +} + +func (i *issue) Location() Location { + return i.location +} + +func (i *issue) Message() string { + return i.msg +} + +func (i *issue) Fix() *Fix { + return i.fix +} + +func NewIssueWithLocation(msg string, location Location) Issue { + return &issue{ + msg: msg, + location: location, + } +} + +func NewIssueWithFix(msg string, location Location, fix Fix) Issue { + return &issue{ + msg: msg, + location: location, + fix: &fix, + } +} + +func NewIssue(msg string) Issue { + return &issue{ + msg: msg, + } +} diff --git a/vendor/github.com/denis-tingajkin/go-header/location.go b/vendor/github.com/denis-tingaikin/go-header/location.go similarity index 95% rename from vendor/github.com/denis-tingajkin/go-header/location.go rename to vendor/github.com/denis-tingaikin/go-header/location.go index ba4d1907b1..9f18394855 100644 --- a/vendor/github.com/denis-tingajkin/go-header/location.go +++ b/vendor/github.com/denis-tingaikin/go-header/location.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Denis Tingajkin +// Copyright (c) 2020-2022 Denis Tingaikin // // SPDX-License-Identifier: Apache-2.0 // diff --git a/vendor/github.com/denis-tingaikin/go-header/option.go b/vendor/github.com/denis-tingaikin/go-header/option.go new file mode 100644 index 0000000000..a9689e811e --- /dev/null +++ b/vendor/github.com/denis-tingaikin/go-header/option.go @@ -0,0 +1,44 @@ +// Copyright (c) 2020-2022 Denis Tingaikin +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package goheader + +import "strings" + +type Option interface { + apply(*Analyzer) +} + +type applyAnalyzerOptionFunc func(*Analyzer) + +func (f applyAnalyzerOptionFunc) apply(a *Analyzer) { + f(a) +} + +func WithValues(values map[string]Value) Option { + return applyAnalyzerOptionFunc(func(a *Analyzer) { + a.values = make(map[string]Value) + for k, v := range values { + a.values[strings.ToLower(k)] = v + } + }) +} + +func WithTemplate(template string) Option { + return applyAnalyzerOptionFunc(func(a *Analyzer) { + a.template = template + }) +} diff --git a/vendor/github.com/denis-tingaikin/go-header/reader.go b/vendor/github.com/denis-tingaikin/go-header/reader.go new file mode 100644 index 0000000000..9c9e88a177 --- /dev/null +++ b/vendor/github.com/denis-tingaikin/go-header/reader.go @@ -0,0 +1,116 @@ +/* +Copyright (c) 2020-2022 Denis Tingaikin + +SPDX-License-Identifier: Apache-2.0 + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at:
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package goheader
+
+func NewReader(text string) *Reader {
+ return &Reader{source: text}
+}
+
+type Reader struct {
+ source string
+ position int
+ location Location
+ offset Location
+}
+
+func (r *Reader) SetOffset(offset Location) {
+ r.offset = offset
+}
+
+func (r *Reader) Position() int {
+ return r.position
+}
+
+func (r *Reader) Location() Location {
+ return r.location.Add(r.offset)
+}
+
+func (r *Reader) Peek() rune {
+ if r.Done() {
+ return rune(0)
+ }
+ return rune(r.source[r.position])
+}
+
+func (r *Reader) Done() bool {
+ return r.position >= len(r.source)
+}
+
+func (r *Reader) Next() rune {
+ if r.Done() {
+ return rune(0)
+ }
+ result := r.Peek()
+ if result == '\n' {
+ r.location.Line++
+ r.location.Position = 0
+ } else {
+ r.location.Position++
+ }
+ r.position++
+ return result
+}
+
+func (r *Reader) Finish() string {
+ if r.position >= len(r.source) {
+ return ""
+ }
+ defer r.till()
+ return r.source[r.position:]
+}
+
+func (r *Reader) SetPosition(pos int) {
+ if pos < 0 {
+ r.position = 0
+ }
+ r.position = pos
+ r.location = r.calculateLocation()
+}
+
+func (r *Reader) ReadWhile(match func(rune) bool) string {
+ if match == nil {
+ return ""
+ }
+ start := r.position
+ for !r.Done() && match(r.Peek()) {
+ r.Next()
+ }
+ return r.source[start:r.position]
+}
+
+func (r *Reader) till() {
+ r.position = len(r.source)
+ r.location = r.calculateLocation()
+}
+
+func (r *Reader) calculateLocation() Location {
+ min := len(r.source)
+ if min > r.position {
+ min = r.position
+ }
+ x, y := 0, 0
+ for i := 0; i < min; i++ {
+ if r.source[i] == '\n' {
+ y++
+ x = 0
+ } else {
+ x++
+ }
+ }
+ return Location{Line: y, Position: x}
+}
diff --git a/vendor/github.com/denis-tingaikin/go-header/value.go b/vendor/github.com/denis-tingaikin/go-header/value.go
new file mode 100644
index 0000000000..706a84f18a
--- /dev/null
+++ b/vendor/github.com/denis-tingaikin/go-header/value.go
@@ -0,0 +1,150 @@
+// Copyright (c) 2020-2024 Denis Tingaikin
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
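The Reader just added is the cursor the value matchers operate on. Note that it indexes bytes (`rune(r.source[r.position])`), so it is only rune-correct for ASCII headers. A small standalone sketch of its primitives:

```go
package main

import (
	"fmt"

	goheader "github.com/denis-tingaikin/go-header"
)

func main() {
	r := goheader.NewReader("2020-2024 ACME\nSPDX-License-Identifier: Apache-2.0")

	// ReadWhile consumes runes while the predicate holds and returns the span.
	years := r.ReadWhile(func(ch rune) bool { return ch != ' ' })
	fmt.Println(years) // 2020-2024

	// Location tracks line/position as runes are consumed, plus any offset.
	fmt.Printf("%+v\n", r.Location()) // {Line:0 Position:9}

	// Finish returns the unread remainder and fast-forwards to the end.
	rest := r.Finish()
	fmt.Println(r.Done(), rest != "") // true true
}
```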
+ +package goheader + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +type Calculable interface { + Calculate(map[string]Value) error + Get() string + Raw() string +} + +type Value interface { + Calculable + Read(*Reader) Issue +} + +func calculateValue(calculable Calculable, values map[string]Value) (string, error) { + sb := strings.Builder{} + r := calculable.Raw() + var endIndex int + var startIndex int + for startIndex = strings.Index(r, "{{"); startIndex >= 0; startIndex = strings.Index(r, "{{") { + _, _ = sb.WriteString(r[:startIndex]) + endIndex = strings.Index(r, "}}") + if endIndex < 0 { + return "", errors.New("missed value ending") + } + subVal := strings.ToLower(strings.TrimSpace(r[startIndex+2 : endIndex])) + if val := values[subVal]; val != nil { + if err := val.Calculate(values); err != nil { + return "", err + } + sb.WriteString(val.Get()) + } else { + return "", fmt.Errorf("unknown value name %v", subVal) + } + endIndex += 2 + r = r[endIndex:] + } + _, _ = sb.WriteString(r) + return sb.String(), nil +} + +type ConstValue struct { + RawValue, Value string +} + +func (c *ConstValue) Calculate(values map[string]Value) error { + v, err := calculateValue(c, values) + if err != nil { + return err + } + c.Value = v + return nil +} + +func (c *ConstValue) Raw() string { + return c.RawValue +} + +func (c *ConstValue) Get() string { + if c.Value != "" { + return c.Value + } + return c.RawValue +} + +func (c *ConstValue) String() string { + return c.Get() +} + +func (c *ConstValue) Read(s *Reader) Issue { + l := s.Location() + p := s.Position() + for _, ch := range c.Get() { + if ch != s.Peek() { + s.SetPosition(p) + f := s.ReadWhile(func(r rune) bool { + return r != '\n' + }) + return NewIssueWithLocation(fmt.Sprintf("Expected:%v, Actual: %v", c.Get(), f), l) + } + s.Next() + } + return nil +} + +type RegexpValue struct { + RawValue, Value string +} + +func (r *RegexpValue) Calculate(values map[string]Value) error { + v, err := calculateValue(r, values) + if err != nil { + return err + } + r.Value = v + return nil +} + +func (r *RegexpValue) Raw() string { + return r.RawValue +} +func (r *RegexpValue) Get() string { + if r.Value != "" { + return r.Value + } + return r.RawValue +} + +func (r *RegexpValue) String() string { + return r.Get() +} + +func (r *RegexpValue) Read(s *Reader) Issue { + l := s.Location() + p := regexp.MustCompile(r.Get()) + pos := s.Position() + str := s.Finish() + s.SetPosition(pos) + indexes := p.FindAllIndex([]byte(str), -1) + if len(indexes) == 0 { + return NewIssueWithLocation(fmt.Sprintf("Pattern %v doesn't match.", p.String()), l) + } + s.SetPosition(pos + indexes[0][1]) + return nil +} + +var _ Value = &ConstValue{} +var _ Value = &RegexpValue{} diff --git a/vendor/github.com/denis-tingajkin/go-header/README.md b/vendor/github.com/denis-tingajkin/go-header/README.md deleted file mode 100644 index 1a2a3d9a6d..0000000000 --- a/vendor/github.com/denis-tingajkin/go-header/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# go-header -[![Actions Status](https://github.com/denis-tingajkin/go-header/workflows/ci/badge.svg)](https://github.com/denis-tingajkin/go-header/actions) - -Go source code linter providing checks for license headers. - -## Installation - -For installation you can simply use `go get`. - -```bash -go get github.com/denis-tingajkin/go-header/cmd/go-header -``` - -## Configuration - -To configuring `.go-header.yml` linter you simply need to fill the next fields: - -```yaml ---- -temaplte: # expects header template string. 
-tempalte-path: # expects path to file with license header string. -values: # expects `const` or `regexp` node with values where values is a map string to string. - const: - key1: value1 # const value just checks equality. Note `key1` should be used in template string as {{ key1 }} or {{ KEY1 }}. - regexp: - key2: value2 # regexp value just checks regex match. The value should be a valid regexp pattern. Note `key2` should be used in template string as {{ key2 }} or {{ KEY2 }}. -``` - -Where `values` also can be used recursively. Example: - -```yaml -values: - const: - key1: "value" - regexp: - key2: "{{key1}} value1" # Reads as regex pattern "value value1" -``` - -## Bult-in values - -- **YEAR** - Expects current year. Example header value: `2020`. Example of template using: `{{YEAR}}` or `{{year}}`. -- **YEAR-RANGE** - Expects any valid year interval or current year. Example header value: `2020` or `2000-2020`. Example of template using: `{{year-range}}` or `{{YEAR-RANGE}}`. - -## Execution - -`go-header` linter expects file paths on input. If you want to run `go-header` only on diff files, then you can use this command: - -```bash -go-header $(git diff --name-only | grep -E '.*\.go') -``` - -## Setup example - -### Step 1 - -Create configuration file `.go-header.yml` in the root of project. - -```yaml ---- -values: - const: - MY COMPANY: mycompany.com -template: | - {{ MY COMPANY }} - SPDX-License-Identifier: Apache-2.0 - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at: - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -``` - -### Step 2 -You are ready! Execute `go-header ${PATH_TO_FILES}` from the root of the project. diff --git a/vendor/github.com/denis-tingajkin/go-header/analyzer.go b/vendor/github.com/denis-tingajkin/go-header/analyzer.go deleted file mode 100644 index 5707890b02..0000000000 --- a/vendor/github.com/denis-tingajkin/go-header/analyzer.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (c) 2020 Denis Tingajkin -// -// SPDX-License-Identifier: Apache-2.0 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
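For contrast with the old implementation being deleted below, the new value.go resolves nested `{{ name }}` references via `Calculate` before any matching happens; `Raw` keeps the original template text while `Get` returns the computed one. A minimal sketch (the value names are illustrative):

```go
package main

import (
	"fmt"
	"log"

	goheader "github.com/denis-tingaikin/go-header"
)

func main() {
	values := map[string]goheader.Value{
		"company": &goheader.ConstValue{RawValue: "ACME"},
		// {{ company }} is substituted before the pattern is ever compiled.
		"copyright": &goheader.RegexpValue{RawValue: `Copyright \d{4} {{ company }}`},
	}

	v := values["copyright"]
	if err := v.Calculate(values); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Raw()) // Copyright \d{4} {{ company }}
	fmt.Println(v.Get()) // Copyright \d{4} ACME
}
```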
- -package goheader - -import ( - "fmt" - "go/ast" - "os" - "os/exec" - "strings" - "time" -) - -type Target struct { - Path string - File *ast.File -} - -const iso = "2006-01-02 15:04:05 -0700" - -func (t *Target) ModTime() (time.Time, error) { - diff, err := exec.Command("git", "diff", t.Path).CombinedOutput() - if err == nil && len(diff) == 0 { - line, err := exec.Command("git", "log", "-1", "--pretty=format:%cd", "--date=iso", "--", t.Path).CombinedOutput() - if err == nil { - return time.Parse(iso, string(line)) - } - } - info, err := os.Stat(t.Path) - if err != nil { - return time.Time{}, err - } - return info.ModTime(), nil -} - -type Analyzer struct { - values map[string]Value - template string -} - -func (a *Analyzer) Analyze(target *Target) Issue { - if a.template == "" { - return NewIssue("Missed template for check") - } - if t, err := target.ModTime(); err == nil { - if t.Year() != time.Now().Year() { - return nil - } - } - file := target.File - var header string - var offset = Location{ - Position: 1, - } - if len(file.Comments) > 0 && file.Comments[0].Pos() < file.Package { - if strings.HasPrefix(file.Comments[0].List[0].Text, "/*") { - header = (&ast.CommentGroup{List: []*ast.Comment{file.Comments[0].List[0]}}).Text() - } else { - header = file.Comments[0].Text() - offset.Position += 3 - } - } - header = strings.TrimSpace(header) - if header == "" { - return NewIssue("Missed header for check") - } - s := NewReader(header) - s.SetOffset(offset) - t := NewReader(a.template) - for !s.Done() && !t.Done() { - templateCh := t.Peek() - if templateCh == '{' { - name := a.readField(t) - if a.values[name] == nil { - return NewIssue(fmt.Sprintf("Template has unknown value: %v", name)) - } - if i := a.values[name].Read(s); i != nil { - return i - } - continue - } - sourceCh := s.Peek() - if sourceCh != templateCh { - l := s.Location() - notNextLine := func(r rune) bool { - return r != '\n' - } - actual := s.ReadWhile(notNextLine) - expected := t.ReadWhile(notNextLine) - return NewIssueWithLocation(fmt.Sprintf("Actual: %v\nExpected:%v", actual, expected), l) - } - s.Next() - t.Next() - } - if !s.Done() { - l := s.Location() - return NewIssueWithLocation(fmt.Sprintf("Unexpected string: %v", s.Finish()), l) - } - if !t.Done() { - l := s.Location() - return NewIssueWithLocation(fmt.Sprintf("Missed string: %v", t.Finish()), l) - } - return nil -} - -func (a *Analyzer) readField(reader *Reader) string { - _ = reader.Next() - _ = reader.Next() - - r := reader.ReadWhile(func(r rune) bool { - return r != '}' - }) - - _ = reader.Next() - _ = reader.Next() - - return strings.ToLower(strings.TrimSpace(r)) -} - -func New(options ...Option) *Analyzer { - a := &Analyzer{} - for _, o := range options { - o.apply(a) - } - for _, v := range a.values { - err := v.Calculate(a.values) - if err != nil { - panic(err.Error()) - } - } - return a -} diff --git a/vendor/github.com/denis-tingajkin/go-header/config.go b/vendor/github.com/denis-tingajkin/go-header/config.go deleted file mode 100644 index fa8b23c2d8..0000000000 --- a/vendor/github.com/denis-tingajkin/go-header/config.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2020 Denis Tingajkin -// -// SPDX-License-Identifier: Apache-2.0 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package goheader - -import ( - "errors" - "fmt" - "io/ioutil" - "strings" - "time" - - "gopkg.in/yaml.v2" -) - -// Configuration represents go-header linter setup parameters -type Configuration struct { - // Values is map of values. Supports two types 'const` and `regexp`. Values can be used recursively. - Values map[string]map[string]string `yaml:"values"'` - // Template is template for checking. Uses values. - Template string `yaml:"template"` - // TemplatePath path to the template file. Useful if need to load the template from a specific file. - TemplatePath string `yaml:"template-path"` -} - -func (c *Configuration) builtInValues() map[string]Value { - var result = make(map[string]Value) - year := fmt.Sprint(time.Now().Year()) - result["year-range"] = &RegexpValue{ - RawValue: strings.ReplaceAll(`(20\d\d\-YEAR)|(YEAR)`, "YEAR", year), - } - result["year"] = &ConstValue{ - RawValue: year, - } - return result -} - -func (c *Configuration) GetValues() (map[string]Value, error) { - var result = c.builtInValues() - createConst := func(raw string) Value { - return &ConstValue{RawValue: raw} - } - createRegexp := func(raw string) Value { - return &RegexpValue{RawValue: raw} - } - appendValues := func(m map[string]string, create func(string) Value) { - for k, v := range m { - key := strings.ToLower(k) - result[key] = create(v) - } - } - for k, v := range c.Values { - switch k { - case "const": - appendValues(v, createConst) - case "regexp": - appendValues(v, createRegexp) - default: - return nil, fmt.Errorf("unknown value type %v", k) - } - } - return result, nil -} - -func (c *Configuration) GetTemplate() (string, error) { - if c.Template != "" { - return c.Template, nil - } - if c.TemplatePath == "" { - return "", errors.New("template has not passed") - } - if b, err := ioutil.ReadFile(c.TemplatePath); err != nil { - return "", err - } else { - c.Template = strings.TrimSpace(string(b)) - return c.Template, nil - } -} - -func (c *Configuration) Parse(p string) error { - b, err := ioutil.ReadFile(p) - if err != nil { - return err - } - return yaml.Unmarshal(b, c) -} diff --git a/vendor/github.com/denis-tingajkin/go-header/issue.go b/vendor/github.com/denis-tingajkin/go-header/issue.go deleted file mode 100644 index 2ff7bfd3cc..0000000000 --- a/vendor/github.com/denis-tingajkin/go-header/issue.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2020 Denis Tingajkin -// -// SPDX-License-Identifier: Apache-2.0 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
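The replacement issue.go added earlier in this diff extends `Issue` with an optional `Fix` carrying the actual and expected header lines. A hedged sketch of how a caller might consume it (the `report` helper is hypothetical):

```go
package main

import (
	"fmt"
	"strings"

	goheader "github.com/denis-tingaikin/go-header"
)

// report is a hypothetical consumer of the extended Issue interface.
func report(i goheader.Issue) {
	loc := i.Location()
	fmt.Printf("%d:%d %s\n", loc.Line+1, loc.Position+1, i.Message())

	// Fix is nil when no automatic rewrite is available.
	if fix := i.Fix(); fix != nil {
		fmt.Println("replace:", strings.Join(fix.Actual, "\n"))
		fmt.Println("with:   ", strings.Join(fix.Expected, "\n"))
	}
}

func main() {
	report(goheader.NewIssueWithFix(
		"copyright year is out of date",
		goheader.Location{Line: 0, Position: 0},
		goheader.Fix{
			Actual:   []string{"// Copyright (c) 2020"},
			Expected: []string{"// Copyright (c) 2024"},
		},
	))
}
```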
- -package goheader - -type Issue interface { - Location() Location - Message() string -} - -type issue struct { - msg string - location Location -} - -func (i *issue) Location() Location { - return i.location -} - -func (i *issue) Message() string { - return i.msg -} - -func NewIssueWithLocation(msg string, location Location) Issue { - return &issue{ - msg: msg, - location: location, - } -} - -func NewIssue(msg string) Issue { - return &issue{ - msg: msg, - } -} diff --git a/vendor/github.com/denis-tingajkin/go-header/option.go b/vendor/github.com/denis-tingajkin/go-header/option.go deleted file mode 100644 index afbcb62e15..0000000000 --- a/vendor/github.com/denis-tingajkin/go-header/option.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2020 Denis Tingajkin -// -// SPDX-License-Identifier: Apache-2.0 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package goheader - -import "strings" - -type Option interface { - apply(*Analyzer) -} - -type applyAnalyzerOptionFunc func(*Analyzer) - -func (f applyAnalyzerOptionFunc) apply(a *Analyzer) { - f(a) -} - -func WithValues(values map[string]Value) Option { - return applyAnalyzerOptionFunc(func(a *Analyzer) { - a.values = make(map[string]Value) - for k, v := range values { - a.values[strings.ToLower(k)] = v - } - }) -} - -func WithTemplate(template string) Option { - return applyAnalyzerOptionFunc(func(a *Analyzer) { - a.template = template - }) -} diff --git a/vendor/github.com/denis-tingajkin/go-header/reader.go b/vendor/github.com/denis-tingajkin/go-header/reader.go deleted file mode 100644 index 2393c94882..0000000000 --- a/vendor/github.com/denis-tingajkin/go-header/reader.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright (c) 2020 Denis Tingajkin - -SPDX-License-Identifier: Apache-2.0 - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at: - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package goheader - -func NewReader(text string) *Reader { - return &Reader{source: text} -} - -type Reader struct { - source string - position int - location Location - offset Location -} - -func (r *Reader) SetOffset(offset Location) { - r.offset = offset -} - -func (r *Reader) Position() int { - return r.position -} - -func (r *Reader) Location() Location { - return r.location.Add(r.offset) -} - -func (r *Reader) Peek() rune { - if r.Done() { - return rune(0) - } - return rune(r.source[r.position]) -} - -func (r *Reader) Done() bool { - return r.position >= len(r.source) -} - -func (r *Reader) Next() rune { - if r.Done() { - return rune(0) - } - reuslt := r.Peek() - if reuslt == '\n' { - r.location.Line++ - r.location.Position = 0 - } else { - r.location.Position++ - } - r.position++ - return reuslt -} - -func (r *Reader) Finish() string { - if r.position >= len(r.source) { - return "" - } - defer r.till() - return r.source[r.position:] -} - -func (r *Reader) SetPosition(pos int) { - if pos < 0 { - r.position = 0 - } - r.position = pos - r.location = r.calculateLocation() -} - -func (r *Reader) ReadWhile(match func(rune) bool) string { - if match == nil { - return "" - } - start := r.position - for !r.Done() && match(r.Peek()) { - r.Next() - } - return r.source[start:r.position] -} - -func (r *Reader) till() { - r.position = len(r.source) - r.location = r.calculateLocation() -} - -func (r *Reader) calculateLocation() Location { - min := len(r.source) - if min > r.position { - min = r.position - } - x, y := 0, 0 - for i := 0; i < min; i++ { - if r.source[i] == '\n' { - y++ - x = 0 - } else { - x++ - } - } - return Location{Line: y, Position: x} -} diff --git a/vendor/github.com/denis-tingajkin/go-header/value.go b/vendor/github.com/denis-tingajkin/go-header/value.go deleted file mode 100644 index 2a3adcdce2..0000000000 --- a/vendor/github.com/denis-tingajkin/go-header/value.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright (c) 2020 Denis Tingajkin -// -// SPDX-License-Identifier: Apache-2.0 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package goheader - -import ( - "errors" - "fmt" - "regexp" - "strings" -) - -type Calculable interface { - Calculate(map[string]Value) error - Get() string -} - -type Value interface { - Calculable - Read(*Reader) Issue -} - -func calculateValue(calculable Calculable, values map[string]Value) (string, error) { - sb := strings.Builder{} - r := calculable.Get() - var endIndex int - var startIndex int - for startIndex = strings.Index(r, "{{"); startIndex >= 0; startIndex = strings.Index(r, "{{") { - _, _ = sb.WriteString(r[:startIndex]) - endIndex = strings.Index(r, "}}") - if endIndex < 0 { - return "", errors.New("missed value ending") - } - subVal := strings.ToLower(strings.TrimSpace(r[startIndex+2 : endIndex])) - if val := values[subVal]; val != nil { - if err := val.Calculate(values); err != nil { - return "", err - } - sb.WriteString(val.Get()) - } else { - return "", fmt.Errorf("unknown value name %v", subVal) - } - endIndex += 2 - r = r[endIndex:] - } - _, _ = sb.WriteString(r) - return sb.String(), nil -} - -type ConstValue struct { - RawValue string -} - -func (c *ConstValue) Calculate(values map[string]Value) error { - v, err := calculateValue(c, values) - if err != nil { - return err - } - c.RawValue = v - return nil -} - -func (c *ConstValue) Get() string { - return c.RawValue -} - -func (c *ConstValue) Read(s *Reader) Issue { - l := s.Location() - p := s.Position() - for _, ch := range c.Get() { - if ch != s.Peek() { - s.SetPosition(p) - f := s.ReadWhile(func(r rune) bool { - return r != '\n' - }) - return NewIssueWithLocation(fmt.Sprintf("Expected:%v, Actual: %v", c.Get(), f), l) - } - s.Next() - } - return nil -} - -type RegexpValue struct { - RawValue string -} - -func (r *RegexpValue) Calculate(values map[string]Value) error { - v, err := calculateValue(r, values) - if err != nil { - return err - } - r.RawValue = v - return nil -} - -func (r *RegexpValue) Get() string { - return r.RawValue -} - -func (r *RegexpValue) Read(s *Reader) Issue { - l := s.Location() - p := regexp.MustCompile(r.Get()) - pos := s.Position() - str := s.Finish() - s.SetPosition(pos) - indexes := p.FindAllIndex([]byte(str), -1) - if len(indexes) == 0 { - return NewIssueWithLocation(fmt.Sprintf("Pattern %v doesn't match.", p.String()), l) - } - s.SetPosition(pos + indexes[0][1]) - return nil -} - -var _ Value = &ConstValue{} -var _ Value = &RegexpValue{} diff --git a/vendor/github.com/esimonov/ifshort/LICENSE b/vendor/github.com/esimonov/ifshort/LICENSE deleted file mode 100644 index a04e339c01..0000000000 --- a/vendor/github.com/esimonov/ifshort/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2020 Eugene Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/esimonov/ifshort/pkg/analyzer/analyzer.go b/vendor/github.com/esimonov/ifshort/pkg/analyzer/analyzer.go deleted file mode 100644 index b2d06881d7..0000000000 --- a/vendor/github.com/esimonov/ifshort/pkg/analyzer/analyzer.go +++ /dev/null @@ -1,280 +0,0 @@ -package analyzer - -import ( - "go/ast" - "go/token" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" -) - -var maxDeclChars, maxDeclLines int - -const ( - maxDeclLinesUsage = `maximum length of variable declaration measured in number of lines, after which the linter won't suggest using short syntax. -Has precedence over max-decl-chars.` - maxDeclCharsUsage = `maximum length of variable declaration measured in number of characters, after which the linter won't suggest using short syntax.` -) - -func init() { - Analyzer.Flags.IntVar(&maxDeclLines, "max-decl-lines", 1, maxDeclLinesUsage) - Analyzer.Flags.IntVar(&maxDeclChars, "max-decl-chars", 30, maxDeclCharsUsage) -} - -// Analyzer is an analysis.Analyzer instance for ifshort linter. -var Analyzer = &analysis.Analyzer{ - Name: "ifshort", - Doc: "Checks that your code uses short syntax for if-statements whenever possible.", - Run: run, - Requires: []*analysis.Analyzer{inspect.Analyzer}, -} - -func run(pass *analysis.Pass) (interface{}, error) { - inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - nodeFilter := []ast.Node{ - (*ast.FuncDecl)(nil), - } - - inspector.Preorder(nodeFilter, func(node ast.Node) { - fdecl := node.(*ast.FuncDecl) - - /*if fdecl.Name.Name != "notUsed_BinaryExpressionInIndex_OK" { - return - }*/ - - if fdecl == nil || fdecl.Body == nil { - return - } - - candidates := getNamedOccurrenceMap(fdecl, pass) - - for _, stmt := range fdecl.Body.List { - candidates.checkStatement(stmt, token.NoPos) - } - - for varName := range candidates { - for marker, occ := range candidates[varName] { - // If two or more vars with the same scope marker - skip them. 
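To recall what the ifshort analyzer being deleted here actually reported: a variable assigned once and used only inside a following if-statement, with the suggestion to use Go's short statement form. Illustratively:

```go
package main

import "fmt"

func compute() int { return 42 }

func main() {
	// ifshort would report: variable 'n' is only used in the if-statement;
	// consider using short syntax.
	n := compute()
	if n > 0 {
		fmt.Println(n)
	}

	// The suggested form scopes the variable to the statement itself.
	if n := compute(); n > 0 {
		fmt.Println(n)
	}
}
```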
- if candidates.isFoundByScopeMarker(marker) { - continue - } - - pass.Reportf(occ.declarationPos, - "variable '%s' is only used in the if-statement (%s); consider using short syntax", - varName, pass.Fset.Position(occ.ifStmtPos)) - } - } - }) - return nil, nil -} - -func (nom namedOccurrenceMap) checkStatement(stmt ast.Stmt, ifPos token.Pos) { - switch v := stmt.(type) { - case *ast.AssignStmt: - for _, el := range v.Rhs { - nom.checkExpression(el, ifPos) - } - if isAssign(v.Tok) { - for _, el := range v.Lhs { - nom.checkExpression(el, ifPos) - } - } - case *ast.DeferStmt: - for _, a := range v.Call.Args { - nom.checkExpression(a, ifPos) - } - case *ast.ExprStmt: - switch v.X.(type) { - case *ast.CallExpr, *ast.UnaryExpr: - nom.checkExpression(v.X, ifPos) - } - case *ast.ForStmt: - for _, el := range v.Body.List { - nom.checkStatement(el, ifPos) - } - - if bexpr, ok := v.Cond.(*ast.BinaryExpr); ok { - nom.checkExpression(bexpr.X, ifPos) - nom.checkExpression(bexpr.Y, ifPos) - } - - nom.checkStatement(v.Post, ifPos) - case *ast.GoStmt: - for _, a := range v.Call.Args { - nom.checkExpression(a, ifPos) - } - case *ast.IfStmt: - for _, el := range v.Body.List { - nom.checkStatement(el, v.If) - } - if elseBlock, ok := v.Else.(*ast.BlockStmt); ok { - for _, el := range elseBlock.List { - nom.checkStatement(el, v.If) - } - } - - switch cond := v.Cond.(type) { - case *ast.UnaryExpr: - nom.checkExpression(cond.X, v.If) - case *ast.BinaryExpr: - nom.checkExpression(cond.X, v.If) - nom.checkExpression(cond.Y, v.If) - case *ast.CallExpr: - nom.checkExpression(cond, v.If) - } - - if init, ok := v.Init.(*ast.AssignStmt); ok { - for _, e := range init.Rhs { - nom.checkExpression(e, v.If) - } - } - case *ast.IncDecStmt: - nom.checkExpression(v.X, ifPos) - case *ast.RangeStmt: - nom.checkExpression(v.X, ifPos) - if v.Body != nil { - for _, e := range v.Body.List { - nom.checkStatement(e, ifPos) - } - } - case *ast.ReturnStmt: - for _, r := range v.Results { - nom.checkExpression(r, ifPos) - } - case *ast.SendStmt: - nom.checkExpression(v.Chan, ifPos) - nom.checkExpression(v.Value, ifPos) - case *ast.SwitchStmt: - nom.checkExpression(v.Tag, ifPos) - - for _, el := range v.Body.List { - clauses, ok := el.(*ast.CaseClause) - if !ok { - continue - } - - for _, c := range clauses.List { - switch v := c.(type) { - case *ast.BinaryExpr: - nom.checkExpression(v.X, ifPos) - nom.checkExpression(v.Y, ifPos) - case *ast.Ident: - nom.checkExpression(v, ifPos) - } - } - - for _, c := range clauses.Body { - switch v := c.(type) { - case *ast.AssignStmt: - for _, el := range v.Lhs { - nom.checkExpression(el, ifPos) - } - for _, el := range v.Rhs { - nom.checkExpression(el, ifPos) - } - case *ast.ExprStmt: - nom.checkExpression(v.X, ifPos) - } - } - } - case *ast.SelectStmt: - for _, el := range v.Body.List { - clause := el.(*ast.CommClause) - - nom.checkStatement(clause.Comm, ifPos) - - for _, c := range clause.Body { - switch v := c.(type) { - case *ast.AssignStmt: - for _, el := range v.Lhs { - nom.checkExpression(el, ifPos) - } - for _, el := range v.Rhs { - nom.checkExpression(el, ifPos) - } - case *ast.ExprStmt: - nom.checkExpression(v.X, ifPos) - } - } - } - case *ast.LabeledStmt: - nom.checkStatement(v.Stmt, ifPos) - } -} - -func (nom namedOccurrenceMap) checkExpression(candidate ast.Expr, ifPos token.Pos) { - switch v := candidate.(type) { - case *ast.BinaryExpr: - nom.checkExpression(v.X, ifPos) - nom.checkExpression(v.Y, ifPos) - case *ast.CallExpr: - for _, arg := range v.Args { - nom.checkExpression(arg, 
ifPos) - } - nom.checkExpression(v.Fun, ifPos) - if fun, ok := v.Fun.(*ast.SelectorExpr); ok { - nom.checkExpression(fun.X, ifPos) - } - case *ast.CompositeLit: - for _, el := range v.Elts { - switch v := el.(type) { - case *ast.Ident, *ast.CompositeLit: - nom.checkExpression(v, ifPos) - case *ast.KeyValueExpr: - nom.checkExpression(v.Key, ifPos) - nom.checkExpression(v.Value, ifPos) - case *ast.SelectorExpr: - nom.checkExpression(v.X, ifPos) - } - } - case *ast.FuncLit: - for _, el := range v.Body.List { - nom.checkStatement(el, ifPos) - } - case *ast.Ident: - if _, ok := nom[v.Name]; !ok || nom[v.Name].isEmponymousKey(ifPos) { - return - } - - scopeMarker1 := nom[v.Name].getScopeMarkerForPosition(v.Pos()) - - delete(nom[v.Name], scopeMarker1) - - for k := range nom { - for scopeMarker2 := range nom[k] { - if scopeMarker1 == scopeMarker2 { - delete(nom[k], scopeMarker2) - } - } - } - case *ast.StarExpr: - nom.checkExpression(v.X, ifPos) - case *ast.IndexExpr: - nom.checkExpression(v.X, ifPos) - switch index := v.Index.(type) { - case *ast.BinaryExpr: - nom.checkExpression(index.X, ifPos) - case *ast.Ident: - nom.checkExpression(index, ifPos) - } - case *ast.SelectorExpr: - nom.checkExpression(v.X, ifPos) - case *ast.SliceExpr: - nom.checkExpression(v.High, ifPos) - nom.checkExpression(v.Low, ifPos) - nom.checkExpression(v.X, ifPos) - case *ast.TypeAssertExpr: - nom.checkExpression(v.X, ifPos) - case *ast.UnaryExpr: - nom.checkExpression(v.X, ifPos) - } -} - -func isAssign(tok token.Token) bool { - return (tok == token.ASSIGN || - tok == token.ADD_ASSIGN || tok == token.SUB_ASSIGN || - tok == token.MUL_ASSIGN || tok == token.QUO_ASSIGN || tok == token.REM_ASSIGN || - tok == token.AND_ASSIGN || tok == token.OR_ASSIGN || tok == token.XOR_ASSIGN || tok == token.AND_NOT_ASSIGN || - tok == token.SHL_ASSIGN || tok == token.SHR_ASSIGN) -} diff --git a/vendor/github.com/esimonov/ifshort/pkg/analyzer/occurrences.go b/vendor/github.com/esimonov/ifshort/pkg/analyzer/occurrences.go deleted file mode 100644 index 0d3793a57e..0000000000 --- a/vendor/github.com/esimonov/ifshort/pkg/analyzer/occurrences.go +++ /dev/null @@ -1,268 +0,0 @@ -package analyzer - -import ( - "go/ast" - "go/token" - "time" - - "golang.org/x/tools/go/analysis" -) - -// occurrence is a variable occurrence. -type occurrence struct { - declarationPos token.Pos - ifStmtPos token.Pos -} - -func (occ *occurrence) isComplete() bool { - return occ.ifStmtPos != token.NoPos && occ.declarationPos != token.NoPos -} - -// scopeMarkeredOccurences is a map of scope markers to variable occurrences. -type scopeMarkeredOccurences map[int64]occurrence - -func (smo scopeMarkeredOccurences) getGreatestMarker() int64 { - var maxScopeMarker int64 - - for marker := range smo { - if marker > maxScopeMarker { - maxScopeMarker = marker - } - } - return maxScopeMarker -} - -// find scope marker of the greatest token.Pos that is smaller than provided. -func (smo scopeMarkeredOccurences) getScopeMarkerForPosition(pos token.Pos) int64 { - var m int64 - var foundPos token.Pos - - for marker, occ := range smo { - if occ.declarationPos < pos && occ.declarationPos >= foundPos { - m = marker - foundPos = occ.declarationPos - } - } - return m -} - -func (smo scopeMarkeredOccurences) isEmponymousKey(pos token.Pos) bool { - if pos == token.NoPos { - return false - } - - for _, occ := range smo { - if occ.ifStmtPos == pos { - return true - } - } - return false -} - -// namedOccurrenceMap is a map of variable names to scopeMarkeredOccurences. 
-type namedOccurrenceMap map[string]scopeMarkeredOccurences - -func getNamedOccurrenceMap(fdecl *ast.FuncDecl, pass *analysis.Pass) namedOccurrenceMap { - nom := namedOccurrenceMap(map[string]scopeMarkeredOccurences{}) - - if fdecl == nil || fdecl.Body == nil { - return nom - } - - for _, stmt := range fdecl.Body.List { - switch v := stmt.(type) { - case *ast.AssignStmt: - nom.addFromAssignment(pass, v) - case *ast.IfStmt: - nom.addFromCondition(v) - nom.addFromIfClause(v) - nom.addFromElseClause(v) - } - } - - candidates := namedOccurrenceMap(map[string]scopeMarkeredOccurences{}) - - for varName, markeredOccs := range nom { - for marker, occ := range markeredOccs { - if !occ.isComplete() && !nom.isFoundByScopeMarker(marker) { - continue - } - if _, ok := candidates[varName]; !ok { - candidates[varName] = scopeMarkeredOccurences{ - marker: occ, - } - } else { - candidates[varName][marker] = occ - } - } - } - return candidates -} - -func (nom namedOccurrenceMap) isFoundByScopeMarker(scopeMarker int64) bool { - var i int - - for _, markeredOccs := range nom { - for marker := range markeredOccs { - if marker == scopeMarker { - i++ - } - } - } - return i >= 2 -} - -func (nom namedOccurrenceMap) addFromAssignment(pass *analysis.Pass, assignment *ast.AssignStmt) { - if assignment.Tok != token.DEFINE { - return - } - - scopeMarker := time.Now().UnixNano() - - for i, el := range assignment.Lhs { - ident, ok := el.(*ast.Ident) - if !ok { - continue - } - - if ident.Name == "_" || ident.Obj == nil || isUnshortenableAssignment(ident.Obj.Decl) { - continue - } - - if markeredOccs, ok := nom[ident.Name]; ok { - markeredOccs[scopeMarker] = occurrence{ - declarationPos: ident.Pos(), - } - nom[ident.Name] = markeredOccs - } else { - newOcc := occurrence{} - if areFlagSettingsSatisfied(pass, assignment, i) { - newOcc.declarationPos = ident.Pos() - } - nom[ident.Name] = scopeMarkeredOccurences{scopeMarker: newOcc} - } - } -} - -func isUnshortenableAssignment(decl interface{}) bool { - assign, ok := decl.(*ast.AssignStmt) - if !ok { - return false - } - - for _, el := range assign.Rhs { - u, ok := el.(*ast.UnaryExpr) - if !ok { - continue - } - - if u.Op == token.AND { - if _, ok := u.X.(*ast.CompositeLit); ok { - return true - } - } - } - return false -} - -func areFlagSettingsSatisfied(pass *analysis.Pass, assignment *ast.AssignStmt, i int) bool { - lh := assignment.Lhs[i] - rh := assignment.Rhs[len(assignment.Rhs)-1] - - if len(assignment.Rhs) == len(assignment.Lhs) { - rh = assignment.Rhs[i] - } - - if pass.Fset.Position(rh.End()).Line-pass.Fset.Position(rh.Pos()).Line > maxDeclLines { - return false - } - if int(rh.End()-lh.Pos()) > maxDeclChars { - return false - } - return true -} - -func (nom namedOccurrenceMap) addFromCondition(stmt *ast.IfStmt) { - switch v := stmt.Cond.(type) { - case *ast.BinaryExpr: - for _, v := range [2]ast.Expr{v.X, v.Y} { - switch e := v.(type) { - case *ast.CallExpr: - nom.addFromCallExpr(stmt.If, e) - case *ast.Ident: - nom.addFromIdent(stmt.If, e) - case *ast.SelectorExpr: - nom.addFromIdent(stmt.If, e.X) - } - } - case *ast.CallExpr: - for _, a := range v.Args { - switch e := a.(type) { - case *ast.Ident: - nom.addFromIdent(stmt.If, e) - case *ast.CallExpr: - nom.addFromCallExpr(stmt.If, e) - } - } - case *ast.Ident: - nom.addFromIdent(stmt.If, v) - case *ast.UnaryExpr: - switch e := v.X.(type) { - case *ast.Ident: - nom.addFromIdent(stmt.If, e) - case *ast.SelectorExpr: - nom.addFromIdent(stmt.If, e.X) - } - } -} - -func (nom namedOccurrenceMap) addFromIfClause(stmt 
*ast.IfStmt) { - nom.addFromBlockStmt(stmt.Body, stmt.If) -} - -func (nom namedOccurrenceMap) addFromElseClause(stmt *ast.IfStmt) { - nom.addFromBlockStmt(stmt.Else, stmt.If) -} - -func (nom namedOccurrenceMap) addFromBlockStmt(stmt ast.Stmt, ifPos token.Pos) { - blockStmt, ok := stmt.(*ast.BlockStmt) - if !ok { - return - } - - for _, el := range blockStmt.List { - exptStmt, ok := el.(*ast.ExprStmt) - if !ok { - continue - } - - if callExpr, ok := exptStmt.X.(*ast.CallExpr); ok { - nom.addFromCallExpr(ifPos, callExpr) - } - } -} - -func (nom namedOccurrenceMap) addFromCallExpr(ifPos token.Pos, callExpr *ast.CallExpr) { - for _, arg := range callExpr.Args { - nom.addFromIdent(ifPos, arg) - } -} - -func (nom namedOccurrenceMap) addFromIdent(ifPos token.Pos, v ast.Expr) { - ident, ok := v.(*ast.Ident) - if !ok { - return - } - - if markeredOccs, ok := nom[ident.Name]; ok { - marker := nom[ident.Name].getGreatestMarker() - - occ := markeredOccs[marker] - if occ.isComplete() { - return - } - - occ.ifStmtPos = ifPos - nom[ident.Name][marker] = occ - } -} diff --git a/vendor/github.com/ettle/strcase/.golangci.yml b/vendor/github.com/ettle/strcase/.golangci.yml index 4d31fcc5b4..b7ce85d424 100644 --- a/vendor/github.com/ettle/strcase/.golangci.yml +++ b/vendor/github.com/ettle/strcase/.golangci.yml @@ -14,8 +14,6 @@ linters-settings: - ifElseChain - whyNoLint - wrapperFunc - golint: - min-confidence: 0.5 govet: check-shadowing: true lll: @@ -37,7 +35,6 @@ linters: disable-all: true enable: - bodyclose - - deadcode - depguard - dogsled - dupl @@ -47,26 +44,23 @@ linters: - gocyclo - gofmt - goimports - - golint - goprintffuncname - gosec - gosimple - govet - ineffassign - - interfacer - lll - misspell - nakedret - nolintlint + - revive - rowserrcheck - staticcheck - - structcheck - stylecheck - typecheck - unconvert - unparam - unused - - varcheck - whitespace # don't enable: diff --git a/vendor/github.com/ettle/strcase/.readme.tmpl b/vendor/github.com/ettle/strcase/.readme.tmpl index 135765c40a..4d7a894f0e 100644 --- a/vendor/github.com/ettle/strcase/.readme.tmpl +++ b/vendor/github.com/ettle/strcase/.readme.tmpl @@ -16,10 +16,10 @@ Convert strings to `snake_case`, `camelCase`, `PascalCase`, `kebab-case` and mor ## Index{{if .Consts}} * [Constants](#pkg-constants){{end}}{{if .Vars}} * [Variables](#pkg-variables){{end}}{{- range .Funcs -}}{{$name_html := html .Name}} -* [{{node_html $ .Decl false | sanitize}}](#{{$name_html}}){{- end}}{{- range .Types}}{{$tname_html := html .Name}} -* [type {{$tname_html}}](#{{$tname_html}}){{- range .Funcs}}{{$name_html := html .Name}} - * [{{node_html $ .Decl false | sanitize}}](#{{$name_html}}){{- end}}{{- range .Methods}}{{$name_html := html .Name}} - * [{{node_html $ .Decl false | sanitize}}](#{{$tname_html}}.{{$name_html}}){{- end}}{{- end}}{{- if $.Notes}}{{- range $marker, $item := $.Notes}} +* [{{node_html $ .Decl false | sanitize}}](#func-{{$name_html}}){{- end}}{{- range .Types}}{{$tname_html := html .Name}} +* [type {{$tname_html}}](#type-{{$tname_html}}){{- range .Funcs}}{{$name_html := html .Name}} + * [{{node_html $ .Decl false | sanitize}}](#func-{{$name_html}}){{- end}}{{- range .Methods}}{{$name_html := html .Name}} + * [{{node_html $ .Decl false | sanitize}}](#type-{{$tname_html}}.{{$name_html}}){{- end}}{{- end}}{{- if $.Notes}}{{- range $marker, $item := $.Notes}} * [{{noteTitle $marker | html}}s](#pkg-note-{{$marker}}){{end}}{{end}} {{if $.Examples}} #### Examples{{- range $.Examples}} diff --git a/vendor/github.com/ettle/strcase/Makefile 
b/vendor/github.com/ettle/strcase/Makefile index 462f8b473a..ac98b4aa54 100644 --- a/vendor/github.com/ettle/strcase/Makefile +++ b/vendor/github.com/ettle/strcase/Makefile @@ -1,16 +1,19 @@ .PHONY: benchmark docs lint test docs: - which godoc2ghmd || ( go get github.com/DevotedHealth/godoc2ghmd && go mod tidy ) + which godoc2ghmd || go get github.com/DevotedHealth/godoc2ghmd godoc2ghmd -template .readme.tmpl github.com/ettle/strcase > README.md + go mod tidy test: go test -cover ./... lint: - which golangci-lint || ( go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.27.0 && go mod tidy ) + which golangci-lint || go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.50.1 golangci-lint run golangci-lint run benchmark/*.go + go mod tidy benchmark: - cd benchmark && go test -bench=. -test.benchmem && go mod tidy + cd benchmark && go test -bench=. -test.benchmem + go mod tidy diff --git a/vendor/github.com/ettle/strcase/README.md b/vendor/github.com/ettle/strcase/README.md index ee165e3e5e..a984da80da 100644 --- a/vendor/github.com/ettle/strcase/README.md +++ b/vendor/github.com/ettle/strcase/README.md @@ -32,21 +32,24 @@ Example usage strcase.ToSnake("FOOBar") // foo_bar // Support Go initialisms - strcase.ToGoCamel("http_response") // HTTPResponse + strcase.ToGoPascal("http_response") // HTTPResponse // Specify case and delimiter strcase.ToCase("HelloWorld", strcase.UpperCase, '.') // HELLO.WORLD -### Why this package +## Why this package + String strcase is pretty straight forward and there are a number of methods to do it. This package is fully featured, more customizable, better tested, and -faster* than other packages and what you would probably whip up yourself. +faster than other packages and what you would probably whip up yourself. ### Unicode support + We work for with unicode strings and pay very little performance penalty for it as we optimized for the common use case of ASCII only strings. ### Customization + You can create a custom caser that changes the behavior to what you want. This customization also reduces the pressure for us to change the default behavior which means that things are more stable for everyone involved. The goal is to @@ -71,19 +74,22 @@ make the common path easy and fast, while making the uncommon path possible. assert.Equal(t, "http_200", c.ToSnake("http200")) ### Initialism support + By default, we use the golint intialisms list. You can customize and override the initialisms if you wish to add additional ones, such as "SSL" or "CMS" or domain specific ones to your industry. - ToGoCamel("http_response") // HTTPResponse + ToGoPascal("http_response") // HTTPResponse ToGoSnake("http_response") // HTTP_response ### Test coverage + We have a wide ranging test suite to make sure that we understand our behavior. Test coverage isn't everything, but we aim for 100% coverage. ### Fast + Optimized to reduce memory allocations with Builder. Benchmarked and optimized around common cases. @@ -96,56 +102,57 @@ Hopefully I was fair to each library and happy to rerun benchmarks differently or reword my commentary based on suggestions or updates. 
- // This package
- // Go intialisms and custom casers are slower
- BenchmarkToTitle-4 992491 1559 ns/op 32 B/op 1 allocs/op
- BenchmarkToSnake-4 1000000 1475 ns/op 32 B/op 1 allocs/op
- BenchmarkToSNAKE-4 1000000 1609 ns/op 32 B/op 1 allocs/op
- BenchmarkToGoSnake-4 275010 3697 ns/op 44 B/op 4 allocs/op
- BenchmarkToCustomCaser-4 342704 4191 ns/op 56 B/op 4 allocs/op
+ // This package - faster than almost all libraries
+ // Initialisms are more complicated and slightly slower, but still fast
+ BenchmarkToTitle-96 9617142 125.7 ns/op 16 B/op 1 allocs/op
+ BenchmarkToSnake-96 10659919 120.7 ns/op 16 B/op 1 allocs/op
+ BenchmarkToSNAKE-96 9018282 126.4 ns/op 16 B/op 1 allocs/op
+ BenchmarkToGoSnake-96 4903687 254.5 ns/op 26 B/op 4 allocs/op
+ BenchmarkToCustomCaser-96 4434489 265.0 ns/op 28 B/op 4 allocs/op

 // Segment has very fast snake case and camel case libraries
 // No features or customization, but very very fast
- BenchmarkSegment-4 1303809 938 ns/op 16 B/op 1 allocs/op
+ BenchmarkSegment-96 33625734 35.54 ns/op 16 B/op 1 allocs/op

- // Stdlib strings.Title for comparison, even though it only splits on spaces
- BenchmarkToTitleStrings-4 1213467 1164 ns/op 16 B/op 1 allocs/op
+ // Iancoleman has gotten some performance improvements, but remains
+ // without unicode support and lacks fine-grained customization
+ BenchmarkToSnakeIan-96 13141522 92.99 ns/op 16 B/op 1 allocs/op
+
+ // Stdlib strings.Title is deprecated; using golang.org/x/text
+ BenchmarkGolangOrgXTextCases-96 4665676 262.5 ns/op 272 B/op 2 allocs/op

 // Other libraries or code snippets
 // - Most are slower, by up to an order of magnitude
- // - None support initialisms or customization
+ // - No support for initialisms or customization
 // - Some generate only camelCase or snake_case
 // - Many lack unicode support
- BenchmarkToSnakeStoewer-4 973200 2075 ns/op 64 B/op 2 allocs/op
+ BenchmarkToSnakeStoewer-96 8095468 148.9 ns/op 64 B/op 2 allocs/op
 // Copying small rune arrays is slow
- BenchmarkToSnakeSiongui-4 264315 4229 ns/op 48 B/op 10 allocs/op
- BenchmarkGoValidator-4 206811 5152 ns/op 184 B/op 9 allocs/op
+ BenchmarkToSnakeSiongui-96 2912593 401.7 ns/op 112 B/op 19 allocs/op
+ BenchmarkGoValidator-96 3493800 342.6 ns/op 184 B/op 9 allocs/op
 // String alloction is slow
- BenchmarkToSnakeFatih-4 82675 12280 ns/op 392 B/op 26 allocs/op
- BenchmarkToSnakeIanColeman-4 83276 13903 ns/op 145 B/op 13 allocs/op
+ BenchmarkToSnakeFatih-96 1282648 945.1 ns/op 616 B/op 26 allocs/op
 // Regexp is slow
- BenchmarkToSnakeGolangPrograms-4 74448 18586 ns/op 176 B/op 11 allocs/op
+ BenchmarkToSnakeGolangPrograms-96 778674 1495 ns/op 227 B/op 11 allocs/op

 // These results aren't a surprise - my initial version of this library was
 // painfully slow. I think most of us, without spending some time with
 // profilers and benchmarks, would write also something on the slower side.

-### Why not this package
+### Zero dependencies
+
+That's right - zero. We only import the Go standard library. No hassles with
+dependencies, licensing, security alerts.
+
+## Why not this package
+
 If every nanosecond matters and this is used in a tight loop, use segment.io's
 libraries (https://github.com/segmentio/go-snakecase and
 https://github.com/segmentio/go-camelcase). They lack features, but make up for
-it by being blazing fast. Alternatively, if you need your code to work slightly
-differently, fork them and tailor it for your use case.
-
-If you don't like having external imports, I get it.
This package only imports -packages for testing, otherwise it only uses the standard library. If that's -not enough, you can use this repo as the foundation for your own. MIT Licensed. +it by being blazing fast. -This package is still relatively new and while I've used it for a while -personally, it doesn't have the miles that other packages do. I've tested this -code agains't their test cases to make sure that there aren't any surprises. +## Migrating from other packages -### Migrating from other packages If you are migrating from from another package, you may find slight differences in output. To reduce the delta, you may find it helpful to use the following custom casers to mimic the behavior of the other package. @@ -161,32 +168,32 @@ custom casers to mimic the behavior of the other package. ## Index -* [func ToCamel(s string) string](#ToCamel) -* [func ToCase(s string, wordCase WordCase, delimiter rune) string](#ToCase) -* [func ToGoCamel(s string) string](#ToGoCamel) -* [func ToGoCase(s string, wordCase WordCase, delimiter rune) string](#ToGoCase) -* [func ToGoKebab(s string) string](#ToGoKebab) -* [func ToGoPascal(s string) string](#ToGoPascal) -* [func ToGoSnake(s string) string](#ToGoSnake) -* [func ToKEBAB(s string) string](#ToKEBAB) -* [func ToKebab(s string) string](#ToKebab) -* [func ToPascal(s string) string](#ToPascal) -* [func ToSNAKE(s string) string](#ToSNAKE) -* [func ToSnake(s string) string](#ToSnake) -* [type Caser](#Caser) - * [func NewCaser(goInitialisms bool, initialismOverrides map[string]bool, splitFn SplitFn) *Caser](#NewCaser) - * [func (c *Caser) ToCamel(s string) string](#Caser.ToCamel) - * [func (c *Caser) ToCase(s string, wordCase WordCase, delimiter rune) string](#Caser.ToCase) - * [func (c *Caser) ToKEBAB(s string) string](#Caser.ToKEBAB) - * [func (c *Caser) ToKebab(s string) string](#Caser.ToKebab) - * [func (c *Caser) ToPascal(s string) string](#Caser.ToPascal) - * [func (c *Caser) ToSNAKE(s string) string](#Caser.ToSNAKE) - * [func (c *Caser) ToSnake(s string) string](#Caser.ToSnake) -* [type SplitAction](#SplitAction) -* [type SplitFn](#SplitFn) - * [func NewSplitFn(delimiters []rune, splitOptions ...SplitOption) SplitFn](#NewSplitFn) -* [type SplitOption](#SplitOption) -* [type WordCase](#WordCase) +* [func ToCamel(s string) string](#func-ToCamel) +* [func ToCase(s string, wordCase WordCase, delimiter rune) string](#func-ToCase) +* [func ToGoCamel(s string) string](#func-ToGoCamel) +* [func ToGoCase(s string, wordCase WordCase, delimiter rune) string](#func-ToGoCase) +* [func ToGoKebab(s string) string](#func-ToGoKebab) +* [func ToGoPascal(s string) string](#func-ToGoPascal) +* [func ToGoSnake(s string) string](#func-ToGoSnake) +* [func ToKEBAB(s string) string](#func-ToKEBAB) +* [func ToKebab(s string) string](#func-ToKebab) +* [func ToPascal(s string) string](#func-ToPascal) +* [func ToSNAKE(s string) string](#func-ToSNAKE) +* [func ToSnake(s string) string](#func-ToSnake) +* [type Caser](#type-Caser) + * [func NewCaser(goInitialisms bool, initialismOverrides map[string]bool, splitFn SplitFn) *Caser](#func-NewCaser) + * [func (c *Caser) ToCamel(s string) string](#type-Caser.ToCamel) + * [func (c *Caser) ToCase(s string, wordCase WordCase, delimiter rune) string](#type-Caser.ToCase) + * [func (c *Caser) ToKEBAB(s string) string](#type-Caser.ToKEBAB) + * [func (c *Caser) ToKebab(s string) string](#type-Caser.ToKebab) + * [func (c *Caser) ToPascal(s string) string](#type-Caser.ToPascal) + * [func (c *Caser) ToSNAKE(s string) 
string](#type-Caser.ToSNAKE) + * [func (c *Caser) ToSnake(s string) string](#type-Caser.ToSnake) +* [type SplitAction](#type-SplitAction) +* [type SplitFn](#type-SplitFn) + * [func NewSplitFn(delimiters []rune, splitOptions ...SplitOption) SplitFn](#func-NewSplitFn) +* [type SplitOption](#type-SplitOption) +* [type WordCase](#type-WordCase) @@ -201,7 +208,7 @@ Also known as lowerCamelCase or mixedCase. -## func [ToCase](./strcase.go#L70) +## func [ToCase](./strcase.go#L72) ``` go func ToCase(s string, wordCase WordCase, delimiter rune) string ``` @@ -209,18 +216,20 @@ ToCase returns words in given case and delimiter. -## func [ToGoCamel](./strcase.go#L65) +## func [ToGoCamel](./strcase.go#L67) ``` go func ToGoCamel(s string) string ``` ToGoCamel returns words in camelCase (capitalized words concatenated together, with first word lower case). Also known as lowerCamelCase or mixedCase. -Respects Go's common initialisms (e.g. httpResponse -> HTTPResponse). +Respects Go's common initialisms, but first word remains lowercased which is +important for code generator use cases (e.g. toJson -> toJSON, httpResponse +-> httpResponse). -## func [ToGoCase](./strcase.go#L77) +## func [ToGoCase](./strcase.go#L79) ``` go func ToGoCase(s string, wordCase WordCase, delimiter rune) string ``` @@ -415,7 +424,7 @@ ToSnake returns words in snake_case (lower case words with underscores). -## type [SplitAction](./split.go#L110) +## type [SplitAction](./split.go#L111) ``` go type SplitAction int ``` @@ -457,7 +466,7 @@ SplitFn defines how to split a string into words -### func [NewSplitFn](./split.go#L14-L17) +### func [NewSplitFn](./split.go#L15-L18) ``` go func NewSplitFn( delimiters []rune, @@ -469,13 +478,12 @@ NewSplitFn returns a SplitFn based on the options provided. NewSplitFn covers the majority of common options that other strcase libraries provide and should allow you to simply create a custom caser. For more complicated use cases, feel free to write your own SplitFn -nolint:gocyclo -## type [SplitOption](./split.go#L93) +## type [SplitOption](./split.go#L94) ``` go type SplitOption int ``` @@ -524,6 +532,9 @@ const ( // TitleCase - Only first letter upper cased (Example) TitleCase // CamelCase - TitleCase except lower case first word (exampleText) + // Notably, even if the first word is an initialism, it will be lower + // cased. This is important for code generators where capital letters + // mean exported functions. i.e. 
jsonString(), not JSONString() CamelCase ) ``` diff --git a/vendor/github.com/ettle/strcase/assert.go b/vendor/github.com/ettle/strcase/assert.go new file mode 100644 index 0000000000..09344e40f2 --- /dev/null +++ b/vendor/github.com/ettle/strcase/assert.go @@ -0,0 +1,24 @@ +package strcase + +// We use a lightweight replacement for testify/assert to reduce dependencies + +// testingT interface allows us to test our assert functions +type testingT interface { + Logf(format string, args ...interface{}) + Fail() +} + +// assertTrue will fail if the value is not true +func assertTrue(t testingT, value bool) { + if !value { + t.Fail() + } +} + +// assertEqual will fail if the two strings are not equal +func assertEqual(t testingT, expected, actual string) { + if expected != actual { + t.Logf("Expected: %s Actual: %s", expected, actual) + t.Fail() + } +} diff --git a/vendor/github.com/ettle/strcase/caser.go b/vendor/github.com/ettle/strcase/caser.go index 891a671897..2e7eb955ba 100644 --- a/vendor/github.com/ettle/strcase/caser.go +++ b/vendor/github.com/ettle/strcase/caser.go @@ -10,17 +10,17 @@ type Caser struct { // // A Caser should be created when you want fine grained control over how the words are split. // -// Notes on function arguments +// Notes on function arguments // -// goInitialisms: Whether to use Golint's intialisms +// goInitialisms: Whether to use Golint's intialisms // -// initialismOverrides: A mapping of extra initialisms -// Keys must be in ALL CAPS. Merged with Golint's if goInitialisms is set. -// Setting a key to false will override Golint's. +// initialismOverrides: A mapping of extra initialisms +// Keys must be in ALL CAPS. Merged with Golint's if goInitialisms is set. +// Setting a key to false will override Golint's. // -// splitFn: How to separate words -// Override the default split function. Consider using NewSplitFn to -// configure one instead of writing your own. +// splitFn: How to separate words +// Override the default split function. Consider using NewSplitFn to +// configure one instead of writing your own. 
func NewCaser(goInitialisms bool, initialismOverrides map[string]bool, splitFn SplitFn) *Caser { c := &Caser{ initialisms: golintInitialisms, diff --git a/vendor/github.com/ettle/strcase/convert.go b/vendor/github.com/ettle/strcase/convert.go index 70fedb1449..cb901d079d 100644 --- a/vendor/github.com/ettle/strcase/convert.go +++ b/vendor/github.com/ettle/strcase/convert.go @@ -29,6 +29,7 @@ const ( // Case 2: UpperCase words, which don't need to support initialisms since everything is in upper case // convertWithoutInitialims only works for to UpperCase and LowerCase +// //nolint:gocyclo func convertWithoutInitialisms(input string, delimiter rune, wordCase WordCase) string { input = strings.TrimSpace(input) @@ -38,7 +39,7 @@ func convertWithoutInitialisms(input string, delimiter rune, wordCase WordCase) } var b strings.Builder - b.Grow(len(input) * 2) // In case we need to write delimiters where they weren't before + b.Grow(len(input) + 4) // In case we need to write delimiters where they weren't before var prev, curr rune next := runes[0] // 0 length will have already returned so safe to index @@ -90,13 +91,14 @@ func convertWithoutInitialisms(input string, delimiter rune, wordCase WordCase) // Must be original case b.WriteRune(curr) } - inWord = inWord || true + inWord = true } return b.String() } // convertWithGoInitialisms changes a input string to a certain case with a // delimiter, respecting go initialisms but not skip runes +// //nolint:gocyclo func convertWithGoInitialisms(input string, delimiter rune, wordCase WordCase) string { input = strings.TrimSpace(input) @@ -106,7 +108,7 @@ func convertWithGoInitialisms(input string, delimiter rune, wordCase WordCase) s } var b strings.Builder - b.Grow(len(input) * 2) // In case we need to write delimiters where they weren't before + b.Grow(len(input) + 4) // In case we need to write delimiters where they weren't before firstWord := true @@ -122,10 +124,15 @@ func convertWithGoInitialisms(input string, delimiter rune, wordCase WordCase) s // Don't bother with initialisms if the word is longer than 5 // A quick proxy to avoid the extra memory allocations if end-start <= 5 { - key := strings.ToUpper(string(runes[start:end])) - if golintInitialisms[key] { + var word strings.Builder + word.Grow(end - start) + for i := start; i < end; i++ { + word.WriteRune(toUpper(runes[i])) + } + w := word.String() + if golintInitialisms[w] { if !firstWord || wordCase != CamelCase { - b.WriteString(key) + b.WriteString(w) firstWord = false return } @@ -188,6 +195,7 @@ func convertWithGoInitialisms(input string, delimiter rune, wordCase WordCase) s // convert changes a input string to a certain case with a delimiter, // respecting arbitrary initialisms and skip characters +// //nolint:gocyclo func convert(input string, fn SplitFn, delimiter rune, wordCase WordCase, initialisms map[string]bool) string { @@ -198,7 +206,7 @@ func convert(input string, fn SplitFn, delimiter rune, wordCase WordCase, } var b strings.Builder - b.Grow(len(input) * 2) // In case we need to write delimiters where they weren't before + b.Grow(len(input) + 4) // In case we need to write delimiters where they weren't before firstWord := true var skipIndexes []int @@ -221,13 +229,14 @@ func convert(input string, fn SplitFn, delimiter rune, wordCase WordCase, // I'm open to it if there is a use case if initialisms != nil { var word strings.Builder + word.Grow(end - start) for i := start; i < end; i++ { word.WriteRune(toUpper(runes[i])) } - key := word.String() - if initialisms[key] { + w := 
word.String() + if initialisms[w] { if !firstWord || wordCase != CamelCase { - b.WriteString(key) + b.WriteString(w) firstWord = false return } diff --git a/vendor/github.com/ettle/strcase/doc.go b/vendor/github.com/ettle/strcase/doc.go index b898a4e45f..c3bf14a8f5 100644 --- a/vendor/github.com/ettle/strcase/doc.go +++ b/vendor/github.com/ettle/strcase/doc.go @@ -2,78 +2,78 @@ Package strcase is a package for converting strings into various word cases (e.g. snake_case, camelCase) - go get -u github.com/ettle/strcase + go get -u github.com/ettle/strcase Example usage - strcase.ToSnake("Hello World") // hello_world - strcase.ToSNAKE("Hello World") // HELLO_WORLD + strcase.ToSnake("Hello World") // hello_world + strcase.ToSNAKE("Hello World") // HELLO_WORLD - strcase.ToKebab("helloWorld") // hello-world - strcase.ToKEBAB("helloWorld") // HELLO-WORLD + strcase.ToKebab("helloWorld") // hello-world + strcase.ToKEBAB("helloWorld") // HELLO-WORLD - strcase.ToPascal("hello-world") // HelloWorld - strcase.ToCamel("hello-world") // helloWorld + strcase.ToPascal("hello-world") // HelloWorld + strcase.ToCamel("hello-world") // helloWorld - // Handle odd cases - strcase.ToSnake("FOOBar") // foo_bar + // Handle odd cases + strcase.ToSnake("FOOBar") // foo_bar - // Support Go initialisms - strcase.ToGoPascal("http_response") // HTTPResponse + // Support Go initialisms + strcase.ToGoPascal("http_response") // HTTPResponse - // Specify case and delimiter - strcase.ToCase("HelloWorld", strcase.UpperCase, '.') // HELLO.WORLD + // Specify case and delimiter + strcase.ToCase("HelloWorld", strcase.UpperCase, '.') // HELLO.WORLD -Why this package +## Why this package String strcase is pretty straight forward and there are a number of methods to do it. This package is fully featured, more customizable, better tested, and -faster* than other packages and what you would probably whip up yourself. +faster than other packages and what you would probably whip up yourself. -Unicode support +### Unicode support We work for with unicode strings and pay very little performance penalty for it as we optimized for the common use case of ASCII only strings. -Customization +### Customization You can create a custom caser that changes the behavior to what you want. This customization also reduces the pressure for us to change the default behavior which means that things are more stable for everyone involved. The goal is to make the common path easy and fast, while making the uncommon path possible. - c := NewCaser( - // Use Go's default initialisms e.g. ID, HTML - true, - // Override initialisms (e.g. don't initialize HTML but initialize SSL - map[string]bool{"SSL": true, "HTML": false}, - // Write your own custom SplitFn - // - NewSplitFn( - []rune{'*', '.', ','}, - SplitCase, - SplitAcronym, - PreserveNumberFormatting, - SplitBeforeNumber, - SplitAfterNumber, - )) - assert.Equal(t, "http_200", c.ToSnake("http200")) - -Initialism support + c := NewCaser( + // Use Go's default initialisms e.g. ID, HTML + true, + // Override initialisms (e.g. don't initialize HTML but initialize SSL + map[string]bool{"SSL": true, "HTML": false}, + // Write your own custom SplitFn + // + NewSplitFn( + []rune{'*', '.', ','}, + SplitCase, + SplitAcronym, + PreserveNumberFormatting, + SplitBeforeNumber, + SplitAfterNumber, + )) + assert.Equal(t, "http_200", c.ToSnake("http200")) + +### Initialism support By default, we use the golint intialisms list. 
You can customize and override the initialisms if you wish to add additional ones, such as "SSL" or "CMS" or domain specific ones to your industry. - ToGoPascal("http_response") // HTTPResponse - ToGoSnake("http_response") // HTTP_response + ToGoPascal("http_response") // HTTPResponse + ToGoSnake("http_response") // HTTP_response -Test coverage +### Test coverage We have a wide ranging test suite to make sure that we understand our behavior. Test coverage isn't everything, but we aim for 100% coverage. -Fast +### Fast Optimized to reduce memory allocations with Builder. Benchmarked and optimized around common cases. @@ -86,70 +86,65 @@ common cases have a large performance impact. Hopefully I was fair to each library and happy to rerun benchmarks differently or reword my commentary based on suggestions or updates. - // This package - faster then almost all libraries - // Initialisms are more complicated and slightly slower, but still faster then other libraries that do less - BenchmarkToTitle-4 7821166 221 ns/op 32 B/op 1 allocs/op - BenchmarkToSnake-4 9378589 202 ns/op 32 B/op 1 allocs/op - BenchmarkToSNAKE-4 6174453 223 ns/op 32 B/op 1 allocs/op - BenchmarkToGoSnake-4 3114266 434 ns/op 44 B/op 4 allocs/op - BenchmarkToCustomCaser-4 2973855 448 ns/op 56 B/op 4 allocs/op - - // Segment has very fast snake case and camel case libraries - // No features or customization, but very very fast - BenchmarkSegment-4 24003495 64.9 ns/op 16 B/op 1 allocs/op - - // Stdlib strings.Title for comparison, even though it only splits on spaces - BenchmarkToTitleStrings-4 11259376 161 ns/op 16 B/op 1 allocs/op - - // Other libraries or code snippets - // - Most are slower, by up to an order of magnitude - // - None support initialisms or customization - // - Some generate only camelCase or snake_case - // - Many lack unicode support - BenchmarkToSnakeStoewer-4 7103268 297 ns/op 64 B/op 2 allocs/op - // Copying small rune arrays is slow - BenchmarkToSnakeSiongui-4 3710768 413 ns/op 48 B/op 10 allocs/op - BenchmarkGoValidator-4 2416479 1049 ns/op 184 B/op 9 allocs/op - // String alloction is slow - BenchmarkToSnakeFatih-4 1000000 2407 ns/op 624 B/op 26 allocs/op - BenchmarkToSnakeIanColeman-4 1005766 1426 ns/op 160 B/op 13 allocs/op - // Regexp is slow - BenchmarkToSnakeGolangPrograms-4 614689 2237 ns/op 225 B/op 11 allocs/op - - - - // These results aren't a surprise - my initial version of this library was - // painfully slow. I think most of us, without spending some time with - // profilers and benchmarks, would write also something on the slower side. 
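To sanity-check figures like these locally, a minimal benchmark sketch against the package's documented API (run with `go test -bench=. -benchmem`); the input strings are arbitrary choices, not necessarily the corpus behind the published numbers:

```go
package strcase_test

import (
	"testing"

	"github.com/ettle/strcase"
)

// Plain conversion path, no initialism handling.
func BenchmarkToSnake(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		_ = strcase.ToSnake("Hello World")
	}
}

// Exercises the Go-initialism path, which the docs note is slightly slower.
func BenchmarkToGoSnake(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		_ = strcase.ToGoSnake("http_response")
	}
}
```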
- - -Why not this package + // This package - faster than almost all libraries + // Initialisms are more complicated and slightly slower, but still fast + BenchmarkToTitle-96 9617142 125.7 ns/op 16 B/op 1 allocs/op + BenchmarkToSnake-96 10659919 120.7 ns/op 16 B/op 1 allocs/op + BenchmarkToSNAKE-96 9018282 126.4 ns/op 16 B/op 1 allocs/op + BenchmarkToGoSnake-96 4903687 254.5 ns/op 26 B/op 4 allocs/op + BenchmarkToCustomCaser-96 4434489 265.0 ns/op 28 B/op 4 allocs/op + + // Segment has very fast snake case and camel case libraries + // No features or customization, but very very fast + BenchmarkSegment-96 33625734 35.54 ns/op 16 B/op 1 allocs/op + + // Iancoleman has gotten some performance improvements, but remains + // without unicode support and lacks fine-grained customization + BenchmarkToSnakeIan-96 13141522 92.99 ns/op 16 B/op 1 allocs/op + + // Stdlib strings.Title is deprecated; using golang.org/x.text + BenchmarkGolangOrgXTextCases-96 4665676 262.5 ns/op 272 B/op 2 allocs/op + + // Other libraries or code snippets + // - Most are slower, by up to an order of magnitude + // - No support for initialisms or customization + // - Some generate only camelCase or snake_case + // - Many lack unicode support + BenchmarkToSnakeStoewer-96 8095468 148.9 ns/op 64 B/op 2 allocs/op + // Copying small rune arrays is slow + BenchmarkToSnakeSiongui-96 2912593 401.7 ns/op 112 B/op 19 allocs/op + BenchmarkGoValidator-96 3493800 342.6 ns/op 184 B/op 9 allocs/op + // String allocation is slow + BenchmarkToSnakeFatih-96 1282648 945.1 ns/op 616 B/op 26 allocs/op + // Regexp is slow + BenchmarkToSnakeGolangPrograms-96 778674 1495 ns/op 227 B/op 11 allocs/op + + // These results aren't a surprise - my initial version of this library was + // painfully slow. I think most of us, without spending some time with + // profilers and benchmarks, would also write something on the slower side. + +### Zero dependencies + +That's right - zero. We only import the Go standard library. No hassles with +dependencies, licensing, security alerts. + +## Why not this package If every nanosecond matters and this is used in a tight loop, use segment.io's libraries (https://github.com/segmentio/go-snakecase and https://github.com/segmentio/go-camelcase). They lack features, but make up for -it by being blazing fast. Alternatively, if you need your code to work slightly -differently, fork them and tailor it for your use case. - -If you don't like having external imports, I get it. This package only imports -packages for testing, otherwise it only uses the standard library. If that's -not enough, you can use this repo as the foundation for your own. MIT Licensed. +it by being blazing fast. -This package is still relatively new and while I've used it for a while -personally, it doesn't have the miles that other packages do. I've tested this -code agains't their test cases to make sure that there aren't any surprises. - -Migrating from other packages +## Migrating from other packages If you are migrating from another package, you may find slight differences in output. To reduce the delta, you may find it helpful to use the following custom casers to mimic the behavior of the other package.
- // From https://github.com/iancoleman/strcase - var c = NewCaser(false, nil, NewSplitFn([]rune{'_', '-', '.'}, SplitCase, SplitAcronym, SplitBeforeNumber)) - - // From https://github.com/stoewer/go-strcase - var c = NewCaser(false, nil, NewSplitFn([]rune{'_', '-'}, SplitCase), SplitAcronym) + // From https://github.com/iancoleman/strcase + var c = NewCaser(false, nil, NewSplitFn([]rune{'_', '-', '.'}, SplitCase, SplitAcronym, SplitBeforeNumber)) + // From https://github.com/stoewer/go-strcase + var c = NewCaser(false, nil, NewSplitFn([]rune{'_', '-'}, SplitCase), SplitAcronym) */ package strcase diff --git a/vendor/github.com/ettle/strcase/split.go b/vendor/github.com/ettle/strcase/split.go index 84381106bc..32bc29759a 100644 --- a/vendor/github.com/ettle/strcase/split.go +++ b/vendor/github.com/ettle/strcase/split.go @@ -10,6 +10,7 @@ type SplitFn func(prev, curr, next rune) SplitAction // NewSplitFn covers the majority of common options that other strcase // libraries provide and should allow you to simply create a custom caser. // For more complicated use cases, feel free to write your own SplitFn +// //nolint:gocyclo func NewSplitFn( delimiters []rune, diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md index 5152bf59bf..be82827cac 100644 --- a/vendor/github.com/fatih/color/README.md +++ b/vendor/github.com/fatih/color/README.md @@ -7,7 +7,6 @@ suits you. ![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) - ## Install ```bash @@ -124,17 +123,17 @@ fmt.Println("All text will now be bold magenta.") ``` ### Disable/Enable color - + There might be a case where you want to explicitly disable/enable color output. the `go-isatty` package will automatically disable color output for non-tty output streams (for example if the output were piped directly to `less`). The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment -variable is set (regardless of its value). +variable is set to a non-empty string. -`Color` has support to disable/enable colors programatically both globally and +`Color` has support to disable/enable colors programmatically both globally and for single color definitions. For example suppose you have a CLI app and a -`--no-color` bool flag. You can easily disable the color output with: +`-no-color` bool flag. You can easily disable the color output with: ```go var flagNoColor = flag.Bool("no-color", false, "Disable color output") @@ -167,11 +166,10 @@ To output color in GitHub Actions (or other CI systems that support ANSI colors) * Save/Return previous values * Evaluate fmt.Formatter interface - ## Credits - * [Fatih Arslan](https://github.com/fatih) - * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) +* [Fatih Arslan](https://github.com/fatih) +* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) ## License diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go index 98a60f3c88..c4234287dc 100644 --- a/vendor/github.com/fatih/color/color.go +++ b/vendor/github.com/fatih/color/color.go @@ -19,10 +19,10 @@ var ( // set (regardless of its value). This is a global option and affects all // colors. For more control over each color block use the methods // DisableColor() individually. 
- NoColor = noColorExists() || os.Getenv("TERM") == "dumb" || + NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" || (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) - // Output defines the standard output of the print functions. By default + // Output defines the standard output of the print functions. By default, // os.Stdout is used. Output = colorable.NewColorableStdout() @@ -35,10 +35,9 @@ var ( colorsCacheMu sync.Mutex // protects colorsCache ) -// noColorExists returns true if the environment variable NO_COLOR exists. -func noColorExists() bool { - _, exists := os.LookupEnv("NO_COLOR") - return exists +// noColorIsSet returns true if the environment variable NO_COLOR is set to a non-empty string. +func noColorIsSet() bool { + return os.Getenv("NO_COLOR") != "" } // Color defines a custom color object which is defined by SGR parameters. @@ -66,6 +65,29 @@ const ( CrossedOut ) +const ( + ResetBold Attribute = iota + 22 + ResetItalic + ResetUnderline + ResetBlinking + _ + ResetReversed + ResetConcealed + ResetCrossedOut +) + +var mapResetAttributes map[Attribute]Attribute = map[Attribute]Attribute{ + Bold: ResetBold, + Faint: ResetBold, + Italic: ResetItalic, + Underline: ResetUnderline, + BlinkSlow: ResetBlinking, + BlinkRapid: ResetBlinking, + ReverseVideo: ResetReversed, + Concealed: ResetConcealed, + CrossedOut: ResetCrossedOut, +} + // Foreground text colors const ( FgBlack Attribute = iota + 30 @@ -120,7 +142,7 @@ func New(value ...Attribute) *Color { params: make([]Attribute, 0), } - if noColorExists() { + if noColorIsSet() { c.noColor = boolPtr(true) } @@ -152,7 +174,7 @@ func (c *Color) Set() *Color { return c } - fmt.Fprintf(Output, c.format()) + fmt.Fprint(Output, c.format()) return c } @@ -164,16 +186,21 @@ func (c *Color) unset() { Unset() } -func (c *Color) setWriter(w io.Writer) *Color { +// SetWriter is used to set the SGR sequence with the given io.Writer. This is +// a low-level function, and users should use the higher-level functions, such +// as color.Fprint, color.Print, etc. +func (c *Color) SetWriter(w io.Writer) *Color { if c.isNoColorSet() { return c } - fmt.Fprintf(w, c.format()) + fmt.Fprint(w, c.format()) return c } -func (c *Color) unsetWriter(w io.Writer) { +// UnsetWriter resets all escape attributes and clears the output with the given +// io.Writer. Usually should be called after SetWriter(). +func (c *Color) UnsetWriter(w io.Writer) { if c.isNoColorSet() { return } @@ -192,20 +219,14 @@ func (c *Color) Add(value ...Attribute) *Color { return c } -func (c *Color) prepend(value Attribute) { - c.params = append(c.params, 0) - copy(c.params[1:], c.params[0:]) - c.params[0] = value -} - // Fprint formats using the default formats for its operands and writes to w. // Spaces are added between operands when neither is a string. // It returns the number of bytes written and any write error encountered. // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File.
func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) + c.SetWriter(w) + defer c.UnsetWriter(w) return fmt.Fprintf(w, format, a...) } @@ -248,10 +269,7 @@ func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprintln(w, a...) + return fmt.Fprintln(w, c.wrap(fmt.Sprint(a...))) } // Println formats using the default formats for its operands and writes to @@ -260,10 +278,7 @@ func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { // encountered. This is the standard fmt.Print() method wrapped with the given // color. func (c *Color) Println(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintln(Output, a...) + return fmt.Fprintln(Output, c.wrap(fmt.Sprint(a...))) } // Sprint is just like Print, but returns a string instead of printing it. @@ -273,7 +288,7 @@ func (c *Color) Sprint(a ...interface{}) string { // Sprintln is just like Println, but returns a string instead of printing it. func (c *Color) Sprintln(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) + return fmt.Sprintln(c.Sprint(a...)) } // Sprintf is just like Printf, but returns a string instead of printing it. @@ -355,7 +370,7 @@ func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { // string. Windows users should use this in conjunction with color.Output. func (c *Color) SprintlnFunc() func(a ...interface{}) string { return func(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) + return fmt.Sprintln(c.Sprint(a...)) } } @@ -385,7 +400,18 @@ func (c *Color) format() string { } func (c *Color) unformat() string { - return fmt.Sprintf("%s[%dm", escape, Reset) + //return fmt.Sprintf("%s[%dm", escape, Reset) + //for each element in sequence let's use the specific reset escape, or the generic one if not found + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(Reset)) + ra, ok := mapResetAttributes[v] + if ok { + format[i] = strconv.Itoa(int(ra)) + } + } + + return fmt.Sprintf("%s[%sm", escape, strings.Join(format, ";")) } // DisableColor disables the color output. Useful to not change any existing @@ -396,7 +422,7 @@ func (c *Color) DisableColor() { } // EnableColor enables the color output. Use it in conjunction with -// DisableColor(). Otherwise this method has no side effects. +// DisableColor(). Otherwise, this method has no side effects. func (c *Color) EnableColor() { c.noColor = boolPtr(false) } @@ -413,6 +439,12 @@ func (c *Color) isNoColorSet() bool { // Equals returns a boolean value indicating whether two colors are equal. func (c *Color) Equals(c2 *Color) bool { + if c == nil && c2 == nil { + return true + } + if c == nil || c2 == nil { + return false + } if len(c.params) != len(c2.params) { return false } diff --git a/vendor/github.com/fatih/color/color_windows.go b/vendor/github.com/fatih/color/color_windows.go new file mode 100644 index 0000000000..be01c558e5 --- /dev/null +++ b/vendor/github.com/fatih/color/color_windows.go @@ -0,0 +1,19 @@ +package color + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func init() { + // Opt-in for ansi color support for current process.
+ // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences + var outMode uint32 + out := windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(out, &outMode); err != nil { + return + } + outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + _ = windows.SetConsoleMode(out, outMode) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go index 04541de786..9491ad5413 100644 --- a/vendor/github.com/fatih/color/doc.go +++ b/vendor/github.com/fatih/color/doc.go @@ -5,106 +5,105 @@ that suits you. Use simple and default helper functions with predefined foreground colors: - color.Cyan("Prints text in cyan.") + color.Cyan("Prints text in cyan.") - // a newline will be appended automatically - color.Blue("Prints %s in blue.", "text") + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") - // More default foreground colors.. - color.Red("We have red") - color.Yellow("Yellow color too!") - color.Magenta("And many others ..") + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") - // Hi-intensity colors - color.HiGreen("Bright green color.") - color.HiBlack("Bright black means gray..") - color.HiWhite("Shiny white color!") + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") -However there are times where custom color mixes are required. Below are some +However, there are times when custom color mixes are required. Below are some examples to create custom color objects and use the print functions of each separate color object. - // Create a new color object - c := color.New(color.FgCyan).Add(color.Underline) - c.Println("Prints cyan text with an underline.") + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") - // Or just add them to New() - d := color.New(color.FgCyan, color.Bold) - d.Printf("This prints bold cyan %s\n", "too!.") + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") - // Mix up foreground and background colors, create new mixes! - red := color.New(color.FgRed) + // Mix up foreground and background colors, create new mixes! 
+ red := color.New(color.FgRed) - boldRed := red.Add(color.Bold) - boldRed.Println("This will print text in bold red.") + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") - whiteBackground := red.Add(color.BgWhite) - whiteBackground.Println("Red text with White background.") + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") - // Use your own io.Writer output - color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - blue := color.New(color.FgBlue) - blue.Fprint(myWriter, "This will print text in blue.") + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") You can create PrintXxx functions to simplify even more: - // Create a custom print function for convenient - red := color.New(color.FgRed).PrintfFunc() - red("warning") - red("error: %s", err) + // Create a custom print function for convenience + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) - // Mix up multiple attributes - notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() - notice("don't forget this...") + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") You can also use FprintXxx functions to pass your own io.Writer: - blue := color.New(FgBlue).FprintfFunc() - blue(myWriter, "important notice: %s", stars) - - // Mix up with multiple attributes - success := color.New(color.Bold, color.FgGreen).FprintlnFunc() - success(myWriter, don't forget this...") + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, "don't forget this...") Or create SprintXxx functions to mix strings with other non-colorized strings: - yellow := New(FgYellow).SprintFunc() - red := New(FgRed).SprintFunc() + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() - fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Printf("this %s rocks!\n", info("package")) + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) Windows support is enabled by default. All Print functions work as intended. -However only for color.SprintXXX functions, user should use fmt.FprintXXX and +However, only for color.SprintXXX functions, user should use fmt.FprintXXX and set the output to color.Output: - fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) Using with existing code is possible. Just use the Set() method to set the standard output to the given parameters. That way a rewrite of existing code is not required. - // Use handy standard colors.
+ color.Set(color.FgYellow) - fmt.Println("Existing text will be now in Yellow") - fmt.Printf("This one %s\n", "too") + fmt.Println("Existing text will be now in Yellow") + fmt.Printf("This one %s\n", "too") - color.Unset() // don't forget to unset + color.Unset() // don't forget to unset - // You can mix up parameters - color.Set(color.FgMagenta, color.Bold) - defer color.Unset() // use it in your function + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function - fmt.Println("All text will be now bold magenta.") + fmt.Println("All text will be now bold magenta.") There might be a case where you want to disable color output (for example to pipe the standard output of your app to somewhere else). `Color` has support to @@ -112,24 +111,24 @@ disable colors both globally and for single color definition. For example suppose you have a CLI app and a `--no-color` bool flag. You can easily disable the color output with: - var flagNoColor = flag.Bool("no-color", false, "Disable color output") + var flagNoColor = flag.Bool("no-color", false, "Disable color output") - if *flagNoColor { - color.NoColor = true // disables colorized output - } + if *flagNoColor { + color.NoColor = true // disables colorized output + } You can also disable the color by setting the NO_COLOR environment variable to any value. It also has support for single color definitions (local). You can disable/enable color output on the fly: - c := color.New(color.FgCyan) - c.Println("Prints cyan text") + c := color.New(color.FgCyan) + c.Println("Prints cyan text") - c.DisableColor() - c.Println("This is printed without any color") + c.DisableColor() + c.Println("This is printed without any color") - c.EnableColor() - c.Println("This prints again cyan...") + c.EnableColor() + c.Println("This prints again cyan...") */ package color diff --git a/vendor/github.com/firefart/nonamedreturns/LICENSE b/vendor/github.com/firefart/nonamedreturns/LICENSE new file mode 100644 index 0000000000..f288702d2f --- /dev/null +++ b/vendor/github.com/firefart/nonamedreturns/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>.
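The "How to Apply These Terms" passage above is essentially a how-to. A minimal sketch of what it looks like in practice for a Go source file follows; the program name, year, and author are placeholders invented for this illustration, not details taken from this repository or its vendored packages:

```go
// frobnicate: a hypothetical example program (name, year, and author are
// placeholders used only to illustrate the notice format described above).
// Copyright (C) 2024 Jane Doe
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
package main
```

Placing the notice as a comment block at the top of each source file, before the package clause, satisfies the "start of each source file" recommendation.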
diff --git a/vendor/github.com/firefart/nonamedreturns/analyzer/analyzer.go b/vendor/github.com/firefart/nonamedreturns/analyzer/analyzer.go new file mode 100644 index 0000000000..6ad97ab491 --- /dev/null +++ b/vendor/github.com/firefart/nonamedreturns/analyzer/analyzer.go @@ -0,0 +1,134 @@ +package analyzer + +import ( + "flag" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const FlagReportErrorInDefer = "report-error-in-defer" + +var Analyzer = &analysis.Analyzer{ + Name: "nonamedreturns", + Doc: "Reports all named returns", + Flags: flags(), + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +func flags() flag.FlagSet { + fs := flag.FlagSet{} + fs.Bool(FlagReportErrorInDefer, false, "report named error if it is assigned inside defer") + return fs +} + +func run(pass *analysis.Pass) (interface{}, error) { + reportErrorInDefer := pass.Analyzer.Flags.Lookup(FlagReportErrorInDefer).Value.String() == "true" + errorType := types.Universe.Lookup("error").Type() + + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // only filter function definitions + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + var funcResults *ast.FieldList + var funcBody *ast.BlockStmt + + switch n := node.(type) { + case *ast.FuncLit: + funcResults = n.Type.Results + funcBody = n.Body + case *ast.FuncDecl: + funcResults = n.Type.Results + funcBody = n.Body + default: + return + } + + // no return values + if funcResults == nil { + return + } + + resultsList := funcResults.List + + for _, p := range resultsList { + if len(p.Names) == 0 { + // all good, the parameter is not named + continue + } + + for _, n := range p.Names { + if n.Name == "_" { + continue + } + + if !reportErrorInDefer && + types.Identical(pass.TypesInfo.TypeOf(p.Type), errorType) && + findDeferWithVariableAssignment(funcBody, pass.TypesInfo, pass.TypesInfo.ObjectOf(n)) { + continue + } + + pass.Reportf(node.Pos(), "named return %q with type %q found", n.Name, types.ExprString(p.Type)) + } + } + }) + + return nil, nil +} + +func findDeferWithVariableAssignment(body *ast.BlockStmt, info *types.Info, variable types.Object) bool { + found := false + + ast.Inspect(body, func(node ast.Node) bool { + if found { + return false // stop inspection + } + + if d, ok := node.(*ast.DeferStmt); ok { + if fn, ok2 := d.Call.Fun.(*ast.FuncLit); ok2 { + if findVariableAssignment(fn.Body, info, variable) { + found = true + return false + } + } + } + + return true + }) + + return found +} + +func findVariableAssignment(body *ast.BlockStmt, info *types.Info, variable types.Object) bool { + found := false + + ast.Inspect(body, func(node ast.Node) bool { + if found { + return false // stop inspection + } + + if a, ok := node.(*ast.AssignStmt); ok { + for _, lh := range a.Lhs { + if i, ok2 := lh.(*ast.Ident); ok2 { + if info.ObjectOf(i) == variable { + found = true + return false + } + } + } + } + + return true + }) + + return found +} diff --git a/vendor/github.com/ghostiam/protogetter/.goreleaser.yaml b/vendor/github.com/ghostiam/protogetter/.goreleaser.yaml new file mode 100644 index 0000000000..a70d0fb006 --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/.goreleaser.yaml @@ -0,0 +1,24 @@ +before: + hooks: + - go mod tidy +builds: + - id: protogetter + main: ./cmd/protogetter + binary: protogetter + env: + - CGO_ENABLED=0 + goos: 
- linux + - windows + - darwin +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' + - '^ci:' \ No newline at end of file diff --git a/vendor/github.com/ghostiam/protogetter/LICENSE b/vendor/github.com/ghostiam/protogetter/LICENSE new file mode 100644 index 0000000000..b4449661b7 --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Vladislav Fursov (GhostIAm) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/ghostiam/protogetter/Makefile b/vendor/github.com/ghostiam/protogetter/Makefile new file mode 100644 index 0000000000..4c2a62af18 --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/Makefile @@ -0,0 +1,9 @@ +.PHONY: test +test: + $(MAKE) -C testdata vendor + go test -v ./... + +.PHONY: install +install: + go install ./cmd/protogetter + @echo "Installed in $(shell which protogetter)" diff --git a/vendor/github.com/ghostiam/protogetter/README.md b/vendor/github.com/ghostiam/protogetter/README.md new file mode 100644 index 0000000000..c033e9597f --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/README.md @@ -0,0 +1,73 @@ +# Protogetter +Welcome to the Protogetter project! + +## Overview +Protogetter is a linter developed specifically for Go programmers working with nested `protobuf` types.\ +It's designed to aid developers in preventing `invalid memory address or nil pointer dereference` errors arising from direct access of nested `protobuf` fields. + +When working with `protobuf`, it's quite common to have complex structures where a message field is contained within another message, which itself can be part of another message, and so on. +If these fields are accessed directly and some field in the call chain is not initialized, it can result in an application panic. + +Protogetter addresses this issue by suggesting the use of getter methods for field access. + +## How does it work? 
+Protogetter analyzes your Go code and helps detect direct `protobuf` field accesses that could give rise to panic.\ +The linter suggests using getters: +```go +m.GetFoo().GetBar().GetBaz() +``` +instead of direct field access: +```go +m.Foo.Bar.Baz +``` + +And you will then only need to perform a nil check after the final call: +```go +if m.GetFoo().GetBar().GetBaz() != nil { + // Do something with m.GetFoo().GetBar().GetBaz() +} +``` +instead of: +```go +if m.Foo != nil { + if m.Foo.Bar != nil { + if m.Foo.Bar.Baz != nil { + // Do something with m.Foo.Bar.Baz + } + } +} +``` + +or use zero values: + +```go +// If one of the methods returns `nil` we will receive 0 instead of panic. +v := m.GetFoo().GetBar().GetBaz().GetInt() +``` + +instead of panic: + +```go +// If at least one structure in the chains is not initialized, we will get a panic. +v := m.Foo.Bar.Baz.Int +``` + +which simplifies the code and makes it more reliable. + +## Installation + +```bash +go install github.com/ghostiam/protogetter/cmd/protogetter@latest +``` + +## Usage + +To run the linter: +```bash +protogetter ./... +``` + +Or to apply suggested fixes directly: +```bash +protogetter --fix ./... +``` diff --git a/vendor/github.com/ghostiam/protogetter/posfilter.go b/vendor/github.com/ghostiam/protogetter/posfilter.go new file mode 100644 index 0000000000..82075ccb16 --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/posfilter.go @@ -0,0 +1,65 @@ +package protogetter + +import ( + "go/token" +) + +type PosFilter struct { + positions map[token.Pos]struct{} + alreadyReplaced map[string]map[int][2]int // map[filename][line][start, end] +} + +func NewPosFilter() *PosFilter { + return &PosFilter{ + positions: make(map[token.Pos]struct{}), + alreadyReplaced: make(map[string]map[int][2]int), + } +} + +func (f *PosFilter) IsFiltered(pos token.Pos) bool { + _, ok := f.positions[pos] + return ok +} + +func (f *PosFilter) AddPos(pos token.Pos) { + f.positions[pos] = struct{}{} +} + +func (f *PosFilter) IsAlreadyReplaced(fset *token.FileSet, pos, end token.Pos) bool { + filePos := fset.Position(pos) + fileEnd := fset.Position(end) + + lines, ok := f.alreadyReplaced[filePos.Filename] + if !ok { + return false + } + + lineRange, ok := lines[filePos.Line] + if !ok { + return false + } + + if lineRange[0] <= filePos.Offset && fileEnd.Offset <= lineRange[1] { + return true + } + + return false +} + +func (f *PosFilter) AddAlreadyReplaced(fset *token.FileSet, pos, end token.Pos) { + filePos := fset.Position(pos) + fileEnd := fset.Position(end) + + lines, ok := f.alreadyReplaced[filePos.Filename] + if !ok { + lines = make(map[int][2]int) + f.alreadyReplaced[filePos.Filename] = lines + } + + lineRange, ok := lines[filePos.Line] + if ok && lineRange[0] <= filePos.Offset && fileEnd.Offset <= lineRange[1] { + return + } + + lines[filePos.Line] = [2]int{filePos.Offset, fileEnd.Offset} +} diff --git a/vendor/github.com/ghostiam/protogetter/processor.go b/vendor/github.com/ghostiam/protogetter/processor.go new file mode 100644 index 0000000000..ed52fb6ebf --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/processor.go @@ -0,0 +1,351 @@ +package protogetter + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" + "strings" +) + +type processor struct { + info *types.Info + filter *PosFilter + cfg *Config + + to strings.Builder + from strings.Builder + err error +} + +func Process(info *types.Info, filter *PosFilter, n ast.Node, cfg *Config) (*Result, error) { + p := &processor{ + info: info, + filter: filter, + cfg: 
cfg, + } + + return p.process(n) +} + +func (c *processor) process(n ast.Node) (*Result, error) { + switch x := n.(type) { + case *ast.AssignStmt: + // Skip any assignment to the field. + for _, s := range x.Lhs { + c.filter.AddPos(s.Pos()) + + if se, ok := s.(*ast.StarExpr); ok { + c.filter.AddPos(se.X.Pos()) + } + } + + case *ast.IncDecStmt: + // Skip any increment/decrement to the field. + c.filter.AddPos(x.X.Pos()) + + case *ast.UnaryExpr: + if x.Op == token.AND { + // Skip all expressions when the field is used as a pointer. + // Because this is not direct reading, but most likely writing by pointer (for example like sql.Scan). + c.filter.AddPos(x.X.Pos()) + } + + case *ast.CallExpr: + if !c.cfg.ReplaceFirstArgInAppend && len(x.Args) > 0 { + if v, ok := x.Fun.(*ast.Ident); ok && v.Name == "append" { + // Skip first argument of append function. + c.filter.AddPos(x.Args[0].Pos()) + break + } + } + + f, ok := x.Fun.(*ast.SelectorExpr) + if !ok { + return &Result{}, nil + } + + if !isProtoMessage(c.info, f.X) { + return &Result{}, nil + } + + c.processInner(x) + + case *ast.SelectorExpr: + if !isProtoMessage(c.info, x.X) { + // If the selector is not on a proto message, skip it. + return &Result{}, nil + } + + c.processInner(x) + + case *ast.StarExpr: + f, ok := x.X.(*ast.SelectorExpr) + if !ok { + return &Result{}, nil + } + + if !isProtoMessage(c.info, f.X) { + return &Result{}, nil + } + + // proto2 generates fields as pointers. Hence, the indirection + // must be removed when generating the fix for the case. + // The `*` is retained in `c.from`, but excluded from the fix + // present in the `c.to`. + c.writeFrom("*") + c.processInner(x.X) + + case *ast.BinaryExpr: + // Check if the expression is a comparison. + if x.Op != token.EQL && x.Op != token.NEQ { + return &Result{}, nil + } + + // Check if one of the operands is nil. + + xIdent, xOk := x.X.(*ast.Ident) + yIdent, yOk := x.Y.(*ast.Ident) + + xIsNil := xOk && xIdent.Name == "nil" + yIsNil := yOk && yIdent.Name == "nil" + + if !xIsNil && !yIsNil { + return &Result{}, nil + } + + // Extract the non-nil operand for further checks + + var expr ast.Expr + if xIsNil { + expr = x.Y + } else { + expr = x.X + } + + se, ok := expr.(*ast.SelectorExpr) + if !ok { + return &Result{}, nil + } + + if !isProtoMessage(c.info, se.X) { + return &Result{}, nil + } + + // Check if the Getter function of the protobuf message returns a pointer. + hasPointer, ok := getterResultHasPointer(c.info, se.X, se.Sel.Name) + if !ok || hasPointer { + return &Result{}, nil + } + + c.filter.AddPos(x.X.Pos()) + + default: + return nil, fmt.Errorf("not implemented for type: %s (%s)", reflect.TypeOf(x), formatNode(n)) + } + + if c.err != nil { + return nil, c.err + } + + return &Result{ + From: c.from.String(), + To: c.to.String(), + }, nil +} + +func (c *processor) processInner(expr ast.Expr) { + switch x := expr.(type) { + case *ast.Ident: + c.write(x.Name) + + case *ast.BasicLit: + c.write(x.Value) + + case *ast.UnaryExpr: + if x.Op == token.AND { + c.write(formatNode(x)) + return + } + + c.write(x.Op.String()) + c.processInner(x.X) + + case *ast.SelectorExpr: + c.processInner(x.X) + c.write(".") + + // If getter exists, use it. + if methodIsExists(c.info, x.X, "Get"+x.Sel.Name) { + c.writeFrom(x.Sel.Name) + c.writeTo("Get" + x.Sel.Name + "()") + return + } + + // If the selector is not a proto-message or the method has already been called, we leave it unchanged. + // This approach is significantly more efficient than verifying the presence of methods in all cases. 
+ c.write(x.Sel.Name) + + case *ast.CallExpr: + c.processInner(x.Fun) + c.write("(") + for i, arg := range x.Args { + if i > 0 { + c.write(",") + } + c.processInner(arg) + } + c.write(")") + + case *ast.IndexExpr: + c.processInner(x.X) + c.write("[") + c.processInner(x.Index) + c.write("]") + + case *ast.BinaryExpr: + c.processInner(x.X) + c.write(x.Op.String()) + c.processInner(x.Y) + + case *ast.ParenExpr: + c.write("(") + c.processInner(x.X) + c.write(")") + + case *ast.StarExpr: + c.write("*") + c.processInner(x.X) + + case *ast.CompositeLit, *ast.TypeAssertExpr, *ast.ArrayType: + // Process the node as is. + c.write(formatNode(x)) + + default: + c.err = fmt.Errorf("processInner: not implemented for type: %s", reflect.TypeOf(x)) + } +} + +func (c *processor) write(s string) { + c.writeTo(s) + c.writeFrom(s) +} + +func (c *processor) writeTo(s string) { + c.to.WriteString(s) +} + +func (c *processor) writeFrom(s string) { + c.from.WriteString(s) +} + +// Result contains source code (from) and suggested change (to) +type Result struct { + From string + To string +} + +func (r *Result) Skipped() bool { + // If from and to are the same, skip it. + return r.From == r.To +} + +func isProtoMessage(info *types.Info, expr ast.Expr) bool { + // First, we are checking for the presence of the ProtoReflect method which is currently being generated + // and corresponds to v2 version. + // https://pkg.go.dev/google.golang.org/protobuf@v1.31.0/proto#Message + const protoV2Method = "ProtoReflect" + ok := methodIsExists(info, expr, protoV2Method) + if ok { + return true + } + + // Afterwards, we are checking the ProtoMessage method. All the structures that implement the proto.Message interface + // have a ProtoMessage method and are proto-structures. This interface has been generated since version 1.0.0 and + // continues to exist for compatibility. + // https://pkg.go.dev/github.com/golang/protobuf/proto?utm_source=godoc#Message + const protoV1Method = "ProtoMessage" + ok = methodIsExists(info, expr, protoV1Method) + if ok { + // Since the protoc-gen-gogo generator also implements the proto.Message interface but may not generate + // getters, or may generate them without nil checks, we skip such types even if getters exist. 
+ const protocGenGoGoMethod = "MarshalToSizedBuffer" + return !methodIsExists(info, expr, protocGenGoGoMethod) + } + + return false +} + +func typesNamed(info *types.Info, x ast.Expr) (*types.Named, bool) { + if info == nil { + return nil, false + } + + t := info.TypeOf(x) + if t == nil { + return nil, false + } + + ptr, ok := t.Underlying().(*types.Pointer) + if ok { + t = ptr.Elem() + } + + named, ok := t.(*types.Named) + if !ok { + return nil, false + } + + return named, true +} + +func methodIsExists(info *types.Info, x ast.Expr, name string) bool { + named, ok := typesNamed(info, x) + if !ok { + return false + } + + for i := 0; i < named.NumMethods(); i++ { + if named.Method(i).Name() == name { + return true + } + } + + return false +} + +func getterResultHasPointer(info *types.Info, x ast.Expr, name string) (hasPointer, ok bool) { + named, ok := typesNamed(info, x) + if !ok { + return false, false + } + + for i := 0; i < named.NumMethods(); i++ { + method := named.Method(i) + if method.Name() != "Get"+name { + continue + } + + var sig *types.Signature + sig, ok = method.Type().(*types.Signature) + if !ok { + return false, false + } + + results := sig.Results() + if results.Len() == 0 { + return false, false + } + + firstType := results.At(0) + _, ok = firstType.Type().(*types.Pointer) + if !ok { + return false, true + } + + return true, true + } + + return false, false +} diff --git a/vendor/github.com/ghostiam/protogetter/protogetter.go b/vendor/github.com/ghostiam/protogetter/protogetter.go new file mode 100644 index 0000000000..31eee8572a --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/protogetter.go @@ -0,0 +1,279 @@ +package protogetter + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/format" + "go/token" + "log" + "path/filepath" + "strings" + + "github.com/gobwas/glob" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" +) + +type Mode int + +const ( + StandaloneMode Mode = iota + GolangciLintMode +) + +const msgFormat = "avoid direct access to proto field %s, use %s instead" + +func NewAnalyzer(cfg *Config) *analysis.Analyzer { + if cfg == nil { + cfg = &Config{} + } + + return &analysis.Analyzer{ + Name: "protogetter", + Doc: "Reports direct reads from proto message fields when getters should be used", + Flags: flags(cfg), + Run: func(pass *analysis.Pass) (any, error) { + _, err := Run(pass, cfg) + return nil, err + }, + } +} + +func flags(opts *Config) flag.FlagSet { + fs := flag.NewFlagSet("protogetter", flag.ContinueOnError) + + fs.Func("skip-generated-by", "skip files generated with the given prefixes", func(s string) error { + for _, prefix := range strings.Split(s, ",") { + opts.SkipGeneratedBy = append(opts.SkipGeneratedBy, prefix) + } + return nil + }) + fs.Func("skip-files", "skip files with the given glob patterns", func(s string) error { + for _, pattern := range strings.Split(s, ",") { + opts.SkipFiles = append(opts.SkipFiles, pattern) + } + return nil + }) + fs.BoolVar(&opts.SkipAnyGenerated, "skip-any-generated", false, "skip any generated files") + + return *fs +} + +type Config struct { + Mode Mode // Zero value is StandaloneMode. + SkipGeneratedBy []string + SkipFiles []string + SkipAnyGenerated bool + ReplaceFirstArgInAppend bool +} + +func Run(pass *analysis.Pass, cfg *Config) ([]Issue, error) { + skipGeneratedBy := make([]string, 0, len(cfg.SkipGeneratedBy)+3) + // Always skip files generated by protoc-gen-go, protoc-gen-go-grpc and protoc-gen-grpc-gateway. 
+ skipGeneratedBy = append(skipGeneratedBy, "protoc-gen-go", "protoc-gen-go-grpc", "protoc-gen-grpc-gateway") + for _, s := range cfg.SkipGeneratedBy { + s = strings.TrimSpace(s) + if s == "" { + continue + } + skipGeneratedBy = append(skipGeneratedBy, s) + } + + skipFilesGlobPatterns := make([]glob.Glob, 0, len(cfg.SkipFiles)) + for _, s := range cfg.SkipFiles { + s = strings.TrimSpace(s) + if s == "" { + continue + } + + compile, err := glob.Compile(s) + if err != nil { + return nil, fmt.Errorf("invalid glob pattern: %w", err) + } + + skipFilesGlobPatterns = append(skipFilesGlobPatterns, compile) + } + + nodeTypes := []ast.Node{ + (*ast.AssignStmt)(nil), + (*ast.BinaryExpr)(nil), + (*ast.CallExpr)(nil), + (*ast.SelectorExpr)(nil), + (*ast.StarExpr)(nil), + (*ast.IncDecStmt)(nil), + (*ast.UnaryExpr)(nil), + } + + // Skip filtered files. + var files []*ast.File + for _, f := range pass.Files { + if skipGeneratedFile(f, skipGeneratedBy, cfg.SkipAnyGenerated) { + continue + } + + if skipFilesByGlob(pass.Fset.File(f.Pos()).Name(), skipFilesGlobPatterns) { + continue + } + + files = append(files, f) + + // ast.Print(pass.Fset, f) + } + + ins := inspector.New(files) + + var issues []Issue + + filter := NewPosFilter() + ins.Preorder(nodeTypes, func(node ast.Node) { + report := analyse(pass, filter, node, cfg) + if report == nil { + return + } + + switch cfg.Mode { + case StandaloneMode: + pass.Report(report.ToDiagReport()) + case GolangciLintMode: + issues = append(issues, report.ToIssue(pass.Fset)) + } + }) + + return issues, nil +} + +func analyse(pass *analysis.Pass, filter *PosFilter, n ast.Node, cfg *Config) *Report { + // fmt.Printf("\n>>> check: %s\n", formatNode(n)) + // ast.Print(pass.Fset, n) + if filter.IsFiltered(n.Pos()) { + // fmt.Printf(">>> filtered\n") + return nil + } + + result, err := Process(pass.TypesInfo, filter, n, cfg) + if err != nil { + pass.Report(analysis.Diagnostic{ + Pos: n.Pos(), + End: n.End(), + Message: fmt.Sprintf("error: %v", err), + }) + + return nil + } + + // If existing in filter, skip it. + if filter.IsFiltered(n.Pos()) { + return nil + } + + if result.Skipped() { + return nil + } + + // If the expression has already been replaced, skip it. + if filter.IsAlreadyReplaced(pass.Fset, n.Pos(), n.End()) { + return nil + } + // Add the expression to the filter. + filter.AddAlreadyReplaced(pass.Fset, n.Pos(), n.End()) + + return &Report{ + node: n, + result: result, + } +} + +// Issue is used to integrate with golangci-lint's inline auto fix. 
+type Issue struct { + Pos token.Position + Message string + InlineFix InlineFix +} + +type InlineFix struct { + StartCol int // zero-based + Length int + NewString string +} + +type Report struct { + node ast.Node + result *Result +} + +func (r *Report) ToDiagReport() analysis.Diagnostic { + msg := fmt.Sprintf(msgFormat, r.result.From, r.result.To) + + return analysis.Diagnostic{ + Pos: r.node.Pos(), + End: r.node.End(), + Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: msg, + TextEdits: []analysis.TextEdit{ + { + Pos: r.node.Pos(), + End: r.node.End(), + NewText: []byte(r.result.To), + }, + }, + }, + }, + } +} + +func (r *Report) ToIssue(fset *token.FileSet) Issue { + msg := fmt.Sprintf(msgFormat, r.result.From, r.result.To) + return Issue{ + Pos: fset.Position(r.node.Pos()), + Message: msg, + InlineFix: InlineFix{ + StartCol: fset.Position(r.node.Pos()).Column - 1, + Length: len(r.result.From), + NewString: r.result.To, + }, + } +} + +func skipGeneratedFile(f *ast.File, prefixes []string, skipAny bool) bool { + if len(f.Comments) == 0 { + return false + } + firstComment := f.Comments[0].Text() + + // https://golang.org/s/generatedcode + if skipAny && strings.HasPrefix(firstComment, "Code generated") { + return true + } + + for _, prefix := range prefixes { + if strings.HasPrefix(firstComment, "Code generated by "+prefix) { + return true + } + } + + return false +} + +func skipFilesByGlob(filename string, patterns []glob.Glob) bool { + for _, pattern := range patterns { + if pattern.Match(filename) || pattern.Match(filepath.Base(filename)) { + return true + } + } + + return false +} + +func formatNode(node ast.Node) string { + buf := new(bytes.Buffer) + if err := format.Node(buf, token.NewFileSet(), node); err != nil { + log.Printf("Error formatting expression: %v", err) + return "" + } + + return buf.String() +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go b/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go index a9324dd02e..2a67dccec8 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go @@ -6,7 +6,8 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/astp" "golang.org/x/tools/go/ast/astutil" @@ -15,7 +16,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "appendAssign" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects suspicious append result assignments" info.Before = ` p.positives = append(p.negatives, x) diff --git a/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go b/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go index 3c81449e9c..81a7aa30b3 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go @@ -5,7 +5,8 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astequal" ) @@ -13,7 +14,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "appendCombine" - info.Tags = []string{"performance"} + 
info.Tags = []string{linter.PerformanceTag} info.Summary = "Detects `append` chains to the same slice that can be done in a single `append` call" info.Before = ` xs = append(xs, 1) diff --git a/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go b/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go index 149f0ac88a..9be45ccc78 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go @@ -7,7 +7,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astcopy" "github.com/go-toolsmith/astequal" @@ -18,7 +19,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "badCond" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects suspicious condition expressions" info.Before = ` for i := 0; i > n; i++ { @@ -114,30 +115,43 @@ func (c *badCondChecker) checkForStmt(stmt *ast.ForStmt) { iter := astcast.ToIdent(init.Lhs[0]) cond := astcast.ToBinaryExpr(stmt.Cond) - if cond.Op != token.GTR || !astequal.Expr(iter, cond.X) { + + var i, n ast.Expr + var op token.Token + switch { + case cond.Op == token.GTR && astequal.Expr(iter, cond.X): + i = cond.X + n = cond.Y + op = token.LSS + case cond.Op == token.LSS && astequal.Expr(iter, cond.Y): + i = cond.Y + n = cond.X + op = token.GTR + default: return } - if !typep.SideEffectFree(c.ctx.TypesInfo, cond.Y) { + + if !typep.SideEffectFree(c.ctx.TypesInfo, n) { return } post := astcast.ToIncDecStmt(stmt.Post) - if post.Tok != token.INC || !astequal.Expr(iter, post.X) { + if post.Tok != token.INC || !astequal.Expr(iter, i) { return } - mutated := lintutil.CouldBeMutated(c.ctx.TypesInfo, stmt.Body, cond.Y) || + mutated := lintutil.CouldBeMutated(c.ctx.TypesInfo, stmt.Body, n) || lintutil.CouldBeMutated(c.ctx.TypesInfo, stmt.Body, iter) if mutated { return } - c.warnForStmt(stmt, cond) + c.warnForStmt(stmt, op, cond) } -func (c *badCondChecker) warnForStmt(cause ast.Node, cond *ast.BinaryExpr) { +func (c *badCondChecker) warnForStmt(cause ast.Node, op token.Token, cond *ast.BinaryExpr) { suggest := astcopy.BinaryExpr(cond) - suggest.Op = token.LSS + suggest.Op = op c.ctx.Warn(cause, "`%s` in loop; probably meant `%s`?", cond, suggest) } diff --git a/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go b/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go index e0d4b7487f..6c6845053d 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go @@ -9,14 +9,15 @@ import ( "unicode/utf8" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/quasilyte/regex/syntax" ) func init() { var info linter.CheckerInfo info.Name = "badRegexp" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects suspicious regexp patterns" info.Before = "regexp.MustCompile(`(?:^aa|bb|cc)foo[aba]`)" info.After = "regexp.MustCompile(`^(?:aa|bb|cc)foo[ab]`)" @@ -365,7 +366,7 @@ func (c *badRegexpChecker) checkCharClassDups(cc syntax.Expr) { } // 2. 
Sort ranges, O(nlogn). - sort.Slice(ranges, func(i, j int) bool { + sort.SliceStable(ranges, func(i, j int) bool { return ranges[i].low < ranges[j].low }) diff --git a/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go b/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go index b4000a8ce7..a1c69cb7ab 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go @@ -5,22 +5,22 @@ import ( "go/token" "strconv" + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astcopy" "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/astp" "github.com/go-toolsmith/typep" "golang.org/x/tools/go/ast/astutil" - - "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" ) func init() { var info linter.CheckerInfo info.Name = "boolExprSimplify" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects bool expressions that can be simplified" info.Before = ` a := !(elapsed >= expectElapsedMin) diff --git a/vendor/github.com/go-critic/go-critic/checkers/builtinShadowDecl_checker.go b/vendor/github.com/go-critic/go-critic/checkers/builtinShadowDecl_checker.go index 94d51a996a..d8be10ce9c 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/builtinShadowDecl_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/builtinShadowDecl_checker.go @@ -4,13 +4,13 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "builtinShadowDecl" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects top-level declarations that shadow the predeclared identifiers" info.Before = `type int struct {}` info.After = `type myInt struct {}` diff --git a/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go b/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go index 1e1661deb2..0b4b7bafb8 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go @@ -4,13 +4,13 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "builtinShadow" - info.Tags = []string{"style", "opinionated"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag} info.Summary = "Detects when predeclared identifiers are shadowed in assignments" info.Before = `len := 10` info.After = `length := 10` diff --git a/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go b/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go index d9b4b7e75d..b31a6f7fd3 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go @@ -4,13 +4,13 @@ import ( 
"go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "captLocal" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Params = linter.CheckerParams{ "paramsOnly": { Value: true, diff --git a/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go index 047ea4fee0..306756834b 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go @@ -5,13 +5,13 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "caseOrder" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects erroneous case order inside switch statements" info.Before = ` switch x.(type) { diff --git a/vendor/github.com/go-critic/go-critic/checkers/checkers.go b/vendor/github.com/go-critic/go-critic/checkers/checkers.go index 0c2ebc00ca..5797dafdf4 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/checkers.go +++ b/vendor/github.com/go-critic/go-critic/checkers/checkers.go @@ -4,7 +4,7 @@ package checkers import ( "os" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) var collection = &linter.CheckerCollection{ diff --git a/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go b/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go index 52a72d28c8..6eeb0bb5db 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go @@ -6,13 +6,13 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "codegenComment" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects malformed 'code generated' file comments" info.Before = `// This file was automatically generated by foogen` info.After = `// Code generated by foogen. 
DO NOT EDIT.` diff --git a/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go b/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go index f330b723a0..b834158eca 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go @@ -8,13 +8,13 @@ import ( "unicode/utf8" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "commentFormatting" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Summary = "Detects comments with non-idiomatic formatting" info.Before = `//This is a comment` info.After = `// This is a comment` @@ -27,12 +27,16 @@ func init() { "//nolint", } parts := []string{ - "//go:generate ", // e.g.: go:generate value - "//line /", // e.g.: line /path/to/file:123 - "//nolint ", // e.g.: nolint - "//noinspection ", // e.g.: noinspection ALL, some GoLand and friends versions - "//export ", // e.g.: export Foo - "///", // e.g.: vertical breaker ///////////// + "//go:generate ", // e.g.: go:generate value + "//line /", // e.g.: line /path/to/file:123 + "//nolint ", // e.g.: nolint + "//noinspection ", // e.g.: noinspection ALL, some GoLand and friends versions + "//region", // e.g.: region awawa, used by GoLand and friends for custom folding + "//endregion", // e.g.: endregion awawa or endregion, closes GoLand regions + "//<editor-fold", // e.g.: <editor-fold desc="awawa">, used by VSCode for custom folding + "//</editor-fold", // e.g.: </editor-fold>, closes VSCode regions + "//export ", // e.g.: export Foo + "///", // e.g.: vertical breaker ///////////// "//+", "//#", "//-", diff --git a/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go b/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go index 554e0621fd..8595b79515 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go @@ -6,17 +6,25 @@ import ( "go/token" "regexp" "strings" + "unicode/utf8" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/strparse" ) func init() { var info linter.CheckerInfo info.Name = "commentedOutCode" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects commented-out code inside function bodies" + info.Params = linter.CheckerParams{ + "minLength": { + Value: 15, + Usage: "min length of the comment that triggers a warning", + }, + } info.Before = ` // fmt.Println("Debugging hard") foo(1, 2)` @@ -26,6 +34,7 @@ foo(1, 2)` return astwalk.WalkerForLocalComment(&commentedOutCodeChecker{ ctx: ctx, notQuiteFuncCall: regexp.MustCompile(`\w+\s+\([^)]*\)\s*$`), + minLength: info.Params.Int("minLength"), }), nil }) } @@ -36,6 +45,7 @@ type commentedOutCodeChecker struct { fn *ast.FuncDecl notQuiteFuncCall *regexp.Regexp + minLength int } func (c *commentedOutCodeChecker) EnterFunc(fn *ast.FuncDecl) bool { @@ -68,7 +78,7 @@ func (c *commentedOutCodeChecker) VisitLocalComment(cg *ast.CommentGroup) { // Some very short comment that can be skipped. // Usually triggering on these results in false positives. // Unless there is a very popular call like print/println. 
- cond := len(s) < len("quite too short") && + cond := utf8.RuneCountInString(s) < c.minLength && !strings.Contains(s, "print") && !strings.Contains(s, "fmt.") && !strings.Contains(s, "log.") diff --git a/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go b/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go index 3c086569b1..e0855da812 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go @@ -6,13 +6,13 @@ import ( "regexp" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "commentedOutImport" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects commented-out imports" info.Before = ` import ( diff --git a/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go index e06944d624..cdebaef987 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go @@ -4,13 +4,13 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "defaultCaseOrder" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Summary = "Detects when default case in switch isn't on 1st or last position" info.Before = ` switch { diff --git a/vendor/github.com/go-critic/go-critic/checkers/deferInLoop_checker.go b/vendor/github.com/go-critic/go-critic/checkers/deferInLoop_checker.go index da90fe67a2..37c80c864a 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/deferInLoop_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/deferInLoop_checker.go @@ -4,13 +4,13 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "deferInLoop" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects loops inside functions that use defer" info.Before = ` for _, filename := range []string{"foo", "bar"} { diff --git a/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go b/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go index 0eb5072375..c61d773da6 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go @@ -5,13 +5,13 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "deprecatedComment" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects malformed 'deprecated' doc-comments" info.Before = ` // deprecated, use FuncNew instead diff --git 
a/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go b/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go index d8aaaf7437..aa23de42c4 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go @@ -7,13 +7,13 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "docStub" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects comments that silence go lint complaints about doc-comment" info.Before = ` // Foo ... diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go index 83de505280..c4f0183878 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go @@ -4,14 +4,15 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astequal" ) func init() { var info linter.CheckerInfo info.Name = "dupBranchBody" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects duplicated branch bodies inside conditional statements" info.Before = ` if cond { @@ -54,5 +55,5 @@ func (c *dupBranchBodyChecker) checkIf(stmt *ast.IfStmt) { } func (c *dupBranchBodyChecker) warnIf(cause ast.Node) { - c.ctx.Warn(cause, "both branches in if statement has same body") + c.ctx.Warn(cause, "both branches in if statement have same body") } diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go index a565007601..381bad68b8 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go @@ -5,13 +5,13 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "dupCase" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects duplicated case clauses inside switch or select statements" info.Before = ` switch x { diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go index 54658eb9f4..ed674eb85c 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go @@ -4,18 +4,18 @@ import ( "fmt" "go/ast" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "dupImport" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects multiple imports of the same package under different aliases" info.Before = ` import ( "fmt" - priting "fmt" // Imported the second time + 
printing "fmt" // Imported the second time )` info.After = ` import( diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go index 00f8fd0eb5..9ab75945cd 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go @@ -6,7 +6,8 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/typep" ) @@ -14,7 +15,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "dupSubExpr" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects suspicious duplicated sub-expressions" info.Before = ` sort.Slice(xs, func(i, j int) bool { diff --git a/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go b/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go index dcc964846f..857d09fa0e 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go @@ -4,14 +4,15 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astp" ) func init() { var info linter.CheckerInfo info.Name = "elseif" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Params = linter.CheckerParams{ "skipBalanced": { Value: true, diff --git a/vendor/github.com/go-critic/go-critic/checkers/embedded_rules.go b/vendor/github.com/go-critic/go-critic/checkers/embedded_rules.go index b17178e09a..ad507425e6 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/embedded_rules.go +++ b/vendor/github.com/go-critic/go-critic/checkers/embedded_rules.go @@ -7,10 +7,10 @@ import ( "go/token" "os" - "github.com/quasilyte/go-ruleguard/ruleguard" - "github.com/go-critic/go-critic/checkers/rulesdata" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + + "github.com/quasilyte/go-ruleguard/ruleguard" ) //go:generate go run ./rules/precompile.go -rules ./rules/rules.go -o ./rulesdata/rulesdata.go @@ -101,6 +101,7 @@ func (c *embeddedRuleguardChecker) WalkFile(f *ast.File) { Pkg: c.ctx.Pkg, Types: c.ctx.TypesInfo, Sizes: c.ctx.SizesInfo, + GoVersion: ruleguard.GoVersion(c.ctx.GoVersion), Fset: c.ctx.FileSet, TruncateLen: 100, }) diff --git a/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go b/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go index ebb8dad455..a008c61870 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go @@ -5,13 +5,13 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "emptyFallthrough" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects fallthrough that can be avoided by using multi case values" info.Before = `switch kind { case reflect.Int: diff --git 
a/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go index 6ba07fe869..f8c5ae5423 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go @@ -7,7 +7,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/typep" @@ -16,7 +17,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "evalOrder" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects unwanted dependencies on the evaluation order" info.Before = `return x, f(&x)` info.After = ` diff --git a/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go b/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go index 63e0049f2c..9889f48e8e 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go @@ -4,7 +4,8 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astfmt" "github.com/go-toolsmith/astp" "golang.org/x/tools/go/ast/astutil" @@ -13,7 +14,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "exitAfterDefer" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects calls to exit/fatal inside functions that use defer" info.Before = ` defer os.Remove(filename) diff --git a/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go b/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go index 698f5366d6..17ab0ea83f 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go @@ -5,14 +5,15 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "filepathJoin" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects problems in filepath.Join() function calls" info.Before = `filepath.Join("dir/", filename)` info.After = `filepath.Join("dir", filename)` diff --git a/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go b/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go index 7f6ce3c01f..7010668608 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go @@ -7,14 +7,15 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "flagName" - info.Tags = []string{"diagnostic"} + info.Tags = 
[]string{linter.DiagnosticTag} info.Summary = "Detects suspicious flag names" info.Before = `b := flag.Bool(" foo ", false, "description")` info.After = `b := flag.Bool("foo", false, "description")` @@ -63,7 +64,7 @@ func (c *flagNameChecker) checkFlagName(call *ast.CallExpr, arg ast.Expr) { case name == "": c.warnEmpty(call) case strings.HasPrefix(name, "-"): - c.warnHypenPrefix(call, name) + c.warnHyphenPrefix(call, name) case strings.Contains(name, "="): c.warnEq(call, name) case strings.Contains(name, " "): @@ -75,8 +76,8 @@ func (c *flagNameChecker) warnEmpty(cause ast.Node) { c.ctx.Warn(cause, "empty flag name") } -func (c *flagNameChecker) warnHypenPrefix(cause ast.Node, name string) { - c.ctx.Warn(cause, "flag name %q should not start with a hypen", name) +func (c *flagNameChecker) warnHyphenPrefix(cause ast.Node, name string) { + c.ctx.Warn(cause, "flag name %q should not start with a hyphen", name) } func (c *flagNameChecker) warnEq(cause ast.Node, name string) { diff --git a/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go b/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go index ae61a1125e..7301bd325a 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go @@ -6,14 +6,15 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "hexLiteral" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects hex literals that have mixed case letter digits" info.Before = ` x := 0X12 diff --git a/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go b/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go index 910be180b2..7b7a3c538b 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go @@ -4,14 +4,15 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" - "golang.org/x/exp/typeparams" + "github.com/go-critic/go-critic/linter" + + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "hugeParam" - info.Tags = []string{"performance"} + info.Tags = []string{linter.PerformanceTag} info.Params = linter.CheckerParams{ "sizeThreshold": { Value: 80, @@ -40,19 +41,34 @@ type hugeParamChecker struct { func (c *hugeParamChecker) VisitFuncDecl(decl *ast.FuncDecl) { // TODO(quasilyte): maybe it's worthwhile to permit skipping // test files for this checker? + if c.isImplementStringer(decl) { + return + } + if decl.Recv != nil { c.checkParams(decl.Recv.List) } c.checkParams(decl.Type.Params.List) } +// isImplementStringer check method signature is: String() string. 
+func (*hugeParamChecker) isImplementStringer(decl *ast.FuncDecl) bool { + if decl.Recv != nil && + decl.Name.Name == "String" && + decl.Type != nil && + len(decl.Type.Params.List) == 0 && + len(decl.Type.Results.List) == 1 && + astcast.ToIdent(decl.Type.Results.List[0].Type).Name == "string" { + return true + } + + return false +} + func (c *hugeParamChecker) checkParams(params []*ast.Field) { for _, p := range params { for _, id := range p.Names { typ := c.ctx.TypeOf(id) - if _, ok := typ.(*typeparams.TypeParam); ok { - continue - } size, ok := c.ctx.SizeOf(typ) if ok && size >= c.sizeThreshold { c.warn(id, size) diff --git a/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go b/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go index b1fcf41472..e73c609d5c 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go @@ -4,13 +4,19 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "ifElseChain" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} + info.Params = linter.CheckerParams{ + "minThreshold": { + Value: 2, + Usage: "min number of if-else blocks that makes the warning trigger", + }, + } info.Summary = "Detects repeated if-else statements and suggests to replace them with switch statement" info.Before = ` if cond1 { @@ -35,7 +41,10 @@ will trigger suggestion to use switch statement. See [EffectiveGo#switch](https://golang.org/doc/effective_go.html#switch).` collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { - return astwalk.WalkerForStmt(&ifElseChainChecker{ctx: ctx}), nil + return astwalk.WalkerForStmt(&ifElseChainChecker{ + ctx: ctx, + minThreshold: info.Params.Int("minThreshold"), + }), nil }) } @@ -45,6 +54,8 @@ type ifElseChainChecker struct { cause *ast.IfStmt visited map[*ast.IfStmt]bool + + minThreshold int } func (c *ifElseChainChecker) EnterFunc(fn *ast.FuncDecl) bool { @@ -66,8 +77,7 @@ func (c *ifElseChainChecker) VisitStmt(stmt ast.Stmt) { } func (c *ifElseChainChecker) checkIfStmt(stmt *ast.IfStmt) { - const minThreshold = 2 - if c.countIfelseLen(stmt) >= minThreshold { + if c.countIfelseLen(stmt) >= c.minThreshold { c.warn() } } @@ -75,11 +85,12 @@ func (c *ifElseChainChecker) checkIfStmt(stmt *ast.IfStmt) { func (c *ifElseChainChecker) countIfelseLen(stmt *ast.IfStmt) int { count := 0 for { + if stmt.Init != nil { + return 0 // Give up + } + switch e := stmt.Else.(type) { case *ast.IfStmt: - if e.Init != nil { - return 0 // Give up - } // Else if. 
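The ifElseChain hunk above turns the hard-coded threshold into a `minThreshold` checker parameter (default 2). A sketch of the chain it reports and the switch form it suggests (function names are hypothetical):

package main

import "fmt"

// Reported once the chain reaches minThreshold if-else blocks.
func gradeIf(score int) string {
	if score >= 90 {
		return "A"
	} else if score >= 80 {
		return "B"
	} else if score >= 70 {
		return "C"
	}
	return "F"
}

// The equivalent expression switch the checker suggests.
func gradeSwitch(score int) string {
	switch {
	case score >= 90:
		return "A"
	case score >= 80:
		return "B"
	case score >= 70:
		return "C"
	default:
		return "F"
	}
}

func main() { fmt.Println(gradeIf(85), gradeSwitch(85)) }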
stmt = e count++ diff --git a/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go b/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go index 5ac711fc1e..b690487b7b 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go @@ -5,13 +5,13 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "importShadow" - info.Tags = []string{"style", "opinionated"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag} info.Summary = "Detects when imported package names shadowed in the assignments" info.Before = ` // "path/filepath" is imported. diff --git a/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go b/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go index a1b6b2a8a8..8612717b27 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go @@ -4,14 +4,15 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astp" ) func init() { var info linter.CheckerInfo info.Name = "initClause" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Summary = "Detects non-assignment statements inside if/switch init clause" info.Before = `if sideEffect(); cond { }` diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go index 47de589a46..0c9c14955e 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go @@ -7,9 +7,9 @@ import ( // LocalDefVisitor visits every name definitions inside a function. // // Next elements are considered as name definitions: -// - Function parameters (input, output, receiver) -// - Every LHS of ":=" assignment that defines a new name -// - Every local var/const declaration. +// - Function parameters (input, output, receiver) +// - Every LHS of ":=" assignment that defines a new name +// - Every local var/const declaration. // // NOTE: this visitor is experimental. // This is also why it lives in a separate file. @@ -18,20 +18,18 @@ type LocalDefVisitor interface { VisitLocalDef(Name, ast.Expr) } -type ( - // NameKind describes what kind of name Name object holds. - NameKind int +// NameKind describes what kind of name Name object holds. +type NameKind int - // Name holds ver/const/param definition symbol info. - Name struct { - ID *ast.Ident - Kind NameKind +// Name holds ver/const/param definition symbol info. +type Name struct { + ID *ast.Ident + Kind NameKind - // Index is NameVar-specific field that is used to - // specify nth tuple element being assigned to the name. - Index int - } -) + // Index is NameVar-specific field that is used to + // specify nth tuple element being assigned to the name. + Index int +} // NOTE: set of name kinds is not stable and may change over time. 
// diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go index e5031a909f..3486a8e622 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go @@ -4,67 +4,64 @@ import ( "go/ast" ) -// Visitor interfaces. -type ( - // DocCommentVisitor visits every doc-comment. - // Does not visit doc-comments for function-local definitions (types, etc). - // Also does not visit package doc-comment (file-level doc-comments). - DocCommentVisitor interface { - VisitDocComment(*ast.CommentGroup) - } +// DocCommentVisitor visits every doc-comment. +// Does not visit doc-comments for function-local definitions (types, etc). +// Also does not visit package doc-comment (file-level doc-comments). +type DocCommentVisitor interface { + VisitDocComment(*ast.CommentGroup) +} - // FuncDeclVisitor visits every top-level function declaration. - FuncDeclVisitor interface { - walkerEvents - VisitFuncDecl(*ast.FuncDecl) - } +// FuncDeclVisitor visits every top-level function declaration. +type FuncDeclVisitor interface { + walkerEvents + VisitFuncDecl(*ast.FuncDecl) +} - // ExprVisitor visits every expression inside AST file. - ExprVisitor interface { - walkerEvents - VisitExpr(ast.Expr) - } +// ExprVisitor visits every expression inside AST file. +type ExprVisitor interface { + walkerEvents + VisitExpr(ast.Expr) +} - // LocalExprVisitor visits every expression inside function body. - LocalExprVisitor interface { - walkerEvents - VisitLocalExpr(ast.Expr) - } +// LocalExprVisitor visits every expression inside function body. +type LocalExprVisitor interface { + walkerEvents + VisitLocalExpr(ast.Expr) +} - // StmtListVisitor visits every statement list inside function body. - // This includes block statement bodies as well as implicit blocks - // introduced by case clauses and alike. - StmtListVisitor interface { - walkerEvents - VisitStmtList(ast.Node, []ast.Stmt) - } +// StmtListVisitor visits every statement list inside function body. +// This includes block statement bodies as well as implicit blocks +// introduced by case clauses and alike. +type StmtListVisitor interface { + walkerEvents + VisitStmtList(ast.Node, []ast.Stmt) +} - // StmtVisitor visits every statement inside function body. - StmtVisitor interface { - walkerEvents - VisitStmt(ast.Stmt) - } +// StmtVisitor visits every statement inside function body. +type StmtVisitor interface { + walkerEvents + VisitStmt(ast.Stmt) +} - // TypeExprVisitor visits every type describing expression. - // It also traverses struct types and interface types to run - // checker over their fields/method signatures. - TypeExprVisitor interface { - walkerEvents - VisitTypeExpr(ast.Expr) - } +// TypeExprVisitor visits every type describing expression. +// It also traverses struct types and interface types to run +// checker over their fields/method signatures. +type TypeExprVisitor interface { + walkerEvents + VisitTypeExpr(ast.Expr) +} - // LocalCommentVisitor visits every comment inside function body. - LocalCommentVisitor interface { - walkerEvents - VisitLocalComment(*ast.CommentGroup) - } +// LocalCommentVisitor visits every comment inside function body. +type LocalCommentVisitor interface { + walkerEvents + VisitLocalComment(*ast.CommentGroup) +} - // CommentVisitor visits every comment. 
- CommentVisitor interface { - walkerEvents - VisitComment(*ast.CommentGroup) - } -) +// CommentVisitor visits every comment. +type CommentVisitor interface { + walkerEvents + VisitComment(*ast.CommentGroup) +} // walkerEvents describes common hooks available for most visitor types. type walkerEvents interface { diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go index cd5e1c9793..f838a64c15 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go @@ -3,7 +3,7 @@ package astwalk import ( "go/types" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) // WalkerForFuncDecl returns file walker implementation for FuncDeclVisitor. diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astflow.go b/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astflow.go index 63d181e5eb..f64907d69b 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astflow.go +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astflow.go @@ -18,7 +18,7 @@ import ( // // If proven really useful, can be moved to go-toolsmith library. -// IsImmutable reports whether n can be midified through any operation. +// IsImmutable reports whether n can be modified through any operation. func IsImmutable(info *types.Info, n ast.Expr) bool { if astp.IsBasicLit(n) { return true diff --git a/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go b/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go index 64c2821dd1..2885dc7254 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go @@ -7,7 +7,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astp" "github.com/go-toolsmith/typep" @@ -16,7 +17,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "mapKey" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects suspicious map literal keys" info.Before = ` _ = map[string]int{ @@ -116,7 +117,7 @@ func (c *mapKeyChecker) checkWhitespace(lit *ast.CompositeLit) { } func (c *mapKeyChecker) warnWhitespace(key ast.Node) { - c.ctx.Warn(key, "suspucious whitespace in %s key", key) + c.ctx.Warn(key, "suspicious whitespace in %s key", key) } func (c *mapKeyChecker) warnDupKey(key ast.Node) { diff --git a/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go b/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go index 2553def14f..755d3b4722 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go @@ -5,7 +5,8 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astcopy" "github.com/go-toolsmith/typep" @@ -14,7 +15,7 @@ import ( func init() { var info linter.CheckerInfo info.Name 
= "methodExprCall" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects method expression call that can be replaced with a method call" info.Before = `f := foo{} foo.bar(f)` diff --git a/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go b/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go index a68acecca5..dfe73018c8 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go @@ -4,13 +4,13 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "nestingReduce" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Params = linter.CheckerParams{ "bodyWidth": { Value: 5, diff --git a/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go b/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go index 7e564b70f9..1a1b05e0df 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go @@ -2,10 +2,12 @@ package checkers import ( "go/ast" + "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "golang.org/x/tools/go/ast/astutil" ) @@ -13,7 +15,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "newDeref" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Summary = "Detects immediate dereferencing of `new` expressions" info.Before = `x := *new(bool)` info.After = `x := false` @@ -33,6 +35,10 @@ func (c *newDerefChecker) VisitExpr(expr ast.Expr) { call := astcast.ToCallExpr(deref.X) if astcast.ToIdent(call.Fun).Name == "new" { typ := c.ctx.TypeOf(call.Args[0]) + // allow *new(T) if T is a type parameter, see #1272 for details + if _, ok := typ.(*types.TypeParam); ok { + return + } zv := lintutil.ZeroValueOf(astutil.Unparen(call.Args[0]), typ) if zv != nil { c.warn(expr, zv) diff --git a/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go b/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go index 0a8e793eea..9a1213f5c2 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go @@ -5,7 +5,8 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/typep" ) @@ -13,7 +14,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "nilValReturn" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects return statements those results evaluate to nil" info.Before = ` if err == nil { diff --git a/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go b/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go index 
bed227ac3d..a25fac85cc 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go @@ -7,14 +7,15 @@ import ( "unicode" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "octalLiteral" - info.Tags = []string{"style", "experimental", "opinionated"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag, linter.OpinionatedTag} info.Summary = "Detects old-style octal literals" info.Before = `foo(02)` info.After = `foo(0o2)` @@ -30,6 +31,9 @@ type octalLiteralChecker struct { } func (c *octalLiteralChecker) VisitExpr(expr ast.Expr) { + if !c.ctx.GoVersion.GreaterOrEqual(linter.GoVersion{Major: 1, Minor: 13}) { + return + } lit := astcast.ToBasicLit(expr) if lit.Kind != token.INT { return diff --git a/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go b/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go index c80e6f8bcd..c777fec9e6 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go @@ -4,7 +4,8 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcopy" "github.com/go-toolsmith/astequal" ) @@ -12,7 +13,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "paramTypeCombine" - info.Tags = []string{"style", "opinionated"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag} info.Summary = "Detects if function parameters could be combined by type and suggest the way to do it" info.Before = `func foo(a, b int, c, d int, e, f int, g int) {}` info.After = `func foo(a, b, c, d, e, f, g int) {}` @@ -46,6 +47,7 @@ func (c *paramTypeCombineChecker) optimizeFuncType(f *ast.FuncType) *ast.FuncTyp return optimizedParamFunc } + func (c *paramTypeCombineChecker) optimizeParams(params *ast.FieldList) *ast.FieldList { // To avoid false positives, skip unnamed param lists. // @@ -71,8 +73,7 @@ func (c *paramTypeCombineChecker) optimizeParams(params *ast.FieldList) *ast.Fie names = make([]*ast.Ident, len(p.Names)) copy(names, p.Names) if astequal.Expr(p.Type, params.List[i].Type) { - list[len(list)-1].Names = - append(list[len(list)-1].Names, names...) + list[len(list)-1].Names = append(list[len(list)-1].Names, names...) 
} else { list = append(list, &ast.Field{ Names: names, diff --git a/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go b/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go index 88c8f4cb36..172a4acb58 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go @@ -5,13 +5,13 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "ptrToRefParam" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Summary = "Detects input and output parameters that have a type of pointer to referential type" info.Before = `func f(m *map[string]int) (*chan *int)` info.After = `func f(m map[string]int) (chan *int)` diff --git a/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go b/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go index 813fff36a4..3f61ee0bda 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go @@ -5,13 +5,13 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "rangeExprCopy" - info.Tags = []string{"performance"} + info.Tags = []string{linter.PerformanceTag} info.Params = linter.CheckerParams{ "sizeThreshold": { Value: 512, diff --git a/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go b/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go index eafc549d60..6d15c30cd1 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go @@ -4,14 +4,13 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" - "golang.org/x/exp/typeparams" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "rangeValCopy" - info.Tags = []string{"performance"} + info.Tags = []string{linter.PerformanceTag} info.Params = linter.CheckerParams{ "sizeThreshold": { Value: 128, @@ -66,10 +65,8 @@ func (c *rangeValCopyChecker) VisitStmt(stmt ast.Stmt) { if typ == nil { return } - if _, ok := typ.(*typeparams.TypeParam); ok { - return - } - if size, ok := c.ctx.SizeOf(typ); ok && size >= c.sizeThreshold { + size, ok := c.ctx.SizeOf(typ) + if ok && size >= c.sizeThreshold { c.warn(rng, size) } } diff --git a/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go b/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go index 31dc4aad3e..45aba261ba 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go @@ -7,13 +7,13 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "regexpPattern" - info.Tags = []string{"diagnostic", 
"experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects suspicious regexp patterns" info.Before = "regexp.MustCompile(`google.com|yandex.ru`)" info.After = "regexp.MustCompile(`google\\.com|yandex\\.ru`)" diff --git a/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go b/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go index 5b15e05ed2..f500f43500 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go @@ -8,16 +8,16 @@ import ( "strings" "unicode/utf8" - "github.com/quasilyte/regex/syntax" - "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + + "github.com/quasilyte/regex/syntax" ) func init() { var info linter.CheckerInfo info.Name = "regexpSimplify" - info.Tags = []string{"style", "experimental", "opinionated"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag, linter.OpinionatedTag} info.Summary = "Detects regexp patterns that can be simplified" info.Before = "regexp.MustCompile(`(?:a|b|c) [a-z][a-z]*`)" info.After = "regexp.MustCompile(`[abc] {3}[a-z]+`)" diff --git a/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go b/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go index 35c6a64490..29723a69a9 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go @@ -12,15 +12,15 @@ import ( "sort" "strings" - "github.com/quasilyte/go-ruleguard/ruleguard" + "github.com/go-critic/go-critic/linter" - "github.com/go-critic/go-critic/framework/linter" + "github.com/quasilyte/go-ruleguard/ruleguard" ) func init() { var info linter.CheckerInfo info.Name = "ruleguard" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Params = linter.CheckerParams{ "rules": { Value: "", @@ -84,7 +84,7 @@ func newErrorHandler(failOnErrorFlag string) (*parseErrorHandler, error) { h := parseErrorHandler{ failureConditions: make(map[string]func(err error) bool), } - var failOnErrorPredicates = map[string]func(error) bool{ + failOnErrorPredicates := map[string]func(error) bool{ "dsl": func(err error) bool { var e *ruleguard.ImportError; return !errors.As(err, &e) }, "import": func(err error) bool { var e *ruleguard.ImportError; return errors.As(err, &e) }, "all": func(err error) bool { return true }, @@ -160,8 +160,8 @@ func newRuleguardChecker(info *linter.CheckerInfo, ctx *linter.CheckerContext) ( } } - if !enabledTags["experimental"] { - disabledTags["experimental"] = true + if !enabledTags[linter.ExperimentalTag] { + disabledTags[linter.ExperimentalTag] = true } ruleguardDebug := os.Getenv("GOCRITIC_RULEGUARD_DEBUG") != "" @@ -274,7 +274,7 @@ func (c *ruleguardChecker) WalkFile(f *ast.File) { func runRuleguardEngine(ctx *linter.CheckerContext, f *ast.File, e *ruleguard.Engine, runCtx *ruleguard.RunContext) { type ruleguardReport struct { - node ast.Node + pos token.Pos message string fix linter.QuickFix } @@ -284,7 +284,7 @@ func runRuleguardEngine(ctx *linter.CheckerContext, f *ast.File, e *ruleguard.En // TODO(quasilyte): investigate whether we should add a rule name as // a message prefix here. 
r := ruleguardReport{ - node: data.Node, + pos: data.Node.Pos(), message: data.Message, } fix := data.Suggestion @@ -310,9 +310,9 @@ func runRuleguardEngine(ctx *linter.CheckerContext, f *ast.File, e *ruleguard.En }) for _, report := range reports { if report.fix.Replacement != nil { - ctx.WarnFixable(report.node, report.fix, "%s", report.message) + ctx.WarnFixableWithPos(report.pos, report.fix, "%s", report.message) } else { - ctx.Warn(report.node, "%s", report.message) + ctx.WarnWithPos(report.pos, "%s", report.message) } } } diff --git a/vendor/github.com/go-critic/go-critic/checkers/rulesdata/rulesdata.go b/vendor/github.com/go-critic/go-critic/checkers/rulesdata/rulesdata.go index b5dc58236c..4ab31076fc 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/rulesdata/rulesdata.go +++ b/vendor/github.com/go-critic/go-critic/checkers/rulesdata/rulesdata.go @@ -61,37 +61,20 @@ var PrecompiledRules = &ir.File{ {Line: 17, Value: "fmt.Sprintf(\"%s\", $x)"}, {Line: 17, Value: "fmt.Sprintf(\"%v\", $x)"}, }, - ReportTemplate: "use $x.Error() instead", - SuggestTemplate: "$x.Error()", - WhereExpr: ir.FilterExpr{ - Line: 18, - Op: ir.FilterVarTypeImplementsOp, - Src: "m[\"x\"].Type.Implements(`error`)", - Value: "x", - Args: []ir.FilterExpr{{Line: 18, Op: ir.FilterStringOp, Src: "`error`", Value: "error"}}, - }, - }, - { - Line: 22, - SyntaxPatterns: []ir.PatternString{ - {Line: 22, Value: "fmt.Sprint($x)"}, - {Line: 22, Value: "fmt.Sprintf(\"%s\", $x)"}, - {Line: 22, Value: "fmt.Sprintf(\"%v\", $x)"}, - }, ReportTemplate: "$x is already string", SuggestTemplate: "$x", WhereExpr: ir.FilterExpr{ - Line: 23, + Line: 18, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`string`)", Value: "x", - Args: []ir.FilterExpr{{Line: 23, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 18, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, }, { - Line: 32, + Line: 27, Name: "deferUnlambda", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -100,55 +83,55 @@ var PrecompiledRules = &ir.File{ DocAfter: "defer f()", Rules: []ir.Rule{ { - Line: 33, - SyntaxPatterns: []ir.PatternString{{Line: 33, Value: "defer func() { $f($*args) }()"}}, + Line: 28, + SyntaxPatterns: []ir.PatternString{{Line: 28, Value: "defer func() { $f($*args) }()"}}, ReportTemplate: "can rewrite as `defer $f($args)`", WhereExpr: ir.FilterExpr{ - Line: 34, + Line: 29, Op: ir.FilterAndOp, Src: "m[\"f\"].Node.Is(`Ident`) && m[\"f\"].Text != \"panic\" && m[\"f\"].Text != \"recover\" && m[\"args\"].Const", Args: []ir.FilterExpr{ { - Line: 34, + Line: 29, Op: ir.FilterAndOp, Src: "m[\"f\"].Node.Is(`Ident`) && m[\"f\"].Text != \"panic\" && m[\"f\"].Text != \"recover\"", Args: []ir.FilterExpr{ { - Line: 34, + Line: 29, Op: ir.FilterAndOp, Src: "m[\"f\"].Node.Is(`Ident`) && m[\"f\"].Text != \"panic\"", Args: []ir.FilterExpr{ { - Line: 34, + Line: 29, Op: ir.FilterVarNodeIsOp, Src: "m[\"f\"].Node.Is(`Ident`)", Value: "f", - Args: []ir.FilterExpr{{Line: 34, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, + Args: []ir.FilterExpr{{Line: 29, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, }, { - Line: 34, + Line: 29, Op: ir.FilterNeqOp, Src: "m[\"f\"].Text != \"panic\"", Args: []ir.FilterExpr{ - {Line: 34, Op: ir.FilterVarTextOp, Src: "m[\"f\"].Text", Value: "f"}, - {Line: 34, Op: ir.FilterStringOp, Src: "\"panic\"", Value: "panic"}, + {Line: 29, Op: ir.FilterVarTextOp, Src: "m[\"f\"].Text", Value: "f"}, + {Line: 29, Op: ir.FilterStringOp, Src: "\"panic\"", Value: 
"panic"}, }, }, }, }, { - Line: 34, + Line: 29, Op: ir.FilterNeqOp, Src: "m[\"f\"].Text != \"recover\"", Args: []ir.FilterExpr{ - {Line: 34, Op: ir.FilterVarTextOp, Src: "m[\"f\"].Text", Value: "f"}, - {Line: 34, Op: ir.FilterStringOp, Src: "\"recover\"", Value: "recover"}, + {Line: 29, Op: ir.FilterVarTextOp, Src: "m[\"f\"].Text", Value: "f"}, + {Line: 29, Op: ir.FilterStringOp, Src: "\"recover\"", Value: "recover"}, }, }, }, }, { - Line: 34, + Line: 29, Op: ir.FilterVarConstOp, Src: "m[\"args\"].Const", Value: "args", @@ -157,28 +140,28 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 37, - SyntaxPatterns: []ir.PatternString{{Line: 37, Value: "defer func() { $pkg.$f($*args) }()"}}, + Line: 32, + SyntaxPatterns: []ir.PatternString{{Line: 32, Value: "defer func() { $pkg.$f($*args) }()"}}, ReportTemplate: "can rewrite as `defer $pkg.$f($args)`", WhereExpr: ir.FilterExpr{ - Line: 38, + Line: 33, Op: ir.FilterAndOp, Src: "m[\"f\"].Node.Is(`Ident`) && m[\"args\"].Const && m[\"pkg\"].Object.Is(`PkgName`)", Args: []ir.FilterExpr{ { - Line: 38, + Line: 33, Op: ir.FilterAndOp, Src: "m[\"f\"].Node.Is(`Ident`) && m[\"args\"].Const", Args: []ir.FilterExpr{ { - Line: 38, + Line: 33, Op: ir.FilterVarNodeIsOp, Src: "m[\"f\"].Node.Is(`Ident`)", Value: "f", - Args: []ir.FilterExpr{{Line: 38, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, + Args: []ir.FilterExpr{{Line: 33, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, }, { - Line: 38, + Line: 33, Op: ir.FilterVarConstOp, Src: "m[\"args\"].Const", Value: "args", @@ -186,11 +169,11 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 38, + Line: 33, Op: ir.FilterVarObjectIsOp, Src: "m[\"pkg\"].Object.Is(`PkgName`)", Value: "pkg", - Args: []ir.FilterExpr{{Line: 38, Op: ir.FilterStringOp, Src: "`PkgName`", Value: "PkgName"}}, + Args: []ir.FilterExpr{{Line: 33, Op: ir.FilterStringOp, Src: "`PkgName`", Value: "PkgName"}}, }, }, }, @@ -198,84 +181,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 46, - Name: "ioutilDeprecated", - MatcherName: "m", - DocTags: []string{"style", "experimental"}, - DocSummary: "Detects deprecated io/ioutil package usages", - DocBefore: "ioutil.ReadAll(r)", - DocAfter: "io.ReadAll(r)", - Rules: []ir.Rule{ - { - Line: 47, - SyntaxPatterns: []ir.PatternString{{Line: 47, Value: "ioutil.ReadAll($_)"}}, - ReportTemplate: "ioutil.ReadAll is deprecated, use io.ReadAll instead", - WhereExpr: ir.FilterExpr{ - Line: 48, - Op: ir.FilterGoVersionGreaterEqThanOp, - Src: "m.GoVersion().GreaterEqThan(\"1.16\")", - Value: "1.16", - }, - }, - { - Line: 51, - SyntaxPatterns: []ir.PatternString{{Line: 51, Value: "ioutil.ReadFile($_)"}}, - ReportTemplate: "ioutil.ReadFile is deprecated, use os.ReadFile instead", - WhereExpr: ir.FilterExpr{ - Line: 52, - Op: ir.FilterGoVersionGreaterEqThanOp, - Src: "m.GoVersion().GreaterEqThan(\"1.16\")", - Value: "1.16", - }, - }, - { - Line: 55, - SyntaxPatterns: []ir.PatternString{{Line: 55, Value: "ioutil.WriteFile($_, $_, $_)"}}, - ReportTemplate: "ioutil.WriteFile is deprecated, use os.WriteFile instead", - WhereExpr: ir.FilterExpr{ - Line: 56, - Op: ir.FilterGoVersionGreaterEqThanOp, - Src: "m.GoVersion().GreaterEqThan(\"1.16\")", - Value: "1.16", - }, - }, - { - Line: 59, - SyntaxPatterns: []ir.PatternString{{Line: 59, Value: "ioutil.ReadDir($_)"}}, - ReportTemplate: "ioutil.ReadDir is deprecated, use os.ReadDir instead", - WhereExpr: ir.FilterExpr{ - Line: 60, - Op: ir.FilterGoVersionGreaterEqThanOp, - Src: "m.GoVersion().GreaterEqThan(\"1.16\")", - Value: "1.16", - }, - }, - { - Line: 
63, - SyntaxPatterns: []ir.PatternString{{Line: 63, Value: "ioutil.NopCloser($_)"}}, - ReportTemplate: "ioutil.NopCloser is deprecated, use io.NopCloser instead", - WhereExpr: ir.FilterExpr{ - Line: 64, - Op: ir.FilterGoVersionGreaterEqThanOp, - Src: "m.GoVersion().GreaterEqThan(\"1.16\")", - Value: "1.16", - }, - }, - { - Line: 67, - SyntaxPatterns: []ir.PatternString{{Line: 67, Value: "ioutil.Discard"}}, - ReportTemplate: "ioutil.Discard is deprecated, use io.Discard instead", - WhereExpr: ir.FilterExpr{ - Line: 68, - Op: ir.FilterGoVersionGreaterEqThanOp, - Src: "m.GoVersion().GreaterEqThan(\"1.16\")", - Value: "1.16", - }, - }, - }, - }, - { - Line: 76, + Line: 41, Name: "badLock", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -284,91 +190,91 @@ var PrecompiledRules = &ir.File{ DocAfter: "mu.Lock(); defer mu.Unlock()", Rules: []ir.Rule{ { - Line: 80, - SyntaxPatterns: []ir.PatternString{{Line: 80, Value: "$mu1.Lock(); $mu2.Unlock()"}}, + Line: 45, + SyntaxPatterns: []ir.PatternString{{Line: 45, Value: "$mu1.Lock(); $mu2.Unlock()"}}, ReportTemplate: "defer is missing, mutex is unlocked immediately", WhereExpr: ir.FilterExpr{ - Line: 81, + Line: 46, Op: ir.FilterEqOp, Src: "m[\"mu1\"].Text == m[\"mu2\"].Text", Args: []ir.FilterExpr{ - {Line: 81, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, - {Line: 81, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, + {Line: 46, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, + {Line: 46, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, }, }, LocationVar: "mu2", }, { - Line: 85, - SyntaxPatterns: []ir.PatternString{{Line: 85, Value: "$mu1.RLock(); $mu2.RUnlock()"}}, + Line: 50, + SyntaxPatterns: []ir.PatternString{{Line: 50, Value: "$mu1.RLock(); $mu2.RUnlock()"}}, ReportTemplate: "defer is missing, mutex is unlocked immediately", WhereExpr: ir.FilterExpr{ - Line: 86, + Line: 51, Op: ir.FilterEqOp, Src: "m[\"mu1\"].Text == m[\"mu2\"].Text", Args: []ir.FilterExpr{ - {Line: 86, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, - {Line: 86, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, + {Line: 51, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, + {Line: 51, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, }, }, LocationVar: "mu2", }, { - Line: 91, - SyntaxPatterns: []ir.PatternString{{Line: 91, Value: "$mu1.Lock(); defer $mu2.RUnlock()"}}, + Line: 56, + SyntaxPatterns: []ir.PatternString{{Line: 56, Value: "$mu1.Lock(); defer $mu2.RUnlock()"}}, ReportTemplate: "suspicious unlock, maybe Unlock was intended?", WhereExpr: ir.FilterExpr{ - Line: 92, + Line: 57, Op: ir.FilterEqOp, Src: "m[\"mu1\"].Text == m[\"mu2\"].Text", Args: []ir.FilterExpr{ - {Line: 92, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, - {Line: 92, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, + {Line: 57, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, + {Line: 57, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, }, }, LocationVar: "mu2", }, { - Line: 96, - SyntaxPatterns: []ir.PatternString{{Line: 96, Value: "$mu1.RLock(); defer $mu2.Unlock()"}}, + Line: 61, + SyntaxPatterns: []ir.PatternString{{Line: 61, Value: "$mu1.RLock(); defer $mu2.Unlock()"}}, ReportTemplate: "suspicious unlock, maybe RUnlock was intended?", WhereExpr: ir.FilterExpr{ - Line: 97, + Line: 62, Op: ir.FilterEqOp, Src: "m[\"mu1\"].Text == m[\"mu2\"].Text", Args: []ir.FilterExpr{ - {Line: 97, Op: ir.FilterVarTextOp, Src: 
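The hunk above drops the ioutilDeprecated ruleset from the precompiled rules. For reference, the Go >= 1.16 migration those rules used to suggest, as a sketch:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Replacements the removed rules recommended:
	//   ioutil.ReadAll   -> io.ReadAll     ioutil.ReadFile -> os.ReadFile
	//   ioutil.WriteFile -> os.WriteFile   ioutil.ReadDir  -> os.ReadDir
	//   ioutil.NopCloser -> io.NopCloser   ioutil.Discard  -> io.Discard
	data, err := io.ReadAll(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", data)
}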
"m[\"mu1\"].Text", Value: "mu1"}, - {Line: 97, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, + {Line: 62, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, + {Line: 62, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, }, }, LocationVar: "mu2", }, { - Line: 102, - SyntaxPatterns: []ir.PatternString{{Line: 102, Value: "$mu1.Lock(); defer $mu2.Lock()"}}, + Line: 67, + SyntaxPatterns: []ir.PatternString{{Line: 67, Value: "$mu1.Lock(); defer $mu2.Lock()"}}, ReportTemplate: "maybe defer $mu1.Unlock() was intended?", WhereExpr: ir.FilterExpr{ - Line: 103, + Line: 68, Op: ir.FilterEqOp, Src: "m[\"mu1\"].Text == m[\"mu2\"].Text", Args: []ir.FilterExpr{ - {Line: 103, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, - {Line: 103, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, + {Line: 68, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, + {Line: 68, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, }, }, LocationVar: "mu2", }, { - Line: 107, - SyntaxPatterns: []ir.PatternString{{Line: 107, Value: "$mu1.RLock(); defer $mu2.RLock()"}}, + Line: 72, + SyntaxPatterns: []ir.PatternString{{Line: 72, Value: "$mu1.RLock(); defer $mu2.RLock()"}}, ReportTemplate: "maybe defer $mu1.RUnlock() was intended?", WhereExpr: ir.FilterExpr{ - Line: 108, + Line: 73, Op: ir.FilterEqOp, Src: "m[\"mu1\"].Text == m[\"mu2\"].Text", Args: []ir.FilterExpr{ - {Line: 108, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, - {Line: 108, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, + {Line: 73, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, + {Line: 73, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, }, }, LocationVar: "mu2", @@ -376,7 +282,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 117, + Line: 82, Name: "httpNoBody", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -385,39 +291,54 @@ var PrecompiledRules = &ir.File{ DocAfter: "http.NewRequest(\"GET\", url, http.NoBody)", Rules: []ir.Rule{ { - Line: 118, - SyntaxPatterns: []ir.PatternString{{Line: 118, Value: "http.NewRequest($method, $url, $nil)"}}, + Line: 83, + SyntaxPatterns: []ir.PatternString{{Line: 83, Value: "http.NewRequest($method, $url, $nil)"}}, ReportTemplate: "http.NoBody should be preferred to the nil request body", SuggestTemplate: "http.NewRequest($method, $url, http.NoBody)", WhereExpr: ir.FilterExpr{ - Line: 119, + Line: 84, Op: ir.FilterEqOp, Src: "m[\"nil\"].Text == \"nil\"", Args: []ir.FilterExpr{ - {Line: 119, Op: ir.FilterVarTextOp, Src: "m[\"nil\"].Text", Value: "nil"}, - {Line: 119, Op: ir.FilterStringOp, Src: "\"nil\"", Value: "nil"}, + {Line: 84, Op: ir.FilterVarTextOp, Src: "m[\"nil\"].Text", Value: "nil"}, + {Line: 84, Op: ir.FilterStringOp, Src: "\"nil\"", Value: "nil"}, }, }, }, { - Line: 123, - SyntaxPatterns: []ir.PatternString{{Line: 123, Value: "http.NewRequestWithContext($ctx, $method, $url, $nil)"}}, + Line: 88, + SyntaxPatterns: []ir.PatternString{{Line: 88, Value: "http.NewRequestWithContext($ctx, $method, $url, $nil)"}}, ReportTemplate: "http.NoBody should be preferred to the nil request body", SuggestTemplate: "http.NewRequestWithContext($ctx, $method, $url, http.NoBody)", WhereExpr: ir.FilterExpr{ - Line: 124, + Line: 89, + Op: ir.FilterEqOp, + Src: "m[\"nil\"].Text == \"nil\"", + Args: []ir.FilterExpr{ + {Line: 89, Op: ir.FilterVarTextOp, Src: "m[\"nil\"].Text", Value: "nil"}, + {Line: 89, Op: ir.FilterStringOp, Src: "\"nil\"", Value: "nil"}, + }, + }, 
+ }, + { + Line: 93, + SyntaxPatterns: []ir.PatternString{{Line: 93, Value: "httptest.NewRequest($method, $url, $nil)"}}, + ReportTemplate: "http.NoBody should be preferred to the nil request body", + SuggestTemplate: "httptest.NewRequest($method, $url, http.NoBody)", + WhereExpr: ir.FilterExpr{ + Line: 94, Op: ir.FilterEqOp, Src: "m[\"nil\"].Text == \"nil\"", Args: []ir.FilterExpr{ - {Line: 124, Op: ir.FilterVarTextOp, Src: "m[\"nil\"].Text", Value: "nil"}, - {Line: 124, Op: ir.FilterStringOp, Src: "\"nil\"", Value: "nil"}, + {Line: 94, Op: ir.FilterVarTextOp, Src: "m[\"nil\"].Text", Value: "nil"}, + {Line: 94, Op: ir.FilterStringOp, Src: "\"nil\"", Value: "nil"}, }, }, }, }, }, { - Line: 134, + Line: 104, Name: "preferDecodeRune", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -426,46 +347,46 @@ var PrecompiledRules = &ir.File{ DocAfter: "r, _ := utf8.DecodeRuneInString(s)", DocNote: "See Go issue for details: https://github.com/golang/go/issues/45260", Rules: []ir.Rule{{ - Line: 135, - SyntaxPatterns: []ir.PatternString{{Line: 135, Value: "[]rune($s)[0]"}}, + Line: 105, + SyntaxPatterns: []ir.PatternString{{Line: 105, Value: "[]rune($s)[0]"}}, ReportTemplate: "consider replacing $$ with utf8.DecodeRuneInString($s)", WhereExpr: ir.FilterExpr{ - Line: 136, + Line: 106, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 136, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 106, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }}, }, { - Line: 144, + Line: 114, Name: "sloppyLen", MatcherName: "m", - DocTags: []string{"style"}, + DocTags: []string{"diagnostic"}, DocSummary: "Detects usage of `len` when result is obvious or doesn't make sense", DocBefore: "len(arr) <= 0", DocAfter: "len(arr) == 0", Rules: []ir.Rule{ { - Line: 145, - SyntaxPatterns: []ir.PatternString{{Line: 145, Value: "len($_) >= 0"}}, + Line: 115, + SyntaxPatterns: []ir.PatternString{{Line: 115, Value: "len($_) >= 0"}}, ReportTemplate: "$$ is always true", }, { - Line: 146, - SyntaxPatterns: []ir.PatternString{{Line: 146, Value: "len($_) < 0"}}, + Line: 116, + SyntaxPatterns: []ir.PatternString{{Line: 116, Value: "len($_) < 0"}}, ReportTemplate: "$$ is always false", }, { - Line: 147, - SyntaxPatterns: []ir.PatternString{{Line: 147, Value: "len($x) <= 0"}}, + Line: 117, + SyntaxPatterns: []ir.PatternString{{Line: 117, Value: "len($x) <= 0"}}, ReportTemplate: "$$ can be len($x) == 0", }, }, }, { - Line: 154, + Line: 124, Name: "valSwap", MatcherName: "m", DocTags: []string{"style"}, @@ -473,13 +394,13 @@ var PrecompiledRules = &ir.File{ DocBefore: "*tmp = *x; *x = *y; *y = *tmp", DocAfter: "*x, *y = *y, *x", Rules: []ir.Rule{{ - Line: 155, - SyntaxPatterns: []ir.PatternString{{Line: 155, Value: "$tmp := $y; $y = $x; $x = $tmp"}}, + Line: 125, + SyntaxPatterns: []ir.PatternString{{Line: 125, Value: "$tmp := $y; $y = $x; $x = $tmp"}}, ReportTemplate: "can re-write as `$y, $x = $x, $y`", }}, }, { - Line: 163, + Line: 133, Name: "switchTrue", MatcherName: "m", DocTags: []string{"style"}, @@ -488,19 +409,19 @@ var PrecompiledRules = &ir.File{ DocAfter: "switch {...}", Rules: []ir.Rule{ { - Line: 164, - SyntaxPatterns: []ir.PatternString{{Line: 164, Value: "switch true { $*_ }"}}, + Line: 134, + SyntaxPatterns: []ir.PatternString{{Line: 134, Value: "switch true { $*_ }"}}, ReportTemplate: "replace 'switch true {}' with 'switch {}'", }, { - Line: 166, - SyntaxPatterns: []ir.PatternString{{Line: 166, 
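The httpNoBody hunk above adds an httptest.NewRequest pattern alongside the existing http.NewRequest ones. A sketch of the flagged and suggested forms:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	r1, _ := http.NewRequest(http.MethodGet, "https://example.com", nil) // flagged: nil body
	r2, _ := http.NewRequest(http.MethodGet, "https://example.com", http.NoBody)
	r3 := httptest.NewRequest(http.MethodGet, "https://example.com", http.NoBody) // covered by the new rule
	fmt.Println(r1.Method, r2.Method, r3.Method)
}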
Value: "switch $x; true { $*_ }"}}, + Line: 136, + SyntaxPatterns: []ir.PatternString{{Line: 136, Value: "switch $x; true { $*_ }"}}, ReportTemplate: "replace 'switch $x; true {}' with 'switch $x; {}'", }, }, }, { - Line: 174, + Line: 144, Name: "flagDeref", MatcherName: "m", DocTags: []string{"diagnostic"}, @@ -509,49 +430,49 @@ var PrecompiledRules = &ir.File{ DocAfter: "var b bool; flag.BoolVar(&b, \"b\", false, \"b docs\")", Rules: []ir.Rule{ { - Line: 175, - SyntaxPatterns: []ir.PatternString{{Line: 175, Value: "*flag.Bool($*_)"}}, + Line: 145, + SyntaxPatterns: []ir.PatternString{{Line: 145, Value: "*flag.Bool($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.BoolVar", }, { - Line: 176, - SyntaxPatterns: []ir.PatternString{{Line: 176, Value: "*flag.Duration($*_)"}}, + Line: 146, + SyntaxPatterns: []ir.PatternString{{Line: 146, Value: "*flag.Duration($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.DurationVar", }, { - Line: 177, - SyntaxPatterns: []ir.PatternString{{Line: 177, Value: "*flag.Float64($*_)"}}, + Line: 147, + SyntaxPatterns: []ir.PatternString{{Line: 147, Value: "*flag.Float64($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.Float64Var", }, { - Line: 178, - SyntaxPatterns: []ir.PatternString{{Line: 178, Value: "*flag.Int($*_)"}}, + Line: 148, + SyntaxPatterns: []ir.PatternString{{Line: 148, Value: "*flag.Int($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.IntVar", }, { - Line: 179, - SyntaxPatterns: []ir.PatternString{{Line: 179, Value: "*flag.Int64($*_)"}}, + Line: 149, + SyntaxPatterns: []ir.PatternString{{Line: 149, Value: "*flag.Int64($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.Int64Var", }, { - Line: 180, - SyntaxPatterns: []ir.PatternString{{Line: 180, Value: "*flag.String($*_)"}}, + Line: 150, + SyntaxPatterns: []ir.PatternString{{Line: 150, Value: "*flag.String($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.StringVar", }, { - Line: 181, - SyntaxPatterns: []ir.PatternString{{Line: 181, Value: "*flag.Uint($*_)"}}, + Line: 151, + SyntaxPatterns: []ir.PatternString{{Line: 151, Value: "*flag.Uint($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.UintVar", }, { - Line: 182, - SyntaxPatterns: []ir.PatternString{{Line: 182, Value: "*flag.Uint64($*_)"}}, + Line: 152, + SyntaxPatterns: []ir.PatternString{{Line: 152, Value: "*flag.Uint64($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.Uint64Var", }, }, }, { - Line: 189, + Line: 159, Name: "emptyStringTest", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -560,33 +481,57 @@ var PrecompiledRules = &ir.File{ DocAfter: "s == \"\"", Rules: []ir.Rule{ { - Line: 190, - SyntaxPatterns: []ir.PatternString{{Line: 190, Value: "len($s) != 0"}}, + Line: 160, + SyntaxPatterns: []ir.PatternString{{Line: 160, Value: "len($s) != 0"}}, ReportTemplate: "replace `$$` with `$s != \"\"`", WhereExpr: ir.FilterExpr{ - Line: 191, + Line: 161, + Op: ir.FilterVarTypeIsOp, + Src: "m[\"s\"].Type.Is(`string`)", + Value: "s", + Args: []ir.FilterExpr{{Line: 161, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + }, + }, + { + Line: 163, + SyntaxPatterns: []ir.PatternString{{Line: 163, Value: "len($s) > 0"}}, + ReportTemplate: "replace `$$` with `$s != \"\"`", + WhereExpr: 
ir.FilterExpr{ + Line: 164, + Op: ir.FilterVarTypeIsOp, + Src: "m[\"s\"].Type.Is(`string`)", + Value: "s", + Args: []ir.FilterExpr{{Line: 164, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + }, + }, + { + Line: 167, + SyntaxPatterns: []ir.PatternString{{Line: 167, Value: "len($s) == 0"}}, + ReportTemplate: "replace `$$` with `$s == \"\"`", + WhereExpr: ir.FilterExpr{ + Line: 168, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 191, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 168, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, { - Line: 194, - SyntaxPatterns: []ir.PatternString{{Line: 194, Value: "len($s) == 0"}}, + Line: 170, + SyntaxPatterns: []ir.PatternString{{Line: 170, Value: "len($s) <= 0"}}, ReportTemplate: "replace `$$` with `$s == \"\"`", WhereExpr: ir.FilterExpr{ - Line: 195, + Line: 171, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 195, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 171, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, }, { - Line: 203, + Line: 179, Name: "stringXbytes", MatcherName: "m", DocTags: []string{"performance"}, @@ -595,180 +540,180 @@ var PrecompiledRules = &ir.File{ DocAfter: "copy(b, s)", Rules: []ir.Rule{ { - Line: 204, - SyntaxPatterns: []ir.PatternString{{Line: 204, Value: "copy($_, []byte($s))"}}, + Line: 180, + SyntaxPatterns: []ir.PatternString{{Line: 180, Value: "copy($_, []byte($s))"}}, ReportTemplate: "can simplify `[]byte($s)` to `$s`", }, { - Line: 206, - SyntaxPatterns: []ir.PatternString{{Line: 206, Value: "string($b) == \"\""}}, + Line: 182, + SyntaxPatterns: []ir.PatternString{{Line: 182, Value: "string($b) == \"\""}}, ReportTemplate: "suggestion: len($b) == 0", SuggestTemplate: "len($b) == 0", WhereExpr: ir.FilterExpr{ - Line: 206, + Line: 182, Op: ir.FilterVarTypeIsOp, Src: "m[\"b\"].Type.Is(`[]byte`)", Value: "b", - Args: []ir.FilterExpr{{Line: 206, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 182, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, }, { - Line: 207, - SyntaxPatterns: []ir.PatternString{{Line: 207, Value: "string($b) != \"\""}}, + Line: 183, + SyntaxPatterns: []ir.PatternString{{Line: 183, Value: "string($b) != \"\""}}, ReportTemplate: "suggestion: len($b) != 0", SuggestTemplate: "len($b) != 0", WhereExpr: ir.FilterExpr{ - Line: 207, + Line: 183, Op: ir.FilterVarTypeIsOp, Src: "m[\"b\"].Type.Is(`[]byte`)", Value: "b", - Args: []ir.FilterExpr{{Line: 207, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 183, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, }, { - Line: 209, - SyntaxPatterns: []ir.PatternString{{Line: 209, Value: "len(string($b))"}}, + Line: 185, + SyntaxPatterns: []ir.PatternString{{Line: 185, Value: "len(string($b))"}}, ReportTemplate: "suggestion: len($b)", SuggestTemplate: "len($b)", WhereExpr: ir.FilterExpr{ - Line: 209, + Line: 185, Op: ir.FilterVarTypeIsOp, Src: "m[\"b\"].Type.Is(`[]byte`)", Value: "b", - Args: []ir.FilterExpr{{Line: 209, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 185, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, }, { - Line: 211, - SyntaxPatterns: []ir.PatternString{{Line: 211, Value: "string($x) == string($y)"}}, + Line: 187, + SyntaxPatterns: 
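The emptyStringTest hunk above extends the rule from len($s) != 0 and len($s) == 0 to the len($s) > 0 and len($s) <= 0 spellings. A sketch:

package main

import "fmt"

func describe(s string) string {
	if len(s) > 0 { // flagged: replace with s != ""
		return "non-empty"
	}
	return "empty"
}

func main() { fmt.Println(describe("x"), describe("")) }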
[]ir.PatternString{{Line: 187, Value: "string($x) == string($y)"}}, ReportTemplate: "suggestion: bytes.Equal($x, $y)", SuggestTemplate: "bytes.Equal($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 212, + Line: 188, Op: ir.FilterAndOp, Src: "m[\"x\"].Type.Is(`[]byte`) && m[\"y\"].Type.Is(`[]byte`)", Args: []ir.FilterExpr{ { - Line: 212, + Line: 188, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]byte`)", Value: "x", - Args: []ir.FilterExpr{{Line: 212, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 188, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, { - Line: 212, + Line: 188, Op: ir.FilterVarTypeIsOp, Src: "m[\"y\"].Type.Is(`[]byte`)", Value: "y", - Args: []ir.FilterExpr{{Line: 212, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 188, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, }, }, }, { - Line: 215, - SyntaxPatterns: []ir.PatternString{{Line: 215, Value: "string($x) != string($y)"}}, + Line: 191, + SyntaxPatterns: []ir.PatternString{{Line: 191, Value: "string($x) != string($y)"}}, ReportTemplate: "suggestion: !bytes.Equal($x, $y)", SuggestTemplate: "!bytes.Equal($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 216, + Line: 192, Op: ir.FilterAndOp, Src: "m[\"x\"].Type.Is(`[]byte`) && m[\"y\"].Type.Is(`[]byte`)", Args: []ir.FilterExpr{ { - Line: 216, + Line: 192, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]byte`)", Value: "x", - Args: []ir.FilterExpr{{Line: 216, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 192, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, { - Line: 216, + Line: 192, Op: ir.FilterVarTypeIsOp, Src: "m[\"y\"].Type.Is(`[]byte`)", Value: "y", - Args: []ir.FilterExpr{{Line: 216, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 192, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, }, }, }, { - Line: 219, - SyntaxPatterns: []ir.PatternString{{Line: 219, Value: "$re.Match([]byte($s))"}}, + Line: 195, + SyntaxPatterns: []ir.PatternString{{Line: 195, Value: "$re.Match([]byte($s))"}}, ReportTemplate: "suggestion: $re.MatchString($s)", SuggestTemplate: "$re.MatchString($s)", WhereExpr: ir.FilterExpr{ - Line: 220, + Line: 196, Op: ir.FilterAndOp, Src: "m[\"re\"].Type.Is(`*regexp.Regexp`) && m[\"s\"].Type.Is(`string`)", Args: []ir.FilterExpr{ { - Line: 220, + Line: 196, Op: ir.FilterVarTypeIsOp, Src: "m[\"re\"].Type.Is(`*regexp.Regexp`)", Value: "re", - Args: []ir.FilterExpr{{Line: 220, Op: ir.FilterStringOp, Src: "`*regexp.Regexp`", Value: "*regexp.Regexp"}}, + Args: []ir.FilterExpr{{Line: 196, Op: ir.FilterStringOp, Src: "`*regexp.Regexp`", Value: "*regexp.Regexp"}}, }, { - Line: 220, + Line: 196, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 220, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 196, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, }, { - Line: 223, - SyntaxPatterns: []ir.PatternString{{Line: 223, Value: "$re.FindIndex([]byte($s))"}}, + Line: 199, + SyntaxPatterns: []ir.PatternString{{Line: 199, Value: "$re.FindIndex([]byte($s))"}}, ReportTemplate: "suggestion: $re.FindStringIndex($s)", SuggestTemplate: "$re.FindStringIndex($s)", WhereExpr: ir.FilterExpr{ - Line: 224, + Line: 200, Op: ir.FilterAndOp, Src: "m[\"re\"].Type.Is(`*regexp.Regexp`) && m[\"s\"].Type.Is(`string`)", Args: []ir.FilterExpr{ { - Line: 224, + Line: 
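The stringXbytes rules renumbered above replace string conversions of []byte operands with allocation-free equivalents. Two of the suggestions, as a sketch:

package main

import (
	"bytes"
	"fmt"
	"regexp"
)

func main() {
	x, y := []byte("a"), []byte("a")
	fmt.Println(string(x) == string(y)) // flagged: suggestion is bytes.Equal(x, y)
	fmt.Println(bytes.Equal(x, y))

	re, s := regexp.MustCompile(`^a`), "abc"
	fmt.Println(re.Match([]byte(s))) // flagged: suggestion is re.MatchString(s)
	fmt.Println(re.MatchString(s))
}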
200, Op: ir.FilterVarTypeIsOp, Src: "m[\"re\"].Type.Is(`*regexp.Regexp`)", Value: "re", - Args: []ir.FilterExpr{{Line: 224, Op: ir.FilterStringOp, Src: "`*regexp.Regexp`", Value: "*regexp.Regexp"}}, + Args: []ir.FilterExpr{{Line: 200, Op: ir.FilterStringOp, Src: "`*regexp.Regexp`", Value: "*regexp.Regexp"}}, }, { - Line: 224, + Line: 200, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 224, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 200, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, }, { - Line: 227, - SyntaxPatterns: []ir.PatternString{{Line: 227, Value: "$re.FindAllIndex([]byte($s), $n)"}}, + Line: 203, + SyntaxPatterns: []ir.PatternString{{Line: 203, Value: "$re.FindAllIndex([]byte($s), $n)"}}, ReportTemplate: "suggestion: $re.FindAllStringIndex($s, $n)", SuggestTemplate: "$re.FindAllStringIndex($s, $n)", WhereExpr: ir.FilterExpr{ - Line: 228, + Line: 204, Op: ir.FilterAndOp, Src: "m[\"re\"].Type.Is(`*regexp.Regexp`) && m[\"s\"].Type.Is(`string`)", Args: []ir.FilterExpr{ { - Line: 228, + Line: 204, Op: ir.FilterVarTypeIsOp, Src: "m[\"re\"].Type.Is(`*regexp.Regexp`)", Value: "re", - Args: []ir.FilterExpr{{Line: 228, Op: ir.FilterStringOp, Src: "`*regexp.Regexp`", Value: "*regexp.Regexp"}}, + Args: []ir.FilterExpr{{Line: 204, Op: ir.FilterStringOp, Src: "`*regexp.Regexp`", Value: "*regexp.Regexp"}}, }, { - Line: 228, + Line: 204, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 228, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 204, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, @@ -776,7 +721,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 237, + Line: 213, Name: "indexAlloc", MatcherName: "m", DocTags: []string{"performance"}, @@ -785,22 +730,22 @@ var PrecompiledRules = &ir.File{ DocAfter: "bytes.Index(x, []byte(y))", DocNote: "See Go issue for details: https://github.com/golang/go/issues/25864", Rules: []ir.Rule{{ - Line: 238, - SyntaxPatterns: []ir.PatternString{{Line: 238, Value: "strings.Index(string($x), $y)"}}, + Line: 214, + SyntaxPatterns: []ir.PatternString{{Line: 214, Value: "strings.Index(string($x), $y)"}}, ReportTemplate: "consider replacing $$ with bytes.Index($x, []byte($y))", WhereExpr: ir.FilterExpr{ - Line: 239, + Line: 215, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure", Args: []ir.FilterExpr{ - {Line: 239, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, - {Line: 239, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, + {Line: 215, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 215, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, }, }, }}, }, { - Line: 247, + Line: 223, Name: "wrapperFunc", MatcherName: "m", DocTags: []string{"style"}, @@ -809,83 +754,169 @@ var PrecompiledRules = &ir.File{ DocAfter: "wg.Done()", Rules: []ir.Rule{ { - Line: 248, - SyntaxPatterns: []ir.PatternString{{Line: 248, Value: "$wg.Add(-1)"}}, + Line: 224, + SyntaxPatterns: []ir.PatternString{{Line: 224, Value: "$wg.Add(-1)"}}, ReportTemplate: "use WaitGroup.Done method in `$$`", WhereExpr: ir.FilterExpr{ - Line: 249, + Line: 225, Op: ir.FilterVarTypeIsOp, Src: "m[\"wg\"].Type.Is(`sync.WaitGroup`)", Value: "wg", - Args: []ir.FilterExpr{{Line: 249, Op: ir.FilterStringOp, Src: "`sync.WaitGroup`", Value: "sync.WaitGroup"}}, + Args: []ir.FilterExpr{{Line: 225, Op: ir.FilterStringOp, Src: 
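For the indexAlloc rule above (see https://github.com/golang/go/issues/25864): when the haystack is already a []byte, searching it as a string forces a copy. A sketch:

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	haystack := []byte("haystack")
	needle := "stack"
	i := strings.Index(string(haystack), needle) // flagged: string(haystack) copies
	j := bytes.Index(haystack, []byte(needle))   // the rule's suggested replacement
	fmt.Println(i, j)
}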
"`sync.WaitGroup`", Value: "sync.WaitGroup"}}, }, }, { - Line: 252, - SyntaxPatterns: []ir.PatternString{{Line: 252, Value: "$buf.Truncate(0)"}}, + Line: 228, + SyntaxPatterns: []ir.PatternString{{Line: 228, Value: "$buf.Truncate(0)"}}, ReportTemplate: "use Buffer.Reset method in `$$`", WhereExpr: ir.FilterExpr{ - Line: 253, + Line: 229, Op: ir.FilterVarTypeIsOp, Src: "m[\"buf\"].Type.Is(`bytes.Buffer`)", Value: "buf", - Args: []ir.FilterExpr{{Line: 253, Op: ir.FilterStringOp, Src: "`bytes.Buffer`", Value: "bytes.Buffer"}}, + Args: []ir.FilterExpr{{Line: 229, Op: ir.FilterStringOp, Src: "`bytes.Buffer`", Value: "bytes.Buffer"}}, }, }, { - Line: 256, - SyntaxPatterns: []ir.PatternString{{Line: 256, Value: "http.HandlerFunc(http.NotFound)"}}, + Line: 232, + SyntaxPatterns: []ir.PatternString{{Line: 232, Value: "http.HandlerFunc(http.NotFound)"}}, ReportTemplate: "use http.NotFoundHandler method in `$$`", }, { - Line: 258, - SyntaxPatterns: []ir.PatternString{{Line: 258, Value: "strings.SplitN($_, $_, -1)"}}, + Line: 234, + SyntaxPatterns: []ir.PatternString{{Line: 234, Value: "strings.SplitN($_, $_, -1)"}}, ReportTemplate: "use strings.Split method in `$$`", }, { - Line: 259, - SyntaxPatterns: []ir.PatternString{{Line: 259, Value: "strings.Replace($_, $_, $_, -1)"}}, + Line: 235, + SyntaxPatterns: []ir.PatternString{{Line: 235, Value: "strings.Replace($_, $_, $_, -1)"}}, ReportTemplate: "use strings.ReplaceAll method in `$$`", }, { - Line: 260, - SyntaxPatterns: []ir.PatternString{{Line: 260, Value: "strings.Map(unicode.ToTitle, $_)"}}, + Line: 236, + SyntaxPatterns: []ir.PatternString{{Line: 236, Value: "strings.Map(unicode.ToTitle, $_)"}}, ReportTemplate: "use strings.ToTitle method in `$$`", }, { - Line: 262, - SyntaxPatterns: []ir.PatternString{{Line: 262, Value: "bytes.SplitN(b, []byte(\".\"), -1)"}}, + Line: 237, + SyntaxPatterns: []ir.PatternString{ + {Line: 237, Value: "strings.Index($s1, $s2) >= 0"}, + {Line: 237, Value: "strings.Index($s1, $s2) != -1"}, + }, + ReportTemplate: "suggestion: strings.Contains($s1, $s2)", + SuggestTemplate: "strings.Contains($s1, $s2)", + }, + { + Line: 238, + SyntaxPatterns: []ir.PatternString{ + {Line: 238, Value: "strings.IndexAny($s1, $s2) >= 0"}, + {Line: 238, Value: "strings.IndexAny($s1, $s2) != -1"}, + }, + ReportTemplate: "suggestion: strings.ContainsAny($s1, $s2)", + SuggestTemplate: "strings.ContainsAny($s1, $s2)", + }, + { + Line: 239, + SyntaxPatterns: []ir.PatternString{ + {Line: 239, Value: "strings.IndexRune($s1, $s2) >= 0"}, + {Line: 239, Value: "strings.IndexRune($s1, $s2) != -1"}, + }, + ReportTemplate: "suggestion: strings.ContainsRune($s1, $s2)", + SuggestTemplate: "strings.ContainsRune($s1, $s2)", + }, + { + Line: 241, + SyntaxPatterns: []ir.PatternString{ + {Line: 241, Value: "$i := strings.Index($s, $sep); $*_; $x, $y = $s[:$i], $s[$i+1:]"}, + {Line: 242, Value: "$i := strings.Index($s, $sep); $*_; $x = $s[:$i]; $*_; $y = $s[$i+1:]"}, + }, + ReportTemplate: "suggestion: $x, $y, _ = strings.Cut($s, $sep)", + SuggestTemplate: "$x, $y, _ = strings.Cut($s, $sep)", + WhereExpr: ir.FilterExpr{ + Line: 243, + Op: ir.FilterGoVersionGreaterEqThanOp, + Src: "m.GoVersion().GreaterEqThan(\"1.18\")", + Value: "1.18", + }, + }, + { + Line: 246, + SyntaxPatterns: []ir.PatternString{ + {Line: 247, Value: "if $i := strings.Index($s, $sep); $i != -1 { $*_; $x, $y = $s[:$i], $s[$i+1:]; $*_ }"}, + {Line: 248, Value: "if $i := strings.Index($s, $sep); $i != -1 { $*_; $x = $s[:$i]; $*_; $y = $s[$i+1:]; $*_ }"}, + {Line: 249, Value: "if $i := 
strings.Index($s, $sep); $i >= 0 { $*_; $x, $y = $s[:$i], $s[$i+1:]; $*_ }"}, + {Line: 250, Value: "if $i := strings.Index($s, $sep); $i >= 0 { $*_; $x = $s[:$i]; $*_; $y = $s[$i+1:]; $*_ }"}, + }, + ReportTemplate: "suggestion: if $x, $y, ok = strings.Cut($s, $sep); ok { ... }", + SuggestTemplate: "if $x, $y, ok = strings.Cut($s, $sep); ok { ... }", + WhereExpr: ir.FilterExpr{ + Line: 251, + Op: ir.FilterGoVersionGreaterEqThanOp, + Src: "m.GoVersion().GreaterEqThan(\"1.18\")", + Value: "1.18", + }, + }, + { + Line: 254, + SyntaxPatterns: []ir.PatternString{{Line: 254, Value: "bytes.SplitN(b, []byte(\".\"), -1)"}}, ReportTemplate: "use bytes.Split method in `$$`", }, { - Line: 263, - SyntaxPatterns: []ir.PatternString{{Line: 263, Value: "bytes.Replace($_, $_, $_, -1)"}}, + Line: 255, + SyntaxPatterns: []ir.PatternString{{Line: 255, Value: "bytes.Replace($_, $_, $_, -1)"}}, ReportTemplate: "use bytes.ReplaceAll method in `$$`", }, { - Line: 264, - SyntaxPatterns: []ir.PatternString{{Line: 264, Value: "bytes.Map(unicode.ToUpper, $_)"}}, + Line: 256, + SyntaxPatterns: []ir.PatternString{{Line: 256, Value: "bytes.Map(unicode.ToUpper, $_)"}}, ReportTemplate: "use bytes.ToUpper method in `$$`", }, { - Line: 265, - SyntaxPatterns: []ir.PatternString{{Line: 265, Value: "bytes.Map(unicode.ToLower, $_)"}}, + Line: 257, + SyntaxPatterns: []ir.PatternString{{Line: 257, Value: "bytes.Map(unicode.ToLower, $_)"}}, ReportTemplate: "use bytes.ToLower method in `$$`", }, { - Line: 266, - SyntaxPatterns: []ir.PatternString{{Line: 266, Value: "bytes.Map(unicode.ToTitle, $_)"}}, + Line: 258, + SyntaxPatterns: []ir.PatternString{{Line: 258, Value: "bytes.Map(unicode.ToTitle, $_)"}}, ReportTemplate: "use bytes.ToTitle method in `$$`", }, { - Line: 268, - SyntaxPatterns: []ir.PatternString{{Line: 268, Value: "draw.DrawMask($_, $_, $_, $_, nil, image.Point{}, $_)"}}, + Line: 259, + SyntaxPatterns: []ir.PatternString{ + {Line: 259, Value: "bytes.Index($b1, $b2) >= 0"}, + {Line: 259, Value: "bytes.Index($b1, $b2) != -1"}, + }, + ReportTemplate: "suggestion: bytes.Contains($b1, $b2)", + SuggestTemplate: "bytes.Contains($b1, $b2)", + }, + { + Line: 260, + SyntaxPatterns: []ir.PatternString{ + {Line: 260, Value: "bytes.IndexAny($b1, $b2) >= 0"}, + {Line: 260, Value: "bytes.IndexAny($b1, $b2) != -1"}, + }, + ReportTemplate: "suggestion: bytes.ContainsAny($b1, $b2)", + SuggestTemplate: "bytes.ContainsAny($b1, $b2)", + }, + { + Line: 261, + SyntaxPatterns: []ir.PatternString{ + {Line: 261, Value: "bytes.IndexRune($b1, $b2) >= 0"}, + {Line: 261, Value: "bytes.IndexRune($b1, $b2) != -1"}, + }, + ReportTemplate: "suggestion: bytes.ContainsRune($b1, $b2)", + SuggestTemplate: "bytes.ContainsRune($b1, $b2)", + }, + { + Line: 263, + SyntaxPatterns: []ir.PatternString{{Line: 263, Value: "draw.DrawMask($_, $_, $_, $_, nil, image.Point{}, $_)"}}, ReportTemplate: "use draw.Draw method in `$$`", }, }, }, { - Line: 276, + Line: 271, Name: "regexpMust", MatcherName: "m", DocTags: []string{"style"}, @@ -894,22 +925,22 @@ var PrecompiledRules = &ir.File{ DocAfter: "re := regexp.MustCompile(\"const pattern\")", Rules: []ir.Rule{ { - Line: 277, - SyntaxPatterns: []ir.PatternString{{Line: 277, Value: "regexp.Compile($pat)"}}, + Line: 272, + SyntaxPatterns: []ir.PatternString{{Line: 272, Value: "regexp.Compile($pat)"}}, ReportTemplate: "for const patterns like $pat, use regexp.MustCompile", WhereExpr: ir.FilterExpr{ - Line: 278, + Line: 273, Op: ir.FilterVarConstOp, Src: "m[\"pat\"].Const", Value: "pat", }, }, { - Line: 281, - 
SyntaxPatterns: []ir.PatternString{{Line: 281, Value: "regexp.CompilePOSIX($pat)"}}, + Line: 276, + SyntaxPatterns: []ir.PatternString{{Line: 276, Value: "regexp.CompilePOSIX($pat)"}}, ReportTemplate: "for const patterns like $pat, use regexp.MustCompilePOSIX", WhereExpr: ir.FilterExpr{ - Line: 282, + Line: 277, Op: ir.FilterVarConstOp, Src: "m[\"pat\"].Const", Value: "pat", @@ -918,7 +949,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 290, + Line: 285, Name: "badCall", MatcherName: "m", DocTags: []string{"diagnostic"}, @@ -927,22 +958,22 @@ var PrecompiledRules = &ir.File{ DocAfter: "strings.Replace(s, from, to, -1)", Rules: []ir.Rule{ { - Line: 291, - SyntaxPatterns: []ir.PatternString{{Line: 291, Value: "strings.Replace($_, $_, $_, $zero)"}}, + Line: 286, + SyntaxPatterns: []ir.PatternString{{Line: 286, Value: "strings.Replace($_, $_, $_, $zero)"}}, ReportTemplate: "suspicious arg 0, probably meant -1", WhereExpr: ir.FilterExpr{ - Line: 292, + Line: 287, Op: ir.FilterEqOp, Src: "m[\"zero\"].Value.Int() == 0", Args: []ir.FilterExpr{ { - Line: 292, + Line: 287, Op: ir.FilterVarValueIntOp, Src: "m[\"zero\"].Value.Int()", Value: "zero", }, { - Line: 292, + Line: 287, Op: ir.FilterIntOp, Src: "0", Value: int64(0), @@ -952,22 +983,22 @@ var PrecompiledRules = &ir.File{ LocationVar: "zero", }, { - Line: 294, - SyntaxPatterns: []ir.PatternString{{Line: 294, Value: "bytes.Replace($_, $_, $_, $zero)"}}, + Line: 289, + SyntaxPatterns: []ir.PatternString{{Line: 289, Value: "bytes.Replace($_, $_, $_, $zero)"}}, ReportTemplate: "suspicious arg 0, probably meant -1", WhereExpr: ir.FilterExpr{ - Line: 295, + Line: 290, Op: ir.FilterEqOp, Src: "m[\"zero\"].Value.Int() == 0", Args: []ir.FilterExpr{ { - Line: 295, + Line: 290, Op: ir.FilterVarValueIntOp, Src: "m[\"zero\"].Value.Int()", Value: "zero", }, { - Line: 295, + Line: 290, Op: ir.FilterIntOp, Src: "0", Value: int64(0), @@ -977,22 +1008,22 @@ var PrecompiledRules = &ir.File{ LocationVar: "zero", }, { - Line: 298, - SyntaxPatterns: []ir.PatternString{{Line: 298, Value: "strings.SplitN($_, $_, $zero)"}}, + Line: 293, + SyntaxPatterns: []ir.PatternString{{Line: 293, Value: "strings.SplitN($_, $_, $zero)"}}, ReportTemplate: "suspicious arg 0, probably meant -1", WhereExpr: ir.FilterExpr{ - Line: 299, + Line: 294, Op: ir.FilterEqOp, Src: "m[\"zero\"].Value.Int() == 0", Args: []ir.FilterExpr{ { - Line: 299, + Line: 294, Op: ir.FilterVarValueIntOp, Src: "m[\"zero\"].Value.Int()", Value: "zero", }, { - Line: 299, + Line: 294, Op: ir.FilterIntOp, Src: "0", Value: int64(0), @@ -1002,22 +1033,22 @@ var PrecompiledRules = &ir.File{ LocationVar: "zero", }, { - Line: 301, - SyntaxPatterns: []ir.PatternString{{Line: 301, Value: "bytes.SplitN($_, $_, $zero)"}}, + Line: 296, + SyntaxPatterns: []ir.PatternString{{Line: 296, Value: "bytes.SplitN($_, $_, $zero)"}}, ReportTemplate: "suspicious arg 0, probably meant -1", WhereExpr: ir.FilterExpr{ - Line: 302, + Line: 297, Op: ir.FilterEqOp, Src: "m[\"zero\"].Value.Int() == 0", Args: []ir.FilterExpr{ { - Line: 302, + Line: 297, Op: ir.FilterVarValueIntOp, Src: "m[\"zero\"].Value.Int()", Value: "zero", }, { - Line: 302, + Line: 297, Op: ir.FilterIntOp, Src: "0", Value: int64(0), @@ -1027,19 +1058,19 @@ var PrecompiledRules = &ir.File{ LocationVar: "zero", }, { - Line: 305, - SyntaxPatterns: []ir.PatternString{{Line: 305, Value: "append($_)"}}, + Line: 300, + SyntaxPatterns: []ir.PatternString{{Line: 300, Value: "append($_)"}}, ReportTemplate: "no-op append call, probably missing arguments", }, { - Line: 307, - 
SyntaxPatterns: []ir.PatternString{{Line: 307, Value: "filepath.Join($_)"}}, + Line: 302, + SyntaxPatterns: []ir.PatternString{{Line: 302, Value: "filepath.Join($_)"}}, ReportTemplate: "suspicious Join on 1 argument", }, }, }, { - Line: 314, + Line: 309, Name: "assignOp", MatcherName: "m", DocTags: []string{"style"}, @@ -1048,87 +1079,87 @@ var PrecompiledRules = &ir.File{ DocAfter: "x *= 2", Rules: []ir.Rule{ { - Line: 315, - SyntaxPatterns: []ir.PatternString{{Line: 315, Value: "$x = $x + 1"}}, + Line: 310, + SyntaxPatterns: []ir.PatternString{{Line: 310, Value: "$x = $x + 1"}}, ReportTemplate: "replace `$$` with `$x++`", - WhereExpr: ir.FilterExpr{Line: 315, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 310, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 316, - SyntaxPatterns: []ir.PatternString{{Line: 316, Value: "$x = $x - 1"}}, + Line: 311, + SyntaxPatterns: []ir.PatternString{{Line: 311, Value: "$x = $x - 1"}}, ReportTemplate: "replace `$$` with `$x--`", - WhereExpr: ir.FilterExpr{Line: 316, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 311, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 318, - SyntaxPatterns: []ir.PatternString{{Line: 318, Value: "$x = $x + $y"}}, + Line: 313, + SyntaxPatterns: []ir.PatternString{{Line: 313, Value: "$x = $x + $y"}}, ReportTemplate: "replace `$$` with `$x += $y`", - WhereExpr: ir.FilterExpr{Line: 318, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 313, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 319, - SyntaxPatterns: []ir.PatternString{{Line: 319, Value: "$x = $x - $y"}}, + Line: 314, + SyntaxPatterns: []ir.PatternString{{Line: 314, Value: "$x = $x - $y"}}, ReportTemplate: "replace `$$` with `$x -= $y`", - WhereExpr: ir.FilterExpr{Line: 319, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 314, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 321, - SyntaxPatterns: []ir.PatternString{{Line: 321, Value: "$x = $x * $y"}}, + Line: 316, + SyntaxPatterns: []ir.PatternString{{Line: 316, Value: "$x = $x * $y"}}, ReportTemplate: "replace `$$` with `$x *= $y`", - WhereExpr: ir.FilterExpr{Line: 321, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 316, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 322, - SyntaxPatterns: []ir.PatternString{{Line: 322, Value: "$x = $x / $y"}}, + Line: 317, + SyntaxPatterns: []ir.PatternString{{Line: 317, Value: "$x = $x / $y"}}, ReportTemplate: "replace `$$` with `$x /= $y`", - WhereExpr: ir.FilterExpr{Line: 322, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 317, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 323, - SyntaxPatterns: []ir.PatternString{{Line: 323, Value: "$x = $x % $y"}}, + Line: 318, + SyntaxPatterns: []ir.PatternString{{Line: 318, Value: "$x = $x % $y"}}, ReportTemplate: "replace `$$` with `$x %= $y`", - WhereExpr: ir.FilterExpr{Line: 323, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 318, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 324, - SyntaxPatterns: []ir.PatternString{{Line: 324, Value: "$x = $x & $y"}}, + Line: 319, + SyntaxPatterns: []ir.PatternString{{Line: 319, Value: "$x = $x & $y"}}, ReportTemplate: "replace `$$` 
with `$x &= $y`", - WhereExpr: ir.FilterExpr{Line: 324, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 319, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 325, - SyntaxPatterns: []ir.PatternString{{Line: 325, Value: "$x = $x | $y"}}, + Line: 320, + SyntaxPatterns: []ir.PatternString{{Line: 320, Value: "$x = $x | $y"}}, ReportTemplate: "replace `$$` with `$x |= $y`", - WhereExpr: ir.FilterExpr{Line: 325, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 320, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 326, - SyntaxPatterns: []ir.PatternString{{Line: 326, Value: "$x = $x ^ $y"}}, + Line: 321, + SyntaxPatterns: []ir.PatternString{{Line: 321, Value: "$x = $x ^ $y"}}, ReportTemplate: "replace `$$` with `$x ^= $y`", - WhereExpr: ir.FilterExpr{Line: 326, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 321, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 327, - SyntaxPatterns: []ir.PatternString{{Line: 327, Value: "$x = $x << $y"}}, + Line: 322, + SyntaxPatterns: []ir.PatternString{{Line: 322, Value: "$x = $x << $y"}}, ReportTemplate: "replace `$$` with `$x <<= $y`", - WhereExpr: ir.FilterExpr{Line: 327, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 322, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 328, - SyntaxPatterns: []ir.PatternString{{Line: 328, Value: "$x = $x >> $y"}}, + Line: 323, + SyntaxPatterns: []ir.PatternString{{Line: 323, Value: "$x = $x >> $y"}}, ReportTemplate: "replace `$$` with `$x >>= $y`", - WhereExpr: ir.FilterExpr{Line: 328, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 323, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 329, - SyntaxPatterns: []ir.PatternString{{Line: 329, Value: "$x = $x &^ $y"}}, + Line: 324, + SyntaxPatterns: []ir.PatternString{{Line: 324, Value: "$x = $x &^ $y"}}, ReportTemplate: "replace `$$` with `$x &^= $y`", - WhereExpr: ir.FilterExpr{Line: 329, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 324, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, }, }, { - Line: 336, + Line: 331, Name: "preferWriteByte", MatcherName: "m", DocTags: []string{"performance", "experimental", "opinionated"}, @@ -1136,45 +1167,45 @@ var PrecompiledRules = &ir.File{ DocBefore: "w.WriteRune('\\n')", DocAfter: "w.WriteByte('\\n')", Rules: []ir.Rule{{ - Line: 340, - SyntaxPatterns: []ir.PatternString{{Line: 340, Value: "$w.WriteRune($c)"}}, + Line: 335, + SyntaxPatterns: []ir.PatternString{{Line: 335, Value: "$w.WriteRune($c)"}}, ReportTemplate: "consider writing single byte rune $c with $w.WriteByte($c)", WhereExpr: ir.FilterExpr{ - Line: 341, + Line: 336, Op: ir.FilterAndOp, Src: "m[\"w\"].Type.Implements(\"io.ByteWriter\") && (m[\"c\"].Const && m[\"c\"].Value.Int() < runeSelf)", Args: []ir.FilterExpr{ { - Line: 341, + Line: 336, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.ByteWriter\")", Value: "w", - Args: []ir.FilterExpr{{Line: 341, Op: ir.FilterStringOp, Src: "\"io.ByteWriter\"", Value: "io.ByteWriter"}}, + Args: []ir.FilterExpr{{Line: 336, Op: ir.FilterStringOp, Src: "\"io.ByteWriter\"", Value: "io.ByteWriter"}}, }, { - Line: 341, + Line: 336, Op: ir.FilterAndOp, Src: "(m[\"c\"].Const && m[\"c\"].Value.Int() < runeSelf)", Args: 
[]ir.FilterExpr{ { - Line: 341, + Line: 336, Op: ir.FilterVarConstOp, Src: "m[\"c\"].Const", Value: "c", }, { - Line: 341, + Line: 336, Op: ir.FilterLtOp, Src: "m[\"c\"].Value.Int() < runeSelf", Args: []ir.FilterExpr{ { - Line: 341, + Line: 336, Op: ir.FilterVarValueIntOp, Src: "m[\"c\"].Value.Int()", Value: "c", }, { - Line: 341, + Line: 336, Op: ir.FilterIntOp, Src: "runeSelf", Value: int64(128), @@ -1188,7 +1219,7 @@ var PrecompiledRules = &ir.File{ }}, }, { - Line: 349, + Line: 344, Name: "preferFprint", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -1197,66 +1228,147 @@ var PrecompiledRules = &ir.File{ DocAfter: "fmt.Fprintf(w, \"%x\", 10)", Rules: []ir.Rule{ { - Line: 350, - SyntaxPatterns: []ir.PatternString{{Line: 350, Value: "$w.Write([]byte(fmt.Sprint($*args)))"}}, + Line: 345, + SyntaxPatterns: []ir.PatternString{{Line: 345, Value: "$w.Write([]byte(fmt.Sprint($*args)))"}}, ReportTemplate: "fmt.Fprint($w, $args) should be preferred to the $$", SuggestTemplate: "fmt.Fprint($w, $args)", WhereExpr: ir.FilterExpr{ - Line: 351, + Line: 346, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.Writer\")", Value: "w", - Args: []ir.FilterExpr{{Line: 351, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, + Args: []ir.FilterExpr{{Line: 346, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, }, }, { - Line: 355, - SyntaxPatterns: []ir.PatternString{{Line: 355, Value: "$w.Write([]byte(fmt.Sprintf($*args)))"}}, + Line: 350, + SyntaxPatterns: []ir.PatternString{{Line: 350, Value: "$w.Write([]byte(fmt.Sprintf($*args)))"}}, ReportTemplate: "fmt.Fprintf($w, $args) should be preferred to the $$", SuggestTemplate: "fmt.Fprintf($w, $args)", WhereExpr: ir.FilterExpr{ - Line: 356, + Line: 351, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.Writer\")", Value: "w", - Args: []ir.FilterExpr{{Line: 356, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, + Args: []ir.FilterExpr{{Line: 351, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, }, }, { - Line: 360, - SyntaxPatterns: []ir.PatternString{{Line: 360, Value: "$w.Write([]byte(fmt.Sprintln($*args)))"}}, + Line: 355, + SyntaxPatterns: []ir.PatternString{{Line: 355, Value: "$w.Write([]byte(fmt.Sprintln($*args)))"}}, ReportTemplate: "fmt.Fprintln($w, $args) should be preferred to the $$", SuggestTemplate: "fmt.Fprintln($w, $args)", WhereExpr: ir.FilterExpr{ - Line: 361, + Line: 356, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.Writer\")", Value: "w", - Args: []ir.FilterExpr{{Line: 361, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, + Args: []ir.FilterExpr{{Line: 356, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, }, }, { - Line: 365, - SyntaxPatterns: []ir.PatternString{{Line: 365, Value: "io.WriteString($w, fmt.Sprint($*args))"}}, + Line: 360, + SyntaxPatterns: []ir.PatternString{{Line: 360, Value: "io.WriteString($w, fmt.Sprint($*args))"}}, ReportTemplate: "suggestion: fmt.Fprint($w, $args)", SuggestTemplate: "fmt.Fprint($w, $args)", }, { - Line: 366, - SyntaxPatterns: []ir.PatternString{{Line: 366, Value: "io.WriteString($w, fmt.Sprintf($*args))"}}, + Line: 361, + SyntaxPatterns: []ir.PatternString{{Line: 361, Value: "io.WriteString($w, fmt.Sprintf($*args))"}}, ReportTemplate: "suggestion: fmt.Fprintf($w, $args)", SuggestTemplate: "fmt.Fprintf($w, $args)", }, + { + Line: 362, + SyntaxPatterns: []ir.PatternString{{Line: 362, Value: "io.WriteString($w, 
fmt.Sprintln($*args))"}}, + ReportTemplate: "suggestion: fmt.Fprintln($w, $args)", + SuggestTemplate: "fmt.Fprintln($w, $args)", + }, + { + Line: 364, + SyntaxPatterns: []ir.PatternString{{Line: 364, Value: "$w.WriteString(fmt.Sprint($*args))"}}, + ReportTemplate: "suggestion: fmt.Fprint($w, $args)", + SuggestTemplate: "fmt.Fprint($w, $args)", + WhereExpr: ir.FilterExpr{ + Line: 365, + Op: ir.FilterAndOp, + Src: "m[\"w\"].Type.Implements(\"io.Writer\") && m[\"w\"].Type.Implements(\"io.StringWriter\")", + Args: []ir.FilterExpr{ + { + Line: 365, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"w\"].Type.Implements(\"io.Writer\")", + Value: "w", + Args: []ir.FilterExpr{{Line: 365, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, + }, + { + Line: 365, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"w\"].Type.Implements(\"io.StringWriter\")", + Value: "w", + Args: []ir.FilterExpr{{Line: 365, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, + }, + }, + }, + }, { Line: 367, - SyntaxPatterns: []ir.PatternString{{Line: 367, Value: "io.WriteString($w, fmt.Sprintln($*args))"}}, + SyntaxPatterns: []ir.PatternString{{Line: 367, Value: "$w.WriteString(fmt.Sprintf($*args))"}}, + ReportTemplate: "suggestion: fmt.Fprintf($w, $args)", + SuggestTemplate: "fmt.Fprintf($w, $args)", + WhereExpr: ir.FilterExpr{ + Line: 368, + Op: ir.FilterAndOp, + Src: "m[\"w\"].Type.Implements(\"io.Writer\") && m[\"w\"].Type.Implements(\"io.StringWriter\")", + Args: []ir.FilterExpr{ + { + Line: 368, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"w\"].Type.Implements(\"io.Writer\")", + Value: "w", + Args: []ir.FilterExpr{{Line: 368, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, + }, + { + Line: 368, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"w\"].Type.Implements(\"io.StringWriter\")", + Value: "w", + Args: []ir.FilterExpr{{Line: 368, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, + }, + }, + }, + }, + { + Line: 370, + SyntaxPatterns: []ir.PatternString{{Line: 370, Value: "$w.WriteString(fmt.Sprintln($*args))"}}, ReportTemplate: "suggestion: fmt.Fprintln($w, $args)", SuggestTemplate: "fmt.Fprintln($w, $args)", + WhereExpr: ir.FilterExpr{ + Line: 371, + Op: ir.FilterAndOp, + Src: "m[\"w\"].Type.Implements(\"io.Writer\") && m[\"w\"].Type.Implements(\"io.StringWriter\")", + Args: []ir.FilterExpr{ + { + Line: 371, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"w\"].Type.Implements(\"io.Writer\")", + Value: "w", + Args: []ir.FilterExpr{{Line: 371, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, + }, + { + Line: 371, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"w\"].Type.Implements(\"io.StringWriter\")", + Value: "w", + Args: []ir.FilterExpr{{Line: 371, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, + }, + }, + }, }, }, }, { - Line: 374, + Line: 379, Name: "dupArg", MatcherName: "m", DocTags: []string{"diagnostic"}, @@ -1265,62 +1377,62 @@ var PrecompiledRules = &ir.File{ DocAfter: "copy(dst, src)", Rules: []ir.Rule{ { - Line: 375, + Line: 380, SyntaxPatterns: []ir.PatternString{ - {Line: 375, Value: "$x.Equal($x)"}, - {Line: 375, Value: "$x.Equals($x)"}, - {Line: 375, Value: "$x.Compare($x)"}, - {Line: 375, Value: "$x.Cmp($x)"}, + {Line: 380, Value: "$x.Equal($x)"}, + {Line: 380, Value: "$x.Equals($x)"}, + {Line: 380, Value: "$x.Compare($x)"}, + {Line: 380, Value: "$x.Cmp($x)"}, }, ReportTemplate: "suspicious method call with the same argument and receiver", - WhereExpr: 
ir.FilterExpr{Line: 376, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 381, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 379, + Line: 384, SyntaxPatterns: []ir.PatternString{ - {Line: 379, Value: "copy($x, $x)"}, - {Line: 380, Value: "math.Max($x, $x)"}, - {Line: 381, Value: "math.Min($x, $x)"}, - {Line: 382, Value: "reflect.Copy($x, $x)"}, - {Line: 383, Value: "reflect.DeepEqual($x, $x)"}, - {Line: 384, Value: "strings.Contains($x, $x)"}, - {Line: 385, Value: "strings.Compare($x, $x)"}, - {Line: 386, Value: "strings.EqualFold($x, $x)"}, - {Line: 387, Value: "strings.HasPrefix($x, $x)"}, - {Line: 388, Value: "strings.HasSuffix($x, $x)"}, - {Line: 389, Value: "strings.Index($x, $x)"}, - {Line: 390, Value: "strings.LastIndex($x, $x)"}, - {Line: 391, Value: "strings.Split($x, $x)"}, - {Line: 392, Value: "strings.SplitAfter($x, $x)"}, - {Line: 393, Value: "strings.SplitAfterN($x, $x, $_)"}, - {Line: 394, Value: "strings.SplitN($x, $x, $_)"}, - {Line: 395, Value: "strings.Replace($_, $x, $x, $_)"}, - {Line: 396, Value: "strings.ReplaceAll($_, $x, $x)"}, - {Line: 397, Value: "bytes.Contains($x, $x)"}, - {Line: 398, Value: "bytes.Compare($x, $x)"}, - {Line: 399, Value: "bytes.Equal($x, $x)"}, - {Line: 400, Value: "bytes.EqualFold($x, $x)"}, - {Line: 401, Value: "bytes.HasPrefix($x, $x)"}, - {Line: 402, Value: "bytes.HasSuffix($x, $x)"}, - {Line: 403, Value: "bytes.Index($x, $x)"}, - {Line: 404, Value: "bytes.LastIndex($x, $x)"}, - {Line: 405, Value: "bytes.Split($x, $x)"}, - {Line: 406, Value: "bytes.SplitAfter($x, $x)"}, - {Line: 407, Value: "bytes.SplitAfterN($x, $x, $_)"}, - {Line: 408, Value: "bytes.SplitN($x, $x, $_)"}, - {Line: 409, Value: "bytes.Replace($_, $x, $x, $_)"}, - {Line: 410, Value: "bytes.ReplaceAll($_, $x, $x)"}, - {Line: 411, Value: "types.Identical($x, $x)"}, - {Line: 412, Value: "types.IdenticalIgnoreTags($x, $x)"}, - {Line: 413, Value: "draw.Draw($x, $_, $x, $_, $_)"}, + {Line: 384, Value: "copy($x, $x)"}, + {Line: 385, Value: "math.Max($x, $x)"}, + {Line: 386, Value: "math.Min($x, $x)"}, + {Line: 387, Value: "reflect.Copy($x, $x)"}, + {Line: 388, Value: "reflect.DeepEqual($x, $x)"}, + {Line: 389, Value: "strings.Contains($x, $x)"}, + {Line: 390, Value: "strings.Compare($x, $x)"}, + {Line: 391, Value: "strings.EqualFold($x, $x)"}, + {Line: 392, Value: "strings.HasPrefix($x, $x)"}, + {Line: 393, Value: "strings.HasSuffix($x, $x)"}, + {Line: 394, Value: "strings.Index($x, $x)"}, + {Line: 395, Value: "strings.LastIndex($x, $x)"}, + {Line: 396, Value: "strings.Split($x, $x)"}, + {Line: 397, Value: "strings.SplitAfter($x, $x)"}, + {Line: 398, Value: "strings.SplitAfterN($x, $x, $_)"}, + {Line: 399, Value: "strings.SplitN($x, $x, $_)"}, + {Line: 400, Value: "strings.Replace($_, $x, $x, $_)"}, + {Line: 401, Value: "strings.ReplaceAll($_, $x, $x)"}, + {Line: 402, Value: "bytes.Contains($x, $x)"}, + {Line: 403, Value: "bytes.Compare($x, $x)"}, + {Line: 404, Value: "bytes.Equal($x, $x)"}, + {Line: 405, Value: "bytes.EqualFold($x, $x)"}, + {Line: 406, Value: "bytes.HasPrefix($x, $x)"}, + {Line: 407, Value: "bytes.HasSuffix($x, $x)"}, + {Line: 408, Value: "bytes.Index($x, $x)"}, + {Line: 409, Value: "bytes.LastIndex($x, $x)"}, + {Line: 410, Value: "bytes.Split($x, $x)"}, + {Line: 411, Value: "bytes.SplitAfter($x, $x)"}, + {Line: 412, Value: "bytes.SplitAfterN($x, $x, $_)"}, + {Line: 413, Value: "bytes.SplitN($x, $x, $_)"}, + {Line: 414, Value: "bytes.Replace($_, $x, $x, $_)"}, + {Line: 415, Value: 
"bytes.ReplaceAll($_, $x, $x)"}, + {Line: 416, Value: "types.Identical($x, $x)"}, + {Line: 417, Value: "types.IdenticalIgnoreTags($x, $x)"}, + {Line: 418, Value: "draw.Draw($x, $_, $x, $_, $_)"}, }, ReportTemplate: "suspicious duplicated args in $$", - WhereExpr: ir.FilterExpr{Line: 414, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 419, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, }, }, { - Line: 422, + Line: 427, Name: "returnAfterHttpError", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -1328,14 +1440,14 @@ var PrecompiledRules = &ir.File{ DocBefore: "if err != nil { http.Error(...); }", DocAfter: "if err != nil { http.Error(...); return; }", Rules: []ir.Rule{{ - Line: 423, - SyntaxPatterns: []ir.PatternString{{Line: 423, Value: "if $_ { $*_; http.Error($w, $err, $code) }"}}, + Line: 428, + SyntaxPatterns: []ir.PatternString{{Line: 428, Value: "if $_ { $*_; http.Error($w, $err, $code) }"}}, ReportTemplate: "Possibly return is missed after the http.Error call", LocationVar: "w", }}, }, { - Line: 432, + Line: 437, Name: "preferFilepathJoin", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -1343,35 +1455,35 @@ var PrecompiledRules = &ir.File{ DocBefore: "x + string(os.PathSeparator) + y", DocAfter: "filepath.Join(x, y)", Rules: []ir.Rule{{ - Line: 433, - SyntaxPatterns: []ir.PatternString{{Line: 433, Value: "$x + string(os.PathSeparator) + $y"}}, + Line: 438, + SyntaxPatterns: []ir.PatternString{{Line: 438, Value: "$x + string(os.PathSeparator) + $y"}}, ReportTemplate: "filepath.Join($x, $y) should be preferred to the $$", SuggestTemplate: "filepath.Join($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 434, + Line: 439, Op: ir.FilterAndOp, Src: "m[\"x\"].Type.Is(`string`) && m[\"y\"].Type.Is(`string`)", Args: []ir.FilterExpr{ { - Line: 434, + Line: 439, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`string`)", Value: "x", - Args: []ir.FilterExpr{{Line: 434, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 439, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, { - Line: 434, + Line: 439, Op: ir.FilterVarTypeIsOp, Src: "m[\"y\"].Type.Is(`string`)", Value: "y", - Args: []ir.FilterExpr{{Line: 434, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 439, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, }}, }, { - Line: 443, + Line: 448, Name: "preferStringWriter", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -1380,35 +1492,35 @@ var PrecompiledRules = &ir.File{ DocAfter: "w.WriteString(\"foo\")", Rules: []ir.Rule{ { - Line: 444, - SyntaxPatterns: []ir.PatternString{{Line: 444, Value: "$w.Write([]byte($s))"}}, + Line: 449, + SyntaxPatterns: []ir.PatternString{{Line: 449, Value: "$w.Write([]byte($s))"}}, ReportTemplate: "$w.WriteString($s) should be preferred to the $$", SuggestTemplate: "$w.WriteString($s)", WhereExpr: ir.FilterExpr{ - Line: 445, + Line: 450, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.StringWriter\")", Value: "w", - Args: []ir.FilterExpr{{Line: 445, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, + Args: []ir.FilterExpr{{Line: 450, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, }, }, { - Line: 449, - SyntaxPatterns: []ir.PatternString{{Line: 449, Value: "io.WriteString($w, $s)"}}, + Line: 454, + SyntaxPatterns: []ir.PatternString{{Line: 454, Value: 
"io.WriteString($w, $s)"}}, ReportTemplate: "$w.WriteString($s) should be preferred to the $$", SuggestTemplate: "$w.WriteString($s)", WhereExpr: ir.FilterExpr{ - Line: 450, + Line: 455, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.StringWriter\")", Value: "w", - Args: []ir.FilterExpr{{Line: 450, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, + Args: []ir.FilterExpr{{Line: 455, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, }, }, }, }, { - Line: 459, + Line: 464, Name: "sliceClear", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -1416,22 +1528,22 @@ var PrecompiledRules = &ir.File{ DocBefore: "for i := 0; i < len(buf); i++ { buf[i] = 0 }", DocAfter: "for i := range buf { buf[i] = 0 }", Rules: []ir.Rule{{ - Line: 460, - SyntaxPatterns: []ir.PatternString{{Line: 460, Value: "for $i := 0; $i < len($xs); $i++ { $xs[$i] = $zero }"}}, + Line: 465, + SyntaxPatterns: []ir.PatternString{{Line: 465, Value: "for $i := 0; $i < len($xs); $i++ { $xs[$i] = $zero }"}}, ReportTemplate: "rewrite as for-range so compiler can recognize this pattern", WhereExpr: ir.FilterExpr{ - Line: 461, + Line: 466, Op: ir.FilterEqOp, Src: "m[\"zero\"].Value.Int() == 0", Args: []ir.FilterExpr{ { - Line: 461, + Line: 466, Op: ir.FilterVarValueIntOp, Src: "m[\"zero\"].Value.Int()", Value: "zero", }, { - Line: 461, + Line: 466, Op: ir.FilterIntOp, Src: "0", Value: int64(0), @@ -1441,7 +1553,7 @@ var PrecompiledRules = &ir.File{ }}, }, { - Line: 469, + Line: 474, Name: "syncMapLoadAndDelete", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -1449,33 +1561,33 @@ var PrecompiledRules = &ir.File{ DocBefore: "v, ok := m.Load(k); if ok { m.Delete($k); f(v); }", DocAfter: "v, deleted := m.LoadAndDelete(k); if deleted { f(v) }", Rules: []ir.Rule{{ - Line: 470, - SyntaxPatterns: []ir.PatternString{{Line: 470, Value: "$_, $ok := $m.Load($k); if $ok { $m.Delete($k); $*_ }"}}, + Line: 475, + SyntaxPatterns: []ir.PatternString{{Line: 475, Value: "$_, $ok := $m.Load($k); if $ok { $m.Delete($k); $*_ }"}}, ReportTemplate: "use $m.LoadAndDelete to perform load+delete operations atomically", WhereExpr: ir.FilterExpr{ - Line: 471, + Line: 476, Op: ir.FilterAndOp, Src: "m.GoVersion().GreaterEqThan(\"1.15\") &&\n\tm[\"m\"].Type.Is(`*sync.Map`)", Args: []ir.FilterExpr{ { - Line: 471, + Line: 476, Op: ir.FilterGoVersionGreaterEqThanOp, Src: "m.GoVersion().GreaterEqThan(\"1.15\")", Value: "1.15", }, { - Line: 472, + Line: 477, Op: ir.FilterVarTypeIsOp, Src: "m[\"m\"].Type.Is(`*sync.Map`)", Value: "m", - Args: []ir.FilterExpr{{Line: 472, Op: ir.FilterStringOp, Src: "`*sync.Map`", Value: "*sync.Map"}}, + Args: []ir.FilterExpr{{Line: 477, Op: ir.FilterStringOp, Src: "`*sync.Map`", Value: "*sync.Map"}}, }, }, }, }}, }, { - Line: 480, + Line: 485, Name: "sprintfQuotedString", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -1483,34 +1595,34 @@ var PrecompiledRules = &ir.File{ DocBefore: "fmt.Sprintf(`\"%s\"`, s)", DocAfter: "fmt.Sprintf(`%q`, s)", Rules: []ir.Rule{{ - Line: 481, - SyntaxPatterns: []ir.PatternString{{Line: 481, Value: "fmt.Sprintf($s, $*_)"}}, + Line: 486, + SyntaxPatterns: []ir.PatternString{{Line: 486, Value: "fmt.Sprintf($s, $*_)"}}, ReportTemplate: "use %q instead of \"%s\" for quoted strings", WhereExpr: ir.FilterExpr{ - Line: 482, + Line: 487, Op: ir.FilterOrOp, Src: "m[\"s\"].Text.Matches(\"^`.*\\\"%s\\\".*`$\") ||\n\tm[\"s\"].Text.Matches(`^\".*\\\\\"%s\\\\\".*\"$`)", Args: 
[]ir.FilterExpr{ { - Line: 482, + Line: 487, Op: ir.FilterVarTextMatchesOp, Src: "m[\"s\"].Text.Matches(\"^`.*\\\"%s\\\".*`$\")", Value: "s", - Args: []ir.FilterExpr{{Line: 482, Op: ir.FilterStringOp, Src: "\"^`.*\\\"%s\\\".*`$\"", Value: "^`.*\"%s\".*`$"}}, + Args: []ir.FilterExpr{{Line: 487, Op: ir.FilterStringOp, Src: "\"^`.*\\\"%s\\\".*`$\"", Value: "^`.*\"%s\".*`$"}}, }, { - Line: 483, + Line: 488, Op: ir.FilterVarTextMatchesOp, Src: "m[\"s\"].Text.Matches(`^\".*\\\\\"%s\\\\\".*\"$`)", Value: "s", - Args: []ir.FilterExpr{{Line: 483, Op: ir.FilterStringOp, Src: "`^\".*\\\\\"%s\\\\\".*\"$`", Value: "^\".*\\\\\"%s\\\\\".*\"$"}}, + Args: []ir.FilterExpr{{Line: 488, Op: ir.FilterStringOp, Src: "`^\".*\\\\\"%s\\\\\".*\"$`", Value: "^\".*\\\\\"%s\\\\\".*\"$"}}, }, }, }, }}, }, { - Line: 491, + Line: 496, Name: "offBy1", MatcherName: "m", DocTags: []string{"diagnostic"}, @@ -1519,80 +1631,80 @@ var PrecompiledRules = &ir.File{ DocAfter: "xs[len(xs)-1]", Rules: []ir.Rule{ { - Line: 492, - SyntaxPatterns: []ir.PatternString{{Line: 492, Value: "$x[len($x)]"}}, + Line: 497, + SyntaxPatterns: []ir.PatternString{{Line: 497, Value: "$x[len($x)]"}}, ReportTemplate: "index expr always panics; maybe you wanted $x[len($x)-1]?", SuggestTemplate: "$x[len($x)-1]", WhereExpr: ir.FilterExpr{ - Line: 493, + Line: 498, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"x\"].Type.Is(`[]$_`)", Args: []ir.FilterExpr{ - {Line: 493, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 498, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, { - Line: 493, + Line: 498, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]$_`)", Value: "x", - Args: []ir.FilterExpr{{Line: 493, Op: ir.FilterStringOp, Src: "`[]$_`", Value: "[]$_"}}, + Args: []ir.FilterExpr{{Line: 498, Op: ir.FilterStringOp, Src: "`[]$_`", Value: "[]$_"}}, }, }, }, }, { - Line: 500, + Line: 505, SyntaxPatterns: []ir.PatternString{ - {Line: 501, Value: "$i := strings.Index($s, $_); $_ := $slicing[$i:]"}, - {Line: 502, Value: "$i := strings.Index($s, $_); $_ = $slicing[$i:]"}, - {Line: 503, Value: "$i := bytes.Index($s, $_); $_ := $slicing[$i:]"}, - {Line: 504, Value: "$i := bytes.Index($s, $_); $_ = $slicing[$i:]"}, + {Line: 506, Value: "$i := strings.Index($s, $_); $_ := $slicing[$i:]"}, + {Line: 507, Value: "$i := strings.Index($s, $_); $_ = $slicing[$i:]"}, + {Line: 508, Value: "$i := bytes.Index($s, $_); $_ := $slicing[$i:]"}, + {Line: 509, Value: "$i := bytes.Index($s, $_); $_ = $slicing[$i:]"}, }, ReportTemplate: "Index() can return -1; maybe you wanted to do $s[$i+1:]", WhereExpr: ir.FilterExpr{ - Line: 505, + Line: 510, Op: ir.FilterEqOp, Src: "m[\"s\"].Text == m[\"slicing\"].Text", Args: []ir.FilterExpr{ - {Line: 505, Op: ir.FilterVarTextOp, Src: "m[\"s\"].Text", Value: "s"}, - {Line: 505, Op: ir.FilterVarTextOp, Src: "m[\"slicing\"].Text", Value: "slicing"}, + {Line: 510, Op: ir.FilterVarTextOp, Src: "m[\"s\"].Text", Value: "s"}, + {Line: 510, Op: ir.FilterVarTextOp, Src: "m[\"slicing\"].Text", Value: "slicing"}, }, }, LocationVar: "slicing", }, { - Line: 509, + Line: 514, SyntaxPatterns: []ir.PatternString{ - {Line: 510, Value: "$i := strings.Index($s, $_); $_ := $slicing[:$i]"}, - {Line: 511, Value: "$i := strings.Index($s, $_); $_ = $slicing[:$i]"}, - {Line: 512, Value: "$i := bytes.Index($s, $_); $_ := $slicing[:$i]"}, - {Line: 513, Value: "$i := bytes.Index($s, $_); $_ = $slicing[:$i]"}, + {Line: 515, Value: "$i := strings.Index($s, $_); $_ := $slicing[:$i]"}, + {Line: 516, Value: "$i := strings.Index($s, $_); $_ = 
$slicing[:$i]"}, + {Line: 517, Value: "$i := bytes.Index($s, $_); $_ := $slicing[:$i]"}, + {Line: 518, Value: "$i := bytes.Index($s, $_); $_ = $slicing[:$i]"}, }, ReportTemplate: "Index() can return -1; maybe you wanted to do $s[:$i+1]", WhereExpr: ir.FilterExpr{ - Line: 514, + Line: 519, Op: ir.FilterEqOp, Src: "m[\"s\"].Text == m[\"slicing\"].Text", Args: []ir.FilterExpr{ - {Line: 514, Op: ir.FilterVarTextOp, Src: "m[\"s\"].Text", Value: "s"}, - {Line: 514, Op: ir.FilterVarTextOp, Src: "m[\"slicing\"].Text", Value: "slicing"}, + {Line: 519, Op: ir.FilterVarTextOp, Src: "m[\"s\"].Text", Value: "s"}, + {Line: 519, Op: ir.FilterVarTextOp, Src: "m[\"slicing\"].Text", Value: "slicing"}, }, }, LocationVar: "slicing", }, { - Line: 518, + Line: 523, SyntaxPatterns: []ir.PatternString{ - {Line: 519, Value: "$s[strings.Index($s, $_):]"}, - {Line: 520, Value: "$s[:strings.Index($s, $_)]"}, - {Line: 521, Value: "$s[bytes.Index($s, $_):]"}, - {Line: 522, Value: "$s[:bytes.Index($s, $_)]"}, + {Line: 524, Value: "$s[strings.Index($s, $_):]"}, + {Line: 525, Value: "$s[:strings.Index($s, $_)]"}, + {Line: 526, Value: "$s[bytes.Index($s, $_):]"}, + {Line: 527, Value: "$s[:bytes.Index($s, $_)]"}, }, ReportTemplate: "Index() can return -1; maybe you wanted to do Index()+1", }, }, }, { - Line: 530, + Line: 535, Name: "unslice", MatcherName: "m", DocTags: []string{"style"}, @@ -1600,35 +1712,35 @@ var PrecompiledRules = &ir.File{ DocBefore: "copy(b[:], values...)", DocAfter: "copy(b, values...)", Rules: []ir.Rule{{ - Line: 531, - SyntaxPatterns: []ir.PatternString{{Line: 531, Value: "$s[:]"}}, + Line: 536, + SyntaxPatterns: []ir.PatternString{{Line: 536, Value: "$s[:]"}}, ReportTemplate: "could simplify $$ to $s", SuggestTemplate: "$s", WhereExpr: ir.FilterExpr{ - Line: 532, + Line: 537, Op: ir.FilterOrOp, Src: "m[\"s\"].Type.Is(`string`) || m[\"s\"].Type.Is(`[]$_`)", Args: []ir.FilterExpr{ { - Line: 532, + Line: 537, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 532, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 537, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, { - Line: 532, + Line: 537, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`[]$_`)", Value: "s", - Args: []ir.FilterExpr{{Line: 532, Op: ir.FilterStringOp, Src: "`[]$_`", Value: "[]$_"}}, + Args: []ir.FilterExpr{{Line: 537, Op: ir.FilterStringOp, Src: "`[]$_`", Value: "[]$_"}}, }, }, }, }}, }, { - Line: 541, + Line: 546, Name: "yodaStyleExpr", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -1637,105 +1749,105 @@ var PrecompiledRules = &ir.File{ DocAfter: "return ptr != nil", Rules: []ir.Rule{ { - Line: 542, - SyntaxPatterns: []ir.PatternString{{Line: 542, Value: "$constval != $x"}}, + Line: 547, + SyntaxPatterns: []ir.PatternString{{Line: 547, Value: "$constval != $x"}}, ReportTemplate: "consider to change order in expression to $x != $constval", WhereExpr: ir.FilterExpr{ - Line: 542, + Line: 547, Op: ir.FilterAndOp, Src: "m[\"constval\"].Node.Is(`BasicLit`) && !m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{ { - Line: 542, + Line: 547, Op: ir.FilterVarNodeIsOp, Src: "m[\"constval\"].Node.Is(`BasicLit`)", Value: "constval", - Args: []ir.FilterExpr{{Line: 542, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 547, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }, { - Line: 542, + Line: 547, Op: ir.FilterNotOp, Src: "!m[\"x\"].Node.Is(`BasicLit`)", 
Args: []ir.FilterExpr{{ - Line: 542, + Line: 547, Op: ir.FilterVarNodeIsOp, Src: "m[\"x\"].Node.Is(`BasicLit`)", Value: "x", - Args: []ir.FilterExpr{{Line: 542, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 547, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }}, }, }, }, }, { - Line: 544, - SyntaxPatterns: []ir.PatternString{{Line: 544, Value: "$constval == $x"}}, + Line: 549, + SyntaxPatterns: []ir.PatternString{{Line: 549, Value: "$constval == $x"}}, ReportTemplate: "consider to change order in expression to $x == $constval", WhereExpr: ir.FilterExpr{ - Line: 544, + Line: 549, Op: ir.FilterAndOp, Src: "m[\"constval\"].Node.Is(`BasicLit`) && !m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{ { - Line: 544, + Line: 549, Op: ir.FilterVarNodeIsOp, Src: "m[\"constval\"].Node.Is(`BasicLit`)", Value: "constval", - Args: []ir.FilterExpr{{Line: 544, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 549, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }, { - Line: 544, + Line: 549, Op: ir.FilterNotOp, Src: "!m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{{ - Line: 544, + Line: 549, Op: ir.FilterVarNodeIsOp, Src: "m[\"x\"].Node.Is(`BasicLit`)", Value: "x", - Args: []ir.FilterExpr{{Line: 544, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 549, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }}, }, }, }, }, { - Line: 547, - SyntaxPatterns: []ir.PatternString{{Line: 547, Value: "nil != $x"}}, + Line: 552, + SyntaxPatterns: []ir.PatternString{{Line: 552, Value: "nil != $x"}}, ReportTemplate: "consider to change order in expression to $x != nil", WhereExpr: ir.FilterExpr{ - Line: 547, + Line: 552, Op: ir.FilterNotOp, Src: "!m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{{ - Line: 547, + Line: 552, Op: ir.FilterVarNodeIsOp, Src: "m[\"x\"].Node.Is(`BasicLit`)", Value: "x", - Args: []ir.FilterExpr{{Line: 547, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 552, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }}, }, }, { - Line: 549, - SyntaxPatterns: []ir.PatternString{{Line: 549, Value: "nil == $x"}}, + Line: 554, + SyntaxPatterns: []ir.PatternString{{Line: 554, Value: "nil == $x"}}, ReportTemplate: "consider to change order in expression to $x == nil", WhereExpr: ir.FilterExpr{ - Line: 549, + Line: 554, Op: ir.FilterNotOp, Src: "!m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{{ - Line: 549, + Line: 554, Op: ir.FilterVarNodeIsOp, Src: "m[\"x\"].Node.Is(`BasicLit`)", Value: "x", - Args: []ir.FilterExpr{{Line: 549, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 554, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }}, }, }, }, }, { - Line: 557, + Line: 562, Name: "equalFold", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -1744,114 +1856,114 @@ var PrecompiledRules = &ir.File{ DocAfter: "strings.EqualFold(x, y)", Rules: []ir.Rule{ { - Line: 566, + Line: 571, SyntaxPatterns: []ir.PatternString{ - {Line: 567, Value: "strings.ToLower($x) == $y"}, - {Line: 568, Value: "strings.ToLower($x) == strings.ToLower($y)"}, - {Line: 569, Value: "$x == strings.ToLower($y)"}, - {Line: 570, Value: "strings.ToUpper($x) == $y"}, - {Line: 571, Value: "strings.ToUpper($x) == strings.ToUpper($y)"}, - {Line: 572, Value: "$x == strings.ToUpper($y)"}, + {Line: 572, Value: 
"strings.ToLower($x) == $y"}, + {Line: 573, Value: "strings.ToLower($x) == strings.ToLower($y)"}, + {Line: 574, Value: "$x == strings.ToLower($y)"}, + {Line: 575, Value: "strings.ToUpper($x) == $y"}, + {Line: 576, Value: "strings.ToUpper($x) == strings.ToUpper($y)"}, + {Line: 577, Value: "$x == strings.ToUpper($y)"}, }, ReportTemplate: "consider replacing with strings.EqualFold($x, $y)", SuggestTemplate: "strings.EqualFold($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 573, + Line: 578, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure && m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ { - Line: 573, + Line: 578, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure", Args: []ir.FilterExpr{ - {Line: 573, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, - {Line: 573, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, + {Line: 578, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 578, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, }, }, { - Line: 573, + Line: 578, Op: ir.FilterNeqOp, Src: "m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ - {Line: 573, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, - {Line: 573, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, + {Line: 578, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, + {Line: 578, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, }, }, }, }, }, { - Line: 578, + Line: 583, SyntaxPatterns: []ir.PatternString{ - {Line: 579, Value: "strings.ToLower($x) != $y"}, - {Line: 580, Value: "strings.ToLower($x) != strings.ToLower($y)"}, - {Line: 581, Value: "$x != strings.ToLower($y)"}, - {Line: 582, Value: "strings.ToUpper($x) != $y"}, - {Line: 583, Value: "strings.ToUpper($x) != strings.ToUpper($y)"}, - {Line: 584, Value: "$x != strings.ToUpper($y)"}, + {Line: 584, Value: "strings.ToLower($x) != $y"}, + {Line: 585, Value: "strings.ToLower($x) != strings.ToLower($y)"}, + {Line: 586, Value: "$x != strings.ToLower($y)"}, + {Line: 587, Value: "strings.ToUpper($x) != $y"}, + {Line: 588, Value: "strings.ToUpper($x) != strings.ToUpper($y)"}, + {Line: 589, Value: "$x != strings.ToUpper($y)"}, }, ReportTemplate: "consider replacing with !strings.EqualFold($x, $y)", SuggestTemplate: "!strings.EqualFold($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 585, + Line: 590, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure && m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ { - Line: 585, + Line: 590, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure", Args: []ir.FilterExpr{ - {Line: 585, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, - {Line: 585, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, + {Line: 590, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 590, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, }, }, { - Line: 585, + Line: 590, Op: ir.FilterNeqOp, Src: "m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ - {Line: 585, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, - {Line: 585, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, + {Line: 590, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, + {Line: 590, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, }, }, }, }, }, { - Line: 590, + Line: 595, SyntaxPatterns: []ir.PatternString{ - {Line: 591, Value: "bytes.Equal(bytes.ToLower($x), $y)"}, - {Line: 592, Value: "bytes.Equal(bytes.ToLower($x), bytes.ToLower($y))"}, - {Line: 593, Value: "bytes.Equal($x, bytes.ToLower($y))"}, - {Line: 
594, Value: "bytes.Equal(bytes.ToUpper($x), $y)"}, - {Line: 595, Value: "bytes.Equal(bytes.ToUpper($x), bytes.ToUpper($y))"}, - {Line: 596, Value: "bytes.Equal($x, bytes.ToUpper($y))"}, + {Line: 596, Value: "bytes.Equal(bytes.ToLower($x), $y)"}, + {Line: 597, Value: "bytes.Equal(bytes.ToLower($x), bytes.ToLower($y))"}, + {Line: 598, Value: "bytes.Equal($x, bytes.ToLower($y))"}, + {Line: 599, Value: "bytes.Equal(bytes.ToUpper($x), $y)"}, + {Line: 600, Value: "bytes.Equal(bytes.ToUpper($x), bytes.ToUpper($y))"}, + {Line: 601, Value: "bytes.Equal($x, bytes.ToUpper($y))"}, }, ReportTemplate: "consider replacing with bytes.EqualFold($x, $y)", SuggestTemplate: "bytes.EqualFold($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 597, + Line: 602, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure && m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ { - Line: 597, + Line: 602, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure", Args: []ir.FilterExpr{ - {Line: 597, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, - {Line: 597, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, + {Line: 602, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 602, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, }, }, { - Line: 597, + Line: 602, Op: ir.FilterNeqOp, Src: "m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ - {Line: 597, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, - {Line: 597, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, + {Line: 602, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, + {Line: 602, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, }, }, }, @@ -1860,7 +1972,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 606, + Line: 611, Name: "argOrder", MatcherName: "m", DocTags: []string{"diagnostic"}, @@ -1868,45 +1980,45 @@ var PrecompiledRules = &ir.File{ DocBefore: "strings.HasPrefix(\"#\", userpass)", DocAfter: "strings.HasPrefix(userpass, \"#\")", Rules: []ir.Rule{{ - Line: 607, + Line: 612, SyntaxPatterns: []ir.PatternString{ - {Line: 608, Value: "strings.HasPrefix($lit, $s)"}, - {Line: 609, Value: "bytes.HasPrefix($lit, $s)"}, - {Line: 610, Value: "strings.HasSuffix($lit, $s)"}, - {Line: 611, Value: "bytes.HasSuffix($lit, $s)"}, - {Line: 612, Value: "strings.Contains($lit, $s)"}, - {Line: 613, Value: "bytes.Contains($lit, $s)"}, - {Line: 614, Value: "strings.TrimPrefix($lit, $s)"}, - {Line: 615, Value: "bytes.TrimPrefix($lit, $s)"}, - {Line: 616, Value: "strings.TrimSuffix($lit, $s)"}, - {Line: 617, Value: "bytes.TrimSuffix($lit, $s)"}, - {Line: 618, Value: "strings.Split($lit, $s)"}, - {Line: 619, Value: "bytes.Split($lit, $s)"}, + {Line: 613, Value: "strings.HasPrefix($lit, $s)"}, + {Line: 614, Value: "bytes.HasPrefix($lit, $s)"}, + {Line: 615, Value: "strings.HasSuffix($lit, $s)"}, + {Line: 616, Value: "bytes.HasSuffix($lit, $s)"}, + {Line: 617, Value: "strings.Contains($lit, $s)"}, + {Line: 618, Value: "bytes.Contains($lit, $s)"}, + {Line: 619, Value: "strings.TrimPrefix($lit, $s)"}, + {Line: 620, Value: "bytes.TrimPrefix($lit, $s)"}, + {Line: 621, Value: "strings.TrimSuffix($lit, $s)"}, + {Line: 622, Value: "bytes.TrimSuffix($lit, $s)"}, + {Line: 623, Value: "strings.Split($lit, $s)"}, + {Line: 624, Value: "bytes.Split($lit, $s)"}, }, ReportTemplate: "$lit and $s arguments order looks reversed", WhereExpr: ir.FilterExpr{ - Line: 620, + Line: 625, Op: ir.FilterAndOp, Src: "(m[\"lit\"].Const || m[\"lit\"].ConstSlice) &&\n\t!(m[\"s\"].Const || m[\"s\"].ConstSlice) 
&&\n\t!m[\"lit\"].Node.Is(`Ident`)", Args: []ir.FilterExpr{ { - Line: 620, + Line: 625, Op: ir.FilterAndOp, Src: "(m[\"lit\"].Const || m[\"lit\"].ConstSlice) &&\n\t!(m[\"s\"].Const || m[\"s\"].ConstSlice)", Args: []ir.FilterExpr{ { - Line: 620, + Line: 625, Op: ir.FilterOrOp, Src: "(m[\"lit\"].Const || m[\"lit\"].ConstSlice)", Args: []ir.FilterExpr{ { - Line: 620, + Line: 625, Op: ir.FilterVarConstOp, Src: "m[\"lit\"].Const", Value: "lit", }, { - Line: 620, + Line: 625, Op: ir.FilterVarConstSliceOp, Src: "m[\"lit\"].ConstSlice", Value: "lit", @@ -1914,22 +2026,22 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 621, + Line: 626, Op: ir.FilterNotOp, Src: "!(m[\"s\"].Const || m[\"s\"].ConstSlice)", Args: []ir.FilterExpr{{ - Line: 621, + Line: 626, Op: ir.FilterOrOp, Src: "(m[\"s\"].Const || m[\"s\"].ConstSlice)", Args: []ir.FilterExpr{ { - Line: 621, + Line: 626, Op: ir.FilterVarConstOp, Src: "m[\"s\"].Const", Value: "s", }, { - Line: 621, + Line: 626, Op: ir.FilterVarConstSliceOp, Src: "m[\"s\"].ConstSlice", Value: "s", @@ -1940,15 +2052,15 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 622, + Line: 627, Op: ir.FilterNotOp, Src: "!m[\"lit\"].Node.Is(`Ident`)", Args: []ir.FilterExpr{{ - Line: 622, + Line: 627, Op: ir.FilterVarNodeIsOp, Src: "m[\"lit\"].Node.Is(`Ident`)", Value: "lit", - Args: []ir.FilterExpr{{Line: 622, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, + Args: []ir.FilterExpr{{Line: 627, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, }}, }, }, @@ -1956,7 +2068,7 @@ var PrecompiledRules = &ir.File{ }}, }, { - Line: 630, + Line: 635, Name: "stringConcatSimplify", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -1965,27 +2077,27 @@ var PrecompiledRules = &ir.File{ DocAfter: "x + \"_\" + y", Rules: []ir.Rule{ { - Line: 631, - SyntaxPatterns: []ir.PatternString{{Line: 631, Value: "strings.Join([]string{$x, $y}, \"\")"}}, + Line: 636, + SyntaxPatterns: []ir.PatternString{{Line: 636, Value: "strings.Join([]string{$x, $y}, \"\")"}}, ReportTemplate: "suggestion: $x + $y", SuggestTemplate: "$x + $y", }, { - Line: 632, - SyntaxPatterns: []ir.PatternString{{Line: 632, Value: "strings.Join([]string{$x, $y, $z}, \"\")"}}, + Line: 637, + SyntaxPatterns: []ir.PatternString{{Line: 637, Value: "strings.Join([]string{$x, $y, $z}, \"\")"}}, ReportTemplate: "suggestion: $x + $y + $z", SuggestTemplate: "$x + $y + $z", }, { - Line: 633, - SyntaxPatterns: []ir.PatternString{{Line: 633, Value: "strings.Join([]string{$x, $y}, $glue)"}}, + Line: 638, + SyntaxPatterns: []ir.PatternString{{Line: 638, Value: "strings.Join([]string{$x, $y}, $glue)"}}, ReportTemplate: "suggestion: $x + $glue + $y", SuggestTemplate: "$x + $glue + $y", }, }, }, { - Line: 640, + Line: 645, Name: "timeExprSimplify", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -1994,39 +2106,39 @@ var PrecompiledRules = &ir.File{ DocAfter: "t.UnixMilli()", Rules: []ir.Rule{ { - Line: 645, - SyntaxPatterns: []ir.PatternString{{Line: 645, Value: "$t.Unix() / 1000"}}, + Line: 650, + SyntaxPatterns: []ir.PatternString{{Line: 650, Value: "$t.Unix() / 1000"}}, ReportTemplate: "use $t.UnixMilli() instead of $$", SuggestTemplate: "$t.UnixMilli()", WhereExpr: ir.FilterExpr{ - Line: 646, + Line: 651, Op: ir.FilterAndOp, Src: "m.GoVersion().GreaterEqThan(\"1.17\") && isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 646, + Line: 651, Op: ir.FilterGoVersionGreaterEqThanOp, Src: "m.GoVersion().GreaterEqThan(\"1.17\")", Value: "1.17", }, { - Line: 646, + Line: 651, Op: ir.FilterOrOp, Src: 
"isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 646, + Line: 651, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 642, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, + Args: []ir.FilterExpr{{Line: 647, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, }, { - Line: 646, + Line: 651, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`*time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 642, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, + Args: []ir.FilterExpr{{Line: 647, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, }, }, }, @@ -2034,39 +2146,39 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 650, - SyntaxPatterns: []ir.PatternString{{Line: 650, Value: "$t.UnixNano() * 1000"}}, + Line: 655, + SyntaxPatterns: []ir.PatternString{{Line: 655, Value: "$t.UnixNano() * 1000"}}, ReportTemplate: "use $t.UnixMicro() instead of $$", SuggestTemplate: "$t.UnixMicro()", WhereExpr: ir.FilterExpr{ - Line: 651, + Line: 656, Op: ir.FilterAndOp, Src: "m.GoVersion().GreaterEqThan(\"1.17\") && isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 651, + Line: 656, Op: ir.FilterGoVersionGreaterEqThanOp, Src: "m.GoVersion().GreaterEqThan(\"1.17\")", Value: "1.17", }, { - Line: 651, + Line: 656, Op: ir.FilterOrOp, Src: "isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 651, + Line: 656, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 642, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, + Args: []ir.FilterExpr{{Line: 647, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, }, { - Line: 651, + Line: 656, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`*time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 642, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, + Args: []ir.FilterExpr{{Line: 647, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, }, }, }, @@ -2076,72 +2188,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 660, - Name: "timeCmpSimplify", - MatcherName: "m", - DocTags: []string{"style", "experimental"}, - DocSummary: "Detects Before/After call of time.Time that can be simplified", - DocBefore: "!t.Before(tt)", - DocAfter: "t.After(tt)", - Rules: []ir.Rule{ - { - Line: 665, - SyntaxPatterns: []ir.PatternString{{Line: 665, Value: "!$t.Before($tt)"}}, - ReportTemplate: "suggestion: $t.After($tt)", - SuggestTemplate: "$t.After($tt)", - WhereExpr: ir.FilterExpr{ - Line: 666, - Op: ir.FilterOrOp, - Src: "isTime(m[\"t\"])", - Args: []ir.FilterExpr{ - { - Line: 666, - Op: ir.FilterVarTypeIsOp, - Src: "m[\"t\"].Type.Is(`time.Time`)", - Value: "t", - Args: []ir.FilterExpr{{Line: 662, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, - }, - { - Line: 666, - Op: ir.FilterVarTypeIsOp, - Src: "m[\"t\"].Type.Is(`*time.Time`)", - Value: "t", - Args: []ir.FilterExpr{{Line: 662, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, - }, - }, - }, - }, - { - Line: 669, - SyntaxPatterns: []ir.PatternString{{Line: 669, Value: "!$t.After($tt)"}}, - ReportTemplate: "suggestion: $t.Before($tt)", - SuggestTemplate: "$t.Before($tt)", - WhereExpr: ir.FilterExpr{ - Line: 670, - Op: ir.FilterOrOp, - Src: "isTime(m[\"t\"])", - Args: []ir.FilterExpr{ - { - Line: 670, - Op: ir.FilterVarTypeIsOp, - Src: "m[\"t\"].Type.Is(`time.Time`)", - Value: "t", - Args: []ir.FilterExpr{{Line: 662, Op: ir.FilterStringOp, Src: "`time.Time`", Value: 
"time.Time"}}, - }, - { - Line: 670, - Op: ir.FilterVarTypeIsOp, - Src: "m[\"t\"].Type.Is(`*time.Time`)", - Value: "t", - Args: []ir.FilterExpr{{Line: 662, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, - }, - }, - }, - }, - }, - }, - { - Line: 678, + Line: 665, Name: "exposedSyncMutex", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -2150,57 +2197,57 @@ var PrecompiledRules = &ir.File{ DocAfter: "type Foo struct{ ...; mu sync.Mutex; ... }", Rules: []ir.Rule{ { - Line: 683, - SyntaxPatterns: []ir.PatternString{{Line: 683, Value: "type $x struct { $*_; sync.Mutex; $*_ }"}}, + Line: 670, + SyntaxPatterns: []ir.PatternString{{Line: 670, Value: "type $x struct { $*_; sync.Mutex; $*_ }"}}, ReportTemplate: "don't embed sync.Mutex", WhereExpr: ir.FilterExpr{ - Line: 684, + Line: 671, Op: ir.FilterVarTextMatchesOp, Src: "isExported(m[\"x\"])", Value: "x", - Args: []ir.FilterExpr{{Line: 680, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, + Args: []ir.FilterExpr{{Line: 667, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, }, }, { - Line: 687, - SyntaxPatterns: []ir.PatternString{{Line: 687, Value: "type $x struct { $*_; *sync.Mutex; $*_ }"}}, + Line: 674, + SyntaxPatterns: []ir.PatternString{{Line: 674, Value: "type $x struct { $*_; *sync.Mutex; $*_ }"}}, ReportTemplate: "don't embed *sync.Mutex", WhereExpr: ir.FilterExpr{ - Line: 688, + Line: 675, Op: ir.FilterVarTextMatchesOp, Src: "isExported(m[\"x\"])", Value: "x", - Args: []ir.FilterExpr{{Line: 680, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, + Args: []ir.FilterExpr{{Line: 667, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, }, }, { - Line: 691, - SyntaxPatterns: []ir.PatternString{{Line: 691, Value: "type $x struct { $*_; sync.RWMutex; $*_ }"}}, + Line: 678, + SyntaxPatterns: []ir.PatternString{{Line: 678, Value: "type $x struct { $*_; sync.RWMutex; $*_ }"}}, ReportTemplate: "don't embed sync.RWMutex", WhereExpr: ir.FilterExpr{ - Line: 692, + Line: 679, Op: ir.FilterVarTextMatchesOp, Src: "isExported(m[\"x\"])", Value: "x", - Args: []ir.FilterExpr{{Line: 680, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, + Args: []ir.FilterExpr{{Line: 667, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, }, }, { - Line: 695, - SyntaxPatterns: []ir.PatternString{{Line: 695, Value: "type $x struct { $*_; *sync.RWMutex; $*_ }"}}, + Line: 682, + SyntaxPatterns: []ir.PatternString{{Line: 682, Value: "type $x struct { $*_; *sync.RWMutex; $*_ }"}}, ReportTemplate: "don't embed *sync.RWMutex", WhereExpr: ir.FilterExpr{ - Line: 696, + Line: 683, Op: ir.FilterVarTextMatchesOp, Src: "isExported(m[\"x\"])", Value: "x", - Args: []ir.FilterExpr{{Line: 680, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, + Args: []ir.FilterExpr{{Line: 667, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, }, }, }, }, { - Line: 704, + Line: 691, Name: "badSorting", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -2209,83 +2256,83 @@ var PrecompiledRules = &ir.File{ DocAfter: "sort.Strings(xs)", Rules: []ir.Rule{ { - Line: 705, - SyntaxPatterns: []ir.PatternString{{Line: 705, Value: "$x = sort.IntSlice($x)"}}, + Line: 692, + SyntaxPatterns: []ir.PatternString{{Line: 692, Value: "$x = sort.IntSlice($x)"}}, ReportTemplate: "suspicious sort.IntSlice usage, maybe sort.Ints was intended?", SuggestTemplate: "sort.Ints($x)", WhereExpr: ir.FilterExpr{ - Line: 706, + Line: 693, Op: ir.FilterVarTypeIsOp, Src: 
"m[\"x\"].Type.Is(`[]int`)", Value: "x", - Args: []ir.FilterExpr{{Line: 706, Op: ir.FilterStringOp, Src: "`[]int`", Value: "[]int"}}, + Args: []ir.FilterExpr{{Line: 693, Op: ir.FilterStringOp, Src: "`[]int`", Value: "[]int"}}, }, }, { - Line: 710, - SyntaxPatterns: []ir.PatternString{{Line: 710, Value: "$x = sort.Float64Slice($x)"}}, + Line: 697, + SyntaxPatterns: []ir.PatternString{{Line: 697, Value: "$x = sort.Float64Slice($x)"}}, ReportTemplate: "suspicious sort.Float64s usage, maybe sort.Float64s was intended?", SuggestTemplate: "sort.Float64s($x)", WhereExpr: ir.FilterExpr{ - Line: 711, + Line: 698, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]float64`)", Value: "x", - Args: []ir.FilterExpr{{Line: 711, Op: ir.FilterStringOp, Src: "`[]float64`", Value: "[]float64"}}, + Args: []ir.FilterExpr{{Line: 698, Op: ir.FilterStringOp, Src: "`[]float64`", Value: "[]float64"}}, }, }, { - Line: 715, - SyntaxPatterns: []ir.PatternString{{Line: 715, Value: "$x = sort.StringSlice($x)"}}, + Line: 702, + SyntaxPatterns: []ir.PatternString{{Line: 702, Value: "$x = sort.StringSlice($x)"}}, ReportTemplate: "suspicious sort.StringSlice usage, maybe sort.Strings was intended?", SuggestTemplate: "sort.Strings($x)", WhereExpr: ir.FilterExpr{ - Line: 716, + Line: 703, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]string`)", Value: "x", - Args: []ir.FilterExpr{{Line: 716, Op: ir.FilterStringOp, Src: "`[]string`", Value: "[]string"}}, + Args: []ir.FilterExpr{{Line: 703, Op: ir.FilterStringOp, Src: "`[]string`", Value: "[]string"}}, }, }, }, }, { - Line: 725, + Line: 712, Name: "externalErrorReassign", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, - DocSummary: "Detects suspicious reassigment of error from another package", + DocSummary: "Detects suspicious reassignment of error from another package", DocBefore: "io.EOF = nil", DocAfter: "/* don't do it */", Rules: []ir.Rule{{ - Line: 726, - SyntaxPatterns: []ir.PatternString{{Line: 726, Value: "$pkg.$err = $x"}}, - ReportTemplate: "suspicious reassigment of error from another package", + Line: 713, + SyntaxPatterns: []ir.PatternString{{Line: 713, Value: "$pkg.$err = $x"}}, + ReportTemplate: "suspicious reassignment of error from another package", WhereExpr: ir.FilterExpr{ - Line: 727, + Line: 714, Op: ir.FilterAndOp, Src: "m[\"err\"].Type.Is(`error`) && m[\"pkg\"].Object.Is(`PkgName`)", Args: []ir.FilterExpr{ { - Line: 727, + Line: 714, Op: ir.FilterVarTypeIsOp, Src: "m[\"err\"].Type.Is(`error`)", Value: "err", - Args: []ir.FilterExpr{{Line: 727, Op: ir.FilterStringOp, Src: "`error`", Value: "error"}}, + Args: []ir.FilterExpr{{Line: 714, Op: ir.FilterStringOp, Src: "`error`", Value: "error"}}, }, { - Line: 727, + Line: 714, Op: ir.FilterVarObjectIsOp, Src: "m[\"pkg\"].Object.Is(`PkgName`)", Value: "pkg", - Args: []ir.FilterExpr{{Line: 727, Op: ir.FilterStringOp, Src: "`PkgName`", Value: "PkgName"}}, + Args: []ir.FilterExpr{{Line: 714, Op: ir.FilterStringOp, Src: "`PkgName`", Value: "PkgName"}}, }, }, }, }}, }, { - Line: 735, + Line: 722, Name: "emptyDecl", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -2294,24 +2341,24 @@ var PrecompiledRules = &ir.File{ DocAfter: "/* nothing */", Rules: []ir.Rule{ { - Line: 736, - SyntaxPatterns: []ir.PatternString{{Line: 736, Value: "var()"}}, + Line: 723, + SyntaxPatterns: []ir.PatternString{{Line: 723, Value: "var()"}}, ReportTemplate: "empty var() block", }, { - Line: 737, - SyntaxPatterns: []ir.PatternString{{Line: 737, Value: "const()"}}, + Line: 724, + 
SyntaxPatterns: []ir.PatternString{{Line: 724, Value: "const()"}}, ReportTemplate: "empty const() block", }, { - Line: 738, - SyntaxPatterns: []ir.PatternString{{Line: 738, Value: "type()"}}, + Line: 725, + SyntaxPatterns: []ir.PatternString{{Line: 725, Value: "type()"}}, ReportTemplate: "empty type() block", }, }, }, { - Line: 745, + Line: 732, Name: "dynamicFmtString", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -2320,16 +2367,16 @@ var PrecompiledRules = &ir.File{ DocAfter: "fmt.Errorf(\"%s\", msg)", Rules: []ir.Rule{ { - Line: 746, - SyntaxPatterns: []ir.PatternString{{Line: 746, Value: "fmt.Errorf($f)"}}, + Line: 733, + SyntaxPatterns: []ir.PatternString{{Line: 733, Value: "fmt.Errorf($f)"}}, ReportTemplate: "use errors.New($f) or fmt.Errorf(\"%s\", $f) instead", SuggestTemplate: "errors.New($f)", WhereExpr: ir.FilterExpr{ - Line: 747, + Line: 734, Op: ir.FilterNotOp, Src: "!m[\"f\"].Const", Args: []ir.FilterExpr{{ - Line: 747, + Line: 734, Op: ir.FilterVarConstOp, Src: "m[\"f\"].Const", Value: "f", @@ -2337,15 +2384,15 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 751, - SyntaxPatterns: []ir.PatternString{{Line: 751, Value: "fmt.Errorf($f($*args))"}}, + Line: 738, + SyntaxPatterns: []ir.PatternString{{Line: 738, Value: "fmt.Errorf($f($*args))"}}, ReportTemplate: "use errors.New($f($*args)) or fmt.Errorf(\"%s\", $f($*args)) instead", SuggestTemplate: "errors.New($f($*args))", }, }, }, { - Line: 760, + Line: 747, Name: "stringsCompare", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -2354,31 +2401,121 @@ var PrecompiledRules = &ir.File{ DocAfter: "x < y", Rules: []ir.Rule{ { - Line: 761, - SyntaxPatterns: []ir.PatternString{{Line: 761, Value: "strings.Compare($s1, $s2) == 0"}}, + Line: 748, + SyntaxPatterns: []ir.PatternString{{Line: 748, Value: "strings.Compare($s1, $s2) == 0"}}, ReportTemplate: "suggestion: $s1 == $s2", SuggestTemplate: "$s1 == $s2", }, { - Line: 764, + Line: 751, SyntaxPatterns: []ir.PatternString{ - {Line: 764, Value: "strings.Compare($s1, $s2) == -1"}, - {Line: 765, Value: "strings.Compare($s1, $s2) < 0"}, + {Line: 751, Value: "strings.Compare($s1, $s2) == -1"}, + {Line: 752, Value: "strings.Compare($s1, $s2) < 0"}, }, ReportTemplate: "suggestion: $s1 < $s2", SuggestTemplate: "$s1 < $s2", }, { - Line: 768, + Line: 755, SyntaxPatterns: []ir.PatternString{ - {Line: 768, Value: "strings.Compare($s1, $s2) == 1"}, - {Line: 769, Value: "strings.Compare($s1, $s2) > 0"}, + {Line: 755, Value: "strings.Compare($s1, $s2) == 1"}, + {Line: 756, Value: "strings.Compare($s1, $s2) > 0"}, }, ReportTemplate: "suggestion: $s1 > $s2", SuggestTemplate: "$s1 > $s2", }, }, }, + { + Line: 764, + Name: "uncheckedInlineErr", + MatcherName: "m", + DocTags: []string{"diagnostic", "experimental"}, + DocSummary: "Detects unchecked errors in if statements", + DocBefore: "if err := expr(); err2 != nil { /*...*/ }", + DocAfter: "if err := expr(); err != nil { /*...*/ }", + Rules: []ir.Rule{{ + Line: 765, + SyntaxPatterns: []ir.PatternString{ + {Line: 766, Value: "if $err := $_($*_); $err2 != nil { $*_ }"}, + {Line: 767, Value: "if $err = $_($*_); $err2 != nil { $*_ }"}, + {Line: 768, Value: "if $*_, $err := $_($*_); $err2 != nil { $*_ }"}, + {Line: 769, Value: "if $*_, $err = $_($*_); $err2 != nil { $*_ }"}, + }, + ReportTemplate: "$err error is unchecked, maybe intended to check it instead of $err2", + WhereExpr: ir.FilterExpr{ + Line: 770, + Op: ir.FilterAndOp, + Src: "m[\"err\"].Type.Implements(\"error\") && 
m[\"err2\"].Type.Implements(\"error\") &&\n\tm[\"err\"].Text != m[\"err2\"].Text", + Args: []ir.FilterExpr{ + { + Line: 770, + Op: ir.FilterAndOp, + Src: "m[\"err\"].Type.Implements(\"error\") && m[\"err2\"].Type.Implements(\"error\")", + Args: []ir.FilterExpr{ + { + Line: 770, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"err\"].Type.Implements(\"error\")", + Value: "err", + Args: []ir.FilterExpr{{Line: 770, Op: ir.FilterStringOp, Src: "\"error\"", Value: "error"}}, + }, + { + Line: 770, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"err2\"].Type.Implements(\"error\")", + Value: "err2", + Args: []ir.FilterExpr{{Line: 770, Op: ir.FilterStringOp, Src: "\"error\"", Value: "error"}}, + }, + }, + }, + { + Line: 771, + Op: ir.FilterNeqOp, + Src: "m[\"err\"].Text != m[\"err2\"].Text", + Args: []ir.FilterExpr{ + {Line: 771, Op: ir.FilterVarTextOp, Src: "m[\"err\"].Text", Value: "err"}, + {Line: 771, Op: ir.FilterVarTextOp, Src: "m[\"err2\"].Text", Value: "err2"}, + }, + }, + }, + }, + LocationVar: "err", + }}, + }, + { + Line: 780, + Name: "badSyncOnceFunc", + MatcherName: "m", + DocTags: []string{"diagnostic", "experimental"}, + DocSummary: "Detects bad usage of sync.OnceFunc", + DocBefore: "sync.OnceFunc(foo)()", + DocAfter: "fooOnce := sync.OnceFunc(foo); ...; fooOnce()", + Rules: []ir.Rule{ + { + Line: 781, + SyntaxPatterns: []ir.PatternString{{Line: 781, Value: "$*_; sync.OnceFunc($x); $*_;"}}, + ReportTemplate: "possible sync.OnceFunc misuse, sync.OnceFunc($x) result is not used", + WhereExpr: ir.FilterExpr{ + Line: 783, + Op: ir.FilterGoVersionGreaterEqThanOp, + Src: "m.GoVersion().GreaterEqThan(\"1.21\")", + Value: "1.21", + }, + }, + { + Line: 785, + SyntaxPatterns: []ir.PatternString{{Line: 785, Value: "sync.OnceFunc($x)()"}}, + ReportTemplate: "possible sync.OnceFunc misuse, consider to assign sync.OnceFunc($x) to a variable", + WhereExpr: ir.FilterExpr{ + Line: 787, + Op: ir.FilterGoVersionGreaterEqThanOp, + Src: "m.GoVersion().GreaterEqThan(\"1.21\")", + Value: "1.21", + }, + }, + }, + }, }, } diff --git a/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go b/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go index b369a43447..a1a399fdaa 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go @@ -5,14 +5,15 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "golang.org/x/tools/go/ast/astutil" ) func init() { var info linter.CheckerInfo info.Name = "singleCaseSwitch" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Summary = "Detects switch statements that could be better written as if statement" info.Before = ` switch x := x.(type) { diff --git a/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go index 2f9ac62e1f..d83d7fd5a1 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go @@ -5,7 +5,8 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astcopy" "github.com/go-toolsmith/astequal" 
@@ -14,7 +15,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "sloppyReassign" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects suspicious/confusing re-assignments" info.Before = `if err = f(); err != nil { return err }` info.After = `if err := f(); err != nil { return err }` diff --git a/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go index 554197768e..454ab78b19 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go @@ -5,14 +5,15 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "sloppyTypeAssert" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects redundant type assertions" info.Before = ` func f(r io.Reader) interface{} { diff --git a/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go index 29550da3fb..22ef3b16a7 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go @@ -6,7 +6,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/typep" @@ -16,7 +17,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "sortSlice" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects suspicious sort.Slice calls" info.Before = `sort.Slice(xs, func(i, j) bool { return keys[i] < keys[j] })` info.After = `sort.Slice(kv, func(i, j) bool { return kv[i].key < kv[j].key })` diff --git a/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go index eb3b49d881..8a132b5860 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go @@ -5,14 +5,15 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "sqlQuery" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects issue in Query() and Exec() calls" info.Before = `_, err := db.Query("UPDATE ...")` info.After = `_, err := db.Exec("UPDATE ...")` diff --git a/vendor/github.com/go-critic/go-critic/checkers/todoCommentWithoutDetail_checker.go b/vendor/github.com/go-critic/go-critic/checkers/todoCommentWithoutDetail_checker.go index 5ec2881b4b..f8e4b9b3c0 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/todoCommentWithoutDetail_checker.go +++ 
b/vendor/github.com/go-critic/go-critic/checkers/todoCommentWithoutDetail_checker.go @@ -5,13 +5,13 @@ import ( "regexp" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "todoCommentWithoutDetail" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Summary = "Detects TODO comments without detail/assignee" info.Before = ` // TODO diff --git a/vendor/github.com/go-critic/go-critic/checkers/tooManyResults_checker.go b/vendor/github.com/go-critic/go-critic/checkers/tooManyResults_checker.go index 4d4dcc26e7..57411ba249 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/tooManyResults_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/tooManyResults_checker.go @@ -5,13 +5,13 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "tooManyResultsChecker" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Params = linter.CheckerParams{ "maxResults": { Value: 5, diff --git a/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go b/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go index 9d40c2b631..b369025267 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go @@ -6,7 +6,8 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astp" ) @@ -14,7 +15,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "truncateCmp" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Params = linter.CheckerParams{ "skipArchDependent": { Value: true, diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go index d87657c3b9..e0d20fd4c5 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go @@ -6,7 +6,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/astp" @@ -15,7 +16,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "typeAssertChain" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects repeated type assertions and suggests to replace them with type switch statement" info.Before = ` if x, ok := v.(T1); ok { diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeDefFirst_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeDefFirst_checker.go index 
bc59eef1ca..11381c4014 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/typeDefFirst_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/typeDefFirst_checker.go @@ -5,14 +5,13 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" - "golang.org/x/exp/typeparams" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "typeDefFirst" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects method declarations preceding the type definition itself" info.Before = ` func (r rec) Method() {} @@ -81,7 +80,7 @@ func (c *typeDefFirstChecker) receiverType(e ast.Expr) string { return e.Name case *ast.IndexExpr: return c.receiverType(e.X) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: return c.receiverType(e.X) default: panic("unreachable") diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go index 1e11e49372..4b27b17928 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go @@ -5,7 +5,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/astp" ) @@ -13,7 +14,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "typeSwitchVar" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Summary = "Detects type switches that can benefit from type guard clause with variable" info.Before = ` switch v.(type) { diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go index cd8e04337a..e2e225ebf2 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go @@ -4,7 +4,8 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcopy" "github.com/go-toolsmith/astequal" ) @@ -12,8 +13,8 @@ import ( func init() { var info linter.CheckerInfo info.Name = "typeUnparen" - info.Tags = []string{"style", "opinionated"} - info.Summary = "Detects unneded parenthesis inside type expressions and suggests to remove them" + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag} + info.Summary = "Detects unneeded parenthesis inside type expressions and suggests to remove them" info.Before = `type foo [](func([](func())))` info.After = `type foo []func([]func())` diff --git a/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go b/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go index d0426a9a50..0ce2c89ba7 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go @@ -5,7 +5,8 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + 
"github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astp" ) @@ -13,7 +14,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "underef" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Params = linter.CheckerParams{ "skipRecvDeref": { Value: true, diff --git a/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go index bcca24d2a8..d0e83f3c2e 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go @@ -6,13 +6,13 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "unlabelStmt" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects redundant statement labels" info.Before = ` derp: diff --git a/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go index cce995d7a2..0401bf5d37 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go @@ -7,7 +7,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/typep" @@ -16,7 +17,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "unlambda" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Summary = "Detects function literals that can be simplified" info.Before = `func(x int) int { return fn(x) }` info.After = `fn` @@ -90,7 +91,7 @@ func (c *unlambdaChecker) VisitExpr(x ast.Expr) { } } - if len(result.Args) == n { + if c.lenArgs(result.Args) == n { c.warn(fn, callable) } } @@ -98,3 +99,20 @@ func (c *unlambdaChecker) VisitExpr(x ast.Expr) { func (c *unlambdaChecker) warn(cause ast.Node, suggestion string) { c.ctx.Warn(cause, "replace `%s` with `%s`", cause, suggestion) } + +func (c *unlambdaChecker) lenArgs(args []ast.Expr) int { + lenArgs := len(args) + + for _, arg := range args { + callExp, ok := arg.(*ast.CallExpr) + if !ok { + continue + } + + // Don't count function call. only args. 
+ lenArgs-- + lenArgs += c.lenArgs(callExp.Args) + } + + return lenArgs +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go index 3149d9e87d..0d40addf75 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go @@ -5,13 +5,13 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "unnamedResult" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Params = linter.CheckerParams{ "checkExported": { Value: false, diff --git a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go index 6cbdfdfd0b..b577ff4219 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go @@ -5,14 +5,15 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astp" ) func init() { var info linter.CheckerInfo info.Name = "unnecessaryBlock" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Summary = "Detects unnecessary braced statement blocks" info.Before = ` x := 1 diff --git a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go index ef72142a10..4c1ed41f6f 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go @@ -4,14 +4,15 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astfmt" ) func init() { var info linter.CheckerInfo info.Name = "unnecessaryDefer" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects redundantly deferred calls" info.Before = ` func() { @@ -36,7 +37,7 @@ type unnecessaryDeferChecker struct { // Visit implements the ast.Visitor. This visitor keeps track of the block // statement belongs to a function or any other block. If the block is not a // function and ends with a defer statement that should be OK since it's -// defering the outer function. +// deferring the outer function. 
func (c *unnecessaryDeferChecker) Visit(node ast.Node) ast.Visitor { switch n := node.(type) { case *ast.FuncDecl, *ast.FuncLit: diff --git a/vendor/github.com/go-critic/go-critic/checkers/utils.go b/vendor/github.com/go-critic/go-critic/checkers/utils.go index b71f24d749..4757bbf5f3 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/utils.go +++ b/vendor/github.com/go-critic/go-critic/checkers/utils.go @@ -5,7 +5,7 @@ import ( "go/types" "strings" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) // goStdlib contains `go list std` command output list. @@ -260,7 +260,7 @@ func isUnitTestFunc(ctx *linter.CheckerContext, fn *ast.FuncDecl) bool { return false } -// qualifiedName returns called expr fully-quallified name. +// qualifiedName returns called expr fully-qualified name. // // It works for simple identifiers like f => "f" and identifiers // from other package like pkg.f => "pkg.f". diff --git a/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go b/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go index 831857c41a..3d7c9c1225 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go @@ -6,7 +6,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/typep" @@ -16,7 +17,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "weakCond" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects conditions that are unsafe due to not being exhaustive" info.Before = `xs != nil && xs[0] != nil` info.After = `len(xs) != 0 && xs[0] != nil` diff --git a/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go b/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go index 6829433ea0..eaa53e5d5b 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go @@ -6,17 +6,16 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { - info := linter.CheckerInfo{ - Name: "whyNoLint", - Tags: []string{"style", "experimental"}, - Summary: "Ensures that `//nolint` comments include an explanation", - Before: `//nolint`, - After: `//nolint // reason`, - } + var info linter.CheckerInfo + info.Name = "whyNoLint" + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} + info.Summary = "Ensures that `//nolint` comments include an explanation" + info.Before = `//nolint` + info.After = `//nolint // reason` collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { return astwalk.WalkerForComment(&whyNoLintChecker{ diff --git a/vendor/github.com/go-critic/go-critic/framework/linter/context.go b/vendor/github.com/go-critic/go-critic/framework/linter/context.go deleted file mode 100644 index 6e108ab6a5..0000000000 --- a/vendor/github.com/go-critic/go-critic/framework/linter/context.go +++ /dev/null @@ -1,35 +0,0 @@ -package linter - -import ( - "go/ast" - "go/types" - "strconv" -) - -func 
resolvePkgObjects(ctx *Context, f *ast.File) { - ctx.PkgObjects = make(map[*types.PkgName]string, len(f.Imports)) - - for _, spec := range f.Imports { - if spec.Name != nil { - obj := ctx.TypesInfo.ObjectOf(spec.Name) - ctx.PkgObjects[obj.(*types.PkgName)] = spec.Name.Name - } else { - obj := ctx.TypesInfo.Implicits[spec] - ctx.PkgObjects[obj.(*types.PkgName)] = obj.Name() - } - } -} - -func resolvePkgRenames(ctx *Context, f *ast.File) { - ctx.PkgRenames = make(map[string]string) - - for _, spec := range f.Imports { - if spec.Name != nil { - path, err := strconv.Unquote(spec.Path.Value) - if err != nil { - panic(err) - } - ctx.PkgRenames[path] = spec.Name.Name - } - } -} diff --git a/vendor/github.com/go-critic/go-critic/framework/linter/go_version.go b/vendor/github.com/go-critic/go-critic/linter/go_version.go similarity index 91% rename from vendor/github.com/go-critic/go-critic/framework/linter/go_version.go rename to vendor/github.com/go-critic/go-critic/linter/go_version.go index d8091d4535..b5ef2f75ff 100644 --- a/vendor/github.com/go-critic/go-critic/framework/linter/go_version.go +++ b/vendor/github.com/go-critic/go-critic/linter/go_version.go @@ -18,13 +18,14 @@ type GoVersion struct { // // As a special case, Major=0 covers all versions. func (v GoVersion) GreaterOrEqual(other GoVersion) bool { - if v.Major == 0 { + switch { + case v.Major == 0: return true - } - if v.Major == other.Major { + case v.Major == other.Major: return v.Minor >= other.Minor + default: + return v.Major >= other.Major } - return v.Major >= other.Major } func ParseGoVersion(version string) (GoVersion, error) { diff --git a/vendor/github.com/go-critic/go-critic/framework/linter/checkers_db.go b/vendor/github.com/go-critic/go-critic/linter/helpers.go similarity index 100% rename from vendor/github.com/go-critic/go-critic/framework/linter/checkers_db.go rename to vendor/github.com/go-critic/go-critic/linter/helpers.go diff --git a/vendor/github.com/go-critic/go-critic/framework/linter/linter.go b/vendor/github.com/go-critic/go-critic/linter/linter.go similarity index 81% rename from vendor/github.com/go-critic/go-critic/framework/linter/linter.go rename to vendor/github.com/go-critic/go-critic/linter/linter.go index 750ff7cd9f..d4bc17536e 100644 --- a/vendor/github.com/go-critic/go-critic/framework/linter/linter.go +++ b/vendor/github.com/go-critic/go-critic/linter/linter.go @@ -4,11 +4,33 @@ import ( "go/ast" "go/token" "go/types" + "strconv" + "strings" "github.com/go-toolsmith/astfmt" - "golang.org/x/exp/typeparams" ) +const ( + DiagnosticTag = "diagnostic" + ExperimentalTag = "experimental" + OpinionatedTag = "opinionated" + PerformanceTag = "performance" + SecurityTag = "security" + StyleTag = "style" +) + +// UnknownType is a special sentinel value that is returned from the CheckerContext.TypeOf +// method instead of the nil type. +var UnknownType types.Type = types.Typ[types.Invalid] + +// FileWalker is an interface every checker should implement. +// +// The WalkFile method is executed for every Go file inside the +// package that is being checked. +type FileWalker interface { + WalkFile(*ast.File) +} + // CheckerCollection provides additional information for a group of checkers. type CheckerCollection struct { // URL is a link for a main source of information on the collection. @@ -124,6 +146,14 @@ type Checker struct { fileWalker FileWalker } +// NewChecker returns initialized checker identified by an info. +// info must be non-nil. 
+// Returns an error if info describes a checker that was not properly registered, +// or if checker fails to initialize. +func NewChecker(ctx *Context, info *CheckerInfo) (*Checker, error) { + return newChecker(ctx, info) +} + // Check runs rule checker over file f. func (c *Checker) Check(f *ast.File) []Warning { c.ctx.warnings = c.ctx.warnings[:0] @@ -141,9 +171,7 @@ type QuickFix struct { // Warning represents issue that is found by checker. type Warning struct { - // Node is an AST node that caused warning to trigger. - // Can be used to obtain proper error location. - Node ast.Node + Pos token.Pos // Text is warning message without source location info. Text string @@ -162,14 +190,6 @@ func (warn Warning) HasQuickFix() bool { return warn.Suggestion.Replacement != nil } -// NewChecker returns initialized checker identified by an info. -// info must be non-nil. -// Returns an error if info describes a checker that was not properly registered, -// or if checker fails to initialize. -func NewChecker(ctx *Context, info *CheckerInfo) (*Checker, error) { - return newChecker(ctx, info) -} - // Context is a readonly state shared among every checker. type Context struct { // TypesInfo carries parsed packages types information. @@ -279,25 +299,31 @@ type CheckerContext struct { // Warn adds a Warning to checker output. func (ctx *CheckerContext) Warn(node ast.Node, format string, args ...interface{}) { + ctx.WarnWithPos(node.Pos(), format, args...) +} + +// WarnFixable emits a warning with a fix suggestion provided by the caller. +func (ctx *CheckerContext) WarnFixable(node ast.Node, fix QuickFix, format string, args ...interface{}) { + ctx.WarnFixableWithPos(node.Pos(), fix, format, args...) +} + +// WarnWithPos adds a Warning to checker output. Useful for ruleguard's Report func. +func (ctx *CheckerContext) WarnWithPos(pos token.Pos, format string, args ...interface{}) { ctx.warnings = append(ctx.warnings, Warning{ Text: ctx.printer.Sprintf(format, args...), - Node: node, + Pos: pos, }) } -// WarnFixable emits a warning with a fix suggestion provided by the caller. -func (ctx *CheckerContext) WarnFixable(node ast.Node, fix QuickFix, format string, args ...interface{}) { +// WarnFixableWithPos adds a Warning to checker output. Useful for ruleguard's Report func. +func (ctx *CheckerContext) WarnFixableWithPos(pos token.Pos, fix QuickFix, format string, args ...interface{}) { ctx.warnings = append(ctx.warnings, Warning{ Text: ctx.printer.Sprintf(format, args...), - Node: node, + Pos: pos, Suggestion: fix, }) } -// UnknownType is a special sentinel value that is returned from the CheckerContext.TypeOf -// method instead of the nil type. -var UnknownType types.Type = types.Typ[types.Invalid] - // TypeOf returns the type of expression x. // // Unlike TypesInfo.TypeOf, it never returns nil. @@ -319,16 +345,57 @@ func (ctx *CheckerContext) TypeOf(x ast.Expr) types.Type { // // Unlike SizesInfo.SizeOf, it will not panic on generic types. func (ctx *CheckerContext) SizeOf(typ types.Type) (int64, bool) { - if _, ok := typ.(*typeparams.TypeParam); ok { + if _, ok := typ.(*types.TypeParam); ok { + return 0, false + } + if named, ok := typ.(*types.Named); ok && named.TypeParams() != nil { return 0, false } - return ctx.SizesInfo.Sizeof(typ), true + return ctx.safeSizesInfoSizeof(typ) } -// FileWalker is an interface every checker should implement. -// -// The WalkFile method is executed for every Go file inside the -// package that is being checked. 
-type FileWalker interface { - WalkFile(*ast.File) +// safeSizesInfoSizeof unlike SizesInfo.Sizeof will not panic on struct with generic fields. +// it will catch a panic and recover from it, see https://github.com/go-critic/go-critic/issues/1354 +func (ctx *CheckerContext) safeSizesInfoSizeof(typ types.Type) (size int64, ok bool) { + ok = true + defer func() { + if r := recover(); r != nil { + if strings.Contains(r.(string), "assertion failed") { + size, ok = 0, false + } else { + panic(r) + } + } + }() + + size = ctx.SizesInfo.Sizeof(typ) + return size, ok +} + +func resolvePkgObjects(ctx *Context, f *ast.File) { + ctx.PkgObjects = make(map[*types.PkgName]string, len(f.Imports)) + + for _, spec := range f.Imports { + if spec.Name != nil { + obj := ctx.TypesInfo.ObjectOf(spec.Name) + ctx.PkgObjects[obj.(*types.PkgName)] = spec.Name.Name + } else { + obj := ctx.TypesInfo.Implicits[spec] + ctx.PkgObjects[obj.(*types.PkgName)] = obj.Name() + } + } +} + +func resolvePkgRenames(ctx *Context, f *ast.File) { + ctx.PkgRenames = make(map[string]string) + + for _, spec := range f.Imports { + if spec.Name != nil { + path, err := strconv.Unquote(spec.Path.Value) + if err != nil { + panic(err) + } + ctx.PkgRenames[path] = spec.Name.Name + } + } } diff --git a/vendor/github.com/go-toolsmith/astcast/.travis.yml b/vendor/github.com/go-toolsmith/astcast/.travis.yml deleted file mode 100644 index c32ac00627..0000000000 --- a/vendor/github.com/go-toolsmith/astcast/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/go-toolsmith/astcast/README.md b/vendor/github.com/go-toolsmith/astcast/README.md index b618da4615..19ca0e71d2 100644 --- a/vendor/github.com/go-toolsmith/astcast/README.md +++ b/vendor/github.com/go-toolsmith/astcast/README.md @@ -1,15 +1,19 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/astcast)](https://goreportcard.com/report/github.com/go-toolsmith/astcast) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/astcast?status.svg)](https://godoc.org/github.com/go-toolsmith/astcast) - # astcast -Package astcast wraps type assertion operations in such way that you don't have +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `astcast` wraps type assertion operations in such way that you don't have to worry about nil pointer results anymore. ## Installation +Go version 1.16+ + ```bash -go get -v github.com/go-toolsmith/astcast +go get github.com/go-toolsmith/astcast ``` ## Example @@ -84,3 +88,16 @@ func main() { fmt.Printf("%T %s\n", bar, bar.Name) } ``` + +## License + +[MIT License](LICENSE). 
+ +[build-img]: https://github.com/go-toolsmith/astcast/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/astcast/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/astcast +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/astcast +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/astcast +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/astcast +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/astcast +[version-url]: https://github.com/go-toolsmith/astcast/releases diff --git a/vendor/github.com/go-toolsmith/astcopy/.travis.yml b/vendor/github.com/go-toolsmith/astcopy/.travis.yml deleted file mode 100644 index 8994d395c6..0000000000 --- a/vendor/github.com/go-toolsmith/astcopy/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go tool vet . - - go test -v -race ./... \ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/astcopy/README.md b/vendor/github.com/go-toolsmith/astcopy/README.md index 4dae5c41b2..7adc665250 100644 --- a/vendor/github.com/go-toolsmith/astcopy/README.md +++ b/vendor/github.com/go-toolsmith/astcopy/README.md @@ -1,13 +1,16 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/astcopy)](https://goreportcard.com/report/github.com/go-toolsmith/astcopy) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/astcopy?status.svg)](https://godoc.org/github.com/go-toolsmith/astcopy) -[![Build Status](https://travis-ci.org/go-toolsmith/astcopy.svg?branch=master)](https://travis-ci.org/go-toolsmith/astcopy) - # astcopy -Package astcopy implements Go AST reflection-free deep copy operations. +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `astcopy` implements Go AST reflection-free deep copy operations. ## Installation: +Go version 1.16+ + ```bash go get github.com/go-toolsmith/astcopy ``` @@ -39,3 +42,16 @@ func main() { fmt.Println(astequal.Expr(x, y)) // => false } ``` + +## License + +[MIT License](LICENSE). + +[build-img]: https://github.com/go-toolsmith/astp/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/astp/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/astp +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/astp +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/astp +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/astp +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/astp +[version-url]: https://github.com/go-toolsmith/astp/releases diff --git a/vendor/github.com/go-toolsmith/astcopy/astcopy.go b/vendor/github.com/go-toolsmith/astcopy/astcopy.go index 91e1f31096..72bc58ce6d 100644 --- a/vendor/github.com/go-toolsmith/astcopy/astcopy.go +++ b/vendor/github.com/go-toolsmith/astcopy/astcopy.go @@ -346,21 +346,6 @@ func FieldList(x *ast.FieldList) *ast.FieldList { return &cp } -// FuncType returns x deep copy. -// Copy of nil argument is nil. -func FuncType(x *ast.FuncType) *ast.FuncType { - if x == nil { - return nil - } - cp := *x - cp.Params = FieldList(x.Params) - cp.Results = FieldList(x.Results) - if typeParams := typeparams.ForFuncType(x); typeParams != nil { - *typeparams.ForFuncType(&cp) = *FieldList(typeParams) - } - return &cp -} - // InterfaceType returns x deep copy. // Copy of nil argument is nil. 
func InterfaceType(x *ast.InterfaceType) *ast.InterfaceType { @@ -435,23 +420,6 @@ func ValueSpec(x *ast.ValueSpec) *ast.ValueSpec { return &cp } -// TypeSpec returns x deep copy. -// Copy of nil argument is nil. -func TypeSpec(x *ast.TypeSpec) *ast.TypeSpec { - if x == nil { - return nil - } - cp := *x - cp.Name = Ident(x.Name) - cp.Type = copyExpr(x.Type) - cp.Doc = CommentGroup(x.Doc) - cp.Comment = CommentGroup(x.Comment) - if typeParams := typeparams.ForTypeSpec(x); typeParams != nil { - *typeparams.ForTypeSpec(&cp) = *FieldList(typeParams) - } - return &cp -} - // Spec returns x deep copy. // Copy of nil argument is nil. func Spec(x ast.Spec) ast.Spec { diff --git a/vendor/github.com/go-toolsmith/astcopy/astcopy_go117.go b/vendor/github.com/go-toolsmith/astcopy/astcopy_go117.go new file mode 100644 index 0000000000..1b748bae50 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcopy/astcopy_go117.go @@ -0,0 +1,30 @@ +//go:build !go1.18 +// +build !go1.18 + +package astcopy + +// FuncType returns x deep copy. +// Copy of nil argument is nil. +func FuncType(x *ast.FuncType) *ast.FuncType { + if x == nil { + return nil + } + cp := *x + cp.Params = FieldList(x.Params) + cp.Results = FieldList(x.Results) + return &cp +} + +// TypeSpec returns x deep copy. +// Copy of nil argument is nil. +func TypeSpec(x *ast.TypeSpec) *ast.TypeSpec { + if x == nil { + return nil + } + cp := *x + cp.Name = Ident(x.Name) + cp.Type = copyExpr(x.Type) + cp.Doc = CommentGroup(x.Doc) + cp.Comment = CommentGroup(x.Comment) + return &cp +} diff --git a/vendor/github.com/go-toolsmith/astcopy/astcopy_go118.go b/vendor/github.com/go-toolsmith/astcopy/astcopy_go118.go new file mode 100644 index 0000000000..72f800acc1 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcopy/astcopy_go118.go @@ -0,0 +1,36 @@ +//go:build go1.18 +// +build go1.18 + +package astcopy + +import ( + "go/ast" +) + +// FuncType returns x deep copy. +// Copy of nil argument is nil. +func FuncType(x *ast.FuncType) *ast.FuncType { + if x == nil { + return nil + } + cp := *x + cp.Params = FieldList(x.Params) + cp.Results = FieldList(x.Results) + cp.TypeParams = FieldList(x.TypeParams) + return &cp +} + +// TypeSpec returns x deep copy. +// Copy of nil argument is nil. +func TypeSpec(x *ast.TypeSpec) *ast.TypeSpec { + if x == nil { + return nil + } + cp := *x + cp.Name = Ident(x.Name) + cp.Type = copyExpr(x.Type) + cp.Doc = CommentGroup(x.Doc) + cp.Comment = CommentGroup(x.Comment) + cp.TypeParams = FieldList(x.TypeParams) + return &cp +} diff --git a/vendor/github.com/go-toolsmith/astequal/.travis.yml b/vendor/github.com/go-toolsmith/astequal/.travis.yml deleted file mode 100644 index 8994d395c6..0000000000 --- a/vendor/github.com/go-toolsmith/astequal/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go tool vet . - - go test -v -race ./... 
\ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/astequal/README.md b/vendor/github.com/go-toolsmith/astequal/README.md index b14f80f6f7..db5e3a8c97 100644 --- a/vendor/github.com/go-toolsmith/astequal/README.md +++ b/vendor/github.com/go-toolsmith/astequal/README.md @@ -1,14 +1,16 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/astequal)](https://goreportcard.com/report/github.com/go-toolsmith/astequal) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/astequal?status.svg)](https://godoc.org/github.com/go-toolsmith/astequal) -[![Build Status](https://travis-ci.org/go-toolsmith/astequal.svg?branch=master)](https://travis-ci.org/go-toolsmith/astequal) - - # astequal -Package astequal provides AST (deep) equallity check operations. +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `astequal` provides AST (deep) equallity check operations. ## Installation: +Go version 1.16+ + ```bash go get github.com/go-toolsmith/astequal ``` @@ -65,3 +67,16 @@ BenchmarkEqualExpr/astequal.Expr-8 5000000 298 ns/op 0 B/op 0 BenchmarkEqualExpr/astequal.Node-8 3000000 409 ns/op 0 B/op 0 allocs/op BenchmarkEqualExpr/reflect.DeepEqual-8 50000 38898 ns/op 10185 B/op 156 allocs/op ``` + +## License + +[MIT License](LICENSE). + +[build-img]: https://github.com/go-toolsmith/astequal/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/astequal/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/astequal +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/astequal +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/astequal +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/astequal +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/astequal +[version-url]: https://github.com/go-toolsmith/astequal/releases diff --git a/vendor/github.com/go-toolsmith/astequal/astequal.go b/vendor/github.com/go-toolsmith/astequal/astequal.go index 3d8db4af90..d1a04e9427 100644 --- a/vendor/github.com/go-toolsmith/astequal/astequal.go +++ b/vendor/github.com/go-toolsmith/astequal/astequal.go @@ -4,8 +4,6 @@ package astequal import ( "go/ast" "go/token" - - "golang.org/x/exp/typeparams" ) // Node reports whether two AST nodes are structurally (deep) equal. 
@@ -109,8 +107,8 @@ func astExprEq(x, y ast.Expr) bool { y, ok := y.(*ast.IndexExpr) return ok && astIndexExprEq(x, y) - case *typeparams.IndexListExpr: - y, ok := y.(*typeparams.IndexListExpr) + case *ast.IndexListExpr: + y, ok := y.(*ast.IndexListExpr) return ok && astIndexListExprEq(x, y) case *ast.SliceExpr: @@ -323,7 +321,7 @@ func astFuncTypeEq(x, y *ast.FuncType) bool { } return astFieldListEq(x.Params, y.Params) && astFieldListEq(x.Results, y.Results) && - astFieldListEq(typeparams.ForFuncType(x), typeparams.ForFuncType(y)) + astFieldListEq(forFuncType(x), forFuncType(y)) } func astBasicLitEq(x, y *ast.BasicLit) bool { @@ -378,7 +376,7 @@ func astIndexExprEq(x, y *ast.IndexExpr) bool { return astExprEq(x.X, y.X) && astExprEq(x.Index, y.Index) } -func astIndexListExprEq(x, y *typeparams.IndexListExpr) bool { +func astIndexListExprEq(x, y *ast.IndexListExpr) bool { if x == nil || y == nil { return x == y } @@ -690,7 +688,7 @@ func astTypeSpecEq(x, y *ast.TypeSpec) bool { return x == y } return astIdentEq(x.Name, y.Name) && astExprEq(x.Type, y.Type) && - astFieldListEq(typeparams.ForTypeSpec(x), typeparams.ForTypeSpec(y)) + astFieldListEq(forTypeSpec(x), forTypeSpec(y)) } func astValueSpecEq(x, y *ast.ValueSpec) bool { @@ -755,3 +753,19 @@ func astExprSliceEq(xs, ys []ast.Expr) bool { } return true } + +// forTypeSpec returns n.TypeParams. +func forTypeSpec(n *ast.TypeSpec) *ast.FieldList { + if n == nil { + return nil + } + return n.TypeParams +} + +// forFuncType returns n.TypeParams. +func forFuncType(n *ast.FuncType) *ast.FieldList { + if n == nil { + return nil + } + return n.TypeParams +} diff --git a/vendor/github.com/go-toolsmith/astequal/diff.go b/vendor/github.com/go-toolsmith/astequal/diff.go new file mode 100644 index 0000000000..cd69b45250 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astequal/diff.go @@ -0,0 +1,23 @@ +package astequal + +import ( + "bytes" + "go/ast" + "go/format" + "go/token" + + "github.com/google/go-cmp/cmp" +) + +func Diff(x, y ast.Node) string { + var buf bytes.Buffer + format.Node(&buf, token.NewFileSet(), x) + s1 := buf.String() + + buf.Reset() + format.Node(&buf, token.NewFileSet(), y) + s2 := buf.String() + + // TODO(cristaloleg): replace with a more lightweight diff impl. + return cmp.Diff(s1, s2) +} diff --git a/vendor/github.com/go-toolsmith/astfmt/.travis.yml b/vendor/github.com/go-toolsmith/astfmt/.travis.yml deleted file mode 100644 index c32ac00627..0000000000 --- a/vendor/github.com/go-toolsmith/astfmt/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/go-toolsmith/astfmt/README.md b/vendor/github.com/go-toolsmith/astfmt/README.md index 954c92bf46..00f790fd2f 100644 --- a/vendor/github.com/go-toolsmith/astfmt/README.md +++ b/vendor/github.com/go-toolsmith/astfmt/README.md @@ -1,13 +1,16 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/strparse)](https://goreportcard.com/report/github.com/go-toolsmith/strparse) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/strparse?status.svg)](https://godoc.org/github.com/go-toolsmith/strparse) - - # astfmt -Package astfmt implements ast.Node formatting with fmt-like API. +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `astfmt` implements ast.Node formatting with fmt-like API. 
## Installation +Go version 1.16+ + ```bash go get github.com/go-toolsmith/astfmt ``` @@ -37,3 +40,16 @@ func Example() { pp.Println(x) // => foo(bar(baz(1 + 2))) } ``` + +## License + +[MIT License](LICENSE). + +[build-img]: https://github.com/go-toolsmith/astfmt/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/astfmt/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/astfmt +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/astfmt +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/astfmt +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/astfmt +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/astfmt +[version-url]: https://github.com/go-toolsmith/astfmt/releases diff --git a/vendor/github.com/go-toolsmith/astp/.gitignore b/vendor/github.com/go-toolsmith/astp/.gitignore deleted file mode 100644 index 1f6187ecd6..0000000000 --- a/vendor/github.com/go-toolsmith/astp/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -bin -pkg -src/main -tmp \ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/astp/.travis.yml b/vendor/github.com/go-toolsmith/astp/.travis.yml deleted file mode 100644 index 8994d395c6..0000000000 --- a/vendor/github.com/go-toolsmith/astp/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go tool vet . - - go test -v -race ./... \ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/astp/README.md b/vendor/github.com/go-toolsmith/astp/README.md index 7313c6ab8e..cf5197e811 100644 --- a/vendor/github.com/go-toolsmith/astp/README.md +++ b/vendor/github.com/go-toolsmith/astp/README.md @@ -1,14 +1,16 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/astp)](https://goreportcard.com/report/github.com/go-toolsmith/astp) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/astp?status.svg)](https://godoc.org/github.com/go-toolsmith/astp) -[![Build Status](https://travis-ci.org/go-toolsmith/astp.svg?branch=master)](https://travis-ci.org/go-toolsmith/astp) - - # astp -Package astp provides AST predicates. +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `astp` provides AST predicates. ## Installation: +Go version 1.16+ + ```bash go get github.com/go-toolsmith/astp ``` @@ -37,3 +39,16 @@ func main() { } } ``` + +## License + +[MIT License](LICENSE). + +[build-img]: https://github.com/go-toolsmith/astp/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/astp/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/astp +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/astp +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/astp +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/astp +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/astp +[version-url]: https://github.com/go-toolsmith/astp/releases diff --git a/vendor/github.com/go-toolsmith/strparse/.travis.yml b/vendor/github.com/go-toolsmith/strparse/.travis.yml deleted file mode 100644 index 8994d395c6..0000000000 --- a/vendor/github.com/go-toolsmith/strparse/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go tool vet . - - go test -v -race ./... 
\ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/strparse/README.md b/vendor/github.com/go-toolsmith/strparse/README.md index ae80a5398a..ac04d516fe 100644 --- a/vendor/github.com/go-toolsmith/strparse/README.md +++ b/vendor/github.com/go-toolsmith/strparse/README.md @@ -1,15 +1,17 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/strparse)](https://goreportcard.com/report/github.com/go-toolsmith/strparse) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/strparse?status.svg)](https://godoc.org/github.com/go-toolsmith/strparse) -[![Build Status](https://travis-ci.org/go-toolsmith/strparse.svg?branch=master)](https://travis-ci.org/go-toolsmith/strparse) - - # strparse -Package strparse provides convenience wrappers around `go/parser` for simple +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `strparse` provides convenience wrappers around `go/parser` for simple expression, statement and declaretion parsing from string. ## Installation +Go version 1.16+ + ```bash go get github.com/go-toolsmith/strparse ``` @@ -20,8 +22,8 @@ go get github.com/go-toolsmith/strparse package main import ( - "go-toolsmith/astequal" - "go-toolsmith/strparse" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/strparse" ) func main() { @@ -30,5 +32,17 @@ func main() { y := strparse.Expr(` 1+f( v[0].X ) `) fmt.Println(astequal.Expr(x, y)) // => true } - ``` + +## License + +[MIT License](LICENSE). + +[build-img]: https://github.com/go-toolsmith/strparse/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/strparse/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/strparse +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/strparse +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/strparse +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/strparse +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/strparse +[version-url]: https://github.com/go-toolsmith/strparse/releases diff --git a/vendor/github.com/go-toolsmith/typep/.travis.yml b/vendor/github.com/go-toolsmith/typep/.travis.yml deleted file mode 100644 index d3ff3cca8b..0000000000 --- a/vendor/github.com/go-toolsmith/typep/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go vet ./... - - go test -v -race ./... diff --git a/vendor/github.com/go-toolsmith/typep/README.md b/vendor/github.com/go-toolsmith/typep/README.md index f7979148fa..77478c4434 100644 --- a/vendor/github.com/go-toolsmith/typep/README.md +++ b/vendor/github.com/go-toolsmith/typep/README.md @@ -1,15 +1,18 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/typep)](https://goreportcard.com/report/github.com/go-toolsmith/typep) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/typep?status.svg)](https://godoc.org/github.com/go-toolsmith/typep) -[![Build Status](https://travis-ci.org/go-toolsmith/typep.svg?branch=master)](https://travis-ci.org/go-toolsmith/typep) - # typep -Package typep provides type predicates. +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `typep` provides type predicates. 
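The strparse usage snippet in the README above uses `fmt.Println` without importing `fmt`; a self-contained version of the same comparison, assuming only the go-toolsmith import paths shown in that README, looks like this:

```go
package main

import (
	"fmt"

	"github.com/go-toolsmith/astequal"
	"github.com/go-toolsmith/strparse"
)

func main() {
	// Two spellings of the same expression parse to structurally equal ASTs.
	x := strparse.Expr(`1 + f(v[0].X)`)
	y := strparse.Expr(` 1+f( v[0].X ) `)
	fmt.Println(astequal.Expr(x, y)) // => true
}
```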
## Installation: +Go version 1.16+ + ```bash -go get -v github.com/go-toolsmith/typep +go get github.com/go-toolsmith/typep ``` ## Example @@ -29,9 +32,23 @@ func main() { intTyp := types.Typ[types.Int] ptr := types.NewPointer(intTyp) arr := types.NewArray(intTyp, 64) + fmt.Println(typep.HasFloatProp(floatTyp)) // => true fmt.Println(typep.HasFloatProp(intTyp)) // => false fmt.Println(typep.IsPointer(ptr)) // => true fmt.Println(typep.IsArray(arr)) // => true } ``` + +## License + +[MIT License](LICENSE). + +[build-img]: https://github.com/go-toolsmith/typep/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/typep/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/typep +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/typep +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/typep +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/typep +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/typep +[version-url]: https://github.com/go-toolsmith/typep/releases diff --git a/vendor/github.com/go-toolsmith/typep/safeExpr.go b/vendor/github.com/go-toolsmith/typep/safe_expr.go similarity index 100% rename from vendor/github.com/go-toolsmith/typep/safeExpr.go rename to vendor/github.com/go-toolsmith/typep/safe_expr.go diff --git a/vendor/github.com/go-toolsmith/typep/simplePredicates.go b/vendor/github.com/go-toolsmith/typep/simplePredicates.go deleted file mode 100644 index 3bc9c29c8f..0000000000 --- a/vendor/github.com/go-toolsmith/typep/simplePredicates.go +++ /dev/null @@ -1,359 +0,0 @@ -// Code generated by simplePredicates_generate.go; DO NOT EDIT - -package typep - -import ( - "go/types" -) - -// Simple 1-to-1 type predicates via type assertion. - -// IsBasic reports whether a given type has *types.Basic type. -func IsBasic(typ types.Type) bool { - _, ok := typ.(*types.Basic) - return ok -} - -// IsArray reports whether a given type has *types.Array type. -func IsArray(typ types.Type) bool { - _, ok := typ.(*types.Array) - return ok -} - -// IsSlice reports whether a given type has *types.Slice type. -func IsSlice(typ types.Type) bool { - _, ok := typ.(*types.Slice) - return ok -} - -// IsStruct reports whether a given type has *types.Struct type. -func IsStruct(typ types.Type) bool { - _, ok := typ.(*types.Struct) - return ok -} - -// IsPointer reports whether a given type has *types.Pointer type. -func IsPointer(typ types.Type) bool { - _, ok := typ.(*types.Pointer) - return ok -} - -// IsTuple reports whether a given type has *types.Tuple type. -func IsTuple(typ types.Type) bool { - _, ok := typ.(*types.Tuple) - return ok -} - -// IsSignature reports whether a given type has *types.Signature type. -func IsSignature(typ types.Type) bool { - _, ok := typ.(*types.Signature) - return ok -} - -// IsInterface reports whether a given type has *types.Interface type. -func IsInterface(typ types.Type) bool { - _, ok := typ.(*types.Interface) - return ok -} - -// IsMap reports whether a given type has *types.Map type. -func IsMap(typ types.Type) bool { - _, ok := typ.(*types.Map) - return ok -} - -// IsChan reports whether a given type has *types.Chan type. -func IsChan(typ types.Type) bool { - _, ok := typ.(*types.Chan) - return ok -} - -// IsNamed reports whether a given type has *types.Named type. -func IsNamed(typ types.Type) bool { - _, ok := typ.(*types.Named) - return ok -} - -// *types.Basic predicates for the info field. - -// HasBooleanProp reports whether typ is a *types.Basic has IsBoolean property. 
-func HasBooleanProp(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Info()&types.IsBoolean != 0 - } - return false -} - -// HasIntegerProp reports whether typ is a *types.Basic has IsInteger property. -func HasIntegerProp(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Info()&types.IsInteger != 0 - } - return false -} - -// HasUnsignedProp reports whether typ is a *types.Basic has IsUnsigned property. -func HasUnsignedProp(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Info()&types.IsUnsigned != 0 - } - return false -} - -// HasFloatProp reports whether typ is a *types.Basic has IsFloat property. -func HasFloatProp(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Info()&types.IsFloat != 0 - } - return false -} - -// HasComplexProp reports whether typ is a *types.Basic has IsComplex property. -func HasComplexProp(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Info()&types.IsComplex != 0 - } - return false -} - -// HasStringProp reports whether typ is a *types.Basic has IsString property. -func HasStringProp(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Info()&types.IsString != 0 - } - return false -} - -// HasUntypedProp reports whether typ is a *types.Basic has IsUntyped property. -func HasUntypedProp(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Info()&types.IsUntyped != 0 - } - return false -} - -// HasOrderedProp reports whether typ is a *types.Basic has IsOrdered property. -func HasOrderedProp(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Info()&types.IsOrdered != 0 - } - return false -} - -// HasNumericProp reports whether typ is a *types.Basic has IsNumeric property. -func HasNumericProp(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Info()&types.IsNumeric != 0 - } - return false -} - -// HasConstTypeProp reports whether typ is a *types.Basic has IsConstType property. -func HasConstTypeProp(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Info()&types.IsConstType != 0 - } - return false -} - -// *types.Basic predicates for the kind field. - -// HasBoolKind reports whether typ is a *types.Basic with its kind set to types.Bool. -func HasBoolKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Bool - } - return false -} - -// HasIntKind reports whether typ is a *types.Basic with its kind set to types.Int. -func HasIntKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Int - } - return false -} - -// HasInt8Kind reports whether typ is a *types.Basic with its kind set to types.Int8. -func HasInt8Kind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Int8 - } - return false -} - -// HasInt16Kind reports whether typ is a *types.Basic with its kind set to types.Int16. -func HasInt16Kind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Int16 - } - return false -} - -// HasInt32Kind reports whether typ is a *types.Basic with its kind set to types.Int32. -func HasInt32Kind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Int32 - } - return false -} - -// HasInt64Kind reports whether typ is a *types.Basic with its kind set to types.Int64. 
-func HasInt64Kind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Int64 - } - return false -} - -// HasUintKind reports whether typ is a *types.Basic with its kind set to types.Uint. -func HasUintKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Uint - } - return false -} - -// HasUint8Kind reports whether typ is a *types.Basic with its kind set to types.Uint8. -func HasUint8Kind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Uint8 - } - return false -} - -// HasUint16Kind reports whether typ is a *types.Basic with its kind set to types.Uint16. -func HasUint16Kind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Uint16 - } - return false -} - -// HasUint32Kind reports whether typ is a *types.Basic with its kind set to types.Uint32. -func HasUint32Kind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Uint32 - } - return false -} - -// HasUint64Kind reports whether typ is a *types.Basic with its kind set to types.Uint64. -func HasUint64Kind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Uint64 - } - return false -} - -// HasUintptrKind reports whether typ is a *types.Basic with its kind set to types.Uintptr. -func HasUintptrKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Uintptr - } - return false -} - -// HasFloat32Kind reports whether typ is a *types.Basic with its kind set to types.Float32. -func HasFloat32Kind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Float32 - } - return false -} - -// HasFloat64Kind reports whether typ is a *types.Basic with its kind set to types.Float64. -func HasFloat64Kind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Float64 - } - return false -} - -// HasComplex64Kind reports whether typ is a *types.Basic with its kind set to types.Complex64. -func HasComplex64Kind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Complex64 - } - return false -} - -// HasComplex128Kind reports whether typ is a *types.Basic with its kind set to types.Complex128. -func HasComplex128Kind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.Complex128 - } - return false -} - -// HasStringKind reports whether typ is a *types.Basic with its kind set to types.String. -func HasStringKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.String - } - return false -} - -// HasUnsafePointerKind reports whether typ is a *types.Basic with its kind set to types.UnsafePointer. -func HasUnsafePointerKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.UnsafePointer - } - return false -} - -// HasUntypedBoolKind reports whether typ is a *types.Basic with its kind set to types.UntypedBool. -func HasUntypedBoolKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.UntypedBool - } - return false -} - -// HasUntypedIntKind reports whether typ is a *types.Basic with its kind set to types.UntypedInt. 
-func HasUntypedIntKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.UntypedInt - } - return false -} - -// HasUntypedRuneKind reports whether typ is a *types.Basic with its kind set to types.UntypedRune. -func HasUntypedRuneKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.UntypedRune - } - return false -} - -// HasUntypedFloatKind reports whether typ is a *types.Basic with its kind set to types.UntypedFloat. -func HasUntypedFloatKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.UntypedFloat - } - return false -} - -// HasUntypedComplexKind reports whether typ is a *types.Basic with its kind set to types.UntypedComplex. -func HasUntypedComplexKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.UntypedComplex - } - return false -} - -// HasUntypedStringKind reports whether typ is a *types.Basic with its kind set to types.UntypedString. -func HasUntypedStringKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.UntypedString - } - return false -} - -// HasUntypedNilKind reports whether typ is a *types.Basic with its kind set to types.UntypedNil. -func HasUntypedNilKind(typ types.Type) bool { - if typ, ok := typ.(*types.Basic); ok { - return typ.Kind() == types.UntypedNil - } - return false -} diff --git a/vendor/github.com/go-toolsmith/typep/simple_predicates.go b/vendor/github.com/go-toolsmith/typep/simple_predicates.go new file mode 100644 index 0000000000..61e7d5b7f7 --- /dev/null +++ b/vendor/github.com/go-toolsmith/typep/simple_predicates.go @@ -0,0 +1,359 @@ +// Code generated by simple_predicates_generate.go; DO NOT EDIT + +package typep + +import ( + "go/types" +) + +// Simple 1-to-1 type predicates via type assertion. + +// IsBasic reports whether a given type has *types.Basic type. +func IsBasic(typ types.Type) bool { + _, ok := typ.(*types.Basic) + return ok +} + +// IsArray reports whether a given type has *types.Array type. +func IsArray(typ types.Type) bool { + _, ok := typ.(*types.Array) + return ok +} + +// IsSlice reports whether a given type has *types.Slice type. +func IsSlice(typ types.Type) bool { + _, ok := typ.(*types.Slice) + return ok +} + +// IsStruct reports whether a given type has *types.Struct type. +func IsStruct(typ types.Type) bool { + _, ok := typ.(*types.Struct) + return ok +} + +// IsPointer reports whether a given type has *types.Pointer type. +func IsPointer(typ types.Type) bool { + _, ok := typ.(*types.Pointer) + return ok +} + +// IsTuple reports whether a given type has *types.Tuple type. +func IsTuple(typ types.Type) bool { + _, ok := typ.(*types.Tuple) + return ok +} + +// IsSignature reports whether a given type has *types.Signature type. +func IsSignature(typ types.Type) bool { + _, ok := typ.(*types.Signature) + return ok +} + +// IsInterface reports whether a given type has *types.Interface type. +func IsInterface(typ types.Type) bool { + _, ok := typ.(*types.Interface) + return ok +} + +// IsMap reports whether a given type has *types.Map type. +func IsMap(typ types.Type) bool { + _, ok := typ.(*types.Map) + return ok +} + +// IsChan reports whether a given type has *types.Chan type. +func IsChan(typ types.Type) bool { + _, ok := typ.(*types.Chan) + return ok +} + +// IsNamed reports whether a given type has *types.Named type. 
+func IsNamed(typ types.Type) bool { + _, ok := typ.(*types.Named) + return ok +} + +// *types.Basic predicates for the info field. + +// HasBooleanProp reports whether typ is a *types.Basic has IsBoolean property. +func HasBooleanProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsBoolean != 0 + } + return false +} + +// HasIntegerProp reports whether typ is a *types.Basic has IsInteger property. +func HasIntegerProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsInteger != 0 + } + return false +} + +// HasUnsignedProp reports whether typ is a *types.Basic has IsUnsigned property. +func HasUnsignedProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsUnsigned != 0 + } + return false +} + +// HasFloatProp reports whether typ is a *types.Basic has IsFloat property. +func HasFloatProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsFloat != 0 + } + return false +} + +// HasComplexProp reports whether typ is a *types.Basic has IsComplex property. +func HasComplexProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsComplex != 0 + } + return false +} + +// HasStringProp reports whether typ is a *types.Basic has IsString property. +func HasStringProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsString != 0 + } + return false +} + +// HasUntypedProp reports whether typ is a *types.Basic has IsUntyped property. +func HasUntypedProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsUntyped != 0 + } + return false +} + +// HasOrderedProp reports whether typ is a *types.Basic has IsOrdered property. +func HasOrderedProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsOrdered != 0 + } + return false +} + +// HasNumericProp reports whether typ is a *types.Basic has IsNumeric property. +func HasNumericProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsNumeric != 0 + } + return false +} + +// HasConstTypeProp reports whether typ is a *types.Basic has IsConstType property. +func HasConstTypeProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsConstType != 0 + } + return false +} + +// *types.Basic predicates for the kind field. + +// HasBoolKind reports whether typ is a *types.Basic with its kind set to types.Bool. +func HasBoolKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Bool + } + return false +} + +// HasIntKind reports whether typ is a *types.Basic with its kind set to types.Int. +func HasIntKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Int + } + return false +} + +// HasInt8Kind reports whether typ is a *types.Basic with its kind set to types.Int8. +func HasInt8Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Int8 + } + return false +} + +// HasInt16Kind reports whether typ is a *types.Basic with its kind set to types.Int16. +func HasInt16Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Int16 + } + return false +} + +// HasInt32Kind reports whether typ is a *types.Basic with its kind set to types.Int32. 
+func HasInt32Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Int32 + } + return false +} + +// HasInt64Kind reports whether typ is a *types.Basic with its kind set to types.Int64. +func HasInt64Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Int64 + } + return false +} + +// HasUintKind reports whether typ is a *types.Basic with its kind set to types.Uint. +func HasUintKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Uint + } + return false +} + +// HasUint8Kind reports whether typ is a *types.Basic with its kind set to types.Uint8. +func HasUint8Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Uint8 + } + return false +} + +// HasUint16Kind reports whether typ is a *types.Basic with its kind set to types.Uint16. +func HasUint16Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Uint16 + } + return false +} + +// HasUint32Kind reports whether typ is a *types.Basic with its kind set to types.Uint32. +func HasUint32Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Uint32 + } + return false +} + +// HasUint64Kind reports whether typ is a *types.Basic with its kind set to types.Uint64. +func HasUint64Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Uint64 + } + return false +} + +// HasUintptrKind reports whether typ is a *types.Basic with its kind set to types.Uintptr. +func HasUintptrKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Uintptr + } + return false +} + +// HasFloat32Kind reports whether typ is a *types.Basic with its kind set to types.Float32. +func HasFloat32Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Float32 + } + return false +} + +// HasFloat64Kind reports whether typ is a *types.Basic with its kind set to types.Float64. +func HasFloat64Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Float64 + } + return false +} + +// HasComplex64Kind reports whether typ is a *types.Basic with its kind set to types.Complex64. +func HasComplex64Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Complex64 + } + return false +} + +// HasComplex128Kind reports whether typ is a *types.Basic with its kind set to types.Complex128. +func HasComplex128Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Complex128 + } + return false +} + +// HasStringKind reports whether typ is a *types.Basic with its kind set to types.String. +func HasStringKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.String + } + return false +} + +// HasUnsafePointerKind reports whether typ is a *types.Basic with its kind set to types.UnsafePointer. +func HasUnsafePointerKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UnsafePointer + } + return false +} + +// HasUntypedBoolKind reports whether typ is a *types.Basic with its kind set to types.UntypedBool. 
+func HasUntypedBoolKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedBool + } + return false +} + +// HasUntypedIntKind reports whether typ is a *types.Basic with its kind set to types.UntypedInt. +func HasUntypedIntKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedInt + } + return false +} + +// HasUntypedRuneKind reports whether typ is a *types.Basic with its kind set to types.UntypedRune. +func HasUntypedRuneKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedRune + } + return false +} + +// HasUntypedFloatKind reports whether typ is a *types.Basic with its kind set to types.UntypedFloat. +func HasUntypedFloatKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedFloat + } + return false +} + +// HasUntypedComplexKind reports whether typ is a *types.Basic with its kind set to types.UntypedComplex. +func HasUntypedComplexKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedComplex + } + return false +} + +// HasUntypedStringKind reports whether typ is a *types.Basic with its kind set to types.UntypedString. +func HasUntypedStringKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedString + } + return false +} + +// HasUntypedNilKind reports whether typ is a *types.Basic with its kind set to types.UntypedNil. +func HasUntypedNilKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedNil + } + return false +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig new file mode 100644 index 0000000000..1f664d13a5 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig @@ -0,0 +1,18 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab + +[{Makefile,*.mk}] +indent_style = tab + +[*.nix] +indent_size = 2 diff --git a/vendor/github.com/go-viper/mapstructure/v2/.envrc b/vendor/github.com/go-viper/mapstructure/v2/.envrc new file mode 100644 index 0000000000..c66fc0d354 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.envrc @@ -0,0 +1,4 @@ +if ! has nix_direnv_version || ! nix_direnv_version 3.0.1; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.1/direnvrc" "sha256-17G+Mvt/JsyJrwsf7bqMr7ho7liHP+0Lo4RMIHgp0F8=" +fi +use flake . 
--impure diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore new file mode 100644 index 0000000000..470e7ca2bd --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.gitignore @@ -0,0 +1,6 @@ +/.devenv/ +/.direnv/ +/.pre-commit-config.yaml +/bin/ +/build/ +/var/ diff --git a/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml new file mode 100644 index 0000000000..763143aa77 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml @@ -0,0 +1,23 @@ +run: + timeout: 5m + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/go-viper/mapstructure) + golint: + min-confidence: 0 + goimports: + local-prefixes: github.com/go-viper/maptstructure + +linters: + disable-all: true + enable: + - gci + - gofmt + - gofumpt + - goimports + - staticcheck + # - stylecheck diff --git a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md new file mode 100644 index 0000000000..ae634d1cc0 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md @@ -0,0 +1,101 @@ +## 1.5.1 + +* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282] +* Fix map of slices not decoding properly in certain cases. [GH-266] + +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. [GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. [GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. [GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. 
+* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff --git a/vendor/github.com/go-viper/mapstructure/v2/LICENSE b/vendor/github.com/go-viper/mapstructure/v2/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/go-viper/mapstructure/v2/README.md b/vendor/github.com/go-viper/mapstructure/v2/README.md new file mode 100644 index 0000000000..2b28db8948 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/README.md @@ -0,0 +1,59 @@ +# mapstructure + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?branch=main&style=flat-square)](https://github.com/go-viper/mapstructure/actions?query=workflow%3ACI) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.18-61CFDD.svg?style=flat-square) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. 
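A minimal sketch of the decode flow this README describes: read loosely structured input into a `map[string]interface{}`, then let mapstructure populate a typed struct. The `Person` type and input map here are illustrative; `Decode` is the top-level helper defined later in this diff.

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Person struct {
	Name string `mapstructure:"name"`
	Age  int    `mapstructure:"age"`
}

func main() {
	// Input as it might arrive from JSON, YAML, etc. before its shape is known.
	input := map[string]interface{}{
		"name": "Mitchell",
		"age":  42,
	}

	var p Person
	if err := mapstructure.Decode(input, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // => {Name:Mitchell Age:42}
}
```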
+ +## Installation + +```shell +go get github.com/go-viper/mapstructure/v2 +``` + +## Usage & Example + +For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. + +## Credits + +Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh). +This is a maintained fork of the original library. + +Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349). + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go new file mode 100644 index 0000000000..840d6adce0 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go @@ -0,0 +1,334 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "net/netip" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value, +) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. 
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + data := f.Interface() + + newFrom := f + for _, f1 := range fs { + data, err = DecodeHookExec(f1, newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, f := range ff { + out, err = DecodeHookExec(f, a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.SliceOf(f) { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. 
+// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}, +) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + str, ok := data.(string) + if !ok { + str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String() + } + if err := unmarshaller.UnmarshalText([]byte(str)); err != nil { + return nil, err + } + return result, nil + } +} + +// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts +// strings to netip.Addr. +func StringToNetIPAddrHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.Addr{}) { + return data, nil + } + + // Convert it by parsing + return netip.ParseAddr(data.(string)) + } +} + +// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts +// strings to netip.AddrPort. +func StringToNetIPAddrPortHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.AddrPort{}) { + return data, nil + } + + // Convert it by parsing + return netip.ParseAddrPort(data.(string)) + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/error.go b/vendor/github.com/go-viper/mapstructure/v2/error.go new file mode 100644 index 0000000000..47a99e5af3 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. 
+type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock new file mode 100644 index 0000000000..5a387d3299 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.lock @@ -0,0 +1,273 @@ +{ + "nodes": { + "devenv": { + "inputs": { + "flake-compat": "flake-compat", + "nix": "nix", + "nixpkgs": "nixpkgs", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1702549996, + "narHash": "sha256-mEN+8gjWUXRxBCcixeth+jlDNuzxbpFwZNOEc4K22vw=", + "owner": "cachix", + "repo": "devenv", + "rev": "e681a99ffe2d2882f413a5d771129223c838ddce", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1701473968, + "narHash": "sha256-YcVE5emp1qQ8ieHUnxt1wCZCC3ZfAS+SRRWZ2TMda7E=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "34fed993f1674c8d06d58b37ce1e0fe5eebcb9f5", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1685518550, + "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1660459072, + "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "lowdown-src": { + "flake": false, + "locked": { + "lastModified": 1633514407, + "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", + "owner": "kristapsdz", + "repo": "lowdown", + "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "type": "github" + }, + "original": { + "owner": "kristapsdz", + "repo": "lowdown", + 
"type": "github" + } + }, + "nix": { + "inputs": { + "lowdown-src": "lowdown-src", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1676545802, + "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", + "owner": "domenkozar", + "repo": "nix", + "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "relaxed-flakes", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1678875422, + "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "dir": "lib", + "lastModified": 1701253981, + "narHash": "sha256-ztaDIyZ7HrTAfEEUt9AtTDNoCYxUdSd6NrRHaYOIxtk=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "e92039b55bcd58469325ded85d4f58dd5a4eaf58", + "type": "github" + }, + "original": { + "dir": "lib", + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1685801374, + "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.05", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1702539185, + "narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1688056373, + "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs_2" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix 
b/vendor/github.com/go-viper/mapstructure/v2/flake.nix new file mode 100644 index 0000000000..4ed0f53311 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.nix @@ -0,0 +1,39 @@ +{ + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + flake-parts.url = "github:hercules-ci/flake-parts"; + devenv.url = "github:cachix/devenv"; + }; + + outputs = inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ + inputs.devenv.flakeModule + ]; + + systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; + + perSystem = { config, self', inputs', pkgs, system, ... }: rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + }; + + pre-commit.hooks = { + nixpkgs-fmt.enable = true; + }; + + packages = with pkgs; [ + golangci-lint + ]; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; + + ci = devenv.shells.default; + }; + }; + }; +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go new file mode 100644 index 0000000000..27f21bc721 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go @@ -0,0 +1,1562 @@ +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// # Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// # Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// # Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. 
Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// # Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// # Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. +// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source struct { +// Age int `mapstructure:",omitempty"` +// } +// +// # Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// # Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. 
+// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. 
+ Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. +func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. 
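+//
+// (Editor-added usage sketch, where input is any hypothetical source value:
+//
+//	var result map[string]interface{}
+//	decoder, err := NewDecoder(&DecoderConfig{Result: &result})
+//	if err == nil {
+//		err = decoder.Decode(input)
+//	}
+//
+// Decode then reports any conversion errors encountered.)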
+func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. + if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. + if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. + var err error + input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) + if err != nil { + return fmt.Errorf("error decoding '%s': %w", name, err) + } + } + + var err error + outputKind := getKind(outVal) + addMetaKey := true + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, outputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. 
+	if addMetaKey && d.config.Metadata != nil && name != "" {
+		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+	}
+
+	return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+	if val.IsValid() && val.Elem().IsValid() {
+		elem := val.Elem()
+
+		// If we can't address this element, then it's not writable. Instead,
+		// we make a copy of the value (which is a pointer and therefore
+		// writable), decode into that, and replace the whole value.
+		copied := false
+		if !elem.CanAddr() {
+			copied = true
+
+			// Make *T
+			copy := reflect.New(elem.Type())
+
+			// *T = elem
+			copy.Elem().Set(elem)
+
+			// Set elem so we decode into it
+			elem = copy
+		}
+
+		// Decode. If we have an error then return. We also return right
+		// away if we're not a copy because that means we decoded directly.
+		if err := d.decode(name, data, elem); err != nil || !copied {
+			return err
+		}
+
+		// If we're a copy, we need to set the final result
+		val.Set(elem.Elem())
+		return nil
+	}
+
+	dataVal := reflect.ValueOf(data)
+
+	// If the input data is a pointer, and the assigned type is the dereference
+	// of that exact pointer, then indirect it so that we can assign it.
+	// Example: *string to string
+	if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
+		dataVal = reflect.Indirect(dataVal)
+	}
+
+	if !dataVal.IsValid() {
+		dataVal = reflect.Zero(val.Type())
+	}
+
+	dataValType := dataVal.Type()
+	if !dataValType.AssignableTo(val.Type()) {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got '%s'",
+			name, val.Type(), dataValType)
+	}
+
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+
+	converted := true
+	switch {
+	case dataKind == reflect.String:
+		val.SetString(dataVal.String())
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetString("1")
+		} else {
+			val.SetString("0")
+		}
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+	case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
+		dataKind == reflect.Array && d.config.WeaklyTypedInput:
+		dataType := dataVal.Type()
+		elemKind := dataType.Elem().Kind()
+		switch elemKind {
+		case reflect.Uint8:
+			var uints []uint8
+			if dataKind == reflect.Array {
+				uints = make([]uint8, dataVal.Len(), dataVal.Len())
+				for i := range uints {
+					uints[i] = dataVal.Index(i).Interface().(uint8)
+				}
+			} else {
+				uints = dataVal.Interface().([]uint8)
+			}
+			val.SetString(string(uints))
+		default:
+			converted = false
+		}
+	default:
+		converted = false
+	}
+
+	if !converted {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+			name, val.Type(), dataVal.Type(), data)
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+	dataType := dataVal.Type()
+
+	switch {
+	case dataKind == reflect.Int:
+		val.SetInt(dataVal.Int())
+
case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetUint(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, 
data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + dataVal := reflect.ValueOf(data) + + // Resolve any levels of indirection + for dataVal.Kind() == reflect.Pointer { + dataVal = reflect.Indirect(dataVal) + } + + // Check input type and based on the input type jump to the proper func + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + // If the input data is empty, then we just match what the input data is. 
+ if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. + v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + keyName := f.Name + + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) + + // Determine the name of the key in the map + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue + } + + // If "squash" is specified in the tag, we squash the field down. + squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 + if squash { + // When squashing, the embedded type can be a pointer to a struct. 
+ if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + } else { + if strings.Index(tagValue[index+1:], "remain") != -1 { + if v.Kind() != reflect.Map { + return fmt.Errorf("error remain-tag field with invalid type: '%s'", v.Type()) + } + + ptr := v.MapRange() + for ptr.Next() { + valMap.SetMapIndex(ptr.Key(), ptr.Value()) + } + continue + } + } + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return true, nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return false, err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return false, err + } + } + return false, nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
+ dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // If we have a non array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + + // If the input value is nil, then don't allocate since empty != nil + if dataValKind != reflect.Array && dataVal.IsNil() { + return nil + } + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Make a new slice to hold our result, same size as the original data. + valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } else if valSlice.Len() > dataVal.Len() { + valSlice = valSlice.Slice(0, dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if isComparable(valArray) && valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. 
+ default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + targetValKeysUnused := make(map[interface{}]struct{}) + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. 
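+	// (Editor's note: the "structs" slice below acts as a work queue; fields
+	// are collected from the struct at the front, and any embedded struct
+	// tagged ",squash" is appended so its fields are flattened into the same
+	// candidate list.)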
+ type field struct { + field reflect.StructField + val reflect.Value + } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. + var remainField *field + + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } + + // If "squash" is specified in the tag, we squash the field down. + squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + + if tag == "remain" { + remain = true + break + } + } + + if squash { + if fieldVal.Kind() != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) + } else { + structs = append(structs, fieldVal) + } + continue + } + + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if d.config.MatchName(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Remember it for potential errors and metadata. + targetValKeysUnused[fieldName] = struct{}{} + continue + } + } + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = name + "." + fieldName + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. 
+ if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. + if err := d.decodeMap(name, remain, remainField.val); err != nil { + errors = appendErrors(errors, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } + } + + return nil +} + +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go 
b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go
new file mode 100644
index 0000000000..d0913fff6c
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go
@@ -0,0 +1,42 @@
+//go:build !go1.20
+
+package mapstructure
+
+import "reflect"
+
+func isComparable(v reflect.Value) bool {
+	k := v.Kind()
+	switch k {
+	case reflect.Invalid:
+		return false
+
+	case reflect.Array:
+		switch v.Type().Elem().Kind() {
+		case reflect.Interface, reflect.Array, reflect.Struct:
+			for i := 0; i < v.Type().Len(); i++ {
+				// if !v.Index(i).Comparable() {
+				if !isComparable(v.Index(i)) {
+					return false
+				}
+			}
+			return true
+		}
+		return v.Type().Comparable()
+
+	case reflect.Interface:
+		// return v.Elem().Comparable()
+		return isComparable(v.Elem())
+
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			// if !v.Field(i).Comparable() {
+			if !isComparable(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+
+	default:
+		return v.Type().Comparable()
+	}
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
new file mode 100644
index 0000000000..f8255a1b17
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
@@ -0,0 +1,10 @@
+//go:build go1.20
+
+package mapstructure
+
+import "reflect"
+
+// TODO: remove once we drop support for Go <1.20
+func isComparable(v reflect.Value) bool {
+	return v.Comparable()
+}
diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/README.md b/vendor/github.com/go-xmlfmt/xmlfmt/README.md
index 4eb6d69a03..9f661ea2d2 100644
--- a/vendor/github.com/go-xmlfmt/xmlfmt/README.md
+++ b/vendor/github.com/go-xmlfmt/xmlfmt/README.md
@@ -3,7 +3,6 @@
 [![MIT License](http://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
 [![Go Doc](https://img.shields.io/badge/godoc-reference-4b68a3.svg)](https://godoc.org/github.com/go-xmlfmt/xmlfmt)
 [![Go Report Card](https://goreportcard.com/badge/github.com/go-xmlfmt/xmlfmt)](https://goreportcard.com/report/github.com/go-xmlfmt/xmlfmt)
-[![Codeship Status](https://codeship.com/projects/c49f02b0-a384-0134-fb20-2e0351080565/status?branch=master)](https://codeship.com/projects/190297)
 
 ## Synopsis
 
@@ -15,9 +14,18 @@ package main
 import "github.com/go-xmlfmt/xmlfmt"
 
 func main() {
-	xml1 := `aSome org-or-otherWouldnt you like to knowPatCalifia`
+	xml1 := `aSome org-or-otherWouldnt you like to knowPatCalifia`
 	x := xmlfmt.FormatXML(xml1, "\t", " ")
 	print(x)
+
+	// If the XML Comments have nested tags in them
+	xml1 = ` Fred
+
+	23456 `
+	x = xmlfmt.FormatXML(xml1, "", " ", true)
+	print(x)
 }
 ```
 
 Output:
 
 ```xml
-	a
-
+	a
-	Some org-or-other
-
-	Wouldnt you like to know
-
+	Some org-or-other
+	Wouldnt you like to know
-	Pat
-
-	Califia
-
+	Pat
+	Califia
+
+
+
+	Fred
+
+	23456
+
 ```
 
 There is no XML decoding and encoding involved, only pure regular expression matching and replacing. So it is much faster than going through decoding and encoding procedures. Moreover, the exact XML source string is preserved, instead of being changed by the encoder. This is why this package exists in the first place.
 
+Note that
+
+- the default line ending is handled by the package automatically now. For Windows it's `CRLF`, and the standard `LF` anywhere else. No need to change the default line ending now.
+- the case of XML comments nested within XML comments is ***not*** supported. Please avoid them or use any other tools to correct them before using this package.
+- don't turn on the `nestedTagsInComments` parameter blindly, as the code has become 10+ times more complicated because of it.
+
 ## Command
 
 To use it on command line, check out [xmlfmt](https://github.com/AntonioSun/xmlfmt):
 
 ```
-$ xmlfmt
-XML Formatter
-built on 2019-12-08
+$ xmlfmt -V
+xmlfmt - XML Formatter
+Copyright (C) 2016-2022, Antonio Sun
 
 The xmlfmt will format the XML string without rewriting the document
 
-Options:
-
-  -h, --help      display help information
-  -f, --file     *The xml file to read from (or stdin)
-  -p, --prefix    each element begins on a new line and this prefix
-  -i, --indent[= ]  indent string for nested elements
+Built on 2022-02-06
+Version 1.1.1
+
+$ xmlfmt
+the required flag `-f, --file' was not specified
+
+Usage:
+  xmlfmt [OPTIONS]
+
+Application Options:
+  -f, --file=    The xml file to read from (or "-" for stdin) [$XMLFMT_FILEI]
+  -p, --prefix=  Each element begins on a new line and this prefix [$XMLFMT_PREFIX]
+  -i, --indent=  Indent string for nested elements (default:  ) [$XMLFMT_INDENT]
+  -n, --nested   Nested tags in comments [$XMLFMT_NESTED]
+  -v, --verbose  Verbose mode (Multiple -v options increase the verbosity)
+  -V, --version  Show program version and exit
+
+Help Options:
+  -h, --help     Show this help message
+
+
+$ curl -sL https://pastebin.com/raw/z3euQ5PR | xmlfmt -f -
+
+
+
+	a
+
+
+
+
+	Some org-or-other
+	Wouldnt you like to know
+
+
+	Pat
+	Califia
+
+
+
+
+$ curl -sL https://pastebin.com/raw/Zs0qy0qz | tee /tmp/xmlfmt.xml | xmlfmt -f - -n
+
+
+	Fred
+
+	23456
+
+
+$ XMLFMT_NESTED=true XMLFMT_PREFIX='|' xmlfmt -f /tmp/xmlfmt.xml
+
+|
+|
+| Fred
+|
+| 23456
+|
 ```
 
@@ -76,7 +143,7 @@ Options:
 
 ### The format
 
-The Go XML Formatter is not called XML Beautifier because the result is not *exactly* as what people would expect -- some, but not all, closing tags stays on the same line, just as shown above. Having been looking at the result and thinking over it, I now think it is actually a better way to present it, as those closing tags on the same line are better stay that way in my opinion. I.e.,
+The Go XML Formatter is not called XML Beautifier because the result is not *exactly* as what people would expect -- most of the closing tags stay on the same line, just as shown above. Having been looking at the result and thinking over it, I now think it is actually a better way to present it, as those closing tags on the same line are better stay that way in my opinion. I.e.,
 
 When it comes to very big XML strings, which is what I’m dealing every day, saving spaces by not allowing those closing tags taking extra lines is plus instead of negative to me.
 
@@ -175,4 +242,4 @@ echo ']+?)(/?)>`)
-	// NL is the newline string used in XML output, define for DOS-convenient.
-	NL = "\r\n"
+	// NL is the newline string used in XML output.
+	NL = "\n"
 )
 
-// FormatXML will (purly) reformat the XML string in a readable way, without any rewriting/altering the structure
-func FormatXML(xmls, prefix, indent string) string {
-	src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><")
+func init() {
+	// define NL for Windows
+	if runtime.GOOS == "windows" {
+		NL = "\r\n"
+	}
+}
+// FormatXML will (purely) reformat the XML string in a readable way, without any rewriting/altering the structure.
+// If your XML Comments have nested tags in them, or you're not 100% sure otherwise, pass `true` as the third parameter to this function. But don't turn it on blindly, as the code has become ten times more complicated because of it.
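+//
+// (Editor-added usage sketch, where raw is any XML string:
+//
+//	x := FormatXML(raw, "", "  ")       // comments are tag-free
+//	x = FormatXML(raw, "", "  ", true)  // comments may contain nested tags
+//
+// The variadic parameter keeps existing three-argument calls backward compatible.)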
+func FormatXML(xmls, prefix, indent string, nestedTagsInComments ...bool) string { + nestedTagsInComment := false + if len(nestedTagsInComments) > 0 { + nestedTagsInComment = nestedTagsInComments[0] + } + reXmlComments := regexp.MustCompile(`(?s)()`) + src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><") + if nestedTagsInComment { + src = reXmlComments.ReplaceAllStringFunc(src, func(m string) string { + parts := reXmlComments.FindStringSubmatch(m) + p2 := regexp.MustCompile(`\r*\n`).ReplaceAllString(parts[2], " ") + return parts[1] + html.EscapeString(p2) + parts[3] + }) + } rf := replaceTag(prefix, indent) - return (prefix + reg.ReplaceAllStringFunc(src, rf)) + r := prefix + reg.ReplaceAllStringFunc(src, rf) + if nestedTagsInComment { + r = reXmlComments.ReplaceAllStringFunc(r, func(m string) string { + parts := reXmlComments.FindStringSubmatch(m) + return parts[1] + html.UnescapeString(parts[2]) + parts[3] + }) + } + + return r } // replaceTag returns a closure function to do 's/(?<=>)\s+(?=<)//g; s(<(/?)([^>]+?)(/?)>)($indent+=$3?0:$1?-1:1;"<$1$2$3>"."\n".(" "x$indent))ge' as in Perl // and deal with comments as well func replaceTag(prefix, indent string) func(string) string { indentLevel := 0 + lastEndElem := true return func(m string) string { // head elem if strings.HasPrefix(m, "") { + lastEndElem = true return NL + prefix + strings.Repeat(indent, indentLevel) + m } // comment elem @@ -45,12 +76,17 @@ func replaceTag(prefix, indent string) func(string) string { // end elem if strings.HasPrefix(m, " c.MaxEntries { - c.RemoveOldest() - } -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key Key) (value interface{}, ok bool) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.ll.MoveToFront(ele) - return ele.Value.(*entry).value, true - } - return -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key Key) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.removeElement(ele) - } -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - if c.cache == nil { - return - } - ele := c.ll.Back() - if ele != nil { - c.removeElement(ele) - } -} - -func (c *Cache) removeElement(e *list.Element) { - c.ll.Remove(e) - kv := e.Value.(*entry) - delete(c.cache, kv.key) - if c.OnEvicted != nil { - c.OnEvicted(kv.key, kv.value) - } -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - if c.cache == nil { - return 0 - } - return c.ll.Len() -} - -// Clear purges all stored items from the cache. -func (c *Cache) Clear() { - if c.OnEvicted != nil { - for _, e := range c.cache { - kv := e.Value.(*entry) - c.OnEvicted(kv.key, kv.value) - } - } - c.ll = nil - c.cache = nil -} diff --git a/vendor/github.com/golangci/check/LICENSE b/vendor/github.com/golangci/check/LICENSE deleted file mode 100644 index 5a1774b8e6..0000000000 --- a/vendor/github.com/golangci/check/LICENSE +++ /dev/null @@ -1,674 +0,0 @@ -GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. {http://fsf.org/} - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. 
By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. 
- - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. 
- - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. 
This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. 
Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. 
-Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). 
- - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". 
- - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. 
- - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. 
- - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - {one line to give the program's name and a brief idea of what it does.} - Copyright (C) {year} {name of author} - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see {http://www.gnu.org/licenses/}. - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - opennota Copyright (C) 2013 opennota - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -{http://www.gnu.org/licenses/}. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. 
If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -{http://www.gnu.org/philosophy/why-not-lgpl.html}. diff --git a/vendor/github.com/golangci/check/cmd/structcheck/structcheck.go b/vendor/github.com/golangci/check/cmd/structcheck/structcheck.go deleted file mode 100644 index 5dc5f83804..0000000000 --- a/vendor/github.com/golangci/check/cmd/structcheck/structcheck.go +++ /dev/null @@ -1,193 +0,0 @@ -// structcheck -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see <http://www.gnu.org/licenses/>. - -package structcheck - -import ( - "flag" - "fmt" - "go/ast" - "go/token" - "go/types" - - "golang.org/x/tools/go/loader" -) - -var ( - assignmentsOnly = flag.Bool("structcheck.a", false, "Count assignments only") - loadTestFiles = flag.Bool("structcheck.t", false, "Load test files too") - buildTags = flag.String("structcheck.tags", "", "Build tags") -) - -type visitor struct { - prog *loader.Program - pkg *loader.PackageInfo - m map[types.Type]map[string]int - skip map[types.Type]struct{} -} - -func (v *visitor) decl(t types.Type, fieldName string) { - if _, ok := v.m[t]; !ok { - v.m[t] = make(map[string]int) - } - if _, ok := v.m[t][fieldName]; !ok { - v.m[t][fieldName] = 0 - } -} - -func (v *visitor) assignment(t types.Type, fieldName string) { - if _, ok := v.m[t]; !ok { - v.m[t] = make(map[string]int) - } - if _, ok := v.m[t][fieldName]; ok { - v.m[t][fieldName]++ - } else { - v.m[t][fieldName] = 1 - } -} - -func (v *visitor) typeSpec(node *ast.TypeSpec) { - if strukt, ok := node.Type.(*ast.StructType); ok { - t := v.pkg.Info.Defs[node.Name].Type() - for _, f := range strukt.Fields.List { - if len(f.Names) > 0 { - fieldName := f.Names[0].Name - v.decl(t, fieldName) - } - } - } -} - -func (v *visitor) typeAndFieldName(expr *ast.SelectorExpr) (types.Type, string, bool) { - selection := v.pkg.Info.Selections[expr] - if selection == nil { - return nil, "", false - } - recv := selection.Recv() - if ptr, ok := recv.(*types.Pointer); ok { - recv = ptr.Elem() - } - return recv, selection.Obj().Name(), true -} - -func (v *visitor) assignStmt(node *ast.AssignStmt) { - for _, lhs := range node.Lhs { - var selector *ast.SelectorExpr - switch expr := lhs.(type) { - case *ast.SelectorExpr: - selector = expr - case *ast.IndexExpr: - if expr, ok := expr.X.(*ast.SelectorExpr); ok { - selector = expr - } - } - if selector != nil { - if t, fn, ok := v.typeAndFieldName(selector); ok { - v.assignment(t, fn) - } - } - } -} - -func (v *visitor) compositeLiteral(node *ast.CompositeLit) { - t := v.pkg.Info.Types[node.Type].Type - for _, expr := range node.Elts { - if kv, ok := expr.(*ast.KeyValueExpr); ok { - if ident, ok := kv.Key.(*ast.Ident); ok { - v.assignment(t, ident.Name) - } - } else { - // Struct literal with positional values. - // All the fields are assigned.
- v.skip[t] = struct{}{} - break - } - } -} - -func (v *visitor) Visit(node ast.Node) ast.Visitor { - switch node := node.(type) { - case *ast.TypeSpec: - v.typeSpec(node) - - case *ast.AssignStmt: - if *assignmentsOnly { - v.assignStmt(node) - } - - case *ast.SelectorExpr: - if !*assignmentsOnly { - if t, fn, ok := v.typeAndFieldName(node); ok { - v.assignment(t, fn) - } - } - - case *ast.CompositeLit: - v.compositeLiteral(node) - } - - return v -} - -type Issue struct { - Pos token.Position - Type string - FieldName string -} - -func Run(program *loader.Program, reportExported bool) []Issue { - var issues []Issue - for _, pkg := range program.InitialPackages() { - visitor := &visitor{ - m: make(map[types.Type]map[string]int), - skip: make(map[types.Type]struct{}), - prog: program, - pkg: pkg, - } - for _, f := range pkg.Files { - ast.Walk(visitor, f) - } - - for t := range visitor.m { - if _, skip := visitor.skip[t]; skip { - continue - } - for fieldName, v := range visitor.m[t] { - if !reportExported && ast.IsExported(fieldName) { - continue - } - if v == 0 { - field, _, _ := types.LookupFieldOrMethod(t, false, pkg.Pkg, fieldName) - if field == nil { - fmt.Printf("%s: unknown field or method: %s.%s\n", pkg.Pkg.Path(), t, fieldName) - continue - } - if fieldName == "XMLName" { - if named, ok := field.Type().(*types.Named); ok && named.Obj().Pkg().Path() == "encoding/xml" { - continue - } - } - pos := program.Fset.Position(field.Pos()) - issues = append(issues, Issue{ - Pos: pos, - Type: types.TypeString(t, nil), - FieldName: fieldName, - }) - } - } - } - } - - return issues -} diff --git a/vendor/github.com/golangci/check/cmd/varcheck/varcheck.go b/vendor/github.com/golangci/check/cmd/varcheck/varcheck.go deleted file mode 100644 index 8e93e0473b..0000000000 --- a/vendor/github.com/golangci/check/cmd/varcheck/varcheck.go +++ /dev/null @@ -1,163 +0,0 @@ -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see <http://www.gnu.org/licenses/>.
- -package varcheck - -import ( - "flag" - "go/ast" - "go/token" - "strings" - - "go/types" - - "golang.org/x/tools/go/loader" -) - -var ( - buildTags = flag.String("varcheck.tags", "", "Build tags") -) - -type object struct { - pkgPath string - name string -} - -type visitor struct { - prog *loader.Program - pkg *loader.PackageInfo - uses map[object]int - positions map[object]token.Position - insideFunc bool -} - -func getKey(obj types.Object) object { - if obj == nil { - return object{} - } - - pkg := obj.Pkg() - pkgPath := "" - if pkg != nil { - pkgPath = pkg.Path() - } - - return object{ - pkgPath: pkgPath, - name: obj.Name(), - } -} - -func (v *visitor) decl(obj types.Object) { - key := getKey(obj) - if _, ok := v.uses[key]; !ok { - v.uses[key] = 0 - } - if _, ok := v.positions[key]; !ok { - v.positions[key] = v.prog.Fset.Position(obj.Pos()) - } -} - -func (v *visitor) use(obj types.Object) { - key := getKey(obj) - if _, ok := v.uses[key]; ok { - v.uses[key]++ - } else { - v.uses[key] = 1 - } -} - -func isReserved(name string) bool { - return name == "_" || strings.HasPrefix(strings.ToLower(name), "_cgo_") -} - -func (v *visitor) Visit(node ast.Node) ast.Visitor { - switch node := node.(type) { - case *ast.Ident: - v.use(v.pkg.Info.Uses[node]) - - case *ast.ValueSpec: - if !v.insideFunc { - for _, ident := range node.Names { - if !isReserved(ident.Name) { - v.decl(v.pkg.Info.Defs[ident]) - } - } - } - for _, val := range node.Values { - ast.Walk(v, val) - } - if node.Type != nil { - ast.Walk(v, node.Type) - } - return nil - - case *ast.FuncDecl: - if node.Body != nil { - v.insideFunc = true - ast.Walk(v, node.Body) - v.insideFunc = false - } - - if node.Recv != nil { - ast.Walk(v, node.Recv) - } - if node.Type != nil { - ast.Walk(v, node.Type) - } - - return nil - } - - return v -} - -type Issue struct { - Pos token.Position - VarName string -} - -func Run(program *loader.Program, reportExported bool) []Issue { - var issues []Issue - uses := make(map[object]int) - positions := make(map[object]token.Position) - - for _, pkgInfo := range program.InitialPackages() { - if pkgInfo.Pkg.Path() == "unsafe" { - continue - } - - v := &visitor{ - prog: program, - pkg: pkgInfo, - uses: uses, - positions: positions, - } - - for _, f := range v.pkg.Files { - ast.Walk(v, f) - } - } - - for obj, useCount := range uses { - if useCount == 0 && (reportExported || !ast.IsExported(obj.name)) { - pos := positions[obj] - issues = append(issues, Issue{ - Pos: pos, - VarName: obj.name, - }) - } - } - - return issues -} diff --git a/vendor/github.com/golangci/go-misc/LICENSE b/vendor/github.com/golangci/go-misc/LICENSE deleted file mode 100644 index cc42dd45d1..0000000000 --- a/vendor/github.com/golangci/go-misc/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rémy Oudompheng. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * The name of Rémy Oudompheng may not be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golangci/go-misc/deadcode/README.md b/vendor/github.com/golangci/go-misc/deadcode/README.md deleted file mode 100644 index 5504231281..0000000000 --- a/vendor/github.com/golangci/go-misc/deadcode/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# deadcode - -`deadcode` is a very simple utility which detects unused declarations in a Go package. - -## Usage -``` -deadcode [-test] [packages] - - -test Include test files - packages A list of packages using the same conventions as the go tool -``` - -## Limitations - -* Self-referential unused code is not currently reported -* A single package can be tested at a time -* Unused methods are not reported - diff --git a/vendor/github.com/golangci/go-misc/deadcode/deadcode.go b/vendor/github.com/golangci/go-misc/deadcode/deadcode.go deleted file mode 100644 index c154a576b3..0000000000 --- a/vendor/github.com/golangci/go-misc/deadcode/deadcode.go +++ /dev/null @@ -1,138 +0,0 @@ -package deadcode - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "os" - "path/filepath" - "sort" - "strings" - - "golang.org/x/tools/go/loader" -) - -var exitCode int - -var ( - withTestFiles bool -) - -type Issue struct { - Pos token.Position - UnusedIdentName string -} - -func Run(program *loader.Program) ([]Issue, error) { - ctx := &Context{ - program: program, - } - report := ctx.Process() - var issues []Issue - for _, obj := range report { - issues = append(issues, Issue{ - Pos: program.Fset.Position(obj.Pos()), - UnusedIdentName: obj.Name(), - }) - } - - return issues, nil -} - -func fatalf(format string, args ...interface{}) { - panic(fmt.Errorf(format, args...)) -} - -type Context struct { - cwd string - withTests bool - - program *loader.Program -} - -// pos resolves a compact position encoding into a verbose one -func (ctx *Context) pos(pos token.Pos) token.Position { - if ctx.cwd == "" { - ctx.cwd, _ = os.Getwd() - } - p := ctx.program.Fset.Position(pos) - f, err := filepath.Rel(ctx.cwd, p.Filename) - if err == nil { - p.Filename = f - } - return p -} - -// error formats the error to standard error, adding program -// identification and a newline -func (ctx *Context) errorf(pos token.Pos, format string, args ...interface{}) { - p := ctx.pos(pos) - fmt.Fprintf(os.Stderr, p.String()+": "+format+"\n", args...) - exitCode = 2 -} - -func (ctx *Context) Load(args ...string) { - // TODO -} - -func (ctx *Context) Process() []types.Object { - prog := ctx.program - var allUnused []types.Object - for _, pkg := range prog.Imported { - unused := ctx.doPackage(prog, pkg) - allUnused = append(allUnused, unused...) - } - for _, pkg := range prog.Created { - unused := ctx.doPackage(prog, pkg) - allUnused = append(allUnused, unused...) 
- } - sort.Sort(objects(allUnused)) - return allUnused -} - -func isTestFuncByName(name string) bool { - return strings.HasPrefix(name, "Test") || - strings.HasPrefix(name, "Benchmark") || - strings.HasPrefix(name, "Fuzz") || - strings.HasPrefix(name, "Example") -} - -func (ctx *Context) doPackage(prog *loader.Program, pkg *loader.PackageInfo) []types.Object { - used := make(map[types.Object]bool) - for _, file := range pkg.Files { - ast.Inspect(file, func(n ast.Node) bool { - id, ok := n.(*ast.Ident) - if !ok { - return true - } - obj := pkg.Info.Uses[id] - if obj != nil { - used[obj] = true - } - return false - }) - } - - global := pkg.Pkg.Scope() - var unused []types.Object - for _, name := range global.Names() { - if pkg.Pkg.Name() == "main" && name == "main" { - continue - } - obj := global.Lookup(name) - _, isSig := obj.Type().(*types.Signature) - pos := ctx.pos(obj.Pos()) - isTestMethod := isSig && isTestFuncByName(obj.Name()) && strings.HasSuffix(pos.Filename, "_test.go") - if !used[obj] && ((pkg.Pkg.Name() == "main" && !isTestMethod) || !ast.IsExported(name)) { - unused = append(unused, obj) - } - } - return unused -} - -type objects []types.Object - -func (s objects) Len() int { return len(s) } -func (s objects) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s objects) Less(i, j int) bool { return s[i].Pos() < s[j].Pos() } diff --git a/vendor/github.com/golangci/gofmt/gofmt/doc.go b/vendor/github.com/golangci/gofmt/gofmt/doc.go index da0c8581dd..d0a4580219 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/doc.go +++ b/vendor/github.com/golangci/gofmt/gofmt/doc.go @@ -13,9 +13,11 @@ that directory, recursively. (Files starting with a period are ignored.) By default, gofmt prints the reformatted sources to standard output. Usage: + gofmt [flags] [path ...] The flags are: + -d Do not print reformatted sources to standard output. If a file's formatting is different than gofmt's, print diffs @@ -37,10 +39,10 @@ The flags are: the original file is restored from an automatic backup. Debugging support: + -cpuprofile filename Write cpu profile to the specified file. - The rewrite rule specified with the -r flag must be a string of the form: pattern -> replacement @@ -57,7 +59,7 @@ such a fragment, gofmt preserves leading indentation as well as leading and trailing spaces, so that individual sections of a Go program can be formatted by piping them through gofmt. -Examples +# Examples To check files for unnecessary parentheses: @@ -71,7 +73,7 @@ To convert the package tree from explicit slice upper bounds to implicit ones: gofmt -r 'α[β:len(α)] -> α[β:]' -w $GOROOT/src -The simplify command +# The simplify command When invoked with -s gofmt will make the following source transformations where possible. diff --git a/vendor/github.com/golangci/gofmt/gofmt/gofmt.go b/vendor/github.com/golangci/gofmt/gofmt/gofmt.go index e7612afae0..be046f34cf 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/gofmt.go +++ b/vendor/github.com/golangci/gofmt/gofmt/gofmt.go @@ -76,6 +76,11 @@ func initParserMode() { if *allErrors { parserMode |= parser.AllErrors } + // It's only -r that makes use of go/ast's object resolution, + // so avoid the unnecessary work if the flag isn't used. 
+ if *rewriteRule == "" { + parserMode |= parser.SkipObjectResolution + } } func isGoFile(f fs.DirEntry) bool { @@ -286,12 +291,9 @@ func processFile(filename string, info fs.FileInfo, in io.Reader, r *reporter) e } } if *doDiff { - data, err := diffWithReplaceTempFile(src, res, filename) - if err != nil { - return fmt.Errorf("computing diff: %s", err) - } - fmt.Fprintf(r, "diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) - r.Write(data) + newName := filepath.ToSlash(filename) + oldName := newName + ".orig" + r.Write(diff.Diff(oldName, src, newName, res)) } } @@ -350,7 +352,12 @@ func readFile(filename string, info fs.FileInfo, in io.Reader) ([]byte, error) { // stop to avoid corrupting it.) src := make([]byte, size+1) n, err := io.ReadFull(in, src) - if err != nil && err != io.ErrUnexpectedEOF { + switch err { + case nil, io.EOF, io.ErrUnexpectedEOF: + // io.ReadFull returns io.EOF (for an empty file) or io.ErrUnexpectedEOF + // (for a non-empty file) if the file was changed unexpectedly. Continue + // with comparing file sizes in those cases. + default: return nil, err } if n < size { @@ -463,43 +470,6 @@ func fileWeight(path string, info fs.FileInfo) int64 { return info.Size() } -func diffWithReplaceTempFile(b1, b2 []byte, filename string) ([]byte, error) { - data, err := diff.Diff("gofmt", b1, b2) - if len(data) > 0 { - return replaceTempFilename(data, filename) - } - return data, err -} - -// replaceTempFilename replaces temporary filenames in diff with actual one. -// -// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500 -// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500 -// ... -// -> -// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 -// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 -// ... -func replaceTempFilename(diff []byte, filename string) ([]byte, error) { - bs := bytes.SplitN(diff, []byte{'\n'}, 3) - if len(bs) < 3 { - return nil, fmt.Errorf("got unexpected diff for %s", filename) - } - // Preserve timestamps. - var t0, t1 []byte - if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { - t0 = bs[0][i:] - } - if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { - t1 = bs[1][i:] - } - // Always print filepath with slash separator. 
- f := filepath.ToSlash(filename) - bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) - bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) - return bytes.Join(bs, []byte{'\n'}), nil -} - const chmodSupported = runtime.GOOS != "windows" // backupFile writes data to a new file named filename with permissions perm, diff --git a/vendor/github.com/golangci/gofmt/gofmt/golangci.go b/vendor/github.com/golangci/gofmt/gofmt/golangci.go index c9c3fe2ae5..a69611e1d3 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/golangci.go +++ b/vendor/github.com/golangci/gofmt/gofmt/golangci.go @@ -8,8 +8,14 @@ import ( "go/printer" "go/token" "os" + "path/filepath" + "sync" + + "github.com/golangci/gofmt/gofmt/internal/diff" ) +var parserModeMu sync.RWMutex + type RewriteRule struct { Pattern string Replacement string @@ -31,7 +37,9 @@ func RunRewrite(filename string, needSimplify bool, rewriteRules []RewriteRule) fset := token.NewFileSet() + parserModeMu.Lock() initParserMode() + parserModeMu.Unlock() file, sourceAdj, indentAdj, err := parse(fset, filename, src, false) if err != nil { @@ -59,12 +67,10 @@ func RunRewrite(filename string, needSimplify bool, rewriteRules []RewriteRule) } // formatting has changed - data, err := diffWithReplaceTempFile(src, res, filename) - if err != nil { - return nil, fmt.Errorf("error computing diff: %s", err) - } + newName := filepath.ToSlash(filename) + oldName := newName + ".orig" - return data, nil + return diff.Diff(oldName, src, newName, res), nil } func rewriteFileContent(fset *token.FileSet, file *ast.File, rewriteRules []RewriteRule) (*ast.File, error) { diff --git a/vendor/github.com/golangci/gofmt/gofmt/internal.go b/vendor/github.com/golangci/gofmt/gofmt/internal.go index 1abbdd6989..31a825bf83 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/internal.go +++ b/vendor/github.com/golangci/gofmt/gofmt/internal.go @@ -26,6 +26,13 @@ func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) ( indentAdj int, err error, ) { + + // START - Change related to usage inside golangci-lint + parserModeMu.Lock() + parserMode := parserMode + parserModeMu.Unlock() + // END - Change related to usage inside golangci-lint + // Try as whole source file. file, err = parser.ParseFile(fset, filename, src, parserMode) // If there's no error, return. If the error is that the source file didn't begin with a diff --git a/vendor/github.com/golangci/gofmt/gofmt/internal/diff/diff.go b/vendor/github.com/golangci/gofmt/gofmt/internal/diff/diff.go index cbd0529ec6..47b2856714 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/internal/diff/diff.go +++ b/vendor/github.com/golangci/gofmt/gofmt/internal/diff/diff.go @@ -1,79 +1,261 @@ -// Copyright 2019 The Go Authors. All rights reserved. +// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package diff implements a Diff function that compare two inputs -// using the 'diff' tool. package diff import ( "bytes" - "io/ioutil" - "os" - "runtime" - - exec "github.com/golangci/gofmt/gofmt/internal/execabs" + "fmt" + "sort" + "strings" ) -// Returns diff of two arrays of bytes in diff tool format. -func Diff(prefix string, b1, b2 []byte) ([]byte, error) { - f1, err := writeTempFile(prefix, b1) - if err != nil { - return nil, err +// A pair is a pair of values tracked for both the x and y side of a diff. +// It is typically a pair of line indexes.
+type pair struct{ x, y int } + +// Diff returns an anchored diff of the two texts old and new +// in the “unified diff” format. If old and new are identical, +// Diff returns a nil slice (no output). +// +// Unix diff implementations typically look for a diff with +// the smallest number of lines inserted and removed, +// which can in the worst case take time quadratic in the +// number of lines in the texts. As a result, many implementations +// either can be made to run for a long time or cut off the search +// after a predetermined amount of work. +// +// In contrast, this implementation looks for a diff with the +// smallest number of “unique” lines inserted and removed, +// where unique means a line that appears just once in both old and new. +// We call this an “anchored diff” because the unique lines anchor +// the chosen matching regions. An anchored diff is usually clearer +// than a standard diff, because the algorithm does not try to +// reuse unrelated blank lines or closing braces. +// The algorithm also guarantees to run in O(n log n) time +// instead of the standard O(n²) time. +// +// Some systems call this approach a “patience diff,” named for +// the “patience sorting” algorithm, itself named for a solitaire card game. +// We avoid that name for two reasons. First, the name has been used +// for a few different variants of the algorithm, so it is imprecise. +// Second, the name is frequently interpreted as meaning that you have +// to wait longer (to be patient) for the diff, meaning that it is a slower algorithm, +// when in fact the algorithm is faster than the standard one. +func Diff(oldName string, old []byte, newName string, new []byte) []byte { + if bytes.Equal(old, new) { + return nil } - defer os.Remove(f1) + x := lines(old) + y := lines(new) + + // Print diff header. + var out bytes.Buffer + fmt.Fprintf(&out, "diff %s %s\n", oldName, newName) + fmt.Fprintf(&out, "--- %s\n", oldName) + fmt.Fprintf(&out, "+++ %s\n", newName) + + // Loop over matches to consider, + // expanding each match to include surrounding lines, + // and then printing diff chunks. + // To avoid setup/teardown cases outside the loop, + // tgs returns a leading {0,0} and trailing {len(x), len(y)} pair + // in the sequence of matches. + var ( + done pair // printed up to x[:done.x] and y[:done.y] + chunk pair // start lines of current chunk + count pair // number of lines from each side in current chunk + ctext []string // lines for current chunk + ) + for _, m := range tgs(x, y) { + if m.x < done.x { + // Already handled scanning forward from earlier match. + continue + } - f2, err := writeTempFile(prefix, b2) - if err != nil { - return nil, err + // Expand matching lines as far as possible, + // establishing that x[start.x:end.x] == y[start.y:end.y]. + // Note that on the first (or last) iteration we may (or definitely do) + // have an empty match: start.x==end.x and start.y==end.y. + start := m + for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] { + start.x-- + start.y-- + } + end := m + for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] { + end.x++ + end.y++ + } + + // Emit the mismatched lines before start into this chunk. + // (No effect on first sentinel iteration, when start = {0,0}.)
+ for _, s := range x[done.x:start.x] { + ctext = append(ctext, "-"+s) + count.x++ + } + for _, s := range y[done.y:start.y] { + ctext = append(ctext, "+"+s) + count.y++ + } + + // If we're not at EOF and have too few common lines, + // the chunk includes all the common lines and continues. + const C = 3 // number of context lines + if (end.x < len(x) || end.y < len(y)) && + (end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) { + for _, s := range x[start.x:end.x] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = end + continue + } + + // End chunk with common lines for context. + if len(ctext) > 0 { + n := end.x - start.x + if n > C { + n = C + } + for _, s := range x[start.x : start.x+n] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = pair{start.x + n, start.y + n} + + // Format and emit chunk. + // Convert line numbers to 1-indexed. + // Special case: empty file shows up as 0,0 not 1,0. + if count.x > 0 { + chunk.x++ + } + if count.y > 0 { + chunk.y++ + } + fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y) + for _, s := range ctext { + out.WriteString(s) + } + count.x = 0 + count.y = 0 + ctext = ctext[:0] + } + + // If we reached EOF, we're done. + if end.x >= len(x) && end.y >= len(y) { + break + } + + // Otherwise start a new chunk. + chunk = pair{end.x - C, end.y - C} + for _, s := range x[chunk.x:end.x] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = end } - defer os.Remove(f2) - cmd := "diff" - if runtime.GOOS == "plan9" { - cmd = "/bin/ape/diff" + return out.Bytes() +} + +// lines returns the lines in the file x, including newlines. +// If the file does not end in a newline, one is supplied +// along with a warning about the missing newline. +func lines(x []byte) []string { + l := strings.SplitAfter(string(x), "\n") + if l[len(l)-1] == "" { + l = l[:len(l)-1] + } else { + // Treat last line as having a message about the missing newline attached, + // using the same text as BSD/GNU diff (including the leading backslash). + l[len(l)-1] += "\n\\ No newline at end of file\n" } + return l +} - data, err := exec.Command(cmd, "-u", f1, f2).CombinedOutput() - if len(data) > 0 { - // diff exits with a non-zero status when the files don't match. - // Ignore that failure as long as we get output. - err = nil +// tgs returns the pairs of indexes of the longest common subsequence +// of unique lines in x and y, where a unique line is one that appears +// once in x and once in y. +// +// The longest common subsequence algorithm is as described in +// Thomas G. Szymanski, “A Special Case of the Maximal Common +// Subsequence Problem,” Princeton TR #170 (January 1975), +// available at https://research.swtch.com/tgs170.pdf. +func tgs(x, y []string) []pair { + // Count the number of times each string appears in a and b. + // We only care about 0, 1, many, counted as 0, -1, -2 + // for the x side and 0, -4, -8 for the y side. + // Using negative numbers now lets us distinguish positive line numbers later. + m := make(map[string]int) + for _, s := range x { + if c := m[s]; c > -2 { + m[s] = c - 1 + } + } + for _, s := range y { + if c := m[s]; c > -8 { + m[s] = c - 4 + } } - // If we are on Windows and the diff is Cygwin diff, - // machines can get into a state where every Cygwin - // command works fine but prints a useless message like: + // Now unique strings can be identified by m[s] = -1+-4. // - // Cygwin WARNING: - // Couldn't compute FAST_CWD pointer. 
This typically occurs if you're using - // an older Cygwin version on a newer Windows. Please update to the latest - // available Cygwin version from https://cygwin.com/. If the problem persists, - // please see https://cygwin.com/problems.html - // - // Skip over that message and just return the actual diff. - if len(data) > 0 && !bytes.HasPrefix(data, []byte("--- ")) { - i := bytes.Index(data, []byte("\n--- ")) - if i >= 0 && i < 80*10 && bytes.Contains(data[:i], []byte("://cygwin.com/")) { - data = data[i+1:] + // Gather the indexes of those strings in x and y, building: + // xi[i] = increasing indexes of unique strings in x. + // yi[i] = increasing indexes of unique strings in y. + // inv[i] = index j such that x[xi[i]] = y[yi[j]]. + var xi, yi, inv []int + for i, s := range y { + if m[s] == -1+-4 { + m[s] = len(yi) + yi = append(yi, i) + } + } + for i, s := range x { + if j, ok := m[s]; ok && j >= 0 { + xi = append(xi, i) + inv = append(inv, j) } } - return data, err -} - -func writeTempFile(prefix string, data []byte) (string, error) { - file, err := ioutil.TempFile("", prefix) - if err != nil { - return "", err + // Apply Algorithm A from Szymanski's paper. + // In those terms, A = J = inv and B = [0, n). + // We add sentinel pairs {0,0}, and {len(x),len(y)} + // to the returned sequence, to help the processing loop. + J := inv + n := len(xi) + T := make([]int, n) + L := make([]int, n) + for i := range T { + T[i] = n + 1 + } + for i := 0; i < n; i++ { + k := sort.Search(n, func(k int) bool { + return T[k] >= J[i] + }) + T[k] = J[i] + L[i] = k + 1 } - _, err = file.Write(data) - if err1 := file.Close(); err == nil { - err = err1 + k := 0 + for _, v := range L { + if k < v { + k = v + } } - if err != nil { - os.Remove(file.Name()) - return "", err + seq := make([]pair, 2+k) + seq[1+k] = pair{len(x), len(y)} // sentinel at end + lastj := n + for i := n - 1; i >= 0; i-- { + if L[i] == k && J[i] < lastj { + seq[k] = pair{xi[i], yi[J[i]]} + k-- + } } - return file.Name(), nil + seq[0] = pair{0, 0} // sentinel at start + return seq } diff --git a/vendor/github.com/golangci/gofmt/gofmt/internal/execabs/execabs.go b/vendor/github.com/golangci/gofmt/gofmt/internal/execabs/execabs.go deleted file mode 100644 index 9a05d971da..0000000000 --- a/vendor/github.com/golangci/gofmt/gofmt/internal/execabs/execabs.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. -// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. 
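// Editorial sketch — not part of the vendored sources in this diff: a minimal,
// hypothetical illustration of calling the anchored-diff replacement introduced
// above, whose signature the hunk shows as
// func Diff(oldName string, old []byte, newName string, new []byte) []byte.
// The file names and contents here are invented, and the import only resolves
// from packages inside the gofmt module, because the package is internal;
// RunRewrite in golangci.go above calls it the same way, passing the original
// name plus a ".orig" suffix as the old name.
package main

import (
	"fmt"

	"github.com/golangci/gofmt/gofmt/internal/diff"
)

func main() {
	before := []byte("a\nb\nc\n")
	after := []byte("a\nB\nc\n")
	// Diff returns nil when the inputs are byte-identical; otherwise it
	// returns a unified diff whose header uses exactly the two names passed in.
	fmt.Printf("%s", diff.Diff("main.go.orig", before, "main.go", after))
}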
-package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -var ErrNotFound = exec.ErrNotFound - -type ( - Cmd = exec.Cmd - Error = exec.Error - ExitError = exec.ExitError -) - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable relative to current directory (.%c%s)", file, filepath.Separator, path) -} - -func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) - fixCmd(name, cmd) - return cmd -} diff --git a/vendor/github.com/golangci/gofmt/gofmt/readme.md b/vendor/github.com/golangci/gofmt/gofmt/readme.md index 36a716d819..c2faaab82d 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/readme.md +++ b/vendor/github.com/golangci/gofmt/gofmt/readme.md @@ -1,3 +1,5 @@ # Hard Fork of gofmt 2022-08-31: Sync with go1.18.5 +2023-10-04: Sync with go1.19.13 +2023-10-04: Sync with go1.20.8 diff --git a/vendor/github.com/golangci/gofmt/gofmt/simplify.go b/vendor/github.com/golangci/gofmt/gofmt/simplify.go index 2c75495a69..3b34d562ba 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/simplify.go +++ b/vendor/github.com/golangci/gofmt/gofmt/simplify.go @@ -53,22 +53,26 @@ func (s simplifier) Visit(node ast.Node) ast.Visitor { // can be simplified to: s[a:] // if s is "simple enough" (for now we only accept identifiers) // - // Note: This may not be correct because len may have been redeclared in another - // file belonging to the same package. However, this is extremely unlikely - // and so far (April 2016, after years of supporting this rewrite feature) + // Note: This may not be correct because len may have been redeclared in + // the same package. However, this is extremely unlikely and so far + // (April 2022, after years of supporting this rewrite feature) // has never come up, so let's keep it working as is (see also #15153). + // + // Also note that this code used to use go/ast's object tracking, + // which was removed in exchange for go/parser.Mode.SkipObjectResolution. + // False positives are extremely unlikely as described above, + // and go/ast's object tracking is incomplete in any case. 
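[Editor's note: for context, the Visit case patched in the next hunk implements gofmt's -s slice simplification described in the comment above. A hedged before/after illustration, with invented identifier names; the hunk itself switches the check from go/ast object resolution to a plain identifier-name comparison:]

```go
package main

import "fmt"

func main() {
	buf := []byte("hello world")
	i := 6

	// Before simplification: the high bound is a redundant len() call.
	verbose := buf[i:len(buf)]

	// After `gofmt -s`: an omitted high bound defaults to len(buf),
	// so the two expressions are equivalent.
	simple := buf[i:]

	fmt.Println(string(verbose), string(simple)) // world world
}
```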
if n.Max != nil { // - 3-index slices always require the 2nd and 3rd index break } - if s, _ := n.X.(*ast.Ident); s != nil && s.Obj != nil { - // the array/slice object is a single, resolved identifier + if s, _ := n.X.(*ast.Ident); s != nil { + // the array/slice object is a single identifier if call, _ := n.High.(*ast.CallExpr); call != nil && len(call.Args) == 1 && !call.Ellipsis.IsValid() { // the high expression is a function call with a single argument - if fun, _ := call.Fun.(*ast.Ident); fun != nil && fun.Name == "len" && fun.Obj == nil { - // the function called is "len" and it is not locally defined; and - // because we don't have dot imports, it must be the predefined len() - if arg, _ := call.Args[0].(*ast.Ident); arg != nil && arg.Obj == s.Obj { + if fun, _ := call.Fun.(*ast.Ident); fun != nil && fun.Name == "len" { + // the function called is "len" + if arg, _ := call.Args[0].(*ast.Ident); arg != nil && arg.Name == s.Name { // the len argument is the array/slice object n.High = nil } diff --git a/vendor/github.com/golangci/gofmt/goimports/goimports.go b/vendor/github.com/golangci/gofmt/goimports/goimports.go index 1fa3328f8a..20d92e119c 100644 --- a/vendor/github.com/golangci/gofmt/goimports/goimports.go +++ b/vendor/github.com/golangci/gofmt/goimports/goimports.go @@ -14,7 +14,7 @@ import ( "runtime" ) -// Extracted from golang.org/x/tools@v0.1.12/cmd/goimports/goimports.go +// Extracted from golang.org/x/tools@v0.13.0/cmd/goimports/goimports.go func writeTempFile(dir, prefix string, data []byte) (string, error) { file, err := ioutil.TempFile(dir, prefix) diff --git a/vendor/github.com/golangci/gofmt/goimports/golangci.go b/vendor/github.com/golangci/gofmt/goimports/golangci.go index 7edc37937c..6ff286ae06 100644 --- a/vendor/github.com/golangci/gofmt/goimports/golangci.go +++ b/vendor/github.com/golangci/gofmt/goimports/golangci.go @@ -3,7 +3,7 @@ package goimports import ( "bytes" "fmt" - "io/ioutil" + "os" "golang.org/x/tools/imports" ) @@ -11,7 +11,7 @@ import ( // Run runs goimports. // The local prefixes (comma separated) must be defined through the global variable imports.LocalPrefix. func Run(filename string) ([]byte, error) { - src, err := ioutil.ReadFile(filename) + src, err := os.ReadFile(filename) if err != nil { return nil, err } diff --git a/vendor/github.com/golangci/gofmt/goimports/readme.md b/vendor/github.com/golangci/gofmt/goimports/readme.md index 6c793eb7d1..e57ed550b1 100644 --- a/vendor/github.com/golangci/gofmt/goimports/readme.md +++ b/vendor/github.com/golangci/gofmt/goimports/readme.md @@ -1,3 +1,4 @@ # Hard Fork of goimports 2022-08-31: Sync with golang.org/x/tools v0.1.12 +2023-10-04: Sync with golang.org/x/tools v0.13.0 diff --git a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go index 282d794b82..413e071d65 100644 --- a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go +++ b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go @@ -3,23 +3,79 @@ package main import ( "fmt" "os" + "runtime/debug" "github.com/golangci/golangci-lint/pkg/commands" "github.com/golangci/golangci-lint/pkg/exitcodes" ) var ( + goVersion = "unknown" + // Populated by goreleaser during build - version = "master" + version = "unknown" commit = "?" 
date = "" ) func main() { - e := commands.NewExecutor(version, commit, date) + info := createBuildInfo() - if err := e.Execute(); err != nil { - fmt.Fprintf(os.Stderr, "failed executing command with error %v\n", err) + if err := commands.Execute(info); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Failed executing command with error: %v\n", err) os.Exit(exitcodes.Failure) } } + +func createBuildInfo() commands.BuildInfo { + info := commands.BuildInfo{ + Commit: commit, + Version: version, + GoVersion: goVersion, + Date: date, + } + + buildInfo, available := debug.ReadBuildInfo() + if !available { + return info + } + + info.GoVersion = buildInfo.GoVersion + + if date != "" { + return info + } + + info.Version = buildInfo.Main.Version + + var revision string + var modified string + for _, setting := range buildInfo.Settings { + // The `vcs.xxx` information is only available with `go build`. + // This information is not available with `go install` or `go run`. + switch setting.Key { + case "vcs.time": + info.Date = setting.Value + case "vcs.revision": + revision = setting.Value + case "vcs.modified": + modified = setting.Value + } + } + + if revision == "" { + revision = "unknown" + } + + if modified == "" { + modified = "?" + } + + if info.Date == "" { + info.Date = "(unknown)" + } + + info.Commit = fmt.Sprintf("(%s, modified: %s, mod sum: %q)", revision, modified, buildInfo.Main.Sum) + + return info +} diff --git a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/mod_version.go b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/mod_version.go deleted file mode 100644 index 119a8a60db..0000000000 --- a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/mod_version.go +++ /dev/null @@ -1,17 +0,0 @@ -package main - -import ( - "fmt" - "runtime/debug" -) - -//nolint:gochecknoinits -func init() { - if info, available := debug.ReadBuildInfo(); available { - if date == "" { - version = info.Main.Version - commit = fmt.Sprintf("(unknown, mod sum: %q)", info.Main.Sum) - date = "(unknown)" - } - } -} diff --git a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/plugins.go b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/plugins.go new file mode 100644 index 0000000000..541ff76242 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/plugins.go @@ -0,0 +1,3 @@ +package main + +// This file is used to declare module plugins. diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go index 51c75a77d2..299fd52790 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go @@ -12,6 +12,7 @@ import ( "bytes" "crypto/sha256" "encoding/hex" + "errors" "fmt" "io" "os" @@ -20,8 +21,6 @@ import ( "strings" "time" - "github.com/pkg/errors" - "github.com/golangci/golangci-lint/internal/renameio" "github.com/golangci/golangci-lint/internal/robustio" ) @@ -51,14 +50,13 @@ type Cache struct { // to share a cache directory (for example, if the directory were stored // in a network file system). File locking is notoriously unreliable in // network file systems and may not suffice to protect the cache. 
-// func Open(dir string) (*Cache, error) { info, err := os.Stat(dir) if err != nil { return nil, err } if !info.IsDir() { - return nil, &os.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")} + return nil, &os.PathError{Op: "open", Path: dir, Err: errors.New("not a directory")} } for i := 0; i < 256; i++ { name := filepath.Join(dir, fmt.Sprintf("%02x", i)) @@ -81,7 +79,7 @@ func (c *Cache) fileName(id [HashSize]byte, key string) string { var errMissing = errors.New("cache entry not found") func IsErrMissing(err error) bool { - return errors.Cause(err) == errMissing + return errors.Is(err, errMissing) } const ( @@ -159,7 +157,7 @@ func (c *Cache) get(id ActionID) (Entry, error) { defer f.Close() entry := make([]byte, entrySize+1) // +1 to detect whether f is too long if n, readErr := io.ReadFull(f, entry); n != entrySize || readErr != io.ErrUnexpectedEOF { - return failed(fmt.Errorf("read %d/%d bytes from %s with error %s", n, entrySize, fileName, readErr)) + return failed(fmt.Errorf("read %d/%d bytes from %s with error %w", n, entrySize, fileName, readErr)) } if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' { return failed(fmt.Errorf("bad data in %s", fileName)) @@ -170,10 +168,10 @@ func (c *Cache) get(id ActionID) (Entry, error) { etime := entry[1 : 1+20] var buf [HashSize]byte if _, err = hex.Decode(buf[:], eid); err != nil || buf != id { - return failed(errors.Wrapf(err, "failed to hex decode eid data in %s", fileName)) + return failed(fmt.Errorf("failed to hex decode eid data in %s: %w", fileName, err)) } if _, err = hex.Decode(buf[:], eout); err != nil { - return failed(errors.Wrapf(err, "failed to hex decode eout data in %s", fileName)) + return failed(fmt.Errorf("failed to hex decode eout data in %s: %w", fileName, err)) } i := 0 for i < len(esize) && esize[i] == ' ' { @@ -181,7 +179,7 @@ func (c *Cache) get(id ActionID) (Entry, error) { } size, err := strconv.ParseInt(string(esize[i:]), 10, 64) if err != nil || size < 0 { - return failed(fmt.Errorf("failed to parse esize int from %s with error %s", fileName, err)) + return failed(fmt.Errorf("failed to parse esize int from %s with error %w", fileName, err)) } i = 0 for i < len(etime) && etime[i] == ' ' { @@ -189,11 +187,11 @@ func (c *Cache) get(id ActionID) (Entry, error) { } tm, err := strconv.ParseInt(string(etime[i:]), 10, 64) if err != nil || tm < 0 { - return failed(fmt.Errorf("failed to parse etime int from %s with error %s", fileName, err)) + return failed(fmt.Errorf("failed to parse etime int from %s with error %w", fileName, err)) } if err = c.used(fileName); err != nil { - return failed(errors.Wrapf(err, "failed to mark %s as used", fileName)) + return failed(fmt.Errorf("failed to mark %s as used: %w", fileName, err)) } return Entry{buf, size, time.Unix(0, tm)}, nil @@ -257,7 +255,7 @@ const ( // and to reduce the amount of disk activity caused by using // cache entries, used only updates the mtime if the current // mtime is more than an hour old. This heuristic eliminates -// nearly all of the mtime updates that would otherwise happen, +// nearly all the mtime updates that would otherwise happen, // while still keeping the mtimes useful for cache trimming. 
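[Editor's note: a standalone sketch of the mtime heuristic the comment above describes, assuming the same hour-long refresh interval as the vendored cache; touchIfStale is an illustrative name, and the real code uses an injectable clock (c.now) rather than time.Now:]

```go
package main

import (
	"fmt"
	"os"
	"time"
)

const mtimeInterval = time.Hour

// touchIfStale refreshes a cache entry's mtime only when the current
// mtime is older than mtimeInterval, trading timestamp precision for
// far fewer disk writes while keeping entries trimmable by age.
func touchIfStale(file string) error {
	info, err := os.Stat(file)
	if err != nil {
		return err
	}
	if time.Since(info.ModTime()) < mtimeInterval {
		return nil // recently used; skip the write
	}
	now := time.Now()
	return os.Chtimes(file, now, now)
}

func main() {
	f, _ := os.CreateTemp("", "cache-entry")
	defer os.Remove(f.Name())
	f.Close()

	fmt.Println(touchIfStale(f.Name())) // <nil>: fresh file, nothing to update
}
```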
func (c *Cache) used(file string) error { info, err := os.Stat(file) @@ -265,7 +263,7 @@ func (c *Cache) used(file string) error { if os.IsNotExist(err) { return errMissing } - return errors.Wrapf(err, "failed to stat file %s", file) + return fmt.Errorf("failed to stat file %s: %w", file, err) } if c.now().Sub(info.ModTime()) < mtimeInterval { @@ -273,7 +271,7 @@ func (c *Cache) used(file string) error { } if err := os.Chtimes(file, c.now(), c.now()); err != nil { - return errors.Wrapf(err, "failed to change time of file %s", file) + return fmt.Errorf("failed to change time of file %s: %w", file, err) } return nil @@ -311,7 +309,7 @@ func (c *Cache) trimSubdir(subdir string, cutoff time.Time) { // Read all directory entries from subdir before removing // any files, in case removing files invalidates the file offset // in the directory scan. Also, ignore error from f.Readdirnames, - // because we don't care about reporting the error and we still + // because we don't care about reporting the error, and we still // want to process any entries found before the error. f, err := os.Open(subdir) if err != nil { @@ -370,7 +368,7 @@ func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify // Truncate the file only *after* writing it. // (This should be a no-op, but truncate just in case of previous corruption.) // - // This differs from ioutil.WriteFile, which truncates to 0 *before* writing + // This differs from os.WriteFile, which truncates to 0 *before* writing // via os.O_TRUNC. Truncating only after writing ensures that a second write // of the same content to the same file is idempotent, and does not — even // temporarily! — undo the effect of the first write. @@ -386,7 +384,7 @@ func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify return err } if err = os.Chtimes(file, c.now(), c.now()); err != nil { // mainly for tests - return errors.Wrapf(err, "failed to change time of file %s", file) + return fmt.Errorf("failed to change time of file %s: %w", file, err) } return nil @@ -444,7 +442,7 @@ func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { if f, openErr := os.Open(name); openErr == nil { h := sha256.New() if _, copyErr := io.Copy(h, f); copyErr != nil { - return errors.Wrap(copyErr, "failed to copy to sha256") + return fmt.Errorf("failed to copy to sha256: %w", copyErr) } f.Close() @@ -498,13 +496,13 @@ func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { return err } if n, wErr := h.Write(buf); n != len(buf) { - return fmt.Errorf("wrote to hash %d/%d bytes with error %s", n, len(buf), wErr) + return fmt.Errorf("wrote to hash %d/%d bytes with error %w", n, len(buf), wErr) } sum := h.Sum(nil) if !bytes.Equal(sum, out[:]) { _ = f.Truncate(0) - return fmt.Errorf("file content changed underfoot") + return errors.New("file content changed underfoot") } // Commit cache file entry. 
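[Editor's note: the hunks throughout this file follow one pattern, migrating from github.com/pkg/errors to the standard library: errors.Wrap/Wrapf becomes fmt.Errorf with the %w verb, and errors.Cause comparisons become errors.Is. A minimal sketch of the equivalence; the sentinel and message are illustrative:]

```go
package main

import (
	"errors"
	"fmt"
)

var errMissing = errors.New("cache entry not found")

func lookup(name string) error {
	// Old style: errors.Wrapf(errMissing, "failed to read %s", name).
	// New style: fmt.Errorf with %w keeps the sentinel in the error chain.
	return fmt.Errorf("failed to read %s: %w", name, errMissing)
}

func main() {
	err := lookup("a1/b2")

	// Old style: errors.Cause(err) == errMissing, which only unwrapped
	// pkg/errors wrappers. errors.Is walks any chain built with %w
	// (or a custom Unwrap method).
	fmt.Println(errors.Is(err, errMissing)) // true
}
```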
@@ -520,7 +518,7 @@ func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { return err } if err = os.Chtimes(name, c.now(), c.now()); err != nil { // mainly for tests - return errors.Wrapf(err, "failed to change time of file %s", name) + return fmt.Errorf("failed to change time of file %s: %w", name, err) } return nil diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/default.go b/vendor/github.com/golangci/golangci-lint/internal/cache/default.go index e8866cb30c..399cc84cf0 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/cache/default.go +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/default.go @@ -6,13 +6,14 @@ package cache import ( "fmt" - "io/ioutil" "log" "os" "path/filepath" "sync" ) +const envGolangciLintCache = "GOLANGCI_LINT_CACHE" + // Default returns the default cache to use. func Default() (*Cache, error) { defaultOnce.Do(initDefaultCache) @@ -39,7 +40,7 @@ func initDefaultCache() { } if _, err := os.Stat(filepath.Join(dir, "README")); err != nil { // Best effort. - if wErr := ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666); wErr != nil { + if wErr := os.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666); wErr != nil { log.Fatalf("Failed to write README file to cache dir %s: %s", dir, err) } } @@ -65,19 +66,19 @@ func DefaultDir() string { // otherwise distinguish between an explicit "off" and a UserCacheDir error. defaultDirOnce.Do(func() { - defaultDir = os.Getenv("GOLANGCI_LINT_CACHE") + defaultDir = os.Getenv(envGolangciLintCache) if filepath.IsAbs(defaultDir) { return } if defaultDir != "" { - defaultDirErr = fmt.Errorf("GOLANGCI_LINT_CACHE is not an absolute path") + defaultDirErr = fmt.Errorf("%s is not an absolute path", envGolangciLintCache) return } // Compute default location. dir, err := os.UserCacheDir() if err != nil { - defaultDirErr = fmt.Errorf("GOLANGCI_LINT_CACHE is not defined and %v", err) + defaultDirErr = fmt.Errorf("%s is not defined and %w", envGolangciLintCache, err) return } defaultDir = filepath.Join(dir, "golangci-lint") diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/readme.md b/vendor/github.com/golangci/golangci-lint/internal/cache/readme.md new file mode 100644 index 0000000000..b469711edd --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/readme.md @@ -0,0 +1,18 @@ +# cache + +Extracted from go/src/cmd/go/internal/cache/ +I don't know what version of Go this package was pulled from. 
+ +Adapted for golangci-lint: +- https://github.com/golangci/golangci-lint/pull/699 +- https://github.com/golangci/golangci-lint/pull/779 +- https://github.com/golangci/golangci-lint/pull/788 +- https://github.com/golangci/golangci-lint/pull/808 +- https://github.com/golangci/golangci-lint/pull/1063 +- https://github.com/golangci/golangci-lint/pull/1070 +- https://github.com/golangci/golangci-lint/pull/1162 +- https://github.com/golangci/golangci-lint/pull/2318 +- https://github.com/golangci/golangci-lint/pull/2352 +- https://github.com/golangci/golangci-lint/pull/3012 +- https://github.com/golangci/golangci-lint/pull/3096 +- https://github.com/golangci/golangci-lint/pull/3204 diff --git a/vendor/github.com/golangci/golangci-lint/internal/errorutil/errors.go b/vendor/github.com/golangci/golangci-lint/internal/errorutil/errors.go index 5cb86d6698..c8a3a0357e 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/errorutil/errors.go +++ b/vendor/github.com/golangci/golangci-lint/internal/errorutil/errors.go @@ -6,11 +6,11 @@ import ( // PanicError can be used to not print stacktrace twice type PanicError struct { - recovered interface{} + recovered any stack []byte } -func NewPanicError(recovered interface{}, stack []byte) *PanicError { +func NewPanicError(recovered any, stack []byte) *PanicError { return &PanicError{recovered: recovered, stack: stack} } diff --git a/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go b/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go index 86007d0427..3b3422eb7a 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go +++ b/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go @@ -4,12 +4,12 @@ import ( "bytes" "encoding/gob" "encoding/hex" + "errors" "fmt" "runtime" "sort" "sync" - "github.com/pkg/errors" "golang.org/x/tools/go/packages" "github.com/golangci/golangci-lint/internal/cache" @@ -26,7 +26,7 @@ const ( ) // Cache is a per-package data cache. A cached data is invalidated when -// package or it's dependencies change. +// package, or it's dependencies change. 
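[Editor's note: the Put/Get methods below serialize cached per-package data with encoding/gob before handing the bytes to the low-level cache, and decode on the way out. A self-contained round-trip for reference; the payload is illustrative:]

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

func main() {
	// Encode an arbitrary value to bytes, as Put does before storing.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode([]string{"issue A", "issue B"}); err != nil {
		panic(err)
	}

	// Decode into a pointer of the same type, as Get does after loading.
	var out []string
	if err := gob.NewDecoder(bytes.NewReader(buf.Bytes())).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // [issue A issue B]
}
```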
type Cache struct { lowLevelCache *cache.Cache pkgHashes sync.Map @@ -54,14 +54,14 @@ func (c *Cache) Trim() { }) } -func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data interface{}) error { +func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data any) error { var err error buf := &bytes.Buffer{} c.sw.TrackStage("gob", func() { err = gob.NewEncoder(buf).Encode(data) }) if err != nil { - return errors.Wrap(err, "failed to gob encode") + return fmt.Errorf("failed to gob encode: %w", err) } var aID cache.ActionID @@ -71,13 +71,13 @@ func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data inter if err == nil { subkey, subkeyErr := cache.Subkey(aID, key) if subkeyErr != nil { - err = errors.Wrap(subkeyErr, "failed to build subkey") + err = fmt.Errorf("failed to build subkey: %w", subkeyErr) } aID = subkey } }) if err != nil { - return errors.Wrapf(err, "failed to calculate package %s action id", pkg.Name) + return fmt.Errorf("failed to calculate package %s action id: %w", pkg.Name, err) } c.ioSem <- struct{}{} c.sw.TrackStage("cache io", func() { @@ -85,7 +85,7 @@ func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data inter }) <-c.ioSem if err != nil { - return errors.Wrapf(err, "failed to save data to low-level cache by key %s for package %s", key, pkg.Name) + return fmt.Errorf("failed to save data to low-level cache by key %s for package %s: %w", key, pkg.Name, err) } return nil @@ -93,7 +93,7 @@ func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data inter var ErrMissing = errors.New("missing data") -func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data interface{}) error { +func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data any) error { var aID cache.ActionID var err error c.sw.TrackStage("key build", func() { @@ -101,13 +101,13 @@ func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data inter if err == nil { subkey, subkeyErr := cache.Subkey(aID, key) if subkeyErr != nil { - err = errors.Wrap(subkeyErr, "failed to build subkey") + err = fmt.Errorf("failed to build subkey: %w", subkeyErr) } aID = subkey } }) if err != nil { - return errors.Wrapf(err, "failed to calculate package %s action id", pkg.Name) + return fmt.Errorf("failed to calculate package %s action id: %w", pkg.Name, err) } var b []byte @@ -120,14 +120,14 @@ func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data inter if cache.IsErrMissing(err) { return ErrMissing } - return errors.Wrapf(err, "failed to get data from low-level cache by key %s for package %s", key, pkg.Name) + return fmt.Errorf("failed to get data from low-level cache by key %s for package %s: %w", key, pkg.Name, err) } c.sw.TrackStage("gob", func() { err = gob.NewDecoder(bytes.NewReader(b)).Decode(data) }) if err != nil { - return errors.Wrap(err, "failed to gob decode") + return fmt.Errorf("failed to gob decode: %w", err) } return nil @@ -136,12 +136,12 @@ func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data inter func (c *Cache) pkgActionID(pkg *packages.Package, mode HashMode) (cache.ActionID, error) { hash, err := c.packageHash(pkg, mode) if err != nil { - return cache.ActionID{}, errors.Wrap(err, "failed to get package hash") + return cache.ActionID{}, fmt.Errorf("failed to get package hash: %w", err) } key, err := cache.NewHash("action ID") if err != nil { - return cache.ActionID{}, errors.Wrap(err, "failed to make a hash") + return cache.ActionID{}, 
fmt.Errorf("failed to make a hash: %w", err) } fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) fmt.Fprintf(key, "pkghash %s\n", hash) @@ -167,7 +167,7 @@ func (c *Cache) packageHash(pkg *packages.Package, mode HashMode) (string, error key, err := cache.NewHash("package hash") if err != nil { - return "", errors.Wrap(err, "failed to make a hash") + return "", fmt.Errorf("failed to make a hash: %w", err) } fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) @@ -176,7 +176,7 @@ func (c *Cache) packageHash(pkg *packages.Package, mode HashMode) (string, error h, fErr := cache.FileHash(f) <-c.ioSem if fErr != nil { - return "", errors.Wrapf(fErr, "failed to calculate file %s hash", f) + return "", fmt.Errorf("failed to calculate file %s hash: %w", f, fErr) } fmt.Fprintf(key, "file %s %x\n", f, h) } @@ -199,7 +199,7 @@ func (c *Cache) packageHash(pkg *packages.Package, mode HashMode) (string, error depHash, depErr := c.packageHash(dep, depMode) if depErr != nil { - return errors.Wrapf(depErr, "failed to calculate hash for dependency %s with mode %d", dep.Name, depMode) + return fmt.Errorf("failed to calculate hash for dependency %s with mode %d: %w", dep.Name, depMode, depErr) } fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, depHash) diff --git a/vendor/github.com/golangci/golangci-lint/internal/renameio/readme.md b/vendor/github.com/golangci/golangci-lint/internal/renameio/readme.md new file mode 100644 index 0000000000..36ec6ed499 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/renameio/readme.md @@ -0,0 +1,10 @@ +# renameio + +Extracted from go/src/cmd/go/internal/renameio/ +I don't know what version of Go this package was pulled from. + +Adapted for golangci-lint: +- https://github.com/golangci/golangci-lint/pull/699 +- https://github.com/golangci/golangci-lint/pull/808 +- https://github.com/golangci/golangci-lint/pull/1063 +- https://github.com/golangci/golangci-lint/pull/3204 diff --git a/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go b/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go index fa9d93bf7a..2f88f4f7cc 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go +++ b/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go @@ -24,7 +24,7 @@ func Pattern(filename string) string { return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix) } -// WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary +// WriteFile is like os.WriteFile, but first writes data to an arbitrary // file in the same directory as filename, then renames it atomically to the // final name. // @@ -79,7 +79,7 @@ func tempFile(dir, prefix string, perm os.FileMode) (f *os.File, err error) { return } -// ReadFile is like ioutil.ReadFile, but on Windows retries spurious errors that +// ReadFile is like os.ReadFile, but on Windows retries spurious errors that // may occur if the file is concurrently replaced. 
// // Errors are classified heuristically and retries are bounded, so even this diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/readme.md b/vendor/github.com/golangci/golangci-lint/internal/robustio/readme.md new file mode 100644 index 0000000000..7c7ba0483a --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/readme.md @@ -0,0 +1,6 @@ +# robustio + +Extracted from go1.19.1/src/cmd/go/internal/robustio + +There is only one modification: +- ERROR_SHARING_VIOLATION extracted from go1.19.1/src/internal/syscall/windows/syscall_windows.go to remove the dependencies to `internal/syscall/windows` diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go index 76e47ad1ff..15b33773cf 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go @@ -22,7 +22,7 @@ func Rename(oldpath, newpath string) error { return rename(oldpath, newpath) } -// ReadFile is like ioutil.ReadFile, but on Windows retries errors that may +// ReadFile is like os.ReadFile, but on Windows retries errors that may // occur if the file is concurrently replaced. // // (See golang.org/issue/31247 and golang.org/issue/32188.) @@ -42,9 +42,9 @@ func RemoveAll(path string) error { // in this package attempt to mitigate. // // Errors considered ephemeral include: -// - syscall.ERROR_ACCESS_DENIED -// - syscall.ERROR_FILE_NOT_FOUND -// - internal/syscall/windows.ERROR_SHARING_VIOLATION +// - syscall.ERROR_ACCESS_DENIED +// - syscall.ERROR_FILE_NOT_FOUND +// - internal/syscall/windows.ERROR_SHARING_VIOLATION // // This set may be expanded in the future; programs must not rely on the // non-ephemerality of any given error. diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go index 1ac0d10d7f..99fd8ebc2f 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go @@ -5,7 +5,7 @@ package robustio import ( - "os" + "errors" "syscall" ) @@ -13,16 +13,8 @@ const errFileNotFound = syscall.ENOENT // isEphemeralError returns true if err may be resolved by waiting. func isEphemeralError(err error) bool { - switch werr := err.(type) { - case *os.PathError: - err = werr.Err - case *os.LinkError: - err = werr.Err - case *os.SyscallError: - err = werr.Err - - } - if errno, ok := err.(syscall.Errno); ok { + var errno syscall.Errno + if errors.As(err, &errno) { return errno == errFileNotFound } return false diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go index e0bf5b9b3b..c56e36ca62 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go @@ -2,21 +2,19 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
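[Editor's note: the first hunk below swaps the pre-Go-1.17 build-tag comment for the //go:build form; both select the same platforms. A sketch of the transitional dual-line header, which gofmt keeps in sync when both lines are present:]

```go
//go:build windows || darwin
// +build windows darwin

// The //go:build form (Go 1.17+) uses explicit boolean operators; in the
// legacy form above, a space means OR and a comma means AND, so the two
// lines express the same constraint.
package robustio
```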
-// +build windows darwin +//go:build windows || darwin package robustio import ( - "io/ioutil" + "errors" "math/rand" "os" "syscall" "time" ) -const arbitraryTimeout = 500 * time.Millisecond - -const ERROR_SHARING_VIOLATION = 32 +const arbitraryTimeout = 2000 * time.Millisecond // retry retries ephemeral errors from f up to an arbitrary timeout // to work around filesystem flakiness on Windows and Darwin. @@ -33,7 +31,8 @@ func retry(f func() (err error, mayRetry bool)) error { return err } - if errno, ok := err.(syscall.Errno); ok && (lowestErrno == 0 || errno < lowestErrno) { + var errno syscall.Errno + if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { bestErr = err lowestErrno = errno } else if bestErr == nil { @@ -54,7 +53,7 @@ func retry(f func() (err error, mayRetry bool)) error { // rename is like os.Rename, but retries ephemeral errors. // -// On windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with +// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with // MOVEFILE_REPLACE_EXISTING. // // Windows also provides a different system call, ReplaceFile, @@ -70,17 +69,16 @@ func rename(oldpath, newpath string) (err error) { }) } -// readFile is like ioutil.ReadFile, but retries ephemeral errors. +// readFile is like os.ReadFile, but retries ephemeral errors. func readFile(filename string) ([]byte, error) { var b []byte err := retry(func() (err error, mayRetry bool) { - b, err = ioutil.ReadFile(filename) + b, err = os.ReadFile(filename) // Unlike in rename, we do not retry errFileNotFound here: it can occur // as a spurious error, but the file may also genuinely not exist, so the // increase in robustness is probably not worth the extra latency. - - return err, isEphemeralError(err) && err != errFileNotFound + return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound) }) return b, err } diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go index a2428856f2..da9a46e4fa 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go @@ -2,12 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//+build !windows,!darwin +//go:build !windows && !darwin package robustio import ( - "io/ioutil" "os" ) @@ -16,7 +15,7 @@ func rename(oldpath, newpath string) error { } func readFile(filename string) ([]byte, error) { - return ioutil.ReadFile(filename) + return os.ReadFile(filename) } func removeAll(path string) error { diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go index a35237d44a..fe1728954c 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go @@ -5,23 +5,20 @@ package robustio import ( - "os" + "errors" "syscall" ) const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND +// ERROR_SHARING_VIOLATION (ldez) extract from go1.19.1/src/internal/syscall/windows/syscall_windows.go. +// This is the only modification of this file. +const ERROR_SHARING_VIOLATION syscall.Errno = 32 + // isEphemeralError returns true if err may be resolved by waiting. 
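[Editor's note: the rewrite below replaces a manual type switch over *os.PathError, *os.LinkError, and *os.SyscallError with errors.As, which unwraps through any of those wrappers to reach the underlying syscall.Errno. A small demonstration; the ENOENT comparison holds on Unix, while Windows reports different errno values:]

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

func main() {
	// os.Open wraps the raw errno in an *os.PathError.
	_, err := os.Open("/definitely/not/a/real/path")

	// errors.As walks the chain until it finds a syscall.Errno,
	// replacing the old hand-written unwrapping of each os error type.
	var errno syscall.Errno
	if errors.As(err, &errno) {
		fmt.Println("is ENOENT (on Unix):", errno == syscall.ENOENT)
	}
}
```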
func isEphemeralError(err error) bool { - switch werr := err.(type) { - case *os.PathError: - err = werr.Err - case *os.LinkError: - err = werr.Err - case *os.SyscallError: - err = werr.Err - } - if errno, ok := err.(syscall.Errno); ok { + var errno syscall.Errno + if errors.As(err, &errno) { switch errno { case syscall.ERROR_ACCESS_DENIED, syscall.ERROR_FILE_NOT_FOUND, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go index 359e2d63c7..4aa8130518 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go @@ -12,70 +12,68 @@ import ( "github.com/golangci/golangci-lint/pkg/logutils" ) -func (e *Executor) initCache() { +type cacheCommand struct { + cmd *cobra.Command +} + +func newCacheCommand() *cacheCommand { + c := &cacheCommand{} + cacheCmd := &cobra.Command{ Use: "cache", Short: "Cache control and information", - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint cache") - } - if err := cmd.Help(); err != nil { - e.log.Fatalf("Can't run cache: %s", err) - } + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() }, } - e.rootCmd.AddCommand(cacheCmd) - cacheCmd.AddCommand(&cobra.Command{ - Use: "clean", - Short: "Clean cache", - Run: e.executeCleanCache, - }) - cacheCmd.AddCommand(&cobra.Command{ - Use: "status", - Short: "Show cache status", - Run: e.executeCacheStatus, - }) + cacheCmd.AddCommand( + &cobra.Command{ + Use: "clean", + Short: "Clean cache", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + RunE: c.executeClean, + }, + &cobra.Command{ + Use: "status", + Short: "Show cache status", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + Run: c.executeStatus, + }, + ) - // TODO: add trim command? 
-} + c.cmd = cacheCmd -func (e *Executor) executeCleanCache(_ *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint cache clean") - } + return c +} +func (c *cacheCommand) executeClean(_ *cobra.Command, _ []string) error { cacheDir := cache.DefaultDir() + if err := os.RemoveAll(cacheDir); err != nil { - e.log.Fatalf("Failed to remove dir %s: %s", cacheDir, err) + return fmt.Errorf("failed to remove dir %s: %w", cacheDir, err) } - os.Exit(0) + return nil } -func (e *Executor) executeCacheStatus(_ *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint cache status") - } - +func (c *cacheCommand) executeStatus(_ *cobra.Command, _ []string) { cacheDir := cache.DefaultDir() - fmt.Fprintf(logutils.StdOut, "Dir: %s\n", cacheDir) + _, _ = fmt.Fprintf(logutils.StdOut, "Dir: %s\n", cacheDir) + cacheSizeBytes, err := dirSizeBytes(cacheDir) if err == nil { - fmt.Fprintf(logutils.StdOut, "Size: %s\n", fsutils.PrettifyBytesCount(cacheSizeBytes)) + _, _ = fmt.Fprintf(logutils.StdOut, "Size: %s\n", fsutils.PrettifyBytesCount(cacheSizeBytes)) } - - os.Exit(0) } func dirSizeBytes(path string) (int64, error) { var size int64 err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { + if err == nil && !info.IsDir() { size += info.Size() } return err diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/completion.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/completion.go deleted file mode 100644 index e2be6f2929..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/completion.go +++ /dev/null @@ -1,85 +0,0 @@ -package commands - -import ( - "fmt" - "os" - - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -func (e *Executor) initCompletion() { - completionCmd := &cobra.Command{ - Use: "completion", - Short: "Output completion script", - } - e.rootCmd.AddCommand(completionCmd) - - bashCmd := &cobra.Command{ - Use: "bash", - Short: "Output bash completion script", - RunE: e.executeBashCompletion, - } - completionCmd.AddCommand(bashCmd) - - zshCmd := &cobra.Command{ - Use: "zsh", - Short: "Output zsh completion script", - RunE: e.executeZshCompletion, - } - completionCmd.AddCommand(zshCmd) - - fishCmd := &cobra.Command{ - Use: "fish", - Short: "Output fish completion script", - RunE: e.executeFishCompletion, - } - completionCmd.AddCommand(fishCmd) - - powerShell := &cobra.Command{ - Use: "powershell", - Short: "Output powershell completion script", - RunE: e.executePowerShellCompletion, - } - completionCmd.AddCommand(powerShell) -} - -func (e *Executor) executeBashCompletion(cmd *cobra.Command, args []string) error { - err := cmd.Root().GenBashCompletion(os.Stdout) - if err != nil { - return errors.Wrap(err, "unable to generate bash completions: %v") - } - - return nil -} - -func (e *Executor) executeZshCompletion(cmd *cobra.Command, args []string) error { - err := cmd.Root().GenZshCompletion(os.Stdout) - if err != nil { - return errors.Wrap(err, "unable to generate zsh completions: %v") - } - // Add extra compdef directive to support sourcing command directly. 
- // https://github.com/spf13/cobra/issues/881 - // https://github.com/spf13/cobra/pull/887 - fmt.Println("compdef _golangci-lint golangci-lint") - - return nil -} - -func (e *Executor) executeFishCompletion(cmd *cobra.Command, args []string) error { - err := cmd.Root().GenFishCompletion(os.Stdout, true) - if err != nil { - return errors.Wrap(err, "generate fish completion") - } - - return nil -} - -func (e *Executor) executePowerShellCompletion(cmd *cobra.Command, args []string) error { - err := cmd.Root().GenPowerShellCompletion(os.Stdout) - if err != nil { - return errors.Wrap(err, "generate powershell completion") - } - - return nil -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/config.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/config.go index 4b63e2e523..cfb7d67ac3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/config.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/config.go @@ -4,63 +4,123 @@ import ( "fmt" "os" + "github.com/fatih/color" "github.com/spf13/cobra" "github.com/spf13/viper" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/exitcodes" "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" ) -func (e *Executor) initConfig() { - cmd := &cobra.Command{ +type configCommand struct { + viper *viper.Viper + cmd *cobra.Command + + opts config.LoaderOptions + verifyOpts verifyOptions + + buildInfo BuildInfo + + log logutils.Log +} + +func newConfigCommand(log logutils.Log, info BuildInfo) *configCommand { + c := &configCommand{ + viper: viper.New(), + log: log, + buildInfo: info, + } + + configCmd := &cobra.Command{ Use: "config", - Short: "Config", - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint config") - } - if err := cmd.Help(); err != nil { - e.log.Fatalf("Can't run help: %s", err) - } + Short: "Config file information", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() }, + PersistentPreRunE: c.preRunE, } - e.rootCmd.AddCommand(cmd) - pathCmd := &cobra.Command{ - Use: "path", - Short: "Print used config path", - Run: e.executePathCmd, + verifyCommand := &cobra.Command{ + Use: "verify", + Short: "Verify configuration against JSON schema", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + RunE: c.executeVerify, } - e.initRunConfiguration(pathCmd) // allow --config - cmd.AddCommand(pathCmd) + + configCmd.AddCommand( + &cobra.Command{ + Use: "path", + Short: "Print used config path", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + Run: c.executePath, + }, + verifyCommand, + ) + + flagSet := configCmd.PersistentFlags() + flagSet.SortFlags = false // sort them as they are defined here + + setupConfigFileFlagSet(flagSet, &c.opts) + + // ex: --schema jsonschema/golangci.next.jsonschema.json + verifyFlagSet := verifyCommand.Flags() + verifyFlagSet.StringVar(&c.verifyOpts.schemaURL, "schema", "", color.GreenString("JSON schema URL")) + _ = verifyFlagSet.MarkHidden("schema") + + c.cmd = configCmd + + return c } -func (e *Executor) getUsedConfig() string { - usedConfigFile := viper.ConfigFileUsed() - if usedConfigFile == "" { - return "" - } +func (c *configCommand) preRunE(cmd *cobra.Command, _ []string) error { + // The command doesn't depend on the real configuration. + // It only needs to know the path of the configuration file. 
+ cfg := config.NewDefault() - prettyUsedConfigFile, err := fsutils.ShortestRelPath(usedConfigFile, "") - if err != nil { - e.log.Warnf("Can't pretty print config file path: %s", err) - return usedConfigFile + // Hack to hide deprecation messages related to `--skip-dirs-use-default`: + // Flags are not bound then the default values, defined only through flags, are not applied. + // In this command, file path and file information are the only requirements, i.e. it don't need flag values. + // + // TODO(ldez) add an option (check deprecation) to `Loader.Load()` but this require a dedicated PR. + cfg.Run.UseDefaultSkipDirs = true + + loader := config.NewLoader(c.log.Child(logutils.DebugKeyConfigReader), c.viper, cmd.Flags(), c.opts, cfg) + + if err := loader.Load(); err != nil { + return fmt.Errorf("can't load config: %w", err) } - return prettyUsedConfigFile + return nil } -func (e *Executor) executePathCmd(_ *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint config path") +func (c *configCommand) executePath(cmd *cobra.Command, _ []string) { + usedConfigFile := c.getUsedConfig() + if usedConfigFile == "" { + c.log.Warnf("No config file detected") + os.Exit(exitcodes.NoConfigFileDetected) } - usedConfigFile := e.getUsedConfig() + cmd.Println(usedConfigFile) +} + +// getUsedConfig returns the resolved path to the golangci config file, +// or the empty string if no configuration could be found. +func (c *configCommand) getUsedConfig() string { + usedConfigFile := c.viper.ConfigFileUsed() if usedConfigFile == "" { - e.log.Warnf("No config file detected") - os.Exit(exitcodes.NoConfigFileDetected) + return "" + } + + prettyUsedConfigFile, err := fsutils.ShortestRelPath(usedConfigFile, "") + if err != nil { + c.log.Warnf("Can't pretty print config file path: %s", err) + return usedConfigFile } - fmt.Println(usedConfigFile) - os.Exit(0) + return prettyUsedConfigFile } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go new file mode 100644 index 0000000000..291c99a025 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go @@ -0,0 +1,176 @@ +package commands + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + hcversion "github.com/hashicorp/go-version" + "github.com/pelletier/go-toml/v2" + "github.com/santhosh-tekuri/jsonschema/v5" + _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "gopkg.in/yaml.v3" + + "github.com/golangci/golangci-lint/pkg/exitcodes" +) + +type verifyOptions struct { + schemaURL string // For debugging purpose only (Flag only). 
+} + +func (c *configCommand) executeVerify(cmd *cobra.Command, _ []string) error { + usedConfigFile := c.getUsedConfig() + if usedConfigFile == "" { + c.log.Warnf("No config file detected") + os.Exit(exitcodes.NoConfigFileDetected) + } + + schemaURL, err := createSchemaURL(cmd.Flags(), c.buildInfo) + if err != nil { + return fmt.Errorf("get JSON schema: %w", err) + } + + err = validateConfiguration(schemaURL, usedConfigFile) + if err != nil { + var v *jsonschema.ValidationError + if !errors.As(err, &v) { + return fmt.Errorf("[%s] validate: %w", usedConfigFile, err) + } + + detail := v.DetailedOutput() + + printValidationDetail(cmd, &detail) + + return fmt.Errorf("the configuration contains invalid elements") + } + + return nil +} + +func createSchemaURL(flags *pflag.FlagSet, buildInfo BuildInfo) (string, error) { + schemaURL, err := flags.GetString("schema") + if err != nil { + return "", fmt.Errorf("get schema flag: %w", err) + } + + if schemaURL != "" { + return schemaURL, nil + } + + switch { + case buildInfo.Version != "" && buildInfo.Version != "(devel)": + version, err := hcversion.NewVersion(buildInfo.Version) + if err != nil { + return "", fmt.Errorf("parse version: %w", err) + } + + schemaURL = fmt.Sprintf("https://golangci-lint.run/jsonschema/golangci.v%d.%d.jsonschema.json", + version.Segments()[0], version.Segments()[1]) + + case buildInfo.Commit != "" && buildInfo.Commit != "?": + if buildInfo.Commit == "unknown" { + return "", errors.New("unknown commit information") + } + + commit := buildInfo.Commit + + if strings.HasPrefix(commit, "(") { + c, _, ok := strings.Cut(strings.TrimPrefix(commit, "("), ",") + if !ok { + return "", errors.New("commit information not found") + } + + commit = c + } + + schemaURL = fmt.Sprintf("https://raw.githubusercontent.com/golangci/golangci-lint/%s/jsonschema/golangci.next.jsonschema.json", + commit) + + default: + return "", errors.New("version not found") + } + + return schemaURL, nil +} + +func validateConfiguration(schemaPath, targetFile string) error { + compiler := jsonschema.NewCompiler() + compiler.Draft = jsonschema.Draft7 + + schema, err := compiler.Compile(schemaPath) + if err != nil { + return fmt.Errorf("compile schema: %w", err) + } + + var m any + + switch strings.ToLower(filepath.Ext(targetFile)) { + case ".yaml", ".yml", ".json": + m, err = decodeYamlFile(targetFile) + if err != nil { + return err + } + + case ".toml": + m, err = decodeTomlFile(targetFile) + if err != nil { + return err + } + + default: + // unsupported + return errors.New("unsupported configuration format") + } + + return schema.Validate(m) +} + +func printValidationDetail(cmd *cobra.Command, detail *jsonschema.Detailed) { + if detail.Error != "" { + cmd.PrintErrf("jsonschema: %q does not validate with %q: %s\n", + strings.ReplaceAll(strings.TrimPrefix(detail.InstanceLocation, "/"), "/", "."), detail.KeywordLocation, detail.Error) + } + + for _, d := range detail.Errors { + d := d + printValidationDetail(cmd, &d) + } +} + +func decodeYamlFile(filename string) (any, error) { + file, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("[%s] file open: %w", filename, err) + } + + defer func() { _ = file.Close() }() + + var m any + err = yaml.NewDecoder(file).Decode(&m) + if err != nil { + return nil, fmt.Errorf("[%s] YAML decode: %w", filename, err) + } + + return m, nil +} + +func decodeTomlFile(filename string) (any, error) { + file, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("[%s] file open: %w", filename, err) + } + + 
defer func() { _ = file.Close() }() + + var m any + err = toml.NewDecoder(file).Decode(&m) + if err != nil { + return nil, fmt.Errorf("[%s] TOML decode: %w", filename, err) + } + + return m, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/custom.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/custom.go new file mode 100644 index 0000000000..1bc9f90146 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/custom.go @@ -0,0 +1,79 @@ +package commands + +import ( + "fmt" + "log" + "os" + + "github.com/spf13/cobra" + + "github.com/golangci/golangci-lint/pkg/commands/internal" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +const envKeepTempFiles = "CUSTOM_GCL_KEEP_TEMP_FILES" + +type customCommand struct { + cmd *cobra.Command + + cfg *internal.Configuration + + log logutils.Log +} + +func newCustomCommand(logger logutils.Log) *customCommand { + c := &customCommand{log: logger} + + customCmd := &cobra.Command{ + Use: "custom", + Short: "Build a version of golangci-lint with custom linters", + Args: cobra.NoArgs, + PreRunE: c.preRunE, + RunE: c.runE, + SilenceUsage: true, + } + + c.cmd = customCmd + + return c +} + +func (c *customCommand) preRunE(_ *cobra.Command, _ []string) error { + cfg, err := internal.LoadConfiguration() + if err != nil { + return err + } + + err = cfg.Validate() + if err != nil { + return err + } + + c.cfg = cfg + + return nil +} + +func (c *customCommand) runE(cmd *cobra.Command, _ []string) error { + tmp, err := os.MkdirTemp(os.TempDir(), "custom-gcl") + if err != nil { + return fmt.Errorf("create temporary directory: %w", err) + } + + defer func() { + if os.Getenv(envKeepTempFiles) != "" { + log.Printf("WARN: The env var %s has been detected: the temporary directory is preserved: %s", envKeepTempFiles, tmp) + + return + } + + _ = os.RemoveAll(tmp) + }() + + err = internal.NewBuilder(c.log, c.cfg, tmp).Build(cmd.Context()) + if err != nil { + return fmt.Errorf("build process: %w", err) + } + + return nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go deleted file mode 100644 index a060709ef7..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go +++ /dev/null @@ -1,251 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "crypto/sha256" - "io" - "os" - "path/filepath" - "strings" - "time" - - "github.com/fatih/color" - "github.com/gofrs/flock" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "gopkg.in/yaml.v3" - - "github.com/golangci/golangci-lint/internal/cache" - "github.com/golangci/golangci-lint/internal/pkgcache" - "github.com/golangci/golangci-lint/pkg/config" - "github.com/golangci/golangci-lint/pkg/fsutils" - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load" - "github.com/golangci/golangci-lint/pkg/goutil" - "github.com/golangci/golangci-lint/pkg/lint" - "github.com/golangci/golangci-lint/pkg/lint/lintersdb" - "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/report" - "github.com/golangci/golangci-lint/pkg/timeutils" -) - -type Executor struct { - rootCmd *cobra.Command - runCmd *cobra.Command - lintersCmd *cobra.Command - - exitCode int - version, commit, date string - - cfg *config.Config - log logutils.Log - reportData report.Data - DBManager *lintersdb.Manager - EnabledLintersSet *lintersdb.EnabledSet - contextLoader *lint.ContextLoader - goenv *goutil.Env - 
fileCache *fsutils.FileCache - lineCache *fsutils.LineCache - pkgCache *pkgcache.Cache - debugf logutils.DebugFunc - sw *timeutils.Stopwatch - - loadGuard *load.Guard - flock *flock.Flock -} - -func NewExecutor(version, commit, date string) *Executor { - startedAt := time.Now() - e := &Executor{ - cfg: config.NewDefault(), - version: version, - commit: commit, - date: date, - DBManager: lintersdb.NewManager(nil, nil), - debugf: logutils.Debug("exec"), - } - - e.debugf("Starting execution...") - e.log = report.NewLogWrapper(logutils.NewStderrLog(""), &e.reportData) - - // to setup log level early we need to parse config from command line extra time to - // find `-v` option - commandLineCfg, err := e.getConfigForCommandLine() - if err != nil && err != pflag.ErrHelp { - e.log.Fatalf("Can't get config for command line: %s", err) - } - if commandLineCfg != nil { - logutils.SetupVerboseLog(e.log, commandLineCfg.Run.IsVerbose) - - switch commandLineCfg.Output.Color { - case "always": - color.NoColor = false - case "never": - color.NoColor = true - case "auto": - // nothing - default: - e.log.Fatalf("invalid value %q for --color; must be 'always', 'auto', or 'never'", commandLineCfg.Output.Color) - } - } - - // init of commands must be done before config file reading because - // init sets config with the default values of flags - e.initRoot() - e.initRun() - e.initHelp() - e.initLinters() - e.initConfig() - e.initCompletion() - e.initVersion() - e.initCache() - - // init e.cfg by values from config: flags parse will see these values - // like the default ones. It will overwrite them only if the same option - // is found in command-line: it's ok, command-line has higher priority. - - r := config.NewFileReader(e.cfg, commandLineCfg, e.log.Child("config_reader")) - if err = r.Read(); err != nil { - e.log.Fatalf("Can't read config: %s", err) - } - - // recreate after getting config - e.DBManager = lintersdb.NewManager(e.cfg, e.log).WithCustomLinters() - - e.cfg.LintersSettings.Gocritic.InferEnabledChecks(e.log) - if err = e.cfg.LintersSettings.Gocritic.Validate(e.log); err != nil { - e.log.Fatalf("Invalid gocritic settings: %s", err) - } - - // Slice options must be explicitly set for proper merging of config and command-line options. 
- fixSlicesFlags(e.runCmd.Flags()) - fixSlicesFlags(e.lintersCmd.Flags()) - - e.EnabledLintersSet = lintersdb.NewEnabledSet(e.DBManager, - lintersdb.NewValidator(e.DBManager), e.log.Child("lintersdb"), e.cfg) - e.goenv = goutil.NewEnv(e.log.Child("goenv")) - e.fileCache = fsutils.NewFileCache() - e.lineCache = fsutils.NewLineCache(e.fileCache) - - e.sw = timeutils.NewStopwatch("pkgcache", e.log.Child("stopwatch")) - e.pkgCache, err = pkgcache.NewCache(e.sw, e.log.Child("pkgcache")) - if err != nil { - e.log.Fatalf("Failed to build packages cache: %s", err) - } - e.loadGuard = load.NewGuard() - e.contextLoader = lint.NewContextLoader(e.cfg, e.log.Child("loader"), e.goenv, - e.lineCache, e.fileCache, e.pkgCache, e.loadGuard) - if err = e.initHashSalt(version); err != nil { - e.log.Fatalf("Failed to init hash salt: %s", err) - } - e.debugf("Initialized executor in %s", time.Since(startedAt)) - return e -} - -func (e *Executor) Execute() error { - return e.rootCmd.Execute() -} - -func (e *Executor) initHashSalt(version string) error { - binSalt, err := computeBinarySalt(version) - if err != nil { - return errors.Wrap(err, "failed to calculate binary salt") - } - - configSalt, err := computeConfigSalt(e.cfg) - if err != nil { - return errors.Wrap(err, "failed to calculate config salt") - } - - var b bytes.Buffer - b.Write(binSalt) - b.Write(configSalt) - cache.SetSalt(b.Bytes()) - return nil -} - -func computeBinarySalt(version string) ([]byte, error) { - if version != "" && version != "(devel)" { - return []byte(version), nil - } - - if logutils.HaveDebugTag("bin_salt") { - return []byte("debug"), nil - } - - p, err := os.Executable() - if err != nil { - return nil, err - } - f, err := os.Open(p) - if err != nil { - return nil, err - } - defer f.Close() - h := sha256.New() - if _, err := io.Copy(h, f); err != nil { - return nil, err - } - return h.Sum(nil), nil -} - -func computeConfigSalt(cfg *config.Config) ([]byte, error) { - // We don't hash all config fields to reduce meaningless cache - // invalidations. At least, it has a huge impact on tests speed. 
- - lintersSettingsBytes, err := yaml.Marshal(cfg.LintersSettings) - if err != nil { - return nil, errors.Wrap(err, "failed to json marshal config linter settings") - } - - var configData bytes.Buffer - configData.WriteString("linters-settings=") - configData.Write(lintersSettingsBytes) - configData.WriteString("\nbuild-tags=%s" + strings.Join(cfg.Run.BuildTags, ",")) - - h := sha256.New() - if _, err := h.Write(configData.Bytes()); err != nil { - return nil, err - } - return h.Sum(nil), nil -} - -func (e *Executor) acquireFileLock() bool { - if e.cfg.Run.AllowParallelRunners { - e.debugf("Parallel runners are allowed, no locking") - return true - } - - lockFile := filepath.Join(os.TempDir(), "golangci-lint.lock") - e.debugf("Locking on file %s...", lockFile) - f := flock.New(lockFile) - const retryDelay = time.Second - - ctx := context.Background() - if !e.cfg.Run.AllowSerialRunners { - const totalTimeout = 5 * time.Second - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, totalTimeout) - defer cancel() - } - if ok, _ := f.TryLockContext(ctx, retryDelay); !ok { - return false - } - - e.flock = f - return true -} - -func (e *Executor) releaseFileLock() { - if e.cfg.Run.AllowParallelRunners { - return - } - - if err := e.flock.Unlock(); err != nil { - e.debugf("Failed to unlock on file: %s", err) - } - if err := os.Remove(e.flock.Path()); err != nil { - e.debugf("Failed to remove lock file: %s", err) - } -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go new file mode 100644 index 0000000000..af5c351c5e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go @@ -0,0 +1,137 @@ +package commands + +import ( + "fmt" + "strings" + + "github.com/fatih/color" + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "github.com/golangci/golangci-lint/pkg/commands/internal" + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/exitcodes" + "github.com/golangci/golangci-lint/pkg/lint/lintersdb" + "github.com/golangci/golangci-lint/pkg/packages" +) + +func setupLintersFlagSet(v *viper.Viper, fs *pflag.FlagSet) { + internal.AddHackedStringSliceP(fs, "disable", "D", color.GreenString("Disable specific linter")) + internal.AddFlagAndBind(v, fs, fs.Bool, "disable-all", "linters.disable-all", false, color.GreenString("Disable all linters")) + + internal.AddHackedStringSliceP(fs, "enable", "E", color.GreenString("Enable specific linter")) + internal.AddFlagAndBind(v, fs, fs.Bool, "enable-all", "linters.enable-all", false, color.GreenString("Enable all linters")) + + internal.AddFlagAndBind(v, fs, fs.Bool, "fast", "linters.fast", false, + color.GreenString("Enable only fast linters from enabled linters set (first run won't be fast)")) + + internal.AddHackedStringSliceP(fs, "presets", "p", + color.GreenString(fmt.Sprintf("Enable presets (%s) of linters. Run 'golangci-lint help linters' to see them. "+ + "This option implies option --disable-all", strings.Join(lintersdb.AllPresets(), "|")))) + + fs.StringSlice("enable-only", nil, + color.GreenString("Override linters configuration section to only run the specific linter(s)")) // Flags only. 
+} + +func setupRunFlagSet(v *viper.Viper, fs *pflag.FlagSet) { + internal.AddFlagAndBindP(v, fs, fs.IntP, "concurrency", "j", "run.concurrency", getDefaultConcurrency(), + color.GreenString("Number of CPUs to use (Default: number of logical CPUs)")) + + internal.AddFlagAndBind(v, fs, fs.String, "modules-download-mode", "run.modules-download-mode", "", + color.GreenString("Modules download mode. If not empty, passed as -mod= to go tools")) + internal.AddFlagAndBind(v, fs, fs.Int, "issues-exit-code", "run.issues-exit-code", exitcodes.IssuesFound, + color.GreenString("Exit code when issues were found")) + internal.AddFlagAndBind(v, fs, fs.String, "go", "run.go", "", color.GreenString("Targeted Go version")) + internal.AddHackedStringSlice(fs, "build-tags", color.GreenString("Build tags")) + + internal.AddFlagAndBind(v, fs, fs.Duration, "timeout", "run.timeout", defaultTimeout, color.GreenString("Timeout for total work")) + + internal.AddFlagAndBind(v, fs, fs.Bool, "tests", "run.tests", true, color.GreenString("Analyze tests (*_test.go)")) + + internal.AddDeprecatedHackedStringSlice(fs, "skip-files", color.GreenString("Regexps of files to skip")) + internal.AddDeprecatedHackedStringSlice(fs, "skip-dirs", color.GreenString("Regexps of directories to skip")) + internal.AddDeprecatedFlagAndBind(v, fs, fs.Bool, "skip-dirs-use-default", "run.skip-dirs-use-default", true, + getDefaultDirectoryExcludeHelp()) + + const allowParallelDesc = "Allow multiple parallel golangci-lint instances running. " + + "If false (default) - golangci-lint acquires file lock on start." + internal.AddFlagAndBind(v, fs, fs.Bool, "allow-parallel-runners", "run.allow-parallel-runners", false, + color.GreenString(allowParallelDesc)) + const allowSerialDesc = "Allow multiple golangci-lint instances running, but serialize them around a lock. " + + "If false (default) - golangci-lint exits with an error if it fails to acquire file lock on start." 
+ internal.AddFlagAndBind(v, fs, fs.Bool, "allow-serial-runners", "run.allow-serial-runners", false, color.GreenString(allowSerialDesc)) +} + +func setupOutputFlagSet(v *viper.Viper, fs *pflag.FlagSet) { + internal.AddFlagAndBind(v, fs, fs.String, "out-format", "output.formats", config.OutFormatColoredLineNumber, + color.GreenString(fmt.Sprintf("Formats of output: %s", strings.Join(config.AllOutputFormats, "|")))) + internal.AddFlagAndBind(v, fs, fs.Bool, "print-issued-lines", "output.print-issued-lines", true, + color.GreenString("Print lines of code with issue")) + internal.AddFlagAndBind(v, fs, fs.Bool, "print-linter-name", "output.print-linter-name", true, + color.GreenString("Print linter name in issue line")) + internal.AddFlagAndBind(v, fs, fs.Bool, "uniq-by-line", "output.uniq-by-line", true, + color.GreenString("Make issues output unique by line")) + internal.AddFlagAndBind(v, fs, fs.Bool, "sort-results", "output.sort-results", false, + color.GreenString("Sort linter results")) + internal.AddFlagAndBind(v, fs, fs.StringSlice, "sort-order", "output.sort-order", nil, + color.GreenString("Sort order of linter results")) + internal.AddFlagAndBind(v, fs, fs.String, "path-prefix", "output.path-prefix", "", + color.GreenString("Path prefix to add to output")) + internal.AddFlagAndBind(v, fs, fs.Bool, "show-stats", "output.show-stats", false, color.GreenString("Show statistics per linter")) +} + +//nolint:gomnd // magic numbers here is ok +func setupIssuesFlagSet(v *viper.Viper, fs *pflag.FlagSet) { + internal.AddHackedStringSliceP(fs, "exclude", "e", color.GreenString("Exclude issue by regexp")) + internal.AddFlagAndBind(v, fs, fs.Bool, "exclude-use-default", "issues.exclude-use-default", true, + getDefaultIssueExcludeHelp()) + internal.AddFlagAndBind(v, fs, fs.Bool, "exclude-case-sensitive", "issues.exclude-case-sensitive", false, + color.GreenString("If set to true exclude and exclude rules regular expressions are case-sensitive")) + + internal.AddFlagAndBind(v, fs, fs.Int, "max-issues-per-linter", "issues.max-issues-per-linter", 50, + color.GreenString("Maximum issues count per one linter. Set to 0 to disable")) + internal.AddFlagAndBind(v, fs, fs.Int, "max-same-issues", "issues.max-same-issues", 3, + color.GreenString("Maximum count of issues with the same text. Set to 0 to disable")) + + internal.AddHackedStringSlice(fs, "exclude-files", color.GreenString("Regexps of files to exclude")) + internal.AddHackedStringSlice(fs, "exclude-dirs", color.GreenString("Regexps of directories to exclude")) + internal.AddFlagAndBind(v, fs, fs.Bool, "exclude-dirs-use-default", "issues.exclude-dirs-use-default", true, + getDefaultDirectoryExcludeHelp()) + + const newDesc = "Show only new issues: if there are unstaged changes or untracked files, only those changes " + + "are analyzed, else only changes in HEAD~ are analyzed.\nIt's a super-useful option for integration " + + "of golangci-lint into existing large codebase.\nIt's not practical to fix all existing issues at " + + "the moment of integration: much better to not allow issues in new code.\nFor CI setups, prefer " + + "--new-from-rev=HEAD~, as --new can skip linting the current patch if any scripts generate " + + "unstaged files before golangci-lint runs." 
+ internal.AddFlagAndBindP(v, fs, fs.BoolP, "new", "n", "issues.new", false, color.GreenString(newDesc)) + internal.AddFlagAndBind(v, fs, fs.String, "new-from-rev", "issues.new-from-rev", "", + color.GreenString("Show only new issues created after git revision `REV`")) + internal.AddFlagAndBind(v, fs, fs.String, "new-from-patch", "issues.new-from-patch", "", + color.GreenString("Show only new issues created in git patch with file path `PATH`")) + internal.AddFlagAndBind(v, fs, fs.Bool, "whole-files", "issues.whole-files", false, + color.GreenString("Show issues in any part of update files (requires new-from-rev or new-from-patch)")) + internal.AddFlagAndBind(v, fs, fs.Bool, "fix", "issues.fix", false, + color.GreenString("Fix found issues (if it's supported by the linter)")) +} + +func getDefaultIssueExcludeHelp() string { + parts := []string{color.GreenString("Use or not use default excludes:")} + for _, ep := range config.DefaultExcludePatterns { + parts = append(parts, + fmt.Sprintf(" # %s %s: %s", ep.ID, ep.Linter, ep.Why), + fmt.Sprintf(" - %s", color.YellowString(ep.Pattern)), + "", + ) + } + return strings.Join(parts, "\n") +} + +func getDefaultDirectoryExcludeHelp() string { + parts := []string{color.GreenString("Use or not use default excluded directories:")} + for _, dir := range packages.StdExcludeDirRegexps { + parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(dir))) + } + parts = append(parts, "") + return strings.Join(parts, "\n") +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go index ef276481c7..42da4a3dc9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go @@ -2,69 +2,74 @@ package commands import ( "fmt" - "os" "sort" "strings" "github.com/fatih/color" "github.com/spf13/cobra" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/lint/lintersdb" "github.com/golangci/golangci-lint/pkg/logutils" ) -func (e *Executor) initHelp() { +type helpCommand struct { + cmd *cobra.Command + + dbManager *lintersdb.Manager + + log logutils.Log +} + +func newHelpCommand(logger logutils.Log) *helpCommand { + c := &helpCommand{log: logger} + helpCmd := &cobra.Command{ Use: "help", Short: "Help", - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint help") - } - if err := cmd.Help(); err != nil { - e.log.Fatalf("Can't run help: %s", err) - } + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() }, } - e.rootCmd.SetHelpCommand(helpCmd) - lintersHelpCmd := &cobra.Command{ - Use: "linters", - Short: "Help about linters", - Run: e.executeLintersHelp, - } - helpCmd.AddCommand(lintersHelpCmd) -} - -func printLinterConfigs(lcs []*linter.Config) { - sort.Slice(lcs, func(i, j int) bool { - return strings.Compare(lcs[i].Name(), lcs[j].Name()) < 0 - }) - for _, lc := range lcs { - altNamesStr := "" - if len(lc.AlternativeNames) != 0 { - altNamesStr = fmt.Sprintf(" (%s)", strings.Join(lc.AlternativeNames, ", ")) - } + helpCmd.AddCommand( + &cobra.Command{ + Use: "linters", + Short: "Help about linters", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + Run: c.execute, + PreRunE: c.preRunE, + }, + ) - // If the linter description spans multiple lines, truncate everything following the first newline - 
linterDescription := lc.Linter.Desc() - firstNewline := strings.IndexRune(linterDescription, '\n') - if firstNewline > 0 { - linterDescription = linterDescription[:firstNewline] - } + c.cmd = helpCmd - fmt.Fprintf(logutils.StdOut, "%s%s: %s [fast: %t, auto-fix: %t]\n", color.YellowString(lc.Name()), - altNamesStr, linterDescription, !lc.IsSlowLinter(), lc.CanAutoFix) - } + return c } -func (e *Executor) executeLintersHelp(_ *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint help linters") +func (c *helpCommand) preRunE(_ *cobra.Command, _ []string) error { + // The command doesn't depend on the real configuration. + // It just needs the list of all plugins and all presets. + dbManager, err := lintersdb.NewManager(c.log.Child(logutils.DebugKeyLintersDB), config.NewDefault(), lintersdb.NewLinterBuilder()) + if err != nil { + return err } + c.dbManager = dbManager + + return nil +} + +func (c *helpCommand) execute(_ *cobra.Command, _ []string) { var enabledLCs, disabledLCs []*linter.Config - for _, lc := range e.DBManager.GetAllSupportedLinterConfigs() { + for _, lc := range c.dbManager.GetAllSupportedLinterConfigs() { + if lc.Internal { + continue + } + if lc.EnabledByDefault { enabledLCs = append(enabledLCs, lc) } else { @@ -73,20 +78,57 @@ func (e *Executor) executeLintersHelp(_ *cobra.Command, args []string) { } color.Green("Enabled by default linters:\n") - printLinterConfigs(enabledLCs) + printLinters(enabledLCs) + color.Red("\nDisabled by default linters:\n") - printLinterConfigs(disabledLCs) + printLinters(disabledLCs) color.Green("\nLinters presets:") - for _, p := range e.DBManager.AllPresets() { - linters := e.DBManager.GetAllLinterConfigsForPreset(p) - linterNames := []string{} + c.printPresets() +} + +func (c *helpCommand) printPresets() { + for _, p := range lintersdb.AllPresets() { + linters := c.dbManager.GetAllLinterConfigsForPreset(p) + + var linterNames []string for _, lc := range linters { + if lc.Internal { + continue + } + linterNames = append(linterNames, lc.Name()) } sort.Strings(linterNames) - fmt.Fprintf(logutils.StdOut, "%s: %s\n", color.YellowString(p), strings.Join(linterNames, ", ")) + + _, _ = fmt.Fprintf(logutils.StdOut, "%s: %s\n", color.YellowString(p), strings.Join(linterNames, ", ")) } +} + +func printLinters(lcs []*linter.Config) { + sort.Slice(lcs, func(i, j int) bool { + return lcs[i].Name() < lcs[j].Name() + }) + + for _, lc := range lcs { + altNamesStr := "" + if len(lc.AlternativeNames) != 0 { + altNamesStr = fmt.Sprintf(" (%s)", strings.Join(lc.AlternativeNames, ", ")) + } + + // If the linter description spans multiple lines, truncate everything following the first newline + linterDescription := lc.Linter.Desc() + firstNewline := strings.IndexRune(linterDescription, '\n') + if firstNewline > 0 { + linterDescription = linterDescription[:firstNewline] + } - os.Exit(0) + deprecatedMark := "" + if lc.IsDeprecated() { + deprecatedMark = " [" + color.RedString("deprecated") + "]" + } + + _, _ = fmt.Fprintf(logutils.StdOut, "%s%s%s: %s [fast: %t, auto-fix: %t]\n", + color.YellowString(lc.Name()), altNamesStr, deprecatedMark, linterDescription, !lc.IsSlowLinter(), lc.CanAutoFix) + } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/internal/builder.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/internal/builder.go new file mode 100644 index 0000000000..39ec2a251c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/internal/builder.go @@ -0,0 +1,219 @@ +package 
internal + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "time" + "unicode" + + "github.com/golangci/golangci-lint/pkg/logutils" +) + +// Builder runs all the required commands to build a binary. +type Builder struct { + cfg *Configuration + + log logutils.Log + + root string + repo string +} + +// NewBuilder creates a new Builder. +func NewBuilder(logger logutils.Log, cfg *Configuration, root string) *Builder { + return &Builder{ + cfg: cfg, + log: logger, + root: root, + repo: filepath.Join(root, "golangci-lint"), + } +} + +// Build builds the custom binary. +func (b Builder) Build(ctx context.Context) error { + b.log.Infof("Cloning golangci-lint repository") + + err := b.clone(ctx) + if err != nil { + return fmt.Errorf("clone golangci-lint: %w", err) + } + + b.log.Infof("Adding plugin imports") + + err = b.updatePluginsFile() + if err != nil { + return fmt.Errorf("update plugin file: %w", err) + } + + b.log.Infof("Adding replace directives") + + err = b.addReplaceDirectives(ctx) + if err != nil { + return fmt.Errorf("add replace directives: %w", err) + } + + b.log.Infof("Running go mod tidy") + + err = b.goModTidy(ctx) + if err != nil { + return fmt.Errorf("go mod tidy: %w", err) + } + + b.log.Infof("Building golangci-lint binary") + + binaryName := b.getBinaryName() + + err = b.goBuild(ctx, binaryName) + if err != nil { + return fmt.Errorf("build golangci-lint binary: %w", err) + } + + b.log.Infof("Moving golangci-lint binary") + + err = b.copyBinary(binaryName) + if err != nil { + return fmt.Errorf("move golangci-lint binary: %w", err) + } + + return nil +} + +func (b Builder) clone(ctx context.Context) error { + //nolint:gosec // the variable is sanitized. + cmd := exec.CommandContext(ctx, + "git", "clone", "--branch", sanitizeVersion(b.cfg.Version), + "--single-branch", "--depth", "1", "-c advice.detachedHead=false", "-q", + "https://github.com/golangci/golangci-lint.git", + ) + cmd.Dir = b.root + + output, err := cmd.CombinedOutput() + if err != nil { + b.log.Infof(string(output)) + + return fmt.Errorf("%s: %w", strings.Join(cmd.Args, " "), err) + } + + return nil +} + +func (b Builder) addReplaceDirectives(ctx context.Context) error { + for _, plugin := range b.cfg.Plugins { + if plugin.Path == "" { + continue + } + + replace := fmt.Sprintf("%s=%s", plugin.Module, plugin.Path) + + cmd := exec.CommandContext(ctx, "go", "mod", "edit", "-replace", replace) + cmd.Dir = b.repo + + b.log.Infof("run: %s", strings.Join(cmd.Args, " ")) + + output, err := cmd.CombinedOutput() + if err != nil { + b.log.Warnf(string(output)) + + return fmt.Errorf("%s: %w", strings.Join(cmd.Args, " "), err) + } + } + + return nil +} + +func (b Builder) goModTidy(ctx context.Context) error { + cmd := exec.CommandContext(ctx, "go", "mod", "tidy") + cmd.Dir = b.repo + + output, err := cmd.CombinedOutput() + if err != nil { + b.log.Warnf(string(output)) + + return fmt.Errorf("%s: %w", strings.Join(cmd.Args, " "), err) + } + + return nil +} + +func (b Builder) goBuild(ctx context.Context, binaryName string) error { + //nolint:gosec // the variable is sanitized. 
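+	// -s -w strip the symbol and DWARF tables to shrink the binary, and the
+	// -X flags stamp main.version (suffixed with "-custom-gcl") and main.date
+	// at link time, so the custom binary identifies itself as such.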
+ cmd := exec.CommandContext(ctx, "go", "build", + "-ldflags", + fmt.Sprintf( + "-s -w -X 'main.version=%s-custom-gcl' -X 'main.date=%s'", + sanitizeVersion(b.cfg.Version), time.Now().UTC().String(), + ), + "-o", binaryName, + "./cmd/golangci-lint", + ) + cmd.Dir = b.repo + + output, err := cmd.CombinedOutput() + if err != nil { + b.log.Warnf(string(output)) + + return fmt.Errorf("%s: %w", strings.Join(cmd.Args, " "), err) + } + + return nil +} + +func (b Builder) copyBinary(binaryName string) error { + src := filepath.Join(b.repo, binaryName) + + source, err := os.Open(filepath.Clean(src)) + if err != nil { + return fmt.Errorf("open source file: %w", err) + } + + defer func() { _ = source.Close() }() + + info, err := source.Stat() + if err != nil { + return fmt.Errorf("stat source file: %w", err) + } + + if b.cfg.Destination != "" { + err = os.MkdirAll(b.cfg.Destination, os.ModePerm) + if err != nil { + return fmt.Errorf("create destination directory: %w", err) + } + } + + dst, err := os.OpenFile(filepath.Join(b.cfg.Destination, binaryName), os.O_RDWR|os.O_CREATE|os.O_TRUNC, info.Mode()) + if err != nil { + return fmt.Errorf("create destination file: %w", err) + } + + defer func() { _ = dst.Close() }() + + _, err = io.Copy(dst, source) + if err != nil { + return fmt.Errorf("copy source to destination: %w", err) + } + + return nil +} + +func (b Builder) getBinaryName() string { + name := b.cfg.Name + if runtime.GOOS == "windows" { + name += ".exe" + } + + return name +} + +func sanitizeVersion(v string) string { + fn := func(c rune) bool { + return !(unicode.IsLetter(c) || unicode.IsNumber(c) || c == '.' || c == '/') + } + + return strings.Join(strings.FieldsFunc(v, fn), "") +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/internal/configuration.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/internal/configuration.go new file mode 100644 index 0000000000..5327025946 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/internal/configuration.go @@ -0,0 +1,138 @@ +package internal + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "gopkg.in/yaml.v3" +) + +const base = ".custom-gcl" + +const defaultBinaryName = "custom-gcl" + +// Configuration represents the configuration file. +type Configuration struct { + // golangci-lint version. + Version string `yaml:"version"` + + // Name of the binary. + Name string `yaml:"name,omitempty"` + + // Destination is the path to a directory to store the binary. + Destination string `yaml:"destination,omitempty"` + + // Plugins information. + Plugins []*Plugin `yaml:"plugins,omitempty"` +} + +// Validate checks and cleans the configuration.
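+// In short: 'version' and at least one plugin are required; 'name' defaults
+// to "custom-gcl"; each plugin needs 'module' plus exactly one of 'version'
+// or 'path'; 'import' falls back to 'module'; a relative 'path' is made
+// absolute. For illustration, a minimal .custom-gcl.yml that would pass
+// validation (the plugin module below is hypothetical):
+//
+//	version: v1.57.0
+//	plugins:
+//	  - module: github.com/example/custom-linter
+//	    version: v1.0.0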
+func (c *Configuration) Validate() error { + if strings.TrimSpace(c.Version) == "" { + return errors.New("root field 'version' is required") + } + + if strings.TrimSpace(c.Name) == "" { + c.Name = defaultBinaryName + } + + if len(c.Plugins) == 0 { + return errors.New("no plugins defined") + } + + for _, plugin := range c.Plugins { + if strings.TrimSpace(plugin.Module) == "" { + return errors.New("field 'module' is required") + } + + if strings.TrimSpace(plugin.Import) == "" { + plugin.Import = plugin.Module + } + + if strings.TrimSpace(plugin.Path) == "" && strings.TrimSpace(plugin.Version) == "" { + return errors.New("missing information: 'version' or 'path' should be provided") + } + + if strings.TrimSpace(plugin.Path) != "" && strings.TrimSpace(plugin.Version) != "" { + return errors.New("invalid configuration: 'version' and 'path' should not be provided at the same time") + } + + if strings.TrimSpace(plugin.Path) == "" { + continue + } + + abs, err := filepath.Abs(plugin.Path) + if err != nil { + return err + } + + plugin.Path = abs + } + + return nil +} + +// Plugin represents information about a plugin. +type Plugin struct { + // Module name. + Module string `yaml:"module"` + + // Import to use. + Import string `yaml:"import,omitempty"` + + // Version of the module. + // Only for module available through a Go proxy. + Version string `yaml:"version,omitempty"` + + // Path to the local module. + // Only for local module. + Path string `yaml:"path,omitempty"` +} + +func LoadConfiguration() (*Configuration, error) { + configFilePath, err := findConfigurationFile() + if err != nil { + return nil, fmt.Errorf("file %s not found: %w", configFilePath, err) + } + + file, err := os.Open(configFilePath) + if err != nil { + return nil, fmt.Errorf("file %s open: %w", configFilePath, err) + } + + var cfg Configuration + + err = yaml.NewDecoder(file).Decode(&cfg) + if err != nil { + return nil, fmt.Errorf("YAML decoding: %w", err) + } + + return &cfg, nil +} + +func findConfigurationFile() (string, error) { + entries, err := os.ReadDir(".") + if err != nil { + return "", fmt.Errorf("read directory: %w", err) + } + + for _, entry := range entries { + ext := filepath.Ext(entry.Name()) + + switch strings.ToLower(strings.TrimPrefix(ext, ".")) { + case "yml", "yaml", "json": + if isConf(ext, entry.Name()) { + return entry.Name(), nil + } + } + } + + return "", errors.New("configuration file not found") +} + +func isConf(ext, name string) bool { + return base+ext == name +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/internal/imports.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/internal/imports.go new file mode 100644 index 0000000000..3bebf596b1 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/internal/imports.go @@ -0,0 +1,69 @@ +package internal + +import ( + "bytes" + "fmt" + "go/format" + "os" + "path/filepath" + "text/template" +) + +const importsTemplate = ` +package main + +import ( +{{range .Imports -}} + _ "{{.}}" +{{end -}} +) +` + +func (b Builder) updatePluginsFile() error { + importsDest := filepath.Join(b.repo, "cmd", "golangci-lint", "plugins.go") + + info, err := os.Stat(importsDest) + if err != nil { + return fmt.Errorf("file %s not found: %w", importsDest, err) + } + + source, err := generateImports(b.cfg) + if err != nil { + return fmt.Errorf("generate imports: %w", err) + } + + b.log.Infof("generated imports info %s:\n%s\n", importsDest, source) + + err = os.WriteFile(filepath.Clean(importsDest), source, info.Mode()) + if 
err != nil { + return fmt.Errorf("write file %s: %w", importsDest, err) + } + + return nil +} + +func generateImports(cfg *Configuration) ([]byte, error) { + impTmpl, err := template.New("plugins.go").Parse(importsTemplate) + if err != nil { + return nil, fmt.Errorf("parse template: %w", err) + } + + var imps []string + for _, plugin := range cfg.Plugins { + imps = append(imps, plugin.Import) + } + + buf := &bytes.Buffer{} + + err = impTmpl.Execute(buf, map[string]any{"Imports": imps}) + if err != nil { + return nil, fmt.Errorf("execute template: %w", err) + } + + source, err := format.Source(buf.Bytes()) + if err != nil { + return nil, fmt.Errorf("format source: %w", err) + } + + return source, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/internal/vibra.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/internal/vibra.go new file mode 100644 index 0000000000..ece2483fe0 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/internal/vibra.go @@ -0,0 +1,59 @@ +package internal + +import ( + "fmt" + + "github.com/spf13/pflag" + "github.com/spf13/viper" +) + +type FlagFunc[T any] func(name string, value T, usage string) *T + +type FlagPFunc[T any] func(name, shorthand string, value T, usage string) *T + +// AddFlagAndBind adds a Cobra/pflag flag and binds it with Viper. +func AddFlagAndBind[T any](v *viper.Viper, fs *pflag.FlagSet, pfn FlagFunc[T], name, bind string, value T, usage string) { + pfn(name, value, usage) + + err := v.BindPFlag(bind, fs.Lookup(name)) + if err != nil { + panic(fmt.Sprintf("failed to bind flag %s: %v", name, err)) + } +} + +// AddFlagAndBindP adds a Cobra/pflag flag (with a shorthand) and binds it with Viper. +func AddFlagAndBindP[T any](v *viper.Viper, fs *pflag.FlagSet, pfn FlagPFunc[T], name, shorthand, bind string, value T, usage string) { + pfn(name, shorthand, value, usage) + + err := v.BindPFlag(bind, fs.Lookup(name)) + if err != nil { + panic(fmt.Sprintf("failed to bind flag %s: %v", name, err)) + } +} + +// AddDeprecatedFlagAndBind is similar to AddFlagAndBind but deprecates the flag. +func AddDeprecatedFlagAndBind[T any](v *viper.Viper, fs *pflag.FlagSet, pfn FlagFunc[T], name, bind string, value T, usage string) { + AddFlagAndBind(v, fs, pfn, name, bind, value, usage) + deprecateFlag(fs, name) +} + +// AddHackedStringSliceP is a hack for slice flags; see Loader.applyStringSliceHack. +func AddHackedStringSliceP(fs *pflag.FlagSet, name, shorthand, usage string) { + fs.StringSliceP(name, shorthand, nil, usage) +} + +// AddHackedStringSlice is a hack for slice flags; see Loader.applyStringSliceHack. +func AddHackedStringSlice(fs *pflag.FlagSet, name, usage string) { + AddHackedStringSliceP(fs, name, "", usage) +} + +// AddDeprecatedHackedStringSlice is similar to AddHackedStringSlice but deprecates the flag.
+func AddDeprecatedHackedStringSlice(fs *pflag.FlagSet, name, usage string) { + AddHackedStringSlice(fs, name, usage) + deprecateFlag(fs, name) +} + +func deprecateFlag(fs *pflag.FlagSet, name string) { + _ = fs.MarkHidden(name) + _ = fs.MarkDeprecated(name, "check the documentation for more information.") +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/linters.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/linters.go index 873dab8177..61ed0f22f2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/linters.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/linters.go @@ -1,51 +1,106 @@ package commands import ( - "log" - "os" + "fmt" "github.com/fatih/color" "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/lint/lintersdb" + "github.com/golangci/golangci-lint/pkg/logutils" ) -func (e *Executor) initLinters() { - e.lintersCmd = &cobra.Command{ - Use: "linters", - Short: "List current linters configuration", - Run: e.executeLinters, +type lintersOptions struct { + config.LoaderOptions +} + +type lintersCommand struct { + viper *viper.Viper + cmd *cobra.Command + + opts lintersOptions + + cfg *config.Config + + log logutils.Log + + dbManager *lintersdb.Manager +} + +func newLintersCommand(logger logutils.Log) *lintersCommand { + c := &lintersCommand{ + viper: viper.New(), + cfg: config.NewDefault(), + log: logger, + } + + lintersCmd := &cobra.Command{ + Use: "linters", + Short: "List current linters configuration", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + RunE: c.execute, + PreRunE: c.preRunE, + SilenceUsage: true, } - e.rootCmd.AddCommand(e.lintersCmd) - e.initRunConfiguration(e.lintersCmd) + + fs := lintersCmd.Flags() + fs.SortFlags = false // sort them as they are defined here + + setupConfigFileFlagSet(fs, &c.opts.LoaderOptions) + setupLintersFlagSet(c.viper, fs) + + c.cmd = lintersCmd + + return c } -func (e *Executor) executeLinters(_ *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint linters") +func (c *lintersCommand) preRunE(cmd *cobra.Command, _ []string) error { + loader := config.NewLoader(c.log.Child(logutils.DebugKeyConfigReader), c.viper, cmd.Flags(), c.opts.LoaderOptions, c.cfg) + + if err := loader.Load(); err != nil { + return fmt.Errorf("can't load config: %w", err) } - enabledLintersMap, err := e.EnabledLintersSet.GetEnabledLintersMap() + dbManager, err := lintersdb.NewManager(c.log.Child(logutils.DebugKeyLintersDB), c.cfg, + lintersdb.NewLinterBuilder(), lintersdb.NewPluginModuleBuilder(c.log), lintersdb.NewPluginGoBuilder(c.log)) if err != nil { - log.Fatalf("Can't get enabled linters: %s", err) + return err } - color.Green("Enabled by your configuration linters:\n") - enabledLinters := make([]*linter.Config, 0, len(enabledLintersMap)) - for _, linter := range enabledLintersMap { - enabledLinters = append(enabledLinters, linter) + c.dbManager = dbManager + + return nil +} + +func (c *lintersCommand) execute(_ *cobra.Command, _ []string) error { + enabledLintersMap, err := c.dbManager.GetEnabledLintersMap() + if err != nil { + return fmt.Errorf("can't get enabled linters: %w", err) } - printLinterConfigs(enabledLinters) + var enabledLinters []*linter.Config var disabledLCs []*linter.Config - for _, lc := range e.DBManager.GetAllSupportedLinterConfigs() { + + for _, lc := range 
c.dbManager.GetAllSupportedLinterConfigs() { + if lc.Internal { + continue + } + if enabledLintersMap[lc.Name()] == nil { disabledLCs = append(disabledLCs, lc) + } else { + enabledLinters = append(enabledLinters, lc) } } + color.Green("Enabled by your configuration linters:\n") + printLinters(enabledLinters) color.Red("\nDisabled by your configuration linters:\n") - printLinterConfigs(disabledLCs) + printLinters(disabledLCs) - os.Exit(0) + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go index f90df9901f..cbb838aac2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go @@ -1,165 +1,167 @@ package commands import ( + "errors" "fmt" "os" - "runtime" - "runtime/pprof" - "runtime/trace" - "strconv" + "slices" + "github.com/fatih/color" "github.com/spf13/cobra" "github.com/spf13/pflag" - "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/logutils" ) -func (e *Executor) persistentPreRun(_ *cobra.Command, _ []string) { - if e.cfg.Run.PrintVersion { - fmt.Fprintf(logutils.StdOut, "golangci-lint has version %s built from %s on %s\n", e.version, e.commit, e.date) - os.Exit(0) - } +func Execute(info BuildInfo) error { + return newRootCommand(info).Execute() +} - runtime.GOMAXPROCS(e.cfg.Run.Concurrency) +type rootOptions struct { + PrintVersion bool // Flag only. - if e.cfg.Run.CPUProfilePath != "" { - f, err := os.Create(e.cfg.Run.CPUProfilePath) - if err != nil { - e.log.Fatalf("Can't create file %s: %s", e.cfg.Run.CPUProfilePath, err) - } - if err := pprof.StartCPUProfile(f); err != nil { - e.log.Fatalf("Can't start CPU profiling: %s", err) - } - } + Verbose bool // Flag only. + Color string // Flag only. 
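+	// "Flag only." options are consumed directly from pflag and are never
+	// bound into Viper.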
+} - if e.cfg.Run.MemProfilePath != "" { - if rate := os.Getenv("GL_MEMPROFILE_RATE"); rate != "" { - runtime.MemProfileRate, _ = strconv.Atoi(rate) - } - } +type rootCommand struct { + cmd *cobra.Command + opts rootOptions - if e.cfg.Run.TracePath != "" { - f, err := os.Create(e.cfg.Run.TracePath) - if err != nil { - e.log.Fatalf("Can't create file %s: %s", e.cfg.Run.TracePath, err) - } - if err = trace.Start(f); err != nil { - e.log.Fatalf("Can't start tracing: %s", err) - } - } + log logutils.Log } -func (e *Executor) persistentPostRun(_ *cobra.Command, _ []string) { - if e.cfg.Run.CPUProfilePath != "" { - pprof.StopCPUProfile() - } - if e.cfg.Run.MemProfilePath != "" { - f, err := os.Create(e.cfg.Run.MemProfilePath) - if err != nil { - e.log.Fatalf("Can't create file %s: %s", e.cfg.Run.MemProfilePath, err) - } +func newRootCommand(info BuildInfo) *rootCommand { + c := &rootCommand{} - var ms runtime.MemStats - runtime.ReadMemStats(&ms) - printMemStats(&ms, e.log) + rootCmd := &cobra.Command{ + Use: "golangci-lint", + Short: "golangci-lint is a smart linters runner.", + Long: `Smart, fast linters runner.`, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + if c.opts.PrintVersion { + _ = printVersion(logutils.StdOut, info) + return nil + } - if err := pprof.WriteHeapProfile(f); err != nil { - e.log.Fatalf("Can't write heap profile: %s", err) - } - f.Close() - } - if e.cfg.Run.TracePath != "" { - trace.Stop() + return cmd.Help() + }, } - os.Exit(e.exitCode) + fs := rootCmd.Flags() + fs.BoolVar(&c.opts.PrintVersion, "version", false, color.GreenString("Print version")) + + setupRootPersistentFlags(rootCmd.PersistentFlags(), &c.opts) + + log := logutils.NewStderrLog(logutils.DebugKeyEmpty) + + // Each command uses a dedicated configuration structure to avoid side effects of bindings. 
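+	// Each subcommand constructor below likewise creates its own viper.Viper
+	// instance where needed, so a flag binding made for one command cannot
+	// leak into another.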
+ rootCmd.AddCommand( + newLintersCommand(log).cmd, + newRunCommand(log, info).cmd, + newCacheCommand().cmd, + newConfigCommand(log, info).cmd, + newVersionCommand(info).cmd, + newCustomCommand(log).cmd, + ) + + rootCmd.SetHelpCommand(newHelpCommand(log).cmd) + + c.log = log + c.cmd = rootCmd + + return c } -func printMemStats(ms *runtime.MemStats, logger logutils.Log) { - logger.Infof("Mem stats: alloc=%s total_alloc=%s sys=%s "+ - "heap_alloc=%s heap_sys=%s heap_idle=%s heap_released=%s heap_in_use=%s "+ - "stack_in_use=%s stack_sys=%s "+ - "mspan_sys=%s mcache_sys=%s buck_hash_sys=%s gc_sys=%s other_sys=%s "+ - "mallocs_n=%d frees_n=%d heap_objects_n=%d gc_cpu_fraction=%.2f", - formatMemory(ms.Alloc), formatMemory(ms.TotalAlloc), formatMemory(ms.Sys), - formatMemory(ms.HeapAlloc), formatMemory(ms.HeapSys), - formatMemory(ms.HeapIdle), formatMemory(ms.HeapReleased), formatMemory(ms.HeapInuse), - formatMemory(ms.StackInuse), formatMemory(ms.StackSys), - formatMemory(ms.MSpanSys), formatMemory(ms.MCacheSys), formatMemory(ms.BuckHashSys), - formatMemory(ms.GCSys), formatMemory(ms.OtherSys), - ms.Mallocs, ms.Frees, ms.HeapObjects, ms.GCCPUFraction) +func (c *rootCommand) Execute() error { + err := setupLogger(c.log) + if err != nil { + return err + } + + return c.cmd.Execute() } -func formatMemory(memBytes uint64) string { - const Kb = 1024 - const Mb = Kb * 1024 +func setupRootPersistentFlags(fs *pflag.FlagSet, opts *rootOptions) { + fs.BoolP("help", "h", false, color.GreenString("Help for a command")) + fs.BoolVarP(&opts.Verbose, "verbose", "v", false, color.GreenString("Verbose output")) + fs.StringVar(&opts.Color, "color", "auto", color.GreenString("Use color when printing; can be 'always', 'auto', or 'never'")) +} - if memBytes < Kb { - return fmt.Sprintf("%db", memBytes) +func setupLogger(logger logutils.Log) error { + opts, err := forceRootParsePersistentFlags() + if err != nil && !errors.Is(err, pflag.ErrHelp) { + return err } - if memBytes < Mb { - return fmt.Sprintf("%dkb", memBytes/Kb) + + if opts == nil { + return nil } - return fmt.Sprintf("%dmb", memBytes/Mb) -} -func getDefaultConcurrency() int { - if os.Getenv("HELP_RUN") == "1" { - // Make stable concurrency for README help generating builds. - const prettyConcurrency = 8 - return prettyConcurrency + logutils.SetupVerboseLog(logger, opts.Verbose) + + switch opts.Color { + case "always": + color.NoColor = false + case "never": + color.NoColor = true + case "auto": + // nothing + default: + logger.Fatalf("invalid value %q for --color; must be 'always', 'auto', or 'never'", opts.Color) } - return runtime.NumCPU() + return nil } -func (e *Executor) initRoot() { - rootCmd := &cobra.Command{ - Use: "golangci-lint", - Short: "golangci-lint is a smart linters runner.", - Long: `Smart, fast linters runner. Run it in cloud for every GitHub pull request on https://golangci.com`, - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint") - } - if err := cmd.Help(); err != nil { - e.log.Fatalf("Can't run help: %s", err) - } - }, - PersistentPreRun: e.persistentPreRun, - PersistentPostRun: e.persistentPostRun, - } +func forceRootParsePersistentFlags() (*rootOptions, error) { + // We use another pflag.FlagSet here to not set `changed` flag on cmd.Flags() options. + // Otherwise, string slice options will be duplicated. 
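+	// This early, permissive parse exists so that setupLogger can honor
+	// --verbose and --color before Cobra has dispatched to any subcommand.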
+ fs := pflag.NewFlagSet("config flag set", pflag.ContinueOnError) - initRootFlagSet(rootCmd.PersistentFlags(), e.cfg, e.needVersionOption()) - e.rootCmd = rootCmd -} + // Ignore unknown flags because we will parse the command flags later. + fs.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true} -func (e *Executor) needVersionOption() bool { - return e.date != "" -} + opts := &rootOptions{} -func initRootFlagSet(fs *pflag.FlagSet, cfg *config.Config, needVersionOption bool) { - fs.BoolVarP(&cfg.Run.IsVerbose, "verbose", "v", false, wh("verbose output")) + // Don't do `fs.AddFlagSet(cmd.Flags())` because it shares flags representations: + // `changed` variable inside string slice vars will be shared. + // Use another config variable here, + // to not affect main parsing by this parsing of only config option. + setupRootPersistentFlags(fs, opts) - var silent bool - fs.BoolVarP(&silent, "silent", "s", false, wh("disables congrats outputs")) - if err := fs.MarkHidden("silent"); err != nil { - panic(err) - } - err := fs.MarkDeprecated("silent", - "now golangci-lint by default is silent: it doesn't print Congrats message") - if err != nil { - panic(err) + fs.Usage = func() {} // otherwise, help text will be printed twice + + if err := fs.Parse(safeArgs(fs, os.Args)); err != nil { + if errors.Is(err, pflag.ErrHelp) { + return nil, err + } + + return nil, fmt.Errorf("can't parse args: %w", err) } - fs.StringVar(&cfg.Run.CPUProfilePath, "cpu-profile-path", "", wh("Path to CPU profile output file")) - fs.StringVar(&cfg.Run.MemProfilePath, "mem-profile-path", "", wh("Path to memory profile output file")) - fs.StringVar(&cfg.Run.TracePath, "trace-path", "", wh("Path to trace output file")) - fs.IntVarP(&cfg.Run.Concurrency, "concurrency", "j", getDefaultConcurrency(), wh("Concurrency (default NumCPU)")) - if needVersionOption { - fs.BoolVar(&cfg.Run.PrintVersion, "version", false, wh("Print version")) + return opts, nil +} + +// Shorthands are a problem because pflag, with UnknownFlags, will try to parse all the letters as options. +// A shorthand can aggregate several letters (ex `ps -aux`) +// The function replaces non-supported shorthands by a dumb flag. 
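+// For example, an argument like "-aux", where 'a' is not a shorthand
+// registered on this flag set, is replaced wholesale with the harmless
+// "--potato" so the permissive parse above can continue instead of failing.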
+func safeArgs(fs *pflag.FlagSet, args []string) []string { + var shorthands []string + fs.VisitAll(func(flag *pflag.Flag) { + shorthands = append(shorthands, flag.Shorthand) + }) + + var cleanArgs []string + for _, arg := range args { + if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' && !slices.Contains(shorthands, string(arg[1])) { + cleanArgs = append(cleanArgs, "--potato") + continue + } + + cleanArgs = append(cleanArgs, arg) } - fs.StringVar(&cfg.Output.Color, "color", "auto", wh("Use color when printing; can be 'always', 'auto', or 'never'")) + return cleanArgs } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go index 271fffe94f..8ed8c5eed2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go @@ -1,373 +1,399 @@ package commands import ( + "bytes" "context" + "crypto/sha256" + "errors" "fmt" - "io/ioutil" + "io" "log" "os" + "path/filepath" "runtime" + "runtime/pprof" + "runtime/trace" + "sort" + "strconv" "strings" "time" "github.com/fatih/color" - "github.com/pkg/errors" + "github.com/gofrs/flock" "github.com/spf13/cobra" "github.com/spf13/pflag" + "github.com/spf13/viper" + "go.uber.org/automaxprocs/maxprocs" + "golang.org/x/exp/maps" + "gopkg.in/yaml.v3" + "github.com/golangci/golangci-lint/internal/cache" + "github.com/golangci/golangci-lint/internal/pkgcache" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/exitcodes" + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load" + "github.com/golangci/golangci-lint/pkg/goutil" "github.com/golangci/golangci-lint/pkg/lint" + "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/lint/lintersdb" "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/packages" "github.com/golangci/golangci-lint/pkg/printers" + "github.com/golangci/golangci-lint/pkg/report" "github.com/golangci/golangci-lint/pkg/result" - "github.com/golangci/golangci-lint/pkg/result/processors" + "github.com/golangci/golangci-lint/pkg/timeutils" ) -func getDefaultIssueExcludeHelp() string { - parts := []string{"Use or not use default excludes:"} - for _, ep := range config.DefaultExcludePatterns { - parts = append(parts, - fmt.Sprintf(" # %s %s: %s", ep.ID, ep.Linter, ep.Why), - fmt.Sprintf(" - %s", color.YellowString(ep.Pattern)), - "", - ) - } - return strings.Join(parts, "\n") +const defaultTimeout = time.Minute + +const ( + // envFailOnWarnings value: "1" + envFailOnWarnings = "FAIL_ON_WARNINGS" + // envMemLogEvery value: "1" + envMemLogEvery = "GL_MEM_LOG_EVERY" +) + +const ( + // envHelpRun value: "1". + envHelpRun = "HELP_RUN" + envMemProfileRate = "GL_MEM_PROFILE_RATE" +) + +type runOptions struct { + config.LoaderOptions + + CPUProfilePath string // Flag only. + MemProfilePath string // Flag only. + TracePath string // Flag only. + + PrintResourcesUsage bool // Flag only. 
} -func getDefaultDirectoryExcludeHelp() string { - parts := []string{"Use or not use default excluded directories:"} - for _, dir := range packages.StdExcludeDirRegexps { - parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(dir))) - } - parts = append(parts, "") - return strings.Join(parts, "\n") +type runCommand struct { + viper *viper.Viper + cmd *cobra.Command + + opts runOptions + + cfg *config.Config + + buildInfo BuildInfo + + dbManager *lintersdb.Manager + + printer *printers.Printer + + log logutils.Log + debugf logutils.DebugFunc + reportData *report.Data + + contextBuilder *lint.ContextBuilder + goenv *goutil.Env + + fileCache *fsutils.FileCache + lineCache *fsutils.LineCache + + flock *flock.Flock + + exitCode int } -func wh(text string) string { - return color.GreenString(text) +func newRunCommand(logger logutils.Log, info BuildInfo) *runCommand { + reportData := &report.Data{} + + c := &runCommand{ + viper: viper.New(), + log: report.NewLogWrapper(logger, reportData), + debugf: logutils.Debug(logutils.DebugKeyExec), + cfg: config.NewDefault(), + reportData: reportData, + buildInfo: info, + } + + runCmd := &cobra.Command{ + Use: "run", + Short: "Run the linters", + Run: c.execute, + PreRunE: c.preRunE, + PostRun: c.postRun, + PersistentPreRunE: c.persistentPreRunE, + PersistentPostRunE: c.persistentPostRunE, + SilenceUsage: true, + } + + runCmd.SetOut(logutils.StdOut) // use custom output to properly color it in Windows terminals + runCmd.SetErr(logutils.StdErr) + + fs := runCmd.Flags() + fs.SortFlags = false // sort them as they are defined here + + // Only for testing purpose. + // Don't add other flags here. + fs.BoolVar(&c.cfg.InternalCmdTest, "internal-cmd-test", false, + color.GreenString("Option is used only for testing golangci-lint command, don't use it")) + _ = fs.MarkHidden("internal-cmd-test") + + setupConfigFileFlagSet(fs, &c.opts.LoaderOptions) + + setupLintersFlagSet(c.viper, fs) + setupRunFlagSet(c.viper, fs) + setupOutputFlagSet(c.viper, fs) + setupIssuesFlagSet(c.viper, fs) + + setupRunPersistentFlags(runCmd.PersistentFlags(), &c.opts) + + c.cmd = runCmd + + return c } -const defaultTimeout = time.Minute +func (c *runCommand) persistentPreRunE(cmd *cobra.Command, _ []string) error { + if err := c.startTracing(); err != nil { + return err + } -//nolint:funlen -func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, isFinalInit bool) { - hideFlag := func(name string) { - if err := fs.MarkHidden(name); err != nil { - panic(err) - } + loader := config.NewLoader(c.log.Child(logutils.DebugKeyConfigReader), c.viper, cmd.Flags(), c.opts.LoaderOptions, c.cfg) - // we run initFlagSet multiple times, but we wouldn't like to see deprecation message multiple times - if isFinalInit { - const deprecateMessage = "flag will be removed soon, please, use .golangci.yml config" - if err := fs.MarkDeprecated(name, deprecateMessage); err != nil { - panic(err) - } + if err := loader.Load(); err != nil { + return fmt.Errorf("can't load config: %w", err) + } + + if c.cfg.Run.Concurrency == 0 { + backup := runtime.GOMAXPROCS(0) + + // Automatically set GOMAXPROCS to match Linux container CPU quota. 
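+		// The previous GOMAXPROCS value is captured above so it can be
+		// restored if automatic detection fails; an explicit run.concurrency
+		// setting (the else branch) bypasses maxprocs entirely.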
+ _, err := maxprocs.Set(maxprocs.Logger(c.log.Infof)) + if err != nil { + runtime.GOMAXPROCS(backup) } + } else { + runtime.GOMAXPROCS(c.cfg.Run.Concurrency) } - // Output config - oc := &cfg.Output - fs.StringVar(&oc.Format, "out-format", - config.OutFormatColoredLineNumber, - wh(fmt.Sprintf("Format of output: %s", strings.Join(config.OutFormats, "|")))) - fs.BoolVar(&oc.PrintIssuedLine, "print-issued-lines", true, wh("Print lines of code with issue")) - fs.BoolVar(&oc.PrintLinterName, "print-linter-name", true, wh("Print linter name in issue line")) - fs.BoolVar(&oc.UniqByLine, "uniq-by-line", true, wh("Make issues output unique by line")) - fs.BoolVar(&oc.SortResults, "sort-results", false, wh("Sort linter results")) - fs.BoolVar(&oc.PrintWelcomeMessage, "print-welcome", false, wh("Print welcome message")) - fs.StringVar(&oc.PathPrefix, "path-prefix", "", wh("Path prefix to add to output")) - hideFlag("print-welcome") // no longer used - - fs.BoolVar(&cfg.InternalCmdTest, "internal-cmd-test", false, wh("Option is used only for testing golangci-lint command, don't use it")) - if err := fs.MarkHidden("internal-cmd-test"); err != nil { - panic(err) - } - - // Run config - rc := &cfg.Run - fs.StringVar(&rc.ModulesDownloadMode, "modules-download-mode", "", - "Modules download mode. If not empty, passed as -mod= to go tools") - fs.IntVar(&rc.ExitCodeIfIssuesFound, "issues-exit-code", - exitcodes.IssuesFound, wh("Exit code when issues were found")) - fs.StringSliceVar(&rc.BuildTags, "build-tags", nil, wh("Build tags")) - - fs.DurationVar(&rc.Timeout, "deadline", defaultTimeout, wh("Deadline for total work")) - if err := fs.MarkHidden("deadline"); err != nil { - panic(err) - } - fs.DurationVar(&rc.Timeout, "timeout", defaultTimeout, wh("Timeout for total work")) - - fs.BoolVar(&rc.AnalyzeTests, "tests", true, wh("Analyze tests (*_test.go)")) - fs.BoolVar(&rc.PrintResourcesUsage, "print-resources-usage", false, - wh("Print avg and max memory usage of golangci-lint and total time")) - fs.StringVarP(&rc.Config, "config", "c", "", wh("Read config from file path `PATH`")) - fs.BoolVar(&rc.NoConfig, "no-config", false, wh("Don't read config")) - fs.StringSliceVar(&rc.SkipDirs, "skip-dirs", nil, wh("Regexps of directories to skip")) - fs.BoolVar(&rc.UseDefaultSkipDirs, "skip-dirs-use-default", true, getDefaultDirectoryExcludeHelp()) - fs.StringSliceVar(&rc.SkipFiles, "skip-files", nil, wh("Regexps of files to skip")) - - const allowParallelDesc = "Allow multiple parallel golangci-lint instances running. " + - "If false (default) - golangci-lint acquires file lock on start." - fs.BoolVar(&rc.AllowParallelRunners, "allow-parallel-runners", false, wh(allowParallelDesc)) - const allowSerialDesc = "Allow multiple golangci-lint instances running, but serialize them around a lock. " + - "If false (default) - golangci-lint exits with an error if it fails to acquire file lock on start." - fs.BoolVar(&rc.AllowSerialRunners, "allow-serial-runners", false, wh(allowSerialDesc)) - - // Linters settings config - lsc := &cfg.LintersSettings - - // Hide all linters settings flags: they were initially visible, - // but when number of linters started to grow it became obvious that - // we can't fill 90% of flags by linters settings: common flags became hard to find. - // New linters settings should be done only through config file. 
- fs.BoolVar(&lsc.Errcheck.CheckTypeAssertions, "errcheck.check-type-assertions", - false, "Errcheck: check for ignored type assertion results") - hideFlag("errcheck.check-type-assertions") - fs.BoolVar(&lsc.Errcheck.CheckAssignToBlank, "errcheck.check-blank", false, - "Errcheck: check for errors assigned to blank identifier: _ = errFunc()") - hideFlag("errcheck.check-blank") - fs.StringVar(&lsc.Errcheck.Exclude, "errcheck.exclude", "", - "Path to a file containing a list of functions to exclude from checking") - hideFlag("errcheck.exclude") - fs.StringVar(&lsc.Errcheck.Ignore, "errcheck.ignore", "fmt:.*", - `Comma-separated list of pairs of the form pkg:regex. The regex is used to ignore names within pkg`) - hideFlag("errcheck.ignore") - - fs.BoolVar(&lsc.Govet.CheckShadowing, "govet.check-shadowing", false, - "Govet: check for shadowed variables") - hideFlag("govet.check-shadowing") - - fs.Float64Var(&lsc.Golint.MinConfidence, "golint.min-confidence", 0.8, - "Golint: minimum confidence of a problem to print it") - hideFlag("golint.min-confidence") - - fs.BoolVar(&lsc.Gofmt.Simplify, "gofmt.simplify", true, "Gofmt: simplify code") - hideFlag("gofmt.simplify") - - fs.IntVar(&lsc.Gocyclo.MinComplexity, "gocyclo.min-complexity", - 30, "Minimal complexity of function to report it") - hideFlag("gocyclo.min-complexity") - - fs.BoolVar(&lsc.Maligned.SuggestNewOrder, "maligned.suggest-new", false, - "Maligned: print suggested more optimal struct fields ordering") - hideFlag("maligned.suggest-new") - - fs.IntVar(&lsc.Dupl.Threshold, "dupl.threshold", - 150, "Dupl: Minimal threshold to detect copy-paste") - hideFlag("dupl.threshold") - - fs.BoolVar(&lsc.Goconst.MatchWithConstants, "goconst.match-constant", - true, "Goconst: look for existing constants matching the values") - hideFlag("goconst.match-constant") - fs.IntVar(&lsc.Goconst.MinStringLen, "goconst.min-len", - 3, "Goconst: minimum constant string length") - hideFlag("goconst.min-len") - fs.IntVar(&lsc.Goconst.MinOccurrencesCount, "goconst.min-occurrences", - 3, "Goconst: minimum occurrences of constant string count to trigger issue") - hideFlag("goconst.min-occurrences") - fs.BoolVar(&lsc.Goconst.ParseNumbers, "goconst.numbers", - false, "Goconst: search also for duplicated numbers") - hideFlag("goconst.numbers") - fs.IntVar(&lsc.Goconst.NumberMin, "goconst.min", - 3, "minimum value, only works with goconst.numbers") - hideFlag("goconst.min") - fs.IntVar(&lsc.Goconst.NumberMax, "goconst.max", - 3, "maximum value, only works with goconst.numbers") - hideFlag("goconst.max") - fs.BoolVar(&lsc.Goconst.IgnoreCalls, "goconst.ignore-calls", - true, "Goconst: ignore when constant is not used as function argument") - hideFlag("goconst.ignore-calls") - - // (@dixonwille) These flag is only used for testing purposes. 
- fs.StringSliceVar(&lsc.Depguard.Packages, "depguard.packages", nil, - "Depguard: packages to add to the list") - hideFlag("depguard.packages") - - fs.BoolVar(&lsc.Depguard.IncludeGoRoot, "depguard.include-go-root", false, - "Depguard: check list against standard lib") - hideFlag("depguard.include-go-root") - - fs.IntVar(&lsc.Lll.TabWidth, "lll.tab-width", 1, - "Lll: tab width in spaces") - hideFlag("lll.tab-width") - - // Linters config - lc := &cfg.Linters - fs.StringSliceVarP(&lc.Enable, "enable", "E", nil, wh("Enable specific linter")) - fs.StringSliceVarP(&lc.Disable, "disable", "D", nil, wh("Disable specific linter")) - fs.BoolVar(&lc.EnableAll, "enable-all", false, wh("Enable all linters")) - if err := fs.MarkHidden("enable-all"); err != nil { - panic(err) - } - - fs.BoolVar(&lc.DisableAll, "disable-all", false, wh("Disable all linters")) - fs.StringSliceVarP(&lc.Presets, "presets", "p", nil, - wh(fmt.Sprintf("Enable presets (%s) of linters. Run 'golangci-lint linters' to see "+ - "them. This option implies option --disable-all", strings.Join(m.AllPresets(), "|")))) - fs.BoolVar(&lc.Fast, "fast", false, wh("Run only fast linters from enabled linters set (first run won't be fast)")) - - // Issues config - ic := &cfg.Issues - fs.StringSliceVarP(&ic.ExcludePatterns, "exclude", "e", nil, wh("Exclude issue by regexp")) - fs.BoolVar(&ic.UseDefaultExcludes, "exclude-use-default", true, getDefaultIssueExcludeHelp()) - fs.BoolVar(&ic.ExcludeCaseSensitive, "exclude-case-sensitive", false, wh("If set to true exclude "+ - "and exclude rules regular expressions are case sensitive")) - - fs.IntVar(&ic.MaxIssuesPerLinter, "max-issues-per-linter", 50, - wh("Maximum issues count per one linter. Set to 0 to disable")) - fs.IntVar(&ic.MaxSameIssues, "max-same-issues", 3, - wh("Maximum count of issues with the same text. Set to 0 to disable")) - - fs.BoolVarP(&ic.Diff, "new", "n", false, - wh("Show only new issues: if there are unstaged changes or untracked files, only those changes "+ - "are analyzed, else only changes in HEAD~ are analyzed.\nIt's a super-useful option for integration "+ - "of golangci-lint into existing large codebase.\nIt's not practical to fix all existing issues at "+ - "the moment of integration: much better to not allow issues in new code.\nFor CI setups, prefer "+ - "--new-from-rev=HEAD~, as --new can skip linting the current patch if any scripts generate "+ - "unstaged files before golangci-lint runs.")) - fs.StringVar(&ic.DiffFromRevision, "new-from-rev", "", - wh("Show only new issues created after git revision `REV`")) - fs.StringVar(&ic.DiffPatchFilePath, "new-from-patch", "", - wh("Show only new issues created in git patch with file path `PATH`")) - fs.BoolVar(&ic.NeedFix, "fix", false, "Fix found issues (if it's supported by the linter)") + return c.startTracing() } -func (e *Executor) initRunConfiguration(cmd *cobra.Command) { - fs := cmd.Flags() - fs.SortFlags = false // sort them as they are defined here - initFlagSet(fs, e.cfg, e.DBManager, true) +func (c *runCommand) persistentPostRunE(_ *cobra.Command, _ []string) error { + if err := c.stopTracing(); err != nil { + return err + } + + os.Exit(c.exitCode) + + return nil } -func (e *Executor) getConfigForCommandLine() (*config.Config, error) { - // We use another pflag.FlagSet here to not set `changed` flag - // on cmd.Flags() options. Otherwise string slice options will be duplicated. 
- fs := pflag.NewFlagSet("config flag set", pflag.ContinueOnError) - - var cfg config.Config - // Don't do `fs.AddFlagSet(cmd.Flags())` because it shares flags representations: - // `changed` variable inside string slice vars will be shared. - // Use another config variable here, not e.cfg, to not - // affect main parsing by this parsing of only config option. - initFlagSet(fs, &cfg, e.DBManager, false) - initVersionFlagSet(fs, &cfg) - - // Parse max options, even force version option: don't want - // to get access to Executor here: it's error-prone to use - // cfg vs e.cfg. - initRootFlagSet(fs, &cfg, true) - - fs.Usage = func() {} // otherwise help text will be printed twice - if err := fs.Parse(os.Args); err != nil { - if err == pflag.ErrHelp { - return nil, err - } +func (c *runCommand) preRunE(_ *cobra.Command, args []string) error { + dbManager, err := lintersdb.NewManager(c.log.Child(logutils.DebugKeyLintersDB), c.cfg, + lintersdb.NewLinterBuilder(), lintersdb.NewPluginModuleBuilder(c.log), lintersdb.NewPluginGoBuilder(c.log)) + if err != nil { + return err + } - return nil, fmt.Errorf("can't parse args: %s", err) + c.dbManager = dbManager + + printer, err := printers.NewPrinter(c.log, &c.cfg.Output, c.reportData) + if err != nil { + return err } - return &cfg, nil + c.printer = printer + + c.goenv = goutil.NewEnv(c.log.Child(logutils.DebugKeyGoEnv)) + + c.fileCache = fsutils.NewFileCache() + c.lineCache = fsutils.NewLineCache(c.fileCache) + + sw := timeutils.NewStopwatch("pkgcache", c.log.Child(logutils.DebugKeyStopwatch)) + + pkgCache, err := pkgcache.NewCache(sw, c.log.Child(logutils.DebugKeyPkgCache)) + if err != nil { + return fmt.Errorf("failed to build packages cache: %w", err) + } + + guard := load.NewGuard() + + pkgLoader := lint.NewPackageLoader(c.log.Child(logutils.DebugKeyLoader), c.cfg, args, c.goenv, guard) + + c.contextBuilder = lint.NewContextBuilder(c.cfg, pkgLoader, c.fileCache, pkgCache, guard) + + if err = initHashSalt(c.buildInfo.Version, c.cfg); err != nil { + return fmt.Errorf("failed to init hash salt: %w", err) + } + + if ok := c.acquireFileLock(); !ok { + return errors.New("parallel golangci-lint is running") + } + + return nil } -func (e *Executor) initRun() { - e.runCmd = &cobra.Command{ - Use: "run", - Short: "Run the linters", - Run: e.executeRun, - PreRun: func(_ *cobra.Command, _ []string) { - if ok := e.acquireFileLock(); !ok { - e.log.Fatalf("Parallel golangci-lint is running") - } - }, - PostRun: func(_ *cobra.Command, _ []string) { - e.releaseFileLock() - }, +func (c *runCommand) postRun(_ *cobra.Command, _ []string) { + c.releaseFileLock() +} + +func (c *runCommand) execute(_ *cobra.Command, args []string) { + needTrackResources := logutils.IsVerbose() || c.opts.PrintResourcesUsage + + trackResourcesEndCh := make(chan struct{}) + defer func() { // XXX: this defer must be before ctx.cancel defer + if needTrackResources { // wait until resource tracking finished to print properly + <-trackResourcesEndCh + } + }() + + ctx, cancel := context.WithTimeout(context.Background(), c.cfg.Run.Timeout) + defer cancel() + + if needTrackResources { + go watchResources(ctx, trackResourcesEndCh, c.log, c.debugf) } - e.rootCmd.AddCommand(e.runCmd) - e.runCmd.SetOut(logutils.StdOut) // use custom output to properly color it in Windows terminals - e.runCmd.SetErr(logutils.StdErr) + if err := c.runAndPrint(ctx, args); err != nil { + c.log.Errorf("Running error: %s", err) + if c.exitCode == exitcodes.Success { + var exitErr *exitcodes.ExitError + if errors.As(err, 
&exitErr) { + c.exitCode = exitErr.Code + } else { + c.exitCode = exitcodes.Failure + } + } + } - e.initRunConfiguration(e.runCmd) + c.setupExitCode(ctx) } -func fixSlicesFlags(fs *pflag.FlagSet) { - // It's a dirty hack to set flag.Changed to true for every string slice flag. - // It's necessary to merge config and command-line slices: otherwise command-line - // flags will always overwrite ones from the config. - fs.VisitAll(func(f *pflag.Flag) { - if f.Value.Type() != "stringSlice" { - return +func (c *runCommand) startTracing() error { + if c.opts.CPUProfilePath != "" { + f, err := os.Create(c.opts.CPUProfilePath) + if err != nil { + return fmt.Errorf("can't create file %s: %w", c.opts.CPUProfilePath, err) + } + if err := pprof.StartCPUProfile(f); err != nil { + return fmt.Errorf("can't start CPU profiling: %w", err) + } + } + + if c.opts.MemProfilePath != "" { + if rate := os.Getenv(envMemProfileRate); rate != "" { + runtime.MemProfileRate, _ = strconv.Atoi(rate) } + } - s, err := fs.GetStringSlice(f.Name) + if c.opts.TracePath != "" { + f, err := os.Create(c.opts.TracePath) if err != nil { - return + return fmt.Errorf("can't create file %s: %w", c.opts.TracePath, err) } + if err = trace.Start(f); err != nil { + return fmt.Errorf("can't start tracing: %w", err) + } + } + + return nil +} - if s == nil { // assume that every string slice flag has nil as the default - return +func (c *runCommand) stopTracing() error { + if c.opts.CPUProfilePath != "" { + pprof.StopCPUProfile() + } + + if c.opts.MemProfilePath != "" { + f, err := os.Create(c.opts.MemProfilePath) + if err != nil { + return fmt.Errorf("can't create file %s: %w", c.opts.MemProfilePath, err) } - var safe []string - for _, v := range s { - // add quotes to escape comma because spf13/pflag use a CSV parser: - // https://github.com/spf13/pflag/blob/85dd5c8bc61cfa382fecd072378089d4e856579d/string_slice.go#L43 - safe = append(safe, `"`+v+`"`) + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + printMemStats(&ms, c.log) + + if err := pprof.WriteHeapProfile(f); err != nil { + return fmt.Errorf("can't write heap profile: %w", err) } + _ = f.Close() + } + + if c.opts.TracePath != "" { + trace.Stop() + } - // calling Set sets Changed to true: next Set calls will append, not overwrite - _ = f.Value.Set(strings.Join(safe, ",")) - }) + return nil } -func (e *Executor) runAnalysis(ctx context.Context, args []string) ([]result.Issue, error) { - e.cfg.Run.Args = args +func (c *runCommand) runAndPrint(ctx context.Context, args []string) error { + if err := c.goenv.Discover(ctx); err != nil { + c.log.Warnf("Failed to discover go env: %s", err) + } - lintersToRun, err := e.EnabledLintersSet.GetOptimizedLinters() + if !logutils.HaveDebugTag(logutils.DebugKeyLintersOutput) { + // Don't allow linters and loader to print anything + log.SetOutput(io.Discard) + savedStdout, savedStderr := c.setOutputToDevNull() + defer func() { + os.Stdout, os.Stderr = savedStdout, savedStderr + }() + } + + enabledLintersMap, err := c.dbManager.GetEnabledLintersMap() if err != nil { - return nil, err + return err } - enabledLintersMap, err := e.EnabledLintersSet.GetEnabledLintersMap() + c.printDeprecatedLinterMessages(enabledLintersMap) + + issues, err := c.runAnalysis(ctx, args) if err != nil { - return nil, err + return err // XXX: don't lose type } - for _, lc := range e.DBManager.GetAllSupportedLinterConfigs() { + // Fills linters information for the JSON printer. 
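+	// Every supported linter is recorded here, enabled or not, so consumers
+	// of the report can tell which linters actually ran.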
+ for _, lc := range c.dbManager.GetAllSupportedLinterConfigs() { isEnabled := enabledLintersMap[lc.Name()] != nil - e.reportData.AddLinter(lc.Name(), isEnabled, lc.EnabledByDefault) + c.reportData.AddLinter(lc.Name(), isEnabled, lc.EnabledByDefault) } - lintCtx, err := e.contextLoader.Load(ctx, lintersToRun) + err = c.printer.Print(issues) if err != nil { - return nil, errors.Wrap(err, "context loading failed") + return err } - lintCtx.Log = e.log.Child("linters context") - runner, err := lint.NewRunner(e.cfg, e.log.Child("runner"), - e.goenv, e.EnabledLintersSet, e.lineCache, e.DBManager, lintCtx.Packages) + c.printStats(issues) + + c.setExitCodeIfIssuesFound(issues) + + c.fileCache.PrintStats(c.log) + + return nil +} + +// runAnalysis executes the linters that have been enabled in the configuration. +func (c *runCommand) runAnalysis(ctx context.Context, args []string) ([]result.Issue, error) { + lintersToRun, err := c.dbManager.GetOptimizedLinters() if err != nil { return nil, err } - issues, err := runner.Run(ctx, lintersToRun, lintCtx) + lintCtx, err := c.contextBuilder.Build(ctx, c.log.Child(logutils.DebugKeyLintersContext), lintersToRun) + if err != nil { + return nil, fmt.Errorf("context loading failed: %w", err) + } + + runner, err := lint.NewRunner(c.log.Child(logutils.DebugKeyRunner), c.cfg, args, + c.goenv, c.lineCache, c.fileCache, c.dbManager, lintCtx) if err != nil { return nil, err } - fixer := processors.NewFixer(e.cfg, e.log, e.fileCache) - return fixer.Process(issues), nil + return runner.Run(ctx, lintersToRun) } -func (e *Executor) setOutputToDevNull() (savedStdout, savedStderr *os.File) { +func (c *runCommand) setOutputToDevNull() (savedStdout, savedStderr *os.File) { savedStdout, savedStderr = os.Stdout, os.Stderr devNull, err := os.Open(os.DevNull) if err != nil { - e.log.Warnf("Can't open null device %q: %s", os.DevNull, err) + c.log.Warnf("Can't open null device %q: %s", os.DevNull, err) return } @@ -375,137 +401,116 @@ func (e *Executor) setOutputToDevNull() (savedStdout, savedStderr *os.File) { return } -func (e *Executor) setExitCodeIfIssuesFound(issues []result.Issue) { +func (c *runCommand) setExitCodeIfIssuesFound(issues []result.Issue) { if len(issues) != 0 { - e.exitCode = e.cfg.Run.ExitCodeIfIssuesFound + c.exitCode = c.cfg.Run.ExitCodeIfIssuesFound } } -func (e *Executor) runAndPrint(ctx context.Context, args []string) error { - if err := e.goenv.Discover(ctx); err != nil { - e.log.Warnf("Failed to discover go env: %s", err) +func (c *runCommand) printDeprecatedLinterMessages(enabledLinters map[string]*linter.Config) { + if c.cfg.InternalCmdTest { + return } - if !logutils.HaveDebugTag("linters_output") { - // Don't allow linters and loader to print anything - log.SetOutput(ioutil.Discard) - savedStdout, savedStderr := e.setOutputToDevNull() - defer func() { - os.Stdout, os.Stderr = savedStdout, savedStderr - }() - } + for name, lc := range enabledLinters { + if !lc.IsDeprecated() { + continue + } - issues, err := e.runAnalysis(ctx, args) - if err != nil { - return err // XXX: don't loose type + var extra string + if lc.Deprecation.Replacement != "" { + extra = fmt.Sprintf("Replaced by %s.", lc.Deprecation.Replacement) + } + + c.log.Warnf("The linter '%s' is deprecated (since %s) due to: %s %s", name, lc.Deprecation.Since, lc.Deprecation.Message, extra) } +} - p, err := e.createPrinter() - if err != nil { - return err +func (c *runCommand) printStats(issues []result.Issue) { + if !c.cfg.Output.ShowStats { + return } - e.setExitCodeIfIssuesFound(issues) + 
if len(issues) == 0 { + c.cmd.Println("0 issues.") + return + } - if err = p.Print(ctx, issues); err != nil { - return fmt.Errorf("can't print %d issues: %s", len(issues), err) + stats := map[string]int{} + for idx := range issues { + stats[issues[idx].FromLinter]++ } - e.fileCache.PrintStats(e.log) + c.cmd.Printf("%d issues:\n", len(issues)) - return nil -} + keys := maps.Keys(stats) + sort.Strings(keys) -func (e *Executor) createPrinter() (printers.Printer, error) { - var p printers.Printer - format := e.cfg.Output.Format - switch format { - case config.OutFormatJSON: - p = printers.NewJSON(&e.reportData) - case config.OutFormatColoredLineNumber, config.OutFormatLineNumber: - p = printers.NewText(e.cfg.Output.PrintIssuedLine, - format == config.OutFormatColoredLineNumber, e.cfg.Output.PrintLinterName, - e.log.Child("text_printer")) - case config.OutFormatTab: - p = printers.NewTab(e.cfg.Output.PrintLinterName, e.log.Child("tab_printer")) - case config.OutFormatCheckstyle: - p = printers.NewCheckstyle() - case config.OutFormatCodeClimate: - p = printers.NewCodeClimate() - case config.OutFormatHTML: - p = printers.NewHTML() - case config.OutFormatJunitXML: - p = printers.NewJunitXML() - case config.OutFormatGithubActions: - p = printers.NewGithub() - default: - return nil, fmt.Errorf("unknown output format %s", format) - } - - return p, nil + for _, key := range keys { + c.cmd.Printf("* %s: %d\n", key, stats[key]) + } } -func (e *Executor) executeRun(_ *cobra.Command, args []string) { - needTrackResources := e.cfg.Run.IsVerbose || e.cfg.Run.PrintResourcesUsage - trackResourcesEndCh := make(chan struct{}) - defer func() { // XXX: this defer must be before ctx.cancel defer - if needTrackResources { // wait until resource tracking finished to print properly - <-trackResourcesEndCh - } - }() - - e.setTimeoutToDeadlineIfOnlyDeadlineIsSet() - ctx, cancel := context.WithTimeout(context.Background(), e.cfg.Run.Timeout) - defer cancel() - - if needTrackResources { - go watchResources(ctx, trackResourcesEndCh, e.log, e.debugf) +func (c *runCommand) setupExitCode(ctx context.Context) { + if ctx.Err() != nil { + c.exitCode = exitcodes.Timeout + c.log.Errorf("Timeout exceeded: try increasing it by passing --timeout option") + return } - if err := e.runAndPrint(ctx, args); err != nil { - e.log.Errorf("Running error: %s", err) - if e.exitCode == exitcodes.Success { - if exitErr, ok := errors.Cause(err).(*exitcodes.ExitError); ok { - e.exitCode = exitErr.Code - } else { - e.exitCode = exitcodes.Failure - } - } + if c.exitCode != exitcodes.Success { + return } - e.setupExitCode(ctx) -} + needFailOnWarnings := os.Getenv(logutils.EnvTestRun) == "1" || os.Getenv(envFailOnWarnings) == "1" + if needFailOnWarnings && len(c.reportData.Warnings) != 0 { + c.exitCode = exitcodes.WarningInTest + return + } -// to be removed when deadline is finally decommissioned -func (e *Executor) setTimeoutToDeadlineIfOnlyDeadlineIsSet() { - // nolint:staticcheck - deadlineValue := e.cfg.Run.Deadline - if deadlineValue != 0 && e.cfg.Run.Timeout == defaultTimeout { - e.cfg.Run.Timeout = deadlineValue + if c.reportData.Error != "" { + // it's a case e.g. 
when the typecheck linter couldn't parse an error and just logged it
+		c.exitCode = exitcodes.ErrorWasLogged
+		return
 	}
 }

-func (e *Executor) setupExitCode(ctx context.Context) {
-	if ctx.Err() != nil {
-		e.exitCode = exitcodes.Timeout
-		e.log.Errorf("Timeout exceeded: try increasing it by passing --timeout option")
-		return
+func (c *runCommand) acquireFileLock() bool {
+	if c.cfg.Run.AllowParallelRunners {
+		c.debugf("Parallel runners are allowed, no locking")
+		return true
 	}

-	if e.exitCode != exitcodes.Success {
-		return
+	lockFile := filepath.Join(os.TempDir(), "golangci-lint.lock")
+	c.debugf("Locking on file %s...", lockFile)
+	f := flock.New(lockFile)
+	const retryDelay = time.Second
+
+	ctx := context.Background()
+	if !c.cfg.Run.AllowSerialRunners {
+		const totalTimeout = 5 * time.Second
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, totalTimeout)
+		defer cancel()
+	}
+	if ok, _ := f.TryLockContext(ctx, retryDelay); !ok {
+		return false
 	}

-	needFailOnWarnings := (os.Getenv("GL_TEST_RUN") == "1" || os.Getenv("FAIL_ON_WARNINGS") == "1")
-	if needFailOnWarnings && len(e.reportData.Warnings) != 0 {
-		e.exitCode = exitcodes.WarningInTest
+	c.flock = f
+	return true
+}
+
+func (c *runCommand) releaseFileLock() {
+	if c.cfg.Run.AllowParallelRunners {
 		return
 	}

-	if e.reportData.Error != "" {
-		// it's a case e.g. when typecheck linter couldn't parse and error and just logged it
-		e.exitCode = exitcodes.ErrorWasLogged
-		return
+	if err := c.flock.Unlock(); err != nil {
+		c.debugf("Failed to unlock on file: %s", err)
+	}
+	if err := os.Remove(c.flock.Path()); err != nil {
+		c.debugf("Failed to remove lock file: %s", err)
 	}
 }
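// The lock above is what makes concurrent `golangci-lint run` invocations
// mutually exclusive on one machine. A self-contained sketch of the same
// technique using github.com/gofrs/flock (the lock path, retry interval, and
// timeout values here are illustrative):
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"os"
//		"path/filepath"
//		"time"
//
//		"github.com/gofrs/flock"
//	)
//
//	func main() {
//		lock := flock.New(filepath.Join(os.TempDir(), "my-tool.lock"))
//
//		// Poll for the lock every second, giving up after five seconds.
//		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//		defer cancel()
//
//		ok, err := lock.TryLockContext(ctx, time.Second)
//		if err != nil || !ok {
//			fmt.Fprintln(os.Stderr, "another instance is running")
//			os.Exit(1)
//		}
//		defer lock.Unlock()
//
//		// ... do the exclusive work ...
//	}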
@@ -520,7 +525,7 @@ func watchResources(ctx context.Context, done chan struct{}, logger logutils.Log
 	ticker := time.NewTicker(intervalMS * time.Millisecond)
 	defer ticker.Stop()

-	logEveryRecord := os.Getenv("GL_MEM_LOG_EVERY") == "1"
+	logEveryRecord := os.Getenv(envMemLogEvery) == "1"
 	const MB = 1024 * 1024

 	track := func() {
@@ -564,3 +569,120 @@ func watchResources(ctx context.Context, done chan struct{}, logger logutils.Log
 	logger.Infof("Execution took %s", time.Since(startedAt))
 	close(done)
 }
+
+func setupConfigFileFlagSet(fs *pflag.FlagSet, cfg *config.LoaderOptions) {
+	fs.StringVarP(&cfg.Config, "config", "c", "", color.GreenString("Read config from file path `PATH`"))
+	fs.BoolVar(&cfg.NoConfig, "no-config", false, color.GreenString("Don't read config file"))
+}
+
+func setupRunPersistentFlags(fs *pflag.FlagSet, opts *runOptions) {
+	fs.BoolVar(&opts.PrintResourcesUsage, "print-resources-usage", false,
+		color.GreenString("Print avg and max memory usage of golangci-lint and total time"))
+
+	fs.StringVar(&opts.CPUProfilePath, "cpu-profile-path", "", color.GreenString("Path to CPU profile output file"))
+	fs.StringVar(&opts.MemProfilePath, "mem-profile-path", "", color.GreenString("Path to memory profile output file"))
+	fs.StringVar(&opts.TracePath, "trace-path", "", color.GreenString("Path to trace output file"))
+}
+
+func getDefaultConcurrency() int {
+	if os.Getenv(envHelpRun) == "1" {
+		// Make stable concurrency for generating help documentation.
+		const prettyConcurrency = 8
+		return prettyConcurrency
+	}
+
+	return runtime.NumCPU()
+}
+
+func printMemStats(ms *runtime.MemStats, logger logutils.Log) {
+	logger.Infof("Mem stats: alloc=%s total_alloc=%s sys=%s "+
+		"heap_alloc=%s heap_sys=%s heap_idle=%s heap_released=%s heap_in_use=%s "+
+		"stack_in_use=%s stack_sys=%s "+
+		"mspan_sys=%s mcache_sys=%s buck_hash_sys=%s gc_sys=%s other_sys=%s "+
+		"mallocs_n=%d frees_n=%d heap_objects_n=%d gc_cpu_fraction=%.2f",
+		formatMemory(ms.Alloc), formatMemory(ms.TotalAlloc), formatMemory(ms.Sys),
+		formatMemory(ms.HeapAlloc), formatMemory(ms.HeapSys),
+		formatMemory(ms.HeapIdle), formatMemory(ms.HeapReleased), formatMemory(ms.HeapInuse),
+		formatMemory(ms.StackInuse), formatMemory(ms.StackSys),
+		formatMemory(ms.MSpanSys), formatMemory(ms.MCacheSys), formatMemory(ms.BuckHashSys),
+		formatMemory(ms.GCSys), formatMemory(ms.OtherSys),
+		ms.Mallocs, ms.Frees, ms.HeapObjects, ms.GCCPUFraction)
+}
+
+func formatMemory(memBytes uint64) string {
+	const Kb = 1024
+	const Mb = Kb * 1024
+
+	if memBytes < Kb {
+		return fmt.Sprintf("%db", memBytes)
+	}
+	if memBytes < Mb {
+		return fmt.Sprintf("%dkb", memBytes/Kb)
+	}
+	return fmt.Sprintf("%dmb", memBytes/Mb)
+}
+
+// Related to cache.
+
+func initHashSalt(version string, cfg *config.Config) error {
+	binSalt, err := computeBinarySalt(version)
+	if err != nil {
+		return fmt.Errorf("failed to calculate binary salt: %w", err)
+	}
+
+	configSalt, err := computeConfigSalt(cfg)
+	if err != nil {
+		return fmt.Errorf("failed to calculate config salt: %w", err)
+	}
+
+	b := bytes.NewBuffer(binSalt)
+	b.Write(configSalt)
+	cache.SetSalt(b.Bytes())
+	return nil
+}
+
+func computeBinarySalt(version string) ([]byte, error) {
+	if version != "" && version != "(devel)" {
+		return []byte(version), nil
+	}
+
+	if logutils.HaveDebugTag(logutils.DebugKeyBinSalt) {
+		return []byte("debug"), nil
+	}
+
+	p, err := os.Executable()
+	if err != nil {
+		return nil, err
+	}
+	f, err := os.Open(p)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	h := sha256.New()
+	if _, err := io.Copy(h, f); err != nil {
+		return nil, err
+	}
+	return h.Sum(nil), nil
+}
+
+// computeConfigSalt computes configuration hash.
+// We don't hash all config fields to reduce meaningless cache invalidations.
+// At least, it has a huge impact on test speed.
+// Fields: `LintersSettings` and `Run.BuildTags`.
+func computeConfigSalt(cfg *config.Config) ([]byte, error) {
+	lintersSettingsBytes, err := yaml.Marshal(cfg.LintersSettings)
+	if err != nil {
+		return nil, fmt.Errorf("failed to yaml marshal config linter settings: %w", err)
+	}
+
+	configData := bytes.NewBufferString("linters-settings=")
+	configData.Write(lintersSettingsBytes)
+	configData.WriteString("\nbuild-tags=" + strings.Join(cfg.Run.BuildTags, ","))
+
+	h := sha256.New()
+	if _, err := h.Write(configData.Bytes()); err != nil {
+		return nil, err
+	}
+	return h.Sum(nil), nil
+}
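// The two salts above combine into a single cache key prefix: change the
// binary or the lint-relevant config and every cached entry is invalidated,
// while edits to unrelated config sections leave the cache warm. A minimal
// sketch of that composition (hash layout and field choice are illustrative):
//
//	func cacheSalt(version string, settings any, buildTags []string) ([]byte, error) {
//		cfgBytes, err := yaml.Marshal(settings) // only the fields that affect results
//		if err != nil {
//			return nil, err
//		}
//		h := sha256.New()
//		h.Write([]byte(version))
//		h.Write(cfgBytes)
//		h.Write([]byte(strings.Join(buildTags, ",")))
//		return h.Sum(nil), nil
//	}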
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go
index 8b48e515bf..a03e46e221 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go
@@ -3,58 +3,96 @@ package commands
 import (
 	"encoding/json"
 	"fmt"
+	"io"
+	"os"
+	"runtime/debug"
 	"strings"

+	"github.com/fatih/color"
 	"github.com/spf13/cobra"
-	"github.com/spf13/pflag"
-
-	"github.com/golangci/golangci-lint/pkg/config"
 )

-type jsonVersion struct {
-	Version string `json:"version"`
-	Commit  string `json:"commit"`
-	Date    string `json:"date"`
+type BuildInfo struct {
+	GoVersion string `json:"goVersion"`
+	Version   string `json:"version"`
+	Commit    string `json:"commit"`
+	Date      string `json:"date"`
 }

-func (e *Executor) initVersionConfiguration(cmd *cobra.Command) {
-	fs := cmd.Flags()
-	fs.SortFlags = false // sort them as they are defined here
-	initVersionFlagSet(fs, e.cfg)
+type versionInfo struct {
+	Info      BuildInfo
+	BuildInfo *debug.BuildInfo
+}
+
+type versionOptions struct {
+	Format string
+	Debug  bool
 }

-func initVersionFlagSet(fs *pflag.FlagSet, cfg *config.Config) {
-	// Version config
-	vc := &cfg.Version
-	fs.StringVar(&vc.Format, "format", "", wh("The version's format can be: 'short', 'json'"))
+type versionCommand struct {
+	cmd  *cobra.Command
+	opts versionOptions
+
+	info BuildInfo
 }

-func (e *Executor) initVersion() {
+func newVersionCommand(info BuildInfo) *versionCommand {
+	c := &versionCommand{info: info}
+
 	versionCmd := &cobra.Command{
-		Use:   "version",
-		Short: "Version",
-		RunE: func(cmd *cobra.Command, _ []string) error {
-			switch strings.ToLower(e.cfg.Version.Format) {
-			case "short":
-				fmt.Println(e.version)
-			case "json":
-				ver := jsonVersion{
-					Version: e.version,
-					Commit:  e.commit,
-					Date:    e.date,
-				}
-				data, err := json.Marshal(&ver)
-				if err != nil {
-					return err
-				}
-				fmt.Println(string(data))
-			default:
-				fmt.Printf("golangci-lint has version %s built from %s on %s\n", e.version, e.commit, e.date)
-			}
+		Use:               "version",
+		Short:             "Version",
+		Args:              cobra.NoArgs,
+		ValidArgsFunction: cobra.NoFileCompletions,
+		RunE:              c.execute,
+	}
+
+	fs := versionCmd.Flags()
+	fs.SortFlags = false // sort them as they are defined here
+
+	fs.StringVar(&c.opts.Format, "format", "", color.GreenString("The version's format can be: 'short', 'json'"))
+	fs.BoolVar(&c.opts.Debug, "debug", false, color.GreenString("Add build information"))
+
+	c.cmd = versionCmd
+
+	return c
+}
+
+func (c *versionCommand) execute(_ *cobra.Command, _ []string) error {
+	if c.opts.Debug {
+		info, ok := debug.ReadBuildInfo()
+		if !ok {
 			return nil
-		},
+		}
+
+		switch strings.ToLower(c.opts.Format) {
+		case "json":
+			return json.NewEncoder(os.Stdout).Encode(versionInfo{
+				Info:      c.info,
+				BuildInfo: info,
+			})
+
+		default:
+			fmt.Println(info.String())
+			return printVersion(os.Stdout, c.info)
+		}
 	}
-	e.rootCmd.AddCommand(versionCmd)
-	e.initVersionConfiguration(versionCmd)
+
+	switch strings.ToLower(c.opts.Format) {
+	case "short":
+		fmt.Println(c.info.Version)
+		return nil
+
+	case "json":
+		return json.NewEncoder(os.Stdout).Encode(c.info)
+
+	default:
+		return printVersion(os.Stdout, c.info)
+	}
+}
+
+func printVersion(w io.Writer, info BuildInfo) error {
+	_, err := fmt.Fprintf(w, "golangci-lint has version %s built with %s from %s on %s\n",
+		info.Version, info.GoVersion, info.Commit, info.Date)
+	return err
+}
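// The --debug path above works because any module-built Go binary embeds its
// build metadata. A minimal standalone sketch of reading it (the exact output
// depends on how the binary was built):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime/debug"
//	)
//
//	func main() {
//		info, ok := debug.ReadBuildInfo()
//		if !ok {
//			fmt.Println("binary built without module support")
//			return
//		}
//		fmt.Println(info.Main.Path, info.Main.Version)
//		for _, s := range info.Settings {
//			if s.Key == "vcs.revision" {
//				fmt.Println("commit:", s.Value)
//			}
//		}
//	}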
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go
index 931ddbbbee..af27a91b55 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go
@@ -1,20 +1,55 @@
 package config

+import (
+	"os"
+	"regexp"
+	"strings"
+
+	hcversion "github.com/hashicorp/go-version"
+	"github.com/ldez/gomoddirectives"
+)
+
+// Config encapsulates the config data specified in the golangci-lint yaml config file.
 type Config struct {
-	Run Run
+	cfgDir string // The directory containing the golangci-lint config file.

-	Output Output
+	Run Run `mapstructure:"run"`
+
+	Output Output `mapstructure:"output"`

 	LintersSettings LintersSettings `mapstructure:"linters-settings"`
-	Linters         Linters
-	Issues          Issues
-	Severity        Severity
-	Version         Version
+	Linters         Linters  `mapstructure:"linters"`
+	Issues          Issues   `mapstructure:"issues"`
+	Severity        Severity `mapstructure:"severity"`

-	InternalCmdTest bool `mapstructure:"internal-cmd-test"` // Option is used only for testing golangci-lint command, don't use it
+	InternalCmdTest bool // Option is used only for testing golangci-lint command, don't use it
 	InternalTest    bool // Option is used only for testing golangci-lint code, don't use it
 }

+// GetConfigDir returns the directory that contains golangci config file.
+func (c *Config) GetConfigDir() string {
+	return c.cfgDir
+}
+
+func (c *Config) Validate() error {
+	validators := []func() error{
+		c.Issues.Validate,
+		c.Severity.Validate,
+		c.LintersSettings.Validate,
+		c.Linters.Validate,
+		c.Output.Validate,
+		c.Run.Validate,
+	}
+
+	for _, v := range validators {
+		if err := v(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func NewDefault() *Config {
 	return &Config{
 		LintersSettings: defaultLintersSettings,
@@ -23,4 +58,53 @@

 type Version struct {
 	Format string `mapstructure:"format"`
+	Debug  bool   `mapstructure:"debug"`
+}
+
+func IsGoGreaterThanOrEqual(current, limit string) bool {
+	v1, err := hcversion.NewVersion(strings.TrimPrefix(current, "go"))
+	if err != nil {
+		return false
+	}
+
+	l, err := hcversion.NewVersion(limit)
+	if err != nil {
+		return false
+	}
+
+	return v1.GreaterThanOrEqual(l)
+}
+
+func detectGoVersion() string {
+	file, _ := gomoddirectives.GetModuleFile()
+
+	if file != nil && file.Go != nil && file.Go.Version != "" {
+		return file.Go.Version
+	}
+
+	v := os.Getenv("GOVERSION")
+	if v != "" {
+		return v
+	}
+
+	return "1.17"
+}
+
+// Trims the Go version to keep only M.m.
+// Since Go 1.21 the version inside the go.mod can be a patched version (ex: 1.21.0).
+// The version can also include information which we want to remove (ex: 1.21alpha1).
+// https://go.dev/doc/toolchain#versions
+// This is a problem with staticcheck and gocritic.
+func trimGoVersion(v string) string {
+	if v == "" {
+		return ""
+	}
+
+	exp := regexp.MustCompile(`(\d\.\d+)(?:\.\d+|[a-z]+\d)`)
+
+	if exp.MatchString(v) {
+		return exp.FindStringSubmatch(v)[1]
+	}
+
+	return v
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go b/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go
index 71bf2a90ef..45424b1793 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go
@@ -1,6 +1,7 @@
 package config

 import (
+	"errors"
 	"fmt"
 	"regexp"
 )
@@ -54,7 +55,7 @@ var DefaultExcludePatterns = []ExcludePattern{
 	},
 	{
 		ID:      "EXC0008",
-		Pattern: "(G104|G307)",
+		Pattern: "(G104)",
 		Linter:  "gosec",
 		Why:     "Duplicated errcheck checks",
 	},
@@ -97,7 +98,7 @@ var DefaultExcludePatterns = []ExcludePattern{
 	},
 	{
 		ID:      "EXC0015",
-		Pattern: `should have a package comment, unless it's in another file for this package`,
+		Pattern: `should have a package comment`,
 		Linter:  "revive",
 		Why:     "Annoying issue about not having a comment. The rare codebase has such comments",
 	},
@@ -108,59 +109,95 @@ type Issues struct {
 	ExcludeCaseSensitive bool          `mapstructure:"exclude-case-sensitive"`
 	ExcludePatterns      []string      `mapstructure:"exclude"`
 	ExcludeRules         []ExcludeRule `mapstructure:"exclude-rules"`
+	ExcludeGeneratedStrict bool        `mapstructure:"exclude-generated-strict"`
 	UseDefaultExcludes   bool          `mapstructure:"exclude-use-default"`

+	ExcludeFiles          []string `mapstructure:"exclude-files"`
+	ExcludeDirs           []string `mapstructure:"exclude-dirs"`
+	UseDefaultExcludeDirs bool     `mapstructure:"exclude-dirs-use-default"`
+
 	MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"`
 	MaxSameIssues      int `mapstructure:"max-same-issues"`

 	DiffFromRevision  string `mapstructure:"new-from-rev"`
 	DiffPatchFilePath string `mapstructure:"new-from-patch"`
+	WholeFiles        bool   `mapstructure:"whole-files"`
 	Diff              bool   `mapstructure:"new"`

 	NeedFix bool `mapstructure:"fix"`
 }

+func (i *Issues) Validate() error {
+	for i, rule := range i.ExcludeRules {
+		if err := rule.Validate(); err != nil {
+			return fmt.Errorf("error in exclude rule #%d: %w", i, err)
+		}
+	}
+
+	return nil
+}
+
 type ExcludeRule struct {
 	BaseRule `mapstructure:",squash"`
 }

-func (e ExcludeRule) Validate() error {
+func (e *ExcludeRule) Validate() error {
 	return e.BaseRule.Validate(excludeRuleMinConditionsCount)
 }

 type BaseRule struct {
-	Linters []string
-	Path    string
-	Text    string
-	Source  string
+	Linters    []string
+	Path       string
+	PathExcept string `mapstructure:"path-except"`
+	Text       string
+	Source     string
 }

-func (b BaseRule) Validate(minConditionsCount int) error {
+func (b *BaseRule) Validate(minConditionsCount int) error {
 	if err := validateOptionalRegex(b.Path); err != nil {
-		return fmt.Errorf("invalid path regex: %v", err)
+		return fmt.Errorf("invalid path regex: %w", err)
 	}
+
+	if err := validateOptionalRegex(b.PathExcept); err != nil {
+		return fmt.Errorf("invalid path-except regex: %w", err)
+	}
+
 	if err := validateOptionalRegex(b.Text); err != nil {
-		return fmt.Errorf("invalid text regex: %v", err)
+		return fmt.Errorf("invalid text regex: %w", err)
 	}
+
 	if err := validateOptionalRegex(b.Source); err != nil {
-		return fmt.Errorf("invalid source regex: %v", err)
+		return fmt.Errorf("invalid source regex: %w", err)
+	}
+
+	if b.Path != "" && b.PathExcept != "" {
+		return errors.New("path and path-except should not be set at the same time")
 	}
+
 	nonBlank := 0
 	if len(b.Linters) > 0 {
 		nonBlank++
 	}
-	if b.Path != "" {
+
+	// Filtering by path counts as one condition, regardless of how it is done (one or both).
+	// Otherwise, a rule with Path and PathExcept set would pass validation
+	// whereas before the introduction of path-except that wouldn't have been precise enough.
+	if b.Path != "" || b.PathExcept != "" {
 		nonBlank++
 	}
+
 	if b.Text != "" {
 		nonBlank++
 	}
+
 	if b.Source != "" {
 		nonBlank++
 	}
+
 	if nonBlank < minConditionsCount {
-		return fmt.Errorf("at least %d of (text, source, path, linters) should be set", minConditionsCount)
+		return fmt.Errorf("at least %d of (text, source, path[-except], linters) should be set", minConditionsCount)
 	}
+
 	return nil
 }

@@ -168,6 +205,7 @@ func validateOptionalRegex(value string) error {
 	if value == "" {
 		return nil
 	}
+
 	_, err := regexp.Compile(value)
 	return err
 }
@@ -187,15 +225,16 @@ func GetDefaultExcludePatternsStrings() []string {
 	return ret
 }

+// TODO(ldez): this behavior must be changed in v2, because this is confusing.
 func GetExcludePatterns(include []string) []ExcludePattern {
-	includeMap := make(map[string]bool, len(include))
+	includeMap := make(map[string]struct{}, len(include))
 	for _, inc := range include {
-		includeMap[inc] = true
+		includeMap[inc] = struct{}{}
 	}

 	var ret []ExcludePattern
 	for _, p := range DefaultExcludePatterns {
-		if !includeMap[p.ID] {
+		if _, ok := includeMap[p.ID]; !ok {
 			ret = append(ret, p)
 		}
 	}
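# The new `path-except` condition above inverts `path` matching. A hedged
# .golangci.yml sketch of exclude rules using it (linter names and regexps
# are illustrative):
#
#   issues:
#     exclude-rules:
#       # Silence lll everywhere except in the core package.
#       - path-except: internal/core/.*\.go
#         linters:
#           - lll
#       # `path` and `path-except` cannot be combined in one rule;
#       # use two separate rules instead.
#       - path: _test\.go
#         linters:
#           - dupl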
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters.go
index ccbdc123a1..5c2628272c 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/linters.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/linters.go
@@ -1,5 +1,10 @@
 package config

+import (
+	"errors"
+	"fmt"
+)
+
 type Linters struct {
 	Enable     []string
 	Disable    []string
@@ -9,3 +14,52 @@ type Linters struct {

 	Presets []string
 }
+
+func (l *Linters) Validate() error {
+	if err := l.validateAllDisableEnableOptions(); err != nil {
+		return err
+	}
+
+	if err := l.validateDisabledAndEnabledAtOneMoment(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (l *Linters) validateAllDisableEnableOptions() error {
+	if l.EnableAll && l.DisableAll {
+		return errors.New("--enable-all and --disable-all options must not be combined")
+	}
+
+	if l.DisableAll {
+		if len(l.Enable) == 0 && len(l.Presets) == 0 {
+			return errors.New("all linters were disabled, but no one linter was enabled: must enable at least one")
+		}
+
+		if len(l.Disable) != 0 {
+			return errors.New("can't combine options --disable-all and --disable")
+		}
+	}
+
+	if l.EnableAll && len(l.Enable) != 0 && !l.Fast {
+		return errors.New("can't combine options --enable-all and --enable")
+	}
+
+	return nil
+}
+
+func (l *Linters) validateDisabledAndEnabledAtOneMoment() error {
+	enabledLintersSet := map[string]bool{}
+	for _, name := range l.Enable {
+		enabledLintersSet[name] = true
+	}
+
+	for _, name := range l.Disable {
+		if enabledLintersSet[name] {
+			return fmt.Errorf("linter %q can't be disabled and enabled at one moment", name)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go
index 3ee1854f6a..6097be06b1 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go
@@ -1,143 +1,322 @@
 package config

-import "github.com/pkg/errors"
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"runtime"
+
+	"gopkg.in/yaml.v3"
+)

 var
defaultLintersSettings = LintersSettings{ + Asasalint: AsasalintSettings{ + UseBuiltinExclusions: true, + }, + Decorder: DecorderSettings{ + DecOrder: []string{"type", "const", "var", "func"}, + DisableDecNumCheck: true, + DisableDecOrderCheck: true, + DisableInitFuncFirstCheck: true, + }, + Dogsled: DogsledSettings{ + MaxBlankIdentifiers: 2, + }, + Dupl: DuplSettings{ + Threshold: 150, + }, + Errcheck: ErrcheckSettings{ + Ignore: "fmt:.*", + }, + ErrorLint: ErrorLintSettings{ + Errorf: true, + ErrorfMulti: true, + Asserts: true, + Comparison: true, + }, + Exhaustive: ExhaustiveSettings{ + Check: []string{"switch"}, + CheckGenerated: false, + DefaultSignifiesExhaustive: false, + IgnoreEnumMembers: "", + PackageScopeOnly: false, + ExplicitExhaustiveMap: false, + ExplicitExhaustiveSwitch: false, + }, + Forbidigo: ForbidigoSettings{ + ExcludeGodocExamples: true, + }, + Gci: GciSettings{ + Sections: []string{"standard", "default"}, + SkipGenerated: true, + }, + Gocognit: GocognitSettings{ + MinComplexity: 30, + }, + Goconst: GoConstSettings{ + MatchWithConstants: true, + MinStringLen: 3, + MinOccurrencesCount: 3, + NumberMin: 3, + NumberMax: 3, + IgnoreCalls: true, + }, + Gocritic: GoCriticSettings{ + SettingsPerCheck: map[string]GoCriticCheckSettings{}, + }, + Gocyclo: GoCycloSettings{ + MinComplexity: 30, + }, + Godox: GodoxSettings{ + Keywords: []string{}, + }, + Godot: GodotSettings{ + Scope: "declarations", + Period: true, + }, + Gofmt: GoFmtSettings{ + Simplify: true, + }, + Gofumpt: GofumptSettings{ + LangVersion: "", + ModulePath: "", + ExtraRules: false, + }, + Gosec: GoSecSettings{ + Concurrency: runtime.NumCPU(), + }, + Gosmopolitan: GosmopolitanSettings{ + AllowTimeLocal: false, + EscapeHatches: []string{}, + IgnoreTests: true, + WatchForScripts: []string{"Han"}, + }, + Inamedparam: INamedParamSettings{ + SkipSingleParam: false, + }, + InterfaceBloat: InterfaceBloatSettings{ + Max: 10, + }, Lll: LllSettings{ LineLength: 120, TabWidth: 1, }, - Unparam: UnparamSettings{ - Algo: "cha", + LoggerCheck: LoggerCheckSettings{ + Kitlog: true, + Klog: true, + Logr: true, + Zap: true, + RequireStringKey: false, + NoPrintfLike: false, + Rules: nil, + }, + MaintIdx: MaintIdxSettings{ + Under: 20, }, Nakedret: NakedretSettings{ MaxFuncLines: 30, }, + Nestif: NestifSettings{ + MinComplexity: 5, + }, + NoLintLint: NoLintLintSettings{ + RequireExplanation: false, + RequireSpecific: false, + AllowUnused: false, + }, + PerfSprint: PerfSprintSettings{ + IntConversion: true, + ErrError: false, + ErrorF: true, + SprintF1: true, + StrConcat: true, + }, Prealloc: PreallocSettings{ Simple: true, RangeLoops: true, ForLoops: false, }, - Gocritic: GocriticSettings{ - SettingsPerCheck: map[string]GocriticCheckSettings{}, + Predeclared: PredeclaredSettings{ + Ignore: "", + Qualified: false, }, - Godox: GodoxSettings{ - Keywords: []string{}, + SlogLint: SlogLintSettings{ + NoMixedArgs: true, + KVOnly: false, + AttrOnly: false, + ContextOnly: false, + StaticMsg: false, + NoRawKeys: false, + KeyNamingCase: "", + ArgsOnSepLines: false, }, - Dogsled: DogsledSettings{ - MaxBlankIdentifiers: 2, + TagAlign: TagAlignSettings{ + Align: true, + Sort: true, + Order: nil, + Strict: false, }, - Gocognit: GocognitSettings{ - MinComplexity: 30, + Testpackage: TestpackageSettings{ + SkipRegexp: `(export|internal)_test\.go`, + AllowPackages: []string{"main"}, + }, + Unparam: UnparamSettings{ + Algo: "cha", + }, + Unused: UnusedSettings{ + FieldWritesAreUses: true, + PostStatementsAreReads: false, + ExportedIsUsed: true, + 
ExportedFieldsAreUsed: true, + ParametersAreUsed: true, + LocalVariablesAreUsed: true, + GeneratedIsUsed: true, + }, + UseStdlibVars: UseStdlibVarsSettings{ + HTTPMethod: true, + HTTPStatusCode: true, + }, + Varnamelen: VarnamelenSettings{ + MaxDistance: 5, + MinNameLength: 3, }, WSL: WSLSettings{ StrictAppend: true, AllowAssignAndCallCuddle: true, AllowAssignAndAnythingCuddle: false, AllowMultiLineAssignCuddle: true, - AllowCuddleDeclaration: false, + ForceCaseTrailingWhitespaceLimit: 0, AllowTrailingComment: false, AllowSeparatedLeadingComment: false, + AllowCuddleDeclaration: false, + AllowCuddleWithCalls: []string{"Lock", "RLock"}, + AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, ForceCuddleErrCheckAndAssign: false, + ErrorVariableNames: []string{"err"}, ForceExclusiveShortDeclarations: false, - ForceCaseTrailingWhitespaceLimit: 0, - }, - NoLintLint: NoLintLintSettings{ - RequireExplanation: false, - AllowLeadingSpace: true, - RequireSpecific: false, - AllowUnused: false, - }, - Testpackage: TestpackageSettings{ - SkipRegexp: `(export|internal)_test\.go`, - }, - Nestif: NestifSettings{ - MinComplexity: 5, - }, - Exhaustive: ExhaustiveSettings{ - CheckGenerated: false, - DefaultSignifiesExhaustive: false, - }, - Gofumpt: GofumptSettings{ - ExtraRules: false, - }, - ErrorLint: ErrorLintSettings{ - Errorf: true, - Asserts: true, - Comparison: true, - }, - Ifshort: IfshortSettings{ - MaxDeclLines: 1, - MaxDeclChars: 30, - }, - Predeclared: PredeclaredSettings{ - Ignore: "", - Qualified: false, - }, - Forbidigo: ForbidigoSettings{ - ExcludeGodocExamples: true, }, } type LintersSettings struct { - Cyclop Cyclop - Depguard DepGuardSettings - Dogsled DogsledSettings - Dupl DuplSettings - Errcheck ErrcheckSettings - ErrorLint ErrorLintSettings - Exhaustive ExhaustiveSettings - ExhaustiveStruct ExhaustiveStructSettings - Forbidigo ForbidigoSettings - Funlen FunlenSettings - Gci GciSettings - Gocognit GocognitSettings - Goconst GoConstSettings - Gocritic GocriticSettings - Gocyclo GoCycloSettings - Godot GodotSettings - Godox GodoxSettings - Gofmt GoFmtSettings - Gofumpt GofumptSettings - Goheader GoHeaderSettings - Goimports GoImportsSettings - Golint GoLintSettings - Gomnd GoMndSettings - GoModDirectives GoModDirectivesSettings - Gomodguard GoModGuardSettings - Gosec GoSecSettings - Gosimple StaticCheckSettings - Govet GovetSettings - Ifshort IfshortSettings - ImportAs ImportAsSettings - Lll LllSettings - Makezero MakezeroSettings - Maligned MalignedSettings - Misspell MisspellSettings - Nakedret NakedretSettings - Nestif NestifSettings - NoLintLint NoLintLintSettings - Prealloc PreallocSettings - Predeclared PredeclaredSettings - Promlinter PromlinterSettings - Revive ReviveSettings - RowsErrCheck RowsErrCheckSettings - Staticcheck StaticCheckSettings - Structcheck StructCheckSettings - Stylecheck StaticCheckSettings - Tagliatelle TagliatelleSettings - Testpackage TestpackageSettings - Thelper ThelperSettings - Unparam UnparamSettings - Unused StaticCheckSettings - Varcheck VarCheckSettings - Whitespace WhitespaceSettings - Wrapcheck WrapcheckSettings - WSL WSLSettings + Asasalint AsasalintSettings + BiDiChk BiDiChkSettings + CopyLoopVar CopyLoopVarSettings + Cyclop Cyclop + Decorder DecorderSettings + Depguard DepGuardSettings + Dogsled DogsledSettings + Dupl DuplSettings + DupWord DupWordSettings + Errcheck ErrcheckSettings + ErrChkJSON ErrChkJSONSettings + ErrorLint ErrorLintSettings + Exhaustive ExhaustiveSettings + Exhaustruct ExhaustructSettings + Forbidigo ForbidigoSettings + Funlen 
FunlenSettings + Gci GciSettings + GinkgoLinter GinkgoLinterSettings + Gocognit GocognitSettings + Goconst GoConstSettings + Gocritic GoCriticSettings + Gocyclo GoCycloSettings + Godot GodotSettings + Godox GodoxSettings + Gofmt GoFmtSettings + Gofumpt GofumptSettings + Goheader GoHeaderSettings + Goimports GoImportsSettings + Gomnd GoMndSettings + GoModDirectives GoModDirectivesSettings + Gomodguard GoModGuardSettings + Gosec GoSecSettings + Gosimple StaticCheckSettings + Gosmopolitan GosmopolitanSettings + Govet GovetSettings + Grouper GrouperSettings + ImportAs ImportAsSettings + Inamedparam INamedParamSettings + InterfaceBloat InterfaceBloatSettings + Ireturn IreturnSettings + Lll LllSettings + LoggerCheck LoggerCheckSettings + MaintIdx MaintIdxSettings + Makezero MakezeroSettings + Misspell MisspellSettings + MustTag MustTagSettings + Nakedret NakedretSettings + Nestif NestifSettings + NilNil NilNilSettings + Nlreturn NlreturnSettings + NoLintLint NoLintLintSettings + NoNamedReturns NoNamedReturnsSettings + ParallelTest ParallelTestSettings + PerfSprint PerfSprintSettings + Prealloc PreallocSettings + Predeclared PredeclaredSettings + Promlinter PromlinterSettings + ProtoGetter ProtoGetterSettings + Reassign ReassignSettings + Revive ReviveSettings + RowsErrCheck RowsErrCheckSettings + SlogLint SlogLintSettings + Spancheck SpancheckSettings + Staticcheck StaticCheckSettings + Stylecheck StaticCheckSettings + TagAlign TagAlignSettings + Tagliatelle TagliatelleSettings + Tenv TenvSettings + Testifylint TestifylintSettings + Testpackage TestpackageSettings + Thelper ThelperSettings + Unconvert UnconvertSettings + Unparam UnparamSettings + Unused UnusedSettings + UseStdlibVars UseStdlibVarsSettings + Varnamelen VarnamelenSettings + Whitespace WhitespaceSettings + Wrapcheck WrapcheckSettings + WSL WSLSettings Custom map[string]CustomLinterSettings } +func (s *LintersSettings) Validate() error { + if err := s.Govet.Validate(); err != nil { + return err + } + + for name, settings := range s.Custom { + if err := settings.Validate(); err != nil { + return fmt.Errorf("custom linter %q: %w", name, err) + } + } + + return nil +} + +type AsasalintSettings struct { + Exclude []string `mapstructure:"exclude"` + UseBuiltinExclusions bool `mapstructure:"use-builtin-exclusions"` + IgnoreTest bool `mapstructure:"ignore-test"` +} + +type BiDiChkSettings struct { + LeftToRightEmbedding bool `mapstructure:"left-to-right-embedding"` + RightToLeftEmbedding bool `mapstructure:"right-to-left-embedding"` + PopDirectionalFormatting bool `mapstructure:"pop-directional-formatting"` + LeftToRightOverride bool `mapstructure:"left-to-right-override"` + RightToLeftOverride bool `mapstructure:"right-to-left-override"` + LeftToRightIsolate bool `mapstructure:"left-to-right-isolate"` + RightToLeftIsolate bool `mapstructure:"right-to-left-isolate"` + FirstStrongIsolate bool `mapstructure:"first-strong-isolate"` + PopDirectionalIsolate bool `mapstructure:"pop-directional-isolate"` +} + +type CopyLoopVarSettings struct { + IgnoreAlias bool `mapstructure:"ignore-alias"` +} + type Cyclop struct { MaxComplexity int `mapstructure:"max-complexity"` PackageAverage float64 `mapstructure:"package-average"` @@ -145,10 +324,30 @@ type Cyclop struct { } type DepGuardSettings struct { - ListType string `mapstructure:"list-type"` - Packages []string - IncludeGoRoot bool `mapstructure:"include-go-root"` - PackagesWithErrorMessage map[string]string `mapstructure:"packages-with-error-message"` + Rules map[string]*DepGuardList 
`mapstructure:"rules"` +} + +type DepGuardList struct { + ListMode string `mapstructure:"list-mode"` + Files []string `mapstructure:"files"` + Allow []string `mapstructure:"allow"` + Deny []DepGuardDeny `mapstructure:"deny"` +} + +type DepGuardDeny struct { + Pkg string `mapstructure:"pkg"` + Desc string `mapstructure:"desc"` +} + +type DecorderSettings struct { + DecOrder []string `mapstructure:"dec-order"` + IgnoreUnderscoreVars bool `mapstructure:"ignore-underscore-vars"` + DisableDecNumCheck bool `mapstructure:"disable-dec-num-check"` + DisableTypeDecNumCheck bool `mapstructure:"disable-type-dec-num-check"` + DisableConstDecNumCheck bool `mapstructure:"disable-const-dec-num-check"` + DisableVarDecNumCheck bool `mapstructure:"disable-var-dec-num-check"` + DisableDecOrderCheck bool `mapstructure:"disable-dec-order-check"` + DisableInitFuncFirstCheck bool `mapstructure:"disable-init-func-first-check"` } type DogsledSettings struct { @@ -159,40 +358,118 @@ type DuplSettings struct { Threshold int } +type DupWordSettings struct { + Keywords []string `mapstructure:"keywords"` + Ignore []string `mapstructure:"ignore"` +} + type ErrcheckSettings struct { - CheckTypeAssertions bool `mapstructure:"check-type-assertions"` - CheckAssignToBlank bool `mapstructure:"check-blank"` - Ignore string `mapstructure:"ignore"` - Exclude string `mapstructure:"exclude"` + DisableDefaultExclusions bool `mapstructure:"disable-default-exclusions"` + CheckTypeAssertions bool `mapstructure:"check-type-assertions"` + CheckAssignToBlank bool `mapstructure:"check-blank"` + Ignore string `mapstructure:"ignore"` + ExcludeFunctions []string `mapstructure:"exclude-functions"` + + // Deprecated: use ExcludeFunctions instead + Exclude string `mapstructure:"exclude"` +} + +type ErrChkJSONSettings struct { + CheckErrorFreeEncoding bool `mapstructure:"check-error-free-encoding"` + ReportNoExported bool `mapstructure:"report-no-exported"` } type ErrorLintSettings struct { - Errorf bool `mapstructure:"errorf"` - Asserts bool `mapstructure:"asserts"` - Comparison bool `mapstructure:"comparison"` + Errorf bool `mapstructure:"errorf"` + ErrorfMulti bool `mapstructure:"errorf-multi"` + Asserts bool `mapstructure:"asserts"` + Comparison bool `mapstructure:"comparison"` } type ExhaustiveSettings struct { - CheckGenerated bool `mapstructure:"check-generated"` - DefaultSignifiesExhaustive bool `mapstructure:"default-signifies-exhaustive"` + Check []string `mapstructure:"check"` + CheckGenerated bool `mapstructure:"check-generated"` + DefaultSignifiesExhaustive bool `mapstructure:"default-signifies-exhaustive"` + IgnoreEnumMembers string `mapstructure:"ignore-enum-members"` + IgnoreEnumTypes string `mapstructure:"ignore-enum-types"` + PackageScopeOnly bool `mapstructure:"package-scope-only"` + ExplicitExhaustiveMap bool `mapstructure:"explicit-exhaustive-map"` + ExplicitExhaustiveSwitch bool `mapstructure:"explicit-exhaustive-switch"` + DefaultCaseRequired bool `mapstructure:"default-case-required"` +} + +type ExhaustructSettings struct { + Include []string `mapstructure:"include"` + Exclude []string `mapstructure:"exclude"` } -type ExhaustiveStructSettings struct { - StructPatterns []string `mapstructure:"struct-patterns"` +type ForbidigoSettings struct { + Forbid []ForbidigoPattern `mapstructure:"forbid"` + ExcludeGodocExamples bool `mapstructure:"exclude-godoc-examples"` + AnalyzeTypes bool `mapstructure:"analyze-types"` } -type ForbidigoSettings struct { - Forbid []string `mapstructure:"forbid"` - ExcludeGodocExamples bool 
`mapstructure:"exclude-godoc-examples"` +var _ encoding.TextUnmarshaler = &ForbidigoPattern{} + +// ForbidigoPattern corresponds to forbidigo.pattern and adds mapstructure support. +// The YAML field names must match what forbidigo expects. +type ForbidigoPattern struct { + // patternString gets populated when the config contains a string as entry in ForbidigoSettings.Forbid[] + // because ForbidigoPattern implements encoding.TextUnmarshaler + // and the reader uses the mapstructure.TextUnmarshallerHookFunc as decoder hook. + // + // If the entry is a map, then the other fields are set as usual by mapstructure. + patternString string + + Pattern string `yaml:"p" mapstructure:"p"` + Package string `yaml:"pkg,omitempty" mapstructure:"pkg,omitempty"` + Msg string `yaml:"msg,omitempty" mapstructure:"msg,omitempty"` +} + +func (p *ForbidigoPattern) UnmarshalText(text []byte) error { + // Validation happens when instantiating forbidigo. + p.patternString = string(text) + return nil +} + +// MarshalString converts the pattern into a string as needed by forbidigo.NewLinter. +// +// MarshalString is intentionally not called MarshalText, +// although it has the same signature +// because implementing encoding.TextMarshaler led to infinite recursion when yaml.Marshal called MarshalText. +func (p *ForbidigoPattern) MarshalString() ([]byte, error) { + if p.patternString != "" { + return []byte(p.patternString), nil + } + + return yaml.Marshal(p) } type FunlenSettings struct { - Lines int - Statements int + Lines int + Statements int + IgnoreComments bool `mapstructure:"ignore-comments"` } type GciSettings struct { - LocalPrefixes string `mapstructure:"local-prefixes"` + LocalPrefixes string `mapstructure:"local-prefixes"` // Deprecated + Sections []string `mapstructure:"sections"` + SkipGenerated bool `mapstructure:"skip-generated"` + CustomOrder bool `mapstructure:"custom-order"` +} + +type GinkgoLinterSettings struct { + SuppressLenAssertion bool `mapstructure:"suppress-len-assertion"` + SuppressNilAssertion bool `mapstructure:"suppress-nil-assertion"` + SuppressErrAssertion bool `mapstructure:"suppress-err-assertion"` + SuppressCompareAssertion bool `mapstructure:"suppress-compare-assertion"` + SuppressAsyncAssertion bool `mapstructure:"suppress-async-assertion"` + SuppressTypeCompareWarning bool `mapstructure:"suppress-type-compare-assertion"` + ForbidFocusContainer bool `mapstructure:"forbid-focus-container"` + AllowHaveLenZero bool `mapstructure:"allow-havelen-zero"` + ForceExpectTo bool `mapstructure:"force-expect-to"` + ValidateAsyncIntervals bool `mapstructure:"validate-async-intervals"` + ForbidSpecPollution bool `mapstructure:"forbid-spec-pollution"` } type GocognitSettings struct { @@ -200,15 +477,29 @@ type GocognitSettings struct { } type GoConstSettings struct { - IgnoreTests bool `mapstructure:"ignore-tests"` - MatchWithConstants bool `mapstructure:"match-constant"` - MinStringLen int `mapstructure:"min-len"` - MinOccurrencesCount int `mapstructure:"min-occurrences"` - ParseNumbers bool `mapstructure:"numbers"` - NumberMin int `mapstructure:"min"` - NumberMax int `mapstructure:"max"` - IgnoreCalls bool `mapstructure:"ignore-calls"` -} + IgnoreStrings string `mapstructure:"ignore-strings"` + IgnoreTests bool `mapstructure:"ignore-tests"` + MatchWithConstants bool `mapstructure:"match-constant"` + MinStringLen int `mapstructure:"min-len"` + MinOccurrencesCount int `mapstructure:"min-occurrences"` + ParseNumbers bool `mapstructure:"numbers"` + NumberMin int `mapstructure:"min"` + NumberMax 
int `mapstructure:"max"` + IgnoreCalls bool `mapstructure:"ignore-calls"` +} + +type GoCriticSettings struct { + Go string `mapstructure:"-"` + DisableAll bool `mapstructure:"disable-all"` + EnabledChecks []string `mapstructure:"enabled-checks"` + EnableAll bool `mapstructure:"enable-all"` + DisabledChecks []string `mapstructure:"disabled-checks"` + EnabledTags []string `mapstructure:"enabled-tags"` + DisabledTags []string `mapstructure:"disabled-tags"` + SettingsPerCheck map[string]GoCriticCheckSettings `mapstructure:"settings"` +} + +type GoCriticCheckSettings map[string]any type GoCycloSettings struct { MinComplexity int `mapstructure:"min-complexity"` @@ -218,6 +509,7 @@ type GodotSettings struct { Scope string `mapstructure:"scope"` Exclude []string `mapstructure:"exclude"` Capital bool `mapstructure:"capital"` + Period bool `mapstructure:"period"` // Deprecated: use `Scope` instead CheckAll bool `mapstructure:"check-all"` @@ -228,11 +520,21 @@ type GodoxSettings struct { } type GoFmtSettings struct { - Simplify bool + Simplify bool + RewriteRules []GoFmtRewriteRule `mapstructure:"rewrite-rules"` +} + +type GoFmtRewriteRule struct { + Pattern string + Replacement string } type GofumptSettings struct { - ExtraRules bool `mapstructure:"extra-rules"` + ModulePath string `mapstructure:"module-path"` + ExtraRules bool `mapstructure:"extra-rules"` + + // Deprecated: use the global `run.go` instead. + LangVersion string `mapstructure:"lang-version"` } type GoHeaderSettings struct { @@ -245,12 +547,12 @@ type GoImportsSettings struct { LocalPrefixes string `mapstructure:"local-prefixes"` } -type GoLintSettings struct { - MinConfidence float64 `mapstructure:"min-confidence"` -} - type GoMndSettings struct { - Settings map[string]map[string]interface{} + Settings map[string]map[string]any // Deprecated + Checks []string `mapstructure:"checks"` + IgnoredNumbers []string `mapstructure:"ignored-numbers"` + IgnoredFiles []string `mapstructure:"ignored-files"` + IgnoredFunctions []string `mapstructure:"ignored-functions"` } type GoModDirectivesSettings struct { @@ -279,42 +581,64 @@ type GoModGuardSettings struct { } type GoSecSettings struct { - Includes []string - Excludes []string - Config map[string]interface{} `mapstructure:"config"` + Includes []string `mapstructure:"includes"` + Excludes []string `mapstructure:"excludes"` + Severity string `mapstructure:"severity"` + Confidence string `mapstructure:"confidence"` + ExcludeGenerated bool `mapstructure:"exclude-generated"` + Config map[string]any `mapstructure:"config"` + Concurrency int `mapstructure:"concurrency"` +} + +type GosmopolitanSettings struct { + AllowTimeLocal bool `mapstructure:"allow-time-local"` + EscapeHatches []string `mapstructure:"escape-hatches"` + IgnoreTests bool `mapstructure:"ignore-tests"` + WatchForScripts []string `mapstructure:"watch-for-scripts"` } type GovetSettings struct { - CheckShadowing bool `mapstructure:"check-shadowing"` - Settings map[string]map[string]interface{} + Go string `mapstructure:"-"` Enable []string Disable []string EnableAll bool `mapstructure:"enable-all"` DisableAll bool `mapstructure:"disable-all"` + + Settings map[string]map[string]any + + // Deprecated: the linter should be enabled inside `Enable`. 
+ CheckShadowing bool `mapstructure:"check-shadowing"` } -func (cfg GovetSettings) Validate() error { +func (cfg *GovetSettings) Validate() error { if cfg.EnableAll && cfg.DisableAll { - return errors.New("enable-all and disable-all can't be combined") + return errors.New("govet: enable-all and disable-all can't be combined") } if cfg.EnableAll && len(cfg.Enable) != 0 { - return errors.New("enable-all and enable can't be combined") + return errors.New("govet: enable-all and enable can't be combined") } if cfg.DisableAll && len(cfg.Disable) != 0 { - return errors.New("disable-all and disable can't be combined") + return errors.New("govet: disable-all and disable can't be combined") } return nil } -type IfshortSettings struct { - MaxDeclLines int `mapstructure:"max-decl-lines"` - MaxDeclChars int `mapstructure:"max-decl-chars"` +type GrouperSettings struct { + ConstRequireSingleConst bool `mapstructure:"const-require-single-const"` + ConstRequireGrouping bool `mapstructure:"const-require-grouping"` + ImportRequireSingleImport bool `mapstructure:"import-require-single-import"` + ImportRequireGrouping bool `mapstructure:"import-require-grouping"` + TypeRequireSingleType bool `mapstructure:"type-require-single-type"` + TypeRequireGrouping bool `mapstructure:"type-require-grouping"` + VarRequireSingleVar bool `mapstructure:"var-require-single-var"` + VarRequireGrouping bool `mapstructure:"var-require-grouping"` } type ImportAsSettings struct { - Alias []ImportAsAlias - NoUnaliased bool `mapstructure:"no-unaliased"` + Alias []ImportAsAlias + NoUnaliased bool `mapstructure:"no-unaliased"` + NoExtraAliases bool `mapstructure:"no-extra-aliases"` } type ImportAsAlias struct { @@ -322,24 +646,63 @@ type ImportAsAlias struct { Alias string } +type INamedParamSettings struct { + SkipSingleParam bool `mapstructure:"skip-single-param"` +} + +type InterfaceBloatSettings struct { + Max int `mapstructure:"max"` +} + +type IreturnSettings struct { + Allow []string `mapstructure:"allow"` + Reject []string `mapstructure:"reject"` +} + type LllSettings struct { LineLength int `mapstructure:"line-length"` TabWidth int `mapstructure:"tab-width"` } -type MakezeroSettings struct { - Always bool +type LoggerCheckSettings struct { + Kitlog bool `mapstructure:"kitlog"` + Klog bool `mapstructure:"klog"` + Logr bool `mapstructure:"logr"` + Zap bool `mapstructure:"zap"` + RequireStringKey bool `mapstructure:"require-string-key"` + NoPrintfLike bool `mapstructure:"no-printf-like"` + Rules []string `mapstructure:"rules"` +} + +type MaintIdxSettings struct { + Under int `mapstructure:"under"` } -type MalignedSettings struct { - SuggestNewOrder bool `mapstructure:"suggest-new"` +type MakezeroSettings struct { + Always bool } type MisspellSettings struct { - Locale string + Mode string `mapstructure:"mode"` + Locale string `mapstructure:"locale"` + ExtraWords []MisspellExtraWords `mapstructure:"extra-words"` + // TODO(ldez): v2 the option must be renamed to `IgnoredRules`. 
IgnoreWords []string `mapstructure:"ignore-words"` } +type MisspellExtraWords struct { + Typo string `mapstructure:"typo"` + Correction string `mapstructure:"correction"` +} + +type MustTagSettings struct { + Functions []struct { + Name string `mapstructure:"name"` + Tag string `mapstructure:"tag"` + ArgPos int `mapstructure:"arg-pos"` + } `mapstructure:"functions"` +} + type NakedretSettings struct { MaxFuncLines int `mapstructure:"max-func-lines"` } @@ -348,14 +711,39 @@ type NestifSettings struct { MinComplexity int `mapstructure:"min-complexity"` } +type NilNilSettings struct { + CheckedTypes []string `mapstructure:"checked-types"` +} + +type NlreturnSettings struct { + BlockSize int `mapstructure:"block-size"` +} + type NoLintLintSettings struct { RequireExplanation bool `mapstructure:"require-explanation"` - AllowLeadingSpace bool `mapstructure:"allow-leading-space"` RequireSpecific bool `mapstructure:"require-specific"` AllowNoExplanation []string `mapstructure:"allow-no-explanation"` AllowUnused bool `mapstructure:"allow-unused"` } +type NoNamedReturnsSettings struct { + ReportErrorInDefer bool `mapstructure:"report-error-in-defer"` +} + +type ParallelTestSettings struct { + Go string `mapstructure:"-"` + IgnoreMissing bool `mapstructure:"ignore-missing"` + IgnoreMissingSubtests bool `mapstructure:"ignore-missing-subtests"` +} + +type PerfSprintSettings struct { + IntConversion bool `mapstructure:"int-conversion"` + ErrError bool `mapstructure:"err-error"` + ErrorF bool `mapstructure:"errorf"` + SprintF1 bool `mapstructure:"sprintf1"` + StrConcat bool `mapstructure:"strconcat"` +} + type PreallocSettings struct { Simple bool RangeLoops bool `mapstructure:"range-loops"` @@ -372,15 +760,29 @@ type PromlinterSettings struct { DisabledLinters []string `mapstructure:"disabled-linters"` } +type ProtoGetterSettings struct { + SkipGeneratedBy []string `mapstructure:"skip-generated-by"` + SkipFiles []string `mapstructure:"skip-files"` + SkipAnyGenerated bool `mapstructure:"skip-any-generated"` + ReplaceFirstArgInAppend bool `mapstructure:"replace-first-arg-in-append"` +} + +type ReassignSettings struct { + Patterns []string `mapstructure:"patterns"` +} + type ReviveSettings struct { + MaxOpenFiles int `mapstructure:"max-open-files"` IgnoreGeneratedHeader bool `mapstructure:"ignore-generated-header"` Confidence float64 Severity string + EnableAllRules bool `mapstructure:"enable-all-rules"` Rules []struct { Name string - Arguments []interface{} + Arguments []any Severity string Disabled bool + Exclude []string } ErrorCode int `mapstructure:"error-code"` WarningCode int `mapstructure:"warning-code"` @@ -394,7 +796,25 @@ type RowsErrCheckSettings struct { Packages []string } +type SlogLintSettings struct { + NoMixedArgs bool `mapstructure:"no-mixed-args"` + KVOnly bool `mapstructure:"kv-only"` + NoGlobal string `mapstructure:"no-global"` + AttrOnly bool `mapstructure:"attr-only"` + ContextOnly bool `mapstructure:"context-only"` + StaticMsg bool `mapstructure:"static-msg"` + NoRawKeys bool `mapstructure:"no-raw-keys"` + KeyNamingCase string `mapstructure:"key-naming-case"` + ArgsOnSepLines bool `mapstructure:"args-on-sep-lines"` +} + +type SpancheckSettings struct { + Checks []string `mapstructure:"checks"` + IgnoreCheckSignatures []string `mapstructure:"ignore-check-signatures"` +} + type StaticCheckSettings struct { + // Deprecated: use the global `run.go` instead. 
GoVersion string `mapstructure:"go"` Checks []string `mapstructure:"checks"` @@ -407,8 +827,11 @@ func (s *StaticCheckSettings) HasConfiguration() bool { return len(s.Initialisms) > 0 || len(s.HTTPStatusCodeWhitelist) > 0 || len(s.DotImportWhitelist) > 0 || len(s.Checks) > 0 } -type StructCheckSettings struct { - CheckExportedFields bool `mapstructure:"exported-fields"` +type TagAlignSettings struct { + Align bool `mapstructure:"align"` + Sort bool `mapstructure:"sort"` + Order []string `mapstructure:"order"` + Strict bool `mapstructure:"strict"` } type TagliatelleSettings struct { @@ -418,26 +841,69 @@ type TagliatelleSettings struct { } } +type TestifylintSettings struct { + EnableAll bool `mapstructure:"enable-all"` + DisableAll bool `mapstructure:"disable-all"` + EnabledCheckers []string `mapstructure:"enable"` + DisabledCheckers []string `mapstructure:"disable"` + + BoolCompare struct { + IgnoreCustomTypes bool `mapstructure:"ignore-custom-types"` + } `mapstructure:"bool-compare"` + + ExpectedActual struct { + ExpVarPattern string `mapstructure:"pattern"` + } `mapstructure:"expected-actual"` + + RequireError struct { + FnPattern string `mapstructure:"fn-pattern"` + } `mapstructure:"require-error"` + + SuiteExtraAssertCall struct { + Mode string `mapstructure:"mode"` + } `mapstructure:"suite-extra-assert-call"` +} + type TestpackageSettings struct { - SkipRegexp string `mapstructure:"skip-regexp"` + SkipRegexp string `mapstructure:"skip-regexp"` + AllowPackages []string `mapstructure:"allow-packages"` } type ThelperSettings struct { - Test struct { - First bool `mapstructure:"first"` - Name bool `mapstructure:"name"` - Begin bool `mapstructure:"begin"` - } `mapstructure:"test"` - Benchmark struct { - First bool `mapstructure:"first"` - Name bool `mapstructure:"name"` - Begin bool `mapstructure:"begin"` - } `mapstructure:"benchmark"` - TB struct { - First bool `mapstructure:"first"` - Name bool `mapstructure:"name"` - Begin bool `mapstructure:"begin"` - } `mapstructure:"tb"` + Test ThelperOptions `mapstructure:"test"` + Fuzz ThelperOptions `mapstructure:"fuzz"` + Benchmark ThelperOptions `mapstructure:"benchmark"` + TB ThelperOptions `mapstructure:"tb"` +} + +type ThelperOptions struct { + First *bool `mapstructure:"first"` + Name *bool `mapstructure:"name"` + Begin *bool `mapstructure:"begin"` +} + +type TenvSettings struct { + All bool `mapstructure:"all"` +} + +type UseStdlibVarsSettings struct { + HTTPMethod bool `mapstructure:"http-method"` + HTTPStatusCode bool `mapstructure:"http-status-code"` + TimeWeekday bool `mapstructure:"time-weekday"` + TimeMonth bool `mapstructure:"time-month"` + TimeLayout bool `mapstructure:"time-layout"` + CryptoHash bool `mapstructure:"crypto-hash"` + DefaultRPCPath bool `mapstructure:"default-rpc-path"` + OSDevNull bool `mapstructure:"os-dev-null"` + SQLIsolationLevel bool `mapstructure:"sql-isolation-level"` + TLSSignatureScheme bool `mapstructure:"tls-signature-scheme"` + ConstantKind bool `mapstructure:"constant-kind"` + SyslogPriority bool `mapstructure:"syslog-priority"` +} + +type UnconvertSettings struct { + FastMath bool `mapstructure:"fast-math"` + Safe bool `mapstructure:"safe"` } type UnparamSettings struct { @@ -445,8 +911,27 @@ type UnparamSettings struct { Algo string } -type VarCheckSettings struct { - CheckExportedFields bool `mapstructure:"exported-fields"` +type UnusedSettings struct { + FieldWritesAreUses bool `mapstructure:"field-writes-are-uses"` + PostStatementsAreReads bool `mapstructure:"post-statements-are-reads"` + 
ExportedIsUsed bool `mapstructure:"exported-is-used"` + ExportedFieldsAreUsed bool `mapstructure:"exported-fields-are-used"` + ParametersAreUsed bool `mapstructure:"parameters-are-used"` + LocalVariablesAreUsed bool `mapstructure:"local-variables-are-used"` + GeneratedIsUsed bool `mapstructure:"generated-is-used"` +} + +type VarnamelenSettings struct { + MaxDistance int `mapstructure:"max-distance"` + MinNameLength int `mapstructure:"min-name-length"` + CheckReceiver bool `mapstructure:"check-receiver"` + CheckReturn bool `mapstructure:"check-return"` + CheckTypeParam bool `mapstructure:"check-type-param"` + IgnoreNames []string `mapstructure:"ignore-names"` + IgnoreTypeAssertOk bool `mapstructure:"ignore-type-assert-ok"` + IgnoreMapIndexOk bool `mapstructure:"ignore-map-index-ok"` + IgnoreChanRecvOk bool `mapstructure:"ignore-chan-recv-ok"` + IgnoreDecls []string `mapstructure:"ignore-decls"` } type WhitespaceSettings struct { @@ -455,24 +940,60 @@ type WhitespaceSettings struct { } type WrapcheckSettings struct { - IgnoreSigs []string `mapstructure:"ignoreSigs"` + // TODO(ldez): v2 the options must be renamed to use hyphen. + IgnoreSigs []string `mapstructure:"ignoreSigs"` + IgnoreSigRegexps []string `mapstructure:"ignoreSigRegexps"` + IgnorePackageGlobs []string `mapstructure:"ignorePackageGlobs"` + IgnoreInterfaceRegexps []string `mapstructure:"ignoreInterfaceRegexps"` } type WSLSettings struct { - StrictAppend bool `mapstructure:"strict-append"` - AllowAssignAndCallCuddle bool `mapstructure:"allow-assign-and-call"` - AllowAssignAndAnythingCuddle bool `mapstructure:"allow-assign-and-anything"` - AllowMultiLineAssignCuddle bool `mapstructure:"allow-multiline-assign"` - AllowCuddleDeclaration bool `mapstructure:"allow-cuddle-declarations"` - AllowTrailingComment bool `mapstructure:"allow-trailing-comment"` - AllowSeparatedLeadingComment bool `mapstructure:"allow-separated-leading-comment"` - ForceCuddleErrCheckAndAssign bool `mapstructure:"force-err-cuddling"` - ForceExclusiveShortDeclarations bool `mapstructure:"force-short-decl-cuddling"` - ForceCaseTrailingWhitespaceLimit int `mapstructure:"force-case-trailing-whitespace"` -} - + StrictAppend bool `mapstructure:"strict-append"` + AllowAssignAndCallCuddle bool `mapstructure:"allow-assign-and-call"` + AllowAssignAndAnythingCuddle bool `mapstructure:"allow-assign-and-anything"` + AllowMultiLineAssignCuddle bool `mapstructure:"allow-multiline-assign"` + ForceCaseTrailingWhitespaceLimit int `mapstructure:"force-case-trailing-whitespace"` + AllowTrailingComment bool `mapstructure:"allow-trailing-comment"` + AllowSeparatedLeadingComment bool `mapstructure:"allow-separated-leading-comment"` + AllowCuddleDeclaration bool `mapstructure:"allow-cuddle-declarations"` + AllowCuddleWithCalls []string `mapstructure:"allow-cuddle-with-calls"` + AllowCuddleWithRHS []string `mapstructure:"allow-cuddle-with-rhs"` + ForceCuddleErrCheckAndAssign bool `mapstructure:"force-err-cuddling"` + ErrorVariableNames []string `mapstructure:"error-variable-names"` + ForceExclusiveShortDeclarations bool `mapstructure:"force-short-decl-cuddling"` +} + +// CustomLinterSettings encapsulates the meta-data of a private linter. type CustomLinterSettings struct { - Path string + // Type plugin type. + // It can be `goplugin` or `module`. + Type string `mapstructure:"type"` + + // Path to a plugin *.so file that implements the private linter. + // Only for Go plugin system. + Path string + + // Description describes the purpose of the private linter. 
Description string + // OriginalURL The URL containing the source code for the private linter. OriginalURL string `mapstructure:"original-url"` + + // Settings plugin settings only work with linterdb.PluginConstructor symbol. + Settings any +} + +func (s *CustomLinterSettings) Validate() error { + if s.Type == "module" { + if s.Path != "" { + return errors.New("path not supported with module type") + } + + return nil + } + + if s.Path == "" { + return errors.New("path is required") + } + + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings_gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings_gocritic.go deleted file mode 100644 index 34f8507581..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings_gocritic.go +++ /dev/null @@ -1,365 +0,0 @@ -package config - -import ( - "fmt" - "sort" - "strings" - - _ "github.com/go-critic/go-critic/checkers" // this import register checkers - "github.com/go-critic/go-critic/framework/linter" - "github.com/pkg/errors" - - "github.com/golangci/golangci-lint/pkg/logutils" -) - -const gocriticDebugKey = "gocritic" - -var ( - gocriticDebugf = logutils.Debug(gocriticDebugKey) - isGocriticDebug = logutils.HaveDebugTag(gocriticDebugKey) - allGocriticCheckers = linter.GetCheckersInfo() - allGocriticCheckerMap = func() map[string]*linter.CheckerInfo { - checkInfoMap := make(map[string]*linter.CheckerInfo) - for _, checkInfo := range allGocriticCheckers { - checkInfoMap[checkInfo.Name] = checkInfo - } - return checkInfoMap - }() -) - -type GocriticCheckSettings map[string]interface{} - -type GocriticSettings struct { - EnabledChecks []string `mapstructure:"enabled-checks"` - DisabledChecks []string `mapstructure:"disabled-checks"` - EnabledTags []string `mapstructure:"enabled-tags"` - DisabledTags []string `mapstructure:"disabled-tags"` - SettingsPerCheck map[string]GocriticCheckSettings `mapstructure:"settings"` - - inferredEnabledChecks map[string]bool -} - -func debugChecksListf(checks []string, format string, args ...interface{}) { - if isGocriticDebug { - prefix := fmt.Sprintf(format, args...) 
- gocriticDebugf(prefix+" checks (%d): %s", len(checks), sprintStrings(checks)) - } -} - -func stringsSliceToSet(ss []string) map[string]bool { - ret := map[string]bool{} - for _, s := range ss { - ret[s] = true - } - - return ret -} - -func buildGocriticTagToCheckersMap() map[string][]string { - tagToCheckers := map[string][]string{} - for _, checker := range allGocriticCheckers { - for _, tag := range checker.Tags { - tagToCheckers[tag] = append(tagToCheckers[tag], checker.Name) - } - } - return tagToCheckers -} - -func gocriticCheckerTagsDebugf() { - if !isGocriticDebug { - return - } - - tagToCheckers := buildGocriticTagToCheckersMap() - - var allTags []string - for tag := range tagToCheckers { - allTags = append(allTags, tag) - } - sort.Strings(allTags) - - gocriticDebugf("All gocritic existing tags and checks:") - for _, tag := range allTags { - debugChecksListf(tagToCheckers[tag], " tag %q", tag) - } -} - -func (s *GocriticSettings) gocriticDisabledCheckersDebugf() { - if !isGocriticDebug { - return - } - - var disabledCheckers []string - for _, checker := range allGocriticCheckers { - if s.inferredEnabledChecks[strings.ToLower(checker.Name)] { - continue - } - - disabledCheckers = append(disabledCheckers, checker.Name) - } - - if len(disabledCheckers) == 0 { - gocriticDebugf("All checks are enabled") - } else { - debugChecksListf(disabledCheckers, "Final not used") - } -} - -func (s *GocriticSettings) InferEnabledChecks(log logutils.Log) { - gocriticCheckerTagsDebugf() - - enabledByDefaultChecks := getDefaultEnabledGocriticCheckersNames() - debugChecksListf(enabledByDefaultChecks, "Enabled by default") - - disabledByDefaultChecks := getDefaultDisabledGocriticCheckersNames() - debugChecksListf(disabledByDefaultChecks, "Disabled by default") - - var enabledChecks []string - - // EnabledTags - if len(s.EnabledTags) != 0 { - tagToCheckers := buildGocriticTagToCheckersMap() - for _, tag := range s.EnabledTags { - enabledChecks = append(enabledChecks, tagToCheckers[tag]...) - } - debugChecksListf(enabledChecks, "Enabled by config tags %s", sprintStrings(s.EnabledTags)) - } - - if !(len(s.EnabledTags) == 0 && len(s.EnabledChecks) != 0) { - // don't use default checks only if we have no enabled tags and enable some checks manually - enabledChecks = append(enabledChecks, enabledByDefaultChecks...) - } - - // DisabledTags - if len(s.DisabledTags) != 0 { - enabledChecks = filterByDisableTags(enabledChecks, s.DisabledTags, log) - } - - // EnabledChecks - if len(s.EnabledChecks) != 0 { - debugChecksListf(s.EnabledChecks, "Enabled by config") - - alreadyEnabledChecksSet := stringsSliceToSet(enabledChecks) - for _, enabledCheck := range s.EnabledChecks { - if alreadyEnabledChecksSet[enabledCheck] { - log.Warnf("No need to enable check %q: it's already enabled", enabledCheck) - continue - } - enabledChecks = append(enabledChecks, enabledCheck) - } - } - - // DisabledChecks - if len(s.DisabledChecks) != 0 { - debugChecksListf(s.DisabledChecks, "Disabled by config") - - enabledChecksSet := stringsSliceToSet(enabledChecks) - for _, disabledCheck := range s.DisabledChecks { - if !enabledChecksSet[disabledCheck] { - log.Warnf("Gocritic check %q was explicitly disabled via config. 
However, as this check"+ - "is disabled by default, there is no need to explicitly disable it via config.", disabledCheck) - continue - } - delete(enabledChecksSet, disabledCheck) - } - - enabledChecks = nil - for enabledCheck := range enabledChecksSet { - enabledChecks = append(enabledChecks, enabledCheck) - } - } - - s.inferredEnabledChecks = map[string]bool{} - for _, check := range enabledChecks { - s.inferredEnabledChecks[strings.ToLower(check)] = true - } - - debugChecksListf(enabledChecks, "Final used") - s.gocriticDisabledCheckersDebugf() -} - -func validateStringsUniq(ss []string) error { - set := map[string]bool{} - for _, s := range ss { - _, ok := set[s] - if ok { - return fmt.Errorf("%q occurs multiple times in list", s) - } - set[s] = true - } - - return nil -} - -func intersectStringSlice(s1, s2 []string) []string { - s1Map := make(map[string]struct{}) - for _, s := range s1 { - s1Map[s] = struct{}{} - } - - result := make([]string, 0) - for _, s := range s2 { - if _, exists := s1Map[s]; exists { - result = append(result, s) - } - } - - return result -} - -func (s *GocriticSettings) Validate(log logutils.Log) error { - if len(s.EnabledTags) == 0 { - if len(s.EnabledChecks) != 0 && len(s.DisabledChecks) != 0 { - return errors.New("both enabled and disabled check aren't allowed for gocritic") - } - } else { - if err := validateStringsUniq(s.EnabledTags); err != nil { - return errors.Wrap(err, "validate enabled tags") - } - - tagToCheckers := buildGocriticTagToCheckersMap() - for _, tag := range s.EnabledTags { - if _, ok := tagToCheckers[tag]; !ok { - return fmt.Errorf("gocritic [enabled]tag %q doesn't exist", tag) - } - } - } - - if len(s.DisabledTags) > 0 { - tagToCheckers := buildGocriticTagToCheckersMap() - for _, tag := range s.EnabledTags { - if _, ok := tagToCheckers[tag]; !ok { - return fmt.Errorf("gocritic [disabled]tag %q doesn't exist", tag) - } - } - } - - if err := validateStringsUniq(s.EnabledChecks); err != nil { - return errors.Wrap(err, "validate enabled checks") - } - if err := validateStringsUniq(s.DisabledChecks); err != nil { - return errors.Wrap(err, "validate disabled checks") - } - - if err := s.validateCheckerNames(log); err != nil { - return errors.Wrap(err, "validation failed") - } - - return nil -} - -func (s *GocriticSettings) IsCheckEnabled(name string) bool { - return s.inferredEnabledChecks[strings.ToLower(name)] -} - -func sprintAllowedCheckerNames(allowedNames map[string]bool) string { - var namesSlice []string - for name := range allowedNames { - namesSlice = append(namesSlice, name) - } - return sprintStrings(namesSlice) -} - -func sprintStrings(ss []string) string { - sort.Strings(ss) - return fmt.Sprint(ss) -} - -// getAllCheckerNames returns a map containing all checker names supported by gocritic. 
-func getAllCheckerNames() map[string]bool { - allCheckerNames := map[string]bool{} - for _, checker := range allGocriticCheckers { - allCheckerNames[strings.ToLower(checker.Name)] = true - } - - return allCheckerNames -} - -func isEnabledByDefaultGocriticCheck(info *linter.CheckerInfo) bool { - return !info.HasTag("experimental") && - !info.HasTag("opinionated") && - !info.HasTag("performance") -} - -func getDefaultEnabledGocriticCheckersNames() []string { - var enabled []string - for _, info := range allGocriticCheckers { - enable := isEnabledByDefaultGocriticCheck(info) - if enable { - enabled = append(enabled, info.Name) - } - } - - return enabled -} - -func getDefaultDisabledGocriticCheckersNames() []string { - var disabled []string - for _, info := range allGocriticCheckers { - enable := isEnabledByDefaultGocriticCheck(info) - if !enable { - disabled = append(disabled, info.Name) - } - } - - return disabled -} - -func (s *GocriticSettings) validateCheckerNames(log logutils.Log) error { - allowedNames := getAllCheckerNames() - - for _, name := range s.EnabledChecks { - if !allowedNames[strings.ToLower(name)] { - return fmt.Errorf("enabled checker %s doesn't exist, all existing checkers: %s", - name, sprintAllowedCheckerNames(allowedNames)) - } - } - - for _, name := range s.DisabledChecks { - if !allowedNames[strings.ToLower(name)] { - return fmt.Errorf("disabled checker %s doesn't exist, all existing checkers: %s", - name, sprintAllowedCheckerNames(allowedNames)) - } - } - - for checkName := range s.SettingsPerCheck { - if _, ok := allowedNames[checkName]; !ok { - return fmt.Errorf("invalid setting, checker %s doesn't exist, all existing checkers: %s", - checkName, sprintAllowedCheckerNames(allowedNames)) - } - if !s.IsCheckEnabled(checkName) { - log.Warnf("Gocritic settings were provided for not enabled check %q", checkName) - } - } - - return nil -} - -func (s *GocriticSettings) GetLowercasedParams() map[string]GocriticCheckSettings { - ret := map[string]GocriticCheckSettings{} - for checker, params := range s.SettingsPerCheck { - ret[strings.ToLower(checker)] = params - } - return ret -} - -func filterByDisableTags(enabledChecks, disableTags []string, log logutils.Log) []string { - enabledChecksSet := stringsSliceToSet(enabledChecks) - for _, enabledCheck := range enabledChecks { - checkInfo, checkInfoExists := allGocriticCheckerMap[enabledCheck] - if !checkInfoExists { - log.Warnf("Gocritic check %q was not exists via filtering disabled tags", enabledCheck) - continue - } - hitTags := intersectStringSlice(checkInfo.Tags, disableTags) - if len(hitTags) != 0 { - delete(enabledChecksSet, enabledCheck) - } - debugChecksListf(enabledChecks, "Disabled by config tags %s", sprintStrings(disableTags)) - } - enabledChecks = nil - for enabledCheck := range enabledChecksSet { - enabledChecks = append(enabledChecks, enabledCheck) - } - return enabledChecks -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go b/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go new file mode 100644 index 0000000000..782daa4c92 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go @@ -0,0 +1,398 @@ +package config + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "slices" + "strings" + + "github.com/go-viper/mapstructure/v2" + "github.com/mitchellh/go-homedir" + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "github.com/golangci/golangci-lint/pkg/exitcodes" + "github.com/golangci/golangci-lint/pkg/fsutils" + 
"github.com/golangci/golangci-lint/pkg/logutils" +) + +var errConfigDisabled = errors.New("config is disabled by --no-config") + +type LoaderOptions struct { + Config string // Flag only. The path to the golangci config file, as specified with the --config argument. + NoConfig bool // Flag only. +} + +type Loader struct { + opts LoaderOptions + + viper *viper.Viper + fs *pflag.FlagSet + + log logutils.Log + + cfg *Config +} + +func NewLoader(log logutils.Log, v *viper.Viper, fs *pflag.FlagSet, opts LoaderOptions, cfg *Config) *Loader { + return &Loader{ + opts: opts, + viper: v, + fs: fs, + log: log, + cfg: cfg, + } +} + +func (l *Loader) Load() error { + err := l.setConfigFile() + if err != nil { + return err + } + + err = l.parseConfig() + if err != nil { + return err + } + + l.applyStringSliceHack() + + l.handleGoVersion() + + err = l.handleDeprecation() + if err != nil { + return err + } + + err = l.handleEnableOnlyOption() + if err != nil { + return err + } + + return nil +} + +func (l *Loader) setConfigFile() error { + configFile, err := l.evaluateOptions() + if err != nil { + if errors.Is(err, errConfigDisabled) { + return nil + } + + return fmt.Errorf("can't parse --config option: %w", err) + } + + if configFile != "" { + l.viper.SetConfigFile(configFile) + + // Assume YAML if the file has no extension. + if filepath.Ext(configFile) == "" { + l.viper.SetConfigType("yaml") + } + } else { + l.setupConfigFileSearch() + } + + return nil +} + +func (l *Loader) evaluateOptions() (string, error) { + if l.opts.NoConfig && l.opts.Config != "" { + return "", errors.New("can't combine option --config and --no-config") + } + + if l.opts.NoConfig { + return "", errConfigDisabled + } + + configFile, err := homedir.Expand(l.opts.Config) + if err != nil { + return "", errors.New("failed to expand configuration path") + } + + return configFile, nil +} + +func (l *Loader) setupConfigFileSearch() { + firstArg := extractFirstPathArg() + + absStartPath, err := filepath.Abs(firstArg) + if err != nil { + l.log.Warnf("Can't make abs path for %q: %s", firstArg, err) + absStartPath = filepath.Clean(firstArg) + } + + // start from it + var curDir string + if fsutils.IsDir(absStartPath) { + curDir = absStartPath + } else { + curDir = filepath.Dir(absStartPath) + } + + // find all dirs from it up to the root + configSearchPaths := []string{"./"} + + for { + configSearchPaths = append(configSearchPaths, curDir) + + newCurDir := filepath.Dir(curDir) + if curDir == newCurDir || newCurDir == "" { + break + } + + curDir = newCurDir + } + + // find home directory for global config + if home, err := homedir.Dir(); err != nil { + l.log.Warnf("Can't get user's home directory: %s", err.Error()) + } else if !slices.Contains(configSearchPaths, home) { + configSearchPaths = append(configSearchPaths, home) + } + + l.log.Infof("Config search paths: %s", configSearchPaths) + + l.viper.SetConfigName(".golangci") + + for _, p := range configSearchPaths { + l.viper.AddConfigPath(p) + } +} + +func (l *Loader) parseConfig() error { + if err := l.viper.ReadInConfig(); err != nil { + var configFileNotFoundError viper.ConfigFileNotFoundError + if errors.As(err, &configFileNotFoundError) { + // Load configuration from flags only. 
+ err = l.viper.Unmarshal(l.cfg, customDecoderHook()) + if err != nil { + return fmt.Errorf("can't unmarshal config by viper (flags): %w", err) + } + + return nil + } + + return fmt.Errorf("can't read viper config: %w", err) + } + + err := l.setConfigDir() + if err != nil { + return err + } + + // Load configuration from all sources (flags, file). + if err := l.viper.Unmarshal(l.cfg, customDecoderHook()); err != nil { + return fmt.Errorf("can't unmarshal config by viper (flags, file): %w", err) + } + + if l.cfg.InternalTest { // just for testing purposes: to detect config file usage + _, _ = fmt.Fprintln(logutils.StdOut, "test") + os.Exit(exitcodes.Success) + } + + return nil +} + +func (l *Loader) setConfigDir() error { + usedConfigFile := l.viper.ConfigFileUsed() + if usedConfigFile == "" { + return nil + } + + if usedConfigFile == os.Stdin.Name() { + usedConfigFile = "" + l.log.Infof("Reading config file from stdin") + } else { + var err error + usedConfigFile, err = fsutils.ShortestRelPath(usedConfigFile, "") + if err != nil { + l.log.Warnf("Can't pretty print config file path: %v", err) + } + + l.log.Infof("Used config file %s", usedConfigFile) + } + + usedConfigDir, err := filepath.Abs(filepath.Dir(usedConfigFile)) + if err != nil { + return errors.New("can't get config directory") + } + + l.cfg.cfgDir = usedConfigDir + + return nil +} + +// Hack to append values from StringSlice flags. +// Viper always overrides StringSlice values. +// https://github.com/spf13/viper/issues/1448 +// So StringSlice flags are not bound to Viper; their values are obtained via Cobra flags instead. +func (l *Loader) applyStringSliceHack() { + if l.fs == nil { + return + } + + l.appendStringSlice("enable", &l.cfg.Linters.Enable) + l.appendStringSlice("disable", &l.cfg.Linters.Disable) + l.appendStringSlice("presets", &l.cfg.Linters.Presets) + l.appendStringSlice("build-tags", &l.cfg.Run.BuildTags) + l.appendStringSlice("exclude", &l.cfg.Issues.ExcludePatterns) + + l.appendStringSlice("skip-dirs", &l.cfg.Run.SkipDirs) + l.appendStringSlice("skip-files", &l.cfg.Run.SkipFiles) + l.appendStringSlice("exclude-dirs", &l.cfg.Issues.ExcludeDirs) + l.appendStringSlice("exclude-files", &l.cfg.Issues.ExcludeFiles) +} + +func (l *Loader) appendStringSlice(name string, current *[]string) { + if l.fs.Changed(name) { + val, _ := l.fs.GetStringSlice(name) + *current = append(*current, val...) + } +} + +func (l *Loader) handleGoVersion() { + if l.cfg.Run.Go == "" { + l.cfg.Run.Go = detectGoVersion() + } + + l.cfg.LintersSettings.Govet.Go = l.cfg.Run.Go + + l.cfg.LintersSettings.ParallelTest.Go = l.cfg.Run.Go + + if l.cfg.LintersSettings.Gofumpt.LangVersion == "" { + l.cfg.LintersSettings.Gofumpt.LangVersion = l.cfg.Run.Go + } + + trimmedGoVersion := trimGoVersion(l.cfg.Run.Go) + + l.cfg.LintersSettings.Gocritic.Go = trimmedGoVersion + + // staticcheck related linters.
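+ // As with Gofumpt above, an explicitly configured per-linter version wins; only empty values are filled in from the (patch-trimmed) `run.go` version.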
+ if l.cfg.LintersSettings.Staticcheck.GoVersion == "" { + l.cfg.LintersSettings.Staticcheck.GoVersion = trimmedGoVersion + } + if l.cfg.LintersSettings.Gosimple.GoVersion == "" { + l.cfg.LintersSettings.Gosimple.GoVersion = trimmedGoVersion + } + if l.cfg.LintersSettings.Stylecheck.GoVersion == "" { + l.cfg.LintersSettings.Stylecheck.GoVersion = trimmedGoVersion + } +} + +func (l *Loader) handleDeprecation() error { + // Deprecated since v1.57.0 + if len(l.cfg.Run.SkipFiles) > 0 { + l.warn("The configuration option `run.skip-files` is deprecated, please use `issues.exclude-files`.") + l.cfg.Issues.ExcludeFiles = l.cfg.Run.SkipFiles + } + + // Deprecated since v1.57.0 + if len(l.cfg.Run.SkipDirs) > 0 { + l.warn("The configuration option `run.skip-dirs` is deprecated, please use `issues.exclude-dirs`.") + l.cfg.Issues.ExcludeDirs = l.cfg.Run.SkipDirs + } + + // The 2 options are true by default. + // Deprecated since v1.57.0 + if !l.cfg.Run.UseDefaultSkipDirs { + l.warn("The configuration option `run.skip-dirs-use-default` is deprecated, please use `issues.exclude-dirs-use-default`.") + } + l.cfg.Issues.UseDefaultExcludeDirs = l.cfg.Run.UseDefaultSkipDirs && l.cfg.Issues.UseDefaultExcludeDirs + + // The 2 options are false by default. + // Deprecated since v1.57.0 + if l.cfg.Run.ShowStats { + l.warn("The configuration option `run.show-stats` is deprecated, please use `output.show-stats`") + } + l.cfg.Output.ShowStats = l.cfg.Run.ShowStats || l.cfg.Output.ShowStats + + // Deprecated since v1.57.0 + if l.cfg.Output.Format != "" { + l.warn("The configuration option `output.format` is deprecated, please use `output.formats`") + + var f OutputFormats + err := f.UnmarshalText([]byte(l.cfg.Output.Format)) + if err != nil { + return fmt.Errorf("unmarshal output format: %w", err) + } + + l.cfg.Output.Formats = f + } + + // Deprecated since v1.57.0, + // but it was unofficially deprecated since v1.19 (2019) (https://github.com/golangci/golangci-lint/pull/697). + if l.cfg.LintersSettings.Govet.CheckShadowing { + l.warn("The configuration option `govet.check-shadowing` is deprecated. " + + "Please enable `shadow` instead, if you are not using `enable-all`.") + } + + return nil +} + +func (l *Loader) handleEnableOnlyOption() error { + lookup := l.fs.Lookup("enable-only") + if lookup == nil { + return nil + } + + only, err := l.fs.GetStringSlice("enable-only") + if err != nil { + return err + } + + if len(only) > 0 { + l.cfg.Linters = Linters{ + Enable: only, + DisableAll: true, + } + } + + return nil +} + +func (l *Loader) warn(format string) { + if l.cfg.InternalTest || l.cfg.InternalCmdTest || os.Getenv(logutils.EnvTestRun) == "1" { + return + } + + l.log.Warnf(format) +} + +func customDecoderHook() viper.DecoderConfigOption { + return viper.DecodeHook(mapstructure.ComposeDecodeHookFunc( + // Default hooks (https://github.com/spf13/viper/blob/518241257478c557633ab36e474dfcaeb9a3c623/viper.go#L135-L138). + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + + // Needed for forbidigo, and output.formats. + mapstructure.TextUnmarshallerHookFunc(), + )) +} + +func extractFirstPathArg() string { + args := os.Args + + // skip all args ([golangci-lint, run/linters]) before files/dirs list + for len(args) != 0 { + if args[0] == "run" { + args = args[1:] + break + } + + args = args[1:] + } + + // find first file/dir arg + firstArg := "./..."
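+ // e.g. for `golangci-lint run --fix ./pkg/...` the loop below skips `--fix` (it starts with "-") and picks `./pkg/...`; without a path argument the default `./...` is kept.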
+ for _, arg := range args { + if !strings.HasPrefix(arg, "-") { + firstArg = arg + break + } + } + + return firstArg +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/output.go b/vendor/github.com/golangci/golangci-lint/pkg/config/output.go index d67f110f67..672b1c7d42 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/output.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/output.go @@ -1,18 +1,27 @@ package config +import ( + "errors" + "fmt" + "slices" + "strings" +) + const ( OutFormatJSON = "json" OutFormatLineNumber = "line-number" OutFormatColoredLineNumber = "colored-line-number" OutFormatTab = "tab" + OutFormatColoredTab = "colored-tab" OutFormatCheckstyle = "checkstyle" OutFormatCodeClimate = "code-climate" OutFormatHTML = "html" OutFormatJunitXML = "junit-xml" OutFormatGithubActions = "github-actions" + OutFormatTeamCity = "teamcity" ) -var OutFormats = []string{ +var AllOutputFormats = []string{ OutFormatColoredLineNumber, OutFormatLineNumber, OutFormatJSON, @@ -22,15 +31,82 @@ var OutFormats = []string{ OutFormatHTML, OutFormatJunitXML, OutFormatGithubActions, + OutFormatTeamCity, } type Output struct { - Format string - Color string - PrintIssuedLine bool `mapstructure:"print-issued-lines"` - PrintLinterName bool `mapstructure:"print-linter-name"` - UniqByLine bool `mapstructure:"uniq-by-line"` - SortResults bool `mapstructure:"sort-results"` - PrintWelcomeMessage bool `mapstructure:"print-welcome"` - PathPrefix string `mapstructure:"path-prefix"` + Formats OutputFormats `mapstructure:"formats"` + PrintIssuedLine bool `mapstructure:"print-issued-lines"` + PrintLinterName bool `mapstructure:"print-linter-name"` + UniqByLine bool `mapstructure:"uniq-by-line"` + SortResults bool `mapstructure:"sort-results"` + SortOrder []string `mapstructure:"sort-order"` + PathPrefix string `mapstructure:"path-prefix"` + ShowStats bool `mapstructure:"show-stats"` + + // Deprecated: use Formats instead. 
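+ // Kept so old `output.format` values still unmarshal; the loader's handleDeprecation migrates this value into Formats.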
+ Format string `mapstructure:"format"` +} + +func (o *Output) Validate() error { + if !o.SortResults && len(o.SortOrder) > 0 { + return errors.New("sort-results should be 'true' to use sort-order") + } + + validOrders := []string{"linter", "file", "severity"} + + all := strings.Join(o.SortOrder, " ") + + for _, order := range o.SortOrder { + if strings.Count(all, order) > 1 { + return fmt.Errorf("the sort-order name %q is repeated several times", order) + } + + if !slices.Contains(validOrders, order) { + return fmt.Errorf("unsupported sort-order name %q", order) + } + } + + for _, format := range o.Formats { + err := format.Validate() + if err != nil { + return err + } + } + + return nil +} + +type OutputFormat struct { + Format string `mapstructure:"format"` + Path string `mapstructure:"path"` +} + +func (o *OutputFormat) Validate() error { + if o.Format == "" { + return errors.New("the format is required") + } + + if !slices.Contains(AllOutputFormats, o.Format) { + return fmt.Errorf("unsupported output format %q", o.Format) + } + + return nil +} + +type OutputFormats []OutputFormat + +func (p *OutputFormats) UnmarshalText(text []byte) error { + formats := strings.Split(string(text), ",") + + for _, item := range formats { + format, path, _ := strings.Cut(item, ":") + + *p = append(*p, OutputFormat{ + Path: path, + Format: format, + }) + } + + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go b/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go deleted file mode 100644 index 6e97277daa..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go +++ /dev/null @@ -1,221 +0,0 @@ -package config - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/mitchellh/go-homedir" - "github.com/spf13/viper" - - "github.com/golangci/golangci-lint/pkg/fsutils" - "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/sliceutil" -) - -type FileReader struct { - log logutils.Log - cfg *Config - commandLineCfg *Config -} - -func NewFileReader(toCfg, commandLineCfg *Config, log logutils.Log) *FileReader { - return &FileReader{ - log: log, - cfg: toCfg, - commandLineCfg: commandLineCfg, - } -} - -func (r *FileReader) Read() error { - // XXX: hack with double parsing for 2 purposes: - // 1. to access "config" option here. - // 2. to give config less priority than command line. 
- - configFile, err := r.parseConfigOption() - if err != nil { - if err == errConfigDisabled { - return nil - } - - return fmt.Errorf("can't parse --config option: %s", err) - } - - if configFile != "" { - viper.SetConfigFile(configFile) - } else { - r.setupConfigFileSearch() - } - - return r.parseConfig() -} - -func (r *FileReader) parseConfig() error { - if err := viper.ReadInConfig(); err != nil { - if _, ok := err.(viper.ConfigFileNotFoundError); ok { - return nil - } - - return fmt.Errorf("can't read viper config: %s", err) - } - - usedConfigFile := viper.ConfigFileUsed() - if usedConfigFile == "" { - return nil - } - - usedConfigFile, err := fsutils.ShortestRelPath(usedConfigFile, "") - if err != nil { - r.log.Warnf("Can't pretty print config file path: %s", err) - } - r.log.Infof("Used config file %s", usedConfigFile) - - if err := viper.Unmarshal(r.cfg); err != nil { - return fmt.Errorf("can't unmarshal config by viper: %s", err) - } - - if err := r.validateConfig(); err != nil { - return fmt.Errorf("can't validate config: %s", err) - } - - if r.cfg.InternalTest { // just for testing purposes: to detect config file usage - fmt.Fprintln(logutils.StdOut, "test") - os.Exit(0) - } - - return nil -} - -func (r *FileReader) validateConfig() error { - c := r.cfg - if len(c.Run.Args) != 0 { - return errors.New("option run.args in config isn't supported now") - } - - if c.Run.CPUProfilePath != "" { - return errors.New("option run.cpuprofilepath in config isn't allowed") - } - - if c.Run.MemProfilePath != "" { - return errors.New("option run.memprofilepath in config isn't allowed") - } - - if c.Run.TracePath != "" { - return errors.New("option run.tracepath in config isn't allowed") - } - - if c.Run.IsVerbose { - return errors.New("can't set run.verbose option with config: only on command-line") - } - for i, rule := range c.Issues.ExcludeRules { - if err := rule.Validate(); err != nil { - return fmt.Errorf("error in exclude rule #%d: %v", i, err) - } - } - if len(c.Severity.Rules) > 0 && c.Severity.Default == "" { - return errors.New("can't set severity rule option: no default severity defined") - } - for i, rule := range c.Severity.Rules { - if err := rule.Validate(); err != nil { - return fmt.Errorf("error in severity rule #%d: %v", i, err) - } - } - if err := c.LintersSettings.Govet.Validate(); err != nil { - return fmt.Errorf("error in govet config: %v", err) - } - return nil -} - -func getFirstPathArg() string { - args := os.Args - - // skip all args ([golangci-lint, run/linters]) before files/dirs list - for len(args) != 0 { - if args[0] == "run" { - args = args[1:] - break - } - - args = args[1:] - } - - // find first file/dir arg - firstArg := "./..." 
- for _, arg := range args { - if !strings.HasPrefix(arg, "-") { - firstArg = arg - break - } - } - - return firstArg -} - -func (r *FileReader) setupConfigFileSearch() { - firstArg := getFirstPathArg() - absStartPath, err := filepath.Abs(firstArg) - if err != nil { - r.log.Warnf("Can't make abs path for %q: %s", firstArg, err) - absStartPath = filepath.Clean(firstArg) - } - - // start from it - var curDir string - if fsutils.IsDir(absStartPath) { - curDir = absStartPath - } else { - curDir = filepath.Dir(absStartPath) - } - - // find all dirs from it up to the root - configSearchPaths := []string{"./"} - - for { - configSearchPaths = append(configSearchPaths, curDir) - newCurDir := filepath.Dir(curDir) - if curDir == newCurDir || newCurDir == "" { - break - } - curDir = newCurDir - } - - // find home directory for global config - if home, err := homedir.Dir(); err != nil { - r.log.Warnf("Can't get user's home directory: %s", err.Error()) - } else if !sliceutil.Contains(configSearchPaths, home) { - configSearchPaths = append(configSearchPaths, home) - } - - r.log.Infof("Config search paths: %s", configSearchPaths) - viper.SetConfigName(".golangci") - for _, p := range configSearchPaths { - viper.AddConfigPath(p) - } -} - -var errConfigDisabled = errors.New("config is disabled by --no-config") - -func (r *FileReader) parseConfigOption() (string, error) { - cfg := r.commandLineCfg - if cfg == nil { - return "", nil - } - - configFile := cfg.Run.Config - if cfg.Run.NoConfig && configFile != "" { - return "", fmt.Errorf("can't combine option --config and --no-config") - } - - if cfg.Run.NoConfig { - return "", errConfigDisabled - } - - configFile, err := homedir.Expand(configFile) - if err != nil { - return "", fmt.Errorf("failed to expand configuration path") - } - - return configFile, nil -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/run.go b/vendor/github.com/golangci/golangci-lint/pkg/config/run.go index ff6347945e..1531ab8830 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/run.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/run.go @@ -1,20 +1,19 @@ package config -import "time" - +import ( + "fmt" + "slices" + "strings" + "time" +) + +// Run encapsulates the config options for running the linter analysis. type Run struct { - IsVerbose bool `mapstructure:"verbose"` - Silent bool - CPUProfilePath string - MemProfilePath string - TracePath string - Concurrency int - PrintResourcesUsage bool `mapstructure:"print-resources-usage"` + Timeout time.Duration `mapstructure:"timeout"` - Config string - NoConfig bool + Concurrency int `mapstructure:"concurrency"` - Args []string + Go string `mapstructure:"go"` BuildTags []string `mapstructure:"build-tags"` ModulesDownloadMode string `mapstructure:"modules-download-mode"` @@ -22,16 +21,27 @@ type Run struct { ExitCodeIfIssuesFound int `mapstructure:"issues-exit-code"` AnalyzeTests bool `mapstructure:"tests"` - // Deprecated: Deadline exists for historical compatibility - // and should not be used. To set run timeout use Timeout instead. - Deadline time.Duration - Timeout time.Duration - - PrintVersion bool - SkipFiles []string `mapstructure:"skip-files"` - SkipDirs []string `mapstructure:"skip-dirs"` - UseDefaultSkipDirs bool `mapstructure:"skip-dirs-use-default"` + // Deprecated: use Issues.ExcludeFiles instead. + SkipFiles []string `mapstructure:"skip-files"` + // Deprecated: use Issues.ExcludeDirs instead. 
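+ // As with SkipFiles above, the loader's handleDeprecation copies values set here into Issues.ExcludeDirs and emits a deprecation warning.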
+ SkipDirs []string `mapstructure:"skip-dirs"` + // Deprecated: use Issues.UseDefaultExcludeDirs instead. + UseDefaultSkipDirs bool `mapstructure:"skip-dirs-use-default"` AllowParallelRunners bool `mapstructure:"allow-parallel-runners"` AllowSerialRunners bool `mapstructure:"allow-serial-runners"` + + // Deprecated: use Output.ShowStats instead. + ShowStats bool `mapstructure:"show-stats"` +} + +func (r *Run) Validate() error { + // go help modules + allowedMods := []string{"mod", "readonly", "vendor"} + + if r.ModulesDownloadMode != "" && !slices.Contains(allowedMods, r.ModulesDownloadMode) { + return fmt.Errorf("invalid modules download path %s, only (%s) allowed", r.ModulesDownloadMode, strings.Join(allowedMods, "|")) + } + + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/severity.go b/vendor/github.com/golangci/golangci-lint/pkg/config/severity.go index 3068a0ed69..a6d2c9ec3f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/severity.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/severity.go @@ -1,5 +1,10 @@ package config +import ( + "errors" + "fmt" +) + const severityRuleMinConditionsCount = 1 type Severity struct { @@ -8,11 +13,29 @@ type Severity struct { Rules []SeverityRule `mapstructure:"rules"` } +func (s *Severity) Validate() error { + if len(s.Rules) > 0 && s.Default == "" { + return errors.New("can't set severity rule option: no default severity defined") + } + + for i, rule := range s.Rules { + if err := rule.Validate(); err != nil { + return fmt.Errorf("error in severity rule #%d: %w", i, err) + } + } + + return nil +} + type SeverityRule struct { BaseRule `mapstructure:",squash"` Severity string } func (s *SeverityRule) Validate() error { + if s.Severity == "" { + return errors.New("severity should be set") + } + return s.BaseRule.Validate(severityRuleMinConditionsCount) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go b/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go index 536f903614..83331dbe7b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go @@ -1,14 +1,14 @@ package exitcodes const ( - Success = 0 - IssuesFound = 1 - WarningInTest = 2 - Failure = 3 - Timeout = 4 - NoGoFiles = 5 - NoConfigFileDetected = 6 - ErrorWasLogged = 7 + Success = iota + IssuesFound + WarningInTest + Failure + Timeout + NoGoFiles + NoConfigFileDetected + ErrorWasLogged ) type ExitError struct { @@ -30,5 +30,3 @@ var ( Code: Failure, } ) - -// 1 diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go index 2b17a03986..e8e5ba19b7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go @@ -2,11 +2,9 @@ package fsutils import ( "fmt" - "io/ioutil" + "os" "sync" - "github.com/pkg/errors" - "github.com/golangci/golangci-lint/pkg/logutils" ) @@ -24,9 +22,9 @@ func (fc *FileCache) GetFileBytes(filePath string) ([]byte, error) { return cachedBytes.([]byte), nil } - fileBytes, err := ioutil.ReadFile(filePath) + fileBytes, err := os.ReadFile(filePath) if err != nil { - return nil, errors.Wrapf(err, "can't read file %s", filePath) + return nil, fmt.Errorf("can't read file %s: %w", filePath, err) } fc.files.Store(filePath, fileBytes) @@ -56,7 +54,7 @@ func PrettifyBytesCount(n int64) string { func (fc 
*FileCache) PrintStats(log logutils.Log) { var size int64 var mapLen int - fc.files.Range(func(_, fileBytes interface{}) bool { + fc.files.Range(func(_, fileBytes any) bool { mapLen++ size += int64(len(fileBytes.([]byte))) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/files.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/files.go new file mode 100644 index 0000000000..4398ab9fc1 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/files.go @@ -0,0 +1,33 @@ +package fsutils + +import "path/filepath" + +// Files combines different operations related to handling file paths and content. +type Files struct { + *LineCache + pathPrefix string +} + +func NewFiles(lc *LineCache, pathPrefix string) *Files { + return &Files{ + LineCache: lc, + pathPrefix: pathPrefix, + } +} + +// WithPathPrefix takes a path that is relative to the current directory (as used in issues) +// and adds the configured path prefix, if there is one. +// The resulting path then can be shown to the user or compared against paths specified in the configuration. +func (f *Files) WithPathPrefix(relativePath string) string { + return WithPathPrefix(f.pathPrefix, relativePath) +} + +// WithPathPrefix takes a path that is relative to the current directory (as used in issues) +// and adds the configured path prefix, if there is one. +// The resulting path then can be shown to the user or compared against paths specified in the configuration. +func WithPathPrefix(pathPrefix, relativePath string) string { + if pathPrefix == "" { + return relativePath + } + return filepath.Join(pathPrefix, relativePath) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go index a39c105e43..80bb9c5b44 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go @@ -12,10 +12,12 @@ func IsDir(filename string) bool { return err == nil && fi.IsDir() } -var cachedWd string -var cachedWdError error -var getWdOnce sync.Once -var useCache = true +var ( + cachedWd string + cachedWdError error + getWdOnce sync.Once + useCache = true +) func UseWdCache(use bool) { useCache = use @@ -34,7 +36,7 @@ func Getwd() (string, error) { evaledWd, err := EvalSymlinks(cachedWd) if err != nil { - cachedWd, cachedWdError = "", fmt.Errorf("can't eval symlinks on wd %s: %s", cachedWd, err) + cachedWd, cachedWdError = "", fmt.Errorf("can't eval symlinks on wd %s: %w", cachedWd, err) return } @@ -70,13 +72,13 @@ func ShortestRelPath(path, wd string) (string, error) { var err error wd, err = Getwd() if err != nil { - return "", fmt.Errorf("can't get working directory: %s", err) + return "", fmt.Errorf("can't get working directory: %w", err) } } evaledPath, err := EvalSymlinks(path) if err != nil { - return "", fmt.Errorf("can't eval symlinks for path %s: %s", path, err) + return "", fmt.Errorf("can't eval symlinks for path %s: %w", path, err) } path = evaledPath @@ -92,7 +94,7 @@ func ShortestRelPath(path, wd string) (string, error) { relPath, err := filepath.Rel(wd, absPath) if err != nil { - return "", fmt.Errorf("can't get relative path for path %s and root %s: %s", + return "", fmt.Errorf("can't get relative path for path %s and root %s: %w", absPath, wd, err) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/linecache.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/linecache.go index ab408e7d54..2e92264846 100644 --- 
a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/linecache.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/linecache.go @@ -4,8 +4,6 @@ import ( "bytes" "fmt" "sync" - - "github.com/pkg/errors" ) type fileLinesCache [][]byte @@ -21,7 +19,7 @@ func NewLineCache(fc *FileCache) *LineCache { } } -// GetLine returns a index1-th (1-based index) line from the file on filePath +// GetLine returns the index1-th (1-based index) line from the file on filePath func (lc *LineCache) GetLine(filePath string, index1 int) (string, error) { if index1 == 0 { // some linters, e.g. gosec can do it: it really means first line index1 = 1 @@ -39,7 +37,7 @@ func (lc *LineCache) GetLine(filePath string, index1 int) (string, error) { func (lc *LineCache) getRawLine(filePath string, index0 int) ([]byte, error) { fc, err := lc.getFileCache(filePath) if err != nil { - return nil, errors.Wrapf(err, "failed to get file %s lines cache", filePath) + return nil, fmt.Errorf("failed to get file %s lines cache: %w", filePath, err) } if index0 < 0 { @@ -61,7 +59,7 @@ func (lc *LineCache) getFileCache(filePath string) (fileLinesCache, error) { fileBytes, err := lc.fileCache.GetFileBytes(filePath) if err != nil { - return nil, errors.Wrapf(err, "can't get file %s bytes from cache", filePath) + return nil, fmt.Errorf("can't get file %s bytes from cache: %w", filePath, err) } fc := bytes.Split(fileBytes, []byte("\n")) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/path_unix.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/path_unix.go new file mode 100644 index 0000000000..2a171ecc0c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/path_unix.go @@ -0,0 +1,8 @@ +//go:build !windows + +package fsutils + +// NormalizePathInRegex is a no-op on Unix. +func NormalizePathInRegex(path string) string { + return path +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/path_windows.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/path_windows.go new file mode 100644 index 0000000000..650aae1e16 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/path_windows.go @@ -0,0 +1,28 @@ +//go:build windows + +package fsutils + +import ( + "path/filepath" + "regexp" + "strings" +) + +var separatorToReplace = regexp.QuoteMeta(string(filepath.Separator)) + +// NormalizePathInRegex normalizes a path for use in regular expressions +// (on Unix this is a no-op; see path_unix.go). +// The replacement is safe because "/" is disallowed in Windows file names: +// https://docs.microsoft.com/windows/win32/fileio/naming-a-file +func NormalizePathInRegex(path string) string { + // remove redundant character escape "\/" https://github.com/golangci/golangci-lint/issues/3277 + clean := regexp.MustCompile(`\\+/`).
+ ReplaceAllStringFunc(path, func(s string) string { + if strings.Count(s, "\\")%2 == 0 { + return s + } + return s[1:] + }) + + return strings.ReplaceAll(clean, "/", separatorToReplace) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint.go new file mode 100644 index 0000000000..67dde79918 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint.go @@ -0,0 +1,30 @@ +package golinters + +import ( + "github.com/alingse/asasalint" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewAsasalint(setting *config.AsasalintSettings) *goanalysis.Linter { + cfg := asasalint.LinterSetting{} + if setting != nil { + cfg.Exclude = setting.Exclude + cfg.NoBuiltinExclusions = !setting.UseBuiltinExclusions + cfg.IgnoreTest = setting.IgnoreTest + } + + a, err := asasalint.NewAnalyzer(cfg) + if err != nil { + linterLogger.Fatalf("asasalint: create analyzer: %v", err) + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asciicheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asciicheck.go index 1bf8c7b7da..7e7ee05e5c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asciicheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asciicheck.go @@ -8,12 +8,12 @@ import ( ) func NewAsciicheck() *goanalysis.Linter { + a := asciicheck.NewAnalyzer() + return goanalysis.NewLinter( - "asciicheck", - "Simple linter to check that your code does not contain non-ASCII identifiers", - []*analysis.Analyzer{ - asciicheck.NewAnalyzer(), - }, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk.go new file mode 100644 index 0000000000..ef266f55ca --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk.go @@ -0,0 +1,59 @@ +package golinters + +import ( + "strings" + + "github.com/breml/bidichk/pkg/bidichk" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewBiDiChkFuncName(cfg *config.BiDiChkSettings) *goanalysis.Linter { + a := bidichk.NewAnalyzer() + + cfgMap := map[string]map[string]any{} + if cfg != nil { + var opts []string + + if cfg.LeftToRightEmbedding { + opts = append(opts, "LEFT-TO-RIGHT-EMBEDDING") + } + if cfg.RightToLeftEmbedding { + opts = append(opts, "RIGHT-TO-LEFT-EMBEDDING") + } + if cfg.PopDirectionalFormatting { + opts = append(opts, "POP-DIRECTIONAL-FORMATTING") + } + if cfg.LeftToRightOverride { + opts = append(opts, "LEFT-TO-RIGHT-OVERRIDE") + } + if cfg.RightToLeftOverride { + opts = append(opts, "RIGHT-TO-LEFT-OVERRIDE") + } + if cfg.LeftToRightIsolate { + opts = append(opts, "LEFT-TO-RIGHT-ISOLATE") + } + if cfg.RightToLeftIsolate { + opts = append(opts, "RIGHT-TO-LEFT-ISOLATE") + } + if cfg.FirstStrongIsolate { + opts = append(opts, "FIRST-STRONG-ISOLATE") + } + if cfg.PopDirectionalIsolate { + opts = append(opts, "POP-DIRECTIONAL-ISOLATE") + } + + cfgMap[a.Name] = map[string]any{ + "disallowed-runes": strings.Join(opts, ","), + } + } + + return 
goanalysis.NewLinter( + a.Name, + "Checks for dangerous unicode character sequences", + []*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose.go index 0e03813d1a..e56bd83f28 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose.go @@ -8,14 +8,10 @@ import ( ) func NewBodyclose() *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - bodyclose.Analyzer, - } - return goanalysis.NewLinter( "bodyclose", "checks whether HTTP response body is closed successfully", - analyzers, + []*analysis.Analyzer{bodyclose.Analyzer}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/commons.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/commons.go new file mode 100644 index 0000000000..3b40e59bfe --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/commons.go @@ -0,0 +1,6 @@ +package golinters + +import "github.com/golangci/golangci-lint/pkg/logutils" + +// linterLogger must be used only when the context logger is not available. +var linterLogger = logutils.NewStderrLog(logutils.DebugKeyLinter) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/containedctx.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/containedctx.go new file mode 100644 index 0000000000..8f7859af7d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/containedctx.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/sivchari/containedctx" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewContainedCtx() *goanalysis.Linter { + a := containedctx.Analyzer + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/contextcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/contextcheck.go new file mode 100644 index 0000000000..f54192a189 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/contextcheck.go @@ -0,0 +1,22 @@ +package golinters + +import ( + "github.com/kkHAIKE/contextcheck" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" +) + +func NewContextCheck() *goanalysis.Linter { + analyzer := contextcheck.NewAnalyzer(contextcheck.Configuration{}) + + return goanalysis.NewLinter( + analyzer.Name, + analyzer.Doc, + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = contextcheck.NewRun(lintCtx.Packages, false) + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/copyloopvar.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/copyloopvar.go new file mode 100644 index 0000000000..02ce5f37a6 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/copyloopvar.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "github.com/karamaru-alpha/copyloopvar" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func
NewCopyLoopVar(settings *config.CopyLoopVarSettings) *goanalysis.Linter { + a := copyloopvar.NewAnalyzer() + + var cfg map[string]map[string]any + if settings != nil { + cfg = map[string]map[string]any{ + a.Name: { + "ignore-alias": settings.IgnoreAlias, + }, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop.go index 6f55b27975..f76c55552c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop.go @@ -8,14 +8,12 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" ) -const cyclopName = "cyclop" - func NewCyclop(settings *config.Cyclop) *goanalysis.Linter { a := analyzer.NewAnalyzer() - var cfg map[string]map[string]interface{} + var cfg map[string]map[string]any if settings != nil { - d := map[string]interface{}{ + d := map[string]any{ "skipTests": settings.SkipTests, } @@ -27,11 +25,11 @@ func NewCyclop(settings *config.Cyclop) *goanalysis.Linter { d["packageAverage"] = settings.PackageAverage } - cfg = map[string]map[string]interface{}{a.Name: d} + cfg = map[string]map[string]any{a.Name: d} } return goanalysis.NewLinter( - cyclopName, + a.Name, "checks function and package cyclomatic complexity", []*analysis.Analyzer{a}, cfg, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go deleted file mode 100644 index 6ff38909fb..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go +++ /dev/null @@ -1,52 +0,0 @@ -package golinters - -import ( - "fmt" - "sync" - - deadcodeAPI "github.com/golangci/go-misc/deadcode" - "golang.org/x/tools/go/analysis" - - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" -) - -func NewDeadcode() *goanalysis.Linter { - const linterName = "deadcode" - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (interface{}, error) { - prog := goanalysis.MakeFakeLoaderProgram(pass) - issues, err := deadcodeAPI.Run(prog) - if err != nil { - return nil, err - } - res := make([]goanalysis.Issue, 0, len(issues)) - for _, i := range issues { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: i.Pos, - Text: fmt.Sprintf("%s is unused", formatCode(i.UnusedIdentName, nil)), - FromLinter: linterName, - }, pass)) - } - mu.Lock() - resIssues = append(resIssues, res...) 
- mu.Unlock() - - return nil, nil - }, - } - return goanalysis.NewLinter( - linterName, - "Finds unused code", - []*analysis.Analyzer{analyzer}, - nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/decorder.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/decorder.go new file mode 100644 index 0000000000..5202a03a4f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/decorder.go @@ -0,0 +1,44 @@ +package golinters + +import ( + "strings" + + "gitlab.com/bosi/decorder" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewDecorder(settings *config.DecorderSettings) *goanalysis.Linter { + a := decorder.Analyzer + + // disable all rules/checks by default + cfg := map[string]any{ + "ignore-underscore-vars": false, + "disable-dec-num-check": true, + "disable-type-dec-num-check": false, + "disable-const-dec-num-check": false, + "disable-var-dec-num-check": false, + "disable-dec-order-check": true, + "disable-init-func-first-check": true, + } + + if settings != nil { + cfg["dec-order"] = strings.Join(settings.DecOrder, ",") + cfg["ignore-underscore-vars"] = settings.IgnoreUnderscoreVars + cfg["disable-dec-num-check"] = settings.DisableDecNumCheck + cfg["disable-type-dec-num-check"] = settings.DisableTypeDecNumCheck + cfg["disable-const-dec-num-check"] = settings.DisableConstDecNumCheck + cfg["disable-var-dec-num-check"] = settings.DisableVarDecNumCheck + cfg["disable-dec-order-check"] = settings.DisableDecOrderCheck + cfg["disable-init-func-first-check"] = settings.DisableInitFuncFirstCheck + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + map[string]map[string]any{a.Name: cfg}, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go index aa372e9568..49e471df82 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go @@ -1,112 +1,50 @@ package golinters import ( - "fmt" - "strings" - "sync" - - "github.com/OpenPeeDeeP/depguard" + "github.com/OpenPeeDeeP/depguard/v2" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/loader" //nolint:staticcheck // require changes in github.com/OpenPeeDeeP/depguard + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) -func setDepguardListType(dg *depguard.Depguard, lintCtx *linter.Context) error { - listType := lintCtx.Settings().Depguard.ListType - var found bool - dg.ListType, found = depguard.StringToListType[strings.ToLower(listType)] - if !found { - if listType != "" { - return fmt.Errorf("unsure what list type %s is", listType) - } - dg.ListType = depguard.LTBlacklist - } - - return nil -} +func NewDepguard(settings *config.DepGuardSettings) *goanalysis.Linter { + conf := depguard.LinterSettings{} -func setupDepguardPackages(dg *depguard.Depguard, lintCtx *linter.Context) { - if dg.ListType == depguard.LTBlacklist { - // if the list type was a blacklist the packages with error messages should - // be included in the blacklist 
package list + if settings != nil { + for s, rule := range settings.Rules { + list := &depguard.List{ + ListMode: rule.ListMode, + Files: rule.Files, + Allow: rule.Allow, + } - noMessagePackages := make(map[string]bool) - for _, pkg := range dg.Packages { - noMessagePackages[pkg] = true - } + // because of bug with Viper parsing (split on dot) we use a list of struct instead of a map. + // https://github.com/spf13/viper/issues/324 + // https://github.com/golangci/golangci-lint/issues/3749#issuecomment-1492536630 - for pkg := range lintCtx.Settings().Depguard.PackagesWithErrorMessage { - if _, ok := noMessagePackages[pkg]; !ok { - dg.Packages = append(dg.Packages, pkg) + deny := map[string]string{} + for _, r := range rule.Deny { + deny[r.Pkg] = r.Desc } + list.Deny = deny + + conf[s] = list } } -} -func NewDepguard() *goanalysis.Linter { - const linterName = "depguard" - var mu sync.Mutex - var resIssues []goanalysis.Issue + a := depguard.NewUncompiledAnalyzer(&conf) - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } return goanalysis.NewLinter( - linterName, - "Go linter that checks if package imports are in a list of acceptable packages", - []*analysis.Analyzer{analyzer}, + a.Analyzer.Name, + a.Analyzer.Doc, + []*analysis.Analyzer{a.Analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - dgSettings := &lintCtx.Settings().Depguard - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - prog := goanalysis.MakeFakeLoaderProgram(pass) - dg := &depguard.Depguard{ - Packages: dgSettings.Packages, - IncludeGoRoot: dgSettings.IncludeGoRoot, - } - if err := setDepguardListType(dg, lintCtx); err != nil { - return nil, err - } - setupDepguardPackages(dg, lintCtx) - - loadConfig := &loader.Config{ - Cwd: "", // fallbacked to os.Getcwd - Build: nil, // fallbacked to build.Default - } - issues, err := dg.Run(loadConfig, prog) - if err != nil { - return nil, err - } - if len(issues) == 0 { - return nil, nil - } - msgSuffix := "is in the blacklist" - if dg.ListType == depguard.LTWhitelist { - msgSuffix = "is not in the whitelist" - } - res := make([]goanalysis.Issue, 0, len(issues)) - for _, i := range issues { - userSuppliedMsgSuffix := dgSettings.PackagesWithErrorMessage[i.PackageName] - if userSuppliedMsgSuffix != "" { - userSuppliedMsgSuffix = ": " + userSuppliedMsgSuffix - } - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: i.Position, - Text: fmt.Sprintf("%s %s%s", formatCode(i.PackageName, lintCtx.Cfg), msgSuffix, userSuppliedMsgSuffix), - FromLinter: linterName, - }, pass)) - } - mu.Lock() - resIssues = append(resIssues, res...) 
- mu.Unlock() - - return nil, nil + err := a.Compile() + if err != nil { + lintCtx.Log.Errorf("create analyzer: %v", err) } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) + }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go index 8978ff913d..de567ce7cf 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go @@ -8,51 +8,64 @@ import ( "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -const dogsledLinterName = "dogsled" +const dogsledName = "dogsled" -func NewDogsled() *goanalysis.Linter { +func NewDogsled(settings *config.DogsledSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: dogsledLinterName, + Name: dogsledName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - dogsledLinterName, - "Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var pkgIssues []goanalysis.Issue - for _, f := range pass.Files { - v := returnsVisitor{ - maxBlanks: lintCtx.Settings().Dogsled.MaxBlankIdentifiers, - f: pass.Fset, - } - ast.Walk(&v, f) - for i := range v.issues { - pkgIssues = append(pkgIssues, goanalysis.NewIssue(&v.issues[i], pass)) - } + Run: func(pass *analysis.Pass) (any, error) { + issues := runDogsled(pass, settings) + + if len(issues) == 0 { + return nil, nil } mu.Lock() - resIssues = append(resIssues, pkgIssues...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + dogsledName, + "Checks assignments with too many blank identifiers (e.g. 
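The depguard v2 wiring above converts golangci-lint's per-rule settings into depguard.LinterSettings. A standalone sketch of that shape follows; the rule name, denied package, and reason text are hypothetical, while the field names, the Deny map, and the NewUncompiledAnalyzer/Compile calls are taken from the hunk above.

package main

import (
	"fmt"

	"github.com/OpenPeeDeeP/depguard/v2"
)

func main() {
	// Hypothetical rule, shaped like one entry of settings.Rules above.
	conf := depguard.LinterSettings{
		"main": &depguard.List{
			Files: []string{"$all"},
			Allow: []string{"$gostd"},
			// Deny maps package -> reason; the hunk above builds this map
			// from a []struct because Viper splits map keys on dots.
			Deny: map[string]string{
				"github.com/pkg/errors": "use stdlib errors and fmt.Errorf with %w",
			},
		},
	}

	a := depguard.NewUncompiledAnalyzer(&conf)
	if err := a.Compile(); err != nil {
		fmt.Println("create analyzer:", err)
		return
	}
	fmt.Println("compiled analyzer:", a.Analyzer.Name)
}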
x, _, _, _, := f())", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } +func runDogsled(pass *analysis.Pass, settings *config.DogsledSettings) []goanalysis.Issue { + var reports []goanalysis.Issue + for _, f := range pass.Files { + v := &returnsVisitor{ + maxBlanks: settings.MaxBlankIdentifiers, + f: pass.Fset, + } + + ast.Walk(v, f) + + for i := range v.issues { + reports = append(reports, goanalysis.NewIssue(&v.issues[i], pass)) + } + } + + return reports +} + type returnsVisitor struct { f *token.FileSet maxBlanks int @@ -87,7 +100,7 @@ func (v *returnsVisitor) Visit(node ast.Node) ast.Visitor { if numBlank > v.maxBlanks { v.issues = append(v.issues, result.Issue{ - FromLinter: dogsledLinterName, + FromLinter: dogsledName, Text: fmt.Sprintf("declaration has %v blank identifiers", numBlank), Pos: v.f.Position(assgnStmt.Pos()), }) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go index ed1c4fcbdc..eec51eabab 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go @@ -6,39 +6,26 @@ import ( "sync" duplAPI "github.com/golangci/dupl" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -const duplLinterName = "dupl" +const duplName = "dupl" -func NewDupl() *goanalysis.Linter { +func NewDupl(settings *config.DuplSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: duplLinterName, + Name: duplName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - duplLinterName, - "Tool for code clone detection", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } - - issues, err := duplAPI.Run(fileNames, lintCtx.Settings().Dupl.Threshold) + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runDupl(pass, settings) if err != nil { return nil, err } @@ -47,37 +34,62 @@ func NewDupl() *goanalysis.Linter { return nil, nil } - res := make([]goanalysis.Issue, 0, len(issues)) - for _, i := range issues { - toFilename, err := fsutils.ShortestRelPath(i.To.Filename(), "") - if err != nil { - return nil, errors.Wrapf(err, "failed to get shortest rel path for %q", i.To.Filename()) - } - dupl := fmt.Sprintf("%s:%d-%d", toFilename, i.To.LineStart(), i.To.LineEnd()) - text := fmt.Sprintf("%d-%d lines are duplicate of %s", - i.From.LineStart(), i.From.LineEnd(), - formatCode(dupl, lintCtx.Cfg)) - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.From.Filename(), - Line: i.From.LineStart(), - }, - LineRange: &result.Range{ - From: i.From.LineStart(), - To: i.From.LineEnd(), - }, - Text: text, - FromLinter: duplLinterName, - }, pass)) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
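Several hunks in this patch (dogsled, dupl, funlen, errcheck, forbidigo) converge on the same skeleton: issues are collected inside the analyzer's Run behind a mutex, then handed back through WithIssuesReporter after all packages are analyzed. A compilable sketch of that skeleton with a hypothetical linter; it assumes the pkg/golinters packages are importable exactly as they appear in these hunks.

package golinters

import (
	"sync"

	"golang.org/x/tools/go/analysis"

	"github.com/golangci/golangci-lint/pkg/golinters/goanalysis"
	"github.com/golangci/golangci-lint/pkg/lint/linter"
	"github.com/golangci/golangci-lint/pkg/result"
)

// NewExampleLinter is a sketch of the refactored pattern above
// (hypothetical linter, demo only).
func NewExampleLinter() *goanalysis.Linter {
	var mu sync.Mutex
	var resIssues []goanalysis.Issue

	analyzer := &analysis.Analyzer{
		Name: "example",
		Doc:  goanalysis.TheOnlyanalyzerDoc,
		Run: func(pass *analysis.Pass) (any, error) {
			var issues []goanalysis.Issue
			for _, f := range pass.Files {
				issues = append(issues, goanalysis.NewIssue(&result.Issue{
					Pos:        pass.Fset.Position(f.Pos()),
					Text:       "example finding",
					FromLinter: "example",
				}, pass))
			}
			if len(issues) == 0 {
				return nil, nil
			}
			mu.Lock()
			resIssues = append(resIssues, issues...)
			mu.Unlock()
			return nil, nil
		},
	}

	return goanalysis.NewLinter(
		"example",
		"Reports one issue per file (demo only)",
		[]*analysis.Analyzer{analyzer},
		nil,
	).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
		return resIssues
	}).WithLoadMode(goanalysis.LoadModeSyntax)
}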
mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + duplName, + "Tool for code clone detection", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runDupl(pass *analysis.Pass, settings *config.DuplSettings) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + issues, err := duplAPI.Run(fileNames, settings.Threshold) + if err != nil { + return nil, err + } + + if len(issues) == 0 { + return nil, nil + } + + res := make([]goanalysis.Issue, 0, len(issues)) + + for _, i := range issues { + toFilename, err := fsutils.ShortestRelPath(i.To.Filename(), "") + if err != nil { + return nil, fmt.Errorf("failed to get shortest rel path for %q: %w", i.To.Filename(), err) + } + + dupl := fmt.Sprintf("%s:%d-%d", toFilename, i.To.LineStart(), i.To.LineEnd()) + text := fmt.Sprintf("%d-%d lines are duplicate of %s", + i.From.LineStart(), i.From.LineEnd(), + formatCode(dupl, nil)) + + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: token.Position{ + Filename: i.From.Filename(), + Line: i.From.LineStart(), + }, + LineRange: &result.Range{ + From: i.From.LineStart(), + To: i.From.LineEnd(), + }, + Text: text, + FromLinter: duplName, + }, pass)) + } + + return res, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword.go new file mode 100644 index 0000000000..6f079ffc89 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword.go @@ -0,0 +1,30 @@ +package golinters + +import ( + "strings" + + "github.com/Abirdcfly/dupword" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewDupWord(setting *config.DupWordSettings) *goanalysis.Linter { + a := dupword.NewAnalyzer() + + cfgMap := map[string]map[string]any{} + if setting != nil { + cfgMap[a.Name] = map[string]any{ + "keyword": strings.Join(setting.Keywords, ","), + "ignore": strings.Join(setting.Ignore, ","), + } + } + + return goanalysis.NewLinter( + a.Name, + "checks for duplicate words in the source code", + []*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/durationcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/durationcheck.go index 9c452af50e..880de5d420 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/durationcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/durationcheck.go @@ -10,6 +10,10 @@ import ( func NewDurationCheck() *goanalysis.Linter { a := durationcheck.Analyzer - return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil). 
- WithLoadMode(goanalysis.LoadModeTypesInfo) + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go index 9aac7326a0..89b18519c9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go @@ -11,7 +11,6 @@ import ( "sync" "github.com/kisielk/errcheck/errcheck" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" @@ -22,26 +21,27 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) -func NewErrcheck() *goanalysis.Linter { - const linterName = "errcheck" +const errcheckName = "errcheck" +func NewErrcheck(settings *config.ErrcheckSettings) *goanalysis.Linter { var mu sync.Mutex - var res []goanalysis.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: linterName, + Name: errcheckName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } return goanalysis.NewLinter( - linterName, - "Errcheck is a program for checking for unchecked errors "+ - "in go programs. These unchecked errors can be critical bugs in some cases", + errcheckName, + "errcheck is a program for checking for unchecked errors in Go code. "+ + "These unchecked errors can be critical bugs in some cases", []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { // copied from errcheck - checker, err := getChecker(&lintCtx.Settings().Errcheck) + checker, err := getChecker(settings) if err != nil { lintCtx.Log.Errorf("failed to get checker: %v", err) return @@ -49,52 +49,67 @@ func NewErrcheck() *goanalysis.Linter { checker.Tags = lintCtx.Cfg.Run.BuildTags - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - pkg := &packages.Package{ - Fset: pass.Fset, - Syntax: pass.Files, - Types: pass.Pkg, - TypesInfo: pass.TypesInfo, + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues := runErrCheck(lintCtx, pass, checker) + if err != nil { + return nil, err } - errcheckIssues := checker.CheckPackage(pkg).Unique() - if len(errcheckIssues.UncheckedErrors) == 0 { + if len(issues) == 0 { return nil, nil } - issues := make([]goanalysis.Issue, len(errcheckIssues.UncheckedErrors)) - for i, err := range errcheckIssues.UncheckedErrors { - var text string - if err.FuncName != "" { - text = fmt.Sprintf( - "Error return value of %s is not checked", - formatCode(err.SelectorName, lintCtx.Cfg), - ) - } else { - text = "Error return value is not checked" - } - - issues[i] = goanalysis.NewIssue( - &result.Issue{ - FromLinter: linterName, - Text: text, - Pos: err.Pos, - }, - pass, - ) - } - mu.Lock() - res = append(res, issues...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil } }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return res + return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } +func runErrCheck(lintCtx *linter.Context, pass *analysis.Pass, checker *errcheck.Checker) []goanalysis.Issue { + pkg := &packages.Package{ + Fset: pass.Fset, + Syntax: pass.Files, + Types: pass.Pkg, + TypesInfo: pass.TypesInfo, + } + + lintIssues := checker.CheckPackage(pkg).Unique() + if len(lintIssues.UncheckedErrors) == 0 { + return nil + } + + issues := make([]goanalysis.Issue, len(lintIssues.UncheckedErrors)) + + for i, err := range lintIssues.UncheckedErrors { + text := "Error return value is not checked" + + if err.FuncName != "" { + code := err.SelectorName + if err.SelectorName == "" { + code = err.FuncName + } + + text = fmt.Sprintf("Error return value of %s is not checked", formatCode(code, lintCtx.Cfg)) + } + + issues[i] = goanalysis.NewIssue( + &result.Issue{ + FromLinter: errcheckName, + Text: text, + Pos: err.Pos, + }, + pass, + ) + } + + return issues +} + // parseIgnoreConfig was taken from errcheck in order to keep the API identical. // https://github.com/kisielk/errcheck/blob/1787c4bee836470bf45018cfbc783650db3c6501/main.go#L25-L60 func parseIgnoreConfig(s string) (map[string]*regexp.Regexp, error) { @@ -127,7 +142,7 @@ func parseIgnoreConfig(s string) (map[string]*regexp.Regexp, error) { func getChecker(errCfg *config.ErrcheckSettings) (*errcheck.Checker, error) { ignoreConfig, err := parseIgnoreConfig(errCfg.Ignore) if err != nil { - return nil, errors.Wrap(err, "failed to parse 'ignore' directive") + return nil, fmt.Errorf("failed to parse 'ignore' directive: %w", err) } checker := errcheck.Checker{ @@ -135,10 +150,13 @@ func getChecker(errCfg *config.ErrcheckSettings) (*errcheck.Checker, error) { BlankAssignments: !errCfg.CheckAssignToBlank, TypeAssertions: !errCfg.CheckTypeAssertions, SymbolRegexpsByPackage: map[string]*regexp.Regexp{}, - Symbols: append([]string{}, errcheck.DefaultExcludedSymbols...), }, } + if !errCfg.DisableDefaultExclusions { + checker.Exclusions.Symbols = append(checker.Exclusions.Symbols, errcheck.DefaultExcludedSymbols...) + } + for pkg, re := range ignoreConfig { checker.Exclusions.SymbolRegexpsByPackage[pkg] = re } @@ -152,6 +170,8 @@ func getChecker(errCfg *config.ErrcheckSettings) (*errcheck.Checker, error) { checker.Exclusions.Symbols = append(checker.Exclusions.Symbols, exclude...) } + checker.Exclusions.Symbols = append(checker.Exclusions.Symbols, errCfg.ExcludeFunctions...) 
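The errcheck hunks also show this patch's recurring error-handling migration: github.com/pkg/errors is dropped in favor of fmt.Errorf with the %w verb, which keeps the underlying cause reachable through errors.Is and errors.As. A minimal stdlib-only sketch, loosely modeled on the readExcludeFile change nearby; the function and file name are illustrative.

package main

import (
	"errors"
	"fmt"
	"os"
)

func readExcludeFile(name string) error {
	_, err := os.Open(name)
	if err != nil {
		// Before: errors.Wrapf(err, "failed reading exclude file: %s", name)
		// After:  %w preserves the error chain for errors.Is / errors.As.
		return fmt.Errorf("failed reading exclude file: %s: %w", name, err)
	}
	return nil
}

func main() {
	err := readExcludeFile("does-not-exist.txt") // assumed missing for the demo
	fmt.Println(err)
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true: the cause survived wrapping
}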
+ return &checker, nil } @@ -231,7 +251,7 @@ func readExcludeFile(name string) ([]string, error) { } if fh == nil { - return nil, errors.Wrapf(err, "failed reading exclude file: %s", name) + return nil, fmt.Errorf("failed reading exclude file: %s: %w", name, err) } scanner := bufio.NewScanner(fh) @@ -242,7 +262,7 @@ func readExcludeFile(name string) ([]string, error) { } if err := scanner.Err(); err != nil { - return nil, errors.Wrapf(err, "failed scanning file: %s", name) + return nil, fmt.Errorf("failed scanning file: %s: %w", name, err) } return excludes, nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson.go new file mode 100644 index 0000000000..1af4450b4e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson.go @@ -0,0 +1,31 @@ +package golinters + +import ( + "github.com/breml/errchkjson" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewErrChkJSONFuncName(cfg *config.ErrChkJSONSettings) *goanalysis.Linter { + a := errchkjson.NewAnalyzer() + + cfgMap := map[string]map[string]any{} + cfgMap[a.Name] = map[string]any{ + "omit-safe": true, + } + if cfg != nil { + cfgMap[a.Name] = map[string]any{ + "omit-safe": !cfg.CheckErrorFreeEncoding, + "report-no-exported": cfg.ReportNoExported, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errname.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errname.go new file mode 100644 index 0000000000..193a7aba75 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errname.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/Antonboom/errname/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewErrName() *goanalysis.Linter { + a := analyzer.New() + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint.go index dd9d901617..cac94159d6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint.go @@ -11,13 +11,14 @@ import ( func NewErrorLint(cfg *config.ErrorLintSettings) *goanalysis.Linter { a := errorlint.NewAnalyzer() - cfgMap := map[string]map[string]interface{}{} + cfgMap := map[string]map[string]any{} if cfg != nil { - cfgMap[a.Name] = map[string]interface{}{ - "errorf": cfg.Errorf, - "asserts": cfg.Asserts, - "comparison": cfg.Comparison, + cfgMap[a.Name] = map[string]any{ + "errorf": cfg.Errorf, + "errorf-multi": cfg.ErrorfMulti, + "asserts": cfg.Asserts, + "comparison": cfg.Comparison, } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/execinquery.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/execinquery.go new file mode 100644 index 0000000000..9911d315e0 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/execinquery.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/lufeee/execinquery" + "golang.org/x/tools/go/analysis" + + 
"github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewExecInQuery() *goanalysis.Linter { + a := execinquery.Analyzer + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go index 85534d42cd..fe58e10f05 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go @@ -11,16 +11,27 @@ import ( func NewExhaustive(settings *config.ExhaustiveSettings) *goanalysis.Linter { a := exhaustive.Analyzer - var cfg map[string]map[string]interface{} + var cfg map[string]map[string]any if settings != nil { - cfg = map[string]map[string]interface{}{ + cfg = map[string]map[string]any{ a.Name: { + exhaustive.CheckFlag: settings.Check, exhaustive.CheckGeneratedFlag: settings.CheckGenerated, exhaustive.DefaultSignifiesExhaustiveFlag: settings.DefaultSignifiesExhaustive, + exhaustive.IgnoreEnumMembersFlag: settings.IgnoreEnumMembers, + exhaustive.IgnoreEnumTypesFlag: settings.IgnoreEnumTypes, + exhaustive.PackageScopeOnlyFlag: settings.PackageScopeOnly, + exhaustive.ExplicitExhaustiveMapFlag: settings.ExplicitExhaustiveMap, + exhaustive.ExplicitExhaustiveSwitchFlag: settings.ExplicitExhaustiveSwitch, + exhaustive.DefaultCaseRequiredFlag: settings.DefaultCaseRequired, }, } } - return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, cfg). - WithLoadMode(goanalysis.LoadModeTypesInfo) + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustivestruct.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustivestruct.go deleted file mode 100644 index 6a1dbd71c5..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustivestruct.go +++ /dev/null @@ -1,31 +0,0 @@ -package golinters - -import ( - "strings" - - "github.com/mbilski/exhaustivestruct/pkg/analyzer" - "golang.org/x/tools/go/analysis" - - "github.com/golangci/golangci-lint/pkg/config" - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" -) - -func NewExhaustiveStruct(settings *config.ExhaustiveStructSettings) *goanalysis.Linter { - a := analyzer.Analyzer - - var cfg map[string]map[string]interface{} - if settings != nil { - cfg = map[string]map[string]interface{}{ - a.Name: { - "struct_patterns": strings.Join(settings.StructPatterns, ","), - }, - } - } - - return goanalysis.NewLinter( - a.Name, - a.Doc, - []*analysis.Analyzer{a}, - cfg, - ).WithLoadMode(goanalysis.LoadModeTypesInfo) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustruct.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustruct.go new file mode 100644 index 0000000000..5272879e1d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustruct.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewExhaustruct(settings *config.ExhaustructSettings) *goanalysis.Linter { + var include, exclude []string + if settings != nil { + include = settings.Include + exclude = 
settings.Exclude + } + + a, err := analyzer.NewAnalyzer(include, exclude) + if err != nil { + linterLogger.Fatalf("exhaustruct configuration: %v", err) + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo.go index 2fa9d51833..797484ba2c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo.go @@ -1,70 +1,103 @@ package golinters import ( + "fmt" "sync" "github.com/ashanbrown/forbidigo/forbidigo" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) -func NewForbidigo() *goanalysis.Linter { - const linterName = "forbidigo" +const forbidigoName = "forbidigo" + +func NewForbidigo(settings *config.ForbidigoSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: linterName, + Name: forbidigoName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - linterName, - "Forbids identifiers", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - s := &lintCtx.Settings().Forbidigo - - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []goanalysis.Issue - options := []forbidigo.Option{ - forbidigo.OptionExcludeGodocExamples(s.ExcludeGodocExamples), - // disable "//permit" directives so only "//nolint" directives matters within golangci lint - forbidigo.OptionIgnorePermitDirectives(true), - } - forbid, err := forbidigo.NewLinter(s.Forbid, options...) + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runForbidigo(pass, settings) if err != nil { - return nil, errors.Wrapf(err, "failed to create linter %q", linterName) + return nil, err } - for _, file := range pass.Files { - hints, err := forbid.Run(pass.Fset, file) - if err != nil { - return nil, errors.Wrapf(err, "forbidigo linter failed on file %q", file.Name.String()) - } - for _, hint := range hints { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: hint.Position(), - Text: hint.Details(), - FromLinter: linterName, - }, pass)) - } - } - - if len(res) == 0 { + if len(issues) == 0 { return nil, nil } mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + // Without AnalyzeTypes, LoadModeSyntax is enough. + // But we cannot make this depend on the settings and have to mirror the mode chosen in GetAllSupportedLinterConfigs, + // therefore we have to use LoadModeTypesInfo in all cases. 
+ return goanalysis.NewLinter( + forbidigoName, + "Forbids identifiers", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) ([]goanalysis.Issue, error) { + options := []forbidigo.Option{ + forbidigo.OptionExcludeGodocExamples(settings.ExcludeGodocExamples), + // disable "//permit" directives so only "//nolint" directives matters within golangci-lint + forbidigo.OptionIgnorePermitDirectives(true), + forbidigo.OptionAnalyzeTypes(settings.AnalyzeTypes), + } + + // Convert patterns back to strings because that is what NewLinter accepts. + var patterns []string + for _, pattern := range settings.Forbid { + buffer, err := pattern.MarshalString() + if err != nil { + return nil, err + } + patterns = append(patterns, string(buffer)) + } + + forbid, err := forbidigo.NewLinter(patterns, options...) + if err != nil { + return nil, fmt.Errorf("failed to create linter %q: %w", forbidigoName, err) + } + + var issues []goanalysis.Issue + for _, file := range pass.Files { + runConfig := forbidigo.RunConfig{ + Fset: pass.Fset, + DebugLog: logutils.Debug(logutils.DebugKeyForbidigo), + } + if settings != nil && settings.AnalyzeTypes { + runConfig.TypesInfo = pass.TypesInfo + } + hints, err := forbid.RunWithConfig(runConfig, file) + if err != nil { + return nil, fmt.Errorf("forbidigo linter failed on file %q: %w", file.Name.String(), err) + } + + for _, hint := range hints { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: hint.Position(), + Text: hint.Details(), + FromLinter: forbidigoName, + }, pass)) + } + } + + return issues, nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go index 29cb6b7ef7..d421e174b7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go @@ -8,57 +8,68 @@ import ( "github.com/ultraware/funlen" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -const funlenLinterName = "funlen" +const funlenName = "funlen" -func NewFunlen() *goanalysis.Linter { +func NewFunlen(settings *config.FunlenSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: funlenLinterName, + Name: funlenName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - funlenLinterName, - "Tool for detection of long functions", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var issues []funlen.Message - for _, file := range pass.Files { - fileIssues := funlen.Run(file, pass.Fset, lintCtx.Settings().Funlen.Lines, lintCtx.Settings().Funlen.Statements) - issues = append(issues, fileIssues...) 
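runForbidigo above moves to forbidigo's RunWithConfig API. A standalone sketch of that call flow against an in-memory file; the forbidden pattern and source snippet are hypothetical, while NewLinter, OptionIgnorePermitDirectives, RunConfig, Position, and Details are the names used in the hunk above.

package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"github.com/ashanbrown/forbidigo/forbidigo"
)

const src = `package demo

import "fmt"

func hello() {
	fmt.Println("hi")
}
`

func main() {
	forbid, err := forbidigo.NewLinter(
		[]string{`fmt\.Println`}, // hypothetical pattern
		forbidigo.OptionIgnorePermitDirectives(true),
	)
	if err != nil {
		panic(err)
	}

	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// Without AnalyzeTypes, Fset alone is enough, matching the syntax-only path above.
	hints, err := forbid.RunWithConfig(forbidigo.RunConfig{Fset: fset}, file)
	if err != nil {
		panic(err)
	}
	for _, hint := range hints {
		fmt.Printf("%s: %s\n", hint.Position(), hint.Details())
	}
}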
- } + Run: func(pass *analysis.Pass) (any, error) { + issues := runFunlen(pass, settings) if len(issues) == 0 { return nil, nil } - res := make([]goanalysis.Issue, len(issues)) - for k, i := range issues { - res[k] = goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.Pos.Filename, - Line: i.Pos.Line, - }, - Text: strings.TrimRight(i.Message, "\n"), - FromLinter: funlenLinterName, - }, pass) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + funlenName, + "Tool for detection of long functions", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runFunlen(pass *analysis.Pass, settings *config.FunlenSettings) []goanalysis.Issue { + var lintIssues []funlen.Message + for _, file := range pass.Files { + fileIssues := funlen.Run(file, pass.Fset, settings.Lines, settings.Statements, settings.IgnoreComments) + lintIssues = append(lintIssues, fileIssues...) + } + + if len(lintIssues) == 0 { + return nil + } + + issues := make([]goanalysis.Issue, len(lintIssues)) + for k, i := range lintIssues { + issues[k] = goanalysis.NewIssue(&result.Issue{ + Pos: token.Position{ + Filename: i.Pos.Filename, + Line: i.Pos.Line, + }, + Text: strings.TrimRight(i.Message, "\n"), + FromLinter: funlenName, + }, pass) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go index 6fa43544e6..3862267692 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go @@ -1,79 +1,69 @@ package golinters import ( - "bytes" "fmt" "sync" + gcicfg "github.com/daixiang0/gci/pkg/config" "github.com/daixiang0/gci/pkg/gci" - "github.com/pkg/errors" - "github.com/shazow/go-diff/difflib" + "github.com/daixiang0/gci/pkg/io" + "github.com/daixiang0/gci/pkg/log" + "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/myers" + "github.com/hexops/gotextdiff/span" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" ) const gciName = "gci" -func NewGci() *goanalysis.Linter { +func NewGci(settings *config.GciSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - differ := difflib.New() analyzer := &analysis.Analyzer{ Name: gciName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } + + var cfg *gcicfg.Config + if settings != nil { + rawCfg := gcicfg.YamlConfig{ + Cfg: gcicfg.BoolConfig{ + SkipGenerated: settings.SkipGenerated, + CustomOrder: settings.CustomOrder, + }, + SectionStrings: settings.Sections, + } + + if settings.LocalPrefixes != "" { + prefix := []string{"standard", "default", fmt.Sprintf("prefix(%s)", settings.LocalPrefixes)} + rawCfg.SectionStrings = prefix + } + + var err error + cfg, err = rawCfg.Parse() + if err != nil { + linterLogger.Fatalf("gci: configuration parsing: %v", err) + } + } + + var lock sync.Mutex + return goanalysis.NewLinter( gciName, - "Gci control golang package import order and make it always deterministic.", + "Gci controls Go package import order and makes it always deterministic.", 
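runFunlen above now threads settings.IgnoreComments through to funlen.Run alongside the line and statement limits. A self-contained sketch of that call, using deliberately tiny limits so the demo function is reported; the limits and snippet are illustrative, and the Message fields are the ones used in the hunk above.

package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"github.com/ultraware/funlen"
)

const src = `package demo

func tiny() {
	_ = 1
	_ = 2
}
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// Absurdly low limits (1 line, 1 statement) so even tiny() is flagged.
	for _, msg := range funlen.Run(file, fset, 1, 1, false) {
		fmt.Printf("%s:%d: %s\n", msg.Pos.Filename, msg.Pos.Line, msg.Message)
	}
}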
[]*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - localFlag := lintCtx.Settings().Gci.LocalPrefixes - goimportsFlag := lintCtx.Settings().Goimports.LocalPrefixes - if localFlag == "" && goimportsFlag != "" { - localFlag = goimportsFlag - } - - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } - - var issues []goanalysis.Issue - - for _, f := range fileNames { - source, result, err := gci.Run(f, &gci.FlagSet{LocalFlag: localFlag}) - if err != nil { - return nil, err - } - if result == nil { - continue - } - - diff := bytes.Buffer{} - _, err = diff.WriteString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f)) - if err != nil { - return nil, fmt.Errorf("can't write diff header: %v", err) - } - - err = differ.Diff(&diff, bytes.NewReader(source), bytes.NewReader(result)) - if err != nil { - return nil, fmt.Errorf("can't get gci diff output: %v", err) - } - - is, err := extractIssuesFromPatch(diff.String(), lintCtx.Log, lintCtx, gciName) - if err != nil { - return nil, errors.Wrapf(err, "can't extract issues from gci diff output %q", diff.String()) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runGci(pass, lintCtx, cfg, &lock) + if err != nil { + return nil, err } if len(issues) == 0 { @@ -90,3 +80,78 @@ func NewGci() *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGci(pass *analysis.Pass, lintCtx *linter.Context, cfg *gcicfg.Config, lock *sync.Mutex) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + var diffs []string + err := diffFormattedFilesToArray(fileNames, *cfg, &diffs, lock) + if err != nil { + return nil, err + } + + var issues []goanalysis.Issue + + for _, diff := range diffs { + if diff == "" { + continue + } + + is, err := extractIssuesFromPatch(diff, lintCtx, gciName) + if err != nil { + return nil, fmt.Errorf("can't extract issues from gci diff output %s: %w", diff, err) + } + + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } + } + + return issues, nil +} + +// diffFormattedFilesToArray is a copy of gci.DiffFormattedFilesToArray without io.StdInGenerator. +// gci.DiffFormattedFilesToArray uses gci.processStdInAndGoFilesInPaths that uses io.StdInGenerator but stdin is not active on CI. 
+// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L63-L75 +// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L80 +func diffFormattedFilesToArray(paths []string, cfg gcicfg.Config, diffs *[]string, lock *sync.Mutex) error { + log.InitLogger() + defer func() { _ = log.L().Sync() }() + + return gci.ProcessFiles(io.GoFilesInPathsGenerator(paths, true), cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { + fileURI := span.URIFromPath(filePath) + edits := myers.ComputeEdits(fileURI, string(unmodifiedFile), string(formattedFile)) + unifiedEdits := gotextdiff.ToUnified(filePath, filePath, string(unmodifiedFile), edits) + lock.Lock() + *diffs = append(*diffs, fmt.Sprint(unifiedEdits)) + lock.Unlock() + return nil + }) +} + +func getErrorTextForGci(settings config.GciSettings) string { + text := "File is not `gci`-ed" + + hasOptions := settings.SkipGenerated || len(settings.Sections) > 0 + if !hasOptions { + return text + } + + text += " with" + + if settings.SkipGenerated { + text += " --skip-generated" + } + + if len(settings.Sections) > 0 { + for _, section := range settings.Sections { + text += " -s " + section + } + } + + if settings.CustomOrder { + text += " --custom-order" + } + + return text +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter.go new file mode 100644 index 0000000000..3d20a9f49e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter.go @@ -0,0 +1,39 @@ +package golinters + +import ( + "github.com/nunnatsa/ginkgolinter" + "github.com/nunnatsa/ginkgolinter/types" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGinkgoLinter(settings *config.GinkgoLinterSettings) *goanalysis.Linter { + cfg := &types.Config{} + + if settings != nil { + cfg = &types.Config{ + SuppressLen: types.Boolean(settings.SuppressLenAssertion), + SuppressNil: types.Boolean(settings.SuppressNilAssertion), + SuppressErr: types.Boolean(settings.SuppressErrAssertion), + SuppressCompare: types.Boolean(settings.SuppressCompareAssertion), + SuppressAsync: types.Boolean(settings.SuppressAsyncAssertion), + ForbidFocus: types.Boolean(settings.ForbidFocusContainer), + SuppressTypeCompare: types.Boolean(settings.SuppressTypeCompareWarning), + AllowHaveLen0: types.Boolean(settings.AllowHaveLenZero), + ForceExpectTo: types.Boolean(settings.ForceExpectTo), + ValidateAsyncIntervals: types.Boolean(settings.ForbidSpecPollution), + ForbidSpecPollution: types.Boolean(settings.ValidateAsyncIntervals), + } + } + + a := ginkgolinter.NewAnalyzerWithConfig(cfg) + + return goanalysis.NewLinter( + a.Name, + "enforces standards of using ginkgo and gomega", + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/adapters.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/adapters.go deleted file mode 100644 index b702d1660b..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/adapters.go +++ /dev/null @@ -1,36 +0,0 @@ -package goanalysis - -import ( - "go/types" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/loader" //nolint:staticcheck // it's an adapter for golang.org/x/tools/go/packages -) - -func 
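diffFormattedFilesToArray above replaces shazow/go-diff with hexops/gotextdiff for producing the unified diffs that gci issues are extracted from. A minimal sketch of the same three calls on a before/after pair; the file name and contents are hypothetical.

package main

import (
	"fmt"

	"github.com/hexops/gotextdiff"
	"github.com/hexops/gotextdiff/myers"
	"github.com/hexops/gotextdiff/span"
)

func main() {
	before := "package demo\n\nimport (\n\t\"os\"\n\t\"fmt\"\n)\n"
	after := "package demo\n\nimport (\n\t\"fmt\"\n\t\"os\"\n)\n"

	// Same sequence as the ProcessFiles callback above:
	// URI -> Myers edits -> unified diff text.
	uri := span.URIFromPath("demo.go")
	edits := myers.ComputeEdits(uri, before, after)
	unified := gotextdiff.ToUnified("demo.go", "demo.go", before, edits)

	fmt.Print(unified) // unified diff, later fed to extractIssuesFromPatch
}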
MakeFakeLoaderProgram(pass *analysis.Pass) *loader.Program { - prog := &loader.Program{ - Fset: pass.Fset, - Created: []*loader.PackageInfo{ - { - Pkg: pass.Pkg, - Importable: true, // not used - TransitivelyErrorFree: true, // TODO - - Files: pass.Files, - Errors: nil, - Info: *pass.TypesInfo, - }, - }, - AllPackages: map[*types.Package]*loader.PackageInfo{ - pass.Pkg: { - Pkg: pass.Pkg, - Importable: true, - TransitivelyErrorFree: true, - Files: pass.Files, - Errors: nil, - Info: *pass.TypesInfo, - }, - }, - } - return prog -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/errors.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/errors.go index 13b9ccf0af..f59e02cc64 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/errors.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/errors.go @@ -1,9 +1,9 @@ package goanalysis import ( + "errors" "fmt" - "github.com/pkg/errors" "golang.org/x/tools/go/packages" "github.com/golangci/golangci-lint/pkg/lint/linter" diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go index f331a3ab9f..15d8dd2b33 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go @@ -13,9 +13,9 @@ type Issue struct { Pass *analysis.Pass } -func NewIssue(i *result.Issue, pass *analysis.Pass) Issue { +func NewIssue(issue *result.Issue, pass *analysis.Pass) Issue { return Issue{ - Issue: *i, + Issue: *issue, Pass: pass, } } @@ -23,6 +23,7 @@ func NewIssue(i *result.Issue, pass *analysis.Pass) Issue { type EncodingIssue struct { FromLinter string Text string + Severity string Pos token.Position LineRange *result.Range Replacement *result.Replacement diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go index ef49e4284a..f8ca2e7553 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go @@ -2,11 +2,11 @@ package goanalysis import ( "context" + "errors" "flag" "fmt" "strings" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/lint/linter" @@ -44,14 +44,14 @@ const ( type Linter struct { name, desc string analyzers []*analysis.Analyzer - cfg map[string]map[string]interface{} + cfg map[string]map[string]any issuesReporter func(*linter.Context) []Issue contextSetter func(*linter.Context) loadMode LoadMode needUseOriginalPackages bool } -func NewLinter(name, desc string, analyzers []*analysis.Analyzer, cfg map[string]map[string]interface{}) *Linter { +func NewLinter(name, desc string, analyzers []*analysis.Analyzer, cfg map[string]map[string]any) *Linter { return &Linter{name: name, desc: desc, analyzers: analyzers, cfg: cfg} } @@ -102,13 +102,13 @@ func (lnt *Linter) allAnalyzerNames() []string { return ret } -func (lnt *Linter) configureAnalyzer(a *analysis.Analyzer, cfg map[string]interface{}) error { +func (lnt *Linter) configureAnalyzer(a *analysis.Analyzer, cfg map[string]any) error { for k, v := range cfg { f := a.Flags.Lookup(k) if f == nil { validFlagNames := allFlagNames(&a.Flags) if len(validFlagNames) == 0 { - return fmt.Errorf("analyzer doesn't have settings") + return 
errors.New("analyzer doesn't have settings") } return fmt.Errorf("analyzer doesn't have setting %q, valid settings: %v", @@ -116,7 +116,7 @@ func (lnt *Linter) configureAnalyzer(a *analysis.Analyzer, cfg map[string]interf } if err := f.Value.Set(valueToString(v)); err != nil { - return errors.Wrapf(err, "failed to set analyzer setting %q with value %v", k, v) + return fmt.Errorf("failed to set analyzer setting %q with value %v: %w", k, v, err) } } @@ -137,7 +137,7 @@ func (lnt *Linter) configure() error { } if err := lnt.configureAnalyzer(a, analyzerSettings); err != nil { - return errors.Wrapf(err, "failed to configure analyzer %s", analyzerName) + return fmt.Errorf("failed to configure analyzer %s: %w", analyzerName, err) } } @@ -146,11 +146,11 @@ func (lnt *Linter) configure() error { func (lnt *Linter) preRun(lintCtx *linter.Context) error { if err := analysis.Validate(lnt.analyzers); err != nil { - return errors.Wrap(err, "failed to validate analyzers") + return fmt.Errorf("failed to validate analyzers: %w", err) } if err := lnt.configure(); err != nil { - return errors.Wrap(err, "failed to configure analyzers") + return fmt.Errorf("failed to configure analyzers: %w", err) } if lnt.contextSetter != nil { @@ -195,12 +195,12 @@ func allFlagNames(fs *flag.FlagSet) []string { return ret } -func valueToString(v interface{}) string { +func valueToString(v any) string { if ss, ok := v.([]string); ok { return strings.Join(ss, ",") } - if is, ok := v.([]interface{}); ok { + if is, ok := v.([]any); ok { var ss []string for _, i := range is { ss = append(ss, fmt.Sprint(i)) @@ -211,3 +211,7 @@ func valueToString(v interface{}) string { return fmt.Sprint(v) } + +func DummyRun(_ *analysis.Pass) (any, error) { + return nil, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go index 5c24d10964..333ab20f1f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go @@ -2,8 +2,8 @@ package goanalysis import ( "context" + "fmt" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/lint/linter" @@ -24,7 +24,7 @@ func NewMetaLinter(linters []*Linter) *MetaLinter { func (ml MetaLinter) Run(_ context.Context, lintCtx *linter.Context) ([]result.Issue, error) { for _, l := range ml.linters { if err := l.preRun(lintCtx); err != nil { - return nil, errors.Wrapf(err, "failed to pre-run %s", l.Name()) + return nil, fmt.Errorf("failed to pre-run %s: %w", l.Name(), err) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go index 8b460d16b4..b75e3fa0f5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go @@ -11,12 +11,13 @@ package goanalysis import ( "encoding/gob" + "fmt" "go/token" "runtime" "sort" "sync" - "github.com/pkg/errors" + "golang.org/x/exp/maps" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" @@ -28,17 +29,17 @@ import ( ) var ( - debugf = logutils.Debug("goanalysis") + debugf = logutils.Debug(logutils.DebugKeyGoAnalysis) - analyzeDebugf = logutils.Debug("goanalysis/analyze") - isMemoryDebug = logutils.HaveDebugTag("goanalysis/memory") - issuesCacheDebugf = 
logutils.Debug("goanalysis/issues/cache") + analyzeDebugf = logutils.Debug(logutils.DebugKeyGoAnalysisAnalyze) + isMemoryDebug = logutils.HaveDebugTag(logutils.DebugKeyGoAnalysisMemory) + issuesCacheDebugf = logutils.Debug(logutils.DebugKeyGoAnalysisIssuesCache) - factsDebugf = logutils.Debug("goanalysis/facts") - factsCacheDebugf = logutils.Debug("goanalysis/facts/cache") - factsInheritDebugf = logutils.Debug("goanalysis/facts/inherit") - factsExportDebugf = logutils.Debug("goanalysis/facts") - isFactsExportDebug = logutils.HaveDebugTag("goanalysis/facts/export") + factsDebugf = logutils.Debug(logutils.DebugKeyGoAnalysisFacts) + factsCacheDebugf = logutils.Debug(logutils.DebugKeyGoAnalysisFactsCache) + factsInheritDebugf = logutils.Debug(logutils.DebugKeyGoAnalysisFactsInherit) + factsExportDebugf = logutils.Debug(logutils.DebugKeyGoAnalysisFacts) + isFactsExportDebug = logutils.HaveDebugTag(logutils.DebugKeyGoAnalysisFactsExport) ) type Diagnostic struct { @@ -60,7 +61,8 @@ type runner struct { } func newRunner(prefix string, logger logutils.Log, pkgCache *pkgcache.Cache, loadGuard *load.Guard, - loadMode LoadMode, sw *timeutils.Stopwatch) *runner { + loadMode LoadMode, sw *timeutils.Stopwatch, +) *runner { return &runner{ prefix: prefix, log: logger, @@ -79,7 +81,8 @@ func newRunner(prefix string, logger logutils.Log, pkgCache *pkgcache.Cache, loa // singlechecker and the multi-analysis commands. // It returns the appropriate exit code. func (r *runner) run(analyzers []*analysis.Analyzer, initialPackages []*packages.Package) ([]Diagnostic, - []error, map[*analysis.Pass]*packages.Package) { + []error, map[*analysis.Pass]*packages.Package, +) { debugf("Analyzing %d packages on load mode %s", len(initialPackages), r.loadMode) defer r.pkgCache.Trim() @@ -115,7 +118,8 @@ func (r *runner) markAllActions(a *analysis.Analyzer, pkg *packages.Package, mar } func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package, - initialPkgs map[*packages.Package]bool, actions map[actKey]*action, actAlloc *actionAllocator) *action { + initialPkgs map[*packages.Package]bool, actions map[actKey]*action, actAlloc *actionAllocator, +) *action { k := actKey{a, pkg} act, ok := actions[k] if ok { @@ -149,7 +153,8 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package, } func (r *runner) buildActionFactDeps(act *action, a *analysis.Analyzer, pkg *packages.Package, - initialPkgs map[*packages.Package]bool, actions map[actKey]*action, actAlloc *actionAllocator) { + initialPkgs map[*packages.Package]bool, actions map[actKey]*action, actAlloc *actionAllocator, +) { // An analysis that consumes/produces facts // must run on the package's dependencies too. 
if len(a.FactTypes) == 0 { @@ -159,10 +164,7 @@ func (r *runner) buildActionFactDeps(act *action, a *analysis.Analyzer, pkg *pac act.objectFacts = make(map[objectFactKey]analysis.Fact) act.packageFacts = make(map[packageFactKey]analysis.Fact) - paths := make([]string, 0, len(pkg.Imports)) - for path := range pkg.Imports { - paths = append(paths, path) - } + paths := maps.Keys(pkg.Imports) sort.Strings(paths) // for determinism for _, path := range paths { dep := r.makeAction(a, pkg.Imports[path], initialPkgs, actions, actAlloc) @@ -175,9 +177,9 @@ func (r *runner) buildActionFactDeps(act *action, a *analysis.Analyzer, pkg *pac } } -//nolint:gocritic func (r *runner) prepareAnalysis(pkgs []*packages.Package, - analyzers []*analysis.Analyzer) (map[*packages.Package]bool, []*action, []*action) { + analyzers []*analysis.Analyzer, +) (initialPkgs map[*packages.Package]bool, allActions, roots []*action) { // Construct the action graph. // Each graph node (action) is one unit of analysis. @@ -185,7 +187,7 @@ func (r *runner) prepareAnalysis(pkgs []*packages.Package, // and analysis-to-analysis (horizontal) dependencies. // This place is memory-intensive: e.g. Istio project has 120k total actions. - // Therefore optimize it carefully. + // Therefore, optimize it carefully. markedActions := make(map[actKey]struct{}, len(analyzers)*len(pkgs)) for _, a := range analyzers { for _, pkg := range pkgs { @@ -197,13 +199,13 @@ func (r *runner) prepareAnalysis(pkgs []*packages.Package, actions := make(map[actKey]*action, totalActionsCount) actAlloc := newActionAllocator(totalActionsCount) - initialPkgs := make(map[*packages.Package]bool, len(pkgs)) + initialPkgs = make(map[*packages.Package]bool, len(pkgs)) for _, pkg := range pkgs { initialPkgs[pkg] = true } // Build nodes for initial packages. 
- roots := make([]*action, 0, len(pkgs)*len(analyzers)) + roots = make([]*action, 0, len(pkgs)*len(analyzers)) for _, a := range analyzers { for _, pkg := range pkgs { root := r.makeAction(a, pkg, initialPkgs, actions, actAlloc) @@ -212,10 +214,7 @@ func (r *runner) prepareAnalysis(pkgs []*packages.Package, } } - allActions := make([]*action, 0, len(actions)) - for _, act := range actions { - allActions = append(allActions, act) - } + allActions = maps.Values(actions) debugf("Built %d actions", len(actions)) @@ -281,7 +280,6 @@ func (r *runner) analyze(pkgs []*packages.Package, analyzers []*analysis.Analyze return rootActions } -//nolint:nakedret func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []error) { extracted := make(map[*action]bool) var extract func(*action) @@ -311,7 +309,7 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err if pe, ok := act.err.(*errorutil.PanicError); ok { panic(pe) } - retErrors = append(retErrors, errors.Wrap(act.err, act.a.Name)) + retErrors = append(retErrors, fmt.Errorf("%s: %w", act.a.Name, act.err)) return } @@ -338,5 +336,5 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err } } visitAll(roots) - return + return retDiags, retErrors } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_action.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_action.go index 96c613e83e..6b57cb0c9a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_action.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_action.go @@ -1,14 +1,14 @@ package goanalysis import ( + "errors" "fmt" "go/types" + "io" "reflect" "runtime/debug" "time" - "github.com/hashicorp/go-multierror" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" "golang.org/x/tools/go/types/objectpath" @@ -49,7 +49,7 @@ type action struct { deps []*action objectFacts map[objectFactKey]analysis.Fact packageFacts map[packageFactKey]analysis.Fact - result interface{} + result any diagnostics []analysis.Diagnostic err error r *runner @@ -97,6 +97,13 @@ func (act *action) waitUntilDependingAnalyzersWorked() { func (act *action) analyzeSafe() { defer func() { if p := recover(); p != nil { + if !act.isroot { + // This line allows to display "hidden" panic with analyzers like buildssa. + // Some linters are dependent of sub-analyzers but when a sub-analyzer fails the linter is not aware of that, + // this results to another panic (ex: "interface conversion: interface {} is nil, not *buildssa.SSA"). + act.r.log.Errorf("%s: panic during analysis: %v, %s", act.a.Name, p, string(debug.Stack())) + } + act.err = errorutil.NewPanicError(fmt.Sprintf("%s: package %q (isInitialPkg: %t, needAnalyzeSource: %t): %s", act.a.Name, act.pkg.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack()) } @@ -118,26 +125,22 @@ func (act *action) analyze() { }(time.Now()) // Report an error if any dependency failures. 
- var depErrors *multierror.Error + var depErrors error for _, dep := range act.deps { if dep.err == nil { continue } - depErrors = multierror.Append(depErrors, errors.Cause(dep.err)) + depErrors = errors.Join(depErrors, errors.Unwrap(dep.err)) } if depErrors != nil { - depErrors.ErrorFormat = func(e []error) string { - return fmt.Sprintf("failed prerequisites: %v", e) - } - - act.err = depErrors + act.err = fmt.Errorf("failed prerequisites: %w", depErrors) return } // Plumb the output values of the dependencies // into the inputs of this action. Also facts. - inputs := make(map[*analysis.Analyzer]interface{}) + inputs := make(map[*analysis.Analyzer]any) startedAt := time.Now() for _, dep := range act.deps { if dep.pkg == act.pkg { @@ -179,9 +182,9 @@ func (act *action) analyze() { if act.pkg.IllTyped { // It looks like there should be !pass.Analyzer.RunDespiteErrors - // but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here - // but it exit before it if packages.Load have failed. - act.err = errors.Wrap(&IllTypedError{Pkg: act.pkg}, "analysis skipped") + // but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here, + // but it exits before it if packages.Load have failed. + act.err = fmt.Errorf("analysis skipped: %w", &IllTypedError{Pkg: act.pkg}) } else { startedAt = time.Now() act.result, act.err = pass.Analyzer.Run(pass) @@ -331,7 +334,7 @@ func (act *action) loadPersistedFacts() bool { var facts []Fact key := fmt.Sprintf("%s/facts", act.a.Name) if err := act.r.pkgCache.Get(act.pkg, pkgcache.HashModeNeedAllDeps, key, &facts); err != nil { - if err != pkgcache.ErrMissing { + if !errors.Is(err, pkgcache.ErrMissing) && !errors.Is(err, io.EOF) { act.r.log.Warnf("Failed to get persisted facts: %s", err) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_loadingpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_loadingpackage.go index 9fa396854e..e39e2212c3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_loadingpackage.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_loadingpackage.go @@ -1,6 +1,7 @@ package goanalysis import ( + "errors" "fmt" "go/ast" "go/parser" @@ -11,7 +12,6 @@ import ( "sync" "sync/atomic" - "github.com/pkg/errors" "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/go/packages" @@ -59,9 +59,9 @@ func (lp *loadingPackage) analyze(loadMode LoadMode, loadSem chan struct{}) { defer lp.decUse(loadMode < LoadModeWholeProgram) if err := lp.loadWithFacts(loadMode); err != nil { - werr := errors.Wrapf(err, "failed to load package %s", lp.pkg.Name) + werr := fmt.Errorf("failed to load package %s: %w", lp.pkg.Name, err) // Don't need to write error to errCh, it will be extracted and reported on another layer. - // Unblock depending actions and propagate error. + // Unblock depending on actions and propagate error. 
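The runner_action.go hunk above swaps hashicorp/go-multierror for Go 1.20's errors.Join, then wraps the aggregate with %w. A small sketch showing why that works: errors.Is still matches any individual joined branch through the wrapper. The error texts are hypothetical.

package main

import (
	"errors"
	"fmt"
)

var errDep = errors.New("dep analyzer failed")

func main() {
	// Stand-ins for failed act.deps; errors.Join accumulates them
	// the same way multierror.Append used to.
	var depErrors error
	for _, err := range []error{errDep, errors.New("another failure")} {
		depErrors = errors.Join(depErrors, err)
	}

	wrapped := fmt.Errorf("failed prerequisites: %w", depErrors)
	fmt.Println(wrapped)
	fmt.Println(errors.Is(wrapped, errDep)) // true: each joined branch stays inspectable
}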
for _, act := range lp.actions { close(act.analysisDoneCh) act.err = werr @@ -123,6 +123,7 @@ func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error { pkg.TypesInfo = &types.Info{ Types: make(map[ast.Expr]types.TypeAndValue), + Instances: make(map[*ast.Ident]types.Instance), Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), @@ -269,16 +270,16 @@ func (lp *loadingPackage) loadImportedPackageWithFacts(loadMode LoadMode) error // Load package from export data if loadMode >= LoadModeTypesInfo { if err := lp.loadFromExportData(); err != nil { - // We asked Go to give us up to date export data, yet + // We asked Go to give us up-to-date export data, yet // we can't load it. There must be something wrong. // // Attempt loading from source. This should fail (because // otherwise there would be export data); we just want to // get the compile errors. If loading from source succeeds - // we discard the result, anyway. Otherwise we'll fail + // we discard the result, anyway. Otherwise, we'll fail // when trying to reload from export data later. - // Otherwise it panics because uses already existing (from exported data) types. + // Otherwise, it panics because uses already existing (from exported data) types. pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) if srcErr := lp.loadFromSource(loadMode); srcErr != nil { return srcErr @@ -289,7 +290,7 @@ func (lp *loadingPackage) loadImportedPackageWithFacts(loadMode LoadMode) error Msg: fmt.Sprintf("could not load export data: %s", err), Kind: packages.ParseError, }) - return errors.Wrap(err, "could not load export data") + return fmt.Errorf("could not load export data: %w", err) } } @@ -311,7 +312,7 @@ func (lp *loadingPackage) loadImportedPackageWithFacts(loadMode LoadMode) error // Cached facts loading failed: analyze later the action from source. To perform // the analysis we need to load the package from source code. - // Otherwise it panics because uses already existing (from exported data) types. + // Otherwise, it panics because uses already existing (from exported data) types. if loadMode >= LoadModeTypesInfo { pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) } @@ -433,6 +434,7 @@ func (lp *loadingPackage) convertError(err error) []packages.Error { // If you see this error message, please file a bug. 
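The `Instances` map added to types.Info above is where the type checker records instantiations of generic identifiers (Go 1.18+); if the map is nil during checking, that information is silently skipped. A self-contained sketch:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

func id[T any](v T) T { return v }

var x = id(42)
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	info := &types.Info{
		Types:     make(map[ast.Expr]types.TypeAndValue),
		Instances: make(map[*ast.Ident]types.Instance), // generic instantiations (Go 1.18+)
		Defs:      make(map[*ast.Ident]types.Object),
		Uses:      make(map[*ast.Ident]types.Object),
	}

	if _, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, info); err != nil {
		panic(err)
	}

	for ident, inst := range info.Instances {
		fmt.Printf("%s instantiated as %s\n", ident.Name, inst.Type) // id instantiated as func(v int) int
	}
}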
lp.log.Warnf("Internal error: error %q (%T) without position", err, err) } + return errs } @@ -444,7 +446,7 @@ type importerFunc func(path string) (*types.Package, error) func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } -func sizeOfValueTreeBytes(v interface{}) int { +func sizeOfValueTreeBytes(v any) int { return sizeOfReflectValueTreeBytes(reflect.ValueOf(v), map[uintptr]struct{}{}) } @@ -492,6 +494,6 @@ func sizeOfReflectValueTreeBytes(rv reflect.Value, visitedPtrs map[uintptr]struc case reflect.Invalid: return 0 default: - panic("unknown rv of type " + fmt.Sprint(rv)) + panic("unknown rv of type " + rv.String()) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runners.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runners.go index 7e4cf902e7..b832fc32da 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runners.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runners.go @@ -14,6 +14,7 @@ import ( "github.com/golangci/golangci-lint/internal/pkgcache" "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" "github.com/golangci/golangci-lint/pkg/timeutils" ) @@ -28,7 +29,7 @@ type runAnalyzersConfig interface { } func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Issue, error) { - log := lintCtx.Log.Child("goanalysis") + log := lintCtx.Log.Child(logutils.DebugKeyGoAnalysis) sw := timeutils.NewStopwatch("analyzers", log) const stagesToPrint = 10 @@ -123,7 +124,8 @@ func getIssuesCacheKey(analyzers []*analysis.Analyzer) string { } func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages.Package]bool, - issues []result.Issue, lintCtx *linter.Context, analyzers []*analysis.Analyzer) { + issues []result.Issue, lintCtx *linter.Context, analyzers []*analysis.Analyzer, +) { startedAt := time.Now() perPkgIssues := map[*packages.Package][]result.Issue{} for ind := range issues { @@ -150,6 +152,7 @@ func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages. encodedIssues = append(encodedIssues, EncodingIssue{ FromLinter: i.FromLinter, Text: i.Text, + Severity: i.Severity, Pos: i.Pos, LineRange: i.LineRange, Replacement: i.Replacement, @@ -181,9 +184,9 @@ func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages. 
issuesCacheDebugf("Saved %d issues from %d packages to cache in %s", savedIssuesCount, len(allPkgs), time.Since(startedAt)) } -//nolint:gocritic func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context, - analyzers []*analysis.Analyzer) ([]result.Issue, map[*packages.Package]bool) { + analyzers []*analysis.Analyzer, +) (issuesFromCache []result.Issue, pkgsFromCache map[*packages.Package]bool) { startedAt := time.Now() lintResKey := getIssuesCacheKey(analyzers) @@ -217,16 +220,18 @@ func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context, } issues := make([]result.Issue, 0, len(pkgIssues)) - for _, i := range pkgIssues { + for i := range pkgIssues { + issue := &pkgIssues[i] issues = append(issues, result.Issue{ - FromLinter: i.FromLinter, - Text: i.Text, - Pos: i.Pos, - LineRange: i.LineRange, - Replacement: i.Replacement, + FromLinter: issue.FromLinter, + Text: issue.Text, + Severity: issue.Severity, + Pos: issue.Pos, + LineRange: issue.LineRange, + Replacement: issue.Replacement, Pkg: pkg, - ExpectNoLint: i.ExpectNoLint, - ExpectedNoLintLinter: i.ExpectedNoLintLinter, + ExpectNoLint: issue.ExpectNoLint, + ExpectedNoLintLinter: issue.ExpectedNoLintLinter, }) } cacheRes.issues = issues @@ -241,13 +246,12 @@ func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context, wg.Wait() loadedIssuesCount := 0 - var issues []result.Issue - pkgsFromCache := map[*packages.Package]bool{} + pkgsFromCache = map[*packages.Package]bool{} for pkg, cacheRes := range pkgToCacheRes { if cacheRes.loadErr == nil { loadedIssuesCount += len(cacheRes.issues) pkgsFromCache[pkg] = true - issues = append(issues, cacheRes.issues...) + issuesFromCache = append(issuesFromCache, cacheRes.issues...) issuesCacheDebugf("Loaded package %s issues (%d) from cache", pkg, len(cacheRes.issues)) } else { issuesCacheDebugf("Didn't load package %s issues from cache: %s", pkg, cacheRes.loadErr) @@ -255,7 +259,7 @@ func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context, } issuesCacheDebugf("Loaded %d issues from cache in %s, analyzing %d/%d packages", loadedIssuesCount, time.Since(startedAt), len(pkgs)-len(pkgsFromCache), len(pkgs)) - return issues, pkgsFromCache + return issuesFromCache, pkgsFromCache } func analyzersHashID(analyzers []*analysis.Analyzer) string { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocheckcompilerdirectives.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocheckcompilerdirectives.go new file mode 100644 index 0000000000..2592c89946 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocheckcompilerdirectives.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "4d63.com/gocheckcompilerdirectives/checkcompilerdirectives" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGoCheckCompilerDirectives() *goanalysis.Linter { + a := checkcompilerdirectives.Analyzer() + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go index 804865cfcb..e94778a32a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go @@ -8,22 +8,19 @@ import ( ) func NewGochecknoglobals() *goanalysis.Linter { 
- gochecknoglobals := checknoglobals.Analyzer() + a := checknoglobals.Analyzer() - // gochecknoglobals only lints test files if the `-t` flag is passed so we - // pass the `t` flag as true to the analyzer before running it. This can be - // turned of by using the regular golangci-lint flags such as `--tests` or - // `--skip-files`. - linterConfig := map[string]map[string]interface{}{ - gochecknoglobals.Name: { - "t": true, - }, + // gochecknoglobals only lints test files if the `-t` flag is passed, + // so we pass the `t` flag as true to the analyzer before running it. + // This can be turned off by using the regular golangci-lint flags such as `--tests` or `--exclude-files`. + linterConfig := map[string]map[string]any{ + a.Name: {"t": true}, } return goanalysis.NewLinter( - gochecknoglobals.Name, - gochecknoglobals.Doc, - []*analysis.Analyzer{gochecknoglobals}, + a.Name, + "Check that no global variables exist.", + []*analysis.Analyzer{a}, linterConfig, - ).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go index f9715bda86..a51b531b94 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go @@ -22,7 +22,7 @@ func NewGochecknoinits() *goanalysis.Linter { analyzer := &analysis.Analyzer{ Name: gochecknoinitsName, Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (interface{}, error) { + Run: func(pass *analysis.Pass) (any, error) { var res []goanalysis.Issue for _, file := range pass.Files { fileIssues := checkFileForInits(file, pass.Fset) @@ -41,6 +41,7 @@ func NewGochecknoinits() *goanalysis.Linter { return nil, nil }, } + return goanalysis.NewLinter( gochecknoinitsName, "Checks that no init functions are present in Go code", diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype.go new file mode 100644 index 0000000000..5516179dfc --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype.go @@ -0,0 +1,80 @@ +package golinters + +import ( + "strings" + "sync" + + gochecksumtype "github.com/alecthomas/go-check-sumtype" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const goCheckSumTypeName = "gochecksumtype" + +func NewGoCheckSumType() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: goCheckSumTypeName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runGoCheckSumType(pass) + if err != nil { + return nil, err + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
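The `linterConfig` map above feeds `{"t": true}` to the checknoglobals analyzer. As far as I can tell, the goanalysis driver applies such per-analyzer maps through each analyzer's flag.FlagSet, so a plausible reduction of the mechanism looks like this (the flag name `t` is real, the rest is illustrative):

package main

import (
	"fmt"

	"golang.org/x/tools/go/analysis"
)

func main() {
	a := &analysis.Analyzer{Name: "checknoglobals", Doc: "demo analyzer"}

	// Analyzers expose options through a flag.FlagSet; registering and then
	// setting `t` mirrors the {"t": true} entry in the map above.
	includeTests := a.Flags.Bool("t", false, "include test files")

	if err := a.Flags.Set("t", "true"); err != nil {
		panic(err)
	}
	fmt.Println(*includeTests) // true
}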
+ mu.Unlock() + + return nil, nil + }, + } + + return goanalysis.NewLinter( + goCheckSumTypeName, + `Run exhaustiveness checks on Go "sum types"`, + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(_ *linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func runGoCheckSumType(pass *analysis.Pass) ([]goanalysis.Issue, error) { + var resIssues []goanalysis.Issue + + pkg := &packages.Package{ + Fset: pass.Fset, + Syntax: pass.Files, + Types: pass.Pkg, + TypesInfo: pass.TypesInfo, + } + + var unknownError error + errors := gochecksumtype.Run([]*packages.Package{pkg}) + for _, err := range errors { + err, ok := err.(gochecksumtype.Error) + if !ok { + unknownError = err + continue + } + + resIssues = append(resIssues, goanalysis.NewIssue(&result.Issue{ + FromLinter: goCheckSumTypeName, + Text: strings.TrimPrefix(err.Error(), err.Pos().String()+": "), + Pos: err.Pos(), + }, pass)) + } + + return resIssues, unknownError +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go index eb42dd149c..f5cff840e4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go @@ -8,6 +8,7 @@ import ( "github.com/uudashr/gocognit" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -15,54 +16,64 @@ import ( const gocognitName = "gocognit" -func NewGocognit() *goanalysis.Linter { +func NewGocognit(settings *config.GocognitSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: goanalysis.TheOnlyAnalyzerName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues := runGocognit(pass, settings) + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
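runGoCheckSumType above separates diagnostics from genuine failures with a type assertion: values implementing gochecksumtype.Error carry a position and become issues, anything else is surfaced as `unknownError`. The same partitioning with hypothetical stand-in types:

package main

import (
	"errors"
	"fmt"
)

// posError is a hypothetical stand-in for an error type that carries a
// position, in the spirit of gochecksumtype.Error.
type posError struct{ pos, msg string }

func (e posError) Error() string { return e.pos + ": " + e.msg }

func main() {
	errs := []error{
		posError{pos: "shape.go:14", msg: "missing case Circle"},
		errors.New("internal failure"),
	}

	var unknown error
	for _, err := range errs {
		pe, ok := err.(posError)
		if !ok {
			unknown = err // not a positioned diagnostic: surface it as a hard error
			continue
		}
		fmt.Printf("issue at %s: %s\n", pe.pos, pe.msg)
	}
	fmt.Println("unknown:", unknown)
}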
+ mu.Unlock() + + return nil, nil + }, } + return goanalysis.NewLinter( gocognitName, "Computes and checks the cognitive complexity of functions", []*analysis.Analyzer{analyzer}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var stats []gocognit.Stat - for _, f := range pass.Files { - stats = gocognit.ComplexityStats(f, pass.Fset, stats) - } - if len(stats) == 0 { - return nil, nil - } + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} - sort.SliceStable(stats, func(i, j int) bool { - return stats[i].Complexity > stats[j].Complexity - }) +func runGocognit(pass *analysis.Pass, settings *config.GocognitSettings) []goanalysis.Issue { + var stats []gocognit.Stat + for _, f := range pass.Files { + stats = gocognit.ComplexityStats(f, pass.Fset, stats) + } + if len(stats) == 0 { + return nil + } - res := make([]goanalysis.Issue, 0, len(stats)) - for _, s := range stats { - if s.Complexity <= lintCtx.Settings().Gocognit.MinComplexity { - break // Break as the stats is already sorted from greatest to least - } + sort.SliceStable(stats, func(i, j int) bool { + return stats[i].Complexity > stats[j].Complexity + }) - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: s.Pos, - Text: fmt.Sprintf("cognitive complexity %d of func %s is high (> %d)", - s.Complexity, formatCode(s.FuncName, lintCtx.Cfg), lintCtx.Settings().Gocognit.MinComplexity), - FromLinter: gocognitName, - }, pass)) - } + issues := make([]goanalysis.Issue, 0, len(stats)) + for _, s := range stats { + if s.Complexity <= settings.MinComplexity { + break // Break as the stats is already sorted from greatest to least + } - mu.Lock() - resIssues = append(resIssues, res...) 
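runGocognit above sorts the stats from most to least complex and then breaks at the first entry at or below the threshold, which is only correct because of the preceding sort. Reduced to a sketch:

package main

import (
	"fmt"
	"sort"
)

type stat struct {
	funcName   string
	complexity int
}

func main() {
	stats := []stat{{"parse", 3}, {"eval", 12}, {"emit", 7}}
	minComplexity := 5

	sort.SliceStable(stats, func(i, j int) bool {
		return stats[i].complexity > stats[j].complexity
	})

	for _, s := range stats {
		if s.complexity <= minComplexity {
			break // sorted greatest-to-least, so everything after is under the threshold
		}
		fmt.Printf("cognitive complexity %d of func %s is high (> %d)\n",
			s.complexity, s.funcName, minComplexity)
	}
}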
- mu.Unlock() + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: s.Pos, + Text: fmt.Sprintf("cognitive complexity %d of func %s is high (> %d)", + s.Complexity, formatCode(s.FuncName, nil), settings.MinComplexity), + FromLinter: gocognitName, + }, pass)) + } - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + return issues } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go index 0801ee15d8..204e2496a6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go @@ -7,6 +7,7 @@ import ( goconstAPI "github.com/jgautheron/goconst" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -14,72 +15,80 @@ import ( const goconstName = "goconst" -func NewGoconst() *goanalysis.Linter { +func NewGoconst(settings *config.GoConstSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: goconstName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - goconstName, - "Finds repeated strings that could be replaced by a constant", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - issues, err := checkConstants(pass, lintCtx) - if err != nil || len(issues) == 0 { + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runGoconst(pass, settings) + if err != nil { return nil, err } + if len(issues) == 0 { + return nil, nil + } + mu.Lock() resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + goconstName, + "Finds repeated strings that could be replaced by a constant", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } -func checkConstants(pass *analysis.Pass, lintCtx *linter.Context) ([]goanalysis.Issue, error) { +func runGoconst(pass *analysis.Pass, settings *config.GoConstSettings) ([]goanalysis.Issue, error) { cfg := goconstAPI.Config{ - IgnoreTests: lintCtx.Settings().Goconst.IgnoreTests, - MatchWithConstants: lintCtx.Settings().Goconst.MatchWithConstants, - MinStringLength: lintCtx.Settings().Goconst.MinStringLen, - MinOccurrences: lintCtx.Settings().Goconst.MinOccurrencesCount, - ParseNumbers: lintCtx.Settings().Goconst.ParseNumbers, - NumberMin: lintCtx.Settings().Goconst.NumberMin, - NumberMax: lintCtx.Settings().Goconst.NumberMax, + IgnoreStrings: settings.IgnoreStrings, + IgnoreTests: settings.IgnoreTests, + MatchWithConstants: settings.MatchWithConstants, + MinStringLength: settings.MinStringLen, + MinOccurrences: settings.MinOccurrencesCount, + ParseNumbers: settings.ParseNumbers, + NumberMin: settings.NumberMin, + NumberMax: settings.NumberMax, ExcludeTypes: map[goconstAPI.Type]bool{}, } - if lintCtx.Settings().Goconst.IgnoreCalls { + + if settings.IgnoreCalls { cfg.ExcludeTypes[goconstAPI.Call] = true } - goconstIssues, err := goconstAPI.Run(pass.Files, pass.Fset, &cfg) + + lintIssues, err := goconstAPI.Run(pass.Files, pass.Fset, &cfg) if err != nil { return nil, err } - if len(goconstIssues) == 0 { + if len(lintIssues) == 0 { return nil, nil } - res := make([]goanalysis.Issue, 0, len(goconstIssues)) - for _, i := range goconstIssues { - textBegin := fmt.Sprintf("string %s has %d occurrences", formatCode(i.Str, lintCtx.Cfg), i.OccurrencesCount) - var textEnd string + res := make([]goanalysis.Issue, 0, len(lintIssues)) + for _, i := range lintIssues { + text := fmt.Sprintf("string %s has %d occurrences", formatCode(i.Str, nil), i.OccurrencesCount) + if i.MatchingConst == "" { - textEnd = ", make it a constant" + text += ", make it a constant" } else { - textEnd = fmt.Sprintf(", but such constant %s already exists", formatCode(i.MatchingConst, lintCtx.Cfg)) + text += fmt.Sprintf(", but such constant %s already exists", formatCode(i.MatchingConst, nil)) } + res = append(res, goanalysis.NewIssue(&result.Issue{ Pos: i.Pos, - Text: textBegin + textEnd, + Text: text, FromLinter: goconstName, }, pass)) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go index 75eb7d3076..f78c2cf909 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go @@ -1,93 +1,179 @@ package golinters import ( + "errors" "fmt" "go/ast" "go/types" "path/filepath" + "reflect" "runtime" "sort" "strings" "sync" - gocriticlinter "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/checkers" + gocriticlinter "github.com/go-critic/go-critic/linter" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" + 
"github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) -const gocriticName = "gocritic" +const goCriticName = "gocritic" -func NewGocritic() *goanalysis.Linter { +var ( + goCriticDebugf = logutils.Debug(logutils.DebugKeyGoCritic) + isGoCriticDebug = logutils.HaveDebugTag(logutils.DebugKeyGoCritic) +) + +func NewGoCritic(settings *config.GoCriticSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - sizes := types.SizesFor("gc", runtime.GOARCH) + wrapper := &goCriticWrapper{ + sizes: types.SizesFor("gc", runtime.GOARCH), + } analyzer := &analysis.Analyzer{ - Name: gocriticName, + Name: goCriticName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - gocriticName, - `Provides many diagnostics that check for bugs, performance and style issues. -Extensible without recompilation through dynamic rules. -Dynamic rules are written declaratively with AST patterns, filters, report message and optional suggestion.`, - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - linterCtx := gocriticlinter.NewContext(pass.Fset, sizes) - enabledCheckers, err := buildEnabledCheckers(lintCtx, linterCtx) + Run: func(pass *analysis.Pass) (any, error) { + issues, err := wrapper.run(pass) if err != nil { return nil, err } - linterCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg) - var res []goanalysis.Issue - pkgIssues := runGocriticOnPackage(linterCtx, enabledCheckers, pass.Files) - for i := range pkgIssues { - res = append(res, goanalysis.NewIssue(&pkgIssues[i], pass)) - } - if len(res) == 0 { + if len(issues) == 0 { return nil, nil } mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil + }, + } + + return goanalysis.NewLinter( + goCriticName, + `Provides diagnostics that check for bugs, performance and style issues. +Extensible without recompilation through dynamic rules. +Dynamic rules are written declaratively with AST patterns, filters, report message and optional suggestion.`, + []*analysis.Analyzer{analyzer}, + nil, + ). + WithContextSetter(func(context *linter.Context) { + wrapper.configDir = context.Cfg.GetConfigDir() + + wrapper.init(context.Log, settings) + }). + WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }). + WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +type goCriticWrapper struct { + settingsWrapper *goCriticSettingsWrapper + configDir string + sizes types.Sizes + once sync.Once +} + +func (w *goCriticWrapper) init(logger logutils.Log, settings *config.GoCriticSettings) { + if settings == nil { + return + } + + w.once.Do(func() { + err := checkers.InitEmbeddedRules() + if err != nil { + logger.Fatalf("%s: %v: setting an explicit GOROOT can fix this problem", goCriticName, err) } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) + }) + + settingsWrapper := newGoCriticSettingsWrapper(settings, logger) + settingsWrapper.InferEnabledChecks() + // Validate must be after InferEnabledChecks, not before. + // Because it uses gathered information about tags set and finally enabled checks. 
+ if err := settingsWrapper.Validate(); err != nil { + logger.Fatalf("%s: invalid settings: %s", goCriticName, err) + } + + w.settingsWrapper = settingsWrapper } -func normalizeCheckerInfoParams(info *gocriticlinter.CheckerInfo) gocriticlinter.CheckerParams { - // lowercase info param keys here because golangci-lint's config parser lowercases all strings - ret := gocriticlinter.CheckerParams{} - for k, v := range info.Params { - ret[strings.ToLower(k)] = v +func (w *goCriticWrapper) run(pass *analysis.Pass) ([]goanalysis.Issue, error) { + if w.settingsWrapper == nil { + return nil, errors.New("the settings wrapper is nil") } - return ret + linterCtx := gocriticlinter.NewContext(pass.Fset, w.sizes) + + linterCtx.SetGoVersion(w.settingsWrapper.Go) + + enabledCheckers, err := w.buildEnabledCheckers(linterCtx) + if err != nil { + return nil, err + } + + linterCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg) + + pkgIssues := runGoCriticOnPackage(linterCtx, enabledCheckers, pass.Files) + + issues := make([]goanalysis.Issue, 0, len(pkgIssues)) + for i := range pkgIssues { + issues = append(issues, goanalysis.NewIssue(&pkgIssues[i], pass)) + } + + return issues, nil +} + +func (w *goCriticWrapper) buildEnabledCheckers(linterCtx *gocriticlinter.Context) ([]*gocriticlinter.Checker, error) { + allLowerCasedParams := w.settingsWrapper.GetLowerCasedParams() + + var enabledCheckers []*gocriticlinter.Checker + for _, info := range gocriticlinter.GetCheckersInfo() { + if !w.settingsWrapper.IsCheckEnabled(info.Name) { + continue + } + + if err := w.configureCheckerInfo(info, allLowerCasedParams); err != nil { + return nil, err + } + + c, err := gocriticlinter.NewChecker(linterCtx, info) + if err != nil { + return nil, err + } + enabledCheckers = append(enabledCheckers, c) + } + + return enabledCheckers, nil } -func configureCheckerInfo(info *gocriticlinter.CheckerInfo, allParams map[string]config.GocriticCheckSettings) error { - params := allParams[strings.ToLower(info.Name)] +func (w *goCriticWrapper) configureCheckerInfo( + info *gocriticlinter.CheckerInfo, + allLowerCasedParams map[string]config.GoCriticCheckSettings, +) error { + params := allLowerCasedParams[strings.ToLower(info.Name)] if params == nil { // no config for this checker return nil } - infoParams := normalizeCheckerInfoParams(info) + // To lowercase info param keys here because golangci-lint's config parser lowercases all strings. 
+ infoParams := normalizeMap(info.Params) for k, p := range params { v, ok := infoParams[k] if ok { - v.Value = p + v.Value = w.normalizeCheckerParamsValue(p) continue } @@ -97,10 +183,7 @@ func configureCheckerInfo(info *gocriticlinter.CheckerInfo, allParams map[string info.Name, k) } - var supportedKeys []string - for sk := range info.Params { - supportedKeys = append(supportedKeys, sk) - } + supportedKeys := maps.Keys(info.Params) sort.Strings(supportedKeys) return fmt.Errorf("checker %s config param %s doesn't exist, all existing: %s", @@ -110,58 +193,397 @@ func configureCheckerInfo(info *gocriticlinter.CheckerInfo, allParams map[string return nil } -func buildEnabledCheckers(lintCtx *linter.Context, linterCtx *gocriticlinter.Context) ([]*gocriticlinter.Checker, error) { - s := lintCtx.Settings().Gocritic - allParams := s.GetLowercasedParams() - - var enabledCheckers []*gocriticlinter.Checker - for _, info := range gocriticlinter.GetCheckersInfo() { - if !s.IsCheckEnabled(info.Name) { - continue - } - - if err := configureCheckerInfo(info, allParams); err != nil { - return nil, err - } - - c, err := gocriticlinter.NewChecker(linterCtx, info) - if err != nil { - return nil, err - } - enabledCheckers = append(enabledCheckers, c) +// normalizeCheckerParamsValue normalizes value types. +// go-critic asserts that CheckerParam.Value has some specific types, +// but the file parsers (TOML, YAML, JSON) don't create the same representation for raw types, +// so we have to convert the values into the expected types. +// Maybe in the future, this kind of conversion will be done in go-critic itself. +func (w *goCriticWrapper) normalizeCheckerParamsValue(p any) any { + rv := reflect.ValueOf(p) + switch rv.Type().Kind() { + case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: + return int(rv.Int()) + case reflect.Bool: + return rv.Bool() + case reflect.String: + // Perform variable substitution. + return strings.ReplaceAll(rv.String(), "${configDir}", w.configDir) + default: + return p } - - return enabledCheckers, nil } -func runGocriticOnPackage(linterCtx *gocriticlinter.Context, checkers []*gocriticlinter.Checker, - files []*ast.File) []result.Issue { +func runGoCriticOnPackage(linterCtx *gocriticlinter.Context, checks []*gocriticlinter.Checker, files []*ast.File) []result.Issue { var res []result.Issue for _, f := range files { filename := filepath.Base(linterCtx.FileSet.Position(f.Pos()).Filename) linterCtx.SetFileInfo(filename, f) - issues := runGocriticOnFile(linterCtx, f, checkers) + issues := runGoCriticOnFile(linterCtx, f, checks) res = append(res, issues...) } return res } -func runGocriticOnFile(ctx *gocriticlinter.Context, f *ast.File, checkers []*gocriticlinter.Checker) []result.Issue { +func runGoCriticOnFile(linterCtx *gocriticlinter.Context, f *ast.File, checks []*gocriticlinter.Checker) []result.Issue { var res []result.Issue - for _, c := range checkers { + for _, c := range checks { // All checkers are expected to use *lint.Context // as read-only structure, so no copying is required.
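normalizeCheckerParamsValue above exists because the config decoders disagree on raw types (TOML yields int64 where YAML/JSON yield int) while go-critic type-asserts its checker params. A standalone sketch of the same reflection-based normalization, including the ${configDir} substitution:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// normalize collapses decoder-specific kinds into the types go-critic expects.
func normalize(p any, configDir string) any {
	rv := reflect.ValueOf(p)
	switch rv.Kind() {
	case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int:
		return int(rv.Int())
	case reflect.Bool:
		return rv.Bool()
	case reflect.String:
		// Variable substitution, as in the hunk above.
		return strings.ReplaceAll(rv.String(), "${configDir}", configDir)
	default:
		return p
	}
}

func main() {
	fmt.Println(normalize(int64(40), "/proj"))               // 40 (as int)
	fmt.Println(normalize("${configDir}/rules.go", "/proj")) // /proj/rules.go
}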
for _, warn := range c.Check(f) { - pos := ctx.FileSet.Position(warn.Node.Pos()) - res = append(res, result.Issue{ + pos := linterCtx.FileSet.Position(warn.Pos) + issue := result.Issue{ Pos: pos, Text: fmt.Sprintf("%s: %s", c.Info.Name, warn.Text), - FromLinter: gocriticName, - }) + FromLinter: goCriticName, + } + + if warn.HasQuickFix() { + issue.Replacement = &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: pos.Column - 1, + Length: int(warn.Suggestion.To - warn.Suggestion.From), + NewString: string(warn.Suggestion.Replacement), + }, + } + } + + res = append(res, issue) } } return res } + +type goCriticChecks[T any] map[string]T + +func (m goCriticChecks[T]) has(name string) bool { + _, ok := m[name] + return ok +} + +type goCriticSettingsWrapper struct { + *config.GoCriticSettings + + logger logutils.Log + + allCheckers []*gocriticlinter.CheckerInfo + + allChecks goCriticChecks[struct{}] + allChecksByTag goCriticChecks[[]string] + allTagsSorted []string + inferredEnabledChecks goCriticChecks[struct{}] + + // *LowerCased fields are used for GoCriticSettings.SettingsPerCheck validation only. + + allChecksLowerCased goCriticChecks[struct{}] + inferredEnabledChecksLowerCased goCriticChecks[struct{}] +} + +func newGoCriticSettingsWrapper(settings *config.GoCriticSettings, logger logutils.Log) *goCriticSettingsWrapper { + allCheckers := gocriticlinter.GetCheckersInfo() + + allChecks := make(goCriticChecks[struct{}], len(allCheckers)) + allChecksLowerCased := make(goCriticChecks[struct{}], len(allCheckers)) + allChecksByTag := make(goCriticChecks[[]string]) + for _, checker := range allCheckers { + allChecks[checker.Name] = struct{}{} + allChecksLowerCased[strings.ToLower(checker.Name)] = struct{}{} + + for _, tag := range checker.Tags { + allChecksByTag[tag] = append(allChecksByTag[tag], checker.Name) + } + } + + allTagsSorted := maps.Keys(allChecksByTag) + sort.Strings(allTagsSorted) + + return &goCriticSettingsWrapper{ + GoCriticSettings: settings, + logger: logger, + allCheckers: allCheckers, + allChecks: allChecks, + allChecksLowerCased: allChecksLowerCased, + allChecksByTag: allChecksByTag, + allTagsSorted: allTagsSorted, + inferredEnabledChecks: make(goCriticChecks[struct{}]), + inferredEnabledChecksLowerCased: make(goCriticChecks[struct{}]), + } +} + +func (s *goCriticSettingsWrapper) IsCheckEnabled(name string) bool { + return s.inferredEnabledChecks.has(name) +} + +func (s *goCriticSettingsWrapper) GetLowerCasedParams() map[string]config.GoCriticCheckSettings { + return normalizeMap(s.SettingsPerCheck) +} + +// InferEnabledChecks tries to be consistent with (lintersdb.EnabledSet).build. +func (s *goCriticSettingsWrapper) InferEnabledChecks() { + s.debugChecksInitialState() + + enabledByDefaultChecks, disabledByDefaultChecks := s.buildEnabledAndDisabledByDefaultChecks() + debugChecksListf(enabledByDefaultChecks, "Enabled by default") + debugChecksListf(disabledByDefaultChecks, "Disabled by default") + + enabledChecks := make(goCriticChecks[struct{}]) + + if s.EnableAll { + enabledChecks = make(goCriticChecks[struct{}], len(s.allCheckers)) + for _, info := range s.allCheckers { + enabledChecks[info.Name] = struct{}{} + } + } else if !s.DisableAll { + // enable-all/disable-all revokes the default settings. 
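goCriticChecks[T] above is a small generic map type: instantiated with struct{} it acts as a set, instantiated with []string it indexes check names by tag, and both instantiations share the one `has` method. For instance:

package main

import "fmt"

// checks mirrors the generic goCriticChecks[T] type above.
type checks[T any] map[string]T

func (m checks[T]) has(name string) bool {
	_, ok := m[name]
	return ok
}

func main() {
	enabled := checks[struct{}]{"appendAssign": {}}
	byTag := checks[[]string]{"style": {"commentFormatting"}}

	fmt.Println(enabled.has("appendAssign")) // true
	fmt.Println(byTag.has("performance"))    // false
}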
+ enabledChecks = make(goCriticChecks[struct{}], len(enabledByDefaultChecks)) + for _, check := range enabledByDefaultChecks { + enabledChecks[check] = struct{}{} + } + } + + if len(s.EnabledTags) != 0 { + enabledFromTags := s.expandTagsToChecks(s.EnabledTags) + debugChecksListf(enabledFromTags, "Enabled by config tags %s", sprintSortedStrings(s.EnabledTags)) + + for _, check := range enabledFromTags { + enabledChecks[check] = struct{}{} + } + } + + if len(s.EnabledChecks) != 0 { + debugChecksListf(s.EnabledChecks, "Enabled by config") + + for _, check := range s.EnabledChecks { + if enabledChecks.has(check) { + s.logger.Warnf("%s: no need to enable check %q: it's already enabled", goCriticName, check) + continue + } + enabledChecks[check] = struct{}{} + } + } + + if len(s.DisabledTags) != 0 { + disabledFromTags := s.expandTagsToChecks(s.DisabledTags) + debugChecksListf(disabledFromTags, "Disabled by config tags %s", sprintSortedStrings(s.DisabledTags)) + + for _, check := range disabledFromTags { + delete(enabledChecks, check) + } + } + + if len(s.DisabledChecks) != 0 { + debugChecksListf(s.DisabledChecks, "Disabled by config") + + for _, check := range s.DisabledChecks { + if !enabledChecks.has(check) { + s.logger.Warnf("%s: no need to disable check %q: it's already disabled", goCriticName, check) + continue + } + delete(enabledChecks, check) + } + } + + s.inferredEnabledChecks = enabledChecks + s.inferredEnabledChecksLowerCased = normalizeMap(s.inferredEnabledChecks) + s.debugChecksFinalState() +} + +func (s *goCriticSettingsWrapper) buildEnabledAndDisabledByDefaultChecks() (enabled, disabled []string) { + for _, info := range s.allCheckers { + if enabledByDef := isEnabledByDefaultGoCriticChecker(info); enabledByDef { + enabled = append(enabled, info.Name) + } else { + disabled = append(disabled, info.Name) + } + } + return enabled, disabled +} + +func (s *goCriticSettingsWrapper) expandTagsToChecks(tags []string) []string { + var checks []string + for _, tag := range tags { + checks = append(checks, s.allChecksByTag[tag]...) + } + return checks +} + +func (s *goCriticSettingsWrapper) debugChecksInitialState() { + if !isGoCriticDebug { + return + } + + goCriticDebugf("All gocritic existing tags and checks:") + for _, tag := range s.allTagsSorted { + debugChecksListf(s.allChecksByTag[tag], " tag %q", tag) + } +} + +func (s *goCriticSettingsWrapper) debugChecksFinalState() { + if !isGoCriticDebug { + return + } + + var enabledChecks []string + var disabledChecks []string + + for _, checker := range s.allCheckers { + name := checker.Name + if s.inferredEnabledChecks.has(name) { + enabledChecks = append(enabledChecks, name) + } else { + disabledChecks = append(disabledChecks, name) + } + } + + debugChecksListf(enabledChecks, "Final used") + + if len(disabledChecks) == 0 { + goCriticDebugf("All checks are enabled") + } else { + debugChecksListf(disabledChecks, "Final not used") + } +} + +// Validate tries to be consistent with (lintersdb.Validator).validateEnabledDisabledLintersConfig. 
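InferEnabledChecks above is set arithmetic over check names: start from the defaults (or all, or none), union in checks enabled by tag or by name, then subtract the disabled ones. A compressed sketch of that order of operations:

package main

import "fmt"

func main() {
	enabledByDefault := []string{"assignOp", "caseOrder"}
	enabledByTag := []string{"argOrder"} // e.g. expanded from enabled-tags
	disabled := []string{"caseOrder"}

	// Start from the defaults, union in the enables, subtract the disables.
	enabledChecks := make(map[string]struct{}, len(enabledByDefault))
	for _, c := range enabledByDefault {
		enabledChecks[c] = struct{}{}
	}
	for _, c := range enabledByTag {
		enabledChecks[c] = struct{}{}
	}
	for _, c := range disabled {
		delete(enabledChecks, c)
	}

	fmt.Println(len(enabledChecks)) // 2: assignOp and argOrder remain
}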
+func (s *goCriticSettingsWrapper) Validate() error { + for _, v := range []func() error{ + s.validateOptionsCombinations, + s.validateCheckerTags, + s.validateCheckerNames, + s.validateDisabledAndEnabledAtOneMoment, + s.validateAtLeastOneCheckerEnabled, + } { + if err := v(); err != nil { + return err + } + } + return nil +} + +func (s *goCriticSettingsWrapper) validateOptionsCombinations() error { + if s.EnableAll { + if s.DisableAll { + return errors.New("enable-all and disable-all options must not be combined") + } + + if len(s.EnabledTags) != 0 { + return errors.New("enable-all and enabled-tags options must not be combined") + } + + if len(s.EnabledChecks) != 0 { + return errors.New("enable-all and enabled-checks options must not be combined") + } + } + + if s.DisableAll { + if len(s.DisabledTags) != 0 { + return errors.New("disable-all and disabled-tags options must not be combined") + } + + if len(s.DisabledChecks) != 0 { + return errors.New("disable-all and disabled-checks options must not be combined") + } + + if len(s.EnabledTags) == 0 && len(s.EnabledChecks) == 0 { + return errors.New("all checks were disabled, but no one check was enabled: at least one must be enabled") + } + } + + return nil +} + +func (s *goCriticSettingsWrapper) validateCheckerTags() error { + for _, tag := range s.EnabledTags { + if !s.allChecksByTag.has(tag) { + return fmt.Errorf("enabled tag %q doesn't exist, see %s's documentation", tag, goCriticName) + } + } + + for _, tag := range s.DisabledTags { + if !s.allChecksByTag.has(tag) { + return fmt.Errorf("disabled tag %q doesn't exist, see %s's documentation", tag, goCriticName) + } + } + + return nil +} + +func (s *goCriticSettingsWrapper) validateCheckerNames() error { + for _, name := range s.EnabledChecks { + if !s.allChecks.has(name) { + return fmt.Errorf("enabled check %q doesn't exist, see %s's documentation", name, goCriticName) + } + } + + for _, name := range s.DisabledChecks { + if !s.allChecks.has(name) { + return fmt.Errorf("disabled check %q doesn't exist, see %s documentation", name, goCriticName) + } + } + + for name := range s.SettingsPerCheck { + lcName := strings.ToLower(name) + if !s.allChecksLowerCased.has(lcName) { + return fmt.Errorf("invalid check settings: check %q doesn't exist, see %s documentation", name, goCriticName) + } + if !s.inferredEnabledChecksLowerCased.has(lcName) { + s.logger.Warnf("%s: settings were provided for disabled check %q", goCriticName, name) + } + } + + return nil +} + +func (s *goCriticSettingsWrapper) validateDisabledAndEnabledAtOneMoment() error { + for _, tag := range s.DisabledTags { + if slices.Contains(s.EnabledTags, tag) { + return fmt.Errorf("tag %q disabled and enabled at one moment", tag) + } + } + + for _, check := range s.DisabledChecks { + if slices.Contains(s.EnabledChecks, check) { + return fmt.Errorf("check %q disabled and enabled at one moment", check) + } + } + + return nil +} + +func (s *goCriticSettingsWrapper) validateAtLeastOneCheckerEnabled() error { + if len(s.inferredEnabledChecks) == 0 { + return errors.New("eventually all checks were disabled: at least one must be enabled") + } + return nil +} + +func normalizeMap[ValueT any](in map[string]ValueT) map[string]ValueT { + ret := make(map[string]ValueT, len(in)) + for k, v := range in { + ret[strings.ToLower(k)] = v + } + return ret +} + +func isEnabledByDefaultGoCriticChecker(info *gocriticlinter.CheckerInfo) bool { + // 
https://github.com/go-critic/go-critic/blob/5b67cfd487ae9fe058b4b19321901b3131810f65/cmd/gocritic/check.go#L342-L345 + return !info.HasTag(gocriticlinter.ExperimentalTag) && + !info.HasTag(gocriticlinter.OpinionatedTag) && + !info.HasTag(gocriticlinter.PerformanceTag) && + !info.HasTag(gocriticlinter.SecurityTag) +} + +func debugChecksListf(checks []string, format string, args ...any) { + if !isGoCriticDebug { + return + } + + goCriticDebugf("%s checks (%d): %s", fmt.Sprintf(format, args...), len(checks), sprintSortedStrings(checks)) +} + +func sprintSortedStrings(v []string) string { + c := slices.Clone(v) + sort.Strings(c) + return fmt.Sprint(c) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go index 5c61fec72c..63aefb09fe 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go @@ -7,6 +7,7 @@ import ( "github.com/fzipp/gocyclo" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -14,48 +15,61 @@ import ( const gocycloName = "gocyclo" -func NewGocyclo() *goanalysis.Linter { +func NewGocyclo(settings *config.GoCycloSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: gocycloName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues := runGoCyclo(pass, settings) + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) + mu.Unlock() + + return nil, nil + }, } + return goanalysis.NewLinter( gocycloName, "Computes and checks the cyclomatic complexity of functions", []*analysis.Analyzer{analyzer}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var stats gocyclo.Stats - for _, f := range pass.Files { - stats = gocyclo.AnalyzeASTFile(f, pass.Fset, stats) - } - if len(stats) == 0 { - return nil, nil - } + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} - stats = stats.SortAndFilter(-1, lintCtx.Settings().Gocyclo.MinComplexity) +func runGoCyclo(pass *analysis.Pass, settings *config.GoCycloSettings) []goanalysis.Issue { + var stats gocyclo.Stats + for _, f := range pass.Files { + stats = gocyclo.AnalyzeASTFile(f, pass.Fset, stats) + } + if len(stats) == 0 { + return nil + } - res := make([]goanalysis.Issue, 0, len(stats)) - for _, s := range stats { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: s.Pos, - Text: fmt.Sprintf("cyclomatic complexity %d of func %s is high (> %d)", - s.Complexity, formatCode(s.FuncName, lintCtx.Cfg), lintCtx.Settings().Gocyclo.MinComplexity), - FromLinter: gocycloName, - }, pass)) - } + stats = stats.SortAndFilter(-1, settings.MinComplexity) - mu.Lock() - resIssues = append(resIssues, res...)
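Validate, in the gocritic hunk above, runs its sub-validators from a slice of `func() error` and stops at the first failure, keeping the ordering explicit and each rule small. The same pattern with hypothetical checks:

package main

import (
	"errors"
	"fmt"
)

type settings struct{ enableAll, disableAll bool }

// validate chains sub-validators in a fixed order and stops at the first
// failure, the same shape as goCriticSettingsWrapper.Validate above.
func (s settings) validate() error {
	for _, v := range []func() error{
		s.validateCombinations,
		s.validateAtLeastOneEnabled,
	} {
		if err := v(); err != nil {
			return err
		}
	}
	return nil
}

func (s settings) validateCombinations() error {
	if s.enableAll && s.disableAll {
		return errors.New("enable-all and disable-all must not be combined")
	}
	return nil
}

func (s settings) validateAtLeastOneEnabled() error { return nil }

func main() {
	fmt.Println(settings{enableAll: true, disableAll: true}.validate())
}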
- mu.Unlock() + issues := make([]goanalysis.Issue, 0, len(stats)) - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + for _, s := range stats { + text := fmt.Sprintf("cyclomatic complexity %d of func %s is high (> %d)", + s.Complexity, formatCode(s.FuncName, nil), settings.MinComplexity) + + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: s.Pos, + Text: text, + FromLinter: gocycloName, + }, pass)) + } + + return issues } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go index 6252458909..06c160fec6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go @@ -6,6 +6,7 @@ import ( "github.com/tetafro/godot" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -13,72 +14,89 @@ import ( const godotName = "godot" -func NewGodot() *goanalysis.Linter { +func NewGodot(settings *config.GodotSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ - Name: godotName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - godotName, - "Check if comments end in a period", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - cfg := lintCtx.Cfg.LintersSettings.Godot - settings := godot.Settings{ - Scope: godot.Scope(cfg.Scope), - Exclude: cfg.Exclude, - Period: true, - Capital: cfg.Capital, + var dotSettings godot.Settings + + if settings != nil { + dotSettings = godot.Settings{ + Scope: godot.Scope(settings.Scope), + Exclude: settings.Exclude, + Period: settings.Period, + Capital: settings.Capital, } // Convert deprecated setting - if cfg.CheckAll { // nolint: staticcheck - settings.Scope = godot.TopLevelScope + // todo(butuzov): remove on v2 release + if settings.CheckAll { + dotSettings.Scope = godot.AllScope } + } - if settings.Scope == "" { - settings.Scope = godot.DeclScope - } + if dotSettings.Scope == "" { + dotSettings.Scope = godot.DeclScope + } - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var issues []godot.Issue - for _, file := range pass.Files { - iss, err := godot.Run(file, pass.Fset, settings) - if err != nil { - return nil, err - } - issues = append(issues, iss...) + analyzer := &analysis.Analyzer{ + Name: godotName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runGodot(pass, dotSettings) + if err != nil { + return nil, err } if len(issues) == 0 { return nil, nil } - res := make([]goanalysis.Issue, len(issues)) - for k, i := range issues { - issue := result.Issue{ - Pos: i.Pos, - Text: i.Message, - FromLinter: godotName, - Replacement: &result.Replacement{ - NewLines: []string{i.Replacement}, - }, - } - - res[k] = goanalysis.NewIssue(&issue, pass) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + godotName, + "Check if comments end in a period", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGodot(pass *analysis.Pass, settings godot.Settings) ([]goanalysis.Issue, error) { + var lintIssues []godot.Issue + for _, file := range pass.Files { + iss, err := godot.Run(file, pass.Fset, settings) + if err != nil { + return nil, err + } + lintIssues = append(lintIssues, iss...) + } + + if len(lintIssues) == 0 { + return nil, nil + } + + issues := make([]goanalysis.Issue, len(lintIssues)) + for k, i := range lintIssues { + issue := result.Issue{ + Pos: i.Pos, + Text: i.Message, + FromLinter: godotName, + Replacement: &result.Replacement{ + NewLines: []string{i.Replacement}, + }, + } + + issues[k] = goanalysis.NewIssue(&issue, pass) + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go index 2a4dd9fafb..208bade576 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go @@ -8,6 +8,7 @@ import ( "github.com/matoous/godox" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -15,49 +16,60 @@ import ( const godoxName = "godox" -func NewGodox() *goanalysis.Linter { +func NewGodox(settings *config.GodoxSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: godoxName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - godoxName, - "Tool for detection of FIXME, TODO and other comment keywords", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var issues []godox.Message - for _, file := range pass.Files { - issues = append(issues, godox.Run(file, pass.Fset, lintCtx.Settings().Godox.Keywords...)...) - } + Run: func(pass *analysis.Pass) (any, error) { + issues := runGodox(pass, settings) if len(issues) == 0 { return nil, nil } - res := make([]goanalysis.Issue, len(issues)) - for k, i := range issues { - res[k] = goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.Pos.Filename, - Line: i.Pos.Line, - }, - Text: strings.TrimRight(i.Message, "\n"), - FromLinter: godoxName, - }, pass) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + godoxName, + "Tool for detection of FIXME, TODO and other comment keywords", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGodox(pass *analysis.Pass, settings *config.GodoxSettings) []goanalysis.Issue { + var messages []godox.Message + for _, file := range pass.Files { + messages = append(messages, godox.Run(file, pass.Fset, settings.Keywords...)...) 
+ } + + if len(messages) == 0 { + return nil + } + + issues := make([]goanalysis.Issue, len(messages)) + + for k, i := range messages { + issues[k] = goanalysis.NewIssue(&result.Issue{ + Pos: token.Position{ + Filename: i.Pos.Filename, + Line: i.Pos.Line, + }, + Text: strings.TrimRight(i.Message, "\n"), + FromLinter: godoxName, + }, pass) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goerr113.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goerr113.go index 0c10005a08..10addc57c2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goerr113.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goerr113.go @@ -10,10 +10,8 @@ import ( func NewGoerr113() *goanalysis.Linter { return goanalysis.NewLinter( "goerr113", - "Golang linter to check the errors handling expressions", - []*analysis.Analyzer{ - err113.NewAnalyzer(), - }, + "Go linter to check the errors handling expressions", + []*analysis.Analyzer{err113.NewAnalyzer()}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go index aa340dcf3c..d2d0d3ccc5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go @@ -1,26 +1,29 @@ package golinters import ( + "fmt" "sync" gofmtAPI "github.com/golangci/gofmt/gofmt" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" ) const gofmtName = "gofmt" -func NewGofmt() *goanalysis.Linter { +func NewGofmt(settings *config.GoFmtSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: gofmtName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } + return goanalysis.NewLinter( gofmtName, "Gofmt checks whether code was gofmt-ed. 
By default "+ @@ -28,32 +31,10 @@ func NewGofmt() *goanalysis.Linter { []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } - - var issues []goanalysis.Issue - - for _, f := range fileNames { - diff, err := gofmtAPI.Run(f, lintCtx.Settings().Gofmt.Simplify) - if err != nil { // TODO: skip - return nil, err - } - if diff == nil { - continue - } - - is, err := extractIssuesFromPatch(string(diff), lintCtx.Log, lintCtx, gofmtName) - if err != nil { - return nil, errors.Wrapf(err, "can't extract issues from gofmt diff output %q", string(diff)) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runGofmt(lintCtx, pass, settings) + if err != nil { + return nil, err } if len(issues) == 0 { @@ -70,3 +51,35 @@ func NewGofmt() *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoFmtSettings) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + var rewriteRules []gofmtAPI.RewriteRule + for _, rule := range settings.RewriteRules { + rewriteRules = append(rewriteRules, gofmtAPI.RewriteRule(rule)) + } + + var issues []goanalysis.Issue + + for _, f := range fileNames { + diff, err := gofmtAPI.RunRewrite(f, settings.Simplify, rewriteRules) + if err != nil { // TODO: skip + return nil, err + } + if diff == nil { + continue + } + + is, err := extractIssuesFromPatch(string(diff), lintCtx, gofmtName) + if err != nil { + return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err) + } + + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go index 39e8092e97..afa7afbf89 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go @@ -6,9 +6,9 @@ import ( "go/token" "strings" - "github.com/pkg/errors" diffpkg "github.com/sourcegraph/go-diff/diff" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" @@ -49,12 +49,12 @@ type hunkChangesParser struct { func (p *hunkChangesParser) parseDiffLines(h *diffpkg.Hunk) { lines := bytes.Split(h.Body, []byte{'\n'}) - currentOriginalLineNumer := int(h.OrigStartLine) + currentOriginalLineNumber := int(h.OrigStartLine) var ret []diffLine for i, line := range lines { dl := diffLine{ - originalNumber: currentOriginalLineNumer, + originalNumber: currentOriginalLineNumber, } lineStr := string(line) @@ -62,7 +62,7 @@ func (p *hunkChangesParser) parseDiffLines(h *diffpkg.Hunk) { if strings.HasPrefix(lineStr, "-") { dl.typ = diffLineDeleted dl.data = strings.TrimPrefix(lineStr, "-") - currentOriginalLineNumer++ + currentOriginalLineNumber++ } else if strings.HasPrefix(lineStr, "+") { dl.typ = diffLineAdded dl.data = strings.TrimPrefix(lineStr, "+") @@ -74,12 +74,22 @@ func (p *hunkChangesParser) 
parseDiffLines(h *diffpkg.Hunk) { dl.typ = diffLineOriginal dl.data = strings.TrimPrefix(lineStr, " ") - currentOriginalLineNumer++ + currentOriginalLineNumber++ } ret = append(ret, dl) } + // if > 0, then the original file had a 'No newline at end of file' mark + if h.OrigNoNewlineAt > 0 { + dl := diffLine{ + originalNumber: currentOriginalLineNumber + 1, + typ: diffLineAdded, + data: "", + } + ret = append(ret, dl) + } + p.lines = ret } @@ -123,8 +133,8 @@ func (p *hunkChangesParser) handleDeletedLines(deletedLines []diffLine, addedLin } if len(addedLines) != 0 { - //nolint:gocritic - change.Replacement.NewLines = append(p.replacementLinesToPrepend, addedLines...) + change.Replacement.NewLines = append([]string{}, p.replacementLinesToPrepend...) + change.Replacement.NewLines = append(change.Replacement.NewLines, addedLines...) if len(p.replacementLinesToPrepend) != 0 { p.replacementLinesToPrepend = nil } @@ -207,61 +217,55 @@ func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change { return p.ret } -func getErrorTextForLinter(lintCtx *linter.Context, linterName string) string { +func getErrorTextForLinter(settings *config.LintersSettings, linterName string) string { text := "File is not formatted" switch linterName { + case gciName: + text = getErrorTextForGci(settings.Gci) case gofumptName: text = "File is not `gofumpt`-ed" - if lintCtx.Settings().Gofumpt.ExtraRules { + if settings.Gofumpt.ExtraRules { text += " with `-extra`" } case gofmtName: text = "File is not `gofmt`-ed" - if lintCtx.Settings().Gofmt.Simplify { + if settings.Gofmt.Simplify { text += " with `-s`" } + for _, rule := range settings.Gofmt.RewriteRules { + text += fmt.Sprintf(" `-r '%s -> %s'`", rule.Pattern, rule.Replacement) + } case goimportsName: text = "File is not `goimports`-ed" - if lintCtx.Settings().Goimports.LocalPrefixes != "" { - text += " with -local " + lintCtx.Settings().Goimports.LocalPrefixes - } - case gciName: - text = "File is not `gci`-ed" - localPrefixes := lintCtx.Settings().Gci.LocalPrefixes - goimportsFlag := lintCtx.Settings().Goimports.LocalPrefixes - if localPrefixes == "" && goimportsFlag != "" { - localPrefixes = goimportsFlag - } - - if localPrefixes != "" { - text += " with -local " + localPrefixes + if settings.Goimports.LocalPrefixes != "" { + text += " with -local " + settings.Goimports.LocalPrefixes } } return text } -func extractIssuesFromPatch(patch string, log logutils.Log, lintCtx *linter.Context, linterName string) ([]result.Issue, error) { +func extractIssuesFromPatch(patch string, lintCtx *linter.Context, linterName string) ([]result.Issue, error) { diffs, err := diffpkg.ParseMultiFileDiff([]byte(patch)) if err != nil { - return nil, errors.Wrap(err, "can't parse patch") + return nil, fmt.Errorf("can't parse patch: %w", err) } if len(diffs) == 0 { - return nil, fmt.Errorf("got no diffs from patch parser: %v", diffs) + return nil, fmt.Errorf("got no diffs from patch parser: %v", patch) } - issues := []result.Issue{} + var issues []result.Issue for _, d := range diffs { if len(d.Hunks) == 0 { - log.Warnf("Got no hunks in diff %+v", d) + lintCtx.Log.Warnf("Got no hunks in diff %+v", d) continue } for _, hunk := range d.Hunks { - p := hunkChangesParser{ - log: log, - } + p := hunkChangesParser{log: lintCtx.Log} + changes := p.parse(hunk) + for _, change := range changes { change := change // fix scope i := result.Issue{ @@ -270,7 +274,7 @@ func extractIssuesFromPatch(patch string, log logutils.Log, lintCtx *linter.Cont Filename: d.NewName, Line: change.LineRange.From, }, - 
Text: getErrorTextForLinter(lintCtx, linterName), + Text: getErrorTextForLinter(lintCtx.Settings(), linterName), Replacement: &change.Replacement, } if change.LineRange.From != change.LineRange.To { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go index e91e54eeaf..c2aaf121de 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go @@ -3,77 +3,57 @@ package golinters import ( "bytes" "fmt" - "io/ioutil" + "io" + "os" "sync" - "github.com/pkg/errors" "github.com/shazow/go-diff/difflib" "golang.org/x/tools/go/analysis" "mvdan.cc/gofumpt/format" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" ) const gofumptName = "gofumpt" -func NewGofumpt() *goanalysis.Linter { +type differ interface { + Diff(out io.Writer, a io.ReadSeeker, b io.ReadSeeker) error +} + +func NewGofumpt(settings *config.GofumptSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - differ := difflib.New() + + diff := difflib.New() + + var options format.Options + + if settings != nil { + options = format.Options{ + LangVersion: getLangVersion(settings), + ModulePath: settings.ModulePath, + ExtraRules: settings.ExtraRules, + } + } analyzer := &analysis.Analyzer{ Name: gofumptName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } + return goanalysis.NewLinter( gofumptName, "Gofumpt checks whether code was gofumpt-ed.", []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } - - var issues []goanalysis.Issue - - for _, f := range fileNames { - input, err := ioutil.ReadFile(f) - if err != nil { - return nil, fmt.Errorf("unable to open file %s: %w", f, err) - } - output, err := format.Source(input, format.Options{ - ExtraRules: lintCtx.Settings().Gofumpt.ExtraRules, - }) - if err != nil { - return nil, fmt.Errorf("error while running gofumpt: %w", err) - } - if !bytes.Equal(input, output) { - out := bytes.Buffer{} - _, err = out.WriteString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f)) - if err != nil { - return nil, fmt.Errorf("error while running gofumpt: %w", err) - } - - err = differ.Diff(&out, bytes.NewReader(input), bytes.NewReader(output)) - if err != nil { - return nil, fmt.Errorf("error while running gofumpt: %w", err) - } - - diff := out.String() - is, err := extractIssuesFromPatch(diff, lintCtx.Log, lintCtx, gofumptName) - if err != nil { - return nil, errors.Wrapf(err, "can't extract issues from gofumpt diff output %q", diff) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } - } + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runGofumpt(lintCtx, pass, diff, options) + if err != nil { + return nil, err } if len(issues) == 0 { @@ -90,3 +70,50 @@ func NewGofumpt() *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, options format.Options) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + var issues []goanalysis.Issue + + for _, f := range 
fileNames { + input, err := os.ReadFile(f) + if err != nil { + return nil, fmt.Errorf("unable to open file %s: %w", f, err) + } + + output, err := format.Source(input, options) + if err != nil { + return nil, fmt.Errorf("error while running gofumpt: %w", err) + } + + if !bytes.Equal(input, output) { + out := bytes.NewBufferString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f)) + + err := diff.Diff(out, bytes.NewReader(input), bytes.NewReader(output)) + if err != nil { + return nil, fmt.Errorf("error while running gofumpt: %w", err) + } + + diff := out.String() + is, err := extractIssuesFromPatch(diff, lintCtx, gofumptName) + if err != nil { + return nil, fmt.Errorf("can't extract issues from gofumpt diff output %q: %w", diff, err) + } + + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } + } + } + + return issues, nil +} + +func getLangVersion(settings *config.GofumptSettings) string { + if settings == nil || settings.LangVersion == "" { + // TODO: defaults to "1.15", in the future (v2) must be set by using build.Default.ReleaseTags like staticcheck. + return "1.15" + } + return settings.LangVersion +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go index 2ff587b0d1..5ed5d9d8de 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go @@ -4,9 +4,10 @@ import ( "go/token" "sync" - goheader "github.com/denis-tingajkin/go-header" + goheader "github.com/denis-tingaikin/go-header" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -14,72 +15,101 @@ import ( const goHeaderName = "goheader" -func NewGoHeader() *goanalysis.Linter { +func NewGoHeader(settings *config.GoHeaderSettings) *goanalysis.Linter { var mu sync.Mutex - var issues []goanalysis.Issue + var resIssues []goanalysis.Issue + + conf := &goheader.Configuration{} + if settings != nil { + conf = &goheader.Configuration{ + Values: settings.Values, + Template: settings.Template, + TemplatePath: settings.TemplatePath, + } + } analyzer := &analysis.Analyzer{ Name: goHeaderName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - goHeaderName, - "Checks is file header matches to pattern", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - cfg := lintCtx.Cfg.LintersSettings.Goheader - c := &goheader.Configuration{ - Values: cfg.Values, - Template: cfg.Template, - TemplatePath: cfg.TemplatePath, - } - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - if c.TemplatePath == "" && c.Template == "" { - // User did not pass template, so then do not run go-header linter - return nil, nil - } - template, err := c.GetTemplate() - if err != nil { - return nil, err - } - values, err := c.GetValues() + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runGoHeader(pass, conf) if err != nil { return nil, err } - a := goheader.New(goheader.WithTemplate(template), goheader.WithValues(values)) - var res []goanalysis.Issue - for _, file := range pass.Files { - path := pass.Fset.Position(file.Pos()).Filename - i := a.Analyze(&goheader.Target{ - File: file, - Path: path, - }) - if i == nil { - continue - } - issue := result.Issue{ - Pos: token.Position{ - 
Line: i.Location().Line + 1, - Column: i.Location().Position, - Filename: path, - }, - Text: i.Message(), - FromLinter: goHeaderName, - } - res = append(res, goanalysis.NewIssue(&issue, pass)) - } - if len(res) == 0 { + + if len(issues) == 0 { return nil, nil } mu.Lock() - issues = append(issues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return issues + }, + } + + return goanalysis.NewLinter( + goHeaderName, + "Checks is file header matches to pattern", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) ([]goanalysis.Issue, error) { + if conf.TemplatePath == "" && conf.Template == "" { + // User did not pass template, so then do not run go-header linter + return nil, nil + } + + template, err := conf.GetTemplate() + if err != nil { + return nil, err + } + + values, err := conf.GetValues() + if err != nil { + return nil, err + } + + a := goheader.New(goheader.WithTemplate(template), goheader.WithValues(values)) + + var issues []goanalysis.Issue + for _, file := range pass.Files { + path := pass.Fset.Position(file.Pos()).Filename + + i := a.Analyze(&goheader.Target{File: file, Path: path}) + + if i == nil { + continue + } + + issue := result.Issue{ + Pos: token.Position{ + Line: i.Location().Line + 1, + Column: i.Location().Position, + Filename: path, + }, + Text: i.Message(), + FromLinter: goHeaderName, + } + + if fix := i.Fix(); fix != nil { + issue.LineRange = &result.Range{ + From: issue.Line(), + To: issue.Line() + len(fix.Actual) - 1, + } + issue.Replacement = &result.Replacement{ + NeedOnlyDelete: len(fix.Expected) == 0, + NewLines: fix.Expected, + } + } + + issues = append(issues, goanalysis.NewIssue(&issue, pass)) + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go index 9ea4558f40..aac27f38e5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go @@ -1,60 +1,43 @@ package golinters import ( + "fmt" "sync" goimportsAPI "github.com/golangci/gofmt/goimports" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "golang.org/x/tools/imports" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" ) const goimportsName = "goimports" -func NewGoimports() *goanalysis.Linter { +func NewGoimports(settings *config.GoImportsSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: goimportsName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } + return goanalysis.NewLinter( goimportsName, - "Goimports does everything that gofmt does. Additionally it checks unused imports", + "Check import statements are formatted according to the 'goimport' command. 
"+ + "Reformat imports in autofix mode.", []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - imports.LocalPrefix = lintCtx.Settings().Goimports.LocalPrefixes - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } + imports.LocalPrefix = settings.LocalPrefixes - var issues []goanalysis.Issue - - for _, f := range fileNames { - diff, err := goimportsAPI.Run(f) - if err != nil { // TODO: skip - return nil, err - } - if diff == nil { - continue - } - - is, err := extractIssuesFromPatch(string(diff), lintCtx.Log, lintCtx, goimportsName) - if err != nil { - return nil, errors.Wrapf(err, "can't extract issues from gofmt diff output %q", string(diff)) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runGoImports(lintCtx, pass) + if err != nil { + return nil, err } if len(issues) == 0 { @@ -71,3 +54,30 @@ func NewGoimports() *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + var issues []goanalysis.Issue + + for _, f := range fileNames { + diff, err := goimportsAPI.Run(f) + if err != nil { // TODO: skip + return nil, err + } + if diff == nil { + continue + } + + is, err := extractIssuesFromPatch(string(diff), lintCtx, goimportsName) + if err != nil { + return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err) + } + + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go deleted file mode 100644 index 3b1b1b66f1..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go +++ /dev/null @@ -1,78 +0,0 @@ -package golinters - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "sync" - - lintAPI "github.com/golangci/lint-1" - "golang.org/x/tools/go/analysis" - - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" -) - -func golintProcessPkg(minConfidence float64, files []*ast.File, fset *token.FileSet, - typesPkg *types.Package, typesInfo *types.Info) ([]result.Issue, error) { - l := new(lintAPI.Linter) - ps, err := l.LintPkg(files, fset, typesPkg, typesInfo) - if err != nil { - return nil, fmt.Errorf("can't lint %d files: %s", len(files), err) - } - - if len(ps) == 0 { - return nil, nil - } - - issues := make([]result.Issue, 0, len(ps)) // This is worst case - for idx := range ps { - if ps[idx].Confidence >= minConfidence { - issues = append(issues, result.Issue{ - Pos: ps[idx].Position, - Text: ps[idx].Text, - FromLinter: golintName, - }) - // TODO: use p.Link and p.Category - } - } - - return issues, nil -} - -const golintName = "golint" - -func NewGolint() *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: golintName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - golintName, - "Golint differs from gofmt. 
Gofmt reformats Go source code, whereas golint prints out style mistakes", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - res, err := golintProcessPkg(lintCtx.Settings().Golint.MinConfidence, pass.Files, pass.Fset, pass.Pkg, pass.TypesInfo) - if err != nil || len(res) == 0 { - return nil, err - } - - mu.Lock() - for i := range res { - resIssues = append(resIssues, goanalysis.NewIssue(&res[i], pass)) - } - mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomnd.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomnd.go index f7e71b7dae..2e6d77a801 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomnd.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomnd.go @@ -8,20 +8,38 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" ) -func NewGoMND(cfg *config.Config) *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - mnd.Analyzer, - } +func NewGoMND(settings *config.GoMndSettings) *goanalysis.Linter { + var linterCfg map[string]map[string]any + + if settings != nil { + // TODO(ldez) For compatibility only, must be drop in v2. + if len(settings.Settings) > 0 { + linterCfg = settings.Settings + } else { + cfg := make(map[string]any) + if len(settings.Checks) > 0 { + cfg["checks"] = settings.Checks + } + if len(settings.IgnoredNumbers) > 0 { + cfg["ignored-numbers"] = settings.IgnoredNumbers + } + if len(settings.IgnoredFiles) > 0 { + cfg["ignored-files"] = settings.IgnoredFiles + } + if len(settings.IgnoredFunctions) > 0 { + cfg["ignored-functions"] = settings.IgnoredFunctions + } - var linterCfg map[string]map[string]interface{} - if cfg != nil { - linterCfg = cfg.LintersSettings.Gomnd.Settings + linterCfg = map[string]map[string]any{ + "mnd": cfg, + } + } } return goanalysis.NewLinter( "gomnd", "An analyzer to detect magic numbers.", - analyzers, + []*analysis.Analyzer{mnd.Analyzer}, linterCfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives.go index 40d3bf786e..56afcd465f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives.go @@ -30,6 +30,7 @@ func NewGoModDirectives(settings *config.GoModDirectivesSettings) *goanalysis.Li analyzer := &analysis.Analyzer{ Name: goanalysis.TheOnlyAnalyzerName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } return goanalysis.NewLinter( @@ -38,7 +39,7 @@ func NewGoModDirectives(settings *config.GoModDirectivesSettings) *goanalysis.Li []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + analyzer.Run = func(pass *analysis.Pass) (any, error) { once.Do(func() { results, err := gomoddirectives.Analyze(opts) if err != nil { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go index 30ca6cc3de..157bf56c35 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go +++ 
b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go @@ -6,6 +6,7 @@ import ( "github.com/ryancurrah/gomodguard" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -19,31 +20,18 @@ const ( ) // NewGomodguard returns a new Gomodguard linter. -func NewGomodguard() *goanalysis.Linter { - var ( - issues []goanalysis.Issue - mu = sync.Mutex{} - analyzer = &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } - ) - - return goanalysis.NewLinter( - gomodguardName, - gomodguardDesc, - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - linterCfg := lintCtx.Cfg.LintersSettings.Gomodguard - - processorCfg := &gomodguard.Configuration{} - processorCfg.Allowed.Modules = linterCfg.Allowed.Modules - processorCfg.Allowed.Domains = linterCfg.Allowed.Domains - processorCfg.Blocked.LocalReplaceDirectives = linterCfg.Blocked.LocalReplaceDirectives - - for n := range linterCfg.Blocked.Modules { - for k, v := range linterCfg.Blocked.Modules[n] { +func NewGomodguard(settings *config.GoModGuardSettings) *goanalysis.Linter { + var issues []goanalysis.Issue + var mu sync.Mutex + + processorCfg := &gomodguard.Configuration{} + if settings != nil { + processorCfg.Allowed.Modules = settings.Allowed.Modules + processorCfg.Allowed.Domains = settings.Allowed.Domains + processorCfg.Blocked.LocalReplaceDirectives = settings.Blocked.LocalReplaceDirectives + + for n := range settings.Blocked.Modules { + for k, v := range settings.Blocked.Modules[n] { m := map[string]gomodguard.BlockedModule{k: { Recommendations: v.Recommendations, Reason: v.Reason, @@ -53,8 +41,8 @@ func NewGomodguard() *goanalysis.Linter { } } - for n := range linterCfg.Blocked.Versions { - for k, v := range linterCfg.Blocked.Versions[n] { + for n := range settings.Blocked.Versions { + for k, v := range settings.Blocked.Versions[n] { m := map[string]gomodguard.BlockedVersion{k: { Version: v.Version, Reason: v.Reason, @@ -63,7 +51,20 @@ func NewGomodguard() *goanalysis.Linter { break } } + } + analyzer := &analysis.Analyzer{ + Name: goanalysis.TheOnlyAnalyzerName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, + } + + return goanalysis.NewLinter( + gomodguardName, + gomodguardDesc, + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { processor, err := gomodguard.NewProcessor(processorCfg) if err != nil { lintCtx.Log.Warnf("running gomodguard failed: %s: if you are not using go modules "+ @@ -71,14 +72,8 @@ func NewGomodguard() *goanalysis.Linter { return } - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var files []string - - for _, file := range pass.Files { - files = append(files, pass.Fset.PositionFor(file.Pos(), false).Filename) - } - - gomodguardIssues := processor.ProcessFiles(files) + analyzer.Run = func(pass *analysis.Pass) (any, error) { + gomodguardIssues := processor.ProcessFiles(getFileNames(pass)) mu.Lock() defer mu.Unlock() diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname.go index c5516dc7f9..e513718ba8 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname.go +++ 
b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname.go @@ -8,10 +8,12 @@ import ( ) func NewGoPrintfFuncName() *goanalysis.Linter { + a := analyzer.Analyzer + return goanalysis.NewLinter( - "goprintffuncname", - "Checks that printf-like functions are named with `f` at the end", - []*analysis.Analyzer{analyzer.Analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go index 328ba5ccc7..0c09c59672 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go @@ -3,13 +3,14 @@ package golinters import ( "fmt" "go/token" - "io/ioutil" + "io" "log" "strconv" "strings" "sync" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" "github.com/securego/gosec/v2/rules" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" @@ -26,83 +27,38 @@ func NewGosec(settings *config.GoSecSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - gasConfig := gosec.NewConfig() - var filters []rules.RuleFilter + conf := gosec.NewConfig() if settings != nil { filters = gosecRuleFilters(settings.Includes, settings.Excludes) - - for k, v := range settings.Config { - // Uses ToUpper because the parsing of the map's key change the key to lowercase. - // The value is not impacted by that: the case is respected. - gasConfig.Set(strings.ToUpper(k), v) - } + conf = toGosecConfig(settings) } - ruleDefinitions := rules.Generate(filters...) + logger := log.New(io.Discard, "", 0) - logger := log.New(ioutil.Discard, "", 0) + ruleDefinitions := rules.Generate(false, filters...) analyzer := &analysis.Analyzer{ Name: gosecName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } + return goanalysis.NewLinter( gosecName, "Inspects source code for security problems", []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - gosecAnalyzer := gosec.NewAnalyzer(gasConfig, true, logger) - gosecAnalyzer.LoadRules(ruleDefinitions.Builders()) - - pkg := &packages.Package{ - Fset: pass.Fset, - Syntax: pass.Files, - Types: pass.Pkg, - TypesInfo: pass.TypesInfo, - } - gosecAnalyzer.Check(pkg) - issues, _, _ := gosecAnalyzer.Report() - if len(issues) == 0 { - return nil, nil - } + analyzer.Run = func(pass *analysis.Pass) (any, error) { + // The `gosecAnalyzer` is here because of concurrency issue. 
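For context, a minimal sketch of how the analyzer-per-pass construction that follows fits together, using only the gosec v2 calls that appear in this hunk (NewAnalyzer, LoadRules, CheckRules, Report); the argument values and the function name are illustrative, not the linter's real configuration:

    // Sketch only; mirrors runGoSec below under the imports of this file
    // (gosec/v2, gosec/v2/rules, go/analysis, go/packages, io, log).
    func sketchGosec(pass *analysis.Pass) {
        conf := gosec.NewConfig()
        logger := log.New(io.Discard, "", 0)

        // A fresh analyzer per pass, per the concurrency note above.
        an := gosec.NewAnalyzer(conf, true, false, false, 1, logger)
        an.LoadRules(rules.Generate(false).RulesInfo())

        pkg := &packages.Package{
            Fset:      pass.Fset,
            Syntax:    pass.Files,
            Types:     pass.Pkg,
            TypesInfo: pass.TypesInfo,
        }
        an.CheckRules(pkg)

        issues, _, _ := an.Report()
        _ = issues // runGoSec converts these into result.Issue values
    }
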
+ gosecAnalyzer := gosec.NewAnalyzer(conf, true, settings.ExcludeGenerated, false, settings.Concurrency, logger) + gosecAnalyzer.LoadRules(ruleDefinitions.RulesInfo()) - res := make([]goanalysis.Issue, 0, len(issues)) - for _, i := range issues { - text := fmt.Sprintf("%s: %s", i.RuleID, i.What) // TODO: use severity and confidence - var r *result.Range - line, err := strconv.Atoi(i.Line) - if err != nil { - r = &result.Range{} - if n, rerr := fmt.Sscanf(i.Line, "%d-%d", &r.From, &r.To); rerr != nil || n != 2 { - lintCtx.Log.Warnf("Can't convert gosec line number %q of %v to int: %s", i.Line, i, err) - continue - } - line = r.From - } - - column, err := strconv.Atoi(i.Col) - if err != nil { - lintCtx.Log.Warnf("Can't convert gosec column number %q of %v to int: %s", i.Col, i, err) - continue - } - - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.File, - Line: line, - Column: column, - }, - Text: text, - LineRange: r, - FromLinter: gosecName, - }, pass)) - } + issues := runGoSec(lintCtx, pass, settings, gosecAnalyzer) mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil @@ -112,6 +68,113 @@ func NewGosec(settings *config.GoSecSettings) *goanalysis.Linter { }).WithLoadMode(goanalysis.LoadModeTypesInfo) } +func runGoSec(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoSecSettings, analyzer *gosec.Analyzer) []goanalysis.Issue { + pkg := &packages.Package{ + Fset: pass.Fset, + Syntax: pass.Files, + Types: pass.Pkg, + TypesInfo: pass.TypesInfo, + } + + analyzer.CheckRules(pkg) + + secIssues, _, _ := analyzer.Report() + if len(secIssues) == 0 { + return nil + } + + severity, err := convertToScore(settings.Severity) + if err != nil { + lintCtx.Log.Warnf("The provided severity %v", err) + } + + confidence, err := convertToScore(settings.Confidence) + if err != nil { + lintCtx.Log.Warnf("The provided confidence %v", err) + } + + secIssues = filterIssues(secIssues, severity, confidence) + + issues := make([]goanalysis.Issue, 0, len(secIssues)) + for _, i := range secIssues { + text := fmt.Sprintf("%s: %s", i.RuleID, i.What) + + var r *result.Range + + line, err := strconv.Atoi(i.Line) + if err != nil { + r = &result.Range{} + if n, rerr := fmt.Sscanf(i.Line, "%d-%d", &r.From, &r.To); rerr != nil || n != 2 { + lintCtx.Log.Warnf("Can't convert gosec line number %q of %v to int: %s", i.Line, i, err) + continue + } + line = r.From + } + + column, err := strconv.Atoi(i.Col) + if err != nil { + lintCtx.Log.Warnf("Can't convert gosec column number %q of %v to int: %s", i.Col, i, err) + continue + } + + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Severity: convertScoreToString(i.Severity), + Pos: token.Position{ + Filename: i.File, + Line: line, + Column: column, + }, + Text: text, + LineRange: r, + FromLinter: gosecName, + }, pass)) + } + + return issues +} + +func toGosecConfig(settings *config.GoSecSettings) gosec.Config { + conf := gosec.NewConfig() + + for k, v := range settings.Config { + if k == gosec.Globals { + convertGosecGlobals(v, conf) + continue + } + + // Uses ToUpper because the parsing of the map's key change the key to lowercase. + // The value is not impacted by that: the case is respected. 
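A small worked example of that normalization with hypothetical user configuration; conf.Set and the lowercased keys are the only parts taken from the hunk:

    // After config parsing, rule IDs arrive lowercased ("g101") while
    // gosec expects "G101"; the values pass through unchanged.
    userCfg := map[string]any{
        "g101": map[string]any{"pattern": "(?i)passwd|password"}, // hypothetical
    }
    conf := gosec.NewConfig()
    for k, v := range userCfg {
        conf.Set(strings.ToUpper(k), v) // stored under "G101"
    }
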
+ conf.Set(strings.ToUpper(k), v) + } + + return conf +} + +func convertScoreToString(score issue.Score) string { + switch score { + case issue.Low: + return "low" + case issue.Medium: + return "medium" + case issue.High: + return "high" + default: + return "" + } +} + +// based on https://github.com/securego/gosec/blob/47bfd4eb6fc7395940933388550b547538b4c946/config.go#L52-L62 +func convertGosecGlobals(globalOptionFromConfig any, conf gosec.Config) { + globalOptionMap, ok := globalOptionFromConfig.(map[string]any) + if !ok { + return + } + + for k, v := range globalOptionMap { + conf.SetGlobal(gosec.GlobalOption(k), fmt.Sprintf("%v", v)) + } +} + // based on https://github.com/securego/gosec/blob/569328eade2ccbad4ce2d0f21ee158ab5356a5cf/cmd/gosec/main.go#L170-L188 func gosecRuleFilters(includes, excludes []string) []rules.RuleFilter { var filters []rules.RuleFilter @@ -126,3 +189,31 @@ func gosecRuleFilters(includes, excludes []string) []rules.RuleFilter { return filters } + +// code borrowed from https://github.com/securego/gosec/blob/69213955dacfd560562e780f723486ef1ca6d486/cmd/gosec/main.go#L250-L262 +func convertToScore(str string) (issue.Score, error) { + str = strings.ToLower(str) + switch str { + case "", "low": + return issue.Low, nil + case "medium": + return issue.Medium, nil + case "high": + return issue.High, nil + default: + return issue.Low, fmt.Errorf("'%s' is invalid, use low instead. Valid options: low, medium, high", str) + } +} + +// code borrowed from https://github.com/securego/gosec/blob/69213955dacfd560562e780f723486ef1ca6d486/cmd/gosec/main.go#L264-L276 +func filterIssues(issues []*issue.Issue, severity, confidence issue.Score) []*issue.Issue { + res := make([]*issue.Issue, 0) + + for _, i := range issues { + if i.Severity >= severity && i.Confidence >= confidence { + res = append(res, i) + } + } + + return res +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosimple.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosimple.go index fa14f1a966..de60ded73e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosimple.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosimple.go @@ -14,7 +14,7 @@ func NewGosimple(settings *config.StaticCheckSettings) *goanalysis.Linter { return goanalysis.NewLinter( "gosimple", - "Linter for Go source code that specializes in simplifying a code", + "Linter for Go source code that specializes in simplifying code", analyzers, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan.go new file mode 100644 index 0000000000..2e01fcc70d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan.go @@ -0,0 +1,32 @@ +package golinters + +import ( + "strings" + + "github.com/xen0n/gosmopolitan" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGosmopolitan(s *config.GosmopolitanSettings) *goanalysis.Linter { + a := gosmopolitan.NewAnalyzer() + + cfgMap := map[string]map[string]any{} + if s != nil { + cfgMap[a.Name] = map[string]any{ + "allowtimelocal": s.AllowTimeLocal, + "escapehatches": strings.Join(s.EscapeHatches, ","), + "lookattests": !s.IgnoreTests, + "watchforscripts": strings.Join(s.WatchForScripts, ","), + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + 
[]*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet.go index b3860e0170..ecd406e3bf 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet.go @@ -1,7 +1,11 @@ package golinters import ( + "slices" + "sort" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/appends" "golang.org/x/tools/go/analysis/passes/asmdecl" "golang.org/x/tools/go/analysis/passes/assign" "golang.org/x/tools/go/analysis/passes/atomic" @@ -14,6 +18,8 @@ import ( "golang.org/x/tools/go/analysis/passes/copylock" _ "golang.org/x/tools/go/analysis/passes/ctrlflow" // unused, internal analyzer "golang.org/x/tools/go/analysis/passes/deepequalerrors" + "golang.org/x/tools/go/analysis/passes/defers" + "golang.org/x/tools/go/analysis/passes/directive" "golang.org/x/tools/go/analysis/passes/errorsas" "golang.org/x/tools/go/analysis/passes/fieldalignment" "golang.org/x/tools/go/analysis/passes/findcall" @@ -31,12 +37,14 @@ import ( "golang.org/x/tools/go/analysis/passes/shadow" "golang.org/x/tools/go/analysis/passes/shift" "golang.org/x/tools/go/analysis/passes/sigchanyzer" + "golang.org/x/tools/go/analysis/passes/slog" "golang.org/x/tools/go/analysis/passes/sortslice" "golang.org/x/tools/go/analysis/passes/stdmethods" "golang.org/x/tools/go/analysis/passes/stringintconv" "golang.org/x/tools/go/analysis/passes/structtag" "golang.org/x/tools/go/analysis/passes/testinggoroutine" "golang.org/x/tools/go/analysis/passes/tests" + "golang.org/x/tools/go/analysis/passes/timeformat" "golang.org/x/tools/go/analysis/passes/unmarshal" "golang.org/x/tools/go/analysis/passes/unreachable" "golang.org/x/tools/go/analysis/passes/unsafeptr" @@ -45,10 +53,12 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/logutils" ) var ( allAnalyzers = []*analysis.Analyzer{ + appends.Analyzer, asmdecl.Analyzer, assign.Analyzer, atomic.Analyzer, @@ -59,6 +69,8 @@ var ( composite.Analyzer, copylock.Analyzer, deepequalerrors.Analyzer, + defers.Analyzer, + directive.Analyzer, errorsas.Analyzer, fieldalignment.Analyzer, findcall.Analyzer, @@ -74,12 +86,14 @@ var ( shadow.Analyzer, shift.Analyzer, sigchanyzer.Analyzer, + slog.Analyzer, sortslice.Analyzer, stdmethods.Analyzer, stringintconv.Analyzer, structtag.Analyzer, testinggoroutine.Analyzer, tests.Analyzer, + timeformat.Analyzer, unmarshal.Analyzer, unreachable.Analyzer, unsafeptr.Analyzer, @@ -87,8 +101,9 @@ var ( unusedwrite.Analyzer, } - // https://github.com/golang/go/blob/879db69ce2de814bc3203c39b45617ba51cc5366/src/cmd/vet/main.go#L40-L68 + // https://github.com/golang/go/blob/b56645a87b28840a180d64077877cb46570b4176/src/cmd/vet/main.go#L49-L81 defaultAnalyzers = []*analysis.Analyzer{ + appends.Analyzer, asmdecl.Analyzer, assign.Analyzer, atomic.Analyzer, @@ -97,6 +112,8 @@ var ( cgocall.Analyzer, composite.Analyzer, copylock.Analyzer, + defers.Analyzer, + directive.Analyzer, errorsas.Analyzer, framepointer.Analyzer, httpresponse.Analyzer, @@ -107,11 +124,13 @@ var ( printf.Analyzer, shift.Analyzer, sigchanyzer.Analyzer, + slog.Analyzer, stdmethods.Analyzer, stringintconv.Analyzer, structtag.Analyzer, testinggoroutine.Analyzer, tests.Analyzer, + timeformat.Analyzer, unmarshal.Analyzer, unreachable.Analyzer, 
unsafeptr.Analyzer, @@ -119,67 +138,92 @@ var ( } ) -func isAnalyzerEnabled(name string, cfg *config.GovetSettings, defaultAnalyzers []*analysis.Analyzer) bool { - if cfg.EnableAll { - for _, n := range cfg.Disable { - if n == name { - return false - } - } - return true - } - // Raw for loops should be OK on small slice lengths. - for _, n := range cfg.Enable { - if n == name { - return true - } - } - for _, n := range cfg.Disable { - if n == name { - return false - } - } - if cfg.DisableAll { - return false - } - for _, a := range defaultAnalyzers { - if a.Name == name { - return true - } +var ( + govetDebugf = logutils.Debug(logutils.DebugKeyGovet) + isGovetDebug = logutils.HaveDebugTag(logutils.DebugKeyGovet) +) + +func NewGovet(settings *config.GovetSettings) *goanalysis.Linter { + var conf map[string]map[string]any + if settings != nil { + conf = settings.Settings } - return false + + return goanalysis.NewLinter( + "govet", + "Vet examines Go source code and reports suspicious constructs. "+ + "It is roughly the same as 'go vet' and uses its passes.", + analyzersFromConfig(settings), + conf, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } -func analyzersFromConfig(cfg *config.GovetSettings) []*analysis.Analyzer { - if cfg == nil { - return defaultAnalyzers - } +func analyzersFromConfig(settings *config.GovetSettings) []*analysis.Analyzer { + debugAnalyzersListf(allAnalyzers, "All available analyzers") + debugAnalyzersListf(defaultAnalyzers, "Default analyzers") - if cfg.CheckShadowing { - // Keeping for backward compatibility. - cfg.Enable = append(cfg.Enable, shadow.Analyzer.Name) + if settings == nil { + return defaultAnalyzers } var enabledAnalyzers []*analysis.Analyzer for _, a := range allAnalyzers { - if isAnalyzerEnabled(a.Name, cfg, defaultAnalyzers) { + if isAnalyzerEnabled(a.Name, settings, defaultAnalyzers) { enabledAnalyzers = append(enabledAnalyzers, a) } } + debugAnalyzersListf(enabledAnalyzers, "Enabled by config analyzers") + return enabledAnalyzers } -func NewGovet(cfg *config.GovetSettings) *goanalysis.Linter { - var settings map[string]map[string]interface{} - if cfg != nil { - settings = cfg.Settings +func isAnalyzerEnabled(name string, cfg *config.GovetSettings, defaultAnalyzers []*analysis.Analyzer) bool { + // TODO(ldez) remove loopclosure when go1.23 + if name == loopclosure.Analyzer.Name && config.IsGoGreaterThanOrEqual(cfg.Go, "1.22") { + return false + } + + // TODO(ldez) re-enable httpresponse once https://github.com/golangci/golangci-lint/issues/4482 is fixed. + if name == httpresponse.Analyzer.Name { + govetDebugf("httpresponse is disabled due to panic. See https://github.com/golang/go/issues/66259") + return false + } + + // Keeping for backward compatibility. 
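To make the precedence concrete before the compatibility branch and the switch that follow, a worked example against hypothetical settings (isAnalyzerEnabled and the field names come from this hunk):

    cfg := &config.GovetSettings{
        Enable:  []string{"shadow"},
        Disable: []string{"fieldalignment"},
    }
    // With the logic below:
    //   "shadow"         -> true  (explicitly enabled; CheckShadowing = true
    //                              would force it on as well)
    //   "fieldalignment" -> false (explicitly disabled)
    //   "printf"         -> true  (falls through to the default set)
    //   "nilness"        -> false (in allAnalyzers but not the defaults)
    enabled := isAnalyzerEnabled("printf", cfg, defaultAnalyzers)
    _ = enabled // true
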
+ if cfg.CheckShadowing && name == shadow.Analyzer.Name { + return true + } + + switch { + case cfg.EnableAll: + return !slices.Contains(cfg.Disable, name) + + case slices.Contains(cfg.Enable, name): + return true + + case slices.Contains(cfg.Disable, name): + return false + + case cfg.DisableAll: + return false + + default: + return slices.ContainsFunc(defaultAnalyzers, func(a *analysis.Analyzer) bool { return a.Name == name }) + } +} + +func debugAnalyzersListf(analyzers []*analysis.Analyzer, message string) { + if !isGovetDebug { + return } - return goanalysis.NewLinter( - "govet", - "Vet examines Go source code and reports suspicious constructs, "+ - "such as Printf calls whose arguments do not align with the format string", - analyzersFromConfig(cfg), - settings, - ).WithLoadMode(goanalysis.LoadModeTypesInfo) + + analyzerNames := make([]string, 0, len(analyzers)) + for _, a := range analyzers { + analyzerNames = append(analyzerNames, a.Name) + } + + sort.Strings(analyzerNames) + + govetDebugf("%s (%d): %s", message, len(analyzerNames), analyzerNames) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper.go new file mode 100644 index 0000000000..41761f2ae0 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper.go @@ -0,0 +1,32 @@ +package golinters + +import ( + grouper "github.com/leonklingele/grouper/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGrouper(settings *config.GrouperSettings) *goanalysis.Linter { + linterCfg := map[string]map[string]any{} + if settings != nil { + linterCfg["grouper"] = map[string]any{ + "const-require-single-const": settings.ConstRequireSingleConst, + "const-require-grouping": settings.ConstRequireGrouping, + "import-require-single-import": settings.ImportRequireSingleImport, + "import-require-grouping": settings.ImportRequireGrouping, + "type-require-single-type": settings.TypeRequireSingleType, + "type-require-grouping": settings.TypeRequireGrouping, + "var-require-single-var": settings.VarRequireSingleVar, + "var-require-grouping": settings.VarRequireGrouping, + } + } + + return goanalysis.NewLinter( + "grouper", + "Analyze expression groups.", + []*analysis.Analyzer{grouper.New()}, + linterCfg, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ifshort.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ifshort.go deleted file mode 100644 index c26f08e403..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ifshort.go +++ /dev/null @@ -1,28 +0,0 @@ -package golinters - -import ( - "github.com/esimonov/ifshort/pkg/analyzer" - "golang.org/x/tools/go/analysis" - - "github.com/golangci/golangci-lint/pkg/config" - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" -) - -func NewIfshort(settings *config.IfshortSettings) *goanalysis.Linter { - var cfg map[string]map[string]interface{} - if settings != nil { - cfg = map[string]map[string]interface{}{ - analyzer.Analyzer.Name: { - "max-decl-lines": settings.MaxDeclLines, - "max-decl-chars": settings.MaxDeclChars, - }, - } - } - - return goanalysis.NewLinter( - "ifshort", - "Checks that your code uses short syntax for if-statements whenever possible", - []*analysis.Analyzer{analyzer.Analyzer}, - cfg, - ).WithLoadMode(goanalysis.LoadModeSyntax) -} diff --git 
a/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas.go index 523aa257b4..a426333dbf 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas.go @@ -3,8 +3,9 @@ package golinters import ( "fmt" "strconv" + "strings" - "github.com/julz/importas" // nolint: misspell + "github.com/julz/importas" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" @@ -25,20 +26,38 @@ func NewImportAs(settings *config.ImportAsSettings) *goanalysis.Linter { return } if len(settings.Alias) == 0 { - lintCtx.Log.Infof("importas settings found, but no aliases listed. List aliases under alias: key.") // nolint: misspell + lintCtx.Log.Infof("importas settings found, but no aliases listed. List aliases under alias: key.") } - err := analyzer.Flags.Set("no-unaliased", strconv.FormatBool(settings.NoUnaliased)) - if err != nil { + if err := analyzer.Flags.Set("no-unaliased", strconv.FormatBool(settings.NoUnaliased)); err != nil { lintCtx.Log.Errorf("failed to parse configuration: %v", err) } + if err := analyzer.Flags.Set("no-extra-aliases", strconv.FormatBool(settings.NoExtraAliases)); err != nil { + lintCtx.Log.Errorf("failed to parse configuration: %v", err) + } + + uniqPackages := make(map[string]config.ImportAsAlias) + uniqAliases := make(map[string]config.ImportAsAlias) for _, a := range settings.Alias { if a.Pkg == "" { lintCtx.Log.Errorf("invalid configuration, empty package: pkg=%s alias=%s", a.Pkg, a.Alias) continue } + if v, ok := uniqPackages[a.Pkg]; ok { + lintCtx.Log.Errorf("invalid configuration, multiple aliases for the same package: pkg=%s aliases=[%s,%s]", a.Pkg, a.Alias, v.Alias) + } else { + uniqPackages[a.Pkg] = a + } + + // skip the duplication check when the alias is a regular expression replacement pattern (ie. contains `$`). 
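A hypothetical alias list that exercises both sides of the check below; the config.ImportAsAlias shape is taken from this hunk, while the packages and patterns are illustrative:

    aliases := []config.ImportAsAlias{
        // Fixed aliases: each alias string must be unique.
        {Pkg: "k8s.io/api/core/v1", Alias: "corev1"},
        {Pkg: "k8s.io/api/apps/v1", Alias: "appsv1"},
        // A regex replacement pattern: it contains `$`, so the
        // duplicate-alias check below deliberately skips it.
        {Pkg: `knative.dev/serving/pkg/apis/(\w+)/(v[\w\d]+)`, Alias: "$1$2"},
    }
    _ = aliases
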
+ if v, ok := uniqAliases[a.Alias]; ok && !strings.Contains(a.Alias, "$") { + lintCtx.Log.Errorf("invalid configuration, multiple packages with the same alias: alias=%s packages=[%s,%s]", a.Alias, a.Pkg, v.Pkg) + } else { + uniqAliases[a.Alias] = a + } + err := analyzer.Flags.Set("alias", fmt.Sprintf("%s:%s", a.Pkg, a.Alias)) if err != nil { lintCtx.Log.Errorf("failed to parse configuration: %v", err) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/inamedparam.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/inamedparam.go new file mode 100644 index 0000000000..887f3db2ad --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/inamedparam.go @@ -0,0 +1,30 @@ +package golinters + +import ( + "github.com/macabu/inamedparam" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewINamedParam(settings *config.INamedParamSettings) *goanalysis.Linter { + a := inamedparam.Analyzer + + var cfg map[string]map[string]any + + if settings != nil { + cfg = map[string]map[string]any{ + a.Name: { + "skip-single-param": settings.SkipSingleParam, + }, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go index c87bb2fa51..ac5eb20adb 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go @@ -8,10 +8,12 @@ import ( ) func NewIneffassign() *goanalysis.Linter { + a := ineffassign.Analyzer + return goanalysis.NewLinter( - "ineffassign", + a.Name, "Detects when assignments to existing variables are not used", - []*analysis.Analyzer{ineffassign.Analyzer}, + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacebloat.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacebloat.go new file mode 100644 index 0000000000..a6dbfe178f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacebloat.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "github.com/sashamelentyev/interfacebloat/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewInterfaceBloat(settings *config.InterfaceBloatSettings) *goanalysis.Linter { + a := analyzer.New() + + var cfg map[string]map[string]any + if settings != nil { + cfg = map[string]map[string]any{ + a.Name: { + analyzer.InterfaceMaxMethodsFlag: settings.Max, + }, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go deleted file mode 100644 index 1edbe894cc..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go +++ /dev/null @@ -1,67 +0,0 @@ -package golinters - -import ( - "sync" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/buildssa" - "mvdan.cc/interfacer/check" - - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - 
"github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" -) - -const interfacerName = "interfacer" - -func NewInterfacer() *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: interfacerName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Requires: []*analysis.Analyzer{buildssa.Analyzer}, - } - return goanalysis.NewLinter( - interfacerName, - "Linter that suggests narrower interface types", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) - ssaPkg := ssa.Pkg - c := &check.Checker{} - prog := goanalysis.MakeFakeLoaderProgram(pass) - c.Program(prog) - c.ProgramSSA(ssaPkg.Prog) - - issues, err := c.Check() - if err != nil { - return nil, err - } - if len(issues) == 0 { - return nil, nil - } - - res := make([]goanalysis.Issue, 0, len(issues)) - for _, i := range issues { - pos := pass.Fset.Position(i.Pos()) - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: pos, - Text: i.Message(), - FromLinter: interfacerName, - }, pass)) - } - - mu.Lock() - resIssues = append(resIssues, res...) - mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/intrange.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/intrange.go new file mode 100644 index 0000000000..8de956dc12 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/intrange.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/ckaznocha/intrange" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewIntrange() *goanalysis.Linter { + a := intrange.Analyzer + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ireturn.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ireturn.go new file mode 100644 index 0000000000..dc09dad0ee --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ireturn.go @@ -0,0 +1,31 @@ +package golinters + +import ( + "strings" + + "github.com/butuzov/ireturn/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewIreturn(settings *config.IreturnSettings) *goanalysis.Linter { + a := analyzer.NewAnalyzer() + + cfg := map[string]map[string]any{} + if settings != nil { + cfg[a.Name] = map[string]any{ + "allow": strings.Join(settings.Allow, ","), + "reject": strings.Join(settings.Reject, ","), + "nonolint": true, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go index 5f26e91ddb..84ce61a9ee 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go @@ -2,6 +2,7 @@ package golinters import ( "bufio" + "errors" "fmt" "go/token" "os" @@ -11,25 +12,107 @@ import ( 
"golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) +const lllName = "lll" + +const goCommentDirectivePrefix = "//go:" + +func NewLLL(settings *config.LllSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: lllName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runLll(pass, settings) + if err != nil { + return nil, err + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) + mu.Unlock() + + return nil, nil + }, + } + + return goanalysis.NewLinter( + lllName, + "Reports long lines", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func runLll(pass *analysis.Pass, settings *config.LllSettings) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + spaces := strings.Repeat(" ", settings.TabWidth) + + var issues []goanalysis.Issue + for _, f := range fileNames { + lintIssues, err := getLLLIssuesForFile(f, settings.LineLength, spaces) + if err != nil { + return nil, err + } + + for i := range lintIssues { + issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) + } + } + + return issues, nil +} + func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]result.Issue, error) { var res []result.Issue f, err := os.Open(filename) if err != nil { - return nil, fmt.Errorf("can't open file %s: %s", filename, err) + return nil, fmt.Errorf("can't open file %s: %w", filename, err) } defer f.Close() - lineNumber := 1 + lineNumber := 0 + multiImportEnabled := false + scanner := bufio.NewScanner(f) for scanner.Scan() { + lineNumber++ + line := scanner.Text() - line = strings.Replace(line, "\t", tabSpaces, -1) + line = strings.ReplaceAll(line, "\t", tabSpaces) + + if strings.HasPrefix(line, goCommentDirectivePrefix) { + continue + } + + if strings.HasPrefix(line, "import") { + multiImportEnabled = strings.HasSuffix(line, "(") + continue + } + + if multiImportEnabled { + if line == ")" { + multiImportEnabled = false + } + + continue + } + lineLen := utf8.RuneCountInString(line) if lineLen > maxLineLen { res = append(res, result.Issue{ @@ -41,21 +124,20 @@ func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]r FromLinter: lllName, }) } - lineNumber++ } if err := scanner.Err(); err != nil { - if err == bufio.ErrTooLong && maxLineLen < bufio.MaxScanTokenSize { + if errors.Is(err, bufio.ErrTooLong) && maxLineLen < bufio.MaxScanTokenSize { // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize // In the case where the specified maxLineLen is smaller than bufio.MaxScanTokenSize // we can return this line as a long line instead of returning an error. // The reason for this change is that this case might happen with autogenerated files // The go-bindata tool for instance might generate a file with a very long line. - // In this case, as it's a auto generated file, the warning returned by lll will + // In this case, as it's an auto generated file, the warning returned by lll will // be ignored. 
// But if we return a linter error here, and this error happens for an autogenerated // file the error will be discarded (fine), but all the subsequent errors for lll will - // be discarded for other files and we'll miss legit error. + // be discarded for other files, and we'll miss legit error. res = append(res, result.Issue{ Pos: token.Position{ Filename: filename, @@ -66,59 +148,9 @@ func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]r FromLinter: lllName, }) } else { - return nil, fmt.Errorf("can't scan file %s: %s", filename, err) + return nil, fmt.Errorf("can't scan file %s: %w", filename, err) } } return res, nil } - -const lllName = "lll" - -func NewLLL() *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: lllName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - lllName, - "Reports long lines", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } - - var res []goanalysis.Issue - spaces := strings.Repeat(" ", lintCtx.Settings().Lll.TabWidth) - for _, f := range fileNames { - issues, err := getLLLIssuesForFile(f, lintCtx.Settings().Lll.LineLength, spaces) - if err != nil { - return nil, err - } - for i := range issues { - res = append(res, goanalysis.NewIssue(&issues[i], pass)) - } - } - - if len(res) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, res...) - mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck.go new file mode 100644 index 0000000000..fc29127c3b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck.go @@ -0,0 +1,44 @@ +package golinters + +import ( + "github.com/timonwong/loggercheck" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewLoggerCheck(settings *config.LoggerCheckSettings) *goanalysis.Linter { + var opts []loggercheck.Option + + if settings != nil { + var disable []string + if !settings.Kitlog { + disable = append(disable, "kitlog") + } + if !settings.Klog { + disable = append(disable, "klog") + } + if !settings.Logr { + disable = append(disable, "logr") + } + if !settings.Zap { + disable = append(disable, "zap") + } + + opts = []loggercheck.Option{ + loggercheck.WithDisable(disable), + loggercheck.WithRequireStringKey(settings.RequireStringKey), + loggercheck.WithRules(settings.Rules), + loggercheck.WithNoPrintfLike(settings.NoPrintfLike), + } + } + + analyzer := loggercheck.NewAnalyzer(opts...) 
+ return goanalysis.NewLinter( + analyzer.Name, + analyzer.Doc, + []*analysis.Analyzer{analyzer}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx.go new file mode 100644 index 0000000000..55509d970c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx.go @@ -0,0 +1,30 @@ +package golinters + +import ( + "github.com/yagipy/maintidx" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewMaintIdx(cfg *config.MaintIdxSettings) *goanalysis.Linter { + analyzer := maintidx.Analyzer + + cfgMap := map[string]map[string]any{ + analyzer.Name: {"under": 20}, + } + + if cfg != nil { + cfgMap[analyzer.Name] = map[string]any{ + "under": cfg.Under, + } + } + + return goanalysis.NewLinter( + analyzer.Name, + analyzer.Doc, + []*analysis.Analyzer{analyzer}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero.go index cdde09291d..7a14c8e09d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero.go @@ -1,12 +1,13 @@ package golinters import ( + "fmt" "sync" "github.com/ashanbrown/makezero/makezero" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -14,47 +15,60 @@ import ( const makezeroName = "makezero" -func NewMakezero() *goanalysis.Linter { +func NewMakezero(settings *config.MakezeroSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: makezeroName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - makezeroName, - "Finds slice declarations with non-zero initial length", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - s := &lintCtx.Settings().Makezero - - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []goanalysis.Issue - linter := makezero.NewLinter(s.Always) - for _, file := range pass.Files { - hints, err := linter.Run(pass.Fset, pass.TypesInfo, file) - if err != nil { - return nil, errors.Wrapf(err, "makezero linter failed on file %q", file.Name.String()) - } - for _, hint := range hints { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: hint.Position(), - Text: hint.Details(), - FromLinter: makezeroName, - }, pass)) - } + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runMakeZero(pass, settings) + if err != nil { + return nil, err } - if len(res) == 0 { + + if len(issues) == 0 { return nil, nil } + mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() + return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + makezeroName, + "Finds slice declarations with non-zero initial length", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } + +func runMakeZero(pass *analysis.Pass, settings *config.MakezeroSettings) ([]goanalysis.Issue, error) { + zero := makezero.NewLinter(settings.Always) + + var issues []goanalysis.Issue + + for _, file := range pass.Files { + hints, err := zero.Run(pass.Fset, pass.TypesInfo, file) + if err != nil { + return nil, fmt.Errorf("makezero linter failed on file %q: %w", file.Name.String(), err) + } + + for _, hint := range hints { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: hint.Position(), + Text: hint.Details(), + FromLinter: makezeroName, + }, pass)) + } + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go deleted file mode 100644 index 22422b8c6a..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go +++ /dev/null @@ -1,58 +0,0 @@ -package golinters - -import ( - "fmt" - "sync" - - malignedAPI "github.com/golangci/maligned" - "golang.org/x/tools/go/analysis" - - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" -) - -func NewMaligned() *goanalysis.Linter { - const linterName = "maligned" - var mu sync.Mutex - var res []goanalysis.Issue - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - linterName, - "Tool to detect Go structs that would take less memory if their fields were sorted", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - prog := goanalysis.MakeFakeLoaderProgram(pass) - - malignedIssues := malignedAPI.Run(prog) - if len(malignedIssues) == 0 { - return nil, nil - } - - issues := make([]goanalysis.Issue, 0, len(malignedIssues)) - for _, i := range malignedIssues { - text := fmt.Sprintf("struct of size %d bytes could be of size %d bytes", i.OldSize, i.NewSize) - if lintCtx.Settings().Maligned.SuggestNewOrder { - text += fmt.Sprintf(":\n%s", formatCodeBlock(i.NewStructDef, lintCtx.Cfg)) - } - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: i.Pos, - Text: text, - FromLinter: linterName, - }, pass)) - } - - mu.Lock() - res = append(res, issues...) 
- mu.Unlock() - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return res - }).WithLoadMode(goanalysis.LoadModeTypesInfo) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror.go new file mode 100644 index 0000000000..d6e2bb06ac --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror.go @@ -0,0 +1,70 @@ +package golinters + +import ( + "sync" + + "github.com/butuzov/mirror" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewMirror() *goanalysis.Linter { + var ( + mu sync.Mutex + issues []goanalysis.Issue + ) + + a := mirror.NewAnalyzer() + a.Run = func(pass *analysis.Pass) (any, error) { + // mirror only lints test files if the `--with-tests` flag is passed, + // so we pass the `with-tests` flag as true to the analyzer before running it. + // This can be turned off by using the regular golangci-lint flags such as `--tests` or `--skip-files` + // or can be disabled per linter via exclude rules. + // (see https://github.com/golangci/golangci-lint/issues/2527#issuecomment-1023707262) + violations := mirror.Run(pass, true) + + if len(violations) == 0 { + return nil, nil + } + + for index := range violations { + i := violations[index].Issue(pass.Fset) + + issue := result.Issue{ + FromLinter: a.Name, + Text: i.Message, + Pos: i.Start, + } + + if i.InlineFix != "" { + issue.Replacement = &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: i.Start.Column - 1, + Length: len(i.Original), + NewString: i.InlineFix, + }, + } + } + + mu.Lock() + issues = append(issues, goanalysis.NewIssue(&issue, pass)) + mu.Unlock() + } + + return nil, nil + } + + analyzer := goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return issues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) + + return analyzer +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go index 80ecf9bb66..8a97534c58 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go @@ -5,34 +5,142 @@ import ( "go/token" "strings" "sync" + "unicode" "github.com/golangci/misspell" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func runMisspellOnFile(fileName string, r *misspell.Replacer, lintCtx *linter.Context) ([]result.Issue, error) { - var res []result.Issue - fileContent, err := lintCtx.FileCache.GetFileBytes(fileName) +const misspellName = "misspell" + +func NewMisspell(settings *config.MisspellSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: misspellName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, + } + + return goanalysis.NewLinter( + misspellName, + "Finds commonly misspelled English words", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + replacer, ruleErr := createMisspellReplacer(settings) + + 
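The replacer just created is a plain `misspell.Replacer`, and the `analyzer.Run` assignment that follows merely feeds file contents through it. A self-contained sketch of that underlying API, assuming only the calls visible in this diff (the sample input is invented):

```Go
package main

import (
	"fmt"

	"github.com/golangci/misspell"
)

func main() {
	r := misspell.Replacer{Replacements: misspell.DictMain}
	r.Compile() // must happen before Replace; as noted in createMisspellReplacer below, it can panic

	// Replace treats its input as plain text, so unlike ReplaceGo it also
	// catches typos inside string literals, not just comments.
	fixed, diffs := r.Replace(`fmt.Println("this langauge is Go")`)
	for _, d := range diffs {
		fmt.Printf("`%s` is a misspelling of `%s` (line %d, col %d)\n",
			d.Original, d.Corrected, d.Line, d.Column)
	}
	fmt.Println(fixed)
}
```

Note that `ruleErr` is deliberately not returned at construction time: it is surfaced inside `analyzer.Run` below, so a bad locale or extra-words entry fails the lint run rather than the linter's construction.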
analyzer.Run = func(pass *analysis.Pass) (any, error) { + if ruleErr != nil { + return nil, ruleErr + } + + issues, err := runMisspell(lintCtx, pass, replacer, settings.Mode) + if err != nil { + return nil, err + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func runMisspell(lintCtx *linter.Context, pass *analysis.Pass, replacer *misspell.Replacer, mode string) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + var issues []goanalysis.Issue + for _, filename := range fileNames { + lintIssues, err := runMisspellOnFile(lintCtx, filename, replacer, mode) + if err != nil { + return nil, err + } + + for i := range lintIssues { + issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) + } + } + + return issues, nil +} + +func createMisspellReplacer(settings *config.MisspellSettings) (*misspell.Replacer, error) { + replacer := &misspell.Replacer{ + Replacements: misspell.DictMain, + } + + // Figure out regional variations + switch strings.ToUpper(settings.Locale) { + case "": + // nothing + case "US": + replacer.AddRuleList(misspell.DictAmerican) + case "UK", "GB": + replacer.AddRuleList(misspell.DictBritish) + case "NZ", "AU", "CA": + return nil, fmt.Errorf("unknown locale: %q", settings.Locale) + } + + err := appendExtraWords(replacer, settings.ExtraWords) if err != nil { - return nil, fmt.Errorf("can't get file %s contents: %s", fileName, err) + return nil, fmt.Errorf("process extra words: %w", err) } - // use r.Replace, not r.ReplaceGo because r.ReplaceGo doesn't find - // issues inside strings: it searches only inside comments. r.Replace - // searches all words: it treats input as a plain text. A standalone misspell - // tool uses r.Replace by default. - _, diffs := r.Replace(string(fileContent)) + if len(settings.IgnoreWords) != 0 { + replacer.RemoveRule(settings.IgnoreWords) + } + + // It can panic. + replacer.Compile() + + return replacer, nil +} + +func runMisspellOnFile(lintCtx *linter.Context, filename string, replacer *misspell.Replacer, mode string) ([]result.Issue, error) { + fileContent, err := lintCtx.FileCache.GetFileBytes(filename) + if err != nil { + return nil, fmt.Errorf("can't get file %s contents: %w", filename, err) + } + + // `r.ReplaceGo` doesn't find issues inside strings: it searches only inside comments. + // `r.Replace` searches all words: it treats input as a plain text. + // The standalone misspell tool uses `r.Replace` by default. 
+ var replace func(input string) (string, []misspell.Diff) + switch strings.ToLower(mode) { + case "restricted": + replace = replacer.ReplaceGo + default: + replace = replacer.Replace + } + + _, diffs := replace(string(fileContent)) + + var res []result.Issue + for _, diff := range diffs { text := fmt.Sprintf("`%s` is a misspelling of `%s`", diff.Original, diff.Corrected) + pos := token.Position{ - Filename: fileName, + Filename: filename, Line: diff.Line, Column: diff.Column + 1, } + replacement := &result.Replacement{ Inline: &result.InlineFix{ StartCol: diff.Column, @@ -52,81 +160,29 @@ func runMisspellOnFile(fileName string, r *misspell.Replacer, lintCtx *linter.Co return res, nil } -const misspellName = "misspell" +func appendExtraWords(replacer *misspell.Replacer, extraWords []config.MisspellExtraWords) error { + if len(extraWords) == 0 { + return nil + } -func NewMisspell() *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - var ruleErr error + extra := make([]string, 0, len(extraWords)*2) - analyzer := &analysis.Analyzer{ - Name: misspellName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - misspellName, - "Finds commonly misspelled English words in comments", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - r := misspell.Replacer{ - Replacements: misspell.DictMain, + for _, word := range extraWords { + if word.Typo == "" || word.Correction == "" { + return fmt.Errorf("typo (%q) and correction (%q) fields should not be empty", word.Typo, word.Correction) } - // Figure out regional variations - settings := lintCtx.Settings().Misspell - locale := settings.Locale - switch strings.ToUpper(locale) { - case "": - // nothing - case "US": - r.AddRuleList(misspell.DictAmerican) - case "UK", "GB": - r.AddRuleList(misspell.DictBritish) - case "NZ", "AU", "CA": - ruleErr = fmt.Errorf("unknown locale: %q", locale) + if strings.ContainsFunc(word.Typo, func(r rune) bool { return !unicode.IsLetter(r) }) { + return fmt.Errorf("the word %q in the 'typo' field should only contain letters", word.Typo) } - - if ruleErr == nil { - if len(settings.IgnoreWords) != 0 { - r.RemoveRule(settings.IgnoreWords) - } - - r.Compile() + if strings.ContainsFunc(word.Correction, func(r rune) bool { return !unicode.IsLetter(r) }) { + return fmt.Errorf("the word %q in the 'correction' field should only contain letters", word.Correction) } - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - if ruleErr != nil { - return nil, ruleErr - } - - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } - - var res []goanalysis.Issue - for _, f := range fileNames { - issues, err := runMisspellOnFile(f, &r, lintCtx) - if err != nil { - return nil, err - } - for i := range issues { - res = append(res, goanalysis.NewIssue(&issues[i], pass)) - } - } - if len(res) == 0 { - return nil, nil - } + extra = append(extra, strings.ToLower(word.Typo), strings.ToLower(word.Correction)) + } - mu.Lock() - resIssues = append(resIssues, res...) 
- mu.Unlock() + replacer.AddRuleList(extra) - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag.go new file mode 100644 index 0000000000..72d9195828 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "go-simpler.org/musttag" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewMustTag(setting *config.MustTagSettings) *goanalysis.Linter { + var funcs []musttag.Func + + if setting != nil { + for _, fn := range setting.Functions { + funcs = append(funcs, musttag.Func{ + Name: fn.Name, + Tag: fn.Tag, + ArgPos: fn.ArgPos, + }) + } + } + + a := musttag.New(funcs...) + + return goanalysis. + NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil). + WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go index 86735a51ad..6153860fb4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go @@ -1,123 +1,25 @@ package golinters import ( - "fmt" - "go/ast" - "go/token" - "sync" - + "github.com/alexkohler/nakedret/v2" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) -type nakedretVisitor struct { - maxLength int - f *token.FileSet - issues []result.Issue -} - -func (v *nakedretVisitor) processFuncDecl(funcDecl *ast.FuncDecl) { - file := v.f.File(funcDecl.Pos()) - functionLineLength := file.Position(funcDecl.End()).Line - file.Position(funcDecl.Pos()).Line - - // Scan the body for usage of the named returns - for _, stmt := range funcDecl.Body.List { - s, ok := stmt.(*ast.ReturnStmt) - if !ok { - continue - } - - if len(s.Results) != 0 { - continue - } - - file := v.f.File(s.Pos()) - if file == nil || functionLineLength <= v.maxLength { - continue - } - if funcDecl.Name == nil { - continue - } - - v.issues = append(v.issues, result.Issue{ - FromLinter: nakedretName, - Text: fmt.Sprintf("naked return in func `%s` with %d lines of code", - funcDecl.Name.Name, functionLineLength), - Pos: v.f.Position(s.Pos()), - }) - } -} - -func (v *nakedretVisitor) Visit(node ast.Node) ast.Visitor { - funcDecl, ok := node.(*ast.FuncDecl) - if !ok { - return v - } - - var namedReturns []*ast.Ident - - // We've found a function - if funcDecl.Type != nil && funcDecl.Type.Results != nil { - for _, field := range funcDecl.Type.Results.List { - for _, ident := range field.Names { - if ident != nil { - namedReturns = append(namedReturns, ident) - } - } - } - } - - if len(namedReturns) == 0 || funcDecl.Body == nil { - return v +func NewNakedret(settings *config.NakedretSettings) *goanalysis.Linter { + var maxLines int + if settings != nil { + maxLines = settings.MaxFuncLines } - v.processFuncDecl(funcDecl) - return v -} - -const nakedretName = "nakedret" + a := nakedret.NakedReturnAnalyzer(uint(maxLines)) -func NewNakedret() *goanalysis.Linter { - var mu 
sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: nakedretName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } return goanalysis.NewLinter( - nakedretName, - "Finds naked returns in functions greater than a specified function length", - []*analysis.Analyzer{analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []goanalysis.Issue - for _, file := range pass.Files { - v := nakedretVisitor{ - maxLength: lintCtx.Settings().Nakedret.MaxFuncLines, - f: pass.Fset, - } - ast.Walk(&v, file) - for i := range v.issues { - res = append(res, goanalysis.NewIssue(&v.issues[i], pass)) - } - } - - if len(res) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, res...) - mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif.go index 0998a8ce2f..4125ad9afc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif.go @@ -7,6 +7,7 @@ import ( "github.com/nakabonne/nestif" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -14,52 +15,64 @@ import ( const nestifName = "nestif" -func NewNestif() *goanalysis.Linter { +func NewNestif(settings *config.NestifSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: goanalysis.TheOnlyAnalyzerName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - nestifName, - "Reports deeply nested if statements", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - checker := &nestif.Checker{ - MinComplexity: lintCtx.Settings().Nestif.MinComplexity, - } - var issues []nestif.Issue - for _, f := range pass.Files { - issues = append(issues, checker.Check(f, pass.Fset)...) - } + Run: func(pass *analysis.Pass) (any, error) { + issues := runNestIf(pass, settings) + if len(issues) == 0 { return nil, nil } - sort.SliceStable(issues, func(i, j int) bool { - return issues[i].Complexity > issues[j].Complexity - }) - - res := make([]goanalysis.Issue, 0, len(issues)) - for _, i := range issues { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: i.Pos, - Text: i.Message, - FromLinter: nestifName, - }, pass)) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + nestifName, + "Reports deeply nested if statements", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) []goanalysis.Issue { + checker := &nestif.Checker{ + MinComplexity: settings.MinComplexity, + } + + var lintIssues []nestif.Issue + for _, f := range pass.Files { + lintIssues = append(lintIssues, checker.Check(f, pass.Fset)...) + } + + if len(lintIssues) == 0 { + return nil + } + + sort.SliceStable(lintIssues, func(i, j int) bool { + return lintIssues[i].Complexity > lintIssues[j].Complexity + }) + + issues := make([]goanalysis.Issue, 0, len(lintIssues)) + for _, i := range lintIssues { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: i.Pos, + Text: i.Message, + FromLinter: nestifName, + }, pass)) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilerr.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilerr.go index d8a9a613ef..2ea16f2f39 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilerr.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilerr.go @@ -9,6 +9,7 @@ import ( func NewNilErr() *goanalysis.Linter { a := nilerr.Analyzer + return goanalysis.NewLinter( a.Name, "Finds the code that returns nil even if it checks that the error is not nil.", diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil.go new file mode 100644 index 0000000000..804557b76d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil.go @@ -0,0 +1,30 @@ +package golinters + +import ( + "strings" + + "github.com/Antonboom/nilnil/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewNilNil(cfg *config.NilNilSettings) *goanalysis.Linter { + a := analyzer.New() + + cfgMap := make(map[string]map[string]any) + if cfg != nil && len(cfg.CheckedTypes) != 0 { + cfgMap[a.Name] = map[string]any{ + "checked-types": strings.Join(cfg.CheckedTypes, ","), + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfgMap, + ). 
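// Editor's aside, an assumption about the surrounding machinery rather than
// part of the patch: the config map handed to goanalysis.NewLinter is keyed
// by analyzer name and then by flag name, so the "checked-types" entry built
// above appears to reach nilnil the same way -checked-types=ptr,func would
// on a standalone analyzer binary.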
+ WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go index 3b661c64c8..a359548f42 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go @@ -4,16 +4,24 @@ import ( "github.com/ssgreg/nlreturn/v2/pkg/nlreturn" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" ) -func NewNLReturn() *goanalysis.Linter { +func NewNLReturn(settings *config.NlreturnSettings) *goanalysis.Linter { + a := nlreturn.NewAnalyzer() + + cfg := map[string]map[string]any{} + if settings != nil { + cfg[a.Name] = map[string]any{ + "block-size": settings.BlockSize, + } + } + return goanalysis.NewLinter( - "nlreturn", + a.Name, "nlreturn checks for a new line before return and branch statements to increase code clarity", - []*analysis.Analyzer{ - nlreturn.NewAnalyzer(), - }, - nil, + []*analysis.Analyzer{a}, + cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go index b5c4a4be24..cff9c97dcf 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go @@ -8,14 +8,12 @@ import ( ) func NewNoctx() *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - noctx.Analyzer, - } + a := noctx.Analyzer return goanalysis.NewLinter( - "noctx", - "noctx finds sending http request without context.Context", - analyzers, + a.Name, + "Finds sending http request without context.Context", + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint.go index 889cff864c..7000620d19 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint.go @@ -7,87 +7,98 @@ import ( "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/golinters/nolintlint" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -const NolintlintName = "nolintlint" +const NoLintLintName = "nolintlint" -func NewNoLintLint() *goanalysis.Linter { +func NewNoLintLint(settings *config.NoLintLintSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: NolintlintName, + Name: NoLintLintName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - NolintlintName, - "Reports ill-formed or insufficient nolint directives", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var needs nolintlint.Needs - settings := lintCtx.Settings().NoLintLint - if settings.RequireExplanation { - needs |= nolintlint.NeedsExplanation - } - if !settings.AllowLeadingSpace { - needs |= nolintlint.NeedsMachineOnly - } - if settings.RequireSpecific { - needs |= nolintlint.NeedsSpecific - } - if !settings.AllowUnused { - needs |= 
nolintlint.NeedsUnused - } - - lnt, err := nolintlint.NewLinter(needs, settings.AllowNoExplanation) + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runNoLintLint(pass, settings) if err != nil { return nil, err } - nodes := make([]ast.Node, 0, len(pass.Files)) - for _, n := range pass.Files { - nodes = append(nodes, n) - } - issues, err := lnt.Run(pass.Fset, nodes...) - if err != nil { - return nil, fmt.Errorf("linter failed to run: %s", err) - } - var res []goanalysis.Issue - for _, i := range issues { - expectNoLint := false - var expectedNolintLinter string - if ii, ok := i.(nolintlint.UnusedCandidate); ok { - expectedNolintLinter = ii.ExpectedLinter - expectNoLint = true - } - issue := &result.Issue{ - FromLinter: NolintlintName, - Text: i.Details(), - Pos: i.Position(), - ExpectNoLint: expectNoLint, - ExpectedNoLintLinter: expectedNolintLinter, - Replacement: i.Replacement(), - } - res = append(res, goanalysis.NewIssue(issue, pass)) - } - - if len(res) == 0 { + if len(issues) == 0 { return nil, nil } mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + NoLintLintName, + "Reports ill-formed or insufficient nolint directives", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runNoLintLint(pass *analysis.Pass, settings *config.NoLintLintSettings) ([]goanalysis.Issue, error) { + var needs nolintlint.Needs + if settings.RequireExplanation { + needs |= nolintlint.NeedsExplanation + } + if settings.RequireSpecific { + needs |= nolintlint.NeedsSpecific + } + if !settings.AllowUnused { + needs |= nolintlint.NeedsUnused + } + + lnt, err := nolintlint.NewLinter(needs, settings.AllowNoExplanation) + if err != nil { + return nil, err + } + + nodes := make([]ast.Node, 0, len(pass.Files)) + for _, n := range pass.Files { + nodes = append(nodes, n) + } + + lintIssues, err := lnt.Run(pass.Fset, nodes...) 
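// Editor's sketch of what Run yields here, under assumptions drawn from the
// README and issue types below: for a file containing
//
//	import "crypto/md5" //nolint
//
// a Linter built with NeedsExplanation produces one NoExplanation issue
// whose Details() reads:
//
//	directive `//nolint` should provide explanation such as `//nolint // this is why`
//
// Each such lint issue is converted into one result.Issue in the loop below.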
+ if err != nil { + return nil, fmt.Errorf("linter failed to run: %w", err) + } + + var issues []goanalysis.Issue + + for _, i := range lintIssues { + expectNoLint := false + var expectedNolintLinter string + if ii, ok := i.(nolintlint.UnusedCandidate); ok { + expectedNolintLinter = ii.ExpectedLinter + expectNoLint = true + } + + issue := &result.Issue{ + FromLinter: NoLintLintName, + Text: i.Details(), + Pos: i.Position(), + ExpectNoLint: expectNoLint, + ExpectedNoLintLinter: expectedNolintLinter, + Replacement: i.Replacement(), + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/README.md b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/README.md index 3d440d5a5b..1643df7a57 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/README.md +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/README.md @@ -1,17 +1,17 @@ # nolintlint -nolintlint is a Go static analysis tool to find ill-formed or insufficiently explained `// nolint` directives for golangci -(or any other linter, using th ) +nolintlint is a Go static analysis tool to find ill-formed or insufficiently explained `//nolint` directives for golangci-lint +(or any other linter, using this package) ## Purpose To ensure that lint exceptions have explanations. Consider the case below: ```Go -import "crypto/md5" //nolint +import "crypto/md5" //nolint:all func hash(data []byte) []byte { - return md5.New().Sum(data) //nolint + return md5.New().Sum(data) //nolint:all } ``` @@ -27,5 +27,5 @@ func hash(data []byte) []byte { ``` `nolintlint` can also identify cases where you may have written `// nolint`. Finally, `nolintlint` can also enforce that you -use the machine-readable nolint directive format `//nolint` and that you mention what linter is being suppressed, as shown above when we write `//nolint:gosec`. +use the machine-readable nolint directive format `//nolint:all` and that you mention what linter is being suppressed, as shown above when we write `//nolint:gosec`. diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go index 4466cab41f..1bce5ef5d2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go @@ -19,12 +19,12 @@ type BaseIssue struct { replacement *result.Replacement } -//nolint:gocritic // TODO must be change in the future. +//nolint:gocritic // TODO(ldez) must be changed in the future. func (b BaseIssue) Position() token.Position { return b.position } -//nolint:gocritic // TODO must be change in the future. +//nolint:gocritic // TODO(ldez) must be changed in the future. func (b BaseIssue) Replacement() *result.Replacement { return b.replacement } @@ -33,53 +33,49 @@ type ExtraLeadingSpace struct { BaseIssue } -//nolint:gocritic // TODO must be change in the future. +//nolint:gocritic // TODO(ldez) must be changed in the future. func (i ExtraLeadingSpace) Details() string { return fmt.Sprintf("directive `%s` should not have more than one leading space", i.fullDirective) } -//nolint:gocritic // TODO must be change in the future. func (i ExtraLeadingSpace) String() string { return toString(i) } type NotMachine struct { BaseIssue } -//nolint:gocritic // TODO must be change in the future.
+//nolint:gocritic // TODO(ldez) must be changed in the future. func (i NotMachine) Details() string { expected := i.fullDirective[:2] + strings.TrimLeftFunc(i.fullDirective[2:], unicode.IsSpace) return fmt.Sprintf("directive `%s` should be written without leading space as `%s`", i.fullDirective, expected) } -//nolint:gocritic // TODO must be change in the future. func (i NotMachine) String() string { return toString(i) } type NotSpecific struct { BaseIssue } -//nolint:gocritic // TODO must be change in the future. +//nolint:gocritic // TODO(ldez) must be changed in the future. func (i NotSpecific) Details() string { return fmt.Sprintf("directive `%s` should mention specific linter such as `%s:my-linter`", i.fullDirective, i.directiveWithOptionalLeadingSpace) } -//nolint:gocritic // TODO must be change in the future. func (i NotSpecific) String() string { return toString(i) } type ParseError struct { BaseIssue } -//nolint:gocritic // TODO must be change in the future. +//nolint:gocritic // TODO(ldez) must be changed in the future. func (i ParseError) Details() string { return fmt.Sprintf("directive `%s` should match `%s[:] [// ]`", i.fullDirective, i.directiveWithOptionalLeadingSpace) } -//nolint:gocritic // TODO must be change in the future. func (i ParseError) String() string { return toString(i) } type NoExplanation struct { @@ -87,13 +83,12 @@ type NoExplanation struct { fullDirectiveWithoutExplanation string } -//nolint:gocritic // TODO must be change in the future. +//nolint:gocritic // TODO(ldez) must be changed in the future. func (i NoExplanation) Details() string { return fmt.Sprintf("directive `%s` should provide explanation such as `%s // this is why`", i.fullDirective, i.fullDirectiveWithoutExplanation) } -//nolint:gocritic // TODO must be change in the future. func (i NoExplanation) String() string { return toString(i) } type UnusedCandidate struct { @@ -101,7 +96,7 @@ type UnusedCandidate struct { ExpectedLinter string } -//nolint:gocritic // TODO must be change in the future. +//nolint:gocritic // TODO(ldez) must be changed in the future. func (i UnusedCandidate) Details() string { details := fmt.Sprintf("directive `%s` is unused", i.fullDirective) if i.ExpectedLinter != "" { @@ -110,11 +105,10 @@ func (i UnusedCandidate) Details() string { return details } -//nolint:gocritic // TODO must be change in the future.
func (i UnusedCandidate) String() string { return toString(i) } -func toString(i Issue) string { - return fmt.Sprintf("%s at %s", i.Details(), i.Position()) +func toString(issue Issue) string { + return fmt.Sprintf("%s at %s", issue.Details(), issue.Position()) } type Issue interface { @@ -152,15 +146,17 @@ func NewLinter(needs Needs, excludes []string) (*Linter, error) { } return &Linter{ - needs: needs, + needs: needs | NeedsMachineOnly, excludeByLinter: excludeByName, }, nil } -var leadingSpacePattern = regexp.MustCompile(`^//(\s*)`) -var trailingBlankExplanation = regexp.MustCompile(`\s*(//\s*)?$`) +var ( + leadingSpacePattern = regexp.MustCompile(`^//(\s*)`) + trailingBlankExplanation = regexp.MustCompile(`\s*(//\s*)?$`) +) -//nolint:funlen,gocyclo +//nolint:funlen,gocyclo // the function is going to be refactored in the future func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { var issues []Issue @@ -184,12 +180,14 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { leadingSpace = leadingSpaceMatches[1] } - directiveWithOptionalLeadingSpace := comment.Text - if len(leadingSpace) > 0 { - split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], "//") - directiveWithOptionalLeadingSpace = "// " + strings.TrimSpace(split[1]) + directiveWithOptionalLeadingSpace := "//" + if leadingSpace != "" { + directiveWithOptionalLeadingSpace += " " } + split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], "//") + directiveWithOptionalLeadingSpace += strings.TrimSpace(split[1]) + pos := fset.Position(comment.Pos()) end := fset.Position(comment.End()) @@ -199,8 +197,8 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { position: pos, } - // check for, report and eliminate leading spaces so we can check for other issues - if len(leadingSpace) > 0 { + // check for, report and eliminate leading spaces, so we can check for other issues + if leadingSpace != "" { removeWhitespace := &result.Replacement{ Inline: &result.InlineFix{ StartCol: pos.Column + 1, @@ -227,8 +225,9 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { } lintersText, explanation := fullMatches[1], fullMatches[2] + var linters []string - if len(lintersText) > 0 { + if lintersText != "" && !strings.HasPrefix(lintersText, "all") { lls := strings.Split(lintersText, ",") linters = make([]string, 0, len(lls)) rangeStart := (pos.Column - 1) + len("//") + len(leadingSpace) + len("nolint:") @@ -281,7 +280,7 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == "//") { needsExplanation := len(linters) == 0 // if no linters are mentioned, we must have explanation - // otherwise, check if we are excluding all of the mentioned linters + // otherwise, check if we are excluding all the mentioned linters for _, ll := range linters { if !l.excludeByLinter[ll] { // if a linter does require explanation needsExplanation = true diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nonamedreturns.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nonamedreturns.go new file mode 100644 index 0000000000..7856f6d613 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nonamedreturns.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "github.com/firefart/nonamedreturns/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + 
"github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewNoNamedReturns(settings *config.NoNamedReturnsSettings) *goanalysis.Linter { + a := analyzer.Analyzer + + var cfg map[string]map[string]any + if settings != nil { + cfg = map[string]map[string]any{ + a.Name: { + analyzer.FlagReportErrorInDefer: settings.ReportErrorInDefer, + }, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nosprintfhostport.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nosprintfhostport.go new file mode 100644 index 0000000000..a63b9bb5f5 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nosprintfhostport.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/stbenjam/no-sprintf-host-port/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewNoSprintfHostPort() *goanalysis.Linter { + a := analyzer.Analyzer + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/paralleltest.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/paralleltest.go index 3b784baf5a..8215619c73 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/paralleltest.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/paralleltest.go @@ -4,18 +4,31 @@ import ( "github.com/kunwardeep/paralleltest/pkg/paralleltest" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" ) -func NewParallelTest() *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - paralleltest.NewAnalyzer(), +func NewParallelTest(settings *config.ParallelTestSettings) *goanalysis.Linter { + a := paralleltest.NewAnalyzer() + + var cfg map[string]map[string]any + if settings != nil { + d := map[string]any{ + "i": settings.IgnoreMissing, + "ignoremissingsubtests": settings.IgnoreMissingSubtests, + } + + if config.IsGoGreaterThanOrEqual(settings.Go, "1.22") { + d["ignoreloopVar"] = true + } + + cfg = map[string]map[string]any{a.Name: d} } return goanalysis.NewLinter( - "paralleltest", - "paralleltest detects missing usage of t.Parallel() method in your Go test", - analyzers, - nil, - ).WithLoadMode(goanalysis.LoadModeSyntax) + a.Name, + "Detects missing usage of t.Parallel() method in your Go test", + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/perfsprint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/perfsprint.go new file mode 100644 index 0000000000..6fe315fdea --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/perfsprint.go @@ -0,0 +1,32 @@ +package golinters + +import ( + "github.com/catenacyber/perfsprint/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewPerfSprint(settings *config.PerfSprintSettings) *goanalysis.Linter { + a := analyzer.New() + + cfg := map[string]map[string]any{ + a.Name: {"fiximports": false}, + } + + if settings != nil { + cfg[a.Name]["int-conversion"] = settings.IntConversion + cfg[a.Name]["err-error"] = settings.ErrError + 
cfg[a.Name]["errorf"] = settings.ErrorF + cfg[a.Name]["sprintf1"] = settings.SprintF1 + cfg[a.Name]["strconcat"] = settings.StrConcat + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go index 3d06cf1472..4777614939 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go @@ -7,6 +7,7 @@ import ( "github.com/alexkohler/prealloc/pkg" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -14,44 +15,50 @@ import ( const preallocName = "prealloc" -func NewPrealloc() *goanalysis.Linter { +func NewPreAlloc(settings *config.PreallocSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: preallocName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - preallocName, - "Finds slice declarations that could potentially be preallocated", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - s := &lintCtx.Settings().Prealloc - - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []goanalysis.Issue - hints := pkg.Check(pass.Files, s.Simple, s.RangeLoops, s.ForLoops) - for _, hint := range hints { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: pass.Fset.Position(hint.Pos), - Text: fmt.Sprintf("Consider preallocating %s", formatCode(hint.DeclaredSliceName, lintCtx.Cfg)), - FromLinter: preallocName, - }, pass)) - } + Run: func(pass *analysis.Pass) (any, error) { + issues := runPreAlloc(pass, settings) - if len(res) == 0 { + if len(issues) == 0 { return nil, nil } mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + preallocName, + "Finds slice declarations that could potentially be pre-allocated", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runPreAlloc(pass *analysis.Pass, settings *config.PreallocSettings) []goanalysis.Issue { + var issues []goanalysis.Issue + + hints := pkg.Check(pass.Files, settings.Simple, settings.RangeLoops, settings.ForLoops) + + for _, hint := range hints { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: pass.Fset.Position(hint.Pos), + Text: fmt.Sprintf("Consider pre-allocating %s", formatCode(hint.DeclaredSliceName, nil)), + FromLinter: preallocName, + }, pass)) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/predeclared.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/predeclared.go index caccd48239..d3c25e274b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/predeclared.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/predeclared.go @@ -11,9 +11,9 @@ import ( func NewPredeclared(settings *config.PredeclaredSettings) *goanalysis.Linter { a := predeclared.Analyzer - var cfg map[string]map[string]interface{} + var cfg map[string]map[string]any if settings != nil { - cfg = map[string]map[string]interface{}{ + cfg = map[string]map[string]any{ a.Name: { predeclared.IgnoreFlag: settings.Ignore, predeclared.QualifiedFlag: settings.Qualified, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/promlinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/promlinter.go index 4fba3d2747..381c57489d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/promlinter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/promlinter.go @@ -7,57 +7,71 @@ import ( "github.com/yeya24/promlinter" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func NewPromlinter() *goanalysis.Linter { +const promlinterName = "promlinter" + +func NewPromlinter(settings *config.PromlinterSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - const linterName = "promlinter" - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, + var promSettings promlinter.Setting + if settings != nil { + promSettings = promlinter.Setting{ + Strict: settings.Strict, + DisabledLintFuncs: settings.DisabledLinters, + } } - return goanalysis.NewLinter( - linterName, - "Check Prometheus metrics naming via promlint", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - strict := lintCtx.Cfg.LintersSettings.Promlinter.Strict - disabledLinters := lintCtx.Cfg.LintersSettings.Promlinter.DisabledLinters - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - issues := promlinter.RunLint(pass.Fset, pass.Files, promlinter.Setting{ - Strict: strict, - DisabledLintFuncs: disabledLinters, - }) + analyzer := &analysis.Analyzer{ + Name: promlinterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues := runPromLinter(pass, promSettings) if len(issues) == 0 { 
return nil, nil } - res := make([]goanalysis.Issue, len(issues)) - for k, i := range issues { - issue := result.Issue{ - Pos: i.Pos, - Text: fmt.Sprintf("Metric: %s Error: %s", i.Metric, i.Text), - FromLinter: linterName, - } - - res[k] = goanalysis.NewIssue(&issue, pass) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + promlinterName, + "Check Prometheus metrics naming via promlint", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runPromLinter(pass *analysis.Pass, promSettings promlinter.Setting) []goanalysis.Issue { + lintIssues := promlinter.RunLint(pass.Fset, pass.Files, promSettings) + + if len(lintIssues) == 0 { + return nil + } + + issues := make([]goanalysis.Issue, len(lintIssues)) + for k, i := range lintIssues { + issue := result.Issue{ + Pos: i.Pos, + Text: fmt.Sprintf("Metric: %s Error: %s", i.Metric, i.Text), + FromLinter: promlinterName, + } + + issues[k] = goanalysis.NewIssue(&issue, pass) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter.go new file mode 100644 index 0000000000..9a5e7b4db8 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter.go @@ -0,0 +1,74 @@ +package golinters + +import ( + "sync" + + "github.com/ghostiam/protogetter" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewProtoGetter(settings *config.ProtoGetterSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + var cfg protogetter.Config + if settings != nil { + cfg = protogetter.Config{ + SkipGeneratedBy: settings.SkipGeneratedBy, + SkipFiles: settings.SkipFiles, + SkipAnyGenerated: settings.SkipAnyGenerated, + ReplaceFirstArgInAppend: settings.ReplaceFirstArgInAppend, + } + } + cfg.Mode = protogetter.GolangciLintMode + + a := protogetter.NewAnalyzer(&cfg) + a.Run = func(pass *analysis.Pass) (any, error) { + pgIssues, err := protogetter.Run(pass, &cfg) + if err != nil { + return nil, err + } + + issues := make([]goanalysis.Issue, len(pgIssues)) + for i, issue := range pgIssues { + report := &result.Issue{ + FromLinter: a.Name, + Pos: issue.Pos, + Text: issue.Message, + Replacement: &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: issue.InlineFix.StartCol, + Length: issue.InlineFix.Length, + NewString: issue.InlineFix.NewString, + }, + }, + } + + issues[i] = goanalysis.NewIssue(report, pass) + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/reassign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/reassign.go new file mode 100644 index 0000000000..a6dd670530 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/reassign.go @@ -0,0 +1,32 @@ +package golinters + +import ( + "fmt" + "strings" + + "github.com/curioswitch/go-reassign" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewReassign(settings *config.ReassignSettings) *goanalysis.Linter { + a := reassign.NewAnalyzer() + + var cfg map[string]map[string]any + if settings != nil && len(settings.Patterns) > 0 { + cfg = map[string]map[string]any{ + a.Name: { + reassign.FlagPattern: fmt.Sprintf("^(%s)$", strings.Join(settings.Patterns, "|")), + }, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive.go index 182013c826..fafdedd5d2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive.go @@ -5,15 +5,14 @@ import ( "encoding/json" "fmt" "go/token" - "io/ioutil" + "os" "reflect" + "sync" "github.com/BurntSushi/toml" - "github.com/mgechev/dots" reviveConfig "github.com/mgechev/revive/config" "github.com/mgechev/revive/lint" "github.com/mgechev/revive/rule" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" @@ -25,21 +24,25 @@ import ( const reviveName = "revive" -var reviveDebugf = logutils.Debug("revive") +var reviveDebugf = logutils.Debug(logutils.DebugKeyRevive) -// jsonObject defines a JSON object of an failure +// jsonObject defines a JSON object of a failure type jsonObject struct { Severity lint.Severity lint.Failure `json:",inline"` } // NewRevive returns a new Revive linter. -func NewRevive(cfg *config.ReviveSettings) *goanalysis.Linter { - var issues []goanalysis.Issue +// + +func NewRevive(settings *config.ReviveSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: goanalysis.TheOnlyAnalyzerName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } return goanalysis.NewLinter( @@ -48,78 +51,87 @@ func NewRevive(cfg *config.ReviveSettings) *goanalysis.Linter { []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var files []string - - for _, file := range pass.Files { - files = append(files, pass.Fset.PositionFor(file.Pos(), false).Filename) - } - - conf, err := getReviveConfig(cfg) + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runRevive(lintCtx, pass, settings) if err != nil { return nil, err } - formatter, err := reviveConfig.GetFormatter("json") - if err != nil { - return nil, err + if len(issues) == 0 { + return nil, nil } - revive := lint.New(ioutil.ReadFile) + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() - lintingRules, err := reviveConfig.GetLintingRules(conf) - if err != nil { - return nil, err - } - - packages, err := dots.ResolvePackages(files, []string{}) - if err != nil { - return nil, err - } + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} - failures, err := revive.Lint(packages, lintingRules, *conf) - if err != nil { - return nil, err - } +func runRevive(lintCtx *linter.Context, pass *analysis.Pass, settings *config.ReviveSettings) ([]goanalysis.Issue, error) { + packages := [][]string{getFileNames(pass)} - formatChan := make(chan lint.Failure) - exitChan := make(chan bool) + conf, err := getReviveConfig(settings) + if err != nil { + return nil, err + } - var output string - go func() { - output, err = formatter.Format(formatChan, *conf) - if err != nil { - lintCtx.Log.Errorf("Format error: %v", err) - } - exitChan <- true - }() + formatter, err := reviveConfig.GetFormatter("json") + if err != nil { + return nil, err + } - for f := range failures { - if f.Confidence < conf.Confidence { - continue - } + revive := lint.New(os.ReadFile, settings.MaxOpenFiles) - formatChan <- f - } + lintingRules, err := reviveConfig.GetLintingRules(conf, []lint.Rule{}) + if err != nil { + return nil, err + } - close(formatChan) - <-exitChan + failures, err := revive.Lint(packages, lintingRules, *conf) + if err != nil { + return nil, err + } - var results []jsonObject - err = json.Unmarshal([]byte(output), &results) - if err != nil { - return nil, err - } + formatChan := make(chan lint.Failure) + exitChan := make(chan bool) - for i := range results { - issues = append(issues, reviveToIssue(pass, &results[i])) - } + var output string + go func() { + output, err = formatter.Format(formatChan, *conf) + if err != nil { + lintCtx.Log.Errorf("Format error: %v", err) + } + exitChan <- true + }() - return nil, nil + for f := range failures { + if f.Confidence < conf.Confidence { + continue } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return issues - }).WithLoadMode(goanalysis.LoadModeSyntax) + + formatChan <- f + } + + close(formatChan) + <-exitChan + + var results []jsonObject + err = json.Unmarshal([]byte(output), &results) + if err != nil { + return nil, err + } + + var issues []goanalysis.Issue + for i := range results { + issues = append(issues, reviveToIssue(pass, &results[i])) + } + + return issues, nil } func reviveToIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue { @@ -146,9 +158,10 @@ func reviveToIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue { } // This function mimics the GetConfig function of revive. -// This allow to get default values and right types. +// This allows us to get default values and right types.
// https://github.com/golangci/golangci-lint/issues/1745 -// https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L155 +// https://github.com/mgechev/revive/blob/v1.3.7/config/config.go#L217 +// https://github.com/mgechev/revive/blob/v1.3.7/config/config.go#L169-L174 func getReviveConfig(cfg *config.ReviveSettings) (*lint.Config, error) { conf := defaultConfig() @@ -158,35 +171,44 @@ func getReviveConfig(cfg *config.ReviveSettings) (*lint.Config, error) { err := toml.NewEncoder(buf).Encode(rawRoot) if err != nil { - return nil, errors.Wrap(err, "failed to encode configuration") + return nil, fmt.Errorf("failed to encode configuration: %w", err) } conf = &lint.Config{} - _, err = toml.DecodeReader(buf, conf) + _, err = toml.NewDecoder(buf).Decode(conf) if err != nil { - return nil, errors.Wrap(err, "failed to decode configuration") + return nil, fmt.Errorf("failed to decode configuration: %w", err) } } normalizeConfig(conf) + for k, r := range conf.Rules { + err := r.Initialize() + if err != nil { + return nil, fmt.Errorf("error in config of rule %q: %w", k, err) + } + conf.Rules[k] = r + } + reviveDebugf("revive configuration: %#v", conf) return conf, nil } -func createConfigMap(cfg *config.ReviveSettings) map[string]interface{} { - rawRoot := map[string]interface{}{ +func createConfigMap(cfg *config.ReviveSettings) map[string]any { + rawRoot := map[string]any{ "ignoreGeneratedHeader": cfg.IgnoreGeneratedHeader, "confidence": cfg.Confidence, "severity": cfg.Severity, "errorCode": cfg.ErrorCode, "warningCode": cfg.WarningCode, + "enableAllRules": cfg.EnableAllRules, } - rawDirectives := map[string]map[string]interface{}{} + rawDirectives := map[string]map[string]any{} for _, directive := range cfg.Directives { - rawDirectives[directive.Name] = map[string]interface{}{ + rawDirectives[directive.Name] = map[string]any{ "severity": directive.Severity, } } @@ -195,12 +217,13 @@ func createConfigMap(cfg *config.ReviveSettings) map[string]interface{} { rawRoot["directive"] = rawDirectives } - rawRules := map[string]map[string]interface{}{} + rawRules := map[string]map[string]any{} for _, s := range cfg.Rules { - rawRules[s.Name] = map[string]interface{}{ + rawRules[s.Name] = map[string]any{ "severity": s.Severity, "arguments": safeTomlSlice(s.Arguments), "disabled": s.Disabled, + "exclude": s.Exclude, } } @@ -211,19 +234,19 @@ func createConfigMap(cfg *config.ReviveSettings) map[string]interface{} { return rawRoot } -func safeTomlSlice(r []interface{}) []interface{} { +func safeTomlSlice(r []any) []any { if len(r) == 0 { return nil } - if _, ok := r[0].(map[interface{}]interface{}); !ok { + if _, ok := r[0].(map[any]any); !ok { return r } - var typed []interface{} + var typed []any for _, elt := range r { - item := map[string]interface{}{} - for k, v := range elt.(map[interface{}]interface{}) { + item := map[string]any{} + for k, v := range elt.(map[any]any) { item[k.(string)] = v } @@ -234,7 +257,7 @@ func safeTomlSlice(r []interface{}) []interface{} { } // This element is not exported by revive, so we need copy the code. 
-// Extracted from https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L15 +// Extracted from https://github.com/mgechev/revive/blob/v1.3.7/config/config.go#L15 var defaultRules = []lint.Rule{ &rule.VarDeclarationsRule{}, &rule.PackageCommentsRule{}, @@ -243,7 +266,6 @@ var defaultRules = []lint.Rule{ &rule.ExportedRule{}, &rule.VarNamingRule{}, &rule.IndentErrorFlowRule{}, - &rule.IfReturnRule{}, &rule.RangeRule{}, &rule.ErrorfRule{}, &rule.ErrorNamingRule{}, @@ -255,14 +277,102 @@ var defaultRules = []lint.Rule{ &rule.TimeNamingRule{}, &rule.ContextKeysType{}, &rule.ContextAsArgumentRule{}, + &rule.EmptyBlockRule{}, + &rule.SuperfluousElseRule{}, + &rule.UnusedParamRule{}, + &rule.UnreachableCodeRule{}, + &rule.RedefinesBuiltinIDRule{}, } +var allRules = append([]lint.Rule{ + &rule.ArgumentsLimitRule{}, + &rule.CyclomaticRule{}, + &rule.FileHeaderRule{}, + &rule.ConfusingNamingRule{}, + &rule.GetReturnRule{}, + &rule.ModifiesParamRule{}, + &rule.ConfusingResultsRule{}, + &rule.DeepExitRule{}, + &rule.AddConstantRule{}, + &rule.FlagParamRule{}, + &rule.UnnecessaryStmtRule{}, + &rule.StructTagRule{}, + &rule.ModifiesValRecRule{}, + &rule.ConstantLogicalExprRule{}, + &rule.BoolLiteralRule{}, + &rule.ImportsBlocklistRule{}, + &rule.FunctionResultsLimitRule{}, + &rule.MaxPublicStructsRule{}, + &rule.RangeValInClosureRule{}, + &rule.RangeValAddress{}, + &rule.WaitGroupByValueRule{}, + &rule.AtomicRule{}, + &rule.EmptyLinesRule{}, + &rule.LineLengthLimitRule{}, + &rule.CallToGCRule{}, + &rule.DuplicatedImportsRule{}, + &rule.ImportShadowingRule{}, + &rule.BareReturnRule{}, + &rule.UnusedReceiverRule{}, + &rule.UnhandledErrorRule{}, + &rule.CognitiveComplexityRule{}, + &rule.StringOfIntRule{}, + &rule.StringFormatRule{}, + &rule.EarlyReturnRule{}, + &rule.UnconditionalRecursionRule{}, + &rule.IdenticalBranchesRule{}, + &rule.DeferRule{}, + &rule.UnexportedNamingRule{}, + &rule.FunctionLength{}, + &rule.NestedStructs{}, + &rule.UselessBreak{}, + &rule.UncheckedTypeAssertionRule{}, + &rule.TimeEqualRule{}, + &rule.BannedCharsRule{}, + &rule.OptimizeOperandsOrderRule{}, + &rule.UseAnyRule{}, + &rule.DataRaceRule{}, + &rule.CommentSpacingsRule{}, + &rule.IfReturnRule{}, + &rule.RedundantImportAlias{}, + &rule.ImportAliasNamingRule{}, + &rule.EnforceMapStyleRule{}, + &rule.EnforceRepeatedArgTypeStyleRule{}, + &rule.EnforceSliceStyleRule{}, + &rule.MaxControlNestingRule{}, +}, defaultRules...) + +const defaultConfidence = 0.8 + // This element is not exported by revive, so we need copy the code. -// Extracted from https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L133 +// Extracted from https://github.com/mgechev/revive/blob/v1.1.4/config/config.go#L145 func normalizeConfig(cfg *lint.Config) { + // NOTE(ldez): this custom section for golangci-lint should be kept. 
+ // --- if cfg.Confidence == 0 { - cfg.Confidence = 0.8 + cfg.Confidence = defaultConfidence } + if cfg.Severity == "" { + cfg.Severity = lint.SeverityWarning + } + // --- + + if len(cfg.Rules) == 0 { + cfg.Rules = map[string]lint.RuleConfig{} + } + if cfg.EnableAllRules { + // Add to the configuration all rules not yet present in it + for _, r := range allRules { + ruleName := r.Name() + _, alreadyInConf := cfg.Rules[ruleName] + if alreadyInConf { + continue + } + // Add the rule with an empty conf for + cfg.Rules[ruleName] = lint.RuleConfig{} + } + } + severity := cfg.Severity if severity != "" { for k, v := range cfg.Rules { @@ -281,10 +391,10 @@ func normalizeConfig(cfg *lint.Config) { } // This element is not exported by revive, so we need copy the code. -// Extracted from https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L182 +// Extracted from https://github.com/mgechev/revive/blob/v1.1.4/config/config.go#L214 func defaultConfig() *lint.Config { defaultConfig := lint.Config{ - Confidence: 0.0, + Confidence: defaultConfidence, Severity: lint.SeverityWarning, Rules: map[string]lint.RuleConfig{}, } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowerrcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowerrcheck.go deleted file mode 100644 index d4c89d3829..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowerrcheck.go +++ /dev/null @@ -1,23 +0,0 @@ -package golinters - -import ( - "github.com/jingyugao/rowserrcheck/passes/rowserr" - "golang.org/x/tools/go/analysis" - - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" -) - -func NewRowsErrCheck() *goanalysis.Linter { - analyzer := rowserr.NewAnalyzer() - return goanalysis.NewLinter( - "rowserrcheck", - "checks whether Err of rows is checked successfully", - []*analysis.Analyzer{analyzer}, - nil, - ).WithLoadMode(goanalysis.LoadModeTypesInfo). - WithContextSetter(func(lintCtx *linter.Context) { - pkgs := lintCtx.Settings().RowsErrCheck.Packages - analyzer.Run = rowserr.NewRun(pkgs...) - }) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowserrcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowserrcheck.go new file mode 100644 index 0000000000..d67efd0692 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowserrcheck.go @@ -0,0 +1,25 @@ +package golinters + +import ( + "github.com/jingyugao/rowserrcheck/passes/rowserr" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewRowsErrCheck(settings *config.RowsErrCheckSettings) *goanalysis.Linter { + var pkgs []string + if settings != nil { + pkgs = settings.Packages + } + + a := rowserr.NewAnalyzer(pkgs...) 
+ + return goanalysis.NewLinter( + a.Name, + "checks whether Rows.Err of rows is checked successfully", + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go deleted file mode 100644 index ba3921e196..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go +++ /dev/null @@ -1,177 +0,0 @@ -package golinters - -import ( - "fmt" - "go/ast" - "go/token" - "sync" - - "golang.org/x/tools/go/analysis" - - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" -) - -const scopelintName = "scopelint" - -func NewScopelint() *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: scopelintName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - scopelintName, - "Scopelint checks for unpinned variables in go programs", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []result.Issue - for _, file := range pass.Files { - n := Node{ - fset: pass.Fset, - DangerObjects: map[*ast.Object]int{}, - UnsafeObjects: map[*ast.Object]int{}, - SkipFuncs: map[*ast.FuncLit]int{}, - issues: &res, - } - ast.Walk(&n, file) - } - - if len(res) == 0 { - return nil, nil - } - - mu.Lock() - for i := range res { - resIssues = append(resIssues, goanalysis.NewIssue(&res[i], pass)) - } - mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -// The code below is copy-pasted from https://github.com/kyoh86/scopelint 92cbe2cc9276abda0e309f52cc9e309d407f174e - -// Node represents a Node being linted. -type Node struct { - fset *token.FileSet - DangerObjects map[*ast.Object]int - UnsafeObjects map[*ast.Object]int - SkipFuncs map[*ast.FuncLit]int - issues *[]result.Issue -} - -// Visit method is invoked for each node encountered by Walk. -// If the result visitor w is not nil, Walk visits each of the children -// of node with the visitor w, followed by a call of w.Visit(nil). -//nolint:gocyclo,gocritic -func (f *Node) Visit(node ast.Node) ast.Visitor { - switch typedNode := node.(type) { - case *ast.ForStmt: - switch init := typedNode.Init.(type) { - case *ast.AssignStmt: - for _, lh := range init.Lhs { - switch tlh := lh.(type) { - case *ast.Ident: - f.UnsafeObjects[tlh.Obj] = 0 - } - } - } - - case *ast.RangeStmt: - // Memory variables declared in range statement - switch k := typedNode.Key.(type) { - case *ast.Ident: - f.UnsafeObjects[k.Obj] = 0 - } - switch v := typedNode.Value.(type) { - case *ast.Ident: - f.UnsafeObjects[v.Obj] = 0 - } - - case *ast.UnaryExpr: - if typedNode.Op == token.AND { - switch ident := typedNode.X.(type) { - case *ast.Ident: - if _, unsafe := f.UnsafeObjects[ident.Obj]; unsafe { - f.errorf(ident, "Using a reference for the variable on range scope %s", formatCode(ident.Name, nil)) - } - } - } - - case *ast.Ident: - if _, obj := f.DangerObjects[typedNode.Obj]; obj { - // It is the naked variable in scope of range statement. 
- f.errorf(node, "Using the variable on range scope %s in function literal", formatCode(typedNode.Name, nil)) - break - } - - case *ast.CallExpr: - // Ignore func literals that'll be called immediately. - switch funcLit := typedNode.Fun.(type) { - case *ast.FuncLit: - f.SkipFuncs[funcLit] = 0 - } - - case *ast.FuncLit: - if _, skip := f.SkipFuncs[typedNode]; !skip { - dangers := map[*ast.Object]int{} - for d := range f.DangerObjects { - dangers[d] = 0 - } - for u := range f.UnsafeObjects { - dangers[u] = 0 - f.UnsafeObjects[u]++ - } - return &Node{ - fset: f.fset, - DangerObjects: dangers, - UnsafeObjects: f.UnsafeObjects, - SkipFuncs: f.SkipFuncs, - issues: f.issues, - } - } - - case *ast.ReturnStmt: - unsafe := map[*ast.Object]int{} - for u := range f.UnsafeObjects { - if f.UnsafeObjects[u] == 0 { - continue - } - unsafe[u] = f.UnsafeObjects[u] - } - return &Node{ - fset: f.fset, - DangerObjects: f.DangerObjects, - UnsafeObjects: unsafe, - SkipFuncs: f.SkipFuncs, - issues: f.issues, - } - } - return f -} - -// The variadic arguments may start with link and category types, -// and must end with a format string and any arguments. -//nolint:interfacer -func (f *Node) errorf(n ast.Node, format string, args ...interface{}) { - pos := f.fset.Position(n.Pos()) - f.errorAtf(pos, format, args...) -} - -func (f *Node) errorAtf(pos token.Position, format string, args ...interface{}) { - *f.issues = append(*f.issues, result.Issue{ - Pos: pos, - Text: fmt.Sprintf(format, args...), - FromLinter: scopelintName, - }) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/sloglint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/sloglint.go new file mode 100644 index 0000000000..67977453e4 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/sloglint.go @@ -0,0 +1,32 @@ +package golinters + +import ( + "go-simpler.org/sloglint" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewSlogLint(settings *config.SlogLintSettings) *goanalysis.Linter { + var opts *sloglint.Options + if settings != nil { + opts = &sloglint.Options{ + NoMixedArgs: settings.NoMixedArgs, + KVOnly: settings.KVOnly, + NoGlobal: settings.NoGlobal, + AttrOnly: settings.AttrOnly, + ContextOnly: settings.ContextOnly, + StaticMsg: settings.StaticMsg, + NoRawKeys: settings.NoRawKeys, + KeyNamingCase: settings.KeyNamingCase, + ArgsOnSepLines: settings.ArgsOnSepLines, + } + } + + a := sloglint.New(opts) + + return goanalysis. + NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil). 
+ WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/spancheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/spancheck.go new file mode 100644 index 0000000000..9341244777 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/spancheck.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "github.com/jjti/go-spancheck" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewSpancheck(settings *config.SpancheckSettings) *goanalysis.Linter { + cfg := spancheck.NewDefaultConfig() + + if settings != nil { + if settings.Checks != nil { + cfg.EnabledChecks = settings.Checks + } + + if settings.IgnoreCheckSignatures != nil { + cfg.IgnoreChecksSignaturesSlice = settings.IgnoreCheckSignatures + } + } + + a := spancheck.NewAnalyzerWithConfig(cfg) + + return goanalysis. + NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil). + WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go index 48ca246e70..e63b292a20 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go @@ -8,14 +8,12 @@ import ( ) func NewSQLCloseCheck() *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - analyzer.NewAnalyzer(), - } + a := analyzer.NewAnalyzer() return goanalysis.NewLinter( - "sqlclosecheck", - "Checks that sql.Rows and sql.Stmt are closed.", - analyzers, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck.go index 2226eabb4e..673484630a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck.go @@ -9,12 +9,12 @@ import ( func NewStaticcheck(settings *config.StaticCheckSettings) *goanalysis.Linter { cfg := staticCheckConfig(settings) - analyzers := setupStaticCheckAnalyzers(staticcheck.Analyzers, getGoVersion(settings), cfg.Checks) return goanalysis.NewLinter( "staticcheck", - "Staticcheck is a go vet on steroids, applying a ton of static analysis checks", + "It's a set of rules from staticcheck. 
It's not the same thing as the staticcheck binary."+ + " The author of staticcheck doesn't support or approve the use of staticcheck as a library inside golangci-lint.", analyzers, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck_common.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck_common.go index dc6360d7e0..0eb21ec9cf 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck_common.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck_common.go @@ -12,7 +12,7 @@ import ( "github.com/golangci/golangci-lint/pkg/logutils" ) -var debugf = logutils.Debug("megacheck") +var debugf = logutils.Debug(logutils.DebugKeyMegacheck) func getGoVersion(settings *config.StaticCheckSettings) string { var goVersion string @@ -24,8 +24,7 @@ func getGoVersion(settings *config.StaticCheckSettings) string { return goVersion } - // TODO: uses "1.13" for backward compatibility, but in the future (v2) must be set by using build.Default.ReleaseTags like staticcheck. - return "1.13" + return "1.17" } func setupStaticCheckAnalyzers(src []*lint.Analyzer, goVersion string, checks []string) []*analysis.Analyzer { @@ -99,7 +98,8 @@ func staticCheckConfig(settings *config.StaticCheckSettings) *scconfig.Config { } // https://github.com/dominikh/go-tools/blob/9bf17c0388a65710524ba04c2d821469e639fdc2/lintcmd/lint.go#L437-L477 -// nolint // Keep the original source code. +// +//nolint:gocritic // Keep the original source code. func filterAnalyzerNames(analyzers []string, checks []string) map[string]bool { allowedChecks := map[string]bool{} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go deleted file mode 100644 index 7c16f8ec36..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go +++ /dev/null @@ -1,55 +0,0 @@ -package golinters // nolint:dupl - -import ( - "fmt" - "sync" - - structcheckAPI "github.com/golangci/check/cmd/structcheck" - "golang.org/x/tools/go/analysis" - - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" -) - -func NewStructcheck() *goanalysis.Linter { - const linterName = "structcheck" - var mu sync.Mutex - var res []goanalysis.Issue - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - linterName, - "Finds unused struct fields", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - checkExported := lintCtx.Settings().Structcheck.CheckExportedFields - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - prog := goanalysis.MakeFakeLoaderProgram(pass) - - structcheckIssues := structcheckAPI.Run(prog, checkExported) - if len(structcheckIssues) == 0 { - return nil, nil - } - - issues := make([]goanalysis.Issue, 0, len(structcheckIssues)) - for _, i := range structcheckIssues { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: i.Pos, - Text: fmt.Sprintf("%s is unused", formatCode(i.FieldName, lintCtx.Cfg)), - FromLinter: linterName, - }, pass)) - } - - mu.Lock() - res = append(res, issues...) 
- mu.Unlock() - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return res - }).WithLoadMode(goanalysis.LoadModeTypesInfo) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/stylecheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/stylecheck.go index 899f6ff582..d9b0f87c87 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/stylecheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/stylecheck.go @@ -15,7 +15,7 @@ func NewStylecheck(settings *config.StaticCheckSettings) *goanalysis.Linter { // `scconfig.Analyzer` is a singleton, then it's not possible to have more than one instance for all staticcheck "sub-linters". // When we will merge the 4 "sub-linters", the problem will disappear: https://github.com/golangci/golangci-lint/issues/357 // Currently only stylecheck analyzer has a configuration in staticcheck. - scconfig.Analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + scconfig.Analyzer.Run = func(_ *analysis.Pass) (any, error) { return cfg, nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign.go new file mode 100644 index 0000000000..c23838f702 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign.go @@ -0,0 +1,75 @@ +package golinters + +import ( + "sync" + + "github.com/4meepo/tagalign" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewTagAlign(settings *config.TagAlignSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + options := []tagalign.Option{tagalign.WithMode(tagalign.GolangciLintMode)} + + if settings != nil { + options = append(options, tagalign.WithAlign(settings.Align)) + + if settings.Sort || len(settings.Order) > 0 { + options = append(options, tagalign.WithSort(settings.Order...)) + } + + // Strict style will be applied only if Align and Sort are enabled together. + if settings.Strict && settings.Align && settings.Sort { + options = append(options, tagalign.WithStrictStyle()) + } + } + + analyzer := tagalign.NewAnalyzer(options...) + analyzer.Run = func(pass *analysis.Pass) (any, error) { + taIssues := tagalign.Run(pass, options...) + + issues := make([]goanalysis.Issue, len(taIssues)) + for i, issue := range taIssues { + report := &result.Issue{ + FromLinter: analyzer.Name, + Pos: issue.Pos, + Text: issue.Message, + Replacement: &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: issue.InlineFix.StartCol, + Length: issue.InlineFix.Length, + NewString: issue.InlineFix.NewString, + }, + }, + } + + issues[i] = goanalysis.NewIssue(report, pass) + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + } + + return goanalysis.NewLinter( + analyzer.Name, + analyzer.Doc, + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle.go index 5f58fc1d3b..67c14cbd48 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle.go @@ -11,8 +11,9 @@ import ( func NewTagliatelle(settings *config.TagliatelleSettings) *goanalysis.Linter { cfg := tagliatelle.Config{ Rules: map[string]string{ - "json": "camel", - "yaml": "camel", + "json": "camel", + "yaml": "camel", + "header": "header", }, } @@ -25,6 +26,10 @@ func NewTagliatelle(settings *config.TagliatelleSettings) *goanalysis.Linter { a := tagliatelle.New(cfg) - return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil). - WithLoadMode(goanalysis.LoadModeSyntax) + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tenv.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tenv.go new file mode 100644 index 0000000000..6c6bd3186f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tenv.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "github.com/sivchari/tenv" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewTenv(settings *config.TenvSettings) *goanalysis.Linter { + a := tenv.Analyzer + + var cfg map[string]map[string]any + if settings != nil { + cfg = map[string]map[string]any{ + a.Name: { + tenv.A: settings.All, + }, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testableexamples.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testableexamples.go new file mode 100644 index 0000000000..3333593a62 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testableexamples.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/maratori/testableexamples/pkg/testableexamples" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewTestableexamples() *goanalysis.Linter { + a := testableexamples.NewAnalyzer() + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testifylint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testifylint.go new file mode 100644 index 0000000000..32b194f00f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testifylint.go @@ -0,0 +1,46 @@ +package golinters + +import ( + "github.com/Antonboom/testifylint/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewTestifylint(settings *config.TestifylintSettings) *goanalysis.Linter { + a := analyzer.New() + + cfg := 
make(map[string]map[string]any) + if settings != nil { + cfg[a.Name] = map[string]any{ + "enable-all": settings.EnableAll, + "disable-all": settings.DisableAll, + + "bool-compare.ignore-custom-types": settings.BoolCompare.IgnoreCustomTypes, + } + if len(settings.EnabledCheckers) > 0 { + cfg[a.Name]["enable"] = settings.EnabledCheckers + } + if len(settings.DisabledCheckers) > 0 { + cfg[a.Name]["disable"] = settings.DisabledCheckers + } + + if p := settings.ExpectedActual.ExpVarPattern; p != "" { + cfg[a.Name]["expected-actual.pattern"] = p + } + if p := settings.RequireError.FnPattern; p != "" { + cfg[a.Name]["require-error.fn-pattern"] = p + } + if m := settings.SuiteExtraAssertCall.Mode; m != "" { + cfg[a.Name]["suite-extra-assert-call.mode"] = m + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage.go index 1248e78fda..b8a867eb60 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage.go @@ -1,6 +1,8 @@ package golinters import ( + "strings" + "github.com/maratori/testpackage/pkg/testpackage" "golang.org/x/tools/go/analysis" @@ -9,15 +11,18 @@ import ( ) func NewTestpackage(cfg *config.TestpackageSettings) *goanalysis.Linter { - var a = testpackage.NewAnalyzer() - var settings map[string]map[string]interface{} + a := testpackage.NewAnalyzer() + + var settings map[string]map[string]any if cfg != nil { - settings = map[string]map[string]interface{}{ + settings = map[string]map[string]any{ a.Name: { - testpackage.SkipRegexpFlagName: cfg.SkipRegexp, + testpackage.SkipRegexpFlagName: cfg.SkipRegexp, + testpackage.AllowPackagesFlagName: strings.Join(cfg.AllowPackages, ","), }, } } + return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, settings). 
WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper.go index 1d92f2fbfa..1ae85ef42e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper.go @@ -4,6 +4,7 @@ import ( "strings" "github.com/kulti/thelper/pkg/analyzer" + "golang.org/x/exp/maps" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" @@ -13,49 +14,67 @@ import ( func NewThelper(cfg *config.ThelperSettings) *goanalysis.Linter { a := analyzer.NewAnalyzer() - cfgMap := map[string]map[string]interface{}{} - if cfg != nil { - var opts []string + opts := map[string]struct{}{ + "t_name": {}, + "t_begin": {}, + "t_first": {}, - if cfg.Test.Name { - opts = append(opts, "t_name") - } - if cfg.Test.Begin { - opts = append(opts, "t_begin") - } - if cfg.Test.First { - opts = append(opts, "t_first") - } + "f_name": {}, + "f_begin": {}, + "f_first": {}, - if cfg.Benchmark.Name { - opts = append(opts, "b_name") - } - if cfg.Benchmark.Begin { - opts = append(opts, "b_begin") - } - if cfg.Benchmark.First { - opts = append(opts, "b_first") - } + "b_name": {}, + "b_begin": {}, + "b_first": {}, - if cfg.TB.Name { - opts = append(opts, "tb_name") - } - if cfg.TB.Begin { - opts = append(opts, "tb_begin") - } - if cfg.TB.First { - opts = append(opts, "tb_first") - } + "tb_name": {}, + "tb_begin": {}, + "tb_first": {}, + } - cfgMap[a.Name] = map[string]interface{}{ - "checks": strings.Join(opts, ","), - } + if cfg != nil { + applyTHelperOptions(cfg.Test, "t_", opts) + applyTHelperOptions(cfg.Fuzz, "f_", opts) + applyTHelperOptions(cfg.Benchmark, "b_", opts) + applyTHelperOptions(cfg.TB, "tb_", opts) + } + + if len(opts) == 0 { + linterLogger.Fatalf("thelper: at least one option must be enabled") + } + + args := maps.Keys(opts) + + cfgMap := map[string]map[string]any{ + a.Name: { + "checks": strings.Join(args, ","), + }, } return goanalysis.NewLinter( - "thelper", - "thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers", + a.Name, + a.Doc, []*analysis.Analyzer{a}, cfgMap, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } + +func applyTHelperOptions(o config.ThelperOptions, prefix string, opts map[string]struct{}) { + if o.Name != nil { + if !*o.Name { + delete(opts, prefix+"name") + } + } + + if o.Begin != nil { + if !*o.Begin { + delete(opts, prefix+"begin") + } + } + + if o.First != nil { + if !*o.First { + delete(opts, prefix+"first") + } + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tparallel.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tparallel.go index a4b96eb735..643f2c2710 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tparallel.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tparallel.go @@ -8,14 +8,11 @@ import ( ) func NewTparallel() *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - tparallel.Analyzer, - } - + a := tparallel.Analyzer return goanalysis.NewLinter( - "tparallel", - "tparallel detects inappropriate usage of t.Parallel() method in your Go test codes", - analyzers, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go index 24f4339fbd..e9b26ef485 100644 
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go @@ -12,17 +12,13 @@ func NewTypecheck() *goanalysis.Linter { analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (interface{}, error) { - return nil, nil - }, + Run: goanalysis.DummyRun, } - linter := goanalysis.NewLinter( + return goanalysis.NewLinter( linterName, "Like the front-end of a Go compiler, parses and type-checks Go code", []*analysis.Analyzer{analyzer}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) - - return linter } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go index 456f6836cc..08e493aef0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go @@ -3,51 +3,60 @@ package golinters import ( "sync" - unconvertAPI "github.com/golangci/unconvert" + "github.com/golangci/unconvert" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func NewUnconvert() *goanalysis.Linter { - const linterName = "unconvert" +const unconvertName = "unconvert" + +func NewUnconvert(settings *config.UnconvertSettings) *goanalysis.Linter { var mu sync.Mutex - var res []goanalysis.Issue + var resIssues []goanalysis.Issue + analyzer := &analysis.Analyzer{ - Name: linterName, + Name: unconvertName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - linterName, - "Remove unnecessary type conversions", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - prog := goanalysis.MakeFakeLoaderProgram(pass) + Run: func(pass *analysis.Pass) (any, error) { + issues := runUnconvert(pass, settings) - positions := unconvertAPI.Run(prog) - if len(positions) == 0 { + if len(issues) == 0 { return nil, nil } - issues := make([]goanalysis.Issue, 0, len(positions)) - for _, pos := range positions { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: pos, - Text: "unnecessary conversion", - FromLinter: linterName, - }, pass)) - } - mu.Lock() - res = append(res, issues...) + resIssues = append(resIssues, issues...) 
mu.Unlock() + return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return res + }, + } + + return goanalysis.NewLinter( + unconvertName, + "Remove unnecessary type conversions", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } + +func runUnconvert(pass *analysis.Pass, settings *config.UnconvertSettings) []goanalysis.Issue { + positions := unconvert.Run(pass, settings.FastMath, settings.Safe) + + var issues []goanalysis.Issue + for _, position := range positions { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: position, + Text: "unnecessary conversion", + FromLinter: unconvertName, + }, pass)) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go index 33dd55c9b2..4078d94988 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go @@ -8,69 +8,83 @@ import ( "golang.org/x/tools/go/packages" "mvdan.cc/unparam/check" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func NewUnparam() *goanalysis.Linter { - const linterName = "unparam" +const unparamName = "unparam" + +func NewUnparam(settings *config.UnparamSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: linterName, + Name: unparamName, Doc: goanalysis.TheOnlyanalyzerDoc, Requires: []*analysis.Analyzer{buildssa.Analyzer}, - } - return goanalysis.NewLinter( - linterName, - "Reports unused function parameters", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - us := &lintCtx.Settings().Unparam - if us.Algo != "cha" { - lintCtx.Log.Warnf("`linters-settings.unparam.algo` isn't supported by the newest `unparam`") - } - - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) - ssaPkg := ssa.Pkg - - pkg := &packages.Package{ - Fset: pass.Fset, - Syntax: pass.Files, - Types: pass.Pkg, - TypesInfo: pass.TypesInfo, - } - - c := &check.Checker{} - c.CheckExportedFuncs(us.CheckExported) - c.Packages([]*packages.Package{pkg}) - c.ProgramSSA(ssaPkg.Prog) - - unparamIssues, err := c.Check() + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runUnparam(pass, settings) if err != nil { return nil, err } - var res []goanalysis.Issue - for _, i := range unparamIssues { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: pass.Fset.Position(i.Pos()), - Text: i.Message(), - FromLinter: linterName, - }, pass)) + if len(issues) == 0 { + return nil, nil } mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil + }, + } + + return goanalysis.NewLinter( + unparamName, + "Reports unused function parameters", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + if settings.Algo != "cha" { + lintCtx.Log.Warnf("`linters-settings.unparam.algo` isn't supported by the newest `unparam`") } }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } + +func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) ([]goanalysis.Issue, error) { + ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + ssaPkg := ssa.Pkg + + pkg := &packages.Package{ + Fset: pass.Fset, + Syntax: pass.Files, + Types: pass.Pkg, + TypesInfo: pass.TypesInfo, + } + + c := &check.Checker{} + c.CheckExportedFuncs(settings.CheckExported) + c.Packages([]*packages.Package{pkg}) + c.ProgramSSA(ssaPkg.Prog) + + unparamIssues, err := c.Check() + if err != nil { + return nil, err + } + + var issues []goanalysis.Issue + for _, i := range unparamIssues { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: pass.Fset.Position(i.Pos()), + Text: i.Message(), + FromLinter: unparamName, + }, pass)) + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go index cfdf1f2ca7..a93061c96c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go @@ -5,6 +5,9 @@ import ( "sync" "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/analysis/facts/directives" + "honnef.co/go/tools/analysis/facts/generated" + "honnef.co/go/tools/analysis/lint" "honnef.co/go/tools/unused" "github.com/golangci/golangci-lint/pkg/config" @@ -13,37 +16,20 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) -type UnusedSettings struct { - GoVersion string -} - -func NewUnused(settings *config.StaticCheckSettings) *goanalysis.Linter { - const name = "unused" +const unusedName = "unused" +func NewUnused(settings *config.UnusedSettings, scSettings *config.StaticCheckSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: name, + Name: unusedName, Doc: unused.Analyzer.Analyzer.Doc, Requires: unused.Analyzer.Analyzer.Requires, - Run: func(pass *analysis.Pass) (interface{}, error) { - res, err := unused.Analyzer.Analyzer.Run(pass) - if err != nil { - return nil, err - } - - sr := unused.Serialize(pass, res.(unused.Result), pass.Fset) - - var issues []goanalysis.Issue - for _, object := range sr.Unused { - issue := goanalysis.NewIssue(&result.Issue{ - FromLinter: name, - Text: fmt.Sprintf("%s %s is unused", object.Kind, object.Name), - Pos: object.Position, - }, pass) - - issues = append(issues, issue) + Run: func(pass *analysis.Pass) (any, error) { + issues := runUnused(pass, settings) + if len(issues) == 0 { + return nil, nil } mu.Lock() @@ -54,16 +40,73 @@ func NewUnused(settings *config.StaticCheckSettings) *goanalysis.Linter { }, } - setAnalyzerGoVersion(analyzer, getGoVersion(settings)) + setAnalyzerGoVersion(analyzer, getGoVersion(scSettings)) - lnt := goanalysis.NewLinter( - name, + return goanalysis.NewLinter( + unusedName, "Checks Go code for unused constants, variables, functions and types", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(lintCtx *linter.Context) []goanalysis.Issue { + ).WithIssuesReporter(func(_ 
*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func runUnused(pass *analysis.Pass, cfg *config.UnusedSettings) []goanalysis.Issue { + res := getUnusedResults(pass, cfg) + + used := make(map[string]bool) + for _, obj := range res.Used { + used[fmt.Sprintf("%s %d %s", obj.Position.Filename, obj.Position.Line, obj.Name)] = true + } + + var issues []goanalysis.Issue + + // Inspired by https://github.com/dominikh/go-tools/blob/d694aadcb1f50c2d8ac0a1dd06217ebb9f654764/lintcmd/lint.go#L177-L197 + for _, object := range res.Unused { + if object.Kind == "type param" { + continue + } + + key := fmt.Sprintf("%s %d %s", object.Position.Filename, object.Position.Line, object.Name) + if used[key] { + continue + } + + issue := goanalysis.NewIssue(&result.Issue{ + FromLinter: unusedName, + Text: fmt.Sprintf("%s %s is unused", object.Kind, object.Name), + Pos: object.Position, + }, pass) + + issues = append(issues, issue) + } + + return issues +} + +func getUnusedResults(pass *analysis.Pass, settings *config.UnusedSettings) unused.Result { + opts := unused.Options{ + FieldWritesAreUses: settings.FieldWritesAreUses, + PostStatementsAreReads: settings.PostStatementsAreReads, + ExportedIsUsed: settings.ExportedIsUsed, + ExportedFieldsAreUsed: settings.ExportedFieldsAreUsed, + ParametersAreUsed: settings.ParametersAreUsed, + LocalVariablesAreUsed: settings.LocalVariablesAreUsed, + GeneratedIsUsed: settings.GeneratedIsUsed, + } + + // ref: https://github.com/dominikh/go-tools/blob/4ec1f474ca6c0feb8e10a8fcca4ab95f5b5b9881/internal/cmd/unused/unused.go#L68 + nodes := unused.Graph(pass.Fset, + pass.Files, + pass.Pkg, + pass.TypesInfo, + pass.ResultOf[directives.Analyzer].([]lint.Directive), + pass.ResultOf[generated.Analyzer].(map[string]generated.Generator), + opts, + ) - return lnt + sg := unused.SerializedGraph{} + sg.Merge(nodes) + return sg.Results() } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars.go new file mode 100644 index 0000000000..663a841ac7 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars.go @@ -0,0 +1,38 @@ +package golinters + +import ( + "github.com/sashamelentyev/usestdlibvars/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewUseStdlibVars(cfg *config.UseStdlibVarsSettings) *goanalysis.Linter { + a := analyzer.New() + + cfgMap := make(map[string]map[string]any) + if cfg != nil { + cfgMap[a.Name] = map[string]any{ + analyzer.ConstantKindFlag: cfg.ConstantKind, + analyzer.CryptoHashFlag: cfg.CryptoHash, + analyzer.HTTPMethodFlag: cfg.HTTPMethod, + analyzer.HTTPStatusCodeFlag: cfg.HTTPStatusCode, + analyzer.OSDevNullFlag: cfg.OSDevNull, + analyzer.RPCDefaultPathFlag: cfg.DefaultRPCPath, + analyzer.SQLIsolationLevelFlag: cfg.SQLIsolationLevel, + analyzer.SyslogPriorityFlag: cfg.SyslogPriority, + analyzer.TimeLayoutFlag: cfg.TimeLayout, + analyzer.TimeMonthFlag: cfg.TimeMonth, + analyzer.TimeWeekdayFlag: cfg.TimeWeekday, + analyzer.TLSSignatureSchemeFlag: cfg.TLSSignatureScheme, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/util.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/util.go index 
1940f30e3f..932cbfcf78 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/util.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/util.go @@ -2,8 +2,11 @@ package golinters import ( "fmt" + "path/filepath" "strings" + "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" ) @@ -15,10 +18,16 @@ func formatCode(code string, _ *config.Config) string { return fmt.Sprintf("`%s`", code) } -func formatCodeBlock(code string, _ *config.Config) string { - if strings.Contains(code, "`") { - return code // TODO: properly escape or remove +func getFileNames(pass *analysis.Pass) []string { + var fileNames []string + for _, f := range pass.Files { + fileName := pass.Fset.PositionFor(f.Pos(), true).Filename + ext := filepath.Ext(fileName) + if ext != "" && ext != ".go" { + // position has been adjusted to a non-go file, revert to original file + fileName = pass.Fset.PositionFor(f.Pos(), false).Filename + } + fileNames = append(fileNames, fileName) } - - return fmt.Sprintf("```\n%s\n```", code) + return fileNames } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go deleted file mode 100644 index dcf2e7de8e..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go +++ /dev/null @@ -1,55 +0,0 @@ -package golinters // nolint:dupl - -import ( - "fmt" - "sync" - - varcheckAPI "github.com/golangci/check/cmd/varcheck" - "golang.org/x/tools/go/analysis" - - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" -) - -func NewVarcheck() *goanalysis.Linter { - const linterName = "varcheck" - var mu sync.Mutex - var res []goanalysis.Issue - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - linterName, - "Finds unused global variables and constants", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - checkExported := lintCtx.Settings().Varcheck.CheckExportedFields - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - prog := goanalysis.MakeFakeLoaderProgram(pass) - - varcheckIssues := varcheckAPI.Run(prog, checkExported) - if len(varcheckIssues) == 0 { - return nil, nil - } - - issues := make([]goanalysis.Issue, 0, len(varcheckIssues)) - for _, i := range varcheckIssues { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: i.Pos, - Text: fmt.Sprintf("%s is unused", formatCode(i.VarName, lintCtx.Cfg)), - FromLinter: linterName, - }, pass)) - } - - mu.Lock() - res = append(res, issues...) 
- mu.Unlock() - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return res - }).WithLoadMode(goanalysis.LoadModeTypesInfo) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/varnamelen.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varnamelen.go new file mode 100644 index 0000000000..688dfa8046 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varnamelen.go @@ -0,0 +1,46 @@ +package golinters + +import ( + "strconv" + "strings" + + "github.com/blizzy78/varnamelen" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewVarnamelen(settings *config.VarnamelenSettings) *goanalysis.Linter { + analyzer := varnamelen.NewAnalyzer() + cfg := map[string]map[string]any{} + + if settings != nil { + vnlCfg := map[string]any{ + "checkReceiver": strconv.FormatBool(settings.CheckReceiver), + "checkReturn": strconv.FormatBool(settings.CheckReturn), + "checkTypeParam": strconv.FormatBool(settings.CheckTypeParam), + "ignoreNames": strings.Join(settings.IgnoreNames, ","), + "ignoreTypeAssertOk": strconv.FormatBool(settings.IgnoreTypeAssertOk), + "ignoreMapIndexOk": strconv.FormatBool(settings.IgnoreMapIndexOk), + "ignoreChanRecvOk": strconv.FormatBool(settings.IgnoreChanRecvOk), + "ignoreDecls": strings.Join(settings.IgnoreDecls, ","), + } + + if settings.MaxDistance > 0 { + vnlCfg["maxDistance"] = strconv.Itoa(settings.MaxDistance) + } + if settings.MinNameLength > 0 { + vnlCfg["minNameLength"] = strconv.Itoa(settings.MinNameLength) + } + + cfg[analyzer.Name] = vnlCfg + } + + return goanalysis.NewLinter( + analyzer.Name, + "checks that the length of a variable's name matches its scope", + []*analysis.Analyzer{analyzer}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wastedassign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wastedassign.go index d359fb0195..9038c827dc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wastedassign.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wastedassign.go @@ -8,14 +8,12 @@ import ( ) func NewWastedAssign() *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - wastedassign.Analyzer, - } + a := wastedassign.Analyzer return goanalysis.NewLinter( - "wastedassign", - "wastedassign finds wasted assignment statements.", - analyzers, + a.Name, + "Finds wasted assignment statements", + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go index d475465a21..5487b1016a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go @@ -1,80 +1,53 @@ package golinters import ( - "go/token" + "fmt" "sync" - "github.com/pkg/errors" "github.com/ultraware/whitespace" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func NewWhitespace() *goanalysis.Linter { - const linterName = "whitespace" +const whitespaceName = "whitespace" + +func NewWhitespace(settings *config.WhitespaceSettings) 
*goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, + var wsSettings whitespace.Settings + if settings != nil { + wsSettings = whitespace.Settings{ + Mode: whitespace.RunningModeGolangCI, + MultiIf: settings.MultiIf, + MultiFunc: settings.MultiFunc, + } } + + a := whitespace.NewAnalyzer(&wsSettings) + return goanalysis.NewLinter( - linterName, - "Tool for detection of leading and trailing whitespace", - []*analysis.Analyzer{analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - cfg := lintCtx.Cfg.LintersSettings.Whitespace - settings := whitespace.Settings{MultiIf: cfg.MultiIf, MultiFunc: cfg.MultiFunc} - - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var issues []whitespace.Message - for _, file := range pass.Files { - issues = append(issues, whitespace.Run(file, pass.Fset, settings)...) + ).WithContextSetter(func(_ *linter.Context) { + a.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runWhitespace(pass, wsSettings) + if err != nil { + return nil, err } if len(issues) == 0 { return nil, nil } - res := make([]goanalysis.Issue, len(issues)) - for k, i := range issues { - issue := result.Issue{ - Pos: token.Position{ - Filename: i.Pos.Filename, - Line: i.Pos.Line, - }, - LineRange: &result.Range{From: i.Pos.Line, To: i.Pos.Line}, - Text: i.Message, - FromLinter: linterName, - Replacement: &result.Replacement{}, - } - - bracketLine, err := lintCtx.LineCache.GetLine(issue.Pos.Filename, issue.Pos.Line) - if err != nil { - return nil, errors.Wrapf(err, "failed to get line %s:%d", issue.Pos.Filename, issue.Pos.Line) - } - - switch i.Type { - case whitespace.MessageTypeLeading: - issue.LineRange.To++ // cover two lines by the issue: opening bracket "{" (issue.Pos.Line) and following empty line - case whitespace.MessageTypeTrailing: - issue.LineRange.From-- // cover two lines by the issue: closing bracket "}" (issue.Pos.Line) and preceding empty line - issue.Pos.Line-- // set in sync with LineRange.From to not break fixer and other code features - case whitespace.MessageTypeAddAfter: - bracketLine += "\n" - } - issue.Replacement.NewLines = []string{bracketLine} - - res[k] = goanalysis.NewIssue(&issue, pass) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil @@ -83,3 +56,47 @@ func NewWhitespace() *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runWhitespace(pass *analysis.Pass, wsSettings whitespace.Settings) ([]goanalysis.Issue, error) { + lintIssues := whitespace.Run(pass, &wsSettings) + + issues := make([]goanalysis.Issue, len(lintIssues)) + for i, issue := range lintIssues { + report := &result.Issue{ + FromLinter: whitespaceName, + Pos: pass.Fset.PositionFor(issue.Diagnostic, false), + Text: issue.Message, + } + + switch issue.MessageType { + case whitespace.MessageTypeRemove: + if len(issue.LineNumbers) == 0 { + continue + } + + report.LineRange = &result.Range{ + From: issue.LineNumbers[0], + To: issue.LineNumbers[len(issue.LineNumbers)-1], + } + + report.Replacement = &result.Replacement{NeedOnlyDelete: true} + + case whitespace.MessageTypeAdd: + report.Pos = pass.Fset.PositionFor(issue.FixStart, false) + report.Replacement = &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: 0, + Length: 1, + NewString: "\n\t", + }, + } + + default: + return nil, fmt.Errorf("unknown message type: %v", issue.MessageType) + } + + issues[i] = goanalysis.NewIssue(report, pass) + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck.go index 7717d188ac..6d25db4277 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck.go @@ -8,20 +8,27 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" ) -const wrapcheckName = "wrapcheck" - func NewWrapcheck(settings *config.WrapcheckSettings) *goanalysis.Linter { cfg := wrapcheck.NewDefaultConfig() if settings != nil { if len(settings.IgnoreSigs) != 0 { cfg.IgnoreSigs = settings.IgnoreSigs } + if len(settings.IgnoreSigRegexps) != 0 { + cfg.IgnoreSigRegexps = settings.IgnoreSigRegexps + } + if len(settings.IgnorePackageGlobs) != 0 { + cfg.IgnorePackageGlobs = settings.IgnorePackageGlobs + } + if len(settings.IgnoreInterfaceRegexps) != 0 { + cfg.IgnoreInterfaceRegexps = settings.IgnoreInterfaceRegexps + } } a := wrapcheck.NewAnalyzer(cfg) return goanalysis.NewLinter( - wrapcheckName, + a.Name, a.Doc, []*analysis.Analyzer{a}, nil, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go index 29d00faea5..3b090a686e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go @@ -1,83 +1,39 @@ package golinters import ( - "sync" - - "github.com/bombsimon/wsl/v3" + "github.com/bombsimon/wsl/v4" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" -) - -const ( - name = "wsl" ) -// NewWSL returns a new WSL linter. 
-func NewWSL() *goanalysis.Linter { - var ( - issues []goanalysis.Issue - mu = sync.Mutex{} - analyzer = &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, - Doc: goanalysis.TheOnlyanalyzerDoc, +func NewWSL(settings *config.WSLSettings) *goanalysis.Linter { + var conf *wsl.Configuration + if settings != nil { + conf = &wsl.Configuration{ + StrictAppend: settings.StrictAppend, + AllowAssignAndCallCuddle: settings.AllowAssignAndCallCuddle, + AllowAssignAndAnythingCuddle: settings.AllowAssignAndAnythingCuddle, + AllowMultiLineAssignCuddle: settings.AllowMultiLineAssignCuddle, + ForceCaseTrailingWhitespaceLimit: settings.ForceCaseTrailingWhitespaceLimit, + AllowTrailingComment: settings.AllowTrailingComment, + AllowSeparatedLeadingComment: settings.AllowSeparatedLeadingComment, + AllowCuddleDeclaration: settings.AllowCuddleDeclaration, + AllowCuddleWithCalls: settings.AllowCuddleWithCalls, + AllowCuddleWithRHS: settings.AllowCuddleWithRHS, + ForceCuddleErrCheckAndAssign: settings.ForceCuddleErrCheckAndAssign, + ErrorVariableNames: settings.ErrorVariableNames, + ForceExclusiveShortDeclarations: settings.ForceExclusiveShortDeclarations, } - ) + } + + a := wsl.NewAnalyzer(conf) return goanalysis.NewLinter( - name, - "Whitespace Linter - Forces you to use empty lines!", - []*analysis.Analyzer{analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var ( - files = []string{} - linterCfg = lintCtx.Cfg.LintersSettings.WSL - processorCfg = wsl.Configuration{ - StrictAppend: linterCfg.StrictAppend, - AllowAssignAndCallCuddle: linterCfg.AllowAssignAndCallCuddle, - AllowAssignAndAnythingCuddle: linterCfg.AllowAssignAndAnythingCuddle, - AllowMultiLineAssignCuddle: linterCfg.AllowMultiLineAssignCuddle, - AllowCuddleDeclaration: linterCfg.AllowCuddleDeclaration, - AllowTrailingComment: linterCfg.AllowTrailingComment, - AllowSeparatedLeadingComment: linterCfg.AllowSeparatedLeadingComment, - ForceCuddleErrCheckAndAssign: linterCfg.ForceCuddleErrCheckAndAssign, - ForceCaseTrailingWhitespaceLimit: linterCfg.ForceCaseTrailingWhitespaceLimit, - ForceExclusiveShortDeclarations: linterCfg.ForceExclusiveShortDeclarations, - AllowCuddleWithCalls: []string{"Lock", "RLock"}, - AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, - ErrorVariableNames: []string{"err"}, - } - ) - - for _, file := range pass.Files { - files = append(files, pass.Fset.PositionFor(file.Pos(), false).Filename) - } - - wslErrors, _ := wsl.NewProcessorWithConfig(processorCfg). 
- ProcessFiles(files) - - if len(wslErrors) == 0 { - return nil, nil - } - - mu.Lock() - defer mu.Unlock() - - for _, err := range wslErrors { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - FromLinter: name, - Pos: err.Position, - Text: err.Reason, - }, pass)) - } - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return issues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/zerologlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/zerologlint.go new file mode 100644 index 0000000000..edde726655 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/zerologlint.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/ykadowak/zerologlint" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewZerologLint() *goanalysis.Linter { + a := zerologlint.Analyzer + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go b/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go index 1c05b98050..7b748d8e90 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go @@ -3,13 +3,12 @@ package goutil import ( "context" "encoding/json" + "fmt" "os" "os/exec" "strings" "time" - "github.com/pkg/errors" - "github.com/golangci/golangci-lint/pkg/logutils" ) @@ -30,24 +29,27 @@ func NewEnv(log logutils.Log) *Env { return &Env{ vars: map[string]string{}, log: log, - debugf: logutils.Debug("env"), + debugf: logutils.Debug(logutils.DebugKeyEnv), } } -func (e *Env) Discover(ctx context.Context) error { +func (e Env) Discover(ctx context.Context) error { startedAt := time.Now() - args := []string{"env", "-json"} - args = append(args, string(EnvGoCache), string(EnvGoRoot)) - out, err := exec.CommandContext(ctx, "go", args...).Output() + + //nolint:gosec // Everything is static here. 
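[Editor's note: Discover's body continues below. The following self-contained sketch, not part of the diff, shows the round trip the rewritten method performs: shell out to `go env -json` for selected keys, then decode the JSON object into a string map, wrapping failures with %w instead of the dropped pkg/errors.]

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	ctx := context.Background()
	cmd := exec.CommandContext(ctx, "go", "env", "-json", "GOCACHE", "GOROOT")

	out, err := cmd.Output()
	if err != nil {
		panic(fmt.Errorf("failed to run '%s': %w", strings.Join(cmd.Args, " "), err))
	}

	// `go env -json GOCACHE GOROOT` prints a flat JSON object of strings,
	// e.g. {"GOCACHE":"/home/user/.cache/go-build","GOROOT":"/usr/local/go"}.
	vars := map[string]string{}
	if err := json.Unmarshal(out, &vars); err != nil {
		panic(fmt.Errorf("failed to parse '%s' json: %w", strings.Join(cmd.Args, " "), err))
	}

	fmt.Println(vars["GOROOT"])
}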
+ cmd := exec.CommandContext(ctx, "go", "env", "-json", string(EnvGoCache), string(EnvGoRoot)) + + out, err := cmd.Output() if err != nil { - return errors.Wrap(err, "failed to run 'go env'") + return fmt.Errorf("failed to run '%s': %w", strings.Join(cmd.Args, " "), err) } if err = json.Unmarshal(out, &e.vars); err != nil { - return errors.Wrapf(err, "failed to parse 'go %s' json", strings.Join(args, " ")) + return fmt.Errorf("failed to parse '%s' json: %w", strings.Join(cmd.Args, " "), err) } e.debugf("Read go env for %s: %#v", time.Since(startedAt), e.vars) + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/context.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/context.go new file mode 100644 index 0000000000..22c95e08af --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/context.go @@ -0,0 +1,64 @@ +package lint + +import ( + "context" + "fmt" + + "github.com/golangci/golangci-lint/internal/pkgcache" + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/exitcodes" + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +type ContextBuilder struct { + cfg *config.Config + + pkgLoader *PackageLoader + + fileCache *fsutils.FileCache + pkgCache *pkgcache.Cache + + loadGuard *load.Guard +} + +func NewContextBuilder(cfg *config.Config, pkgLoader *PackageLoader, + fileCache *fsutils.FileCache, pkgCache *pkgcache.Cache, loadGuard *load.Guard, +) *ContextBuilder { + return &ContextBuilder{ + cfg: cfg, + pkgLoader: pkgLoader, + fileCache: fileCache, + pkgCache: pkgCache, + loadGuard: loadGuard, + } +} + +func (cl *ContextBuilder) Build(ctx context.Context, log logutils.Log, linters []*linter.Config) (*linter.Context, error) { + pkgs, deduplicatedPkgs, err := cl.pkgLoader.Load(ctx, linters) + if err != nil { + return nil, fmt.Errorf("failed to load packages: %w", err) + } + + if len(deduplicatedPkgs) == 0 { + return nil, fmt.Errorf("%w: running `go mod tidy` may solve the problem", exitcodes.ErrNoGoFiles) + } + + ret := &linter.Context{ + Packages: deduplicatedPkgs, + + // At least `unused` linters works properly only on original (not deduplicated) packages, + // see https://github.com/golangci/golangci-lint/pull/585. + OriginalPackages: pkgs, + + Cfg: cl.cfg, + Log: log, + FileCache: cl.fileCache, + PkgCache: cl.pkgCache, + LoadGuard: cl.loadGuard, + } + + return ret, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go index 2372a011e3..8e57d6bdf6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go @@ -1,7 +1,11 @@ package linter import ( + "fmt" + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/pkg/config" ) const ( @@ -39,6 +43,7 @@ type Config struct { AlternativeNames []string OriginalURL string // URL of original (not forked) repo, needed for autogenerated README + Internal bool // Internal linters cannot be disabled (ex: typecheck). 
CanAutoFix bool IsSlow bool DoesChangeTypes bool @@ -47,6 +52,16 @@ type Config struct { Deprecation *Deprecation } +func (lc *Config) WithEnabledByDefault() *Config { + lc.EnabledByDefault = true + return lc +} + +func (lc *Config) WithInternal() *Config { + lc.Internal = true + return lc +} + func (lc *Config) ConsiderSlow() *Config { lc.IsSlow = true return lc @@ -63,7 +78,7 @@ func (lc *Config) WithLoadFiles() *Config { func (lc *Config) WithLoadForGoAnalysis() *Config { lc = lc.WithLoadFiles() - lc.LoadMode |= packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedTypesSizes + lc.LoadMode |= packages.NeedImports | packages.NeedDeps | packages.NeedExportFile | packages.NeedTypesSizes lc.IsSlow = true return lc } @@ -119,6 +134,27 @@ func (lc *Config) Name() string { return lc.Linter.Name() } +func (lc *Config) WithNoopFallback(cfg *config.Config, cond func(cfg *config.Config) error) *Config { + if err := cond(cfg); err != nil { + lc.Linter = NewNoop(lc.Linter, err.Error()) + lc.LoadMode = 0 + + return lc.WithLoadFiles() + } + + return lc +} + +func IsGoLowerThanGo122() func(cfg *config.Config) error { + return func(cfg *config.Config) error { + if cfg == nil || config.IsGoGreaterThanOrEqual(cfg.Run.Go, "1.22") { + return nil + } + + return fmt.Errorf("this linter is disabled because the Go version (%s) of your project is lower than Go 1.22", cfg.Run.Go) + } +} + func NewConfig(linter Linter) *Config { lc := &Config{ Linter: linter, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go index a9f9d7d7f2..81d1560f7d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go @@ -22,7 +22,6 @@ type Context struct { Cfg *config.Config FileCache *fsutils.FileCache - LineCache *fsutils.LineCache Log logutils.Log PkgCache *pkgcache.Cache diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/linter.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/linter.go index cfe9ec0209..1d4e7b04cc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/linter.go @@ -3,6 +3,7 @@ package linter import ( "context" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/result" ) @@ -11,3 +12,46 @@ type Linter interface { Name() string Desc() string } + +type Noop struct { + name string + desc string + reason string +} + +func NewNoop(l Linter, reason string) Noop { + return Noop{ + name: l.Name(), + desc: l.Desc(), + reason: reason, + } +} + +func NewNoopDeprecated(name string, cfg *config.Config) Noop { + noop := Noop{ + name: name, + desc: "Deprecated", + reason: "This linter is fully inactivated: it will not produce any reports.", + } + + if cfg.InternalCmdTest { + noop.reason = "" + } + + return noop +} + +func (n Noop) Run(_ context.Context, lintCtx *Context) ([]result.Issue, error) { + if n.reason != "" { + lintCtx.Log.Warnf("%s: %s", n.name, n.reason) + } + return nil, nil +} + +func (n Noop) Name() string { + return n.name +} + +func (n Noop) Desc() string { + return n.desc +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go new file mode 100644 index 0000000000..08041dc58f --- /dev/null +++ 
b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go @@ -0,0 +1,711 @@ +package lintersdb + +import ( + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters" + "github.com/golangci/golangci-lint/pkg/lint/linter" +) + +// LinterBuilder builds the "internal" linters based on the configuration. +type LinterBuilder struct{} + +// NewLinterBuilder creates a new LinterBuilder. +func NewLinterBuilder() *LinterBuilder { + return &LinterBuilder{} +} + +// Build loads all the "internal" linters. +// The configuration is used for the linter settings. +func (b LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { + if cfg == nil { + return nil, nil + } + + const megacheckName = "megacheck" + + // The linters are sorted in alphabetical order (case-insensitive). + // When a new linter is added, the version in `WithSince(...)` must be the next minor version of golangci-lint. + return []*linter.Config{ + linter.NewConfig(golinters.NewAsasalint(&cfg.LintersSettings.Asasalint)). + WithSince("1.47.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/alingse/asasalint"), + + linter.NewConfig(golinters.NewAsciicheck()). + WithSince("v1.26.0"). + WithPresets(linter.PresetBugs, linter.PresetStyle). + WithURL("https://github.com/tdakkota/asciicheck"), + + linter.NewConfig(golinters.NewBiDiChkFuncName(&cfg.LintersSettings.BiDiChk)). + WithSince("1.43.0"). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/breml/bidichk"), + + linter.NewConfig(golinters.NewBodyclose()). + WithSince("v1.18.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetPerformance, linter.PresetBugs). + WithURL("https://github.com/timakin/bodyclose"), + + linter.NewConfig(golinters.NewContainedCtx()). + WithSince("1.44.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/sivchari/containedctx"), + + linter.NewConfig(golinters.NewContextCheck()). + WithSince("v1.43.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/kkHAIKE/contextcheck"), + + linter.NewConfig(golinters.NewCopyLoopVar(&cfg.LintersSettings.CopyLoopVar)). + WithSince("v1.57.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/karamaru-alpha/copyloopvar"). + WithNoopFallback(cfg, linter.IsGoLowerThanGo122()), + + linter.NewConfig(golinters.NewCyclop(&cfg.LintersSettings.Cyclop)). + WithSince("v1.37.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/bkielbasa/cyclop"), + + linter.NewConfig(golinters.NewDecorder(&cfg.LintersSettings.Decorder)). + WithSince("v1.44.0"). + WithPresets(linter.PresetFormatting, linter.PresetStyle). + WithURL("https://gitlab.com/bosi/decorder"), + + linter.NewConfig(linter.NewNoopDeprecated("deadcode", cfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetUnused). + WithURL("https://github.com/remyoudompheng/go-misc/tree/master/deadcode"). + Deprecated("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), + + linter.NewConfig(golinters.NewDepguard(&cfg.LintersSettings.Depguard)). + WithSince("v1.4.0"). + WithPresets(linter.PresetStyle, linter.PresetImport, linter.PresetModule). + WithURL("https://github.com/OpenPeeDeeP/depguard"), + + linter.NewConfig(golinters.NewDogsled(&cfg.LintersSettings.Dogsled)). + WithSince("v1.19.0"). + WithPresets(linter.PresetStyle).
+ WithURL("https://github.com/alexkohler/dogsled"), + + linter.NewConfig(golinters.NewDupl(&cfg.LintersSettings.Dupl)). + WithSince("v1.0.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/mibk/dupl"), + + linter.NewConfig(golinters.NewDupWord(&cfg.LintersSettings.DupWord)). + WithSince("1.50.0"). + WithPresets(linter.PresetComment). + WithURL("https://github.com/Abirdcfly/dupword"), + + linter.NewConfig(golinters.NewDurationCheck()). + WithSince("v1.37.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/charithe/durationcheck"), + + linter.NewConfig(golinters.NewErrcheck(&cfg.LintersSettings.Errcheck)). + WithEnabledByDefault(). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs, linter.PresetError). + WithURL("https://github.com/kisielk/errcheck"), + + linter.NewConfig(golinters.NewErrChkJSONFuncName(&cfg.LintersSettings.ErrChkJSON)). + WithSince("1.44.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/breml/errchkjson"), + + linter.NewConfig(golinters.NewErrName()). + WithSince("v1.42.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/Antonboom/errname"), + + linter.NewConfig(golinters.NewErrorLint(&cfg.LintersSettings.ErrorLint)). + WithSince("v1.32.0"). + WithPresets(linter.PresetBugs, linter.PresetError). + WithLoadForGoAnalysis(). + WithURL("https://github.com/polyfloyd/go-errorlint"), + + linter.NewConfig(golinters.NewExecInQuery()). + WithSince("v1.46.0"). + WithPresets(linter.PresetSQL). + WithLoadForGoAnalysis(). + WithURL("https://github.com/lufeee/execinquery"), + + linter.NewConfig(golinters.NewExhaustive(&cfg.LintersSettings.Exhaustive)). + WithSince(" v1.28.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/nishanths/exhaustive"), + + linter.NewConfig(linter.NewNoopDeprecated("exhaustivestruct", cfg)). + WithSince("v1.32.0"). + WithPresets(linter.PresetStyle, linter.PresetTest). + WithLoadForGoAnalysis(). + WithURL("https://github.com/mbilski/exhaustivestruct"). + Deprecated("The repository of the linter has been deprecated by the owner.", "v1.46.0", "exhaustruct"), + + linter.NewConfig(golinters.NewExhaustruct(&cfg.LintersSettings.Exhaustruct)). + WithSince("v1.46.0"). + WithPresets(linter.PresetStyle, linter.PresetTest). + WithLoadForGoAnalysis(). + WithURL("https://github.com/GaijinEntertainment/go-exhaustruct"), + + linter.NewConfig(golinters.NewExportLoopRef()). + WithSince("v1.28.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/kyoh86/exportloopref"), + + linter.NewConfig(golinters.NewForbidigo(&cfg.LintersSettings.Forbidigo)). + WithSince("v1.34.0"). + WithPresets(linter.PresetStyle). + // Strictly speaking, + // the additional information is only needed when forbidigoCfg.AnalyzeTypes is chosen by the user. + // But we don't know that here in all cases (sometimes config is not loaded), + // so we have to assume that it is needed to be on the safe side. + WithLoadForGoAnalysis(). + WithURL("https://github.com/ashanbrown/forbidigo"), + + linter.NewConfig(golinters.NewForceTypeAssert()). + WithSince("v1.38.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/gostaticanalysis/forcetypeassert"), + + linter.NewConfig(golinters.NewFunlen(&cfg.LintersSettings.Funlen)). + WithSince("v1.18.0"). + WithPresets(linter.PresetComplexity). 
+ WithURL("https://github.com/ultraware/funlen"), + + linter.NewConfig(golinters.NewGci(&cfg.LintersSettings.Gci)). + WithSince("v1.30.0"). + WithPresets(linter.PresetFormatting, linter.PresetImport). + WithURL("https://github.com/daixiang0/gci"), + + linter.NewConfig(golinters.NewGinkgoLinter(&cfg.LintersSettings.GinkgoLinter)). + WithSince("v1.51.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/nunnatsa/ginkgolinter"), + + linter.NewConfig(golinters.NewGoCheckCompilerDirectives()). + WithSince("v1.51.0"). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/leighmcculloch/gocheckcompilerdirectives"), + + linter.NewConfig(golinters.NewGochecknoglobals()). + WithSince("v1.12.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/leighmcculloch/gochecknoglobals"), + + linter.NewConfig(golinters.NewGochecknoinits()). + WithSince("v1.12.0"). + WithPresets(linter.PresetStyle), + + linter.NewConfig(golinters.NewGoCheckSumType()). + WithSince("v1.55.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/alecthomas/go-check-sumtype"), + + linter.NewConfig(golinters.NewGocognit(&cfg.LintersSettings.Gocognit)). + WithSince("v1.20.0"). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/uudashr/gocognit"), + + linter.NewConfig(golinters.NewGoconst(&cfg.LintersSettings.Goconst)). + WithSince("v1.0.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/jgautheron/goconst"), + + linter.NewConfig(golinters.NewGoCritic(&cfg.LintersSettings.Gocritic)). + WithSince("v1.12.0"). + WithPresets(linter.PresetStyle, linter.PresetMetaLinter). + WithLoadForGoAnalysis(). + WithURL("https://github.com/go-critic/go-critic"), + + linter.NewConfig(golinters.NewGocyclo(&cfg.LintersSettings.Gocyclo)). + WithSince("v1.0.0"). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/fzipp/gocyclo"), + + linter.NewConfig(golinters.NewGodot(&cfg.LintersSettings.Godot)). + WithSince("v1.25.0"). + WithPresets(linter.PresetStyle, linter.PresetComment). + WithAutoFix(). + WithURL("https://github.com/tetafro/godot"), + + linter.NewConfig(golinters.NewGodox(&cfg.LintersSettings.Godox)). + WithSince("v1.19.0"). + WithPresets(linter.PresetStyle, linter.PresetComment). + WithURL("https://github.com/matoous/godox"), + + linter.NewConfig(golinters.NewGoerr113()). + WithSince("v1.26.0"). + WithPresets(linter.PresetStyle, linter.PresetError). + WithLoadForGoAnalysis(). + WithURL("https://github.com/Djarvur/go-err113"), + + linter.NewConfig(golinters.NewGofmt(&cfg.LintersSettings.Gofmt)). + WithSince("v1.0.0"). + WithPresets(linter.PresetFormatting). + WithAutoFix(). + WithURL("https://pkg.go.dev/cmd/gofmt"), + + linter.NewConfig(golinters.NewGofumpt(&cfg.LintersSettings.Gofumpt)). + WithSince("v1.28.0"). + WithPresets(linter.PresetFormatting). + WithAutoFix(). + WithURL("https://github.com/mvdan/gofumpt"), + + linter.NewConfig(golinters.NewGoHeader(&cfg.LintersSettings.Goheader)). + WithSince("v1.28.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/denis-tingaikin/go-header"), + + linter.NewConfig(golinters.NewGoimports(&cfg.LintersSettings.Goimports)). + WithSince("v1.20.0"). + WithPresets(linter.PresetFormatting, linter.PresetImport). + WithAutoFix(). + WithURL("https://pkg.go.dev/golang.org/x/tools/cmd/goimports"), + + linter.NewConfig(linter.NewNoopDeprecated("golint", cfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). 
+ WithPresets(linter.PresetStyle). + WithURL("https://github.com/golang/lint"). + Deprecated("The repository of the linter has been archived by the owner.", "v1.41.0", "revive"), + + linter.NewConfig(golinters.NewGoMND(&cfg.LintersSettings.Gomnd)). + WithSince("v1.22.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/tommy-muehle/go-mnd"), + + linter.NewConfig(golinters.NewGoModDirectives(&cfg.LintersSettings.GoModDirectives)). + WithSince("v1.39.0"). + WithPresets(linter.PresetStyle, linter.PresetModule). + WithURL("https://github.com/ldez/gomoddirectives"), + + linter.NewConfig(golinters.NewGomodguard(&cfg.LintersSettings.Gomodguard)). + WithSince("v1.25.0"). + WithPresets(linter.PresetStyle, linter.PresetImport, linter.PresetModule). + WithURL("https://github.com/ryancurrah/gomodguard"), + + linter.NewConfig(golinters.NewGoPrintfFuncName()). + WithSince("v1.23.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/jirfag/go-printf-func-name"), + + linter.NewConfig(golinters.NewGosec(&cfg.LintersSettings.Gosec)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/securego/gosec"). + WithAlternativeNames("gas"), + + linter.NewConfig(golinters.NewGosimple(&cfg.LintersSettings.Gosimple)). + WithEnabledByDefault(). + WithSince("v1.20.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithAlternativeNames(megacheckName). + WithURL("https://github.com/dominikh/go-tools/tree/master/simple"), + + linter.NewConfig(golinters.NewGosmopolitan(&cfg.LintersSettings.Gosmopolitan)). + WithSince("v1.53.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/xen0n/gosmopolitan"), + + linter.NewConfig(golinters.NewGovet(&cfg.LintersSettings.Govet)). + WithEnabledByDefault(). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs, linter.PresetMetaLinter). + WithAlternativeNames("vet", "vetshadow"). + WithURL("https://pkg.go.dev/cmd/vet"), + + linter.NewConfig(golinters.NewGrouper(&cfg.LintersSettings.Grouper)). + WithSince("v1.44.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/leonklingele/grouper"), + + linter.NewConfig(linter.NewNoopDeprecated("ifshort", cfg)). + WithSince("v1.36.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/esimonov/ifshort"). + Deprecated("The repository of the linter has been deprecated by the owner.", "v1.48.0", ""), + + linter.NewConfig(golinters.NewImportAs(&cfg.LintersSettings.ImportAs)). + WithSince("v1.38.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/julz/importas"), + + linter.NewConfig(golinters.NewINamedParam(&cfg.LintersSettings.Inamedparam)). + WithSince("v1.55.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/macabu/inamedparam"), + + linter.NewConfig(golinters.NewIneffassign()). + WithEnabledByDefault(). + WithSince("v1.0.0"). + WithPresets(linter.PresetUnused). + WithURL("https://github.com/gordonklaus/ineffassign"), + + linter.NewConfig(golinters.NewInterfaceBloat(&cfg.LintersSettings.InterfaceBloat)). + WithSince("v1.49.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/sashamelentyev/interfacebloat"), + + linter.NewConfig(linter.NewNoopDeprecated("interfacer", cfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/mvdan/interfacer"). 
+ Deprecated("The repository of the linter has been archived by the owner.", "v1.38.0", ""), + + linter.NewConfig(golinters.NewIntrange()). + WithSince("v1.57.0"). + WithURL("https://github.com/ckaznocha/intrange"). + WithNoopFallback(cfg, linter.IsGoLowerThanGo122()), + + linter.NewConfig(golinters.NewIreturn(&cfg.LintersSettings.Ireturn)). + WithSince("v1.43.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/butuzov/ireturn"), + + linter.NewConfig(golinters.NewLLL(&cfg.LintersSettings.Lll)). + WithSince("v1.8.0"). + WithPresets(linter.PresetStyle), + + linter.NewConfig(golinters.NewLoggerCheck(&cfg.LintersSettings.LoggerCheck)). + WithSince("v1.49.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle, linter.PresetBugs). + WithAlternativeNames("logrlint"). + WithURL("https://github.com/timonwong/loggercheck"), + + linter.NewConfig(golinters.NewMaintIdx(&cfg.LintersSettings.MaintIdx)). + WithSince("v1.44.0"). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/yagipy/maintidx"), + + linter.NewConfig(golinters.NewMakezero(&cfg.LintersSettings.Makezero)). + WithSince("v1.34.0"). + WithPresets(linter.PresetStyle, linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/ashanbrown/makezero"), + + linter.NewConfig(linter.NewNoopDeprecated("maligned", cfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetPerformance). + WithURL("https://github.com/mdempsky/maligned"). + Deprecated("The repository of the linter has been archived by the owner.", "v1.38.0", "govet 'fieldalignment'"), + + linter.NewConfig(golinters.NewMirror()). + WithSince("v1.53.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/butuzov/mirror"), + + linter.NewConfig(golinters.NewMisspell(&cfg.LintersSettings.Misspell)). + WithSince("v1.8.0"). + WithPresets(linter.PresetStyle, linter.PresetComment). + WithAutoFix(). + WithURL("https://github.com/client9/misspell"), + + linter.NewConfig(golinters.NewMustTag(&cfg.LintersSettings.MustTag)). + WithSince("v1.51.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle, linter.PresetBugs). + WithURL("https://github.com/go-simpler/musttag"), + + linter.NewConfig(golinters.NewNakedret(&cfg.LintersSettings.Nakedret)). + WithSince("v1.19.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/alexkohler/nakedret"), + + linter.NewConfig(golinters.NewNestif(&cfg.LintersSettings.Nestif)). + WithSince("v1.25.0"). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/nakabonne/nestif"), + + linter.NewConfig(golinters.NewNilErr()). + WithSince("v1.38.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/gostaticanalysis/nilerr"), + + linter.NewConfig(golinters.NewNilNil(&cfg.LintersSettings.NilNil)). + WithSince("v1.43.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/Antonboom/nilnil"), + + linter.NewConfig(golinters.NewNLReturn(&cfg.LintersSettings.Nlreturn)). + WithSince("v1.30.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/ssgreg/nlreturn"), + + linter.NewConfig(golinters.NewNoctx()). + WithSince("v1.28.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetPerformance, linter.PresetBugs). + WithURL("https://github.com/sonatard/noctx"), + + linter.NewConfig(golinters.NewNoNamedReturns(&cfg.LintersSettings.NoNamedReturns)). + WithSince("v1.46.0"). + WithLoadForGoAnalysis(). 
+ WithPresets(linter.PresetStyle). + WithURL("https://github.com/firefart/nonamedreturns"), + + linter.NewConfig(linter.NewNoopDeprecated("nosnakecase", cfg)). + WithSince("v1.47.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/sivchari/nosnakecase"). + Deprecated("The repository of the linter has been deprecated by the owner.", "v1.48.1", "revive 'var-naming'"), + + linter.NewConfig(golinters.NewNoSprintfHostPort()). + WithSince("v1.46.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/stbenjam/no-sprintf-host-port"), + + linter.NewConfig(golinters.NewParallelTest(&cfg.LintersSettings.ParallelTest)). + WithSince("v1.33.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle, linter.PresetTest). + WithURL("https://github.com/kunwardeep/paralleltest"), + + linter.NewConfig(golinters.NewPerfSprint(&cfg.LintersSettings.PerfSprint)). + WithSince("v1.55.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetPerformance). + WithURL("https://github.com/catenacyber/perfsprint"), + + linter.NewConfig(golinters.NewPreAlloc(&cfg.LintersSettings.Prealloc)). + WithSince("v1.19.0"). + WithPresets(linter.PresetPerformance). + WithURL("https://github.com/alexkohler/prealloc"), + + linter.NewConfig(golinters.NewPredeclared(&cfg.LintersSettings.Predeclared)). + WithSince("v1.35.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/nishanths/predeclared"), + + linter.NewConfig(golinters.NewPromlinter(&cfg.LintersSettings.Promlinter)). + WithSince("v1.40.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/yeya24/promlinter"), + + linter.NewConfig(golinters.NewProtoGetter(&cfg.LintersSettings.ProtoGetter)). + WithSince("v1.55.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithAutoFix(). + WithURL("https://github.com/ghostiam/protogetter"), + + linter.NewConfig(golinters.NewReassign(&cfg.LintersSettings.Reassign)). + WithSince("1.49.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/curioswitch/go-reassign"), + + linter.NewConfig(golinters.NewRevive(&cfg.LintersSettings.Revive)). + WithSince("v1.37.0"). + WithPresets(linter.PresetStyle, linter.PresetMetaLinter). + ConsiderSlow(). + WithURL("https://github.com/mgechev/revive"), + + linter.NewConfig(golinters.NewRowsErrCheck(&cfg.LintersSettings.RowsErrCheck)). + WithSince("v1.23.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs, linter.PresetSQL). + WithURL("https://github.com/jingyugao/rowserrcheck"), + + linter.NewConfig(golinters.NewSlogLint(&cfg.LintersSettings.SlogLint)). + WithSince("v1.55.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle, linter.PresetFormatting). + WithURL("https://github.com/go-simpler/sloglint"), + + linter.NewConfig(linter.NewNoopDeprecated("scopelint", cfg)). + WithSince("v1.12.0"). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/kyoh86/scopelint"). + Deprecated("The repository of the linter has been deprecated by the owner.", "v1.39.0", "exportloopref"), + + linter.NewConfig(golinters.NewSQLCloseCheck()). + WithSince("v1.28.0"). + WithPresets(linter.PresetBugs, linter.PresetSQL). + WithLoadForGoAnalysis(). + WithURL("https://github.com/ryanrolds/sqlclosecheck"), + + linter.NewConfig(golinters.NewSpancheck(&cfg.LintersSettings.Spancheck)). + WithSince("v1.56.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). 
+ WithURL("https://github.com/jjti/go-spancheck"), + + linter.NewConfig(golinters.NewStaticcheck(&cfg.LintersSettings.Staticcheck)). + WithEnabledByDefault(). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs, linter.PresetMetaLinter). + WithAlternativeNames(megacheckName). + WithURL("https://staticcheck.io/"), + + linter.NewConfig(linter.NewNoopDeprecated("structcheck", cfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetUnused). + WithURL("https://github.com/opennota/check"). + Deprecated("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), + + linter.NewConfig(golinters.NewStylecheck(&cfg.LintersSettings.Stylecheck)). + WithSince("v1.20.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/dominikh/go-tools/tree/master/stylecheck"), + + linter.NewConfig(golinters.NewTagAlign(&cfg.LintersSettings.TagAlign)). + WithSince("v1.53.0"). + WithPresets(linter.PresetStyle, linter.PresetFormatting). + WithAutoFix(). + WithURL("https://github.com/4meepo/tagalign"), + + linter.NewConfig(golinters.NewTagliatelle(&cfg.LintersSettings.Tagliatelle)). + WithSince("v1.40.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/ldez/tagliatelle"), + + linter.NewConfig(golinters.NewTenv(&cfg.LintersSettings.Tenv)). + WithSince("v1.43.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/sivchari/tenv"), + + linter.NewConfig(golinters.NewTestableexamples()). + WithSince("v1.50.0"). + WithPresets(linter.PresetTest). + WithURL("https://github.com/maratori/testableexamples"), + + linter.NewConfig(golinters.NewTestifylint(&cfg.LintersSettings.Testifylint)). + WithSince("v1.55.0"). + WithPresets(linter.PresetTest, linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/Antonboom/testifylint"), + + linter.NewConfig(golinters.NewTestpackage(&cfg.LintersSettings.Testpackage)). + WithSince("v1.25.0"). + WithPresets(linter.PresetStyle, linter.PresetTest). + WithURL("https://github.com/maratori/testpackage"), + + linter.NewConfig(golinters.NewThelper(&cfg.LintersSettings.Thelper)). + WithSince("v1.34.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/kulti/thelper"), + + linter.NewConfig(golinters.NewTparallel()). + WithSince("v1.32.0"). + WithPresets(linter.PresetStyle, linter.PresetTest). + WithLoadForGoAnalysis(). + WithURL("https://github.com/moricho/tparallel"), + + linter.NewConfig(golinters.NewTypecheck()). + WithInternal(). + WithEnabledByDefault(). + WithSince("v1.3.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). + WithURL(""), + + linter.NewConfig(golinters.NewUnconvert(&cfg.LintersSettings.Unconvert)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/mdempsky/unconvert"), + + linter.NewConfig(golinters.NewUnparam(&cfg.LintersSettings.Unparam)). + WithSince("v1.9.0"). + WithPresets(linter.PresetUnused). + WithLoadForGoAnalysis(). + WithURL("https://github.com/mvdan/unparam"), + + linter.NewConfig(golinters.NewUnused(&cfg.LintersSettings.Unused, &cfg.LintersSettings.Staticcheck)). + WithEnabledByDefault(). + WithSince("v1.20.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetUnused). + WithAlternativeNames(megacheckName). + ConsiderSlow(). + WithChangeTypes(). 
+ WithURL("https://github.com/dominikh/go-tools/tree/master/unused"), + + linter.NewConfig(golinters.NewUseStdlibVars(&cfg.LintersSettings.UseStdlibVars)). + WithSince("v1.48.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/sashamelentyev/usestdlibvars"), + + linter.NewConfig(linter.NewNoopDeprecated("varcheck", cfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetUnused). + WithURL("https://github.com/opennota/check"). + Deprecated("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), + + linter.NewConfig(golinters.NewVarnamelen(&cfg.LintersSettings.Varnamelen)). + WithSince("v1.43.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/blizzy78/varnamelen"), + + linter.NewConfig(golinters.NewWastedAssign()). + WithSince("v1.38.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/sanposhiho/wastedassign"), + + linter.NewConfig(golinters.NewWhitespace(&cfg.LintersSettings.Whitespace)). + WithSince("v1.19.0"). + WithPresets(linter.PresetStyle). + WithAutoFix(). + WithURL("https://github.com/ultraware/whitespace"), + + linter.NewConfig(golinters.NewWrapcheck(&cfg.LintersSettings.Wrapcheck)). + WithSince("v1.32.0"). + WithPresets(linter.PresetStyle, linter.PresetError). + WithLoadForGoAnalysis(). + WithURL("https://github.com/tomarrell/wrapcheck"), + + linter.NewConfig(golinters.NewWSL(&cfg.LintersSettings.WSL)). + WithSince("v1.20.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/bombsimon/wsl"), + + linter.NewConfig(golinters.NewZerologLint()). + WithSince("v1.53.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/ykadowak/zerologlint"), + + // nolintlint must be last because it looks at the results of all the previous linters for unused nolint directives + linter.NewConfig(golinters.NewNoLintLint(&cfg.LintersSettings.NoLintLint)). + WithSince("v1.26.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/golangci/golangci-lint/blob/master/pkg/golinters/nolintlint/README.md"), + }, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_plugin_go.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_plugin_go.go new file mode 100644 index 0000000000..4c6b4b5988 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_plugin_go.go @@ -0,0 +1,139 @@ +package lintersdb + +import ( + "errors" + "fmt" + "path/filepath" + "plugin" + + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +const goPluginType = "goplugin" + +type AnalyzerPlugin interface { + GetAnalyzers() []*analysis.Analyzer +} + +// PluginGoBuilder builds the custom linters (Go plugin) based on the configuration. +type PluginGoBuilder struct { + log logutils.Log +} + +// NewPluginGoBuilder creates new PluginGoBuilder. +func NewPluginGoBuilder(log logutils.Log) *PluginGoBuilder { + return &PluginGoBuilder{log: log} +} + +// Build loads custom linters that are specified in the golangci-lint config file. 
+func (b *PluginGoBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { + if cfg == nil || b.log == nil { + return nil, nil + } + + var linters []*linter.Config + + for name, settings := range cfg.LintersSettings.Custom { + if settings.Type != goPluginType && settings.Type != "" { + continue + } + + settings := settings + + lc, err := b.loadConfig(cfg, name, &settings) + if err != nil { + return nil, fmt.Errorf("unable to load custom analyzer %q: %s, %w", name, settings.Path, err) + } else { + linters = append(linters, lc) + } + } + + return linters, nil +} + +// loadConfig loads the configuration of private linters. +// Private linters are dynamically loaded from .so plugin files. +func (b *PluginGoBuilder) loadConfig(cfg *config.Config, name string, settings *config.CustomLinterSettings) (*linter.Config, error) { + analyzers, err := b.getAnalyzerPlugin(cfg, settings.Path, settings.Settings) + if err != nil { + return nil, err + } + + b.log.Infof("Loaded %s: %s", settings.Path, name) + + customLinter := goanalysis.NewLinter(name, settings.Description, analyzers, nil). + WithLoadMode(goanalysis.LoadModeTypesInfo) + + linterConfig := linter.NewConfig(customLinter). + WithEnabledByDefault(). + WithLoadForGoAnalysis(). + WithURL(settings.OriginalURL) + + return linterConfig, nil +} + +// getAnalyzerPlugin loads a private linter as specified in the config file, +// loads the plugin from a .so file, +// and returns the 'AnalyzerPlugin' interface implemented by the private plugin. +// An error is returned if the private linter cannot be loaded +// or the linter does not implement the AnalyzerPlugin interface. +func (b *PluginGoBuilder) getAnalyzerPlugin(cfg *config.Config, path string, settings any) ([]*analysis.Analyzer, error) { + if !filepath.IsAbs(path) { + // resolve non-absolute paths relative to config file's directory + path = filepath.Join(cfg.GetConfigDir(), path) + } + + plug, err := plugin.Open(path) + if err != nil { + return nil, err + } + + analyzers, err := b.lookupPlugin(plug, settings) + if err != nil { + return nil, fmt.Errorf("lookup plugin %s: %w", path, err) + } + + return analyzers, nil +} + +func (b *PluginGoBuilder) lookupPlugin(plug *plugin.Plugin, settings any) ([]*analysis.Analyzer, error) { + symbol, err := plug.Lookup("New") + if err != nil { + analyzers, errP := b.lookupAnalyzerPlugin(plug) + if errP != nil { + return nil, errors.Join(err, errP) + } + + return analyzers, nil + } + + // The type func cannot be used here, must be the explicit signature. 
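[Editor's note: lookupPlugin continues below with the signature assertion the comment above describes. For contrast, this sketch (not part of the diff) shows the legacy form that lookupAnalyzerPlugin, further down, still accepts with a deprecation warning: the plugin exports a value implementing AnalyzerPlugin instead of a New constructor.]

package main

import "golang.org/x/tools/go/analysis"

type analyzerPlugin struct{}

func (analyzerPlugin) GetAnalyzers() []*analysis.Analyzer {
	// A real plugin returns its analyzers here.
	return nil
}

// AnalyzerPlugin is the exported symbol that plugin.Lookup("AnalyzerPlugin") finds.
var AnalyzerPlugin analyzerPlugin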
+ constructor, ok := symbol.(func(any) ([]*analysis.Analyzer, error)) + if !ok { + return nil, fmt.Errorf("plugin does not abide by 'New' function: %T", symbol) + } + + return constructor(settings) +} + +func (b *PluginGoBuilder) lookupAnalyzerPlugin(plug *plugin.Plugin) ([]*analysis.Analyzer, error) { + symbol, err := plug.Lookup("AnalyzerPlugin") + if err != nil { + return nil, err + } + + b.log.Warnf("plugin: 'AnalyzerPlugin' plugins are deprecated, please use the new plugin signature: " + + "https://golangci-lint.run/contributing/new-linters/#create-a-plugin") + + analyzerPlugin, ok := symbol.(AnalyzerPlugin) + if !ok { + return nil, fmt.Errorf("plugin does not abide by 'AnalyzerPlugin' interface: %T", symbol) + } + + return analyzerPlugin.GetAnalyzers(), nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_plugin_module.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_plugin_module.go new file mode 100644 index 0000000000..904b18bd5e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_plugin_module.go @@ -0,0 +1,85 @@ +package lintersdb + +import ( + "fmt" + "strings" + + "github.com/golangci/plugin-module-register/register" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +const modulePluginType = "module" + +// PluginModuleBuilder builds the custom linters (module plugin) based on the configuration. +type PluginModuleBuilder struct { + log logutils.Log +} + +// NewPluginModuleBuilder creates new PluginModuleBuilder. +func NewPluginModuleBuilder(log logutils.Log) *PluginModuleBuilder { + return &PluginModuleBuilder{log: log} +} + +// Build loads custom linters that are specified in the golangci-lint config file. +func (b *PluginModuleBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { + if cfg == nil || b.log == nil { + return nil, nil + } + + var linters []*linter.Config + + for name, settings := range cfg.LintersSettings.Custom { + if settings.Type != modulePluginType { + continue + } + + b.log.Infof("Loaded %s: %s", settings.Path, name) + + newPlugin, err := register.GetPlugin(name) + if err != nil { + return nil, fmt.Errorf("plugin(%s): %w", name, err) + } + + p, err := newPlugin(settings.Settings) + if err != nil { + return nil, fmt.Errorf("plugin(%s): newPlugin %w", name, err) + } + + analyzers, err := p.BuildAnalyzers() + if err != nil { + return nil, fmt.Errorf("plugin(%s): BuildAnalyzers %w", name, err) + } + + customLinter := goanalysis.NewLinter(name, settings.Description, analyzers, nil) + + switch strings.ToLower(p.GetLoadMode()) { + case register.LoadModeSyntax: + customLinter = customLinter.WithLoadMode(goanalysis.LoadModeSyntax) + case register.LoadModeTypesInfo: + customLinter = customLinter.WithLoadMode(goanalysis.LoadModeTypesInfo) + default: + customLinter = customLinter.WithLoadMode(goanalysis.LoadModeTypesInfo) + } + + lc := linter.NewConfig(customLinter). + WithEnabledByDefault(). 
+ WithURL(settings.OriginalURL) + + switch strings.ToLower(p.GetLoadMode()) { + case register.LoadModeSyntax: + // noop + case register.LoadModeTypesInfo: + lc = lc.WithLoadForGoAnalysis() + default: + lc = lc.WithLoadForGoAnalysis() + } + + linters = append(linters, lc) + } + + return linters, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/enabled_set.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/enabled_set.go deleted file mode 100644 index 9814aa857e..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/enabled_set.go +++ /dev/null @@ -1,207 +0,0 @@ -package lintersdb - -import ( - "os" - "sort" - "strings" - - "github.com/golangci/golangci-lint/pkg/config" - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/logutils" -) - -type EnabledSet struct { - m *Manager - v *Validator - log logutils.Log - cfg *config.Config - debugf logutils.DebugFunc -} - -func NewEnabledSet(m *Manager, v *Validator, log logutils.Log, cfg *config.Config) *EnabledSet { - return &EnabledSet{ - m: m, - v: v, - log: log, - cfg: cfg, - debugf: logutils.Debug("enabled_linters"), - } -} - -func (es EnabledSet) build(lcfg *config.Linters, enabledByDefaultLinters []*linter.Config) map[string]*linter.Config { - es.debugf("Linters config: %#v", lcfg) - resultLintersSet := map[string]*linter.Config{} - switch { - case len(lcfg.Presets) != 0: - break // imply --disable-all - case lcfg.EnableAll: - resultLintersSet = linterConfigsToMap(es.m.GetAllSupportedLinterConfigs()) - case lcfg.DisableAll: - break - default: - resultLintersSet = linterConfigsToMap(enabledByDefaultLinters) - } - - // --presets can only add linters to default set - for _, p := range lcfg.Presets { - for _, lc := range es.m.GetAllLinterConfigsForPreset(p) { - lc := lc - resultLintersSet[lc.Name()] = lc - } - } - - // --fast removes slow linters from current set. - // It should be after --presets to be able to run only fast linters in preset. - // It should be before --enable and --disable to be able to enable or disable specific linter. - if lcfg.Fast { - for name, lc := range resultLintersSet { - if lc.IsSlowLinter() { - delete(resultLintersSet, name) - } - } - } - - for _, name := range lcfg.Enable { - for _, lc := range es.m.GetLinterConfigs(name) { - // it's important to use lc.Name() nor name because name can be alias - resultLintersSet[lc.Name()] = lc - } - } - - for _, name := range lcfg.Disable { - for _, lc := range es.m.GetLinterConfigs(name) { - // it's important to use lc.Name() nor name because name can be alias - delete(resultLintersSet, lc.Name()) - } - } - - return resultLintersSet -} - -func (es EnabledSet) GetEnabledLintersMap() (map[string]*linter.Config, error) { - if err := es.v.validateEnabledDisabledLintersConfig(&es.cfg.Linters); err != nil { - return nil, err - } - - enabledLinters := es.build(&es.cfg.Linters, es.m.GetAllEnabledByDefaultLinters()) - if os.Getenv("GL_TEST_RUN") == "1" { - es.verbosePrintLintersStatus(enabledLinters) - } - return enabledLinters, nil -} - -// GetOptimizedLinters returns enabled linters after optimization (merging) of multiple linters -// into a fewer number of linters. E.g. some go/analysis linters can be optimized into -// one metalinter for data reuse and speed up. 
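[Editor's note: a minimal sketch, not part of the diff, of the merge described in the removed comment above: compatible go/analysis linters are combined into a single metalinter so their analyzers share one package-load pass. It uses goanalysis.NewMetaLinter, the same helper the removed combineGoAnalysisLinters calls below; the two linters chosen are arbitrary.]

package main

import (
	"fmt"

	"github.com/golangci/golangci-lint/pkg/golinters"
	"github.com/golangci/golangci-lint/pkg/golinters/goanalysis"
)

func main() {
	parts := []*goanalysis.Linter{
		golinters.NewBodyclose(),
		golinters.NewNoctx(),
	}

	// The metalinter runs all contained analyzers over one shared load of the
	// packages instead of loading them once per linter.
	ml := goanalysis.NewMetaLinter(parts)
	fmt.Println(ml.Name())
}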
-func (es EnabledSet) GetOptimizedLinters() ([]*linter.Config, error) { - if err := es.v.validateEnabledDisabledLintersConfig(&es.cfg.Linters); err != nil { - return nil, err - } - - resultLintersSet := es.build(&es.cfg.Linters, es.m.GetAllEnabledByDefaultLinters()) - es.verbosePrintLintersStatus(resultLintersSet) - es.combineGoAnalysisLinters(resultLintersSet) - - var resultLinters []*linter.Config - for _, lc := range resultLintersSet { - resultLinters = append(resultLinters, lc) - } - - // Make order of execution of linters (go/analysis metalinter and unused) stable. - sort.Slice(resultLinters, func(i, j int) bool { - a, b := resultLinters[i], resultLinters[j] - - if b.Name() == linter.LastLinter { - return true - } - - if a.Name() == linter.LastLinter { - return false - } - - if a.DoesChangeTypes != b.DoesChangeTypes { - return b.DoesChangeTypes // move type-changing linters to the end to optimize speed - } - return strings.Compare(a.Name(), b.Name()) < 0 - }) - - return resultLinters, nil -} - -func (es EnabledSet) combineGoAnalysisLinters(linters map[string]*linter.Config) { - var goanalysisLinters []*goanalysis.Linter - goanalysisPresets := map[string]bool{} - for _, linter := range linters { - lnt, ok := linter.Linter.(*goanalysis.Linter) - if !ok { - continue - } - if lnt.LoadMode() == goanalysis.LoadModeWholeProgram { - // It's ineffective by CPU and memory to run whole-program and incremental analyzers at once. - continue - } - goanalysisLinters = append(goanalysisLinters, lnt) - for _, p := range linter.InPresets { - goanalysisPresets[p] = true - } - } - - if len(goanalysisLinters) <= 1 { - es.debugf("Didn't combine go/analysis linters: got only %d linters", len(goanalysisLinters)) - return - } - - for _, lnt := range goanalysisLinters { - delete(linters, lnt.Name()) - } - - // Make order of execution of go/analysis analyzers stable. 
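[Editor's note: a stdlib-only sketch, not part of the diff, of the ordering rule implemented by the removed comparators in this hunk: linter.LastLinter (nolintlint, which inspects the other linters' results) must sort last, and everything else sorts alphabetically for a stable execution order.]

package main

import (
	"fmt"
	"sort"
)

const lastLinter = "nolintlint" // mirrors linter.LastLinter in this diff

func main() {
	names := []string{"wsl", lastLinter, "errcheck", "govet"}

	sort.Slice(names, func(i, j int) bool {
		a, b := names[i], names[j]
		if b == lastLinter {
			return true // everything sorts before the last linter
		}
		if a == lastLinter {
			return false
		}
		return a < b
	})

	fmt.Println(names) // [errcheck govet wsl nolintlint]
}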
- sort.Slice(goanalysisLinters, func(i, j int) bool { - a, b := goanalysisLinters[i], goanalysisLinters[j] - - if b.Name() == linter.LastLinter { - return true - } - - if a.Name() == linter.LastLinter { - return false - } - - return strings.Compare(a.Name(), b.Name()) <= 0 - }) - - ml := goanalysis.NewMetaLinter(goanalysisLinters) - - var presets []string - for p := range goanalysisPresets { - presets = append(presets, p) - } - - mlConfig := &linter.Config{ - Linter: ml, - EnabledByDefault: false, - InPresets: presets, - AlternativeNames: nil, - OriginalURL: "", - } - - mlConfig = mlConfig.WithLoadForGoAnalysis() - - linters[ml.Name()] = mlConfig - es.debugf("Combined %d go/analysis linters into one metalinter", len(goanalysisLinters)) -} - -func (es EnabledSet) verbosePrintLintersStatus(lcs map[string]*linter.Config) { - var linterNames []string - for _, lc := range lcs { - linterNames = append(linterNames, lc.Name()) - } - sort.StringSlice(linterNames).Sort() - es.log.Infof("Active %d linters: %s", len(linterNames), linterNames) - - if len(es.cfg.Linters.Presets) != 0 { - sort.StringSlice(es.cfg.Linters.Presets).Sort() - es.log.Infof("Active presets: %s", es.cfg.Linters.Presets) - } -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go index fc760dc34a..9816605892 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go @@ -2,533 +2,139 @@ package lintersdb import ( "fmt" - "path/filepath" - "plugin" + "os" + "slices" + "sort" - "github.com/spf13/viper" - "golang.org/x/tools/go/analysis" + "golang.org/x/exp/maps" "github.com/golangci/golangci-lint/pkg/config" - "github.com/golangci/golangci-lint/pkg/golinters" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/report" ) +type Builder interface { + Build(cfg *config.Config) ([]*linter.Config, error) +} + +// Manager is a type of database for all linters (internals or plugins). +// It provides methods to access to the linter sets. type Manager struct { + log logutils.Log + debugf logutils.DebugFunc + + cfg *config.Config + + linters []*linter.Config + nameToLCs map[string][]*linter.Config - cfg *config.Config - log logutils.Log } -func NewManager(cfg *config.Config, log logutils.Log) *Manager { - m := &Manager{cfg: cfg, log: log} - nameToLCs := make(map[string][]*linter.Config) - for _, lc := range m.GetAllSupportedLinterConfigs() { - for _, name := range lc.AllNames() { - nameToLCs[name] = append(nameToLCs[name], lc) - } +// NewManager creates a new Manager. +// This constructor will call the builders to build and store the linters. 
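[Editor's note: NewManager follows below. This sketch, not part of the diff, shows how the constructor is wired together with the three Builder implementations added in this change; the real call site lives outside this hunk, and the snippet assumes the vendored packages are importable.]

package main

import (
	"fmt"

	"github.com/golangci/golangci-lint/pkg/config"
	"github.com/golangci/golangci-lint/pkg/lint/lintersdb"
	"github.com/golangci/golangci-lint/pkg/logutils"
)

func main() {
	log := logutils.NewStderrLog("")
	cfg := config.NewDefault()

	// Each builder contributes linter configs: the internal database first,
	// then module plugins, then Go plugins.
	m, err := lintersdb.NewManager(log, cfg,
		lintersdb.NewLinterBuilder(),
		lintersdb.NewPluginModuleBuilder(log),
		lintersdb.NewPluginGoBuilder(log),
	)
	if err != nil {
		panic(err)
	}

	fmt.Println(len(m.GetAllSupportedLinterConfigs()))
}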
+func NewManager(log logutils.Log, cfg *config.Config, builders ...Builder) (*Manager, error) { + m := &Manager{ + log: log, + debugf: logutils.Debug(logutils.DebugKeyEnabledLinters), + nameToLCs: make(map[string][]*linter.Config), } - m.nameToLCs = nameToLCs - return m -} - -func (m *Manager) WithCustomLinters() *Manager { - if m.log == nil { - m.log = report.NewLogWrapper(logutils.NewStderrLog(""), &report.Data{}) + m.cfg = cfg + if cfg == nil { + m.cfg = config.NewDefault() } - if m.cfg != nil { - for name, settings := range m.cfg.LintersSettings.Custom { - lc, err := m.loadCustomLinterConfig(name, settings) - - if err != nil { - m.log.Errorf("Unable to load custom analyzer %s:%s, %v", - name, - settings.Path, - err) - } else { - m.nameToLCs[name] = append(m.nameToLCs[name], lc) - } + + for _, builder := range builders { + linters, err := builder.Build(m.cfg) + if err != nil { + return nil, fmt.Errorf("build linters: %w", err) } + + m.linters = append(m.linters, linters...) } - return m -} -func (Manager) AllPresets() []string { - return []string{ - linter.PresetBugs, - linter.PresetComment, - linter.PresetComplexity, - linter.PresetError, - linter.PresetFormatting, - linter.PresetImport, - linter.PresetMetaLinter, - linter.PresetModule, - linter.PresetPerformance, - linter.PresetSQL, - linter.PresetStyle, - linter.PresetTest, - linter.PresetUnused, + for _, lc := range m.linters { + for _, name := range lc.AllNames() { + m.nameToLCs[name] = append(m.nameToLCs[name], lc) + } } -} -func (m Manager) allPresetsSet() map[string]bool { - ret := map[string]bool{} - for _, p := range m.AllPresets() { - ret[p] = true + err := NewValidator(m).Validate(m.cfg) + if err != nil { + return nil, err } - return ret + + return m, nil } -func (m Manager) GetLinterConfigs(name string) []*linter.Config { +func (m *Manager) GetLinterConfigs(name string) []*linter.Config { return m.nameToLCs[name] } -func enableLinterConfigs(lcs []*linter.Config, isEnabled func(lc *linter.Config) bool) []*linter.Config { +func (m *Manager) GetAllSupportedLinterConfigs() []*linter.Config { + return m.linters +} + +func (m *Manager) GetAllLinterConfigsForPreset(p string) []*linter.Config { var ret []*linter.Config - for _, lc := range lcs { - lc := lc - lc.EnabledByDefault = isEnabled(lc) - ret = append(ret, lc) + for _, lc := range m.linters { + if lc.IsDeprecated() { + continue + } + + if slices.Contains(lc.InPresets, p) { + ret = append(ret, lc) + } } return ret } -//nolint:funlen -func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config { - var govetCfg *config.GovetSettings - var testpackageCfg *config.TestpackageSettings - var exhaustiveCfg *config.ExhaustiveSettings - var exhaustiveStructCfg *config.ExhaustiveStructSettings - var errorlintCfg *config.ErrorLintSettings - var thelperCfg *config.ThelperSettings - var predeclaredCfg *config.PredeclaredSettings - var ifshortCfg *config.IfshortSettings - var reviveCfg *config.ReviveSettings - var cyclopCfg *config.Cyclop - var importAsCfg *config.ImportAsSettings - var goModDirectivesCfg *config.GoModDirectivesSettings - var tagliatelleCfg *config.TagliatelleSettings - var gosecCfg *config.GoSecSettings - var gosimpleCfg *config.StaticCheckSettings - var staticcheckCfg *config.StaticCheckSettings - var stylecheckCfg *config.StaticCheckSettings - var unusedCfg *config.StaticCheckSettings - var wrapcheckCfg *config.WrapcheckSettings - - if m.cfg != nil { - govetCfg = &m.cfg.LintersSettings.Govet - testpackageCfg = &m.cfg.LintersSettings.Testpackage - exhaustiveCfg = 
&m.cfg.LintersSettings.Exhaustive - exhaustiveStructCfg = &m.cfg.LintersSettings.ExhaustiveStruct - errorlintCfg = &m.cfg.LintersSettings.ErrorLint - thelperCfg = &m.cfg.LintersSettings.Thelper - predeclaredCfg = &m.cfg.LintersSettings.Predeclared - ifshortCfg = &m.cfg.LintersSettings.Ifshort - reviveCfg = &m.cfg.LintersSettings.Revive - cyclopCfg = &m.cfg.LintersSettings.Cyclop - importAsCfg = &m.cfg.LintersSettings.ImportAs - goModDirectivesCfg = &m.cfg.LintersSettings.GoModDirectives - tagliatelleCfg = &m.cfg.LintersSettings.Tagliatelle - gosecCfg = &m.cfg.LintersSettings.Gosec - gosimpleCfg = &m.cfg.LintersSettings.Gosimple - staticcheckCfg = &m.cfg.LintersSettings.Staticcheck - stylecheckCfg = &m.cfg.LintersSettings.Stylecheck - unusedCfg = &m.cfg.LintersSettings.Unused - wrapcheckCfg = &m.cfg.LintersSettings.Wrapcheck - } +func (m *Manager) GetEnabledLintersMap() (map[string]*linter.Config, error) { + enabledLinters := m.build(m.GetAllEnabledByDefaultLinters()) - const megacheckName = "megacheck" - - lcs := []*linter.Config{ - linter.NewConfig(golinters.NewGovet(govetCfg)). - WithSince("v1.0.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs, linter.PresetMetaLinter). - WithAlternativeNames("vet", "vetshadow"). - WithURL("https://golang.org/cmd/vet/"), - linter.NewConfig(golinters.NewBodyclose()). - WithSince("v1.18.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetPerformance, linter.PresetBugs). - WithURL("https://github.com/timakin/bodyclose"), - linter.NewConfig(golinters.NewNoctx()). - WithSince("v1.28.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetPerformance, linter.PresetBugs). - WithURL("https://github.com/sonatard/noctx"), - linter.NewConfig(golinters.NewErrcheck()). - WithSince("v1.0.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs, linter.PresetError). - WithURL("https://github.com/kisielk/errcheck"), - linter.NewConfig(golinters.NewGolint()). - WithSince("v1.0.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/golang/lint"). - Deprecated("The repository of the linter has been archived by the owner.", "v1.41.0", "revive"), - linter.NewConfig(golinters.NewRowsErrCheck()). - WithSince("v1.23.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs, linter.PresetSQL). - WithURL("https://github.com/jingyugao/rowserrcheck"), - - linter.NewConfig(golinters.NewStaticcheck(staticcheckCfg)). - WithSince("v1.0.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs, linter.PresetMetaLinter). - WithAlternativeNames(megacheckName). - WithURL("https://staticcheck.io/"), - linter.NewConfig(golinters.NewUnused(unusedCfg)). - WithSince("v1.20.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetUnused). - WithAlternativeNames(megacheckName). - ConsiderSlow(). - WithChangeTypes(). - WithURL("https://github.com/dominikh/go-tools/tree/master/unused"), - linter.NewConfig(golinters.NewGosimple(gosimpleCfg)). - WithSince("v1.20.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetStyle). - WithAlternativeNames(megacheckName). - WithURL("https://github.com/dominikh/go-tools/tree/master/simple"), - - linter.NewConfig(golinters.NewStylecheck(stylecheckCfg)). - WithSince("v1.20.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/dominikh/go-tools/tree/master/stylecheck"), - linter.NewConfig(golinters.NewGosec(gosecCfg)). - WithSince("v1.0.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs). 
- WithURL("https://github.com/securego/gosec"). - WithAlternativeNames("gas"), - linter.NewConfig(golinters.NewStructcheck()). - WithSince("v1.0.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetUnused). - WithURL("https://github.com/opennota/check"), - linter.NewConfig(golinters.NewVarcheck()). - WithSince("v1.0.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetUnused). - WithURL("https://github.com/opennota/check"), - linter.NewConfig(golinters.NewInterfacer()). - WithSince("v1.0.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/mvdan/interfacer"). - Deprecated("The repository of the linter has been archived by the owner.", "v1.38.0", ""), - linter.NewConfig(golinters.NewUnconvert()). - WithSince("v1.0.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/mdempsky/unconvert"), - linter.NewConfig(golinters.NewIneffassign()). - WithSince("v1.0.0"). - WithPresets(linter.PresetUnused). - WithURL("https://github.com/gordonklaus/ineffassign"), - linter.NewConfig(golinters.NewDupl()). - WithSince("v1.0.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/mibk/dupl"), - linter.NewConfig(golinters.NewGoconst()). - WithSince("v1.0.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/jgautheron/goconst"), - linter.NewConfig(golinters.NewDeadcode()). - WithSince("v1.0.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetUnused). - WithURL("https://github.com/remyoudompheng/go-misc/tree/master/deadcode"), - linter.NewConfig(golinters.NewGocyclo()). - WithSince("v1.0.0"). - WithPresets(linter.PresetComplexity). - WithURL("https://github.com/fzipp/gocyclo"), - linter.NewConfig(golinters.NewCyclop(cyclopCfg)). - WithSince("v1.37.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetComplexity). - WithURL("https://github.com/bkielbasa/cyclop"), - linter.NewConfig(golinters.NewGocognit()). - WithSince("v1.20.0"). - WithPresets(linter.PresetComplexity). - WithURL("https://github.com/uudashr/gocognit"), - linter.NewConfig(golinters.NewTypecheck()). - WithSince("v1.3.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs). - WithURL(""), - linter.NewConfig(golinters.NewAsciicheck()). - WithSince("v1.26.0"). - WithPresets(linter.PresetBugs, linter.PresetStyle). - WithURL("https://github.com/tdakkota/asciicheck"), - - linter.NewConfig(golinters.NewGofmt()). - WithSince("v1.0.0"). - WithPresets(linter.PresetFormatting). - WithAutoFix(). - WithURL("https://golang.org/cmd/gofmt/"), - linter.NewConfig(golinters.NewGofumpt()). - WithSince("v1.28.0"). - WithPresets(linter.PresetFormatting). - WithAutoFix(). - WithURL("https://github.com/mvdan/gofumpt"), - linter.NewConfig(golinters.NewGoimports()). - WithSince("v1.20.0"). - WithPresets(linter.PresetFormatting, linter.PresetImport). - WithAutoFix(). - WithURL("https://godoc.org/golang.org/x/tools/cmd/goimports"), - linter.NewConfig(golinters.NewGoHeader()). - WithSince("v1.28.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/denis-tingajkin/go-header"), - linter.NewConfig(golinters.NewGci()). - WithSince("v1.30.0"). - WithPresets(linter.PresetFormatting, linter.PresetImport). - WithAutoFix(). - WithURL("https://github.com/daixiang0/gci"), - linter.NewConfig(golinters.NewMaligned()). - WithSince("v1.0.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetPerformance). - WithURL("https://github.com/mdempsky/maligned"). 
- Deprecated("The repository of the linter has been archived by the owner.", "v1.38.0", "govet 'fieldalignment'"), - linter.NewConfig(golinters.NewDepguard()). - WithSince("v1.4.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetStyle, linter.PresetImport, linter.PresetModule). - WithURL("https://github.com/OpenPeeDeeP/depguard"), - linter.NewConfig(golinters.NewMisspell()). - WithSince("v1.8.0"). - WithPresets(linter.PresetStyle, linter.PresetComment). - WithAutoFix(). - WithURL("https://github.com/client9/misspell"), - linter.NewConfig(golinters.NewLLL()). - WithSince("v1.8.0"). - WithPresets(linter.PresetStyle), - linter.NewConfig(golinters.NewUnparam()). - WithSince("v1.9.0"). - WithPresets(linter.PresetUnused). - WithLoadForGoAnalysis(). - WithURL("https://github.com/mvdan/unparam"), - linter.NewConfig(golinters.NewDogsled()). - WithSince("v1.19.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/alexkohler/dogsled"), - linter.NewConfig(golinters.NewNakedret()). - WithSince("v1.19.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/alexkohler/nakedret"), - linter.NewConfig(golinters.NewPrealloc()). - WithSince("v1.19.0"). - WithPresets(linter.PresetPerformance). - WithURL("https://github.com/alexkohler/prealloc"), - linter.NewConfig(golinters.NewScopelint()). - WithSince("v1.12.0"). - WithPresets(linter.PresetBugs). - WithURL("https://github.com/kyoh86/scopelint"). - Deprecated("The repository of the linter has been deprecated by the owner.", "v1.39.0", "exportloopref"), - linter.NewConfig(golinters.NewGocritic()). - WithSince("v1.12.0"). - WithPresets(linter.PresetStyle, linter.PresetMetaLinter). - WithLoadForGoAnalysis(). - WithURL("https://github.com/go-critic/go-critic"), - linter.NewConfig(golinters.NewGochecknoinits()). - WithSince("v1.12.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/leighmcculloch/gochecknoinits"), - linter.NewConfig(golinters.NewGochecknoglobals()). - WithSince("v1.12.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/leighmcculloch/gochecknoglobals"), - linter.NewConfig(golinters.NewGodox()). - WithSince("v1.19.0"). - WithPresets(linter.PresetStyle, linter.PresetComment). - WithURL("https://github.com/matoous/godox"), - linter.NewConfig(golinters.NewFunlen()). - WithSince("v1.18.0"). - WithPresets(linter.PresetComplexity). - WithURL("https://github.com/ultraware/funlen"), - linter.NewConfig(golinters.NewWhitespace()). - WithSince("v1.19.0"). - WithPresets(linter.PresetStyle). - WithAutoFix(). - WithURL("https://github.com/ultraware/whitespace"), - linter.NewConfig(golinters.NewWSL()). - WithSince("v1.20.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/bombsimon/wsl"), - linter.NewConfig(golinters.NewGoPrintfFuncName()). - WithSince("v1.23.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/jirfag/go-printf-func-name"), - linter.NewConfig(golinters.NewGoMND(m.cfg)). - WithSince("v1.22.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/tommy-muehle/go-mnd"), - linter.NewConfig(golinters.NewGoerr113()). - WithSince("v1.26.0"). - WithPresets(linter.PresetStyle, linter.PresetError). - WithLoadForGoAnalysis(). - WithURL("https://github.com/Djarvur/go-err113"), - linter.NewConfig(golinters.NewGomodguard()). - WithSince("v1.25.0"). - WithPresets(linter.PresetStyle, linter.PresetImport, linter.PresetModule). - WithURL("https://github.com/ryancurrah/gomodguard"), - linter.NewConfig(golinters.NewGodot()). 
- WithSince("v1.25.0"). - WithPresets(linter.PresetStyle, linter.PresetComment). - WithAutoFix(). - WithURL("https://github.com/tetafro/godot"), - linter.NewConfig(golinters.NewTestpackage(testpackageCfg)). - WithSince("v1.25.0"). - WithPresets(linter.PresetStyle, linter.PresetTest). - WithURL("https://github.com/maratori/testpackage"), - linter.NewConfig(golinters.NewNestif()). - WithSince("v1.25.0"). - WithPresets(linter.PresetComplexity). - WithURL("https://github.com/nakabonne/nestif"), - linter.NewConfig(golinters.NewExportLoopRef()). - WithSince("v1.28.0"). - WithPresets(linter.PresetBugs). - WithLoadForGoAnalysis(). - WithURL("https://github.com/kyoh86/exportloopref"), - linter.NewConfig(golinters.NewExhaustive(exhaustiveCfg)). - WithSince(" v1.28.0"). - WithPresets(linter.PresetBugs). - WithLoadForGoAnalysis(). - WithURL("https://github.com/nishanths/exhaustive"), - linter.NewConfig(golinters.NewSQLCloseCheck()). - WithSince("v1.28.0"). - WithPresets(linter.PresetBugs, linter.PresetSQL). - WithLoadForGoAnalysis(). - WithURL("https://github.com/ryanrolds/sqlclosecheck"), - linter.NewConfig(golinters.NewNLReturn()). - WithSince("v1.30.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/ssgreg/nlreturn"), - linter.NewConfig(golinters.NewWrapcheck(wrapcheckCfg)). - WithSince("v1.32.0"). - WithPresets(linter.PresetStyle, linter.PresetError). - WithLoadForGoAnalysis(). - WithURL("https://github.com/tomarrell/wrapcheck"), - linter.NewConfig(golinters.NewThelper(thelperCfg)). - WithSince("v1.34.0"). - WithPresets(linter.PresetStyle). - WithLoadForGoAnalysis(). - WithURL("https://github.com/kulti/thelper"), - linter.NewConfig(golinters.NewTparallel()). - WithSince("v1.32.0"). - WithPresets(linter.PresetStyle, linter.PresetTest). - WithLoadForGoAnalysis(). - WithURL("https://github.com/moricho/tparallel"), - linter.NewConfig(golinters.NewExhaustiveStruct(exhaustiveStructCfg)). - WithSince("v1.32.0"). - WithPresets(linter.PresetStyle, linter.PresetTest). - WithLoadForGoAnalysis(). - WithURL("https://github.com/mbilski/exhaustivestruct"), - linter.NewConfig(golinters.NewErrorLint(errorlintCfg)). - WithSince("v1.32.0"). - WithPresets(linter.PresetBugs, linter.PresetError). - WithLoadForGoAnalysis(). - WithURL("https://github.com/polyfloyd/go-errorlint"), - linter.NewConfig(golinters.NewParallelTest()). - WithSince("v1.33.0"). - WithPresets(linter.PresetStyle, linter.PresetTest). - WithURL("https://github.com/kunwardeep/paralleltest"), - linter.NewConfig(golinters.NewMakezero()). - WithSince("v1.34.0"). - WithPresets(linter.PresetStyle, linter.PresetBugs). - WithLoadForGoAnalysis(). - WithURL("https://github.com/ashanbrown/makezero"), - linter.NewConfig(golinters.NewForbidigo()). - WithSince("v1.34.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/ashanbrown/forbidigo"), - linter.NewConfig(golinters.NewIfshort(ifshortCfg)). - WithSince("v1.36.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/esimonov/ifshort"), - linter.NewConfig(golinters.NewPredeclared(predeclaredCfg)). - WithSince("v1.35.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/nishanths/predeclared"), - linter.NewConfig(golinters.NewRevive(reviveCfg)). - WithSince("v1.37.0"). - WithPresets(linter.PresetStyle, linter.PresetMetaLinter). - ConsiderSlow(). - WithURL("https://github.com/mgechev/revive"), - linter.NewConfig(golinters.NewDurationCheck()). - WithSince("v1.37.0"). - WithPresets(linter.PresetBugs). - WithLoadForGoAnalysis(). 
- WithURL("https://github.com/charithe/durationcheck"), - linter.NewConfig(golinters.NewWastedAssign()). - WithSince("v1.38.0"). - WithPresets(linter.PresetStyle). - WithLoadForGoAnalysis(). - WithURL("https://github.com/sanposhiho/wastedassign"), - linter.NewConfig(golinters.NewImportAs(importAsCfg)). - WithSince("v1.38.0"). - WithPresets(linter.PresetStyle). - WithLoadForGoAnalysis(). - WithURL("https://github.com/julz/importas"), - linter.NewConfig(golinters.NewNilErr()). - WithSince("v1.38.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs). - WithURL("https://github.com/gostaticanalysis/nilerr"), - linter.NewConfig(golinters.NewForceTypeAssert()). - WithSince("v1.38.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/gostaticanalysis/forcetypeassert"), - linter.NewConfig(golinters.NewGoModDirectives(goModDirectivesCfg)). - WithSince("v1.39.0"). - WithPresets(linter.PresetStyle, linter.PresetModule). - WithURL("https://github.com/ldez/gomoddirectives"), - linter.NewConfig(golinters.NewPromlinter()). - WithSince("v1.40.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/yeya24/promlinter"), - linter.NewConfig(golinters.NewTagliatelle(tagliatelleCfg)). - WithSince("v1.40.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/ldez/tagliatelle"), - - // nolintlint must be last because it looks at the results of all the previous linters for unused nolint directives - linter.NewConfig(golinters.NewNoLintLint()). - WithSince("v1.26.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/golangci/golangci-lint/blob/master/pkg/golinters/nolintlint/README.md"), + if os.Getenv(logutils.EnvTestRun) == "1" { + m.verbosePrintLintersStatus(enabledLinters) } - enabledByDefault := map[string]bool{ - golinters.NewGovet(nil).Name(): true, - golinters.NewErrcheck().Name(): true, - golinters.NewStaticcheck(staticcheckCfg).Name(): true, - golinters.NewUnused(unusedCfg).Name(): true, - golinters.NewGosimple(gosimpleCfg).Name(): true, - golinters.NewStructcheck().Name(): true, - golinters.NewVarcheck().Name(): true, - golinters.NewIneffassign().Name(): true, - golinters.NewDeadcode().Name(): true, - golinters.NewTypecheck().Name(): true, - } - return enableLinterConfigs(lcs, func(lc *linter.Config) bool { - return enabledByDefault[lc.Name()] + return enabledLinters, nil +} + +// GetOptimizedLinters returns enabled linters after optimization (merging) of multiple linters into a fewer number of linters. +// E.g. some go/analysis linters can be optimized into one metalinter for data reuse and speed up. +func (m *Manager) GetOptimizedLinters() ([]*linter.Config, error) { + resultLintersSet := m.build(m.GetAllEnabledByDefaultLinters()) + m.verbosePrintLintersStatus(resultLintersSet) + + m.combineGoAnalysisLinters(resultLintersSet) + + resultLinters := maps.Values(resultLintersSet) + + // Make order of execution of linters (go/analysis metalinter and unused) stable. 
+ sort.Slice(resultLinters, func(i, j int) bool { + a, b := resultLinters[i], resultLinters[j] + + if b.Name() == linter.LastLinter { + return true + } + + if a.Name() == linter.LastLinter { + return false + } + + if a.DoesChangeTypes != b.DoesChangeTypes { + return b.DoesChangeTypes // move type-changing linters to the end to optimize speed + } + return a.Name() < b.Name() }) + + return resultLinters, nil } -func (m Manager) GetAllEnabledByDefaultLinters() []*linter.Config { +func (m *Manager) GetAllEnabledByDefaultLinters() []*linter.Config { var ret []*linter.Config - for _, lc := range m.GetAllSupportedLinterConfigs() { + for _, lc := range m.linters { if lc.EnabledByDefault { ret = append(ret, lc) } @@ -537,77 +143,168 @@ func (m Manager) GetAllEnabledByDefaultLinters() []*linter.Config { return ret } -func linterConfigsToMap(lcs []*linter.Config) map[string]*linter.Config { - ret := map[string]*linter.Config{} - for _, lc := range lcs { - lc := lc // local copy - ret[lc.Name()] = lc +//nolint:gocyclo // the complexity cannot be reduced. +func (m *Manager) build(enabledByDefaultLinters []*linter.Config) map[string]*linter.Config { + m.debugf("Linters config: %#v", m.cfg.Linters) + + resultLintersSet := map[string]*linter.Config{} + switch { + case m.cfg.Linters.DisableAll: + // no default linters + case len(m.cfg.Linters.Presets) != 0: + // imply --disable-all + case m.cfg.Linters.EnableAll: + resultLintersSet = linterConfigsToMap(m.linters) + default: + resultLintersSet = linterConfigsToMap(enabledByDefaultLinters) } - return ret -} + // --presets can only add linters to default set + for _, p := range m.cfg.Linters.Presets { + for _, lc := range m.GetAllLinterConfigsForPreset(p) { + lc := lc + resultLintersSet[lc.Name()] = lc + } + } -func (m Manager) GetAllLinterConfigsForPreset(p string) []*linter.Config { - var ret []*linter.Config - for _, lc := range m.GetAllSupportedLinterConfigs() { - for _, ip := range lc.InPresets { - if p == ip { - ret = append(ret, lc) - break + // --fast removes slow linters from current set. + // It should be after --presets to be able to run only fast linters in preset. + // It should be before --enable and --disable to be able to enable or disable specific linter. + if m.cfg.Linters.Fast { + for name, lc := range resultLintersSet { + if lc.IsSlowLinter() { + delete(resultLintersSet, name) } } } - return ret -} + for _, name := range m.cfg.Linters.Enable { + for _, lc := range m.GetLinterConfigs(name) { + // it's important to use lc.Name(), not name, because name can be an alias + resultLintersSet[lc.Name()] = lc + } + } -func (m Manager) loadCustomLinterConfig(name string, settings config.CustomLinterSettings) (*linter.Config, error) { - analyzer, err := m.getAnalyzerPlugin(settings.Path) - if err != nil { - return nil, err + for _, name := range m.cfg.Linters.Disable { + for _, lc := range m.GetLinterConfigs(name) { + // it's important to use lc.Name(), not name, because name can be an alias + delete(resultLintersSet, lc.Name()) + } + } + + // typecheck is not a real linter and cannot be disabled.
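+	// For example (hypothetical invocation): with `--disable-all --enable=govet`,
+	// the result set still ends up as {govet, typecheck}, so compilation
+	// errors are always reported.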
+ if _, ok := resultLintersSet["typecheck"]; !ok && (m.cfg == nil || !m.cfg.InternalCmdTest) { + for _, lc := range m.GetLinterConfigs("typecheck") { + // it's important to use lc.Name(), not name, because name can be an alias + resultLintersSet[lc.Name()] = lc + } } - m.log.Infof("Loaded %s: %s", settings.Path, name) - customLinter := goanalysis.NewLinter( - name, - settings.Description, - analyzer.GetAnalyzers(), - nil).WithLoadMode(goanalysis.LoadModeTypesInfo) - linterConfig := linter.NewConfig(customLinter) - linterConfig.EnabledByDefault = true - linterConfig.IsSlow = false - linterConfig.WithURL(settings.OriginalURL) - return linterConfig, nil + + return resultLintersSet } -type AnalyzerPlugin interface { - GetAnalyzers() []*analysis.Analyzer +func (m *Manager) combineGoAnalysisLinters(linters map[string]*linter.Config) { + var goanalysisLinters []*goanalysis.Linter + goanalysisPresets := map[string]bool{} + for _, lc := range linters { + lnt, ok := lc.Linter.(*goanalysis.Linter) + if !ok { + continue + } + if lnt.LoadMode() == goanalysis.LoadModeWholeProgram { + // It's inefficient in CPU and memory to run whole-program and incremental analyzers at once. + continue + } + goanalysisLinters = append(goanalysisLinters, lnt) + for _, p := range lc.InPresets { + goanalysisPresets[p] = true + } + } + + if len(goanalysisLinters) <= 1 { + m.debugf("Didn't combine go/analysis linters: got only %d linters", len(goanalysisLinters)) + return + } + + for _, lnt := range goanalysisLinters { + delete(linters, lnt.Name()) + } + + // Make order of execution of go/analysis analyzers stable. + sort.Slice(goanalysisLinters, func(i, j int) bool { + a, b := goanalysisLinters[i], goanalysisLinters[j] + + if b.Name() == linter.LastLinter { + return true + } + + if a.Name() == linter.LastLinter { + return false + } + + return a.Name() <= b.Name() + }) + + ml := goanalysis.NewMetaLinter(goanalysisLinters) + + presets := maps.Keys(goanalysisPresets) + sort.Strings(presets) + + mlConfig := &linter.Config{ + Linter: ml, + EnabledByDefault: false, + InPresets: presets, + AlternativeNames: nil, + OriginalURL: "", + } + + mlConfig = mlConfig.WithLoadForGoAnalysis() + + linters[ml.Name()] = mlConfig + m.debugf("Combined %d go/analysis linters into one metalinter", len(goanalysisLinters)) } -func (m Manager) getAnalyzerPlugin(path string) (AnalyzerPlugin, error) { - if !filepath.IsAbs(path) { - // resolve non-absolute paths relative to config file's directory - configFilePath := viper.ConfigFileUsed() - absConfigFilePath, err := filepath.Abs(configFilePath) - if err != nil { - return nil, fmt.Errorf("could not get absolute representation of config file path %q: %v", configFilePath, err) +func (m *Manager) verbosePrintLintersStatus(lcs map[string]*linter.Config) { + var linterNames []string + for _, lc := range lcs { + if lc.Internal { + continue } - path = filepath.Join(filepath.Dir(absConfigFilePath), path) + + linterNames = append(linterNames, lc.Name()) } + sort.Strings(linterNames) + m.log.Infof("Active %d linters: %s", len(linterNames), linterNames) - plug, err := plugin.Open(path) - if err != nil { - return nil, err + if len(m.cfg.Linters.Presets) != 0 { + sort.Strings(m.cfg.Linters.Presets) + m.log.Infof("Active presets: %s", m.cfg.Linters.Presets) } +} - symbol, err := plug.Lookup("AnalyzerPlugin") - if err != nil { - return nil, err +func AllPresets() []string { + return []string{ + linter.PresetBugs, + linter.PresetComment, + linter.PresetComplexity, + linter.PresetError, + linter.PresetFormatting, +
linter.PresetImport, + linter.PresetMetaLinter, + linter.PresetModule, + linter.PresetPerformance, + linter.PresetSQL, + linter.PresetStyle, + linter.PresetTest, + linter.PresetUnused, } +} - analyzerPlugin, ok := symbol.(AnalyzerPlugin) - if !ok { - return nil, fmt.Errorf("plugin %s does not abide by 'AnalyzerPlugin' interface", path) +func linterConfigsToMap(lcs []*linter.Config) map[string]*linter.Config { + ret := map[string]*linter.Config{} + for _, lc := range lcs { + ret[lc.Name()] = lc } - return analyzerPlugin, nil + return ret } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go index ed731a9686..364d5082a5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go @@ -1,7 +1,9 @@ package lintersdb import ( + "errors" "fmt" + "slices" "strings" "github.com/golangci/golangci-lint/pkg/config" @@ -12,16 +14,36 @@ type Validator struct { } func NewValidator(m *Manager) *Validator { - return &Validator{ - m: m, + return &Validator{m: m} +} + +// Validate validates the configuration by calling all other validators for different +// sections in the configuration and then some additional linter validation functions. +func (v Validator) Validate(cfg *config.Config) error { + err := cfg.Validate() + if err != nil { + return err + } + + validators := []func(cfg *config.Linters) error{ + v.validateLintersNames, + v.validatePresets, + } + + for _, v := range validators { + if err := v(&cfg.Linters); err != nil { + return err + } } + + return nil } func (v Validator) validateLintersNames(cfg *config.Linters) error { allNames := append([]string{}, cfg.Enable...) allNames = append(allNames, cfg.Disable...) 
- unknownNames := []string{} + var unknownNames []string for _, name := range allNames { if v.m.GetLinterConfigs(name) == nil { @@ -30,7 +52,7 @@ func (v Validator) validateLintersNames(cfg *config.Linters) error { } if len(unknownNames) > 0 { - return fmt.Errorf("unknown linters: '%v', run 'golangci-lint linters' to see the list of supported linters", + return fmt.Errorf("unknown linters: '%v', run 'golangci-lint help linters' to see the list of supported linters", strings.Join(unknownNames, ",")) } @@ -38,69 +60,17 @@ func (v Validator) validateLintersNames(cfg *config.Linters) error { } func (v Validator) validatePresets(cfg *config.Linters) error { - allPresets := v.m.allPresetsSet() + presets := AllPresets() + for _, p := range cfg.Presets { - if !allPresets[p] { + if !slices.Contains(presets, p) { return fmt.Errorf("no such preset %q: only next presets exist: (%s)", - p, strings.Join(v.m.AllPresets(), "|")) + p, strings.Join(presets, "|")) } } if len(cfg.Presets) != 0 && cfg.EnableAll { - return fmt.Errorf("--presets is incompatible with --enable-all") - } - - return nil -} - -func (v Validator) validateAllDisableEnableOptions(cfg *config.Linters) error { - if cfg.EnableAll && cfg.DisableAll { - return fmt.Errorf("--enable-all and --disable-all options must not be combined") - } - - if cfg.DisableAll { - if len(cfg.Enable) == 0 && len(cfg.Presets) == 0 { - return fmt.Errorf("all linters were disabled, but no one linter was enabled: must enable at least one") - } - - if len(cfg.Disable) != 0 { - return fmt.Errorf("can't combine options --disable-all and --disable %s", cfg.Disable[0]) - } - } - - if cfg.EnableAll && len(cfg.Enable) != 0 && !cfg.Fast { - return fmt.Errorf("can't combine options --enable-all and --enable %s", cfg.Enable[0]) - } - - return nil -} - -func (v Validator) validateDisabledAndEnabledAtOneMoment(cfg *config.Linters) error { - enabledLintersSet := map[string]bool{} - for _, name := range cfg.Enable { - enabledLintersSet[name] = true - } - - for _, name := range cfg.Disable { - if enabledLintersSet[name] { - return fmt.Errorf("linter %q can't be disabled and enabled at one moment", name) - } - } - - return nil -} - -func (v Validator) validateEnabledDisabledLintersConfig(cfg *config.Linters) error { - validators := []func(cfg *config.Linters) error{ - v.validateLintersNames, - v.validatePresets, - v.validateAllDisableEnableOptions, - v.validateDisabledAndEnabledAtOneMoment, - } - for _, v := range validators { - if err := v(cfg); err != nil { - return err - } + return errors.New("--presets is incompatible with --enable-all") } return nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go deleted file mode 100644 index 69852afb98..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go +++ /dev/null @@ -1,315 +0,0 @@ -package lint - -import ( - "context" - "fmt" - "go/build" - "go/token" - "os" - "path/filepath" - "regexp" - "strings" - "time" - - "github.com/pkg/errors" - "golang.org/x/tools/go/packages" - - "github.com/golangci/golangci-lint/internal/pkgcache" - "github.com/golangci/golangci-lint/pkg/config" - "github.com/golangci/golangci-lint/pkg/exitcodes" - "github.com/golangci/golangci-lint/pkg/fsutils" - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load" - "github.com/golangci/golangci-lint/pkg/goutil" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/logutils" -) - -type ContextLoader struct { - cfg 
*config.Config - log logutils.Log - debugf logutils.DebugFunc - goenv *goutil.Env - pkgTestIDRe *regexp.Regexp - lineCache *fsutils.LineCache - fileCache *fsutils.FileCache - pkgCache *pkgcache.Cache - loadGuard *load.Guard -} - -func NewContextLoader(cfg *config.Config, log logutils.Log, goenv *goutil.Env, - lineCache *fsutils.LineCache, fileCache *fsutils.FileCache, pkgCache *pkgcache.Cache, loadGuard *load.Guard) *ContextLoader { - return &ContextLoader{ - cfg: cfg, - log: log, - debugf: logutils.Debug("loader"), - goenv: goenv, - pkgTestIDRe: regexp.MustCompile(`^(.*) \[(.*)\.test\]`), - lineCache: lineCache, - fileCache: fileCache, - pkgCache: pkgCache, - loadGuard: loadGuard, - } -} - -func (cl *ContextLoader) prepareBuildContext() { - // Set GOROOT to have working cross-compilation: cross-compiled binaries - // have invalid GOROOT. XXX: can't use runtime.GOROOT(). - goroot := cl.goenv.Get(goutil.EnvGoRoot) - if goroot == "" { - return - } - - os.Setenv("GOROOT", goroot) - build.Default.GOROOT = goroot - build.Default.BuildTags = cl.cfg.Run.BuildTags -} - -func (cl *ContextLoader) findLoadMode(linters []*linter.Config) packages.LoadMode { - loadMode := packages.LoadMode(0) - for _, lc := range linters { - loadMode |= lc.LoadMode - } - - return loadMode -} - -func (cl *ContextLoader) buildArgs() []string { - args := cl.cfg.Run.Args - if len(args) == 0 { - return []string{"./..."} - } - - var retArgs []string - for _, arg := range args { - if strings.HasPrefix(arg, ".") || filepath.IsAbs(arg) { - retArgs = append(retArgs, arg) - } else { - // go/packages doesn't work well if we don't have prefix ./ for local packages - retArgs = append(retArgs, fmt.Sprintf(".%c%s", filepath.Separator, arg)) - } - } - - return retArgs -} - -func (cl *ContextLoader) makeBuildFlags() ([]string, error) { - var buildFlags []string - - if len(cl.cfg.Run.BuildTags) != 0 { - // go help build - buildFlags = append(buildFlags, "-tags", strings.Join(cl.cfg.Run.BuildTags, " ")) - cl.log.Infof("Using build tags: %v", cl.cfg.Run.BuildTags) - } - - mod := cl.cfg.Run.ModulesDownloadMode - if mod != "" { - // go help modules - allowedMods := []string{"mod", "readonly", "vendor"} - var ok bool - for _, am := range allowedMods { - if am == mod { - ok = true - break - } - } - if !ok { - return nil, fmt.Errorf("invalid modules download path %s, only (%s) allowed", mod, strings.Join(allowedMods, "|")) - } - - buildFlags = append(buildFlags, fmt.Sprintf("-mod=%s", cl.cfg.Run.ModulesDownloadMode)) - } - - return buildFlags, nil -} - -func stringifyLoadMode(mode packages.LoadMode) string { - m := map[packages.LoadMode]string{ - packages.NeedCompiledGoFiles: "compiled_files", - packages.NeedDeps: "deps", - packages.NeedExportsFile: "exports_file", - packages.NeedFiles: "files", - packages.NeedImports: "imports", - packages.NeedName: "name", - packages.NeedSyntax: "syntax", - packages.NeedTypes: "types", - packages.NeedTypesInfo: "types_info", - packages.NeedTypesSizes: "types_sizes", - } - - var flags []string - for flag, flagStr := range m { - if mode&flag != 0 { - flags = append(flags, flagStr) - } - } - - return fmt.Sprintf("%d (%s)", mode, strings.Join(flags, "|")) -} - -func (cl *ContextLoader) debugPrintLoadedPackages(pkgs []*packages.Package) { - cl.debugf("loaded %d pkgs", len(pkgs)) - for i, pkg := range pkgs { - var syntaxFiles []string - for _, sf := range pkg.Syntax { - syntaxFiles = append(syntaxFiles, pkg.Fset.Position(sf.Pos()).Filename) - } - cl.debugf("Loaded pkg #%d: ID=%s GoFiles=%s CompiledGoFiles=%s 
Syntax=%s", - i, pkg.ID, pkg.GoFiles, pkg.CompiledGoFiles, syntaxFiles) - } -} - -func (cl *ContextLoader) parseLoadedPackagesErrors(pkgs []*packages.Package) error { - for _, pkg := range pkgs { - for _, err := range pkg.Errors { - if strings.Contains(err.Msg, "no Go files") { - return errors.Wrapf(exitcodes.ErrNoGoFiles, "package %s", pkg.PkgPath) - } - if strings.Contains(err.Msg, "cannot find package") { - // when analyzing not existing directory - return errors.Wrap(exitcodes.ErrFailure, err.Msg) - } - } - } - - return nil -} - -func (cl *ContextLoader) loadPackages(ctx context.Context, loadMode packages.LoadMode) ([]*packages.Package, error) { - defer func(startedAt time.Time) { - cl.log.Infof("Go packages loading at mode %s took %s", stringifyLoadMode(loadMode), time.Since(startedAt)) - }(time.Now()) - - cl.prepareBuildContext() - - buildFlags, err := cl.makeBuildFlags() - if err != nil { - return nil, errors.Wrap(err, "failed to make build flags for go list") - } - - conf := &packages.Config{ - Mode: loadMode, - Tests: cl.cfg.Run.AnalyzeTests, - Context: ctx, - BuildFlags: buildFlags, - Logf: cl.debugf, - //TODO: use fset, parsefile, overlay - } - - args := cl.buildArgs() - cl.debugf("Built loader args are %s", args) - pkgs, err := packages.Load(conf, args...) - if err != nil { - return nil, errors.Wrap(err, "failed to load with go/packages") - } - - // Currently, go/packages doesn't guarantee that error will be returned - // if context was canceled. See - // https://github.com/golang/tools/commit/c5cec6710e927457c3c29d6c156415e8539a5111#r39261855 - if ctx.Err() != nil { - return nil, errors.Wrap(ctx.Err(), "timed out to load packages") - } - - if loadMode&packages.NeedSyntax == 0 { - // Needed e.g. for go/analysis loading. - fset := token.NewFileSet() - packages.Visit(pkgs, nil, func(pkg *packages.Package) { - pkg.Fset = fset - cl.loadGuard.AddMutexForPkg(pkg) - }) - } - - cl.debugPrintLoadedPackages(pkgs) - - if err := cl.parseLoadedPackagesErrors(pkgs); err != nil { - return nil, err - } - - return cl.filterTestMainPackages(pkgs), nil -} - -func (cl *ContextLoader) tryParseTestPackage(pkg *packages.Package) (name string, isTest bool) { - matches := cl.pkgTestIDRe.FindStringSubmatch(pkg.ID) - if matches == nil { - return "", false - } - - return matches[1], true -} - -func (cl *ContextLoader) filterTestMainPackages(pkgs []*packages.Package) []*packages.Package { - var retPkgs []*packages.Package - for _, pkg := range pkgs { - if pkg.Name == "main" && strings.HasSuffix(pkg.PkgPath, ".test") { - // it's an implicit testmain package - cl.debugf("skip pkg ID=%s", pkg.ID) - continue - } - - retPkgs = append(retPkgs, pkg) - } - - return retPkgs -} - -func (cl *ContextLoader) filterDuplicatePackages(pkgs []*packages.Package) []*packages.Package { - packagesWithTests := map[string]bool{} - for _, pkg := range pkgs { - name, isTest := cl.tryParseTestPackage(pkg) - if !isTest { - continue - } - packagesWithTests[name] = true - } - - cl.debugf("package with tests: %#v", packagesWithTests) - - var retPkgs []*packages.Package - for _, pkg := range pkgs { - _, isTest := cl.tryParseTestPackage(pkg) - if !isTest && packagesWithTests[pkg.PkgPath] { - // If tests loading is enabled, - // for package with files a.go and a_test.go go/packages loads two packages: - // 1. ID=".../a" GoFiles=[a.go] - // 2. 
ID=".../a [.../a.test]" GoFiles=[a.go a_test.go] - // We need only the second package, otherwise we can get warnings about unused variables/fields/functions - // in a.go if they are used only in a_test.go. - cl.debugf("skip pkg ID=%s because we load it with test package", pkg.ID) - continue - } - - retPkgs = append(retPkgs, pkg) - } - - return retPkgs -} - -func (cl *ContextLoader) Load(ctx context.Context, linters []*linter.Config) (*linter.Context, error) { - loadMode := cl.findLoadMode(linters) - pkgs, err := cl.loadPackages(ctx, loadMode) - if err != nil { - return nil, errors.Wrap(err, "failed to load packages") - } - - deduplicatedPkgs := cl.filterDuplicatePackages(pkgs) - - if len(deduplicatedPkgs) == 0 { - return nil, exitcodes.ErrNoGoFiles - } - - ret := &linter.Context{ - Packages: deduplicatedPkgs, - - // At least `unused` linters works properly only on original (not deduplicated) packages, - // see https://github.com/golangci/golangci-lint/pull/585. - OriginalPackages: pkgs, - - Cfg: cl.cfg, - Log: cl.log, - FileCache: cl.fileCache, - LineCache: cl.lineCache, - PkgCache: cl.pkgCache, - LoadGuard: cl.loadGuard, - } - - return ret, nil -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/package.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/package.go new file mode 100644 index 0000000000..e8222a4554 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/package.go @@ -0,0 +1,281 @@ +package lint + +import ( + "context" + "fmt" + "go/build" + "go/token" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/exitcodes" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load" + "github.com/golangci/golangci-lint/pkg/goutil" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +// PackageLoader loads packages based on [golang.org/x/tools/go/packages.Load]. +type PackageLoader struct { + log logutils.Log + debugf logutils.DebugFunc + + cfg *config.Config + + args []string + + pkgTestIDRe *regexp.Regexp + + goenv *goutil.Env + + loadGuard *load.Guard +} + +// NewPackageLoader creates a new PackageLoader. +func NewPackageLoader(log logutils.Log, cfg *config.Config, args []string, goenv *goutil.Env, loadGuard *load.Guard) *PackageLoader { + return &PackageLoader{ + cfg: cfg, + args: args, + log: log, + debugf: logutils.Debug(logutils.DebugKeyLoader), + goenv: goenv, + pkgTestIDRe: regexp.MustCompile(`^(.*) \[(.*)\.test\]`), + loadGuard: loadGuard, + } +} + +// Load loads packages. 
+func (l *PackageLoader) Load(ctx context.Context, linters []*linter.Config) (pkgs, deduplicatedPkgs []*packages.Package, err error) { + loadMode := findLoadMode(linters) + + pkgs, err = l.loadPackages(ctx, loadMode) + if err != nil { + return nil, nil, fmt.Errorf("failed to load packages: %w", err) + } + + return pkgs, l.filterDuplicatePackages(pkgs), nil +} + +func (l *PackageLoader) loadPackages(ctx context.Context, loadMode packages.LoadMode) ([]*packages.Package, error) { + defer func(startedAt time.Time) { + l.log.Infof("Go packages loading at mode %s took %s", stringifyLoadMode(loadMode), time.Since(startedAt)) + }(time.Now()) + + l.prepareBuildContext() + + conf := &packages.Config{ + Mode: loadMode, + Tests: l.cfg.Run.AnalyzeTests, + Context: ctx, + BuildFlags: l.makeBuildFlags(), + Logf: l.debugf, + // TODO: use fset, parsefile, overlay + } + + args := l.buildArgs() + l.debugf("Built loader args are %s", args) + pkgs, err := packages.Load(conf, args...) + if err != nil { + return nil, fmt.Errorf("failed to load with go/packages: %w", err) + } + + if loadMode&packages.NeedSyntax == 0 { + // Needed e.g. for go/analysis loading. + fset := token.NewFileSet() + packages.Visit(pkgs, nil, func(pkg *packages.Package) { + pkg.Fset = fset + l.loadGuard.AddMutexForPkg(pkg) + }) + } + + l.debugPrintLoadedPackages(pkgs) + + if err := l.parseLoadedPackagesErrors(pkgs); err != nil { + return nil, err + } + + return l.filterTestMainPackages(pkgs), nil +} + +func (l *PackageLoader) parseLoadedPackagesErrors(pkgs []*packages.Package) error { + for _, pkg := range pkgs { + var errs []packages.Error + for _, err := range pkg.Errors { + // quick fix: skip the error related to the `go list` invocation by packages.Load() + // The behavior has been changed between go1.19 and go1.20; the error is now inside the JSON content. + // https://github.com/golangci/golangci-lint/pull/3414#issuecomment-1364756303 + if strings.Contains(err.Msg, "# command-line-arguments") { + continue + } + + errs = append(errs, err) + + if strings.Contains(err.Msg, "no Go files") { + return fmt.Errorf("package %s: %w", pkg.PkgPath, exitcodes.ErrNoGoFiles) + } + if strings.Contains(err.Msg, "cannot find package") { + // when analyzing a non-existent directory + return fmt.Errorf("%v: %w", err.Msg, exitcodes.ErrFailure) + } + } + + pkg.Errors = errs + } + + return nil +} + +func (l *PackageLoader) tryParseTestPackage(pkg *packages.Package) (name string, isTest bool) { + matches := l.pkgTestIDRe.FindStringSubmatch(pkg.ID) + if matches == nil { + return "", false + } + + return matches[1], true +} + +func (l *PackageLoader) filterDuplicatePackages(pkgs []*packages.Package) []*packages.Package { + packagesWithTests := map[string]bool{} + for _, pkg := range pkgs { + name, isTest := l.tryParseTestPackage(pkg) + if !isTest { + continue + } + packagesWithTests[name] = true + } + + l.debugf("package with tests: %#v", packagesWithTests) + + var retPkgs []*packages.Package + for _, pkg := range pkgs { + _, isTest := l.tryParseTestPackage(pkg) + if !isTest && packagesWithTests[pkg.PkgPath] { + // If tests loading is enabled, + // for package with files a.go and a_test.go go/packages loads two packages: + // 1. ID=".../a" GoFiles=[a.go] + // 2. ID=".../a [.../a.test]" GoFiles=[a.go a_test.go] + // We need only the second package, otherwise we can get warnings about unused variables/fields/functions + // in a.go if they are used only in a_test.go.
+ l.debugf("skip pkg ID=%s because we load it with test package", pkg.ID) + continue + } + + retPkgs = append(retPkgs, pkg) + } + + return retPkgs +} + +func (l *PackageLoader) filterTestMainPackages(pkgs []*packages.Package) []*packages.Package { + var retPkgs []*packages.Package + for _, pkg := range pkgs { + if pkg.Name == "main" && strings.HasSuffix(pkg.PkgPath, ".test") { + // it's an implicit testmain package + l.debugf("skip pkg ID=%s", pkg.ID) + continue + } + + retPkgs = append(retPkgs, pkg) + } + + return retPkgs +} + +func (l *PackageLoader) debugPrintLoadedPackages(pkgs []*packages.Package) { + l.debugf("loaded %d pkgs", len(pkgs)) + for i, pkg := range pkgs { + var syntaxFiles []string + for _, sf := range pkg.Syntax { + syntaxFiles = append(syntaxFiles, pkg.Fset.Position(sf.Pos()).Filename) + } + l.debugf("Loaded pkg #%d: ID=%s GoFiles=%s CompiledGoFiles=%s Syntax=%s", + i, pkg.ID, pkg.GoFiles, pkg.CompiledGoFiles, syntaxFiles) + } +} + +func (l *PackageLoader) prepareBuildContext() { + // Set GOROOT to have working cross-compilation: cross-compiled binaries + // have invalid GOROOT. XXX: can't use runtime.GOROOT(). + goroot := l.goenv.Get(goutil.EnvGoRoot) + if goroot == "" { + return + } + + os.Setenv(string(goutil.EnvGoRoot), goroot) + build.Default.GOROOT = goroot + build.Default.BuildTags = l.cfg.Run.BuildTags +} + +func (l *PackageLoader) buildArgs() []string { + if len(l.args) == 0 { + return []string{"./..."} + } + + var retArgs []string + for _, arg := range l.args { + if strings.HasPrefix(arg, ".") || filepath.IsAbs(arg) { + retArgs = append(retArgs, arg) + } else { + // go/packages doesn't work well if we don't have the prefix ./ for local packages + retArgs = append(retArgs, fmt.Sprintf(".%c%s", filepath.Separator, arg)) + } + } + + return retArgs +} + +func (l *PackageLoader) makeBuildFlags() []string { + var buildFlags []string + + if len(l.cfg.Run.BuildTags) != 0 { + // go help build + buildFlags = append(buildFlags, "-tags", strings.Join(l.cfg.Run.BuildTags, " ")) + l.log.Infof("Using build tags: %v", l.cfg.Run.BuildTags) + } + + if l.cfg.Run.ModulesDownloadMode != "" { + // go help modules + buildFlags = append(buildFlags, fmt.Sprintf("-mod=%s", l.cfg.Run.ModulesDownloadMode)) + } + + return buildFlags +} + +func findLoadMode(linters []*linter.Config) packages.LoadMode { + loadMode := packages.LoadMode(0) + for _, lc := range linters { + loadMode |= lc.LoadMode + } + + return loadMode +} + +func stringifyLoadMode(mode packages.LoadMode) string { + m := map[packages.LoadMode]string{ + packages.NeedCompiledGoFiles: "compiled_files", + packages.NeedDeps: "deps", + packages.NeedExportFile: "exports_file", + packages.NeedFiles: "files", + packages.NeedImports: "imports", + packages.NeedName: "name", + packages.NeedSyntax: "syntax", + packages.NeedTypes: "types", + packages.NeedTypesInfo: "types_info", + packages.NeedTypesSizes: "types_sizes", + } + + var flags []string + for flag, flagStr := range m { + if mode&flag != 0 { + flags = append(flags, flagStr) + } + } + + return fmt.Sprintf("%d (%s)", mode, strings.Join(flags, "|")) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go index 8882b93009..b1d604c964 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go @@ -2,13 +2,11 @@ package lint import ( "context" + "errors" "fmt" "runtime/debug" "strings" - "github.com/pkg/errors" - gopackages 
"golang.org/x/tools/go/packages" - "github.com/golangci/golangci-lint/internal/errorutil" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" @@ -22,46 +20,45 @@ import ( "github.com/golangci/golangci-lint/pkg/timeutils" ) +type processorStat struct { + inCount int + outCount int +} + type Runner struct { + Log logutils.Log + + lintCtx *linter.Context Processors []processors.Processor - Log logutils.Log } -func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, es *lintersdb.EnabledSet, - lineCache *fsutils.LineCache, dbManager *lintersdb.Manager, pkgs []*gopackages.Package) (*Runner, error) { - skipFilesProcessor, err := processors.NewSkipFiles(cfg.Run.SkipFiles) +func NewRunner(log logutils.Log, cfg *config.Config, args []string, goenv *goutil.Env, + lineCache *fsutils.LineCache, fileCache *fsutils.FileCache, + dbManager *lintersdb.Manager, lintCtx *linter.Context, +) (*Runner, error) { + // Beware that some processors need to add the path prefix when working with paths + // because they get invoked before the path prefixer (exclude and severity rules) + // or process other paths (skip files). + files := fsutils.NewFiles(lineCache, cfg.Output.PathPrefix) + + skipFilesProcessor, err := processors.NewSkipFiles(cfg.Issues.ExcludeFiles, cfg.Output.PathPrefix) if err != nil { return nil, err } - skipDirs := cfg.Run.SkipDirs - if cfg.Run.UseDefaultSkipDirs { + skipDirs := cfg.Issues.ExcludeDirs + if cfg.Issues.UseDefaultExcludeDirs { skipDirs = append(skipDirs, packages.StdExcludeDirRegexps...) } - skipDirsProcessor, err := processors.NewSkipDirs(skipDirs, log.Child("skip dirs"), cfg.Run.Args) + + skipDirsProcessor, err := processors.NewSkipDirs(skipDirs, log.Child(logutils.DebugKeySkipDirs), args, cfg.Output.PathPrefix) if err != nil { return nil, err } - enabledLinters, err := es.GetEnabledLintersMap() + enabledLinters, err := dbManager.GetEnabledLintersMap() if err != nil { - return nil, errors.Wrap(err, "failed to get enabled linters") - } - - // print deprecated messages - if !cfg.InternalCmdTest { - for name, lc := range enabledLinters { - if !lc.IsDeprecated() { - continue - } - - var extra string - if lc.Deprecation.Replacement != "" { - extra = fmt.Sprintf(" Replaced by %s.", lc.Deprecation.Replacement) - } - - log.Warnf("The linter '%s' is deprecated (since %s) due to: %s %s", name, lc.Deprecation.Since, lc.Deprecation.Message, extra) - } + return nil, fmt.Errorf("failed to get enabled linters: %w", err) } return &Runner{ @@ -69,39 +66,76 @@ func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, es *lint processors.NewCgo(goenv), // Must go after Cgo. - processors.NewFilenameUnadjuster(pkgs, log.Child("filename_unadjuster")), + processors.NewFilenameUnadjuster(lintCtx.Packages, log.Child(logutils.DebugKeyFilenameUnadjuster)), + + // Must go after FilenameUnadjuster. + processors.NewInvalidIssue(log.Child(logutils.DebugKeyInvalidIssue)), // Must be before diff, nolint and exclude autogenerated processor at least. processors.NewPathPrettifier(), skipFilesProcessor, skipDirsProcessor, // must be after path prettifier - processors.NewAutogeneratedExclude(), + processors.NewAutogeneratedExclude(cfg.Issues.ExcludeGeneratedStrict), // Must be before exclude because users see already marked output and configure excluding by it. 
processors.NewIdentifierMarker(), getExcludeProcessor(&cfg.Issues), - getExcludeRulesProcessor(&cfg.Issues, log, lineCache), - processors.NewNolint(log.Child("nolint"), dbManager, enabledLinters), + getExcludeRulesProcessor(&cfg.Issues, log, files), + processors.NewNolint(log.Child(logutils.DebugKeyNolint), dbManager, enabledLinters), processors.NewUniqByLine(cfg), - processors.NewDiff(cfg.Issues.Diff, cfg.Issues.DiffFromRevision, cfg.Issues.DiffPatchFilePath), + processors.NewDiff(cfg.Issues.Diff, cfg.Issues.DiffFromRevision, cfg.Issues.DiffPatchFilePath, cfg.Issues.WholeFiles), processors.NewMaxPerFileFromLinter(cfg), - processors.NewMaxSameIssues(cfg.Issues.MaxSameIssues, log.Child("max_same_issues"), cfg), - processors.NewMaxFromLinter(cfg.Issues.MaxIssuesPerLinter, log.Child("max_from_linter"), cfg), - processors.NewSourceCode(lineCache, log.Child("source_code")), + processors.NewMaxSameIssues(cfg.Issues.MaxSameIssues, log.Child(logutils.DebugKeyMaxSameIssues), cfg), + processors.NewMaxFromLinter(cfg.Issues.MaxIssuesPerLinter, log.Child(logutils.DebugKeyMaxFromLinter), cfg), + processors.NewSourceCode(lineCache, log.Child(logutils.DebugKeySourceCode)), processors.NewPathShortener(), - getSeverityRulesProcessor(&cfg.Severity, log, lineCache), + getSeverityRulesProcessor(&cfg.Severity, log, files), + + // The fixer still needs to see paths for the issues that are relative to the current directory. + processors.NewFixer(cfg, log, fileCache), + + // Now we can modify the issues for output. processors.NewPathPrefixer(cfg.Output.PathPrefix), processors.NewSortResults(cfg), }, - Log: log, + lintCtx: lintCtx, + Log: log, }, nil } +func (r *Runner) Run(ctx context.Context, linters []*linter.Config) ([]result.Issue, error) { + sw := timeutils.NewStopwatch("linters", r.Log) + defer sw.Print() + + var ( + lintErrors error + issues []result.Issue + ) + + for _, lc := range linters { + lc := lc + sw.TrackStage(lc.Name(), func() { + linterIssues, err := r.runLinterSafe(ctx, r.lintCtx, lc) + if err != nil { + lintErrors = errors.Join(lintErrors, fmt.Errorf("can't run linter %s", lc.Linter.Name()), err) + r.Log.Warnf("Can't run linter %s: %v", lc.Linter.Name(), err) + + return + } + + issues = append(issues, linterIssues...) + }) + } + + return r.processLintResults(issues), lintErrors +} + func (r *Runner) runLinterSafe(ctx context.Context, lintCtx *linter.Context, - lc *linter.Config) (ret []result.Issue, err error) { + lc *linter.Config, +) (ret []result.Issue, err error) { defer func() { if panicData := recover(); panicData != nil { if pe, ok := panicData.(*errorutil.PanicError); ok { @@ -123,7 +157,7 @@ func (r *Runner) runLinterSafe(ctx context.Context, lintCtx *linter.Context, // which affects to the next analysis. // To avoid this issue, we clear type information from the packages. // See https://github.com/golangci/golangci-lint/pull/944. - // Currently DoesChangeTypes is true only for `unused`. + // Currently, DoesChangeTypes is true only for `unused`. 
lintCtx.ClearTypesInPackages() } @@ -140,12 +174,7 @@ func (r *Runner) runLinterSafe(ctx context.Context, lintCtx *linter.Context, return issues, nil } -type processorStat struct { - inCount int - outCount int -} - -func (r Runner) processLintResults(inIssues []result.Issue) []result.Issue { +func (r *Runner) processLintResults(inIssues []result.Issue) []result.Issue { sw := timeutils.NewStopwatch("processing", r.Log) var issuesBefore, issuesAfter int @@ -176,7 +205,7 @@ func (r Runner) processLintResults(inIssues []result.Issue) []result.Issue { return outIssues } -func (r Runner) printPerProcessorStat(stat map[string]processorStat) { +func (r *Runner) printPerProcessorStat(stat map[string]processorStat) { parts := make([]string, 0, len(stat)) for name, ps := range stat { if ps.inCount != 0 { @@ -188,26 +217,6 @@ func (r Runner) printPerProcessorStat(stat map[string]processorStat) { } } -func (r Runner) Run(ctx context.Context, linters []*linter.Config, lintCtx *linter.Context) ([]result.Issue, error) { - sw := timeutils.NewStopwatch("linters", r.Log) - defer sw.Print() - - var issues []result.Issue - for _, lc := range linters { - lc := lc - sw.TrackStage(lc.Name(), func() { - linterIssues, err := r.runLinterSafe(ctx, lintCtx, lc) - if err != nil { - r.Log.Warnf("Can't run linter %s: %v", lc.Linter.Name(), err) - return - } - issues = append(issues, linterIssues...) - }) - } - - return r.processLintResults(issues), nil -} - func (r *Runner) processIssues(issues []result.Issue, sw *timeutils.Stopwatch, statPerProcessor map[string]processorStat) []result.Issue { for _, p := range r.Processors { var newIssues []result.Issue @@ -236,31 +245,27 @@ func (r *Runner) processIssues(issues []result.Issue, sw *timeutils.Stopwatch, s } func getExcludeProcessor(cfg *config.Issues) processors.Processor { - var excludeTotalPattern string - - if len(cfg.ExcludePatterns) != 0 { - excludeTotalPattern = fmt.Sprintf("(%s)", strings.Join(cfg.ExcludePatterns, "|")) + opts := processors.ExcludeOptions{ + CaseSensitive: cfg.ExcludeCaseSensitive, } - var excludeProcessor processors.Processor - if cfg.ExcludeCaseSensitive { - excludeProcessor = processors.NewExcludeCaseSensitive(excludeTotalPattern) - } else { - excludeProcessor = processors.NewExclude(excludeTotalPattern) + if len(cfg.ExcludePatterns) != 0 { + opts.Pattern = fmt.Sprintf("(%s)", strings.Join(cfg.ExcludePatterns, "|")) } - return excludeProcessor + return processors.NewExclude(opts) } -func getExcludeRulesProcessor(cfg *config.Issues, log logutils.Log, lineCache *fsutils.LineCache) processors.Processor { +func getExcludeRulesProcessor(cfg *config.Issues, log logutils.Log, files *fsutils.Files) processors.Processor { var excludeRules []processors.ExcludeRule for _, r := range cfg.ExcludeRules { excludeRules = append(excludeRules, processors.ExcludeRule{ BaseRule: processors.BaseRule{ - Text: r.Text, - Source: r.Source, - Path: r.Path, - Linters: r.Linters, + Text: r.Text, + Source: r.Source, + Path: r.Path, + PathExcept: r.PathExcept, + Linters: r.Linters, }, }) } @@ -276,54 +281,34 @@ func getExcludeRulesProcessor(cfg *config.Issues, log logutils.Log, lineCache *f } } - var excludeRulesProcessor processors.Processor - if cfg.ExcludeCaseSensitive { - excludeRulesProcessor = processors.NewExcludeRulesCaseSensitive( - excludeRules, - lineCache, - log.Child("exclude_rules"), - ) - } else { - excludeRulesProcessor = processors.NewExcludeRules( - excludeRules, - lineCache, - log.Child("exclude_rules"), - ) + opts := processors.ExcludeRulesOptions{ + 
Rules: excludeRules, + CaseSensitive: cfg.ExcludeCaseSensitive, } - return excludeRulesProcessor + return processors.NewExcludeRules(log.Child(logutils.DebugKeyExcludeRules), files, opts) } -func getSeverityRulesProcessor(cfg *config.Severity, log logutils.Log, lineCache *fsutils.LineCache) processors.Processor { +func getSeverityRulesProcessor(cfg *config.Severity, log logutils.Log, files *fsutils.Files) processors.Processor { var severityRules []processors.SeverityRule for _, r := range cfg.Rules { severityRules = append(severityRules, processors.SeverityRule{ Severity: r.Severity, BaseRule: processors.BaseRule{ - Text: r.Text, - Source: r.Source, - Path: r.Path, - Linters: r.Linters, + Text: r.Text, + Source: r.Source, + Path: r.Path, + PathExcept: r.PathExcept, + Linters: r.Linters, }, }) } - var severityRulesProcessor processors.Processor - if cfg.CaseSensitive { - severityRulesProcessor = processors.NewSeverityRulesCaseSensitive( - cfg.Default, - severityRules, - lineCache, - log.Child("severity_rules"), - ) - } else { - severityRulesProcessor = processors.NewSeverityRules( - cfg.Default, - severityRules, - lineCache, - log.Child("severity_rules"), - ) + severityOpts := processors.SeverityOptions{ + Default: cfg.Default, + Rules: severityRules, + CaseSensitive: cfg.CaseSensitive, } - return severityRulesProcessor + return processors.NewSeverity(log.Child(logutils.DebugKeySeverityRules), files, severityOpts) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go index b955417a87..16067e490e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go @@ -1,11 +1,11 @@ package logutils type Log interface { - Fatalf(format string, args ...interface{}) - Panicf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - Warnf(format string, args ...interface{}) - Infof(format string, args ...interface{}) + Fatalf(format string, args ...any) + Panicf(format string, args ...any) + Errorf(format string, args ...any) + Warnf(format string, args ...any) + Infof(format string, args ...any) Child(name string) Log SetLevel(level LogLevel) @@ -14,18 +14,18 @@ type Log interface { type LogLevel int const ( - // Debug messages, write to debug logs only by logutils.Debug. + // LogLevelDebug Debug messages, write to debug logs only by logutils.Debug. LogLevelDebug LogLevel = 0 - // Information messages, don't write too much messages, + // LogLevelInfo Information messages, don't write too many messages, // only useful ones: they are shown when running with -v. LogLevelInfo LogLevel = 1 - // Hidden errors: non critical errors: work can be continued, no need to fail whole program; + // LogLevelWarn Hidden errors: non-critical errors: work can be continued, no need to fail the whole program; // tests will crash if any warning occurred. LogLevelWarn LogLevel = 2 - // Only not hidden from user errors: whole program failing, usually + // LogLevelError Errors that are not hidden from the user: the whole program is failing; usually // error logging happens in 1-2 places: in the "main" function.
LogLevelError LogLevel = 3 ) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go index 93c9873d9f..e4bb98109d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go @@ -5,9 +5,71 @@ import ( "strings" ) +// EnvTestRun value: "1" +const EnvTestRun = "GL_TEST_RUN" + +// envDebug value: one or several debug keys. +// examples: +// - Remove output to `/dev/null`: `GL_DEBUG=linters_output ./golangci-lint run` +// - Show linters configuration: `GL_DEBUG=enabled_linters golangci-lint run` +// - Some analysis details: `GL_DEBUG=goanalysis/analyze,goanalysis/facts golangci-lint run` +const envDebug = "GL_DEBUG" + +const ( + DebugKeyAutogenExclude = "autogen_exclude" // Debugs a filter excluding autogenerated source code. + DebugKeyBinSalt = "bin_salt" + DebugKeyConfigReader = "config_reader" + DebugKeyEmpty = "" + DebugKeyEnabledLinters = "enabled_linters" + DebugKeyEnv = "env" // Debugs `go env` command. + DebugKeyExcludeRules = "exclude_rules" + DebugKeyExec = "exec" + DebugKeyFilenameUnadjuster = "filename_unadjuster" + DebugKeyInvalidIssue = "invalid_issue" + DebugKeyForbidigo = "forbidigo" + DebugKeyGoEnv = "goenv" + DebugKeyLinter = "linter" + DebugKeyLintersContext = "linters_context" + DebugKeyLintersDB = "lintersdb" + DebugKeyLintersOutput = "linters_output" + DebugKeyLoader = "loader" // Debugs packages loading (including `go/packages` internal debugging). + DebugKeyMaxFromLinter = "max_from_linter" + DebugKeyMaxSameIssues = "max_same_issues" + DebugKeyPkgCache = "pkgcache" + DebugKeyRunner = "runner" + DebugKeySeverityRules = "severity_rules" + DebugKeySkipDirs = "skip_dirs" + DebugKeySourceCode = "source_code" + DebugKeyStopwatch = "stopwatch" + DebugKeyTabPrinter = "tab_printer" + DebugKeyTest = "test" + DebugKeyTextPrinter = "text_printer" +) + +const ( + DebugKeyGoAnalysis = "goanalysis" + + DebugKeyGoAnalysisAnalyze = DebugKeyGoAnalysis + "/analyze" + DebugKeyGoAnalysisIssuesCache = DebugKeyGoAnalysis + "/issues/cache" + DebugKeyGoAnalysisMemory = DebugKeyGoAnalysis + "/memory" + + DebugKeyGoAnalysisFacts = DebugKeyGoAnalysis + "/facts" + DebugKeyGoAnalysisFactsCache = DebugKeyGoAnalysisFacts + "/cache" + DebugKeyGoAnalysisFactsExport = DebugKeyGoAnalysisFacts + "/export" + DebugKeyGoAnalysisFactsInherit = DebugKeyGoAnalysisFacts + "/inherit" +) + +const ( + DebugKeyGoCritic = "gocritic" // Debugs `go-critic` linter. + DebugKeyGovet = "govet" // Debugs `govet` linter. + DebugKeyMegacheck = "megacheck" // Debugs `staticcheck` related linters. + DebugKeyNolint = "nolint" // Debugs a filter excluding issues by `//nolint` comments. + DebugKeyRevive = "revive" // Debugs `revive` linter. 
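+
+	// A minimal sketch of consuming one of these keys (assumed call site,
+	// for illustration only):
+	//
+	//	debugf := logutils.Debug(logutils.DebugKeyNolint)
+	//	debugf("ignored issue: %v", issue) // prints only when GL_DEBUG includes "nolint"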
+) + func getEnabledDebugs() map[string]bool { ret := map[string]bool{} - debugVar := os.Getenv("GL_DEBUG") + debugVar := os.Getenv(envDebug) if debugVar == "" { return ret } @@ -21,9 +83,9 @@ func getEnabledDebugs() map[string]bool { var enabledDebugs = getEnabledDebugs() -type DebugFunc func(format string, args ...interface{}) +type DebugFunc func(format string, args ...any) -func nopDebugf(format string, args ...interface{}) {} +func nopDebugf(_ string, _ ...any) {} func Debug(tag string) DebugFunc { if !enabledDebugs[tag] { @@ -33,7 +95,7 @@ func Debug(tag string) DebugFunc { logger := NewStderrLog(tag) logger.SetLevel(LogLevelDebug) - return func(format string, args ...interface{}) { + return func(format string, args ...any) { logger.Debugf(format, args...) } } @@ -42,8 +104,15 @@ func HaveDebugTag(tag string) bool { return enabledDebugs[tag] } +var verbose bool + func SetupVerboseLog(log Log, isVerbose bool) { if isVerbose { + verbose = isVerbose log.SetLevel(LogLevelInfo) } } + +func IsVerbose() bool { + return verbose +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/mock.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/mock.go index e897ce1ede..efda8cc20f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/mock.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/mock.go @@ -12,28 +12,28 @@ func NewMockLog() *MockLog { return &MockLog{} } -func (m *MockLog) Fatalf(format string, args ...interface{}) { - mArgs := []interface{}{format} +func (m *MockLog) Fatalf(format string, args ...any) { + mArgs := []any{format} m.Called(append(mArgs, args...)...) } -func (m *MockLog) Panicf(format string, args ...interface{}) { - mArgs := []interface{}{format} +func (m *MockLog) Panicf(format string, args ...any) { + mArgs := []any{format} m.Called(append(mArgs, args...)...) } -func (m *MockLog) Errorf(format string, args ...interface{}) { - mArgs := []interface{}{format} +func (m *MockLog) Errorf(format string, args ...any) { + mArgs := []any{format} m.Called(append(mArgs, args...)...) } -func (m *MockLog) Warnf(format string, args ...interface{}) { - mArgs := []interface{}{format} +func (m *MockLog) Warnf(format string, args ...any) { + mArgs := []any{format} m.Called(append(mArgs, args...)...) } -func (m *MockLog) Infof(format string, args ...interface{}) { - mArgs := []interface{}{format} +func (m *MockLog) Infof(format string, args ...any) { + mArgs := []any{format} m.Called(append(mArgs, args...)...) 
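For reference, a minimal sketch of how these GL_DEBUG keys are consumed, assuming only the logutils API visible in this diff: Debug(tag) hands back a no-op DebugFunc unless the tag is enabled, so disabled debug logging costs almost nothing.

package main

import "github.com/golangci/golangci-lint/pkg/logutils"

// debugf only prints when invoked as: GL_DEBUG=exclude_rules golangci-lint run
var debugf = logutils.Debug(logutils.DebugKeyExcludeRules)

func main() {
	debugf("loaded %d exclude rules", 3)
}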
} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/out.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/out.go index 67c70dc8f2..ef13754867 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/out.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/out.go @@ -5,5 +5,7 @@ import ( colorable "github.com/mattn/go-colorable" ) -var StdOut = color.Output // https://github.com/golangci/golangci-lint/issues/14 -var StdErr = colorable.NewColorableStderr() +var ( + StdOut = color.Output // https://github.com/golangci/golangci-lint/issues/14 + StdErr = colorable.NewColorableStderr() +) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/stderr_log.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/stderr_log.go index b4697ee4c7..569a177a7c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/stderr_log.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/stderr_log.go @@ -5,18 +5,25 @@ import ( "os" "time" - "github.com/sirupsen/logrus" //nolint:depguard + "github.com/sirupsen/logrus" "github.com/golangci/golangci-lint/pkg/exitcodes" ) +const ( + // envLogLevel values: "error", "err", "warning", "warn","info" + envLogLevel = "LOG_LEVEL" + // envLogTimestamp value: "1" + envLogTimestamp = "LOG_TIMESTAMP" +) + type StderrLog struct { name string logger *logrus.Logger level LogLevel } -var _ Log = NewStderrLog("") +var _ Log = NewStderrLog(DebugKeyEmpty) func NewStderrLog(name string) *StderrLog { sl := &StderrLog{ @@ -25,7 +32,7 @@ func NewStderrLog(name string) *StderrLog { level: LogLevelWarn, } - switch os.Getenv("LOG_LEVEL") { + switch os.Getenv(envLogLevel) { case "error", "err": sl.logger.SetLevel(logrus.ErrorLevel) case "warning", "warn": @@ -38,9 +45,10 @@ func NewStderrLog(name string) *StderrLog { sl.logger.Out = StdErr formatter := &logrus.TextFormatter{ - DisableTimestamp: true, // `INFO[0007] msg` -> `INFO msg` + DisableTimestamp: true, // `INFO[0007] msg` -> `INFO msg` + EnvironmentOverrideColors: true, } - if os.Getenv("LOG_TIMESTAMP") == "1" { + if os.Getenv(envLogTimestamp) == "1" { formatter.DisableTimestamp = false formatter.FullTimestamp = true formatter.TimestampFormat = time.StampMilli @@ -59,17 +67,17 @@ func (sl StderrLog) prefix() string { return prefix } -func (sl StderrLog) Fatalf(format string, args ...interface{}) { +func (sl StderrLog) Fatalf(format string, args ...any) { sl.logger.Errorf("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) os.Exit(exitcodes.Failure) } -func (sl StderrLog) Panicf(format string, args ...interface{}) { +func (sl StderrLog) Panicf(format string, args ...any) { v := fmt.Sprintf("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) panic(v) } -func (sl StderrLog) Errorf(format string, args ...interface{}) { +func (sl StderrLog) Errorf(format string, args ...any) { if sl.level > LogLevelError { return } @@ -80,7 +88,7 @@ func (sl StderrLog) Errorf(format string, args ...interface{}) { // called on hidden errors, see log levels comments. 
} -func (sl StderrLog) Warnf(format string, args ...interface{}) { +func (sl StderrLog) Warnf(format string, args ...any) { if sl.level > LogLevelWarn { return } @@ -88,7 +96,7 @@ func (sl StderrLog) Warnf(format string, args ...interface{}) { sl.logger.Warnf("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) } -func (sl StderrLog) Infof(format string, args ...interface{}) { +func (sl StderrLog) Infof(format string, args ...any) { if sl.level > LogLevelInfo { return } @@ -96,7 +104,7 @@ func (sl StderrLog) Infof(format string, args ...interface{}) { sl.logger.Infof("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) } -func (sl StderrLog) Debugf(format string, args ...interface{}) { +func (sl StderrLog) Debugf(format string, args ...any) { if sl.level > LogLevelDebug { return } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/packages/errors.go b/vendor/github.com/golangci/golangci-lint/pkg/packages/errors.go index c620573b93..ff37651aff 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/packages/errors.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/packages/errors.go @@ -1,15 +1,13 @@ package packages import ( + "errors" "fmt" "go/token" "strconv" "strings" - - "github.com/pkg/errors" ) -//nolint:gomnd func ParseErrorPosition(pos string) (*token.Position, error) { // file:line(:colon) parts := strings.Split(pos, ":") @@ -20,14 +18,14 @@ func ParseErrorPosition(pos string) (*token.Position, error) { file := parts[0] line, err := strconv.Atoi(parts[1]) if err != nil { - return nil, fmt.Errorf("can't parse line number %q: %s", parts[1], err) + return nil, fmt.Errorf("can't parse line number %q: %w", parts[1], err) } var column int if len(parts) == 3 { // no column column, err = strconv.Atoi(parts[2]) if err != nil { - return nil, errors.Wrapf(err, "failed to parse column from %q", parts[2]) + return nil, fmt.Errorf("failed to parse column from %q: %w", parts[2], err) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/packages/skip.go b/vendor/github.com/golangci/golangci-lint/pkg/packages/exclude.go similarity index 100% rename from vendor/github.com/golangci/golangci-lint/pkg/packages/skip.go rename to vendor/github.com/golangci/golangci-lint/pkg/packages/exclude.go diff --git a/vendor/github.com/golangci/golangci-lint/pkg/packages/util.go b/vendor/github.com/golangci/golangci-lint/pkg/packages/util.go index e4268897f4..6a7789ebb7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/packages/util.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/packages/util.go @@ -42,7 +42,7 @@ func ExtractErrors(pkg *packages.Package) []packages.Error { continue } - // change pos to local file to properly process it by processors (properly read line etc) + // change pos to local file to properly process it by processors (properly read line etc.) 
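The migration from github.com/pkg/errors to fmt.Errorf with %w (as in ParseErrorPosition above) keeps the wrapped cause reachable through the standard errors package; a self-contained sketch of the same pattern, with a hypothetical parseLine helper that is not part of this diff:

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// parseLine mirrors the wrapping style used in ParseErrorPosition.
func parseLine(s string) (int, error) {
	n, err := strconv.Atoi(s)
	if err != nil {
		return 0, fmt.Errorf("can't parse line number %q: %w", s, err)
	}
	return n, nil
}

func main() {
	_, err := parseLine("abc")

	var numErr *strconv.NumError
	fmt.Println(errors.As(err, &numErr)) // true: the strconv cause survives the wrap
}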
uniqErrors[i].Msg = fmt.Sprintf("%s: %s", uniqErrors[i].Pos, uniqErrors[i].Msg) uniqErrors[i].Pos = fmt.Sprintf("%s:1", pkg.GoFiles[0]) } @@ -65,7 +65,7 @@ func extractErrorsImpl(pkg *packages.Package, seenPackages map[*packages.Package } seenPackages[pkg] = true - if !pkg.IllTyped { // otherwise it may take hours to traverse all deps many times + if !pkg.IllTyped { // otherwise, it may take hours to traverse all deps many times return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go index c5b948a98d..e32eef7f51 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go @@ -1,16 +1,19 @@ package printers import ( - "context" "encoding/xml" "fmt" + "io" + "sort" "github.com/go-xmlfmt/xmlfmt" + "golang.org/x/exp/maps" - "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) +const defaultCheckstyleSeverity = "error" + type checkstyleOutput struct { XMLName xml.Name `xml:"checkstyle"` Version string `xml:"version,attr"` @@ -30,15 +33,15 @@ type checkstyleError struct { Source string `xml:"source,attr"` } -const defaultCheckstyleSeverity = "error" - -type Checkstyle struct{} +type Checkstyle struct { + w io.Writer +} -func NewCheckstyle() *Checkstyle { - return &Checkstyle{} +func NewCheckstyle(w io.Writer) *Checkstyle { + return &Checkstyle{w: w} } -func (Checkstyle) Print(ctx context.Context, issues []result.Issue) error { +func (p Checkstyle) Print(issues []result.Issue) error { out := checkstyleOutput{ Version: "5.0", } @@ -72,16 +75,21 @@ func (Checkstyle) Print(ctx context.Context, issues []result.Issue) error { file.Errors = append(file.Errors, newError) } - out.Files = make([]*checkstyleFile, 0, len(files)) - for _, file := range files { - out.Files = append(out.Files, file) - } + out.Files = maps.Values(files) + + sort.Slice(out.Files, func(i, j int) bool { + return out.Files[i].Name < out.Files[j].Name + }) data, err := xml.Marshal(&out) if err != nil { return err } - fmt.Fprintf(logutils.StdOut, "%s%s\n", xml.Header, xmlfmt.FormatXML(string(data), "", " ")) + _, err = fmt.Fprintf(p.w, "%s%s\n", xml.Header, xmlfmt.FormatXML(string(data), "", " ")) + if err != nil { + return err + } + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go index 35a22ce99a..50d6dcff3b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go @@ -1,16 +1,18 @@ package printers import ( - "context" "encoding/json" - "fmt" + "io" - "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) -// CodeClimateIssue is a subset of the Code Climate spec - https://github.com/codeclimate/spec/blob/master/SPEC.md#data-types -// It is just enough to support GitLab CI Code Quality - https://docs.gitlab.com/ee/user/project/merge_requests/code_quality.html +const defaultCodeClimateSeverity = "critical" + +// CodeClimateIssue is a subset of the Code Climate spec. +// https://github.com/codeclimate/platform/blob/master/spec/analyzers/SPEC.md#data-types +// It is just enough to support GitLab CI Code Quality. 
+// https://docs.gitlab.com/ee/user/project/merge_requests/code_quality.html type CodeClimateIssue struct { Description string `json:"description"` Severity string `json:"severity,omitempty"` @@ -24,14 +26,15 @@ type CodeClimateIssue struct { } type CodeClimate struct { + w io.Writer } -func NewCodeClimate() *CodeClimate { - return &CodeClimate{} +func NewCodeClimate(w io.Writer) *CodeClimate { + return &CodeClimate{w: w} } -func (p CodeClimate) Print(ctx context.Context, issues []result.Issue) error { - codeClimateIssues := []CodeClimateIssue{} +func (p CodeClimate) Print(issues []result.Issue) error { + codeClimateIssues := make([]CodeClimateIssue, 0, len(issues)) for i := range issues { issue := &issues[i] codeClimateIssue := CodeClimateIssue{} @@ -39,6 +42,7 @@ func (p CodeClimate) Print(ctx context.Context, issues []result.Issue) error { codeClimateIssue.Location.Path = issue.Pos.Filename codeClimateIssue.Location.Lines.Begin = issue.Pos.Line codeClimateIssue.Fingerprint = issue.Fingerprint() + codeClimateIssue.Severity = defaultCodeClimateSeverity if issue.Severity != "" { codeClimateIssue.Severity = issue.Severity @@ -47,11 +51,9 @@ func (p CodeClimate) Print(ctx context.Context, issues []result.Issue) error { codeClimateIssues = append(codeClimateIssues, codeClimateIssue) } - outputJSON, err := json.Marshal(codeClimateIssues) + err := json.NewEncoder(p.w).Encode(codeClimateIssues) if err != nil { return err } - - fmt.Fprint(logutils.StdOut, string(outputJSON)) return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go index 4ebc266857..e396119da1 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go @@ -1,22 +1,23 @@ package printers import ( - "context" "fmt" + "io" + "path/filepath" - "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) -type github struct { +type GitHub struct { + w io.Writer } const defaultGithubSeverity = "error" -// NewGithub output format outputs issues according to Github actions format: -// https://help.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message -func NewGithub() Printer { - return &github{} +// NewGitHub output format outputs issues according to GitHub actions format: +// https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-error-message +func NewGitHub(w io.Writer) *GitHub { + return &GitHub{w: w} } // print each line as: ::error file=app.js,line=10,col=15::Something went wrong @@ -26,7 +27,12 @@ func formatIssueAsGithub(issue *result.Issue) string { severity = issue.Severity } - ret := fmt.Sprintf("::%s file=%s,line=%d", severity, issue.FilePath(), issue.Line()) + // Convert backslashes to forward slashes. + // This is needed when running on windows. + // Otherwise, GitHub won't be able to show the annotations pointing to the file path with backslashes. 
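A minimal usage sketch tying together the io.Writer injection and the annotation format, assuming the printer and result types exactly as they appear in this diff:

package main

import (
	"bytes"
	"fmt"
	"go/token"

	"github.com/golangci/golangci-lint/pkg/printers"
	"github.com/golangci/golangci-lint/pkg/result"
)

func main() {
	// Printers now write to an injected io.Writer instead of a global
	// stdout, which makes their output directly testable.
	var buf bytes.Buffer
	p := printers.NewGitHub(&buf)

	err := p.Print([]result.Issue{{
		FromLinter: "govet",
		Text:       "printf: wrong number of args",
		Pos:        token.Position{Filename: `pkg\app.go`, Line: 10, Column: 15},
	}})
	if err != nil {
		panic(err)
	}

	// The backslash is normalized, so GitHub can resolve the path:
	// ::error file=pkg/app.go,line=10,col=15::printf: wrong number of args
	fmt.Print(buf.String())
}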
+ file := filepath.ToSlash(issue.FilePath()) + + ret := fmt.Sprintf("::%s file=%s,line=%d", severity, file, issue.Line()) if issue.Pos.Column != 0 { ret += fmt.Sprintf(",col=%d", issue.Pos.Column) } @@ -35,9 +41,9 @@ func formatIssueAsGithub(issue *result.Issue) string { return ret } -func (g *github) Print(_ context.Context, issues []result.Issue) error { +func (p *GitHub) Print(issues []result.Issue) error { for ind := range issues { - _, err := fmt.Fprintln(logutils.StdOut, formatIssueAsGithub(&issues[ind])) + _, err := fmt.Fprintln(p.w, formatIssueAsGithub(&issues[ind])) if err != nil { return err } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go index 65ab753bd5..7dd1e5c623 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go @@ -1,12 +1,11 @@ package printers import ( - "context" "fmt" "html/template" + "io" "strings" - "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) @@ -123,13 +122,15 @@ type htmlIssue struct { Code string } -type HTML struct{} +type HTML struct { + w io.Writer +} -func NewHTML() *HTML { - return &HTML{} +func NewHTML(w io.Writer) *HTML { + return &HTML{w: w} } -func (h HTML) Print(_ context.Context, issues []result.Issue) error { +func (p HTML) Print(issues []result.Issue) error { var htmlIssues []htmlIssue for i := range issues { @@ -151,5 +152,5 @@ func (h HTML) Print(_ context.Context, issues []result.Issue) error { return err } - return t.Execute(logutils.StdOut, struct{ Issues []htmlIssue }{Issues: htmlIssues}) + return t.Execute(p.w, struct{ Issues []htmlIssue }{Issues: htmlIssues}) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go index 6ffa996fb0..28509cac45 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go @@ -1,22 +1,22 @@ package printers import ( - "context" "encoding/json" - "fmt" + "io" - "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/report" "github.com/golangci/golangci-lint/pkg/result" ) type JSON struct { - rd *report.Data + rd *report.Data // TODO(ldez) should be dropped in v2. Only used by the JSON reporter.
+ w io.Writer } -func NewJSON(rd *report.Data) *JSON { +func NewJSON(rd *report.Data, w io.Writer) *JSON { return &JSON{ rd: rd, + w: w, } } @@ -25,17 +25,14 @@ type JSONResult struct { Report *report.Data } -func (p JSON) Print(ctx context.Context, issues []result.Issue) error { +func (p JSON) Print(issues []result.Issue) error { res := JSONResult{ Issues: issues, Report: p.rd, } - - outputJSON, err := json.Marshal(res) - if err != nil { - return err + if res.Issues == nil { + res.Issues = []result.Issue{} } - fmt.Fprint(logutils.StdOut, string(outputJSON)) - return nil + return json.NewEncoder(p.w).Encode(res) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go index 9277cd66f2..3e3f82f580 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go @@ -1,11 +1,14 @@ package printers import ( - "context" "encoding/xml" + "fmt" + "io" + "sort" "strings" - "github.com/golangci/golangci-lint/pkg/logutils" + "golang.org/x/exp/maps" + "github.com/golangci/golangci-lint/pkg/result" ) @@ -31,17 +34,19 @@ type testCaseXML struct { type failureXML struct { Message string `xml:"message,attr"` + Type string `xml:"type,attr"` Content string `xml:",cdata"` } type JunitXML struct { + w io.Writer } -func NewJunitXML() *JunitXML { - return &JunitXML{} +func NewJunitXML(w io.Writer) *JunitXML { + return &JunitXML{w: w} } -func (JunitXML) Print(ctx context.Context, issues []result.Issue) error { +func (p JunitXML) Print(issues []result.Issue) error { suites := make(map[string]testSuiteXML) // use a map to group by file for ind := range issues { @@ -56,8 +61,10 @@ func (JunitXML) Print(ctx context.Context, issues []result.Issue) error { Name: i.FromLinter, ClassName: i.Pos.String(), Failure: failureXML{ - Message: i.Text, - Content: strings.Join(i.SourceLines, "\n"), + Type: i.Severity, + Message: i.Pos.String() + ": " + i.Text, + Content: fmt.Sprintf("%s: %s\nCategory: %s\nFile: %s\nLine: %d\nDetails: %s", + i.Severity, i.Text, i.FromLinter, i.Pos.Filename, i.Pos.Line, strings.Join(i.SourceLines, "\n")), }, } @@ -66,11 +73,13 @@ func (JunitXML) Print(ctx context.Context, issues []result.Issue) error { } var res testSuitesXML - for _, val := range suites { - res.TestSuites = append(res.TestSuites, val) - } + res.TestSuites = maps.Values(suites) + + sort.Slice(res.TestSuites, func(i, j int) bool { + return res.TestSuites[i].Suite < res.TestSuites[j].Suite + }) - enc := xml.NewEncoder(logutils.StdOut) + enc := xml.NewEncoder(p.w) enc.Indent("", " ") if err := enc.Encode(res); err != nil { return err diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go index bfafb88e2a..08c34234a9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go @@ -1,11 +1,143 @@ package printers import ( - "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/report" "github.com/golangci/golangci-lint/pkg/result" ) -type Printer interface { - Print(ctx context.Context, issues []result.Issue) error +const defaultFileMode = 0o644 + +type issuePrinter interface { + Print(issues []result.Issue) error +} + +// Printer prints 
issues +type Printer struct { + cfg *config.Output + reportData *report.Data + + log logutils.Log + + stdOut io.Writer + stdErr io.Writer +} + +// NewPrinter creates a new Printer. +func NewPrinter(log logutils.Log, cfg *config.Output, reportData *report.Data) (*Printer, error) { + if log == nil { + return nil, errors.New("missing log argument in constructor") + } + if cfg == nil { + return nil, errors.New("missing config argument in constructor") + } + if reportData == nil { + return nil, errors.New("missing reportData argument in constructor") + } + + return &Printer{ + cfg: cfg, + reportData: reportData, + log: log, + stdOut: logutils.StdOut, + stdErr: logutils.StdErr, + }, nil +} + +// Print prints issues based on the formats defined +func (c *Printer) Print(issues []result.Issue) error { + for _, format := range c.cfg.Formats { + err := c.printReports(issues, format) + if err != nil { + return err + } + } + + return nil +} + +func (c *Printer) printReports(issues []result.Issue, format config.OutputFormat) error { + w, shouldClose, err := c.createWriter(format.Path) + if err != nil { + return fmt.Errorf("can't create output for %s: %w", format.Path, err) + } + + defer func() { + if file, ok := w.(io.Closer); shouldClose && ok { + _ = file.Close() + } + }() + + p, err := c.createPrinter(format.Format, w) + if err != nil { + return err + } + + if err = p.Print(issues); err != nil { + return fmt.Errorf("can't print %d issues: %w", len(issues), err) + } + + return nil +} + +func (c *Printer) createWriter(path string) (io.Writer, bool, error) { + if path == "" || path == "stdout" { + return c.stdOut, false, nil + } + + if path == "stderr" { + return c.stdErr, false, nil + } + + err := os.MkdirAll(filepath.Dir(path), os.ModePerm) + if err != nil { + return nil, false, err + } + + f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, defaultFileMode) + if err != nil { + return nil, false, err + } + + return f, true, nil +} + +func (c *Printer) createPrinter(format string, w io.Writer) (issuePrinter, error) { + var p issuePrinter + + switch format { + case config.OutFormatJSON: + p = NewJSON(c.reportData, w) + case config.OutFormatColoredLineNumber, config.OutFormatLineNumber: + p = NewText(c.cfg.PrintIssuedLine, + format == config.OutFormatColoredLineNumber, c.cfg.PrintLinterName, + c.log.Child(logutils.DebugKeyTextPrinter), w) + case config.OutFormatTab, config.OutFormatColoredTab: + p = NewTab(c.cfg.PrintLinterName, + format == config.OutFormatColoredTab, + c.log.Child(logutils.DebugKeyTabPrinter), w) + case config.OutFormatCheckstyle: + p = NewCheckstyle(w) + case config.OutFormatCodeClimate: + p = NewCodeClimate(w) + case config.OutFormatHTML: + p = NewHTML(w) + case config.OutFormatJunitXML: + p = NewJunitXML(w) + case config.OutFormatGithubActions: + p = NewGitHub(w) + case config.OutFormatTeamCity: + p = NewTeamCity(w) + default: + return nil, fmt.Errorf("unknown output format %q", format) + } + + return p, nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go index d3cdce673d..c6d390d188 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go @@ -1,7 +1,6 @@ package printers import ( - "context" "fmt" "io" "text/tabwriter" @@ -14,23 +13,33 @@ import ( type Tab struct { printLinterName bool - log logutils.Log + useColors bool + + log logutils.Log + w io.Writer } -func NewTab(printLinterName bool, log 
logutils.Log) *Tab { +func NewTab(printLinterName, useColors bool, log logutils.Log, w io.Writer) *Tab { return &Tab{ printLinterName: printLinterName, + useColors: useColors, log: log, + w: w, } } -func (p Tab) SprintfColored(ca color.Attribute, format string, args ...interface{}) string { +func (p *Tab) SprintfColored(ca color.Attribute, format string, args ...any) string { c := color.New(ca) + + if !p.useColors { + c.DisableColor() + } + return c.Sprintf(format, args...) } -func (p *Tab) Print(ctx context.Context, issues []result.Issue) error { - w := tabwriter.NewWriter(logutils.StdOut, 0, 0, 2, ' ', 0) +func (p *Tab) Print(issues []result.Issue) error { + w := tabwriter.NewWriter(p.w, 0, 0, 2, ' ', 0) for i := range issues { p.printIssue(&issues[i], w) @@ -43,15 +52,15 @@ func (p *Tab) Print(ctx context.Context, issues []result.Issue) error { return nil } -func (p Tab) printIssue(i *result.Issue, w io.Writer) { - text := p.SprintfColored(color.FgRed, "%s", i.Text) +func (p *Tab) printIssue(issue *result.Issue, w io.Writer) { + text := p.SprintfColored(color.FgRed, "%s", issue.Text) if p.printLinterName { - text = fmt.Sprintf("%s\t%s", i.FromLinter, text) + text = fmt.Sprintf("%s\t%s", issue.FromLinter, text) } - pos := p.SprintfColored(color.Bold, "%s:%d", i.FilePath(), i.Line()) - if i.Pos.Column != 0 { - pos += fmt.Sprintf(":%d", i.Pos.Column) + pos := p.SprintfColored(color.Bold, "%s:%d", issue.FilePath(), issue.Line()) + if issue.Pos.Column != 0 { + pos += fmt.Sprintf(":%d", issue.Pos.Column) } fmt.Fprintf(w, "%s\t%s\n", pos, text) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go new file mode 100644 index 0000000000..d3693e9971 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go @@ -0,0 +1,122 @@ +package printers + +import ( + "fmt" + "io" + "strings" + "unicode/utf8" + + "github.com/golangci/golangci-lint/pkg/result" +) + +// Field limits. +const ( + smallLimit = 255 + largeLimit = 4000 +) + +// TeamCity printer for TeamCity format. +type TeamCity struct { + w io.Writer + escaper *strings.Replacer +} + +// NewTeamCity output format outputs issues according to TeamCity service message format. +func NewTeamCity(w io.Writer) *TeamCity { + return &TeamCity{ + w: w, + // https://www.jetbrains.com/help/teamcity/service-messages.html#Escaped+Values + escaper: strings.NewReplacer( + "'", "|'", + "\n", "|n", + "\r", "|r", + "|", "||", + "[", "|[", + "]", "|]", + ), + } +} + +func (p *TeamCity) Print(issues []result.Issue) error { + uniqLinters := map[string]struct{}{} + + for i := range issues { + issue := issues[i] + + _, ok := uniqLinters[issue.FromLinter] + if !ok { + inspectionType := InspectionType{ + id: issue.FromLinter, + name: issue.FromLinter, + description: issue.FromLinter, + category: "Golangci-lint reports", + } + + _, err := inspectionType.Print(p.w, p.escaper) + if err != nil { + return err + } + + uniqLinters[issue.FromLinter] = struct{}{} + } + + instance := InspectionInstance{ + typeID: issue.FromLinter, + message: issue.Text, + file: issue.FilePath(), + line: issue.Line(), + severity: issue.Severity, + } + + _, err := instance.Print(p.w, p.escaper) + if err != nil { + return err + } + } + + return nil +} + +// InspectionType is the unique description of the conducted inspection. Each specific warning or +// an error in code (inspection instance) has an inspection type. 
+// https://www.jetbrains.com/help/teamcity/service-messages.html#Inspection+Type +type InspectionType struct { + id string // (mandatory) limited by 255 characters. + name string // (mandatory) limited by 255 characters. + description string // (mandatory) limited by 255 characters. + category string // (mandatory) limited by 4000 characters. +} + +func (i InspectionType) Print(w io.Writer, escaper *strings.Replacer) (int, error) { + return fmt.Fprintf(w, "##teamcity[InspectionType id='%s' name='%s' description='%s' category='%s']\n", + limit(i.id, smallLimit), limit(i.name, smallLimit), limit(escaper.Replace(i.description), largeLimit), limit(i.category, smallLimit)) +} + +// InspectionInstance reports a specific defect, warning, error message. +// Includes location, description, and various optional and custom attributes. +// https://www.jetbrains.com/help/teamcity/service-messages.html#Inspection+Instance +type InspectionInstance struct { + typeID string // (mandatory) limited by 255 characters. + message string // (optional) limited by 4000 characters. + file string // (mandatory) file path limited by 4000 characters. + line int // (optional) line of the file. + severity string // (optional) any linter severity. +} + +func (i InspectionInstance) Print(w io.Writer, replacer *strings.Replacer) (int, error) { + return fmt.Fprintf(w, "##teamcity[inspection typeId='%s' message='%s' file='%s' line='%d' SEVERITY='%s']\n", + limit(i.typeID, smallLimit), + limit(replacer.Replace(i.message), largeLimit), + limit(i.file, largeLimit), + i.line, strings.ToUpper(i.severity)) +} + +func limit(s string, max int) string { + var size, count int + for i := 0; i < max && count < len(s); i++ { + _, size = utf8.DecodeRuneInString(s[count:]) + count += size + } + + return s[:count] +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go index 1814528884..56cced7696 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go @@ -1,8 +1,8 @@ package printers import ( - "context" "fmt" + "io" "strings" "github.com/fatih/color" @@ -13,31 +13,34 @@ import ( type Text struct { printIssuedLine bool - useColors bool printLinterName bool + useColors bool log logutils.Log + w io.Writer } -func NewText(printIssuedLine, useColors, printLinterName bool, log logutils.Log) *Text { +func NewText(printIssuedLine, useColors, printLinterName bool, log logutils.Log, w io.Writer) *Text { return &Text{ printIssuedLine: printIssuedLine, - useColors: useColors, printLinterName: printLinterName, + useColors: useColors, log: log, + w: w, } } -func (p Text) SprintfColored(ca color.Attribute, format string, args ...interface{}) string { +func (p *Text) SprintfColored(ca color.Attribute, format string, args ...any) string { + c := color.New(ca) + if !p.useColors { - return fmt.Sprintf(format, args...) + c.DisableColor() } - c := color.New(ca) return c.Sprintf(format, args...) 
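The escaper in the TeamCity printer above implements the vertical-bar escaping required by TeamCity service messages; a standalone illustration using the same replacement table:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same replacement table as NewTeamCity above.
	escaper := strings.NewReplacer(
		"'", "|'",
		"\n", "|n",
		"\r", "|r",
		"|", "||",
		"[", "|[",
		"]", "|]",
	)

	msg := "don't pipe | into [brackets]\nsecond line"
	fmt.Println(escaper.Replace(msg))
	// Output: don|'t pipe || into |[brackets|]|nsecond line
}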
} -func (p *Text) Print(ctx context.Context, issues []result.Issue) error { +func (p *Text) Print(issues []result.Issue) error { for i := range issues { p.printIssue(&issues[i]) @@ -52,32 +55,32 @@ func (p *Text) Print(ctx context.Context, issues []result.Issue) error { return nil } -func (p Text) printIssue(i *result.Issue) { - text := p.SprintfColored(color.FgRed, "%s", strings.TrimSpace(i.Text)) +func (p *Text) printIssue(issue *result.Issue) { + text := p.SprintfColored(color.FgRed, "%s", strings.TrimSpace(issue.Text)) if p.printLinterName { - text += fmt.Sprintf(" (%s)", i.FromLinter) + text += fmt.Sprintf(" (%s)", issue.FromLinter) } - pos := p.SprintfColored(color.Bold, "%s:%d", i.FilePath(), i.Line()) - if i.Pos.Column != 0 { - pos += fmt.Sprintf(":%d", i.Pos.Column) + pos := p.SprintfColored(color.Bold, "%s:%d", issue.FilePath(), issue.Line()) + if issue.Pos.Column != 0 { + pos += fmt.Sprintf(":%d", issue.Pos.Column) } - fmt.Fprintf(logutils.StdOut, "%s: %s\n", pos, text) + fmt.Fprintf(p.w, "%s: %s\n", pos, text) } -func (p Text) printSourceCode(i *result.Issue) { - for _, line := range i.SourceLines { - fmt.Fprintln(logutils.StdOut, line) +func (p *Text) printSourceCode(issue *result.Issue) { + for _, line := range issue.SourceLines { + fmt.Fprintln(p.w, line) } } -func (p Text) printUnderLinePointer(i *result.Issue) { +func (p *Text) printUnderLinePointer(issue *result.Issue) { // if column == 0 it means column is unknown (e.g. for gosec) - if len(i.SourceLines) != 1 || i.Pos.Column == 0 { + if len(issue.SourceLines) != 1 || issue.Pos.Column == 0 { return } - col0 := i.Pos.Column - 1 - line := i.SourceLines[0] + col0 := issue.Pos.Column - 1 + line := issue.SourceLines[0] prefixRunes := make([]rune, 0, len(line)) for j := 0; j < len(line) && j < col0; j++ { if line[j] == '\t' { @@ -87,5 +90,5 @@ func (p Text) printUnderLinePointer(i *result.Issue) { } } - fmt.Fprintf(logutils.StdOut, "%s%s\n", string(prefixRunes), p.SprintfColored(color.FgYellow, "^")) + fmt.Fprintf(p.w, "%s%s\n", string(prefixRunes), p.SprintfColored(color.FgYellow, "^")) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/report/log.go b/vendor/github.com/golangci/golangci-lint/pkg/report/log.go index 45ab6cae85..61665f28b7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/report/log.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/report/log.go @@ -20,20 +20,20 @@ func NewLogWrapper(log logutils.Log, reportData *Data) *LogWrapper { } } -func (lw LogWrapper) Fatalf(format string, args ...interface{}) { +func (lw LogWrapper) Fatalf(format string, args ...any) { lw.origLog.Fatalf(format, args...) } -func (lw LogWrapper) Panicf(format string, args ...interface{}) { +func (lw LogWrapper) Panicf(format string, args ...any) { lw.origLog.Panicf(format, args...) } -func (lw LogWrapper) Errorf(format string, args ...interface{}) { +func (lw LogWrapper) Errorf(format string, args ...any) { lw.origLog.Errorf(format, args...) lw.rd.Error = fmt.Sprintf(format, args...) } -func (lw LogWrapper) Warnf(format string, args ...interface{}) { +func (lw LogWrapper) Warnf(format string, args ...any) { lw.origLog.Warnf(format, args...) w := Warning{ Tag: strings.Join(lw.tags, "/"), @@ -43,7 +43,7 @@ func (lw LogWrapper) Warnf(format string, args ...interface{}) { lw.rd.Warnings = append(lw.rd.Warnings, w) } -func (lw LogWrapper) Infof(format string, args ...interface{}) { +func (lw LogWrapper) Infof(format string, args ...any) { lw.origLog.Infof(format, args...) 
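LogWrapper above decorates a logger so warnings also land in the machine-readable report; a simplified sketch of that decorator shape, with hypothetical types standing in for logutils.Log and report.Data:

package main

import "fmt"

// warnLogger is a hypothetical one-method stand-in for logutils.Log.
type warnLogger interface {
	Warnf(format string, args ...any)
}

type stderrLog struct{}

func (stderrLog) Warnf(format string, args ...any) {
	fmt.Printf("WARN "+format+"\n", args...)
}

// reportLog forwards to the original logger and records each warning,
// mirroring what LogWrapper.Warnf does with report.Data.
type reportLog struct {
	orig     warnLogger
	warnings []string
}

func (l *reportLog) Warnf(format string, args ...any) {
	l.orig.Warnf(format, args...)
	l.warnings = append(l.warnings, fmt.Sprintf(format, args...))
}

func main() {
	log := &reportLog{orig: stderrLog{}}
	log.Warnf("deprecated linter: %s", "golint")
	fmt.Println(log.warnings)
}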
} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go index 707a2b17cd..32246a6df4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go @@ -1,7 +1,7 @@ package result import ( - "crypto/md5" //nolint:gosec + "crypto/md5" //nolint:gosec // for md5 hash "fmt" "go/token" @@ -91,8 +91,8 @@ func (i *Issue) Fingerprint() string { firstLine = i.SourceLines[0] } - hash := md5.New() //nolint:gosec - _, _ = hash.Write([]byte(fmt.Sprintf("%s%s%s", i.Pos.Filename, i.Text, firstLine))) + hash := md5.New() //nolint:gosec // we don't need a strong hash here + _, _ = fmt.Fprintf(hash, "%s%s%s", i.Pos.Filename, i.Text, firstLine) return fmt.Sprintf("%X", hash.Sum(nil)) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go index 57388f64fa..b5944cd1d0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go @@ -5,35 +5,44 @@ import ( "go/parser" "go/token" "path/filepath" + "regexp" "strings" - "github.com/pkg/errors" - "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) -var autogenDebugf = logutils.Debug("autogen_exclude") +const ( + genCodeGenerated = "code generated" + genDoNotEdit = "do not edit" + genAutoFile = "autogenerated file" // easyjson +) -type ageFileSummary struct { - isGenerated bool -} +var _ Processor = &AutogeneratedExclude{} -type ageFileSummaryCache map[string]*ageFileSummary +type fileSummary struct { + generated bool +} type AutogeneratedExclude struct { - fileSummaryCache ageFileSummaryCache + debugf logutils.DebugFunc + + strict bool + strictPattern *regexp.Regexp + + fileSummaryCache map[string]*fileSummary } -func NewAutogeneratedExclude() *AutogeneratedExclude { +func NewAutogeneratedExclude(strict bool) *AutogeneratedExclude { return &AutogeneratedExclude{ - fileSummaryCache: ageFileSummaryCache{}, + debugf: logutils.Debug(logutils.DebugKeyAutogenExclude), + strict: strict, + strictPattern: regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`), + fileSummaryCache: map[string]*fileSummary{}, } } -var _ Processor = &AutogeneratedExclude{} - -func (p AutogeneratedExclude) Name() string { +func (p *AutogeneratedExclude) Name() string { return "autogenerated_exclude" } @@ -41,86 +50,104 @@ func (p *AutogeneratedExclude) Process(issues []result.Issue) ([]result.Issue, e return filterIssuesErr(issues, p.shouldPassIssue) } -func isSpecialAutogeneratedFile(filePath string) bool { - fileName := filepath.Base(filePath) - // fake files or generation definitions to which //line points to for generated files - return filepath.Ext(fileName) != ".go" -} +func (p *AutogeneratedExclude) Finish() {} -func (p *AutogeneratedExclude) shouldPassIssue(i *result.Issue) (bool, error) { - if i.FromLinter == "typecheck" { +func (p *AutogeneratedExclude) shouldPassIssue(issue *result.Issue) (bool, error) { + if issue.FromLinter == "typecheck" { // don't hide typechecking errors in generated files: users expect to see why the project isn't compiling return true, nil } - if filepath.Base(i.FilePath()) == "go.mod" { - return true, nil + // The file is already known. 
+ fs := p.fileSummaryCache[issue.FilePath()] + if fs != nil { + return !fs.generated, nil } - if isSpecialAutogeneratedFile(i.FilePath()) { - return false, nil - } + fs = &fileSummary{} + p.fileSummaryCache[issue.FilePath()] = fs - fs, err := p.getOrCreateFileSummary(i) - if err != nil { - return false, err + if p.strict { + var err error + fs.generated, err = p.isGeneratedFileStrict(issue.FilePath()) + if err != nil { + return false, fmt.Errorf("failed to get doc (strict) of file %s: %w", issue.FilePath(), err) + } + } else { + doc, err := getComments(issue.FilePath()) + if err != nil { + return false, fmt.Errorf("failed to get doc (lax) of file %s: %w", issue.FilePath(), err) + } + + fs.generated = p.isGeneratedFileLax(doc) } + p.debugf("file %q is generated: %t", issue.FilePath(), fs.generated) + // don't report issues for autogenerated files - return !fs.isGenerated, nil + return !fs.generated, nil } -// isGenerated reports whether the source file is generated code. -// Using a bit laxer rules than https://golang.org/s/generatedcode to -// match more generated code. See #48 and #72. -func isGeneratedFileByComment(doc string) bool { - const ( - genCodeGenerated = "code generated" - genDoNotEdit = "do not edit" - genAutoFile = "autogenerated file" // easyjson - ) - +// isGeneratedFileLax reports whether the source file is generated code. +// The function uses a bit laxer rules than isGeneratedFileStrict to match more generated code. +// See https://github.com/golangci/golangci-lint/issues/48 and https://github.com/golangci/golangci-lint/issues/72. +func (p *AutogeneratedExclude) isGeneratedFileLax(doc string) bool { markers := []string{genCodeGenerated, genDoNotEdit, genAutoFile} + doc = strings.ToLower(doc) + for _, marker := range markers { if strings.Contains(doc, marker) { - autogenDebugf("doc contains marker %q: file is generated", marker) + p.debugf("doc contains marker %q: file is generated", marker) + return true } } - autogenDebugf("doc of len %d doesn't contain any of markers: %s", len(doc), markers) + p.debugf("doc of len %d doesn't contain any of markers: %s", len(doc), markers) + return false } -func (p *AutogeneratedExclude) getOrCreateFileSummary(i *result.Issue) (*ageFileSummary, error) { - fs := p.fileSummaryCache[i.FilePath()] - if fs != nil { - return fs, nil +// isGeneratedFileStrict returns true if the source file has a line that matches the regular expression: +// +// ^// Code generated .* DO NOT EDIT\.$ +// +// This line must appear before the first non-comment, non-blank text in the file. +// Based on https://go.dev/s/generatedcode. 
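Strict mode matches the canonical marker defined at https://go.dev/s/generatedcode; a quick standalone check of the same expression used for strictPattern above:

package main

import (
	"fmt"
	"regexp"
)

// Identical to the strictPattern expression above.
var generated = regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`)

func main() {
	fmt.Println(generated.MatchString("// Code generated by protoc-gen-go. DO NOT EDIT.")) // true
	fmt.Println(generated.MatchString("// autogenerated file, do not edit"))               // false: only the lax mode catches this
}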
+func (p *AutogeneratedExclude) isGeneratedFileStrict(filePath string) (bool, error) { + file, err := parser.ParseFile(token.NewFileSet(), filePath, nil, parser.PackageClauseOnly|parser.ParseComments) + if err != nil { + return false, fmt.Errorf("failed to parse file: %w", err) } - fs = &ageFileSummary{} - p.fileSummaryCache[i.FilePath()] = fs - - if i.FilePath() == "" { - return nil, fmt.Errorf("no file path for issue") + if file == nil || len(file.Comments) == 0 { + return false, nil } - doc, err := getDoc(i.FilePath()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get doc of file %s", i.FilePath()) + for _, comment := range file.Comments { + if comment.Pos() > file.Package { + return false, nil + } + + for _, line := range comment.List { + generated := p.strictPattern.MatchString(line.Text) + if generated { + p.debugf("doc contains ignore expression: file is generated") + + return true, nil + } + } } - fs.isGenerated = isGeneratedFileByComment(doc) - autogenDebugf("file %q is generated: %t", i.FilePath(), fs.isGenerated) - return fs, nil + return false, nil } -func getDoc(filePath string) (string, error) { +func getComments(filePath string) (string, error) { fset := token.NewFileSet() syntax, err := parser.ParseFile(fset, filePath, nil, parser.PackageClauseOnly|parser.ParseComments) if err != nil { - return "", errors.Wrap(err, "failed to parse file") + return "", fmt.Errorf("failed to parse file: %w", err) } var docLines []string @@ -131,4 +158,6 @@ func getDoc(filePath string) (string, error) { return strings.Join(docLines, "\n"), nil } -func (p AutogeneratedExclude) Finish() {} +func isGoFile(name string) bool { + return filepath.Ext(name) == ".go" +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go index b6ce4f2159..12333c898d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go @@ -8,32 +8,39 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) +const caseInsensitivePrefix = "(?i)" + type BaseRule struct { - Text string - Source string - Path string - Linters []string + Text string + Source string + Path string + PathExcept string + Linters []string } type baseRule struct { - text *regexp.Regexp - source *regexp.Regexp - path *regexp.Regexp - linters []string + text *regexp.Regexp + source *regexp.Regexp + path *regexp.Regexp + pathExcept *regexp.Regexp + linters []string } func (r *baseRule) isEmpty() bool { - return r.text == nil && r.source == nil && r.path == nil && len(r.linters) == 0 + return r.text == nil && r.source == nil && r.path == nil && r.pathExcept == nil && len(r.linters) == 0 } -func (r *baseRule) match(issue *result.Issue, lineCache *fsutils.LineCache, log logutils.Log) bool { +func (r *baseRule) match(issue *result.Issue, files *fsutils.Files, log logutils.Log) bool { if r.isEmpty() { return false } if r.text != nil && !r.text.MatchString(issue.Text) { return false } - if r.path != nil && !r.path.MatchString(issue.FilePath()) { + if r.path != nil && !r.path.MatchString(files.WithPathPrefix(issue.FilePath())) { + return false + } + if r.pathExcept != nil && r.pathExcept.MatchString(issue.FilePath()) { return false } if len(r.linters) != 0 && !r.matchLinter(issue) { @@ -41,7 +48,7 @@ func (r *baseRule) match(issue *result.Issue, lineCache *fsutils.LineCache, log } // the most heavyweight checking last - if 
r.source != nil && !r.matchSource(issue, lineCache, log) { + if r.source != nil && !r.matchSource(issue, files.LineCache, log) { return false } @@ -58,7 +65,7 @@ func (r *baseRule) matchLinter(issue *result.Issue) bool { return false } -func (r *baseRule) matchSource(issue *result.Issue, lineCache *fsutils.LineCache, log logutils.Log) bool { // nolint:interfacer +func (r *baseRule) matchSource(issue *result.Issue, lineCache *fsutils.LineCache, log logutils.Log) bool { sourceLine, errSourceLine := lineCache.GetLine(issue.FilePath(), issue.Line()) if errSourceLine != nil { log.Warnf("Failed to get line %s:%d from line cache: %s", issue.FilePath(), issue.Line(), errSourceLine) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go index c8793871ac..1ad73c31af 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go @@ -1,11 +1,10 @@ package processors import ( + "fmt" "path/filepath" "strings" - "github.com/pkg/errors" - "github.com/golangci/golangci-lint/pkg/goutil" "github.com/golangci/golangci-lint/pkg/result" ) @@ -27,17 +26,17 @@ func (p Cgo) Name() string { } func (p Cgo) Process(issues []result.Issue) ([]result.Issue, error) { - return filterIssuesErr(issues, func(i *result.Issue) (bool, error) { - // some linters (.e.g gosec, deadcode) return incorrect filepaths for cgo issues, + return filterIssuesErr(issues, func(issue *result.Issue) (bool, error) { + // some linters (e.g. gosec, deadcode) return incorrect filepaths for cgo issues, // also cgo files have strange issues looking like false positives. // cache dir contains all preprocessed files including cgo files - issueFilePath := i.FilePath() - if !filepath.IsAbs(i.FilePath()) { - absPath, err := filepath.Abs(i.FilePath()) + issueFilePath := issue.FilePath() + if !filepath.IsAbs(issue.FilePath()) { + absPath, err := filepath.Abs(issue.FilePath()) if err != nil { - return false, errors.Wrapf(err, "failed to build abs path for %q", i.FilePath()) + return false, fmt.Errorf("failed to build abs path for %q: %w", issue.FilePath(), err) } issueFilePath = absPath } @@ -46,7 +45,7 @@ func (p Cgo) Process(issues []result.Issue) ([]result.Issue, error) { return false, nil } - if filepath.Base(i.FilePath()) == "_cgo_gotypes.go" { + if filepath.Base(issue.FilePath()) == "_cgo_gotypes.go" { // skip cgo warning for go1.10 return false, nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go index fc4aba4b93..d607b02182 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "strings" @@ -13,21 +12,25 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) +const envGolangciDiffProcessorPatch = "GOLANGCI_DIFF_PROCESSOR_PATCH" + type Diff struct { onlyNew bool fromRev string patchFilePath string + wholeFiles bool patch string } var _ Processor = Diff{} -func NewDiff(onlyNew bool, fromRev, patchFilePath string) *Diff { +func NewDiff(onlyNew bool, fromRev, patchFilePath string, wholeFiles bool) *Diff { return &Diff{ onlyNew: onlyNew, fromRev: fromRev, patchFilePath: patchFilePath, - patch: os.Getenv("GOLANGCI_DIFF_PROCESSOR_PATCH"), + wholeFiles: wholeFiles, + patch: 
os.Getenv(envGolangciDiffProcessorPatch), } } @@ -42,9 +45,9 @@ func (p Diff) Process(issues []result.Issue) ([]result.Issue, error) { var patchReader io.Reader if p.patchFilePath != "" { - patch, err := ioutil.ReadFile(p.patchFilePath) + patch, err := os.ReadFile(p.patchFilePath) if err != nil { - return nil, fmt.Errorf("can't read from patch file %s: %s", p.patchFilePath, err) + return nil, fmt.Errorf("can't read from patch file %s: %w", p.patchFilePath, err) } patchReader = bytes.NewReader(patch) } else if p.patch != "" { @@ -54,20 +57,21 @@ func (p Diff) Process(issues []result.Issue) ([]result.Issue, error) { c := revgrep.Checker{ Patch: patchReader, RevisionFrom: p.fromRev, + WholeFiles: p.wholeFiles, } if err := c.Prepare(); err != nil { - return nil, fmt.Errorf("can't prepare diff by revgrep: %s", err) + return nil, fmt.Errorf("can't prepare diff by revgrep: %w", err) } - return transformIssues(issues, func(i *result.Issue) *result.Issue { - hunkPos, isNew := c.IsNewIssue(i) + return transformIssues(issues, func(issue *result.Issue) *result.Issue { + hunkPos, isNew := c.IsNewIssue(issue) if !isNew { return nil } - newI := *i - newI.HunkPos = hunkPos - return &newI + newIssue := *issue + newIssue.HunkPos = hunkPos + return &newIssue }), nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude.go index 92959a328c..05a56ef965 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude.go @@ -6,24 +6,37 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) +var _ Processor = Exclude{} + type Exclude struct { + name string + pattern *regexp.Regexp } -var _ Processor = Exclude{} +type ExcludeOptions struct { + Pattern string + CaseSensitive bool +} -func NewExclude(pattern string) *Exclude { - var patternRe *regexp.Regexp - if pattern != "" { - patternRe = regexp.MustCompile("(?i)" + pattern) +func NewExclude(opts ExcludeOptions) *Exclude { + p := &Exclude{name: "exclude"} + + prefix := caseInsensitivePrefix + if opts.CaseSensitive { + p.name = "exclude-case-sensitive" + prefix = "" } - return &Exclude{ - pattern: patternRe, + + if opts.Pattern != "" { + p.pattern = regexp.MustCompile(prefix + opts.Pattern) } + + return p } func (p Exclude) Name() string { - return "exclude" + return p.name } func (p Exclude) Process(issues []result.Issue) ([]result.Issue, error) { @@ -31,29 +44,9 @@ func (p Exclude) Process(issues []result.Issue) ([]result.Issue, error) { return issues, nil } - return filterIssues(issues, func(i *result.Issue) bool { - return !p.pattern.MatchString(i.Text) + return filterIssues(issues, func(issue *result.Issue) bool { + return !p.pattern.MatchString(issue.Text) }), nil } func (p Exclude) Finish() {} - -type ExcludeCaseSensitive struct { - *Exclude -} - -var _ Processor = ExcludeCaseSensitive{} - -func NewExcludeCaseSensitive(pattern string) *ExcludeCaseSensitive { - var patternRe *regexp.Regexp - if pattern != "" { - patternRe = regexp.MustCompile(pattern) - } - return &ExcludeCaseSensitive{ - &Exclude{pattern: patternRe}, - } -} - -func (p ExcludeCaseSensitive) Name() string { - return "exclude-case-sensitive" -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go index d4d6569f4c..a20d56d05d 100644 --- 
a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go @@ -8,6 +8,8 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) +var _ Processor = ExcludeRules{} + type excludeRule struct { baseRule } @@ -17,48 +19,45 @@ type ExcludeRule struct { } type ExcludeRules struct { - rules []excludeRule - lineCache *fsutils.LineCache - log logutils.Log -} + name string -func NewExcludeRules(rules []ExcludeRule, lineCache *fsutils.LineCache, log logutils.Log) *ExcludeRules { - r := &ExcludeRules{ - lineCache: lineCache, - log: log, - } - r.rules = createRules(rules, "(?i)") + log logutils.Log + files *fsutils.Files - return r + rules []excludeRule } -func createRules(rules []ExcludeRule, prefix string) []excludeRule { - parsedRules := make([]excludeRule, 0, len(rules)) - for _, rule := range rules { - parsedRule := excludeRule{} - parsedRule.linters = rule.Linters - if rule.Text != "" { - parsedRule.text = regexp.MustCompile(prefix + rule.Text) - } - if rule.Source != "" { - parsedRule.source = regexp.MustCompile(prefix + rule.Source) - } - if rule.Path != "" { - parsedRule.path = regexp.MustCompile(rule.Path) - } - parsedRules = append(parsedRules, parsedRule) +type ExcludeRulesOptions struct { + Rules []ExcludeRule + CaseSensitive bool +} + +func NewExcludeRules(log logutils.Log, files *fsutils.Files, opts ExcludeRulesOptions) *ExcludeRules { + p := &ExcludeRules{ + name: "exclude-rules", + files: files, + log: log, } - return parsedRules + + prefix := caseInsensitivePrefix + if opts.CaseSensitive { + prefix = "" + p.name = "exclude-rules-case-sensitive" + } + + p.rules = createRules(opts.Rules, prefix) + + return p } func (p ExcludeRules) Process(issues []result.Issue) ([]result.Issue, error) { if len(p.rules) == 0 { return issues, nil } - return filterIssues(issues, func(i *result.Issue) bool { + return filterIssues(issues, func(issue *result.Issue) bool { for _, rule := range p.rules { rule := rule - if rule.match(i, p.lineCache, p.log) { + if rule.match(issue, p.files, p.log) { return false } } @@ -66,25 +65,35 @@ func (p ExcludeRules) Process(issues []result.Issue) ([]result.Issue, error) { }), nil } -func (ExcludeRules) Name() string { return "exclude-rules" } -func (ExcludeRules) Finish() {} +func (p ExcludeRules) Name() string { return p.name } -var _ Processor = ExcludeRules{} +func (ExcludeRules) Finish() {} -type ExcludeRulesCaseSensitive struct { - *ExcludeRules -} +func createRules(rules []ExcludeRule, prefix string) []excludeRule { + parsedRules := make([]excludeRule, 0, len(rules)) -func NewExcludeRulesCaseSensitive(rules []ExcludeRule, lineCache *fsutils.LineCache, log logutils.Log) *ExcludeRulesCaseSensitive { - r := &ExcludeRules{ - lineCache: lineCache, - log: log, - } - r.rules = createRules(rules, "") + for _, rule := range rules { + parsedRule := excludeRule{} + parsedRule.linters = rule.Linters - return &ExcludeRulesCaseSensitive{r} -} + if rule.Text != "" { + parsedRule.text = regexp.MustCompile(prefix + rule.Text) + } -func (ExcludeRulesCaseSensitive) Name() string { return "exclude-rules-case-sensitive" } + if rule.Source != "" { + parsedRule.source = regexp.MustCompile(prefix + rule.Source) + } + + if rule.Path != "" { + parsedRule.path = regexp.MustCompile(fsutils.NormalizePathInRegex(rule.Path)) + } + + if rule.PathExcept != "" { + parsedRule.pathExcept = regexp.MustCompile(fsutils.NormalizePathInRegex(rule.PathExcept)) + } + + parsedRules = 
append(parsedRules, parsedRule) + } -var _ Processor = ExcludeCaseSensitive{} + return parsedRules +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go index 96540245b3..adf82c823c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go @@ -97,35 +97,35 @@ func NewFilenameUnadjuster(pkgs []*packages.Package, log logutils.Log) *Filename } } -func (p FilenameUnadjuster) Name() string { +func (p *FilenameUnadjuster) Name() string { return "filename_unadjuster" } func (p *FilenameUnadjuster) Process(issues []result.Issue) ([]result.Issue, error) { - return transformIssues(issues, func(i *result.Issue) *result.Issue { - issueFilePath := i.FilePath() - if !filepath.IsAbs(i.FilePath()) { - absPath, err := filepath.Abs(i.FilePath()) + return transformIssues(issues, func(issue *result.Issue) *result.Issue { + issueFilePath := issue.FilePath() + if !filepath.IsAbs(issue.FilePath()) { + absPath, err := filepath.Abs(issue.FilePath()) if err != nil { - p.log.Warnf("failed to build abs path for %q: %s", i.FilePath(), err) - return i + p.log.Warnf("failed to build abs path for %q: %s", issue.FilePath(), err) + return issue } issueFilePath = absPath } mapper := p.m[issueFilePath] if mapper == nil { - return i + return issue } - newI := *i - newI.Pos = mapper(i.Pos) - if !p.loggedUnadjustments[i.Pos.Filename] { - p.log.Infof("Unadjusted from %v to %v", i.Pos, newI.Pos) - p.loggedUnadjustments[i.Pos.Filename] = true + newIssue := *issue + newIssue.Pos = mapper(issue.Pos) + if !p.loggedUnadjustments[issue.Pos.Filename] { + p.log.Infof("Unadjusted from %v to %v", issue.Pos, newIssue.Pos) + p.loggedUnadjustments[issue.Pos.Filename] = true } - return &newI + return &newIssue }), nil } -func (FilenameUnadjuster) Finish() {} +func (p *FilenameUnadjuster) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go index 17f519e32b..2879beb48f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go @@ -8,8 +8,7 @@ import ( "sort" "strings" - "github.com/pkg/errors" - + "github.com/golangci/golangci-lint/internal/robustio" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/logutils" @@ -17,6 +16,8 @@ import ( "github.com/golangci/golangci-lint/pkg/timeutils" ) +var _ Processor = Fixer{} + type Fixer struct { cfg *config.Config log logutils.Log @@ -37,9 +38,9 @@ func (f Fixer) printStat() { f.sw.PrintStages() } -func (f Fixer) Process(issues []result.Issue) []result.Issue { +func (f Fixer) Process(issues []result.Issue) ([]result.Issue, error) { if !f.cfg.Issues.NeedFix { - return issues + return issues, nil } outIssues := make([]result.Issue, 0, len(issues)) @@ -68,22 +69,28 @@ func (f Fixer) Process(issues []result.Issue) []result.Issue { } f.printStat() - return outIssues + return outIssues, nil +} + +func (f Fixer) Name() string { + return "fixer" } +func (f Fixer) Finish() {} + func (f Fixer) fixIssuesInFile(filePath string, issues []result.Issue) error { // TODO: don't read the whole file into memory: read line by line; // can't just use 
bufio.scanner: it has a line length limit origFileData, err := f.fileCache.GetFileBytes(filePath) if err != nil { - return errors.Wrapf(err, "failed to get file bytes for %s", filePath) + return fmt.Errorf("failed to get file bytes for %s: %w", filePath, err) } origFileLines := bytes.Split(origFileData, []byte("\n")) tmpFileName := filepath.Join(filepath.Dir(filePath), fmt.Sprintf(".%s.golangci_fix", filepath.Base(filePath))) tmpOutFile, err := os.Create(tmpFileName) if err != nil { - return errors.Wrapf(err, "failed to make file %s", tmpFileName) + return fmt.Errorf("failed to make file %s: %w", tmpFileName, err) } // merge multiple issues per line into one issue @@ -104,14 +111,14 @@ func (f Fixer) fixIssuesInFile(filePath string, issues []result.Issue) error { if err = f.writeFixedFile(origFileLines, issues, tmpOutFile); err != nil { tmpOutFile.Close() - os.Remove(tmpOutFile.Name()) + _ = robustio.RemoveAll(tmpOutFile.Name()) return err } tmpOutFile.Close() - if err = os.Rename(tmpOutFile.Name(), filePath); err != nil { - os.Remove(tmpOutFile.Name()) - return errors.Wrapf(err, "failed to rename %s -> %s", tmpOutFile.Name(), filePath) + if err = robustio.Rename(tmpOutFile.Name(), filePath); err != nil { + _ = robustio.RemoveAll(tmpOutFile.Name()) + return fmt.Errorf("failed to rename %s -> %s: %w", tmpOutFile.Name(), filePath, err) } return nil @@ -126,20 +133,22 @@ func (f Fixer) mergeLineIssues(lineNum int, lineIssues []result.Issue, origFileL // check issues first for ind := range lineIssues { - i := &lineIssues[ind] - if i.LineRange != nil { + li := &lineIssues[ind] + + if li.LineRange != nil { f.log.Infof("Line %d has multiple issues but at least one of them is ranged: %#v", lineNum, lineIssues) return &lineIssues[0] } - r := i.Replacement - if r.Inline == nil || len(r.NewLines) != 0 || r.NeedOnlyDelete { + inline := li.Replacement.Inline + + if inline == nil || len(li.Replacement.NewLines) != 0 || li.Replacement.NeedOnlyDelete { f.log.Infof("Line %d has multiple issues but at least one of them isn't inline: %#v", lineNum, lineIssues) - return &lineIssues[0] + return li } - if r.Inline.StartCol < 0 || r.Inline.Length <= 0 || r.Inline.StartCol+r.Inline.Length > len(origLine) { - f.log.Warnf("Line %d (%q) has invalid inline fix: %#v, %#v", lineNum, origLine, i, r.Inline) + if inline.StartCol < 0 || inline.Length <= 0 || inline.StartCol+inline.Length > len(origLine) { + f.log.Warnf("Line %d (%q) has invalid inline fix: %#v, %#v", lineNum, origLine, li, inline) return nil } } @@ -155,7 +164,7 @@ func (f Fixer) applyInlineFixes(lineIssues []result.Issue, origLine []byte, line var newLineBuf bytes.Buffer newLineBuf.Grow(len(origLine)) - //nolint:misspell + //nolint:misspell // misspelling is intentional // example: origLine="it's becouse of them", StartCol=5, Length=7, NewString="because" curOrigLinePos := 0 @@ -240,7 +249,7 @@ func (f Fixer) writeFixedFile(origFileLines [][]byte, issues []result.Issue, tmp outLine += "\n" } if _, err := tmpOutFile.WriteString(outLine); err != nil { - return errors.Wrap(err, "failed to write output line") + return fmt.Errorf("failed to write output line: %w", err) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/identifier_marker.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/identifier_marker.go index 5cc4e56ba2..1975f6d086 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/identifier_marker.go +++ 
b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/identifier_marker.go @@ -31,27 +31,39 @@ var replacePatterns = []replacePattern{ {`^composites: (\S+) composite literal uses unkeyed fields$`, "composites: `${1}` composite literal uses unkeyed fields"}, // gosec - {`^(\S+): Blacklisted import (\S+): weak cryptographic primitive$`, - "${1}: Blacklisted import `${2}`: weak cryptographic primitive"}, + { + `^(\S+): Blacklisted import (\S+): weak cryptographic primitive$`, + "${1}: Blacklisted import `${2}`: weak cryptographic primitive", + }, {`^TLS InsecureSkipVerify set true.$`, "TLS `InsecureSkipVerify` set true."}, // gosimple {`should replace loop with (.*)$`, "should replace loop with `${1}`"}, - {`should use a simple channel send/receive instead of select with a single case`, - "should use a simple channel send/receive instead of `select` with a single case"}, - {`should omit comparison to bool constant, can be simplified to (.+)$`, - "should omit comparison to bool constant, can be simplified to `${1}`"}, + { + `should use a simple channel send/receive instead of select with a single case`, + "should use a simple channel send/receive instead of `select` with a single case", + }, + { + `should omit comparison to bool constant, can be simplified to (.+)$`, + "should omit comparison to bool constant, can be simplified to `${1}`", + }, {`should write (.+) instead of (.+)$`, "should write `${1}` instead of `${2}`"}, {`redundant return statement$`, "redundant `return` statement"}, - {`should replace this if statement with an unconditional strings.TrimPrefix`, - "should replace this `if` statement with an unconditional `strings.TrimPrefix`"}, + { + `should replace this if statement with an unconditional strings.TrimPrefix`, + "should replace this `if` statement with an unconditional `strings.TrimPrefix`", + }, // staticcheck {`this value of (\S+) is never used$`, "this value of `${1}` is never used"}, - {`should use time.Since instead of time.Now\(\).Sub$`, - "should use `time.Since` instead of `time.Now().Sub`"}, - {`should check returned error before deferring response.Close\(\)$`, - "should check returned error before deferring `response.Close()`"}, + { + `should use time.Since instead of time.Now\(\).Sub$`, + "should use `time.Since` instead of `time.Now().Sub`", + }, + { + `should check returned error before deferring response.Close\(\)$`, + "should check returned error before deferring `response.Close()`", + }, {`no value of type uint is less than 0$`, "no value of type `uint` is less than `0`"}, // unused @@ -59,26 +71,40 @@ var replacePatterns = []replacePattern{ // typecheck {`^unknown field (\S+) in struct literal$`, "unknown field `${1}` in struct literal"}, - {`^invalid operation: (\S+) \(variable of type (\S+)\) has no field or method (\S+)$`, - "invalid operation: `${1}` (variable of type `${2}`) has no field or method `${3}`"}, + { + `^invalid operation: (\S+) \(variable of type (\S+)\) has no field or method (\S+)$`, + "invalid operation: `${1}` (variable of type `${2}`) has no field or method `${3}`", + }, {`^undeclared name: (\S+)$`, "undeclared name: `${1}`"}, - {`^cannot use addr \(variable of type (\S+)\) as (\S+) value in argument to (\S+)$`, - "cannot use addr (variable of type `${1}`) as `${2}` value in argument to `${3}`"}, + { + `^cannot use addr \(variable of type (\S+)\) as (\S+) value in argument to (\S+)$`, + "cannot use addr (variable of type `${1}`) as `${2}` value in argument to `${3}`", + }, {`^other declaration of (\S+)$`, "other declaration 
of `${1}`"}, {`^(\S+) redeclared in this block$`, "`${1}` redeclared in this block"}, // golint - {`^exported (type|method|function|var|const) (\S+) should have comment or be unexported$`, - "exported ${1} `${2}` should have comment or be unexported"}, - {`^comment on exported (type|method|function|var|const) (\S+) should be of the form "(\S+) ..."$`, - "comment on exported ${1} `${2}` should be of the form `${3} ...`"}, + { + `^exported (type|method|function|var|const) (\S+) should have comment or be unexported$`, + "exported ${1} `${2}` should have comment or be unexported", + }, + { + `^comment on exported (type|method|function|var|const) (\S+) should be of the form "(\S+) ..."$`, + "comment on exported ${1} `${2}` should be of the form `${3} ...`", + }, {`^should replace (.+) with (.+)$`, "should replace `${1}` with `${2}`"}, - {`^if block ends with a return statement, so drop this else and outdent its block$`, - "`if` block ends with a `return` statement, so drop this `else` and outdent its block"}, - {`^(struct field|var|range var|const|type|(?:func|method|interface method) (?:parameter|result)) (\S+) should be (\S+)$`, - "${1} `${2}` should be `${3}`"}, - {`^don't use underscores in Go names; var (\S+) should be (\S+)$`, - "don't use underscores in Go names; var `${1}` should be `${2}`"}, + { + `^if block ends with a return statement, so drop this else and outdent its block$`, + "`if` block ends with a `return` statement, so drop this `else` and outdent its block", + }, + { + `^(struct field|var|range var|const|type|(?:func|method|interface method) (?:parameter|result)) (\S+) should be (\S+)$`, + "${1} `${2}` should be `${3}`", + }, + { + `^don't use underscores in Go names; var (\S+) should be (\S+)$`, + "don't use underscores in Go names; var `${1}` should be `${2}`", + }, } type IdentifierMarker struct { @@ -101,10 +127,10 @@ func NewIdentifierMarker() *IdentifierMarker { } func (im IdentifierMarker) Process(issues []result.Issue) ([]result.Issue, error) { - return transformIssues(issues, func(i *result.Issue) *result.Issue { - iCopy := *i - iCopy.Text = im.markIdentifiers(iCopy.Text) - return &iCopy + return transformIssues(issues, func(issue *result.Issue) *result.Issue { + newIssue := *issue + newIssue.Text = im.markIdentifiers(newIssue.Text) + return &newIssue }), nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/invalid_issue.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/invalid_issue.go new file mode 100644 index 0000000000..a8cfef8920 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/invalid_issue.go @@ -0,0 +1,54 @@ +package processors + +import ( + "path/filepath" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +var _ Processor = InvalidIssue{} + +type InvalidIssue struct { + log logutils.Log +} + +func NewInvalidIssue(log logutils.Log) *InvalidIssue { + return &InvalidIssue{log: log} +} + +func (p InvalidIssue) Process(issues []result.Issue) ([]result.Issue, error) { + return filterIssuesErr(issues, p.shouldPassIssue) +} + +func (p InvalidIssue) Name() string { + return "invalid_issue" +} + +func (p InvalidIssue) Finish() {} + +func (p InvalidIssue) shouldPassIssue(issue *result.Issue) (bool, error) { + if issue.FromLinter == "typecheck" { + return true, nil + } + + if issue.FilePath() == "" { + // contextcheck has a known bug https://github.com/kkHAIKE/contextcheck/issues/21 + if issue.FromLinter != "contextcheck" { + 
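The new invalid_issue.go processor being added here filters out issues that cannot be tied back to a real file. A minimal standalone sketch of that filter logic follows; the issue type and isGoFile helper are simplified stand-ins for the vendored ones, not the actual API:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// issue is a trimmed stand-in for result.Issue.
type issue struct {
	FromLinter string
	Path       string
}

func isGoFile(name string) bool {
	return filepath.Ext(name) == ".go"
}

// shouldPass mirrors the decision order above: typecheck issues always
// pass, an empty file path is treated as a bug inside the linter, go.mod
// issues pass, and anything that is not a Go file is dropped.
func shouldPass(i issue) bool {
	if i.FromLinter == "typecheck" {
		return true
	}
	if i.Path == "" {
		return false
	}
	if filepath.Base(i.Path) == "go.mod" {
		return true
	}
	return isGoFile(i.Path)
}

func main() {
	for _, i := range []issue{
		{FromLinter: "govet", Path: "main.go"},
		{FromLinter: "contextcheck", Path: ""},
		{FromLinter: "gomoddirectives", Path: "go.mod"},
		{FromLinter: "misspell", Path: "README.md"},
	} {
		fmt.Printf("%s %q pass=%v\n", i.FromLinter, i.Path, shouldPass(i))
	}
}
```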
p.log.Warnf("no file path for the issue: probably a bug inside the linter %q: %#v", issue.FromLinter, issue) + } + + return false, nil + } + + if filepath.Base(issue.FilePath()) == "go.mod" { + return true, nil + } + + if !isGoFile(issue.FilePath()) { + p.log.Infof("issue related to file %s is skipped", issue.FilePath()) + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/issues.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/issues.go new file mode 100644 index 0000000000..a65b0c2b0c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/issues.go @@ -0,0 +1,46 @@ +package processors + +import ( + "fmt" + + "github.com/golangci/golangci-lint/pkg/result" +) + +func filterIssues(issues []result.Issue, filter func(issue *result.Issue) bool) []result.Issue { + retIssues := make([]result.Issue, 0, len(issues)) + for i := range issues { + if filter(&issues[i]) { + retIssues = append(retIssues, issues[i]) + } + } + + return retIssues +} + +func filterIssuesErr(issues []result.Issue, filter func(issue *result.Issue) (bool, error)) ([]result.Issue, error) { + retIssues := make([]result.Issue, 0, len(issues)) + for i := range issues { + ok, err := filter(&issues[i]) + if err != nil { + return nil, fmt.Errorf("can't filter issue %#v: %w", issues[i], err) + } + + if ok { + retIssues = append(retIssues, issues[i]) + } + } + + return retIssues, nil +} + +func transformIssues(issues []result.Issue, transform func(issue *result.Issue) *result.Issue) []result.Issue { + retIssues := make([]result.Issue, 0, len(issues)) + for i := range issues { + newIssue := transform(&issues[i]) + if newIssue != nil { + retIssues = append(retIssues, *newIssue) + } + } + + return retIssues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go index c58666c566..65b04272ba 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go @@ -24,7 +24,7 @@ func NewMaxFromLinter(limit int, log logutils.Log, cfg *config.Config) *MaxFromL } } -func (p MaxFromLinter) Name() string { +func (p *MaxFromLinter) Name() string { return "max_from_linter" } @@ -33,18 +33,18 @@ func (p *MaxFromLinter) Process(issues []result.Issue) ([]result.Issue, error) { return issues, nil } - return filterIssues(issues, func(i *result.Issue) bool { - if i.Replacement != nil && p.cfg.Issues.NeedFix { + return filterIssues(issues, func(issue *result.Issue) bool { + if issue.Replacement != nil && p.cfg.Issues.NeedFix { // we need to fix all issues at once => we need to return all of them return true } - p.lc[i.FromLinter]++ // always inc for stat - return p.lc[i.FromLinter] <= p.limit + p.lc[issue.FromLinter]++ // always inc for stat + return p.lc[issue.FromLinter] <= p.limit }), nil } -func (p MaxFromLinter) Finish() { +func (p *MaxFromLinter) Finish() { walkStringToIntMapSortedByValue(p.lc, func(linter string, count int) { if count > p.limit { p.log.Infof("%d/%d issues from linter %s were hidden, use --max-issues-per-linter", diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go index e36446c9fc..372f40cc58 100644 --- 
a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go @@ -5,8 +5,10 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) -type linterToCountMap map[string]int -type fileToLinterToCountMap map[string]linterToCountMap +type ( + linterToCountMap map[string]int + fileToLinterToCountMap map[string]linterToCountMap +) type MaxPerFileFromLinter struct { flc fileToLinterToCountMap @@ -31,29 +33,29 @@ func NewMaxPerFileFromLinter(cfg *config.Config) *MaxPerFileFromLinter { } } -func (p MaxPerFileFromLinter) Name() string { +func (p *MaxPerFileFromLinter) Name() string { return "max_per_file_from_linter" } func (p *MaxPerFileFromLinter) Process(issues []result.Issue) ([]result.Issue, error) { - return filterIssues(issues, func(i *result.Issue) bool { - limit := p.maxPerFileFromLinterConfig[i.FromLinter] + return filterIssues(issues, func(issue *result.Issue) bool { + limit := p.maxPerFileFromLinterConfig[issue.FromLinter] if limit == 0 { return true } - lm := p.flc[i.FilePath()] + lm := p.flc[issue.FilePath()] if lm == nil { - p.flc[i.FilePath()] = linterToCountMap{} + p.flc[issue.FilePath()] = linterToCountMap{} } - count := p.flc[i.FilePath()][i.FromLinter] + count := p.flc[issue.FilePath()][issue.FromLinter] if count >= limit { return false } - p.flc[i.FilePath()][i.FromLinter]++ + p.flc[issue.FilePath()][issue.FromLinter]++ return true }), nil } -func (p MaxPerFileFromLinter) Finish() {} +func (p *MaxPerFileFromLinter) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go index 84fdf0c053..a3ceeb5953 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go @@ -28,7 +28,7 @@ func NewMaxSameIssues(limit int, log logutils.Log, cfg *config.Config) *MaxSameI } } -func (MaxSameIssues) Name() string { +func (p *MaxSameIssues) Name() string { return "max_same_issues" } @@ -37,18 +37,18 @@ func (p *MaxSameIssues) Process(issues []result.Issue) ([]result.Issue, error) { return issues, nil } - return filterIssues(issues, func(i *result.Issue) bool { - if i.Replacement != nil && p.cfg.Issues.NeedFix { + return filterIssues(issues, func(issue *result.Issue) bool { + if issue.Replacement != nil && p.cfg.Issues.NeedFix { // we need to fix all issues at once => we need to return all of them return true } - p.tc[i.Text]++ // always inc for stat - return p.tc[i.Text] <= p.limit + p.tc[issue.Text]++ // always inc for stat + return p.tc[issue.Text] <= p.limit }), nil } -func (p MaxSameIssues) Finish() { +func (p *MaxSameIssues) Finish() { walkStringToIntMapSortedByValue(p.tc, func(text string, count int) { if count > p.limit { p.log.Infof("%d/%d issues with text %q were hidden, use --max-same-issues", diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go index 0788a7160e..df8e814959 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go @@ -1,7 +1,6 @@ package processors import ( - "fmt" "go/ast" "go/parser" "go/token" @@ -9,6 +8,8 @@ import ( "sort" "strings" + "golang.org/x/exp/maps" + 
"github.com/golangci/golangci-lint/pkg/golinters" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/lint/lintersdb" @@ -16,7 +17,10 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) -var nolintDebugf = logutils.Debug("nolint") +var ( + nolintDebugf = logutils.Debug(logutils.DebugKeyNolint) + nolintRe = regexp.MustCompile(`^nolint( |:|$)`) +) type ignoredRange struct { linters []string @@ -32,7 +36,7 @@ func (i *ignoredRange) doesMatch(issue *result.Issue) bool { } // only allow selective nolinting of nolintlint - nolintFoundForLinter := len(i.linters) == 0 && issue.FromLinter != golinters.NolintlintName + nolintFoundForLinter := len(i.linters) == 0 && issue.FromLinter != golinters.NoLintLintName for _, linterName := range i.linters { if linterName == issue.FromLinter { @@ -46,8 +50,8 @@ func (i *ignoredRange) doesMatch(issue *result.Issue) bool { } // handle possible unused nolint directives - // nolintlint generates potential issues for every nolint directive and they are filtered out here - if issue.FromLinter == golinters.NolintlintName && issue.ExpectNoLint { + // nolintlint generates potential issues for every nolint directive, and they are filtered out here + if issue.FromLinter == golinters.NoLintLintName && issue.ExpectNoLint { if issue.ExpectedNoLintLinter != "" { return i.matchedIssueFromLinter[issue.ExpectedNoLintLinter] } @@ -84,7 +88,7 @@ func NewNolint(log logutils.Log, dbManager *lintersdb.Manager, enabledLinters ma var _ Processor = &Nolint{} -func (p Nolint) Name() string { +func (p *Nolint) Name() string { return "nolint" } @@ -94,33 +98,31 @@ func (p *Nolint) Process(issues []result.Issue) ([]result.Issue, error) { return filterIssuesErr(issues, p.shouldPassIssue) } -func (p *Nolint) getOrCreateFileData(i *result.Issue) (*fileData, error) { - fd := p.cache[i.FilePath()] +func (p *Nolint) getOrCreateFileData(issue *result.Issue) *fileData { + fd := p.cache[issue.FilePath()] if fd != nil { - return fd, nil + return fd } fd = &fileData{} - p.cache[i.FilePath()] = fd - - if i.FilePath() == "" { - return nil, fmt.Errorf("no file path for issue") - } + p.cache[issue.FilePath()] = fd // TODO: migrate this parsing to go/analysis facts // or cache them somehow per file. // Don't use cached AST because they consume a lot of memory on large projects. fset := token.NewFileSet() - f, err := parser.ParseFile(fset, i.FilePath(), nil, parser.ParseComments) + f, err := parser.ParseFile(fset, issue.FilePath(), nil, parser.ParseComments) if err != nil { // Don't report error because it's already must be reporter by typecheck or go/analysis. 
- return fd, nil + return fd } - fd.ignoredRanges = p.buildIgnoredRangesForFile(f, fset, i.FilePath()) - nolintDebugf("file %s: built nolint ranges are %+v", i.FilePath(), fd.ignoredRanges) - return fd, nil + fd.ignoredRanges = p.buildIgnoredRangesForFile(f, fset, issue.FilePath()) + + nolintDebugf("file %s: built nolint ranges are %+v", issue.FilePath(), fd.ignoredRanges) + + return fd } func (p *Nolint) buildIgnoredRangesForFile(f *ast.File, fset *token.FileSet, filePath string) []ignoredRange { @@ -145,28 +147,25 @@ func (p *Nolint) buildIgnoredRangesForFile(f *ast.File, fset *token.FileSet, fil return allRanges } -func (p *Nolint) shouldPassIssue(i *result.Issue) (bool, error) { - nolintDebugf("got issue: %v", *i) - if i.FromLinter == golinters.NolintlintName && i.ExpectNoLint && i.ExpectedNoLintLinter != "" { +func (p *Nolint) shouldPassIssue(issue *result.Issue) (bool, error) { + nolintDebugf("got issue: %v", *issue) + if issue.FromLinter == golinters.NoLintLintName && issue.ExpectNoLint && issue.ExpectedNoLintLinter != "" { // don't expect disabled linters to cover their nolint statements nolintDebugf("enabled linters: %v", p.enabledLinters) - if p.enabledLinters[i.ExpectedNoLintLinter] == nil { + if p.enabledLinters[issue.ExpectedNoLintLinter] == nil { return false, nil } - nolintDebugf("checking that lint issue was used for %s: %v", i.ExpectedNoLintLinter, i) + nolintDebugf("checking that lint issue was used for %s: %v", issue.ExpectedNoLintLinter, issue) } - fd, err := p.getOrCreateFileData(i) - if err != nil { - return false, err - } + fd := p.getOrCreateFileData(issue) for _, ir := range fd.ignoredRanges { - if ir.doesMatch(i) { - nolintDebugf("found ignored range for issue %v: %v", i, ir) - ir.matchedIssueFromLinter[i.FromLinter] = true + if ir.doesMatch(issue) { + nolintDebugf("found ignored range for issue %v: %v", issue, ir) + ir.matchedIssueFromLinter[issue.FromLinter] = true if ir.originalRange != nil { - ir.originalRange.matchedIssueFromLinter[i.FromLinter] = true + ir.originalRange.matchedIssueFromLinter[issue.FromLinter] = true } return false, nil } @@ -234,7 +233,7 @@ func (p *Nolint) extractFileCommentsInlineRanges(fset *token.FileSet, comments . 
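The hunk below also teaches the processor the nolint:all form alongside named linters. A condensed sketch of the directive grammar it accepts; parseNolint is a hypothetical summary, not the vendored function:

```go
package main

import (
	"fmt"
	"strings"
)

// parseNolint condenses the parsing below: bare "nolint" and "nolint:all"
// disable every linter, "nolint:a,b" disables only the named linters, and
// a trailing "//" comment is tolerated.
func parseNolint(text string) (all bool, linters []string) {
	text = strings.TrimLeft(text, "/ ")
	text = strings.TrimSpace(strings.Split(text, "//")[0])

	if text == "nolint" || strings.HasPrefix(text, "nolint:all") {
		return true, nil
	}
	for _, item := range strings.Split(strings.TrimPrefix(text, "nolint:"), ",") {
		name := strings.ToLower(strings.TrimSpace(item))
		if name == "all" {
			return true, nil
		}
		linters = append(linters, name)
	}
	return false, linters
}

func main() {
	fmt.Println(parseNolint("//nolint:gocritic,govet // reason")) // false [gocritic govet]
	fmt.Println(parseNolint("//nolint:all"))                      // true []
}
```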
func (p *Nolint) extractInlineRangeFromComment(text string, g ast.Node, fset *token.FileSet) *ignoredRange { text = strings.TrimLeft(text, "/ ") - if ok, _ := regexp.MatchString(`^nolint( |:|$)`, text); !ok { + if !nolintRe.MatchString(text) { return nil } @@ -251,7 +250,7 @@ func (p *Nolint) extractInlineRangeFromComment(text string, g ast.Node, fset *to } } - if !strings.HasPrefix(text, "nolint:") { + if strings.HasPrefix(text, "nolint:all") || !strings.HasPrefix(text, "nolint:") { return buildRange(nil) // ignore all linters } @@ -259,8 +258,12 @@ func (p *Nolint) extractInlineRangeFromComment(text string, g ast.Node, fset *to var linters []string text = strings.Split(text, "//")[0] // allow another comment after this comment linterItems := strings.Split(strings.TrimPrefix(text, "nolint:"), ",") - for _, linter := range linterItems { - linterName := strings.ToLower(strings.TrimSpace(linter)) + for _, item := range linterItems { + linterName := strings.ToLower(strings.TrimSpace(item)) + if linterName == "all" { + p.unknownLintersSet = map[string]bool{} + return buildRange(nil) + } lcs := p.dbManager.GetLinterConfigs(linterName) if lcs == nil { @@ -279,15 +282,12 @@ func (p *Nolint) extractInlineRangeFromComment(text string, g ast.Node, fset *to return buildRange(linters) } -func (p Nolint) Finish() { +func (p *Nolint) Finish() { if len(p.unknownLintersSet) == 0 { return } - unknownLinters := []string{} - for name := range p.unknownLintersSet { - unknownLinters = append(unknownLinters, name) - } + unknownLinters := maps.Keys(p.unknownLintersSet) sort.Strings(unknownLinters) p.log.Warnf("Found unknown linters in //nolint directives: %s", strings.Join(unknownLinters, ", ")) @@ -301,7 +301,7 @@ func (issues sortWithNolintlintLast) Len() int { } func (issues sortWithNolintlintLast) Less(i, j int) bool { - return issues[i].FromLinter != golinters.NolintlintName && issues[j].FromLinter == golinters.NolintlintName + return issues[i].FromLinter != golinters.NoLintLintName && issues[j].FromLinter == golinters.NoLintLintName } func (issues sortWithNolintlintLast) Swap(i, j int) { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go index 5ce940b39b..f6b885011b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go @@ -1,8 +1,7 @@ package processors import ( - "path" - + "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/result" ) @@ -27,7 +26,7 @@ func (*PathPrefixer) Name() string { func (p *PathPrefixer) Process(issues []result.Issue) ([]result.Issue, error) { if p.prefix != "" { for i := range issues { - issues[i].Pos.Filename = path.Join(p.prefix, issues[i].Pos.Filename) + issues[i].Pos.Filename = fsutils.WithPathPrefix(p.prefix, issues[i].Pos.Filename) } } return issues, nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prettifier.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prettifier.go index 3a140999c0..79cdd7473c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prettifier.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prettifier.go @@ -29,19 +29,19 @@ func (p PathPrettifier) Name() string { } func (p PathPrettifier) Process(issues []result.Issue) ([]result.Issue, error) { - return 
transformIssues(issues, func(i *result.Issue) *result.Issue { - if !filepath.IsAbs(i.FilePath()) { - return i + return transformIssues(issues, func(issue *result.Issue) *result.Issue { + if !filepath.IsAbs(issue.FilePath()) { + return issue } - rel, err := fsutils.ShortestRelPath(i.FilePath(), "") + rel, err := fsutils.ShortestRelPath(issue.FilePath(), "") if err != nil { - return i + return issue } - newI := i - newI.Pos.Filename = rel - return newI + newIssue := issue + newIssue.Pos.Filename = rel + return newIssue }), nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go index 484f7f1f11..d7fa5ea91d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go @@ -29,11 +29,11 @@ func (p PathShortener) Name() string { } func (p PathShortener) Process(issues []result.Issue) ([]result.Issue, error) { - return transformIssues(issues, func(i *result.Issue) *result.Issue { - newI := i - newI.Text = strings.Replace(newI.Text, p.wd+"/", "", -1) - newI.Text = strings.Replace(newI.Text, p.wd, "", -1) - return newI + return transformIssues(issues, func(issue *result.Issue) *result.Issue { + newIssue := issue + newIssue.Text = strings.ReplaceAll(newIssue.Text, p.wd+"/", "") + newIssue.Text = strings.ReplaceAll(newIssue.Text, p.wd, "") + return newIssue }), nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go new file mode 100644 index 0000000000..2568ba45c4 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go @@ -0,0 +1,126 @@ +package processors + +import ( + "regexp" + + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +const severityFromLinter = "@linter" + +var _ Processor = &Severity{} + +type severityRule struct { + baseRule + severity string +} + +type SeverityRule struct { + BaseRule + Severity string +} + +type SeverityOptions struct { + Default string + Rules []SeverityRule + CaseSensitive bool +} + +type Severity struct { + name string + + log logutils.Log + + files *fsutils.Files + + defaultSeverity string + rules []severityRule +} + +func NewSeverity(log logutils.Log, files *fsutils.Files, opts SeverityOptions) *Severity { + p := &Severity{ + name: "severity-rules", + files: files, + log: log, + defaultSeverity: opts.Default, + } + + prefix := caseInsensitivePrefix + if opts.CaseSensitive { + prefix = "" + p.name = "severity-rules-case-sensitive" + } + + p.rules = createSeverityRules(opts.Rules, prefix) + + return p +} + +func (p *Severity) Process(issues []result.Issue) ([]result.Issue, error) { + if len(p.rules) == 0 && p.defaultSeverity == "" { + return issues, nil + } + + return transformIssues(issues, p.transform), nil +} + +func (p *Severity) transform(issue *result.Issue) *result.Issue { + for _, rule := range p.rules { + if rule.match(issue, p.files, p.log) { + if rule.severity == severityFromLinter || (rule.severity == "" && p.defaultSeverity == severityFromLinter) { + return issue + } + + issue.Severity = rule.severity + if issue.Severity == "" { + issue.Severity = p.defaultSeverity + } + + return issue + } + } + + if p.defaultSeverity != severityFromLinter { + 
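The precedence encoded in transform above: a matching rule's severity wins, an empty rule severity falls back to the default, and the @linter marker keeps whatever severity the linter itself reported. A hedged sketch with rule matching reduced to a linter-name comparison (the vendored rules also match on text, source, and path):

```go
package main

import "fmt"

const severityFromLinter = "@linter"

type rule struct {
	linter   string
	severity string
}

// apply reproduces the precedence of transform: a matching rule wins,
// "@linter" (as rule or default severity) preserves the current value,
// and an unmatched issue receives the default severity.
func apply(linter, current, defaultSeverity string, rules []rule) string {
	for _, r := range rules {
		if r.linter != linter {
			continue
		}
		if r.severity == severityFromLinter ||
			(r.severity == "" && defaultSeverity == severityFromLinter) {
			return current
		}
		if r.severity != "" {
			return r.severity
		}
		return defaultSeverity
	}
	if defaultSeverity != severityFromLinter {
		return defaultSeverity
	}
	return current
}

func main() {
	rules := []rule{{linter: "govet", severity: "error"}}
	fmt.Println(apply("govet", "warning", "info", rules))     // error
	fmt.Println(apply("revive", "warning", "info", rules))    // info
	fmt.Println(apply("revive", "warning", "@linter", rules)) // warning
}
```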
issue.Severity = p.defaultSeverity + } + + return issue +} + +func (p *Severity) Name() string { return p.name } + +func (*Severity) Finish() {} + +func createSeverityRules(rules []SeverityRule, prefix string) []severityRule { + parsedRules := make([]severityRule, 0, len(rules)) + + for _, rule := range rules { + parsedRule := severityRule{} + parsedRule.linters = rule.Linters + parsedRule.severity = rule.Severity + + if rule.Text != "" { + parsedRule.text = regexp.MustCompile(prefix + rule.Text) + } + + if rule.Source != "" { + parsedRule.source = regexp.MustCompile(prefix + rule.Source) + } + + if rule.Path != "" { + path := fsutils.NormalizePathInRegex(rule.Path) + parsedRule.path = regexp.MustCompile(path) + } + + if rule.PathExcept != "" { + pathExcept := fsutils.NormalizePathInRegex(rule.PathExcept) + parsedRule.pathExcept = regexp.MustCompile(pathExcept) + } + + parsedRules = append(parsedRules, parsedRule) + } + + return parsedRules +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go deleted file mode 100644 index 7c9a4c1d63..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go +++ /dev/null @@ -1,103 +0,0 @@ -package processors - -import ( - "regexp" - - "github.com/golangci/golangci-lint/pkg/fsutils" - "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/result" -) - -type severityRule struct { - baseRule - severity string -} - -type SeverityRule struct { - BaseRule - Severity string -} - -type SeverityRules struct { - defaultSeverity string - rules []severityRule - lineCache *fsutils.LineCache - log logutils.Log -} - -func NewSeverityRules(defaultSeverity string, rules []SeverityRule, lineCache *fsutils.LineCache, log logutils.Log) *SeverityRules { - r := &SeverityRules{ - lineCache: lineCache, - log: log, - defaultSeverity: defaultSeverity, - } - r.rules = createSeverityRules(rules, "(?i)") - - return r -} - -func createSeverityRules(rules []SeverityRule, prefix string) []severityRule { - parsedRules := make([]severityRule, 0, len(rules)) - for _, rule := range rules { - parsedRule := severityRule{} - parsedRule.linters = rule.Linters - parsedRule.severity = rule.Severity - if rule.Text != "" { - parsedRule.text = regexp.MustCompile(prefix + rule.Text) - } - if rule.Source != "" { - parsedRule.source = regexp.MustCompile(prefix + rule.Source) - } - if rule.Path != "" { - parsedRule.path = regexp.MustCompile(rule.Path) - } - parsedRules = append(parsedRules, parsedRule) - } - return parsedRules -} - -func (p SeverityRules) Process(issues []result.Issue) ([]result.Issue, error) { - if len(p.rules) == 0 && p.defaultSeverity == "" { - return issues, nil - } - return transformIssues(issues, func(i *result.Issue) *result.Issue { - for _, rule := range p.rules { - rule := rule - - ruleSeverity := p.defaultSeverity - if rule.severity != "" { - ruleSeverity = rule.severity - } - - if rule.match(i, p.lineCache, p.log) { - i.Severity = ruleSeverity - return i - } - } - i.Severity = p.defaultSeverity - return i - }), nil -} - -func (SeverityRules) Name() string { return "severity-rules" } -func (SeverityRules) Finish() {} - -var _ Processor = SeverityRules{} - -type SeverityRulesCaseSensitive struct { - *SeverityRules -} - -func NewSeverityRulesCaseSensitive(defaultSeverity string, rules []SeverityRule, - lineCache *fsutils.LineCache, log logutils.Log) *SeverityRulesCaseSensitive { - 
r := &SeverityRules{ - lineCache: lineCache, - log: log, - defaultSeverity: defaultSeverity, - } - r.rules = createSeverityRules(rules, "") - - return &SeverityRulesCaseSensitive{r} -} - -func (SeverityRulesCaseSensitive) Name() string { return "severity-rules-case-sensitive" } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go index 6488c109ec..7c4e0b9c0c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go @@ -1,12 +1,12 @@ package processors import ( + "fmt" "path/filepath" "regexp" "strings" - "github.com/pkg/errors" - + "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) @@ -22,19 +22,20 @@ type SkipDirs struct { skippedDirs map[string]*skipStat absArgsDirs []string skippedDirsCache map[string]bool + pathPrefix string } -var _ Processor = SkipFiles{} +var _ Processor = (*SkipDirs)(nil) const goFileSuffix = ".go" -func NewSkipDirs(patterns []string, log logutils.Log, runArgs []string) (*SkipDirs, error) { +func NewSkipDirs(patterns []string, log logutils.Log, runArgs []string, pathPrefix string) (*SkipDirs, error) { var patternsRe []*regexp.Regexp for _, p := range patterns { - p = normalizePathInRegex(p) + p = fsutils.NormalizePathInRegex(p) patternRe, err := regexp.Compile(p) if err != nil { - return nil, errors.Wrapf(err, "can't compile regexp %q", p) + return nil, fmt.Errorf("can't compile regexp %q: %w", p, err) } patternsRe = append(patternsRe, patternRe) } @@ -51,7 +52,7 @@ func NewSkipDirs(patterns []string, log logutils.Log, runArgs []string) (*SkipDi absArg, err := filepath.Abs(arg) if err != nil { - return nil, errors.Wrapf(err, "failed to abs-ify arg %q", arg) + return nil, fmt.Errorf("failed to abs-ify arg %q: %w", arg, err) } absArgsDirs = append(absArgsDirs, absArg) } @@ -62,6 +63,7 @@ func NewSkipDirs(patterns []string, log logutils.Log, runArgs []string) (*SkipDi skippedDirs: map[string]*skipStat{}, absArgsDirs: absArgsDirs, skippedDirsCache: map[string]bool{}, + pathPrefix: pathPrefix, }, nil } @@ -77,15 +79,15 @@ func (p *SkipDirs) Process(issues []result.Issue) ([]result.Issue, error) { return filterIssues(issues, p.shouldPassIssue), nil } -func (p *SkipDirs) shouldPassIssue(i *result.Issue) bool { - if filepath.IsAbs(i.FilePath()) { - if !isSpecialAutogeneratedFile(i.FilePath()) { - p.log.Warnf("Got abs path %s in skip dirs processor, it should be relative", i.FilePath()) +func (p *SkipDirs) shouldPassIssue(issue *result.Issue) bool { + if filepath.IsAbs(issue.FilePath()) { + if isGoFile(issue.FilePath()) { + p.log.Warnf("Got abs path %s in skip dirs processor, it should be relative", issue.FilePath()) } return true } - issueRelDir := filepath.Dir(i.FilePath()) + issueRelDir := filepath.Dir(issue.FilePath()) if toPass, ok := p.skippedDirsCache[issueRelDir]; ok { if !toPass { @@ -120,8 +122,9 @@ func (p *SkipDirs) shouldPassIssueDirs(issueRelDir, issueAbsDir string) bool { // The alternative solution is to find relative to args path, but it has // disadvantages (https://github.com/golangci/golangci-lint/pull/313). 
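Both skip processors now match their patterns against a path that has the configured --path-prefix joined on, via fsutils.WithPathPrefix. A small sketch of the effect, assuming WithPathPrefix simply joins a non-empty prefix onto the relative path (an assumption about the helper, whose body is not shown in this diff):

```go
package main

import (
	"fmt"
	"path"
	"regexp"
)

// withPathPrefix models the assumed behavior of fsutils.WithPathPrefix:
// join a configured --path-prefix onto the relative path before matching.
func withPathPrefix(prefix, p string) string {
	if prefix == "" {
		return p
	}
	return path.Join(prefix, p)
}

func main() {
	// A skip pattern written against the prefixed repository layout.
	pattern := regexp.MustCompile(`^backend/vendor/`)
	issueRelDir := "vendor/github.com/foo"

	fmt.Println(pattern.MatchString(issueRelDir))                             // false
	fmt.Println(pattern.MatchString(withPathPrefix("backend", issueRelDir))) // true
}
```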
+ path := fsutils.WithPathPrefix(p.pathPrefix, issueRelDir) for _, pattern := range p.patterns { - if pattern.MatchString(issueRelDir) { + if pattern.MatchString(path) { ps := pattern.String() if p.skippedDirs[issueRelDir] == nil { p.skippedDirs[issueRelDir] = &skipStat{ diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go index 522b07e4f2..f1873a376c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go @@ -4,28 +4,31 @@ import ( "fmt" "regexp" + "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/result" ) type SkipFiles struct { - patterns []*regexp.Regexp + patterns []*regexp.Regexp + pathPrefix string } -var _ Processor = SkipFiles{} +var _ Processor = (*SkipFiles)(nil) -func NewSkipFiles(patterns []string) (*SkipFiles, error) { +func NewSkipFiles(patterns []string, pathPrefix string) (*SkipFiles, error) { var patternsRe []*regexp.Regexp for _, p := range patterns { - p = normalizePathInRegex(p) + p = fsutils.NormalizePathInRegex(p) patternRe, err := regexp.Compile(p) if err != nil { - return nil, fmt.Errorf("can't compile regexp %q: %s", p, err) + return nil, fmt.Errorf("can't compile regexp %q: %w", p, err) } patternsRe = append(patternsRe, patternRe) } return &SkipFiles{ - patterns: patternsRe, + patterns: patternsRe, + pathPrefix: pathPrefix, }, nil } @@ -38,9 +41,10 @@ func (p SkipFiles) Process(issues []result.Issue) ([]result.Issue, error) { return issues, nil } - return filterIssues(issues, func(i *result.Issue) bool { - for _, p := range p.patterns { - if p.MatchString(i.FilePath()) { + return filterIssues(issues, func(issue *result.Issue) bool { + path := fsutils.WithPathPrefix(p.pathPrefix, issue.FilePath()) + for _, pattern := range p.patterns { + if pattern.MatchString(path) { return false } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go index e726c3adfe..8e4af57e63 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go @@ -1,6 +1,9 @@ package processors import ( + "errors" + "fmt" + "slices" "sort" "strings" @@ -9,96 +12,127 @@ import ( ) // Base propose of this functionality to sort results (issues) -// produced by various linters by analyzing code. We achieving this +// produced by various linters by analyzing code. We're achieving this // by sorting results.Issues using processor step, and chain based // rules that can compare different properties of the Issues struct. +const ( + orderNameFile = "file" + orderNameLinter = "linter" + orderNameSeverity = "severity" +) + var _ Processor = (*SortResults)(nil) type SortResults struct { - cmp comparator - cfg *config.Config + cmps map[string]*comparator + + cfg *config.Output } func NewSortResults(cfg *config.Config) *SortResults { - // For sorting we are comparing (in next order): file names, line numbers, - // position, and finally - giving up. return &SortResults{ - cmp: ByName{ - next: ByLine{ - next: ByColumn{}, - }, + cmps: map[string]*comparator{ + // For sorting we are comparing (in next order): + // file names, line numbers, position, and finally - giving up. 
+ orderNameFile: byFileName().SetNext(byLine().SetNext(byColumn())), + // For sorting we are comparing: linter name + orderNameLinter: byLinter(), + // For sorting we are comparing: severity + orderNameSeverity: bySeverity(), }, - cfg: cfg, + cfg: &cfg.Output, } } // Process is performing sorting of the result issues. func (sr SortResults) Process(issues []result.Issue) ([]result.Issue, error) { - if !sr.cfg.Output.SortResults { + if !sr.cfg.SortResults { return issues, nil } + if len(sr.cfg.SortOrder) == 0 { + sr.cfg.SortOrder = []string{orderNameFile} + } + + var cmps []*comparator + for _, name := range sr.cfg.SortOrder { + if c, ok := sr.cmps[name]; ok { + cmps = append(cmps, c) + } else { + return nil, fmt.Errorf("unsupported sort-order name %q", name) + } + } + + cmp, err := mergeComparators(cmps) + if err != nil { + return nil, err + } + sort.Slice(issues, func(i, j int) bool { - return sr.cmp.Compare(&issues[i], &issues[j]) == Less + return cmp.Compare(&issues[i], &issues[j]) == less }) return issues, nil } func (sr SortResults) Name() string { return "sort_results" } -func (sr SortResults) Finish() {} + +func (sr SortResults) Finish() {} type compareResult int const ( - Less compareResult = iota - 1 - Equal - Greater - None + less compareResult = iota - 1 + equal + greater + none ) func (c compareResult) isNeutral() bool { // return true if compare result is incomparable or equal. - return c == None || c == Equal + return c == none || c == equal } -//nolint:exhaustive func (c compareResult) String() string { switch c { - case Less: - return "Less" - case Equal: - return "Equal" - case Greater: - return "Greater" + case less: + return "less" + case equal: + return "equal" + case greater: + return "greater" + default: + return "none" } - - return "None" } -// comparator describe how to implement compare for two "issues" lexicographically -type comparator interface { - Compare(a, b *result.Issue) compareResult - Next() comparator +// comparator describes how to implement compare for two "issues". 
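The sort_results.go rewrite replaces the ByName/ByLine/ByColumn comparator types with plain comparator values chained through SetNext and merged according to the configured sort order. A standalone sketch of that chaining pattern; issue is a trimmed stand-in for result.Issue, and the none state is omitted for brevity:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// issue is a trimmed stand-in for result.Issue.
type issue struct {
	File string
	Line int
}

type cmpResult int

const (
	less cmpResult = iota - 1
	equal
	greater
)

type comparator struct {
	compare func(a, b *issue) cmpResult
	next    *comparator
}

func (c *comparator) SetNext(n *comparator) *comparator {
	c.next = n
	return c
}

// Compare falls through to the next comparator whenever this one
// considers the two issues equal.
func (c *comparator) Compare(a, b *issue) cmpResult {
	if res := c.compare(a, b); res != equal {
		return res
	}
	if c.next != nil {
		return c.next.Compare(a, b)
	}
	return equal
}

func byFile() *comparator {
	return &comparator{compare: func(a, b *issue) cmpResult {
		return cmpResult(strings.Compare(a.File, b.File))
	}}
}

func byLine() *comparator {
	return &comparator{compare: func(a, b *issue) cmpResult {
		switch {
		case a.Line < b.Line:
			return less
		case a.Line > b.Line:
			return greater
		default:
			return equal
		}
	}}
}

func main() {
	issues := []issue{{"b.go", 3}, {"a.go", 10}, {"a.go", 2}}

	cmp := byFile().SetNext(byLine())
	sort.Slice(issues, func(i, j int) bool {
		return cmp.Compare(&issues[i], &issues[j]) == less
	})

	fmt.Println(issues) // [{a.go 2} {a.go 10} {b.go 3}]
}
```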
+type comparator struct { + name string + compare func(a, b *result.Issue) compareResult + next *comparator } -var ( - _ comparator = (*ByName)(nil) - _ comparator = (*ByLine)(nil) - _ comparator = (*ByColumn)(nil) -) +func (cmp *comparator) Next() *comparator { return cmp.next } -type ByName struct{ next comparator } +func (cmp *comparator) SetNext(c *comparator) *comparator { + cmp.next = c + return cmp +} -//nolint:golint -func (cmp ByName) Next() comparator { return cmp.next } +func (cmp *comparator) String() string { + s := cmp.name + if cmp.Next() != nil { + s += " > " + cmp.Next().String() + } -//nolint:golint -func (cmp ByName) Compare(a, b *result.Issue) compareResult { - var res compareResult + return s +} - if res = compareResult(strings.Compare(a.FilePath(), b.FilePath())); !res.isNeutral() { +func (cmp *comparator) Compare(a, b *result.Issue) compareResult { + res := cmp.compare(a, b) + if !res.isNeutral() { return res } @@ -109,44 +143,95 @@ func (cmp ByName) Compare(a, b *result.Issue) compareResult { return res } -type ByLine struct{ next comparator } +func byFileName() *comparator { + return &comparator{ + name: "byFileName", + compare: func(a, b *result.Issue) compareResult { + return compareResult(strings.Compare(a.FilePath(), b.FilePath())) + }, + } +} + +func byLine() *comparator { + return &comparator{ + name: "byLine", + compare: func(a, b *result.Issue) compareResult { + return numericCompare(a.Line(), b.Line()) + }, + } +} + +func byColumn() *comparator { + return &comparator{ + name: "byColumn", + compare: func(a, b *result.Issue) compareResult { + return numericCompare(a.Column(), b.Column()) + }, + } +} -//nolint:golint -func (cmp ByLine) Next() comparator { return cmp.next } +func byLinter() *comparator { + return &comparator{ + name: "byLinter", + compare: func(a, b *result.Issue) compareResult { + return compareResult(strings.Compare(a.FromLinter, b.FromLinter)) + }, + } +} -//nolint:golint -func (cmp ByLine) Compare(a, b *result.Issue) compareResult { - var res compareResult +func bySeverity() *comparator { + return &comparator{ + name: "bySeverity", + compare: func(a, b *result.Issue) compareResult { + return severityCompare(a.Severity, b.Severity) + }, + } +} - if res = numericCompare(a.Line(), b.Line()); !res.isNeutral() { - return res +func mergeComparators(cmps []*comparator) (*comparator, error) { + if len(cmps) == 0 { + return nil, errors.New("no comparator") } - if next := cmp.Next(); next != nil { - return next.Compare(a, b) + for i := 0; i < len(cmps)-1; i++ { + findComparatorTip(cmps[i]).SetNext(cmps[i+1]) } - return res + return cmps[0], nil } -type ByColumn struct{ next comparator } +func findComparatorTip(cmp *comparator) *comparator { + if cmp.Next() != nil { + return findComparatorTip(cmp.Next()) + } -//nolint:golint -func (cmp ByColumn) Next() comparator { return cmp.next } + return cmp +} -//nolint:golint -func (cmp ByColumn) Compare(a, b *result.Issue) compareResult { - var res compareResult +func severityCompare(a, b string) compareResult { + // The position inside the slice define the importance (lower to higher). 
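A runnable restatement of the severity ranking implemented below, using the Go 1.21 slices package: two known severities compare by their position in the slice, a known severity outranks an unknown one, and two unknown values fall back to lexical order. This sketch returns plain -1/0/1 instead of the compareResult type:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

// Lower index means lower importance, matching the slice below.
var classic = []string{"low", "medium", "high", "warning", "error"}

func severityCompare(a, b string) int {
	ia := slices.Index(classic, a)
	ib := slices.Index(classic, b)

	switch {
	case ia >= 0 && ib >= 0: // both known: rank by position
		switch {
		case ia > ib:
			return 1
		case ia < ib:
			return -1
		default:
			return 0
		}
	case ia >= 0: // only a is known: known outranks unknown
		return 1
	case ib >= 0:
		return -1
	default: // neither known: plain lexical order
		return strings.Compare(a, b)
	}
}

func main() {
	fmt.Println(severityCompare("error", "warning")) // 1
	fmt.Println(severityCompare("low", "high"))      // -1
	fmt.Println(severityCompare("error", "custom"))  // 1
	fmt.Println(severityCompare("aaa", "bbb"))       // -1
}
```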
+ classic := []string{"low", "medium", "high", "warning", "error"} + + if slices.Contains(classic, a) && slices.Contains(classic, b) { + switch { + case slices.Index(classic, a) > slices.Index(classic, b): + return greater + case slices.Index(classic, a) < slices.Index(classic, b): + return less + default: + return equal + } + } - if res = numericCompare(a.Column(), b.Column()); !res.isNeutral() { - return res + if slices.Contains(classic, a) { + return greater } - if next := cmp.Next(); next != nil { - return next.Compare(a, b) + if slices.Contains(classic, b) { + return less } - return res + return compareResult(strings.Compare(a, b)) } func numericCompare(a, b int) compareResult { @@ -160,14 +245,14 @@ func numericCompare(a, b int) compareResult { switch { case isZeroValuesBoth || isEqual: - return Equal + return equal case isValuesInvalid || isZeroValueInA || isZeroValueInB: - return None + return none case a > b: - return Greater + return greater case a < b: - return Less + return less } - return Equal + return equal } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/source_code.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/source_code.go index cfd73cb98e..005b3143f8 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/source_code.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/source_code.go @@ -25,22 +25,22 @@ func (p SourceCode) Name() string { } func (p SourceCode) Process(issues []result.Issue) ([]result.Issue, error) { - return transformIssues(issues, func(i *result.Issue) *result.Issue { - newI := *i + return transformIssues(issues, func(issue *result.Issue) *result.Issue { + newIssue := *issue - lineRange := i.GetLineRange() + lineRange := issue.GetLineRange() for lineNumber := lineRange.From; lineNumber <= lineRange.To; lineNumber++ { - line, err := p.lineCache.GetLine(i.FilePath(), lineNumber) + line, err := p.lineCache.GetLine(issue.FilePath(), lineNumber) if err != nil { p.log.Warnf("Failed to get line %d for file %s: %s", - lineNumber, i.FilePath(), err) - return i + lineNumber, issue.FilePath(), err) + return issue } - newI.SourceLines = append(newI.SourceLines, line) + newIssue.SourceLines = append(newIssue.SourceLines, line) } - return &newI + return &newIssue }), nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go index 17167dde5d..aad1e019e3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go @@ -5,8 +5,10 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) -type lineToCount map[int]int -type fileToLineToCount map[string]lineToCount +type ( + lineToCount map[int]int + fileToLineToCount map[string]lineToCount +) type UniqByLine struct { flc fileToLineToCount @@ -22,7 +24,7 @@ func NewUniqByLine(cfg *config.Config) *UniqByLine { var _ Processor = &UniqByLine{} -func (p UniqByLine) Name() string { +func (p *UniqByLine) Name() string { return "uniq_by_line" } @@ -31,28 +33,28 @@ func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) { return issues, nil } - return filterIssues(issues, func(i *result.Issue) bool { - if i.Replacement != nil && p.cfg.Issues.NeedFix { + return filterIssues(issues, func(issue *result.Issue) bool { + if issue.Replacement != nil && p.cfg.Issues.NeedFix { // if issue will be 
auto-fixed we shouldn't collapse issues: // e.g. one line can contain 2 misspellings, they will be in 2 issues and misspell should fix both of them. return true } - lc := p.flc[i.FilePath()] + lc := p.flc[issue.FilePath()] if lc == nil { lc = lineToCount{} - p.flc[i.FilePath()] = lc + p.flc[issue.FilePath()] = lc } const limit = 1 - count := lc[i.Line()] + count := lc[issue.Line()] if count == limit { return false } - lc[i.Line()]++ + lc[issue.Line()]++ return true }), nil } -func (p UniqByLine) Finish() {} +func (p *UniqByLine) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go deleted file mode 100644 index 7108fd3b3c..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go +++ /dev/null @@ -1,62 +0,0 @@ -package processors - -import ( - "path/filepath" - "regexp" - "strings" - - "github.com/pkg/errors" - - "github.com/golangci/golangci-lint/pkg/result" -) - -func filterIssues(issues []result.Issue, filter func(i *result.Issue) bool) []result.Issue { - retIssues := make([]result.Issue, 0, len(issues)) - for i := range issues { - if filter(&issues[i]) { - retIssues = append(retIssues, issues[i]) - } - } - - return retIssues -} - -func filterIssuesErr(issues []result.Issue, filter func(i *result.Issue) (bool, error)) ([]result.Issue, error) { - retIssues := make([]result.Issue, 0, len(issues)) - for i := range issues { - ok, err := filter(&issues[i]) - if err != nil { - return nil, errors.Wrapf(err, "can't filter issue %#v", issues[i]) - } - - if ok { - retIssues = append(retIssues, issues[i]) - } - } - - return retIssues, nil -} - -func transformIssues(issues []result.Issue, transform func(i *result.Issue) *result.Issue) []result.Issue { - retIssues := make([]result.Issue, 0, len(issues)) - for i := range issues { - newI := transform(&issues[i]) - if newI != nil { - retIssues = append(retIssues, *newI) - } - } - - return retIssues -} - -var separatorToReplace = regexp.QuoteMeta(string(filepath.Separator)) - -func normalizePathInRegex(path string) string { - if filepath.Separator == '/' { - return path - } - - // This replacing should be safe because "/" are disallowed in Windows - // https://docs.microsoft.com/ru-ru/windows/win32/fileio/naming-a-file - return strings.ReplaceAll(path, "/", separatorToReplace) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go b/vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go deleted file mode 100644 index cb89e34e0c..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go +++ /dev/null @@ -1,17 +0,0 @@ -package sliceutil - -// IndexOf get the index of the given value in the given string slice, -// or -1 if not found. -func IndexOf(slice []string, value string) int { - for i, v := range slice { - if v == value { - return i - } - } - return -1 -} - -// Contains check if a string slice contains a value. 
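These homegrown helpers are deleted in favor of the standard library slices package and golang.org/x/exp/maps, which the updated processors use instead (maps.Keys in Nolint.Finish, slices.Contains and slices.Index in severityCompare). A short sketch of the replacements; the example values are invented:

```go
package main

import (
	"fmt"
	"slices"

	"golang.org/x/exp/maps"
)

func main() {
	names := []string{"low", "medium", "high"}
	fmt.Println(slices.Index(names, "medium"))  // 1, replaces sliceutil.IndexOf
	fmt.Println(slices.Contains(names, "high")) // true, replaces sliceutil.Contains

	// maps.Keys replaces the manual for-range collection loop that
	// Nolint.Finish used for its unknown-linters set.
	set := map[string]bool{"unknown-a": true, "unknown-b": true}
	keys := maps.Keys(set)
	slices.Sort(keys)
	fmt.Println(keys) // [unknown-a unknown-b]
}
```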
-func Contains(slice []string, value string) bool { - return IndexOf(slice, value) != -1 -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go b/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go index 9628bd80f2..d944dea2ea 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go @@ -15,10 +15,10 @@ const noStagesText = "no stages" type Stopwatch struct { name string startedAt time.Time - stages map[string]time.Duration log logutils.Log - sync.Mutex + stages map[string]time.Duration + mu sync.Mutex } func NewStopwatch(name string, log logutils.Log) *Stopwatch { @@ -36,7 +36,7 @@ type stageDuration struct { } func (s *Stopwatch) stageDurationsSorted() []stageDuration { - stageDurations := []stageDuration{} + stageDurations := make([]stageDuration, 0, len(s.stages)) for n, d := range s.stages { stageDurations = append(stageDurations, stageDuration{ name: n, @@ -56,7 +56,7 @@ func (s *Stopwatch) sprintStages() string { stageDurations := s.stageDurationsSorted() - stagesStrings := []string{} + stagesStrings := make([]string, 0, len(stageDurations)) for _, s := range stageDurations { stagesStrings = append(stagesStrings, fmt.Sprintf("%s: %s", s.name, s.d)) } @@ -71,7 +71,7 @@ func (s *Stopwatch) sprintTopStages(n int) string { stageDurations := s.stageDurationsSorted() - stagesStrings := []string{} + var stagesStrings []string for i := 0; i < len(stageDurations) && i < n; i++ { s := stageDurations[i] stagesStrings = append(stagesStrings, fmt.Sprintf("%s: %s", s.name, s.d)) @@ -110,7 +110,7 @@ func (s *Stopwatch) TrackStage(name string, f func()) { startedAt := time.Now() f() - s.Lock() + s.mu.Lock() s.stages[name] += time.Since(startedAt) - s.Unlock() + s.mu.Unlock() } diff --git a/vendor/github.com/golangci/lint-1/.travis.yml b/vendor/github.com/golangci/lint-1/.travis.yml deleted file mode 100644 index bc2f4b311e..0000000000 --- a/vendor/github.com/golangci/lint-1/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -sudo: false -language: go -go: - - 1.10.x - - 1.11.x - - master - -go_import_path: github.com/golangci/lint-1 - -install: - - go get -t -v ./... - -script: - - go test -v -race ./... - -matrix: - allow_failures: - - go: master - fast_finish: true diff --git a/vendor/github.com/golangci/lint-1/CONTRIBUTING.md b/vendor/github.com/golangci/lint-1/CONTRIBUTING.md deleted file mode 100644 index 2e39a1c677..0000000000 --- a/vendor/github.com/golangci/lint-1/CONTRIBUTING.md +++ /dev/null @@ -1,15 +0,0 @@ -# Contributing to Golint - -## Before filing an issue: - -### Are you having trouble building golint? - -Check you have the latest version of its dependencies. Run -``` -go get -u github.com/golangci/lint-1/golint -``` -If you still have problems, consider searching for existing issues before filing a new issue. - -## Before sending a pull request: - -Have you understood the purpose of golint? Make sure to carefully read `README`. diff --git a/vendor/github.com/golangci/lint-1/LICENSE b/vendor/github.com/golangci/lint-1/LICENSE deleted file mode 100644 index 65d761bc9f..0000000000 --- a/vendor/github.com/golangci/lint-1/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golangci/lint-1/README.md b/vendor/github.com/golangci/lint-1/README.md deleted file mode 100644 index 2de6ee835c..0000000000 --- a/vendor/github.com/golangci/lint-1/README.md +++ /dev/null @@ -1,88 +0,0 @@ -Golint is a linter for Go source code. - -[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint) - -## Installation - -Golint requires a -[supported release of Go](https://golang.org/doc/devel/release.html#policy). - - go get -u github.com/golangci/lint-1/golint - -To find out where `golint` was installed you can run `go list -f {{.Target}} github.com/golangci/lint-1/golint`. For `golint` to be used globally add that directory to the `$PATH` environment setting. - -## Usage - -Invoke `golint` with one or more filenames, directories, or packages named -by its import path. Golint uses the same -[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as -the `go` command and therefore -also supports relative import paths like `./...`. Additionally the `...` -wildcard can be used as suffix on relative and absolute file paths to recurse -into them. - -The output of this tool is a list of suggestions in Vim quickfix format, -which is accepted by lots of different editors. - -## Purpose - -Golint differs from gofmt. Gofmt reformats Go source code, whereas -golint prints out style mistakes. - -Golint differs from govet. Govet is concerned with correctness, whereas -golint is concerned with coding style. Golint is in use at Google, and it -seeks to match the accepted style of the open source Go project. - -The suggestions made by golint are exactly that: suggestions. -Golint is not perfect, and has both false positives and false negatives. -Do not treat its output as a gold standard. We will not be adding pragmas -or other knobs to suppress specific warnings, so do not expect or require -code to be completely "lint-free". -In short, this tool is not, and will never be, trustworthy enough for its -suggestions to be enforced automatically, for example as part of a build process. 
-Golint makes suggestions for many of the mechanically checkable items listed in -[Effective Go](https://golang.org/doc/effective_go.html) and the -[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments). - -## Scope - -Golint is meant to carry out the stylistic conventions put forth in -[Effective Go](https://golang.org/doc/effective_go.html) and -[CodeReviewComments](https://golang.org/wiki/CodeReviewComments). -Changes that are not aligned with those documents will not be considered. - -## Contributions - -Contributions to this project are welcome provided they are [in scope](#scope), -though please send mail before starting work on anything major. -Contributors retain their copyright, so we need you to fill out -[a short form](https://developers.google.com/open-source/cla/individual) -before we can accept your contribution. - -## Vim - -Add this to your ~/.vimrc: - - set rtp+=$GOPATH/src/github.com/golangci/lint-1/misc/vim - -If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. - -Running `:Lint` will run golint on the current file and populate the quickfix list. - -Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w` - - autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow - - -## Emacs - -Add this to your `.emacs` file: - - (add-to-list 'load-path (concat (getenv "GOPATH") "/src/github.com/golang/lint/misc/emacs")) - (require 'golint) - -If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. - -Running M-x golint will run golint on the current file. - -For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html). diff --git a/vendor/github.com/golangci/lint-1/lint.go b/vendor/github.com/golangci/lint-1/lint.go deleted file mode 100644 index 886c85bf09..0000000000 --- a/vendor/github.com/golangci/lint-1/lint.go +++ /dev/null @@ -1,1655 +0,0 @@ -// Copyright (c) 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// Package lint contains a linter for Go source code. -package lint // import "github.com/golangci/lint-1" - -import ( - "bufio" - "bytes" - "fmt" - "go/ast" - "go/parser" - "go/printer" - "go/token" - "go/types" - "io/ioutil" - "regexp" - "sort" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/gcexportdata" -) - -const styleGuideBase = "https://golang.org/wiki/CodeReviewComments" - -// A Linter lints Go source code. -type Linter struct { -} - -// Problem represents a problem in some source code. -type Problem struct { - Position token.Position // position in source file - Text string // the prose that describes the problem - Link string // (optional) the link to the style guide for the problem - Confidence float64 // a value in (0,1] estimating the confidence in this problem's correctness - LineText string // the source line - Category string // a short name for the general category of the problem - - // If the problem has a suggested fix (the minority case), - // ReplacementLine is a full replacement for the relevant line of the source file. 
- ReplacementLine string -} - -func (p *Problem) String() string { - if p.Link != "" { - return p.Text + "\n\n" + p.Link - } - return p.Text -} - -type byPosition []Problem - -func (p byPosition) Len() int { return len(p) } -func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p byPosition) Less(i, j int) bool { - pi, pj := p[i].Position, p[j].Position - - if pi.Filename != pj.Filename { - return pi.Filename < pj.Filename - } - if pi.Line != pj.Line { - return pi.Line < pj.Line - } - if pi.Column != pj.Column { - return pi.Column < pj.Column - } - - return p[i].Text < p[j].Text -} - -// Lint lints src. -func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) { - return l.LintFiles(map[string][]byte{filename: src}) -} - -// LintFiles lints a set of files of a single package. -// The argument is a map of filename to source. -func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) { - pkg := &pkg{ - fset: token.NewFileSet(), - files: make(map[string]*file), - } - var pkgName string - for filename, src := range files { - if isGenerated(src) { - continue // See issue #239 - } - f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments) - if err != nil { - return nil, err - } - if pkgName == "" { - pkgName = f.Name.Name - } else if f.Name.Name != pkgName { - return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName) - } - pkg.files[filename] = &file{ - pkg: pkg, - f: f, - fset: pkg.fset, - src: src, - filename: filename, - } - } - if len(pkg.files) == 0 { - return nil, nil - } - return pkg.lint(), nil -} - -// LintFiles lints a set of files of a single package. -// The argument is a map of filename to source. -func (l *Linter) LintPkg(files []*ast.File, fset *token.FileSet, typesPkg *types.Package, typesInfo *types.Info) ([]Problem, error) { - pkg := &pkg{ - fset: fset, - files: make(map[string]*file), - typesPkg: typesPkg, - typesInfo: typesInfo, - } - var pkgName string - for _, f := range files { - // use PositionFor, not Position because of //line directives: - // this filename will be used for source lines extraction. - filename := fset.PositionFor(f.Pos(), false).Filename - if filename == "" { - return nil, fmt.Errorf("no file name for file %+v", f) - } - - if pkgName == "" { - pkgName = f.Name.Name - } else if f.Name.Name != pkgName { - return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName) - } - - // TODO: reuse golangci-lint lines cache - src, err := ioutil.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("can't read file %s: %s", filename, err) - } - - pkg.files[filename] = &file{ - pkg: pkg, - f: f, - fset: pkg.fset, - src: src, - filename: filename, - } - } - if len(pkg.files) == 0 { - return nil, nil - } - return pkg.lint(), nil -} - -var ( - genHdr = []byte("// Code generated ") - genFtr = []byte(" DO NOT EDIT.") -) - -// isGenerated reports whether the source file is generated code -// according the rules from https://golang.org/s/generatedcode. -func isGenerated(src []byte) bool { - sc := bufio.NewScanner(bytes.NewReader(src)) - for sc.Scan() { - b := sc.Bytes() - if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) { - return true - } - } - return false -} - -// pkg represents a package being linted. -type pkg struct { - fset *token.FileSet - files map[string]*file - - typesPkg *types.Package - typesInfo *types.Info - - // sortable is the set of types in the package that implement sort.Interface. 
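- // (lintFuncDoc relies on this set to exempt Len, Less and Swap methods on sortable types from doc-comment checks.)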
- sortable map[string]bool - // main is whether this is a "main" package. - main bool - - problems []Problem -} - -func (p *pkg) lint() []Problem { - p.scanSortable() - p.main = p.isMain() - - for _, f := range p.files { - f.lint() - } - - sort.Sort(byPosition(p.problems)) - - return p.problems -} - -// file represents a file being linted. -type file struct { - pkg *pkg - f *ast.File - fset *token.FileSet - src []byte - filename string -} - -func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") } - -func (f *file) lint() { - f.lintPackageComment() - f.lintImports() - f.lintBlankImports() - f.lintExported() - f.lintNames() - f.lintElses() - f.lintRanges() - f.lintErrorf() - f.lintErrors() - f.lintErrorStrings() - f.lintReceiverNames() - f.lintIncDec() - f.lintErrorReturn() - f.lintUnexportedReturn() - f.lintTimeNames() - f.lintContextKeyTypes() - f.lintContextArgs() -} - -type link string -type category string - -// The variadic arguments may start with link and category types, -// and must end with a format string and any arguments. -// It returns the new Problem. -func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem { - pos := f.fset.Position(n.Pos()) - if pos.Filename == "" { - pos.Filename = f.filename - } - return f.pkg.errorfAt(pos, confidence, args...) -} - -func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem { - problem := Problem{ - Position: pos, - Confidence: confidence, - } - if pos.Filename != "" { - // The file might not exist in our mapping if a //line directive was encountered. - if f, ok := p.files[pos.Filename]; ok { - problem.LineText = srcLine(f.src, pos) - } - } - -argLoop: - for len(args) > 1 { // always leave at least the format string in args - switch v := args[0].(type) { - case link: - problem.Link = string(v) - case category: - problem.Category = string(v) - default: - break argLoop - } - args = args[1:] - } - - problem.Text = fmt.Sprintf(args[0].(string), args[1:]...) - - p.problems = append(p.problems, problem) - return &p.problems[len(p.problems)-1] -} - -var newImporter = func(fset *token.FileSet) types.ImporterFrom { - return gcexportdata.NewImporter(fset, make(map[string]*types.Package)) -} - -func (p *pkg) typeCheck() error { - config := &types.Config{ - // By setting a no-op error reporter, the type checker does as much work as possible. - Error: func(error) {}, - Importer: newImporter(p.fset), - } - info := &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - } - var anyFile *file - var astFiles []*ast.File - for _, f := range p.files { - anyFile = f - astFiles = append(astFiles, f.f) - } - pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info) - // Remember the typechecking info, even if config.Check failed, - // since we will get partial information. - p.typesPkg = pkg - p.typesInfo = info - return err -} - -func (p *pkg) typeOf(expr ast.Expr) types.Type { - if p.typesInfo == nil { - return nil - } - return p.typesInfo.TypeOf(expr) -} - -func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool { - n, ok := typ.(*types.Named) - if !ok { - return false - } - tn := n.Obj() - return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name -} - -// scopeOf returns the tightest scope encompassing id. 
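- // For example, for a package-level var x, scopeOf on x's identifier returns the scope of the file that declares x rather than the package scope.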
-func (p *pkg) scopeOf(id *ast.Ident) *types.Scope { - var scope *types.Scope - if obj := p.typesInfo.ObjectOf(id); obj != nil { - scope = obj.Parent() - } - if scope == p.typesPkg.Scope() { - // We were given a top-level identifier. - // Use the file-level scope instead of the package-level scope. - pos := id.Pos() - for _, f := range p.files { - if f.f.Pos() <= pos && pos < f.f.End() { - scope = p.typesInfo.Scopes[f.f] - break - } - } - } - return scope -} - -func (p *pkg) scanSortable() { - p.sortable = make(map[string]bool) - - // bitfield for which methods exist on each type. - const ( - Len = 1 << iota - Less - Swap - ) - nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap} - has := make(map[string]int) - for _, f := range p.files { - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { - return true - } - // TODO(dsymonds): We could check the signature to be more precise. - recv := receiverType(fn) - if i, ok := nmap[fn.Name.Name]; ok { - has[recv] |= i - } - return false - }) - } - for typ, ms := range has { - if ms == Len|Less|Swap { - p.sortable[typ] = true - } - } -} - -func (p *pkg) isMain() bool { - for _, f := range p.files { - if f.isMain() { - return true - } - } - return false -} - -func (f *file) isMain() bool { - if f.f.Name.Name == "main" { - return true - } - return false -} - -// lintPackageComment checks package comments. It complains if -// there is no package comment, or if it is not of the right form. -// This has a notable false positive in that a package comment -// could rightfully appear in a different file of the same package, -// but that's not easy to fix since this linter is file-oriented. -func (f *file) lintPackageComment() { - if f.isTest() { - return - } - - const ref = styleGuideBase + "#package-comments" - prefix := "Package " + f.f.Name.Name + " " - - // Look for a detached package comment. - // First, scan for the last comment that occurs before the "package" keyword. - var lastCG *ast.CommentGroup - for _, cg := range f.f.Comments { - if cg.Pos() > f.f.Package { - // Gone past "package" keyword. - break - } - lastCG = cg - } - if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) { - endPos := f.fset.Position(lastCG.End()) - pkgPos := f.fset.Position(f.f.Package) - if endPos.Line+1 < pkgPos.Line { - // There isn't a great place to anchor this error; - // the start of the blank lines between the doc and the package statement - // is at least pointing at the location of the problem. - pos := token.Position{ - Filename: endPos.Filename, - // Offset not set; it is non-trivial, and doesn't appear to be needed. - Line: endPos.Line + 1, - Column: 1, - } - f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement") - return - } - } - - if f.f.Doc == nil { - f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package") - return - } - s := f.f.Doc.Text() - if ts := strings.TrimLeft(s, " \t"); ts != s { - f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space") - s = ts - } - // Only non-main packages need to keep to this form. 
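- // For example, a package named gc must begin its comment with "Package gc ...".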
- if !f.pkg.main && !strings.HasPrefix(s, prefix) { - f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix) - } -} - -func (f *file) isCgo() bool { - if f.src == nil { - return false - } - newLinePos := bytes.Index(f.src, []byte("\n")) - if newLinePos < 0 { - return false - } - firstLine := string(f.src[:newLinePos]) - - // files using cgo have implicitly added comment "Created by cgo - DO NOT EDIT" for go <= 1.10 - // and "Code generated by cmd/cgo" for go >= 1.11 - return strings.Contains(firstLine, "Created by cgo") || strings.Contains(firstLine, "Code generated by cmd/cgo") -} - -// lintBlankImports complains if a non-main package has blank imports that are -// not documented. -func (f *file) lintBlankImports() { - // In package main and in tests, we don't complain about blank imports. - if f.pkg.main || f.isTest() || f.isCgo() { - return - } - - // The first element of each contiguous group of blank imports should have - // an explanatory comment of some kind. - for i, imp := range f.f.Imports { - pos := f.fset.Position(imp.Pos()) - - if !isBlank(imp.Name) { - continue // Ignore non-blank imports. - } - if i > 0 { - prev := f.f.Imports[i-1] - prevPos := f.fset.Position(prev.Pos()) - if isBlank(prev.Name) && prevPos.Line+1 == pos.Line { - continue // A subsequent blank in a group. - } - } - - // This is the first blank import of a group. - if imp.Doc == nil && imp.Comment == nil { - ref := "" - f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it") - } - } -} - -// lintImports examines import blocks. -func (f *file) lintImports() { - for i, is := range f.f.Imports { - _ = i - if is.Name != nil && is.Name.Name == "." && !f.isTest() { - f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports") - } - - } -} - -const docCommentsLink = styleGuideBase + "#doc-comments" - -// lintExported examines the exported names. -// It complains if any required doc comments are missing, -// or if they are not of the right form. The exact rules are in -// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function -// also tracks the GenDecl structure being traversed to permit -// doc comments for constants to be on top of the const block. -// It also complains if the names stutter when combined with -// the package name. -func (f *file) lintExported() { - if f.isTest() { - return - } - - var lastGen *ast.GenDecl // last GenDecl entered. - - // Set of GenDecls that have already had missing comments flagged. - genDeclMissingComments := make(map[*ast.GenDecl]bool) - - f.walk(func(node ast.Node) bool { - switch v := node.(type) { - case *ast.GenDecl: - if v.Tok == token.IMPORT { - return false - } - // token.CONST, token.TYPE or token.VAR - lastGen = v - return true - case *ast.FuncDecl: - f.lintFuncDoc(v) - if v.Recv == nil { - // Only check for stutter on functions, not methods. - // Method names are not used package-qualified. - f.checkStutter(v.Name, "func") - } - // Don't proceed inside funcs. - return false - case *ast.TypeSpec: - // inside a GenDecl, which usually has the doc - doc := v.Doc - if doc == nil { - doc = lastGen.Doc - } - f.lintTypeDoc(v, doc) - f.checkStutter(v.Name, "type") - // Don't proceed inside types. 
- return false - case *ast.ValueSpec: - f.lintValueSpecDoc(v, lastGen, genDeclMissingComments) - return false - } - return true - }) -} - -var ( - allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`) - anyCapsRE = regexp.MustCompile(`[A-Z]`) -) - -// knownNameExceptions is a set of names that are known to be exempt from naming checks. -// This is usually because they are constrained by having to match names in the -// standard library. -var knownNameExceptions = map[string]bool{ - "LastInsertId": true, // must match database/sql - "kWh": true, -} - -func isInTopLevel(f *ast.File, ident *ast.Ident) bool { - path, _ := astutil.PathEnclosingInterval(f, ident.Pos(), ident.End()) - for _, f := range path { - switch f.(type) { - case *ast.File, *ast.GenDecl, *ast.ValueSpec, *ast.Ident: - continue - } - return false - } - return true -} - -// lintNames examines all names in the file. -// It complains if any use underscores or incorrect known initialisms. -func (f *file) lintNames() { - // Package names need slightly different handling than other names. - if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") { - f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name") - } - if anyCapsRE.MatchString(f.f.Name.Name) { - f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("mixed-caps"), "don't use MixedCaps in package name; %s should be %s", f.f.Name.Name, strings.ToLower(f.f.Name.Name)) - } - - check := func(id *ast.Ident, thing string) { - if id.Name == "_" { - return - } - if knownNameExceptions[id.Name] { - return - } - - // Handle two common styles from other languages that don't belong in Go. - if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") { - capCount := 0 - for _, c := range id.Name { - if 'A' <= c && c <= 'Z' { - capCount++ - } - } - if capCount >= 2 { - f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase") - return - } - } - if thing == "const" || (thing == "var" && isInTopLevel(f.f, id)) { - if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' { - should := string(id.Name[1]+'a'-'A') + id.Name[2:] - f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should) - } - } - - should := lintName(id.Name) - if id.Name == should { - return - } - - if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") { - f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should) - return - } - f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should) - } - checkList := func(fl *ast.FieldList, thing string) { - if fl == nil { - return - } - for _, f := range fl.List { - for _, id := range f.Names { - check(id, thing) - } - } - } - f.walk(func(node ast.Node) bool { - switch v := node.(type) { - case *ast.AssignStmt: - if v.Tok == token.ASSIGN { - return true - } - for _, exp := range v.Lhs { - if id, ok := exp.(*ast.Ident); ok { - check(id, "var") - } - } - case *ast.FuncDecl: - if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { - return true - } - - thing := 
"func" - if v.Recv != nil { - thing = "method" - } - - // Exclude naming warnings for functions that are exported to C but - // not exported in the Go API. - // See https://github.com/golang/lint/issues/144. - if ast.IsExported(v.Name.Name) || !isCgoExported(v) { - check(v.Name, thing) - } - - checkList(v.Type.Params, thing+" parameter") - checkList(v.Type.Results, thing+" result") - case *ast.GenDecl: - if v.Tok == token.IMPORT { - return true - } - var thing string - switch v.Tok { - case token.CONST: - thing = "const" - case token.TYPE: - thing = "type" - case token.VAR: - thing = "var" - } - for _, spec := range v.Specs { - switch s := spec.(type) { - case *ast.TypeSpec: - check(s.Name, thing) - case *ast.ValueSpec: - for _, id := range s.Names { - check(id, thing) - } - } - } - case *ast.InterfaceType: - // Do not check interface method names. - // They are often constrainted by the method names of concrete types. - for _, x := range v.Methods.List { - ft, ok := x.Type.(*ast.FuncType) - if !ok { // might be an embedded interface name - continue - } - checkList(ft.Params, "interface method parameter") - checkList(ft.Results, "interface method result") - } - case *ast.RangeStmt: - if v.Tok == token.ASSIGN { - return true - } - if id, ok := v.Key.(*ast.Ident); ok { - check(id, "range var") - } - if id, ok := v.Value.(*ast.Ident); ok { - check(id, "range var") - } - case *ast.StructType: - for _, f := range v.Fields.List { - for _, id := range f.Names { - check(id, "struct field") - } - } - } - return true - }) -} - -// lintName returns a different name if it should be different. -func lintName(name string) (should string) { - // Fast path for simple cases: "_" and all lowercase. - if name == "_" { - return name - } - allLower := true - for _, r := range name { - if !unicode.IsLower(r) { - allLower = false - break - } - } - if allLower { - return name - } - - // Split camelCase at any lower->upper transition, and split on underscores. - // Check each word for common initialisms. - runes := []rune(name) - w, i := 0, 0 // index of start of word, scan - for i+1 <= len(runes) { - eow := false // whether we hit the end of a word - if i+1 == len(runes) { - eow = true - } else if runes[i+1] == '_' { - // underscore; shift the remainder forward over any run of underscores - eow = true - n := 1 - for i+n+1 < len(runes) && runes[i+n+1] == '_' { - n++ - } - - // Leave at most one underscore if the underscore is between two digits - if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) { - n-- - } - - copy(runes[i+1:], runes[i+n+1:]) - runes = runes[:len(runes)-n] - } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) { - // lower->non-lower - eow = true - } - i++ - if !eow { - continue - } - - // [w,i) is a word. - word := string(runes[w:i]) - if u := strings.ToUpper(word); commonInitialisms[u] { - // Keep consistent case, which is lowercase only at the start. - if w == 0 && unicode.IsLower(runes[w]) { - u = strings.ToLower(u) - } - // All the common initialisms are ASCII, - // so we can replace the bytes exactly. - copy(runes[w:], []rune(u)) - } else if w > 0 && strings.ToLower(word) == word { - // already all lowercase, and not the first word, so uppercase the first character. - runes[w] = unicode.ToUpper(runes[w]) - } - w = i - } - return string(runes) -} - -// commonInitialisms is a set of common initialisms. -// Only add entries that are highly unlikely to be non-initialisms. -// For instance, "ID" is fine (Freudian code is rare), but "AND" is not. 
-var commonInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTP": true, - "HTTPS": true, - "ID": true, - "IP": true, - "JSON": true, - "LHS": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, -} - -// lintTypeDoc examines the doc comment on a type. -// It complains if they are missing from an exported type, -// or if they are not of the standard form. -func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { - if !ast.IsExported(t.Name.Name) { - return - } - if doc == nil { - f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name) - return - } - - s := doc.Text() - articles := [...]string{"A", "An", "The"} - for _, a := range articles { - if strings.HasPrefix(s, a+" ") { - s = s[len(a)+1:] - break - } - } - if !strings.HasPrefix(s, t.Name.Name+" ") { - f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name) - } -} - -var commonMethods = map[string]bool{ - "Error": true, - "Read": true, - "ServeHTTP": true, - "String": true, - "Write": true, -} - -// lintFuncDoc examines doc comments on functions and methods. -// It complains if they are missing, or not of the right form. -// It has specific exclusions for well-known methods (see commonMethods above). -func (f *file) lintFuncDoc(fn *ast.FuncDecl) { - if !ast.IsExported(fn.Name.Name) { - // func is unexported - return - } - kind := "function" - name := fn.Name.Name - if fn.Recv != nil && len(fn.Recv.List) > 0 { - // method - kind = "method" - recv := receiverType(fn) - if !ast.IsExported(recv) { - // receiver is unexported - return - } - if commonMethods[name] { - return - } - switch name { - case "Len", "Less", "Swap": - if f.pkg.sortable[recv] { - return - } - } - name = recv + "." + name - } - if fn.Doc == nil { - f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name) - return - } - s := fn.Doc.Text() - prefix := fn.Name.Name + " " - if !strings.HasPrefix(s, prefix) { - f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) - } -} - -// lintValueSpecDoc examines package-global variables and constants. -// It complains if they are not individually declared, -// or if they are not suitably documented in the right form (unless they are in a block that is commented). -func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) { - kind := "var" - if gd.Tok == token.CONST { - kind = "const" - } - - if len(vs.Names) > 1 { - // Check that none are exported except for the first. - for _, n := range vs.Names[1:] { - if ast.IsExported(n.Name) { - f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name) - return - } - } - } - - // Only one name. 
- name := vs.Names[0].Name - if !ast.IsExported(name) { - return - } - - if vs.Doc == nil && gd.Doc == nil { - if genDeclMissingComments[gd] { - return - } - block := "" - if kind == "const" && gd.Lparen.IsValid() { - block = " (or a comment on this block)" - } - f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block) - genDeclMissingComments[gd] = true - return - } - // If this GenDecl has parens and a comment, we don't check its comment form. - if gd.Lparen.IsValid() && gd.Doc != nil { - return - } - // The relevant text to check will be on either vs.Doc or gd.Doc. - // Use vs.Doc preferentially. - doc := vs.Doc - if doc == nil { - doc = gd.Doc - } - prefix := name + " " - if !strings.HasPrefix(doc.Text(), prefix) { - f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) - } -} - -func (f *file) checkStutter(id *ast.Ident, thing string) { - pkg, name := f.f.Name.Name, id.Name - if !ast.IsExported(name) { - // unexported name - return - } - // A name stutters if the package name is a strict prefix - // and the next character of the name starts a new word. - if len(name) <= len(pkg) { - // name is too short to stutter. - // This permits the name to be the same as the package name. - return - } - if !strings.EqualFold(pkg, name[:len(pkg)]) { - return - } - // We can assume the name is well-formed UTF-8. - // If the next rune after the package name is uppercase or an underscore - // the it's starting a new word and thus this name stutters. - rem := name[len(pkg):] - if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) { - f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem) - } -} - -// zeroLiteral is a set of ast.BasicLit values that are zero values. -// It is not exhaustive. -var zeroLiteral = map[string]bool{ - "false": true, // bool - // runes - `'\x00'`: true, - `'\000'`: true, - // strings - `""`: true, - "``": true, - // numerics - "0": true, - "0.": true, - "0.0": true, - "0i": true, -} - -// lintElses examines else blocks. It complains about any else block whose if block ends in a return. -func (f *file) lintElses() { - // We don't want to flag if { } else if { } else { } constructions. - // They will appear as an IfStmt whose Else field is also an IfStmt. - // Record such a node so we ignore it when we visit it. - ignore := make(map[*ast.IfStmt]bool) - - f.walk(func(node ast.Node) bool { - ifStmt, ok := node.(*ast.IfStmt) - if !ok || ifStmt.Else == nil { - return true - } - if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { - ignore[elseif] = true - return true - } - if ignore[ifStmt] { - return true - } - if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { - // only care about elses without conditions - return true - } - if len(ifStmt.Body.List) == 0 { - return true - } - shortDecl := false // does the if statement have a ":=" initialization statement? 
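- // e.g. given "if err := f(); err != nil { return err } else { ... }", simply outdenting the else body would move err out of scope, which is why the hint below is appended.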
- if ifStmt.Init != nil { - if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { - shortDecl = true - } - } - lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] - if _, ok := lastStmt.(*ast.ReturnStmt); ok { - extra := "" - if shortDecl { - extra = " (move short variable declaration to its own line if necessary)" - } - f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra) - } - return true - }) -} - -// lintRanges examines range clauses. It complains about redundant constructions. -func (f *file) lintRanges() { - f.walk(func(node ast.Node) bool { - rs, ok := node.(*ast.RangeStmt) - if !ok { - return true - } - - if isIdent(rs.Key, "_") && (rs.Value == nil || isIdent(rs.Value, "_")) { - p := f.errorf(rs.Key, 1, category("range-loop"), "should omit values from range; this loop is equivalent to `for range ...`") - - newRS := *rs // shallow copy - newRS.Value = nil - newRS.Key = nil - p.ReplacementLine = f.firstLineOf(&newRS, rs) - - return true - } - - if isIdent(rs.Value, "_") { - p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok) - - newRS := *rs // shallow copy - newRS.Value = nil - p.ReplacementLine = f.firstLineOf(&newRS, rs) - } - - return true - }) -} - -// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation. -func (f *file) lintErrorf() { - f.walk(func(node ast.Node) bool { - ce, ok := node.(*ast.CallExpr) - if !ok || len(ce.Args) != 1 { - return true - } - isErrorsNew := isPkgDot(ce.Fun, "errors", "New") - var isTestingError bool - se, ok := ce.Fun.(*ast.SelectorExpr) - if ok && se.Sel.Name == "Error" { - if typ := f.pkg.typeOf(se.X); typ != nil { - isTestingError = typ.String() == "*testing.T" - } - } - if !isErrorsNew && !isTestingError { - return true - } - if !f.imports("errors") { - return true - } - arg := ce.Args[0] - ce, ok = arg.(*ast.CallExpr) - if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") { - return true - } - errorfPrefix := "fmt" - if isTestingError { - errorfPrefix = f.render(se.X) - } - p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix) - - m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`) - if m != nil { - p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3] - } - - return true - }) -} - -// lintErrors examines global error vars. It complains if they aren't named in the standard way. 
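- // For example, `var e = errors.New("oops")` at package scope is flagged; the expected form is errFoo, or ErrFoo when exported.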
-func (f *file) lintErrors() { - for _, decl := range f.f.Decls { - gd, ok := decl.(*ast.GenDecl) - if !ok || gd.Tok != token.VAR { - continue - } - for _, spec := range gd.Specs { - spec := spec.(*ast.ValueSpec) - if len(spec.Names) != 1 || len(spec.Values) != 1 { - continue - } - ce, ok := spec.Values[0].(*ast.CallExpr) - if !ok { - continue - } - if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { - continue - } - - id := spec.Names[0] - prefix := "err" - if id.IsExported() { - prefix = "Err" - } - if !strings.HasPrefix(id.Name, prefix) { - f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix) - } - } - } -} - -func lintErrorString(s string) (isClean bool, conf float64) { - const basicConfidence = 0.8 - const capConfidence = basicConfidence - 0.2 - first, firstN := utf8.DecodeRuneInString(s) - last, _ := utf8.DecodeLastRuneInString(s) - if last == '.' || last == ':' || last == '!' || last == '\n' { - return false, basicConfidence - } - if unicode.IsUpper(first) { - // People use proper nouns and exported Go identifiers in error strings, - // so decrease the confidence of warnings for capitalization. - if len(s) <= firstN { - return false, capConfidence - } - // Flag strings starting with something that doesn't look like an initialism. - if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) { - return false, capConfidence - } - } - return true, 0 -} - -// lintErrorStrings examines error strings. -// It complains if they are capitalized or end in punctuation or a newline. -func (f *file) lintErrorStrings() { - f.walk(func(node ast.Node) bool { - ce, ok := node.(*ast.CallExpr) - if !ok { - return true - } - if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { - return true - } - if len(ce.Args) < 1 { - return true - } - str, ok := ce.Args[0].(*ast.BasicLit) - if !ok || str.Kind != token.STRING { - return true - } - s, _ := strconv.Unquote(str.Value) // can assume well-formed Go - if s == "" { - return true - } - clean, conf := lintErrorString(s) - if clean { - return true - } - - f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"), - "error strings should not be capitalized or end with punctuation or a newline") - return true - }) -} - -// lintReceiverNames examines receiver names. It complains about inconsistent -// names used for the same type and names such as "this". 
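- // For example, mixing `func (c *Client)` and `func (cl *Client)` on the same type flags the second method; "this", "self" and "_" are always flagged.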
-func (f *file) lintReceiverNames() { - typeReceiver := map[string]string{} - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { - return true - } - names := fn.Recv.List[0].Names - if len(names) < 1 { - return true - } - name := names[0].Name - const ref = styleGuideBase + "#receiver-names" - if name == "_" { - f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`) - return true - } - if name == "this" || name == "self" { - f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`) - return true - } - recv := receiverType(fn) - if prev, ok := typeReceiver[recv]; ok && prev != name { - f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv) - return true - } - typeReceiver[recv] = name - return true - }) -} - -// lintIncDec examines statements that increment or decrement a variable. -// It complains if they don't use x++ or x--. -func (f *file) lintIncDec() { - f.walk(func(n ast.Node) bool { - as, ok := n.(*ast.AssignStmt) - if !ok { - return true - } - if len(as.Lhs) != 1 { - return true - } - if !isOne(as.Rhs[0]) { - return true - } - var suffix string - switch as.Tok { - case token.ADD_ASSIGN: - suffix = "++" - case token.SUB_ASSIGN: - suffix = "--" - default: - return true - } - f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix) - return true - }) -} - -// lintErrorReturn examines function declarations that return an error. -// It complains if the error isn't the last parameter. -func (f *file) lintErrorReturn() { - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Type.Results == nil { - return true - } - ret := fn.Type.Results.List - if len(ret) <= 1 { - return true - } - if isIdent(ret[len(ret)-1].Type, "error") { - return true - } - // An error return parameter should be the last parameter. - // Flag any error parameters found before the last. - for _, r := range ret[:len(ret)-1] { - if isIdent(r.Type, "error") { - f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items") - break // only flag one - } - } - return true - }) -} - -// lintUnexportedReturn examines exported function declarations. -// It complains if any return an unexported type. -func (f *file) lintUnexportedReturn() { - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok { - return true - } - if fn.Type.Results == nil { - return false - } - if !fn.Name.IsExported() { - return false - } - thing := "func" - if fn.Recv != nil && len(fn.Recv.List) > 0 { - thing = "method" - if !ast.IsExported(receiverType(fn)) { - // Don't report exported methods of unexported types, - // such as private implementations of sort.Interface. - return false - } - } - for _, ret := range fn.Type.Results.List { - typ := f.pkg.typeOf(ret.Type) - if exportedType(typ) { - continue - } - f.errorf(ret.Type, 0.8, category("unexported-type-in-api"), - "exported %s %s returns unexported type %s, which can be annoying to use", - thing, fn.Name.Name, typ) - break // only flag one - } - return false - }) -} - -// exportedType reports whether typ is an exported type. -// It is imprecise, and will err on the side of returning true, -// such as for composite types. 
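- // For example, a map[string]unexportedT reports false because its element type is unexported, while a bare struct type conservatively reports true.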
-func exportedType(typ types.Type) bool { - switch T := typ.(type) { - case *types.Named: - // Builtin types have no package. - return T.Obj().Pkg() == nil || T.Obj().Exported() - case *types.Map: - return exportedType(T.Key()) && exportedType(T.Elem()) - case interface { - Elem() types.Type - }: // array, slice, pointer, chan - return exportedType(T.Elem()) - } - // Be conservative about other types, such as struct, interface, etc. - return true -} - -// timeSuffixes is a list of name suffixes that imply a time unit. -// This is not an exhaustive list. -var timeSuffixes = []string{ - "Sec", "Secs", "Seconds", - "Msec", "Msecs", - "Milli", "Millis", "Milliseconds", - "Usec", "Usecs", "Microseconds", - "MS", "Ms", -} - -func (f *file) lintTimeNames() { - f.walk(func(node ast.Node) bool { - v, ok := node.(*ast.ValueSpec) - if !ok { - return true - } - for _, name := range v.Names { - origTyp := f.pkg.typeOf(name) - // Look for time.Duration or *time.Duration; - // the latter is common when using flag.Duration. - typ := origTyp - if pt, ok := typ.(*types.Pointer); ok { - typ = pt.Elem() - } - if !f.pkg.isNamedType(typ, "time", "Duration") { - continue - } - suffix := "" - for _, suf := range timeSuffixes { - if strings.HasSuffix(name.Name, suf) { - suffix = suf - break - } - } - if suffix == "" { - continue - } - f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix) - } - return true - }) -} - -// lintContextKeyTypes checks for call expressions to context.WithValue with -// basic types used for the key argument. -// See: https://golang.org/issue/17293 -func (f *file) lintContextKeyTypes() { - f.walk(func(node ast.Node) bool { - switch node := node.(type) { - case *ast.CallExpr: - f.checkContextKeyType(node) - } - - return true - }) -} - -// checkContextKeyType reports an error if the call expression calls -// context.WithValue with a key argument of basic type. -func (f *file) checkContextKeyType(x *ast.CallExpr) { - sel, ok := x.Fun.(*ast.SelectorExpr) - if !ok { - return - } - pkg, ok := sel.X.(*ast.Ident) - if !ok || pkg.Name != "context" { - return - } - if sel.Sel.Name != "WithValue" { - return - } - - // key is second argument to context.WithValue - if len(x.Args) != 3 { - return - } - key := f.pkg.typesInfo.Types[x.Args[1]] - - if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid { - f.errorf(x, 1.0, category("context"), fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type)) - } -} - -// lintContextArgs examines function declarations that contain an -// argument with a type of context.Context -// It complains if that argument isn't the first parameter. -func (f *file) lintContextArgs() { - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok || len(fn.Type.Params.List) <= 1 { - return true - } - // A context.Context should be the first parameter of a function. - // Flag any that show up after the first. - for _, arg := range fn.Type.Params.List[1:] { - if isPkgDot(arg.Type, "context", "Context") { - f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function") - break // only flag one - } - } - return true - }) -} - -// containsComments returns whether the interval [start, end) contains any -// comments without "// MATCH " prefix. 
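- // (The "// MATCH " prefix is skipped because such comments serve as expectation markers in golint's own test data.)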
-func (f *file) containsComments(start, end token.Pos) bool { - for _, cgroup := range f.f.Comments { - comments := cgroup.List - if comments[0].Slash >= end { - // All comments starting with this group are after end pos. - return false - } - if comments[len(comments)-1].Slash < start { - // Comments group ends before start pos. - continue - } - for _, c := range comments { - if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") { - return true - } - } - } - return false -} - -// receiverType returns the named type of the method receiver, sans "*", -// or "invalid-type" if fn.Recv is ill formed. -func receiverType(fn *ast.FuncDecl) string { - switch e := fn.Recv.List[0].Type.(type) { - case *ast.Ident: - return e.Name - case *ast.StarExpr: - if id, ok := e.X.(*ast.Ident); ok { - return id.Name - } - } - // The parser accepts much more than just the legal forms. - return "invalid-type" -} - -func (f *file) walk(fn func(ast.Node) bool) { - ast.Walk(walker(fn), f.f) -} - -func (f *file) render(x interface{}) string { - var buf bytes.Buffer - if err := printer.Fprint(&buf, f.fset, x); err != nil { - panic(err) - } - return buf.String() -} - -func (f *file) debugRender(x interface{}) string { - var buf bytes.Buffer - if err := ast.Fprint(&buf, f.fset, x, nil); err != nil { - panic(err) - } - return buf.String() -} - -// walker adapts a function to satisfy the ast.Visitor interface. -// The function return whether the walk should proceed into the node's children. -type walker func(ast.Node) bool - -func (w walker) Visit(node ast.Node) ast.Visitor { - if w(node) { - return w - } - return nil -} - -func isIdent(expr ast.Expr, ident string) bool { - id, ok := expr.(*ast.Ident) - return ok && id.Name == ident -} - -// isBlank returns whether id is the blank identifier "_". -// If id == nil, the answer is false. -func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" } - -func isPkgDot(expr ast.Expr, pkg, name string) bool { - sel, ok := expr.(*ast.SelectorExpr) - return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name) -} - -func isOne(expr ast.Expr) bool { - lit, ok := expr.(*ast.BasicLit) - return ok && lit.Kind == token.INT && lit.Value == "1" -} - -func isCgoExported(f *ast.FuncDecl) bool { - if f.Recv != nil || f.Doc == nil { - return false - } - - cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name))) - for _, c := range f.Doc.List { - if cgoExport.MatchString(c.Text) { - return true - } - } - return false -} - -var basicTypeKinds = map[types.BasicKind]string{ - types.UntypedBool: "bool", - types.UntypedInt: "int", - types.UntypedRune: "rune", - types.UntypedFloat: "float64", - types.UntypedComplex: "complex128", - types.UntypedString: "string", -} - -// isUntypedConst reports whether expr is an untyped constant, -// and indicates what its default type is. -// scope may be nil. -func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) { - // Re-evaluate expr outside of its context to see if it's untyped. - // (An expr evaluated within, for example, an assignment context will get the type of the LHS.) - exprStr := f.render(expr) - tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr) - if err != nil { - return "", false - } - if b, ok := tv.Type.(*types.Basic); ok { - if dt, ok := basicTypeKinds[b.Kind()]; ok { - return dt, true - } - } - - return "", false -} - -// firstLineOf renders the given node and returns its first line. -// It will also match the indentation of another node. 
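- // For example, when lintRanges rewrites a range statement that was indented by one tab, the suggested ReplacementLine keeps that leading tab.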
-func (f *file) firstLineOf(node, match ast.Node) string { - line := f.render(node) - if i := strings.Index(line, "\n"); i >= 0 { - line = line[:i] - } - return f.indentOf(match) + line -} - -func (f *file) indentOf(node ast.Node) string { - line := srcLine(f.src, f.fset.Position(node.Pos())) - for i, r := range line { - switch r { - case ' ', '\t': - default: - return line[:i] - } - } - return line // unusual or empty line -} - -func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) { - line := srcLine(f.src, f.fset.Position(node.Pos())) - line = strings.TrimSuffix(line, "\n") - rx := regexp.MustCompile(pattern) - return rx.FindStringSubmatch(line) -} - -// imports returns true if the current file imports the specified package path. -func (f *file) imports(importPath string) bool { - all := astutil.Imports(f.fset, f.f) - for _, p := range all { - for _, i := range p { - uq, err := strconv.Unquote(i.Path.Value) - if err == nil && importPath == uq { - return true - } - } - } - return false -} - -// srcLine returns the complete line at p, including the terminating newline. -func srcLine(src []byte, p token.Position) string { - // Run to end of line in both directions if not at line start/end. - lo, hi := p.Offset, p.Offset+1 - for lo > 0 && src[lo-1] != '\n' { - lo-- - } - for hi < len(src) && src[hi-1] != '\n' { - hi++ - } - return string(src[lo:hi]) -} diff --git a/vendor/github.com/golangci/maligned/LICENSE b/vendor/github.com/golangci/maligned/LICENSE deleted file mode 100644 index 7448756763..0000000000 --- a/vendor/github.com/golangci/maligned/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
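For orientation, the lint-1 package deleted above is driven entirely through the Linter type: Lint parses one file, LintFiles a whole package, and both return the Problem values whose fields are listed near the top of lint.go. A minimal sketch of a caller, using only the API shown above and the import path declared in the deleted source (illustrative only, since this change removes the package from the vendor tree; the file name example.go is a placeholder):

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"log"

    	lint "github.com/golangci/lint-1"
    )

    func main() {
    	src, err := ioutil.ReadFile("example.go")
    	if err != nil {
    		log.Fatal(err)
    	}
    	var l lint.Linter
    	// Lint parses example.go and runs the per-file checks shown above.
    	problems, err := l.Lint("example.go", src)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, p := range problems {
    		// Each Problem carries a position, message text, confidence and category.
    		fmt.Printf("%s: %s (confidence %.2f, category %q)\n",
    			p.Position, p.Text, p.Confidence, p.Category)
    	}
    }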
diff --git a/vendor/github.com/golangci/maligned/README b/vendor/github.com/golangci/maligned/README deleted file mode 100644 index 4e57f6eab2..0000000000 --- a/vendor/github.com/golangci/maligned/README +++ /dev/null @@ -1,7 +0,0 @@ -Install: - - go get github.com/mdempsky/maligned - -Usage: - - maligned cmd/compile/internal/gc cmd/link/internal/ld diff --git a/vendor/github.com/golangci/maligned/maligned.go b/vendor/github.com/golangci/maligned/maligned.go deleted file mode 100644 index c2492b2ffa..0000000000 --- a/vendor/github.com/golangci/maligned/maligned.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package maligned - -import ( - "fmt" - "go/ast" - "go/build" - "go/token" - "go/types" - "sort" - "strings" - - "golang.org/x/tools/go/loader" -) - -var fset = token.NewFileSet() - -type Issue struct { - OldSize, NewSize int - NewStructDef string - Pos token.Position -} - -func Run(prog *loader.Program) []Issue { - flagVerbose := true - fset = prog.Fset - - var issues []Issue - - for _, pkg := range prog.InitialPackages() { - for _, file := range pkg.Files { - ast.Inspect(file, func(node ast.Node) bool { - if s, ok := node.(*ast.StructType); ok { - i := malign(node.Pos(), pkg.Types[s].Type.(*types.Struct), flagVerbose) - if i != nil { - issues = append(issues, *i) - } - } - return true - }) - } - } - - return issues -} - -func malign(pos token.Pos, str *types.Struct, verbose bool) *Issue { - wordSize := int64(8) - maxAlign := int64(8) - switch build.Default.GOARCH { - case "386", "arm": - wordSize, maxAlign = 4, 4 - case "amd64p32": - wordSize = 4 - } - - s := gcSizes{wordSize, maxAlign} - sz := s.Sizeof(str) - opt, fields := optimalSize(str, &s, verbose) - if sz == opt { - return nil - } - - newStructDefParts := []string{"struct{"} - - var w int - for _, f := range fields { - if n := len(f.Name()); n > w { - w = n - } - } - spaces := strings.Repeat(" ", w) - for _, f := range fields { - line := fmt.Sprintf("\t%s%s\t%s,", f.Name(), spaces[len(f.Name()):], f.Type().String()) - newStructDefParts = append(newStructDefParts, line) - } - newStructDefParts = append(newStructDefParts, "}") - - return &Issue{ - OldSize: int(sz), - NewSize: int(opt), - NewStructDef: strings.Join(newStructDefParts, "\n"), - Pos: fset.Position(pos), - } -} - -func optimalSize(str *types.Struct, sizes *gcSizes, stable bool) (int64, []*types.Var) { - nf := str.NumFields() - fields := make([]*types.Var, nf) - alignofs := make([]int64, nf) - sizeofs := make([]int64, nf) - for i := 0; i < nf; i++ { - fields[i] = str.Field(i) - ft := fields[i].Type() - alignofs[i] = sizes.Alignof(ft) - sizeofs[i] = sizes.Sizeof(ft) - } - if stable { // Stable keeps as much of the order as possible, but slower - sort.Stable(&byAlignAndSize{fields, alignofs, sizeofs}) - } else { - sort.Sort(&byAlignAndSize{fields, alignofs, sizeofs}) - } - return sizes.Sizeof(types.NewStruct(fields, nil)), fields -} - -type byAlignAndSize struct { - fields []*types.Var - alignofs []int64 - sizeofs []int64 -} - -func (s *byAlignAndSize) Len() int { return len(s.fields) } -func (s *byAlignAndSize) Swap(i, j int) { - s.fields[i], s.fields[j] = s.fields[j], s.fields[i] - s.alignofs[i], s.alignofs[j] = s.alignofs[j], s.alignofs[i] - s.sizeofs[i], s.sizeofs[j] = s.sizeofs[j], s.sizeofs[i] -} - -func (s *byAlignAndSize) Less(i, j int) bool { - // Place zero sized objects before non-zero sized objects. 
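- // (gcSizes.Sizeof below pads a zero-sized field in the last position to one byte, so hoisting zero-sized fields to the front avoids that cost; see its i == nf-1 special case.)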
- if s.sizeofs[i] == 0 && s.sizeofs[j] != 0 { - return true - } - if s.sizeofs[j] == 0 && s.sizeofs[i] != 0 { - return false - } - - // Next, place more tightly aligned objects before less tightly aligned objects. - if s.alignofs[i] != s.alignofs[j] { - return s.alignofs[i] > s.alignofs[j] - } - - // Lastly, order by size. - if s.sizeofs[i] != s.sizeofs[j] { - return s.sizeofs[i] > s.sizeofs[j] - } - - return false -} - -// Code below based on go/types.StdSizes. - -type gcSizes struct { - WordSize int64 - MaxAlign int64 -} - -func (s *gcSizes) Alignof(T types.Type) int64 { - // NOTE: On amd64, complex64 is 8 byte aligned, - // even though float32 is only 4 byte aligned. - - // For arrays and structs, alignment is defined in terms - // of alignment of the elements and fields, respectively. - switch t := T.Underlying().(type) { - case *types.Array: - // spec: "For a variable x of array type: unsafe.Alignof(x) - // is the same as unsafe.Alignof(x[0]), but at least 1." - return s.Alignof(t.Elem()) - case *types.Struct: - // spec: "For a variable x of struct type: unsafe.Alignof(x) - // is the largest of the values unsafe.Alignof(x.f) for each - // field f of x, but at least 1." - max := int64(1) - for i, nf := 0, t.NumFields(); i < nf; i++ { - if a := s.Alignof(t.Field(i).Type()); a > max { - max = a - } - } - return max - } - a := s.Sizeof(T) // may be 0 - // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1." - if a < 1 { - return 1 - } - if a > s.MaxAlign { - return s.MaxAlign - } - return a -} - -var basicSizes = [...]byte{ - types.Bool: 1, - types.Int8: 1, - types.Int16: 2, - types.Int32: 4, - types.Int64: 8, - types.Uint8: 1, - types.Uint16: 2, - types.Uint32: 4, - types.Uint64: 8, - types.Float32: 4, - types.Float64: 8, - types.Complex64: 8, - types.Complex128: 16, -} - -func (s *gcSizes) Sizeof(T types.Type) int64 { - switch t := T.Underlying().(type) { - case *types.Basic: - k := t.Kind() - if int(k) < len(basicSizes) { - if s := basicSizes[k]; s > 0 { - return int64(s) - } - } - if k == types.String { - return s.WordSize * 2 - } - case *types.Array: - n := t.Len() - if n == 0 { - return 0 - } - a := s.Alignof(t.Elem()) - z := s.Sizeof(t.Elem()) - return align(z, a)*(n-1) + z - case *types.Slice: - return s.WordSize * 3 - case *types.Struct: - nf := t.NumFields() - if nf == 0 { - return 0 - } - - var o int64 - max := int64(1) - for i := 0; i < nf; i++ { - ft := t.Field(i).Type() - a, sz := s.Alignof(ft), s.Sizeof(ft) - if a > max { - max = a - } - if i == nf-1 && sz == 0 && o != 0 { - sz = 1 - } - o = align(o, a) + sz - } - return align(o, max) - case *types.Interface: - return s.WordSize * 2 - } - return s.WordSize // catch-all -} - -// align returns the smallest y >= x such that y % a == 0. 
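- // For example, align(13, 8) == 16 and align(16, 8) == 16.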
-func align(x, a int64) int64 { - y := x + a - 1 - return y - y%a -} diff --git a/vendor/github.com/golangci/misspell/.gitignore b/vendor/github.com/golangci/misspell/.gitignore index b1b707e326..5e5c368f87 100644 --- a/vendor/github.com/golangci/misspell/.gitignore +++ b/vendor/github.com/golangci/misspell/.gitignore @@ -2,6 +2,9 @@ dist/ bin/ vendor/ +.idea/ +/misspell + # editor turds *~ *.gz diff --git a/vendor/github.com/golangci/misspell/.golangci.yml b/vendor/github.com/golangci/misspell/.golangci.yml new file mode 100644 index 0000000000..31c566eab3 --- /dev/null +++ b/vendor/github.com/golangci/misspell/.golangci.yml @@ -0,0 +1,109 @@ +run: + timeout: 2m + skip-files: [] + +linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + - shadow # FIXME(ldez) must be fixed + gocyclo: + min-complexity: 16 + goconst: + min-len: 3 + min-occurrences: 3 + misspell: + locale: US + funlen: + lines: -1 + statements: 40 + gofumpt: + extra-rules: true + depguard: + rules: + main: + deny: + - pkg: "github.com/instana/testify" + desc: not allowed + - pkg: "github.com/pkg/errors" + desc: Should be replaced by standard lib errors package + godox: + keywords: + - FIXME + gocritic: + enabled-tags: + - diagnostic + - style + - performance + disabled-checks: + - sloppyReassign + - rangeValCopy + - octalLiteral + - paramTypeCombine # already handle by gofumpt.extra-rules + - exitAfterDefer # FIXME(ldez) must be fixed + - ifElseChain # FIXME(ldez) must be fixed + settings: + hugeParam: + sizeThreshold: 100 + forbidigo: + forbid: + - '^print(ln)?$' + - '^panic$' + - '^spew\.Print(f|ln)?$' + - '^spew\.Dump$' + +linters: + enable-all: true + disable: + - deadcode # deprecated + - exhaustivestruct # deprecated + - golint # deprecated + - ifshort # deprecated + - interfacer # deprecated + - maligned # deprecated + - nosnakecase # deprecated + - scopelint # deprecated + - scopelint # deprecated + - structcheck # deprecated + - varcheck # deprecated + - execinquery # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - sqlclosecheck # not relevant (SQL) + - cyclop # duplicate of gocyclo + - dupl + - exhaustive + - exhaustruct + - forbidigo + - gochecknoglobals + - gochecknoinits + - goerr113 + - gomnd + - lll + - nilnil + - nlreturn + - paralleltest + - prealloc + - testpackage + - tparallel + - varnamelen + - wrapcheck + - wsl + - misspell + - gosec # FIXME(ldez) must be fixed + - errcheck # FIXME(ldez) must be fixed + - nonamedreturns # FIXME(ldez) must be fixed + - nakedret # FIXME(ldez) must be fixed + +issues: + exclude-use-default: false + max-per-linter: 0 + max-same-issues: 0 + exclude: + - 'ST1000: at least one file in a package should have a package comment' + - 'package-comments: should have a package comment' + exclude-rules: + - path: .*_test.go + linters: + - funlen + - goconst diff --git a/vendor/github.com/golangci/misspell/.travis.yml b/vendor/github.com/golangci/misspell/.travis.yml deleted file mode 100644 index e63e6c2bdc..0000000000 --- a/vendor/github.com/golangci/misspell/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -sudo: required -dist: trusty -group: edge -language: go -go: - - "1.10" -git: - depth: 1 - -script: - - ./scripts/travis.sh - -# calls goreleaser when a new tag is pushed -deploy: -- provider: script - skip_cleanup: true - script: curl -sL http://git.io/goreleaser | bash - on: - tags: true - condition: $TRAVIS_OS_NAME = linux diff --git a/vendor/github.com/golangci/misspell/Dockerfile b/vendor/github.com/golangci/misspell/Dockerfile index 
b8ea37b4c5..788ce3a775 100644 --- a/vendor/github.com/golangci/misspell/Dockerfile +++ b/vendor/github.com/golangci/misspell/Dockerfile @@ -1,16 +1,13 @@ -FROM golang:1.10.0-alpine +FROM golang:1.19-alpine # cache buster -RUN echo 4 +RUN echo 4 # git is needed for "go get" below RUN apk add --no-cache git make # these are my standard testing / linting tools RUN /bin/true \ - && go get -u github.com/golang/dep/cmd/dep \ - && go get -u github.com/alecthomas/gometalinter \ - && gometalinter --install \ && rm -rf /go/src /go/pkg # # * SCOWL word list @@ -35,3 +32,4 @@ RUN /bin/true \ && wget -O /scowl-wl/words-US-60.txt ${SOURCE_US} \ && wget -O /scowl-wl/words-GB-ise-60.txt ${SOURCE_GB_ISE} +RUN git config --global --add safe.directory "/go/src/github.com/golangci/misspell" diff --git a/vendor/github.com/golangci/misspell/Gopkg.lock b/vendor/github.com/golangci/misspell/Gopkg.lock deleted file mode 100644 index 90ed45115f..0000000000 --- a/vendor/github.com/golangci/misspell/Gopkg.lock +++ /dev/null @@ -1,24 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/gobwas/glob" - packages = [ - ".", - "compiler", - "match", - "syntax", - "syntax/ast", - "syntax/lexer", - "util/runes", - "util/strings" - ] - revision = "5ccd90ef52e1e632236f7326478d4faa74f99438" - version = "v0.2.3" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "087ea4c49358ea8258ad9edfe514cd5ce9975c889c258e5ec7b5d2b720aae113" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/golangci/misspell/Gopkg.toml b/vendor/github.com/golangci/misspell/Gopkg.toml deleted file mode 100644 index e9b8e6a45a..0000000000 --- a/vendor/github.com/golangci/misspell/Gopkg.toml +++ /dev/null @@ -1,34 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/gobwas/glob" - version = "0.2.3" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/github.com/golangci/misspell/LICENSE b/vendor/github.com/golangci/misspell/LICENSE index 423e1f9e0f..bfcfcd3013 100644 --- a/vendor/github.com/golangci/misspell/LICENSE +++ b/vendor/github.com/golangci/misspell/LICENSE @@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- diff --git a/vendor/github.com/golangci/misspell/Makefile b/vendor/github.com/golangci/misspell/Makefile index 862ab77b0d..783f977cb4 100644 --- a/vendor/github.com/golangci/misspell/Makefile +++ b/vendor/github.com/golangci/misspell/Makefile @@ -1,17 +1,18 @@ -CONTAINER=nickg/misspell +CONTAINER=golangci/misspell + +default: lint test build install: ## install misspell into GOPATH/bin go install ./cmd/misspell -build: hooks ## build and lint misspell - ./scripts/build.sh +build: ## build misspell + go build ./cmd/misspell test: ## run all tests - go test . + go test -v . -# real publishing is done only by travis -publish: ## test goreleaser - ./scripts/goreleaser-dryrun.sh +lint: ## run linter + golangci-lint run # the grep in line 2 is to remove misspellings in the spelling dictionary # that trigger false positives!! @@ -37,31 +38,22 @@ clean: ## clean up time go clean ./... git gc --aggressive -ci: ## run test like travis-ci does, requires docker +ci: docker-build ## run test like travis-ci does, requires docker docker run --rm \ - -v $(PWD):/go/src/github.com/client9/misspell \ - -w /go/src/github.com/client9/misspell \ + -v $(PWD):/go/src/github.com/golangci/misspell \ + -w /go/src/github.com/golangci/misspell \ ${CONTAINER} \ - make build falsepositives + make install falsepositives docker-build: ## build a docker test image docker build -t ${CONTAINER} . -docker-pull: ## pull latest test image - docker pull ${CONTAINER} - docker-console: ## log into the test image docker run --rm -it \ - -v $(PWD):/go/src/github.com/client9/misspell \ - -w /go/src/github.com/client9/misspell \ + -v $(PWD):/go/src/github.com/golangci/misspell \ + -w /go/src/github.com/golangci/misspell \ ${CONTAINER} sh -.git/hooks/pre-commit: scripts/pre-commit.sh - cp -f scripts/pre-commit.sh .git/hooks/pre-commit -.git/hooks/commit-msg: scripts/commit-msg.sh - cp -f scripts/commit-msg.sh .git/hooks/commit-msg -hooks: .git/hooks/pre-commit .git/hooks/commit-msg ## install git precommit hooks - .PHONY: help ci console docker-build bench # https://www.client9.com/self-documenting-makefiles/ @@ -69,6 +61,6 @@ help: @awk -F ':|##' '/^[^\t].+?:.*?##/ {\ printf "\033[36m%-30s\033[0m %s\n", $$1, $$NF \ }' $(MAKEFILE_LIST) -.DEFAULT_GOAL=help +.DEFAULT_GOAL=default .PHONY=help diff --git a/vendor/github.com/golangci/misspell/README.md b/vendor/github.com/golangci/misspell/README.md index 5b68af04da..cccd04996f 100644 --- a/vendor/github.com/golangci/misspell/README.md +++ b/vendor/github.com/golangci/misspell/README.md @@ -19,7 +19,7 @@ Both will install as `./bin/misspell`. You can adjust the download location usi If you use [Go](https://golang.org/), the best way to run `misspell` is by using [gometalinter](#gometalinter). Otherwise, install `misspell` the old-fashioned way: ``` -go get -u github.com/client9/misspell/cmd/misspell +go install github.com/client9/misspell/cmd/misspell@latest ``` and misspell will be in your `GOPATH` diff --git a/vendor/github.com/golangci/misspell/ascii.go b/vendor/github.com/golangci/misspell/ascii.go index 1430718d6a..d60af5a8da 100644 --- a/vendor/github.com/golangci/misspell/ascii.go +++ b/vendor/github.com/golangci/misspell/ascii.go @@ -1,7 +1,7 @@ package misspell -// ByteToUpper converts an ascii byte to upper cases -// Uses a branchless algorithm +// ByteToUpper converts an ascii byte to upper cases. +// Uses a branch-less algorithm. 
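The ascii.go hunks around this point touch only comments, but the branch-less trick they describe is worth a sanity check. A small property test against the obvious branching version for every ASCII byte; note the third assignment in the function body is reconstructed from the upstream source, since the hunk context shown here elides it:

```go
package main

import "fmt"

// byteToLower mirrors misspell's ByteToLower. The bit arithmetic sets
// 0x20 only when the byte falls in 'A'..'Z', with no branch taken.
func byteToLower(eax byte) byte {
	ebx := eax&byte(0x7f) + byte(0x25)
	ebx = ebx&byte(0x7f) + byte(0x1a)
	ebx = ((ebx & ^eax) >> 2) & byte(0x20) // reconstructed line
	return eax + ebx
}

func main() {
	for b := byte(0); b < 128; b++ {
		want := b
		if b >= 'A' && b <= 'Z' {
			want = b + ('a' - 'A') // the plain branching version
		}
		if got := byteToLower(b); got != want {
			fmt.Printf("mismatch at %#x: got %q want %q\n", b, got, want)
			return
		}
	}
	fmt.Println("all 128 ASCII bytes agree")
}
```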
func ByteToUpper(x byte) byte { b := byte(0x80) | x c := b - byte(0x61) @@ -10,8 +10,8 @@ func ByteToUpper(x byte) byte { return x - (e >> 2) } -// ByteToLower converts an ascii byte to lower case -// uses a branchless algorithm +// ByteToLower converts an ascii byte to lower case. +// Uses a branch-less algorithm. func ByteToLower(eax byte) byte { ebx := eax&byte(0x7f) + byte(0x25) ebx = ebx&byte(0x7f) + byte(0x1a) @@ -19,7 +19,7 @@ func ByteToLower(eax byte) byte { return eax + ebx } -// ByteEqualFold does ascii compare, case insensitive +// ByteEqualFold does ascii compare, case insensitive. func ByteEqualFold(a, b byte) bool { return a == b || ByteToLower(a) == ByteToLower(b) } @@ -27,7 +27,7 @@ func ByteEqualFold(a, b byte) bool { // StringEqualFold ASCII case-insensitive comparison // golang toUpper/toLower for both bytes and strings // appears to be Unicode based which is super slow -// based from https://codereview.appspot.com/5180044/patch/14007/21002 +// based from https://codereview.appspot.com/5180044/patch/14007/21002. func StringEqualFold(s1, s2 string) bool { if len(s1) != len(s2) { return false @@ -47,9 +47,7 @@ func StringEqualFold(s1, s2 string) bool { return true } -// StringHasPrefixFold is similar to strings.HasPrefix but comparison -// is done ignoring ASCII case. -// / +// StringHasPrefixFold is similar to strings.HasPrefix but comparison is done ignoring ASCII case. func StringHasPrefixFold(s1, s2 string) bool { // prefix is bigger than input --> false if len(s1) < len(s2) { diff --git a/vendor/github.com/golangci/misspell/case.go b/vendor/github.com/golangci/misspell/case.go index 2ea3850dfa..0b580bedbc 100644 --- a/vendor/github.com/golangci/misspell/case.go +++ b/vendor/github.com/golangci/misspell/case.go @@ -4,10 +4,10 @@ import ( "strings" ) -// WordCase is an enum of various word casing styles +// WordCase is an enum of various word casing styles. type WordCase int -// Various WordCase types.. likely to be not correct +// Various WordCase types... likely to be not correct. const ( CaseUnknown WordCase = iota CaseLower @@ -15,7 +15,7 @@ const ( CaseTitle ) -// CaseStyle returns what case style a word is in +// CaseStyle returns what case style a word is in. func CaseStyle(word string) WordCase { upperCount := 0 lowerCount := 0 @@ -42,11 +42,10 @@ func CaseStyle(word string) WordCase { return CaseUnknown } -// CaseVariations returns -// If AllUpper or First-Letter-Only is upcased: add the all upper case version -// If AllLower, add the original, the title and upcase forms -// If Mixed, return the original, and the all upcase form -// +// CaseVariations returns: +// If AllUpper or First-Letter-Only is upper-cased: add the all upper case version. +// If AllLower, add the original, the title and upper-case forms. +// If Mixed, return the original, and the all upper-case form. 
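CaseStyle and CaseVariations (whose body follows) are what let a single lower-case dictionary rule catch teh, Teh, and TEH. A usage sketch, assuming the vendored package imports as github.com/golangci/misspell; the expected output is inferred from the doc comment above rather than executed against this exact revision:

```go
package main

import (
	"fmt"

	"github.com/golangci/misspell"
)

func main() {
	word := "teh"
	style := misspell.CaseStyle(word) // CaseLower for an all-lower word

	// Per the doc comment: an all-lower word should yield the original,
	// the Title-case form, and the ALL-UPPER form, e.g. [teh Teh TEH].
	fmt.Println(misspell.CaseVariations(word, style))
}
```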
func CaseVariations(word string, style WordCase) []string { switch style { case CaseLower: diff --git a/vendor/github.com/golangci/misspell/goreleaser.yml b/vendor/github.com/golangci/misspell/goreleaser.yml index 560cb3810c..97aa83e5ac 100644 --- a/vendor/github.com/golangci/misspell/goreleaser.yml +++ b/vendor/github.com/golangci/misspell/goreleaser.yml @@ -1,11 +1,7 @@ -# goreleaser.yml -# https://github.com/goreleaser/goreleaser - project_name: misspell builds: - - - main: cmd/misspell/main.go + - main: cmd/misspell/main.go binary: misspell ldflags: -s -w -X main.version={{.Version}} goos: @@ -14,22 +10,18 @@ builds: - windows goarch: - amd64 + - arm64 env: - CGO_ENABLED=0 - ignore: - - goos: darwin - goarch: 386 - - goos: windows - goarch: 386 -archive: - name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}" - replacements: - amd64: 64bit - 386: 32bit - darwin: mac - files: - - none* +archives: + - name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + replacements: + amd64: 64bit + 386: 32bit + darwin: mac + files: + - LICENSE checksum: name_template: "{{ .ProjectName }}_{{ .Version }}_checksums.txt" diff --git a/vendor/github.com/golangci/misspell/install-misspell.sh b/vendor/github.com/golangci/misspell/install-misspell.sh index e24a84a20b..51e9b3372a 100644 --- a/vendor/github.com/golangci/misspell/install-misspell.sh +++ b/vendor/github.com/golangci/misspell/install-misspell.sh @@ -6,12 +6,12 @@ set -e usage() { this=$1 cat < 50000 { fin, err := os.Open(filename) if err != nil { - return "", fmt.Errorf("Unable to open large file %q: %s", filename, err) + return "", fmt.Errorf("unable to open large file %q: %w", filename, err) } defer fin.Close() buf := make([]byte, 512) _, err = io.ReadFull(fin, buf) if err != nil { - return "", fmt.Errorf("Unable to read 512 bytes from %q: %s", filename, err) + return "", fmt.Errorf("unable to read 512 bytes from %q: %w", filename, err) } if !isTextFile(buf) { return "", nil } - // set so we don't double check this file + // set so we don't double-check this file isText = true } // read in whole file - raw, err := ioutil.ReadFile(filename) + raw, err := os.ReadFile(filename) if err != nil { - return "", fmt.Errorf("Unable to read all %q: %s", filename, err) + return "", fmt.Errorf("unable to read all %q: %w", filename, err) } if !isText && !isTextFile(raw) { diff --git a/vendor/github.com/golangci/misspell/notwords.go b/vendor/github.com/golangci/misspell/notwords.go index 06d0d5a5ad..a250cf7f6e 100644 --- a/vendor/github.com/golangci/misspell/notwords.go +++ b/vendor/github.com/golangci/misspell/notwords.go @@ -13,10 +13,10 @@ var ( ) // RemovePath attempts to strip away embedded file system paths, e.g. -// /foo/bar or /static/myimg.png // -// TODO: windows style +// /foo/bar or /static/myimg.png // +// TODO: windows style. func RemovePath(s string) string { out := bytes.Buffer{} var idx int @@ -57,28 +57,28 @@ func RemovePath(s string) string { return out.String() } -// replaceWithBlanks returns a string with the same number of spaces as the input +// replaceWithBlanks returns a string with the same number of spaces as the input. func replaceWithBlanks(s string) string { return strings.Repeat(" ", len(s)) } -// RemoveEmail remove email-like strings, e.g. "nickg+junk@xfoobar.com", "nickg@xyz.abc123.biz" +// RemoveEmail remove email-like strings, e.g. "nickg+junk@xfoobar.com", "nickg@xyz.abc123.biz". 
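The notwords.go helpers above all funnel through replaceWithBlanks, and the detail that matters is width preservation: every stripped path, email, or host is swapped for the same number of spaces, so byte offsets computed later still point at the right column of the original line. A self-contained sketch of that property (the email regex is my own crude stand-in for the package's unexported reEmail):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// email is a deliberately simple matcher; the exact pattern is not the point.
var email = regexp.MustCompile(`[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+`)

// replaceWithBlanks mirrors the helper above: same width out as in.
func replaceWithBlanks(s string) string {
	return strings.Repeat(" ", len(s))
}

func main() {
	line := "mail nickg@example.com about teh bug"
	clean := email.ReplaceAllStringFunc(line, replaceWithBlanks)

	fmt.Println(len(clean) == len(line)) // true: total width unchanged
	fmt.Println(strings.Index(clean, "teh") == strings.Index(line, "teh")) // true
}
```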
func RemoveEmail(s string) string { return reEmail.ReplaceAllStringFunc(s, replaceWithBlanks) } -// RemoveHost removes host-like strings "foobar.com" "abc123.fo1231.biz" +// RemoveHost removes host-like strings "foobar.com" "abc123.fo1231.biz". func RemoveHost(s string) string { return reHost.ReplaceAllStringFunc(s, replaceWithBlanks) } -// RemoveBackslashEscapes removes characters that are preceeded by a backslash -// commonly found in printf format stringd "\nto" +// RemoveBackslashEscapes removes characters that are preceded by a backslash. +// commonly found in printf format string "\nto". func removeBackslashEscapes(s string) string { return reBackslash.ReplaceAllStringFunc(s, replaceWithBlanks) } -// RemoveNotWords blanks out all the not words +// RemoveNotWords blanks out all the not words. func RemoveNotWords(s string) string { // do most selective/specific first return removeBackslashEscapes(RemoveHost(RemoveEmail(RemovePath(StripURL(s))))) diff --git a/vendor/github.com/golangci/misspell/replace.go b/vendor/github.com/golangci/misspell/replace.go index a99bbcc582..bcfcf8deb5 100644 --- a/vendor/github.com/golangci/misspell/replace.go +++ b/vendor/github.com/golangci/misspell/replace.go @@ -18,7 +18,7 @@ func max(x, y int) int { func inArray(haystack []string, needle string) bool { for _, word := range haystack { - if needle == word { + if strings.EqualFold(needle, word) { return true } } @@ -27,7 +27,7 @@ func inArray(haystack []string, needle string) bool { var wordRegexp = regexp.MustCompile(`[a-zA-Z0-9']+`) -// Diff is datastructure showing what changed in a single line +// Diff is datastructures showing what changed in a single line. type Diff struct { Filename string FullLine string @@ -37,7 +37,7 @@ type Diff struct { Corrected string } -// Replacer is the main struct for spelling correction +// Replacer is the main struct for spelling correction. type Replacer struct { Replacements []string Debug bool @@ -45,7 +45,7 @@ type Replacer struct { corrected map[string]string } -// New creates a new default Replacer using the main rule list +// New creates a new default Replacer using the main rule list. func New() *Replacer { r := Replacer{ Replacements: DictMain, @@ -54,31 +54,32 @@ func New() *Replacer { return &r } -// RemoveRule deletes existings rules. -// TODO: make inplace to save memory +// RemoveRule deletes existing rules. +// The content of `ignore` is case-insensitive. +// TODO: make in place to save memory. func (r *Replacer) RemoveRule(ignore []string) { - newwords := make([]string, 0, len(r.Replacements)) + newWords := make([]string, 0, len(r.Replacements)) for i := 0; i < len(r.Replacements); i += 2 { if inArray(ignore, r.Replacements[i]) { continue } - newwords = append(newwords, r.Replacements[i:i+2]...) + newWords = append(newWords, r.Replacements[i:i+2]...) } r.engine = nil - r.Replacements = newwords + r.Replacements = newWords } // AddRuleList appends new rules. // Input is in the same form as Strings.Replacer: [ old1, new1, old2, new2, ....] -// Note: does not check for duplictes +// Note: does not check for duplicates. func (r *Replacer) AddRuleList(additions []string) { r.engine = nil r.Replacements = append(r.Replacements, additions...) } -// Compile compiles the rules. Required before using the Replace functions +// Compile compiles the rules. +// Required before using the Replace functions. 
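Taken together, the Replacer methods touched in these hunks (New, RemoveRule, AddRuleList, and Compile, whose body follows) form the package's public workflow. A usage sketch; it assumes both misspellings are present in the stock DictMain, and note that per the RemoveRule hunk above the ignore list now matches case-insensitively:

```go
package main

import (
	"fmt"

	"github.com/golangci/misspell"
)

func main() {
	r := misspell.New() // a Replacer over the stock DictMain rules

	// Drop one rule before compiling the engine.
	r.RemoveRule([]string{"acheivement"})
	r.Compile() // required after mutating the rule set

	corrected, diffs := r.Replace("a noteable acheivement")
	fmt.Println(corrected) // "noteable" corrected, "acheivement" left alone
	for _, d := range diffs {
		fmt.Println(d.Corrected) // Corrected is a field of the Diff struct above
	}
}
```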
func (r *Replacer) Compile() { - r.corrected = make(map[string]string, len(r.Replacements)/2) for i := 0; i < len(r.Replacements); i += 2 { r.corrected[r.Replacements[i]] = r.Replacements[i+1] @@ -92,11 +93,14 @@ extract words from each line1 replace word -> newword if word == new-word - continue + + continue + if new-word in list of replacements - continue -new word not original, and not in list of replacements - some substring got mixed up. UNdo + + continue + +new word not original, and not in list of replacements some substring got mixed up. UNdo. */ func (r *Replacer) recheckLine(s string, lineNum int, buf io.Writer, next func(Diff)) { first := 0 @@ -136,9 +140,8 @@ func (r *Replacer) recheckLine(s string, lineNum int, buf io.Writer, next func(D io.WriteString(buf, s[first:]) } -// ReplaceGo is a specialized routine for correcting Golang source -// files. Currently only checks comments, not identifiers for -// spelling. +// ReplaceGo is a specialized routine for correcting Golang source files. +// Currently only checks comments, not identifiers for spelling. func (r *Replacer) ReplaceGo(input string) (string, []Diff) { var s scanner.Scanner s.Init(strings.NewReader(input)) @@ -169,7 +172,7 @@ Loop: return input, nil } if lastPos < len(input) { - output = output + input[lastPos:] + output += input[lastPos:] } diffs := make([]Diff, 0, 8) buf := bytes.NewBuffer(make([]byte, 0, max(len(input), len(output))+100)) @@ -187,11 +190,9 @@ Loop: } return buf.String(), diffs - } -// Replace is corrects misspellings in input, returning corrected version -// along with a list of diffs. +// Replace is corrects misspellings in input, returning corrected version along with a list of diffs. func (r *Replacer) Replace(input string) (string, []Diff) { output := r.engine.Replace(input) if input == output { @@ -215,8 +216,8 @@ func (r *Replacer) Replace(input string) (string, []Diff) { return buf.String(), diffs } -// ReplaceReader applies spelling corrections to a reader stream. Diffs are -// emitted through a callback. +// ReplaceReader applies spelling corrections to a reader stream. +// Diffs are emitted through a callback. func (r *Replacer) ReplaceReader(raw io.Reader, w io.Writer, next func(Diff)) error { var ( err error @@ -239,7 +240,7 @@ func (r *Replacer) ReplaceReader(raw io.Reader, w io.Writer, next func(Diff)) er io.WriteString(w, line) continue } - // but it can be inaccurate, so we need to double check + // but it can be inaccurate, so we need to double-check r.recheckLine(line, lineNum, w, next) } return nil diff --git a/vendor/github.com/golangci/misspell/stringreplacer.go b/vendor/github.com/golangci/misspell/stringreplacer.go index 3151eceb70..73ca9a56ac 100644 --- a/vendor/github.com/golangci/misspell/stringreplacer.go +++ b/vendor/github.com/golangci/misspell/stringreplacer.go @@ -6,7 +6,6 @@ package misspell import ( "io" - // "log" "strings" ) @@ -38,7 +37,7 @@ func (r *StringReplacer) Replace(s string) string { } // WriteString writes s to w with all replacements performed. -func (r *StringReplacer) WriteString(w io.Writer, s string) (n int, err error) { +func (r *StringReplacer) WriteString(w io.Writer, s string) (int, error) { return r.r.WriteString(w, s) } @@ -46,14 +45,14 @@ func (r *StringReplacer) WriteString(w io.Writer, s string) (n int, err error) { // and values may be empty. 
For example, the trie containing keys "ax", "ay", // "bcbc", "x" and "xy" could have eight nodes: // -// n0 - -// n1 a- -// n2 .x+ -// n3 .y+ -// n4 b- -// n5 .cbc+ -// n6 x+ -// n7 .y+ +// n0 - +// n1 a- +// n2 .x+ +// n3 .y+ +// n4 b- +// n5 .cbc+ +// n6 x+ +// n7 .y+ // // n0 is the root node, and its children are n1, n4 and n6; n1's children are // n2 and n3; n4's child is n5; n6's child is n7. Nodes n0, n1 and n4 (marked @@ -103,6 +102,7 @@ func (t *trieNode) add(key, val string, priority int, r *genericReplacer) { return } + //nolint:nestif // TODO(ldez) must be fixed. if t.prefix != "" { // Need to split the prefix among multiple nodes. var n int // length of the longest common prefix @@ -157,42 +157,6 @@ func (t *trieNode) add(key, val string, priority int, r *genericReplacer) { } } -func (r *genericReplacer) lookup(s string, ignoreRoot bool) (val string, keylen int, found bool) { - // Iterate down the trie to the end, and grab the value and keylen with - // the highest priority. - bestPriority := 0 - node := &r.root - n := 0 - for node != nil { - if node.priority > bestPriority && !(ignoreRoot && node == &r.root) { - bestPriority = node.priority - val = node.value - keylen = n - found = true - } - - if s == "" { - break - } - if node.table != nil { - index := r.mapping[ByteToLower(s[0])] - if int(index) == r.tableSize { - break - } - node = node.table[index] - s = s[1:] - n++ - } else if node.prefix != "" && StringHasPrefixFold(s, node.prefix) { - n += len(node.prefix) - s = s[len(node.prefix):] - node = node.next - } else { - break - } - } - return -} - // genericReplacer is the fully generic algorithm. // It's used as a fallback when nothing faster can be used. type genericReplacer struct { @@ -236,38 +200,40 @@ func makeGenericReplacer(oldnew []string) *genericReplacer { return r } -type appendSliceWriter []byte - -// Write writes to the buffer to satisfy io.Writer. -func (w *appendSliceWriter) Write(p []byte) (int, error) { - *w = append(*w, p...) - return len(p), nil -} - -// WriteString writes to the buffer without string->[]byte->string allocations. -func (w *appendSliceWriter) WriteString(s string) (int, error) { - *w = append(*w, s...) - return len(s), nil -} - -type stringWriterIface interface { - WriteString(string) (int, error) -} - -type stringWriter struct { - w io.Writer -} - -func (w stringWriter) WriteString(s string) (int, error) { - return w.w.Write([]byte(s)) -} +func (r *genericReplacer) lookup(s string, ignoreRoot bool) (val string, keylen int, found bool) { + // Iterate down the trie to the end, and grab the value and keylen with + // the highest priority. + bestPriority := 0 + node := &r.root + n := 0 + for node != nil { + if node.priority > bestPriority && !(ignoreRoot && node == &r.root) { + bestPriority = node.priority + val = node.value + keylen = n + found = true + } -func getStringWriter(w io.Writer) stringWriterIface { - sw, ok := w.(stringWriterIface) - if !ok { - sw = stringWriter{w} + if s == "" { + break + } + if node.table != nil { + index := r.mapping[ByteToLower(s[0])] + if int(index) == r.tableSize { + break + } + node = node.table[index] + s = s[1:] + n++ + } else if node.prefix != "" && StringHasPrefixFold(s, node.prefix) { + n += len(node.prefix) + s = s[len(node.prefix):] + node = node.next + } else { + break + } } - return sw + return } func (r *genericReplacer) Replace(s string) string { @@ -276,6 +242,7 @@ func (r *genericReplacer) Replace(s string) string { return string(buf) } +//nolint:gocognit // TODO(ldez) must be fixed. 
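The relocated lookup method walks that trie, keeping the highest-priority match seen at each position. The sketch below reproduces the observable behavior for the doc comment's example keys with a naive longest-match scan instead of a trie; the real genericReplacer additionally prefers earlier rules via the priority field, which matters when overlapping keys have the same length:

```go
package main

import (
	"fmt"
	"strings"
)

// replaceLongest applies, at each position, the longest case-insensitively
// matching key, or copies one byte and moves on.
func replaceLongest(s string, rules map[string]string) string {
	var b strings.Builder
	for i := 0; i < len(s); {
		best := ""
		for k := range rules {
			if len(k) > len(best) && i+len(k) <= len(s) &&
				strings.EqualFold(s[i:i+len(k)], k) {
				best = k
			}
		}
		if best == "" {
			b.WriteByte(s[i])
			i++
			continue
		}
		b.WriteString(rules[best])
		i += len(best)
	}
	return b.String()
}

func main() {
	// The five keys from the trie example above.
	rules := map[string]string{
		"ax": "AX", "ay": "AY", "bcbc": "BCBC", "x": "X", "xy": "XY",
	}
	fmt.Println(replaceLongest("Axbcbc and xylophone", rules))
	// -> "AXBCBC and XYlophone": "Ax" matched despite the capital A,
	// and "xy" won over the shorter "x".
}
```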
func (r *genericReplacer) WriteString(w io.Writer, s string) (n int, err error) { sw := getStringWriter(w) var last, wn int @@ -316,7 +283,7 @@ func (r *genericReplacer) WriteString(w io.Writer, s string) (n int, err error) if err != nil { return } - //log.Printf("%d: Going to correct %q with %q", i, s[i:i+keylen], val) + // debug helper: log.Printf("%d: Going to correct %q with %q", i, s[i:i+keylen], val) wn, err = sw.WriteString(val) n += wn if err != nil { @@ -334,3 +301,33 @@ func (r *genericReplacer) WriteString(w io.Writer, s string) (n int, err error) } return } + +type appendSliceWriter []byte + +// Write writes to the buffer to satisfy io.Writer. +func (w *appendSliceWriter) Write(p []byte) (int, error) { + *w = append(*w, p...) + return len(p), nil +} + +// WriteString writes to the buffer without string->[]byte->string allocations. +func (w *appendSliceWriter) WriteString(s string) (int, error) { + *w = append(*w, s...) + return len(s), nil +} + +type stringWriter struct { + w io.Writer +} + +func (w stringWriter) WriteString(s string) (int, error) { + return w.w.Write([]byte(s)) +} + +func getStringWriter(w io.Writer) io.StringWriter { + sw, ok := w.(io.StringWriter) + if !ok { + sw = stringWriter{w} + } + return sw +} diff --git a/vendor/github.com/golangci/misspell/url.go b/vendor/github.com/golangci/misspell/url.go index 1a259f5f99..203b91a79e 100644 --- a/vendor/github.com/golangci/misspell/url.go +++ b/vendor/github.com/golangci/misspell/url.go @@ -7,11 +7,12 @@ import ( // Regexp for URL https://mathiasbynens.be/demo/url-regex // // original @imme_emosol (54 chars) has trouble with dashes in hostname -// @(https?|ftp)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?$@iS -var reURL = regexp.MustCompile(`(?i)(https?|ftp)://(-\.)?([^\s/?\.#]+\.?)+(/[^\s]*)?`) +// @(https?|ftp)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?$@iS. +var reURL = regexp.MustCompile(`(?i)(https?|ftp)://(-\.)?([^\s/?.#]+\.?)+(/\S*)?`) // StripURL attemps to replace URLs with blank spaces, e.g. -// "xxx http://foo.com/ yyy -> "xxx yyyy" +// +// "xxx http://foo.com/ yyy -> "xxx yyyy". func StripURL(s string) string { return reURL.ReplaceAllStringFunc(s, replaceWithBlanks) } diff --git a/vendor/github.com/golangci/plugin-module-register/LICENSE b/vendor/github.com/golangci/plugin-module-register/LICENSE new file mode 100644 index 0000000000..e72bfddabc --- /dev/null +++ b/vendor/github.com/golangci/plugin-module-register/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price.
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. \ No newline at end of file diff --git a/vendor/github.com/golangci/plugin-module-register/register/register.go b/vendor/github.com/golangci/plugin-module-register/register/register.go new file mode 100644 index 0000000000..72ad7f46f2 --- /dev/null +++ b/vendor/github.com/golangci/plugin-module-register/register/register.go @@ -0,0 +1,73 @@ +package register + +import ( + "bytes" + "encoding/json" + "fmt" + "sync" + + "golang.org/x/tools/go/analysis" +) + +// Plugins load mode.
+const ( + LoadModeSyntax = "syntax" + LoadModeTypesInfo = "typesinfo" +) + +var ( + pluginsMu sync.RWMutex + plugins = make(map[string]NewPlugin) +) + +// LinterPlugin is the interface of the plugin structure. +type LinterPlugin interface { + BuildAnalyzers() ([]*analysis.Analyzer, error) + GetLoadMode() string +} + +// NewPlugin is the contract of the constructor of a plugin. +type NewPlugin func(conf any) (LinterPlugin, error) + +// Plugin registers a plugin. +func Plugin(name string, p NewPlugin) { + pluginsMu.Lock() + + plugins[name] = p + + pluginsMu.Unlock() +} + +// GetPlugin gets a plugin by name. +func GetPlugin(name string) (NewPlugin, error) { + pluginsMu.Lock() + defer pluginsMu.Unlock() + + p, ok := plugins[name] + if !ok { + return nil, fmt.Errorf("plugin %q not found", name) + } + + return p, nil +} + +// DecodeSettings decodes settings from golangci-lint to the structure of the plugin configuration. +func DecodeSettings[T any](rawSettings any) (T, error) { + var buffer bytes.Buffer + + if err := json.NewEncoder(&buffer).Encode(rawSettings); err != nil { + var zero T + return zero, fmt.Errorf("encoding settings: %w", err) + } + + decoder := json.NewDecoder(&buffer) + decoder.DisallowUnknownFields() + + s := new(T) + if err := decoder.Decode(s); err != nil { + var zero T + return zero, fmt.Errorf("decoding settings: %w", err) + } + + return *s, nil +} diff --git a/vendor/github.com/golangci/revgrep/.golangci.yml b/vendor/github.com/golangci/revgrep/.golangci.yml index b8ed6204fe..02ed5ec849 100644 --- a/vendor/github.com/golangci/revgrep/.golangci.yml +++ b/vendor/github.com/golangci/revgrep/.golangci.yml @@ -28,12 +28,20 @@ linters-settings: linters: enable-all: true disable: - - maligned # Deprecated - - scopelint # Deprecated - - golint # Deprecated - - interfacer # Deprecated - - exhaustivestruct # Deprecated + - deadcode # deprecated + - exhaustivestruct # deprecated + - golint # deprecated + - ifshort # deprecated + - interfacer # deprecated + - maligned # deprecated + - nosnakecase # deprecated + - scopelint # deprecated + - structcheck # deprecated + - varcheck # deprecated - cyclop # duplicate of gocyclo + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - execinquery # not relevant (SQL) - dupl - lll - nestif @@ -54,6 +62,7 @@ linters: - nosnakecase - nonamedreturns - nilerr + - depguard issues: exclude-use-default: false diff --git a/vendor/github.com/golangci/revgrep/revgrep.go b/vendor/github.com/golangci/revgrep/revgrep.go index 4b990fa048..7796b1c01c 100644 --- a/vendor/github.com/golangci/revgrep/revgrep.go +++ b/vendor/github.com/golangci/revgrep/revgrep.go @@ -1,3 +1,4 @@ +// Package revgrep filters static analysis tools to only lines changed based on a commit reference. package revgrep import ( @@ -17,31 +18,26 @@ import ( // Checker provides APIs to filter static analysis tools to specific commits, // such as showing only issues since last commit. type Checker struct { - // Patch file (unified) to read to detect lines being changed, if nil revgrep - // will attempt to detect the VCS and generate an appropriate patch. Auto - // detection will search for uncommitted changes first, if none found, will - // generate a patch from last committed change. File paths within patches - // must be relative to current working directory. + // Patch file (unified) to read to detect lines being changed, + // if nil, revgrep will attempt to detect the VCS and generate an appropriate patch.
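An aside on the `register` package just added above: it is the registration surface for golangci-lint module plugins. A plugin registers a constructor under a name, and `DecodeSettings` round-trips the raw config through JSON (with `DisallowUnknownFields`) into the plugin's own settings struct. Below is a minimal sketch of a plugin built against this API; the plugin name, the settings struct, and the empty analyzer list are illustrative assumptions, not part of this diff.

```go
package example

import (
	"github.com/golangci/plugin-module-register/register"
	"golang.org/x/tools/go/analysis"
)

// settings is a hypothetical plugin configuration; unknown config keys
// would be rejected because DecodeSettings uses DisallowUnknownFields.
type settings struct {
	Threshold int `json:"threshold"`
}

type plugin struct{ s settings }

func init() {
	// Register the constructor under the name golangci-lint will look up.
	register.Plugin("example", func(conf any) (register.LinterPlugin, error) {
		s, err := register.DecodeSettings[settings](conf)
		if err != nil {
			return nil, err
		}
		return &plugin{s: s}, nil
	})
}

func (p *plugin) BuildAnalyzers() ([]*analysis.Analyzer, error) {
	return nil, nil // a real plugin would return its analyzers here
}

func (p *plugin) GetLoadMode() string { return register.LoadModeSyntax }
```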
+ // Auto-detection will search for uncommitted changes first, + // if none found, will generate a patch from last committed change. + // File paths within patches must be relative to current working directory. Patch io.Reader - // NewFiles is a list of file names (with absolute paths) where the entire - // contents of the file is new. + // NewFiles is a list of file names (with absolute paths) where the entire contents of the file are new. NewFiles []string // Debug sets the debug writer for additional output. Debug io.Writer - // RevisionFrom check revision starting at, leave blank for auto detection - // ignored if patch is set. + // RevisionFrom checks revisions starting at, leave blank for auto-detection; ignored if patch is set. RevisionFrom string - // WholeFiles indicates that the user wishes to see all issues that comes up - // anywhere in any file that has been changed in this revision or patch. + // WholeFiles indicates that the user wishes to see all issues that come up anywhere in any file that has been changed in this revision or patch. WholeFiles bool - // RevisionTo checks revision finishing at, leave blank for auto detection - // ignored if patch is set. + // RevisionTo checks revisions finishing at, leave blank for auto-detection; ignored if patch is set. RevisionTo string // Regexp to match path, line number, optional column number, and message. Regexp string - // AbsPath is used to make an absolute path of an issue's filename to be - // relative in order to match patch file. If not set, current working - // directory is used. + // AbsPath is used to make an absolute path of an issue's filename relative in order to match the patch file. + // If not set, current working directory is used. AbsPath string // Calculated changes for next calls to IsNewIssue @@ -56,9 +52,7 @@ type Issue struct { LineNo int // ColNo is the column number or 0 if none could be parsed. ColNo int - // HunkPos is position from file's first @@, for new files this will be the - // line number. - // + // HunkPos is the position from the file's first @@, for new files this will be the line number. // See also: https://developer.github.com/v3/pulls/comments/#create-a-comment HunkPos int // Issue text as it appeared from the tool. @@ -135,16 +129,14 @@ func (c *Checker) IsNewIssue(i InputIssue) (hunkPos int, isNew bool) { return 0, false } -// Check scans reader and writes any lines to writer that have been added in -// Checker.Patch. +// Check scans reader and writes any lines to writer that have been added in Checker.Patch. // // Returns the issues written to writer when no error occurs. // -// If no VCS could be found or other VCS errors occur, all issues are written -// to writer and an error is returned. +// If no VCS could be found or other VCS errors occur, +// all issues are written to writer and an error is returned. // -// File paths in reader must be relative to current working directory or -// absolute. +// File paths in reader must be relative to current working directory or absolute. func (c *Checker) Check(reader io.Reader, writer io.Writer) (issues []Issue, err error) { returnErr := c.Prepare() writeAll := returnErr != nil @@ -265,8 +257,7 @@ func (c *Checker) preparePatch() error { } // linesChanges returns a map of file names to line numbers being changed. -// If key is nil, the file has been recently added, else it contains a slice -// of positions that have been added. +// If key is nil, the file has been recently added, else it contains a slice of positions that have been added.
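Since the hunks above reshape the `Checker` doc comments, a quick hedged sketch of how this API is typically driven may help. The lint-output line is made up, and this assumes the package falls back to a built-in default when `Regexp` is left empty:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/golangci/revgrep"
)

func main() {
	// Only report issues on lines changed since HEAD~; with RevisionFrom
	// and Patch both unset, the Checker would auto-detect the VCS instead.
	c := &revgrep.Checker{RevisionFrom: "HEAD~"}

	lintOutput := "main.go:10:2: something suspicious\n" // hypothetical tool output
	issues, err := c.Check(strings.NewReader(lintOutput), os.Stdout)
	if err != nil {
		// On VCS errors, every issue is written through unfiltered.
		fmt.Fprintln(os.Stderr, err)
	}
	_ = issues
}
```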
func (c *Checker) linesChanged() map[string][]pos { type state struct { file string @@ -343,17 +334,12 @@ func (c *Checker) linesChanged() map[string][]pos { return changes } -// GitPatch returns a patch from a git repository, if no git repository was -// was found and no errors occurred, nil is returned, else an error is returned -// revisionFrom and revisionTo defines the git diff parameters, if left blank -// and there are unstaged changes or untracked files, only those will be returned -// else only check changes since HEAD~. If revisionFrom is set but revisionTo -// is not, untracked files will be included, to exclude untracked files set -// revisionTo to HEAD~. It's incorrect to specify revisionTo without a -// revisionFrom. +// GitPatch returns a patch from a git repository; if no git repository is found and no errors occur, nil is returned, else an error is returned. +// revisionFrom and revisionTo define the git diff parameters; if left blank and there are unstaged changes or untracked files, only those are returned, else only changes since HEAD~ are checked. +// If revisionFrom is set but revisionTo is not, untracked files will be included; to exclude untracked files, set revisionTo to HEAD~. +// It's incorrect to specify revisionTo without a revisionFrom. func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) { - var patch bytes.Buffer - // check if git repo exists if err := exec.Command("git", "status", "--porcelain").Run(); err != nil { // don't return an error, we assume the error is not repo exists @@ -377,8 +363,9 @@ func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) { newFiles = append(newFiles, string(file)) } + var patch bytes.Buffer if revisionFrom != "" { - cmd := exec.Command("git", "diff", "--color=never", "--relative", revisionFrom) + cmd := gitDiff(revisionFrom) if revisionTo != "" { cmd.Args = append(cmd.Args, revisionTo) } @@ -392,12 +379,12 @@ func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) { if revisionTo == "" { return &patch, newFiles, nil } + return &patch, nil, nil } // make a patch for unstaged changes - // use --no-prefix to remove b/ given: +++ b/main.go - cmd := exec.Command("git", "diff", "--color=never", "--relative", "--") + cmd := gitDiff("--") cmd.Stdout = &patch if err := cmd.Run(); err != nil { return nil, nil, fmt.Errorf("error executing git diff: %w", err) } @@ -412,7 +399,7 @@ func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) { // check for changes in recent commit - cmd = exec.Command("git", "diff", "--color=never", "--relative", "HEAD~", "--") + cmd = gitDiff("HEAD~", "--") cmd.Stdout = &patch if err := cmd.Run(); err != nil { return nil, nil, fmt.Errorf("error executing git diff HEAD~: %w", err) } @@ -420,3 +407,55 @@ func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) { return &patch, nil, nil } + +func gitDiff(extraArgs ...string) *exec.Cmd { + cmd := exec.Command("git", "diff", "--color=never", "--no-ext-diff") + + if isSupportedByGit(2, 41, 0) { + cmd.Args = append(cmd.Args, "--default-prefix") + } + + cmd.Args = append(cmd.Args, "--relative") + cmd.Args = append(cmd.Args, extraArgs...)
+ + return cmd +} + +func isSupportedByGit(major, minor, patch int) bool { + output, err := exec.Command("git", "version").CombinedOutput() + if err != nil { + return false + } + + parts := bytes.Split(bytes.TrimSpace(output), []byte(" ")) + if len(parts) < 3 { + return false + } + + v := string(parts[2]) + if v == "" { + return false + } + + vp := regexp.MustCompile(`^(\d+)\.(\d+)(?:\.(\d+))?.*$`).FindStringSubmatch(v) + if len(vp) < 4 { + return false + } + + currentMajor, err := strconv.Atoi(vp[1]) + if err != nil { + return false + } + + currentMinor, err := strconv.Atoi(vp[2]) + if err != nil { + return false + } + + currentPatch, err := strconv.Atoi(vp[3]) + if err != nil { + return false + } + + return currentMajor*1_000_000_000+currentMinor*1_000_000+currentPatch*1_000 >= major*1_000_000_000+minor*1_000_000+patch*1_000 +} diff --git a/vendor/github.com/golangci/unconvert/README b/vendor/github.com/golangci/unconvert/README deleted file mode 100644 index dbaea4f572..0000000000 --- a/vendor/github.com/golangci/unconvert/README +++ /dev/null @@ -1,36 +0,0 @@ -About: - -The unconvert program analyzes Go packages to identify unnecessary -type conversions; i.e., expressions T(x) where x already has type T. - -Install: - - $ go get github.com/mdempsky/unconvert - -Usage: - - $ unconvert -v bytes fmt - GOROOT/src/bytes/reader.go:117:14: unnecessary conversion - abs = int64(r.i) + offset - ^ - GOROOT/src/fmt/print.go:411:21: unnecessary conversion - p.fmt.integer(int64(v), 16, unsigned, udigits) - ^ - -Flags: - -Using the -v flag, unconvert will also print the source line and a -caret to indicate the unnecessary conversion's position therein. - -Using the -apply flag, unconvert will rewrite the Go source files -without the unnecessary type conversions. - -Using the -all flag, unconvert will analyze the Go packages under all -possible GOOS/GOARCH combinations, and only identify conversions that -are unnecessary in all cases. - -E.g., syscall.Timespec's Sec and Nsec fields are int64 under -linux/amd64 but int32 under linux/386. An int64(ts.Sec) conversion -that appears in a linux/amd64-only file will be identified as -unnecessary, but it will be preserved if it occurs in a file that's -compiled for both linux/amd64 and linux/386. diff --git a/vendor/github.com/golangci/unconvert/README.md b/vendor/github.com/golangci/unconvert/README.md new file mode 100644 index 0000000000..e9230c2183 --- /dev/null +++ b/vendor/github.com/golangci/unconvert/README.md @@ -0,0 +1,6 @@ +Fork of [unconvert](https://github.com/mdempsky/unconvert) to be usable as a library. + +The specific elements are inside the file `golangci.go`. + +The only modification of the file `unconvert.go` is the removal of the global variables for the flags. +The tests will never work because of that, so the CI is disabled. diff --git a/vendor/github.com/golangci/unconvert/golangci.go b/vendor/github.com/golangci/unconvert/golangci.go new file mode 100644 index 0000000000..306c44e5ec --- /dev/null +++ b/vendor/github.com/golangci/unconvert/golangci.go @@ -0,0 +1,78 @@ +package unconvert + +import ( + "go/ast" + "go/token" + "strings" + "sync" + + "golang.org/x/tools/go/analysis" +) + +// Transformed version of the original unconvert flags section.
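Back to the new `gitDiff`/`isSupportedByGit` helpers above: `--default-prefix` is only passed when the helper reports git at or above 2.41.0, and the comparison works by packing each version triple into one integer. A small self-contained check of that encoding (the `encode` helper is mine, mirroring the weights in the vendored code):

```go
package main

import "fmt"

// encode mirrors isSupportedByGit's weighting: major, minor, and patch
// each get three decimal digits of room below them.
func encode(major, minor, patch int) int {
	return major*1_000_000_000 + minor*1_000_000 + patch*1_000
}

func main() {
	fmt.Println(encode(2, 40, 1) >= encode(2, 41, 0)) // false: too old for --default-prefix
	fmt.Println(encode(2, 42, 0) >= encode(2, 41, 0)) // true: new enough
}
```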
+// The section has been removed from `unconvert.go`. +var ( + flagAll = pointer(false) + flagApply = pointer(false) + flagCPUProfile = pointer("") + flagSafe = pointer(false) + flagV = pointer(false) + flagTests = pointer(true) + flagFastMath = pointer(false) + flagTags = pointer("") + flagConfigs = pointer("") +) + +func pointer[T string | int | int32 | int64 | bool](v T) *T { return &v } + +func Run(pass *analysis.Pass, fastMath, safe bool) []token.Position { + type res struct { + file string + edits editSet + } + + flagFastMath = pointer(fastMath) + flagSafe = pointer(safe) + + ch := make(chan res) + var wg sync.WaitGroup + for _, file := range pass.Files { + file := file + + tokenFile := pass.Fset.File(file.Package) + filename := tokenFile.Position(file.Package).Filename + + // Hack to recognize _cgo_gotypes.go. + if strings.HasSuffix(filename, "-d") || strings.HasSuffix(filename, "/_cgo_gotypes.go") { + continue + } + + wg.Add(1) + go func() { + defer wg.Done() + + v := visitor{info: pass.TypesInfo, file: tokenFile, edits: make(editSet)} + ast.Walk(&v, file) + + ch <- res{filename, v.edits} + }() + } + go func() { + wg.Wait() + close(ch) + }() + + m := make(fileToEditSet) + for r := range ch { + m[r.file] = r.edits + } + + var positions []token.Position + for _, edit := range m { + for position := range edit { + positions = append(positions, position) + } + } + + return positions +} diff --git a/vendor/github.com/golangci/unconvert/unconvert.go b/vendor/github.com/golangci/unconvert/unconvert.go index 38737d39f7..222aeadf88 100644 --- a/vendor/github.com/golangci/unconvert/unconvert.go +++ b/vendor/github.com/golangci/unconvert/unconvert.go @@ -2,15 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Unconvert removes redundant type conversions from Go packages. +// Package unconvert removes redundant type conversions from Go packages. package unconvert import ( "bytes" + "encoding/json" "flag" "fmt" "go/ast" - "go/build" "go/format" "go/parser" "go/token" @@ -18,15 +18,16 @@ import ( "io/ioutil" "log" "os" + "os/exec" "reflect" "runtime/pprof" "sort" + "strings" "sync" "unicode" - "github.com/kisielk/gotool" "golang.org/x/text/width" - "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/packages" ) // Unnecessary conversions are identified by the position @@ -34,6 +35,31 @@ import ( type editSet map[token.Position]struct{} +func (e editSet) add(pos token.Position) { + pos.Offset = 0 + e[pos] = struct{}{} +} + +func (e editSet) has(pos token.Position) bool { + pos.Offset = 0 + _, ok := e[pos] + return ok +} + +func (e editSet) remove(pos token.Position) { + pos.Offset = 0 + delete(e, pos) +} + +// intersect removes positions from e that are not present in x.
+func (e editSet) intersect(x editSet) { + for pos := range e { + if _, ok := x[pos]; !ok { + delete(e, pos) + } + } +} + type fileToEditSet map[string]editSet func apply(file string, edits editSet) { @@ -97,11 +123,11 @@ func (e *editor) rewrite(f *ast.Expr) { } pos := e.file.Position(call.Lparen) - if _, ok := e.edits[pos]; !ok { + if !e.edits.has(pos) { return } *f = call.Args[0] - delete(e.edits, pos) + e.edits.remove(pos) } var ( @@ -161,21 +187,12 @@ func rub(buf []byte) []byte { return res.Bytes() } -var ( - flagAll = flag.Bool("unconvert.all", false, "type check all GOOS and GOARCH combinations") - flagApply = flag.Bool("unconvert.apply", false, "apply edits to source files") - flagCPUProfile = flag.String("unconvert.cpuprofile", "", "write CPU profile to file") - // TODO(mdempsky): Better description and maybe flag name. - flagSafe = flag.Bool("unconvert.safe", false, "be more conservative (experimental)") - flagV = flag.Bool("unconvert.v", false, "verbose output") -) - func usage() { fmt.Fprintf(os.Stderr, "usage: unconvert [flags] [package ...]\n") flag.PrintDefaults() } -func nomain() { +func main() { flag.Usage = usage flag.Parse() @@ -188,18 +205,29 @@ func nomain() { defer pprof.StopCPUProfile() } - importPaths := gotool.ImportPaths(flag.Args()) - if len(importPaths) == 0 { - return - } + patterns := flag.Args() // 0 or more import path patterns. + + var configs [][]string + if *flagConfigs != "" { + if os.Getenv("UNCONVERT_CONFIGS_EXPERIMENT") != "1" { + fmt.Println("WARNING: -configs is experimental and subject to change without notice.") + fmt.Println("Please comment at https://github.com/mdempsky/unconvert/issues/26") + fmt.Println("if you'd like to rely on this interface.") + fmt.Println("(Set UNCONVERT_CONFIGS_EXPERIMENT=1 to silence this warning.)") + fmt.Println() + } - var m fileToEditSet - if *flagAll { - m = mergeEdits(importPaths) + if err := json.Unmarshal([]byte(*flagConfigs), &configs); err != nil { + log.Fatal(err) + } + } else if *flagAll { + configs = allConfigs() } else { - m = computeEdits(importPaths, build.Default.GOOS, build.Default.GOARCH, build.Default.CgoEnabled) + configs = [][]string{nil} } + m := mergeEdits(patterns, configs) + if *flagApply { var wg sync.WaitGroup for f, e := range m { @@ -226,69 +254,36 @@ func nomain() { } } -func Run(prog *loader.Program) []token.Position { - m := computeEditsFromProg(prog) - var conversions []token.Position - for _, positions := range m { - for pos := range positions { - conversions = append(conversions, pos) - } +func allConfigs() [][]string { + out, err := exec.Command("go", "tool", "dist", "list", "-json").Output() + if err != nil { + log.Fatal(err) + } + + var platforms []struct { + GOOS, GOARCH string + } + err = json.Unmarshal(out, &platforms) + if err != nil { + log.Fatal(err) } - return conversions -} -var plats = [...]struct { - goos, goarch string -}{ - // TODO(mdempsky): buildall.bash also builds linux-386-387 and linux-arm-arm5. 
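One more aside on the `editSet` helpers introduced above: `add`, `has`, and `remove` all zero `pos.Offset` before touching the map, so positions are keyed by file, line, and column only, which is what lets `intersect` (and `mergeEdits`) match the same conversion across separate package loads. A hypothetical in-package illustration (`exampleEditSet` is mine, not part of the vendored file):

```go
package unconvert

import (
	"fmt"
	"go/token"
)

// exampleEditSet shows that two positions differing only in byte Offset
// are treated as the same entry, since add and has both zero the Offset.
func exampleEditSet() {
	p1 := token.Position{Filename: "a.go", Line: 10, Column: 5, Offset: 120}
	p2 := token.Position{Filename: "a.go", Line: 10, Column: 5, Offset: 98}

	e := make(editSet)
	e.add(p1)
	fmt.Println(e.has(p2)) // true: keyed by file/line/column only
}
```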
- {"android", "386"}, - {"android", "amd64"}, - {"android", "arm"}, - {"android", "arm64"}, - {"darwin", "386"}, - {"darwin", "amd64"}, - {"darwin", "arm"}, - {"darwin", "arm64"}, - {"dragonfly", "amd64"}, - {"freebsd", "386"}, - {"freebsd", "amd64"}, - {"freebsd", "arm"}, - {"linux", "386"}, - {"linux", "amd64"}, - {"linux", "arm"}, - {"linux", "arm64"}, - {"linux", "mips64"}, - {"linux", "mips64le"}, - {"linux", "ppc64"}, - {"linux", "ppc64le"}, - {"linux", "s390x"}, - {"nacl", "386"}, - {"nacl", "amd64p32"}, - {"nacl", "arm"}, - {"netbsd", "386"}, - {"netbsd", "amd64"}, - {"netbsd", "arm"}, - {"openbsd", "386"}, - {"openbsd", "amd64"}, - {"openbsd", "arm"}, - {"plan9", "386"}, - {"plan9", "amd64"}, - {"plan9", "arm"}, - {"solaris", "amd64"}, - {"windows", "386"}, - {"windows", "amd64"}, + var res [][]string + for _, platform := range platforms { + res = append(res, []string{ + "GOOS=" + platform.GOOS, + "GOARCH=" + platform.GOARCH, + }) + } + return res } -func mergeEdits(importPaths []string) fileToEditSet { +func mergeEdits(patterns []string, configs [][]string) fileToEditSet { m := make(fileToEditSet) - for _, plat := range plats { - for f, e := range computeEdits(importPaths, plat.goos, plat.goarch, false) { + for _, config := range configs { + for f, e := range computeEdits(patterns, config) { if e0, ok := m[f]; ok { - for k := range e0 { - if _, ok := e[k]; !ok { - delete(e0, k) - } - } + e0.intersect(e) } else { m[f] = e } @@ -297,48 +292,48 @@ func mergeEdits(importPaths []string) fileToEditSet { return m } -type noImporter struct{} - -func (noImporter) Import(path string) (*types.Package, error) { - panic("golang.org/x/tools/go/loader said this wouldn't be called") -} - -func computeEdits(importPaths []string, os, arch string, cgoEnabled bool) fileToEditSet { - ctxt := build.Default - ctxt.GOOS = os - ctxt.GOARCH = arch - ctxt.CgoEnabled = cgoEnabled - - var conf loader.Config - conf.Build = &ctxt - conf.TypeChecker.Importer = noImporter{} - for _, importPath := range importPaths { - conf.Import(importPath) +func computeEdits(patterns []string, config []string) fileToEditSet { + // TODO(mdempsky): Move into config? + var buildFlags []string + if *flagTags != "" { + buildFlags = []string{"-tags", *flagTags} } - prog, err := conf.Load() + + pkgs, err := packages.Load(&packages.Config{ + Mode: packages.NeedSyntax | packages.NeedTypes | packages.NeedTypesInfo, + Env: append(os.Environ(), config...), + BuildFlags: buildFlags, + Tests: *flagTests, + }, patterns...) if err != nil { log.Fatal(err) } + packages.PrintErrors(pkgs) - return computeEditsFromProg(prog) -} - -func computeEditsFromProg(prog *loader.Program) fileToEditSet { type res struct { file string edits editSet } + ch := make(chan res) var wg sync.WaitGroup - for _, pkg := range prog.InitialPackages() { - for _, file := range pkg.Files { + for _, pkg := range pkgs { + for _, file := range pkg.Syntax { pkg, file := pkg, file + tokenFile := pkg.Fset.File(file.Package) + filename := tokenFile.Position(file.Package).Filename + + // Hack to recognize _cgo_gotypes.go. 
+ if strings.HasSuffix(filename, "-d") || strings.HasSuffix(filename, "/_cgo_gotypes.go") { + continue + } + wg.Add(1) go func() { defer wg.Done() - v := visitor{pkg: pkg, file: prog.Fset.File(file.Package), edits: make(editSet)} + v := visitor{info: pkg.TypesInfo, file: tokenFile, edits: make(editSet)} ast.Walk(&v, file) - ch <- res{v.file.Name(), v.edits} + ch <- res{filename, v.edits} }() } } @@ -360,7 +355,7 @@ type step struct { } type visitor struct { - pkg *loader.PackageInfo + info *types.Info file *token.File edits editSet path []step @@ -390,7 +385,7 @@ func (v *visitor) unconvert(call *ast.CallExpr) { if len(call.Args) != 1 || call.Ellipsis != token.NoPos { return } - ft, ok := v.pkg.Types[call.Fun] + ft, ok := v.info.Types[call.Fun] if !ok { fmt.Println("Missing type for function") return @@ -399,7 +394,7 @@ func (v *visitor) unconvert(call *ast.CallExpr) { // Function call; not a conversion. return } - at, ok := v.pkg.Types[call.Args[0]] + at, ok := v.info.Types[call.Args[0]] if !ok { fmt.Println("Missing type for argument") return @@ -408,7 +403,13 @@ func (v *visitor) unconvert(call *ast.CallExpr) { // A real conversion. return } - if isUntypedValue(call.Args[0], &v.pkg.Info) { + if !*flagFastMath && isFloatingPoint(ft.Type) { + // As of Go 1.9, explicit floating-point type + // conversions are always significant because they + // force rounding and prevent operation fusing. + return + } + if isUntypedValue(call.Args[0], v.info) { // Workaround golang.org/issue/13061. return } @@ -417,31 +418,15 @@ func (v *visitor) unconvert(call *ast.CallExpr) { fmt.Println("Skipped a possible type conversion because of -safe at", v.file.Position(call.Pos())) return } - if v.isCgoCheckPointerContext() { - // cmd/cgo generates explicit type conversions that - // are often redundant when introducing - // _cgoCheckPointer calls (issue #16). Users can't do - // anything about these, so skip over them. - return - } - v.edits[v.file.Position(call.Lparen)] = struct{}{} + v.edits.add(v.file.Position(call.Lparen)) } -func (v *visitor) isCgoCheckPointerContext() bool { - ctxt := &v.path[len(v.path)-2] - if ctxt.i != 1 { - return false - } - call, ok := ctxt.n.(*ast.CallExpr) - if !ok { - return false - } - ident, ok := call.Fun.(*ast.Ident) - if !ok { - return false - } - return ident.Name == "_cgoCheckPointer" +// isFloatingPoint reports whether t's underlying type is a floating +// point type. +func isFloatingPoint(t types.Type) bool { + ut, ok := t.Underlying().(*types.Basic) + return ok && ut.Info()&(types.IsFloat|types.IsComplex) != 0 } // isSafeContext reports whether the current context requires @@ -463,7 +448,7 @@ func (v *visitor) isSafeContext(t types.Type) bool { } // We're a conversion in the pos'th element of n.Rhs. // Check that the corresponding element of n.Lhs is of type t. - lt, ok := v.pkg.Types[n.Lhs[pos]] + lt, ok := v.info.Types[n.Lhs[pos]] if !ok { fmt.Println("Missing type for LHS expression") return false @@ -485,7 +470,7 @@ func (v *visitor) isSafeContext(t types.Type) bool { } else { other = n.X } - ot, ok := v.pkg.Types[other] + ot, ok := v.info.Types[other] if !ok { fmt.Println("Missing type for other binop subexpr") return false @@ -497,7 +482,7 @@ func (v *visitor) isSafeContext(t types.Type) bool { // Type conversion in the function subexpr is okay.
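The new floating-point guard in `unconvert` above deserves a concrete illustration: since the compiler may fuse `x*y + z` into a single fused multiply-add, an explicit conversion that forces `x*y` to round first can change the result, so such "redundant" conversions are deliberately left alone unless `fastMath` is set. A hedged sketch (whether the two values actually differ depends on the target CPU):

```go
package main

import "fmt"

func main() {
	x, y, z := 1.0000000000000002, 1.0000000000000002, -1.0

	fused := x*y + z           // may compile to one fused multiply-add (single rounding)
	forced := float64(x*y) + z // the conversion forces x*y to round first

	// On FMA hardware these can differ in the last bit, which is why the
	// conversion is significant even though x*y is already a float64.
	fmt.Println(fused == forced)
}
```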
return true } - ft, ok := v.pkg.Types[n.Fun] + ft, ok := v.info.Types[n.Fun] if !ok { fmt.Println("Missing type for function expression") return false @@ -550,7 +535,7 @@ func (v *visitor) isSafeContext(t types.Type) bool { if typeExpr == nil { fmt.Println(ctxt) } - pt, ok := v.pkg.Types[typeExpr] + pt, ok := v.info.Types[typeExpr] if !ok { fmt.Println("Missing type for return parameter at", v.file.Position(n.Pos())) return false diff --git a/vendor/github.com/google/s2a-go/.gitignore b/vendor/github.com/google/s2a-go/.gitignore deleted file mode 100644 index 01764d1cdf..0000000000 --- a/vendor/github.com/google/s2a-go/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# Ignore binaries without extension -//example/client/client -//example/server/server -//internal/v2/fakes2av2_server/fakes2av2_server - -.idea/ \ No newline at end of file diff --git a/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md b/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md deleted file mode 100644 index dc079b4d66..0000000000 --- a/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,93 +0,0 @@ -# Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of -experience, education, socio-economic status, nationality, personal appearance, -race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, or to ban temporarily or permanently any -contributor for other behaviors that they deem inappropriate, threatening, -offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. 
- -This Code of Conduct also applies outside the project spaces when the Project -Steward has a reasonable belief that an individual's behavior may have a -negative impact on the project or its community. - -## Conflict Resolution - -We do not believe that all conflict is bad; healthy debate and disagreement -often yield positive results. However, it is never okay to be disrespectful or -to engage in behavior that violates the project’s code of conduct. - -If you see someone violating the code of conduct, you are encouraged to address -the behavior directly with those involved. Many issues can be resolved quickly -and easily, and this gives people more control over the outcome of their -dispute. If you are unable to resolve the matter for any reason, or if the -behavior is threatening or harassing, report it. We are dedicated to providing -an environment where participants feel welcome and safe. - -Reports should be directed to *[PROJECT STEWARD NAME(s) AND EMAIL(s)]*, the -Project Steward(s) for *[PROJECT NAME]*. It is the Project Steward’s duty to -receive and address reported violations of the code of conduct. They will then -work with a committee consisting of representatives from the Open Source -Programs Office and the Google Open Source Strategy team. If for any reason you -are uncomfortable reaching out to the Project Steward, please email -opensource@google.com. - -We will investigate every complaint, but you may not receive a direct response. -We will use our discretion in determining when and how to follow up on reported -incidents, which may range from not taking action to permanent expulsion from -the project and project-sponsored spaces. We will notify the accused of the -report and provide them an opportunity to discuss it before any action is taken. -The identity of the reporter will be omitted from the details of the report -supplied to the accused. In potentially harmful situations, such as ongoing -harassment or threats to anyone's safety, we may take action without notice. - -## Attribution - -This Code of Conduct is adapted from the Contributor Covenant, version 1.4, -available at -https://www.contributor-covenant.org/version/1/4/code-of-conduct.html diff --git a/vendor/github.com/google/s2a-go/CONTRIBUTING.md b/vendor/github.com/google/s2a-go/CONTRIBUTING.md deleted file mode 100644 index 22b241cb73..0000000000 --- a/vendor/github.com/google/s2a-go/CONTRIBUTING.md +++ /dev/null @@ -1,29 +0,0 @@ -# How to Contribute - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement (CLA). You (or your employer) retain the copyright to your -contribution; this simply gives us permission to use and redistribute your -contributions as part of the project. Head over to -<https://cla.developers.google.com/> to see your current agreements on file or -to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - -## Code reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. Consult -[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more -information on using pull requests.
- -## Community Guidelines - -This project follows -[Google's Open Source Community Guidelines](https://opensource.google/conduct/). diff --git a/vendor/github.com/google/s2a-go/LICENSE.md b/vendor/github.com/google/s2a-go/LICENSE.md deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/google/s2a-go/LICENSE.md +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/google/s2a-go/README.md b/vendor/github.com/google/s2a-go/README.md deleted file mode 100644 index d566950f38..0000000000 --- a/vendor/github.com/google/s2a-go/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Secure Session Agent Client Libraries - -The Secure Session Agent is a service that enables a workload to offload select -operations from the mTLS handshake and protects a workload's private key -material from exfiltration. Specifically, the workload asks the Secure Session -Agent for the TLS configuration to use during the handshake, to perform private -key operations, and to validate the peer certificate chain. The Secure Session -Agent's client libraries enable applications to communicate with the Secure -Session Agent during the TLS handshake, and to encrypt traffic to the peer -after the TLS handshake is complete. - -This repository contains the source code for the Secure Session Agent's Go -client libraries, which allow gRPC-Go applications to use the Secure Session -Agent. This repository supports the Bazel and Golang build systems. - -All code in this repository is experimental and subject to change. We do not -guarantee API stability at this time. diff --git a/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go b/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go deleted file mode 100644 index 034d1b912c..0000000000 --- a/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go +++ /dev/null @@ -1,167 +0,0 @@ -/* - * - * Copyright 2023 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package fallback provides default implementations of fallback options when S2A fails. -package fallback - -import ( - "context" - "crypto/tls" - "fmt" - "net" - - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" -) - -const ( - alpnProtoStrH2 = "h2" - alpnProtoStrHTTP = "http/1.1" - defaultHTTPSPort = "443" -) - -// FallbackTLSConfigGRPC is a tls.Config used by the DefaultFallbackClientHandshakeFunc function. -// It supports GRPC use case, thus the alpn is set to 'h2'. 
-var FallbackTLSConfigGRPC = tls.Config{ - MinVersion: tls.VersionTLS13, - ClientSessionCache: nil, - NextProtos: []string{alpnProtoStrH2}, -} - -// FallbackTLSConfigHTTP is a tls.Config used by the DefaultFallbackDialerAndAddress func. -// It supports the HTTP use case and the alpn is set to both 'http/1.1' and 'h2'. -var FallbackTLSConfigHTTP = tls.Config{ - MinVersion: tls.VersionTLS13, - ClientSessionCache: nil, - NextProtos: []string{alpnProtoStrH2, alpnProtoStrHTTP}, -} - -// ClientHandshake establishes a TLS connection and returns it, plus its auth info. -// Inputs: -// -// targetServer: the server attempted with S2A. -// conn: the tcp connection to the server at address targetServer that was passed into S2A's ClientHandshake func. -// If fallback is successful, the `conn` should be closed. -// err: the error encountered when performing the client-side TLS handshake with S2A. -type ClientHandshake func(ctx context.Context, targetServer string, conn net.Conn, err error) (net.Conn, credentials.AuthInfo, error) - -// DefaultFallbackClientHandshakeFunc returns a ClientHandshake function, -// which establishes a TLS connection to the provided fallbackAddr, returns the new connection and its auth info. -// Example use: -// -// transportCreds, _ = s2a.NewClientCreds(&s2a.ClientOptions{ -// S2AAddress: s2aAddress, -// FallbackOpts: &s2a.FallbackOptions{ // optional -// FallbackClientHandshakeFunc: fallback.DefaultFallbackClientHandshakeFunc(fallbackAddr), -// }, -// }) -// -// The fallback server's certificate must be verifiable using OS root store. -// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, -// it uses default port 443. -// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, -// and min TLS version is set to 1.3. 
-func DefaultFallbackClientHandshakeFunc(fallbackAddr string) (ClientHandshake, error) { - var fallbackDialer = tls.Dialer{Config: &FallbackTLSConfigGRPC} - return defaultFallbackClientHandshakeFuncInternal(fallbackAddr, fallbackDialer.DialContext) -} - -func defaultFallbackClientHandshakeFuncInternal(fallbackAddr string, dialContextFunc func(context.Context, string, string) (net.Conn, error)) (ClientHandshake, error) { - fallbackServerAddr, err := processFallbackAddr(fallbackAddr) - if err != nil { - if grpclog.V(1) { - grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) - } - return nil, err - } - return func(ctx context.Context, targetServer string, conn net.Conn, s2aErr error) (net.Conn, credentials.AuthInfo, error) { - fbConn, fbErr := dialContextFunc(ctx, "tcp", fallbackServerAddr) - if fbErr != nil { - grpclog.Infof("dialing to fallback server %s failed: %v", fallbackServerAddr, fbErr) - return nil, nil, fmt.Errorf("dialing to fallback server %s failed: %v; S2A client handshake with %s error: %w", fallbackServerAddr, fbErr, targetServer, s2aErr) - } - - tc, success := fbConn.(*tls.Conn) - if !success { - grpclog.Infof("the connection with fallback server is expected to be tls but isn't") - return nil, nil, fmt.Errorf("the connection with fallback server is expected to be tls but isn't; S2A client handshake with %s error: %w", targetServer, s2aErr) - } - - tlsInfo := credentials.TLSInfo{ - State: tc.ConnectionState(), - CommonAuthInfo: credentials.CommonAuthInfo{ - SecurityLevel: credentials.PrivacyAndIntegrity, - }, - } - if grpclog.V(1) { - grpclog.Infof("ConnectionState.NegotiatedProtocol: %v", tc.ConnectionState().NegotiatedProtocol) - grpclog.Infof("ConnectionState.HandshakeComplete: %v", tc.ConnectionState().HandshakeComplete) - grpclog.Infof("ConnectionState.ServerName: %v", tc.ConnectionState().ServerName) - } - conn.Close() - return fbConn, tlsInfo, nil - }, nil -} - -// DefaultFallbackDialerAndAddress returns a TLS dialer and the network address to dial. -// Example use: -// -// fallbackDialer, fallbackServerAddr := fallback.DefaultFallbackDialerAndAddress(fallbackAddr) -// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ -// S2AAddress: s2aAddress, // required -// FallbackOpts: &s2a.FallbackOptions{ -// FallbackDialer: &s2a.FallbackDialer{ -// Dialer: fallbackDialer, -// ServerAddr: fallbackServerAddr, -// }, -// }, -// }) -// -// The fallback server's certificate should be verifiable using OS root store. -// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, -// it uses default port 443. -// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, -// and min TLS version is set to 1.3. 
-func DefaultFallbackDialerAndAddress(fallbackAddr string) (*tls.Dialer, string, error) { - fallbackServerAddr, err := processFallbackAddr(fallbackAddr) - if err != nil { - if grpclog.V(1) { - grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) - } - return nil, "", err - } - return &tls.Dialer{Config: &FallbackTLSConfigHTTP}, fallbackServerAddr, nil -} - -func processFallbackAddr(fallbackAddr string) (string, error) { - var fallbackServerAddr string - var err error - - if fallbackAddr == "" { - return "", fmt.Errorf("empty fallback address") - } - _, _, err = net.SplitHostPort(fallbackAddr) - if err != nil { - // fallbackAddr does not have port suffix - fallbackServerAddr = net.JoinHostPort(fallbackAddr, defaultHTTPSPort) - } else { - // FallbackServerAddr already has port suffix - fallbackServerAddr = fallbackAddr - } - return fallbackServerAddr, nil -} diff --git a/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go b/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go deleted file mode 100644 index aa3967f9d1..0000000000 --- a/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go +++ /dev/null @@ -1,119 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package authinfo provides authentication and authorization information that -// results from the TLS handshake. -package authinfo - -import ( - "errors" - - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" - contextpb "github.com/google/s2a-go/internal/proto/s2a_context_go_proto" - grpcpb "github.com/google/s2a-go/internal/proto/s2a_go_proto" - "google.golang.org/grpc/credentials" -) - -var _ credentials.AuthInfo = (*S2AAuthInfo)(nil) - -const s2aAuthType = "s2a" - -// S2AAuthInfo exposes authentication and authorization information from the -// S2A session result to the gRPC stack. -type S2AAuthInfo struct { - s2aContext *contextpb.S2AContext - commonAuthInfo credentials.CommonAuthInfo -} - -// NewS2AAuthInfo returns a new S2AAuthInfo object from the S2A session result. -func NewS2AAuthInfo(result *grpcpb.SessionResult) (credentials.AuthInfo, error) { - return newS2AAuthInfo(result) -} - -func newS2AAuthInfo(result *grpcpb.SessionResult) (*S2AAuthInfo, error) { - if result == nil { - return nil, errors.New("NewS2aAuthInfo given nil session result") - } - return &S2AAuthInfo{ - s2aContext: &contextpb.S2AContext{ - ApplicationProtocol: result.GetApplicationProtocol(), - TlsVersion: result.GetState().GetTlsVersion(), - Ciphersuite: result.GetState().GetTlsCiphersuite(), - PeerIdentity: result.GetPeerIdentity(), - LocalIdentity: result.GetLocalIdentity(), - PeerCertFingerprint: result.GetPeerCertFingerprint(), - LocalCertFingerprint: result.GetLocalCertFingerprint(), - IsHandshakeResumed: result.GetState().GetIsHandshakeResumed(), - }, - commonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, - }, nil -} - -// AuthType returns the authentication type. 
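The deleted `processFallbackAddr` above implements a common normalization: accept `host` or `host:port` and only append the default HTTPS port when none is present. A standalone sketch of the same pattern (the function name is mine, not s2a-go's):

```go
package main

import (
	"fmt"
	"net"
)

// normalizeAddr mirrors processFallbackAddr: SplitHostPort fails when no
// port is present, and only then is the default port 443 appended.
func normalizeAddr(addr string) string {
	if _, _, err := net.SplitHostPort(addr); err != nil {
		return net.JoinHostPort(addr, "443")
	}
	return addr
}

func main() {
	fmt.Println(normalizeAddr("example.com"))      // example.com:443
	fmt.Println(normalizeAddr("example.com:8443")) // example.com:8443
}
```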
-func (s *S2AAuthInfo) AuthType() string { - return s2aAuthType -} - -// ApplicationProtocol returns the application protocol, e.g. "grpc". -func (s *S2AAuthInfo) ApplicationProtocol() string { - return s.s2aContext.GetApplicationProtocol() -} - -// TLSVersion returns the TLS version negotiated during the handshake. -func (s *S2AAuthInfo) TLSVersion() commonpb.TLSVersion { - return s.s2aContext.GetTlsVersion() -} - -// Ciphersuite returns the ciphersuite negotiated during the handshake. -func (s *S2AAuthInfo) Ciphersuite() commonpb.Ciphersuite { - return s.s2aContext.GetCiphersuite() -} - -// PeerIdentity returns the authenticated identity of the peer. -func (s *S2AAuthInfo) PeerIdentity() *commonpb.Identity { - return s.s2aContext.GetPeerIdentity() -} - -// LocalIdentity returns the local identity of the application used during -// session setup. -func (s *S2AAuthInfo) LocalIdentity() *commonpb.Identity { - return s.s2aContext.GetLocalIdentity() -} - -// PeerCertFingerprint returns the SHA256 hash of the peer certificate used in -// the S2A handshake. -func (s *S2AAuthInfo) PeerCertFingerprint() []byte { - return s.s2aContext.GetPeerCertFingerprint() -} - -// LocalCertFingerprint returns the SHA256 hash of the local certificate used -// in the S2A handshake. -func (s *S2AAuthInfo) LocalCertFingerprint() []byte { - return s.s2aContext.GetLocalCertFingerprint() -} - -// IsHandshakeResumed returns true if a cached session was used to resume -// the handshake. -func (s *S2AAuthInfo) IsHandshakeResumed() bool { - return s.s2aContext.GetIsHandshakeResumed() -} - -// SecurityLevel returns the security level of the connection. -func (s *S2AAuthInfo) SecurityLevel() credentials.SecurityLevel { - return s.commonAuthInfo.SecurityLevel -} diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go b/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go deleted file mode 100644 index 8297c9a974..0000000000 --- a/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go +++ /dev/null @@ -1,438 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package handshaker communicates with the S2A handshaker service. -package handshaker - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "sync" - - "github.com/google/s2a-go/internal/authinfo" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" - s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" - "github.com/google/s2a-go/internal/record" - "github.com/google/s2a-go/internal/tokenmanager" - grpc "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" -) - -var ( - // appProtocol contains the application protocol accepted by the handshaker. - appProtocol = "grpc" - // frameLimit is the maximum size of a frame in bytes. - frameLimit = 1024 * 64 - // errPeerNotResponding is the error returned when the peer doesn't respond.
- errPeerNotResponding = errors.New("peer is not responding and re-connection should be attempted") -) - -// Handshaker defines a handshaker interface. -type Handshaker interface { - // ClientHandshake starts and completes a TLS handshake from the client side, - // and returns a secure connection along with additional auth information. - ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) - // ServerHandshake starts and completes a TLS handshake from the server side, - // and returns a secure connection along with additional auth information. - ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) - // Close terminates the Handshaker. It should be called when the handshake - // is complete. - Close() error -} - -// ClientHandshakerOptions contains the options needed to configure the S2A -// handshaker service on the client-side. -type ClientHandshakerOptions struct { - // MinTLSVersion specifies the min TLS version supported by the client. - MinTLSVersion commonpb.TLSVersion - // MaxTLSVersion specifies the max TLS version supported by the client. - MaxTLSVersion commonpb.TLSVersion - // TLSCiphersuites is the ordered list of ciphersuites supported by the - // client. - TLSCiphersuites []commonpb.Ciphersuite - // TargetIdentities contains a list of allowed server identities. One of the - // target identities should match the peer identity in the handshake - // result; otherwise, the handshake fails. - TargetIdentities []*commonpb.Identity - // LocalIdentity is the local identity of the client application. If none is - // provided, then the S2A will choose the default identity. - LocalIdentity *commonpb.Identity - // TargetName is the allowed server name, which may be used for server - // authorization check by the S2A if it is provided. - TargetName string - // EnsureProcessSessionTickets allows users to wait and ensure that all - // available session tickets are sent to S2A before a process completes. - EnsureProcessSessionTickets *sync.WaitGroup -} - -// ServerHandshakerOptions contains the options needed to configure the S2A -// handshaker service on the server-side. -type ServerHandshakerOptions struct { - // MinTLSVersion specifies the min TLS version supported by the server. - MinTLSVersion commonpb.TLSVersion - // MaxTLSVersion specifies the max TLS version supported by the server. - MaxTLSVersion commonpb.TLSVersion - // TLSCiphersuites is the ordered list of ciphersuites supported by the - // server. - TLSCiphersuites []commonpb.Ciphersuite - // LocalIdentities is the list of local identities that may be assumed by - // the server. If no local identity is specified, then the S2A chooses a - // default local identity. - LocalIdentities []*commonpb.Identity -} - -// s2aHandshaker performs a TLS handshake using the S2A handshaker service. -type s2aHandshaker struct { - // stream is used to communicate with the S2A handshaker service. - stream s2apb.S2AService_SetUpSessionClient - // conn is the connection to the peer. - conn net.Conn - // clientOpts should be non-nil iff the handshaker is client-side. - clientOpts *ClientHandshakerOptions - // serverOpts should be non-nil iff the handshaker is server-side. - serverOpts *ServerHandshakerOptions - // isClient determines if the handshaker is client or server side. - isClient bool - // hsAddr stores the address of the S2A handshaker service. - hsAddr string - // tokenManager manages access tokens for authenticating to S2A. 
- tokenManager tokenmanager.AccessTokenManager - // localIdentities is the set of local identities for whom the - // tokenManager should fetch a token when preparing a request to be - // sent to S2A. - localIdentities []*commonpb.Identity -} - -// NewClientHandshaker creates an s2aHandshaker instance that performs a -// client-side TLS handshake using the S2A handshaker service. -func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ClientHandshakerOptions) (Handshaker, error) { - stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() - if err != nil { - grpclog.Infof("failed to create single token access token manager: %v", err) - } - return newClientHandshaker(stream, c, hsAddr, opts, tokenManager), nil -} - -func newClientHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ClientHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { - var localIdentities []*commonpb.Identity - if opts != nil { - localIdentities = []*commonpb.Identity{opts.LocalIdentity} - } - return &s2aHandshaker{ - stream: stream, - conn: c, - clientOpts: opts, - isClient: true, - hsAddr: hsAddr, - tokenManager: tokenManager, - localIdentities: localIdentities, - } -} - -// NewServerHandshaker creates an s2aHandshaker instance that performs a -// server-side TLS handshake using the S2A handshaker service. -func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ServerHandshakerOptions) (Handshaker, error) { - stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() - if err != nil { - grpclog.Infof("failed to create single token access token manager: %v", err) - } - return newServerHandshaker(stream, c, hsAddr, opts, tokenManager), nil -} - -func newServerHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ServerHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { - var localIdentities []*commonpb.Identity - if opts != nil { - localIdentities = opts.LocalIdentities - } - return &s2aHandshaker{ - stream: stream, - conn: c, - serverOpts: opts, - isClient: false, - hsAddr: hsAddr, - tokenManager: tokenManager, - localIdentities: localIdentities, - } -} - -// ClientHandshake performs a client-side TLS handshake using the S2A handshaker -// service. When complete, returns a TLS connection. -func (h *s2aHandshaker) ClientHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { - if !h.isClient { - return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client-side handshake") - } - // Extract the hostname from the target name. The target name is assumed to be an authority. - hostname, _, err := net.SplitHostPort(h.clientOpts.TargetName) - if err != nil { - // If the target name had no host port or could not be parsed, use it as is. - hostname = h.clientOpts.TargetName - } - - // Prepare a client start message to send to the S2A handshaker service. 
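The client handshake above treats the target name as an authority and falls back to the raw string when no port can be split off. A minimal standalone sketch of that extraction:

```go
package main

import (
	"fmt"
	"net"
)

// hostOf mirrors the hostname extraction in ClientHandshake: the target
// name is treated as an authority, and the port (if any) is dropped.
func hostOf(targetName string) string {
	host, _, err := net.SplitHostPort(targetName)
	if err != nil {
		// No host:port split possible; use the target name as-is.
		return targetName
	}
	return host
}

func main() {
	fmt.Println(hostOf("www.example.com:443")) // www.example.com
	fmt.Println(hostOf("www.example.com"))     // www.example.com
}
```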
- req := &s2apb.SessionReq{ - ReqOneof: &s2apb.SessionReq_ClientStart{ - ClientStart: &s2apb.ClientSessionStartReq{ - ApplicationProtocols: []string{appProtocol}, - MinTlsVersion: h.clientOpts.MinTLSVersion, - MaxTlsVersion: h.clientOpts.MaxTLSVersion, - TlsCiphersuites: h.clientOpts.TLSCiphersuites, - TargetIdentities: h.clientOpts.TargetIdentities, - LocalIdentity: h.clientOpts.LocalIdentity, - TargetName: hostname, - }, - }, - AuthMechanisms: h.getAuthMechanisms(), - } - conn, result, err := h.setUpSession(req) - if err != nil { - return nil, nil, err - } - authInfo, err := authinfo.NewS2AAuthInfo(result) - if err != nil { - return nil, nil, err - } - return conn, authInfo, nil -} - -// ServerHandshake performs a server-side TLS handshake using the S2A handshaker -// service. When complete, returns a TLS connection. -func (h *s2aHandshaker) ServerHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { - if h.isClient { - return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server-side handshake") - } - p := make([]byte, frameLimit) - n, err := h.conn.Read(p) - if err != nil { - return nil, nil, err - } - // Prepare a server start message to send to the S2A handshaker service. - req := &s2apb.SessionReq{ - ReqOneof: &s2apb.SessionReq_ServerStart{ - ServerStart: &s2apb.ServerSessionStartReq{ - ApplicationProtocols: []string{appProtocol}, - MinTlsVersion: h.serverOpts.MinTLSVersion, - MaxTlsVersion: h.serverOpts.MaxTLSVersion, - TlsCiphersuites: h.serverOpts.TLSCiphersuites, - LocalIdentities: h.serverOpts.LocalIdentities, - InBytes: p[:n], - }, - }, - AuthMechanisms: h.getAuthMechanisms(), - } - conn, result, err := h.setUpSession(req) - if err != nil { - return nil, nil, err - } - authInfo, err := authinfo.NewS2AAuthInfo(result) - if err != nil { - return nil, nil, err - } - return conn, authInfo, nil -} - -// setUpSession proxies messages between the peer and the S2A handshaker -// service. -func (h *s2aHandshaker) setUpSession(req *s2apb.SessionReq) (net.Conn, *s2apb.SessionResult, error) { - resp, err := h.accessHandshakerService(req) - if err != nil { - return nil, nil, err - } - // Check if the returned status is an error. - if resp.GetStatus() != nil { - if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { - return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) - } - } - // Calculate the extra unread bytes from the Session. Attempting to consume - // more than the bytes sent will throw an error. - var extra []byte - if req.GetServerStart() != nil { - if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { - return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") - } - extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] - } - result, extra, err := h.processUntilDone(resp, extra) - if err != nil { - return nil, nil, err - } - if result.GetLocalIdentity() == nil { - return nil, nil, errors.New("local identity must be populated in session result") - } - - // Create a new TLS record protocol using the Session Result. 
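`setUpSession` above guards against the handshaker service reporting more consumed bytes than were actually sent before slicing off the remainder. The same bounds check, isolated as a runnable sketch:

```go
package main

import (
	"errors"
	"fmt"
)

// unreadBytes mirrors the bounds check in setUpSession: the handshaker
// service reports how many of the input bytes it consumed, and the
// remainder must be carried over to the TLS record protocol. A consumed
// count larger than the input is rejected as out-of-bounds.
func unreadBytes(in []byte, consumed uint32) ([]byte, error) {
	if consumed > uint32(len(in)) {
		return nil, errors.New("handshaker service consumed bytes value is out-of-bounds")
	}
	return in[consumed:], nil
}

func main() {
	extra, err := unreadBytes([]byte("hello"), 3)
	fmt.Printf("%q %v\n", extra, err) // "lo" <nil>

	_, err = unreadBytes([]byte("hi"), 5)
	fmt.Println(err) // out-of-bounds error
}
```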
- newConn, err := record.NewConn(&record.ConnParameters{ - NetConn: h.conn, - Ciphersuite: result.GetState().GetTlsCiphersuite(), - TLSVersion: result.GetState().GetTlsVersion(), - InTrafficSecret: result.GetState().GetInKey(), - OutTrafficSecret: result.GetState().GetOutKey(), - UnusedBuf: extra, - InSequence: result.GetState().GetInSequence(), - OutSequence: result.GetState().GetOutSequence(), - HSAddr: h.hsAddr, - ConnectionID: result.GetState().GetConnectionId(), - LocalIdentity: result.GetLocalIdentity(), - EnsureProcessSessionTickets: h.ensureProcessSessionTickets(), - }) - if err != nil { - return nil, nil, err - } - return newConn, result, nil -} - -func (h *s2aHandshaker) ensureProcessSessionTickets() *sync.WaitGroup { - if h.clientOpts == nil { - return nil - } - return h.clientOpts.EnsureProcessSessionTickets -} - -// accessHandshakerService sends the session request to the S2A handshaker -// service and returns the session response. -func (h *s2aHandshaker) accessHandshakerService(req *s2apb.SessionReq) (*s2apb.SessionResp, error) { - if err := h.stream.Send(req); err != nil { - return nil, err - } - resp, err := h.stream.Recv() - if err != nil { - return nil, err - } - return resp, nil -} - -// processUntilDone continues proxying messages between the peer and the S2A -// handshaker service until the handshaker service returns the SessionResult at -// the end of the handshake or an error occurs. -func (h *s2aHandshaker) processUntilDone(resp *s2apb.SessionResp, unusedBytes []byte) (*s2apb.SessionResult, []byte, error) { - for { - if len(resp.OutFrames) > 0 { - if _, err := h.conn.Write(resp.OutFrames); err != nil { - return nil, nil, err - } - } - if resp.Result != nil { - return resp.Result, unusedBytes, nil - } - buf := make([]byte, frameLimit) - n, err := h.conn.Read(buf) - if err != nil && err != io.EOF { - return nil, nil, err - } - // If there is nothing to send to the handshaker service and nothing is - // received from the peer, then we are stuck. This covers the case when - // the peer is not responding. Note that handshaker service connection - // issues are caught in accessHandshakerService before we even get - // here. - if len(resp.OutFrames) == 0 && n == 0 { - return nil, nil, errPeerNotResponding - } - // Append extra bytes from the previous interaction with the handshaker - // service with the current buffer read from conn. - p := append(unusedBytes, buf[:n]...) - // From here on, p and unusedBytes point to the same slice. - resp, err = h.accessHandshakerService(&s2apb.SessionReq{ - ReqOneof: &s2apb.SessionReq_Next{ - Next: &s2apb.SessionNextReq{ - InBytes: p, - }, - }, - AuthMechanisms: h.getAuthMechanisms(), - }) - if err != nil { - return nil, nil, err - } - - // Cache the local identity returned by S2A, if it is populated. This - // overwrites any existing local identities. This is done because, once the - // S2A has selected a local identity, then only that local identity should - // be asserted in future requests until the end of the current handshake. - if resp.GetLocalIdentity() != nil { - h.localIdentities = []*commonpb.Identity{resp.GetLocalIdentity()} - } - - // Set unusedBytes based on the handshaker service response. - if resp.GetBytesConsumed() > uint32(len(p)) { - return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") - } - unusedBytes = p[resp.GetBytesConsumed():] - } -} - -// Close shuts down the handshaker and the stream to the S2A handshaker service -// when the handshake is complete. 
It should be called when the caller obtains -// the secure connection at the end of the handshake. -func (h *s2aHandshaker) Close() error { - return h.stream.CloseSend() -} - -func (h *s2aHandshaker) getAuthMechanisms() []*s2apb.AuthenticationMechanism { - if h.tokenManager == nil { - return nil - } - // First handle the special case when no local identities have been provided - // by the application. In this case, an AuthenticationMechanism with no local - // identity will be sent. - if len(h.localIdentities) == 0 { - token, err := h.tokenManager.DefaultToken() - if err != nil { - grpclog.Infof("unable to get token for empty local identity: %v", err) - return nil - } - return []*s2apb.AuthenticationMechanism{ - { - MechanismOneof: &s2apb.AuthenticationMechanism_Token{ - Token: token, - }, - }, - } - } - - // Next, handle the case where the application (or the S2A) has provided - // one or more local identities. - var authMechanisms []*s2apb.AuthenticationMechanism - for _, localIdentity := range h.localIdentities { - token, err := h.tokenManager.Token(localIdentity) - if err != nil { - grpclog.Infof("unable to get token for local identity %v: %v", localIdentity, err) - continue - } - - authMechanism := &s2apb.AuthenticationMechanism{ - Identity: localIdentity, - MechanismOneof: &s2apb.AuthenticationMechanism_Token{ - Token: token, - }, - } - authMechanisms = append(authMechanisms, authMechanism) - } - return authMechanisms -} diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go deleted file mode 100644 index 49573af887..0000000000 --- a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go +++ /dev/null @@ -1,99 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package service is a utility for calling the S2A handshaker service. -package service - -import ( - "context" - "net" - "os" - "strings" - "sync" - "time" - - "google.golang.org/appengine" - "google.golang.org/appengine/socket" - grpc "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" -) - -// An environment variable, if true, opportunistically use AppEngine-specific dialer to call S2A. -const enableAppEngineDialerEnv = "S2A_ENABLE_APP_ENGINE_DIALER" - -var ( - // appEngineDialerHook is an AppEngine-specific dial option that is set - // during init time. If nil, then the application is not running on Google - // AppEngine. - appEngineDialerHook func(context.Context) grpc.DialOption - // mu guards hsConnMap and hsDialer. - mu sync.Mutex - // hsConnMap represents a mapping from an S2A handshaker service address - // to a corresponding connection to an S2A handshaker service instance. - hsConnMap = make(map[string]*grpc.ClientConn) - // hsDialer will be reassigned in tests. 
- hsDialer = grpc.Dial -) - -func init() { - if !appengine.IsAppEngine() && !appengine.IsDevAppServer() { - return - } - appEngineDialerHook = func(ctx context.Context) grpc.DialOption { - return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return socket.DialTimeout(ctx, "tcp", addr, timeout) - }) - } -} - -// Dial dials the S2A handshaker service. If a connection has already been -// established, this function returns it. Otherwise, a new connection is -// created. -func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { - mu.Lock() - defer mu.Unlock() - - hsConn, ok := hsConnMap[handshakerServiceAddress] - if !ok { - // Create a new connection to the S2A handshaker service. Note that - // this connection stays open until the application is closed. - grpcOpts := []grpc.DialOption{ - grpc.WithInsecure(), - } - if enableAppEngineDialer() && appEngineDialerHook != nil { - if grpclog.V(1) { - grpclog.Info("Using AppEngine-specific dialer to talk to S2A.") - } - grpcOpts = append(grpcOpts, appEngineDialerHook(context.Background())) - } - var err error - hsConn, err = hsDialer(handshakerServiceAddress, grpcOpts...) - if err != nil { - return nil, err - } - hsConnMap[handshakerServiceAddress] = hsConn - } - return hsConn, nil -} - -func enableAppEngineDialer() bool { - if strings.ToLower(os.Getenv(enableAppEngineDialerEnv)) == "true" { - return true - } - return false -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go deleted file mode 100644 index 16278a1d99..0000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 -// source: internal/proto/common/common.proto - -package common_go_proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The ciphersuites supported by S2A. The name determines the confidentiality, -// and authentication ciphers as well as the hash algorithm used for PRF in -// TLS 1.2 or HKDF in TLS 1.3. Thus, the components of the name are: -// - AEAD -- for encryption and authentication, e.g., AES_128_GCM. -// - Hash algorithm -- used in PRF or HKDF, e.g., SHA256. 
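The deleted `service.Dial` keeps one long-lived connection per handshaker address behind a mutex. A minimal sketch of that caching pattern, leaving out the AppEngine hook; the plaintext dial matches the original, and the address in `main` is a placeholder:

```go
package main

import (
	"sync"

	grpc "google.golang.org/grpc"
)

var (
	mu      sync.Mutex
	connMap = make(map[string]*grpc.ClientConn)
)

// dialCached mirrors the caching pattern in the deleted service.Dial:
// one long-lived ClientConn per handshaker address, guarded by a mutex,
// created lazily on first use and never torn down.
func dialCached(addr string) (*grpc.ClientConn, error) {
	mu.Lock()
	defer mu.Unlock()
	if conn, ok := connMap[addr]; ok {
		return conn, nil
	}
	conn, err := grpc.Dial(addr, grpc.WithInsecure()) // plaintext, as in the original
	if err != nil {
		return nil, err
	}
	connMap[addr] = conn
	return conn, nil
}

func main() {
	if _, err := dialCached("localhost:8080"); err != nil {
		panic(err)
	}
}
```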
-type Ciphersuite int32 - -const ( - Ciphersuite_AES_128_GCM_SHA256 Ciphersuite = 0 - Ciphersuite_AES_256_GCM_SHA384 Ciphersuite = 1 - Ciphersuite_CHACHA20_POLY1305_SHA256 Ciphersuite = 2 -) - -// Enum value maps for Ciphersuite. -var ( - Ciphersuite_name = map[int32]string{ - 0: "AES_128_GCM_SHA256", - 1: "AES_256_GCM_SHA384", - 2: "CHACHA20_POLY1305_SHA256", - } - Ciphersuite_value = map[string]int32{ - "AES_128_GCM_SHA256": 0, - "AES_256_GCM_SHA384": 1, - "CHACHA20_POLY1305_SHA256": 2, - } -) - -func (x Ciphersuite) Enum() *Ciphersuite { - p := new(Ciphersuite) - *p = x - return p -} - -func (x Ciphersuite) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_common_common_proto_enumTypes[0].Descriptor() -} - -func (Ciphersuite) Type() protoreflect.EnumType { - return &file_internal_proto_common_common_proto_enumTypes[0] -} - -func (x Ciphersuite) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Ciphersuite.Descriptor instead. -func (Ciphersuite) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} -} - -// The TLS versions supported by S2A's handshaker module. -type TLSVersion int32 - -const ( - TLSVersion_TLS1_2 TLSVersion = 0 - TLSVersion_TLS1_3 TLSVersion = 1 -) - -// Enum value maps for TLSVersion. -var ( - TLSVersion_name = map[int32]string{ - 0: "TLS1_2", - 1: "TLS1_3", - } - TLSVersion_value = map[string]int32{ - "TLS1_2": 0, - "TLS1_3": 1, - } -) - -func (x TLSVersion) Enum() *TLSVersion { - p := new(TLSVersion) - *p = x - return p -} - -func (x TLSVersion) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_common_common_proto_enumTypes[1].Descriptor() -} - -func (TLSVersion) Type() protoreflect.EnumType { - return &file_internal_proto_common_common_proto_enumTypes[1] -} - -func (x TLSVersion) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use TLSVersion.Descriptor instead. -func (TLSVersion) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_common_common_proto_rawDescGZIP(), []int{1} -} - -type Identity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to IdentityOneof: - // - // *Identity_SpiffeId - // *Identity_Hostname - // *Identity_Uid - // *Identity_MdbUsername - // *Identity_GaiaId - IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` - // Additional identity-specific attributes. 
- Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *Identity) Reset() { - *x = Identity{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_common_common_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Identity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Identity) ProtoMessage() {} - -func (x *Identity) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_common_common_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Identity.ProtoReflect.Descriptor instead. -func (*Identity) Descriptor() ([]byte, []int) { - return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} -} - -func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { - if m != nil { - return m.IdentityOneof - } - return nil -} - -func (x *Identity) GetSpiffeId() string { - if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { - return x.SpiffeId - } - return "" -} - -func (x *Identity) GetHostname() string { - if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { - return x.Hostname - } - return "" -} - -func (x *Identity) GetUid() string { - if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { - return x.Uid - } - return "" -} - -func (x *Identity) GetMdbUsername() string { - if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { - return x.MdbUsername - } - return "" -} - -func (x *Identity) GetGaiaId() string { - if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { - return x.GaiaId - } - return "" -} - -func (x *Identity) GetAttributes() map[string]string { - if x != nil { - return x.Attributes - } - return nil -} - -type isIdentity_IdentityOneof interface { - isIdentity_IdentityOneof() -} - -type Identity_SpiffeId struct { - // The SPIFFE ID of a connection endpoint. - SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` -} - -type Identity_Hostname struct { - // The hostname of a connection endpoint. - Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` -} - -type Identity_Uid struct { - // The UID of a connection endpoint. - Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` -} - -type Identity_MdbUsername struct { - // The MDB username of a connection endpoint. - MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` -} - -type Identity_GaiaId struct { - // The Gaia ID of a connection endpoint. 
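To illustrate the generated oneof API on `Identity`: setting one variant leaves the other getters returning zero values. This snippet would compile only inside the s2a-go module, since `common_go_proto` is internal; it is illustrative, not importable by third-party code.

```go
// Illustrative only: common_go_proto is an internal package.
package example

import (
	commonpb "github.com/google/s2a-go/internal/proto/common_go_proto"
)

// spiffeIdentity builds an Identity whose oneof carries a SPIFFE ID,
// plus one attribute, and shows how the generated getters behave.
func spiffeIdentity() (string, string) {
	id := &commonpb.Identity{
		IdentityOneof: &commonpb.Identity_SpiffeId{
			SpiffeId: "spiffe://example.org/workload",
		},
		Attributes: map[string]string{"env": "prod"},
	}
	// GetHostname returns "" because the oneof holds a SPIFFE ID.
	return id.GetSpiffeId(), id.GetHostname()
}
```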
- GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` -} - -func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} - -func (*Identity_Hostname) isIdentity_IdentityOneof() {} - -func (*Identity_Uid) isIdentity_IdentityOneof() {} - -func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} - -func (*Identity_GaiaId) isIdentity_IdentityOneof() {} - -var File_internal_proto_common_common_proto protoreflect.FileDescriptor - -var file_internal_proto_common_common_proto_rawDesc = []byte{ - 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, - 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, - 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, - 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, - 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, - 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, - 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, - 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, - 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, - 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_proto_common_common_proto_rawDescOnce sync.Once - file_internal_proto_common_common_proto_rawDescData = file_internal_proto_common_common_proto_rawDesc -) - -func file_internal_proto_common_common_proto_rawDescGZIP() []byte { - file_internal_proto_common_common_proto_rawDescOnce.Do(func() { - file_internal_proto_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_common_common_proto_rawDescData) - }) - return file_internal_proto_common_common_proto_rawDescData -} - -var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_internal_proto_common_common_proto_goTypes = []interface{}{ - (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite - (TLSVersion)(0), // 1: s2a.proto.TLSVersion - (*Identity)(nil), // 2: s2a.proto.Identity - nil, // 3: s2a.proto.Identity.AttributesEntry -} -var file_internal_proto_common_common_proto_depIdxs = []int32{ - 3, // 0: s2a.proto.Identity.attributes:type_name -> s2a.proto.Identity.AttributesEntry - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_internal_proto_common_common_proto_init() } -func file_internal_proto_common_common_proto_init() { - if File_internal_proto_common_common_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Identity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*Identity_SpiffeId)(nil), - (*Identity_Hostname)(nil), - (*Identity_Uid)(nil), - (*Identity_MdbUsername)(nil), - (*Identity_GaiaId)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_proto_common_common_proto_rawDesc, - NumEnums: 2, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_internal_proto_common_common_proto_goTypes, - DependencyIndexes: file_internal_proto_common_common_proto_depIdxs, - EnumInfos: file_internal_proto_common_common_proto_enumTypes, - MessageInfos: file_internal_proto_common_common_proto_msgTypes, - }.Build() - File_internal_proto_common_common_proto = out.File - file_internal_proto_common_common_proto_rawDesc = nil - file_internal_proto_common_common_proto_goTypes = nil - file_internal_proto_common_common_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go deleted file mode 100644 index f4f763ae10..0000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go +++ /dev/null @@ -1,267 
+0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 -// source: internal/proto/s2a_context/s2a_context.proto - -package s2a_context_go_proto - -import ( - common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type S2AContext struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocol negotiated for this connection, e.g., 'grpc'. - ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` - // The TLS version number that the S2A's handshaker module used to set up the - // session. - TlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` - // The TLS ciphersuite negotiated by the S2A's handshaker module. - Ciphersuite common_go_proto.Ciphersuite `protobuf:"varint,3,opt,name=ciphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"ciphersuite,omitempty"` - // The authenticated identity of the peer. - PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` - // The local identity used during session setup. This could be: - // - The local identity that the client specifies in ClientSessionStartReq. - // - One of the local identities that the server specifies in - // ServerSessionStartReq. - // - If neither client or server specifies local identities, the S2A picks the - // default one. In this case, this field will contain that identity. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // The SHA256 hash of the peer certificate used in the handshake. - PeerCertFingerprint []byte `protobuf:"bytes,6,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` - // The SHA256 hash of the local certificate used in the handshake. - LocalCertFingerprint []byte `protobuf:"bytes,7,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` - // Set to true if a cached session was reused to resume the handshake. 
- IsHandshakeResumed bool `protobuf:"varint,8,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` -} - -func (x *S2AContext) Reset() { - *x = S2AContext{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *S2AContext) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*S2AContext) ProtoMessage() {} - -func (x *S2AContext) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. -func (*S2AContext) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} -} - -func (x *S2AContext) GetApplicationProtocol() string { - if x != nil { - return x.ApplicationProtocol - } - return "" -} - -func (x *S2AContext) GetTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.TlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *S2AContext) GetCiphersuite() common_go_proto.Ciphersuite { - if x != nil { - return x.Ciphersuite - } - return common_go_proto.Ciphersuite(0) -} - -func (x *S2AContext) GetPeerIdentity() *common_go_proto.Identity { - if x != nil { - return x.PeerIdentity - } - return nil -} - -func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *S2AContext) GetPeerCertFingerprint() []byte { - if x != nil { - return x.PeerCertFingerprint - } - return nil -} - -func (x *S2AContext) GetLocalCertFingerprint() []byte { - if x != nil { - return x.LocalCertFingerprint - } - return nil -} - -func (x *S2AContext) GetIsHandshakeResumed() bool { - if x != nil { - return x.IsHandshakeResumed - } - return false -} - -var File_internal_proto_s2a_context_s2a_context_proto protoreflect.FileDescriptor - -var file_internal_proto_s2a_context_s2a_context_proto_rawDesc = []byte{ - 0x0a, 0x2c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, - 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x03, - 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x31, 0x0a, 0x14, - 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, - 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 
0x74, 0x6c, 0x73, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, - 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, - 0x65, 0x12, 0x38, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, - 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, 0x72, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, - 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, - 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, - 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x65, 0x64, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, 0x61, 0x5f, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce sync.Once - file_internal_proto_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_s2a_context_s2a_context_proto_rawDesc -) - -func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { - file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { - file_internal_proto_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_context_s2a_context_proto_rawDescData) - }) - return file_internal_proto_s2a_context_s2a_context_proto_rawDescData -} - -var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ - (*S2AContext)(nil), // 0: s2a.proto.S2AContext - (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion - (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite - (*common_go_proto.Identity)(nil), // 3: s2a.proto.Identity -} -var 
file_internal_proto_s2a_context_s2a_context_proto_depIdxs = []int32{ - 1, // 0: s2a.proto.S2AContext.tls_version:type_name -> s2a.proto.TLSVersion - 2, // 1: s2a.proto.S2AContext.ciphersuite:type_name -> s2a.proto.Ciphersuite - 3, // 2: s2a.proto.S2AContext.peer_identity:type_name -> s2a.proto.Identity - 3, // 3: s2a.proto.S2AContext.local_identity:type_name -> s2a.proto.Identity - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_internal_proto_s2a_context_s2a_context_proto_init() } -func file_internal_proto_s2a_context_s2a_context_proto_init() { - if File_internal_proto_s2a_context_s2a_context_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*S2AContext); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_proto_s2a_context_s2a_context_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_internal_proto_s2a_context_s2a_context_proto_goTypes, - DependencyIndexes: file_internal_proto_s2a_context_s2a_context_proto_depIdxs, - MessageInfos: file_internal_proto_s2a_context_s2a_context_proto_msgTypes, - }.Build() - File_internal_proto_s2a_context_s2a_context_proto = out.File - file_internal_proto_s2a_context_s2a_context_proto_rawDesc = nil - file_internal_proto_s2a_context_s2a_context_proto_goTypes = nil - file_internal_proto_s2a_context_s2a_context_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go deleted file mode 100644 index 0a86ebee59..0000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go +++ /dev/null @@ -1,1377 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 -// source: internal/proto/s2a/s2a.proto - -package s2a_go_proto - -import ( - common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type AuthenticationMechanism struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // (Optional) Application may specify an identity associated to an - // authentication mechanism. Otherwise, S2A assumes that the authentication - // mechanism is associated with the default identity. If the default identity - // cannot be determined, session setup fails. - Identity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` - // Types that are assignable to MechanismOneof: - // - // *AuthenticationMechanism_Token - MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` -} - -func (x *AuthenticationMechanism) Reset() { - *x = AuthenticationMechanism{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuthenticationMechanism) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuthenticationMechanism) ProtoMessage() {} - -func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. -func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{0} -} - -func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { - if x != nil { - return x.Identity - } - return nil -} - -func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { - if m != nil { - return m.MechanismOneof - } - return nil -} - -func (x *AuthenticationMechanism) GetToken() string { - if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { - return x.Token - } - return "" -} - -type isAuthenticationMechanism_MechanismOneof interface { - isAuthenticationMechanism_MechanismOneof() -} - -type AuthenticationMechanism_Token struct { - // A token that the application uses to authenticate itself to the S2A. - Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` -} - -func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} - -type ClientSessionStartReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocols supported by the client, e.g., "grpc". - ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` - // (Optional) The minimum TLS version number that the S2A's handshaker module - // will use to set up the session. If this field is not provided, S2A will use - // the minimum version it supports. - MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` - // (Optional) The maximum TLS version number that the S2A's handshaker module - // will use to set up the session. If this field is not provided, S2A will use - // the maximum version it supports. 
- MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` - // The TLS ciphersuites that the client is willing to support. - TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` - // (Optional) Describes which server identities are acceptable by the client. - // If target identities are provided and none of them matches the peer - // identity of the server, session setup fails. - TargetIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` - // (Optional) Application may specify a local identity. Otherwise, S2A chooses - // the default local identity. If the default identity cannot be determined, - // session setup fails. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,6,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // The target name that is used by S2A to configure SNI in the TLS handshake. - // It is also used to perform a server authorization check if available. This - // check is intended to verify that the peer authenticated identity is - // authorized to run a service with the target name. - // This field MUST only contain the host portion of the server address. It - // MUST not contain the scheme or the port number. For example, if the server - // address is dns://www.example.com:443, the value of this field should be - // set to www.example.com. - TargetName string `protobuf:"bytes,7,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` -} - -func (x *ClientSessionStartReq) Reset() { - *x = ClientSessionStartReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClientSessionStartReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClientSessionStartReq) ProtoMessage() {} - -func (x *ClientSessionStartReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClientSessionStartReq.ProtoReflect.Descriptor instead.
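The `TargetName` contract above (host only, no scheme or port) can be derived mechanically from a server address. A hedged standalone sketch; the scheme-stripping follows the `dns://` example in the comment and is otherwise an assumption:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// targetNameOf derives a value satisfying the TargetName contract:
// host only, no scheme, no port.
func targetNameOf(serverAddr string) string {
	addr := serverAddr
	if i := strings.Index(addr, "://"); i >= 0 {
		addr = addr[i+3:] // strip the scheme, e.g. "dns://"
	}
	if host, _, err := net.SplitHostPort(addr); err == nil {
		return host // drop the port
	}
	return addr
}

func main() {
	fmt.Println(targetNameOf("dns://www.example.com:443")) // www.example.com
	fmt.Println(targetNameOf("www.example.com"))           // www.example.com
}
```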
-func (*ClientSessionStartReq) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{1} -} - -func (x *ClientSessionStartReq) GetApplicationProtocols() []string { - if x != nil { - return x.ApplicationProtocols - } - return nil -} - -func (x *ClientSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MinTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *ClientSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MaxTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *ClientSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { - if x != nil { - return x.TlsCiphersuites - } - return nil -} - -func (x *ClientSessionStartReq) GetTargetIdentities() []*common_go_proto.Identity { - if x != nil { - return x.TargetIdentities - } - return nil -} - -func (x *ClientSessionStartReq) GetLocalIdentity() *common_go_proto.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *ClientSessionStartReq) GetTargetName() string { - if x != nil { - return x.TargetName - } - return "" -} - -type ServerSessionStartReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocols supported by the server, e.g., "grpc". - ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` - // (Optional) The minimum TLS version number that the S2A's handshaker module - // will use to set up the session. If this field is not provided, S2A will use - // the minimum version it supports. - MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` - // (Optional) The maximum TLS version number that the S2A's handshaker module - // will use to set up the session. If this field is not provided, S2A will use - // the maximum version it supports. - MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` - // The TLS ciphersuites that the server is willing to support. - TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` - // (Optional) A list of local identities supported by the server, if - // specified. Otherwise, S2A chooses the default local identity. If the - // default identity cannot be determined, session setup fails. - LocalIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` - // The byte representation of the first handshake message received from the - // client peer. It is possible that this first message is split into multiple - // chunks. In this case, the first chunk is sent using this field and the - // following chunks are sent using the in_bytes field of SessionNextReq - // Specifically, if the client peer is using S2A, this field contains the - // bytes in the out_frames field of SessionResp message that the client peer - // received from its S2A after initiating the handshake. 
- InBytes []byte `protobuf:"bytes,6,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` -} - -func (x *ServerSessionStartReq) Reset() { - *x = ServerSessionStartReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerSessionStartReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerSessionStartReq) ProtoMessage() {} - -func (x *ServerSessionStartReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerSessionStartReq.ProtoReflect.Descriptor instead. -func (*ServerSessionStartReq) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{2} -} - -func (x *ServerSessionStartReq) GetApplicationProtocols() []string { - if x != nil { - return x.ApplicationProtocols - } - return nil -} - -func (x *ServerSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MinTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *ServerSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MaxTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *ServerSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { - if x != nil { - return x.TlsCiphersuites - } - return nil -} - -func (x *ServerSessionStartReq) GetLocalIdentities() []*common_go_proto.Identity { - if x != nil { - return x.LocalIdentities - } - return nil -} - -func (x *ServerSessionStartReq) GetInBytes() []byte { - if x != nil { - return x.InBytes - } - return nil -} - -type SessionNextReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The byte representation of session setup, i.e., handshake messages. - // Specifically: - // - All handshake messages sent from the server to the client. - // - All, except for the first, handshake messages sent from the client to - // the server. Note that the first message is communicated to S2A using the - // in_bytes field of ServerSessionStartReq. - // - // If the peer is using S2A, this field contains the bytes in the out_frames - // field of SessionResp message that the peer received from its S2A. - InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` -} - -func (x *SessionNextReq) Reset() { - *x = SessionNextReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionNextReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionNextReq) ProtoMessage() {} - -func (x *SessionNextReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionNextReq.ProtoReflect.Descriptor instead. 
-func (*SessionNextReq) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{3} -} - -func (x *SessionNextReq) GetInBytes() []byte { - if x != nil { - return x.InBytes - } - return nil -} - -type ResumptionTicketReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The byte representation of a NewSessionTicket message received from the - // server. - InBytes [][]byte `protobuf:"bytes,1,rep,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` - // A connection identifier that was created and sent by S2A at the end of a - // handshake. - ConnectionId uint64 `protobuf:"varint,2,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` - // The local identity that was used by S2A during session setup and included - // in |SessionResult|. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` -} - -func (x *ResumptionTicketReq) Reset() { - *x = ResumptionTicketReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResumptionTicketReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResumptionTicketReq) ProtoMessage() {} - -func (x *ResumptionTicketReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResumptionTicketReq.ProtoReflect.Descriptor instead. -func (*ResumptionTicketReq) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{4} -} - -func (x *ResumptionTicketReq) GetInBytes() [][]byte { - if x != nil { - return x.InBytes - } - return nil -} - -func (x *ResumptionTicketReq) GetConnectionId() uint64 { - if x != nil { - return x.ConnectionId - } - return 0 -} - -func (x *ResumptionTicketReq) GetLocalIdentity() *common_go_proto.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -type SessionReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to ReqOneof: - // - // *SessionReq_ClientStart - // *SessionReq_ServerStart - // *SessionReq_Next - // *SessionReq_ResumptionTicket - ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` - // (Optional) The authentication mechanisms that the client wishes to use to - // authenticate to the S2A, ordered by preference. The S2A will always use the - // first authentication mechanism that appears in the list and is supported by - // the S2A. 
- AuthMechanisms []*AuthenticationMechanism `protobuf:"bytes,5,rep,name=auth_mechanisms,json=authMechanisms,proto3" json:"auth_mechanisms,omitempty"` -} - -func (x *SessionReq) Reset() { - *x = SessionReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionReq) ProtoMessage() {} - -func (x *SessionReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. -func (*SessionReq) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{5} -} - -func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { - if m != nil { - return m.ReqOneof - } - return nil -} - -func (x *SessionReq) GetClientStart() *ClientSessionStartReq { - if x, ok := x.GetReqOneof().(*SessionReq_ClientStart); ok { - return x.ClientStart - } - return nil -} - -func (x *SessionReq) GetServerStart() *ServerSessionStartReq { - if x, ok := x.GetReqOneof().(*SessionReq_ServerStart); ok { - return x.ServerStart - } - return nil -} - -func (x *SessionReq) GetNext() *SessionNextReq { - if x, ok := x.GetReqOneof().(*SessionReq_Next); ok { - return x.Next - } - return nil -} - -func (x *SessionReq) GetResumptionTicket() *ResumptionTicketReq { - if x, ok := x.GetReqOneof().(*SessionReq_ResumptionTicket); ok { - return x.ResumptionTicket - } - return nil -} - -func (x *SessionReq) GetAuthMechanisms() []*AuthenticationMechanism { - if x != nil { - return x.AuthMechanisms - } - return nil -} - -type isSessionReq_ReqOneof interface { - isSessionReq_ReqOneof() -} - -type SessionReq_ClientStart struct { - // The client session setup request message. - ClientStart *ClientSessionStartReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` -} - -type SessionReq_ServerStart struct { - // The server session setup request message. - ServerStart *ServerSessionStartReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` -} - -type SessionReq_Next struct { - // The next session setup request message. - Next *SessionNextReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` -} - -type SessionReq_ResumptionTicket struct { - // The resumption ticket that is received from the server. This message is - // only accepted by S2A if it is running as a client and if it is received - // after session setup is complete. If S2A is running as a server and it - // receives this message, the session is terminated. - ResumptionTicket *ResumptionTicketReq `protobuf:"bytes,4,opt,name=resumption_ticket,json=resumptionTicket,proto3,oneof"` -} - -func (*SessionReq_ClientStart) isSessionReq_ReqOneof() {} - -func (*SessionReq_ServerStart) isSessionReq_ReqOneof() {} - -func (*SessionReq_Next) isSessionReq_ReqOneof() {} - -func (*SessionReq_ResumptionTicket) isSessionReq_ReqOneof() {} - -type SessionState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The TLS version number that the S2A's handshaker module used to set up the - // session.
- TlsVersion common_go_proto.TLSVersion `protobuf:"varint,1,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` - // The TLS ciphersuite negotiated by the S2A's handshaker module. - TlsCiphersuite common_go_proto.Ciphersuite `protobuf:"varint,2,opt,name=tls_ciphersuite,json=tlsCiphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuite,omitempty"` - // The sequence number of the next, incoming, TLS record. - InSequence uint64 `protobuf:"varint,3,opt,name=in_sequence,json=inSequence,proto3" json:"in_sequence,omitempty"` - // The sequence number of the next, outgoing, TLS record. - OutSequence uint64 `protobuf:"varint,4,opt,name=out_sequence,json=outSequence,proto3" json:"out_sequence,omitempty"` - // The key for the inbound direction. - InKey []byte `protobuf:"bytes,5,opt,name=in_key,json=inKey,proto3" json:"in_key,omitempty"` - // The key for the outbound direction. - OutKey []byte `protobuf:"bytes,6,opt,name=out_key,json=outKey,proto3" json:"out_key,omitempty"` - // The constant part of the record nonce for the inbound direction. - InFixedNonce []byte `protobuf:"bytes,7,opt,name=in_fixed_nonce,json=inFixedNonce,proto3" json:"in_fixed_nonce,omitempty"` - // The constant part of the record nonce for the outbound direction. - OutFixedNonce []byte `protobuf:"bytes,8,opt,name=out_fixed_nonce,json=outFixedNonce,proto3" json:"out_fixed_nonce,omitempty"` - // A connection identifier that can be provided to S2A to perform operations - // related to this connection. This identifier will be stored by the record - // protocol, and included in the |ResumptionTicketReq| message that is later - // sent back to S2A. This field is set only for client-side connections. - ConnectionId uint64 `protobuf:"varint,9,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` - // Set to true if a cached session was reused to do an abbreviated handshake. - IsHandshakeResumed bool `protobuf:"varint,10,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` -} - -func (x *SessionState) Reset() { - *x = SessionState{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionState) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionState) ProtoMessage() {} - -func (x *SessionState) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionState.ProtoReflect.Descriptor instead.
-func (*SessionState) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{6} -} - -func (x *SessionState) GetTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.TlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *SessionState) GetTlsCiphersuite() common_go_proto.Ciphersuite { - if x != nil { - return x.TlsCiphersuite - } - return common_go_proto.Ciphersuite(0) -} - -func (x *SessionState) GetInSequence() uint64 { - if x != nil { - return x.InSequence - } - return 0 -} - -func (x *SessionState) GetOutSequence() uint64 { - if x != nil { - return x.OutSequence - } - return 0 -} - -func (x *SessionState) GetInKey() []byte { - if x != nil { - return x.InKey - } - return nil -} - -func (x *SessionState) GetOutKey() []byte { - if x != nil { - return x.OutKey - } - return nil -} - -func (x *SessionState) GetInFixedNonce() []byte { - if x != nil { - return x.InFixedNonce - } - return nil -} - -func (x *SessionState) GetOutFixedNonce() []byte { - if x != nil { - return x.OutFixedNonce - } - return nil -} - -func (x *SessionState) GetConnectionId() uint64 { - if x != nil { - return x.ConnectionId - } - return 0 -} - -func (x *SessionState) GetIsHandshakeResumed() bool { - if x != nil { - return x.IsHandshakeResumed - } - return false -} - -type SessionResult struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocol negotiated for this session. - ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` - // The session state at the end. This state contains all cryptographic - // material required to initialize the record protocol object. - State *SessionState `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` - // The authenticated identity of the peer. - PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` - // The local identity used during session setup. This could be: - // - The local identity that the client specifies in ClientSessionStartReq. - // - One of the local identities that the server specifies in - // ServerSessionStartReq. - // - If neither client nor server specifies local identities, the S2A picks the - // default one. In this case, this field will contain that identity. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // The SHA256 hash of the local certificate used in the handshake. - LocalCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` - // The SHA256 hash of the peer certificate used in the handshake.
- PeerCertFingerprint []byte `protobuf:"bytes,7,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` -} - -func (x *SessionResult) Reset() { - *x = SessionResult{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionResult) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionResult) ProtoMessage() {} - -func (x *SessionResult) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionResult.ProtoReflect.Descriptor instead. -func (*SessionResult) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{7} -} - -func (x *SessionResult) GetApplicationProtocol() string { - if x != nil { - return x.ApplicationProtocol - } - return "" -} - -func (x *SessionResult) GetState() *SessionState { - if x != nil { - return x.State - } - return nil -} - -func (x *SessionResult) GetPeerIdentity() *common_go_proto.Identity { - if x != nil { - return x.PeerIdentity - } - return nil -} - -func (x *SessionResult) GetLocalIdentity() *common_go_proto.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *SessionResult) GetLocalCertFingerprint() []byte { - if x != nil { - return x.LocalCertFingerprint - } - return nil -} - -func (x *SessionResult) GetPeerCertFingerprint() []byte { - if x != nil { - return x.PeerCertFingerprint - } - return nil -} - -type SessionStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The status code that is specific to the application and the implementation - // of S2A, e.g., gRPC status code. - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // The status details. - Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` -} - -func (x *SessionStatus) Reset() { - *x = SessionStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionStatus) ProtoMessage() {} - -func (x *SessionStatus) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionStatus.ProtoReflect.Descriptor instead. -func (*SessionStatus) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{8} -} - -func (x *SessionStatus) GetCode() uint32 { - if x != nil { - return x.Code - } - return 0 -} - -func (x *SessionStatus) GetDetails() string { - if x != nil { - return x.Details - } - return "" -} - -type SessionResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The local identity used during session setup. 
This could be: - // - The local identity that the client specifies in ClientSessionStartReq. - // - One of the local identities that the server specifies in - // ServerSessionStartReq. - // - If neither client nor server specifies local identities, the S2A picks the - // default one. In this case, this field will contain that identity. - // - // If the SessionResult is populated, then this must coincide with the local - // identity specified in the SessionResult; otherwise, the handshake must - // fail. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // The byte representation of the frames that should be sent to the peer. May - // be empty if nothing needs to be sent to the peer or if in_bytes in the - // SessionReq is incomplete. All bytes in a non-empty out_frames must be sent - // to the peer even if the session setup status is not OK as these frames may - // contain appropriate alerts. - OutFrames []byte `protobuf:"bytes,2,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` - // Number of bytes in the in_bytes field that are consumed by S2A. It is - // possible that part of in_bytes is unrelated to the session setup process. - BytesConsumed uint32 `protobuf:"varint,3,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` - // This is set if the session is successfully set up. out_frames may - // still be set to frames that need to be forwarded to the peer. - Result *SessionResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"` - // Status of session setup at the current stage. - Status *SessionStatus `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"` -} - -func (x *SessionResp) Reset() { - *x = SessionResp{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionResp) ProtoMessage() {} - -func (x *SessionResp) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead.
-func (*SessionResp) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{9} -} - -func (x *SessionResp) GetLocalIdentity() *common_go_proto.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *SessionResp) GetOutFrames() []byte { - if x != nil { - return x.OutFrames - } - return nil -} - -func (x *SessionResp) GetBytesConsumed() uint32 { - if x != nil { - return x.BytesConsumed - } - return 0 -} - -func (x *SessionResp) GetResult() *SessionResult { - if x != nil { - return x.Result - } - return nil -} - -func (x *SessionResp) GetStatus() *SessionStatus { - if x != nil { - return x.Status - } - return nil -} - -var File_internal_proto_s2a_s2a_proto protoreflect.FileDescriptor - -var file_internal_proto_s2a_s2a_proto_rawDesc = []byte{ - 0x0a, 0x1c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x75, 0x0a, - 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, - 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xac, 0x03, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, - 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, - 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 
0x75, - 0x69, 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, - 0x69, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x11, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, - 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, - 0x61, 0x6d, 0x65, 0x22, 0xe8, 0x02, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, - 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, - 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b, - 0x0a, 0x0e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x71, - 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 
0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x13, - 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, - 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, - 0xf4, 0x02, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, - 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, - 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x04, - 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, - 0x78, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x12, 0x4d, 0x0a, - 0x11, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x10, 0x72, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x4b, 0x0a, 0x0f, - 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x4d, - 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, - 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa0, 0x03, 0x0a, 0x0c, 0x53, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x3f, 0x0a, 0x0f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, - 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, - 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x69, 0x6e, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6f, - 0x75, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, - 0x74, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, - 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, - 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 0x75, - 0x74, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6f, 0x75, 0x74, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, - 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x22, 0xd1, 0x02, 0x0a, 0x0d, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, - 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2d, - 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, - 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x2e, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, - 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, - 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, - 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, - 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, - 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, 0x3d, 0x0a, - 0x0d, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, - 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, - 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3a, 0x0a, 0x0e, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, - 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, - 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x30, - 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x12, 0x30, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x32, 0x51, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x43, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, - 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 
0x74, - 0x6f, 0x33, -} - -var ( - file_internal_proto_s2a_s2a_proto_rawDescOnce sync.Once - file_internal_proto_s2a_s2a_proto_rawDescData = file_internal_proto_s2a_s2a_proto_rawDesc -) - -func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { - file_internal_proto_s2a_s2a_proto_rawDescOnce.Do(func() { - file_internal_proto_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_s2a_proto_rawDescData) - }) - return file_internal_proto_s2a_s2a_proto_rawDescData -} - -var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) -var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ - (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism - (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq - (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq - (*SessionNextReq)(nil), // 3: s2a.proto.SessionNextReq - (*ResumptionTicketReq)(nil), // 4: s2a.proto.ResumptionTicketReq - (*SessionReq)(nil), // 5: s2a.proto.SessionReq - (*SessionState)(nil), // 6: s2a.proto.SessionState - (*SessionResult)(nil), // 7: s2a.proto.SessionResult - (*SessionStatus)(nil), // 8: s2a.proto.SessionStatus - (*SessionResp)(nil), // 9: s2a.proto.SessionResp - (*common_go_proto.Identity)(nil), // 10: s2a.proto.Identity - (common_go_proto.TLSVersion)(0), // 11: s2a.proto.TLSVersion - (common_go_proto.Ciphersuite)(0), // 12: s2a.proto.Ciphersuite -} -var file_internal_proto_s2a_s2a_proto_depIdxs = []int32{ - 10, // 0: s2a.proto.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity - 11, // 1: s2a.proto.ClientSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion - 11, // 2: s2a.proto.ClientSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion - 12, // 3: s2a.proto.ClientSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite - 10, // 4: s2a.proto.ClientSessionStartReq.target_identities:type_name -> s2a.proto.Identity - 10, // 5: s2a.proto.ClientSessionStartReq.local_identity:type_name -> s2a.proto.Identity - 11, // 6: s2a.proto.ServerSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion - 11, // 7: s2a.proto.ServerSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion - 12, // 8: s2a.proto.ServerSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite - 10, // 9: s2a.proto.ServerSessionStartReq.local_identities:type_name -> s2a.proto.Identity - 10, // 10: s2a.proto.ResumptionTicketReq.local_identity:type_name -> s2a.proto.Identity - 1, // 11: s2a.proto.SessionReq.client_start:type_name -> s2a.proto.ClientSessionStartReq - 2, // 12: s2a.proto.SessionReq.server_start:type_name -> s2a.proto.ServerSessionStartReq - 3, // 13: s2a.proto.SessionReq.next:type_name -> s2a.proto.SessionNextReq - 4, // 14: s2a.proto.SessionReq.resumption_ticket:type_name -> s2a.proto.ResumptionTicketReq - 0, // 15: s2a.proto.SessionReq.auth_mechanisms:type_name -> s2a.proto.AuthenticationMechanism - 11, // 16: s2a.proto.SessionState.tls_version:type_name -> s2a.proto.TLSVersion - 12, // 17: s2a.proto.SessionState.tls_ciphersuite:type_name -> s2a.proto.Ciphersuite - 6, // 18: s2a.proto.SessionResult.state:type_name -> s2a.proto.SessionState - 10, // 19: s2a.proto.SessionResult.peer_identity:type_name -> s2a.proto.Identity - 10, // 20: s2a.proto.SessionResult.local_identity:type_name -> s2a.proto.Identity - 10, // 21: s2a.proto.SessionResp.local_identity:type_name -> s2a.proto.Identity - 7, // 22: s2a.proto.SessionResp.result:type_name -> s2a.proto.SessionResult - 8, // 
23: s2a.proto.SessionResp.status:type_name -> s2a.proto.SessionStatus - 5, // 24: s2a.proto.S2AService.SetUpSession:input_type -> s2a.proto.SessionReq - 9, // 25: s2a.proto.S2AService.SetUpSession:output_type -> s2a.proto.SessionResp - 25, // [25:26] is the sub-list for method output_type - 24, // [24:25] is the sub-list for method input_type - 24, // [24:24] is the sub-list for extension type_name - 24, // [24:24] is the sub-list for extension extendee - 0, // [0:24] is the sub-list for field type_name -} - -func init() { file_internal_proto_s2a_s2a_proto_init() } -func file_internal_proto_s2a_s2a_proto_init() { - if File_internal_proto_s2a_s2a_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthenticationMechanism); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientSessionStartReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerSessionStartReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionNextReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResumptionTicketReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*AuthenticationMechanism_Token)(nil), - } - 
file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ - (*SessionReq_ClientStart)(nil), - (*SessionReq_ServerStart)(nil), - (*SessionReq_Next)(nil), - (*SessionReq_ResumptionTicket)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_proto_s2a_s2a_proto_rawDesc, - NumEnums: 0, - NumMessages: 10, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_internal_proto_s2a_s2a_proto_goTypes, - DependencyIndexes: file_internal_proto_s2a_s2a_proto_depIdxs, - MessageInfos: file_internal_proto_s2a_s2a_proto_msgTypes, - }.Build() - File_internal_proto_s2a_s2a_proto = out.File - file_internal_proto_s2a_s2a_proto_rawDesc = nil - file_internal_proto_s2a_s2a_proto_goTypes = nil - file_internal_proto_s2a_s2a_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go deleted file mode 100644 index 0fa582fc87..0000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.21.12 -// source: internal/proto/s2a/s2a.proto - -package s2a_go_proto - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" -) - -// S2AServiceClient is the client API for S2AService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type S2AServiceClient interface { - // S2A service accepts a stream of session setup requests and returns a stream - // of session setup responses. The client of this service is expected to send - // exactly one client_start or server_start message followed by at least one - // next message. Applications running TLS clients can send requests with - // resumption_ticket messages only after the session is successfully set up. - // - // Every time S2A client sends a request, this service sends a response. - // However, clients do not have to wait for service response before sending - // the next request. 
- SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) -} - -type s2AServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { - return &s2AServiceClient{cc} -} - -func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) - if err != nil { - return nil, err - } - x := &s2AServiceSetUpSessionClient{stream} - return x, nil -} - -type S2AService_SetUpSessionClient interface { - Send(*SessionReq) error - Recv() (*SessionResp, error) - grpc.ClientStream -} - -type s2AServiceSetUpSessionClient struct { - grpc.ClientStream -} - -func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { - return x.ClientStream.SendMsg(m) -} - -func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { - m := new(SessionResp) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// S2AServiceServer is the server API for S2AService service. -// All implementations must embed UnimplementedS2AServiceServer -// for forward compatibility -type S2AServiceServer interface { - // S2A service accepts a stream of session setup requests and returns a stream - // of session setup responses. The client of this service is expected to send - // exactly one client_start or server_start message followed by at least one - // next message. Applications running TLS clients can send requests with - // resumption_ticket messages only after the session is successfully set up. - // - // Every time S2A client sends a request, this service sends a response. - // However, clients do not have to wait for service response before sending - // the next request. - SetUpSession(S2AService_SetUpSessionServer) error - mustEmbedUnimplementedS2AServiceServer() -} - -// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. -type UnimplementedS2AServiceServer struct { -} - -func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { - return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") -} -func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} - -// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to S2AServiceServer will -// result in compilation errors. 
-type UnsafeS2AServiceServer interface { - mustEmbedUnimplementedS2AServiceServer() -} - -func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { - s.RegisterService(&S2AService_ServiceDesc, srv) -} - -func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) -} - -type S2AService_SetUpSessionServer interface { - Send(*SessionResp) error - Recv() (*SessionReq, error) - grpc.ServerStream -} - -type s2AServiceSetUpSessionServer struct { - grpc.ServerStream -} - -func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { - return x.ServerStream.SendMsg(m) -} - -func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { - m := new(SessionReq) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var S2AService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "s2a.proto.S2AService", - HandlerType: (*S2AServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "SetUpSession", - Handler: _S2AService_SetUpSession_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "internal/proto/s2a/s2a.proto", -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go deleted file mode 100644 index c84bed9774..0000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 -// source: internal/proto/v2/common/common.proto - -package common_go_proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The TLS 1.0-1.2 ciphersuites that the application can negotiate when using -// S2A. 
-type Ciphersuite int32 - -const ( - Ciphersuite_CIPHERSUITE_UNSPECIFIED Ciphersuite = 0 - Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 1 - Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 2 - Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 3 - Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 4 - Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 5 - Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 6 -) - -// Enum value maps for Ciphersuite. -var ( - Ciphersuite_name = map[int32]string{ - 0: "CIPHERSUITE_UNSPECIFIED", - 1: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - 2: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - 3: "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", - 4: "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - 5: "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - 6: "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", - } - Ciphersuite_value = map[string]int32{ - "CIPHERSUITE_UNSPECIFIED": 0, - "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": 1, - "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": 2, - "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": 3, - "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256": 4, - "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384": 5, - "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": 6, - } -) - -func (x Ciphersuite) Enum() *Ciphersuite { - p := new(Ciphersuite) - *p = x - return p -} - -func (x Ciphersuite) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_common_common_proto_enumTypes[0].Descriptor() -} - -func (Ciphersuite) Type() protoreflect.EnumType { - return &file_internal_proto_v2_common_common_proto_enumTypes[0] -} - -func (x Ciphersuite) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Ciphersuite.Descriptor instead. -func (Ciphersuite) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} -} - -// The TLS versions supported by S2A's handshaker module. -type TLSVersion int32 - -const ( - TLSVersion_TLS_VERSION_UNSPECIFIED TLSVersion = 0 - TLSVersion_TLS_VERSION_1_0 TLSVersion = 1 - TLSVersion_TLS_VERSION_1_1 TLSVersion = 2 - TLSVersion_TLS_VERSION_1_2 TLSVersion = 3 - TLSVersion_TLS_VERSION_1_3 TLSVersion = 4 -) - -// Enum value maps for TLSVersion. 
-var ( - TLSVersion_name = map[int32]string{ - 0: "TLS_VERSION_UNSPECIFIED", - 1: "TLS_VERSION_1_0", - 2: "TLS_VERSION_1_1", - 3: "TLS_VERSION_1_2", - 4: "TLS_VERSION_1_3", - } - TLSVersion_value = map[string]int32{ - "TLS_VERSION_UNSPECIFIED": 0, - "TLS_VERSION_1_0": 1, - "TLS_VERSION_1_1": 2, - "TLS_VERSION_1_2": 3, - "TLS_VERSION_1_3": 4, - } -) - -func (x TLSVersion) Enum() *TLSVersion { - p := new(TLSVersion) - *p = x - return p -} - -func (x TLSVersion) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_common_common_proto_enumTypes[1].Descriptor() -} - -func (TLSVersion) Type() protoreflect.EnumType { - return &file_internal_proto_v2_common_common_proto_enumTypes[1] -} - -func (x TLSVersion) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use TLSVersion.Descriptor instead. -func (TLSVersion) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{1} -} - -// The side in the TLS connection. -type ConnectionSide int32 - -const ( - ConnectionSide_CONNECTION_SIDE_UNSPECIFIED ConnectionSide = 0 - ConnectionSide_CONNECTION_SIDE_CLIENT ConnectionSide = 1 - ConnectionSide_CONNECTION_SIDE_SERVER ConnectionSide = 2 -) - -// Enum value maps for ConnectionSide. -var ( - ConnectionSide_name = map[int32]string{ - 0: "CONNECTION_SIDE_UNSPECIFIED", - 1: "CONNECTION_SIDE_CLIENT", - 2: "CONNECTION_SIDE_SERVER", - } - ConnectionSide_value = map[string]int32{ - "CONNECTION_SIDE_UNSPECIFIED": 0, - "CONNECTION_SIDE_CLIENT": 1, - "CONNECTION_SIDE_SERVER": 2, - } -) - -func (x ConnectionSide) Enum() *ConnectionSide { - p := new(ConnectionSide) - *p = x - return p -} - -func (x ConnectionSide) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ConnectionSide) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_common_common_proto_enumTypes[2].Descriptor() -} - -func (ConnectionSide) Type() protoreflect.EnumType { - return &file_internal_proto_v2_common_common_proto_enumTypes[2] -} - -func (x ConnectionSide) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ConnectionSide.Descriptor instead. -func (ConnectionSide) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{2} -} - -// The ALPN protocols that the application can negotiate during a TLS handshake. -type AlpnProtocol int32 - -const ( - AlpnProtocol_ALPN_PROTOCOL_UNSPECIFIED AlpnProtocol = 0 - AlpnProtocol_ALPN_PROTOCOL_GRPC AlpnProtocol = 1 - AlpnProtocol_ALPN_PROTOCOL_HTTP2 AlpnProtocol = 2 - AlpnProtocol_ALPN_PROTOCOL_HTTP1_1 AlpnProtocol = 3 -) - -// Enum value maps for AlpnProtocol. 
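The TLSVersion values above track the standard library one-for-one. A hedged helper showing that correspondence to the crypto/tls version constants (the function is purely illustrative and is not part of s2a-go):

import (
	"crypto/tls"
	"fmt"
)

// toStdTLSVersion maps the generated TLSVersion enum onto crypto/tls
// version constants; TLS_VERSION_UNSPECIFIED has no stdlib counterpart.
func toStdTLSVersion(v TLSVersion) (uint16, error) {
	switch v {
	case TLSVersion_TLS_VERSION_1_0:
		return tls.VersionTLS10, nil
	case TLSVersion_TLS_VERSION_1_1:
		return tls.VersionTLS11, nil
	case TLSVersion_TLS_VERSION_1_2:
		return tls.VersionTLS12, nil
	case TLSVersion_TLS_VERSION_1_3:
		return tls.VersionTLS13, nil
	default:
		return 0, fmt.Errorf("no crypto/tls equivalent for %v", v)
	}
}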
-var ( - AlpnProtocol_name = map[int32]string{ - 0: "ALPN_PROTOCOL_UNSPECIFIED", - 1: "ALPN_PROTOCOL_GRPC", - 2: "ALPN_PROTOCOL_HTTP2", - 3: "ALPN_PROTOCOL_HTTP1_1", - } - AlpnProtocol_value = map[string]int32{ - "ALPN_PROTOCOL_UNSPECIFIED": 0, - "ALPN_PROTOCOL_GRPC": 1, - "ALPN_PROTOCOL_HTTP2": 2, - "ALPN_PROTOCOL_HTTP1_1": 3, - } -) - -func (x AlpnProtocol) Enum() *AlpnProtocol { - p := new(AlpnProtocol) - *p = x - return p -} - -func (x AlpnProtocol) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (AlpnProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_common_common_proto_enumTypes[3].Descriptor() -} - -func (AlpnProtocol) Type() protoreflect.EnumType { - return &file_internal_proto_v2_common_common_proto_enumTypes[3] -} - -func (x AlpnProtocol) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use AlpnProtocol.Descriptor instead. -func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} -} - -var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor - -var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ - 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, - 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, - 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, - 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, - 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, - 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, - 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, - 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, - 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, - 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, - 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 
0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, - 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, - 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, - 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, - 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, - 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, - 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, - 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, - 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, - 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, - 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, - 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_proto_v2_common_common_proto_rawDescOnce sync.Once - file_internal_proto_v2_common_common_proto_rawDescData = file_internal_proto_v2_common_common_proto_rawDesc -) - -func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { - file_internal_proto_v2_common_common_proto_rawDescOnce.Do(func() { - file_internal_proto_v2_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_common_common_proto_rawDescData) - }) - return file_internal_proto_v2_common_common_proto_rawDescData -} - -var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var 
file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ - (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite - (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion - (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide - (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol -} -var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_internal_proto_v2_common_common_proto_init() } -func file_internal_proto_v2_common_common_proto_init() { - if File_internal_proto_v2_common_common_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, - NumEnums: 4, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_internal_proto_v2_common_common_proto_goTypes, - DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, - EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, - }.Build() - File_internal_proto_v2_common_common_proto = out.File - file_internal_proto_v2_common_common_proto_rawDesc = nil - file_internal_proto_v2_common_common_proto_goTypes = nil - file_internal_proto_v2_common_common_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go deleted file mode 100644 index b7fd871c7a..0000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 -// source: internal/proto/v2/s2a_context/s2a_context.proto - -package s2a_context_go_proto - -import ( - common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type S2AContext struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The SPIFFE ID from the peer leaf certificate, if present. 
- // - // This field is only populated if the leaf certificate is a valid SPIFFE - // SVID; in particular, there is a unique URI SAN and this URI SAN is a valid - // SPIFFE ID. - LeafCertSpiffeId string `protobuf:"bytes,1,opt,name=leaf_cert_spiffe_id,json=leafCertSpiffeId,proto3" json:"leaf_cert_spiffe_id,omitempty"` - // The URIs that are present in the SubjectAltName extension of the peer leaf - // certificate. - // - // Note that the extracted URIs are not validated and may not be properly - // formatted. - LeafCertUris []string `protobuf:"bytes,2,rep,name=leaf_cert_uris,json=leafCertUris,proto3" json:"leaf_cert_uris,omitempty"` - // The DNSNames that are present in the SubjectAltName extension of the peer - // leaf certificate. - LeafCertDnsnames []string `protobuf:"bytes,3,rep,name=leaf_cert_dnsnames,json=leafCertDnsnames,proto3" json:"leaf_cert_dnsnames,omitempty"` - // The (ordered) list of fingerprints in the certificate chain used to verify - // the given leaf certificate. The order MUST be from leaf certificate - // fingerprint to root certificate fingerprint. - // - // A fingerprint is the base-64 encoding of the SHA256 hash of the - // DER-encoding of a certificate. The list MAY be populated even if the peer - // certificate chain was NOT validated successfully. - PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` - // The local identity used during session setup. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // The SHA256 hash of the DER-encoding of the local leaf certificate used in - // the handshake. - LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` -} - -func (x *S2AContext) Reset() { - *x = S2AContext{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *S2AContext) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*S2AContext) ProtoMessage() {} - -func (x *S2AContext) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. 
-func (*S2AContext) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} -} - -func (x *S2AContext) GetLeafCertSpiffeId() string { - if x != nil { - return x.LeafCertSpiffeId - } - return "" -} - -func (x *S2AContext) GetLeafCertUris() []string { - if x != nil { - return x.LeafCertUris - } - return nil -} - -func (x *S2AContext) GetLeafCertDnsnames() []string { - if x != nil { - return x.LeafCertDnsnames - } - return nil -} - -func (x *S2AContext) GetPeerCertificateChainFingerprints() []string { - if x != nil { - return x.PeerCertificateChainFingerprints - } - return nil -} - -func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *S2AContext) GetLocalLeafCertFingerprint() []byte { - if x != nil { - return x.LocalLeafCertFingerprint - } - return nil -} - -var File_internal_proto_v2_s2a_context_s2a_context_proto protoreflect.FileDescriptor - -var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, - 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, - 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, - 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, - 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, - 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, - 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x5f, 
0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, - 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, - 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce sync.Once - file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc -) - -func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { - file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { - file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData) - }) - return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData -} - -var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ - (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext - (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity -} -var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ - 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_internal_proto_v2_s2a_context_s2a_context_proto_init() } -func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { - if File_internal_proto_v2_s2a_context_s2a_context_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*S2AContext); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes, - DependencyIndexes: file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs, - MessageInfos: file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes, - }.Build() - File_internal_proto_v2_s2a_context_s2a_context_proto = out.File - file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = nil - file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = nil - file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go 
b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go deleted file mode 100644 index e843450c7e..0000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +++ /dev/null @@ -1,2494 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 -// source: internal/proto/v2/s2a/s2a.proto - -package s2a_go_proto - -import ( - common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" - common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" - s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type SignatureAlgorithm int32 - -const ( - SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED SignatureAlgorithm = 0 - // RSA Public-Key Cryptography Standards #1. - SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256 SignatureAlgorithm = 1 - SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384 SignatureAlgorithm = 2 - SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512 SignatureAlgorithm = 3 - // ECDSA. - SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256 SignatureAlgorithm = 4 - SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384 SignatureAlgorithm = 5 - SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512 SignatureAlgorithm = 6 - // RSA Probabilistic Signature Scheme. - SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256 SignatureAlgorithm = 7 - SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384 SignatureAlgorithm = 8 - SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512 SignatureAlgorithm = 9 - // ED25519. - SignatureAlgorithm_S2A_SSL_SIGN_ED25519 SignatureAlgorithm = 10 -) - -// Enum value maps for SignatureAlgorithm. 
-var ( - SignatureAlgorithm_name = map[int32]string{ - 0: "S2A_SSL_SIGN_UNSPECIFIED", - 1: "S2A_SSL_SIGN_RSA_PKCS1_SHA256", - 2: "S2A_SSL_SIGN_RSA_PKCS1_SHA384", - 3: "S2A_SSL_SIGN_RSA_PKCS1_SHA512", - 4: "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256", - 5: "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384", - 6: "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512", - 7: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256", - 8: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384", - 9: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512", - 10: "S2A_SSL_SIGN_ED25519", - } - SignatureAlgorithm_value = map[string]int32{ - "S2A_SSL_SIGN_UNSPECIFIED": 0, - "S2A_SSL_SIGN_RSA_PKCS1_SHA256": 1, - "S2A_SSL_SIGN_RSA_PKCS1_SHA384": 2, - "S2A_SSL_SIGN_RSA_PKCS1_SHA512": 3, - "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256": 4, - "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384": 5, - "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512": 6, - "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256": 7, - "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384": 8, - "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512": 9, - "S2A_SSL_SIGN_ED25519": 10, - } -) - -func (x SignatureAlgorithm) Enum() *SignatureAlgorithm { - p := new(SignatureAlgorithm) - *p = x - return p -} - -func (x SignatureAlgorithm) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (SignatureAlgorithm) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_s2a_s2a_proto_enumTypes[0].Descriptor() -} - -func (SignatureAlgorithm) Type() protoreflect.EnumType { - return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[0] -} - -func (x SignatureAlgorithm) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use SignatureAlgorithm.Descriptor instead. -func (SignatureAlgorithm) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} -} - -type GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate int32 - -const ( - GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 0 - GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 1 - GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 2 - GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 3 - GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 4 - GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 5 -) - -// Enum value maps for GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate. 
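The six RequestClientCertificate values above line up naturally with crypto/tls.ClientAuthType. A sketch of that correspondence — an illustration of the semantics, not the library's actual conversion code:

import (
	"crypto/tls"
	"fmt"
)

// toClientAuthType illustrates how the server-side client-certificate modes
// above map onto the standard library's ClientAuthType policies.
func toClientAuthType(m GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) (tls.ClientAuthType, error) {
	switch m {
	case GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE:
		return tls.NoClientCert, nil
	case GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY:
		return tls.RequestClientCert, nil
	case GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY:
		return tls.VerifyClientCertIfGiven, nil
	case GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY:
		return tls.RequireAnyClientCert, nil
	case GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY:
		return tls.RequireAndVerifyClientCert, nil
	default:
		return tls.NoClientCert, fmt.Errorf("unspecified client certificate mode: %v", m)
	}
}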
-var ( - GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "DONT_REQUEST_CLIENT_CERTIFICATE", - 2: "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", - 3: "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY", - 4: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", - 5: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY", - } - GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_value = map[string]int32{ - "UNSPECIFIED": 0, - "DONT_REQUEST_CLIENT_CERTIFICATE": 1, - "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 2, - "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY": 3, - "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 4, - "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY": 5, - } -) - -func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Enum() *GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { - p := new(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) - *p = x - return p -} - -func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_s2a_s2a_proto_enumTypes[1].Descriptor() -} - -func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Type() protoreflect.EnumType { - return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[1] -} - -func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate.Descriptor instead. -func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1, 0} -} - -type OffloadPrivateKeyOperationReq_PrivateKeyOperation int32 - -const ( - OffloadPrivateKeyOperationReq_UNSPECIFIED OffloadPrivateKeyOperationReq_PrivateKeyOperation = 0 - // When performing a TLS 1.2 or 1.3 handshake, the (partial) transcript of - // the TLS handshake must be signed to prove possession of the private key. - // - // See https://www.rfc-editor.org/rfc/rfc8446.html#section-4.4.3. - OffloadPrivateKeyOperationReq_SIGN OffloadPrivateKeyOperationReq_PrivateKeyOperation = 1 - // When performing a TLS 1.2 handshake using an RSA algorithm, the key - // exchange algorithm involves the client generating a premaster secret, - // encrypting it using the server's public key, and sending this encrypted - // blob to the server in a ClientKeyExchange message. - // - // See https://www.rfc-editor.org/rfc/rfc4346#section-7.4.7.1. - OffloadPrivateKeyOperationReq_DECRYPT OffloadPrivateKeyOperationReq_PrivateKeyOperation = 2 -) - -// Enum value maps for OffloadPrivateKeyOperationReq_PrivateKeyOperation. 
-var ( - OffloadPrivateKeyOperationReq_PrivateKeyOperation_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "SIGN", - 2: "DECRYPT", - } - OffloadPrivateKeyOperationReq_PrivateKeyOperation_value = map[string]int32{ - "UNSPECIFIED": 0, - "SIGN": 1, - "DECRYPT": 2, - } -) - -func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Enum() *OffloadPrivateKeyOperationReq_PrivateKeyOperation { - p := new(OffloadPrivateKeyOperationReq_PrivateKeyOperation) - *p = x - return p -} - -func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_s2a_s2a_proto_enumTypes[2].Descriptor() -} - -func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Type() protoreflect.EnumType { - return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[2] -} - -func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use OffloadPrivateKeyOperationReq_PrivateKeyOperation.Descriptor instead. -func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5, 0} -} - -type OffloadResumptionKeyOperationReq_ResumptionKeyOperation int32 - -const ( - OffloadResumptionKeyOperationReq_UNSPECIFIED OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 0 - OffloadResumptionKeyOperationReq_ENCRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 1 - OffloadResumptionKeyOperationReq_DECRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 2 -) - -// Enum value maps for OffloadResumptionKeyOperationReq_ResumptionKeyOperation. -var ( - OffloadResumptionKeyOperationReq_ResumptionKeyOperation_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "ENCRYPT", - 2: "DECRYPT", - } - OffloadResumptionKeyOperationReq_ResumptionKeyOperation_value = map[string]int32{ - "UNSPECIFIED": 0, - "ENCRYPT": 1, - "DECRYPT": 2, - } -) - -func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Enum() *OffloadResumptionKeyOperationReq_ResumptionKeyOperation { - p := new(OffloadResumptionKeyOperationReq_ResumptionKeyOperation) - *p = x - return p -} - -func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_s2a_s2a_proto_enumTypes[3].Descriptor() -} - -func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Type() protoreflect.EnumType { - return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[3] -} - -func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use OffloadResumptionKeyOperationReq_ResumptionKeyOperation.Descriptor instead. -func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7, 0} -} - -type ValidatePeerCertificateChainReq_VerificationMode int32 - -const ( - // The default verification mode supported by S2A. 
- ValidatePeerCertificateChainReq_UNSPECIFIED ValidatePeerCertificateChainReq_VerificationMode = 0 - // The SPIFFE verification mode selects the set of trusted certificates to - // use for path building based on the SPIFFE trust domain in the peer's leaf - // certificate. - ValidatePeerCertificateChainReq_SPIFFE ValidatePeerCertificateChainReq_VerificationMode = 1 - // The connect-to-Google verification mode uses the trust bundle for - // connecting to Google, e.g. *.mtls.googleapis.com endpoints. - ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 -) - -// Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. -var ( - ValidatePeerCertificateChainReq_VerificationMode_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "SPIFFE", - 2: "CONNECT_TO_GOOGLE", - } - ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ - "UNSPECIFIED": 0, - "SPIFFE": 1, - "CONNECT_TO_GOOGLE": 2, - } -) - -func (x ValidatePeerCertificateChainReq_VerificationMode) Enum() *ValidatePeerCertificateChainReq_VerificationMode { - p := new(ValidatePeerCertificateChainReq_VerificationMode) - *p = x - return p -} - -func (x ValidatePeerCertificateChainReq_VerificationMode) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ValidatePeerCertificateChainReq_VerificationMode) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_s2a_s2a_proto_enumTypes[4].Descriptor() -} - -func (ValidatePeerCertificateChainReq_VerificationMode) Type() protoreflect.EnumType { - return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[4] -} - -func (x ValidatePeerCertificateChainReq_VerificationMode) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ValidatePeerCertificateChainReq_VerificationMode.Descriptor instead. -func (ValidatePeerCertificateChainReq_VerificationMode) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} -} - -type ValidatePeerCertificateChainResp_ValidationResult int32 - -const ( - ValidatePeerCertificateChainResp_UNSPECIFIED ValidatePeerCertificateChainResp_ValidationResult = 0 - ValidatePeerCertificateChainResp_SUCCESS ValidatePeerCertificateChainResp_ValidationResult = 1 - ValidatePeerCertificateChainResp_FAILURE ValidatePeerCertificateChainResp_ValidationResult = 2 -) - -// Enum value maps for ValidatePeerCertificateChainResp_ValidationResult. 
-var ( - ValidatePeerCertificateChainResp_ValidationResult_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "SUCCESS", - 2: "FAILURE", - } - ValidatePeerCertificateChainResp_ValidationResult_value = map[string]int32{ - "UNSPECIFIED": 0, - "SUCCESS": 1, - "FAILURE": 2, - } -) - -func (x ValidatePeerCertificateChainResp_ValidationResult) Enum() *ValidatePeerCertificateChainResp_ValidationResult { - p := new(ValidatePeerCertificateChainResp_ValidationResult) - *p = x - return p -} - -func (x ValidatePeerCertificateChainResp_ValidationResult) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ValidatePeerCertificateChainResp_ValidationResult) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_s2a_s2a_proto_enumTypes[5].Descriptor() -} - -func (ValidatePeerCertificateChainResp_ValidationResult) Type() protoreflect.EnumType { - return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[5] -} - -func (x ValidatePeerCertificateChainResp_ValidationResult) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ValidatePeerCertificateChainResp_ValidationResult.Descriptor instead. -func (ValidatePeerCertificateChainResp_ValidationResult) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10, 0} -} - -type AlpnPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // If true, the application MUST perform ALPN negotiation. - EnableAlpnNegotiation bool `protobuf:"varint,1,opt,name=enable_alpn_negotiation,json=enableAlpnNegotiation,proto3" json:"enable_alpn_negotiation,omitempty"` - // The ordered list of ALPN protocols that specify how the application SHOULD - // negotiate ALPN during the TLS handshake. - // - // The application MAY ignore any ALPN protocols in this list that are not - // supported by the application. - AlpnProtocols []common_go_proto.AlpnProtocol `protobuf:"varint,2,rep,packed,name=alpn_protocols,json=alpnProtocols,proto3,enum=s2a.proto.v2.AlpnProtocol" json:"alpn_protocols,omitempty"` -} - -func (x *AlpnPolicy) Reset() { - *x = AlpnPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AlpnPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AlpnPolicy) ProtoMessage() {} - -func (x *AlpnPolicy) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AlpnPolicy.ProtoReflect.Descriptor instead. -func (*AlpnPolicy) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} -} - -func (x *AlpnPolicy) GetEnableAlpnNegotiation() bool { - if x != nil { - return x.EnableAlpnNegotiation - } - return false -} - -func (x *AlpnPolicy) GetAlpnProtocols() []common_go_proto.AlpnProtocol { - if x != nil { - return x.AlpnProtocols - } - return nil -} - -type AuthenticationMechanism struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Applications may specify an identity associated to an authentication - // mechanism. 
Otherwise, S2A assumes that the authentication mechanism is - // associated with the default identity. If the default identity cannot be - // determined, the request is rejected. - Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` - // Types that are assignable to MechanismOneof: - // - // *AuthenticationMechanism_Token - MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` -} - -func (x *AuthenticationMechanism) Reset() { - *x = AuthenticationMechanism{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuthenticationMechanism) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuthenticationMechanism) ProtoMessage() {} - -func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. -func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} -} - -func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { - if x != nil { - return x.Identity - } - return nil -} - -func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { - if m != nil { - return m.MechanismOneof - } - return nil -} - -func (x *AuthenticationMechanism) GetToken() string { - if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { - return x.Token - } - return "" -} - -type isAuthenticationMechanism_MechanismOneof interface { - isAuthenticationMechanism_MechanismOneof() -} - -type AuthenticationMechanism_Token struct { - // A token that the application uses to authenticate itself to S2A. - Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` -} - -func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} - -type Status struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The status code that is specific to the application and the implementation - // of S2A, e.g., gRPC status code. - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // The status details. - Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` -} - -func (x *Status) Reset() { - *x = Status{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Status) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Status) ProtoMessage() {} - -func (x *Status) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
-func (*Status) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{2} -} - -func (x *Status) GetCode() uint32 { - if x != nil { - return x.Code - } - return 0 -} - -func (x *Status) GetDetails() string { - if x != nil { - return x.Details - } - return "" -} - -type GetTlsConfigurationReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The role of the application in the TLS connection. - ConnectionSide common_go_proto.ConnectionSide `protobuf:"varint,1,opt,name=connection_side,json=connectionSide,proto3,enum=s2a.proto.v2.ConnectionSide" json:"connection_side,omitempty"` - // The server name indication (SNI) extension, which MAY be populated when a - // server is offloading to S2A. The SNI is used to determine the server - // identity if the local identity in the request is empty. - Sni string `protobuf:"bytes,2,opt,name=sni,proto3" json:"sni,omitempty"` -} - -func (x *GetTlsConfigurationReq) Reset() { - *x = GetTlsConfigurationReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetTlsConfigurationReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTlsConfigurationReq) ProtoMessage() {} - -func (x *GetTlsConfigurationReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTlsConfigurationReq.ProtoReflect.Descriptor instead. -func (*GetTlsConfigurationReq) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{3} -} - -func (x *GetTlsConfigurationReq) GetConnectionSide() common_go_proto.ConnectionSide { - if x != nil { - return x.ConnectionSide - } - return common_go_proto.ConnectionSide(0) -} - -func (x *GetTlsConfigurationReq) GetSni() string { - if x != nil { - return x.Sni - } - return "" -} - -type GetTlsConfigurationResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to TlsConfiguration: - // - // *GetTlsConfigurationResp_ClientTlsConfiguration_ - // *GetTlsConfigurationResp_ServerTlsConfiguration_ - TlsConfiguration isGetTlsConfigurationResp_TlsConfiguration `protobuf_oneof:"tls_configuration"` -} - -func (x *GetTlsConfigurationResp) Reset() { - *x = GetTlsConfigurationResp{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetTlsConfigurationResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTlsConfigurationResp) ProtoMessage() {} - -func (x *GetTlsConfigurationResp) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTlsConfigurationResp.ProtoReflect.Descriptor instead. 
-func (*GetTlsConfigurationResp) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4} -} - -func (m *GetTlsConfigurationResp) GetTlsConfiguration() isGetTlsConfigurationResp_TlsConfiguration { - if m != nil { - return m.TlsConfiguration - } - return nil -} - -func (x *GetTlsConfigurationResp) GetClientTlsConfiguration() *GetTlsConfigurationResp_ClientTlsConfiguration { - if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ClientTlsConfiguration_); ok { - return x.ClientTlsConfiguration - } - return nil -} - -func (x *GetTlsConfigurationResp) GetServerTlsConfiguration() *GetTlsConfigurationResp_ServerTlsConfiguration { - if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ServerTlsConfiguration_); ok { - return x.ServerTlsConfiguration - } - return nil -} - -type isGetTlsConfigurationResp_TlsConfiguration interface { - isGetTlsConfigurationResp_TlsConfiguration() -} - -type GetTlsConfigurationResp_ClientTlsConfiguration_ struct { - ClientTlsConfiguration *GetTlsConfigurationResp_ClientTlsConfiguration `protobuf:"bytes,1,opt,name=client_tls_configuration,json=clientTlsConfiguration,proto3,oneof"` -} - -type GetTlsConfigurationResp_ServerTlsConfiguration_ struct { - ServerTlsConfiguration *GetTlsConfigurationResp_ServerTlsConfiguration `protobuf:"bytes,2,opt,name=server_tls_configuration,json=serverTlsConfiguration,proto3,oneof"` -} - -func (*GetTlsConfigurationResp_ClientTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { -} - -func (*GetTlsConfigurationResp_ServerTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { -} - -type OffloadPrivateKeyOperationReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The operation the private key is used for. - Operation OffloadPrivateKeyOperationReq_PrivateKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadPrivateKeyOperationReq_PrivateKeyOperation" json:"operation,omitempty"` - // The signature algorithm to be used for signing operations. - SignatureAlgorithm SignatureAlgorithm `protobuf:"varint,2,opt,name=signature_algorithm,json=signatureAlgorithm,proto3,enum=s2a.proto.v2.SignatureAlgorithm" json:"signature_algorithm,omitempty"` - // The input bytes to be signed or decrypted. - // - // Types that are assignable to InBytes: - // - // *OffloadPrivateKeyOperationReq_RawBytes - // *OffloadPrivateKeyOperationReq_Sha256Digest - // *OffloadPrivateKeyOperationReq_Sha384Digest - // *OffloadPrivateKeyOperationReq_Sha512Digest - InBytes isOffloadPrivateKeyOperationReq_InBytes `protobuf_oneof:"in_bytes"` -} - -func (x *OffloadPrivateKeyOperationReq) Reset() { - *x = OffloadPrivateKeyOperationReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OffloadPrivateKeyOperationReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OffloadPrivateKeyOperationReq) ProtoMessage() {} - -func (x *OffloadPrivateKeyOperationReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OffloadPrivateKeyOperationReq.ProtoReflect.Descriptor instead. 
-func (*OffloadPrivateKeyOperationReq) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5} -} - -func (x *OffloadPrivateKeyOperationReq) GetOperation() OffloadPrivateKeyOperationReq_PrivateKeyOperation { - if x != nil { - return x.Operation - } - return OffloadPrivateKeyOperationReq_UNSPECIFIED -} - -func (x *OffloadPrivateKeyOperationReq) GetSignatureAlgorithm() SignatureAlgorithm { - if x != nil { - return x.SignatureAlgorithm - } - return SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED -} - -func (m *OffloadPrivateKeyOperationReq) GetInBytes() isOffloadPrivateKeyOperationReq_InBytes { - if m != nil { - return m.InBytes - } - return nil -} - -func (x *OffloadPrivateKeyOperationReq) GetRawBytes() []byte { - if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_RawBytes); ok { - return x.RawBytes - } - return nil -} - -func (x *OffloadPrivateKeyOperationReq) GetSha256Digest() []byte { - if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha256Digest); ok { - return x.Sha256Digest - } - return nil -} - -func (x *OffloadPrivateKeyOperationReq) GetSha384Digest() []byte { - if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha384Digest); ok { - return x.Sha384Digest - } - return nil -} - -func (x *OffloadPrivateKeyOperationReq) GetSha512Digest() []byte { - if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha512Digest); ok { - return x.Sha512Digest - } - return nil -} - -type isOffloadPrivateKeyOperationReq_InBytes interface { - isOffloadPrivateKeyOperationReq_InBytes() -} - -type OffloadPrivateKeyOperationReq_RawBytes struct { - // Raw bytes to be hashed and signed, or decrypted. - RawBytes []byte `protobuf:"bytes,4,opt,name=raw_bytes,json=rawBytes,proto3,oneof"` -} - -type OffloadPrivateKeyOperationReq_Sha256Digest struct { - // A SHA256 hash to be signed. Must be 32 bytes. - Sha256Digest []byte `protobuf:"bytes,5,opt,name=sha256_digest,json=sha256Digest,proto3,oneof"` -} - -type OffloadPrivateKeyOperationReq_Sha384Digest struct { - // A SHA384 hash to be signed. Must be 48 bytes. - Sha384Digest []byte `protobuf:"bytes,6,opt,name=sha384_digest,json=sha384Digest,proto3,oneof"` -} - -type OffloadPrivateKeyOperationReq_Sha512Digest struct { - // A SHA512 hash to be signed. Must be 64 bytes. - Sha512Digest []byte `protobuf:"bytes,7,opt,name=sha512_digest,json=sha512Digest,proto3,oneof"` -} - -func (*OffloadPrivateKeyOperationReq_RawBytes) isOffloadPrivateKeyOperationReq_InBytes() {} - -func (*OffloadPrivateKeyOperationReq_Sha256Digest) isOffloadPrivateKeyOperationReq_InBytes() {} - -func (*OffloadPrivateKeyOperationReq_Sha384Digest) isOffloadPrivateKeyOperationReq_InBytes() {} - -func (*OffloadPrivateKeyOperationReq_Sha512Digest) isOffloadPrivateKeyOperationReq_InBytes() {} - -type OffloadPrivateKeyOperationResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The signed or decrypted output bytes. 
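The digest variants above carry fixed-size hashes rather than raw input: 32, 48, and 64 bytes for SHA256, SHA384, and SHA512 respectively. A minimal stdlib-only sketch of building a SIGN request with a precomputed SHA-256 digest — the helper name is illustrative, but note that sha256.Sum256 satisfies the 32-byte contract by construction:

import (
	"crypto/sha256"
)

// newSignSHA256Req shows the in_bytes oneof in use: the digest placed in
// Sha256Digest must be exactly 32 bytes.
func newSignSHA256Req(toBeSigned []byte) *OffloadPrivateKeyOperationReq {
	digest := sha256.Sum256(toBeSigned) // [32]byte
	return &OffloadPrivateKeyOperationReq{
		Operation:          OffloadPrivateKeyOperationReq_SIGN,
		SignatureAlgorithm: SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256,
		InBytes: &OffloadPrivateKeyOperationReq_Sha256Digest{
			Sha256Digest: digest[:],
		},
	}
}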
- OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` -} - -func (x *OffloadPrivateKeyOperationResp) Reset() { - *x = OffloadPrivateKeyOperationResp{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OffloadPrivateKeyOperationResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OffloadPrivateKeyOperationResp) ProtoMessage() {} - -func (x *OffloadPrivateKeyOperationResp) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OffloadPrivateKeyOperationResp.ProtoReflect.Descriptor instead. -func (*OffloadPrivateKeyOperationResp) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{6} -} - -func (x *OffloadPrivateKeyOperationResp) GetOutBytes() []byte { - if x != nil { - return x.OutBytes - } - return nil -} - -type OffloadResumptionKeyOperationReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The operation the resumption key is used for. - Operation OffloadResumptionKeyOperationReq_ResumptionKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadResumptionKeyOperationReq_ResumptionKeyOperation" json:"operation,omitempty"` - // The bytes to be encrypted or decrypted. - InBytes []byte `protobuf:"bytes,2,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` -} - -func (x *OffloadResumptionKeyOperationReq) Reset() { - *x = OffloadResumptionKeyOperationReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OffloadResumptionKeyOperationReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OffloadResumptionKeyOperationReq) ProtoMessage() {} - -func (x *OffloadResumptionKeyOperationReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OffloadResumptionKeyOperationReq.ProtoReflect.Descriptor instead. -func (*OffloadResumptionKeyOperationReq) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7} -} - -func (x *OffloadResumptionKeyOperationReq) GetOperation() OffloadResumptionKeyOperationReq_ResumptionKeyOperation { - if x != nil { - return x.Operation - } - return OffloadResumptionKeyOperationReq_UNSPECIFIED -} - -func (x *OffloadResumptionKeyOperationReq) GetInBytes() []byte { - if x != nil { - return x.InBytes - } - return nil -} - -type OffloadResumptionKeyOperationResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The encrypted or decrypted bytes. 
- OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` -} - -func (x *OffloadResumptionKeyOperationResp) Reset() { - *x = OffloadResumptionKeyOperationResp{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OffloadResumptionKeyOperationResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OffloadResumptionKeyOperationResp) ProtoMessage() {} - -func (x *OffloadResumptionKeyOperationResp) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OffloadResumptionKeyOperationResp.ProtoReflect.Descriptor instead. -func (*OffloadResumptionKeyOperationResp) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{8} -} - -func (x *OffloadResumptionKeyOperationResp) GetOutBytes() []byte { - if x != nil { - return x.OutBytes - } - return nil -} - -type ValidatePeerCertificateChainReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The verification mode that S2A MUST use to validate the peer certificate - // chain. - Mode ValidatePeerCertificateChainReq_VerificationMode `protobuf:"varint,1,opt,name=mode,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainReq_VerificationMode" json:"mode,omitempty"` - // Types that are assignable to PeerOneof: - // - // *ValidatePeerCertificateChainReq_ClientPeer_ - // *ValidatePeerCertificateChainReq_ServerPeer_ - PeerOneof isValidatePeerCertificateChainReq_PeerOneof `protobuf_oneof:"peer_oneof"` -} - -func (x *ValidatePeerCertificateChainReq) Reset() { - *x = ValidatePeerCertificateChainReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidatePeerCertificateChainReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidatePeerCertificateChainReq) ProtoMessage() {} - -func (x *ValidatePeerCertificateChainReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidatePeerCertificateChainReq.ProtoReflect.Descriptor instead. 
-func (*ValidatePeerCertificateChainReq) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9} -} - -func (x *ValidatePeerCertificateChainReq) GetMode() ValidatePeerCertificateChainReq_VerificationMode { - if x != nil { - return x.Mode - } - return ValidatePeerCertificateChainReq_UNSPECIFIED -} - -func (m *ValidatePeerCertificateChainReq) GetPeerOneof() isValidatePeerCertificateChainReq_PeerOneof { - if m != nil { - return m.PeerOneof - } - return nil -} - -func (x *ValidatePeerCertificateChainReq) GetClientPeer() *ValidatePeerCertificateChainReq_ClientPeer { - if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ClientPeer_); ok { - return x.ClientPeer - } - return nil -} - -func (x *ValidatePeerCertificateChainReq) GetServerPeer() *ValidatePeerCertificateChainReq_ServerPeer { - if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ServerPeer_); ok { - return x.ServerPeer - } - return nil -} - -type isValidatePeerCertificateChainReq_PeerOneof interface { - isValidatePeerCertificateChainReq_PeerOneof() -} - -type ValidatePeerCertificateChainReq_ClientPeer_ struct { - ClientPeer *ValidatePeerCertificateChainReq_ClientPeer `protobuf:"bytes,2,opt,name=client_peer,json=clientPeer,proto3,oneof"` -} - -type ValidatePeerCertificateChainReq_ServerPeer_ struct { - ServerPeer *ValidatePeerCertificateChainReq_ServerPeer `protobuf:"bytes,3,opt,name=server_peer,json=serverPeer,proto3,oneof"` -} - -func (*ValidatePeerCertificateChainReq_ClientPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} - -func (*ValidatePeerCertificateChainReq_ServerPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} - -type ValidatePeerCertificateChainResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The result of validating the peer certificate chain. - ValidationResult ValidatePeerCertificateChainResp_ValidationResult `protobuf:"varint,1,opt,name=validation_result,json=validationResult,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainResp_ValidationResult" json:"validation_result,omitempty"` - // The validation details. This field is only populated when the validation - // result is NOT SUCCESS. - ValidationDetails string `protobuf:"bytes,2,opt,name=validation_details,json=validationDetails,proto3" json:"validation_details,omitempty"` - // The S2A context contains information from the peer certificate chain. - // - // The S2A context MAY be populated even if validation of the peer certificate - // chain fails. 
- Context *s2a_context_go_proto.S2AContext `protobuf:"bytes,3,opt,name=context,proto3" json:"context,omitempty"` -} - -func (x *ValidatePeerCertificateChainResp) Reset() { - *x = ValidatePeerCertificateChainResp{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidatePeerCertificateChainResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidatePeerCertificateChainResp) ProtoMessage() {} - -func (x *ValidatePeerCertificateChainResp) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidatePeerCertificateChainResp.ProtoReflect.Descriptor instead. -func (*ValidatePeerCertificateChainResp) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10} -} - -func (x *ValidatePeerCertificateChainResp) GetValidationResult() ValidatePeerCertificateChainResp_ValidationResult { - if x != nil { - return x.ValidationResult - } - return ValidatePeerCertificateChainResp_UNSPECIFIED -} - -func (x *ValidatePeerCertificateChainResp) GetValidationDetails() string { - if x != nil { - return x.ValidationDetails - } - return "" -} - -func (x *ValidatePeerCertificateChainResp) GetContext() *s2a_context_go_proto.S2AContext { - if x != nil { - return x.Context - } - return nil -} - -type SessionReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The identity corresponding to the TLS configurations that MUST be used for - // the TLS handshake. - // - // If a managed identity already exists, the local identity and authentication - // mechanisms are ignored. If a managed identity doesn't exist and the local - // identity is not populated, S2A will try to deduce the managed identity to - // use from the SNI extension. If that also fails, S2A uses the default - // identity (if one exists). - LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // The authentication mechanisms that the application wishes to use to - // authenticate to S2A, ordered by preference. S2A will always use the first - // authentication mechanism that matches the managed identity. 
- AuthenticationMechanisms []*AuthenticationMechanism `protobuf:"bytes,2,rep,name=authentication_mechanisms,json=authenticationMechanisms,proto3" json:"authentication_mechanisms,omitempty"` - // Types that are assignable to ReqOneof: - // - // *SessionReq_GetTlsConfigurationReq - // *SessionReq_OffloadPrivateKeyOperationReq - // *SessionReq_OffloadResumptionKeyOperationReq - // *SessionReq_ValidatePeerCertificateChainReq - ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` -} - -func (x *SessionReq) Reset() { - *x = SessionReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionReq) ProtoMessage() {} - -func (x *SessionReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. -func (*SessionReq) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} -} - -func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *SessionReq) GetAuthenticationMechanisms() []*AuthenticationMechanism { - if x != nil { - return x.AuthenticationMechanisms - } - return nil -} - -func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { - if m != nil { - return m.ReqOneof - } - return nil -} - -func (x *SessionReq) GetGetTlsConfigurationReq() *GetTlsConfigurationReq { - if x, ok := x.GetReqOneof().(*SessionReq_GetTlsConfigurationReq); ok { - return x.GetTlsConfigurationReq - } - return nil -} - -func (x *SessionReq) GetOffloadPrivateKeyOperationReq() *OffloadPrivateKeyOperationReq { - if x, ok := x.GetReqOneof().(*SessionReq_OffloadPrivateKeyOperationReq); ok { - return x.OffloadPrivateKeyOperationReq - } - return nil -} - -func (x *SessionReq) GetOffloadResumptionKeyOperationReq() *OffloadResumptionKeyOperationReq { - if x, ok := x.GetReqOneof().(*SessionReq_OffloadResumptionKeyOperationReq); ok { - return x.OffloadResumptionKeyOperationReq - } - return nil -} - -func (x *SessionReq) GetValidatePeerCertificateChainReq() *ValidatePeerCertificateChainReq { - if x, ok := x.GetReqOneof().(*SessionReq_ValidatePeerCertificateChainReq); ok { - return x.ValidatePeerCertificateChainReq - } - return nil -} - -type isSessionReq_ReqOneof interface { - isSessionReq_ReqOneof() -} - -type SessionReq_GetTlsConfigurationReq struct { - // Requests the certificate chain and TLS configuration corresponding to the - // local identity, which the application MUST use to negotiate the TLS - // handshake. - GetTlsConfigurationReq *GetTlsConfigurationReq `protobuf:"bytes,3,opt,name=get_tls_configuration_req,json=getTlsConfigurationReq,proto3,oneof"` -} - -type SessionReq_OffloadPrivateKeyOperationReq struct { - // Signs or decrypts the input bytes using a private key corresponding to - // the local identity in the request. - // - // WARNING: More than one OffloadPrivateKeyOperationReq may be sent to the - // S2Av2 by a server during a TLS 1.2 handshake. 
- OffloadPrivateKeyOperationReq *OffloadPrivateKeyOperationReq `protobuf:"bytes,4,opt,name=offload_private_key_operation_req,json=offloadPrivateKeyOperationReq,proto3,oneof"` -} - -type SessionReq_OffloadResumptionKeyOperationReq struct { - // Encrypts or decrypts the input bytes using a resumption key corresponding - // to the local identity in the request. - OffloadResumptionKeyOperationReq *OffloadResumptionKeyOperationReq `protobuf:"bytes,5,opt,name=offload_resumption_key_operation_req,json=offloadResumptionKeyOperationReq,proto3,oneof"` -} - -type SessionReq_ValidatePeerCertificateChainReq struct { - // Verifies the peer's certificate chain using - // (a) trust bundles corresponding to the local identity in the request, and - // (b) the verification mode in the request. - ValidatePeerCertificateChainReq *ValidatePeerCertificateChainReq `protobuf:"bytes,6,opt,name=validate_peer_certificate_chain_req,json=validatePeerCertificateChainReq,proto3,oneof"` -} - -func (*SessionReq_GetTlsConfigurationReq) isSessionReq_ReqOneof() {} - -func (*SessionReq_OffloadPrivateKeyOperationReq) isSessionReq_ReqOneof() {} - -func (*SessionReq_OffloadResumptionKeyOperationReq) isSessionReq_ReqOneof() {} - -func (*SessionReq_ValidatePeerCertificateChainReq) isSessionReq_ReqOneof() {} - -type SessionResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Status of the session response. - // - // The status field is populated so that if an error occurs when making an - // individual request, then communication with the S2A may continue. If an - // error is returned directly (e.g. at the gRPC layer), then it may result - // that the bidirectional stream being closed. - Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - // Types that are assignable to RespOneof: - // - // *SessionResp_GetTlsConfigurationResp - // *SessionResp_OffloadPrivateKeyOperationResp - // *SessionResp_OffloadResumptionKeyOperationResp - // *SessionResp_ValidatePeerCertificateChainResp - RespOneof isSessionResp_RespOneof `protobuf_oneof:"resp_oneof"` -} - -func (x *SessionResp) Reset() { - *x = SessionResp{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionResp) ProtoMessage() {} - -func (x *SessionResp) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead. 
-func (*SessionResp) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{12} -} - -func (x *SessionResp) GetStatus() *Status { - if x != nil { - return x.Status - } - return nil -} - -func (m *SessionResp) GetRespOneof() isSessionResp_RespOneof { - if m != nil { - return m.RespOneof - } - return nil -} - -func (x *SessionResp) GetGetTlsConfigurationResp() *GetTlsConfigurationResp { - if x, ok := x.GetRespOneof().(*SessionResp_GetTlsConfigurationResp); ok { - return x.GetTlsConfigurationResp - } - return nil -} - -func (x *SessionResp) GetOffloadPrivateKeyOperationResp() *OffloadPrivateKeyOperationResp { - if x, ok := x.GetRespOneof().(*SessionResp_OffloadPrivateKeyOperationResp); ok { - return x.OffloadPrivateKeyOperationResp - } - return nil -} - -func (x *SessionResp) GetOffloadResumptionKeyOperationResp() *OffloadResumptionKeyOperationResp { - if x, ok := x.GetRespOneof().(*SessionResp_OffloadResumptionKeyOperationResp); ok { - return x.OffloadResumptionKeyOperationResp - } - return nil -} - -func (x *SessionResp) GetValidatePeerCertificateChainResp() *ValidatePeerCertificateChainResp { - if x, ok := x.GetRespOneof().(*SessionResp_ValidatePeerCertificateChainResp); ok { - return x.ValidatePeerCertificateChainResp - } - return nil -} - -type isSessionResp_RespOneof interface { - isSessionResp_RespOneof() -} - -type SessionResp_GetTlsConfigurationResp struct { - // Contains the certificate chain and TLS configurations corresponding to - // the local identity. - GetTlsConfigurationResp *GetTlsConfigurationResp `protobuf:"bytes,2,opt,name=get_tls_configuration_resp,json=getTlsConfigurationResp,proto3,oneof"` -} - -type SessionResp_OffloadPrivateKeyOperationResp struct { - // Contains the signed or encrypted output bytes using the private key - // corresponding to the local identity. - OffloadPrivateKeyOperationResp *OffloadPrivateKeyOperationResp `protobuf:"bytes,3,opt,name=offload_private_key_operation_resp,json=offloadPrivateKeyOperationResp,proto3,oneof"` -} - -type SessionResp_OffloadResumptionKeyOperationResp struct { - // Contains the encrypted or decrypted output bytes using the resumption key - // corresponding to the local identity. - OffloadResumptionKeyOperationResp *OffloadResumptionKeyOperationResp `protobuf:"bytes,4,opt,name=offload_resumption_key_operation_resp,json=offloadResumptionKeyOperationResp,proto3,oneof"` -} - -type SessionResp_ValidatePeerCertificateChainResp struct { - // Contains the validation result, peer identity and fingerprints of peer - // certificates. - ValidatePeerCertificateChainResp *ValidatePeerCertificateChainResp `protobuf:"bytes,5,opt,name=validate_peer_certificate_chain_resp,json=validatePeerCertificateChainResp,proto3,oneof"` -} - -func (*SessionResp_GetTlsConfigurationResp) isSessionResp_RespOneof() {} - -func (*SessionResp_OffloadPrivateKeyOperationResp) isSessionResp_RespOneof() {} - -func (*SessionResp_OffloadResumptionKeyOperationResp) isSessionResp_RespOneof() {} - -func (*SessionResp_ValidatePeerCertificateChainResp) isSessionResp_RespOneof() {} - -// Next ID: 8 -type GetTlsConfigurationResp_ClientTlsConfiguration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The certificate chain that the client MUST use for the TLS handshake. - // It's a list of PEM-encoded certificates, ordered from leaf to root, - // excluding the root. 
- CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` - // The minimum TLS version number that the client MUST use for the TLS - // handshake. If this field is not provided, the client MUST use the default - // minimum version of the client's TLS library. - MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` - // The maximum TLS version number that the client MUST use for the TLS - // handshake. If this field is not provided, the client MUST use the default - // maximum version of the client's TLS library. - MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` - // The ordered list of TLS 1.0-1.2 ciphersuites that the client MAY offer to - // negotiate in the TLS handshake. - Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,6,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` - // The policy that dictates how the client negotiates ALPN during the TLS - // handshake. - AlpnPolicy *AlpnPolicy `protobuf:"bytes,7,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) Reset() { - *x = GetTlsConfigurationResp_ClientTlsConfiguration{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTlsConfigurationResp_ClientTlsConfiguration) ProtoMessage() {} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTlsConfigurationResp_ClientTlsConfiguration.ProtoReflect.Descriptor instead. 
-func (*GetTlsConfigurationResp_ClientTlsConfiguration) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 0} -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCertificateChain() []string { - if x != nil { - return x.CertificateChain - } - return nil -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MinTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MaxTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { - if x != nil { - return x.Ciphersuites - } - return nil -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { - if x != nil { - return x.AlpnPolicy - } - return nil -} - -// Next ID: 12 -type GetTlsConfigurationResp_ServerTlsConfiguration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The certificate chain that the server MUST use for the TLS handshake. - // It's a list of PEM-encoded certificates, ordered from leaf to root, - // excluding the root. - CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` - // The minimum TLS version number that the server MUST use for the TLS - // handshake. If this field is not provided, the server MUST use the default - // minimum version of the server's TLS library. - MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` - // The maximum TLS version number that the server MUST use for the TLS - // handshake. If this field is not provided, the server MUST use the default - // maximum version of the server's TLS library. - MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` - // The ordered list of TLS 1.0-1.2 ciphersuites that the server MAY offer to - // negotiate in the TLS handshake. - Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,10,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` - // Whether to enable TLS resumption. - TlsResumptionEnabled bool `protobuf:"varint,6,opt,name=tls_resumption_enabled,json=tlsResumptionEnabled,proto3" json:"tls_resumption_enabled,omitempty"` - // Whether the server MUST request a client certificate (i.e. to negotiate - // TLS vs. mTLS). - RequestClientCertificate GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate `protobuf:"varint,7,opt,name=request_client_certificate,json=requestClientCertificate,proto3,enum=s2a.proto.v2.GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate" json:"request_client_certificate,omitempty"` - // Returns the maximum number of extra bytes that - // |OffloadResumptionKeyOperation| can add to the number of unencrypted - // bytes to form the encrypted bytes. 
- MaxOverheadOfTicketAead uint32 `protobuf:"varint,9,opt,name=max_overhead_of_ticket_aead,json=maxOverheadOfTicketAead,proto3" json:"max_overhead_of_ticket_aead,omitempty"` - // The policy that dictates how the server negotiates ALPN during the TLS - // handshake. - AlpnPolicy *AlpnPolicy `protobuf:"bytes,11,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) Reset() { - *x = GetTlsConfigurationResp_ServerTlsConfiguration{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTlsConfigurationResp_ServerTlsConfiguration) ProtoMessage() {} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration.ProtoReflect.Descriptor instead. -func (*GetTlsConfigurationResp_ServerTlsConfiguration) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1} -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCertificateChain() []string { - if x != nil { - return x.CertificateChain - } - return nil -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MinTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MaxTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { - if x != nil { - return x.Ciphersuites - } - return nil -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetTlsResumptionEnabled() bool { - if x != nil { - return x.TlsResumptionEnabled - } - return false -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetRequestClientCertificate() GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { - if x != nil { - return x.RequestClientCertificate - } - return GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxOverheadOfTicketAead() uint32 { - if x != nil { - return x.MaxOverheadOfTicketAead - } - return 0 -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { - if x != nil { - return x.AlpnPolicy - } - return nil -} - -type ValidatePeerCertificateChainReq_ClientPeer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The certificate chain to be verified. The chain MUST be a list of - // DER-encoded certificates, ordered from leaf to root, excluding the root. 
- CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` -} - -func (x *ValidatePeerCertificateChainReq_ClientPeer) Reset() { - *x = ValidatePeerCertificateChainReq_ClientPeer{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidatePeerCertificateChainReq_ClientPeer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidatePeerCertificateChainReq_ClientPeer) ProtoMessage() {} - -func (x *ValidatePeerCertificateChainReq_ClientPeer) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidatePeerCertificateChainReq_ClientPeer.ProtoReflect.Descriptor instead. -func (*ValidatePeerCertificateChainReq_ClientPeer) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} -} - -func (x *ValidatePeerCertificateChainReq_ClientPeer) GetCertificateChain() [][]byte { - if x != nil { - return x.CertificateChain - } - return nil -} - -type ValidatePeerCertificateChainReq_ServerPeer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The certificate chain to be verified. The chain MUST be a list of - // DER-encoded certificates, ordered from leaf to root, excluding the root. - CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` - // The expected hostname of the server. - ServerHostname string `protobuf:"bytes,2,opt,name=server_hostname,json=serverHostname,proto3" json:"server_hostname,omitempty"` - // The UnrestrictedClientPolicy specified by the user. - SerializedUnrestrictedClientPolicy []byte `protobuf:"bytes,3,opt,name=serialized_unrestricted_client_policy,json=serializedUnrestrictedClientPolicy,proto3" json:"serialized_unrestricted_client_policy,omitempty"` -} - -func (x *ValidatePeerCertificateChainReq_ServerPeer) Reset() { - *x = ValidatePeerCertificateChainReq_ServerPeer{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidatePeerCertificateChainReq_ServerPeer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidatePeerCertificateChainReq_ServerPeer) ProtoMessage() {} - -func (x *ValidatePeerCertificateChainReq_ServerPeer) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidatePeerCertificateChainReq_ServerPeer.ProtoReflect.Descriptor instead. 
-func (*ValidatePeerCertificateChainReq_ServerPeer) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 1} -} - -func (x *ValidatePeerCertificateChainReq_ServerPeer) GetCertificateChain() [][]byte { - if x != nil { - return x.CertificateChain - } - return nil -} - -func (x *ValidatePeerCertificateChainReq_ServerPeer) GetServerHostname() string { - if x != nil { - return x.ServerHostname - } - return "" -} - -func (x *ValidatePeerCertificateChainReq_ServerPeer) GetSerializedUnrestrictedClientPolicy() []byte { - if x != nil { - return x.SerializedUnrestrictedClientPolicy - } - return nil -} - -var File_internal_proto_v2_s2a_s2a_proto protoreflect.FileDescriptor - -var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, - 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, - 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 
0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, - 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, - 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, - 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, - 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, - 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, - 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, - 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, - 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, - 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 
0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, - 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, - 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, - 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, - 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, - 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, - 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, - 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, - 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, - 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, - 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, - 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, - 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, - 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 
0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, - 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, - 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, - 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, - 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, - 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, - 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, - 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, - 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, - 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, - 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, - 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x5b, - 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, - 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 
0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, - 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, - 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, - 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, - 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, - 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, - 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, - 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 
0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, - 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, - 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, - 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, - 0x61, 
0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, - 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, - 0x72, 0x65, 0x73, 0x70, 0x5f, 
0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, - 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, - 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, - 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, - 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, - 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, - 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, - 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, - 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, - 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, - 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, - 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, - 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, - 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, - 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, - 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, - 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, - 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, - 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, - 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, - 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - 
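The accessor that follows gzips the raw descriptor at most once and then serves the cached bytes. A minimal standalone sketch of the same sync.Once pattern, using compress/gzip from the standard library in place of protoimpl.X.CompressGZIP:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var (
	rawDesc     = []byte("raw descriptor bytes (illustrative only)")
	rawDescOnce sync.Once
	rawDescData = rawDesc
)

// rawDescGZIP compresses rawDesc on the first call and returns the cached
// compressed form on every later call, mirroring the generated
// file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP accessor below.
func rawDescGZIP() []byte {
	rawDescOnce.Do(func() {
		var buf bytes.Buffer
		zw := gzip.NewWriter(&buf)
		_, _ = zw.Write(rawDescData) // errors ignored for brevity in this sketch
		_ = zw.Close()
		rawDescData = buf.Bytes()
	})
	return rawDescData
}

func main() {
	a, b := rawDescGZIP(), rawDescGZIP()
	fmt.Println(&a[0] == &b[0]) // true: both calls return the same cached slice
}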
-var ( - file_internal_proto_v2_s2a_s2a_proto_rawDescOnce sync.Once - file_internal_proto_v2_s2a_s2a_proto_rawDescData = file_internal_proto_v2_s2a_s2a_proto_rawDesc -) - -func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { - file_internal_proto_v2_s2a_s2a_proto_rawDescOnce.Do(func() { - file_internal_proto_v2_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_s2a_proto_rawDescData) - }) - return file_internal_proto_v2_s2a_s2a_proto_rawDescData -} - -var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) -var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ - (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm - (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate - (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation - (OffloadResumptionKeyOperationReq_ResumptionKeyOperation)(0), // 3: s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation - (ValidatePeerCertificateChainReq_VerificationMode)(0), // 4: s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode - (ValidatePeerCertificateChainResp_ValidationResult)(0), // 5: s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult - (*AlpnPolicy)(nil), // 6: s2a.proto.v2.AlpnPolicy - (*AuthenticationMechanism)(nil), // 7: s2a.proto.v2.AuthenticationMechanism - (*Status)(nil), // 8: s2a.proto.v2.Status - (*GetTlsConfigurationReq)(nil), // 9: s2a.proto.v2.GetTlsConfigurationReq - (*GetTlsConfigurationResp)(nil), // 10: s2a.proto.v2.GetTlsConfigurationResp - (*OffloadPrivateKeyOperationReq)(nil), // 11: s2a.proto.v2.OffloadPrivateKeyOperationReq - (*OffloadPrivateKeyOperationResp)(nil), // 12: s2a.proto.v2.OffloadPrivateKeyOperationResp - (*OffloadResumptionKeyOperationReq)(nil), // 13: s2a.proto.v2.OffloadResumptionKeyOperationReq - (*OffloadResumptionKeyOperationResp)(nil), // 14: s2a.proto.v2.OffloadResumptionKeyOperationResp - (*ValidatePeerCertificateChainReq)(nil), // 15: s2a.proto.v2.ValidatePeerCertificateChainReq - (*ValidatePeerCertificateChainResp)(nil), // 16: s2a.proto.v2.ValidatePeerCertificateChainResp - (*SessionReq)(nil), // 17: s2a.proto.v2.SessionReq - (*SessionResp)(nil), // 18: s2a.proto.v2.SessionResp - (*GetTlsConfigurationResp_ClientTlsConfiguration)(nil), // 19: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration - (*GetTlsConfigurationResp_ServerTlsConfiguration)(nil), // 20: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration - (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer - (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer - (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol - (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity - (common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide - (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext - (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion - (common_go_proto.Ciphersuite)(0), // 28: s2a.proto.v2.Ciphersuite -} -var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ - 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol - 
24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity - 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide - 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration - 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration - 2, // 5: s2a.proto.v2.OffloadPrivateKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation - 0, // 6: s2a.proto.v2.OffloadPrivateKeyOperationReq.signature_algorithm:type_name -> s2a.proto.v2.SignatureAlgorithm - 3, // 7: s2a.proto.v2.OffloadResumptionKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation - 4, // 8: s2a.proto.v2.ValidatePeerCertificateChainReq.mode:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode - 21, // 9: s2a.proto.v2.ValidatePeerCertificateChainReq.client_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer - 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer - 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult - 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext - 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity - 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism - 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq - 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq - 13, // 17: s2a.proto.v2.SessionReq.offload_resumption_key_operation_req:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq - 15, // 18: s2a.proto.v2.SessionReq.validate_peer_certificate_chain_req:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq - 8, // 19: s2a.proto.v2.SessionResp.status:type_name -> s2a.proto.v2.Status - 10, // 20: s2a.proto.v2.SessionResp.get_tls_configuration_resp:type_name -> s2a.proto.v2.GetTlsConfigurationResp - 12, // 21: s2a.proto.v2.SessionResp.offload_private_key_operation_resp:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationResp - 14, // 22: s2a.proto.v2.SessionResp.offload_resumption_key_operation_resp:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationResp - 16, // 23: s2a.proto.v2.SessionResp.validate_peer_certificate_chain_resp:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp - 27, // 24: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion - 27, // 25: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion - 28, // 26: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite - 6, // 27: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy - 27, // 28: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion - 27, // 29: 
s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion - 28, // 30: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite - 1, // 31: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.request_client_certificate:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate - 6, // 32: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy - 17, // 33: s2a.proto.v2.S2AService.SetUpSession:input_type -> s2a.proto.v2.SessionReq - 18, // 34: s2a.proto.v2.S2AService.SetUpSession:output_type -> s2a.proto.v2.SessionResp - 34, // [34:35] is the sub-list for method output_type - 33, // [33:34] is the sub-list for method input_type - 33, // [33:33] is the sub-list for extension type_name - 33, // [33:33] is the sub-list for extension extendee - 0, // [0:33] is the sub-list for field type_name -} - -func init() { file_internal_proto_v2_s2a_s2a_proto_init() } -func file_internal_proto_v2_s2a_s2a_proto_init() { - if File_internal_proto_v2_s2a_s2a_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AlpnPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthenticationMechanism); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Status); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTlsConfigurationReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTlsConfigurationResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OffloadPrivateKeyOperationReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OffloadPrivateKeyOperationResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OffloadResumptionKeyOperationReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - 
switch v := v.(*OffloadResumptionKeyOperationResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidatePeerCertificateChainReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidatePeerCertificateChainResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*AuthenticationMechanism_Token)(nil), - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ - (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), - (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ - (*OffloadPrivateKeyOperationReq_RawBytes)(nil), - (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), - (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), - (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ - (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), - (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ - (*SessionReq_GetTlsConfigurationReq)(nil), - (*SessionReq_OffloadPrivateKeyOperationReq)(nil), - 
(*SessionReq_OffloadResumptionKeyOperationReq)(nil), - (*SessionReq_ValidatePeerCertificateChainReq)(nil), - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ - (*SessionResp_GetTlsConfigurationResp)(nil), - (*SessionResp_OffloadPrivateKeyOperationResp)(nil), - (*SessionResp_OffloadResumptionKeyOperationResp)(nil), - (*SessionResp_ValidatePeerCertificateChainResp)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_proto_v2_s2a_s2a_proto_rawDesc, - NumEnums: 6, - NumMessages: 17, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_internal_proto_v2_s2a_s2a_proto_goTypes, - DependencyIndexes: file_internal_proto_v2_s2a_s2a_proto_depIdxs, - EnumInfos: file_internal_proto_v2_s2a_s2a_proto_enumTypes, - MessageInfos: file_internal_proto_v2_s2a_s2a_proto_msgTypes, - }.Build() - File_internal_proto_v2_s2a_s2a_proto = out.File - file_internal_proto_v2_s2a_s2a_proto_rawDesc = nil - file_internal_proto_v2_s2a_s2a_proto_goTypes = nil - file_internal_proto_v2_s2a_s2a_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go deleted file mode 100644 index 2566df6c30..0000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.21.12 -// source: internal/proto/v2/s2a/s2a.proto - -package s2a_go_proto - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" -) - -// S2AServiceClient is the client API for S2AService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type S2AServiceClient interface { - // SetUpSession is a bidirectional stream used by applications to offload - // operations from the TLS handshake. 
- SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) -} - -type s2AServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { - return &s2AServiceClient{cc} -} - -func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) - if err != nil { - return nil, err - } - x := &s2AServiceSetUpSessionClient{stream} - return x, nil -} - -type S2AService_SetUpSessionClient interface { - Send(*SessionReq) error - Recv() (*SessionResp, error) - grpc.ClientStream -} - -type s2AServiceSetUpSessionClient struct { - grpc.ClientStream -} - -func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { - return x.ClientStream.SendMsg(m) -} - -func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { - m := new(SessionResp) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// S2AServiceServer is the server API for S2AService service. -// All implementations must embed UnimplementedS2AServiceServer -// for forward compatibility -type S2AServiceServer interface { - // SetUpSession is a bidirectional stream used by applications to offload - // operations from the TLS handshake. - SetUpSession(S2AService_SetUpSessionServer) error - mustEmbedUnimplementedS2AServiceServer() -} - -// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. -type UnimplementedS2AServiceServer struct { -} - -func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { - return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") -} -func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} - -// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to S2AServiceServer will -// result in compilation errors. -type UnsafeS2AServiceServer interface { - mustEmbedUnimplementedS2AServiceServer() -} - -func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { - s.RegisterService(&S2AService_ServiceDesc, srv) -} - -func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) -} - -type S2AService_SetUpSessionServer interface { - Send(*SessionResp) error - Recv() (*SessionReq, error) - grpc.ServerStream -} - -type s2AServiceSetUpSessionServer struct { - grpc.ServerStream -} - -func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { - return x.ServerStream.SendMsg(m) -} - -func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { - m := new(SessionReq) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var S2AService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "s2a.proto.v2.S2AService", - HandlerType: (*S2AServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "SetUpSession", - Handler: _S2AService_SetUpSession_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "internal/proto/v2/s2a/s2a.proto", -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go deleted file mode 100644 index 486f4ec4f2..0000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package aeadcrypter provides the interface for AEAD cipher implementations -// used by S2A's record protocol. -package aeadcrypter - -// S2AAEADCrypter is the interface for an AEAD cipher used by the S2A record -// protocol. -type S2AAEADCrypter interface { - // Encrypt encrypts the plaintext and computes the tag of dst and plaintext. - // dst and plaintext may fully overlap or not at all. - Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) - // Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may - // fully overlap or not at all. - Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) - // TagSize returns the tag size in bytes. - TagSize() int -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go deleted file mode 100644 index 85c4e595d7..0000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package aeadcrypter - -import ( - "crypto/aes" - "crypto/cipher" - "fmt" -) - -// Supported key sizes in bytes. -const ( - AES128GCMKeySize = 16 - AES256GCMKeySize = 32 -) - -// aesgcm is the struct that holds an AES-GCM cipher for the S2A AEAD crypter. -type aesgcm struct { - aead cipher.AEAD -} - -// NewAESGCM creates an AES-GCM crypter instance. Note that the key must be -// either 128 bits or 256 bits. 
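NewAESGCM, whose body follows, is a thin wrapper over the standard library. A self-contained sketch of the equivalent construction plus an encrypt/decrypt round trip:

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

func main() {
	// A 16-byte key selects AES-128-GCM and a 32-byte key AES-256-GCM,
	// matching AES128GCMKeySize and AES256GCMKeySize above.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		panic(err)
	}

	// Fixed all-zero nonce for demonstration only; the record protocol
	// derives a fresh masked nonce for every record.
	nonce := make([]byte, aead.NonceSize()) // 12 bytes, matching NonceSize
	ciphertext := aead.Seal(nil, nonce, []byte("hello"), []byte("aad"))
	plaintext, err := aead.Open(nil, nonce, ciphertext, []byte("aad"))
	fmt.Println(string(plaintext), err) // hello <nil>
}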
-func NewAESGCM(key []byte) (S2AAEADCrypter, error) { - if len(key) != AES128GCMKeySize && len(key) != AES256GCMKeySize { - return nil, fmt.Errorf("%d or %d bytes, given: %d", AES128GCMKeySize, AES256GCMKeySize, len(key)) - } - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - a, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - return &aesgcm{aead: a}, nil -} - -// Encrypt is the encryption function. dst can contain bytes at the beginning of -// the ciphertext that will not be encrypted but will be authenticated. If dst -// has enough capacity to hold these bytes, the ciphertext and the tag, no -// allocation and copy operations will be performed. dst and plaintext may -// fully overlap or not at all. -func (s *aesgcm) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { - return encrypt(s.aead, dst, plaintext, nonce, aad) -} - -func (s *aesgcm) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { - return decrypt(s.aead, dst, ciphertext, nonce, aad) -} - -func (s *aesgcm) TagSize() int { - return TagSize -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go deleted file mode 100644 index 214df4ca41..0000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package aeadcrypter - -import ( - "crypto/cipher" - "fmt" - - "golang.org/x/crypto/chacha20poly1305" -) - -// Supported key size in bytes. -const ( - Chacha20Poly1305KeySize = 32 -) - -// chachapoly is the struct that holds a CHACHA-POLY cipher for the S2A AEAD -// crypter. -type chachapoly struct { - aead cipher.AEAD -} - -// NewChachaPoly creates a Chacha-Poly crypter instance. Note that the key must -// be Chacha20Poly1305KeySize bytes in length. -func NewChachaPoly(key []byte) (S2AAEADCrypter, error) { - if len(key) != Chacha20Poly1305KeySize { - return nil, fmt.Errorf("%d bytes, given: %d", Chacha20Poly1305KeySize, len(key)) - } - c, err := chacha20poly1305.New(key) - if err != nil { - return nil, err - } - return &chachapoly{aead: c}, nil -} - -// Encrypt is the encryption function. dst can contain bytes at the beginning of -// the ciphertext that will not be encrypted but will be authenticated. If dst -// has enough capacity to hold these bytes, the ciphertext and the tag, no -// allocation and copy operations will be performed. dst and plaintext may -// fully overlap or not at all. 
-func (s *chachapoly) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { - return encrypt(s.aead, dst, plaintext, nonce, aad) -} - -func (s *chachapoly) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { - return decrypt(s.aead, dst, ciphertext, nonce, aad) -} - -func (s *chachapoly) TagSize() int { - return TagSize -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go deleted file mode 100644 index b3c36ad95d..0000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package aeadcrypter - -import ( - "crypto/cipher" - "fmt" -) - -const ( - // TagSize is the tag size in bytes for AES-128-GCM-SHA256, - // AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. - TagSize = 16 - // NonceSize is the size of the nonce in number of bytes for - // AES-128-GCM-SHA256, AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. - NonceSize = 12 - // SHA256DigestSize is the digest size of sha256 in bytes. - SHA256DigestSize = 32 - // SHA384DigestSize is the digest size of sha384 in bytes. - SHA384DigestSize = 48 -) - -// sliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. If the -// original slice has sufficient capacity then no allocation is performed. -func sliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return head, tail -} - -// encrypt is the encryption function for an AEAD crypter. aead determines -// the type of AEAD crypter. dst can contain bytes at the beginning of the -// ciphertext that will not be encrypted but will be authenticated. If dst has -// enough capacity to hold these bytes, the ciphertext and the tag, no -// allocation and copy operations will be performed. dst and plaintext may -// fully overlap or not at all. -func encrypt(aead cipher.AEAD, dst, plaintext, nonce, aad []byte) ([]byte, error) { - if len(nonce) != NonceSize { - return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) - } - // If we need to allocate an output buffer, we want to include space for - // the tag to avoid forcing the caller to reallocate as well. - dlen := len(dst) - dst, out := sliceForAppend(dst, len(plaintext)+TagSize) - data := out[:len(plaintext)] - copy(data, plaintext) // data may fully overlap plaintext - - // Seal appends the ciphertext and the tag to its first argument and - // returns the updated slice. 
However, sliceForAppend above ensures that - // dst has enough capacity to avoid a reallocation and copy due to the - // append. - dst = aead.Seal(dst[:dlen], nonce, data, aad) - return dst, nil -} - -// decrypt is the decryption function for an AEAD crypter, where aead determines -// the type of AEAD crypter, and dst the destination bytes for the decrypted -// ciphertext. The dst buffer may fully overlap with plaintext or not at all. -func decrypt(aead cipher.AEAD, dst, ciphertext, nonce, aad []byte) ([]byte, error) { - if len(nonce) != NonceSize { - return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) - } - // If dst is equal to ciphertext[:0], ciphertext storage is reused. - plaintext, err := aead.Open(dst, nonce, ciphertext, aad) - if err != nil { - return nil, fmt.Errorf("message auth failed: %v", err) - } - return plaintext, nil -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go deleted file mode 100644 index ddeaa6d77d..0000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go +++ /dev/null @@ -1,98 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package halfconn - -import ( - "crypto/sha256" - "crypto/sha512" - "fmt" - "hash" - - s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" - "github.com/google/s2a-go/internal/record/internal/aeadcrypter" -) - -// ciphersuite is the interface for retrieving ciphersuite-specific information -// and utilities. -type ciphersuite interface { - // keySize returns the key size in bytes. This refers to the key used by - // the AEAD crypter. This is derived by calling HKDF expand on the traffic - // secret. - keySize() int - // nonceSize returns the nonce size in bytes. - nonceSize() int - // trafficSecretSize returns the traffic secret size in bytes. This refers - // to the secret used to derive the traffic key and nonce, as specified in - // https://tools.ietf.org/html/rfc8446#section-7. - trafficSecretSize() int - // hashFunction returns the hash function for the ciphersuite. - hashFunction() func() hash.Hash - // aeadCrypter takes a key and creates an AEAD crypter for the ciphersuite - // using that key. - aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) -} - -func newCiphersuite(ciphersuite s2apb.Ciphersuite) (ciphersuite, error) { - switch ciphersuite { - case s2apb.Ciphersuite_AES_128_GCM_SHA256: - return &aesgcm128sha256{}, nil - case s2apb.Ciphersuite_AES_256_GCM_SHA384: - return &aesgcm256sha384{}, nil - case s2apb.Ciphersuite_CHACHA20_POLY1305_SHA256: - return &chachapolysha256{}, nil - default: - return nil, fmt.Errorf("unrecognized ciphersuite: %v", ciphersuite) - } -} - -// aesgcm128sha256 is the AES-128-GCM-SHA256 implementation of the ciphersuite -// interface. 
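A hypothetical in-package test of the newCiphersuite factory above, assuming imports of testing plus the s2apb and aeadcrypter packages this file already uses; the expected sizes come from the implementations that follow:

func TestNewCiphersuiteSizes(t *testing.T) {
	cs, err := newCiphersuite(s2apb.Ciphersuite_AES_128_GCM_SHA256)
	if err != nil {
		t.Fatalf("newCiphersuite: %v", err)
	}
	if got, want := cs.keySize(), aeadcrypter.AES128GCMKeySize; got != want {
		t.Errorf("keySize() = %d, want %d", got, want)
	}
	if got, want := cs.nonceSize(), aeadcrypter.NonceSize; got != want {
		t.Errorf("nonceSize() = %d, want %d", got, want)
	}
	if got, want := cs.trafficSecretSize(), aeadcrypter.SHA256DigestSize; got != want {
		t.Errorf("trafficSecretSize() = %d, want %d", got, want)
	}
	if _, err := cs.aeadCrypter(make([]byte, cs.keySize())); err != nil {
		t.Errorf("aeadCrypter: %v", err)
	}
}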
-type aesgcm128sha256 struct{} - -func (aesgcm128sha256) keySize() int { return aeadcrypter.AES128GCMKeySize } -func (aesgcm128sha256) nonceSize() int { return aeadcrypter.NonceSize } -func (aesgcm128sha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } -func (aesgcm128sha256) hashFunction() func() hash.Hash { return sha256.New } -func (aesgcm128sha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { - return aeadcrypter.NewAESGCM(key) -} - -// aesgcm256sha384 is the AES-256-GCM-SHA384 implementation of the ciphersuite -// interface. -type aesgcm256sha384 struct{} - -func (aesgcm256sha384) keySize() int { return aeadcrypter.AES256GCMKeySize } -func (aesgcm256sha384) nonceSize() int { return aeadcrypter.NonceSize } -func (aesgcm256sha384) trafficSecretSize() int { return aeadcrypter.SHA384DigestSize } -func (aesgcm256sha384) hashFunction() func() hash.Hash { return sha512.New384 } -func (aesgcm256sha384) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { - return aeadcrypter.NewAESGCM(key) -} - -// chachapolysha256 is the ChaChaPoly-SHA256 implementation of the ciphersuite -// interface. -type chachapolysha256 struct{} - -func (chachapolysha256) keySize() int { return aeadcrypter.Chacha20Poly1305KeySize } -func (chachapolysha256) nonceSize() int { return aeadcrypter.NonceSize } -func (chachapolysha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } -func (chachapolysha256) hashFunction() func() hash.Hash { return sha256.New } -func (chachapolysha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { - return aeadcrypter.NewChachaPoly(key) -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go deleted file mode 100644 index 9499cdca75..0000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package halfconn - -import "errors" - -// counter is a 64-bit counter. -type counter struct { - val uint64 - hasOverflowed bool -} - -// newCounter creates a new counter with the initial value set to val. -func newCounter(val uint64) counter { - return counter{val: val} -} - -// value returns the current value of the counter. -func (c *counter) value() (uint64, error) { - if c.hasOverflowed { - return 0, errors.New("counter has overflowed") - } - return c.val, nil -} - -// increment increments the counter and checks for overflow. -func (c *counter) increment() { - // If the counter is already invalid due to overflow, there is no need to - // increase it. We check for the hasOverflowed flag in the call to value(). - if c.hasOverflowed { - return - } - c.val++ - if c.val == 0 { - c.hasOverflowed = true - } -} - -// reset sets the counter value to zero and sets the hasOverflowed flag to -// false. 
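The counter latches an overflow flag on wraparound instead of silently reusing values, which is what prevents nonce reuse after 2^64 records. A hypothetical in-package test of that behavior (assuming imports of math and testing):

func TestCounterOverflow(t *testing.T) {
	c := newCounter(math.MaxUint64)
	if _, err := c.value(); err != nil {
		t.Fatalf("unexpected error before overflow: %v", err)
	}
	c.increment() // wraps val to 0 and latches hasOverflowed
	if _, err := c.value(); err == nil {
		t.Fatal("expected an overflow error after wraparound")
	}
	c.reset() // reset (below) clears both the value and the flag
	if v, err := c.value(); err != nil || v != 0 {
		t.Fatalf("value() after reset = %d, %v; want 0, nil", v, err)
	}
}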
-func (c *counter) reset() { - c.val = 0 - c.hasOverflowed = false -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go deleted file mode 100644 index e05f2c36a6..0000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package halfconn - -import ( - "fmt" - "hash" - - "golang.org/x/crypto/hkdf" -) - -// hkdfExpander is the interface for the HKDF expansion function; see -// https://tools.ietf.org/html/rfc5869 for details. its use in TLS 1.3 is -// specified in https://tools.ietf.org/html/rfc8446#section-7.2 -type hkdfExpander interface { - // expand takes a secret, a label, and the output length in bytes, and - // returns the resulting expanded key. - expand(secret, label []byte, length int) ([]byte, error) -} - -// defaultHKDFExpander is the default HKDF expander which uses Go's crypto/hkdf -// for HKDF expansion. -type defaultHKDFExpander struct { - h func() hash.Hash -} - -// newDefaultHKDFExpander creates an instance of the default HKDF expander -// using the given hash function. -func newDefaultHKDFExpander(h func() hash.Hash) hkdfExpander { - return &defaultHKDFExpander{h: h} -} - -func (d *defaultHKDFExpander) expand(secret, label []byte, length int) ([]byte, error) { - outBuf := make([]byte, length) - n, err := hkdf.Expand(d.h, secret, label).Read(outBuf) - if err != nil { - return nil, fmt.Errorf("hkdf.Expand.Read failed with error: %v", err) - } - if n < length { - return nil, fmt.Errorf("hkdf.Expand.Read returned unexpected length, got %d, want %d", n, length) - } - return outBuf, nil -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go deleted file mode 100644 index dff99ff594..0000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go +++ /dev/null @@ -1,193 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package halfconn manages the inbound or outbound traffic of a TLS 1.3 -// connection. 
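The expander above is a thin wrapper over golang.org/x/crypto/hkdf. A minimal standalone equivalent of its expand method, fixing the hash to SHA-256:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

// expand mirrors defaultHKDFExpander.expand above: HKDF-Expand (RFC 5869)
// with the given hash, secret (PRK), and label (info), producing length bytes.
func expand(secret, label []byte, length int) ([]byte, error) {
	out := make([]byte, length)
	if _, err := io.ReadFull(hkdf.Expand(sha256.New, secret, label), out); err != nil {
		return nil, fmt.Errorf("hkdf expand failed: %v", err)
	}
	return out, nil
}

func main() {
	key, err := expand([]byte("traffic secret (illustrative)"), []byte("tls13 key"), 16)
	fmt.Printf("%x %v\n", key, err)
}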
-package halfconn - -import ( - "fmt" - "sync" - - s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" - "github.com/google/s2a-go/internal/record/internal/aeadcrypter" - "golang.org/x/crypto/cryptobyte" -) - -// The constants below were taken from Section 7.2 and 7.3 in -// https://tools.ietf.org/html/rfc8446#section-7. They are used as the label -// in HKDF-Expand-Label. -const ( - tls13Key = "tls13 key" - tls13Nonce = "tls13 iv" - tls13Update = "tls13 traffic upd" -) - -// S2AHalfConnection stores the state of the TLS 1.3 connection in the -// inbound or outbound direction. -type S2AHalfConnection struct { - cs ciphersuite - expander hkdfExpander - // mutex guards sequence, aeadCrypter, trafficSecret, and nonce. - mutex sync.Mutex - aeadCrypter aeadcrypter.S2AAEADCrypter - sequence counter - trafficSecret []byte - nonce []byte -} - -// New creates a new instance of S2AHalfConnection given a ciphersuite and a -// traffic secret. -func New(ciphersuite s2apb.Ciphersuite, trafficSecret []byte, sequence uint64) (*S2AHalfConnection, error) { - cs, err := newCiphersuite(ciphersuite) - if err != nil { - return nil, fmt.Errorf("failed to create new ciphersuite: %v", ciphersuite) - } - if cs.trafficSecretSize() != len(trafficSecret) { - return nil, fmt.Errorf("supplied traffic secret must be %v bytes, given: %v bytes", cs.trafficSecretSize(), len(trafficSecret)) - } - - hc := &S2AHalfConnection{cs: cs, expander: newDefaultHKDFExpander(cs.hashFunction()), sequence: newCounter(sequence), trafficSecret: trafficSecret} - if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil { - return nil, fmt.Errorf("failed to create half connection using traffic secret: %v", err) - } - - return hc, nil -} - -// Encrypt encrypts the plaintext and computes the tag of dst and plaintext. -// dst and plaintext may fully overlap or not at all. Note that the sequence -// number will still be incremented on failure, unless the sequence has -// overflowed. -func (hc *S2AHalfConnection) Encrypt(dst, plaintext, aad []byte) ([]byte, error) { - hc.mutex.Lock() - sequence, err := hc.getAndIncrementSequence() - if err != nil { - hc.mutex.Unlock() - return nil, err - } - nonce := hc.maskedNonce(sequence) - crypter := hc.aeadCrypter - hc.mutex.Unlock() - return crypter.Encrypt(dst, plaintext, nonce, aad) -} - -// Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may -// fully overlap or not at all. Note that the sequence number will still be -// incremented on failure, unless the sequence has overflowed. -func (hc *S2AHalfConnection) Decrypt(dst, ciphertext, aad []byte) ([]byte, error) { - hc.mutex.Lock() - sequence, err := hc.getAndIncrementSequence() - if err != nil { - hc.mutex.Unlock() - return nil, err - } - nonce := hc.maskedNonce(sequence) - crypter := hc.aeadCrypter - hc.mutex.Unlock() - return crypter.Decrypt(dst, ciphertext, nonce, aad) -} - -// UpdateKey advances the traffic secret key, as specified in -// https://tools.ietf.org/html/rfc8446#section-7.2. In addition, it derives -// a new key and nonce, and resets the sequence number. 
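Encrypt and Decrypt above obtain a fresh per-record nonce by XOR-ing the big-endian record sequence number into the tail of the static IV (see maskedNonce further down, and RFC 8446 section 5.3). A standalone sketch of that masking:

package main

import (
	"encoding/binary"
	"fmt"
)

// maskNonce XORs the 64-bit sequence number, big-endian, into the last 8
// bytes of a copy of the static IV, as maskedNonce does further down.
func maskNonce(iv []byte, sequence uint64) []byte {
	nonce := make([]byte, len(iv))
	copy(nonce, iv)
	var seq [8]byte
	binary.BigEndian.PutUint64(seq[:], sequence)
	for i, b := range seq {
		nonce[len(nonce)-8+i] ^= b
	}
	return nonce
}

func main() {
	iv := make([]byte, 12) // NonceSize bytes
	fmt.Printf("%x\n", maskNonce(iv, 1)) // only the last byte differs from the IV
}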
-func (hc *S2AHalfConnection) UpdateKey() error { - hc.mutex.Lock() - defer hc.mutex.Unlock() - - var err error - hc.trafficSecret, err = hc.deriveSecret(hc.trafficSecret, []byte(tls13Update), hc.cs.trafficSecretSize()) - if err != nil { - return fmt.Errorf("failed to derive traffic secret: %v", err) - } - - if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil { - return fmt.Errorf("failed to update half connection: %v", err) - } - - hc.sequence.reset() - return nil -} - -// TagSize returns the tag size in bytes of the underlying AEAD crypter. -func (hc *S2AHalfConnection) TagSize() int { - return hc.aeadCrypter.TagSize() -} - -// updateCrypterAndNonce takes a new traffic secret and updates the crypter -// and nonce. Note that the mutex must be held while calling this function. -func (hc *S2AHalfConnection) updateCrypterAndNonce(newTrafficSecret []byte) error { - key, err := hc.deriveSecret(newTrafficSecret, []byte(tls13Key), hc.cs.keySize()) - if err != nil { - return fmt.Errorf("failed to update key: %v", err) - } - - hc.nonce, err = hc.deriveSecret(newTrafficSecret, []byte(tls13Nonce), hc.cs.nonceSize()) - if err != nil { - return fmt.Errorf("failed to update nonce: %v", err) - } - - hc.aeadCrypter, err = hc.cs.aeadCrypter(key) - if err != nil { - return fmt.Errorf("failed to update AEAD crypter: %v", err) - } - return nil -} - -// getAndIncrement returns the current sequence number and increments it. Note -// that the mutex must be held while calling this function. -func (hc *S2AHalfConnection) getAndIncrementSequence() (uint64, error) { - sequence, err := hc.sequence.value() - if err != nil { - return 0, err - } - hc.sequence.increment() - return sequence, nil -} - -// maskedNonce creates a copy of the nonce that is masked with the sequence -// number. Note that the mutex must be held while calling this function. -func (hc *S2AHalfConnection) maskedNonce(sequence uint64) []byte { - const uint64Size = 8 - nonce := make([]byte, len(hc.nonce)) - copy(nonce, hc.nonce) - for i := 0; i < uint64Size; i++ { - nonce[aeadcrypter.NonceSize-uint64Size+i] ^= byte(sequence >> uint64(56-uint64Size*i)) - } - return nonce -} - -// deriveSecret implements the Derive-Secret function, as specified in -// https://tools.ietf.org/html/rfc8446#section-7.1. -func (hc *S2AHalfConnection) deriveSecret(secret, label []byte, length int) ([]byte, error) { - var hkdfLabel cryptobyte.Builder - hkdfLabel.AddUint16(uint16(length)) - hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(label) - }) - // Append an empty `Context` field to the label, as specified in the RFC. - // The half connection does not use the `Context` field. - hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte("")) - }) - hkdfLabelBytes, err := hkdfLabel.Bytes() - if err != nil { - return nil, fmt.Errorf("deriveSecret failed: %v", err) - } - return hc.expander.expand(secret, hkdfLabelBytes, length) -} diff --git a/vendor/github.com/google/s2a-go/internal/record/record.go b/vendor/github.com/google/s2a-go/internal/record/record.go deleted file mode 100644 index c60515510a..0000000000 --- a/vendor/github.com/google/s2a-go/internal/record/record.go +++ /dev/null @@ -1,757 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package record implements the TLS 1.3 record protocol used by the S2A -// transport credentials. -package record - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - "net" - "sync" - - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" - "github.com/google/s2a-go/internal/record/internal/halfconn" - "github.com/google/s2a-go/internal/tokenmanager" - "google.golang.org/grpc/grpclog" -) - -// recordType is the `ContentType` as described in -// https://tools.ietf.org/html/rfc8446#section-5.1. -type recordType byte - -const ( - alert recordType = 21 - handshake recordType = 22 - applicationData recordType = 23 -) - -// keyUpdateRequest is the `KeyUpdateRequest` as described in -// https://tools.ietf.org/html/rfc8446#section-4.6.3. -type keyUpdateRequest byte - -const ( - updateNotRequested keyUpdateRequest = 0 - updateRequested keyUpdateRequest = 1 -) - -// alertDescription is the `AlertDescription` as described in -// https://tools.ietf.org/html/rfc8446#section-6. -type alertDescription byte - -const ( - closeNotify alertDescription = 0 -) - -// sessionTicketState is used to determine whether session tickets have not yet -// been received, are in the process of being received, or have finished -// receiving. -type sessionTicketState byte - -const ( - ticketsNotYetReceived sessionTicketState = 0 - receivingTickets sessionTicketState = 1 - notReceivingTickets sessionTicketState = 2 -) - -const ( - // The TLS 1.3-specific constants below (tlsRecordMaxPlaintextSize, - // tlsRecordHeaderSize, tlsRecordTypeSize) were taken from - // https://tools.ietf.org/html/rfc8446#section-5.1. - - // tlsRecordMaxPlaintextSize is the maximum size in bytes of the plaintext - // in a single TLS 1.3 record. - tlsRecordMaxPlaintextSize = 16384 // 2^14 - // tlsRecordTypeSize is the size in bytes of the TLS 1.3 record type. - tlsRecordTypeSize = 1 - // tlsTagSize is the size in bytes of the tag of the following three - // ciphersuites: AES-128-GCM-SHA256, AES-256-GCM-SHA384, - // CHACHA20-POLY1305-SHA256. - tlsTagSize = 16 - // tlsRecordMaxPayloadSize is the maximum size in bytes of the payload in a - // single TLS 1.3 record. This is the maximum size of the plaintext plus the - // record type byte and 16 bytes of the tag. - tlsRecordMaxPayloadSize = tlsRecordMaxPlaintextSize + tlsRecordTypeSize + tlsTagSize - // tlsRecordHeaderTypeSize is the size in bytes of the TLS 1.3 record - // header type. - tlsRecordHeaderTypeSize = 1 - // tlsRecordHeaderLegacyRecordVersionSize is the size in bytes of the TLS - // 1.3 record header legacy record version. - tlsRecordHeaderLegacyRecordVersionSize = 2 - // tlsRecordHeaderPayloadLengthSize is the size in bytes of the TLS 1.3 - // record header payload length. - tlsRecordHeaderPayloadLengthSize = 2 - // tlsRecordHeaderSize is the size in bytes of the TLS 1.3 record header. 
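The five header bytes enumerated above (one type byte, two legacy-version bytes, two payload-length bytes) are all a reader needs to frame an incoming record. A minimal sketch of parsing them:

package main

import (
	"encoding/binary"
	"fmt"
)

// parseRecordHeader splits the 5-byte TLS 1.3 record header described by the
// constants above into its content type and payload length.
func parseRecordHeader(hdr [5]byte) (recordType byte, payloadLen int) {
	recordType = hdr[0]
	// hdr[1:3] holds the legacy record version (0x03 0x03) and is ignored here.
	payloadLen = int(binary.BigEndian.Uint16(hdr[3:5]))
	return recordType, payloadLen
}

func main() {
	hdr := [5]byte{23, 3, 3, 0x40, 0x11} // application data, 16401-byte payload
	typ, n := parseRecordHeader(hdr)
	fmt.Println(typ, n) // 23 16401 (tlsRecordMaxPayloadSize)
}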
-	tlsRecordHeaderSize = tlsRecordHeaderTypeSize + tlsRecordHeaderLegacyRecordVersionSize + tlsRecordHeaderPayloadLengthSize
-	// tlsRecordMaxSize is the maximum size in bytes of a single TLS 1.3 record.
-	tlsRecordMaxSize = tlsRecordMaxPayloadSize + tlsRecordHeaderSize
-	// tlsApplicationData is the application data type of the TLS 1.3 record
-	// header.
-	tlsApplicationData = 23
-	// tlsLegacyRecordVersion is the legacy record version of the TLS record.
-	tlsLegacyRecordVersion = 3
-	// tlsAlertSize is the size in bytes of an alert of TLS 1.3.
-	tlsAlertSize = 2
-)
-
-const (
-	// These are TLS 1.3 handshake-specific constants.
-
-	// tlsHandshakeNewSessionTicketType is the prefix of a handshake new session
-	// ticket message of TLS 1.3.
-	tlsHandshakeNewSessionTicketType = 4
-	// tlsHandshakeKeyUpdateType is the prefix of a handshake key update message
-	// of TLS 1.3.
-	tlsHandshakeKeyUpdateType = 24
-	// tlsHandshakeMsgTypeSize is the size in bytes of the TLS 1.3 handshake
-	// message type field.
-	tlsHandshakeMsgTypeSize = 1
-	// tlsHandshakeLengthSize is the size in bytes of the TLS 1.3 handshake
-	// message length field.
-	tlsHandshakeLengthSize = 3
-	// tlsHandshakeKeyUpdateMsgSize is the size in bytes of the TLS 1.3
-	// handshake key update message.
-	tlsHandshakeKeyUpdateMsgSize = 1
-	// tlsHandshakePrefixSize is the size in bytes of the prefix of the TLS 1.3
-	// handshake message.
-	tlsHandshakePrefixSize = 4
-	// tlsMaxSessionTicketSize is the maximum size of a NewSessionTicket message
-	// in TLS 1.3. This is the sum of the max sizes of all the fields in the
-	// NewSessionTicket struct specified in
-	// https://tools.ietf.org/html/rfc8446#section-4.6.1.
-	tlsMaxSessionTicketSize = 131338
-)
-
-const (
-	// outBufMaxRecords is the maximum number of records that can fit in the
-	// outRecordsBuf buffer.
-	outBufMaxRecords = 16
-	// outBufMaxSize is the maximum size (in bytes) of the outRecordsBuf buffer.
-	outBufMaxSize = outBufMaxRecords * tlsRecordMaxSize
-	// maxAllowedTickets is the maximum number of session tickets that are
-	// allowed. The number of tickets is limited to ensure that the size of the
-	// ticket queue does not grow indefinitely. S2A also keeps a limit on the
-	// number of tickets that it caches.
-	maxAllowedTickets = 5
-)
-
-// preConstructedKeyUpdateMsg holds the key update message. This is needed as an
-// optimization so that the same message does not need to be constructed every
-// time a key update message is sent.
-var preConstructedKeyUpdateMsg = buildKeyUpdateRequest()
-
-// conn represents a secured TLS connection. It implements the net.Conn
-// interface.
-type conn struct {
-	net.Conn
-	// inConn is the half connection responsible for decrypting incoming bytes.
-	inConn *halfconn.S2AHalfConnection
-	// outConn is the half connection responsible for encrypting outgoing bytes.
-	outConn *halfconn.S2AHalfConnection
-	// pendingApplicationData holds data that has been read from the connection
-	// and decrypted, but has not yet been returned by Read.
-	pendingApplicationData []byte
-	// unusedBuf holds data read from the network that has not yet been
-	// decrypted. This data might not consist of a complete record. It may
-	// consist of several records, the last of which could be incomplete.
-	unusedBuf []byte
-	// outRecordsBuf is a buffer used to store outgoing TLS records before
-	// they are written to the network.
-	outRecordsBuf []byte
-	// nextRecord stores the next record info in the unusedBuf buffer.
- nextRecord []byte - // overheadSize is the overhead size in bytes of each TLS 1.3 record, which - // is computed as overheadSize = header size + record type byte + tag size. - // Note that there is no padding by zeros in the overhead calculation. - overheadSize int - // readMutex guards against concurrent calls to Read. This is required since - // Close may be called during a Read. - readMutex sync.Mutex - // writeMutex guards against concurrent calls to Write. This is required - // since Close may be called during a Write, and also because a key update - // message may be written during a Read. - writeMutex sync.Mutex - // handshakeBuf holds handshake messages while they are being processed. - handshakeBuf []byte - // ticketState is the current processing state of the session tickets. - ticketState sessionTicketState - // sessionTickets holds the completed session tickets until they are sent to - // the handshaker service for processing. - sessionTickets [][]byte - // ticketSender sends session tickets to the S2A handshaker service. - ticketSender s2aTicketSender - // callComplete is a channel that blocks closing the record protocol until a - // pending call to the S2A completes. - callComplete chan bool -} - -// ConnParameters holds the parameters used for creating a new conn object. -type ConnParameters struct { - // NetConn is the TCP connection to the peer. This parameter is required. - NetConn net.Conn - // Ciphersuite is the TLS ciphersuite negotiated by the S2A handshaker - // service. This parameter is required. - Ciphersuite commonpb.Ciphersuite - // TLSVersion is the TLS version number negotiated by the S2A handshaker - // service. This parameter is required. - TLSVersion commonpb.TLSVersion - // InTrafficSecret is the traffic secret used to derive the session key for - // the inbound direction. This parameter is required. - InTrafficSecret []byte - // OutTrafficSecret is the traffic secret used to derive the session key - // for the outbound direction. This parameter is required. - OutTrafficSecret []byte - // UnusedBuf is the data read from the network that has not yet been - // decrypted. This parameter is optional. If not provided, then no - // application data was sent in the same flight of messages as the final - // handshake message. - UnusedBuf []byte - // InSequence is the sequence number of the next, incoming, TLS record. - // This parameter is required. - InSequence uint64 - // OutSequence is the sequence number of the next, outgoing, TLS record. - // This parameter is required. - OutSequence uint64 - // HSAddr stores the address of the S2A handshaker service. This parameter - // is optional. If not provided, then TLS resumption is disabled. - HSAddr string - // ConnectionId is the connection identifier that was created and sent by - // S2A at the end of a handshake. - ConnectionID uint64 - // LocalIdentity is the local identity that was used by S2A during session - // setup and included in the session result. - LocalIdentity *commonpb.Identity - // EnsureProcessSessionTickets allows users to wait and ensure that all - // available session tickets are sent to S2A before a process completes. - EnsureProcessSessionTickets *sync.WaitGroup -} - -// NewConn creates a TLS record protocol that wraps the TCP connection. 
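
Before the constructor below, a note on wiring: the ConnParameters fields are normally populated from the S2A handshaker's session result. A minimal, hedged sketch of how a caller might secure an established TCP connection (the secrets and ciphersuite are placeholders, and the record package is internal to s2a-go, so this is illustrative only):

package main

import (
	"net"

	commonpb "github.com/google/s2a-go/internal/proto/common_go_proto"
	"github.com/google/s2a-go/internal/record"
)

// wrapConn secures an established TCP connection with the record protocol,
// using traffic secrets that, in real usage, come from the S2A handshaker.
func wrapConn(tcpConn net.Conn, inSecret, outSecret []byte) (net.Conn, error) {
	return record.NewConn(&record.ConnParameters{
		NetConn:          tcpConn,
		Ciphersuite:      commonpb.Ciphersuite_AES_128_GCM_SHA256,
		TLSVersion:       commonpb.TLSVersion_TLS1_3,
		InTrafficSecret:  inSecret,
		OutTrafficSecret: outSecret,
		InSequence:       0, // sequence number of the next incoming record
		OutSequence:      0, // sequence number of the next outgoing record
	})
}
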
-func NewConn(o *ConnParameters) (net.Conn, error) { - if o == nil { - return nil, errors.New("conn options must not be nil") - } - if o.TLSVersion != commonpb.TLSVersion_TLS1_3 { - return nil, errors.New("TLS version must be TLS 1.3") - } - - inConn, err := halfconn.New(o.Ciphersuite, o.InTrafficSecret, o.InSequence) - if err != nil { - return nil, fmt.Errorf("failed to create inbound half connection: %v", err) - } - outConn, err := halfconn.New(o.Ciphersuite, o.OutTrafficSecret, o.OutSequence) - if err != nil { - return nil, fmt.Errorf("failed to create outbound half connection: %v", err) - } - - // The tag size for the in/out connections should be the same. - overheadSize := tlsRecordHeaderSize + tlsRecordTypeSize + inConn.TagSize() - var unusedBuf []byte - if o.UnusedBuf == nil { - // We pre-allocate unusedBuf to be of size - // 2*tlsRecordMaxSize-1 during initialization. We only read from the - // network into unusedBuf when unusedBuf does not contain a complete - // record and the incomplete record is at most tlsRecordMaxSize-1 - // (bytes). And we read at most tlsRecordMaxSize bytes of data from the - // network into unusedBuf at one time. Therefore, 2*tlsRecordMaxSize-1 - // is large enough to buffer data read from the network. - unusedBuf = make([]byte, 0, 2*tlsRecordMaxSize-1) - } else { - unusedBuf = make([]byte, len(o.UnusedBuf)) - copy(unusedBuf, o.UnusedBuf) - } - - tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() - if err != nil { - grpclog.Infof("failed to create single token access token manager: %v", err) - } - - s2aConn := &conn{ - Conn: o.NetConn, - inConn: inConn, - outConn: outConn, - unusedBuf: unusedBuf, - outRecordsBuf: make([]byte, tlsRecordMaxSize), - nextRecord: unusedBuf, - overheadSize: overheadSize, - ticketState: ticketsNotYetReceived, - // Pre-allocate the buffer for one session ticket message and the max - // plaintext size. This is the largest size that handshakeBuf will need - // to hold. The largest incomplete handshake message is the - // [handshake header size] + [max session ticket size] - 1. - // Then, tlsRecordMaxPlaintextSize is the maximum size that will be - // appended to the handshakeBuf before the handshake message is - // completed. Therefore, the buffer size below should be large enough to - // buffer any handshake messages. - handshakeBuf: make([]byte, 0, tlsHandshakePrefixSize+tlsMaxSessionTicketSize+tlsRecordMaxPlaintextSize-1), - ticketSender: &ticketSender{ - hsAddr: o.HSAddr, - connectionID: o.ConnectionID, - localIdentity: o.LocalIdentity, - tokenManager: tokenManager, - ensureProcessSessionTickets: o.EnsureProcessSessionTickets, - }, - callComplete: make(chan bool), - } - return s2aConn, nil -} - -// Read reads and decrypts a TLS 1.3 record from the underlying connection, and -// copies any application data received from the peer into b. If the size of the -// payload is greater than len(b), Read retains the remaining bytes in an -// internal buffer, and subsequent calls to Read will read from this buffer -// until it is exhausted. At most 1 TLS record worth of application data is -// written to b for each call to Read. -// -// Note that for the user to efficiently call this method, the user should -// ensure that the buffer b is allocated such that the buffer does not have any -// unused segments. This can be done by calling Read via io.ReadFull, which -// continually calls Read until the specified buffer has been filled. 
Also note -// that the user should close the connection via Close() if an error is thrown -// by a call to Read. -func (p *conn) Read(b []byte) (n int, err error) { - p.readMutex.Lock() - defer p.readMutex.Unlock() - // Check if p.pendingApplication data has leftover application data from - // the previous call to Read. - if len(p.pendingApplicationData) == 0 { - // Read a full record from the wire. - record, err := p.readFullRecord() - if err != nil { - return 0, err - } - // Now we have a complete record, so split the header and validate it - // The TLS record is split into 2 pieces: the record header and the - // payload. The payload has the following form: - // [payload] = [ciphertext of application data] - // + [ciphertext of record type byte] - // + [(optionally) ciphertext of padding by zeros] - // + [tag] - header, payload, err := splitAndValidateHeader(record) - if err != nil { - return 0, err - } - // Decrypt the ciphertext. - p.pendingApplicationData, err = p.inConn.Decrypt(payload[:0], payload, header) - if err != nil { - return 0, err - } - // Remove the padding by zeros and the record type byte from the - // p.pendingApplicationData buffer. - msgType, err := p.stripPaddingAndType() - if err != nil { - return 0, err - } - // Check that the length of the plaintext after stripping the padding - // and record type byte is under the maximum plaintext size. - if len(p.pendingApplicationData) > tlsRecordMaxPlaintextSize { - return 0, errors.New("plaintext size larger than maximum") - } - // The expected message types are application data, alert, and - // handshake. For application data, the bytes are directly copied into - // b. For an alert, the type of the alert is checked and the connection - // is closed on a close notify alert. For a handshake message, the - // handshake message type is checked. The handshake message type can be - // a key update type, for which we advance the traffic secret, and a - // new session ticket type, for which we send the received ticket to S2A - // for processing. - switch msgType { - case applicationData: - if len(p.handshakeBuf) > 0 { - return 0, errors.New("application data received while processing fragmented handshake messages") - } - if p.ticketState == receivingTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } - case alert: - return 0, p.handleAlertMessage() - case handshake: - if err = p.handleHandshakeMessage(); err != nil { - return 0, err - } - return 0, nil - default: - return 0, errors.New("unknown record type") - } - } - // Write as much application data as possible to b, the output buffer. - n = copy(b, p.pendingApplicationData) - p.pendingApplicationData = p.pendingApplicationData[n:] - return n, nil -} - -// Write divides b into segments of size tlsRecordMaxPlaintextSize, builds a -// TLS 1.3 record (of type "application data") from each segment, and sends -// the record to the peer. It returns the number of plaintext bytes that were -// successfully sent to the peer. -func (p *conn) Write(b []byte) (n int, err error) { - p.writeMutex.Lock() - defer p.writeMutex.Unlock() - return p.writeTLSRecord(b, tlsApplicationData) -} - -// writeTLSRecord divides b into segments of size maxPlaintextBytesPerRecord, -// builds a TLS 1.3 record (of type recordType) from each segment, and sends -// the record to the peer. It returns the number of plaintext bytes that were -// successfully sent to the peer. 
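
A quick sanity check of the segmentation arithmetic implemented below, using the constants defined earlier in this file (16384-byte maximum plaintext per record; 5-byte header, 1 record-type byte, and 16-byte tag of per-record overhead):

package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		maxPlaintext = 16384      // tlsRecordMaxPlaintextSize
		overhead     = 5 + 1 + 16 // header + record type byte + AEAD tag
	)
	b := 40000 // plaintext bytes to send
	numRecords := int(math.Ceil(float64(b) / float64(maxPlaintext)))
	total := b + numRecords*overhead
	fmt.Printf("%d plaintext bytes -> %d records, %d bytes on the wire\n", b, numRecords, total)
	// Prints: 40000 plaintext bytes -> 3 records, 40066 bytes on the wire
}
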
-func (p *conn) writeTLSRecord(b []byte, recordType byte) (n int, err error) { - // Create a record of only header, record type, and tag if given empty - // byte array. - if len(b) == 0 { - recordEndIndex, _, err := p.buildRecord(b, recordType, 0) - if err != nil { - return 0, err - } - - // Write the bytes stored in outRecordsBuf to p.Conn. Since we return - // the number of plaintext bytes written without overhead, we will - // always return 0 while p.Conn.Write returns the entire record length. - _, err = p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) - return 0, err - } - - numRecords := int(math.Ceil(float64(len(b)) / float64(tlsRecordMaxPlaintextSize))) - totalRecordsSize := len(b) + numRecords*p.overheadSize - partialBSize := len(b) - if totalRecordsSize > outBufMaxSize { - totalRecordsSize = outBufMaxSize - partialBSize = outBufMaxRecords * tlsRecordMaxPlaintextSize - } - if len(p.outRecordsBuf) < totalRecordsSize { - p.outRecordsBuf = make([]byte, totalRecordsSize) - } - for bStart := 0; bStart < len(b); bStart += partialBSize { - bEnd := bStart + partialBSize - if bEnd > len(b) { - bEnd = len(b) - } - partialB := b[bStart:bEnd] - recordEndIndex := 0 - for len(partialB) > 0 { - recordEndIndex, partialB, err = p.buildRecord(partialB, recordType, recordEndIndex) - if err != nil { - // Return the amount of bytes written prior to the error. - return bStart, err - } - } - // Write the bytes stored in outRecordsBuf to p.Conn. If there is an - // error, calculate the total number of plaintext bytes of complete - // records successfully written to the peer and return it. - nn, err := p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) - if err != nil { - numberOfCompletedRecords := int(math.Floor(float64(nn) / float64(tlsRecordMaxSize))) - return bStart + numberOfCompletedRecords*tlsRecordMaxPlaintextSize, err - } - } - return len(b), nil -} - -// buildRecord builds a TLS 1.3 record of type recordType from plaintext, -// and writes the record to outRecordsBuf at recordStartIndex. The record will -// have at most tlsRecordMaxPlaintextSize bytes of payload. It returns the -// index of outRecordsBuf where the current record ends, as well as any -// remaining plaintext bytes. -func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex int) (n int, remainingPlaintext []byte, err error) { - // Construct the payload, which consists of application data and record type. - dataLen := len(plaintext) - if dataLen > tlsRecordMaxPlaintextSize { - dataLen = tlsRecordMaxPlaintextSize - } - remainingPlaintext = plaintext[dataLen:] - newRecordBuf := p.outRecordsBuf[recordStartIndex:] - - copy(newRecordBuf[tlsRecordHeaderSize:], plaintext[:dataLen]) - newRecordBuf[tlsRecordHeaderSize+dataLen] = recordType - payload := newRecordBuf[tlsRecordHeaderSize : tlsRecordHeaderSize+dataLen+1] // 1 is for the recordType. - // Construct the header. - newRecordBuf[0] = tlsApplicationData - newRecordBuf[1] = tlsLegacyRecordVersion - newRecordBuf[2] = tlsLegacyRecordVersion - binary.BigEndian.PutUint16(newRecordBuf[3:], uint16(len(payload)+tlsTagSize)) - header := newRecordBuf[:tlsRecordHeaderSize] - - // Encrypt the payload using header as aad. 
- encryptedPayload, err := p.outConn.Encrypt(newRecordBuf[tlsRecordHeaderSize:][:0], payload, header) - if err != nil { - return 0, plaintext, err - } - recordStartIndex += len(header) + len(encryptedPayload) - return recordStartIndex, remainingPlaintext, nil -} - -func (p *conn) Close() error { - p.readMutex.Lock() - defer p.readMutex.Unlock() - p.writeMutex.Lock() - defer p.writeMutex.Unlock() - // If p.ticketState is equal to notReceivingTickets, then S2A has - // been sent a flight of session tickets, and we must wait for the - // call to S2A to complete before closing the record protocol. - if p.ticketState == notReceivingTickets { - <-p.callComplete - grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") - } - return p.Conn.Close() -} - -// stripPaddingAndType strips the padding by zeros and record type from -// p.pendingApplicationData and returns the record type. Note that -// p.pendingApplicationData should be of the form: -// [application data] + [record type byte] + [trailing zeros] -func (p *conn) stripPaddingAndType() (recordType, error) { - if len(p.pendingApplicationData) == 0 { - return 0, errors.New("application data had length 0") - } - i := len(p.pendingApplicationData) - 1 - // Search for the index of the record type byte. - for i > 0 { - if p.pendingApplicationData[i] != 0 { - break - } - i-- - } - rt := recordType(p.pendingApplicationData[i]) - p.pendingApplicationData = p.pendingApplicationData[:i] - return rt, nil -} - -// readFullRecord reads from the wire until a record is completed and returns -// the full record. -func (p *conn) readFullRecord() (fullRecord []byte, err error) { - fullRecord, p.nextRecord, err = parseReadBuffer(p.nextRecord, tlsRecordMaxPayloadSize) - if err != nil { - return nil, err - } - // Check whether the next record to be decrypted has been completely - // received. - if len(fullRecord) == 0 { - copy(p.unusedBuf, p.nextRecord) - p.unusedBuf = p.unusedBuf[:len(p.nextRecord)] - // Always copy next incomplete record to the beginning of the - // unusedBuf buffer and reset nextRecord to it. - p.nextRecord = p.unusedBuf - } - // Keep reading from the wire until we have a complete record. - for len(fullRecord) == 0 { - if len(p.unusedBuf) == cap(p.unusedBuf) { - tmp := make([]byte, len(p.unusedBuf), cap(p.unusedBuf)+tlsRecordMaxSize) - copy(tmp, p.unusedBuf) - p.unusedBuf = tmp - } - n, err := p.Conn.Read(p.unusedBuf[len(p.unusedBuf):min(cap(p.unusedBuf), len(p.unusedBuf)+tlsRecordMaxSize)]) - if err != nil { - return nil, err - } - p.unusedBuf = p.unusedBuf[:len(p.unusedBuf)+n] - fullRecord, p.nextRecord, err = parseReadBuffer(p.unusedBuf, tlsRecordMaxPayloadSize) - if err != nil { - return nil, err - } - } - return fullRecord, nil -} - -// parseReadBuffer parses the provided buffer and returns a full record and any -// remaining bytes in that buffer. If the record is incomplete, nil is returned -// for the first return value and the given byte buffer is returned for the -// second return value. The length of the payload specified by the header should -// not be greater than maxLen, otherwise an error is returned. Note that this -// function does not allocate or copy any buffers. -func parseReadBuffer(b []byte, maxLen uint16) (fullRecord, remaining []byte, err error) { - // If the header is not complete, return the provided buffer as remaining - // buffer. 
-	if len(b) < tlsRecordHeaderSize {
-		return nil, b, nil
-	}
-	msgLenField := b[tlsRecordHeaderTypeSize+tlsRecordHeaderLegacyRecordVersionSize : tlsRecordHeaderSize]
-	length := binary.BigEndian.Uint16(msgLenField)
-	if length > maxLen {
-		return nil, nil, fmt.Errorf("record length larger than the limit %d", maxLen)
-	}
-	if len(b) < int(length)+tlsRecordHeaderSize {
-		// Record is not complete yet.
-		return nil, b, nil
-	}
-	return b[:tlsRecordHeaderSize+length], b[tlsRecordHeaderSize+length:], nil
-}
-
-// splitAndValidateHeader splits the header from the payload in the TLS 1.3
-// record and returns them. Note that the header is checked for validity, and an
-// error is returned when an invalid header is parsed. Also note that this
-// function does not allocate or copy any buffers.
-func splitAndValidateHeader(record []byte) (header, payload []byte, err error) {
-	if len(record) < tlsRecordHeaderSize {
-		return nil, nil, fmt.Errorf("record was smaller than the header size")
-	}
-	header = record[:tlsRecordHeaderSize]
-	payload = record[tlsRecordHeaderSize:]
-	if header[0] != tlsApplicationData {
-		return nil, nil, fmt.Errorf("incorrect type in the header")
-	}
-	// Check the legacy record version, which should be 0x03, 0x03.
-	if header[1] != 0x03 || header[2] != 0x03 {
-		return nil, nil, fmt.Errorf("incorrect legacy record version in the header")
-	}
-	return header, payload, nil
-}
-
-// handleAlertMessage handles an alert message.
-func (p *conn) handleAlertMessage() error {
-	if len(p.pendingApplicationData) != tlsAlertSize {
-		return errors.New("invalid alert message size")
-	}
-	alertType := p.pendingApplicationData[1]
-	// Clear the body of the alert message.
-	p.pendingApplicationData = p.pendingApplicationData[:0]
-	if alertType == byte(closeNotify) {
-		return errors.New("received a close notify alert")
-	}
-	// TODO(matthewstevenson88): Add support for more alert types.
-	return fmt.Errorf("received an unrecognized alert type: %v", alertType)
-}
-
-// parseHandshakeMsg parses a handshake message from the handshake buffer.
-// It returns the message type, the message length, the message, the raw message
-// that includes the type and length bytes, and a flag indicating whether the
-// handshake message has been fully parsed, i.e. whether the entire handshake
-// message was in the handshake buffer.
-func (p *conn) parseHandshakeMsg() (msgType byte, msgLen uint32, msg []byte, rawMsg []byte, ok bool) {
-	// Handle the case where the 4 byte handshake header is fragmented.
-	if len(p.handshakeBuf) < tlsHandshakePrefixSize {
-		return 0, 0, nil, nil, false
-	}
-	msgType = p.handshakeBuf[0]
-	msgLen = bigEndianInt24(p.handshakeBuf[tlsHandshakeMsgTypeSize : tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize])
-	if msgLen > uint32(len(p.handshakeBuf)-tlsHandshakePrefixSize) {
-		return 0, 0, nil, nil, false
-	}
-	msg = p.handshakeBuf[tlsHandshakePrefixSize : tlsHandshakePrefixSize+msgLen]
-	rawMsg = p.handshakeBuf[:tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize+msgLen]
-	p.handshakeBuf = p.handshakeBuf[tlsHandshakePrefixSize+msgLen:]
-	return msgType, msgLen, msg, rawMsg, true
-}
-
-// handleHandshakeMessage handles a handshake message. Note that the first
-// complete handshake message from the handshake buffer is removed, if it
-// exists.
-func (p *conn) handleHandshakeMessage() error {
-	// Copy the pending application data to the handshake buffer. At this point,
-	// we are guaranteed that the pending application data contains only parts
-	// of a handshake message.
-	p.handshakeBuf = append(p.handshakeBuf, p.pendingApplicationData...)
-	p.pendingApplicationData = p.pendingApplicationData[:0]
-	// Several handshake messages may be coalesced into a single record.
-	// Continue reading them until the handshake buffer is empty.
-	for len(p.handshakeBuf) > 0 {
-		handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg()
-		if !ok {
-			// The handshake could not be fully parsed, so read in another
-			// record and try again later.
-			break
-		}
-		switch handshakeMsgType {
-		case tlsHandshakeKeyUpdateType:
-			if msgLen != tlsHandshakeKeyUpdateMsgSize {
-				return errors.New("invalid handshake key update message length")
-			}
-			if len(p.handshakeBuf) != 0 {
-				return errors.New("key update message must be the last message of a handshake record")
-			}
-			if err := p.handleKeyUpdateMsg(msg); err != nil {
-				return err
-			}
-		case tlsHandshakeNewSessionTicketType:
-			// Ignore tickets that are received after a batch of tickets has
-			// been sent to S2A.
-			if p.ticketState == notReceivingTickets {
-				continue
-			}
-			if p.ticketState == ticketsNotYetReceived {
-				p.ticketState = receivingTickets
-			}
-			p.sessionTickets = append(p.sessionTickets, rawMsg)
-			if len(p.sessionTickets) == maxAllowedTickets {
-				p.ticketState = notReceivingTickets
-				grpclog.Infof("Sending session tickets to S2A.")
-				p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete)
-			}
-		default:
-			return errors.New("unknown handshake message type")
-		}
-	}
-	return nil
-}
-
-func buildKeyUpdateRequest() []byte {
-	b := make([]byte, tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize)
-	b[0] = tlsHandshakeKeyUpdateType
-	b[1] = 0
-	b[2] = 0
-	b[3] = tlsHandshakeKeyUpdateMsgSize
-	b[4] = byte(updateNotRequested)
-	return b
-}
-
-// handleKeyUpdateMsg handles a key update message.
-func (p *conn) handleKeyUpdateMsg(msg []byte) error {
-	keyUpdateRequest := msg[0]
-	if keyUpdateRequest != byte(updateNotRequested) &&
-		keyUpdateRequest != byte(updateRequested) {
-		return errors.New("invalid handshake key update message")
-	}
-	if err := p.inConn.UpdateKey(); err != nil {
-		return err
-	}
-	// Send a key update message back to the peer if requested.
-	if keyUpdateRequest == byte(updateRequested) {
-		p.writeMutex.Lock()
-		defer p.writeMutex.Unlock()
-		n, err := p.writeTLSRecord(preConstructedKeyUpdateMsg, byte(handshake))
-		if err != nil {
-			return err
-		}
-		if n != tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize {
-			return errors.New("key update request message wrote fewer bytes than expected")
-		}
-		if err = p.outConn.UpdateKey(); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// bigEndianInt24 takes the given byte buffer of at least size 3 and
-// outputs the resulting 24 bit integer as a uint32. This is needed because
-// TLS 1.3 requires 3 byte integers, and the binary.BigEndian package does
-// not provide a way to transform a byte buffer into a 3 byte integer.
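
The helper below implements that 3-byte read; the same bit arithmetic as a standalone illustration:

package main

import "fmt"

func main() {
	// A TLS 1.3 handshake length field: 3 bytes, big-endian.
	b := []byte{0x01, 0x00, 0x02}
	v := uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
	fmt.Println(v) // 65538, i.e. 0x010002
}
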
-func bigEndianInt24(b []byte) uint32 { - _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go deleted file mode 100644 index 33fa3c55d4..0000000000 --- a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go +++ /dev/null @@ -1,176 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package record - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/google/s2a-go/internal/handshaker/service" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" - s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" - "github.com/google/s2a-go/internal/tokenmanager" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" -) - -// sessionTimeout is the timeout for creating a session with the S2A handshaker -// service. -const sessionTimeout = time.Second * 5 - -// s2aTicketSender sends session tickets to the S2A handshaker service. -type s2aTicketSender interface { - // sendTicketsToS2A sends the given session tickets to the S2A handshaker - // service. - sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) -} - -// ticketStream is the stream used to send and receive session information. -type ticketStream interface { - Send(*s2apb.SessionReq) error - Recv() (*s2apb.SessionResp, error) -} - -type ticketSender struct { - // hsAddr stores the address of the S2A handshaker service. - hsAddr string - // connectionID is the connection identifier that was created and sent by - // S2A at the end of a handshake. - connectionID uint64 - // localIdentity is the local identity that was used by S2A during session - // setup and included in the session result. - localIdentity *commonpb.Identity - // tokenManager manages access tokens for authenticating to S2A. - tokenManager tokenmanager.AccessTokenManager - // ensureProcessSessionTickets allows users to wait and ensure that all - // available session tickets are sent to S2A before a process completes. - ensureProcessSessionTickets *sync.WaitGroup -} - -// sendTicketsToS2A sends the given sessionTickets to the S2A handshaker -// service. This is done asynchronously and writes to the error logs if an error -// occurs. -func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) { - // Note that the goroutine is in the function rather than at the caller - // because the fake ticket sender used for testing must run synchronously - // so that the session tickets can be accessed from it after the tests have - // been run. 
- if t.ensureProcessSessionTickets != nil { - t.ensureProcessSessionTickets.Add(1) - } - go func() { - if err := func() error { - defer func() { - if t.ensureProcessSessionTickets != nil { - t.ensureProcessSessionTickets.Done() - } - }() - hsConn, err := service.Dial(t.hsAddr) - if err != nil { - return err - } - client := s2apb.NewS2AServiceClient(hsConn) - ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) - defer cancel() - session, err := client.SetUpSession(ctx) - if err != nil { - return err - } - defer func() { - if err := session.CloseSend(); err != nil { - grpclog.Error(err) - } - }() - return t.writeTicketsToStream(session, sessionTickets) - }(); err != nil { - grpclog.Errorf("failed to send resumption tickets to S2A with identity: %v, %v", - t.localIdentity, err) - } - callComplete <- true - close(callComplete) - }() -} - -// writeTicketsToStream writes the given session tickets to the given stream. -func (t *ticketSender) writeTicketsToStream(stream ticketStream, sessionTickets [][]byte) error { - if err := stream.Send( - &s2apb.SessionReq{ - ReqOneof: &s2apb.SessionReq_ResumptionTicket{ - ResumptionTicket: &s2apb.ResumptionTicketReq{ - InBytes: sessionTickets, - ConnectionId: t.connectionID, - LocalIdentity: t.localIdentity, - }, - }, - AuthMechanisms: t.getAuthMechanisms(), - }, - ); err != nil { - return err - } - sessionResp, err := stream.Recv() - if err != nil { - return err - } - if sessionResp.GetStatus().GetCode() != uint32(codes.OK) { - return fmt.Errorf("s2a session ticket response had error status: %v, %v", - sessionResp.GetStatus().GetCode(), sessionResp.GetStatus().GetDetails()) - } - return nil -} - -func (t *ticketSender) getAuthMechanisms() []*s2apb.AuthenticationMechanism { - if t.tokenManager == nil { - return nil - } - // First handle the special case when no local identity has been provided - // by the application. In this case, an AuthenticationMechanism with no local - // identity will be sent. - if t.localIdentity == nil { - token, err := t.tokenManager.DefaultToken() - if err != nil { - grpclog.Infof("unable to get token for empty local identity: %v", err) - return nil - } - return []*s2apb.AuthenticationMechanism{ - { - MechanismOneof: &s2apb.AuthenticationMechanism_Token{ - Token: token, - }, - }, - } - } - - // Next, handle the case where the application (or the S2A) has specified - // a local identity. - token, err := t.tokenManager.Token(t.localIdentity) - if err != nil { - grpclog.Infof("unable to get token for local identity %v: %v", t.localIdentity, err) - return nil - } - return []*s2apb.AuthenticationMechanism{ - { - Identity: t.localIdentity, - MechanismOneof: &s2apb.AuthenticationMechanism_Token{ - Token: token, - }, - }, - } -} diff --git a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go deleted file mode 100644 index ec96ba3b6a..0000000000 --- a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package tokenmanager provides tokens for authenticating to S2A. -package tokenmanager - -import ( - "fmt" - "os" - - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" -) - -const ( - s2aAccessTokenEnvironmentVariable = "S2A_ACCESS_TOKEN" -) - -// AccessTokenManager manages tokens for authenticating to S2A. -type AccessTokenManager interface { - // DefaultToken returns a token that an application with no specified local - // identity must use to authenticate to S2A. - DefaultToken() (token string, err error) - // Token returns a token that an application with local identity equal to - // identity must use to authenticate to S2A. - Token(identity *commonpb.Identity) (token string, err error) -} - -type singleTokenAccessTokenManager struct { - token string -} - -// NewSingleTokenAccessTokenManager returns a new AccessTokenManager instance -// that will always manage the same token. -// -// The token to be managed is read from the s2aAccessTokenEnvironmentVariable -// environment variable. If this environment variable is not set, then this -// function returns an error. -func NewSingleTokenAccessTokenManager() (AccessTokenManager, error) { - token, variableExists := os.LookupEnv(s2aAccessTokenEnvironmentVariable) - if !variableExists { - return nil, fmt.Errorf("%s environment variable is not set", s2aAccessTokenEnvironmentVariable) - } - return &singleTokenAccessTokenManager{token: token}, nil -} - -// DefaultToken always returns the token managed by the -// singleTokenAccessTokenManager. -func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { - return m.token, nil -} - -// Token always returns the token managed by the singleTokenAccessTokenManager. -func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { - return m.token, nil -} diff --git a/vendor/github.com/google/s2a-go/internal/v2/README.md b/vendor/github.com/google/s2a-go/internal/v2/README.md deleted file mode 100644 index 3806d1e9cc..0000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/README.md +++ /dev/null @@ -1 +0,0 @@ -**This directory has the implementation of the S2Av2's gRPC-Go client libraries** diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go b/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go deleted file mode 100644 index cc811879b5..0000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go +++ /dev/null @@ -1,122 +0,0 @@ -/* - * - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -// Package certverifier offloads verifications to S2Av2. -package certverifier - -import ( - "crypto/x509" - "fmt" - - "github.com/google/s2a-go/stream" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - - s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" -) - -// VerifyClientCertificateChain builds a SessionReq, sends it to S2Av2 and -// receives a SessionResp. -func VerifyClientCertificateChain(verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - // Offload verification to S2Av2. - if grpclog.V(1) { - grpclog.Infof("Sending request to S2Av2 for client peer cert chain validation.") - } - if err := s2AStream.Send(&s2av2pb.SessionReq{ - ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ - ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ - Mode: verificationMode, - PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer_{ - ClientPeer: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer{ - CertificateChain: rawCerts, - }, - }, - }, - }, - }); err != nil { - grpclog.Infof("Failed to send request to S2Av2 for client peer cert chain validation.") - return err - } - - // Get the response from S2Av2. - resp, err := s2AStream.Recv() - if err != nil { - grpclog.Infof("Failed to receive client peer cert chain validation response from S2Av2.") - return err - } - - // Parse the response. - if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { - return fmt.Errorf("failed to offload client cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) - - } - - if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { - return fmt.Errorf("client cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) - } - - return nil - } -} - -// VerifyServerCertificateChain builds a SessionReq, sends it to S2Av2 and -// receives a SessionResp. -func VerifyServerCertificateChain(hostname string, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream, serverAuthorizationPolicy []byte) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - // Offload verification to S2Av2. - if grpclog.V(1) { - grpclog.Infof("Sending request to S2Av2 for server peer cert chain validation.") - } - if err := s2AStream.Send(&s2av2pb.SessionReq{ - ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ - ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ - Mode: verificationMode, - PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer_{ - ServerPeer: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer{ - CertificateChain: rawCerts, - ServerHostname: hostname, - SerializedUnrestrictedClientPolicy: serverAuthorizationPolicy, - }, - }, - }, - }, - }); err != nil { - grpclog.Infof("Failed to send request to S2Av2 for server peer cert chain validation.") - return err - } - - // Get the response from S2Av2. - resp, err := s2AStream.Recv() - if err != nil { - grpclog.Infof("Failed to receive server peer cert chain validation response from S2Av2.") - return err - } - - // Parse the response. 
-	if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) {
-		return fmt.Errorf("failed to offload server cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details)
-	}
-
-	if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS {
-		return fmt.Errorf("server cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails)
-	}
-
-	return nil
-	}
-}
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go
deleted file mode 100644
index e7478d43fb..0000000000
--- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- *
- * Copyright 2022 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package remotesigner offloads private key operations to S2Av2.
-package remotesigner
-
-import (
-	"crypto"
-	"crypto/rsa"
-	"crypto/x509"
-	"fmt"
-	"io"
-
-	"github.com/google/s2a-go/stream"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/grpclog"
-
-	s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
-)
-
-// remoteSigner implements the crypto.Signer interface.
-type remoteSigner struct {
-	leafCert  *x509.Certificate
-	s2AStream stream.S2AStream
-}
-
-// New returns an instance of RemoteSigner, an implementation of the
-// crypto.Signer interface.
-func New(leafCert *x509.Certificate, s2AStream stream.S2AStream) crypto.Signer {
-	return &remoteSigner{leafCert, s2AStream}
-}
-
-func (s *remoteSigner) Public() crypto.PublicKey {
-	return s.leafCert.PublicKey
-}
-
-func (s *remoteSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) {
-	signatureAlgorithm, err := getSignatureAlgorithm(opts, s.leafCert)
-	if err != nil {
-		return nil, err
-	}
-
-	req, err := getSignReq(signatureAlgorithm, digest)
-	if err != nil {
-		return nil, err
-	}
-	if grpclog.V(1) {
-		grpclog.Infof("Sending request to S2Av2 for signing operation.")
-	}
-	if err := s.s2AStream.Send(&s2av2pb.SessionReq{
-		ReqOneof: &s2av2pb.SessionReq_OffloadPrivateKeyOperationReq{
-			OffloadPrivateKeyOperationReq: req,
-		},
-	}); err != nil {
-		grpclog.Infof("Failed to send request to S2Av2 for signing operation.")
-		return nil, err
-	}
-
-	resp, err := s.s2AStream.Recv()
-	if err != nil {
-		grpclog.Infof("Failed to receive signing operation response from S2Av2.")
-		return nil, err
-	}
-
-	if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) {
-		return nil, fmt.Errorf("failed to offload signing with private key to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details)
-	}
-
-	return resp.GetOffloadPrivateKeyOperationResp().GetOutBytes(), nil
-}
-
-// getCert returns the leafCert field in s.
-func (s *remoteSigner) getCert() *x509.Certificate {
-	return s.leafCert
-}
-
-// getStream returns the s2AStream field in s.
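
For context before the accessor below: this signer is consumed by tlsconfigstore.go, later in this diff, which installs it as the private key of a tls.Certificate so that crypto/tls transparently routes handshake signatures to S2Av2. A hedged sketch of that wiring:

package main

import (
	"crypto/tls"
	"crypto/x509"

	"github.com/google/s2a-go/internal/v2/remotesigner"
	"github.com/google/s2a-go/stream"
)

// certWithRemoteSigner pairs a leaf certificate with a remote signer; the
// private key itself never leaves S2A.
func certWithRemoteSigner(leaf *x509.Certificate, s2AStream stream.S2AStream) tls.Certificate {
	return tls.Certificate{
		Certificate: [][]byte{leaf.Raw},
		Leaf:        leaf,
		PrivateKey:  remotesigner.New(leaf, s2AStream),
	}
}
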
-func (s *remoteSigner) getStream() stream.S2AStream { - return s.s2AStream -} - -func getSignReq(signatureAlgorithm s2av2pb.SignatureAlgorithm, digest []byte) (*s2av2pb.OffloadPrivateKeyOperationReq, error) { - if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256) { - return &s2av2pb.OffloadPrivateKeyOperationReq{ - Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, - SignatureAlgorithm: signatureAlgorithm, - InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha256Digest{ - Sha256Digest: digest, - }, - }, nil - } else if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384) { - return &s2av2pb.OffloadPrivateKeyOperationReq{ - Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, - SignatureAlgorithm: signatureAlgorithm, - InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha384Digest{ - Sha384Digest: digest, - }, - }, nil - } else if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ED25519) { - return &s2av2pb.OffloadPrivateKeyOperationReq{ - Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, - SignatureAlgorithm: signatureAlgorithm, - InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha512Digest{ - Sha512Digest: digest, - }, - }, nil - } else { - return nil, fmt.Errorf("unknown signature algorithm: %v", signatureAlgorithm) - } -} - -// getSignatureAlgorithm returns the signature algorithm that S2A must use when -// performing a signing operation that has been offloaded by an application -// using the crypto/tls libraries. 
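
Caller-side, this is roughly the shape of the request crypto/tls hands to the signer for an RSA-PSS handshake signature (an illustrative sketch, not code from this repo; the rand argument is effectively unused here because signing happens remotely):

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
)

func signHandshake(signer crypto.Signer, signedBytes []byte) ([]byte, error) {
	digest := sha256.Sum256(signedBytes)
	// opts.HashFunc() == crypto.SHA256 maps to S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256
	// in the algorithm-selection function below.
	return signer.Sign(rand.Reader, digest[:], &rsa.PSSOptions{
		SaltLength: rsa.PSSSaltLengthEqualsHash,
		Hash:       crypto.SHA256,
	})
}
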
-func getSignatureAlgorithm(opts crypto.SignerOpts, leafCert *x509.Certificate) (s2av2pb.SignatureAlgorithm, error) { - if opts == nil || leafCert == nil { - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") - } - switch leafCert.PublicKeyAlgorithm { - case x509.RSA: - if rsaPSSOpts, ok := opts.(*rsa.PSSOptions); ok { - return rsaPSSAlgorithm(rsaPSSOpts) - } - return rsaPPKCS1Algorithm(opts) - case x509.ECDSA: - return ecdsaAlgorithm(opts) - case x509.Ed25519: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ED25519, nil - default: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm: %q", leafCert.PublicKeyAlgorithm) - } -} - -func rsaPSSAlgorithm(opts *rsa.PSSOptions) (s2av2pb.SignatureAlgorithm, error) { - switch opts.HashFunc() { - case crypto.SHA256: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256, nil - case crypto.SHA384: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384, nil - case crypto.SHA512: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512, nil - default: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") - } -} - -func rsaPPKCS1Algorithm(opts crypto.SignerOpts) (s2av2pb.SignatureAlgorithm, error) { - switch opts.HashFunc() { - case crypto.SHA256: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256, nil - case crypto.SHA384: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384, nil - case crypto.SHA512: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512, nil - default: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") - } -} - -func ecdsaAlgorithm(opts crypto.SignerOpts) (s2av2pb.SignatureAlgorithm, error) { - switch opts.HashFunc() { - case crypto.SHA256: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256, nil - case crypto.SHA384: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384, nil - case crypto.SHA512: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512, nil - default: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") - } -} diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go deleted file mode 100644 index ff172883f2..0000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ /dev/null @@ -1,354 +0,0 @@ -/* - * - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package v2 provides the S2Av2 transport credentials used by a gRPC -// application. 
-package v2 - -import ( - "context" - "crypto/tls" - "errors" - "net" - "os" - "time" - - "github.com/golang/protobuf/proto" - "github.com/google/s2a-go/fallback" - "github.com/google/s2a-go/internal/handshaker/service" - "github.com/google/s2a-go/internal/tokenmanager" - "github.com/google/s2a-go/internal/v2/tlsconfigstore" - "github.com/google/s2a-go/stream" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - - commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" - s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" -) - -const ( - s2aSecurityProtocol = "tls" - defaultS2ATimeout = 3 * time.Second -) - -// An environment variable, which sets the timeout enforced on the connection to the S2A service for handshake. -const s2aTimeoutEnv = "S2A_TIMEOUT" - -type s2av2TransportCreds struct { - info *credentials.ProtocolInfo - isClient bool - serverName string - s2av2Address string - tokenManager *tokenmanager.AccessTokenManager - // localIdentity should only be used by the client. - localIdentity *commonpbv1.Identity - // localIdentities should only be used by the server. - localIdentities []*commonpbv1.Identity - verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode - fallbackClientHandshake fallback.ClientHandshake - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) - serverAuthorizationPolicy []byte -} - -// NewClientCreds returns a client-side transport credentials object that uses -// the S2Av2 to establish a secure connection with a server. -func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { - // Create an AccessTokenManager instance to use to authenticate to S2Av2. - accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() - - creds := &s2av2TransportCreds{ - info: &credentials.ProtocolInfo{ - SecurityProtocol: s2aSecurityProtocol, - }, - isClient: true, - serverName: "", - s2av2Address: s2av2Address, - localIdentity: localIdentity, - verificationMode: verificationMode, - fallbackClientHandshake: fallbackClientHandshakeFunc, - getS2AStream: getS2AStream, - serverAuthorizationPolicy: serverAuthorizationPolicy, - } - if err != nil { - creds.tokenManager = nil - } else { - creds.tokenManager = &accessTokenManager - } - if grpclog.V(1) { - grpclog.Info("Created client S2Av2 transport credentials.") - } - return creds, nil -} - -// NewServerCreds returns a server-side transport credentials object that uses -// the S2Av2 to establish a secure connection with a client. -func NewServerCreds(s2av2Address string, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { - // Create an AccessTokenManager instance to use to authenticate to S2Av2. 
- accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() - creds := &s2av2TransportCreds{ - info: &credentials.ProtocolInfo{ - SecurityProtocol: s2aSecurityProtocol, - }, - isClient: false, - s2av2Address: s2av2Address, - localIdentities: localIdentities, - verificationMode: verificationMode, - getS2AStream: getS2AStream, - } - if err != nil { - creds.tokenManager = nil - } else { - creds.tokenManager = &accessTokenManager - } - if grpclog.V(1) { - grpclog.Info("Created server S2Av2 transport credentials.") - } - return creds, nil -} - -// ClientHandshake performs a client-side mTLS handshake using the S2Av2. -func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if !c.isClient { - return nil, nil, errors.New("client handshake called using server transport credentials") - } - // Remove the port from serverAuthority. - serverName := removeServerNamePort(serverAuthority) - timeoutCtx, cancel := context.WithTimeout(ctx, GetS2ATimeout()) - defer cancel() - s2AStream, err := createStream(timeoutCtx, c.s2av2Address, c.getS2AStream) - if err != nil { - grpclog.Infof("Failed to connect to S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err - } - defer s2AStream.CloseSend() - if grpclog.V(1) { - grpclog.Infof("Connected to S2Av2.") - } - var config *tls.Config - - var tokenManager tokenmanager.AccessTokenManager - if c.tokenManager == nil { - tokenManager = nil - } else { - tokenManager = *c.tokenManager - } - - if c.serverName == "" { - config, err = tlsconfigstore.GetTLSConfigurationForClient(serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) - if err != nil { - grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err - } - } else { - config, err = tlsconfigstore.GetTLSConfigurationForClient(c.serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) - if err != nil { - grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err - } - } - if grpclog.V(1) { - grpclog.Infof("Got client TLS config from S2Av2.") - } - creds := credentials.NewTLS(config) - - conn, authInfo, err := creds.ClientHandshake(ctx, serverName, rawConn) - if err != nil { - grpclog.Infof("Failed to do client handshake using S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err - } - grpclog.Infof("Successfully done client handshake using S2Av2 to: %s", serverName) - - return conn, authInfo, err -} - -// ServerHandshake performs a server-side mTLS handshake using the S2Av2. 
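
A usage note before the server-side handshake below: these transport credentials plug into gRPC in the usual way. A hedged sketch, written as if inside this package (applications normally go through the public s2a package rather than this internal one, and dialWithS2A is a hypothetical helper):

import (
	"context"

	"google.golang.org/grpc"

	s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
)

func dialWithS2A(ctx context.Context, target, s2av2Address string,
	mode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*grpc.ClientConn, error) {
	// No local identity, fallback, custom stream, or authorization policy.
	creds, err := NewClientCreds(s2av2Address, nil, mode, nil, nil, nil)
	if err != nil {
		return nil, err
	}
	return grpc.DialContext(ctx, target, grpc.WithTransportCredentials(creds))
}
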
-func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if c.isClient { - return nil, nil, errors.New("server handshake called using client transport credentials") - } - ctx, cancel := context.WithTimeout(context.Background(), GetS2ATimeout()) - defer cancel() - s2AStream, err := createStream(ctx, c.s2av2Address, c.getS2AStream) - if err != nil { - grpclog.Infof("Failed to connect to S2Av2: %v", err) - return nil, nil, err - } - defer s2AStream.CloseSend() - if grpclog.V(1) { - grpclog.Infof("Connected to S2Av2.") - } - - var tokenManager tokenmanager.AccessTokenManager - if c.tokenManager == nil { - tokenManager = nil - } else { - tokenManager = *c.tokenManager - } - - config, err := tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode) - if err != nil { - grpclog.Infof("Failed to get server TLS config from S2Av2: %v", err) - return nil, nil, err - } - if grpclog.V(1) { - grpclog.Infof("Got server TLS config from S2Av2.") - } - creds := credentials.NewTLS(config) - return creds.ServerHandshake(rawConn) -} - -// Info returns protocol info of s2av2TransportCreds. -func (c *s2av2TransportCreds) Info() credentials.ProtocolInfo { - return *c.info -} - -// Clone makes a deep copy of s2av2TransportCreds. -func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { - info := *c.info - serverName := c.serverName - fallbackClientHandshake := c.fallbackClientHandshake - - s2av2Address := c.s2av2Address - var tokenManager tokenmanager.AccessTokenManager - if c.tokenManager == nil { - tokenManager = nil - } else { - tokenManager = *c.tokenManager - } - verificationMode := c.verificationMode - var localIdentity *commonpbv1.Identity - if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) - } - var localIdentities []*commonpbv1.Identity - if c.localIdentities != nil { - localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) - for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) - } - } - creds := &s2av2TransportCreds{ - info: &info, - isClient: c.isClient, - serverName: serverName, - fallbackClientHandshake: fallbackClientHandshake, - s2av2Address: s2av2Address, - localIdentity: localIdentity, - localIdentities: localIdentities, - verificationMode: verificationMode, - } - if c.tokenManager == nil { - creds.tokenManager = nil - } else { - creds.tokenManager = &tokenManager - } - return creds -} - -// NewClientTLSConfig returns a tls.Config instance that uses S2Av2 to establish a TLS connection as -// a client. The tls.Config MUST only be used to establish a single TLS connection. -func NewClientTLSConfig( - ctx context.Context, - s2av2Address string, - tokenManager tokenmanager.AccessTokenManager, - verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, - serverName string, - serverAuthorizationPolicy []byte) (*tls.Config, error) { - s2AStream, err := createStream(ctx, s2av2Address, nil) - if err != nil { - grpclog.Infof("Failed to connect to S2Av2: %v", err) - return nil, err - } - - return tlsconfigstore.GetTLSConfigurationForClient(removeServerNamePort(serverName), s2AStream, tokenManager, nil, verificationMode, serverAuthorizationPolicy) -} - -// OverrideServerName sets the ServerName in the s2av2TransportCreds protocol -// info. The ServerName MUST be a hostname. 
-func (c *s2av2TransportCreds) OverrideServerName(serverNameOverride string) error { - serverName := removeServerNamePort(serverNameOverride) - c.info.ServerName = serverName - c.serverName = serverName - return nil -} - -// Remove the trailing port from server name. -func removeServerNamePort(serverName string) string { - name, _, err := net.SplitHostPort(serverName) - if err != nil { - name = serverName - } - return name -} - -type s2AGrpcStream struct { - stream s2av2pb.S2AService_SetUpSessionClient -} - -func (x s2AGrpcStream) Send(m *s2av2pb.SessionReq) error { - return x.stream.Send(m) -} - -func (x s2AGrpcStream) Recv() (*s2av2pb.SessionResp, error) { - return x.stream.Recv() -} - -func (x s2AGrpcStream) CloseSend() error { - return x.stream.CloseSend() -} - -func createStream(ctx context.Context, s2av2Address string, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { - if getS2AStream != nil { - return getS2AStream(ctx, s2av2Address) - } - // TODO(rmehta19): Consider whether to close the connection to S2Av2. - conn, err := service.Dial(s2av2Address) - if err != nil { - return nil, err - } - client := s2av2pb.NewS2AServiceClient(conn) - gRPCStream, err := client.SetUpSession(ctx, []grpc.CallOption{}...) - if err != nil { - return nil, err - } - return &s2AGrpcStream{ - stream: gRPCStream, - }, nil -} - -// GetS2ATimeout returns the timeout enforced on the connection to the S2A service for handshake. -func GetS2ATimeout() time.Duration { - timeout, err := time.ParseDuration(os.Getenv(s2aTimeoutEnv)) - if err != nil { - return defaultS2ATimeout - } - return timeout -} diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go deleted file mode 100644 index 4d91913229..0000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go +++ /dev/null @@ -1,404 +0,0 @@ -/* - * - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package tlsconfigstore offloads operations to S2Av2. -package tlsconfigstore - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - - "github.com/google/s2a-go/internal/tokenmanager" - "github.com/google/s2a-go/internal/v2/certverifier" - "github.com/google/s2a-go/internal/v2/remotesigner" - "github.com/google/s2a-go/stream" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - - commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" - commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" - s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" -) - -const ( - // HTTP/2 - h2 = "h2" -) - -// GetTLSConfigurationForClient returns a tls.Config instance for use by a client application. 
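Two small helpers from the removed file above are easy to misread: removeServerNamePort relies on net.SplitHostPort failing when no port is present, and GetS2ATimeout treats any unparsable value, including an unset variable, as "use the default". A sketch under those assumptions (the env key and default below are illustrative stand-ins for the package's unexported s2aTimeoutEnv and defaultS2ATimeout):

```go
package main

import (
	"fmt"
	"net"
	"os"
	"time"
)

// hostOnly restates removeServerNamePort: net.SplitHostPort errors when
// the input carries no port, and in that case the name is used verbatim.
func hostOnly(serverName string) string {
	host, _, err := net.SplitHostPort(serverName)
	if err != nil {
		return serverName
	}
	return host
}

// timeoutFromEnv restates GetS2ATimeout: an unset or malformed variable
// (ParseDuration of "" also errors) silently falls back to the default.
func timeoutFromEnv(envKey string, def time.Duration) time.Duration {
	d, err := time.ParseDuration(os.Getenv(envKey))
	if err != nil {
		return def
	}
	return d
}

func main() {
	fmt.Println(hostOnly("example.com:443"))                  // example.com
	fmt.Println(hostOnly("example.com"))                      // example.com
	fmt.Println(timeoutFromEnv("S2A_TIMEOUT", 3*time.Second)) // 3s if unset
}
```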
-func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { - authMechanisms := getAuthMechanisms(tokenManager, []*commonpbv1.Identity{localIdentity}) - - if grpclog.V(1) { - grpclog.Infof("Sending request to S2Av2 for client TLS config.") - } - // Send request to S2Av2 for config. - if err := s2AStream.Send(&s2av2pb.SessionReq{ - LocalIdentity: localIdentity, - AuthenticationMechanisms: authMechanisms, - ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{ - GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{ - ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_CLIENT, - }, - }, - }); err != nil { - grpclog.Infof("Failed to send request to S2Av2 for client TLS config") - return nil, err - } - - // Get the response containing config from S2Av2. - resp, err := s2AStream.Recv() - if err != nil { - grpclog.Infof("Failed to receive client TLS config response from S2Av2.") - return nil, err - } - - // TODO(rmehta19): Add unit test for this if statement. - if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { - return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) - } - - // Extract TLS configuration from SessionResp. - tlsConfig := resp.GetGetTlsConfigurationResp().GetClientTlsConfiguration() - - var cert tls.Certificate - for i, v := range tlsConfig.CertificateChain { - // Populate Certificates field. - block, _ := pem.Decode([]byte(v)) - if block == nil { - return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty") - } - x509Cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - cert.Certificate = append(cert.Certificate, x509Cert.Raw) - if i == 0 { - cert.Leaf = x509Cert - } - } - - if len(tlsConfig.CertificateChain) > 0 { - cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) - if cert.PrivateKey == nil { - return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") - } - } - - minVersion, maxVersion, err := getTLSMinMaxVersionsClient(tlsConfig) - if err != nil { - return nil, err - } - - // Create mTLS credentials for client. - config := &tls.Config{ - VerifyPeerCertificate: certverifier.VerifyServerCertificateChain(serverHostname, verificationMode, s2AStream, serverAuthorizationPolicy), - ServerName: serverHostname, - InsecureSkipVerify: true, // NOLINT - ClientSessionCache: nil, - SessionTicketsDisabled: true, - MinVersion: minVersion, - MaxVersion: maxVersion, - NextProtos: []string{h2}, - } - if len(tlsConfig.CertificateChain) > 0 { - config.Certificates = []tls.Certificate{cert} - } - return config, nil -} - -// GetTLSConfigurationForServer returns a tls.Config instance for use by a server application. -func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { - return &tls.Config{ - GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream), - }, nil -} - -// ClientConfig builds a TLS config for a server to establish a secure -// connection with a client, based on SNI communicated during ClientHello.
-// Ensures that server presents the correct certificate to establish a TLS -// connection. -func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { - return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { - tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) - if err != nil { - return nil, err - } - - var cert tls.Certificate - for i, v := range tlsConfig.CertificateChain { - // Populate Certificates field. - block, _ := pem.Decode([]byte(v)) - if block == nil { - return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty") - } - x509Cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - cert.Certificate = append(cert.Certificate, x509Cert.Raw) - if i == 0 { - cert.Leaf = x509Cert - } - } - - cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) - if cert.PrivateKey == nil { - return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") - } - - minVersion, maxVersion, err := getTLSMinMaxVersionsServer(tlsConfig) - if err != nil { - return nil, err - } - - clientAuth := getTLSClientAuthType(tlsConfig) - - var cipherSuites []uint16 - cipherSuites = getCipherSuites(tlsConfig.Ciphersuites) - - // Create mTLS credentials for server. - return &tls.Config{ - Certificates: []tls.Certificate{cert}, - VerifyPeerCertificate: certverifier.VerifyClientCertificateChain(verificationMode, s2AStream), - ClientAuth: clientAuth, - CipherSuites: cipherSuites, - SessionTicketsDisabled: true, - MinVersion: minVersion, - MaxVersion: maxVersion, - NextProtos: []string{h2}, - }, nil - } -} - -func getCipherSuites(tlsConfigCipherSuites []commonpb.Ciphersuite) []uint16 { - var tlsGoCipherSuites []uint16 - for _, v := range tlsConfigCipherSuites { - s := getTLSCipherSuite(v) - if s != 0xffff { - tlsGoCipherSuites = append(tlsGoCipherSuites, s) - } - } - return tlsGoCipherSuites -} - -func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { - switch tlsCipherSuite { - case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: - return tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: - return tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: - return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 - case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256: - return tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384: - return tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: - return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 - default: - return 0xffff - } -} - -func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { - authMechanisms := getAuthMechanisms(tokenManager, localIdentities) - var locID *commonpbv1.Identity - if localIdentities != nil { - locID = localIdentities[0] - } - - if err := s2AStream.Send(&s2av2pb.SessionReq{ - LocalIdentity: locID, - AuthenticationMechanisms: 
authMechanisms, - ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{ - GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{ - ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_SERVER, - Sni: sni, - }, - }, - }); err != nil { - return nil, err - } - - resp, err := s2AStream.Recv() - if err != nil { - return nil, err - } - - // TODO(rmehta19): Add unit test for this if statement. - if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { - return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) - } - - return resp.GetGetTlsConfigurationResp().GetServerTlsConfiguration(), nil -} - -func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) tls.ClientAuthType { - var clientAuth tls.ClientAuthType - switch x := tlsConfig.RequestClientCertificate; x { - case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE: - clientAuth = tls.NoClientCert - case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: - clientAuth = tls.RequestClientCert - case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY: - // This case actually maps to tls.VerifyClientCertIfGiven. However this - // mapping triggers normal verification, followed by custom verification, - // specified in VerifyPeerCertificate. To bypass normal verification, and - // only do custom verification we set clientAuth to RequireAnyClientCert or - // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full - // discussion. - clientAuth = tls.RequireAnyClientCert - case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: - clientAuth = tls.RequireAnyClientCert - case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY: - // This case actually maps to tls.RequireAndVerifyClientCert. However this - // mapping triggers normal verification, followed by custom verification, - // specified in VerifyPeerCertificate. To bypass normal verification, and - // only do custom verification we set clientAuth to RequireAnyClientCert or - // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full - // discussion. 
- clientAuth = tls.RequireAnyClientCert - default: - clientAuth = tls.RequireAnyClientCert - } - return clientAuth -} - -func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { - if tokenManager == nil { - return nil - } - if len(localIdentities) == 0 { - token, err := tokenManager.DefaultToken() - if err != nil { - grpclog.Infof("Unable to get token for empty local identity: %v", err) - return nil - } - return []*s2av2pb.AuthenticationMechanism{ - { - MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ - Token: token, - }, - }, - } - } - var authMechanisms []*s2av2pb.AuthenticationMechanism - for _, localIdentity := range localIdentities { - if localIdentity == nil { - token, err := tokenManager.DefaultToken() - if err != nil { - grpclog.Infof("Unable to get default token for local identity %v: %v", localIdentity, err) - continue - } - authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ - Identity: localIdentity, - MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ - Token: token, - }, - }) - } else { - token, err := tokenManager.Token(localIdentity) - if err != nil { - grpclog.Infof("Unable to get token for local identity %v: %v", localIdentity, err) - continue - } - authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ - Identity: localIdentity, - MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ - Token: token, - }, - }) - } - } - return authMechanisms -} - -// TODO(rmehta19): refactor switch statements into a helper function. -func getTLSMinMaxVersionsClient(tlsConfig *s2av2pb.GetTlsConfigurationResp_ClientTlsConfiguration) (uint16, uint16, error) { - // Map S2Av2 TLSVersion to consts defined in tls package. - var minVersion uint16 - var maxVersion uint16 - switch x := tlsConfig.MinTlsVersion; x { - case commonpb.TLSVersion_TLS_VERSION_1_0: - minVersion = tls.VersionTLS10 - case commonpb.TLSVersion_TLS_VERSION_1_1: - minVersion = tls.VersionTLS11 - case commonpb.TLSVersion_TLS_VERSION_1_2: - minVersion = tls.VersionTLS12 - case commonpb.TLSVersion_TLS_VERSION_1_3: - minVersion = tls.VersionTLS13 - default: - return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) - } - - switch x := tlsConfig.MaxTlsVersion; x { - case commonpb.TLSVersion_TLS_VERSION_1_0: - maxVersion = tls.VersionTLS10 - case commonpb.TLSVersion_TLS_VERSION_1_1: - maxVersion = tls.VersionTLS11 - case commonpb.TLSVersion_TLS_VERSION_1_2: - maxVersion = tls.VersionTLS12 - case commonpb.TLSVersion_TLS_VERSION_1_3: - maxVersion = tls.VersionTLS13 - default: - return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) - } - if minVersion > maxVersion { - return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") - } - return minVersion, maxVersion, nil -} - -func getTLSMinMaxVersionsServer(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) (uint16, uint16, error) { - // Map S2Av2 TLSVersion to consts defined in tls package. 
- var minVersion uint16 - var maxVersion uint16 - switch x := tlsConfig.MinTlsVersion; x { - case commonpb.TLSVersion_TLS_VERSION_1_0: - minVersion = tls.VersionTLS10 - case commonpb.TLSVersion_TLS_VERSION_1_1: - minVersion = tls.VersionTLS11 - case commonpb.TLSVersion_TLS_VERSION_1_2: - minVersion = tls.VersionTLS12 - case commonpb.TLSVersion_TLS_VERSION_1_3: - minVersion = tls.VersionTLS13 - default: - return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) - } - - switch x := tlsConfig.MaxTlsVersion; x { - case commonpb.TLSVersion_TLS_VERSION_1_0: - maxVersion = tls.VersionTLS10 - case commonpb.TLSVersion_TLS_VERSION_1_1: - maxVersion = tls.VersionTLS11 - case commonpb.TLSVersion_TLS_VERSION_1_2: - maxVersion = tls.VersionTLS12 - case commonpb.TLSVersion_TLS_VERSION_1_3: - maxVersion = tls.VersionTLS13 - default: - return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) - } - if minVersion > maxVersion { - return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") - } - return minVersion, maxVersion, nil -} diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go deleted file mode 100644 index 1c1349de4a..0000000000 --- a/vendor/github.com/google/s2a-go/s2a.go +++ /dev/null @@ -1,412 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package s2a provides the S2A transport credentials used by a gRPC -// application. -package s2a - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "net" - "sync" - "time" - - "github.com/golang/protobuf/proto" - "github.com/google/s2a-go/fallback" - "github.com/google/s2a-go/internal/handshaker" - "github.com/google/s2a-go/internal/handshaker/service" - "github.com/google/s2a-go/internal/tokenmanager" - "github.com/google/s2a-go/internal/v2" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" - s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" -) - -const ( - s2aSecurityProtocol = "tls" - // defaultTimeout specifies the default server handshake timeout. - defaultTimeout = 30.0 * time.Second -) - -// s2aTransportCreds are the transport credentials required for establishing -// a secure connection using the S2A. They implement the -// credentials.TransportCredentials interface. -type s2aTransportCreds struct { - info *credentials.ProtocolInfo - minTLSVersion commonpb.TLSVersion - maxTLSVersion commonpb.TLSVersion - // tlsCiphersuites contains the ciphersuites used in the S2A connection. - // Note that these are currently unconfigurable. - tlsCiphersuites []commonpb.Ciphersuite - // localIdentity should only be used by the client. - localIdentity *commonpb.Identity - // localIdentities should only be used by the server. - localIdentities []*commonpb.Identity - // targetIdentities should only be used by the client. 
- targetIdentities []*commonpb.Identity - isClient bool - s2aAddr string - ensureProcessSessionTickets *sync.WaitGroup -} - -// NewClientCreds returns a client-side transport credentials object that uses -// the S2A to establish a secure connection with a server. -func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, error) { - if opts == nil { - return nil, errors.New("nil client options") - } - var targetIdentities []*commonpb.Identity - for _, targetIdentity := range opts.TargetIdentities { - protoTargetIdentity, err := toProtoIdentity(targetIdentity) - if err != nil { - return nil, err - } - targetIdentities = append(targetIdentities, protoTargetIdentity) - } - localIdentity, err := toProtoIdentity(opts.LocalIdentity) - if err != nil { - return nil, err - } - if opts.EnableLegacyMode { - return &s2aTransportCreds{ - info: &credentials.ProtocolInfo{ - SecurityProtocol: s2aSecurityProtocol, - }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, - }, - localIdentity: localIdentity, - targetIdentities: targetIdentities, - isClient: true, - s2aAddr: opts.S2AAddress, - ensureProcessSessionTickets: opts.EnsureProcessSessionTickets, - }, nil - } - verificationMode := getVerificationMode(opts.VerificationMode) - var fallbackFunc fallback.ClientHandshake - if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { - fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc - } - return v2.NewClientCreds(opts.S2AAddress, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) -} - -// NewServerCreds returns a server-side transport credentials object that uses -// the S2A to establish a secure connection with a client. -func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, error) { - if opts == nil { - return nil, errors.New("nil server options") - } - var localIdentities []*commonpb.Identity - for _, localIdentity := range opts.LocalIdentities { - protoLocalIdentity, err := toProtoIdentity(localIdentity) - if err != nil { - return nil, err - } - localIdentities = append(localIdentities, protoLocalIdentity) - } - if opts.EnableLegacyMode { - return &s2aTransportCreds{ - info: &credentials.ProtocolInfo{ - SecurityProtocol: s2aSecurityProtocol, - }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, - }, - localIdentities: localIdentities, - isClient: false, - s2aAddr: opts.S2AAddress, - }, nil - } - verificationMode := getVerificationMode(opts.VerificationMode) - return v2.NewServerCreds(opts.S2AAddress, localIdentities, verificationMode, opts.getS2AStream) -} - -// ClientHandshake initiates a client-side TLS handshake using the S2A. -func (c *s2aTransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if !c.isClient { - return nil, nil, errors.New("client handshake called using server transport credentials") - } - - // Connect to the S2A. 
- hsConn, err := service.Dial(c.s2aAddr) - if err != nil { - grpclog.Infof("Failed to connect to S2A: %v", err) - return nil, nil, err - } - - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - - opts := &handshaker.ClientHandshakerOptions{ - MinTLSVersion: c.minTLSVersion, - MaxTLSVersion: c.maxTLSVersion, - TLSCiphersuites: c.tlsCiphersuites, - TargetIdentities: c.targetIdentities, - LocalIdentity: c.localIdentity, - TargetName: serverAuthority, - EnsureProcessSessionTickets: c.ensureProcessSessionTickets, - } - chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts) - if err != nil { - grpclog.Infof("Call to handshaker.NewClientHandshaker failed: %v", err) - return nil, nil, err - } - defer func() { - if err != nil { - if closeErr := chs.Close(); closeErr != nil { - grpclog.Infof("Close failed unexpectedly: %v", closeErr) - err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr) - } - } - }() - - secConn, authInfo, err := chs.ClientHandshake(context.Background()) - if err != nil { - grpclog.Infof("Handshake failed: %v", err) - return nil, nil, err - } - return secConn, authInfo, nil -} - -// ServerHandshake initiates a server-side TLS handshake using the S2A. -func (c *s2aTransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if c.isClient { - return nil, nil, errors.New("server handshake called using client transport credentials") - } - - // Connect to the S2A. - hsConn, err := service.Dial(c.s2aAddr) - if err != nil { - grpclog.Infof("Failed to connect to S2A: %v", err) - return nil, nil, err - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - - opts := &handshaker.ServerHandshakerOptions{ - MinTLSVersion: c.minTLSVersion, - MaxTLSVersion: c.maxTLSVersion, - TLSCiphersuites: c.tlsCiphersuites, - LocalIdentities: c.localIdentities, - } - shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts) - if err != nil { - grpclog.Infof("Call to handshaker.NewServerHandshaker failed: %v", err) - return nil, nil, err - } - defer func() { - if err != nil { - if closeErr := shs.Close(); closeErr != nil { - grpclog.Infof("Close failed unexpectedly: %v", closeErr) - err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr) - } - } - }() - - secConn, authInfo, err := shs.ServerHandshake(context.Background()) - if err != nil { - grpclog.Infof("Handshake failed: %v", err) - return nil, nil, err - } - return secConn, authInfo, nil -} - -func (c *s2aTransportCreds) Info() credentials.ProtocolInfo { - return *c.info -} - -func (c *s2aTransportCreds) Clone() credentials.TransportCredentials { - info := *c.info - var localIdentity *commonpb.Identity - if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) - } - var localIdentities []*commonpb.Identity - if c.localIdentities != nil { - localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) - for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) - } - } - var targetIdentities []*commonpb.Identity - if c.targetIdentities != nil { - targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities)) - for i, targetIdentity := range c.targetIdentities { - targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity) - } - } - return &s2aTransportCreds{ - info: &info, - minTLSVersion: c.minTLSVersion, - maxTLSVersion: c.maxTLSVersion, - 
tlsCiphersuites: c.tlsCiphersuites, - localIdentity: localIdentity, - localIdentities: localIdentities, - targetIdentities: targetIdentities, - isClient: c.isClient, - s2aAddr: c.s2aAddr, - } -} - -func (c *s2aTransportCreds) OverrideServerName(serverNameOverride string) error { - c.info.ServerName = serverNameOverride - return nil -} - -// TLSClientConfigOptions specifies parameters for creating client TLS config. -type TLSClientConfigOptions struct { - // ServerName is required by s2a as the expected name when verifying the hostname found in server's certificate. - // tlsConfig, _ := factory.Build(ctx, &s2a.TLSClientConfigOptions{ - // ServerName: "example.com", - // }) - ServerName string -} - -// TLSClientConfigFactory defines the interface for a client TLS config factory. -type TLSClientConfigFactory interface { - Build(ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) -} - -// NewTLSClientConfigFactory returns an instance of s2aTLSClientConfigFactory. -func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, error) { - if opts == nil { - return nil, fmt.Errorf("opts must be non-nil") - } - if opts.EnableLegacyMode { - return nil, fmt.Errorf("NewTLSClientConfigFactory only supports S2Av2") - } - tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() - if err != nil { - // The only possible error is: access token not set in the environment, - // which is okay in environments other than serverless. - grpclog.Infof("Access token manager not initialized: %v", err) - return &s2aTLSClientConfigFactory{ - s2av2Address: opts.S2AAddress, - tokenManager: nil, - verificationMode: getVerificationMode(opts.VerificationMode), - serverAuthorizationPolicy: opts.serverAuthorizationPolicy, - }, nil - } - return &s2aTLSClientConfigFactory{ - s2av2Address: opts.S2AAddress, - tokenManager: tokenManager, - verificationMode: getVerificationMode(opts.VerificationMode), - serverAuthorizationPolicy: opts.serverAuthorizationPolicy, - }, nil -} - -type s2aTLSClientConfigFactory struct { - s2av2Address string - tokenManager tokenmanager.AccessTokenManager - verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode - serverAuthorizationPolicy []byte -} - -func (f *s2aTLSClientConfigFactory) Build( - ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) { - serverName := "" - if opts != nil && opts.ServerName != "" { - serverName = opts.ServerName - } - return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) -} - -func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { - switch verificationMode { - case ConnectToGoogle: - return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE - case Spiffe: - return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE - default: - return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED - } -} - -// NewS2ADialTLSContextFunc returns a dialer which establishes an MTLS connection using S2A. 
-// Example use with http.RoundTripper: -// -// dialTLSContext := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ -// S2AAddress: s2aAddress, // required -// }) -// transport := http.DefaultTransport -// transport.DialTLSContext = dialTLSContext -func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, network, addr string) (net.Conn, error) { - - return func(ctx context.Context, network, addr string) (net.Conn, error) { - - fallback := func(err error) (net.Conn, error) { - if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackDialer != nil && - opts.FallbackOpts.FallbackDialer.Dialer != nil && opts.FallbackOpts.FallbackDialer.ServerAddr != "" { - fbDialer := opts.FallbackOpts.FallbackDialer - grpclog.Infof("fall back to dial: %s", fbDialer.ServerAddr) - fbConn, fbErr := fbDialer.Dialer.DialContext(ctx, network, fbDialer.ServerAddr) - if fbErr != nil { - return nil, fmt.Errorf("error fallback to %s: %v; S2A error: %w", fbDialer.ServerAddr, fbErr, err) - } - return fbConn, nil - } - return nil, err - } - - factory, err := NewTLSClientConfigFactory(opts) - if err != nil { - grpclog.Infof("error creating S2A client config factory: %v", err) - return fallback(err) - } - - serverName, _, err := net.SplitHostPort(addr) - if err != nil { - serverName = addr - } - timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout()) - defer cancel() - s2aTLSConfig, err := factory.Build(timeoutCtx, &TLSClientConfigOptions{ - ServerName: serverName, - }) - if err != nil { - grpclog.Infof("error building S2A TLS config: %v", err) - return fallback(err) - } - - s2aDialer := &tls.Dialer{ - Config: s2aTLSConfig, - } - c, err := s2aDialer.DialContext(ctx, network, addr) - if err != nil { - grpclog.Infof("error dialing with S2A to %s: %v", addr, err) - return fallback(err) - } - grpclog.Infof("success dialing MTLS to %s with S2A", addr) - return c, nil - } -} diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go deleted file mode 100644 index 94feafb9cf..0000000000 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ /dev/null @@ -1,208 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package s2a - -import ( - "context" - "crypto/tls" - "errors" - "sync" - - "github.com/google/s2a-go/fallback" - "github.com/google/s2a-go/stream" - - s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" -) - -// Identity is the interface for S2A identities. -type Identity interface { - // Name returns the name of the identity. - Name() string -} - -type spiffeID struct { - spiffeID string -} - -func (s *spiffeID) Name() string { return s.spiffeID } - -// NewSpiffeID creates a SPIFFE ID from id. -func NewSpiffeID(id string) Identity { - return &spiffeID{spiffeID: id} -} - -type hostname struct { - hostname string -} - -func (h *hostname) Name() string { return h.hostname } - -// NewHostname creates a hostname from name.
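The http.RoundTripper example in the comment above is schematic: http.DefaultTransport is declared as an http.RoundTripper, so real code has to assert it to *http.Transport (and should clone it) before assigning DialTLSContext. A sketch with a plain tls.Dialer standing in for the S2A-provided dial function:

```go
package main

import (
	"context"
	"crypto/tls"
	"net"
	"net/http"
)

func main() {
	// Stand-in for the dial function returned by NewS2ADialTLSContextFunc.
	dialTLS := func(ctx context.Context, network, addr string) (net.Conn, error) {
		d := &tls.Dialer{Config: &tls.Config{MinVersion: tls.VersionTLS12}}
		return d.DialContext(ctx, network, addr)
	}

	// Clone the default transport rather than mutating the shared instance.
	transport := http.DefaultTransport.(*http.Transport).Clone()
	transport.DialTLSContext = dialTLS
	client := &http.Client{Transport: transport}
	_ = client // use client.Get(...) as usual; TLS dialing now goes through dialTLS
}
```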
-func NewHostname(name string) Identity { - return &hostname{hostname: name} -} - -type uid struct { - uid string -} - -func (h *uid) Name() string { return h.uid } - -// NewUID creates a UID from name. -func NewUID(name string) Identity { - return &uid{uid: name} -} - -// VerificationModeType specifies the mode that S2A must use to verify the peer -// certificate chain. -type VerificationModeType int - -// Three types of verification modes. -const ( - Unspecified = iota - ConnectToGoogle - Spiffe -) - -// ClientOptions contains the client-side options used to establish a secure -// channel using the S2A handshaker service. -type ClientOptions struct { - // TargetIdentities contains a list of allowed server identities. One of the - // target identities should match the peer identity in the handshake - // result; otherwise, the handshake fails. - TargetIdentities []Identity - // LocalIdentity is the local identity of the client application. If none is - // provided, then the S2A will choose the default identity, if one exists. - LocalIdentity Identity - // S2AAddress is the address of the S2A. - S2AAddress string - // EnsureProcessSessionTickets waits for all session tickets to be sent to - // S2A before a process completes. - // - // This functionality is crucial for processes that complete very soon after - // using S2A to establish a TLS connection, but it can be ignored for longer - // lived processes. - // - // Usage example: - // func main() { - // var ensureProcessSessionTickets sync.WaitGroup - // clientOpts := &s2a.ClientOptions{ - // EnsureProcessSessionTickets: &ensureProcessSessionTickets, - // // Set other members. - // } - // creds, _ := s2a.NewClientCreds(clientOpts) - // conn, _ := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds)) - // defer conn.Close() - // - // // Make RPC call. - // - // // The process terminates right after the RPC call ends. - // // ensureProcessSessionTickets can be used to ensure resumption - // // tickets are fully processed. If the process is long-lived, using - // // ensureProcessSessionTickets is not necessary. - // ensureProcessSessionTickets.Wait() - // } - EnsureProcessSessionTickets *sync.WaitGroup - // If true, enables the use of legacy S2Av1. - EnableLegacyMode bool - // VerificationMode specifies the mode that S2A must use to verify the - // peer certificate chain. - VerificationMode VerificationModeType - - // Optional fallback after dialing with S2A fails. - FallbackOpts *FallbackOptions - - // Generates an S2AStream interface for talking to the S2A server. - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) - - // Serialized user specified policy for server authorization. - serverAuthorizationPolicy []byte -} - -// FallbackOptions prescribes the fallback logic that should be taken if the application fails to connect with S2A. -type FallbackOptions struct { - // FallbackClientHandshakeFunc is used to specify fallback behavior when calling s2a.NewClientCreds(). - // It will be called by ClientHandshake function, after handshake with S2A fails. - // s2a.NewClientCreds() ignores the other FallbackDialer field. - FallbackClientHandshakeFunc fallback.ClientHandshake - - // FallbackDialer is used to specify fallback behavior when calling s2a.NewS2aDialTLSContextFunc(). - // It passes in a custom fallback dialer and server address to use after dialing with S2A fails. - // s2a.NewS2aDialTLSContextFunc() ignores the other FallbackClientHandshakeFunc field. 
- FallbackDialer *FallbackDialer -} - -// FallbackDialer contains a fallback tls.Dialer and a server address to connect to. -type FallbackDialer struct { - // Dialer specifies a fallback tls.Dialer. - Dialer *tls.Dialer - // ServerAddr is used by Dialer to establish fallback connection. - ServerAddr string -} - -// DefaultClientOptions returns the default client options. -func DefaultClientOptions(s2aAddress string) *ClientOptions { - return &ClientOptions{ - S2AAddress: s2aAddress, - VerificationMode: ConnectToGoogle, - } -} - -// ServerOptions contains the server-side options used to establish a secure -// channel using the S2A handshaker service. -type ServerOptions struct { - // LocalIdentities is the list of local identities that may be assumed by - // the server. If no local identity is specified, then the S2A chooses a - // default local identity, if one exists. - LocalIdentities []Identity - // S2AAddress is the address of the S2A. - S2AAddress string - // If true, enables the use of legacy S2Av1. - EnableLegacyMode bool - // VerificationMode specifies the mode that S2A must use to verify the - // peer certificate chain. - VerificationMode VerificationModeType - - // Generates an S2AStream interface for talking to the S2A server. - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) -} - -// DefaultServerOptions returns the default server options. -func DefaultServerOptions(s2aAddress string) *ServerOptions { - return &ServerOptions{ - S2AAddress: s2aAddress, - VerificationMode: ConnectToGoogle, - } -} - -func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { - if identity == nil { - return nil, nil - } - switch id := identity.(type) { - case *spiffeID: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil - case *hostname: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil - case *uid: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil - default: - return nil, errors.New("unrecognized identity type") - } -} diff --git a/vendor/github.com/google/s2a-go/s2a_utils.go b/vendor/github.com/google/s2a-go/s2a_utils.go deleted file mode 100644 index d649cc4614..0000000000 --- a/vendor/github.com/google/s2a-go/s2a_utils.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package s2a - -import ( - "context" - "errors" - - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/peer" -) - -// AuthInfo exposes security information from the S2A to the application. -type AuthInfo interface { - // AuthType returns the authentication type. - AuthType() string - // ApplicationProtocol returns the application protocol, e.g. "grpc". - ApplicationProtocol() string - // TLSVersion returns the TLS version negotiated during the handshake. 
- TLSVersion() commonpb.TLSVersion - // Ciphersuite returns the ciphersuite negotiated during the handshake. - Ciphersuite() commonpb.Ciphersuite - // PeerIdentity returns the authenticated identity of the peer. - PeerIdentity() *commonpb.Identity - // LocalIdentity returns the local identity of the application used during - // session setup. - LocalIdentity() *commonpb.Identity - // PeerCertFingerprint returns the SHA256 hash of the peer certificate used in - // the S2A handshake. - PeerCertFingerprint() []byte - // LocalCertFingerprint returns the SHA256 hash of the local certificate used - // in the S2A handshake. - LocalCertFingerprint() []byte - // IsHandshakeResumed returns true if a cached session was used to resume - // the handshake. - IsHandshakeResumed() bool - // SecurityLevel returns the security level of the connection. - SecurityLevel() credentials.SecurityLevel -} - -// AuthInfoFromPeer extracts the authinfo.S2AAuthInfo object from the given -// peer, if it exists. This API should be used by gRPC clients after -// obtaining a peer object using the grpc.Peer() CallOption. -func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { - s2aAuthInfo, ok := p.AuthInfo.(AuthInfo) - if !ok { - return nil, errors.New("no S2AAuthInfo found in Peer") - } - return s2aAuthInfo, nil -} - -// AuthInfoFromContext extracts the authinfo.S2AAuthInfo object from the given -// context, if it exists. This API should be used by gRPC server RPC handlers -// to get information about the peer. On the client-side, use the grpc.Peer() -// CallOption and the AuthInfoFromPeer function. -func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { - p, ok := peer.FromContext(ctx) - if !ok { - return nil, errors.New("no Peer found in Context") - } - return AuthInfoFromPeer(p) -} diff --git a/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/vendor/github.com/google/s2a-go/stream/s2a_stream.go deleted file mode 100644 index 584bf32b1c..0000000000 --- a/vendor/github.com/google/s2a-go/stream/s2a_stream.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * - * Copyright 2023 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package stream provides an interface for bidirectional streaming to the S2A server. -package stream - -import ( - s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" -) - -// S2AStream defines the operation for communicating with the S2A server over a bidirectional stream. -type S2AStream interface { - // Send sends the message to the S2A server. - Send(*s2av2pb.SessionReq) error - // Recv receives the message from the S2A server. - Recv() (*s2av2pb.SessionResp, error) - // Closes the channel to the S2A server. 
- CloseSend() error -} diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index c9fb829dc6..7ec5ac7ea9 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + ## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4bec2..dc60082d3b 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go index ba9dd5eb68..3167b643d4 100644 --- a/vendor/github.com/google/uuid/version7.go +++ b/vendor/github.com/google/uuid/version7.go @@ -44,7 +44,7 @@ func NewV7FromReader(r io.Reader) (UUID, error) { // makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) // uuid[8] already has the right version number (Variant is 10) -// see function NewV7 and NewV7FromReader +// see function NewV7 and NewV7FromReader func makeV7(uuid []byte) { /* 0 1 2 3 @@ -52,7 +52,7 @@ func makeV7(uuid []byte) { +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | unix_ts_ms | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | ver | rand_a | + | unix_ts_ms | ver | rand_a (12 bit seq) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |var| rand_b | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -61,7 +61,7 @@ func makeV7(uuid []byte) { */ _ = uuid[15] // bounds check - t := timeNow().UnixMilli() + t, s := getV7Time() uuid[0] = byte(t >> 40) uuid[1] = byte(t >> 32) @@ -70,6 +70,35 @@ func makeV7(uuid []byte) { uuid[4] = byte(t >> 8) uuid[5] = byte(t) - uuid[6] = 0x70 | (uuid[6] & 0x0F) - // uuid[8] has already has right version + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guaranteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time.
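The version7.go hunk above is the substance of the uuid 1.6.0 bump: getV7Time (shown next) packs each timestamp as milli<<12 | seq and forces the packed value to be strictly increasing across calls, bumping by one on collision so the carry can borrow into the millisecond field. A compact, self-contained restatement of that logic:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

var (
	mu   sync.Mutex
	last int64
)

const nanoPerMilli = 1000000

// v7Time restates the monotonicity guard: pack the timestamp as
// milli<<12 | seq; if the packed value would not exceed the previous
// one, bump it by 1 (the bump can carry into the millisecond field).
func v7Time() (milli, seq int64) {
	mu.Lock()
	defer mu.Unlock()
	nano := time.Now().UnixNano()
	milli = nano / nanoPerMilli
	seq = (nano - milli*nanoPerMilli) >> 8 // 0..3906, fits in 12 bits
	now := milli<<12 + seq
	if now <= last {
		now = last + 1
		milli, seq = now>>12, now&0xfff
	}
	last = now
	return milli, seq
}

func main() {
	m1, s1 := v7Time()
	m2, s2 := v7Time()
	fmt.Println(m1<<12+s1 < m2<<12+s2) // true: strictly increasing
}
```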
+func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq } diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go deleted file mode 100644 index b3283b8158..0000000000 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2022 Google LLC. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). -// -// The signer binary is OS-specific, but exposes a standard set of APIs for the client to use. -package client - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "encoding/gob" - "errors" - "fmt" - "io" - "net/rpc" - "os" - "os/exec" - - "github.com/googleapis/enterprise-certificate-proxy/client/util" -) - -const signAPI = "EnterpriseCertSigner.Sign" -const certificateChainAPI = "EnterpriseCertSigner.CertificateChain" -const publicKeyAPI = "EnterpriseCertSigner.Public" - -// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser. -type Connection struct { - io.ReadCloser - io.WriteCloser -} - -// Close closes c's underlying ReadCloser and WriteCloser. -func (c *Connection) Close() error { - rerr := c.ReadCloser.Close() - werr := c.WriteCloser.Close() - if rerr != nil { - return rerr - } - return werr -} - -func init() { - gob.Register(crypto.SHA256) - gob.Register(&rsa.PSSOptions{}) -} - -// SignArgs contains arguments to a crypto Signer.Sign method. -type SignArgs struct { - Digest []byte // The content to sign. - Opts crypto.SignerOpts // Options for signing, such as Hash identifier. -} - -// Key implements credential.Credential by holding the executed signer subprocess. -type Key struct { - cmd *exec.Cmd // Pointer to the signer subprocess. 
- client *rpc.Client // Pointer to the rpc client that communicates with the signer subprocess. - publicKey crypto.PublicKey // Public key of loaded certificate. - chain [][]byte // Certificate chain of loaded certificate. -} - -// CertificateChain returns the credential as a raw X509 cert chain. This contains the public key. -func (k *Key) CertificateChain() [][]byte { - return k.chain -} - -// Close closes the RPC connection and kills the signer subprocess. -// Call this to free up resources when the Key object is no longer needed. -func (k *Key) Close() error { - if err := k.cmd.Process.Kill(); err != nil { - return fmt.Errorf("failed to kill signer process: %w", err) - } - // Wait for cmd to exit and release resources. Since the process is forcefully killed, this - // will return a non-nil error (varies by OS), which we will ignore. - _ = k.cmd.Wait() - // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed. - // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault. - if err := k.client.Close(); err.Error() != "close |0: file already closed" { - return fmt.Errorf("failed to close RPC connection: %w", err) - } - return nil -} - -// Public returns the public key for this Key. -func (k *Key) Public() crypto.PublicKey { - return k.publicKey -} - -// Sign signs a message digest, using the specified signer options. -func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { - if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() { - return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size()) - } - err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed) - return -} - -// ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable, -// possibly due to missing config or missing binary path. -var ErrCredUnavailable = errors.New("Cred is unavailable") - -// Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate -// related operations, including signing messages with the private key. -// -// The signer binary path is read from the specified configFilePath, if provided. -// Otherwise, use the default config file path. -// -// The config file also specifies which certificate the signer should use. -func Cred(configFilePath string) (*Key, error) { - if configFilePath == "" { - configFilePath = util.GetDefaultConfigFilePath() - } - enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) - if err != nil { - if errors.Is(err, util.ErrConfigUnavailable) { - return nil, ErrCredUnavailable - } - return nil, err - } - k := &Key{ - cmd: exec.Command(enterpriseCertSignerPath, configFilePath), - } - - // Redirect errors from subprocess to parent process. - k.cmd.Stderr = os.Stderr - - // RPC client will communicate with subprocess over stdin/stdout. 
- kin, err := k.cmd.StdinPipe() - if err != nil { - return nil, err - } - kout, err := k.cmd.StdoutPipe() - if err != nil { - return nil, err - } - k.client = rpc.NewClient(&Connection{kout, kin}) - - if err := k.cmd.Start(); err != nil { - return nil, fmt.Errorf("starting enterprise cert signer subprocess: %w", err) - } - - if err := k.client.Call(certificateChainAPI, struct{}{}, &k.chain); err != nil { - return nil, fmt.Errorf("failed to retrieve certificate chain: %w", err) - } - - var publicKeyBytes []byte - if err := k.client.Call(publicKeyAPI, struct{}{}, &publicKeyBytes); err != nil { - return nil, fmt.Errorf("failed to retrieve public key: %w", err) - } - - publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes) - if err != nil { - return nil, fmt.Errorf("failed to parse public key: %w", err) - } - - var ok bool - k.publicKey, ok = publicKey.(crypto.PublicKey) - if !ok { - return nil, fmt.Errorf("invalid public key type: %T", publicKey) - } - - switch pub := k.publicKey.(type) { - case *rsa.PublicKey: - if pub.Size() < 256 { - return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8) - } - case *ecdsa.PublicKey: - default: - return nil, fmt.Errorf("unsupported public key type: %v", pub) - } - - return k, nil -} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go deleted file mode 100644 index 1640ec1c9e..0000000000 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2022 Google LLC. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package util provides helper functions for the client. -package util - -import ( - "encoding/json" - "errors" - "io" - "os" - "os/user" - "path/filepath" - "runtime" -) - -const configFileName = "certificate_config.json" - -// EnterpriseCertificateConfig contains parameters for initializing signer. -type EnterpriseCertificateConfig struct { - Libs Libs `json:"libs"` -} - -// Libs specifies the locations of helper libraries. -type Libs struct { - ECP string `json:"ecp"` -} - -// ErrConfigUnavailable is a sentinel error that indicates ECP config is unavailable, -// possibly due to entire config missing or missing binary path. -var ErrConfigUnavailable = errors.New("Config is unavailable") - -// LoadSignerBinaryPath retrieves the path of the signer binary from the config file. 
-func LoadSignerBinaryPath(configFilePath string) (path string, err error) { - jsonFile, err := os.Open(configFilePath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - return "", ErrConfigUnavailable - } - return "", err - } - - byteValue, err := io.ReadAll(jsonFile) - if err != nil { - return "", err - } - var config EnterpriseCertificateConfig - err = json.Unmarshal(byteValue, &config) - if err != nil { - return "", err - } - signerBinaryPath := config.Libs.ECP - if signerBinaryPath == "" { - return "", ErrConfigUnavailable - } - return signerBinaryPath, nil -} - -func guessHomeDir() string { - // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 - if v := os.Getenv("HOME"); v != "" { - return v - } - // Else, fall back to user.Current: - if u, err := user.Current(); err == nil { - return u.HomeDir - } - return "" -} - -func getDefaultConfigFileDirectory() (directory string) { - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud") - } - return filepath.Join(guessHomeDir(), ".config/gcloud") -} - -// GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud. -func GetDefaultConfigFilePath() (path string) { - return filepath.Join(getDefaultConfigFileDirectory(), configFileName) -} diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json deleted file mode 100644 index 91d60a809f..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "v2": "2.11.0" -} diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md deleted file mode 100644 index e17b196f6c..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ /dev/null @@ -1,99 +0,0 @@ -# Changelog - -## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) - - -### Features - -* **v2:** add GoVersion package variable ([#283](https://github.com/googleapis/gax-go/issues/283)) ([26553cc](https://github.com/googleapis/gax-go/commit/26553ccadb4016b189881f52e6c253b68bb3e3d5)) - - -### Bug Fixes - -* **v2:** handle space in non-devel go version ([#288](https://github.com/googleapis/gax-go/issues/288)) ([fd7bca0](https://github.com/googleapis/gax-go/commit/fd7bca029a1c5e63def8f0a5fd1ec3f725d92f75)) - -## [2.10.0](https://github.com/googleapis/gax-go/compare/v2.9.1...v2.10.0) (2023-05-30) - - -### Features - -* update dependencies ([#280](https://github.com/googleapis/gax-go/issues/280)) ([4514281](https://github.com/googleapis/gax-go/commit/4514281058590f3637c36bfd49baa65c4d3cfb21)) - -## [2.9.1](https://github.com/googleapis/gax-go/compare/v2.9.0...v2.9.1) (2023-05-23) - - -### Bug Fixes - -* **v2:** drop cloud lro test dep ([#276](https://github.com/googleapis/gax-go/issues/276)) ([c67eeba](https://github.com/googleapis/gax-go/commit/c67eeba0f10a3294b1d93c1b8fbe40211a55ae5f)), refs [#270](https://github.com/googleapis/gax-go/issues/270) - -## [2.9.0](https://github.com/googleapis/gax-go/compare/v2.8.0...v2.9.0) (2023-05-22) - - -### Features - -* **apierror:** add method to return HTTP status code conditionally ([#274](https://github.com/googleapis/gax-go/issues/274)) ([5874431](https://github.com/googleapis/gax-go/commit/587443169acd10f7f86d1989dc8aaf189e645e98)), refs [#229](https://github.com/googleapis/gax-go/issues/229) - - -### Documentation - -* add ref to usage with 
clients ([#272](https://github.com/googleapis/gax-go/issues/272)) ([ea4d72d](https://github.com/googleapis/gax-go/commit/ea4d72d514beba4de450868b5fb028601a29164e)), refs [#228](https://github.com/googleapis/gax-go/issues/228) - -## [2.8.0](https://github.com/googleapis/gax-go/compare/v2.7.1...v2.8.0) (2023-03-15) - - -### Features - -* **v2:** add WithTimeout option ([#259](https://github.com/googleapis/gax-go/issues/259)) ([9a8da43](https://github.com/googleapis/gax-go/commit/9a8da43693002448b1e8758023699387481866d1)) - -## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) - - -### Bug Fixes - -* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254) - -## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) - - -### Features - -* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) -* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) - -## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) - - -### Features - -* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3)) - -## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) - - -### Bug Fixes - -* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23)) - -## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04) - - -### Features - -* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb)) - -## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) - - -### Features - -* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c)) - - -### Bug Fixes - -* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e)) - - -### Miscellaneous Chores - -* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719)) diff --git a/vendor/github.com/googleapis/gax-go/v2/LICENSE b/vendor/github.com/googleapis/gax-go/v2/LICENSE deleted file mode 100644 index 6d16b6578a..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright 2016, Google Inc. -All rights reserved. 
-Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go deleted file mode 100644 index d785a065ca..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2021, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package apierror implements a wrapper error for parsing error details from -// API calls. Both HTTP & gRPC status errors are supported. 
-// -// For examples of how to use [APIError] with client libraries please reference -// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors) -// in the client library documentation. -package apierror - -import ( - "errors" - "fmt" - "strings" - - jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto" - "google.golang.org/api/googleapi" - "google.golang.org/genproto/googleapis/rpc/errdetails" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/proto" -) - -// ErrDetails holds the google/rpc/error_details.proto messages. -type ErrDetails struct { - ErrorInfo *errdetails.ErrorInfo - BadRequest *errdetails.BadRequest - PreconditionFailure *errdetails.PreconditionFailure - QuotaFailure *errdetails.QuotaFailure - RetryInfo *errdetails.RetryInfo - ResourceInfo *errdetails.ResourceInfo - RequestInfo *errdetails.RequestInfo - DebugInfo *errdetails.DebugInfo - Help *errdetails.Help - LocalizedMessage *errdetails.LocalizedMessage - - // Unknown stores unidentifiable error details. - Unknown []interface{} -} - -// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages. -var ErrMessageNotFound = errors.New("message not found") - -// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the -// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type, -// the content of the message is copied to the provided message. -// -// ExtractProtoMessage will return ErrMessageNotFound if there are no messages matching the -// protocol buffer type of the provided message. -func (e ErrDetails) ExtractProtoMessage(v proto.Message) error { - if v == nil { - return ErrMessageNotFound - } - for _, elem := range e.Unknown { - if elemProto, ok := elem.(proto.Message); ok { - if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() { - proto.Merge(v, elemProto) - return nil - } - } - } - return ErrMessageNotFound -} - -func (e ErrDetails) String() string { - var d strings.Builder - if e.ErrorInfo != nil { - d.WriteString(fmt.Sprintf("error details: name = ErrorInfo reason = %s domain = %s metadata = %s\n", - e.ErrorInfo.GetReason(), e.ErrorInfo.GetDomain(), e.ErrorInfo.GetMetadata())) - } - - if e.BadRequest != nil { - v := e.BadRequest.GetFieldViolations() - var f []string - var desc []string - for _, x := range v { - f = append(f, x.GetField()) - desc = append(desc, x.GetDescription()) - } - d.WriteString(fmt.Sprintf("error details: name = BadRequest field = %s desc = %s\n", - strings.Join(f, " "), strings.Join(desc, " "))) - } - - if e.PreconditionFailure != nil { - v := e.PreconditionFailure.GetViolations() - var t []string - var s []string - var desc []string - for _, x := range v { - t = append(t, x.GetType()) - s = append(s, x.GetSubject()) - desc = append(desc, x.GetDescription()) - } - d.WriteString(fmt.Sprintf("error details: name = PreconditionFailure type = %s subj = %s desc = %s\n", strings.Join(t, " "), - strings.Join(s, " "), strings.Join(desc, " "))) - } - - if e.QuotaFailure != nil { - v := e.QuotaFailure.GetViolations() - var s []string - var desc []string - for _, x := range v { - s = append(s, x.GetSubject()) - desc = append(desc, x.GetDescription()) - } - d.WriteString(fmt.Sprintf("error details: name = QuotaFailure subj = %s desc = %s\n", - strings.Join(s, " "), strings.Join(desc, " "))) - } - - if e.RequestInfo != nil { - d.WriteString(fmt.Sprintf("error details: name = 
RequestInfo id = %s data = %s\n", - e.RequestInfo.GetRequestId(), e.RequestInfo.GetServingData())) - } - - if e.ResourceInfo != nil { - d.WriteString(fmt.Sprintf("error details: name = ResourceInfo type = %s resourcename = %s owner = %s desc = %s\n", - e.ResourceInfo.GetResourceType(), e.ResourceInfo.GetResourceName(), - e.ResourceInfo.GetOwner(), e.ResourceInfo.GetDescription())) - - } - if e.RetryInfo != nil { - d.WriteString(fmt.Sprintf("error details: retry in %s\n", e.RetryInfo.GetRetryDelay().AsDuration())) - - } - if e.Unknown != nil { - var s []string - for _, x := range e.Unknown { - s = append(s, fmt.Sprintf("%v", x)) - } - d.WriteString(fmt.Sprintf("error details: name = Unknown desc = %s\n", strings.Join(s, " "))) - } - - if e.DebugInfo != nil { - d.WriteString(fmt.Sprintf("error details: name = DebugInfo detail = %s stack = %s\n", e.DebugInfo.GetDetail(), - strings.Join(e.DebugInfo.GetStackEntries(), " "))) - } - if e.Help != nil { - var desc []string - var url []string - for _, x := range e.Help.Links { - desc = append(desc, x.GetDescription()) - url = append(url, x.GetUrl()) - } - d.WriteString(fmt.Sprintf("error details: name = Help desc = %s url = %s\n", - strings.Join(desc, " "), strings.Join(url, " "))) - } - if e.LocalizedMessage != nil { - d.WriteString(fmt.Sprintf("error details: name = LocalizedMessage locale = %s msg = %s\n", - e.LocalizedMessage.GetLocale(), e.LocalizedMessage.GetMessage())) - } - - return d.String() -} - -// APIError wraps either a gRPC Status error or a HTTP googleapi.Error. It -// implements error and Status interfaces. -type APIError struct { - err error - status *status.Status - httpErr *googleapi.Error - details ErrDetails -} - -// Details presents the error details of the APIError. -func (a *APIError) Details() ErrDetails { - return a.details -} - -// Unwrap extracts the original error. -func (a *APIError) Unwrap() error { - return a.err -} - -// Error returns a readable representation of the APIError. -func (a *APIError) Error() string { - var msg string - if a.httpErr != nil { - // Truncate the googleapi.Error message because it dumps the Details in - // an ugly way. - msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) - } else if a.status != nil { - msg = a.err.Error() - } - return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) -} - -// GRPCStatus extracts the underlying gRPC Status error. -// This method is necessary to fulfill the interface -// described in https://pkg.go.dev/google.golang.org/grpc/status#FromError. -func (a *APIError) GRPCStatus() *status.Status { - return a.status -} - -// Reason returns the reason in an ErrorInfo. -// If ErrorInfo is nil, it returns an empty string. -func (a *APIError) Reason() string { - return a.details.ErrorInfo.GetReason() -} - -// Domain returns the domain in an ErrorInfo. -// If ErrorInfo is nil, it returns an empty string. -func (a *APIError) Domain() string { - return a.details.ErrorInfo.GetDomain() -} - -// Metadata returns the metadata in an ErrorInfo. -// If ErrorInfo is nil, it returns nil. -func (a *APIError) Metadata() map[string]string { - return a.details.ErrorInfo.GetMetadata() - -} - -// setDetailsFromError parses a Status error or a googleapi.Error -// and sets status and details or httpErr and details, respectively. -// It returns false if neither Status nor googleapi.Error can be parsed. 
-// When err is a googleapi.Error, the status of the returned error will -// be set to an Unknown error, rather than nil, since a nil code is -// interpreted as OK in the gRPC status package. -func (a *APIError) setDetailsFromError(err error) bool { - st, isStatus := status.FromError(err) - var herr *googleapi.Error - isHTTPErr := errors.As(err, &herr) - - switch { - case isStatus: - a.status = st - a.details = parseDetails(st.Details()) - case isHTTPErr: - a.httpErr = herr - a.details = parseHTTPDetails(herr) - a.status = status.New(codes.Unknown, herr.Message) - default: - return false - } - return true -} - -// FromError parses a Status error or a googleapi.Error and builds an -// APIError, wrapping the provided error in the new APIError. It -// returns false if neither Status nor googleapi.Error can be parsed. -func FromError(err error) (*APIError, bool) { - return ParseError(err, true) -} - -// ParseError parses a Status error or a googleapi.Error and builds an -// APIError. If wrap is true, it wraps the error in the new APIError. -// It returns false if neither Status nor googleapi.Error can be parsed. -func ParseError(err error, wrap bool) (*APIError, bool) { - if err == nil { - return nil, false - } - ae := APIError{} - if wrap { - ae = APIError{err: err} - } - if !ae.setDetailsFromError(err) { - return nil, false - } - return &ae, true -} - -// parseDetails accepts a slice of interface{} that should be backed by some -// sort of proto.Message that can be cast to the google/rpc/error_details.proto -// types. -// -// This is for internal use only. -func parseDetails(details []interface{}) ErrDetails { - var ed ErrDetails - for _, d := range details { - switch d := d.(type) { - case *errdetails.ErrorInfo: - ed.ErrorInfo = d - case *errdetails.BadRequest: - ed.BadRequest = d - case *errdetails.PreconditionFailure: - ed.PreconditionFailure = d - case *errdetails.QuotaFailure: - ed.QuotaFailure = d - case *errdetails.RetryInfo: - ed.RetryInfo = d - case *errdetails.ResourceInfo: - ed.ResourceInfo = d - case *errdetails.RequestInfo: - ed.RequestInfo = d - case *errdetails.DebugInfo: - ed.DebugInfo = d - case *errdetails.Help: - ed.Help = d - case *errdetails.LocalizedMessage: - ed.LocalizedMessage = d - default: - ed.Unknown = append(ed.Unknown, d) - } - } - - return ed -} - -// parseHTTPDetails will convert the given googleapi.Error into the protobuf -// representation then parse the Any values that contain the error details. -// -// This is for internal use only. -func parseHTTPDetails(gae *googleapi.Error) ErrDetails { - e := &jsonerror.Error{} - if err := protojson.Unmarshal([]byte(gae.Body), e); err != nil { - // If the error body does not conform to the error schema, ignore it - // altogether. See https://cloud.google.com/apis/design/errors#http_mapping. - return ErrDetails{} - } - - // Coerce the Any messages into proto.Message then parse the details. - details := []interface{}{} - for _, any := range e.GetError().GetDetails() { - m, err := any.UnmarshalNew() - if err != nil { - // Ignore malformed Any values. - continue - } - details = append(details, m) - } - - return parseDetails(details) -} - -// HTTPCode returns the underlying HTTP response status code. This method returns -// `-1` if the underlying error is a [google.golang.org/grpc/status.Status]. To -// check gRPC error codes use [google.golang.org/grpc/status.Code]. 
-func (a *APIError) HTTPCode() int { - if a.httpErr == nil { - return -1 - } - return a.httpErr.Code -} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md deleted file mode 100644 index 9ff0caea94..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# HTTP JSON Error Schema - -The `error.proto` represents the HTTP-JSON schema used by Google APIs to convey -error payloads as described by https://cloud.google.com/apis/design/errors#http_mapping. -This package is for internal parsing logic only and should not be used in any -other context. - -## Regeneration - -To regenerate the protobuf Go code you will need the following: - -* A local copy of [googleapis], the absolute path to which should be exported to -the environment variable `GOOGLEAPIS` -* The protobuf compiler [protoc] -* The Go [protobuf plugin] -* The [goimports] tool - -From this directory run the following command: -```sh -protoc -I $GOOGLEAPIS -I. --go_out=. --go_opt=module=github.com/googleapis/gax-go/v2/apierror/internal/proto error.proto -goimports -w . -``` - -Note: the `module` plugin option ensures the generated code is placed in this -directory, and not in several nested directories defined by `go_package` option. - -[googleapis]: https://github.com/googleapis/googleapis -[protoc]: https://github.com/protocolbuffers/protobuf#protocol-compiler-installation -[protobuf plugin]: https://developers.google.com/protocol-buffers/docs/reference/go-generated -[goimports]: https://pkg.go.dev/golang.org/x/tools/cmd/goimports \ No newline at end of file diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go deleted file mode 100644 index e4b03f161d..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.17.3 -// source: custom_error.proto - -package jsonerror - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Error code for `CustomError`. -type CustomError_CustomErrorCode int32 - -const ( - // Default error. - CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0 - // Too many foo. - CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1 - // Not enough foo. 
- CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2 - // Catastrophic error. - CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3 -) - -// Enum value maps for CustomError_CustomErrorCode. -var ( - CustomError_CustomErrorCode_name = map[int32]string{ - 0: "CUSTOM_ERROR_CODE_UNSPECIFIED", - 1: "TOO_MANY_FOO", - 2: "NOT_ENOUGH_FOO", - 3: "UNIVERSE_WAS_DESTROYED", - } - CustomError_CustomErrorCode_value = map[string]int32{ - "CUSTOM_ERROR_CODE_UNSPECIFIED": 0, - "TOO_MANY_FOO": 1, - "NOT_ENOUGH_FOO": 2, - "UNIVERSE_WAS_DESTROYED": 3, - } -) - -func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode { - p := new(CustomError_CustomErrorCode) - *p = x - return p -} - -func (x CustomError_CustomErrorCode) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor { - return file_custom_error_proto_enumTypes[0].Descriptor() -} - -func (CustomError_CustomErrorCode) Type() protoreflect.EnumType { - return &file_custom_error_proto_enumTypes[0] -} - -func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead. -func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) { - return file_custom_error_proto_rawDescGZIP(), []int{0, 0} -} - -// CustomError is an example of a custom error message which may be included -// in an rpc status. It is not meant to reflect a standard error. -type CustomError struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Error code specific to the custom API being invoked. - Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"` - // Name of the failed entity. - Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` - // Message that describes the error. - ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (x *CustomError) Reset() { - *x = CustomError{} - if protoimpl.UnsafeEnabled { - mi := &file_custom_error_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CustomError) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CustomError) ProtoMessage() {} - -func (x *CustomError) ProtoReflect() protoreflect.Message { - mi := &file_custom_error_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CustomError.ProtoReflect.Descriptor instead. 
-func (*CustomError) Descriptor() ([]byte, []int) { - return file_custom_error_proto_rawDescGZIP(), []int{0} -} - -func (x *CustomError) GetCode() CustomError_CustomErrorCode { - if x != nil { - return x.Code - } - return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED -} - -func (x *CustomError) GetEntity() string { - if x != nil { - return x.Entity - } - return "" -} - -func (x *CustomError) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -var File_custom_error_proto protoreflect.FileDescriptor - -var file_custom_error_proto_rawDesc = []byte{ - 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, - 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x4f, 0x4f, 0x5f, 0x4d, 0x41, - 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f, - 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, - 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53, - 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_custom_error_proto_rawDescOnce sync.Once - file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc -) - -func file_custom_error_proto_rawDescGZIP() []byte { - file_custom_error_proto_rawDescOnce.Do(func() { - file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData) - }) - return file_custom_error_proto_rawDescData -} - -var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_custom_error_proto_goTypes = []interface{}{ - (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode - (*CustomError)(nil), // 1: error.CustomError -} -var file_custom_error_proto_depIdxs = []int32{ - 0, // 0: 
error.CustomError.code:type_name -> error.CustomError.CustomErrorCode - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_custom_error_proto_init() } -func file_custom_error_proto_init() { - if File_custom_error_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CustomError); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_custom_error_proto_rawDesc, - NumEnums: 1, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_custom_error_proto_goTypes, - DependencyIndexes: file_custom_error_proto_depIdxs, - EnumInfos: file_custom_error_proto_enumTypes, - MessageInfos: file_custom_error_proto_msgTypes, - }.Build() - File_custom_error_proto = out.File - file_custom_error_proto_rawDesc = nil - file_custom_error_proto_goTypes = nil - file_custom_error_proto_depIdxs = nil -} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto deleted file mode 100644 index 21678ae65c..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package error; - -option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; - - -// CustomError is an example of a custom error message which may be included -// in an rpc status. It is not meant to reflect a standard error. -message CustomError { - - // Error code for `CustomError`. - enum CustomErrorCode { - // Default error. - CUSTOM_ERROR_CODE_UNSPECIFIED = 0; - - // Too many foo. - TOO_MANY_FOO = 1; - - // Not enough foo. - NOT_ENOUGH_FOO = 2; - - // Catastrophic error. - UNIVERSE_WAS_DESTROYED = 3; - - } - - // Error code specific to the custom API being invoked. - CustomErrorCode code = 1; - - // Name of the failed entity. - string entity = 2; - - // Message that describes the error. 
- string error_message = 3; -} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go deleted file mode 100644 index 7dd9b83739..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.15.8 -// source: apierror/internal/proto/error.proto - -package jsonerror - -import ( - reflect "reflect" - sync "sync" - - code "google.golang.org/genproto/googleapis/rpc/code" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The error format v2 for Google JSON REST APIs. -// Copied from https://cloud.google.com/apis/design/errors#http_mapping. -// -// NOTE: This schema is not used for other wire protocols. -type Error struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The actual error payload. The nested message structure is for backward - // compatibility with Google API client libraries. It also makes the error - // more readable to developers. - Error *Error_Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` -} - -func (x *Error) Reset() { - *x = Error{} - if protoimpl.UnsafeEnabled { - mi := &file_apierror_internal_proto_error_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Error) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Error) ProtoMessage() {} - -func (x *Error) ProtoReflect() protoreflect.Message { - mi := &file_apierror_internal_proto_error_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Error.ProtoReflect.Descriptor instead. -func (*Error) Descriptor() ([]byte, []int) { - return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0} -} - -func (x *Error) GetError() *Error_Status { - if x != nil { - return x.Error - } - return nil -} - -// This message has the same semantics as `google.rpc.Status`. It uses HTTP -// status code instead of gRPC status code. It has an extra field `status` -// for backward compatibility with Google API Client Libraries. 
-type Error_Status struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The HTTP status code that corresponds to `google.rpc.Status.code`. - Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // This corresponds to `google.rpc.Status.message`. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // This is the enum version for `google.rpc.Status.code`. - Status code.Code `protobuf:"varint,4,opt,name=status,proto3,enum=google.rpc.Code" json:"status,omitempty"` - // This corresponds to `google.rpc.Status.details`. - Details []*anypb.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` -} - -func (x *Error_Status) Reset() { - *x = Error_Status{} - if protoimpl.UnsafeEnabled { - mi := &file_apierror_internal_proto_error_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Error_Status) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Error_Status) ProtoMessage() {} - -func (x *Error_Status) ProtoReflect() protoreflect.Message { - mi := &file_apierror_internal_proto_error_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Error_Status.ProtoReflect.Descriptor instead. -func (*Error_Status) Descriptor() ([]byte, []int) { - return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *Error_Status) GetCode() int32 { - if x != nil { - return x.Code - } - return 0 -} - -func (x *Error_Status) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -func (x *Error_Status) GetStatus() code.Code { - if x != nil { - return x.Status - } - return code.Code(0) -} - -func (x *Error_Status) GetDetails() []*anypb.Any { - if x != nil { - return x.Details - } - return nil -} - -var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor - -var file_apierror_internal_proto_error_proto_rawDesc = []byte{ - 0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, - 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, - 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 
0x0e, 0x32, 0x10, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_apierror_internal_proto_error_proto_rawDescOnce sync.Once - file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc -) - -func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte { - file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() { - file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData) - }) - return file_apierror_internal_proto_error_proto_rawDescData -} - -var file_apierror_internal_proto_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_apierror_internal_proto_error_proto_goTypes = []interface{}{ - (*Error)(nil), // 0: error.Error - (*Error_Status)(nil), // 1: error.Error.Status - (code.Code)(0), // 2: google.rpc.Code - (*anypb.Any)(nil), // 3: google.protobuf.Any -} -var file_apierror_internal_proto_error_proto_depIdxs = []int32{ - 1, // 0: error.Error.error:type_name -> error.Error.Status - 2, // 1: error.Error.Status.status:type_name -> google.rpc.Code - 3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_apierror_internal_proto_error_proto_init() } -func file_apierror_internal_proto_error_proto_init() { - if File_apierror_internal_proto_error_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Error); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Error_Status); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_apierror_internal_proto_error_proto_goTypes, - DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs, - MessageInfos: 
file_apierror_internal_proto_error_proto_msgTypes, - }.Build() - File_apierror_internal_proto_error_proto = out.File - file_apierror_internal_proto_error_proto_rawDesc = nil - file_apierror_internal_proto_error_proto_goTypes = nil - file_apierror_internal_proto_error_proto_depIdxs = nil -} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto deleted file mode 100644 index 4b9b13ce11..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package error; - -import "google/protobuf/any.proto"; -import "google/rpc/code.proto"; - -option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; - -// The error format v2 for Google JSON REST APIs. -// Copied from https://cloud.google.com/apis/design/errors#http_mapping. -// -// NOTE: This schema is not used for other wire protocols. -message Error { - // This message has the same semantics as `google.rpc.Status`. It uses HTTP - // status code instead of gRPC status code. It has an extra field `status` - // for backward compatibility with Google API Client Libraries. - message Status { - // The HTTP status code that corresponds to `google.rpc.Status.code`. - int32 code = 1; - // This corresponds to `google.rpc.Status.message`. - string message = 2; - // This is the enum version for `google.rpc.Status.code`. - google.rpc.Code status = 4; - // This corresponds to `google.rpc.Status.details`. - repeated google.protobuf.Any details = 5; - } - // The actual error payload. The nested message structure is for backward - // compatibility with Google API client libraries. It also makes the error - // more readable to developers. - Status error = 1; -} diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go deleted file mode 100644 index c52e03f643..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/call_option.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "errors" - "math/rand" - "time" - - "google.golang.org/api/googleapi" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// CallOption is an option used by Invoke to control behaviors of RPC calls. -// CallOption works by modifying relevant fields of CallSettings. -type CallOption interface { - // Resolve applies the option by modifying cs. - Resolve(cs *CallSettings) -} - -// Retryer is used by Invoke to determine retry behavior. -type Retryer interface { - // Retry reports whether a request should be retried and how long to pause before retrying - // if the previous attempt returned with err. Invoke never calls Retry with nil error. - Retry(err error) (pause time.Duration, shouldRetry bool) -} - -type retryerOption func() Retryer - -func (o retryerOption) Resolve(s *CallSettings) { - s.Retry = o -} - -// WithRetry sets CallSettings.Retry to fn. -func WithRetry(fn func() Retryer) CallOption { - return retryerOption(fn) -} - -// OnErrorFunc returns a Retryer that retries if and only if the previous attempt -// returns an error that satisfies shouldRetry. -// -// Pause times between retries are specified by bo. bo is only used for its -// parameters; each Retryer has its own copy. -func OnErrorFunc(bo Backoff, shouldRetry func(err error) bool) Retryer { - return &errorRetryer{ - shouldRetry: shouldRetry, - backoff: bo, - } -} - -type errorRetryer struct { - backoff Backoff - shouldRetry func(err error) bool -} - -func (r *errorRetryer) Retry(err error) (time.Duration, bool) { - if r.shouldRetry(err) { - return r.backoff.Pause(), true - } - - return 0, false -} - -// OnCodes returns a Retryer that retries if and only if -// the previous attempt returns a GRPC error whose error code is stored in cc. -// Pause times between retries are specified by bo. -// -// bo is only used for its parameters; each Retryer has its own copy. -func OnCodes(cc []codes.Code, bo Backoff) Retryer { - return &boRetryer{ - backoff: bo, - codes: append([]codes.Code(nil), cc...), - } -} - -type boRetryer struct { - backoff Backoff - codes []codes.Code -} - -func (r *boRetryer) Retry(err error) (time.Duration, bool) { - st, ok := status.FromError(err) - if !ok { - return 0, false - } - c := st.Code() - for _, rc := range r.codes { - if c == rc { - return r.backoff.Pause(), true - } - } - return 0, false -} - -// OnHTTPCodes returns a Retryer that retries if and only if -// the previous attempt returns a googleapi.Error whose status code is stored in -// cc. Pause times between retries are specified by bo. -// -// bo is only used for its parameters; each Retryer has its own copy. 
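The retryers above all follow the same shape: classify the error, and if it is retryable, ask a Backoff for the next pause. A minimal, self-contained sketch of that pattern, using a hypothetical httpError type in place of googleapi.Error and re-implementing the jittered exponential backoff rather than importing the (now removed) gax package:

```go
package main

import (
	"errors"
	"fmt"
	"math/rand"
	"net/http"
	"time"
)

// httpError is a hypothetical stand-in for googleapi.Error: any error
// that carries an HTTP status code.
type httpError struct{ Code int }

func (e *httpError) Error() string { return fmt.Sprintf("http %d", e.Code) }

// backoff mirrors the gax Backoff idea: each pause is a random duration
// up to the current period, and the period grows by mult up to max.
type backoff struct {
	cur, max time.Duration
	mult     float64
}

func (b *backoff) pause() time.Duration {
	d := time.Duration(1 + rand.Int63n(int64(b.cur)))
	b.cur = time.Duration(float64(b.cur) * b.mult)
	if b.cur > b.max {
		b.cur = b.max
	}
	return d
}

// callWithRetry retries call while it fails with one of the listed status
// codes, sleeping a jittered, growing pause between attempts.
func callWithRetry(codes map[int]bool, attempts int, call func() error) error {
	bo := &backoff{cur: 50 * time.Millisecond, max: time.Second, mult: 2}
	var err error
	for i := 0; i < attempts; i++ {
		if err = call(); err == nil {
			return nil
		}
		var herr *httpError
		if !errors.As(err, &herr) || !codes[herr.Code] {
			return err // not a retryable error: fail immediately
		}
		time.Sleep(bo.pause())
	}
	return err
}

func main() {
	n := 0
	err := callWithRetry(map[int]bool{http.StatusBadGateway: true}, 5, func() error {
		if n++; n < 3 {
			return &httpError{Code: http.StatusBadGateway}
		}
		return nil
	})
	fmt.Println(n, err) // 3 <nil>
}
```

Full jitter (a random pause anywhere up to the current period) is deliberate; the AWS article cited in Backoff.Pause argues it spreads retry storms better than fixed delays.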
-func OnHTTPCodes(bo Backoff, cc ...int) Retryer { - codes := make(map[int]bool, len(cc)) - for _, c := range cc { - codes[c] = true - } - - return &httpRetryer{ - backoff: bo, - codes: codes, - } -} - -type httpRetryer struct { - backoff Backoff - codes map[int]bool -} - -func (r *httpRetryer) Retry(err error) (time.Duration, bool) { - var gerr *googleapi.Error - if !errors.As(err, &gerr) { - return 0, false - } - - if r.codes[gerr.Code] { - return r.backoff.Pause(), true - } - - return 0, false -} - -// Backoff implements exponential backoff. The wait time between retries is a -// random value between 0 and the "retry period" - the time between retries. The -// retry period starts at Initial and increases by the factor of Multiplier -// every retry, but is capped at Max. -// -// Note: MaxNumRetries / RPCDeadline is specifically not provided. These should -// be built on top of Backoff. -type Backoff struct { - // Initial is the initial value of the retry period, defaults to 1 second. - Initial time.Duration - - // Max is the maximum value of the retry period, defaults to 30 seconds. - Max time.Duration - - // Multiplier is the factor by which the retry period increases. - // It should be greater than 1 and defaults to 2. - Multiplier float64 - - // cur is the current retry period. - cur time.Duration -} - -// Pause returns the next time.Duration that the caller should use to backoff. -func (bo *Backoff) Pause() time.Duration { - if bo.Initial == 0 { - bo.Initial = time.Second - } - if bo.cur == 0 { - bo.cur = bo.Initial - } - if bo.Max == 0 { - bo.Max = 30 * time.Second - } - if bo.Multiplier < 1 { - bo.Multiplier = 2 - } - // Select a duration between 1ns and the current max. It might seem - // counterintuitive to have so much jitter, but - // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that - // that is the best strategy. - d := time.Duration(1 + rand.Int63n(int64(bo.cur))) - bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) - if bo.cur > bo.Max { - bo.cur = bo.Max - } - return d -} - -type grpcOpt []grpc.CallOption - -func (o grpcOpt) Resolve(s *CallSettings) { - s.GRPC = o -} - -type pathOpt struct { - p string -} - -func (p pathOpt) Resolve(s *CallSettings) { - s.Path = p.p -} - -type timeoutOpt struct { - t time.Duration -} - -func (t timeoutOpt) Resolve(s *CallSettings) { - s.timeout = t.t -} - -// WithPath applies a Path override to the HTTP-based APICall. -// -// This is for internal use only. -func WithPath(p string) CallOption { - return &pathOpt{p: p} -} - -// WithGRPCOptions allows passing gRPC call options during client creation. -func WithGRPCOptions(opt ...grpc.CallOption) CallOption { - return grpcOpt(append([]grpc.CallOption(nil), opt...)) -} - -// WithTimeout is a convenience option for setting a context.WithTimeout on the -// singular context.Context used for **all** APICall attempts. Calculated from -// the start of the first APICall attempt. -// If the context.Context provided to Invoke already has a Deadline set, that -// will always be respected over the deadline calculated using this option. -func WithTimeout(t time.Duration) CallOption { - return &timeoutOpt{t: t} -} - -// CallSettings allow fine-grained control over how calls are made. -type CallSettings struct { - // Retry returns a Retryer to be used to control retry logic of a method call. - // If Retry is nil or the returned Retryer is nil, the call will not be retried. - Retry func() Retryer - - // CallOptions to be forwarded to GRPC. 
- GRPC []grpc.CallOption - - // Path is an HTTP override for an APICall. - Path string - - // Timeout defines the amount of time that Invoke has to complete. - // Unexported so it cannot be changed by the code in an APICall. - timeout time.Duration -} diff --git a/vendor/github.com/googleapis/gax-go/v2/content_type.go b/vendor/github.com/googleapis/gax-go/v2/content_type.go deleted file mode 100644 index 1b53d0a3ac..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/content_type.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2022, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "io" - "io/ioutil" - "net/http" -) - -const sniffBuffSize = 512 - -func newContentSniffer(r io.Reader) *contentSniffer { - return &contentSniffer{r: r} -} - -// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. -type contentSniffer struct { - r io.Reader - start []byte // buffer for the sniffed bytes. - err error // set to any error encountered while reading bytes to be sniffed. - - ctype string // set on first sniff. - sniffed bool // set to true on first sniff. -} - -func (cs *contentSniffer) Read(p []byte) (n int, err error) { - // Ensure that the content type is sniffed before any data is consumed from Reader. - _, _ = cs.ContentType() - - if len(cs.start) > 0 { - n := copy(p, cs.start) - cs.start = cs.start[n:] - return n, nil - } - - // We may have read some bytes into start while sniffing, even if the read ended in an error. - // We should first return those bytes, then the error. - if cs.err != nil { - return 0, cs.err - } - - // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. - return cs.r.Read(p) -} - -// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. 
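The sniffing approach described above generalizes with just the standard library: read up to 512 bytes, classify them with http.DetectContentType, then splice the consumed prefix back in front of the remaining stream. A rough sketch (the sniff helper is illustrative, not part of gax):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// sniff reads up to 512 bytes from r, detects the content type, and
// returns a reader that replays the sniffed prefix before the rest.
func sniff(r io.Reader) (io.Reader, string, error) {
	buf := make([]byte, 512)
	n, err := io.ReadFull(r, buf)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return nil, "", err
	}
	ctype := http.DetectContentType(buf[:n])
	return io.MultiReader(bytes.NewReader(buf[:n]), r), ctype, nil
}

func main() {
	body := bytes.NewReader([]byte(`{"hello": "world"}`))
	r, ctype, _ := sniff(body)
	rest, _ := io.ReadAll(r)
	fmt.Println(ctype)       // text/plain; charset=utf-8
	fmt.Println(string(rest)) // {"hello": "world"} — the full stream survives
}
```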
-func (cs *contentSniffer) ContentType() (string, bool) { - if cs.sniffed { - return cs.ctype, cs.ctype != "" - } - cs.sniffed = true - // If ReadAll hits EOF, it returns err==nil. - cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) - - // Don't try to detect the content type based on possibly incomplete data. - if cs.err != nil { - return "", false - } - - cs.ctype = http.DetectContentType(cs.start) - return cs.ctype, true -} - -// DetermineContentType determines the content type of the supplied reader. -// The content of media will be sniffed to determine the content type. -// After calling DetectContentType the caller must not perform further reads on -// media, but rather read from the Reader that is returned. -func DetermineContentType(media io.Reader) (io.Reader, string) { - // For backwards compatibility, allow clients to set content - // type by providing a ContentTyper for media. - // Note: This is an anonymous interface definition copied from googleapi.ContentTyper. - if typer, ok := media.(interface { - ContentType() string - }); ok { - return media, typer.ContentType() - } - - sniffer := newContentSniffer(media) - if ctype, ok := sniffer.ContentType(); ok { - return sniffer, ctype - } - // If content type could not be sniffed, reads from sniffer will eventually fail with an error. - return sniffer, "" -} diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go deleted file mode 100644 index 36cdfa33e3..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/gax.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package gax contains a set of modules which aid the development of APIs -// for clients and servers based on gRPC and Google API conventions. -// -// Application code will rarely need to use this library directly. 
-// However, code generated automatically from API definition files can use it -// to simplify code generation and to provide more convenient and idiomatic API surfaces. -package gax - -import "github.com/googleapis/gax-go/v2/internal" - -// Version specifies the gax-go version being used. -const Version = internal.Version diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go deleted file mode 100644 index 6488461f4d..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2018, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "bytes" - "runtime" - "strings" - "unicode" -) - -var ( - // GoVersion is a header-safe representation of the current runtime - // environment's Go version. This is for GAX consumers that need to - // report the Go runtime version in API calls. - GoVersion string - // version is a package internal global variable for testing purposes. - version = runtime.Version -) - -// versionUnknown is only used when the runtime version cannot be determined. -const versionUnknown = "UNKNOWN" - -func init() { - GoVersion = goVersion() -} - -// goVersion returns a Go runtime version derived from the runtime environment -// that is modified to be suitable for reporting in a header, meaning it has no -// whitespace. If it is unable to determine the Go runtime version, it returns -// versionUnknown. 
-func goVersion() string { - const develPrefix = "devel +" - - s := version() - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - - notSemverRune := func(r rune) bool { - return !strings.ContainsRune("0123456789.", r) - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - // Some release candidates already have a dash in them. - if !strings.HasPrefix(prerelease, "-") { - prerelease = "-" + prerelease - } - s += prerelease - } - return s - } - return "UNKNOWN" -} - -// XGoogHeader is for use by the Google Cloud Libraries only. -// -// XGoogHeader formats key-value pairs. -// The resulting string is suitable for x-goog-api-client header. -func XGoogHeader(keyval ...string) string { - if len(keyval) == 0 { - return "" - } - if len(keyval)%2 != 0 { - panic("gax.Header: odd argument count") - } - var buf bytes.Buffer - for i := 0; i < len(keyval); i += 2 { - buf.WriteByte(' ') - buf.WriteString(keyval[i]) - buf.WriteByte('/') - buf.WriteString(keyval[i+1]) - } - return buf.String()[1:] -} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go deleted file mode 100644 index 374dcdb115..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package internal - -// Version is the current tagged release of the library. 
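XGoogHeader's output format is easy to see with a small re-creation (the helper below is illustrative, and the version strings are made up): pairs are joined as key/value tokens separated by single spaces.

```go
package main

import (
	"fmt"
	"strings"
)

// xGoogHeader mirrors the formatting above: each key/value pair becomes a
// "key/value" token and the tokens are space-separated.
func xGoogHeader(keyval ...string) string {
	if len(keyval)%2 != 0 {
		panic("odd argument count")
	}
	parts := make([]string, 0, len(keyval)/2)
	for i := 0; i < len(keyval); i += 2 {
		parts = append(parts, keyval[i]+"/"+keyval[i+1])
	}
	return strings.Join(parts, " ")
}

func main() {
	fmt.Println(xGoogHeader("gl-go", "1.21.0", "gax", "2.11.0"))
	// Output: gl-go/1.21.0 gax/2.11.0
}
```

The original builds the same string with a bytes.Buffer and trims the leading space; strings.Join is just the more compact way to express it.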
-const Version = "2.11.0" diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go deleted file mode 100644 index 721d1af551..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/invoke.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "context" - "strings" - "time" - - "github.com/googleapis/gax-go/v2/apierror" -) - -// APICall is a user defined call stub. -type APICall func(context.Context, CallSettings) error - -// Invoke calls the given APICall, performing retries as specified by opts, if -// any. -func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { - var settings CallSettings - for _, opt := range opts { - opt.Resolve(&settings) - } - return invoke(ctx, call, settings, Sleep) -} - -// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. -// If interrupted, Sleep returns ctx.Err(). -func Sleep(ctx context.Context, d time.Duration) error { - t := time.NewTimer(d) - select { - case <-ctx.Done(): - t.Stop() - return ctx.Err() - case <-t.C: - return nil - } -} - -type sleeper func(ctx context.Context, d time.Duration) error - -// invoke implements Invoke, taking an additional sleeper argument for testing. -func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { - var retryer Retryer - - // Only use the value provided via WithTimeout if the context doesn't - // already have a deadline. This is important for backwards compatibility if - // the user already set a deadline on the context given to Invoke. - if _, ok := ctx.Deadline(); !ok && settings.timeout != 0 { - c, cc := context.WithTimeout(ctx, settings.timeout) - defer cc() - ctx = c - } - - for { - err := call(ctx, settings) - if err == nil { - return nil - } - // Never retry permanent certificate errors. (e.x. if ca-certificates - // are not installed). 
We should only make very few, targeted - // exceptions: many (other) status=Unavailable should be retried, such - // as if there's a network hiccup, or the internet goes out for a - // minute. This is also why here we are doing string parsing instead of - // simply making Unavailable a non-retried code elsewhere. - if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { - return err - } - if apierr, ok := apierror.FromError(err); ok { - err = apierr - } - if settings.Retry == nil { - return err - } - if retryer == nil { - if r := settings.Retry(); r != nil { - retryer = r - } else { - return err - } - } - if d, ok := retryer.Retry(err); !ok { - return err - } else if err = sp(ctx, d); err != nil { - return err - } - } -} diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go deleted file mode 100644 index cc4486eb9e..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2022, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "encoding/json" - "errors" - "io" - - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" -) - -var ( - arrayOpen = json.Delim('[') - arrayClose = json.Delim(']') - errBadOpening = errors.New("unexpected opening token, expected '['") -) - -// ProtoJSONStream represents a wrapper for consuming a stream of protobuf -// messages encoded using protobuf-JSON format. More information on this format -// can be found at https://developers.google.com/protocol-buffers/docs/proto3#json. -// The stream must appear as a comma-delimited, JSON array of obbjects with -// opening and closing square braces. -// -// This is for internal use only. 
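Sleep is the piece that makes the retry loop above cancellable between attempts: it waits like time.Sleep but returns early with ctx.Err() when the context is done. The same select-on-timer idiom, as a standalone sketch:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// sleepCtx waits for d, but returns early with ctx.Err() if the context
// is cancelled or times out first.
func sleepCtx(ctx context.Context, d time.Duration) error {
	t := time.NewTimer(d)
	select {
	case <-ctx.Done():
		t.Stop() // release the timer early
		return ctx.Err()
	case <-t.C:
		return nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond)
	defer cancel()
	err := sleepCtx(ctx, time.Second) // would sleep 1s, cut short at 20ms
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
```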
-type ProtoJSONStream struct { - first, closed bool - reader io.ReadCloser - stream *json.Decoder - typ protoreflect.MessageType -} - -// NewProtoJSONStreamReader accepts a stream of bytes via an io.ReadCloser that are -// protobuf-JSON encoded protobuf messages of the given type. The ProtoJSONStream -// must be closed when done. -// -// This is for internal use only. -func NewProtoJSONStreamReader(rc io.ReadCloser, typ protoreflect.MessageType) *ProtoJSONStream { - return &ProtoJSONStream{ - first: true, - reader: rc, - stream: json.NewDecoder(rc), - typ: typ, - } -} - -// Recv decodes the next protobuf message in the stream or returns io.EOF if -// the stream is done. It is not safe to call Recv on the same stream from -// different goroutines, just like it is not safe to do so with a single gRPC -// stream. Type-cast the protobuf message returned to the type provided at -// ProtoJSONStream creation. -// Calls to Recv after calling Close will produce io.EOF. -func (s *ProtoJSONStream) Recv() (proto.Message, error) { - if s.closed { - return nil, io.EOF - } - if s.first { - s.first = false - - // Consume the opening '[' so Decode gets one object at a time. - if t, err := s.stream.Token(); err != nil { - return nil, err - } else if t != arrayOpen { - return nil, errBadOpening - } - } - - // Capture the next block of data for the item (a JSON object) in the stream. - var raw json.RawMessage - if err := s.stream.Decode(&raw); err != nil { - e := err - // To avoid checking the first token of each stream, just attempt to - // Decode the next blob and if that fails, double check if it is just - // the closing token ']'. If it is the closing, return io.EOF. If it - // isn't, return the original error. - if t, _ := s.stream.Token(); t == arrayClose { - e = io.EOF - } - return nil, e - } - - // Initialize a new instance of the protobuf message to unmarshal the - // raw data into. - m := s.typ.New().Interface() - err := protojson.Unmarshal(raw, m) - - return m, err -} - -// Close closes the stream so that resources are cleaned up. -func (s *ProtoJSONStream) Close() error { - // Dereference the *json.Decoder so that the memory is gc'd. 
- s.stream = nil - s.closed = true - - return s.reader.Close() -} diff --git a/vendor/github.com/googleapis/gax-go/v2/release-please-config.json b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json deleted file mode 100644 index 61ee266a15..0000000000 --- a/vendor/github.com/googleapis/gax-go/v2/release-please-config.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "release-type": "go-yoshi", - "separate-pull-requests": true, - "include-component-in-tag": false, - "packages": { - "v2": { - "component": "v2" - } - } -} diff --git a/vendor/github.com/gordonklaus/ineffassign/pkg/ineffassign/ineffassign.go b/vendor/github.com/gordonklaus/ineffassign/pkg/ineffassign/ineffassign.go index c7b4fa9784..f9dece8f2b 100644 --- a/vendor/github.com/gordonklaus/ineffassign/pkg/ineffassign/ineffassign.go +++ b/vendor/github.com/gordonklaus/ineffassign/pkg/ineffassign/ineffassign.go @@ -60,6 +60,7 @@ type builder struct { block *block vars map[*ast.Object]*variable results []*ast.FieldList + defers []bool breaks branchStack continues branchStack gotos branchStack @@ -181,6 +182,12 @@ func (bld *builder) Visit(n ast.Node) ast.Visitor { } brek.setDestination(bld.newBlock(exits...)) bld.breaks.pop() + case *ast.DeferStmt: + bld.walk(n.Call.Fun) + for _, a := range n.Call.Args { + bld.walk(a) + } + bld.defers[len(bld.defers)-1] = true case *ast.LabeledStmt: bld.gotos.get(n.Label).setDestination(bld.newBlock(bld.block)) bld.labelStmt = n @@ -360,6 +367,7 @@ func (bld *builder) fun(typ *ast.FuncType, body *ast.BlockStmt) { v.fundept++ } bld.results = append(bld.results, typ.Results) + bld.defers = append(bld.defers, false) b := bld.block bld.newBlock() @@ -369,6 +377,7 @@ func (bld *builder) fun(typ *ast.FuncType, body *ast.BlockStmt) { bld.block = b bld.results = bld.results[:len(bld.results)-1] + bld.defers = bld.defers[:len(bld.defers)-1] for _, v := range bld.vars { v.fundept-- } @@ -422,8 +431,11 @@ func (bld *builder) swtch(stmt ast.Stmt, cases []ast.Stmt) { bld.breaks.pop() } -// An operation that might panic marks named function results as used. +// If an operation might panic and be recovered, mark named function results as used. func (bld *builder) maybePanic() { + if len(bld.defers) == 0 || !bld.defers[len(bld.defers)-1] { + return + } if len(bld.results) == 0 { return } diff --git a/vendor/github.com/hashicorp/go-cty/LICENSE b/vendor/github.com/hashicorp/go-cty/LICENSE new file mode 100644 index 0000000000..d6503b5552 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017-2018 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-cty/cty/capsule.go b/vendor/github.com/hashicorp/go-cty/cty/capsule.go new file mode 100644 index 0000000000..2fdc15eaec --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/capsule.go @@ -0,0 +1,128 @@ +package cty + +import ( + "fmt" + "reflect" +) + +type capsuleType struct { + typeImplSigil + Name string + GoType reflect.Type + Ops *CapsuleOps +} + +func (t *capsuleType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(*capsuleType); ok { + // capsule types compare by pointer identity + return otherP == t + } + return false +} + +func (t *capsuleType) FriendlyName(mode friendlyTypeNameMode) string { + return t.Name +} + +func (t *capsuleType) GoString() string { + impl := t.Ops.TypeGoString + if impl == nil { + // To get a useful representation of our native type requires some + // shenanigans. + victimVal := reflect.Zero(t.GoType) + if t.Ops == noCapsuleOps { + return fmt.Sprintf("cty.Capsule(%q, reflect.TypeOf(%#v))", t.Name, victimVal.Interface()) + } else { + // Including the operations in the output will make this _very_ long, + // so in practice any capsule type with ops ought to provide a + // TypeGoString function to override this with something more + // reasonable. + return fmt.Sprintf("cty.CapsuleWithOps(%q, reflect.TypeOf(%#v), %#v)", t.Name, victimVal.Interface(), t.Ops) + } + } + return impl(t.GoType) +} + +// Capsule creates a new Capsule type. +// +// A Capsule type is a special type that can be used to transport arbitrary +// Go native values of a given type through the cty type system. A language +// that uses cty as its type system might, for example, provide functions +// that return capsule-typed values and then other functions that operate +// on those values. +// +// From cty's perspective, Capsule types have a few interesting characteristics, +// described in the following paragraphs. +// +// Each capsule type has an associated Go native type that it is able to +// transport. Capsule types compare by identity, so each call to the +// Capsule function creates an entirely-distinct cty Type, even if two calls +// use the same native type. +// +// Each capsule-typed value contains a pointer to a value of the given native +// type. A capsule-typed value by default supports no operations except +// equality, and equality is implemented by pointer identity of the +// encapsulated pointer. A capsule type can optionally have its own +// implementations of certain operations if it is created with CapsuleWithOps +// instead of Capsule. +// +// The given name is used as the new type's "friendly name". This can be any +// string in principle, but will usually be a short, all-lowercase name aimed +// at users of the embedding language (i.e. not mention Go-specific details) +// and will ideally not create ambiguity with any predefined cty type. +// +// Capsule types are never introduced by any standard cty operation, so a +// calling application opts in to including them within its own type system +// by creating them and introducing them via its own functions. At that point, +// the application is responsible for dealing with any capsule-typed values +// that might be returned. 
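A short usage sketch of the capsule API described above (dbHandle and handleType are illustrative names): the capsule type transports a Go pointer through the cty type system, and since no Ops are provided, equality falls back to pointer identity.

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/hashicorp/go-cty/cty"
)

type dbHandle struct{ dsn string }

// handleType can smuggle *dbHandle values through the cty type system.
// Capsule types compare by identity, so a second Capsule call with the
// same native type would yield a distinct, incompatible type.
var handleType = cty.Capsule("db_handle", reflect.TypeOf(dbHandle{}))

func main() {
	h := &dbHandle{dsn: "postgres://..."}
	v := cty.CapsuleVal(handleType, h)

	fmt.Println(v.Type().IsCapsuleType())                       // true
	fmt.Println(v.EncapsulatedValue().(*dbHandle))              // &{postgres://...}
	fmt.Println(v.Equals(cty.CapsuleVal(handleType, h)).True()) // true: same pointer
}
```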
+func Capsule(name string, nativeType reflect.Type) Type { + return Type{ + &capsuleType{ + Name: name, + GoType: nativeType, + Ops: noCapsuleOps, + }, + } +} + +// CapsuleWithOps is like Capsule except the caller may provide an object +// representing some overloaded operation implementations to associate with +// the given capsule type. +// +// All of the other caveats and restrictions for capsule types still apply, but +// overloaded operations can potentially help a capsule type participate better +// in cty operations. +func CapsuleWithOps(name string, nativeType reflect.Type, ops *CapsuleOps) Type { + // Copy the operations to make sure the caller can't modify them after + // we're constructed. + ourOps := *ops + ourOps.assertValid() + + return Type{ + &capsuleType{ + Name: name, + GoType: nativeType, + Ops: &ourOps, + }, + } +} + +// IsCapsuleType returns true if this type is a capsule type, as created +// by cty.Capsule. +func (t Type) IsCapsuleType() bool { + _, ok := t.typeImpl.(*capsuleType) + return ok +} + +// EncapsulatedType returns the encapsulated native type of a capsule type, +// or panics if the receiver is not a Capsule type. +// +// Use IsCapsuleType to determine if this method is safe to call. +func (t Type) EncapsulatedType() reflect.Type { + impl, ok := t.typeImpl.(*capsuleType) + if !ok { + panic("not a capsule type") + } + return impl.GoType +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go b/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go new file mode 100644 index 0000000000..3ff6855ecd --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go @@ -0,0 +1,132 @@ +package cty + +import ( + "reflect" +) + +// CapsuleOps represents a set of overloaded operations for a capsule type. +// +// Each field is a reference to a function that can either be nil or can be +// set to an implementation of the corresponding operation. If an operation +// function is nil then it isn't supported for the given capsule type. +type CapsuleOps struct { + // GoString provides the GoString implementation for values of the + // corresponding type. Conventionally this should return a string + // representation of an expression that would produce an equivalent + // value. + GoString func(val interface{}) string + + // TypeGoString provides the GoString implementation for the corresponding + // capsule type itself. + TypeGoString func(goTy reflect.Type) string + + // Equals provides the implementation of the Equals operation. This is + // called only with known, non-null values of the corresponding type, + // but if the corresponding type is a compound type then it must be + // ready to detect and handle nested unknown or null values, usually + // by recursively calling Value.Equals on those nested values. + // + // The result value must always be of type cty.Bool, or the Equals + // operation will panic. + // + // If RawEquals is set without also setting Equals, the RawEquals + // implementation will be used as a fallback implementation. That fallback + // is appropriate only for leaf types that do not contain any nested + // cty.Value that would need to distinguish Equals vs. RawEquals for their + // own equality. + // + // If RawEquals is nil then Equals must also be nil, selecting the default + // pointer-identity comparison instead. + Equals func(a, b interface{}) Value + + // RawEquals provides the implementation of the RawEquals operation.
+ // This is called only with known, non-null values of the corresponding + // type, but if the corresponding type is a compound type then it must be + // ready to detect and handle nested unknown or null values, usually + // by recursively calling Value.RawEquals on those nested values. + // + // If RawEquals is nil, values of the corresponding type are compared by + // pointer identity of the encapsulated value. + RawEquals func(a, b interface{}) bool + + // ConversionFrom can provide conversions from the corresponding type to + // some other type when values of the corresponding type are used with + // the "convert" package. (The main cty package does not use this operation.) + // + // This function itself returns a function, allowing it to switch its + // behavior depending on the given source type. Return nil to indicate + // that no such conversion is available. + ConversionFrom func(src Type) func(interface{}, Path) (Value, error) + + // ConversionTo can provide conversions to the corresponding type from + // some other type when values of the corresponding type are used with + // the "convert" package. (The main cty package does not use this operation.) + // + // This function itself returns a function, allowing it to switch its + // behavior depending on the given destination type. Return nil to indicate + // that no such conversion is available. + ConversionTo func(dst Type) func(Value, Path) (interface{}, error) + + // ExtensionData is an extension point for applications that wish to + // create their own extension features using capsule types. + // + // The key argument is any value that can be compared with Go's == + // operator, but should be of a named type in a package belonging to the + // application defining the key. An ExtensionData implementation must + // check to see if the given key is familiar to it, and if so return a + // suitable value for the key. + // + // If the given key is unrecognized, the ExtensionData function must + // return a nil interface. (Importantly, not an interface containing a nil + // pointer of some other type.) + // The common implementation of ExtensionData is a single switch statement + // over "key" which has a default case returning nil. + // + // The meaning of any given key is entirely up to the application that + // defines it. Applications consuming ExtensionData from capsule types + // should do so defensively: if the result of ExtensionData is not valid, + // prefer to ignore it or gracefully produce an error rather than causing + // a panic. + ExtensionData func(key interface{}) interface{} +} + +// noCapsuleOps is a pointer to a CapsuleOps with no functions set, which +// is used as the default operations value when a type is created using +// the Capsule function. +var noCapsuleOps = &CapsuleOps{} + +func (ops *CapsuleOps) assertValid() { + if ops.RawEquals == nil && ops.Equals != nil { + panic("Equals cannot be set without RawEquals") + } +} + +// CapsuleOps returns a pointer to the CapsuleOps value for a capsule type, +// or panics if the receiver is not a capsule type. +// +// The caller must not modify the CapsuleOps. +func (ty Type) CapsuleOps() *CapsuleOps { + if !ty.IsCapsuleType() { + panic("not a capsule-typed value") + } + + return ty.typeImpl.(*capsuleType).Ops +} + +// CapsuleExtensionData is a convenience interface to the ExtensionData +// function that can be optionally implemented for a capsule type. It will +// check to see if the underlying type implements ExtensionData and call it +// if so.
If not, it will return nil to indicate that the given key is not +// supported. +// +// See the documentation for CapsuleOps.ExtensionData for more information +// on the purpose of and usage of this mechanism. +// +// If CapsuleExtensionData is called on a non-capsule type then it will panic. +func (ty Type) CapsuleExtensionData(key interface{}) interface{} { + ops := ty.CapsuleOps() + if ops.ExtensionData == nil { + return nil + } + return ops.ExtensionData(key) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/collection.go b/vendor/github.com/hashicorp/go-cty/cty/collection.go new file mode 100644 index 0000000000..ab3919b14b --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/collection.go @@ -0,0 +1,34 @@ +package cty + +import ( + "errors" +) + +type collectionTypeImpl interface { + ElementType() Type +} + +// IsCollectionType returns true if the given type supports the operations +// that are defined for all collection types. +func (t Type) IsCollectionType() bool { + _, ok := t.typeImpl.(collectionTypeImpl) + return ok +} + +// ElementType returns the element type of the receiver if it is a collection +// type, or panics if it is not. Use IsCollectionType first to test whether +// this method will succeed. +func (t Type) ElementType() Type { + if ct, ok := t.typeImpl.(collectionTypeImpl); ok { + return ct.ElementType() + } + panic(errors.New("not a collection type")) +} + +// ElementCallback is a callback type used for iterating over elements of +// collections and attributes of objects. +// +// The types of key and value depend on what type is being iterated over. +// Return true to stop iterating after the current element, or false to +// continue iterating. +type ElementCallback func(key Value, val Value) (stop bool) diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go b/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go new file mode 100644 index 0000000000..6ad3bff454 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go @@ -0,0 +1,165 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// compareTypes implements a preference order for unification. +// +// The result of this method is not useful for anything other than unification +// preferences, since it assumes that the caller will verify that any suggested +// conversion is actually possible and it is thus able to make certain +// optimistic assumptions. +func compareTypes(a cty.Type, b cty.Type) int { + + // DynamicPseudoType always has lowest preference, because anything can + // convert to it (it acts as a placeholder for "any type") and we want + // to optimistically assume that any dynamics will converge on matching + // their neighbors. + if a == cty.DynamicPseudoType || b == cty.DynamicPseudoType { + if a != cty.DynamicPseudoType { + return -1 + } + if b != cty.DynamicPseudoType { + return 1 + } + return 0 + } + + if a.IsPrimitiveType() && b.IsPrimitiveType() { + // String is a supertype of all primitive types, because we can + // represent all primitive values as specially-formatted strings.
+ if a == cty.String || b == cty.String { + if a != cty.String { + return 1 + } + if b != cty.String { + return -1 + } + return 0 + } + } + + if a.IsListType() && b.IsListType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsSetType() && b.IsSetType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsMapType() && b.IsMapType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + + // From this point on we may have swapped the two items in order to + // simplify our cases. Therefore any non-zero return after this point + // must be multiplied by "swap" to potentially invert the return value + // if needed. + swap := 1 + switch { + case a.IsTupleType() && b.IsListType(): + fallthrough + case a.IsObjectType() && b.IsMapType(): + fallthrough + case a.IsSetType() && b.IsTupleType(): + fallthrough + case a.IsSetType() && b.IsListType(): + a, b = b, a + swap = -1 + } + + if b.IsSetType() && (a.IsTupleType() || a.IsListType()) { + // We'll just optimistically assume that the element types are + // unifyable/convertible, and let a second recursive pass + // figure out how to make that so. + return -1 * swap + } + + if a.IsListType() && b.IsTupleType() { + // We'll just optimistically assume that the tuple's element types + // can be unified into something compatible with the list's element + // type. + return -1 * swap + } + + if a.IsMapType() && b.IsObjectType() { + // We'll just optimistically assume that the object's attribute types + // can be unified into something compatible with the map's element + // type. + return -1 * swap + } + + // For object and tuple types, comparing two types doesn't really tell + // the whole story because it may be possible to construct a new type C + // that is the supertype of both A and B by unifying each attribute/element + // separately. That possibility is handled by Unify as a follow-up if + // type sorting is insufficient to produce a valid result. + // + // Here we will take care of the simple possibilities where no new type + // is needed. 
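compareTypes itself is unexported, but its preference order is observable through this package's public Unify entry point: since every primitive value can be represented as a string, unifying number with string should converge on string. A small sketch, assuming the vendored go-cty import path:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/convert"
)

func main() {
	// String is preferred over the other primitives, so this unifies to
	// cty.String, with a conversion produced for the number input.
	ty, convs := convert.Unify([]cty.Type{cty.Number, cty.String})
	fmt.Println(ty.FriendlyName()) // string

	// convs[i] converts the i-th input type to the unified type; it is
	// nil when no conversion is needed.
	if convs[0] != nil {
		v, err := convs[0](cty.NumberIntVal(5))
		fmt.Println(v.AsString(), err) // 5 <nil>
	}
}
```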
+ if a.IsObjectType() && b.IsObjectType() { + atysA := a.AttributeTypes() + atysB := b.AttributeTypes() + + if len(atysA) != len(atysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for k := range atysA { + if _, has := atysB[k]; !has { + return 0 + } + + cmp := compareTypes(atysA[k], atysB[k]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + if a.IsTupleType() && b.IsTupleType() { + etysA := a.TupleElementTypes() + etysB := b.TupleElementTypes() + + if len(etysA) != len(etysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for i := range etysA { + cmp := compareTypes(etysA[i], etysB[i]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + + return 0 +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go new file mode 100644 index 0000000000..9c59c8f74d --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go @@ -0,0 +1,190 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversion is an internal variant of Conversion that carries around +// a cty.Path to be used in error responses. +type conversion func(cty.Value, cty.Path) (cty.Value, error) + +func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { + conv := getConversionKnown(in, out, unsafe) + if conv == nil { + return nil + } + + // Wrap the conversion in some standard checks that we don't want to + // have to repeat in every conversion function. + var ret conversion + ret = func(in cty.Value, path cty.Path) (cty.Value, error) { + if in.IsMarked() { + // We must unmark during the conversion and then re-apply the + // same marks to the result. + in, inMarks := in.Unmark() + v, err := ret(in, path) + if v != cty.NilVal { + v = v.WithMarks(inMarks) + } + return v, err + } + + if out == cty.DynamicPseudoType { + // Conversion to DynamicPseudoType always just passes through verbatim. + return in, nil + } + if !in.IsKnown() { + return cty.UnknownVal(out), nil + } + if in.IsNull() { + // We'll pass through nulls, albeit type converted, and let + // the caller deal with whatever handling they want to do in + // case null values are considered valid in some applications. + return cty.NullVal(out), nil + } + + return conv(in, path) + } + + return ret +} + +func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion { + switch { + + case out == cty.DynamicPseudoType: + // Conversion *to* DynamicPseudoType means that the caller wishes + // to allow any type in this position, so we'll produce a do-nothing + // conversion that just passes through the value as-is. + return dynamicPassthrough + + case unsafe && in == cty.DynamicPseudoType: + // Conversion *from* DynamicPseudoType means that we have a value + // whose type isn't yet known during type checking. For these we will + // assume that conversion will succeed and deal with any errors that + // result (which is why we can only do this when "unsafe" is set). 
+ return dynamicFixup(out) + + case in.IsPrimitiveType() && out.IsPrimitiveType(): + conv := primitiveConversionsSafe[in][out] + if conv != nil { + return conv + } + if unsafe { + return primitiveConversionsUnsafe[in][out] + } + return nil + + case out.IsObjectType() && in.IsObjectType(): + return conversionObjectToObject(in, out, unsafe) + + case out.IsTupleType() && in.IsTupleType(): + return conversionTupleToTuple(in, out, unsafe) + + case out.IsListType() && (in.IsListType() || in.IsSetType()): + inEty := in.ElementType() + outEty := out.ElementType() + if inEty.Equals(outEty) { + // This indicates that we're converting from set to list with + // the same element type, so we don't need an element converter. + return conversionCollectionToList(outEty, nil) + } + + convEty := getConversion(inEty, outEty, unsafe) + if convEty == nil { + return nil + } + return conversionCollectionToList(outEty, convEty) + + case out.IsSetType() && (in.IsListType() || in.IsSetType()): + if in.IsListType() && !unsafe { + // Conversion from list to set is unsafe because it will lose + // information: the ordering will not be preserved, and any + // duplicate elements will be conflated. + return nil + } + inEty := in.ElementType() + outEty := out.ElementType() + convEty := getConversion(inEty, outEty, unsafe) + if inEty.Equals(outEty) { + // This indicates that we're converting from list to set with + // the same element type, so we don't need an element converter. + return conversionCollectionToSet(outEty, nil) + } + + if convEty == nil { + return nil + } + return conversionCollectionToSet(outEty, convEty) + + case out.IsMapType() && in.IsMapType(): + inEty := in.ElementType() + outEty := out.ElementType() + convEty := getConversion(inEty, outEty, unsafe) + if convEty == nil { + return nil + } + return conversionCollectionToMap(outEty, convEty) + + case out.IsListType() && in.IsTupleType(): + outEty := out.ElementType() + return conversionTupleToList(in, outEty, unsafe) + + case out.IsSetType() && in.IsTupleType(): + outEty := out.ElementType() + return conversionTupleToSet(in, outEty, unsafe) + + case out.IsMapType() && in.IsObjectType(): + outEty := out.ElementType() + return conversionObjectToMap(in, outEty, unsafe) + + case out.IsObjectType() && in.IsMapType(): + if !unsafe { + // Converting a map to an object is an "unsafe" conversion, + // because we don't know if all the map keys will correspond to + // object attributes. + return nil + } + return conversionMapToObject(in, out, unsafe) + + case in.IsCapsuleType() || out.IsCapsuleType(): + if !unsafe { + // Capsule types can only participate in "unsafe" conversions, + // because we don't know enough about their conversion behaviors + // to be sure that they will always be safe. + return nil + } + if in.Equals(out) { + // conversion to self is never allowed + return nil + } + if out.IsCapsuleType() { + if fn := out.CapsuleOps().ConversionTo; fn != nil { + return conversionToCapsule(in, out, fn) + } + } + if in.IsCapsuleType() { + if fn := in.CapsuleOps().ConversionFrom; fn != nil { + return conversionFromCapsule(in, out, fn) + } + } + // No conversion operation is available, then. + return nil + + default: + return nil + + } +} + +// retConversion wraps a conversion (internal type) so it can be returned +// as a Conversion (public type).
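These rules surface through the package's public API; convert.Convert also permits the "unsafe" conversions, so a string-to-number conversion type-checks but can still fail at conversion time when the string is not numeric. A brief sketch:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/convert"
)

func main() {
	// Safe direction: every number has a string representation.
	v, err := convert.Convert(cty.NumberIntVal(5), cty.String)
	fmt.Println(v.AsString(), err) // 5 <nil>

	// Unsafe direction: allowed by Convert, but fails at runtime when
	// the string isn't numeric.
	_, err = convert.Convert(cty.StringVal("not a number"), cty.Number)
	fmt.Println(err != nil) // true
}
```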
+func retConversion(conv conversion) Conversion { + if conv == nil { + return nil + } + + return func(in cty.Value) (cty.Value, error) { + return conv(in, cty.Path(nil)) + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go new file mode 100644 index 0000000000..6a6006af9e --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go @@ -0,0 +1,31 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +func conversionToCapsule(inTy, outTy cty.Type, fn func(inTy cty.Type) func(cty.Value, cty.Path) (interface{}, error)) conversion { + rawConv := fn(inTy) + if rawConv == nil { + return nil + } + + return func(in cty.Value, path cty.Path) (cty.Value, error) { + rawV, err := rawConv(in, path) + if err != nil { + return cty.NilVal, err + } + return cty.CapsuleVal(outTy, rawV), nil + } +} + +func conversionFromCapsule(inTy, outTy cty.Type, fn func(outTy cty.Type) func(interface{}, cty.Path) (cty.Value, error)) conversion { + rawConv := fn(outTy) + if rawConv == nil { + return nil + } + + return func(in cty.Value, path cty.Path) (cty.Value, error) { + return rawConv(in.EncapsulatedValue(), path) + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go new file mode 100644 index 0000000000..575973d3c3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go @@ -0,0 +1,488 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversionCollectionToList returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a list. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a list. (For example, +// if we're converting from a set into a list of the same element type.) +func conversionCollectionToList(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + if ety == cty.DynamicPseudoType { + ety = val.Type().ElementType() + } + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(elems), nil + } +} + +// conversionCollectionToSet returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a set. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a set. (For example, +// if we're converting from a list into a set of the same element type.) 
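Concretely: set-to-list with an identical element type needs no per-element converter, while list-to-set is only offered as an "unsafe" conversion because it drops ordering and conflates duplicates. A sketch against the public API:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/convert"
)

func main() {
	s := cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})

	// Set -> list of the same element type: safe, elements re-wrapped as-is.
	l, err := convert.Convert(s, cty.List(cty.String))
	fmt.Println(l.LengthInt(), err) // 2 <nil>

	// List -> set: only available unsafely (Convert permits it), since
	// ordering is lost and duplicates would be conflated.
	back, err := convert.Convert(l, cty.Set(cty.String))
	fmt.Println(back.Type().Equals(cty.Set(cty.String)), err) // true <nil>
}
```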
+func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + // Prefer a concrete type over a dynamic type when returning an + // empty set + if ety == cty.DynamicPseudoType { + ety = val.Type().ElementType() + } + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(elems), nil + } +} + +// conversionCollectionToMap returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a map. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a map. +func conversionCollectionToMap(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, 0) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: key, + } + + keyStr, err := Convert(key, cty.String) + if err != nil { + // Should never happen, because keys can only be numbers or + // strings and both can convert to string. + return cty.DynamicVal, elemPath.NewErrorf("cannot convert key type %s to string for map", key.Type().FriendlyName()) + } + + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + + elems[keyStr.AsString()] = val + } + + if len(elems) == 0 { + // Prefer a concrete type over a dynamic type when returning an + // empty map + if ety == cty.DynamicPseudoType { + ety = val.Type().ElementType() + } + return cty.MapValEmpty(ety), nil + } + + if ety.IsCollectionType() || ety.IsObjectType() { + var err error + if elems, err = conversionUnifyCollectionElements(elems, path, false); err != nil { + return cty.NilVal, err + } + } + + if err := conversionCheckMapElementTypes(elems, path); err != nil { + return cty.NilVal, err + } + + return cty.MapVal(elems), nil + } +} + +// conversionTupleToSet returns a conversion that will take a value of the +// given tuple type and return a set of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. +func conversionTupleToSet(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion { + tupleEtys := tupleType.TupleElementTypes() + + if len(tupleEtys) == 0 { + // Empty tuple short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.SetValEmpty(listEty), nil + } + } + + if listEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. 
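+		// For example, converting a value of cty.Tuple([]cty.Type{cty.String,
+		// cty.Number}) to cty.Set(cty.DynamicPseudoType) unifies the element
+		// types first; in unsafe mode that settles on cty.Set(cty.String),
+		// since number converts to string.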
+		listEty, _ = unify(tupleEtys, unsafe)
+		if listEty == cty.NilType {
+			return nil
+		}
+	}
+
+	elemConvs := make([]conversion, len(tupleEtys))
+	for i, tupleEty := range tupleEtys {
+		if tupleEty.Equals(listEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[i] = getConversion(tupleEty, listEty, unsafe)
+		if elemConvs[i] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make([]cty.Value, 0, len(elemConvs))
+		elemPath := append(path.Copy(), nil)
+		i := int64(0)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, val := it.Element()
+			var err error
+
+			elemPath[len(elemPath)-1] = cty.IndexStep{
+				Key: cty.NumberIntVal(i),
+			}
+
+			conv := elemConvs[i]
+			if conv != nil {
+				val, err = conv(val, elemPath)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems = append(elems, val)
+
+			i++
+		}
+
+		return cty.SetVal(elems), nil
+	}
+}
+
+// conversionTupleToList returns a conversion that will take a value of the
+// given tuple type and return a list of the given element type.
+//
+// Will panic if the given tupleType isn't actually a tuple type.
+func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion {
+	tupleEtys := tupleType.TupleElementTypes()
+
+	if len(tupleEtys) == 0 {
+		// Empty tuple short-circuit
+		return func(val cty.Value, path cty.Path) (cty.Value, error) {
+			return cty.ListValEmpty(listEty), nil
+		}
+	}
+
+	if listEty == cty.DynamicPseudoType {
+		// This is a special case where the caller wants us to find
+		// a suitable single type that all elements can convert to, if
+		// possible.
+		listEty, _ = unify(tupleEtys, unsafe)
+		if listEty == cty.NilType {
+			return nil
+		}
+	}
+
+	elemConvs := make([]conversion, len(tupleEtys))
+	for i, tupleEty := range tupleEtys {
+		if tupleEty.Equals(listEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[i] = getConversion(tupleEty, listEty, unsafe)
+		if elemConvs[i] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make([]cty.Value, 0, len(elemConvs))
+		elemPath := append(path.Copy(), nil)
+		i := int64(0)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, val := it.Element()
+			var err error
+
+			elemPath[len(elemPath)-1] = cty.IndexStep{
+				Key: cty.NumberIntVal(i),
+			}
+
+			conv := elemConvs[i]
+			if conv != nil {
+				val, err = conv(val, elemPath)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems = append(elems, val)
+
+			i++
+		}
+
+		return cty.ListVal(elems), nil
+	}
+}
+
+// conversionObjectToMap returns a conversion that will take a value of the
+// given object type and return a map of the given element type.
+//
+// Will panic if the given objectType isn't actually an object type.
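+//
+// For example (sketch using the package-level Convert helper):
+//
+//	in := cty.ObjectVal(map[string]cty.Value{
+//		"a": cty.NumberIntVal(1),
+//		"b": cty.NumberIntVal(2),
+//	})
+//	v, _ := Convert(in, cty.Map(cty.Number))
+//	// v is a cty.Map(cty.Number) value with keys "a" and "b".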
+func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) conversion {
+	objectAtys := objectType.AttributeTypes()
+
+	if len(objectAtys) == 0 {
+		// Empty object short-circuit
+		return func(val cty.Value, path cty.Path) (cty.Value, error) {
+			return cty.MapValEmpty(mapEty), nil
+		}
+	}
+
+	if mapEty == cty.DynamicPseudoType {
+		// This is a special case where the caller wants us to find
+		// a suitable single type that all elements can convert to, if
+		// possible.
+		objectAtysList := make([]cty.Type, 0, len(objectAtys))
+		for _, aty := range objectAtys {
+			objectAtysList = append(objectAtysList, aty)
+		}
+		mapEty, _ = unify(objectAtysList, unsafe)
+		if mapEty == cty.NilType {
+			return nil
+		}
+	}
+
+	elemConvs := make(map[string]conversion, len(objectAtys))
+	for name, objectAty := range objectAtys {
+		if objectAty.Equals(mapEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[name] = getConversion(objectAty, mapEty, unsafe)
+		if elemConvs[name] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make(map[string]cty.Value, len(elemConvs))
+		elemPath := append(path.Copy(), nil)
+		it := val.ElementIterator()
+		for it.Next() {
+			name, val := it.Element()
+			var err error
+
+			elemPath[len(elemPath)-1] = cty.IndexStep{
+				Key: name,
+			}
+
+			conv := elemConvs[name.AsString()]
+			if conv != nil {
+				val, err = conv(val, elemPath)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems[name.AsString()] = val
+		}
+
+		if mapEty.IsCollectionType() || mapEty.IsObjectType() {
+			var err error
+			if elems, err = conversionUnifyCollectionElements(elems, path, unsafe); err != nil {
+				return cty.NilVal, err
+			}
+		}
+
+		if err := conversionCheckMapElementTypes(elems, path); err != nil {
+			return cty.NilVal, err
+		}
+
+		return cty.MapVal(elems), nil
+	}
+}
+
+// conversionMapToObject returns a conversion that will take a value of the
+// given map type and return an object of the given type. The object attribute
+// types must all be compatible with the map element type.
+//
+// Will panic if the given mapType and objType are not maps and objects
+// respectively.
+func conversionMapToObject(mapType cty.Type, objType cty.Type, unsafe bool) conversion {
+	objectAtys := objType.AttributeTypes()
+	mapEty := mapType.ElementType()
+
+	elemConvs := make(map[string]conversion, len(objectAtys))
+	for name, objectAty := range objectAtys {
+		if objectAty.Equals(mapEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[name] = getConversion(mapEty, objectAty, unsafe)
+		if elemConvs[name] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make(map[string]cty.Value, len(elemConvs))
+		elemPath := append(path.Copy(), nil)
+		it := val.ElementIterator()
+		for it.Next() {
+			name, val := it.Element()
+
+			// if there is no corresponding attribute, we skip this key
+			if _, ok := objectAtys[name.AsString()]; !ok {
+				continue
+			}
+
+			var err error
+
+			elemPath[len(elemPath)-1] = cty.IndexStep{
+				Key: name,
+			}
+
+			conv := elemConvs[name.AsString()]
+			if conv != nil {
+				val, err = conv(val, elemPath)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+
+			elems[name.AsString()] = val
+		}
+
+		return cty.ObjectVal(elems), nil
+	}
+}
+
+func conversionUnifyCollectionElements(elems map[string]cty.Value, path cty.Path, unsafe bool) (map[string]cty.Value, error) {
+	elemTypes := make([]cty.Type, 0, len(elems))
+	for _, elem := range elems {
+		elemTypes = append(elemTypes, elem.Type())
+	}
+	unifiedType, _ := unify(elemTypes, unsafe)
+	if unifiedType == cty.NilType {
+		// The elements have no type in common, so unification fails.
+		return nil, path.NewErrorf("cannot find a common base type for all elements")
+	}
+
+	unifiedElems := make(map[string]cty.Value)
+	elemPath := append(path.Copy(), nil)
+
+	for name, elem := range elems {
+		if elem.Type().Equals(unifiedType) {
+			unifiedElems[name] = elem
+			continue
+		}
+		conv := getConversion(elem.Type(), unifiedType, unsafe)
+		elemPath[len(elemPath)-1] = cty.IndexStep{
+			Key: cty.StringVal(name),
+		}
+		if conv == nil {
+			// Should not happen once unification succeeded above, but we
+			// guard anyway rather than calling a nil conversion.
+			return nil, elemPath.NewErrorf("cannot convert %s to %s", elem.Type().FriendlyName(), unifiedType.FriendlyName())
+		}
+		val, err := conv(elem, elemPath)
+		if err != nil {
+			return nil, err
+		}
+		unifiedElems[name] = val
+	}
+
+	return unifiedElems, nil
+}
+
+func conversionCheckMapElementTypes(elems map[string]cty.Value, path cty.Path) error {
+	elementType := cty.NilType
+	elemPath := append(path.Copy(), nil)
+
+	for name, elem := range elems {
+		if elementType == cty.NilType {
+			elementType = elem.Type()
+			continue
+		}
+		if !elementType.Equals(elem.Type()) {
+			elemPath[len(elemPath)-1] = cty.IndexStep{
+				Key: cty.StringVal(name),
+			}
+			return elemPath.NewErrorf("%s is required", elementType.FriendlyName())
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go
new file mode 100644
index 0000000000..5f571da13c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go
@@ -0,0 +1,33 @@
+package convert
+
+import (
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// dynamicFixup deals with just-in-time conversions of values that were
+// input-typed as cty.DynamicPseudoType during analysis, ensuring that
+// we end up with the desired output type once the value is known, or
+// failing with an error if that is not possible.
+//
+// This is in the spirit of the cty philosophy of optimistically assuming that
+// DynamicPseudoType values will become the intended value eventually, and
+// dealing with any inconsistencies during final evaluation.
+func dynamicFixup(wantType cty.Type) conversion {
+	return func(in cty.Value, path cty.Path) (cty.Value, error) {
+		ret, err := Convert(in, wantType)
+		if err != nil {
+			// Re-wrap this error so that the returned path is relative
+			// to the caller's original value, rather than relative to our
+			// conversion value here.
+ return cty.NilVal, path.NewError(err) + } + return ret, nil + } +} + +// dynamicPassthrough is an identity conversion that is used when the +// target type is DynamicPseudoType, indicating that the caller doesn't care +// which type is returned. +func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) { + return in, nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go new file mode 100644 index 0000000000..93743ca82f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go @@ -0,0 +1,76 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversionObjectToObject returns a conversion that will make the input +// object type conform to the output object type, if possible. +// +// Conversion is possible only if the output type is a subset of the input +// type, meaning that each attribute of the output type has a corresponding +// attribute in the input type where a recursive conversion is available. +// +// Shallow object conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit the above definition of "subset". +func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion { + inAtys := in.AttributeTypes() + outAtys := out.AttributeTypes() + attrConvs := make(map[string]conversion) + + for name, outAty := range outAtys { + inAty, exists := inAtys[name] + if !exists { + // No conversion is available, then. + return nil + } + + if inAty.Equals(outAty) { + // No conversion needed, but we'll still record the attribute + // in our map for later reference. + attrConvs[name] = nil + continue + } + + attrConvs[name] = getConversion(inAty, outAty, unsafe) + if attrConvs[name] == nil { + // If a recursive conversion isn't available, then our top-level + // configuration is impossible too. + return nil + } + } + + // If we get here then a conversion is possible, using the attribute + // conversions given in attrConvs. 
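+	// For example, under these rules an input object of type
+	// {name: string, age: number} can convert to the object type
+	// {name: string}: the iteration below silently drops the extra
+	// "age" attribute because it has no entry in attrConvs.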
+ return func(val cty.Value, path cty.Path) (cty.Value, error) { + attrVals := make(map[string]cty.Value, len(attrConvs)) + path = append(path, nil) + pathStep := &path[len(path)-1] + + for it := val.ElementIterator(); it.Next(); { + nameVal, val := it.Element() + var err error + + name := nameVal.AsString() + *pathStep = cty.GetAttrStep{ + Name: name, + } + + conv, exists := attrConvs[name] + if !exists { + continue + } + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + attrVals[name] = val + } + + return cty.ObjectVal(attrVals), nil + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go new file mode 100644 index 0000000000..a55344413e --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go @@ -0,0 +1,57 @@ +package convert + +import ( + "strings" + + "github.com/hashicorp/go-cty/cty" +) + +var stringTrue = cty.StringVal("true") +var stringFalse = cty.StringVal("false") + +var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{ + cty.Number: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + f := val.AsBigFloat() + return cty.StringVal(f.Text('f', -1)), nil + }, + }, + cty.Bool: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + if val.True() { + return stringTrue, nil + } else { + return stringFalse, nil + } + }, + }, +} + +var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{ + cty.String: { + cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) { + v, err := cty.ParseNumberVal(val.AsString()) + if err != nil { + return cty.NilVal, path.NewErrorf("a number is required") + } + return v, nil + }, + cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) { + switch val.AsString() { + case "true", "1": + return cty.True, nil + case "false", "0": + return cty.False, nil + default: + switch strings.ToLower(val.AsString()) { + case "true": + return cty.NilVal, path.NewErrorf("a bool is required; to convert from string, use lowercase \"true\"") + case "false": + return cty.NilVal, path.NewErrorf("a bool is required; to convert from string, use lowercase \"false\"") + default: + return cty.NilVal, path.NewErrorf("a bool is required") + } + } + }, + }, +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go new file mode 100644 index 0000000000..d89ec3808b --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go @@ -0,0 +1,71 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversionTupleToTuple returns a conversion that will make the input +// tuple type conform to the output tuple type, if possible. +// +// Conversion is possible only if the two tuple types have the same number +// of elements and the corresponding elements by index can be converted. +// +// Shallow tuple conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit which element type conversions are possible. 
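+//
+// For example (sketch using the package-level Convert helper):
+//
+//	in := cty.TupleVal([]cty.Value{cty.NumberIntVal(1), cty.True})
+//	v, _ := Convert(in, cty.Tuple([]cty.Type{cty.String, cty.String}))
+//	// v is cty.TupleVal([]cty.Value{cty.StringVal("1"), cty.StringVal("true")})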
+func conversionTupleToTuple(in, out cty.Type, unsafe bool) conversion {
+	inEtys := in.TupleElementTypes()
+	outEtys := out.TupleElementTypes()
+
+	if len(inEtys) != len(outEtys) {
+		return nil // no conversion is possible
+	}
+
+	elemConvs := make([]conversion, len(inEtys))
+
+	for i, outEty := range outEtys {
+		inEty := inEtys[i]
+
+		if inEty.Equals(outEty) {
+			// No conversion needed, so we can leave this one nil.
+			continue
+		}
+
+		elemConvs[i] = getConversion(inEty, outEty, unsafe)
+		if elemConvs[i] == nil {
+			// If a recursive conversion isn't available, then our top-level
+			// configuration is impossible too.
+			return nil
+		}
+	}
+
+	// If we get here then a conversion is possible, using the element
+	// conversions given in elemConvs.
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elemVals := make([]cty.Value, len(elemConvs))
+		path = append(path, nil)
+		pathStep := &path[len(path)-1]
+
+		i := 0
+		for it := val.ElementIterator(); it.Next(); i++ {
+			_, val := it.Element()
+			var err error
+
+			*pathStep = cty.IndexStep{
+				Key: cty.NumberIntVal(int64(i)),
+			}
+
+			conv := elemConvs[i]
+			if conv != nil {
+				val, err = conv(val, path)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+
+			elemVals[i] = val
+		}
+
+		return cty.TupleVal(elemVals), nil
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go b/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go
new file mode 100644
index 0000000000..2037299bab
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go
@@ -0,0 +1,15 @@
+// Package convert contains some routines for converting between cty types.
+// The intent of providing this package is to encourage applications using
+// cty to have consistent type conversion behavior for maximal interoperability
+// when Values pass from one application to another.
+//
+// The conversions are categorized into two categories. "Safe" conversions are
+// ones that are guaranteed to succeed if given a non-null value of the
+// appropriate source type. "Unsafe" conversions, on the other hand, are valid
+// for only a subset of input values, and thus may fail with an error when
+// called for values outside of that valid subset.
+//
+// The functions whose names end in Unsafe support all of the conversions that
+// are supported by the corresponding functions whose names do not have that
+// suffix, and then additional unsafe conversions as well.
+package convert
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go b/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go
new file mode 100644
index 0000000000..72f307f207
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go
@@ -0,0 +1,220 @@
+package convert
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// MismatchMessage is a helper to return an English-language description of
+// the differences between got and want, phrased as a reason why got does
+// not conform to want.
+//
+// This function does not itself attempt conversion, and so it should generally
+// be used only after a conversion has failed, to report the conversion failure
+// to an English-speaking user. The result will be confusing if got is actually
+// conforming to or convertible to want.
+//
+// The shorthand helper function Convert uses this function internally to
+// produce its error messages, so callers of that function do not need to
+// also use MismatchMessage.
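+//
+// For example (hedged sketch; output per mismatchMessageObjects below):
+//
+//	MismatchMessage(cty.EmptyObject, cty.Object(map[string]cty.Type{"name": cty.String}))
+//	// returns: attribute "name" is required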
+//
+// This function is similar to Type.TestConformance, but it is tailored to
+// describing conversion failures and so the messages it generates relate
+// specifically to the conversion rules implemented in this package.
+func MismatchMessage(got, want cty.Type) string {
+	switch {
+
+	case got.IsObjectType() && want.IsObjectType():
+		// If both types are object types then we may be able to say something
+		// about their respective attributes.
+		return mismatchMessageObjects(got, want)
+
+	case got.IsTupleType() && want.IsListType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from tuple to list failed then it's because we couldn't
+		// find a common type to convert all of the tuple elements to.
+		return "all list elements must have the same type"
+
+	case got.IsTupleType() && want.IsSetType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from tuple to set failed then it's because we couldn't
+		// find a common type to convert all of the tuple elements to.
+		return "all set elements must have the same type"
+
+	case got.IsObjectType() && want.IsMapType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from object to map failed then it's because we couldn't
+		// find a common type to convert all of the object attributes to.
+		return "all map elements must have the same type"
+
+	case (got.IsTupleType() || got.IsObjectType()) && want.IsCollectionType():
+		return mismatchMessageCollectionsFromStructural(got, want)
+
+	case got.IsCollectionType() && want.IsCollectionType():
+		return mismatchMessageCollectionsFromCollections(got, want)
+
+	default:
+		// If we have nothing better to say, we'll just state what was required.
+		return want.FriendlyNameForConstraint() + " required"
+	}
+}
+
+func mismatchMessageObjects(got, want cty.Type) string {
+	// Per our conversion rules, "got" is allowed to be a superset of "want",
+	// and so we'll produce error messages here under that assumption.
+	gotAtys := got.AttributeTypes()
+	wantAtys := want.AttributeTypes()
+
+	// If we find missing attributes then we'll report those in preference,
+	// but if not then we will report a maximum of one non-conforming
+	// attribute, just to keep our messages relatively terse.
+	// We'll also prefer to report a recursive type error from an _unsafe_
+	// conversion over a safe one, because these are subjectively more
+	// "serious".
+	var missingAttrs []string
+	var unsafeMismatchAttr string
+	var safeMismatchAttr string
+
+	for name, wantAty := range wantAtys {
+		gotAty, exists := gotAtys[name]
+		if !exists {
+			missingAttrs = append(missingAttrs, name)
+			continue
+		}
+
+		// We'll now try to convert these attributes in isolation and
+		// see if we have a nested conversion error to report.
+		// We'll try an unsafe conversion first, and then fall back on
+		// safe if unsafe is impossible.
+
+		// If we already have an unsafe mismatch attr error then we won't bother
+		// hunting for another one.
+		if unsafeMismatchAttr != "" {
+			continue
+		}
+		if conv := GetConversionUnsafe(gotAty, wantAty); conv == nil {
+			unsafeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty))
+		}
+
+		// If we already have a safe mismatch attr error then we won't bother
+		// hunting for another one.
+		if safeMismatchAttr != "" {
+			continue
+		}
+		if conv := GetConversion(gotAty, wantAty); conv == nil {
+			safeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty))
+		}
+	}
+
+	// We should now have collected at least one problem. If we have more than
+	// one then we'll use our preference order to decide what is most important
+	// to report.
+	switch {
+
+	case len(missingAttrs) != 0:
+		sort.Strings(missingAttrs)
+		switch len(missingAttrs) {
+		case 1:
+			return fmt.Sprintf("attribute %q is required", missingAttrs[0])
+		case 2:
+			return fmt.Sprintf("attributes %q and %q are required", missingAttrs[0], missingAttrs[1])
+		default:
+			var buf bytes.Buffer
+			for _, name := range missingAttrs[:len(missingAttrs)-1] {
+				fmt.Fprintf(&buf, "%q, ", name)
+			}
+			fmt.Fprintf(&buf, "and %q", missingAttrs[len(missingAttrs)-1])
+			return fmt.Sprintf("attributes %s are required", buf.Bytes())
+		}
+
+	case unsafeMismatchAttr != "":
+		return unsafeMismatchAttr
+
+	case safeMismatchAttr != "":
+		return safeMismatchAttr
+
+	default:
+		// We should never get here, but if we do then we'll return
+		// just a generic message.
+		return "incorrect object attributes"
+	}
+}
+
+func mismatchMessageCollectionsFromStructural(got, want cty.Type) string {
+	// First some straightforward cases where the kind is just altogether wrong.
+	switch {
+	case want.IsListType() && !got.IsTupleType():
+		return want.FriendlyNameForConstraint() + " required"
+	case want.IsSetType() && !got.IsTupleType():
+		return want.FriendlyNameForConstraint() + " required"
+	case want.IsMapType() && !got.IsObjectType():
+		return want.FriendlyNameForConstraint() + " required"
+	}
+
+	// If the kinds are matched well enough then we'll move on to checking
+	// individual elements.
+	wantEty := want.ElementType()
+	switch {
+	case got.IsTupleType():
+		for i, gotEty := range got.TupleElementTypes() {
+			if gotEty.Equals(wantEty) {
+				continue // exact match, so no problem
+			}
+			if conv := getConversion(gotEty, wantEty, true); conv != nil {
+				continue // conversion is available, so no problem
+			}
+			return fmt.Sprintf("element %d: %s", i, MismatchMessage(gotEty, wantEty))
+		}
+
+		// If we get down here then something weird is going on but we'll
+		// return a reasonable fallback message anyway.
+		return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint())
+
+	case got.IsObjectType():
+		for name, gotAty := range got.AttributeTypes() {
+			if gotAty.Equals(wantEty) {
+				continue // exact match, so no problem
+			}
+			if conv := getConversion(gotAty, wantEty, true); conv != nil {
+				continue // conversion is available, so no problem
+			}
+			return fmt.Sprintf("element %q: %s", name, MismatchMessage(gotAty, wantEty))
+		}
+
+		// If we get down here then something weird is going on but we'll
+		// return a reasonable fallback message anyway.
+		return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint())
+
+	default:
+		// Should not be possible to get here since we only call this function
+		// with got as structural types, but...
+		return want.FriendlyNameForConstraint() + " required"
+	}
+}
+
+func mismatchMessageCollectionsFromCollections(got, want cty.Type) string {
+	// First some straightforward cases where the kind is just altogether wrong.
+	switch {
+	case want.IsListType() && !(got.IsListType() || got.IsSetType()):
+		return want.FriendlyNameForConstraint() + " required"
+	case want.IsSetType() && !(got.IsListType() || got.IsSetType()):
+		return want.FriendlyNameForConstraint() + " required"
+	case want.IsMapType() && !got.IsMapType():
+		return want.FriendlyNameForConstraint() + " required"
+	}
+
+	// If the kinds are matched well enough then we'll check the element types.
+	gotEty := got.ElementType()
+	wantEty := want.ElementType()
+	noun := "element type"
+	switch {
+	case want.IsListType():
+		noun = "list element type"
+	case want.IsSetType():
+		noun = "set element type"
+	case want.IsMapType():
+		noun = "map element type"
+	}
+	return fmt.Sprintf("incorrect %s: %s", noun, MismatchMessage(gotEty, wantEty))
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/public.go b/vendor/github.com/hashicorp/go-cty/cty/convert/public.go
new file mode 100644
index 0000000000..3b50a69264
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/convert/public.go
@@ -0,0 +1,83 @@
+package convert
+
+import (
+	"errors"
+
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// This file contains the public interface of this package, which is intended
+// to be a small, convenient interface designed for easy integration into
+// a hypothetical language type checker and interpreter.
+
+// Conversion is a named function type representing a conversion from a
+// value of one type to a value of another type.
+//
+// The source type for a conversion is always the source type given to
+// the function that returned the Conversion, but there is no way to recover
+// that from a Conversion value itself. If a Conversion is given a value
+// that is not of its expected type (with the exception of DynamicPseudoType,
+// which is always supported) then the function may panic or produce undefined
+// results.
+type Conversion func(in cty.Value) (out cty.Value, err error)
+
+// GetConversion returns a Conversion between the given in and out Types if
+// a safe one is available, or returns nil otherwise.
+func GetConversion(in cty.Type, out cty.Type) Conversion {
+	return retConversion(getConversion(in, out, false))
+}
+
+// GetConversionUnsafe returns a Conversion between the given in and out Types
+// if either a safe or unsafe one is available, or returns nil otherwise.
+func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion {
+	return retConversion(getConversion(in, out, true))
+}
+
+// Convert returns the result of converting the given value to the given type
+// if a safe or unsafe conversion is available, or returns an error if such a
+// conversion is impossible.
+//
+// This is a convenience wrapper around calling GetConversionUnsafe and then
+// immediately passing the given value to the resulting function.
+func Convert(in cty.Value, want cty.Type) (cty.Value, error) {
+	if in.Type().Equals(want) {
+		return in, nil
+	}
+
+	conv := GetConversionUnsafe(in.Type(), want)
+	if conv == nil {
+		return cty.NilVal, errors.New(MismatchMessage(in.Type(), want))
+	}
+	return conv(in)
+}
+
+// Unify attempts to find the most general type that can be converted from
+// all of the given types. If this is possible, that type is returned along
+// with a slice of necessary conversions for some of the given types.
+//
+// If no common supertype can be found, this function returns cty.NilType and
+// a nil slice.
+//
+// If a common supertype *can* be found, the returned slice will always be
+// non-nil and will contain a non-nil conversion for each given type that
+// needs to be converted, with indices corresponding to the input slice.
+// Any given type that does *not* need conversion (because it is already of
+// the appropriate type) will have a nil Conversion.
+//
+// cty.DynamicPseudoType is, as usual, a special case. If the given type list
+// contains a mixture of dynamic and non-dynamic types, the dynamic types are
+// disregarded for type selection and a conversion is returned for them that
+// will attempt a late conversion of the given value to the target type,
+// failing with a conversion error if the eventual concrete type is not
+// compatible. If *all* given types are DynamicPseudoType, the returned type
+// is itself cty.DynamicPseudoType and no conversions are attempted. In the
+// degenerate case of an empty slice of types, the result is cty.NilType with
+// no conversions, just as in the "no common supertype" failure case.
+func Unify(types []cty.Type) (cty.Type, []Conversion) {
+	return unify(types, false)
+}
+
+// UnifyUnsafe is the same as Unify except that it may return unsafe
+// conversions in situations where a safe conversion isn't also available.
+func UnifyUnsafe(types []cty.Type) (cty.Type, []Conversion) {
+	return unify(types, true)
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go b/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go
new file mode 100644
index 0000000000..8a9c327668
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go
@@ -0,0 +1,69 @@
+package convert
+
+import (
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// sortTypes produces an ordering of the given types that serves as a
+// preference order for the result of unification of the given types.
+// The return value is a slice of indices into the given slice, and will
+// thus always be the same length as the given slice.
+//
+// The goal is that the most general of the given types will appear first
+// in the ordering. If there are uncomparable pairs of types in the list
+// then they will appear in an undefined order, and the unification pass
+// will presumably then fail.
+func sortTypes(tys []cty.Type) []int {
+	l := len(tys)
+
+	// First we build a graph whose edges represent "more general than",
+	// which we will then do a topological sort of.
+	edges := make([][]int, l)
+	for i := 0; i < (l - 1); i++ {
+		for j := i + 1; j < l; j++ {
+			cmp := compareTypes(tys[i], tys[j])
+			switch {
+			case cmp < 0:
+				edges[i] = append(edges[i], j)
+			case cmp > 0:
+				edges[j] = append(edges[j], i)
+			}
+		}
+	}
+
+	// Compute the in-degree of each node
+	inDegree := make([]int, l)
+	for _, outs := range edges {
+		for _, j := range outs {
+			inDegree[j]++
+		}
+	}
+
+	// The array backing our result will double as our queue for visiting
+	// the nodes, with the queue slice moving along this array until it
+	// is empty and positioned at the end of the array. Thus our visiting
+	// order is also our result order.
+	result := make([]int, l)
+	queue := result[0:0]
+
+	// Initialize the queue with any item of in-degree 0, preserving
+	// their relative order.
+ for i, n := range inDegree { + if n == 0 { + queue = append(queue, i) + } + } + + for len(queue) != 0 { + i := queue[0] + queue = queue[1:] + for _, j := range edges[i] { + inDegree[j]-- + if inDegree[j] == 0 { + queue = append(queue, j) + } + } + } + + return result +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go b/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go new file mode 100644 index 0000000000..b2a3bbe54e --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go @@ -0,0 +1,357 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// The current unify implementation is somewhat inefficient, but we accept this +// under the assumption that it will generally be used with small numbers of +// types and with types of reasonable complexity. However, it does have a +// "happy path" where all of the given types are equal. +// +// This function is likely to have poor performance in cases where any given +// types are very complex (lots of deeply-nested structures) or if the list +// of types itself is very large. In particular, it will walk the nested type +// structure under the given types several times, especially when given a +// list of types for which unification is not possible, since each permutation +// will be tried to determine that result. +func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + if len(types) == 0 { + // Degenerate case + return cty.NilType, nil + } + + // If all of the given types are of the same structural kind, we may be + // able to construct a new type that they can all be unified to, even if + // that is not one of the given types. We must try this before the general + // behavior below because in unsafe mode we can convert an object type to + // a subset of that type, which would be a much less useful conversion for + // unification purposes. + { + mapCt := 0 + objectCt := 0 + tupleCt := 0 + dynamicCt := 0 + for _, ty := range types { + switch { + case ty.IsMapType(): + mapCt++ + case ty.IsObjectType(): + objectCt++ + case ty.IsTupleType(): + tupleCt++ + case ty == cty.DynamicPseudoType: + dynamicCt++ + default: + break + } + } + switch { + case mapCt > 0 && (mapCt+dynamicCt) == len(types): + return unifyMapTypes(types, unsafe, dynamicCt > 0) + case objectCt > 0 && (objectCt+dynamicCt) == len(types): + return unifyObjectTypes(types, unsafe, dynamicCt > 0) + case tupleCt > 0 && (tupleCt+dynamicCt) == len(types): + return unifyTupleTypes(types, unsafe, dynamicCt > 0) + case objectCt > 0 && tupleCt > 0: + // Can never unify object and tuple types since they have incompatible kinds + return cty.NilType, nil + } + } + + prefOrder := sortTypes(types) + + // sortTypes gives us an order where earlier items are preferable as + // our result type. We'll now walk through these and choose the first + // one we encounter for which conversions exist for all source types. 
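+	// For example, unifying cty.Number with cty.String tries cty.String
+	// first (sortTypes ranks it as more general); number converts to string
+	// safely, so the result is cty.String with a conversion recorded for
+	// the number input and a nil conversion for the string input.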
+	conversions := make([]Conversion, len(types))
+Preferences:
+	for _, wantTypeIdx := range prefOrder {
+		wantType := types[wantTypeIdx]
+		for i, tryType := range types {
+			if i == wantTypeIdx {
+				// Don't need to convert our wanted type to itself
+				conversions[i] = nil
+				continue
+			}
+
+			if tryType.Equals(wantType) {
+				conversions[i] = nil
+				continue
+			}
+
+			if unsafe {
+				conversions[i] = GetConversionUnsafe(tryType, wantType)
+			} else {
+				conversions[i] = GetConversion(tryType, wantType)
+			}
+
+			if conversions[i] == nil {
+				// wantType is not a suitable unification type, so we'll
+				// try the next one in our preference order.
+				continue Preferences
+			}
+		}
+
+		return wantType, conversions
+	}
+
+	// If we fall out here, no unification is possible
+	return cty.NilType, nil
+}
+
+func unifyMapTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
+	// If we had any dynamic types in the input here then we can't predict
+	// what path we'll take through here once these become known types, so
+	// we'll conservatively produce DynamicVal for these.
+	if hasDynamic {
+		return unifyAllAsDynamic(types)
+	}
+
+	elemTypes := make([]cty.Type, 0, len(types))
+	for _, ty := range types {
+		elemTypes = append(elemTypes, ty.ElementType())
+	}
+	retElemType, _ := unify(elemTypes, unsafe)
+	if retElemType == cty.NilType {
+		return cty.NilType, nil
+	}
+
+	retTy := cty.Map(retElemType)
+
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify
+			return cty.NilType, nil
+		}
+	}
+
+	return retTy, conversions
+}
+
+func unifyObjectTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
+	// If we had any dynamic types in the input here then we can't predict
+	// what path we'll take through here once these become known types, so
+	// we'll conservatively produce DynamicVal for these.
+	if hasDynamic {
+		return unifyAllAsDynamic(types)
+	}
+
+	// There are two different ways we can succeed here:
+	// - If all of the given object types have the same set of attribute names
+	//   and the corresponding types are all unifiable, then we construct that
+	//   type.
+	// - If the given object types have different attribute names or their
+	//   corresponding types are not unifiable, we'll instead try to unify
+	//   all of the attribute types together to produce a map type.
+	//
+	// Our unification behavior is intentionally stricter than our conversion
+	// behavior for subset object types because user intent is different with
+	// unification use-cases: it makes sense to allow {"foo":true} to convert
+	// to cty.EmptyObjectVal, but unifying an object with an attribute with
+	// the empty object type should be an error because unifying to the empty
+	// object type would be surprising and useless.
+
+	firstAttrs := types[0].AttributeTypes()
+	for _, ty := range types[1:] {
+		thisAttrs := ty.AttributeTypes()
+		if len(thisAttrs) != len(firstAttrs) {
+			// If number of attributes is different then there can be no
+			// object type in common.
+			return unifyObjectTypesToMap(types, unsafe)
+		}
+		for name := range thisAttrs {
+			if _, ok := firstAttrs[name]; !ok {
+				// If attribute names don't exactly match then there can be
+				// no object type in common.
+				return unifyObjectTypesToMap(types, unsafe)
+			}
+		}
+	}
+
+	// If we get here then we've proven that all of the given object types
+	// have exactly the same set of attribute names, though the types may
+	// differ.
+	retAtys := make(map[string]cty.Type)
+	atysAcross := make([]cty.Type, len(types))
+	for name := range firstAttrs {
+		for i, ty := range types {
+			atysAcross[i] = ty.AttributeType(name)
+		}
+		retAtys[name], _ = unify(atysAcross, unsafe)
+		if retAtys[name] == cty.NilType {
+			// Cannot unify this attribute alone, which means that unification
+			// of everything down to a map type can't be possible either.
+			return cty.NilType, nil
+		}
+	}
+	retTy := cty.Object(retAtys)
+
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify
+			return unifyObjectTypesToMap(types, unsafe)
+		}
+	}
+
+	return retTy, conversions
+}
+
+func unifyObjectTypesToMap(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
+	// This is our fallback case for unifyObjectTypes, where we see if we can
+	// construct a map type that can accept all of the attribute types.
+
+	var atys []cty.Type
+	for _, ty := range types {
+		for _, aty := range ty.AttributeTypes() {
+			atys = append(atys, aty)
+		}
+	}
+
+	ety, _ := unify(atys, unsafe)
+	if ety == cty.NilType {
+		return cty.NilType, nil
+	}
+
+	retTy := cty.Map(ety)
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			return cty.NilType, nil
+		}
+	}
+	return retTy, conversions
+}
+
+func unifyTupleTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
+	// If we had any dynamic types in the input here then we can't predict
+	// what path we'll take through here once these become known types, so
+	// we'll conservatively produce DynamicVal for these.
+	if hasDynamic {
+		return unifyAllAsDynamic(types)
+	}
+
+	// There are two different ways we can succeed here:
+	// - If all of the given tuple types have the same sequence of element types
+	//   and the corresponding types are all unifiable, then we construct that
+	//   type.
+	// - If the given tuple types have different element types or their
+	//   corresponding types are not unifiable, we'll instead try to unify
+	//   all of the element types together to produce a list type.
+
+	firstEtys := types[0].TupleElementTypes()
+	for _, ty := range types[1:] {
+		thisEtys := ty.TupleElementTypes()
+		if len(thisEtys) != len(firstEtys) {
+			// If number of elements is different then there can be no
+			// tuple type in common.
+			return unifyTupleTypesToList(types, unsafe)
+		}
+	}
+
+	// If we get here then we've proven that all of the given tuple types
+	// have the same number of elements, though the types may differ.
+	retEtys := make([]cty.Type, len(firstEtys))
+	atysAcross := make([]cty.Type, len(types))
+	for idx := range firstEtys {
+		for tyI, ty := range types {
+			atysAcross[tyI] = ty.TupleElementTypes()[idx]
+		}
+		retEtys[idx], _ = unify(atysAcross, unsafe)
+		if retEtys[idx] == cty.NilType {
+			// Cannot unify this element alone, which means that unification
+			// of everything down to a list type can't be possible either.
+			return cty.NilType, nil
+		}
+	}
+	retTy := cty.Tuple(retEtys)
+
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify
+			return unifyTupleTypesToList(types, unsafe)
+		}
+	}
+
+	return retTy, conversions
+}
+
+func unifyTupleTypesToList(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
+	// This is our fallback case for unifyTupleTypes, where we see if we can
+	// construct a list type that can accept all of the element types.
+
+	var etys []cty.Type
+	for _, ty := range types {
+		for _, ety := range ty.TupleElementTypes() {
+			etys = append(etys, ety)
+		}
+	}
+
+	ety, _ := unify(etys, unsafe)
+	if ety == cty.NilType {
+		return cty.NilType, nil
+	}
+
+	retTy := cty.List(ety)
+	conversions := make([]Conversion, len(types))
+	for i, ty := range types {
+		if ty.Equals(retTy) {
+			continue
+		}
+		if unsafe {
+			conversions[i] = GetConversionUnsafe(ty, retTy)
+		} else {
+			conversions[i] = GetConversion(ty, retTy)
+		}
+		if conversions[i] == nil {
+			// Shouldn't be reachable, since we were able to unify; the
+			// object-to-map fallback would be wrong for tuples, so we
+			// fail outright instead.
+			return cty.NilType, nil
+		}
+	}
+	return retTy, conversions
+}
+
+func unifyAllAsDynamic(types []cty.Type) (cty.Type, []Conversion) {
+	conversions := make([]Conversion, len(types))
+	for i := range conversions {
+		conversions[i] = func(cty.Value) (cty.Value, error) {
+			return cty.DynamicVal, nil
+		}
+	}
+	return cty.DynamicPseudoType, conversions
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/doc.go b/vendor/github.com/hashicorp/go-cty/cty/doc.go
new file mode 100644
index 0000000000..d31f0547bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/doc.go
@@ -0,0 +1,18 @@
+// Package cty (pronounced see-tie) provides some infrastructure for a type
+// system that might be useful for applications that need to represent
+// configuration values provided by the user whose types are not known
+// at compile time, particularly if the calling application also allows
+// such values to be used in expressions.
+//
+// The type system consists of primitive types Number, String and Bool, as
+// well as List and Map collection types and Object types that can have
+// arbitrarily-typed sets of attributes.
+//
+// A set of operations is defined on these types, which is accessible via
+// the wrapper struct Value, which annotates the raw, internal representation
+// of a value with its corresponding type.
+//
+// This package is oriented towards being a building block for configuration
+// languages used to bootstrap an application. It is not optimized for use
+// in tight loops where CPU time or memory pressure are a concern.
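+//
+// A brief illustrative example (assumed typical usage):
+//
+//	v := cty.ObjectVal(map[string]cty.Value{
+//		"name":  cty.StringVal("example"),
+//		"count": cty.NumberIntVal(2),
+//	})
+//	v.GetAttr("name") // cty.StringVal("example")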
+package cty diff --git a/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go b/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go new file mode 100644 index 0000000000..31567e766a --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go @@ -0,0 +1,194 @@ +package cty + +import ( + "sort" + + "github.com/hashicorp/go-cty/cty/set" +) + +// ElementIterator is the interface type returned by Value.ElementIterator to +// allow the caller to iterate over elements of a collection-typed value. +// +// Its usage pattern is as follows: +// +// it := val.ElementIterator() +// for it.Next() { +// key, val := it.Element() +// // ... +// } +type ElementIterator interface { + Next() bool + Element() (key Value, value Value) +} + +func canElementIterator(val Value) bool { + switch { + case val.IsMarked(): + return false + case val.ty.IsListType(): + return true + case val.ty.IsMapType(): + return true + case val.ty.IsSetType(): + return true + case val.ty.IsTupleType(): + return true + case val.ty.IsObjectType(): + return true + default: + return false + } +} + +func elementIterator(val Value) ElementIterator { + val.assertUnmarked() + switch { + case val.ty.IsListType(): + return &listElementIterator{ + ety: val.ty.ElementType(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsMapType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same input map. + rawMap := val.v.(map[string]interface{}) + keys := make([]string, 0, len(rawMap)) + for key := range rawMap { + keys = append(keys, key) + } + sort.Strings(keys) + + return &mapElementIterator{ + ety: val.ty.ElementType(), + vals: rawMap, + keys: keys, + idx: -1, + } + case val.ty.IsSetType(): + rawSet := val.v.(set.Set) + return &setElementIterator{ + ety: val.ty.ElementType(), + setIt: rawSet.Iterator(), + } + case val.ty.IsTupleType(): + return &tupleElementIterator{ + etys: val.ty.TupleElementTypes(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsObjectType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same object type. 
+		atys := val.ty.AttributeTypes()
+		keys := make([]string, 0, len(atys))
+		for key := range atys {
+			keys = append(keys, key)
+		}
+		sort.Strings(keys)
+
+		return &objectElementIterator{
+			atys:      atys,
+			vals:      val.v.(map[string]interface{}),
+			attrNames: keys,
+			idx:       -1,
+		}
+	default:
+		panic("attempt to iterate on non-collection, non-tuple type")
+	}
+}
+
+type listElementIterator struct {
+	ety  Type
+	vals []interface{}
+	idx  int
+}
+
+func (it *listElementIterator) Element() (Value, Value) {
+	i := it.idx
+	return NumberIntVal(int64(i)), Value{
+		ty: it.ety,
+		v:  it.vals[i],
+	}
+}
+
+func (it *listElementIterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.vals)
+}
+
+type mapElementIterator struct {
+	ety  Type
+	vals map[string]interface{}
+	keys []string
+	idx  int
+}
+
+func (it *mapElementIterator) Element() (Value, Value) {
+	key := it.keys[it.idx]
+	return StringVal(key), Value{
+		ty: it.ety,
+		v:  it.vals[key],
+	}
+}
+
+func (it *mapElementIterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.keys)
+}
+
+type setElementIterator struct {
+	ety   Type
+	setIt *set.Iterator
+}
+
+func (it *setElementIterator) Element() (Value, Value) {
+	val := Value{
+		ty: it.ety,
+		v:  it.setIt.Value(),
+	}
+	return val, val
+}
+
+func (it *setElementIterator) Next() bool {
+	return it.setIt.Next()
+}
+
+type tupleElementIterator struct {
+	etys []Type
+	vals []interface{}
+	idx  int
+}
+
+func (it *tupleElementIterator) Element() (Value, Value) {
+	i := it.idx
+	return NumberIntVal(int64(i)), Value{
+		ty: it.etys[i],
+		v:  it.vals[i],
+	}
+}
+
+func (it *tupleElementIterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.vals)
+}
+
+type objectElementIterator struct {
+	atys      map[string]Type
+	vals      map[string]interface{}
+	attrNames []string
+	idx       int
+}
+
+func (it *objectElementIterator) Element() (Value, Value) {
+	key := it.attrNames[it.idx]
+	return StringVal(key), Value{
+		ty: it.atys[key],
+		v:  it.vals[key],
+	}
+}
+
+func (it *objectElementIterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.attrNames)
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/error.go b/vendor/github.com/hashicorp/go-cty/cty/error.go
new file mode 100644
index 0000000000..dd139f7249
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/error.go
@@ -0,0 +1,55 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// PathError is a specialization of error that represents where in a
+// potentially-deep data structure an error occurred, using a Path.
+type PathError struct {
+	error
+	Path Path
+}
+
+func errorf(path Path, f string, args ...interface{}) error {
+	// We need to copy the Path because often our caller builds it by
+	// continually mutating the same underlying buffer.
+	sPath := make(Path, len(path))
+	copy(sPath, path)
+	return PathError{
+		error: fmt.Errorf(f, args...),
+		Path:  sPath,
+	}
+}
+
+// NewErrorf creates a new PathError for the current path by passing the
+// given format and arguments to fmt.Errorf and then wrapping the result
+// similarly to NewError.
+func (p Path) NewErrorf(f string, args ...interface{}) error {
+	return errorf(p, f, args...)
+}
+
+// NewError creates a new PathError for the current path, wrapping the given
+// error.
+func (p Path) NewError(err error) error {
+	// if we're being asked to wrap an existing PathError then our new
+	// PathError will be the concatenation of the two paths, ensuring
+	// that we still get a single flat PathError that's thus easier for
+	// callers to deal with.
+	perr, wrappingPath := err.(PathError)
+	pathLen := len(p)
+	if wrappingPath {
+		pathLen = pathLen + len(perr.Path)
+	}
+
+	sPath := make(Path, pathLen)
+	copy(sPath, p)
+	if wrappingPath {
+		copy(sPath[len(p):], perr.Path)
+	}
+
+	return PathError{
+		error: err,
+		Path:  sPath,
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/gob.go b/vendor/github.com/hashicorp/go-cty/cty/gob.go
new file mode 100644
index 0000000000..80929aa544
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/gob.go
@@ -0,0 +1,204 @@
+package cty
+
+import (
+	"bytes"
+	"encoding/gob"
+	"errors"
+	"fmt"
+	"math/big"
+
+	"github.com/hashicorp/go-cty/cty/set"
+)
+
+// GobEncode is an implementation of the gob.GobEncoder interface, which
+// allows Values to be included in structures encoded with encoding/gob.
+//
+// Currently it is not possible to represent values of capsule types in gob,
+// because the types themselves cannot be represented.
+func (val Value) GobEncode() ([]byte, error) {
+	if val.IsMarked() {
+		return nil, errors.New("value is marked")
+	}
+
+	buf := &bytes.Buffer{}
+	enc := gob.NewEncoder(buf)
+
+	gv := gobValue{
+		Version: 0,
+		Ty:      val.ty,
+		V:       val.v,
+	}
+
+	err := enc.Encode(gv)
+	if err != nil {
+		return nil, fmt.Errorf("error encoding cty.Value: %s", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+// GobDecode is an implementation of the gob.GobDecoder interface, which
+// inverts the operation performed by GobEncode. See the documentation of
+// GobEncode for considerations when using cty.Value instances with gob.
+func (val *Value) GobDecode(buf []byte) error {
+	r := bytes.NewReader(buf)
+	dec := gob.NewDecoder(r)
+
+	var gv gobValue
+	err := dec.Decode(&gv)
+	if err != nil {
+		return fmt.Errorf("error decoding cty.Value: %s", err)
+	}
+	if gv.Version != 0 {
+		return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version)
+	}
+
+	// Because big.Float.GobEncode is implemented with a pointer receiver,
+	// gob encoding of an interface{} containing a *big.Float value does not
+	// round-trip correctly, emerging instead as a non-pointer big.Float.
+	// The rest of cty expects all number values to be represented by
+	// *big.Float, so we'll fix that up here.
+	gv.V = gobDecodeFixNumberPtr(gv.V, gv.Ty)
+
+	val.ty = gv.Ty
+	val.v = gv.V
+
+	return nil
+}
+
+// GobEncode is an implementation of the gob.GobEncoder interface, which
+// allows Types to be included in structures encoded with encoding/gob.
+//
+// Currently it is not possible to represent capsule types in gob.
+func (t Type) GobEncode() ([]byte, error) {
+	buf := &bytes.Buffer{}
+	enc := gob.NewEncoder(buf)
+
+	gt := gobType{
+		Version: 0,
+		Impl:    t.typeImpl,
+	}
+
+	err := enc.Encode(gt)
+	if err != nil {
+		return nil, fmt.Errorf("error encoding cty.Type: %s", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+// GobDecode is an implementation of the gob.GobDecoder interface, which
+// reverses the encoding performed by GobEncode to allow types to be recovered
+// from gob buffers.
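+//
+// Round-trip sketch (assumed usage; error handling elided):
+//
+//	var buf bytes.Buffer
+//	_ = gob.NewEncoder(&buf).Encode(cty.List(cty.String))
+//	var ty cty.Type
+//	_ = gob.NewDecoder(&buf).Decode(&ty) // ty.Equals(cty.List(cty.String))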
+func (t *Type) GobDecode(buf []byte) error {
+	r := bytes.NewReader(buf)
+	dec := gob.NewDecoder(r)
+
+	var gt gobType
+	err := dec.Decode(&gt)
+	if err != nil {
+		return fmt.Errorf("error decoding cty.Type: %s", err)
+	}
+	if gt.Version != 0 {
+		return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version)
+	}
+
+	t.typeImpl = gt.Impl
+
+	return nil
+}
+
+// Capsule types cannot currently be gob-encoded, because they rely on pointer
+// equality and we have no way to recover the original pointer on decode.
+func (t *capsuleType) GobEncode() ([]byte, error) {
+	return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName(friendlyTypeName))
+}
+
+func (t *capsuleType) GobDecode() ([]byte, error) {
+	return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName(friendlyTypeName))
+}
+
+type gobValue struct {
+	Version int
+	Ty      Type
+	V       interface{}
+}
+
+type gobType struct {
+	Version int
+	Impl    typeImpl
+}
+
+type gobCapsuleTypeImpl struct {
+}
+
+// gobDecodeFixNumberPtr fixes an unfortunate quirk of round-tripping cty.Number
+// values through gob: the big.Float.GobEncode method is implemented on a
+// pointer receiver, and so it loses the "pointer-ness" of the value on
+// encode, causing the values to emerge the other end as big.Float rather than
+// *big.Float as we expect elsewhere in cty.
+//
+// The implementation of gobDecodeFixNumberPtr mutates the given raw value
+// during its work, and may either return the same value mutated or a new
+// value. Callers must no longer use whatever value they pass as "raw" after
+// this function is called.
+func gobDecodeFixNumberPtr(raw interface{}, ty Type) interface{} {
+	// Unfortunately we need to work recursively here because number values
+	// might be embedded in structural or collection type values.
+
+	switch {
+	case ty.Equals(Number):
+		if bf, ok := raw.(big.Float); ok {
+			return &bf // wrap in pointer
+		}
+	case ty.IsMapType() && ty.ElementType().Equals(Number):
+		if m, ok := raw.(map[string]interface{}); ok {
+			for k, v := range m {
+				m[k] = gobDecodeFixNumberPtr(v, ty.ElementType())
+			}
+		}
+	case ty.IsListType() && ty.ElementType().Equals(Number):
+		if s, ok := raw.([]interface{}); ok {
+			for i, v := range s {
+				s[i] = gobDecodeFixNumberPtr(v, ty.ElementType())
+			}
+		}
+	case ty.IsSetType() && ty.ElementType().Equals(Number):
+		if s, ok := raw.(set.Set); ok {
+			newS := set.NewSet(s.Rules())
+			for it := s.Iterator(); it.Next(); {
+				newV := gobDecodeFixNumberPtr(it.Value(), ty.ElementType())
+				newS.Add(newV)
+			}
+			return newS
+		}
+	case ty.IsObjectType():
+		if m, ok := raw.(map[string]interface{}); ok {
+			for k, v := range m {
+				aty := ty.AttributeType(k)
+				m[k] = gobDecodeFixNumberPtr(v, aty)
+			}
+		}
+	case ty.IsTupleType():
+		if s, ok := raw.([]interface{}); ok {
+			for i, v := range s {
+				ety := ty.TupleElementType(i)
+				s[i] = gobDecodeFixNumberPtr(v, ety)
+			}
+		}
+	}
+
+	return raw
+}
+
+// gobDecodeFixNumberPtrVal is a helper wrapper around gobDecodeFixNumberPtr
+// that works with already-constructed values. This is primarily for testing,
+// to fix up intentionally-invalid number values for the parts of the test
+// code that need them to be valid, such as calling GoString on them.
+func gobDecodeFixNumberPtrVal(v Value) Value { + raw := gobDecodeFixNumberPtr(v.v, v.ty) + return Value{ + v: raw, + ty: v.ty, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go new file mode 100644 index 0000000000..a5177d22b2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go @@ -0,0 +1,7 @@ +// Package gocty deals with converting between cty Values and native go +// values. +// +// It operates under a similar principle to the encoding/json and +// encoding/xml packages in the standard library, using reflection to +// populate native Go data structures from cty values and vice-versa. +package gocty diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go new file mode 100644 index 0000000000..0677a07947 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go @@ -0,0 +1,43 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/set" +) + +var valueType = reflect.TypeOf(cty.Value{}) +var typeType = reflect.TypeOf(cty.Type{}) + +var setType = reflect.TypeOf(set.Set{}) + +var bigFloatType = reflect.TypeOf(big.Float{}) +var bigIntType = reflect.TypeOf(big.Int{}) + +var emptyInterfaceType = reflect.TypeOf(interface{}(nil)) + +var stringType = reflect.TypeOf("") + +// structTagIndices interrogates the fields of the given type (which must +// be a struct type, or we'll panic) and returns a map from the cty +// attribute names declared via struct tags to the indices of the +// fields holding those tags. +// +// This function will panic if two fields within the struct are tagged with +// the same cty attribute name. +func structTagIndices(st reflect.Type) map[string]int { + ct := st.NumField() + ret := make(map[string]int, ct) + + for i := 0; i < ct; i++ { + field := st.Field(i) + attrName := field.Tag.Get("cty") + if attrName != "" { + ret[attrName] = i + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go new file mode 100644 index 0000000000..fc35c16920 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go @@ -0,0 +1,548 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" + "github.com/hashicorp/go-cty/cty/set" +) + +// ToCtyValue produces a cty.Value from a Go value. The result will conform +// to the given type, or an error will be returned if this is not possible. +// +// The target type serves as a hint to resolve ambiguities in the mapping. +// For example, the Go type set.Set tells us that the value is a set but +// does not describe the set's element type. This also allows for convenient +// conversions, such as populating a set from a slice rather than having to +// first explicitly instantiate a set.Set. +// +// The audience of this function is assumed to be the developers of Go code +// that is integrating with cty, and thus the error messages it returns are +// presented from Go's perspective. These messages are thus not appropriate +// for display to end-users. An error returned from ToCtyValue represents a +// bug in the calling program, not user error. 
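+//
+// A minimal usage sketch (illustrative only, not part of the upstream file):
+//
+//	v, err := gocty.ToCtyValue([]string{"a", "b"}, cty.List(cty.String))
+//	// on success, v is a cty.ListVal containing cty.StringVal("a")
+//	// and cty.StringVal("b")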
+func ToCtyValue(val interface{}, ty cty.Type) (cty.Value, error) { + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time toCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given Type is. + path := make(cty.Path, 0) + return toCtyValue(reflect.ValueOf(val), ty, path) +} + +func toCtyValue(val reflect.Value, ty cty.Type, path cty.Path) (cty.Value, error) { + if val != (reflect.Value{}) && val.Type().AssignableTo(valueType) { + // If the source value is a cty.Value then we'll try to just pass + // through to the target type directly. + return toCtyPassthrough(val, ty, path) + } + + switch ty { + case cty.Bool: + return toCtyBool(val, path) + case cty.Number: + return toCtyNumber(val, path) + case cty.String: + return toCtyString(val, path) + case cty.DynamicPseudoType: + return toCtyDynamic(val, path) + } + + switch { + case ty.IsListType(): + return toCtyList(val, ty.ElementType(), path) + case ty.IsMapType(): + return toCtyMap(val, ty.ElementType(), path) + case ty.IsSetType(): + return toCtySet(val, ty.ElementType(), path) + case ty.IsObjectType(): + return toCtyObject(val, ty.AttributeTypes(), path) + case ty.IsTupleType(): + return toCtyTuple(val, ty.TupleElementTypes(), path) + case ty.IsCapsuleType(): + return toCtyCapsule(val, ty, path) + } + + // We should never fall out here + return cty.NilVal, path.NewErrorf("unsupported target type %#v", ty) +} + +func toCtyBool(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Bool), nil + } + + switch val.Kind() { + + case reflect.Bool: + return cty.BoolVal(val.Bool()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to bool", val.Kind()) + + } + +} + +func toCtyNumber(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Number), nil + } + + switch val.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.NumberIntVal(val.Int()), nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.NumberUIntVal(val.Uint()), nil + + case reflect.Float32, reflect.Float64: + return cty.NumberFloatVal(val.Float()), nil + + case reflect.Struct: + if val.Type().AssignableTo(bigIntType) { + bigInt := val.Interface().(big.Int) + bigFloat := (&big.Float{}).SetInt(&bigInt) + val = reflect.ValueOf(*bigFloat) + } + + if val.Type().AssignableTo(bigFloatType) { + bigFloat := val.Interface().(big.Float) + return cty.NumberVal(&bigFloat), nil + } + + fallthrough + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to number", val.Kind()) + + } + +} + +func toCtyString(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.String), nil + } + + switch val.Kind() { + + case reflect.String: + return cty.StringVal(val.String()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to string", val.Kind()) + + } + +} + +func toCtyList(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.List(ety)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.List(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { 
+ return cty.ListValEmpty(ety), nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, val.Len()) + for i := range vals { + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ListVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.List(ety)) + + } +} + +func toCtyMap(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Map(ety)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Map(ety)), nil + } + + if val.Len() == 0 { + return cty.MapValEmpty(ety), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make(map[string]cty.Value, val.Len()) + for _, kv := range val.MapKeys() { + k := kv.String() + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. 
+ path = path[:len(path)-1] + + return cty.MapVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Map(ety)) + + } +} + +func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Set(ety)), nil + } + + var vals []cty.Value + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Set(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, val.Len()) + for i := range vals { + var err error + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + case reflect.Struct: + + if !val.Type().AssignableTo(setType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety)) + } + + rawSet := val.Interface().(set.Set) + inVals := rawSet.Values() + + if len(inVals) == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, len(inVals)) + for i := range inVals { + var err error + vals[i], err = toCtyValue(reflect.ValueOf(inVals[i]), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Set(ety)) + + } + + return cty.SetVal(vals), nil +} + +func toCtyObject(val reflect.Value, attrTypes map[string]cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. + path = append(path, cty.PathStep(nil)) + + haveKeys := make(map[string]struct{}, val.Len()) + for _, kv := range val.MapKeys() { + haveKeys[kv.String()] = struct{}{} + } + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + var err error + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if _, have := haveKeys[k]; !have { + vals[k] = cty.NullVal(at) + continue + } + + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), at, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + case reflect.Struct: + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. 
+ path = append(path, cty.PathStep(nil)) + + attrFields := structTagIndices(val.Type()) + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if fieldIdx, have := attrFields[k]; have { + var err error + vals[k], err = toCtyValue(val.Field(fieldIdx), at, path) + if err != nil { + return cty.NilVal, err + } + } else { + vals[k] = cty.NullVal(at) + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Object(attrTypes)) + + } +} + +func toCtyTuple(val reflect.Value, elemTypes []cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + if val.Len() != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of elements %d; need %d", val.Len(), len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + case reflect.Struct: + fieldCount := val.Type().NumField() + if fieldCount != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of struct fields %d; need %d", fieldCount, len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Field(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. 
+ path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Tuple(elemTypes)) + + } +} + +func toCtyCapsule(val reflect.Value, capsuleType cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(capsuleType), nil + } + + if val.Kind() != reflect.Ptr { + if !val.CanAddr() { + return cty.NilVal, path.NewErrorf("source value for capsule %#v must be addressable", capsuleType) + } + + val = val.Addr() + } + + if !val.Type().Elem().AssignableTo(capsuleType.EncapsulatedType()) { + return cty.NilVal, path.NewErrorf("value of type %T not compatible with capsule %#v", val.Interface(), capsuleType) + } + + return cty.CapsuleVal(capsuleType, val.Interface()), nil +} + +func toCtyDynamic(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.DynamicPseudoType), nil + } + + switch val.Kind() { + + case reflect.Struct: + if !val.Type().AssignableTo(valueType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Type()) + } + + return val.Interface().(cty.Value), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Kind()) + + } + +} + +func toCtyPassthrough(wrappedVal reflect.Value, wantTy cty.Type, path cty.Path) (cty.Value, error) { + if wrappedVal = toCtyUnwrapPointer(wrappedVal); !wrappedVal.IsValid() { + return cty.NullVal(wantTy), nil + } + + givenVal := wrappedVal.Interface().(cty.Value) + + val, err := convert.Convert(givenVal, wantTy) + if err != nil { + return cty.NilVal, path.NewErrorf("unsuitable value: %s", err) + } + return val, nil +} + +// toCtyUnwrapPointer is a helper for dealing with Go pointers. It has three +// possible outcomes: +// +// - Given value isn't a pointer, so it's just returned as-is. +// - Given value is a non-nil pointer, in which case it is dereferenced +// and the result returned. +// - Given value is a nil pointer, in which case an invalid value is returned. +// +// For nested pointer types, like **int, they are all dereferenced in turn +// until a non-pointer value is found, or until a nil pointer is encountered. +func toCtyUnwrapPointer(val reflect.Value) reflect.Value { + for val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface { + if val.IsNil() { + return reflect.Value{} + } + + val = val.Elem() + } + + return val +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go new file mode 100644 index 0000000000..404faba18f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go @@ -0,0 +1,686 @@ +package gocty + +import ( + "math" + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty" +) + +// FromCtyValue assigns a cty.Value to a reflect.Value, which must be a pointer, +// using a fixed set of conversion rules. +// +// This function considers its audience to be the creator of the cty Value +// given, and thus the error messages it generates are (unlike with ToCtyValue) +// presented in cty terminology that is generally appropriate to return to +// end-users in applications where cty data structures are built from +// user-provided configuration. 
In particular this means that if incorrect +// target types are provided by the calling application the resulting error +// messages are likely to be confusing, since we assume that the given target +// type is correct and the cty.Value is where the error lies. +// +// If an error is returned, the target data structure may have been partially +// populated, but the degree to which this is true is an implementation +// detail that the calling application should not rely on. +// +// The function will panic if given a non-pointer as the Go value target, +// since that is considered to be a bug in the calling program. +func FromCtyValue(val cty.Value, target interface{}) error { + tVal := reflect.ValueOf(target) + if tVal.Kind() != reflect.Ptr { + panic("target value is not a pointer") + } + if tVal.IsNil() { + panic("target value is nil pointer") + } + + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time fromCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given cty.Value is. + path := make(cty.Path, 0) + return fromCtyValue(val, tVal, path) +} + +func fromCtyValue(val cty.Value, target reflect.Value, path cty.Path) error { + ty := val.Type() + + deepTarget := fromCtyPopulatePtr(target, false) + + // If we're decoding into a cty.Value then we just pass through the + // value as-is, to enable partial decoding. This is the only situation + // where unknown values are permitted. + if deepTarget.Kind() == reflect.Struct && deepTarget.Type().AssignableTo(valueType) { + deepTarget.Set(reflect.ValueOf(val)) + return nil + } + + // Lists and maps can be nil without indirection, but everything else + // requires a pointer and we set it immediately to nil. + // We also make an exception for capsule types because we want to handle + // pointers specially for these. + // (fromCtyList and fromCtyMap must therefore deal with val.IsNull, while + // other types can assume no nulls after this point.) + if val.IsNull() && !val.Type().IsListType() && !val.Type().IsMapType() && !val.Type().IsCapsuleType() { + target = fromCtyPopulatePtr(target, true) + if target.Kind() != reflect.Ptr { + return path.NewErrorf("null value is not allowed") + } + + target.Set(reflect.Zero(target.Type())) + return nil + } + + target = deepTarget + + if !val.IsKnown() { + return path.NewErrorf("value must be known") + } + + switch ty { + case cty.Bool: + return fromCtyBool(val, target, path) + case cty.Number: + return fromCtyNumber(val, target, path) + case cty.String: + return fromCtyString(val, target, path) + } + + switch { + case ty.IsListType(): + return fromCtyList(val, target, path) + case ty.IsMapType(): + return fromCtyMap(val, target, path) + case ty.IsSetType(): + return fromCtySet(val, target, path) + case ty.IsObjectType(): + return fromCtyObject(val, target, path) + case ty.IsTupleType(): + return fromCtyTuple(val, target, path) + case ty.IsCapsuleType(): + return fromCtyCapsule(val, target, path) + } + + // We should never fall out here; reaching here indicates a bug in this + // function. 
+ return path.NewErrorf("unsupported source type %#v", ty) +} + +func fromCtyBool(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Bool: + target.SetBool(val.True()) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumber(val cty.Value, target reflect.Value, path cty.Path) error { + bf := val.AsBigFloat() + + switch target.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fromCtyNumberInt(bf, target, path) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fromCtyNumberUInt(bf, target, path) + + case reflect.Float32, reflect.Float64: + return fromCtyNumberFloat(bf, target, path) + + case reflect.Struct: + return fromCtyNumberBig(bf, target, path) + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumberInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var min int64 + var max int64 + switch target.Type().Bits() { + case 8: + min = math.MinInt8 + max = math.MaxInt8 + case 16: + min = math.MinInt16 + max = math.MaxInt16 + case 32: + min = math.MinInt32 + max = math.MaxInt32 + case 64: + min = math.MinInt64 + max = math.MaxInt64 + default: + panic("weird number of bits in target int") + } + + iv, accuracy := bf.Int64() + if accuracy != big.Exact || iv < min || iv > max { + return path.NewErrorf("value must be a whole number, between %d and %d", min, max) + } + + target.SetInt(iv) + return nil +} + +func fromCtyNumberUInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var max uint64 + switch target.Type().Bits() { + case 8: + max = math.MaxUint8 + case 16: + max = math.MaxUint16 + case 32: + max = math.MaxUint32 + case 64: + max = math.MaxUint64 + default: + panic("weird number of bits in target uint") + } + + iv, accuracy := bf.Uint64() + if accuracy != big.Exact || iv > max { + return path.NewErrorf("value must be a whole number, between 0 and %d inclusive", max) + } + + target.SetUint(iv) + return nil +} + +func fromCtyNumberFloat(bf *big.Float, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.Float32, reflect.Float64: + fv, accuracy := bf.Float64() + if accuracy != big.Exact { + // We allow the precision to be truncated as part of our conversion, + // but we don't want to silently introduce infinities. + if math.IsInf(fv, 0) { + return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat64, math.MaxFloat64) + } + } + target.SetFloat(fv) + return nil + default: + panic("unsupported kind of float") + } +} + +func fromCtyNumberBig(bf *big.Float, target reflect.Value, path cty.Path) error { + switch { + + case bigFloatType.ConvertibleTo(target.Type()): + // Easy! 
+ target.Set(reflect.ValueOf(bf).Elem().Convert(target.Type())) + return nil + + case bigIntType.ConvertibleTo(target.Type()): + bi, accuracy := bf.Int(nil) + if accuracy != big.Exact { + return path.NewErrorf("value must be a whole number") + } + target.Set(reflect.ValueOf(bi).Elem().Convert(target.Type())) + return nil + + default: + return likelyRequiredTypesError(path, target) + } +} + +func fromCtyString(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.String: + target.SetString(val.AsString()) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyList(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a list of length %d", target.Len()) + } + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyMap(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Map: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + tv := reflect.MakeMap(target.Type()) + et := target.Type().Elem() + + path = append(path, nil) + + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + ks := key.AsString() + + targetElem := reflect.New(et) + err = fromCtyValue(val, targetElem, path) + + tv.SetMapIndex(reflect.ValueOf(ks), targetElem.Elem()) + + return err != nil + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtySet(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not 
allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a set of length %d", target.Len()) + } + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + return nil + + // TODO: decode into set.Set instance + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyObject(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + attrTypes := val.Type().AttributeTypes() + targetFields := structTagIndices(target.Type()) + + path = append(path, nil) + + for k, i := range targetFields { + if _, exists := attrTypes[k]; !exists { + // If the field in question isn't able to represent nil, + // that's an error. + fk := target.Field(i).Kind() + switch fk { + case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface: + // okay + default: + return path.NewErrorf("missing required attribute %q", k) + } + } + } + + for k := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + fieldIdx, exists := targetFields[k] + if !exists { + return path.NewErrorf("unsupported attribute %q", k) + } + + ev := val.GetAttr(k) + + targetField := target.Field(fieldIdx) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyTuple(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + elemTypes := val.Type().TupleElementTypes() + fieldCount := target.Type().NumField() + + if fieldCount != len(elemTypes) { + return path.NewErrorf("a tuple of %d elements is required", fieldCount) + } + + path = append(path, nil) + + for i := range elemTypes { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + ev := val.Index(cty.NumberIntVal(int64(i))) + + targetField := target.Field(i) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyCapsule(val cty.Value, target reflect.Value, path cty.Path) error { + + if target.Kind() == reflect.Ptr { + // Walk through indirection until we get to the last pointer, + // which we might set to null below. + target = fromCtyPopulatePtr(target, true) + + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + // Since a capsule contains a pointer to an object, we'll preserve + // that pointer on the way out and thus allow the caller to recover + // the original object, rather than a copy of it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Elem().Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. 
+ return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + target.Set(reflect.ValueOf(val.EncapsulatedValue())) + + return nil + } else { + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + // If our target isn't a pointer then we will attempt to copy + // the encapsulated value into it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. + return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + // We know that EncapsulatedValue is always a pointer, so we + // can safely call .Elem on its reflect.Value. + target.Set(reflect.ValueOf(val.EncapsulatedValue()).Elem()) + + return nil + } + +} + +// fromCtyPopulatePtr recognizes when target is a pointer type and allocates +// a value to assign to that pointer, which it returns. +// +// If the given value has multiple levels of indirection, like **int, these +// will be processed in turn so that the return value is guaranteed to be +// a non-pointer. +// +// As an exception, if decodingNull is true then the returned value will be +// the final level of pointer, if any, so that the caller can assign it +// as nil to represent a null value. If the given target value is not a pointer +// at all then the returned value will be just the given target, so the caller +// must test if the returned value is a pointer before trying to assign nil +// to it. +func fromCtyPopulatePtr(target reflect.Value, decodingNull bool) reflect.Value { + for { + if target.Kind() == reflect.Interface && !target.IsNil() { + e := target.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + target = e + } + } + + if target.Kind() != reflect.Ptr { + break + } + + // Stop early if we're decodingNull and we've found our last indirection + if target.Elem().Kind() != reflect.Ptr && decodingNull && target.CanSet() { + break + } + + if target.IsNil() { + target.Set(reflect.New(target.Type().Elem())) + } + + target = target.Elem() + } + return target +} + +// likelyRequiredTypesError returns an error that states which types are +// acceptable by making some assumptions about what types we support for +// each target Go kind. It's not a precise science but it allows us to return +// an error message that is cty-user-oriented rather than Go-oriented. +// +// Generally these error messages should be a matter of last resort, since +// the calling application should be validating user-provided value types +// before decoding anyway. 
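+//
+// For example (illustrative): FromCtyValue(cty.StringVal("x"), &n) with n
+// declared as an int reaches this function and reports "number value is
+// required", describing the Go target in cty terms rather than Go terms.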
+func likelyRequiredTypesError(path cty.Path, target reflect.Value) error { + switch target.Kind() { + + case reflect.Bool: + return path.NewErrorf("bool value is required") + + case reflect.String: + return path.NewErrorf("string value is required") + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fallthrough + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + fallthrough + case reflect.Float32, reflect.Float64: + return path.NewErrorf("number value is required") + + case reflect.Slice, reflect.Array: + return path.NewErrorf("list or set value is required") + + case reflect.Map: + return path.NewErrorf("map or object value is required") + + case reflect.Struct: + switch { + + case target.Type().AssignableTo(bigFloatType) || target.Type().AssignableTo(bigIntType): + return path.NewErrorf("number value is required") + + case target.Type().AssignableTo(setType): + return path.NewErrorf("set or list value is required") + + default: + return path.NewErrorf("object or tuple value is required") + + } + + default: + // We should avoid getting into this path, since this error + // message is rather useless. + return path.NewErrorf("incorrect type") + + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go new file mode 100644 index 0000000000..b413425358 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go @@ -0,0 +1,108 @@ +package gocty + +import ( + "reflect" + + "github.com/hashicorp/go-cty/cty" +) + +// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts +// to find a suitable cty.Type instance that could be used for a conversion +// with ToCtyValue. +// +// This allows -- for simple situations at least -- types to be defined just +// once in Go and the cty types derived from the Go types, but in the process +// it makes some assumptions that may be undesirable so applications are +// encouraged to build their cty types directly if exacting control is +// required. +// +// Not all Go types can be represented as cty types, so an error may be +// returned which is usually considered to be a bug in the calling program. +// In particular, ImpliedType will never use capsule types in its returned +// type, because it cannot know the capsule types supported by the calling +// program. 
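+//
+// A hedged usage sketch (illustrative only, not part of the upstream file):
+//
+//	type account struct {
+//		Name string `cty:"name"`
+//	}
+//	ty, err := gocty.ImpliedType(account{})
+//	// on success, ty is cty.Object(map[string]cty.Type{"name": cty.String})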
+func ImpliedType(gv interface{}) (cty.Type, error) { + rt := reflect.TypeOf(gv) + var path cty.Path + return impliedType(rt, path) +} + +func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) { + switch rt.Kind() { + + case reflect.Ptr: + return impliedType(rt.Elem(), path) + + // Primitive types + case reflect.Bool: + return cty.Bool, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.Number, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.Number, nil + case reflect.Float32, reflect.Float64: + return cty.Number, nil + case reflect.String: + return cty.String, nil + + // Collection types + case reflect.Slice: + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.List(ety), nil + case reflect.Map: + if !stringType.AssignableTo(rt.Key()) { + return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt) + } + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.Map(ety), nil + + // Structural types + case reflect.Struct: + return impliedStructType(rt, path) + + default: + return cty.NilType, path.NewErrorf("no cty.Type for %s", rt) + } +} + +func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) { + if valueType.AssignableTo(rt) { + // Special case: cty.Value represents cty.DynamicPseudoType, for + // type conformance checking. + return cty.DynamicPseudoType, nil + } + + fieldIdxs := structTagIndices(rt) + if len(fieldIdxs) == 0 { + return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt) + } + + atys := make(map[string]cty.Type, len(fieldIdxs)) + + { + // Temporary extension of path for attributes + path := append(path, nil) + + for k, fi := range fieldIdxs { + path[len(path)-1] = cty.GetAttrStep{Name: k} + + ft := rt.Field(fi).Type + aty, err := impliedType(ft, path) + if err != nil { + return cty.NilType, err + } + + atys[k] = aty + } + } + + return cty.Object(atys), nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/helper.go b/vendor/github.com/hashicorp/go-cty/cty/helper.go new file mode 100644 index 0000000000..1b88e9fa08 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/helper.go @@ -0,0 +1,99 @@ +package cty + +import ( + "fmt" +) + +// anyUnknown is a helper to easily check if a set of values contains any +// unknowns, for operations that short-circuit to return unknown in that case. +func anyUnknown(values ...Value) bool { + for _, val := range values { + if val.v == unknown { + return true + } + } + return false +} + +// typeCheck tests whether all of the given values belong to the given type. +// If the given types are a mixture of the given type and the dynamic +// pseudo-type then a short-circuit dynamic value is returned. If the given +// values are all of the correct type but at least one is unknown then +// a short-circuit unknown value is returned. If any other types appear then +// an error is returned. Otherwise (finally!) the result is nil, nil. 
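+//
+// For illustration: typeCheck(Bool, Bool, True, UnknownVal(Bool)) returns a
+// short-circuit unknown Bool, while typeCheck(Bool, Bool, True, Zero) returns
+// a "type mismatch" error because Zero is a Number.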
+func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, err error) {
+	hasDynamic := false
+	hasUnknown := false
+
+	for i, val := range values {
+		if val.ty == DynamicPseudoType {
+			hasDynamic = true
+			continue
+		}
+
+		if !val.Type().Equals(required) {
+			return nil, fmt.Errorf(
+				"type mismatch: want %s but value %d is %s",
+				required.FriendlyName(),
+				i, val.ty.FriendlyName(),
+			)
+		}
+
+		if val.v == unknown {
+			hasUnknown = true
+		}
+	}
+
+	if hasDynamic {
+		return &DynamicVal, nil
+	}
+
+	if hasUnknown {
+		ret := UnknownVal(ret)
+		return &ret, nil
+	}
+
+	return nil, nil
+}
+
+// mustTypeCheck is a wrapper around typeCheck that immediately panics if
+// any error is returned.
+func mustTypeCheck(required Type, ret Type, values ...Value) *Value {
+	shortCircuit, err := typeCheck(required, ret, values...)
+	if err != nil {
+		panic(err)
+	}
+	return shortCircuit
+}
+
+// forceShortCircuitType takes the return value from mustTypeCheck and
+// replaces it with an unknown of the given type if the original value was
+// DynamicVal.
+//
+// This is useful for operations that are specified to always return a
+// particular type, since then a dynamic result can safely be "upgraded" to
+// a strongly-typed unknown, which then allows subsequent operations to
+// be actually type-checked.
+//
+// It is safe to use this only if the operation in question is defined as
+// returning either a value of the given type or panicking, since we know
+// then that subsequent operations won't run if the operation panics.
+//
+// If the given short-circuit value is *not* DynamicVal then it must be
+// of the given type, or this function will panic.
+func forceShortCircuitType(shortCircuit *Value, ty Type) *Value {
+	if shortCircuit == nil {
+		return nil
+	}
+
+	if shortCircuit.ty == DynamicPseudoType {
+		ret := UnknownVal(ty)
+		return &ret
+	}
+
+	if !shortCircuit.ty.Equals(ty) {
+		panic("forceShortCircuitType got value of wrong type")
+	}
+
+	return shortCircuit
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json.go b/vendor/github.com/hashicorp/go-cty/cty/json.go
new file mode 100644
index 0000000000..c421a62ed9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/json.go
@@ -0,0 +1,176 @@
+package cty
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+// MarshalJSON is an implementation of json.Marshaler that allows Type
+// instances to be serialized as JSON.
+//
+// All standard types can be serialized, but capsule types cannot since there
+// is no way to automatically recover the original pointer and capsule types
+// compare by equality.
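+//
+// For illustration, some serialized forms produced by the cases below:
+//
+//	cty.String          -> "string"
+//	cty.List(cty.Bool)  -> ["list","bool"]
+//	cty.EmptyObject     -> ["object",{}]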
+func (t Type) MarshalJSON() ([]byte, error) { + switch impl := t.typeImpl.(type) { + case primitiveType: + switch impl.Kind { + case primitiveTypeBool: + return []byte{'"', 'b', 'o', 'o', 'l', '"'}, nil + case primitiveTypeNumber: + return []byte{'"', 'n', 'u', 'm', 'b', 'e', 'r', '"'}, nil + case primitiveTypeString: + return []byte{'"', 's', 't', 'r', 'i', 'n', 'g', '"'}, nil + default: + panic("unknown primitive type kind") + } + case typeList, typeMap, typeSet: + buf := &bytes.Buffer{} + etyJSON, err := t.ElementType().MarshalJSON() + if err != nil { + return nil, err + } + buf.WriteRune('[') + switch impl.(type) { + case typeList: + buf.WriteString(`"list"`) + case typeMap: + buf.WriteString(`"map"`) + case typeSet: + buf.WriteString(`"set"`) + } + buf.WriteRune(',') + buf.Write(etyJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeObject: + buf := &bytes.Buffer{} + atysJSON, err := json.Marshal(t.AttributeTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["object",`) + buf.Write(atysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeTuple: + buf := &bytes.Buffer{} + etysJSON, err := json.Marshal(t.TupleElementTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["tuple",`) + buf.Write(etysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case pseudoTypeDynamic: + return []byte{'"', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '"'}, nil + case *capsuleType: + return nil, fmt.Errorf("type not allowed: %s", t.FriendlyName()) + default: + // should never happen + panic("unknown type implementation") + } +} + +// UnmarshalJSON is the opposite of MarshalJSON. See the documentation of +// MarshalJSON for information on the limitations of JSON serialization of +// types. +func (t *Type) UnmarshalJSON(buf []byte) error { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + + tok, err := dec.Token() + if err != nil { + return err + } + + switch v := tok.(type) { + case string: + switch v { + case "bool": + *t = Bool + case "number": + *t = Number + case "string": + *t = String + case "dynamic": + *t = DynamicPseudoType + default: + return fmt.Errorf("invalid primitive type name %q", v) + } + + if dec.More() { + return fmt.Errorf("extraneous data after type description") + } + return nil + case json.Delim: + if rune(v) != '[' { + return fmt.Errorf("invalid complex type description") + } + + tok, err = dec.Token() + if err != nil { + return err + } + + kind, ok := tok.(string) + if !ok { + return fmt.Errorf("invalid complex type kind name") + } + + switch kind { + case "list": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = List(ety) + case "map": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Map(ety) + case "set": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Set(ety) + case "object": + var atys map[string]Type + err = dec.Decode(&atys) + if err != nil { + return err + } + *t = Object(atys) + case "tuple": + var etys []Type + err = dec.Decode(&etys) + if err != nil { + return err + } + *t = Tuple(etys) + default: + return fmt.Errorf("invalid complex type kind name") + } + + tok, err = dec.Token() + if err != nil { + return err + } + if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() { + return fmt.Errorf("unexpected extra data in type description") + } + + return nil + + default: + return fmt.Errorf("invalid type description") + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/doc.go 
b/vendor/github.com/hashicorp/go-cty/cty/json/doc.go
new file mode 100644
index 0000000000..8916513d67
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/json/doc.go
@@ -0,0 +1,11 @@
+// Package json provides functions for serializing cty types and values in
+// JSON format, and for decoding them again.
+//
+// Since the cty type system is a superset of the JSON type system,
+// round-tripping through JSON is lossy unless type information is provided
+// both at encoding time and decoding time. Callers of this package are
+// therefore suggested to define their expected structure as a cty.Type
+// and pass it in consistently both when encoding and when decoding, though
+// default (type-lossy) behavior is provided for situations where the precise
+// representation of the data is not significant.
+package json
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go b/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go
new file mode 100644
index 0000000000..728ab0100f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go
@@ -0,0 +1,193 @@
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"sort"
+
+	"github.com/hashicorp/go-cty/cty"
+)
+
+func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error {
+	if val.IsMarked() {
+		return path.NewErrorf("value has marks, so it cannot be serialized")
+	}
+
+	// If we're going to decode as DynamicPseudoType then we need to save
+	// dynamic type information to recover the real type.
+	if t == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType {
+		return marshalDynamic(val, path, b)
+	}
+
+	if val.IsNull() {
+		b.WriteString("null")
+		return nil
+	}
+
+	if !val.IsKnown() {
+		return path.NewErrorf("value is not known")
+	}
+
+	// The caller should've guaranteed that the given val is conformant with
+	// the given type t, so we'll proceed under that assumption here.
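+	// In summary: primitive values become JSON scalars; lists, sets, and
+	// tuples become JSON arrays; maps and objects become JSON objects; and
+	// capsule values are delegated to encoding/json.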
+ + switch { + case t.IsPrimitiveType(): + switch t { + case cty.String: + json, err := json.Marshal(val.AsString()) + if err != nil { + return path.NewErrorf("failed to serialize value: %s", err) + } + b.Write(json) + return nil + case cty.Number: + if val.RawEquals(cty.PositiveInfinity) || val.RawEquals(cty.NegativeInfinity) { + return path.NewErrorf("cannot serialize infinity as JSON") + } + b.WriteString(val.AsBigFloat().Text('f', -1)) + return nil + case cty.Bool: + if val.True() { + b.WriteString("true") + } else { + b.WriteString("false") + } + return nil + default: + panic("unsupported primitive type") + } + case t.IsListType(), t.IsSetType(): + b.WriteRune('[') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune(']') + return nil + case t.IsMapType(): + b.WriteRune('{') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune('}') + return nil + case t.IsTupleType(): + b.WriteRune('[') + etys := t.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + for it.Next() { + if i > 0 { + b.WriteRune(',') + } + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + i++ + } + b.WriteRune(']') + return nil + case t.IsObjectType(): + b.WriteRune('{') + atys := t.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + for i, k := range names { + aty := atys[k] + if i > 0 { + b.WriteRune(',') + } + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(av, aty, path, b) + if err != nil { + return err + } + } + b.WriteRune('}') + return nil + case t.IsCapsuleType(): + rawVal := val.EncapsulatedValue() + jsonVal, err := json.Marshal(rawVal) + if err != nil { + return path.NewError(err) + } + b.Write(jsonVal) + return nil + default: + // should never happen + return path.NewErrorf("cannot JSON-serialize %s", t.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
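+//
+// For illustration, cty.StringVal("hello") encoded via this wrapper becomes:
+//
+//	{"value":"hello","type":"string"}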
+func marshalDynamic(val cty.Value, path cty.Path, b *bytes.Buffer) error {
+	typeJSON, err := MarshalType(val.Type())
+	if err != nil {
+		return path.NewErrorf("failed to serialize type: %s", err)
+	}
+	b.WriteString(`{"value":`)
+	err = marshal(val, val.Type(), path, b)
+	if err != nil {
+		return path.NewErrorf("failed to serialize value: %s", err)
+	}
+	b.WriteString(`,"type":`)
+	b.Write(typeJSON)
+	b.WriteRune('}')
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/simple.go b/vendor/github.com/hashicorp/go-cty/cty/json/simple.go
new file mode 100644
index 0000000000..aaba8c3bde
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/json/simple.go
@@ -0,0 +1,41 @@
+package json
+
+import (
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// SimpleJSONValue is a wrapper around cty.Value that adds implementations of
+// json.Marshaler and json.Unmarshaler for simple-but-type-lossy automatic
+// encoding and decoding of values.
+//
+// The couplet Marshal and Unmarshal both take extra type information to
+// inform the encoding and decoding process so that all of the cty types
+// can be represented even though JSON's type system is a subset.
+//
+// SimpleJSONValue instead takes the approach of discarding the value's type
+// information and then deriving a new type from the stored structure when
+// decoding. This results in the same data being returned but not necessarily
+// with exactly the same type.
+//
+// For information on how types are inferred when decoding, see the
+// documentation of the function ImpliedType.
+type SimpleJSONValue struct {
+	cty.Value
+}
+
+// MarshalJSON is an implementation of json.Marshaler. See the documentation
+// of SimpleJSONValue for more information.
+func (v SimpleJSONValue) MarshalJSON() ([]byte, error) {
+	return Marshal(v.Value, v.Type())
+}
+
+// UnmarshalJSON is an implementation of json.Unmarshaler. See the
+// documentation of SimpleJSONValue for more information.
+func (v *SimpleJSONValue) UnmarshalJSON(buf []byte) error {
+	t, err := ImpliedType(buf)
+	if err != nil {
+		return err
+	}
+	v.Value, err = Unmarshal(buf, t)
+	return err
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/type.go b/vendor/github.com/hashicorp/go-cty/cty/json/type.go
new file mode 100644
index 0000000000..59d7f2e17f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/json/type.go
@@ -0,0 +1,23 @@
+package json
+
+import (
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// MarshalType returns a JSON serialization of the given type.
+//
+// This is just a thin wrapper around t.MarshalJSON, for symmetry with
+// UnmarshalType.
+func MarshalType(t cty.Type) ([]byte, error) {
+	return t.MarshalJSON()
+}
+
+// UnmarshalType decodes a JSON serialization of the given type as produced
+// by either Type.MarshalJSON or MarshalType.
+//
+// This is a convenience wrapper around Type.UnmarshalJSON.
+func UnmarshalType(buf []byte) (cty.Type, error) {
+	var t cty.Type
+	err := t.UnmarshalJSON(buf)
+	return t, err
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go
new file mode 100644
index 0000000000..8adf22bbe9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go
@@ -0,0 +1,170 @@
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"github.com/hashicorp/go-cty/cty"
+)
+
+// ImpliedType returns the cty Type implied by the structure of the given
+// JSON-compliant buffer. This function implements the default type mapping
+// behavior used when decoding arbitrary JSON without explicit cty Type
+// information.
+// +// The rules are as follows: +// +// JSON strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// JSON objects map to cty object types, with the attributes defined by the +// object keys and the types of their values. +// +// JSON arrays map to cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any JSON syntax errors will be returned as an error, and the type will +// be the invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + dec.UseNumber() + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + if dec.More() { + return cty.NilType, fmt.Errorf("extraneous data after JSON object") + } + + return ty, nil +} + +func impliedType(dec *json.Decoder) (cty.Type, error) { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + return impliedTypeForTok(tok, dec) +} + +func impliedTypeForTok(tok json.Token, dec *json.Decoder) (cty.Type, error) { + if tok == nil { + return cty.DynamicPseudoType, nil + } + + switch ttok := tok.(type) { + case bool: + return cty.Bool, nil + + case json.Number: + return cty.Number, nil + + case string: + return cty.String, nil + + case json.Delim: + + switch rune(ttok) { + case '{': + return impliedObjectType(dec) + case '[': + return impliedTupleType(dec) + default: + return cty.NilType, fmt.Errorf("unexpected token %q", ttok) + } + + default: + return cty.NilType, fmt.Errorf("unsupported JSON token %#v", tok) + } +} + +func impliedObjectType(dec *json.Decoder) (cty.Type, error) { + // By the time we get in here, we've already consumed the { delimiter + // and so our next token should be the first object key. + + var atys map[string]cty.Type + + for { + // Read the object key first + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) != '}' { + return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok) + } + break + } + + key, ok := tok.(string) + if !ok { + return cty.NilType, fmt.Errorf("expected string but found %T", tok) + } + + // Now read the value + tok, err = dec.Token() + if err != nil { + return cty.NilType, err + } + + aty, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + + if atys == nil { + atys = make(map[string]cty.Type) + } + atys[key] = aty + } + + if len(atys) == 0 { + return cty.EmptyObject, nil + } + + return cty.Object(atys), nil +} + +func impliedTupleType(dec *json.Decoder) (cty.Type, error) { + // By the time we get in here, we've already consumed the [ delimiter + // and so our next token should be the first value. 
+ + var etys []cty.Type + + for { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) == ']' { + break + } + } + + ety, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + etys = append(etys, ety) + } + + if len(etys) == 0 { + return cty.EmptyTuple, nil + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go b/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go new file mode 100644 index 0000000000..5ad190d33b --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go @@ -0,0 +1,459 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" +) + +func unmarshal(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return cty.NilVal, path.NewError(err) + } + + if tok == nil { + return cty.NullVal(t), nil + } + + if t == cty.DynamicPseudoType { + return unmarshalDynamic(buf, path) + } + + switch { + case t.IsPrimitiveType(): + val, err := unmarshalPrimitive(tok, t, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case t.IsListType(): + return unmarshalList(buf, t.ElementType(), path) + case t.IsSetType(): + return unmarshalSet(buf, t.ElementType(), path) + case t.IsMapType(): + return unmarshalMap(buf, t.ElementType(), path) + case t.IsTupleType(): + return unmarshalTuple(buf, t.TupleElementTypes(), path) + case t.IsObjectType(): + return unmarshalObject(buf, t.AttributeTypes(), path) + case t.IsCapsuleType(): + return unmarshalCapsule(buf, t, path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", t.FriendlyName()) + } +} + +func unmarshalPrimitive(tok json.Token, t cty.Type, path cty.Path) (cty.Value, error) { + + switch t { + case cty.Bool: + switch v := tok.(type) { + case bool: + return cty.BoolVal(v), nil + case string: + val, err := convert.Convert(cty.StringVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("bool is required") + } + case cty.Number: + if v, ok := tok.(json.Number); ok { + tok = string(v) + } + switch v := tok.(type) { + case string: + val, err := cty.ParseNumberVal(v) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("number is required") + } + case cty.String: + switch v := tok.(type) { + case string: + return cty.StringVal(v), nil + case json.Number: + return cty.StringVal(string(v)), nil + case bool: + val, err := convert.Convert(cty.BoolVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("string is required") + } + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int64 + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(idx), + } + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read list value: %s", err) + } + + el, err := 
unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(ety), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read set value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(cty.String), + } + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map key: %s", err) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(buf []byte, etys []cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int + + for dec.More() { + if idx >= len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("too many tuple elements (need %d)", len(etys)) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(idx)), + } + ety := etys[idx] + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read tuple value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) != len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("not enough tuple elements (need %d)", len(etys)) + } + + if len(vals) == 0 { + return cty.EmptyTupleVal, nil + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(buf []byte, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + objPath := path // some 
errors report from the object's perspective + path := append(path, nil) // path to a specific attribute + + for dec.More() { + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object key: %s", err) + } + + aty, ok := atys[k] + if !ok { + return cty.NilVal, objPath.NewErrorf("unsupported attribute %q", k) + } + + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object value: %s", err) + } + + el, err := unmarshal(rawVal, aty, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + // Make sure we have a value for every attribute + for k, aty := range atys { + if _, exists := vals[k]; !exists { + vals[k] = cty.NullVal(aty) + } + } + + if len(vals) == 0 { + return cty.EmptyObjectVal, nil + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalCapsule(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + rawType := t.EncapsulatedType() + ptrPtr := reflect.New(reflect.PtrTo(rawType)) + ptrPtr.Elem().Set(reflect.New(rawType)) + ptr := ptrPtr.Elem().Interface() + err := json.Unmarshal(buf, ptr) + if err != nil { + return cty.NilVal, path.NewError(err) + } + + return cty.CapsuleVal(t, ptr), nil +} + +func unmarshalDynamic(buf []byte, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + var t cty.Type + var valBody []byte // defer actual decoding until we know the type + + for dec.More() { + var err error + + key, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor key: %s", err) + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor value: %s", err) + } + + switch key { + case "type": + err := json.Unmarshal(rawVal, &t) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to decode type for dynamic value: %s", err) + } + case "value": + valBody = rawVal + default: + return cty.NilVal, path.NewErrorf("invalid key %q in dynamically-typed value", key) + } + + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if t == cty.NilType { + return cty.NilVal, path.NewErrorf("missing type in dynamically-typed value") + } + if valBody == nil { + return cty.NilVal, path.NewErrorf("missing value in dynamically-typed value") + } + + val, err := Unmarshal([]byte(valBody), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil +} + +func requireDelim(dec *json.Decoder, d rune) error { + tok, err := dec.Token() + if err != nil { + return err + } + + if tok != json.Delim(d) { + return fmt.Errorf("missing expected %c", d) + } + + return nil +} + +func requireObjectKey(dec *json.Decoder) (string, error) { + tok, err := dec.Token() + if err != nil { + return "", err + } + if s, ok := tok.(string); ok { + return s, nil + } + return "", fmt.Errorf("missing expected object key") +} + +func readRawValue(dec *json.Decoder) ([]byte, error) { + var rawVal json.RawMessage + err := dec.Decode(&rawVal) + if err != nil { + return nil, err + } + return []byte(rawVal), nil +} + +func bufDecoder(buf []byte) *json.Decoder { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + 
dec.UseNumber()
+	return dec
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/value.go b/vendor/github.com/hashicorp/go-cty/cty/json/value.go
new file mode 100644
index 0000000000..50748f7090
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/json/value.go
@@ -0,0 +1,65 @@
+package json
+
+import (
+	"bytes"
+
+	"github.com/hashicorp/go-cty/cty"
+	"github.com/hashicorp/go-cty/cty/convert"
+)
+
+// Marshal produces a JSON representation of the given value that can later
+// be decoded into a value of the given type.
+//
+// A type is specified separately to allow for the given type to include
+// cty.DynamicPseudoType to represent situations where any type is permitted
+// and so type information must be included to allow recovery of the stored
+// structure when decoding.
+//
+// The given type will also be used to attempt automatic conversions of any
+// non-conformant types in the given value, although this will not always
+// be possible. If the value cannot be made to be conformant then an error is
+// returned, which may be a cty.PathError.
+//
+// Capsule-typed values can be marshalled, but with some caveats. Since
+// capsule values are compared by pointer equality, it is impossible to recover
+// a value that will compare equal to the original value. Additionally,
+// it's not possible to JSON-serialize the capsule type itself, so it's not
+// valid to use capsule types within parts of the value that are conformed to
+// cty.DynamicPseudoType. Otherwise, a capsule value can be used as long as
+// the encapsulated type itself is serializable with the Marshal function
+// in encoding/json.
+func Marshal(val cty.Value, t cty.Type) ([]byte, error) {
+	errs := val.Type().TestConformance(t)
+	if errs != nil {
+		// Attempt a conversion
+		var err error
+		val, err = convert.Convert(val, t)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// From this point onward, val can be assumed to be conforming to t.
+
+	buf := &bytes.Buffer{}
+	var path cty.Path
+	err := marshal(val, t, path, buf)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// Unmarshal decodes a JSON representation of the given value into a cty Value
+// conforming to the given type.
+//
+// While decoding, type conversions will be done where possible to make
+// the result conformant even if the types given in JSON are not exactly
+// correct. If conversion isn't possible then an error is returned, which
+// may be a cty.PathError.
+func Unmarshal(buf []byte, t cty.Type) (cty.Value, error) {
+	var path cty.Path
+	return unmarshal(buf, t, path)
+}
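For orientation, here is a small round-trip sketch of the Marshal/Unmarshal pair vendored above. The example itself (the variable names and the ctyjson import alias) is ours, not part of this change:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	ctyjson "github.com/hashicorp/go-cty/cty/json"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"tags": cty.List(cty.String),
	})
	v := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("example"),
		"tags": cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
	})

	// Marshal checks conformance against ty, converting where possible.
	buf, err := ctyjson.Marshal(v, ty) // {"name":"example","tags":["a","b"]}
	if err != nil {
		panic(err)
	}

	// Unmarshal recovers an equal value when given the same type.
	got, err := ctyjson.Unmarshal(buf, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.RawEquals(v)) // true
}
```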
diff --git a/vendor/github.com/hashicorp/go-cty/cty/list_type.go b/vendor/github.com/hashicorp/go-cty/cty/list_type.go
new file mode 100644
index 0000000000..2ef02a12f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/list_type.go
@@ -0,0 +1,74 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// TypeList instances represent specific list types. Each distinct ElementType
+// creates a distinct, non-equal list type.
+type typeList struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// List creates a list type with the given element Type.
+//
+// List types are CollectionType implementations.
+func List(elem Type) Type {
+	return Type{
+		typeList{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a list whose element type is
+// equal to that of the receiver.
+func (t typeList) Equals(other Type) bool {
+	ot, isList := other.typeImpl.(typeList)
+	if !isList {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeList) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "list of " + elemName
+}
+
+func (t typeList) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeList) GoString() string {
+	return fmt.Sprintf("cty.List(%#v)", t.ElementTypeT)
+}
+
+// IsListType returns true if the given type is a list type, regardless of its
+// element type.
+func (t Type) IsListType() bool {
+	_, ok := t.typeImpl.(typeList)
+	return ok
+}
+
+// ListElementType is a convenience method that checks if the given type is
+// a list type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+// if et := t.ListElementType(); et != nil {
+// 	// Do something with *et
+// }
+func (t Type) ListElementType() *Type {
+	if lt, ok := t.typeImpl.(typeList); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/map_type.go b/vendor/github.com/hashicorp/go-cty/cty/map_type.go
new file mode 100644
index 0000000000..82d36c6282
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/map_type.go
@@ -0,0 +1,74 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// TypeMap instances represent specific map types. Each distinct ElementType
+// creates a distinct, non-equal map type.
+type typeMap struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// Map creates a map type with the given element Type.
+//
+// Map types are CollectionType implementations.
+func Map(elem Type) Type {
+	return Type{
+		typeMap{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a map whose element type is
+// equal to that of the receiver.
+func (t typeMap) Equals(other Type) bool {
+	ot, isMap := other.typeImpl.(typeMap)
+	if !isMap {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeMap) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "map of " + elemName
+}
+
+func (t typeMap) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeMap) GoString() string {
+	return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT)
+}
+
+// IsMapType returns true if the given type is a map type, regardless of its
+// element type.
+func (t Type) IsMapType() bool {
+	_, ok := t.typeImpl.(typeMap)
+	return ok
+}
+
+// MapElementType is a convenience method that checks if the given type is
+// a map type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+// if et := t.MapElementType(); et != nil {
+// 	// Do something with *et
+// }
+func (t Type) MapElementType() *Type {
+	if lt, ok := t.typeImpl.(typeMap); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
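The element-type helpers in these two files are easiest to see in use. A brief sketch of our own (not part of the vendored code):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	lt := cty.List(cty.String)
	fmt.Println(lt.IsListType()) // true
	if et := lt.ListElementType(); et != nil {
		fmt.Println(et.FriendlyName()) // "string"
	}

	mt := cty.Map(cty.Number)
	fmt.Println(mt.IsMapType())                 // true
	fmt.Println(mt.Equals(cty.Map(cty.Number))) // true: equality follows the element type
}
```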
This is intended to allow convenient conditional branches, +// like so: +// +// if et := t.MapElementType(); et != nil { +// // Do something with *et +// } +func (t Type) MapElementType() *Type { + if lt, ok := t.typeImpl.(typeMap); ok { + return <.ElementTypeT + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/marks.go b/vendor/github.com/hashicorp/go-cty/cty/marks.go new file mode 100644 index 0000000000..3898e45533 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/marks.go @@ -0,0 +1,296 @@ +package cty + +import ( + "fmt" + "strings" +) + +// marker is an internal wrapper type used to add special "marks" to values. +// +// A "mark" is an annotation that can be used to represent additional +// characteristics of values that propagate through operation methods to +// result values. However, a marked value cannot be used with integration +// methods normally associated with its type, in order to ensure that +// calling applications don't inadvertently drop marks as they round-trip +// values out of cty and back in again. +// +// Marked values are created only explicitly by the calling application, so +// an application that never marks a value does not need to worry about +// encountering marked values. +type marker struct { + realV interface{} + marks ValueMarks +} + +// ValueMarks is a map, representing a set, of "mark" values associated with +// a Value. See Value.Mark for more information on the usage of mark values. +type ValueMarks map[interface{}]struct{} + +// NewValueMarks constructs a new ValueMarks set with the given mark values. +func NewValueMarks(marks ...interface{}) ValueMarks { + if len(marks) == 0 { + return nil + } + ret := make(ValueMarks, len(marks)) + for _, v := range marks { + ret[v] = struct{}{} + } + return ret +} + +// Equal returns true if the receiver and the given ValueMarks both contain +// the same marks. +func (m ValueMarks) Equal(o ValueMarks) bool { + if len(m) != len(o) { + return false + } + for v := range m { + if _, ok := o[v]; !ok { + return false + } + } + return true +} + +func (m ValueMarks) GoString() string { + var s strings.Builder + s.WriteString("cty.NewValueMarks(") + i := 0 + for mv := range m { + if i != 0 { + s.WriteString(", ") + } + s.WriteString(fmt.Sprintf("%#v", mv)) + i++ + } + s.WriteString(")") + return s.String() +} + +// IsMarked returns true if and only if the receiving value carries at least +// one mark. A marked value cannot be used directly with integration methods +// without explicitly unmarking it (and retrieving the markings) first. +func (val Value) IsMarked() bool { + _, ok := val.v.(marker) + return ok +} + +// HasMark returns true if and only if the receiving value has the given mark. +func (val Value) HasMark(mark interface{}) bool { + if mr, ok := val.v.(marker); ok { + _, ok := mr.marks[mark] + return ok + } + return false +} + +// ContainsMarked returns true if the receiving value or any value within it +// is marked. +// +// This operation is relatively expensive. If you only need a shallow result, +// use IsMarked instead. 
+
+// ContainsMarked returns true if the receiving value or any value within it
+// is marked.
+//
+// This operation is relatively expensive. If you only need a shallow result,
+// use IsMarked instead.
+func (val Value) ContainsMarked() bool {
+	ret := false
+	Walk(val, func(_ Path, v Value) (bool, error) {
+		if v.IsMarked() {
+			ret = true
+			return false, nil
+		}
+		return true, nil
+	})
+	return ret
+}
+
+func (val Value) assertUnmarked() {
+	if val.IsMarked() {
+		panic("value is marked, so must be unmarked first")
+	}
+}
+
+// Marks returns a map (representing a set) of all of the mark values
+// associated with the receiving value, without changing the marks. Returns nil
+// if the value is not marked at all.
+func (val Value) Marks() ValueMarks {
+	if mr, ok := val.v.(marker); ok {
+		// copy so that the caller can't mutate our internals
+		ret := make(ValueMarks, len(mr.marks))
+		for k, v := range mr.marks {
+			ret[k] = v
+		}
+		return ret
+	}
+	return nil
+}
+
+// HasSameMarks returns true if and only if the receiver and the given other
+// value have identical marks.
+func (val Value) HasSameMarks(other Value) bool {
+	vm, vmOK := val.v.(marker)
+	om, omOK := other.v.(marker)
+	if vmOK != omOK {
+		return false
+	}
+	if vmOK {
+		return vm.marks.Equal(om.marks)
+	}
+	return true
+}
+
+// Mark returns a new value that has the same type and underlying value as
+// the receiver but that also carries the given value as a "mark".
+//
+// Marks are used to carry additional application-specific characteristics
+// associated with values. A marked value can be used with operation methods,
+// in which case the marks are propagated to the operation results. A marked
+// value _cannot_ be used with integration methods, so callers of those
+// must derive an unmarked value using Unmark (and thus explicitly handle
+// the markings) before calling the integration methods.
+//
+// The mark value can be any value that would be valid to use as a map key.
+// The mark value should be of a named type in order to use the type itself
+// as a namespace for markings. That type can be unexported if desired, in
+// order to ensure that the mark can only be handled through the defining
+// package's own functions.
+//
+// An application that never calls this method does not need to worry about
+// handling marked values.
+func (val Value) Mark(mark interface{}) Value {
+	var newMarker marker
+	newMarker.realV = val.v
+	if mr, ok := val.v.(marker); ok {
+		// It's already a marker, so we'll retain existing marks.
+		newMarker.marks = make(ValueMarks, len(mr.marks)+1)
+		for k, v := range mr.marks {
+			newMarker.marks[k] = v
+		}
+	} else {
+		// It's not a marker yet, so we're creating the first mark.
+		newMarker.marks = make(ValueMarks, 1)
+	}
+	newMarker.marks[mark] = struct{}{}
+	return Value{
+		ty: val.ty,
+		v:  newMarker,
+	}
+}
+
+// Unmark separates the marks of the receiving value from the value itself,
+// returning a new unmarked value and a map (representing a set) of the marks.
+//
+// If the receiver isn't marked, Unmark returns it verbatim along with a nil
+// map of marks.
+func (val Value) Unmark() (Value, ValueMarks) {
+	if !val.IsMarked() {
+		return val, nil
+	}
+	mr := val.v.(marker)
+	marks := val.Marks() // copy so that the caller can't mutate our internals
+	return Value{
+		ty: val.ty,
+		v:  mr.realV,
+	}, marks
+}
+
+// UnmarkDeep is similar to Unmark, but it works with an entire nested structure
+// rather than just the given value directly.
+//
+// The result is guaranteed to contain no nested values that are marked, and
+// the returned marks set includes the superset of all of the marks encountered
+// during the operation.
+func (val Value) UnmarkDeep() (Value, ValueMarks) { + marks := make(ValueMarks) + ret, _ := Transform(val, func(_ Path, v Value) (Value, error) { + unmarkedV, valueMarks := v.Unmark() + for m, s := range valueMarks { + marks[m] = s + } + return unmarkedV, nil + }) + return ret, marks +} + +func (val Value) unmarkForce() Value { + unw, _ := val.Unmark() + return unw +} + +// WithMarks returns a new value that has the same type and underlying value +// as the receiver and also has the marks from the given maps (representing +// sets). +func (val Value) WithMarks(marks ...ValueMarks) Value { + if len(marks) == 0 { + return val + } + ownMarks := val.Marks() + markCount := len(ownMarks) + for _, s := range marks { + markCount += len(s) + } + if markCount == 0 { + return val + } + newMarks := make(ValueMarks, markCount) + for m := range ownMarks { + newMarks[m] = struct{}{} + } + for _, s := range marks { + for m := range s { + newMarks[m] = struct{}{} + } + } + v := val.v + if mr, ok := v.(marker); ok { + v = mr.realV + } + return Value{ + ty: val.ty, + v: marker{ + realV: v, + marks: newMarks, + }, + } +} + +// WithSameMarks returns a new value that has the same type and underlying +// value as the receiver and also has the marks from the given source values. +// +// Use this if you are implementing your own higher-level operations against +// cty using the integration methods, to re-introduce the marks from the +// source values of the operation. +func (val Value) WithSameMarks(srcs ...Value) Value { + if len(srcs) == 0 { + return val + } + ownMarks := val.Marks() + markCount := len(ownMarks) + for _, sv := range srcs { + if mr, ok := sv.v.(marker); ok { + markCount += len(mr.marks) + } + } + if markCount == 0 { + return val + } + newMarks := make(ValueMarks, markCount) + for m := range ownMarks { + newMarks[m] = struct{}{} + } + for _, sv := range srcs { + if mr, ok := sv.v.(marker); ok { + for m := range mr.marks { + newMarks[m] = struct{}{} + } + } + } + v := val.v + if mr, ok := v.(marker); ok { + v = mr.realV + } + return Value{ + ty: val.ty, + v: marker{ + realV: v, + marks: newMarks, + }, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go similarity index 100% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go similarity index 90% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go index 9a4e94c279..ce59d9ff42 100644 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go @@ -3,8 +3,8 @@ package msgpack import ( "bytes" - "github.com/vmihailenco/msgpack/v4" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" + "github.com/vmihailenco/msgpack" ) type dynamicVal struct { diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go similarity index 100% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go new file mode 100644 index 0000000000..8a43c16ac9 --- 
/dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go
@@ -0,0 +1,211 @@
+package msgpack
+
+import (
+	"bytes"
+	"math/big"
+	"sort"
+
+	"github.com/hashicorp/go-cty/cty"
+	"github.com/hashicorp/go-cty/cty/convert"
+	"github.com/vmihailenco/msgpack"
+)
+
+// Marshal produces a msgpack serialization of the given value that
+// can be decoded into the given type later using Unmarshal.
+//
+// The given value must conform to the given type, or an error will
+// be returned.
+func Marshal(val cty.Value, ty cty.Type) ([]byte, error) {
+	errs := val.Type().TestConformance(ty)
+	if errs != nil {
+		// Attempt a conversion
+		var err error
+		val, err = convert.Convert(val, ty)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// From this point onward, val can be assumed to be conforming to ty.
+
+	var path cty.Path
+	var buf bytes.Buffer
+	enc := msgpack.NewEncoder(&buf)
+
+	err := marshal(val, ty, path, enc)
+	if err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+func marshal(val cty.Value, ty cty.Type, path cty.Path, enc *msgpack.Encoder) error {
+	if val.IsMarked() {
+		return path.NewErrorf("value has marks, so it cannot be serialized")
+	}
+
+	// If we're going to decode as DynamicPseudoType then we need to save
+	// dynamic type information to recover the real type.
+	if ty == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType {
+		return marshalDynamic(val, path, enc)
+	}
+
+	if !val.IsKnown() {
+		err := enc.Encode(unknownVal)
+		if err != nil {
+			return path.NewError(err)
+		}
+		return nil
+	}
+	if val.IsNull() {
+		err := enc.EncodeNil()
+		if err != nil {
+			return path.NewError(err)
+		}
+		return nil
+	}
+
+	// The caller should've guaranteed that the given val is conformant with
+	// the given type ty, so we'll proceed under that assumption here.
+ switch { + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + err := enc.EncodeString(val.AsString()) + if err != nil { + return path.NewError(err) + } + return nil + case cty.Number: + var err error + switch { + case val.RawEquals(cty.PositiveInfinity): + err = enc.EncodeFloat64(positiveInfinity) + case val.RawEquals(cty.NegativeInfinity): + err = enc.EncodeFloat64(negativeInfinity) + default: + bf := val.AsBigFloat() + if iv, acc := bf.Int64(); acc == big.Exact { + err = enc.EncodeInt(iv) + } else if fv, acc := bf.Float64(); acc == big.Exact { + err = enc.EncodeFloat64(fv) + } else { + err = enc.EncodeString(bf.Text('f', -1)) + } + } + if err != nil { + return path.NewError(err) + } + return nil + case cty.Bool: + err := enc.EncodeBool(val.True()) + if err != nil { + return path.NewError(err) + } + return nil + default: + panic("unsupported primitive type") + } + case ty.IsListType(), ty.IsSetType(): + enc.EncodeArrayLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsMapType(): + enc.EncodeMapLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, enc) + if err != nil { + return err + } + err = marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + enc.EncodeArrayLen(len(etys)) + for it.Next() { + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + i++ + } + return nil + case ty.IsObjectType(): + atys := ty.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + enc.EncodeMapLen(len(names)) + + for _, k := range names { + aty := atys[k] + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, enc) + if err != nil { + return err + } + err = marshal(av, aty, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsCapsuleType(): + return path.NewErrorf("capsule types not supported for msgpack encoding") + default: + // should never happen + return path.NewErrorf("cannot msgpack-serialize %s", ty.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
+func marshalDynamic(val cty.Value, path cty.Path, enc *msgpack.Encoder) error { + dv := dynamicVal{ + Value: val, + Path: path, + } + return enc.Encode(&dv) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go new file mode 100644 index 0000000000..86664bac57 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go @@ -0,0 +1,167 @@ +package msgpack + +import ( + "bytes" + "fmt" + "io" + + "github.com/hashicorp/go-cty/cty" + "github.com/vmihailenco/msgpack" + msgpackcodes "github.com/vmihailenco/msgpack/codes" +) + +// ImpliedType returns the cty Type implied by the structure of the given +// msgpack-compliant buffer. This function implements the default type mapping +// behavior used when decoding arbitrary msgpack without explicit cty Type +// information. +// +// The rules are as follows: +// +// msgpack strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// msgpack maps become cty object types, with the attributes defined by the +// map keys and the types of their values. +// +// msgpack arrays become cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any unknown values are similarly typed as DynamicPseudoType, because these +// do not carry type information on the wire. +// +// Any parse errors will be returned as an error, and the type will be the +// invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := msgpack.NewDecoder(r) + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + // We must now be at the end of the buffer + err = dec.Skip() + if err != io.EOF { + return ty, fmt.Errorf("extra bytes after msgpack value") + } + + return ty, nil +} + +func impliedType(dec *msgpack.Decoder) (cty.Type, error) { + // If this function returns with a nil error then it must have already + // consumed the next value from the decoder, since when called recursively + // the caller will be expecting to find a following value here. 
+ + code, err := dec.PeekCode() + if err != nil { + return cty.NilType, err + } + + switch { + + case code == msgpackcodes.Nil || msgpackcodes.IsExt(code): + err := dec.Skip() + return cty.DynamicPseudoType, err + + case code == msgpackcodes.True || code == msgpackcodes.False: + _, err := dec.DecodeBool() + return cty.Bool, err + + case msgpackcodes.IsFixedNum(code): + _, err := dec.DecodeInt64() + return cty.Number, err + + case code == msgpackcodes.Int8 || code == msgpackcodes.Int16 || code == msgpackcodes.Int32 || code == msgpackcodes.Int64: + _, err := dec.DecodeInt64() + return cty.Number, err + + case code == msgpackcodes.Uint8 || code == msgpackcodes.Uint16 || code == msgpackcodes.Uint32 || code == msgpackcodes.Uint64: + _, err := dec.DecodeUint64() + return cty.Number, err + + case code == msgpackcodes.Float || code == msgpackcodes.Double: + _, err := dec.DecodeFloat64() + return cty.Number, err + + case msgpackcodes.IsString(code): + _, err := dec.DecodeString() + return cty.String, err + + case msgpackcodes.IsFixedMap(code) || code == msgpackcodes.Map16 || code == msgpackcodes.Map32: + return impliedObjectType(dec) + + case msgpackcodes.IsFixedArray(code) || code == msgpackcodes.Array16 || code == msgpackcodes.Array32: + return impliedTupleType(dec) + + default: + return cty.NilType, fmt.Errorf("unsupported msgpack code %#v", code) + } +} + +func impliedObjectType(dec *msgpack.Decoder) (cty.Type, error) { + // If we get in here then we've already peeked the next code and know + // it's some sort of map. + l, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicPseudoType, nil + } + + var atys map[string]cty.Type + + for i := 0; i < l; i++ { + // Read the map key first. We require maps to be strings, but msgpack + // doesn't so we're prepared to error here if not. + k, err := dec.DecodeString() + if err != nil { + return cty.DynamicPseudoType, err + } + + aty, err := impliedType(dec) + if err != nil { + return cty.DynamicPseudoType, err + } + + if atys == nil { + atys = make(map[string]cty.Type) + } + atys[k] = aty + } + + if len(atys) == 0 { + return cty.EmptyObject, nil + } + + return cty.Object(atys), nil +} + +func impliedTupleType(dec *msgpack.Decoder) (cty.Type, error) { + // If we get in here then we've already peeked the next code and know + // it's some sort of array. + l, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicPseudoType, nil + } + + if l == 0 { + return cty.EmptyTuple, nil + } + + etys := make([]cty.Type, l) + + for i := 0; i < l; i++ { + ety, err := impliedType(dec) + if err != nil { + return cty.DynamicPseudoType, err + } + etys[i] = ety + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go similarity index 100% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go new file mode 100644 index 0000000000..67f4c9a4cd --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go @@ -0,0 +1,334 @@ +package msgpack + +import ( + "bytes" + + "github.com/hashicorp/go-cty/cty" + "github.com/vmihailenco/msgpack" + msgpackCodes "github.com/vmihailenco/msgpack/codes" +) + +// Unmarshal interprets the given bytes as a msgpack-encoded cty Value of +// the given type, returning the result. 
+// +// If an error is returned, the error is written with a hypothetical +// end-user that wrote the msgpack file as its audience, using cty type +// system concepts rather than Go type system concepts. +func Unmarshal(b []byte, ty cty.Type) (cty.Value, error) { + r := bytes.NewReader(b) + dec := msgpack.NewDecoder(r) + + var path cty.Path + return unmarshal(dec, ty, path) +} + +func unmarshal(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + peek, err := dec.PeekCode() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + if msgpackCodes.IsExt(peek) { + // We just assume _all_ extensions are unknown values, + // since we don't have any other extensions. + dec.Skip() // skip what we've peeked + return cty.UnknownVal(ty), nil + } + if ty == cty.DynamicPseudoType { + return unmarshalDynamic(dec, path) + } + if peek == msgpackCodes.Nil { + dec.Skip() // skip what we've peeked + return cty.NullVal(ty), nil + } + + switch { + case ty.IsPrimitiveType(): + val, err := unmarshalPrimitive(dec, ty, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case ty.IsListType(): + return unmarshalList(dec, ty.ElementType(), path) + case ty.IsSetType(): + return unmarshalSet(dec, ty.ElementType(), path) + case ty.IsMapType(): + return unmarshalMap(dec, ty.ElementType(), path) + case ty.IsTupleType(): + return unmarshalTuple(dec, ty.TupleElementTypes(), path) + case ty.IsObjectType(): + return unmarshalObject(dec, ty.AttributeTypes(), path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName()) + } +} + +func unmarshalPrimitive(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + switch ty { + case cty.Bool: + rv, err := dec.DecodeBool() + if err != nil { + return cty.DynamicVal, path.NewErrorf("bool is required") + } + return cty.BoolVal(rv), nil + case cty.Number: + // Marshal will try int and float first, if the value can be + // losslessly represented in these encodings, and then fall + // back on a string if the number is too large or too precise. 
+	peek, err := dec.PeekCode()
+	if err != nil {
+		return cty.DynamicVal, path.NewErrorf("number is required")
+	}
+
+	if msgpackCodes.IsFixedNum(peek) {
+		rv, err := dec.DecodeInt64()
+		if err != nil {
+			return cty.DynamicVal, path.NewErrorf("number is required")
+		}
+		return cty.NumberIntVal(rv), nil
+	}
+
+	switch peek {
+	case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64:
+		rv, err := dec.DecodeInt64()
+		if err != nil {
+			return cty.DynamicVal, path.NewErrorf("number is required")
+		}
+		return cty.NumberIntVal(rv), nil
+	case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64:
+		rv, err := dec.DecodeUint64()
+		if err != nil {
+			return cty.DynamicVal, path.NewErrorf("number is required")
+		}
+		return cty.NumberUIntVal(rv), nil
+	case msgpackCodes.Float, msgpackCodes.Double:
+		rv, err := dec.DecodeFloat64()
+		if err != nil {
+			return cty.DynamicVal, path.NewErrorf("number is required")
+		}
+		return cty.NumberFloatVal(rv), nil
+	default:
+		rv, err := dec.DecodeString()
+		if err != nil {
+			return cty.DynamicVal, path.NewErrorf("number is required")
+		}
+		v, err := cty.ParseNumberVal(rv)
+		if err != nil {
+			return cty.DynamicVal, path.NewErrorf("number is required")
+		}
+		return v, nil
+	}
+	case cty.String:
+		rv, err := dec.DecodeString()
+		if err != nil {
+			return cty.DynamicVal, path.NewErrorf("string is required")
+		}
+		return cty.StringVal(rv), nil
+	default:
+		// should never happen
+		panic("unsupported primitive type")
+	}
+}
+
+func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
+	length, err := dec.DecodeArrayLen()
+	if err != nil {
+		return cty.DynamicVal, path.NewErrorf("a list is required")
+	}
+
+	switch {
+	case length < 0:
+		return cty.NullVal(cty.List(ety)), nil
+	case length == 0:
+		return cty.ListValEmpty(ety), nil
+	}
+
+	vals := make([]cty.Value, 0, length)
+	path = append(path, nil)
+	for i := 0; i < length; i++ {
+		path[len(path)-1] = cty.IndexStep{
+			Key: cty.NumberIntVal(int64(i)),
+		}
+
+		val, err := unmarshal(dec, ety, path)
+		if err != nil {
+			return cty.DynamicVal, err
+		}
+
+		vals = append(vals, val)
+	}
+
+	return cty.ListVal(vals), nil
+}
+
+func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
+	length, err := dec.DecodeArrayLen()
+	if err != nil {
+		return cty.DynamicVal, path.NewErrorf("a set is required")
+	}
+
+	switch {
+	case length < 0:
+		return cty.NullVal(cty.Set(ety)), nil
+	case length == 0:
+		return cty.SetValEmpty(ety), nil
+	}
+
+	vals := make([]cty.Value, 0, length)
+	path = append(path, nil)
+	for i := 0; i < length; i++ {
+		path[len(path)-1] = cty.IndexStep{
+			Key: cty.NumberIntVal(int64(i)),
+		}
+
+		val, err := unmarshal(dec, ety, path)
+		if err != nil {
+			return cty.DynamicVal, err
+		}
+
+		vals = append(vals, val)
+	}
+
+	return cty.SetVal(vals), nil
+}
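To make the int/float/string fallback in unmarshalPrimitive above concrete, a small round-trip sketch of our own (ctymsgpack is an assumed import alias, and we assume cty.MustParseNumberVal is available as in upstream cty):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	ctymsgpack "github.com/hashicorp/go-cty/cty/msgpack"
)

func main() {
	// Small integers travel as msgpack ints and decode via DecodeInt64.
	b, _ := ctymsgpack.Marshal(cty.NumberIntVal(42), cty.Number)
	v, _ := ctymsgpack.Unmarshal(b, cty.Number)
	fmt.Println(v.AsBigFloat().String()) // 42

	// A number that fits neither int64 nor float64 losslessly falls back to
	// the decimal-string encoding and is recovered via ParseNumberVal.
	pi := cty.MustParseNumberVal("3.14159265358979323846264338327950288419")
	b, _ = ctymsgpack.Marshal(pi, cty.Number)
	v, _ = ctymsgpack.Unmarshal(b, cty.Number)
	fmt.Println(v.RawEquals(pi)) // true: full precision preserved
}
```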
+
+func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
+	length, err := dec.DecodeMapLen()
+	if err != nil {
+		return cty.DynamicVal, path.NewErrorf("a map is required")
+	}
+
+	switch {
+	case length < 0:
+		return cty.NullVal(cty.Map(ety)), nil
+	case length == 0:
+		return cty.MapValEmpty(ety), nil
+	}
+
+	vals := make(map[string]cty.Value, length)
+	path = append(path, nil)
+	for i := 0; i < length; i++ {
+		key, err := dec.DecodeString()
+		if err != nil {
+			return cty.DynamicVal, path[:len(path)-1].NewErrorf("non-string key in map")
+		}
+
+		path[len(path)-1] = cty.IndexStep{
+			Key: cty.StringVal(key),
+		}
+
+		val, err := unmarshal(dec, ety, path)
+		if err != nil {
+			return cty.DynamicVal, err
+		}
+
+		vals[key] = val
+	}
+
+	return cty.MapVal(vals), nil
+}
+
+func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, path cty.Path) (cty.Value, error) {
+	length, err := dec.DecodeArrayLen()
+	if err != nil {
+		return cty.DynamicVal, path.NewErrorf("a tuple is required")
+	}
+
+	switch {
+	case length < 0:
+		return cty.NullVal(cty.Tuple(etys)), nil
+	case length == 0:
+		return cty.TupleVal(nil), nil
+	case length != len(etys):
+		return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys))
+	}
+
+	vals := make([]cty.Value, 0, length)
+	path = append(path, nil)
+	for i := 0; i < length; i++ {
+		path[len(path)-1] = cty.IndexStep{
+			Key: cty.NumberIntVal(int64(i)),
+		}
+		ety := etys[i]
+
+		val, err := unmarshal(dec, ety, path)
+		if err != nil {
+			return cty.DynamicVal, err
+		}
+
+		vals = append(vals, val)
+	}
+
+	return cty.TupleVal(vals), nil
+}
+
+func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Path) (cty.Value, error) {
+	length, err := dec.DecodeMapLen()
+	if err != nil {
+		return cty.DynamicVal, path.NewErrorf("an object is required")
+	}
+
+	switch {
+	case length < 0:
+		return cty.NullVal(cty.Object(atys)), nil
+	case length == 0:
+		return cty.ObjectVal(nil), nil
+	case length != len(atys):
+		return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required (%d given)",
+			len(atys), length)
+	}
+
+	vals := make(map[string]cty.Value, length)
+	path = append(path, nil)
+	for i := 0; i < length; i++ {
+		key, err := dec.DecodeString()
+		if err != nil {
+			return cty.DynamicVal, path[:len(path)-1].NewErrorf("all keys must be strings")
+		}
+
+		path[len(path)-1] = cty.IndexStep{
+			Key: cty.StringVal(key),
+		}
+		aty, exists := atys[key]
+		if !exists {
+			return cty.DynamicVal, path.NewErrorf("unsupported attribute")
+		}
+
+		val, err := unmarshal(dec, aty, path)
+		if err != nil {
+			return cty.DynamicVal, err
+		}
+
+		vals[key] = val
+	}
+
+	return cty.ObjectVal(vals), nil
+}
+
+func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) {
+	length, err := dec.DecodeArrayLen()
+	if err != nil {
+		return cty.DynamicVal, path.NewError(err)
+	}
+
+	switch {
+	case length == -1:
+		return cty.NullVal(cty.DynamicPseudoType), nil
+	case length != 2:
+		return cty.DynamicVal, path.NewErrorf(
+			"dynamic value array must have exactly two elements",
+		)
+	}
+
+	typeJSON, err := dec.DecodeBytes()
+	if err != nil {
+		return cty.DynamicVal, path.NewError(err)
+	}
+	var ty cty.Type
+	err = (&ty).UnmarshalJSON(typeJSON)
+	if err != nil {
+		return cty.DynamicVal, path.NewError(err)
+	}
+
+	return unmarshal(dec, ty, path)
+}
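unmarshalDynamic above consumes the two-element [type, value] array produced by marshalDynamic. A round-trip sketch of our own (ctymsgpack is an assumed import alias):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	ctymsgpack "github.com/hashicorp/go-cty/cty/msgpack"
)

func main() {
	// Marshalling against cty.DynamicPseudoType wraps the value in a
	// two-element array of [JSON-serialized type, value]...
	b, err := ctymsgpack.Marshal(cty.StringVal("hi"), cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}

	// ...which lets Unmarshal recover both the concrete type and the value.
	v, err := ctymsgpack.Unmarshal(b, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Type().Equals(cty.String)) // true
	fmt.Println(v.AsString())                // "hi"
}
```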
diff --git a/vendor/github.com/hashicorp/go-cty/cty/null.go b/vendor/github.com/hashicorp/go-cty/cty/null.go
new file mode 100644
index 0000000000..d58d0287b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/null.go
@@ -0,0 +1,14 @@
+package cty
+
+// NullVal returns a null value of the given type. A null can be created of any
+// type, but operations on such values will always panic. Calling applications
+// are encouraged to use nulls only sparingly, particularly when user-provided
+// expressions are to be evaluated, since the presence of nulls creates a
+// much higher chance of evaluation errors that can't be caught by a type
+// checker.
+func NullVal(t Type) Value {
+	return Value{
+		ty: t,
+		v:  nil,
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/object_type.go b/vendor/github.com/hashicorp/go-cty/cty/object_type.go
new file mode 100644
index 0000000000..187d38751b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/object_type.go
@@ -0,0 +1,135 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeObject struct {
+	typeImplSigil
+	AttrTypes map[string]Type
+}
+
+// Object creates an object type with the given attribute types.
+//
+// After a map is passed to this function the caller must no longer access it,
+// since ownership is transferred to this library.
+func Object(attrTypes map[string]Type) Type {
+	attrTypesNorm := make(map[string]Type, len(attrTypes))
+	for k, v := range attrTypes {
+		attrTypesNorm[NormalizeString(k)] = v
+	}
+
+	return Type{
+		typeObject{
+			AttrTypes: attrTypesNorm,
+		},
+	}
+}
+
+func (t typeObject) Equals(other Type) bool {
+	if ot, ok := other.typeImpl.(typeObject); ok {
+		if len(t.AttrTypes) != len(ot.AttrTypes) {
+			// Fast path: if we don't have the same number of attributes
+			// then we can't possibly be equal. This also avoids the need
+			// to test attributes in both directions below, since we know
+			// there can't be extras in "other".
+			return false
+		}
+
+		for attr, ty := range t.AttrTypes {
+			oty, ok := ot.AttrTypes[attr]
+			if !ok {
+				return false
+			}
+			if !oty.Equals(ty) {
+				return false
+			}
+		}
+
+		return true
+	}
+	return false
+}
+
+func (t typeObject) FriendlyName(mode friendlyTypeNameMode) string {
+	// There isn't really a friendly way to write an object type due to its
+	// complexity, so we'll just do something English-ish. Callers will
+	// probably want to make some extra effort to avoid ever printing out
+	// an object type FriendlyName in its entirety. For example, could
+	// produce an error message by diffing two object types and saying
+	// something like "Expected attribute foo to be string, but got number".
+	// TODO: Finish this
+	return "object"
+}
+
+func (t typeObject) GoString() string {
+	if len(t.AttrTypes) == 0 {
+		return "cty.EmptyObject"
+	}
+	return fmt.Sprintf("cty.Object(%#v)", t.AttrTypes)
+}
+
+// EmptyObject is a shorthand for Object(map[string]Type{}), to more
+// easily talk about the empty object type.
+var EmptyObject Type
+
+// EmptyObjectVal is the only possible non-null, non-unknown value of type
+// EmptyObject.
+var EmptyObjectVal Value
+
+func init() {
+	EmptyObject = Object(map[string]Type{})
+	EmptyObjectVal = Value{
+		ty: EmptyObject,
+		v:  map[string]interface{}{},
+	}
+}
+
+// IsObjectType returns true if the given type is an object type, regardless
+// of its element type.
+func (t Type) IsObjectType() bool {
+	_, ok := t.typeImpl.(typeObject)
+	return ok
+}
+
+// HasAttribute returns true if the receiver has an attribute with the given
+// name, regardless of its type. Will panic if the receiver isn't an object
+// type; use IsObjectType to determine whether this operation will succeed.
+func (t Type) HasAttribute(name string) bool {
+	name = NormalizeString(name)
+	if ot, ok := t.typeImpl.(typeObject); ok {
+		_, hasAttr := ot.AttrTypes[name]
+		return hasAttr
+	}
+	panic("HasAttribute on non-object Type")
+}
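A quick sketch of the attribute-inspection helpers in this file (our own example, not part of the vendored code):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	ot := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"port": cty.Number,
	})

	fmt.Println(ot.IsObjectType())       // true
	fmt.Println(ot.HasAttribute("name")) // true

	// AttributeType panics on unknown names, so guard with HasAttribute.
	if ot.HasAttribute("port") {
		fmt.Println(ot.AttributeType("port").Equals(cty.Number)) // true
	}
}
```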
+
+// AttributeType returns the type of the attribute with the given name. Will
+// panic if the receiver is not an object type (use IsObjectType to confirm)
+// or if the object type has no such attribute (use HasAttribute to confirm).
+func (t Type) AttributeType(name string) Type {
+	name = NormalizeString(name)
+	if ot, ok := t.typeImpl.(typeObject); ok {
+		aty, hasAttr := ot.AttrTypes[name]
+		if !hasAttr {
+			panic("no such attribute")
+		}
+		return aty
+	}
+	panic("AttributeType on non-object Type")
+}
+
+// AttributeTypes returns a map from attribute names to their associated
+// types. Will panic if the receiver is not an object type (use IsObjectType
+// to confirm).
+//
+// The returned map is part of the internal state of the type, and is provided
+// for read access only. It is forbidden for any caller to modify the returned
+// map. For many purposes the attribute-related methods of Value are more
+// appropriate and more convenient to use.
+func (t Type) AttributeTypes() map[string]Type {
+	if ot, ok := t.typeImpl.(typeObject); ok {
+		return ot.AttrTypes
+	}
+	panic("AttributeTypes on non-object Type")
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/path.go b/vendor/github.com/hashicorp/go-cty/cty/path.go
new file mode 100644
index 0000000000..636e68c63d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/path.go
@@ -0,0 +1,270 @@
+package cty
+
+import (
+	"errors"
+	"fmt"
+)
+
+// A Path is a sequence of operations to locate a nested value within a
+// data structure.
+//
+// The empty Path represents the given item. Any PathSteps within represent
+// taking a single step down into a data structure.
+//
+// Path has some convenience methods for gradually constructing a path,
+// but callers can also feel free to just produce a slice of PathStep manually
+// and convert to this type, which may be more appropriate in environments
+// where memory pressure is a concern.
+//
+// Although a Path is technically mutable, by convention callers should not
+// mutate a path once it has been built and passed to some other subsystem.
+// Instead, use Copy and then mutate the copy before using it.
+type Path []PathStep
+
+// PathStep represents a single step down into a data structure, as part
+// of a Path. PathStep is a closed interface, meaning that the only
+// permitted implementations are those within this package.
+type PathStep interface {
+	pathStepSigil() pathStepImpl
+	Apply(Value) (Value, error)
+}
+
+// embed pathStepImpl into a struct to declare it a PathStep implementation
+type pathStepImpl struct{}
+
+func (p pathStepImpl) pathStepSigil() pathStepImpl {
+	return p
+}
+
+// Index returns a new Path that is the receiver with an IndexStep appended
+// to the end.
+//
+// This is provided as a convenient way to construct paths, but each call
+// will create garbage so it should not be used where memory pressure is a
+// concern.
+func (p Path) Index(v Value) Path {
+	ret := make(Path, len(p)+1)
+	copy(ret, p)
+	ret[len(p)] = IndexStep{
+		Key: v,
+	}
+	return ret
+}
+
+// IndexInt is a typed convenience method for Index.
+func (p Path) IndexInt(v int) Path {
+	return p.Index(NumberIntVal(int64(v)))
+}
+
+// IndexString is a typed convenience method for Index.
+func (p Path) IndexString(v string) Path {
+	return p.Index(StringVal(v))
+}
+
+// IndexPath is a convenience method to start a new Path with an IndexStep.
+func IndexPath(v Value) Path {
+	return Path{}.Index(v)
+}
+
+// IndexIntPath is a typed convenience method for IndexPath.
+func IndexIntPath(v int) Path {
+	return IndexPath(NumberIntVal(int64(v)))
+}
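The step constructors above combine naturally with Apply, defined later in this file. A sketch of our own:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"items": cty.TupleVal([]cty.Value{
			cty.ObjectVal(map[string]cty.Value{
				"name": cty.StringVal("first"),
			}),
		}),
	})

	// Equivalent to writing the steps out by hand as
	// cty.Path{cty.GetAttrStep{...}, cty.IndexStep{...}, cty.GetAttrStep{...}}.
	path := cty.GetAttrPath("items").IndexInt(0).GetAttr("name")

	got, err := path.Apply(val)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.AsString()) // "first"
}
```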
+
+// IndexStringPath is a typed convenience method for IndexPath.
+func IndexStringPath(v string) Path {
+	return IndexPath(StringVal(v))
+}
+
+// GetAttr returns a new Path that is the receiver with a GetAttrStep appended
+// to the end.
+//
+// This is provided as a convenient way to construct paths, but each call
+// will create garbage so it should not be used where memory pressure is a
+// concern.
+func (p Path) GetAttr(name string) Path {
+	ret := make(Path, len(p)+1)
+	copy(ret, p)
+	ret[len(p)] = GetAttrStep{
+		Name: name,
+	}
+	return ret
+}
+
+// Equals compares 2 Paths for exact equality.
+func (p Path) Equals(other Path) bool {
+	if len(p) != len(other) {
+		return false
+	}
+
+	for i := range p {
+		pv := p[i]
+		switch pv := pv.(type) {
+		case GetAttrStep:
+			ov, ok := other[i].(GetAttrStep)
+			if !ok || pv != ov {
+				return false
+			}
+		case IndexStep:
+			ov, ok := other[i].(IndexStep)
+			if !ok {
+				return false
+			}
+
+			if !pv.Key.RawEquals(ov.Key) {
+				return false
+			}
+		default:
+			// Any invalid steps default to evaluating false.
+			return false
+		}
+	}
+
+	return true
+
+}
+
+// HasPrefix determines if the path p contains the provided prefix.
+func (p Path) HasPrefix(prefix Path) bool {
+	if len(prefix) > len(p) {
+		return false
+	}
+
+	return p[:len(prefix)].Equals(prefix)
+}
+
+// GetAttrPath is a convenience method to start a new Path with a GetAttrStep.
+func GetAttrPath(name string) Path {
+	return Path{}.GetAttr(name)
+}
+
+// Apply applies each of the steps in turn to successive values starting with
+// the given value, and returns the result. If any step returns an error,
+// the whole operation returns an error.
+func (p Path) Apply(val Value) (Value, error) {
+	var err error
+	for i, step := range p {
+		val, err = step.Apply(val)
+		if err != nil {
+			return NilVal, fmt.Errorf("at step %d: %s", i, err)
+		}
+	}
+	return val, nil
+}
+
+// LastStep applies the given path up to the last step and then returns
+// the resulting value and the final step.
+//
+// This is useful when dealing with assignment operations, since in that
+// case the *value* of the last step is not important (and may not, in fact,
+// be present at all) and we care only about its location.
+//
+// Since LastStep applies all steps except the last, it will return errors
+// for those steps in the same way as Apply does.
+//
+// If the path has *no* steps then the returned PathStep will be nil,
+// representing that any operation should be applied directly to the
+// given value.
+func (p Path) LastStep(val Value) (Value, PathStep, error) {
+	var err error
+
+	if len(p) == 0 {
+		return val, nil, nil
+	}
+
+	journey := p[:len(p)-1]
+	val, err = journey.Apply(val)
+	if err != nil {
+		return NilVal, nil, err
+	}
+	return val, p[len(p)-1], nil
+}
+
+// Copy makes a shallow copy of the receiver. Often when paths are passed to
+// caller code they come with the constraint that they are valid only until
+// the caller returns, due to how they are constructed internally. Callers
+// can use Copy to conveniently produce a copy of the value that _they_ control
+// the validity of.
+func (p Path) Copy() Path {
+	ret := make(Path, len(p))
+	copy(ret, p)
+	return ret
+}
+
+// IndexStep is a Step implementation representing applying the index operation
+// to a value, which must be of either a list, map, or set type.
+//
+// When describing a path through a *type* rather than a concrete value,
+// the Key may be an unknown value, indicating that the step applies to
+// *any* key of the given type.
+//
+// When indexing into a set, the Key is actually the element being accessed
+// itself, since in sets elements are their own identity.
+type IndexStep struct {
+	pathStepImpl
+	Key Value
+}
+
+// Apply returns the value resulting from indexing the given value with
+// our key value.
+func (s IndexStep) Apply(val Value) (Value, error) {
+	if val == NilVal || val.IsNull() {
+		return NilVal, errors.New("cannot index a null value")
+	}
+
+	switch s.Key.Type() {
+	case Number:
+		if !(val.Type().IsListType() || val.Type().IsTupleType()) {
+			return NilVal, errors.New("not a list type")
+		}
+	case String:
+		if !val.Type().IsMapType() {
+			return NilVal, errors.New("not a map type")
+		}
+	default:
+		return NilVal, errors.New("key value not number or string")
+	}
+
+	has := val.HasIndex(s.Key)
+	if !has.IsKnown() {
+		return UnknownVal(val.Type().ElementType()), nil
+	}
+	if !has.True() {
+		return NilVal, errors.New("value does not have given index key")
+	}
+
+	return val.Index(s.Key), nil
+}
+
+func (s IndexStep) GoString() string {
+	return fmt.Sprintf("cty.IndexStep{Key:%#v}", s.Key)
+}
+
+// GetAttrStep is a Step implementation representing retrieving an attribute
+// from a value, which must be of an object type.
+type GetAttrStep struct {
+	pathStepImpl
+	Name string
+}
+
+// Apply returns the value of our named attribute from the given value, which
+// must be of an object type that has a value of that name.
+func (s GetAttrStep) Apply(val Value) (Value, error) {
+	if val == NilVal || val.IsNull() {
+		return NilVal, errors.New("cannot access attributes on a null value")
+	}
+
+	if !val.Type().IsObjectType() {
+		return NilVal, errors.New("not an object type")
+	}
+
+	if !val.Type().HasAttribute(s.Name) {
+		return NilVal, fmt.Errorf("object has no attribute %q", s.Name)
+	}
+
+	return val.GetAttr(s.Name), nil
+}
+
+func (s GetAttrStep) GoString() string {
+	return fmt.Sprintf("cty.GetAttrStep{Name:%q}", s.Name)
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/path_set.go b/vendor/github.com/hashicorp/go-cty/cty/path_set.go
new file mode 100644
index 0000000000..977523de57
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/path_set.go
@@ -0,0 +1,198 @@
+package cty
+
+import (
+	"fmt"
+	"hash/crc64"
+
+	"github.com/hashicorp/go-cty/cty/set"
+)
+
+// PathSet represents a set of Path objects. This can be used, for example,
+// to talk about a subset of paths within a value that meet some criteria,
+// without directly modifying the values at those paths.
+type PathSet struct {
+	set set.Set
+}
+
+// NewPathSet creates and returns a PathSet, with initial contents optionally
+// set by the given arguments.
+func NewPathSet(paths ...Path) PathSet {
+	ret := PathSet{
+		set: set.NewSet(pathSetRules{}),
+	}
+
+	for _, path := range paths {
+		ret.Add(path)
+	}
+
+	return ret
+}
+
+// Add inserts a single given path into the set.
+//
+// Paths are immutable after construction by convention. It is particularly
+// important not to mutate a path after it has been placed into a PathSet.
+// If a Path is mutated while in a set, behavior is undefined.
+func (s PathSet) Add(path Path) {
+	s.set.Add(path)
+}
+
+// AddAllSteps is like Add but it also adds all of the steps leading to
+// the given path.
+//
+// For example, if given a path representing "foo.bar", it will add both
+// "foo" and "foo.bar".
+func (s PathSet) AddAllSteps(path Path) {
+	for i := 1; i <= len(path); i++ {
+		s.Add(path[:i])
+	}
+}
+
+// Has returns true if the given path is in the receiving set.
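+//
+// An illustrative sketch (not part of the upstream source):
+//
+//	ps := cty.NewPathSet(cty.GetAttrPath("foo").IndexInt(0))
+//	ps.Has(cty.GetAttrPath("foo").IndexInt(0)) // true
+//	ps.Has(cty.GetAttrPath("foo"))             // false; only exact paths match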
+func (s PathSet) Has(path Path) bool {
+	return s.set.Has(path)
+}
+
+// List makes and returns a slice of all of the paths in the receiving set,
+// in an undefined but consistent order.
+func (s PathSet) List() []Path {
+	if s.Empty() {
+		return nil
+	}
+	ret := make([]Path, 0, s.set.Length())
+	for it := s.set.Iterator(); it.Next(); {
+		ret = append(ret, it.Value().(Path))
+	}
+	return ret
+}
+
+// Remove modifies the receiving set to no longer include the given path.
+// If the given path was already absent, this is a no-op.
+func (s PathSet) Remove(path Path) {
+	s.set.Remove(path)
+}
+
+// Empty returns true if the length of the receiving set is zero.
+func (s PathSet) Empty() bool {
+	return s.set.Length() == 0
+}
+
+// Union returns a new set whose contents are the union of the receiver and
+// the given other set.
+func (s PathSet) Union(other PathSet) PathSet {
+	return PathSet{
+		set: s.set.Union(other.set),
+	}
+}
+
+// Intersection returns a new set whose contents are the intersection of the
+// receiver and the given other set.
+func (s PathSet) Intersection(other PathSet) PathSet {
+	return PathSet{
+		set: s.set.Intersection(other.set),
+	}
+}
+
+// Subtract returns a new set whose contents are those from the receiver with
+// any elements of the other given set subtracted.
+func (s PathSet) Subtract(other PathSet) PathSet {
+	return PathSet{
+		set: s.set.Subtract(other.set),
+	}
+}
+
+// SymmetricDifference returns a new set whose contents are the symmetric
+// difference of the receiver and the given other set.
+func (s PathSet) SymmetricDifference(other PathSet) PathSet {
+	return PathSet{
+		set: s.set.SymmetricDifference(other.set),
+	}
+}
+
+// Equal returns true if and only if both the receiver and the given other
+// set contain exactly the same paths.
+func (s PathSet) Equal(other PathSet) bool {
+	if s.set.Length() != other.set.Length() {
+		return false
+	}
+	// Now we know the lengths are the same we only need to test in one
+	// direction whether everything in one is in the other.
+	for it := s.set.Iterator(); it.Next(); {
+		if !other.set.Has(it.Value()) {
+			return false
+		}
+	}
+	return true
+}
+
+var crc64Table = crc64.MakeTable(crc64.ISO)
+
+var indexStepPlaceholder = []byte("#")
+
+// pathSetRules is an implementation of set.Rules from the set package,
+// used internally within PathSet.
+type pathSetRules struct {
+}
+
+func (r pathSetRules) Hash(v interface{}) int {
+	path := v.(Path)
+	hash := crc64.New(crc64Table)
+
+	for _, rawStep := range path {
+		switch step := rawStep.(type) {
+		case GetAttrStep:
+			// (this creates some garbage converting the string name to a
+			// []byte, but that's okay since cty is not designed to be
+			// used in tight loops under memory pressure.)
+			hash.Write([]byte(step.Name))
+		default:
+			// For any other step type we just append a predefined value,
+			// which means that e.g. all indexes into a given collection will
+			// hash to the same value but we assume that collections are
+			// small and thus this won't hurt too much.
+			hash.Write(indexStepPlaceholder)
+		}
+	}
+
+	// We discard half of the hash on 32-bit platforms; collisions just make
+	// our lookups take marginally longer, so not a big deal.
+	return int(hash.Sum64())
+}
+
+func (r pathSetRules) Equivalent(a, b interface{}) bool {
+	aPath := a.(Path)
+	bPath := b.(Path)
+
+	if len(aPath) != len(bPath) {
+		return false
+	}
+
+	for i := range aPath {
+		switch aStep := aPath[i].(type) {
+		case GetAttrStep:
+			bStep, ok := bPath[i].(GetAttrStep)
+			if !ok {
+				return false
+			}
+
+			if aStep.Name != bStep.Name {
+				return false
+			}
+		case IndexStep:
+			bStep, ok := bPath[i].(IndexStep)
+			if !ok {
+				return false
+			}
+
+			eq := aStep.Key.Equals(bStep.Key)
+			if !eq.IsKnown() || eq.False() {
+				return false
+			}
+		default:
+			// Should never happen, since we document PathStep as a closed type.
+			panic(fmt.Errorf("unsupported step type %T", aStep))
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go b/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go
new file mode 100644
index 0000000000..7b3d1196cd
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go
@@ -0,0 +1,122 @@
+package cty
+
+import "math/big"
+
+// primitiveType is the hidden implementation of the various primitive types
+// that are exposed as variables in this package.
+type primitiveType struct {
+	typeImplSigil
+	Kind primitiveTypeKind
+}
+
+type primitiveTypeKind byte
+
+const (
+	primitiveTypeBool   primitiveTypeKind = 'B'
+	primitiveTypeNumber primitiveTypeKind = 'N'
+	primitiveTypeString primitiveTypeKind = 'S'
+)
+
+func (t primitiveType) Equals(other Type) bool {
+	if otherP, ok := other.typeImpl.(primitiveType); ok {
+		return otherP.Kind == t.Kind
+	}
+	return false
+}
+
+func (t primitiveType) FriendlyName(mode friendlyTypeNameMode) string {
+	switch t.Kind {
+	case primitiveTypeBool:
+		return "bool"
+	case primitiveTypeNumber:
+		return "number"
+	case primitiveTypeString:
+		return "string"
+	default:
+		// should never happen
+		panic("invalid primitive type")
+	}
+}
+
+func (t primitiveType) GoString() string {
+	switch t.Kind {
+	case primitiveTypeBool:
+		return "cty.Bool"
+	case primitiveTypeNumber:
+		return "cty.Number"
+	case primitiveTypeString:
+		return "cty.String"
+	default:
+		// should never happen
+		panic("invalid primitive type")
+	}
+}
+
+// Number is the numeric type. Number values are arbitrary-precision
+// decimal numbers, which can then be converted into Go's various numeric
+// types only if they are in the appropriate range.
+var Number Type
+
+// String is the string type. String values are sequences of unicode codepoints
+// encoded internally as UTF-8.
+var String Type
+
+// Bool is the boolean type. The two values of this type are True and False.
+var Bool Type
+
+// True is the truthy value of type Bool
+var True Value
+
+// False is the falsey value of type Bool
+var False Value
+
+// Zero is a number value representing exactly zero.
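+// For example (illustrative, not part of the upstream source),
+// cty.Zero.Equals(cty.NumberIntVal(0)) returns cty.True.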
+var Zero Value
+
+// PositiveInfinity is a Number value representing positive infinity
+var PositiveInfinity Value
+
+// NegativeInfinity is a Number value representing negative infinity
+var NegativeInfinity Value
+
+func init() {
+	Number = Type{
+		primitiveType{Kind: primitiveTypeNumber},
+	}
+	String = Type{
+		primitiveType{Kind: primitiveTypeString},
+	}
+	Bool = Type{
+		primitiveType{Kind: primitiveTypeBool},
+	}
+	True = Value{
+		ty: Bool,
+		v:  true,
+	}
+	False = Value{
+		ty: Bool,
+		v:  false,
+	}
+	Zero = Value{
+		ty: Number,
+		v:  big.NewFloat(0),
+	}
+	PositiveInfinity = Value{
+		ty: Number,
+		v:  (&big.Float{}).SetInf(false),
+	}
+	NegativeInfinity = Value{
+		ty: Number,
+		v:  (&big.Float{}).SetInf(true),
+	}
+}
+
+// IsPrimitiveType returns true if and only if the receiver is a primitive
+// type, which means it's either number, string, or bool. Any two primitive
+// types can be safely compared for equality using the standard == operator
+// without panic, which is not a guarantee that holds for all types. Primitive
+// types can therefore also be used in switch statements.
+func (t Type) IsPrimitiveType() bool {
+	_, ok := t.typeImpl.(primitiveType)
+	return ok
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/set/gob.go b/vendor/github.com/hashicorp/go-cty/cty/set/gob.go
similarity index 100%
rename from vendor/github.com/zclconf/go-cty/cty/set/gob.go
rename to vendor/github.com/hashicorp/go-cty/cty/set/gob.go
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go b/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go
new file mode 100644
index 0000000000..4a60494f9d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go
@@ -0,0 +1,15 @@
+package set
+
+type Iterator struct {
+	vals []interface{}
+	idx  int
+}
+
+func (it *Iterator) Value() interface{} {
+	return it.vals[it.idx]
+}
+
+func (it *Iterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.vals)
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/ops.go b/vendor/github.com/hashicorp/go-cty/cty/set/ops.go
new file mode 100644
index 0000000000..fd1555f218
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/set/ops.go
@@ -0,0 +1,210 @@
+package set
+
+import (
+	"sort"
+)
+
+// Add inserts the given value into the receiving Set.
+//
+// This mutates the set in-place. This operation is not thread-safe.
+func (s Set) Add(val interface{}) {
+	hv := s.rules.Hash(val)
+	if _, ok := s.vals[hv]; !ok {
+		s.vals[hv] = make([]interface{}, 0, 1)
+	}
+	bucket := s.vals[hv]
+
+	// See if an equivalent value is already present
+	for _, ev := range bucket {
+		if s.rules.Equivalent(val, ev) {
+			return
+		}
+	}
+
+	s.vals[hv] = append(bucket, val)
+}
+
+// Remove deletes the given value from the receiving set, if indeed it was
+// there in the first place. If the value is not present, this is a no-op.
+func (s Set) Remove(val interface{}) {
+	hv := s.rules.Hash(val)
+	bucket, ok := s.vals[hv]
+	if !ok {
+		return
+	}
+
+	for i, ev := range bucket {
+		if s.rules.Equivalent(val, ev) {
+			newBucket := make([]interface{}, 0, len(bucket)-1)
+			newBucket = append(newBucket, bucket[:i]...)
+			newBucket = append(newBucket, bucket[i+1:]...)
+			if len(newBucket) > 0 {
+				s.vals[hv] = newBucket
+			} else {
+				delete(s.vals, hv)
+			}
+			return
+		}
+	}
+}
+
+// Has returns true if the given value is in the receiving set, or false if
+// it is not.
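+//
+// A minimal usage sketch (illustrative only; r stands for some Rules
+// implementation and is not part of the upstream source):
+//
+//	s := NewSet(r)
+//	s.Add(v)
+//	s.Has(v) // true for any value equivalent to v under r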
+func (s Set) Has(val interface{}) bool {
+	hv := s.rules.Hash(val)
+	bucket, ok := s.vals[hv]
+	if !ok {
+		return false
+	}
+
+	for _, ev := range bucket {
+		if s.rules.Equivalent(val, ev) {
+			return true
+		}
+	}
+	return false
+}
+
+// Copy performs a shallow copy of the receiving set, returning a new set
+// with the same rules and elements.
+func (s Set) Copy() Set {
+	ret := NewSet(s.rules)
+	for k, v := range s.vals {
+		ret.vals[k] = v
+	}
+	return ret
+}
+
+// Iterator returns an iterator over values in the set. If the set's rules
+// implement OrderedRules then the result is ordered per those rules. If
+// no order is provided, or if it is not a total order, then the iteration
+// order is undefined but consistent for a particular version of cty. Do not
+// rely on specific ordering between cty releases unless the rules order is a
+// total order.
+//
+// The pattern for using the returned iterator is:
+//
+//	it := set.Iterator()
+//	for it.Next() {
+//		val := it.Value()
+//		// ...
+//	}
+//
+// Once an iterator has been created for a set, the set *must not* be mutated
+// until the iterator is no longer in use.
+func (s Set) Iterator() *Iterator {
+	vals := s.Values()
+
+	return &Iterator{
+		vals: vals,
+		idx:  -1,
+	}
+}
+
+// EachValue calls the given callback once for each value in the set, in an
+// undefined order that callers should not depend on.
+func (s Set) EachValue(cb func(interface{})) {
+	it := s.Iterator()
+	for it.Next() {
+		cb(it.Value())
+	}
+}
+
+// Values returns a slice of all the values in the set. If the set rules have
+// an order then the result is in that order. If no order is provided or if
+// it is not a total order then the result order is undefined, but consistent
+// for a particular set value within a specific release of cty.
+func (s Set) Values() []interface{} {
+	var ret []interface{}
+	// Sort the bucketIds to ensure that we always traverse in a
+	// consistent order.
+	bucketIDs := make([]int, 0, len(s.vals))
+	for id := range s.vals {
+		bucketIDs = append(bucketIDs, id)
+	}
+	sort.Ints(bucketIDs)
+
+	for _, bucketID := range bucketIDs {
+		ret = append(ret, s.vals[bucketID]...)
+	}
+
+	if orderRules, ok := s.rules.(OrderedRules); ok {
+		sort.SliceStable(ret, func(i, j int) bool {
+			return orderRules.Less(ret[i], ret[j])
+		})
+	}
+
+	return ret
+}
+
+// Length returns the number of values in the set.
+func (s Set) Length() int {
+	var count int
+	for _, bucket := range s.vals {
+		count += len(bucket)
+	}
+	return count
+}
+
+// Union returns a new set that contains all of the members of both the
+// receiving set and the given set. Both sets must have the same rules, or
+// else this function will panic.
+func (s1 Set) Union(s2 Set) Set {
+	mustHaveSameRules(s1, s2)
+	rs := NewSet(s1.rules)
+	s1.EachValue(func(v interface{}) {
+		rs.Add(v)
+	})
+	s2.EachValue(func(v interface{}) {
+		rs.Add(v)
+	})
+	return rs
+}
+
+// Intersection returns a new set that contains the values that both the
+// receiver and given sets have in common. Both sets must have the same rules,
+// or else this function will panic.
+func (s1 Set) Intersection(s2 Set) Set {
+	mustHaveSameRules(s1, s2)
+	rs := NewSet(s1.rules)
+	s1.EachValue(func(v interface{}) {
+		if s2.Has(v) {
+			rs.Add(v)
+		}
+	})
+	return rs
+}
+
+// Subtract returns a new set that contains all of the values from the receiver
+// that are not also in the given set. Both sets must have the same rules,
+// or else this function will panic.
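+//
+// For example (illustrative): if s1 contains {1, 2, 3} and s2 contains {2},
+// then s1.Subtract(s2) contains {1, 3}.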
+func (s1 Set) Subtract(s2 Set) Set {
+	mustHaveSameRules(s1, s2)
+	rs := NewSet(s1.rules)
+	s1.EachValue(func(v interface{}) {
+		if !s2.Has(v) {
+			rs.Add(v)
+		}
+	})
+	return rs
+}
+
+// SymmetricDifference returns a new set that contains all of the values from
+// both the receiver and given sets, except those that both sets have in
+// common. Both sets must have the same rules, or else this function will
+// panic.
+func (s1 Set) SymmetricDifference(s2 Set) Set {
+	mustHaveSameRules(s1, s2)
+	rs := NewSet(s1.rules)
+	s1.EachValue(func(v interface{}) {
+		if !s2.Has(v) {
+			rs.Add(v)
+		}
+	})
+	s2.EachValue(func(v interface{}) {
+		if !s1.Has(v) {
+			rs.Add(v)
+		}
+	})
+	return rs
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/rules.go b/vendor/github.com/hashicorp/go-cty/cty/set/rules.go
new file mode 100644
index 0000000000..51f744b5e9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/set/rules.go
@@ -0,0 +1,43 @@
+package set
+
+// Rules represents the operations that define membership for a Set.
+//
+// Each Set has a Rules instance, whose methods must satisfy the interface
+// contracts given below for any value that will be added to the set.
+type Rules interface {
+	// Hash returns an int that somewhat-uniquely identifies the given value.
+	//
+	// A good hash function will minimize collisions for values that will be
+	// added to the set, though collisions *are* permitted. Collisions will
+	// simply reduce the efficiency of operations on the set.
+	Hash(interface{}) int
+
+	// Equivalent returns true if and only if the two values are considered
+	// equivalent for the sake of set membership. Two values that are
+	// equivalent cannot exist in the set at the same time, and if two
+	// equivalent values are added it is undefined which one will be
+	// returned when enumerating all of the set members.
+	//
+	// Two values that are equivalent *must* result in the same hash value,
+	// though it is *not* required that two values with the same hash value
+	// be equivalent.
+	Equivalent(interface{}, interface{}) bool
+}
+
+// OrderedRules is an extension of Rules that can apply a partial order to
+// element values. When a set's Rules implements OrderedRules an iterator
+// over the set will return items in the order described by the rules.
+//
+// If the given order is not a total order (that is, some pairs of
+// non-equivalent elements do not have a defined order) then the resulting
+// iteration order is undefined but consistent for a particular version of
+// cty. The exact order in that case is not part of the contract and is
+// subject to change between versions.
+type OrderedRules interface {
+	Rules
+
+	// Less returns true if and only if the first argument should sort before
+	// the second argument. If the second argument should sort before the first
+	// or if there is no defined order for the values, return false.
+	Less(interface{}, interface{}) bool
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/set.go b/vendor/github.com/hashicorp/go-cty/cty/set/set.go
new file mode 100644
index 0000000000..b4fb316f1c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/set/set.go
@@ -0,0 +1,62 @@
+package set
+
+import (
+	"fmt"
+)
+
+// Set is an implementation of the concept of a set: a collection where all
+// values are conceptually either in or out of the set, but the members are
+// not ordered.
+//
+// This type primarily exists to be the internal type of sets in cty, but
+// it is considered to be at the same level of abstraction as Go's built in
+// slice and map collection types, and so should make no cty-specific
+// assumptions.
+//
+// Set operations are not thread safe. It is the caller's responsibility to
+// provide mutex guarantees where necessary.
+//
+// Set operations are not optimized to minimize memory pressure. Mutating
+// a set will generally create garbage and so should perhaps be avoided in
+// tight loops where memory pressure is a concern.
+type Set struct {
+	vals  map[int][]interface{}
+	rules Rules
+}
+
+// NewSet returns an empty set with the membership rules given.
+func NewSet(rules Rules) Set {
+	return Set{
+		vals:  map[int][]interface{}{},
+		rules: rules,
+	}
+}
+
+// NewSetFromSlice returns a new set, with the given membership rules, that
+// contains all of the values in the given slice.
+func NewSetFromSlice(rules Rules, vals []interface{}) Set {
+	s := NewSet(rules)
+	for _, v := range vals {
+		s.Add(v)
+	}
+	return s
+}
+
+func sameRules(s1 Set, s2 Set) bool {
+	return s1.rules == s2.rules
+}
+
+func mustHaveSameRules(s1 Set, s2 Set) {
+	if !sameRules(s1, s2) {
+		panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules))
+	}
+}
+
+// HasRules returns true if and only if the receiving set has the given rules
+// instance as its rules.
+func (s Set) HasRules(rules Rules) bool {
+	return s.rules == rules
+}
+
+// Rules returns the receiving set's rules instance.
+func (s Set) Rules() Rules {
+	return s.rules
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_helper.go b/vendor/github.com/hashicorp/go-cty/cty/set_helper.go
new file mode 100644
index 0000000000..3162284262
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/set_helper.go
@@ -0,0 +1,132 @@
+package cty
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/go-cty/cty/set"
+)
+
+// ValueSet is to cty.Set what []cty.Value is to cty.List and
+// map[string]cty.Value is to cty.Map. It's provided to allow callers a
+// convenient interface for manipulating sets before wrapping them in cty.Set
+// values using cty.SetValFromValueSet.
+//
+// Unlike value slices and value maps, ValueSet instances have a single
+// homogeneous element type because that is a requirement of the underlying
+// set implementation, which uses the element type to select a suitable
+// hashing function.
+//
+// Set mutations are not concurrency-safe.
+type ValueSet struct {
+	// ValueSet is just a thin wrapper around a set.Set with our value-oriented
+	// "rules" applied. We do this so that the caller can work in terms of
+	// cty.Value objects even though the set internals use the raw values.
+	s set.Set
+}
+
+// NewValueSet creates and returns a new ValueSet with the given element type.
+func NewValueSet(ety Type) ValueSet {
+	return newValueSet(set.NewSet(setRules{Type: ety}))
+}
+
+func newValueSet(s set.Set) ValueSet {
+	return ValueSet{
+		s: s,
+	}
+}
+
+// ElementType returns the element type for the receiving ValueSet.
+func (s ValueSet) ElementType() Type {
+	return s.s.Rules().(setRules).Type
+}
+
+// Add inserts the given value into the receiving set.
+func (s ValueSet) Add(v Value) {
+	s.requireElementType(v)
+	s.s.Add(v.v)
+}
+
+// Remove deletes the given value from the receiving set, if indeed it was
+// there in the first place. If the value is not present, this is a no-op.
+func (s ValueSet) Remove(v Value) {
+	s.requireElementType(v)
+	s.s.Remove(v.v)
+}
+
+// Has returns true if the given value is in the receiving set, or false if
+// it is not.
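+//
+// An illustrative sketch (not part of the upstream source):
+//
+//	s := cty.NewValueSet(cty.String)
+//	s.Add(cty.StringVal("a"))
+//	s.Has(cty.StringVal("a")) // true
+//	s.Has(cty.StringVal("b")) // false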
+func (s ValueSet) Has(v Value) bool {
+	s.requireElementType(v)
+	return s.s.Has(v.v)
+}
+
+// Copy performs a shallow copy of the receiving set, returning a new set
+// with the same rules and elements.
+func (s ValueSet) Copy() ValueSet {
+	return newValueSet(s.s.Copy())
+}
+
+// Length returns the number of values in the set.
+func (s ValueSet) Length() int {
+	return s.s.Length()
+}
+
+// Values returns a slice of all of the values in the set in no particular
+// order.
+func (s ValueSet) Values() []Value {
+	l := s.s.Length()
+	if l == 0 {
+		return nil
+	}
+	ret := make([]Value, 0, l)
+	ety := s.ElementType()
+	for it := s.s.Iterator(); it.Next(); {
+		ret = append(ret, Value{
+			ty: ety,
+			v:  it.Value(),
+		})
+	}
+	return ret
+}
+
+// Union returns a new set that contains all of the members of both the
+// receiving set and the given set. Both sets must have the same element type,
+// or else this function will panic.
+func (s ValueSet) Union(other ValueSet) ValueSet {
+	return newValueSet(s.s.Union(other.s))
+}
+
+// Intersection returns a new set that contains the values that both the
+// receiver and given sets have in common. Both sets must have the same element
+// type, or else this function will panic.
+func (s ValueSet) Intersection(other ValueSet) ValueSet {
+	return newValueSet(s.s.Intersection(other.s))
+}
+
+// Subtract returns a new set that contains all of the values from the receiver
+// that are not also in the given set. Both sets must have the same element
+// type, or else this function will panic.
+func (s ValueSet) Subtract(other ValueSet) ValueSet {
+	return newValueSet(s.s.Subtract(other.s))
+}
+
+// SymmetricDifference returns a new set that contains all of the values from
+// both the receiver and given sets, except those that both sets have in
+// common. Both sets must have the same element type, or else this function
+// will panic.
+func (s ValueSet) SymmetricDifference(other ValueSet) ValueSet {
+	return newValueSet(s.s.SymmetricDifference(other.s))
+}
+
+// requireElementType panics if the given value is not of the set's element type.
+//
+// It also panics if the given value is marked, because marked values cannot
+// be stored in sets.
+func (s ValueSet) requireElementType(v Value) {
+	if v.IsMarked() {
+		panic("cannot store marked value directly in a set (make the set itself unknown instead)")
+	}
+	if !v.Type().Equals(s.ElementType()) {
+		panic(fmt.Errorf("attempt to use %#v value with set of %#v", v.Type(), s.ElementType()))
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_internals.go b/vendor/github.com/hashicorp/go-cty/cty/set_internals.go
new file mode 100644
index 0000000000..4080198097
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/set_internals.go
@@ -0,0 +1,244 @@
+package cty
+
+import (
+	"bytes"
+	"fmt"
+	"hash/crc32"
+	"math/big"
+	"sort"
+
+	"github.com/hashicorp/go-cty/cty/set"
+)
+
+// setRules provides a Rules implementation for the ./set package that
+// respects the equality rules for cty values of the given type.
+//
+// This implementation expects that values added to the set will be
+// valid internal values for the given Type, which is to say that wrapping
+// the given value in a Value struct along with the ruleset's type should
+// produce a valid, working Value.
+type setRules struct {
+	Type Type
+}
+
+var _ set.OrderedRules = setRules{}
+
+// Hash returns a hash value for the receiver that can be used for equality
+// checks where some inaccuracy is tolerable.
+//
+// The hash function is value-type-specific, so it is not meaningful to compare
+// hash results for values of different types.
+//
+// This function is not safe to use for security-related applications, since
+// the hash used is not strong enough.
+func (val Value) Hash() int {
+	hashBytes, marks := makeSetHashBytes(val)
+	if len(marks) > 0 {
+		panic("can't take hash of value that has marks or has embedded values that have marks")
+	}
+	return int(crc32.ChecksumIEEE(hashBytes))
+}
+
+func (r setRules) Hash(v interface{}) int {
+	return Value{
+		ty: r.Type,
+		v:  v,
+	}.Hash()
+}
+
+func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool {
+	v1v := Value{
+		ty: r.Type,
+		v:  v1,
+	}
+	v2v := Value{
+		ty: r.Type,
+		v:  v2,
+	}
+
+	eqv := v1v.Equals(v2v)
+
+	// By comparing the result to true we ensure that an Unknown result,
+	// which will result if either value is unknown, will be considered
+	// as non-equivalent. Two unknown values are not equivalent for the
+	// sake of set membership.
+	return eqv.v == true
+}
+
+// Less is an implementation of set.OrderedRules so that we can iterate over
+// set elements in a consistent order, where such an order is possible.
+func (r setRules) Less(v1, v2 interface{}) bool {
+	v1v := Value{
+		ty: r.Type,
+		v:  v1,
+	}
+	v2v := Value{
+		ty: r.Type,
+		v:  v2,
+	}
+
+	if v1v.RawEquals(v2v) { // Easy case: if they are equal then v1 can't be less
+		return false
+	}
+
+	// Null values always sort after non-null values
+	if v2v.IsNull() && !v1v.IsNull() {
+		return true
+	} else if v1v.IsNull() {
+		return false
+	}
+	// Unknown values always sort after known values
+	if v1v.IsKnown() && !v2v.IsKnown() {
+		return true
+	} else if !v1v.IsKnown() {
+		return false
+	}
+
+	switch r.Type {
+	case String:
+		// String values sort lexicographically
+		return v1v.AsString() < v2v.AsString()
+	case Bool:
+		// Weird to have a set of bools, but if we do then false sorts before true.
+		if v2v.True() || !v1v.True() {
+			return true
+		}
+		return false
+	case Number:
+		v1f := v1v.AsBigFloat()
+		v2f := v2v.AsBigFloat()
+		return v1f.Cmp(v2f) < 0
+	default:
+		// No other types have a well-defined ordering, so we just produce a
+		// default consistent-but-undefined ordering then. This situation is
+		// not considered a compatibility constraint; callers should rely only
+		// on the ordering rules for primitive values.
+		v1h, _ := makeSetHashBytes(v1v)
+		v2h, _ := makeSetHashBytes(v2v)
+		return bytes.Compare(v1h, v2h) < 0
+	}
+}
+
+func makeSetHashBytes(val Value) ([]byte, ValueMarks) {
+	var buf bytes.Buffer
+	marks := make(ValueMarks)
+	appendSetHashBytes(val, &buf, marks)
+	return buf.Bytes(), marks
+}
+
+func appendSetHashBytes(val Value, buf *bytes.Buffer, marks ValueMarks) {
+	// Exactly what bytes we generate here don't matter as long as the following
+	// constraints hold:
+	// - Unknown and null values all generate distinct strings from
+	//   each other and from any normal value of the given type.
+	// - The delimiter used to separate items in a compound structure can
+	//   never appear literally in any of its elements.
+	// Since we don't support heterogeneous lists we don't need to worry about
+	// collisions between values of different types, apart from
+	// PseudoTypeDynamic.
+	// If in practice we *do* get a collision then it's not a big deal because
+	// the Equivalent function will still distinguish values, but set
+	// performance will be best if we are able to produce a distinct string
+	// for each distinct value, unknown values notwithstanding.
+
+	// Marks aren't considered part of a value for equality-testing purposes,
+	// so we'll unmark our value before we work with it but we'll remember
+	// the marks in case the caller needs to re-apply them to a derived
+	// value.
+	if val.IsMarked() {
+		unmarkedVal, valMarks := val.Unmark()
+		for m := range valMarks {
+			marks[m] = struct{}{}
+		}
+		val = unmarkedVal
+	}
+
+	if !val.IsKnown() {
+		buf.WriteRune('?')
+		return
+	}
+	if val.IsNull() {
+		buf.WriteRune('~')
+		return
+	}
+
+	switch val.ty {
+	case Number:
+		// Due to an unfortunate quirk of gob encoding for big.Float, we end up
+		// with non-pointer values immediately after a gob round-trip, and
+		// we end up in here before we've had a chance to run
+		// gobDecodeFixNumberPtr on the inner values of a gob-encoded set,
+		// and so sadly we must make a special effort to handle that situation
+		// here just so that we can get far enough along to fix it up for
+		// everything else in this package.
+		if bf, ok := val.v.(big.Float); ok {
+			buf.WriteString(bf.String())
+			return
+		}
+		buf.WriteString(val.v.(*big.Float).String())
+		return
+	case Bool:
+		if val.v.(bool) {
+			buf.WriteRune('T')
+		} else {
+			buf.WriteRune('F')
+		}
+		return
+	case String:
+		buf.WriteString(fmt.Sprintf("%q", val.v.(string)))
+		return
+	}
+
+	if val.ty.IsMapType() {
+		buf.WriteRune('{')
+		val.ForEachElement(func(keyVal, elementVal Value) bool {
+			appendSetHashBytes(keyVal, buf, marks)
+			buf.WriteRune(':')
+			appendSetHashBytes(elementVal, buf, marks)
+			buf.WriteRune(';')
+			return false
+		})
+		buf.WriteRune('}')
+		return
+	}
+
+	if val.ty.IsListType() || val.ty.IsSetType() {
+		buf.WriteRune('[')
+		val.ForEachElement(func(keyVal, elementVal Value) bool {
+			appendSetHashBytes(elementVal, buf, marks)
+			buf.WriteRune(';')
+			return false
+		})
+		buf.WriteRune(']')
+		return
+	}
+
+	if val.ty.IsObjectType() {
+		buf.WriteRune('<')
+		attrNames := make([]string, 0, len(val.ty.AttributeTypes()))
+		for attrName := range val.ty.AttributeTypes() {
+			attrNames = append(attrNames, attrName)
+		}
+		sort.Strings(attrNames)
+		for _, attrName := range attrNames {
+			appendSetHashBytes(val.GetAttr(attrName), buf, marks)
+			buf.WriteRune(';')
+		}
+		buf.WriteRune('>')
+		return
+	}
+
+	if val.ty.IsTupleType() {
+		buf.WriteRune('<')
+		val.ForEachElement(func(keyVal, elementVal Value) bool {
+			appendSetHashBytes(elementVal, buf, marks)
+			buf.WriteRune(';')
+			return false
+		})
+		buf.WriteRune('>')
+		return
+	}
+
+	// should never get down here
+	panic("unsupported type in set hash")
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_type.go b/vendor/github.com/hashicorp/go-cty/cty/set_type.go
new file mode 100644
index 0000000000..cbc3706f2c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/set_type.go
@@ -0,0 +1,72 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeSet struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// Set creates a set type with the given element Type.
+//
+// Set types are CollectionType implementations.
+func Set(elem Type) Type {
+	return Type{
+		typeSet{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a set whose element type is
+// equal to that of the receiver.
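+//
+// For example (illustrative): cty.Set(cty.String).Equals(cty.Set(cty.String))
+// is true, while cty.Set(cty.String).Equals(cty.List(cty.String)) is false.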
+func (t typeSet) Equals(other Type) bool {
+	ot, isSet := other.typeImpl.(typeSet)
+	if !isSet {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeSet) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "set of " + elemName
+}
+
+func (t typeSet) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeSet) GoString() string {
+	return fmt.Sprintf("cty.Set(%#v)", t.ElementTypeT)
+}
+
+// IsSetType returns true if the given type is a set type, regardless of its
+// element type.
+func (t Type) IsSetType() bool {
+	_, ok := t.typeImpl.(typeSet)
+	return ok
+}
+
+// SetElementType is a convenience method that checks if the given type is
+// a set type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//	if et := t.SetElementType(); et != nil {
+//		// Do something with *et
+//	}
+func (t Type) SetElementType() *Type {
+	if lt, ok := t.typeImpl.(typeSet); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go b/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go
new file mode 100644
index 0000000000..798cacd63a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go
@@ -0,0 +1,121 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeTuple struct {
+	typeImplSigil
+	ElemTypes []Type
+}
+
+// Tuple creates a tuple type with the given element types.
+//
+// After a slice is passed to this function the caller must no longer access
+// the underlying array, since ownership is transferred to this library.
+func Tuple(elemTypes []Type) Type {
+	return Type{
+		typeTuple{
+			ElemTypes: elemTypes,
+		},
+	}
+}
+
+func (t typeTuple) Equals(other Type) bool {
+	if ot, ok := other.typeImpl.(typeTuple); ok {
+		if len(t.ElemTypes) != len(ot.ElemTypes) {
+			// Fast path: if we don't have the same number of elements
+			// then we can't possibly be equal.
+			return false
+		}
+
+		for i, ty := range t.ElemTypes {
+			oty := ot.ElemTypes[i]
+			if !oty.Equals(ty) {
+				return false
+			}
+		}
+
+		return true
+	}
+	return false
+}
+
+func (t typeTuple) FriendlyName(mode friendlyTypeNameMode) string {
+	// There isn't really a friendly way to write a tuple type due to its
+	// complexity, so we'll just do something English-ish. Callers will
+	// probably want to make some extra effort to avoid ever printing out
+	// a tuple type FriendlyName in its entirety. For example, could
+	// produce an error message by diffing two tuple types and saying
+	// something like "Expected attribute foo to be string, but got number".
+	// TODO: Finish this
+	return "tuple"
+}
+
+func (t typeTuple) GoString() string {
+	if len(t.ElemTypes) == 0 {
+		return "cty.EmptyTuple"
+	}
+	return fmt.Sprintf("cty.Tuple(%#v)", t.ElemTypes)
+}
+
+// EmptyTuple is a shorthand for Tuple([]Type{}), to more easily talk about
+// the empty tuple type.
+var EmptyTuple Type
+
+// EmptyTupleVal is the only possible non-null, non-unknown value of type
+// EmptyTuple.
+var EmptyTupleVal Value
+
+func init() {
+	EmptyTuple = Tuple([]Type{})
+	EmptyTupleVal = Value{
+		ty: EmptyTuple,
+		v:  []interface{}{},
+	}
+}
+
+// IsTupleType returns true if the given type is a tuple type, regardless
+// of its element types.
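+//
+// An illustrative sketch (not part of the upstream source):
+//
+//	cty.Tuple([]cty.Type{cty.String, cty.Number}).IsTupleType() // true
+//	cty.List(cty.String).IsTupleType()                          // false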
+func (t Type) IsTupleType() bool {
+	_, ok := t.typeImpl.(typeTuple)
+	return ok
+}
+
+// Length returns the number of elements of the receiving tuple type.
+// Will panic if the receiver isn't a tuple type; use IsTupleType to determine
+// whether this operation will succeed.
+func (t Type) Length() int {
+	if ot, ok := t.typeImpl.(typeTuple); ok {
+		return len(ot.ElemTypes)
+	}
+	panic("Length on non-tuple Type")
+}
+
+// TupleElementType returns the type of the element with the given index. Will
+// panic if the receiver is not a tuple type (use IsTupleType to confirm)
+// or if the index is out of range (use Length to confirm).
+func (t Type) TupleElementType(idx int) Type {
+	if ot, ok := t.typeImpl.(typeTuple); ok {
+		return ot.ElemTypes[idx]
+	}
+	panic("TupleElementType on non-tuple Type")
+}
+
+// TupleElementTypes returns a slice of the receiving tuple type's element
+// types. Will panic if the receiver is not a tuple type (use IsTupleType
+// to confirm).
+//
+// The returned slice is part of the internal state of the type, and is provided
+// for read access only. It is forbidden for any caller to modify the
+// underlying array. For many purposes the element-related methods of Value
+// are more appropriate and more convenient to use.
+func (t Type) TupleElementTypes() []Type {
+	if ot, ok := t.typeImpl.(typeTuple); ok {
+		return ot.ElemTypes
+	}
+	panic("TupleElementTypes on non-tuple Type")
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/type.go b/vendor/github.com/hashicorp/go-cty/cty/type.go
new file mode 100644
index 0000000000..730cb9862e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/type.go
@@ -0,0 +1,120 @@
+package cty
+
+// Type represents value types within the type system.
+//
+// This is a closed interface type, meaning that only the concrete
+// implementations provided within this package are considered valid.
+type Type struct {
+	typeImpl
+}
+
+type typeImpl interface {
+	// isTypeImpl is a do-nothing method that exists only to express
+	// that a type is an implementation of typeImpl.
+	isTypeImpl() typeImplSigil
+
+	// Equals returns true if the other given Type exactly equals the
+	// receiver Type.
+	Equals(other Type) bool
+
+	// FriendlyName returns a human-friendly *English* name for the given
+	// type.
+	FriendlyName(mode friendlyTypeNameMode) string
+
+	// GoString implements the GoStringer interface from package fmt.
+	GoString() string
+}
+
+// Base implementation of Type to embed into concrete implementations
+// to signal that they are implementations of Type.
+type typeImplSigil struct{}
+
+func (t typeImplSigil) isTypeImpl() typeImplSigil {
+	return typeImplSigil{}
+}
+
+// Equals returns true if the other given Type exactly equals the receiver
+// type.
+func (t Type) Equals(other Type) bool {
+	return t.typeImpl.Equals(other)
+}
+
+// FriendlyName returns a human-friendly *English* name for the given type.
+func (t Type) FriendlyName() string {
+	return t.typeImpl.FriendlyName(friendlyTypeName)
+}
+
+// FriendlyNameForConstraint is similar to FriendlyName except that the
+// result is specialized for describing type _constraints_ rather than types
+// themselves. This is more appropriate when reporting that a particular value
+// does not conform to an expected type constraint.
+//
+// In particular, this function uses the term "any type" to refer to
+// cty.DynamicPseudoType, rather than "dynamic" as returned by FriendlyName.
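+//
+// For example (illustrative), cty.DynamicPseudoType.FriendlyName() returns
+// "dynamic" while cty.DynamicPseudoType.FriendlyNameForConstraint() returns
+// "any type".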
+func (t Type) FriendlyNameForConstraint() string {
+	return t.typeImpl.FriendlyName(friendlyTypeConstraintName)
+}
+
+// friendlyNameMode is an internal combination of the various FriendlyName*
+// variants that just directly takes a mode, for easy passthrough for
+// recursive name construction.
+func (t Type) friendlyNameMode(mode friendlyTypeNameMode) string {
+	return t.typeImpl.FriendlyName(mode)
+}
+
+// GoString returns a string approximating how the receiver type would be
+// expressed in Go source code.
+func (t Type) GoString() string {
+	if t.typeImpl == nil {
+		return "cty.NilType"
+	}
+
+	return t.typeImpl.GoString()
+}
+
+// NilType is an invalid type used when a function is returning an error
+// and has no useful type to return. It should not be used and any methods
+// called on it will panic.
+var NilType = Type{}
+
+// HasDynamicTypes returns true either if the receiver is itself
+// DynamicPseudoType or if it is a compound type whose descendant elements
+// are DynamicPseudoType.
+func (t Type) HasDynamicTypes() bool {
+	switch {
+	case t == DynamicPseudoType:
+		return true
+	case t.IsPrimitiveType():
+		return false
+	case t.IsCollectionType():
+		return false
+	case t.IsObjectType():
+		attrTypes := t.AttributeTypes()
+		for _, at := range attrTypes {
+			if at.HasDynamicTypes() {
+				return true
+			}
+		}
+		return false
+	case t.IsTupleType():
+		elemTypes := t.TupleElementTypes()
+		for _, et := range elemTypes {
+			if et.HasDynamicTypes() {
+				return true
+			}
+		}
+		return false
+	case t.IsCapsuleType():
+		return false
+	default:
+		// Should never happen, since above should be exhaustive
+		panic("HasDynamicTypes does not support the given type")
+	}
+}
+
+type friendlyTypeNameMode rune
+
+const (
+	friendlyTypeName           friendlyTypeNameMode = 'N'
+	friendlyTypeConstraintName friendlyTypeNameMode = 'C'
+)
diff --git a/vendor/github.com/hashicorp/go-cty/cty/type_conform.go b/vendor/github.com/hashicorp/go-cty/cty/type_conform.go
new file mode 100644
index 0000000000..476eeea87f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/type_conform.go
@@ -0,0 +1,139 @@
+package cty
+
+// TestConformance recursively walks the receiver and the given other type and
+// returns nil if the receiver *conforms* to the given type.
+//
+// Type conformance is similar to type equality but has one crucial difference:
+// DynamicPseudoType can be used within the given type to represent that
+// *any* type is allowed.
+//
+// If any non-conformities are found, the returned slice will be non-nil and
+// contain at least one error value. It will be nil if the type is entirely
+// conformant.
+//
+// Note that the special behavior of DynamicPseudoType is the *only* exception
+// to normal type equality. Calling applications may wish to apply their own
+// automatic conversion logic to the given data structure to create a more
+// liberal notion of conformance to a type.
+//
+// Returned errors are usually (but not always) PathError instances that
+// indicate where in the structure the error was found. If a returned error
+// is of that type then the error message is written for (English-speaking)
+// end-users working within the cty type system, not mentioning any Go-oriented
+// implementation details.
+func (t Type) TestConformance(other Type) []error {
+	path := make(Path, 0)
+	var errs []error
+	testConformance(t, other, path, &errs)
+	return errs
+}
+
+func testConformance(given Type, want Type, path Path, errs *[]error) {
+	if want.Equals(DynamicPseudoType) {
+		// anything goes!
+		return
+	}
+
+	if given.Equals(want) {
+		// Any equal types are always conformant
+		return
+	}
+
+	// The remainder of this function is concerned with detecting
+	// and reporting the specific non-conformance, since we wouldn't
+	// have got here if the types were not divergent.
+	// We treat compound structures as special so that we can report
+	// specifically what is non-conforming, rather than simply returning
+	// the entire type names and letting the user puzzle it out.
+
+	if given.IsObjectType() && want.IsObjectType() {
+		givenAttrs := given.AttributeTypes()
+		wantAttrs := want.AttributeTypes()
+
+		for k := range givenAttrs {
+			if _, exists := wantAttrs[k]; !exists {
+				*errs = append(
+					*errs,
+					errorf(path, "unsupported attribute %q", k),
+				)
+			}
+		}
+		for k := range wantAttrs {
+			if _, exists := givenAttrs[k]; !exists {
+				*errs = append(
+					*errs,
+					errorf(path, "missing required attribute %q", k),
+				)
+			}
+		}
+
+		path = append(path, nil)
+		pathIdx := len(path) - 1
+
+		for k, wantAttrType := range wantAttrs {
+			if givenAttrType, exists := givenAttrs[k]; exists {
+				path[pathIdx] = GetAttrStep{Name: k}
+				testConformance(givenAttrType, wantAttrType, path, errs)
+			}
+		}
+
+		path = path[0:pathIdx]
+
+		return
+	}
+
+	if given.IsTupleType() && want.IsTupleType() {
+		givenElems := given.TupleElementTypes()
+		wantElems := want.TupleElementTypes()
+
+		if len(givenElems) != len(wantElems) {
+			*errs = append(
+				*errs,
+				errorf(path, "%d elements are required, but got %d", len(wantElems), len(givenElems)),
+			)
+			return
+		}
+
+		path = append(path, nil)
+		pathIdx := len(path) - 1
+
+		for i, wantElemType := range wantElems {
+			givenElemType := givenElems[i]
+			path[pathIdx] = IndexStep{Key: NumberIntVal(int64(i))}
+			testConformance(givenElemType, wantElemType, path, errs)
+		}
+
+		path = path[0:pathIdx]
+
+		return
+	}
+
+	if given.IsListType() && want.IsListType() {
+		path = append(path, IndexStep{Key: UnknownVal(Number)})
+		pathIdx := len(path) - 1
+		testConformance(given.ElementType(), want.ElementType(), path, errs)
+		path = path[0:pathIdx]
+		return
+	}
+
+	if given.IsMapType() && want.IsMapType() {
+		path = append(path, IndexStep{Key: UnknownVal(String)})
+		pathIdx := len(path) - 1
+		testConformance(given.ElementType(), want.ElementType(), path, errs)
+		path = path[0:pathIdx]
+		return
+	}
+
+	if given.IsSetType() && want.IsSetType() {
+		path = append(path, IndexStep{Key: UnknownVal(given.ElementType())})
+		pathIdx := len(path) - 1
+		testConformance(given.ElementType(), want.ElementType(), path, errs)
+		path = path[0:pathIdx]
+		return
+	}
+
+	*errs = append(
+		*errs,
+		errorf(path, "%s required, but received %s", want.FriendlyName(), given.FriendlyName()),
+	)
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/types_to_register.go b/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go
similarity index 93%
rename from vendor/github.com/zclconf/go-cty/cty/types_to_register.go
rename to vendor/github.com/hashicorp/go-cty/cty/types_to_register.go
index e1e220aab3..ec05bb18aa 100644
--- a/vendor/github.com/zclconf/go-cty/cty/types_to_register.go
+++ b/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go
@@ -6,7 +6,7 @@ import (
 	"math/big"
 	"strings"
 
-	"github.com/zclconf/go-cty/cty/set"
+	"github.com/hashicorp/go-cty/cty/set"
 )
 
 // InternalTypesToRegister is a slice of values that covers all of the
@@ -49,7 +49,7 @@ func init() {
 	for _, tv := range InternalTypesToRegister {
 		typeName := fmt.Sprintf("%T", tv)
 		if strings.HasPrefix(typeName, "cty.") {
-			gob.RegisterName(fmt.Sprintf("github.com/zclconf/go-cty/%s", typeName), tv)
+			gob.RegisterName(fmt.Sprintf("github.com/hashicorp/go-cty/%s", typeName), tv)
 		} else {
 			gob.Register(tv)
 		}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/unknown.go b/vendor/github.com/hashicorp/go-cty/cty/unknown.go
new file mode 100644
index 0000000000..e54179eb14
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/unknown.go
@@ -0,0 +1,84 @@
+package cty
+
+// unknownType is the placeholder type used for the sigil value representing
+// "Unknown", to make it unambiguously distinct from any other possible value.
+type unknownType struct {
+}
+
+// unknown is a special value that can be used as the internal value of a
+// Value to represent that the value is not yet known.
+var unknown interface{} = &unknownType{}
+
+// UnknownVal returns a Value that represents an unknown value of the given
+// type. Unknown values can be used to represent a value that is
+// not yet known. Its meaning is undefined in cty, but it could be used by
+// a calling application to allow partial evaluation.
+//
+// Unknown values can be created of any type. All operations on
+// Unknown values themselves return Unknown.
+func UnknownVal(t Type) Value {
+	return Value{
+		ty: t,
+		v:  unknown,
+	}
+}
+
+func (t unknownType) GoString() string {
+	// This is the stringification of our internal unknown marker. The
+	// stringification of the public representation of unknowns is in
+	// Value.GoString.
+	return "cty.unknown"
+}
+
+type pseudoTypeDynamic struct {
+	typeImplSigil
+}
+
+// DynamicPseudoType represents the dynamic pseudo-type.
+//
+// This type can represent situations where a type is not yet known. Its
+// meaning is undefined in cty, but it could be used by a calling
+// application to allow expression type checking with some types not yet known.
+// For example, the application might optimistically permit any operation on
+// values of this type in type checking, allowing a partial type-check result,
+// and then repeat the check when more information is known to get the
+// final, concrete type.
+//
+// It is a pseudo-type because it is used only as a sigil to the calling
+// application. "Unknown" is the only valid value of this pseudo-type, so
+// operations on values of this type will always short-circuit as per
+// the rules for that special value.
+var DynamicPseudoType Type
+
+func (t pseudoTypeDynamic) Equals(other Type) bool {
+	_, ok := other.typeImpl.(pseudoTypeDynamic)
+	return ok
+}
+
+func (t pseudoTypeDynamic) FriendlyName(mode friendlyTypeNameMode) string {
+	switch mode {
+	case friendlyTypeConstraintName:
+		return "any type"
+	default:
+		return "dynamic"
+	}
+}
+
+func (t pseudoTypeDynamic) GoString() string {
+	return "cty.DynamicPseudoType"
+}
+
+// DynamicVal is the only valid value of the pseudo-type dynamic.
+// This value can be used as a placeholder where a value or expression's
+// type and value are both unknown, thus allowing partial evaluation. See
+// the docs for DynamicPseudoType for more information.
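+//
+// An illustrative sketch (not part of the upstream source):
+//
+//	cty.DynamicVal.Type() == cty.DynamicPseudoType // true
+//	cty.DynamicVal.IsKnown()                       // false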
+var DynamicVal Value
+
+func init() {
+	DynamicPseudoType = Type{
+		pseudoTypeDynamic{},
+	}
+	DynamicVal = Value{
+		ty: DynamicPseudoType,
+		v:  unknown,
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go b/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go
new file mode 100644
index 0000000000..ba926475ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go
@@ -0,0 +1,64 @@
+package cty
+
+// UnknownAsNull returns a value of the same type as the given value but
+// with any unknown values (including nested values) replaced with null
+// values of the same type.
+//
+// This can be useful if a result is to be serialized in a format that can't
+// represent unknowns, such as JSON, as long as the caller does not need to
+// retain the unknown value information.
+func UnknownAsNull(val Value) Value {
+	ty := val.Type()
+	switch {
+	case val.IsNull():
+		return val
+	case !val.IsKnown():
+		return NullVal(ty)
+	case ty.IsListType() || ty.IsTupleType() || ty.IsSetType():
+		length := val.LengthInt()
+		if length == 0 {
+			// If there are no elements then we can't have unknowns
+			return val
+		}
+		vals := make([]Value, 0, length)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, v := it.Element()
+			vals = append(vals, UnknownAsNull(v))
+		}
+		switch {
+		case ty.IsListType():
+			return ListVal(vals)
+		case ty.IsTupleType():
+			return TupleVal(vals)
+		default:
+			return SetVal(vals)
+		}
+	case ty.IsMapType() || ty.IsObjectType():
+		var length int
+		switch {
+		case ty.IsMapType():
+			length = val.LengthInt()
+		default:
+			length = len(val.Type().AttributeTypes())
+		}
+		if length == 0 {
+			// If there are no elements then we can't have unknowns
+			return val
+		}
+		vals := make(map[string]Value, length)
+		it := val.ElementIterator()
+		for it.Next() {
+			k, v := it.Element()
+			vals[k.AsString()] = UnknownAsNull(v)
+		}
+		switch {
+		case ty.IsMapType():
+			return MapVal(vals)
+		default:
+			return ObjectVal(vals)
+		}
+	}
+
+	return val
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/value.go b/vendor/github.com/hashicorp/go-cty/cty/value.go
new file mode 100644
index 0000000000..1025ba82eb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/value.go
@@ -0,0 +1,108 @@
+package cty
+
+// Value represents a value of a particular type, and is the interface by
+// which operations are executed on typed values.
+//
+// Value has two different classes of method. Operation methods stay entirely
+// within the type system (methods accept and return Value instances) and
+// are intended for use in implementing a language in terms of cty, while
+// integration methods either enter or leave the type system, working with
+// native Go values. Operation methods are guaranteed to support all of the
+// expected short-circuit behavior for unknown and dynamic values, while
+// integration methods may not.
+//
+// The philosophy for the operations API is that it's the caller's
+// responsibility to ensure that the given types and values satisfy the
+// specified invariants during a separate type check, so that the caller is
+// able to return errors to its user from the application's own perspective.
+//
+// Consequently the design of these methods assumes such checks have already
+// been done and panics if any invariants turn out not to be satisfied. These
+// panic errors are not intended to be handled, but rather indicate a bug in
+// the calling application that should be fixed with more checks prior to
+// executing operations.
+//
+// A related consequence of this philosophy is that no automatic type
+// conversions are done. If a method specifies that its argument must be
+// number then it's the caller's responsibility to do that conversion before
+// the call, thus allowing the application to have more constrained conversion
+// rules than are offered by the built-in converter where necessary.
+type Value struct {
+	ty Type
+	v  interface{}
+}
+
+// Type returns the type of the value.
+func (val Value) Type() Type {
+	return val.ty
+}
+
+// IsKnown returns true if the value is known. That is, if it is not
+// the result of the unknown value constructor UnknownVal(...), and is not
+// the result of an operation on another unknown value.
+//
+// Unknown values are only produced either directly or as a result of
+// operating on other unknown values, and so an application that never
+// introduces Unknown values can be guaranteed to never receive any either.
+func (val Value) IsKnown() bool {
+	if val.IsMarked() {
+		return val.unmarkForce().IsKnown()
+	}
+	return val.v != unknown
+}
+
+// IsNull returns true if the value is null. Values of any type can be
+// null, but any operations on a null value will panic. No operation ever
+// produces null, so an application that never introduces Null values can
+// be guaranteed to never receive any either.
+func (val Value) IsNull() bool {
+	if val.IsMarked() {
+		return val.unmarkForce().IsNull()
+	}
+	return val.v == nil
+}
+
+// NilVal is an invalid Value that can be used as a placeholder when returning
+// with an error from a function that returns (Value, error).
+//
+// NilVal is *not* a valid Value and so no operations may be performed on it.
+// Any attempt to use it will result in a panic.
+//
+// This should not be confused with the idea of a Null value, as returned by
+// NullVal. NilVal is a nil within the *Go* type system, and is invalid in
+// the cty type system. Null values *do* exist in the cty type system.
+var NilVal = Value{
+	ty: Type{typeImpl: nil},
+	v:  nil,
+}
+
+// IsWhollyKnown is an extension of IsKnown that also recursively checks
+// inside collections and structures to see if there are any nested unknown
+// values.
+func (val Value) IsWhollyKnown() bool {
+	if val.IsMarked() {
+		return val.unmarkForce().IsWhollyKnown()
+	}
+
+	if !val.IsKnown() {
+		return false
+	}
+
+	if val.IsNull() {
+		// Can't recurse into a null, so we're done
+		return true
+	}
+
+	switch {
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			_, ev := it.Element()
+			if !ev.IsWhollyKnown() {
+				return false
+			}
+		}
+		return true
+	default:
+		return true
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/value_init.go b/vendor/github.com/hashicorp/go-cty/cty/value_init.go
new file mode 100644
index 0000000000..853a5a7dba
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/value_init.go
@@ -0,0 +1,324 @@
+package cty
+
+import (
+	"fmt"
+	"math/big"
+	"reflect"
+
+	"golang.org/x/text/unicode/norm"
+
+	"github.com/hashicorp/go-cty/cty/set"
+)
+
+// BoolVal returns a Value of type Bool whose internal value is the given
+// bool.
+func BoolVal(v bool) Value {
+	return Value{
+		ty: Bool,
+		v:  v,
+	}
+}
+
+// NumberVal returns a Value of type Number whose internal value is the given
+// big.Float. The returned value becomes the owner of the big.Float object,
+// and so it's forbidden for the caller to mutate the object after it's
+// wrapped in this way.
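+//
+// An illustrative sketch (not part of the upstream source):
+//
+//	f := new(big.Float).SetFloat64(1.5)
+//	v := cty.NumberVal(f) // f must not be mutated after this call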
+func NumberVal(v *big.Float) Value {
+	return Value{
+		ty: Number,
+		v:  v,
+	}
+}
+
+// ParseNumberVal returns a Value of type Number produced by parsing the given
+// string as a decimal real number. To ensure that two identical strings will
+// always produce an equal number, always use this function to derive a number
+// from a string; it will ensure that the precision and rounding mode for the
+// internal big decimal is configured in a consistent way.
+//
+// If the given string cannot be parsed as a number, the returned error has
+// the message "a number is required", making it suitable to return to an
+// end-user to signal a type conversion error.
+//
+// If the given string contains a number that becomes a recurring fraction
+// when expressed in binary then it will be truncated to have a 512-bit
+// mantissa. Note that this is a higher precision than that of a float64,
+// so converting the same decimal number first to float64 and then calling
+// NumberFloatVal will not produce an equal result; the conversion first
+// to float64 will round the mantissa to fewer than 512 bits.
+func ParseNumberVal(s string) (Value, error) {
+	// Base 10, precision 512, and rounding to nearest even is the standard
+	// way to handle numbers arriving as strings.
+	f, _, err := big.ParseFloat(s, 10, 512, big.ToNearestEven)
+	if err != nil {
+		return NilVal, fmt.Errorf("a number is required")
+	}
+	return NumberVal(f), nil
+}
+
+// MustParseNumberVal is like ParseNumberVal but it will panic in case of any
+// error. It can be used during initialization or any other situation where
+// the given string is a constant or otherwise known to be correct by the
+// caller.
+func MustParseNumberVal(s string) Value {
+	ret, err := ParseNumberVal(s)
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
+
+// NumberIntVal returns a Value of type Number whose internal value is equal
+// to the given integer.
+func NumberIntVal(v int64) Value {
+	return NumberVal(new(big.Float).SetInt64(v))
+}
+
+// NumberUIntVal returns a Value of type Number whose internal value is equal
+// to the given unsigned integer.
+func NumberUIntVal(v uint64) Value {
+	return NumberVal(new(big.Float).SetUint64(v))
+}
+
+// NumberFloatVal returns a Value of type Number whose internal value is
+// equal to the given float.
+func NumberFloatVal(v float64) Value {
+	return NumberVal(new(big.Float).SetFloat64(v))
+}
+
+// StringVal returns a Value of type String whose internal value is the
+// given string.
+//
+// Strings must be UTF-8 encoded sequences of valid unicode codepoints, and
+// they are NFC-normalized on entry into the world of cty values.
+//
+// If the given string is not valid UTF-8 then behavior of string operations
+// is undefined.
+func StringVal(v string) Value {
+	return Value{
+		ty: String,
+		v:  NormalizeString(v),
+	}
+}
+
+// NormalizeString applies the same normalization that cty applies when
+// constructing string values.
+//
+// A return value from this function can be meaningfully compared byte-for-byte
+// with a Value.AsString result.
+func NormalizeString(s string) string {
+	return norm.NFC.String(s)
+}
+
+// ObjectVal returns a Value of an object type whose structure is defined
+// by the key names and value types in the given map.
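Before ObjectVal's implementation below, a sketch of the number constructors just defined; the precision caveat in ParseNumberVal's comment is easy to trip over in practice (example values are ours):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	// Parsed from the decimal string at 512-bit precision.
	a := cty.MustParseNumberVal("0.1")

	// Rounded through float64 first, so the mantissa differs.
	b := cty.NumberFloatVal(0.1)

	fmt.Println(a.Equals(b).True()) // false: the two roundings disagree

	// Identical strings always parse to equal numbers.
	c := cty.MustParseNumberVal("0.1")
	fmt.Println(a.Equals(c).True()) // true
}
```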
+func ObjectVal(attrs map[string]Value) Value { + attrTypes := make(map[string]Type, len(attrs)) + attrVals := make(map[string]interface{}, len(attrs)) + + for attr, val := range attrs { + attr = NormalizeString(attr) + attrTypes[attr] = val.ty + attrVals[attr] = val.v + } + + return Value{ + ty: Object(attrTypes), + v: attrVals, + } +} + +// TupleVal returns a Value of a tuple type whose element types are +// defined by the value types in the given slice. +func TupleVal(elems []Value) Value { + elemTypes := make([]Type, len(elems)) + elemVals := make([]interface{}, len(elems)) + + for i, val := range elems { + elemTypes[i] = val.ty + elemVals[i] = val.v + } + + return Value{ + ty: Tuple(elemTypes), + v: elemVals, + } +} + +// ListVal returns a Value of list type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also ListValEmpty.) +func ListVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call ListVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent list element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + return Value{ + ty: List(elementType), + v: rawList, + } +} + +// ListValEmpty returns an empty list of the given element type. +func ListValEmpty(element Type) Value { + return Value{ + ty: List(element), + v: []interface{}{}, + } +} + +// MapVal returns a Value of a map type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given map is empty, since then the element type cannot be inferred. +// (See also MapValEmpty.) +func MapVal(vals map[string]Value) Value { + if len(vals) == 0 { + panic("must not call MapVal with empty map") + } + elementType := DynamicPseudoType + rawMap := make(map[string]interface{}, len(vals)) + + for key, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent map element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawMap[NormalizeString(key)] = val.v + } + + return Value{ + ty: Map(elementType), + v: rawMap, + } +} + +// MapValEmpty returns an empty map of the given element type. +func MapValEmpty(element Type) Value { + return Value{ + ty: Map(element), + v: map[string]interface{}{}, + } +} + +// SetVal returns a Value of set type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also SetValEmpty.) 
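Before SetVal's implementation below, a sketch of how the sequence constructors above enforce their homogeneity rules at runtime (example values are ours):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	// Lists require a single element type...
	names := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	fmt.Println(names.Type().FriendlyName()) // list of string

	// ...while tuples allow a different type per position.
	pair := cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.NumberIntVal(1)})
	fmt.Println(pair.Type().FriendlyName())

	// Empty collections need an explicit element type, since it cannot
	// be inferred from zero elements.
	fmt.Println(cty.ListValEmpty(cty.String).LengthInt()) // 0

	// This would panic with "inconsistent list element types":
	// cty.ListVal([]cty.Value{cty.StringVal("a"), cty.NumberIntVal(1)})
}
```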
+func SetVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call SetVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + var markSets []ValueMarks + + for i, val := range vals { + if unmarkedVal, marks := val.UnmarkDeep(); len(marks) > 0 { + val = unmarkedVal + markSets = append(markSets, marks) + } + if val.ContainsMarked() { + // FIXME: Allow this, but unmark the values and apply the + // marking to the set itself instead. + panic("set cannot contain marked values") + } + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent set element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + rawVal := set.NewSetFromSlice(setRules{elementType}, rawList) + + return Value{ + ty: Set(elementType), + v: rawVal, + }.WithMarks(markSets...) +} + +// SetValFromValueSet returns a Value of set type based on an already-constructed +// ValueSet. +// +// The element type of the returned value is the element type of the given +// set. +func SetValFromValueSet(s ValueSet) Value { + ety := s.ElementType() + rawVal := s.s.Copy() // copy so caller can't mutate what we wrap + + return Value{ + ty: Set(ety), + v: rawVal, + } +} + +// SetValEmpty returns an empty set of the given element type. +func SetValEmpty(element Type) Value { + return Value{ + ty: Set(element), + v: set.NewSet(setRules{element}), + } +} + +// CapsuleVal creates a value of the given capsule type using the given +// wrapVal, which must be a pointer to a value of the capsule type's native +// type. +// +// This function will panic if the given type is not a capsule type, if +// the given wrapVal is not compatible with the given capsule type, or if +// wrapVal is not a pointer. +func CapsuleVal(ty Type, wrapVal interface{}) Value { + if !ty.IsCapsuleType() { + panic("not a capsule type") + } + + wv := reflect.ValueOf(wrapVal) + if wv.Kind() != reflect.Ptr { + panic("wrapVal is not a pointer") + } + + it := ty.typeImpl.(*capsuleType).GoType + if !wv.Type().Elem().AssignableTo(it) { + panic("wrapVal target is not compatible with the given capsule type") + } + + return Value{ + ty: ty, + v: wrapVal, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/value_ops.go b/vendor/github.com/hashicorp/go-cty/cty/value_ops.go new file mode 100644 index 0000000000..69e5a8abbf --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/value_ops.go @@ -0,0 +1,1290 @@ +package cty + +import ( + "fmt" + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty/set" +) + +// GoString is an implementation of fmt.GoStringer that produces concise +// source-like representations of values suitable for use in debug messages. 
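Before GoString's implementation below, a sketch of the capsule constructor just defined, paired with the Capsule type constructor from elsewhere in this package; the wrapped type and names here are hypothetical:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/hashicorp/go-cty/cty"
)

// connection is a hypothetical native Go type we want to carry through
// the cty type system unchanged.
type connection struct {
	Host string
}

// Capsule types are typically created once and reused.
var connCapsule = cty.Capsule("connection", reflect.TypeOf(connection{}))

func main() {
	conn := &connection{Host: "example.com"}

	// Wrap a pointer to the native value...
	v := cty.CapsuleVal(connCapsule, conn)

	// ...and retrieve the same pointer later.
	fmt.Println(v.EncapsulatedValue().(*connection).Host) // example.com
}
```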
+func (val Value) GoString() string { + if val.IsMarked() { + unVal, marks := val.Unmark() + if len(marks) == 1 { + var mark interface{} + for m := range marks { + mark = m + } + return fmt.Sprintf("%#v.Mark(%#v)", unVal, mark) + } + return fmt.Sprintf("%#v.WithMarks(%#v)", unVal, marks) + } + + if val == NilVal { + return "cty.NilVal" + } + + if val.IsNull() { + return fmt.Sprintf("cty.NullVal(%#v)", val.ty) + } + if val == DynamicVal { // is unknown, so must be before the IsKnown check below + return "cty.DynamicVal" + } + if !val.IsKnown() { + return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty) + } + + // By the time we reach here we've dealt with all of the exceptions around + // unknowns and nulls, so we're guaranteed that the values are the + // canonical internal representation of the given type. + + switch val.ty { + case Bool: + if val.v.(bool) { + return "cty.True" + } + return "cty.False" + case Number: + fv := val.v.(*big.Float) + // We'll try to use NumberIntVal or NumberFloatVal if we can, since + // the fully-general initializer call is pretty ugly-looking. + if fv.IsInt() { + return fmt.Sprintf("cty.NumberIntVal(%#v)", fv) + } + if rfv, accuracy := fv.Float64(); accuracy == big.Exact { + return fmt.Sprintf("cty.NumberFloatVal(%#v)", rfv) + } + return fmt.Sprintf("cty.MustParseNumberVal(%q)", fv.Text('f', -1)) + case String: + return fmt.Sprintf("cty.StringVal(%#v)", val.v) + } + + switch { + case val.ty.IsSetType(): + vals := val.AsValueSlice() + if len(vals) == 0 { + return fmt.Sprintf("cty.SetValEmpty(%#v)", val.ty.ElementType()) + } + return fmt.Sprintf("cty.SetVal(%#v)", vals) + case val.ty.IsListType(): + vals := val.AsValueSlice() + if len(vals) == 0 { + return fmt.Sprintf("cty.ListValEmpty(%#v)", val.ty.ElementType()) + } + return fmt.Sprintf("cty.ListVal(%#v)", vals) + case val.ty.IsMapType(): + vals := val.AsValueMap() + if len(vals) == 0 { + return fmt.Sprintf("cty.MapValEmpty(%#v)", val.ty.ElementType()) + } + return fmt.Sprintf("cty.MapVal(%#v)", vals) + case val.ty.IsTupleType(): + if val.ty.Equals(EmptyTuple) { + return "cty.EmptyTupleVal" + } + vals := val.AsValueSlice() + return fmt.Sprintf("cty.TupleVal(%#v)", vals) + case val.ty.IsObjectType(): + if val.ty.Equals(EmptyObject) { + return "cty.EmptyObjectVal" + } + vals := val.AsValueMap() + return fmt.Sprintf("cty.ObjectVal(%#v)", vals) + case val.ty.IsCapsuleType(): + impl := val.ty.CapsuleOps().GoString + if impl == nil { + return fmt.Sprintf("cty.CapsuleVal(%#v, %#v)", val.ty, val.v) + } + return impl(val.EncapsulatedValue()) + } + + // Default exposes implementation details, so should actually cover + // all of the cases above for good caller UX. + return fmt.Sprintf("cty.Value{ty: %#v, v: %#v}", val.ty, val.v) +} + +// Equals returns True if the receiver and the given other value have the +// same type and are exactly equal in value. +// +// As a special case, two null values are always equal regardless of type. +// +// The usual short-circuit rules apply, so the result will be unknown if +// either of the given values are. +// +// Use RawEquals to compare if two values are equal *ignoring* the +// short-circuit rules and the exception for null values. +func (val Value) Equals(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Equals(other).WithMarks(valMarks, otherMarks) + } + + // Start by handling Unknown values before considering types. 
+	// This needs to be done since Null values are always equal regardless of
+	// type.
+	switch {
+	case !val.IsKnown() && !other.IsKnown():
+		// both unknown
+		return UnknownVal(Bool)
+	case val.IsKnown() && !other.IsKnown():
+		switch {
+		case val.IsNull(), other.ty.HasDynamicTypes():
+			// If the known value is null, we need to wait for the unknown
+			// value, since nulls of any type are equal.
+			// An unknown with a dynamic type compares as unknown, which we
+			// need to check before the type comparison below.
+			return UnknownVal(Bool)
+		case !val.ty.Equals(other.ty):
+			// There is no null comparison or dynamic types, so unequal types
+			// will never be equal.
+			return False
+		default:
+			return UnknownVal(Bool)
+		}
+	case other.IsKnown() && !val.IsKnown():
+		switch {
+		case other.IsNull(), val.ty.HasDynamicTypes():
+			// If the known value is null, we need to wait for the unknown
+			// value, since nulls of any type are equal.
+			// An unknown with a dynamic type compares as unknown, which we
+			// need to check before the type comparison below.
+			return UnknownVal(Bool)
+		case !other.ty.Equals(val.ty):
+			// There's no null comparison or dynamic types, so unequal types
+			// will never be equal.
+			return False
+		default:
+			return UnknownVal(Bool)
+		}
+	}
+
+	switch {
+	case val.IsNull() && other.IsNull():
+		// Nulls are always equal, regardless of type
+		return BoolVal(true)
+	case val.IsNull() || other.IsNull():
+		// If only one is null then the result must be false
+		return BoolVal(false)
+	}
+
+	if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() {
+		return UnknownVal(Bool)
+	}
+
+	if !val.ty.Equals(other.ty) {
+		return BoolVal(false)
+	}
+
+	ty := val.ty
+	result := false
+
+	switch {
+	case ty == Number:
+		result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0
+	case ty == Bool:
+		result = val.v.(bool) == other.v.(bool)
+	case ty == String:
+		// Simple equality is safe because we NFC-normalize strings as they
+		// enter our world from StringVal, and so we can assume strings are
+		// always in normal form.
+		result = val.v.(string) == other.v.(string)
+	case ty.IsObjectType():
+		oty := ty.typeImpl.(typeObject)
+		result = true
+		for attr, aty := range oty.AttrTypes {
+			lhs := Value{
+				ty: aty,
+				v:  val.v.(map[string]interface{})[attr],
+			}
+			rhs := Value{
+				ty: aty,
+				v:  other.v.(map[string]interface{})[attr],
+			}
+			eq := lhs.Equals(rhs)
+			if !eq.IsKnown() {
+				return UnknownVal(Bool)
+			}
+			if eq.False() {
+				result = false
+				break
+			}
+		}
+	case ty.IsTupleType():
+		tty := ty.typeImpl.(typeTuple)
+		result = true
+		for i, ety := range tty.ElemTypes {
+			lhs := Value{
+				ty: ety,
+				v:  val.v.([]interface{})[i],
+			}
+			rhs := Value{
+				ty: ety,
+				v:  other.v.([]interface{})[i],
+			}
+			eq := lhs.Equals(rhs)
+			if !eq.IsKnown() {
+				return UnknownVal(Bool)
+			}
+			if eq.False() {
+				result = false
+				break
+			}
+		}
+	case ty.IsListType():
+		ety := ty.typeImpl.(typeList).ElementTypeT
+		if len(val.v.([]interface{})) == len(other.v.([]interface{})) {
+			result = true
+			for i := range val.v.([]interface{}) {
+				lhs := Value{
+					ty: ety,
+					v:  val.v.([]interface{})[i],
+				}
+				rhs := Value{
+					ty: ety,
+					v:  other.v.([]interface{})[i],
+				}
+				eq := lhs.Equals(rhs)
+				if !eq.IsKnown() {
+					return UnknownVal(Bool)
+				}
+				if eq.False() {
+					result = false
+					break
+				}
+			}
+		}
+	case ty.IsSetType():
+		s1 := val.v.(set.Set)
+		s2 := other.v.(set.Set)
+		equal := true
+
+		// Note that by our definition of sets it's never possible for two
+		// sets that contain unknown values (directly or indirectly) to
+		// ever be equal, even if they are otherwise identical.
+
+		// FIXME: iterating both lists and checking each item is not the
+		// ideal implementation here, but it works with the primitives we
+		// have in the set implementation. Perhaps the set implementation
+		// can provide its own equality test later.
+		s1.EachValue(func(v interface{}) {
+			if !s2.Has(v) {
+				equal = false
+			}
+		})
+		s2.EachValue(func(v interface{}) {
+			if !s1.Has(v) {
+				equal = false
+			}
+		})
+
+		result = equal
+	case ty.IsMapType():
+		ety := ty.typeImpl.(typeMap).ElementTypeT
+		if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) {
+			result = true
+			for k := range val.v.(map[string]interface{}) {
+				if _, ok := other.v.(map[string]interface{})[k]; !ok {
+					result = false
+					break
+				}
+				lhs := Value{
+					ty: ety,
+					v:  val.v.(map[string]interface{})[k],
+				}
+				rhs := Value{
+					ty: ety,
+					v:  other.v.(map[string]interface{})[k],
+				}
+				eq := lhs.Equals(rhs)
+				if !eq.IsKnown() {
+					return UnknownVal(Bool)
+				}
+				if eq.False() {
+					result = false
+					break
+				}
+			}
+		}
+	case ty.IsCapsuleType():
+		impl := val.ty.CapsuleOps().Equals
+		if impl == nil {
+			impl := val.ty.CapsuleOps().RawEquals
+			if impl == nil {
+				// A capsule type's encapsulated value is a pointer to a value of its
+				// native type, so we can just compare these to get the identity test
+				// we need.
+				return BoolVal(val.v == other.v)
+			}
+			return BoolVal(impl(val.v, other.v))
+		}
+		ret := impl(val.v, other.v)
+		if !ret.Type().Equals(Bool) {
+			panic(fmt.Sprintf("Equals for %#v returned %#v, not cty.Bool", ty, ret.Type()))
+		}
+		return ret
+
+	default:
+		// should never happen
+		panic(fmt.Errorf("unsupported value type %#v in Equals", ty))
+	}
+
+	return BoolVal(result)
+}
+
+// NotEqual is a shorthand for Equals followed by Not.
+func (val Value) NotEqual(other Value) Value {
+	return val.Equals(other).Not()
+}
+
+// True returns true if the receiver is True, false if False, and panics if
+// the receiver is not of type Bool.
+// +// This is a helper function to help write application logic that works with +// values, rather than a first-class operation. It does not work with unknown +// or null values. For more robust handling with unknown value +// short-circuiting, use val.Equals(cty.True). +func (val Value) True() bool { + val.assertUnmarked() + if val.ty != Bool { + panic("not bool") + } + return val.Equals(True).v.(bool) +} + +// False is the opposite of True. +func (val Value) False() bool { + return !val.True() +} + +// RawEquals returns true if and only if the two given values have the same +// type and equal value, ignoring the usual short-circuit rules about +// unknowns and dynamic types. +// +// This method is more appropriate for testing than for real use, since it +// skips over usual semantics around unknowns but as a consequence allows +// testing the result of another operation that is expected to return unknown. +// It returns a primitive Go bool rather than a Value to remind us that it +// is not a first-class value operation. +func (val Value) RawEquals(other Value) bool { + if !val.ty.Equals(other.ty) { + return false + } + if !val.HasSameMarks(other) { + return false + } + // Since we've now checked the marks, we'll unmark for the rest of this... + val = val.unmarkForce() + other = other.unmarkForce() + + if (!val.IsKnown()) && (!other.IsKnown()) { + return true + } + if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) { + return false + } + if val.IsNull() && other.IsNull() { + return true + } + if (val.IsNull() && !other.IsNull()) || (other.IsNull() && !val.IsNull()) { + return false + } + if val.ty == DynamicPseudoType && other.ty == DynamicPseudoType { + return true + } + + ty := val.ty + switch { + case ty == Number || ty == Bool || ty == String || ty == DynamicPseudoType: + return val.Equals(other).True() + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + + // Since we're intentionally ignoring our rule that two unknowns + // are never equal, we can cheat here. + // (This isn't 100% right since e.g. it will fail if the set contains + // numbers that are infinite, which DeepEqual can't compare properly. + // We're accepting that limitation for simplicity here, since this + // function is here primarily for testing.) 
+ return reflect.DeepEqual(s1, s2) + + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + return false + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsCapsuleType(): + impl := val.ty.CapsuleOps().RawEquals + if impl == nil { + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return val.v == other.v + } + return impl(val.v, other.v) + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in RawEquals", ty)) + } +} + +// Add returns the sum of the receiver and the given other value. Both values +// must be numbers; this method will panic if not. +func (val Value) Add(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Add(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Add(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Subtract returns receiver minus the given other value. Both values must be +// numbers; this method will panic if not. +func (val Value) Subtract(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Subtract(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + return val.Add(other.Negate()) +} + +// Negate returns the numeric negative of the receiver, which must be a number. +// This method will panic when given a value of any other type. +func (val Value) Negate() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Negate().WithMarks(valMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float).Neg(val.v.(*big.Float)) + return NumberVal(ret) +} + +// Multiply returns the product of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +func (val Value) Multiply(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Multiply(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Mul(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Divide returns the quotient of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. 
+// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// If both values are zero or infinity, this function will panic with +// an instance of big.ErrNaN. +func (val Value) Divide(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Divide(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Quo(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Modulo returns the remainder of an integer division of the receiver and +// the given other value. Both values must be numbers; this method will panic +// if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// This operation is primarily here for use with nonzero natural numbers. +// Modulo with "other" as a non-natural number gets somewhat philosophical, +// and this function takes a position on what that should mean, but callers +// may wish to disallow such things outright or implement their own modulo +// if they disagree with the interpretation used here. +func (val Value) Modulo(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Modulo(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + // We cheat a bit here with infinities, just abusing the Multiply operation + // to get an infinite result of the correct sign. + if val == PositiveInfinity || val == NegativeInfinity || other == PositiveInfinity || other == NegativeInfinity { + return val.Multiply(other) + } + + if other.RawEquals(Zero) { + return val + } + + // FIXME: This is a bit clumsy. Should come back later and see if there's a + // more straightforward way to do this. + rat := val.Divide(other) + ratFloorInt := &big.Int{} + rat.v.(*big.Float).Int(ratFloorInt) + work := (&big.Float{}).SetInt(ratFloorInt) + work.Mul(other.v.(*big.Float), work) + work.Sub(val.v.(*big.Float), work) + + return NumberVal(work) +} + +// Absolute returns the absolute (signless) value of the receiver, which must +// be a number or this method will panic. 
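Before Absolute's implementation below, a sketch of the arithmetic methods above, including the documented division-by-zero and unknown short-circuit behavior (example values are ours):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	a := cty.NumberIntVal(7)
	b := cty.NumberIntVal(3)

	fmt.Println(a.Add(b).AsBigFloat())    // 10
	fmt.Println(a.Modulo(b).AsBigFloat()) // 1

	// Division by exactly zero yields an infinity, not an error.
	inf := a.Divide(cty.Zero)
	fmt.Println(inf.RawEquals(cty.PositiveInfinity)) // true

	// Unknown operands short-circuit to an unknown number.
	u := cty.UnknownVal(cty.Number)
	fmt.Println(a.Add(u).IsKnown()) // false
}
```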
+func (val Value) Absolute() Value {
+	if val.IsMarked() {
+		val, valMarks := val.Unmark()
+		return val.Absolute().WithMarks(valMarks)
+	}
+
+	if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Number)
+		return *shortCircuit
+	}
+
+	ret := (&big.Float{}).Abs(val.v.(*big.Float))
+	return NumberVal(ret)
+}
+
+// GetAttr returns the value of the given attribute of the receiver, which
+// must be of an object type that has an attribute of the given name.
+// This method will panic if the receiver type is not compatible.
+//
+// The method will also panic if the given attribute name is not defined
+// for the value's type. Use the attribute-related methods on Type to
+// check for the validity of an attribute before trying to use it.
+//
+// This method may be called on a value whose type is DynamicPseudoType,
+// in which case the result will also be DynamicVal.
+func (val Value) GetAttr(name string) Value {
+	if val.IsMarked() {
+		val, valMarks := val.Unmark()
+		return val.GetAttr(name).WithMarks(valMarks)
+	}
+
+	if val.ty == DynamicPseudoType {
+		return DynamicVal
+	}
+
+	if !val.ty.IsObjectType() {
+		panic("value is not an object")
+	}
+
+	name = NormalizeString(name)
+	if !val.ty.HasAttribute(name) {
+		panic("value has no attribute of that name")
+	}
+
+	attrType := val.ty.AttributeType(name)
+
+	if !val.IsKnown() {
+		return UnknownVal(attrType)
+	}
+
+	return Value{
+		ty: attrType,
+		v:  val.v.(map[string]interface{})[name],
+	}
+}
+
+// Index returns the value of an element of the receiver, which must have
+// either a list, map or tuple type. This method will panic if the receiver
+// type is not compatible.
+//
+// The key value must be the correct type for the receiving collection: a
+// number if the collection is a list or tuple, or a string if it is a map.
+// In the case of a list or tuple, the given number must be convertible to int
+// or this method will panic. The key may alternatively be of
+// DynamicPseudoType, in which case the result itself is an unknown of the
+// collection's element type.
+//
+// The result is of the receiver collection's element type, or in the case
+// of a tuple the type of the specific element index requested.
+//
+// This method may be called on a value whose type is DynamicPseudoType,
+// in which case the result will also be DynamicVal.
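Before Index's implementation below, a sketch of GetAttr's behavior on known and unknown objects (example values are ours):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	ty := cty.Object(map[string]cty.Type{"name": cty.String})

	known := cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("example")})
	fmt.Println(known.GetAttr("name").AsString()) // example

	// On an unknown object the attribute type is still known, so the
	// result is an unknown of that type rather than DynamicVal.
	attr := cty.UnknownVal(ty).GetAttr("name")
	fmt.Println(attr.IsKnown(), attr.Type().Equals(cty.String)) // false true
}
```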
+func (val Value) Index(key Value) Value {
+	if val.IsMarked() || key.IsMarked() {
+		val, valMarks := val.Unmark()
+		key, keyMarks := key.Unmark()
+		return val.Index(key).WithMarks(valMarks, keyMarks)
+	}
+
+	if val.ty == DynamicPseudoType {
+		return DynamicVal
+	}
+
+	switch {
+	case val.Type().IsListType():
+		elty := val.Type().ElementType()
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(elty)
+		}
+
+		if key.Type() != Number {
+			panic("element key for list must be number")
+		}
+		if !key.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		if !val.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			panic("element key for list must be non-negative integer")
+		}
+
+		return Value{
+			ty: elty,
+			v:  val.v.([]interface{})[index],
+		}
+	case val.Type().IsMapType():
+		elty := val.Type().ElementType()
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(elty)
+		}
+
+		if key.Type() != String {
+			panic("element key for map must be string")
+		}
+		if !key.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		if !val.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		keyStr := key.v.(string)
+
+		return Value{
+			ty: elty,
+			v:  val.v.(map[string]interface{})[keyStr],
+		}
+	case val.Type().IsTupleType():
+		if key.Type() == DynamicPseudoType {
+			return DynamicVal
+		}
+
+		if key.Type() != Number {
+			panic("element key for tuple must be number")
+		}
+		if !key.IsKnown() {
+			return DynamicVal
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			panic("element key for tuple must be non-negative integer")
+		}
+
+		eltys := val.Type().TupleElementTypes()
+
+		if !val.IsKnown() {
+			return UnknownVal(eltys[index])
+		}
+
+		return Value{
+			ty: eltys[index],
+			v:  val.v.([]interface{})[index],
+		}
+	default:
+		panic("not a list, map, or tuple type")
+	}
+}
+
+// HasIndex returns True if the receiver (which must be supported for Index)
+// has an element with the given index key, or False if it does not.
+//
+// The result will be UnknownVal(Bool) if either the collection or the
+// key value are unknown.
+//
+// This method will panic if the receiver is not indexable, but does not
+// impose any panic-causing type constraints on the key.
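Before HasIndex's implementation below, a sketch of the guard pattern it enables around Index (example values are ours):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	list := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})

	// Indexing out of range would panic, so HasIndex is the natural guard.
	key := cty.NumberIntVal(1)
	if list.HasIndex(key).True() {
		fmt.Println(list.Index(key).AsString()) // b
	}

	// Out of range: HasIndex returns cty.False instead of panicking.
	fmt.Println(list.HasIndex(cty.NumberIntVal(5)).True()) // false
}
```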
+func (val Value) HasIndex(key Value) Value { + if val.IsMarked() || key.IsMarked() { + val, valMarks := val.Unmark() + key, keyMarks := key.Unmark() + return val.HasIndex(key).WithMarks(valMarks, keyMarks) + } + + if val.ty == DynamicPseudoType { + return UnknownVal(Bool) + } + + switch { + case val.Type().IsListType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0) + case val.Type().IsMapType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != String { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + keyStr := key.v.(string) + _, exists := val.v.(map[string]interface{})[keyStr] + + return BoolVal(exists) + case val.Type().IsTupleType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + length := val.Type().Length() + return BoolVal(int(index) < length && index >= 0) + default: + panic("not a list, map, or tuple type") + } +} + +// HasElement returns True if the receiver (which must be of a set type) +// has the given value as an element, or False if it does not. +// +// The result will be UnknownVal(Bool) if either the set or the +// given value are unknown. +// +// This method will panic if the receiver is not a set, or if it is a null set. +func (val Value) HasElement(elem Value) Value { + if val.IsMarked() || elem.IsMarked() { + val, valMarks := val.Unmark() + elem, elemMarks := elem.Unmark() + return val.HasElement(elem).WithMarks(valMarks, elemMarks) + } + + ty := val.Type() + + if !ty.IsSetType() { + panic("not a set type") + } + if !val.IsKnown() || !elem.IsKnown() { + return UnknownVal(Bool) + } + if val.IsNull() { + panic("can't call HasElement on a nil value") + } + if !ty.ElementType().Equals(elem.Type()) { + return False + } + + s := val.v.(set.Set) + return BoolVal(s.Has(elem.v)) +} + +// Length returns the length of the receiver, which must be a collection type +// or tuple type, as a number value. If the receiver is not a compatible type +// then this method will panic. +// +// If the receiver is unknown then the result is also unknown. +// +// If the receiver is null then this function will panic. +// +// Note that Length is not supported for strings. To determine the length +// of a string, call AsString and take the length of the native Go string +// that is returned. +func (val Value) Length() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Length().WithMarks(valMarks) + } + + if val.Type().IsTupleType() { + // For tuples, we can return the length even if the value is not known. + return NumberIntVal(int64(val.Type().Length())) + } + + if !val.IsKnown() { + return UnknownVal(Number) + } + + return NumberIntVal(int64(val.LengthInt())) +} + +// LengthInt is like Length except it returns an int. It has the same behavior +// as Length except that it will panic if the receiver is unknown. 
+//
+// This is an integration method provided for the convenience of code bridging
+// into Go's type system.
+func (val Value) LengthInt() int {
+	val.assertUnmarked()
+	if val.Type().IsTupleType() {
+		// For tuples, we can return the length even if the value is not known.
+		return val.Type().Length()
+	}
+	if val.Type().IsObjectType() {
+		// For objects, the length is the number of attributes associated with the type.
+		return len(val.Type().AttributeTypes())
+	}
+	if !val.IsKnown() {
+		panic("value is not known")
+	}
+	if val.IsNull() {
+		panic("value is null")
+	}
+
+	switch {
+
+	case val.ty.IsListType():
+		return len(val.v.([]interface{}))
+
+	case val.ty.IsSetType():
+		return val.v.(set.Set).Length()
+
+	case val.ty.IsMapType():
+		return len(val.v.(map[string]interface{}))
+
+	default:
+		panic("value is not a collection")
+	}
+}
+
+// ElementIterator returns an ElementIterator for iterating the elements
+// of the receiver, which must be a collection type, a tuple type, or an object
+// type. If called on a value of any other type, this method will panic.
+//
+// The value must be Known and non-Null, or this method will panic.
+//
+// If the receiver is of a list type, the returned keys will be of type Number
+// and the values will be of the list's element type.
+//
+// If the receiver is of a map type, the returned keys will be of type String
+// and the value will be of the map's element type. Elements are passed in
+// ascending lexicographical order by key.
+//
+// If the receiver is of a set type, each element is returned as both the
+// key and the value, since set members are their own identity.
+//
+// If the receiver is of a tuple type, the returned keys will be of type Number
+// and the value will be of the corresponding element's type.
+//
+// If the receiver is of an object type, the returned keys will be of type
+// String and the value will be of the corresponding attribute's type.
+//
+// ElementIterator is an integration method, so it cannot handle Unknown
+// values. This method will panic if the receiver is Unknown.
+func (val Value) ElementIterator() ElementIterator {
+	val.assertUnmarked()
+	if !val.IsKnown() {
+		panic("can't use ElementIterator on unknown value")
+	}
+	if val.IsNull() {
+		panic("can't use ElementIterator on null value")
+	}
+	return elementIterator(val)
+}
+
+// CanIterateElements returns true if the receiver can support the
+// ElementIterator method (and by extension, ForEachElement) without panic.
+func (val Value) CanIterateElements() bool {
+	return canElementIterator(val)
+}
+
+// ForEachElement executes a given callback function for each element of
+// the receiver, which must be a collection type or tuple type, or this method
+// will panic.
+//
+// ForEachElement uses ElementIterator internally, and so the values passed
+// to the callback are as described for ElementIterator.
+//
+// Returns true if the iteration exited early due to the callback function
+// returning true, or false if the loop ran to completion.
+//
+// ForEachElement is an integration method, so it cannot handle Unknown
+// values. This method will panic if the receiver is Unknown.
+func (val Value) ForEachElement(cb ElementCallback) bool {
+	val.assertUnmarked()
+	it := val.ElementIterator()
+	for it.Next() {
+		key, val := it.Element()
+		stop := cb(key, val)
+		if stop {
+			return true
+		}
+	}
+	return false
+}
+
+// Not returns the logical inverse of the receiver, which must be of type
+// Bool or this method will panic.
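Before Not's implementation below, a sketch of the two iteration styles just documented (example values are ours):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	m := cty.MapVal(map[string]cty.Value{
		"a": cty.NumberIntVal(1),
		"b": cty.NumberIntVal(2),
	})

	// Explicit iterator: map keys arrive in ascending lexicographical order.
	for it := m.ElementIterator(); it.Next(); {
		k, v := it.Element()
		fmt.Println(k.AsString(), v.AsBigFloat())
	}

	// Callback style: returning true stops the iteration early.
	stopped := m.ForEachElement(func(k, v cty.Value) bool {
		return k.AsString() == "a"
	})
	fmt.Println(stopped) // true
}
```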
+func (val Value) Not() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Not().WithMarks(valMarks) + } + + if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(!val.v.(bool)) +} + +// And returns the result of logical AND with the receiver and the other given +// value, which must both be of type Bool or this method will panic. +func (val Value) And(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.And(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) && other.v.(bool)) +} + +// Or returns the result of logical OR with the receiver and the other given +// value, which must both be of type Bool or this method will panic. +func (val Value) Or(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Or(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) || other.v.(bool)) +} + +// LessThan returns True if the receiver is less than the other given value, +// which must both be numbers or this method will panic. +func (val Value) LessThan(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.LessThan(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0) +} + +// GreaterThan returns True if the receiver is greater than the other given +// value, which must both be numbers or this method will panic. +func (val Value) GreaterThan(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.GreaterThan(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0) +} + +// LessThanOrEqualTo is equivalent to LessThan and Equal combined with Or. +func (val Value) LessThanOrEqualTo(other Value) Value { + return val.LessThan(other).Or(val.Equals(other)) +} + +// GreaterThanOrEqualTo is equivalent to GreaterThan and Equal combined with Or. +func (val Value) GreaterThanOrEqualTo(other Value) Value { + return val.GreaterThan(other).Or(val.Equals(other)) +} + +// AsString returns the native string from a non-null, non-unknown cty.String +// value, or panics if called on any other value. 
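Before AsString's implementation below, a sketch of the comparison and boolean operations above; True is the bridge back to a native Go bool, and per its comment it does not work on unknowns (example values are ours):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	a := cty.NumberIntVal(2)
	b := cty.NumberIntVal(3)

	// Comparisons stay inside the type system: they return cty.Bool
	// values, not Go bools.
	inRange := a.GreaterThan(cty.Zero).And(a.LessThan(b))
	fmt.Println(inRange.True()) // true

	// With an unknown operand the result is UnknownVal(cty.Bool);
	// calling True on it would panic, so check IsKnown first.
	u := cty.UnknownVal(cty.Number)
	fmt.Println(a.LessThan(u).IsKnown()) // false
}
```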
+func (val Value) AsString() string { + val.assertUnmarked() + if val.ty != String { + panic("not a string") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + return val.v.(string) +} + +// AsBigFloat returns a big.Float representation of a non-null, non-unknown +// cty.Number value, or panics if called on any other value. +// +// For more convenient conversions to other native numeric types, use the +// "gocty" package. +func (val Value) AsBigFloat() *big.Float { + val.assertUnmarked() + if val.ty != Number { + panic("not a number") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + // Copy the float so that callers can't mutate our internal state + ret := *(val.v.(*big.Float)) + + return &ret +} + +// AsValueSlice returns a []cty.Value representation of a non-null, non-unknown +// value of any type that CanIterateElements, or panics if called on +// any other value. +// +// For more convenient conversions to slices of more specific types, use +// the "gocty" package. +func (val Value) AsValueSlice() []Value { + val.assertUnmarked() + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret = append(ret, v) + } + return ret +} + +// AsValueMap returns a map[string]cty.Value representation of a non-null, +// non-unknown value of any type that CanIterateElements, or panics if called +// on any other value. +// +// For more convenient conversions to maps of more specific types, use +// the "gocty" package. +func (val Value) AsValueMap() map[string]Value { + val.assertUnmarked() + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make(map[string]Value, l) + for it := val.ElementIterator(); it.Next(); { + k, v := it.Element() + ret[k.AsString()] = v + } + return ret +} + +// AsValueSet returns a ValueSet representation of a non-null, +// non-unknown value of any collection type, or panics if called +// on any other value. +// +// Unlike AsValueSlice and AsValueMap, this method requires specifically a +// collection type (list, set or map) and does not allow structural types +// (tuple or object), because the ValueSet type requires homogenous +// element types. +// +// The returned ValueSet can store only values of the receiver's element type. +func (val Value) AsValueSet() ValueSet { + val.assertUnmarked() + if !val.Type().IsCollectionType() { + panic("not a collection type") + } + + // We don't give the caller our own set.Set (assuming we're a cty.Set value) + // because then the caller could mutate our internals, which is forbidden. + // Instead, we will construct a new set and append our elements into it. + ret := NewValueSet(val.Type().ElementType()) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret.Add(v) + } + return ret +} + +// EncapsulatedValue returns the native value encapsulated in a non-null, +// non-unknown capsule-typed value, or panics if called on any other value. +// +// The result is the same pointer that was passed to CapsuleVal to create +// the value. Since cty considers values to be immutable, it is strongly +// recommended to treat the encapsulated value itself as immutable too. 
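Before EncapsulatedValue's implementation below, a sketch of the As* integration methods just defined (example values are ours):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	obj := cty.ObjectVal(map[string]cty.Value{
		"name":  cty.StringVal("example"),
		"count": cty.NumberIntVal(3),
	})

	// AsValueMap flattens one level into a native Go map; the element
	// Values themselves stay inside the cty type system.
	for name, v := range obj.AsValueMap() {
		fmt.Println(name, v.Type().FriendlyName())
	}

	// AsBigFloat returns a copy, so mutating the result cannot corrupt
	// the (immutable) Value it came from.
	f := obj.GetAttr("count").AsBigFloat()
	f.Add(f, f)
	fmt.Println(obj.GetAttr("count").AsBigFloat()) // still 3
}
```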
+func (val Value) EncapsulatedValue() interface{} {
+	val.assertUnmarked()
+	if !val.Type().IsCapsuleType() {
+		panic("not a capsule-typed value")
+	}
+
+	return val.v
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/walk.go b/vendor/github.com/hashicorp/go-cty/cty/walk.go
new file mode 100644
index 0000000000..a6943babef
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/walk.go
@@ -0,0 +1,182 @@
+package cty
+
+// Walk visits all of the values in a possibly-complex structure, calling
+// a given function for each value.
+//
+// For example, given a list of strings the callback would first be called
+// with the whole list and then called once for each element of the list.
+//
+// The callback function may prevent recursive visits to child values by
+// returning false. The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context.
+//
+// The path passed to the given function may not be used after that function
+// returns, since its backing array is re-used for other calls.
+func Walk(val Value, cb func(Path, Value) (bool, error)) error {
+	var path Path
+	return walk(path, val, cb)
+}
+
+func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error {
+	deeper, err := cb(path, val)
+	if err != nil {
+		return err
+	}
+	if !deeper {
+		return nil
+	}
+
+	if val.IsNull() || !val.IsKnown() {
+		// Can't recurse into null or unknown values, regardless of type
+		return nil
+	}
+
+	ty := val.Type()
+	switch {
+	case ty.IsObjectType():
+		for it := val.ElementIterator(); it.Next(); {
+			nameVal, av := it.Element()
+			path := append(path, GetAttrStep{
+				Name: nameVal.AsString(),
+			})
+			err := walk(path, av, cb)
+			if err != nil {
+				return err
+			}
+		}
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			kv, ev := it.Element()
+			path := append(path, IndexStep{
+				Key: kv,
+			})
+			err := walk(path, ev, cb)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Transform visits all of the values in a possibly-complex structure,
+// calling a given function for each value which has an opportunity to
+// replace that value.
+//
+// Unlike Walk, Transform visits child nodes first, so for a list of strings
+// it would first visit the strings and then the _new_ list constructed
+// from the transformed values of the list items.
+//
+// This is useful for creating the effect of being able to make deep mutations
+// to a value even though values are immutable. However, it's the responsibility
+// of the given function to preserve expected invariants, such as homogeneity of
+// element types in collections; this function can panic if such invariants
+// are violated, just as if new values were constructed directly using the
+// value constructor functions. An easy way to preserve invariants is to
+// ensure that the transform function never changes the value type.
+//
+// The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context.
+//
+// The path passed to the given function may not be used after that function
+// returns, since its backing array is re-used for other calls.
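Before Transform's implementation below, a sketch of Walk and Transform together; keeping the transformed value's type unchanged preserves the invariants the comment above warns about (example values are ours):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"tags": cty.ListVal([]cty.Value{
			cty.StringVal("a"),
			cty.StringVal("b"),
		}),
	})

	// Walk visits parents before children; returning false would stop
	// descent into the current value.
	_ = cty.Walk(val, func(p cty.Path, v cty.Value) (bool, error) {
		fmt.Println(len(p), v.Type().FriendlyName())
		return true, nil
	})

	// Transform visits children first and rebuilds the structure around
	// the replaced leaves; the value type is deliberately left unchanged.
	upper, err := cty.Transform(val, func(p cty.Path, v cty.Value) (cty.Value, error) {
		if v.Type().Equals(cty.String) && v.IsKnown() && !v.IsNull() {
			return cty.StringVal(strings.ToUpper(v.AsString())), nil
		}
		return v, nil
	})
	if err == nil {
		fmt.Println(upper.GetAttr("tags").Index(cty.NumberIntVal(0)).AsString()) // A
	}
}
```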
+func Transform(val Value, cb func(Path, Value) (Value, error)) (Value, error) { + var path Path + return transform(path, val, cb) +} + +func transform(path Path, val Value, cb func(Path, Value) (Value, error)) (Value, error) { + ty := val.Type() + var newVal Value + + switch { + + case val.IsNull() || !val.IsKnown(): + // Can't recurse into null or unknown values, regardless of type + newVal = val + + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty sequence + newVal = val + default: + elems := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems = append(elems, newEv) + } + switch { + case ty.IsListType(): + newVal = ListVal(elems) + case ty.IsSetType(): + newVal = SetVal(elems) + case ty.IsTupleType(): + newVal = TupleVal(elems) + default: + panic("unknown sequence type") // should never happen because of the case we are in + } + } + + case ty.IsMapType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty map + newVal = val + default: + elems := make(map[string]Value) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems[kv.AsString()] = newEv + } + newVal = MapVal(elems) + } + + case ty.IsObjectType(): + switch { + case ty.Equals(EmptyObject): + // No deep transform for an empty object + newVal = val + default: + atys := ty.AttributeTypes() + newAVs := make(map[string]Value) + for name := range atys { + av := val.GetAttr(name) + path := append(path, GetAttrStep{ + Name: name, + }) + newAV, err := transform(path, av, cb) + if err != nil { + return DynamicVal, err + } + newAVs[name] = newAV + } + newVal = ObjectVal(newAVs) + } + + default: + newVal = val + } + + return cb(path, newVal) +} diff --git a/vendor/github.com/hashicorp/go-getter/.gitignore b/vendor/github.com/hashicorp/go-getter/.gitignore deleted file mode 100644 index 511ca2675b..0000000000 --- a/vendor/github.com/hashicorp/go-getter/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cmd/go-getter/go-getter diff --git a/vendor/github.com/hashicorp/go-getter/.goreleaser.yml b/vendor/github.com/hashicorp/go-getter/.goreleaser.yml deleted file mode 100644 index f1c1b9decd..0000000000 --- a/vendor/github.com/hashicorp/go-getter/.goreleaser.yml +++ /dev/null @@ -1,49 +0,0 @@ -env: - - GOPRIVATE=github.com/hashicorp - -builds: - - id: signable - mod_timestamp: '{{ .CommitTimestamp }}' - targets: - - darwin_amd64 - - windows_386 - - windows_amd64 - dir: ./cmd/go-getter/ - flags: - - -trimpath - ldflags: - - -X main.GitCommit={{ .Commit }} - - mod_timestamp: '{{ .CommitTimestamp }}' - targets: - - linux_386 - - linux_amd64 - dir: ./cmd/go-getter/ - flags: - - -trimpath - ldflags: - - -X main.GitCommit={{ .Commit }} - -archives: - - format: zip - name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}" - files: - - none* - -checksum: - name_template: '{{ .ProjectName }}_{{ .Version }}_SHA256SUMS' - algorithm: sha256 - -signs: - - signature: ${artifact}.sig - cmd: sh - args: - - -c - - >- - signore - sign - --file ${artifact} - --out ${signature} - artifacts: checksum - -changelog: - skip: true diff --git a/vendor/github.com/hashicorp/go-getter/LICENSE 
b/vendor/github.com/hashicorp/go-getter/LICENSE deleted file mode 100644 index 34a4c73fc4..0000000000 --- a/vendor/github.com/hashicorp/go-getter/LICENSE +++ /dev/null @@ -1,356 +0,0 @@ -Copyright (c) 2015 HashiCorp, Inc. - -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. 
- Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. 
Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md deleted file mode 100644 index 7f380fdcca..0000000000 --- a/vendor/github.com/hashicorp/go-getter/README.md +++ /dev/null @@ -1,453 +0,0 @@ -# go-getter - -[![CircleCI](https://circleci.com/gh/hashicorp/go-getter/tree/main.svg?style=svg)][circleci] -[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] - -[circleci]: https://circleci.com/gh/hashicorp/go-getter/tree/main -[godocs]: http://godoc.org/github.com/hashicorp/go-getter - -go-getter is a library for Go (golang) for downloading files or directories -from various sources using a URL as the primary form of input. - -The power of this library is being flexible in being able to download -from a number of different sources (file paths, Git, HTTP, Mercurial, etc.) -using a single string as input. This removes the burden of knowing how to -download from a variety of sources from the implementer. - -The concept of a _detector_ automatically turns invalid URLs into proper -URLs. For example: "github.com/hashicorp/go-getter" would turn into a -Git URL. Or "./foo" would turn into a file URL. These are extensible. - -This library is used by [Terraform](https://terraform.io) for -downloading modules and [Nomad](https://nomadproject.io) for downloading -binaries. 
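Editor's note: the detector behaviour described in the README intro above is easiest to see through the library's own `Client` type (deleted further down in this diff). A minimal sketch against the pre-removal go-getter API; the destination path is illustrative:

```go
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// The GitHub detector rewrites the bare path below into a full
	// Git-over-HTTPS URL before the git getter runs.
	client := &getter.Client{
		Src:  "github.com/hashicorp/go-getter",
		Dst:  "./go-getter-src", // illustrative destination
		Pwd:  ".",               // working directory used for detection
		Mode: getter.ClientModeDir,
	}
	if err := client.Get(); err != nil {
		log.Fatal(err)
	}
}
```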
- -## Installation and Usage - -Package documentation can be found on -[GoDoc](http://godoc.org/github.com/hashicorp/go-getter). - -Installation can be done with a normal `go get`: - -``` -$ go get github.com/hashicorp/go-getter -``` - -go-getter also has a command you can use to test URL strings: - -``` -$ go install github.com/hashicorp/go-getter/cmd/go-getter -... - -$ go-getter github.com/foo/bar ./foo -... -``` - -The command is useful for verifying URL structures. - -## Security -Fetching resources from user-supplied URLs is an inherently dangerous operation and may -leave your application vulnerable to [server side request forgery](https://owasp.org/www-community/attacks/Server_Side_Request_Forgery), -[path traversal](https://owasp.org/www-community/attacks/Path_Traversal), [denial of service](https://owasp.org/www-community/attacks/Denial_of_Service) -or other security flaws. - -go-getter contains mitigations for some of these security issues, but should still be used with -caution in security-critical contexts. See the available [security options](#Security-Options) that -can be configured to mitigate some of these risks. - -go-getter may return values that contain caller-provided query parameters that can contain sensitive data. -Context around what parameters are and are not sensitive is known only by the caller of go-getter, and specific to each use case. -We recommend the caller ensure that go-getter's return values (e.g., error messages) are properly handled and sanitized to ensure -sensitive data is not persisted to logs. -## URL Format - -go-getter uses a single string URL as input to download from a variety of -protocols. go-getter has various "tricks" with this URL to do certain things. -This section documents the URL format. - -### Supported Protocols and Detectors - -**Protocols** are used to download files/directories using a specific -mechanism. Example protocols are Git and HTTP. - -**Detectors** are used to transform a valid or invalid URL into another -URL if it matches a certain pattern. Example: "github.com/user/repo" is -automatically transformed into a fully valid Git URL. This allows go-getter -to be very user friendly. - -go-getter out of the box supports the following protocols. Additional protocols -can be augmented at runtime by implementing the `Getter` interface. - - * Local files - * Git - * Mercurial - * HTTP - * Amazon S3 - * Google GCP - -In addition to the above protocols, go-getter has what are called "detectors." -These take a URL and attempt to automatically choose the best protocol for -it, which might involve even changing the protocol. The following detection -is built-in by default: - - * File paths such as "./foo" are automatically changed to absolute - file URLs. - * GitHub URLs, such as "github.com/mitchellh/vagrant" are automatically - changed to Git protocol over HTTP. - * GitLab URLs, such as "gitlab.com/inkscape/inkscape" are automatically - changed to Git protocol over HTTP. - * BitBucket URLs, such as "bitbucket.org/mitchellh/vagrant" are automatically - changed to a Git or mercurial protocol using the BitBucket API. - -### Forced Protocol - -In some cases, the protocol to use is ambiguous depending on the source -URL. For example, "http://github.com/mitchellh/vagrant.git" could reference -an HTTP URL or a Git URL. Forced protocol syntax is used to disambiguate this -URL. - -Forced protocol can be done by prefixing the URL with the protocol followed -by double colons. 
For example: `git::http://github.com/mitchellh/vagrant.git` -would download the given HTTP URL using the Git protocol. - -Forced protocols will also override any detectors. - -In the absence of a forced protocol, detectors may be run on the URL, transforming -the protocol anyways. The above example would've used the Git protocol either -way since the Git detector would've detected it was a GitHub URL. - -### Protocol-Specific Options - -Each protocol can support protocol-specific options to configure that -protocol. For example, the `git` protocol supports specifying a `ref` -query parameter that tells it what ref to checkout for that Git -repository. - -The options are specified as query parameters on the URL (or URL-like string) -given to go-getter. Using the Git example above, the URL below is a valid -input to go-getter: - - github.com/hashicorp/go-getter?ref=abcd1234 - -The protocol-specific options are documented below the URL format -section. But because they are part of the URL, we point it out here so -you know they exist. - -### Subdirectories - -If you want to download only a specific subdirectory from a downloaded -directory, you can specify a subdirectory after a double-slash `//`. -go-getter will first download the URL specified _before_ the double-slash -(as if you didn't specify a double-slash), but will then copy the -path after the double slash into the target directory. - -For example, if you're downloading this GitHub repository, but you only -want to download the `testdata` directory, you can do the following: - -``` -https://github.com/hashicorp/go-getter.git//testdata -``` - -If you downloaded this to the `/tmp` directory, then the file -`/tmp/archive.gz` would exist. Notice that this file is in the `testdata` -directory in this repository, but because we specified a subdirectory, -go-getter automatically copied only that directory contents. - -Subdirectory paths may also use filesystem glob patterns. The path must -match _exactly one_ entry or go-getter will return an error. -This is useful if you're not sure the exact directory name but it follows -a predictable naming structure. - -For example, the following URL would also work: - -``` -https://github.com/hashicorp/go-getter.git//test-* -``` - -### Checksumming - -For file downloads of any protocol, go-getter can automatically verify -a checksum for you. Note that checksumming only works for downloading files, -not directories, but checksumming will work for any protocol. - -To checksum a file, append a `checksum` query parameter to the URL. go-getter -will parse out this query parameter automatically and use it to verify the -checksum. The parameter value can be in the format of `type:value` or just -`value`, where type is "md5", "sha1", "sha256", "sha512" or "file" . The -"value" should be the actual checksum value or download URL for "file". When -`type` part is omitted, type will be guessed based on the length of the -checksum string. Examples: - -``` -./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21 -``` - -``` -./foo.txt?checksum=b7d96c89d09d9e204f5fedc4d5d55b21 -``` - -``` -./foo.txt?checksum=file:./foo.txt.sha256sum -``` - -When checksumming from a file - ex: with `checksum=file:url` - go-getter will -get the file linked in the URL after `file:` using the same configuration. For -example, in `file:http://releases.ubuntu.com/cosmic/MD5SUMS` go-getter will -download a checksum file under the aforementioned url using the http protocol. -All protocols supported by go-getter can be used. 
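Editor's note: a short sketch of how the `checksum` query parameter described in this section is consumed, reusing the md5 value from the README's own example; paths are illustrative, and the behaviour matches the `Client`/`ChecksumError` code deleted later in this diff:

```go
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// The checksum parameter is parsed and enforced by the Client itself
	// and is never forwarded to the protocol getter.
	client := &getter.Client{
		Src:  "./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21",
		Dst:  "./foo-copy.txt", // illustrative destination
		Pwd:  ".",
		Mode: getter.ClientModeFile,
	}
	if err := client.Get(); err != nil {
		log.Fatal(err) // a mismatch surfaces as a *getter.ChecksumError
	}
}
```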
The checksum file will be -downloaded in a temporary file then parsed. The destination of the temporary -file can be changed by setting system specific environment variables: `TMPDIR` -for unix; `TMP`, `TEMP` or `USERPROFILE` on windows. Read godoc of -[os.TempDir](https://golang.org/pkg/os/#TempDir) for more information on the -temporary directory selection. Content of files are expected to be BSD or GNU -style. Once go-getter is done with the checksum file; it is deleted. - -The checksum query parameter is never sent to the backend protocol -implementation. It is used at a higher level by go-getter itself. - -If the destination file exists and the checksums match: download -will be skipped. - -### Unarchiving - -go-getter will automatically unarchive files into a file or directory -based on the extension of the file being requested (over any protocol). -This works for both file and directory downloads. - -go-getter looks for an `archive` query parameter to specify the format of -the archive. If this isn't specified, go-getter will use the extension of -the path to see if it appears archived. Unarchiving can be explicitly -disabled by setting the `archive` query parameter to `false`. - -The following archive formats are supported: - - * `tar.gz` and `tgz` - * `tar.bz2` and `tbz2` - * `tar.xz` and `txz` - * `zip` - * `gz` - * `bz2` - * `xz` - -For example, an example URL is shown below: - -``` -./foo.zip -``` - -This will automatically be inferred to be a ZIP file and will be extracted. -You can also be explicit about the archive type: - -``` -./some/other/path?archive=zip -``` - -And finally, you can disable archiving completely: - -``` -./some/path?archive=false -``` - -You can combine unarchiving with the other features of go-getter such -as checksumming. The special `archive` query parameter will be removed -from the URL before going to the final protocol downloader. - -## Protocol-Specific Options - -This section documents the protocol-specific options that can be specified for -go-getter. These options should be appended to the input as normal query -parameters ([HTTP headers](#headers) are an exception to this, however). -Depending on the usage of go-getter, applications may provide alternate ways of -inputting options. For example, [Nomad](https://www.nomadproject.io) provides a -nice options block for specifying options rather than in the URL. - -## General (All Protocols) - -The options below are available to all protocols: - - * `archive` - The archive format to use to unarchive this file, or "" (empty - string) to disable unarchiving. For more details, see the complete section - on archive support above. - - * `checksum` - Checksum to verify the downloaded file or archive. See - the entire section on checksumming above for format and more details. - - * `filename` - When in file download mode, allows specifying the name of the - downloaded file on disk. Has no effect in directory mode. - -### Local Files (`file`) - -None - -### Git (`git`) - - * `ref` - The Git ref to checkout. This is a ref, so it can point to - a commit SHA, a branch name, etc. If it is a named ref such as a branch - name, go-getter will update it to the latest on each get. - - * `sshkey` - An SSH private key to use during clones. The provided key must - be a base64-encoded string. For example, to generate a suitable `sshkey` - from a private key file on disk, you would run `base64 -w0 `. - - **Note**: Git 2.3+ is required to use this feature. - - * `depth` - The Git clone depth. 
The provided number specifies the last `n` - revisions to clone from the repository. - - -The `git` getter accepts both URL-style SSH addresses like -`git::ssh://git@example.com/foo/bar`, and "scp-style" addresses like -`git::git@example.com/foo/bar`. In the latter case, omitting the `git::` -force prefix is allowed if the username prefix is exactly `git@`. - -The "scp-style" addresses _cannot_ be used in conjunction with the `ssh://` -scheme prefix, because in that case the colon is used to mark an optional -port number to connect on, rather than to delimit the path from the host. - -### Mercurial (`hg`) - - * `rev` - The Mercurial revision to checkout. - -### HTTP (`http`) - -#### Basic Authentication - -To use HTTP basic authentication with go-getter, simply prepend `username:password@` to the -hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special -characters, including the username and password, must be URL encoded. - -#### Headers - -Optional request headers can be added by supplying them in a custom -[`HttpGetter`](https://godoc.org/github.com/hashicorp/go-getter#HttpGetter) -(_not_ as query parameters like most other options). These headers will be sent -out on every request the getter in question makes. - -### S3 (`s3`) - -S3 takes various access configurations in the URL. Note that it will also -read these from standard AWS environment variables if they're set. S3 compliant servers like Minio -are also supported. If the query parameters are present, these take priority. - - * `aws_access_key_id` - AWS access key. - * `aws_access_key_secret` - AWS access key secret. - * `aws_access_token` - AWS access token if this is being used. - * `aws_profile` - Use this profile from local ~/.aws/ config. Takes priority over the other three. - -#### Using IAM Instance Profiles with S3 - -If you use go-getter and want to use an EC2 IAM Instance Profile to avoid -using credentials, then just omit these and the profile, if available, will -be used automatically. - -### Using S3 with Minio - If you use go-getter for Minio support, you must consider the following: - - * `aws_access_key_id` (required) - Minio access key. - * `aws_access_key_secret` (required) - Minio access key secret. - * `region` (optional - defaults to us-east-1) - Region identifier to use. - * `version` (optional - defaults to Minio default) - Configuration file format. - -#### S3 Bucket Examples - -S3 has several addressing schemes used to reference your bucket. These are -listed here: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html - -Some examples for these addressing schemes: -- s3::https://s3.amazonaws.com/bucket/foo -- s3::https://s3-eu-west-1.amazonaws.com/bucket/foo -- bucket.s3.amazonaws.com/foo -- bucket.s3-eu-west-1.amazonaws.com/foo/bar -- "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY&region=us-east-2" - -### GCS (`gcs`) - -#### GCS Authentication - -In order to access GCS, authentication credentials should be provided. More information can be found [here](https://cloud.google.com/docs/authentication/getting-started) - -#### GCS Bucket Examples - -- gcs::https://www.googleapis.com/storage/v1/bucket -- gcs::https://www.googleapis.com/storage/v1/bucket/foo.zip -- www.googleapis.com/storage/v1/bucket/foo - -#### GCS Testing - -The tests for `get_gcs.go` require you to have GCP credentials set in your environment.
These credentials can have any level of permissions to any project, they just need to exist. This means setting `GOOGLE_APPLICATION_CREDENTIALS="~/path/to/credentials.json"` or `GOOGLE_CREDENTIALS="{stringified-credentials-json}"`. Due to this configuration, `get_gcs_test.go` will fail for external contributors in CircleCI. - - -### Security Options - -**Disable Symlinks** - -In your getter client config, we recommend using the `DisableSymlinks` option, -which prevents writing through or copying from symlinks (which may point outside the directory). - -```go -client := getter.Client{ - // This will prevent copying or writing files through symlinks - DisableSymlinks: true, -} -``` - -**Disable or Limit `X-Terraform-Get`** - -Go-Getter supports arbitrary redirects via the `X-Terraform-Get` header. This functionality -exists to support [Terraform use cases](https://www.terraform.io/language/modules/sources#http-urls), -but is likely not needed in most applications. - -For code that uses the `HttpGetter`, add the following configuration options: - -```go -var httpGetter = &getter.HttpGetter{ - // Most clients should disable X-Terraform-Get - // See the note below - XTerraformGetDisabled: true, - // Your software probably doesn’t rely on X-Terraform-Get, but - // if it does, you should set the above field to false, plus - // set XTerraformGet Limit to prevent endless redirects - // XTerraformGetLimit: 10, -} -``` - -**Enforce Timeouts** - -The `HttpGetter` supports timeouts and other resource-constraining configuration options. The `GitGetter` and `HgGetter` -only support timeouts. - -Configuration for the `HttpGetter`: - -```go -var httpGetter = &getter.HttpGetter{ - // Disable pre-fetch HEAD requests - DoNotCheckHeadFirst: true, - - // As an alternative to the above setting, you can - // set a reasonable timeout for HEAD requests - // HeadFirstTimeout: 10 * time.Second, - - // Read timeout for HTTP operations - ReadTimeout: 30 * time.Second, - - // Set the maximum number of bytes - // that can be read by the getter - MaxBytes: 500000000, // 500 MB -} -``` - -For code that uses the `GitGetter` or `HgGetter`, set the `Timeout` option: -```go -var gitGetter = &getter.GitGetter{ - // Set a reasonable timeout for git operations - Timeout: 5 * time.Minute, -} -``` - -```go -var hgGetter = &getter.HgGetter{ - // Set a reasonable timeout for hg operations - Timeout: 5 * time.Minute, -} -``` - diff --git a/vendor/github.com/hashicorp/go-getter/checksum.go b/vendor/github.com/hashicorp/go-getter/checksum.go deleted file mode 100644 index f1090839fb..0000000000 --- a/vendor/github.com/hashicorp/go-getter/checksum.go +++ /dev/null @@ -1,317 +0,0 @@ -package getter - -import ( - "bufio" - "bytes" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" - "fmt" - "hash" - "io" - "net/url" - "os" - "path/filepath" - "strings" - - urlhelper "github.com/hashicorp/go-getter/helper/url" -) - -// FileChecksum helps verifying the checksum for a file. 
-type FileChecksum struct { - Type string - Hash hash.Hash - Value []byte - Filename string -} - -// A ChecksumError is returned when a checksum differs -type ChecksumError struct { - Hash hash.Hash - Actual []byte - Expected []byte - File string -} - -func (cerr *ChecksumError) Error() string { - if cerr == nil { - return "" - } - return fmt.Sprintf( - "Checksums did not match for %s.\nExpected: %s\nGot: %s\n%T", - cerr.File, - hex.EncodeToString(cerr.Expected), - hex.EncodeToString(cerr.Actual), - cerr.Hash, // ex: *sha256.digest - ) -} - -// checksum is a simple method to compute the checksum of a source file -// and compare it to the given expected value. -func (c *FileChecksum) checksum(source string) error { - f, err := os.Open(source) - if err != nil { - return fmt.Errorf("Failed to open file for checksum: %s", err) - } - defer f.Close() - - c.Hash.Reset() - if _, err := io.Copy(c.Hash, f); err != nil { - return fmt.Errorf("Failed to hash: %s", err) - } - - if actual := c.Hash.Sum(nil); !bytes.Equal(actual, c.Value) { - return &ChecksumError{ - Hash: c.Hash, - Actual: actual, - Expected: c.Value, - File: source, - } - } - - return nil -} - -// extractChecksum will return a FileChecksum based on the 'checksum' -// parameter of u. -// ex: -// http://hashicorp.com/terraform?checksum=<checksumValue> -// http://hashicorp.com/terraform?checksum=<checksumType>:<checksumValue> -// http://hashicorp.com/terraform?checksum=file:<checksum_url> -// when checksumming from a file, extractChecksum will go get checksum_url -// in a temporary directory, parse the content of the file then delete it. -// Content of files are expected to be BSD style or GNU style. -// -// BSD-style checksum: -// MD5 (file1) = <checksum> -// MD5 (file2) = <checksum> -// -// GNU-style: -// <checksum> file1 -// <checksum> *file2 -// -// see parseChecksumLine for more detail on checksum file parsing -func (c *Client) extractChecksum(u *url.URL) (*FileChecksum, error) { - q := u.Query() - v := q.Get("checksum") - - if v == "" { - return nil, nil - } - - vs := strings.SplitN(v, ":", 2) - switch len(vs) { - case 2: - break // good - default: - // here, we try to guess the checksum from its length - // if the type was not passed - return newChecksumFromValue(v, filepath.Base(u.EscapedPath())) - } - - checksumType, checksumValue := vs[0], vs[1] - - switch checksumType { - case "file": - return c.ChecksumFromFile(checksumValue, u) - default: - return newChecksumFromType(checksumType, checksumValue, filepath.Base(u.EscapedPath())) - } -} - -func newChecksum(checksumValue, filename string) (*FileChecksum, error) { - c := &FileChecksum{ - Filename: filename, - } - var err error - c.Value, err = hex.DecodeString(checksumValue) - if err != nil { - return nil, fmt.Errorf("invalid checksum: %s", err) - } - return c, nil -} - -func newChecksumFromType(checksumType, checksumValue, filename string) (*FileChecksum, error) { - c, err := newChecksum(checksumValue, filename) - if err != nil { - return nil, err - } - - c.Type = strings.ToLower(checksumType) - switch c.Type { - case "md5": - c.Hash = md5.New() - case "sha1": - c.Hash = sha1.New() - case "sha256": - c.Hash = sha256.New() - case "sha512": - c.Hash = sha512.New() - default: - return nil, fmt.Errorf( - "unsupported checksum type: %s", checksumType) - } - - return c, nil -} - -func newChecksumFromValue(checksumValue, filename string) (*FileChecksum, error) { - c, err := newChecksum(checksumValue, filename) - if err != nil { - return nil, err - } - - switch len(c.Value) { - case md5.Size: - c.Hash = md5.New() - c.Type = "md5" - case sha1.Size: - c.Hash = sha1.New() - c.Type = "sha1"
- case sha256.Size: - c.Hash = sha256.New() - c.Type = "sha256" - case sha512.Size: - c.Hash = sha512.New() - c.Type = "sha512" - default: - return nil, fmt.Errorf("Unknown type for checksum %s", checksumValue) - } - - return c, nil -} - -// ChecksumFromFile will return all the FileChecksums found in file -// -// ChecksumFromFile will try to guess the hashing algorithm based on content -// of checksum file -// -// ChecksumFromFile will only return checksums for files that match file -// behind src -func (c *Client) ChecksumFromFile(checksumFile string, src *url.URL) (*FileChecksum, error) { - checksumFileURL, err := urlhelper.Parse(checksumFile) - if err != nil { - return nil, err - } - - tempfile, err := tmpFile("", filepath.Base(checksumFileURL.Path)) - if err != nil { - return nil, err - } - defer os.Remove(tempfile) - - c2 := &Client{ - Ctx: c.Ctx, - Getters: c.Getters, - Decompressors: c.Decompressors, - Detectors: c.Detectors, - Pwd: c.Pwd, - Dir: false, - Src: checksumFile, - Dst: tempfile, - ProgressListener: c.ProgressListener, - } - if err = c2.Get(); err != nil { - return nil, fmt.Errorf( - "Error downloading checksum file: %s", err) - } - - filename := filepath.Base(src.Path) - absPath, err := filepath.Abs(src.Path) - if err != nil { - return nil, err - } - checksumFileDir := filepath.Dir(checksumFileURL.Path) - relpath, err := filepath.Rel(checksumFileDir, absPath) - switch { - case err == nil || - err.Error() == "Rel: can't make "+absPath+" relative to "+checksumFileDir: - // ex: on windows C:\gopath\...\content.txt cannot be relative to \ - // which is okay, may be another expected path will work. - break - default: - return nil, err - } - - // possible file identifiers: - options := []string{ - filename, // ubuntu-14.04.1-server-amd64.iso - "*" + filename, // *ubuntu-14.04.1-server-amd64.iso Standard checksum - "?" + filename, // ?ubuntu-14.04.1-server-amd64.iso shasum -p - relpath, // dir/ubuntu-14.04.1-server-amd64.iso - "./" + relpath, // ./dir/ubuntu-14.04.1-server-amd64.iso - absPath, // fullpath; set if local - } - - f, err := os.Open(tempfile) - if err != nil { - return nil, fmt.Errorf( - "Error opening downloaded file: %s", err) - } - defer f.Close() - rd := bufio.NewReader(f) - for { - line, err := rd.ReadString('\n') - if err != nil { - if err != io.EOF { - return nil, fmt.Errorf( - "Error reading checksum file: %s", err) - } - if line == "" { - break - } - // parse the line, if we hit EOF, but the line is not empty - } - checksum, err := parseChecksumLine(line) - if err != nil || checksum == nil { - continue - } - if checksum.Filename == "" { - // filename not sure, let's try - return checksum, nil - } - // make sure the checksum is for the right file - for _, option := range options { - if option != "" && checksum.Filename == option { - // any checksum will work so we return the first one - return checksum, nil - } - } - } - return nil, fmt.Errorf("no checksum found in: %s", checksumFile) -} - -// parseChecksumLine takes a line from a checksum file and returns -// checksumType, checksumValue and filename parseChecksumLine guesses the style -// of the checksum BSD vs GNU by splitting the line and by counting the parts. -// of a line. -// for BSD type sums parseChecksumLine guesses the hashing algorithm -// by checking the length of the checksum. 
-func parseChecksumLine(line string) (*FileChecksum, error) { - parts := strings.Fields(line) - - switch len(parts) { - case 4: - // BSD-style checksum: - // MD5 (file1) = - // MD5 (file2) = - if len(parts[1]) <= 2 || - parts[1][0] != '(' || parts[1][len(parts[1])-1] != ')' { - return nil, fmt.Errorf( - "Unexpected BSD-style-checksum filename format: %s", line) - } - filename := parts[1][1 : len(parts[1])-1] - return newChecksumFromType(parts[0], parts[3], filename) - case 2: - // GNU-style: - // file1 - // *file2 - return newChecksumFromValue(parts[0], parts[1]) - case 0: - return nil, nil // empty line - default: - return newChecksumFromValue(parts[0], "") - } -} diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go deleted file mode 100644 index 05f2f5f651..0000000000 --- a/vendor/github.com/hashicorp/go-getter/client.go +++ /dev/null @@ -1,347 +0,0 @@ -package getter - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - urlhelper "github.com/hashicorp/go-getter/helper/url" - safetemp "github.com/hashicorp/go-safetemp" -) - -// ErrSymlinkCopy means that a copy of a symlink was encountered on a request with DisableSymlinks enabled. -var ErrSymlinkCopy = errors.New("copying of symlinks has been disabled") - -// Client is a client for downloading things. -// -// Top-level functions such as Get are shortcuts for interacting with a client. -// Using a client directly allows more fine-grained control over how downloading -// is done, as well as customizing the protocols supported. -type Client struct { - // Ctx for cancellation - Ctx context.Context - - // Src is the source URL to get. - // - // Dst is the path to save the downloaded thing as. If Dir is set to - // true, then this should be a directory. If the directory doesn't exist, - // it will be created for you. - // - // Pwd is the working directory for detection. If this isn't set, some - // detection may fail. Client will not default pwd to the current - // working directory for security reasons. - Src string - Dst string - Pwd string - - // Mode is the method of download the client will use. See ClientMode - // for documentation. - Mode ClientMode - - // Umask is used to mask file permissions when storing local files or decompressing - // an archive - Umask os.FileMode - - // Detectors is the list of detectors that are tried on the source. - // If this is nil, then the default Detectors will be used. - Detectors []Detector - - // Decompressors is the map of decompressors supported by this client. - // If this is nil, then the default value is the Decompressors global. - Decompressors map[string]Decompressor - - // Getters is the map of protocols supported by this client. If this - // is nil, then the default Getters variable will be used. - Getters map[string]Getter - - // Dir, if true, tells the Client it is downloading a directory (versus - // a single file). This distinction is necessary since filenames and - // directory names follow the same format so disambiguating is impossible - // without knowing ahead of time. - // - // WARNING: deprecated. If Mode is set, that will take precedence. - Dir bool - - // ProgressListener allows to track file downloads. - // By default a no op progress listener is used. - ProgressListener ProgressTracker - - // Insecure controls whether a client verifies the server's - // certificate chain and host name. 
If Insecure is true, crypto/tls - // accepts any certificate presented by the server and any host name in that - // certificate. In this mode, TLS is susceptible to machine-in-the-middle - // attacks unless custom verification is used. This should be used only for - // testing or in combination with VerifyConnection or VerifyPeerCertificate. - // This is identical to tls.Config.InsecureSkipVerify. - Insecure bool - - // Disable symlinks - DisableSymlinks bool - - Options []ClientOption -} - -// umask returns the effective umask for the Client, defaulting to the process umask -func (c *Client) umask() os.FileMode { - if c == nil { - return 0 - } - return c.Umask -} - -// mode returns file mode umasked by the Client umask -func (c *Client) mode(mode os.FileMode) os.FileMode { - m := mode & ^c.umask() - return m -} - -// Get downloads the configured source to the destination. -func (c *Client) Get() error { - if err := c.Configure(c.Options...); err != nil { - return err - } - - // Store this locally since there are cases we swap this - mode := c.Mode - if mode == ClientModeInvalid { - if c.Dir { - mode = ClientModeDir - } else { - mode = ClientModeFile - } - } - - src, err := Detect(c.Src, c.Pwd, c.Detectors) - if err != nil { - return err - } - - // Determine if we have a forced protocol, i.e. "git::http://..." - force, src := getForcedGetter(src) - - // If there is a subdir component, then we download the root separately - // and then copy over the proper subdir. - var realDst string - dst := c.Dst - src, subDir := SourceDirSubdir(src) - if subDir != "" { - // Check if the subdirectory is attempting to traverse updwards, outside of - // the cloned repository path. - subDir := filepath.Clean(subDir) - if containsDotDot(subDir) { - return fmt.Errorf("subdirectory component contain path traversal out of the repository") - } - // Prevent absolute paths, remove a leading path separator from the subdirectory - if subDir[0] == os.PathSeparator { - subDir = subDir[1:] - } - - td, tdcloser, err := safetemp.Dir("", "getter") - if err != nil { - return err - } - defer tdcloser.Close() - - realDst = dst - dst = td - } - - u, err := urlhelper.Parse(src) - if err != nil { - return err - } - if force == "" { - force = u.Scheme - } - - g, ok := c.Getters[force] - if !ok { - return fmt.Errorf( - "download not supported for scheme '%s'", force) - } - - // We have magic query parameters that we use to signal different features - q := u.Query() - - // Determine if we have an archive type - archiveV := q.Get("archive") - if archiveV != "" { - // Delete the paramter since it is a magic parameter we don't - // want to pass on to the Getter - q.Del("archive") - u.RawQuery = q.Encode() - - // If we can parse the value as a bool and it is false, then - // set the archive to "-" which should never map to a decompressor - if b, err := strconv.ParseBool(archiveV); err == nil && !b { - archiveV = "-" - } - } - if archiveV == "" { - // We don't appear to... but is it part of the filename? - matchingLen := 0 - for k := range c.Decompressors { - if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen { - archiveV = k - matchingLen = len(k) - } - } - } - - // If we have a decompressor, then we need to change the destination - // to download to a temporary path. We unarchive this into the final, - // real path. - var decompressDst string - var decompressDir bool - decompressor := c.Decompressors[archiveV] - if decompressor != nil { - // Create a temporary directory to store our archive. 
We delete - // this at the end of everything. - td, err := ioutil.TempDir("", "getter") - if err != nil { - return fmt.Errorf( - "Error creating temporary directory for archive: %s", err) - } - defer os.RemoveAll(td) - - // Swap the download directory to be our temporary path and - // store the old values. - decompressDst = dst - decompressDir = mode != ClientModeFile - dst = filepath.Join(td, "archive") - mode = ClientModeFile - } - - // Determine checksum if we have one - checksum, err := c.extractChecksum(u) - if err != nil { - return fmt.Errorf("invalid checksum: %s", err) - } - - // Delete the query parameter if we have it. - q.Del("checksum") - u.RawQuery = q.Encode() - - if mode == ClientModeAny { - // Ask the getter which client mode to use - mode, err = g.ClientMode(u) - if err != nil { - return err - } - - // Destination is the base name of the URL path in "any" mode when - // a file source is detected. - if mode == ClientModeFile { - filename := filepath.Base(u.Path) - - // Determine if we have a custom file name - if v := q.Get("filename"); v != "" { - // Delete the query parameter if we have it. - q.Del("filename") - u.RawQuery = q.Encode() - - filename = v - } - - if containsDotDot(filename) { - return fmt.Errorf("filename query parameter contain path traversal") - } - - dst = filepath.Join(dst, filename) - } - } - - // If we're not downloading a directory, then just download the file - // and return. - if mode == ClientModeFile { - getFile := true - if checksum != nil { - if err := checksum.checksum(dst); err == nil { - // don't get the file if the checksum of dst is correct - getFile = false - } - } - if getFile { - err := g.GetFile(dst, u) - if err != nil { - return err - } - - if checksum != nil { - if err := checksum.checksum(dst); err != nil { - return err - } - } - } - - if decompressor != nil { - // We have a decompressor, so decompress the current destination - // into the final destination with the proper mode. - err := decompressor.Decompress(decompressDst, dst, decompressDir, c.umask()) - if err != nil { - return err - } - - // Swap the information back - dst = decompressDst - if decompressDir { - mode = ClientModeAny - } else { - mode = ClientModeFile - } - } - - // We check the dir value again because it can be switched back - // if we were unarchiving. If we're still only Get-ing a file, then - // we're done. - if mode == ClientModeFile { - return nil - } - } - - // If we're at this point we're either downloading a directory or we've - // downloaded and unarchived a directory and we're just checking subdir. - // In the case we have a decompressor we don't Get because it was Get - // above. - if decompressor == nil { - // If we're getting a directory, then this is an error. You cannot - // checksum a directory. TODO: test - if checksum != nil { - return fmt.Errorf( - "checksum cannot be specified for directory download") - } - - // We're downloading a directory, which might require a bit more work - // if we're specifying a subdir. 
- err := g.Get(dst, u) - if err != nil { - err = fmt.Errorf("error downloading '%s': %s", RedactURL(u), err) - return err - } - } - - // If we have a subdir, copy that over - if subDir != "" { - if err := os.RemoveAll(realDst); err != nil { - return err - } - if err := os.MkdirAll(realDst, c.mode(0755)); err != nil { - return err - } - - // Process any globs - subDir, err := SubdirGlob(dst, subDir) - if err != nil { - return err - } - - return copyDir(c.Ctx, realDst, subDir, false, c.DisableSymlinks, c.umask()) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/go-getter/client_mode.go b/vendor/github.com/hashicorp/go-getter/client_mode.go deleted file mode 100644 index 7f02509a78..0000000000 --- a/vendor/github.com/hashicorp/go-getter/client_mode.go +++ /dev/null @@ -1,24 +0,0 @@ -package getter - -// ClientMode is the mode that the client operates in. -type ClientMode uint - -const ( - ClientModeInvalid ClientMode = iota - - // ClientModeAny downloads anything it can. In this mode, dst must - // be a directory. If src is a file, it is saved into the directory - // with the basename of the URL. If src is a directory or archive, - // it is unpacked directly into dst. - ClientModeAny - - // ClientModeFile downloads a single file. In this mode, dst must - // be a file path (doesn't have to exist). src must point to a single - // file. It is saved as dst. - ClientModeFile - - // ClientModeDir downloads a directory. In this mode, dst must be - // a directory path (doesn't have to exist). src must point to an - // archive or directory (such as in s3). - ClientModeDir -) diff --git a/vendor/github.com/hashicorp/go-getter/client_option.go b/vendor/github.com/hashicorp/go-getter/client_option.go deleted file mode 100644 index b164137533..0000000000 --- a/vendor/github.com/hashicorp/go-getter/client_option.go +++ /dev/null @@ -1,100 +0,0 @@ -package getter - -import ( - "context" - "os" -) - -// ClientOption is used to configure a client. -type ClientOption func(*Client) error - -// Configure applies all of the given client options, along with any default -// behavior including context, decompressors, detectors, and getters used by -// the client. -func (c *Client) Configure(opts ...ClientOption) error { - // If the context has not been configured use the background context. - if c.Ctx == nil { - c.Ctx = context.Background() - } - - // Store the options used to configure this client. - c.Options = opts - - // Apply all of the client options. - for _, opt := range opts { - err := opt(c) - if err != nil { - return err - } - } - - // If the client was not configured with any Decompressors, Detectors, - // or Getters, use the default values for each. - if c.Decompressors == nil { - c.Decompressors = Decompressors - } - if c.Detectors == nil { - c.Detectors = Detectors - } - if c.Getters == nil { - c.Getters = Getters - } - - // Set the client for each getter, so the top-level client can know - // the getter-specific client functions or progress tracking. - for _, getter := range c.Getters { - getter.SetClient(c) - } - - return nil -} - -// WithContext allows to pass a context to operation -// in order to be able to cancel a download in progress. -func WithContext(ctx context.Context) ClientOption { - return func(c *Client) error { - c.Ctx = ctx - return nil - } -} - -// WithDecompressors specifies which Decompressor are available. 
-func WithDecompressors(decompressors map[string]Decompressor) ClientOption { - return func(c *Client) error { - c.Decompressors = decompressors - return nil - } -} - -// WithDetectors specifies which detectors are available. -func WithDetectors(detectors []Detector) ClientOption { - return func(c *Client) error { - c.Detectors = detectors - return nil - } -} - -// WithGetters specifies which getters are available. -func WithGetters(getters map[string]Getter) ClientOption { - return func(c *Client) error { - c.Getters = getters - return nil - } -} - -// WithMode specifies which client mode the getters should operate in. -func WithMode(mode ClientMode) ClientOption { - return func(c *Client) error { - c.Mode = mode - return nil - } -} - -// WithUmask specifies how to mask file permissions when storing local -// files or decompressing an archive. -func WithUmask(mode os.FileMode) ClientOption { - return func(c *Client) error { - c.Umask = mode - return nil - } -} diff --git a/vendor/github.com/hashicorp/go-getter/client_option_insecure.go b/vendor/github.com/hashicorp/go-getter/client_option_insecure.go deleted file mode 100644 index 75da58cdda..0000000000 --- a/vendor/github.com/hashicorp/go-getter/client_option_insecure.go +++ /dev/null @@ -1,14 +0,0 @@ -package getter - -// WithInsecure allows for a user to avoid -// checking certificates (not recommended). -// For example, when connecting on HTTPS where an -// invalid certificate is presented. -// User assumes all risk. -// Not all getters have support for insecure mode yet. -func WithInsecure() func(*Client) error { - return func(c *Client) error { - c.Insecure = true - return nil - } -} diff --git a/vendor/github.com/hashicorp/go-getter/client_option_progress.go b/vendor/github.com/hashicorp/go-getter/client_option_progress.go deleted file mode 100644 index 9b185f71de..0000000000 --- a/vendor/github.com/hashicorp/go-getter/client_option_progress.go +++ /dev/null @@ -1,38 +0,0 @@ -package getter - -import ( - "io" -) - -// WithProgress allows for a user to track -// the progress of a download. -// For example by displaying a progress bar with -// current download. -// Not all getters have progress support yet. -func WithProgress(pl ProgressTracker) func(*Client) error { - return func(c *Client) error { - c.ProgressListener = pl - return nil - } -} - -// ProgressTracker allows to track the progress of downloads. -type ProgressTracker interface { - // TrackProgress should be called when - // a new object is being downloaded. - // src is the location the file is - // downloaded from. - // currentSize is the current size of - // the file in case it is a partial - // download. - // totalSize is the total size in bytes, - // size can be zero if the file size - // is not known. - // stream is the file being downloaded, every - // written byte will add up to processed size. - // - // TrackProgress returns a ReadCloser that wraps the - // download in progress ( stream ). - // When the download is finished, body shall be closed.
- TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser) -} diff --git a/vendor/github.com/hashicorp/go-getter/common.go b/vendor/github.com/hashicorp/go-getter/common.go deleted file mode 100644 index d2afd8ad88..0000000000 --- a/vendor/github.com/hashicorp/go-getter/common.go +++ /dev/null @@ -1,14 +0,0 @@ -package getter - -import ( - "io/ioutil" -) - -func tmpFile(dir, pattern string) (string, error) { - f, err := ioutil.TempFile(dir, pattern) - if err != nil { - return "", err - } - f.Close() - return f.Name(), nil -} diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go deleted file mode 100644 index 646c283db3..0000000000 --- a/vendor/github.com/hashicorp/go-getter/copy_dir.go +++ /dev/null @@ -1,85 +0,0 @@ -package getter - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" -) - -// mode returns the file mode masked by the umask -func mode(mode, umask os.FileMode) os.FileMode { - return mode & ^umask -} - -// copyDir copies the src directory contents into dst. Both directories -// should already exist. -// -// If ignoreDot is set to true, then dot-prefixed files/folders are ignored. -func copyDir(ctx context.Context, dst string, src string, ignoreDot bool, disableSymlinks bool, umask os.FileMode) error { - // We can safely evaluate the symlinks here, even if disabled, because they - // will be checked before actual use in walkFn and copyFile - var err error - src, err = filepath.EvalSymlinks(src) - if err != nil { - return err - } - - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if disableSymlinks { - fileInfo, err := os.Lstat(path) - if err != nil { - return fmt.Errorf("failed to check copy file source for symlinks: %w", err) - } - if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink { - return ErrSymlinkCopy - } - // if info.Mode()&os.ModeSymlink == os.ModeSymlink { - // return ErrSymlinkCopy - // } - } - - if path == src { - return nil - } - - if ignoreDot && strings.HasPrefix(filepath.Base(path), ".") { - // Skip any dot files - if info.IsDir() { - return filepath.SkipDir - } else { - return nil - } - } - - // The "path" has the src prefixed to it. We need to join our - // destination with the path without the src on it. - dstPath := filepath.Join(dst, path[len(src):]) - - // If we have a directory, make that subdirectory, then continue - // the walk. - if info.IsDir() { - if path == filepath.Join(src, dst) { - // dst is in src; don't walk it. - return nil - } - - if err := os.MkdirAll(dstPath, mode(0755, umask)); err != nil { - return err - } - - return nil - } - - // If we have a file, copy the contents. - _, err = copyFile(ctx, dstPath, path, disableSymlinks, info.Mode(), umask) - return err - } - - return filepath.Walk(src, walkFn) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go deleted file mode 100644 index c0ca99befa..0000000000 --- a/vendor/github.com/hashicorp/go-getter/decompress.go +++ /dev/null @@ -1,80 +0,0 @@ -package getter - -import ( - "os" - "strings" -) - -// Decompressor defines the interface that must be implemented to add -// support for decompressing a type. -// -// Important: if you're implementing a decompressor, please use the -// containsDotDot helper in this file to ensure that files can't be -// decompressed outside of the specified directory. 
-type Decompressor interface { - // Decompress should decompress src to dst. dir specifies whether dst - // is a directory or single file. src is guaranteed to be a single file - // that exists. dst is not guaranteed to exist already. - Decompress(dst, src string, dir bool, umask os.FileMode) error -} - -// LimitedDecompressors creates the set of Decompressors, but with each compressor configured -// with the given filesLimit and/or fileSizeLimit where applicable. -func LimitedDecompressors(filesLimit int, fileSizeLimit int64) map[string]Decompressor { - tarDecompressor := &TarDecompressor{FilesLimit: filesLimit, FileSizeLimit: fileSizeLimit} - tbzDecompressor := &TarBzip2Decompressor{FilesLimit: filesLimit, FileSizeLimit: fileSizeLimit} - tgzDecompressor := &TarGzipDecompressor{FilesLimit: filesLimit, FileSizeLimit: fileSizeLimit} - txzDecompressor := &TarXzDecompressor{FilesLimit: filesLimit, FileSizeLimit: fileSizeLimit} - tzstDecompressor := &TarZstdDecompressor{FilesLimit: filesLimit, FileSizeLimit: fileSizeLimit} - bzipDecompressor := &Bzip2Decompressor{FileSizeLimit: fileSizeLimit} - gzipDecompressor := &GzipDecompressor{FileSizeLimit: fileSizeLimit} - xzDecompressor := &XzDecompressor{FileSizeLimit: fileSizeLimit} - zipDecompressor := &ZipDecompressor{FilesLimit: filesLimit, FileSizeLimit: fileSizeLimit} - zstDecompressor := &ZstdDecompressor{FileSizeLimit: fileSizeLimit} - - return map[string]Decompressor{ - "bz2": bzipDecompressor, - "gz": gzipDecompressor, - "xz": xzDecompressor, - "tar": tarDecompressor, - "tar.bz2": tbzDecompressor, - "tar.gz": tgzDecompressor, - "tar.xz": txzDecompressor, - "tar.zst": tzstDecompressor, - "tbz2": tbzDecompressor, - "tgz": tgzDecompressor, - "txz": txzDecompressor, - "tzst": tzstDecompressor, - "zip": zipDecompressor, - "zst": zstDecompressor, - } -} - -const ( - noFilesLimit = 0 - noFileSizeLimit = 0 -) - -// Decompressors is the mapping of extension to the Decompressor implementation -// configured with default settings that will decompress that extension/type. -// -// Note: these decompressors by default do not limit the number of files or the -// maximum file size created by the decompressed payload. -var Decompressors = LimitedDecompressors(noFilesLimit, noFileSizeLimit) - -// containsDotDot checks if the filepath value v contains a ".." entry. -// This will check filepath components by splitting along / or \. This -// function is copied directly from the Go net/http implementation. -func containsDotDot(v string) bool { - if !strings.Contains(v, "..") { - return false - } - for _, ent := range strings.FieldsFunc(v, isSlashRune) { - if ent == ".." { - return true - } - } - return false -} - -func isSlashRune(r rune) bool { return r == '/' || r == '\\' } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go deleted file mode 100644 index 6db0b35778..0000000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go +++ /dev/null @@ -1,42 +0,0 @@ -package getter - -import ( - "compress/bzip2" - "fmt" - "os" - "path/filepath" -) - -// Bzip2Decompressor is an implementation of Decompressor that can -// decompress bz2 files. -type Bzip2Decompressor struct { - // FileSizeLimit limits the size of a decompressed file. - // - // The zero value means no limit. 
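// A short sketch of capping extraction with LimitedDecompressors instead of
// the unlimited package default, wired in through WithDecompressors; the
// archive path is a placeholder.
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// At most 1024 files and 256 MiB of decompressed output per archive,
	// guarding against zip/tar bombs.
	limited := getter.LimitedDecompressors(1024, 256*1024*1024)
	if err := getter.Get("./out", "./module.tar.gz", getter.WithDecompressors(limited)); err != nil {
		log.Fatal(err)
	}
}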
- FileSizeLimit int64 -} - -func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { - // Directory isn't supported at all - if dir { - return fmt.Errorf("bzip2-compressed files can only unarchive to a single file") - } - - // If we're going into a directory we should make that first - if err := os.MkdirAll(filepath.Dir(dst), mode(0755, umask)); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - // Bzip2 compression is second - bzipR := bzip2.NewReader(f) - - // Copy it out - return copyReader(dst, bzipR, 0622, umask, d.FileSizeLimit) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go deleted file mode 100644 index f94f2bcff6..0000000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go +++ /dev/null @@ -1,46 +0,0 @@ -package getter - -import ( - "compress/gzip" - "fmt" - "os" - "path/filepath" -) - -// GzipDecompressor is an implementation of Decompressor that can -// decompress gzip files. -type GzipDecompressor struct { - // FileSizeLimit limits the size of a decompressed file. - // - // The zero value means no limit. - FileSizeLimit int64 -} - -func (d *GzipDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { - // Directory isn't supported at all - if dir { - return fmt.Errorf("gzip-compressed files can only unarchive to a single file") - } - - // If we're going into a directory we should make that first - if err := os.MkdirAll(filepath.Dir(dst), mode(0755, umask)); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - // gzip compression is second - gzipR, err := gzip.NewReader(f) - if err != nil { - return err - } - defer gzipR.Close() - - // Copy it out - return copyReader(dst, gzipR, 0622, umask, d.FileSizeLimit) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go deleted file mode 100644 index b5188c0e57..0000000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_tar.go +++ /dev/null @@ -1,183 +0,0 @@ -package getter - -import ( - "archive/tar" - "fmt" - "io" - "os" - "path/filepath" - "time" -) - -// untar is a shared helper for untarring an archive. The reader should provide -// an uncompressed view of the tar archive. 
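// A sketch of driving one of the single-file decompressors above directly
// (paths are placeholders). dir must be false because bzip2 and gzip can only
// unarchive to a single file.
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	d := &getter.GzipDecompressor{FileSizeLimit: 64 * 1024 * 1024}
	if err := d.Decompress("./out/file.txt", "./file.txt.gz", false, 0022); err != nil {
		log.Fatal(err)
	}
}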
-func untar(input io.Reader, dst, src string, dir bool, umask os.FileMode, fileSizeLimit int64, filesLimit int) error { - tarR := tar.NewReader(input) - done := false - dirHdrs := []*tar.Header{} - now := time.Now() - - var ( - fileSize int64 - filesCount int - ) - - for { - if filesLimit > 0 { - filesCount++ - if filesCount > filesLimit { - return fmt.Errorf("tar archive contains too many files: %d > %d", filesCount, filesLimit) - } - } - - hdr, err := tarR.Next() - if err == io.EOF { - if !done { - // Empty archive - return fmt.Errorf("empty archive: %s", src) - } - - break - } - if err != nil { - return err - } - - if hdr.Typeflag == tar.TypeXGlobalHeader || hdr.Typeflag == tar.TypeXHeader { - // don't unpack extended headers as files - continue - } - - path := dst - if dir { - // Disallow parent traversal - if containsDotDot(hdr.Name) { - return fmt.Errorf("entry contains '..': %s", hdr.Name) - } - - path = filepath.Join(path, hdr.Name) - } - - fileInfo := hdr.FileInfo() - - fileSize += fileInfo.Size() - - if fileSizeLimit > 0 && fileSize > fileSizeLimit { - return fmt.Errorf("tar archive larger than limit: %d", fileSizeLimit) - } - - if fileInfo.IsDir() { - if !dir { - return fmt.Errorf("expected a single file: %s", src) - } - - // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, mode(0755, umask)); err != nil { - return err - } - - // Record the directory information so that we may set its attributes - // after all files have been extracted - dirHdrs = append(dirHdrs, hdr) - - continue - } else { - // There is no ordering guarantee that a file in a directory is - // listed before the directory - dstPath := filepath.Dir(path) - - // Check that the directory exists, otherwise create it - if _, err := os.Stat(dstPath); os.IsNotExist(err) { - if err := os.MkdirAll(dstPath, mode(0755, umask)); err != nil { - return err - } - } - } - - // We have a file. If we already decoded, then it is an error - if !dir && done { - return fmt.Errorf("expected a single file, got multiple: %s", src) - } - - // Mark that we're done so future in single file mode errors - done = true - - // Size limit is tracked using the returned file info. - err = copyReader(path, tarR, hdr.FileInfo().Mode(), umask, 0) - if err != nil { - return err - } - - // Set the access and modification time if valid, otherwise default to current time - aTime := now - mTime := now - if hdr.AccessTime.Unix() > 0 { - aTime = hdr.AccessTime - } - if hdr.ModTime.Unix() > 0 { - mTime = hdr.ModTime - } - if err := os.Chtimes(path, aTime, mTime); err != nil { - return err - } - } - - // Perform a final pass over extracted directories to update metadata - for _, dirHdr := range dirHdrs { - path := filepath.Join(dst, dirHdr.Name) - // Chmod the directory since they might be created before we know the mode flags - if err := os.Chmod(path, mode(dirHdr.FileInfo().Mode(), umask)); err != nil { - return err - } - // Set the mtime/atime attributes since they would have been changed during extraction - aTime := now - mTime := now - if dirHdr.AccessTime.Unix() > 0 { - aTime = dirHdr.AccessTime - } - if dirHdr.ModTime.Unix() > 0 { - mTime = dirHdr.ModTime - } - if err := os.Chtimes(path, aTime, mTime); err != nil { - return err - } - } - - return nil -} - -// TarDecompressor is an implementation of Decompressor that can -// unpack tar files. -type TarDecompressor struct { - // FileSizeLimit limits the total size of all - // decompressed files. - // - // The zero value means no limit. 
- FileSizeLimit int64 - - // FilesLimit limits the number of files that are - // allowed to be decompressed. - // - // The zero value means no limit. - FilesLimit int -} - -func (d *TarDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { - // If we're going into a directory we should make that first - mkdir := dst - if !dir { - mkdir = filepath.Dir(dst) - } - if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - return untar(f, dst, src, dir, umask, d.FileSizeLimit, d.FilesLimit) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go deleted file mode 100644 index 78609c9ff1..0000000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go +++ /dev/null @@ -1,45 +0,0 @@ -package getter - -import ( - "compress/bzip2" - "os" - "path/filepath" -) - -// TarBzip2Decompressor is an implementation of Decompressor that can -// decompress tar.bz2 files. -type TarBzip2Decompressor struct { - // FileSizeLimit limits the total size of all - // decompressed files. - // - // The zero value means no limit. - FileSizeLimit int64 - - // FilesLimit limits the number of files that are - // allowed to be decompressed. - // - // The zero value means no limit. - FilesLimit int -} - -func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { - // If we're going into a directory we should make that first - mkdir := dst - if !dir { - mkdir = filepath.Dir(dst) - } - if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - // Bzip2 compression is second - bzipR := bzip2.NewReader(f) - return untar(bzipR, dst, src, dir, umask, d.FileSizeLimit, d.FilesLimit) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go deleted file mode 100644 index b18bd6cb6d..0000000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_testing.go +++ /dev/null @@ -1,171 +0,0 @@ -package getter - -import ( - "crypto/md5" - "encoding/hex" - "io" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "runtime" - "sort" - "strings" - "time" - - "github.com/mitchellh/go-testing-interface" -) - -// TestDecompressCase is a single test case for testing decompressors -type TestDecompressCase struct { - Input string // Input is the complete path to the input file - Dir bool // Dir is whether or not we're testing directory mode - Err bool // Err is whether we expect an error or not - DirList []string // DirList is the list of files for Dir mode - FileMD5 string // FileMD5 is the expected MD5 for a single file - Mtime *time.Time // Mtime is the optionally expected mtime for a single file (or all files if in Dir mode) -} - -// TestDecompressor is a helper function for testing generic decompressors. 
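// A sketch of how a decompressor test might drive the helper above; the
// fixture path and checksum are hypothetical.
package getter_test

import (
	"testing"

	getter "github.com/hashicorp/go-getter"
)

func TestGzip(t *testing.T) {
	cases := []getter.TestDecompressCase{
		{
			Input:   "./testdata/single.gz",             // hypothetical fixture
			Dir:     false,                              // single-file mode
			FileMD5: "d41d8cd98f00b204e9800998ecf8427e", // hypothetical checksum
		},
	}
	// *testing.T satisfies the go-testing-interface T expected here.
	getter.TestDecompressor(t, new(getter.GzipDecompressor), cases)
}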
-func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) { - t.Helper() - - for _, tc := range cases { - t.Logf("Testing: %s", tc.Input) - - // Temporary dir to store stuff - td, err := ioutil.TempDir("", "getter") - if err != nil { - t.Fatalf("err: %s", err) - } - - // Destination is always joining result so that we have a new path - dst := filepath.Join(td, "subdir", "result") - - // We use a function so defers work - func() { - defer os.RemoveAll(td) - - // Decompress - err := d.Decompress(dst, tc.Input, tc.Dir, 0022) - if (err != nil) != tc.Err { - t.Fatalf("err %s: %s", tc.Input, err) - } - if tc.Err { - return - } - - // If it isn't a directory, then check for a single file - if !tc.Dir { - fi, err := os.Stat(dst) - if err != nil { - t.Fatalf("err %s: %s", tc.Input, err) - } - if fi.IsDir() { - t.Fatalf("err %s: expected file, got directory", tc.Input) - } - if tc.FileMD5 != "" { - actual := testMD5(t, dst) - expected := tc.FileMD5 - if actual != expected { - t.Fatalf("err %s: expected MD5 %s, got %s", tc.Input, expected, actual) - } - } - - if tc.Mtime != nil { - actual := fi.ModTime() - if tc.Mtime.Unix() > 0 { - expected := *tc.Mtime - if actual != expected { - t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), dst, actual.String()) - } - } else if actual.Unix() <= 0 { - t.Fatalf("err %s: expected mtime to be > 0, got '%s'", actual.String()) - } - } - - return - } - - // Convert expected for windows - expected := tc.DirList - if runtime.GOOS == "windows" { - for i, v := range expected { - expected[i] = strings.Replace(v, "/", "\\", -1) - } - } - - // Directory, check for the correct contents - actual := testListDir(t, dst) - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected) - } - // Check for correct atime/mtime - for _, dir := range actual { - path := filepath.Join(dst, dir) - if tc.Mtime != nil { - fi, err := os.Stat(path) - if err != nil { - t.Fatalf("err: %s", err) - } - actual := fi.ModTime() - if tc.Mtime.Unix() > 0 { - expected := *tc.Mtime - if actual != expected { - t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), path, actual.String()) - } - } else if actual.Unix() < 0 { - t.Fatalf("err %s: expected mtime to be > 0, got '%s'", actual.String()) - } - - } - } - }() - } -} - -func testListDir(t testing.T, path string) []string { - var result []string - err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - sub = strings.TrimPrefix(sub, path) - if sub == "" { - return nil - } - sub = sub[1:] // Trim the leading path sep. 
- - // If it is a dir, add trailing sep - if info.IsDir() { - sub += string(os.PathSeparator) - } - - result = append(result, sub) - return nil - }) - if err != nil { - t.Fatalf("err: %s", err) - } - - sort.Strings(result) - return result -} - -func testMD5(t testing.T, path string) string { - f, err := os.Open(path) - if err != nil { - t.Fatalf("err: %s", err) - } - defer f.Close() - - h := md5.New() - _, err = io.Copy(h, f) - if err != nil { - t.Fatalf("err: %s", err) - } - - result := h.Sum(nil) - return hex.EncodeToString(result) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go deleted file mode 100644 index 848f5e372a..0000000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go +++ /dev/null @@ -1,51 +0,0 @@ -package getter - -import ( - "compress/gzip" - "fmt" - "os" - "path/filepath" -) - -// TarGzipDecompressor is an implementation of Decompressor that can -// decompress tar.gzip files. -type TarGzipDecompressor struct { - // FileSizeLimit limits the total size of all - // decompressed files. - // - // The zero value means no limit. - FileSizeLimit int64 - - // FilesLimit limits the number of files that are - // allowed to be decompressed. - // - // The zero value means no limit. - FilesLimit int -} - -func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { - // If we're going into a directory we should make that first - mkdir := dst - if !dir { - mkdir = filepath.Dir(dst) - } - if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - // Gzip compression is second - gzipR, err := gzip.NewReader(f) - if err != nil { - return fmt.Errorf("Error opening a gzip reader for %s: %s", src, err) - } - defer gzipR.Close() - - return untar(gzipR, dst, src, dir, umask, d.FileSizeLimit, d.FilesLimit) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_txz.go b/vendor/github.com/hashicorp/go-getter/decompress_txz.go deleted file mode 100644 index 42f6179a80..0000000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_txz.go +++ /dev/null @@ -1,51 +0,0 @@ -package getter - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/ulikunitz/xz" -) - -// TarXzDecompressor is an implementation of Decompressor that can -// decompress tar.xz files. -type TarXzDecompressor struct { - // FileSizeLimit limits the total size of all - // decompressed files. - // - // The zero value means no limit. - FileSizeLimit int64 - - // FilesLimit limits the number of files that are - // allowed to be decompressed. - // - // The zero value means no limit. 
- FilesLimit int -} - -func (d *TarXzDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { - // If we're going into a directory we should make that first - mkdir := dst - if !dir { - mkdir = filepath.Dir(dst) - } - if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - // xz compression is second - txzR, err := xz.NewReader(f) - if err != nil { - return fmt.Errorf("Error opening an xz reader for %s: %s", src, err) - } - - return untar(txzR, dst, src, dir, umask, d.FileSizeLimit, d.FilesLimit) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tzst.go b/vendor/github.com/hashicorp/go-getter/decompress_tzst.go deleted file mode 100644 index 3b086ced26..0000000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_tzst.go +++ /dev/null @@ -1,52 +0,0 @@ -package getter - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/klauspost/compress/zstd" -) - -// TarZstdDecompressor is an implementation of Decompressor that can -// decompress tar.zstd files. -type TarZstdDecompressor struct { - // FileSizeLimit limits the total size of all - // decompressed files. - // - // The zero value means no limit. - FileSizeLimit int64 - - // FilesLimit limits the number of files that are - // allowed to be decompressed. - // - // The zero value means no limit. - FilesLimit int -} - -func (d *TarZstdDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { - // If we're going into a directory we should make that first - mkdir := dst - if !dir { - mkdir = filepath.Dir(dst) - } - if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - // Zstd compression is second - zstdR, err := zstd.NewReader(f) - if err != nil { - return fmt.Errorf("Error opening a zstd reader for %s: %s", src, err) - } - defer zstdR.Close() - - return untar(zstdR, dst, src, dir, umask, d.FileSizeLimit, d.FilesLimit) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_xz.go b/vendor/github.com/hashicorp/go-getter/decompress_xz.go deleted file mode 100644 index 89fafd6bc7..0000000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_xz.go +++ /dev/null @@ -1,46 +0,0 @@ -package getter - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/ulikunitz/xz" -) - -// XzDecompressor is an implementation of Decompressor that can -// decompress xz files. -type XzDecompressor struct { - // FileSizeLimit limits the size of a decompressed file. - // - // The zero value means no limit. - FileSizeLimit int64 -} - -func (d *XzDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { - // Directory isn't supported at all - if dir { - return fmt.Errorf("xz-compressed files can only unarchive to a single file") - } - - // If we're going into a directory we should make that first - if err := os.MkdirAll(filepath.Dir(dst), mode(0755, umask)); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - // xz compression is second - xzR, err := xz.NewReader(f) - if err != nil { - return err - } - - // Copy it out, potentially using a file size limit. 
- return copyReader(dst, xzR, 0622, umask, d.FileSizeLimit) -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go deleted file mode 100644 index 3ae80f298c..0000000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_zip.go +++ /dev/null @@ -1,118 +0,0 @@ -package getter - -import ( - "archive/zip" - "fmt" - "os" - "path/filepath" -) - -// ZipDecompressor is an implementation of Decompressor that can -// decompress zip files. -type ZipDecompressor struct { - // FileSizeLimit limits the total size of all - // decompressed files. - // - // The zero value means no limit. - FileSizeLimit int64 - - // FilesLimit limits the number of files that are - // allowed to be decompressed. - // - // The zero value means no limit. - FilesLimit int -} - -func (d *ZipDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { - // If we're going into a directory we should make that first - mkdir := dst - if !dir { - mkdir = filepath.Dir(dst) - } - if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil { - return err - } - - // Open the zip - zipR, err := zip.OpenReader(src) - if err != nil { - return err - } - defer zipR.Close() - - // Check the zip integrity - if len(zipR.File) == 0 { - // Empty archive - return fmt.Errorf("empty archive: %s", src) - } - if !dir && len(zipR.File) > 1 { - return fmt.Errorf("expected a single file: %s", src) - } - - if d.FilesLimit > 0 && len(zipR.File) > d.FilesLimit { - return fmt.Errorf("zip archive contains too many files: %d > %d", len(zipR.File), d.FilesLimit) - } - - var fileSizeTotal int64 - - // Go through and unarchive - for _, f := range zipR.File { - path := dst - if dir { - // Disallow parent traversal - if containsDotDot(f.Name) { - return fmt.Errorf("entry contains '..': %s", f.Name) - } - - path = filepath.Join(path, f.Name) - } - - fileInfo := f.FileInfo() - - fileSizeTotal += fileInfo.Size() - - if d.FileSizeLimit > 0 && fileSizeTotal > d.FileSizeLimit { - return fmt.Errorf("zip archive larger than limit: %d", d.FileSizeLimit) - } - - if fileInfo.IsDir() { - if !dir { - return fmt.Errorf("expected a single file: %s", src) - } - - // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, mode(0755, umask)); err != nil { - return err - } - - continue - } - - // Create the enclosing directories if we must. ZIP files aren't - // required to contain entries for just the directories so this - // can happen. - if dir { - if err := os.MkdirAll(filepath.Dir(path), mode(0755, umask)); err != nil { - return err - } - } - - // Open the file for reading - srcF, err := f.Open() - if err != nil { - if srcF != nil { - srcF.Close() - } - return err - } - - // Size limit is tracked using the returned file info. - err = copyReader(path, srcF, f.Mode(), umask, 0) - srcF.Close() - if err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zstd.go b/vendor/github.com/hashicorp/go-getter/decompress_zstd.go deleted file mode 100644 index 3922d27bdf..0000000000 --- a/vendor/github.com/hashicorp/go-getter/decompress_zstd.go +++ /dev/null @@ -1,46 +0,0 @@ -package getter - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/klauspost/compress/zstd" -) - -// ZstdDecompressor is an implementation of Decompressor that -// can decompress .zst files. -type ZstdDecompressor struct { - // FileSizeLimit limits the size of a decompressed file. - // - // The zero value means no limit. 
- FileSizeLimit int64 -} - -func (d *ZstdDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { - if dir { - return fmt.Errorf("zstd-compressed files can only unarchive to a single file") - } - - // If we're going into a directory we should make that first - if err := os.MkdirAll(filepath.Dir(dst), mode(0755, umask)); err != nil { - return err - } - - // File first - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - // zstd compression is second - zstdR, err := zstd.NewReader(f) - if err != nil { - return err - } - defer zstdR.Close() - - // Copy it out, potentially using a file size limit. - return copyReader(dst, zstdR, 0622, umask, d.FileSizeLimit) -} diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go deleted file mode 100644 index f134f77051..0000000000 --- a/vendor/github.com/hashicorp/go-getter/detect.go +++ /dev/null @@ -1,106 +0,0 @@ -package getter - -import ( - "fmt" - "path/filepath" - - "github.com/hashicorp/go-getter/helper/url" -) - -// Detector defines the interface that an invalid URL or a URL with a blank -// scheme is passed through in order to determine if its shorthand for -// something else well-known. -type Detector interface { - // Detect will detect whether the string matches a known pattern to - // turn it into a proper URL. - Detect(string, string) (string, bool, error) -} - -// Detectors is the list of detectors that are tried on an invalid URL. -// This is also the order they're tried (index 0 is first). -var Detectors []Detector - -func init() { - Detectors = []Detector{ - new(GitHubDetector), - new(GitLabDetector), - new(GitDetector), - new(BitBucketDetector), - new(S3Detector), - new(GCSDetector), - new(FileDetector), - } -} - -// Detect turns a source string into another source string if it is -// detected to be of a known pattern. -// -// The third parameter should be the list of detectors to use in the -// order to try them. If you don't want to configure this, just use -// the global Detectors variable. -// -// This is safe to be called with an already valid source string: Detect -// will just return it. -func Detect(src string, pwd string, ds []Detector) (string, error) { - getForce, getSrc := getForcedGetter(src) - - // Separate out the subdir if there is one, we don't pass that to detect - getSrc, subDir := SourceDirSubdir(getSrc) - - u, err := url.Parse(getSrc) - if err == nil && u.Scheme != "" { - // Valid URL - return src, nil - } - - for _, d := range ds { - result, ok, err := d.Detect(getSrc, pwd) - if err != nil { - return "", err - } - if !ok { - continue - } - - var detectForce string - detectForce, result = getForcedGetter(result) - result, detectSubdir := SourceDirSubdir(result) - - // If we have a subdir from the detection, then prepend it to our - // requested subdir. - if detectSubdir != "" { - if subDir != "" { - subDir = filepath.Join(detectSubdir, subDir) - } else { - subDir = detectSubdir - } - } - - if subDir != "" { - u, err := url.Parse(result) - if err != nil { - return "", fmt.Errorf("Error parsing URL: %s", err) - } - u.Path += "//" + subDir - - // a subdir may contain wildcards, but in order to support them we - // have to ensure the path isn't escaped. - u.RawPath = u.Path - - result = u.String() - } - - // Preserve the forced getter if it exists. We try to use the - // original set force first, followed by any force set by the - // detector. 
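// A sketch of invoking the detector chain by hand, using the default
// Detectors order defined above.
package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	src, err := getter.Detect("github.com/hashicorp/go-getter", "/tmp", getter.Detectors)
	if err != nil {
		log.Fatal(err)
	}
	// The GitHub detector rewrites the shorthand into a forced git URL:
	// git::https://github.com/hashicorp/go-getter.git
	fmt.Println(src)
}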
- if getForce != "" { - result = fmt.Sprintf("%s::%s", getForce, result) - } else if detectForce != "" { - result = fmt.Sprintf("%s::%s", detectForce, result) - } - - return result, nil - } - - return "", fmt.Errorf("invalid source string: %s", src) -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go deleted file mode 100644 index 19047eb197..0000000000 --- a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go +++ /dev/null @@ -1,66 +0,0 @@ -package getter - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" -) - -// BitBucketDetector implements Detector to detect BitBucket URLs and turn -// them into URLs that the Git or Hg Getter can understand. -type BitBucketDetector struct{} - -func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if strings.HasPrefix(src, "bitbucket.org/") { - return d.detectHTTP(src) - } - - return "", false, nil -} - -func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) { - u, err := url.Parse("https://" + src) - if err != nil { - return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err) - } - - // We need to get info on this BitBucket repository to determine whether - // it is Git or Hg. - var info struct { - SCM string `json:"scm"` - } - infoUrl := "https://api.bitbucket.org/2.0/repositories" + u.Path - resp, err := http.Get(infoUrl) - if err != nil { - return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) - } - if resp.StatusCode == 403 { - // A private repo - return "", true, fmt.Errorf( - "shorthand BitBucket URL can't be used for private repos, " + - "please use a full URL") - } - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&info); err != nil { - return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) - } - - switch info.SCM { - case "git": - if !strings.HasSuffix(u.Path, ".git") { - u.Path += ".git" - } - - return "git::" + u.String(), true, nil - case "hg": - return "hg::" + u.String(), true, nil - default: - return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM) - } -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go deleted file mode 100644 index 4ef41ea73f..0000000000 --- a/vendor/github.com/hashicorp/go-getter/detect_file.go +++ /dev/null @@ -1,67 +0,0 @@ -package getter - -import ( - "fmt" - "os" - "path/filepath" - "runtime" -) - -// FileDetector implements Detector to detect file paths. -type FileDetector struct{} - -func (d *FileDetector) Detect(src, pwd string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if !filepath.IsAbs(src) { - if pwd == "" { - return "", true, fmt.Errorf( - "relative paths require a module with a pwd") - } - - // Stat the pwd to determine if its a symbolic link. If it is, - // then the pwd becomes the original directory. Otherwise, - // `filepath.Join` below does some weird stuff. - // - // We just ignore if the pwd doesn't exist. That error will be - // caught later when we try to use the URL. - if fi, err := os.Lstat(pwd); !os.IsNotExist(err) { - if err != nil { - return "", true, err - } - if fi.Mode()&os.ModeSymlink != 0 { - pwd, err = filepath.EvalSymlinks(pwd) - if err != nil { - return "", true, err - } - - // The symlink itself might be a relative path, so we have to - // resolve this to have a correctly rooted URL. 
- pwd, err = filepath.Abs(pwd) - if err != nil { - return "", true, err - } - } - } - - src = filepath.Join(pwd, src) - } - - return fmtFileURL(src), true, nil -} - -func fmtFileURL(path string) string { - if runtime.GOOS == "windows" { - // Make sure we're using "/" on Windows. URLs are "/"-based. - path = filepath.ToSlash(path) - return fmt.Sprintf("file://%s", path) - } - - // Make sure that we don't start with "/" since we add that below. - if path[0] == '/' { - path = path[1:] - } - return fmt.Sprintf("file:///%s", path) -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_gcs.go b/vendor/github.com/hashicorp/go-getter/detect_gcs.go deleted file mode 100644 index 11363737c7..0000000000 --- a/vendor/github.com/hashicorp/go-getter/detect_gcs.go +++ /dev/null @@ -1,43 +0,0 @@ -package getter - -import ( - "fmt" - "net/url" - "strings" -) - -// GCSDetector implements Detector to detect GCS URLs and turn -// them into URLs that the GCSGetter can understand. -type GCSDetector struct{} - -func (d *GCSDetector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if strings.Contains(src, "googleapis.com/") { - return d.detectHTTP(src) - } - - return "", false, nil -} - -func (d *GCSDetector) detectHTTP(src string) (string, bool, error) { - - parts := strings.Split(src, "/") - if len(parts) < 5 { - return "", false, fmt.Errorf( - "URL is not a valid GCS URL") - } - version := parts[2] - bucket := parts[3] - object := strings.Join(parts[4:], "/") - - url, err := url.Parse(fmt.Sprintf("https://www.googleapis.com/storage/%s/%s/%s", - version, bucket, object)) - if err != nil { - return "", false, fmt.Errorf("error parsing GCS URL: %s", err) - } - - return "gcs::" + url.String(), true, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_git.go b/vendor/github.com/hashicorp/go-getter/detect_git.go deleted file mode 100644 index eeb8a04c5e..0000000000 --- a/vendor/github.com/hashicorp/go-getter/detect_git.go +++ /dev/null @@ -1,26 +0,0 @@ -package getter - -// GitDetector implements Detector to detect Git SSH URLs such as -// git@host.com:dir1/dir2 and converts them to proper URLs. -type GitDetector struct{} - -func (d *GitDetector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - u, err := detectSSH(src) - if err != nil { - return "", true, err - } - if u == nil { - return "", false, nil - } - - // We require the username to be "git" to assume that this is a Git URL - if u.User.Username() != "git" { - return "", false, nil - } - - return "git::" + u.String(), true, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go deleted file mode 100644 index 4bf4daf238..0000000000 --- a/vendor/github.com/hashicorp/go-getter/detect_github.go +++ /dev/null @@ -1,47 +0,0 @@ -package getter - -import ( - "fmt" - "net/url" - "strings" -) - -// GitHubDetector implements Detector to detect GitHub URLs and turn -// them into URLs that the Git Getter can understand. 
-type GitHubDetector struct{} - -func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if strings.HasPrefix(src, "github.com/") { - return d.detectHTTP(src) - } - - return "", false, nil -} - -func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) { - parts := strings.Split(src, "/") - if len(parts) < 3 { - return "", false, fmt.Errorf( - "GitHub URLs should be github.com/username/repo") - } - - urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/")) - url, err := url.Parse(urlStr) - if err != nil { - return "", true, fmt.Errorf("error parsing GitHub URL: %s", err) - } - - if !strings.HasSuffix(url.Path, ".git") { - url.Path += ".git" - } - - if len(parts) > 3 { - url.Path += "//" + strings.Join(parts[3:], "/") - } - - return "git::" + url.String(), true, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_gitlab.go b/vendor/github.com/hashicorp/go-getter/detect_gitlab.go deleted file mode 100644 index 9d1e8d83cd..0000000000 --- a/vendor/github.com/hashicorp/go-getter/detect_gitlab.go +++ /dev/null @@ -1,47 +0,0 @@ -package getter - -import ( - "fmt" - "net/url" - "strings" -) - -// GitLabDetector implements Detector to detect GitLab URLs and turn -// them into URLs that the Git Getter can understand. -type GitLabDetector struct{} - -func (d *GitLabDetector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if strings.HasPrefix(src, "gitlab.com/") { - return d.detectHTTP(src) - } - - return "", false, nil -} - -func (d *GitLabDetector) detectHTTP(src string) (string, bool, error) { - parts := strings.Split(src, "/") - if len(parts) < 3 { - return "", false, fmt.Errorf( - "GitLab URLs should be gitlab.com/username/repo") - } - - urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/")) - repoUrl, err := url.Parse(urlStr) - if err != nil { - return "", true, fmt.Errorf("error parsing GitLab URL: %s", err) - } - - if !strings.HasSuffix(repoUrl.Path, ".git") { - repoUrl.Path += ".git" - } - - if len(parts) > 3 { - repoUrl.Path += "//" + strings.Join(parts[3:], "/") - } - - return "git::" + repoUrl.String(), true, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_s3.go b/vendor/github.com/hashicorp/go-getter/detect_s3.go deleted file mode 100644 index 89f3c35dcf..0000000000 --- a/vendor/github.com/hashicorp/go-getter/detect_s3.go +++ /dev/null @@ -1,73 +0,0 @@ -package getter - -import ( - "fmt" - "net/url" - "strings" -) - -// S3Detector implements Detector to detect S3 URLs and turn -// them into URLs that the S3 getter can understand. 
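// A sketch (an assumption, not vendored code) of a custom Detector for an
// internal Git host, prepended to the defaults via WithDetectors; the
// git.example.com host is hypothetical.
package main

import (
	"log"
	"strings"

	getter "github.com/hashicorp/go-getter"
)

type internalGitDetector struct{}

func (d *internalGitDetector) Detect(src, _ string) (string, bool, error) {
	if strings.HasPrefix(src, "git.example.com/") {
		return "git::https://" + src + ".git", true, nil
	}
	return "", false, nil // not ours; let the next detector try
}

func main() {
	ds := append([]getter.Detector{&internalGitDetector{}}, getter.Detectors...)
	if err := getter.Get("./out", "git.example.com/team/repo", getter.WithDetectors(ds)); err != nil {
		log.Fatal(err)
	}
}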
-type S3Detector struct{} - -func (d *S3Detector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if strings.Contains(src, ".amazonaws.com/") { - return d.detectHTTP(src) - } - - return "", false, nil -} - -func (d *S3Detector) detectHTTP(src string) (string, bool, error) { - parts := strings.Split(src, "/") - if len(parts) < 2 { - return "", false, fmt.Errorf( - "URL is not a valid S3 URL") - } - - hostParts := strings.Split(parts[0], ".") - if len(hostParts) == 3 { - return d.detectPathStyle(hostParts[0], parts[1:]) - } else if len(hostParts) == 4 { - return d.detectVhostStyle(hostParts[1], hostParts[0], parts[1:]) - } else if len(hostParts) == 5 && hostParts[1] == "s3" { - return d.detectNewVhostStyle(hostParts[2], hostParts[0], parts[1:]) - } else { - return "", false, fmt.Errorf( - "URL is not a valid S3 URL") - } -} - -func (d *S3Detector) detectPathStyle(region string, parts []string) (string, bool, error) { - urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s", region, strings.Join(parts, "/")) - url, err := url.Parse(urlStr) - if err != nil { - return "", false, fmt.Errorf("error parsing S3 URL: %s", err) - } - - return "s3::" + url.String(), true, nil -} - -func (d *S3Detector) detectVhostStyle(region, bucket string, parts []string) (string, bool, error) { - urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s/%s", region, bucket, strings.Join(parts, "/")) - url, err := url.Parse(urlStr) - if err != nil { - return "", false, fmt.Errorf("error parsing S3 URL: %s", err) - } - - return "s3::" + url.String(), true, nil -} - -func (d *S3Detector) detectNewVhostStyle(region, bucket string, parts []string) (string, bool, error) { - urlStr := fmt.Sprintf("https://s3.%s.amazonaws.com/%s/%s", region, bucket, strings.Join(parts, "/")) - url, err := url.Parse(urlStr) - if err != nil { - return "", false, fmt.Errorf("error parsing S3 URL: %s", err) - } - - return "s3::" + url.String(), true, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_ssh.go b/vendor/github.com/hashicorp/go-getter/detect_ssh.go deleted file mode 100644 index c0dbe9d475..0000000000 --- a/vendor/github.com/hashicorp/go-getter/detect_ssh.go +++ /dev/null @@ -1,49 +0,0 @@ -package getter - -import ( - "fmt" - "net/url" - "regexp" - "strings" -) - -// Note that we do not have an SSH-getter currently so this file serves -// only to hold the detectSSH helper that is used by other detectors. - -// sshPattern matches SCP-like SSH patterns (user@host:path) -var sshPattern = regexp.MustCompile("^(?:([^@]+)@)?([^:]+):/?(.+)$") - -// detectSSH determines if the src string matches an SSH-like URL and -// converts it into a net.URL compatible string. This returns nil if the -// string doesn't match the SSH pattern. 
-// -// This function is tested indirectly via detect_git_test.go -func detectSSH(src string) (*url.URL, error) { - matched := sshPattern.FindStringSubmatch(src) - if matched == nil { - return nil, nil - } - - user := matched[1] - host := matched[2] - path := matched[3] - qidx := strings.Index(path, "?") - if qidx == -1 { - qidx = len(path) - } - - var u url.URL - u.Scheme = "ssh" - u.User = url.User(user) - u.Host = host - u.Path = path[0:qidx] - if qidx < len(path) { - q, err := url.ParseQuery(path[qidx+1:]) - if err != nil { - return nil, fmt.Errorf("error parsing GitHub SSH URL: %s", err) - } - u.RawQuery = q.Encode() - } - - return &u, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/folder_storage.go b/vendor/github.com/hashicorp/go-getter/folder_storage.go deleted file mode 100644 index 647ccf4592..0000000000 --- a/vendor/github.com/hashicorp/go-getter/folder_storage.go +++ /dev/null @@ -1,65 +0,0 @@ -package getter - -import ( - "crypto/md5" - "encoding/hex" - "fmt" - "os" - "path/filepath" -) - -// FolderStorage is an implementation of the Storage interface that manages -// modules on the disk. -type FolderStorage struct { - // StorageDir is the directory where the modules will be stored. - StorageDir string -} - -// Dir implements Storage.Dir -func (s *FolderStorage) Dir(key string) (d string, e bool, err error) { - d = s.dir(key) - _, err = os.Stat(d) - if err == nil { - // Directory exists - e = true - return - } - if os.IsNotExist(err) { - // Directory doesn't exist - d = "" - e = false - err = nil - return - } - - // An error - d = "" - e = false - return -} - -// Get implements Storage.Get -func (s *FolderStorage) Get(key string, source string, update bool) error { - dir := s.dir(key) - if !update { - if _, err := os.Stat(dir); err == nil { - // If the directory already exists, then we're done since - // we're not updating. - return nil - } else if !os.IsNotExist(err) { - // If the error we got wasn't a file-not-exist error, then - // something went wrong and we should report it. - return fmt.Errorf("Error reading module directory: %s", err) - } - } - - // Get the source. This always forces an update. - return Get(dir, source) -} - -// dir returns the directory name internally that we'll use to map to -// internally. -func (s *FolderStorage) dir(key string) string { - sum := md5.Sum([]byte(key)) - return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:])) -} diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go deleted file mode 100644 index c233763c67..0000000000 --- a/vendor/github.com/hashicorp/go-getter/get.go +++ /dev/null @@ -1,152 +0,0 @@ -// getter is a package for downloading files or directories from a variety of -// protocols. -// -// getter is unique in its ability to download both directories and files. -// It also detects certain source strings to be protocol-specific URLs. For -// example, "github.com/hashicorp/go-getter" would turn into a Git URL and -// use the Git protocol. -// -// Protocols and detectors are extensible. -// -// To get started, see Client. -package getter - -import ( - "bytes" - "fmt" - "net/url" - "os/exec" - "regexp" - "syscall" - - cleanhttp "github.com/hashicorp/go-cleanhttp" -) - -// Getter defines the interface that schemes must implement to download -// things. -type Getter interface { - // Get downloads the given URL into the given directory. This always - // assumes that we're updating and gets the latest version that it can. 
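// A sketch of using the FolderStorage above as an on-disk module cache; the
// key and source are placeholders. Keys are md5-hashed into directory names,
// so any string is a valid key.
package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	s := &getter.FolderStorage{StorageDir: "/tmp/module-cache"}
	if err := s.Get("root.vpc", "github.com/hashicorp/example", false); err != nil {
		log.Fatal(err)
	}
	dir, ok, err := s.Dir("root.vpc")
	if err != nil || !ok {
		log.Fatalf("module not cached: %v", err)
	}
	fmt.Println("cached at", dir)
}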
- // - // The directory may already exist (if we're updating). If it is in a - // format that isn't understood, an error should be returned. Get shouldn't - // simply nuke the directory. - Get(string, *url.URL) error - - // GetFile downloads the give URL into the given path. The URL must - // reference a single file. If possible, the Getter should check if - // the remote end contains the same file and no-op this operation. - GetFile(string, *url.URL) error - - // ClientMode returns the mode based on the given URL. This is used to - // allow clients to let the getters decide which mode to use. - ClientMode(*url.URL) (ClientMode, error) - - // SetClient allows a getter to know it's client - // in order to access client's Get functions or - // progress tracking. - SetClient(*Client) -} - -// Getters is the mapping of scheme to the Getter implementation that will -// be used to get a dependency. -var Getters map[string]Getter - -// forcedRegexp is the regular expression that finds forced getters. This -// syntax is schema::url, example: git::https://foo.com -var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`) - -// httpClient is the default client to be used by HttpGetters. -var httpClient = cleanhttp.DefaultClient() - -func init() { - httpGetter := &HttpGetter{ - Netrc: true, - } - - Getters = map[string]Getter{ - "file": new(FileGetter), - "git": new(GitGetter), - "gcs": new(GCSGetter), - "hg": new(HgGetter), - "s3": new(S3Getter), - "http": httpGetter, - "https": httpGetter, - } -} - -// Get downloads the directory specified by src into the folder specified by -// dst. If dst already exists, Get will attempt to update it. -// -// src is a URL, whereas dst is always just a file path to a folder. This -// folder doesn't need to exist. It will be created if it doesn't exist. -func Get(dst, src string, opts ...ClientOption) error { - return (&Client{ - Src: src, - Dst: dst, - Dir: true, - Options: opts, - }).Get() -} - -// GetAny downloads a URL into the given destination. Unlike Get or -// GetFile, both directories and files are supported. -// -// dst must be a directory. If src is a file, it will be downloaded -// into dst with the basename of the URL. If src is a directory or -// archive, it will be unpacked directly into dst. -func GetAny(dst, src string, opts ...ClientOption) error { - return (&Client{ - Src: src, - Dst: dst, - Mode: ClientModeAny, - Options: opts, - }).Get() -} - -// GetFile downloads the file specified by src into the path specified by -// dst. -func GetFile(dst, src string, opts ...ClientOption) error { - return (&Client{ - Src: src, - Dst: dst, - Dir: false, - Options: opts, - }).Get() -} - -// getRunCommand is a helper that will run a command and capture the output -// in the case an error happens. -func getRunCommand(cmd *exec.Cmd) error { - var buf bytes.Buffer - cmd.Stdout = &buf - cmd.Stderr = &buf - err := cmd.Run() - if err == nil { - return nil - } - if exiterr, ok := err.(*exec.ExitError); ok { - // The program has exited with an exit code != 0 - if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return fmt.Errorf( - "%s exited with %d: %s", - cmd.Path, - status.ExitStatus(), - buf.String()) - } - } - - return fmt.Errorf("error running %s: %s", cmd.Path, buf.String()) -} - -// getForcedGetter takes a source and returns the tuple of the forced -// getter and the raw URL (without the force syntax). 
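// A sketch of the three package-level entry points defined above, including
// the schema::url force syntax matched by forcedRegexp; all URLs are
// placeholders.
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// "git::" forces the git getter regardless of what detection would pick.
	if err := getter.Get("./repo", "git::https://example.com/repo.git"); err != nil {
		log.Fatal(err)
	}
	// Single file download.
	if err := getter.GetFile("./a.txt", "https://example.com/a.txt"); err != nil {
		log.Fatal(err)
	}
	// Let the getter's ClientMode decide between file and directory mode.
	if err := getter.GetAny("./out", "https://example.com/archive.zip"); err != nil {
		log.Fatal(err)
	}
}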
-func getForcedGetter(src string) (string, string) { - var forced string - if ms := forcedRegexp.FindStringSubmatch(src); ms != nil { - forced = ms[1] - src = ms[2] - } - - return forced, src -} diff --git a/vendor/github.com/hashicorp/go-getter/get_base.go b/vendor/github.com/hashicorp/go-getter/get_base.go deleted file mode 100644 index 09e9b6313b..0000000000 --- a/vendor/github.com/hashicorp/go-getter/get_base.go +++ /dev/null @@ -1,20 +0,0 @@ -package getter - -import "context" - -// getter is our base getter; it regroups -// fields all getters have in common. -type getter struct { - client *Client -} - -func (g *getter) SetClient(c *Client) { g.client = c } - -// Context tries to returns the Contex from the getter's -// client. otherwise context.Background() is returned. -func (g *getter) Context() context.Context { - if g == nil || g.client == nil { - return context.Background() - } - return g.client.Ctx -} diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go deleted file mode 100644 index 78660839a0..0000000000 --- a/vendor/github.com/hashicorp/go-getter/get_file.go +++ /dev/null @@ -1,36 +0,0 @@ -package getter - -import ( - "net/url" - "os" -) - -// FileGetter is a Getter implementation that will download a module from -// a file scheme. -type FileGetter struct { - getter - - // Copy, if set to true, will copy data instead of using a symlink. If - // false, attempts to symlink to speed up the operation and to lower the - // disk space usage. If the symlink fails, may attempt to copy on windows. - Copy bool -} - -func (g *FileGetter) ClientMode(u *url.URL) (ClientMode, error) { - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - fi, err := os.Stat(path) - if err != nil { - return 0, err - } - - // Check if the source is a directory. - if fi.IsDir() { - return ClientModeDir, nil - } - - return ClientModeFile, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_copy.go b/vendor/github.com/hashicorp/go-getter/get_file_copy.go deleted file mode 100644 index d6145cbac1..0000000000 --- a/vendor/github.com/hashicorp/go-getter/get_file_copy.go +++ /dev/null @@ -1,90 +0,0 @@ -package getter - -import ( - "context" - "fmt" - "io" - "os" -) - -// readerFunc is syntactic sugar for read interface. -type readerFunc func(p []byte) (n int, err error) - -func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) } - -// Copy is a io.Copy cancellable by context -func Copy(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) { - // Copy will call the Reader and Writer interface multiple time, in order - // to copy by chunk (avoiding loading the whole file in memory). 
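// A sketch of the context-aware Copy shown here: the wrapped reader checks
// ctx between chunk reads, so cancellation aborts an in-flight copy.
package main

import (
	"bytes"
	"context"
	"fmt"
	"strings"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // cancel immediately: the first chunk read returns ctx.Err()

	var dst bytes.Buffer
	if _, err := getter.Copy(ctx, &dst, strings.NewReader("payload")); err != nil {
		fmt.Println(err) // context canceled
	}
}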
- return io.Copy(dst, readerFunc(func(p []byte) (int, error) { - - select { - case <-ctx.Done(): - // context has been canceled - // stop process and propagate "context canceled" error - return 0, ctx.Err() - default: - // otherwise just run default io.Reader implementation - return src.Read(p) - } - })) -} - -// copyReader copies from an io.Reader into a file, using umask to create the dst file -func copyReader(dst string, src io.Reader, fmode, umask os.FileMode, fileSizeLimit int64) error { - dstF, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fmode) - if err != nil { - return err - } - defer dstF.Close() - - if fileSizeLimit > 0 { - src = io.LimitReader(src, fileSizeLimit) - } - - _, err = io.Copy(dstF, src) - if err != nil { - return err - } - - // Explicitly chmod; the process umask is unconditionally applied otherwise. - // We'll mask the mode with our own umask, but that may be different than - // the process umask - return os.Chmod(dst, mode(fmode, umask)) -} - -// copyFile copies a file in chunks from src path to dst path, using umask to create the dst file -func copyFile(ctx context.Context, dst, src string, disableSymlinks bool, fmode, umask os.FileMode) (int64, error) { - if disableSymlinks { - fileInfo, err := os.Lstat(src) - if err != nil { - return 0, fmt.Errorf("failed to check copy file source for symlinks: %w", err) - } - if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink { - return 0, ErrSymlinkCopy - } - } - - srcF, err := os.Open(src) - if err != nil { - return 0, err - } - defer srcF.Close() - - dstF, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fmode) - if err != nil { - return 0, err - } - defer dstF.Close() - - count, err := Copy(ctx, dstF, srcF) - if err != nil { - return 0, err - } - - // Explicitly chmod; the process umask is unconditionally applied otherwise. - // We'll mask the mode with our own umask, but that may be different than - // the process umask - err = os.Chmod(dst, mode(fmode, umask)) - return count, err -} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go deleted file mode 100644 index a14a38263e..0000000000 --- a/vendor/github.com/hashicorp/go-getter/get_file_unix.go +++ /dev/null @@ -1,99 +0,0 @@ -// +build !windows - -package getter - -import ( - "fmt" - "net/url" - "os" - "path/filepath" -) - -func (g *FileGetter) Get(dst string, u *url.URL) error { - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - // The source path must exist and be a directory to be usable. - if fi, err := os.Stat(path); err != nil { - return fmt.Errorf("source path error: %s", err) - } else if !fi.IsDir() { - return fmt.Errorf("source path must be a directory") - } - - fi, err := os.Lstat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If the destination already exists, it must be a symlink - if err == nil { - mode := fi.Mode() - if mode&os.ModeSymlink == 0 { - return fmt.Errorf("destination exists and is not a symlink") - } - - // Remove the destination - if err := os.Remove(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { - return err - } - - return os.Symlink(path, dst) -} - -func (g *FileGetter) GetFile(dst string, u *url.URL) error { - ctx := g.Context() - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - // The source path must exist and be a file to be usable. 
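// A small sketch of the umask masking that copyReader and copyFile apply via
// the vendored mode helper (mode & ^umask); the helper is re-declared here
// only so the example stands alone.
package main

import (
	"fmt"
	"os"
)

func mode(m, umask os.FileMode) os.FileMode { return m &^ umask }

func main() {
	fmt.Printf("%04o\n", mode(0666, 0022)) // 0644: group/other write stripped
	fmt.Printf("%04o\n", mode(0755, 0077)) // 0700: owner-only
}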
- var fi os.FileInfo - var err error - if fi, err = os.Stat(path); err != nil { - return fmt.Errorf("source path error: %s", err) - } else if fi.IsDir() { - return fmt.Errorf("source path must be a file") - } - - _, err = os.Lstat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If the destination already exists, it must be a symlink - if err == nil { - // Remove the destination - if err := os.Remove(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err = os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { - return err - } - - // If we're not copying, just symlink and we're done - if !g.Copy { - return os.Symlink(path, dst) - } - - var disableSymlinks bool - - if g.client != nil && g.client.DisableSymlinks { - disableSymlinks = true - } - - // Copy - _, err = copyFile(ctx, dst, path, disableSymlinks, fi.Mode(), g.client.umask()) - return err -} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go deleted file mode 100644 index 31146f5753..0000000000 --- a/vendor/github.com/hashicorp/go-getter/get_file_windows.go +++ /dev/null @@ -1,130 +0,0 @@ -// +build windows - -package getter - -import ( - "fmt" - "net/url" - "os" - "os/exec" - "path/filepath" - "strings" - "syscall" -) - -func (g *FileGetter) Get(dst string, u *url.URL) error { - ctx := g.Context() - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - // The source path must exist and be a directory to be usable. - if fi, err := os.Stat(path); err != nil { - return fmt.Errorf("source path error: %s", err) - } else if !fi.IsDir() { - return fmt.Errorf("source path must be a directory") - } - - fi, err := os.Lstat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If the destination already exists, it must be a symlink - if err == nil { - mode := fi.Mode() - if mode&os.ModeSymlink == 0 { - return fmt.Errorf("destination exists and is not a symlink") - } - - // Remove the destination - if err := os.Remove(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { - return err - } - - sourcePath := toBackslash(path) - - // Use mklink to create a junction point - output, err := exec.CommandContext(ctx, "cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput() - if err != nil { - return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output) - } - - return nil -} - -func (g *FileGetter) GetFile(dst string, u *url.URL) error { - ctx := g.Context() - path := u.Path - if u.RawPath != "" { - path = u.RawPath - } - - // The source path must exist and be a directory to be usable. 
- if fi, err := os.Stat(path); err != nil { - return fmt.Errorf("source path error: %s", err) - } else if fi.IsDir() { - return fmt.Errorf("source path must be a file") - } - - _, err := os.Lstat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If the destination already exists, it must be a symlink - if err == nil { - // Remove the destination - if err := os.Remove(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { - return err - } - - // If we're not copying, just symlink and we're done - if !g.Copy { - if err = os.Symlink(path, dst); err == nil { - return err - } - lerr, ok := err.(*os.LinkError) - if !ok { - return err - } - switch lerr.Err { - case syscall.ERROR_PRIVILEGE_NOT_HELD: - // no symlink privilege, let's - // fallback to a copy to avoid an error. - break - default: - return err - } - } - - var disableSymlinks bool - - if g.client != nil && g.client.DisableSymlinks { - disableSymlinks = true - } - - // Copy - _, err = copyFile(ctx, dst, path, disableSymlinks, 0666, g.client.umask()) - return err -} - -// toBackslash returns the result of replacing each slash character -// in path with a backslash ('\') character. Multiple separators are -// replaced by multiple backslashes. -func toBackslash(path string) string { - return strings.Replace(path, "/", "\\", -1) -} diff --git a/vendor/github.com/hashicorp/go-getter/get_gcs.go b/vendor/github.com/hashicorp/go-getter/get_gcs.go deleted file mode 100644 index 0c2f969950..0000000000 --- a/vendor/github.com/hashicorp/go-getter/get_gcs.go +++ /dev/null @@ -1,227 +0,0 @@ -package getter - -import ( - "context" - "fmt" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "golang.org/x/oauth2" - "google.golang.org/api/option" - - "cloud.google.com/go/storage" - "google.golang.org/api/iterator" -) - -// GCSGetter is a Getter implementation that will download a module from -// a GCS bucket. -type GCSGetter struct { - getter - - // Timeout sets a deadline which all GCS operations should - // complete within. Zero value means no timeout. - Timeout time.Duration - - // FileSizeLimit limits the size of an single - // decompressed file. - // - // The zero value means no limit. - FileSizeLimit int64 -} - -func (g *GCSGetter) ClientMode(u *url.URL) (ClientMode, error) { - ctx := g.Context() - - if g.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, g.Timeout) - defer cancel() - } - - // Parse URL - bucket, object, _, err := g.parseURL(u) - if err != nil { - return 0, err - } - - client, err := g.getClient(ctx) - if err != nil { - return 0, err - } - iter := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: object}) - for { - obj, err := iter.Next() - if err != nil && err != iterator.Done { - return 0, err - } - - if err == iterator.Done { - break - } - if strings.HasSuffix(obj.Name, "/") { - // A directory matched the prefix search, so this must be a directory - return ClientModeDir, nil - } else if obj.Name != object { - // A file matched the prefix search and doesn't have the same name - // as the query, so this must be a directory - return ClientModeDir, nil - } - } - // There are no directories or subdirectories, and if a match was returned, - // it was exactly equal to the prefix search. 
So return File mode - return ClientModeFile, nil -} - -func (g *GCSGetter) Get(dst string, u *url.URL) error { - ctx := g.Context() - - if g.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, g.Timeout) - defer cancel() - } - - // Parse URL - bucket, object, _, err := g.parseURL(u) - if err != nil { - return err - } - - // Remove destination if it already exists - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // Remove the destination - if err := os.RemoveAll(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { - return err - } - - client, err := g.getClient(ctx) - if err != nil { - return err - } - - // Iterate through all matching objects. - iter := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: object}) - for { - obj, err := iter.Next() - if err != nil && err != iterator.Done { - return err - } - if err == iterator.Done { - break - } - - if !strings.HasSuffix(obj.Name, "/") { - // Get the object destination path - objDst, err := filepath.Rel(object, obj.Name) - if err != nil { - return err - } - objDst = filepath.Join(dst, objDst) - // Download the matching object. - err = g.getObject(ctx, client, objDst, bucket, obj.Name, "") - if err != nil { - return err - } - } - } - return nil -} - -func (g *GCSGetter) GetFile(dst string, u *url.URL) error { - ctx := g.Context() - - if g.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, g.Timeout) - defer cancel() - } - - // Parse URL - bucket, object, fragment, err := g.parseURL(u) - if err != nil { - return err - } - - client, err := g.getClient(ctx) - if err != nil { - return err - } - return g.getObject(ctx, client, dst, bucket, object, fragment) -} - -func (g *GCSGetter) getObject(ctx context.Context, client *storage.Client, dst, bucket, object, fragment string) error { - var rc *storage.Reader - var err error - if fragment != "" { - generation, err := strconv.ParseInt(fragment, 10, 64) - if err != nil { - return err - } - rc, err = client.Bucket(bucket).Object(object).Generation(generation).NewReader(ctx) - } else { - rc, err = client.Bucket(bucket).Object(object).NewReader(ctx) - } - if err != nil { - return err - } - defer rc.Close() - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { - return err - } - - // There is no limit set for the size of an object from GCS - return copyReader(dst, rc, 0666, g.client.umask(), 0) -} - -func (g *GCSGetter) parseURL(u *url.URL) (bucket, path, fragment string, err error) { - if strings.Contains(u.Host, "googleapis.com") { - hostParts := strings.Split(u.Host, ".") - if len(hostParts) != 3 { - err = fmt.Errorf("URL is not a valid GCS URL") - return - } - - pathParts := strings.SplitN(u.Path, "/", 5) - if len(pathParts) != 5 { - err = fmt.Errorf("URL is not a valid GCS URL") - return - } - bucket = pathParts[3] - path = pathParts[4] - fragment = u.Fragment - } - return -} - -func (g *GCSGetter) getClient(ctx context.Context) (client *storage.Client, err error) { - var opts []option.ClientOption - - if v, ok := os.LookupEnv("GOOGLE_OAUTH_ACCESS_TOKEN"); ok { - tokenSource := oauth2.StaticTokenSource(&oauth2.Token{ - AccessToken: v, - }) - opts = append(opts, option.WithTokenSource(tokenSource)) - } - - newClient, err := storage.NewClient(ctx, opts...) 
- if err != nil { - return nil, err - } - return newClient, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go deleted file mode 100644 index db89edef8b..0000000000 --- a/vendor/github.com/hashicorp/go-getter/get_git.go +++ /dev/null @@ -1,376 +0,0 @@ -package getter - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "io/ioutil" - "net/url" - "os" - "os/exec" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "time" - - urlhelper "github.com/hashicorp/go-getter/helper/url" - safetemp "github.com/hashicorp/go-safetemp" - version "github.com/hashicorp/go-version" -) - -// GitGetter is a Getter implementation that will download a module from -// a git repository. -type GitGetter struct { - getter - - // Timeout sets a deadline which all git CLI operations should - // complete within. Zero value means no timeout. - Timeout time.Duration -} - -var defaultBranchRegexp = regexp.MustCompile(`\s->\sorigin/(.*)`) -var lsRemoteSymRefRegexp = regexp.MustCompile(`ref: refs/heads/([^\s]+).*`) - -func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { - return ClientModeDir, nil -} - -func (g *GitGetter) Get(dst string, u *url.URL) error { - ctx := g.Context() - - if g.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, g.Timeout) - defer cancel() - } - - if _, err := exec.LookPath("git"); err != nil { - return fmt.Errorf("git must be available and on the PATH") - } - - // The port number must be parseable as an integer. If not, the user - // was probably trying to use a scp-style address, in which case the - // ssh:// prefix must be removed to indicate that. - // - // This is not necessary in versions of Go which have patched - // CVE-2019-14809 (e.g. Go 1.12.8+) - if portStr := u.Port(); portStr != "" { - if _, err := strconv.ParseUint(portStr, 10, 16); err != nil { - return fmt.Errorf("invalid port number %q; if using the \"scp-like\" git address scheme where a colon introduces the path instead, remove the ssh:// portion and use just the git:: prefix", portStr) - } - } - - // Extract some query parameters we use - var ref, sshKey string - depth := 0 // 0 means "don't use shallow clone" - q := u.Query() - if len(q) > 0 { - ref = q.Get("ref") - q.Del("ref") - - sshKey = q.Get("sshkey") - q.Del("sshkey") - - if n, err := strconv.Atoi(q.Get("depth")); err == nil { - depth = n - } - q.Del("depth") - - // Copy the URL - var newU url.URL = *u - u = &newU - u.RawQuery = q.Encode() - } - - var sshKeyFile string - if sshKey != "" { - // Check that the git version is sufficiently new. - if err := checkGitVersion(ctx, "2.3"); err != nil { - return fmt.Errorf("Error using ssh key: %v", err) - } - - // We have an SSH key - decode it. - raw, err := base64.StdEncoding.DecodeString(sshKey) - if err != nil { - return err - } - - // Create a temp file for the key and ensure it is removed. - fh, err := ioutil.TempFile("", "go-getter") - if err != nil { - return err - } - sshKeyFile = fh.Name() - defer os.Remove(sshKeyFile) - - // Set the permissions prior to writing the key material. - if err := os.Chmod(sshKeyFile, 0600); err != nil { - return err - } - - // Write the raw key into the temp file. 
- _, err = fh.Write(raw) - fh.Close() - if err != nil { - return err - } - } - - // Clone or update the repository - _, err := os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - err = g.update(ctx, dst, sshKeyFile, ref, depth) - } else { - err = g.clone(ctx, dst, sshKeyFile, u, ref, depth) - } - if err != nil { - return err - } - - // Next: check out the proper tag/branch if it is specified, and checkout - if ref != "" { - if err := g.checkout(ctx, dst, ref); err != nil { - return err - } - } - - // Lastly, download any/all submodules. - return g.fetchSubmodules(ctx, dst, sshKeyFile, depth) -} - -// GetFile for Git doesn't support updating at this time. It will download -// the file every time. -func (g *GitGetter) GetFile(dst string, u *url.URL) error { - td, tdcloser, err := safetemp.Dir("", "getter") - if err != nil { - return err - } - defer tdcloser.Close() - - // Get the filename, and strip the filename from the URL so we can - // just get the repository directly. - filename := filepath.Base(u.Path) - u.Path = filepath.Dir(u.Path) - - // Get the full repository - if err := g.Get(td, u); err != nil { - return err - } - - // Copy the single file - u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) - if err != nil { - return err - } - - fg := &FileGetter{Copy: true} - return fg.GetFile(dst, u) -} - -func (g *GitGetter) checkout(ctx context.Context, dst string, ref string) error { - cmd := exec.CommandContext(ctx, "git", "checkout", ref) - cmd.Dir = dst - return getRunCommand(cmd) -} - -// gitCommitIDRegex is a pattern intended to match strings that seem -// "likely to be" git commit IDs, rather than named refs. This cannot be -// an exact decision because it's valid to name a branch or tag after a series -// of hexadecimal digits too. -// -// We require at least 7 digits here because that's the smallest size git -// itself will typically generate, and so it'll reduce the risk of false -// positives on short branch names that happen to also be "hex words". -var gitCommitIDRegex = regexp.MustCompile("^[0-9a-fA-F]{7,40}$") - -func (g *GitGetter) clone(ctx context.Context, dst, sshKeyFile string, u *url.URL, ref string, depth int) error { - args := []string{"clone"} - - originalRef := ref // we handle an unspecified ref differently than explicitly selecting the default branch below - if ref == "" { - ref = findRemoteDefaultBranch(ctx, u) - } - if depth > 0 { - args = append(args, "--depth", strconv.Itoa(depth)) - args = append(args, "--branch", ref) - } - args = append(args, u.String(), dst) - - cmd := exec.CommandContext(ctx, "git", args...) - setupGitEnv(cmd, sshKeyFile) - err := getRunCommand(cmd) - if err != nil { - if depth > 0 && originalRef != "" { - // If we're creating a shallow clone then the given ref must be - // a named ref (branch or tag) rather than a commit directly. - // We can't accurately recognize the resulting error here without - // hard-coding assumptions about git's human-readable output, but - // we can at least try a heuristic. - if gitCommitIDRegex.MatchString(originalRef) { - return fmt.Errorf("%w (note that setting 'depth' requires 'ref' to be a branch or tag name)", err) - } - } - return err - } - - if depth < 1 && originalRef != "" { - // If we didn't add --depth and --branch above then we will now be - // on the remote repository's default branch, rather than the selected - // ref, so we'll need to fix that before we return. 
- return g.checkout(ctx, dst, originalRef) - } - return nil -} - -func (g *GitGetter) update(ctx context.Context, dst, sshKeyFile, ref string, depth int) error { - // Determine if we're a branch. If we're NOT a branch, then we just - // switch to master prior to checking out - cmd := exec.CommandContext(ctx, "git", "show-ref", "-q", "--verify", "refs/heads/"+ref) - cmd.Dir = dst - - if getRunCommand(cmd) != nil { - // Not a branch, switch to default branch. This will also catch - // non-existent branches, in which case we want to switch to default - // and then checkout the proper branch later. - ref = findDefaultBranch(ctx, dst) - } - - // We have to be on a branch to pull - if err := g.checkout(ctx, dst, ref); err != nil { - return err - } - - if depth > 0 { - cmd = exec.CommandContext(ctx, "git", "pull", "--depth", strconv.Itoa(depth), "--ff-only") - } else { - cmd = exec.CommandContext(ctx, "git", "pull", "--ff-only") - } - - cmd.Dir = dst - setupGitEnv(cmd, sshKeyFile) - return getRunCommand(cmd) -} - -// fetchSubmodules downloads any configured submodules recursively. -func (g *GitGetter) fetchSubmodules(ctx context.Context, dst, sshKeyFile string, depth int) error { - args := []string{"submodule", "update", "--init", "--recursive"} - if depth > 0 { - args = append(args, "--depth", strconv.Itoa(depth)) - } - cmd := exec.CommandContext(ctx, "git", args...) - cmd.Dir = dst - setupGitEnv(cmd, sshKeyFile) - return getRunCommand(cmd) -} - -// findDefaultBranch checks the repo's origin remote for its default branch -// (generally "master"). "master" is returned if an origin default branch -// can't be determined. -func findDefaultBranch(ctx context.Context, dst string) string { - var stdoutbuf bytes.Buffer - cmd := exec.CommandContext(ctx, "git", "branch", "-r", "--points-at", "refs/remotes/origin/HEAD") - cmd.Dir = dst - cmd.Stdout = &stdoutbuf - err := cmd.Run() - matches := defaultBranchRegexp.FindStringSubmatch(stdoutbuf.String()) - if err != nil || matches == nil { - return "master" - } - return matches[len(matches)-1] -} - -// findRemoteDefaultBranch checks the remote repo's HEAD symref to return the remote repo's -// default branch. "master" is returned if no HEAD symref exists. -func findRemoteDefaultBranch(ctx context.Context, u *url.URL) string { - var stdoutbuf bytes.Buffer - cmd := exec.CommandContext(ctx, "git", "ls-remote", "--symref", u.String(), "HEAD") - cmd.Stdout = &stdoutbuf - err := cmd.Run() - matches := lsRemoteSymRefRegexp.FindStringSubmatch(stdoutbuf.String()) - if err != nil || matches == nil { - return "master" - } - return matches[len(matches)-1] -} - -// setupGitEnv sets up the environment for the given command. This is used to -// pass configuration data to git and ssh and enables advanced cloning methods. -func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { - const gitSSHCommand = "GIT_SSH_COMMAND=" - var sshCmd []string - - // If we have an existing GIT_SSH_COMMAND, we need to append our options. - // We will also remove our old entry to make sure the behavior is the same - // with versions of Go < 1.9. - env := os.Environ() - for i, v := range env { - if strings.HasPrefix(v, gitSSHCommand) && len(v) > len(gitSSHCommand) { - sshCmd = []string{v} - - env[i], env[len(env)-1] = env[len(env)-1], env[i] - env = env[:len(env)-1] - break - } - } - - if len(sshCmd) == 0 { - sshCmd = []string{gitSSHCommand + "ssh"} - } - - if sshKeyFile != "" { - // We have an SSH key temp file configured, tell ssh about this. 
- if runtime.GOOS == "windows" { - sshKeyFile = strings.Replace(sshKeyFile, `\`, `/`, -1) - } - sshCmd = append(sshCmd, "-i", sshKeyFile) - } - - env = append(env, strings.Join(sshCmd, " ")) - cmd.Env = env -} - -// checkGitVersion is used to check the version of git installed on the system -// against a known minimum version. Returns an error if the installed version -// is older than the given minimum. -func checkGitVersion(ctx context.Context, min string) error { - want, err := version.NewVersion(min) - if err != nil { - return err - } - - out, err := exec.CommandContext(ctx, "git", "version").Output() - if err != nil { - return err - } - - fields := strings.Fields(string(out)) - if len(fields) < 3 { - return fmt.Errorf("Unexpected 'git version' output: %q", string(out)) - } - v := fields[2] - if runtime.GOOS == "windows" && strings.Contains(v, ".windows.") { - // on windows, git version will return for example: - // git version 2.20.1.windows.1 - // Which does not follow the semantic versionning specs - // https://semver.org. We remove that part in order for - // go-version to not error. - v = v[:strings.Index(v, ".windows.")] - } - - have, err := version.NewVersion(v) - if err != nil { - return err - } - - if have.LessThan(want) { - return fmt.Errorf("Required git version = %s, have %s", want, have) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go deleted file mode 100644 index afa3bde818..0000000000 --- a/vendor/github.com/hashicorp/go-getter/get_hg.go +++ /dev/null @@ -1,147 +0,0 @@ -package getter - -import ( - "context" - "fmt" - "net/url" - "os" - "os/exec" - "path/filepath" - "runtime" - "time" - - urlhelper "github.com/hashicorp/go-getter/helper/url" - safetemp "github.com/hashicorp/go-safetemp" -) - -// HgGetter is a Getter implementation that will download a module from -// a Mercurial repository. -type HgGetter struct { - getter - - // Timeout sets a deadline which all hg CLI operations should - // complete within. Zero value means no timeout. - Timeout time.Duration -} - -func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) { - return ClientModeDir, nil -} - -func (g *HgGetter) Get(dst string, u *url.URL) error { - ctx := g.Context() - - if g.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, g.Timeout) - defer cancel() - } - - if _, err := exec.LookPath("hg"); err != nil { - return fmt.Errorf("hg must be available and on the PATH") - } - - newURL, err := urlhelper.Parse(u.String()) - if err != nil { - return err - } - if fixWindowsDrivePath(newURL) { - // See valid file path form on http://www.selenic.com/hg/help/urls - newURL.Path = fmt.Sprintf("/%s", newURL.Path) - } - - // Extract some query parameters we use - var rev string - q := newURL.Query() - if len(q) > 0 { - rev = q.Get("rev") - q.Del("rev") - - newURL.RawQuery = q.Encode() - } - - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err != nil { - if err := g.clone(ctx, dst, newURL); err != nil { - return err - } - } - - if err := g.pull(ctx, dst, newURL); err != nil { - return err - } - - return g.update(ctx, dst, newURL, rev) -} - -// GetFile for Hg doesn't support updating at this time. It will download -// the file every time. -func (g *HgGetter) GetFile(dst string, u *url.URL) error { - // Create a temporary directory to store the full source. This has to be - // a non-existent directory. 
- td, tdcloser, err := safetemp.Dir("", "getter") - if err != nil { - return err - } - defer tdcloser.Close() - - // Get the filename, and strip the filename from the URL so we can - // just get the repository directly. - filename := filepath.Base(u.Path) - u.Path = filepath.ToSlash(filepath.Dir(u.Path)) - - // If we're on Windows, we need to set the host to "localhost" for hg - if runtime.GOOS == "windows" { - u.Host = "localhost" - } - - // Get the full repository - if err := g.Get(td, u); err != nil { - return err - } - - // Copy the single file - u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) - if err != nil { - return err - } - - fg := &FileGetter{Copy: true, getter: g.getter} - return fg.GetFile(dst, u) -} - -func (g *HgGetter) clone(ctx context.Context, dst string, u *url.URL) error { - cmd := exec.CommandContext(ctx, "hg", "clone", "-U", "--", u.String(), dst) - return getRunCommand(cmd) -} - -func (g *HgGetter) pull(ctx context.Context, dst string, u *url.URL) error { - cmd := exec.CommandContext(ctx, "hg", "pull") - cmd.Dir = dst - return getRunCommand(cmd) -} - -func (g *HgGetter) update(ctx context.Context, dst string, u *url.URL, rev string) error { - args := []string{"update"} - if rev != "" { - args = append(args, "--", rev) - } - - cmd := exec.CommandContext(ctx, "hg", args...) - cmd.Dir = dst - return getRunCommand(cmd) -} - -func fixWindowsDrivePath(u *url.URL) bool { - // hg assumes a file:/// prefix for Windows drive letter file paths. - // (e.g. file:///c:/foo/bar) - // If the URL Path does not begin with a '/' character, the resulting URL - // path will have a file:// prefix. (e.g. file://c:/foo/bar) - // See http://www.selenic.com/hg/help/urls and the examples listed in - // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936 - return runtime.GOOS == "windows" && u.Scheme == "file" && - len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':' -} diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go deleted file mode 100644 index a4d273f14f..0000000000 --- a/vendor/github.com/hashicorp/go-getter/get_http.go +++ /dev/null @@ -1,622 +0,0 @@ -package getter - -import ( - "context" - "crypto/tls" - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" - "os" - "path/filepath" - "regexp" - "strings" - "time" - - "github.com/hashicorp/go-cleanhttp" - safetemp "github.com/hashicorp/go-safetemp" -) - -// HttpGetter is a Getter implementation that will download from an HTTP -// endpoint. -// -// For file downloads, HTTP is used directly. -// -// The protocol for downloading a directory from an HTTP endpoint is as follows: -// -// An HTTP GET request is made to the URL with the additional GET parameter -// "terraform-get=1". This lets you handle that scenario specially if you -// wish. The response must be a 2xx. -// -// First, a header is looked for "X-Terraform-Get" which should contain -// a source URL to download. This source must use one of the configured -// protocols and getters for the client, or "http"/"https" if using -// the HttpGetter directly. -// -// If the header is not present, then a meta tag is searched for named -// "terraform-get" and the content should be a source URL. -// -// The source URL, whether from the header or meta tag, must be a fully -// formed URL. The shorthand syntax of "github.com/foo/bar" or relative -// paths are not allowed. 
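To make the directory protocol described above concrete, here is a minimal sketch of a compliant server, assuming a hypothetical module path and archive URL (neither appears in this repository):

package main

import (
	"log"
	"net/http"
)

// Minimal sketch of a server that speaks the "terraform-get" directory
// protocol described above. The /modules/vpc path and the archive URL
// are hypothetical placeholders.
func main() {
	http.HandleFunc("/modules/vpc", func(w http.ResponseWriter, r *http.Request) {
		// go-getter appends terraform-get=1 to the query string; any 2xx
		// response carrying X-Terraform-Get with a fully formed source URL
		// (no shorthand, no relative paths) is accepted.
		if r.URL.Query().Get("terraform-get") == "1" {
			w.Header().Set("X-Terraform-Get", "https://example.com/archives/vpc.zip")
		}
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}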
-type HttpGetter struct { - getter - - // Netrc, if true, will lookup and use auth information found - // in the user's netrc file if available. - Netrc bool - - // Client is the http.Client to use for Get requests. - // This defaults to a cleanhttp.DefaultClient if left unset. - Client *http.Client - - // Header contains optional request header fields that should be included - // with every HTTP request. Note that the zero value of this field is nil, - // and as such it needs to be initialized before use, via something like - // make(http.Header). - Header http.Header - - // DoNotCheckHeadFirst configures the client to NOT check if the server - // supports HEAD requests. - DoNotCheckHeadFirst bool - - // HeadFirstTimeout configures the client to enforce a timeout when - // the server supports HEAD requests. - // - // The zero value means no timeout. - HeadFirstTimeout time.Duration - - // ReadTimeout configures the client to enforce a timeout when - // making a request to an HTTP server and reading its response body. - // - // The zero value means no timeout. - ReadTimeout time.Duration - - // MaxBytes limits the number of bytes that will be ready from an HTTP - // response body returned from a server. The zero value means no limit. - MaxBytes int64 - - // XTerraformGetLimit configures how many times the client with follow - // the " X-Terraform-Get" header value. - // - // The zero value means no limit. - XTerraformGetLimit int - - // XTerraformGetDisabled disables the client's usage of the "X-Terraform-Get" - // header value. - XTerraformGetDisabled bool -} - -func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { - if strings.HasSuffix(u.Path, "/") { - return ClientModeDir, nil - } - return ClientModeFile, nil -} - -type contextKey int - -const ( - xTerraformGetDisable contextKey = 0 - xTerraformGetLimit contextKey = 1 - xTerraformGetLimitCurrentValue contextKey = 2 - httpClientValue contextKey = 3 - httpMaxBytesValue contextKey = 4 -) - -func xTerraformGetDisabled(ctx context.Context) bool { - value, ok := ctx.Value(xTerraformGetDisable).(bool) - if !ok { - return false - } - return value -} - -func xTerraformGetLimitCurrentValueFromContext(ctx context.Context) int { - value, ok := ctx.Value(xTerraformGetLimitCurrentValue).(int) - if !ok { - return 1 - } - return value -} - -func xTerraformGetLimiConfiguredtFromContext(ctx context.Context) int { - value, ok := ctx.Value(xTerraformGetLimit).(int) - if !ok { - return 0 - } - return value -} - -func httpClientFromContext(ctx context.Context) *http.Client { - value, ok := ctx.Value(httpClientValue).(*http.Client) - if !ok { - return nil - } - return value -} - -func httpMaxBytesFromContext(ctx context.Context) int64 { - value, ok := ctx.Value(httpMaxBytesValue).(int64) - if !ok { - return 0 // no limit - } - return value -} - -type limitedWrappedReaderCloser struct { - underlying io.Reader - closeFn func() error -} - -func (l *limitedWrappedReaderCloser) Read(p []byte) (n int, err error) { - return l.underlying.Read(p) -} - -func (l *limitedWrappedReaderCloser) Close() (err error) { - return l.closeFn() -} - -func newLimitedWrappedReaderCloser(r io.ReadCloser, limit int64) io.ReadCloser { - return &limitedWrappedReaderCloser{ - underlying: io.LimitReader(r, limit), - closeFn: r.Close, - } -} - -func (g *HttpGetter) Get(dst string, u *url.URL) error { - ctx := g.Context() - - // Optionally disable any X-Terraform-Get redirects. This is reccomended for usage of - // this client outside of Terraform's. 
This feature is likely not required if the - // source server can provider normal HTTP redirects. - if g.XTerraformGetDisabled { - ctx = context.WithValue(ctx, xTerraformGetDisable, g.XTerraformGetDisabled) - } - - // Optionally enforce a limit on X-Terraform-Get redirects. We check this for every - // invocation of this function, because the value is not passed down to subsequent - // client Get function invocations. - if g.XTerraformGetLimit > 0 { - ctx = context.WithValue(ctx, xTerraformGetLimit, g.XTerraformGetLimit) - } - - // If there was a limit on X-Terraform-Get redirects, check what the current count value. - // - // If the value is greater than the limit, return an error. Otherwise, increment the value, - // and include it in the the context to be passed along in all the subsequent client - // Get function invocations. - if limit := xTerraformGetLimiConfiguredtFromContext(ctx); limit > 0 { - currentValue := xTerraformGetLimitCurrentValueFromContext(ctx) - - if currentValue > limit { - return fmt.Errorf("too many X-Terraform-Get redirects: %d", currentValue) - } - - currentValue++ - - ctx = context.WithValue(ctx, xTerraformGetLimitCurrentValue, currentValue) - } - - // Optionally enforce a maxiumum HTTP response body size. - if g.MaxBytes > 0 { - ctx = context.WithValue(ctx, httpMaxBytesValue, g.MaxBytes) - } - - // Copy the URL so we can modify it - var newU url.URL = *u - u = &newU - - if g.Netrc { - // Add auth from netrc if we can - if err := addAuthFromNetrc(u); err != nil { - return err - } - } - - // If the HTTP client is nil, check if there is one available in the context, - // otherwise create one using cleanhttp's default transport. - if g.Client == nil { - if client := httpClientFromContext(ctx); client != nil { - g.Client = client - } else { - client := httpClient - if g.client != nil && g.client.Insecure { - insecureTransport := cleanhttp.DefaultTransport() - insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - client.Transport = insecureTransport - } - g.Client = client - } - } - - // Pass along the configured HTTP client in the context for usage with the X-Terraform-Get feature. - ctx = context.WithValue(ctx, httpClientValue, g.Client) - - // Add terraform-get to the parameter. - q := u.Query() - q.Add("terraform-get", "1") - u.RawQuery = q.Encode() - - readCtx := ctx - - if g.ReadTimeout > 0 { - var cancel context.CancelFunc - readCtx, cancel = context.WithTimeout(ctx, g.ReadTimeout) - defer cancel() - } - - // Get the URL - req, err := http.NewRequestWithContext(readCtx, "GET", u.String(), nil) - if err != nil { - return err - } - - if g.Header != nil { - req.Header = g.Header.Clone() - } - - resp, err := g.Client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - body := resp.Body - - if maxBytes := httpMaxBytesFromContext(ctx); maxBytes > 0 { - body = newLimitedWrappedReaderCloser(body, maxBytes) - } - - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return fmt.Errorf("bad response code: %d", resp.StatusCode) - } - - if disabled := xTerraformGetDisabled(ctx); disabled { - return nil - } - - // Extract the source URL, - var source string - if v := resp.Header.Get("X-Terraform-Get"); v != "" { - source = v - } else { - source, err = g.parseMeta(readCtx, body) - if err != nil { - return err - } - } - if source == "" { - return fmt.Errorf("no source URL was returned") - } - - // If there is a subdir component, then we download the root separately - // into a temporary directory, then copy over the proper subdir. 
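The call on the next line relies on SourceDirSubdir, whose implementation appears further down in this diff (source.go). Its behavior can be sketched as follows; the repository URL is illustrative only:

package main

import (
	"fmt"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// A "//" after the path marks a subdirectory within the source;
	// query parameters stay attached to the root URL.
	src, subDir := getter.SourceDirSubdir("https://example.com/repo//modules/vpc?ref=v1")
	fmt.Println(src)    // https://example.com/repo?ref=v1
	fmt.Println(subDir) // modules/vpc
}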
- source, subDir := SourceDirSubdir(source) - - var opts []ClientOption - - // Check if the protocol was switched to one which was not configured. - if g.client != nil && g.client.Getters != nil { - // We must first use the Detectors provided, because `X-Terraform-Get does - // not necessarily return a valid URL. We can replace the source string - // here, since the detectors would have been called immediately during the - // next Get anyway. - source, err = Detect(source, g.client.Pwd, g.client.Detectors) - if err != nil { - return err - } - - protocol := "" - // X-Terraform-Get allows paths relative to the previous request too, - // which won't have a protocol. - if !relativeGet(source) { - protocol = strings.Split(source, ":")[0] - } - - // Otherwise, all default getters are allowed. - if protocol != "" { - _, allowed := g.client.Getters[protocol] - if !allowed { - return fmt.Errorf("no getter available for X-Terraform-Get source protocol: %q", protocol) - } - } - } - - // Add any getter client options. - if g.client != nil { - opts = g.client.Options - } - - // If the client is nil, we know we're using the HttpGetter directly. In - // this case, we don't know exactly which protocols are configured, but we - // can make a good guess. - // - // This prevents all default getters from being allowed when only using the - // HttpGetter directly. To enable protocol switching, a client "wrapper" must - // be used. - if g.client == nil { - switch { - case subDir != "": - // If there's a subdirectory, we will also need a file getter to - // unpack it. - opts = append(opts, WithGetters(map[string]Getter{ - "file": new(FileGetter), - "http": g, - "https": g, - })) - default: - opts = append(opts, WithGetters(map[string]Getter{ - "http": g, - "https": g, - })) - } - } - - // Ensure we pass along the context we constructed in this function. - // - // This is especially important to enforce a limit on X-Terraform-Get redirects - // which could be setup, if configured, at the top of this function. - opts = append(opts, WithContext(ctx)) - - if subDir != "" { - // We have a subdir, time to jump some hoops - return g.getSubdir(ctx, dst, source, subDir, opts...) - } - - // Note: this allows the protocol to be switched to another configured getters. - return Get(dst, source, opts...) -} - -// GetFile fetches the file from src and stores it at dst. -// If the server supports Accept-Range, HttpGetter will attempt a range -// request. This means it is the caller's responsibility to ensure that an -// older version of the destination file does not exist, else it will be either -// falsely identified as being replaced, or corrupted with extra bytes -// appended. -func (g *HttpGetter) GetFile(dst string, src *url.URL) error { - ctx := g.Context() - - // Optionally enforce a maxiumum HTTP response body size. 
- if g.MaxBytes > 0 { - ctx = context.WithValue(ctx, httpMaxBytesValue, g.MaxBytes) - } - - if g.Netrc { - // Add auth from netrc if we can - if err := addAuthFromNetrc(src); err != nil { - return err - } - } - // Create all the parent directories if needed - if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { - return err - } - - f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, g.client.mode(0666)) - if err != nil { - return err - } - defer f.Close() - - if g.Client == nil { - g.Client = httpClient - if g.client != nil && g.client.Insecure { - insecureTransport := cleanhttp.DefaultTransport() - insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - g.Client.Transport = insecureTransport - } - } - - var ( - currentFileSize int64 - req *http.Request - ) - - if !g.DoNotCheckHeadFirst { - headCtx := ctx - - if g.HeadFirstTimeout > 0 { - var cancel context.CancelFunc - - headCtx, cancel = context.WithTimeout(ctx, g.HeadFirstTimeout) - defer cancel() - } - - // We first make a HEAD request so we can check - // if the server supports range queries. If the server/URL doesn't - // support HEAD requests, we just fall back to GET. - req, err = http.NewRequestWithContext(headCtx, "HEAD", src.String(), nil) - if err != nil { - return err - } - if g.Header != nil { - req.Header = g.Header.Clone() - } - headResp, err := g.Client.Do(req) - if err == nil { - headResp.Body.Close() - if headResp.StatusCode == 200 { - // If the HEAD request succeeded, then attempt to set the range - // query if we can. - if headResp.Header.Get("Accept-Ranges") == "bytes" && headResp.ContentLength >= 0 { - if fi, err := f.Stat(); err == nil { - if _, err = f.Seek(0, io.SeekEnd); err == nil { - currentFileSize = fi.Size() - if currentFileSize >= headResp.ContentLength { - // file already present - return nil - } - } - } - } - } - } - } - - readCtx := ctx - - if g.ReadTimeout > 0 { - var cancel context.CancelFunc - readCtx, cancel = context.WithTimeout(ctx, g.ReadTimeout) - defer cancel() - } - - req, err = http.NewRequestWithContext(readCtx, "GET", src.String(), nil) - if err != nil { - return err - } - if g.Header != nil { - req.Header = g.Header.Clone() - } - if currentFileSize > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", currentFileSize)) - } - - resp, err := g.Client.Do(req) - if err != nil { - return err - } - switch resp.StatusCode { - case http.StatusOK, http.StatusPartialContent: - // all good - default: - resp.Body.Close() - return fmt.Errorf("bad response code: %d", resp.StatusCode) - } - - body := resp.Body - - if maxBytes := httpMaxBytesFromContext(ctx); maxBytes > 0 { - body = newLimitedWrappedReaderCloser(body, maxBytes) - } - - if g.client != nil && g.client.ProgressListener != nil { - // track download - fn := filepath.Base(src.EscapedPath()) - body = g.client.ProgressListener.TrackProgress(fn, currentFileSize, currentFileSize+resp.ContentLength, resp.Body) - } - defer body.Close() - - n, err := Copy(readCtx, f, body) - if err == nil && n < resp.ContentLength { - err = io.ErrShortWrite - } - return err -} - -// getSubdir downloads the source into the destination, but with -// the proper subdir. -func (g *HttpGetter) getSubdir(ctx context.Context, dst, source, subDir string, opts ...ClientOption) error { - // Create a temporary directory to store the full source. This has to be - // a non-existent directory. 
- td, tdcloser, err := safetemp.Dir("", "getter") - if err != nil { - return err - } - defer tdcloser.Close() - - // Download that into the given directory - if err := Get(td, source, opts...); err != nil { - return err - } - - // Process any globbing - sourcePath, err := SubdirGlob(td, subDir) - if err != nil { - return err - } - - // Make sure the subdir path actually exists - if _, err := os.Stat(sourcePath); err != nil { - return fmt.Errorf( - "Error downloading %s: %s", source, err) - } - - // Copy the subdirectory into our actual destination. - if err := os.RemoveAll(dst); err != nil { - return err - } - - // Make the final destination - if err := os.MkdirAll(dst, g.client.mode(0755)); err != nil { - return err - } - - var disableSymlinks bool - - if g.client != nil && g.client.DisableSymlinks { - disableSymlinks = true - } - - return copyDir(ctx, dst, sourcePath, false, disableSymlinks, g.client.umask()) -} - -// parseMeta looks for the first meta tag in the given reader that -// will give us the source URL. -func (g *HttpGetter) parseMeta(ctx context.Context, r io.Reader) (string, error) { - d := xml.NewDecoder(r) - d.CharsetReader = charsetReader - d.Strict = false - var err error - var t xml.Token - for { - if ctx.Err() != nil { - return "", fmt.Errorf("context error while parsing meta tag: %w", ctx.Err()) - } - - t, err = d.Token() - if err != nil { - if err == io.EOF { - err = nil - } - return "", err - } - if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") { - return "", nil - } - if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") { - return "", nil - } - e, ok := t.(xml.StartElement) - if !ok || !strings.EqualFold(e.Name.Local, "meta") { - continue - } - if attrValue(e.Attr, "name") != "terraform-get" { - continue - } - if f := attrValue(e.Attr, "content"); f != "" { - return f, nil - } - } -} - -// X-Terraform-Get allows paths relative to the previous request -var relativeGet = regexp.MustCompile(`^\.{0,2}/`).MatchString - -// attrValue returns the attribute value for the case-insensitive key -// `name', or the empty string if nothing is found. -func attrValue(attrs []xml.Attr, name string) string { - for _, a := range attrs { - if strings.EqualFold(a.Name.Local, name) { - return a.Value - } - } - return "" -} - -// charsetReader returns a reader for the given charset. Currently -// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful -// error which is printed by go get, so the user can find why the package -// wasn't downloaded if the encoding is not supported. Note that, in -// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters -// greater than 0x7f are not rejected). -func charsetReader(charset string, input io.Reader) (io.Reader, error) { - switch strings.ToLower(charset) { - case "ascii": - return input, nil - default: - return nil, fmt.Errorf("can't decode XML document using charset %q", charset) - } -} diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go deleted file mode 100644 index e2a98ea284..0000000000 --- a/vendor/github.com/hashicorp/go-getter/get_mock.go +++ /dev/null @@ -1,54 +0,0 @@ -package getter - -import ( - "net/url" -) - -// MockGetter is an implementation of Getter that can be used for tests. -type MockGetter struct { - getter - - // Proxy, if set, will be called after recording the calls below. - // If it isn't set, then the *Err values will be returned. 
- Proxy Getter - - GetCalled bool - GetDst string - GetURL *url.URL - GetErr error - - GetFileCalled bool - GetFileDst string - GetFileURL *url.URL - GetFileErr error -} - -func (g *MockGetter) Get(dst string, u *url.URL) error { - g.GetCalled = true - g.GetDst = dst - g.GetURL = u - - if g.Proxy != nil { - return g.Proxy.Get(dst, u) - } - - return g.GetErr -} - -func (g *MockGetter) GetFile(dst string, u *url.URL) error { - g.GetFileCalled = true - g.GetFileDst = dst - g.GetFileURL = u - - if g.Proxy != nil { - return g.Proxy.GetFile(dst, u) - } - return g.GetFileErr -} - -func (g *MockGetter) ClientMode(u *url.URL) (ClientMode, error) { - if l := len(u.Path); l > 0 && u.Path[l-1:] == "/" { - return ClientModeDir, nil - } - return ClientModeFile, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go deleted file mode 100644 index 94291947c0..0000000000 --- a/vendor/github.com/hashicorp/go-getter/get_s3.go +++ /dev/null @@ -1,347 +0,0 @@ -package getter - -import ( - "context" - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" -) - -// S3Getter is a Getter implementation that will download a module from -// a S3 bucket. -type S3Getter struct { - getter - - // Timeout sets a deadline which all S3 operations should - // complete within. - // - // The zero value means timeout. - Timeout time.Duration -} - -func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { - // Parse URL - ctx := g.Context() - - if g.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, g.Timeout) - defer cancel() - } - - region, bucket, path, _, creds, err := g.parseUrl(u) - if err != nil { - return 0, err - } - - // Create client config - client, err := g.newS3Client(region, u, creds) - if err != nil { - return 0, err - } - - // List the object(s) at the given prefix - req := &s3.ListObjectsInput{ - Bucket: aws.String(bucket), - Prefix: aws.String(path), - } - resp, err := client.ListObjectsWithContext(ctx, req) - if err != nil { - return 0, err - } - - for _, o := range resp.Contents { - // Use file mode on exact match. - if *o.Key == path { - return ClientModeFile, nil - } - - // Use dir mode if child keys are found. - if strings.HasPrefix(*o.Key, path+"/") { - return ClientModeDir, nil - } - } - - // There was no match, so just return file mode. The download is going - // to fail but we will let S3 return the proper error later. 
- return ClientModeFile, nil -} - -func (g *S3Getter) Get(dst string, u *url.URL) error { - ctx := g.Context() - - if g.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, g.Timeout) - defer cancel() - } - - // Parse URL - region, bucket, path, _, creds, err := g.parseUrl(u) - if err != nil { - return err - } - - // Remove destination if it already exists - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - if err == nil { - // Remove the destination - if err := os.RemoveAll(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { - return err - } - - client, err := g.newS3Client(region, u, creds) - if err != nil { - return err - } - - // List files in path, keep listing until no more objects are found - lastMarker := "" - hasMore := true - for hasMore { - req := &s3.ListObjectsInput{ - Bucket: aws.String(bucket), - Prefix: aws.String(path), - } - if lastMarker != "" { - req.Marker = aws.String(lastMarker) - } - - resp, err := client.ListObjectsWithContext(ctx, req) - if err != nil { - return err - } - - hasMore = aws.BoolValue(resp.IsTruncated) - - // Get each object storing each file relative to the destination path - for _, object := range resp.Contents { - lastMarker = aws.StringValue(object.Key) - objPath := aws.StringValue(object.Key) - - // If the key ends with a backslash assume it is a directory and ignore - if strings.HasSuffix(objPath, "/") { - continue - } - - // Get the object destination path - objDst, err := filepath.Rel(path, objPath) - if err != nil { - return err - } - objDst = filepath.Join(dst, objDst) - - if err := g.getObject(ctx, client, objDst, bucket, objPath, ""); err != nil { - return err - } - } - } - - return nil -} - -func (g *S3Getter) GetFile(dst string, u *url.URL) error { - ctx := g.Context() - - if g.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, g.Timeout) - defer cancel() - } - - region, bucket, path, version, creds, err := g.parseUrl(u) - if err != nil { - return err - } - - client, err := g.newS3Client(region, u, creds) - if err != nil { - return err - } - - return g.getObject(ctx, client, dst, bucket, path, version) -} - -func (g *S3Getter) getObject(ctx context.Context, client *s3.S3, dst, bucket, key, version string) error { - req := &s3.GetObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - if version != "" { - req.VersionId = aws.String(version) - } - - resp, err := client.GetObjectWithContext(ctx, req) - if err != nil { - return err - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { - return err - } - - body := resp.Body - - if g.client != nil && g.client.ProgressListener != nil { - fn := filepath.Base(key) - body = g.client.ProgressListener.TrackProgress(fn, 0, *resp.ContentLength, resp.Body) - } - defer body.Close() - - // There is no limit set for the size of an object from S3 - return copyReader(dst, body, 0666, g.client.umask(), 0) -} - -func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config { - conf := &aws.Config{} - metadataURLOverride := os.Getenv("AWS_METADATA_URL") - if creds == nil && metadataURLOverride != "" { - creds = credentials.NewChainCredentials( - []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - 
&ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.New(session.New(&aws.Config{ - Endpoint: aws.String(metadataURLOverride), - })), - }, - }) - } - - if creds != nil { - conf.Endpoint = &url.Host - conf.S3ForcePathStyle = aws.Bool(true) - if url.Scheme == "http" { - conf.DisableSSL = aws.Bool(true) - } - } - - conf.Credentials = creds - if region != "" { - conf.Region = aws.String(region) - } - - return conf.WithCredentialsChainVerboseErrors(true) -} - -func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) { - // This just check whether we are dealing with S3 or - // any other S3 compliant service. S3 has a predictable - // url as others do not - if strings.Contains(u.Host, "amazonaws.com") { - // Amazon S3 supports both virtual-hosted–style and path-style URLs to access a bucket, although path-style is deprecated - // In both cases few older regions supports dash-style region indication (s3-Region) even if AWS discourages their use. - // The same bucket could be reached with: - // bucket.s3.region.amazonaws.com/path - // bucket.s3-region.amazonaws.com/path - // s3.amazonaws.com/bucket/path - // s3-region.amazonaws.com/bucket/path - - hostParts := strings.Split(u.Host, ".") - switch len(hostParts) { - // path-style - case 3: - // Parse the region out of the first part of the host - region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") - if region == "" { - region = "us-east-1" - } - pathParts := strings.SplitN(u.Path, "/", 3) - bucket = pathParts[1] - path = pathParts[2] - // vhost-style, dash region indication - case 4: - // Parse the region out of the first part of the host - region = strings.TrimPrefix(strings.TrimPrefix(hostParts[1], "s3-"), "s3") - if region == "" { - err = fmt.Errorf("URL is not a valid S3 URL") - return - } - pathParts := strings.SplitN(u.Path, "/", 2) - bucket = hostParts[0] - path = pathParts[1] - //vhost-style, dot region indication - case 5: - region = hostParts[2] - pathParts := strings.SplitN(u.Path, "/", 2) - bucket = hostParts[0] - path = pathParts[1] - - } - if len(hostParts) < 3 && len(hostParts) > 5 { - err = fmt.Errorf("URL is not a valid S3 URL") - return - } - version = u.Query().Get("version") - - } else { - pathParts := strings.SplitN(u.Path, "/", 3) - if len(pathParts) != 3 { - err = fmt.Errorf("URL is not a valid S3 compliant URL") - return - } - bucket = pathParts[1] - path = pathParts[2] - version = u.Query().Get("version") - region = u.Query().Get("region") - if region == "" { - region = "us-east-1" - } - } - - _, hasAwsId := u.Query()["aws_access_key_id"] - _, hasAwsSecret := u.Query()["aws_access_key_secret"] - _, hasAwsToken := u.Query()["aws_access_token"] - if hasAwsId || hasAwsSecret || hasAwsToken { - creds = credentials.NewStaticCredentials( - u.Query().Get("aws_access_key_id"), - u.Query().Get("aws_access_key_secret"), - u.Query().Get("aws_access_token"), - ) - } - - return -} - -func (g *S3Getter) newS3Client( - region string, url *url.URL, creds *credentials.Credentials, -) (*s3.S3, error) { - var sess *session.Session - - if profile := url.Query().Get("aws_profile"); profile != "" { - var err error - sess, err = session.NewSessionWithOptions(session.Options{ - Profile: profile, - SharedConfigState: session.SharedConfigEnable, - }) - if err != nil { - return nil, err - } - } else { - config := g.getAWSConfig(region, url, creds) - sess = session.New(config) - } - - return s3.New(sess), nil -} diff --git 
a/vendor/github.com/hashicorp/go-getter/helper/url/url.go b/vendor/github.com/hashicorp/go-getter/helper/url/url.go deleted file mode 100644 index 02497c2543..0000000000 --- a/vendor/github.com/hashicorp/go-getter/helper/url/url.go +++ /dev/null @@ -1,14 +0,0 @@ -package url - -import ( - "net/url" -) - -// Parse parses rawURL into a URL structure. -// The rawURL may be relative or absolute. -// -// Parse is a wrapper for the Go stdlib net/url Parse function, but returns -// Windows "safe" URLs on Windows platforms. -func Parse(rawURL string) (*url.URL, error) { - return parse(rawURL) -} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go deleted file mode 100644 index ed1352a917..0000000000 --- a/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package url - -import ( - "net/url" -) - -func parse(rawURL string) (*url.URL, error) { - return url.Parse(rawURL) -} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go deleted file mode 100644 index 4280ec59a8..0000000000 --- a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go +++ /dev/null @@ -1,39 +0,0 @@ -package url - -import ( - "fmt" - "net/url" - "path/filepath" - "strings" -) - -func parse(rawURL string) (*url.URL, error) { - // Make sure we're using "/" since URLs are "/"-based. - rawURL = filepath.ToSlash(rawURL) - - if len(rawURL) > 1 && rawURL[1] == ':' { - // Assume we're dealing with a drive letter. In which case we - // force the 'file' scheme to avoid "net/url" URL.String() prepending - // our url with "./". - rawURL = "file://" + rawURL - } - - u, err := url.Parse(rawURL) - if err != nil { - return nil, err - } - - if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") { - // Assume we're dealing with a drive letter file path where the drive - // letter has been parsed into the URL Host. - u.Path = fmt.Sprintf("%s%s", u.Host, u.Path) - u.Host = "" - } - - // Remove leading slash for absolute file paths. - if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' { - u.Path = u.Path[1:] - } - - return u, err -} diff --git a/vendor/github.com/hashicorp/go-getter/netrc.go b/vendor/github.com/hashicorp/go-getter/netrc.go deleted file mode 100644 index c7f6a3fb3f..0000000000 --- a/vendor/github.com/hashicorp/go-getter/netrc.go +++ /dev/null @@ -1,67 +0,0 @@ -package getter - -import ( - "fmt" - "net/url" - "os" - "runtime" - - "github.com/bgentry/go-netrc/netrc" - "github.com/mitchellh/go-homedir" -) - -// addAuthFromNetrc adds auth information to the URL from the user's -// netrc file if it can be found. This will only add the auth info -// if the URL doesn't already have auth info specified and the -// the username is blank. -func addAuthFromNetrc(u *url.URL) error { - // If the URL already has auth information, do nothing - if u.User != nil && u.User.Username() != "" { - return nil - } - - // Get the netrc file path - path := os.Getenv("NETRC") - if path == "" { - filename := ".netrc" - if runtime.GOOS == "windows" { - filename = "_netrc" - } - - var err error - path, err = homedir.Expand("~/" + filename) - if err != nil { - return err - } - } - - // If the file is not a file, then do nothing - if fi, err := os.Stat(path); err != nil { - // File doesn't exist, do nothing - if os.IsNotExist(err) { - return nil - } - - // Some other error! 
- return err - } else if fi.IsDir() { - // File is directory, ignore - return nil - } - - // Load up the netrc file - net, err := netrc.ParseFile(path) - if err != nil { - return fmt.Errorf("Error parsing netrc file at %q: %s", path, err) - } - - machine := net.FindMachine(u.Host) - if machine == nil { - // Machine not found, no problem - return nil - } - - // Set the user info - u.User = url.UserPassword(machine.Login, machine.Password) - return nil -} diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go deleted file mode 100644 index 48ac9234ed..0000000000 --- a/vendor/github.com/hashicorp/go-getter/source.go +++ /dev/null @@ -1,77 +0,0 @@ -package getter - -import ( - "fmt" - "path/filepath" - "strings" -) - -// SourceDirSubdir takes a source URL and returns a tuple of the URL without -// the subdir and the subdir. -// -// ex: -// dom.com/path/?q=p => dom.com/path/?q=p, "" -// proto://dom.com/path//*?q=p => proto://dom.com/path?q=p, "*" -// proto://dom.com/path//path2?q=p => proto://dom.com/path?q=p, "path2" -// -func SourceDirSubdir(src string) (string, string) { - - // URL might contains another url in query parameters - stop := len(src) - if idx := strings.Index(src, "?"); idx > -1 { - stop = idx - } - - // Calculate an offset to avoid accidentally marking the scheme - // as the dir. - var offset int - if idx := strings.Index(src[:stop], "://"); idx > -1 { - offset = idx + 3 - } - - // First see if we even have an explicit subdir - idx := strings.Index(src[offset:stop], "//") - if idx == -1 { - return src, "" - } - - idx += offset - subdir := src[idx+2:] - src = src[:idx] - - // Next, check if we have query parameters and push them onto the - // URL. - if idx = strings.Index(subdir, "?"); idx > -1 { - query := subdir[idx:] - subdir = subdir[:idx] - src += query - } - - return src, subdir -} - -// SubdirGlob returns the actual subdir with globbing processed. -// -// dst should be a destination directory that is already populated (the -// download is complete) and subDir should be the set subDir. If subDir -// is an empty string, this returns an empty string. -// -// The returned path is the full absolute path. -func SubdirGlob(dst, subDir string) (string, error) { - pattern := filepath.Join(dst, subDir) - - matches, err := filepath.Glob(pattern) - if err != nil { - return "", err - } - - if len(matches) == 0 { - return "", fmt.Errorf("subdir %q not found", subDir) - } - - if len(matches) > 1 { - return "", fmt.Errorf("subdir %q matches multiple paths", subDir) - } - - return matches[0], nil -} diff --git a/vendor/github.com/hashicorp/go-getter/storage.go b/vendor/github.com/hashicorp/go-getter/storage.go deleted file mode 100644 index 2bc6b9ec33..0000000000 --- a/vendor/github.com/hashicorp/go-getter/storage.go +++ /dev/null @@ -1,13 +0,0 @@ -package getter - -// Storage is an interface that knows how to lookup downloaded directories -// as well as download and update directories from their sources into the -// proper location. -type Storage interface { - // Dir returns the directory on local disk where the directory source - // can be loaded from. - Dir(string) (string, bool, error) - - // Get will download and optionally update the given directory. 
- Get(string, string, bool) error -} diff --git a/vendor/github.com/hashicorp/go-getter/url.go b/vendor/github.com/hashicorp/go-getter/url.go deleted file mode 100644 index a9aed7f508..0000000000 --- a/vendor/github.com/hashicorp/go-getter/url.go +++ /dev/null @@ -1,25 +0,0 @@ -package getter - -import "net/url" - -// RedactURL is a port of url.Redacted from the standard library, -// which is like url.String but replaces any password with "redacted". -// Only the password in u.URL is redacted. This allows the library -// to maintain compatibility with go1.14. -// This port was also extended to redact SSH key from URL query parameter. -func RedactURL(u *url.URL) string { - if u == nil { - return "" - } - - ru := *u - if _, has := ru.User.Password(); has { - ru.User = url.UserPassword(ru.User.Username(), "redacted") - } - q := ru.Query() - if q.Get("sshkey") != "" { - q.Set("sshkey", "redacted") - ru.RawQuery = q.Encode() - } - return ru.String() -} diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE index abaf1e45f2..9938fb50ee 100644 --- a/vendor/github.com/hashicorp/go-hclog/LICENSE +++ b/vendor/github.com/hashicorp/go-hclog/LICENSE @@ -1,6 +1,4 @@ -MIT License - -Copyright (c) 2017 HashiCorp +Copyright (c) 2017 HashiCorp, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md index a16f5cd572..21a17c5af3 100644 --- a/vendor/github.com/hashicorp/go-hclog/README.md +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -17,7 +17,7 @@ JSON output mode for production. ## Stability Note -This library has reached 1.0 stability. It's API can be considered solidified +This library has reached 1.0 stability. Its API can be considered solidified and promised through future versions. ## Installation and Docs diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go index 9635c838b4..d00816b38f 100644 --- a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go +++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + //go:build !windows // +build !windows @@ -7,23 +10,35 @@ import ( "github.com/mattn/go-isatty" ) +// hasFD is used to check if the writer has an Fd value to check +// if it's a terminal. +type hasFD interface { + Fd() uintptr +} + // setColorization will mutate the values of this logger -// to approperately configure colorization options. It provides +// to appropriately configure colorization options. It provides // a wrapper to the output stream on Windows systems. 
func (l *intLogger) setColorization(opts *LoggerOptions) { - switch opts.Color { - case ColorOff: - fallthrough - case ForceColor: + if opts.Color != AutoColor { return - case AutoColor: - fi := l.checkWriterIsFile() - isUnixTerm := isatty.IsTerminal(fi.Fd()) - isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd()) - isTerm := isUnixTerm || isCygwinTerm - if !isTerm { + } + + if sc, ok := l.writer.w.(SupportsColor); ok { + if !sc.SupportsColor() { l.headerColor = ColorOff l.writer.color = ColorOff } + return + } + + fi, ok := l.writer.w.(hasFD) + if !ok { + return + } + + if !isatty.IsTerminal(fi.Fd()) { + l.headerColor = ColorOff + l.writer.color = ColorOff } } diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go index 30859168ee..2c3fb9ea6f 100644 --- a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go +++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + //go:build windows // +build windows @@ -7,32 +10,32 @@ import ( "os" colorable "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" ) // setColorization will mutate the values of this logger -// to approperately configure colorization options. It provides +// to appropriately configure colorization options. It provides // a wrapper to the output stream on Windows systems. func (l *intLogger) setColorization(opts *LoggerOptions) { - switch opts.Color { - case ColorOff: + if opts.Color == ColorOff { + return + } + + fi, ok := l.writer.w.(*os.File) + if !ok { + l.writer.color = ColorOff + l.headerColor = ColorOff return - case ForceColor: - fi := l.checkWriterIsFile() - l.writer.w = colorable.NewColorable(fi) - case AutoColor: - fi := l.checkWriterIsFile() - isUnixTerm := isatty.IsTerminal(os.Stdout.Fd()) - isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd()) - isTerm := isUnixTerm || isCygwinTerm - if !isTerm { - l.writer.color = ColorOff - l.headerColor = ColorOff - return - } - - if l.headerColor == ColorOff { - l.writer.w = colorable.NewColorable(fi) - } + } + + cfi := colorable.NewColorable(fi) + + // NewColorable detects if color is possible and if it's not, then it + // returns the original value. So we can test if we got the original + // value back to know if color is possible. + if cfi == fi { + l.writer.color = ColorOff + l.headerColor = ColorOff + } else { + l.writer.w = cfi } } diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go index 7815f50194..eb5aba556b 100644 --- a/vendor/github.com/hashicorp/go-hclog/context.go +++ b/vendor/github.com/hashicorp/go-hclog/context.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( diff --git a/vendor/github.com/hashicorp/go-hclog/exclude.go b/vendor/github.com/hashicorp/go-hclog/exclude.go index cfd4307a80..4b73ba553d 100644 --- a/vendor/github.com/hashicorp/go-hclog/exclude.go +++ b/vendor/github.com/hashicorp/go-hclog/exclude.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go index b9f00217ca..a7403f593a 100644 --- a/vendor/github.com/hashicorp/go-hclog/global.go +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MIT + package hclog import ( @@ -20,13 +23,13 @@ var ( ) // Default returns a globally held logger. This can be a good starting -// place, and then you can use .With() and .Name() to create sub-loggers +// place, and then you can use .With() and .Named() to create sub-loggers // to be used in more specific contexts. // The value of the Default logger can be set via SetDefault() or by // changing the options in DefaultOptions. // // This method is goroutine safe, returning a global from memory, but -// cause should be used if SetDefault() is called it random times +// care should be used if SetDefault() is called it random times // in the program as that may result in race conditions and an unexpected // Logger being returned. func Default() Logger { diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go index ff42f1bfc1..e9b1c18853 100644 --- a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index 83232f7a62..b45064acf1 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( @@ -8,7 +11,6 @@ import ( "fmt" "io" "log" - "os" "reflect" "runtime" "sort" @@ -17,6 +19,8 @@ import ( "sync" "sync/atomic" "time" + "unicode" + "unicode/utf8" "github.com/fatih/color" ) @@ -48,6 +52,12 @@ var ( Warn: color.New(color.FgHiYellow), Error: color.New(color.FgHiRed), } + + faintBoldColor = color.New(color.Faint, color.Bold) + faintColor = color.New(color.Faint) + faintMultiLinePrefix = faintColor.Sprint(" | ") + faintFieldSeparator = faintColor.Sprint("=") + faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n") ) // Make sure that intLogger is a Logger @@ -70,6 +80,7 @@ type intLogger struct { level *int32 headerColor ColorOption + fieldColor ColorOption implied []interface{} @@ -77,6 +88,8 @@ type intLogger struct { // create subloggers with their own level setting independentLevels bool + + subloggerHook func(sub Logger) Logger } // New returns a configured logger. 
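The colorization rewrite above drops the old panic-if-not-`*os.File` check in favor of duck typing: under `AutoColor`, hclog first asks the output writer itself through the new optional `SupportsColor` interface, and only falls back to an `Fd()`/isatty probe (Unix builds) or the `*os.File` assertion (Windows builds). A minimal sketch, assuming a hypothetical wrapper type, of opting a non-file writer into color on Unix:

```go
package main

import (
	"io"
	"os"

	"github.com/hashicorp/go-hclog"
)

// colorWriter is an invented wrapper that promises ANSI support by
// satisfying hclog's new optional SupportsColor interface.
type colorWriter struct{ io.Writer }

func (colorWriter) SupportsColor() bool { return true }

func main() {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:   "demo",
		Color:  hclog.AutoColor,
		Output: colorWriter{os.Stdout},
	})
	// Stays colorized even though the writer exposes no Fd(); the old
	// code would have panicked in checkWriterIsFile.
	logger.Info("hello")
}
```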
@@ -115,14 +128,19 @@ func newLogger(opts *LoggerOptions) *intLogger { mutex = new(sync.Mutex) } - var primaryColor, headerColor ColorOption - - if opts.ColorHeaderOnly { - primaryColor = ColorOff + var ( + primaryColor ColorOption = ColorOff + headerColor ColorOption = ColorOff + fieldColor ColorOption = ColorOff + ) + switch { + case opts.ColorHeaderOnly: headerColor = opts.Color - } else { + case opts.ColorHeaderAndFields: + fieldColor = opts.Color + headerColor = opts.Color + default: primaryColor = opts.Color - headerColor = ColorOff } l := &intLogger{ @@ -137,6 +155,8 @@ func newLogger(opts *LoggerOptions) *intLogger { exclude: opts.Exclude, independentLevels: opts.IndependentLevels, headerColor: headerColor, + fieldColor: fieldColor, + subloggerHook: opts.SubloggerHook, } if opts.IncludeLocation { l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset @@ -152,6 +172,10 @@ func newLogger(opts *LoggerOptions) *intLogger { l.timeFormat = opts.TimeFormat } + if l.subloggerHook == nil { + l.subloggerHook = identityHook + } + l.setColorization(opts) atomic.StoreInt32(l.level, int32(level)) @@ -159,8 +183,12 @@ func newLogger(opts *LoggerOptions) *intLogger { return l } +func identityHook(logger Logger) Logger { + return logger +} + // offsetIntLogger is the stack frame offset in the call stack for the caller to -// one of the Warn,Info,Log,etc methods. +// one of the Warn, Info, Log, etc methods. const offsetIntLogger = 3 // Log a message and a set of key/value pairs if the given level is at @@ -235,7 +263,17 @@ func needsQuoting(str string) bool { return false } -// Non-JSON logging format function +// logPlain is the non-JSON logging format function which writes directly +// to the underlying writer the logger was initialized with. +// +// If the logger was initialized with a color function, it also handles +// applying the color to the log message. +// +// Color Options +// 1. No color. +// 2. Color the whole log line, based on the level. +// 3. Color only the header (level) part of the log line. +// 4. Color both the header and fields of the log line. func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { if !l.disableTime { @@ -269,16 +307,19 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, if name != "" { l.writer.WriteString(name) - l.writer.WriteString(": ") + if msg != "" { + l.writer.WriteString(": ") + l.writer.WriteString(msg) + } + } else if msg != "" { + l.writer.WriteString(msg) } - l.writer.WriteString(msg) - args = append(l.implied, args...) var stacktrace CapturedStacktrace - if args != nil && len(args) > 0 { + if len(args) > 0 { if len(args)%2 != 0 { cs, ok := args[len(args)-1].(CapturedStacktrace) if ok { @@ -292,13 +333,16 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, l.writer.WriteByte(':') + // Handle the field arguments, which come in pairs (key=val). FOR: for i := 0; i < len(args); i = i + 2 { var ( + key string val string raw bool ) + // Convert the field value to a string. switch st := args[i+1].(type) { case string: val = st @@ -350,8 +394,7 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, } } - var key string - + // Convert the field key to a string. 
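`newLogger` above turns the two-way color split into a three-way one: full-line color (default), `ColorHeaderOnly`, and the new `ColorHeaderAndFields`, which `logPlain` implements by rendering keys with the faint/bold SGR helpers defined earlier in this hunk. A small sketch of enabling the new mode:

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:                 "db",
		Color:                hclog.AutoColor,
		ColorHeaderAndFields: true, // faint+bold keys, faint '=' separators
	})
	logger.Info("query finished", "rows", 42, "elapsed", "1.2s")
}
```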
switch st := args[i].(type) { case string: key = st @@ -359,21 +402,49 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, key = fmt.Sprintf("%s", st) } + // Optionally apply the ANSI "faint" and "bold" + // SGR values to the key. + if l.fieldColor != ColorOff { + key = faintBoldColor.Sprint(key) + } + + // Values may contain multiple lines, and that format + // is preserved, with each line prefixed with a " | " + // to show it's part of a collection of lines. + // + // Values may also need quoting, if not all the runes + // in the value string are "normal", like if they + // contain ANSI escape sequences. if strings.Contains(val, "\n") { l.writer.WriteString("\n ") l.writer.WriteString(key) - l.writer.WriteString("=\n") - writeIndent(l.writer, val, " | ") + if l.fieldColor != ColorOff { + l.writer.WriteString(faintFieldSeparatorWithNewLine) + writeIndent(l.writer, val, faintMultiLinePrefix) + } else { + l.writer.WriteString("=\n") + writeIndent(l.writer, val, " | ") + } l.writer.WriteString(" ") } else if !raw && needsQuoting(val) { l.writer.WriteByte(' ') l.writer.WriteString(key) - l.writer.WriteByte('=') - l.writer.WriteString(strconv.Quote(val)) + if l.fieldColor != ColorOff { + l.writer.WriteString(faintFieldSeparator) + } else { + l.writer.WriteByte('=') + } + l.writer.WriteByte('"') + writeEscapedForOutput(l.writer, val, true) + l.writer.WriteByte('"') } else { l.writer.WriteByte(' ') l.writer.WriteString(key) - l.writer.WriteByte('=') + if l.fieldColor != ColorOff { + l.writer.WriteString(faintFieldSeparator) + } else { + l.writer.WriteByte('=') + } l.writer.WriteString(val) } } @@ -393,19 +464,98 @@ func writeIndent(w *writer, str string, indent string) { if nl == -1 { if str != "" { w.WriteString(indent) - w.WriteString(str) + writeEscapedForOutput(w, str, false) w.WriteString("\n") } return } w.WriteString(indent) - w.WriteString(str[:nl]) + writeEscapedForOutput(w, str[:nl], false) w.WriteString("\n") str = str[nl+1:] } } +func needsEscaping(str string) bool { + for _, b := range str { + if !unicode.IsPrint(b) || b == '"' { + return true + } + } + + return false +} + +const ( + lowerhex = "0123456789abcdef" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +func writeEscapedForOutput(w io.Writer, str string, escapeQuotes bool) { + if !needsEscaping(str) { + w.Write([]byte(str)) + return + } + + bb := bufPool.Get().(*bytes.Buffer) + bb.Reset() + + defer bufPool.Put(bb) + + for _, r := range str { + if escapeQuotes && r == '"' { + bb.WriteString(`\"`) + } else if unicode.IsPrint(r) { + bb.WriteRune(r) + } else { + switch r { + case '\a': + bb.WriteString(`\a`) + case '\b': + bb.WriteString(`\b`) + case '\f': + bb.WriteString(`\f`) + case '\n': + bb.WriteString(`\n`) + case '\r': + bb.WriteString(`\r`) + case '\t': + bb.WriteString(`\t`) + case '\v': + bb.WriteString(`\v`) + default: + switch { + case r < ' ': + bb.WriteString(`\x`) + bb.WriteByte(lowerhex[byte(r)>>4]) + bb.WriteByte(lowerhex[byte(r)&0xF]) + case !utf8.ValidRune(r): + r = 0xFFFD + fallthrough + case r < 0x10000: + bb.WriteString(`\u`) + for s := 12; s >= 0; s -= 4 { + bb.WriteByte(lowerhex[r>>uint(s)&0xF]) + } + default: + bb.WriteString(`\U`) + for s := 28; s >= 0; s -= 4 { + bb.WriteByte(lowerhex[r>>uint(s)&0xF]) + } + } + } + } + } + + w.Write(bb.Bytes()) +} + func (l *intLogger) renderSlice(v reflect.Value) string { var buf bytes.Buffer @@ -638,7 +788,7 @@ func (l *intLogger) With(args ...interface{}) Logger { sl.implied = 
append(sl.implied, MissingKey, extra) } - return sl + return l.subloggerHook(sl) } // Create a new sub-Logger that a name decending from the current name. @@ -652,7 +802,7 @@ func (l *intLogger) Named(name string) Logger { sl.name = name } - return sl + return l.subloggerHook(sl) } // Create a new sub-Logger with an explicit name. This ignores the current @@ -663,7 +813,7 @@ func (l *intLogger) ResetNamed(name string) Logger { sl.name = name - return sl + return l.subloggerHook(sl) } func (l *intLogger) ResetOutput(opts *LoggerOptions) error { @@ -707,6 +857,11 @@ func (l *intLogger) SetLevel(level Level) { atomic.StoreInt32(l.level, int32(level)) } +// Returns the current level +func (l *intLogger) GetLevel() Level { + return Level(atomic.LoadInt32(l.level)) +} + // Create a *log.Logger that will send it's data through this Logger. This // allows packages that expect to be using the standard library log to actually // use this logger. @@ -734,16 +889,6 @@ func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { } } -// checks if the underlying io.Writer is a file, and -// panics if not. For use by colorization. -func (l *intLogger) checkWriterIsFile() *os.File { - fi, ok := l.writer.w.(*os.File) - if !ok { - panic("Cannot enable coloring of non-file Writers") - } - return fi -} - // Accept implements the SinkAdapter interface func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) { i.log(name, level, msg, args...) diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 8581430284..947ac0c9af 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( @@ -9,7 +12,7 @@ import ( ) var ( - //DefaultOutput is used as the default log output. + // DefaultOutput is used as the default log output. DefaultOutput io.Writer = os.Stderr // DefaultLevel is used as the default log level. @@ -28,7 +31,7 @@ const ( // of actions in code, such as function enters/exits, etc. Trace Level = 1 - // Debug information for programmer lowlevel analysis. + // Debug information for programmer low-level analysis. Debug Level = 2 // Info information about steady state operations. @@ -44,13 +47,13 @@ const ( Off Level = 6 ) -// Format is a simple convience type for when formatting is required. When +// Format is a simple convenience type for when formatting is required. When // processing a value of this type, the logger automatically treats the first // argument as a Printf formatting string and passes the rest as the values // to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}). type Format []interface{} -// Fmt returns a Format type. This is a convience function for creating a Format +// Fmt returns a Format type. This is a convenience function for creating a Format // type. func Fmt(str string, args ...interface{}) Format { return append(Format{str}, args...) @@ -89,6 +92,13 @@ const ( ForceColor ) +// SupportsColor is an optional interface that can be implemented by the output +// value. If implemented and SupportsColor() returns true, then AutoColor will +// enable colorization. +type SupportsColor interface { + SupportsColor() bool +} + // LevelFromString returns a Level type for the named log level, or "NoLevel" if // the level string is invalid. 
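`With`, `Named`, and `ResetNamed` above now route every sublogger through `l.subloggerHook`, wired up from the new `LoggerOptions.SubloggerHook` option shown further down in this diff. One caveat worth an example: a hook that itself calls `sub.With` or `sub.Named` would re-enter the hook, so this sketch only observes creation:

```go
package main

import (
	"fmt"
	"sync/atomic"

	"github.com/hashicorp/go-hclog"
)

func main() {
	var created int64
	logger := hclog.New(&hclog.LoggerOptions{
		Name: "root",
		SubloggerHook: func(sub hclog.Logger) hclog.Logger {
			atomic.AddInt64(&created, 1) // fires for Named, With, ResetNamed
			return sub
		},
	})
	logger.Named("http").With("request_id", "r-1").Info("ready")
	fmt.Println("subloggers created:", atomic.LoadInt64(&created)) // 2
}
```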
This facilitates setting the log level via // config or environment variable by name in a predictable way. @@ -134,7 +144,7 @@ func (l Level) String() string { } } -// Logger describes the interface that must be implemeted by all loggers. +// Logger describes the interface that must be implemented by all loggers. type Logger interface { // Args are alternating key, val pairs // keys must be strings @@ -198,6 +208,9 @@ type Logger interface { // implementation cannot update the level on the fly, it should no-op. SetLevel(level Level) + // Returns the current level + GetLevel() Level + // Return a value that conforms to the stdlib log.Logger interface StandardLogger(opts *StandardLoggerOptions) *log.Logger @@ -236,7 +249,7 @@ type LoggerOptions struct { // Name of the subsystem to prefix logs with Name string - // The threshold for the logger. Anything less severe is supressed + // The threshold for the logger. Anything less severe is suppressed Level Level // Where to write the logs to. Defaults to os.Stderr if nil @@ -267,13 +280,17 @@ type LoggerOptions struct { // because setting TimeFormat to empty assumes the default format. DisableTime bool - // Color the output. On Windows, colored logs are only avaiable for io.Writers that + // Color the output. On Windows, colored logs are only available for io.Writers that // are concretely instances of *os.File. Color ColorOption // Only color the header, not the body. This can help with readability of long messages. ColorHeaderOnly bool + // Color the header and message body fields. This can help with readability + // of long messages with multiple fields. + ColorHeaderAndFields bool + // A function which is called with the log information and if it returns true the value // should not be logged. // This is useful when interacting with a system that you wish to suppress the log @@ -282,9 +299,16 @@ type LoggerOptions struct { // IndependentLevels causes subloggers to be created with an independent // copy of this logger's level. This means that using SetLevel on this - // logger will not effect any subloggers, and SetLevel on any subloggers - // will not effect the parent or sibling loggers. + // logger will not affect any subloggers, and SetLevel on any subloggers + // will not affect the parent or sibling loggers. IndependentLevels bool + + // SubloggerHook registers a function that is called when a sublogger via + // Named, With, or ResetNamed is created. If defined, the function is passed + // the newly created Logger and the returned Logger is returned from the + // original function. This option allows customization via interception and + // wrapping of Logger instances. + SubloggerHook func(sub Logger) Logger } // InterceptLogger describes the interface for using a logger diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go index bc14f77080..d43da809eb 100644 --- a/vendor/github.com/hashicorp/go-hclog/nulllogger.go +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MIT + package hclog import ( @@ -49,6 +52,8 @@ func (l *nullLogger) ResetNamed(name string) Logger { return l } func (l *nullLogger) SetLevel(level Level) {} +func (l *nullLogger) GetLevel() Level { return NoLevel } + func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { return log.New(l.StandardWriter(opts), "", log.LstdFlags) } diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go index 641f20ccbc..03739b61fa 100644 --- a/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go index 421a1f06c0..4ee219bf0c 100644 --- a/vendor/github.com/hashicorp/go-hclog/writer.go +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MIT + package hclog import ( diff --git a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md new file mode 100644 index 0000000000..3d0379c500 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md @@ -0,0 +1,102 @@ +## v1.6.0 + +CHANGES: + +* plugin: Plugins written in other languages can optionally start to advertise whether they support gRPC broker multiplexing. + If the environment variable `PLUGIN_MULTIPLEX_GRPC` is set, it is safe to include a seventh field containing a boolean + value in the `|`-separated protocol negotiation line. + +ENHANCEMENTS: + +* Support muxing gRPC broker connections over a single listener [[GH-288](https://github.com/hashicorp/go-plugin/pull/288)] +* client: Configurable buffer size for reading plugin log lines [[GH-265](https://github.com/hashicorp/go-plugin/pull/265)] +* Use `buf` for proto generation [[GH-286](https://github.com/hashicorp/go-plugin/pull/286)] +* deps: bump golang.org/x/net to v0.17.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] +* deps: bump golang.org/x/sys to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] +* deps: bump golang.org/x/text to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] + +## v1.5.2 + +ENHANCEMENTS: + +client: New `UnixSocketConfig.TempDir` option allows setting the directory to use when creating plugin-specific Unix socket directories [[GH-282](https://github.com/hashicorp/go-plugin/pull/282)] + +## v1.5.1 + +BUGS: + +* server: `PLUGIN_UNIX_SOCKET_DIR` is consistently used for gRPC broker sockets as well as the initial socket [[GH-277](https://github.com/hashicorp/go-plugin/pull/277)] + +ENHANCEMENTS: + +* client: New `UnixSocketConfig` option in `ClientConfig` to support making the client's Unix sockets group-writable [[GH-277](https://github.com/hashicorp/go-plugin/pull/277)] + +## v1.5.0 + +ENHANCEMENTS: + +* client: New `runner.Runner` interface to support clients providing custom plugin command runner implementations [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] + * Accessible via new `ClientConfig` field `RunnerFunc`, which is mutually exclusive with `Cmd` and `Reattach` + * Reattaching support via `ReattachConfig` field `ReattachFunc` +* client: New `ClientConfig` field `SkipHostEnv` allows omitting the client process' own environment variables from the plugin command's environment 
[[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] +* client: Add `ID()` method to `Client` for retrieving the pid or other unique ID of a running plugin [[GH-272](https://github.com/hashicorp/go-plugin/pull/272)] +* server: Support setting the directory to create Unix sockets in with the env var `PLUGIN_UNIX_SOCKET_DIR` [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] +* server: Support setting group write permission and a custom group name or gid owner with the env var `PLUGIN_UNIX_SOCKET_GROUP` [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] + +## v1.4.11-rc1 + +ENHANCEMENTS: + +* deps: bump protoreflect to v1.15.1 [[GH-264](https://github.com/hashicorp/go-plugin/pull/264)] + +## v1.4.10 + +BUG FIXES: + +* additional notes: ensure to close files [[GH-241](https://github.com/hashicorp/go-plugin/pull/241)] + +ENHANCEMENTS: + +* deps: Remove direct dependency on golang.org/x/net [[GH-240](https://github.com/hashicorp/go-plugin/pull/240)] + +## v1.4.9 + +ENHANCEMENTS: + +* client: Remove log warning introduced in 1.4.5 when SecureConfig is nil. [[GH-238](https://github.com/hashicorp/go-plugin/pull/238)] + +## v1.4.8 + +BUG FIXES: + +* Fix windows build: [[GH-227](https://github.com/hashicorp/go-plugin/pull/227)] + +## v1.4.7 + +ENHANCEMENTS: + +* More detailed error message on plugin start failure: [[GH-223](https://github.com/hashicorp/go-plugin/pull/223)] + +## v1.4.6 + +BUG FIXES: + +* server: Prevent gRPC broker goroutine leak when using `GRPCServer` type `GracefulStop()` or `Stop()` methods [[GH-220](https://github.com/hashicorp/go-plugin/pull/220)] + +## v1.4.5 + +ENHANCEMENTS: + +* client: log warning when SecureConfig is nil [[GH-207](https://github.com/hashicorp/go-plugin/pull/207)] + + +## v1.4.4 + +ENHANCEMENTS: + +* client: increase level of plugin exit logs [[GH-195](https://github.com/hashicorp/go-plugin/pull/195)] + +BUG FIXES: + +* Bidirectional communication: fix bidirectional communication when AutoMTLS is enabled [[GH-193](https://github.com/hashicorp/go-plugin/pull/193)] +* RPC: Trim a spurious log message for plugins using RPC [[GH-186](https://github.com/hashicorp/go-plugin/pull/186)] diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE index 82b4de97c7..042324fb7e 100644 --- a/vendor/github.com/hashicorp/go-plugin/LICENSE +++ b/vendor/github.com/hashicorp/go-plugin/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2016 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index 46ee09fc0c..50baee06e1 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -3,8 +3,10 @@ `go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system that has been in use by HashiCorp tooling for over 4 years. While initially created for [Packer](https://www.packer.io), it is additionally in use by -[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and -[Vault](https://www.vaultproject.io). +[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), +[Vault](https://www.vaultproject.io), +[Boundary](https://www.boundaryproject.io), +and [Waypoint](https://www.waypointproject.io). While the plugin system is over RPC, it is currently only designed to work over a local [reliable] network. 
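Tying the v1.6.0 changelog entry above to the wire format: when the host sets `PLUGIN_MULTIPLEX_GRPC`, a plugin may append a seventh `|`-separated field, a boolean advertising gRPC broker multiplexing support, to the negotiation line it prints on stdout. An illustrative sketch of that line; the layout follows the client-side parsing later in this diff, and every concrete value here is invented:

```go
package main

import "fmt"

func main() {
	// core-version|app-version|network|address|protocol|server-cert|mux
	fmt.Printf("%d|%d|%s|%s|%s|%s|%t\n",
		1,                // core protocol version
		5,                // negotiated application protocol version
		"unix",           // network type
		"/tmp/plug.sock", // listener address (invented)
		"grpc",           // wire protocol
		"",               // optional AutoMTLS server cert, empty in this sketch
		true,             // optional: gRPC broker multiplexing supported
	)
}
```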
Plugins over a real network are not supported diff --git a/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml b/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml new file mode 100644 index 0000000000..033d0153b2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +version: v1 +plugins: + - plugin: buf.build/protocolbuffers/go + out: . + opt: + - paths=source_relative + - plugin: buf.build/grpc/go:v1.3.0 + out: . + opt: + - paths=source_relative + - require_unimplemented_servers=false diff --git a/vendor/github.com/hashicorp/go-plugin/buf.yaml b/vendor/github.com/hashicorp/go-plugin/buf.yaml new file mode 100644 index 0000000000..3d0da4c719 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/buf.yaml @@ -0,0 +1,7 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +version: v1 +build: + excludes: + - examples/ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index 780a3121da..73f6b35151 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -22,7 +25,11 @@ import ( "sync/atomic" "time" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/cmdrunner" + "github.com/hashicorp/go-plugin/internal/grpcmux" + "github.com/hashicorp/go-plugin/runner" + "google.golang.org/grpc" ) // If this is 1, then we've called CleanupClients. This can be used @@ -40,7 +47,7 @@ var managedClientsLock sync.Mutex var ( // ErrProcessNotFound is returned when a client is instantiated to // reattach to an existing process and it isn't found. - ErrProcessNotFound = errors.New("Reattachment process not found") + ErrProcessNotFound = cmdrunner.ErrProcessNotFound // ErrChecksumsDoNotMatch is returned when binary's checksum doesn't match // the one provided in the SecureConfig. @@ -57,8 +64,18 @@ var ( // ErrSecureConfigAndReattach is returned when both Reattach and // SecureConfig are set. ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") + + // ErrGRPCBrokerMuxNotSupported is returned when the client requests + // multiplexing over the gRPC broker, but the plugin does not support the + // feature. In most cases, this should be resolvable by updating and + // rebuilding the plugin, or restarting the plugin with + // ClientConfig.GRPCBrokerMultiplex set to false. + ErrGRPCBrokerMuxNotSupported = errors.New("client requested gRPC broker multiplexing but plugin does not support the feature") ) +// defaultPluginLogBufferSize is the default size of the buffer used to read from stderr for plugin log lines. +const defaultPluginLogBufferSize = 64 * 1024 + // Client handles the lifecycle of a plugin application. It launches // plugins, connects to them, dispenses interface implementations, and handles // killing the process. @@ -75,7 +92,7 @@ type Client struct { exited bool l sync.Mutex address net.Addr - process *os.Process + runner runner.AttachedRunner client ClientProtocol protocol Protocol logger hclog.Logger @@ -94,6 +111,11 @@ type Client struct { // processKilled is used for testing only, to flag when the process was // forcefully killed. 
processKilled bool + + unixSocketCfg UnixSocketConfig + + grpcMuxerOnce sync.Once + grpcMuxer *grpcmux.GRPCClientMuxer } // NegotiatedVersion returns the protocol version negotiated with the server. @@ -102,6 +124,19 @@ func (c *Client) NegotiatedVersion() int { return c.negotiatedVersion } +// ID returns a unique ID for the running plugin. By default this is the process +// ID (pid), but it could take other forms if RunnerFunc was provided. +func (c *Client) ID() string { + c.l.Lock() + defer c.l.Unlock() + + if c.runner != nil { + return c.runner.ID() + } + + return "" +} + // ClientConfig is the configuration used to initialize a new // plugin client. After being used to initialize a plugin client, // that configuration must not be modified again. @@ -129,6 +164,13 @@ type ClientConfig struct { Cmd *exec.Cmd Reattach *ReattachConfig + // RunnerFunc allows consumers to provide their own implementation of + // runner.Runner and control the context within which a plugin is executed. + // The cmd argument will have been copied from the config and populated with + // environment variables that a go-plugin server expects to read such as + // AutoMTLS certs and the magic cookie key. + RunnerFunc func(l hclog.Logger, cmd *exec.Cmd, tmpDir string) (runner.Runner, error) + // SecureConfig is configuration for verifying the integrity of the // executable. It can not be used with Reattach. SecureConfig *SecureConfig @@ -181,6 +223,10 @@ type ClientConfig struct { // it will default to hclog's default logger. Logger hclog.Logger + // PluginLogBufferSize is the buffer size(bytes) to read from stderr for plugin log lines. + // If this is 0, then the default of 64KB is used. + PluginLogBufferSize int + // AutoMTLS has the client and server automatically negotiate mTLS for // transport authentication. This ensures that only the original client will // be allowed to connect to the server, and all other connections will be @@ -203,15 +249,64 @@ type ClientConfig struct { // // You cannot Reattach to a server with this option enabled. AutoMTLS bool + + // GRPCDialOptions allows plugin users to pass custom grpc.DialOption + // to create gRPC connections. This only affects plugins using the gRPC + // protocol. + GRPCDialOptions []grpc.DialOption + + // GRPCBrokerMultiplex turns on multiplexing for the gRPC broker. The gRPC + // broker will multiplex all brokered gRPC servers over the plugin's original + // listener socket instead of making a new listener for each server. The + // go-plugin library currently only includes a Go implementation for the + // server (i.e. plugin) side of gRPC broker multiplexing. + // + // Does not support reattaching. + // + // Multiplexed gRPC streams MUST be established sequentially, i.e. after + // calling AcceptAndServe from one side, wait for the other side to Dial + // before calling AcceptAndServe again. + GRPCBrokerMultiplex bool + + // SkipHostEnv allows plugins to run without inheriting the parent process' + // environment variables. + SkipHostEnv bool + + // UnixSocketConfig configures additional options for any Unix sockets + // that are created. Not normally required. Not supported on Windows. + UnixSocketConfig *UnixSocketConfig +} + +type UnixSocketConfig struct { + // If set, go-plugin will change the owner of any Unix sockets created to + // this group, and set them as group-writable. Can be a name or gid. The + // client process must be a member of this group or chown will fail. 
+ Group string + + // TempDir specifies the base directory to use when creating a plugin-specific + // temporary directory. It is expected to already exist and be writable. If + // not set, defaults to the directory chosen by os.MkdirTemp. + TempDir string + + // The directory to create Unix sockets in. Internally created and managed + // by go-plugin and deleted when the plugin is killed. Will be created + // inside TempDir if specified. + socketDir string } // ReattachConfig is used to configure a client to reattach to an // already-running plugin process. You can retrieve this information by // calling ReattachConfig on Client. type ReattachConfig struct { - Protocol Protocol - Addr net.Addr - Pid int + Protocol Protocol + ProtocolVersion int + Addr net.Addr + Pid int + + // ReattachFunc allows consumers to provide their own implementation of + // runner.AttachedRunner and attach to something other than a plain process. + // At least one of Pid or ReattachFunc must be set. + ReattachFunc runner.ReattachFunc // Test is set to true if this is reattaching to to a plugin in "test mode" // (see ServeConfig.Test). In this mode, client.Kill will NOT kill the @@ -288,11 +383,11 @@ func CleanupClients() { wg.Wait() } -// Creates a new plugin client which manages the lifecycle of an external +// NewClient creates a new plugin client which manages the lifecycle of an external // plugin and gets the address for the RPC connection. // // The client must be cleaned up at some point by calling Kill(). If -// the client is a managed client (created with NewManagedClient) you +// the client is a managed client (created with ClientConfig.Managed) you // can just call CleanupClients at the end of your program and they will // be properly cleaned. func NewClient(config *ClientConfig) (c *Client) { @@ -310,10 +405,10 @@ func NewClient(config *ClientConfig) (c *Client) { } if config.SyncStdout == nil { - config.SyncStdout = ioutil.Discard + config.SyncStdout = io.Discard } if config.SyncStderr == nil { - config.SyncStderr = ioutil.Discard + config.SyncStderr = io.Discard } if config.AllowedProtocols == nil { @@ -328,6 +423,10 @@ func NewClient(config *ClientConfig) (c *Client) { }) } + if config.PluginLogBufferSize == 0 { + config.PluginLogBufferSize = defaultPluginLogBufferSize + } + c = &Client{ config: config, logger: config.Logger, @@ -400,12 +499,13 @@ func (c *Client) killed() bool { func (c *Client) Kill() { // Grab a lock to read some private fields. c.l.Lock() - process := c.process + runner := c.runner addr := c.address + hostSocketDir := c.unixSocketCfg.socketDir c.l.Unlock() - // If there is no process, there is nothing to kill. - if process == nil { + // If there is no runner or ID, there is nothing to kill. + if runner == nil || runner.ID() == "" { return } @@ -413,10 +513,14 @@ func (c *Client) Kill() { // Wait for the all client goroutines to finish. c.clientWaitGroup.Wait() + if hostSocketDir != "" { + os.RemoveAll(hostSocketDir) + } + // Make sure there is no reference to the old process after it has been // killed. 
c.l.Lock() - c.process = nil + c.runner = nil c.l.Unlock() }() @@ -459,14 +563,16 @@ func (c *Client) Kill() { // If graceful exiting failed, just kill it c.logger.Warn("plugin failed to exit gracefully") - process.Kill() + if err := runner.Kill(context.Background()); err != nil { + c.logger.Debug("error killing plugin", "error", err) + } c.l.Lock() c.processKilled = true c.l.Unlock() } -// Starts the underlying subprocess, communicating with it to negotiate +// Start the underlying subprocess, communicating with it to negotiate // a port for RPC connections, and returning the address to connect via RPC. // // This method is safe to call multiple times. Subsequent calls have no effect. @@ -484,16 +590,27 @@ func (c *Client) Start() (addr net.Addr, err error) { // this in a {} for scoping reasons, and hopeful that the escape // analysis will pop the stack here. { - cmdSet := c.config.Cmd != nil - attachSet := c.config.Reattach != nil - secureSet := c.config.SecureConfig != nil - if cmdSet == attachSet { - return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") + var mutuallyExclusiveOptions int + if c.config.Cmd != nil { + mutuallyExclusiveOptions += 1 + } + if c.config.Reattach != nil { + mutuallyExclusiveOptions += 1 + } + if c.config.RunnerFunc != nil { + mutuallyExclusiveOptions += 1 + } + if mutuallyExclusiveOptions != 1 { + return nil, fmt.Errorf("exactly one of Cmd, or Reattach, or RunnerFunc must be set") } - if secureSet && attachSet { + if c.config.SecureConfig != nil && c.config.Reattach != nil { return nil, ErrSecureConfigAndReattach } + + if c.config.GRPCBrokerMultiplex && c.config.Reattach != nil { + return nil, fmt.Errorf("gRPC broker multiplexing is not supported with Reattach config") + } } if c.config.Reattach != nil { @@ -525,20 +642,22 @@ func (c *Client) Start() (addr net.Addr, err error) { fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")), } + if c.config.GRPCBrokerMultiplex { + env = append(env, fmt.Sprintf("%s=true", envMultiplexGRPC)) + } cmd := c.config.Cmd - cmd.Env = append(cmd.Env, os.Environ()...) - cmd.Env = append(cmd.Env, env...) - cmd.Stdin = os.Stdin - - cmdStdout, err := cmd.StdoutPipe() - if err != nil { - return nil, err + if cmd == nil { + // It's only possible to get here if RunnerFunc is non-nil, but we'll + // still use cmd as a spec to populate metadata for the external + // implementation to consume. + cmd = exec.Command("") } - cmdStderr, err := cmd.StderrPipe() - if err != nil { - return nil, err + if !c.config.SkipHostEnv { + cmd.Env = append(cmd.Env, os.Environ()...) } + cmd.Env = append(cmd.Env, env...) 
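By this point `Start` validates that exactly one of `Cmd`, `Reattach`, or `RunnerFunc` is set, and the new `ClientConfig` knobs from this update are all in view. A host-side sketch pulling them together; the handshake values, plugin map, binary path, group, and directory are all invented stand-ins:

```go
package main

import (
	"os/exec"

	"github.com/hashicorp/go-plugin"
)

var handshake = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "EXAMPLE_PLUGIN", // invented
	MagicCookieValue: "example",        // invented
}

var pluginMap = map[string]plugin.Plugin{} // real hosts register plugins here

func main() {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig:     handshake,
		Plugins:             pluginMap,
		Cmd:                 exec.Command("./my-plugin"),
		AllowedProtocols:    []plugin.Protocol{plugin.ProtocolGRPC},
		GRPCBrokerMultiplex: true,       // mux brokered servers over one connection
		PluginLogBufferSize: 128 * 1024, // raise the 64KiB stderr line buffer
		SkipHostEnv:         true,       // don't inherit the host's environment
		UnixSocketConfig: &plugin.UnixSocketConfig{
			Group:   "plugin-users",   // invented group for group-writable sockets
			TempDir: "/var/run/myapp", // invented base dir for the socket dir
		},
	})
	defer client.Kill()
}
```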
+ cmd.Stdin = os.Stdin if c.config.SecureConfig != nil { if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { @@ -567,30 +686,68 @@ func (c *Client) Start() (addr net.Addr, err error) { c.config.TLSConfig = &tls.Config{ Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + MinVersion: tls.VersionTLS12, ServerName: "localhost", } } - c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args) - err = cmd.Start() - if err != nil { - return + if c.config.UnixSocketConfig != nil { + c.unixSocketCfg = *c.config.UnixSocketConfig + } + + if c.unixSocketCfg.Group != "" { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketGroup, c.unixSocketCfg.Group)) + } + + var runner runner.Runner + switch { + case c.config.RunnerFunc != nil: + c.unixSocketCfg.socketDir, err = os.MkdirTemp(c.unixSocketCfg.TempDir, "plugin-dir") + if err != nil { + return nil, err + } + // os.MkdirTemp creates folders with 0o700, so if we have a group + // configured we need to make it group-writable. + if c.unixSocketCfg.Group != "" { + err = setGroupWritable(c.unixSocketCfg.socketDir, c.unixSocketCfg.Group, 0o770) + if err != nil { + return nil, err + } + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketDir, c.unixSocketCfg.socketDir)) + c.logger.Trace("created temporary directory for unix sockets", "dir", c.unixSocketCfg.socketDir) + + runner, err = c.config.RunnerFunc(c.logger, cmd, c.unixSocketCfg.socketDir) + if err != nil { + return nil, err + } + default: + runner, err = cmdrunner.NewCmdRunner(c.logger, cmd) + if err != nil { + return nil, err + } + } - // Set the process - c.process = cmd.Process - c.logger.Debug("plugin started", "path", cmd.Path, "pid", c.process.Pid) + c.runner = runner + startCtx, startCtxCancel := context.WithTimeout(context.Background(), c.config.StartTimeout) + defer startCtxCancel() + err = runner.Start(startCtx) + if err != nil { + return nil, err + } // Make sure the command is properly cleaned up if there is an error defer func() { - r := recover() + rErr := recover() - if err != nil || r != nil { - cmd.Process.Kill() + if err != nil || rErr != nil { + runner.Kill(context.Background()) } - if r != nil { - panic(r) + if rErr != nil { + panic(rErr) } }() @@ -601,7 +758,7 @@ func (c *Client) Start() (addr net.Addr, err error) { c.clientWaitGroup.Add(1) c.stderrWaitGroup.Add(1) // logStderr calls Done() - go c.logStderr(cmdStderr) + go c.logStderr(runner.Name(), runner.Stderr()) c.clientWaitGroup.Add(1) go func() { @@ -610,29 +767,19 @@ func (c *Client) Start() (addr net.Addr, err error) { defer c.clientWaitGroup.Done() - // get the cmd info early, since the process information will be removed - // in Kill. - pid := c.process.Pid - path := cmd.Path - // wait to finish reading from stderr since the stderr pipe reader // will be closed by the subsequent call to cmd.Wait(). c.stderrWaitGroup.Wait() // Wait for the command to end. - err := cmd.Wait() - - debugMsgArgs := []interface{}{ - "path", path, - "pid", pid, - } + err := runner.Wait(context.Background()) if err != nil { - debugMsgArgs = append(debugMsgArgs, - []interface{}{"error", err.Error()}...) + c.logger.Error("plugin process exited", "plugin", runner.Name(), "id", runner.ID(), "error", err.Error()) + } else { + // Log and make sure to flush the logs right away + c.logger.Info("plugin process exited", "plugin", runner.Name(), "id", runner.ID()) } - // Log and make sure to flush the logs write away - c.logger.Debug("plugin process exited", debugMsgArgs...) 
os.Stderr.Sync() // Set that we exited, which takes a lock @@ -649,10 +796,13 @@ func (c *Client) Start() (addr net.Addr, err error) { defer c.clientWaitGroup.Done() defer close(linesCh) - scanner := bufio.NewScanner(cmdStdout) + scanner := bufio.NewScanner(runner.Stdout()) for scanner.Scan() { linesCh <- scanner.Text() } + if scanner.Err() != nil { + c.logger.Error("error encountered while scanning stdout", "error", scanner.Err()) + } }() // Make sure after we exit we read the lines from stdout forever @@ -672,22 +822,27 @@ func (c *Client) Start() (addr net.Addr, err error) { timeout := time.After(c.config.StartTimeout) // Start looking for the address - c.logger.Debug("waiting for RPC address", "path", cmd.Path) + c.logger.Debug("waiting for RPC address", "plugin", runner.Name()) select { case <-timeout: err = errors.New("timeout while waiting for plugin to start") case <-c.doneCtx.Done(): err = errors.New("plugin exited before we could connect") - case line := <-linesCh: + case line, ok := <-linesCh: // Trim the line and split by "|" in order to get the parts of // the output. line = strings.TrimSpace(line) - parts := strings.SplitN(line, "|", 6) + parts := strings.Split(line, "|") if len(parts) < 4 { - err = fmt.Errorf( - "Unrecognized remote plugin message: %s\n\n"+ - "This usually means that the plugin is either invalid or simply\n"+ - "needs to be recompiled to support the latest protocol.", line) + errText := fmt.Sprintf("Unrecognized remote plugin message: %s", line) + if !ok { + errText += "\n" + "Failed to read any lines from plugin's stdout" + } + additionalNotes := runner.Diagnose(context.Background()) + if additionalNotes != "" { + errText += "\n" + additionalNotes + } + err = errors.New(errText) return } @@ -722,13 +877,18 @@ func (c *Client) Start() (addr net.Addr, err error) { c.negotiatedVersion = version c.logger.Debug("using plugin", "version", version) - switch parts[2] { + network, address, err := runner.PluginToHost(parts[2], parts[3]) + if err != nil { + return addr, err + } + + switch network { case "tcp": - addr, err = net.ResolveTCPAddr("tcp", parts[3]) + addr, err = net.ResolveTCPAddr("tcp", address) case "unix": - addr, err = net.ResolveUnixAddr("unix", parts[3]) + addr, err = net.ResolveUnixAddr("unix", address) default: - err = fmt.Errorf("Unknown address type: %s", parts[3]) + err = fmt.Errorf("Unknown address type: %s", address) } // If we have a server type, then record that. We default to net/rpc @@ -760,6 +920,18 @@ func (c *Client) Start() (addr net.Addr, err error) { return nil, fmt.Errorf("error parsing server cert: %s", err) } } + + if c.config.GRPCBrokerMultiplex && c.protocol == ProtocolGRPC { + if len(parts) <= 6 { + return nil, fmt.Errorf("%w; for Go plugins, you will need to update the "+ + "github.com/hashicorp/go-plugin dependency and recompile", ErrGRPCBrokerMuxNotSupported) + } + if muxSupported, err := strconv.ParseBool(parts[6]); err != nil { + return nil, fmt.Errorf("error parsing %q as a boolean for gRPC broker multiplexing support", parts[6]) + } else if !muxSupported { + return nil, ErrGRPCBrokerMuxNotSupported + } + } } c.address = addr @@ -767,7 +939,7 @@ func (c *Client) Start() (addr net.Addr, err error) { } // loadServerCert is used by AutoMTLS to read an x.509 cert returned by the -// server, and load it as the RootCA for the client TLSConfig. +// server, and load it as the RootCA and ClientCA for the client TLSConfig. 
func (c *Client) loadServerCert(cert string) error { certPool := x509.NewCertPool() @@ -784,43 +956,35 @@ func (c *Client) loadServerCert(cert string) error { certPool.AddCert(x509Cert) c.config.TLSConfig.RootCAs = certPool + c.config.TLSConfig.ClientCAs = certPool return nil } func (c *Client) reattach() (net.Addr, error) { - // Verify the process still exists. If not, then it is an error - p, err := os.FindProcess(c.config.Reattach.Pid) - if err != nil { - // On Unix systems, FindProcess never returns an error. - // On Windows, for non-existent pids it returns: - // os.SyscallError - 'OpenProcess: the paremter is incorrect' - return nil, ErrProcessNotFound + reattachFunc := c.config.Reattach.ReattachFunc + // For backwards compatibility default to cmdrunner.ReattachFunc + if reattachFunc == nil { + reattachFunc = cmdrunner.ReattachFunc(c.config.Reattach.Pid, c.config.Reattach.Addr) } - // Attempt to connect to the addr since on Unix systems FindProcess - // doesn't actually return an error if it can't find the process. - conn, err := net.Dial( - c.config.Reattach.Addr.Network(), - c.config.Reattach.Addr.String()) + r, err := reattachFunc() if err != nil { - p.Kill() - return nil, ErrProcessNotFound + return nil, err } - conn.Close() // Create a context for when we kill c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) c.clientWaitGroup.Add(1) // Goroutine to mark exit status - go func(pid int) { + go func(r runner.AttachedRunner) { defer c.clientWaitGroup.Done() // ensure the context is cancelled when we're done defer c.ctxCancel() // Wait for the process to die - pidWait(pid) + r.Wait(context.Background()) // Log so we can see it c.logger.Debug("reattached plugin process exited") @@ -829,7 +993,7 @@ func (c *Client) reattach() (net.Addr, error) { c.l.Lock() defer c.l.Unlock() c.exited = true - }(p.Pid) + }(r) // Set the address and protocol c.address = c.config.Reattach.Addr @@ -839,11 +1003,14 @@ func (c *Client) reattach() (net.Addr, error) { c.protocol = ProtocolNetRPC } - // If we're in test mode, we do NOT set the process. This avoids the - // process being killed (the only purpose we have for c.process), since - // in test mode the process is responsible for exiting on its own. - if !c.config.Reattach.Test { - c.process = p + if c.config.Reattach.Test { + c.negotiatedVersion = c.config.Reattach.ProtocolVersion + } else { + // If we're in test mode, we do NOT set the runner. This avoids the + // runner being killed (the only purpose we have for setting c.runner + // when reattaching), since in test mode the process is responsible for + // exiting on its own. + c.runner = r } return c.address, nil @@ -882,6 +1049,9 @@ func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) // // If this returns nil then the process hasn't been started yet. Please // call Start or Client before calling this. +// +// Clients who specified a RunnerFunc will need to populate their own +// ReattachFunc in the returned ReattachConfig before it can be used. func (c *Client) ReattachConfig() *ReattachConfig { c.l.Lock() defer c.l.Unlock() @@ -899,11 +1069,16 @@ func (c *Client) ReattachConfig() *ReattachConfig { return c.config.Reattach } - return &ReattachConfig{ + reattach := &ReattachConfig{ Protocol: c.protocol, Addr: c.address, - Pid: c.config.Cmd.Process.Pid, } + + if c.config.Cmd != nil && c.config.Cmd.Process != nil { + reattach.Pid = c.config.Cmd.Process.Pid + } + + return reattach } // Protocol returns the protocol of server on the remote end. 
This will @@ -939,11 +1114,24 @@ func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) // dialer is compatible with grpc.WithDialer and creates the connection // to the plugin. func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { - conn, err := netAddrDialer(c.address)("", timeout) + muxer, err := c.getGRPCMuxer(c.address) if err != nil { return nil, err } + var conn net.Conn + if muxer.Enabled() { + conn, err = muxer.Dial() + if err != nil { + return nil, err + } + } else { + conn, err = netAddrDialer(c.address)("", timeout) + if err != nil { + return nil, err + } + } + // If we have a TLS config we wrap our connection. We only do this // for net/rpc since gRPC uses its own mechanism for TLS. if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { @@ -953,14 +1141,28 @@ func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { return conn, nil } -var stdErrBufferSize = 64 * 1024 +func (c *Client) getGRPCMuxer(addr net.Addr) (*grpcmux.GRPCClientMuxer, error) { + if c.protocol != ProtocolGRPC || !c.config.GRPCBrokerMultiplex { + return nil, nil + } + + var err error + c.grpcMuxerOnce.Do(func() { + c.grpcMuxer, err = grpcmux.NewGRPCClientMuxer(c.logger, addr) + }) + if err != nil { + return nil, err + } + + return c.grpcMuxer, nil +} -func (c *Client) logStderr(r io.Reader) { +func (c *Client) logStderr(name string, r io.Reader) { defer c.clientWaitGroup.Done() defer c.stderrWaitGroup.Done() - l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) + l := c.logger.Named(filepath.Base(name)) - reader := bufio.NewReaderSize(r, stdErrBufferSize) + reader := bufio.NewReaderSize(r, c.config.PluginLogBufferSize) // continuation indicates the previous line was a prefix continuation := false diff --git a/vendor/github.com/hashicorp/go-plugin/constants.go b/vendor/github.com/hashicorp/go-plugin/constants.go new file mode 100644 index 0000000000..e7f5bbe5f7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/constants.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +const ( + // EnvUnixSocketDir specifies the directory that _plugins_ should create unix + // sockets in. Does not affect client behavior. + EnvUnixSocketDir = "PLUGIN_UNIX_SOCKET_DIR" + + // EnvUnixSocketGroup specifies the owning, writable group to set for Unix + // sockets created by _plugins_. Does not affect client behavior. + EnvUnixSocketGroup = "PLUGIN_UNIX_SOCKET_GROUP" + + envMultiplexGRPC = "PLUGIN_MULTIPLEX_GRPC" +) diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go index d22c566ed5..c5b96242b1 100644 --- a/vendor/github.com/hashicorp/go-plugin/discover.go +++ b/vendor/github.com/hashicorp/go-plugin/discover.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go index 22a7baa6a0..e62a21913f 100644 --- a/vendor/github.com/hashicorp/go-plugin/error.go +++ b/vendor/github.com/hashicorp/go-plugin/error.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package plugin // This is a type that wraps error types so that they can be messaged diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go index daf142d170..5b17e37fef 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -11,7 +14,9 @@ import ( "sync/atomic" "time" + "github.com/hashicorp/go-plugin/internal/grpcmux" "github.com/hashicorp/go-plugin/internal/plugin" + "github.com/hashicorp/go-plugin/runner" "github.com/oklog/run" "google.golang.org/grpc" @@ -36,6 +41,8 @@ type sendErr struct { // connection information to/from the plugin. Implements GRPCBrokerServer and // streamer interfaces. type gRPCBrokerServer struct { + plugin.UnimplementedGRPCBrokerServer + // send is used to send connection info to the gRPC stream. send chan *sendErr @@ -259,25 +266,41 @@ func (s *gRPCBrokerClientImpl) Close() { type GRPCBroker struct { nextId uint32 streamer streamer - streams map[uint32]*gRPCBrokerPending tls *tls.Config doneCh chan struct{} o sync.Once + clientStreams map[uint32]*gRPCBrokerPending + serverStreams map[uint32]*gRPCBrokerPending + + unixSocketCfg UnixSocketConfig + addrTranslator runner.AddrTranslator + + dialMutex sync.Mutex + + muxer grpcmux.GRPCMuxer + sync.Mutex } type gRPCBrokerPending struct { ch chan *plugin.ConnInfo doneCh chan struct{} + once sync.Once } -func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker { +func newGRPCBroker(s streamer, tls *tls.Config, unixSocketCfg UnixSocketConfig, addrTranslator runner.AddrTranslator, muxer grpcmux.GRPCMuxer) *GRPCBroker { return &GRPCBroker{ streamer: s, - streams: make(map[uint32]*gRPCBrokerPending), tls: tls, doneCh: make(chan struct{}), + + clientStreams: make(map[uint32]*gRPCBrokerPending), + serverStreams: make(map[uint32]*gRPCBrokerPending), + muxer: muxer, + + unixSocketCfg: unixSocketCfg, + addrTranslator: addrTranslator, } } @@ -285,15 +308,59 @@ func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker { // // This should not be called multiple times with the same ID at one time. func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { - listener, err := serverListener() + if b.muxer.Enabled() { + p := b.getServerStream(id) + go func() { + err := b.listenForKnocks(id) + if err != nil { + log.Printf("[ERR]: error listening for knocks, id: %d, error: %s", id, err) + } + }() + + ln, err := b.muxer.Listener(id, p.doneCh) + if err != nil { + return nil, err + } + + ln = &rmListener{ + Listener: ln, + close: func() error { + // We could have multiple listeners on the same ID, so use sync.Once + // for closing doneCh to ensure we don't get a panic. + p.once.Do(func() { + close(p.doneCh) + }) + + b.Lock() + defer b.Unlock() + + // No longer need to listen for knocks once the listener is closed. 
+ delete(b.serverStreams, id) + + return nil + }, + } + + return ln, nil + } + + listener, err := serverListener(b.unixSocketCfg) if err != nil { return nil, err } + advertiseNet := listener.Addr().Network() + advertiseAddr := listener.Addr().String() + if b.addrTranslator != nil { + advertiseNet, advertiseAddr, err = b.addrTranslator.HostToPlugin(advertiseNet, advertiseAddr) + if err != nil { + return nil, err + } + } err = b.streamer.Send(&plugin.ConnInfo{ ServiceId: id, - Network: listener.Addr().Network(), - Address: listener.Addr().String(), + Network: advertiseNet, + Address: advertiseAddr, }) if err != nil { return nil, err @@ -309,20 +376,20 @@ func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { // connection is opened every call, these calls should be used sparingly. // Multiple gRPC server implementations can be registered to a single // AcceptAndServe call. -func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) { - listener, err := b.Accept(id) +func (b *GRPCBroker) AcceptAndServe(id uint32, newGRPCServer func([]grpc.ServerOption) *grpc.Server) { + ln, err := b.Accept(id) if err != nil { log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) return } - defer listener.Close() + defer ln.Close() var opts []grpc.ServerOption if b.tls != nil { opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} } - server := s(opts) + server := newGRPCServer(opts) // Here we use a run group to close this goroutine if the server is shutdown // or the broker is shutdown. @@ -330,7 +397,7 @@ func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc { // Serve on the listener, if shutting down call GracefulStop. g.Add(func() error { - return server.Serve(listener) + return server.Serve(ln) }, func(err error) { server.GracefulStop() }) @@ -363,12 +430,108 @@ func (b *GRPCBroker) Close() error { return nil } +func (b *GRPCBroker) listenForKnocks(id uint32) error { + p := b.getServerStream(id) + for { + select { + case msg := <-p.ch: + // Shouldn't be possible. + if msg.ServiceId != id { + return fmt.Errorf("knock received with wrong service ID; expected %d but got %d", id, msg.ServiceId) + } + + // Also shouldn't be possible. + if msg.Knock == nil || !msg.Knock.Knock || msg.Knock.Ack { + return fmt.Errorf("knock received for service ID %d with incorrect values; knock=%+v", id, msg.Knock) + } + + // Successful knock, open the door for the given ID. + var ackError string + err := b.muxer.AcceptKnock(id) + if err != nil { + ackError = err.Error() + } + + // Send back an acknowledgement to allow the client to start dialling. + err = b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Knock: &plugin.ConnInfo_Knock{ + Knock: true, + Ack: true, + Error: ackError, + }, + }) + if err != nil { + return fmt.Errorf("error sending back knock acknowledgement: %w", err) + } + case <-p.doneCh: + return nil + } + } +} + +func (b *GRPCBroker) knock(id uint32) error { + // Send a knock. + err := b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Knock: &plugin.ConnInfo_Knock{ + Knock: true, + }, + }) + if err != nil { + return err + } + + // Wait for the ack. 
+ p := b.getClientStream(id) + select { + case msg := <-p.ch: + if msg.ServiceId != id { + return fmt.Errorf("handshake failed for multiplexing on id %d; got response for %d", id, msg.ServiceId) + } + if msg.Knock == nil || !msg.Knock.Knock || !msg.Knock.Ack { + return fmt.Errorf("handshake failed for multiplexing on id %d; expected knock and ack, but got %+v", id, msg.Knock) + } + if msg.Knock.Error != "" { + return fmt.Errorf("failed to knock for id %d: %s", id, msg.Knock.Error) + } + case <-time.After(5 * time.Second): + return fmt.Errorf("timeout waiting for multiplexing knock handshake on id %d", id) + } + + return nil +} + +func (b *GRPCBroker) muxDial(id uint32) func(string, time.Duration) (net.Conn, error) { + return func(string, time.Duration) (net.Conn, error) { + b.dialMutex.Lock() + defer b.dialMutex.Unlock() + + // Tell the other side the listener ID it should give the next stream to. + err := b.knock(id) + if err != nil { + return nil, fmt.Errorf("failed to knock before dialling client: %w", err) + } + + conn, err := b.muxer.Dial() + if err != nil { + return nil, err + } + + return conn, nil + } +} + // Dial opens a connection by ID. func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + if b.muxer.Enabled() { + return dialGRPCConn(b.tls, b.muxDial(id)) + } + var c *plugin.ConnInfo // Open the stream - p := b.getStream(id) + p := b.getClientStream(id) select { case c = <-p.ch: close(p.doneCh) @@ -376,12 +539,20 @@ func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { return nil, fmt.Errorf("timeout waiting for connection info") } + network, address := c.Network, c.Address + if b.addrTranslator != nil { + network, address, err = b.addrTranslator.PluginToHost(network, address) + if err != nil { + return nil, err + } + } + var addr net.Addr - switch c.Network { + switch network { case "tcp": - addr, err = net.ResolveTCPAddr("tcp", c.Address) + addr, err = net.ResolveTCPAddr("tcp", address) case "unix": - addr, err = net.ResolveUnixAddr("unix", c.Address) + addr, err = net.ResolveUnixAddr("unix", address) default: err = fmt.Errorf("Unknown address type: %s", c.Address) } @@ -408,37 +579,63 @@ func (m *GRPCBroker) NextId() uint32 { // the plugin host/client. func (m *GRPCBroker) Run() { for { - stream, err := m.streamer.Recv() + msg, err := m.streamer.Recv() if err != nil { // Once we receive an error, just exit break } // Initialize the waiter - p := m.getStream(stream.ServiceId) + var p *gRPCBrokerPending + if msg.Knock != nil && msg.Knock.Knock && !msg.Knock.Ack { + p = m.getServerStream(msg.ServiceId) + // The server side doesn't close the channel immediately as it needs + // to continuously listen for knocks. + } else { + p = m.getClientStream(msg.ServiceId) + go m.timeoutWait(msg.ServiceId, p) + } select { - case p.ch <- stream: + case p.ch <- msg: default: } + } +} + +// getClientStream is a buffer to receive new connection info and knock acks +// by stream ID. +func (m *GRPCBroker) getClientStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.clientStreams[id] + if ok { + return p + } - go m.timeoutWait(stream.ServiceId, p) + m.clientStreams[id] = &gRPCBrokerPending{ + ch: make(chan *plugin.ConnInfo, 1), + doneCh: make(chan struct{}), } + return m.clientStreams[id] } -func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending { +// getServerStream is a buffer to receive knocks to a multiplexed stream ID +// that its side is listening on. Not used unless multiplexing is enabled. 
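+// Unlike client streams, server streams are not reaped by timeoutWait; they
+// persist until the listener for their ID is closed, so repeated knocks on
+// the same ID keep working.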
+func (m *GRPCBroker) getServerStream(id uint32) *gRPCBrokerPending { m.Lock() defer m.Unlock() - p, ok := m.streams[id] + p, ok := m.serverStreams[id] if ok { return p } - m.streams[id] = &gRPCBrokerPending{ + m.serverStreams[id] = &gRPCBrokerPending{ ch: make(chan *plugin.ConnInfo, 1), doneCh: make(chan struct{}), } - return m.streams[id] + return m.serverStreams[id] } func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { @@ -453,5 +650,5 @@ func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { defer m.Unlock() // Delete the stream so no one else can grab it - delete(m.streams, id) + delete(m.clientStreams, id) } diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go index 9781219132..627649d839 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -1,6 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( + "context" "crypto/tls" "fmt" "math" @@ -8,15 +12,14 @@ import ( "time" "github.com/hashicorp/go-plugin/internal/plugin" - "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/health/grpc_health_v1" ) -func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error), dialOpts ...grpc.DialOption) (*grpc.ClientConn, error) { // Build dialing options. - opts := make([]grpc.DialOption, 0, 5) + opts := make([]grpc.DialOption, 0) // We use a custom dialer so that we can connect over unix domain sockets. opts = append(opts, grpc.WithDialer(dialer)) @@ -37,6 +40,9 @@ func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) + // Add our custom options if we have any + opts = append(opts, dialOpts...) + // Connect. Note the first parameter is unused because we use a custom // dialer that has the state to see the address. conn, err := grpc.Dial("unused", opts...) @@ -50,14 +56,19 @@ func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, // newGRPCClient creates a new GRPCClient. The Client argument is expected // to be successfully started already with a lock held. func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { - conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer) + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer, c.config.GRPCDialOptions...) + if err != nil { + return nil, err + } + + muxer, err := c.getGRPCMuxer(c.address) if err != nil { return nil, err } // Start the broker. brokerGRPCClient := newGRPCBrokerClient(conn) - broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig, c.unixSocketCfg, c.runner, muxer) go broker.Run() go brokerGRPCClient.StartStream() diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go index 1a8a8e70ea..2085356cd3 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go index 387628bf48..a5f40c7f06 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -9,6 +12,7 @@ import ( "net" hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/grpcmux" "github.com/hashicorp/go-plugin/internal/plugin" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -58,6 +62,8 @@ type GRPCServer struct { stdioServer *grpcStdioServer logger hclog.Logger + + muxer *grpcmux.GRPCServerMuxer } // ServerProtocol impl. @@ -81,7 +87,7 @@ func (s *GRPCServer) Init() error { // Register the broker service brokerServer := newGRPCBrokerServer() plugin.RegisterGRPCBrokerServer(s.server, brokerServer) - s.broker = newGRPCBroker(brokerServer, s.TLS) + s.broker = newGRPCBroker(brokerServer, s.TLS, unixSocketConfigFromEnv(), nil, s.muxer) go s.broker.Run() // Register the controller @@ -107,14 +113,26 @@ func (s *GRPCServer) Init() error { return nil } -// Stop calls Stop on the underlying grpc.Server +// Stop calls Stop on the underlying grpc.Server and Close on the underlying +// grpc.Broker if present. func (s *GRPCServer) Stop() { s.server.Stop() + + if s.broker != nil { + s.broker.Close() + s.broker = nil + } } -// GracefulStop calls GracefulStop on the underlying grpc.Server +// GracefulStop calls GracefulStop on the underlying grpc.Server and Close on +// the underlying grpc.Broker if present. func (s *GRPCServer) GracefulStop() { s.server.GracefulStop() + + if s.broker != nil { + s.broker.Close() + s.broker = nil + } } // Config is the GRPCServerConfig encoded as JSON then base64. diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go index 6231a9fd62..ae06c11631 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -136,12 +139,12 @@ func (c *grpcStdioClient) Run(stdout, stderr io.Writer) { status.Code(err) == codes.Canceled || status.Code(err) == codes.Unimplemented || err == context.Canceled { - c.log.Warn("received EOF, stopping recv loop", "err", err) + c.log.Debug("received EOF, stopping recv loop", "err", err) return } c.log.Error("error receiving data", "err", err) - continue + return } // Determine our output writer based on channel diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go new file mode 100644 index 0000000000..1854d2dd53 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cmdrunner + +// addrTranslator implements stateless identity functions, as the host and plugin +// run in the same context wrt Unix and network addresses. 
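+// Embedding it gives CmdRunner and CmdAttachedRunner pass-through
+// PluginToHost and HostToPlugin methods, as used by the broker's
+// runner.AddrTranslator hooks.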
+type addrTranslator struct{}
+
+func (*addrTranslator) PluginToHost(pluginNet, pluginAddr string) (string, string, error) {
+	return pluginNet, pluginAddr, nil
+}
+
+func (*addrTranslator) HostToPlugin(hostNet, hostAddr string) (string, string, error) {
+	return hostNet, hostAddr, nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go
new file mode 100644
index 0000000000..dce1a86a88
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go
@@ -0,0 +1,63 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cmdrunner
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"os"
+
+	"github.com/hashicorp/go-plugin/runner"
+)
+
+// ReattachFunc returns a function that allows reattaching to a plugin running
+// as a plain process. The process may or may not be a child process.
+func ReattachFunc(pid int, addr net.Addr) runner.ReattachFunc {
+	return func() (runner.AttachedRunner, error) {
+		p, err := os.FindProcess(pid)
+		if err != nil {
+			// On Unix systems, FindProcess never returns an error.
+			// On Windows, for non-existent pids it returns:
+			// os.SyscallError - 'OpenProcess: the parameter is incorrect'
+			return nil, ErrProcessNotFound
+		}
+
+		// Attempt to connect to the addr since on Unix systems FindProcess
+		// doesn't actually return an error if it can't find the process.
+		conn, err := net.Dial(addr.Network(), addr.String())
+		if err != nil {
+			p.Kill()
+			return nil, ErrProcessNotFound
+		}
+		conn.Close()
+
+		return &CmdAttachedRunner{
+			pid:     pid,
+			process: p,
+		}, nil
+	}
+}
+
+// CmdAttachedRunner is mostly a subset of CmdRunner, except the Wait function
+// does not assume the process is a child of the host process, and so uses a
+// different implementation to wait on the process.
+type CmdAttachedRunner struct {
+	pid     int
+	process *os.Process
+
+	addrTranslator
+}
+
+func (c *CmdAttachedRunner) Wait(_ context.Context) error {
+	return pidWait(c.pid)
+}
+
+func (c *CmdAttachedRunner) Kill(_ context.Context) error {
+	return c.process.Kill()
+}
+
+func (c *CmdAttachedRunner) ID() string {
+	return fmt.Sprintf("%d", c.pid)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go
new file mode 100644
index 0000000000..b26fea928e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go
@@ -0,0 +1,129 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cmdrunner
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-plugin/runner"
+)
+
+var (
+	_ runner.Runner = (*CmdRunner)(nil)
+
+	// ErrProcessNotFound is returned when a client is instantiated to
+	// reattach to an existing process and it isn't found.
+	ErrProcessNotFound = errors.New("Reattachment process not found")
+)
+
+const unrecognizedRemotePluginMessage = `This usually means
+  the plugin was not compiled for this architecture,
+  the plugin is missing dynamic-link libraries necessary to run,
+  the plugin is not executable by this process due to file permissions, or
+  the plugin failed to negotiate the initial go-plugin protocol handshake
+%s`
+
+// CmdRunner implements the runner.Runner interface. It mostly just passes through
+// to exec.Cmd methods.
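+// As an illustrative sketch (names hypothetical), a host might do:
+//
+//	r, err := NewCmdRunner(logger, exec.Command(pluginPath))
+//
+// where the *exec.Cmd must not have been started yet.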
+type CmdRunner struct { + logger hclog.Logger + cmd *exec.Cmd + + stdout io.ReadCloser + stderr io.ReadCloser + + // Cmd info is persisted early, since the process information will be removed + // after Kill is called. + path string + pid int + + addrTranslator +} + +// NewCmdRunner returns an implementation of runner.Runner for running a plugin +// as a subprocess. It must be passed a cmd that hasn't yet been started. +func NewCmdRunner(logger hclog.Logger, cmd *exec.Cmd) (*CmdRunner, error) { + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + + return &CmdRunner{ + logger: logger, + cmd: cmd, + stdout: stdout, + stderr: stderr, + path: cmd.Path, + }, nil +} + +func (c *CmdRunner) Start(_ context.Context) error { + c.logger.Debug("starting plugin", "path", c.cmd.Path, "args", c.cmd.Args) + err := c.cmd.Start() + if err != nil { + return err + } + + c.pid = c.cmd.Process.Pid + c.logger.Debug("plugin started", "path", c.path, "pid", c.pid) + return nil +} + +func (c *CmdRunner) Wait(_ context.Context) error { + return c.cmd.Wait() +} + +func (c *CmdRunner) Kill(_ context.Context) error { + if c.cmd.Process != nil { + err := c.cmd.Process.Kill() + // Swallow ErrProcessDone, we support calling Kill multiple times. + if !errors.Is(err, os.ErrProcessDone) { + return err + } + return nil + } + + return nil +} + +func (c *CmdRunner) Stdout() io.ReadCloser { + return c.stdout +} + +func (c *CmdRunner) Stderr() io.ReadCloser { + return c.stderr +} + +func (c *CmdRunner) Name() string { + return c.path +} + +func (c *CmdRunner) ID() string { + return fmt.Sprintf("%d", c.pid) +} + +// peTypes is a list of Portable Executable (PE) machine types from https://learn.microsoft.com/en-us/windows/win32/debug/pe-format +// mapped to GOARCH types. It is not comprehensive, and only includes machine types that Go supports. +var peTypes = map[uint16]string{ + 0x14c: "386", + 0x1c0: "arm", + 0x6264: "loong64", + 0x8664: "amd64", + 0xaa64: "arm64", +} + +func (c *CmdRunner) Diagnose(_ context.Context) string { + return fmt.Sprintf(unrecognizedRemotePluginMessage, additionalNotesAboutCommand(c.cmd.Path)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go new file mode 100644 index 0000000000..ce04cfebc6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows +// +build !windows + +package cmdrunner + +import ( + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "os" + "os/user" + "runtime" + "strconv" + "syscall" +) + +// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose +// why it won't run correctly. It runs as a best effort only. +func additionalNotesAboutCommand(path string) string { + notes := "" + stat, err := os.Stat(path) + if err != nil { + return notes + } + + notes += "\nAdditional notes about plugin:\n" + notes += fmt.Sprintf(" Path: %s\n", path) + notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) + statT, ok := stat.Sys().(*syscall.Stat_t) + if ok { + currentUsername := "?" + if u, err := user.LookupId(strconv.FormatUint(uint64(os.Getuid()), 10)); err == nil { + currentUsername = u.Username + } + currentGroup := "?" 
+ if g, err := user.LookupGroupId(strconv.FormatUint(uint64(os.Getgid()), 10)); err == nil { + currentGroup = g.Name + } + username := "?" + if u, err := user.LookupId(strconv.FormatUint(uint64(statT.Uid), 10)); err == nil { + username = u.Username + } + group := "?" + if g, err := user.LookupGroupId(strconv.FormatUint(uint64(statT.Gid), 10)); err == nil { + group = g.Name + } + notes += fmt.Sprintf(" Owner: %d [%s] (current: %d [%s])\n", statT.Uid, username, os.Getuid(), currentUsername) + notes += fmt.Sprintf(" Group: %d [%s] (current: %d [%s])\n", statT.Gid, group, os.Getgid(), currentGroup) + } + + if elfFile, err := elf.Open(path); err == nil { + defer elfFile.Close() + notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) + } else if machoFile, err := macho.Open(path); err == nil { + defer machoFile.Close() + notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) + } else if peFile, err := pe.Open(path); err == nil { + defer peFile.Close() + machine, ok := peTypes[peFile.Machine] + if !ok { + machine = "unknown" + } + notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH) + } + return notes +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go new file mode 100644 index 0000000000..39c51dd1e0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build windows +// +build windows + +package cmdrunner + +import ( + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "os" + "runtime" +) + +// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose +// why it won't run correctly. It runs as a best effort only. +func additionalNotesAboutCommand(path string) string { + notes := "" + stat, err := os.Stat(path) + if err != nil { + return notes + } + + notes += "\nAdditional notes about plugin:\n" + notes += fmt.Sprintf(" Path: %s\n", path) + notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) + + if elfFile, err := elf.Open(path); err == nil { + defer elfFile.Close() + notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) + } else if machoFile, err := macho.Open(path); err == nil { + defer machoFile.Close() + notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) + } else if peFile, err := pe.Open(path); err == nil { + defer peFile.Close() + machine, ok := peTypes[peFile.Machine] + if !ok { + machine = "unknown" + } + notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH) + } + return notes +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go new file mode 100644 index 0000000000..6c34dc7747 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cmdrunner + +import "time" + +// pidAlive checks whether a pid is alive. +func pidAlive(pid int) bool { + return _pidAlive(pid) +} + +// pidWait blocks for a process to exit. 
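+// It polls pidAlive once per second rather than waiting on a child process
+// handle, which is what lets CmdAttachedRunner wait on processes that are
+// not children of the host.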
+func pidWait(pid int) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for range ticker.C { + if !pidAlive(pid) { + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go new file mode 100644 index 0000000000..bf3fc5b683 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows +// +build !windows + +package cmdrunner + +import ( + "os" + "syscall" +) + +// _pidAlive tests whether a process is alive or not by sending it Signal 0, +// since Go otherwise has no way to test this. +func _pidAlive(pid int) bool { + proc, err := os.FindProcess(pid) + if err == nil { + err = proc.Signal(syscall.Signal(0)) + } + + return err == nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go similarity index 81% rename from vendor/github.com/hashicorp/go-plugin/process_windows.go rename to vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go index 9f7b018090..6c39df28f7 100644 --- a/vendor/github.com/hashicorp/go-plugin/process_windows.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go @@ -1,4 +1,7 @@ -package plugin +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cmdrunner import ( "syscall" @@ -19,6 +22,7 @@ func _pidAlive(pid int) bool { if err != nil { return false } + defer syscall.CloseHandle(h) var ec uint32 if e := syscall.GetExitCodeProcess(h, &ec); e != nil { diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go new file mode 100644 index 0000000000..e8a3a152a1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go @@ -0,0 +1,51 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "io" + "net" + + "github.com/hashicorp/yamux" +) + +var _ net.Listener = (*blockedClientListener)(nil) + +// blockedClientListener accepts connections for a specific gRPC broker stream +// ID on the client (host) side of the connection. +type blockedClientListener struct { + session *yamux.Session + waitCh chan struct{} + doneCh <-chan struct{} +} + +func newBlockedClientListener(session *yamux.Session, doneCh <-chan struct{}) *blockedClientListener { + return &blockedClientListener{ + waitCh: make(chan struct{}, 1), + doneCh: doneCh, + session: session, + } +} + +func (b *blockedClientListener) Accept() (net.Conn, error) { + select { + case <-b.waitCh: + return b.session.Accept() + case <-b.doneCh: + return nil, io.EOF + } +} + +func (b *blockedClientListener) Addr() net.Addr { + return b.session.Addr() +} + +func (b *blockedClientListener) Close() error { + // We don't close the session, the client muxer is responsible for that. 
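+	// The shared session stays usable for other stream IDs;
+	// GRPCClientMuxer.Close is what finally tears it down.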
+ return nil +} + +func (b *blockedClientListener) unblock() { + b.waitCh <- struct{}{} +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go new file mode 100644 index 0000000000..0edb2c05d2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "io" + "net" +) + +var _ net.Listener = (*blockedServerListener)(nil) + +// blockedServerListener accepts connections for a specific gRPC broker stream +// ID on the server (plugin) side of the connection. +type blockedServerListener struct { + addr net.Addr + acceptCh chan acceptResult + doneCh <-chan struct{} +} + +type acceptResult struct { + conn net.Conn + err error +} + +func newBlockedServerListener(addr net.Addr, doneCh <-chan struct{}) *blockedServerListener { + return &blockedServerListener{ + addr: addr, + acceptCh: make(chan acceptResult), + doneCh: doneCh, + } +} + +func (b *blockedServerListener) Accept() (net.Conn, error) { + select { + case accept := <-b.acceptCh: + return accept.conn, accept.err + case <-b.doneCh: + return nil, io.EOF + } +} + +func (b *blockedServerListener) Addr() net.Addr { + return b.addr +} + +func (b *blockedServerListener) Close() error { + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go new file mode 100644 index 0000000000..b203ba467b --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go @@ -0,0 +1,105 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "fmt" + "net" + "sync" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/yamux" +) + +var _ GRPCMuxer = (*GRPCClientMuxer)(nil) + +// GRPCClientMuxer implements the client (host) side of the gRPC broker's +// GRPCMuxer interface for multiplexing multiple gRPC broker connections over +// a single net.Conn. +// +// The client dials the initial net.Conn eagerly, and creates a yamux.Session +// as the implementation for multiplexing any additional connections. +// +// Each net.Listener returned from Listener will block until the client receives +// a knock that matches its gRPC broker stream ID. There is no default listener +// on the client, as it is a client for the gRPC broker's control services. (See +// GRPCServerMuxer for more details). +type GRPCClientMuxer struct { + logger hclog.Logger + session *yamux.Session + + acceptMutex sync.Mutex + acceptListeners map[uint32]*blockedClientListener +} + +func NewGRPCClientMuxer(logger hclog.Logger, addr net.Addr) (*GRPCClientMuxer, error) { + // Eagerly establish the underlying connection as early as possible. 
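+	// This single conn then backs the yamux.Session; every later Listener
+	// and Dial call multiplexes a stream over it.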
+ logger.Debug("making new client mux initial connection", "addr", addr) + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + _ = tcpConn.SetKeepAlive(true) + } + + cfg := yamux.DefaultConfig() + cfg.Logger = logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{ + InferLevels: true, + }) + cfg.LogOutput = nil + sess, err := yamux.Client(conn, cfg) + if err != nil { + return nil, err + } + + logger.Debug("client muxer connected", "addr", addr) + m := &GRPCClientMuxer{ + logger: logger, + session: sess, + acceptListeners: make(map[uint32]*blockedClientListener), + } + + return m, nil +} + +func (m *GRPCClientMuxer) Enabled() bool { + return m != nil +} + +func (m *GRPCClientMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) { + ln := newBlockedClientListener(m.session, doneCh) + + m.acceptMutex.Lock() + m.acceptListeners[id] = ln + m.acceptMutex.Unlock() + + return ln, nil +} + +func (m *GRPCClientMuxer) AcceptKnock(id uint32) error { + m.acceptMutex.Lock() + defer m.acceptMutex.Unlock() + + ln, ok := m.acceptListeners[id] + if !ok { + return fmt.Errorf("no listener for id %d", id) + } + ln.unblock() + return nil +} + +func (m *GRPCClientMuxer) Dial() (net.Conn, error) { + stream, err := m.session.Open() + if err != nil { + return nil, fmt.Errorf("error dialling new client stream: %w", err) + } + + return stream, nil +} + +func (m *GRPCClientMuxer) Close() error { + return m.session.Close() +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go new file mode 100644 index 0000000000..c52aaf553e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "net" +) + +// GRPCMuxer enables multiple implementations of net.Listener to accept +// connections over a single "main" multiplexed net.Conn, and dial multiple +// client connections over the same multiplexed net.Conn. +// +// The first multiplexed connection is used to serve the gRPC broker's own +// control services: plugin.GRPCBroker, plugin.GRPCController, plugin.GRPCStdio. +// +// Clients must "knock" before dialling, to tell the server side that the +// next net.Conn should be accepted onto a specific stream ID. The knock is a +// bidirectional streaming message on the plugin.GRPCBroker service. +type GRPCMuxer interface { + // Enabled determines whether multiplexing should be used. It saves users + // of the interface from having to compare an interface with nil, which + // is a bit awkward to do correctly. + Enabled() bool + + // Listener returns a multiplexed listener that will wait until AcceptKnock + // is called with a matching ID before its Accept function returns. + Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) + + // AcceptKnock unblocks the listener with the matching ID, and returns an + // error if it hasn't been created yet. + AcceptKnock(id uint32) error + + // Dial makes a new multiplexed client connection. To dial a specific ID, + // a knock must be sent first. + Dial() (net.Conn, error) + + // Close closes connections and releases any resources associated with the + // muxer. 
+	Close() error
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go
new file mode 100644
index 0000000000..27696ee769
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go
@@ -0,0 +1,190 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package grpcmux
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/yamux"
+)
+
+var _ GRPCMuxer = (*GRPCServerMuxer)(nil)
+var _ net.Listener = (*GRPCServerMuxer)(nil)
+
+// GRPCServerMuxer implements the server (plugin) side of the gRPC broker's
+// GRPCMuxer interface for multiplexing multiple gRPC broker connections over
+// a single net.Conn.
+//
+// The server side needs a listener to serve the gRPC broker's control services,
+// which includes the service we will receive knocks on. That means we always
+// accept the first connection onto a "default" main listener, and if we accept
+// any further connections without receiving a knock first, they are also given
+// to the default listener.
+//
+// When creating additional multiplexed listeners for specific stream IDs, we
+// can't control the order in which gRPC servers will call Accept() on each
+// listener, but we do need to control which gRPC server accepts which connection.
+// As such, each multiplexed listener blocks waiting on a channel. It will be
+// unblocked when a knock is received for the matching stream ID.
+type GRPCServerMuxer struct {
+	addr   net.Addr
+	logger hclog.Logger
+
+	sessionErrCh chan error
+	sess         *yamux.Session
+
+	knockCh chan uint32
+
+	acceptMutex    sync.Mutex
+	acceptChannels map[uint32]chan acceptResult
+}
+
+func NewGRPCServerMuxer(logger hclog.Logger, ln net.Listener) *GRPCServerMuxer {
+	m := &GRPCServerMuxer{
+		addr:   ln.Addr(),
+		logger: logger,
+
+		sessionErrCh: make(chan error),
+
+		knockCh:        make(chan uint32, 1),
+		acceptChannels: make(map[uint32]chan acceptResult),
+	}
+
+	go m.acceptSession(ln)
+
+	return m
+}
+
+// acceptSession establishes the yamux session over the first connection
+// accepted from the listener, reporting any error on sessionErrCh.
+func (m *GRPCServerMuxer) acceptSession(ln net.Listener) {
+	defer close(m.sessionErrCh)
+
+	m.logger.Debug("accepting initial connection", "addr", m.addr)
+	conn, err := ln.Accept()
+	if err != nil {
+		m.sessionErrCh <- err
+		return
+	}
+
+	m.logger.Debug("initial server connection accepted", "addr", m.addr)
+	cfg := yamux.DefaultConfig()
+	cfg.Logger = m.logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{
+		InferLevels: true,
+	})
+	cfg.LogOutput = nil
+	m.sess, err = yamux.Server(conn, cfg)
+	if err != nil {
+		m.sessionErrCh <- err
+		return
+	}
+}
+
+func (m *GRPCServerMuxer) session() (*yamux.Session, error) {
+	select {
+	case err := <-m.sessionErrCh:
+		if err != nil {
+			return nil, err
+		}
+	case <-time.After(5 * time.Second):
+		return nil, errors.New("timed out waiting for connection to be established")
+	}
+
+	// Should never happen.
+	if m.sess == nil {
+		return nil, errors.New("no connection established and no error received")
+	}
+
+	return m.sess, nil
+}
+
+// Accept accepts all incoming connections and routes them to the correct
+// stream ID based on the most recent knock received.
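+// Connections that arrive while no knock is pending are returned directly,
+// i.e. they serve the default listener for the broker's control services.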
+func (m *GRPCServerMuxer) Accept() (net.Conn, error) { + session, err := m.session() + if err != nil { + return nil, fmt.Errorf("error establishing yamux session: %w", err) + } + + for { + conn, acceptErr := session.Accept() + + select { + case id := <-m.knockCh: + m.acceptMutex.Lock() + acceptCh, ok := m.acceptChannels[id] + m.acceptMutex.Unlock() + + if !ok { + if conn != nil { + _ = conn.Close() + } + return nil, fmt.Errorf("received knock on ID %d that doesn't have a listener", id) + } + m.logger.Debug("sending conn to brokered listener", "id", id) + acceptCh <- acceptResult{ + conn: conn, + err: acceptErr, + } + default: + m.logger.Debug("sending conn to default listener") + return conn, acceptErr + } + } +} + +func (m *GRPCServerMuxer) Addr() net.Addr { + return m.addr +} + +func (m *GRPCServerMuxer) Close() error { + session, err := m.session() + if err != nil { + return err + } + + return session.Close() +} + +func (m *GRPCServerMuxer) Enabled() bool { + return m != nil +} + +func (m *GRPCServerMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) { + sess, err := m.session() + if err != nil { + return nil, err + } + + ln := newBlockedServerListener(sess.Addr(), doneCh) + m.acceptMutex.Lock() + m.acceptChannels[id] = ln.acceptCh + m.acceptMutex.Unlock() + + return ln, nil +} + +func (m *GRPCServerMuxer) Dial() (net.Conn, error) { + sess, err := m.session() + if err != nil { + return nil, err + } + + stream, err := sess.OpenStream() + if err != nil { + return nil, fmt.Errorf("error dialling new server stream: %w", err) + } + + return stream, nil +} + +func (m *GRPCServerMuxer) AcceptKnock(id uint32) error { + m.knockCh <- id + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go deleted file mode 100644 index fb9d415254..0000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto ./grpc_stdio.proto --go_out=plugins=grpc:. - -package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go index 6bf103859f..acc6dc9c77 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -1,203 +1,264 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_broker.proto +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: internal/plugin/grpc_broker.proto package plugin -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ConnInfo struct { - ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` - Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` - Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + Knock *ConnInfo_Knock `protobuf:"bytes,4,opt,name=knock,proto3" json:"knock,omitempty"` } -func (m *ConnInfo) Reset() { *m = ConnInfo{} } -func (m *ConnInfo) String() string { return proto.CompactTextString(m) } -func (*ConnInfo) ProtoMessage() {} -func (*ConnInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_grpc_broker_3322b07398605250, []int{0} -} -func (m *ConnInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnInfo.Unmarshal(m, b) -} -func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) -} -func (dst *ConnInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnInfo.Merge(dst, src) +func (x *ConnInfo) Reset() { + *x = ConnInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ConnInfo) XXX_Size() int { - return xxx_messageInfo_ConnInfo.Size(m) + +func (x *ConnInfo) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ConnInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ConnInfo.DiscardUnknown(m) + +func (*ConnInfo) ProtoMessage() {} + +func (x *ConnInfo) ProtoReflect() protoreflect.Message { + mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ConnInfo proto.InternalMessageInfo +// Deprecated: Use ConnInfo.ProtoReflect.Descriptor instead. 
+func (*ConnInfo) Descriptor() ([]byte, []int) { + return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0} +} -func (m *ConnInfo) GetServiceId() uint32 { - if m != nil { - return m.ServiceId +func (x *ConnInfo) GetServiceId() uint32 { + if x != nil { + return x.ServiceId } return 0 } -func (m *ConnInfo) GetNetwork() string { - if m != nil { - return m.Network +func (x *ConnInfo) GetNetwork() string { + if x != nil { + return x.Network } return "" } -func (m *ConnInfo) GetAddress() string { - if m != nil { - return m.Address +func (x *ConnInfo) GetAddress() string { + if x != nil { + return x.Address } return "" } -func init() { - proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") +func (x *ConnInfo) GetKnock() *ConnInfo_Knock { + if x != nil { + return x.Knock + } + return nil } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn +type ConnInfo_Knock struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// GRPCBrokerClient is the client API for GRPCBroker service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type GRPCBrokerClient interface { - StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) + Knock bool `protobuf:"varint,1,opt,name=knock,proto3" json:"knock,omitempty"` + Ack bool `protobuf:"varint,2,opt,name=ack,proto3" json:"ack,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` } -type gRPCBrokerClient struct { - cc *grpc.ClientConn +func (x *ConnInfo_Knock) Reset() { + *x = ConnInfo_Knock{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient { - return &gRPCBrokerClient{cc} +func (x *ConnInfo_Knock) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], "/plugin.GRPCBroker/StartStream", opts...) - if err != nil { - return nil, err - } - x := &gRPCBrokerStartStreamClient{stream} - return x, nil -} +func (*ConnInfo_Knock) ProtoMessage() {} -type GRPCBroker_StartStreamClient interface { - Send(*ConnInfo) error - Recv() (*ConnInfo, error) - grpc.ClientStream +func (x *ConnInfo_Knock) ProtoReflect() protoreflect.Message { + mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type gRPCBrokerStartStreamClient struct { - grpc.ClientStream +// Deprecated: Use ConnInfo_Knock.ProtoReflect.Descriptor instead. 
+func (*ConnInfo_Knock) Descriptor() ([]byte, []int) { + return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0, 0} } -func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { - return x.ClientStream.SendMsg(m) +func (x *ConnInfo_Knock) GetKnock() bool { + if x != nil { + return x.Knock + } + return false } -func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { - m := new(ConnInfo) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *ConnInfo_Knock) GetAck() bool { + if x != nil { + return x.Ack } - return m, nil + return false } -// GRPCBrokerServer is the server API for GRPCBroker service. -type GRPCBrokerServer interface { - StartStream(GRPCBroker_StartStreamServer) error +func (x *ConnInfo_Knock) GetError() string { + if x != nil { + return x.Error + } + return "" } -func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) { - s.RegisterService(&_GRPCBroker_serviceDesc, srv) +var File_internal_plugin_grpc_broker_proto protoreflect.FileDescriptor + +var file_internal_plugin_grpc_broker_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, 0xd2, 0x01, 0x0a, 0x08, + 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2c, 0x0a, 0x05, 0x6b, + 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4b, 0x6e, 0x6f, + 0x63, 0x6b, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x1a, 0x45, 0x0a, 0x05, 0x4b, 0x6e, 0x6f, + 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x32, 0x43, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x35, + 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x10, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x1a, + 0x10, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x28, 0x01, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) -} +var ( + file_internal_plugin_grpc_broker_proto_rawDescOnce sync.Once + file_internal_plugin_grpc_broker_proto_rawDescData = 
file_internal_plugin_grpc_broker_proto_rawDesc +) -type GRPCBroker_StartStreamServer interface { - Send(*ConnInfo) error - Recv() (*ConnInfo, error) - grpc.ServerStream +func file_internal_plugin_grpc_broker_proto_rawDescGZIP() []byte { + file_internal_plugin_grpc_broker_proto_rawDescOnce.Do(func() { + file_internal_plugin_grpc_broker_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_broker_proto_rawDescData) + }) + return file_internal_plugin_grpc_broker_proto_rawDescData } -type gRPCBrokerStartStreamServer struct { - grpc.ServerStream +var file_internal_plugin_grpc_broker_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_plugin_grpc_broker_proto_goTypes = []interface{}{ + (*ConnInfo)(nil), // 0: plugin.ConnInfo + (*ConnInfo_Knock)(nil), // 1: plugin.ConnInfo.Knock } - -func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { - return x.ServerStream.SendMsg(m) +var file_internal_plugin_grpc_broker_proto_depIdxs = []int32{ + 1, // 0: plugin.ConnInfo.knock:type_name -> plugin.ConnInfo.Knock + 0, // 1: plugin.GRPCBroker.StartStream:input_type -> plugin.ConnInfo + 0, // 2: plugin.GRPCBroker.StartStream:output_type -> plugin.ConnInfo + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } -func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { - m := new(ConnInfo) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err +func init() { file_internal_plugin_grpc_broker_proto_init() } +func file_internal_plugin_grpc_broker_proto_init() { + if File_internal_plugin_grpc_broker_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_plugin_grpc_broker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_plugin_grpc_broker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnInfo_Knock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - return m, nil -} - -var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ - ServiceName: "plugin.GRPCBroker", - HandlerType: (*GRPCBrokerServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "StartStream", - Handler: _GRPCBroker_StartStream_Handler, - ServerStreams: true, - ClientStreams: true, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_plugin_grpc_broker_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, }, - }, - Metadata: "grpc_broker.proto", -} - -func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_grpc_broker_3322b07398605250) } - -var fileDescriptor_grpc_broker_3322b07398605250 = []byte{ - // 175 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, - 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, - 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, - 0xcb, 0x17, 0x92, 0xe5, 0xe2, 
0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, - 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, - 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, - 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, - 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, - 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, - 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, - 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, + GoTypes: file_internal_plugin_grpc_broker_proto_goTypes, + DependencyIndexes: file_internal_plugin_grpc_broker_proto_depIdxs, + MessageInfos: file_internal_plugin_grpc_broker_proto_msgTypes, + }.Build() + File_internal_plugin_grpc_broker_proto = out.File + file_internal_plugin_grpc_broker_proto_rawDesc = nil + file_internal_plugin_grpc_broker_proto_goTypes = nil + file_internal_plugin_grpc_broker_proto_depIdxs = nil } diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto index aa3df4630a..c92cd645cb 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -1,11 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + syntax = "proto3"; package plugin; -option go_package = "plugin"; +option go_package = "./plugin"; message ConnInfo { uint32 service_id = 1; string network = 2; string address = 3; + message Knock { + bool knock = 1; + bool ack = 2; + string error = 3; + } + Knock knock = 4; } service GRPCBroker { diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go new file mode 100644 index 0000000000..1b0f80705d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go @@ -0,0 +1,142 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: internal/plugin/grpc_broker.proto + +package plugin + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + GRPCBroker_StartStream_FullMethodName = "/plugin.GRPCBroker/StartStream" +) + +// GRPCBrokerClient is the client API for GRPCBroker service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc grpc.ClientConnInterface +} + +func NewGRPCBrokerClient(cc grpc.ClientConnInterface) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &GRPCBroker_ServiceDesc.Streams[0], GRPCBroker_StartStream_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBrokerServer is the server API for GRPCBroker service. +// All implementations should embed UnimplementedGRPCBrokerServer +// for forward compatibility +type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +// UnimplementedGRPCBrokerServer should be embedded to have forward compatible implementations. +type UnimplementedGRPCBrokerServer struct { +} + +func (UnimplementedGRPCBrokerServer) StartStream(GRPCBroker_StartStreamServer) error { + return status.Errorf(codes.Unimplemented, "method StartStream not implemented") +} + +// UnsafeGRPCBrokerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to GRPCBrokerServer will +// result in compilation errors. +type UnsafeGRPCBrokerServer interface { + mustEmbedUnimplementedGRPCBrokerServer() +} + +func RegisterGRPCBrokerServer(s grpc.ServiceRegistrar, srv GRPCBrokerServer) { + s.RegisterService(&GRPCBroker_ServiceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBroker_ServiceDesc is the grpc.ServiceDesc for GRPCBroker service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var GRPCBroker_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "internal/plugin/grpc_broker.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go index 3e39da95a8..8ca48e0d92 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -1,145 +1,141 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_controller.proto +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: internal/plugin/grpc_controller.proto package plugin -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Empty struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { - return fileDescriptor_grpc_controller_08f8296ef6d80436, []int{0} -} -func (m *Empty) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Empty.Unmarshal(m, b) -} -func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Empty.Marshal(b, m, deterministic) -} -func (dst *Empty) XXX_Merge(src proto.Message) { - xxx_messageInfo_Empty.Merge(dst, src) -} -func (m *Empty) XXX_Size() int { - return xxx_messageInfo_Empty.Size(m) -} -func (m *Empty) XXX_DiscardUnknown() { - xxx_messageInfo_Empty.DiscardUnknown(m) +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -var xxx_messageInfo_Empty proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Empty)(nil), "plugin.Empty") +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +func (*Empty) ProtoMessage() {} -// GRPCControllerClient is the client API for GRPCController service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type GRPCControllerClient interface { - Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type gRPCControllerClient struct { - cc *grpc.ClientConn +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_internal_plugin_grpc_controller_proto_rawDescGZIP(), []int{0} } -func NewGRPCControllerClient(cc *grpc.ClientConn) GRPCControllerClient { - return &gRPCControllerClient{cc} -} +var File_internal_plugin_grpc_controller_proto protoreflect.FileDescriptor -func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, "/plugin.GRPCController/Shutdown", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil +var file_internal_plugin_grpc_controller_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, + 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x3a, 0x0a, 0x0e, 0x47, 0x52, 0x50, 0x43, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x08, 0x53, 0x68, + 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -// GRPCControllerServer is the server API for GRPCController service. -type GRPCControllerServer interface { - Shutdown(context.Context, *Empty) (*Empty, error) +var ( + file_internal_plugin_grpc_controller_proto_rawDescOnce sync.Once + file_internal_plugin_grpc_controller_proto_rawDescData = file_internal_plugin_grpc_controller_proto_rawDesc +) + +func file_internal_plugin_grpc_controller_proto_rawDescGZIP() []byte { + file_internal_plugin_grpc_controller_proto_rawDescOnce.Do(func() { + file_internal_plugin_grpc_controller_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_controller_proto_rawDescData) + }) + return file_internal_plugin_grpc_controller_proto_rawDescData } -func RegisterGRPCControllerServer(s *grpc.Server, srv GRPCControllerServer) { - s.RegisterService(&_GRPCController_serviceDesc, srv) +var file_internal_plugin_grpc_controller_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_plugin_grpc_controller_proto_goTypes = []interface{}{ + (*Empty)(nil), // 0: plugin.Empty +} +var file_internal_plugin_grpc_controller_proto_depIdxs = []int32{ + 0, // 0: plugin.GRPCController.Shutdown:input_type -> plugin.Empty + 0, // 1: plugin.GRPCController.Shutdown:output_type -> plugin.Empty + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GRPCControllerServer).Shutdown(ctx, in) +func init() { file_internal_plugin_grpc_controller_proto_init() } +func file_internal_plugin_grpc_controller_proto_init() { + if File_internal_plugin_grpc_controller_proto != nil { + return } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/plugin.GRPCController/Shutdown", + if !protoimpl.UnsafeEnabled { + file_internal_plugin_grpc_controller_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) - } - return 
interceptor(ctx, in, info, handler) -} - -var _GRPCController_serviceDesc = grpc.ServiceDesc{ - ServiceName: "plugin.GRPCController", - HandlerType: (*GRPCControllerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Shutdown", - Handler: _GRPCController_Shutdown_Handler, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_plugin_grpc_controller_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "grpc_controller.proto", -} - -func init() { - proto.RegisterFile("grpc_controller.proto", fileDescriptor_grpc_controller_08f8296ef6d80436) -} - -var fileDescriptor_grpc_controller_08f8296ef6d80436 = []byte{ - // 108 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, - 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, - 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, - 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, - 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, - 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, + GoTypes: file_internal_plugin_grpc_controller_proto_goTypes, + DependencyIndexes: file_internal_plugin_grpc_controller_proto_depIdxs, + MessageInfos: file_internal_plugin_grpc_controller_proto_msgTypes, + }.Build() + File_internal_plugin_grpc_controller_proto = out.File + file_internal_plugin_grpc_controller_proto_rawDesc = nil + file_internal_plugin_grpc_controller_proto_goTypes = nil + file_internal_plugin_grpc_controller_proto_depIdxs = nil } diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto index 345d0a1c1f..2755fa638b 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto @@ -1,6 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + syntax = "proto3"; package plugin; -option go_package = "plugin"; +option go_package = "./plugin"; message Empty { } diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go new file mode 100644 index 0000000000..427611aa00 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go @@ -0,0 +1,110 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: internal/plugin/grpc_controller.proto + +package plugin + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +const ( + GRPCController_Shutdown_FullMethodName = "/plugin.GRPCController/Shutdown" +) + +// GRPCControllerClient is the client API for GRPCController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type GRPCControllerClient interface { + Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type gRPCControllerClient struct { + cc grpc.ClientConnInterface +} + +func NewGRPCControllerClient(cc grpc.ClientConnInterface) GRPCControllerClient { + return &gRPCControllerClient{cc} +} + +func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, GRPCController_Shutdown_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GRPCControllerServer is the server API for GRPCController service. +// All implementations should embed UnimplementedGRPCControllerServer +// for forward compatibility +type GRPCControllerServer interface { + Shutdown(context.Context, *Empty) (*Empty, error) +} + +// UnimplementedGRPCControllerServer should be embedded to have forward compatible implementations. +type UnimplementedGRPCControllerServer struct { +} + +func (UnimplementedGRPCControllerServer) Shutdown(context.Context, *Empty) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") +} + +// UnsafeGRPCControllerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to GRPCControllerServer will +// result in compilation errors. +type UnsafeGRPCControllerServer interface { + mustEmbedUnimplementedGRPCControllerServer() +} + +func RegisterGRPCControllerServer(s grpc.ServiceRegistrar, srv GRPCControllerServer) { + s.RegisterService(&GRPCController_ServiceDesc, srv) +} + +func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GRPCControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: GRPCController_Shutdown_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// GRPCController_ServiceDesc is the grpc.ServiceDesc for GRPCController service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var GRPCController_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCController", + HandlerType: (*GRPCControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Shutdown", + Handler: _GRPCController_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "internal/plugin/grpc_controller.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go index c8f94921b4..139cbb4a90 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go @@ -1,28 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_stdio.proto +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: internal/plugin/grpc_stdio.proto package plugin -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import empty "github.com/golang/protobuf/ptypes/empty" - import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type StdioData_Channel int32 @@ -32,202 +32,194 @@ const ( StdioData_STDERR StdioData_Channel = 2 ) -var StdioData_Channel_name = map[int32]string{ - 0: "INVALID", - 1: "STDOUT", - 2: "STDERR", -} -var StdioData_Channel_value = map[string]int32{ - "INVALID": 0, - "STDOUT": 1, - "STDERR": 2, -} - -func (x StdioData_Channel) String() string { - return proto.EnumName(StdioData_Channel_name, int32(x)) -} -func (StdioData_Channel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0, 0} -} +// Enum value maps for StdioData_Channel. +var ( + StdioData_Channel_name = map[int32]string{ + 0: "INVALID", + 1: "STDOUT", + 2: "STDERR", + } + StdioData_Channel_value = map[string]int32{ + "INVALID": 0, + "STDOUT": 1, + "STDERR": 2, + } +) -// StdioData is a single chunk of stdout or stderr data that is streamed -// from GRPCStdio. 
-type StdioData struct { - Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x StdioData_Channel) Enum() *StdioData_Channel { + p := new(StdioData_Channel) + *p = x + return p } -func (m *StdioData) Reset() { *m = StdioData{} } -func (m *StdioData) String() string { return proto.CompactTextString(m) } -func (*StdioData) ProtoMessage() {} -func (*StdioData) Descriptor() ([]byte, []int) { - return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0} -} -func (m *StdioData) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StdioData.Unmarshal(m, b) -} -func (m *StdioData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StdioData.Marshal(b, m, deterministic) -} -func (dst *StdioData) XXX_Merge(src proto.Message) { - xxx_messageInfo_StdioData.Merge(dst, src) -} -func (m *StdioData) XXX_Size() int { - return xxx_messageInfo_StdioData.Size(m) -} -func (m *StdioData) XXX_DiscardUnknown() { - xxx_messageInfo_StdioData.DiscardUnknown(m) +func (x StdioData_Channel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -var xxx_messageInfo_StdioData proto.InternalMessageInfo - -func (m *StdioData) GetChannel() StdioData_Channel { - if m != nil { - return m.Channel - } - return StdioData_INVALID +func (StdioData_Channel) Descriptor() protoreflect.EnumDescriptor { + return file_internal_plugin_grpc_stdio_proto_enumTypes[0].Descriptor() } -func (m *StdioData) GetData() []byte { - if m != nil { - return m.Data - } - return nil +func (StdioData_Channel) Type() protoreflect.EnumType { + return &file_internal_plugin_grpc_stdio_proto_enumTypes[0] } -func init() { - proto.RegisterType((*StdioData)(nil), "plugin.StdioData") - proto.RegisterEnum("plugin.StdioData_Channel", StdioData_Channel_name, StdioData_Channel_value) +func (x StdioData_Channel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// GRPCStdioClient is the client API for GRPCStdio service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type GRPCStdioClient interface { - // StreamStdio returns a stream that contains all the stdout/stderr. - // This RPC endpoint must only be called ONCE. Once stdio data is consumed - // it is not sent again. - // - // Callers should connect early to prevent blocking on the plugin process. - StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) +// Deprecated: Use StdioData_Channel.Descriptor instead. +func (StdioData_Channel) EnumDescriptor() ([]byte, []int) { + return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0, 0} } -type gRPCStdioClient struct { - cc *grpc.ClientConn -} +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. 
+type StdioData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func NewGRPCStdioClient(cc *grpc.ClientConn) GRPCStdioClient { - return &gRPCStdioClient{cc} + Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` } -func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) { - stream, err := c.cc.NewStream(ctx, &_GRPCStdio_serviceDesc.Streams[0], "/plugin.GRPCStdio/StreamStdio", opts...) - if err != nil { - return nil, err - } - x := &gRPCStdioStreamStdioClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err +func (x *StdioData) Reset() { + *x = StdioData{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return x, nil } -type GRPCStdio_StreamStdioClient interface { - Recv() (*StdioData, error) - grpc.ClientStream +func (x *StdioData) String() string { + return protoimpl.X.MessageStringOf(x) } -type gRPCStdioStreamStdioClient struct { - grpc.ClientStream -} +func (*StdioData) ProtoMessage() {} -func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) { - m := new(StdioData) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *StdioData) ProtoReflect() protoreflect.Message { + mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return m, nil -} - -// GRPCStdioServer is the server API for GRPCStdio service. -type GRPCStdioServer interface { - // StreamStdio returns a stream that contains all the stdout/stderr. - // This RPC endpoint must only be called ONCE. Once stdio data is consumed - // it is not sent again. - // - // Callers should connect early to prevent blocking on the plugin process. - StreamStdio(*empty.Empty, GRPCStdio_StreamStdioServer) error + return mi.MessageOf(x) } -func RegisterGRPCStdioServer(s *grpc.Server, srv GRPCStdioServer) { - s.RegisterService(&_GRPCStdio_serviceDesc, srv) +// Deprecated: Use StdioData.ProtoReflect.Descriptor instead. 
+func (*StdioData) Descriptor() ([]byte, []int) { + return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0} } -func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(empty.Empty) - if err := stream.RecvMsg(m); err != nil { - return err +func (x *StdioData) GetChannel() StdioData_Channel { + if x != nil { + return x.Channel } - return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream}) -} - -type GRPCStdio_StreamStdioServer interface { - Send(*StdioData) error - grpc.ServerStream + return StdioData_INVALID } -type gRPCStdioStreamStdioServer struct { - grpc.ServerStream +func (x *StdioData) GetData() []byte { + if x != nil { + return x.Data + } + return nil } -func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error { - return x.ServerStream.SendMsg(m) -} +var File_internal_plugin_grpc_stdio_proto protoreflect.FileDescriptor + +var file_internal_plugin_grpc_stdio_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x64, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x01, 0x0a, 0x09, 0x53, 0x74, 0x64, 0x69, + 0x6f, 0x44, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x53, 0x74, 0x64, 0x69, 0x6f, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2e, + 0x0a, 0x07, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, + 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x4f, 0x55, 0x54, + 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x45, 0x52, 0x52, 0x10, 0x02, 0x32, 0x47, + 0x0a, 0x09, 0x47, 0x52, 0x50, 0x43, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x64, 0x69, + 0x6f, 0x44, 0x61, 0x74, 0x61, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_plugin_grpc_stdio_proto_rawDescOnce sync.Once + file_internal_plugin_grpc_stdio_proto_rawDescData = file_internal_plugin_grpc_stdio_proto_rawDesc +) -var _GRPCStdio_serviceDesc = grpc.ServiceDesc{ - ServiceName: "plugin.GRPCStdio", - HandlerType: (*GRPCStdioServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamStdio", - Handler: _GRPCStdio_StreamStdio_Handler, - ServerStreams: true, +func file_internal_plugin_grpc_stdio_proto_rawDescGZIP() []byte { + file_internal_plugin_grpc_stdio_proto_rawDescOnce.Do(func() { + file_internal_plugin_grpc_stdio_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_stdio_proto_rawDescData) + }) + return 
file_internal_plugin_grpc_stdio_proto_rawDescData +} + +var file_internal_plugin_grpc_stdio_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_internal_plugin_grpc_stdio_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_plugin_grpc_stdio_proto_goTypes = []interface{}{ + (StdioData_Channel)(0), // 0: plugin.StdioData.Channel + (*StdioData)(nil), // 1: plugin.StdioData + (*emptypb.Empty)(nil), // 2: google.protobuf.Empty +} +var file_internal_plugin_grpc_stdio_proto_depIdxs = []int32{ + 0, // 0: plugin.StdioData.channel:type_name -> plugin.StdioData.Channel + 2, // 1: plugin.GRPCStdio.StreamStdio:input_type -> google.protobuf.Empty + 1, // 2: plugin.GRPCStdio.StreamStdio:output_type -> plugin.StdioData + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_internal_plugin_grpc_stdio_proto_init() } +func file_internal_plugin_grpc_stdio_proto_init() { + if File_internal_plugin_grpc_stdio_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_plugin_grpc_stdio_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StdioData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_plugin_grpc_stdio_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, }, - }, - Metadata: "grpc_stdio.proto", -} - -func init() { proto.RegisterFile("grpc_stdio.proto", fileDescriptor_grpc_stdio_db2934322ca63bd5) } - -var fileDescriptor_grpc_stdio_db2934322ca63bd5 = []byte{ - // 221 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x2f, 0x2a, 0x48, - 0x8e, 0x2f, 0x2e, 0x49, 0xc9, 0xcc, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0xc8, - 0x29, 0x4d, 0xcf, 0xcc, 0x93, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x07, 0x8b, 0x26, - 0x95, 0xa6, 0xe9, 0xa7, 0xe6, 0x16, 0x94, 0x54, 0x42, 0x14, 0x29, 0xb5, 0x30, 0x72, 0x71, 0x06, - 0x83, 0x34, 0xb9, 0x24, 0x96, 0x24, 0x0a, 0x19, 0x73, 0xb1, 0x27, 0x67, 0x24, 0xe6, 0xe5, 0xa5, - 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x19, 0x49, 0xea, 0x41, 0x0c, 0xd1, 0x83, 0xab, 0xd1, - 0x73, 0x86, 0x28, 0x08, 0x82, 0xa9, 0x14, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, 0x49, 0x94, 0x60, - 0x52, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0xf4, 0xb8, 0xd8, 0xa1, 0xea, 0x84, 0xb8, 0xb9, - 0xd8, 0x3d, 0xfd, 0xc2, 0x1c, 0x7d, 0x3c, 0x5d, 0x04, 0x18, 0x84, 0xb8, 0xb8, 0xd8, 0x82, 0x43, - 0x5c, 0xfc, 0x43, 0x43, 0x04, 0x18, 0xa1, 0x6c, 0xd7, 0xa0, 0x20, 0x01, 0x26, 0x23, 0x77, 0x2e, - 0x4e, 0xf7, 0xa0, 0x00, 0x67, 0xb0, 0x2d, 0x42, 0x56, 0x5c, 0xdc, 0xc1, 0x25, 0x45, 0xa9, 0x89, - 0xb9, 0x10, 0xae, 0x98, 0x1e, 0xc4, 0x03, 0x7a, 0x30, 0x0f, 0xe8, 0xb9, 0x82, 0x3c, 0x20, 0x25, - 0x88, 0xe1, 0x36, 0x03, 0x46, 0x27, 0x8e, 0x28, 0xa8, 0xb7, 0x93, 0xd8, 0xc0, 0xca, 0x8d, 0x01, - 0x01, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xbb, 0xe0, 0x69, 0x19, 0x01, 0x00, 0x00, + GoTypes: file_internal_plugin_grpc_stdio_proto_goTypes, + DependencyIndexes: file_internal_plugin_grpc_stdio_proto_depIdxs, + EnumInfos: file_internal_plugin_grpc_stdio_proto_enumTypes, + 
MessageInfos: file_internal_plugin_grpc_stdio_proto_msgTypes, + }.Build() + File_internal_plugin_grpc_stdio_proto = out.File + file_internal_plugin_grpc_stdio_proto_rawDesc = nil + file_internal_plugin_grpc_stdio_proto_goTypes = nil + file_internal_plugin_grpc_stdio_proto_depIdxs = nil } diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto index ce1a122303..f48ac76c97 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto @@ -1,6 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + syntax = "proto3"; package plugin; -option go_package = "plugin"; +option go_package = "./plugin"; import "google/protobuf/empty.proto"; diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go new file mode 100644 index 0000000000..f82b150350 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go @@ -0,0 +1,148 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: internal/plugin/grpc_stdio.proto + +package plugin + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + GRPCStdio_StreamStdio_FullMethodName = "/plugin.GRPCStdio/StreamStdio" +) + +// GRPCStdioClient is the client API for GRPCStdio service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type GRPCStdioClient interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) +} + +type gRPCStdioClient struct { + cc grpc.ClientConnInterface +} + +func NewGRPCStdioClient(cc grpc.ClientConnInterface) GRPCStdioClient { + return &gRPCStdioClient{cc} +} + +func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) { + stream, err := c.cc.NewStream(ctx, &GRPCStdio_ServiceDesc.Streams[0], GRPCStdio_StreamStdio_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &gRPCStdioStreamStdioClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type GRPCStdio_StreamStdioClient interface { + Recv() (*StdioData, error) + grpc.ClientStream +} + +type gRPCStdioStreamStdioClient struct { + grpc.ClientStream +} + +func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) { + m := new(StdioData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCStdioServer is the server API for GRPCStdio service. +// All implementations should embed UnimplementedGRPCStdioServer +// for forward compatibility +type GRPCStdioServer interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error +} + +// UnimplementedGRPCStdioServer should be embedded to have forward compatible implementations. +type UnimplementedGRPCStdioServer struct { +} + +func (UnimplementedGRPCStdioServer) StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error { + return status.Errorf(codes.Unimplemented, "method StreamStdio not implemented") +} + +// UnsafeGRPCStdioServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to GRPCStdioServer will +// result in compilation errors. +type UnsafeGRPCStdioServer interface { + mustEmbedUnimplementedGRPCStdioServer() +} + +func RegisterGRPCStdioServer(s grpc.ServiceRegistrar, srv GRPCStdioServer) { + s.RegisterService(&GRPCStdio_ServiceDesc, srv) +} + +func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(emptypb.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream}) +} + +type GRPCStdio_StreamStdioServer interface { + Send(*StdioData) error + grpc.ServerStream +} + +type gRPCStdioStreamStdioServer struct { + grpc.ServerStream +} + +func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error { + return x.ServerStream.SendMsg(m) +} + +// GRPCStdio_ServiceDesc is the grpc.ServiceDesc for GRPCStdio service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var GRPCStdio_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCStdio", + HandlerType: (*GRPCStdioServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamStdio", + Handler: _GRPCStdio_StreamStdio_Handler, + ServerStreams: true, + }, + }, + Metadata: "internal/plugin/grpc_stdio.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go index fb2ef930ca..ab963d56b5 100644 --- a/vendor/github.com/hashicorp/go-plugin/log_entry.go +++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go index 8895524587..09ecafaf45 100644 --- a/vendor/github.com/hashicorp/go-plugin/mtls.go +++ b/vendor/github.com/hashicorp/go-plugin/mtls.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go index 01c45ad7c6..4eb1208fbb 100644 --- a/vendor/github.com/hashicorp/go-plugin/mux_broker.go +++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go index 79d9674633..184749b96e 100644 --- a/vendor/github.com/hashicorp/go-plugin/plugin.go +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // The plugin package exposes functions and helpers for communicating to // plugins which are implemented as standalone binary applications. // diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go index 88c999a580..b88446361d 100644 --- a/vendor/github.com/hashicorp/go-plugin/process.go +++ b/vendor/github.com/hashicorp/go-plugin/process.go @@ -1,24 +1,4 @@ -package plugin - -import ( - "time" -) - -// pidAlive checks whether a pid is alive. -func pidAlive(pid int) bool { - return _pidAlive(pid) -} +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 -// pidWait blocks for a process to exit. -func pidWait(pid int) error { - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - - for range ticker.C { - if !pidAlive(pid) { - break - } - } - - return nil -} +package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go deleted file mode 100644 index 70ba546bf6..0000000000 --- a/vendor/github.com/hashicorp/go-plugin/process_posix.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package plugin - -import ( - "os" - "syscall" -) - -// _pidAlive tests whether a process is alive or not by sending it Signal 0, -// since Go otherwise has no way to test this. -func _pidAlive(pid int) bool { - proc, err := os.FindProcess(pid) - if err == nil { - err = proc.Signal(syscall.Signal(0)) - } - - return err == nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go index 0cfc19e52d..e4b7be3837 100644 --- a/vendor/github.com/hashicorp/go-plugin/protocol.go +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go index f30a4b1d38..142454df80 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go index 5bb18dd5db..cec0a3d93a 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -42,10 +45,16 @@ func (s *RPCServer) Config() string { return "" } // ServerProtocol impl. func (s *RPCServer) Serve(lis net.Listener) { + defer s.done() + for { conn, err := lis.Accept() if err != nil { - log.Printf("[ERR] plugin: plugin server: %s", err) + severity := "ERR" + if errors.Is(err, net.ErrClosed) { + severity = "DEBUG" + } + log.Printf("[%s] plugin: plugin server: %s", severity, err) return } @@ -78,7 +87,7 @@ func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) { // Connect the stdstreams (in, out, err) stdstream := make([]net.Conn, 2) - for i, _ := range stdstream { + for i := range stdstream { stdstream[i], err = mux.Accept() if err != nil { mux.Close() @@ -129,13 +138,15 @@ type controlServer struct { // Ping can be called to verify the connection (and likely the binary) // is still alive to a plugin. func (c *controlServer) Ping( - null bool, response *struct{}) error { + null bool, response *struct{}, +) error { *response = struct{}{} return nil } func (c *controlServer) Quit( - null bool, response *struct{}) error { + null bool, response *struct{}, +) error { // End the server c.server.done() @@ -152,7 +163,8 @@ type dispenseServer struct { } func (d *dispenseServer) Dispense( - name string, response *uint32) error { + name string, response *uint32, +) error { // Find the function to create this implementation p, ok := d.plugins[name] if !ok { diff --git a/vendor/github.com/hashicorp/go-plugin/runner/runner.go b/vendor/github.com/hashicorp/go-plugin/runner/runner.go new file mode 100644 index 0000000000..e638ae5f8e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/runner/runner.go @@ -0,0 +1,72 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package runner + +import ( + "context" + "io" +) + +// Runner defines the interface required by go-plugin to manage the lifecycle of +// of a plugin and attempt to negotiate a connection with it. Note that this +// is orthogonal to the protocol and transport used, which is negotiated over stdout. +type Runner interface { + // Start should start the plugin and ensure any work required for servicing + // other interface methods is done. If the context is cancelled, it should + // only abort any attempts to _start_ the plugin. Waiting and shutdown are + // handled separately. + Start(ctx context.Context) error + + // Diagnose makes a best-effort attempt to return any debug information that + // might help users understand why a plugin failed to start and negotiate a + // connection. + Diagnose(ctx context.Context) string + + // Stdout is used to negotiate the go-plugin protocol. + Stdout() io.ReadCloser + + // Stderr is used for forwarding plugin logs to the host process logger. + Stderr() io.ReadCloser + + // Name is a human-friendly name for the plugin, such as the path to the + // executable. It does not have to be unique. + Name() string + + AttachedRunner +} + +// AttachedRunner defines a limited subset of Runner's interface to represent the +// reduced responsibility for plugin lifecycle when attaching to an already running +// plugin. 
+type AttachedRunner interface { + // Wait should wait until the plugin stops running, whether in response to + // an out of band signal or in response to calling Kill(). + Wait(ctx context.Context) error + + // Kill should stop the plugin and perform any cleanup required. + Kill(ctx context.Context) error + + // ID is a unique identifier to represent the running plugin. e.g. pid or + // container ID. + ID() string + + AddrTranslator +} + +// AddrTranslator translates addresses between the execution context of the host +// process and the plugin. For example, if the plugin is in a container, the file +// path for a Unix socket may be different between the host and the container. +// +// It is only intended to be used by the host process. +type AddrTranslator interface { + // Called before connecting on any addresses received back from the plugin. + PluginToHost(pluginNet, pluginAddr string) (hostNet string, hostAddr string, err error) + + // Called on any host process addresses before they are sent to the plugin. + HostToPlugin(hostNet, hostAddr string) (pluginNet string, pluginAddr string, err error) +} + +// ReattachFunc can be passed to a client's reattach config to reattach to an +// already running plugin instead of starting it ourselves. +type ReattachFunc func() (AttachedRunner, error) diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index 002d6080d4..e741bc7fa1 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -8,17 +11,17 @@ import ( "errors" "fmt" "io" - "io/ioutil" - "log" "net" "os" "os/signal" + "os/user" "runtime" "sort" "strconv" "strings" hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/grpcmux" "google.golang.org/grpc" ) @@ -132,6 +135,13 @@ type ServeTestConfig struct { SyncStdio bool } +func unixSocketConfigFromEnv() UnixSocketConfig { + return UnixSocketConfig{ + Group: os.Getenv(EnvUnixSocketGroup), + socketDir: os.Getenv(EnvUnixSocketDir), + } +} + // protocolVersion determines the protocol version and plugin set to be used by // the server. In the event that there is no suitable version, the last version // in the config is returned leaving the client to report the incompatibility. 
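The new runner package introduced above is the seam between go-plugin and whatever actually executes the plugin binary. To make the AddrTranslator contract concrete, here is a minimal pass-through sketch — the behavior you would want when host and plugin share a filesystem and network namespace, i.e. no container boundary. The type name identityTranslator is illustrative and not part of this change; only the two method signatures come from runner/runner.go.

package runner

// identityTranslator is a hypothetical AddrTranslator that passes
// addresses through unchanged: with no container boundary, an address
// that is valid for the plugin is equally valid for the host.
type identityTranslator struct{}

func (identityTranslator) PluginToHost(pluginNet, pluginAddr string) (string, string, error) {
	return pluginNet, pluginAddr, nil
}

func (identityTranslator) HostToPlugin(hostNet, hostAddr string) (string, string, error) {
	return hostNet, hostAddr, nil
}

// Compile-time assertion against the interface defined in this package.
var _ AddrTranslator = identityTranslator{}

A containerized runner would instead translate, for example, a Unix socket path inside the container to the corresponding bind-mounted path on the host before the host process dials it.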
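Further down in this server.go diff, Serve stops printing the handshake line directly and instead builds it in protocolLine, appending a seventh |-separated field only when the gRPC-multiplexing environment variable is set. The comment in that hunk explains why: old clients parse with strings.SplitN(line, "|", 6), so an unconditional seventh field would be fused onto the sixth as "sixthpart|true". Below is a minimal sketch of the newer parsing style that comment assumes — splitting on every separator and treating the seventh field as optional. The field layout is taken from the format string in the diff; the function name and all concrete values are invented for illustration.

package main

import (
	"fmt"
	"strings"
)

// parseHandshake splits a plugin handshake line into its fields:
// core-version|proto-version|network|address|protocol|cert, plus an
// optional trailing "true" advertising gRPC broker multiplexing.
func parseHandshake(line string) (fields []string, multiplexed bool, err error) {
	fields = strings.Split(strings.TrimSpace(line), "|")
	if len(fields) < 6 {
		return nil, false, fmt.Errorf("expected at least 6 handshake fields, got %d", len(fields))
	}
	return fields, len(fields) >= 7 && fields[6] == "true", nil
}

func main() {
	// Hypothetical line from a plugin that supports multiplexing
	// (the empty sixth field means no TLS certificate was sent back).
	fields, mux, err := parseHandshake("1|6|unix|/tmp/plugin123|grpc||true")
	if err != nil {
		panic(err)
	}
	fmt.Println(fields[2], fields[3], mux) // unix /tmp/plugin123 true
}

Splitting on every separator keeps the line extensible: a client that only understands six fields still reads them correctly, and newer fields stay opt-in behind the environment-variable gate shown in the hunk below.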
@@ -260,9 +270,6 @@ func Serve(opts *ServeConfig) { // start with default version in the handshake config protoVersion, protoType, pluginSet := protocolVersion(opts) - // Logging goes to the original stderr - log.SetOutput(os.Stderr) - logger := opts.Logger if logger == nil { // internal logger to os.Stderr @@ -274,7 +281,7 @@ func Serve(opts *ServeConfig) { } // Register a listener so we can accept a connection - listener, err := serverListener() + listener, err := serverListener(unixSocketConfigFromEnv()) if err != nil { logger.Error("plugin init error", "error", err) return @@ -308,13 +315,13 @@ func Serve(opts *ServeConfig) { certPEM, keyPEM, err := generateCert() if err != nil { - logger.Error("failed to generate client certificate", "error", err) + logger.Error("failed to generate server certificate", "error", err) panic(err) } cert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { - logger.Error("failed to parse client certificate", "error", err) + logger.Error("failed to parse server certificate", "error", err) panic(err) } @@ -323,6 +330,8 @@ func Serve(opts *ServeConfig) { ClientAuth: tls.RequireAndVerifyClientCert, ClientCAs: clientCertPool, MinVersion: tls.VersionTLS12, + RootCAs: clientCertPool, + ServerName: "localhost", } // We send back the raw leaf cert data for the client rather than the @@ -379,6 +388,12 @@ func Serve(opts *ServeConfig) { } case ProtocolGRPC: + var muxer *grpcmux.GRPCServerMuxer + if multiplex, _ := strconv.ParseBool(os.Getenv(envMultiplexGRPC)); multiplex { + muxer = grpcmux.NewGRPCServerMuxer(logger, listener) + listener = muxer + } + // Create the gRPC server server = &GRPCServer{ Plugins: pluginSet, @@ -388,6 +403,7 @@ func Serve(opts *ServeConfig) { Stderr: stderr_r, DoneCh: doneCh, logger: logger, + muxer: muxer, } default: @@ -406,23 +422,38 @@ func Serve(opts *ServeConfig) { // bring it up. In test mode, we don't do this because clients will // attach via a reattach config. if opts.Test == nil { - fmt.Printf("%d|%d|%s|%s|%s|%s\n", + const grpcBrokerMultiplexingSupported = true + protocolLine := fmt.Sprintf("%d|%d|%s|%s|%s|%s", CoreProtocolVersion, protoVersion, listener.Addr().Network(), listener.Addr().String(), protoType, serverCert) + + // Old clients will error with new plugins if we blindly append the + // seventh segment for gRPC broker multiplexing support, because old + // client code uses strings.SplitN(line, "|", 6), which means a seventh + // segment will get appended to the sixth segment as "sixthpart|true". + // + // If the environment variable is set, we assume the client is new enough + // to handle a seventh segment, as it should now use + // strings.Split(line, "|") and always handle each segment individually. + if os.Getenv(envMultiplexGRPC) != "" { + protocolLine += fmt.Sprintf("|%v", grpcBrokerMultiplexingSupported) + } + fmt.Printf("%s\n", protocolLine) os.Stdout.Sync() } else if ch := opts.Test.ReattachConfigCh; ch != nil { // Send back the reattach config that can be used. This isn't // quite ready if they connect immediately but the client should // retry a few times. 
ch <- &ReattachConfig{ - Protocol: protoType, - Addr: listener.Addr(), - Pid: os.Getpid(), - Test: true, + Protocol: protoType, + ProtocolVersion: protoVersion, + Addr: listener.Addr(), + Pid: os.Getpid(), + Test: true, } } @@ -494,12 +525,12 @@ func Serve(opts *ServeConfig) { } } -func serverListener() (net.Listener, error) { +func serverListener(unixSocketCfg UnixSocketConfig) (net.Listener, error) { if runtime.GOOS == "windows" { return serverListener_tcp() } - return serverListener_unix() + return serverListener_unix(unixSocketCfg) } func serverListener_tcp() (net.Listener, error) { @@ -544,8 +575,8 @@ func serverListener_tcp() (net.Listener, error) { return nil, errors.New("Couldn't bind plugin TCP listener") } -func serverListener_unix() (net.Listener, error) { - tf, err := ioutil.TempFile("", "plugin") +func serverListener_unix(unixSocketCfg UnixSocketConfig) (net.Listener, error) { + tf, err := os.CreateTemp(unixSocketCfg.socketDir, "plugin") if err != nil { return nil, err } @@ -565,20 +596,62 @@ func serverListener_unix() (net.Listener, error) { return nil, err } + // By default, unix sockets are only writable by the owner. Set up a custom + // group owner and group write permissions if configured. + if unixSocketCfg.Group != "" { + err = setGroupWritable(path, unixSocketCfg.Group, 0o660) + if err != nil { + return nil, err + } + } + // Wrap the listener in rmListener so that the Unix domain socket file // is removed on close. - return &rmListener{ - Listener: l, - Path: path, - }, nil + return newDeleteFileListener(l, path), nil +} + +func setGroupWritable(path, groupString string, mode os.FileMode) error { + groupID, err := strconv.Atoi(groupString) + if err != nil { + group, err := user.LookupGroup(groupString) + if err != nil { + return fmt.Errorf("failed to find gid from %q: %w", groupString, err) + } + groupID, err = strconv.Atoi(group.Gid) + if err != nil { + return fmt.Errorf("failed to parse %q group's gid as an integer: %w", groupString, err) + } + } + + err = os.Chown(path, -1, groupID) + if err != nil { + return err + } + + err = os.Chmod(path, mode) + if err != nil { + return err + } + + return nil } // rmListener is an implementation of net.Listener that forwards most -// calls to the listener but also removes a file as part of the close. We -// use this to cleanup the unix domain socket on close. +// calls to the listener but also calls an additional close function. We +// use this to cleanup the unix domain socket on close, as well as clean +// up multiplexed listeners. type rmListener struct { net.Listener - Path string + close func() error +} + +func newDeleteFileListener(ln net.Listener, path string) *rmListener { + return &rmListener{ + Listener: ln, + close: func() error { + return os.Remove(path) + }, + } } func (l *rmListener) Close() error { @@ -588,5 +661,5 @@ func (l *rmListener) Close() error { } // Remove the file - return os.Remove(l.Path) + return l.close() } diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go index 033079ea0f..6b14b0c291 100644 --- a/vendor/github.com/hashicorp/go-plugin/server_mux.go +++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go index 1d547aaaab..a2348642d8 100644 --- a/vendor/github.com/hashicorp/go-plugin/stream.go +++ b/vendor/github.com/hashicorp/go-plugin/stream.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go index e36f2eb2b7..a8735dfc8c 100644 --- a/vendor/github.com/hashicorp/go-plugin/testing.go +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -8,7 +11,7 @@ import ( "net/rpc" hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/internal/plugin" + "github.com/hashicorp/go-plugin/internal/grpcmux" "github.com/mitchellh/go-testing-interface" "google.golang.org/grpc" ) @@ -132,49 +135,51 @@ func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, * // TestPluginGRPCConn returns a plugin gRPC client and server that are connected // together and configured. This is used to test gRPC connections. -func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { +func TestPluginGRPCConn(t testing.T, multiplex bool, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { // Create a listener - l, err := net.Listen("tcp", "127.0.0.1:0") + ln, err := serverListener(UnixSocketConfig{}) if err != nil { - t.Fatalf("err: %s", err) + t.Fatal(err) } + logger := hclog.New(&hclog.LoggerOptions{ + Level: hclog.Debug, + }) + // Start up the server + var muxer *grpcmux.GRPCServerMuxer + if multiplex { + muxer = grpcmux.NewGRPCServerMuxer(logger, ln) + ln = muxer + } server := &GRPCServer{ Plugins: ps, DoneCh: make(chan struct{}), Server: DefaultGRPCServer, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer), - logger: hclog.Default(), + logger: logger, + muxer: muxer, } if err := server.Init(); err != nil { t.Fatalf("err: %s", err) } - go server.Serve(l) - - // Connect to the server - conn, err := grpc.Dial( - l.Addr().String(), - grpc.WithBlock(), - grpc.WithInsecure()) - if err != nil { - t.Fatalf("err: %s", err) + go server.Serve(ln) + + client := &Client{ + address: ln.Addr(), + protocol: ProtocolGRPC, + config: &ClientConfig{ + Plugins: ps, + GRPCBrokerMultiplex: multiplex, + }, + logger: logger, } - brokerGRPCClient := newGRPCBrokerClient(conn) - broker := newGRPCBroker(brokerGRPCClient, nil) - go broker.Run() - go brokerGRPCClient.StartStream() - - // Create the client - client := &GRPCClient{ - Conn: conn, - Plugins: ps, - broker: broker, - doneCtx: context.Background(), - controller: plugin.NewGRPCControllerClient(conn), + grpcClient, err := newGRPCClient(context.Background(), client) + if err != nil { + t.Fatal(err) } - return client, server + return grpcClient, server } diff --git a/vendor/github.com/hashicorp/go-safetemp/LICENSE b/vendor/github.com/hashicorp/go-safetemp/LICENSE deleted file mode 100644 index be2cc4dfb6..0000000000 --- a/vendor/github.com/hashicorp/go-safetemp/LICENSE +++ /dev/null @@ -1,362 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. 
"Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. 
Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. 
If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-safetemp/README.md b/vendor/github.com/hashicorp/go-safetemp/README.md deleted file mode 100644 index 02ece33171..0000000000 --- a/vendor/github.com/hashicorp/go-safetemp/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# go-safetemp -[![Godoc](https://godoc.org/github.com/hashcorp/go-safetemp?status.svg)](https://godoc.org/github.com/hashicorp/go-safetemp) - -Functions for safely working with temporary directories and files. - -## Why? - -The Go standard library provides the excellent `ioutil` package for -working with temporary directories and files. This library builds on top -of that to provide safe abstractions above that. diff --git a/vendor/github.com/hashicorp/go-safetemp/safetemp.go b/vendor/github.com/hashicorp/go-safetemp/safetemp.go deleted file mode 100644 index c4ae72b789..0000000000 --- a/vendor/github.com/hashicorp/go-safetemp/safetemp.go +++ /dev/null @@ -1,40 +0,0 @@ -package safetemp - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// Dir creates a new temporary directory that isn't yet created. This -// can be used with calls that expect a non-existent directory. -// -// The directory is created as a child of a temporary directory created -// within the directory dir starting with prefix. The temporary directory -// returned is always named "temp". The parent directory has the specified -// prefix. -// -// The returned io.Closer should be used to clean up the returned directory. -// This will properly remove the returned directory and any other temporary -// files created. -// -// If an error is returned, the Closer does not need to be called (and will -// be nil). -func Dir(dir, prefix string) (string, io.Closer, error) { - // Create the temporary directory - td, err := ioutil.TempDir(dir, prefix) - if err != nil { - return "", nil, err - } - - return filepath.Join(td, "temp"), pathCloser(td), nil -} - -// pathCloser implements io.Closer to remove the given path on Close. -type pathCloser string - -// Close deletes this path. 
-func (p pathCloser) Close() error { - return os.RemoveAll(string(p)) -} diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE index e87a115e46..a320b309c4 100644 --- a/vendor/github.com/hashicorp/go-uuid/LICENSE +++ b/vendor/github.com/hashicorp/go-uuid/LICENSE @@ -1,3 +1,5 @@ +Copyright © 2015-2022 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go index 911227f612..0c10c4e9f5 100644 --- a/vendor/github.com/hashicorp/go-uuid/uuid.go +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -4,22 +4,40 @@ import ( "crypto/rand" "encoding/hex" "fmt" + "io" ) // GenerateRandomBytes is used to generate random bytes of given size. func GenerateRandomBytes(size int) ([]byte, error) { + return GenerateRandomBytesWithReader(size, rand.Reader) +} + +// GenerateRandomBytesWithReader is used to generate random bytes of given size read from a given reader. +func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) { + if reader == nil { + return nil, fmt.Errorf("provided reader is nil") + } buf := make([]byte, size) - if _, err := rand.Read(buf); err != nil { + if _, err := io.ReadFull(reader, buf); err != nil { return nil, fmt.Errorf("failed to read random bytes: %v", err) } return buf, nil } + const uuidLen = 16 // GenerateUUID is used to generate a random UUID func GenerateUUID() (string, error) { - buf, err := GenerateRandomBytes(uuidLen) + return GenerateUUIDWithReader(rand.Reader) +} + +// GenerateUUIDWithReader is used to generate a random UUID with a given Reader +func GenerateUUIDWithReader(reader io.Reader) (string, error) { + if reader == nil { + return "", fmt.Errorf("provided reader is nil") + } + buf, err := GenerateRandomBytesWithReader(uuidLen, reader) if err != nil { return "", err } diff --git a/vendor/github.com/hashicorp/hc-install/.copywrite.hcl b/vendor/github.com/hashicorp/hc-install/.copywrite.hcl new file mode 100644 index 0000000000..45ec82c768 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/.copywrite.hcl @@ -0,0 +1,7 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2020 + header_ignore = [] +} diff --git a/vendor/github.com/hashicorp/hc-install/.go-version b/vendor/github.com/hashicorp/hc-install/.go-version new file mode 100644 index 0000000000..ce2dd53570 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/.go-version @@ -0,0 +1 @@ +1.21.5 diff --git a/vendor/github.com/hashicorp/hc-install/LICENSE b/vendor/github.com/hashicorp/hc-install/LICENSE new file mode 100644 index 0000000000..c121cee6e5 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/LICENSE @@ -0,0 +1,375 @@ +Copyright (c) 2020 HashiCorp, Inc. + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. 
Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. 
If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/hc-install/README.md b/vendor/github.com/hashicorp/hc-install/README.md new file mode 100644 index 0000000000..6e78b5a610 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/README.md @@ -0,0 +1,120 @@ +# hc-install + +An **experimental** Go module for downloading or locating HashiCorp binaries, verifying signatures and checksums, and asserting version constraints. + +This module is a successor to tfinstall, available in pre-1.0 versions of [terraform-exec](https://github.com/hashicorp/terraform-exec). Current users of tfinstall are advised to move to hc-install before upgrading terraform-exec to v1.0.0. + +## hc-install is not a package manager + +This library is intended for use within Go programs or automated environments (such as CIs) +which have some business downloading or otherwise locating HashiCorp binaries. + +The included command-line utility, `hc-install`, is a convenient way of using +the library in ad-hoc or CI shell scripting outside of Go. + +`hc-install` does **not**: + + - Determine suitable installation path based on target system. e.g. in `/usr/bin` or `/usr/local/bin` on Unix based system. + - Deal with execution of installed binaries (via service files or otherwise). 
+ - Upgrade existing binaries on your system.
+ - Add or link downloaded binaries to your `$PATH`.
+
+## API
+
+The `Installer` offers a few high-level methods:
+
+ - `Ensure(context.Context, []src.Source)` to find, install, or build a product version
+ - `Install(context.Context, []src.Installable)` to install a product version
+
+### Sources
+
+The `Installer` methods accept a number of different `Source` types.
+Each comes with different trade-offs described below.
+
+ - `fs.{AnyVersion,ExactVersion,Version}` - Finds a binary in `$PATH` (or additional paths)
+   - **Pros:**
+     - This is most convenient when you already have the product installed on your system
+       which you already manage.
+   - **Cons:**
+     - Only relies on a single version, expects _you_ to manage the installation
+     - _Not recommended_ for any environment where product installation is not controlled or managed by you (e.g. default GitHub Actions image managed by GitHub)
+ - `releases.{LatestVersion,ExactVersion}` - Downloads, verifies & installs any known product from `releases.hashicorp.com`
+   - **Pros:**
+     - Fast and reliable way of obtaining any pre-built version of any product
+     - Allows installation of enterprise versions
+   - **Cons:**
+     - Installation may consume some bandwidth, disk space and a little time
+     - Potentially less stable builds (see `checkpoint` below)
+ - `checkpoint.LatestVersion` - Downloads, verifies & installs any known product available in HashiCorp Checkpoint
+   - **Pros:**
+     - Checkpoint typically contains only product versions considered stable
+   - **Cons:**
+     - Installation may consume some bandwidth, disk space and a little time
+     - Currently doesn't allow installation of old versions or enterprise versions (see `releases` above)
+ - `build.GitRevision` - Clones raw source code and builds the product from it
+   - **Pros:**
+     - Useful for catching bugs and incompatibilities as early as possible (prior to product release).
+   - **Cons:**
+     - Building from scratch can consume a significant amount of time & resources (CPU, memory, bandwidth, disk space)
+     - There are no guarantees that build instructions will always be up-to-date
+     - There's an increased likelihood of the build containing bugs prior to release
+     - Any CI builds relying on this are likely to be fragile
+
+## Example Usage
+
+See examples at https://pkg.go.dev/github.com/hashicorp/hc-install#example-Installer.
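For orientation, here is a minimal sketch of the `Ensure` flow the README describes: try a binary already on `$PATH` first, then fall back to a verified download. It assumes the module's `product.Terraform` descriptor and the `releases.ExactVersion` source, neither of which appears in this hunk, so treat it as illustrative rather than part of the vendored README.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/go-version"
	install "github.com/hashicorp/hc-install"
	"github.com/hashicorp/hc-install/fs"
	"github.com/hashicorp/hc-install/product"
	"github.com/hashicorp/hc-install/releases"
	"github.com/hashicorp/hc-install/src"
)

func main() {
	ctx := context.Background()
	installer := install.NewInstaller()
	// Remove cleans up anything the sources installed (e.g. temp dirs).
	defer installer.Remove(ctx)

	// Ensure tries the sources in order and returns the first usable
	// binary: a terraform already on $PATH, else a checksummed download
	// of 1.3.7 from releases.hashicorp.com.
	execPath, err := installer.Ensure(ctx, []src.Source{
		&fs.AnyVersion{Product: &product.Terraform},
		&releases.ExactVersion{
			Product: product.Terraform,
			Version: version.Must(version.NewVersion("1.3.7")),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("terraform binary:", execPath)
}
```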
+## CLI
+
+In addition to the Go library, which is the intended primary use case of `hc-install`, we also distribute a CLI.
+
+The CLI comes with some trade-offs:
+
+ - more limited interface compared to the flexible Go API (installs specific versions of products via `releases.ExactVersion`)
+ - minimal environment pre-requisites (no need to compile Go code)
+ - see ["hc-install is not a package manager"](https://github.com/hashicorp/hc-install#hc-install-is-not-a-package-manager)
+
+### Installation
+
+Given that one of the key roles of the CLI/library is integrity checking, you should choose the installation method which involves the same level of integrity checks, and/or perform these checks yourself. `go install` provides only minimal to no integrity checks, depending on exact use. We recommend any of the installation methods documented below.
+
+#### Homebrew (macOS / Linux)
+
+[Homebrew](https://brew.sh)
+
+```
+brew install hashicorp/tap/hc-install
+```
+
+#### Linux
+
+We support Debian & Ubuntu via apt and RHEL, CentOS, Fedora and Amazon Linux via RPM.
+
+You can follow the instructions in the [Official Packaging Guide](https://www.hashicorp.com/official-packaging-guide) to install the package from the official HashiCorp-maintained repositories. The package name is `hc-install` in all repositories.
+
+#### Other platforms
+
+1. [Download for the latest version](https://releases.hashicorp.com/hc-install/) relevant for your operating system and architecture.
+2. Verify integrity by comparing the SHA256 checksums which are part of the release (called `hc-install_<version>_SHA256SUMS`).
+3. Install it by unzipping it and moving it to a directory included in your system's `PATH`.
+4. Check that you have installed it correctly via `hc-install --version`.
+   You should see the latest version printed to your terminal.
+
+### Usage
+
+```
+Usage: hc-install install [options] -version <version> <product>
+
+  This command installs a HashiCorp product.
+  Options:
+    -version  [REQUIRED] Version of product to install.
+    -path     Path to directory where the product will be installed. Defaults
+              to current working directory.
+```
+```sh
+hc-install install -version 1.3.7 terraform
+```
+```
+hc-install: will install terraform@1.3.7
+installed terraform@1.3.7 to /current/working/dir/terraform
+```
diff --git a/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go b/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go
new file mode 100644
index 0000000000..2cd5379fb8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go
@@ -0,0 +1,160 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package checkpoint
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"runtime"
+	"time"
+
+	checkpoint "github.com/hashicorp/go-checkpoint"
+	"github.com/hashicorp/go-version"
+	"github.com/hashicorp/hc-install/internal/pubkey"
+	rjson "github.com/hashicorp/hc-install/internal/releasesjson"
+	isrc "github.com/hashicorp/hc-install/internal/src"
+	"github.com/hashicorp/hc-install/internal/validators"
+	"github.com/hashicorp/hc-install/product"
+)
+
+var (
+	defaultTimeout = 30 * time.Second
+	discardLogger  = log.New(ioutil.Discard, "", 0)
+)
+
+// LatestVersion installs the latest version known to Checkpoint
+// to OS temp directory, or to InstallDir (if not empty)
+type LatestVersion struct {
+	Product                  product.Product
+	Timeout                  time.Duration
+	SkipChecksumVerification bool
+	InstallDir               string
+
+	// ArmoredPublicKey is a public PGP key in ASCII/armor format to use
+	// instead of built-in pubkey to verify signature of downloaded checksums
+	ArmoredPublicKey string
+
+	logger        *log.Logger
+	pathsToRemove []string
+}
+
+func (*LatestVersion) IsSourceImpl() isrc.InstallSrcSigil {
+	return isrc.InstallSrcSigil{}
+}
+
+func (lv *LatestVersion) SetLogger(logger *log.Logger) {
+	lv.logger = logger
+}
+
+func (lv *LatestVersion) log() *log.Logger {
+	if lv.logger == nil {
+		return discardLogger
+	}
+	return lv.logger
+}
+
+func (lv *LatestVersion) Validate() error {
+	if !validators.IsProductNameValid(lv.Product.Name) {
+		return fmt.Errorf("invalid product name: %q", lv.Product.Name)
+	}
+	if !validators.IsBinaryNameValid(lv.Product.BinaryName()) {
+		return fmt.Errorf("invalid binary name: %q", lv.Product.BinaryName())
+	}
+
+	return nil
+}
+
+func (lv *LatestVersion) Install(ctx context.Context) (string, error) {
+	timeout := defaultTimeout
+	if lv.Timeout > 0 {
+		timeout = lv.Timeout
+	}
+	ctx, cancelFunc := context.WithTimeout(ctx, timeout)
+	defer cancelFunc()
+
+	// TODO: Introduce
CheckWithContext to allow for cancellation + resp, err := checkpoint.Check(&checkpoint.CheckParams{ + Product: lv.Product.Name, + OS: runtime.GOOS, + Arch: runtime.GOARCH, + Force: true, + }) + if err != nil { + return "", err + } + + latestVersion, err := version.NewVersion(resp.CurrentVersion) + if err != nil { + return "", err + } + + if lv.pathsToRemove == nil { + lv.pathsToRemove = make([]string, 0) + } + + dstDir := lv.InstallDir + if dstDir == "" { + var err error + dirName := fmt.Sprintf("%s_*", lv.Product.Name) + dstDir, err = ioutil.TempDir("", dirName) + if err != nil { + return "", err + } + lv.pathsToRemove = append(lv.pathsToRemove, dstDir) + lv.log().Printf("created new temp dir at %s", dstDir) + } + lv.log().Printf("will install into dir at %s", dstDir) + + rels := rjson.NewReleases() + rels.SetLogger(lv.log()) + pv, err := rels.GetProductVersion(ctx, lv.Product.Name, latestVersion) + if err != nil { + return "", err + } + + d := &rjson.Downloader{ + Logger: lv.log(), + VerifyChecksum: !lv.SkipChecksumVerification, + ArmoredPublicKey: pubkey.DefaultPublicKey, + BaseURL: rels.BaseURL, + } + if lv.ArmoredPublicKey != "" { + d.ArmoredPublicKey = lv.ArmoredPublicKey + } + zipFilePath, err := d.DownloadAndUnpack(ctx, pv, dstDir, "") + if zipFilePath != "" { + lv.pathsToRemove = append(lv.pathsToRemove, zipFilePath) + } + if err != nil { + return "", err + } + + execPath := filepath.Join(dstDir, lv.Product.BinaryName()) + + lv.pathsToRemove = append(lv.pathsToRemove, execPath) + + lv.log().Printf("changing perms of %s", execPath) + err = os.Chmod(execPath, 0o700) + if err != nil { + return "", err + } + + return execPath, nil +} + +func (lv *LatestVersion) Remove(ctx context.Context) error { + if lv.pathsToRemove != nil { + for _, path := range lv.pathsToRemove { + err := os.RemoveAll(path) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/hashicorp/hc-install/errors/errors.go b/vendor/github.com/hashicorp/hc-install/errors/errors.go new file mode 100644 index 0000000000..15d51b6026 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/errors/errors.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package errors + +type skippableErr struct { + Err error +} + +func (e skippableErr) Error() string { + return e.Err.Error() +} + +func SkippableErr(err error) skippableErr { + return skippableErr{Err: err} +} + +func IsErrorSkippable(err error) bool { + _, ok := err.(skippableErr) + return ok +} diff --git a/vendor/github.com/hashicorp/hc-install/fs/any_version.go b/vendor/github.com/hashicorp/hc-install/fs/any_version.go new file mode 100644 index 0000000000..8071dfcf52 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/any_version.go @@ -0,0 +1,98 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fs + +import ( + "context" + "fmt" + "log" + "path/filepath" + + "github.com/hashicorp/hc-install/errors" + "github.com/hashicorp/hc-install/internal/src" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" +) + +// AnyVersion finds an executable binary of any version +// either defined by ExactBinPath, or as part of Product. +// +// When ExactBinPath is used, the source is skipped when +// the binary is not found or accessible/executable. +// +// When Product is used, binary name is looked up within system $PATH +// and any declared ExtraPaths (which are *appended* to +// any directories in $PATH). 
Source is skipped if no binary +// is found or accessible/executable. +type AnyVersion struct { + // Product represents the product (its binary name to look up), + // conflicts with ExactBinPath + Product *product.Product + + // ExtraPaths represents additional dir paths to be appended to + // the default system $PATH, conflicts with ExactBinPath + ExtraPaths []string + + // ExactBinPath represents exact path to the binary, + // conflicts with Product and ExtraPaths + ExactBinPath string + + logger *log.Logger +} + +func (*AnyVersion) IsSourceImpl() src.InstallSrcSigil { + return src.InstallSrcSigil{} +} + +func (av *AnyVersion) Validate() error { + if av.ExactBinPath == "" && av.Product == nil { + return fmt.Errorf("must use either ExactBinPath or Product + ExtraPaths") + } + if av.ExactBinPath != "" && (av.Product != nil || len(av.ExtraPaths) > 0) { + return fmt.Errorf("use either ExactBinPath or Product + ExtraPaths, not both") + } + if av.ExactBinPath != "" && !filepath.IsAbs(av.ExactBinPath) { + return fmt.Errorf("expected ExactBinPath (%q) to be an absolute path", av.ExactBinPath) + } + if av.Product != nil && !validators.IsBinaryNameValid(av.Product.BinaryName()) { + return fmt.Errorf("invalid binary name: %q", av.Product.BinaryName()) + } + return nil +} + +func (av *AnyVersion) SetLogger(logger *log.Logger) { + av.logger = logger +} + +func (av *AnyVersion) log() *log.Logger { + if av.logger == nil { + return discardLogger + } + return av.logger +} + +func (av *AnyVersion) Find(ctx context.Context) (string, error) { + if av.ExactBinPath != "" { + err := checkExecutable(av.ExactBinPath) + if err != nil { + return "", errors.SkippableErr(err) + } + + return av.ExactBinPath, nil + } + + execPath, err := findFile(lookupDirs(av.ExtraPaths), av.Product.BinaryName(), checkExecutable) + if err != nil { + return "", errors.SkippableErr(err) + } + + if !filepath.IsAbs(execPath) { + var err error + execPath, err = filepath.Abs(execPath) + if err != nil { + return "", errors.SkippableErr(err) + } + } + return execPath, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/fs/exact_version.go b/vendor/github.com/hashicorp/hc-install/fs/exact_version.go new file mode 100644 index 0000000000..c3cc49bfc9 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/exact_version.go @@ -0,0 +1,98 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package fs + +import ( + "context" + "fmt" + "log" + "path/filepath" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/errors" + "github.com/hashicorp/hc-install/internal/src" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" +) + +// ExactVersion finds the first executable binary of the product name +// which matches the Version within system $PATH and any declared ExtraPaths +// (which are *appended* to any directories in $PATH) +type ExactVersion struct { + Product product.Product + Version *version.Version + ExtraPaths []string + Timeout time.Duration + + logger *log.Logger +} + +func (*ExactVersion) IsSourceImpl() src.InstallSrcSigil { + return src.InstallSrcSigil{} +} + +func (ev *ExactVersion) SetLogger(logger *log.Logger) { + ev.logger = logger +} + +func (ev *ExactVersion) log() *log.Logger { + if ev.logger == nil { + return discardLogger + } + return ev.logger +} + +func (ev *ExactVersion) Validate() error { + if !validators.IsBinaryNameValid(ev.Product.BinaryName()) { + return fmt.Errorf("invalid binary name: %q", ev.Product.BinaryName()) + } + if ev.Version == nil { + return fmt.Errorf("undeclared version") + } + if ev.Product.GetVersion == nil { + return fmt.Errorf("undeclared version getter") + } + return nil +} + +func (ev *ExactVersion) Find(ctx context.Context) (string, error) { + timeout := defaultTimeout + if ev.Timeout > 0 { + timeout = ev.Timeout + } + ctx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + + execPath, err := findFile(lookupDirs(ev.ExtraPaths), ev.Product.BinaryName(), func(file string) error { + err := checkExecutable(file) + if err != nil { + return err + } + + v, err := ev.Product.GetVersion(ctx, file) + if err != nil { + return err + } + + if !ev.Version.Equal(v) { + return fmt.Errorf("version (%s) doesn't match %s", v, ev.Version) + } + + return nil + }) + if err != nil { + return "", errors.SkippableErr(err) + } + + if !filepath.IsAbs(execPath) { + var err error + execPath, err = filepath.Abs(execPath) + if err != nil { + return "", errors.SkippableErr(err) + } + } + + return execPath, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/fs/fs.go b/vendor/github.com/hashicorp/hc-install/fs/fs.go new file mode 100644 index 0000000000..216df2c2cd --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/fs.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fs + +import ( + "io/ioutil" + "log" + "time" +) + +var ( + defaultTimeout = 10 * time.Second + discardLogger = log.New(ioutil.Discard, "", 0) +) + +type fileCheckFunc func(path string) error diff --git a/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go b/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go new file mode 100644 index 0000000000..eebd98b82c --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go @@ -0,0 +1,48 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows +// +build !windows + +package fs + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +func lookupDirs(extraDirs []string) []string { + pathVar := os.Getenv("PATH") + dirs := filepath.SplitList(pathVar) + for _, ep := range extraDirs { + dirs = append(dirs, ep) + } + return dirs +} + +func findFile(dirs []string, file string, f fileCheckFunc) (string, error) { + for _, dir := range dirs { + if dir == "" { + // Unix shell semantics: path element "" means "." + dir = "." + } + path := filepath.Join(dir, file) + if err := f(path); err == nil { + return path, nil + } + } + return "", fmt.Errorf("%s: %w", file, exec.ErrNotFound) +} + +func checkExecutable(file string) error { + d, err := os.Stat(file) + if err != nil { + return err + } + if m := d.Mode(); !m.IsDir() && m&0111 != 0 { + return nil + } + return os.ErrPermission +} diff --git a/vendor/github.com/hashicorp/hc-install/fs/fs_windows.go b/vendor/github.com/hashicorp/hc-install/fs/fs_windows.go new file mode 100644 index 0000000000..e2e4e73fb1 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/fs_windows.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fs + +import ( + "fmt" + "io/fs" + "os" + "os/exec" + "path/filepath" + "strings" +) + +func lookupDirs(extraDirs []string) []string { + pathVar := os.Getenv("path") + dirs := filepath.SplitList(pathVar) + for _, ep := range extraDirs { + dirs = append(dirs, ep) + } + return dirs +} + +func findFile(dirs []string, file string, f fileCheckFunc) (string, error) { + for _, dir := range dirs { + path := filepath.Join(dir, file) + if err := f(path); err == nil { + return path, nil + } + } + return "", fmt.Errorf("%s: %w", file, exec.ErrNotFound) +} + +func checkExecutable(file string) error { + var exts []string + x := os.Getenv(`PATHEXT`) + if x != "" { + for _, e := range strings.Split(strings.ToLower(x), `;`) { + if e == "" { + continue + } + if e[0] != '.' { + e = "." + e + } + exts = append(exts, e) + } + } else { + exts = []string{".com", ".exe", ".bat", ".cmd"} + } + + if len(exts) == 0 { + return chkStat(file) + } + if hasExt(file) { + if chkStat(file) == nil { + return nil + } + } + for _, e := range exts { + if f := file + e; chkStat(f) == nil { + return nil + } + } + return fs.ErrNotExist +} + +func chkStat(file string) error { + d, err := os.Stat(file) + if err != nil { + return err + } + if d.IsDir() { + return fs.ErrPermission + } + return nil +} + +func hasExt(file string) bool { + i := strings.LastIndex(file, ".") + if i < 0 { + return false + } + return strings.LastIndexAny(file, `:\/`) < i +} diff --git a/vendor/github.com/hashicorp/hc-install/fs/version.go b/vendor/github.com/hashicorp/hc-install/fs/version.go new file mode 100644 index 0000000000..39efb52d9f --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/version.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package fs + +import ( + "context" + "fmt" + "log" + "path/filepath" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/errors" + "github.com/hashicorp/hc-install/internal/src" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" +) + +// Version finds the first executable binary of the product name +// which matches the version constraint within system $PATH and any declared ExtraPaths +// (which are *appended* to any directories in $PATH) +type Version struct { + Product product.Product + Constraints version.Constraints + ExtraPaths []string + Timeout time.Duration + + logger *log.Logger +} + +func (*Version) IsSourceImpl() src.InstallSrcSigil { + return src.InstallSrcSigil{} +} + +func (v *Version) SetLogger(logger *log.Logger) { + v.logger = logger +} + +func (v *Version) log() *log.Logger { + if v.logger == nil { + return discardLogger + } + return v.logger +} + +func (v *Version) Validate() error { + if !validators.IsBinaryNameValid(v.Product.BinaryName()) { + return fmt.Errorf("invalid binary name: %q", v.Product.BinaryName()) + } + if len(v.Constraints) == 0 { + return fmt.Errorf("undeclared version constraints") + } + if v.Product.GetVersion == nil { + return fmt.Errorf("undeclared version getter") + } + return nil +} + +func (v *Version) Find(ctx context.Context) (string, error) { + timeout := defaultTimeout + if v.Timeout > 0 { + timeout = v.Timeout + } + ctx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + + execPath, err := findFile(lookupDirs(v.ExtraPaths), v.Product.BinaryName(), func(file string) error { + err := checkExecutable(file) + if err != nil { + return err + } + + ver, err := v.Product.GetVersion(ctx, file) + if err != nil { + return err + } + + for _, vc := range v.Constraints { + if !vc.Check(ver) { + return fmt.Errorf("version (%s) doesn't meet constraints %s", ver, vc.String()) + } + } + + return nil + }) + if err != nil { + return "", errors.SkippableErr(err) + } + + if !filepath.IsAbs(execPath) { + var err error + execPath, err = filepath.Abs(execPath) + if err != nil { + return "", errors.SkippableErr(err) + } + } + + return execPath, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/installer.go b/vendor/github.com/hashicorp/hc-install/installer.go new file mode 100644 index 0000000000..6c704eede3 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/installer.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package install + +import ( + "context" + "fmt" + "io/ioutil" + "log" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hc-install/errors" + "github.com/hashicorp/hc-install/src" +) + +type Installer struct { + logger *log.Logger + + removableSources []src.Removable +} + +type RemoveFunc func(ctx context.Context) error + +func NewInstaller() *Installer { + discardLogger := log.New(ioutil.Discard, "", 0) + return &Installer{ + logger: discardLogger, + } +} + +func (i *Installer) SetLogger(logger *log.Logger) { + i.logger = logger +} + +func (i *Installer) Ensure(ctx context.Context, sources []src.Source) (string, error) { + var errs *multierror.Error + + for _, source := range sources { + if srcWithLogger, ok := source.(src.LoggerSettable); ok { + srcWithLogger.SetLogger(i.logger) + } + + if srcValidatable, ok := source.(src.Validatable); ok { + err := srcValidatable.Validate() + if err != nil { + errs = multierror.Append(errs, err) + } + } + } + + if errs.ErrorOrNil() != nil { + return "", errs + } + + i.removableSources = make([]src.Removable, 0) + + for _, source := range sources { + if s, ok := source.(src.Removable); ok { + i.removableSources = append(i.removableSources, s) + } + + switch s := source.(type) { + case src.Findable: + execPath, err := s.Find(ctx) + if err != nil { + if errors.IsErrorSkippable(err) { + errs = multierror.Append(errs, err) + continue + } + return "", err + } + + return execPath, nil + case src.Installable: + execPath, err := s.Install(ctx) + if err != nil { + if errors.IsErrorSkippable(err) { + errs = multierror.Append(errs, err) + continue + } + return "", err + } + + return execPath, nil + case src.Buildable: + execPath, err := s.Build(ctx) + if err != nil { + if errors.IsErrorSkippable(err) { + errs = multierror.Append(errs, err) + continue + } + return "", err + } + + return execPath, nil + default: + return "", fmt.Errorf("unknown source: %T", s) + } + } + + return "", fmt.Errorf("unable to find, install, or build from %d sources: %s", + len(sources), errs.ErrorOrNil()) +} + +func (i *Installer) Install(ctx context.Context, sources []src.Installable) (string, error) { + var errs *multierror.Error + + i.removableSources = make([]src.Removable, 0) + + for _, source := range sources { + if srcWithLogger, ok := source.(src.LoggerSettable); ok { + srcWithLogger.SetLogger(i.logger) + } + + if srcValidatable, ok := source.(src.Validatable); ok { + err := srcValidatable.Validate() + if err != nil { + errs = multierror.Append(errs, err) + continue + } + } + + if s, ok := source.(src.Removable); ok { + i.removableSources = append(i.removableSources, s) + } + + execPath, err := source.Install(ctx) + if err != nil { + if errors.IsErrorSkippable(err) { + errs = multierror.Append(errs, err) + continue + } + return "", err + } + + return execPath, nil + } + + return "", fmt.Errorf("unable install from %d sources: %s", + len(sources), errs.ErrorOrNil()) +} + +func (i *Installer) Remove(ctx context.Context) error { + var errs *multierror.Error + + if i.removableSources != nil { + for _, rs := range i.removableSources { + err := rs.Remove(ctx) + if err != nil { + errs = multierror.Append(errs, err) + } + } + } + + return errs.ErrorOrNil() +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/get_go_version.go b/vendor/github.com/hashicorp/hc-install/internal/build/get_go_version.go new file mode 100644 index 0000000000..858f8ab297 --- /dev/null +++ 
b/vendor/github.com/hashicorp/hc-install/internal/build/get_go_version.go @@ -0,0 +1,40 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package build + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "strings" + + "github.com/hashicorp/go-version" +) + +// GetGoVersion obtains version of locally installed Go via "go version" +func GetGoVersion(ctx context.Context) (*version.Version, error) { + cmd := exec.CommandContext(ctx, "go", "version") + out, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("unable to build: %w\n%s", err, out) + } + + output := strings.TrimSpace(string(out)) + + // e.g. "go version go1.15" + re := regexp.MustCompile(`^go version go([0-9.]+)\s+`) + matches := re.FindStringSubmatch(output) + if len(matches) != 2 { + return nil, fmt.Errorf("unexpected go version output: %q", output) + } + + rawGoVersion := matches[1] + v, err := version.NewVersion(rawGoVersion) + if err != nil { + return nil, fmt.Errorf("unexpected go version output: %w", err) + } + + return v, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go b/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go new file mode 100644 index 0000000000..504bf45a30 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go @@ -0,0 +1,196 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package build + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + + "github.com/hashicorp/go-version" + "golang.org/x/mod/modfile" +) + +var discardLogger = log.New(ioutil.Discard, "", 0) + +// GoBuild represents a Go builder (to run "go build") +type GoBuild struct { + Version *version.Version + DetectVendoring bool + + pathToRemove string + logger *log.Logger +} + +func (gb *GoBuild) SetLogger(logger *log.Logger) { + gb.logger = logger +} + +func (gb *GoBuild) log() *log.Logger { + if gb.logger == nil { + return discardLogger + } + return gb.logger +} + +// Build runs "go build" within a given repo to produce binaryName in targetDir +func (gb *GoBuild) Build(ctx context.Context, repoDir, targetDir, binaryName string) (string, error) { + reqGo, err := gb.ensureRequiredGoVersion(ctx, repoDir) + if err != nil { + return "", err + } + defer reqGo.CleanupFunc(ctx) + + if reqGo.Version == nil { + gb.logger.Println("building using default available Go") + } else { + gb.logger.Printf("building using Go %s", reqGo.Version) + } + + // `go build` would download dependencies as a side effect, but we attempt + // to do it early in a separate step, such that we can easily distinguish + // network failures from build failures. + // + // Note, that `go mod download` was introduced in Go 1.11 + // See https://github.com/golang/go/commit/9f4ea6c2 + minGoVersion := version.Must(version.NewVersion("1.11")) + if reqGo.Version.GreaterThanOrEqual(minGoVersion) { + downloadArgs := []string{"mod", "download"} + gb.log().Printf("executing %s %q in %q", reqGo.Cmd, downloadArgs, repoDir) + cmd := exec.CommandContext(ctx, reqGo.Cmd, downloadArgs...) 
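+ // Run the download from within the repository directory so that
+ // module resolution is based on the repo's own go.mod and go.sum.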
+ cmd.Dir = repoDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("unable to download dependencies: %w\n%s", err, out) + } + } + + buildArgs := []string{"build", "-o", filepath.Join(targetDir, binaryName)} + + if gb.DetectVendoring { + vendorDir := filepath.Join(repoDir, "vendor") + if fi, err := os.Stat(vendorDir); err == nil && fi.IsDir() { + buildArgs = append(buildArgs, "-mod", "vendor") + } + } + + gb.log().Printf("executing %s %q in %q", reqGo.Cmd, buildArgs, repoDir) + cmd := exec.CommandContext(ctx, reqGo.Cmd, buildArgs...) + cmd.Dir = repoDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("unable to build: %w\n%s", err, out) + } + + binPath := filepath.Join(targetDir, binaryName) + + gb.pathToRemove = binPath + + return binPath, nil +} + +func (gb *GoBuild) Remove(ctx context.Context) error { + return os.RemoveAll(gb.pathToRemove) +} + +type Go struct { + Cmd string + CleanupFunc CleanupFunc + Version *version.Version +} + +func (gb *GoBuild) ensureRequiredGoVersion(ctx context.Context, repoDir string) (Go, error) { + cmdName := "go" + noopCleanupFunc := func(context.Context) {} + + var installedVersion *version.Version + + if gb.Version != nil { + gb.logger.Printf("attempting to satisfy explicit requirement for Go %s", gb.Version) + goVersion, err := GetGoVersion(ctx) + if err != nil { + return Go{ + Cmd: cmdName, + CleanupFunc: noopCleanupFunc, + }, err + } + + if !goVersion.GreaterThanOrEqual(gb.Version) { + // found incompatible version, try downloading the desired one + return gb.installGoVersion(ctx, gb.Version) + } + installedVersion = goVersion + } + + if requiredVersion, ok := guessRequiredGoVersion(repoDir); ok { + gb.logger.Printf("attempting to satisfy guessed Go requirement %s", requiredVersion) + goVersion, err := GetGoVersion(ctx) + if err != nil { + return Go{ + Cmd: cmdName, + CleanupFunc: noopCleanupFunc, + }, err + } + + if !goVersion.GreaterThanOrEqual(requiredVersion) { + // found incompatible version, try downloading the desired one + return gb.installGoVersion(ctx, requiredVersion) + } + installedVersion = goVersion + } else { + gb.logger.Println("unable to guess Go requirement") + } + + return Go{ + Cmd: cmdName, + CleanupFunc: noopCleanupFunc, + Version: installedVersion, + }, nil +} + +// CleanupFunc represents a function to be called once Go is no longer needed +// e.g. 
to remove any version installed temporarily per requirements +type CleanupFunc func(context.Context) + +func guessRequiredGoVersion(repoDir string) (*version.Version, bool) { + goEnvFile := filepath.Join(repoDir, ".go-version") + if fi, err := os.Stat(goEnvFile); err == nil && !fi.IsDir() { + b, err := ioutil.ReadFile(goEnvFile) + if err != nil { + return nil, false + } + requiredVersion, err := version.NewVersion(string(bytes.TrimSpace(b))) + if err != nil { + return nil, false + } + return requiredVersion, true + } + + goModFile := filepath.Join(repoDir, "go.mod") + if fi, err := os.Stat(goModFile); err == nil && !fi.IsDir() { + b, err := ioutil.ReadFile(goModFile) + if err != nil { + return nil, false + } + f, err := modfile.ParseLax(fi.Name(), b, nil) + if err != nil { + return nil, false + } + if f.Go == nil { + return nil, false + } + requiredVersion, err := version.NewVersion(f.Go.Version) + if err != nil { + return nil, false + } + return requiredVersion, true + } + + return nil, false +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/go_is_installed.go b/vendor/github.com/hashicorp/hc-install/internal/build/go_is_installed.go new file mode 100644 index 0000000000..00165fff5c --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/build/go_is_installed.go @@ -0,0 +1,31 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package build + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-version" +) + +// GoIsInstalled represents a checker of whether Go is installed locally +type GoIsInstalled struct { + RequiredVersion version.Constraints +} + +// Check checks whether any Go version is installed locally +func (gii *GoIsInstalled) Check(ctx context.Context) error { + goVersion, err := GetGoVersion(ctx) + if err != nil { + return err + } + + if gii.RequiredVersion != nil && !gii.RequiredVersion.Check(goVersion) { + return fmt.Errorf("go %s required (%s available)", + gii.RequiredVersion, goVersion) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go b/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go new file mode 100644 index 0000000000..9dc070d795 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go @@ -0,0 +1,75 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package build + +import ( + "context" + "fmt" + "os" + "os/exec" + "strings" + + "github.com/hashicorp/go-version" +) + +var v1_21 = version.Must(version.NewVersion("1.21")) + +// installGoVersion installs given version of Go using Go +// according to https://golang.org/doc/manage-install +func (gb *GoBuild) installGoVersion(ctx context.Context, v *version.Version) (Go, error) { + goVersion := v.String() + + // trim 0 patch versions as that's how Go does it + // for versions prior to 1.21 + // See https://github.com/golang/go/issues/62136 + if v.LessThan(v1_21) { + versionString := v.Core().String() + goVersion = strings.TrimSuffix(versionString, ".0") + } + pkgURL := fmt.Sprintf("golang.org/dl/go%s", goVersion) + + gb.log().Printf("go getting %q", pkgURL) + cmd := exec.CommandContext(ctx, "go", "get", pkgURL) + out, err := cmd.CombinedOutput() + if err != nil { + return Go{}, fmt.Errorf("unable to get Go %s: %w\n%s", v, err, out) + } + + gb.log().Printf("go installing %q", pkgURL) + cmd = exec.CommandContext(ctx, "go", "install", pkgURL) + out, err = cmd.CombinedOutput() + if err != nil { + return Go{}, fmt.Errorf("unable to install Go %s: %w\n%s", v, err, out) + } + + cmdName := fmt.Sprintf("go%s", goVersion) + + gb.log().Printf("downloading go %q", v) + cmd = exec.CommandContext(ctx, cmdName, "download") + out, err = cmd.CombinedOutput() + if err != nil { + return Go{}, fmt.Errorf("unable to download Go %s: %w\n%s", v, err, out) + } + gb.log().Printf("download of go %q finished", v) + + cleanupFunc := func(ctx context.Context) { + cmd = exec.CommandContext(ctx, cmdName, "env", "GOROOT") + out, err = cmd.CombinedOutput() + if err != nil { + return + } + rootPath := strings.TrimSpace(string(out)) + + // run some extra checks before deleting, just to be sure + if rootPath != "" && strings.HasSuffix(rootPath, v.String()) { + os.RemoveAll(rootPath) + } + } + + return Go{ + Cmd: cmdName, + CleanupFunc: cleanupFunc, + Version: v, + }, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go b/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go new file mode 100644 index 0000000000..a9503dfdb8 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go @@ -0,0 +1,40 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package httpclient + +import ( + "fmt" + "net/http" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/hc-install/version" +) + +// NewHTTPClient provides a pre-configured http.Client +// e.g. 
with relevant User-Agent header +func NewHTTPClient() *http.Client { + client := cleanhttp.DefaultClient() + + userAgent := fmt.Sprintf("hc-install/%s", version.Version()) + + client.Transport = &userAgentRoundTripper{ + userAgent: userAgent, + inner: client.Transport, + } + + return client +} + +type userAgentRoundTripper struct { + inner http.RoundTripper + userAgent string +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if _, ok := req.Header["User-Agent"]; !ok { + req.Header.Set("User-Agent", rt.userAgent) + } + return rt.inner.RoundTrip(req) +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/pubkey/pubkey.go b/vendor/github.com/hashicorp/hc-install/internal/pubkey/pubkey.go new file mode 100644 index 0000000000..d06f104506 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/pubkey/pubkey.go @@ -0,0 +1,130 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pubkey + +const ( + // See https://www.hashicorp.com/security + DefaultPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +
+mQINBGB9+xkBEACabYZOWKmgZsHTdRDiyPJxhbuUiKX65GUWkyRMJKi/1dviVxOX
+PG6hBPtF48IFnVgxKpIb7G6NjBousAV+CuLlv5yqFKpOZEGC6sBV+Gx8Vu1CICpl
+Zm+HpQPcIzwBpN+Ar4l/exCG/f/MZq/oxGgH+TyRF3XcYDjG8dbJCpHO5nQ5Cy9h
+QIp3/Bh09kET6lk+4QlofNgHKVT2epV8iK1cXlbQe2tZtfCUtxk+pxvU0UHXp+AB
+0xc3/gIhjZp/dePmCOyQyGPJbp5bpO4UeAJ6frqhexmNlaw9Z897ltZmRLGq1p4a
+RnWL8FPkBz9SCSKXS8uNyV5oMNVn4G1obCkc106iWuKBTibffYQzq5TG8FYVJKrh
+RwWB6piacEB8hl20IIWSxIM3J9tT7CPSnk5RYYCTRHgA5OOrqZhC7JefudrP8n+M
+pxkDgNORDu7GCfAuisrf7dXYjLsxG4tu22DBJJC0c/IpRpXDnOuJN1Q5e/3VUKKW
+mypNumuQpP5lc1ZFG64TRzb1HR6oIdHfbrVQfdiQXpvdcFx+Fl57WuUraXRV6qfb
+4ZmKHX1JEwM/7tu21QE4F1dz0jroLSricZxfaCTHHWNfvGJoZ30/MZUrpSC0IfB3
+iQutxbZrwIlTBt+fGLtm3vDtwMFNWM+Rb1lrOxEQd2eijdxhvBOHtlIcswARAQAB
+tERIYXNoaUNvcnAgU2VjdXJpdHkgKGhhc2hpY29ycC5jb20vc2VjdXJpdHkpIDxz
+ZWN1cml0eUBoYXNoaWNvcnAuY29tPokCVAQTAQoAPhYhBMh0AR8KtAURDQIQVTQ2
+XZRy10aPBQJgffsZAhsDBQkJZgGABQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJ
+EDQ2XZRy10aPtpcP/0PhJKiHtC1zREpRTrjGizoyk4Sl2SXpBZYhkdrG++abo6zs
+buaAG7kgWWChVXBo5E20L7dbstFK7OjVs7vAg/OLgO9dPD8n2M19rpqSbbvKYWvp
+0NSgvFTT7lbyDhtPj0/bzpkZEhmvQaDWGBsbDdb2dBHGitCXhGMpdP0BuuPWEix+
+QnUMaPwU51q9GM2guL45Tgks9EKNnpDR6ZdCeWcqo1IDmklloidxT8aKL21UOb8t
+cD+Bg8iPaAr73bW7Jh8TdcV6s6DBFub+xPJEB/0bVPmq3ZHs5B4NItroZ3r+h3ke
+VDoSOSIZLl6JtVooOJ2la9ZuMqxchO3mrXLlXxVCo6cGcSuOmOdQSz4OhQE5zBxx
+LuzA5ASIjASSeNZaRnffLIHmht17BPslgNPtm6ufyOk02P5XXwa69UCjA3RYrA2P
+QNNC+OWZ8qQLnzGldqE4MnRNAxRxV6cFNzv14ooKf7+k686LdZrP/3fQu2p3k5rY
+0xQUXKh1uwMUMtGR867ZBYaxYvwqDrg9XB7xi3N6aNyNQ+r7zI2lt65lzwG1v9hg
+FG2AHrDlBkQi/t3wiTS3JOo/GCT8BjN0nJh0lGaRFtQv2cXOQGVRW8+V/9IpqEJ1
+qQreftdBFWxvH7VJq2mSOXUJyRsoUrjkUuIivaA9Ocdipk2CkP8bpuGz7ZF4uQIN
+BGB9+xkBEACoklYsfvWRCjOwS8TOKBTfl8myuP9V9uBNbyHufzNETbhYeT33Cj0M
+GCNd9GdoaknzBQLbQVSQogA+spqVvQPz1MND18GIdtmr0BXENiZE7SRvu76jNqLp
+KxYALoK2Pc3yK0JGD30HcIIgx+lOofrVPA2dfVPTj1wXvm0rbSGA4Wd4Ng3d2AoR
+G/wZDAQ7sdZi1A9hhfugTFZwfqR3XAYCk+PUeoFrkJ0O7wngaon+6x2GJVedVPOs
+2x/XOR4l9ytFP3o+5ILhVnsK+ESVD9AQz2fhDEU6RhvzaqtHe+sQccR3oVLoGcat
+ma5rbfzH0Fhj0JtkbP7WreQf9udYgXxVJKXLQFQgel34egEGG+NlbGSPG+qHOZtY
+4uWdlDSvmo+1P95P4VG/EBteqyBbDDGDGiMs6lAMg2cULrwOsbxWjsWka8y2IN3z
+1stlIJFvW2kggU+bKnQ+sNQnclq3wzCJjeDBfucR3a5WRojDtGoJP6Fc3luUtS7V
+5TAdOx4dhaMFU9+01OoH8ZdTRiHZ1K7RFeAIslSyd4iA/xkhOhHq89F4ECQf3Bt4
+ZhGsXDTaA/VgHmf3AULbrC94O7HNqOvTWzwGiWHLfcxXQsr+ijIEQvh6rHKmJK8R
+9NMHqc3L18eMO6bqrzEHW0Xoiu9W8Yj+WuB3IKdhclT3w0pO4Pj8gQARAQABiQI8
+BBgBCgAmFiEEyHQBHwq0BRENAhBVNDZdlHLXRo8FAmB9+xkCGwwFCQlmAYAACgkQ +NDZdlHLXRo9ZnA/7BmdpQLeTjEiXEJyW46efxlV1f6THn9U50GWcE9tebxCXgmQf +u+Uju4hreltx6GDi/zbVVV3HCa0yaJ4JVvA4LBULJVe3ym6tXXSYaOfMdkiK6P1v +JgfpBQ/b/mWB0yuWTUtWx18BQQwlNEQWcGe8n1lBbYsH9g7QkacRNb8tKUrUbWlQ +QsU8wuFgly22m+Va1nO2N5C/eE/ZEHyN15jEQ+QwgQgPrK2wThcOMyNMQX/VNEr1 +Y3bI2wHfZFjotmek3d7ZfP2VjyDudnmCPQ5xjezWpKbN1kvjO3as2yhcVKfnvQI5 +P5Frj19NgMIGAp7X6pF5Csr4FX/Vw316+AFJd9Ibhfud79HAylvFydpcYbvZpScl +7zgtgaXMCVtthe3GsG4gO7IdxxEBZ/Fm4NLnmbzCIWOsPMx/FxH06a539xFq/1E2 +1nYFjiKg8a5JFmYU/4mV9MQs4bP/3ip9byi10V+fEIfp5cEEmfNeVeW5E7J8PqG9 +t4rLJ8FR4yJgQUa2gs2SNYsjWQuwS/MJvAv4fDKlkQjQmYRAOp1SszAnyaplvri4 +ncmfDsf0r65/sd6S40g5lHH8LIbGxcOIN6kwthSTPWX89r42CbY8GzjTkaeejNKx +v1aCrO58wAtursO1DiXCvBY7+NdafMRnoHwBk50iPqrVkNA8fv+auRyB2/G5Ag0E +YH3+JQEQALivllTjMolxUW2OxrXb+a2Pt6vjCBsiJzrUj0Pa63U+lT9jldbCCfgP +wDpcDuO1O05Q8k1MoYZ6HddjWnqKG7S3eqkV5c3ct3amAXp513QDKZUfIDylOmhU +qvxjEgvGjdRjz6kECFGYr6Vnj/p6AwWv4/FBRFlrq7cnQgPynbIH4hrWvewp3Tqw +GVgqm5RRofuAugi8iZQVlAiQZJo88yaztAQ/7VsXBiHTn61ugQ8bKdAsr8w/ZZU5 +HScHLqRolcYg0cKN91c0EbJq9k1LUC//CakPB9mhi5+aUVUGusIM8ECShUEgSTCi +KQiJUPZ2CFbbPE9L5o9xoPCxjXoX+r7L/WyoCPTeoS3YRUMEnWKvc42Yxz3meRb+ +BmaqgbheNmzOah5nMwPupJYmHrjWPkX7oyyHxLSFw4dtoP2j6Z7GdRXKa2dUYdk2 +x3JYKocrDoPHh3Q0TAZujtpdjFi1BS8pbxYFb3hHmGSdvz7T7KcqP7ChC7k2RAKO +GiG7QQe4NX3sSMgweYpl4OwvQOn73t5CVWYp/gIBNZGsU3Pto8g27vHeWyH9mKr4 +cSepDhw+/X8FGRNdxNfpLKm7Vc0Sm9Sof8TRFrBTqX+vIQupYHRi5QQCuYaV6OVr +ITeegNK3So4m39d6ajCR9QxRbmjnx9UcnSYYDmIB6fpBuwT0ogNtABEBAAGJBHIE +GAEKACYCGwIWIQTIdAEfCrQFEQ0CEFU0Nl2UctdGjwUCYH4bgAUJAeFQ2wJAwXQg +BBkBCgAdFiEEs2y6kaLAcwxDX8KAsLRBCXaFtnYFAmB9/iUACgkQsLRBCXaFtnYX +BhAAlxejyFXoQwyGo9U+2g9N6LUb/tNtH29RHYxy4A3/ZUY7d/FMkArmh4+dfjf0 +p9MJz98Zkps20kaYP+2YzYmaizO6OA6RIddcEXQDRCPHmLts3097mJ/skx9qLAf6 +rh9J7jWeSqWO6VW6Mlx8j9m7sm3Ae1OsjOx/m7lGZOhY4UYfY627+Jf7WQ5103Qs +lgQ09es/vhTCx0g34SYEmMW15Tc3eCjQ21b1MeJD/V26npeakV8iCZ1kHZHawPq/ +aCCuYEcCeQOOteTWvl7HXaHMhHIx7jjOd8XX9V+UxsGz2WCIxX/j7EEEc7CAxwAN +nWp9jXeLfxYfjrUB7XQZsGCd4EHHzUyCf7iRJL7OJ3tz5Z+rOlNjSgci+ycHEccL +YeFAEV+Fz+sj7q4cFAferkr7imY1XEI0Ji5P8p/uRYw/n8uUf7LrLw5TzHmZsTSC +UaiL4llRzkDC6cVhYfqQWUXDd/r385OkE4oalNNE+n+txNRx92rpvXWZ5qFYfv7E +95fltvpXc0iOugPMzyof3lwo3Xi4WZKc1CC/jEviKTQhfn3WZukuF5lbz3V1PQfI +xFsYe9WYQmp25XGgezjXzp89C/OIcYsVB1KJAKihgbYdHyUN4fRCmOszmOUwEAKR +3k5j4X8V5bk08sA69NVXPn2ofxyk3YYOMYWW8ouObnXoS8QJEDQ2XZRy10aPMpsQ +AIbwX21erVqUDMPn1uONP6o4NBEq4MwG7d+fT85rc1U0RfeKBwjucAE/iStZDQoM +ZKWvGhFR+uoyg1LrXNKuSPB82unh2bpvj4zEnJsJadiwtShTKDsikhrfFEK3aCK8 +Zuhpiu3jxMFDhpFzlxsSwaCcGJqcdwGhWUx0ZAVD2X71UCFoOXPjF9fNnpy80YNp +flPjj2RnOZbJyBIM0sWIVMd8F44qkTASf8K5Qb47WFN5tSpePq7OCm7s8u+lYZGK +wR18K7VliundR+5a8XAOyUXOL5UsDaQCK4Lj4lRaeFXunXl3DJ4E+7BKzZhReJL6 +EugV5eaGonA52TWtFdB8p+79wPUeI3KcdPmQ9Ll5Zi/jBemY4bzasmgKzNeMtwWP +fk6WgrvBwptqohw71HDymGxFUnUP7XYYjic2sVKhv9AevMGycVgwWBiWroDCQ9Ja +btKfxHhI2p+g+rcywmBobWJbZsujTNjhtme+kNn1mhJsD3bKPjKQfAxaTskBLb0V +wgV21891TS1Dq9kdPLwoS4XNpYg2LLB4p9hmeG3fu9+OmqwY5oKXsHiWc43dei9Y +yxZ1AAUOIaIdPkq+YG/PhlGE4YcQZ4RPpltAr0HfGgZhmXWigbGS+66pUj+Ojysc +j0K5tCVxVu0fhhFpOlHv0LWaxCbnkgkQH9jfMEJkAWMOuQINBGCAXCYBEADW6RNr +ZVGNXvHVBqSiOWaxl1XOiEoiHPt50Aijt25yXbG+0kHIFSoR+1g6Lh20JTCChgfQ +kGGjzQvEuG1HTw07YhsvLc0pkjNMfu6gJqFox/ogc53mz69OxXauzUQ/TZ27GDVp +UBu+EhDKt1s3OtA6Bjz/csop/Um7gT0+ivHyvJ/jGdnPEZv8tNuSE/Uo+hn/Q9hg +8SbveZzo3C+U4KcabCESEFl8Gq6aRi9vAfa65oxD5jKaIz7cy+pwb0lizqlW7H9t +Qlr3dBfdIcdzgR55hTFC5/XrcwJ6/nHVH/xGskEasnfCQX8RYKMuy0UADJy72TkZ +bYaCx+XXIcVB8GTOmJVoAhrTSSVLAZspfCnjwnSxisDn3ZzsYrq3cV6sU8b+QlIX 
+7VAjurE+5cZiVlaxgCjyhKqlGgmonnReWOBacCgL/UvuwMmMp5TTLmiLXLT7uxeG +ojEyoCk4sMrqrU1jevHyGlDJH9Taux15GILDwnYFfAvPF9WCid4UZ4Ouwjcaxfys +3LxNiZIlUsXNKwS3mhiMRL4TRsbs4k4QE+LIMOsauIvcvm8/frydvQ/kUwIhVTH8 +0XGOH909bYtJvY3fudK7ShIwm7ZFTduBJUG473E/Fn3VkhTmBX6+PjOC50HR/Hyb +waRCzfDruMe3TAcE/tSP5CUOb9C7+P+hPzQcDwARAQABiQRyBBgBCgAmFiEEyHQB +Hwq0BRENAhBVNDZdlHLXRo8FAmCAXCYCGwIFCQlmAYACQAkQNDZdlHLXRo/BdCAE +GQEKAB0WIQQ3TsdbSFkTYEqDHMfIIMbVzSerhwUCYIBcJgAKCRDIIMbVzSerh0Xw +D/9ghnUsoNCu1OulcoJdHboMazJvDt/znttdQSnULBVElgM5zk0Uyv87zFBzuCyQ +JWL3bWesQ2uFx5fRWEPDEfWVdDrjpQGb1OCCQyz1QlNPV/1M1/xhKGS9EeXrL8Dw +F6KTGkRwn1yXiP4BGgfeFIQHmJcKXEZ9HkrpNb8mcexkROv4aIPAwn+IaE+NHVtt +IBnufMXLyfpkWJQtJa9elh9PMLlHHnuvnYLvuAoOkhuvs7fXDMpfFZ01C+QSv1dz +Hm52GSStERQzZ51w4c0rYDneYDniC/sQT1x3dP5Xf6wzO+EhRMabkvoTbMqPsTEP +xyWr2pNtTBYp7pfQjsHxhJpQF0xjGN9C39z7f3gJG8IJhnPeulUqEZjhRFyVZQ6/ +siUeq7vu4+dM/JQL+i7KKe7Lp9UMrG6NLMH+ltaoD3+lVm8fdTUxS5MNPoA/I8cK +1OWTJHkrp7V/XaY7mUtvQn5V1yET5b4bogz4nME6WLiFMd+7x73gB+YJ6MGYNuO8 +e/NFK67MfHbk1/AiPTAJ6s5uHRQIkZcBPG7y5PpfcHpIlwPYCDGYlTajZXblyKrw +BttVnYKvKsnlysv11glSg0DphGxQJbXzWpvBNyhMNH5dffcfvd3eXJAxnD81GD2z +ZAriMJ4Av2TfeqQ2nxd2ddn0jX4WVHtAvLXfCgLM2Gveho4jD/9sZ6PZz/rEeTvt +h88t50qPcBa4bb25X0B5FO3TeK2LL3VKLuEp5lgdcHVonrcdqZFobN1CgGJua8TW +SprIkh+8ATZ/FXQTi01NzLhHXT1IQzSpFaZw0gb2f5ruXwvTPpfXzQrs2omY+7s7 +fkCwGPesvpSXPKn9v8uhUwD7NGW/Dm+jUM+QtC/FqzX7+/Q+OuEPjClUh1cqopCZ +EvAI3HjnavGrYuU6DgQdjyGT/UDbuwbCXqHxHojVVkISGzCTGpmBcQYQqhcFRedJ +yJlu6PSXlA7+8Ajh52oiMJ3ez4xSssFgUQAyOB16432tm4erpGmCyakkoRmMUn3p +wx+QIppxRlsHznhcCQKR3tcblUqH3vq5i4/ZAihusMCa0YrShtxfdSb13oKX+pFr +aZXvxyZlCa5qoQQBV1sowmPL1N2j3dR9TVpdTyCFQSv4KeiExmowtLIjeCppRBEK +eeYHJnlfkyKXPhxTVVO6H+dU4nVu0ASQZ07KiQjbI+zTpPKFLPp3/0sPRJM57r1+ +aTS71iR7nZNZ1f8LZV2OvGE6fJVtgJ1J4Nu02K54uuIhU3tg1+7Xt+IqwRc9rbVr +pHH/hFCYBPW2D2dxB+k2pQlg5NI+TpsXj5Zun8kRw5RtVb+dLuiH/xmxArIee8Jq +ZF5q4h4I33PSGDdSvGXn9UMY5Isjpg== +=7pIB +-----END PGP PUBLIC KEY BLOCK-----` +) diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go new file mode 100644 index 0000000000..843de8cdfa --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go @@ -0,0 +1,198 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package releasesjson + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "log" + "net/http" + "net/url" + "strings" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/hashicorp/hc-install/internal/httpclient" +) + +type ChecksumDownloader struct { + ProductVersion *ProductVersion + Logger *log.Logger + ArmoredPublicKey string + + BaseURL string +} + +type ChecksumFileMap map[string]HashSum + +type HashSum []byte + +func (hs HashSum) Size() int { + return len(hs) +} + +func (hs HashSum) String() string { + return hex.EncodeToString(hs) +} + +func HashSumFromHexDigest(hexDigest string) (HashSum, error) { + sumBytes, err := hex.DecodeString(hexDigest) + if err != nil { + return nil, err + } + return HashSum(sumBytes), nil +} + +func (cd *ChecksumDownloader) DownloadAndVerifyChecksums(ctx context.Context) (ChecksumFileMap, error) { + sigFilename, err := cd.findSigFilename(cd.ProductVersion) + if err != nil { + return nil, err + } + + client := httpclient.NewHTTPClient() + sigURL := fmt.Sprintf("%s/%s/%s/%s", cd.BaseURL, + url.PathEscape(cd.ProductVersion.Name), + url.PathEscape(cd.ProductVersion.RawVersion), + url.PathEscape(sigFilename)) + cd.Logger.Printf("downloading signature from %s", sigURL) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, sigURL, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request for %q: %w", sigURL, err) + } + sigResp, err := client.Do(req) + if err != nil { + return nil, err + } + + if sigResp.StatusCode != 200 { + return nil, fmt.Errorf("failed to download signature from %q: %s", sigURL, sigResp.Status) + } + + defer sigResp.Body.Close() + + shasumsURL := fmt.Sprintf("%s/%s/%s/%s", cd.BaseURL, + url.PathEscape(cd.ProductVersion.Name), + url.PathEscape(cd.ProductVersion.RawVersion), + url.PathEscape(cd.ProductVersion.SHASUMS)) + cd.Logger.Printf("downloading checksums from %s", shasumsURL) + + req, err = http.NewRequestWithContext(ctx, http.MethodGet, shasumsURL, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request for %q: %w", shasumsURL, err) + } + sumsResp, err := client.Do(req) + if err != nil { + return nil, err + } + + if sumsResp.StatusCode != 200 { + return nil, fmt.Errorf("failed to download checksums from %q: %s", shasumsURL, sumsResp.Status) + } + + defer sumsResp.Body.Close() + + var shaSums strings.Builder + sumsReader := io.TeeReader(sumsResp.Body, &shaSums) + + err = cd.verifySumsSignature(sumsReader, sigResp.Body) + if err != nil { + return nil, err + } + + return fileMapFromChecksums(shaSums) +} + +func fileMapFromChecksums(checksums strings.Builder) (ChecksumFileMap, error) { + csMap := make(ChecksumFileMap, 0) + + lines := strings.Split(checksums.String(), "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + parts := strings.Fields(line) + if len(parts) != 2 { + return nil, fmt.Errorf("unexpected checksum line format: %q", line) + } + + h, err := HashSumFromHexDigest(parts[0]) + if err != nil { + return nil, err + } + + if h.Size() != sha256.Size { + return nil, fmt.Errorf("unexpected sha256 format (len: %d, expected: %d)", + h.Size(), sha256.Size) + } + + csMap[parts[1]] = h + } + return csMap, nil +} + +func (cd *ChecksumDownloader) verifySumsSignature(checksums, signature io.Reader) error { + el, err := cd.keyEntityList() + if err != nil { + return err + } + + _, err = openpgp.CheckDetachedSignature(el, checksums, signature, nil) + if err != nil { + return 
fmt.Errorf("unable to verify checksums signature: %w", err) + } + + cd.Logger.Printf("checksum signature is valid") + + return nil +} + +func (cd *ChecksumDownloader) findSigFilename(pv *ProductVersion) (string, error) { + sigFiles := pv.SHASUMSSigs + if len(sigFiles) == 0 { + sigFiles = []string{pv.SHASUMSSig} + } + + keyIds, err := cd.pubKeyIds() + if err != nil { + return "", err + } + + for _, filename := range sigFiles { + for _, keyID := range keyIds { + if strings.HasSuffix(filename, fmt.Sprintf("_SHA256SUMS.%s.sig", keyID)) { + return filename, nil + } + } + if strings.HasSuffix(filename, "_SHA256SUMS.sig") { + return filename, nil + } + } + + return "", fmt.Errorf("no suitable sig file found") +} + +func (cd *ChecksumDownloader) pubKeyIds() ([]string, error) { + entityList, err := cd.keyEntityList() + if err != nil { + return nil, err + } + + fingerprints := make([]string, 0) + for _, entity := range entityList { + fingerprints = append(fingerprints, entity.PrimaryKey.KeyIdShortString()) + } + + return fingerprints, nil +} + +func (cd *ChecksumDownloader) keyEntityList() (openpgp.EntityList, error) { + if cd.ArmoredPublicKey == "" { + return nil, fmt.Errorf("no public key provided") + } + return openpgp.ReadArmoredKeyRing(strings.NewReader(cd.ArmoredPublicKey)) +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go new file mode 100644 index 0000000000..146c1cf029 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go @@ -0,0 +1,224 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package releasesjson + +import ( + "archive/zip" + "bytes" + "context" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/hashicorp/hc-install/internal/httpclient" +) + +type Downloader struct { + Logger *log.Logger + VerifyChecksum bool + ArmoredPublicKey string + BaseURL string +} + +func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, binDir string, licenseDir string) (zipFilePath string, err error) { + if len(pv.Builds) == 0 { + return "", fmt.Errorf("no builds found for %s %s", pv.Name, pv.Version) + } + + pb, ok := pv.Builds.FilterBuild(runtime.GOOS, runtime.GOARCH, "zip") + if !ok { + return "", fmt.Errorf("no ZIP archive found for %s %s %s/%s", + pv.Name, pv.Version, runtime.GOOS, runtime.GOARCH) + } + + var verifiedChecksum HashSum + if d.VerifyChecksum { + v := &ChecksumDownloader{ + BaseURL: d.BaseURL, + ProductVersion: pv, + Logger: d.Logger, + ArmoredPublicKey: d.ArmoredPublicKey, + } + verifiedChecksums, err := v.DownloadAndVerifyChecksums(ctx) + if err != nil { + return "", err + } + var ok bool + verifiedChecksum, ok = verifiedChecksums[pb.Filename] + if !ok { + return "", fmt.Errorf("no checksum found for %q", pb.Filename) + } + } + + client := httpclient.NewHTTPClient() + + archiveURL := pb.URL + if d.BaseURL != "" { + // ensure that absolute download links from mocked responses + // are still pointing to the mock server if one is set + baseURL, err := url.Parse(d.BaseURL) + if err != nil { + return "", err + } + + u, err := url.Parse(archiveURL) + if err != nil { + return "", err + } + u.Scheme = baseURL.Scheme + u.Host = baseURL.Host + archiveURL = u.String() + } + + d.Logger.Printf("downloading archive from %s", archiveURL) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, 
archiveURL, nil) + if err != nil { + return "", fmt.Errorf("failed to create request for %q: %w", archiveURL, err) + } + resp, err := client.Do(req) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("failed to download ZIP archive from %q: %s", archiveURL, resp.Status) + } + + defer resp.Body.Close() + + pkgReader := resp.Body + + contentType := resp.Header.Get("content-type") + if !contentTypeIsZip(contentType) { + return "", fmt.Errorf("unexpected content-type: %s (expected any of %q)", + contentType, zipMimeTypes) + } + + expectedSize := resp.ContentLength + + pkgFile, err := ioutil.TempFile("", pb.Filename) + if err != nil { + return "", err + } + defer pkgFile.Close() + pkgFilePath, err := filepath.Abs(pkgFile.Name()) + + d.Logger.Printf("copying %q (%d bytes) to %s", pb.Filename, expectedSize, pkgFile.Name()) + + var bytesCopied int64 + if d.VerifyChecksum { + d.Logger.Printf("verifying checksum of %q", pb.Filename) + h := sha256.New() + r := io.TeeReader(resp.Body, pkgFile) + + bytesCopied, err = io.Copy(h, r) + if err != nil { + return "", err + } + + calculatedSum := h.Sum(nil) + if !bytes.Equal(calculatedSum, verifiedChecksum) { + return pkgFilePath, fmt.Errorf( + "checksum mismatch (expected: %x, got: %x)", + verifiedChecksum, calculatedSum, + ) + } + } else { + bytesCopied, err = io.Copy(pkgFile, pkgReader) + if err != nil { + return pkgFilePath, err + } + } + + d.Logger.Printf("copied %d bytes to %s", bytesCopied, pkgFile.Name()) + + if expectedSize != 0 && bytesCopied != int64(expectedSize) { + return pkgFilePath, fmt.Errorf( + "unexpected size (downloaded: %d, expected: %d)", + bytesCopied, expectedSize, + ) + } + + r, err := zip.OpenReader(pkgFile.Name()) + if err != nil { + return pkgFilePath, err + } + defer r.Close() + + for _, f := range r.File { + if strings.Contains(f.Name, "..") { + // While we generally trust the source ZIP file + // we still reject path traversal attempts as a precaution. 
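+ // Entries with ".." in their name are skipped entirely, so a
+ // crafted name such as "../../bin/evil" is never joined onto binDir.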
+ continue + } + srcFile, err := f.Open() + if err != nil { + return pkgFilePath, err + } + + // Determine the appropriate destination file path + dstDir := binDir + if isLicenseFile(f.Name) && licenseDir != "" { + dstDir = licenseDir + } + + d.Logger.Printf("unpacking %s to %s", f.Name, dstDir) + dstPath := filepath.Join(dstDir, f.Name) + dstFile, err := os.Create(dstPath) + if err != nil { + return pkgFilePath, err + } + + _, err = io.Copy(dstFile, srcFile) + if err != nil { + return pkgFilePath, err + } + srcFile.Close() + dstFile.Close() + } + + return pkgFilePath, nil +} + +// The production release site uses consistent single mime type +// but mime types are platform-dependent +// and we may use different OS under test +var zipMimeTypes = []string{ + "application/x-zip-compressed", // Windows + "application/zip", // Unix +} + +func contentTypeIsZip(contentType string) bool { + for _, mt := range zipMimeTypes { + if mt == contentType { + return true + } + } + return false +} + +// Enterprise products have a few additional license files +// that need to be extracted to a separate directory +var licenseFiles = []string{ + "EULA.txt", + "TermsOfEvaluation.txt", +} + +func isLicenseFile(filename string) bool { + for _, lf := range licenseFiles { + if lf == filename { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go new file mode 100644 index 0000000000..99b811a645 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package releasesjson + +import "github.com/hashicorp/go-version" + +// ProductVersion is a wrapper around a particular product version like +// "consul 0.5.1". A ProductVersion may have one or more builds. +type ProductVersion struct { + Name string `json:"name"` + RawVersion string `json:"version"` + Version *version.Version `json:"-"` + SHASUMS string `json:"shasums,omitempty"` + SHASUMSSig string `json:"shasums_signature,omitempty"` + SHASUMSSigs []string `json:"shasums_signatures,omitempty"` + Builds ProductBuilds `json:"builds"` +} + +type ProductVersionsMap map[string]*ProductVersion + +type ProductVersions []*ProductVersion + +func (pv ProductVersions) Len() int { + return len(pv) +} + +func (pv ProductVersions) Less(i, j int) bool { + return pv[i].Version.LessThan(pv[j].Version) +} + +func (pv ProductVersions) Swap(i, j int) { + pv[i], pv[j] = pv[j], pv[i] +} + +func (pvm ProductVersionsMap) AsSlice() ProductVersions { + versions := make(ProductVersions, 0) + + for _, pVersion := range pvm { + versions = append(versions, pVersion) + } + + return versions +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go new file mode 100644 index 0000000000..755019f2f2 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go @@ -0,0 +1,169 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package releasesjson + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "strings" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/httpclient" +) + +const defaultBaseURL = "https://releases.hashicorp.com" + +// Product is a top-level product like "Consul" or "Nomad". A Product may have +// one or more versions. +type Product struct { + Name string `json:"name"` + Versions ProductVersionsMap `json:"versions"` +} + +type ProductBuilds []*ProductBuild + +func (pbs ProductBuilds) FilterBuild(os string, arch string, suffix string) (*ProductBuild, bool) { + for _, pb := range pbs { + if pb.OS == os && pb.Arch == arch && strings.HasSuffix(pb.Filename, suffix) { + return pb, true + } + } + return nil, false +} + +// ProductBuild is an OS/arch-specific representation of a product. This is the +// actual file that a user would download, like "consul_0.5.1_linux_amd64". +type ProductBuild struct { + Name string `json:"name"` + Version string `json:"version"` + OS string `json:"os"` + Arch string `json:"arch"` + Filename string `json:"filename"` + URL string `json:"url"` +} + +type Releases struct { + logger *log.Logger + BaseURL string +} + +func NewReleases() *Releases { + return &Releases{ + logger: log.New(ioutil.Discard, "", 0), + BaseURL: defaultBaseURL, + } +} + +func (r *Releases) SetLogger(logger *log.Logger) { + r.logger = logger +} + +func (r *Releases) ListProductVersions(ctx context.Context, productName string) (ProductVersionsMap, error) { + client := httpclient.NewHTTPClient() + + productIndexURL := fmt.Sprintf("%s/%s/index.json", + r.BaseURL, + url.PathEscape(productName)) + r.logger.Printf("requesting versions from %s", productIndexURL) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, productIndexURL, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request for %q: %w", productIndexURL, err) + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("failed to obtain product versions from %q: %s ", + productIndexURL, resp.Status) + } + + contentType := resp.Header.Get("content-type") + if contentType != "application/json" && contentType != "application/vnd+hashicorp.releases-api.v0+json" { + return nil, fmt.Errorf("unexpected Content-Type: %q", contentType) + } + + defer resp.Body.Close() + + r.logger.Printf("received %s", resp.Status) + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + p := Product{} + err = json.Unmarshal(body, &p) + if err != nil { + return nil, fmt.Errorf("%w: failed to unmarshal: %q", + err, string(body)) + } + + for rawVersion := range p.Versions { + v, err := version.NewVersion(rawVersion) + if err != nil { + // remove unparseable version + delete(p.Versions, rawVersion) + continue + } + + p.Versions[rawVersion].Version = v + } + + return p.Versions, nil +} + +func (r *Releases) GetProductVersion(ctx context.Context, product string, version *version.Version) (*ProductVersion, error) { + client := httpclient.NewHTTPClient() + + indexURL := fmt.Sprintf("%s/%s/%s/index.json", + r.BaseURL, + url.PathEscape(product), + url.PathEscape(version.String())) + r.logger.Printf("requesting version from %s", indexURL) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, indexURL, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request for %q: %w", indexURL, err) + } + resp, err := 
client.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("failed to obtain product version from %q: %s ", + indexURL, resp.Status) + } + + contentType := resp.Header.Get("content-type") + if contentType != "application/json" && contentType != "application/vnd+hashicorp.releases-api.v0+json" { + return nil, fmt.Errorf("unexpected Content-Type: %q", contentType) + } + + defer resp.Body.Close() + + r.logger.Printf("received %s", resp.Status) + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + pv := &ProductVersion{} + err = json.Unmarshal(body, pv) + if err != nil { + return nil, fmt.Errorf("%w: failed to unmarshal response: %q", + err, string(body)) + } + + return pv, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/src/src.go b/vendor/github.com/hashicorp/hc-install/internal/src/src.go new file mode 100644 index 0000000000..9cac8a64e3 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/src/src.go @@ -0,0 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package src + +type InstallSrcSigil struct{} diff --git a/vendor/github.com/hashicorp/hc-install/internal/validators/validators.go b/vendor/github.com/hashicorp/hc-install/internal/validators/validators.go new file mode 100644 index 0000000000..8a331c4cf7 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/validators/validators.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package validators + +import "regexp" + +var ( + productNameRe = regexp.MustCompile(`^[a-z0-9-]+$`) + binaryNameRe = regexp.MustCompile(`^[a-zA-Z0-9-_.]+$`) +) + +// IsProductNameValid provides early user-facing validation of a product name +func IsProductNameValid(productName string) bool { + return productNameRe.MatchString(productName) +} + +// IsBinaryNameValid provides early user-facing validation of binary name +func IsBinaryNameValid(binaryName string) bool { + return binaryNameRe.MatchString(binaryName) +} diff --git a/vendor/github.com/hashicorp/hc-install/product/consul.go b/vendor/github.com/hashicorp/hc-install/product/consul.go new file mode 100644 index 0000000000..9789d7c318 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/product/consul.go @@ -0,0 +1,58 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package product + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "runtime" + "strings" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/build" +) + +var consulVersionOutputRe = regexp.MustCompile(`Consul ` + simpleVersionRe) + +var ( + v1_18 = version.Must(version.NewVersion("1.18")) +) + +var Consul = Product{ + Name: "consul", + BinaryName: func() string { + if runtime.GOOS == "windows" { + return "consul.exe" + } + return "consul" + }, + GetVersion: func(ctx context.Context, path string) (*version.Version, error) { + cmd := exec.CommandContext(ctx, path, "version") + + out, err := cmd.Output() + if err != nil { + return nil, err + } + + stdout := strings.TrimSpace(string(out)) + + submatches := consulVersionOutputRe.FindStringSubmatch(stdout) + if len(submatches) != 2 { + return nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout) + } + v, err := version.NewVersion(submatches[1]) + if err != nil { + return nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err) + } + + return v, err + }, + BuildInstructions: &BuildInstructions{ + GitRepoURL: "https://github.com/hashicorp/consul.git", + PreCloneCheck: &build.GoIsInstalled{}, + Build: &build.GoBuild{}, + }, +} diff --git a/vendor/github.com/hashicorp/hc-install/product/nomad.go b/vendor/github.com/hashicorp/hc-install/product/nomad.go new file mode 100644 index 0000000000..b675d9a19a --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/product/nomad.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package product + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "runtime" + "strings" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/build" +) + +var nomadVersionOutputRe = regexp.MustCompile(`Nomad ` + simpleVersionRe) + +var Nomad = Product{ + Name: "nomad", + BinaryName: func() string { + if runtime.GOOS == "windows" { + return "nomad.exe" + } + return "nomad" + }, + GetVersion: func(ctx context.Context, path string) (*version.Version, error) { + cmd := exec.CommandContext(ctx, path, "version") + + out, err := cmd.Output() + if err != nil { + return nil, err + } + + stdout := strings.TrimSpace(string(out)) + + submatches := nomadVersionOutputRe.FindStringSubmatch(stdout) + if len(submatches) != 2 { + return nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout) + } + v, err := version.NewVersion(submatches[1]) + if err != nil { + return nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err) + } + + return v, err + }, + BuildInstructions: &BuildInstructions{ + GitRepoURL: "https://github.com/hashicorp/nomad.git", + PreCloneCheck: &build.GoIsInstalled{}, + Build: &build.GoBuild{DetectVendoring: true}, + }, +} diff --git a/vendor/github.com/hashicorp/hc-install/product/product.go b/vendor/github.com/hashicorp/hc-install/product/product.go new file mode 100644 index 0000000000..85f2e11bf2 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/product/product.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package product + +import ( + "context" + "time" + + "github.com/hashicorp/go-version" +) + +type Product struct { + // Name which identifies the product + // on releases.hashicorp.com and in Checkpoint + Name string + + // BinaryName represents name of the unpacked binary to be executed or built + BinaryName BinaryNameFunc + + // GetVersion represents how to obtain the version of the product + // reflecting any output or CLI flag differences + GetVersion func(ctx context.Context, execPath string) (*version.Version, error) + + // BuildInstructions represents how to build the product "from scratch" + BuildInstructions *BuildInstructions +} + +type BinaryNameFunc func() string + +type BuildInstructions struct { + GitRepoURL string + + // CloneTimeout overrides default timeout + // for cloning the repository + CloneTimeout time.Duration + + // PreCloneCheck represents any checks to run + // prior to building, such as verifying build + // dependencies (e.g. whether Go is installed) + PreCloneCheck Checker + + // PreCloneCheckTimeout overrides default timeout + // for the PreCloneCheck + PreCloneCheckTimeout time.Duration + + // Build represents how to build the product + // after checking out the source code + Build Builder + + // BuildTimeout overrides default timeout + // for the Builder + BuildTimeout time.Duration +} + +type Checker interface { + Check(ctx context.Context) error +} + +type Builder interface { + Build(ctx context.Context, repoDir, targetDir, binaryName string) (string, error) + Remove(ctx context.Context) error +} diff --git a/vendor/github.com/hashicorp/hc-install/product/terraform.go b/vendor/github.com/hashicorp/hc-install/product/terraform.go new file mode 100644 index 0000000000..afb6b35fb3 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/product/terraform.go @@ -0,0 +1,58 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package product + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "runtime" + "strings" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/build" +) + +var ( + simpleVersionRe = `v?(?P<version>[0-9]+(?:\.[0-9]+)*(?:-[A-Za-z0-9\.]+)?)` + + terraformVersionOutputRe = regexp.MustCompile(`Terraform ` + simpleVersionRe) +) + +var Terraform = Product{ + Name: "terraform", + BinaryName: func() string { + if runtime.GOOS == "windows" { + return "terraform.exe" + } + return "terraform" + }, + GetVersion: func(ctx context.Context, path string) (*version.Version, error) { + cmd := exec.CommandContext(ctx, path, "version") + + out, err := cmd.Output() + if err != nil { + return nil, err + } + + stdout := strings.TrimSpace(string(out)) + + submatches := terraformVersionOutputRe.FindStringSubmatch(stdout) + if len(submatches) != 2 { + return nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout) + } + v, err := version.NewVersion(submatches[1]) + if err != nil { + return nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err) + } + + return v, err + }, + BuildInstructions: &BuildInstructions{ + GitRepoURL: "https://github.com/hashicorp/terraform.git", + PreCloneCheck: &build.GoIsInstalled{}, + Build: &build.GoBuild{DetectVendoring: true}, + }, +} diff --git a/vendor/github.com/hashicorp/hc-install/product/vault.go b/vendor/github.com/hashicorp/hc-install/product/vault.go new file mode 100644 index 0000000000..0b25965963 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/product/vault.go @@ -0,0 +1,56 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package product + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "runtime" + "strings" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/build" +) + +var ( + vaultVersionOutputRe = regexp.MustCompile(`Vault ` + simpleVersionRe) +) + +var Vault = Product{ + Name: "vault", + BinaryName: func() string { + if runtime.GOOS == "windows" { + return "vault.exe" + } + return "vault" + }, + GetVersion: func(ctx context.Context, path string) (*version.Version, error) { + cmd := exec.CommandContext(ctx, path, "version") + + out, err := cmd.Output() + if err != nil { + return nil, err + } + + stdout := strings.TrimSpace(string(out)) + + submatches := vaultVersionOutputRe.FindStringSubmatch(stdout) + if len(submatches) != 2 { + return nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout) + } + v, err := version.NewVersion(submatches[1]) + if err != nil { + return nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err) + } + + return v, err + }, + BuildInstructions: &BuildInstructions{ + GitRepoURL: "https://github.com/hashicorp/vault.git", + PreCloneCheck: &build.GoIsInstalled{}, + Build: &build.GoBuild{}, + }, +} diff --git a/vendor/github.com/hashicorp/hc-install/releases/enterprise.go b/vendor/github.com/hashicorp/hc-install/releases/enterprise.go new file mode 100644 index 0000000000..179d40d1cd --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/releases/enterprise.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package releases + +import "fmt" + +type EnterpriseOptions struct { + // LicenseDir represents directory path where to install license files (required) + LicenseDir string + + // Meta represents optional version metadata (e.g.
hsm, fips1402) + Meta string +} + +func enterpriseVersionMetadata(eo *EnterpriseOptions) string { + if eo == nil { + return "" + } + + metadata := "ent" + if eo.Meta != "" { + metadata += "." + eo.Meta + } + return metadata +} + +func validateEnterpriseOptions(eo *EnterpriseOptions) error { + if eo == nil { + return nil + } + + if eo.LicenseDir == "" { + return fmt.Errorf("LicenseDir must be provided when requesting enterprise versions") + } + + return nil +} diff --git a/vendor/github.com/hashicorp/hc-install/releases/exact_version.go b/vendor/github.com/hashicorp/hc-install/releases/exact_version.go new file mode 100644 index 0000000000..e42f4d239f --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/releases/exact_version.go @@ -0,0 +1,186 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package releases + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/pubkey" + rjson "github.com/hashicorp/hc-install/internal/releasesjson" + isrc "github.com/hashicorp/hc-install/internal/src" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" +) + +// ExactVersion installs the given Version of product +// to OS temp directory, or to InstallDir (if not empty) +type ExactVersion struct { + Product product.Product + Version *version.Version + InstallDir string + Timeout time.Duration + + // Enterprise indicates installation of enterprise version (leave nil for Community editions) + Enterprise *EnterpriseOptions + + SkipChecksumVerification bool + + // ArmoredPublicKey is a public PGP key in ASCII/armor format to use + // instead of built-in pubkey to verify signature of downloaded checksums + ArmoredPublicKey string + + apiBaseURL string + logger *log.Logger + pathsToRemove []string +} + +func (*ExactVersion) IsSourceImpl() isrc.InstallSrcSigil { + return isrc.InstallSrcSigil{} +} + +func (ev *ExactVersion) SetLogger(logger *log.Logger) { + ev.logger = logger +} + +func (ev *ExactVersion) log() *log.Logger { + if ev.logger == nil { + return discardLogger + } + return ev.logger +} + +func (ev *ExactVersion) Validate() error { + if !validators.IsProductNameValid(ev.Product.Name) { + return fmt.Errorf("invalid product name: %q", ev.Product.Name) + } + + if !validators.IsBinaryNameValid(ev.Product.BinaryName()) { + return fmt.Errorf("invalid binary name: %q", ev.Product.BinaryName()) + } + + if ev.Version == nil { + return fmt.Errorf("unknown version") + } + + if err := validateEnterpriseOptions(ev.Enterprise); err != nil { + return err + } + + return nil +} + +func (ev *ExactVersion) Install(ctx context.Context) (string, error) { + timeout := defaultInstallTimeout + if ev.Timeout > 0 { + timeout = ev.Timeout + } + ctx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + + if ev.pathsToRemove == nil { + ev.pathsToRemove = make([]string, 0) + } + + dstDir := ev.InstallDir + if dstDir == "" { + var err error + dirName := fmt.Sprintf("%s_*", ev.Product.Name) + dstDir, err = ioutil.TempDir("", dirName) + if err != nil { + return "", err + } + ev.pathsToRemove = append(ev.pathsToRemove, dstDir) + ev.log().Printf("created new temp dir at %s", dstDir) + } + ev.log().Printf("will install into dir at %s", dstDir) + + rels := rjson.NewReleases() + if ev.apiBaseURL != "" { + rels.BaseURL = ev.apiBaseURL + } + rels.SetLogger(ev.log()) + installVersion := ev.Version + if ev.Enterprise != nil { + 
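+ // Enterprise artifacts are published under versions carrying build
+ // metadata (e.g. "1.15.0+ent" or "1.15.0+ent.hsm"), so the requested
+ // version is rewritten to include that metadata before lookup.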
installVersion = versionWithMetadata(installVersion, enterpriseVersionMetadata(ev.Enterprise)) + } + pv, err := rels.GetProductVersion(ctx, ev.Product.Name, installVersion) + if err != nil { + return "", err + } + + d := &rjson.Downloader{ + Logger: ev.log(), + VerifyChecksum: !ev.SkipChecksumVerification, + ArmoredPublicKey: pubkey.DefaultPublicKey, + BaseURL: rels.BaseURL, + } + if ev.ArmoredPublicKey != "" { + d.ArmoredPublicKey = ev.ArmoredPublicKey + } + if ev.apiBaseURL != "" { + d.BaseURL = ev.apiBaseURL + } + + licenseDir := "" + if ev.Enterprise != nil { + licenseDir = ev.Enterprise.LicenseDir + } + zipFilePath, err := d.DownloadAndUnpack(ctx, pv, dstDir, licenseDir) + if zipFilePath != "" { + ev.pathsToRemove = append(ev.pathsToRemove, zipFilePath) + } + if err != nil { + return "", err + } + + execPath := filepath.Join(dstDir, ev.Product.BinaryName()) + + ev.pathsToRemove = append(ev.pathsToRemove, execPath) + + ev.log().Printf("changing perms of %s", execPath) + err = os.Chmod(execPath, 0o700) + if err != nil { + return "", err + } + + return execPath, nil +} + +func (ev *ExactVersion) Remove(ctx context.Context) error { + if ev.pathsToRemove != nil { + for _, path := range ev.pathsToRemove { + err := os.RemoveAll(path) + if err != nil { + return err + } + } + } + + return nil +} + +// versionWithMetadata returns a new version by combining the given version with the given metadata +func versionWithMetadata(v *version.Version, metadata string) *version.Version { + if v == nil { + return nil + } + + if metadata == "" { + return v + } + + v2, err := version.NewVersion(fmt.Sprintf("%s+%s", v.Core(), metadata)) + if err != nil { + return nil + } + + return v2 +} diff --git a/vendor/github.com/hashicorp/hc-install/releases/latest_version.go b/vendor/github.com/hashicorp/hc-install/releases/latest_version.go new file mode 100644 index 0000000000..9893b223ad --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/releases/latest_version.go @@ -0,0 +1,195 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package releases + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/pubkey" + rjson "github.com/hashicorp/hc-install/internal/releasesjson" + isrc "github.com/hashicorp/hc-install/internal/src" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" +) + +type LatestVersion struct { + Product product.Product + Constraints version.Constraints + InstallDir string + Timeout time.Duration + IncludePrereleases bool + + // Enterprise indicates installation of enterprise version (leave nil for Community editions) + Enterprise *EnterpriseOptions + + SkipChecksumVerification bool + + // ArmoredPublicKey is a public PGP key in ASCII/armor format to use + // instead of built-in pubkey to verify signature of downloaded checksums + ArmoredPublicKey string + + apiBaseURL string + logger *log.Logger + pathsToRemove []string +} + +func (*LatestVersion) IsSourceImpl() isrc.InstallSrcSigil { + return isrc.InstallSrcSigil{} +} + +func (lv *LatestVersion) SetLogger(logger *log.Logger) { + lv.logger = logger +} + +func (lv *LatestVersion) log() *log.Logger { + if lv.logger == nil { + return discardLogger + } + return lv.logger +} + +func (lv *LatestVersion) Validate() error { + if !validators.IsProductNameValid(lv.Product.Name) { + return fmt.Errorf("invalid product name: %q", lv.Product.Name) + } + + if !validators.IsBinaryNameValid(lv.Product.BinaryName()) { + return fmt.Errorf("invalid binary name: %q", lv.Product.BinaryName()) + } + + if err := validateEnterpriseOptions(lv.Enterprise); err != nil { + return err + } + + return nil +} + +func (lv *LatestVersion) Install(ctx context.Context) (string, error) { + timeout := defaultInstallTimeout + if lv.Timeout > 0 { + timeout = lv.Timeout + } + ctx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + + if lv.pathsToRemove == nil { + lv.pathsToRemove = make([]string, 0) + } + + dstDir := lv.InstallDir + if dstDir == "" { + var err error + dirName := fmt.Sprintf("%s_*", lv.Product.Name) + dstDir, err = ioutil.TempDir("", dirName) + if err != nil { + return "", err + } + lv.pathsToRemove = append(lv.pathsToRemove, dstDir) + lv.log().Printf("created new temp dir at %s", dstDir) + } + lv.log().Printf("will install into dir at %s", dstDir) + + rels := rjson.NewReleases() + if lv.apiBaseURL != "" { + rels.BaseURL = lv.apiBaseURL + } + rels.SetLogger(lv.log()) + versions, err := rels.ListProductVersions(ctx, lv.Product.Name) + if err != nil { + return "", err + } + + if len(versions) == 0 { + return "", fmt.Errorf("no versions found for %q", lv.Product.Name) + } + + versionToInstall, ok := lv.findLatestMatchingVersion(versions, lv.Constraints) + if !ok { + return "", fmt.Errorf("no matching version found for %q", lv.Constraints) + } + + d := &rjson.Downloader{ + Logger: lv.log(), + VerifyChecksum: !lv.SkipChecksumVerification, + ArmoredPublicKey: pubkey.DefaultPublicKey, + BaseURL: rels.BaseURL, + } + if lv.ArmoredPublicKey != "" { + d.ArmoredPublicKey = lv.ArmoredPublicKey + } + if lv.apiBaseURL != "" { + d.BaseURL = lv.apiBaseURL + } + licenseDir := "" + if lv.Enterprise != nil { + licenseDir = lv.Enterprise.LicenseDir + } + zipFilePath, err := d.DownloadAndUnpack(ctx, versionToInstall, dstDir, licenseDir) + if zipFilePath != "" { + lv.pathsToRemove = append(lv.pathsToRemove, zipFilePath) + } + if err != nil { + return "", err + } + + 
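+ // The unpacked binary is expected at dstDir/<BinaryName>; record it
+ // for later Remove() and make it executable below.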
execPath := filepath.Join(dstDir, lv.Product.BinaryName()) + + lv.pathsToRemove = append(lv.pathsToRemove, execPath) + + lv.log().Printf("changing perms of %s", execPath) + err = os.Chmod(execPath, 0o700) + if err != nil { + return "", err + } + + return execPath, nil +} + +func (lv *LatestVersion) Remove(ctx context.Context) error { + if lv.pathsToRemove != nil { + for _, path := range lv.pathsToRemove { + err := os.RemoveAll(path) + if err != nil { + return err + } + } + } + return nil +} + +func (lv *LatestVersion) findLatestMatchingVersion(pvs rjson.ProductVersionsMap, vc version.Constraints) (*rjson.ProductVersion, bool) { + expectedMetadata := enterpriseVersionMetadata(lv.Enterprise) + versions := make(version.Collection, 0) + for _, pv := range pvs.AsSlice() { + if !lv.IncludePrereleases && pv.Version.Prerelease() != "" { + // skip prereleases if desired + continue + } + + if pv.Version.Metadata() != expectedMetadata { + continue + } + + if vc.Check(pv.Version) { + versions = append(versions, pv.Version) + } + } + + if len(versions) == 0 { + return nil, false + } + + sort.Stable(versions) + latestVersion := versions[len(versions)-1] + + return pvs[latestVersion.Original()], true +} diff --git a/vendor/github.com/hashicorp/hc-install/releases/releases.go b/vendor/github.com/hashicorp/hc-install/releases/releases.go new file mode 100644 index 0000000000..7bef49ba30 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/releases/releases.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package releases + +import ( + "io/ioutil" + "log" + "time" +) + +var ( + defaultInstallTimeout = 30 * time.Second + defaultListTimeout = 10 * time.Second + discardLogger = log.New(ioutil.Discard, "", 0) +) diff --git a/vendor/github.com/hashicorp/hc-install/releases/versions.go b/vendor/github.com/hashicorp/hc-install/releases/versions.go new file mode 100644 index 0000000000..49b1af78ca --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/releases/versions.go @@ -0,0 +1,104 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package releases + +import ( + "context" + "fmt" + "sort" + "time" + + "github.com/hashicorp/go-version" + rjson "github.com/hashicorp/hc-install/internal/releasesjson" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" + "github.com/hashicorp/hc-install/src" +) + +// Versions allows listing all versions of a product +// which match Constraints +type Versions struct { + Product product.Product + Constraints version.Constraints + Enterprise *EnterpriseOptions // require enterprise version if set (leave nil for OSS) + + ListTimeout time.Duration + + // Install represents configuration for installation of any listed version + Install InstallationOptions +} + +type InstallationOptions struct { + Timeout time.Duration + Dir string + + SkipChecksumVerification bool + + // ArmoredPublicKey is a public PGP key in ASCII/armor format to use + // instead of built-in pubkey to verify signature of downloaded checksums + // during installation + ArmoredPublicKey string +} + +func (v *Versions) List(ctx context.Context) ([]src.Source, error) { + if !validators.IsProductNameValid(v.Product.Name) { + return nil, fmt.Errorf("invalid product name: %q", v.Product.Name) + } + + if err := validateEnterpriseOptions(v.Enterprise); err != nil { + return nil, err + } + + timeout := defaultListTimeout + if v.ListTimeout > 0 { + timeout = v.ListTimeout + } + ctx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + + r := rjson.NewReleases() + pvs, err := r.ListProductVersions(ctx, v.Product.Name) + if err != nil { + return nil, err + } + + versions := pvs.AsSlice() + sort.Stable(versions) + + expectedMetadata := enterpriseVersionMetadata(v.Enterprise) + + installables := make([]src.Source, 0) + for _, pv := range versions { + if !v.Constraints.Check(pv.Version) { + // skip version which doesn't match constraint + continue + } + + if pv.Version.Metadata() != expectedMetadata { + // skip version which doesn't match required metadata for enterprise or OSS versions + continue + } + + ev := &ExactVersion{ + Product: v.Product, + Version: pv.Version, + InstallDir: v.Install.Dir, + Timeout: v.Install.Timeout, + + ArmoredPublicKey: v.Install.ArmoredPublicKey, + SkipChecksumVerification: v.Install.SkipChecksumVerification, + } + + if v.Enterprise != nil { + ev.Enterprise = &EnterpriseOptions{ + Meta: v.Enterprise.Meta, + LicenseDir: v.Enterprise.LicenseDir, + } + } + + installables = append(installables, ev) + } + + return installables, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/src/src.go b/vendor/github.com/hashicorp/hc-install/src/src.go new file mode 100644 index 0000000000..f7b8265efb --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/src/src.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package src + +import ( + "context" + "log" + + isrc "github.com/hashicorp/hc-install/internal/src" +) + +// Source represents an installer, finder, or builder +type Source interface { + IsSourceImpl() isrc.InstallSrcSigil +} + +type Installable interface { + Source + Install(ctx context.Context) (string, error) +} + +type Findable interface { + Source + Find(ctx context.Context) (string, error) +} + +type Buildable interface { + Source + Build(ctx context.Context) (string, error) +} + +type Validatable interface { + Source + Validate() error +} + +type Removable interface { + Source + Remove(ctx context.Context) error +} + +type LoggerSettable interface { + SetLogger(logger *log.Logger) +} diff --git a/vendor/github.com/hashicorp/hc-install/version/VERSION b/vendor/github.com/hashicorp/hc-install/version/VERSION new file mode 100644 index 0000000000..844f6a91ac --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/version/VERSION @@ -0,0 +1 @@ +0.6.3 diff --git a/vendor/github.com/hashicorp/hc-install/version/version.go b/vendor/github.com/hashicorp/hc-install/version/version.go new file mode 100644 index 0000000000..facd42949c --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/version/version.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package version + +import ( + _ "embed" + "strings" + + "github.com/hashicorp/go-version" +) + +//go:embed VERSION +var rawVersion string + +// parsedVersion declared here ensures that invalid versions panic early, on import +var parsedVersion = version.Must(version.NewVersion(strings.TrimSpace(rawVersion))) + +// Version returns the version of the library +// +// Note: This is only exposed as public function/package +// due to hard-coded constraints in the release tooling. +// In general downstream should not implement version-specific +// logic and rely on this function to be present in future releases. +func Version() *version.Version { + return parsedVersion +} diff --git a/vendor/github.com/hashicorp/hcl/v2/.copywrite.hcl b/vendor/github.com/hashicorp/hcl/v2/.copywrite.hcl new file mode 100644 index 0000000000..35eae08227 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/.copywrite.hcl @@ -0,0 +1,16 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2014 + + # (OPTIONAL) A list of globs that should not have copyright/license headers. + # Supports doublestar glob patterns for more flexibility in defining which + # files or folders should be ignored + header_ignore = [ + "hclsyntax/fuzz/testdata/**", + "hclwrite/fuzz/testdata/**", + "json/fuzz/testdata/**", + "specsuite/tests/**", + ] +} diff --git a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md index 025c4a1a41..f3fe93d45f 100644 --- a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md +++ b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md @@ -1,13 +1,176 @@ # HCL Changelog -## v2.8.2 (Unreleased) +## v2.19.0 (October 16, 2023) + +### Enhancements + +* ext/dynblock: `dynblock.Expand` now supports an optional hook for calling applications to check and potentially veto (by returning error diagnostics) particular `for_each` values. The behavior is unchanged for callers that don't set the new option. 
([#634](https://github.com/hashicorp/hcl/pull/634)) + +### Bugs Fixed + +* hclsyntax: Further fixes for treatment of "marked" values in the conditional expression, and better tracking of refined values into the conditional expression results, building on the fixes from v2.18.1. ([#633](https://github.com/hashicorp/hcl/pull/633)) + +## v2.18.1 (October 5, 2023) + +### Bugs Fixed + +* hclsyntax: Conditional expressions will no longer panic when one or both of their results are "marked", as is the case for situations like how HashiCorp Terraform tracks its concept of "sensitive values". ([#630](https://github.com/hashicorp/hcl/pull/630)) + +## v2.18.0 (August 30, 2023) + +### Enhancements + +* HCL now uses the tables from Unicode 15 when performing string normalization and character segmentation. HCL was previously using the Unicode 13 tables. + + For calling applications where consistent Unicode support is important, consider also upgrading to Go 1.21 at the same time as adopting HCL v2.18.0 so that the standard library unicode tables (used for case folding, etc) will also be from Unicode 15. + +## v2.17.1 (August 30, 2023) + +### Enhancements + +* hclsyntax: When evaluating string templates that have a long known constant prefix, HCL will truncate the known prefix to avoid creating excessively-large refinements. String prefix refinements are intended primarily for relatively-short fixed prefixes, such as `https://` at the start of a URL known to use that scheme. ([#617](https://github.com/hashicorp/hcl/pull/617)) +* ext/tryfunc: The "try" and "can" functions now handle unknown values slightly more precisely, and so can return known values in more situations when given expressions referring to unknown symbols. ([#622](https://github.com/hashicorp/hcl/pull/622)) + +### Bugs Fixed + +* ext/typeexpr: Will no longer try to refine unknown values of unknown type when dealing with a user-specified type constraint containing the `any` keyword, avoiding an incorrect panic at runtime. ([#625](https://github.com/hashicorp/hcl/pull/625)) +* ext/typeexpr: Now correctly handles attempts to declare the same object type attribute multiple times by returning an error. Previously this could potentially panic by creating an incoherent internal state. ([#624](https://github.com/hashicorp/hcl/pull/624)) + +## v2.17.0 (May 31, 2023) + +### Enhancements + +* HCL now uses a newer version of the upstream `cty` library which has improved treatment of unknown values: it can now track additional optional information that reduces the range of an unknown value, which allows some operations against unknown values to return known or partially-known results. ([#590](https://github.com/hashicorp/hcl/pull/590)) + + **Note:** This change effectively passes on [`cty`'s notion of backward compatibility](https://github.com/zclconf/go-cty/blob/main/COMPATIBILITY.md) whereby unknown values can become "more known" in later releases. In particular, if your caller is using `cty.Value.RawEquals` in its tests against the results of operations with unknown values then you may see those tests begin failing after upgrading, due to the values now being more "refined". + + If so, you should review the refinements with consideration to [the `cty` refinements docs](https://github.com/zclconf/go-cty/blob/7dcbae46a6f247e983efb1fa774d2bb68781a333/docs/refinements.md) and update your expected results to match only if the reported refinements seem correct for the given situation. 
The `RawEquals` method is intended only for making exact value comparisons in test cases, so main application code should not use it; use `Equals` instead for real logic, which will take refinements into account automatically. + +## v2.16.2 (March 9, 2023) + +### Bugs Fixed + +* ext/typeexpr: Verify type assumptions when applying default values, and ignore input values that do not match type assumptions. ([#594](https://github.com/hashicorp/hcl/pull/594)) + +## v2.16.1 (February 13, 2023) + +### Bugs Fixed + +* hclsyntax: Report correct `Range.End` for `FunctionCall` with incomplete argument ([#588](https://github.com/hashicorp/hcl/pull/588)) + +## v2.16.0 (January 30, 2023) + +### Enhancements + +* ext/typeexpr: Modify the `Defaults` functionality to implement additional flexibility. HCL will now upcast lists and sets into tuples, and maps into objects, when applying default values if the applied defaults cause the elements within a target collection to have differing types. Previously, this would have resulted in a panic; now HCL will return a modified overall type. ([#574](https://github.com/hashicorp/hcl/pull/574)) + + Users should return to the advice provided by v2.14.0, and apply the go-cty convert functionality *after* setting defaults on a given `cty.Value`, rather than before. +* hclfmt: Avoid rewriting unchanged files. ([#576](https://github.com/hashicorp/hcl/pull/576)) +* hclsyntax: Simplify the AST for certain string expressions. ([#584](https://github.com/hashicorp/hcl/pull/584)) + +### Bugs Fixed + +* hclwrite: Fix data race in `formatSpaces`. ([#511](https://github.com/hashicorp/hcl/pull/511)) + +## v2.15.0 (November 10, 2022) + +### Bugs Fixed + +* ext/typeexpr: Skip null objects when applying defaults. This prevents crashes when null objects are created inside collections, and stops incomplete objects being created with only optional attributes set. ([#567](https://github.com/hashicorp/hcl/pull/567)) +* ext/typeexpr: Ensure default values do not have optional metadata attached. This prevents crashes when default values are inserted into concrete go-cty values that have also been stripped of their optional metadata. ([#568](https://github.com/hashicorp/hcl/pull/568)) + +### Enhancements + +* ext/typeexpr: With the [go-cty](https://github.com/zclconf/go-cty) upstream dependency updated to v1.12.0, the `Defaults` struct and associated functions can apply additional and more flexible 'unsafe' conversions (examples include tuples into collections such as lists and sets, and additional safety around null and dynamic values). ([#564](https://github.com/hashicorp/hcl/pull/564)) +* ext/typeexpr: With the [go-cty](https://github.com/zclconf/go-cty) upstream dependency updated to v1.12.0, users should now apply the go-cty convert functionality *before* setting defaults on a given `cty.Value`, rather than after, if they require a specific `cty.Type`. ([#564](https://github.com/hashicorp/hcl/pull/564)) + +## v2.14.1 (September 23, 2022) + +### Bugs Fixed + +* ext/typeexpr: Type convert defaults for optional object attributes when applying them. This prevents crashes in certain cases when the objects in question are part of a collection. ([#555](https://github.com/hashicorp/hcl/pull/555)) + +## v2.14.0 (September 1, 2022) + +### Enhancements + +* ext/typeexpr: Added support for optional object attributes to `TypeConstraint`. Attributes can be wrapped in the special `optional(…)` modifier, allowing the attribute to be omitted while still meeting the type constraint.
For more information, see [cty's documentation on conversion between object types](https://github.com/zclconf/go-cty/blob/main/docs/convert.md#conversion-between-object-types). ([#549](https://github.com/hashicorp/hcl/pull/549)) +* ext/typeexpr: New function: `TypeConstraintWithDefaults`. In this mode, the `optional(…)` modifier accepts a second argument which can be used as the default value for omitted object attributes. The function returns both a `cty.Type` and associated `Defaults`, the latter of which has an `Apply` method to apply defaults to a given value. ([#549](https://github.com/hashicorp/hcl/pull/549)) + +## v2.13.0 (June 22, 2022) + +### Enhancements + +* hcl: `hcl.Diagnostic` now has an additional field `Extra` which is intended for carrying arbitrary supporting data ("extra information") related to the diagnostic message, intended to allow diagnostic renderers to optionally tailor the presentation of messages for particular situations. ([#539](https://github.com/hashicorp/hcl/pull/539)) +* hclsyntax: When an error occurs during a function call, the returned diagnostics will include _extra information_ (as described in the previous point) about which function was being called and, if the message is about an error returned by the function itself, that raw `error` value without any post-processing. ([#539](https://github.com/hashicorp/hcl/pull/539)) + +### Bugs Fixed + +* hclwrite: Fixed a potential data race for any situation where `hclwrite.Format` runs concurrently with itself. ([#534](https://github.com/hashicorp/hcl/pull/534)) + +## v2.12.0 (April 22, 2022) + +### Enhancements + +* hclsyntax: Evaluation of conditional expressions will now produce more precise error messages about inconsistencies between the types of the true and false result expressions, particularly in cases where both are of the same structural type kind but differ in their nested elements. ([#530](https://github.com/hashicorp/hcl/pull/530)) +* hclsyntax: The lexer will no longer allocate a small object on the heap for each token. Instead, in that situation it will allocate only when needed to return a diagnostic message with source location information. ([#490](https://github.com/hashicorp/hcl/pull/490)) +* hclwrite: New functions `TokensForTuple`, `TokensForObject`, and `TokensForFunctionCall` allow for more easily constructing the three constructs which are supported for static analysis and which HCL-based languages typically use in contexts where an expression is used only for its syntax, and not evaluated to produce a real value. For example, these new functions together are sufficient to construct all valid type constraint expressions from [the Type Expressions Extension](./ext/typeexpr/), which is the basis of variable type constraints in the Terraform language at the time of writing. ([#502](https://github.com/hashicorp/hcl/pull/502)) +* json: New functions `IsJSONExpression` and `IsJSONBody` to determine if a given expression or body was created by the JSON syntax parser. In normal situations it's better not to worry about what syntax a particular expression/body originated in, but this can be useful in some trickier cases where an application needs to shim for backwards-compatibility or for static analysis that needs to have special handling of the JSON syntax's embedded expression/template conventions. ([#524](https://github.com/hashicorp/hcl/pull/524)) + +### Bugs Fixed + +* gohcl: Fix docs about supported types for blocks.
([#507](https://github.com/hashicorp/hcl/pull/507)) + +## v2.11.1 (December 1, 2021) + +### Bugs Fixed + +* hclsyntax: The type for an upgraded unknown value with a splat expression cannot be known ([#495](https://github.com/hashicorp/hcl/pull/495)) + +## v2.11.0 (December 1, 2021) + +### Enhancements + +* hclsyntax: Various error messages related to unexpectedly reaching end of file while parsing a delimited subtree will now return specialized messages describing the opening tokens as "unclosed", instead of returning a generic diagnostic that just happens to refer to the empty source range at the end of the file. This gives better feedback when error messages are being presented alongside a source code snippet, as is common in HCL-based applications, because it shows which innermost container the parser was working on when it encountered the error. ([#492](https://github.com/hashicorp/hcl/pull/492)) + +### Bugs Fixed + +* hclsyntax: Upgrading an unknown single value to a list using a splat expression must return unknown ([#493](https://github.com/hashicorp/hcl/pull/493)) + +## v2.10.1 (July 21, 2021) + +* dynblock: Decode unknown dynamic blocks in order to obtain any diagnostics even though the decoded value is not used ([#476](https://github.com/hashicorp/hcl/pull/476)) +* hclsyntax: Calling functions is now more robust in the face of an incorrectly-implemented function which returns a `function.ArgError` whose argument index is out of range for the length of the arguments. Previously this would often lead to a panic, but now it'll return a less-precise error message instead. Functions that return out-of-bounds argument indices still ought to be fixed so that the resulting error diagnostics can be as precise as possible. ([#472](https://github.com/hashicorp/hcl/pull/472)) +* hclsyntax: Ensure marks on unknown values are maintained when processing string templates. ([#478](https://github.com/hashicorp/hcl/pull/478)) +* hcl: Improved error messages for various common error situations in `hcl.Index` and `hcl.GetAttr`. These are part of the implementation of indexing and attribute lookup in the native syntax expression language too, so the new error messages will apply to problems using those operators. ([#474](https://github.com/hashicorp/hcl/pull/474)) + +## v2.10.0 (April 20, 2021) + +### Enhancements + +* dynblock,hcldec: Using dynblock in conjunction with hcldec can now decode blocks with unknown dynamic for_each arguments as entirely unknown values ([#461](https://github.com/hashicorp/hcl/pull/461)) +* hclsyntax: Some syntax errors during parsing of the inside of `${` ... `}` template interpolation sequences will now produce an extra hint message about the need to escape as `$${` when trying to include interpolation syntax for other languages like shell scripting, AWS IAM policies, etc. ([#462](https://github.com/hashicorp/hcl/pull/462)) + +## v2.9.1 (March 10, 2021) + +### Bugs Fixed + +* hclsyntax: Fix panic for marked index value. ([#451](https://github.com/hashicorp/hcl/pull/451)) + +## v2.9.0 (February 23, 2021) + +### Enhancements + +* HCL's native syntax and JSON scanners -- and thus all of the other parsing components that build on top of them -- are now using Unicode 13 rules for text segmentation when counting text characters for the purpose of reporting source location columns. Previously HCL was using Unicode 12.
Unicode 13 still uses the same algorithm but includes some additions to the character tables the algorithm is defined in terms of, to properly categorize new characters defined in Unicode 13. + +## v2.8.2 (January 6, 2021) ### Bugs Fixed * hclsyntax: Fix panic for marked collection splat. ([#436](https://github.com/hashicorp/hcl/pull/436)) * hclsyntax: Fix panic for marked template loops. ([#437](https://github.com/hashicorp/hcl/pull/437)) -* hclsyntax: Fix `for` expression marked conditional.([#438](https://github.com/hashicorp/hcl/pull/438)) -* hclsyntax: Mark objects with keys that are sensitive ([#440](https://github.com/hashicorp/hcl/pull/440)) +* hclsyntax: Fix `for` expression marked conditional. ([#438](https://github.com/hashicorp/hcl/pull/438)) +* hclsyntax: Mark objects with keys that are sensitive. ([#440](https://github.com/hashicorp/hcl/pull/440)) ## v2.8.1 (December 17, 2020) diff --git a/vendor/github.com/hashicorp/hcl/v2/LICENSE b/vendor/github.com/hashicorp/hcl/v2/LICENSE index 82b4de97c7..e25da5fad9 100644 --- a/vendor/github.com/hashicorp/hcl/v2/LICENSE +++ b/vendor/github.com/hashicorp/hcl/v2/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2014 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go index c80535b7a7..578f81a2c2 100644 --- a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( @@ -63,6 +66,28 @@ type Diagnostic struct { // case of colliding names. Expression Expression EvalContext *EvalContext + + // Extra is an extension point for additional machine-readable information + // about this problem. + // + // Recipients of diagnostic objects may type-assert this value with + // specific interface types they know about to discover if any additional + // information is available that is interesting for their use-case. + // + // Extra is always considered to be optional extra information and so a + // diagnostic message should still always be fully described (from the + // perspective of a human who understands the language the messages are + // written in) by the other fields, in case a particular recipient does not + // recognize or chooses to ignore the Extra value. + // + // Functions that return diagnostics with Extra populated should typically + // document that they place values implementing a particular interface, + // rather than a concrete type, and define that interface such that its + // methods can dynamically indicate a lack of support at runtime even + // if the interface happens to be statically available. An Extra + // type that wraps other Extra values should additionally implement + // interface DiagnosticExtraUnwrapper to return the value they are wrapping + // so that callers can access inner values to type-assert against. + Extra interface{} } // Diagnostics is a list of Diagnostic instances. @@ -141,3 +166,24 @@ type DiagnosticWriter interface { WriteDiagnostic(*Diagnostic) error WriteDiagnostics(Diagnostics) error } + +// DiagnosticExtraUnwrapper is an interface implemented by values in the +// Extra field of Diagnostic when they are wrapping another "Extra" value that +// was generated downstream.
+// +// Diagnostic recipients which want to examine "Extra" values to sniff for +// particular types of extra data can either type-assert this interface +// directly and repeatedly unwrap until they receive nil, or can use the +// helper function DiagnosticExtra. +type DiagnosticExtraUnwrapper interface { + // If the receiver is wrapping another "diagnostic extra" value, returns + // that value. Otherwise returns nil to indicate dynamically that nothing + // is wrapped. + // + // The "nothing is wrapped" condition can be signalled either by this + // method returning nil or by a type not implementing this interface at all. + // + // Implementers should never create unwrap "cycles" where a nested extra + // value returns a value that was also wrapping it. + UnwrapDiagnosticExtra() interface{} +} diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go index 0b4a2629b9..bdfad42bf9 100644 --- a/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go new file mode 100644 index 0000000000..92be8f1a85 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go @@ -0,0 +1,42 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build go1.18 +// +build go1.18 + +package hcl + +// This file contains additional diagnostics-related symbols that use the +// Go 1.18 type parameters syntax and would therefore be incompatible with +// Go 1.17 and earlier. + +// DiagnosticExtra attempts to retrieve an "extra value" of type T from the +// given diagnostic, if either the diag.Extra field directly contains a value +// of that type or the value implements DiagnosticExtraUnwrapper and directly +// or indirectly returns a value of that type. +// +// Type T should typically be an interface type, so that code which generates +// diagnostics can potentially return different implementations of the same +// interface dynamically as needed. +// +// If a value of type T is found, returns that value and true to indicate +// success. Otherwise, returns the zero value of T and false to indicate +// failure. +func DiagnosticExtra[T any](diag *Diagnostic) (T, bool) { + extra := diag.Extra + var zero T + + for { + if ret, ok := extra.(T); ok { + return ret, true + } + + if unwrap, ok := extra.(DiagnosticExtraUnwrapper); ok { + // If our "extra" implements DiagnosticExtraUnwrapper then we'll + // unwrap one level and try this again. + extra = unwrap.UnwrapDiagnosticExtra() + } else { + return zero, false + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/didyoumean.go index c12833440a..fd00ca6f65 100644 --- a/vendor/github.com/hashicorp/hcl/v2/didyoumean.go +++ b/vendor/github.com/hashicorp/hcl/v2/didyoumean.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/doc.go b/vendor/github.com/hashicorp/hcl/v2/doc.go index 0d43fb2c78..a0e3119f2c 100644 --- a/vendor/github.com/hashicorp/hcl/v2/doc.go +++ b/vendor/github.com/hashicorp/hcl/v2/doc.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + // Package hcl contains the main modelling types and general utility functions // for HCL. // diff --git a/vendor/github.com/hashicorp/hcl/v2/eval_context.go b/vendor/github.com/hashicorp/hcl/v2/eval_context.go index 915910ad8a..921cfcb429 100644 --- a/vendor/github.com/hashicorp/hcl/v2/eval_context.go +++ b/vendor/github.com/hashicorp/hcl/v2/eval_context.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_call.go b/vendor/github.com/hashicorp/hcl/v2/expr_call.go index 6963fbae36..ca59b90d23 100644 --- a/vendor/github.com/hashicorp/hcl/v2/expr_call.go +++ b/vendor/github.com/hashicorp/hcl/v2/expr_call.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl // ExprCall tests if the given expression is a function call and, diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_list.go b/vendor/github.com/hashicorp/hcl/v2/expr_list.go index d05cca0b9a..8c0cf40518 100644 --- a/vendor/github.com/hashicorp/hcl/v2/expr_list.go +++ b/vendor/github.com/hashicorp/hcl/v2/expr_list.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl // ExprList tests if the given expression is a static list construct and, diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_map.go b/vendor/github.com/hashicorp/hcl/v2/expr_map.go index 96d1ce4bfa..56cf974740 100644 --- a/vendor/github.com/hashicorp/hcl/v2/expr_map.go +++ b/vendor/github.com/hashicorp/hcl/v2/expr_map.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl // ExprMap tests if the given expression is a static map construct and, diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go b/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go index 6d5d205c49..6683fd5444 100644 --- a/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go +++ b/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl type unwrapExpression interface { diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go index c9d7a1efb2..e0dda0df9e 100644 --- a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go +++ b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Package customdecode contains a HCL extension that allows, in certain // contexts, expression evaluation to be overridden by custom static analysis. // diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go index af7c66c235..2477f21907 100644 --- a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go +++ b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package customdecode import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md deleted file mode 100644 index f59ce92e94..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md +++ /dev/null @@ -1,184 +0,0 @@ -# HCL Dynamic Blocks Extension - -This HCL extension implements a special block type named "dynamic" that can -be used to dynamically generate blocks of other types by iterating over -collection values. - -Normally the block structure in an HCL configuration file is rigid, even -though dynamic expressions can be used within attribute values. This is -convenient for most applications since it allows the overall structure of -the document to be decoded easily, but in some applications it is desirable -to allow dynamic block generation within certain portions of the configuration. - -Dynamic block generation is performed using the `dynamic` block type: - -```hcl -toplevel { - nested { - foo = "static block 1" - } - - dynamic "nested" { - for_each = ["a", "b", "c"] - iterator = nested - content { - foo = "dynamic block ${nested.value}" - } - } - - nested { - foo = "static block 2" - } -} -``` - -The above is interpreted as if it were written as follows: - -```hcl -toplevel { - nested { - foo = "static block 1" - } - - nested { - foo = "dynamic block a" - } - - nested { - foo = "dynamic block b" - } - - nested { - foo = "dynamic block c" - } - - nested { - foo = "static block 2" - } -} -``` - -Since HCL block syntax is not normally exposed to the possibility of unknown -values, this extension must make some compromises when asked to iterate over -an unknown collection. If the length of the collection cannot be statically -recognized (because it is an unknown value of list, map, or set type) then -the `dynamic` construct will generate a _single_ dynamic block whose iterator -key and value are both unknown values of the dynamic pseudo-type, thus causing -any attribute values derived from iteration to appear as unknown values. There -is no explicit representation of the fact that the length of the collection may -eventually be different than one. - -## Usage - -Pass a body to function `Expand` to obtain a new body that will, on access -to its content, evaluate and expand any nested `dynamic` blocks. -Dynamic block processing is also automatically propagated into any nested -blocks that are returned, allowing users to nest dynamic blocks inside -one another and to nest dynamic blocks inside other static blocks. - -HCL structural decoding does not normally have access to an `EvalContext`, so -any variables and functions that should be available to the `for_each` -and `labels` expressions must be passed in when calling `Expand`. Expressions -within the `content` block are evaluated separately and so can be passed a -separate `EvalContext` if desired, during normal attribute expression -evaluation. - -## Detecting Variables - -Some applications dynamically generate an `EvalContext` by analyzing which -variables are referenced by an expression before evaluating it. - -This unfortunately requires some extra effort when this analysis is required -for the context passed to `Expand`: the HCL API requires a schema to be -provided in order to do any analysis of the blocks in a body, but the low-level -schema model provides a description of only one level of nested blocks at -a time, and thus a new schema must be provided for each additional level of -nesting. 
- -To make this arduous process as convenient as possible, this package provides -a helper function `WalkForEachVariables`, which returns a `WalkVariablesNode` -instance that can be used to find variables directly in a given body and also -determine which nested blocks require recursive calls. Using this mechanism -requires that the caller be able to look up a schema given a nested block type. -For _simple_ formats where a specific block type name always has the same schema -regardless of context, a walk can be implemented as follows: - -```go -func walkVariables(node dynblock.WalkVariablesNode, schema *hcl.BodySchema) []hcl.Traversal { - vars, children := node.Visit(schema) - - for _, child := range children { - var childSchema *hcl.BodySchema - switch child.BlockTypeName { - case "a": - childSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "b", - LabelNames: []string{"key"}, - }, - }, - } - case "b": - childSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "val", - Required: true, - }, - }, - } - default: - // Should never happen, because the above cases should be exhaustive - // for the application's configuration format. - panic(fmt.Errorf("can't find schema for unknown block type %q", child.BlockTypeName)) - } - - vars = append(vars, testWalkAndAccumVars(child.Node, childSchema)...) - } -} -``` - -### Detecting Variables with `hcldec` Specifications - -For applications that use the higher-level `hcldec` package to decode nested -configuration structures into `cty` values, the same specification can be used -to automatically drive the recursive variable-detection walk described above. - -The helper function `ForEachVariablesHCLDec` allows an entire recursive -configuration structure to be analyzed in a single call given a `hcldec.Spec` -that describes the nested block structure. This means a `hcldec`-based -application can support dynamic blocks with only a little additional effort: - -```go -func decodeBody(body hcl.Body, spec hcldec.Spec) (cty.Value, hcl.Diagnostics) { - // Determine which variables are needed to expand dynamic blocks - neededForDynamic := dynblock.ForEachVariablesHCLDec(body, spec) - - // Build a suitable EvalContext and expand dynamic blocks - dynCtx := buildEvalContext(neededForDynamic) - dynBody := dynblock.Expand(body, dynCtx) - - // Determine which variables are needed to fully decode the expanded body - // This will analyze expressions that came both from static blocks in the - // original body and from blocks that were dynamically added by Expand. - neededForDecode := hcldec.Variables(dynBody, spec) - - // Build a suitable EvalContext and then fully decode the body as per the - // hcldec specification. - decCtx := buildEvalContext(neededForDecode) - return hcldec.Decode(dynBody, spec, decCtx) -} - -func buildEvalContext(needed []hcl.Traversal) *hcl.EvalContext { - // (to be implemented by your application) -} -``` - -# Performance - -This extension is going quite harshly against the grain of the HCL API, and -so it uses lots of wrapping objects and temporary data structures to get its -work done. HCL in general is not suitable for use in high-performance situations -or situations sensitive to memory pressure, but that is _especially_ true for -this extension. 
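Reviewer note: the README removed above was the only in-tree prose for the `dynblock` extension (the package itself lives on upstream). As a quick reference, here is a minimal sketch of the usage pattern the README described, built from the public entry point shown in this diff (`dynblock.Expand`) plus the ordinary `hcl.Body` schema calls; the configuration literal, file name, and schemas below are illustrative only and are not part of this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/dynblock"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

// Hypothetical input: one dynamic block that should expand into three
// ordinary "nested" blocks.
const src = `
dynamic "nested" {
  for_each = ["a", "b", "c"]
  content {
    foo = "dynamic block ${nested.value}"
  }
}
`

func main() {
	f, diags := hclsyntax.ParseConfig([]byte(src), "example.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	// Expand wraps the body; "dynamic" blocks are only actually expanded
	// once Content or PartialContent is called with a schema.
	body := dynblock.Expand(f.Body, &hcl.EvalContext{})

	content, diags := body.Content(&hcl.BodySchema{
		Blocks: []hcl.BlockHeaderSchema{{Type: "nested"}},
	})
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	// One block per for_each element; attribute expressions inside each
	// block can see the iterator variable ("nested" here), which the
	// expanded body injects itself, so no extra EvalContext is needed.
	for _, block := range content.Blocks {
		inner, moreDiags := block.Body.Content(&hcl.BodySchema{
			Attributes: []hcl.AttributeSchema{{Name: "foo", Required: true}},
		})
		if moreDiags.HasErrors() {
			log.Fatal(moreDiags)
		}
		val, valDiags := inner.Attributes["foo"].Expr.Value(nil)
		if valDiags.HasErrors() {
			log.Fatal(valDiags)
		}
		fmt.Println(val.AsString()) // "dynamic block a", then "b", then "c"
	}
}
```

This also illustrates why `Expand` returns no diagnostics of its own: as its doc comment below notes, nothing is evaluated until `Content` or `PartialContent` selects blocks via a schema, so errors in `for_each` or `labels` expressions surface only at that point.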
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go deleted file mode 100644 index 65a9eab2df..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go +++ /dev/null @@ -1,262 +0,0 @@ -package dynblock - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// expandBody wraps another hcl.Body and expands any "dynamic" blocks found -// inside whenever Content or PartialContent is called. -type expandBody struct { - original hcl.Body - forEachCtx *hcl.EvalContext - iteration *iteration // non-nil if we're nested inside another "dynamic" block - - // These are used with PartialContent to produce a "remaining items" - // body to return. They are nil on all bodies fresh out of the transformer. - // - // Note that this is re-implemented here rather than delegating to the - // existing support required by the underlying body because we need to - // retain access to the entire original body on subsequent decode operations - // so we can retain any "dynamic" blocks for types we didn't take consume - // on the first pass. - hiddenAttrs map[string]struct{} - hiddenBlocks map[string]hcl.BlockHeaderSchema -} - -func (b *expandBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - extSchema := b.extendSchema(schema) - rawContent, diags := b.original.Content(extSchema) - - blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, false) - diags = append(diags, blockDiags...) - attrs := b.prepareAttributes(rawContent.Attributes) - - content := &hcl.BodyContent{ - Attributes: attrs, - Blocks: blocks, - MissingItemRange: b.original.MissingItemRange(), - } - - return content, diags -} - -func (b *expandBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - extSchema := b.extendSchema(schema) - rawContent, _, diags := b.original.PartialContent(extSchema) - // We discard the "remain" argument above because we're going to construct - // our own remain that also takes into account remaining "dynamic" blocks. - - blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, true) - diags = append(diags, blockDiags...) - attrs := b.prepareAttributes(rawContent.Attributes) - - content := &hcl.BodyContent{ - Attributes: attrs, - Blocks: blocks, - MissingItemRange: b.original.MissingItemRange(), - } - - remain := &expandBody{ - original: b.original, - forEachCtx: b.forEachCtx, - iteration: b.iteration, - hiddenAttrs: make(map[string]struct{}), - hiddenBlocks: make(map[string]hcl.BlockHeaderSchema), - } - for name := range b.hiddenAttrs { - remain.hiddenAttrs[name] = struct{}{} - } - for typeName, blockS := range b.hiddenBlocks { - remain.hiddenBlocks[typeName] = blockS - } - for _, attrS := range schema.Attributes { - remain.hiddenAttrs[attrS.Name] = struct{}{} - } - for _, blockS := range schema.Blocks { - remain.hiddenBlocks[blockS.Type] = blockS - } - - return content, remain, diags -} - -func (b *expandBody) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { - // We augment the requested schema to also include our special "dynamic" - // block type, since then we'll get instances of it interleaved with - // all of the literal child blocks we must also include. 
- extSchema := &hcl.BodySchema{ - Attributes: schema.Attributes, - Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+len(b.hiddenBlocks)+1), - } - copy(extSchema.Blocks, schema.Blocks) - extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) - - // If we have any hiddenBlocks then we also need to register those here - // so that a call to "Content" on the underlying body won't fail. - // (We'll filter these out again once we process the result of either - // Content or PartialContent.) - for _, blockS := range b.hiddenBlocks { - extSchema.Blocks = append(extSchema.Blocks, blockS) - } - - // If we have any hiddenAttrs then we also need to register these, for - // the same reason as we deal with hiddenBlocks above. - if len(b.hiddenAttrs) != 0 { - newAttrs := make([]hcl.AttributeSchema, len(schema.Attributes), len(schema.Attributes)+len(b.hiddenAttrs)) - copy(newAttrs, extSchema.Attributes) - for name := range b.hiddenAttrs { - newAttrs = append(newAttrs, hcl.AttributeSchema{ - Name: name, - Required: false, - }) - } - extSchema.Attributes = newAttrs - } - - return extSchema -} - -func (b *expandBody) prepareAttributes(rawAttrs hcl.Attributes) hcl.Attributes { - if len(b.hiddenAttrs) == 0 && b.iteration == nil { - // Easy path: just pass through the attrs from the original body verbatim - return rawAttrs - } - - // Otherwise we have some work to do: we must filter out any attributes - // that are hidden (since a previous PartialContent call already saw these) - // and wrap the expressions of the inner attributes so that they will - // have access to our iteration variables. - attrs := make(hcl.Attributes, len(rawAttrs)) - for name, rawAttr := range rawAttrs { - if _, hidden := b.hiddenAttrs[name]; hidden { - continue - } - if b.iteration != nil { - attr := *rawAttr // shallow copy so we can mutate it - attr.Expr = exprWrap{ - Expression: attr.Expr, - i: b.iteration, - } - attrs[name] = &attr - } else { - // If we have no active iteration then no wrapping is required. - attrs[name] = rawAttr - } - } - return attrs -} - -func (b *expandBody) expandBlocks(schema *hcl.BodySchema, rawBlocks hcl.Blocks, partial bool) (hcl.Blocks, hcl.Diagnostics) { - var blocks hcl.Blocks - var diags hcl.Diagnostics - - for _, rawBlock := range rawBlocks { - switch rawBlock.Type { - case "dynamic": - realBlockType := rawBlock.Labels[0] - if _, hidden := b.hiddenBlocks[realBlockType]; hidden { - continue - } - - var blockS *hcl.BlockHeaderSchema - for _, candidate := range schema.Blocks { - if candidate.Type == realBlockType { - blockS = &candidate - break - } - } - if blockS == nil { - // Not a block type that the caller requested. - if !partial { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported block type", - Detail: fmt.Sprintf("Blocks of type %q are not expected here.", realBlockType), - Subject: &rawBlock.LabelRanges[0], - }) - } - continue - } - - spec, specDiags := b.decodeSpec(blockS, rawBlock) - diags = append(diags, specDiags...) - if specDiags.HasErrors() { - continue - } - - if spec.forEachVal.IsKnown() { - for it := spec.forEachVal.ElementIterator(); it.Next(); { - key, value := it.Element() - i := b.iteration.MakeChild(spec.iteratorName, key, value) - - block, blockDiags := spec.newBlock(i, b.forEachCtx) - diags = append(diags, blockDiags...) - if block != nil { - // Attach our new iteration context so that attributes - // and other nested blocks can refer to our iterator. 
- block.Body = b.expandChild(block.Body, i) - blocks = append(blocks, block) - } - } - } else { - // If our top-level iteration value isn't known then we're forced - // to compromise since HCL doesn't have any concept of an - // "unknown block". In this case then, we'll produce a single - // dynamic block with the iterator values set to DynamicVal, - // which at least makes the potential for a block visible - // in our result, even though it's not represented in a fully-accurate - // way. - i := b.iteration.MakeChild(spec.iteratorName, cty.DynamicVal, cty.DynamicVal) - block, blockDiags := spec.newBlock(i, b.forEachCtx) - diags = append(diags, blockDiags...) - if block != nil { - block.Body = b.expandChild(block.Body, i) - - // We additionally force all of the leaf attribute values - // in the result to be unknown so the calling application - // can, if necessary, use that as a heuristic to detect - // when a single nested block might be standing in for - // multiple blocks yet to be expanded. This retains the - // structure of the generated body but forces all of its - // leaf attribute values to be unknown. - block.Body = unknownBody{block.Body} - - blocks = append(blocks, block) - } - } - - default: - if _, hidden := b.hiddenBlocks[rawBlock.Type]; !hidden { - // A static block doesn't create a new iteration context, but - // it does need to inherit _our own_ iteration context in - // case it contains expressions that refer to our inherited - // iterators, or nested "dynamic" blocks. - expandedBlock := *rawBlock // shallow copy - expandedBlock.Body = b.expandChild(rawBlock.Body, b.iteration) - blocks = append(blocks, &expandedBlock) - } - } - } - - return blocks, diags -} - -func (b *expandBody) expandChild(child hcl.Body, i *iteration) hcl.Body { - chiCtx := i.EvalContext(b.forEachCtx) - ret := Expand(child, chiCtx) - ret.(*expandBody).iteration = i - return ret -} - -func (b *expandBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - // blocks aren't allowed in JustAttributes mode and this body can - // only produce blocks, so we'll just pass straight through to our - // underlying body here. - return b.original.JustAttributes() -} - -func (b *expandBody) MissingItemRange() hcl.Range { - return b.original.MissingItemRange() -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go deleted file mode 100644 index 98a51eadd8..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go +++ /dev/null @@ -1,215 +0,0 @@ -package dynblock - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -type expandSpec struct { - blockType string - blockTypeRange hcl.Range - defRange hcl.Range - forEachVal cty.Value - iteratorName string - labelExprs []hcl.Expression - contentBody hcl.Body - inherited map[string]*iteration -} - -func (b *expandBody) decodeSpec(blockS *hcl.BlockHeaderSchema, rawSpec *hcl.Block) (*expandSpec, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var schema *hcl.BodySchema - if len(blockS.LabelNames) != 0 { - schema = dynamicBlockBodySchemaLabels - } else { - schema = dynamicBlockBodySchemaNoLabels - } - - specContent, specDiags := rawSpec.Body.Content(schema) - diags = append(diags, specDiags...) 
- if specDiags.HasErrors() { - return nil, diags - } - - //// for_each attribute - - eachAttr := specContent.Attributes["for_each"] - eachVal, eachDiags := eachAttr.Expr.Value(b.forEachCtx) - diags = append(diags, eachDiags...) - - if !eachVal.CanIterateElements() && eachVal.Type() != cty.DynamicPseudoType { - // We skip this error for DynamicPseudoType because that means we either - // have a null (which is checked immediately below) or an unknown - // (which is handled in the expandBody Content methods). - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic for_each value", - Detail: fmt.Sprintf("Cannot use a %s value in for_each. An iterable collection is required.", eachVal.Type().FriendlyName()), - Subject: eachAttr.Expr.Range().Ptr(), - Expression: eachAttr.Expr, - EvalContext: b.forEachCtx, - }) - return nil, diags - } - if eachVal.IsNull() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic for_each value", - Detail: "Cannot use a null value in for_each.", - Subject: eachAttr.Expr.Range().Ptr(), - Expression: eachAttr.Expr, - EvalContext: b.forEachCtx, - }) - return nil, diags - } - - //// iterator attribute - - iteratorName := blockS.Type - if iteratorAttr := specContent.Attributes["iterator"]; iteratorAttr != nil { - itTraversal, itDiags := hcl.AbsTraversalForExpr(iteratorAttr.Expr) - diags = append(diags, itDiags...) - if itDiags.HasErrors() { - return nil, diags - } - - if len(itTraversal) != 1 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic iterator name", - Detail: "Dynamic iterator must be a single variable name.", - Subject: itTraversal.SourceRange().Ptr(), - }) - return nil, diags - } - - iteratorName = itTraversal.RootName() - } - - var labelExprs []hcl.Expression - if labelsAttr := specContent.Attributes["labels"]; labelsAttr != nil { - var labelDiags hcl.Diagnostics - labelExprs, labelDiags = hcl.ExprList(labelsAttr.Expr) - diags = append(diags, labelDiags...) - if labelDiags.HasErrors() { - return nil, diags - } - - if len(labelExprs) > len(blockS.LabelNames) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous dynamic block label", - Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), - Subject: labelExprs[len(blockS.LabelNames)].Range().Ptr(), - }) - return nil, diags - } else if len(labelExprs) < len(blockS.LabelNames) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Insufficient dynamic block labels", - Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), - Subject: labelsAttr.Expr.Range().Ptr(), - }) - return nil, diags - } - } - - // Since our schema requests only blocks of type "content", we can assume - // that all entries in specContent.Blocks are content blocks. 
- if len(specContent.Blocks) == 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing dynamic content block", - Detail: "A dynamic block must have a nested block of type \"content\" to describe the body of each generated block.", - Subject: &specContent.MissingItemRange, - }) - return nil, diags - } - if len(specContent.Blocks) > 1 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous dynamic content block", - Detail: "Only one nested content block is allowed for each dynamic block.", - Subject: &specContent.Blocks[1].DefRange, - }) - return nil, diags - } - - return &expandSpec{ - blockType: blockS.Type, - blockTypeRange: rawSpec.LabelRanges[0], - defRange: rawSpec.DefRange, - forEachVal: eachVal, - iteratorName: iteratorName, - labelExprs: labelExprs, - contentBody: specContent.Blocks[0].Body, - }, diags -} - -func (s *expandSpec) newBlock(i *iteration, ctx *hcl.EvalContext) (*hcl.Block, hcl.Diagnostics) { - var diags hcl.Diagnostics - var labels []string - var labelRanges []hcl.Range - lCtx := i.EvalContext(ctx) - for _, labelExpr := range s.labelExprs { - labelVal, labelDiags := labelExpr.Value(lCtx) - diags = append(diags, labelDiags...) - if labelDiags.HasErrors() { - return nil, diags - } - - var convErr error - labelVal, convErr = convert.Convert(labelVal, cty.String) - if convErr != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic block label", - Detail: fmt.Sprintf("Cannot use this value as a dynamic block label: %s.", convErr), - Subject: labelExpr.Range().Ptr(), - Expression: labelExpr, - EvalContext: lCtx, - }) - return nil, diags - } - if labelVal.IsNull() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic block label", - Detail: "Cannot use a null value as a dynamic block label.", - Subject: labelExpr.Range().Ptr(), - Expression: labelExpr, - EvalContext: lCtx, - }) - return nil, diags - } - if !labelVal.IsKnown() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic block label", - Detail: "This value is not yet known. Dynamic block labels must be immediately-known values.", - Subject: labelExpr.Range().Ptr(), - Expression: labelExpr, - EvalContext: lCtx, - }) - return nil, diags - } - - labels = append(labels, labelVal.AsString()) - labelRanges = append(labelRanges, labelExpr.Range()) - } - - block := &hcl.Block{ - Type: s.blockType, - TypeRange: s.blockTypeRange, - Labels: labels, - LabelRanges: labelRanges, - DefRange: s.defRange, - Body: s.contentBody, - } - - return block, diags -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go deleted file mode 100644 index 460a1d2a31..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go +++ /dev/null @@ -1,42 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -type exprWrap struct { - hcl.Expression - i *iteration -} - -func (e exprWrap) Variables() []hcl.Traversal { - raw := e.Expression.Variables() - ret := make([]hcl.Traversal, 0, len(raw)) - - // Filter out traversals that refer to our iterator name or any - // iterator we've inherited; we're going to provide those in - // our Value wrapper, so the caller doesn't need to know about them. 
- for _, traversal := range raw { - rootName := traversal.RootName() - if rootName == e.i.IteratorName { - continue - } - if _, inherited := e.i.Inherited[rootName]; inherited { - continue - } - ret = append(ret, traversal) - } - return ret -} - -func (e exprWrap) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - extCtx := e.i.EvalContext(ctx) - return e.Expression.Value(extCtx) -} - -// UnwrapExpression returns the expression being wrapped by this instance. -// This allows the original expression to be recovered by hcl.UnwrapExpression. -func (e exprWrap) UnwrapExpression() hcl.Expression { - return e.Expression -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go deleted file mode 100644 index c566388689..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go +++ /dev/null @@ -1,66 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -type iteration struct { - IteratorName string - Key cty.Value - Value cty.Value - Inherited map[string]*iteration -} - -func (s *expandSpec) MakeIteration(key, value cty.Value) *iteration { - return &iteration{ - IteratorName: s.iteratorName, - Key: key, - Value: value, - Inherited: s.inherited, - } -} - -func (i *iteration) Object() cty.Value { - return cty.ObjectVal(map[string]cty.Value{ - "key": i.Key, - "value": i.Value, - }) -} - -func (i *iteration) EvalContext(base *hcl.EvalContext) *hcl.EvalContext { - new := base.NewChild() - - if i != nil { - new.Variables = map[string]cty.Value{} - for name, otherIt := range i.Inherited { - new.Variables[name] = otherIt.Object() - } - new.Variables[i.IteratorName] = i.Object() - } - - return new -} - -func (i *iteration) MakeChild(iteratorName string, key, value cty.Value) *iteration { - if i == nil { - // Create entirely new root iteration, then - return &iteration{ - IteratorName: iteratorName, - Key: key, - Value: value, - } - } - - inherited := map[string]*iteration{} - for name, otherIt := range i.Inherited { - inherited[name] = otherIt - } - inherited[i.IteratorName] = i - return &iteration{ - IteratorName: iteratorName, - Key: key, - Value: value, - Inherited: inherited, - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go deleted file mode 100644 index a5bfd94ec7..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go +++ /dev/null @@ -1,47 +0,0 @@ -// Package dynblock provides an extension to HCL that allows dynamic -// declaration of nested blocks in certain contexts via a special block type -// named "dynamic". -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" -) - -// Expand "dynamic" blocks in the given body, returning a new body that -// has those blocks expanded. -// -// The given EvalContext is used when evaluating "for_each" and "labels" -// attributes within dynamic blocks, allowing those expressions access to -// variables and functions beyond the iterator variable created by the -// iteration. -// -// Expand returns no diagnostics because no blocks are actually expanded -// until a call to Content or PartialContent on the returned body, which -// will then expand only the blocks selected by the schema. -// -// "dynamic" blocks are also expanded automatically within nested blocks -// in the given body, including within other dynamic blocks, thus allowing -// multi-dimensional iteration. 
However, it is not possible to -// dynamically generate the "dynamic" blocks themselves except through nesting. -// -// parent { -// dynamic "child" { -// for_each = child_objs -// content { -// dynamic "grandchild" { -// for_each = child.value.children -// labels = [grandchild.key] -// content { -// parent_key = child.key -// value = grandchild.value -// } -// } -// } -// } -// } -func Expand(body hcl.Body, ctx *hcl.EvalContext) hcl.Body { - return &expandBody{ - original: body, - forEachCtx: ctx, - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go deleted file mode 100644 index b3907d6eae..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go +++ /dev/null @@ -1,50 +0,0 @@ -package dynblock - -import "github.com/hashicorp/hcl/v2" - -var dynamicBlockHeaderSchema = hcl.BlockHeaderSchema{ - Type: "dynamic", - LabelNames: []string{"type"}, -} - -var dynamicBlockBodySchemaLabels = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "for_each", - Required: true, - }, - { - Name: "iterator", - Required: false, - }, - { - Name: "labels", - Required: true, - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "content", - LabelNames: nil, - }, - }, -} - -var dynamicBlockBodySchemaNoLabels = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "for_each", - Required: true, - }, - { - Name: "iterator", - Required: false, - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "content", - LabelNames: nil, - }, - }, -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go deleted file mode 100644 index ce98259a58..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go +++ /dev/null @@ -1,84 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// unknownBody is a funny body that just reports everything inside it as -// unknown. It uses a given other body as a sort of template for what attributes -// and blocks are inside -- including source location information -- but -// substitutes unknown values of unknown type for all attributes. -// -// This rather odd process is used to handle expansion of dynamic blocks whose -// for_each expression is unknown. Since a block cannot itself be unknown, -// we instead arrange for everything _inside_ the block to be unknown instead, -// to give the best possible approximation. -type unknownBody struct { - template hcl.Body -} - -var _ hcl.Body = unknownBody{} - -func (b unknownBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - content, diags := b.template.Content(schema) - content = b.fixupContent(content) - - // We're intentionally preserving the diagnostics reported from the - // inner body so that we can still report where the template body doesn't - // match the requested schema. - return content, diags -} - -func (b unknownBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - content, remain, diags := b.template.PartialContent(schema) - content = b.fixupContent(content) - remain = unknownBody{remain} // remaining content must also be wrapped - - // We're intentionally preserving the diagnostics reported from the - // inner body so that we can still report where the template body doesn't - // match the requested schema.
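The expansion workflow that public.go documents is easiest to see end-to-end in a small program. The following is a minimal sketch under stated assumptions, not part of this change: the HCL source, the "child" block type, and the nil EvalContext (which restricts for_each to literal values) are all illustrative.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/dynblock"
	"github.com/hashicorp/hcl/v2/hclparse"
)

const src = `
dynamic "child" {
  for_each = ["a", "b"]
  content {
    value = child.value
  }
}
`

func main() {
	file, diags := hclparse.NewParser().ParseHCL([]byte(src), "example.hcl")
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// Expand returns immediately; "dynamic" blocks are only expanded when
	// the returned body is read with Content or PartialContent.
	body := dynblock.Expand(file.Body, nil)

	content, diags := body.Content(&hcl.BodySchema{
		Blocks: []hcl.BlockHeaderSchema{{Type: "child"}},
	})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	for _, block := range content.Blocks {
		attrs, _ := block.Body.JustAttributes()
		val, _ := attrs["value"].Expr.Value(nil)
		fmt.Println(val.AsString()) // prints "a", then "b"
	}
}
```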
- return content, remain, diags -} - -func (b unknownBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - attrs, diags := b.template.JustAttributes() - attrs = b.fixupAttrs(attrs) - - // We're intentionally preserving the diagnostics reported from the - // inner body so that we can still report where the template body doesn't - // match the requested schema. - return attrs, diags -} - -func (b unknownBody) MissingItemRange() hcl.Range { - return b.template.MissingItemRange() -} - -func (b unknownBody) fixupContent(got *hcl.BodyContent) *hcl.BodyContent { - ret := &hcl.BodyContent{} - ret.Attributes = b.fixupAttrs(got.Attributes) - if len(got.Blocks) > 0 { - ret.Blocks = make(hcl.Blocks, 0, len(got.Blocks)) - for _, gotBlock := range got.Blocks { - new := *gotBlock // shallow copy - new.Body = unknownBody{gotBlock.Body} // nested content must also be marked unknown - ret.Blocks = append(ret.Blocks, &new) - } - } - - return ret -} - -func (b unknownBody) fixupAttrs(got hcl.Attributes) hcl.Attributes { - if len(got) == 0 { - return nil - } - ret := make(hcl.Attributes, len(got)) - for name, gotAttr := range got { - new := *gotAttr // shallow copy - new.Expr = hcl.StaticExpr(cty.DynamicVal, gotAttr.Expr.Range()) - ret[name] = &new - } - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go deleted file mode 100644 index 192339295e..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go +++ /dev/null @@ -1,209 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// WalkVariables begins the recursive process of walking all expressions and -// nested blocks in the given body and its child bodies while taking into -// account any "dynamic" blocks. -// -// This function requires that the caller walk through the nested block -// structure in the given body level-by-level so that an appropriate schema -// can be provided at each level to inform further processing. This workflow -// is thus easiest to use for calling applications that have some higher-level -// schema representation available with which to drive this multi-step -// process. If your application uses the hcldec package, you may be able to -// use VariablesHCLDec instead for a more automatic approach. -func WalkVariables(body hcl.Body) WalkVariablesNode { - return WalkVariablesNode{ - body: body, - includeContent: true, - } -} - -// WalkExpandVariables is like Variables but it includes only the variables -// required for successful block expansion, ignoring any variables referenced -// inside block contents. The result is the minimal set of all variables -// required for a call to Expand, excluding variables that would only be -// needed to subsequently call Content or PartialContent on the expanded -// body. -func WalkExpandVariables(body hcl.Body) WalkVariablesNode { - return WalkVariablesNode{ - body: body, - } -} - -type WalkVariablesNode struct { - body hcl.Body - it *iteration - - includeContent bool -} - -type WalkVariablesChild struct { - BlockTypeName string - Node WalkVariablesNode -} - -// Body returns the HCL Body associated with the child node, in case the caller -// wants to do some sort of inspection of it in order to decide what schema -// to pass to Visit. -// -// Most implementations should just fetch a fixed schema based on the -// BlockTypeName field and not access this. 
Deciding on a schema dynamically -// based on the body is a strange thing to do and generally necessary only if -// your caller is already doing other bizarre things with HCL bodies. -func (c WalkVariablesChild) Body() hcl.Body { - return c.Node.body -} - -// Visit returns the variable traversals required for any "dynamic" blocks -// directly in the body associated with this node, and also returns any child -// nodes that must be visited in order to continue the walk. -// -// Each child node has its associated block type name given in its BlockTypeName -// field, which the calling application should use to determine the appropriate -// schema for the content of each child node and pass it to the child node's -// own Visit method to continue the walk recursively. -func (n WalkVariablesNode) Visit(schema *hcl.BodySchema) (vars []hcl.Traversal, children []WalkVariablesChild) { - extSchema := n.extendSchema(schema) - container, _, _ := n.body.PartialContent(extSchema) - if container == nil { - return vars, children - } - - children = make([]WalkVariablesChild, 0, len(container.Blocks)) - - if n.includeContent { - for _, attr := range container.Attributes { - for _, traversal := range attr.Expr.Variables() { - var ours, inherited bool - if n.it != nil { - ours = traversal.RootName() == n.it.IteratorName - _, inherited = n.it.Inherited[traversal.RootName()] - } - - if !(ours || inherited) { - vars = append(vars, traversal) - } - } - } - } - - for _, block := range container.Blocks { - switch block.Type { - - case "dynamic": - blockTypeName := block.Labels[0] - inner, _, _ := block.Body.PartialContent(variableDetectionInnerSchema) - if inner == nil { - continue - } - - iteratorName := blockTypeName - if attr, exists := inner.Attributes["iterator"]; exists { - iterTraversal, _ := hcl.AbsTraversalForExpr(attr.Expr) - if len(iterTraversal) == 0 { - // Ignore this invalid dynamic block, since it'll produce - // an error if someone tries to extract content from it - // later anyway. - continue - } - iteratorName = iterTraversal.RootName() - } - blockIt := n.it.MakeChild(iteratorName, cty.DynamicVal, cty.DynamicVal) - - if attr, exists := inner.Attributes["for_each"]; exists { - // Filter out iterator names inherited from parent blocks - for _, traversal := range attr.Expr.Variables() { - if _, inherited := blockIt.Inherited[traversal.RootName()]; !inherited { - vars = append(vars, traversal) - } - } - } - if attr, exists := inner.Attributes["labels"]; exists { - // Filter out both our own iterator name _and_ those inherited - // from parent blocks, since we provide _both_ of these to the - // label expressions. - for _, traversal := range attr.Expr.Variables() { - ours := traversal.RootName() == iteratorName - _, inherited := blockIt.Inherited[traversal.RootName()] - - if !(ours || inherited) { - vars = append(vars, traversal) - } - } - } - - for _, contentBlock := range inner.Blocks { - // We only request "content" blocks in our schema, so we know - // any blocks we find here will be content blocks. We require - // exactly one content block for actual expansion, but we'll - // be more liberal here so that callers can still collect - // variables from erroneous "dynamic" blocks. 
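The level-by-level walk that Visit describes can be driven with a small recursive helper. This is a hypothetical sketch; the walkAll name and the plain schema map are assumptions of the example, not an API of this package.

```go
package main

import (
	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/dynblock"
)

// walkAll visits one level of the walk with the schema for the current body,
// then recurses into each reported child using that child's own schema,
// looked up by block type name (the schemas map is this sketch's assumption).
func walkAll(node dynblock.WalkVariablesNode, schema *hcl.BodySchema, schemas map[string]*hcl.BodySchema) []hcl.Traversal {
	vars, children := node.Visit(schema)
	for _, child := range children {
		if childSchema, ok := schemas[child.BlockTypeName]; ok {
			vars = append(vars, walkAll(child.Node, childSchema, schemas)...)
		}
	}
	return vars
}
```

The walkVariablesWithHCLDec helper deleted further below has the same shape, with hcldec.ImpliedSchema and hcldec.ChildBlockTypes supplying the per-level schemas.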
- children = append(children, WalkVariablesChild{ - BlockTypeName: blockTypeName, - Node: WalkVariablesNode{ - body: contentBlock.Body, - it: blockIt, - includeContent: n.includeContent, - }, - }) - } - - default: - children = append(children, WalkVariablesChild{ - BlockTypeName: block.Type, - Node: WalkVariablesNode{ - body: block.Body, - it: n.it, - includeContent: n.includeContent, - }, - }) - - } - } - - return vars, children -} - -func (n WalkVariablesNode) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { - // We augment the requested schema to also include our special "dynamic" - // block type, since then we'll get instances of it interleaved with - // all of the literal child blocks we must also include. - extSchema := &hcl.BodySchema{ - Attributes: schema.Attributes, - Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), - } - copy(extSchema.Blocks, schema.Blocks) - extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) - - return extSchema -} - -// This is a more relaxed schema than what's in schema.go, since we -// want to maximize the amount of variables we can find even if there -// are erroneous blocks. -var variableDetectionInnerSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "for_each", - Required: false, - }, - { - Name: "labels", - Required: false, - }, - { - Name: "iterator", - Required: false, - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "content", - }, - }, -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go deleted file mode 100644 index 907ef3eba7..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go +++ /dev/null @@ -1,43 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" -) - -// VariablesHCLDec is a wrapper around WalkVariables that uses the given hcldec -// specification to automatically drive the recursive walk through nested -// blocks in the given body. -// -// This is a drop-in replacement for hcldec.Variables which is able to treat -// blocks of type "dynamic" in the same special way that dynblock.Expand would, -// exposing both the variables referenced in the "for_each" and "labels" -// arguments and variables used in the nested "content" block. -func VariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { - rootNode := WalkVariables(body) - return walkVariablesWithHCLDec(rootNode, spec) -} - -// ExpandVariablesHCLDec is like VariablesHCLDec but it includes only the -// minimal set of variables required to call Expand, ignoring variables that -// are referenced only inside normal block contents. See WalkExpandVariables -// for more information. -func ExpandVariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { - rootNode := WalkExpandVariables(body) - return walkVariablesWithHCLDec(rootNode, spec) -} - -func walkVariablesWithHCLDec(node WalkVariablesNode, spec hcldec.Spec) []hcl.Traversal { - vars, children := node.Visit(hcldec.ImpliedSchema(spec)) - - if len(children) > 0 { - childSpecs := hcldec.ChildBlockTypes(spec) - for _, child := range children { - if childSpec, exists := childSpecs[child.BlockTypeName]; exists { - vars = append(vars, walkVariablesWithHCLDec(child.Node, childSpec)...) 
- } - } - } - - return vars -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md deleted file mode 100644 index 058f1e3d84..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md +++ /dev/null @@ -1,135 +0,0 @@ -# HCL Type Expressions Extension - -This HCL extension defines a convention for describing HCL types using function -call and variable reference syntax, allowing configuration formats to include -type information provided by users. - -The type syntax is processed statically from an hcl.Expression, so it cannot -use any of the usual language operators. This is similar to type expressions -in statically-typed programming languages. - -```hcl -variable "example" { - type = list(string) -} -``` - -The extension is built using the `hcl.ExprAsKeyword` and `hcl.ExprCall` -functions, and so it relies on the underlying syntax to define how "keyword" -and "call" are interpreted. The above shows how they are interpreted in -the HCL native syntax, while the following shows the same information -expressed in JSON: - -```json -{ - "variable": { - "example": { - "type": "list(string)" - } - } -} -``` - -Notice that since we have additional contextual information that we intend -to allow only calls and keywords the JSON syntax is able to parse the given -string directly as an expression, rather than as a template as would be -the case for normal expression evaluation. - -For more information, see [the godoc reference](http://godoc.org/github.com/hashicorp/hcl/v2/ext/typeexpr). - -## Type Expression Syntax - -When expressed in the native syntax, the following expressions are permitted -in a type expression: - -* `string` - string -* `bool` - boolean -* `number` - number -* `any` - `cty.DynamicPseudoType` (in function `TypeConstraint` only) -* `list(<TYPE>)` - list of the type given as an argument -* `set(<TYPE>)` - set of the type given as an argument -* `map(<TYPE>)` - map of the type given as an argument -* `tuple([<TYPES...>])` - tuple with the element types given in the single list argument -* `object({<KEY> = <TYPE>, ...})` - object with the attributes and corresponding types given in the single map argument - -For example: - -* `list(string)` -* `object({name=string,age=number})` -* `map(object({name=string,age=number}))` - -Note that the object constructor syntax is not fully-general for all possible -object types because it requires the attribute names to be valid identifiers. -In practice it is expected that any time an object type is being fixed for -type checking it will be one that has identifiers as its attributes; object -types with weird attributes generally show up only from arbitrary object -constructors in configuration files, which are usually treated either as maps -or as the dynamic pseudo-type. - -## Type Constraints as Values - -Along with defining a convention for writing down types using HCL expression -constructs, this package also includes a mechanism for representing types as -values that can be used as data within an HCL-based language. - -`typeexpr.TypeConstraintType` is a -[`cty` capsule type](https://github.com/zclconf/go-cty/blob/master/docs/types.md#capsule-types) -that encapsulates `cty.Type` values.
You can construct such a value directly -using the `TypeConstraintVal` function: - -```go -tyVal := typeexpr.TypeConstraintVal(cty.String) - -// We can unpack the type from a value using TypeConstraintFromVal -ty := typeexpr.TypeConstraintFromVal(tyVal) -``` - -However, the primary purpose of `typeexpr.TypeConstraintType` is to be -specified as the type constraint for an argument, in which case it serves -as a signal for HCL to treat the argument expression as a type constraint -expression as defined above, rather than as a normal value expression. - -"An argument" in the above in practice means the following two locations: - -* As the type constraint for a parameter of a cty function that will be - used in an `hcl.EvalContext`. In that case, function calls in the HCL - native expression syntax will require the argument to be valid type constraint - expression syntax and the function implementation will receive a - `TypeConstraintType` value as the argument value for that parameter. - -* As the type constraint for a `hcldec.AttrSpec` or `hcldec.BlockAttrsSpec` - when decoding an HCL body using `hcldec`. In that case, the attributes - with that type constraint will be required to be valid type constraint - expression syntax and the result will be a `TypeConstraintType` value. - -Note that the special handling of these arguments means that an argument -marked in this way must use the type constraint syntax directly. It is not -valid to pass in a value of `TypeConstraintType` that has been obtained -dynamically via some other expression result. - -`TypeConstraintType` is provided with the intent of using it internally within -application code when incorporating type constraint expression syntax into -an HCL-based language, not to be used for dynamic "programming with types". A -calling application could support programming with types by defining its _own_ -capsule type, but that is not the purpose of `TypeConstraintType`. - -## The "convert" `cty` Function - -Building on the `TypeConstraintType` described in the previous section, this -package also provides `typeexpr.ConvertFunc` which is a cty function that -can be placed into an `hcl.EvalContext` (conventionally named "convert") in -order to provide a general type conversion function in an HCL-based language: - -```hcl - foo = convert("true", bool) -``` - -The second parameter uses the mechanism described in the previous section to -require its argument to be a type constraint expression rather than a value -expression. In doing so, it allows converting with any type constraint that -can be expressed in this package's type constraint syntax. In the above example, -the `foo` argument would receive a boolean true, or `cty.True` in `cty` terms. - -The target type constraint must always be provided statically using inline -type constraint syntax. There is no way to _dynamically_ select a type -constraint using this function. diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go deleted file mode 100644 index c4b379579d..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package typeexpr extends HCL with a convention for describing HCL types -// within configuration files. -// -// The type syntax is processed statically from an hcl.Expression, so it cannot -// use any of the usual language operators. This is similar to type expressions -// in statically-typed programming languages.
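Registering the conversion function is a one-line addition to an evaluation context. Below is a minimal sketch; the inline expression, the file name, and the panic-based error handling are assumptions made for illustration.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/typeexpr"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty/function"
)

func main() {
	// Expose the conversion function under the conventional name "convert".
	ctx := &hcl.EvalContext{
		Functions: map[string]function.Function{
			"convert": typeexpr.ConvertFunc,
		},
	}

	expr, diags := hclsyntax.ParseExpression([]byte(`convert("true", bool)`), "inline.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	val, diags := expr.Value(ctx)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(val.True()) // true
}
```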
-// -// variable "example" { -// type = list(string) -// } -package typeexpr diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go deleted file mode 100644 index 11b0689798..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go +++ /dev/null @@ -1,196 +0,0 @@ -package typeexpr - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -const invalidTypeSummary = "Invalid type specification" - -// getType is the internal implementation of both Type and TypeConstraint, -// using the passed flag to distinguish. When constraint is false, the "any" -// keyword will produce an error. -func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) { - // First we'll try for one of our keywords - kw := hcl.ExprAsKeyword(expr) - switch kw { - case "bool": - return cty.Bool, nil - case "string": - return cty.String, nil - case "number": - return cty.Number, nil - case "any": - if constraint { - return cty.DynamicPseudoType, nil - } - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The keyword %q cannot be used in this type specification: an exact type is required.", kw), - Subject: expr.Range().Ptr(), - }} - case "list", "map", "set": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", kw), - Subject: expr.Range().Ptr(), - }} - case "object": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", - Subject: expr.Range().Ptr(), - }} - case "tuple": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The tuple type constructor requires one argument specifying the element types as a list.", - Subject: expr.Range().Ptr(), - }} - case "": - // okay! we'll fall through and try processing as a call, then. - default: - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The keyword %q is not a valid type specification.", kw), - Subject: expr.Range().Ptr(), - }} - } - - // If we get down here then our expression isn't just a keyword, so we'll - // try to process it as a call instead. - call, diags := hcl.ExprCall(expr) - if diags.HasErrors() { - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "A type specification is either a primitive type keyword (bool, number, string) or a complex type constructor call, like list(string).", - Subject: expr.Range().Ptr(), - }} - } - - switch call.Name { - case "bool", "string", "number", "any": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("Primitive type keyword %q does not expect arguments.", call.Name), - Subject: &call.ArgsRange, - }} - } - - if len(call.Arguments) != 1 { - contextRange := call.ArgsRange - subjectRange := call.ArgsRange - if len(call.Arguments) > 1 { - // If we have too many arguments (as opposed to too _few_) then - // we'll highlight the extraneous arguments as the diagnostic - // subject. 
- subjectRange = hcl.RangeBetween(call.Arguments[1].Range(), call.Arguments[len(call.Arguments)-1].Range()) - } - - switch call.Name { - case "list", "set", "map": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", call.Name), - Subject: &subjectRange, - Context: &contextRange, - }} - case "object": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", - Subject: &subjectRange, - Context: &contextRange, - }} - case "tuple": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The tuple type constructor requires one argument specifying the element types as a list.", - Subject: &subjectRange, - Context: &contextRange, - }} - } - } - - switch call.Name { - - case "list": - ety, diags := getType(call.Arguments[0], constraint) - return cty.List(ety), diags - case "set": - ety, diags := getType(call.Arguments[0], constraint) - return cty.Set(ety), diags - case "map": - ety, diags := getType(call.Arguments[0], constraint) - return cty.Map(ety), diags - case "object": - attrDefs, diags := hcl.ExprMap(call.Arguments[0]) - if diags.HasErrors() { - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Object type constructor requires a map whose keys are attribute names and whose values are the corresponding attribute types.", - Subject: call.Arguments[0].Range().Ptr(), - Context: expr.Range().Ptr(), - }} - } - - atys := make(map[string]cty.Type) - for _, attrDef := range attrDefs { - attrName := hcl.ExprAsKeyword(attrDef.Key) - if attrName == "" { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Object constructor map keys must be attribute names.", - Subject: attrDef.Key.Range().Ptr(), - Context: expr.Range().Ptr(), - }) - continue - } - aty, attrDiags := getType(attrDef.Value, constraint) - diags = append(diags, attrDiags...) - atys[attrName] = aty - } - return cty.Object(atys), diags - case "tuple": - elemDefs, diags := hcl.ExprList(call.Arguments[0]) - if diags.HasErrors() { - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Tuple type constructor requires a list of element types.", - Subject: call.Arguments[0].Range().Ptr(), - Context: expr.Range().Ptr(), - }} - } - etys := make([]cty.Type, len(elemDefs)) - for i, defExpr := range elemDefs { - ety, elemDiags := getType(defExpr, constraint) - diags = append(diags, elemDiags...) - etys[i] = ety - } - return cty.Tuple(etys), diags - default: - // Can't access call.Arguments in this path because we've not validated - // that it contains exactly one expression here. 
- return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("Keyword %q is not a valid type constructor.", call.Name), - Subject: expr.Range().Ptr(), - }} - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go deleted file mode 100644 index 3b8f618fbc..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go +++ /dev/null @@ -1,129 +0,0 @@ -package typeexpr - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// Type attempts to process the given expression as a type expression and, if -// successful, returns the resulting type. If unsuccessful, error diagnostics -// are returned. -func Type(expr hcl.Expression) (cty.Type, hcl.Diagnostics) { - return getType(expr, false) -} - -// TypeConstraint attempts to parse the given expression as a type constraint -// and, if successful, returns the resulting type. If unsuccessful, error -// diagnostics are returned. -// -// A type constraint has the same structure as a type, but it additionally -// allows the keyword "any" to represent cty.DynamicPseudoType, which is often -// used as a wildcard in type checking and type conversion operations. -func TypeConstraint(expr hcl.Expression) (cty.Type, hcl.Diagnostics) { - return getType(expr, true) -} - -// TypeString returns a string rendering of the given type as it would be -// expected to appear in the HCL native syntax. -// -// This is primarily intended for showing types to the user in an application -// that uses typeexpr, where the user can be assumed to be familiar with the -// type expression syntax. In applications that do not use typeexpr these -// results may be confusing to the user and so type.FriendlyName may be -// preferable, even though it's less precise. -// -// TypeString produces reasonable results only for types like what would be -// produced by the Type and TypeConstraint functions. In particular, it cannot -// support capsule types. -func TypeString(ty cty.Type) string { - // Easy cases first - switch ty { - case cty.String: - return "string" - case cty.Bool: - return "bool" - case cty.Number: - return "number" - case cty.DynamicPseudoType: - return "any" - } - - if ty.IsCapsuleType() { - panic("TypeString does not support capsule types") - } - - if ty.IsCollectionType() { - ety := ty.ElementType() - etyString := TypeString(ety) - switch { - case ty.IsListType(): - return fmt.Sprintf("list(%s)", etyString) - case ty.IsSetType(): - return fmt.Sprintf("set(%s)", etyString) - case ty.IsMapType(): - return fmt.Sprintf("map(%s)", etyString) - default: - // Should never happen because the above is exhaustive - panic("unsupported collection type") - } - } - - if ty.IsObjectType() { - var buf bytes.Buffer - buf.WriteString("object({") - atys := ty.AttributeTypes() - names := make([]string, 0, len(atys)) - for name := range atys { - names = append(names, name) - } - sort.Strings(names) - first := true - for _, name := range names { - aty := atys[name] - if !first { - buf.WriteByte(',') - } - if !hclsyntax.ValidIdentifier(name) { - // Should never happen for any type produced by this package, - // but we'll do something reasonable here just so we don't - // produce garbage if someone gives us a hand-assembled object - // type that has weird attribute names.
- // Using Go-style quoting here isn't perfect, since it doesn't - // exactly match HCL syntax, but it's fine for an edge-case. - buf.WriteString(fmt.Sprintf("%q", name)) - } else { - buf.WriteString(name) - } - buf.WriteByte('=') - buf.WriteString(TypeString(aty)) - first = false - } - buf.WriteString("})") - return buf.String() - } - - if ty.IsTupleType() { - var buf bytes.Buffer - buf.WriteString("tuple([") - etys := ty.TupleElementTypes() - first := true - for _, ety := range etys { - if !first { - buf.WriteByte(',') - } - buf.WriteString(TypeString(ety)) - first = false - } - buf.WriteString("])") - return buf.String() - } - - // Should never happen because we covered all cases above. - panic(fmt.Errorf("unsupported type %#v", ty)) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go deleted file mode 100644 index 5462d82c3c..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go +++ /dev/null @@ -1,118 +0,0 @@ -package typeexpr - -import ( - "fmt" - "reflect" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/customdecode" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" -) - -// TypeConstraintType is a cty capsule type that allows cty type constraints to -// be used as values. -// -// If TypeConstraintType is used in a context supporting the -// customdecode.CustomExpressionDecoder extension then it will implement -// expression decoding using the TypeConstraint function, thus allowing -// type expressions to be used in contexts where value expressions might -// normally be expected, such as in arguments to function calls. -var TypeConstraintType cty.Type - -// TypeConstraintVal constructs a cty.Value whose type is -// TypeConstraintType. -func TypeConstraintVal(ty cty.Type) cty.Value { - return cty.CapsuleVal(TypeConstraintType, &ty) -} - -// TypeConstraintFromVal extracts the type from a cty.Value of -// TypeConstraintType that was previously constructed using TypeConstraintVal. -// -// If the given value isn't a known, non-null value of TypeConstraintType -// then this function will panic. -func TypeConstraintFromVal(v cty.Value) cty.Type { - if !v.Type().Equals(TypeConstraintType) { - panic("value is not of TypeConstraintType") - } - ptr := v.EncapsulatedValue().(*cty.Type) - return *ptr -} - -// ConvertFunc is a cty function that implements type conversions. -// -// Its signature is as follows: -// convert(value, type_constraint) -// -// ...where type_constraint is a type constraint expression as defined by -// typeexpr.TypeConstraint. -// -// It relies on HCL's customdecode extension and so it's not suitable for use -// in non-HCL contexts or if you are using a HCL syntax implementation that -// does not support customdecode for function arguments. However, it _is_ -// supported for function calls in the HCL native expression syntax. 
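A short round-trip makes the Type/TypeConstraint/TypeString contract above concrete. This is a minimal sketch; the type expression and file name are assumptions of the example.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/typeexpr"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte(`map(object({name=string}))`), "type.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// TypeConstraint also accepts the "any" keyword; Type would reject it.
	ty, diags := typeexpr.TypeConstraint(expr)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	fmt.Println(typeexpr.TypeString(ty)) // map(object({name=string}))
}
```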
-var ConvertFunc function.Function - -func init() { - TypeConstraintType = cty.CapsuleWithOps("type constraint", reflect.TypeOf(cty.Type{}), &cty.CapsuleOps{ - ExtensionData: func(key interface{}) interface{} { - switch key { - case customdecode.CustomExpressionDecoder: - return customdecode.CustomExpressionDecoderFunc( - func(expr hcl.Expression, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - ty, diags := TypeConstraint(expr) - if diags.HasErrors() { - return cty.NilVal, diags - } - return TypeConstraintVal(ty), nil - }, - ) - default: - return nil - } - }, - TypeGoString: func(_ reflect.Type) string { - return "typeexpr.TypeConstraintType" - }, - GoString: func(raw interface{}) string { - tyPtr := raw.(*cty.Type) - return fmt.Sprintf("typeexpr.TypeConstraintVal(%#v)", *tyPtr) - }, - RawEquals: func(a, b interface{}) bool { - aPtr := a.(*cty.Type) - bPtr := b.(*cty.Type) - return (*aPtr).Equals(*bPtr) - }, - }) - - ConvertFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowNull: true, - AllowDynamicType: true, - }, - { - Name: "type", - Type: TypeConstraintType, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - wantTypePtr := args[1].EncapsulatedValue().(*cty.Type) - got, err := convert.Convert(args[0], *wantTypePtr) - if err != nil { - return cty.NilType, function.NewArgError(0, err) - } - return got.Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - v, err := convert.Convert(args[0], retType) - if err != nil { - return cty.NilVal, function.NewArgError(0, err) - } - return v, nil - }, - }) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go deleted file mode 100644 index 302a3f4630..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go +++ /dev/null @@ -1,338 +0,0 @@ -package gohcl - -import ( - "fmt" - "reflect" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/gocty" -) - -// DecodeBody extracts the configuration within the given body into the given -// value. This value must be a non-nil pointer to either a struct or -// a map, where in the former case the configuration will be decoded using -// struct tags and in the latter case only attributes are allowed and their -// values are decoded into the map. -// -// The given EvalContext is used to resolve any variables or functions in -// expressions encountered while decoding. This may be nil to require only -// constant values, for simple applications that do not support variables or -// functions. -// -// The returned diagnostics should be inspected with its HasErrors method to -// determine if the populated value is valid and complete. If error diagnostics -// are returned then the given value may have been partially-populated but -// may still be accessed by a careful caller for static analysis and editor -// integration use-cases. 
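The struct-tag decoding that DecodeBody documents looks like this in practice. A minimal sketch under stated assumptions: the Service/Config types, tag names, and HCL source are illustrative, and the nil EvalContext restricts expressions to constants.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclparse"
)

type Service struct {
	Name string `hcl:"name,label"`
	Addr string `hcl:"listen_addr"`
}

type Config struct {
	Services []Service `hcl:"service,block"`
}

const src = `
service "web" {
  listen_addr = "127.0.0.1:8080"
}
`

func main() {
	file, diags := hclparse.NewParser().ParseHCL([]byte(src), "config.hcl")
	if diags.HasErrors() {
		panic(diags.Error())
	}

	var config Config
	diags = gohcl.DecodeBody(file.Body, nil, &config) // nil: constant values only
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(config.Services[0].Name, config.Services[0].Addr) // web 127.0.0.1:8080
}
```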
-func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { - rv := reflect.ValueOf(val) - if rv.Kind() != reflect.Ptr { - panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String())) - } - - return decodeBodyToValue(body, ctx, rv.Elem()) -} - -func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { - et := val.Type() - switch et.Kind() { - case reflect.Struct: - return decodeBodyToStruct(body, ctx, val) - case reflect.Map: - return decodeBodyToMap(body, ctx, val) - default: - panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String())) - } -} - -func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { - schema, partial := ImpliedBodySchema(val.Interface()) - - var content *hcl.BodyContent - var leftovers hcl.Body - var diags hcl.Diagnostics - if partial { - content, leftovers, diags = body.PartialContent(schema) - } else { - content, diags = body.Content(schema) - } - if content == nil { - return diags - } - - tags := getFieldTags(val.Type()) - - if tags.Body != nil { - fieldIdx := *tags.Body - field := val.Type().Field(fieldIdx) - fieldV := val.Field(fieldIdx) - switch { - case bodyType.AssignableTo(field.Type): - fieldV.Set(reflect.ValueOf(body)) - - default: - diags = append(diags, decodeBodyToValue(body, ctx, fieldV)...) - } - } - - if tags.Remain != nil { - fieldIdx := *tags.Remain - field := val.Type().Field(fieldIdx) - fieldV := val.Field(fieldIdx) - switch { - case bodyType.AssignableTo(field.Type): - fieldV.Set(reflect.ValueOf(leftovers)) - case attrsType.AssignableTo(field.Type): - attrs, attrsDiags := leftovers.JustAttributes() - if len(attrsDiags) > 0 { - diags = append(diags, attrsDiags...) - } - fieldV.Set(reflect.ValueOf(attrs)) - default: - diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...) - } - } - - for name, fieldIdx := range tags.Attributes { - attr := content.Attributes[name] - field := val.Type().Field(fieldIdx) - fieldV := val.Field(fieldIdx) - - if attr == nil { - if !exprType.AssignableTo(field.Type) { - continue - } - - // As a special case, if the target is of type hcl.Expression then - // we'll assign an actual expression that evaluates to a cty null, - // so the caller can deal with it within the cty realm rather - // than within the Go realm. - synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange()) - fieldV.Set(reflect.ValueOf(synthExpr)) - continue - } - - switch { - case attrType.AssignableTo(field.Type): - fieldV.Set(reflect.ValueOf(attr)) - case exprType.AssignableTo(field.Type): - fieldV.Set(reflect.ValueOf(attr.Expr)) - default: - diags = append(diags, DecodeExpression( - attr.Expr, ctx, fieldV.Addr().Interface(), - )...) - } - } - - blocksByType := content.Blocks.ByType() - - for typeName, fieldIdx := range tags.Blocks { - blocks := blocksByType[typeName] - field := val.Type().Field(fieldIdx) - - ty := field.Type - isSlice := false - isPtr := false - if ty.Kind() == reflect.Slice { - isSlice = true - ty = ty.Elem() - } - if ty.Kind() == reflect.Ptr { - isPtr = true - ty = ty.Elem() - } - - if len(blocks) > 1 && !isSlice { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", typeName), - Detail: fmt.Sprintf( - "Only one %s block is allowed.
Another was defined at %s.", - typeName, blocks[0].DefRange.String(), - ), - Subject: &blocks[1].DefRange, - }) - continue - } - - if len(blocks) == 0 { - if isSlice || isPtr { - if val.Field(fieldIdx).IsNil() { - val.Field(fieldIdx).Set(reflect.Zero(field.Type)) - } - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Missing %s block", typeName), - Detail: fmt.Sprintf("A %s block is required.", typeName), - Subject: body.MissingItemRange().Ptr(), - }) - } - continue - } - - switch { - - case isSlice: - elemType := ty - if isPtr { - elemType = reflect.PtrTo(ty) - } - sli := val.Field(fieldIdx) - if sli.IsNil() { - sli = reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks)) - } - - for i, block := range blocks { - if isPtr { - if i >= sli.Len() { - sli = reflect.Append(sli, reflect.New(ty)) - } - v := sli.Index(i) - if v.IsNil() { - v = reflect.New(ty) - } - diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) - sli.Index(i).Set(v) - } else { - if i >= sli.Len() { - sli = reflect.Append(sli, reflect.Indirect(reflect.New(ty))) - } - diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...) - } - } - - if sli.Len() > len(blocks) { - sli.SetLen(len(blocks)) - } - - val.Field(fieldIdx).Set(sli) - - default: - block := blocks[0] - if isPtr { - v := val.Field(fieldIdx) - if v.IsNil() { - v = reflect.New(ty) - } - diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) - val.Field(fieldIdx).Set(v) - } else { - diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...) - } - - } - - } - - return diags -} - -func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { - attrs, diags := body.JustAttributes() - if attrs == nil { - return diags - } - - mv := reflect.MakeMap(v.Type()) - - for k, attr := range attrs { - switch { - case attrType.AssignableTo(v.Type().Elem()): - mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr)) - case exprType.AssignableTo(v.Type().Elem()): - mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr)) - default: - ev := reflect.New(v.Type().Elem()) - diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...) - mv.SetMapIndex(reflect.ValueOf(k), ev.Elem()) - } - } - - v.Set(mv) - - return diags -} - -func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { - var diags hcl.Diagnostics - - ty := v.Type() - - switch { - case blockType.AssignableTo(ty): - v.Elem().Set(reflect.ValueOf(block)) - case bodyType.AssignableTo(ty): - v.Elem().Set(reflect.ValueOf(block.Body)) - case attrsType.AssignableTo(ty): - attrs, attrsDiags := block.Body.JustAttributes() - if len(attrsDiags) > 0 { - diags = append(diags, attrsDiags...) - } - v.Elem().Set(reflect.ValueOf(attrs)) - default: - diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...) - - if len(block.Labels) > 0 { - blockTags := getFieldTags(ty) - for li, lv := range block.Labels { - lfieldIdx := blockTags.Labels[li].FieldIndex - v.Field(lfieldIdx).Set(reflect.ValueOf(lv)) - } - } - - } - - return diags -} - -// DecodeExpression extracts the value of the given expression into the given -// value. This value must be something that gocty is able to decode into, -// since the final decoding is delegated to that package. -// -// The given EvalContext is used to resolve any variables or functions in -// expressions encountered while decoding. 
This may be nil to require only -// constant values, for simple applications that do not support variables or -// functions. -// -// The returned diagnostics should be inspected with its HasErrors method to -// determine if the populated value is valid and complete. If error diagnostics -// are returned then the given value may have been partially-populated but -// may still be accessed by a careful caller for static analysis and editor -// integration use-cases. -func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { - srcVal, diags := expr.Value(ctx) - - convTy, err := gocty.ImpliedType(val) - if err != nil { - panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err)) - } - - srcVal, err = convert.Convert(srcVal, convTy) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsuitable value type", - Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), - Subject: expr.StartRange().Ptr(), - Context: expr.Range().Ptr(), - }) - return diags - } - - err = gocty.FromCtyValue(srcVal, val) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsuitable value type", - Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), - Subject: expr.StartRange().Ptr(), - Context: expr.Range().Ptr(), - }) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go deleted file mode 100644 index 419efb03de..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go +++ /dev/null @@ -1,64 +0,0 @@ -// Package gohcl allows decoding HCL configurations into Go data structures. -// -// It provides a convenient and concise way of describing the schema for -// configuration and then accessing the resulting data via native Go -// types. -// -// A struct field tag scheme is used, similar to other decoding and -// unmarshalling libraries. The tags are formatted as in the following example: -// -// ThingType string `hcl:"thing_type,attr"` -// -// Within each tag there are two comma-separated tokens. The first is the -// name of the corresponding construct in configuration, while the second -// is a keyword giving the kind of construct expected. The following -// kind keywords are supported: -// -// attr (the default) indicates that the value is to be populated from an attribute -// block indicates that the value is to populated from a block -// label indicates that the value is to populated from a block label -// optional is the same as attr, but the field is optional -// remain indicates that the value is to be populated from the remaining body after populating other fields -// -// "attr" fields may either be of type *hcl.Expression, in which case the raw -// expression is assigned, or of any type accepted by gocty, in which case -// gocty will be used to assign the value to a native Go type. -// -// "block" fields may be of type *hcl.Block or hcl.Body, in which case the -// corresponding raw value is assigned, or may be a struct that recursively -// uses the same tags. Block fields may also be slices of any of these types, -// in which case multiple blocks of the corresponding type are decoded into -// the slice. -// -// "body" can be placed on a single field of type hcl.Body to capture -// the full hcl.Body that was decoded for a block. This does not allow leftover -// values like "remain", so a decoding error will still be returned if leftover -// fields are given. 
If you want to capture the decoding body PLUS leftover -// fields, you must specify a "remain" field as well to prevent errors. The -// body field and the remain field will both contain the leftover fields. -// -// "label" fields are considered only in a struct used as the type of a field -// marked as "block", and are used sequentially to capture the labels of -// the blocks being decoded. In this case, the name token is used only as -// an identifier for the label in diagnostic messages. -// -// "optional" fields behave like "attr" fields, but they are optional -// and will not give parsing errors if they are missing. -// -// "remain" can be placed on a single field that may be either of type -// hcl.Body or hcl.Attributes, in which case any remaining body content is -// placed into this field for delayed processing. If no "remain" field is -// present then any attributes or blocks not matched by another valid tag -// will cause an error diagnostic. -// -// Only a subset of this tagging/typing vocabulary is supported for the -// "Encode" family of functions. See the EncodeIntoBody docs for full details -// on the constraints there. -// -// Broadly speaking this package deals with two types of error. The first is -// errors in the configuration itself, which are returned as diagnostics -// written with the configuration author as the target audience. The second -// is bugs in the calling program, such as invalid struct tags, which are -// surfaced via panics since there can be no useful runtime handling of such -// errors and they should certainly not be returned to the user as diagnostics. -package gohcl diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go deleted file mode 100644 index d612e09c9f..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go +++ /dev/null @@ -1,191 +0,0 @@ -package gohcl - -import ( - "fmt" - "reflect" - "sort" - - "github.com/hashicorp/hcl/v2/hclwrite" - "github.com/zclconf/go-cty/cty/gocty" -) - -// EncodeIntoBody replaces the contents of the given hclwrite Body with -// attributes and blocks derived from the given value, which must be a -// struct value or a pointer to a struct value with the struct tags defined -// in this package. -// -// This function can work only with fully-decoded data. It will ignore any -// fields tagged as "remain", any fields that decode attributes into either -// hcl.Attribute or hcl.Expression values, and any fields that decode blocks -// into hcl.Attributes values. This function does not have enough information -// to complete the decoding of these types. -// -// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock -// to produce a whole hclwrite.Block including block labels. -// -// As long as a suitable value is given to encode and the destination body -// is non-nil, this function will always complete. It will panic in case of -// any errors in the calling program, such as passing an inappropriate type -// or a nil body. -// -// The layout of the resulting HCL source is derived from the ordering of -// the struct fields, with blank lines around nested blocks of different types. -// Fields representing attributes should usually precede those representing -// blocks so that the attributes can group together in the result. For more -// control, use the hclwrite API directly.
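Encoding is the mirror image of the decoding sketch shown earlier. A minimal sketch under stated assumptions: the Service type, its tags, and the expected output comment are illustrative.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

type Service struct {
	Name string `hcl:"name,label"`
	Addr string `hcl:"listen_addr"`
}

func main() {
	f := hclwrite.NewEmptyFile()
	// The "label" field supplies the block label; the "attr" field becomes
	// an attribute in the block body.
	block := gohcl.EncodeAsBlock(&Service{Name: "web", Addr: "127.0.0.1:8080"}, "service")
	f.Body().AppendBlock(block)
	fmt.Printf("%s", f.Bytes())
	// service "web" {
	//   listen_addr = "127.0.0.1:8080"
	// }
}
```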
-func EncodeIntoBody(val interface{}, dst *hclwrite.Body) { - rv := reflect.ValueOf(val) - ty := rv.Type() - if ty.Kind() == reflect.Ptr { - rv = rv.Elem() - ty = rv.Type() - } - if ty.Kind() != reflect.Struct { - panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) - } - - tags := getFieldTags(ty) - populateBody(rv, ty, tags, dst) -} - -// EncodeAsBlock creates a new hclwrite.Block populated with the data from -// the given value, which must be a struct or pointer to struct with the -// struct tags defined in this package. -// -// If the given struct type has fields tagged with "label" tags then they -// will be used in order to annotate the created block with labels. -// -// This function has the same constraints as EncodeIntoBody and will panic -// if they are violated. -func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block { - rv := reflect.ValueOf(val) - ty := rv.Type() - if ty.Kind() == reflect.Ptr { - rv = rv.Elem() - ty = rv.Type() - } - if ty.Kind() != reflect.Struct { - panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) - } - - tags := getFieldTags(ty) - labels := make([]string, len(tags.Labels)) - for i, lf := range tags.Labels { - lv := rv.Field(lf.FieldIndex) - // We just stringify whatever we find. It should always be a string - // but if not then we'll still do something reasonable. - labels[i] = fmt.Sprintf("%s", lv.Interface()) - } - - block := hclwrite.NewBlock(blockType, labels) - populateBody(rv, ty, tags, block.Body()) - return block -} - -func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) { - nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks)) - namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks)) - for n, i := range tags.Attributes { - nameIdxs[n] = i - namesOrder = append(namesOrder, n) - } - for n, i := range tags.Blocks { - nameIdxs[n] = i - namesOrder = append(namesOrder, n) - } - sort.SliceStable(namesOrder, func(i, j int) bool { - ni, nj := namesOrder[i], namesOrder[j] - return nameIdxs[ni] < nameIdxs[nj] - }) - - dst.Clear() - - prevWasBlock := false - for _, name := range namesOrder { - fieldIdx := nameIdxs[name] - field := ty.Field(fieldIdx) - fieldTy := field.Type - fieldVal := rv.Field(fieldIdx) - - if fieldTy.Kind() == reflect.Ptr { - fieldTy = fieldTy.Elem() - fieldVal = fieldVal.Elem() - } - - if _, isAttr := tags.Attributes[name]; isAttr { - - if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) { - continue // ignore undecoded fields - } - if !fieldVal.IsValid() { - continue // ignore (field value is nil pointer) - } - if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() { - continue // ignore - } - if prevWasBlock { - dst.AppendNewline() - prevWasBlock = false - } - - valTy, err := gocty.ImpliedType(fieldVal.Interface()) - if err != nil { - panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err)) - } - - val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy) - if err != nil { - // This should never happen, since we should always be able - // to decode into the implied type. 
- panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err)) - } - - dst.SetAttributeValue(name, val) - - } else { // must be a block, then - elemTy := fieldTy - isSeq := false - if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array { - isSeq = true - elemTy = elemTy.Elem() - } - - if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) { - continue // ignore undecoded fields - } - prevWasBlock = false - - if isSeq { - l := fieldVal.Len() - for i := 0; i < l; i++ { - elemVal := fieldVal.Index(i) - if !elemVal.IsValid() { - continue // ignore (elem value is nil pointer) - } - if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() { - continue // ignore - } - block := EncodeAsBlock(elemVal.Interface(), name) - if !prevWasBlock { - dst.AppendNewline() - prevWasBlock = true - } - dst.AppendBlock(block) - } - } else { - if !fieldVal.IsValid() { - continue // ignore (field value is nil pointer) - } - if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() { - continue // ignore - } - block := EncodeAsBlock(fieldVal.Interface(), name) - if !prevWasBlock { - dst.AppendNewline() - prevWasBlock = true - } - dst.AppendBlock(block) - } - } - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go deleted file mode 100644 index df21cc493b..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go +++ /dev/null @@ -1,181 +0,0 @@ -package gohcl - -import ( - "fmt" - "reflect" - "sort" - "strings" - - "github.com/hashicorp/hcl/v2" -) - -// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the -// given value, which must be a struct value or a pointer to one. If an -// inappropriate value is passed, this function will panic. -// -// The second return argument indicates whether the given struct includes -// a "remain" field, and thus the returned schema is non-exhaustive. -// -// This uses the tags on the fields of the struct to discover how each -// field's value should be expressed within configuration. If an invalid -// mapping is attempted, this function will panic. -func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) { - ty := reflect.TypeOf(val) - - if ty.Kind() == reflect.Ptr { - ty = ty.Elem() - } - - if ty.Kind() != reflect.Struct { - panic(fmt.Sprintf("given value must be struct, not %T", val)) - } - - var attrSchemas []hcl.AttributeSchema - var blockSchemas []hcl.BlockHeaderSchema - - tags := getFieldTags(ty) - - attrNames := make([]string, 0, len(tags.Attributes)) - for n := range tags.Attributes { - attrNames = append(attrNames, n) - } - sort.Strings(attrNames) - for _, n := range attrNames { - idx := tags.Attributes[n] - optional := tags.Optional[n] - field := ty.Field(idx) - - var required bool - - switch { - case field.Type.AssignableTo(exprType): - // If we're decoding to hcl.Expression then absense can be - // indicated via a null value, so we don't specify that - // the field is required during decoding. 
- required = false - case field.Type.Kind() != reflect.Ptr && !optional: - required = true - default: - required = false - } - - attrSchemas = append(attrSchemas, hcl.AttributeSchema{ - Name: n, - Required: required, - }) - } - - blockNames := make([]string, 0, len(tags.Blocks)) - for n := range tags.Blocks { - blockNames = append(blockNames, n) - } - sort.Strings(blockNames) - for _, n := range blockNames { - idx := tags.Blocks[n] - field := ty.Field(idx) - fty := field.Type - if fty.Kind() == reflect.Slice { - fty = fty.Elem() - } - if fty.Kind() == reflect.Ptr { - fty = fty.Elem() - } - if fty.Kind() != reflect.Struct { - panic(fmt.Sprintf( - "hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name, - )) - } - ftags := getFieldTags(fty) - var labelNames []string - if len(ftags.Labels) > 0 { - labelNames = make([]string, len(ftags.Labels)) - for i, l := range ftags.Labels { - labelNames[i] = l.Name - } - } - - blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{ - Type: n, - LabelNames: labelNames, - }) - } - - partial = tags.Remain != nil - schema = &hcl.BodySchema{ - Attributes: attrSchemas, - Blocks: blockSchemas, - } - return schema, partial -} - -type fieldTags struct { - Attributes map[string]int - Blocks map[string]int - Labels []labelField - Remain *int - Body *int - Optional map[string]bool -} - -type labelField struct { - FieldIndex int - Name string -} - -func getFieldTags(ty reflect.Type) *fieldTags { - ret := &fieldTags{ - Attributes: map[string]int{}, - Blocks: map[string]int{}, - Optional: map[string]bool{}, - } - - ct := ty.NumField() - for i := 0; i < ct; i++ { - field := ty.Field(i) - tag := field.Tag.Get("hcl") - if tag == "" { - continue - } - - comma := strings.Index(tag, ",") - var name, kind string - if comma != -1 { - name = tag[:comma] - kind = tag[comma+1:] - } else { - name = tag - kind = "attr" - } - - switch kind { - case "attr": - ret.Attributes[name] = i - case "block": - ret.Blocks[name] = i - case "label": - ret.Labels = append(ret.Labels, labelField{ - FieldIndex: i, - Name: name, - }) - case "remain": - if ret.Remain != nil { - panic("only one 'remain' tag is permitted") - } - idx := i // copy, because this loop will continue assigning to i - ret.Remain = &idx - case "body": - if ret.Body != nil { - panic("only one 'body' tag is permitted") - } - idx := i // copy, because this loop will continue assigning to i - ret.Body = &idx - case "optional": - ret.Attributes[name] = i - ret.Optional[name] = true - default: - panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name)) - } - } - - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go deleted file mode 100644 index a8d00f8ff2..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go +++ /dev/null @@ -1,16 +0,0 @@ -package gohcl - -import ( - "reflect" - - "github.com/hashicorp/hcl/v2" -) - -var victimExpr hcl.Expression -var victimBody hcl.Body - -var exprType = reflect.TypeOf(&victimExpr).Elem() -var bodyType = reflect.TypeOf(&victimBody).Elem() -var blockType = reflect.TypeOf((*hcl.Block)(nil)) -var attrType = reflect.TypeOf((*hcl.Attribute)(nil)) -var attrsType = reflect.TypeOf(hcl.Attributes(nil)) diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go deleted file mode 100644 index 71de451934..0000000000 --- 
a/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go +++ /dev/null @@ -1,21 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" -) - -type blockLabel struct { - Value string - Range hcl.Range -} - -func labelsForBlock(block *hcl.Block) []blockLabel { - ret := make([]blockLabel, len(block.Labels)) - for i := range block.Labels { - ret[i] = blockLabel{ - Value: block.Labels[i], - Range: block.LabelRanges[i], - } - } - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go deleted file mode 100644 index c6e42236d8..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -func decode(body hcl.Body, blockLabels []blockLabel, ctx *hcl.EvalContext, spec Spec, partial bool) (cty.Value, hcl.Body, hcl.Diagnostics) { - schema := ImpliedSchema(spec) - - var content *hcl.BodyContent - var diags hcl.Diagnostics - var leftovers hcl.Body - - if partial { - content, leftovers, diags = body.PartialContent(schema) - } else { - content, diags = body.Content(schema) - } - - val, valDiags := spec.decode(content, blockLabels, ctx) - diags = append(diags, valDiags...) - - return val, leftovers, diags -} - -func impliedType(spec Spec) cty.Type { - return spec.impliedType() -} - -func sourceRange(body hcl.Body, blockLabels []blockLabel, spec Spec) hcl.Range { - schema := ImpliedSchema(spec) - content, _, _ := body.PartialContent(schema) - - return spec.sourceRange(content, blockLabels) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go deleted file mode 100644 index 23bfe542b2..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package hcldec provides a higher-level API for unpacking the content of -// HCL bodies, implemented in terms of the low-level "Content" API exposed -// by the bodies themselves. -// -// It allows decoding an entire nested configuration in a single operation -// by providing a description of the intended structure. -// -// For some applications it may be more convenient to use the "gohcl" -// package, which has a similar purpose but decodes directly into native -// Go data types. hcldec instead targets the cty type system, and thus allows -// a cty-driven application to remain within that type system. -package hcldec diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go deleted file mode 100644 index e2027cfd2d..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go +++ /dev/null @@ -1,23 +0,0 @@ -package hcldec - -import ( - "encoding/gob" -) - -func init() { - // Every Spec implementation should be registered with gob, so that - // specs can be sent over gob channels, such as using - // github.com/hashicorp/go-plugin with plugins that need to describe - // what shape of configuration they are expecting. 
- gob.Register(ObjectSpec(nil)) - gob.Register(TupleSpec(nil)) - gob.Register((*AttrSpec)(nil)) - gob.Register((*LiteralSpec)(nil)) - gob.Register((*ExprSpec)(nil)) - gob.Register((*BlockSpec)(nil)) - gob.Register((*BlockListSpec)(nil)) - gob.Register((*BlockSetSpec)(nil)) - gob.Register((*BlockMapSpec)(nil)) - gob.Register((*BlockLabelSpec)(nil)) - gob.Register((*DefaultSpec)(nil)) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go deleted file mode 100644 index 1fa548d0c3..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go +++ /dev/null @@ -1,81 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// Decode interprets the given body using the given specification and returns -// the resulting value. If the given body is not valid per the spec, error -// diagnostics are returned and the returned value is likely to be incomplete. -// -// The ctx argument may be nil, in which case any references to variables or -// functions will produce error diagnostics. -func Decode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - val, _, diags := decode(body, nil, ctx, spec, false) - return val, diags -} - -// PartialDecode is like Decode except that it permits "leftover" items in -// the top-level body, which are returned as a new body to allow for -// further processing. -// -// Any descendent block bodies are _not_ decoded partially and thus must -// be fully described by the given specification. -func PartialDecode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Body, hcl.Diagnostics) { - return decode(body, nil, ctx, spec, true) -} - -// ImpliedType returns the value type that should result from decoding the -// given spec. -func ImpliedType(spec Spec) cty.Type { - return impliedType(spec) -} - -// SourceRange interprets the given body using the given specification and -// then returns the source range of the value that would be used to -// fulfill the spec. -// -// This can be used if application-level validation detects value errors, to -// obtain a reasonable SourceRange to use for generated diagnostics. It works -// best when applied to specific body items (e.g. using AttrSpec, BlockSpec, ...) -// as opposed to entire bodies using ObjectSpec, TupleSpec. The result will -// be less useful the broader the specification, so e.g. a spec that returns -// the entirety of all of the blocks of a given type is likely to be -// _particularly_ arbitrary and useless. -// -// If the given body is not valid per the given spec, the result is best-effort -// and may not actually be something ideal. It's expected that an application -// will already have used Decode or PartialDecode earlier and thus had an -// opportunity to detect and report spec violations. -func SourceRange(body hcl.Body, spec Spec) hcl.Range { - return sourceRange(body, nil, spec) -} - -// ChildBlockTypes returns a map of all of the child block types declared -// by the given spec, with block type names as keys and the associated -// nested body specs as values. -func ChildBlockTypes(spec Spec) map[string]Spec { - ret := map[string]Spec{} - - // visitSameBodyChildren walks through the spec structure, calling - // the given callback for each descendent spec encountered. We are - // interested in the specs that reference attributes and blocks. 
- var visit visitFunc - visit = func(s Spec) { - if bs, ok := s.(blockSpec); ok { - for _, blockS := range bs.blockHeaderSchemata() { - nested := bs.nestedSpec() - if nested != nil { // nil can be returned to dynamically opt out of this interface - ret[blockS.Type] = nested - } - } - } - - s.visitSameBodyChildren(visit) - } - - visit(spec) - - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go deleted file mode 100644 index ddbe7fa4ab..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" -) - -// ImpliedSchema returns the *hcl.BodySchema implied by the given specification. -// This is the schema that the Decode function will use internally to -// access the content of a given body. -func ImpliedSchema(spec Spec) *hcl.BodySchema { - var attrs []hcl.AttributeSchema - var blocks []hcl.BlockHeaderSchema - - // visitSameBodyChildren walks through the spec structure, calling - // the given callback for each descendent spec encountered. We are - // interested in the specs that reference attributes and blocks. - var visit visitFunc - visit = func(s Spec) { - if as, ok := s.(attrSpec); ok { - attrs = append(attrs, as.attrSchemata()...) - } - - if bs, ok := s.(blockSpec); ok { - blocks = append(blocks, bs.blockHeaderSchemata()...) - } - - s.visitSameBodyChildren(visit) - } - - visit(spec) - - return &hcl.BodySchema{ - Attributes: attrs, - Blocks: blocks, - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go deleted file mode 100644 index b3cb1f84d9..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go +++ /dev/null @@ -1,1637 +0,0 @@ -package hcldec - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/customdecode" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" -) - -// A Spec is a description of how to decode a hcl.Body to a cty.Value. -// -// The various other types in this package whose names end in "Spec" are -// the spec implementations. The most common top-level spec is ObjectSpec, -// which decodes body content into a cty.Value of an object type. -type Spec interface { - // Perform the decode operation on the given body, in the context of - // the given block (which might be null), using the given eval context. - // - // "block" is provided only by the nested calls performed by the spec - // types that work on block bodies. - decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) - - // Return the cty.Type that should be returned when decoding a body with - // this spec. - impliedType() cty.Type - - // Call the given callback once for each of the nested specs that would - // get decoded with the same body and block as the receiver. This should - // not descend into the nested specs used when decoding blocks. - visitSameBodyChildren(cb visitFunc) - - // Determine the source range of the value that would be returned for the - // spec in the given content, in the context of the given block - // (which might be null). If the corresponding item is missing, return - // a place where it might be inserted. 
- sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range -} - -type visitFunc func(spec Spec) - -// An ObjectSpec is a Spec that produces a cty.Value of an object type whose -// attributes correspond to the keys of the spec map. -type ObjectSpec map[string]Spec - -// attrSpec is implemented by specs that require attributes from the body. -type attrSpec interface { - attrSchemata() []hcl.AttributeSchema -} - -// blockSpec is implemented by specs that require blocks from the body. -type blockSpec interface { - blockHeaderSchemata() []hcl.BlockHeaderSchema - nestedSpec() Spec -} - -// specNeedingVariables is implemented by specs that can use variables -// from the EvalContext, to declare which variables they need. -type specNeedingVariables interface { - variablesNeeded(content *hcl.BodyContent) []hcl.Traversal -} - -func (s ObjectSpec) visitSameBodyChildren(cb visitFunc) { - for _, c := range s { - cb(c) - } -} - -func (s ObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - vals := make(map[string]cty.Value, len(s)) - var diags hcl.Diagnostics - - for k, spec := range s { - var kd hcl.Diagnostics - vals[k], kd = spec.decode(content, blockLabels, ctx) - diags = append(diags, kd...) - } - - return cty.ObjectVal(vals), diags -} - -func (s ObjectSpec) impliedType() cty.Type { - if len(s) == 0 { - return cty.EmptyObject - } - - attrTypes := make(map[string]cty.Type) - for k, childSpec := range s { - attrTypes[k] = childSpec.impliedType() - } - return cty.Object(attrTypes) -} - -func (s ObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // This is not great, but the best we can do. In practice, it's rather - // strange to ask for the source range of an entire top-level body, since - // that's already readily available to the caller. - return content.MissingItemRange -} - -// A TupleSpec is a Spec that produces a cty.Value of a tuple type whose -// elements correspond to the elements of the spec slice. -type TupleSpec []Spec - -func (s TupleSpec) visitSameBodyChildren(cb visitFunc) { - for _, c := range s { - cb(c) - } -} - -func (s TupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - vals := make([]cty.Value, len(s)) - var diags hcl.Diagnostics - - for i, spec := range s { - var ed hcl.Diagnostics - vals[i], ed = spec.decode(content, blockLabels, ctx) - diags = append(diags, ed...) - } - - return cty.TupleVal(vals), diags -} - -func (s TupleSpec) impliedType() cty.Type { - if len(s) == 0 { - return cty.EmptyTuple - } - - attrTypes := make([]cty.Type, len(s)) - for i, childSpec := range s { - attrTypes[i] = childSpec.impliedType() - } - return cty.Tuple(attrTypes) -} - -func (s TupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // This is not great, but the best we can do. In practice, it's rather - // strange to ask for the source range of an entire top-level body, since - // that's already readily available to the caller. - return content.MissingItemRange -} - -// An AttrSpec is a Spec that evaluates a particular attribute expression in -// the body and returns its resulting value converted to the requested type, -// or produces a diagnostic if the type is incorrect. 
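To make the relationship between specs and decoding concrete, here is a minimal, self-contained sketch (the configuration and names are invented) that decodes two attributes with an ObjectSpec of AttrSpecs:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte(`
name = "web"
port = 8080
`)
	file, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// The spec is schema and decoder in one value: each AttrSpec contributes
	// an attribute schema plus a conversion to the requested cty type.
	spec := hcldec.ObjectSpec{
		"name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: true},
		"port": &hcldec.AttrSpec{Name: "port", Type: cty.Number, Required: false},
	}

	val, diags := hcldec.Decode(file.Body, spec, nil) // nil EvalContext: no variables or functions
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(val.GetAttr("name").AsString()) // "web"
}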
-type AttrSpec struct { - Name string - Type cty.Type - Required bool -} - -func (s *AttrSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -// specNeedingVariables implementation -func (s *AttrSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - attr, exists := content.Attributes[s.Name] - if !exists { - return nil - } - - return attr.Expr.Variables() -} - -// attrSpec implementation -func (s *AttrSpec) attrSchemata() []hcl.AttributeSchema { - return []hcl.AttributeSchema{ - { - Name: s.Name, - Required: s.Required, - }, - } -} - -func (s *AttrSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - attr, exists := content.Attributes[s.Name] - if !exists { - return content.MissingItemRange - } - - return attr.Expr.Range() -} - -func (s *AttrSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - attr, exists := content.Attributes[s.Name] - if !exists { - // We don't need to check required and emit a diagnostic here, because - // that would already have happened when building "content". - return cty.NullVal(s.Type), nil - } - - if decodeFn := customdecode.CustomExpressionDecoderForType(s.Type); decodeFn != nil { - v, diags := decodeFn(attr.Expr, ctx) - if v == cty.NilVal { - v = cty.UnknownVal(s.Type) - } - return v, diags - } - - val, diags := attr.Expr.Value(ctx) - - convVal, err := convert.Convert(val, s.Type) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect attribute value type", - Detail: fmt.Sprintf( - "Inappropriate value for attribute %q: %s.", - s.Name, err.Error(), - ), - Subject: attr.Expr.Range().Ptr(), - Context: hcl.RangeBetween(attr.NameRange, attr.Expr.Range()).Ptr(), - Expression: attr.Expr, - EvalContext: ctx, - }) - // We'll return an unknown value of the _correct_ type so that the - // incomplete result can still be used for some analysis use-cases. - val = cty.UnknownVal(s.Type) - } else { - val = convVal - } - - return val, diags -} - -func (s *AttrSpec) impliedType() cty.Type { - return s.Type -} - -// A LiteralSpec is a Spec that produces the given literal value, ignoring -// the given body. -type LiteralSpec struct { - Value cty.Value -} - -func (s *LiteralSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -func (s *LiteralSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return s.Value, nil -} - -func (s *LiteralSpec) impliedType() cty.Type { - return s.Value.Type() -} - -func (s *LiteralSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // No sensible range to return for a literal, so the caller had better - // ensure it doesn't cause any diagnostics. - return hcl.Range{ - Filename: "", - } -} - -// An ExprSpec is a Spec that evaluates the given expression, ignoring the -// given body. 
-type ExprSpec struct { - Expr hcl.Expression -} - -func (s *ExprSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -// specNeedingVariables implementation -func (s *ExprSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - return s.Expr.Variables() -} - -func (s *ExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return s.Expr.Value(ctx) -} - -func (s *ExprSpec) impliedType() cty.Type { - // We can't know the type of our expression until we evaluate it - return cty.DynamicPseudoType -} - -func (s *ExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - return s.Expr.Range() -} - -// A BlockSpec is a Spec that produces a cty.Value by decoding the contents -// of a single nested block of a given type, using a nested spec. -// -// If the Required flag is not set, the nested block may be omitted, in which -// case a null value is produced. If it _is_ set, an error diagnostic is -// produced if there are no nested blocks of the given type. -type BlockSpec struct { - TypeName string - Nested Spec - Required bool -} - -func (s *BlockSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return nil - } - - return Variables(childBlock.Body, s.Nested) -} - -func (s *BlockSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - if childBlock != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "Only one block of type %q is allowed. Previous definition was at %s.", - s.TypeName, childBlock.DefRange.String(), - ), - Subject: &candidate.DefRange, - }) - break - } - - childBlock = candidate - } - - if childBlock == nil { - if s.Required { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Missing %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block of type %q is required here.", s.TypeName, - ), - Subject: &content.MissingItemRange, - }) - } - return cty.NullVal(s.Nested.impliedType()), diags - } - - if s.Nested == nil { - panic("BlockSpec with no Nested Spec") - } - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) 
- return val, diags -} - -func (s *BlockSpec) impliedType() cty.Type { - return s.Nested.impliedType() -} - -func (s *BlockSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockListSpec is a Spec that produces a cty list of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -type BlockListSpec struct { - TypeName string - Nested Spec - MinItems int - MaxItems int -} - -func (s *BlockListSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockListSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockListSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockListSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockListSpec with no Nested Spec") - } - - var elems []cty.Value - var sourceRanges []hcl.Range - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) - elems = append(elems, val) - sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) - } - - if len(elems) < s.MinItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), - Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), - Subject: &content.MissingItemRange, - }) - } else if s.MaxItems > 0 && len(elems) > s.MaxItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), - Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), - Subject: &sourceRanges[s.MaxItems], - }) - } - - var ret cty.Value - - if len(elems) == 0 { - ret = cty.ListValEmpty(s.Nested.impliedType()) - } else { - // Since our target is a list, all of the decoded elements must have the - // same type or cty.ListVal will panic below. Different types can arise - // if there is an attribute spec of type cty.DynamicPseudoType in the - // nested spec; all given values must be convertable to a single type - // in order for the result to be considered valid. - etys := make([]cty.Type, len(elems)) - for i, v := range elems { - etys[i] = v.Type() - } - ety, convs := convert.UnifyUnsafe(etys) - if ety == cty.NilType { - // FIXME: This is a pretty terrible error message. 
- diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), - Detail: "Corresponding attributes in all blocks of this type must be the same.", - Subject: &sourceRanges[0], - }) - return cty.DynamicVal, diags - } - for i, v := range elems { - if convs[i] != nil { - newV, err := convs[i](v) - if err != nil { - // FIXME: This is a pretty terrible error message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), - Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err), - Subject: &sourceRanges[i], - }) - // Bail early here so we won't panic below in cty.ListVal - return cty.DynamicVal, diags - } - elems[i] = newV - } - } - - ret = cty.ListVal(elems) - } - - return ret, diags -} - -func (s *BlockListSpec) impliedType() cty.Type { - return cty.List(s.Nested.impliedType()) -} - -func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockTupleSpec is a Spec that produces a cty tuple of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -// -// This is similar to BlockListSpec, but it permits the nested blocks to have -// different result types in situations where cty.DynamicPseudoType attributes -// are present. -type BlockTupleSpec struct { - TypeName string - Nested Spec - MinItems int - MaxItems int -} - -func (s *BlockTupleSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockTupleSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockTupleSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockTupleSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockTupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockListSpec with no Nested Spec") - } - - var elems []cty.Value - var sourceRanges []hcl.Range - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) 
- elems = append(elems, val) - sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) - } - - if len(elems) < s.MinItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), - Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), - Subject: &content.MissingItemRange, - }) - } else if s.MaxItems > 0 && len(elems) > s.MaxItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), - Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), - Subject: &sourceRanges[s.MaxItems], - }) - } - - var ret cty.Value - - if len(elems) == 0 { - ret = cty.EmptyTupleVal - } else { - ret = cty.TupleVal(elems) - } - - return ret, diags -} - -func (s *BlockTupleSpec) impliedType() cty.Type { - // We can't predict our type, because we don't know how many blocks - // there will be until we decode. - return cty.DynamicPseudoType -} - -func (s *BlockTupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockSetSpec is a Spec that produces a cty set of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -type BlockSetSpec struct { - TypeName string - Nested Spec - MinItems int - MaxItems int -} - -func (s *BlockSetSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockSetSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockSetSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockSetSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockSetSpec with no Nested Spec") - } - - var elems []cty.Value - var sourceRanges []hcl.Range - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) 
- elems = append(elems, val) - sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) - } - - if len(elems) < s.MinItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), - Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), - Subject: &content.MissingItemRange, - }) - } else if s.MaxItems > 0 && len(elems) > s.MaxItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), - Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), - Subject: &sourceRanges[s.MaxItems], - }) - } - - var ret cty.Value - - if len(elems) == 0 { - ret = cty.SetValEmpty(s.Nested.impliedType()) - } else { - // Since our target is a set, all of the decoded elements must have the - // same type or cty.SetVal will panic below. Different types can arise - // if there is an attribute spec of type cty.DynamicPseudoType in the - // nested spec; all given values must be convertable to a single type - // in order for the result to be considered valid. - etys := make([]cty.Type, len(elems)) - for i, v := range elems { - etys[i] = v.Type() - } - ety, convs := convert.UnifyUnsafe(etys) - if ety == cty.NilType { - // FIXME: This is a pretty terrible error message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), - Detail: "Corresponding attributes in all blocks of this type must be the same.", - Subject: &sourceRanges[0], - }) - return cty.DynamicVal, diags - } - for i, v := range elems { - if convs[i] != nil { - newV, err := convs[i](v) - if err != nil { - // FIXME: This is a pretty terrible error message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), - Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err), - Subject: &sourceRanges[i], - }) - // Bail early here so we won't panic below in cty.ListVal - return cty.DynamicVal, diags - } - elems[i] = newV - } - } - - ret = cty.SetVal(elems) - } - - return ret, diags -} - -func (s *BlockSetSpec) impliedType() cty.Type { - return cty.Set(s.Nested.impliedType()) -} - -func (s *BlockSetSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockMapSpec is a Spec that produces a cty map of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -// -// One level of map structure is created for each of the given label names. -// There must be at least one given label name. 
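A hypothetical usage sketch in the same parse-then-decode style as the earlier example (file is assumed to come from hclsyntax.ParseConfig; names are invented). Two label names yield two levels of map nesting:

spec := &hcldec.BlockMapSpec{
	TypeName:   "endpoint",
	LabelNames: []string{"env", "service"},
	Nested: hcldec.ObjectSpec{
		"url": &hcldec.AttrSpec{Name: "url", Type: cty.String, Required: true},
	},
}

val, diags := hcldec.Decode(file.Body, spec, nil)

// For:
//   endpoint "prod" "api" { url = "https://api.example.com" }
//   endpoint "prod" "web" { url = "https://example.com" }
// val has type map(map(object({url = string}))), so
// val.Index(cty.StringVal("prod")).Index(cty.StringVal("api")).GetAttr("url")
// is cty.StringVal("https://api.example.com").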
-type BlockMapSpec struct { - TypeName string - LabelNames []string - Nested Spec -} - -func (s *BlockMapSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockMapSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), - }, - } -} - -// blockSpec implementation -func (s *BlockMapSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockMapSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockMapSpec with no Nested Spec") - } - if ImpliedType(s).HasDynamicTypes() { - panic("cty.DynamicPseudoType attributes may not be used inside a BlockMapSpec") - } - - elems := map[string]interface{}{} - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - childLabels := labelsForBlock(childBlock) - val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) - targetMap := elems - for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { - if _, exists := targetMap[key]; !exists { - targetMap[key] = make(map[string]interface{}) - } - targetMap = targetMap[key].(map[string]interface{}) - } - - diags = append(diags, childDiags...) - - key := childBlock.Labels[len(s.LabelNames)-1] - if _, exists := targetMap[key]; exists { - labelsBuf := bytes.Buffer{} - for _, label := range childBlock.Labels { - fmt.Fprintf(&labelsBuf, " %q", label) - } - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block for %s%s was already defined. The %s labels must be unique.", - s.TypeName, labelsBuf.String(), s.TypeName, - ), - Subject: &childBlock.DefRange, - }) - continue - } - - targetMap[key] = val - } - - if len(elems) == 0 { - return cty.MapValEmpty(s.Nested.impliedType()), diags - } - - var ctyMap func(map[string]interface{}, int) cty.Value - ctyMap = func(raw map[string]interface{}, depth int) cty.Value { - vals := make(map[string]cty.Value, len(raw)) - if depth == 1 { - for k, v := range raw { - vals[k] = v.(cty.Value) - } - } else { - for k, v := range raw { - vals[k] = ctyMap(v.(map[string]interface{}), depth-1) - } - } - return cty.MapVal(vals) - } - - return ctyMap(elems, len(s.LabelNames)), diags -} - -func (s *BlockMapSpec) impliedType() cty.Type { - ret := s.Nested.impliedType() - for _ = range s.LabelNames { - ret = cty.Map(ret) - } - return ret -} - -func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. 
- - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockObjectSpec is a Spec that produces a cty object of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -// -// One level of object structure is created for each of the given label names. -// There must be at least one given label name. -// -// This is similar to BlockMapSpec, but it permits the nested blocks to have -// different result types in situations where cty.DynamicPseudoType attributes -// are present. -type BlockObjectSpec struct { - TypeName string - LabelNames []string - Nested Spec -} - -func (s *BlockObjectSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockObjectSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), - }, - } -} - -// blockSpec implementation -func (s *BlockObjectSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockObjectSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockObjectSpec with no Nested Spec") - } - - elems := map[string]interface{}{} - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - childLabels := labelsForBlock(childBlock) - val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) - targetMap := elems - for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { - if _, exists := targetMap[key]; !exists { - targetMap[key] = make(map[string]interface{}) - } - targetMap = targetMap[key].(map[string]interface{}) - } - - diags = append(diags, childDiags...) - - key := childBlock.Labels[len(s.LabelNames)-1] - if _, exists := targetMap[key]; exists { - labelsBuf := bytes.Buffer{} - for _, label := range childBlock.Labels { - fmt.Fprintf(&labelsBuf, " %q", label) - } - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block for %s%s was already defined. 
The %s labels must be unique.", - s.TypeName, labelsBuf.String(), s.TypeName, - ), - Subject: &childBlock.DefRange, - }) - continue - } - - targetMap[key] = val - } - - if len(elems) == 0 { - return cty.EmptyObjectVal, diags - } - - var ctyObj func(map[string]interface{}, int) cty.Value - ctyObj = func(raw map[string]interface{}, depth int) cty.Value { - vals := make(map[string]cty.Value, len(raw)) - if depth == 1 { - for k, v := range raw { - vals[k] = v.(cty.Value) - } - } else { - for k, v := range raw { - vals[k] = ctyObj(v.(map[string]interface{}), depth-1) - } - } - return cty.ObjectVal(vals) - } - - return ctyObj(elems, len(s.LabelNames)), diags -} - -func (s *BlockObjectSpec) impliedType() cty.Type { - // We can't predict our type, since we don't know how many blocks are - // present and what labels they have until we decode. - return cty.DynamicPseudoType -} - -func (s *BlockObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockAttrsSpec is a Spec that interprets a single block as if it were -// a map of some element type. That is, each attribute within the block -// becomes a key in the resulting map and the attribute's value becomes the -// element value, after conversion to the given element type. The resulting -// value is a cty.Map of the given element type. -// -// This spec imposes a validation constraint that there be exactly one block -// of the given type name and that this block may contain only attributes. The -// block does not accept any labels. -// -// This is an alternative to an AttrSpec of a map type for situations where -// block syntax is desired. Note that block syntax does not permit dynamic -// keys, construction of the result via a "for" expression, etc. In most cases -// an AttrSpec is preferred if the desired result is a map whose keys are -// chosen by the user rather than by schema. -type BlockAttrsSpec struct { - TypeName string - ElementType cty.Type - Required bool -} - -func (s *BlockAttrsSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -// blockSpec implementation -func (s *BlockAttrsSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: nil, - }, - } -} - -// blockSpec implementation -func (s *BlockAttrsSpec) nestedSpec() Spec { - // This is an odd case: we aren't actually going to apply a nested spec - // in this case, since we're going to interpret the body directly as - // attributes, but we need to return something non-nil so that the - // decoder will recognize this as a block spec. We won't actually be - // using this for anything at decode time. - return noopSpec{} -} - -// specNeedingVariables implementation -func (s *BlockAttrsSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - - block, _ := s.findBlock(content) - if block == nil { - return nil - } - - var vars []hcl.Traversal - - attrs, diags := block.Body.JustAttributes() - if diags.HasErrors() { - return nil - } - - for _, attr := range attrs { - vars = append(vars, attr.Expr.Variables()...) 
- } - - // We'll return the variables references in source order so that any - // error messages that result are also in source order. - sort.Slice(vars, func(i, j int) bool { - return vars[i].SourceRange().Start.Byte < vars[j].SourceRange().Start.Byte - }) - - return vars -} - -func (s *BlockAttrsSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - block, other := s.findBlock(content) - if block == nil { - if s.Required { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Missing %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block of type %q is required here.", s.TypeName, - ), - Subject: &content.MissingItemRange, - }) - } - return cty.NullVal(cty.Map(s.ElementType)), diags - } - if other != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "Only one block of type %q is allowed. Previous definition was at %s.", - s.TypeName, block.DefRange.String(), - ), - Subject: &other.DefRange, - }) - } - - attrs, attrDiags := block.Body.JustAttributes() - diags = append(diags, attrDiags...) - - if len(attrs) == 0 { - return cty.MapValEmpty(s.ElementType), diags - } - - vals := make(map[string]cty.Value, len(attrs)) - for name, attr := range attrs { - if decodeFn := customdecode.CustomExpressionDecoderForType(s.ElementType); decodeFn != nil { - attrVal, attrDiags := decodeFn(attr.Expr, ctx) - diags = append(diags, attrDiags...) - if attrVal == cty.NilVal { - attrVal = cty.UnknownVal(s.ElementType) - } - vals[name] = attrVal - continue - } - - attrVal, attrDiags := attr.Expr.Value(ctx) - diags = append(diags, attrDiags...) - - attrVal, err := convert.Convert(attrVal, s.ElementType) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid attribute value", - Detail: fmt.Sprintf("Invalid value for attribute of %q block: %s.", s.TypeName, err), - Subject: attr.Expr.Range().Ptr(), - Context: hcl.RangeBetween(attr.NameRange, attr.Expr.Range()).Ptr(), - Expression: attr.Expr, - EvalContext: ctx, - }) - attrVal = cty.UnknownVal(s.ElementType) - } - - vals[name] = attrVal - } - - return cty.MapVal(vals), diags -} - -func (s *BlockAttrsSpec) impliedType() cty.Type { - return cty.Map(s.ElementType) -} - -func (s *BlockAttrsSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - block, _ := s.findBlock(content) - if block == nil { - return content.MissingItemRange - } - return block.DefRange -} - -func (s *BlockAttrsSpec) findBlock(content *hcl.BodyContent) (block *hcl.Block, other *hcl.Block) { - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - if block != nil { - return block, candidate - } - block = candidate - } - - return block, nil -} - -// A BlockLabelSpec is a Spec that returns a cty.String representing the -// label of the block its given body belongs to, if indeed its given body -// belongs to a block. It is a programming error to use this in a non-block -// context, so this spec will panic in that case. -// -// This spec only works in the nested spec within a BlockSpec, BlockListSpec, -// BlockSetSpec or BlockMapSpec. -// -// The full set of label specs used against a particular block must have a -// consecutive set of indices starting at zero. 
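Continuing the same sketch style (names invented), a nested spec that captures a block label as an ordinary attribute of the decoded result:

spec := &hcldec.BlockListSpec{
	TypeName: "task",
	Nested: hcldec.ObjectSpec{
		// Index 0 refers to the first (and here only) label on each "task" block.
		"name":    &hcldec.BlockLabelSpec{Index: 0, Name: "name"},
		"command": &hcldec.AttrSpec{Name: "command", Type: cty.String, Required: true},
	},
}

// For:
//   task "build" { command = "make" }
//   task "test"  { command = "make test" }
// hcldec.Decode(body, spec, nil) returns a list of two objects, each carrying
// "name" (from the label) and "command" (from the attribute). The presence of
// a BlockLabelSpec at index 0 is also what makes each "task" block require
// exactly one label, via findLabelSpecs below.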
The maximum index found -// defines how many labels the corresponding blocks must have in cty source. -type BlockLabelSpec struct { - Index int - Name string -} - -func (s *BlockLabelSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -func (s *BlockLabelSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - if s.Index >= len(blockLabels) { - panic("BlockListSpec used in non-block context") - } - - return cty.StringVal(blockLabels[s.Index].Value), nil -} - -func (s *BlockLabelSpec) impliedType() cty.Type { - return cty.String // labels are always strings -} - -func (s *BlockLabelSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - if s.Index >= len(blockLabels) { - panic("BlockListSpec used in non-block context") - } - - return blockLabels[s.Index].Range -} - -func findLabelSpecs(spec Spec) []string { - maxIdx := -1 - var names map[int]string - - var visit visitFunc - visit = func(s Spec) { - if ls, ok := s.(*BlockLabelSpec); ok { - if maxIdx < ls.Index { - maxIdx = ls.Index - } - if names == nil { - names = make(map[int]string) - } - names[ls.Index] = ls.Name - } - s.visitSameBodyChildren(visit) - } - - visit(spec) - - if maxIdx < 0 { - return nil // no labels at all - } - - ret := make([]string, maxIdx+1) - for i := range ret { - name := names[i] - if name == "" { - // Should never happen if the spec is conformant, since we require - // consecutive indices starting at zero. - name = fmt.Sprintf("missing%02d", i) - } - ret[i] = name - } - - return ret -} - -// DefaultSpec is a spec that wraps two specs, evaluating the primary first -// and then evaluating the default if the primary returns a null value. -// -// The two specifications must have the same implied result type for correct -// operation. If not, the result is undefined. -// -// Any requirements imposed by the "Default" spec apply even if "Primary" does -// not return null. For example, if the "Default" spec is for a required -// attribute then that attribute is always required, regardless of the result -// of the "Primary" spec. -// -// The "Default" spec must not describe a nested block, since otherwise the -// result of ChildBlockTypes would not be decidable without evaluation. If -// the default spec _does_ describe a nested block then the result is -// undefined. -type DefaultSpec struct { - Primary Spec - Default Spec -} - -func (s *DefaultSpec) visitSameBodyChildren(cb visitFunc) { - cb(s.Primary) - cb(s.Default) -} - -func (s *DefaultSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - val, diags := s.Primary.decode(content, blockLabels, ctx) - if val.IsNull() { - var moreDiags hcl.Diagnostics - val, moreDiags = s.Default.decode(content, blockLabels, ctx) - diags = append(diags, moreDiags...) - } - return val, diags -} - -func (s *DefaultSpec) impliedType() cty.Type { - return s.Primary.impliedType() -} - -// attrSpec implementation -func (s *DefaultSpec) attrSchemata() []hcl.AttributeSchema { - // We must pass through the union of both of our nested specs so that - // we'll have both values available in the result. - var ret []hcl.AttributeSchema - if as, ok := s.Primary.(attrSpec); ok { - ret = append(ret, as.attrSchemata()...) - } - if as, ok := s.Default.(attrSpec); ok { - ret = append(ret, as.attrSchemata()...) 
- } - return ret -} - -// blockSpec implementation -func (s *DefaultSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - // Only the primary spec may describe a block, since otherwise - // our nestedSpec method below can't know which to return. - if bs, ok := s.Primary.(blockSpec); ok { - return bs.blockHeaderSchemata() - } - return nil -} - -// blockSpec implementation -func (s *DefaultSpec) nestedSpec() Spec { - if bs, ok := s.Primary.(blockSpec); ok { - return bs.nestedSpec() - } - return nil -} - -func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We can't tell from here which of the two specs will ultimately be used - // in our result, so we'll just assume the first. This is usually the right - // choice because the default is often a literal spec that doesn't have a - // reasonable source range to return anyway. - return s.Primary.sourceRange(content, blockLabels) -} - -// TransformExprSpec is a spec that wraps another and then evaluates a given -// hcl.Expression on the result. -// -// The implied type of this spec is determined by evaluating the expression -// with an unknown value of the nested spec's implied type, which may cause -// the result to be imprecise. This spec should not be used in situations where -// precise result type information is needed. -type TransformExprSpec struct { - Wrapped Spec - Expr hcl.Expression - TransformCtx *hcl.EvalContext - VarName string -} - -func (s *TransformExprSpec) visitSameBodyChildren(cb visitFunc) { - cb(s.Wrapped) -} - -func (s *TransformExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) - if diags.HasErrors() { - // We won't try to run our function in this case, because it'll probably - // generate confusing additional errors that will distract from the - // root cause. - return cty.UnknownVal(s.impliedType()), diags - } - - chiCtx := s.TransformCtx.NewChild() - chiCtx.Variables = map[string]cty.Value{ - s.VarName: wrappedVal, - } - resultVal, resultDiags := s.Expr.Value(chiCtx) - diags = append(diags, resultDiags...) - return resultVal, diags -} - -func (s *TransformExprSpec) impliedType() cty.Type { - wrappedTy := s.Wrapped.impliedType() - chiCtx := s.TransformCtx.NewChild() - chiCtx.Variables = map[string]cty.Value{ - s.VarName: cty.UnknownVal(wrappedTy), - } - resultVal, _ := s.Expr.Value(chiCtx) - return resultVal.Type() -} - -func (s *TransformExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We'll just pass through our wrapped range here, even though that's - // not super-accurate, because there's nothing better to return. - return s.Wrapped.sourceRange(content, blockLabels) -} - -// TransformFuncSpec is a spec that wraps another and then evaluates a given -// cty function with the result. The given function must expect exactly one -// argument, where the result of the wrapped spec will be passed. -// -// The implied type of this spec is determined by type-checking the function -// with an unknown value of the nested spec's implied type, which may cause -// the result to be imprecise. This spec should not be used in situations where -// precise result type information is needed. -// -// If the given function produces an error when run, this spec will produce -// a non-user-actionable diagnostic message. 
It's the caller's responsibility -// to ensure that the given function cannot fail for any non-error result -// of the wrapped spec. -type TransformFuncSpec struct { - Wrapped Spec - Func function.Function -} - -func (s *TransformFuncSpec) visitSameBodyChildren(cb visitFunc) { - cb(s.Wrapped) -} - -func (s *TransformFuncSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) - if diags.HasErrors() { - // We won't try to run our function in this case, because it'll probably - // generate confusing additional errors that will distract from the - // root cause. - return cty.UnknownVal(s.impliedType()), diags - } - - resultVal, err := s.Func.Call([]cty.Value{wrappedVal}) - if err != nil { - // This is not a good example of a diagnostic because it is reporting - // a programming error in the calling application, rather than something - // an end-user could act on. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Transform function failed", - Detail: fmt.Sprintf("Decoder transform returned an error: %s", err), - Subject: s.sourceRange(content, blockLabels).Ptr(), - }) - return cty.UnknownVal(s.impliedType()), diags - } - - return resultVal, diags -} - -func (s *TransformFuncSpec) impliedType() cty.Type { - wrappedTy := s.Wrapped.impliedType() - resultTy, err := s.Func.ReturnType([]cty.Type{wrappedTy}) - if err != nil { - // Should never happen with a correctly-configured spec - return cty.DynamicPseudoType - } - - return resultTy -} - -func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We'll just pass through our wrapped range here, even though that's - // not super-accurate, because there's nothing better to return. - return s.Wrapped.sourceRange(content, blockLabels) -} - -// ValidateFuncSpec is a spec that allows for extended -// developer-defined validation. The validation function receives the -// result of the wrapped spec. -// -// The Subject field of the returned Diagnostic is optional. If not -// specified, it is automatically populated with the range covered by -// the wrapped spec. -// -type ValidateSpec struct { - Wrapped Spec - Func func(value cty.Value) hcl.Diagnostics -} - -func (s *ValidateSpec) visitSameBodyChildren(cb visitFunc) { - cb(s.Wrapped) -} - -func (s *ValidateSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) - if diags.HasErrors() { - // We won't try to run our function in this case, because it'll probably - // generate confusing additional errors that will distract from the - // root cause. - return cty.UnknownVal(s.impliedType()), diags - } - - validateDiags := s.Func(wrappedVal) - // Auto-populate the Subject fields if they weren't set. - for i := range validateDiags { - if validateDiags[i].Subject == nil { - validateDiags[i].Subject = s.sourceRange(content, blockLabels).Ptr() - } - } - - diags = append(diags, validateDiags...) 
- return wrappedVal, diags -} - -func (s *ValidateSpec) impliedType() cty.Type { - return s.Wrapped.impliedType() -} - -func (s *ValidateSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - return s.Wrapped.sourceRange(content, blockLabels) -} - -// noopSpec is a placeholder spec that does nothing, used in situations where -// a non-nil placeholder spec is required. It is not exported because there is -// no reason to use it directly; it is always an implementation detail only. -type noopSpec struct { -} - -func (s noopSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return cty.NullVal(cty.DynamicPseudoType), nil -} - -func (s noopSpec) impliedType() cty.Type { - return cty.DynamicPseudoType -} - -func (s noopSpec) visitSameBodyChildren(cb visitFunc) { - // nothing to do -} - -func (s noopSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // No useful range for a noopSpec, and nobody should be calling this anyway. - return hcl.Range{ - Filename: "noopSpec", - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go deleted file mode 100644 index f8440eb602..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" -) - -// Variables processes the given body with the given spec and returns a -// list of the variable traversals that would be required to decode -// the same pairing of body and spec. -// -// This can be used to conditionally populate the variables in the EvalContext -// passed to Decode, for applications where a static scope is insufficient. -// -// If the given body is not compliant with the given schema, the result may -// be incomplete, but that's assumed to be okay because the eventual call -// to Decode will produce error diagnostics anyway. -func Variables(body hcl.Body, spec Spec) []hcl.Traversal { - var vars []hcl.Traversal - schema := ImpliedSchema(spec) - content, _, _ := body.PartialContent(schema) - - if vs, ok := spec.(specNeedingVariables); ok { - vars = append(vars, vs.variablesNeeded(content)...) - } - - var visitFn visitFunc - visitFn = func(s Spec) { - if vs, ok := s.(specNeedingVariables); ok { - vars = append(vars, vs.variablesNeeded(content)...) - } - s.visitSameBodyChildren(visitFn) - } - spec.visitSameBodyChildren(visitFn) - - return vars -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcled/doc.go b/vendor/github.com/hashicorp/hcl/v2/hcled/doc.go deleted file mode 100644 index 1a8014480c..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcled/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package hcled provides functionality intended to help an application -// that embeds HCL to deliver relevant information to a text editor or IDE -// for navigating around and analyzing configuration files. -package hcled diff --git a/vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go b/vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go deleted file mode 100644 index 050ad758f6..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go +++ /dev/null @@ -1,34 +0,0 @@ -package hcled - -import ( - "github.com/hashicorp/hcl/v2" -) - -type contextStringer interface { - ContextString(offset int) string -} - -// ContextString returns a string describing the context of the given byte -// offset, if available. 
An empty string is returned if no such information -// is available, or otherwise the returned string is in a form that depends -// on the language used to write the referenced file. -func ContextString(file *hcl.File, offset int) string { - if cser, ok := file.Nav.(contextStringer); ok { - return cser.ContextString(offset) - } - return "" -} - -type contextDefRanger interface { - ContextDefRange(offset int) hcl.Range -} - -func ContextDefRange(file *hcl.File, offset int) hcl.Range { - if cser, ok := file.Nav.(contextDefRanger); ok { - defRange := cser.ContextDefRange(offset) - if !defRange.Empty() { - return defRange - } - } - return file.Body.MissingItemRange() -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go deleted file mode 100644 index 1dc2eccd87..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go +++ /dev/null @@ -1,135 +0,0 @@ -// Package hclparse has the main API entry point for parsing both HCL native -// syntax and HCL JSON. -// -// The main HCL package also includes SimpleParse and SimpleParseFile which -// can be a simpler interface for the common case where an application just -// needs to parse a single file. The gohcl package simplifies that further -// in its SimpleDecode function, which combines hcl.SimpleParse with decoding -// into Go struct values -// -// Package hclparse, then, is useful for applications that require more fine -// control over parsing or which need to load many separate files and keep -// track of them for possible error reporting or other analysis. -package hclparse - -import ( - "fmt" - "io/ioutil" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/hcl/v2/json" -) - -// NOTE: This is the public interface for parsing. The actual parsers are -// in other packages alongside this one, with this package just wrapping them -// to provide a unified interface for the caller across all supported formats. - -// Parser is the main interface for parsing configuration files. As well as -// parsing files, a parser also retains a registry of all of the files it -// has parsed so that multiple attempts to parse the same file will return -// the same object and so the collected files can be used when printing -// diagnostics. -// -// Any diagnostics for parsing a file are only returned once on the first -// call to parse that file. Callers are expected to collect up diagnostics -// and present them together, so returning diagnostics for the same file -// multiple times would create a confusing result. -type Parser struct { - files map[string]*hcl.File -} - -// NewParser creates a new parser, ready to parse configuration files. -func NewParser() *Parser { - return &Parser{ - files: map[string]*hcl.File{}, - } -} - -// ParseHCL parses the given buffer (which is assumed to have been loaded from -// the given filename) as a native-syntax configuration file and returns the -// hcl.File object representing it. -func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) - p.files[filename] = file - return file, diags -} - -// ParseHCLFile reads the given filename and parses it as a native-syntax HCL -// configuration file. An error diagnostic is returned if the given file -// cannot be read. 
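// NOTE (editorial sketch, not part of the patch): the hclparse.Parser API
// described above is typically driven as below. "example.hcl" is a
// hypothetical filename; the diagnostic writer consumes the parser's file
// registry so that error output can quote the offending source lines.
package main

import (
	"os"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclparse"
)

func main() {
	parser := hclparse.NewParser()
	f, diags := parser.ParseHCLFile("example.hcl")
	if diags.HasErrors() {
		// Width 78 and colored output are presentation choices only.
		wr := hcl.NewDiagnosticTextWriter(os.Stderr, parser.Files(), 78, true)
		_ = wr.WriteDiagnostics(diags)
		os.Exit(1)
	}
	_ = f // f.Body is now ready for decoding with hcldec or gohcl
}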
-func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - src, err := ioutil.ReadFile(filename) - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Failed to read file", - Detail: fmt.Sprintf("The configuration file %q could not be read.", filename), - }, - } - } - - return p.ParseHCL(src, filename) -} - -// ParseJSON parses the given JSON buffer (which is assumed to have been loaded -// from the given filename) and returns the hcl.File object representing it. -func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - file, diags := json.Parse(src, filename) - p.files[filename] = file - return file, diags -} - -// ParseJSONFile reads the given filename and parses it as JSON, similarly to -// ParseJSON. An error diagnostic is returned if the given file cannot be read. -func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - file, diags := json.ParseFile(filename) - p.files[filename] = file - return file, diags -} - -// AddFile allows a caller to record in a parser a file that was parsed some -// other way, thus allowing it to be included in the registry of sources. -func (p *Parser) AddFile(filename string, file *hcl.File) { - p.files[filename] = file -} - -// Sources returns a map from filenames to the raw source code that was -// read from them. This is intended to be used, for example, to print -// diagnostics with contextual information. -// -// The arrays underlying the returned slices should not be modified. -func (p *Parser) Sources() map[string][]byte { - ret := make(map[string][]byte) - for fn, f := range p.files { - ret[fn] = f.Bytes - } - return ret -} - -// Files returns a map from filenames to the File objects produced from them. -// This is intended to be used, for example, to print diagnostics with -// contextual information. -// -// The returned map and all of the objects it refers to directly or indirectly -// must not be modified. -func (p *Parser) Files() map[string]*hcl.File { - return p.files -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go index 8c20286b27..43689d7409 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go index ccc1c0ae2c..5b0e468102 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go index 617bc29dc2..defe3dbb78 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // Package hclsyntax contains the parser, AST, etc for HCL's native language, // as opposed to the JSON variant. // diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go index 63f2e88ec7..e0de1c3dd4 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go @@ -1,7 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( "fmt" + "sort" "sync" "github.com/hashicorp/hcl/v2" @@ -25,7 +29,7 @@ type Expression interface { } // Assert that Expression implements hcl.Expression -var assertExprImplExpr hcl.Expression = Expression(nil) +var _ hcl.Expression = Expression(nil) // ParenthesesExpr represents an expression written in grouping // parentheses. @@ -269,6 +273,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti } } + diagExtra := functionCallDiagExtra{ + calledFunctionName: e.Name, + } + params := f.Params() varParam := f.VarParam() @@ -296,6 +304,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: expandExpr, EvalContext: ctx, + Extra: &diagExtra, }) return cty.DynamicVal, diags } @@ -310,6 +319,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: expandExpr, EvalContext: ctx, + Extra: &diagExtra, }) return cty.DynamicVal, diags } @@ -341,6 +351,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: expandExpr, EvalContext: ctx, + Extra: &diagExtra, }) return cty.DynamicVal, diags } @@ -364,6 +375,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: e, EvalContext: ctx, + Extra: &diagExtra, }, } } @@ -381,6 +393,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: e, EvalContext: ctx, + Extra: &diagExtra, }, } } @@ -425,6 +438,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: argExpr, EvalContext: ctx, + Extra: &diagExtra, }) } } @@ -441,6 +455,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti resultVal, err := f.Call(argVals) if err != nil { + // For errors in the underlying call itself we also return the raw + // call error via an extra method on our "diagnostic extra" value. + diagExtra.functionCallError = err + switch terr := err.(type) { case function.ArgError: i := terr.Index @@ -451,22 +469,57 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti param = varParam } - // this can happen if an argument is (incorrectly) null. - if i > len(e.Args)-1 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid function argument", - Detail: fmt.Sprintf( - "Invalid value for %q parameter: %s.", - param.Name, err, - ), - Subject: args[len(params)].StartRange().Ptr(), - Context: e.Range().Ptr(), - Expression: e, - EvalContext: ctx, - }) + if param == nil || i > len(args)-1 { + // Getting here means that the function we called has a bug: + // it returned an arg error that refers to an argument index + // that wasn't present in the call. 
For that situation + // we'll degrade to a less specific error just to give + // some sort of answer, but best to still fix the buggy + // function so that it only returns argument indices that + // are in range. + switch { + case param != nil: + // In this case we'll assume that the function was trying + // to talk about a final variadic parameter but the caller + // didn't actually provide any arguments for it. That means + // we can at least still name the parameter in the + // error message, but our source range will be the call + // as a whole because we don't have an argument expression + // to highlight specifically. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + Extra: &diagExtra, + }) + default: + // This is the most degenerate case of all, where the + // index is out of range even for the declared parameters, + // and so we can't tell which parameter the function is + // trying to report an error for. Just a generic error + // report in that case. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Error in function call", + Detail: fmt.Sprintf( + "Call to function %q failed: %s.", + e.Name, err, + ), + Subject: e.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + Extra: &diagExtra, + }) + } } else { - argExpr := e.Args[i] + argExpr := args[i] // TODO: we should also unpick a PathError here and show the // path to the deep value where the error was detected. @@ -481,6 +534,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: argExpr, EvalContext: ctx, + Extra: &diagExtra, }) } @@ -496,6 +550,7 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti Context: e.Range().Ptr(), Expression: e, EvalContext: ctx, + Extra: &diagExtra, }) } @@ -528,6 +583,39 @@ func (e *FunctionCallExpr) ExprCall() *hcl.StaticCall { return ret } +// FunctionCallDiagExtra is an interface implemented by the value in the "Extra" +// field of some diagnostics returned by FunctionCallExpr.Value, giving +// cooperating callers access to some machine-readable information about the +// call that a diagnostic relates to. +type FunctionCallDiagExtra interface { + // CalledFunctionName returns the name of the function being called at + // the time the diagnostic was generated, if any. Returns an empty string + // if there is no known called function. + CalledFunctionName() string + + // FunctionCallError returns the error value returned by the implementation + // of the function being called, if any. Returns nil if the diagnostic was + // not returned in response to a call error. + // + // Some errors related to calling functions are generated by HCL itself + // rather than by the underlying function, in which case this method + // will return nil. 
+ FunctionCallError() error +} + +type functionCallDiagExtra struct { + calledFunctionName string + functionCallError error +} + +func (e *functionCallDiagExtra) CalledFunctionName() string { + return e.calledFunctionName +} + +func (e *functionCallDiagExtra) FunctionCallError() error { + return e.functionCallError +} + type ConditionalExpr struct { Condition Expression TrueResult Expression @@ -582,12 +670,8 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic Severity: hcl.DiagError, Summary: "Inconsistent conditional result types", Detail: fmt.Sprintf( - // FIXME: Need a helper function for showing natural-language type diffs, - // since this will generate some useless messages in some cases, like - // "These expressions are object and object respectively" if the - // object types don't exactly match. - "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.", - trueResult.Type().FriendlyName(), falseResult.Type().FriendlyName(), + "The true and false result expressions must have consistent types. %s.", + describeConditionalTypeMismatch(trueResult.Type(), falseResult.Type()), ), Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(), Context: &e.SrcRange, @@ -612,14 +696,101 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic return cty.UnknownVal(resultType), diags } if !condResult.IsKnown() { - return cty.UnknownVal(resultType), diags + // we use the unmarked values throughout the unknown branch + _, condResultMarks := condResult.Unmark() + trueResult, trueResultMarks := trueResult.Unmark() + falseResult, falseResultMarks := falseResult.Unmark() + + // use a value to merge marks + _, resMarks := cty.DynamicVal.WithMarks(condResultMarks, trueResultMarks, falseResultMarks).Unmark() + + trueRange := trueResult.Range() + falseRange := falseResult.Range() + + // if both branches are known to be null, then the result must still be null + if trueResult.IsNull() && falseResult.IsNull() { + return cty.NullVal(resultType).WithMarks(resMarks), diags + } + + // We might be able to offer a refined range for the result based on + // the two possible outcomes. 
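// NOTE (editorial sketch, not part of the patch): a cooperating caller can
// read back the machine-readable call details that the hunks above attach
// to diagnostics via the Extra field. reportCallFailures is a hypothetical
// helper; it assumes imports of fmt, hcl, and this hclsyntax package.
func reportCallFailures(diags hcl.Diagnostics) {
	for _, diag := range diags {
		extra, ok := diag.Extra.(hclsyntax.FunctionCallDiagExtra)
		if !ok {
			continue // not a function-call diagnostic
		}
		if callErr := extra.FunctionCallError(); callErr != nil {
			// The raw error returned by the function implementation, as
			// opposed to the rendered diagnostic message.
			fmt.Printf("call to %q failed: %s\n", extra.CalledFunctionName(), callErr)
		}
	}
}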
+ if trueResult.Type() == cty.Number && falseResult.Type() == cty.Number { + ref := cty.UnknownVal(cty.Number).Refine() + if trueRange.DefinitelyNotNull() && falseRange.DefinitelyNotNull() { + ref = ref.NotNull() + } + + falseLo, falseLoInc := falseRange.NumberLowerBound() + falseHi, falseHiInc := falseRange.NumberUpperBound() + trueLo, trueLoInc := trueRange.NumberLowerBound() + trueHi, trueHiInc := trueRange.NumberUpperBound() + + if falseLo.IsKnown() && trueLo.IsKnown() { + lo, loInc := falseLo, falseLoInc + switch { + case trueLo.LessThan(falseLo).True(): + lo, loInc = trueLo, trueLoInc + case trueLo.Equals(falseLo).True(): + loInc = trueLoInc || falseLoInc + } + + ref = ref.NumberRangeLowerBound(lo, loInc) + } + + if falseHi.IsKnown() && trueHi.IsKnown() { + hi, hiInc := falseHi, falseHiInc + switch { + case trueHi.GreaterThan(falseHi).True(): + hi, hiInc = trueHi, trueHiInc + case trueHi.Equals(falseHi).True(): + hiInc = trueHiInc || falseHiInc + } + ref = ref.NumberRangeUpperBound(hi, hiInc) + } + + return ref.NewValue().WithMarks(resMarks), diags + } + + if trueResult.Type().IsCollectionType() && falseResult.Type().IsCollectionType() { + if trueResult.Type().Equals(falseResult.Type()) { + ref := cty.UnknownVal(resultType).Refine() + if trueRange.DefinitelyNotNull() && falseRange.DefinitelyNotNull() { + ref = ref.NotNull() + } + + falseLo := falseRange.LengthLowerBound() + falseHi := falseRange.LengthUpperBound() + trueLo := trueRange.LengthLowerBound() + trueHi := trueRange.LengthUpperBound() + + lo := falseLo + if trueLo < falseLo { + lo = trueLo + } + + hi := falseHi + if trueHi > falseHi { + hi = trueHi + } + + ref = ref.CollectionLengthLowerBound(lo).CollectionLengthUpperBound(hi) + return ref.NewValue().WithMarks(resMarks), diags + } + } + + ret := cty.UnknownVal(resultType) + if trueRange.DefinitelyNotNull() && falseRange.DefinitelyNotNull() { + ret = ret.RefineNotNull() + } + return ret.WithMarks(resMarks), diags } + condResult, err := convert.Convert(condResult, cty.Bool) if err != nil { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Incorrect condition type", - Detail: fmt.Sprintf("The condition expression must be of type bool."), + Detail: "The condition expression must be of type bool.", Subject: e.Condition.Range().Ptr(), Context: &e.SrcRange, Expression: e.Condition, @@ -679,6 +850,144 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic } } +// describeConditionalTypeMismatch makes a best effort to describe the +// difference between types in the true and false arms of a conditional +// expression in a way that would be useful to someone trying to understand +// why their conditional expression isn't valid. +// +// NOTE: This function is only designed to deal with situations +// where trueTy and falseTy are different. Calling it with two equal +// types will produce a nonsense result. This function also only really +// deals with situations that type unification can't resolve, so we should +// call this function only after trying type unification first. +func describeConditionalTypeMismatch(trueTy, falseTy cty.Type) string { + // The main tricky cases here are when both trueTy and falseTy are + // of the same structural type kind, such as both being object types + // or both being tuple types. In that case the "FriendlyName" method + // returns only "object" or "tuple" and so we need to do some more + // work to describe what's different inside them. 
+ + switch { + case trueTy.IsObjectType() && falseTy.IsObjectType(): + // We'll first gather up the attribute names and sort them. In the + // event that there are multiple attributes that disagree across + // the two types, we'll prefer to report the one that sorts lexically + // least just so that our error message is consistent between + // evaluations. + var trueAttrs, falseAttrs []string + for name := range trueTy.AttributeTypes() { + trueAttrs = append(trueAttrs, name) + } + sort.Strings(trueAttrs) + for name := range falseTy.AttributeTypes() { + falseAttrs = append(falseAttrs, name) + } + sort.Strings(falseAttrs) + + for _, name := range trueAttrs { + if !falseTy.HasAttribute(name) { + return fmt.Sprintf("The 'true' value includes object attribute %q, which is absent in the 'false' value", name) + } + trueAty := trueTy.AttributeType(name) + falseAty := falseTy.AttributeType(name) + if !trueAty.Equals(falseAty) { + // For deeply-nested differences this will likely get very + // clunky quickly by nesting these messages inside one another, + // but we'll accept that for now in the interests of producing + // _some_ useful feedback, even if it isn't as concise as + // we'd prefer it to be. Deeply-nested structures in + // conditionals are thankfully not super common. + return fmt.Sprintf( + "Type mismatch for object attribute %q: %s", + name, describeConditionalTypeMismatch(trueAty, falseAty), + ) + } + } + for _, name := range falseAttrs { + if !trueTy.HasAttribute(name) { + return fmt.Sprintf("The 'false' value includes object attribute %q, which is absent in the 'true' value", name) + } + // NOTE: We don't need to check the attribute types again, because + // any attribute that both types have in common would already have + // been checked in the previous loop. + } + case trueTy.IsTupleType() && falseTy.IsTupleType(): + trueEtys := trueTy.TupleElementTypes() + falseEtys := falseTy.TupleElementTypes() + + if trueCount, falseCount := len(trueEtys), len(falseEtys); trueCount != falseCount { + return fmt.Sprintf("The 'true' tuple has length %d, but the 'false' tuple has length %d", trueCount, falseCount) + } + + // NOTE: Thanks to the condition above, we know that both tuples are + // of the same length and so they must have some differing types + // instead. + for i := range trueEtys { + trueEty := trueEtys[i] + falseEty := falseEtys[i] + + if !trueEty.Equals(falseEty) { + // For deeply-nested differences this will likely get very + // clunky quickly by nesting these messages inside one another, + // but we'll accept that for now in the interests of producing + // _some_ useful feedback, even if it isn't as concise as + // we'd prefer it to be. Deeply-nested structures in + // conditionals are thankfully not super common. + return fmt.Sprintf( + "Type mismatch for tuple element %d: %s", + i, describeConditionalTypeMismatch(trueEty, falseEty), + ) + } + } + case trueTy.IsCollectionType() && falseTy.IsCollectionType(): + // For this case we're specifically interested in the situation where: + // - both collections are of the same kind, AND + // - the element types of both are either object or tuple types. + // This is just to avoid writing a useless statement like + // "The 'true' value is list of object, but the 'false' value is list of object". + // This still doesn't account for more awkward cases like collections + // of collections of structural types, but we won't let perfect be + // the enemy of the good. 
+ trueEty := trueTy.ElementType() + falseEty := falseTy.ElementType() + if (trueTy.IsListType() && falseTy.IsListType()) || (trueTy.IsMapType() && falseTy.IsMapType()) || (trueTy.IsSetType() && falseTy.IsSetType()) { + if (trueEty.IsObjectType() && falseEty.IsObjectType()) || (trueEty.IsTupleType() && falseEty.IsTupleType()) { + noun := "collection" + switch { // NOTE: We now know that trueTy and falseTy have the same collection kind + case trueTy.IsListType(): + noun = "list" + case trueTy.IsSetType(): + noun = "set" + case trueTy.IsMapType(): + noun = "map" + } + return fmt.Sprintf( + "Mismatched %s element types: %s", + noun, describeConditionalTypeMismatch(trueEty, falseEty), + ) + } + } + } + + // If we don't manage any more specialized message, we'll just report + // what the two types are. + trueName := trueTy.FriendlyName() + falseName := falseTy.FriendlyName() + if trueName == falseName { + // Absolute last resort for when we have no special rule above but + // we have two types with the same friendly name anyway. This is + // the most vague of all possible messages but is reserved for + // particularly awkward cases, like lists of lists of differing tuple + // types. + return "At least one deeply-nested attribute or element is not compatible across both the 'true' and the 'false' value" + } + return fmt.Sprintf( + "The 'true' value is %s, but the 'false' value is %s", + trueTy.FriendlyName(), falseTy.FriendlyName(), + ) + +} + func (e *ConditionalExpr) Range() hcl.Range { return e.SrcRange } @@ -966,9 +1275,9 @@ func (e *ObjectConsKeyExpr) UnwrapExpression() Expression { // ForExpr represents iteration constructs: // -// tuple = [for i, v in list: upper(v) if i > 2] -// object = {for k, v in map: k => upper(v)} -// object_of_tuples = {for v in list: v.key: v...} +// tuple = [for i, v in list: upper(v) if i > 2] +// object = {for k, v in map: k => upper(v)} +// object_of_tuples = {for v in list: v.key: v...} type ForExpr struct { KeyVar string // empty if ignoring the key ValVar string @@ -1399,7 +1708,24 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { return cty.DynamicVal, diags } + upgradedUnknown := false if autoUpgrade { + // If we're upgrading an unknown value to a tuple/list, the result + // cannot be known. Otherwise a tuple containing an unknown value will + // upgrade to a different number of elements depending on whether + // sourceVal becomes null or not. + // We record this condition here so we can process any remaining + // expression after the * to verify the result of the traversal. For + // example, it is valid to use a splat on a single object to retrieve a + // list of a single attribute, but we still need to check if that + // attribute actually exists. + if !sourceVal.IsKnown() { + sourceRng := sourceVal.Range() + if sourceRng.CouldBeNull() { + upgradedUnknown = true + } + } + sourceVal = cty.TupleVal([]cty.Value{sourceVal}) sourceTy = sourceVal.Type() } @@ -1440,7 +1766,21 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { // checking to proceed. ty, tyDiags := resultTy() diags = append(diags, tyDiags...) - return cty.UnknownVal(ty), diags + ret := cty.UnknownVal(ty) + if ty != cty.DynamicPseudoType { + ret = ret.RefineNotNull() + } + if ty.IsListType() && sourceVal.Type().IsCollectionType() { + // We can refine the length of an unknown list result based on + // the source collection's own length. + sv, _ := sourceVal.Unmark() + sourceRng := sv.Range() + ret = ret.Refine(). 
+ CollectionLengthLowerBound(sourceRng.LengthLowerBound()). + CollectionLengthUpperBound(sourceRng.LengthUpperBound()). + NewValue() + } + return ret.WithSameMarks(sourceVal), diags } // Unmark the collection, and save the marks to apply to the returned @@ -1466,6 +1806,10 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { } e.Item.clearValue(ctx) // clean up our temporary value + if upgradedUnknown { + return cty.DynamicVal, diags + } + if !isKnown { // We'll ignore the resultTy diagnostics in this case since they // will just be the same errors we saw while iterating above. diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go index c1db0cecc8..6585612c10 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go index 0b7e07a5b1..a0dc7c2291 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( @@ -35,11 +38,9 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) if partVal.IsNull() { diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid template interpolation value", - Detail: fmt.Sprintf( - "The expression result is null. Cannot include a null value in a string template.", - ), + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: "The expression result is null. Cannot include a null value in a string template.", Subject: part.Range().Ptr(), Context: &e.SrcRange, Expression: part, @@ -48,6 +49,12 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) continue } + // Unmark the part and merge its marks into the set + unmarkedVal, partMarks := partVal.Unmark() + for k, v := range partMarks { + marks[k] = v + } + if !partVal.IsKnown() { // If any part is unknown then the result as a whole must be // unknown too. We'll keep on processing the rest of the parts @@ -57,7 +64,7 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) continue } - strVal, err := convert.Convert(partVal, cty.String) + strVal, err := convert.Convert(unmarkedVal, cty.String) if err != nil { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, @@ -74,22 +81,38 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) continue } - // Unmark the part and merge its marks into the set - unmarked, partMarks := strVal.Unmark() - for k, v := range partMarks { - marks[k] = v + // If we're just continuing to validate after we found an unknown value + // then we'll skip appending so that "buf" will contain only the + // known prefix of the result.
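// NOTE (editorial sketch, not part of the patch): the refinement built in
// the template hunk continuing below can be reproduced directly against
// the go-cty API: an unknown string may still advertise non-nullness and a
// known prefix, which consumers read back via Value.Range(). Assumes
// imports of fmt and github.com/zclconf/go-cty/cty.
func demoStringPrefixRefinement() {
	v := cty.UnknownVal(cty.String).Refine().
		NotNull().
		StringPrefix("server-").
		NewValue()
	// Prints the refined prefix; the builder may conservatively trim a
	// trailing character that could combine with later graphemes.
	fmt.Println(v.Range().StringPrefix())
}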
+ if isKnown && !diags.HasErrors() { + buf.WriteString(strVal.AsString()) } - - buf.WriteString(unmarked.AsString()) } var ret cty.Value if !isKnown { ret = cty.UnknownVal(cty.String) + if !diags.HasErrors() { // Invalid input means our partial result buffer is suspect + if knownPrefix := buf.String(); knownPrefix != "" { + byteLen := len(knownPrefix) + // Impose a reasonable upper limit to avoid producing too long a prefix. + // The 128 B is about 10% of the safety limits in cty's msgpack decoder. + // @see https://github.com/zclconf/go-cty/blob/v1.13.2/cty/msgpack/unknown.go#L170-L175 + // + // This operation is safe because StringPrefix removes incomplete trailing grapheme clusters. + if byteLen > 128 { // arbitrarily-decided threshold + byteLen = 128 + } + ret = ret.Refine().StringPrefix(knownPrefix[:byteLen]).NewValue() + } + } } else { ret = cty.StringVal(buf.String()) } + // A template rendering result is never null. + ret = ret.RefineNotNull() + // Apply the full set of marks to the returned value return ret.WithMarks(marks), diags } diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go index a82bf790eb..ce5a5cb755 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax // Generated by expression_vars_get.go. DO NOT EDIT. diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go index f55e9ce2c2..7be626ffd6 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go index 841656a6a1..383ec6b85d 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax //go:generate go run expression_vars_gen.go diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go index eef8b9626c..5124ae95c3 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go index af98ef0451..83e1d4efb5 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go index 41b35e53f8..6ead6091c6 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go index 0998cc4122..aa147afeb4 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( @@ -6,7 +9,7 @@ import ( "strconv" "unicode/utf8" - "github.com/apparentlymart/go-textseg/v12/textseg" + "github.com/apparentlymart/go-textseg/v15/textseg" "github.com/hashicorp/hcl/v2" "github.com/zclconf/go-cty/cty" ) @@ -76,14 +79,37 @@ Token: default: bad := p.Read() if !p.recovery { - if bad.Type == TokenOQuote { + switch bad.Type { + case TokenOQuote: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Invalid argument name", Detail: "Argument names must not be quoted.", Subject: &bad.Range, }) - } else { + case TokenEOF: + switch end { + case TokenCBrace: + // If we're looking for a closing brace then we're parsing a block + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed configuration block", + Detail: "There is no closing brace for this block before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.", + Subject: &startRange, + }) + default: + // The only other "end" should itself be TokenEOF (for + // the top-level body) and so we shouldn't get here, + // but we'll return a generic error message anyway to + // be resilient. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed configuration body", + Detail: "Found end of file before the end of this configuration body.", + Subject: &startRange, + }) + } + default: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Argument or block definition required", @@ -144,8 +170,6 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { }, } } - - return nil, nil } // parseSingleAttrBody is a weird variant of ParseBody that deals with the @@ -388,12 +412,23 @@ Token: // user intent for this one, we'll skip it if we're already in // recovery mode. if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid single-argument block definition", - Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.", - Subject: p.Peek().Range.Ptr(), - }) + switch p.Peek().Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed configuration block", + Detail: "There is no closing brace for this block before the end of the file. 
This may be caused by incorrect brace nesting elsewhere in this file.", + Subject: oBrace.Range.Ptr(), + Context: hcl.RangeBetween(ident.Range, oBrace.Range).Ptr(), + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.", + Subject: p.Peek().Range.Ptr(), + }) + } } p.recover(TokenCBrace) } @@ -1059,12 +1094,22 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { default: var diags hcl.Diagnostics if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid expression", - Detail: "Expected the start of an expression, but found an invalid expression token.", - Subject: &start.Range, - }) + switch start.Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing expression", + Detail: "Expected the start of an expression, but found the end of the file.", + Subject: &start.Range, + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expression", + Detail: "Expected the start of an expression, but found an invalid expression token.", + Subject: &start.Range, + }) + } } p.setRecovery() @@ -1132,7 +1177,12 @@ Token: // if there was a parse error in the argument then we've // probably been left in a weird place in the token stream, // so we'll bail out with a partial argument list. - p.recover(TokenCParen) + recoveredTok := p.recover(TokenCParen) + + // record the recovered token, if one was found + if recoveredTok.Type == TokenCParen { + closeTok = recoveredTok + } break Token } @@ -1163,13 +1213,23 @@ Token: } if sep.Type != TokenComma { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing argument separator", - Detail: "A comma is required to separate each function argument from the next.", - Subject: &sep.Range, - Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), - }) + switch sep.Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated function call", + Detail: "There is no closing parenthesis for this function call before the end of the file. This may be caused by incorrect parenthesis nesting elsewhere in this file.", + Subject: hcl.RangeBetween(name.Range, openTok.Range).Ptr(), + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing argument separator", + Detail: "A comma is required to separate each function argument from the next.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + } closeTok = p.recover(TokenCParen) break Token } @@ -1242,13 +1302,23 @@ func (p *parser) parseTupleCons() (Expression, hcl.Diagnostics) { if next.Type != TokenComma { if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing item separator", - Detail: "Expected a comma to mark the beginning of the next item.", - Subject: &next.Range, - Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), - }) + switch next.Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated tuple constructor expression", + Detail: "There is no corresponding closing bracket before the end of the file.
This may be caused by incorrect bracket nesting elsewhere in this file.", + Subject: open.Range.Ptr(), + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item separator", + Detail: "Expected a comma to mark the beginning of the next item.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } } close = p.recover(TokenCBrack) break @@ -1359,6 +1429,13 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { Subject: &next.Range, Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), }) + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated object constructor expression", + Detail: "There is no corresponding closing brace before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.", + Subject: open.Range.Ptr(), + }) default: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, @@ -1399,13 +1476,23 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { if next.Type != TokenComma && next.Type != TokenNewline { if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing attribute separator", - Detail: "Expected a newline or comma to mark the beginning of the next attribute.", - Subject: &next.Range, - Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), - }) + switch next.Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated object constructor expression", + Detail: "There is no corresponding closing brace before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.", + Subject: open.Range.Ptr(), + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute separator", + Detail: "Expected a newline or comma to mark the beginning of the next attribute.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } } close = p.recover(TokenCBrace) break @@ -1661,7 +1748,7 @@ func (p *parser) parseQuotedStringLiteral() (string, hcl.Range, hcl.Diagnostics) var diags hcl.Diagnostics ret := &bytes.Buffer{} - var cQuote Token + var endRange hcl.Range Token: for { @@ -1669,7 +1756,7 @@ Token: switch tok.Type { case TokenCQuote: - cQuote = tok + endRange = tok.Range break Token case TokenQuotedLit: @@ -1712,6 +1799,7 @@ Token: Subject: &tok.Range, Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), }) + endRange = tok.Range break Token default: @@ -1724,13 +1812,14 @@ Token: Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), }) p.recover(TokenCQuote) + endRange = tok.Range break Token } } - return ret.String(), hcl.RangeBetween(oQuote.Range, cQuote.Range), diags + return ret.String(), hcl.RangeBetween(oQuote.Range, endRange), diags } // ParseStringLiteralToken processes the given token, which must be either a diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go index bb85646103..19e988064d 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( @@ -5,7 +8,7 @@ import ( "strings" "unicode" - "github.com/apparentlymart/go-textseg/v12/textseg" + "github.com/apparentlymart/go-textseg/v15/textseg" "github.com/hashicorp/hcl/v2" "github.com/zclconf/go-cty/cty" ) @@ -38,6 +41,7 @@ func (p *parser) parseTemplateInner(end TokenType, flushHeredoc bool) ([]Express if flushHeredoc { flushHeredocTemplateParts(parts) // Trim off leading spaces on lines per the flush heredoc spec } + meldConsecutiveStringLiterals(parts) tp := templateParser{ Tokens: parts.Tokens, SrcRange: parts.SrcRange, @@ -413,13 +417,44 @@ Token: close := p.Peek() if close.Type != TokenTemplateSeqEnd { if !p.recovery { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extra characters after interpolation expression", - Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.", - Subject: &close.Range, - Context: hcl.RangeBetween(startRange, close.Range).Ptr(), - }) + switch close.Type { + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed template interpolation sequence", + Detail: "There is no closing brace for this interpolation sequence before the end of the file. This might be caused by incorrect nesting inside the given expression.", + Subject: &startRange, + }) + case TokenColon: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after interpolation expression", + Detail: "Template interpolation doesn't expect a colon at this location. Did you intend this to be a literal sequence to be processed as part of another language? If so, you can escape it by starting with \"$${\" instead of just \"${\".", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + default: + if (close.Type == TokenCQuote || close.Type == TokenOQuote) && end == TokenCQuote { + // We'll get here if we're processing a _quoted_ + // template and we find an errant quote inside an + // interpolation sequence, which suggests that + // the interpolation sequence is missing its terminator. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed template interpolation sequence", + Detail: "There is no closing brace for this interpolation sequence before the end of the quoted template. This might be caused by incorrect nesting inside the given expression.", + Subject: &startRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after interpolation expression", + Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.\n\nThis can happen when you include interpolation syntax for another language, such as shell scripting, but forget to escape the interpolation start token. If this is an embedded sequence for another language, escape it by starting with \"$${\" instead of just \"${\".", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + } } p.recover(TokenTemplateSeqEnd) } else { @@ -720,6 +755,37 @@ func flushHeredocTemplateParts(parts *templateParts) { } } +// meldConsecutiveStringLiterals simplifies the AST output by combining a +// sequence of string literal tokens into a single string literal. This must be +// performed after any whitespace trimming operations. 
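// NOTE (editorial sketch, not part of the patch): the melding pass whose
// doc comment appears above is an instance of the general "merge adjacent
// slice elements in place" pattern; meldAdjacent is a hypothetical
// standalone, generic rendering of it (Go 1.18+).
func meldAdjacent[T any](xs []T, canMerge func(a, b T) bool, merge func(a, b T) T) []T {
	i := 1
	for i < len(xs) {
		if canMerge(xs[i-1], xs[i]) {
			xs[i-1] = merge(xs[i-1], xs[i])
			xs = append(xs[:i], xs[i+1:]...)
			continue // stay at i: the new right-hand neighbor may merge too
		}
		i++
	}
	return xs
}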
+func meldConsecutiveStringLiterals(parts *templateParts) { + if len(parts.Tokens) == 0 { + return + } + + // Loop over all tokens starting at the second element, as we want to join + // pairs of consecutive string literals. + i := 1 + for i < len(parts.Tokens) { + if prevLiteral, ok := parts.Tokens[i-1].(*templateLiteralToken); ok { + if literal, ok := parts.Tokens[i].(*templateLiteralToken); ok { + // The current and previous tokens are both literals: combine + prevLiteral.Val = prevLiteral.Val + literal.Val + prevLiteral.SrcRange.End = literal.SrcRange.End + + // Remove the current token from the slice + parts.Tokens = append(parts.Tokens[:i], parts.Tokens[i+1:]...) + + // Continue without moving forward in the slice + continue + } + } + + // Try the next pair of tokens + i++ + } +} + type templateParts struct { Tokens []templateToken SrcRange hcl.Range diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go index 7dcb0fd341..3afa6ab064 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go index f056f906e3..74fa3fb331 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go index 0b68efd600..d56f8e50be 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go index 2895ade758..5d60ff5a5e 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + //line scan_string_lit.rl:1 package hclsyntax diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go index 794123a851..1bbbb92781 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + //line scan_tokens.rl:1 package hclsyntax diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md index 550bd93adf..6d31e35255 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md @@ -82,9 +82,9 @@ Comments serve as program documentation and come in two forms: - _Inline comments_ start with the `/*` sequence and end with the `*/` sequence, and may have any characters within except the ending sequence. 
- An inline comments is considered equivalent to a whitespace sequence. + An inline comment is considered equivalent to a whitespace sequence. -Comments and whitespace cannot begin within within other comments, or within +Comments and whitespace cannot begin within other comments, or within template literals except inside an interpolation sequence or template directive. ### Identifiers @@ -268,10 +268,10 @@ collection value. ```ebnf CollectionValue = tuple | object; tuple = "[" ( - (Expression ("," Expression)* ","?)? + (Expression (("," | Newline) Expression)* ","?)? ) "]"; object = "{" ( - (objectelem ("," objectelem)* ","?)? + (objectelem (( "," | Newline) objectelem)* ","?)? ) "}"; objectelem = (Identifier | Expression) ("=" | ":") Expression; ``` @@ -293,18 +293,20 @@ Between the open and closing delimiters of these sequences, newline sequences are ignored as whitespace. There is a syntax ambiguity between _for expressions_ and collection values -whose first element is a reference to a variable named `for`. The -_for expression_ interpretation has priority, so to produce a tuple whose -first element is the value of a variable named `for`, or an object with a -key named `for`, use parentheses to disambiguate: +whose first element starts with an identifier named `for`. The _for expression_ +interpretation has priority, so to write a key literally named `for` +or an expression derived from a variable named `for` you must use parentheses +or quotes to disambiguate: - `[for, foo, baz]` is a syntax error. - `[(for), foo, baz]` is a tuple whose first element is the value of variable `for`. -- `{for: 1, baz: 2}` is a syntax error. -- `{(for): 1, baz: 2}` is an object with an attribute literally named `for`. -- `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the +- `{for = 1, baz = 2}` is a syntax error. +- `{"for" = 1, baz = 2}` is an object with an attribute literally named `for`. +- `{baz = 2, for = 1}` is equivalent to the previous example, and resolves the ambiguity by reordering. +- `{(for) = 1, baz = 2}` is an object with a key with the same value as the + variable `for`. ### Template Expressions @@ -489,7 +491,7 @@ that were produced against each distinct key. - `[for v in ["a", "b"]: v]` returns `["a", "b"]`. - `[for i, v in ["a", "b"]: i]` returns `[0, 1]`. - `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`. -- `{for i, v in ["a", "a", "b"]: k => v}` produces an error, because attribute +- `{for i, v in ["a", "a", "b"]: v => i}` produces an error, because attribute `a` is defined twice. - `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`. @@ -633,7 +635,7 @@ binaryOp = ExprTerm binaryOperator ExprTerm; binaryOperator = compareOperator | arithmeticOperator | logicOperator; compareOperator = "==" | "!=" | "<" | ">" | "<=" | ">="; arithmeticOperator = "+" | "-" | "*" | "/" | "%"; -logicOperator = "&&" | "||" | "!"; +logicOperator = "&&" | "||"; ``` The unary operators have the highest precedence. @@ -888,7 +890,7 @@ as templates. - `hello ${true}` produces the string `"hello true"` - `${""}${true}` produces the string `"true"` because there are two interpolation sequences, even though one produces an empty result. -- `%{ for v in [true] }${v}%{ endif }` produces the string `true` because +- `%{ for v in [true] }${v}%{ endfor }` produces the string `true` because the presence of the `for` directive circumvents the unwrapping even though the final result is a single value. 
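// NOTE (editorial sketch, not part of the patch): the unwrapping rule the
// spec text above describes is observable through the public hclsyntax
// API. Assumes imports of fmt, hcl, and hclsyntax.
func demoTemplateUnwrap() {
	expr, diags := hclsyntax.ParseTemplate([]byte("${true}"), "inline.tmpl", hcl.InitialPos)
	if diags.HasErrors() {
		return
	}
	v, _ := expr.Value(nil)
	// A template that is a single interpolation unwraps to the bare value,
	// so this prints "bool" rather than "string".
	fmt.Println(v.Type().FriendlyName())
}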
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go index 2f7470c772..ff272631d4 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( @@ -13,17 +16,12 @@ func (b *Block) AsHCLBlock() *hcl.Block { return nil } - lastHeaderRange := b.TypeRange - if len(b.LabelRanges) > 0 { - lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1] - } - return &hcl.Block{ Type: b.Type, Labels: b.Labels, Body: b.Body, - DefRange: hcl.RangeBetween(b.TypeRange, lastHeaderRange), + DefRange: b.DefRange(), TypeRange: b.TypeRange, LabelRanges: b.LabelRanges, } @@ -40,7 +38,7 @@ type Body struct { hiddenBlocks map[string]struct{} SrcRange hcl.Range - EndRange hcl.Range // Final token of the body, for reporting missing items + EndRange hcl.Range // Final token of the body (zero-length range) } // Assert that *Body implements hcl.Body @@ -390,5 +388,9 @@ func (b *Block) Range() hcl.Range { } func (b *Block) DefRange() hcl.Range { - return hcl.RangeBetween(b.TypeRange, b.OpenBraceRange) + lastHeaderRange := b.TypeRange + if len(b.LabelRanges) > 0 { + lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1] + } + return hcl.RangeBetween(b.TypeRange, lastHeaderRange) } diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go index 587844ac20..5085716845 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go index 59f4c43478..afde5f33a0 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( "bytes" "fmt" - "github.com/apparentlymart/go-textseg/v12/textseg" + "github.com/apparentlymart/go-textseg/v15/textseg" "github.com/hashicorp/hcl/v2" ) @@ -191,8 +194,10 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { toldBadUTF8 := 0 for _, tok := range tokens { - // copy token so it's safe to point to it - tok := tok + tokRange := func() *hcl.Range { + r := tok.Range + return &r + } switch tok.Type { case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot: @@ -211,7 +216,7 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { Severity: hcl.DiagError, Summary: "Unsupported operator", Detail: fmt.Sprintf("Bitwise operators are not supported.%s", suggestion), - Subject: &tok.Range, + Subject: tokRange(), }) toldBitwise++ } @@ -221,7 +226,7 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { Severity: hcl.DiagError, Summary: "Unsupported operator", Detail: "\"**\" is not a supported operator. Exponentiation is not supported as an operator.", - Subject: &tok.Range, + Subject: tokRange(), }) toldExponent++ @@ -234,7 +239,7 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { Severity: hcl.DiagError, Summary: "Invalid character", Detail: "The \"`\" character is not valid. 
To create a multi-line string, use the \"heredoc\" syntax, like \"< 0: - line.lead[0].SpacesBefore = 2 * len(indents) - indents = append(indents, netBrackets) - case netBrackets < 0: - closed := -netBrackets - for closed > 0 && len(indents) > 0 { - switch { - - case closed > indents[len(indents)-1]: - closed -= indents[len(indents)-1] - indents = indents[:len(indents)-1] - - case closed < indents[len(indents)-1]: - indents[len(indents)-1] -= closed - closed = 0 - - default: - indents = indents[:len(indents)-1] - closed = 0 - } - } - line.lead[0].SpacesBefore = 2 * len(indents) - default: - line.lead[0].SpacesBefore = 2 * len(indents) - } - } -} - -func formatSpaces(lines []formatLine) { - for _, line := range lines { - for i, token := range line.lead { - var before, after *Token - if i > 0 { - before = line.lead[i-1] - } else { - before = nilToken - } - if i < (len(line.lead) - 1) { - after = line.lead[i+1] - } else { - after = nilToken - } - if spaceAfterToken(token, before, after) { - after.SpacesBefore = 1 - } else { - after.SpacesBefore = 0 - } - } - for i, token := range line.assign { - if i == 0 { - // first token in "assign" always has one space before to - // separate the equals sign from what it's assigning. - token.SpacesBefore = 1 - } - - var before, after *Token - if i > 0 { - before = line.assign[i-1] - } else { - before = nilToken - } - if i < (len(line.assign) - 1) { - after = line.assign[i+1] - } else { - after = nilToken - } - if spaceAfterToken(token, before, after) { - after.SpacesBefore = 1 - } else { - after.SpacesBefore = 0 - } - } - - } -} - -func formatCells(lines []formatLine) { - - chainStart := -1 - maxColumns := 0 - - // We'll deal with the "assign" cell first, since moving that will - // also impact the "comment" cell. - closeAssignChain := func(i int) { - for _, chainLine := range lines[chainStart:i] { - columns := chainLine.lead.Columns() - spaces := (maxColumns - columns) + 1 - chainLine.assign[0].SpacesBefore = spaces - } - chainStart = -1 - maxColumns = 0 - } - for i, line := range lines { - if line.assign == nil { - if chainStart != -1 { - closeAssignChain(i) - } - } else { - if chainStart == -1 { - chainStart = i - } - columns := line.lead.Columns() - if columns > maxColumns { - maxColumns = columns - } - } - } - if chainStart != -1 { - closeAssignChain(len(lines)) - } - - // Now we'll deal with the comments - closeCommentChain := func(i int) { - for _, chainLine := range lines[chainStart:i] { - columns := chainLine.lead.Columns() + chainLine.assign.Columns() - spaces := (maxColumns - columns) + 1 - chainLine.comment[0].SpacesBefore = spaces - } - chainStart = -1 - maxColumns = 0 - } - for i, line := range lines { - if line.comment == nil { - if chainStart != -1 { - closeCommentChain(i) - } - } else { - if chainStart == -1 { - chainStart = i - } - columns := line.lead.Columns() + line.assign.Columns() - if columns > maxColumns { - maxColumns = columns - } - } - } - if chainStart != -1 { - closeCommentChain(len(lines)) - } - -} - -// spaceAfterToken decides whether a particular subject token should have a -// space after it when surrounded by the given before and after tokens. -// "before" can be TokenNil, if the subject token is at the start of a sequence. 
-func spaceAfterToken(subject, before, after *Token) bool { - switch { - - case after.Type == hclsyntax.TokenNewline || after.Type == hclsyntax.TokenNil: - // Never add spaces before a newline - return false - - case subject.Type == hclsyntax.TokenIdent && after.Type == hclsyntax.TokenOParen: - // Don't split a function name from open paren in a call - return false - - case subject.Type == hclsyntax.TokenDot || after.Type == hclsyntax.TokenDot: - // Don't use spaces around attribute access dots - return false - - case after.Type == hclsyntax.TokenComma || after.Type == hclsyntax.TokenEllipsis: - // No space right before a comma or ... in an argument list - return false - - case subject.Type == hclsyntax.TokenComma: - // Always a space after a comma - return true - - case subject.Type == hclsyntax.TokenQuotedLit || subject.Type == hclsyntax.TokenStringLit || subject.Type == hclsyntax.TokenOQuote || subject.Type == hclsyntax.TokenOHeredoc || after.Type == hclsyntax.TokenQuotedLit || after.Type == hclsyntax.TokenStringLit || after.Type == hclsyntax.TokenCQuote || after.Type == hclsyntax.TokenCHeredoc: - // No extra spaces within templates - return false - - case inKeyword.TokenMatches(subject.asHCLSyntax()) && before.Type == hclsyntax.TokenIdent: - // This is a special case for inside for expressions where a user - // might want to use a literal tuple constructor: - // [for x in [foo]: x] - // ... in that case, we would normally produce in[foo] thinking that - // in is a reference, but we'll recognize it as a keyword here instead - // to make the result less confusing. - return true - - case after.Type == hclsyntax.TokenOBrack && (subject.Type == hclsyntax.TokenIdent || subject.Type == hclsyntax.TokenNumberLit || tokenBracketChange(subject) < 0): - return false - - case subject.Type == hclsyntax.TokenBang: - // No space after a bang - return false - - case subject.Type == hclsyntax.TokenMinus: - // Since a minus can either be subtraction or negation, and the latter - // should _not_ have a space after it, we need to use some heuristics - // to decide which case this is. - // We guess that we have a negation if the token before doesn't look - // like it could be the end of an expression. - - switch before.Type { - - case hclsyntax.TokenNil: - // Minus at the start of input must be a negation - return false - - case hclsyntax.TokenOParen, hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenEqual, hclsyntax.TokenColon, hclsyntax.TokenComma, hclsyntax.TokenQuestion: - // Minus immediately after an opening bracket or separator must be a negation. - return false - - case hclsyntax.TokenPlus, hclsyntax.TokenStar, hclsyntax.TokenSlash, hclsyntax.TokenPercent, hclsyntax.TokenMinus: - // Minus immediately after another arithmetic operator must be negation. - return false - - case hclsyntax.TokenEqualOp, hclsyntax.TokenNotEqual, hclsyntax.TokenGreaterThan, hclsyntax.TokenGreaterThanEq, hclsyntax.TokenLessThan, hclsyntax.TokenLessThanEq: - // Minus immediately after another comparison operator must be negation. - return false - - case hclsyntax.TokenAnd, hclsyntax.TokenOr, hclsyntax.TokenBang: - // Minus immediately after logical operator doesn't make sense but probably intended as negation. 
- return false - - default: - return true - } - - case subject.Type == hclsyntax.TokenOBrace || after.Type == hclsyntax.TokenCBrace: - // Unlike other bracket types, braces have spaces on both sides of them, - // both in single-line nested blocks foo { bar = baz } and in object - // constructor expressions foo = { bar = baz }. - if subject.Type == hclsyntax.TokenOBrace && after.Type == hclsyntax.TokenCBrace { - // An open brace followed by a close brace is an exception, however. - // e.g. foo {} rather than foo { } - return false - } - return true - - // In the unlikely event that an interpolation expression is just - // a single object constructor, we'll put a space between the ${ and - // the following { to make this more obvious, and then the same - // thing for the two braces at the end. - case (subject.Type == hclsyntax.TokenTemplateInterp || subject.Type == hclsyntax.TokenTemplateControl) && after.Type == hclsyntax.TokenOBrace: - return true - case subject.Type == hclsyntax.TokenCBrace && after.Type == hclsyntax.TokenTemplateSeqEnd: - return true - - // Don't add spaces between interpolated items - case subject.Type == hclsyntax.TokenTemplateSeqEnd && (after.Type == hclsyntax.TokenTemplateInterp || after.Type == hclsyntax.TokenTemplateControl): - return false - - case tokenBracketChange(subject) > 0: - // No spaces after open brackets - return false - - case tokenBracketChange(after) < 0: - // No spaces before close brackets - return false - - default: - // Most tokens are space-separated - return true - - } -} - -func linesForFormat(tokens Tokens) []formatLine { - if len(tokens) == 0 { - return make([]formatLine, 0) - } - - // first we'll count our lines, so we can allocate the array for them in - // a single block. (We want to minimize memory pressure in this codepath, - // so it can be run somewhat-frequently by editor integrations.) - lineCount := 1 // if there are zero newlines then there is one line - for _, tok := range tokens { - if tokenIsNewline(tok) { - lineCount++ - } - } - - // To start, we'll just put everything in the "lead" cell on each line, - // and then do another pass over the lines afterwards to adjust. - lines := make([]formatLine, lineCount) - li := 0 - lineStart := 0 - for i, tok := range tokens { - if tok.Type == hclsyntax.TokenEOF { - // The EOF token doesn't belong to any line, and terminates the - // token sequence. - lines[li].lead = tokens[lineStart:i] - break - } - - if tokenIsNewline(tok) { - lines[li].lead = tokens[lineStart : i+1] - lineStart = i + 1 - li++ - } - } - - // If a set of tokens doesn't end in TokenEOF (e.g. because it's a - // fragment of tokens from the middle of a file) then we might fall - // out here with a line still pending. - if lineStart < len(tokens) { - lines[li].lead = tokens[lineStart:] - if lines[li].lead[len(lines[li].lead)-1].Type == hclsyntax.TokenEOF { - lines[li].lead = lines[li].lead[:len(lines[li].lead)-1] - } - } - - // Now we'll pick off any trailing comments and attribute assignments - // to shuffle off into the "comment" and "assign" cells. 
- for i := range lines { - line := &lines[i] - - if len(line.lead) == 0 { - // if the line is empty then there's nothing for us to do - // (this should happen only for the final line, because all other - // lines would have a newline token of some kind) - continue - } - - if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment { - line.comment = line.lead[len(line.lead)-1:] - line.lead = line.lead[:len(line.lead)-1] - } - - for i, tok := range line.lead { - if i > 0 && tok.Type == hclsyntax.TokenEqual { - // We only move the tokens into "assign" if the RHS seems to - // be a whole expression, which we determine by counting - // brackets. If there's a net positive number of brackets - // then that suggests we're introducing a multi-line expression. - netBrackets := 0 - for _, token := range line.lead[i:] { - netBrackets += tokenBracketChange(token) - } - - if netBrackets == 0 { - line.assign = line.lead[i:] - line.lead = line.lead[:i] - } - break - } - } - } - - return lines -} - -func tokenIsNewline(tok *Token) bool { - if tok.Type == hclsyntax.TokenNewline { - return true - } else if tok.Type == hclsyntax.TokenComment { - // Single line tokens (# and //) consume their terminating newline, - // so we need to treat them as newline tokens as well. - if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' { - return true - } - } - return false -} - -func tokenBracketChange(tok *Token) int { - switch tok.Type { - case hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenOParen, hclsyntax.TokenTemplateControl, hclsyntax.TokenTemplateInterp: - return 1 - case hclsyntax.TokenCBrace, hclsyntax.TokenCBrack, hclsyntax.TokenCParen, hclsyntax.TokenTemplateSeqEnd: - return -1 - default: - return 0 - } -} - -// formatLine represents a single line of source code for formatting purposes, -// splitting its tokens into up to three "cells": -// -// lead: always present, representing everything up to one of the others -// assign: if line contains an attribute assignment, represents the tokens -// starting at (and including) the equals symbol -// comment: if line contains any non-comment tokens and ends with a -// single-line comment token, represents the comment. -// -// When formatting, the leading spaces of the first tokens in each of these -// cells is adjusted to align vertically their occurences on consecutive -// rows. -type formatLine struct { - lead Tokens - assign Tokens - comment Tokens -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go deleted file mode 100644 index 48e1312113..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go +++ /dev/null @@ -1,256 +0,0 @@ -package hclwrite - -import ( - "fmt" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// TokensForValue returns a sequence of tokens that represents the given -// constant value. -// -// This function only supports types that are used by HCL. In particular, it -// does not support capsule types and will panic if given one. -// -// It is not possible to express an unknown value in source code, so this -// function will panic if the given value is unknown or contains any unknown -// values. A caller can call the value's IsWhollyKnown method to verify that -// no unknown values are present before calling TokensForValue. 
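TokensForValue, documented above, is the workhorse for code generation: it renders a known cty.Value as a token stream in canonical style. A usage sketch with an illustrative value, guarding against unknowns as the doc comment advises:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"name":    cty.StringVal("example"),
		"private": cty.True,
		"topics":  cty.ListVal([]cty.Value{cty.StringVal("terraform")}),
	})

	// TokensForValue panics on unknown values, so check IsWhollyKnown
	// first when the value comes from expression evaluation.
	if val.IsWhollyKnown() {
		toks := hclwrite.TokensForValue(val)
		fmt.Printf("%s\n", toks.Bytes())
	}
}
```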
-func TokensForValue(val cty.Value) Tokens { - toks := appendTokensForValue(val, nil) - format(toks) // fiddle with the SpacesBefore field to get canonical spacing - return toks -} - -// TokensForTraversal returns a sequence of tokens that represents the given -// traversal. -// -// If the traversal is absolute then the result is a self-contained, valid -// reference expression. If the traversal is relative then the returned tokens -// could be appended to some other expression tokens to traverse into the -// represented expression. -func TokensForTraversal(traversal hcl.Traversal) Tokens { - toks := appendTokensForTraversal(traversal, nil) - format(toks) // fiddle with the SpacesBefore field to get canonical spacing - return toks -} - -func appendTokensForValue(val cty.Value, toks Tokens) Tokens { - switch { - - case !val.IsKnown(): - panic("cannot produce tokens for unknown value") - - case val.IsNull(): - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(`null`), - }) - - case val.Type() == cty.Bool: - var src []byte - if val.True() { - src = []byte(`true`) - } else { - src = []byte(`false`) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: src, - }) - - case val.Type() == cty.Number: - bf := val.AsBigFloat() - srcStr := bf.Text('f', -1) - toks = append(toks, &Token{ - Type: hclsyntax.TokenNumberLit, - Bytes: []byte(srcStr), - }) - - case val.Type() == cty.String: - // TODO: If it's a multi-line string ending in a newline, format - // it as a HEREDOC instead. - src := escapeQuotedStringLit(val.AsString()) - toks = append(toks, &Token{ - Type: hclsyntax.TokenOQuote, - Bytes: []byte{'"'}, - }) - if len(src) > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenQuotedLit, - Bytes: src, - }) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenCQuote, - Bytes: []byte{'"'}, - }) - - case val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType(): - toks = append(toks, &Token{ - Type: hclsyntax.TokenOBrack, - Bytes: []byte{'['}, - }) - - i := 0 - for it := val.ElementIterator(); it.Next(); { - if i > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenComma, - Bytes: []byte{','}, - }) - } - _, eVal := it.Element() - toks = appendTokensForValue(eVal, toks) - i++ - } - - toks = append(toks, &Token{ - Type: hclsyntax.TokenCBrack, - Bytes: []byte{']'}, - }) - - case val.Type().IsMapType() || val.Type().IsObjectType(): - toks = append(toks, &Token{ - Type: hclsyntax.TokenOBrace, - Bytes: []byte{'{'}, - }) - if val.LengthInt() > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenNewline, - Bytes: []byte{'\n'}, - }) - } - - i := 0 - for it := val.ElementIterator(); it.Next(); { - eKey, eVal := it.Element() - if hclsyntax.ValidIdentifier(eKey.AsString()) { - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(eKey.AsString()), - }) - } else { - toks = appendTokensForValue(eKey, toks) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenEqual, - Bytes: []byte{'='}, - }) - toks = appendTokensForValue(eVal, toks) - toks = append(toks, &Token{ - Type: hclsyntax.TokenNewline, - Bytes: []byte{'\n'}, - }) - i++ - } - - toks = append(toks, &Token{ - Type: hclsyntax.TokenCBrace, - Bytes: []byte{'}'}, - }) - - default: - panic(fmt.Sprintf("cannot produce tokens for %#v", val)) - } - - return toks -} - -func appendTokensForTraversal(traversal hcl.Traversal, toks Tokens) Tokens { - for _, step := range traversal { - toks = appendTokensForTraversalStep(step, toks) - } - return toks -} - -func 
appendTokensForTraversalStep(step hcl.Traverser, toks Tokens) Tokens { - switch ts := step.(type) { - case hcl.TraverseRoot: - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(ts.Name), - }) - case hcl.TraverseAttr: - toks = append( - toks, - &Token{ - Type: hclsyntax.TokenDot, - Bytes: []byte{'.'}, - }, - &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(ts.Name), - }, - ) - case hcl.TraverseIndex: - toks = append(toks, &Token{ - Type: hclsyntax.TokenOBrack, - Bytes: []byte{'['}, - }) - toks = appendTokensForValue(ts.Key, toks) - toks = append(toks, &Token{ - Type: hclsyntax.TokenCBrack, - Bytes: []byte{']'}, - }) - default: - panic(fmt.Sprintf("unsupported traversal step type %T", step)) - } - - return toks -} - -func escapeQuotedStringLit(s string) []byte { - if len(s) == 0 { - return nil - } - buf := make([]byte, 0, len(s)) - for i, r := range s { - switch r { - case '\n': - buf = append(buf, '\\', 'n') - case '\r': - buf = append(buf, '\\', 'r') - case '\t': - buf = append(buf, '\\', 't') - case '"': - buf = append(buf, '\\', '"') - case '\\': - buf = append(buf, '\\', '\\') - case '$', '%': - buf = appendRune(buf, r) - remain := s[i+1:] - if len(remain) > 0 && remain[0] == '{' { - // Double up our template introducer symbol to escape it. - buf = appendRune(buf, r) - } - default: - if !unicode.IsPrint(r) { - var fmted string - if r < 65536 { - fmted = fmt.Sprintf("\\u%04x", r) - } else { - fmted = fmt.Sprintf("\\U%08x", r) - } - buf = append(buf, fmted...) - } else { - buf = appendRune(buf, r) - } - } - } - return buf -} - -func appendRune(b []byte, r rune) []byte { - l := utf8.RuneLen(r) - for i := 0; i < l; i++ { - b = append(b, 0) // make room at the end of our buffer - } - ch := b[len(b)-l:] - utf8.EncodeRune(ch, r) - return b -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go deleted file mode 100644 index cedf686270..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go +++ /dev/null @@ -1,23 +0,0 @@ -package hclwrite - -import ( - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -type nativeNodeSorter struct { - Nodes []hclsyntax.Node -} - -func (s nativeNodeSorter) Len() int { - return len(s.Nodes) -} - -func (s nativeNodeSorter) Less(i, j int) bool { - rangeI := s.Nodes[i].Range() - rangeJ := s.Nodes[j].Range() - return rangeI.Start.Byte < rangeJ.Start.Byte -} - -func (s nativeNodeSorter) Swap(i, j int) { - s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i] -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go deleted file mode 100644 index d3a5b72c9e..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go +++ /dev/null @@ -1,296 +0,0 @@ -package hclwrite - -import ( - "fmt" - - "github.com/google/go-cmp/cmp" -) - -// node represents a node in the AST. -type node struct { - content nodeContent - - list *nodes - before, after *node -} - -func newNode(c nodeContent) *node { - return &node{ - content: c, - } -} - -func (n *node) Equal(other *node) bool { - return cmp.Equal(n.content, other.content) -} - -func (n *node) BuildTokens(to Tokens) Tokens { - return n.content.BuildTokens(to) -} - -// Detach removes the receiver from the list it currently belongs to. If the -// node is not currently in a list, this is a no-op. 
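Closing out generate.go above, TokensForTraversal and its appendTokensForTraversalStep helper render reference expressions such as foo.bar[0]. A short usage sketch with an illustrative traversal:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// An absolute traversal equivalent to: github_repository.example[0]
	traversal := hcl.Traversal{
		hcl.TraverseRoot{Name: "github_repository"},
		hcl.TraverseAttr{Name: "example"},
		hcl.TraverseIndex{Key: cty.NumberIntVal(0)},
	}

	toks := hclwrite.TokensForTraversal(traversal)
	fmt.Printf("%s\n", toks.Bytes())
}
```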
-func (n *node) Detach() { - if n.list == nil { - return - } - if n.before != nil { - n.before.after = n.after - } - if n.after != nil { - n.after.before = n.before - } - if n.list.first == n { - n.list.first = n.after - } - if n.list.last == n { - n.list.last = n.before - } - n.list = nil - n.before = nil - n.after = nil -} - -// ReplaceWith removes the receiver from the list it currently belongs to and -// inserts a new node with the given content in its place. If the node is not -// currently in a list, this function will panic. -// -// The return value is the newly-constructed node, containing the given content. -// After this function returns, the reciever is no longer attached to a list. -func (n *node) ReplaceWith(c nodeContent) *node { - if n.list == nil { - panic("can't replace node that is not in a list") - } - - before := n.before - after := n.after - list := n.list - n.before, n.after, n.list = nil, nil, nil - - nn := newNode(c) - nn.before = before - nn.after = after - nn.list = list - if before != nil { - before.after = nn - } - if after != nil { - after.before = nn - } - return nn -} - -func (n *node) assertUnattached() { - if n.list != nil { - panic(fmt.Sprintf("attempt to attach already-attached node %#v", n)) - } -} - -// nodeContent is the interface type implemented by all AST content types. -type nodeContent interface { - walkChildNodes(w internalWalkFunc) - BuildTokens(to Tokens) Tokens -} - -// nodes is a list of nodes. -type nodes struct { - first, last *node -} - -func (ns *nodes) BuildTokens(to Tokens) Tokens { - for n := ns.first; n != nil; n = n.after { - to = n.BuildTokens(to) - } - return to -} - -func (ns *nodes) Clear() { - ns.first = nil - ns.last = nil -} - -func (ns *nodes) Append(c nodeContent) *node { - n := &node{ - content: c, - } - ns.AppendNode(n) - n.list = ns - return n -} - -func (ns *nodes) AppendNode(n *node) { - if ns.last != nil { - n.before = ns.last - ns.last.after = n - } - n.list = ns - ns.last = n - if ns.first == nil { - ns.first = n - } -} - -// Insert inserts a nodeContent at a given position. -// This is just a wrapper for InsertNode. See InsertNode for details. -func (ns *nodes) Insert(pos *node, c nodeContent) *node { - n := &node{ - content: c, - } - ns.InsertNode(pos, n) - n.list = ns - return n -} - -// InsertNode inserts a node at a given position. -// The first argument is a node reference before which to insert. -// To insert it to an empty list, set position to nil. -func (ns *nodes) InsertNode(pos *node, n *node) { - if pos == nil { - // inserts n to empty list. - ns.first = n - ns.last = n - } else { - // inserts n before pos. - pos.before.after = n - n.before = pos.before - pos.before = n - n.after = pos - } - - n.list = ns -} - -func (ns *nodes) AppendUnstructuredTokens(tokens Tokens) *node { - if len(tokens) == 0 { - return nil - } - n := newNode(tokens) - ns.AppendNode(n) - n.list = ns - return n -} - -// FindNodeWithContent searches the nodes for a node whose content equals -// the given content. If it finds one then it returns it. Otherwise it returns -// nil. -func (ns *nodes) FindNodeWithContent(content nodeContent) *node { - for n := ns.first; n != nil; n = n.after { - if n.content == content { - return n - } - } - return nil -} - -// nodeSet is an unordered set of nodes. It is used to describe a set of nodes -// that all belong to the same list that have some role or characteristic -// in common. 
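Detach and ReplaceWith above are the internal list surgery behind hclwrite's public editing methods. A sketch of how that surfaces to callers, assuming the documented FirstMatchingBlock and RemoveBlock helpers on Body (the resource names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func main() {
	src := []byte(`keep = true

resource "github_repository" "demo" {
  name = "demo"
}
`)
	f, diags := hclwrite.ParseConfig(src, "main.tf", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// FirstMatchingBlock walks the body's node list; RemoveBlock then
	// detaches the block's node, splicing it out of the doubly-linked
	// list exactly as node.Detach above describes.
	block := f.Body().FirstMatchingBlock("resource", []string{"github_repository", "demo"})
	if block != nil {
		f.Body().RemoveBlock(block)
	}
	fmt.Printf("%s", f.Bytes())
}
```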
-type nodeSet map[*node]struct{} - -func newNodeSet() nodeSet { - return make(nodeSet) -} - -func (ns nodeSet) Has(n *node) bool { - if ns == nil { - return false - } - _, exists := ns[n] - return exists -} - -func (ns nodeSet) Add(n *node) { - ns[n] = struct{}{} -} - -func (ns nodeSet) Remove(n *node) { - delete(ns, n) -} - -func (ns nodeSet) Clear() { - for n := range ns { - delete(ns, n) - } -} - -func (ns nodeSet) List() []*node { - if len(ns) == 0 { - return nil - } - - ret := make([]*node, 0, len(ns)) - - // Determine which list we are working with. We assume here that all of - // the nodes belong to the same list, since that is part of the contract - // for nodeSet. - var list *nodes - for n := range ns { - list = n.list - break - } - - // We recover the order by iterating over the whole list. This is not - // the most efficient way to do it, but our node lists should always be - // small so not worth making things more complex. - for n := list.first; n != nil; n = n.after { - if ns.Has(n) { - ret = append(ret, n) - } - } - return ret -} - -// FindNodeWithContent searches the nodes for a node whose content equals -// the given content. If it finds one then it returns it. Otherwise it returns -// nil. -func (ns nodeSet) FindNodeWithContent(content nodeContent) *node { - for n := range ns { - if n.content == content { - return n - } - } - return nil -} - -type internalWalkFunc func(*node) - -// inTree can be embedded into a content struct that has child nodes to get -// a standard implementation of the NodeContent interface and a record of -// a potential parent node. -type inTree struct { - parent *node - children *nodes -} - -func newInTree() inTree { - return inTree{ - children: &nodes{}, - } -} - -func (it *inTree) assertUnattached() { - if it.parent != nil { - panic(fmt.Sprintf("node is already attached to %T", it.parent.content)) - } -} - -func (it *inTree) walkChildNodes(w internalWalkFunc) { - for n := it.children.first; n != nil; n = n.after { - w(n) - } -} - -func (it *inTree) BuildTokens(to Tokens) Tokens { - for n := it.children.first; n != nil; n = n.after { - to = n.BuildTokens(to) - } - return to -} - -// leafNode can be embedded into a content struct to give it a do-nothing -// implementation of walkChildNodes -type leafNode struct { -} - -func (n *leafNode) walkChildNodes(w internalWalkFunc) { -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go deleted file mode 100644 index 3df51447ae..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go +++ /dev/null @@ -1,638 +0,0 @@ -package hclwrite - -import ( - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// Our "parser" here is actually not doing any parsing of its own. Instead, -// it leans on the native parser in hclsyntax, and then uses the source ranges -// from the AST to partition the raw token sequence to match the raw tokens -// up to AST nodes. 
-// -// This strategy feels somewhat counter-intuitive, since most of the work the -// parser does is thrown away here, but this strategy is chosen because the -// normal parsing work done by hclsyntax is considered to be the "main case", -// while modifying and re-printing source is more of an edge case, used only -// in ancillary tools, and so it's good to keep all the main parsing logic -// with the main case but keep all of the extra complexity of token wrangling -// out of the main parser, which is already rather complex just serving the -// use-cases it already serves. -// -// If the parsing step produces any errors, the returned File is nil because -// we can't reliably extract tokens from the partial AST produced by an -// erroneous parse. -func parse(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) { - file, diags := hclsyntax.ParseConfig(src, filename, start) - if diags.HasErrors() { - return nil, diags - } - - // To do our work here, we use the "native" tokens (those from hclsyntax) - // to match against source ranges in the AST, but ultimately produce - // slices from our sequence of "writer" tokens, which contain only - // *relative* position information that is more appropriate for - // transformation/writing use-cases. - nativeTokens, diags := hclsyntax.LexConfig(src, filename, start) - if diags.HasErrors() { - // should never happen, since we would've caught these diags in - // the first call above. - return nil, diags - } - writerTokens := writerTokens(nativeTokens) - - from := inputTokens{ - nativeTokens: nativeTokens, - writerTokens: writerTokens, - } - - before, root, after := parseBody(file.Body.(*hclsyntax.Body), from) - ret := &File{ - inTree: newInTree(), - - srcBytes: src, - body: root, - } - - nodes := ret.inTree.children - nodes.Append(before.Tokens()) - nodes.AppendNode(root) - nodes.Append(after.Tokens()) - - return ret, diags -} - -type inputTokens struct { - nativeTokens hclsyntax.Tokens - writerTokens Tokens -} - -func (it inputTokens) Partition(rng hcl.Range) (before, within, after inputTokens) { - start, end := partitionTokens(it.nativeTokens, rng) - before = it.Slice(0, start) - within = it.Slice(start, end) - after = it.Slice(end, len(it.nativeTokens)) - return -} - -func (it inputTokens) PartitionType(ty hclsyntax.TokenType) (before, within, after inputTokens) { - for i, t := range it.writerTokens { - if t.Type == ty { - return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens)) - } - } - panic(fmt.Sprintf("didn't find any token of type %s", ty)) -} - -func (it inputTokens) PartitionTypeOk(ty hclsyntax.TokenType) (before, within, after inputTokens, ok bool) { - for i, t := range it.writerTokens { - if t.Type == ty { - return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens)), true - } - } - - return inputTokens{}, inputTokens{}, inputTokens{}, false -} - -func (it inputTokens) PartitionTypeSingle(ty hclsyntax.TokenType) (before inputTokens, found *Token, after inputTokens) { - before, within, after := it.PartitionType(ty) - if within.Len() != 1 { - panic("PartitionType found more than one token") - } - return before, within.Tokens()[0], after -} - -// PartitionIncludeComments is like Partition except the returned "within" -// range includes any lead and line comments associated with the range. 
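PartitionIncludingComments, whose doc comment closes above, is why hclwrite can treat a block together with its lead and line comments as a single unit: anything you do not edit round-trips byte-for-byte. A small demonstration, assuming this package's public ParseConfig entry point:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func main() {
	src := []byte(`# lead comment kept with the attribute below
name = "example" # line comment, also kept

settings {
  nested = true
}
`)
	f, diags := hclwrite.ParseConfig(src, "example.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// No edits were made, so the output is identical to the input,
	// comments and original spacing included.
	fmt.Printf("%s", f.Bytes())
}
```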
-func (it inputTokens) PartitionIncludingComments(rng hcl.Range) (before, within, after inputTokens) { - start, end := partitionTokens(it.nativeTokens, rng) - start = partitionLeadCommentTokens(it.nativeTokens[:start]) - _, afterNewline := partitionLineEndTokens(it.nativeTokens[end:]) - end += afterNewline - - before = it.Slice(0, start) - within = it.Slice(start, end) - after = it.Slice(end, len(it.nativeTokens)) - return - -} - -// PartitionBlockItem is similar to PartitionIncludeComments but it returns -// the comments as separate token sequences so that they can be captured into -// AST attributes. It makes assumptions that apply only to block items, so -// should not be used for other constructs. -func (it inputTokens) PartitionBlockItem(rng hcl.Range) (before, leadComments, within, lineComments, newline, after inputTokens) { - before, within, after = it.Partition(rng) - before, leadComments = before.PartitionLeadComments() - lineComments, newline, after = after.PartitionLineEndTokens() - return -} - -func (it inputTokens) PartitionLeadComments() (before, within inputTokens) { - start := partitionLeadCommentTokens(it.nativeTokens) - before = it.Slice(0, start) - within = it.Slice(start, len(it.nativeTokens)) - return -} - -func (it inputTokens) PartitionLineEndTokens() (comments, newline, after inputTokens) { - afterComments, afterNewline := partitionLineEndTokens(it.nativeTokens) - comments = it.Slice(0, afterComments) - newline = it.Slice(afterComments, afterNewline) - after = it.Slice(afterNewline, len(it.nativeTokens)) - return -} - -func (it inputTokens) Slice(start, end int) inputTokens { - // When we slice, we create a new slice with no additional capacity because - // we expect that these slices will be mutated in order to insert - // new code into the AST, and we want to ensure that a new underlying - // array gets allocated in that case, rather than writing into some - // following slice and corrupting it. - return inputTokens{ - nativeTokens: it.nativeTokens[start:end:end], - writerTokens: it.writerTokens[start:end:end], - } -} - -func (it inputTokens) Len() int { - return len(it.nativeTokens) -} - -func (it inputTokens) Tokens() Tokens { - return it.writerTokens -} - -func (it inputTokens) Types() []hclsyntax.TokenType { - ret := make([]hclsyntax.TokenType, len(it.nativeTokens)) - for i, tok := range it.nativeTokens { - ret[i] = tok.Type - } - return ret -} - -// parseBody locates the given body within the given input tokens and returns -// the resulting *Body object as well as the tokens that appeared before and -// after it. -func parseBody(nativeBody *hclsyntax.Body, from inputTokens) (inputTokens, *node, inputTokens) { - before, within, after := from.PartitionIncludingComments(nativeBody.SrcRange) - - // The main AST doesn't retain the original source ordering of the - // body items, so we need to reconstruct that ordering by inspecting - // their source ranges. 
- nativeItems := make([]hclsyntax.Node, 0, len(nativeBody.Attributes)+len(nativeBody.Blocks)) - for _, nativeAttr := range nativeBody.Attributes { - nativeItems = append(nativeItems, nativeAttr) - } - for _, nativeBlock := range nativeBody.Blocks { - nativeItems = append(nativeItems, nativeBlock) - } - sort.Sort(nativeNodeSorter{nativeItems}) - - body := &Body{ - inTree: newInTree(), - items: newNodeSet(), - } - - remain := within - for _, nativeItem := range nativeItems { - beforeItem, item, afterItem := parseBodyItem(nativeItem, remain) - - if beforeItem.Len() > 0 { - body.AppendUnstructuredTokens(beforeItem.Tokens()) - } - body.appendItemNode(item) - - remain = afterItem - } - - if remain.Len() > 0 { - body.AppendUnstructuredTokens(remain.Tokens()) - } - - return before, newNode(body), after -} - -func parseBodyItem(nativeItem hclsyntax.Node, from inputTokens) (inputTokens, *node, inputTokens) { - before, leadComments, within, lineComments, newline, after := from.PartitionBlockItem(nativeItem.Range()) - - var item *node - - switch tItem := nativeItem.(type) { - case *hclsyntax.Attribute: - item = parseAttribute(tItem, within, leadComments, lineComments, newline) - case *hclsyntax.Block: - item = parseBlock(tItem, within, leadComments, lineComments, newline) - default: - // should never happen if caller is behaving - panic("unsupported native item type") - } - - return before, item, after -} - -func parseAttribute(nativeAttr *hclsyntax.Attribute, from, leadComments, lineComments, newline inputTokens) *node { - attr := &Attribute{ - inTree: newInTree(), - } - children := attr.inTree.children - - { - cn := newNode(newComments(leadComments.Tokens())) - attr.leadComments = cn - children.AppendNode(cn) - } - - before, nameTokens, from := from.Partition(nativeAttr.NameRange) - { - children.AppendUnstructuredTokens(before.Tokens()) - if nameTokens.Len() != 1 { - // Should never happen with valid input - panic("attribute name is not exactly one token") - } - token := nameTokens.Tokens()[0] - in := newNode(newIdentifier(token)) - attr.name = in - children.AppendNode(in) - } - - before, equalsTokens, from := from.Partition(nativeAttr.EqualsRange) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendUnstructuredTokens(equalsTokens.Tokens()) - - before, exprTokens, from := from.Partition(nativeAttr.Expr.Range()) - { - children.AppendUnstructuredTokens(before.Tokens()) - exprNode := parseExpression(nativeAttr.Expr, exprTokens) - attr.expr = exprNode - children.AppendNode(exprNode) - } - - { - cn := newNode(newComments(lineComments.Tokens())) - attr.lineComments = cn - children.AppendNode(cn) - } - - children.AppendUnstructuredTokens(newline.Tokens()) - - // Collect any stragglers, though there shouldn't be any - children.AppendUnstructuredTokens(from.Tokens()) - - return newNode(attr) -} - -func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, newline inputTokens) *node { - block := &Block{ - inTree: newInTree(), - } - children := block.inTree.children - - { - cn := newNode(newComments(leadComments.Tokens())) - block.leadComments = cn - children.AppendNode(cn) - } - - before, typeTokens, from := from.Partition(nativeBlock.TypeRange) - { - children.AppendUnstructuredTokens(before.Tokens()) - if typeTokens.Len() != 1 { - // Should never happen with valid input - panic("block type name is not exactly one token") - } - token := typeTokens.Tokens()[0] - in := newNode(newIdentifier(token)) - block.typeName = in - children.AppendNode(in) - } - - before, labelsNode, 
from := parseBlockLabels(nativeBlock, from) - block.labels = labelsNode - children.AppendNode(labelsNode) - - before, oBrace, from := from.Partition(nativeBlock.OpenBraceRange) - children.AppendUnstructuredTokens(before.Tokens()) - block.open = children.AppendUnstructuredTokens(oBrace.Tokens()) - - // We go a bit out of order here: we go hunting for the closing brace - // so that we have a delimited body, but then we'll deal with the body - // before we actually append the closing brace and any straggling tokens - // that appear after it. - bodyTokens, cBrace, from := from.Partition(nativeBlock.CloseBraceRange) - before, body, after := parseBody(nativeBlock.Body, bodyTokens) - children.AppendUnstructuredTokens(before.Tokens()) - block.body = body - children.AppendNode(body) - children.AppendUnstructuredTokens(after.Tokens()) - - block.close = children.AppendUnstructuredTokens(cBrace.Tokens()) - - // stragglers - children.AppendUnstructuredTokens(from.Tokens()) - if lineComments.Len() > 0 { - // blocks don't actually have line comments, so we'll just treat - // them as extra stragglers - children.AppendUnstructuredTokens(lineComments.Tokens()) - } - children.AppendUnstructuredTokens(newline.Tokens()) - - return newNode(block) -} - -func parseBlockLabels(nativeBlock *hclsyntax.Block, from inputTokens) (inputTokens, *node, inputTokens) { - labelsObj := newBlockLabels(nil) - children := labelsObj.children - - var beforeAll inputTokens - for i, rng := range nativeBlock.LabelRanges { - var before, labelTokens inputTokens - before, labelTokens, from = from.Partition(rng) - if i == 0 { - beforeAll = before - } else { - children.AppendUnstructuredTokens(before.Tokens()) - } - tokens := labelTokens.Tokens() - var ln *node - if len(tokens) == 1 && tokens[0].Type == hclsyntax.TokenIdent { - ln = newNode(newIdentifier(tokens[0])) - } else { - ln = newNode(newQuoted(tokens)) - } - labelsObj.items.Add(ln) - children.AppendNode(ln) - } - - after := from - return beforeAll, newNode(labelsObj), after -} - -func parseExpression(nativeExpr hclsyntax.Expression, from inputTokens) *node { - expr := newExpression() - children := expr.inTree.children - - nativeVars := nativeExpr.Variables() - - for _, nativeTraversal := range nativeVars { - before, traversal, after := parseTraversal(nativeTraversal, from) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendNode(traversal) - expr.absTraversals.Add(traversal) - from = after - } - // Attach any stragglers that don't belong to a traversal to the expression - // itself. In an expression with no traversals at all, this is just the - // entirety of "from". 
- children.AppendUnstructuredTokens(from.Tokens()) - - return newNode(expr) -} - -func parseTraversal(nativeTraversal hcl.Traversal, from inputTokens) (before inputTokens, n *node, after inputTokens) { - traversal := newTraversal() - children := traversal.inTree.children - before, from, after = from.Partition(nativeTraversal.SourceRange()) - - stepAfter := from - for _, nativeStep := range nativeTraversal { - before, step, after := parseTraversalStep(nativeStep, stepAfter) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendNode(step) - traversal.steps.Add(step) - stepAfter = after - } - - return before, newNode(traversal), after -} - -func parseTraversalStep(nativeStep hcl.Traverser, from inputTokens) (before inputTokens, n *node, after inputTokens) { - var children *nodes - switch tNativeStep := nativeStep.(type) { - - case hcl.TraverseRoot, hcl.TraverseAttr: - step := newTraverseName() - children = step.inTree.children - before, from, after = from.Partition(nativeStep.SourceRange()) - inBefore, token, inAfter := from.PartitionTypeSingle(hclsyntax.TokenIdent) - name := newIdentifier(token) - children.AppendUnstructuredTokens(inBefore.Tokens()) - step.name = children.Append(name) - children.AppendUnstructuredTokens(inAfter.Tokens()) - return before, newNode(step), after - - case hcl.TraverseIndex: - step := newTraverseIndex() - children = step.inTree.children - before, from, after = from.Partition(nativeStep.SourceRange()) - - if inBefore, dot, from, ok := from.PartitionTypeOk(hclsyntax.TokenDot); ok { - children.AppendUnstructuredTokens(inBefore.Tokens()) - children.AppendUnstructuredTokens(dot.Tokens()) - - valBefore, valToken, valAfter := from.PartitionTypeSingle(hclsyntax.TokenNumberLit) - children.AppendUnstructuredTokens(valBefore.Tokens()) - key := newNumber(valToken) - step.key = children.Append(key) - children.AppendUnstructuredTokens(valAfter.Tokens()) - - return before, newNode(step), after - } - - var inBefore, oBrack, keyTokens, cBrack inputTokens - inBefore, oBrack, from = from.PartitionType(hclsyntax.TokenOBrack) - children.AppendUnstructuredTokens(inBefore.Tokens()) - children.AppendUnstructuredTokens(oBrack.Tokens()) - keyTokens, cBrack, from = from.PartitionType(hclsyntax.TokenCBrack) - - keyVal := tNativeStep.Key - switch keyVal.Type() { - case cty.String: - key := newQuoted(keyTokens.Tokens()) - step.key = children.Append(key) - case cty.Number: - valBefore, valToken, valAfter := keyTokens.PartitionTypeSingle(hclsyntax.TokenNumberLit) - children.AppendUnstructuredTokens(valBefore.Tokens()) - key := newNumber(valToken) - step.key = children.Append(key) - children.AppendUnstructuredTokens(valAfter.Tokens()) - } - - children.AppendUnstructuredTokens(cBrack.Tokens()) - children.AppendUnstructuredTokens(from.Tokens()) - - return before, newNode(step), after - default: - panic(fmt.Sprintf("unsupported traversal step type %T", nativeStep)) - } - -} - -// writerTokens takes a sequence of tokens as produced by the main hclsyntax -// package and transforms it into an equivalent sequence of tokens using -// this package's own token model. -// -// The resulting list contains the same number of tokens and uses the same -// indices as the input, allowing the two sets of tokens to be correlated -// by index. 
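writerTokens, described above, converts the scanner's absolutely positioned tokens into this package's relative model, in which only SpacesBefore survives. The same relative model is what callers hand-build when generating code token by token; a sketch using the Token fields shown later in this diff:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func main() {
	// hclwrite tokens carry no source positions, only the number of
	// spaces preceding each token, so they can be spliced anywhere.
	toks := hclwrite.Tokens{
		{Type: hclsyntax.TokenIdent, Bytes: []byte("count")},
		{Type: hclsyntax.TokenEqual, Bytes: []byte("="), SpacesBefore: 1},
		{Type: hclsyntax.TokenNumberLit, Bytes: []byte("3"), SpacesBefore: 1},
		{Type: hclsyntax.TokenNewline, Bytes: []byte("\n")},
	}
	fmt.Printf("%s", toks.Bytes()) // count = 3
}
```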
-func writerTokens(nativeTokens hclsyntax.Tokens) Tokens { - // Ultimately we want a slice of token _pointers_, but since we can - // predict how much memory we're going to devote to tokens we'll allocate - // it all as a single flat buffer and thus give the GC less work to do. - tokBuf := make([]Token, len(nativeTokens)) - var lastByteOffset int - for i, mainToken := range nativeTokens { - // Create a copy of the bytes so that we can mutate without - // corrupting the original token stream. - bytes := make([]byte, len(mainToken.Bytes)) - copy(bytes, mainToken.Bytes) - - tokBuf[i] = Token{ - Type: mainToken.Type, - Bytes: bytes, - - // We assume here that spaces are always ASCII spaces, since - // that's what the scanner also assumes, and thus the number - // of bytes skipped is also the number of space characters. - SpacesBefore: mainToken.Range.Start.Byte - lastByteOffset, - } - - lastByteOffset = mainToken.Range.End.Byte - } - - // Now make a slice of pointers into the previous slice. - ret := make(Tokens, len(tokBuf)) - for i := range ret { - ret[i] = &tokBuf[i] - } - - return ret -} - -// partitionTokens takes a sequence of tokens and a hcl.Range and returns -// two indices within the token sequence that correspond with the range -// boundaries, such that the slice operator could be used to produce -// three token sequences for before, within, and after respectively: -// -// start, end := partitionTokens(toks, rng) -// before := toks[:start] -// within := toks[start:end] -// after := toks[end:] -// -// This works best when the range is aligned with token boundaries (e.g. -// because it was produced in terms of the scanner's result) but if that isn't -// true then it will make a best effort that may produce strange results at -// the boundaries. -// -// Native hclsyntax tokens are used here, because they contain the necessary -// absolute position information. However, since writerTokens produces a -// correlatable sequence of writer tokens, the resulting indices can be -// used also to index into its result, allowing the partitioning of writer -// tokens to be driven by the partitioning of native tokens. -// -// The tokens are assumed to be in source order and non-overlapping, which -// will be true if the token sequence from the scanner is used directly. -func partitionTokens(toks hclsyntax.Tokens, rng hcl.Range) (start, end int) { - // We use a linear search here because we assume that in most cases our - // target range is close to the beginning of the sequence, and the sequences - // are generally small for most reasonable files anyway. - for i := 0; ; i++ { - if i >= len(toks) { - // No tokens for the given range at all! - return len(toks), len(toks) - } - - if toks[i].Range.Start.Byte >= rng.Start.Byte { - start = i - break - } - } - - for i := start; ; i++ { - if i >= len(toks) { - // The range "hangs off" the end of the token sequence - return start, len(toks) - } - - if toks[i].Range.Start.Byte >= rng.End.Byte { - end = i // end marker is exclusive - break - } - } - - return start, end -} - -// partitionLeadCommentTokens takes a sequence of tokens that is assumed -// to immediately precede a construct that can have lead comment tokens, -// and returns the index into that sequence where the lead comments begin. -// -// Lead comments are defined as whole lines containing only comment tokens -// with no blank lines between. If no such lines are found, the returned -// index will be len(toks). 
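The two-index contract documented for partitionTokens above can be modeled on plain byte offsets. This stripped-down sketch (not the vendored implementation) shows why the start/end pair is enough to split a token sequence into before/within/after:

```go
package main

import "fmt"

// partitionByOffset mimics the partitionTokens contract on a plain slice
// of byte offsets: it returns start and end such that offs[:start] lies
// before the byte range [rngStart, rngEnd), offs[start:end] lies within
// it, and offs[end:] lies after it.
func partitionByOffset(offs []int, rngStart, rngEnd int) (start, end int) {
	start = len(offs)
	for i, off := range offs {
		if off >= rngStart {
			start = i
			break
		}
	}
	end = len(offs)
	for i := start; i < len(offs); i++ {
		if offs[i] >= rngEnd {
			end = i
			break
		}
	}
	return start, end
}

func main() {
	offs := []int{0, 4, 8, 12, 16}
	start, end := partitionByOffset(offs, 4, 12)
	fmt.Println(offs[:start], offs[start:end], offs[end:]) // [0] [4 8] [12 16]
}
```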
-func partitionLeadCommentTokens(toks hclsyntax.Tokens) int { - // single-line comments (which is what we're interested in here) - // consume their trailing newline, so we can just walk backwards - // until we stop seeing comment tokens. - for i := len(toks) - 1; i >= 0; i-- { - if toks[i].Type != hclsyntax.TokenComment { - return i + 1 - } - } - return 0 -} - -// partitionLineEndTokens takes a sequence of tokens that is assumed -// to immediately follow a construct that can have a line comment, and -// returns first the index where any line comments end and then second -// the index immediately after the trailing newline. -// -// Line comments are defined as comments that appear immediately after -// a construct on the same line where its significant tokens ended. -// -// Since single-line comment tokens (# and //) include the newline that -// terminates them, in the presence of these the two returned indices -// will be the same since the comment itself serves as the line end. -func partitionLineEndTokens(toks hclsyntax.Tokens) (afterComment, afterNewline int) { - for i := 0; i < len(toks); i++ { - tok := toks[i] - if tok.Type != hclsyntax.TokenComment { - switch tok.Type { - case hclsyntax.TokenNewline: - return i, i + 1 - case hclsyntax.TokenEOF: - // Although this is valid, we mustn't include the EOF - // itself as our "newline" or else strange things will - // happen when we try to append new items. - return i, i - default: - // If we have well-formed input here then nothing else should be - // possible. This path should never happen, because we only try - // to extract tokens from the sequence if the parser succeeded, - // and it should catch this problem itself. - panic("malformed line trailers: expected only comments and newlines") - } - } - - if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' { - // Newline at the end of a single-line comment serves both as - // the end of comments *and* the end of the line. - return i + 1, i + 1 - } - } - return len(toks), len(toks) -} - -// lexConfig uses the hclsyntax scanner to get a token stream and then -// rewrites it into this package's token model. -// -// Any errors produced during scanning are ignored, so the results of this -// function should be used with care. -func lexConfig(src []byte) Tokens { - mainTokens, _ := hclsyntax.LexConfig(src, "", hcl.Pos{Byte: 0, Line: 1, Column: 1}) - return writerTokens(mainTokens) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go deleted file mode 100644 index 678a3aa457..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go +++ /dev/null @@ -1,44 +0,0 @@ -package hclwrite - -import ( - "bytes" - - "github.com/hashicorp/hcl/v2" -) - -// NewFile creates a new file object that is empty and ready to have constructs -// added t it. -func NewFile() *File { - body := &Body{ - inTree: newInTree(), - items: newNodeSet(), - } - file := &File{ - inTree: newInTree(), - } - file.body = file.inTree.children.Append(body) - return file -} - -// ParseConfig interprets the given source bytes into a *hclwrite.File. The -// resulting AST can be used to perform surgical edits on the source code -// before turning it back into bytes again. -func ParseConfig(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) { - return parse(src, filename, start) -} - -// Format takes source code and performs simple whitespace changes to transform -// it to a canonical layout style. 
-// -// Format skips constructing an AST and works directly with tokens, so it -// is less expensive than formatting via the AST for situations where no other -// changes will be made. It also ignores syntax errors and can thus be applied -// to partial source code, although the result in that case may not be -// desirable. -func Format(src []byte) []byte { - tokens := lexConfig(src) - format(tokens) - buf := &bytes.Buffer{} - tokens.WriteTo(buf) - return buf.Bytes() -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go deleted file mode 100644 index 7d21d09dde..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go +++ /dev/null @@ -1,122 +0,0 @@ -package hclwrite - -import ( - "bytes" - "io" - - "github.com/apparentlymart/go-textseg/v12/textseg" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// Token is a single sequence of bytes annotated with a type. It is similar -// in purpose to hclsyntax.Token, but discards the source position information -// since that is not useful in code generation. -type Token struct { - Type hclsyntax.TokenType - Bytes []byte - - // We record the number of spaces before each token so that we can - // reproduce the exact layout of the original file when we're making - // surgical changes in-place. When _new_ code is created it will always - // be in the canonical style, but we preserve layout of existing code. - SpacesBefore int -} - -// asHCLSyntax returns the receiver expressed as an incomplete hclsyntax.Token. -// A complete token is not possible since we don't have source location -// information here, and so this method is unexported so we can be sure it will -// only be used for internal purposes where we know the range isn't important. -// -// This is primarily intended to allow us to re-use certain functionality from -// hclsyntax rather than re-implementing it against our own token type here. -func (t *Token) asHCLSyntax() hclsyntax.Token { - return hclsyntax.Token{ - Type: t.Type, - Bytes: t.Bytes, - Range: hcl.Range{ - Filename: "", - }, - } -} - -// Tokens is a flat list of tokens. -type Tokens []*Token - -func (ts Tokens) Bytes() []byte { - buf := &bytes.Buffer{} - ts.WriteTo(buf) - return buf.Bytes() -} - -func (ts Tokens) testValue() string { - return string(ts.Bytes()) -} - -// Columns returns the number of columns (grapheme clusters) the token sequence -// occupies. The result is not meaningful if there are newline or single-line -// comment tokens in the sequence. -func (ts Tokens) Columns() int { - ret := 0 - for _, token := range ts { - ret += token.SpacesBefore // spaces are always worth one column each - ct, _ := textseg.TokenCount(token.Bytes, textseg.ScanGraphemeClusters) - ret += ct - } - return ret -} - -// WriteTo takes an io.Writer and writes the bytes for each token to it, -// along with the spacing that separates each token. In other words, this -// allows serializing the tokens to a file or other such byte stream. -func (ts Tokens) WriteTo(wr io.Writer) (int64, error) { - // We know we're going to be writing a lot of small chunks of repeated - // space characters, so we'll prepare a buffer of these that we can - // easily pass to wr.Write without any further allocation. 
- spaces := make([]byte, 40) - for i := range spaces { - spaces[i] = ' ' - } - - var n int64 - var err error - for _, token := range ts { - if err != nil { - return n, err - } - - for spacesBefore := token.SpacesBefore; spacesBefore > 0; spacesBefore -= len(spaces) { - thisChunk := spacesBefore - if thisChunk > len(spaces) { - thisChunk = len(spaces) - } - var thisN int - thisN, err = wr.Write(spaces[:thisChunk]) - n += int64(thisN) - if err != nil { - return n, err - } - } - - var thisN int - thisN, err = wr.Write(token.Bytes) - n += int64(thisN) - } - - return n, err -} - -func (ts Tokens) walkChildNodes(w internalWalkFunc) { - // Unstructured tokens have no child nodes -} - -func (ts Tokens) BuildTokens(to Tokens) Tokens { - return append(to, ts...) -} - -func newIdentToken(name string) *Token { - return &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(name), - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/ast.go b/vendor/github.com/hashicorp/hcl/v2/json/ast.go deleted file mode 100644 index 9c580ca347..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/ast.go +++ /dev/null @@ -1,121 +0,0 @@ -package json - -import ( - "math/big" - - "github.com/hashicorp/hcl/v2" -) - -type node interface { - Range() hcl.Range - StartRange() hcl.Range -} - -type objectVal struct { - Attrs []*objectAttr - SrcRange hcl.Range // range of the entire object, brace-to-brace - OpenRange hcl.Range // range of the opening brace - CloseRange hcl.Range // range of the closing brace -} - -func (n *objectVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *objectVal) StartRange() hcl.Range { - return n.OpenRange -} - -type objectAttr struct { - Name string - Value node - NameRange hcl.Range // range of the name string -} - -func (n *objectAttr) Range() hcl.Range { - return n.NameRange -} - -func (n *objectAttr) StartRange() hcl.Range { - return n.NameRange -} - -type arrayVal struct { - Values []node - SrcRange hcl.Range // range of the entire object, bracket-to-bracket - OpenRange hcl.Range // range of the opening bracket -} - -func (n *arrayVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *arrayVal) StartRange() hcl.Range { - return n.OpenRange -} - -type booleanVal struct { - Value bool - SrcRange hcl.Range -} - -func (n *booleanVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *booleanVal) StartRange() hcl.Range { - return n.SrcRange -} - -type numberVal struct { - Value *big.Float - SrcRange hcl.Range -} - -func (n *numberVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *numberVal) StartRange() hcl.Range { - return n.SrcRange -} - -type stringVal struct { - Value string - SrcRange hcl.Range -} - -func (n *stringVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *stringVal) StartRange() hcl.Range { - return n.SrcRange -} - -type nullVal struct { - SrcRange hcl.Range -} - -func (n *nullVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *nullVal) StartRange() hcl.Range { - return n.SrcRange -} - -// invalidVal is used as a placeholder where a value is needed for a valid -// parse tree but the input was invalid enough to prevent one from being -// created. 
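The ast.go node types above back HCL's JSON syntax. From a caller's perspective that machinery is reached through the package's public Parse function; a sketch, assuming the hcl/v2/json Parse entry point (the attribute names are illustrative):

```go
package main

import (
	"fmt"

	hcljson "github.com/hashicorp/hcl/v2/json"
)

func main() {
	src := []byte(`{"name": "example", "private": true}`)
	f, diags := hcljson.Parse(src, "config.tf.json")
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// JSON bodies map object properties to HCL attributes.
	attrs, moreDiags := f.Body.JustAttributes()
	if moreDiags.HasErrors() {
		panic(moreDiags.Error())
	}
	for name, attr := range attrs {
		val, _ := attr.Expr.Value(nil)
		fmt.Printf("%s = %s\n", name, val.GoString())
	}
}
```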
-type invalidVal struct { - SrcRange hcl.Range -} - -func (n invalidVal) Range() hcl.Range { - return n.SrcRange -} - -func (n invalidVal) StartRange() hcl.Range { - return n.SrcRange -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go deleted file mode 100644 index fbdd8bff50..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go +++ /dev/null @@ -1,33 +0,0 @@ -package json - -import ( - "github.com/agext/levenshtein" -) - -var keywords = []string{"false", "true", "null"} - -// keywordSuggestion tries to find a valid JSON keyword that is close to the -// given string and returns it if found. If no keyword is close enough, returns -// the empty string. -func keywordSuggestion(given string) string { - return nameSuggestion(given, keywords) -} - -// nameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string. -// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func nameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/doc.go b/vendor/github.com/hashicorp/hcl/v2/json/doc.go deleted file mode 100644 index 84d731939f..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package json is the JSON parser for HCL. It parses JSON files and returns -// implementations of the core HCL structural interfaces in terms of the -// JSON data inside. -// -// This is not a generic JSON parser. Instead, it deals with the mapping from -// the JSON information model to the HCL information model, using a number -// of hard-coded structural conventions. -// -// In most cases applications will not import this package directly, but will -// instead access its functionality indirectly through functions in the main -// "hcl" package and in the "hclparse" package. -package json diff --git a/vendor/github.com/hashicorp/hcl/v2/json/navigation.go b/vendor/github.com/hashicorp/hcl/v2/json/navigation.go deleted file mode 100644 index bc8a97f749..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/navigation.go +++ /dev/null @@ -1,70 +0,0 @@ -package json - -import ( - "fmt" - "strings" -) - -type navigation struct { - root node -} - -// Implementation of hcled.ContextString -func (n navigation) ContextString(offset int) string { - steps := navigationStepsRev(n.root, offset) - if steps == nil { - return "" - } - - // We built our slice backwards, so we'll reverse it in-place now. - half := len(steps) / 2 // integer division - for i := 0; i < half; i++ { - steps[i], steps[len(steps)-1-i] = steps[len(steps)-1-i], steps[i] - } - - ret := strings.Join(steps, "") - if len(ret) > 0 && ret[0] == '.' { - ret = ret[1:] - } - return ret -} - -func navigationStepsRev(v node, offset int) []string { - switch tv := v.(type) { - case *objectVal: - // Do any of our properties have an object that contains the target - // offset? 
- for _, attr := range tv.Attrs { - k := attr.Name - av := attr.Value - - switch av.(type) { - case *objectVal, *arrayVal: - // okay - default: - continue - } - - if av.Range().ContainsOffset(offset) { - return append(navigationStepsRev(av, offset), "."+k) - } - } - case *arrayVal: - // Do any of our elements contain the target offset? - for i, elem := range tv.Values { - - switch elem.(type) { - case *objectVal, *arrayVal: - // okay - default: - continue - } - - if elem.Range().ContainsOffset(offset) { - return append(navigationStepsRev(elem, offset), fmt.Sprintf("[%d]", i)) - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/parser.go b/vendor/github.com/hashicorp/hcl/v2/json/parser.go deleted file mode 100644 index 6b7420b9eb..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/parser.go +++ /dev/null @@ -1,504 +0,0 @@ -package json - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -func parseFileContent(buf []byte, filename string, start hcl.Pos) (node, hcl.Diagnostics) { - tokens := scan(buf, pos{Filename: filename, Pos: start}) - p := newPeeker(tokens) - node, diags := parseValue(p) - if len(diags) == 0 && p.Peek().Type != tokenEOF { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous data after value", - Detail: "Extra characters appear after the JSON value.", - Subject: p.Peek().Range.Ptr(), - }) - } - return node, diags -} - -func parseExpression(buf []byte, filename string, start hcl.Pos) (node, hcl.Diagnostics) { - tokens := scan(buf, pos{Filename: filename, Pos: start}) - p := newPeeker(tokens) - node, diags := parseValue(p) - if len(diags) == 0 && p.Peek().Type != tokenEOF { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous data after value", - Detail: "Extra characters appear after the JSON value.", - Subject: p.Peek().Range.Ptr(), - }) - } - return node, diags -} - -func parseValue(p *peeker) (node, hcl.Diagnostics) { - tok := p.Peek() - - wrapInvalid := func(n node, diags hcl.Diagnostics) (node, hcl.Diagnostics) { - if n != nil { - return n, diags - } - return invalidVal{tok.Range}, diags - } - - switch tok.Type { - case tokenBraceO: - return wrapInvalid(parseObject(p)) - case tokenBrackO: - return wrapInvalid(parseArray(p)) - case tokenNumber: - return wrapInvalid(parseNumber(p)) - case tokenString: - return wrapInvalid(parseString(p)) - case tokenKeyword: - return wrapInvalid(parseKeyword(p)) - case tokenBraceC: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Missing JSON value", - Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", - Subject: &tok.Range, - }, - }) - case tokenBrackC: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Missing array element value", - Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", - Subject: &tok.Range, - }, - }) - case tokenEOF: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Missing value", - Detail: "The JSON data ends prematurely.", - Subject: &tok.Range, - }, - }) - default: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid start of value", - Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", - Subject: &tok.Range, - }, - }) - } -} - -func tokenCanStartValue(tok token) bool { - 
switch tok.Type { - case tokenBraceO, tokenBrackO, tokenNumber, tokenString, tokenKeyword: - return true - default: - return false - } -} - -func parseObject(p *peeker) (node, hcl.Diagnostics) { - var diags hcl.Diagnostics - - open := p.Read() - attrs := []*objectAttr{} - - // recover is used to shift the peeker to what seems to be the end of - // our object, so that when we encounter an error we leave the peeker - // at a reasonable point in the token stream to continue parsing. - recover := func(tok token) { - open := 1 - for { - switch tok.Type { - case tokenBraceO: - open++ - case tokenBraceC: - open-- - if open <= 1 { - return - } - case tokenEOF: - // Ran out of source before we were able to recover, - // so we'll bail here and let the caller deal with it. - return - } - tok = p.Read() - } - } - -Token: - for { - if p.Peek().Type == tokenBraceC { - break Token - } - - keyNode, keyDiags := parseValue(p) - diags = diags.Extend(keyDiags) - if keyNode == nil { - return nil, diags - } - - keyStrNode, ok := keyNode.(*stringVal) - if !ok { - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object property name", - Detail: "A JSON object property name must be a string", - Subject: keyNode.StartRange().Ptr(), - }) - } - - key := keyStrNode.Value - - colon := p.Read() - if colon.Type != tokenColon { - recover(colon) - - if colon.Type == tokenBraceC || colon.Type == tokenComma { - // Catch common mistake of using braces instead of brackets - // for an object. - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing object value", - Detail: "A JSON object attribute must have a value, introduced by a colon.", - Subject: &colon.Range, - }) - } - - if colon.Type == tokenEquals { - // Possible confusion with native HCL syntax. - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing property value colon", - Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.", - Subject: &colon.Range, - }) - } - - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing property value colon", - Detail: "A colon must appear between an object property's name and its value.", - Subject: &colon.Range, - }) - } - - valNode, valDiags := parseValue(p) - diags = diags.Extend(valDiags) - if valNode == nil { - return nil, diags - } - - attrs = append(attrs, &objectAttr{ - Name: key, - Value: valNode, - NameRange: keyStrNode.SrcRange, - }) - - switch p.Peek().Type { - case tokenComma: - comma := p.Read() - if p.Peek().Type == tokenBraceC { - // Special error message for this common mistake - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Trailing comma in object", - Detail: "JSON does not permit a trailing comma after the final property in an object.", - Subject: &comma.Range, - }) - } - continue Token - case tokenEOF: - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unclosed object", - Detail: "No closing brace was found for this JSON object.", - Subject: &open.Range, - }) - case tokenBrackC: - // Consume the bracket anyway, so that we don't return with the peeker - // at a strange place. 
- p.Read() - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Mismatched braces", - Detail: "A JSON object must be closed with a brace, not a bracket.", - Subject: p.Peek().Range.Ptr(), - }) - case tokenBraceC: - break Token - default: - recover(p.Read()) - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing attribute separator comma", - Detail: "A comma must appear between each property definition in an object.", - Subject: p.Peek().Range.Ptr(), - }) - } - - } - - close := p.Read() - return &objectVal{ - Attrs: attrs, - SrcRange: hcl.RangeBetween(open.Range, close.Range), - OpenRange: open.Range, - CloseRange: close.Range, - }, diags -} - -func parseArray(p *peeker) (node, hcl.Diagnostics) { - var diags hcl.Diagnostics - - open := p.Read() - vals := []node{} - - // recover is used to shift the peeker to what seems to be the end of - // our array, so that when we encounter an error we leave the peeker - // at a reasonable point in the token stream to continue parsing. - recover := func(tok token) { - open := 1 - for { - switch tok.Type { - case tokenBrackO: - open++ - case tokenBrackC: - open-- - if open <= 1 { - return - } - case tokenEOF: - // Ran out of source before we were able to recover, - // so we'll bail here and let the caller deal with it. - return - } - tok = p.Read() - } - } - -Token: - for { - if p.Peek().Type == tokenBrackC { - break Token - } - - valNode, valDiags := parseValue(p) - diags = diags.Extend(valDiags) - if valNode == nil { - return nil, diags - } - - vals = append(vals, valNode) - - switch p.Peek().Type { - case tokenComma: - comma := p.Read() - if p.Peek().Type == tokenBrackC { - // Special error message for this common mistake - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Trailing comma in array", - Detail: "JSON does not permit a trailing comma after the final value in an array.", - Subject: &comma.Range, - }) - } - continue Token - case tokenColon: - recover(p.Read()) - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid array value", - Detail: "A colon is not used to introduce values in a JSON array.", - Subject: p.Peek().Range.Ptr(), - }) - case tokenEOF: - recover(p.Read()) - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unclosed array", - Detail: "No closing bracket was found for this JSON array.", - Subject: &open.Range, - }) - case tokenBraceC: - recover(p.Read()) - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Mismatched brackets", - Detail: "A JSON array must be closed with a bracket, not a brace.", - Subject: p.Peek().Range.Ptr(), - }) - case tokenBrackC: - break Token - default: - recover(p.Read()) - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing value separator comma", - Detail: "A comma must appear between each value in an array.", - Subject: p.Peek().Range.Ptr(), - }) - } - - } - - close := p.Read() - return &arrayVal{ - Values: vals, - SrcRange: hcl.RangeBetween(open.Range, close.Range), - OpenRange: open.Range, - }, diags -} - -func parseNumber(p *peeker) (node, hcl.Diagnostics) { - tok := p.Read() - - // Use encoding/json to validate the number syntax. - // TODO: Do this more directly to produce better diagnostics.
- var num json.Number - err := json.Unmarshal(tok.Bytes, &num) - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON number", - Detail: "There is a syntax error in the given JSON number.", - Subject: &tok.Range, - }, - } - } - - // We want to guarantee that we parse numbers the same way as cty (and thus - // native syntax HCL) would here, so we'll use the cty parser even though - // in most other cases we don't actually introduce cty concepts until - // decoding time. We'll unwrap the parsed float immediately afterwards, so - // the cty value is just a temporary helper. - nv, err := cty.ParseNumberVal(string(num)) - if err != nil { - // Should never happen if above passed, since JSON numbers are a subset - // of what cty can parse... - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON number", - Detail: "There is a syntax error in the given JSON number.", - Subject: &tok.Range, - }, - } - } - - return &numberVal{ - Value: nv.AsBigFloat(), - SrcRange: tok.Range, - }, nil -} - -func parseString(p *peeker) (node, hcl.Diagnostics) { - tok := p.Read() - var str string - err := json.Unmarshal(tok.Bytes, &str) - - if err != nil { - var errRange hcl.Range - if serr, ok := err.(*json.SyntaxError); ok { - errOfs := serr.Offset - errPos := tok.Range.Start - errPos.Byte += int(errOfs) - - // TODO: Use the byte offset to properly count unicode - // characters for the column, and mark the whole of the - // character that was wrong as part of our range. - errPos.Column += int(errOfs) - - errEndPos := errPos - errEndPos.Byte++ - errEndPos.Column++ - - errRange = hcl.Range{ - Filename: tok.Range.Filename, - Start: errPos, - End: errEndPos, - } - } else { - errRange = tok.Range - } - - var contextRange *hcl.Range - if errRange != tok.Range { - contextRange = &tok.Range - } - - // FIXME: Eventually we should parse strings directly here so - // we can produce a more useful error message in the face of things - // such as invalid escapes, etc.
- return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON string", - Detail: "There is a syntax error in the given JSON string.", - Subject: &errRange, - Context: contextRange, - }, - } - } - - return &stringVal{ - Value: str, - SrcRange: tok.Range, - }, nil -} - -func parseKeyword(p *peeker) (node, hcl.Diagnostics) { - tok := p.Read() - s := string(tok.Bytes) - - switch s { - case "true": - return &booleanVal{ - Value: true, - SrcRange: tok.Range, - }, nil - case "false": - return &booleanVal{ - Value: false, - SrcRange: tok.Range, - }, nil - case "null": - return &nullVal{ - SrcRange: tok.Range, - }, nil - case "undefined", "NaN", "Infinity": - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON keyword", - Detail: fmt.Sprintf("The JavaScript identifier %q cannot be used in JSON.", s), - Subject: &tok.Range, - }, - } - default: - var dym string - if suggest := keywordSuggestion(s); suggest != "" { - dym = fmt.Sprintf(" Did you mean %q?", suggest) - } - - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON keyword", - Detail: fmt.Sprintf("%q is not a valid JSON keyword.%s", s, dym), - Subject: &tok.Range, - }, - } - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/peeker.go b/vendor/github.com/hashicorp/hcl/v2/json/peeker.go deleted file mode 100644 index fc7bbf5827..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/peeker.go +++ /dev/null @@ -1,25 +0,0 @@ -package json - -type peeker struct { - tokens []token - pos int -} - -func newPeeker(tokens []token) *peeker { - return &peeker{ - tokens: tokens, - pos: 0, - } -} - -func (p *peeker) Peek() token { - return p.tokens[p.pos] -} - -func (p *peeker) Read() token { - ret := p.tokens[p.pos] - if ret.Type != tokenEOF { - p.pos++ - } - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/public.go b/vendor/github.com/hashicorp/hcl/v2/json/public.go deleted file mode 100644 index d1e4faf59b..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/public.go +++ /dev/null @@ -1,117 +0,0 @@ -package json - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/hashicorp/hcl/v2" -) - -// Parse attempts to parse the given buffer as JSON and, if successful, returns -// a hcl.File for the HCL configuration represented by it. - -// -// This is not a generic JSON parser. Instead, it deals only with the profile -// of JSON used to express HCL configuration. -// -// The returned file is valid only if the returned diagnostics returns false -// from its HasErrors method. If HasErrors returns true, the file represents -// the subset of data that was able to be parsed, which may be none. -func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { - return ParseWithStartPos(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) -} - -// ParseWithStartPos attempts to parse like json.Parse, but unlike json.Parse -// you can pass a start position of the given JSON as a hcl.Pos. - -// -// In most cases json.Parse should be sufficient, but it can be useful for parsing -// a part of JSON with correct positions.
-func ParseWithStartPos(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) { - rootNode, diags := parseFileContent(src, filename, start) - - switch rootNode.(type) { - case *objectVal, *arrayVal: - // okay - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Root value must be object", - Detail: "The root value in a JSON-based configuration must be either a JSON object or a JSON array of objects.", - Subject: rootNode.StartRange().Ptr(), - }) - - // Since we've already produced an error message for this being - // invalid, we'll return an empty placeholder here so that trying to - // extract content from our root body won't produce a redundant - // error saying the same thing again in more general terms. - fakePos := hcl.Pos{ - Byte: 0, - Line: 1, - Column: 1, - } - fakeRange := hcl.Range{ - Filename: filename, - Start: fakePos, - End: fakePos, - } - rootNode = &objectVal{ - Attrs: []*objectAttr{}, - SrcRange: fakeRange, - OpenRange: fakeRange, - } - } - - file := &hcl.File{ - Body: &body{ - val: rootNode, - }, - Bytes: src, - Nav: navigation{rootNode}, - } - return file, diags -} - -// ParseExpression parses the given buffer as a standalone JSON expression, -// returning it as an instance of Expression. -func ParseExpression(src []byte, filename string) (hcl.Expression, hcl.Diagnostics) { - return ParseExpressionWithStartPos(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) -} - -// ParseExpressionWithStartPos parses like json.ParseExpression, but unlike -// json.ParseExpression you can pass a start position of the given JSON -// expression as a hcl.Pos. -func ParseExpressionWithStartPos(src []byte, filename string, start hcl.Pos) (hcl.Expression, hcl.Diagnostics) { - node, diags := parseExpression(src, filename, start) - return &expression{src: node}, diags -} - -// ParseFile is a convenience wrapper around Parse that first attempts to load -// data from the given filename, passing the result to Parse if successful. -// -// If the file cannot be read, an error diagnostic with nil context is returned. 
-func ParseFile(filename string) (*hcl.File, hcl.Diagnostics) { - f, err := os.Open(filename) - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Failed to open file", - Detail: fmt.Sprintf("The file %q could not be opened.", filename), - }, - } - } - defer f.Close() - - src, err := ioutil.ReadAll(f) - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Failed to read file", - Detail: fmt.Sprintf("The file %q was opened, but an error occurred while reading it.", filename), - }, - } - } - - return Parse(src, filename) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/scanner.go b/vendor/github.com/hashicorp/hcl/v2/json/scanner.go deleted file mode 100644 index ff78a9b587..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/scanner.go +++ /dev/null @@ -1,306 +0,0 @@ -package json - -import ( - "fmt" - - "github.com/apparentlymart/go-textseg/v12/textseg" - "github.com/hashicorp/hcl/v2" -) - -//go:generate stringer -type tokenType scanner.go -type tokenType rune - -const ( - tokenBraceO tokenType = '{' - tokenBraceC tokenType = '}' - tokenBrackO tokenType = '[' - tokenBrackC tokenType = ']' - tokenComma tokenType = ',' - tokenColon tokenType = ':' - tokenKeyword tokenType = 'K' - tokenString tokenType = 'S' - tokenNumber tokenType = 'N' - tokenEOF tokenType = '␄' - tokenInvalid tokenType = 0 - tokenEquals tokenType = '=' // used only for reminding the user of JSON syntax -) - -type token struct { - Type tokenType - Bytes []byte - Range hcl.Range -} - -// scan returns the primary tokens for the given JSON buffer in sequence. -// -// The responsibility of this pass is to just mark the slices of the buffer -// as being of various types. It is lax in how it interprets the multi-byte -// token types keyword, string and number, preferring to capture erroneous -// extra bytes that we presume the user intended to be part of the token -// so that we can generate more helpful diagnostics in the parser. -func scan(buf []byte, start pos) []token { - var tokens []token - p := start - for { - if len(buf) == 0 { - tokens = append(tokens, token{ - Type: tokenEOF, - Bytes: nil, - Range: posRange(p, p), - }) - return tokens - } - - buf, p = skipWhitespace(buf, p) - - if len(buf) == 0 { - tokens = append(tokens, token{ - Type: tokenEOF, - Bytes: nil, - Range: posRange(p, p), - }) - return tokens - } - - start = p - - first := buf[0] - switch { - case first == '{' || first == '}' || first == '[' || first == ']' || first == ',' || first == ':' || first == '=': - p.Pos.Column++ - p.Pos.Byte++ - tokens = append(tokens, token{ - Type: tokenType(first), - Bytes: buf[0:1], - Range: posRange(start, p), - }) - buf = buf[1:] - case first == '"': - var tokBuf []byte - tokBuf, buf, p = scanString(buf, p) - tokens = append(tokens, token{ - Type: tokenString, - Bytes: tokBuf, - Range: posRange(start, p), - }) - case byteCanStartNumber(first): - var tokBuf []byte - tokBuf, buf, p = scanNumber(buf, p) - tokens = append(tokens, token{ - Type: tokenNumber, - Bytes: tokBuf, - Range: posRange(start, p), - }) - case byteCanStartKeyword(first): - var tokBuf []byte - tokBuf, buf, p = scanKeyword(buf, p) - tokens = append(tokens, token{ - Type: tokenKeyword, - Bytes: tokBuf, - Range: posRange(start, p), - }) - default: - tokens = append(tokens, token{ - Type: tokenInvalid, - Bytes: buf[:1], - Range: start.Range(1, 1), - }) - // If we've encountered an invalid token then we might as well stop - // scanning since the parser won't proceed beyond this point.
- // We insert a synthetic EOF marker here to match the expectations - // of consumers of this data structure. - p.Pos.Column++ - p.Pos.Byte++ - tokens = append(tokens, token{ - Type: tokenEOF, - Bytes: nil, - Range: posRange(p, p), - }) - return tokens - } - } -} - -func byteCanStartNumber(b byte) bool { - switch b { - // We are slightly more tolerant than JSON requires here since we - // expect the parser will make a stricter interpretation of the - // number bytes, but we specifically don't allow 'e' or 'E' here - // since we want the scanner to treat that as the start of an - // invalid keyword instead, to produce more intelligible error messages. - case '-', '+', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - return true - default: - return false - } -} - -func scanNumber(buf []byte, start pos) ([]byte, []byte, pos) { - // The scanner doesn't check that the sequence of digit-ish bytes is - // in a valid order. The parser must do this when decoding a number - // token. - var i int - p := start -Byte: - for i = 0; i < len(buf); i++ { - switch buf[i] { - case '-', '+', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - p.Pos.Byte++ - p.Pos.Column++ - default: - break Byte - } - } - return buf[:i], buf[i:], p -} - -func byteCanStartKeyword(b byte) bool { - switch { - // We allow any sequence of alphabetical characters here, even though - // JSON is more constrained, so that we can collect what we presume - // the user intended to be a single keyword and then check its validity - // in the parser, where we can generate better diagnostics. - // So e.g. we want to be able to say: - // unrecognized keyword "True". Did you mean "true"? - case isAlphabetical(b): - return true - default: - return false - } -} - -func scanKeyword(buf []byte, start pos) ([]byte, []byte, pos) { - var i int - p := start -Byte: - for i = 0; i < len(buf); i++ { - b := buf[i] - switch { - case isAlphabetical(b) || b == '_': - p.Pos.Byte++ - p.Pos.Column++ - default: - break Byte - } - } - return buf[:i], buf[i:], p -} - -func scanString(buf []byte, start pos) ([]byte, []byte, pos) { - // The scanner doesn't validate correct use of escapes, etc. It pays - // attention to escapes only for the purpose of identifying the closing - // quote character. It's the parser's responsibility to do proper - // validation. - // - // The scanner also doesn't specifically detect unterminated string - // literals, though they can be identified in the parser by checking if - // the final byte in a string token is the double-quote character. - - // Skip the opening quote symbol - i := 1 - p := start - p.Pos.Byte++ - p.Pos.Column++ - escaping := false -Byte: - for i < len(buf) { - b := buf[i] - - switch { - case b == '\\': - escaping = !escaping - p.Pos.Byte++ - p.Pos.Column++ - i++ - case b == '"': - p.Pos.Byte++ - p.Pos.Column++ - i++ - if !escaping { - break Byte - } - escaping = false - case b < 32: - break Byte - default: - // Advance by one grapheme cluster, so that we consider each - // grapheme to be a "column". - // Ignoring error because this scanner cannot produce errors. 
- advance, _, _ := textseg.ScanGraphemeClusters(buf[i:], true) - - p.Pos.Byte += advance - p.Pos.Column++ - i += advance - - escaping = false - } - } - return buf[:i], buf[i:], p -} - -func skipWhitespace(buf []byte, start pos) ([]byte, pos) { - var i int - p := start -Byte: - for i = 0; i < len(buf); i++ { - switch buf[i] { - case ' ': - p.Pos.Byte++ - p.Pos.Column++ - case '\n': - p.Pos.Byte++ - p.Pos.Column = 1 - p.Pos.Line++ - case '\r': - // For the purpose of line/column counting we consider a - // carriage return to take up no space, assuming that it will - // be paired up with a newline (on Windows, for example) that - // will account for both of them. - p.Pos.Byte++ - case '\t': - // We arbitrarily count a tab as if it were two spaces, because - // we need to choose _some_ number here. This means any system - // that renders code on-screen with markers must itself treat - // tabs as a pair of spaces for rendering purposes, or instead - // use the byte offset and back into its own column position. - p.Pos.Byte++ - p.Pos.Column += 2 - default: - break Byte - } - } - return buf[i:], p -} - -type pos struct { - Filename string - Pos hcl.Pos -} - -func (p *pos) Range(byteLen, charLen int) hcl.Range { - start := p.Pos - end := p.Pos - end.Byte += byteLen - end.Column += charLen - return hcl.Range{ - Filename: p.Filename, - Start: start, - End: end, - } -} - -func posRange(start, end pos) hcl.Range { - return hcl.Range{ - Filename: start.Filename, - Start: start.Pos, - End: end.Pos, - } -} - -func (t token) GoString() string { - return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range) -} - -func isAlphabetical(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/spec.md b/vendor/github.com/hashicorp/hcl/v2/json/spec.md deleted file mode 100644 index dac5729d48..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/spec.md +++ /dev/null @@ -1,405 +0,0 @@ -# HCL JSON Syntax Specification - -This is the specification for the JSON serialization for hcl. HCL is a system -for defining configuration languages for applications. The HCL information -model is designed to support multiple concrete syntaxes for configuration, -and this JSON-based format complements [the native syntax](../hclsyntax/spec.md) -by being easy to machine-generate, whereas the native syntax is oriented -towards human authoring and maintenance. - -This syntax is defined in terms of JSON as defined in -[RFC7159](https://tools.ietf.org/html/rfc7159). As such it inherits the JSON -grammar as-is, and merely defines a specific methodology for interpreting -JSON constructs into HCL structural elements and expressions. - -This mapping is defined such that valid JSON-serialized HCL input can be -_produced_ using standard JSON implementations in various programming languages. -_Parsing_ such JSON has some additional constraints beyond what is normally -supported by JSON parsers, so a specialized parser may be required that -is able to: - -- Preserve the relative ordering of properties defined in an object. - Preserve multiple definitions of the same property name. - Preserve numeric values to the precision required by the number type - in [the HCL syntax-agnostic information model](../spec.md). - Retain source location information for parsed tokens/constructs in order - to produce good error messages.
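For orientation, here is a minimal sketch of consuming this JSON syntax from Go. It assumes the `hcl/v2/json` and `gohcl` packages; the `Config` struct and the file name are hypothetical, used only to illustrate the attribute example later in this spec:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/json"
)

// Config is a hypothetical schema with a single "foo" attribute.
type Config struct {
	Foo string `hcl:"foo"`
}

func main() {
	src := []byte(`{"foo": "bar baz"}`)

	// json.Parse (defined in public.go above) returns an hcl.File whose
	// Body implements the syntax-agnostic information model.
	file, diags := json.Parse(src, "config.json")
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	var cfg Config
	if diags := gohcl.DecodeBody(file.Body, nil, &cfg); diags.HasErrors() {
		log.Fatal(diags)
	}
	fmt.Println(cfg.Foo) // prints: bar baz
}
```

Here `gohcl.DecodeBody` derives the body schema from the struct tags, which is one way the schema described in the next section gets constructed in practice.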
- -## Structural Elements - -[The HCL syntax-agnostic information model](../spec.md) defines a _body_ as an -abstract container for attribute definitions and child blocks. A body is -represented in JSON as either a single JSON object or a JSON array of objects. - -Body processing is in terms of JSON object properties, visited in the order -they appear in the input. Where a body is represented by a single JSON object, -the properties of that object are visited in order. Where a body is -represented by a JSON array, each of its elements are visited in order and -each element has its properties visited in order. If any element of the array -is not a JSON object then the input is erroneous. - -When a body is being processed in the _dynamic attributes_ mode, the allowance -of a JSON array in the previous paragraph does not apply and instead a single -JSON object is always required. - -As defined in the language-agnostic model, body processing is in terms -of a schema which provides context for interpreting the body's content. For -JSON bodies, the schema is crucial to allow differentiation of attribute -definitions and block definitions, both of which are represented via object -properties. - -The special property name `"//"`, when used in an object representing a HCL -body, is parsed and ignored. A property with this name can be used to -include human-readable comments. (This special property name is _not_ -processed in this way for any _other_ HCL constructs that are represented as -JSON objects.) - -### Attributes - -Where the given schema describes an attribute with a given name, the object -property with the matching name — if present — serves as the attribute's -definition. - -When a body is being processed in the _dynamic attributes_ mode, each object -property serves as an attribute definition for the attribute whose name -matches the property name. - -The value of an attribute definition property is interpreted as an _expression_, -as described in a later section. - -Given a schema that calls for an attribute named "foo", a JSON object like -the following provides a definition for that attribute: - -```json -{ - "foo": "bar baz" -} -``` - -### Blocks - -Where the given schema describes a block with a given type name, each object -property with the matching name serves as a definition of zero or more blocks -of that type. - -Processing of child blocks is in terms of nested JSON objects and arrays. -If the schema defines one or more _labels_ for the block type, a nested JSON -object or JSON array of objects is required for each labelling level. These -are flattened to a single ordered sequence of object properties using the -same algorithm as for body content as defined above. Each object property -serves as a label value at the corresponding level. - -After any labelling levels, the next nested value is either a JSON object -representing a single block body, or a JSON array of JSON objects that each -represent a single block body. Use of an array accommodates the definition -of multiple blocks that have identical type and labels. - -Given a schema that calls for a block type named "foo" with no labels, the -following JSON objects are all valid definitions of zero or more blocks of this -type: - -```json -{ - "foo": { - "child_attr": "baz" - } -} -``` - -```json -{ - "foo": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] -} -``` - -```json -{ - "foo": [] -} -``` - -The first of these defines a single child block of type "foo". The second -defines _two_ such blocks. 
The final example shows a degenerate definition -of zero blocks, though generators should prefer to omit the property entirely -in this scenario. - -Given a schema that calls for a block type named "foo" with _two_ labels, the -extra label levels must be represented as objects or arrays of objects as in -the following examples: - -```json -{ - "foo": { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - }, - "boz": { - "baz": { - "child_attr": "baz" - } - } - } -} -``` - -```json -{ - "foo": { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - }, - "boz": { - "baz": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] - } - } -} -``` - -```json -{ - "foo": [ - { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - } - }, - { - "bar": { - "baz": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] - } - } - ] -} -``` - -```json -{ - "foo": { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - }, - "bar": { - "baz": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] - } - } -} -``` - -Arrays can be introduced at either the label definition or block body -definition levels to define multiple definitions of the same block type -or labels while preserving order. - -A JSON HCL parser _must_ support duplicate definitions of the same property -name within a single object, preserving all of them and the relative ordering -between them. The array-based forms are also required so that JSON HCL -configurations can be produced with JSON producing libraries that are not -able to preserve property definition order and multiple definitions of -the same property. - -## Expressions - -JSON lacks a native expression syntax, so the HCL JSON syntax instead defines -a mapping for each of the JSON value types, including a special mapping for -strings that allows optional use of arbitrary expressions. - -### Objects - -When interpreted as an expression, a JSON object represents a value of a HCL -object type. - -Each property of the JSON object represents an attribute of the HCL object type. -The property name string given in the JSON input is interpreted as a string -expression as described below, and its result is converted to string as defined -by the syntax-agnostic information model. If such a conversion is not possible, -an error is produced and evaluation fails. - -An instance of the constructed object type is then created, whose values -are interpreted by again recursively applying the mapping rules defined in -this section to each of the property values. - -If any evaluated property name strings produce null values, an error is -produced and evaluation fails. If any produce _unknown_ values, the _entire -object's_ result is an unknown value of the dynamic pseudo-type, signalling -that the type of the object cannot be determined. - -It is an error to define the same property name multiple times within a single -JSON object interpreted as an expression. In full expression mode, this -constraint applies to the name expression results after conversion to string, -rather than the raw string that may contain interpolation expressions. - -### Arrays - -When interpreted as an expression, a JSON array represents a value of a HCL -tuple type. - -Each element of the JSON array represents an element of the HCL tuple type. 
-The tuple type is constructed by enumerating the JSON array elements, creating -for each an element whose type is the result of recursively applying the -expression mapping rules. Correspondence is preserved between the array element -indices and the tuple element indices. - -An instance of the constructed tuple type is then created, whose values are -interpreted by again recursively applying the mapping rules defined in this -section. - -### Numbers - -When interpreted as an expression, a JSON number represents a HCL number value. - -HCL numbers are arbitrary-precision decimal values, so a JSON HCL parser must -be able to translate exactly the value given to a number of corresponding -precision, within the constraints set by the HCL syntax-agnostic information -model. - -In practice, off-the-shelf JSON serializers often do not support customizing the -processing of numbers, and instead force processing as 32-bit or 64-bit -floating point values. - -A _producer_ of JSON HCL that uses such a serializer can provide numeric values -as JSON strings where they have precision too great for representation in the -serializer's chosen numeric type in situations where the result will be -converted to number (using the standard conversion rules) by a calling -application. - -Alternatively, for expressions that are evaluated in full expression mode an -embedded template interpolation can be used to faithfully represent a number, -such as `"${1e150}"`, which will then be evaluated by the underlying HCL native -syntax expression evaluator. - -### Boolean Values - -The JSON boolean values `true` and `false`, when interpreted as expressions, -represent the corresponding HCL boolean values. - -### The Null Value - -The JSON value `null`, when interpreted as an expression, represents a -HCL null value of the dynamic pseudo-type. - -### Strings - -When interpreted as an expression, a JSON string may be interpreted in one of -two ways depending on the evaluation mode. - -If evaluating in literal-only mode (as defined by the syntax-agnostic -information model) the literal string is interpreted directly as a HCL string -value, by directly using the exact sequence of unicode characters represented. -Template interpolations and directives MUST NOT be processed in this mode, -allowing any characters that appear as introduction sequences to pass through -literally: - -```json -"Hello world! Template sequences like ${ are not interpreted here." -``` - -When evaluating in full expression mode (again, as defined by the syntax-agnostic information model) the literal string is instead interpreted as a -_standalone template_ in the HCL Native Syntax. The expression evaluation -result is then the direct result of evaluating that template with the current -variable scope and function table. - -```json -"Hello, ${name}! Template sequences are interpreted in full expression mode." -``` - -In particular the _Template Interpolation Unwrapping_ requirement from the -HCL native syntax specification must be implemented, allowing the use of -single-interpolation templates to represent expressions that would not -otherwise be representable in JSON, such as the following example where -the result must be a number, rather than a string representation of a number: - -```json -"${ a + b }" -``` - -## Static Analysis - -The HCL static analysis operations are implemented for JSON values that -represent expressions, as described in the following sections.
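For example, the static traversal analysis described below can be exercised through `hcl.AbsTraversalForExpr`. This small sketch (the traversal string is illustrative) relies on the `AsTraversal` implementation in structure.go further down:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/json"
)

func main() {
	// A JSON string whose content is parsed as a native syntax
	// expression when static traversal analysis is requested.
	expr, diags := json.ParseExpression([]byte(`"aws_instance.example.id"`), "example.json")
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	trav, diags := hcl.AbsTraversalForExpr(expr)
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	fmt.Println(trav.RootName()) // prints: aws_instance
}
```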
- -Due to the limited expressive power of the JSON syntax alone, the use of these -static analysis functions rather than normal expression evaluation serves -as additional context for how a JSON value is to be interpreted, which means -that static analyses can result in a different interpretation of a given -expression than normal evaluation. - -### Static List - -An expression interpreted as a static list must be a JSON array. Each of the -values in the array is interpreted as an expression and returned. - -### Static Map - -An expression interpreted as a static map must be a JSON object. Each of the -key/value pairs in the object is presented as a pair of expressions. Since -object property names are always strings, evaluating the key expression with -a non-`nil` evaluation context will evaluate any template sequences given -in the property name. - -### Static Call - -An expression interpreted as a static call must be a string. The content of -the string is interpreted as a native syntax expression (not a _template_, -unlike normal evaluation) and then the static call analysis is delegated to -that expression. - -If the original expression is not a string or its contents cannot be parsed -as a native syntax expression then static call analysis is not supported. - -### Static Traversal - -An expression interpreted as a static traversal must be a string. The content -of the string is interpreted as a native syntax expression (not a _template_, -unlike normal evaluation) and then static traversal analysis is delegated -to that expression. - -If the original expression is not a string or its contents cannot be parsed -as a native syntax expression then static traversal analysis is not supported. diff --git a/vendor/github.com/hashicorp/hcl/v2/json/structure.go b/vendor/github.com/hashicorp/hcl/v2/json/structure.go deleted file mode 100644 index 76c9d73999..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/structure.go +++ /dev/null @@ -1,637 +0,0 @@ -package json - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// body is the implementation of "Body" used for files processed with the JSON -// parser. -type body struct { - val node - - // If non-nil, the keys of this map cause the corresponding attributes to - // be treated as non-existing. This is used when Body.PartialContent is - // called, to produce the "remaining content" Body. - hiddenAttrs map[string]struct{} -} - -// expression is the implementation of "Expression" used for files processed -// with the JSON parser. -type expression struct { - src node -} - -func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - content, newBody, diags := b.PartialContent(schema) - - hiddenAttrs := newBody.(*body).hiddenAttrs - - var nameSuggestions []string - for _, attrS := range schema.Attributes { - if _, ok := hiddenAttrs[attrS.Name]; !ok { - // Only suggest an attribute name if we didn't use it already. - nameSuggestions = append(nameSuggestions, attrS.Name) - } - } - for _, blockS := range schema.Blocks { - // Blocks can appear multiple times, so we'll suggest their type - // names regardless of whether they've already been used. - nameSuggestions = append(nameSuggestions, blockS.Type) - } - - jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) - diags = append(diags, attrDiags...)
- - for _, attr := range jsonAttrs { - k := attr.Name - if k == "//" { - // Ignore "//" keys in objects representing bodies, to allow - // their use as comments. - continue - } - - if _, ok := hiddenAttrs[k]; !ok { - suggestion := nameSuggestion(k, nameSuggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous JSON object property", - Detail: fmt.Sprintf("No argument or block type is named %q.%s", k, suggestion), - Subject: &attr.NameRange, - Context: attr.Range().Ptr(), - }) - } - } - - return content, diags -} - -func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - var diags hcl.Diagnostics - - jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) - diags = append(diags, attrDiags...) - - usedNames := map[string]struct{}{} - if b.hiddenAttrs != nil { - for k := range b.hiddenAttrs { - usedNames[k] = struct{}{} - } - } - - content := &hcl.BodyContent{ - Attributes: map[string]*hcl.Attribute{}, - Blocks: nil, - - MissingItemRange: b.MissingItemRange(), - } - - // Create some more convenient data structures for our work below. - attrSchemas := map[string]hcl.AttributeSchema{} - blockSchemas := map[string]hcl.BlockHeaderSchema{} - for _, attrS := range schema.Attributes { - attrSchemas[attrS.Name] = attrS - } - for _, blockS := range schema.Blocks { - blockSchemas[blockS.Type] = blockS - } - - for _, jsonAttr := range jsonAttrs { - attrName := jsonAttr.Name - if _, used := b.hiddenAttrs[attrName]; used { - continue - } - - if attrS, defined := attrSchemas[attrName]; defined { - if existing, exists := content.Attributes[attrName]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate argument", - Detail: fmt.Sprintf("The argument %q was already set at %s.", attrName, existing.Range), - Subject: &jsonAttr.NameRange, - Context: jsonAttr.Range().Ptr(), - }) - continue - } - - content.Attributes[attrS.Name] = &hcl.Attribute{ - Name: attrS.Name, - Expr: &expression{src: jsonAttr.Value}, - Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), - NameRange: jsonAttr.NameRange, - } - usedNames[attrName] = struct{}{} - - } else if blockS, defined := blockSchemas[attrName]; defined { - bv := jsonAttr.Value - blockDiags := b.unpackBlock(bv, blockS.Type, &jsonAttr.NameRange, blockS.LabelNames, nil, nil, &content.Blocks) - diags = append(diags, blockDiags...) - usedNames[attrName] = struct{}{} - } - - // We ignore anything that isn't defined because that's the - // PartialContent contract. The Content method will catch leftovers. - } - - // Make sure we got all the required attributes. - for _, attrS := range schema.Attributes { - if !attrS.Required { - continue - } - if _, defined := content.Attributes[attrS.Name]; !defined { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing required argument", - Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name), - Subject: b.MissingItemRange().Ptr(), - }) - } - } - - unusedBody := &body{ - val: b.val, - hiddenAttrs: usedNames, - } - - return content, unusedBody, diags -} - -// JustAttributes for JSON bodies interprets all properties of the wrapped -// JSON object as attributes and returns them. 
-func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - var diags hcl.Diagnostics - attrs := make(map[string]*hcl.Attribute) - - obj, ok := b.val.(*objectVal) - if !ok { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: "A JSON object is required here, setting the arguments for this block.", - Subject: b.val.StartRange().Ptr(), - }) - return attrs, diags - } - - for _, jsonAttr := range obj.Attrs { - name := jsonAttr.Name - if name == "//" { - // Ignore "//" keys in objects representing bodies, to allow - // their use as comments. - continue - } - - if _, hidden := b.hiddenAttrs[name]; hidden { - continue - } - - if existing, exists := attrs[name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate attribute definition", - Detail: fmt.Sprintf("The argument %q was already set at %s.", name, existing.Range), - Subject: &jsonAttr.NameRange, - }) - continue - } - - attrs[name] = &hcl.Attribute{ - Name: name, - Expr: &expression{src: jsonAttr.Value}, - Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), - NameRange: jsonAttr.NameRange, - } - } - - // No diagnostics possible here, since the parser already took care of - // finding duplicates and every JSON value can be a valid attribute value. - return attrs, diags -} - -func (b *body) MissingItemRange() hcl.Range { - switch tv := b.val.(type) { - case *objectVal: - return tv.CloseRange - case *arrayVal: - return tv.OpenRange - default: - // Should not happen in correct operation, but might show up if the - // input is invalid and we are producing partial results. - return tv.StartRange() - } -} - -func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labelsLeft []string, labelsUsed []string, labelRanges []hcl.Range, blocks *hcl.Blocks) (diags hcl.Diagnostics) { - if len(labelsLeft) > 0 { - labelName := labelsLeft[0] - jsonAttrs, attrDiags := b.collectDeepAttrs(v, &labelName) - diags = append(diags, attrDiags...) - - if len(jsonAttrs) == 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing block label", - Detail: fmt.Sprintf("At least one object property is required, whose name represents the %s block's %s.", typeName, labelName), - Subject: v.StartRange().Ptr(), - }) - return - } - labelsUsed := append(labelsUsed, "") - labelRanges := append(labelRanges, hcl.Range{}) - for _, p := range jsonAttrs { - pk := p.Name - labelsUsed[len(labelsUsed)-1] = pk - labelRanges[len(labelRanges)-1] = p.NameRange - diags = append(diags, b.unpackBlock(p.Value, typeName, typeRange, labelsLeft[1:], labelsUsed, labelRanges, blocks)...) - } - return - } - - // By the time we get here, we've peeled off all the labels and we're ready - // to deal with the block's actual content. - - // need to copy the label slices because their underlying arrays will - // continue to be mutated after we return. - labels := make([]string, len(labelsUsed)) - copy(labels, labelsUsed) - labelR := make([]hcl.Range, len(labelRanges)) - copy(labelR, labelRanges) - - switch tv := v.(type) { - case *nullVal: - // There is no block content, e.g. the value is null.
- return - case *objectVal: - // Single instance of the block - *blocks = append(*blocks, &hcl.Block{ - Type: typeName, - Labels: labels, - Body: &body{ - val: tv, - }, - - DefRange: tv.OpenRange, - TypeRange: *typeRange, - LabelRanges: labelR, - }) - case *arrayVal: - // Multiple instances of the block - for _, av := range tv.Values { - *blocks = append(*blocks, &hcl.Block{ - Type: typeName, - Labels: labels, - Body: &body{ - val: av, // might be mistyped; we'll find out when content is requested for this body - }, - - DefRange: tv.OpenRange, - TypeRange: *typeRange, - LabelRanges: labelR, - }) - } - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: fmt.Sprintf("Either a JSON object or a JSON array is required, representing the contents of one or more %q blocks.", typeName), - Subject: v.StartRange().Ptr(), - }) - } - return -} - -// collectDeepAttrs takes either a single object or an array of objects and -// flattens it into a list of object attributes, collecting attributes from -// all of the objects in a given array. -// -// Ordering is preserved, so a list of objects that each have one property -// will result in those properties being returned in the same order as the -// objects appeared in the array. -// -// This is appropriate for use only for objects representing bodies or labels -// within a block. -// -// The labelName argument, if non-null, is used to tailor returned error -// messages to refer to block labels rather than attributes and child blocks. -// It has no other effect. -func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.Diagnostics) { - var diags hcl.Diagnostics - var attrs []*objectAttr - - switch tv := v.(type) { - case *nullVal: - // If a value is null then we return no attributes and no error. - - case *objectVal: - attrs = append(attrs, tv.Attrs...) - - case *arrayVal: - for _, ev := range tv.Values { - switch tev := ev.(type) { - case *objectVal: - attrs = append(attrs, tev.Attrs...) - default: - if labelName != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: fmt.Sprintf("A JSON object is required here, to specify %s labels for this block.", *labelName), - Subject: ev.StartRange().Ptr(), - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: "A JSON object is required here, to define arguments and child blocks.", - Subject: ev.StartRange().Ptr(), - }) - } - } - } - - default: - if labelName != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: fmt.Sprintf("Either a JSON object or JSON array of objects is required here, to specify %s labels for this block.", *labelName), - Subject: v.StartRange().Ptr(), - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: "Either a JSON object or JSON array of objects is required here, to define arguments and child blocks.", - Subject: v.StartRange().Ptr(), - }) - } - } - - return attrs, diags -} - -func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - switch v := e.src.(type) { - case *stringVal: - if ctx != nil { - // Parse string contents as a HCL native language expression.
- // We only do this if we have a context, so passing a nil context - // is how the caller specifies that interpolations are not allowed - // and that the string should just be returned verbatim. - templateSrc := v.Value - expr, diags := hclsyntax.ParseTemplate( - []byte(templateSrc), - v.SrcRange.Filename, - - // This won't produce _exactly_ the right result, since - // the hclsyntax parser can't "see" any escapes we removed - // while parsing JSON, but it's better than nothing. - hcl.Pos{ - Line: v.SrcRange.Start.Line, - - // skip over the opening quote mark - Byte: v.SrcRange.Start.Byte + 1, - Column: v.SrcRange.Start.Column + 1, - }, - ) - if diags.HasErrors() { - return cty.DynamicVal, diags - } - val, evalDiags := expr.Value(ctx) - diags = append(diags, evalDiags...) - return val, diags - } - - return cty.StringVal(v.Value), nil - case *numberVal: - return cty.NumberVal(v.Value), nil - case *booleanVal: - return cty.BoolVal(v.Value), nil - case *arrayVal: - var diags hcl.Diagnostics - vals := []cty.Value{} - for _, jsonVal := range v.Values { - val, valDiags := (&expression{src: jsonVal}).Value(ctx) - vals = append(vals, val) - diags = append(diags, valDiags...) - } - return cty.TupleVal(vals), diags - case *objectVal: - var diags hcl.Diagnostics - attrs := map[string]cty.Value{} - attrRanges := map[string]hcl.Range{} - known := true - for _, jsonAttr := range v.Attrs { - // In this one context we allow keys to contain interpolation - // expressions too, assuming we're evaluating in interpolation - // mode. This achieves parity with the native syntax where - // object expressions can have dynamic keys, while block contents - // may not. - name, nameDiags := (&expression{src: &stringVal{ - Value: jsonAttr.Name, - SrcRange: jsonAttr.NameRange, - }}).Value(ctx) - valExpr := &expression{src: jsonAttr.Value} - val, valDiags := valExpr.Value(ctx) - diags = append(diags, nameDiags...) - diags = append(diags, valDiags...) - - var err error - name, err = convert.Convert(name, cty.String) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object key expression", - Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err), - Subject: &jsonAttr.NameRange, - Expression: valExpr, - EvalContext: ctx, - }) - continue - } - if name.IsNull() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object key expression", - Detail: "Cannot use null value as an object key.", - Subject: &jsonAttr.NameRange, - Expression: valExpr, - EvalContext: ctx, - }) - continue - } - if !name.IsKnown() { - // This is a bit of a weird case, since our usual rules require - // us to tolerate unknowns and just represent the result as - // best we can but if we don't know the key then we can't - // know the type of our object at all, and thus we must turn - // the whole thing into cty.DynamicVal. This is consistent with - // how this situation is handled in the native syntax. - // We'll keep iterating so we can collect other errors in - // subsequent attributes. 
- known = false - continue - } - nameStr := name.AsString() - if _, defined := attrs[nameStr]; defined { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate object attribute", - Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]), - Subject: &jsonAttr.NameRange, - Expression: e, - EvalContext: ctx, - }) - continue - } - attrs[nameStr] = val - attrRanges[nameStr] = jsonAttr.NameRange - } - if !known { - // We encountered an unknown key somewhere along the way, so - // we can't know what our type will eventually be. - return cty.DynamicVal, diags - } - return cty.ObjectVal(attrs), diags - case *nullVal: - return cty.NullVal(cty.DynamicPseudoType), nil - default: - // Default to DynamicVal so that ASTs containing invalid nodes can - // still be partially-evaluated. - return cty.DynamicVal, nil - } -} - -func (e *expression) Variables() []hcl.Traversal { - var vars []hcl.Traversal - - switch v := e.src.(type) { - case *stringVal: - templateSrc := v.Value - expr, diags := hclsyntax.ParseTemplate( - []byte(templateSrc), - v.SrcRange.Filename, - - // This won't produce _exactly_ the right result, since - // the hclsyntax parser can't "see" any escapes we removed - // while parsing JSON, but it's better than nothing. - hcl.Pos{ - Line: v.SrcRange.Start.Line, - - // skip over the opening quote mark - Byte: v.SrcRange.Start.Byte + 1, - Column: v.SrcRange.Start.Column + 1, - }, - ) - if diags.HasErrors() { - return vars - } - return expr.Variables() - - case *arrayVal: - for _, jsonVal := range v.Values { - vars = append(vars, (&expression{src: jsonVal}).Variables()...) - } - case *objectVal: - for _, jsonAttr := range v.Attrs { - keyExpr := &stringVal{ // we're going to treat key as an expression in this context - Value: jsonAttr.Name, - SrcRange: jsonAttr.NameRange, - } - vars = append(vars, (&expression{src: keyExpr}).Variables()...) - vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...) - } - } - - return vars -} - -func (e *expression) Range() hcl.Range { - return e.src.Range() -} - -func (e *expression) StartRange() hcl.Range { - return e.src.StartRange() -} - -// Implementation for hcl.AbsTraversalForExpr. -func (e *expression) AsTraversal() hcl.Traversal { - // In JSON-based syntax a traversal is given as a string containing - // traversal syntax as defined by hclsyntax.ParseTraversalAbs. - - switch v := e.src.(type) { - case *stringVal: - traversal, diags := hclsyntax.ParseTraversalAbs([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) - if diags.HasErrors() { - return nil - } - return traversal - default: - return nil - } -} - -// Implementation for hcl.ExprCall. -func (e *expression) ExprCall() *hcl.StaticCall { - // In JSON-based syntax a static call is given as a string containing - // an expression in the native syntax that also supports ExprCall. - - switch v := e.src.(type) { - case *stringVal: - expr, diags := hclsyntax.ParseExpression([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) - if diags.HasErrors() { - return nil - } - - call, diags := hcl.ExprCall(expr) - if diags.HasErrors() { - return nil - } - - return call - default: - return nil - } -} - -// Implementation for hcl.ExprList. 
-func (e *expression) ExprList() []hcl.Expression { - switch v := e.src.(type) { - case *arrayVal: - ret := make([]hcl.Expression, len(v.Values)) - for i, node := range v.Values { - ret[i] = &expression{src: node} - } - return ret - default: - return nil - } -} - -// Implementation for hcl.ExprMap. -func (e *expression) ExprMap() []hcl.KeyValuePair { - switch v := e.src.(type) { - case *objectVal: - ret := make([]hcl.KeyValuePair, len(v.Attrs)) - for i, jsonAttr := range v.Attrs { - ret[i] = hcl.KeyValuePair{ - Key: &expression{src: &stringVal{ - Value: jsonAttr.Name, - SrcRange: jsonAttr.NameRange, - }}, - Value: &expression{src: jsonAttr.Value}, - } - } - return ret - default: - return nil - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go b/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go deleted file mode 100644 index bbcce5b306..0000000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by "stringer -type tokenType scanner.go"; DO NOT EDIT. - -package json - -import "strconv" - -const _tokenType_name = "tokenInvalidtokenCommatokenColontokenEqualstokenKeywordtokenNumbertokenStringtokenBrackOtokenBrackCtokenBraceOtokenBraceCtokenEOF" - -var _tokenType_map = map[tokenType]string{ - 0: _tokenType_name[0:12], - 44: _tokenType_name[12:22], - 58: _tokenType_name[22:32], - 61: _tokenType_name[32:43], - 75: _tokenType_name[43:55], - 78: _tokenType_name[55:66], - 83: _tokenType_name[66:77], - 91: _tokenType_name[77:88], - 93: _tokenType_name[88:99], - 123: _tokenType_name[99:110], - 125: _tokenType_name[110:121], - 9220: _tokenType_name[121:129], -} - -func (i tokenType) String() string { - if str, ok := _tokenType_map[i]; ok { - return str - } - return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")" -} diff --git a/vendor/github.com/hashicorp/hcl/v2/merged.go b/vendor/github.com/hashicorp/hcl/v2/merged.go index 96e62a58d4..27fd1ed5eb 100644 --- a/vendor/github.com/hashicorp/hcl/v2/merged.go +++ b/vendor/github.com/hashicorp/hcl/v2/merged.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/ops.go b/vendor/github.com/hashicorp/hcl/v2/ops.go index f69de5b933..bdf23614d6 100644 --- a/vendor/github.com/hashicorp/hcl/v2/ops.go +++ b/vendor/github.com/hashicorp/hcl/v2/ops.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( @@ -21,6 +24,8 @@ import ( // though nil can be provided if the calling application is going to // ignore the subject of the returned diagnostics anyway. 
func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) { + const invalidIndex = "Invalid index" + if collection.IsNull() { return cty.DynamicVal, Diagnostics{ { @@ -35,7 +40,7 @@ func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) return cty.DynamicVal, Diagnostics{ { Severity: DiagError, - Summary: "Invalid index", + Summary: invalidIndex, Detail: "Can't use a null value as an indexing key.", Subject: srcRange, }, @@ -66,7 +71,7 @@ func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) return cty.DynamicVal, Diagnostics{ { Severity: DiagError, - Summary: "Invalid index", + Summary: invalidIndex, Detail: fmt.Sprintf( "The given key does not identify an element in this collection value: %s.", keyErr.Error(), @@ -88,31 +93,84 @@ func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) } } if has.False() { - // We have a more specialized error message for the situation of - // using a fractional number to index into a sequence, because - // that will tend to happen if the user is trying to use division - // to calculate an index and not realizing that HCL does float - // division rather than integer division. if (ty.IsListType() || ty.IsTupleType()) && key.Type().Equals(cty.Number) { if key.IsKnown() && !key.IsNull() { + // NOTE: we don't know what any marks might've represented + // up at the calling application layer, so we must avoid + // showing the literal number value in these error messages + // in case the mark represents something important, such as + // a value being "sensitive". + key, _ := key.Unmark() bf := key.AsBigFloat() if _, acc := bf.Int(nil); acc != big.Exact { + // We have a more specialized error message for the + // situation of using a fractional number to index into + // a sequence, because that will tend to happen if the + // user is trying to use division to calculate an index + // and not realizing that HCL does float division + // rather than integer division. + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: invalidIndex, + Detail: "The given key does not identify an element in this collection value: indexing a sequence requires a whole number, but the given index has a fractional part.", + Subject: srcRange, + }, + } + } + + if bf.Sign() < 0 { + // Some other languages allow negative indices to + // select "backwards" from the end of the sequence, + // but HCL doesn't do that in order to give better + // feedback if a dynamic index is calculated + // incorrectly. return cty.DynamicVal, Diagnostics{ { Severity: DiagError, - Summary: "Invalid index", - Detail: fmt.Sprintf("The given key does not identify an element in this collection value: indexing a sequence requires a whole number, but the given index (%g) has a fractional part.", bf), + Summary: invalidIndex, + Detail: "The given key does not identify an element in this collection value: a negative number is not a valid index for a sequence.", Subject: srcRange, }, } } + if lenVal := collection.Length(); lenVal.IsKnown() && !lenVal.IsMarked() { + // Length always returns a number, and we already + // checked that it's a known number, so this is safe. 
+ lenBF := lenVal.AsBigFloat() + var result big.Float + result.Sub(bf, lenBF) + if result.Sign() < 1 { + if lenBF.Sign() == 0 { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: invalidIndex, + Detail: "The given key does not identify an element in this collection value: the collection has no elements.", + Subject: srcRange, + }, + } + } else { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: invalidIndex, + Detail: "The given key does not identify an element in this collection value: the given index is greater than or equal to the length of the collection.", + Subject: srcRange, + }, + } + } + } + } } } + // If this is not one of the special situations we handled above + // then we'll fall back on a very generic message. return cty.DynamicVal, Diagnostics{ { Severity: DiagError, - Summary: "Invalid index", + Summary: invalidIndex, Detail: "The given key does not identify an element in this collection value.", Subject: srcRange, }, @@ -122,12 +180,13 @@ func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) return collection.Index(key), nil case ty.IsObjectType(): + wasNumber := key.Type() == cty.Number key, keyErr := convert.Convert(key, cty.String) if keyErr != nil { return cty.DynamicVal, Diagnostics{ { Severity: DiagError, - Summary: "Invalid index", + Summary: invalidIndex, Detail: fmt.Sprintf( "The given key does not identify an element in this collection value: %s.", keyErr.Error(), @@ -143,14 +202,24 @@ func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) return cty.DynamicVal, nil } + key, _ = key.Unmark() attrName := key.AsString() if !ty.HasAttribute(attrName) { + var suggestion string + if wasNumber { + // We note this only as an addendum to an error we would've + // already returned anyway, because it is valid (albeit weird) + // to have an attribute whose name is just decimal digits + // and then access that attribute using a number whose + // decimal representation is the same digits. + suggestion = " An object only supports looking up attributes by name, not by numeric index." 
+ } return cty.DynamicVal, Diagnostics{ { Severity: DiagError, - Summary: "Invalid index", - Detail: "The given key does not identify an element in this collection value.", + Summary: invalidIndex, + Detail: fmt.Sprintf("The given key does not identify an element in this collection value.%s", suggestion), Subject: srcRange, }, } @@ -158,11 +227,21 @@ func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) return collection.GetAttr(attrName), nil + case ty.IsSetType(): + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: invalidIndex, + Detail: "Elements of a set are identified only by their value and don't have any separate index or key to select with, so it's only possible to perform operations across all elements of the set.", + Subject: srcRange, + }, + } + default: return cty.DynamicVal, Diagnostics{ { Severity: DiagError, - Summary: "Invalid index", + Summary: invalidIndex, Detail: "This value does not have any indices.", Subject: srcRange, }, @@ -195,6 +274,8 @@ func GetAttr(obj cty.Value, attrName string, srcRange *Range) (cty.Value, Diagno } } + const unsupportedAttr = "Unsupported attribute" + ty := obj.Type() switch { case ty.IsObjectType(): @@ -202,7 +283,7 @@ func GetAttr(obj cty.Value, attrName string, srcRange *Range) (cty.Value, Diagno return cty.DynamicVal, Diagnostics{ { Severity: DiagError, - Summary: "Unsupported attribute", + Summary: unsupportedAttr, Detail: fmt.Sprintf("This object does not have an attribute named %q.", attrName), Subject: srcRange, }, @@ -239,11 +320,69 @@ func GetAttr(obj cty.Value, attrName string, srcRange *Range) (cty.Value, Diagno return obj.Index(idx), nil case ty == cty.DynamicPseudoType: return cty.DynamicVal, nil + case ty.IsListType() && ty.ElementType().IsObjectType(): + // It seems a common mistake to try to access attributes on a whole + // list of objects rather than on a specific individual element, so + // we have some extra hints for that case. + + switch { + case ty.ElementType().HasAttribute(attrName): + // This is a very strong indication that the user mistook the list + // of objects for a single object, so we can be a little more + // direct in our suggestion here. + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: unsupportedAttr, + Detail: fmt.Sprintf("Can't access attributes on a list of objects. Did you mean to access attribute %q for a specific element of the list, or across all elements of the list?", attrName), + Subject: srcRange, + }, + } + default: + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: unsupportedAttr, + Detail: "Can't access attributes on a list of objects. Did you mean to access an attribute for a specific element of the list, or across all elements of the list?", + Subject: srcRange, + }, + } + } + + case ty.IsSetType() && ty.ElementType().IsObjectType(): + // This is similar to the previous case, but we can't give such a + // direct suggestion because there is no mechanism to select a single + // item from a set. + // We could potentially suggest using a for expression or splat + // operator here, but we typically don't get into syntax specifics + // in hcl.GetAttr suggestions because it's a general function used in + // various other situations, such as in application-specific operations + // that might have a more constrained set of alternative approaches. + + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: unsupportedAttr, + Detail: "Can't access attributes on a set of objects. Did you mean to access an attribute across all elements of the set?", + Subject: srcRange, + }, + } + + case ty.IsPrimitiveType(): + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: unsupportedAttr, + Detail: fmt.Sprintf("Can't access attributes on a primitive-typed value (%s).", ty.FriendlyName()), + Subject: srcRange, + }, + } + default: return cty.DynamicVal, Diagnostics{ { Severity: DiagError, - Summary: "Unsupported attribute", + Summary: unsupportedAttr, Detail: "This value does not have any attributes.", Subject: srcRange, },
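Since the ops.go changes above also teach GetAttr about collections of objects, here is a companion sketch, under the same assumptions as the earlier example, of the new hints:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	users := cty.ListVal([]cty.Value{
		cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("octocat")}),
	})

	// The element type has a "name" attribute, so the diagnostic names the
	// attribute and suggests selecting one element, or all elements.
	_, diags := hcl.GetAttr(users, "name", nil)
	fmt.Println(diags.Error())

	// Primitive values now get a type-specific message instead of the
	// generic "does not have any attributes".
	_, diags = hcl.GetAttr(cty.True, "name", nil)
	fmt.Println(diags.Error())
}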
diff --git a/vendor/github.com/hashicorp/hcl/v2/pos.go b/vendor/github.com/hashicorp/hcl/v2/pos.go index 06db8bfbd4..1bbbce87e1 100644 --- a/vendor/github.com/hashicorp/hcl/v2/pos.go +++ b/vendor/github.com/hashicorp/hcl/v2/pos.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import "fmt" diff --git a/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go b/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go index ef0aa1015b..2232f374af 100644 --- a/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go +++ b/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( "bufio" "bytes" - "github.com/apparentlymart/go-textseg/v12/textseg" + "github.com/apparentlymart/go-textseg/v15/textseg" ) // RangeScanner is a helper that will scan over a buffer using a bufio.SplitFunc diff --git a/vendor/github.com/hashicorp/hcl/v2/schema.go b/vendor/github.com/hashicorp/hcl/v2/schema.go index 891257acb2..d4e339cb26 100644 --- a/vendor/github.com/hashicorp/hcl/v2/schema.go +++ b/vendor/github.com/hashicorp/hcl/v2/schema.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl // BlockHeaderSchema represents the shape of a block header, and is diff --git a/vendor/github.com/hashicorp/hcl/v2/static_expr.go b/vendor/github.com/hashicorp/hcl/v2/static_expr.go index 98ada87b62..e14d7f890a 100644 --- a/vendor/github.com/hashicorp/hcl/v2/static_expr.go +++ b/vendor/github.com/hashicorp/hcl/v2/static_expr.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/structure.go b/vendor/github.com/hashicorp/hcl/v2/structure.go index aab09457d7..2bdf579d1d 100644 --- a/vendor/github.com/hashicorp/hcl/v2/structure.go +++ b/vendor/github.com/hashicorp/hcl/v2/structure.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go b/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go index 8521814e5f..62aba139f0 100644 --- a/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go +++ b/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl // ----------------------------------------------------------------------------- diff --git a/vendor/github.com/hashicorp/hcl/v2/traversal.go b/vendor/github.com/hashicorp/hcl/v2/traversal.go index d710197008..540dde7ec7 100644 --- a/vendor/github.com/hashicorp/hcl/v2/traversal.go +++ b/vendor/github.com/hashicorp/hcl/v2/traversal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go b/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go index f69d5fe9b2..87eeb15997 100644 --- a/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go +++ b/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl // AbsTraversalForExpr attempts to interpret the given expression as @@ -13,7 +16,7 @@ package hcl // // In most cases the calling application is interested in the value // that results from an expression, but in rarer cases the application -// needs to see the the name of the variable and subsequent +// needs to see the name of the variable and subsequent // attributes/indexes itself, for example to allow users to give references // to the variables themselves rather than to their values. An implementer // of this function should at least support attribute and index steps. diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE b/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE deleted file mode 100644 index 82b4de97c7..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. 
“Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. 
Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. 
Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. 
Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go deleted file mode 100644 index d9d2762583..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go +++ /dev/null @@ -1,138 +0,0 @@ -package tfconfig - -import ( - "fmt" - - legacyhclparser "github.com/hashicorp/hcl/hcl/parser" - "github.com/hashicorp/hcl/v2" -) - -// Diagnostic describes a problem (error or warning) encountered during -// configuration loading. 
-type Diagnostic struct { - Severity DiagSeverity `json:"severity"` - Summary string `json:"summary"` - Detail string `json:"detail,omitempty"` - - // Pos is not populated for all diagnostics, but when populated should - // indicate a particular line that the described problem relates to. - Pos *SourcePos `json:"pos,omitempty"` -} - -// Diagnostics represents a sequence of diagnostics. This is the type that -// should be returned from a function that might generate diagnostics. -type Diagnostics []Diagnostic - -// HasErrors returns true if there is at least one Diagnostic of severity -// DiagError in the receiver. -// -// If a function returns a Diagnostics without errors then the result can -// be assumed to be complete within the "best effort" constraints of this -// library. If errors are present then the caller may wish to employ more -// caution in relying on the result. -func (diags Diagnostics) HasErrors() bool { - for _, diag := range diags { - if diag.Severity == DiagError { - return true - } - } - return false -} - -func (diags Diagnostics) Error() string { - switch len(diags) { - case 0: - return "no problems" - case 1: - return fmt.Sprintf("%s: %s", diags[0].Summary, diags[0].Detail) - default: - return fmt.Sprintf("%s: %s (and %d other messages)", diags[0].Summary, diags[0].Detail, len(diags)-1) - } -} - -// Err returns an error representing the receiver if the receiver HasErrors, or -// nil otherwise. -// -// The returned error can be type-asserted back to a Diagnostics if needed. -func (diags Diagnostics) Err() error { - if diags.HasErrors() { - return diags - } - return nil -}
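These two methods let callers treat a Diagnostics as an ordinary Go error. A minimal sketch of that pattern as this now-removed vendored package supported it, with a hypothetical module directory:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform-config-inspect/tfconfig"
)

func main() {
	// Hypothetical path; LoadModule is shown later in this diff.
	mod, diags := tfconfig.LoadModule("./modules/example")
	if diags.HasErrors() {
		// Err is non-nil exactly when error-severity diagnostics exist.
		log.Fatal(diags.Err())
	}
	fmt.Printf("parsed module at %s\n", mod.Path)
}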
- -// DiagSeverity describes the severity of a Diagnostic. -type DiagSeverity rune - -// DiagError indicates a problem that prevented proper processing of the -// configuration. In the presence of DiagError diagnostics the result is -// likely to be incomplete. -const DiagError DiagSeverity = 'E' - -// DiagWarning indicates a problem that the user may wish to consider but -// that did not prevent proper processing of the configuration. -const DiagWarning DiagSeverity = 'W' - -// MarshalJSON is an implementation of encoding/json.Marshaler -func (s DiagSeverity) MarshalJSON() ([]byte, error) { - switch s { - case DiagError: - return []byte(`"error"`), nil - case DiagWarning: - return []byte(`"warning"`), nil - default: - return []byte(`"invalid"`), nil - } -} - -func diagnosticsHCL(diags hcl.Diagnostics) Diagnostics { - if len(diags) == 0 { - return nil - } - ret := make(Diagnostics, len(diags)) - for i, diag := range diags { - ret[i] = Diagnostic{ - Summary: diag.Summary, - Detail: diag.Detail, - } - switch diag.Severity { - case hcl.DiagError: - ret[i].Severity = DiagError - case hcl.DiagWarning: - ret[i].Severity = DiagWarning - } - if diag.Subject != nil { - pos := sourcePosHCL(*diag.Subject) - ret[i].Pos = &pos - } - } - return ret -} - -func diagnosticsError(err error) Diagnostics { - if err == nil { - return nil - } - - if posErr, ok := err.(*legacyhclparser.PosError); ok { - pos := sourcePosLegacyHCL(posErr.Pos, "") - return Diagnostics{ - Diagnostic{ - Severity: DiagError, - Summary: posErr.Err.Error(), - Pos: &pos, - }, - } - } - - return Diagnostics{ - Diagnostic{ - Severity: DiagError, - Summary: err.Error(), - }, - } -} - -func diagnosticsErrorf(format string, args ...interface{}) Diagnostics { - return diagnosticsError(fmt.Errorf(format, args...)) -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go deleted file mode 100644 index 1604a6e08a..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Package tfconfig is a helper library that does careful, shallow parsing of -// Terraform modules to provide access to high-level metadata while -// remaining broadly compatible with configurations targeting various -// different Terraform versions. -// -// This package focuses on describing top-level objects only, and in particular -// does not attempt any sort of processing that would require access to plugins. -// Currently it allows callers to extract high-level information about -// variables, outputs, resource blocks, provider dependencies, and Terraform -// Core dependencies. -// -// This package only works at the level of single modules. A full configuration -// is a tree of potentially several modules, some of which may be references -// to remote packages. There are some basic helpers for traversing calls to -// modules at relative local paths, however. -// -// This package employs a "best effort" parsing strategy, producing as complete -// a result as possible even though the input may not be entirely valid. The -// intended use-case is high-level analysis and indexing of externally-facing -// module characteristics, as opposed to validating or even applying the module. -package tfconfig
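The package doc above describes the metadata surface being dropped. Under the assumption that consumers used it roughly as follows (paths hypothetical, error handling elided; see the Diagnostics sketch earlier), a short usage sketch:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-config-inspect/tfconfig"
)

func main() {
	// Shallow, plugin-free parse of a single module directory.
	mod, _ := tfconfig.LoadModule("./modules/network")

	for name, v := range mod.Variables {
		fmt.Printf("variable %q type=%q default=%v\n", name, v.Type, v.Default)
	}
	for name, o := range mod.Outputs {
		fmt.Printf("output %q description=%q\n", name, o.Description)
	}
	// Providers implied by resources appear even without version constraints.
	for name, req := range mod.RequiredProviders {
		fmt.Printf("provider %q constraints=%v\n", name, req.VersionConstraints)
	}
}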
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go deleted file mode 100644 index a070f76e04..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go +++ /dev/null @@ -1,130 +0,0 @@ -package tfconfig - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "strings" - - "github.com/hashicorp/hcl/v2" -) - -// LoadModule reads the directory at the given path and attempts to interpret -// it as a Terraform module. -func LoadModule(dir string) (*Module, Diagnostics) { - - // For broad compatibility here we actually have two separate loader - // codepaths. The main one uses the new HCL parser and API and is intended - // for configurations from Terraform 0.12 onwards (though will work for - // many older configurations too), but we'll also fall back on one that - // uses the _old_ HCL implementation so we can deal with some edge-cases - // that are not valid in new HCL. - - module, diags := loadModule(dir) - if diags.HasErrors() { - // Try using the legacy HCL parser and see if we fare better. - legacyModule, legacyDiags := loadModuleLegacyHCL(dir) - if !legacyDiags.HasErrors() { - legacyModule.init(legacyDiags) - return legacyModule, legacyDiags - } - } - - module.init(diags) - return module, diags -} - -// IsModuleDir checks if the given path contains terraform configuration files. -// This allows the caller to decide how to handle directories that do not have tf files. -func IsModuleDir(dir string) bool { - primaryPaths, _ := dirFiles(dir) - if len(primaryPaths) == 0 { - return false - } - return true -} - -func (m *Module) init(diags Diagnostics) { - // Fill in any additional provider requirements that are implied by - // resource configurations, to save the caller from needing to apply - // this logic itself. Implied requirements don't have version constraints, - // but we'll make sure the requirement value is still non-nil in this - // case so callers can easily recognize it. - for _, r := range m.ManagedResources { - if _, exists := m.RequiredProviders[r.Provider.Name]; !exists { - m.RequiredProviders[r.Provider.Name] = &ProviderRequirement{} - } - } - for _, r := range m.DataResources { - if _, exists := m.RequiredProviders[r.Provider.Name]; !exists { - m.RequiredProviders[r.Provider.Name] = &ProviderRequirement{} - } - } - - // We redundantly also reference the diagnostics from inside the module - // object, primarily so that they can easily be included in JSON-serialized - // versions of the module object. - m.Diagnostics = diags -} - -func dirFiles(dir string) (primary []string, diags hcl.Diagnostics) { - infos, err := ioutil.ReadDir(dir) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Failed to read module directory", - Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), - }) - return - } - - var override []string - for _, info := range infos { - if info.IsDir() { - // We only care about files - continue - } - - name := info.Name() - ext := fileExt(name) - if ext == "" || isIgnoredFile(name) { - continue - } - - baseName := name[:len(name)-len(ext)] // strip extension - isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") - - fullPath := filepath.Join(dir, name) - if isOverride { - override = append(override, fullPath) - } else { - primary = append(primary, fullPath) - } - } - - // We are assuming that any _override files will be logically named, - // and processing the files in alphabetical order. Primaries first, then overrides. - primary = append(primary, override...) - - return -} - -// fileExt returns the Terraform configuration extension of the given -// path, or a blank string if it is not a recognized extension.
-func fileExt(path string) string { - if strings.HasSuffix(path, ".tf") { - return ".tf" - } else if strings.HasSuffix(path, ".tf.json") { - return ".tf.json" - } else { - return "" - } -} - -// isIgnoredFile returns true if the given filename (which must not have a -// directory path ahead of it) should be ignored as e.g. an editor swap file. -func isIgnoredFile(name string) bool { - return strings.HasPrefix(name, ".") || // Unix-like hidden files - strings.HasSuffix(name, "~") || // vim - strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go deleted file mode 100644 index f83ac87267..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go +++ /dev/null @@ -1,319 +0,0 @@ -package tfconfig - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclparse" - ctyjson "github.com/zclconf/go-cty/cty/json" -) - -func loadModule(dir string) (*Module, Diagnostics) { - mod := newModule(dir) - primaryPaths, diags := dirFiles(dir) - - parser := hclparse.NewParser() - - for _, filename := range primaryPaths { - var file *hcl.File - var fileDiags hcl.Diagnostics - if strings.HasSuffix(filename, ".json") { - file, fileDiags = parser.ParseJSONFile(filename) - } else { - file, fileDiags = parser.ParseHCLFile(filename) - } - diags = append(diags, fileDiags...) - if file == nil { - continue - } - - content, _, contentDiags := file.Body.PartialContent(rootSchema) - diags = append(diags, contentDiags...) - - for _, block := range content.Blocks { - switch block.Type { - - case "terraform": - content, _, contentDiags := block.Body.PartialContent(terraformBlockSchema) - diags = append(diags, contentDiags...) - - if attr, defined := content.Attributes["required_version"]; defined { - var version string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) - diags = append(diags, valDiags...) - if !valDiags.HasErrors() { - mod.RequiredCore = append(mod.RequiredCore, version) - } - } - - for _, innerBlock := range content.Blocks { - switch innerBlock.Type { - case "required_providers": - reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock) - diags = append(diags, reqsDiags...) - for name, req := range reqs { - if _, exists := mod.RequiredProviders[name]; !exists { - mod.RequiredProviders[name] = req - } else { - mod.RequiredProviders[name].VersionConstraints = append(mod.RequiredProviders[name].VersionConstraints, req.VersionConstraints...) - } - } - } - } - - case "variable": - content, _, contentDiags := block.Body.PartialContent(variableSchema) - diags = append(diags, contentDiags...) - - name := block.Labels[0] - v := &Variable{ - Name: name, - Pos: sourcePosHCL(block.DefRange), - } - - mod.Variables[name] = v - - if attr, defined := content.Attributes["type"]; defined { - // We handle this particular attribute in a somewhat-tricky way: - // since Terraform may evolve its type expression syntax in - // future versions, we don't want to be overly-strict in how - // we handle it here, and so we'll instead just take the raw - // source provided by the user, using the source location - // information in the expression object. 
- // - // However, older versions of Terraform expected the type - // to be a string containing a keyword, so we'll need to - // handle that as a special case first for backward compatibility. - - var typeExpr string - - var typeExprAsStr string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &typeExprAsStr) - if !valDiags.HasErrors() { - typeExpr = typeExprAsStr - } else { - - rng := attr.Expr.Range() - sourceFilename := rng.Filename - source, exists := parser.Sources()[sourceFilename] - if exists { - typeExpr = string(rng.SliceBytes(source)) - } else { - // This should never happen, so we'll just warn about it and leave the type unspecified. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Source code not available", - Detail: fmt.Sprintf("Source code is not available for the file %q, which declares the variable %q.", sourceFilename, name), - Subject: &block.DefRange, - }) - typeExpr = "" - } - - } - - v.Type = typeExpr - } - - if attr, defined := content.Attributes["description"]; defined { - var description string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) - diags = append(diags, valDiags...) - v.Description = description - } - - if attr, defined := content.Attributes["default"]; defined { - // To avoid the caller needing to deal with cty here, we'll - // use its JSON encoding to convert into an - // approximately-equivalent plain Go interface{} value - // to return. - val, valDiags := attr.Expr.Value(nil) - diags = append(diags, valDiags...) - if val.IsWhollyKnown() { // should only be false if there are errors in the input - valJSON, err := ctyjson.Marshal(val, val.Type()) - if err != nil { - // Should never happen, since all possible known - // values have a JSON mapping. - panic(fmt.Errorf("failed to serialize default value as JSON: %s", err)) - } - var def interface{} - err = json.Unmarshal(valJSON, &def) - if err != nil { - // Again should never happen, because valJSON is - // guaranteed valid by ctyjson.Marshal. - panic(fmt.Errorf("failed to re-parse default value from JSON: %s", err)) - } - v.Default = def - } - } - - case "output": - - content, _, contentDiags := block.Body.PartialContent(outputSchema) - diags = append(diags, contentDiags...) - - name := block.Labels[0] - o := &Output{ - Name: name, - Pos: sourcePosHCL(block.DefRange), - } - - mod.Outputs[name] = o - - if attr, defined := content.Attributes["description"]; defined { - var description string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) - diags = append(diags, valDiags...) - o.Description = description - } - - case "provider": - - content, _, contentDiags := block.Body.PartialContent(providerConfigSchema) - diags = append(diags, contentDiags...) - - name := block.Labels[0] - // Even if there isn't an explicit version required, we still - // need an entry in our map to signal the unversioned dependency. - if _, exists := mod.RequiredProviders[name]; !exists { - mod.RequiredProviders[name] = &ProviderRequirement{} - } - if attr, defined := content.Attributes["version"]; defined { - var version string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) - diags = append(diags, valDiags...) - if !valDiags.HasErrors() { - mod.RequiredProviders[name].VersionConstraints = append(mod.RequiredProviders[name].VersionConstraints, version) - } - } - - case "resource", "data": - - content, _, contentDiags := block.Body.PartialContent(resourceSchema) - diags = append(diags, contentDiags...) 
- - typeName := block.Labels[0] - name := block.Labels[1] - - r := &Resource{ - Type: typeName, - Name: name, - Pos: sourcePosHCL(block.DefRange), - } - - var resourcesMap map[string]*Resource - - switch block.Type { - case "resource": - r.Mode = ManagedResourceMode - resourcesMap = mod.ManagedResources - case "data": - r.Mode = DataResourceMode - resourcesMap = mod.DataResources - } - - key := r.MapKey() - - resourcesMap[key] = r - - if attr, defined := content.Attributes["provider"]; defined { - // New style here is to provide this as a naked traversal - // expression, but we also support quoted references for - // older configurations that predated this convention. - traversal, travDiags := hcl.AbsTraversalForExpr(attr.Expr) - if travDiags.HasErrors() { - traversal = nil // in case we got any partial results - - // Fall back on trying to parse as a string - var travStr string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &travStr) - if !valDiags.HasErrors() { - var strDiags hcl.Diagnostics - traversal, strDiags = hclsyntax.ParseTraversalAbs([]byte(travStr), "", hcl.Pos{}) - if strDiags.HasErrors() { - traversal = nil - } - } - } - - // If we get out here with a nil traversal then we didn't - // succeed in processing the input. - if len(traversal) > 0 { - providerName := traversal.RootName() - alias := "" - if len(traversal) > 1 { - if getAttr, ok := traversal[1].(hcl.TraverseAttr); ok { - alias = getAttr.Name - } - } - r.Provider = ProviderRef{ - Name: providerName, - Alias: alias, - } - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider reference", - Detail: "Provider argument requires a provider name followed by an optional alias, like \"aws.foo\".", - Subject: attr.Expr.Range().Ptr(), - }) - } - } else { - // If provider _isn't_ set then we'll infer it from the - // resource type. - r.Provider = ProviderRef{ - Name: resourceTypeDefaultProviderName(r.Type), - } - } - - case "module": - - content, _, contentDiags := block.Body.PartialContent(moduleCallSchema) - diags = append(diags, contentDiags...) - - name := block.Labels[0] - mc := &ModuleCall{ - Name: block.Labels[0], - Pos: sourcePosHCL(block.DefRange), - } - - // check if this is overriding an existing module - var origSource string - if origMod, exists := mod.ModuleCalls[name]; exists { - origSource = origMod.Source - } - - mod.ModuleCalls[name] = mc - - if attr, defined := content.Attributes["source"]; defined { - var source string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &source) - diags = append(diags, valDiags...) - mc.Source = source - } - - if mc.Source == "" { - mc.Source = origSource - } - - if attr, defined := content.Attributes["version"]; defined { - var version string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) - diags = append(diags, valDiags...) - mc.Version = version - } - - default: - // Should never happen because our cases above should be - // exhaustive for our schema. 
- panic(fmt.Errorf("unhandled block type %q", block.Type)) - } - } - } - - return mod, diagnosticsHCL(diags) -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go deleted file mode 100644 index c79b033b6b..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go +++ /dev/null @@ -1,323 +0,0 @@ -package tfconfig - -import ( - "io/ioutil" - "strings" - - legacyhcl "github.com/hashicorp/hcl" - legacyast "github.com/hashicorp/hcl/hcl/ast" -) - -func loadModuleLegacyHCL(dir string) (*Module, Diagnostics) { - // This implementation is intentionally more quick-and-dirty than the - // main loader. In particular, it doesn't bother to keep careful track - // of multiple error messages because we always fall back on returning - // the main parser's error message if our fallback parsing produces - // an error, and thus the errors here are not seen by the end-caller. - mod := newModule(dir) - - primaryPaths, diags := dirFiles(dir) - if diags.HasErrors() { - return mod, diagnosticsHCL(diags) - } - - for _, filename := range primaryPaths { - src, err := ioutil.ReadFile(filename) - if err != nil { - return mod, diagnosticsErrorf("Error reading %s: %s", filename, err) - } - - hclRoot, err := legacyhcl.Parse(string(src)) - if err != nil { - return mod, diagnosticsErrorf("Error parsing %s: %s", filename, err) - } - - list, ok := hclRoot.Node.(*legacyast.ObjectList) - if !ok { - return mod, diagnosticsErrorf("Error parsing %s: no root object", filename) - } - - for _, item := range list.Filter("terraform").Items { - if len(item.Keys) > 0 { - item = &legacyast.ObjectItem{ - Val: &legacyast.ObjectType{ - List: &legacyast.ObjectList{ - Items: []*legacyast.ObjectItem{item}, - }, - }, - } - } - - type TerraformBlock struct { - RequiredVersion string `hcl:"required_version"` - } - var block TerraformBlock - err = legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("terraform block: %s", err) - } - - if block.RequiredVersion != "" { - mod.RequiredCore = append(mod.RequiredCore, block.RequiredVersion) - } - } - - if vars := list.Filter("variable"); len(vars.Items) > 0 { - vars = vars.Children() - type VariableBlock struct { - Type string `hcl:"type"` - Default interface{} - Description string - Fields []string `hcl:",decodedFields"` - } - - for _, item := range vars.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 1) - - if len(item.Keys) != 1 { - return nil, diagnosticsErrorf("variable block at %s has no label", item.Pos()) - } - - name := item.Keys[0].Token.Value().(string) - - var block VariableBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("invalid variable block at %s: %s", item.Pos(), err) - } - - // Clean up legacy HCL decoding ambiguity by unwrapping list of maps - if ms, ok := block.Default.([]map[string]interface{}); ok { - def := make(map[string]interface{}) - for _, m := range ms { - for k, v := range m { - def[k] = v - } - } - block.Default = def - } - - v := &Variable{ - Name: name, - Type: block.Type, - Description: block.Description, - Default: block.Default, - Pos: sourcePosLegacyHCL(item.Pos(), filename), - } - if _, exists := mod.Variables[name]; exists { - return nil, diagnosticsErrorf("duplicate variable block for %q", name) - } - mod.Variables[name] = v - - } - } - - if outputs := list.Filter("output"); len(outputs.Items) > 0 { - outputs = 
outputs.Children() - type OutputBlock struct { - Description string - } - - for _, item := range outputs.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 1) - - if len(item.Keys) != 1 { - return nil, diagnosticsErrorf("output block at %s has no label", item.Pos()) - } - - name := item.Keys[0].Token.Value().(string) - - var block OutputBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("invalid output block at %s: %s", item.Pos(), err) - } - - o := &Output{ - Name: name, - Description: block.Description, - Pos: sourcePosLegacyHCL(item.Pos(), filename), - } - if _, exists := mod.Outputs[name]; exists { - return nil, diagnosticsErrorf("duplicate output block for %q", name) - } - mod.Outputs[name] = o - } - } - - for _, blockType := range []string{"resource", "data"} { - if resources := list.Filter(blockType); len(resources.Items) > 0 { - resources = resources.Children() - type ResourceBlock struct { - Provider string - } - - for _, item := range resources.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 2) - - if len(item.Keys) != 2 { - return nil, diagnosticsErrorf("resource block at %s has wrong label count", item.Pos()) - } - - typeName := item.Keys[0].Token.Value().(string) - name := item.Keys[1].Token.Value().(string) - var mode ResourceMode - var rMap map[string]*Resource - switch blockType { - case "resource": - mode = ManagedResourceMode - rMap = mod.ManagedResources - case "data": - mode = DataResourceMode - rMap = mod.DataResources - } - - var block ResourceBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("invalid resource block at %s: %s", item.Pos(), err) - } - - var providerName, providerAlias string - if dotPos := strings.IndexByte(block.Provider, '.'); dotPos != -1 { - providerName = block.Provider[:dotPos] - providerAlias = block.Provider[dotPos+1:] - } else { - providerName = block.Provider - } - if providerName == "" { - providerName = resourceTypeDefaultProviderName(typeName) - } - - r := &Resource{ - Mode: mode, - Type: typeName, - Name: name, - Provider: ProviderRef{ - Name: providerName, - Alias: providerAlias, - }, - Pos: sourcePosLegacyHCL(item.Pos(), filename), - } - key := r.MapKey() - if _, exists := rMap[key]; exists { - return nil, diagnosticsErrorf("duplicate resource block for %q", key) - } - rMap[key] = r - } - } - - } - - if moduleCalls := list.Filter("module"); len(moduleCalls.Items) > 0 { - moduleCalls = moduleCalls.Children() - type ModuleBlock struct { - Source string - Version string - } - - for _, item := range moduleCalls.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 1) - - if len(item.Keys) != 1 { - return nil, diagnosticsErrorf("module block at %s has no label", item.Pos()) - } - - name := item.Keys[0].Token.Value().(string) - - var block ModuleBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("module block at %s: %s", item.Pos(), err) - } - - mc := &ModuleCall{ - Name: name, - Source: block.Source, - Version: block.Version, - Pos: sourcePosLegacyHCL(item.Pos(), filename), - } - // it's possible this module call is from an override file - if origMod, exists := mod.ModuleCalls[name]; exists { - if mc.Source == "" { - mc.Source = origMod.Source - } - } - mod.ModuleCalls[name] = mc - } - } - - if providerConfigs := list.Filter("provider"); len(providerConfigs.Items) > 0 { - providerConfigs = providerConfigs.Children() - type ProviderBlock struct { - Version string - } - - for _, item 
:= range providerConfigs.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 1) - - if len(item.Keys) != 1 { - return nil, diagnosticsErrorf("provider block at %s has no label", item.Pos()) - } - - name := item.Keys[0].Token.Value().(string) - - var block ProviderBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("invalid provider block at %s: %s", item.Pos(), err) - } - // Even if there wasn't an explicit version required, we still - // need an entry in our map to signal the unversioned dependency. - if _, exists := mod.RequiredProviders[name]; !exists { - mod.RequiredProviders[name] = &ProviderRequirement{} - } - - if block.Version != "" { - mod.RequiredProviders[name].VersionConstraints = append(mod.RequiredProviders[name].VersionConstraints, block.Version) - } - } - } - } - - return mod, nil -} - -// unwrapLegacyHCLObjectKeysFromJSON cleans up an edge case that can occur when -// parsing JSON as input: if we're parsing JSON then directly nested -// items will show up as additional "keys". -// -// For objects that expect a fixed number of keys, this breaks the -// decoding process. This function unwraps the object into what it would've -// looked like if it came directly from HCL by specifying the number of keys -// you expect. -// -// Example: -// -// { "foo": { "baz": {} } } -// -// Will show up with Keys being: []string{"foo", "baz"} -// when we really just want the first two. This function will fix this. -func unwrapLegacyHCLObjectKeysFromJSON(item *legacyast.ObjectItem, depth int) { - if len(item.Keys) > depth && item.Keys[0].Token.JSON { - for len(item.Keys) > depth { - // Pop off the last key - n := len(item.Keys) - key := item.Keys[n-1] - item.Keys[n-1] = nil - item.Keys = item.Keys[:n-1] - - // Wrap our value in a list - item.Val = &legacyast.ObjectType{ - List: &legacyast.ObjectList{ - Items: []*legacyast.ObjectItem{ - &legacyast.ObjectItem{ - Keys: []*legacyast.ObjectKey{key}, - Val: item.Val, - }, - }, - }, - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go deleted file mode 100644 index 63027d1841..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go +++ /dev/null @@ -1,35 +0,0 @@ -package tfconfig - -// Module is the top-level type representing a parsed and processed Terraform -// module. -type Module struct { - // Path is the local filesystem directory where the module was loaded from. - Path string `json:"path"` - - Variables map[string]*Variable `json:"variables"` - Outputs map[string]*Output `json:"outputs"` - - RequiredCore []string `json:"required_core,omitempty"` - RequiredProviders map[string]*ProviderRequirement `json:"required_providers"` - - ManagedResources map[string]*Resource `json:"managed_resources"` - DataResources map[string]*Resource `json:"data_resources"` - ModuleCalls map[string]*ModuleCall `json:"module_calls"` - - // Diagnostics records any errors and warnings that were detected during - // loading, primarily for inclusion in serialized forms of the module - // since this slice is also returned as a second argument from LoadModule. 
- Diagnostics Diagnostics `json:"diagnostics,omitempty"` -} - -func newModule(path string) *Module { - return &Module{ - Path: path, - Variables: make(map[string]*Variable), - Outputs: make(map[string]*Output), - RequiredProviders: make(map[string]*ProviderRequirement), - ManagedResources: make(map[string]*Resource), - DataResources: make(map[string]*Resource), - ModuleCalls: make(map[string]*ModuleCall), - } -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go deleted file mode 100644 index 5e1e05a726..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go +++ /dev/null @@ -1,11 +0,0 @@ -package tfconfig - -// ModuleCall represents a "module" block within a module. That is, a -// declaration of a child module from inside its parent. -type ModuleCall struct { - Name string `json:"name"` - Source string `json:"source"` - Version string `json:"version,omitempty"` - - Pos SourcePos `json:"pos"` -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go deleted file mode 100644 index 890b25e694..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go +++ /dev/null @@ -1,9 +0,0 @@ -package tfconfig - -// Output represents a single output from a Terraform module. -type Output struct { - Name string `json:"name"` - Description string `json:"description,omitempty"` - - Pos SourcePos `json:"pos"` -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go deleted file mode 100644 index 157c8c2c15..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go +++ /dev/null @@ -1,85 +0,0 @@ -package tfconfig - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/zclconf/go-cty/cty/gocty" -) - -// ProviderRef is a reference to a provider configuration within a module. -// It represents the contents of a "provider" argument in a resource, or -// a value in the "providers" map for a module call. -type ProviderRef struct { - Name string `json:"name"` - Alias string `json:"alias,omitempty"` // Empty if the default provider configuration is referenced -} - -type ProviderRequirement struct { - Source string `json:"source,omitempty"` - VersionConstraints []string `json:"version_constraints,omitempty"` -} - -func decodeRequiredProvidersBlock(block *hcl.Block) (map[string]*ProviderRequirement, hcl.Diagnostics) { - attrs, diags := block.Body.JustAttributes() - reqs := make(map[string]*ProviderRequirement) - for name, attr := range attrs { - expr, err := attr.Expr.Value(nil) - if err != nil { - diags = append(diags, err...) - } - - switch { - case expr.Type().IsPrimitiveType(): - var version string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) - diags = append(diags, valDiags...) 
- if !valDiags.HasErrors() { - reqs[name] = &ProviderRequirement{ - VersionConstraints: []string{version}, - } - } - - case expr.Type().IsObjectType(): - var pr ProviderRequirement - if expr.Type().HasAttribute("version") { - var version string - err := gocty.FromCtyValue(expr.GetAttr("version"), &version) - if err == nil { - pr.VersionConstraints = append(pr.VersionConstraints, version) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsuitable value type", - Detail: "Unsuitable value: string required", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - if expr.Type().HasAttribute("source") { - var source string - err := gocty.FromCtyValue(expr.GetAttr("source"), &source) - if err == nil { - pr.Source = source - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsuitable value type", - Detail: "Unsuitable value: string required", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - reqs[name] = &pr - - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsuitable value type", - Detail: "Unsuitable value: string required", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - - return reqs, diags -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go deleted file mode 100644 index 401c8fce97..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go +++ /dev/null @@ -1,64 +0,0 @@ -package tfconfig - -import ( - "fmt" - "strconv" - "strings" -) - -// Resource represents a single "resource" or "data" block within a module. -type Resource struct { - Mode ResourceMode `json:"mode"` - Type string `json:"type"` - Name string `json:"name"` - - Provider ProviderRef `json:"provider"` - - Pos SourcePos `json:"pos"` -} - -// MapKey returns a string that can be used to uniquely identify the receiver -// in a map[string]*Resource. -func (r *Resource) MapKey() string { - switch r.Mode { - case ManagedResourceMode: - return fmt.Sprintf("%s.%s", r.Type, r.Name) - case DataResourceMode: - return fmt.Sprintf("data.%s.%s", r.Type, r.Name) - default: - // should never happen - return fmt.Sprintf("[invalid_mode!].%s.%s", r.Type, r.Name) - } -} - -// ResourceMode represents the "mode" of a resource, which is used to -// distinguish between managed resources ("resource" blocks in config) and -// data resources ("data" blocks in config). -type ResourceMode rune - -const InvalidResourceMode ResourceMode = 0 -const ManagedResourceMode ResourceMode = 'M' -const DataResourceMode ResourceMode = 'D' - -func (m ResourceMode) String() string { - switch m { - case ManagedResourceMode: - return "managed" - case DataResourceMode: - return "data" - default: - return "" - } -} - -// MarshalJSON implements encoding/json.Marshaler. 
-func (m ResourceMode) MarshalJSON() ([]byte, error) { - return []byte(strconv.Quote(m.String())), nil -} - -func resourceTypeDefaultProviderName(typeName string) string { - if underPos := strings.IndexByte(typeName, '_'); underPos != -1 { - return typeName[:underPos] - } - return typeName -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go deleted file mode 100644 index fd6ca9e70d..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go +++ /dev/null @@ -1,106 +0,0 @@ -package tfconfig - -import ( - "github.com/hashicorp/hcl/v2" -) - -var rootSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "terraform", - LabelNames: nil, - }, - { - Type: "variable", - LabelNames: []string{"name"}, - }, - { - Type: "output", - LabelNames: []string{"name"}, - }, - { - Type: "provider", - LabelNames: []string{"name"}, - }, - { - Type: "resource", - LabelNames: []string{"type", "name"}, - }, - { - Type: "data", - LabelNames: []string{"type", "name"}, - }, - { - Type: "module", - LabelNames: []string{"name"}, - }, - }, -} - -var terraformBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "required_version", - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "required_providers", - }, - }, -} - -var providerConfigSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "version", - }, - { - Name: "alias", - }, - }, -} - -var variableSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "type", - }, - { - Name: "description", - }, - { - Name: "default", - }, - }, -} - -var outputSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "description", - }, - }, -} - -var moduleCallSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "source", - }, - { - Name: "version", - }, - { - Name: "providers", - }, - }, -} - -var resourceSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "provider", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go deleted file mode 100644 index 548c9f9a39..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go +++ /dev/null @@ -1,50 +0,0 @@ -package tfconfig - -import ( - legacyhcltoken "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/hcl/v2" -) - -// SourcePos is a pointer to a particular location in a source file. -// -// This type is embedded into other structs to allow callers to locate the -// definition of each described module element. The SourcePos of an element -// is usually the first line of its definition, although the definition can -// be a little "fuzzy" with JSON-based config files. -type SourcePos struct { - Filename string `json:"filename"` - Line int `json:"line"` -} - -func sourcePos(filename string, line int) SourcePos { - return SourcePos{ - Filename: filename, - Line: line, - } -} - -func sourcePosHCL(rng hcl.Range) SourcePos { - // We intentionally throw away the column information here because - // current and legacy HCL both disagree on the definition of a column - // and so a line-only reference is the best granularity we can do - // such that the result is consistent between both parsers. 
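The `hcl.BodySchema` values above (rootSchema and friends) are what drive block discovery in the HCL v2 code path: each entry names a block type and the labels it carries. A small sketch of how such a schema is consumed via `PartialContent`, assuming only the upstream `hcl/v2` packages (the config text is made up):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	src := []byte(`
resource "aws_instance" "web" {
  provider = "aws.west"
}

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 3.0"
}
`)
	f, diags := hclsyntax.ParseConfig(src, "main.tf", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags)
	}

	// A cut-down version of rootSchema: block types plus the labels each
	// one carries ("type"+"name" for resource, "name" for module).
	schema := &hcl.BodySchema{
		Blocks: []hcl.BlockHeaderSchema{
			{Type: "resource", LabelNames: []string{"type", "name"}},
			{Type: "module", LabelNames: []string{"name"}},
		},
	}
	content, _, diags := f.Body.PartialContent(schema)
	if diags.HasErrors() {
		panic(diags)
	}
	for _, b := range content.Blocks {
		fmt.Println(b.Type, b.Labels, "at line", b.DefRange.Start.Line)
	}
}
```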
- return SourcePos{ - Filename: rng.Filename, - Line: rng.Start.Line, - } -} - -func sourcePosLegacyHCL(pos legacyhcltoken.Pos, filename string) SourcePos { - useFilename := pos.Filename - // We'll try to use the filename given in legacy HCL position, but - // in practice there's no way to actually get this populated via - // the HCL API so it's usually empty except in some specialized - // situations, such as positions in error objects. - if useFilename == "" { - useFilename = filename - } - return SourcePos{ - Filename: useFilename, - Line: pos.Line, - } -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go deleted file mode 100644 index 0f73fc995a..0000000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go +++ /dev/null @@ -1,16 +0,0 @@ -package tfconfig - -// Variable represents a single variable from a Terraform module. -type Variable struct { - Name string `json:"name"` - Type string `json:"type,omitempty"` - Description string `json:"description,omitempty"` - - // Default is an approximate representation of the default value in - // the native Go type system. The conversion from the value given in - // configuration may be slightly lossy. Only values that can be - // serialized by json.Marshal will be included here. - Default interface{} `json:"default,omitempty"` - - Pos SourcePos `json:"pos"` -} diff --git a/vendor/github.com/hashicorp/terraform-exec/LICENSE b/vendor/github.com/hashicorp/terraform-exec/LICENSE index a612ad9813..c121cee6e5 100644 --- a/vendor/github.com/hashicorp/terraform-exec/LICENSE +++ b/vendor/github.com/hashicorp/terraform-exec/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2020 HashiCorp, Inc. + Mozilla Public License Version 2.0 ================================== diff --git a/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go b/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go index 4f26db7fb0..90b6688912 100644 --- a/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go +++ b/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go @@ -1,6 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package version -const version = "0.13.3" +const version = "0.20.0" // ModuleVersion returns the current version of the github.com/hashicorp/terraform-exec Go module. // This is a function to allow for future possible enhancement using debug.BuildInfo. diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go index 82d09d5fc7..2c5a6d07a9 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go @@ -1,14 +1,19 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( "context" "fmt" + "io" "os/exec" "strconv" ) type applyConfig struct { backup string + destroy bool dirOrPlan string lock bool @@ -17,6 +22,8 @@ type applyConfig struct { parallelism int reattachInfo ReattachInfo refresh bool + refreshOnly bool + replaceAddrs []string state string stateOut string targets []string @@ -27,6 +34,7 @@ type applyConfig struct { } var defaultApplyOptions = applyConfig{ + destroy: false, lock: true, parallelism: 10, refresh: true, @@ -73,6 +81,14 @@ func (opt *RefreshOption) configureApply(conf *applyConfig) { conf.refresh = opt.refresh } +func (opt *RefreshOnlyOption) configureApply(conf *applyConfig) { + conf.refreshOnly = opt.refreshOnly +} + +func (opt *ReplaceOption) configureApply(conf *applyConfig) { + conf.replaceAddrs = append(conf.replaceAddrs, opt.address) +} + func (opt *VarOption) configureApply(conf *applyConfig) { conf.vars = append(conf.vars, opt.assignment) } @@ -85,6 +101,10 @@ func (opt *ReattachOption) configureApply(conf *applyConfig) { conf.reattachInfo = opt.info } +func (opt *DestroyFlagOption) configureApply(conf *applyConfig) { + conf.destroy = opt.destroy +} + // Apply represents the terraform apply subcommand. func (tf *Terraform) Apply(ctx context.Context, opts ...ApplyOption) error { cmd, err := tf.applyCmd(ctx, opts...) @@ -94,6 +114,27 @@ func (tf *Terraform) Apply(ctx context.Context, opts ...ApplyOption) error { return tf.runTerraformCmd(ctx, cmd) } +// ApplyJSON represents the terraform apply subcommand with the `-json` flag. +// Using the `-json` flag will result in +// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui) +// JSON being written to the supplied `io.Writer`. ApplyJSON is likely to be +// removed in a future major version in favour of Apply returning JSON by default. +func (tf *Terraform) ApplyJSON(ctx context.Context, w io.Writer, opts ...ApplyOption) error { + err := tf.compatible(ctx, tf0_15_3, nil) + if err != nil { + return fmt.Errorf("terraform apply -json was added in 0.15.3: %w", err) + } + + tf.SetStdout(w) + + cmd, err := tf.applyJSONCmd(ctx, opts...) 
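ApplyJSON reuses the option plumbing of Apply via buildApplyArgs and simply appends `-json`. A usage sketch against the public API (the working directory and binary path are placeholders):

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
	// Placeholder paths; adjust for your environment.
	tf, err := tfexec.NewTerraform("/path/to/config", "/usr/local/bin/terraform")
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	if err := tf.Init(ctx); err != nil {
		log.Fatal(err)
	}

	// Streams machine-readable UI events to os.Stdout. ApplyJSON is gated
	// on Terraform >= 0.15.3, and -replace on >= 0.15.2, per the
	// compatibility checks above.
	if err := tf.ApplyJSON(ctx, os.Stdout, tfexec.Replace("null_resource.example")); err != nil {
		log.Fatal(err)
	}
}
```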
+ if err != nil { + return err + } + + return tf.runTerraformCmd(ctx, cmd) +} + func (tf *Terraform) applyCmd(ctx context.Context, opts ...ApplyOption) (*exec.Cmd, error) { c := defaultApplyOptions @@ -101,6 +142,32 @@ func (tf *Terraform) applyCmd(ctx context.Context, opts ...ApplyOption) (*exec.C o.configureApply(&c) } + args, err := tf.buildApplyArgs(ctx, c) + if err != nil { + return nil, err + } + + return tf.buildApplyCmd(ctx, c, args) +} + +func (tf *Terraform) applyJSONCmd(ctx context.Context, opts ...ApplyOption) (*exec.Cmd, error) { + c := defaultApplyOptions + + for _, o := range opts { + o.configureApply(&c) + } + + args, err := tf.buildApplyArgs(ctx, c) + if err != nil { + return nil, err + } + + args = append(args, "-json") + + return tf.buildApplyCmd(ctx, c, args) +} + +func (tf *Terraform) buildApplyArgs(ctx context.Context, c applyConfig) ([]string, error) { args := []string{"apply", "-no-color", "-auto-approve", "-input=false"} // string opts: only pass if set @@ -125,7 +192,35 @@ func (tf *Terraform) applyCmd(ctx context.Context, opts ...ApplyOption) (*exec.C args = append(args, "-parallelism="+fmt.Sprint(c.parallelism)) args = append(args, "-refresh="+strconv.FormatBool(c.refresh)) + if c.refreshOnly { + err := tf.compatible(ctx, tf0_15_4, nil) + if err != nil { + return nil, fmt.Errorf("refresh-only option was introduced in Terraform 0.15.4: %w", err) + } + if !c.refresh { + return nil, fmt.Errorf("you cannot use refresh=false in refresh-only planning mode") + } + args = append(args, "-refresh-only") + } + // string slice opts: split into separate args + if c.replaceAddrs != nil { + err := tf.compatible(ctx, tf0_15_2, nil) + if err != nil { + return nil, fmt.Errorf("replace option was introduced in Terraform 0.15.2: %w", err) + } + for _, addr := range c.replaceAddrs { + args = append(args, "-replace="+addr) + } + } + if c.destroy { + err := tf.compatible(ctx, tf0_15_2, nil) + if err != nil { + return nil, fmt.Errorf("-destroy option was introduced in Terraform 0.15.2: %w", err) + } + args = append(args, "-destroy") + } + if c.targets != nil { for _, ta := range c.targets { args = append(args, "-target="+ta) @@ -137,6 +232,10 @@ func (tf *Terraform) applyCmd(ctx context.Context, opts ...ApplyOption) (*exec.C } } + return args, nil +} + +func (tf *Terraform) buildApplyCmd(ctx context.Context, c applyConfig, args []string) (*exec.Cmd, error) { // string argument: pass if set if c.dirOrPlan != "" { args = append(args, c.dirOrPlan) diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go index e792dc9ce7..5e160324c8 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go @@ -1,9 +1,14 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( + "bufio" "bytes" "context" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -17,10 +22,12 @@ import ( const ( checkpointDisableEnvVar = "CHECKPOINT_DISABLE" cliArgsEnvVar = "TF_CLI_ARGS" - logEnvVar = "TF_LOG" inputEnvVar = "TF_INPUT" automationEnvVar = "TF_IN_AUTOMATION" + logEnvVar = "TF_LOG" + logCoreEnvVar = "TF_LOG_CORE" logPathEnvVar = "TF_LOG_PATH" + logProviderEnvVar = "TF_LOG_PROVIDER" reattachEnvVar = "TF_REATTACH_PROVIDERS" appendUserAgentEnvVar = "TF_APPEND_USER_AGENT" workspaceEnvVar = "TF_WORKSPACE" @@ -35,8 +42,10 @@ var prohibitedEnvVars = []string{ cliArgsEnvVar, inputEnvVar, automationEnvVar, - logPathEnvVar, logEnvVar, + logCoreEnvVar, + logPathEnvVar, + logProviderEnvVar, reattachEnvVar, appendUserAgentEnvVar, workspaceEnvVar, @@ -146,18 +155,21 @@ func (tf *Terraform) buildEnv(mergeEnv map[string]string) []string { if tf.logPath == "" { // so logging can't pollute our stderr output env[logEnvVar] = "" + env[logCoreEnvVar] = "" env[logPathEnvVar] = "" + env[logProviderEnvVar] = "" } else { + env[logEnvVar] = tf.log + env[logCoreEnvVar] = tf.logCore env[logPathEnvVar] = tf.logPath - // Log levels other than TRACE are currently unreliable, the CLI recommends using TRACE only. - env[logEnvVar] = "TRACE" + env[logProviderEnvVar] = tf.logProvider } // constant automation override env vars env[automationEnvVar] = "1" // force usage of workspace methods for switching - env[workspaceEnvVar] = "" + delete(env, workspaceEnvVar) if tf.disablePluginTLS { env[disablePluginTLSEnvVar] = "1" @@ -171,12 +183,12 @@ func (tf *Terraform) buildEnv(mergeEnv map[string]string) []string { } func (tf *Terraform) buildTerraformCmd(ctx context.Context, mergeEnv map[string]string, args ...string) *exec.Cmd { - cmd := exec.Command(tf.execPath, args...) + cmd := exec.CommandContext(ctx, tf.execPath, args...) cmd.Env = tf.buildEnv(mergeEnv) cmd.Dir = tf.workingDir - tf.logger.Printf("[INFO] running Terraform command: %s", cmdString(cmd)) + tf.logger.Printf("[INFO] running Terraform command: %s", cmd.String()) return cmd } @@ -230,3 +242,36 @@ func mergeWriters(writers ...io.Writer) io.Writer { } return io.MultiWriter(compact...) } + +func writeOutput(ctx context.Context, r io.ReadCloser, w io.Writer) error { + // ReadBytes will block until bytes are read, which can cause a delay in + // returning even if the command's context has been canceled. Use a separate + // goroutine to prompt ReadBytes to return on cancel + closeCtx, closeCancel := context.WithCancel(ctx) + defer closeCancel() + go func() { + select { + case <-ctx.Done(): + r.Close() + case <-closeCtx.Done(): + return + } + }() + + buf := bufio.NewReader(r) + for { + line, err := buf.ReadBytes('\n') + if len(line) > 0 { + if _, err := w.Write(line); err != nil { + return err + } + } + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + + return err + } + } +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go index 2e88dd3e6a..3af11c8122 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go @@ -1,31 +1,22 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build !linux // +build !linux package tfexec import ( "context" + "fmt" "os/exec" "strings" + "sync" ) func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { var errBuf strings.Builder - cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout) - cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf) - - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled { - if cmd != nil && cmd.Process != nil && cmd.ProcessState != nil { - err := cmd.Process.Kill() - if err != nil { - tf.logger.Printf("error from kill: %s", err) - } - } - } - }() - // check for early cancellation select { case <-ctx.Done(): @@ -33,12 +24,71 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { default: } - err := cmd.Run() - if err == nil && ctx.Err() != nil { - err = ctx.Err() + // Read stdout / stderr logs from pipe instead of setting cmd.Stdout and + // cmd.Stderr because it can cause hanging when killing the command + // https://github.com/golang/go/issues/23019 + stdoutWriter := mergeWriters(cmd.Stdout, tf.stdout) + stderrWriter := mergeWriters(tf.stderr, &errBuf) + + cmd.Stderr = nil + cmd.Stdout = nil + + stdoutPipe, err := cmd.StdoutPipe() + if err != nil { + return err + } + + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return err + } + + err = cmd.Start() + if ctx.Err() != nil { + return cmdErr{ + err: err, + ctxErr: ctx.Err(), + } + } + if err != nil { + return err + } + + var errStdout, errStderr error + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + errStdout = writeOutput(ctx, stdoutPipe, stdoutWriter) + }() + + wg.Add(1) + go func() { + defer wg.Done() + errStderr = writeOutput(ctx, stderrPipe, stderrWriter) + }() + + // Reads from pipes must be completed before calling cmd.Wait(). Otherwise + // can cause a race condition + wg.Wait() + + err = cmd.Wait() + if ctx.Err() != nil { + return cmdErr{ + err: err, + ctxErr: ctx.Err(), + } } if err != nil { - return tf.wrapExitError(ctx, err, errBuf.String()) + return fmt.Errorf("%w\n%s", err, errBuf.String()) + } + + // Return error if there was an issue reading the std out/err + if errStdout != nil && ctx.Err() != nil { + return fmt.Errorf("%w\n%s", errStdout, errBuf.String()) + } + if errStderr != nil && ctx.Err() != nil { + return fmt.Errorf("%w\n%s", errStderr, errBuf.String()) } return nil diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go index 7cbdcb96f1..0565372c12 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go @@ -1,18 +1,20 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( "context" + "fmt" "os/exec" "strings" + "sync" "syscall" ) func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { var errBuf strings.Builder - cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout) - cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf) - cmd.SysProcAttr = &syscall.SysProcAttr{ // kill children if parent is dead Pdeathsig: syscall.SIGKILL, @@ -20,21 +22,6 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { Setpgid: true, } - go func() { - <-ctx.Done() - if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled { - if cmd != nil && cmd.Process != nil && cmd.ProcessState != nil { - // send SIGINT to process group - err := syscall.Kill(-cmd.Process.Pid, syscall.SIGINT) - if err != nil { - tf.logger.Printf("error from SIGINT: %s", err) - } - } - - // TODO: send a kill if it doesn't respond for a bit? - } - }() - // check for early cancellation select { case <-ctx.Done(): @@ -42,12 +29,71 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { default: } - err := cmd.Run() - if err == nil && ctx.Err() != nil { - err = ctx.Err() + // Read stdout / stderr logs from pipe instead of setting cmd.Stdout and + // cmd.Stderr because it can cause hanging when killing the command + // https://github.com/golang/go/issues/23019 + stdoutWriter := mergeWriters(cmd.Stdout, tf.stdout) + stderrWriter := mergeWriters(tf.stderr, &errBuf) + + cmd.Stderr = nil + cmd.Stdout = nil + + stdoutPipe, err := cmd.StdoutPipe() + if err != nil { + return err + } + + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return err + } + + err = cmd.Start() + if ctx.Err() != nil { + return cmdErr{ + err: err, + ctxErr: ctx.Err(), + } + } + if err != nil { + return err + } + + var errStdout, errStderr error + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + errStdout = writeOutput(ctx, stdoutPipe, stdoutWriter) + }() + + wg.Add(1) + go func() { + defer wg.Done() + errStderr = writeOutput(ctx, stderrPipe, stderrWriter) + }() + + // Reads from pipes must be completed before calling cmd.Wait(). 
Otherwise + // can cause a race condition + wg.Wait() + + err = cmd.Wait() + if ctx.Err() != nil { + return cmdErr{ + err: err, + ctxErr: ctx.Err(), + } } if err != nil { - return tf.wrapExitError(ctx, err, errBuf.String()) + return fmt.Errorf("%w\n%s", err, errBuf.String()) + } + + // Return error if there was an issue reading the std out/err + if errStdout != nil && ctx.Err() != nil { + return fmt.Errorf("%w\n%s", errStdout, errBuf.String()) + } + if errStderr != nil && ctx.Err() != nil { + return fmt.Errorf("%w\n%s", errStderr, errBuf.String()) } return nil diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring.go deleted file mode 100644 index 4f81d1148e..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.13 - -package tfexec - -import ( - "os/exec" -) - -// cmdString handles go 1.12 as stringer was only added to exec.Cmd in 1.13 -func cmdString(c *exec.Cmd) string { - return c.String() -} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring_go112.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring_go112.go deleted file mode 100644 index 75614dbf6b..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring_go112.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !go1.13 - -package tfexec - -import ( - "os/exec" - "strings" -) - -// cmdString handles go 1.12 as stringer was only added to exec.Cmd in 1.13 -func cmdString(c *exec.Cmd) string { - b := new(strings.Builder) - b.WriteString(c.Path) - for _, a := range c.Args[1:] { - b.WriteByte(' ') - b.WriteString(a) - } - return b.String() -} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go index 8011c0ba86..dbef8b3757 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( "context" "fmt" + "io" "os/exec" "strconv" ) @@ -95,6 +99,27 @@ func (tf *Terraform) Destroy(ctx context.Context, opts ...DestroyOption) error { return tf.runTerraformCmd(ctx, cmd) } +// DestroyJSON represents the terraform destroy subcommand with the `-json` flag. +// Using the `-json` flag will result in +// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui) +// JSON being written to the supplied `io.Writer`. DestroyJSON is likely to be +// removed in a future major version in favour of Destroy returning JSON by default. +func (tf *Terraform) DestroyJSON(ctx context.Context, w io.Writer, opts ...DestroyOption) error { + err := tf.compatible(ctx, tf0_15_3, nil) + if err != nil { + return fmt.Errorf("terraform destroy -json was added in 0.15.3: %w", err) + } + + tf.SetStdout(w) + + cmd, err := tf.destroyJSONCmd(ctx, opts...) 
+ if err != nil { + return err + } + + return tf.runTerraformCmd(ctx, cmd) +} + func (tf *Terraform) destroyCmd(ctx context.Context, opts ...DestroyOption) (*exec.Cmd, error) { c := defaultDestroyOptions @@ -102,6 +127,25 @@ func (tf *Terraform) destroyCmd(ctx context.Context, opts ...DestroyOption) (*ex o.configureDestroy(&c) } + args := tf.buildDestroyArgs(c) + + return tf.buildDestroyCmd(ctx, c, args) +} + +func (tf *Terraform) destroyJSONCmd(ctx context.Context, opts ...DestroyOption) (*exec.Cmd, error) { + c := defaultDestroyOptions + + for _, o := range opts { + o.configureDestroy(&c) + } + + args := tf.buildDestroyArgs(c) + args = append(args, "-json") + + return tf.buildDestroyCmd(ctx, c, args) +} + +func (tf *Terraform) buildDestroyArgs(c destroyConfig) []string { args := []string{"destroy", "-no-color", "-auto-approve", "-input=false"} // string opts: only pass if set @@ -138,6 +182,10 @@ func (tf *Terraform) destroyCmd(ctx context.Context, opts ...DestroyOption) (*ex } } + return args +} + +func (tf *Terraform) buildDestroyCmd(ctx context.Context, c destroyConfig, args []string) (*exec.Cmd, error) { // optional positional argument if c.dir != "" { args = append(args, c.dir) diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go index 0e82bbd9f9..288476f516 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Package tfexec exposes functionality for constructing and running Terraform // CLI commands. Structured return values use the data types defined in the // github.com/hashicorp/terraform-json package. diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go index 7a32ef2f1f..c6645e8bc7 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go @@ -1,6 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec -import "fmt" +import ( + "context" + "fmt" +) // this file contains non-parsed exported errors @@ -37,3 +43,25 @@ type ErrManualEnvVar struct { func (err *ErrManualEnvVar) Error() string { return fmt.Sprintf("manual setting of env var %q detected", err.Name) } + +// cmdErr is a custom error type to be returned when a cmd exits with a context +// error such as context.Canceled or context.DeadlineExceeded. +// The type is specifically designed to respond true to errors.Is for these two +// errors. +// See https://github.com/golang/go/issues/21880 for why this is necessary. 
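The `cmdErr` type declared just below pairs the process error with the context error so that callers can keep using plain `errors.Is(err, context.Canceled)` checks. A self-contained sketch of the same pattern under a different name:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// wrappedErr reimplements the cmdErr idea: report the process error's
// message, but answer errors.Is for the context error that caused it.
type wrappedErr struct {
	err    error
	ctxErr error
}

func (e wrappedErr) Error() string { return e.err.Error() }

func (e wrappedErr) Is(target error) bool {
	switch target {
	case context.DeadlineExceeded, context.Canceled:
		return e.ctxErr == context.DeadlineExceeded || e.ctxErr == context.Canceled
	}
	return false
}

func main() {
	err := wrappedErr{
		err:    fmt.Errorf("signal: killed"),
		ctxErr: context.DeadlineExceeded,
	}
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
	fmt.Println(errors.Is(err, errors.New("boom")))       // false
}
```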
+type cmdErr struct { + err error + ctxErr error +} + +func (e cmdErr) Is(target error) bool { + switch target { + case context.DeadlineExceeded, context.Canceled: + return e.ctxErr == context.DeadlineExceeded || e.ctxErr == context.Canceled + } + return false +} + +func (e cmdErr) Error() string { + return e.err.Error() +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go deleted file mode 100644 index 5596fa2a13..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go +++ /dev/null @@ -1,247 +0,0 @@ -package tfexec - -import ( - "context" - "fmt" - "os/exec" - "regexp" - "strings" -) - -// this file contains errors parsed from stderr - -var ( - // The "Required variable not set:" case is for 0.11 - missingVarErrRegexp = regexp.MustCompile(`Error: No value for required variable|Error: Required variable not set:`) - missingVarNameRegexp = regexp.MustCompile(`The root module input variable\s"(.+)"\sis\snot\sset,\sand\shas\sno\sdefault|Error: Required variable not set: (.+)`) - - usageRegexp = regexp.MustCompile(`Too many command line arguments|^Usage: .*Options:.*|Error: Invalid -\d+ option`) - - // "Could not load plugin" is present in 0.13 - noInitErrRegexp = regexp.MustCompile(`Error: Could not satisfy plugin requirements|Error: Could not load plugin`) - - noConfigErrRegexp = regexp.MustCompile(`Error: No configuration files`) - - workspaceDoesNotExistRegexp = regexp.MustCompile(`Workspace "(.+)" doesn't exist.`) - - workspaceAlreadyExistsRegexp = regexp.MustCompile(`Workspace "(.+)" already exists`) - - tfVersionMismatchErrRegexp = regexp.MustCompile(`Error: The currently running version of Terraform doesn't meet the|Error: Unsupported Terraform Core version`) - tfVersionMismatchConstraintRegexp = regexp.MustCompile(`required_version = "(.+)"|Required version: (.+)\b`) - configInvalidErrRegexp = regexp.MustCompile(`There are some problems with the configuration, described below.`) -) - -func (tf *Terraform) wrapExitError(ctx context.Context, err error, stderr string) error { - exitErr, ok := err.(*exec.ExitError) - if !ok { - // not an exit error, short circuit, nothing to wrap - return err - } - - ctxErr := ctx.Err() - - // nothing to parse, return early - errString := strings.TrimSpace(stderr) - if errString == "" { - return &unwrapper{exitErr, ctxErr} - } - - switch { - case tfVersionMismatchErrRegexp.MatchString(stderr): - constraint := "" - constraints := tfVersionMismatchConstraintRegexp.FindStringSubmatch(stderr) - for i := 1; i < len(constraints); i++ { - constraint = strings.TrimSpace(constraints[i]) - if constraint != "" { - break - } - } - - if constraint == "" { - // hardcode a value here for weird cases (incl. 
0.12) - constraint = "unknown" - } - - // only set this if it happened to be cached already - ver := "" - if tf != nil && tf.execVersion != nil { - ver = tf.execVersion.String() - } - - return &ErrTFVersionMismatch{ - unwrapper: unwrapper{exitErr, ctxErr}, - - Constraint: constraint, - TFVersion: ver, - } - case missingVarErrRegexp.MatchString(stderr): - name := "" - names := missingVarNameRegexp.FindStringSubmatch(stderr) - for i := 1; i < len(names); i++ { - name = strings.TrimSpace(names[i]) - if name != "" { - break - } - } - - return &ErrMissingVar{ - unwrapper: unwrapper{exitErr, ctxErr}, - - VariableName: name, - } - case usageRegexp.MatchString(stderr): - return &ErrCLIUsage{ - unwrapper: unwrapper{exitErr, ctxErr}, - - stderr: stderr, - } - case noInitErrRegexp.MatchString(stderr): - return &ErrNoInit{ - unwrapper: unwrapper{exitErr, ctxErr}, - - stderr: stderr, - } - case noConfigErrRegexp.MatchString(stderr): - return &ErrNoConfig{ - unwrapper: unwrapper{exitErr, ctxErr}, - - stderr: stderr, - } - case workspaceDoesNotExistRegexp.MatchString(stderr): - submatches := workspaceDoesNotExistRegexp.FindStringSubmatch(stderr) - if len(submatches) == 2 { - return &ErrNoWorkspace{ - unwrapper: unwrapper{exitErr, ctxErr}, - - Name: submatches[1], - } - } - case workspaceAlreadyExistsRegexp.MatchString(stderr): - submatches := workspaceAlreadyExistsRegexp.FindStringSubmatch(stderr) - if len(submatches) == 2 { - return &ErrWorkspaceExists{ - unwrapper: unwrapper{exitErr, ctxErr}, - - Name: submatches[1], - } - } - case configInvalidErrRegexp.MatchString(stderr): - return &ErrConfigInvalid{stderr: stderr} - } - - return fmt.Errorf("%w\n%s", &unwrapper{exitErr, ctxErr}, stderr) -} - -type unwrapper struct { - err error - ctxErr error -} - -func (u *unwrapper) Unwrap() error { - return u.err -} - -func (u *unwrapper) Is(target error) bool { - switch target { - case context.DeadlineExceeded, context.Canceled: - return u.ctxErr == context.DeadlineExceeded || - u.ctxErr == context.Canceled - } - return false -} - -func (u *unwrapper) Error() string { - return u.err.Error() -} - -type ErrConfigInvalid struct { - stderr string -} - -func (e *ErrConfigInvalid) Error() string { - return "configuration is invalid" -} - -type ErrMissingVar struct { - unwrapper - - VariableName string -} - -func (err *ErrMissingVar) Error() string { - return fmt.Sprintf("variable %q was required but not supplied", err.VariableName) -} - -type ErrNoWorkspace struct { - unwrapper - - Name string -} - -func (err *ErrNoWorkspace) Error() string { - return fmt.Sprintf("workspace %q does not exist", err.Name) -} - -// ErrWorkspaceExists is returned when creating a workspace that already exists -type ErrWorkspaceExists struct { - unwrapper - - Name string -} - -func (err *ErrWorkspaceExists) Error() string { - return fmt.Sprintf("workspace %q already exists", err.Name) -} - -type ErrNoInit struct { - unwrapper - - stderr string -} - -func (e *ErrNoInit) Error() string { - return e.stderr -} - -type ErrNoConfig struct { - unwrapper - - stderr string -} - -func (e *ErrNoConfig) Error() string { - return e.stderr -} - -// ErrCLIUsage is returned when the combination of flags or arguments is incorrect. -// -// CLI indicates usage errors in three different ways: either -// 1. Exit 1, with a custom error message on stderr. -// 2. Exit 1, with command usage logged to stderr. -// 3. Exit 127, with command usage logged to stdout. -// Currently cases 1 and 2 are handled. -// TODO KEM: Handle exit 127 case. 
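The deleted exit_errors.go classified Terraform's stderr with regular expressions like those above, returning typed errors (ErrNoWorkspace, ErrTFVersionMismatch, and so on); after this change the raw stderr is simply wrapped with fmt.Errorf. A quick standalone illustration of the old matching style, using a made-up stderr string:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// One of the patterns the removed wrapExitError used.
	workspaceDoesNotExist := regexp.MustCompile(`Workspace "(.+)" doesn't exist.`)

	stderr := `Workspace "staging" doesn't exist.` // made-up CLI output
	if m := workspaceDoesNotExist.FindStringSubmatch(stderr); len(m) == 2 {
		fmt.Printf("would have produced ErrNoWorkspace{Name: %q}\n", m[1])
	}
}
```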
How does this work on non-Unix platforms? -type ErrCLIUsage struct { - unwrapper - - stderr string -} - -func (e *ErrCLIUsage) Error() string { - return e.stderr -} - -// ErrTFVersionMismatch is returned when the running Terraform version is not compatible with the -// value specified for required_version in the terraform block. -type ErrTFVersionMismatch struct { - unwrapper - - TFVersion string - - // Constraint is not returned in the error messaging on 0.12 - Constraint string -} - -func (e *ErrTFVersionMismatch) Error() string { - return "terraform core version not supported by configuration" -} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go index 10f6cb4cf4..0979492336 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go @@ -1,7 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( - "bytes" "context" "fmt" "io" @@ -44,7 +46,7 @@ func FormatString(ctx context.Context, execPath string, content string) (string, // FormatString formats a passed string. func (tf *Terraform) FormatString(ctx context.Context, content string) (string, error) { in := strings.NewReader(content) - var outBuf bytes.Buffer + var outBuf strings.Builder err := tf.Format(ctx, in, &outBuf) if err != nil { return "", err @@ -101,7 +103,7 @@ func (tf *Terraform) FormatCheck(ctx context.Context, opts ...FormatOption) (boo return false, nil, err } - var outBuf bytes.Buffer + var outBuf strings.Builder cmd.Stdout = mergeWriters(cmd.Stdout, &outBuf) err = tf.runTerraformCmd(ctx, cmd) diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go new file mode 100644 index 0000000000..7d74a12f93 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfexec + +import ( + "context" + "fmt" + "os/exec" +) + +type forceUnlockConfig struct { + dir string +} + +var defaultForceUnlockOptions = forceUnlockConfig{} + +type ForceUnlockOption interface { + configureForceUnlock(*forceUnlockConfig) +} + +func (opt *DirOption) configureForceUnlock(conf *forceUnlockConfig) { + conf.dir = opt.path +} + +// ForceUnlock represents the `terraform force-unlock` command +func (tf *Terraform) ForceUnlock(ctx context.Context, lockID string, opts ...ForceUnlockOption) error { + unlockCmd, err := tf.forceUnlockCmd(ctx, lockID, opts...) 
+ if err != nil { + return err + } + + if err := tf.runTerraformCmd(ctx, unlockCmd); err != nil { + return err + } + + return nil +} + +func (tf *Terraform) forceUnlockCmd(ctx context.Context, lockID string, opts ...ForceUnlockOption) (*exec.Cmd, error) { + c := defaultForceUnlockOptions + + for _, o := range opts { + o.configureForceUnlock(&c) + } + args := []string{"force-unlock", "-no-color", "-force"} + + // positional arguments + args = append(args, lockID) + + // optional positional arguments + if c.dir != "" { + err := tf.compatible(ctx, nil, tf0_15_0) + if err != nil { + return nil, fmt.Errorf("[DIR] option was removed in Terraform v0.15.0") + } + args = append(args, c.dir) + } + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/get.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/get.go new file mode 100644 index 0000000000..8a1363b5df --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/get.go @@ -0,0 +1,55 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfexec + +import ( + "context" + "fmt" + "os/exec" +) + +type getCmdConfig struct { + dir string + update bool +} + +// GetCmdOption represents options used in the Get method. +type GetCmdOption interface { + configureGet(*getCmdConfig) +} + +func (opt *DirOption) configureGet(conf *getCmdConfig) { + conf.dir = opt.path +} + +func (opt *UpdateOption) configureGet(conf *getCmdConfig) { + conf.update = opt.update +} + +// Get represents the terraform get subcommand. +func (tf *Terraform) Get(ctx context.Context, opts ...GetCmdOption) error { + cmd, err := tf.getCmd(ctx, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) getCmd(ctx context.Context, opts ...GetCmdOption) (*exec.Cmd, error) { + c := getCmdConfig{} + + for _, o := range opts { + o.configureGet(&c) + } + + args := []string{"get", "-no-color"} + + args = append(args, "-update="+fmt.Sprint(c.update)) + + if c.dir != "" { + args = append(args, c.dir) + } + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go new file mode 100644 index 0000000000..0f8b0eee25 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strings" +) + +type graphConfig struct { + plan string + drawCycles bool + graphType string +} + +var defaultGraphOptions = graphConfig{} + +type GraphOption interface { + configureGraph(*graphConfig) +} + +func (opt *GraphPlanOption) configureGraph(conf *graphConfig) { + conf.plan = opt.file +} + +func (opt *DrawCyclesOption) configureGraph(conf *graphConfig) { + conf.drawCycles = opt.drawCycles +} + +func (opt *GraphTypeOption) configureGraph(conf *graphConfig) { + conf.graphType = opt.graphType +} + +func (tf *Terraform) Graph(ctx context.Context, opts ...GraphOption) (string, error) { + graphCmd, err := tf.graphCmd(ctx, opts...) 
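The force-unlock, get, and graph wrappers are all new in this vendored release. A combined usage sketch (the paths, lock ID, and option values are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
	// Placeholder paths; adjust for your environment.
	tf, err := tfexec.NewTerraform("/path/to/config", "/usr/local/bin/terraform")
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// terraform get -no-color -update=true
	if err := tf.Get(ctx, tfexec.Update(true)); err != nil {
		log.Fatal(err)
	}

	// terraform graph -type=plan; version-gated flags are checked internally.
	dot, err := tf.Graph(ctx, tfexec.GraphType("plan"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dot)

	// terraform force-unlock -force <id>; the ID below is a placeholder you
	// would normally copy from a "state locked" error message.
	if err := tf.ForceUnlock(ctx, "00000000-0000-0000-0000-000000000000"); err != nil {
		log.Fatal(err)
	}
}
```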
+ if err != nil { + return "", err + } + var outBuf strings.Builder + graphCmd.Stdout = &outBuf + err = tf.runTerraformCmd(ctx, graphCmd) + if err != nil { + return "", err + } + + return outBuf.String(), nil + +} + +func (tf *Terraform) graphCmd(ctx context.Context, opts ...GraphOption) (*exec.Cmd, error) { + c := defaultGraphOptions + + for _, o := range opts { + o.configureGraph(&c) + } + + args := []string{"graph"} + + if c.plan != "" { + // plan was a positional argument prior to Terraform 0.15.0. Ensure proper use by checking version. + if err := tf.compatible(ctx, tf0_15_0, nil); err == nil { + args = append(args, "-plan="+c.plan) + } else { + args = append(args, c.plan) + } + } + + if c.drawCycles { + err := tf.compatible(ctx, tf0_5_0, nil) + if err != nil { + return nil, fmt.Errorf("-draw-cycles was first introduced in Terraform 0.5.0: %w", err) + } + args = append(args, "-draw-cycles") + } + + if c.graphType != "" { + err := tf.compatible(ctx, tf0_8_0, nil) + if err != nil { + return nil, fmt.Errorf("-graph-type was first introduced in Terraform 0.8.0: %w", err) + } + args = append(args, "-type="+c.graphType) + } + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go index e243d72817..67275dfaa7 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go index bff9ecd3ed..c292fdc0f1 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( @@ -52,6 +55,10 @@ func (opt *DirOption) configureInit(conf *initConfig) { conf.dir = opt.path } +func (opt *ForceCopyOption) configureInit(conf *initConfig) { + conf.forceCopy = opt.forceCopy +} + func (opt *FromModuleOption) configureInit(conf *initConfig) { conf.fromModule = opt.source } @@ -116,7 +123,7 @@ func (tf *Terraform) initCmd(ctx context.Context, opts ...InitOption) (*exec.Cmd o.configureInit(&c) } - args := []string{"init", "-no-color", "-force-copy", "-input=false"} + args := []string{"init", "-no-color", "-input=false"} // string opts: only pass if set if c.fromModule != "" { @@ -144,6 +151,10 @@ func (tf *Terraform) initCmd(ctx context.Context, opts ...InitOption) (*exec.Cmd args = append(args, "-verify-plugins="+fmt.Sprint(c.verifyPlugins)) } + if c.forceCopy { + args = append(args, "-force-copy") + } + // unary flags: pass if true if c.reconfigure { args = append(args, "-reconfigure") diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/metadata_functions.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/metadata_functions.go new file mode 100644 index 0000000000..0e642b2db8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/metadata_functions.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfexec + +import ( + "context" + "fmt" + "os/exec" + + tfjson "github.com/hashicorp/terraform-json" +) + +// MetadataFunctions represents the terraform metadata functions -json subcommand. 
+func (tf *Terraform) MetadataFunctions(ctx context.Context) (*tfjson.MetadataFunctions, error) { + err := tf.compatible(ctx, tf1_4_0, nil) + if err != nil { + return nil, fmt.Errorf("terraform metadata functions was added in 1.4.0: %w", err) + } + + functionsCmd := tf.metadataFunctionsCmd(ctx) + + var ret tfjson.MetadataFunctions + err = tf.runTerraformCmdJSON(ctx, functionsCmd, &ret) + if err != nil { + return nil, err + } + + return &ret, nil +} + +func (tf *Terraform) metadataFunctionsCmd(ctx context.Context, args ...string) *exec.Cmd { + allArgs := []string{"metadata", "functions", "-json"} + allArgs = append(allArgs, args...) + + return tf.buildTerraformCmd(ctx, nil, allArgs...) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go index 6d20869bd4..d783027a4f 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( @@ -14,6 +17,16 @@ func AllowMissingConfig(allowMissingConfig bool) *AllowMissingConfigOption { return &AllowMissingConfigOption{allowMissingConfig} } +// AllowMissingOption represents the -allow-missing flag. +type AllowMissingOption struct { + allowMissing bool +} + +// AllowMissing represents the -allow-missing flag. +func AllowMissing(allowMissing bool) *AllowMissingOption { + return &AllowMissingOption{allowMissing} +} + // BackendOption represents the -backend flag. type BackendOption struct { backend bool @@ -108,6 +121,15 @@ func Destroy(destroy bool) *DestroyFlagOption { return &DestroyFlagOption{destroy} } +type DrawCyclesOption struct { + drawCycles bool +} + +// DrawCycles represents the -draw-cycles flag. +func DrawCycles(drawCycles bool) *DrawCyclesOption { + return &DrawCyclesOption{drawCycles} +} + type DryRunOption struct { dryRun bool } @@ -117,6 +139,15 @@ func DryRun(dryRun bool) *DryRunOption { return &DryRunOption{dryRun} } +type FSMirrorOption struct { + fsMirror string +} + +// FSMirror represents the -fs-mirror option (path to filesystem mirror directory) +func FSMirror(fsMirror string) *FSMirrorOption { + return &FSMirrorOption{fsMirror} +} + type ForceOption struct { force bool } @@ -178,6 +209,15 @@ func LockTimeout(lockTimeout string) *LockTimeoutOption { return &LockTimeoutOption{lockTimeout} } +type NetMirrorOption struct { + netMirror string +} + +// NetMirror represents the -net-mirror option (base URL of a network mirror) +func NetMirror(netMirror string) *NetMirrorOption { + return &NetMirrorOption{netMirror} +} + type OutOption struct { path string } @@ -194,6 +234,33 @@ func Parallelism(n int) *ParallelismOption { return &ParallelismOption{n} } +type GraphPlanOption struct { + file string +} + +// GraphPlan represents the -plan flag which is a specified plan file string +func GraphPlan(file string) *GraphPlanOption { + return &GraphPlanOption{file} +} + +type UseJSONNumberOption struct { + useJSONNumber bool +} + +// JSONNumber determines how numerical values are handled during JSON decoding. 
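MetadataFunctions shells out to `terraform metadata functions -json` (Terraform 1.4.0+) and decodes the result into the terraform-json type. A usage sketch with placeholder paths:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
	tf, err := tfexec.NewTerraform("/path/to/config", "/usr/local/bin/terraform") // placeholders
	if err != nil {
		log.Fatal(err)
	}

	fns, err := tf.MetadataFunctions(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// List the names of the built-in functions the binary reports.
	for name := range fns.Signatures {
		fmt.Println(name)
	}
}
```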
+func JSONNumber(useJSONNumber bool) *UseJSONNumberOption { + return &UseJSONNumberOption{useJSONNumber} +} + +type PlatformOption struct { + platform string +} + +// Platform represents the -platform flag which is an os_arch string +func Platform(platform string) *PlatformOption { + return &PlatformOption{platform} +} + type PluginDirOption struct { pluginDir string } @@ -202,15 +269,25 @@ func PluginDir(pluginDir string) *PluginDirOption { return &PluginDirOption{pluginDir} } +type ProviderOption struct { + provider string +} + +// Provider represents the positional argument (provider source address) +func Provider(providers string) *ProviderOption { + return &ProviderOption{providers} +} + type ReattachInfo map[string]ReattachConfig // ReattachConfig holds the information Terraform needs to be able to attach // itself to a provider process, so it can drive the process. type ReattachConfig struct { - Protocol string - Pid int - Test bool - Addr ReattachConfigAddr + Protocol string + ProtocolVersion int + Pid int + Test bool + Addr ReattachConfigAddr } // ReattachConfigAddr is a JSON-encoding friendly version of net.Addr. @@ -259,6 +336,22 @@ func Refresh(refresh bool) *RefreshOption { return &RefreshOption{refresh} } +type RefreshOnlyOption struct { + refreshOnly bool +} + +func RefreshOnly(refreshOnly bool) *RefreshOnlyOption { + return &RefreshOnlyOption{refreshOnly} +} + +type ReplaceOption struct { + address string +} + +func Replace(address string) *ReplaceOption { + return &ReplaceOption{address} +} + type StateOption struct { path string } @@ -289,6 +382,31 @@ func Target(resource string) *TargetOption { return &TargetOption{resource} } +type TestsDirectoryOption struct { + testsDirectory string +} + +// TestsDirectory represents the -tests-directory option (path to tests files) +func TestsDirectory(testsDirectory string) *TestsDirectoryOption { + return &TestsDirectoryOption{testsDirectory} +} + +type GraphTypeOption struct { + graphType string +} + +func GraphType(graphType string) *GraphTypeOption { + return &GraphTypeOption{graphType} +} + +type UpdateOption struct { + update bool +} + +func Update(update bool) *UpdateOption { + return &UpdateOption{update} +} + type UpgradeOption struct { upgrade bool } diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go index b16b8b7289..b1185e8a17 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go index bfe77db73f..946ce8d0ad 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( "context" "fmt" + "io" "os/exec" "strconv" ) @@ -16,6 +20,8 @@ type planConfig struct { parallelism int reattachInfo ReattachInfo refresh bool + refreshOnly bool + replaceAddrs []string state string targets []string vars []string @@ -63,6 +69,14 @@ func (opt *RefreshOption) configurePlan(conf *planConfig) { conf.refresh = opt.refresh } +func (opt *RefreshOnlyOption) configurePlan(conf *planConfig) { + conf.refreshOnly = opt.refreshOnly +} + +func (opt *ReplaceOption) configurePlan(conf *planConfig) { + conf.replaceAddrs = append(conf.replaceAddrs, opt.address) +} + func (opt *ParallelismOption) configurePlan(conf *planConfig) { conf.parallelism = opt.parallelism } @@ -103,6 +117,42 @@ func (tf *Terraform) Plan(ctx context.Context, opts ...PlanOption) (bool, error) return false, err } +// PlanJSON executes `terraform plan` with the specified options as well as the +// `-json` flag and waits for it to complete. +// +// Using the `-json` flag will result in +// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui) +// JSON being written to the supplied `io.Writer`. +// +// The returned boolean is false when the plan diff is empty (no changes) and +// true when the plan diff is non-empty (changes present). +// +// The returned error is nil if `terraform plan` has been executed and exits +// with either 0 or 2. +// +// PlanJSON is likely to be removed in a future major version in favour of +// Plan returning JSON by default. +func (tf *Terraform) PlanJSON(ctx context.Context, w io.Writer, opts ...PlanOption) (bool, error) { + err := tf.compatible(ctx, tf0_15_3, nil) + if err != nil { + return false, fmt.Errorf("terraform plan -json was added in 0.15.3: %w", err) + } + + tf.SetStdout(w) + + cmd, err := tf.planJSONCmd(ctx, opts...) 
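Like Plan, PlanJSON always passes `-detailed-exitcode`, so exit status 2 ("changes present") is translated into `(true, nil)` rather than an error, as the snippet above shows. A usage sketch with placeholder paths:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
	tf, err := tfexec.NewTerraform("/path/to/config", "/usr/local/bin/terraform") // placeholders
	if err != nil {
		log.Fatal(err)
	}

	// Requires Terraform >= 0.15.3 for -json and >= 0.15.4 for -refresh-only.
	changed, err := tf.PlanJSON(context.Background(), os.Stdout, tfexec.RefreshOnly(true))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("plan has changes:", changed)
}
```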
+ if err != nil { + return false, err + } + + err = tf.runTerraformCmd(ctx, cmd) + if err != nil && cmd.ProcessState.ExitCode() == 2 { + return true, nil + } + + return false, err +} + func (tf *Terraform) planCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd, error) { c := defaultPlanOptions @@ -110,6 +160,32 @@ func (tf *Terraform) planCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd o.configurePlan(&c) } + args, err := tf.buildPlanArgs(ctx, c) + if err != nil { + return nil, err + } + + return tf.buildPlanCmd(ctx, c, args) +} + +func (tf *Terraform) planJSONCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd, error) { + c := defaultPlanOptions + + for _, o := range opts { + o.configurePlan(&c) + } + + args, err := tf.buildPlanArgs(ctx, c) + if err != nil { + return nil, err + } + + args = append(args, "-json") + + return tf.buildPlanCmd(ctx, c, args) +} + +func (tf *Terraform) buildPlanArgs(ctx context.Context, c planConfig) ([]string, error) { args := []string{"plan", "-no-color", "-input=false", "-detailed-exitcode"} // string opts: only pass if set @@ -131,7 +207,27 @@ func (tf *Terraform) planCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd args = append(args, "-parallelism="+fmt.Sprint(c.parallelism)) args = append(args, "-refresh="+strconv.FormatBool(c.refresh)) + if c.refreshOnly { + err := tf.compatible(ctx, tf0_15_4, nil) + if err != nil { + return nil, fmt.Errorf("refresh-only option was introduced in Terraform 0.15.4: %w", err) + } + if !c.refresh { + return nil, fmt.Errorf("you cannot use refresh=false in refresh-only planning mode") + } + args = append(args, "-refresh-only") + } + // unary flags: pass if true + if c.replaceAddrs != nil { + err := tf.compatible(ctx, tf0_15_2, nil) + if err != nil { + return nil, fmt.Errorf("replace option was introduced in Terraform 0.15.2: %w", err) + } + for _, addr := range c.replaceAddrs { + args = append(args, "-replace="+addr) + } + } if c.destroy { args = append(args, "-destroy") } @@ -148,6 +244,10 @@ func (tf *Terraform) planCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd } } + return args, nil +} + +func (tf *Terraform) buildPlanCmd(ctx context.Context, c planConfig, args []string) (*exec.Cmd, error) { // optional positional argument if c.dir != "" { args = append(args, c.dir) diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_lock.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_lock.go new file mode 100644 index 0000000000..ef5d995b2a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_lock.go @@ -0,0 +1,85 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tfexec + +import ( + "context" + "fmt" + "os/exec" +) + +type providersLockConfig struct { + fsMirror string + netMirror string + platforms []string + providers []string +} + +var defaultProvidersLockOptions = providersLockConfig{} + +type ProvidersLockOption interface { + configureProvidersLock(*providersLockConfig) +} + +func (opt *FSMirrorOption) configureProvidersLock(conf *providersLockConfig) { + conf.fsMirror = opt.fsMirror +} + +func (opt *NetMirrorOption) configureProvidersLock(conf *providersLockConfig) { + conf.netMirror = opt.netMirror +} + +func (opt *PlatformOption) configureProvidersLock(conf *providersLockConfig) { + conf.platforms = append(conf.platforms, opt.platform) +} + +func (opt *ProviderOption) configureProvidersLock(conf *providersLockConfig) { + conf.providers = append(conf.providers, opt.provider) +} + +// ProvidersLock represents the `terraform providers lock` command +func (tf *Terraform) ProvidersLock(ctx context.Context, opts ...ProvidersLockOption) error { + err := tf.compatible(ctx, tf0_14_0, nil) + if err != nil { + return fmt.Errorf("terraform providers lock was added in 0.14.0: %w", err) + } + + lockCmd := tf.providersLockCmd(ctx, opts...) + + err = tf.runTerraformCmd(ctx, lockCmd) + if err != nil { + return err + } + + return err +} + +func (tf *Terraform) providersLockCmd(ctx context.Context, opts ...ProvidersLockOption) *exec.Cmd { + c := defaultProvidersLockOptions + + for _, o := range opts { + o.configureProvidersLock(&c) + } + args := []string{"providers", "lock"} + + // string options, only pass if set + if c.fsMirror != "" { + args = append(args, "-fs-mirror="+c.fsMirror) + } + + if c.netMirror != "" { + args = append(args, "-net-mirror="+c.netMirror) + } + + for _, p := range c.platforms { + args = append(args, "-platform="+p) + } + + // positional providers argument + for _, p := range c.providers { + args = append(args, p) + } + + return tf.buildTerraformCmd(ctx, nil, args...) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go index 52efc5db60..995dd15697 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go index 78f6b4b501..16733889d8 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go @@ -1,7 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( "context" + "fmt" + "io" "os/exec" "strconv" ) @@ -78,6 +83,27 @@ func (tf *Terraform) Refresh(ctx context.Context, opts ...RefreshCmdOption) erro return tf.runTerraformCmd(ctx, cmd) } +// RefreshJSON represents the terraform refresh subcommand with the `-json` flag. +// Using the `-json` flag will result in +// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui) +// JSON being written to the supplied `io.Writer`. RefreshJSON is likely to be +// removed in a future major version in favour of Refresh returning JSON by default. 
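Taken together, PlanJSON above and the RefreshJSON function declared just below are drop-in JSON variants of Plan and Refresh: same options, with the machine-readable UI stream sent to the supplied writer. A minimal caller-side sketch (not part of the diff; both paths are placeholders, and both calls require Terraform CLI 0.15.3 or later):

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/hashicorp/terraform-exec/tfexec"
)

func main() {
	// Placeholder paths; NewTerraform errors on a blank execPath.
	tf, err := tfexec.NewTerraform("/path/to/config", "/usr/local/bin/terraform")
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Machine-readable refresh output streams to the supplied writer.
	if err := tf.RefreshJSON(ctx, os.Stdout); err != nil {
		log.Fatal(err)
	}

	// PlanJSON also reports whether the plan contains changes
	// (plan's exit code 2 is translated to true, nil).
	hasChanges, err := tf.PlanJSON(ctx, os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("plan has changes: %t", hasChanges)
}
```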
+func (tf *Terraform) RefreshJSON(ctx context.Context, w io.Writer, opts ...RefreshCmdOption) error { + err := tf.compatible(ctx, tf0_15_3, nil) + if err != nil { + return fmt.Errorf("terraform refresh -json was added in 0.15.3: %w", err) + } + + tf.SetStdout(w) + + cmd, err := tf.refreshJSONCmd(ctx, opts...) + if err != nil { + return err + } + + return tf.runTerraformCmd(ctx, cmd) +} + func (tf *Terraform) refreshCmd(ctx context.Context, opts ...RefreshCmdOption) (*exec.Cmd, error) { c := defaultRefreshOptions @@ -85,6 +111,26 @@ func (tf *Terraform) refreshCmd(ctx context.Context, opts ...RefreshCmdOption) ( o.configureRefresh(&c) } + args := tf.buildRefreshArgs(c) + + return tf.buildRefreshCmd(ctx, c, args) + +} + +func (tf *Terraform) refreshJSONCmd(ctx context.Context, opts ...RefreshCmdOption) (*exec.Cmd, error) { + c := defaultRefreshOptions + + for _, o := range opts { + o.configureRefresh(&c) + } + + args := tf.buildRefreshArgs(c) + args = append(args, "-json") + + return tf.buildRefreshCmd(ctx, c, args) +} + +func (tf *Terraform) buildRefreshArgs(c refreshConfig) []string { args := []string{"refresh", "-no-color", "-input=false"} // string opts: only pass if set @@ -119,6 +165,10 @@ func (tf *Terraform) refreshCmd(ctx context.Context, opts ...RefreshCmdOption) ( } } + return args +} + +func (tf *Terraform) buildRefreshCmd(ctx context.Context, c refreshConfig, args []string) (*exec.Cmd, error) { // optional positional argument if c.dir != "" { args = append(args, c.dir) diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go index a8d67f1a4c..5854af1da1 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go @@ -1,16 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( - "bytes" "context" "fmt" "os/exec" + "strings" tfjson "github.com/hashicorp/terraform-json" ) type showConfig struct { reattachInfo ReattachInfo + jsonNumber *UseJSONNumberOption } var defaultShowOptions = showConfig{} @@ -23,6 +27,10 @@ func (opt *ReattachOption) configureShow(conf *showConfig) { conf.reattachInfo = opt.info } +func (opt *UseJSONNumberOption) configureShow(conf *showConfig) { + conf.jsonNumber = opt +} + // Show reads the default state path and outputs the state. // To read a state or plan file, ShowState or ShowPlan must be used instead. func (tf *Terraform) Show(ctx context.Context, opts ...ShowOption) (*tfjson.State, error) { @@ -50,6 +58,11 @@ func (tf *Terraform) Show(ctx context.Context, opts ...ShowOption) (*tfjson.Stat var ret tfjson.State ret.UseJSONNumber(true) + + if c.jsonNumber != nil { + ret.UseJSONNumber(c.jsonNumber.useJSONNumber) + } + err = tf.runTerraformCmdJSON(ctx, showCmd, &ret) if err != nil { return nil, err @@ -93,6 +106,11 @@ func (tf *Terraform) ShowStateFile(ctx context.Context, statePath string, opts . var ret tfjson.State ret.UseJSONNumber(true) + + if c.jsonNumber != nil { + ret.UseJSONNumber(c.jsonNumber.useJSONNumber) + } + err = tf.runTerraformCmdJSON(ctx, showCmd, &ret) if err != nil { return nil, err @@ -135,6 +153,11 @@ func (tf *Terraform) ShowPlanFile(ctx context.Context, planPath string, opts ... 
 	showCmd := tf.showCmd(ctx, true, mergeEnv, planPath)
 
 	var ret tfjson.Plan
+
+	if c.jsonNumber != nil {
+		ret.UseJSONNumber(c.jsonNumber.useJSONNumber)
+	}
+
 	err = tf.runTerraformCmdJSON(ctx, showCmd, &ret)
 	if err != nil {
 		return nil, err
@@ -173,14 +196,14 @@ func (tf *Terraform) ShowPlanFileRaw(ctx context.Context, planPath string, opts
 
 	showCmd := tf.showCmd(ctx, false, mergeEnv, planPath)
 
-	var ret bytes.Buffer
-	showCmd.Stdout = &ret
+	var outBuf strings.Builder
+	showCmd.Stdout = &outBuf
 	err := tf.runTerraformCmd(ctx, showCmd)
 	if err != nil {
 		return "", err
 	}
 
-	return ret.String(), nil
+	return outBuf.String(), nil
 }
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go
index fc7eecf86c..ca92e5220b 100644
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package tfexec
 
 import (
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_pull.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_pull.go
new file mode 100644
index 0000000000..9fa6e5db95
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_pull.go
@@ -0,0 +1,58 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfexec
+
+import (
+	"bytes"
+	"context"
+	"os/exec"
+)
+
+type statePullConfig struct {
+	reattachInfo ReattachInfo
+}
+
+var defaultStatePullConfig = statePullConfig{}
+
+type StatePullOption interface {
+	configureStatePull(*statePullConfig)
+}
+
+func (opt *ReattachOption) configureStatePull(conf *statePullConfig) {
+	conf.reattachInfo = opt.info
+}
+
+func (tf *Terraform) StatePull(ctx context.Context, opts ...StatePullOption) (string, error) {
+	c := defaultStatePullConfig
+
+	for _, o := range opts {
+		o.configureStatePull(&c)
+	}
+
+	mergeEnv := map[string]string{}
+	if c.reattachInfo != nil {
+		reattachStr, err := c.reattachInfo.marshalString()
+		if err != nil {
+			return "", err
+		}
+		mergeEnv[reattachEnvVar] = reattachStr
+	}
+
+	cmd := tf.statePullCmd(ctx, mergeEnv)
+
+	var ret bytes.Buffer
+	cmd.Stdout = &ret
+	err := tf.runTerraformCmd(ctx, cmd)
+	if err != nil {
+		return "", err
+	}
+
+	return ret.String(), nil
+}
+
+func (tf *Terraform) statePullCmd(ctx context.Context, mergeEnv map[string]string) *exec.Cmd {
+	args := []string{"state", "pull"}
+
+	return tf.buildTerraformCmd(ctx, mergeEnv, args...)
+}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_push.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_push.go
new file mode 100644
index 0000000000..a0873e96ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_push.go
@@ -0,0 +1,70 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfexec
+
+import (
+	"context"
+	"os/exec"
+	"strconv"
+)
+
+type statePushConfig struct {
+	force       bool
+	lock        bool
+	lockTimeout string
+}
+
+var defaultStatePushOptions = statePushConfig{
+	lock:        false,
+	lockTimeout: "0s",
+}
+
+// StatePushCmdOption represents options used in the StatePush method.
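Before the StatePush option plumbing continues below, a sketch of how the new StatePull and StatePush wrappers pair up for a backup/restore round trip. The package and helper names are hypothetical, and tf is assumed to be a configured *tfexec.Terraform:

```go
package tfstate // hypothetical package name

import (
	"context"
	"os"

	"github.com/hashicorp/terraform-exec/tfexec"
)

// backupState pulls the current state and persists it locally.
func backupState(ctx context.Context, tf *tfexec.Terraform, path string) error {
	state, err := tf.StatePull(ctx)
	if err != nil {
		return err
	}
	// StatePull returns the raw state as a string; write it out unchanged.
	return os.WriteFile(path, []byte(state), 0o600)
}

// restoreState pushes a previously saved state file back to the backend.
func restoreState(ctx context.Context, tf *tfexec.Terraform, path string) error {
	return tf.StatePush(ctx, path)
}
```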
+type StatePushCmdOption interface { + configureStatePush(*statePushConfig) +} + +func (opt *ForceOption) configureStatePush(conf *statePushConfig) { + conf.force = opt.force +} + +func (opt *LockOption) configureStatePush(conf *statePushConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureStatePush(conf *statePushConfig) { + conf.lockTimeout = opt.timeout +} + +func (tf *Terraform) StatePush(ctx context.Context, path string, opts ...StatePushCmdOption) error { + cmd, err := tf.statePushCmd(ctx, path, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) statePushCmd(ctx context.Context, path string, opts ...StatePushCmdOption) (*exec.Cmd, error) { + c := defaultStatePushOptions + + for _, o := range opts { + o.configureStatePush(&c) + } + + args := []string{"state", "push"} + + if c.force { + args = append(args, "-force") + } + + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + + args = append(args, path) + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go index 0c5dd66676..2db18cb747 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go new file mode 100644 index 0000000000..b6ac955cdd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go @@ -0,0 +1,81 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type taintConfig struct { + state string + allowMissing bool + lock bool + lockTimeout string +} + +var defaultTaintOptions = taintConfig{ + allowMissing: false, + lock: true, +} + +// TaintOption represents options used in the Taint method. +type TaintOption interface { + configureTaint(*taintConfig) +} + +func (opt *StateOption) configureTaint(conf *taintConfig) { + conf.state = opt.path +} + +func (opt *AllowMissingOption) configureTaint(conf *taintConfig) { + conf.allowMissing = opt.allowMissing +} + +func (opt *LockOption) configureTaint(conf *taintConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureTaint(conf *taintConfig) { + conf.lockTimeout = opt.timeout +} + +// Taint represents the terraform taint subcommand. +func (tf *Terraform) Taint(ctx context.Context, address string, opts ...TaintOption) error { + err := tf.compatible(ctx, tf0_4_1, nil) + if err != nil { + return fmt.Errorf("taint was first introduced in Terraform 0.4.1: %w", err) + } + taintCmd := tf.taintCmd(ctx, address, opts...) 
+	return tf.runTerraformCmd(ctx, taintCmd)
+}
+
+func (tf *Terraform) taintCmd(ctx context.Context, address string, opts ...TaintOption) *exec.Cmd {
+	c := defaultTaintOptions
+
+	for _, o := range opts {
+		o.configureTaint(&c)
+	}
+
+	args := []string{"taint", "-no-color"}
+
+	if c.lockTimeout != "" {
+		args = append(args, "-lock-timeout="+c.lockTimeout)
+	}
+
+	// string opts: only pass if set
+	if c.state != "" {
+		args = append(args, "-state="+c.state)
+	}
+
+	args = append(args, "-lock="+strconv.FormatBool(c.lock))
+	if c.allowMissing {
+		args = append(args, "-allow-missing")
+	}
+	args = append(args, address)
+
+	return tf.buildTerraformCmd(ctx, nil, args...)
+}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go
index 74787af0c2..628b733dec 100644
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package tfexec
 
 import (
@@ -32,14 +35,14 @@ type printfer interface {
 // but it ignores certain environment variables that are managed within the code and prohibits
 // setting them through SetEnv:
 //
-// - TF_APPEND_USER_AGENT
-// - TF_IN_AUTOMATION
-// - TF_INPUT
-// - TF_LOG
-// - TF_LOG_PATH
-// - TF_REATTACH_PROVIDERS
-// - TF_DISABLE_PLUGIN_TLS
-// - TF_SKIP_PROVIDER_VERIFY
+//   - TF_APPEND_USER_AGENT
+//   - TF_IN_AUTOMATION
+//   - TF_INPUT
+//   - TF_LOG
+//   - TF_LOG_PATH
+//   - TF_REATTACH_PROVIDERS
+//   - TF_DISABLE_PLUGIN_TLS
+//   - TF_SKIP_PROVIDER_VERIFY
 type Terraform struct {
 	execPath           string
 	workingDir         string
@@ -48,19 +51,30 @@ type Terraform struct {
 	skipProviderVerify bool
 	env                map[string]string
 
-	stdout  io.Writer
-	stderr  io.Writer
-	logger  printfer
+	stdout io.Writer
+	stderr io.Writer
+	logger printfer
+
+	// TF_LOG environment variable, defaults to TRACE if logPath is set.
+	log string
+
+	// TF_LOG_CORE environment variable
+	logCore string
+
+	// TF_LOG_PATH environment variable
 	logPath string
 
+	// TF_LOG_PROVIDER environment variable
+	logProvider string
+
 	versionLock  sync.Mutex
 	execVersion  *version.Version
 	provVersions map[string]*version.Version
 }
 
 // NewTerraform returns a Terraform struct with default values for all fields.
-// If a blank execPath is supplied, NewTerraform will attempt to locate an
-// appropriate binary on the system PATH.
+// If a blank execPath is supplied, NewTerraform will error.
+// Use hc-install or output from exec.LookPath to get a desirable execPath.
 func NewTerraform(workingDir string, execPath string) (*Terraform, error) {
 	if workingDir == "" {
 		return nil, fmt.Errorf("Terraform cannot be initialised with empty workdir")
@@ -71,7 +85,7 @@ func NewTerraform(workingDir string, execPath string) (*Terraform, error) {
 	}
 
 	if execPath == "" {
-		err := fmt.Errorf("NewTerraform: please supply the path to a Terraform executable using execPath, e.g. using the tfinstall package.")
+		err := fmt.Errorf("NewTerraform: please supply the path to a Terraform executable using execPath, e.g. using the github.com/hashicorp/hc-install module.")
 		return nil, &ErrNoSuitableBinary{
 			err: err,
 		}
 	}
@@ -122,10 +136,58 @@ func (tf *Terraform) SetStderr(w io.Writer) {
 	tf.stderr = w
 }
 
+// SetLog sets the TF_LOG environment variable for Terraform CLI execution.
+// This must be combined with a call to SetLogPath to take effect.
+//
+// This is only compatible with Terraform CLI 0.15.0 or later as setting the
+// log level was unreliable in earlier versions. It will default to TRACE when
+// SetLogPath is called on versions 0.14.11 and earlier, or if SetLogCore and
+// SetLogProvider have not been called before SetLogPath on versions 0.15.0 and
+// later.
+func (tf *Terraform) SetLog(log string) error {
+	err := tf.compatible(context.Background(), tf0_15_0, nil)
+	if err != nil {
+		return err
+	}
+	tf.log = log
+	return nil
+}
+
+// SetLogCore sets the TF_LOG_CORE environment variable for Terraform CLI
+// execution. This must be combined with a call to SetLogPath to take effect.
+//
+// This is only compatible with Terraform CLI 0.15.0 or later.
+func (tf *Terraform) SetLogCore(logCore string) error {
+	err := tf.compatible(context.Background(), tf0_15_0, nil)
+	if err != nil {
+		return err
+	}
+	tf.logCore = logCore
+	return nil
+}
+
 // SetLogPath sets the TF_LOG_PATH environment variable for Terraform CLI
 // execution.
 func (tf *Terraform) SetLogPath(path string) error {
 	tf.logPath = path
+
+	// Prevent setting the log path without enabling logging
+	if tf.log == "" && tf.logCore == "" && tf.logProvider == "" {
+		tf.log = "TRACE"
+	}
+
 	return nil
+}
+
+// SetLogProvider sets the TF_LOG_PROVIDER environment variable for Terraform
+// CLI execution. This must be combined with a call to SetLogPath to take
+// effect.
+//
+// This is only compatible with Terraform CLI 0.15.0 or later.
+func (tf *Terraform) SetLogProvider(logProvider string) error {
+	err := tf.compatible(context.Background(), tf0_15_0, nil)
+	if err != nil {
+		return err
+	}
+	tf.logProvider = logProvider
 	return nil
 }
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/test.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/test.go
new file mode 100644
index 0000000000..5e0bb6353f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/test.go
@@ -0,0 +1,66 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfexec
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os/exec"
+)
+
+type testConfig struct {
+	testsDirectory string
+}
+
+var defaultTestOptions = testConfig{}
+
+type TestOption interface {
+	configureTest(*testConfig)
+}
+
+func (opt *TestsDirectoryOption) configureTest(conf *testConfig) {
+	conf.testsDirectory = opt.testsDirectory
+}
+
+// Test represents the terraform test -json subcommand.
+//
+// The given io.Writer, if specified, will receive
+// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui)
+// JSON from Terraform including test results.
+func (tf *Terraform) Test(ctx context.Context, w io.Writer, opts ...TestOption) error {
+	err := tf.compatible(ctx, tf1_6_0, nil)
+
+	if err != nil {
+		return fmt.Errorf("terraform test was added in 1.6.0: %w", err)
+	}
+
+	tf.SetStdout(w)
+
+	testCmd := tf.testCmd(ctx, opts...)
+
+	err = tf.runTerraformCmd(ctx, testCmd)
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (tf *Terraform) testCmd(ctx context.Context, opts ...TestOption) *exec.Cmd {
+	c := defaultTestOptions
+
+	for _, o := range opts {
+		o.configureTest(&c)
+	}
+
+	args := []string{"test", "-json"}
+
+	if c.testsDirectory != "" {
+		args = append(args, "-tests-directory="+c.testsDirectory)
+	}
+
+	return tf.buildTerraformCmd(ctx, nil, args...)
+}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go
new file mode 100644
index 0000000000..5f0bf350e6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go
@@ -0,0 +1,81 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfexec
+
+import (
+	"context"
+	"fmt"
+	"os/exec"
+	"strconv"
+)
+
+type untaintConfig struct {
+	state        string
+	allowMissing bool
+	lock         bool
+	lockTimeout  string
+}
+
+var defaultUntaintOptions = untaintConfig{
+	allowMissing: false,
+	lock:         true,
+}
+
+// UntaintOption represents options used in the Untaint method.
+type UntaintOption interface {
+	configureUntaint(*untaintConfig)
+}
+
+func (opt *StateOption) configureUntaint(conf *untaintConfig) {
+	conf.state = opt.path
+}
+
+func (opt *AllowMissingOption) configureUntaint(conf *untaintConfig) {
+	conf.allowMissing = opt.allowMissing
+}
+
+func (opt *LockOption) configureUntaint(conf *untaintConfig) {
+	conf.lock = opt.lock
+}
+
+func (opt *LockTimeoutOption) configureUntaint(conf *untaintConfig) {
+	conf.lockTimeout = opt.timeout
+}
+
+// Untaint represents the terraform untaint subcommand.
+func (tf *Terraform) Untaint(ctx context.Context, address string, opts ...UntaintOption) error {
+	err := tf.compatible(ctx, tf0_6_13, nil)
+	if err != nil {
+		return fmt.Errorf("untaint was first introduced in Terraform 0.6.13: %w", err)
+	}
+	untaintCmd := tf.untaintCmd(ctx, address, opts...)
+	return tf.runTerraformCmd(ctx, untaintCmd)
+}
+
+func (tf *Terraform) untaintCmd(ctx context.Context, address string, opts ...UntaintOption) *exec.Cmd {
+	c := defaultUntaintOptions
+
+	for _, o := range opts {
+		o.configureUntaint(&c)
+	}
+
+	args := []string{"untaint", "-no-color"}
+
+	if c.lockTimeout != "" {
+		args = append(args, "-lock-timeout="+c.lockTimeout)
+	}
+
+	// string opts: only pass if set
+	if c.state != "" {
+		args = append(args, "-state="+c.state)
+	}
+
+	args = append(args, "-lock="+strconv.FormatBool(c.lock))
+	if c.allowMissing {
+		args = append(args, "-allow-missing")
+	}
+	args = append(args, address)
+
+	return tf.buildTerraformCmd(ctx, nil, args...)
+}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go
index e55237a7b6..34a2c87de1 100644
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package tfexec
 
 import (
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade013.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade013.go
new file mode 100644
index 0000000000..98dc459085
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade013.go
@@ -0,0 +1,71 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfexec
+
+import (
+	"context"
+	"fmt"
+	"os/exec"
+)
+
+type upgrade013Config struct {
+	dir string
+
+	reattachInfo ReattachInfo
+}
+
+var defaultUpgrade013Options = upgrade013Config{}
+
+// Upgrade013Option represents options used in the Upgrade013 method.
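Stepping back to the Taint/Untaint pair added just above (the Upgrade013 option interface continues below), a usage sketch; the package and helper names are hypothetical, the address is a placeholder, and tf is assumed to be configured:

```go
package tfstate // hypothetical package name

import (
	"context"

	"github.com/hashicorp/terraform-exec/tfexec"
)

// recreateOnNextApply marks a resource instance so the next apply replaces it;
// undoRecreate reverses the marking. TaintOption/UntaintOption values for the
// state path, locking, or -allow-missing can be appended to either call.
func recreateOnNextApply(ctx context.Context, tf *tfexec.Terraform, addr string) error {
	return tf.Taint(ctx, addr) // e.g. addr = "aws_instance.example"
}

func undoRecreate(ctx context.Context, tf *tfexec.Terraform, addr string) error {
	return tf.Untaint(ctx, addr)
}
```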
+type Upgrade013Option interface { + configureUpgrade013(*upgrade013Config) +} + +func (opt *DirOption) configureUpgrade013(conf *upgrade013Config) { + conf.dir = opt.path +} + +func (opt *ReattachOption) configureUpgrade013(conf *upgrade013Config) { + conf.reattachInfo = opt.info +} + +// Upgrade013 represents the terraform 0.13upgrade subcommand. +func (tf *Terraform) Upgrade013(ctx context.Context, opts ...Upgrade013Option) error { + cmd, err := tf.upgrade013Cmd(ctx, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) upgrade013Cmd(ctx context.Context, opts ...Upgrade013Option) (*exec.Cmd, error) { + err := tf.compatible(ctx, tf0_13_0, tf0_14_0) + if err != nil { + return nil, fmt.Errorf("terraform 0.13upgrade is only supported in 0.13 releases: %w", err) + } + + c := defaultUpgrade013Options + + for _, o := range opts { + o.configureUpgrade013(&c) + } + + args := []string{"0.13upgrade", "-no-color", "-yes"} + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go index 756eccd75c..d995d3759d 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( @@ -19,8 +22,8 @@ func (tf *Terraform) Validate(ctx context.Context) (*tfjson.ValidateOutput, erro cmd := tf.buildTerraformCmd(ctx, nil, "validate", "-no-color", "-json") - var outbuf = bytes.Buffer{} - cmd.Stdout = &outbuf + var outBuf = bytes.Buffer{} + cmd.Stdout = &outBuf err = tf.runTerraformCmd(ctx, cmd) // TODO: this command should not exit 1 if you pass -json as its hard to differentiate other errors @@ -30,7 +33,7 @@ func (tf *Terraform) Validate(ctx context.Context) (*tfjson.ValidateOutput, erro var ret tfjson.ValidateOutput // TODO: ret.UseJSONNumber(true) validate output should support JSON numbers - jsonErr := json.Unmarshal(outbuf.Bytes(), &ret) + jsonErr := json.Unmarshal(outBuf.Bytes(), &ret) if jsonErr != nil { // the original call was possibly bad, if it has an error, actually just return that if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go index b6496ab6e9..4ba4f6eafc 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( @@ -10,14 +13,26 @@ import ( "strings" "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-json" + tfjson "github.com/hashicorp/terraform-json" ) var ( + tf0_4_1 = version.Must(version.NewVersion("0.4.1")) + tf0_5_0 = version.Must(version.NewVersion("0.5.0")) + tf0_6_13 = version.Must(version.NewVersion("0.6.13")) tf0_7_7 = version.Must(version.NewVersion("0.7.7")) + tf0_8_0 = version.Must(version.NewVersion("0.8.0")) + tf0_10_0 = version.Must(version.NewVersion("0.10.0")) tf0_12_0 = version.Must(version.NewVersion("0.12.0")) tf0_13_0 = version.Must(version.NewVersion("0.13.0")) + tf0_14_0 = version.Must(version.NewVersion("0.14.0")) tf0_15_0 = version.Must(version.NewVersion("0.15.0")) + tf0_15_2 = version.Must(version.NewVersion("0.15.2")) + tf0_15_3 = version.Must(version.NewVersion("0.15.3")) + tf0_15_4 = version.Must(version.NewVersion("0.15.4")) + tf1_1_0 = version.Must(version.NewVersion("1.1.0")) + tf1_4_0 = version.Must(version.NewVersion("1.4.0")) + tf1_6_0 = version.Must(version.NewVersion("1.6.0")) ) // Version returns structured output from the terraform version command including both the Terraform CLI version @@ -87,7 +102,7 @@ func parseJsonVersionOutput(stdout []byte) (*version.Version, map[string]*versio func (tf *Terraform) versionFromPlaintext(ctx context.Context) (*version.Version, map[string]*version.Version, error) { versionCmd := tf.buildTerraformCmd(ctx, nil, "version") - var outBuf bytes.Buffer + var outBuf strings.Builder versionCmd.Stdout = &outBuf err := tf.runTerraformCmd(ctx, versionCmd) diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go new file mode 100644 index 0000000000..f2a17e652e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type workspaceDeleteConfig struct { + lock bool + lockTimeout string + force bool +} + +var defaultWorkspaceDeleteOptions = workspaceDeleteConfig{ + lock: true, + lockTimeout: "0s", +} + +// WorkspaceDeleteCmdOption represents options that are applicable to the WorkspaceDelete method. +type WorkspaceDeleteCmdOption interface { + configureWorkspaceDelete(*workspaceDeleteConfig) +} + +func (opt *LockOption) configureWorkspaceDelete(conf *workspaceDeleteConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureWorkspaceDelete(conf *workspaceDeleteConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *ForceOption) configureWorkspaceDelete(conf *workspaceDeleteConfig) { + conf.force = opt.force +} + +// WorkspaceDelete represents the workspace delete subcommand to the Terraform CLI. +func (tf *Terraform) WorkspaceDelete(ctx context.Context, workspace string, opts ...WorkspaceDeleteCmdOption) error { + cmd, err := tf.workspaceDeleteCmd(ctx, workspace, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) workspaceDeleteCmd(ctx context.Context, workspace string, opts ...WorkspaceDeleteCmdOption) (*exec.Cmd, error) { + c := defaultWorkspaceDeleteOptions + + for _, o := range opts { + switch o.(type) { + case *LockOption, *LockTimeoutOption: + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("-lock and -lock-timeout were added to workspace delete in Terraform 0.12: %w", err) + } + } + + o.configureWorkspaceDelete(&c) + } + + args := []string{"workspace", "delete", "-no-color"} + + if c.force { + args = append(args, "-force") + } + if c.lockTimeout != "" && c.lockTimeout != defaultWorkspaceDeleteOptions.lockTimeout { + // only pass if not default, so we don't need to worry about the 0.11 version check + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if !c.lock { + // only pass if false, so we don't need to worry about the 0.11 version check + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + } + + args = append(args, workspace) + + cmd := tf.buildTerraformCmd(ctx, nil, args...) + + return cmd, nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go index b8d0309454..1b4bec3763 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go @@ -1,7 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( - "bytes" "context" "strings" ) @@ -11,7 +13,7 @@ func (tf *Terraform) WorkspaceList(ctx context.Context) ([]string, string, error // TODO: [DIR] param option wlCmd := tf.buildTerraformCmd(ctx, nil, "workspace", "list", "-no-color") - var outBuf bytes.Buffer + var outBuf strings.Builder wlCmd.Stdout = &outBuf err := tf.runTerraformCmd(ctx, wlCmd) diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go index 2e05ffdb76..921a11873f 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import ( diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go index 5a51330f6f..da88472ab4 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfexec import "context" diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go new file mode 100644 index 0000000000..840eff9ae9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strings" +) + +// WorkspaceShow represents the workspace show subcommand to the Terraform CLI. 
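A sketch tying the WorkspaceShow function declared just below to WorkspaceDelete: read the active workspace and refuse to delete it, since Terraform cannot delete the currently selected workspace. tfexec.Force is assumed to be the package's existing constructor for the ForceOption wired up above; the other names are illustrative:

```go
package tfstate // hypothetical package name

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-exec/tfexec"
)

// pruneWorkspace deletes the named workspace unless it is currently selected.
func pruneWorkspace(ctx context.Context, tf *tfexec.Terraform, name string) error {
	current, err := tf.WorkspaceShow(ctx)
	if err != nil {
		return err
	}
	if current == name {
		return fmt.Errorf("refusing to delete the active workspace %q", name)
	}
	// Force maps to -force, discarding any state the workspace still tracks.
	return tf.WorkspaceDelete(ctx, name, tfexec.Force(true))
}
```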
+func (tf *Terraform) WorkspaceShow(ctx context.Context) (string, error) { + workspaceShowCmd, err := tf.workspaceShowCmd(ctx) + if err != nil { + return "", err + } + + var outBuffer strings.Builder + workspaceShowCmd.Stdout = &outBuffer + + err = tf.runTerraformCmd(ctx, workspaceShowCmd) + if err != nil { + return "", err + } + + return strings.TrimSpace(outBuffer.String()), nil +} + +func (tf *Terraform) workspaceShowCmd(ctx context.Context) (*exec.Cmd, error) { + err := tf.compatible(ctx, tf0_10_0, nil) + if err != nil { + return nil, fmt.Errorf("workspace show was first introduced in Terraform 0.10.0: %w", err) + } + + return tf.buildTerraformCmd(ctx, nil, "workspace", "show", "-no-color"), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/doc.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/doc.go deleted file mode 100644 index e48ea412b3..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfinstall/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package tfinstall offers multiple strategies for finding and/or installing -// a binary version of Terraform. Some of the strategies can also authenticate -// the source of the binary as an official HashiCorp release. -package tfinstall diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/download.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/download.go deleted file mode 100644 index 69c51f036c..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfinstall/download.go +++ /dev/null @@ -1,125 +0,0 @@ -package tfinstall - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/hashicorp/go-getter" - "golang.org/x/crypto/openpgp" -) - -func ensureInstallDir(installDir string) (string, error) { - if installDir == "" { - return ioutil.TempDir("", "tfexec") - } - - if _, err := os.Stat(installDir); err != nil { - return "", fmt.Errorf("could not access directory %s for installing Terraform: %w", installDir, err) - } - - return installDir, nil -} - -func downloadWithVerification(ctx context.Context, tfVersion string, installDir string, appendUserAgent string) (string, error) { - osName := runtime.GOOS - archName := runtime.GOARCH - - // setup: ensure we have a place to put our downloaded terraform binary - tfDir, err := ensureInstallDir(installDir) - if err != nil { - return "", err - } - - httpGetter := &getter.HttpGetter{ - Netrc: true, - Client: newHTTPClient(appendUserAgent), - } - client := getter.Client{ - Ctx: ctx, - Getters: map[string]getter.Getter{ - "https": httpGetter, - }, - } - client.Mode = getter.ClientModeAny - - // firstly, download and verify the signature of the checksum file - - sumsTmpDir, err := ioutil.TempDir("", "tfinstall") - if err != nil { - return "", err - } - defer os.RemoveAll(sumsTmpDir) - - sumsFilename := "terraform_" + tfVersion + "_SHA256SUMS" - sumsSigFilename := sumsFilename + ".72D7468F.sig" - - sumsURL := fmt.Sprintf("%s/%s/%s", baseURL, tfVersion, sumsFilename) - sumsSigURL := fmt.Sprintf("%s/%s/%s", baseURL, tfVersion, sumsSigFilename) - - client.Src = sumsURL - client.Dst = sumsTmpDir - err = client.Get() - if err != nil { - return "", fmt.Errorf("error fetching checksums at URL %s: %w", sumsURL, err) - } - - client.Src = sumsSigURL - err = client.Get() - if err != nil { - return "", fmt.Errorf("error fetching checksums signature: %s", err) - } - - sumsPath := filepath.Join(sumsTmpDir, sumsFilename) - sumsSigPath := filepath.Join(sumsTmpDir, sumsSigFilename) - - err = 
verifySumsSignature(sumsPath, sumsSigPath) - if err != nil { - return "", err - } - - // secondly, download Terraform itself, verifying the checksum - url := tfURL(tfVersion, osName, archName) - client.Src = url - client.Dst = tfDir - client.Mode = getter.ClientModeDir - err = client.Get() - if err != nil { - return "", err - } - - return filepath.Join(tfDir, "terraform"), nil -} - -// verifySumsSignature downloads SHA256SUMS and SHA256SUMS.sig and verifies -// the signature using the HashiCorp public key. -func verifySumsSignature(sumsPath, sumsSigPath string) error { - el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(hashicorpPublicKey)) - if err != nil { - return err - } - data, err := os.Open(sumsPath) - if err != nil { - return err - } - sig, err := os.Open(sumsSigPath) - if err != nil { - return err - } - _, err = openpgp.CheckDetachedSignature(el, data, sig) - - return err -} - -func tfURL(tfVersion, osName, archName string) string { - sumsFilename := "terraform_" + tfVersion + "_SHA256SUMS" - sumsURL := fmt.Sprintf("%s/%s/%s", baseURL, tfVersion, sumsFilename) - return fmt.Sprintf( - "%s/%s/terraform_%s_%s_%s.zip?checksum=file:%s", - baseURL, tfVersion, tfVersion, osName, archName, sumsURL, - ) -} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/exact_path.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/exact_path.go deleted file mode 100644 index 010cc5015a..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfinstall/exact_path.go +++ /dev/null @@ -1,27 +0,0 @@ -package tfinstall - -import ( - "context" - "os" -) - -type ExactPathOption struct { - execPath string -} - -var _ ExecPathFinder = &ExactPathOption{} - -func ExactPath(execPath string) *ExactPathOption { - opt := &ExactPathOption{ - execPath: execPath, - } - return opt -} - -func (opt *ExactPathOption) ExecPath(context.Context) (string, error) { - if _, err := os.Stat(opt.execPath); err != nil { - // fall through to the next strategy if the local path does not exist - return "", nil - } - return opt.execPath, nil -} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/exact_version.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/exact_version.go deleted file mode 100644 index afcb9aca42..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfinstall/exact_version.go +++ /dev/null @@ -1,35 +0,0 @@ -package tfinstall - -import ( - "context" - - "github.com/hashicorp/go-version" -) - -type ExactVersionOption struct { - tfVersion string - installDir string - - UserAgent string -} - -var _ ExecPathFinder = &ExactVersionOption{} - -func ExactVersion(tfVersion string, installDir string) *ExactVersionOption { - opt := &ExactVersionOption{ - tfVersion: tfVersion, - installDir: installDir, - } - - return opt -} - -func (opt *ExactVersionOption) ExecPath(ctx context.Context) (string, error) { - // validate version - _, err := version.NewVersion(opt.tfVersion) - if err != nil { - return "", err - } - - return downloadWithVerification(ctx, opt.tfVersion, opt.installDir, opt.UserAgent) -} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/http.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/http.go deleted file mode 100644 index 70d95a6e80..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfinstall/http.go +++ /dev/null @@ -1,37 +0,0 @@ -package tfinstall - -import ( - "fmt" - "net/http" - "os" - "strings" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - - intversion "github.com/hashicorp/terraform-exec/internal/version" -) 
- -type userAgentRoundTripper struct { - inner http.RoundTripper - userAgent string -} - -func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if _, ok := req.Header["User-Agent"]; !ok { - req.Header.Set("User-Agent", rt.userAgent) - } - return rt.inner.RoundTrip(req) -} - -func newHTTPClient(appendUA string) *http.Client { - appendUA = strings.TrimSpace(appendUA + " " + os.Getenv("TF_APPEND_USER_AGENT")) - userAgent := strings.TrimSpace(fmt.Sprintf("HashiCorp-tfinstall/%s %s", intversion.ModuleVersion(), appendUA)) - - cli := cleanhttp.DefaultPooledClient() - cli.Transport = &userAgentRoundTripper{ - userAgent: userAgent, - inner: cli.Transport, - } - - return cli -} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/latest_version.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/latest_version.go deleted file mode 100644 index f01735cac4..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfinstall/latest_version.go +++ /dev/null @@ -1,51 +0,0 @@ -package tfinstall - -import ( - "context" - "fmt" - - "github.com/hashicorp/go-checkpoint" -) - -type LatestVersionOption struct { - forceCheckpoint bool - installDir string - - UserAgent string -} - -var _ ExecPathFinder = &LatestVersionOption{} - -func LatestVersion(installDir string, forceCheckpoint bool) *LatestVersionOption { - opt := &LatestVersionOption{ - forceCheckpoint: forceCheckpoint, - installDir: installDir, - } - - return opt -} - -func (opt *LatestVersionOption) ExecPath(ctx context.Context) (string, error) { - v, err := latestVersion(opt.forceCheckpoint) - if err != nil { - return "", err - } - - return downloadWithVerification(ctx, v, opt.installDir, opt.UserAgent) -} - -func latestVersion(forceCheckpoint bool) (string, error) { - resp, err := checkpoint.Check(&checkpoint.CheckParams{ - Product: "terraform", - Force: forceCheckpoint, - }) - if err != nil { - return "", err - } - - if resp.CurrentVersion == "" { - return "", fmt.Errorf("could not determine latest version of terraform using checkpoint: CHECKPOINT_DISABLE may be set") - } - - return resp.CurrentVersion, nil -} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/look_path.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/look_path.go deleted file mode 100644 index 2ebec09b65..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfinstall/look_path.go +++ /dev/null @@ -1,30 +0,0 @@ -package tfinstall - -import ( - "context" - "log" - "os/exec" -) - -type LookPathOption struct { -} - -var _ ExecPathFinder = &LookPathOption{} - -func LookPath() *LookPathOption { - opt := &LookPathOption{} - - return opt -} - -func (opt *LookPathOption) ExecPath(context.Context) (string, error) { - p, err := exec.LookPath("terraform") - if err != nil { - if notFoundErr, ok := err.(*exec.Error); ok && notFoundErr.Err == exec.ErrNotFound { - log.Printf("[WARN] could not locate a terraform executable on system path; continuing") - return "", nil - } - return "", err - } - return p, nil -} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/pubkey.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/pubkey.go deleted file mode 100644 index c545595bfb..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfinstall/pubkey.go +++ /dev/null @@ -1,124 +0,0 @@ -package tfinstall - -const hashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBGB9+xkBEACabYZOWKmgZsHTdRDiyPJxhbuUiKX65GUWkyRMJKi/1dviVxOX 
-PG6hBPtF48IFnVgxKpIb7G6NjBousAV+CuLlv5yqFKpOZEGC6sBV+Gx8Vu1CICpl -Zm+HpQPcIzwBpN+Ar4l/exCG/f/MZq/oxGgH+TyRF3XcYDjG8dbJCpHO5nQ5Cy9h -QIp3/Bh09kET6lk+4QlofNgHKVT2epV8iK1cXlbQe2tZtfCUtxk+pxvU0UHXp+AB -0xc3/gIhjZp/dePmCOyQyGPJbp5bpO4UeAJ6frqhexmNlaw9Z897ltZmRLGq1p4a -RnWL8FPkBz9SCSKXS8uNyV5oMNVn4G1obCkc106iWuKBTibffYQzq5TG8FYVJKrh -RwWB6piacEB8hl20IIWSxIM3J9tT7CPSnk5RYYCTRHgA5OOrqZhC7JefudrP8n+M -pxkDgNORDu7GCfAuisrf7dXYjLsxG4tu22DBJJC0c/IpRpXDnOuJN1Q5e/3VUKKW -mypNumuQpP5lc1ZFG64TRzb1HR6oIdHfbrVQfdiQXpvdcFx+Fl57WuUraXRV6qfb -4ZmKHX1JEwM/7tu21QE4F1dz0jroLSricZxfaCTHHWNfvGJoZ30/MZUrpSC0IfB3 -iQutxbZrwIlTBt+fGLtm3vDtwMFNWM+Rb1lrOxEQd2eijdxhvBOHtlIcswARAQAB -tERIYXNoaUNvcnAgU2VjdXJpdHkgKGhhc2hpY29ycC5jb20vc2VjdXJpdHkpIDxz -ZWN1cml0eUBoYXNoaWNvcnAuY29tPokCVAQTAQoAPhYhBMh0AR8KtAURDQIQVTQ2 -XZRy10aPBQJgffsZAhsDBQkJZgGABQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJ -EDQ2XZRy10aPtpcP/0PhJKiHtC1zREpRTrjGizoyk4Sl2SXpBZYhkdrG++abo6zs -buaAG7kgWWChVXBo5E20L7dbstFK7OjVs7vAg/OLgO9dPD8n2M19rpqSbbvKYWvp -0NSgvFTT7lbyDhtPj0/bzpkZEhmvQaDWGBsbDdb2dBHGitCXhGMpdP0BuuPWEix+ -QnUMaPwU51q9GM2guL45Tgks9EKNnpDR6ZdCeWcqo1IDmklloidxT8aKL21UOb8t -cD+Bg8iPaAr73bW7Jh8TdcV6s6DBFub+xPJEB/0bVPmq3ZHs5B4NItroZ3r+h3ke -VDoSOSIZLl6JtVooOJ2la9ZuMqxchO3mrXLlXxVCo6cGcSuOmOdQSz4OhQE5zBxx -LuzA5ASIjASSeNZaRnffLIHmht17BPslgNPtm6ufyOk02P5XXwa69UCjA3RYrA2P -QNNC+OWZ8qQLnzGldqE4MnRNAxRxV6cFNzv14ooKf7+k686LdZrP/3fQu2p3k5rY -0xQUXKh1uwMUMtGR867ZBYaxYvwqDrg9XB7xi3N6aNyNQ+r7zI2lt65lzwG1v9hg -FG2AHrDlBkQi/t3wiTS3JOo/GCT8BjN0nJh0lGaRFtQv2cXOQGVRW8+V/9IpqEJ1 -qQreftdBFWxvH7VJq2mSOXUJyRsoUrjkUuIivaA9Ocdipk2CkP8bpuGz7ZF4uQIN -BGB9+xkBEACoklYsfvWRCjOwS8TOKBTfl8myuP9V9uBNbyHufzNETbhYeT33Cj0M -GCNd9GdoaknzBQLbQVSQogA+spqVvQPz1MND18GIdtmr0BXENiZE7SRvu76jNqLp -KxYALoK2Pc3yK0JGD30HcIIgx+lOofrVPA2dfVPTj1wXvm0rbSGA4Wd4Ng3d2AoR -G/wZDAQ7sdZi1A9hhfugTFZwfqR3XAYCk+PUeoFrkJ0O7wngaon+6x2GJVedVPOs -2x/XOR4l9ytFP3o+5ILhVnsK+ESVD9AQz2fhDEU6RhvzaqtHe+sQccR3oVLoGcat -ma5rbfzH0Fhj0JtkbP7WreQf9udYgXxVJKXLQFQgel34egEGG+NlbGSPG+qHOZtY -4uWdlDSvmo+1P95P4VG/EBteqyBbDDGDGiMs6lAMg2cULrwOsbxWjsWka8y2IN3z -1stlIJFvW2kggU+bKnQ+sNQnclq3wzCJjeDBfucR3a5WRojDtGoJP6Fc3luUtS7V -5TAdOx4dhaMFU9+01OoH8ZdTRiHZ1K7RFeAIslSyd4iA/xkhOhHq89F4ECQf3Bt4 -ZhGsXDTaA/VgHmf3AULbrC94O7HNqOvTWzwGiWHLfcxXQsr+ijIEQvh6rHKmJK8R -9NMHqc3L18eMO6bqrzEHW0Xoiu9W8Yj+WuB3IKdhclT3w0pO4Pj8gQARAQABiQI8 -BBgBCgAmFiEEyHQBHwq0BRENAhBVNDZdlHLXRo8FAmB9+xkCGwwFCQlmAYAACgkQ -NDZdlHLXRo9ZnA/7BmdpQLeTjEiXEJyW46efxlV1f6THn9U50GWcE9tebxCXgmQf -u+Uju4hreltx6GDi/zbVVV3HCa0yaJ4JVvA4LBULJVe3ym6tXXSYaOfMdkiK6P1v -JgfpBQ/b/mWB0yuWTUtWx18BQQwlNEQWcGe8n1lBbYsH9g7QkacRNb8tKUrUbWlQ -QsU8wuFgly22m+Va1nO2N5C/eE/ZEHyN15jEQ+QwgQgPrK2wThcOMyNMQX/VNEr1 -Y3bI2wHfZFjotmek3d7ZfP2VjyDudnmCPQ5xjezWpKbN1kvjO3as2yhcVKfnvQI5 -P5Frj19NgMIGAp7X6pF5Csr4FX/Vw316+AFJd9Ibhfud79HAylvFydpcYbvZpScl -7zgtgaXMCVtthe3GsG4gO7IdxxEBZ/Fm4NLnmbzCIWOsPMx/FxH06a539xFq/1E2 -1nYFjiKg8a5JFmYU/4mV9MQs4bP/3ip9byi10V+fEIfp5cEEmfNeVeW5E7J8PqG9 -t4rLJ8FR4yJgQUa2gs2SNYsjWQuwS/MJvAv4fDKlkQjQmYRAOp1SszAnyaplvri4 -ncmfDsf0r65/sd6S40g5lHH8LIbGxcOIN6kwthSTPWX89r42CbY8GzjTkaeejNKx -v1aCrO58wAtursO1DiXCvBY7+NdafMRnoHwBk50iPqrVkNA8fv+auRyB2/G5Ag0E -YH3+JQEQALivllTjMolxUW2OxrXb+a2Pt6vjCBsiJzrUj0Pa63U+lT9jldbCCfgP -wDpcDuO1O05Q8k1MoYZ6HddjWnqKG7S3eqkV5c3ct3amAXp513QDKZUfIDylOmhU -qvxjEgvGjdRjz6kECFGYr6Vnj/p6AwWv4/FBRFlrq7cnQgPynbIH4hrWvewp3Tqw -GVgqm5RRofuAugi8iZQVlAiQZJo88yaztAQ/7VsXBiHTn61ugQ8bKdAsr8w/ZZU5 -HScHLqRolcYg0cKN91c0EbJq9k1LUC//CakPB9mhi5+aUVUGusIM8ECShUEgSTCi -KQiJUPZ2CFbbPE9L5o9xoPCxjXoX+r7L/WyoCPTeoS3YRUMEnWKvc42Yxz3meRb+ 
-BmaqgbheNmzOah5nMwPupJYmHrjWPkX7oyyHxLSFw4dtoP2j6Z7GdRXKa2dUYdk2 -x3JYKocrDoPHh3Q0TAZujtpdjFi1BS8pbxYFb3hHmGSdvz7T7KcqP7ChC7k2RAKO -GiG7QQe4NX3sSMgweYpl4OwvQOn73t5CVWYp/gIBNZGsU3Pto8g27vHeWyH9mKr4 -cSepDhw+/X8FGRNdxNfpLKm7Vc0Sm9Sof8TRFrBTqX+vIQupYHRi5QQCuYaV6OVr -ITeegNK3So4m39d6ajCR9QxRbmjnx9UcnSYYDmIB6fpBuwT0ogNtABEBAAGJBHIE -GAEKACYCGwIWIQTIdAEfCrQFEQ0CEFU0Nl2UctdGjwUCYH4bgAUJAeFQ2wJAwXQg -BBkBCgAdFiEEs2y6kaLAcwxDX8KAsLRBCXaFtnYFAmB9/iUACgkQsLRBCXaFtnYX -BhAAlxejyFXoQwyGo9U+2g9N6LUb/tNtH29RHYxy4A3/ZUY7d/FMkArmh4+dfjf0 -p9MJz98Zkps20kaYP+2YzYmaizO6OA6RIddcEXQDRCPHmLts3097mJ/skx9qLAf6 -rh9J7jWeSqWO6VW6Mlx8j9m7sm3Ae1OsjOx/m7lGZOhY4UYfY627+Jf7WQ5103Qs -lgQ09es/vhTCx0g34SYEmMW15Tc3eCjQ21b1MeJD/V26npeakV8iCZ1kHZHawPq/ -aCCuYEcCeQOOteTWvl7HXaHMhHIx7jjOd8XX9V+UxsGz2WCIxX/j7EEEc7CAxwAN -nWp9jXeLfxYfjrUB7XQZsGCd4EHHzUyCf7iRJL7OJ3tz5Z+rOlNjSgci+ycHEccL -YeFAEV+Fz+sj7q4cFAferkr7imY1XEI0Ji5P8p/uRYw/n8uUf7LrLw5TzHmZsTSC -UaiL4llRzkDC6cVhYfqQWUXDd/r385OkE4oalNNE+n+txNRx92rpvXWZ5qFYfv7E -95fltvpXc0iOugPMzyof3lwo3Xi4WZKc1CC/jEviKTQhfn3WZukuF5lbz3V1PQfI -xFsYe9WYQmp25XGgezjXzp89C/OIcYsVB1KJAKihgbYdHyUN4fRCmOszmOUwEAKR -3k5j4X8V5bk08sA69NVXPn2ofxyk3YYOMYWW8ouObnXoS8QJEDQ2XZRy10aPMpsQ -AIbwX21erVqUDMPn1uONP6o4NBEq4MwG7d+fT85rc1U0RfeKBwjucAE/iStZDQoM -ZKWvGhFR+uoyg1LrXNKuSPB82unh2bpvj4zEnJsJadiwtShTKDsikhrfFEK3aCK8 -Zuhpiu3jxMFDhpFzlxsSwaCcGJqcdwGhWUx0ZAVD2X71UCFoOXPjF9fNnpy80YNp -flPjj2RnOZbJyBIM0sWIVMd8F44qkTASf8K5Qb47WFN5tSpePq7OCm7s8u+lYZGK -wR18K7VliundR+5a8XAOyUXOL5UsDaQCK4Lj4lRaeFXunXl3DJ4E+7BKzZhReJL6 -EugV5eaGonA52TWtFdB8p+79wPUeI3KcdPmQ9Ll5Zi/jBemY4bzasmgKzNeMtwWP -fk6WgrvBwptqohw71HDymGxFUnUP7XYYjic2sVKhv9AevMGycVgwWBiWroDCQ9Ja -btKfxHhI2p+g+rcywmBobWJbZsujTNjhtme+kNn1mhJsD3bKPjKQfAxaTskBLb0V -wgV21891TS1Dq9kdPLwoS4XNpYg2LLB4p9hmeG3fu9+OmqwY5oKXsHiWc43dei9Y -yxZ1AAUOIaIdPkq+YG/PhlGE4YcQZ4RPpltAr0HfGgZhmXWigbGS+66pUj+Ojysc -j0K5tCVxVu0fhhFpOlHv0LWaxCbnkgkQH9jfMEJkAWMOuQINBGCAXCYBEADW6RNr -ZVGNXvHVBqSiOWaxl1XOiEoiHPt50Aijt25yXbG+0kHIFSoR+1g6Lh20JTCChgfQ -kGGjzQvEuG1HTw07YhsvLc0pkjNMfu6gJqFox/ogc53mz69OxXauzUQ/TZ27GDVp -UBu+EhDKt1s3OtA6Bjz/csop/Um7gT0+ivHyvJ/jGdnPEZv8tNuSE/Uo+hn/Q9hg -8SbveZzo3C+U4KcabCESEFl8Gq6aRi9vAfa65oxD5jKaIz7cy+pwb0lizqlW7H9t -Qlr3dBfdIcdzgR55hTFC5/XrcwJ6/nHVH/xGskEasnfCQX8RYKMuy0UADJy72TkZ -bYaCx+XXIcVB8GTOmJVoAhrTSSVLAZspfCnjwnSxisDn3ZzsYrq3cV6sU8b+QlIX -7VAjurE+5cZiVlaxgCjyhKqlGgmonnReWOBacCgL/UvuwMmMp5TTLmiLXLT7uxeG -ojEyoCk4sMrqrU1jevHyGlDJH9Taux15GILDwnYFfAvPF9WCid4UZ4Ouwjcaxfys -3LxNiZIlUsXNKwS3mhiMRL4TRsbs4k4QE+LIMOsauIvcvm8/frydvQ/kUwIhVTH8 -0XGOH909bYtJvY3fudK7ShIwm7ZFTduBJUG473E/Fn3VkhTmBX6+PjOC50HR/Hyb -waRCzfDruMe3TAcE/tSP5CUOb9C7+P+hPzQcDwARAQABiQRyBBgBCgAmFiEEyHQB -Hwq0BRENAhBVNDZdlHLXRo8FAmCAXCYCGwIFCQlmAYACQAkQNDZdlHLXRo/BdCAE -GQEKAB0WIQQ3TsdbSFkTYEqDHMfIIMbVzSerhwUCYIBcJgAKCRDIIMbVzSerh0Xw -D/9ghnUsoNCu1OulcoJdHboMazJvDt/znttdQSnULBVElgM5zk0Uyv87zFBzuCyQ -JWL3bWesQ2uFx5fRWEPDEfWVdDrjpQGb1OCCQyz1QlNPV/1M1/xhKGS9EeXrL8Dw -F6KTGkRwn1yXiP4BGgfeFIQHmJcKXEZ9HkrpNb8mcexkROv4aIPAwn+IaE+NHVtt -IBnufMXLyfpkWJQtJa9elh9PMLlHHnuvnYLvuAoOkhuvs7fXDMpfFZ01C+QSv1dz -Hm52GSStERQzZ51w4c0rYDneYDniC/sQT1x3dP5Xf6wzO+EhRMabkvoTbMqPsTEP -xyWr2pNtTBYp7pfQjsHxhJpQF0xjGN9C39z7f3gJG8IJhnPeulUqEZjhRFyVZQ6/ -siUeq7vu4+dM/JQL+i7KKe7Lp9UMrG6NLMH+ltaoD3+lVm8fdTUxS5MNPoA/I8cK -1OWTJHkrp7V/XaY7mUtvQn5V1yET5b4bogz4nME6WLiFMd+7x73gB+YJ6MGYNuO8 -e/NFK67MfHbk1/AiPTAJ6s5uHRQIkZcBPG7y5PpfcHpIlwPYCDGYlTajZXblyKrw -BttVnYKvKsnlysv11glSg0DphGxQJbXzWpvBNyhMNH5dffcfvd3eXJAxnD81GD2z -ZAriMJ4Av2TfeqQ2nxd2ddn0jX4WVHtAvLXfCgLM2Gveho4jD/9sZ6PZz/rEeTvt 
-h88t50qPcBa4bb25X0B5FO3TeK2LL3VKLuEp5lgdcHVonrcdqZFobN1CgGJua8TW -SprIkh+8ATZ/FXQTi01NzLhHXT1IQzSpFaZw0gb2f5ruXwvTPpfXzQrs2omY+7s7 -fkCwGPesvpSXPKn9v8uhUwD7NGW/Dm+jUM+QtC/FqzX7+/Q+OuEPjClUh1cqopCZ -EvAI3HjnavGrYuU6DgQdjyGT/UDbuwbCXqHxHojVVkISGzCTGpmBcQYQqhcFRedJ -yJlu6PSXlA7+8Ajh52oiMJ3ez4xSssFgUQAyOB16432tm4erpGmCyakkoRmMUn3p -wx+QIppxRlsHznhcCQKR3tcblUqH3vq5i4/ZAihusMCa0YrShtxfdSb13oKX+pFr -aZXvxyZlCa5qoQQBV1sowmPL1N2j3dR9TVpdTyCFQSv4KeiExmowtLIjeCppRBEK -eeYHJnlfkyKXPhxTVVO6H+dU4nVu0ASQZ07KiQjbI+zTpPKFLPp3/0sPRJM57r1+ -aTS71iR7nZNZ1f8LZV2OvGE6fJVtgJ1J4Nu02K54uuIhU3tg1+7Xt+IqwRc9rbVr -pHH/hFCYBPW2D2dxB+k2pQlg5NI+TpsXj5Zun8kRw5RtVb+dLuiH/xmxArIee8Jq -ZF5q4h4I33PSGDdSvGXn9UMY5Isjpg== -=7pIB ------END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/tfinstall.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/tfinstall.go deleted file mode 100644 index 3dd6963534..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfinstall/tfinstall.go +++ /dev/null @@ -1,62 +0,0 @@ -package tfinstall - -import ( - "context" - "fmt" - "os/exec" - "strings" -) - -const baseURL = "https://releases.hashicorp.com/terraform" - -type ExecPathFinder interface { - ExecPath(context.Context) (string, error) -} - -func Find(ctx context.Context, opts ...ExecPathFinder) (string, error) { - var terraformPath string - - // go through the options in order - // until a valid terraform executable is found - for _, opt := range opts { - p, err := opt.ExecPath(ctx) - if err != nil { - return "", fmt.Errorf("unexpected error: %s", err) - } - - if p == "" { - // strategy did not locate an executable - fall through to next - continue - } else { - terraformPath = p - break - } - } - - if terraformPath == "" { - return "", fmt.Errorf("could not find terraform executable") - } - - err := runTerraformVersion(terraformPath) - if err != nil { - return "", fmt.Errorf("executable found at path %s is not terraform: %s", terraformPath, err) - } - - return terraformPath, nil -} - -func runTerraformVersion(execPath string) error { - cmd := exec.Command(execPath, "version") - - out, err := cmd.Output() - if err != nil { - return err - } - - // very basic sanity check - if !strings.Contains(string(out), "Terraform v") { - return fmt.Errorf("located executable at %s, but output of `terraform version` was:\n%s", execPath, out) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-json/.copywrite.hcl b/vendor/github.com/hashicorp/terraform-json/.copywrite.hcl new file mode 100644 index 0000000000..ada7d74aa7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/.copywrite.hcl @@ -0,0 +1,13 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2019 + + # (OPTIONAL) A list of globs that should not have copyright/license headers. 
+ # Supports doublestar glob patterns for more flexibility in defining which + # files or folders should be ignored + header_ignore = [ + "testdata/**", + ] +} diff --git a/vendor/github.com/hashicorp/terraform-json/.go-version b/vendor/github.com/hashicorp/terraform-json/.go-version new file mode 100644 index 0000000000..5fb5a6b4f5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/.go-version @@ -0,0 +1 @@ +1.20 diff --git a/vendor/github.com/hashicorp/terraform-json/CODEOWNERS b/vendor/github.com/hashicorp/terraform-json/CODEOWNERS new file mode 100644 index 0000000000..a99f162a51 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/CODEOWNERS @@ -0,0 +1,2 @@ +# This codebase has shared ownership and responsibility. +* @hashicorp/terraform-core @hashicorp/terraform-devex @hashicorp/tf-editor-experience-engineers diff --git a/vendor/github.com/hashicorp/terraform-json/LICENSE b/vendor/github.com/hashicorp/terraform-json/LICENSE index a612ad9813..3b97eaf3c3 100644 --- a/vendor/github.com/hashicorp/terraform-json/LICENSE +++ b/vendor/github.com/hashicorp/terraform-json/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2019 HashiCorp, Inc. + Mozilla Public License Version 2.0 ================================== diff --git a/vendor/github.com/hashicorp/terraform-json/README.md b/vendor/github.com/hashicorp/terraform-json/README.md index fea0ba2609..462c1a819d 100644 --- a/vendor/github.com/hashicorp/terraform-json/README.md +++ b/vendor/github.com/hashicorp/terraform-json/README.md @@ -1,6 +1,5 @@ # terraform-json -[![CircleCI](https://circleci.com/gh/hashicorp/terraform-json/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/terraform-json/tree/main) [![GoDoc](https://godoc.org/github.com/hashicorp/terraform-json?status.svg)](https://godoc.org/github.com/hashicorp/terraform-json) This repository houses data types designed to help parse the data produced by @@ -16,7 +15,39 @@ This repository also serves as de facto documentation for the formats produced by these commands. For more details, see the [GoDoc](https://godoc.org/github.com/hashicorp/terraform-json). -## Why a Separate Repository? +## Should I use this library? + +This library was built for a few specific applications, and is not intended for +general purpose use. + +The Terraform core team **recommends against** using `terraform-json` if your +application has any of the following requirements: + +* **Forward-compatibility**: each version of this library represents a specific + snapshot of the [Terraform JSON output format](https://developer.hashicorp.com/terraform/internals/json-format), + and it often slightly lags behind Terraform itself. The library supports + [the 1.x compatibility promises](https://developer.hashicorp.com/terraform/language/v1-compatibility-promises) + but you will need to upgrade the version promptly to use new additions. If you + require full compatibility with future Terraform versions, we recommend + implementing your own custom decoders for the parts of the JSON format you need. +* **Writing JSON output**: the structures in this library are not guaranteed to emit + JSON data which is semantically equivalent to Terraform itself. If your application + must robustly write JSON data to be consumed by systems which expect Terraform's + format to be supported, you should implement your own custom encoders. 
+* **Filtering or round-tripping**: the Terraform JSON formats are designed to be + forwards compatible, and permit new attributes to be added which may safely be + ignored by earlier versions of consumers. This library **drops unknown attributes**, + which means it is unsuitable for any application which intends to filter data + or read-modify-write data which will be consumed downstream. Any application doing + this will silently drop new data from new versions. For this application, you should + implement a custom decoder and encoder which preserves any unknown attributes + through a round-trip. + +When is `terraform-json` suitable? We recommend using it for applications which +decode the core stable data types and use it directly, and don't attempt to emit +JSON to be consumed by applications which expect the Terraform format. + +## Why a separate repository? To reduce dependencies on any of Terraform core's internals, we've made a design decision to make any helpers or libraries that work with the external JSON data diff --git a/vendor/github.com/hashicorp/terraform-json/action.go b/vendor/github.com/hashicorp/terraform-json/action.go index 51c4c8369a..c74f7e68a3 100644 --- a/vendor/github.com/hashicorp/terraform-json/action.go +++ b/vendor/github.com/hashicorp/terraform-json/action.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfjson // Action is a valid action type for a resource change. diff --git a/vendor/github.com/hashicorp/terraform-json/checks.go b/vendor/github.com/hashicorp/terraform-json/checks.go new file mode 100644 index 0000000000..558cb290b6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/checks.go @@ -0,0 +1,145 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfjson + +// CheckKind is a string representation of the type of conditional check +// referenced in a check result. +type CheckKind string + +const ( + // CheckKindResource indicates the check result is from a pre- or + // post-condition on a resource or data source. + CheckKindResource CheckKind = "resource" + + // CheckKindOutputValue indicates the check result is from an output + // post-condition. + CheckKindOutputValue CheckKind = "output_value" + + // CheckKindCheckBlock indicates the check result is from a check block. + CheckKindCheckBlock CheckKind = "check" +) + +// CheckStatus is a string representation of the status of a given conditional +// check. +type CheckStatus string + +const ( + // CheckStatusPass indicates the check passed. + CheckStatusPass CheckStatus = "pass" + + // CheckStatusFail indicates the check failed. + CheckStatusFail CheckStatus = "fail" + + // CheckStatusError indicates the check errored. This is distinct from + // CheckStatusFail in that it represents a logical or configuration error + // within the check block that prevented the check from executing, as + // opposed to the check was attempted and evaluated to false. + CheckStatusError CheckStatus = "error" + + // CheckStatusUnknown indicates the result of the check was not known. This + // could be because a value within the check could not be known at plan + // time, or because the overall plan failed for an unrelated reason before + // this check could be executed. + CheckStatusUnknown CheckStatus = "unknown" +) + +// CheckStaticAddress details the address of the object that performed a given +// check. 
The static address points to the overall resource, as opposed to the
+// dynamic address which contains the instance key for any resource that has
+// multiple instances.
+type CheckStaticAddress struct {
+	// ToDisplay is a formatted and ready to display representation of the
+	// address.
+	ToDisplay string `json:"to_display"`
+
+	// Kind represents the CheckKind of this check.
+	Kind CheckKind `json:"kind"`
+
+	// Module is the module part of the address. This will be empty for any
+	// resources in the root module.
+	Module string `json:"module,omitempty"`
+
+	// Mode is the ResourceMode of the resource that contains this check. This
+	// field is only set if Kind equals CheckKindResource.
+	Mode ResourceMode `json:"mode,omitempty"`
+
+	// Type is the resource type for the resource that contains this check. This
+	// field is only set if Kind equals CheckKindResource.
+	Type string `json:"type,omitempty"`
+
+	// Name is the name of the resource, check block, or output that contains
+	// this check.
+	Name string `json:"name,omitempty"`
+}
+
+// CheckDynamicAddress contains the InstanceKey field for any resources that
+// have multiple instances. A complete address can be built by combining the
+// CheckStaticAddress with the CheckDynamicAddress.
+type CheckDynamicAddress struct {
+	// ToDisplay is a formatted and ready to display representation of the
+	// full address, including the additional information from the relevant
+	// CheckStaticAddress.
+	ToDisplay string `json:"to_display"`
+
+	// Module is the module part of the address. This address will include the
+	// instance key for any module expansions resulting from foreach or count
+	// arguments. This field will be empty for any resources within the root
+	// module.
+	Module string `json:"module,omitempty"`
+
+	// InstanceKey is the instance key for any instances of a given resource.
+	//
+	// InstanceKey will be empty if there was no foreach or count argument
+	// defined on the containing object.
+	InstanceKey interface{} `json:"instance_key,omitempty"`
+}
+
+// CheckResultStatic is the container for a "checkable object".
+//
+// A "checkable object" is a resource or data source, an output, or a check
+// block.
+type CheckResultStatic struct {
+	// Address is the absolute address of the "checkable object"
+	Address CheckStaticAddress `json:"address"`
+
+	// Status is the overall status for all the checks within this object.
+	Status CheckStatus `json:"status"`
+
+	// Instances contains the results for each dynamic object that belongs to
+	// this static object. For example, any instances created from an object
+	// using the foreach or count meta arguments.
+	//
+	// Check blocks and outputs will only contain a single instance, while
+	// resources can contain 1 to many.
+	Instances []CheckResultDynamic `json:"instances,omitempty"`
+}
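Taken together, these types nest as `Checks` → `CheckResultStatic` → `CheckResultDynamic` → `CheckResultProblem`. A minimal consumer-side sketch of walking that nesting follows, assuming a decoded `*tfjson.Plan` whose `Checks` field is added in plan.go later in this diff; the `CheckResultDynamic` and `CheckResultProblem` types it relies on are declared immediately below.

```go
package example

import (
	"fmt"

	tfjson "github.com/hashicorp/terraform-json"
)

// reportFailedChecks prints the problem message for every failed check
// instance in an already-decoded plan (plan.Checks is added in plan.go below).
func reportFailedChecks(plan *tfjson.Plan) {
	for _, static := range plan.Checks {
		for _, instance := range static.Instances {
			if instance.Status != tfjson.CheckStatusFail {
				continue
			}
			// A failed instance may carry several problems, one per condition.
			for _, problem := range instance.Problems {
				fmt.Printf("%s: %s\n", instance.Address.ToDisplay, problem.Message)
			}
		}
	}
}
```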
+// CheckResultDynamic describes the check result for a dynamic object that
+// results from the expansion of the containing object.
+type CheckResultDynamic struct {
+	// Address is the relative address of this instance given the Address in the
+	// parent object.
+	Address CheckDynamicAddress `json:"address"`
+
+	// Status is the overall status for the checks within this dynamic object.
+	Status CheckStatus `json:"status"`
+
+	// Problems describes any additional optional details about this check if
+	// the check failed.
+	//
+	// This will not include the errors resulting from this check block, as they
+	// will be exposed as diagnostics in the original terraform execution. It
+	// may contain any failure messages even if the overall status is
+	// CheckStatusError, however, as the instance could contain multiple checks
+	// that returned a mix of error and failure statuses.
+	Problems []CheckResultProblem `json:"problems,omitempty"`
+}
+
+// CheckResultProblem describes one of potentially several problems that led to
+// a check being classified as CheckStatusFail.
+type CheckResultProblem struct {
+	// Message is the condition error message provided by the original check
+	// author.
+	Message string `json:"message"`
+}
diff --git a/vendor/github.com/hashicorp/terraform-json/config.go b/vendor/github.com/hashicorp/terraform-json/config.go
index e093cfa8bf..e8ea638acc 100644
--- a/vendor/github.com/hashicorp/terraform-json/config.go
+++ b/vendor/github.com/hashicorp/terraform-json/config.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package tfjson
 
 import (
@@ -48,6 +51,9 @@ type ProviderConfig struct {
 	// The name of the provider, ie: "aws".
 	Name string `json:"name,omitempty"`
 
+	// The fully-specified name of the provider, ie: "registry.terraform.io/hashicorp/aws".
+	FullName string `json:"full_name,omitempty"`
+
 	// The alias of the provider, ie: "us-east-1".
 	Alias string `json:"alias,omitempty"`
 
diff --git a/vendor/github.com/hashicorp/terraform-json/expression.go b/vendor/github.com/hashicorp/terraform-json/expression.go
index 8a39face7b..5ecb15ce50 100644
--- a/vendor/github.com/hashicorp/terraform-json/expression.go
+++ b/vendor/github.com/hashicorp/terraform-json/expression.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package tfjson
 
 import "encoding/json"
diff --git a/vendor/github.com/hashicorp/terraform-json/metadata.go b/vendor/github.com/hashicorp/terraform-json/metadata.go
new file mode 100644
index 0000000000..8ac111ad67
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-json/metadata.go
@@ -0,0 +1,115 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfjson
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/hashicorp/go-version"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// MetadataFunctionsFormatVersionConstraints defines the versions of the JSON
+// metadata functions format that are supported by this package.
+var MetadataFunctionsFormatVersionConstraints = "~> 1.0"
+
+// MetadataFunctions is the top-level object returned when exporting function
+// signatures
+type MetadataFunctions struct {
+	// The version of the format. This should always match the
+	// MetadataFunctionsFormatVersionConstraints in this package, else
+	// unmarshaling will fail.
+	FormatVersion string `json:"format_version"`
+
+	// The signatures of the functions available in a Terraform version.
+	Signatures map[string]*FunctionSignature `json:"function_signatures,omitempty"`
+}
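As a usage sketch under stated assumptions (the captured file path is illustrative), decoding `terraform metadata functions -json` output with these types needs only `json.Unmarshal`, because the `UnmarshalJSON` method defined below runs `Validate` automatically:

```go
package example

import (
	"encoding/json"
	"fmt"
	"os"

	tfjson "github.com/hashicorp/terraform-json"
)

// decodeFunctions parses a previously captured `terraform metadata
// functions -json` document; an unsupported format_version fails inside
// json.Unmarshal because MetadataFunctions.UnmarshalJSON calls Validate.
func decodeFunctions(path string) (*tfjson.MetadataFunctions, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}

	var fns tfjson.MetadataFunctions
	if err := json.Unmarshal(raw, &fns); err != nil {
		return nil, err
	}

	for name, sig := range fns.Signatures {
		fmt.Printf("%s returns %s\n", name, sig.ReturnType.FriendlyName())
	}
	return &fns, nil
}
```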
+// Validate checks to ensure that MetadataFunctions is present, and the
+// version matches the version supported by this library.
+func (f *MetadataFunctions) Validate() error {
+	if f == nil {
+		return errors.New("metadata functions data is nil")
+	}
+
+	if f.FormatVersion == "" {
+		return errors.New("unexpected metadata functions data, format version is missing")
+	}
+
+	constraint, err := version.NewConstraint(MetadataFunctionsFormatVersionConstraints)
+	if err != nil {
+		return fmt.Errorf("invalid version constraint: %w", err)
+	}
+
+	version, err := version.NewVersion(f.FormatVersion)
+	if err != nil {
+		return fmt.Errorf("invalid format version %q: %w", f.FormatVersion, err)
+	}
+
+	if !constraint.Check(version) {
+		return fmt.Errorf("unsupported metadata functions format version: %q does not satisfy %q",
+			version, constraint)
+	}
+
+	return nil
+}
+
+func (f *MetadataFunctions) UnmarshalJSON(b []byte) error {
+	type rawFunctions MetadataFunctions
+	var functions rawFunctions
+
+	err := json.Unmarshal(b, &functions)
+	if err != nil {
+		return err
+	}
+
+	*f = *(*MetadataFunctions)(&functions)
+
+	return f.Validate()
+}
+
+// FunctionSignature represents a function signature.
+type FunctionSignature struct {
+	// Description is an optional human-readable description
+	// of the function
+	Description string `json:"description,omitempty"`
+
+	// Summary is an optional shortened description of the function
+	Summary string `json:"summary,omitempty"`
+
+	// DeprecationMessage is an optional message that indicates that the
+	// function should be considered deprecated and what actions should be
+	// performed by the practitioner to handle the deprecation.
+	DeprecationMessage string `json:"deprecation_message,omitempty"`
+
+	// ReturnType is the ctyjson representation of the function's
+	// return types based on supplying all parameters using
+	// dynamic types. Functions can have dynamic return types.
+	ReturnType cty.Type `json:"return_type"`
+
+	// Parameters describes the function's fixed positional parameters.
+	Parameters []*FunctionParameter `json:"parameters,omitempty"`
+
+	// VariadicParameter describes the function's variadic
+	// parameter if it is supported.
+	VariadicParameter *FunctionParameter `json:"variadic_parameter,omitempty"`
+}
+
+// FunctionParameter represents a parameter to a function.
+type FunctionParameter struct {
+	// Name is an optional name for the argument.
+	Name string `json:"name,omitempty"`
+
+	// Description is an optional human-readable description
+	// of the argument
+	Description string `json:"description,omitempty"`
+
+	// IsNullable is true if null is an acceptable value for the argument
+	IsNullable bool `json:"is_nullable,omitempty"`
+
+	// A type that any argument for this parameter must conform to.
+	Type cty.Type `json:"type"`
+}
diff --git a/vendor/github.com/hashicorp/terraform-json/plan.go b/vendor/github.com/hashicorp/terraform-json/plan.go
index 7e44c5c420..38ea778e1c 100644
--- a/vendor/github.com/hashicorp/terraform-json/plan.go
+++ b/vendor/github.com/hashicorp/terraform-json/plan.go
@@ -1,14 +1,20 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package tfjson
 
 import (
+	"bytes"
 	"encoding/json"
 	"errors"
 	"fmt"
+
+	"github.com/hashicorp/go-version"
 )
 
-// PlanFormatVersion is the version of the JSON plan format that is
-// supported by this package.
-const PlanFormatVersion = "0.1"
+// PlanFormatVersionConstraints defines the versions of the JSON plan format
+// that are supported by this package.
+var PlanFormatVersionConstraints = ">= 0.1, < 2.0" // ResourceMode is a string representation of the resource type found // in certain fields in the plan. @@ -24,6 +30,12 @@ const ( // Plan represents the entire contents of an output Terraform plan. type Plan struct { + // useJSONNumber opts into the behavior of calling + // json.Decoder.UseNumber prior to decoding the plan, which turns + // numbers into json.Numbers instead of float64s. Set it using + // Plan.UseJSONNumber. + useJSONNumber bool + // The version of the plan format. This should always match the // PlanFormatVersion constant in this package, or else an unmarshal // will be unstable. @@ -40,6 +52,10 @@ type Plan struct { // this plan. PlannedValues *StateValues `json:"planned_values,omitempty"` + // The change operations for resources and data sources within this plan + // resulting from resource drift. + ResourceDrift []*ResourceChange `json:"resource_drift,omitempty"` + // The change operations for resources and data sources within this // plan. ResourceChanges []*ResourceChange `json:"resource_changes,omitempty"` @@ -53,6 +69,35 @@ type Plan struct { // The Terraform configuration used to make the plan. Config *Config `json:"configuration,omitempty"` + + // RelevantAttributes represents any resource instances and their + // attributes which may have contributed to the planned changes + RelevantAttributes []ResourceAttribute `json:"relevant_attributes,omitempty"` + + // Checks contains the results of any conditional checks executed, or + // planned to be executed, during this plan. + Checks []CheckResultStatic `json:"checks,omitempty"` + + // Timestamp contains the static timestamp that Terraform considers to be + // the time this plan executed, in UTC. + Timestamp string `json:"timestamp,omitempty"` +} + +// ResourceAttribute describes a full path to a resource attribute +type ResourceAttribute struct { + // Resource describes resource instance address (e.g. null_resource.foo) + Resource string `json:"resource"` + // Attribute describes the attribute path using a lossy representation + // of cty.Path. (e.g. ["id"] or ["objects", 0, "val"]). + Attribute []json.RawMessage `json:"attribute"` +} + +// UseJSONNumber controls whether the Plan will be decoded using the +// json.Number behavior or the float64 behavior. When b is true, the Plan will +// represent numbers in PlanOutputs as json.Numbers. When b is false, the +// Plan will represent numbers in PlanOutputs as float64s. 
+func (p *Plan) UseJSONNumber(b bool) { + p.useJSONNumber = b } // Validate checks to ensure that the plan is present, and the @@ -66,18 +111,42 @@ func (p *Plan) Validate() error { return errors.New("unexpected plan input, format version is missing") } - if PlanFormatVersion != p.FormatVersion { - return fmt.Errorf("unsupported plan format version: expected %q, got %q", PlanFormatVersion, p.FormatVersion) + constraint, err := version.NewConstraint(PlanFormatVersionConstraints) + if err != nil { + return fmt.Errorf("invalid version constraint: %w", err) + } + + version, err := version.NewVersion(p.FormatVersion) + if err != nil { + return fmt.Errorf("invalid format version %q: %w", p.FormatVersion, err) + } + + if !constraint.Check(version) { + return fmt.Errorf("unsupported plan format version: %q does not satisfy %q", + version, constraint) } return nil } +func isStringInSlice(slice []string, s string) bool { + for _, el := range slice { + if el == s { + return true + } + } + return false +} + func (p *Plan) UnmarshalJSON(b []byte) error { type rawPlan Plan var plan rawPlan - err := json.Unmarshal(b, &plan) + dec := json.NewDecoder(bytes.NewReader(b)) + if p.useJSONNumber { + dec.UseNumber() + } + err := dec.Decode(&plan) if err != nil { return err } @@ -94,6 +163,10 @@ type ResourceChange struct { // The absolute resource address. Address string `json:"address,omitempty"` + // The absolute address that this resource instance had + // at the conclusion of a previous plan. + PreviousAddress string `json:"previous_address,omitempty"` + // The module portion of the above address. Omitted if the instance // is in the root module. ModuleAddress string `json:"module_address,omitempty"` @@ -158,6 +231,37 @@ type Change struct { // display of sensitive values in user interfaces. BeforeSensitive interface{} `json:"before_sensitive,omitempty"` AfterSensitive interface{} `json:"after_sensitive,omitempty"` + + // Importing contains the import metadata about this operation. If importing + // is present (ie. not null) then the change is an import operation in + // addition to anything mentioned in the actions field. The actual contents + // of the Importing struct is subject to change, so downstream consumers + // should treat any values in here as strictly optional. + Importing *Importing `json:"importing,omitempty"` + + // GeneratedConfig contains any HCL config generated for this resource + // during planning as a string. + // + // If this is populated, then Importing should also be populated but this + // might change in the future. However, not all Importing changes will + // contain generated config. + GeneratedConfig string `json:"generated_config,omitempty"` + + // ReplacePaths contains a set of paths that point to attributes/elements + // that are causing the overall resource to be replaced rather than simply + // updated. + // + // This field is always a slice of indexes, where an index in this context + // is either an integer pointing to a child of a set/list, or a string + // pointing to the child of a map, object, or block. + ReplacePaths []interface{} `json:"replace_paths,omitempty"` +} + +// Importing is a nested object for the resource import metadata. +type Importing struct { + // The original ID of this resource used to target it as part of planned + // import operation. + ID string `json:"id,omitempty"` } // PlanVariable is a top-level variable in the Terraform plan. 
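Before the next vendored file, a brief consumer-side sketch of the plan additions above; it assumes a JSON plan captured with `terraform show -json` (the file path is illustrative) and shows `UseJSONNumber` together with the constraint-based `Validate` that `UnmarshalJSON` runs:

```go
package example

import (
	"fmt"
	"os"

	tfjson "github.com/hashicorp/terraform-json"
)

// summarizePlan decodes a plan JSON file and reports any import operations.
func summarizePlan(path string) error {
	raw, err := os.ReadFile(path)
	if err != nil {
		return err
	}

	var plan tfjson.Plan
	plan.UseJSONNumber(true) // decode numbers as json.Number, not float64

	// UnmarshalJSON calls Validate, so a format_version outside
	// ">= 0.1, < 2.0" is rejected here.
	if err := plan.UnmarshalJSON(raw); err != nil {
		return err
	}

	for _, rc := range plan.ResourceChanges {
		if rc.Change != nil && rc.Change.Importing != nil {
			fmt.Printf("%s will be imported (id %q)\n", rc.Address, rc.Change.Importing.ID)
		}
	}
	return nil
}
```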
diff --git a/vendor/github.com/hashicorp/terraform-json/schemas.go b/vendor/github.com/hashicorp/terraform-json/schemas.go
index 494c359f27..a2918ef480 100644
--- a/vendor/github.com/hashicorp/terraform-json/schemas.go
+++ b/vendor/github.com/hashicorp/terraform-json/schemas.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package tfjson
 
 import (
@@ -5,18 +8,19 @@ import (
 	"errors"
 	"fmt"
 
+	"github.com/hashicorp/go-version"
 	"github.com/zclconf/go-cty/cty"
 )
 
-// ProviderSchemasFormatVersion is the version of the JSON provider
-// schema format that is supported by this package.
-const ProviderSchemasFormatVersion = "0.2"
+// ProviderSchemasFormatVersionConstraints defines the versions of the JSON
+// provider schema format that are supported by this package.
+var ProviderSchemasFormatVersionConstraints = ">= 0.1, < 2.0"
 
 // ProviderSchemas represents the schemas of all providers and
 // resources in use by the configuration.
 type ProviderSchemas struct {
-	// The version of the plan format. This should always match the
-	// ProviderSchemasFormatVersion constant in this package, or else
+	// The version of the provider schema format. This should always satisfy
+	// the ProviderSchemasFormatVersionConstraints in this package, or else
 	// an unmarshal will be unstable.
 	FormatVersion string `json:"format_version,omitempty"`
 
@@ -38,10 +42,19 @@ func (p *ProviderSchemas) Validate() error {
 		return errors.New("unexpected provider schema data, format version is missing")
 	}
 
-	oldVersion := "0.1"
-	if p.FormatVersion != ProviderSchemasFormatVersion && p.FormatVersion != oldVersion {
-		return fmt.Errorf("unsupported provider schema data format version: expected %q or %q, got %q",
-			PlanFormatVersion, oldVersion, p.FormatVersion)
+	constraint, err := version.NewConstraint(ProviderSchemasFormatVersionConstraints)
+	if err != nil {
+		return fmt.Errorf("invalid version constraint: %w", err)
+	}
+
+	version, err := version.NewVersion(p.FormatVersion)
+	if err != nil {
+		return fmt.Errorf("invalid format version %q: %w", p.FormatVersion, err)
+	}
+
+	if !constraint.Check(version) {
+		return fmt.Errorf("unsupported provider schema format version: %q does not satisfy %q",
+			version, constraint)
 	}
 
 	return nil
@@ -73,6 +86,9 @@ type ProviderSchema struct {
 
 	// The schemas for any data sources in this provider.
 	DataSourceSchemas map[string]*Schema `json:"data_source_schemas,omitempty"`
+
+	// The definitions for any functions in this provider.
+	Functions map[string]*FunctionSignature `json:"functions,omitempty"`
 }
 
 // Schema is the JSON representation of a particular schema
@@ -213,6 +229,43 @@ type SchemaAttribute struct {
 	Sensitive bool `json:"sensitive,omitempty"`
 }
 
+// jsonSchemaAttribute describes an attribute within a schema block
+// in a middle-step internal representation before being marshalled into
+// a more useful SchemaAttribute with cty.Type.
+//
+// This avoids a panic on marshalling cty.NilType (from upstream cty),
+// which the default Go marshaller cannot ignore because it is a
+// struct and not nil-able.
+type jsonSchemaAttribute struct { + AttributeType json.RawMessage `json:"type,omitempty"` + AttributeNestedType *SchemaNestedAttributeType `json:"nested_type,omitempty"` + Description string `json:"description,omitempty"` + DescriptionKind SchemaDescriptionKind `json:"description_kind,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Required bool `json:"required,omitempty"` + Optional bool `json:"optional,omitempty"` + Computed bool `json:"computed,omitempty"` + Sensitive bool `json:"sensitive,omitempty"` +} + +func (as *SchemaAttribute) MarshalJSON() ([]byte, error) { + jsonSa := &jsonSchemaAttribute{ + AttributeNestedType: as.AttributeNestedType, + Description: as.Description, + DescriptionKind: as.DescriptionKind, + Deprecated: as.Deprecated, + Required: as.Required, + Optional: as.Optional, + Computed: as.Computed, + Sensitive: as.Sensitive, + } + if as.AttributeType != cty.NilType { + attrTy, _ := as.AttributeType.MarshalJSON() + jsonSa.AttributeType = attrTy + } + return json.Marshal(jsonSa) +} + // SchemaNestedAttributeType describes a nested attribute // which could also be just expressed simply as cty.Object(...), // cty.List(cty.Object(...)) etc. but this allows tracking additional diff --git a/vendor/github.com/hashicorp/terraform-json/state.go b/vendor/github.com/hashicorp/terraform-json/state.go index e1a9149c15..e5336329b8 100644 --- a/vendor/github.com/hashicorp/terraform-json/state.go +++ b/vendor/github.com/hashicorp/terraform-json/state.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfjson import ( @@ -5,11 +8,14 @@ import ( "encoding/json" "errors" "fmt" + + "github.com/hashicorp/go-version" + "github.com/zclconf/go-cty/cty" ) -// StateFormatVersion is the version of the JSON state format that is -// supported by this package. -const StateFormatVersion = "0.1" +// StateFormatVersionConstraints defines the versions of the JSON state format +// that are supported by this package. +var StateFormatVersionConstraints = ">= 0.1, < 2.0" // State is the top-level representation of a Terraform state. type State struct { @@ -29,6 +35,10 @@ type State struct { // The values that make up the state. Values *StateValues `json:"values,omitempty"` + + // Checks contains the results of any conditional checks when Values was + // last updated. + Checks []CheckResultStatic `json:"checks,omitempty"` } // UseJSONNumber controls whether the State will be decoded using the @@ -50,8 +60,19 @@ func (s *State) Validate() error { return errors.New("unexpected state input, format version is missing") } - if StateFormatVersion != s.FormatVersion { - return fmt.Errorf("unsupported state format version: expected %q, got %q", StateFormatVersion, s.FormatVersion) + constraint, err := version.NewConstraint(StateFormatVersionConstraints) + if err != nil { + return fmt.Errorf("invalid version constraint: %w", err) + } + + version, err := version.NewVersion(s.FormatVersion) + if err != nil { + return fmt.Errorf("invalid format version %q: %w", s.FormatVersion, err) + } + + if !constraint.Check(version) { + return fmt.Errorf("unsupported state format version: %q does not satisfy %q", + version, constraint) } return nil @@ -127,8 +148,8 @@ type StateResource struct { // provider offering "google_compute_instance". ProviderName string `json:"provider_name,omitempty"` - // The version of the resource type schema the "values" property - // conforms to. + // The version of the resource type schema the "values" property + // conforms to. 
SchemaVersion uint64 `json:"schema_version,"`
 
 	// The JSON representation of the attribute values of the resource,
@@ -137,6 +158,11 @@ type StateResource struct {
 	// from absent values.
 	AttributeValues map[string]interface{} `json:"values,omitempty"`
 
+	// The JSON representation of the sensitivity of the resource's
+	// attribute values. Only attributes which are sensitive
+	// are included in this structure.
+	SensitiveValues json.RawMessage `json:"sensitive_values,omitempty"`
+
 	// The addresses of the resources that this resource depends on.
 	DependsOn []string `json:"depends_on,omitempty"`
 
@@ -157,4 +183,31 @@ type StateOutput struct {
 
 	// The value of the output.
 	Value interface{} `json:"value,omitempty"`
+
+	// The type of the output.
+	Type cty.Type `json:"type,omitempty"`
+}
+
+// jsonStateOutput describes an output value in a middle-step internal
+// representation before being marshalled into a more useful StateOutput
+// with cty.Type.
+//
+// This avoids a panic on marshalling cty.NilType (from upstream cty),
+// which the default Go marshaller cannot ignore because it is a
+// struct and not nil-able.
+type jsonStateOutput struct {
+	Sensitive bool `json:"sensitive"`
+	Value interface{} `json:"value,omitempty"`
+	Type json.RawMessage `json:"type,omitempty"`
+}
+
+func (so *StateOutput) MarshalJSON() ([]byte, error) {
+	jsonSa := &jsonStateOutput{
+		Sensitive: so.Sensitive,
+		Value: so.Value,
+	}
+	if so.Type != cty.NilType {
+		outputType, _ := so.Type.MarshalJSON()
+		jsonSa.Type = outputType
+	}
+	return json.Marshal(jsonSa)
+}
diff --git a/vendor/github.com/hashicorp/terraform-json/tfjson.go b/vendor/github.com/hashicorp/terraform-json/tfjson.go
index 55f9ac4449..3a78a4f0fe 100644
--- a/vendor/github.com/hashicorp/terraform-json/tfjson.go
+++ b/vendor/github.com/hashicorp/terraform-json/tfjson.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 // Package tfjson is a de-coupled helper library containing types for
 // the plan format output by "terraform show -json" command. This
 // command is designed for the export of Terraform plan data in
diff --git a/vendor/github.com/hashicorp/terraform-json/validate.go b/vendor/github.com/hashicorp/terraform-json/validate.go
index db9db1919c..53652eff30 100644
--- a/vendor/github.com/hashicorp/terraform-json/validate.go
+++ b/vendor/github.com/hashicorp/terraform-json/validate.go
@@ -1,11 +1,20 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package tfjson
 
 import (
 	"encoding/json"
 	"errors"
 	"fmt"
+
+	"github.com/hashicorp/go-version"
 )
 
+// ValidateFormatVersionConstraints defines the versions of the JSON
+// validate format that are supported by this package.
+var ValidateFormatVersionConstraints = ">= 0.1, < 2.0" + // Pos represents a position in a config file type Pos struct { Line int `json:"line"` @@ -110,10 +119,19 @@ func (vo *ValidateOutput) Validate() error { return nil } - supportedVersion := "0.1" - if vo.FormatVersion != supportedVersion { - return fmt.Errorf("unsupported validation output format version: expected %q, got %q", - supportedVersion, vo.FormatVersion) + constraint, err := version.NewConstraint(ValidateFormatVersionConstraints) + if err != nil { + return fmt.Errorf("invalid version constraint: %w", err) + } + + version, err := version.NewVersion(vo.FormatVersion) + if err != nil { + return fmt.Errorf("invalid format version %q: %w", vo.FormatVersion, err) + } + + if !constraint.Check(version) { + return fmt.Errorf("unsupported validation output format version: %q does not satisfy %q", + version, constraint) } return nil diff --git a/vendor/github.com/hashicorp/terraform-json/version.go b/vendor/github.com/hashicorp/terraform-json/version.go index 16f0a853e3..7516ad6dd4 100644 --- a/vendor/github.com/hashicorp/terraform-json/version.go +++ b/vendor/github.com/hashicorp/terraform-json/version.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tfjson // VersionOutput represents output from the version -json command diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-go/LICENSE new file mode 100644 index 0000000000..e5ead304d8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/LICENSE @@ -0,0 +1,356 @@ +Copyright (c) 2020 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go new file mode 100644 index 0000000000..d99e19796c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go @@ -0,0 +1,92 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +// DataSourceContext injects the data source type into logger contexts. +func DataSourceContext(ctx context.Context, dataSource string) context.Context { + ctx = tfsdklog.SetField(ctx, KeyDataSourceType, dataSource) + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyDataSourceType, dataSource) + ctx = tflog.SetField(ctx, KeyDataSourceType, dataSource) + + return ctx +} + +// InitContext creates SDK and provider logger contexts. +func InitContext(ctx context.Context, sdkOpts tfsdklog.Options, providerOpts tflog.Options) context.Context { + ctx = tfsdklog.NewRootSDKLogger(ctx, append(tfsdklog.Options{ + tfsdklog.WithLevelFromEnv(EnvTfLogSdk), + }, sdkOpts...)...) + ctx = ProtoSubsystemContext(ctx, sdkOpts) + ctx = tfsdklog.NewRootProviderLogger(ctx, providerOpts...) + + return ctx +} + +// ProtoSubsystemContext adds the proto subsystem to the SDK logger context. +func ProtoSubsystemContext(ctx context.Context, sdkOpts tfsdklog.Options) context.Context { + ctx = tfsdklog.NewSubsystem(ctx, SubsystemProto, append(tfsdklog.Options{ + // All calls are through the Protocol* helper functions + tfsdklog.WithAdditionalLocationOffset(1), + tfsdklog.WithLevelFromEnv(EnvTfLogSdkProto), + }, sdkOpts...)...) + + return ctx +} + +// ProtocolVersionContext injects the protocol version into logger contexts. +func ProtocolVersionContext(ctx context.Context, protocolVersion string) context.Context { + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyProtocolVersion, protocolVersion) + + return ctx +} + +// ProviderAddressContext injects the provider address into logger contexts. +func ProviderAddressContext(ctx context.Context, providerAddress string) context.Context { + ctx = tfsdklog.SetField(ctx, KeyProviderAddress, providerAddress) + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyProviderAddress, providerAddress) + ctx = tflog.SetField(ctx, KeyProviderAddress, providerAddress) + + return ctx +} + +// RequestIdContext injects a unique request ID into logger contexts. +func RequestIdContext(ctx context.Context) context.Context { + reqID, err := uuid.GenerateUUID() + + if err != nil { + reqID = "unable to assign request ID: " + err.Error() + } + + ctx = tfsdklog.SetField(ctx, KeyRequestID, reqID) + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyRequestID, reqID) + ctx = tflog.SetField(ctx, KeyRequestID, reqID) + + return ctx +} + +// ResourceContext injects the resource type into logger contexts. 
+func ResourceContext(ctx context.Context, resource string) context.Context { + ctx = tfsdklog.SetField(ctx, KeyResourceType, resource) + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyResourceType, resource) + ctx = tflog.SetField(ctx, KeyResourceType, resource) + + return ctx +} + +// RpcContext injects the RPC name into logger contexts. +func RpcContext(ctx context.Context, rpc string) context.Context { + ctx = tfsdklog.SetField(ctx, KeyRPC, rpc) + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyRPC, rpc) + ctx = tflog.SetField(ctx, KeyRPC, rpc) + + return ctx +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/doc.go new file mode 100644 index 0000000000..1d29f515ea --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package logging contains shared environment variable and log functionality. +package logging diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/environment_variables.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/environment_variables.go new file mode 100644 index 0000000000..c203345769 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/environment_variables.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +// Environment variables. +const ( + // EnvTfLogProvider is the prefix of the environment variable that sets the + // logging level of the root provider logger for the provider being served. + // The suffix is an underscore and the parsed provider name. For example, + // registry.terraform.io/hashicorp/example becomes TF_LOG_PROVIDER_EXAMPLE. + EnvTfLogProvider = "TF_LOG_PROVIDER" + + // EnvTfLogSdk is an environment variable that sets the root logging level + // of SDK loggers. + EnvTfLogSdk = "TF_LOG_SDK" + + // EnvTfLogSdkProto is an environment variable that sets the logging level + // of SDK protocol loggers. Infers root SDK logging level, if unset. + EnvTfLogSdkProto = "TF_LOG_SDK_PROTO" + + // EnvTfLogSdkProtoDataDir is an environment variable that sets the + // directory to write raw protocol data files for debugging purposes. + EnvTfLogSdkProtoDataDir = "TF_LOG_SDK_PROTO_DATA_DIR" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go new file mode 100644 index 0000000000..7ad9127148 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +// Global logging keys attached to all requests. +// +// Practitioners or tooling reading logs may be depending on these keys, so be +// conscious of that when changing them. +const ( + // Attribute of the diagnostic being logged. + KeyDiagnosticAttribute = "diagnostic_attribute" + + // Number of the error diagnostics. + KeyDiagnosticErrorCount = "diagnostic_error_count" + + // Severity of the diagnostic being logged. + KeyDiagnosticSeverity = "diagnostic_severity" + + // Detail of the diagnostic being logged. + KeyDiagnosticDetail = "diagnostic_detail" + + // Summary of the diagnostic being logged. 
+ KeyDiagnosticSummary = "diagnostic_summary" + + // Number of the warning diagnostics. + KeyDiagnosticWarningCount = "diagnostic_warning_count" + + // Underlying error string + KeyError = "error" + + // Argument position of the function error. + KeyFunctionErrorArgument = "function_error_argument" + + // Boolean indicating presence of function error + KeyFunctionErrorExists = "function_error_exists" + + // Message of the function error. + KeyFunctionErrorText = "function_error_text" + + // Duration in milliseconds for the RPC request + KeyRequestDurationMs = "tf_req_duration_ms" + + // A unique ID for the RPC request + KeyRequestID = "tf_req_id" + + // The full address of the provider, such as + // registry.terraform.io/hashicorp/random + KeyProviderAddress = "tf_provider_addr" + + // The RPC being run, such as "ApplyResourceChange" + KeyRPC = "tf_rpc" + + // The type of resource being operated on, such as "random_pet" + KeyResourceType = "tf_resource_type" + + // The type of data source being operated on, such as "archive_file" + KeyDataSourceType = "tf_data_source_type" + + // Path to protocol data file, such as "/tmp/example.json" + KeyProtocolDataFile = "tf_proto_data_file" + + // The protocol version being used, as a string, such as "6" + KeyProtocolVersion = "tf_proto_version" + + // Whether the GetProviderSchemaOptional server capability is enabled + KeyServerCapabilityGetProviderSchemaOptional = "tf_server_capability_get_provider_schema_optional" + + // Whether the PlanDestroy server capability is enabled + KeyServerCapabilityPlanDestroy = "tf_server_capability_plan_destroy" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go new file mode 100644 index 0000000000..a1d49eae12 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +const ( + // SubsystemProto is the tfsdklog subsystem name for protocol logging. + SubsystemProto = "proto" +) + +// ProtocolError emits a protocol subsystem log at ERROR level. +func ProtocolError(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemError(ctx, SubsystemProto, msg, additionalFields...) +} + +// ProtocolWarn emits a protocol subsystem log at WARN level. +func ProtocolWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemWarn(ctx, SubsystemProto, msg, additionalFields...) +} + +// ProtocolTrace emits a protocol subsystem log at TRACE level. +func ProtocolTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemTrace(ctx, SubsystemProto, msg, additionalFields...) +} + +// ProtocolSetField returns a context with the additional protocol subsystem +// field set. 
+func ProtocolSetField(ctx context.Context, key string, value any) context.Context { + return tfsdklog.SubsystemSetField(ctx, SubsystemProto, key, value) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go new file mode 100644 index 0000000000..e96188f59c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go @@ -0,0 +1,147 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "context" + "fmt" + "os" + "path" + "sync" + "time" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +const ( + // fileExtEmpty is the file extension for empty data. + // Empty data may be expected, depending on the RPC. + fileExtEmpty = "empty" + + // fileExtJson is the file extension for JSON data. + fileExtJson = "json" + + // fileExtMsgpack is the file extension for MessagePack data. + fileExtMsgpack = "msgpack" +) + +var protocolDataSkippedLog sync.Once + +// ProtocolData emits raw protocol data to a file, if given a directory. +// +// The directory must exist and be writable, prior to invoking this function. +// +// File names are in the format: {TIME}_{RPC}_{MESSAGE}_{FIELD}.{EXT} +func ProtocolData(ctx context.Context, dataDir string, rpc string, message string, field string, data interface{}) { + if dataDir == "" { + // Write a log, only once, that explains how to enable this functionality. + protocolDataSkippedLog.Do(func() { + ProtocolTrace(ctx, "Skipping protocol data file writing because no data directory is set. "+ + fmt.Sprintf("Use the %s environment variable to enable this functionality.", EnvTfLogSdkProtoDataDir)) + }) + + return + } + + var fileContents []byte + var fileExtension string + + switch data := data.(type) { + case *tfprotov5.DynamicValue: + fileExtension, fileContents = protocolDataDynamicValue5(ctx, data) + case *tfprotov6.DynamicValue: + fileExtension, fileContents = protocolDataDynamicValue6(ctx, data) + default: + ProtocolError(ctx, fmt.Sprintf("Skipping unknown protocol data type: %T", data)) + return + } + + writeProtocolFile(ctx, dataDir, rpc, message, field, fileExtension, fileContents) +} + +// ProtocolPrivateData emits raw protocol private data to a file, if given a +// directory. This data is "private" in the sense that it is provider-owned, +// rather than something managed by Terraform. +// +// The directory must exist and be writable, prior to invoking this function. +// +// File names are in the format: {TIME}_{RPC}_{MESSAGE}_{FIELD}(.empty) +func ProtocolPrivateData(ctx context.Context, dataDir string, rpc string, message string, field string, data []byte) { + if dataDir == "" { + // Write a log, only once, that explains how to enable this functionality. + protocolDataSkippedLog.Do(func() { + ProtocolTrace(ctx, "Skipping protocol data file writing because no data directory is set. 
"+ + fmt.Sprintf("Use the %s environment variable to enable this functionality.", EnvTfLogSdkProtoDataDir)) + }) + + return + } + + var fileExtension string + + if len(data) == 0 { + fileExtension = fileExtEmpty + } + + writeProtocolFile(ctx, dataDir, rpc, message, field, fileExtension, data) +} + +func protocolDataDynamicValue5(_ context.Context, value *tfprotov5.DynamicValue) (string, []byte) { + if value == nil { + return fileExtEmpty, nil + } + + // (tfprotov5.DynamicValue).Unmarshal() prefers JSON first, so prefer to + // output JSON if found. + if len(value.JSON) > 0 { + return fileExtJson, value.JSON + } + + if len(value.MsgPack) > 0 { + return fileExtMsgpack, value.MsgPack + } + + return fileExtEmpty, nil +} + +func protocolDataDynamicValue6(_ context.Context, value *tfprotov6.DynamicValue) (string, []byte) { + if value == nil { + return fileExtEmpty, nil + } + + // (tfprotov6.DynamicValue).Unmarshal() prefers JSON first, so prefer to + // output JSON if found. + if len(value.JSON) > 0 { + return fileExtJson, value.JSON + } + + if len(value.MsgPack) > 0 { + return fileExtMsgpack, value.MsgPack + } + + return fileExtEmpty, nil +} + +func writeProtocolFile(ctx context.Context, dataDir string, rpc string, message string, field string, fileExtension string, fileContents []byte) { + fileName := fmt.Sprintf("%d_%s_%s_%s", time.Now().UnixMilli(), rpc, message, field) + + if fileExtension != "" { + fileName += "." + fileExtension + } + + filePath := path.Join(dataDir, fileName) + ctx = ProtocolSetField(ctx, KeyProtocolDataFile, filePath) + + ProtocolTrace(ctx, "Writing protocol data file") + + err := os.WriteFile(filePath, fileContents, 0644) + + if err != nil { + ProtocolError(ctx, "Unable to write protocol data file", map[string]any{KeyError: err.Error()}) + return + } + + ProtocolTrace(ctx, "Wrote protocol data file") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go new file mode 100644 index 0000000000..1ece6992d0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "log" + "strings" + + tfaddr "github.com/hashicorp/terraform-registry-address" +) + +func ProviderLoggerName(providerAddress string) string { + provider, err := tfaddr.ParseProviderSource(providerAddress) + if err != nil { + log.Printf("[ERROR] Error parsing provider name %q: %s", providerAddress, err) + return "" + } + + return strings.ReplaceAll(provider.Type, "-", "_") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go new file mode 100644 index 0000000000..f76df34175 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go @@ -0,0 +1,108 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +import ( + "context" +) + +// DataSourceMetadata describes metadata for a data resource in the GetMetadata +// RPC. +type DataSourceMetadata struct { + // TypeName is the name of the data resource. + TypeName string +} + +// DataSourceServer is an interface containing the methods a data source +// implementation needs to fill. 
+type DataSourceServer interface { + // ValidateDataSourceConfig is called when Terraform is checking that a + // data source's configuration is valid. It is guaranteed to have types + // conforming to your schema, but it is not guaranteed that all values + // will be known. This is your opportunity to do custom or advanced + // validation prior to a plan being generated. + ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfigRequest) (*ValidateDataSourceConfigResponse, error) + + // ReadDataSource is called when Terraform is refreshing a data + // source's state. + ReadDataSource(context.Context, *ReadDataSourceRequest) (*ReadDataSourceResponse, error) +} + +// ValidateDataSourceConfigRequest is the request Terraform sends when it wants +// to validate a data source's configuration. +type ValidateDataSourceConfigRequest struct { + // TypeName is the type of data source Terraform is validating. + TypeName string + + // Config is the configuration the user supplied for that data source. + // See the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config *DynamicValue +} + +// ValidateDataSourceConfigResponse is the response from the provider about the +// validity of a data source's configuration. +type ValidateDataSourceConfigResponse struct { + // Diagnostics report errors or warnings related to the given + // configuration. Returning an empty slice indicates a successful + // validation with no warnings or errors generated. + Diagnostics []*Diagnostic +} + +// ReadDataSourceRequest is the request Terraform sends when it wants to get +// the latest state for a data source. +type ReadDataSourceRequest struct { + // TypeName is the type of data source Terraform is requesting an + // updated state for. + TypeName string + + // Config is the configuration the user supplied for that data source. + // See the documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may have unknown values. + Config *DynamicValue + + // ProviderMeta supplies the provider metadata configuration for the + // module this data source is in. Module-specific provider metadata is + // an advanced feature and usage of it should be coordinated with the + // Terraform Core team by raising an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. See the + // documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration will have known values for all fields. + ProviderMeta *DynamicValue +} + +// ReadDataSourceResponse is the response from the provider about the current +// state of the requested data source. +type ReadDataSourceResponse struct { + // State is the current state of the data source, represented as a + // `DynamicValue`. 
See the documentation on `DynamicValue` for + // information about safely creating the `DynamicValue`. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + State *DynamicValue + + // Diagnostics report errors or warnings related to retrieving the + // current state of the requested data source. Returning an empty slice + // indicates a successful validation with no warnings or errors + // generated. + Diagnostics []*Diagnostic +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go new file mode 100644 index 0000000000..15ab6a4ab1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +import "github.com/hashicorp/terraform-plugin-go/tftypes" + +const ( + // DiagnosticSeverityInvalid is used to indicate an invalid + // `DiagnosticSeverity`. Provider developers should not use it. + DiagnosticSeverityInvalid DiagnosticSeverity = 0 + + // DiagnosticSeverityError is used to indicate that a `Diagnostic` + // represents an error and should halt Terraform execution. + DiagnosticSeverityError DiagnosticSeverity = 1 + + // DiagnosticSeverityWarning is used to indicate that a `Diagnostic` + // represents a warning and should not halt Terraform's execution, but + // it should be surfaced to the user. + DiagnosticSeverityWarning DiagnosticSeverity = 2 +) + +// Diagnostic is used to convey information back to the user running Terraform. +type Diagnostic struct { + // Severity indicates how Terraform should handle the Diagnostic. + Severity DiagnosticSeverity + + // Summary is a brief description of the problem, roughly + // sentence-sized, and should provide a concise description of what + // went wrong. For example, a Summary could be as simple as "Invalid + // value.". + Summary string + + // Detail is a lengthier, more complete description of the problem. + // Detail should provide enough information that a user can resolve the + // problem entirely. For example, a Detail could be "Values must be + // alphanumeric and lowercase only." + Detail string + + // Attribute indicates which field, specifically, has the problem. Not + // setting this will indicate the entire resource; setting it will + // indicate that the problem is with a certain field in the resource, + // which helps users find the source of the problem. + Attribute *tftypes.AttributePath +} + +// DiagnosticSeverity represents different classes of Diagnostic which affect +// how Terraform handles the Diagnostics. +type DiagnosticSeverity int32 + +func (d DiagnosticSeverity) String() string { + switch d { + case 0: + return "INVALID" + case 1: + return "ERROR" + case 2: + return "WARNING" + } + return "UNKNOWN" +}
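Taken together, the severity constants and `Diagnostic` fields above are everything a provider needs to report a validation problem. As a rough sketch (not part of this changeset; the attribute name and messages are invented), a data source validation response could be populated like so:

    resp := &tfprotov5.ValidateDataSourceConfigResponse{
        Diagnostics: []*tfprotov5.Diagnostic{
            {
                Severity: tfprotov5.DiagnosticSeverityError,
                Summary:  "Invalid value.",
                Detail:   "Values must be alphanumeric and lowercase only.",
                // Point at the offending field so users can locate the problem.
                Attribute: tftypes.NewAttributePath().WithAttributeName("name"),
            },
        },
    }

Returning an empty or nil Diagnostics slice signals that the configuration passed validation.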
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/doc.go new file mode 100644 index 0000000000..2d35c9251f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/doc.go @@ -0,0 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package tfprotov5 provides the interfaces and types needed to build a +// Terraform provider server. +// +// All Terraform provider servers should be built on +// these types, to take advantage of the ecosystem and tooling built around +// them. +// +// These types are small wrappers around the Terraform protocol. It is assumed +// that developers using tfprotov5 are familiar with the protocol, its +// requirements, and its semantics. Developers not comfortable working with the +// raw protocol should use the github.com/hashicorp/terraform-plugin-sdk/v2 Go +// module instead, which offers a less verbose, safer way to develop a +// Terraform provider, albeit with less flexibility and power. +// +// Provider developers should start by defining a type that implements the +// `ProviderServer` interface. A struct is recommended, as it will allow you to +// store the configuration information attached to your provider for use in +// requests, but any type is technically possible. +// +// `ProviderServer` implementations will need to implement the composed +// interfaces, `ResourceServer` and `DataSourceServer`. It is recommended, but +// not required, to use an embedded `ResourceRouter` and `DataSourceRouter` in +// your `ProviderServer` to achieve this, which will let you handle requests +// for each resource and data source in a resource-specific or data +// source-specific function. +// +// To serve the `ProviderServer` implementation as a gRPC server that Terraform +// can connect to, use the `tfprotov5/server.Serve` function. +package tfprotov5 diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value.go new file mode 100644 index 0000000000..d21e496611 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + msgpack "github.com/vmihailenco/msgpack/v5" + "github.com/vmihailenco/msgpack/v5/msgpcode" ) + +// ErrUnknownDynamicValueType is returned when a DynamicValue has no MsgPack or +// JSON bytes set. This should never be returned during the normal operation of +// a provider, and indicates one of the following: +// +// 1. terraform-plugin-go is out of sync with the protocol and should be +// updated. +// +// 2. terraform-plugin-go has a bug. +// +// 3. The `DynamicValue` was generated or modified by something other than +// terraform-plugin-go and is no longer a valid value. +var ErrUnknownDynamicValueType = errors.New("DynamicValue had no JSON or msgpack data set") + +// NewDynamicValue creates a DynamicValue from a tftypes.Value. You must +// specify the tftypes.Type you want to send the value as, and it must be a type +// that is compatible with the Type of the Value. Usually it should just be the +// Type of the Value, but it can also be the DynamicPseudoType. +func NewDynamicValue(t tftypes.Type, v tftypes.Value) (DynamicValue, error) { + b, err := v.MarshalMsgPack(t) //nolint:staticcheck + if err != nil { + return DynamicValue{}, err + } + return DynamicValue{ + MsgPack: b, + }, nil +} + +// DynamicValue represents a nested encoding value that came from the protocol. +// The only way providers should ever interact with it is by calling its +// `Unmarshal` method to retrieve a `tftypes.Value`.
Although the type system +// allows for other interactions, they are explicitly not supported, and will +// not be considered when evaluating for breaking changes. Treat this type as +// an opaque value, and *only* call its `Unmarshal` method. +type DynamicValue struct { + MsgPack []byte + JSON []byte +} + +// IsNull returns true if the DynamicValue represents a null value based on the +// underlying JSON or MessagePack data. +func (d DynamicValue) IsNull() (bool, error) { + if d.JSON != nil { + decoder := json.NewDecoder(bytes.NewReader(d.JSON)) + token, err := decoder.Token() + + if err != nil { + return false, fmt.Errorf("unable to read DynamicValue JSON token: %w", err) + } + + if token != nil { + return false, nil + } + + return true, nil + } + + if d.MsgPack != nil { + decoder := msgpack.NewDecoder(bytes.NewReader(d.MsgPack)) + code, err := decoder.PeekCode() + + if err != nil { + return false, fmt.Errorf("unable to read DynamicValue MsgPack code: %w", err) + } + + // Extensions are considered unknown + if msgpcode.IsExt(code) || code != msgpcode.Nil { + return false, nil + } + + return true, nil + } + + return false, fmt.Errorf("unable to read DynamicValue: %w", ErrUnknownDynamicValueType) +} + +// Unmarshal returns a `tftypes.Value` that represents the information +// contained in the DynamicValue in an easy-to-interact-with way. It is the +// main purpose of the DynamicValue type, and is how provider developers should +// obtain config, state, and other values from the protocol. +// +// Pass in the type you want the `Value` to be interpreted as. Terraform's type +// system encodes in a lossy manner, meaning the type information is not +// preserved losslessly when going over the wire. Sets, lists, and tuples all +// look the same, as do user-specified values when the provider has a +// DynamicPseudoType in its schema. Objects and maps all look the same, as +// well, as do DynamicPseudoType values sometimes. Fortunately, the provider +// should already know the type; it should be the type of the schema, or +// DynamicPseudoType if that's what's in the schema. `Unmarshal` will then +// parse the value as though it belongs to that type, if possible, and return a +// `tftypes.Value` with the appropriate information. If the data can't be +// interpreted as that type, an error will be returned saying so. In these +// cases, double check to make sure the schema is declaring the same type being +// passed into `Unmarshal`. +// +// In the event an ErrUnknownDynamicValueType is returned, one of three things +// has happened: +// +// 1. terraform-plugin-go is out of date and out of sync with the protocol, and +// an issue should be opened on its repo to get it updated. +// +// 2. terraform-plugin-go has a bug somewhere, and an issue should be opened on +// its repo to get it fixed. +// +// 3. The provider or a dependency has modified the `DynamicValue` in an +// unsupported way, or has created one from scratch, and should treat it as +// opaque and not modify it, only calling `Unmarshal` on `DynamicValue`s +// received from RPC requests. +func (d DynamicValue) Unmarshal(typ tftypes.Type) (tftypes.Value, error) { + if d.JSON != nil { + return tftypes.ValueFromJSON(d.JSON, typ) //nolint:staticcheck + } + if d.MsgPack != nil { + return tftypes.ValueFromMsgPack(d.MsgPack, typ) //nolint:staticcheck + } + return tftypes.Value{}, ErrUnknownDynamicValueType +}
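To make the encode/decode contract concrete, here is a minimal round trip through the `NewDynamicValue` and `Unmarshal` functions defined above (an illustrative sketch; the object type and value are invented):

    typ := tftypes.Object{AttributeTypes: map[string]tftypes.Type{
        "name": tftypes.String,
    }}
    val := tftypes.NewValue(typ, map[string]tftypes.Value{
        "name": tftypes.NewValue(tftypes.String, "example"),
    })

    // Encode: NewDynamicValue marshals the value as MessagePack.
    dv, err := tfprotov5.NewDynamicValue(typ, val)
    if err != nil {
        // handle the error
    }

    // Decode: the schema-derived type must be passed back in, because the
    // wire encoding alone cannot distinguish e.g. a list from a set.
    decoded, err := dv.Unmarshal(typ)
    if err != nil {
        // handle the error
    }
    _ = decoded.Equal(val) // true: the round trip is lossless for a concrete type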
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function.go new file mode 100644 index 0000000000..ef1e363a3c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Function describes the definition of a function. Result must be defined. +type Function struct { + // Parameters is the ordered list of positional function parameters. + Parameters []*FunctionParameter + + // VariadicParameter is an optional final parameter which accepts zero or + // more argument values, in which Terraform will send an ordered list of the + // parameter type. + VariadicParameter *FunctionParameter + + // Return is the function result. + Return *FunctionReturn + + // Summary is the shortened human-readable documentation for the function. + Summary string + + // Description is the longer human-readable documentation for the function. + Description string + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // DeprecationMessage is the human-readable documentation if the function + // is deprecated. This message should be practitioner oriented to explain + // how their configuration should be updated. + DeprecationMessage string +} + +// FunctionMetadata describes metadata for a function in the GetMetadata RPC. +type FunctionMetadata struct { + // Name is the name of the function. + Name string +} + +// FunctionParameter describes the definition of a function parameter. Type must +// be defined. +type FunctionParameter struct { + // AllowNullValue when enabled denotes that a null argument value can be + // passed to the provider. When disabled, Terraform returns an error if the + // argument value is null. + AllowNullValue bool + + // AllowUnknownValues when enabled denotes that any unknown argument value + // (recursively checked for collections) can be passed to the provider. When + // disabled and an unknown value is present, Terraform skips the function + // call entirely and returns an unknown value result from the function. + AllowUnknownValues bool + + // Description is the human-readable documentation for the parameter. + Description string + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // Name is the human-readable display name for the parameter. Parameters + // are by definition positional and this name is only used in documentation. + Name string + + // Type indicates the type of data the parameter expects. + Type tftypes.Type +} + +// FunctionReturn describes the definition of a function result. Type must be +// defined. +type FunctionReturn struct { + // Type indicates the type of return data. + Type tftypes.Type +} + +// FunctionServer is an interface containing the methods a function +// implementation needs to fill.
+type FunctionServer interface { + // CallFunction is called when Terraform wants to execute the logic of a + // function referenced in the configuration. + CallFunction(context.Context, *CallFunctionRequest) (*CallFunctionResponse, error) + + // GetFunctions is called when Terraform wants to look up which functions a + // provider supports when not calling GetProviderSchema. + GetFunctions(context.Context, *GetFunctionsRequest) (*GetFunctionsResponse, error) +} + +// CallFunctionRequest is the request Terraform sends when it wants to execute +// the logic of a function referenced in the configuration. +type CallFunctionRequest struct { + // Name is the function name being called. + Name string + + // Arguments is the configuration value of each argument the practitioner + // supplied for the function call. The ordering and value of each element + // matches the function parameters and their associated type. If the + // function definition includes a final variadic parameter, its value is an + // ordered list of the variadic parameter type. + Arguments []*DynamicValue +} + +// CallFunctionResponse is the response from the provider with the result of +// executing the logic of the function. +type CallFunctionResponse struct { + // Error reports errors related to the execution of the + // function logic. Returning a nil error indicates a successful response + // with no errors presented to practitioners. + Error *FunctionError + + // Result is the return value from the called function, matching the result + // type in the function definition. + Result *DynamicValue +} + +// GetFunctionsRequest is the request Terraform sends when it wants to look up +// which functions a provider supports when not calling GetProviderSchema. +type GetFunctionsRequest struct{} + +// GetFunctionsResponse is the response from the provider about the implemented +// functions. +type GetFunctionsResponse struct { + // Diagnostics report errors or warnings related to the provider + // implementation. Returning an empty slice indicates a successful response + // with no warnings or errors presented to practitioners. + Diagnostics []*Diagnostic + + // Functions is a map of function names to their definition. + // + // Unlike data resources and managed resources, the name should NOT be + // prefixed with the provider name and an underscore. Configuration + // references to functions use a separate namespacing syntax that already + // includes the provider name. + Functions map[string]*Function +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function_error.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function_error.go new file mode 100644 index 0000000000..558335f961 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function_error.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +// FunctionError is used to convey information back to the user running Terraform. +type FunctionError struct { + // Text is the description of the error. + Text string + + // FunctionArgument is the positional function argument for aligning + // configuration source. + FunctionArgument *int64 +}
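A bare-bones `FunctionServer` method built from these request and response types might look like the following sketch (the echo behavior and the `server` receiver are invented for illustration):

    func (s *server) CallFunction(ctx context.Context, req *tfprotov5.CallFunctionRequest) (*tfprotov5.CallFunctionResponse, error) {
        // Decode the single string argument.
        arg, err := req.Arguments[0].Unmarshal(tftypes.String)
        if err != nil {
            // Report the failure against argument 0 via FunctionError.
            argIndex := int64(0)
            return &tfprotov5.CallFunctionResponse{
                Error: &tfprotov5.FunctionError{
                    Text:             err.Error(),
                    FunctionArgument: &argIndex,
                },
            }, nil
        }

        // Echo the argument back as the function result.
        result, err := tfprotov5.NewDynamicValue(tftypes.String, arg)
        if err != nil {
            return nil, err
        }

        return &tfprotov5.CallFunctionResponse{Result: &result}, nil
    }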
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go new file mode 100644 index 0000000000..cc40d861fc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package diag + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// Diagnostics is a collection of Diagnostic. +type Diagnostics []*tfprotov5.Diagnostic + +// ErrorCount returns the number of error severity diagnostics. +func (d Diagnostics) ErrorCount() int { + var result int + + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + if diagnostic.Severity != tfprotov5.DiagnosticSeverityError { + continue + } + + result++ + } + + return result +} + +// Log will log every diagnostic: +// +// - Error severity at ERROR level +// - Warning severity at WARN level +// - Invalid/Unknown severity at WARN level +func (d Diagnostics) Log(ctx context.Context) { + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + diagnosticFields := map[string]interface{}{ + logging.KeyDiagnosticDetail: diagnostic.Detail, + logging.KeyDiagnosticSeverity: diagnostic.Severity.String(), + logging.KeyDiagnosticSummary: diagnostic.Summary, + } + + if diagnostic.Attribute != nil { + diagnosticFields[logging.KeyDiagnosticAttribute] = diagnostic.Attribute.String() + } + + switch diagnostic.Severity { + case tfprotov5.DiagnosticSeverityError: + logging.ProtocolError(ctx, "Response contains error diagnostic", diagnosticFields) + case tfprotov5.DiagnosticSeverityWarning: + logging.ProtocolWarn(ctx, "Response contains warning diagnostic", diagnosticFields) + default: + logging.ProtocolWarn(ctx, "Response contains unknown diagnostic", diagnosticFields) + } + } +} + +// WarningCount returns the number of warning severity diagnostics. +func (d Diagnostics) WarningCount() int { + var result int + + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + if diagnostic.Severity != tfprotov5.DiagnosticSeverityWarning { + continue + } + + result++ + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/doc.go new file mode 100644 index 0000000000..faaba2285c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package diag contains diagnostics helpers. These implementations are +// intentionally outside the public API. +package diag diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go new file mode 100644 index 0000000000..3b831e7dcf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func ValidateDataSourceConfigRequest(in *tfplugin5.ValidateDataSourceConfig_Request) *tfprotov5.ValidateDataSourceConfigRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.ValidateDataSourceConfigRequest{ + Config: DynamicValue(in.Config), + TypeName: in.TypeName, + } + + return resp +} + +func ReadDataSourceRequest(in *tfplugin5.ReadDataSource_Request) *tfprotov5.ReadDataSourceRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.ReadDataSourceRequest{ + Config: DynamicValue(in.Config), + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/doc.go new file mode 100644 index 0000000000..01a7012d53 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package fromproto converts Protocol Buffers generated tfplugin5 types into +// terraform-plugin-go tfprotov5 types. +package fromproto diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/dynamic_value.go new file mode 100644 index 0000000000..af332bfdd9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/dynamic_value.go @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func DynamicValue(in *tfplugin5.DynamicValue) *tfprotov5.DynamicValue { + if in == nil { + return nil + } + + resp := &tfprotov5.DynamicValue{ + MsgPack: in.Msgpack, + JSON: in.Json, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/function.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/function.go new file mode 100644 index 0000000000..0abd61de32 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/function.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func CallFunctionRequest(in *tfplugin5.CallFunction_Request) *tfprotov5.CallFunctionRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.CallFunctionRequest{ + Arguments: make([]*tfprotov5.DynamicValue, 0, len(in.Arguments)), + Name: in.Name, + } + + for _, argument := range in.Arguments { + resp.Arguments = append(resp.Arguments, DynamicValue(argument)) + } + + return resp +} + +func GetFunctionsRequest(in *tfplugin5.GetFunctions_Request) *tfprotov5.GetFunctionsRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.GetFunctionsRequest{} + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go new file mode 100644 index 0000000000..6f8cd7d935 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go @@ -0,0 +1,64 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func GetMetadataRequest(in *tfplugin5.GetMetadata_Request) *tfprotov5.GetMetadataRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.GetMetadataRequest{} + + return resp +} + +func GetProviderSchemaRequest(in *tfplugin5.GetProviderSchema_Request) *tfprotov5.GetProviderSchemaRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.GetProviderSchemaRequest{} + + return resp +} + +func PrepareProviderConfigRequest(in *tfplugin5.PrepareProviderConfig_Request) *tfprotov5.PrepareProviderConfigRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.PrepareProviderConfigRequest{ + Config: DynamicValue(in.Config), + } + + return resp +} + +func ConfigureProviderRequest(in *tfplugin5.Configure_Request) *tfprotov5.ConfigureProviderRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.ConfigureProviderRequest{ + Config: DynamicValue(in.Config), + TerraformVersion: in.TerraformVersion, + } + + return resp +} + +func StopProviderRequest(in *tfplugin5.Stop_Request) *tfprotov5.StopProviderRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.StopProviderRequest{} + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/raw_state.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/raw_state.go new file mode 100644 index 0000000000..c31b7e64bd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/raw_state.go @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func RawState(in *tfplugin5.RawState) *tfprotov5.RawState { + if in == nil { + return nil + } + + resp := &tfprotov5.RawState{ + JSON: in.Json, + Flatmap: in.Flatmap, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go new file mode 100644 index 0000000000..c7e8d72ef0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go @@ -0,0 +1,115 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func ValidateResourceTypeConfigRequest(in *tfplugin5.ValidateResourceTypeConfig_Request) *tfprotov5.ValidateResourceTypeConfigRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.ValidateResourceTypeConfigRequest{ + Config: DynamicValue(in.Config), + TypeName: in.TypeName, + } + + return resp +} + +func UpgradeResourceStateRequest(in *tfplugin5.UpgradeResourceState_Request) *tfprotov5.UpgradeResourceStateRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.UpgradeResourceStateRequest{ + RawState: RawState(in.RawState), + TypeName: in.TypeName, + Version: in.Version, + } + + return resp +} + +func ReadResourceRequest(in *tfplugin5.ReadResource_Request) *tfprotov5.ReadResourceRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.ReadResourceRequest{ + CurrentState: DynamicValue(in.CurrentState), + Private: in.Private, + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + } + + return resp +} + +func PlanResourceChangeRequest(in *tfplugin5.PlanResourceChange_Request) *tfprotov5.PlanResourceChangeRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.PlanResourceChangeRequest{ + Config: DynamicValue(in.Config), + PriorPrivate: in.PriorPrivate, + PriorState: DynamicValue(in.PriorState), + ProposedNewState: DynamicValue(in.ProposedNewState), + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + } + + return resp +} + +func ApplyResourceChangeRequest(in *tfplugin5.ApplyResourceChange_Request) *tfprotov5.ApplyResourceChangeRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.ApplyResourceChangeRequest{ + Config: DynamicValue(in.Config), + PlannedPrivate: in.PlannedPrivate, + PlannedState: DynamicValue(in.PlannedState), + PriorState: DynamicValue(in.PriorState), + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + } + + return resp +} + +func ImportResourceStateRequest(in *tfplugin5.ImportResourceState_Request) *tfprotov5.ImportResourceStateRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.ImportResourceStateRequest{ + TypeName: in.TypeName, + ID: in.Id, + } + + return resp +} + +func MoveResourceStateRequest(in *tfplugin5.MoveResourceState_Request) *tfprotov5.MoveResourceStateRequest { + if in == nil { + return nil + } + + resp := &tfprotov5.MoveResourceStateRequest{ + SourcePrivate: in.SourcePrivate, + SourceProviderAddress: in.SourceProviderAddress, + SourceSchemaVersion: in.SourceSchemaVersion, + SourceState: RawState(in.SourceState), + SourceTypeName: 
in.SourceTypeName, + TargetTypeName: in.TargetTypeName, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr/doc.go new file mode 100644 index 0000000000..9b9f61f06d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package funcerr contains function error helpers. These implementations are +// intentionally outside the public API. +package funcerr diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr/function_error.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr/function_error.go new file mode 100644 index 0000000000..60428b442e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr/function_error.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcerr + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// FunctionError is a single FunctionError. +type FunctionError tfprotov5.FunctionError + +// HasError returns true if the FunctionError is not empty. +func (e *FunctionError) HasError() bool { + if e == nil { + return false + } + + return e.Text != "" || e.FunctionArgument != nil +} + +// Log will log the function error: +func (e *FunctionError) Log(ctx context.Context) { + if e == nil { + return + } + + if !e.HasError() { + return + } + + switch { + case e.FunctionArgument != nil && e.Text != "": + logging.ProtocolError(ctx, "Response contains function error", map[string]interface{}{ + logging.KeyFunctionErrorText: e.Text, + logging.KeyFunctionErrorArgument: *e.FunctionArgument, + }) + case e.FunctionArgument != nil: + logging.ProtocolError(ctx, "Response contains function error", map[string]interface{}{ + logging.KeyFunctionErrorArgument: *e.FunctionArgument, + }) + case e.Text != "": + logging.ProtocolError(ctx, "Response contains function error", map[string]interface{}{ + logging.KeyFunctionErrorText: e.Text, + }) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/context_keys.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/context_keys.go new file mode 100644 index 0000000000..5b2556a27b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/context_keys.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf5serverlogging + +// Context key types. +// Reference: https://staticcheck.io/docs/checks/#SA1029 + +// ContextKeyDownstreamRequestStartTime is a context.Context key to store the +// time.Time when the server began a downstream request. +type ContextKeyDownstreamRequestStartTime struct{} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/doc.go new file mode 100644 index 0000000000..82b9d39e5c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// Package tf5serverlogging contains logging functionality specific to +// tf5server and tfprotov5 types. +package tf5serverlogging diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/downstream_request.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/downstream_request.go new file mode 100644 index 0000000000..8c442feffd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/downstream_request.go @@ -0,0 +1,64 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf5serverlogging + +import ( + "context" + "time" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr" +) + +// DownstreamRequest sets a request duration start time context key and +// generates a TRACE "Sending request downstream" log. +func DownstreamRequest(ctx context.Context) context.Context { + requestStart := time.Now() + ctx = context.WithValue(ctx, ContextKeyDownstreamRequestStartTime{}, requestStart) + + logging.ProtocolTrace(ctx, "Sending request downstream") + + return ctx +} + +// DownstreamResponse generates the following logging: +// +// - TRACE "Received downstream response" log with request duration and +// diagnostic severity counts +// - Per-diagnostic logs +func DownstreamResponse(ctx context.Context, diagnostics diag.Diagnostics) { + responseFields := map[string]interface{}{ + logging.KeyDiagnosticErrorCount: diagnostics.ErrorCount(), + logging.KeyDiagnosticWarningCount: diagnostics.WarningCount(), + } + + if requestStart, ok := ctx.Value(ContextKeyDownstreamRequestStartTime{}).(time.Time); ok { + responseFields[logging.KeyRequestDurationMs] = time.Since(requestStart).Milliseconds() + } + + logging.ProtocolTrace(ctx, "Received downstream response", responseFields) + diagnostics.Log(ctx) +} + +// DownstreamResponseWithError generates the following logging: +// +// - TRACE "Received downstream response" log with request duration and +// whether a function error is present +// - Log with function error details +func DownstreamResponseWithError(ctx context.Context, funcErr *tfprotov5.FunctionError) { + fe := (*funcerr.FunctionError)(funcErr) + + responseFields := map[string]interface{}{ + logging.KeyFunctionErrorExists: fe.HasError(), + } + + if requestStart, ok := ctx.Value(ContextKeyDownstreamRequestStartTime{}).(time.Time); ok { + responseFields[logging.KeyRequestDurationMs] = time.Since(requestStart).Milliseconds() + } + + logging.ProtocolTrace(ctx, "Received downstream response", responseFields) + fe.Log(ctx) +}
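These two helpers are designed to bracket the call into the wrapped provider server inside each RPC handler. A sketch of the intended pattern (the `downstream` variable here is an invented stand-in for the wrapped `tfprotov5.ProviderServer`):

    ctx = tf5serverlogging.DownstreamRequest(ctx) // stores the start time in ctx

    resp, err := downstream.ReadDataSource(ctx, req)
    if err != nil {
        return nil, err
    }

    // Logs the elapsed duration in milliseconds plus one entry per diagnostic.
    tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics)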
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/server_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/server_capabilities.go new file mode 100644 index 0000000000..d0f86c8427 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/server_capabilities.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf5serverlogging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ServerCapabilities generates a TRACE "Announced server capabilities" log. +func ServerCapabilities(ctx context.Context, capabilities *tfprotov5.ServerCapabilities) { + responseFields := map[string]interface{}{ + logging.KeyServerCapabilityGetProviderSchemaOptional: false, + logging.KeyServerCapabilityPlanDestroy: false, + } + + if capabilities != nil { + responseFields[logging.KeyServerCapabilityGetProviderSchemaOptional] = capabilities.GetProviderSchemaOptional + responseFields[logging.KeyServerCapabilityPlanDestroy] = capabilities.PlanDestroy + } + + logging.ProtocolTrace(ctx, "Announced server capabilities", responseFields) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go new file mode 100644 index 0000000000..4f3f970d4d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go @@ -0,0 +1,6289 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Terraform Plugin RPC protocol version 5.5 +// +// This file defines version 5.5 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. +// +// This file will not be updated. Any minor versions of protocol 5 to follow +// should copy this file and modify the copy while maintaining backwards +// compatibility. Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. +// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. +// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the main +// branch or any other development branch. +// + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: tfplugin5.proto + +package tfplugin5 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StringKind int32 + +const ( + StringKind_PLAIN StringKind = 0 + StringKind_MARKDOWN StringKind = 1 +) + +// Enum value maps for StringKind.
+var ( + StringKind_name = map[int32]string{ + 0: "PLAIN", + 1: "MARKDOWN", + } + StringKind_value = map[string]int32{ + "PLAIN": 0, + "MARKDOWN": 1, + } +) + +func (x StringKind) Enum() *StringKind { + p := new(StringKind) + *p = x + return p +} + +func (x StringKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (StringKind) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin5_proto_enumTypes[0].Descriptor() +} + +func (StringKind) Type() protoreflect.EnumType { + return &file_tfplugin5_proto_enumTypes[0] +} + +func (x StringKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use StringKind.Descriptor instead. +func (StringKind) EnumDescriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{0} +} + +type Diagnostic_Severity int32 + +const ( + Diagnostic_INVALID Diagnostic_Severity = 0 + Diagnostic_ERROR Diagnostic_Severity = 1 + Diagnostic_WARNING Diagnostic_Severity = 2 +) + +// Enum value maps for Diagnostic_Severity. +var ( + Diagnostic_Severity_name = map[int32]string{ + 0: "INVALID", + 1: "ERROR", + 2: "WARNING", + } + Diagnostic_Severity_value = map[string]int32{ + "INVALID": 0, + "ERROR": 1, + "WARNING": 2, + } +) + +func (x Diagnostic_Severity) Enum() *Diagnostic_Severity { + p := new(Diagnostic_Severity) + *p = x + return p +} + +func (x Diagnostic_Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Diagnostic_Severity) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin5_proto_enumTypes[1].Descriptor() +} + +func (Diagnostic_Severity) Type() protoreflect.EnumType { + return &file_tfplugin5_proto_enumTypes[1] +} + +func (x Diagnostic_Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Diagnostic_Severity.Descriptor instead. +func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{1, 0} +} + +type Schema_NestedBlock_NestingMode int32 + +const ( + Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0 + Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1 + Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2 + Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3 + Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4 + Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5 +) + +// Enum value maps for Schema_NestedBlock_NestingMode. 
+var ( + Schema_NestedBlock_NestingMode_name = map[int32]string{ + 0: "INVALID", + 1: "SINGLE", + 2: "LIST", + 3: "SET", + 4: "MAP", + 5: "GROUP", + } + Schema_NestedBlock_NestingMode_value = map[string]int32{ + "INVALID": 0, + "SINGLE": 1, + "LIST": 2, + "SET": 3, + "MAP": 4, + "GROUP": 5, + } +) + +func (x Schema_NestedBlock_NestingMode) Enum() *Schema_NestedBlock_NestingMode { + p := new(Schema_NestedBlock_NestingMode) + *p = x + return p +} + +func (x Schema_NestedBlock_NestingMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Schema_NestedBlock_NestingMode) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin5_proto_enumTypes[2].Descriptor() +} + +func (Schema_NestedBlock_NestingMode) Type() protoreflect.EnumType { + return &file_tfplugin5_proto_enumTypes[2] +} + +func (x Schema_NestedBlock_NestingMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Schema_NestedBlock_NestingMode.Descriptor instead. +func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{6, 2, 0} +} + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. +type DynamicValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` + Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"` +} + +func (x *DynamicValue) Reset() { + *x = DynamicValue{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicValue) ProtoMessage() {} + +func (x *DynamicValue) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicValue.ProtoReflect.Descriptor instead. 
+func (*DynamicValue) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{0} +} + +func (x *DynamicValue) GetMsgpack() []byte { + if x != nil { + return x.Msgpack + } + return nil +} + +func (x *DynamicValue) GetJson() []byte { + if x != nil { + return x.Json + } + return nil +} + +type Diagnostic struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` + Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` +} + +func (x *Diagnostic) Reset() { + *x = Diagnostic{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Diagnostic) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Diagnostic) ProtoMessage() {} + +func (x *Diagnostic) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Diagnostic.ProtoReflect.Descriptor instead. +func (*Diagnostic) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{1} +} + +func (x *Diagnostic) GetSeverity() Diagnostic_Severity { + if x != nil { + return x.Severity + } + return Diagnostic_INVALID +} + +func (x *Diagnostic) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Diagnostic) GetDetail() string { + if x != nil { + return x.Detail + } + return "" +} + +func (x *Diagnostic) GetAttribute() *AttributePath { + if x != nil { + return x.Attribute + } + return nil +} + +type FunctionError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"` + // The optional function_argument records the index position of the + // argument which caused the error. + FunctionArgument *int64 `protobuf:"varint,2,opt,name=function_argument,json=functionArgument,proto3,oneof" json:"function_argument,omitempty"` +} + +func (x *FunctionError) Reset() { + *x = FunctionError{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FunctionError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FunctionError) ProtoMessage() {} + +func (x *FunctionError) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FunctionError.ProtoReflect.Descriptor instead. 
+func (*FunctionError) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{2} +} + +func (x *FunctionError) GetText() string { + if x != nil { + return x.Text + } + return "" +} + +func (x *FunctionError) GetFunctionArgument() int64 { + if x != nil && x.FunctionArgument != nil { + return *x.FunctionArgument + } + return 0 +} + +type AttributePath struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` +} + +func (x *AttributePath) Reset() { + *x = AttributePath{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttributePath) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributePath) ProtoMessage() {} + +func (x *AttributePath) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributePath.ProtoReflect.Descriptor instead. +func (*AttributePath) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{3} +} + +func (x *AttributePath) GetSteps() []*AttributePath_Step { + if x != nil { + return x.Steps + } + return nil +} + +type Stop struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Stop) Reset() { + *x = Stop{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Stop) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Stop) ProtoMessage() {} + +func (x *Stop) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Stop.ProtoReflect.Descriptor instead. +func (*Stop) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{4} +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. 
+type RawState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` + Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RawState) Reset() { + *x = RawState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RawState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawState) ProtoMessage() {} + +func (x *RawState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawState.ProtoReflect.Descriptor instead. +func (*RawState) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{5} +} + +func (x *RawState) GetJson() []byte { + if x != nil { + return x.Json + } + return nil +} + +func (x *RawState) GetFlatmap() map[string]string { + if x != nil { + return x.Flatmap + } + return nil +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. +type Schema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Block is the top level configuration block for this schema. + Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` +} + +func (x *Schema) Reset() { + *x = Schema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema.ProtoReflect.Descriptor instead. +func (*Schema) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{6} +} + +func (x *Schema) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Schema) GetBlock() *Schema_Block { + if x != nil { + return x.Block + } + return nil +} + +// ServerCapabilities allows providers to communicate extra information +// regarding supported protocol features. This is used to indicate +// availability of certain forward-compatible changes which may be optional +// in a major protocol version, but cannot be tested for directly. 
+type ServerCapabilities struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The plan_destroy capability signals that a provider expects a call + // to PlanResourceChange when a resource is going to be destroyed. + PlanDestroy bool `protobuf:"varint,1,opt,name=plan_destroy,json=planDestroy,proto3" json:"plan_destroy,omitempty"` + // The get_provider_schema_optional capability indicates that this + // provider does not require calling GetProviderSchema to operate + // normally, and the caller can use a cached copy of the provider's + // schema. + GetProviderSchemaOptional bool `protobuf:"varint,2,opt,name=get_provider_schema_optional,json=getProviderSchemaOptional,proto3" json:"get_provider_schema_optional,omitempty"` + // The move_resource_state capability signals that a provider supports the + // MoveResourceState RPC. + MoveResourceState bool `protobuf:"varint,3,opt,name=move_resource_state,json=moveResourceState,proto3" json:"move_resource_state,omitempty"` +} + +func (x *ServerCapabilities) Reset() { + *x = ServerCapabilities{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerCapabilities) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerCapabilities) ProtoMessage() {} + +func (x *ServerCapabilities) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerCapabilities.ProtoReflect.Descriptor instead. +func (*ServerCapabilities) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{7} +} + +func (x *ServerCapabilities) GetPlanDestroy() bool { + if x != nil { + return x.PlanDestroy + } + return false +} + +func (x *ServerCapabilities) GetGetProviderSchemaOptional() bool { + if x != nil { + return x.GetProviderSchemaOptional + } + return false +} + +func (x *ServerCapabilities) GetMoveResourceState() bool { + if x != nil { + return x.MoveResourceState + } + return false +} + +type Function struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // parameters is the ordered list of positional function parameters. + Parameters []*Function_Parameter `protobuf:"bytes,1,rep,name=parameters,proto3" json:"parameters,omitempty"` + // variadic_parameter is an optional final parameter which accepts + // zero or more argument values, in which Terraform will send an + // ordered list of the parameter type. + VariadicParameter *Function_Parameter `protobuf:"bytes,2,opt,name=variadic_parameter,json=variadicParameter,proto3" json:"variadic_parameter,omitempty"` + // return is the function result. + Return *Function_Return `protobuf:"bytes,3,opt,name=return,proto3" json:"return,omitempty"` + // summary is the human-readable shortened documentation for the function. + Summary string `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"` + // description is human-readable documentation for the function. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // description_kind is the formatting of the description.
+ DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` + // deprecation_message is human-readable documentation if the + // function is deprecated. + DeprecationMessage string `protobuf:"bytes,7,opt,name=deprecation_message,json=deprecationMessage,proto3" json:"deprecation_message,omitempty"` +} + +func (x *Function) Reset() { + *x = Function{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function) ProtoMessage() {} + +func (x *Function) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function.ProtoReflect.Descriptor instead. +func (*Function) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{8} +} + +func (x *Function) GetParameters() []*Function_Parameter { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Function) GetVariadicParameter() *Function_Parameter { + if x != nil { + return x.VariadicParameter + } + return nil +} + +func (x *Function) GetReturn() *Function_Return { + if x != nil { + return x.Return + } + return nil +} + +func (x *Function) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Function) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Function) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Function) GetDeprecationMessage() string { + if x != nil { + return x.DeprecationMessage + } + return "" +} + +type GetMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetMetadata) Reset() { + *x = GetMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata) ProtoMessage() {} + +func (x *GetMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata.ProtoReflect.Descriptor instead. 
+func (*GetMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9} +} + +type GetProviderSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProviderSchema) Reset() { + *x = GetProviderSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema) ProtoMessage() {} + +func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema.ProtoReflect.Descriptor instead. +func (*GetProviderSchema) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{10} +} + +type PrepareProviderConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PrepareProviderConfig) Reset() { + *x = PrepareProviderConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrepareProviderConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrepareProviderConfig) ProtoMessage() {} + +func (x *PrepareProviderConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrepareProviderConfig.ProtoReflect.Descriptor instead. +func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{11} +} + +type UpgradeResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpgradeResourceState) Reset() { + *x = UpgradeResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState) ProtoMessage() {} + +func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState.ProtoReflect.Descriptor instead. 
+func (*UpgradeResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{12} +} + +type ValidateResourceTypeConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateResourceTypeConfig) Reset() { + *x = ValidateResourceTypeConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceTypeConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceTypeConfig) ProtoMessage() {} + +func (x *ValidateResourceTypeConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceTypeConfig.ProtoReflect.Descriptor instead. +func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{13} +} + +type ValidateDataSourceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateDataSourceConfig) Reset() { + *x = ValidateDataSourceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataSourceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataSourceConfig) ProtoMessage() {} + +func (x *ValidateDataSourceConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataSourceConfig.ProtoReflect.Descriptor instead. +func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{14} +} + +type Configure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Configure) Reset() { + *x = Configure{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Configure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Configure) ProtoMessage() {} + +func (x *Configure) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Configure.ProtoReflect.Descriptor instead. 
+func (*Configure) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{15} +} + +type ReadResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReadResource) Reset() { + *x = ReadResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource) ProtoMessage() {} + +func (x *ReadResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource.ProtoReflect.Descriptor instead. +func (*ReadResource) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{16} +} + +type PlanResourceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PlanResourceChange) Reset() { + *x = PlanResourceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange) ProtoMessage() {} + +func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange.ProtoReflect.Descriptor instead. +func (*PlanResourceChange) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{17} +} + +type ApplyResourceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ApplyResourceChange) Reset() { + *x = ApplyResourceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange) ProtoMessage() {} + +func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange.ProtoReflect.Descriptor instead. 
+func (*ApplyResourceChange) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{18} +} + +type ImportResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ImportResourceState) Reset() { + *x = ImportResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState) ProtoMessage() {} + +func (x *ImportResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState.ProtoReflect.Descriptor instead. +func (*ImportResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{19} +} + +type MoveResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MoveResourceState) Reset() { + *x = MoveResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResourceState) ProtoMessage() {} + +func (x *MoveResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResourceState.ProtoReflect.Descriptor instead. +func (*MoveResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{20} +} + +type ReadDataSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReadDataSource) Reset() { + *x = ReadDataSource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource) ProtoMessage() {} + +func (x *ReadDataSource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource.ProtoReflect.Descriptor instead. 
+func (*ReadDataSource) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{21} +} + +type GetProvisionerSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProvisionerSchema) Reset() { + *x = GetProvisionerSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProvisionerSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProvisionerSchema) ProtoMessage() {} + +func (x *GetProvisionerSchema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProvisionerSchema.ProtoReflect.Descriptor instead. +func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{22} +} + +type ValidateProvisionerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateProvisionerConfig) Reset() { + *x = ValidateProvisionerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProvisionerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProvisionerConfig) ProtoMessage() {} + +func (x *ValidateProvisionerConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProvisionerConfig.ProtoReflect.Descriptor instead. +func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{23} +} + +type ProvisionResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ProvisionResource) Reset() { + *x = ProvisionResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProvisionResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProvisionResource) ProtoMessage() {} + +func (x *ProvisionResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProvisionResource.ProtoReflect.Descriptor instead. 
+func (*ProvisionResource) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{24} +} + +type GetFunctions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFunctions) Reset() { + *x = GetFunctions{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions) ProtoMessage() {} + +func (x *GetFunctions) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions.ProtoReflect.Descriptor instead. +func (*GetFunctions) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{25} +} + +type CallFunction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CallFunction) Reset() { + *x = CallFunction{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction) ProtoMessage() {} + +func (x *CallFunction) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction.ProtoReflect.Descriptor instead. +func (*CallFunction) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{26} +} + +type AttributePath_Step struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Selector: + // + // *AttributePath_Step_AttributeName + // *AttributePath_Step_ElementKeyString + // *AttributePath_Step_ElementKeyInt + Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"` +} + +func (x *AttributePath_Step) Reset() { + *x = AttributePath_Step{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttributePath_Step) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributePath_Step) ProtoMessage() {} + +func (x *AttributePath_Step) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributePath_Step.ProtoReflect.Descriptor instead. 
+func (*AttributePath_Step) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{3, 0} +} + +func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (x *AttributePath_Step) GetAttributeName() string { + if x, ok := x.GetSelector().(*AttributePath_Step_AttributeName); ok { + return x.AttributeName + } + return "" +} + +func (x *AttributePath_Step) GetElementKeyString() string { + if x, ok := x.GetSelector().(*AttributePath_Step_ElementKeyString); ok { + return x.ElementKeyString + } + return "" +} + +func (x *AttributePath_Step) GetElementKeyInt() int64 { + if x, ok := x.GetSelector().(*AttributePath_Step_ElementKeyInt); ok { + return x.ElementKeyInt + } + return 0 +} + +type isAttributePath_Step_Selector interface { + isAttributePath_Step_Selector() +} + +type AttributePath_Step_AttributeName struct { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyString struct { + // Set "element_key_*" to represent looking up an element in + // an indexable collection type. + ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyInt struct { + ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` +} + +func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} + +type Stop_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Stop_Request) Reset() { + *x = Stop_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Stop_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Stop_Request) ProtoMessage() {} + +func (x *Stop_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Stop_Request.ProtoReflect.Descriptor instead. 
+func (*Stop_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{4, 0} +} + +type Stop_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` +} + +func (x *Stop_Response) Reset() { + *x = Stop_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Stop_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Stop_Response) ProtoMessage() {} + +func (x *Stop_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Stop_Response.ProtoReflect.Descriptor instead. +func (*Stop_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *Stop_Response) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type Schema_Block struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + DescriptionKind StringKind `protobuf:"varint,5,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` + Deprecated bool `protobuf:"varint,6,opt,name=deprecated,proto3" json:"deprecated,omitempty"` +} + +func (x *Schema_Block) Reset() { + *x = Schema_Block{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_Block) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_Block) ProtoMessage() {} + +func (x *Schema_Block) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_Block.ProtoReflect.Descriptor instead. 
+func (*Schema_Block) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *Schema_Block) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Schema_Block) GetAttributes() []*Schema_Attribute { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Schema_Block) GetBlockTypes() []*Schema_NestedBlock { + if x != nil { + return x.BlockTypes + } + return nil +} + +func (x *Schema_Block) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema_Block) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Schema_Block) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +type Schema_Attribute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"` + Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" json:"sensitive,omitempty"` + DescriptionKind StringKind `protobuf:"varint,8,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` + Deprecated bool `protobuf:"varint,9,opt,name=deprecated,proto3" json:"deprecated,omitempty"` +} + +func (x *Schema_Attribute) Reset() { + *x = Schema_Attribute{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_Attribute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_Attribute) ProtoMessage() {} + +func (x *Schema_Attribute) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_Attribute.ProtoReflect.Descriptor instead. 
+func (*Schema_Attribute) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{6, 1} +} + +func (x *Schema_Attribute) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Schema_Attribute) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +func (x *Schema_Attribute) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema_Attribute) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *Schema_Attribute) GetOptional() bool { + if x != nil { + return x.Optional + } + return false +} + +func (x *Schema_Attribute) GetComputed() bool { + if x != nil { + return x.Computed + } + return false +} + +func (x *Schema_Attribute) GetSensitive() bool { + if x != nil { + return x.Sensitive + } + return false +} + +func (x *Schema_Attribute) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Schema_Attribute) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +type Schema_NestedBlock struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` + MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` +} + +func (x *Schema_NestedBlock) Reset() { + *x = Schema_NestedBlock{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_NestedBlock) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_NestedBlock) ProtoMessage() {} + +func (x *Schema_NestedBlock) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_NestedBlock.ProtoReflect.Descriptor instead. +func (*Schema_NestedBlock) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{6, 2} +} + +func (x *Schema_NestedBlock) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *Schema_NestedBlock) GetBlock() *Schema_Block { + if x != nil { + return x.Block + } + return nil +} + +func (x *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode { + if x != nil { + return x.Nesting + } + return Schema_NestedBlock_INVALID +} + +func (x *Schema_NestedBlock) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *Schema_NestedBlock) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +type Function_Parameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the human-readable display name for the parameter. 
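+	// Arguments are bound to parameters positionally, so the name is used
+	// for documentation and diagnostics rather than for matching.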
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// type is the type constraint for the parameter.
+	Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+	// allow_null_value when enabled denotes that a null argument value can
+	// be passed to the provider. When disabled, Terraform returns an error
+	// if the argument value is null.
+	AllowNullValue bool `protobuf:"varint,3,opt,name=allow_null_value,json=allowNullValue,proto3" json:"allow_null_value,omitempty"`
+	// allow_unknown_values when enabled denotes that unknown argument
+	// values can be passed to the provider. When disabled, Terraform
+	// skips the function call entirely and assumes an unknown value
+	// result from the function.
+	AllowUnknownValues bool `protobuf:"varint,4,opt,name=allow_unknown_values,json=allowUnknownValues,proto3" json:"allow_unknown_values,omitempty"`
+	// description is human-readable documentation for the parameter.
+	Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
+	// description_kind is the formatting of the description.
+	DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"`
+}
+
+func (x *Function_Parameter) Reset() {
+	*x = Function_Parameter{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_tfplugin5_proto_msgTypes[34]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Function_Parameter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Function_Parameter) ProtoMessage() {}
+
+func (x *Function_Parameter) ProtoReflect() protoreflect.Message {
+	mi := &file_tfplugin5_proto_msgTypes[34]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Function_Parameter.ProtoReflect.Descriptor instead.
+func (*Function_Parameter) Descriptor() ([]byte, []int) {
+	return file_tfplugin5_proto_rawDescGZIP(), []int{8, 0}
+}
+
+func (x *Function_Parameter) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Function_Parameter) GetType() []byte {
+	if x != nil {
+		return x.Type
+	}
+	return nil
+}
+
+func (x *Function_Parameter) GetAllowNullValue() bool {
+	if x != nil {
+		return x.AllowNullValue
+	}
+	return false
+}
+
+func (x *Function_Parameter) GetAllowUnknownValues() bool {
+	if x != nil {
+		return x.AllowUnknownValues
+	}
+	return false
+}
+
+func (x *Function_Parameter) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Function_Parameter) GetDescriptionKind() StringKind {
+	if x != nil {
+		return x.DescriptionKind
+	}
+	return StringKind_PLAIN
+}
+
+type Function_Return struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// type is the type constraint for the function result.
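+	// The constraint is a JSON-serialized type (as with parameter types);
+	// for example, a string-returning function would, illustratively, set
+	// Type: []byte(`"string"`).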
+ Type []byte `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *Function_Return) Reset() { + *x = Function_Return{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function_Return) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function_Return) ProtoMessage() {} + +func (x *Function_Return) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function_Return.ProtoReflect.Descriptor instead. +func (*Function_Return) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{8, 1} +} + +func (x *Function_Return) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +type GetMetadata_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetMetadata_Request) Reset() { + *x = GetMetadata_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_Request) ProtoMessage() {} + +func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_Request.ProtoReflect.Descriptor instead. +func (*GetMetadata_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 0} +} + +type GetMetadata_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServerCapabilities *ServerCapabilities `protobuf:"bytes,1,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + DataSources []*GetMetadata_DataSourceMetadata `protobuf:"bytes,3,rep,name=data_sources,json=dataSources,proto3" json:"data_sources,omitempty"` + Resources []*GetMetadata_ResourceMetadata `protobuf:"bytes,4,rep,name=resources,proto3" json:"resources,omitempty"` + // functions returns metadata for any functions. 
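+	// These correspond to the provider-defined functions exposed through
+	// the GetFunctions and CallFunction RPCs.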
+ Functions []*GetMetadata_FunctionMetadata `protobuf:"bytes,5,rep,name=functions,proto3" json:"functions,omitempty"` +} + +func (x *GetMetadata_Response) Reset() { + *x = GetMetadata_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_Response) ProtoMessage() {} + +func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_Response.ProtoReflect.Descriptor instead. +func (*GetMetadata_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 1} +} + +func (x *GetMetadata_Response) GetServerCapabilities() *ServerCapabilities { + if x != nil { + return x.ServerCapabilities + } + return nil +} + +func (x *GetMetadata_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *GetMetadata_Response) GetDataSources() []*GetMetadata_DataSourceMetadata { + if x != nil { + return x.DataSources + } + return nil +} + +func (x *GetMetadata_Response) GetResources() []*GetMetadata_ResourceMetadata { + if x != nil { + return x.Resources + } + return nil +} + +func (x *GetMetadata_Response) GetFunctions() []*GetMetadata_FunctionMetadata { + if x != nil { + return x.Functions + } + return nil +} + +type GetMetadata_FunctionMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the function name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetMetadata_FunctionMetadata) Reset() { + *x = GetMetadata_FunctionMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_FunctionMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_FunctionMetadata) ProtoMessage() {} + +func (x *GetMetadata_FunctionMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_FunctionMetadata.ProtoReflect.Descriptor instead. 
+func (*GetMetadata_FunctionMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 2} +} + +func (x *GetMetadata_FunctionMetadata) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type GetMetadata_DataSourceMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *GetMetadata_DataSourceMetadata) Reset() { + *x = GetMetadata_DataSourceMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_DataSourceMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_DataSourceMetadata) ProtoMessage() {} + +func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_DataSourceMetadata.ProtoReflect.Descriptor instead. +func (*GetMetadata_DataSourceMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 3} +} + +func (x *GetMetadata_DataSourceMetadata) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +type GetMetadata_ResourceMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *GetMetadata_ResourceMetadata) Reset() { + *x = GetMetadata_ResourceMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_ResourceMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_ResourceMetadata) ProtoMessage() {} + +func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_ResourceMetadata.ProtoReflect.Descriptor instead. 
+func (*GetMetadata_ResourceMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 4} +} + +func (x *GetMetadata_ResourceMetadata) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +type GetProviderSchema_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProviderSchema_Request) Reset() { + *x = GetProviderSchema_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema_Request) ProtoMessage() {} + +func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema_Request.ProtoReflect.Descriptor instead. +func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{10, 0} +} + +type GetProviderSchema_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + ServerCapabilities *ServerCapabilities `protobuf:"bytes,6,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` + // functions is a mapping of function names to definitions. + Functions map[string]*Function `protobuf:"bytes,7,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GetProviderSchema_Response) Reset() { + *x = GetProviderSchema_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema_Response) ProtoMessage() {} + +func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema_Response.ProtoReflect.Descriptor instead. 
+func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{10, 1} +} + +func (x *GetProviderSchema_Response) GetProvider() *Schema { + if x != nil { + return x.Provider + } + return nil +} + +func (x *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema { + if x != nil { + return x.ResourceSchemas + } + return nil +} + +func (x *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema { + if x != nil { + return x.DataSourceSchemas + } + return nil +} + +func (x *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *GetProviderSchema_Response) GetProviderMeta() *Schema { + if x != nil { + return x.ProviderMeta + } + return nil +} + +func (x *GetProviderSchema_Response) GetServerCapabilities() *ServerCapabilities { + if x != nil { + return x.ServerCapabilities + } + return nil +} + +func (x *GetProviderSchema_Response) GetFunctions() map[string]*Function { + if x != nil { + return x.Functions + } + return nil +} + +type PrepareProviderConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *PrepareProviderConfig_Request) Reset() { + *x = PrepareProviderConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrepareProviderConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrepareProviderConfig_Request) ProtoMessage() {} + +func (x *PrepareProviderConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrepareProviderConfig_Request.ProtoReflect.Descriptor instead. 
+func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *PrepareProviderConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type PrepareProviderConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreparedConfig *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *PrepareProviderConfig_Response) Reset() { + *x = PrepareProviderConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrepareProviderConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrepareProviderConfig_Response) ProtoMessage() {} + +func (x *PrepareProviderConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrepareProviderConfig_Response.ProtoReflect.Descriptor instead. +func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 1} +} + +func (x *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue { + if x != nil { + return x.PreparedConfig + } + return nil +} + +func (x *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +// Request is the message that is sent to the provider during the +// UpgradeResourceState RPC. +// +// This message intentionally does not include configuration data as any +// configuration-based or configuration-conditional changes should occur +// during the PlanResourceChange RPC. Additionally, the configuration is +// not guaranteed to exist (in the case of resource destruction), be wholly +// known, nor match the given prior state, which could lead to unexpected +// provider behaviors for practitioners. +type UpgradeResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + // version is the schema_version number recorded in the state file + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + // raw_state is the raw states as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. 
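+	// As an illustrative example (values invented), a JSON-encoded state
+	// would arrive as &RawState{Json: []byte(`{"id":"abc123"}`)}, while a
+	// legacy state would populate the Flatmap field instead.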
+ RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"` +} + +func (x *UpgradeResourceState_Request) Reset() { + *x = UpgradeResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState_Request) ProtoMessage() {} + +func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState_Request.ProtoReflect.Descriptor instead. +func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *UpgradeResourceState_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *UpgradeResourceState_Request) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *UpgradeResourceState_Request) GetRawState() *RawState { + if x != nil { + return x.RawState + } + return nil +} + +type UpgradeResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw. + UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"` + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *UpgradeResourceState_Response) Reset() { + *x = UpgradeResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState_Response) ProtoMessage() {} + +func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState_Response.ProtoReflect.Descriptor instead. 
+func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { + if x != nil { + return x.UpgradedState + } + return nil +} + +func (x *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateResourceTypeConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateResourceTypeConfig_Request) Reset() { + *x = ValidateResourceTypeConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceTypeConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} + +func (x *ValidateResourceTypeConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceTypeConfig_Request.ProtoReflect.Descriptor instead. +func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{13, 0} +} + +func (x *ValidateResourceTypeConfig_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateResourceTypeConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateResourceTypeConfig_Response) Reset() { + *x = ValidateResourceTypeConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceTypeConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} + +func (x *ValidateResourceTypeConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceTypeConfig_Response.ProtoReflect.Descriptor instead. 
+func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{13, 1} +} + +func (x *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateDataSourceConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateDataSourceConfig_Request) Reset() { + *x = ValidateDataSourceConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataSourceConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataSourceConfig_Request) ProtoMessage() {} + +func (x *ValidateDataSourceConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataSourceConfig_Request.ProtoReflect.Descriptor instead. +func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{14, 0} +} + +func (x *ValidateDataSourceConfig_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateDataSourceConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateDataSourceConfig_Response) Reset() { + *x = ValidateDataSourceConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataSourceConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataSourceConfig_Response) ProtoMessage() {} + +func (x *ValidateDataSourceConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataSourceConfig_Response.ProtoReflect.Descriptor instead. 
+func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{14, 1} +} + +func (x *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type Configure_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *Configure_Request) Reset() { + *x = Configure_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Configure_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Configure_Request) ProtoMessage() {} + +func (x *Configure_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Configure_Request.ProtoReflect.Descriptor instead. +func (*Configure_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{15, 0} +} + +func (x *Configure_Request) GetTerraformVersion() string { + if x != nil { + return x.TerraformVersion + } + return "" +} + +func (x *Configure_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type Configure_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *Configure_Response) Reset() { + *x = Configure_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Configure_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Configure_Response) ProtoMessage() {} + +func (x *Configure_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Configure_Response.ProtoReflect.Descriptor instead. +func (*Configure_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{15, 1} +} + +func (x *Configure_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +// Request is the message that is sent to the provider during the +// ReadResource RPC. +// +// This message intentionally does not include configuration data as any +// configuration-based or configuration-conditional changes should occur +// during the PlanResourceChange RPC. Additionally, the configuration is +// not guaranteed to be wholly known nor match the given prior state, which +// could lead to unexpected provider behaviors for practitioners. 
+type ReadResource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,4,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ReadResource_Request) Reset() { + *x = ReadResource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource_Request) ProtoMessage() {} + +func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource_Request.ProtoReflect.Descriptor instead. +func (*ReadResource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{16, 0} +} + +func (x *ReadResource_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ReadResource_Request) GetCurrentState() *DynamicValue { + if x != nil { + return x.CurrentState + } + return nil +} + +func (x *ReadResource_Request) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +func (x *ReadResource_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ReadResource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` +} + +func (x *ReadResource_Response) Reset() { + *x = ReadResource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource_Response) ProtoMessage() {} + +func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource_Response.ProtoReflect.Descriptor instead. 
+func (*ReadResource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{16, 1} +} + +func (x *ReadResource_Response) GetNewState() *DynamicValue { + if x != nil { + return x.NewState + } + return nil +} + +func (x *ReadResource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *ReadResource_Response) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +type PlanResourceChange_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *PlanResourceChange_Request) Reset() { + *x = PlanResourceChange_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange_Request) ProtoMessage() {} + +func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange_Request.ProtoReflect.Descriptor instead. 
+func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) {
+	return file_tfplugin5_proto_rawDescGZIP(), []int{17, 0}
+}
+
+func (x *PlanResourceChange_Request) GetTypeName() string {
+	if x != nil {
+		return x.TypeName
+	}
+	return ""
+}
+
+func (x *PlanResourceChange_Request) GetPriorState() *DynamicValue {
+	if x != nil {
+		return x.PriorState
+	}
+	return nil
+}
+
+func (x *PlanResourceChange_Request) GetProposedNewState() *DynamicValue {
+	if x != nil {
+		return x.ProposedNewState
+	}
+	return nil
+}
+
+func (x *PlanResourceChange_Request) GetConfig() *DynamicValue {
+	if x != nil {
+		return x.Config
+	}
+	return nil
+}
+
+func (x *PlanResourceChange_Request) GetPriorPrivate() []byte {
+	if x != nil {
+		return x.PriorPrivate
+	}
+	return nil
+}
+
+func (x *PlanResourceChange_Request) GetProviderMeta() *DynamicValue {
+	if x != nil {
+		return x.ProviderMeta
+	}
+	return nil
+}
+
+type PlanResourceChange_Response struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	PlannedState    *DynamicValue    `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"`
+	RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"`
+	PlannedPrivate  []byte           `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"`
+	Diagnostics     []*Diagnostic    `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
+	// This may be set only by the helper/schema "SDK" in the main Terraform
+	// repository, to request that Terraform Core >=0.12 permit additional
+	// inconsistencies that can result from the legacy SDK type system
+	// and its imprecise mapping to the >=0.12 type system.
+	// The change in behavior implied by this flag makes sense only for the
+	// specific details of the legacy SDK type system, and is not a general
+	// mechanism to avoid proper type handling in providers.
+	//
+	// ==== DO NOT USE THIS ====
+	// ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
+	// ==== DO NOT USE THIS ====
+	LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"`
+}
+
+func (x *PlanResourceChange_Response) Reset() {
+	*x = PlanResourceChange_Response{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_tfplugin5_proto_msgTypes[59]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PlanResourceChange_Response) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PlanResourceChange_Response) ProtoMessage() {}
+
+func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message {
+	mi := &file_tfplugin5_proto_msgTypes[59]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
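+
+// Editorial sketch, not protoc-gen-go output: how a caller might inspect a
+// plan result. RequiresReplace lists attribute paths whose pending changes
+// force the resource to be re-created; the loop below is illustrative only,
+// and plannedResp is an assumed *PlanResourceChange_Response returned by the
+// provider.
+//
+//	for _, path := range plannedResp.GetRequiresReplace() {
+//		_ = path.GetSteps() // each step names an attribute or element key
+//	}
+
+// Deprecated: Use PlanResourceChange_Response.ProtoReflect.Descriptor instead.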
+func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{17, 1} +} + +func (x *PlanResourceChange_Response) GetPlannedState() *DynamicValue { + if x != nil { + return x.PlannedState + } + return nil +} + +func (x *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath { + if x != nil { + return x.RequiresReplace + } + return nil +} + +func (x *PlanResourceChange_Response) GetPlannedPrivate() []byte { + if x != nil { + return x.PlannedPrivate + } + return nil +} + +func (x *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *PlanResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + +type ApplyResourceChange_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ApplyResourceChange_Request) Reset() { + *x = ApplyResourceChange_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange_Request) ProtoMessage() {} + +func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange_Request.ProtoReflect.Descriptor instead. 
+func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) {
+	return file_tfplugin5_proto_rawDescGZIP(), []int{18, 0}
+}
+
+func (x *ApplyResourceChange_Request) GetTypeName() string {
+	if x != nil {
+		return x.TypeName
+	}
+	return ""
+}
+
+func (x *ApplyResourceChange_Request) GetPriorState() *DynamicValue {
+	if x != nil {
+		return x.PriorState
+	}
+	return nil
+}
+
+func (x *ApplyResourceChange_Request) GetPlannedState() *DynamicValue {
+	if x != nil {
+		return x.PlannedState
+	}
+	return nil
+}
+
+func (x *ApplyResourceChange_Request) GetConfig() *DynamicValue {
+	if x != nil {
+		return x.Config
+	}
+	return nil
+}
+
+func (x *ApplyResourceChange_Request) GetPlannedPrivate() []byte {
+	if x != nil {
+		return x.PlannedPrivate
+	}
+	return nil
+}
+
+func (x *ApplyResourceChange_Request) GetProviderMeta() *DynamicValue {
+	if x != nil {
+		return x.ProviderMeta
+	}
+	return nil
+}
+
+type ApplyResourceChange_Response struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	NewState    *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
+	Private     []byte        `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"`
+	Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
+	// This may be set only by the helper/schema "SDK" in the main Terraform
+	// repository, to request that Terraform Core >=0.12 permit additional
+	// inconsistencies that can result from the legacy SDK type system
+	// and its imprecise mapping to the >=0.12 type system.
+	// The change in behavior implied by this flag makes sense only for the
+	// specific details of the legacy SDK type system, and is not a general
+	// mechanism to avoid proper type handling in providers.
+	//
+	// ==== DO NOT USE THIS ====
+	// ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
+	// ==== DO NOT USE THIS ====
+	LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"`
+}
+
+func (x *ApplyResourceChange_Response) Reset() {
+	*x = ApplyResourceChange_Response{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_tfplugin5_proto_msgTypes[61]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ApplyResourceChange_Response) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ApplyResourceChange_Response) ProtoMessage() {}
+
+func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message {
+	mi := &file_tfplugin5_proto_msgTypes[61]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
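+
+// Editorial sketch, not protoc-gen-go output: an apply request is assembled
+// from the outputs of the preceding plan. The *DynamicValue payloads are
+// encoded elsewhere; every variable below is assumed.
+//
+//	req := &ApplyResourceChange_Request{
+//		TypeName:       "github_repository", // hypothetical type name
+//		PriorState:     priorState,          // *DynamicValue holding current state
+//		PlannedState:   plannedState,        // *DynamicValue from PlanResourceChange
+//		Config:         config,              // *DynamicValue holding the configuration
+//		PlannedPrivate: plannedPrivate,      // []byte passed through from the plan
+//	}
+
+// Deprecated: Use ApplyResourceChange_Response.ProtoReflect.Descriptor instead.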
+func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{18, 1} +} + +func (x *ApplyResourceChange_Response) GetNewState() *DynamicValue { + if x != nil { + return x.NewState + } + return nil +} + +func (x *ApplyResourceChange_Response) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +func (x *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + +type ImportResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ImportResourceState_Request) Reset() { + *x = ImportResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_Request) ProtoMessage() {} + +func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_Request.ProtoReflect.Descriptor instead. +func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{19, 0} +} + +func (x *ImportResourceState_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ImportResourceState_Request) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type ImportResourceState_ImportedResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` +} + +func (x *ImportResourceState_ImportedResource) Reset() { + *x = ImportResourceState_ImportedResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_ImportedResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_ImportedResource) ProtoMessage() {} + +func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_ImportedResource.ProtoReflect.Descriptor instead. 
+func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{19, 1} +} + +func (x *ImportResourceState_ImportedResource) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ImportResourceState_ImportedResource) GetState() *DynamicValue { + if x != nil { + return x.State + } + return nil +} + +func (x *ImportResourceState_ImportedResource) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +type ImportResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ImportResourceState_Response) Reset() { + *x = ImportResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_Response) ProtoMessage() {} + +func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_Response.ProtoReflect.Descriptor instead. +func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{19, 2} +} + +func (x *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { + if x != nil { + return x.ImportedResources + } + return nil +} + +func (x *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type MoveResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The address of the provider the resource is being moved from. + SourceProviderAddress string `protobuf:"bytes,1,opt,name=source_provider_address,json=sourceProviderAddress,proto3" json:"source_provider_address,omitempty"` + // The resource type that the resource is being moved from. + SourceTypeName string `protobuf:"bytes,2,opt,name=source_type_name,json=sourceTypeName,proto3" json:"source_type_name,omitempty"` + // The schema version of the resource type that the resource is being + // moved from. + SourceSchemaVersion int64 `protobuf:"varint,3,opt,name=source_schema_version,json=sourceSchemaVersion,proto3" json:"source_schema_version,omitempty"` + // The raw state of the resource being moved. Only the json field is + // populated, as there should be no legacy providers using the flatmap + // format that support newly introduced RPCs. + SourceState *RawState `protobuf:"bytes,4,opt,name=source_state,json=sourceState,proto3" json:"source_state,omitempty"` + // The resource type that the resource is being moved to. 
+ TargetTypeName string `protobuf:"bytes,5,opt,name=target_type_name,json=targetTypeName,proto3" json:"target_type_name,omitempty"` + // The private state of the resource being moved. + SourcePrivate []byte `protobuf:"bytes,6,opt,name=source_private,json=sourcePrivate,proto3" json:"source_private,omitempty"` +} + +func (x *MoveResourceState_Request) Reset() { + *x = MoveResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResourceState_Request) ProtoMessage() {} + +func (x *MoveResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResourceState_Request.ProtoReflect.Descriptor instead. +func (*MoveResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *MoveResourceState_Request) GetSourceProviderAddress() string { + if x != nil { + return x.SourceProviderAddress + } + return "" +} + +func (x *MoveResourceState_Request) GetSourceTypeName() string { + if x != nil { + return x.SourceTypeName + } + return "" +} + +func (x *MoveResourceState_Request) GetSourceSchemaVersion() int64 { + if x != nil { + return x.SourceSchemaVersion + } + return 0 +} + +func (x *MoveResourceState_Request) GetSourceState() *RawState { + if x != nil { + return x.SourceState + } + return nil +} + +func (x *MoveResourceState_Request) GetTargetTypeName() string { + if x != nil { + return x.TargetTypeName + } + return "" +} + +func (x *MoveResourceState_Request) GetSourcePrivate() []byte { + if x != nil { + return x.SourcePrivate + } + return nil +} + +type MoveResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The state of the resource after it has been moved. + TargetState *DynamicValue `protobuf:"bytes,1,opt,name=target_state,json=targetState,proto3" json:"target_state,omitempty"` + // Any diagnostics that occurred during the move. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // The private state of the resource after it has been moved. + TargetPrivate []byte `protobuf:"bytes,3,opt,name=target_private,json=targetPrivate,proto3" json:"target_private,omitempty"` +} + +func (x *MoveResourceState_Response) Reset() { + *x = MoveResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResourceState_Response) ProtoMessage() {} + +func (x *MoveResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResourceState_Response.ProtoReflect.Descriptor instead. 
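+//
+// Editorial sketch, not protoc-gen-go output: a move request identifies the
+// source provider, resource type, schema version, and stored state, plus the
+// type the resource is moving to. Every literal below is hypothetical, and
+// sourceJSON is an assumed []byte.
+//
+//	req := &MoveResourceState_Request{
+//		SourceProviderAddress: "registry.terraform.io/owner/source",
+//		SourceTypeName:        "source_thing",
+//		SourceSchemaVersion:   1,
+//		SourceState:           &RawState{Json: sourceJSON},
+//		TargetTypeName:        "target_thing",
+//	}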
+func (*MoveResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{20, 1} +} + +func (x *MoveResourceState_Response) GetTargetState() *DynamicValue { + if x != nil { + return x.TargetState + } + return nil +} + +func (x *MoveResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *MoveResourceState_Response) GetTargetPrivate() []byte { + if x != nil { + return x.TargetPrivate + } + return nil +} + +type ReadDataSource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,3,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ReadDataSource_Request) Reset() { + *x = ReadDataSource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource_Request) ProtoMessage() {} + +func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource_Request.ProtoReflect.Descriptor instead. +func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{21, 0} +} + +func (x *ReadDataSource_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ReadDataSource_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *ReadDataSource_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ReadDataSource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ReadDataSource_Response) Reset() { + *x = ReadDataSource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource_Response) ProtoMessage() {} + +func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource_Response.ProtoReflect.Descriptor instead. 
+func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{21, 1} +} + +func (x *ReadDataSource_Response) GetState() *DynamicValue { + if x != nil { + return x.State + } + return nil +} + +func (x *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type GetProvisionerSchema_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProvisionerSchema_Request) Reset() { + *x = GetProvisionerSchema_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProvisionerSchema_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProvisionerSchema_Request) ProtoMessage() {} + +func (x *GetProvisionerSchema_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProvisionerSchema_Request.ProtoReflect.Descriptor instead. +func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{22, 0} +} + +type GetProvisionerSchema_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Provisioner *Schema `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *GetProvisionerSchema_Response) Reset() { + *x = GetProvisionerSchema_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProvisionerSchema_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProvisionerSchema_Response) ProtoMessage() {} + +func (x *GetProvisionerSchema_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProvisionerSchema_Response.ProtoReflect.Descriptor instead. 
+func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{22, 1} +} + +func (x *GetProvisionerSchema_Response) GetProvisioner() *Schema { + if x != nil { + return x.Provisioner + } + return nil +} + +func (x *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateProvisionerConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateProvisionerConfig_Request) Reset() { + *x = ValidateProvisionerConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProvisionerConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProvisionerConfig_Request) ProtoMessage() {} + +func (x *ValidateProvisionerConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProvisionerConfig_Request.ProtoReflect.Descriptor instead. +func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{23, 0} +} + +func (x *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateProvisionerConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateProvisionerConfig_Response) Reset() { + *x = ValidateProvisionerConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProvisionerConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProvisionerConfig_Response) ProtoMessage() {} + +func (x *ValidateProvisionerConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[72] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProvisionerConfig_Response.ProtoReflect.Descriptor instead. 
+func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{23, 1} +} + +func (x *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ProvisionResource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Connection *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` +} + +func (x *ProvisionResource_Request) Reset() { + *x = ProvisionResource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProvisionResource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProvisionResource_Request) ProtoMessage() {} + +func (x *ProvisionResource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[73] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProvisionResource_Request.ProtoReflect.Descriptor instead. +func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{24, 0} +} + +func (x *ProvisionResource_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *ProvisionResource_Request) GetConnection() *DynamicValue { + if x != nil { + return x.Connection + } + return nil +} + +type ProvisionResource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ProvisionResource_Response) Reset() { + *x = ProvisionResource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProvisionResource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProvisionResource_Response) ProtoMessage() {} + +func (x *ProvisionResource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[74] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProvisionResource_Response.ProtoReflect.Descriptor instead. 
+func (*ProvisionResource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{24, 1} +} + +func (x *ProvisionResource_Response) GetOutput() string { + if x != nil { + return x.Output + } + return "" +} + +func (x *ProvisionResource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type GetFunctions_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFunctions_Request) Reset() { + *x = GetFunctions_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions_Request) ProtoMessage() {} + +func (x *GetFunctions_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions_Request.ProtoReflect.Descriptor instead. +func (*GetFunctions_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{25, 0} +} + +type GetFunctions_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // functions is a mapping of function names to definitions. + Functions map[string]*Function `protobuf:"bytes,1,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // diagnostics is any warnings or errors. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *GetFunctions_Response) Reset() { + *x = GetFunctions_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions_Response) ProtoMessage() {} + +func (x *GetFunctions_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[76] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions_Response.ProtoReflect.Descriptor instead. +func (*GetFunctions_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{25, 1} +} + +func (x *GetFunctions_Response) GetFunctions() map[string]*Function { + if x != nil { + return x.Functions + } + return nil +} + +func (x *GetFunctions_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type CallFunction_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the name of the function being called. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // arguments is the data of each function argument value. 
+ Arguments []*DynamicValue `protobuf:"bytes,2,rep,name=arguments,proto3" json:"arguments,omitempty"` +} + +func (x *CallFunction_Request) Reset() { + *x = CallFunction_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[78] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction_Request) ProtoMessage() {} + +func (x *CallFunction_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[78] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction_Request.ProtoReflect.Descriptor instead. +func (*CallFunction_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{26, 0} +} + +func (x *CallFunction_Request) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CallFunction_Request) GetArguments() []*DynamicValue { + if x != nil { + return x.Arguments + } + return nil +} + +type CallFunction_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // result is result value after running the function logic. + Result *DynamicValue `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + // error is any error from the function logic. + Error *FunctionError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *CallFunction_Response) Reset() { + *x = CallFunction_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction_Response) ProtoMessage() {} + +func (x *CallFunction_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[79] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction_Response.ProtoReflect.Descriptor instead. 
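+//
+// Editorial sketch, not protoc-gen-go output: a function call names the
+// provider-defined function and passes one DynamicValue per argument; the
+// response carries either a result or a FunctionError. The function name is
+// hypothetical and encodedArg is an assumed msgpack-encoded []byte.
+//
+//	req := &CallFunction_Request{
+//		Name:      "parse_semver",
+//		Arguments: []*DynamicValue{{Msgpack: encodedArg}},
+//	}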
+func (*CallFunction_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{26, 1} +} + +func (x *CallFunction_Response) GetResult() *DynamicValue { + if x != nil { + return x.Result + } + return nil +} + +func (x *CallFunction_Response) GetError() *FunctionError { + if x != nil { + return x.Error + } + return nil +} + +var File_tfplugin5_proto protoreflect.FileDescriptor + +var file_tfplugin5_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x09, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x22, 0x3c, 0x0a, 0x0c, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, + 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0xe3, 0x01, 0x0a, 0x0a, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x73, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x22, + 0x2f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x49, + 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, + 0x22, 0x6b, 0x0a, 0x0d, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x00, 0x52, 0x10, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0xdc, 0x01, + 0x0a, 0x0d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, + 0x33, 0x0a, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x52, 0x05, 0x73, + 0x74, 0x65, 0x70, 0x73, 0x1a, 0x95, 0x01, 0x0a, 0x04, 
0x53, 0x74, 0x65, 0x70, 0x12, 0x27, 0x0a, + 0x0e, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x28, 0x0a, 0x0f, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x00, 0x52, 0x0d, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x49, 0x6e, 0x74, + 0x42, 0x0a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x33, 0x0a, 0x04, + 0x53, 0x74, 0x6f, 0x70, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x20, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, + 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x1a, 0x3a, + 0x0a, 0x0c, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xcc, 0x07, 0x0a, 0x06, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x1a, 0xa2, + 0x02, 0x0a, 0x05, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, + 0x3e, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, 
0x64, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, + 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, + 0x69, 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x1a, 0xa9, 0x02, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x12, + 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x40, 0x0a, + 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, + 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, + 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a, + 0xa7, 0x02, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, + 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x43, 0x0a, 0x07, 0x6e, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, + 0x4e, 
0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x4e, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x4d, 0x0a, 0x0b, 0x4e, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, + 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, + 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, + 0x0a, 0x05, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x22, 0xa8, 0x01, 0x0a, 0x12, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x6e, 0x44, 0x65, 0x73, 0x74, + 0x72, 0x6f, 0x79, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, 0x74, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x11, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x22, 0x8e, 0x05, 0x0a, 0x08, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x3d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x4c, 0x0a, 0x12, 0x76, 0x61, 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x11, 0x76, 0x61, 0x72, + 0x69, 0x61, 0x64, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x32, + 0x0a, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, + 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, + 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, + 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, + 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x1a, 0xf3, 0x01, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x1a, 0x1c, 0x0a, 0x06, 0x52, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x96, 0x04, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0xef, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, + 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 
0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x4c, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x1a, 0x26, 0x0a, 0x10, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x31, 0x0a, 0x12, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x2f, 0x0a, + 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc7, + 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0xa6, 0x06, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 
0x75, 0x72, 0x63, + 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, + 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x12, 0x52, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, 0x0e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 
0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdb, 0x01, 0x0a, 0x15, 0x50, 0x72, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x85, + 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0f, 0x70, + 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x70, + 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, + 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x22, 0xb9, 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x1a, + 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, + 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe3, 0x02, + 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xbc, + 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, + 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x93, 0x01, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, + 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x22, 0xf2, 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xbb, 0x02, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, + 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 
0x0c, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x9d, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, + 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, + 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, + 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, + 0x6e, 
0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, + 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, + 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0xed, 0x02, + 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, + 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x65, 
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe7, 0x03, + 0x0a, 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x1a, 0xa8, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x36, 0x0a, 0x17, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x13, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa6, + 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0c, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 
0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0x9c, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x95, 0x01, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, + 0x74, 0x61, 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, + 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x78, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 
0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x73, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5b, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x81, 0x02, 0x0a, 0x0c, + 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x09, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xe5, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x1a, 0x51, 0x0a, 0x0e, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xd1, 0x01, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x54, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x35, 
0x0a, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x61, 0x72, 0x67, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x6b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, + 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, + 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0xef, 0x0b, 0x0a, 0x08, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x6c, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x7b, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 
0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x18, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x27, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, + 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x1c, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, + 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x6c, + 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x66, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x60, 0x0a, 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x47, + 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, + 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x61, 0x6c, 0x6c, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, + 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x86, 0x03, 0x0a, + 0x0b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 
0x5e, 0x0a, 0x09, + 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, + 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x19, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, + 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, + 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, + 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x2d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, 0x67, + 0x6f, 0x2f, 0x74, 0x66, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x76, 0x35, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_tfplugin5_proto_rawDescOnce sync.Once + file_tfplugin5_proto_rawDescData = file_tfplugin5_proto_rawDesc +) + +func file_tfplugin5_proto_rawDescGZIP() []byte { + file_tfplugin5_proto_rawDescOnce.Do(func() { + file_tfplugin5_proto_rawDescData = protoimpl.X.CompressGZIP(file_tfplugin5_proto_rawDescData) + }) + return file_tfplugin5_proto_rawDescData +} + +var file_tfplugin5_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_tfplugin5_proto_msgTypes = make([]protoimpl.MessageInfo, 80) +var file_tfplugin5_proto_goTypes = []interface{}{ + (StringKind)(0), // 0: tfplugin5.StringKind + (Diagnostic_Severity)(0), // 1: tfplugin5.Diagnostic.Severity + (Schema_NestedBlock_NestingMode)(0), // 2: 
tfplugin5.Schema.NestedBlock.NestingMode + (*DynamicValue)(nil), // 3: tfplugin5.DynamicValue + (*Diagnostic)(nil), // 4: tfplugin5.Diagnostic + (*FunctionError)(nil), // 5: tfplugin5.FunctionError + (*AttributePath)(nil), // 6: tfplugin5.AttributePath + (*Stop)(nil), // 7: tfplugin5.Stop + (*RawState)(nil), // 8: tfplugin5.RawState + (*Schema)(nil), // 9: tfplugin5.Schema + (*ServerCapabilities)(nil), // 10: tfplugin5.ServerCapabilities + (*Function)(nil), // 11: tfplugin5.Function + (*GetMetadata)(nil), // 12: tfplugin5.GetMetadata + (*GetProviderSchema)(nil), // 13: tfplugin5.GetProviderSchema + (*PrepareProviderConfig)(nil), // 14: tfplugin5.PrepareProviderConfig + (*UpgradeResourceState)(nil), // 15: tfplugin5.UpgradeResourceState + (*ValidateResourceTypeConfig)(nil), // 16: tfplugin5.ValidateResourceTypeConfig + (*ValidateDataSourceConfig)(nil), // 17: tfplugin5.ValidateDataSourceConfig + (*Configure)(nil), // 18: tfplugin5.Configure + (*ReadResource)(nil), // 19: tfplugin5.ReadResource + (*PlanResourceChange)(nil), // 20: tfplugin5.PlanResourceChange + (*ApplyResourceChange)(nil), // 21: tfplugin5.ApplyResourceChange + (*ImportResourceState)(nil), // 22: tfplugin5.ImportResourceState + (*MoveResourceState)(nil), // 23: tfplugin5.MoveResourceState + (*ReadDataSource)(nil), // 24: tfplugin5.ReadDataSource + (*GetProvisionerSchema)(nil), // 25: tfplugin5.GetProvisionerSchema + (*ValidateProvisionerConfig)(nil), // 26: tfplugin5.ValidateProvisionerConfig + (*ProvisionResource)(nil), // 27: tfplugin5.ProvisionResource + (*GetFunctions)(nil), // 28: tfplugin5.GetFunctions + (*CallFunction)(nil), // 29: tfplugin5.CallFunction + (*AttributePath_Step)(nil), // 30: tfplugin5.AttributePath.Step + (*Stop_Request)(nil), // 31: tfplugin5.Stop.Request + (*Stop_Response)(nil), // 32: tfplugin5.Stop.Response + nil, // 33: tfplugin5.RawState.FlatmapEntry + (*Schema_Block)(nil), // 34: tfplugin5.Schema.Block + (*Schema_Attribute)(nil), // 35: tfplugin5.Schema.Attribute + (*Schema_NestedBlock)(nil), // 36: tfplugin5.Schema.NestedBlock + (*Function_Parameter)(nil), // 37: tfplugin5.Function.Parameter + (*Function_Return)(nil), // 38: tfplugin5.Function.Return + (*GetMetadata_Request)(nil), // 39: tfplugin5.GetMetadata.Request + (*GetMetadata_Response)(nil), // 40: tfplugin5.GetMetadata.Response + (*GetMetadata_FunctionMetadata)(nil), // 41: tfplugin5.GetMetadata.FunctionMetadata + (*GetMetadata_DataSourceMetadata)(nil), // 42: tfplugin5.GetMetadata.DataSourceMetadata + (*GetMetadata_ResourceMetadata)(nil), // 43: tfplugin5.GetMetadata.ResourceMetadata + (*GetProviderSchema_Request)(nil), // 44: tfplugin5.GetProviderSchema.Request + (*GetProviderSchema_Response)(nil), // 45: tfplugin5.GetProviderSchema.Response + nil, // 46: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + nil, // 47: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + nil, // 48: tfplugin5.GetProviderSchema.Response.FunctionsEntry + (*PrepareProviderConfig_Request)(nil), // 49: tfplugin5.PrepareProviderConfig.Request + (*PrepareProviderConfig_Response)(nil), // 50: tfplugin5.PrepareProviderConfig.Response + (*UpgradeResourceState_Request)(nil), // 51: tfplugin5.UpgradeResourceState.Request + (*UpgradeResourceState_Response)(nil), // 52: tfplugin5.UpgradeResourceState.Response + (*ValidateResourceTypeConfig_Request)(nil), // 53: tfplugin5.ValidateResourceTypeConfig.Request + (*ValidateResourceTypeConfig_Response)(nil), // 54: tfplugin5.ValidateResourceTypeConfig.Response + (*ValidateDataSourceConfig_Request)(nil), // 
55: tfplugin5.ValidateDataSourceConfig.Request + (*ValidateDataSourceConfig_Response)(nil), // 56: tfplugin5.ValidateDataSourceConfig.Response + (*Configure_Request)(nil), // 57: tfplugin5.Configure.Request + (*Configure_Response)(nil), // 58: tfplugin5.Configure.Response + (*ReadResource_Request)(nil), // 59: tfplugin5.ReadResource.Request + (*ReadResource_Response)(nil), // 60: tfplugin5.ReadResource.Response + (*PlanResourceChange_Request)(nil), // 61: tfplugin5.PlanResourceChange.Request + (*PlanResourceChange_Response)(nil), // 62: tfplugin5.PlanResourceChange.Response + (*ApplyResourceChange_Request)(nil), // 63: tfplugin5.ApplyResourceChange.Request + (*ApplyResourceChange_Response)(nil), // 64: tfplugin5.ApplyResourceChange.Response + (*ImportResourceState_Request)(nil), // 65: tfplugin5.ImportResourceState.Request + (*ImportResourceState_ImportedResource)(nil), // 66: tfplugin5.ImportResourceState.ImportedResource + (*ImportResourceState_Response)(nil), // 67: tfplugin5.ImportResourceState.Response + (*MoveResourceState_Request)(nil), // 68: tfplugin5.MoveResourceState.Request + (*MoveResourceState_Response)(nil), // 69: tfplugin5.MoveResourceState.Response + (*ReadDataSource_Request)(nil), // 70: tfplugin5.ReadDataSource.Request + (*ReadDataSource_Response)(nil), // 71: tfplugin5.ReadDataSource.Response + (*GetProvisionerSchema_Request)(nil), // 72: tfplugin5.GetProvisionerSchema.Request + (*GetProvisionerSchema_Response)(nil), // 73: tfplugin5.GetProvisionerSchema.Response + (*ValidateProvisionerConfig_Request)(nil), // 74: tfplugin5.ValidateProvisionerConfig.Request + (*ValidateProvisionerConfig_Response)(nil), // 75: tfplugin5.ValidateProvisionerConfig.Response + (*ProvisionResource_Request)(nil), // 76: tfplugin5.ProvisionResource.Request + (*ProvisionResource_Response)(nil), // 77: tfplugin5.ProvisionResource.Response + (*GetFunctions_Request)(nil), // 78: tfplugin5.GetFunctions.Request + (*GetFunctions_Response)(nil), // 79: tfplugin5.GetFunctions.Response + nil, // 80: tfplugin5.GetFunctions.Response.FunctionsEntry + (*CallFunction_Request)(nil), // 81: tfplugin5.CallFunction.Request + (*CallFunction_Response)(nil), // 82: tfplugin5.CallFunction.Response +} +var file_tfplugin5_proto_depIdxs = []int32{ + 1, // 0: tfplugin5.Diagnostic.severity:type_name -> tfplugin5.Diagnostic.Severity + 6, // 1: tfplugin5.Diagnostic.attribute:type_name -> tfplugin5.AttributePath + 30, // 2: tfplugin5.AttributePath.steps:type_name -> tfplugin5.AttributePath.Step + 33, // 3: tfplugin5.RawState.flatmap:type_name -> tfplugin5.RawState.FlatmapEntry + 34, // 4: tfplugin5.Schema.block:type_name -> tfplugin5.Schema.Block + 37, // 5: tfplugin5.Function.parameters:type_name -> tfplugin5.Function.Parameter + 37, // 6: tfplugin5.Function.variadic_parameter:type_name -> tfplugin5.Function.Parameter + 38, // 7: tfplugin5.Function.return:type_name -> tfplugin5.Function.Return + 0, // 8: tfplugin5.Function.description_kind:type_name -> tfplugin5.StringKind + 35, // 9: tfplugin5.Schema.Block.attributes:type_name -> tfplugin5.Schema.Attribute + 36, // 10: tfplugin5.Schema.Block.block_types:type_name -> tfplugin5.Schema.NestedBlock + 0, // 11: tfplugin5.Schema.Block.description_kind:type_name -> tfplugin5.StringKind + 0, // 12: tfplugin5.Schema.Attribute.description_kind:type_name -> tfplugin5.StringKind + 34, // 13: tfplugin5.Schema.NestedBlock.block:type_name -> tfplugin5.Schema.Block + 2, // 14: tfplugin5.Schema.NestedBlock.nesting:type_name -> tfplugin5.Schema.NestedBlock.NestingMode + 0, // 15: 
tfplugin5.Function.Parameter.description_kind:type_name -> tfplugin5.StringKind + 10, // 16: tfplugin5.GetMetadata.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities + 4, // 17: tfplugin5.GetMetadata.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 42, // 18: tfplugin5.GetMetadata.Response.data_sources:type_name -> tfplugin5.GetMetadata.DataSourceMetadata + 43, // 19: tfplugin5.GetMetadata.Response.resources:type_name -> tfplugin5.GetMetadata.ResourceMetadata + 41, // 20: tfplugin5.GetMetadata.Response.functions:type_name -> tfplugin5.GetMetadata.FunctionMetadata + 9, // 21: tfplugin5.GetProviderSchema.Response.provider:type_name -> tfplugin5.Schema + 46, // 22: tfplugin5.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + 47, // 23: tfplugin5.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + 4, // 24: tfplugin5.GetProviderSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 9, // 25: tfplugin5.GetProviderSchema.Response.provider_meta:type_name -> tfplugin5.Schema + 10, // 26: tfplugin5.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities + 48, // 27: tfplugin5.GetProviderSchema.Response.functions:type_name -> tfplugin5.GetProviderSchema.Response.FunctionsEntry + 9, // 28: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin5.Schema + 9, // 29: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin5.Schema + 11, // 30: tfplugin5.GetProviderSchema.Response.FunctionsEntry.value:type_name -> tfplugin5.Function + 3, // 31: tfplugin5.PrepareProviderConfig.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 32: tfplugin5.PrepareProviderConfig.Response.prepared_config:type_name -> tfplugin5.DynamicValue + 4, // 33: tfplugin5.PrepareProviderConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 8, // 34: tfplugin5.UpgradeResourceState.Request.raw_state:type_name -> tfplugin5.RawState + 3, // 35: tfplugin5.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin5.DynamicValue + 4, // 36: tfplugin5.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 37: tfplugin5.ValidateResourceTypeConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 38: tfplugin5.ValidateResourceTypeConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 39: tfplugin5.ValidateDataSourceConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 40: tfplugin5.ValidateDataSourceConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 41: tfplugin5.Configure.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 42: tfplugin5.Configure.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 43: tfplugin5.ReadResource.Request.current_state:type_name -> tfplugin5.DynamicValue + 3, // 44: tfplugin5.ReadResource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 45: tfplugin5.ReadResource.Response.new_state:type_name -> tfplugin5.DynamicValue + 4, // 46: tfplugin5.ReadResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 47: tfplugin5.PlanResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 3, // 48: tfplugin5.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin5.DynamicValue + 3, // 49: tfplugin5.PlanResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 50: 
tfplugin5.PlanResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 51: tfplugin5.PlanResourceChange.Response.planned_state:type_name -> tfplugin5.DynamicValue + 6, // 52: tfplugin5.PlanResourceChange.Response.requires_replace:type_name -> tfplugin5.AttributePath + 4, // 53: tfplugin5.PlanResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 54: tfplugin5.ApplyResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 3, // 55: tfplugin5.ApplyResourceChange.Request.planned_state:type_name -> tfplugin5.DynamicValue + 3, // 56: tfplugin5.ApplyResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 57: tfplugin5.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 58: tfplugin5.ApplyResourceChange.Response.new_state:type_name -> tfplugin5.DynamicValue + 4, // 59: tfplugin5.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 60: tfplugin5.ImportResourceState.ImportedResource.state:type_name -> tfplugin5.DynamicValue + 66, // 61: tfplugin5.ImportResourceState.Response.imported_resources:type_name -> tfplugin5.ImportResourceState.ImportedResource + 4, // 62: tfplugin5.ImportResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 8, // 63: tfplugin5.MoveResourceState.Request.source_state:type_name -> tfplugin5.RawState + 3, // 64: tfplugin5.MoveResourceState.Response.target_state:type_name -> tfplugin5.DynamicValue + 4, // 65: tfplugin5.MoveResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 66: tfplugin5.ReadDataSource.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 67: tfplugin5.ReadDataSource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 68: tfplugin5.ReadDataSource.Response.state:type_name -> tfplugin5.DynamicValue + 4, // 69: tfplugin5.ReadDataSource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 9, // 70: tfplugin5.GetProvisionerSchema.Response.provisioner:type_name -> tfplugin5.Schema + 4, // 71: tfplugin5.GetProvisionerSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 72: tfplugin5.ValidateProvisionerConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 73: tfplugin5.ValidateProvisionerConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 74: tfplugin5.ProvisionResource.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 75: tfplugin5.ProvisionResource.Request.connection:type_name -> tfplugin5.DynamicValue + 4, // 76: tfplugin5.ProvisionResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 80, // 77: tfplugin5.GetFunctions.Response.functions:type_name -> tfplugin5.GetFunctions.Response.FunctionsEntry + 4, // 78: tfplugin5.GetFunctions.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 11, // 79: tfplugin5.GetFunctions.Response.FunctionsEntry.value:type_name -> tfplugin5.Function + 3, // 80: tfplugin5.CallFunction.Request.arguments:type_name -> tfplugin5.DynamicValue + 3, // 81: tfplugin5.CallFunction.Response.result:type_name -> tfplugin5.DynamicValue + 5, // 82: tfplugin5.CallFunction.Response.error:type_name -> tfplugin5.FunctionError + 39, // 83: tfplugin5.Provider.GetMetadata:input_type -> tfplugin5.GetMetadata.Request + 44, // 84: tfplugin5.Provider.GetSchema:input_type -> tfplugin5.GetProviderSchema.Request + 49, // 85: tfplugin5.Provider.PrepareProviderConfig:input_type -> tfplugin5.PrepareProviderConfig.Request + 53, // 86: tfplugin5.Provider.ValidateResourceTypeConfig:input_type -> 
tfplugin5.ValidateResourceTypeConfig.Request + 55, // 87: tfplugin5.Provider.ValidateDataSourceConfig:input_type -> tfplugin5.ValidateDataSourceConfig.Request + 51, // 88: tfplugin5.Provider.UpgradeResourceState:input_type -> tfplugin5.UpgradeResourceState.Request + 57, // 89: tfplugin5.Provider.Configure:input_type -> tfplugin5.Configure.Request + 59, // 90: tfplugin5.Provider.ReadResource:input_type -> tfplugin5.ReadResource.Request + 61, // 91: tfplugin5.Provider.PlanResourceChange:input_type -> tfplugin5.PlanResourceChange.Request + 63, // 92: tfplugin5.Provider.ApplyResourceChange:input_type -> tfplugin5.ApplyResourceChange.Request + 65, // 93: tfplugin5.Provider.ImportResourceState:input_type -> tfplugin5.ImportResourceState.Request + 68, // 94: tfplugin5.Provider.MoveResourceState:input_type -> tfplugin5.MoveResourceState.Request + 70, // 95: tfplugin5.Provider.ReadDataSource:input_type -> tfplugin5.ReadDataSource.Request + 78, // 96: tfplugin5.Provider.GetFunctions:input_type -> tfplugin5.GetFunctions.Request + 81, // 97: tfplugin5.Provider.CallFunction:input_type -> tfplugin5.CallFunction.Request + 31, // 98: tfplugin5.Provider.Stop:input_type -> tfplugin5.Stop.Request + 72, // 99: tfplugin5.Provisioner.GetSchema:input_type -> tfplugin5.GetProvisionerSchema.Request + 74, // 100: tfplugin5.Provisioner.ValidateProvisionerConfig:input_type -> tfplugin5.ValidateProvisionerConfig.Request + 76, // 101: tfplugin5.Provisioner.ProvisionResource:input_type -> tfplugin5.ProvisionResource.Request + 31, // 102: tfplugin5.Provisioner.Stop:input_type -> tfplugin5.Stop.Request + 40, // 103: tfplugin5.Provider.GetMetadata:output_type -> tfplugin5.GetMetadata.Response + 45, // 104: tfplugin5.Provider.GetSchema:output_type -> tfplugin5.GetProviderSchema.Response + 50, // 105: tfplugin5.Provider.PrepareProviderConfig:output_type -> tfplugin5.PrepareProviderConfig.Response + 54, // 106: tfplugin5.Provider.ValidateResourceTypeConfig:output_type -> tfplugin5.ValidateResourceTypeConfig.Response + 56, // 107: tfplugin5.Provider.ValidateDataSourceConfig:output_type -> tfplugin5.ValidateDataSourceConfig.Response + 52, // 108: tfplugin5.Provider.UpgradeResourceState:output_type -> tfplugin5.UpgradeResourceState.Response + 58, // 109: tfplugin5.Provider.Configure:output_type -> tfplugin5.Configure.Response + 60, // 110: tfplugin5.Provider.ReadResource:output_type -> tfplugin5.ReadResource.Response + 62, // 111: tfplugin5.Provider.PlanResourceChange:output_type -> tfplugin5.PlanResourceChange.Response + 64, // 112: tfplugin5.Provider.ApplyResourceChange:output_type -> tfplugin5.ApplyResourceChange.Response + 67, // 113: tfplugin5.Provider.ImportResourceState:output_type -> tfplugin5.ImportResourceState.Response + 69, // 114: tfplugin5.Provider.MoveResourceState:output_type -> tfplugin5.MoveResourceState.Response + 71, // 115: tfplugin5.Provider.ReadDataSource:output_type -> tfplugin5.ReadDataSource.Response + 79, // 116: tfplugin5.Provider.GetFunctions:output_type -> tfplugin5.GetFunctions.Response + 82, // 117: tfplugin5.Provider.CallFunction:output_type -> tfplugin5.CallFunction.Response + 32, // 118: tfplugin5.Provider.Stop:output_type -> tfplugin5.Stop.Response + 73, // 119: tfplugin5.Provisioner.GetSchema:output_type -> tfplugin5.GetProvisionerSchema.Response + 75, // 120: tfplugin5.Provisioner.ValidateProvisionerConfig:output_type -> tfplugin5.ValidateProvisionerConfig.Response + 77, // 121: tfplugin5.Provisioner.ProvisionResource:output_type -> tfplugin5.ProvisionResource.Response + 32, // 122: 
tfplugin5.Provisioner.Stop:output_type -> tfplugin5.Stop.Response + 103, // [103:123] is the sub-list for method output_type + 83, // [83:103] is the sub-list for method input_type + 83, // [83:83] is the sub-list for extension type_name + 83, // [83:83] is the sub-list for extension extendee + 0, // [0:83] is the sub-list for field type_name +} + +func init() { file_tfplugin5_proto_init() } +func file_tfplugin5_proto_init() { + if File_tfplugin5_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_tfplugin5_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Diagnostic); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FunctionError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RawState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerCapabilities); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareProviderConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_tfplugin5_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceTypeConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataSourceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Configure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProvisionerSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProvisionerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath_Step); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Block); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Attribute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_NestedBlock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function_Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function_Return); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_FunctionMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_DataSourceMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GetMetadata_ResourceMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareProviderConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareProviderConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceTypeConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceTypeConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataSourceConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataSourceConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Configure_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Configure_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource_Request); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_ImportedResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProvisionerSchema_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields 
+ default: + return nil + } + } + file_tfplugin5_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProvisionerSchema_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProvisionerConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProvisionerConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_tfplugin5_proto_msgTypes[2].OneofWrappers = []interface{}{} + file_tfplugin5_proto_msgTypes[27].OneofWrappers = []interface{}{ + (*AttributePath_Step_AttributeName)(nil), + (*AttributePath_Step_ElementKeyString)(nil), + (*AttributePath_Step_ElementKeyInt)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_tfplugin5_proto_rawDesc, + NumEnums: 3, + NumMessages: 80, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_tfplugin5_proto_goTypes, + DependencyIndexes: file_tfplugin5_proto_depIdxs, + EnumInfos: file_tfplugin5_proto_enumTypes, + MessageInfos: file_tfplugin5_proto_msgTypes, + }.Build() + File_tfplugin5_proto = out.File + file_tfplugin5_proto_rawDesc = nil + file_tfplugin5_proto_goTypes = nil + file_tfplugin5_proto_depIdxs = nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto new file mode 100644 index 0000000000..1266a51095 
--- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto @@ -0,0 +1,589 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Terraform Plugin RPC protocol version 5.5 +// +// This file defines version 5.5 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. +// +// This file will not be updated. Any minor versions of protocol 5 to follow +// should copy this file and modify the copy while maintaining backwards +// compatibility. Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. +// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. +// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the main +// branch or any other development branch. +// +syntax = "proto3"; +option go_package = "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5"; + +package tfplugin5; + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. +message DynamicValue { + bytes msgpack = 1; + bytes json = 2; +} + +message Diagnostic { + enum Severity { + INVALID = 0; + ERROR = 1; + WARNING = 2; + } + Severity severity = 1; + string summary = 2; + string detail = 3; + AttributePath attribute = 4; +} + +message FunctionError { + string text = 1; + // The optional function_argument records the index position of the + // argument which caused the error. + optional int64 function_argument = 2; +} + +message AttributePath { + message Step { + oneof selector { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + string attribute_name = 1; + // Set "element_key_*" to represent looking up an element in + // an indexable collection type. + string element_key_string = 2; + int64 element_key_int = 3; + } + } + repeated Step steps = 1; +} + +message Stop { + message Request { + } + message Response { + string Error = 1; + } +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. +message RawState { + bytes json = 1; + map<string, string> flatmap = 2; +} + +enum StringKind { + PLAIN = 0; + MARKDOWN = 1; +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. +message Schema { + message Block { + int64 version = 1; + repeated Attribute attributes = 2; + repeated NestedBlock block_types = 3; + string description = 4; + StringKind description_kind = 5; + bool deprecated = 6; + } + + message Attribute { + string name = 1; + bytes type = 2; + string description = 3; + bool required = 4; + bool optional = 5; + bool computed = 6; + bool sensitive = 7; + StringKind description_kind = 8; + bool deprecated = 9; + } + + message NestedBlock { + enum NestingMode { + INVALID = 0; + SINGLE = 1; + LIST = 2; + SET = 3; + MAP = 4; + GROUP = 5; + } + + string type_name = 1; + Block block = 2; + NestingMode nesting = 3; + int64 min_items = 4; + int64 max_items = 5; + } + + // The version of the schema.
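The messages above are the wire-level vocabulary of the protocol: DynamicValue carries msgpack- or JSON-encoded Terraform values, Diagnostic reports problems, and AttributePath locates a diagnostic inside a value. As a rough sketch (not part of this diff), here is how their public counterparts in terraform-plugin-go's tfprotov5 and tftypes packages compose; the attribute names are purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	// Mirrors AttributePath.Step on the wire: attribute_name "settings",
	// element_key_int 0, attribute_name "name".
	path := tftypes.NewAttributePath().
		WithAttributeName("settings").
		WithElementKeyInt(0).
		WithAttributeName("name")

	diag := &tfprotov5.Diagnostic{
		Severity:  tfprotov5.DiagnosticSeverityError, // Severity.ERROR = 1 on the wire
		Summary:   "invalid value",
		Detail:    `the "name" attribute must not be empty`,
		Attribute: path,
	}
	fmt.Println(diag.Summary, "at", path.String())
}
```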
+ // Schemas are versioned, so that providers can upgrade a saved resource + state when the schema is changed. + int64 version = 1; + + // Block is the top level configuration block for this schema. + Block block = 2; +} + +// ServerCapabilities allows providers to communicate extra information +// regarding supported protocol features. This is used to indicate +// availability of certain forward-compatible changes which may be optional +// in a major protocol version, but cannot be tested for directly. +message ServerCapabilities { + // The plan_destroy capability signals that a provider expects a call + // to PlanResourceChange when a resource is going to be destroyed. + bool plan_destroy = 1; + + // The get_provider_schema_optional capability indicates that this + // provider does not require calling GetProviderSchema to operate + // normally, and the caller can use a cached copy of the provider's + // schema. + bool get_provider_schema_optional = 2; + + // The move_resource_state capability signals that a provider supports the + // MoveResourceState RPC. + bool move_resource_state = 3; +} + +message Function { + // parameters is the ordered list of positional function parameters. + repeated Parameter parameters = 1; + + // variadic_parameter is an optional final parameter which accepts + // zero or more argument values, in which Terraform will send an + // ordered list of the parameter type. + Parameter variadic_parameter = 2; + + // return is the function result. + Return return = 3; + + // summary is the human-readable shortened documentation for the function. + string summary = 4; + + // description is human-readable documentation for the function. + string description = 5; + + // description_kind is the formatting of the description. + StringKind description_kind = 6; + + // deprecation_message is human-readable documentation if the + // function is deprecated. + string deprecation_message = 7; + + message Parameter { + // name is the human-readable display name for the parameter. + string name = 1; + + // type is the type constraint for the parameter. + bytes type = 2; + + // allow_null_value when enabled denotes that a null argument value can + // be passed to the provider. When disabled, Terraform returns an error + // if the argument value is null. + bool allow_null_value = 3; + + // allow_unknown_values when enabled denotes that only wholly known + // argument values will be passed to the provider. When disabled, + // Terraform skips the function call entirely and assumes an unknown + // value result from the function. + bool allow_unknown_values = 4; + + // description is human-readable documentation for the parameter. + string description = 5; + + // description_kind is the formatting of the description. + StringKind description_kind = 6; + } + + message Return { + // type is the type constraint for the function result. + bytes type = 1; + } +} + +service Provider { + //////// Information about what a provider supports/expects + + // GetMetadata returns upfront information about server capabilities and + // supported resource types without requiring the server to instantiate all + // schema information, which may be memory intensive. This RPC is optional, + // where clients may receive an unimplemented RPC error. Clients should + // ignore the error and call the GetSchema RPC as a fallback. + rpc GetMetadata(GetMetadata.Request) returns (GetMetadata.Response); + + // GetSchema returns schema information for the provider, data resources, + // and managed resources.
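A hedged sketch of how a provider might fill in the Function message just defined, using the public tfprotov5 types that mirror Function, Parameter, and Return; the function name and behavior are invented for illustration. The bytes type fields carry serialized type constraints, which tftypes.Type values supply:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

// echoFunction is a hypothetical provider-defined function definition:
// one positional string parameter and a string result.
func echoFunction() *tfprotov5.Function {
	return &tfprotov5.Function{
		Parameters: []*tfprotov5.FunctionParameter{{
			Name:        "input",
			Type:        tftypes.String, // serialized into Parameter.type on the wire
			Description: "value to echo back",
		}},
		Return:          &tfprotov5.FunctionReturn{Type: tftypes.String},
		Summary:         "Echoes its argument",
		Description:     "`echo(input)` returns `input` unchanged.",
		DescriptionKind: tfprotov5.StringKindMarkdown, // StringKind.MARKDOWN = 1
	}
}

func main() {
	fmt.Println(echoFunction().Summary)
}
```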
+ rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response); + rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response); + rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response); + rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response); + rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response); + + //////// One-time initialization, called before other functions below + rpc Configure(Configure.Request) returns (Configure.Response); + + //////// Managed Resource Lifecycle + rpc ReadResource(ReadResource.Request) returns (ReadResource.Response); + rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response); + rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response); + rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response); + rpc MoveResourceState(MoveResourceState.Request) returns (MoveResourceState.Response); + rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response); + + // Functions + + // GetFunctions returns the definitions of all functions. + rpc GetFunctions(GetFunctions.Request) returns (GetFunctions.Response); + + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. + rpc CallFunction(CallFunction.Request) returns (CallFunction.Response); + + //////// Graceful Shutdown + rpc Stop(Stop.Request) returns (Stop.Response); +} + +message GetMetadata { + message Request { + } + + message Response { + ServerCapabilities server_capabilities = 1; + repeated Diagnostic diagnostics = 2; + repeated DataSourceMetadata data_sources = 3; + repeated ResourceMetadata resources = 4; + + // functions returns metadata for any functions. + repeated FunctionMetadata functions = 5; + } + + message FunctionMetadata { + // name is the function name. + string name = 1; + } + + message DataSourceMetadata { + string type_name = 1; + } + + message ResourceMetadata { + string type_name = 1; + } +} + +message GetProviderSchema { + message Request { + } + message Response { + Schema provider = 1; + map<string, Schema> resource_schemas = 2; + map<string, Schema> data_source_schemas = 3; + repeated Diagnostic diagnostics = 4; + Schema provider_meta = 5; + ServerCapabilities server_capabilities = 6; + + // functions is a mapping of function names to definitions. + map<string, Function> functions = 7; + } +} + +message PrepareProviderConfig { + message Request { + DynamicValue config = 1; + } + message Response { + DynamicValue prepared_config = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message UpgradeResourceState { + // Request is the message that is sent to the provider during the + // UpgradeResourceState RPC. + // + // This message intentionally does not include configuration data as any + // configuration-based or configuration-conditional changes should occur + // during the PlanResourceChange RPC. Additionally, the configuration is + // not guaranteed to exist (in the case of resource destruction), be wholly + // known, nor match the given prior state, which could lead to unexpected + // provider behaviors for practitioners. + message Request { + string type_name = 1; + + // version is the schema_version number recorded in the state file + int64 version = 2; + + // raw_state is the raw state as stored for the resource.
Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. + RawState raw_state = 3; + } + message Response { + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw. + DynamicValue upgraded_state = 1; + + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. + repeated Diagnostic diagnostics = 2; + } +} + +message ValidateResourceTypeConfig { + message Request { + string type_name = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ValidateDataSourceConfig { + message Request { + string type_name = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message Configure { + message Request { + string terraform_version = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ReadResource { + // Request is the message that is sent to the provider during the + // ReadResource RPC. + // + // This message intentionally does not include configuration data as any + // configuration-based or configuration-conditional changes should occur + // during the PlanResourceChange RPC. Additionally, the configuration is + // not guaranteed to be wholly known nor match the given prior state, which + // could lead to unexpected provider behaviors for practitioners. + message Request { + string type_name = 1; + DynamicValue current_state = 2; + bytes private = 3; + DynamicValue provider_meta = 4; + } + message Response { + DynamicValue new_state = 1; + repeated Diagnostic diagnostics = 2; + bytes private = 3; + } +} + +message PlanResourceChange { + message Request { + string type_name = 1; + DynamicValue prior_state = 2; + DynamicValue proposed_new_state = 3; + DynamicValue config = 4; + bytes prior_private = 5; + DynamicValue provider_meta = 6; + } + + message Response { + DynamicValue planned_state = 1; + repeated AttributePath requires_replace = 2; + bytes planned_private = 3; + repeated Diagnostic diagnostics = 4; + + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. 
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 5; + } +} + +message ApplyResourceChange { + message Request { + string type_name = 1; + DynamicValue prior_state = 2; + DynamicValue planned_state = 3; + DynamicValue config = 4; + bytes planned_private = 5; + DynamicValue provider_meta = 6; + } + message Response { + DynamicValue new_state = 1; + bytes private = 2; + repeated Diagnostic diagnostics = 3; + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and is not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 4; + } +} + +message ImportResourceState { + message Request { + string type_name = 1; + string id = 2; + } + + message ImportedResource { + string type_name = 1; + DynamicValue state = 2; + bytes private = 3; + } + + message Response { + repeated ImportedResource imported_resources = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message MoveResourceState { + message Request { + // The address of the provider the resource is being moved from. + string source_provider_address = 1; + + // The resource type that the resource is being moved from. + string source_type_name = 2; + + // The schema version of the resource type that the resource is being + // moved from. + int64 source_schema_version = 3; + + // The raw state of the resource being moved. Only the json field is + // populated, as there should be no legacy providers using the flatmap + // format that support newly introduced RPCs. + RawState source_state = 4; + + // The resource type that the resource is being moved to. + string target_type_name = 5; + + // The private state of the resource being moved. + bytes source_private = 6; + } + + message Response { + // The state of the resource after it has been moved. + DynamicValue target_state = 1; + + // Any diagnostics that occurred during the move. + repeated Diagnostic diagnostics = 2; + + // The private state of the resource after it has been moved.
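The plan/apply pair above is the heart of the resource lifecycle. As a hedged sketch using the public tfprotov5 types, the simplest valid PlanResourceChange handler accepts Terraform's proposed_new_state as the planned_state and carries the private blob forward; a real provider would also populate requires_replace for attributes whose change forces re-creation:

```go
package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
)

// planResourceChange sketches a pass-through plan: the provider agrees
// with Terraform's proposal and requests no replacement.
func planResourceChange(ctx context.Context, req *tfprotov5.PlanResourceChangeRequest) (*tfprotov5.PlanResourceChangeResponse, error) {
	return &tfprotov5.PlanResourceChangeResponse{
		PlannedState:   req.ProposedNewState, // echo proposed_new_state unchanged
		PlannedPrivate: req.PriorPrivate,     // keep private state across the plan
	}, nil
}
```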
+ bytes target_private = 3; + } +} + +message ReadDataSource { + message Request { + string type_name = 1; + DynamicValue config = 2; + DynamicValue provider_meta = 3; + } + message Response { + DynamicValue state = 1; + repeated Diagnostic diagnostics = 2; + } +} + +service Provisioner { + rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response); + rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response); + rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response); + rpc Stop(Stop.Request) returns (Stop.Response); +} + +message GetProvisionerSchema { + message Request { + } + message Response { + Schema provisioner = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message ValidateProvisionerConfig { + message Request { + DynamicValue config = 1; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ProvisionResource { + message Request { + DynamicValue config = 1; + DynamicValue connection = 2; + } + message Response { + string output = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message GetFunctions { + message Request {} + + message Response { + // functions is a mapping of function names to definitions. + map<string, Function> functions = 1; + + // diagnostics lists any warnings or errors. + repeated Diagnostic diagnostics = 2; + } +} + +message CallFunction { + message Request { + // name is the name of the function being called. + string name = 1; + + // arguments is the data of each function argument value. + repeated DynamicValue arguments = 2; + } + + message Response { + // result is the result value after running the function logic. + DynamicValue result = 1; + + // error is any error from the function logic. + FunctionError error = 2; + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go new file mode 100644 index 0000000000..fc01684637 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go @@ -0,0 +1,941 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Terraform Plugin RPC protocol version 5.5 +// +// This file defines version 5.5 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. +// +// This file will not be updated. Any minor versions of protocol 5 to follow +// should copy this file and modify the copy while maintaining backwards +// compatibility. Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. +// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. +// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the main +// branch or any other development branch. +// + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
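Before the generated gRPC stubs continue, a hedged sketch of the CallFunction exchange defined just above, using the public tfprotov5 types: a failed call reports a FunctionError whose optional function_argument points at the offending argument's index. The validation rule and argument name are invented:

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
)

// callFunction sketches a provider-side CallFunction handler that echoes
// its first argument and reports a positional error when it is missing.
func callFunction(req *tfprotov5.CallFunctionRequest) *tfprotov5.CallFunctionResponse {
	if len(req.Arguments) == 0 || req.Arguments[0] == nil {
		arg := int64(0)
		return &tfprotov5.CallFunctionResponse{
			Error: &tfprotov5.FunctionError{
				Text:             `argument "input" must be provided`,
				FunctionArgument: &arg, // maps to the optional function_argument field
			},
		}
	}
	// Echo the first argument back as the result DynamicValue.
	return &tfprotov5.CallFunctionResponse{Result: req.Arguments[0]}
}
```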
+// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.1 +// source: tfplugin5.proto + +package tfplugin5 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Provider_GetMetadata_FullMethodName = "/tfplugin5.Provider/GetMetadata" + Provider_GetSchema_FullMethodName = "/tfplugin5.Provider/GetSchema" + Provider_PrepareProviderConfig_FullMethodName = "/tfplugin5.Provider/PrepareProviderConfig" + Provider_ValidateResourceTypeConfig_FullMethodName = "/tfplugin5.Provider/ValidateResourceTypeConfig" + Provider_ValidateDataSourceConfig_FullMethodName = "/tfplugin5.Provider/ValidateDataSourceConfig" + Provider_UpgradeResourceState_FullMethodName = "/tfplugin5.Provider/UpgradeResourceState" + Provider_Configure_FullMethodName = "/tfplugin5.Provider/Configure" + Provider_ReadResource_FullMethodName = "/tfplugin5.Provider/ReadResource" + Provider_PlanResourceChange_FullMethodName = "/tfplugin5.Provider/PlanResourceChange" + Provider_ApplyResourceChange_FullMethodName = "/tfplugin5.Provider/ApplyResourceChange" + Provider_ImportResourceState_FullMethodName = "/tfplugin5.Provider/ImportResourceState" + Provider_MoveResourceState_FullMethodName = "/tfplugin5.Provider/MoveResourceState" + Provider_ReadDataSource_FullMethodName = "/tfplugin5.Provider/ReadDataSource" + Provider_GetFunctions_FullMethodName = "/tfplugin5.Provider/GetFunctions" + Provider_CallFunction_FullMethodName = "/tfplugin5.Provider/CallFunction" + Provider_Stop_FullMethodName = "/tfplugin5.Provider/Stop" +) + +// ProviderClient is the client API for Provider service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ProviderClient interface { + // GetMetadata returns upfront information about server capabilities and + // supported resource types without requiring the server to instantiate all + // schema information, which may be memory intensive. This RPC is optional, + // where clients may receive an unimplemented RPC error. Clients should + // ignore the error and call the GetSchema RPC as a fallback. + GetMetadata(ctx context.Context, in *GetMetadata_Request, opts ...grpc.CallOption) (*GetMetadata_Response, error) + // GetSchema returns schema information for the provider, data resources, + // and managed resources. 
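A hedged sketch of driving this generated client directly. The tfplugin5 package is internal to terraform-plugin-go, so this compiles only inside that module, and the address and plaintext credentials are stand-ins: real plugin connections are brokered by go-plugin over a local socket with mutual TLS. It is shown only to make the generated call shape concrete (NewProviderClient is defined just below):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5"
)

func main() {
	// Hypothetical address; see the caveats above.
	conn, err := grpc.Dial("127.0.0.1:10000",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := tfplugin5.NewProviderClient(conn)
	resp, err := client.GetSchema(context.Background(), &tfplugin5.GetProviderSchema_Request{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("provider schema version: %d", resp.GetProvider().GetVersion())
}
```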
+ GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) + PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) + // ////// One-time initialization, called before other functions below + Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) + // ////// Managed Resource Lifecycle + ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) + PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) + ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) + ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) + MoveResourceState(ctx context.Context, in *MoveResourceState_Request, opts ...grpc.CallOption) (*MoveResourceState_Response, error) + ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) + // GetFunctions returns the definitions of all functions. + GetFunctions(ctx context.Context, in *GetFunctions_Request, opts ...grpc.CallOption) (*GetFunctions_Response, error) + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. + CallFunction(ctx context.Context, in *CallFunction_Request, opts ...grpc.CallOption) (*CallFunction_Response, error) + // ////// Graceful Shutdown + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type providerClient struct { + cc grpc.ClientConnInterface +} + +func NewProviderClient(cc grpc.ClientConnInterface) ProviderClient { + return &providerClient{cc} +} + +func (c *providerClient) GetMetadata(ctx context.Context, in *GetMetadata_Request, opts ...grpc.CallOption) (*GetMetadata_Response, error) { + out := new(GetMetadata_Response) + err := c.cc.Invoke(ctx, Provider_GetMetadata_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { + out := new(GetProviderSchema_Response) + err := c.cc.Invoke(ctx, Provider_GetSchema_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) { + out := new(PrepareProviderConfig_Response) + err := c.cc.Invoke(ctx, Provider_PrepareProviderConfig_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) { + out := new(ValidateResourceTypeConfig_Response) + err := c.cc.Invoke(ctx, Provider_ValidateResourceTypeConfig_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) { + out := new(ValidateDataSourceConfig_Response) + err := c.cc.Invoke(ctx, Provider_ValidateDataSourceConfig_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { + out := new(UpgradeResourceState_Response) + err := c.cc.Invoke(ctx, Provider_UpgradeResourceState_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) { + out := new(Configure_Response) + err := c.cc.Invoke(ctx, Provider_Configure_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { + out := new(ReadResource_Response) + err := c.cc.Invoke(ctx, Provider_ReadResource_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { + out := new(PlanResourceChange_Response) + err := c.cc.Invoke(ctx, Provider_PlanResourceChange_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { + out := new(ApplyResourceChange_Response) + err := c.cc.Invoke(ctx, Provider_ApplyResourceChange_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { + out := new(ImportResourceState_Response) + err := c.cc.Invoke(ctx, Provider_ImportResourceState_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) MoveResourceState(ctx context.Context, in *MoveResourceState_Request, opts ...grpc.CallOption) (*MoveResourceState_Response, error) { + out := new(MoveResourceState_Response) + err := c.cc.Invoke(ctx, Provider_MoveResourceState_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { + out := new(ReadDataSource_Response) + err := c.cc.Invoke(ctx, Provider_ReadDataSource_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) GetFunctions(ctx context.Context, in *GetFunctions_Request, opts ...grpc.CallOption) (*GetFunctions_Response, error) { + out := new(GetFunctions_Response) + err := c.cc.Invoke(ctx, Provider_GetFunctions_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) CallFunction(ctx context.Context, in *CallFunction_Request, opts ...grpc.CallOption) (*CallFunction_Response, error) { + out := new(CallFunction_Response) + err := c.cc.Invoke(ctx, Provider_CallFunction_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, Provider_Stop_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProviderServer is the server API for Provider service. +// All implementations must embed UnimplementedProviderServer +// for forward compatibility +type ProviderServer interface { + // GetMetadata returns upfront information about server capabilities and + // supported resource types without requiring the server to instantiate all + // schema information, which may be memory intensive. This RPC is optional, + // where clients may receive an unimplemented RPC error. Clients should + // ignore the error and call the GetSchema RPC as a fallback. + GetMetadata(context.Context, *GetMetadata_Request) (*GetMetadata_Response, error) + // GetSchema returns schema information for the provider, data resources, + // and managed resources. + GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) + PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) + // ////// One-time initialization, called before other functions below + Configure(context.Context, *Configure_Request) (*Configure_Response, error) + // ////// Managed Resource Lifecycle + ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) + PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) + ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) + ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) + MoveResourceState(context.Context, *MoveResourceState_Request) (*MoveResourceState_Response, error) + ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) + // GetFunctions returns the definitions of all functions. + GetFunctions(context.Context, *GetFunctions_Request) (*GetFunctions_Response, error) + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. 
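+//
+// A minimal conforming server method might look like this sketch (the
+// "echo" function name and the server type are hypothetical):
+//
+//	func (s *server) CallFunction(ctx context.Context, req *CallFunction_Request) (*CallFunction_Response, error) {
+//		if req.Name != "echo" || len(req.Arguments) != 1 {
+//			return &CallFunction_Response{Error: &FunctionError{Text: "unknown function or wrong arity"}}, nil
+//		}
+//		// Echo the single DynamicValue argument back unchanged.
+//		return &CallFunction_Response{Result: req.Arguments[0]}, nil
+//	}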
+ CallFunction(context.Context, *CallFunction_Request) (*CallFunction_Response, error) + // ////// Graceful Shutdown + Stop(context.Context, *Stop_Request) (*Stop_Response, error) + mustEmbedUnimplementedProviderServer() +} + +// UnimplementedProviderServer must be embedded to have forward compatible implementations. +type UnimplementedProviderServer struct { +} + +func (UnimplementedProviderServer) GetMetadata(context.Context, *GetMetadata_Request) (*GetMetadata_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMetadata not implemented") +} +func (UnimplementedProviderServer) GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (UnimplementedProviderServer) PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PrepareProviderConfig not implemented") +} +func (UnimplementedProviderServer) ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateResourceTypeConfig not implemented") +} +func (UnimplementedProviderServer) ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateDataSourceConfig not implemented") +} +func (UnimplementedProviderServer) UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpgradeResourceState not implemented") +} +func (UnimplementedProviderServer) Configure(context.Context, *Configure_Request) (*Configure_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") +} +func (UnimplementedProviderServer) ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadResource not implemented") +} +func (UnimplementedProviderServer) PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PlanResourceChange not implemented") +} +func (UnimplementedProviderServer) ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplyResourceChange not implemented") +} +func (UnimplementedProviderServer) ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImportResourceState not implemented") +} +func (UnimplementedProviderServer) MoveResourceState(context.Context, *MoveResourceState_Request) (*MoveResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method MoveResourceState not implemented") +} +func (UnimplementedProviderServer) ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") +} +func (UnimplementedProviderServer) GetFunctions(context.Context, *GetFunctions_Request) (*GetFunctions_Response, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method GetFunctions not implemented") +} +func (UnimplementedProviderServer) CallFunction(context.Context, *CallFunction_Request) (*CallFunction_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method CallFunction not implemented") +} +func (UnimplementedProviderServer) Stop(context.Context, *Stop_Request) (*Stop_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} +func (UnimplementedProviderServer) mustEmbedUnimplementedProviderServer() {} + +// UnsafeProviderServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ProviderServer will +// result in compilation errors. +type UnsafeProviderServer interface { + mustEmbedUnimplementedProviderServer() +} + +func RegisterProviderServer(s grpc.ServiceRegistrar, srv ProviderServer) { + s.RegisterService(&Provider_ServiceDesc, srv) +} + +func _Provider_GetMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMetadata_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetMetadata(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_GetMetadata_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetMetadata(ctx, req.(*GetMetadata_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProviderSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_GetSchema_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PrepareProviderConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PrepareProviderConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_PrepareProviderConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateResourceTypeConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ValidateResourceTypeConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateDataSourceConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ValidateDataSourceConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpgradeResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).UpgradeResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_UpgradeResourceState_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Configure_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Configure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_Configure_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadResource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ReadResource_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlanResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PlanResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_PlanResourceChange_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ApplyResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ApplyResourceChange_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ImportResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ImportResourceState_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_MoveResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MoveResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).MoveResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_MoveResourceState_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).MoveResourceState(ctx, req.(*MoveResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDataSource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadDataSource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ReadDataSource_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_GetFunctions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetFunctions_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetFunctions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_GetFunctions_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetFunctions(ctx, req.(*GetFunctions_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_CallFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CallFunction_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).CallFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_CallFunction_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).CallFunction(ctx, req.(*CallFunction_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_Stop_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +// Provider_ServiceDesc is the grpc.ServiceDesc for Provider service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Provider_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provider", + HandlerType: (*ProviderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetMetadata", + Handler: _Provider_GetMetadata_Handler, + }, + { + MethodName: "GetSchema", + Handler: _Provider_GetSchema_Handler, + }, + { + MethodName: "PrepareProviderConfig", + Handler: _Provider_PrepareProviderConfig_Handler, + }, + { + MethodName: "ValidateResourceTypeConfig", + Handler: _Provider_ValidateResourceTypeConfig_Handler, + }, + { + MethodName: "ValidateDataSourceConfig", + Handler: _Provider_ValidateDataSourceConfig_Handler, + }, + { + MethodName: "UpgradeResourceState", + Handler: _Provider_UpgradeResourceState_Handler, + }, + { + MethodName: "Configure", + Handler: _Provider_Configure_Handler, + }, + { + MethodName: "ReadResource", + Handler: _Provider_ReadResource_Handler, + }, + { + MethodName: "PlanResourceChange", + Handler: _Provider_PlanResourceChange_Handler, + }, + { + MethodName: "ApplyResourceChange", + Handler: _Provider_ApplyResourceChange_Handler, + }, + { + MethodName: "ImportResourceState", + Handler: _Provider_ImportResourceState_Handler, + }, + { + MethodName: "MoveResourceState", + Handler: _Provider_MoveResourceState_Handler, + }, + { + MethodName: "ReadDataSource", + Handler: _Provider_ReadDataSource_Handler, + }, + { + MethodName: "GetFunctions", + Handler: _Provider_GetFunctions_Handler, + }, + { + MethodName: "CallFunction", + Handler: _Provider_CallFunction_Handler, + }, + { + MethodName: "Stop", + Handler: _Provider_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tfplugin5.proto", +} + +const ( + Provisioner_GetSchema_FullMethodName = "/tfplugin5.Provisioner/GetSchema" + Provisioner_ValidateProvisionerConfig_FullMethodName = "/tfplugin5.Provisioner/ValidateProvisionerConfig" + Provisioner_ProvisionResource_FullMethodName = "/tfplugin5.Provisioner/ProvisionResource" + Provisioner_Stop_FullMethodName = "/tfplugin5.Provisioner/Stop" +) + +// ProvisionerClient is the client API for Provisioner service. 
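+//
+// ProvisionResource is a server-streaming RPC, so a caller drains the
+// returned stream until io.EOF. A hedged sketch (names are illustrative):
+//
+//	stream, err := client.ProvisionResource(ctx, req)
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		resp, err := stream.Recv()
+//		if err == io.EOF {
+//			break // provisioning finished
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		log.Println(resp.Output) // incremental provisioner output
+//	}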
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ProvisionerClient interface { + GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type provisionerClient struct { + cc grpc.ClientConnInterface +} + +func NewProvisionerClient(cc grpc.ClientConnInterface) ProvisionerClient { + return &provisionerClient{cc} +} + +func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) { + out := new(GetProvisionerSchema_Response) + err := c.cc.Invoke(ctx, Provisioner_GetSchema_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) { + out := new(ValidateProvisionerConfig_Response) + err := c.cc.Invoke(ctx, Provisioner_ValidateProvisionerConfig_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) { + stream, err := c.cc.NewStream(ctx, &Provisioner_ServiceDesc.Streams[0], Provisioner_ProvisionResource_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &provisionerProvisionResourceClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Provisioner_ProvisionResourceClient interface { + Recv() (*ProvisionResource_Response, error) + grpc.ClientStream +} + +type provisionerProvisionResourceClient struct { + grpc.ClientStream +} + +func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response, error) { + m := new(ProvisionResource_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, Provisioner_Stop_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProvisionerServer is the server API for Provisioner service. 
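+//
+// As noted below, implementations embed UnimplementedProvisionerServer so
+// that RPCs added to the protocol later do not break compilation. A sketch
+// (myProvisioner is a hypothetical type):
+//
+//	type myProvisioner struct {
+//		UnimplementedProvisionerServer
+//	}
+//
+//	func (p *myProvisioner) ProvisionResource(req *ProvisionResource_Request, srv Provisioner_ProvisionResourceServer) error {
+//		// Send may be called any number of times before returning.
+//		return srv.Send(&ProvisionResource_Response{Output: "provisioned"})
+//	}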
+// All implementations must embed UnimplementedProvisionerServer +// for forward compatibility +type ProvisionerServer interface { + GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error + Stop(context.Context, *Stop_Request) (*Stop_Response, error) + mustEmbedUnimplementedProvisionerServer() +} + +// UnimplementedProvisionerServer must be embedded to have forward compatible implementations. +type UnimplementedProvisionerServer struct { +} + +func (UnimplementedProvisionerServer) GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (UnimplementedProvisionerServer) ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateProvisionerConfig not implemented") +} +func (UnimplementedProvisionerServer) ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error { + return status.Errorf(codes.Unimplemented, "method ProvisionResource not implemented") +} +func (UnimplementedProvisionerServer) Stop(context.Context, *Stop_Request) (*Stop_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} +func (UnimplementedProvisionerServer) mustEmbedUnimplementedProvisionerServer() {} + +// UnsafeProvisionerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ProvisionerServer will +// result in compilation errors. 
+type UnsafeProvisionerServer interface { + mustEmbedUnimplementedProvisionerServer() +} + +func RegisterProvisionerServer(s grpc.ServiceRegistrar, srv ProvisionerServer) { + s.RegisterService(&Provisioner_ServiceDesc, srv) +} + +func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProvisionerSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provisioner_GetSchema_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateProvisionerConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provisioner_ValidateProvisionerConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ProvisionResource_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ProvisionResource_Request) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ProvisionerServer).ProvisionResource(m, &provisionerProvisionResourceServer{stream}) +} + +type Provisioner_ProvisionResourceServer interface { + Send(*ProvisionResource_Response) error + grpc.ServerStream +} + +type provisionerProvisionResourceServer struct { + grpc.ServerStream +} + +func (x *provisionerProvisionResourceServer) Send(m *ProvisionResource_Response) error { + return x.ServerStream.SendMsg(m) +} + +func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provisioner_Stop_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +// Provisioner_ServiceDesc is the grpc.ServiceDesc for Provisioner service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Provisioner_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provisioner", + HandlerType: (*ProvisionerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provisioner_GetSchema_Handler, + }, + { + MethodName: "ValidateProvisionerConfig", + Handler: _Provisioner_ValidateProvisionerConfig_Handler, + }, + { + MethodName: "Stop", + Handler: _Provisioner_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ProvisionResource", + Handler: _Provisioner_ProvisionResource_Handler, + ServerStreams: true, + }, + }, + Metadata: "tfplugin5.proto", +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/attribute_path.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/attribute_path.go new file mode 100644 index 0000000000..4a469b1478 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/attribute_path.go @@ -0,0 +1,92 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +func AttributePath(in *tftypes.AttributePath) *tfplugin5.AttributePath { + if in == nil { + return nil + } + + resp := &tfplugin5.AttributePath{ + Steps: AttributePath_Steps(in.Steps()), + } + + return resp +} + +func AttributePaths(in []*tftypes.AttributePath) []*tfplugin5.AttributePath { + resp := make([]*tfplugin5.AttributePath, 0, len(in)) + + for _, a := range in { + resp = append(resp, AttributePath(a)) + } + + return resp +} + +func AttributePath_Step(step tftypes.AttributePathStep) *tfplugin5.AttributePath_Step { + if step == nil { + return nil + } + + switch step := step.(type) { + case tftypes.AttributeName: + return &tfplugin5.AttributePath_Step{ + Selector: &tfplugin5.AttributePath_Step_AttributeName{ + AttributeName: string(step), + }, + } + case tftypes.ElementKeyInt: + return &tfplugin5.AttributePath_Step{ + Selector: &tfplugin5.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: int64(step), + }, + } + case tftypes.ElementKeyString: + return &tfplugin5.AttributePath_Step{ + Selector: &tfplugin5.AttributePath_Step_ElementKeyString{ + ElementKeyString: string(step), + }, + } + case tftypes.ElementKeyValue: + // The protocol has no equivalent of an ElementKeyValue, so this + // returns nil for the step to signal a step we cannot convey back + // to Terraform. + return nil + } + + // It is not currently possible to create tftypes.AttributePathStep + // implementations outside the tftypes package and these implementations + // should rarely change, if ever, since they are critical to how + // Terraform understands attribute paths. If this panic was reached, it + // implies that a new step type was introduced and needs to be + // implemented as a new case above or that this logic needs to be + // otherwise changed to handle some new attribute path system. 
+ panic(fmt.Sprintf("unimplemented tftypes.AttributePathStep type: %T", step)) +} + +func AttributePath_Steps(in []tftypes.AttributePathStep) []*tfplugin5.AttributePath_Step { + resp := make([]*tfplugin5.AttributePath_Step, 0, len(in)) + + for _, step := range in { + s := AttributePath_Step(step) + + // In the face of a ElementKeyValue or missing step, Terraform has no + // way to represent the attribute path, so only return the prefix. + if s == nil { + return resp + } + + resp = append(resp, s) + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/data_source.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/data_source.go new file mode 100644 index 0000000000..a62f3cde22 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/data_source.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func GetMetadata_DataSourceMetadata(in *tfprotov5.DataSourceMetadata) *tfplugin5.GetMetadata_DataSourceMetadata { + if in == nil { + return nil + } + + resp := &tfplugin5.GetMetadata_DataSourceMetadata{ + TypeName: in.TypeName, + } + + return resp +} + +func ValidateDataSourceConfig_Response(in *tfprotov5.ValidateDataSourceConfigResponse) *tfplugin5.ValidateDataSourceConfig_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.ValidateDataSourceConfig_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + } + + return resp +} + +func ReadDataSource_Response(in *tfprotov5.ReadDataSourceResponse) *tfplugin5.ReadDataSource_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.ReadDataSource_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + State: DynamicValue(in.State), + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go new file mode 100644 index 0000000000..bf30765e61 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "unicode/utf8" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func Diagnostic(in *tfprotov5.Diagnostic) *tfplugin5.Diagnostic { + if in == nil { + return nil + } + + resp := &tfplugin5.Diagnostic{ + Attribute: AttributePath(in.Attribute), + Detail: ForceValidUTF8(in.Detail), + Severity: Diagnostic_Severity(in.Severity), + Summary: ForceValidUTF8(in.Summary), + } + + return resp +} + +func Diagnostic_Severity(in tfprotov5.DiagnosticSeverity) tfplugin5.Diagnostic_Severity { + return tfplugin5.Diagnostic_Severity(in) +} + +func Diagnostics(in []*tfprotov5.Diagnostic) []*tfplugin5.Diagnostic { + resp := make([]*tfplugin5.Diagnostic, 0, len(in)) + + for _, diag := range in { + resp = append(resp, Diagnostic(diag)) + } + + return resp +} + +// ForceValidUTF8 returns a string guaranteed to be valid UTF-8 even if the +// input isn't, by replacing any invalid bytes with a valid UTF-8 encoding of +// the Unicode Replacement Character (\uFFFD). 
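+//
+// For example (hypothetical input; the byte 0xFF can never appear in
+// valid UTF-8):
+//
+//	ForceValidUTF8("ok\xffbad") // returns "ok\uFFFDbad"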
+// +// The protobuf serialization library will reject invalid UTF-8 with an +// unhelpful error message: +// +// string field contains invalid UTF-8 +// +// Passing a string result through this function makes invalid UTF-8 instead +// emerge as placeholder characters on the other side of the wire protocol, +// giving a better chance of still returning a partially-legible message +// instead of a generic character encoding error. +// +// This is intended for user-facing messages such as diagnostic summary and +// detail messages, where Terraform will just treat the value as opaque and +// it's ultimately up to the user and their terminal or web browser to +// interpret the result. Don't use this for strings that have machine-readable +// meaning. +func ForceValidUTF8(s string) string { + // Most strings that pass through here will already be valid UTF-8 and + // utf8.ValidString has a fast path which will beat our rune-by-rune + // analysis below, so it's worth the cost of walking the string twice + // in the rarer invalid case. + if utf8.ValidString(s) { + return s + } + + // If we get down here then we know there's at least one invalid UTF-8 + // sequence in the string, so in this slow path we'll reconstruct the + // string one rune at a time, guaranteeing that we'll only write valid + // UTF-8 sequences into the resulting buffer. + // + // Any invalid string will grow at least a little larger as a result of + // this operation because we'll be replacing each invalid byte with + // the three-byte sequence \xEF\xBF\xBD, which is the UTF-8 encoding of + // the replacement character \uFFFD. 9 is a magic number giving room for + // three such expansions without any further allocation. + ret := make([]byte, 0, len(s)+9) + for { + // If the first byte in s is not the start of a valid UTF-8 sequence + // then the following will return utf8.RuneError, 1, where + // utf8.RuneError is the unicode replacement character. + r, advance := utf8.DecodeRuneInString(s) + if advance == 0 { + break + } + s = s[advance:] + ret = utf8.AppendRune(ret, r) + } + return string(ret) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/doc.go new file mode 100644 index 0000000000..91b961e888 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package toproto converts terraform-plugin-go tfprotov5 types to Protocol +// Buffers generated tfplugin5 types. +package toproto diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/dynamic_value.go new file mode 100644 index 0000000000..7f86517a3f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/dynamic_value.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +func DynamicValue(in *tfprotov5.DynamicValue) *tfplugin5.DynamicValue { + if in == nil { + return nil + } + + resp := &tfplugin5.DynamicValue{ + Msgpack: in.MsgPack, + Json: in.JSON, + } + + return resp +} + +func CtyType(in tftypes.Type) []byte { + if in == nil { + return nil + } + + // MarshalJSON is always error safe. + // nolint:staticcheck // Intended first-party usage + resp, _ := in.MarshalJSON() + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/function.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/function.go new file mode 100644 index 0000000000..319c7fa08c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/function.go @@ -0,0 +1,102 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func CallFunction_Response(in *tfprotov5.CallFunctionResponse) *tfplugin5.CallFunction_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.CallFunction_Response{ + Error: FunctionError(in.Error), + Result: DynamicValue(in.Result), + } + + return resp +} + +func Function(in *tfprotov5.Function) *tfplugin5.Function { + if in == nil { + return nil + } + + resp := &tfplugin5.Function{ + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + DeprecationMessage: in.DeprecationMessage, + Parameters: make([]*tfplugin5.Function_Parameter, 0, len(in.Parameters)), + Return: Function_Return(in.Return), + Summary: in.Summary, + VariadicParameter: Function_Parameter(in.VariadicParameter), + } + + for _, parameter := range in.Parameters { + resp.Parameters = append(resp.Parameters, Function_Parameter(parameter)) + } + + return resp +} + +func Function_Parameter(in *tfprotov5.FunctionParameter) *tfplugin5.Function_Parameter { + if in == nil { + return nil + } + + resp := &tfplugin5.Function_Parameter{ + AllowNullValue: in.AllowNullValue, + AllowUnknownValues: in.AllowUnknownValues, + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + Name: in.Name, + Type: CtyType(in.Type), + } + + return resp +} + +func Function_Return(in *tfprotov5.FunctionReturn) *tfplugin5.Function_Return { + if in == nil { + return nil + } + + resp := &tfplugin5.Function_Return{ + Type: CtyType(in.Type), + } + + return resp +} + +func GetFunctions_Response(in *tfprotov5.GetFunctionsResponse) *tfplugin5.GetFunctions_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.GetFunctions_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + Functions: make(map[string]*tfplugin5.Function, len(in.Functions)), + } + + for name, function := range in.Functions { + resp.Functions[name] = Function(function) + } + + return resp +} + +func GetMetadata_FunctionMetadata(in *tfprotov5.FunctionMetadata) *tfplugin5.GetMetadata_FunctionMetadata { + if in == nil { + return nil + } + + resp := &tfplugin5.GetMetadata_FunctionMetadata{ + Name: in.Name, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/function_error.go 
b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/function_error.go new file mode 100644 index 0000000000..1dab5b4379 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/function_error.go @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func FunctionError(in *tfprotov5.FunctionError) *tfplugin5.FunctionError { + if in == nil { + return nil + } + + resp := &tfplugin5.FunctionError{ + FunctionArgument: in.FunctionArgument, + Text: ForceValidUTF8(in.Text), + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go new file mode 100644 index 0000000000..4891c53874 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go @@ -0,0 +1,104 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func GetMetadata_Response(in *tfprotov5.GetMetadataResponse) *tfplugin5.GetMetadata_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.GetMetadata_Response{ + DataSources: make([]*tfplugin5.GetMetadata_DataSourceMetadata, 0, len(in.DataSources)), + Diagnostics: Diagnostics(in.Diagnostics), + Functions: make([]*tfplugin5.GetMetadata_FunctionMetadata, 0, len(in.Functions)), + Resources: make([]*tfplugin5.GetMetadata_ResourceMetadata, 0, len(in.Resources)), + ServerCapabilities: ServerCapabilities(in.ServerCapabilities), + } + + for _, datasource := range in.DataSources { + resp.DataSources = append(resp.DataSources, GetMetadata_DataSourceMetadata(&datasource)) + } + + for _, function := range in.Functions { + resp.Functions = append(resp.Functions, GetMetadata_FunctionMetadata(&function)) + } + + for _, resource := range in.Resources { + resp.Resources = append(resp.Resources, GetMetadata_ResourceMetadata(&resource)) + } + + return resp +} + +func GetProviderSchema_Response(in *tfprotov5.GetProviderSchemaResponse) *tfplugin5.GetProviderSchema_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.GetProviderSchema_Response{ + DataSourceSchemas: make(map[string]*tfplugin5.Schema, len(in.DataSourceSchemas)), + Diagnostics: Diagnostics(in.Diagnostics), + Functions: make(map[string]*tfplugin5.Function, len(in.Functions)), + Provider: Schema(in.Provider), + ProviderMeta: Schema(in.ProviderMeta), + ResourceSchemas: make(map[string]*tfplugin5.Schema, len(in.ResourceSchemas)), + ServerCapabilities: ServerCapabilities(in.ServerCapabilities), + } + + for name, schema := range in.ResourceSchemas { + resp.ResourceSchemas[name] = Schema(schema) + } + + for name, schema := range in.DataSourceSchemas { + resp.DataSourceSchemas[name] = Schema(schema) + } + + for name, function := range in.Functions { + resp.Functions[name] = Function(function) + } + + return resp +} + +func PrepareProviderConfig_Response(in *tfprotov5.PrepareProviderConfigResponse) *tfplugin5.PrepareProviderConfig_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.PrepareProviderConfig_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + PreparedConfig: 
DynamicValue(in.PreparedConfig), + } + + return resp +} + +func Configure_Response(in *tfprotov5.ConfigureProviderResponse) *tfplugin5.Configure_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.Configure_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + } + + return resp +} + +func Stop_Response(in *tfprotov5.StopProviderResponse) *tfplugin5.Stop_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.Stop_Response{ + Error: in.Error, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/resource.go new file mode 100644 index 0000000000..0ba9ab465f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/resource.go @@ -0,0 +1,142 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func GetMetadata_ResourceMetadata(in *tfprotov5.ResourceMetadata) *tfplugin5.GetMetadata_ResourceMetadata { + if in == nil { + return nil + } + + resp := &tfplugin5.GetMetadata_ResourceMetadata{ + TypeName: in.TypeName, + } + + return resp +} + +func ValidateResourceTypeConfig_Response(in *tfprotov5.ValidateResourceTypeConfigResponse) *tfplugin5.ValidateResourceTypeConfig_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.ValidateResourceTypeConfig_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + } + + return resp +} + +func UpgradeResourceState_Response(in *tfprotov5.UpgradeResourceStateResponse) *tfplugin5.UpgradeResourceState_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.UpgradeResourceState_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + UpgradedState: DynamicValue(in.UpgradedState), + } + + return resp +} + +func ReadResource_Response(in *tfprotov5.ReadResourceResponse) *tfplugin5.ReadResource_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.ReadResource_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + NewState: DynamicValue(in.NewState), + Private: in.Private, + } + + return resp +} + +func PlanResourceChange_Response(in *tfprotov5.PlanResourceChangeResponse) *tfplugin5.PlanResourceChange_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.PlanResourceChange_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + LegacyTypeSystem: in.UnsafeToUseLegacyTypeSystem, //nolint:staticcheck + PlannedPrivate: in.PlannedPrivate, + PlannedState: DynamicValue(in.PlannedState), + RequiresReplace: AttributePaths(in.RequiresReplace), + } + + return resp +} + +func ApplyResourceChange_Response(in *tfprotov5.ApplyResourceChangeResponse) *tfplugin5.ApplyResourceChange_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.ApplyResourceChange_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + LegacyTypeSystem: in.UnsafeToUseLegacyTypeSystem, //nolint:staticcheck + NewState: DynamicValue(in.NewState), + Private: in.Private, + } + + return resp +} + +func ImportResourceState_Response(in *tfprotov5.ImportResourceStateResponse) *tfplugin5.ImportResourceState_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.ImportResourceState_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + ImportedResources: ImportResourceState_ImportedResources(in.ImportedResources), + } + + return resp +} + +func 
ImportResourceState_ImportedResource(in *tfprotov5.ImportedResource) *tfplugin5.ImportResourceState_ImportedResource { + if in == nil { + return nil + } + + resp := &tfplugin5.ImportResourceState_ImportedResource{ + Private: in.Private, + State: DynamicValue(in.State), + TypeName: in.TypeName, + } + + return resp +} + +func ImportResourceState_ImportedResources(in []*tfprotov5.ImportedResource) []*tfplugin5.ImportResourceState_ImportedResource { + resp := make([]*tfplugin5.ImportResourceState_ImportedResource, 0, len(in)) + + for _, i := range in { + resp = append(resp, ImportResourceState_ImportedResource(i)) + } + + return resp +} + +func MoveResourceState_Response(in *tfprotov5.MoveResourceStateResponse) *tfplugin5.MoveResourceState_Response { + if in == nil { + return nil + } + + resp := &tfplugin5.MoveResourceState_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + TargetPrivate: in.TargetPrivate, + TargetState: DynamicValue(in.TargetState), + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/schema.go new file mode 100644 index 0000000000..69d47af1a2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/schema.go @@ -0,0 +1,99 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func Schema(in *tfprotov5.Schema) *tfplugin5.Schema { + if in == nil { + return nil + } + + resp := &tfplugin5.Schema{ + Block: Schema_Block(in.Block), + Version: in.Version, + } + + return resp +} + +func Schema_Block(in *tfprotov5.SchemaBlock) *tfplugin5.Schema_Block { + if in == nil { + return nil + } + + resp := &tfplugin5.Schema_Block{ + Attributes: Schema_Attributes(in.Attributes), + BlockTypes: Schema_NestedBlocks(in.BlockTypes), + Deprecated: in.Deprecated, + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + Version: in.Version, + } + + return resp +} + +func Schema_Attribute(in *tfprotov5.SchemaAttribute) *tfplugin5.Schema_Attribute { + if in == nil { + return nil + } + + resp := &tfplugin5.Schema_Attribute{ + Computed: in.Computed, + Deprecated: in.Deprecated, + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + Name: in.Name, + Optional: in.Optional, + Required: in.Required, + Sensitive: in.Sensitive, + Type: CtyType(in.Type), + } + + return resp +} + +func Schema_Attributes(in []*tfprotov5.SchemaAttribute) []*tfplugin5.Schema_Attribute { + resp := make([]*tfplugin5.Schema_Attribute, 0, len(in)) + + for _, a := range in { + resp = append(resp, Schema_Attribute(a)) + } + + return resp +} + +func Schema_NestedBlock(in *tfprotov5.SchemaNestedBlock) *tfplugin5.Schema_NestedBlock { + if in == nil { + return nil + } + + resp := &tfplugin5.Schema_NestedBlock{ + Block: Schema_Block(in.Block), + MaxItems: in.MaxItems, + MinItems: in.MinItems, + Nesting: Schema_NestedBlock_NestingMode(in.Nesting), + TypeName: in.TypeName, + } + + return resp +} + +func Schema_NestedBlocks(in []*tfprotov5.SchemaNestedBlock) []*tfplugin5.Schema_NestedBlock { + resp := make([]*tfplugin5.Schema_NestedBlock, 0, len(in)) + + for _, b := range in { + resp = append(resp, Schema_NestedBlock(b)) + } + + return resp +} + +func Schema_NestedBlock_NestingMode(in tfprotov5.SchemaNestedBlockNestingMode) 
tfplugin5.Schema_NestedBlock_NestingMode { + return tfplugin5.Schema_NestedBlock_NestingMode(in) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/server_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/server_capabilities.go new file mode 100644 index 0000000000..9fcbe0e6dd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/server_capabilities.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func ServerCapabilities(in *tfprotov5.ServerCapabilities) *tfplugin5.ServerCapabilities { + if in == nil { + return nil + } + + resp := &tfplugin5.ServerCapabilities{ + GetProviderSchemaOptional: in.GetProviderSchemaOptional, + MoveResourceState: in.MoveResourceState, + PlanDestroy: in.PlanDestroy, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/string_kind.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/string_kind.go new file mode 100644 index 0000000000..fbb399c0a5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/string_kind.go @@ -0,0 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func StringKind(in tfprotov5.StringKind) tfplugin5.StringKind { + return tfplugin5.StringKind(in) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go new file mode 100644 index 0000000000..fa85a8e04f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go @@ -0,0 +1,237 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +import ( + "context" +) + +// ProviderServer is an interface that reflects the Terraform protocol. +// Providers must implement this interface. +type ProviderServer interface { + // GetMetadata returns upfront information about server capabilities and + // supported resource types without requiring the server to instantiate all + // schema information, which may be memory intensive. This RPC is optional, + // where clients may receive an unimplemented RPC error. Clients should + // ignore the error and call the GetProviderSchema RPC as a fallback. + GetMetadata(context.Context, *GetMetadataRequest) (*GetMetadataResponse, error) + + // GetProviderSchema is called when Terraform needs to know what the + // provider's schema is, along with the schemas of all its resources + // and data sources. + GetProviderSchema(context.Context, *GetProviderSchemaRequest) (*GetProviderSchemaResponse, error) + + // PrepareProviderConfig is called to give a provider a chance to + // modify the configuration the user specified before validation. + PrepareProviderConfig(context.Context, *PrepareProviderConfigRequest) (*PrepareProviderConfigResponse, error) + + // ConfigureProvider is called to pass the user-specified provider + // configuration to the provider. 
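To make this interface concrete, here is a minimal, hypothetical sketch of a ConfigureProvider implementation built directly on this package. The exampleProvider type and its single "endpoint" attribute are invented for illustration; only the tfprotov5 and tftypes calls shown are real APIs.

```go
// Hypothetical sketch: persisting provider configuration from
// ConfigureProvider for use by later RPC requests.
package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

type exampleProvider struct {
	endpoint string // configuration persisted for later RPC requests
}

func (p *exampleProvider) ConfigureProvider(ctx context.Context, req *tfprotov5.ConfigureProviderRequest) (*tfprotov5.ConfigureProviderResponse, error) {
	resp := &tfprotov5.ConfigureProviderResponse{}

	// Decode the DynamicValue using the same tftypes.Object the provider
	// schema declares (the attribute here is an invented example).
	val, err := req.Config.Unmarshal(tftypes.Object{
		AttributeTypes: map[string]tftypes.Type{"endpoint": tftypes.String},
	})
	if err != nil {
		resp.Diagnostics = append(resp.Diagnostics, &tfprotov5.Diagnostic{
			Severity: tfprotov5.DiagnosticSeverityError,
			Summary:  "Invalid provider configuration",
			Detail:   err.Error(),
		})
		return resp, nil
	}

	var attrs map[string]tftypes.Value
	if err := val.As(&attrs); err != nil {
		return resp, err
	}

	// Config may hold unknown values during plan; persist only known,
	// non-null values.
	if ep, ok := attrs["endpoint"]; ok && ep.IsKnown() && !ep.IsNull() {
		if err := ep.As(&p.endpoint); err != nil {
			return resp, err
		}
	}

	return resp, nil
}
```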
+ ConfigureProvider(context.Context, *ConfigureProviderRequest) (*ConfigureProviderResponse, error) + + // StopProvider is called when Terraform would like providers to shut + // down as quickly as possible, and usually represents an interrupt. + StopProvider(context.Context, *StopProviderRequest) (*StopProviderResponse, error) + + // ResourceServer is an interface encapsulating all the + // resource-related RPC requests. ProviderServer implementations must + // implement them, but they are a handy interface for defining what a + // resource is to terraform-plugin-go, so they're their own interface + // that is composed into ProviderServer. + ResourceServer + + // DataSourceServer is an interface encapsulating all the data + // source-related RPC requests. ProviderServer implementations must + // implement them, but they are a handy interface for defining what a + // data source is to terraform-plugin-go, so they're their own + // interface that is composed into ProviderServer. + DataSourceServer + + // FunctionServer is an interface encapsulating all the function-related RPC + // requests. ProviderServer implementations must implement them, but they + // are a handy interface for defining what a function is to + // terraform-plugin-go, so they are their own interface that is composed + // into ProviderServer. + // + // This will be required in an upcoming release. + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + // FunctionServer +} + +// GetMetadataRequest represents a GetMetadata RPC request. +type GetMetadataRequest struct{} + +// GetMetadataResponse represents a GetMetadata RPC response. +type GetMetadataResponse struct { + // ServerCapabilities defines optionally supported protocol features, + // such as forward-compatible Terraform behavior changes. + ServerCapabilities *ServerCapabilities + + // Diagnostics report errors or warnings related to returning the + // provider's schemas. Returning an empty slice indicates success, with + // no errors or warnings generated. + Diagnostics []*Diagnostic + + // DataSources returns metadata for all data resources. + DataSources []DataSourceMetadata + + // Functions returns metadata for all functions. + Functions []FunctionMetadata + + // Resources returns metadata for all managed resources. + Resources []ResourceMetadata +} + +// GetProviderSchemaRequest represents a Terraform RPC request for the +// provider's schemas. +type GetProviderSchemaRequest struct{} + +// GetProviderSchemaResponse represents a Terraform RPC response containing the +// provider's schemas. +type GetProviderSchemaResponse struct { + // ServerCapabilities defines optionally supported protocol features, + // such as forward-compatible Terraform behavior changes. + ServerCapabilities *ServerCapabilities + + // Provider defines the schema for the provider configuration, which + // will be specified in the provider block of the user's configuration. + Provider *Schema + + // ProviderMeta defines the schema for the provider's metadata, which + // will be specified in the provider_meta blocks of the terraform block + // for a module. This is an advanced feature and its usage should be + // coordinated with the Terraform Core team by opening an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. + ProviderMeta *Schema + + // ResourceSchemas is a map of resource names to the schema for the + // configuration specified in the resource. 
The name should be a + // resource name, and should be prefixed with your provider's shortname + // and an underscore. It should match the first label after `resource` + // in a user's configuration. + ResourceSchemas map[string]*Schema + + // DataSourceSchemas is a map of data source names to the schema for + // the configuration specified in the data source. The name should be a + // data source name, and should be prefixed with your provider's + // shortname and an underscore. It should match the first label after + // `data` in a user's configuration. + DataSourceSchemas map[string]*Schema + + // Functions is a map of function names to their definition. + // + // Unlike data resources and managed resources, the name should NOT be + // prefixed with the provider name and an underscore. Configuration + // references to functions use a separate namespacing syntax that already + // includes the provider name. + Functions map[string]*Function + + // Diagnostics report errors or warnings related to returning the + // provider's schemas. Returning an empty slice indicates success, with + // no errors or warnings generated. + Diagnostics []*Diagnostic +} + +// PrepareProviderConfigRequest represents a Terraform RPC request for the +// provider to modify the provider configuration in preparation for Terraform +// validating it. +type PrepareProviderConfigRequest struct { + // Config is the configuration the user supplied for the provider. See + // the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // The PrepareProviderConfig RPC call will be called twice; once when + // generating a plan, once when applying the plan. When called during + // plan, Config can contain unknown values if fields with unknown + // values are interpolated into it. At apply time, all fields will have + // known values. Values that are not set in the configuration will be + // null. + Config *DynamicValue +} + +// PrepareProviderConfigResponse represents a Terraform RPC response containing +// a modified provider configuration that Terraform can now validate and use. +type PrepareProviderConfigResponse struct { + // PreparedConfig should be set to the modified configuration. See the + // documentation on `DynamicValue` for information about safely + // creating the `DynamicValue`. + // + // This RPC call exists because early versions of the Terraform Plugin + // SDK allowed providers to set defaults for provider configurations in + // such a way that Terraform couldn't validate the provider config + // without retrieving the default values first. As providers using + // terraform-plugin-go directly and new frameworks built on top of it + // have no such requirement, it is safe and recommended to simply set + // PreparedConfig to the value of the PrepareProviderConfigRequest's + // Config property, indicating that no changes are needed to the + // configuration. + // + // The configuration should be represented as a tftypes.Object, with + // each attribute and nested block getting its own key and value. + // + // TODO: should we provide an implementation that does that that + // provider developers can just embed and not need to implement the + // method themselves, then? + PreparedConfig *DynamicValue + + // Diagnostics report errors or warnings related to preparing the + // provider's configuration. 
Returning an empty slice indicates + // success, with no errors or warnings generated. + Diagnostics []*Diagnostic +} + +// ConfigureProviderRequest represents a Terraform RPC request to supply the +// provider with information about what the user entered in the provider's +// configuration block. +type ConfigureProviderRequest struct { + // TerraformVersion is the version of Terraform executing the request. + // This is supplied for logging, analytics, and User-Agent purposes + // *only*. Providers should not try to gate provider behavior on + // Terraform versions. It will make you sad. We can't stop you from + // doing it, but we really highly recommend you do not do it. + TerraformVersion string + + // Config is the configuration the user supplied for the provider. This + // information should usually be persisted to the underlying type + // that's implementing the ProviderServer interface, for use in later + // RPC requests. See the documentation on `DynamicValue` for more + // information about safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // The ConfigureProvider RPC call will be called twice; once when + // generating a plan, once when applying the plan. When called during + // plan, Config can contain unknown values if fields with unknown + // values are interpolated into it. At apply time, all fields will have + // known values. Values that are not set in the configuration will be + // null. + Config *DynamicValue +} + +// ConfigureProviderResponse represents a Terraform RPC response to the +// configuration block that Terraform supplied for the provider. +type ConfigureProviderResponse struct { + // Diagnostics report errors or warnings related to the provider's + // configuration. Returning an empty slice indicates success, with no + // errors or warnings generated. + Diagnostics []*Diagnostic +} + +// StopProviderRequest represents a Terraform RPC request to interrupt a +// provider's work and terminate a provider's processes as soon as possible. +type StopProviderRequest struct{} + +// StopProviderResponse represents a Terraform RPC response surfacing any issues +// the provider encountered in terminating. +type StopProviderResponse struct { + // Error should be set to a string describing the error if the provider + // cannot currently shut down for some reason. Because this always + // represents a system error and not a user error, it is returned as a + // string, not a Diagnostic. + Error string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go new file mode 100644 index 0000000000..3090d298af --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go @@ -0,0 +1,546 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// ResourceMetadata describes metadata for a managed resource in the GetMetadata +// RPC. +type ResourceMetadata struct { + // TypeName is the name of the managed resource. + TypeName string +} + +// ResourceServer is an interface containing the methods a resource +// implementation needs to fill. +type ResourceServer interface { + // ValidateResourceTypeConfig is called when Terraform is checking that + // a resource's configuration is valid. 
It is guaranteed to have types + // conforming to your schema. This is your opportunity to do custom or + // advanced validation prior to a plan being generated. + ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfigRequest) (*ValidateResourceTypeConfigResponse, error) + + // UpgradeResourceState is called when Terraform has encountered a + // resource with a state in a schema that doesn't match the schema's + // current version. It is the provider's responsibility to modify the + // state to upgrade it to the latest state schema. + UpgradeResourceState(context.Context, *UpgradeResourceStateRequest) (*UpgradeResourceStateResponse, error) + + // ReadResource is called when Terraform is refreshing a resource's + // state. + ReadResource(context.Context, *ReadResourceRequest) (*ReadResourceResponse, error) + + // PlanResourceChange is called when Terraform is attempting to + // calculate a plan for a resource. Terraform will suggest a proposed + // new state, which the provider can modify or return unmodified to + // influence Terraform's plan. + PlanResourceChange(context.Context, *PlanResourceChangeRequest) (*PlanResourceChangeResponse, error) + + // ApplyResourceChange is called when Terraform has detected a diff + // between the resource's state and the user's config, and the user has + // approved a planned change. The provider is to apply the changes + // contained in the plan, and return the resulting state. + ApplyResourceChange(context.Context, *ApplyResourceChangeRequest) (*ApplyResourceChangeResponse, error) + + // ImportResourceState is called when a user has requested Terraform + // import a resource. The provider should fetch the information + // specified by the passed ID and return it as one or more resource + // states for Terraform to assume control of. + ImportResourceState(context.Context, *ImportResourceStateRequest) (*ImportResourceStateResponse, error) +} + +// ResourceServerWithMoveResourceState is a temporary interface for servers +// to implement MoveResourceState RPC handling. +// +// Deprecated: The MoveResourceState method will be moved into the +// ResourceServer interface and this interface will be removed in a future +// version. +type ResourceServerWithMoveResourceState interface { + ResourceServer + + // MoveResourceState is called when Terraform is asked to change a resource + // type for an existing resource. The provider must accept the change as + // valid by ensuring the source resource type, schema version, and provider + // address are compatible to convert the source state into the target + // resource type and latest state version. + // + // This functionality is only supported in Terraform 1.8 and later. The + // provider must have enabled the MoveResourceState server capability to + // enable these requests. + MoveResourceState(context.Context, *MoveResourceStateRequest) (*MoveResourceStateResponse, error) +} + +// ValidateResourceTypeConfigRequest is the request Terraform sends when it +// wants to validate a resource's configuration. +type ValidateResourceTypeConfigRequest struct { + // TypeName is the type of resource Terraform is validating. + TypeName string + + // Config is the configuration the user supplied for that resource. See + // the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. 
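A hypothetical sketch of the custom validation opportunity described above, continuing the exampleProvider from earlier (assumes those imports plus "math/big"; the "retries" attribute is invented):

```go
// Custom validation that tolerates unknown values, which only resolve at
// apply time and must not fail validation during plan.
func (p *exampleProvider) ValidateResourceTypeConfig(ctx context.Context, req *tfprotov5.ValidateResourceTypeConfigRequest) (*tfprotov5.ValidateResourceTypeConfigResponse, error) {
	resp := &tfprotov5.ValidateResourceTypeConfigResponse{}

	val, err := req.Config.Unmarshal(tftypes.Object{
		AttributeTypes: map[string]tftypes.Type{"retries": tftypes.Number},
	})
	if err != nil {
		return resp, err
	}

	var attrs map[string]tftypes.Value
	if err := val.As(&attrs); err != nil {
		return resp, err
	}

	// Skip unknown or null values rather than rejecting them.
	if retries, ok := attrs["retries"]; ok && retries.IsKnown() && !retries.IsNull() {
		var n big.Float
		if err := retries.As(&n); err != nil {
			return resp, err
		}
		if v, _ := n.Int64(); v < 0 {
			resp.Diagnostics = append(resp.Diagnostics, &tfprotov5.Diagnostic{
				Severity: tfprotov5.DiagnosticSeverityError,
				Summary:  "retries must be non-negative",
			})
		}
	}

	return resp, nil
}
```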
+ // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. Any attributes not directly + // set in the configuration will be null. + Config *DynamicValue +} + +// ValidateResourceTypeConfigResponse is the response from the provider about +// the validity of a resource's configuration. +type ValidateResourceTypeConfigResponse struct { + // Diagnostics report errors or warnings related to the given + // configuration. Returning an empty slice indicates a successful + // validation with no warnings or errors generated. + Diagnostics []*Diagnostic +} + +// UpgradeResourceStateRequest is the request Terraform sends when it needs a +// provider to upgrade the state of a given resource. +type UpgradeResourceStateRequest struct { + // TypeName is the type of resource that Terraform needs to upgrade the + // state for. + TypeName string + + // Version is the version of the state the resource currently has. + Version int64 + + // RawState is the state as Terraform sees it right now. See the + // documentation for `RawState` for information on how to work with the + // data it contains. + RawState *RawState +} + +// UpgradeResourceStateResponse is the response from the provider containing +// the upgraded state for the given resource. +type UpgradeResourceStateResponse struct { + // UpgradedState is the upgraded state for the resource, represented as + // a `DynamicValue`. See the documentation on `DynamicValue` for + // information about safely creating the `DynamicValue`. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // Terraform CLI 0.12 through 0.14 require the Msgpack field to be + // populated or an EOF error will be returned. + UpgradedState *DynamicValue + + // Diagnostics report errors or warnings related to upgrading the + // state of the requested resource. Returning an empty slice indicates + // a successful validation with no warnings or errors generated. + Diagnostics []*Diagnostic +} + +// ReadResourceRequest is the request Terraform sends when it wants to get the +// latest state for a resource. +type ReadResourceRequest struct { + // TypeName is the type of resource Terraform is requesting an updated + // state for. + TypeName string + + // CurrentState is the current state of the resource as far as + // Terraform knows, represented as a `DynamicValue`. See the + // documentation for `DynamicValue` for information about safely + // accessing the state. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + CurrentState *DynamicValue + + // Private is any provider-defined private state stored with the + // resource. It is used for keeping state with the resource that is not + // meant to be included when calculating diffs. + // + // To ensure private state data is preserved, copy any necessary data to + // the ReadResourceResponse type Private field. + Private []byte + + // ProviderMeta supplies the provider metadata configuration for the + // module this resource is in. Module-specific provider metadata is an + // advanced feature and usage of it should be coordinated with the + // Terraform Core team by raising an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. 
See the + // documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration will have known values for all fields. + ProviderMeta *DynamicValue +} + +// ReadResourceResponse is the response from the provider about the current +// state of the requested resource. +type ReadResourceResponse struct { + // NewState is the current state of the resource according to the + // provider, represented as a `DynamicValue`. See the documentation for + // `DynamicValue` for information about safely creating the + // `DynamicValue`. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + NewState *DynamicValue + + // Diagnostics report errors or warnings related to retrieving the + // current state of the requested resource. Returning an empty slice + // indicates a successful validation with no warnings or errors + // generated. + Diagnostics []*Diagnostic + + // Private should be set to any state that the provider would like sent + // with requests for this resource. This state will be associated with + // the resource, but will not be considered when calculating diffs. + Private []byte +} + +// PlanResourceChangeRequest is the request Terraform sends when it is +// generating a plan for a resource and wants the provider's input on what the +// planned state should be. +type PlanResourceChangeRequest struct { + // TypeName is the type of resource Terraform is generating a plan for. + TypeName string + + // PriorState is the state of the resource before the plan is applied, + // represented as a `DynamicValue`. See the documentation for + // `DynamicValue` for information about safely accessing the state. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + PriorState *DynamicValue + + // ProposedNewState is the state that Terraform is proposing for the + // resource, with the changes in the configuration applied, represented + // as a `DynamicValue`. See the documentation for `DynamicValue` for + // information about safely accessing the state. + // + // The ProposedNewState merges any non-null values in the configuration + // with any computed attributes in PriorState as a utility to help + // providers avoid needing to implement such merging functionality + // themselves. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + // + // The ProposedNewState will be null when planning a delete operation. + ProposedNewState *DynamicValue + + // Config is the configuration the user supplied for the resource. See + // the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config *DynamicValue + + // PriorPrivate is any provider-defined private state stored with the + // resource. It is used for keeping state with the resource that is not + // meant to be included when calculating diffs. 
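A short, hypothetical sketch of the private-state round trip described in the ReadResource comments above: a provider that doesn't transform private data copies it forward unchanged so Terraform keeps storing it with the resource.

```go
// Continuing the exampleProvider sketch from earlier.
func (p *exampleProvider) ReadResource(ctx context.Context, req *tfprotov5.ReadResourceRequest) (*tfprotov5.ReadResourceResponse, error) {
	return &tfprotov5.ReadResourceResponse{
		// A real provider would refresh this from the remote API;
		// echoing CurrentState back is only a placeholder.
		NewState: req.CurrentState,
		// Preserve provider-defined private state across the RPC.
		Private: req.Private,
	}, nil
}
```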
+ // + // To ensure private state data is preserved, copy any necessary data to + // the PlanResourceChangeResponse type PlannedPrivate field. + PriorPrivate []byte + + // ProviderMeta supplies the provider metadata configuration for the + // module this resource is in. Module-specific provider metadata is an + // advanced feature and usage of it should be coordinated with the + // Terraform Core team by raising an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. See the + // documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration will have known values for all fields. + ProviderMeta *DynamicValue +} + +// PlanResourceChangeResponse is the response from the provider about what the +// planned state for a given resource should be. +type PlanResourceChangeResponse struct { + // PlannedState is the provider's indication of what the state for the + // resource should be after apply, represented as a `DynamicValue`. See + // the documentation for `DynamicValue` for information about safely + // creating the `DynamicValue`. + // + // This is usually derived from the ProposedNewState passed in the + // PlanResourceChangeRequest, with default values substituted for any + // null values and overriding any computed values that are expected to + // change as a result of the apply operation. This may contain unknown + // values if the value could change but its new value won't be known + // until apply time. + // + // Any value that was non-null in the configuration must either + // preserve the exact configuration value or return the corresponding + // value from the prior state. The value from the prior state should be + // returned when the configuration value is semantically equivalent to + // the state value. + // + // Any value that is marked as computed in the schema and is null in + // the configuration may be set by the provider to any value of the + // expected type. + // + // PlanResourceChange will actually be called twice; once when + // generating the plan for the user to approve, once during the apply. + // During the apply, additional values from the configuration--upstream + // values interpolated in that were computed at apply time--will be + // populated. During this second call, any attribute that had a known + // value in the first PlannedState must have an identical value in the + // second PlannedState. Any unknown values may remain unknown or may + // take on any value of the appropriate type. This means the values + // returned in PlannedState should be deterministic and unknown values + // should be used if a field's value may change depending on what value + // ends up filling an unknown value in the config. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + PlannedState *DynamicValue + + // RequiresReplace is a list of tftypes.AttributePaths that require the + // resource to be replaced. They should point to the specific field + // that changed that requires the resource to be destroyed and + // recreated. + RequiresReplace []*tftypes.AttributePath + + // PlannedPrivate should be set to any state that the provider would + // like sent with requests for this resource. 
This state will be + // associated with the resource, but will not be considered when + // calculating diffs. + // + // This private state data will be sent in the ApplyResourceChange RPC, in + // relation to the types of this package, the ApplyResourceChangeRequest + // type PlannedPrivate field. + PlannedPrivate []byte + + // Diagnostics report errors or warnings related to determining the + // planned state of the requested resource. Returning an empty slice + // indicates a successful validation with no warnings or errors + // generated. + Diagnostics []*Diagnostic + + // UnsafeToUseLegacyTypeSystem should only be set by + // hashicorp/terraform-plugin-sdk. It modifies Terraform's behavior to + // work with the legacy expectations of that SDK. + // + // Nobody else should use this. Ever. For any reason. Just don't do it. + // + // We have to expose it here for terraform-plugin-sdk to be muxable, or + // we wouldn't even be including it in this type. Don't use it. It may + // go away or change behavior on you with no warning. It is + // explicitly unsupported and not part of our SemVer guarantees. + // + // Deprecated: Really, just don't use this, you don't need it. + UnsafeToUseLegacyTypeSystem bool +} + +// ApplyResourceChangeRequest is the request Terraform sends when it needs to +// apply a planned set of changes to a resource. +type ApplyResourceChangeRequest struct { + // TypeName is the type of resource Terraform wants to change. + TypeName string + + // PriorState is the state of the resource before the changes are + // applied, represented as a `DynamicValue`. See the documentation for + // `DynamicValue` for information about safely accessing the state. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + PriorState *DynamicValue + + // PlannedState is Terraform's plan for what the state should look like + // after the changes are applied, represented as a `DynamicValue`. See + // the documentation for `DynamicValue` for information about safely + // accessing the state. + // + // This is the PlannedState returned during PlanResourceChange. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + PlannedState *DynamicValue + + // Config is the configuration the user supplied for the resource. See + // the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may contain unknown values. + Config *DynamicValue + + // PlannedPrivate is any provider-defined private state stored with the + // resource. It is used for keeping state with the resource that is not + // meant to be included when calculating diffs. + // + // This private state data is sourced from the PlanResourceChange RPC, in + // relation to the types in this package, the PlanResourceChangeResponse + // type PlannedPrivate field. + // + // To ensure private state data is preserved, copy any necessary data to + // the ApplyResourceChangeResponse type Private field. + PlannedPrivate []byte + + // ProviderMeta supplies the provider metadata configuration for the + // module this resource is in. 
Module-specific provider metadata is an + // advanced feature and usage of it should be coordinated with the + // Terraform Core team by raising an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. See the + // documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration will have known values for all fields. + ProviderMeta *DynamicValue +} + +// ApplyResourceChangeResponse is the response from the provider about what the +// state of a resource is after planned changes have been applied. +type ApplyResourceChangeResponse struct { + // NewState is the provider's understanding of what the resource's + // state is after changes are applied, represented as a `DynamicValue`. + // See the documentation for `DynamicValue` for information about + // safely creating the `DynamicValue`. + // + // Any attribute, whether computed or not, that has a known value in + // the PlannedState in the ApplyResourceChangeRequest must be preserved + // exactly as it was in NewState. + // + // Any attribute in the PlannedState in the ApplyResourceChangeRequest + // that is unknown must take on a known value at this time. No unknown + // values are allowed in the NewState. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + NewState *DynamicValue + + // Private should be set to any state that the provider would like sent + // with requests for this resource. This state will be associated with + // the resource, but will not be considered when calculating diffs. + Private []byte + + // Diagnostics report errors or warnings related to applying changes to + // the requested resource. Returning an empty slice indicates a + // successful validation with no warnings or errors generated. + Diagnostics []*Diagnostic + + // UnsafeToUseLegacyTypeSystem should only be set by + // hashicorp/terraform-plugin-sdk. It modifies Terraform's behavior to + // work with the legacy expectations of that SDK. + // + // Nobody else should use this. Ever. For any reason. Just don't do it. + // + // We have to expose it here for terraform-plugin-sdk to be muxable, or + // we wouldn't even be including it in this type. Don't use it. It may + // go away or change behavior on you with no warning. It is + // explicitly unsupported and not part of our SemVer guarantees. + // + // Deprecated: Really, just don't use this, you don't need it. + UnsafeToUseLegacyTypeSystem bool +} + +// ImportResourceStateRequest is the request Terraform sends when it wants a +// provider to import one or more resources specified by an ID. +type ImportResourceStateRequest struct { + // TypeName is the type of resource Terraform wants to import. + TypeName string + + // ID is the user-supplied identifying information about the resource + // or resources. Providers decide and communicate to users the format + // for the ID, and use it to determine what resource or resources to + // import. + ID string +} + +// ImportResourceStateResponse is the response from the provider about the +// imported resources. +type ImportResourceStateResponse struct { + // ImportedResources are the resources the provider found and was able + // to import. 
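A hypothetical sketch of answering an import request with a single resource. The exampleResourceType schema type is invented, and a real provider would look the object up by req.ID before synthesizing state; only the tfprotov5 and tftypes calls are real APIs.

```go
// Assumed schema type for the hypothetical example resource.
var exampleResourceType = tftypes.Object{
	AttributeTypes: map[string]tftypes.Type{"id": tftypes.String},
}

func (p *exampleProvider) ImportResourceState(ctx context.Context, req *tfprotov5.ImportResourceStateRequest) (*tfprotov5.ImportResourceStateResponse, error) {
	state, err := tfprotov5.NewDynamicValue(exampleResourceType, tftypes.NewValue(exampleResourceType, map[string]tftypes.Value{
		"id": tftypes.NewValue(tftypes.String, req.ID),
	}))
	if err != nil {
		return nil, err
	}

	return &tfprotov5.ImportResourceStateResponse{
		ImportedResources: []*tfprotov5.ImportedResource{{
			TypeName: req.TypeName,
			State:    &state,
		}},
	}, nil
}
```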
+ ImportedResources []*ImportedResource + + // Diagnostics report errors or warnings related to importing the + // requested resource or resources. Returning an empty slice indicates + // a successful validation with no warnings or errors generated. + Diagnostics []*Diagnostic +} + +// ImportedResource represents a single resource that a provider has +// successfully imported into state. +type ImportedResource struct { + // TypeName is the type of resource that was imported. + TypeName string + + // State is the provider's understanding of the imported resource's + // state, represented as a `DynamicValue`. See the documentation for + // `DynamicValue` for information about safely creating the + // `DynamicValue`. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + State *DynamicValue + + // Private should be set to any state that the provider would like sent + // with requests for this resource. This state will be associated with + // the resource, but will not be considered when calculating diffs. + Private []byte +} + +// MoveResourceStateRequest is the request Terraform sends when it requests a +// provider to move the state of a source resource into the target resource. +// Target resource types generally must opt into accepting each source resource +// type since any transformation logic requires knowledge of the source state. +// +// This functionality is only supported in Terraform 1.8 and later. The provider +// must have enabled the MoveResourceState server capability to enable these +// requests. +type MoveResourceStateRequest struct { + // SourcePrivate is the private state of the source resource. + SourcePrivate []byte + + // SourceProviderAddress is the address of the provider for the source + // resource type. + SourceProviderAddress string + + // SourceSchemaVersion is the version of the source resource state. + SourceSchemaVersion int64 + + // SourceState is the raw state of the source resource. + // + // Only the underlying JSON field is populated. + SourceState *RawState + + // SourceTypeName is the source resource type for the move request. + SourceTypeName string + + // TargetTypeName is the target resource type for the move request. + TargetTypeName string +} + +// MoveResourceStateResponse is the response from the provider containing +// the moved state for the given resource. +type MoveResourceStateResponse struct { + // TargetPrivate is the target resource private state after the move. + TargetPrivate []byte + + // TargetState is the target resource state after the move. + TargetState *DynamicValue + + // Diagnostics report any warnings or errors related to moving the state. + Diagnostics []*Diagnostic +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go new file mode 100644 index 0000000000..9b860275f1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go @@ -0,0 +1,328 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +import "github.com/hashicorp/terraform-plugin-go/tftypes" + +const ( + // SchemaNestedBlockNestingModeInvalid indicates that the nesting mode + // for a nested block in the schema is invalid. This generally + // indicates a nested block that was created incorrectly. 
+ SchemaNestedBlockNestingModeInvalid SchemaNestedBlockNestingMode = 0 + + // SchemaNestedBlockNestingModeSingle indicates that the nested block + // should be treated as a single block with no labels, and there should + // not be more than one of these blocks in the containing block. The + // block will appear in config and state values as a tftypes.Object. + SchemaNestedBlockNestingModeSingle SchemaNestedBlockNestingMode = 1 + + // SchemaNestedBlockNestingModeList indicates that multiple instances + // of the nested block should be permitted, with no labels, and that + // the instances of the block should appear in config and state values + // as a tftypes.List, with an ElementType of tftypes.Object. + SchemaNestedBlockNestingModeList SchemaNestedBlockNestingMode = 2 + + // SchemaNestedBlockNestingModeSet indicates that multiple instances + // of the nested block should be permitted, with no labels, and that + // the instances of the block should appear in config and state values + // as a tftypes.Set, with an ElementType of tftypes.Object. + SchemaNestedBlockNestingModeSet SchemaNestedBlockNestingMode = 3 + + // SchemaNestedBlockNestingModeMap indicates that multiple instances of + // the nested block should be permitted, each with a single label, and + // that they should be represented in state and config values as a + // tftypes.Map, with an ElementType of tftypes.Object. The labels on + // the blocks will be used as the map keys. It is an error, therefore, + // to use the same label value on multiple block instances. + SchemaNestedBlockNestingModeMap SchemaNestedBlockNestingMode = 4 + + // SchemaNestedBlockNestingModeGroup indicates that the nested block + // should be treated as a single block with no labels, and there should + // not be more than one of these blocks in the containing block. The + // block will appear in config and state values as a tftypes.Object. + // + // SchemaNestedBlockNestingModeGroup is distinct from + // SchemaNestedBlockNestingModeSingle in that it guarantees that the + // block will never be null. If it is omitted from a config, the block + // will still be set, but its attributes and nested blocks will all be + // null. This is an exception to the rule that any block not set in the + // configuration cannot be set in config by the provider; this ensures + // the block is always considered "set" in the configuration, and is + // therefore settable in state by the provider. + SchemaNestedBlockNestingModeGroup SchemaNestedBlockNestingMode = 5 +) + +// Schema is how Terraform defines the shape of data. It can be thought of as +// the type information for resources, data sources, provider configuration, +// and all the other data that Terraform sends to providers. It is how +// providers express their requirements for that data. +type Schema struct { + // Version indicates which version of the schema this is. Versions + // should be monotonically incrementing numbers. When Terraform + // encounters a resource stored in state with a schema version lower + // than the schema version the provider advertises for that resource, + // Terraform requests the provider upgrade the resource's state. + Version int64 + + // Block is the root level of the schema, the collection of attributes + // and blocks that make up a resource, data source, provider, or other + // configuration block. + Block *SchemaBlock +} + +// ValueType returns the tftypes.Type for a Schema. +// +// If Schema is missing, an empty Object is returned. 
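An illustrative sketch (attribute names invented) of a small Schema and the tftypes.Type that ValueType derives from it, which is the type a provider would pass to DynamicValue.Unmarshal:

```go
// Assumes the tfprotov5 and tftypes imports from the earlier sketches.
func exampleSchemaValueType() tftypes.Type {
	schema := &tfprotov5.Schema{
		Version: 1,
		Block: &tfprotov5.SchemaBlock{
			Attributes: []*tfprotov5.SchemaAttribute{
				{Name: "id", Type: tftypes.String, Computed: true},
				{Name: "name", Type: tftypes.String, Required: true},
			},
		},
	}

	// Yields tftypes.Object{AttributeTypes: {"id": String, "name": String}},
	// the type used to unmarshal config and state DynamicValues.
	return schema.ValueType()
}
```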
+func (s *Schema) ValueType() tftypes.Type { + if s == nil { + return tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{}, + } + } + + return s.Block.ValueType() +} + +// SchemaBlock represents a block in a schema. Blocks are how Terraform creates +// groupings of attributes. In configurations, they don't use the equals sign +// and use dynamic instead of list comprehensions. +// +// Blocks will show up in state and config Values as a tftypes.Object, with the +// attributes and nested blocks defining the tftypes.Object's AttributeTypes. +type SchemaBlock struct { + // TODO: why do we have version in the block, too? + Version int64 + + // Attributes are the attributes defined within the block. These are + // the fields that users can set using the equals sign or reference in + // interpolations. + Attributes []*SchemaAttribute + + // BlockTypes are the nested blocks within the block. These are used to + // have blocks within blocks. + BlockTypes []*SchemaNestedBlock + + // Description offers an end-user friendly description of what the + // block is for. This will be surfaced to users through editor + // integrations, documentation generation, and other settings. + Description string + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // Deprecated, when set to true, indicates that a block should no + // longer be used and users should migrate away from it. At the moment + // it is unused and will have no impact, but it will be used in future + // tooling that is powered by provider schemas to enable richer user + // experiences. Providers should set it when deprecating blocks in + // preparation for these tools. + Deprecated bool +} + +// ValueType returns the tftypes.Type for a SchemaBlock. +// +// If SchemaBlock is missing, an empty Object is returned. +func (s *SchemaBlock) ValueType() tftypes.Type { + if s == nil { + return tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{}, + } + } + + attributeTypes := map[string]tftypes.Type{} + + for _, attribute := range s.Attributes { + if attribute == nil { + continue + } + + attributeType := attribute.ValueType() + + if attributeType == nil { + continue + } + + attributeTypes[attribute.Name] = attributeType + } + + for _, block := range s.BlockTypes { + if block == nil { + continue + } + + blockType := block.ValueType() + + if blockType == nil { + continue + } + + attributeTypes[block.TypeName] = blockType + } + + return tftypes.Object{ + AttributeTypes: attributeTypes, + } +} + +// SchemaAttribute represents a single attribute within a schema block. +// Attributes are the fields users can set in configuration using the equals +// sign, can assign to variables, can interpolate, and can use list +// comprehensions on. +type SchemaAttribute struct { + // Name is the name of the attribute. This is what the user will put + // before the equals sign to assign a value to this attribute. + Name string + + // Type indicates the type of data the attribute expects. See the + // documentation for the tftypes package for information on what types + // are supported and their behaviors. + Type tftypes.Type + + // Description offers an end-user friendly description of what the + // attribute is for. This will be surfaced to users through editor + // integrations, documentation generation, and other settings. 
+ Description string + + // Required, when set to true, indicates that this attribute must have + // a value assigned to it by the user or Terraform will throw an error. + Required bool + + // Optional, when set to true, indicates that the user does not need to + // supply a value for this attribute, but may. + Optional bool + + // Computed, when set to true, indicates that the provider will supply a + // value for this field. If Optional and Required are false and + // Computed is true, the user will not be able to specify a value for + // this field without Terraform throwing an error. If Optional is true + // and Computed is true, the user can specify a value for this field, + // but the provider may supply a value if the user does not. It is + // always a violation of Terraform's protocol to substitute a value for + // what the user entered, even if Computed is true. + Computed bool + + // Sensitive, when set to true, indicates that the contents of this + // attribute should be considered sensitive and not included in output. + // This does not encrypt or otherwise protect these values in state, it + // only offers protection from them showing up in plans or other + // output. + Sensitive bool + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // Deprecated, when set to true, indicates that an attribute should no + // longer be used and users should migrate away from it. At the moment + // it is unused and will have no impact, but it will be used in future + // tooling that is powered by provider schemas to enable richer user + // experiences. Providers should set it when deprecating attributes in + // preparation for these tools. + Deprecated bool +} + +// ValueType returns the tftypes.Type for a SchemaAttribute. +// +// If SchemaAttribute is missing, nil is returned. +func (s *SchemaAttribute) ValueType() tftypes.Type { + if s == nil { + return nil + } + + return s.Type +} + +// SchemaNestedBlock is a nested block within another block. See SchemaBlock +// for more information on blocks. +type SchemaNestedBlock struct { + // TypeName is the name of the block. It is what the user will specify + // when using the block in configuration. + TypeName string + + // Block is the block being nested inside another block. See the + // SchemaBlock documentation for more information on blocks. + Block *SchemaBlock + + // Nesting is the kind of nesting the block is using. Different nesting + // modes have different behaviors and imply different kinds of data. + Nesting SchemaNestedBlockNestingMode + + // MinItems is the minimum number of instances of this block that a + // user must specify or Terraform will return an error. + // + // MinItems can only be set for SchemaNestedBlockNestingModeList and + // SchemaNestedBlockNestingModeSet. SchemaNestedBlockNestingModeSingle + // can also set MinItems and MaxItems both to 1 to indicate that the + // block is required to be set. All other SchemaNestedBlockNestingModes + // must leave MinItems set to 0. + MinItems int64 + + // MaxItems is the maximum number of instances of this block that a + // user may specify before Terraform returns an error. + // + // MaxItems can only be set for SchemaNestedBlockNestingModeList and + // SchemaNestedBlockNestingModeSet. SchemaNestedBlockNestingModeSingle + // can also set MinItems and MaxItems both to 1 to indicate that the + // block is required to be set. All other SchemaNestedBlockNestingModes + // must leave MaxItems set to 0. 
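An illustrative sketch (block names invented) of the MinItems/MaxItems rules described above, one block per permitted pattern:

```go
// Single mode may set MinItems = MaxItems = 1 to require the block.
var requiredSingleBlock = &tfprotov5.SchemaNestedBlock{
	TypeName: "timeouts",
	Nesting:  tfprotov5.SchemaNestedBlockNestingModeSingle,
	MinItems: 1,
	MaxItems: 1,
	Block:    &tfprotov5.SchemaBlock{},
}

// List mode may bound instance counts; its ValueType is a tftypes.List
// with a tftypes.Object element type.
var boundedListBlock = &tfprotov5.SchemaNestedBlock{
	TypeName: "rule",
	Nesting:  tfprotov5.SchemaNestedBlockNestingModeList,
	MinItems: 0,
	MaxItems: 5,
	Block:    &tfprotov5.SchemaBlock{},
}
```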
+ MaxItems int64 +} + +// ValueType returns the tftypes.Type for a SchemaNestedBlock. +// +// If SchemaNestedBlock is missing or the Nesting mode is invalid, nil is +// returned. +func (s *SchemaNestedBlock) ValueType() tftypes.Type { + if s == nil { + return nil + } + + blockType := s.Block.ValueType() + + switch s.Nesting { + case SchemaNestedBlockNestingModeGroup: + return blockType + case SchemaNestedBlockNestingModeList: + return tftypes.List{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeMap: + return tftypes.Map{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeSet: + return tftypes.Set{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeSingle: + return blockType + default: + return nil + } +} + +// SchemaNestedBlockNestingMode indicates the nesting mode for +// SchemaNestedBlocks. The nesting mode determines the number of instances of +// the block allowed, how many labels the block expects, and the data structure +// used for the block in config and state values. +type SchemaNestedBlockNestingMode int32 + +func (s SchemaNestedBlockNestingMode) String() string { + switch s { + case 0: + return "INVALID" + case 1: + return "SINGLE" + case 2: + return "LIST" + case 3: + return "SET" + case 4: + return "MAP" + case 5: + return "GROUP" + } + return "UNKNOWN" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server_capabilities.go new file mode 100644 index 0000000000..f5065fd877 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server_capabilities.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +// ServerCapabilities allows providers to communicate optionally supported +// protocol features, such as forward-compatible Terraform behavior changes. +// +// This information is used in GetProviderSchemaResponse as capabilities are +// static features which must be known upfront in the provider server. +type ServerCapabilities struct { + // GetProviderSchemaOptional signals that this provider does not require + // having the GetProviderSchema RPC called first to operate normally. This + // means the caller can use a cached copy of the provider's schema instead. + GetProviderSchemaOptional bool + + // MoveResourceState signals that a provider supports the MoveResourceState + // RPC. + MoveResourceState bool + + // PlanDestroy signals that a provider expects a call to + // PlanResourceChange when a resource is going to be destroyed. This is + // opt-in to prevent unexpected errors or panics since the + // ProposedNewState in PlanResourceChangeRequest will be a null value. + PlanDestroy bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/state.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/state.go new file mode 100644 index 0000000000..08c118958e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/state.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +import ( + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// ErrUnknownRawStateType is returned when a RawState has no Flatmap or JSON +// bytes set. This should never be returned during the normal operation of a +// provider, and indicates one of the following: +// +// 1. 
terraform-plugin-go is out of sync with the protocol and should be +// updated. +// +// 2. terraform-plugin-go has a bug. +// +// 3. The `RawState` was generated or modified by something other than +// terraform-plugin-go and is no longer a valid value. +var ErrUnknownRawStateType = errors.New("RawState had no JSON or flatmap data set") + +// RawState is the raw, undecoded state for providers to upgrade. It is +// undecoded as Terraform, for whatever reason, doesn't have the previous +// schema available to it, and so cannot decode the state itself and pushes +// that responsibility off onto providers. +// +// It is safe to assume that Flatmap can be ignored for any state written by +// Terraform 0.12.0 or higher, but it is not safe to assume that all states +// written by 0.12.0 or higher will be in JSON format; future versions may +// switch to an alternate encoding for states. +type RawState struct { + JSON []byte + Flatmap map[string]string +} + +// Unmarshal returns a `tftypes.Value` that represents the information +// contained in the RawState in an easy-to-interact-with way. It is the +// main purpose of the RawState type, and is how provider developers should +// obtain state values from the UpgradeResourceState RPC call. +// +// Pass in the type you want the `Value` to be interpreted as. Terraform's type +// system encodes in a lossy manner, meaning the type information is not +// preserved losslessly when going over the wire. Sets, lists, and tuples all +// look the same. Objects and maps all look the same, as well, as do +// user-specified values when DynamicPseudoType is used in the schema. +// Fortunately, the provider should already know the type; it should be the +// type of the schema, or DynamicPseudoType if that's what's in the schema. +// `Unmarshal` will then parse the value as though it belongs to that type, if +// possible, and return a `tftypes.Value` with the appropriate information. If +// the data can't be interpreted as that type, an error will be returned saying +// so. In these cases, double check to make sure the schema is declaring the +// same type being passed into `Unmarshal`. +// +// In the event an ErrUnknownRawStateType is returned, one of three things +// has happened: +// +// 1. terraform-plugin-go is out of date and out of sync with the protocol, and +// an issue should be opened on its repo to get it updated. +// +// 2. terraform-plugin-go has a bug somewhere, and an issue should be opened on +// its repo to get it fixed. +// +// 3. The provider or a dependency has modified the `RawState` in an +// unsupported way, or has created one from scratch, and should treat it as +// opaque and not modify it, only calling `Unmarshal` on `RawState`s received +// from RPC requests. +// +// State files written before Terraform 0.12 that haven't been upgraded yet +// cannot be unmarshaled, and must have their Flatmap property read directly. +func (s RawState) Unmarshal(typ tftypes.Type) (tftypes.Value, error) { + if s.JSON != nil { + return tftypes.ValueFromJSON(s.JSON, typ) //nolint:staticcheck + } + if s.Flatmap != nil { + return tftypes.Value{}, fmt.Errorf("flatmap states cannot be unmarshaled, only states written by Terraform 0.12 and higher can be unmarshaled") + } + return tftypes.Value{}, ErrUnknownRawStateType +} + +// UnmarshalOpts contains options that can be used to modify the behaviour when +// unmarshalling. Currently, this only contains a struct for opts for JSON but +// could have a field for Flatmap in the future. 
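Continuing the hypothetical sketch: RawState.Unmarshal used inside an UpgradeResourceState handler, as the comments above recommend. Using the current schema type directly and skipping real per-version migrations are simplifying assumptions; exampleResourceType comes from the earlier sketch.

```go
func (p *exampleProvider) UpgradeResourceState(ctx context.Context, req *tfprotov5.UpgradeResourceStateRequest) (*tfprotov5.UpgradeResourceStateResponse, error) {
	resp := &tfprotov5.UpgradeResourceStateResponse{}

	// Decode the raw JSON state as the current schema's type.
	val, err := req.RawState.Unmarshal(exampleResourceType)
	if err != nil {
		resp.Diagnostics = append(resp.Diagnostics, &tfprotov5.Diagnostic{
			Severity: tfprotov5.DiagnosticSeverityError,
			Summary:  "Unable to upgrade resource state",
			Detail:   err.Error(),
		})
		return resp, nil
	}

	// Migrations keyed off req.Version would transform val here.

	upgraded, err := tfprotov5.NewDynamicValue(exampleResourceType, val)
	if err != nil {
		return resp, err
	}
	resp.UpgradedState = &upgraded

	return resp, nil
}
```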
+type UnmarshalOpts struct { + ValueFromJSONOpts tftypes.ValueFromJSONOpts +} + +// UnmarshalWithOpts is identical to Unmarshal but also accepts an UnmarshalOpts which contains +// options that can be used to modify the behaviour when unmarshalling JSON or Flatmap. +func (s RawState) UnmarshalWithOpts(typ tftypes.Type, opts UnmarshalOpts) (tftypes.Value, error) { + if s.JSON != nil { + return tftypes.ValueFromJSONWithOpts(s.JSON, typ, opts.ValueFromJSONOpts) //nolint:staticcheck + } + if s.Flatmap != nil { + return tftypes.Value{}, fmt.Errorf("flatmap states cannot be unmarshaled, only states written by Terraform 0.12 and higher can be unmarshaled") + } + return tftypes.Value{}, ErrUnknownRawStateType +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/string_kind.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/string_kind.go new file mode 100644 index 0000000000..69169b678a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/string_kind.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +const ( + // StringKindPlain indicates a string is plaintext, and should be + // interpreted as having no formatting information. + StringKindPlain StringKind = 0 + + // StringKindMarkdown indicates a string is markdown-formatted, and + // should be rendered using a Markdown renderer to correctly display + // its formatting. + StringKindMarkdown StringKind = 1 +) + +// StringKind indicates a formatting or encoding scheme for a string. +type StringKind int32 + +func (s StringKind) String() string { + switch s { + case 0: + return "PLAIN" + case 1: + return "MARKDOWN" + } + return "UNKNOWN" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/doc.go new file mode 100644 index 0000000000..96089e6ba1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/doc.go @@ -0,0 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package tf5server implements a server implementation to run +// tfprotov5.ProviderServers as gRPC servers. +// +// Providers will likely be calling tf5server.Serve from their main function to +// start the server so Terraform can connect to it. +package tf5server diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/plugin.go new file mode 100644 index 0000000000..602246aa3f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/plugin.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf5server + +import ( + "context" + "errors" + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" + "google.golang.org/grpc" +) + +// GRPCProviderPlugin is an implementation of the +// github.com/hashicorp/go-plugin#Plugin and +// github.com/hashicorp/go-plugin#GRPCPlugin interfaces, indicating how to +// serve tfprotov5.ProviderServers as gRPC plugins for go-plugin. +type GRPCProviderPlugin struct { + GRPCProvider func() tfprotov5.ProviderServer + Opts []ServeOpt + Name string +} + +// Server always returns an error; we're only implementing the GRPCPlugin +// interface, not the Plugin interface. 
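The package documentation above describes providers calling tf5server.Serve from main. A minimal, hypothetical entry point for the exampleProvider sketched earlier might look like this; the provider address is invented.

```go
package main

import (
	"log"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
	"github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server"
)

func main() {
	// Serve blocks, handling the go-plugin handshake and gRPC serving
	// until Terraform disconnects.
	err := tf5server.Serve(
		"registry.terraform.io/examplecorp/example",
		func() tfprotov5.ProviderServer { return &exampleProvider{} },
	)
	if err != nil {
		log.Fatal(err)
	}
}
```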
+func (p *GRPCProviderPlugin) Server(*plugin.MuxBroker) (interface{}, error) { + return nil, errors.New("terraform-plugin-go only implements gRPC servers") +} + +// Client always returns an error; we're only implementing the GRPCPlugin +// interface, not the Plugin interface. +func (p *GRPCProviderPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("terraform-plugin-go only implements gRPC servers") +} + +// GRPCClient always returns an error; we're only implementing the server half +// of the interface. +func (p *GRPCProviderPlugin) GRPCClient(context.Context, *plugin.GRPCBroker, *grpc.ClientConn) (interface{}, error) { + return nil, errors.New("terraform-plugin-go only implements gRPC servers") +} + +// GRPCServer registers the gRPC provider server with the gRPC server that +// go-plugin is standing up. +func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + tfplugin5.RegisterProviderServer(s, New(p.Name, p.GRPCProvider(), p.Opts...)) + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go new file mode 100644 index 0000000000..feb359620a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go @@ -0,0 +1,1043 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf5server + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/signal" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "google.golang.org/grpc" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-log/tfsdklog" + "github.com/mitchellh/go-testing-interface" +) + +const ( + // protocolVersionMajor represents the major version number of the protocol + // being served. This is used during the plugin handshake to validate the + // server and client are compatible. + // + // In the future, it may be possible to include this information directly + // in the protocol buffers rather than recreating a constant here. + protocolVersionMajor uint = 5 + + // protocolVersionMinor represents the minor version number of the protocol + // being served. Backwards compatible additions are possible in the + // protocol definitions, which is when this may be increased. While it is + // not used in plugin negotiation, it can be helpful to include this value + // for debugging, such as in logs. + // + // In the future, it may be possible to include this information directly + // in the protocol buffers rather than recreating a constant here. + protocolVersionMinor uint = 4 +) + +// protocolVersion represents the combined major and minor version numbers of +// the protocol being served. 
+var protocolVersion string = fmt.Sprintf("%d.%d", protocolVersionMajor, protocolVersionMinor) + +const ( + // envTfReattachProviders is the environment variable used by Terraform CLI + // to directly connect to already running provider processes, such as those + // being inspected by debugging processes. When connecting to providers in + // this manner, Terraform CLI disables certain plugin handshake checks and + // will not stop the provider process. + envTfReattachProviders = "TF_REATTACH_PROVIDERS" +) + +const ( + // grpcMaxMessageSize is the maximum gRPC send and receive message sizes + // for the server. + // + // This 256MB value is arbitrarily raised from the default message sizes of + // 4MB to account for advanced use cases, but arbitrarily lowered from + // MaxInt32 (or similar) to prevent incorrect server implementations from + // exhausting resources in common execution environments. Receiving a gRPC + // message size error is preferable for troubleshooting over determining + // why an execution environment may have terminated the process via its + // memory management processes, such as oom-killer on Linux. + // + // This value is kept constant, rather than made configurable, + // since there are many factors that influence message size, such as + // Terraform configuration and state data. If larger message size use + // cases appear, other gRPC options should be explored, such as + // implementing streaming RPCs and messages. + grpcMaxMessageSize = 256 << 20 +) + +// ServeOpt is an interface for defining options that can be passed to the +// Serve function. Each implementation modifies the ServeConfig being +// generated. A slice of ServeOpts, cumulatively applied, then renders a full +// ServeConfig. +type ServeOpt interface { + ApplyServeOpt(*ServeConfig) error +} + +// ServeConfig contains the configured options for how a provider should be +// served. +type ServeConfig struct { + logger hclog.Logger + debugCtx context.Context + debugCh chan *plugin.ReattachConfig + debugCloseCh chan struct{} + + managedDebug bool + managedDebugReattachConfigTimeout time.Duration + managedDebugStopSignals []os.Signal + + disableLogInitStderr bool + disableLogLocation bool + useLoggingSink testing.T + envVar string +} + +type serveConfigFunc func(*ServeConfig) error + +func (s serveConfigFunc) ApplyServeOpt(in *ServeConfig) error { + return s(in) +} + +// WithDebug returns a ServeOpt that will set the server into debug mode, using +// the passed options to populate the go-plugin ServeTestConfig. +// +// This is an advanced ServeOpt that assumes the caller will fully manage the +// reattach configuration and server lifecycle. Refer to WithManagedDebug for a +// ServeOpt that handles common use cases, such as implementing provider main +// functions. +func WithDebug(ctx context.Context, config chan *plugin.ReattachConfig, closeCh chan struct{}) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + if in.managedDebug { + return errors.New("cannot set both WithDebug and WithManagedDebug") + } + + in.debugCtx = ctx + in.debugCh = config + in.debugCloseCh = closeCh + return nil + }) +} + +// WithManagedDebug returns a ServeOpt that will start the server in debug +// mode, managing the reattach configuration handling and server lifecycle. +// Reattach configuration is output to stdout with human-friendly instructions. +// By default, the server can be stopped with os.Interrupt (SIGINT; ctrl-c).
+// +// Refer to the optional WithManagedDebugStopSignals and +// WithManagedDebugReattachConfigTimeout ServeOpt for additional configuration. +// +// The reattach configuration output of this handling is not protected by +// compatibility guarantees. Use the WithDebug ServeOpt for advanced use cases. +func WithManagedDebug() ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + if in.debugCh != nil { + return errors.New("cannot set both WithDebug and WithManagedDebug") + } + + in.managedDebug = true + return nil + }) +} + +// WithManagedDebugStopSignals returns a ServeOpt that will set the stop signals for a +// debug managed process (WithManagedDebug). When not configured, os.Interrupt +// (SIGINT; Ctrl-c) will stop the process. +func WithManagedDebugStopSignals(signals []os.Signal) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.managedDebugStopSignals = signals + return nil + }) +} + +// WithManagedDebugReattachConfigTimeout returns a ServeOpt that will set the timeout +// for a debug managed process to start and return its reattach configuration. +// When not configured, 2 seconds is the default. +func WithManagedDebugReattachConfigTimeout(timeout time.Duration) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.managedDebugReattachConfigTimeout = timeout + return nil + }) +} + +// WithGoPluginLogger returns a ServeOpt that will set the logger that +// go-plugin should use to log messages. +func WithGoPluginLogger(logger hclog.Logger) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.logger = logger + return nil + }) +} + +// WithLoggingSink returns a ServeOpt that will enable the logging sink, which +// is used in test frameworks to control where terraform-plugin-log output is +// written and at what levels, mimicking Terraform's logging sink behaviors. +func WithLoggingSink(t testing.T) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.useLoggingSink = t + return nil + }) +} + +// WithoutLogStderrOverride returns a ServeOpt that will disable the +// terraform-plugin-log behavior of logging to the stderr that existed at +// startup, not the stderr that exists when the logging statement is called. +func WithoutLogStderrOverride() ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.disableLogInitStderr = true + return nil + }) +} + +// WithoutLogLocation returns a ServeOpt that will exclude file names and line +// numbers from log output for the terraform-plugin-log logs generated by the +// SDKs and provider. +func WithoutLogLocation() ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.disableLogLocation = true + return nil + }) +} + +// WithLogEnvVarName sets the name of the provider for the purposes of the +// logging environment variable that controls the provider's log level. It is +// the part following TF_LOG_PROVIDER_ and defaults to the name part of the +// provider's registry address, or disabled if it can't parse the provider's +// registry address. Name must only contain letters, numbers, and hyphens. +func WithLogEnvVarName(name string) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + if !regexp.MustCompile(`^[a-zA-Z0-9-]+$`).MatchString(name) { + return errors.New("environment variable names can only contain a-z, A-Z, 0-9, and -") + } + in.envVar = name + return nil + }) +} + +// Serve starts a tfprotov5.ProviderServer serving, ready for Terraform to +// connect to it. 
The name passed in should be the fully qualified name that +// users will enter in the source field of the required_providers block, like +// "registry.terraform.io/hashicorp/time". +// +// Zero or more options to configure the server may also be passed. The default +// invocation is sufficient, but if the provider wants to run in debug mode or +// modify the logger that go-plugin is using, ServeOpts can be specified to +// support that. +func Serve(name string, serverFactory func() tfprotov5.ProviderServer, opts ...ServeOpt) error { + // Defaults + conf := ServeConfig{ + managedDebugReattachConfigTimeout: 2 * time.Second, + managedDebugStopSignals: []os.Signal{os.Interrupt}, + } + + for _, opt := range opts { + err := opt.ApplyServeOpt(&conf) + if err != nil { + return err + } + } + + serveConfig := &plugin.ServeConfig{ + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: protocolVersionMajor, + MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", + }, + Plugins: plugin.PluginSet{ + "provider": &GRPCProviderPlugin{ + Name: name, + Opts: opts, + GRPCProvider: serverFactory, + }, + }, + GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { + opts = append(opts, grpc.MaxRecvMsgSize(grpcMaxMessageSize)) + opts = append(opts, grpc.MaxSendMsgSize(grpcMaxMessageSize)) + + return grpc.NewServer(opts...) + }, + } + + if conf.logger != nil { + serveConfig.Logger = conf.logger + } + + if conf.managedDebug { + ctx, cancel := context.WithCancel(context.Background()) + signalCh := make(chan os.Signal, len(conf.managedDebugStopSignals)) + + signal.Notify(signalCh, conf.managedDebugStopSignals...) + + defer func() { + signal.Stop(signalCh) + cancel() + }() + + go func() { + select { + case <-signalCh: + cancel() + case <-ctx.Done(): + } + }() + + conf.debugCh = make(chan *plugin.ReattachConfig) + conf.debugCloseCh = make(chan struct{}) + conf.debugCtx = ctx + } + + if conf.debugCh != nil { + serveConfig.Test = &plugin.ServeTestConfig{ + Context: conf.debugCtx, + ReattachConfigCh: conf.debugCh, + CloseCh: conf.debugCloseCh, + } + } + + if !conf.managedDebug { + plugin.Serve(serveConfig) + return nil + } + + go plugin.Serve(serveConfig) + + var pluginReattachConfig *plugin.ReattachConfig + + select { + case pluginReattachConfig = <-conf.debugCh: + case <-time.After(conf.managedDebugReattachConfigTimeout): + return errors.New("timeout waiting on reattach configuration") + } + + if pluginReattachConfig == nil { + return errors.New("nil reattach configuration received") + } + + // Duplicate implementation is required because the go-plugin + // ReattachConfig.Addr implementation is not friendly for JSON encoding + // and to avoid importing terraform-exec. 
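For context, a sketch (not part of this diff) of how a provider main function typically drives Serve, opting into the managed debug mode above behind a flag; the provider address and the newProviderServer constructor below are placeholders, not real identifiers from this codebase:

	package main

	import (
		"flag"
		"log"

		"github.com/hashicorp/terraform-plugin-go/tfprotov5"
		"github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server"
	)

	func main() {
		debug := flag.Bool("debug", false, "run with managed debug support")
		flag.Parse()

		var opts []tf5server.ServeOpt
		if *debug {
			// Prints a TF_REATTACH_PROVIDERS value and blocks until stopped.
			opts = append(opts, tf5server.WithManagedDebug())
		}

		// newProviderServer stands in for the provider's own
		// tfprotov5.ProviderServer constructor.
		err := tf5server.Serve(
			"registry.terraform.io/example/example",
			func() tfprotov5.ProviderServer { return newProviderServer() },
			opts...,
		)
		if err != nil {
			log.Fatal(err)
		}
	}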
+ type reattachConfigAddr struct { + Network string + String string + } + + type reattachConfig struct { + Protocol string + ProtocolVersion int + Pid int + Test bool + Addr reattachConfigAddr + } + + reattachBytes, err := json.Marshal(map[string]reattachConfig{ + name: { + Protocol: string(pluginReattachConfig.Protocol), + ProtocolVersion: pluginReattachConfig.ProtocolVersion, + Pid: pluginReattachConfig.Pid, + Test: pluginReattachConfig.Test, + Addr: reattachConfigAddr{ + Network: pluginReattachConfig.Addr.Network(), + String: pluginReattachConfig.Addr.String(), + }, + }, + }) + + if err != nil { + return fmt.Errorf("Error building reattach string: %w", err) + } + + reattachStr := string(reattachBytes) + + // This is currently intended to be executed via a provider main function + // and meant to be human-friendly, so it is output directly to stdout. + fmt.Printf("Provider started. To attach Terraform CLI, set the %s environment variable with the following:\n\n", envTfReattachProviders) + + switch runtime.GOOS { + case "windows": + fmt.Printf("\tCommand Prompt:\tset \"%s=%s\"\n", envTfReattachProviders, reattachStr) + fmt.Printf("\tPowerShell:\t$env:%s='%s'\n", envTfReattachProviders, strings.ReplaceAll(reattachStr, `'`, `''`)) + default: + fmt.Printf("\t%s='%s'\n", envTfReattachProviders, strings.ReplaceAll(reattachStr, `'`, `'"'"'`)) + } + + fmt.Println("") + + // Wait for the server to be done. + <-conf.debugCloseCh + + return nil +} + +type server struct { + downstream tfprotov5.ProviderServer + tfplugin5.UnimplementedProviderServer + + stopMu sync.Mutex + stopCh chan struct{} + + tflogSDKOpts tfsdklog.Options + tflogOpts tflog.Options + useTFLogSink bool + testHandle testing.T + name string + + // protocolDataDir is a directory to store raw protocol data files for + // debugging purposes. + protocolDataDir string + + // protocolVersion is the protocol version for the server. + protocolVersion string +} + +func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struct{}) { + select { + case <-ctx.Done(): + return + case <-stopCh: + cancel() + } +} + +// stoppableContext returns a context that wraps `ctx` but will be canceled +// when the server's stopCh is closed. +// +// This is used to cancel all in-flight contexts when the Stop method of the +// server is called. +func (s *server) stoppableContext(ctx context.Context) context.Context { + s.stopMu.Lock() + defer s.stopMu.Unlock() + + stoppable, cancel := context.WithCancel(ctx) + go mergeStop(stoppable, cancel, s.stopCh) + return stoppable +} + +// loggingContext returns a context that wraps `ctx` and has +// terraform-plugin-log loggers injected. +func (s *server) loggingContext(ctx context.Context) context.Context { + if s.useTFLogSink { + ctx = tfsdklog.RegisterTestSink(ctx, s.testHandle) + } + + ctx = logging.InitContext(ctx, s.tflogSDKOpts, s.tflogOpts) + ctx = logging.RequestIdContext(ctx) + ctx = logging.ProviderAddressContext(ctx, s.name) + ctx = logging.ProtocolVersionContext(ctx, s.protocolVersion) + + return ctx +} + +// New converts a tfprotov5.ProviderServer into a server capable of handling +// Terraform protocol requests and issuing responses using the gRPC types.
+func New(name string, serve tfprotov5.ProviderServer, opts ...ServeOpt) tfplugin5.ProviderServer { + var conf ServeConfig + for _, opt := range opts { + err := opt.ApplyServeOpt(&conf) + if err != nil { + // this should never happen, we already executed all + // this code as part of Serve + panic(err) + } + } + var sdkOptions tfsdklog.Options + var options tflog.Options + if !conf.disableLogInitStderr { + sdkOptions = append(sdkOptions, tfsdklog.WithStderrFromInit()) + options = append(options, tfsdklog.WithStderrFromInit()) + } + if conf.disableLogLocation { + sdkOptions = append(sdkOptions, tfsdklog.WithoutLocation()) + options = append(options, tflog.WithoutLocation()) + } + envVar := conf.envVar + if envVar == "" { + envVar = logging.ProviderLoggerName(name) + } + if envVar != "" { + options = append(options, tfsdklog.WithLogName(envVar), tflog.WithLevelFromEnv(logging.EnvTfLogProvider, envVar)) + } + return &server{ + downstream: serve, + stopCh: make(chan struct{}), + tflogOpts: options, + tflogSDKOpts: sdkOptions, + name: name, + useTFLogSink: conf.useLoggingSink != nil, + testHandle: conf.useLoggingSink, + protocolDataDir: os.Getenv(logging.EnvTfLogSdkProtoDataDir), + protocolVersion: protocolVersion, + } +} + +func (s *server) GetMetadata(ctx context.Context, protoReq *tfplugin5.GetMetadata_Request) (*tfplugin5.GetMetadata_Response, error) { + rpc := "GetMetadata" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.GetMetadataRequest(protoReq) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.GetMetadata(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + tf5serverlogging.ServerCapabilities(ctx, resp.ServerCapabilities) + + protoResp := toproto.GetMetadata_Response(resp) + + return protoResp, nil +} + +func (s *server) GetSchema(ctx context.Context, protoReq *tfplugin5.GetProviderSchema_Request) (*tfplugin5.GetProviderSchema_Response, error) { + rpc := "GetProviderSchema" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.GetProviderSchemaRequest(protoReq) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.GetProviderSchema(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + tf5serverlogging.ServerCapabilities(ctx, resp.ServerCapabilities) + + protoResp := toproto.GetProviderSchema_Response(resp) + + return protoResp, nil +} + +func (s *server) PrepareProviderConfig(ctx context.Context, protoReq *tfplugin5.PrepareProviderConfig_Request) (*tfplugin5.PrepareProviderConfig_Response, error) { + rpc := "PrepareProviderConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.PrepareProviderConfigRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) 
+ + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.PrepareProviderConfig(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PreparedConfig", resp.PreparedConfig) + + protoResp := toproto.PrepareProviderConfig_Response(resp) + + return protoResp, nil +} + +func (s *server) Configure(ctx context.Context, protoReq *tfplugin5.Configure_Request) (*tfplugin5.Configure_Response, error) { + rpc := "Configure" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + req := fromproto.ConfigureProviderRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ConfigureProvider(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + protoResp := toproto.Configure_Response(resp) + + return protoResp, nil +} + +// stop closes the stopCh associated with the server and replaces it with a new +// one. +// +// This causes all in-flight requests for the server to have their contexts +// canceled. +func (s *server) stop() { + s.stopMu.Lock() + defer s.stopMu.Unlock() + + close(s.stopCh) + s.stopCh = make(chan struct{}) +} + +func (s *server) Stop(ctx context.Context, protoReq *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) { + rpc := "Stop" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.StopProviderRequest(protoReq) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.StopProvider(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, nil) + logging.ProtocolTrace(ctx, "Closing all our contexts") + s.stop() + logging.ProtocolTrace(ctx, "Closed all our contexts") + + protoResp := toproto.Stop_Response(resp) + + return protoResp, nil +} + +func (s *server) ValidateDataSourceConfig(ctx context.Context, protoReq *tfplugin5.ValidateDataSourceConfig_Request) (*tfplugin5.ValidateDataSourceConfig_Response, error) { + rpc := "ValidateDataSourceConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.DataSourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ValidateDataSourceConfigRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ValidateDataSourceConfig(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + protoResp := 
toproto.ValidateDataSourceConfig_Response(resp) + + return protoResp, nil +} + +func (s *server) ReadDataSource(ctx context.Context, protoReq *tfplugin5.ReadDataSource_Request) (*tfplugin5.ReadDataSource_Response, error) { + rpc := "ReadDataSource" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.DataSourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ReadDataSourceRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", req.ProviderMeta) + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ReadDataSource(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "State", resp.State) + + protoResp := toproto.ReadDataSource_Response(resp) + + return protoResp, nil +} + +func (s *server) ValidateResourceTypeConfig(ctx context.Context, protoReq *tfplugin5.ValidateResourceTypeConfig_Request) (*tfplugin5.ValidateResourceTypeConfig_Response, error) { + rpc := "ValidateResourceTypeConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ValidateResourceTypeConfigRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ValidateResourceTypeConfig(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + protoResp := toproto.ValidateResourceTypeConfig_Response(resp) + + return protoResp, nil +} + +func (s *server) UpgradeResourceState(ctx context.Context, protoReq *tfplugin5.UpgradeResourceState_Request) (*tfplugin5.UpgradeResourceState_Response, error) { + rpc := "UpgradeResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.UpgradeResourceStateRequest(protoReq) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.UpgradeResourceState(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "UpgradedState", resp.UpgradedState) + + protoResp := toproto.UpgradeResourceState_Response(resp) + + return protoResp, nil +} + +func (s *server) ReadResource(ctx context.Context, protoReq *tfplugin5.ReadResource_Request) (*tfplugin5.ReadResource_Response, error) { + rpc := "ReadResource" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = 
logging.ResourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ReadResourceRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "CurrentState", req.CurrentState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", req.ProviderMeta) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Request", "Private", req.Private) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ReadResource(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response", "Private", resp.Private) + + protoResp := toproto.ReadResource_Response(resp) + + return protoResp, nil +} + +func (s *server) PlanResourceChange(ctx context.Context, protoReq *tfplugin5.PlanResourceChange_Request) (*tfplugin5.PlanResourceChange_Response, error) { + rpc := "PlanResourceChange" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.PlanResourceChangeRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PriorState", req.PriorState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProposedNewState", req.ProposedNewState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", req.ProviderMeta) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Request", "PriorPrivate", req.PriorPrivate) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.PlanResourceChange(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PlannedState", resp.PlannedState) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response", "PlannedPrivate", resp.PlannedPrivate) + + protoResp := toproto.PlanResourceChange_Response(resp) + + return protoResp, nil +} + +func (s *server) ApplyResourceChange(ctx context.Context, protoReq *tfplugin5.ApplyResourceChange_Request) (*tfplugin5.ApplyResourceChange_Response, error) { + rpc := "ApplyResourceChange" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ApplyResourceChangeRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PlannedState", req.PlannedState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PriorState", req.PriorState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", 
"ProviderMeta", req.ProviderMeta) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Request", "PlannedPrivate", req.PlannedPrivate) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ApplyResourceChange(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response", "Private", resp.Private) + + protoResp := toproto.ApplyResourceChange_Response(resp) + + return protoResp, nil +} + +func (s *server) ImportResourceState(ctx context.Context, protoReq *tfplugin5.ImportResourceState_Request) (*tfplugin5.ImportResourceState_Response, error) { + rpc := "ImportResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ImportResourceStateRequest(protoReq) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ImportResourceState(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + for _, importedResource := range resp.ImportedResources { + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "State", importedResource.State) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "Private", importedResource.Private) + } + + protoResp := toproto.ImportResourceState_Response(resp) + + return protoResp, nil +} + +func (s *server) MoveResourceState(ctx context.Context, protoReq *tfplugin5.MoveResourceState_Request) (*tfplugin5.MoveResourceState_Response, error) { + rpc := "MoveResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, protoReq.TargetTypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + // Remove this check and error in preference of + // s.downstream.MoveResourceState below once ResourceServer interface + // implements the MoveResourceState method. + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/363 + // nolint:staticcheck + resourceServerWMRS, ok := s.downstream.(tfprotov5.ResourceServerWithMoveResourceState) + + if !ok { + logging.ProtocolError(ctx, "ProviderServer does not implement ResourceServerWithMoveResourceState") + + protoResp := &tfplugin5.MoveResourceState_Response{ + Diagnostics: []*tfplugin5.Diagnostic{ + { + Severity: tfplugin5.Diagnostic_ERROR, + Summary: "Provider Move Resource State Not Implemented", + Detail: "A MoveResourceState call was received by the provider, however the provider does not implement the call. 
" + + "Either upgrade the provider to a version that implements move resource state support or this is a bug in Terraform that should be reported to the Terraform maintainers.", + }, + }, + } + + return protoResp, nil + } + + req := fromproto.MoveResourceStateRequest(protoReq) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/363 + // resp, err := s.downstream.MoveResourceState(ctx, req) + resp, err := resourceServerWMRS.MoveResourceState(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "TargetState", resp.TargetState) + + protoResp := toproto.MoveResourceState_Response(resp) + + return protoResp, nil +} + +func (s *server) CallFunction(ctx context.Context, protoReq *tfplugin5.CallFunction_Request) (*tfplugin5.CallFunction_Response, error) { + rpc := "CallFunction" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + // Remove this check and error in preference of s.downstream.CallFunction + // below once ProviderServer interface requires FunctionServer. + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + functionServer, ok := s.downstream.(tfprotov5.FunctionServer) + + if !ok { + logging.ProtocolError(ctx, "ProviderServer does not implement FunctionServer") + + text := "Provider Functions Not Implemented: A provider-defined function call was received by the provider, however the provider does not implement functions. " + + "Either upgrade the provider to a version that implements provider-defined functions or this is a bug in Terraform that should be reported to the Terraform maintainers." + + protoResp := &tfplugin5.CallFunction_Response{ + Error: &tfplugin5.FunctionError{ + Text: text, + }, + } + + return protoResp, nil + } + + req := fromproto.CallFunctionRequest(protoReq) + + for position, argument := range req.Arguments { + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", fmt.Sprintf("Arguments_%d", position), argument) + } + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + // resp, err := s.downstream.CallFunction(ctx, req) + resp, err := functionServer.CallFunction(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]any{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponseWithError(ctx, resp.Error) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "Result", resp.Result) + + protoResp := toproto.CallFunction_Response(resp) + + return protoResp, nil +} + +func (s *server) GetFunctions(ctx context.Context, protoReq *tfplugin5.GetFunctions_Request) (*tfplugin5.GetFunctions_Response, error) { + rpc := "GetFunctions" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + // Remove this check and response in preference of s.downstream.GetFunctions + // below once ProviderServer interface requires FunctionServer. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + functionServer, ok := s.downstream.(tfprotov5.FunctionServer) + + if !ok { + logging.ProtocolWarn(ctx, "ProviderServer does not implement FunctionServer") + + protoResp := &tfplugin5.GetFunctions_Response{ + Functions: map[string]*tfplugin5.Function{}, + } + + return protoResp, nil + } + + req := fromproto.GetFunctionsRequest(protoReq) + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + // resp, err := s.downstream.GetFunctions(ctx, req) + resp, err := functionServer.GetFunctions(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]any{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + protoResp := toproto.GetFunctions_Response(resp) + + return protoResp, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/data_source.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/data_source.go new file mode 100644 index 0000000000..ebb2cbd3dc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/data_source.go @@ -0,0 +1,108 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +import ( + "context" +) + +// DataSourceMetadata describes metadata for a data resource in the GetMetadata +// RPC. +type DataSourceMetadata struct { + // TypeName is the name of the data resource. + TypeName string +} + +// DataSourceServer is an interface containing the methods a data source +// implementation needs to fill. +type DataSourceServer interface { + // ValidateDataResourceConfig is called when Terraform is checking that a + // data source's configuration is valid. It is guaranteed to have types + // conforming to your schema, but it is not guaranteed that all values + // will be known. This is your opportunity to do custom or advanced + // validation prior to a plan being generated. + ValidateDataResourceConfig(context.Context, *ValidateDataResourceConfigRequest) (*ValidateDataResourceConfigResponse, error) + + // ReadDataSource is called when Terraform is refreshing a data + // source's state. + ReadDataSource(context.Context, *ReadDataSourceRequest) (*ReadDataSourceResponse, error) +} + +// ValidateDataResourceConfigRequest is the request Terraform sends when it wants +// to validate a data source's configuration. +type ValidateDataResourceConfigRequest struct { + // TypeName is the type of data source Terraform is validating. + TypeName string + + // Config is the configuration the user supplied for that data source. + // See the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config *DynamicValue +} + +// ValidateDataResourceConfigResponse is the response from the provider about the +// validity of a data source's configuration. +type ValidateDataResourceConfigResponse struct { + // Diagnostics report errors or warnings related to the given + // configuration. Returning an empty slice indicates a successful + // validation with no warnings or errors generated. 
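A minimal sketch of satisfying the tfprotov6.DataSourceServer interface above; the data source itself is hypothetical, and real implementations would decode req.Config and return meaningful state and diagnostics:

	package example

	import (
		"context"

		"github.com/hashicorp/terraform-plugin-go/tfprotov6"
	)

	type exampleDataSource struct{}

	func (d exampleDataSource) ValidateDataResourceConfig(ctx context.Context, req *tfprotov6.ValidateDataResourceConfigRequest) (*tfprotov6.ValidateDataResourceConfigResponse, error) {
		// An empty Diagnostics slice signals a successful validation.
		return &tfprotov6.ValidateDataResourceConfigResponse{}, nil
	}

	func (d exampleDataSource) ReadDataSource(ctx context.Context, req *tfprotov6.ReadDataSourceRequest) (*tfprotov6.ReadDataSourceResponse, error) {
		// A real implementation unmarshals req.Config, fetches the data,
		// and sets State on the response.
		return &tfprotov6.ReadDataSourceResponse{}, nil
	}

	// Compile-time check that the sketch satisfies the interface.
	var _ tfprotov6.DataSourceServer = exampleDataSource{}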
+ Diagnostics []*Diagnostic +} + +// ReadDataSourceRequest is the request Terraform sends when it wants to get +// the latest state for a data source. +type ReadDataSourceRequest struct { + // TypeName is the type of data source Terraform is requesting an + // updated state for. + TypeName string + + // Config is the configuration the user supplied for that data source. + // See the documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may have unknown values. + Config *DynamicValue + + // ProviderMeta supplies the provider metadata configuration for the + // module this data source is in. Module-specific provider metadata is + // an advanced feature and usage of it should be coordinated with the + // Terraform Core team by raising an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. See the + // documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration will have known values for all fields. + ProviderMeta *DynamicValue +} + +// ReadDataSourceResponse is the response from the provider about the current +// state of the requested data source. +type ReadDataSourceResponse struct { + // State is the current state of the data source, represented as a + // `DynamicValue`. See the documentation on `DynamicValue` for + // information about safely creating the `DynamicValue`. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + State *DynamicValue + + // Diagnostics report errors or warnings related to retrieving the + // current state of the requested data source. Returning an empty slice + // indicates a successful read with no warnings or errors + // generated. + Diagnostics []*Diagnostic +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/diagnostic.go new file mode 100644 index 0000000000..8f856abbba --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/diagnostic.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +import "github.com/hashicorp/terraform-plugin-go/tftypes" + +const ( + // DiagnosticSeverityInvalid is used to indicate an invalid + // `DiagnosticSeverity`. Provider developers should not use it. + DiagnosticSeverityInvalid DiagnosticSeverity = 0 + + // DiagnosticSeverityError is used to indicate that a `Diagnostic` + // represents an error and should halt Terraform execution. + DiagnosticSeverityError DiagnosticSeverity = 1 + + // DiagnosticSeverityWarning is used to indicate that a `Diagnostic` + // represents a warning and should not halt Terraform's execution, but + // it should be surfaced to the user. + DiagnosticSeverityWarning DiagnosticSeverity = 2 +) + +// Diagnostic is used to convey information back to the user running Terraform. +type Diagnostic struct { + // Severity indicates how Terraform should handle the Diagnostic.
+ Severity DiagnosticSeverity + + // Summary is a brief description of the problem, roughly + // sentence-sized, and should provide a concise description of what + // went wrong. For example, a Summary could be as simple as "Invalid + // value.". + Summary string + + // Detail is a lengthier, more complete description of the problem. + // Detail should provide enough information that a user can resolve the + // problem entirely. For example, a Detail could be "Values must be + // alphanumeric and lowercase only." + Detail string + + // Attribute indicates which field, specifically, has the problem. Not + // setting this will indicate the entire resource; setting it will + // indicate that the problem is with a certain field in the resource, + // which helps users find the source of the problem. + Attribute *tftypes.AttributePath +} + +// DiagnosticSeverity represents different classes of Diagnostic which affect +// how Terraform handles the Diagnostics. +type DiagnosticSeverity int32 + +func (d DiagnosticSeverity) String() string { + switch d { + case 0: + return "INVALID" + case 1: + return "ERROR" + case 2: + return "WARNING" + } + return "UNKNOWN" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/doc.go new file mode 100644 index 0000000000..e9adfdedb6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/doc.go @@ -0,0 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package tfprotov6 provides the interfaces and types needed to build a +// Terraform provider server. +// +// All Terraform provider servers should be built on +// these types, to take advantage of the ecosystem and tooling built around +// them. +// +// These types are small wrappers around the Terraform protocol. It is assumed +// that developers using tfprotov6 are familiar with the protocol, its +// requirements, and its semantics. Developers not comfortable working with the +// raw protocol should use the github.com/hashicorp/terraform-plugin-sdk/v2 Go +// module instead, which offers a less verbose, safer way to develop a +// Terraform provider, albeit with less flexibility and power. +// +// Provider developers should start by defining a type that implements the +// `ProviderServer` interface. A struct is recommended, as it will allow you to +// store the configuration information attached to your provider for use in +// requests, but any type is technically possible. +// +// `ProviderServer` implementations will need to implement the composed +// interfaces, `ResourceServer` and `DataSourceServer`. It is recommended, but +// not required, to use an embedded `ResourceRouter` and `DataSourceRouter` in +// your `ProviderServer` to achieve this, which will let you handle requests +// for each resource and data source in a resource-specific or data +// source-specific function. +// +// To serve the `ProviderServer` implementation as a gRPC server that Terraform +// can connect to, use the `tf6server.Serve` function. +package tfprotov6 diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/dynamic_value.go new file mode 100644 index 0000000000..76bac4d5df --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/dynamic_value.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + msgpack "github.com/vmihailenco/msgpack/v5" + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +// ErrUnknownDynamicValueType is returned when a DynamicValue has no MsgPack or +// JSON bytes set. This should never be returned during the normal operation of +// a provider, and indicates one of the following: +// +// 1. terraform-plugin-go is out of sync with the protocol and should be +// updated. +// +// 2. terraform-plugin-go has a bug. +// +// 3. The `DynamicValue` was generated or modified by something other than +// terraform-plugin-go and is no longer a valid value. +var ErrUnknownDynamicValueType = errors.New("DynamicValue had no JSON or msgpack data set") + +// NewDynamicValue creates a DynamicValue from a tftypes.Value. You must +// specify the tftypes.Type you want to send the value as, and it must be a type +// that is compatible with the Type of the Value. Usually it should just be the +// Type of the Value, but it can also be the DynamicPseudoType. +func NewDynamicValue(t tftypes.Type, v tftypes.Value) (DynamicValue, error) { + b, err := v.MarshalMsgPack(t) //nolint:staticcheck + if err != nil { + return DynamicValue{}, err + } + return DynamicValue{ + MsgPack: b, + }, nil +} + +// DynamicValue represents a nested encoding value that came from the protocol. +// The only way providers should ever interact with it is by calling its +// `Unmarshal` method to retrieve a `tftypes.Value`. Although the type system +// allows for other interactions, they are explicitly not supported, and will +// not be considered when evaluating for breaking changes. Treat this type as +// an opaque value, and *only* call its `Unmarshal` method. +type DynamicValue struct { + MsgPack []byte + JSON []byte +} + +// IsNull returns true if the DynamicValue represents a null value based on the +// underlying JSON or MessagePack data. +func (d DynamicValue) IsNull() (bool, error) { + if d.JSON != nil { + decoder := json.NewDecoder(bytes.NewReader(d.JSON)) + token, err := decoder.Token() + + if err != nil { + return false, fmt.Errorf("unable to read DynamicValue JSON token: %w", err) + } + + if token != nil { + return false, nil + } + + return true, nil + } + + if d.MsgPack != nil { + decoder := msgpack.NewDecoder(bytes.NewReader(d.MsgPack)) + code, err := decoder.PeekCode() + + if err != nil { + return false, fmt.Errorf("unable to read DynamicValue MsgPack code: %w", err) + } + + // Extensions are considered unknown + if msgpcode.IsExt(code) || code != msgpcode.Nil { + return false, nil + } + + return true, nil + } + + return false, fmt.Errorf("unable to read DynamicValue: %w", ErrUnknownDynamicValueType) +} + +// Unmarshal returns a `tftypes.Value` that represents the information +// contained in the DynamicValue in an easy-to-interact-with way. It is the +// main purpose of the DynamicValue type, and is how provider developers should +// obtain config, state, and other values from the protocol. +// +// Pass in the type you want the `Value` to be interpreted as. Terraform's type +// system encodes in a lossy manner, meaning the type information is not +// preserved losslessly when going over the wire. Sets, lists, and tuples all +// look the same, as do user-specified values when the provider has a +// DynamicPseudoType in its schema. Objects and maps all look the same, as +// well, as do DynamicPseudoType values sometimes.
Fortunately, the provider +// should already know the type; it should be the type of the schema, or +// DynamicPseudoType if that's what's in the schema. `Unmarshal` will then +// parse the value as though it belongs to that type, if possible, and return a +// `tftypes.Value` with the appropriate information. If the data can't be +// interpreted as that type, an error will be returned saying so. In these +// cases, double check to make sure the schema is declaring the same type being +// passed into `Unmarshal`. +// +// In the event an ErrUnknownDynamicValueType is returned, one of three things +// has happened: +// +// 1. terraform-plugin-go is out of date and out of sync with the protocol, and +// an issue should be opened on its repo to get it updated. +// +// 2. terraform-plugin-go has a bug somewhere, and an issue should be opened on +// its repo to get it fixed. +// +// 3. The provider or a dependency has modified the `DynamicValue` in an +// unsupported way, or has created one from scratch, and should treat it as +// opaque and not modify it, only calling `Unmarshal` on `DynamicValue`s +// received from RPC requests. +func (d DynamicValue) Unmarshal(typ tftypes.Type) (tftypes.Value, error) { + if d.JSON != nil { + return tftypes.ValueFromJSON(d.JSON, typ) //nolint:staticcheck + } + if d.MsgPack != nil { + return tftypes.ValueFromMsgPack(d.MsgPack, typ) //nolint:staticcheck + } + return tftypes.Value{}, ErrUnknownDynamicValueType +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/function.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/function.go new file mode 100644 index 0000000000..6105b1ce73 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/function.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Function describes the definition of a function. Result must be defined. +type Function struct { + // Parameters is the ordered list of positional function parameters. + Parameters []*FunctionParameter + + // VariadicParameter is an optional final parameter which accepts zero or + // more argument values, in which case Terraform will send an ordered list of the + // parameter type. + VariadicParameter *FunctionParameter + + // Return is the function result. + Return *FunctionReturn + + // Summary is the shortened human-readable documentation for the function. + Summary string + + // Description is the longer human-readable documentation for the function. + Description string + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // DeprecationMessage is the human-readable documentation if the function + // is deprecated. This message should be practitioner oriented to explain + // how their configuration should be updated. + DeprecationMessage string +} + +// FunctionMetadata describes metadata for a function in the GetMetadata RPC. +type FunctionMetadata struct { + // Name is the name of the function. + Name string +} + +// FunctionParameter describes the definition of a function parameter. Type must +// be defined. +type FunctionParameter struct { + // AllowNullValue when enabled denotes that a null argument value can be + // passed to the provider. When disabled, Terraform returns an error if the + // argument value is null.
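To ground the DynamicValue documentation above, a sketch (the object type and the roundTrip helper are assumptions for illustration) of encoding a value with NewDynamicValue and decoding it back with Unmarshal:

	package example

	import (
		"github.com/hashicorp/terraform-plugin-go/tfprotov6"
		"github.com/hashicorp/terraform-plugin-go/tftypes"
	)

	// roundTrip is a hypothetical helper: encode a value with NewDynamicValue,
	// then decode it back with Unmarshal using the same type.
	func roundTrip() (tftypes.Value, error) {
		objType := tftypes.Object{AttributeTypes: map[string]tftypes.Type{
			"id": tftypes.String,
		}}
		val := tftypes.NewValue(objType, map[string]tftypes.Value{
			"id": tftypes.NewValue(tftypes.String, "example"),
		})

		dv, err := tfprotov6.NewDynamicValue(objType, val)
		if err != nil {
			return tftypes.Value{}, err
		}

		// Unmarshal returns ErrUnknownDynamicValueType if neither MsgPack
		// nor JSON bytes are set on the DynamicValue.
		return dv.Unmarshal(objType)
	}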
+ AllowNullValue bool + + // AllowUnknownValues when enabled denotes that any unknown argument value + // (recursively checked for collections) can be passed to the provider. When + // disabled and an unknown value is present, Terraform skips the function + // call entirely and returns an unknown value result from the function. + AllowUnknownValues bool + + // Description is the human-readable documentation for the parameter. + Description string + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // Name is the human-readable display name for the parameter. Parameters + // are by definition positional and this name is only used in documentation. + Name string + + // Type indicates the type of data the parameter expects. + Type tftypes.Type +} + +// FunctionReturn describes the definition of a function result. Type must be +// defined. +type FunctionReturn struct { + // Type indicates the type of return data. + Type tftypes.Type +} + +// FunctionServer is an interface containing the methods a function +// implementation needs to fill. +type FunctionServer interface { + // CallFunction is called when Terraform wants to execute the logic of a + // function referenced in the configuration. + CallFunction(context.Context, *CallFunctionRequest) (*CallFunctionResponse, error) + + // GetFunctions is called when Terraform wants to look up which functions a + // provider supports when not calling GetProviderSchema. + GetFunctions(context.Context, *GetFunctionsRequest) (*GetFunctionsResponse, error) +} + +// CallFunctionRequest is the request Terraform sends when it wants to execute +// the logic of a function referenced in the configuration. +type CallFunctionRequest struct { + // Name is the function name being called. + Name string + + // Arguments is the configuration value of each argument the practitioner + // supplied for the function call. The ordering and value of each element + // matches the function parameters and their associated type. If the + // function definition includes a final variadic parameter, its value is an + // ordered list of the variadic parameter type. + Arguments []*DynamicValue +} + +// CallFunctionResponse is the response from the provider with the result of +// executing the logic of the function. +type CallFunctionResponse struct { + // Error reports errors related to the execution of the + // function logic. Returning a nil error indicates a successful response + // with no errors presented to practitioners. + Error *FunctionError + + // Result is the return value from the called function, matching the result + // type in the function definition. + Result *DynamicValue +} + +// GetFunctionsRequest is the request Terraform sends when it wants to look up +// which functions a provider supports when not calling GetProviderSchema. +type GetFunctionsRequest struct{} + +// GetFunctionsResponse is the response from the provider about the implemented +// functions. +type GetFunctionsResponse struct { + // Diagnostics report errors or warnings related to the provider + // implementation. Returning an empty slice indicates a successful response + // with no warnings or errors presented to practitioners. + Diagnostics []*Diagnostic + + // Functions is a map of function names to their definition. + // + // Unlike data resources and managed resources, the name should NOT be + // prefixed with the provider name and an underscore.
Configuration + // references to functions use a separate namespacing syntax that already + // includes the provider name. + Functions map[string]*Function +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/function_error.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/function_error.go new file mode 100644 index 0000000000..17f9d8a162 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/function_error.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +// FunctionError is used to convey information back to the user running Terraform. +type FunctionError struct { + // Text is the description of the error. + Text string + + // FunctionArgument is the positional function argument for aligning + // configuration source. + FunctionArgument *int64 +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go new file mode 100644 index 0000000000..29bed4540c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package diag + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// Diagnostics is a collection of Diagnostic. +type Diagnostics []*tfprotov6.Diagnostic + +// ErrorCount returns the number of error severity diagnostics. +func (d Diagnostics) ErrorCount() int { + var result int + + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + if diagnostic.Severity != tfprotov6.DiagnosticSeverityError { + continue + } + + result++ + } + + return result +} + +// Log will log every diagnostic: +// +// - Error severity at ERROR level +// - Warning severity at WARN level +// - Invalid/Unknown severity at WARN level +func (d Diagnostics) Log(ctx context.Context) { + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + diagnosticFields := map[string]interface{}{ + logging.KeyDiagnosticDetail: diagnostic.Detail, + logging.KeyDiagnosticSeverity: diagnostic.Severity.String(), + logging.KeyDiagnosticSummary: diagnostic.Summary, + } + + if diagnostic.Attribute != nil { + diagnosticFields[logging.KeyDiagnosticAttribute] = diagnostic.Attribute.String() + } + + switch diagnostic.Severity { + case tfprotov6.DiagnosticSeverityError: + logging.ProtocolError(ctx, "Response contains error diagnostic", diagnosticFields) + case tfprotov6.DiagnosticSeverityWarning: + logging.ProtocolWarn(ctx, "Response contains warning diagnostic", diagnosticFields) + default: + logging.ProtocolWarn(ctx, "Response contains unknown diagnostic", diagnosticFields) + } + } +} + +// WarningCount returns the number of warning severity diagnostics. 
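Tying the Function, FunctionServer, and CallFunction types above together, a minimal sketch of a hypothetical "echo" function that returns its single string argument unchanged. Every name outside the tfprotov6 and tftypes packages is illustrative:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

// echoFunction defines a function with one string parameter and a string result.
var echoFunction = &tfprotov6.Function{
	Parameters: []*tfprotov6.FunctionParameter{
		{
			Name: "input",
			Type: tftypes.String,
		},
	},
	Return: &tfprotov6.FunctionReturn{
		Type: tftypes.String,
	},
	Summary: "Echoes its argument",
}

type functionServer struct{}

// CallFunction returns the first argument unchanged. A real implementation
// would unmarshal the argument, compute a result, and marshal it back.
func (s functionServer) CallFunction(ctx context.Context, req *tfprotov6.CallFunctionRequest) (*tfprotov6.CallFunctionResponse, error) {
	if len(req.Arguments) != 1 {
		arg := int64(0)
		return &tfprotov6.CallFunctionResponse{
			Error: &tfprotov6.FunctionError{
				Text:             "expected exactly one argument",
				FunctionArgument: &arg,
			},
		}, nil
	}

	return &tfprotov6.CallFunctionResponse{Result: req.Arguments[0]}, nil
}

// GetFunctions advertises the function under its bare name; as noted above,
// provider-name prefixing does not apply to functions.
func (s functionServer) GetFunctions(ctx context.Context, req *tfprotov6.GetFunctionsRequest) (*tfprotov6.GetFunctionsResponse, error) {
	return &tfprotov6.GetFunctionsResponse{
		Functions: map[string]*tfprotov6.Function{
			"echo": echoFunction,
		},
	}, nil
}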
+func (d Diagnostics) WarningCount() int { + var result int + + for _, diagnostic := range d { + if diagnostic == nil { + continue + } + + if diagnostic.Severity != tfprotov6.DiagnosticSeverityWarning { + continue + } + + result++ + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/doc.go new file mode 100644 index 0000000000..faaba2285c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package diag contains diagnostics helpers. These implementations are +// intentionally outside the public API. +package diag diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/data_source.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/data_source.go new file mode 100644 index 0000000000..2544e12f2e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/data_source.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func ValidateDataResourceConfigRequest(in *tfplugin6.ValidateDataResourceConfig_Request) *tfprotov6.ValidateDataResourceConfigRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.ValidateDataResourceConfigRequest{ + Config: DynamicValue(in.Config), + TypeName: in.TypeName, + } + + return resp +} + +func ReadDataSourceRequest(in *tfplugin6.ReadDataSource_Request) *tfprotov6.ReadDataSourceRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.ReadDataSourceRequest{ + Config: DynamicValue(in.Config), + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/doc.go new file mode 100644 index 0000000000..a9996dff6c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package fromproto converts Protocol Buffers generated tfplugin6 types into +// terraform-plugin-go tfprotov6 types. +package fromproto diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/dynamic_value.go new file mode 100644 index 0000000000..d66d3dd053 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/dynamic_value.go @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func DynamicValue(in *tfplugin6.DynamicValue) *tfprotov6.DynamicValue { + if in == nil { + return nil + } + + resp := &tfprotov6.DynamicValue{ + MsgPack: in.Msgpack, + JSON: in.Json, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/function.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/function.go new file mode 100644 index 0000000000..a25351739b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/function.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func CallFunctionRequest(in *tfplugin6.CallFunction_Request) *tfprotov6.CallFunctionRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.CallFunctionRequest{ + Arguments: make([]*tfprotov6.DynamicValue, 0, len(in.Arguments)), + Name: in.Name, + } + + for _, argument := range in.Arguments { + resp.Arguments = append(resp.Arguments, DynamicValue(argument)) + } + + return resp +} + +func GetFunctionsRequest(in *tfplugin6.GetFunctions_Request) *tfprotov6.GetFunctionsRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.GetFunctionsRequest{} + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/provider.go new file mode 100644 index 0000000000..912288684a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/provider.go @@ -0,0 +1,64 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func GetMetadataRequest(in *tfplugin6.GetMetadata_Request) *tfprotov6.GetMetadataRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.GetMetadataRequest{} + + return resp +} + +func GetProviderSchemaRequest(in *tfplugin6.GetProviderSchema_Request) *tfprotov6.GetProviderSchemaRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.GetProviderSchemaRequest{} + + return resp +} + +func ValidateProviderConfigRequest(in *tfplugin6.ValidateProviderConfig_Request) *tfprotov6.ValidateProviderConfigRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.ValidateProviderConfigRequest{ + Config: DynamicValue(in.Config), + } + + return resp +} + +func ConfigureProviderRequest(in *tfplugin6.ConfigureProvider_Request) *tfprotov6.ConfigureProviderRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.ConfigureProviderRequest{ + Config: DynamicValue(in.Config), + TerraformVersion: in.TerraformVersion, + } + + return resp +} + +func StopProviderRequest(in *tfplugin6.StopProvider_Request) *tfprotov6.StopProviderRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.StopProviderRequest{} + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/raw_state.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/raw_state.go new file mode 100644 index 0000000000..559b08264f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/raw_state.go @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func RawState(in *tfplugin6.RawState) *tfprotov6.RawState { + if in == nil { + return nil + } + + resp := &tfprotov6.RawState{ + JSON: in.Json, + Flatmap: in.Flatmap, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go new file mode 100644 index 0000000000..1b5997c70a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/resource.go @@ -0,0 +1,115 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func ValidateResourceConfigRequest(in *tfplugin6.ValidateResourceConfig_Request) *tfprotov6.ValidateResourceConfigRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.ValidateResourceConfigRequest{ + Config: DynamicValue(in.Config), + TypeName: in.TypeName, + } + + return resp +} + +func UpgradeResourceStateRequest(in *tfplugin6.UpgradeResourceState_Request) *tfprotov6.UpgradeResourceStateRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.UpgradeResourceStateRequest{ + RawState: RawState(in.RawState), + TypeName: in.TypeName, + Version: in.Version, + } + + return resp +} + +func ReadResourceRequest(in *tfplugin6.ReadResource_Request) *tfprotov6.ReadResourceRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.ReadResourceRequest{ + CurrentState: DynamicValue(in.CurrentState), + Private: in.Private, + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + } + + return resp +} + +func PlanResourceChangeRequest(in *tfplugin6.PlanResourceChange_Request) *tfprotov6.PlanResourceChangeRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.PlanResourceChangeRequest{ + Config: DynamicValue(in.Config), + PriorPrivate: in.PriorPrivate, + PriorState: DynamicValue(in.PriorState), + ProposedNewState: DynamicValue(in.ProposedNewState), + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + } + + return resp +} + +func ApplyResourceChangeRequest(in *tfplugin6.ApplyResourceChange_Request) *tfprotov6.ApplyResourceChangeRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.ApplyResourceChangeRequest{ + Config: DynamicValue(in.Config), + PlannedPrivate: in.PlannedPrivate, + PlannedState: DynamicValue(in.PlannedState), + PriorState: DynamicValue(in.PriorState), + ProviderMeta: DynamicValue(in.ProviderMeta), + TypeName: in.TypeName, + } + + return resp +} + +func ImportResourceStateRequest(in *tfplugin6.ImportResourceState_Request) *tfprotov6.ImportResourceStateRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.ImportResourceStateRequest{ + TypeName: in.TypeName, + ID: in.Id, + } + + return resp +} + +func MoveResourceStateRequest(in *tfplugin6.MoveResourceState_Request) *tfprotov6.MoveResourceStateRequest { + if in == nil { + return nil + } + + resp := &tfprotov6.MoveResourceStateRequest{ + SourcePrivate: in.SourcePrivate, + SourceProviderAddress: in.SourceProviderAddress, + SourceSchemaVersion: in.SourceSchemaVersion, + SourceState: RawState(in.SourceState), + SourceTypeName: in.SourceTypeName, + TargetTypeName: in.TargetTypeName, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/funcerr/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/funcerr/doc.go new file mode 100644 index 0000000000..9b9f61f06d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/funcerr/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package funcerr contains function error helpers. These implementations are +// intentionally outside the public API. 
+package funcerr
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/funcerr/function_error.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/funcerr/function_error.go
new file mode 100644
index 0000000000..0d6f665564
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/funcerr/function_error.go
@@ -0,0 +1,50 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package funcerr
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-go/internal/logging"
+	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
+)
+
+// FunctionError is a single FunctionError.
+type FunctionError tfprotov6.FunctionError
+
+// HasError returns true if the FunctionError is not empty.
+func (e *FunctionError) HasError() bool {
+	if e == nil {
+		return false
+	}
+
+	return e.Text != "" || e.FunctionArgument != nil
+}
+
+// Log will log the function error.
+func (e *FunctionError) Log(ctx context.Context) {
+	if e == nil {
+		return
+	}
+
+	if !e.HasError() {
+		return
+	}
+
+	switch {
+	case e.FunctionArgument != nil && e.Text != "":
+		logging.ProtocolError(ctx, "Response contains function error", map[string]interface{}{
+			logging.KeyFunctionErrorText:     e.Text,
+			logging.KeyFunctionErrorArgument: *e.FunctionArgument,
+		})
+	case e.FunctionArgument != nil:
+		logging.ProtocolError(ctx, "Response contains function error", map[string]interface{}{
+			logging.KeyFunctionErrorArgument: *e.FunctionArgument,
+		})
+	case e.Text != "":
+		logging.ProtocolError(ctx, "Response contains function error", map[string]interface{}{
+			logging.KeyFunctionErrorText: e.Text,
+		})
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/context_keys.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/context_keys.go
new file mode 100644
index 0000000000..99f55e467d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/context_keys.go
@@ -0,0 +1,11 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tf6serverlogging
+
+// Context key types.
+// Reference: https://staticcheck.io/docs/checks/#SA1029
+
+// ContextKeyDownstreamRequestStartTime is a context.Context key to store the
+// time.Time when the server began a downstream request.
+type ContextKeyDownstreamRequestStartTime struct{}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/doc.go
new file mode 100644
index 0000000000..29c51245ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/doc.go
@@ -0,0 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package tf6serverlogging contains logging functionality specific to
+// tf6server and tfprotov6 types.
+package tf6serverlogging
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/downstream_request.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/downstream_request.go
new file mode 100644
index 0000000000..9c27d41739
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/downstream_request.go
@@ -0,0 +1,64 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package tf6serverlogging + +import ( + "context" + "time" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/funcerr" +) + +// DownstreamRequest sets a request duration start time context key and +// generates a TRACE "Sending request downstream" log. +func DownstreamRequest(ctx context.Context) context.Context { + requestStart := time.Now() + ctx = context.WithValue(ctx, ContextKeyDownstreamRequestStartTime{}, requestStart) + + logging.ProtocolTrace(ctx, "Sending request downstream") + + return ctx +} + +// DownstreamResponse generates the following logging: +// +// - TRACE "Received downstream response" log with request duration and +// diagnostic severity counts +// - Per-diagnostic logs +func DownstreamResponse(ctx context.Context, diagnostics diag.Diagnostics) { + responseFields := map[string]interface{}{ + logging.KeyDiagnosticErrorCount: diagnostics.ErrorCount(), + logging.KeyDiagnosticWarningCount: diagnostics.WarningCount(), + } + + if requestStart, ok := ctx.Value(ContextKeyDownstreamRequestStartTime{}).(time.Time); ok { + responseFields[logging.KeyRequestDurationMs] = time.Since(requestStart).Milliseconds() + } + + logging.ProtocolTrace(ctx, "Received downstream response", responseFields) + diagnostics.Log(ctx) +} + +// DownstreamResponseWithError generates the following logging: +// +// - TRACE "Received downstream response" log with request duration and +// whether a function error is present +// - Log with function error details +func DownstreamResponseWithError(ctx context.Context, funcErr *tfprotov6.FunctionError) { + fe := (*funcerr.FunctionError)(funcErr) + + responseFields := map[string]interface{}{ + logging.KeyFunctionErrorExists: fe.HasError(), + } + + if requestStart, ok := ctx.Value(ContextKeyDownstreamRequestStartTime{}).(time.Time); ok { + responseFields[logging.KeyRequestDurationMs] = time.Since(requestStart).Milliseconds() + } + + logging.ProtocolTrace(ctx, "Received downstream response", responseFields) + fe.Log(ctx) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/server_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/server_capabilities.go new file mode 100644 index 0000000000..f6aaf953d9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging/server_capabilities.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf6serverlogging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ServerCapabilities generates a TRACE "Announced server capabilities" log. 
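DownstreamRequest and DownstreamResponse above live in an internal package, but the pattern they implement is easy to restate: stash a start time in the context under a collision-proof key, then log elapsed milliseconds when the response arrives. A standalone sketch of the same idea, assuming nothing beyond the standard library:

package example

import (
	"context"
	"log"
	"time"
)

// requestStartKey follows the SA1029 advice above: an unexported struct type
// makes the context key collision-proof.
type requestStartKey struct{}

// downstreamRequest records when the downstream call began.
func downstreamRequest(ctx context.Context) context.Context {
	return context.WithValue(ctx, requestStartKey{}, time.Now())
}

// downstreamResponse logs the elapsed time, if a start time was recorded.
func downstreamResponse(ctx context.Context) {
	if start, ok := ctx.Value(requestStartKey{}).(time.Time); ok {
		log.Printf("received downstream response after %d ms", time.Since(start).Milliseconds())
	}
}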
+func ServerCapabilities(ctx context.Context, capabilities *tfprotov6.ServerCapabilities) {
+	responseFields := map[string]interface{}{
+		logging.KeyServerCapabilityGetProviderSchemaOptional: false,
+		logging.KeyServerCapabilityPlanDestroy:               false,
+	}
+
+	if capabilities != nil {
+		responseFields[logging.KeyServerCapabilityGetProviderSchemaOptional] = capabilities.GetProviderSchemaOptional
+		responseFields[logging.KeyServerCapabilityPlanDestroy] = capabilities.PlanDestroy
+	}
+
+	logging.ProtocolTrace(ctx, "Announced server capabilities", responseFields)
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go
new file mode 100644
index 0000000000..5f77472884
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go
@@ -0,0 +1,5857 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Terraform Plugin RPC protocol version 6.5
+//
+// This file defines version 6.5 of the RPC protocol. To implement a plugin
+// against this protocol, copy this definition into your own codebase and
+// use protoc to generate stubs for your target language.
+//
+// This file will not be updated. Any minor versions of protocol 6 to follow
+// should copy this file and modify the copy while maintaining backwards
+// compatibility. Breaking changes, if any are required, will come
+// in a subsequent major version with its own separate proto definition.
+//
+// Note that only the proto files included in a release tag of Terraform are
+// official protocol releases. Proto files taken from other commits may include
+// incomplete changes or features that did not make it into a final release.
+// In all reasonable cases, plugin developers should take the proto file from
+// the tag of the most recent release of Terraform, and not from the main
+// branch or any other development branch.
+//
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.32.0
+// 	protoc        v4.25.1
+// source: tfplugin6.proto
+
+package tfplugin6
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type StringKind int32
+
+const (
+	StringKind_PLAIN    StringKind = 0
+	StringKind_MARKDOWN StringKind = 1
+)
+
+// Enum value maps for StringKind.
+var (
+	StringKind_name = map[int32]string{
+		0: "PLAIN",
+		1: "MARKDOWN",
+	}
+	StringKind_value = map[string]int32{
+		"PLAIN":    0,
+		"MARKDOWN": 1,
+	}
+)
+
+func (x StringKind) Enum() *StringKind {
+	p := new(StringKind)
+	*p = x
+	return p
+}
+
+func (x StringKind) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (StringKind) Descriptor() protoreflect.EnumDescriptor {
+	return file_tfplugin6_proto_enumTypes[0].Descriptor()
+}
+
+func (StringKind) Type() protoreflect.EnumType {
+	return &file_tfplugin6_proto_enumTypes[0]
+}
+
+func (x StringKind) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use StringKind.Descriptor instead.
+func (StringKind) EnumDescriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{0} +} + +type Diagnostic_Severity int32 + +const ( + Diagnostic_INVALID Diagnostic_Severity = 0 + Diagnostic_ERROR Diagnostic_Severity = 1 + Diagnostic_WARNING Diagnostic_Severity = 2 +) + +// Enum value maps for Diagnostic_Severity. +var ( + Diagnostic_Severity_name = map[int32]string{ + 0: "INVALID", + 1: "ERROR", + 2: "WARNING", + } + Diagnostic_Severity_value = map[string]int32{ + "INVALID": 0, + "ERROR": 1, + "WARNING": 2, + } +) + +func (x Diagnostic_Severity) Enum() *Diagnostic_Severity { + p := new(Diagnostic_Severity) + *p = x + return p +} + +func (x Diagnostic_Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Diagnostic_Severity) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin6_proto_enumTypes[1].Descriptor() +} + +func (Diagnostic_Severity) Type() protoreflect.EnumType { + return &file_tfplugin6_proto_enumTypes[1] +} + +func (x Diagnostic_Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Diagnostic_Severity.Descriptor instead. +func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{1, 0} +} + +type Schema_NestedBlock_NestingMode int32 + +const ( + Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0 + Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1 + Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2 + Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3 + Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4 + Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5 +) + +// Enum value maps for Schema_NestedBlock_NestingMode. +var ( + Schema_NestedBlock_NestingMode_name = map[int32]string{ + 0: "INVALID", + 1: "SINGLE", + 2: "LIST", + 3: "SET", + 4: "MAP", + 5: "GROUP", + } + Schema_NestedBlock_NestingMode_value = map[string]int32{ + "INVALID": 0, + "SINGLE": 1, + "LIST": 2, + "SET": 3, + "MAP": 4, + "GROUP": 5, + } +) + +func (x Schema_NestedBlock_NestingMode) Enum() *Schema_NestedBlock_NestingMode { + p := new(Schema_NestedBlock_NestingMode) + *p = x + return p +} + +func (x Schema_NestedBlock_NestingMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Schema_NestedBlock_NestingMode) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin6_proto_enumTypes[2].Descriptor() +} + +func (Schema_NestedBlock_NestingMode) Type() protoreflect.EnumType { + return &file_tfplugin6_proto_enumTypes[2] +} + +func (x Schema_NestedBlock_NestingMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Schema_NestedBlock_NestingMode.Descriptor instead. +func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 2, 0} +} + +type Schema_Object_NestingMode int32 + +const ( + Schema_Object_INVALID Schema_Object_NestingMode = 0 + Schema_Object_SINGLE Schema_Object_NestingMode = 1 + Schema_Object_LIST Schema_Object_NestingMode = 2 + Schema_Object_SET Schema_Object_NestingMode = 3 + Schema_Object_MAP Schema_Object_NestingMode = 4 +) + +// Enum value maps for Schema_Object_NestingMode. 
+var ( + Schema_Object_NestingMode_name = map[int32]string{ + 0: "INVALID", + 1: "SINGLE", + 2: "LIST", + 3: "SET", + 4: "MAP", + } + Schema_Object_NestingMode_value = map[string]int32{ + "INVALID": 0, + "SINGLE": 1, + "LIST": 2, + "SET": 3, + "MAP": 4, + } +) + +func (x Schema_Object_NestingMode) Enum() *Schema_Object_NestingMode { + p := new(Schema_Object_NestingMode) + *p = x + return p +} + +func (x Schema_Object_NestingMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Schema_Object_NestingMode) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin6_proto_enumTypes[3].Descriptor() +} + +func (Schema_Object_NestingMode) Type() protoreflect.EnumType { + return &file_tfplugin6_proto_enumTypes[3] +} + +func (x Schema_Object_NestingMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Schema_Object_NestingMode.Descriptor instead. +func (Schema_Object_NestingMode) EnumDescriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 3, 0} +} + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. +type DynamicValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` + Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"` +} + +func (x *DynamicValue) Reset() { + *x = DynamicValue{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicValue) ProtoMessage() {} + +func (x *DynamicValue) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicValue.ProtoReflect.Descriptor instead. 
+func (*DynamicValue) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{0} +} + +func (x *DynamicValue) GetMsgpack() []byte { + if x != nil { + return x.Msgpack + } + return nil +} + +func (x *DynamicValue) GetJson() []byte { + if x != nil { + return x.Json + } + return nil +} + +type Diagnostic struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin6.Diagnostic_Severity" json:"severity,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` + Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` +} + +func (x *Diagnostic) Reset() { + *x = Diagnostic{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Diagnostic) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Diagnostic) ProtoMessage() {} + +func (x *Diagnostic) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Diagnostic.ProtoReflect.Descriptor instead. +func (*Diagnostic) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{1} +} + +func (x *Diagnostic) GetSeverity() Diagnostic_Severity { + if x != nil { + return x.Severity + } + return Diagnostic_INVALID +} + +func (x *Diagnostic) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Diagnostic) GetDetail() string { + if x != nil { + return x.Detail + } + return "" +} + +func (x *Diagnostic) GetAttribute() *AttributePath { + if x != nil { + return x.Attribute + } + return nil +} + +type FunctionError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"` + // The optional function_argument records the index position of the + // argument which caused the error. + FunctionArgument *int64 `protobuf:"varint,2,opt,name=function_argument,json=functionArgument,proto3,oneof" json:"function_argument,omitempty"` +} + +func (x *FunctionError) Reset() { + *x = FunctionError{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FunctionError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FunctionError) ProtoMessage() {} + +func (x *FunctionError) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FunctionError.ProtoReflect.Descriptor instead. 
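Note that the generated getter for the optional function_argument field (shown below) returns 0 when the field is unset, so code that must distinguish "no argument recorded" from "argument 0" has to check the pointer itself. A small sketch against the public tfprotov6 type; the helper name is illustrative:

package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
)

// describeFunctionError distinguishes "no argument recorded" from
// "argument 0", which a zero-defaulting getter cannot.
func describeFunctionError(e *tfprotov6.FunctionError) string {
	if e == nil {
		return "no error"
	}
	if e.FunctionArgument == nil {
		return e.Text
	}
	return fmt.Sprintf("argument %d: %s", *e.FunctionArgument, e.Text)
}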
+func (*FunctionError) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{2} +} + +func (x *FunctionError) GetText() string { + if x != nil { + return x.Text + } + return "" +} + +func (x *FunctionError) GetFunctionArgument() int64 { + if x != nil && x.FunctionArgument != nil { + return *x.FunctionArgument + } + return 0 +} + +type AttributePath struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` +} + +func (x *AttributePath) Reset() { + *x = AttributePath{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttributePath) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributePath) ProtoMessage() {} + +func (x *AttributePath) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributePath.ProtoReflect.Descriptor instead. +func (*AttributePath) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{3} +} + +func (x *AttributePath) GetSteps() []*AttributePath_Step { + if x != nil { + return x.Steps + } + return nil +} + +type StopProvider struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StopProvider) Reset() { + *x = StopProvider{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopProvider) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopProvider) ProtoMessage() {} + +func (x *StopProvider) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopProvider.ProtoReflect.Descriptor instead. +func (*StopProvider) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{4} +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. 
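In practice the json form is what providers see during UpgradeResourceState; the flatmap form only appears for very old state. A sketch of consuming the public tfprotov6.RawState counterpart, preferring JSON and falling back to flatmap (the helper is illustrative):

package example

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
)

// priorStateAttributes decodes raw prior state: modern state arrives as raw
// JSON, while flatmap keys are flattened attribute paths from legacy state.
func priorStateAttributes(raw *tfprotov6.RawState) (map[string]interface{}, error) {
	if raw == nil {
		return nil, fmt.Errorf("no raw state")
	}

	if raw.JSON != nil {
		var attrs map[string]interface{}
		if err := json.Unmarshal(raw.JSON, &attrs); err != nil {
			return nil, fmt.Errorf("decoding prior state: %w", err)
		}
		return attrs, nil
	}

	attrs := make(map[string]interface{}, len(raw.Flatmap))
	for k, v := range raw.Flatmap {
		attrs[k] = v
	}
	return attrs, nil
}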
+type RawState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` + Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RawState) Reset() { + *x = RawState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RawState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawState) ProtoMessage() {} + +func (x *RawState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawState.ProtoReflect.Descriptor instead. +func (*RawState) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{5} +} + +func (x *RawState) GetJson() []byte { + if x != nil { + return x.Json + } + return nil +} + +func (x *RawState) GetFlatmap() map[string]string { + if x != nil { + return x.Flatmap + } + return nil +} + +// Schema is the configuration schema for a Resource or Provider. +type Schema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Block is the top level configuration block for this schema. + Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` +} + +func (x *Schema) Reset() { + *x = Schema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema.ProtoReflect.Descriptor instead. +func (*Schema) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6} +} + +func (x *Schema) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Schema) GetBlock() *Schema_Block { + if x != nil { + return x.Block + } + return nil +} + +type Function struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // parameters is the ordered list of positional function parameters. + Parameters []*Function_Parameter `protobuf:"bytes,1,rep,name=parameters,proto3" json:"parameters,omitempty"` + // variadic_parameter is an optional final parameter which accepts + // zero or more argument values, in which Terraform will send an + // ordered list of the parameter type. 
+ VariadicParameter *Function_Parameter `protobuf:"bytes,2,opt,name=variadic_parameter,json=variadicParameter,proto3" json:"variadic_parameter,omitempty"` + // return is the function result. + Return *Function_Return `protobuf:"bytes,3,opt,name=return,proto3" json:"return,omitempty"` + // summary is the human-readable shortened documentation for the function. + Summary string `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"` + // description is human-readable documentation for the function. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // description_kind is the formatting of the description. + DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin6.StringKind" json:"description_kind,omitempty"` + // deprecation_message is human-readable documentation if the + // function is deprecated. + DeprecationMessage string `protobuf:"bytes,7,opt,name=deprecation_message,json=deprecationMessage,proto3" json:"deprecation_message,omitempty"` +} + +func (x *Function) Reset() { + *x = Function{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function) ProtoMessage() {} + +func (x *Function) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function.ProtoReflect.Descriptor instead. +func (*Function) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{7} +} + +func (x *Function) GetParameters() []*Function_Parameter { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Function) GetVariadicParameter() *Function_Parameter { + if x != nil { + return x.VariadicParameter + } + return nil +} + +func (x *Function) GetReturn() *Function_Return { + if x != nil { + return x.Return + } + return nil +} + +func (x *Function) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Function) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Function) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Function) GetDeprecationMessage() string { + if x != nil { + return x.DeprecationMessage + } + return "" +} + +// ServerCapabilities allows providers to communicate extra information +// regarding supported protocol features. This is used to indicate +// availability of certain forward-compatible changes which may be optional +// in a major protocol version, but cannot be tested for directly. +type ServerCapabilities struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The plan_destroy capability signals that a provider expects a call + // to PlanResourceChange when a resource is going to be destroyed. 
+	PlanDestroy bool `protobuf:"varint,1,opt,name=plan_destroy,json=planDestroy,proto3" json:"plan_destroy,omitempty"`
+	// The get_provider_schema_optional capability indicates that this
+	// provider does not require calling GetProviderSchema to operate
+	// normally, and the caller can use a cached copy of the provider's
+	// schema.
+	GetProviderSchemaOptional bool `protobuf:"varint,2,opt,name=get_provider_schema_optional,json=getProviderSchemaOptional,proto3" json:"get_provider_schema_optional,omitempty"`
+	// The move_resource_state capability signals that a provider supports the
+	// MoveResourceState RPC.
+	MoveResourceState bool `protobuf:"varint,3,opt,name=move_resource_state,json=moveResourceState,proto3" json:"move_resource_state,omitempty"`
+}
+
+func (x *ServerCapabilities) Reset() {
+	*x = ServerCapabilities{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_tfplugin6_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ServerCapabilities) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerCapabilities) ProtoMessage() {}
+
+func (x *ServerCapabilities) ProtoReflect() protoreflect.Message {
+	mi := &file_tfplugin6_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerCapabilities.ProtoReflect.Descriptor instead.
+func (*ServerCapabilities) Descriptor() ([]byte, []int) {
+	return file_tfplugin6_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *ServerCapabilities) GetPlanDestroy() bool {
+	if x != nil {
+		return x.PlanDestroy
+	}
+	return false
+}
+
+func (x *ServerCapabilities) GetGetProviderSchemaOptional() bool {
+	if x != nil {
+		return x.GetProviderSchemaOptional
+	}
+	return false
+}
+
+func (x *ServerCapabilities) GetMoveResourceState() bool {
+	if x != nil {
+		return x.MoveResourceState
+	}
+	return false
+}
+
+type GetMetadata struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *GetMetadata) Reset() {
+	*x = GetMetadata{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_tfplugin6_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *GetMetadata) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetMetadata) ProtoMessage() {}
+
+func (x *GetMetadata) ProtoReflect() protoreflect.Message {
+	mi := &file_tfplugin6_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetMetadata.ProtoReflect.Descriptor instead.
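The three capabilities above are all opt-in booleans that default to false on the wire. A sketch of announcing them through the public tfprotov6 type; which flags a given provider may safely enable is provider-specific:

package example

import "github.com/hashicorp/terraform-plugin-go/tfprotov6"

// capabilities is an illustrative announcement: a server opts in to each
// forward-compatible behavior explicitly.
var capabilities = &tfprotov6.ServerCapabilities{
	// Safe once the provider handles PlanResourceChange for destroy plans.
	PlanDestroy: true,
	// Safe once Terraform may reuse a cached copy of the schema.
	GetProviderSchemaOptional: true,
	// Leave false until the MoveResourceState RPC is implemented.
	MoveResourceState: false,
}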
+func (*GetMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9} +} + +type GetProviderSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProviderSchema) Reset() { + *x = GetProviderSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema) ProtoMessage() {} + +func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema.ProtoReflect.Descriptor instead. +func (*GetProviderSchema) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{10} +} + +type ValidateProviderConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateProviderConfig) Reset() { + *x = ValidateProviderConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProviderConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProviderConfig) ProtoMessage() {} + +func (x *ValidateProviderConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProviderConfig.ProtoReflect.Descriptor instead. +func (*ValidateProviderConfig) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{11} +} + +type UpgradeResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpgradeResourceState) Reset() { + *x = UpgradeResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState) ProtoMessage() {} + +func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState.ProtoReflect.Descriptor instead. 
+func (*UpgradeResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{12} +} + +type ValidateResourceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateResourceConfig) Reset() { + *x = ValidateResourceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceConfig) ProtoMessage() {} + +func (x *ValidateResourceConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceConfig.ProtoReflect.Descriptor instead. +func (*ValidateResourceConfig) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{13} +} + +type ValidateDataResourceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateDataResourceConfig) Reset() { + *x = ValidateDataResourceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataResourceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataResourceConfig) ProtoMessage() {} + +func (x *ValidateDataResourceConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataResourceConfig.ProtoReflect.Descriptor instead. +func (*ValidateDataResourceConfig) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{14} +} + +type ConfigureProvider struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ConfigureProvider) Reset() { + *x = ConfigureProvider{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureProvider) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureProvider) ProtoMessage() {} + +func (x *ConfigureProvider) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureProvider.ProtoReflect.Descriptor instead. 
+func (*ConfigureProvider) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{15} +} + +type ReadResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReadResource) Reset() { + *x = ReadResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource) ProtoMessage() {} + +func (x *ReadResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource.ProtoReflect.Descriptor instead. +func (*ReadResource) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{16} +} + +type PlanResourceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PlanResourceChange) Reset() { + *x = PlanResourceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange) ProtoMessage() {} + +func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange.ProtoReflect.Descriptor instead. +func (*PlanResourceChange) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{17} +} + +type ApplyResourceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ApplyResourceChange) Reset() { + *x = ApplyResourceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange) ProtoMessage() {} + +func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange.ProtoReflect.Descriptor instead. 
+func (*ApplyResourceChange) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{18} +} + +type ImportResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ImportResourceState) Reset() { + *x = ImportResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState) ProtoMessage() {} + +func (x *ImportResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState.ProtoReflect.Descriptor instead. +func (*ImportResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{19} +} + +type MoveResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MoveResourceState) Reset() { + *x = MoveResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResourceState) ProtoMessage() {} + +func (x *MoveResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResourceState.ProtoReflect.Descriptor instead. +func (*MoveResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{20} +} + +type ReadDataSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReadDataSource) Reset() { + *x = ReadDataSource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource) ProtoMessage() {} + +func (x *ReadDataSource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource.ProtoReflect.Descriptor instead. 
+func (*ReadDataSource) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{21} +} + +type GetFunctions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFunctions) Reset() { + *x = GetFunctions{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions) ProtoMessage() {} + +func (x *GetFunctions) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions.ProtoReflect.Descriptor instead. +func (*GetFunctions) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{22} +} + +type CallFunction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CallFunction) Reset() { + *x = CallFunction{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction) ProtoMessage() {} + +func (x *CallFunction) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction.ProtoReflect.Descriptor instead. +func (*CallFunction) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{23} +} + +type AttributePath_Step struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Selector: + // + // *AttributePath_Step_AttributeName + // *AttributePath_Step_ElementKeyString + // *AttributePath_Step_ElementKeyInt + Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"` +} + +func (x *AttributePath_Step) Reset() { + *x = AttributePath_Step{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttributePath_Step) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributePath_Step) ProtoMessage() {} + +func (x *AttributePath_Step) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributePath_Step.ProtoReflect.Descriptor instead. 
+func (*AttributePath_Step) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{3, 0} +} + +func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (x *AttributePath_Step) GetAttributeName() string { + if x, ok := x.GetSelector().(*AttributePath_Step_AttributeName); ok { + return x.AttributeName + } + return "" +} + +func (x *AttributePath_Step) GetElementKeyString() string { + if x, ok := x.GetSelector().(*AttributePath_Step_ElementKeyString); ok { + return x.ElementKeyString + } + return "" +} + +func (x *AttributePath_Step) GetElementKeyInt() int64 { + if x, ok := x.GetSelector().(*AttributePath_Step_ElementKeyInt); ok { + return x.ElementKeyInt + } + return 0 +} + +type isAttributePath_Step_Selector interface { + isAttributePath_Step_Selector() +} + +type AttributePath_Step_AttributeName struct { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyString struct { + // Set "element_key_*" to represent looking up an element in + // an indexable collection type. + ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyInt struct { + ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` +} + +func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} + +type StopProvider_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StopProvider_Request) Reset() { + *x = StopProvider_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopProvider_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopProvider_Request) ProtoMessage() {} + +func (x *StopProvider_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopProvider_Request.ProtoReflect.Descriptor instead. 
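// Editor's sketch (not part of the vendored file): how a caller might build
// AttributePath_Step oneof values and branch on the selector wrapper types
// generated above. The function name is illustrative only; the snippet
// compiles within this package with no extra imports.
func exampleAttributePathStepSelector() (names []string, indexes []int64) {
	steps := []*AttributePath_Step{
		{Selector: &AttributePath_Step_AttributeName{AttributeName: "tags"}},
		{Selector: &AttributePath_Step_ElementKeyInt{ElementKeyInt: 0}},
	}
	for _, step := range steps {
		// The typed getters (GetAttributeName, etc.) return the zero value
		// when the oneof holds a different selector, so a type switch is the
		// unambiguous way to branch.
		switch sel := step.GetSelector().(type) {
		case *AttributePath_Step_AttributeName:
			names = append(names, sel.AttributeName)
		case *AttributePath_Step_ElementKeyString:
			names = append(names, sel.ElementKeyString)
		case *AttributePath_Step_ElementKeyInt:
			indexes = append(indexes, sel.ElementKeyInt)
		}
	}
	return names, indexes
}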
+func (*StopProvider_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{4, 0} +} + +type StopProvider_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` +} + +func (x *StopProvider_Response) Reset() { + *x = StopProvider_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopProvider_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopProvider_Response) ProtoMessage() {} + +func (x *StopProvider_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopProvider_Response.ProtoReflect.Descriptor instead. +func (*StopProvider_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *StopProvider_Response) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type Schema_Block struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + DescriptionKind StringKind `protobuf:"varint,5,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin6.StringKind" json:"description_kind,omitempty"` + Deprecated bool `protobuf:"varint,6,opt,name=deprecated,proto3" json:"deprecated,omitempty"` +} + +func (x *Schema_Block) Reset() { + *x = Schema_Block{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_Block) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_Block) ProtoMessage() {} + +func (x *Schema_Block) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_Block.ProtoReflect.Descriptor instead. 
+func (*Schema_Block) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *Schema_Block) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Schema_Block) GetAttributes() []*Schema_Attribute { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Schema_Block) GetBlockTypes() []*Schema_NestedBlock { + if x != nil { + return x.BlockTypes + } + return nil +} + +func (x *Schema_Block) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema_Block) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Schema_Block) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +type Schema_Attribute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + NestedType *Schema_Object `protobuf:"bytes,10,opt,name=nested_type,json=nestedType,proto3" json:"nested_type,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"` + Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" json:"sensitive,omitempty"` + DescriptionKind StringKind `protobuf:"varint,8,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin6.StringKind" json:"description_kind,omitempty"` + Deprecated bool `protobuf:"varint,9,opt,name=deprecated,proto3" json:"deprecated,omitempty"` +} + +func (x *Schema_Attribute) Reset() { + *x = Schema_Attribute{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_Attribute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_Attribute) ProtoMessage() {} + +func (x *Schema_Attribute) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_Attribute.ProtoReflect.Descriptor instead. 
+func (*Schema_Attribute) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 1} +} + +func (x *Schema_Attribute) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Schema_Attribute) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +func (x *Schema_Attribute) GetNestedType() *Schema_Object { + if x != nil { + return x.NestedType + } + return nil +} + +func (x *Schema_Attribute) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema_Attribute) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *Schema_Attribute) GetOptional() bool { + if x != nil { + return x.Optional + } + return false +} + +func (x *Schema_Attribute) GetComputed() bool { + if x != nil { + return x.Computed + } + return false +} + +func (x *Schema_Attribute) GetSensitive() bool { + if x != nil { + return x.Sensitive + } + return false +} + +func (x *Schema_Attribute) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Schema_Attribute) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +type Schema_NestedBlock struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin6.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` + MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` +} + +func (x *Schema_NestedBlock) Reset() { + *x = Schema_NestedBlock{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_NestedBlock) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_NestedBlock) ProtoMessage() {} + +func (x *Schema_NestedBlock) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_NestedBlock.ProtoReflect.Descriptor instead. 
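// Editor's sketch (not part of the vendored file): assembling a Schema_Block
// from the Schema_Attribute and Schema_NestedBlock types above. Note that
// Schema_Attribute.Type is a serialized type constraint, conventionally a
// JSON-encoded cty type such as `"string"` in this protocol (an assumption to
// verify against the protocol docs, not something this file states). The LIST
// nesting constant is the one generated for Schema_NestedBlock_NestingMode.
func exampleSchemaBlock() *Schema_Block {
	return &Schema_Block{
		Version: 1,
		Attributes: []*Schema_Attribute{{
			Name:            "name",
			Type:            []byte(`"string"`),
			Description:     "Example required attribute.",
			Required:        true,
			DescriptionKind: StringKind_PLAIN,
		}},
		BlockTypes: []*Schema_NestedBlock{{
			TypeName: "rule",
			Nesting:  Schema_NestedBlock_LIST,
			Block:    &Schema_Block{},
		}},
	}
}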
+func (*Schema_NestedBlock) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 2} +} + +func (x *Schema_NestedBlock) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *Schema_NestedBlock) GetBlock() *Schema_Block { + if x != nil { + return x.Block + } + return nil +} + +func (x *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode { + if x != nil { + return x.Nesting + } + return Schema_NestedBlock_INVALID +} + +func (x *Schema_NestedBlock) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *Schema_NestedBlock) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +type Schema_Object struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Attributes []*Schema_Attribute `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` + Nesting Schema_Object_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin6.Schema_Object_NestingMode" json:"nesting,omitempty"` + // MinItems and MaxItems were never used in the protocol, and have no + // effect on validation. + // + // Deprecated: Marked as deprecated in tfplugin6.proto. + MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + // Deprecated: Marked as deprecated in tfplugin6.proto. + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` +} + +func (x *Schema_Object) Reset() { + *x = Schema_Object{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_Object) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_Object) ProtoMessage() {} + +func (x *Schema_Object) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_Object.ProtoReflect.Descriptor instead. +func (*Schema_Object) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 3} +} + +func (x *Schema_Object) GetAttributes() []*Schema_Attribute { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Schema_Object) GetNesting() Schema_Object_NestingMode { + if x != nil { + return x.Nesting + } + return Schema_Object_INVALID +} + +// Deprecated: Marked as deprecated in tfplugin6.proto. +func (x *Schema_Object) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +// Deprecated: Marked as deprecated in tfplugin6.proto. +func (x *Schema_Object) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +type Function_Parameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the human-readable display name for the parameter. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // type is the type constraint for the parameter. + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // allow_null_value when enabled denotes that a null argument value can + // be passed to the provider. 
When disabled, Terraform returns an error + // if the argument value is null. + AllowNullValue bool `protobuf:"varint,3,opt,name=allow_null_value,json=allowNullValue,proto3" json:"allow_null_value,omitempty"` + // allow_unknown_values when enabled denotes that only wholly known + // argument values will be passed to the provider. When disabled, + // Terraform skips the function call entirely and assumes an unknown + // value result from the function. + AllowUnknownValues bool `protobuf:"varint,4,opt,name=allow_unknown_values,json=allowUnknownValues,proto3" json:"allow_unknown_values,omitempty"` + // description is human-readable documentation for the parameter. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // description_kind is the formatting of the description. + DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin6.StringKind" json:"description_kind,omitempty"` +} + +func (x *Function_Parameter) Reset() { + *x = Function_Parameter{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function_Parameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function_Parameter) ProtoMessage() {} + +func (x *Function_Parameter) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function_Parameter.ProtoReflect.Descriptor instead. +func (*Function_Parameter) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{7, 0} +} + +func (x *Function_Parameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Function_Parameter) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +func (x *Function_Parameter) GetAllowNullValue() bool { + if x != nil { + return x.AllowNullValue + } + return false +} + +func (x *Function_Parameter) GetAllowUnknownValues() bool { + if x != nil { + return x.AllowUnknownValues + } + return false +} + +func (x *Function_Parameter) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Function_Parameter) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +type Function_Return struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // type is the type constraint for the function result. 
+ Type []byte `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *Function_Return) Reset() { + *x = Function_Return{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function_Return) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function_Return) ProtoMessage() {} + +func (x *Function_Return) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function_Return.ProtoReflect.Descriptor instead. +func (*Function_Return) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{7, 1} +} + +func (x *Function_Return) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +type GetMetadata_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetMetadata_Request) Reset() { + *x = GetMetadata_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_Request) ProtoMessage() {} + +func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_Request.ProtoReflect.Descriptor instead. +func (*GetMetadata_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9, 0} +} + +type GetMetadata_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServerCapabilities *ServerCapabilities `protobuf:"bytes,1,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + DataSources []*GetMetadata_DataSourceMetadata `protobuf:"bytes,3,rep,name=data_sources,json=dataSources,proto3" json:"data_sources,omitempty"` + Resources []*GetMetadata_ResourceMetadata `protobuf:"bytes,4,rep,name=resources,proto3" json:"resources,omitempty"` + // functions returns metadata for any functions. 
+ Functions []*GetMetadata_FunctionMetadata `protobuf:"bytes,5,rep,name=functions,proto3" json:"functions,omitempty"` +} + +func (x *GetMetadata_Response) Reset() { + *x = GetMetadata_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_Response) ProtoMessage() {} + +func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_Response.ProtoReflect.Descriptor instead. +func (*GetMetadata_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9, 1} +} + +func (x *GetMetadata_Response) GetServerCapabilities() *ServerCapabilities { + if x != nil { + return x.ServerCapabilities + } + return nil +} + +func (x *GetMetadata_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *GetMetadata_Response) GetDataSources() []*GetMetadata_DataSourceMetadata { + if x != nil { + return x.DataSources + } + return nil +} + +func (x *GetMetadata_Response) GetResources() []*GetMetadata_ResourceMetadata { + if x != nil { + return x.Resources + } + return nil +} + +func (x *GetMetadata_Response) GetFunctions() []*GetMetadata_FunctionMetadata { + if x != nil { + return x.Functions + } + return nil +} + +type GetMetadata_FunctionMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the function name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetMetadata_FunctionMetadata) Reset() { + *x = GetMetadata_FunctionMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_FunctionMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_FunctionMetadata) ProtoMessage() {} + +func (x *GetMetadata_FunctionMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_FunctionMetadata.ProtoReflect.Descriptor instead. 
+func (*GetMetadata_FunctionMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9, 2} +} + +func (x *GetMetadata_FunctionMetadata) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type GetMetadata_DataSourceMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *GetMetadata_DataSourceMetadata) Reset() { + *x = GetMetadata_DataSourceMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_DataSourceMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_DataSourceMetadata) ProtoMessage() {} + +func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_DataSourceMetadata.ProtoReflect.Descriptor instead. +func (*GetMetadata_DataSourceMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9, 3} +} + +func (x *GetMetadata_DataSourceMetadata) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +type GetMetadata_ResourceMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *GetMetadata_ResourceMetadata) Reset() { + *x = GetMetadata_ResourceMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_ResourceMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_ResourceMetadata) ProtoMessage() {} + +func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_ResourceMetadata.ProtoReflect.Descriptor instead. 
+func (*GetMetadata_ResourceMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9, 4} +} + +func (x *GetMetadata_ResourceMetadata) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +type GetProviderSchema_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProviderSchema_Request) Reset() { + *x = GetProviderSchema_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema_Request) ProtoMessage() {} + +func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema_Request.ProtoReflect.Descriptor instead. +func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{10, 0} +} + +type GetProviderSchema_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + ServerCapabilities *ServerCapabilities `protobuf:"bytes,6,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` + // functions is a mapping of function names to definitions. + Functions map[string]*Function `protobuf:"bytes,7,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GetProviderSchema_Response) Reset() { + *x = GetProviderSchema_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema_Response) ProtoMessage() {} + +func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema_Response.ProtoReflect.Descriptor instead. 
+func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{10, 1} +} + +func (x *GetProviderSchema_Response) GetProvider() *Schema { + if x != nil { + return x.Provider + } + return nil +} + +func (x *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema { + if x != nil { + return x.ResourceSchemas + } + return nil +} + +func (x *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema { + if x != nil { + return x.DataSourceSchemas + } + return nil +} + +func (x *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *GetProviderSchema_Response) GetProviderMeta() *Schema { + if x != nil { + return x.ProviderMeta + } + return nil +} + +func (x *GetProviderSchema_Response) GetServerCapabilities() *ServerCapabilities { + if x != nil { + return x.ServerCapabilities + } + return nil +} + +func (x *GetProviderSchema_Response) GetFunctions() map[string]*Function { + if x != nil { + return x.Functions + } + return nil +} + +type ValidateProviderConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateProviderConfig_Request) Reset() { + *x = ValidateProviderConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProviderConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProviderConfig_Request) ProtoMessage() {} + +func (x *ValidateProviderConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProviderConfig_Request.ProtoReflect.Descriptor instead. +func (*ValidateProviderConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *ValidateProviderConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateProviderConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateProviderConfig_Response) Reset() { + *x = ValidateProviderConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProviderConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProviderConfig_Response) ProtoMessage() {} + +func (x *ValidateProviderConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProviderConfig_Response.ProtoReflect.Descriptor instead. 
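// Editor's sketch (not part of the vendored file): walking a
// GetProviderSchema_Response with the getters generated above. The generated
// getters are nil-safe, and ranging over the nil maps they return is a no-op,
// so this is safe to call on a nil response as well.
func exampleListSchemas(resp *GetProviderSchema_Response) (resources, functions []string) {
	for name := range resp.GetResourceSchemas() {
		resources = append(resources, name)
	}
	for name := range resp.GetFunctions() {
		functions = append(functions, name)
	}
	return resources, functions
}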
+func (*ValidateProviderConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{11, 1} +} + +func (x *ValidateProviderConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +// Request is the message that is sent to the provider during the +// UpgradeResourceState RPC. +// +// This message intentionally does not include configuration data as any +// configuration-based or configuration-conditional changes should occur +// during the PlanResourceChange RPC. Additionally, the configuration is +// not guaranteed to exist (in the case of resource destruction), be wholly +// known, nor match the given prior state, which could lead to unexpected +// provider behaviors for practitioners. +type UpgradeResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + // version is the schema_version number recorded in the state file + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + // raw_state is the raw states as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. + RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"` +} + +func (x *UpgradeResourceState_Request) Reset() { + *x = UpgradeResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState_Request) ProtoMessage() {} + +func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState_Request.ProtoReflect.Descriptor instead. +func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *UpgradeResourceState_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *UpgradeResourceState_Request) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *UpgradeResourceState_Request) GetRawState() *RawState { + if x != nil { + return x.RawState + } + return nil +} + +type UpgradeResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw. 
+ UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"` + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *UpgradeResourceState_Response) Reset() { + *x = UpgradeResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState_Response) ProtoMessage() {} + +func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState_Response.ProtoReflect.Descriptor instead. +func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { + if x != nil { + return x.UpgradedState + } + return nil +} + +func (x *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateResourceConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateResourceConfig_Request) Reset() { + *x = ValidateResourceConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceConfig_Request) ProtoMessage() {} + +func (x *ValidateResourceConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceConfig_Request.ProtoReflect.Descriptor instead. 
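// Editor's sketch (not part of the vendored file): the shape of the
// UpgradeResourceState exchange defined above. RawState carries either
// JSON-encoded state or the legacy flat-mapped form (GetJson is assumed from
// the RawState message defined earlier in this file), and the upgraded result
// is returned as a DynamicValue. The JSON passthrough below is a placeholder
// for a real version-by-version upgrade, not the actual SDK implementation.
func exampleUpgradeResourceState(req *UpgradeResourceState_Request) *UpgradeResourceState_Response {
	raw := req.GetRawState().GetJson()
	if raw == nil {
		// Legacy flat-mapped state would need its own decoding path.
		return &UpgradeResourceState_Response{}
	}
	return &UpgradeResourceState_Response{
		UpgradedState: &DynamicValue{Json: raw},
	}
}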
+func (*ValidateResourceConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{13, 0} +} + +func (x *ValidateResourceConfig_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ValidateResourceConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateResourceConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateResourceConfig_Response) Reset() { + *x = ValidateResourceConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceConfig_Response) ProtoMessage() {} + +func (x *ValidateResourceConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceConfig_Response.ProtoReflect.Descriptor instead. +func (*ValidateResourceConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{13, 1} +} + +func (x *ValidateResourceConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateDataResourceConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateDataResourceConfig_Request) Reset() { + *x = ValidateDataResourceConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataResourceConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataResourceConfig_Request) ProtoMessage() {} + +func (x *ValidateDataResourceConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataResourceConfig_Request.ProtoReflect.Descriptor instead. 
+func (*ValidateDataResourceConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{14, 0} +} + +func (x *ValidateDataResourceConfig_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ValidateDataResourceConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateDataResourceConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateDataResourceConfig_Response) Reset() { + *x = ValidateDataResourceConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataResourceConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataResourceConfig_Response) ProtoMessage() {} + +func (x *ValidateDataResourceConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataResourceConfig_Response.ProtoReflect.Descriptor instead. +func (*ValidateDataResourceConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{14, 1} +} + +func (x *ValidateDataResourceConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ConfigureProvider_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ConfigureProvider_Request) Reset() { + *x = ConfigureProvider_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureProvider_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureProvider_Request) ProtoMessage() {} + +func (x *ConfigureProvider_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureProvider_Request.ProtoReflect.Descriptor instead. 
+func (*ConfigureProvider_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{15, 0} +} + +func (x *ConfigureProvider_Request) GetTerraformVersion() string { + if x != nil { + return x.TerraformVersion + } + return "" +} + +func (x *ConfigureProvider_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ConfigureProvider_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ConfigureProvider_Response) Reset() { + *x = ConfigureProvider_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureProvider_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureProvider_Response) ProtoMessage() {} + +func (x *ConfigureProvider_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureProvider_Response.ProtoReflect.Descriptor instead. +func (*ConfigureProvider_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{15, 1} +} + +func (x *ConfigureProvider_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +// Request is the message that is sent to the provider during the +// ReadResource RPC. +// +// This message intentionally does not include configuration data as any +// configuration-based or configuration-conditional changes should occur +// during the PlanResourceChange RPC. Additionally, the configuration is +// not guaranteed to be wholly known nor match the given prior state, which +// could lead to unexpected provider behaviors for practitioners. 
+type ReadResource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,4,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ReadResource_Request) Reset() { + *x = ReadResource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource_Request) ProtoMessage() {} + +func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource_Request.ProtoReflect.Descriptor instead. +func (*ReadResource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{16, 0} +} + +func (x *ReadResource_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ReadResource_Request) GetCurrentState() *DynamicValue { + if x != nil { + return x.CurrentState + } + return nil +} + +func (x *ReadResource_Request) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +func (x *ReadResource_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ReadResource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` +} + +func (x *ReadResource_Response) Reset() { + *x = ReadResource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource_Response) ProtoMessage() {} + +func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource_Response.ProtoReflect.Descriptor instead. 
+func (*ReadResource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{16, 1} +} + +func (x *ReadResource_Response) GetNewState() *DynamicValue { + if x != nil { + return x.NewState + } + return nil +} + +func (x *ReadResource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *ReadResource_Response) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +type PlanResourceChange_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *PlanResourceChange_Request) Reset() { + *x = PlanResourceChange_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange_Request) ProtoMessage() {} + +func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange_Request.ProtoReflect.Descriptor instead. 
+func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{17, 0} +} + +func (x *PlanResourceChange_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *PlanResourceChange_Request) GetPriorState() *DynamicValue { + if x != nil { + return x.PriorState + } + return nil +} + +func (x *PlanResourceChange_Request) GetProposedNewState() *DynamicValue { + if x != nil { + return x.ProposedNewState + } + return nil +} + +func (x *PlanResourceChange_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *PlanResourceChange_Request) GetPriorPrivate() []byte { + if x != nil { + return x.PriorPrivate + } + return nil +} + +func (x *PlanResourceChange_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type PlanResourceChange_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlannedState *DynamicValue `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` +} + +func (x *PlanResourceChange_Response) Reset() { + *x = PlanResourceChange_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange_Response) ProtoMessage() {} + +func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange_Response.ProtoReflect.Descriptor instead. 
+func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{17, 1} +} + +func (x *PlanResourceChange_Response) GetPlannedState() *DynamicValue { + if x != nil { + return x.PlannedState + } + return nil +} + +func (x *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath { + if x != nil { + return x.RequiresReplace + } + return nil +} + +func (x *PlanResourceChange_Response) GetPlannedPrivate() []byte { + if x != nil { + return x.PlannedPrivate + } + return nil +} + +func (x *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *PlanResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + +type ApplyResourceChange_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ApplyResourceChange_Request) Reset() { + *x = ApplyResourceChange_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange_Request) ProtoMessage() {} + +func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange_Request.ProtoReflect.Descriptor instead. 
+func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) {
+	return file_tfplugin6_proto_rawDescGZIP(), []int{18, 0}
+}
+
+func (x *ApplyResourceChange_Request) GetTypeName() string {
+	if x != nil {
+		return x.TypeName
+	}
+	return ""
+}
+
+func (x *ApplyResourceChange_Request) GetPriorState() *DynamicValue {
+	if x != nil {
+		return x.PriorState
+	}
+	return nil
+}
+
+func (x *ApplyResourceChange_Request) GetPlannedState() *DynamicValue {
+	if x != nil {
+		return x.PlannedState
+	}
+	return nil
+}
+
+func (x *ApplyResourceChange_Request) GetConfig() *DynamicValue {
+	if x != nil {
+		return x.Config
+	}
+	return nil
+}
+
+func (x *ApplyResourceChange_Request) GetPlannedPrivate() []byte {
+	if x != nil {
+		return x.PlannedPrivate
+	}
+	return nil
+}
+
+func (x *ApplyResourceChange_Request) GetProviderMeta() *DynamicValue {
+	if x != nil {
+		return x.ProviderMeta
+	}
+	return nil
+}
+
+type ApplyResourceChange_Response struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	NewState    *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
+	Private     []byte        `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"`
+	Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
+	// This may be set only by the helper/schema "SDK" in the main Terraform
+	// repository, to request that Terraform Core >=0.12 permit additional
+	// inconsistencies that can result from the legacy SDK type system
+	// and its imprecise mapping to the >=0.12 type system.
+	// The change in behavior implied by this flag makes sense only for the
+	// specific details of the legacy SDK type system, and is not a general
+	// mechanism to avoid proper type handling in providers.
+	//
+	// ==== DO NOT USE THIS ====
+	// ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
+	// ==== DO NOT USE THIS ====
+	LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"`
+}
+
+func (x *ApplyResourceChange_Response) Reset() {
+	*x = ApplyResourceChange_Response{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_tfplugin6_proto_msgTypes[59]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ApplyResourceChange_Response) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ApplyResourceChange_Response) ProtoMessage() {}
+
+func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message {
+	mi := &file_tfplugin6_proto_msgTypes[59]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ApplyResourceChange_Response.ProtoReflect.Descriptor instead.
+func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{18, 1} +} + +func (x *ApplyResourceChange_Response) GetNewState() *DynamicValue { + if x != nil { + return x.NewState + } + return nil +} + +func (x *ApplyResourceChange_Response) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +func (x *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + +type ImportResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ImportResourceState_Request) Reset() { + *x = ImportResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_Request) ProtoMessage() {} + +func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_Request.ProtoReflect.Descriptor instead. +func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{19, 0} +} + +func (x *ImportResourceState_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ImportResourceState_Request) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type ImportResourceState_ImportedResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` +} + +func (x *ImportResourceState_ImportedResource) Reset() { + *x = ImportResourceState_ImportedResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_ImportedResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_ImportedResource) ProtoMessage() {} + +func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_ImportedResource.ProtoReflect.Descriptor instead. 
+func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{19, 1} +} + +func (x *ImportResourceState_ImportedResource) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ImportResourceState_ImportedResource) GetState() *DynamicValue { + if x != nil { + return x.State + } + return nil +} + +func (x *ImportResourceState_ImportedResource) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +type ImportResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ImportResourceState_Response) Reset() { + *x = ImportResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_Response) ProtoMessage() {} + +func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_Response.ProtoReflect.Descriptor instead. +func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{19, 2} +} + +func (x *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { + if x != nil { + return x.ImportedResources + } + return nil +} + +func (x *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type MoveResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The address of the provider the resource is being moved from. + SourceProviderAddress string `protobuf:"bytes,1,opt,name=source_provider_address,json=sourceProviderAddress,proto3" json:"source_provider_address,omitempty"` + // The resource type that the resource is being moved from. + SourceTypeName string `protobuf:"bytes,2,opt,name=source_type_name,json=sourceTypeName,proto3" json:"source_type_name,omitempty"` + // The schema version of the resource type that the resource is being + // moved from. + SourceSchemaVersion int64 `protobuf:"varint,3,opt,name=source_schema_version,json=sourceSchemaVersion,proto3" json:"source_schema_version,omitempty"` + // The raw state of the resource being moved. Only the json field is + // populated, as there should be no legacy providers using the flatmap + // format that support newly introduced RPCs. + SourceState *RawState `protobuf:"bytes,4,opt,name=source_state,json=sourceState,proto3" json:"source_state,omitempty"` + // The resource type that the resource is being moved to. 
+ TargetTypeName string `protobuf:"bytes,5,opt,name=target_type_name,json=targetTypeName,proto3" json:"target_type_name,omitempty"` + // The private state of the resource being moved. + SourcePrivate []byte `protobuf:"bytes,6,opt,name=source_private,json=sourcePrivate,proto3" json:"source_private,omitempty"` +} + +func (x *MoveResourceState_Request) Reset() { + *x = MoveResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResourceState_Request) ProtoMessage() {} + +func (x *MoveResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResourceState_Request.ProtoReflect.Descriptor instead. +func (*MoveResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *MoveResourceState_Request) GetSourceProviderAddress() string { + if x != nil { + return x.SourceProviderAddress + } + return "" +} + +func (x *MoveResourceState_Request) GetSourceTypeName() string { + if x != nil { + return x.SourceTypeName + } + return "" +} + +func (x *MoveResourceState_Request) GetSourceSchemaVersion() int64 { + if x != nil { + return x.SourceSchemaVersion + } + return 0 +} + +func (x *MoveResourceState_Request) GetSourceState() *RawState { + if x != nil { + return x.SourceState + } + return nil +} + +func (x *MoveResourceState_Request) GetTargetTypeName() string { + if x != nil { + return x.TargetTypeName + } + return "" +} + +func (x *MoveResourceState_Request) GetSourcePrivate() []byte { + if x != nil { + return x.SourcePrivate + } + return nil +} + +type MoveResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The state of the resource after it has been moved. + TargetState *DynamicValue `protobuf:"bytes,1,opt,name=target_state,json=targetState,proto3" json:"target_state,omitempty"` + // Any diagnostics that occurred during the move. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // The private state of the resource after it has been moved. + TargetPrivate []byte `protobuf:"bytes,3,opt,name=target_private,json=targetPrivate,proto3" json:"target_private,omitempty"` +} + +func (x *MoveResourceState_Response) Reset() { + *x = MoveResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResourceState_Response) ProtoMessage() {} + +func (x *MoveResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResourceState_Response.ProtoReflect.Descriptor instead. 
+func (*MoveResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{20, 1} +} + +func (x *MoveResourceState_Response) GetTargetState() *DynamicValue { + if x != nil { + return x.TargetState + } + return nil +} + +func (x *MoveResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *MoveResourceState_Response) GetTargetPrivate() []byte { + if x != nil { + return x.TargetPrivate + } + return nil +} + +type ReadDataSource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,3,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ReadDataSource_Request) Reset() { + *x = ReadDataSource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource_Request) ProtoMessage() {} + +func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource_Request.ProtoReflect.Descriptor instead. +func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{21, 0} +} + +func (x *ReadDataSource_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ReadDataSource_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *ReadDataSource_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ReadDataSource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ReadDataSource_Response) Reset() { + *x = ReadDataSource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource_Response) ProtoMessage() {} + +func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource_Response.ProtoReflect.Descriptor instead. 
+func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{21, 1} +} + +func (x *ReadDataSource_Response) GetState() *DynamicValue { + if x != nil { + return x.State + } + return nil +} + +func (x *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type GetFunctions_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFunctions_Request) Reset() { + *x = GetFunctions_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions_Request) ProtoMessage() {} + +func (x *GetFunctions_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions_Request.ProtoReflect.Descriptor instead. +func (*GetFunctions_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{22, 0} +} + +type GetFunctions_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // functions is a mapping of function names to definitions. + Functions map[string]*Function `protobuf:"bytes,1,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // diagnostics is any warnings or errors. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *GetFunctions_Response) Reset() { + *x = GetFunctions_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions_Response) ProtoMessage() {} + +func (x *GetFunctions_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions_Response.ProtoReflect.Descriptor instead. +func (*GetFunctions_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{22, 1} +} + +func (x *GetFunctions_Response) GetFunctions() map[string]*Function { + if x != nil { + return x.Functions + } + return nil +} + +func (x *GetFunctions_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type CallFunction_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the name of the function being called. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // arguments is the data of each function argument value. 
+	Arguments []*DynamicValue `protobuf:"bytes,2,rep,name=arguments,proto3" json:"arguments,omitempty"`
+}
+
+func (x *CallFunction_Request) Reset() {
+	*x = CallFunction_Request{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_tfplugin6_proto_msgTypes[70]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *CallFunction_Request) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CallFunction_Request) ProtoMessage() {}
+
+func (x *CallFunction_Request) ProtoReflect() protoreflect.Message {
+	mi := &file_tfplugin6_proto_msgTypes[70]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CallFunction_Request.ProtoReflect.Descriptor instead.
+func (*CallFunction_Request) Descriptor() ([]byte, []int) {
+	return file_tfplugin6_proto_rawDescGZIP(), []int{23, 0}
+}
+
+func (x *CallFunction_Request) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *CallFunction_Request) GetArguments() []*DynamicValue {
+	if x != nil {
+		return x.Arguments
+	}
+	return nil
+}
+
+type CallFunction_Response struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// result is the result value after running the function logic.
+	Result *DynamicValue `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"`
+	// error is any error from the function logic.
+	Error *FunctionError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *CallFunction_Response) Reset() {
+	*x = CallFunction_Response{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_tfplugin6_proto_msgTypes[71]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *CallFunction_Response) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CallFunction_Response) ProtoMessage() {}
+
+func (x *CallFunction_Response) ProtoReflect() protoreflect.Message {
+	mi := &file_tfplugin6_proto_msgTypes[71]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CallFunction_Response.ProtoReflect.Descriptor instead.
+func (*CallFunction_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{23, 1} +} + +func (x *CallFunction_Response) GetResult() *DynamicValue { + if x != nil { + return x.Result + } + return nil +} + +func (x *CallFunction_Response) GetError() *FunctionError { + if x != nil { + return x.Error + } + return nil +} + +var File_tfplugin6_proto protoreflect.FileDescriptor + +var file_tfplugin6_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x09, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x22, 0x3c, 0x0a, 0x0c, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, + 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0xe3, 0x01, 0x0a, 0x0a, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x73, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x22, + 0x2f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x49, + 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, + 0x22, 0x6b, 0x0a, 0x0d, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x00, 0x52, 0x10, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0xdc, 0x01, + 0x0a, 0x0d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, + 0x33, 0x0a, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x52, 0x05, 0x73, + 0x74, 0x65, 0x70, 0x73, 0x1a, 0x95, 0x01, 0x0a, 0x04, 
0x53, 0x74, 0x65, 0x70, 0x12, 0x27, 0x0a, + 0x0e, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x28, 0x0a, 0x0f, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x00, 0x52, 0x0d, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x49, 0x6e, 0x74, + 0x42, 0x0a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x3b, 0x0a, 0x0c, + 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x1a, 0x09, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x52, 0x61, + 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x07, 0x66, 0x6c, + 0x61, 0x74, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x2e, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x66, + 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x1a, 0x3a, 0x0a, 0x0c, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x95, 0x0a, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x18, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x1a, 0xa2, 0x02, 0x0a, 0x05, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 
0x6d, 0x61, 0x2e, + 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x0a, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xe4, 0x02, 0x0a, 0x09, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, + 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, + 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, + 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, + 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, + 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, + 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x1a, 0xa7, 0x02, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 
0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x43, + 0x0a, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x4e, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x4d, 0x0a, + 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, + 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, + 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, + 0x04, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x1a, 0x8b, 0x02, 0x0a, + 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x6d, 0x69, 0x6e, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x6d, 0x61, + 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x42, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, + 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, + 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, 0x04, 0x22, 0x8e, 0x05, 0x0a, 0x08, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 
0x73, 0x12, 0x4c, 0x0a, 0x12, 0x76, 0x61, 0x72, 0x69, 0x61, 0x64, + 0x69, 0x63, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x11, 0x76, 0x61, 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x52, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0xf3, 0x01, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x75, + 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x1a, 0x1c, 0x0a, + 0x06, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 
0x70, 0x65, 0x22, 0xa8, 0x01, 0x0a, 0x12, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, + 0x6f, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x6e, 0x44, 0x65, + 0x73, 0x74, 0x72, 0x6f, 0x79, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, 0x74, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x11, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x96, 0x04, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0xef, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, + 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, + 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x4c, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0x26, 0x0a, 0x10, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 
0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x31, 0x0a, 0x12, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x2f, + 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, + 0xc7, 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0xa6, 0x06, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, + 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, + 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, + 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 
0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x12, 0x52, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, + 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, 0x0e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x99, 0x01, 0x0a, 0x16, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, + 0x0a, 0x07, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xc1, + 0x01, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x1a, 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, + 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, + 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, + 0x74, 0x61, 0x1a, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x0c, 0x52, + 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0xf2, 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, + 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, + 0xbb, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, + 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, + 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, + 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, + 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x9d, 0x02, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, + 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, + 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 
0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, + 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0x92, 0x04, + 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, + 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, + 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xc1, + 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, + 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x6c, 0x65, 0x67, 
0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x22, 0xed, 0x02, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, 0x07, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa3, 0x01, 0x0a, + 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x22, 0xe7, 0x03, 0x0a, 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xa8, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x10, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 
0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x61, 0x77, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x1a, 0xa6, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x3a, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0x9c, 0x02, 0x0a, + 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, + 0x95, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x81, 
0x02, 0x0a, 0x0c, + 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x09, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xe5, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x1a, 0x51, 0x0a, 0x0e, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xd1, 0x01, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x54, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x35, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x61, 0x72, 0x67, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x6b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, + 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, + 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0xa4, 0x0c, 0x0a, 0x08, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 
0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, + 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1a, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x60, 
0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x25, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, + 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, + 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x4d, + 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, + 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x43, + 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, + 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1f, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, + 0x6f, 0x72, 0x6d, 0x2d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x66, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x76, 0x36, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_tfplugin6_proto_rawDescOnce sync.Once + file_tfplugin6_proto_rawDescData = file_tfplugin6_proto_rawDesc +) + +func file_tfplugin6_proto_rawDescGZIP() []byte { + file_tfplugin6_proto_rawDescOnce.Do(func() { + file_tfplugin6_proto_rawDescData = protoimpl.X.CompressGZIP(file_tfplugin6_proto_rawDescData) + }) + return file_tfplugin6_proto_rawDescData +} + +var file_tfplugin6_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_tfplugin6_proto_msgTypes = make([]protoimpl.MessageInfo, 72) +var file_tfplugin6_proto_goTypes = []interface{}{ + (StringKind)(0), // 0: tfplugin6.StringKind + (Diagnostic_Severity)(0), // 1: tfplugin6.Diagnostic.Severity + (Schema_NestedBlock_NestingMode)(0), // 2: tfplugin6.Schema.NestedBlock.NestingMode + (Schema_Object_NestingMode)(0), // 3: tfplugin6.Schema.Object.NestingMode + (*DynamicValue)(nil), // 4: tfplugin6.DynamicValue + (*Diagnostic)(nil), // 5: tfplugin6.Diagnostic + 
(*FunctionError)(nil), // 6: tfplugin6.FunctionError + (*AttributePath)(nil), // 7: tfplugin6.AttributePath + (*StopProvider)(nil), // 8: tfplugin6.StopProvider + (*RawState)(nil), // 9: tfplugin6.RawState + (*Schema)(nil), // 10: tfplugin6.Schema + (*Function)(nil), // 11: tfplugin6.Function + (*ServerCapabilities)(nil), // 12: tfplugin6.ServerCapabilities + (*GetMetadata)(nil), // 13: tfplugin6.GetMetadata + (*GetProviderSchema)(nil), // 14: tfplugin6.GetProviderSchema + (*ValidateProviderConfig)(nil), // 15: tfplugin6.ValidateProviderConfig + (*UpgradeResourceState)(nil), // 16: tfplugin6.UpgradeResourceState + (*ValidateResourceConfig)(nil), // 17: tfplugin6.ValidateResourceConfig + (*ValidateDataResourceConfig)(nil), // 18: tfplugin6.ValidateDataResourceConfig + (*ConfigureProvider)(nil), // 19: tfplugin6.ConfigureProvider + (*ReadResource)(nil), // 20: tfplugin6.ReadResource + (*PlanResourceChange)(nil), // 21: tfplugin6.PlanResourceChange + (*ApplyResourceChange)(nil), // 22: tfplugin6.ApplyResourceChange + (*ImportResourceState)(nil), // 23: tfplugin6.ImportResourceState + (*MoveResourceState)(nil), // 24: tfplugin6.MoveResourceState + (*ReadDataSource)(nil), // 25: tfplugin6.ReadDataSource + (*GetFunctions)(nil), // 26: tfplugin6.GetFunctions + (*CallFunction)(nil), // 27: tfplugin6.CallFunction + (*AttributePath_Step)(nil), // 28: tfplugin6.AttributePath.Step + (*StopProvider_Request)(nil), // 29: tfplugin6.StopProvider.Request + (*StopProvider_Response)(nil), // 30: tfplugin6.StopProvider.Response + nil, // 31: tfplugin6.RawState.FlatmapEntry + (*Schema_Block)(nil), // 32: tfplugin6.Schema.Block + (*Schema_Attribute)(nil), // 33: tfplugin6.Schema.Attribute + (*Schema_NestedBlock)(nil), // 34: tfplugin6.Schema.NestedBlock + (*Schema_Object)(nil), // 35: tfplugin6.Schema.Object + (*Function_Parameter)(nil), // 36: tfplugin6.Function.Parameter + (*Function_Return)(nil), // 37: tfplugin6.Function.Return + (*GetMetadata_Request)(nil), // 38: tfplugin6.GetMetadata.Request + (*GetMetadata_Response)(nil), // 39: tfplugin6.GetMetadata.Response + (*GetMetadata_FunctionMetadata)(nil), // 40: tfplugin6.GetMetadata.FunctionMetadata + (*GetMetadata_DataSourceMetadata)(nil), // 41: tfplugin6.GetMetadata.DataSourceMetadata + (*GetMetadata_ResourceMetadata)(nil), // 42: tfplugin6.GetMetadata.ResourceMetadata + (*GetProviderSchema_Request)(nil), // 43: tfplugin6.GetProviderSchema.Request + (*GetProviderSchema_Response)(nil), // 44: tfplugin6.GetProviderSchema.Response + nil, // 45: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry + nil, // 46: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry + nil, // 47: tfplugin6.GetProviderSchema.Response.FunctionsEntry + (*ValidateProviderConfig_Request)(nil), // 48: tfplugin6.ValidateProviderConfig.Request + (*ValidateProviderConfig_Response)(nil), // 49: tfplugin6.ValidateProviderConfig.Response + (*UpgradeResourceState_Request)(nil), // 50: tfplugin6.UpgradeResourceState.Request + (*UpgradeResourceState_Response)(nil), // 51: tfplugin6.UpgradeResourceState.Response + (*ValidateResourceConfig_Request)(nil), // 52: tfplugin6.ValidateResourceConfig.Request + (*ValidateResourceConfig_Response)(nil), // 53: tfplugin6.ValidateResourceConfig.Response + (*ValidateDataResourceConfig_Request)(nil), // 54: tfplugin6.ValidateDataResourceConfig.Request + (*ValidateDataResourceConfig_Response)(nil), // 55: tfplugin6.ValidateDataResourceConfig.Response + (*ConfigureProvider_Request)(nil), // 56: tfplugin6.ConfigureProvider.Request + 
(*ConfigureProvider_Response)(nil), // 57: tfplugin6.ConfigureProvider.Response + (*ReadResource_Request)(nil), // 58: tfplugin6.ReadResource.Request + (*ReadResource_Response)(nil), // 59: tfplugin6.ReadResource.Response + (*PlanResourceChange_Request)(nil), // 60: tfplugin6.PlanResourceChange.Request + (*PlanResourceChange_Response)(nil), // 61: tfplugin6.PlanResourceChange.Response + (*ApplyResourceChange_Request)(nil), // 62: tfplugin6.ApplyResourceChange.Request + (*ApplyResourceChange_Response)(nil), // 63: tfplugin6.ApplyResourceChange.Response + (*ImportResourceState_Request)(nil), // 64: tfplugin6.ImportResourceState.Request + (*ImportResourceState_ImportedResource)(nil), // 65: tfplugin6.ImportResourceState.ImportedResource + (*ImportResourceState_Response)(nil), // 66: tfplugin6.ImportResourceState.Response + (*MoveResourceState_Request)(nil), // 67: tfplugin6.MoveResourceState.Request + (*MoveResourceState_Response)(nil), // 68: tfplugin6.MoveResourceState.Response + (*ReadDataSource_Request)(nil), // 69: tfplugin6.ReadDataSource.Request + (*ReadDataSource_Response)(nil), // 70: tfplugin6.ReadDataSource.Response + (*GetFunctions_Request)(nil), // 71: tfplugin6.GetFunctions.Request + (*GetFunctions_Response)(nil), // 72: tfplugin6.GetFunctions.Response + nil, // 73: tfplugin6.GetFunctions.Response.FunctionsEntry + (*CallFunction_Request)(nil), // 74: tfplugin6.CallFunction.Request + (*CallFunction_Response)(nil), // 75: tfplugin6.CallFunction.Response +} +var file_tfplugin6_proto_depIdxs = []int32{ + 1, // 0: tfplugin6.Diagnostic.severity:type_name -> tfplugin6.Diagnostic.Severity + 7, // 1: tfplugin6.Diagnostic.attribute:type_name -> tfplugin6.AttributePath + 28, // 2: tfplugin6.AttributePath.steps:type_name -> tfplugin6.AttributePath.Step + 31, // 3: tfplugin6.RawState.flatmap:type_name -> tfplugin6.RawState.FlatmapEntry + 32, // 4: tfplugin6.Schema.block:type_name -> tfplugin6.Schema.Block + 36, // 5: tfplugin6.Function.parameters:type_name -> tfplugin6.Function.Parameter + 36, // 6: tfplugin6.Function.variadic_parameter:type_name -> tfplugin6.Function.Parameter + 37, // 7: tfplugin6.Function.return:type_name -> tfplugin6.Function.Return + 0, // 8: tfplugin6.Function.description_kind:type_name -> tfplugin6.StringKind + 33, // 9: tfplugin6.Schema.Block.attributes:type_name -> tfplugin6.Schema.Attribute + 34, // 10: tfplugin6.Schema.Block.block_types:type_name -> tfplugin6.Schema.NestedBlock + 0, // 11: tfplugin6.Schema.Block.description_kind:type_name -> tfplugin6.StringKind + 35, // 12: tfplugin6.Schema.Attribute.nested_type:type_name -> tfplugin6.Schema.Object + 0, // 13: tfplugin6.Schema.Attribute.description_kind:type_name -> tfplugin6.StringKind + 32, // 14: tfplugin6.Schema.NestedBlock.block:type_name -> tfplugin6.Schema.Block + 2, // 15: tfplugin6.Schema.NestedBlock.nesting:type_name -> tfplugin6.Schema.NestedBlock.NestingMode + 33, // 16: tfplugin6.Schema.Object.attributes:type_name -> tfplugin6.Schema.Attribute + 3, // 17: tfplugin6.Schema.Object.nesting:type_name -> tfplugin6.Schema.Object.NestingMode + 0, // 18: tfplugin6.Function.Parameter.description_kind:type_name -> tfplugin6.StringKind + 12, // 19: tfplugin6.GetMetadata.Response.server_capabilities:type_name -> tfplugin6.ServerCapabilities + 5, // 20: tfplugin6.GetMetadata.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 41, // 21: tfplugin6.GetMetadata.Response.data_sources:type_name -> tfplugin6.GetMetadata.DataSourceMetadata + 42, // 22: tfplugin6.GetMetadata.Response.resources:type_name -> 
tfplugin6.GetMetadata.ResourceMetadata + 40, // 23: tfplugin6.GetMetadata.Response.functions:type_name -> tfplugin6.GetMetadata.FunctionMetadata + 10, // 24: tfplugin6.GetProviderSchema.Response.provider:type_name -> tfplugin6.Schema + 45, // 25: tfplugin6.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry + 46, // 26: tfplugin6.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry + 5, // 27: tfplugin6.GetProviderSchema.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 10, // 28: tfplugin6.GetProviderSchema.Response.provider_meta:type_name -> tfplugin6.Schema + 12, // 29: tfplugin6.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin6.ServerCapabilities + 47, // 30: tfplugin6.GetProviderSchema.Response.functions:type_name -> tfplugin6.GetProviderSchema.Response.FunctionsEntry + 10, // 31: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin6.Schema + 10, // 32: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin6.Schema + 11, // 33: tfplugin6.GetProviderSchema.Response.FunctionsEntry.value:type_name -> tfplugin6.Function + 4, // 34: tfplugin6.ValidateProviderConfig.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 35: tfplugin6.ValidateProviderConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 9, // 36: tfplugin6.UpgradeResourceState.Request.raw_state:type_name -> tfplugin6.RawState + 4, // 37: tfplugin6.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin6.DynamicValue + 5, // 38: tfplugin6.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 39: tfplugin6.ValidateResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 40: tfplugin6.ValidateResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 41: tfplugin6.ValidateDataResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 42: tfplugin6.ValidateDataResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 43: tfplugin6.ConfigureProvider.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 44: tfplugin6.ConfigureProvider.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 45: tfplugin6.ReadResource.Request.current_state:type_name -> tfplugin6.DynamicValue + 4, // 46: tfplugin6.ReadResource.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 47: tfplugin6.ReadResource.Response.new_state:type_name -> tfplugin6.DynamicValue + 5, // 48: tfplugin6.ReadResource.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 49: tfplugin6.PlanResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue + 4, // 50: tfplugin6.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin6.DynamicValue + 4, // 51: tfplugin6.PlanResourceChange.Request.config:type_name -> tfplugin6.DynamicValue + 4, // 52: tfplugin6.PlanResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 53: tfplugin6.PlanResourceChange.Response.planned_state:type_name -> tfplugin6.DynamicValue + 7, // 54: tfplugin6.PlanResourceChange.Response.requires_replace:type_name -> tfplugin6.AttributePath + 5, // 55: tfplugin6.PlanResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 56: tfplugin6.ApplyResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue + 4, // 57: 
tfplugin6.ApplyResourceChange.Request.planned_state:type_name -> tfplugin6.DynamicValue + 4, // 58: tfplugin6.ApplyResourceChange.Request.config:type_name -> tfplugin6.DynamicValue + 4, // 59: tfplugin6.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 60: tfplugin6.ApplyResourceChange.Response.new_state:type_name -> tfplugin6.DynamicValue + 5, // 61: tfplugin6.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 62: tfplugin6.ImportResourceState.ImportedResource.state:type_name -> tfplugin6.DynamicValue + 65, // 63: tfplugin6.ImportResourceState.Response.imported_resources:type_name -> tfplugin6.ImportResourceState.ImportedResource + 5, // 64: tfplugin6.ImportResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 9, // 65: tfplugin6.MoveResourceState.Request.source_state:type_name -> tfplugin6.RawState + 4, // 66: tfplugin6.MoveResourceState.Response.target_state:type_name -> tfplugin6.DynamicValue + 5, // 67: tfplugin6.MoveResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 68: tfplugin6.ReadDataSource.Request.config:type_name -> tfplugin6.DynamicValue + 4, // 69: tfplugin6.ReadDataSource.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 70: tfplugin6.ReadDataSource.Response.state:type_name -> tfplugin6.DynamicValue + 5, // 71: tfplugin6.ReadDataSource.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 73, // 72: tfplugin6.GetFunctions.Response.functions:type_name -> tfplugin6.GetFunctions.Response.FunctionsEntry + 5, // 73: tfplugin6.GetFunctions.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 11, // 74: tfplugin6.GetFunctions.Response.FunctionsEntry.value:type_name -> tfplugin6.Function + 4, // 75: tfplugin6.CallFunction.Request.arguments:type_name -> tfplugin6.DynamicValue + 4, // 76: tfplugin6.CallFunction.Response.result:type_name -> tfplugin6.DynamicValue + 6, // 77: tfplugin6.CallFunction.Response.error:type_name -> tfplugin6.FunctionError + 38, // 78: tfplugin6.Provider.GetMetadata:input_type -> tfplugin6.GetMetadata.Request + 43, // 79: tfplugin6.Provider.GetProviderSchema:input_type -> tfplugin6.GetProviderSchema.Request + 48, // 80: tfplugin6.Provider.ValidateProviderConfig:input_type -> tfplugin6.ValidateProviderConfig.Request + 52, // 81: tfplugin6.Provider.ValidateResourceConfig:input_type -> tfplugin6.ValidateResourceConfig.Request + 54, // 82: tfplugin6.Provider.ValidateDataResourceConfig:input_type -> tfplugin6.ValidateDataResourceConfig.Request + 50, // 83: tfplugin6.Provider.UpgradeResourceState:input_type -> tfplugin6.UpgradeResourceState.Request + 56, // 84: tfplugin6.Provider.ConfigureProvider:input_type -> tfplugin6.ConfigureProvider.Request + 58, // 85: tfplugin6.Provider.ReadResource:input_type -> tfplugin6.ReadResource.Request + 60, // 86: tfplugin6.Provider.PlanResourceChange:input_type -> tfplugin6.PlanResourceChange.Request + 62, // 87: tfplugin6.Provider.ApplyResourceChange:input_type -> tfplugin6.ApplyResourceChange.Request + 64, // 88: tfplugin6.Provider.ImportResourceState:input_type -> tfplugin6.ImportResourceState.Request + 67, // 89: tfplugin6.Provider.MoveResourceState:input_type -> tfplugin6.MoveResourceState.Request + 69, // 90: tfplugin6.Provider.ReadDataSource:input_type -> tfplugin6.ReadDataSource.Request + 71, // 91: tfplugin6.Provider.GetFunctions:input_type -> tfplugin6.GetFunctions.Request + 74, // 92: tfplugin6.Provider.CallFunction:input_type -> tfplugin6.CallFunction.Request + 29, // 93: 
tfplugin6.Provider.StopProvider:input_type -> tfplugin6.StopProvider.Request + 39, // 94: tfplugin6.Provider.GetMetadata:output_type -> tfplugin6.GetMetadata.Response + 44, // 95: tfplugin6.Provider.GetProviderSchema:output_type -> tfplugin6.GetProviderSchema.Response + 49, // 96: tfplugin6.Provider.ValidateProviderConfig:output_type -> tfplugin6.ValidateProviderConfig.Response + 53, // 97: tfplugin6.Provider.ValidateResourceConfig:output_type -> tfplugin6.ValidateResourceConfig.Response + 55, // 98: tfplugin6.Provider.ValidateDataResourceConfig:output_type -> tfplugin6.ValidateDataResourceConfig.Response + 51, // 99: tfplugin6.Provider.UpgradeResourceState:output_type -> tfplugin6.UpgradeResourceState.Response + 57, // 100: tfplugin6.Provider.ConfigureProvider:output_type -> tfplugin6.ConfigureProvider.Response + 59, // 101: tfplugin6.Provider.ReadResource:output_type -> tfplugin6.ReadResource.Response + 61, // 102: tfplugin6.Provider.PlanResourceChange:output_type -> tfplugin6.PlanResourceChange.Response + 63, // 103: tfplugin6.Provider.ApplyResourceChange:output_type -> tfplugin6.ApplyResourceChange.Response + 66, // 104: tfplugin6.Provider.ImportResourceState:output_type -> tfplugin6.ImportResourceState.Response + 68, // 105: tfplugin6.Provider.MoveResourceState:output_type -> tfplugin6.MoveResourceState.Response + 70, // 106: tfplugin6.Provider.ReadDataSource:output_type -> tfplugin6.ReadDataSource.Response + 72, // 107: tfplugin6.Provider.GetFunctions:output_type -> tfplugin6.GetFunctions.Response + 75, // 108: tfplugin6.Provider.CallFunction:output_type -> tfplugin6.CallFunction.Response + 30, // 109: tfplugin6.Provider.StopProvider:output_type -> tfplugin6.StopProvider.Response + 94, // [94:110] is the sub-list for method output_type + 78, // [78:94] is the sub-list for method input_type + 78, // [78:78] is the sub-list for extension type_name + 78, // [78:78] is the sub-list for extension extendee + 0, // [0:78] is the sub-list for field type_name +} + +func init() { file_tfplugin6_proto_init() } +func file_tfplugin6_proto_init() { + if File_tfplugin6_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_tfplugin6_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Diagnostic); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FunctionError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopProvider); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RawState); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerCapabilities); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProviderConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataResourceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureProvider); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*ImportResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath_Step); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopProvider_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopProvider_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Block); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Attribute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_NestedBlock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Object); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function_Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function_Return); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[34].Exporter 
= func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_FunctionMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_DataSourceMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_ResourceMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProviderConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProviderConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*ValidateDataResourceConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataResourceConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureProvider_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureProvider_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_ImportedResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveResourceState_Request); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_tfplugin6_proto_msgTypes[2].OneofWrappers = []interface{}{} + file_tfplugin6_proto_msgTypes[24].OneofWrappers = []interface{}{ + (*AttributePath_Step_AttributeName)(nil), + (*AttributePath_Step_ElementKeyString)(nil), + (*AttributePath_Step_ElementKeyInt)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_tfplugin6_proto_rawDesc, + NumEnums: 4, + NumMessages: 72, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_tfplugin6_proto_goTypes, + DependencyIndexes: file_tfplugin6_proto_depIdxs, + EnumInfos: file_tfplugin6_proto_enumTypes, + MessageInfos: file_tfplugin6_proto_msgTypes, + }.Build() + File_tfplugin6_proto = out.File + file_tfplugin6_proto_rawDesc = nil + file_tfplugin6_proto_goTypes = nil + file_tfplugin6_proto_depIdxs = nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto new file mode 100644 index 0000000000..097abf0cca --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto @@ -0,0 +1,570 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Terraform Plugin RPC protocol version 6.5 +// +// This file defines version 6.5 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. 
+// +// This file will not be updated. Any minor versions of protocol 6 to follow +// should copy this file and modify the copy while maintaining backwards +// compatibility. Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. +// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. +// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the main +// branch or any other development branch. +// +syntax = "proto3"; +option go_package = "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6"; + +package tfplugin6; + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. +message DynamicValue { + bytes msgpack = 1; + bytes json = 2; +} + +message Diagnostic { + enum Severity { + INVALID = 0; + ERROR = 1; + WARNING = 2; + } + Severity severity = 1; + string summary = 2; + string detail = 3; + AttributePath attribute = 4; +} + +message FunctionError { + string text = 1; + // The optional function_argument records the index position of the + // argument which caused the error. + optional int64 function_argument = 2; +} + +message AttributePath { + message Step { + oneof selector { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + string attribute_name = 1; + // Set "element_key_*" to represent looking up an element in + // an indexable collection type. + string element_key_string = 2; + int64 element_key_int = 3; + } + } + repeated Step steps = 1; +} + +message StopProvider { + message Request { + } + message Response { + string Error = 1; + } +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. +message RawState { + bytes json = 1; + map<string, string> flatmap = 2; +} + +enum StringKind { + PLAIN = 0; + MARKDOWN = 1; +} + +// Schema is the configuration schema for a Resource or Provider. +message Schema { + message Block { + int64 version = 1; + repeated Attribute attributes = 2; + repeated NestedBlock block_types = 3; + string description = 4; + StringKind description_kind = 5; + bool deprecated = 6; + } + + message Attribute { + string name = 1; + bytes type = 2; + Object nested_type = 10; + string description = 3; + bool required = 4; + bool optional = 5; + bool computed = 6; + bool sensitive = 7; + StringKind description_kind = 8; + bool deprecated = 9; + } + + message NestedBlock { + enum NestingMode { + INVALID = 0; + SINGLE = 1; + LIST = 2; + SET = 3; + MAP = 4; + GROUP = 5; + } + + string type_name = 1; + Block block = 2; + NestingMode nesting = 3; + int64 min_items = 4; + int64 max_items = 5; + } + + message Object { + enum NestingMode { + INVALID = 0; + SINGLE = 1; + LIST = 2; + SET = 3; + MAP = 4; + } + + repeated Attribute attributes = 1; + NestingMode nesting = 3; + + // MinItems and MaxItems were never used in the protocol, and have no + // effect on validation. + int64 min_items = 4 [deprecated = true]; + int64 max_items = 5 [deprecated = true]; + } + + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + int64 version = 1; + + // Block is the top level configuration block for this schema. + Block block = 2; +}
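Editor's note: as a quick illustration of the Schema message shape above, the following hypothetical Go sketch (not part of this change; the generated tfplugin6 package is internal to terraform-plugin-go, so it only compiles within that module) builds a schema with one required string attribute. The Attribute.Type bytes carry a JSON-encoded cty type constraint, e.g. `"string"`.

	// A schema with a single required "name" attribute.
	schema := &tfplugin6.Schema{
		Version: 1,
		Block: &tfplugin6.Schema_Block{
			Attributes: []*tfplugin6.Schema_Attribute{{
				Name:     "name",
				Type:     []byte(`"string"`), // JSON-encoded cty type constraint
				Required: true,
			}},
		},
	}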
+ +message Function { + // parameters is the ordered list of positional function parameters. + repeated Parameter parameters = 1; + + // variadic_parameter is an optional final parameter which accepts + // zero or more argument values, in which Terraform will send an + // ordered list of the parameter type. + Parameter variadic_parameter = 2; + + // return is the function result. + Return return = 3; + + // summary is the human-readable shortened documentation for the function. + string summary = 4; + + // description is human-readable documentation for the function. + string description = 5; + + // description_kind is the formatting of the description. + StringKind description_kind = 6; + + // deprecation_message is human-readable documentation if the + // function is deprecated. + string deprecation_message = 7; + + message Parameter { + // name is the human-readable display name for the parameter. + string name = 1; + + // type is the type constraint for the parameter. + bytes type = 2; + + // allow_null_value when enabled denotes that a null argument value can + // be passed to the provider. When disabled, Terraform returns an error + // if the argument value is null. + bool allow_null_value = 3; + + // allow_unknown_values when enabled denotes that only wholly known + // argument values will be passed to the provider. When disabled, + // Terraform skips the function call entirely and assumes an unknown + // value result from the function. + bool allow_unknown_values = 4; + + // description is human-readable documentation for the parameter. + string description = 5; + + // description_kind is the formatting of the description. + StringKind description_kind = 6; + } + + message Return { + // type is the type constraint for the function result. + bytes type = 1; + } +} + +// ServerCapabilities allows providers to communicate extra information +// regarding supported protocol features. This is used to indicate +// availability of certain forward-compatible changes which may be optional +// in a major protocol version, but cannot be tested for directly. +message ServerCapabilities { + // The plan_destroy capability signals that a provider expects a call + // to PlanResourceChange when a resource is going to be destroyed. + bool plan_destroy = 1; + + // The get_provider_schema_optional capability indicates that this + // provider does not require calling GetProviderSchema to operate + // normally, and the caller can use a cached copy of the provider's + // schema. + bool get_provider_schema_optional = 2; + + // The move_resource_state capability signals that a provider supports the + // MoveResourceState RPC. + bool move_resource_state = 3; +} + +service Provider { + //////// Information about what a provider supports/expects + + // GetMetadata returns upfront information about server capabilities and + // supported resource types without requiring the server to instantiate all + // schema information, which may be memory intensive. This RPC is optional, + // where clients may receive an unimplemented RPC error. Clients should + // ignore the error and call the GetProviderSchema RPC as a fallback. + rpc GetMetadata(GetMetadata.Request) returns (GetMetadata.Response); + + // GetSchema returns schema information for the provider, data resources, + // and managed resources. + rpc GetProviderSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response); + rpc ValidateProviderConfig(ValidateProviderConfig.Request) returns (ValidateProviderConfig.Response); + rpc ValidateResourceConfig(ValidateResourceConfig.Request) returns (ValidateResourceConfig.Response); + rpc ValidateDataResourceConfig(ValidateDataResourceConfig.Request) returns (ValidateDataResourceConfig.Response); + rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response); + + //////// One-time initialization, called before other functions below + rpc ConfigureProvider(ConfigureProvider.Request) returns (ConfigureProvider.Response); + + //////// Managed Resource Lifecycle + rpc ReadResource(ReadResource.Request) returns (ReadResource.Response); + rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response); + rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response); + rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response); + rpc MoveResourceState(MoveResourceState.Request) returns (MoveResourceState.Response); + rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response); + + // Functions + + // GetFunctions returns the definitions of all functions. + rpc GetFunctions(GetFunctions.Request) returns (GetFunctions.Response); + + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. + rpc CallFunction(CallFunction.Request) returns (CallFunction.Response); + + //////// Graceful Shutdown + rpc StopProvider(StopProvider.Request) returns (StopProvider.Response); +}
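Editor's note: the GetMetadata fallback described above can be sketched client-side roughly as follows (a hypothetical helper, not part of this change; it assumes the generated tfplugin6 package plus google.golang.org/grpc/codes and google.golang.org/grpc/status are imported).

	// getSchema treats an Unimplemented GetMetadata as non-fatal and falls
	// back to the full GetProviderSchema RPC, as the service comment suggests.
	func getSchema(ctx context.Context, c tfplugin6.ProviderClient) (*tfplugin6.GetProviderSchema_Response, error) {
		if _, err := c.GetMetadata(ctx, &tfplugin6.GetMetadata_Request{}); err != nil {
			if status.Code(err) != codes.Unimplemented {
				return nil, err
			}
			// Unimplemented: older provider, fall through to the schema RPC.
		}
		return c.GetProviderSchema(ctx, &tfplugin6.GetProviderSchema_Request{})
	}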
+ +message GetMetadata { + message Request { + } + + message Response { + ServerCapabilities server_capabilities = 1; + repeated Diagnostic diagnostics = 2; + repeated DataSourceMetadata data_sources = 3; + repeated ResourceMetadata resources = 4; + + // functions returns metadata for any functions. + repeated FunctionMetadata functions = 5; + } + + message FunctionMetadata { + // name is the function name. + string name = 1; + } + + message DataSourceMetadata { + string type_name = 1; + } + + message ResourceMetadata { + string type_name = 1; + } +} + +message GetProviderSchema { + message Request { + } + message Response { + Schema provider = 1; + map<string, Schema> resource_schemas = 2; + map<string, Schema> data_source_schemas = 3; + repeated Diagnostic diagnostics = 4; + Schema provider_meta = 5; + ServerCapabilities server_capabilities = 6; + + // functions is a mapping of function names to definitions. + map<string, Function> functions = 7; + } +} + +message ValidateProviderConfig { + message Request { + DynamicValue config = 1; + } + message Response { + repeated Diagnostic diagnostics = 2; + } +} + +message UpgradeResourceState { + // Request is the message that is sent to the provider during the + // UpgradeResourceState RPC. + // + // This message intentionally does not include configuration data as any + // configuration-based or configuration-conditional changes should occur + // during the PlanResourceChange RPC. Additionally, the configuration is + // not guaranteed to exist (in the case of resource destruction), be wholly + // known, nor match the given prior state, which could lead to unexpected + // provider behaviors for practitioners. + message Request { + string type_name = 1; + + // version is the schema_version number recorded in the state file + int64 version = 2; + + // raw_state is the raw state as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. + RawState raw_state = 3; + } + message Response { + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw. + DynamicValue upgraded_state = 1; + + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. + repeated Diagnostic diagnostics = 2; + } +}
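Editor's note: to make the Request/Response shapes above concrete, here is a hypothetical provider-side handler (not part of this change; `server` is an assumed type embedding the generated UnimplementedProviderServer, field names follow the generated Go bindings, and encoding/json is assumed imported) that upgrades a version-0 JSON state.

	func (s *server) UpgradeResourceState(ctx context.Context, req *tfplugin6.UpgradeResourceState_Request) (*tfplugin6.UpgradeResourceState_Response, error) {
		var state map[string]interface{}
		if err := json.Unmarshal(req.RawState.Json, &state); err != nil {
			return nil, err
		}
		if req.Version < 1 {
			// Hypothetical migration: version 0 stored "id" as "identifier".
			if v, ok := state["identifier"]; ok {
				state["id"] = v
				delete(state, "identifier")
			}
		}
		upgraded, err := json.Marshal(state)
		if err != nil {
			return nil, err
		}
		return &tfplugin6.UpgradeResourceState_Response{
			UpgradedState: &tfplugin6.DynamicValue{Json: upgraded},
		}, nil
	}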
+ +message ValidateResourceConfig { + message Request { + string type_name = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ValidateDataResourceConfig { + message Request { + string type_name = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ConfigureProvider { + message Request { + string terraform_version = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ReadResource { + // Request is the message that is sent to the provider during the + // ReadResource RPC. + // + // This message intentionally does not include configuration data as any + // configuration-based or configuration-conditional changes should occur + // during the PlanResourceChange RPC. Additionally, the configuration is + // not guaranteed to be wholly known nor match the given prior state, which + // could lead to unexpected provider behaviors for practitioners. + message Request { + string type_name = 1; + DynamicValue current_state = 2; + bytes private = 3; + DynamicValue provider_meta = 4; + } + message Response { + DynamicValue new_state = 1; + repeated Diagnostic diagnostics = 2; + bytes private = 3; + } +} + +message PlanResourceChange { + message Request { + string type_name = 1; + DynamicValue prior_state = 2; + DynamicValue proposed_new_state = 3; + DynamicValue config = 4; + bytes prior_private = 5; + DynamicValue provider_meta = 6; + } + + message Response { + DynamicValue planned_state = 1; + repeated AttributePath requires_replace = 2; + bytes planned_private = 3; + repeated Diagnostic diagnostics = 4; + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and is not a general + // mechanism to avoid proper type handling in providers.
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 5; + } +} + +message ApplyResourceChange { + message Request { + string type_name = 1; + DynamicValue prior_state = 2; + DynamicValue planned_state = 3; + DynamicValue config = 4; + bytes planned_private = 5; + DynamicValue provider_meta = 6; + } + message Response { + DynamicValue new_state = 1; + bytes private = 2; + repeated Diagnostic diagnostics = 3; + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and is not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 4; + } +} + +message ImportResourceState { + message Request { + string type_name = 1; + string id = 2; + } + + message ImportedResource { + string type_name = 1; + DynamicValue state = 2; + bytes private = 3; + } + + message Response { + repeated ImportedResource imported_resources = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message MoveResourceState { + message Request { + // The address of the provider the resource is being moved from. + string source_provider_address = 1; + + // The resource type that the resource is being moved from. + string source_type_name = 2; + + // The schema version of the resource type that the resource is being + // moved from. + int64 source_schema_version = 3; + + // The raw state of the resource being moved. Only the json field is + // populated, as there should be no legacy providers using the flatmap + // format that support newly introduced RPCs. + RawState source_state = 4; + + // The resource type that the resource is being moved to. + string target_type_name = 5; + + // The private state of the resource being moved. + bytes source_private = 6; + } + + message Response { + // The state of the resource after it has been moved. + DynamicValue target_state = 1; + + // Any diagnostics that occurred during the move. + repeated Diagnostic diagnostics = 2; + + // The private state of the resource after it has been moved. + bytes target_private = 3; + } +} + +message ReadDataSource { + message Request { + string type_name = 1; + DynamicValue config = 2; + DynamicValue provider_meta = 3; + } + message Response { + DynamicValue state = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message GetFunctions { + message Request {} + + message Response { + // functions is a mapping of function names to definitions. + map<string, Function> functions = 1; + + // diagnostics is any warnings or errors. + repeated Diagnostic diagnostics = 2; + } +} + +message CallFunction { + message Request { + // name is the name of the function being called. + string name = 1; + + // arguments is the data of each function argument value. + repeated DynamicValue arguments = 2; + } + + message Response { + // result is the result value after running the function logic. + DynamicValue result = 1; + + // error is any errors from the function logic. + FunctionError error = 2; + } +}
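Editor's note: invoking a provider-defined function over this protocol is mechanical; a hypothetical client-side call (the function name "parse_id", `client`, `ctx`, and `argBytes` are assumptions for illustration, and fmt is assumed imported) distinguishes transport failures from function-logic errors:

	// A transport error comes back as err; a function-logic error comes back
	// in the response's FunctionError field.
	resp, err := client.CallFunction(ctx, &tfplugin6.CallFunction_Request{
		Name:      "parse_id",
		Arguments: []*tfplugin6.DynamicValue{{Msgpack: argBytes}},
	})
	if err != nil {
		return err // transport-level failure
	}
	if resp.Error != nil {
		return fmt.Errorf("function error: %s", resp.Error.Text) // function-logic failure
	}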
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go new file mode 100644 index 0000000000..3ae64b469f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go @@ -0,0 +1,712 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Terraform Plugin RPC protocol version 6.5 +// +// This file defines version 6.5 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. +// +// This file will not be updated. Any minor versions of protocol 6 to follow +// should copy this file and modify the copy while maintaining backwards +// compatibility. Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. +// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. +// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the main +// branch or any other development branch. +// + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.1 +// source: tfplugin6.proto + +package tfplugin6 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Provider_GetMetadata_FullMethodName = "/tfplugin6.Provider/GetMetadata" + Provider_GetProviderSchema_FullMethodName = "/tfplugin6.Provider/GetProviderSchema" + Provider_ValidateProviderConfig_FullMethodName = "/tfplugin6.Provider/ValidateProviderConfig" + Provider_ValidateResourceConfig_FullMethodName = "/tfplugin6.Provider/ValidateResourceConfig" + Provider_ValidateDataResourceConfig_FullMethodName = "/tfplugin6.Provider/ValidateDataResourceConfig" + Provider_UpgradeResourceState_FullMethodName = "/tfplugin6.Provider/UpgradeResourceState" + Provider_ConfigureProvider_FullMethodName = "/tfplugin6.Provider/ConfigureProvider" + Provider_ReadResource_FullMethodName = "/tfplugin6.Provider/ReadResource" + Provider_PlanResourceChange_FullMethodName = "/tfplugin6.Provider/PlanResourceChange" + Provider_ApplyResourceChange_FullMethodName = "/tfplugin6.Provider/ApplyResourceChange" + Provider_ImportResourceState_FullMethodName = "/tfplugin6.Provider/ImportResourceState" + Provider_MoveResourceState_FullMethodName = "/tfplugin6.Provider/MoveResourceState" + Provider_ReadDataSource_FullMethodName = "/tfplugin6.Provider/ReadDataSource" + Provider_GetFunctions_FullMethodName = "/tfplugin6.Provider/GetFunctions" + Provider_CallFunction_FullMethodName = "/tfplugin6.Provider/CallFunction" + Provider_StopProvider_FullMethodName = "/tfplugin6.Provider/StopProvider" +) + +// ProviderClient is the client API for Provider service.
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ProviderClient interface { + // GetMetadata returns upfront information about server capabilities and + // supported resource types without requiring the server to instantiate all + // schema information, which may be memory intensive. This RPC is optional, + // where clients may receive an unimplemented RPC error. Clients should + // ignore the error and call the GetProviderSchema RPC as a fallback. + GetMetadata(ctx context.Context, in *GetMetadata_Request, opts ...grpc.CallOption) (*GetMetadata_Response, error) + // GetSchema returns schema information for the provider, data resources, + // and managed resources. + GetProviderSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) + ValidateProviderConfig(ctx context.Context, in *ValidateProviderConfig_Request, opts ...grpc.CallOption) (*ValidateProviderConfig_Response, error) + ValidateResourceConfig(ctx context.Context, in *ValidateResourceConfig_Request, opts ...grpc.CallOption) (*ValidateResourceConfig_Response, error) + ValidateDataResourceConfig(ctx context.Context, in *ValidateDataResourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataResourceConfig_Response, error) + UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) + // ////// One-time initialization, called before other functions below + ConfigureProvider(ctx context.Context, in *ConfigureProvider_Request, opts ...grpc.CallOption) (*ConfigureProvider_Response, error) + // ////// Managed Resource Lifecycle + ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) + PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) + ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) + ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) + MoveResourceState(ctx context.Context, in *MoveResourceState_Request, opts ...grpc.CallOption) (*MoveResourceState_Response, error) + ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) + // GetFunctions returns the definitions of all functions. + GetFunctions(ctx context.Context, in *GetFunctions_Request, opts ...grpc.CallOption) (*GetFunctions_Response, error) + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. 
+ CallFunction(ctx context.Context, in *CallFunction_Request, opts ...grpc.CallOption) (*CallFunction_Response, error) + // ////// Graceful Shutdown + StopProvider(ctx context.Context, in *StopProvider_Request, opts ...grpc.CallOption) (*StopProvider_Response, error) +} + +type providerClient struct { + cc grpc.ClientConnInterface +} + +func NewProviderClient(cc grpc.ClientConnInterface) ProviderClient { + return &providerClient{cc} +} + +func (c *providerClient) GetMetadata(ctx context.Context, in *GetMetadata_Request, opts ...grpc.CallOption) (*GetMetadata_Response, error) { + out := new(GetMetadata_Response) + err := c.cc.Invoke(ctx, Provider_GetMetadata_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) GetProviderSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { + out := new(GetProviderSchema_Response) + err := c.cc.Invoke(ctx, Provider_GetProviderSchema_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateProviderConfig(ctx context.Context, in *ValidateProviderConfig_Request, opts ...grpc.CallOption) (*ValidateProviderConfig_Response, error) { + out := new(ValidateProviderConfig_Response) + err := c.cc.Invoke(ctx, Provider_ValidateProviderConfig_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateResourceConfig(ctx context.Context, in *ValidateResourceConfig_Request, opts ...grpc.CallOption) (*ValidateResourceConfig_Response, error) { + out := new(ValidateResourceConfig_Response) + err := c.cc.Invoke(ctx, Provider_ValidateResourceConfig_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateDataResourceConfig(ctx context.Context, in *ValidateDataResourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataResourceConfig_Response, error) { + out := new(ValidateDataResourceConfig_Response) + err := c.cc.Invoke(ctx, Provider_ValidateDataResourceConfig_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { + out := new(UpgradeResourceState_Response) + err := c.cc.Invoke(ctx, Provider_UpgradeResourceState_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ConfigureProvider(ctx context.Context, in *ConfigureProvider_Request, opts ...grpc.CallOption) (*ConfigureProvider_Response, error) { + out := new(ConfigureProvider_Response) + err := c.cc.Invoke(ctx, Provider_ConfigureProvider_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { + out := new(ReadResource_Response) + err := c.cc.Invoke(ctx, Provider_ReadResource_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { + out := new(PlanResourceChange_Response) + err := c.cc.Invoke(ctx, Provider_PlanResourceChange_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { + out := new(ApplyResourceChange_Response) + err := c.cc.Invoke(ctx, Provider_ApplyResourceChange_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { + out := new(ImportResourceState_Response) + err := c.cc.Invoke(ctx, Provider_ImportResourceState_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) MoveResourceState(ctx context.Context, in *MoveResourceState_Request, opts ...grpc.CallOption) (*MoveResourceState_Response, error) { + out := new(MoveResourceState_Response) + err := c.cc.Invoke(ctx, Provider_MoveResourceState_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { + out := new(ReadDataSource_Response) + err := c.cc.Invoke(ctx, Provider_ReadDataSource_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) GetFunctions(ctx context.Context, in *GetFunctions_Request, opts ...grpc.CallOption) (*GetFunctions_Response, error) { + out := new(GetFunctions_Response) + err := c.cc.Invoke(ctx, Provider_GetFunctions_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) CallFunction(ctx context.Context, in *CallFunction_Request, opts ...grpc.CallOption) (*CallFunction_Response, error) { + out := new(CallFunction_Response) + err := c.cc.Invoke(ctx, Provider_CallFunction_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) StopProvider(ctx context.Context, in *StopProvider_Request, opts ...grpc.CallOption) (*StopProvider_Response, error) { + out := new(StopProvider_Response) + err := c.cc.Invoke(ctx, Provider_StopProvider_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProviderServer is the server API for Provider service. +// All implementations must embed UnimplementedProviderServer +// for forward compatibility +type ProviderServer interface { + // GetMetadata returns upfront information about server capabilities and + // supported resource types without requiring the server to instantiate all + // schema information, which may be memory intensive. This RPC is optional, + // where clients may receive an unimplemented RPC error. Clients should + // ignore the error and call the GetProviderSchema RPC as a fallback. 
+ GetMetadata(context.Context, *GetMetadata_Request) (*GetMetadata_Response, error) + // GetSchema returns schema information for the provider, data resources, + // and managed resources. + GetProviderSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) + ValidateProviderConfig(context.Context, *ValidateProviderConfig_Request) (*ValidateProviderConfig_Response, error) + ValidateResourceConfig(context.Context, *ValidateResourceConfig_Request) (*ValidateResourceConfig_Response, error) + ValidateDataResourceConfig(context.Context, *ValidateDataResourceConfig_Request) (*ValidateDataResourceConfig_Response, error) + UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) + // ////// One-time initialization, called before other functions below + ConfigureProvider(context.Context, *ConfigureProvider_Request) (*ConfigureProvider_Response, error) + // ////// Managed Resource Lifecycle + ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) + PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) + ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) + ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) + MoveResourceState(context.Context, *MoveResourceState_Request) (*MoveResourceState_Response, error) + ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) + // GetFunctions returns the definitions of all functions. + GetFunctions(context.Context, *GetFunctions_Request) (*GetFunctions_Response, error) + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. + CallFunction(context.Context, *CallFunction_Request) (*CallFunction_Response, error) + // ////// Graceful Shutdown + StopProvider(context.Context, *StopProvider_Request) (*StopProvider_Response, error) + mustEmbedUnimplementedProviderServer() +} + +// UnimplementedProviderServer must be embedded to have forward compatible implementations. 
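Editor's note: a hypothetical provider server (not part of this change; context, net, and google.golang.org/grpc assumed imported) embeds UnimplementedProviderServer so that methods later added to ProviderServer do not break compilation, then registers itself with a grpc.Server:

	type myProvider struct {
		// Stubs from the embedded type answer codes.Unimplemented for any
		// RPC not overridden here, keeping the type forward compatible.
		tfplugin6.UnimplementedProviderServer
	}

	// Override only the RPCs this provider actually supports.
	func (p *myProvider) GetProviderSchema(ctx context.Context, req *tfplugin6.GetProviderSchema_Request) (*tfplugin6.GetProviderSchema_Response, error) {
		return &tfplugin6.GetProviderSchema_Response{}, nil // schema omitted for brevity
	}

	func serve(lis net.Listener) error {
		s := grpc.NewServer()
		tfplugin6.RegisterProviderServer(s, &myProvider{})
		return s.Serve(lis)
	}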
+type UnimplementedProviderServer struct { +} + +func (UnimplementedProviderServer) GetMetadata(context.Context, *GetMetadata_Request) (*GetMetadata_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMetadata not implemented") +} +func (UnimplementedProviderServer) GetProviderSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetProviderSchema not implemented") +} +func (UnimplementedProviderServer) ValidateProviderConfig(context.Context, *ValidateProviderConfig_Request) (*ValidateProviderConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateProviderConfig not implemented") +} +func (UnimplementedProviderServer) ValidateResourceConfig(context.Context, *ValidateResourceConfig_Request) (*ValidateResourceConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateResourceConfig not implemented") +} +func (UnimplementedProviderServer) ValidateDataResourceConfig(context.Context, *ValidateDataResourceConfig_Request) (*ValidateDataResourceConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateDataResourceConfig not implemented") +} +func (UnimplementedProviderServer) UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpgradeResourceState not implemented") +} +func (UnimplementedProviderServer) ConfigureProvider(context.Context, *ConfigureProvider_Request) (*ConfigureProvider_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConfigureProvider not implemented") +} +func (UnimplementedProviderServer) ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadResource not implemented") +} +func (UnimplementedProviderServer) PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PlanResourceChange not implemented") +} +func (UnimplementedProviderServer) ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplyResourceChange not implemented") +} +func (UnimplementedProviderServer) ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImportResourceState not implemented") +} +func (UnimplementedProviderServer) MoveResourceState(context.Context, *MoveResourceState_Request) (*MoveResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method MoveResourceState not implemented") +} +func (UnimplementedProviderServer) ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") +} +func (UnimplementedProviderServer) GetFunctions(context.Context, *GetFunctions_Request) (*GetFunctions_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetFunctions not implemented") +} +func (UnimplementedProviderServer) CallFunction(context.Context, *CallFunction_Request) (*CallFunction_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method CallFunction not 
implemented") +} +func (UnimplementedProviderServer) StopProvider(context.Context, *StopProvider_Request) (*StopProvider_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopProvider not implemented") +} +func (UnimplementedProviderServer) mustEmbedUnimplementedProviderServer() {} + +// UnsafeProviderServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ProviderServer will +// result in compilation errors. +type UnsafeProviderServer interface { + mustEmbedUnimplementedProviderServer() +} + +func RegisterProviderServer(s grpc.ServiceRegistrar, srv ProviderServer) { + s.RegisterService(&Provider_ServiceDesc, srv) +} + +func _Provider_GetMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMetadata_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetMetadata(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_GetMetadata_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetMetadata(ctx, req.(*GetMetadata_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_GetProviderSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProviderSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetProviderSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_GetProviderSchema_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetProviderSchema(ctx, req.(*GetProviderSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateProviderConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateProviderConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ValidateProviderConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateProviderConfig(ctx, req.(*ValidateProviderConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateResourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateResourceConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateResourceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ValidateResourceConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateResourceConfig(ctx, req.(*ValidateResourceConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateDataResourceConfig_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateDataResourceConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateDataResourceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ValidateDataResourceConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateDataResourceConfig(ctx, req.(*ValidateDataResourceConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpgradeResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).UpgradeResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_UpgradeResourceState_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ConfigureProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ConfigureProvider_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ConfigureProvider(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ConfigureProvider_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ConfigureProvider(ctx, req.(*ConfigureProvider_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadResource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ReadResource_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlanResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PlanResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_PlanResourceChange_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ApplyResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ApplyResourceChange_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ImportResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ImportResourceState_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_MoveResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MoveResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).MoveResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_MoveResourceState_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).MoveResourceState(ctx, req.(*MoveResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDataSource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadDataSource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_ReadDataSource_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_GetFunctions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetFunctions_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetFunctions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_GetFunctions_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetFunctions(ctx, req.(*GetFunctions_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_CallFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CallFunction_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil 
{ + return srv.(ProviderServer).CallFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_CallFunction_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).CallFunction(ctx, req.(*CallFunction_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_StopProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopProvider_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).StopProvider(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_StopProvider_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).StopProvider(ctx, req.(*StopProvider_Request)) + } + return interceptor(ctx, in, info, handler) +} + +// Provider_ServiceDesc is the grpc.ServiceDesc for Provider service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Provider_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin6.Provider", + HandlerType: (*ProviderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetMetadata", + Handler: _Provider_GetMetadata_Handler, + }, + { + MethodName: "GetProviderSchema", + Handler: _Provider_GetProviderSchema_Handler, + }, + { + MethodName: "ValidateProviderConfig", + Handler: _Provider_ValidateProviderConfig_Handler, + }, + { + MethodName: "ValidateResourceConfig", + Handler: _Provider_ValidateResourceConfig_Handler, + }, + { + MethodName: "ValidateDataResourceConfig", + Handler: _Provider_ValidateDataResourceConfig_Handler, + }, + { + MethodName: "UpgradeResourceState", + Handler: _Provider_UpgradeResourceState_Handler, + }, + { + MethodName: "ConfigureProvider", + Handler: _Provider_ConfigureProvider_Handler, + }, + { + MethodName: "ReadResource", + Handler: _Provider_ReadResource_Handler, + }, + { + MethodName: "PlanResourceChange", + Handler: _Provider_PlanResourceChange_Handler, + }, + { + MethodName: "ApplyResourceChange", + Handler: _Provider_ApplyResourceChange_Handler, + }, + { + MethodName: "ImportResourceState", + Handler: _Provider_ImportResourceState_Handler, + }, + { + MethodName: "MoveResourceState", + Handler: _Provider_MoveResourceState_Handler, + }, + { + MethodName: "ReadDataSource", + Handler: _Provider_ReadDataSource_Handler, + }, + { + MethodName: "GetFunctions", + Handler: _Provider_GetFunctions_Handler, + }, + { + MethodName: "CallFunction", + Handler: _Provider_CallFunction_Handler, + }, + { + MethodName: "StopProvider", + Handler: _Provider_StopProvider_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tfplugin6.proto", +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/attribute_path.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/attribute_path.go new file mode 100644 index 0000000000..188973362d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/attribute_path.go @@ -0,0 +1,92 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +func AttributePath(in *tftypes.AttributePath) *tfplugin6.AttributePath { + if in == nil { + return nil + } + + resp := &tfplugin6.AttributePath{ + Steps: AttributePath_Steps(in.Steps()), + } + + return resp +} + +func AttributePaths(in []*tftypes.AttributePath) []*tfplugin6.AttributePath { + resp := make([]*tfplugin6.AttributePath, 0, len(in)) + + for _, a := range in { + resp = append(resp, AttributePath(a)) + } + + return resp +} + +func AttributePath_Step(step tftypes.AttributePathStep) *tfplugin6.AttributePath_Step { + if step == nil { + return nil + } + + switch step := step.(type) { + case tftypes.AttributeName: + return &tfplugin6.AttributePath_Step{ + Selector: &tfplugin6.AttributePath_Step_AttributeName{ + AttributeName: string(step), + }, + } + case tftypes.ElementKeyInt: + return &tfplugin6.AttributePath_Step{ + Selector: &tfplugin6.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: int64(step), + }, + } + case tftypes.ElementKeyString: + return &tfplugin6.AttributePath_Step{ + Selector: &tfplugin6.AttributePath_Step_ElementKeyString{ + ElementKeyString: string(step), + }, + } + case tftypes.ElementKeyValue: + // The protocol has no equivalent of an ElementKeyValue, so this + // returns nil for the step to signal a step we cannot convey back + // to Terraform. + return nil + } + + // It is not currently possible to create tftypes.AttributePathStep + // implementations outside the tftypes package and these implementations + // should rarely change, if ever, since they are critical to how + // Terraform understands attribute paths. If this panic was reached, it + // implies that a new step type was introduced and needs to be + // implemented as a new case above or that this logic needs to be + // otherwise changed to handle some new attribute path system. + panic(fmt.Sprintf("unimplemented tftypes.AttributePathStep type: %T", step)) +} + +func AttributePath_Steps(in []tftypes.AttributePathStep) []*tfplugin6.AttributePath_Step { + resp := make([]*tfplugin6.AttributePath_Step, 0, len(in)) + + for _, step := range in { + s := AttributePath_Step(step) + + // In the face of an ElementKeyValue or missing step, Terraform has no + // way to represent the attribute path, so only return the prefix. + if s == nil { + return resp + } + + resp = append(resp, s) + } + + return resp +}
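Editor's note: these conversion helpers live in an internal package, so the following hypothetical sketch only illustrates their behavior from inside that module: a path such as settings[0] converts to two proto steps, and a trailing ElementKeyValue step would truncate the converted path at that point.

	// Two convertible steps: an AttributeName and an ElementKeyInt selector.
	path := tftypes.NewAttributePath().
		WithAttributeName("settings").
		WithElementKeyInt(0)
	steps := AttributePath_Steps(path.Steps()) // len(steps) == 2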
+// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func GetMetadata_DataSourceMetadata(in *tfprotov6.DataSourceMetadata) *tfplugin6.GetMetadata_DataSourceMetadata { + if in == nil { + return nil + } + + return &tfplugin6.GetMetadata_DataSourceMetadata{ + TypeName: in.TypeName, + } +} + +func ValidateDataResourceConfig_Response(in *tfprotov6.ValidateDataResourceConfigResponse) *tfplugin6.ValidateDataResourceConfig_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.ValidateDataResourceConfig_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + } + + return resp +} + +func ReadDataSource_Response(in *tfprotov6.ReadDataSourceResponse) *tfplugin6.ReadDataSource_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.ReadDataSource_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + State: DynamicValue(in.State), + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/diagnostic.go new file mode 100644 index 0000000000..fa3ea736d6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/diagnostic.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "unicode/utf8" + + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func Diagnostic(in *tfprotov6.Diagnostic) *tfplugin6.Diagnostic { + if in == nil { + return nil + } + + resp := &tfplugin6.Diagnostic{ + Attribute: AttributePath(in.Attribute), + Detail: ForceValidUTF8(in.Detail), + Severity: Diagnostic_Severity(in.Severity), + Summary: ForceValidUTF8(in.Summary), + } + + return resp +} + +func Diagnostic_Severity(in tfprotov6.DiagnosticSeverity) tfplugin6.Diagnostic_Severity { + return tfplugin6.Diagnostic_Severity(in) +} + +func Diagnostics(in []*tfprotov6.Diagnostic) []*tfplugin6.Diagnostic { + resp := make([]*tfplugin6.Diagnostic, 0, len(in)) + + for _, diag := range in { + resp = append(resp, Diagnostic(diag)) + } + + return resp +} + +// ForceValidUTF8 returns a string guaranteed to be valid UTF-8 even if the +// input isn't, by replacing any invalid bytes with a valid UTF-8 encoding of +// the Unicode Replacement Character (\uFFFD). +// +// The protobuf serialization library will reject invalid UTF-8 with an +// unhelpful error message: +// +// string field contains invalid UTF-8 +// +// Passing a string result through this function makes invalid UTF-8 instead +// emerge as placeholder characters on the other side of the wire protocol, +// giving a better chance of still returning a partially-legible message +// instead of a generic character encoding error. +// +// This is intended for user-facing messages such as diagnostic summary and +// detail messages, where Terraform will just treat the value as opaque and +// it's ultimately up to the user and their terminal or web browser to +// interpret the result. Don't use this for strings that have machine-readable +// meaning. 
+func ForceValidUTF8(s string) string { + // Most strings that pass through here will already be valid UTF-8 and + // utf8.ValidString has a fast path which will beat our rune-by-rune + // analysis below, so it's worth the cost of walking the string twice + // in the rarer invalid case. + if utf8.ValidString(s) { + return s + } + + // If we get down here then we know there's at least one invalid UTF-8 + // sequence in the string, so in this slow path we'll reconstruct the + // string one rune at a time, guaranteeing that we'll only write valid + // UTF-8 sequences into the resulting buffer. + // + // Any invalid string will grow at least a little larger as a result of + // this operation because we'll be replacing each invalid byte with + // the three-byte sequence \xEF\xBF\xBD, which is the UTF-8 encoding of + // the replacement character \uFFFD. 9 is a magic number giving room for + // three such expansions without any further allocation. + ret := make([]byte, 0, len(s)+9) + for { + // If the first byte in s is not the start of a valid UTF-8 sequence + // then the following will return utf8.RuneError, 1, where + // utf8.RuneError is the unicode replacement character. + r, advance := utf8.DecodeRuneInString(s) + if advance == 0 { + break + } + s = s[advance:] + ret = utf8.AppendRune(ret, r) + } + return string(ret) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/doc.go new file mode 100644 index 0000000000..2f60cc4275 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package toproto converts terraform-plugin-go tfprotov6 types to Protocol +// Buffers generated tfplugin6 types. +package toproto diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/dynamic_value.go new file mode 100644 index 0000000000..881be1814d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/dynamic_value.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +func DynamicValue(in *tfprotov6.DynamicValue) *tfplugin6.DynamicValue { + if in == nil { + return nil + } + + resp := &tfplugin6.DynamicValue{ + Msgpack: in.MsgPack, + Json: in.JSON, + } + + return resp +} + +func CtyType(in tftypes.Type) []byte { + if in == nil { + return nil + } + + // MarshalJSON is always error safe. + // nolint:staticcheck // Intended first-party usage + resp, _ := in.MarshalJSON() + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/function.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/function.go new file mode 100644 index 0000000000..8c4d73ebe8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/function.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. 
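The guarantee ForceValidUTF8 makes above can be exercised by copying its loop out of the internal package; a runnable sketch showing that valid input passes through untouched and each invalid byte becomes U+FFFD:

package main

import (
	"fmt"
	"unicode/utf8"
)

// scrub mirrors the ForceValidUTF8 loop above: decode rune by rune so that
// each invalid byte becomes utf8.RuneError (U+FFFD) in the output.
func scrub(s string) string {
	if utf8.ValidString(s) {
		return s
	}
	ret := make([]byte, 0, len(s)+9)
	for {
		r, advance := utf8.DecodeRuneInString(s)
		if advance == 0 {
			break
		}
		s = s[advance:]
		ret = utf8.AppendRune(ret, r)
	}
	return string(ret)
}

func main() {
	fmt.Printf("%q\n", scrub("ok\xffbad")) // "ok\ufffdbad" — one replacement char per bad byte
}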
+// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func CallFunction_Response(in *tfprotov6.CallFunctionResponse) *tfplugin6.CallFunction_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.CallFunction_Response{ + Error: FunctionError(in.Error), + Result: DynamicValue(in.Result), + } + + return resp +} + +func Function(in *tfprotov6.Function) *tfplugin6.Function { + if in == nil { + return nil + } + + resp := &tfplugin6.Function{ + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + DeprecationMessage: in.DeprecationMessage, + Parameters: make([]*tfplugin6.Function_Parameter, 0, len(in.Parameters)), + Return: Function_Return(in.Return), + Summary: in.Summary, + VariadicParameter: Function_Parameter(in.VariadicParameter), + } + + for _, parameter := range in.Parameters { + resp.Parameters = append(resp.Parameters, Function_Parameter(parameter)) + } + + return resp +} + +func Function_Parameter(in *tfprotov6.FunctionParameter) *tfplugin6.Function_Parameter { + if in == nil { + return nil + } + + resp := &tfplugin6.Function_Parameter{ + AllowNullValue: in.AllowNullValue, + AllowUnknownValues: in.AllowUnknownValues, + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + Name: in.Name, + Type: CtyType(in.Type), + } + + return resp +} + +func Function_Return(in *tfprotov6.FunctionReturn) *tfplugin6.Function_Return { + if in == nil { + return nil + } + + resp := &tfplugin6.Function_Return{ + Type: CtyType(in.Type), + } + + return resp +} + +func GetFunctions_Response(in *tfprotov6.GetFunctionsResponse) *tfplugin6.GetFunctions_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.GetFunctions_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + Functions: make(map[string]*tfplugin6.Function, len(in.Functions)), + } + + for name, function := range in.Functions { + resp.Functions[name] = Function(function) + } + + return resp +} + +func GetMetadata_FunctionMetadata(in *tfprotov6.FunctionMetadata) *tfplugin6.GetMetadata_FunctionMetadata { + if in == nil { + return nil + } + + return &tfplugin6.GetMetadata_FunctionMetadata{ + Name: in.Name, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/function_error.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/function_error.go new file mode 100644 index 0000000000..33f3a22317 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/function_error.go @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. 
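The Function conversion above consumes the tfprotov6.Function values a provider builds for GetFunctions and GetProviderSchema. A sketch of such a definition, with field names taken from the conversion code (the function itself, "upper", is hypothetical):

package example // hypothetical

import (
	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

// upper is a hypothetical provider-defined function definition; passing it
// through toproto.Function would produce the tfplugin6.Function sent on the
// wire.
var upper = &tfprotov6.Function{
	Summary:     "Uppercase a string",
	Description: "Returns the input with all letters uppercased.",
	Parameters: []*tfprotov6.FunctionParameter{{
		Name: "input",
		Type: tftypes.String,
	}},
	Return: &tfprotov6.FunctionReturn{Type: tftypes.String},
}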
+// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func FunctionError(in *tfprotov6.FunctionError) *tfplugin6.FunctionError { + if in == nil { + return nil + } + + resp := &tfplugin6.FunctionError{ + FunctionArgument: in.FunctionArgument, + Text: ForceValidUTF8(in.Text), + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/provider.go new file mode 100644 index 0000000000..7b283c9d47 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/provider.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func GetMetadata_Response(in *tfprotov6.GetMetadataResponse) *tfplugin6.GetMetadata_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.GetMetadata_Response{ + DataSources: make([]*tfplugin6.GetMetadata_DataSourceMetadata, 0, len(in.DataSources)), + Diagnostics: Diagnostics(in.Diagnostics), + Functions: make([]*tfplugin6.GetMetadata_FunctionMetadata, 0, len(in.Functions)), + Resources: make([]*tfplugin6.GetMetadata_ResourceMetadata, 0, len(in.Resources)), + ServerCapabilities: ServerCapabilities(in.ServerCapabilities), + } + + for _, datasource := range in.DataSources { + resp.DataSources = append(resp.DataSources, GetMetadata_DataSourceMetadata(&datasource)) + } + + for _, function := range in.Functions { + resp.Functions = append(resp.Functions, GetMetadata_FunctionMetadata(&function)) + } + + for _, resource := range in.Resources { + resp.Resources = append(resp.Resources, GetMetadata_ResourceMetadata(&resource)) + } + + return resp +} + +func GetProviderSchema_Response(in *tfprotov6.GetProviderSchemaResponse) *tfplugin6.GetProviderSchema_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.GetProviderSchema_Response{ + DataSourceSchemas: make(map[string]*tfplugin6.Schema, len(in.DataSourceSchemas)), + Diagnostics: Diagnostics(in.Diagnostics), + Functions: make(map[string]*tfplugin6.Function, len(in.Functions)), + Provider: Schema(in.Provider), + ProviderMeta: Schema(in.ProviderMeta), + ResourceSchemas: make(map[string]*tfplugin6.Schema, len(in.ResourceSchemas)), + ServerCapabilities: ServerCapabilities(in.ServerCapabilities), + } + + for name, schema := range in.ResourceSchemas { + resp.ResourceSchemas[name] = Schema(schema) + } + + for name, schema := range in.DataSourceSchemas { + resp.DataSourceSchemas[name] = Schema(schema) + } + + for name, function := range in.Functions { + resp.Functions[name] = Function(function) + } + + return resp +} + +func ValidateProviderConfig_Response(in *tfprotov6.ValidateProviderConfigResponse) *tfplugin6.ValidateProviderConfig_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.ValidateProviderConfig_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + } + + return resp +} + +func ConfigureProvider_Response(in *tfprotov6.ConfigureProviderResponse) *tfplugin6.ConfigureProvider_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.ConfigureProvider_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + } + + return resp +} + +func StopProvider_Response(in 
*tfprotov6.StopProviderResponse) *tfplugin6.StopProvider_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.StopProvider_Response{ + Error: in.Error, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go new file mode 100644 index 0000000000..638504d7e6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/resource.go @@ -0,0 +1,142 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func GetMetadata_ResourceMetadata(in *tfprotov6.ResourceMetadata) *tfplugin6.GetMetadata_ResourceMetadata { + if in == nil { + return nil + } + + resp := &tfplugin6.GetMetadata_ResourceMetadata{ + TypeName: in.TypeName, + } + + return resp +} + +func ValidateResourceConfig_Response(in *tfprotov6.ValidateResourceConfigResponse) *tfplugin6.ValidateResourceConfig_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.ValidateResourceConfig_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + } + + return resp +} + +func UpgradeResourceState_Response(in *tfprotov6.UpgradeResourceStateResponse) *tfplugin6.UpgradeResourceState_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.UpgradeResourceState_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + UpgradedState: DynamicValue(in.UpgradedState), + } + + return resp +} + +func ReadResource_Response(in *tfprotov6.ReadResourceResponse) *tfplugin6.ReadResource_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.ReadResource_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + NewState: DynamicValue(in.NewState), + Private: in.Private, + } + + return resp +} + +func PlanResourceChange_Response(in *tfprotov6.PlanResourceChangeResponse) *tfplugin6.PlanResourceChange_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.PlanResourceChange_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + LegacyTypeSystem: in.UnsafeToUseLegacyTypeSystem, //nolint:staticcheck + PlannedPrivate: in.PlannedPrivate, + PlannedState: DynamicValue(in.PlannedState), + RequiresReplace: AttributePaths(in.RequiresReplace), + } + + return resp +} + +func ApplyResourceChange_Response(in *tfprotov6.ApplyResourceChangeResponse) *tfplugin6.ApplyResourceChange_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.ApplyResourceChange_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + LegacyTypeSystem: in.UnsafeToUseLegacyTypeSystem, //nolint:staticcheck + NewState: DynamicValue(in.NewState), + Private: in.Private, + } + + return resp +} + +func ImportResourceState_Response(in *tfprotov6.ImportResourceStateResponse) *tfplugin6.ImportResourceState_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.ImportResourceState_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + ImportedResources: ImportResourceState_ImportedResources(in.ImportedResources), + } + + return resp +} + +func ImportResourceState_ImportedResource(in *tfprotov6.ImportedResource) *tfplugin6.ImportResourceState_ImportedResource { + if in == nil { + return nil + } + + resp := &tfplugin6.ImportResourceState_ImportedResource{ + Private: in.Private, + State: DynamicValue(in.State), + TypeName: in.TypeName, + } + + return resp +} + +func 
ImportResourceState_ImportedResources(in []*tfprotov6.ImportedResource) []*tfplugin6.ImportResourceState_ImportedResource { + resp := make([]*tfplugin6.ImportResourceState_ImportedResource, 0, len(in)) + + for _, i := range in { + resp = append(resp, ImportResourceState_ImportedResource(i)) + } + + return resp +} + +func MoveResourceState_Response(in *tfprotov6.MoveResourceStateResponse) *tfplugin6.MoveResourceState_Response { + if in == nil { + return nil + } + + resp := &tfplugin6.MoveResourceState_Response{ + Diagnostics: Diagnostics(in.Diagnostics), + TargetPrivate: in.TargetPrivate, + TargetState: DynamicValue(in.TargetState), + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/schema.go new file mode 100644 index 0000000000..fb46bd676d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/schema.go @@ -0,0 +1,117 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func Schema(in *tfprotov6.Schema) *tfplugin6.Schema { + if in == nil { + return nil + } + + resp := &tfplugin6.Schema{ + Block: Schema_Block(in.Block), + Version: in.Version, + } + + return resp +} + +func Schema_Block(in *tfprotov6.SchemaBlock) *tfplugin6.Schema_Block { + if in == nil { + return nil + } + + resp := &tfplugin6.Schema_Block{ + Attributes: Schema_Attributes(in.Attributes), + BlockTypes: Schema_NestedBlocks(in.BlockTypes), + Deprecated: in.Deprecated, + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + Version: in.Version, + } + + return resp +} + +func Schema_Attribute(in *tfprotov6.SchemaAttribute) *tfplugin6.Schema_Attribute { + if in == nil { + return nil + } + + resp := &tfplugin6.Schema_Attribute{ + Computed: in.Computed, + Deprecated: in.Deprecated, + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + Name: in.Name, + NestedType: Schema_Object(in.NestedType), + Optional: in.Optional, + Required: in.Required, + Sensitive: in.Sensitive, + Type: CtyType(in.Type), + } + + return resp +} + +func Schema_Attributes(in []*tfprotov6.SchemaAttribute) []*tfplugin6.Schema_Attribute { + resp := make([]*tfplugin6.Schema_Attribute, 0, len(in)) + + for _, a := range in { + resp = append(resp, Schema_Attribute(a)) + } + + return resp +} + +func Schema_NestedBlock(in *tfprotov6.SchemaNestedBlock) *tfplugin6.Schema_NestedBlock { + if in == nil { + return nil + } + + resp := &tfplugin6.Schema_NestedBlock{ + Block: Schema_Block(in.Block), + MaxItems: in.MaxItems, + MinItems: in.MinItems, + Nesting: Schema_NestedBlock_NestingMode(in.Nesting), + TypeName: in.TypeName, + } + + return resp +} + +func Schema_NestedBlocks(in []*tfprotov6.SchemaNestedBlock) []*tfplugin6.Schema_NestedBlock { + resp := make([]*tfplugin6.Schema_NestedBlock, 0, len(in)) + + for _, b := range in { + resp = append(resp, Schema_NestedBlock(b)) + } + + return resp +} + +func Schema_NestedBlock_NestingMode(in tfprotov6.SchemaNestedBlockNestingMode) tfplugin6.Schema_NestedBlock_NestingMode { + return tfplugin6.Schema_NestedBlock_NestingMode(in) +} + +func Schema_Object_NestingMode(in tfprotov6.SchemaObjectNestingMode) tfplugin6.Schema_Object_NestingMode { + return tfplugin6.Schema_Object_NestingMode(in) +} + +func 
Schema_Object(in *tfprotov6.SchemaObject) *tfplugin6.Schema_Object { + if in == nil { + return nil + } + + resp := &tfplugin6.Schema_Object{ + Attributes: Schema_Attributes(in.Attributes), + Nesting: Schema_Object_NestingMode(in.Nesting), + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/server_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/server_capabilities.go new file mode 100644 index 0000000000..82d21b2e9f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/server_capabilities.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func ServerCapabilities(in *tfprotov6.ServerCapabilities) *tfplugin6.ServerCapabilities { + if in == nil { + return nil + } + + resp := &tfplugin6.ServerCapabilities{ + GetProviderSchemaOptional: in.GetProviderSchemaOptional, + MoveResourceState: in.MoveResourceState, + PlanDestroy: in.PlanDestroy, + } + + return resp +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/string_kind.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/string_kind.go new file mode 100644 index 0000000000..8e5036ee48 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/string_kind.go @@ -0,0 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func StringKind(in tfprotov6.StringKind) tfplugin6.StringKind { + return tfplugin6.StringKind(in) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go new file mode 100644 index 0000000000..e1ea384de3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go @@ -0,0 +1,237 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +import ( + "context" +) + +// ProviderServer is an interface that reflects the Terraform protocol. +// Providers must implement this interface. +type ProviderServer interface { + // GetMetadata returns upfront information about server capabilities and + // supported resource types without requiring the server to instantiate all + // schema information, which may be memory intensive. This RPC is optional, + // where clients may receive an unimplemented RPC error. Clients should + // ignore the error and call the GetProviderSchema RPC as a fallback. + GetMetadata(context.Context, *GetMetadataRequest) (*GetMetadataResponse, error) + + // GetProviderSchema is called when Terraform needs to know what the + // provider's schema is, along with the schemas of all its resources + // and data sources. + GetProviderSchema(context.Context, *GetProviderSchemaRequest) (*GetProviderSchemaResponse, error) + + // ValidateProviderConfig is called to give a provider a chance to + // validate the configuration the user specified.
+ ValidateProviderConfig(context.Context, *ValidateProviderConfigRequest) (*ValidateProviderConfigResponse, error) + + // ConfigureProvider is called to pass the user-specified provider + // configuration to the provider. + ConfigureProvider(context.Context, *ConfigureProviderRequest) (*ConfigureProviderResponse, error) + + // StopProvider is called when Terraform would like providers to shut + // down as quickly as possible, and usually represents an interrupt. + StopProvider(context.Context, *StopProviderRequest) (*StopProviderResponse, error) + + // ResourceServer is an interface encapsulating all the + // resource-related RPC requests. ProviderServer implementations must + // implement them, but they are a handy interface for defining what a + // resource is to terraform-plugin-go, so they're their own interface + // that is composed into ProviderServer. + ResourceServer + + // DataSourceServer is an interface encapsulating all the data + // source-related RPC requests. ProviderServer implementations must + // implement them, but they are a handy interface for defining what a + // data source is to terraform-plugin-go, so they're their own + // interface that is composed into ProviderServer. + DataSourceServer + + // FunctionServer is an interface encapsulating all the function-related RPC + // requests. ProviderServer implementations must implement them, but they + // are a handy interface for defining what a function is to + // terraform-plugin-go, so they are their own interface that is composed + // into ProviderServer. + // + // This will be required in an upcoming release. + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + // FunctionServer +} + +// GetMetadataRequest represents a GetMetadata RPC request. +type GetMetadataRequest struct{} + +// GetMetadataResponse represents a GetMetadata RPC response. +type GetMetadataResponse struct { + // ServerCapabilities defines optionally supported protocol features, + // such as forward-compatible Terraform behavior changes. + ServerCapabilities *ServerCapabilities + + // Diagnostics report errors or warnings related to returning the + // provider's schemas. Returning an empty slice indicates success, with + // no errors or warnings generated. + Diagnostics []*Diagnostic + + // DataSources returns metadata for all data resources. + DataSources []DataSourceMetadata + + // Functions returns metadata for all functions. + Functions []FunctionMetadata + + // Resources returns metadata for all managed resources. + Resources []ResourceMetadata +} + +// GetProviderSchemaRequest represents a Terraform RPC request for the +// provider's schemas. +type GetProviderSchemaRequest struct{} + +// GetProviderSchemaResponse represents a Terraform RPC response containing the +// provider's schemas. +type GetProviderSchemaResponse struct { + // ServerCapabilities defines optionally supported protocol features, + // such as forward-compatible Terraform behavior changes. + ServerCapabilities *ServerCapabilities + + // Provider defines the schema for the provider configuration, which + // will be specified in the provider block of the user's configuration. + Provider *Schema + + // ProviderMeta defines the schema for the provider's metadata, which + // will be specified in the provider_meta blocks of the terraform block + // for a module. This is an advanced feature and its usage should be + // coordinated with the Terraform Core team by opening an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. 
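GetMetadataResponse above lets a server advertise its types without materializing schemas. A minimal handler sketch (exampleProvider and its type names are hypothetical; the same made-up type carries the remaining sketches in this section):

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
)

// exampleProvider is a hypothetical, partial tfprotov6.ProviderServer
// implementation; only individual handlers are sketched.
type exampleProvider struct{}

func (p *exampleProvider) GetMetadata(ctx context.Context, req *tfprotov6.GetMetadataRequest) (*tfprotov6.GetMetadataResponse, error) {
	return &tfprotov6.GetMetadataResponse{
		ServerCapabilities: &tfprotov6.ServerCapabilities{GetProviderSchemaOptional: true},
		Resources:          []tfprotov6.ResourceMetadata{{TypeName: "example_thing"}},
		DataSources:        []tfprotov6.DataSourceMetadata{{TypeName: "example_things"}},
		// Empty Diagnostics means success.
	}, nil
}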
+ ProviderMeta *Schema + + // ResourceSchemas is a map of resource names to the schema for the + // configuration specified in the resource. The name should be a + // resource name, and should be prefixed with your provider's shortname + // and an underscore. It should match the first label after `resource` + // in a user's configuration. + ResourceSchemas map[string]*Schema + + // DataSourceSchemas is a map of data source names to the schema for + // the configuration specified in the data source. The name should be a + // data source name, and should be prefixed with your provider's + // shortname and an underscore. It should match the first label after + // `data` in a user's configuration. + DataSourceSchemas map[string]*Schema + + // Functions is a map of function names to their definition. + // + // Unlike data resources and managed resources, the name should NOT be + // prefixed with the provider name and an underscore. Configuration + // references to functions use a separate namespacing syntax that already + // includes the provider name. + Functions map[string]*Function + + // Diagnostics report errors or warnings related to returning the + // provider's schemas. Returning an empty slice indicates success, with + // no errors or warnings generated. + Diagnostics []*Diagnostic +} + +// ValidateProviderConfigRequest represents a Terraform RPC request for the +// provider to modify the provider configuration in preparation for Terraform +// validating it. +type ValidateProviderConfigRequest struct { + // Config is the configuration the user supplied for the provider. See + // the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // The ValidateProviderConfig RPC call will be called twice; once when + // generating a plan, once when applying the plan. When called during + // plan, Config can contain unknown values if fields with unknown + // values are interpolated into it. At apply time, all fields will have + // known values. Values that are not set in the configuration will be + // null. + Config *DynamicValue +} + +// ValidateProviderConfigResponse represents a Terraform RPC response containing +// a modified provider configuration that Terraform can now validate and use. +type ValidateProviderConfigResponse struct { + // PreparedConfig should be set to the modified configuration. See the + // documentation on `DynamicValue` for information about safely + // creating the `DynamicValue`. + // + // This RPC call exists because early versions of the Terraform Plugin + // SDK allowed providers to set defaults for provider configurations in + // such a way that Terraform couldn't validate the provider config + // without retrieving the default values first. As providers using + // terraform-plugin-go directly and new frameworks built on top of it + // have no such requirement, it is safe and recommended to simply set + // PreparedConfig to the value of the PrepareProviderConfigRequest's + // Config property, indicating that no changes are needed to the + // configuration. + // + // The configuration should be represented as a tftypes.Object, with + // each attribute and nested block getting its own key and value. + // + // TODO: should we provide an implementation that does that that + // provider developers can just embed and not need to implement the + // method themselves, then? 
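Following the guidance above, a provider with no legacy defaulting behavior can simply echo the configuration back (a sketch continuing the hypothetical exampleProvider from earlier):

// Continues the hypothetical exampleProvider (imports: context, tfprotov6).
func (p *exampleProvider) ValidateProviderConfig(ctx context.Context, req *tfprotov6.ValidateProviderConfigRequest) (*tfprotov6.ValidateProviderConfigResponse, error) {
	// No legacy defaulting to perform: hand the configuration straight back.
	return &tfprotov6.ValidateProviderConfigResponse{
		PreparedConfig: req.Config,
	}, nil
}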
+ PreparedConfig *DynamicValue + + // Diagnostics report errors or warnings related to preparing the + // provider's configuration. Returning an empty slice indicates + // success, with no errors or warnings generated. + Diagnostics []*Diagnostic +} + +// ConfigureProviderRequest represents a Terraform RPC request to supply the +// provider with information about what the user entered in the provider's +// configuration block. +type ConfigureProviderRequest struct { + // TerraformVersion is the version of Terraform executing the request. + // This is supplied for logging, analytics, and User-Agent purposes + // *only*. Providers should not try to gate provider behavior on + // Terraform versions. It will make you sad. We can't stop you from + // doing it, but we really highly recommend you do not do it. + TerraformVersion string + + // Config is the configuration the user supplied for the provider. This + // information should usually be persisted to the underlying type + // that's implementing the ProviderServer interface, for use in later + // RPC requests. See the documentation on `DynamicValue` for more + // information about safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // The ConfigureProvider RPC call will be called twice; once when + // generating a plan, once when applying the plan. When called during + // plan, Config can contain unknown values if fields with unknown + // values are interpolated into it. At apply time, all fields will have + // known values. Values that are not set in the configuration will be + // null. + Config *DynamicValue +} + +// ConfigureProviderResponse represents a Terraform RPC response to the +// configuration block that Terraform supplied for the provider. +type ConfigureProviderResponse struct { + // Diagnostics report errors or warnings related to the provider's + // configuration. Returning an empty slice indicates success, with no + // errors or warnings generated. + Diagnostics []*Diagnostic +} + +// StopProviderRequest represents a Terraform RPC request to interrupt a +// provider's work and terminate a provider's processes as soon as possible. +type StopProviderRequest struct{} + +// StopProviderResponse represents a Terraform RPC response surfacing any issues +// the provider encountered in terminating. +type StopProviderResponse struct { + // Error should be set to a string describing the error if the provider + // cannot currently shut down for some reason. Because this always + // represents a system error and not a user error, it is returned as a + // string, not a Diagnostic. + Error string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go new file mode 100644 index 0000000000..9344f8db82 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/resource.go @@ -0,0 +1,543 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// ResourceMetadata describes metadata for a managed resource in the GetMetadata +// RPC. +type ResourceMetadata struct { + // TypeName is the name of the managed resource. + TypeName string +} + +// ResourceServer is an interface containing the methods a resource +// implementation needs to fill.
+type ResourceServer interface { + // ValidateResourceConfig is called when Terraform is checking that + // a resource's configuration is valid. It is guaranteed to have types + // conforming to your schema. This is your opportunity to do custom or + // advanced validation prior to a plan being generated. + ValidateResourceConfig(context.Context, *ValidateResourceConfigRequest) (*ValidateResourceConfigResponse, error) + + // UpgradeResourceState is called when Terraform has encountered a + // resource with a state in a schema that doesn't match the schema's + // current version. It is the provider's responsibility to modify the + // state to upgrade it to the latest state schema. + UpgradeResourceState(context.Context, *UpgradeResourceStateRequest) (*UpgradeResourceStateResponse, error) + + // ReadResource is called when Terraform is refreshing a resource's + // state. + ReadResource(context.Context, *ReadResourceRequest) (*ReadResourceResponse, error) + + // PlanResourceChange is called when Terraform is attempting to + // calculate a plan for a resource. Terraform will suggest a proposed + // new state, which the provider can modify or return unmodified to + // influence Terraform's plan. + PlanResourceChange(context.Context, *PlanResourceChangeRequest) (*PlanResourceChangeResponse, error) + + // ApplyResourceChange is called when Terraform has detected a diff + // between the resource's state and the user's config, and the user has + // approved a planned change. The provider is to apply the changes + // contained in the plan, and return the resulting state. + ApplyResourceChange(context.Context, *ApplyResourceChangeRequest) (*ApplyResourceChangeResponse, error) + + // ImportResourceState is called when a user has requested Terraform + // import a resource. The provider should fetch the information + // specified by the passed ID and return it as one or more resource + // states for Terraform to assume control of. + ImportResourceState(context.Context, *ImportResourceStateRequest) (*ImportResourceStateResponse, error) +} + +// ResourceServerWithMoveResourceState is a temporary interface for servers +// to implement MoveResourceState RPC handling. +// +// Deprecated: The MoveResourceState method will be moved into the +// ResourceServer interface and this interface will be removed in a future +// version. +type ResourceServerWithMoveResourceState interface { + ResourceServer + + // MoveResourceState is called when Terraform is asked to change a resource + // type for an existing resource. The provider must accept the change as + // valid by ensuring the source resource type, schema version, and provider + // address are compatible to convert the source state into the target + // resource type and latest state version. + // + // This functionality is only supported in Terraform 1.8 and later. The + // provider must have enabled the MoveResourceState server capability to + // enable these requests. + MoveResourceState(context.Context, *MoveResourceStateRequest) (*MoveResourceStateResponse, error) +} + +// ValidateResourceConfigRequest is the request Terraform sends when it +// wants to validate a resource's configuration. +type ValidateResourceConfigRequest struct { + // TypeName is the type of resource Terraform is validating. + TypeName string + + // Config is the configuration the user supplied for that resource. See + // the documentation on `DynamicValue` for more information about + // safely accessing the configuration. 
+ // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. Any attributes not directly + // set in the configuration will be null. + Config *DynamicValue +} + +// ValidateResourceConfigResponse is the response from the provider about +// the validity of a resource's configuration. +type ValidateResourceConfigResponse struct { + // Diagnostics report errors or warnings related to the given + // configuration. Returning an empty slice indicates a successful + // validation with no warnings or errors generated. + Diagnostics []*Diagnostic +} + +// UpgradeResourceStateRequest is the request Terraform sends when it needs a +// provider to upgrade the state of a given resource. +type UpgradeResourceStateRequest struct { + // TypeName is the type of resource that Terraform needs to upgrade the + // state for. + TypeName string + + // Version is the version of the state the resource currently has. + Version int64 + + // RawState is the state as Terraform sees it right now. See the + // documentation for `RawState` for information on how to work with the + // data it contains. + RawState *RawState +} + +// UpgradeResourceStateResponse is the response from the provider containing +// the upgraded state for the given resource. +type UpgradeResourceStateResponse struct { + // UpgradedState is the upgraded state for the resource, represented as + // a `DynamicValue`. See the documentation on `DynamicValue` for + // information about safely creating the `DynamicValue`. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + UpgradedState *DynamicValue + + // Diagnostics report errors or warnings related to upgrading the + // state of the requested resource. Returning an empty slice indicates + // a successful validation with no warnings or errors generated. + Diagnostics []*Diagnostic +} + +// ReadResourceRequest is the request Terraform sends when it wants to get the +// latest state for a resource. +type ReadResourceRequest struct { + // TypeName is the type of resource Terraform is requesting an updated + // state for. + TypeName string + + // CurrentState is the current state of the resource as far as + // Terraform knows, represented as a `DynamicValue`. See the + // documentation for `DynamicValue` for information about safely + // accessing the state. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + CurrentState *DynamicValue + + // Private is any provider-defined private state stored with the + // resource. It is used for keeping state with the resource that is not + // meant to be included when calculating diffs. + // + // To ensure private state data is preserved, copy any necessary data to + // the ReadResourceResponse type Private field. + Private []byte + + // ProviderMeta supplies the provider metadata configuration for the + // module this resource is in. Module-specific provider metadata is an + // advanced feature and usage of it should be coordinated with the + // Terraform Core team by raising an issue at + // https://github.com/hashicorp/terraform/issues/new/choose.
See the + // documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration will have known values for all fields. + ProviderMeta *DynamicValue +} + +// ReadResourceResponse is the response from the provider about the current +// state of the requested resource. +type ReadResourceResponse struct { + // NewState is the current state of the resource according to the + // provider, represented as a `DynamicValue`. See the documentation for + // `DynamicValue` for information about safely creating the + // `DynamicValue`. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + NewState *DynamicValue + + // Diagnostics report errors or warnings related to retrieving the + // current state of the requested resource. Returning an empty slice + // indicates a successful validation with no warnings or errors + // generated. + Diagnostics []*Diagnostic + + // Private should be set to any state that the provider would like sent + // with requests for this resource. This state will be associated with + // the resource, but will not be considered when calculating diffs. + Private []byte +} + +// PlanResourceChangeRequest is the request Terraform sends when it is +// generating a plan for a resource and wants the provider's input on what the +// planned state should be. +type PlanResourceChangeRequest struct { + // TypeName is the type of resource Terraform is generating a plan for. + TypeName string + + // PriorState is the state of the resource before the plan is applied, + // represented as a `DynamicValue`. See the documentation for + // `DynamicValue` for information about safely accessing the state. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + PriorState *DynamicValue + + // ProposedNewState is the state that Terraform is proposing for the + // resource, with the changes in the configuration applied, represented + // as a `DynamicValue`. See the documentation for `DynamicValue` for + // information about safely accessing the state. + // + // The ProposedNewState merges any non-null values in the configuration + // with any computed attributes in PriorState as a utility to help + // providers avoid needing to implement such merging functionality + // themselves. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + // + // The ProposedNewState will be null when planning a delete operation. + ProposedNewState *DynamicValue + + // Config is the configuration the user supplied for the resource. See + // the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config *DynamicValue + + // PriorPrivate is any provider-defined private state stored with the + // resource. It is used for keeping state with the resource that is not + // meant to be included when calculating diffs. 
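The Private round-tripping rule above is easy to drop on the floor; a no-drift refresh sketch that carries both state and private bytes forward (continuing the hypothetical exampleProvider):

// Continues the hypothetical exampleProvider (imports: context, tfprotov6).
func (p *exampleProvider) ReadResource(ctx context.Context, req *tfprotov6.ReadResourceRequest) (*tfprotov6.ReadResourceResponse, error) {
	// A refresh that finds no drift may return the current state unchanged,
	// but the private bytes must be copied forward or they are discarded.
	return &tfprotov6.ReadResourceResponse{
		NewState: req.CurrentState,
		Private:  req.Private,
	}, nil
}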
+ // + // To ensure private state data is preserved, copy any necessary data to + // the PlanResourceChangeResponse type PlannedPrivate field. + PriorPrivate []byte + + // ProviderMeta supplies the provider metadata configuration for the + // module this resource is in. Module-specific provider metadata is an + // advanced feature and usage of it should be coordinated with the + // Terraform Core team by raising an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. See the + // documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration will have known values for all fields. + ProviderMeta *DynamicValue +} + +// PlanResourceChangeResponse is the response from the provider about what the +// planned state for a given resource should be. +type PlanResourceChangeResponse struct { + // PlannedState is the provider's indication of what the state for the + // resource should be after apply, represented as a `DynamicValue`. See + // the documentation for `DynamicValue` for information about safely + // creating the `DynamicValue`. + // + // This is usually derived from the ProposedNewState passed in the + // PlanResourceChangeRequest, with default values substituted for any + // null values and overriding any computed values that are expected to + // change as a result of the apply operation. This may contain unknown + // values if the value could change but its new value won't be known + // until apply time. + // + // Any value that was non-null in the configuration must either + // preserve the exact configuration value or return the corresponding + // value from the prior state. The value from the prior state should be + // returned when the configuration value is semantically equivalent to + // the state value. + // + // Any value that is marked as computed in the schema and is null in + // the configuration may be set by the provider to any value of the + // expected type. + // + // PlanResourceChange will actually be called twice; once when + // generating the plan for the user to approve, once during the apply. + // During the apply, additional values from the configuration--upstream + // values interpolated in that were computed at apply time--will be + // populated. During this second call, any attribute that had a known + // value in the first PlannedState must have an identical value in the + // second PlannedState. Any unknown values may remain unknown or may + // take on any value of the appropriate type. This means the values + // returned in PlannedState should be deterministic and unknown values + // should be used if a field's value may change depending on what value + // ends up filling an unknown value in the config. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + PlannedState *DynamicValue + + // RequiresReplace is a list of tftypes.AttributePaths that require the + // resource to be replaced. They should point to the specific field + // that changed that requires the resource to be destroyed and + // recreated. + RequiresReplace []*tftypes.AttributePath + + // PlannedPrivate should be set to any state that the provider would + // like sent with requests for this resource. 
This state will be + // associated with the resource, but will not be considered when + // calculating diffs. + // + // This private state data will be sent in the ApplyResourceChange RPC, in + // relation to the types of this package, the ApplyResourceChangeRequest + // type PlannedPrivate field. + PlannedPrivate []byte + + // Diagnostics report errors or warnings related to determining the + // planned state of the requested resource. Returning an empty slice + // indicates a successful validation with no warnings or errors + // generated. + Diagnostics []*Diagnostic + + // UnsafeToUseLegacyTypeSystem should only be set by + // hashicorp/terraform-plugin-sdk. It modifies Terraform's behavior to + // work with the legacy expectations of that SDK. + // + // Nobody else should use this. Ever. For any reason. Just don't do it. + // + // We have to expose it here for terraform-plugin-sdk to be muxable, or + // we wouldn't even be including it in this type. Don't use it. It may + // go away or change behavior on you with no warning. It is + // explicitly unsupported and not part of our SemVer guarantees. + // + // Deprecated: Really, just don't use this, you don't need it. + UnsafeToUseLegacyTypeSystem bool +} + +// ApplyResourceChangeRequest is the request Terraform sends when it needs to +// apply a planned set of changes to a resource. +type ApplyResourceChangeRequest struct { + // TypeName is the type of resource Terraform wants to change. + TypeName string + + // PriorState is the state of the resource before the changes are + // applied, represented as a `DynamicValue`. See the documentation for + // `DynamicValue` for information about safely accessing the state. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + PriorState *DynamicValue + + // PlannedState is Terraform's plan for what the state should look like + // after the changes are applied, represented as a `DynamicValue`. See + // the documentation for `DynamicValue` for information about safely + // accessing the state. + // + // This is the PlannedState returned during PlanResourceChange. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + PlannedState *DynamicValue + + // Config is the configuration the user supplied for the resource. See + // the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may contain unknown values. + Config *DynamicValue + + // PlannedPrivate is any provider-defined private state stored with the + // resource. It is used for keeping state with the resource that is not + // meant to be included when calculating diffs. + // + // This private state data is sourced from the PlanResourceChange RPC, in + // relation to the types in this package, the PlanResourceChangeResponse + // type PlannedPrivate field. + // + // To ensure private state data is preserved, copy any necessary data to + // the ApplyResourceChangeResponse type Private field. + PlannedPrivate []byte + + // ProviderMeta supplies the provider metadata configuration for the + // module this resource is in. 
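Putting PlanResourceChangeResponse together: a passthrough plan that flags one attribute as forcing replacement (continuing the hypothetical exampleProvider; the "size" attribute is made up, and the sketch assumes no computed values need filling in):

// Continues the hypothetical exampleProvider (imports: context, tfprotov6, tftypes).
func (p *exampleProvider) PlanResourceChange(ctx context.Context, req *tfprotov6.PlanResourceChangeRequest) (*tfprotov6.PlanResourceChangeResponse, error) {
	return &tfprotov6.PlanResourceChangeResponse{
		PlannedState:   req.ProposedNewState, // assumes no computed attributes to fill in
		PlannedPrivate: req.PriorPrivate,     // carry private state forward
		// A change to the hypothetical "size" attribute destroys and recreates.
		RequiresReplace: []*tftypes.AttributePath{
			tftypes.NewAttributePath().WithAttributeName("size"),
		},
	}, nil
}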
Module-specific provider metadata is an + // advanced feature and usage of it should be coordinated with the + // Terraform Core team by raising an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. See the + // documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration will have known values for all fields. + ProviderMeta *DynamicValue +} + +// ApplyResourceChangeResponse is the response from the provider about what the +// state of a resource is after planned changes have been applied. +type ApplyResourceChangeResponse struct { + // NewState is the provider's understanding of what the resource's + // state is after changes are applied, represented as a `DynamicValue`. + // See the documentation for `DynamicValue` for information about + // safely creating the `DynamicValue`. + // + // Any attribute, whether computed or not, that has a known value in + // the PlannedState in the ApplyResourceChangeRequest must be preserved + // exactly as it was in NewState. + // + // Any attribute in the PlannedState in the ApplyResourceChangeRequest + // that is unknown must take on a known value at this time. No unknown + // values are allowed in the NewState. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + NewState *DynamicValue + + // Private should be set to any state that the provider would like sent + // with requests for this resource. This state will be associated with + // the resource, but will not be considered when calculating diffs. + Private []byte + + // Diagnostics report errors or warnings related to applying changes to + // the requested resource. Returning an empty slice indicates a + // successful validation with no warnings or errors generated. + Diagnostics []*Diagnostic + + // UnsafeToUseLegacyTypeSystem should only be set by + // hashicorp/terraform-plugin-sdk. It modifies Terraform's behavior to + // work with the legacy expectations of that SDK. + // + // Nobody else should use this. Ever. For any reason. Just don't do it. + // + // We have to expose it here for terraform-plugin-sdk to be muxable, or + // we wouldn't even be including it in this type. Don't use it. It may + // go away or change behavior on you with no warning. It is + // explicitly unsupported and not part of our SemVer guarantees. + // + // Deprecated: Really, just don't use this, you don't need it. + UnsafeToUseLegacyTypeSystem bool +} + +// ImportResourceStateRequest is the request Terraform sends when it wants a +// provider to import one or more resources specified by an ID. +type ImportResourceStateRequest struct { + // TypeName is the type of resource Terraform wants to import. + TypeName string + + // ID is the user-supplied identifying information about the resource + // or resources. Providers decide and communicate to users the format + // for the ID, and use it to determine what resource or resources to + // import. + ID string +} + +// ImportResourceStateResponse is the response from the provider about the +// imported resources. +type ImportResourceStateResponse struct { + // ImportedResources are the resources the provider found and was able + // to import. 
+ ImportedResources []*ImportedResource + + // Diagnostics report errors or warnings related to importing the + // requested resource or resources. Returning an empty slice indicates + // a successful validation with no warnings or errors generated. + Diagnostics []*Diagnostic +} + +// ImportedResource represents a single resource that a provider has +// successfully imported into state. +type ImportedResource struct { + // TypeName is the type of resource that was imported. + TypeName string + + // State is the provider's understanding of the imported resource's + // state, represented as a `DynamicValue`. See the documentation for + // `DynamicValue` for information about safely creating the + // `DynamicValue`. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + State *DynamicValue + + // Private should be set to any state that the provider would like sent + // with requests for this resource. This state will be associated with + // the resource, but will not be considered when calculating diffs. + Private []byte +} + +// MoveResourceStateRequest is the request Terraform sends when it requests a +// provider to move the state of a source resource into the target resource. +// Target resource types generally must opt into accepting each source resource +// type since any transformation logic requires knowledge of the source state. +// +// This functionality is only supported in Terraform 1.8 and later. The provider +// must have enabled the MoveResourceState server capability to enable these +// requests. +type MoveResourceStateRequest struct { + // SourcePrivate is the private state of the source resource. + SourcePrivate []byte + + // SourceProviderAddress is the address of the provider for the source + // resource type. + SourceProviderAddress string + + // SourceSchemaVersion is the version of the source resource state. + SourceSchemaVersion int64 + + // SourceState is the raw state of the source resource. + // + // Only the underlying JSON field is populated. + SourceState *RawState + + // SourceTypeName is the source resource type for the move request. + SourceTypeName string + + // TargetTypeName is the target resource type for the move request. + TargetTypeName string +} + +// MoveResourceStateResponse is the response from the provider containing +// the moved state for the given resource. +type MoveResourceStateResponse struct { + // TargetPrivate is the target resource private state after the move. + TargetPrivate []byte + + // TargetState is the target resource state after the move. + TargetState *DynamicValue + + // Diagnostics report any warnings or errors related to moving the state. + Diagnostics []*Diagnostic +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/schema.go new file mode 100644 index 0000000000..b368c620fb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/schema.go @@ -0,0 +1,443 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +import "github.com/hashicorp/terraform-plugin-go/tftypes" + +const ( + // SchemaNestedBlockNestingModeInvalid indicates that the nesting mode + // for a nested block in the schema is invalid. This generally + // indicates a nested block that was created incorrectly. 
+ SchemaNestedBlockNestingModeInvalid SchemaNestedBlockNestingMode = 0 + + // SchemaNestedBlockNestingModeSingle indicates that the nested block + // should be treated as a single block with no labels, and there should + // not be more than one of these blocks in the containing block. The + // block will appear in config and state values as a tftypes.Object. + SchemaNestedBlockNestingModeSingle SchemaNestedBlockNestingMode = 1 + + // SchemaNestedBlockNestingModeList indicates that multiple instances + // of the nested block should be permitted, with no labels, and that + // the instances of the block should appear in config and state values + // as a tftypes.List, with an ElementType of tftypes.Object. + SchemaNestedBlockNestingModeList SchemaNestedBlockNestingMode = 2 + + // SchemaNestedBlockNestingModeSet indicates that multiple instances + // of the nested block should be permitted, with no labels, and that + // the instances of the block should appear in config and state values + // as a tftypes.Set, with an ElementType of tftypes.Object. + SchemaNestedBlockNestingModeSet SchemaNestedBlockNestingMode = 3 + + // SchemaNestedBlockNestingModeMap indicates that multiple instances of + // the nested block should be permitted, each with a single label, and + // that they should be represented in state and config values as a + // tftypes.Map, with an ElementType of tftypes.Object. The labels on + // the blocks will be used as the map keys. It is an error, therefore, + // to use the same label value on multiple block instances. + SchemaNestedBlockNestingModeMap SchemaNestedBlockNestingMode = 4 + + // SchemaNestedBlockNestingModeGroup indicates that the nested block + // should be treated as a single block with no labels, and there should + // not be more than one of these blocks in the containing block. The + // block will appear in config and state values as a tftypes.Object. + // + // SchemaNestedBlockNestingModeGroup is distinct from + // SchemaNestedBlockNestingModeSingle in that it guarantees that the + // block will never be null. If it is omitted from a config, the block + // will still be set, but its attributes and nested blocks will all be + // null. This is an exception to the rule that any block not set in the + // configuration cannot be set in config by the provider; this ensures + // the block is always considered "set" in the configuration, and is + // therefore settable in state by the provider. + SchemaNestedBlockNestingModeGroup SchemaNestedBlockNestingMode = 5 + + // SchemaObjectNestingModeInvalid indicates that the nesting mode + // for a nested type in the schema is invalid. This generally + // indicates a nested type that was created incorrectly. + SchemaObjectNestingModeInvalid SchemaObjectNestingMode = 0 + + // SchemaObjectNestingModeSingle indicates that the nested type should + // be treated as a single object. The block will appear in config and state + // values as a tftypes.Object. + SchemaObjectNestingModeSingle SchemaObjectNestingMode = 1 + + // SchemaObjectNestingModeList indicates that multiple instances of the + // nested type should be permitted, and that the nested type should appear + // in config and state values as a tftypes.List, with an ElementType of + // tftypes.Object. 
+	SchemaObjectNestingModeList SchemaObjectNestingMode = 2 + + // SchemaObjectNestingModeSet indicates that multiple instances of the + // nested type should be permitted, and that the nested type should appear in + // config and state values as a tftypes.Set, with an ElementType of + // tftypes.Object. + SchemaObjectNestingModeSet SchemaObjectNestingMode = 3 + + // SchemaObjectNestingModeMap indicates that multiple instances of the + // nested type should be permitted, and that they should appear in state + // and config values as a tftypes.Map, with an ElementType of + // tftypes.Object. + SchemaObjectNestingModeMap SchemaObjectNestingMode = 4 +) + +// Schema is how Terraform defines the shape of data. It can be thought of as +// the type information for resources, data sources, provider configuration, +// and all the other data that Terraform sends to providers. It is how +// providers express their requirements for that data. +type Schema struct { + // Version indicates which version of the schema this is. Versions + // should be monotonically incrementing numbers. When Terraform + // encounters a resource stored in state with a schema version lower + // than the schema version the provider advertises for that resource, + // Terraform requests the provider upgrade the resource's state. + Version int64 + + // Block is the root level of the schema, the collection of attributes + // and blocks that make up a resource, data source, provider, or other + // configuration block. + Block *SchemaBlock +} + +// ValueType returns the tftypes.Type for a Schema. +// +// If Schema is missing, an empty Object is returned. +func (s *Schema) ValueType() tftypes.Type { + if s == nil { + return tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{}, + } + } + + return s.Block.ValueType() +} + +// SchemaBlock represents a block in a schema. Blocks are how Terraform creates +// groupings of attributes. In configurations, they don't use the equals sign +// and use dynamic instead of list comprehensions. +// +// Blocks will show up in state and config Values as a tftypes.Object, with the +// attributes and nested blocks defining the tftypes.Object's AttributeTypes. +type SchemaBlock struct { + // TODO: why do we have version in the block, too? + Version int64 + + // Attributes are the attributes defined within the block. These are + // the fields that users can set using the equals sign or reference in + // interpolations. + Attributes []*SchemaAttribute + + // BlockTypes are the nested blocks within the block. These are used to + // have blocks within blocks. + BlockTypes []*SchemaNestedBlock + + // Description offers an end-user friendly description of what the + // block is for. This will be surfaced to users through editor + // integrations, documentation generation, and other settings. + Description string + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // Deprecated, when set to true, indicates that a block should no + // longer be used and users should migrate away from it. At the moment + // it is unused and will have no impact, but it will be used in future + // tooling that is powered by provider schemas to enable richer user + // experiences. Providers should set it when deprecating blocks in + // preparation for these tools. + Deprecated bool +} + +// ValueType returns the tftypes.Type for a SchemaBlock. +// +// If SchemaBlock is missing, an empty Object is returned.
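+//
+// As an editorial illustration (the attribute and block names are
+// hypothetical, not part of the upstream file), a block with a required
+// string attribute "name" and a list-nested block "rule" maps to:
+//
+//	tftypes.Object{
+//		AttributeTypes: map[string]tftypes.Type{
+//			"name": tftypes.String,
+//			"rule": tftypes.List{
+//				ElementType: tftypes.Object{
+//					// the nested block's own attributes and blocks
+//					AttributeTypes: map[string]tftypes.Type{},
+//				},
+//			},
+//		},
+//	}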
+func (s *SchemaBlock) ValueType() tftypes.Type { + if s == nil { + return tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{}, + } + } + + attributeTypes := map[string]tftypes.Type{} + + for _, attribute := range s.Attributes { + if attribute == nil { + continue + } + + attributeType := attribute.ValueType() + + if attributeType == nil { + continue + } + + attributeTypes[attribute.Name] = attributeType + } + + for _, block := range s.BlockTypes { + if block == nil { + continue + } + + blockType := block.ValueType() + + if blockType == nil { + continue + } + + attributeTypes[block.TypeName] = blockType + } + + return tftypes.Object{ + AttributeTypes: attributeTypes, + } +} + +// SchemaAttribute represents a single attribute within a schema block. +// Attributes are the fields users can set in configuration using the equals +// sign, can assign to variables, can interpolate, and can use list +// comprehensions on. +type SchemaAttribute struct { + // Name is the name of the attribute. This is what the user will put + // before the equals sign to assign a value to this attribute. + Name string + + // Type indicates the type of data the attribute expects. See the + // documentation for the tftypes package for information on what types + // are supported and their behaviors. + Type tftypes.Type + + // NestedType indicates that this is a NestedBlock-style object masquerading + // as an attribute. This field conflicts with Type. + NestedType *SchemaObject + + // Description offers an end-user friendly description of what the + // attribute is for. This will be surfaced to users through editor + // integrations, documentation generation, and other settings. + Description string + + // Required, when set to true, indicates that this attribute must have + // a value assigned to it by the user or Terraform will throw an error. + Required bool + + // Optional, when set to true, indicates that the user does not need to + // supply a value for this attribute, but may. + Optional bool + + // Computed, when set to true, indicates that the provider will supply a + // value for this field. If Optional and Required are false and + // Computed is true, the user will not be able to specify a value for + // this field without Terraform throwing an error. If Optional is true + // and Computed is true, the user can specify a value for this field, + // but the provider may supply a value if the user does not. It is + // always a violation of Terraform's protocol to substitute a value for + // what the user entered, even if Computed is true. + Computed bool + + // Sensitive, when set to true, indicates that the contents of this + // attribute should be considered sensitive and not included in output. + // This does not encrypt or otherwise protect these values in state, it + // only offers protection from them showing up in plans or other + // output. + Sensitive bool + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // Deprecated, when set to true, indicates that an attribute should no + // longer be used and users should migrate away from it. At the moment + // it is unused and will have no impact, but it will be used in future + // tooling that is powered by provider schemas to enable richer user + // experiences. Providers should set it when deprecating attributes in + // preparation for these tools. + Deprecated bool +} + +// ValueType returns the tftypes.Type for a SchemaAttribute.
+// +// If SchemaAttribute is missing, nil is returned. +func (s *SchemaAttribute) ValueType() tftypes.Type { + if s == nil { + return nil + } + + // It is not valid to set both NestedType and Type. + if s.NestedType != nil { + return s.NestedType.ValueType() + } + + return s.Type +} + +// SchemaNestedBlock is a nested block within another block. See SchemaBlock +// for more information on blocks. +type SchemaNestedBlock struct { + // TypeName is the name of the block. It is what the user will specify + // when using the block in configuration. + TypeName string + + // Block is the block being nested inside another block. See the + // SchemaBlock documentation for more information on blocks. + Block *SchemaBlock + + // Nesting is the kind of nesting the block is using. Different nesting + // modes have different behaviors and imply different kinds of data. + Nesting SchemaNestedBlockNestingMode + + // MinItems is the minimum number of instances of this block that a + // user must specify or Terraform will return an error. + // + // MinItems can only be set for SchemaNestedBlockNestingModeList and + // SchemaNestedBlockNestingModeSet. SchemaNestedBlockNestingModeSingle + // can also set MinItems and MaxItems both to 1 to indicate that the + // block is required to be set. All other SchemaNestedBlockNestingModes + // must leave MinItems set to 0. + MinItems int64 + + // MaxItems is the maximum number of instances of this block that a + // user may specify before Terraform returns an error. + // + // MaxItems can only be set for SchemaNestedBlockNestingModeList and + // SchemaNestedBlockNestingModeSet. SchemaNestedBlockNestingModeSingle + // can also set MinItems and MaxItems both to 1 to indicate that the + // block is required to be set. All other SchemaNestedBlockNestingModes + // must leave MaxItems set to 0. + MaxItems int64 +} + +// ValueType returns the tftypes.Type for a SchemaNestedBlock. +// +// If SchemaNestedBlock is missing or the Nesting mode is invalid, nil is +// returned. +func (s *SchemaNestedBlock) ValueType() tftypes.Type { + if s == nil { + return nil + } + + blockType := s.Block.ValueType() + + switch s.Nesting { + case SchemaNestedBlockNestingModeGroup: + return blockType + case SchemaNestedBlockNestingModeList: + return tftypes.List{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeMap: + return tftypes.Map{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeSet: + return tftypes.Set{ + ElementType: blockType, + } + case SchemaNestedBlockNestingModeSingle: + return blockType + default: + return nil + } +} + +// SchemaNestedBlockNestingMode indicates the nesting mode for +// SchemaNestedBlocks. The nesting mode determines the number of instances of +// the block allowed, how many labels the block expects, and the data structure +// used for the block in config and state values. +type SchemaNestedBlockNestingMode int32 + +func (s SchemaNestedBlockNestingMode) String() string { + switch s { + case 0: + return "INVALID" + case 1: + return "SINGLE" + case 2: + return "LIST" + case 3: + return "SET" + case 4: + return "MAP" + case 5: + return "GROUP" + } + return "UNKNOWN" +} + +// SchemaObject represents a nested-block-style object in an Attribute. +type SchemaObject struct { + // Attributes are the attributes defined within the Object. + Attributes []*SchemaAttribute + + Nesting SchemaObjectNestingMode +} + +// ValueType returns the tftypes.Type for a SchemaObject.
+// +// If SchemaObject is missing or the Nesting mode is invalid, nil is returned. +func (s *SchemaObject) ValueType() tftypes.Type { + if s == nil { + return nil + } + + attributeTypes := map[string]tftypes.Type{} + + for _, attribute := range s.Attributes { + if attribute == nil { + continue + } + + attributeType := attribute.ValueType() + + if attributeType == nil { + continue + } + + attributeTypes[attribute.Name] = attributeType + } + + objectType := tftypes.Object{ + AttributeTypes: attributeTypes, + } + + switch s.Nesting { + case SchemaObjectNestingModeList: + return tftypes.List{ + ElementType: objectType, + } + case SchemaObjectNestingModeMap: + return tftypes.Map{ + ElementType: objectType, + } + case SchemaObjectNestingModeSet: + return tftypes.Set{ + ElementType: objectType, + } + case SchemaObjectNestingModeSingle: + return objectType + default: + return nil + } +} + +// SchemaObjectNestingMode indicates the nesting mode for +// SchemaNestedBlocks. The nesting mode determines the number of instances of +// the nested type allowed and the data structure used for the block in config +// and state values. +type SchemaObjectNestingMode int32 + +func (s SchemaObjectNestingMode) String() string { + switch s { + case 0: + return "INVALID" + case 1: + return "SINGLE" + case 2: + return "LIST" + case 3: + return "SET" + case 4: + return "MAP" + } + return "UNKNOWN" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/server_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/server_capabilities.go new file mode 100644 index 0000000000..959899ced5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/server_capabilities.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +// ServerCapabilities allows providers to communicate optionally supported +// protocol features, such as forward-compatible Terraform behavior changes. +// +// This information is used in GetProviderSchemaResponse as capabilities are +// static features which must be known upfront in the provider server. +type ServerCapabilities struct { + // GetProviderSchemaOptional signals that this provider does not require + // having the GetProviderSchema RPC called first to operate normally. This + // means the caller can use a cached copy of the provider's schema instead. + GetProviderSchemaOptional bool + + // MoveResourceState signals that a provider supports the MoveResourceState + // RPC. + MoveResourceState bool + + // PlanDestroy signals that a provider expects a call to + // PlanResourceChange when a resource is going to be destroyed. This is + // opt-in to prevent unexpected errors or panics since the + // ProposedNewState in PlanResourceChangeRequest will be a null value. + PlanDestroy bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/state.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/state.go new file mode 100644 index 0000000000..5f7a14518b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/state.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +import ( + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// ErrUnknownRawStateType is returned when a RawState has no Flatmap or JSON +// bytes set. This should never be returned during the normal operation of a +// provider, and indicates one of the following: +// +// 1. 
terraform-plugin-go is out of sync with the protocol and should be +// updated. +// +// 2. terraform-plugin-go has a bug. +// +// 3. The `RawState` was generated or modified by something other than +// terraform-plugin-go and is no longer a valid value. +var ErrUnknownRawStateType = errors.New("RawState had no JSON or flatmap data set") + +// RawState is the raw, undecoded state for providers to upgrade. It is +// undecoded as Terraform, for whatever reason, doesn't have the previous +// schema available to it, and so cannot decode the state itself and pushes +// that responsibility off onto providers. +// +// It is safe to assume that Flatmap can be ignored for any state written by +// Terraform 0.12.0 or higher, but it is not safe to assume that all states +// written by 0.12.0 or higher will be in JSON format; future versions may +// switch to an alternate encoding for states. +type RawState struct { + JSON []byte + Flatmap map[string]string +} + +// Unmarshal returns a `tftypes.Value` that represents the information +// contained in the RawState in an easy-to-interact-with way. It is the +// main purpose of the RawState type, and is how provider developers should +// obtain state values from the UpgradeResourceState RPC call. +// +// Pass in the type you want the `Value` to be interpreted as. Terraform's type +// system encodes in a lossy manner, meaning the type information is not +// preserved losslessly when going over the wire. Sets, lists, and tuples all +// look the same. Objects and maps all look the same, as well, as do +// user-specified values when DynamicPseudoType is used in the schema. +// Fortunately, the provider should already know the type; it should be the +// type of the schema, or DynamicPseudoType if that's what's in the schema. +// `Unmarshal` will then parse the value as though it belongs to that type, if +// possible, and return a `tftypes.Value` with the appropriate information. If +// the data can't be interpreted as that type, an error will be returned saying +// so. In these cases, double check to make sure the schema is declaring the +// same type being passed into `Unmarshal`. +// +// In the event an ErrUnknownRawStateType is returned, one of three things +// has happened: +// +// 1. terraform-plugin-go is out of date and out of sync with the protocol, and +// an issue should be opened on its repo to get it updated. +// +// 2. terraform-plugin-go has a bug somewhere, and an issue should be opened on +// its repo to get it fixed. +// +// 3. The provider or a dependency has modified the `RawState` in an +// unsupported way, or has created one from scratch, and should treat it as +// opaque and not modify it, only calling `Unmarshal` on `RawState`s received +// from RPC requests. +// +// State files written before Terraform 0.12 that haven't been upgraded yet +// cannot be unmarshaled, and must have their Flatmap property read directly. +func (s RawState) Unmarshal(typ tftypes.Type) (tftypes.Value, error) { + if s.JSON != nil { + return tftypes.ValueFromJSON(s.JSON, typ) //nolint:staticcheck + } + if s.Flatmap != nil { + return tftypes.Value{}, fmt.Errorf("flatmap states cannot be unmarshaled, only states written by Terraform 0.12 and higher can be unmarshaled") + } + return tftypes.Value{}, ErrUnknownRawStateType +} + +// UnmarshalOpts contains options that can be used to modify the behaviour when +// unmarshalling. Currently, this only contains a struct for opts for JSON but +// could have a field for Flatmap in the future.
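+//
+// As an editorial sketch from a provider's perspective (the schema type and
+// attribute names are hypothetical), an UpgradeResourceState handler might
+// decode raw state like this, optionally ignoring attributes the current
+// schema no longer defines:
+//
+//	stateType := tftypes.Object{
+//		AttributeTypes: map[string]tftypes.Type{
+//			"id": tftypes.String,
+//		},
+//	}
+//	val, err := req.RawState.UnmarshalWithOpts(stateType, tfprotov6.UnmarshalOpts{
+//		ValueFromJSONOpts: tftypes.ValueFromJSONOpts{IgnoreUndefinedAttributes: true},
+//	})
+//	if err != nil {
+//		// handle ErrUnknownRawStateType or a type mismatch
+//	}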
+type UnmarshalOpts struct { + ValueFromJSONOpts tftypes.ValueFromJSONOpts +} + +// UnmarshalWithOpts is identical to Unmarshal but also accepts an UnmarshalOpts which contains +// options that can be used to modify the behaviour when unmarshalling JSON or Flatmap. +func (s RawState) UnmarshalWithOpts(typ tftypes.Type, opts UnmarshalOpts) (tftypes.Value, error) { + if s.JSON != nil { + return tftypes.ValueFromJSONWithOpts(s.JSON, typ, opts.ValueFromJSONOpts) //nolint:staticcheck + } + if s.Flatmap != nil { + return tftypes.Value{}, fmt.Errorf("flatmap states cannot be unmarshaled, only states written by Terraform 0.12 and higher can be unmarshaled") + } + return tftypes.Value{}, ErrUnknownRawStateType +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/string_kind.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/string_kind.go new file mode 100644 index 0000000000..7f0d9131b6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/string_kind.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +const ( + // StringKindPlain indicates a string is plaintext, and should be + // interpreted as having no formatting information. + StringKindPlain StringKind = 0 + + // StringKindMarkdown indicates a string is markdown-formatted, and + // should be rendered using a Markdown renderer to correctly display + // its formatting. + StringKindMarkdown StringKind = 1 +) + +// StringKind indicates a formatting or encoding scheme for a string. +type StringKind int32 + +func (s StringKind) String() string { + switch s { + case 0: + return "PLAIN" + case 1: + return "MARKDOWN" + } + return "UNKNOWN" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/doc.go new file mode 100644 index 0000000000..5501d70d71 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/doc.go @@ -0,0 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package tf6server provides a server implementation to run +// tfprotov6.ProviderServers as gRPC servers. +// +// Providers will likely be calling tf6server.Serve from their main function to +// start the server so Terraform can connect to it. +package tf6server diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/plugin.go new file mode 100644 index 0000000000..c5d24ffdb4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/plugin.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf6server + +import ( + "context" + "errors" + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" + "google.golang.org/grpc" +) + +// GRPCProviderPlugin is an implementation of the +// github.com/hashicorp/go-plugin#Plugin and +// github.com/hashicorp/go-plugin#GRPCPlugin interfaces, indicating how to +// serve tfprotov6.ProviderServers as gRPC plugins for go-plugin. +type GRPCProviderPlugin struct { + GRPCProvider func() tfprotov6.ProviderServer + Opts []ServeOpt + Name string +} + +// Server always returns an error; we're only implementing the GRPCPlugin +// interface, not the Plugin interface.
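+//
+// For orientation, an editorial sketch of how this plugin type is wired into
+// go-plugin (mirroring what Serve in server.go does; the handshake, factory,
+// and address names are illustrative):
+//
+//	plugin.Serve(&plugin.ServeConfig{
+//		HandshakeConfig: handshake, // protocol version and magic cookie
+//		Plugins: plugin.PluginSet{
+//			"provider": &GRPCProviderPlugin{
+//				GRPCProvider: providerServerFactory,
+//				Name:         providerAddress,
+//			},
+//		},
+//	})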
+func (p *GRPCProviderPlugin) Server(*plugin.MuxBroker) (interface{}, error) { + return nil, errors.New("terraform-plugin-go only implements gRPC servers") +} + +// Client always returns an error; we're only implementing the GRPCPlugin +// interface, not the Plugin interface. +func (p *GRPCProviderPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("terraform-plugin-go only implements gRPC servers") +} + +// GRPCClient always returns an error; we're only implementing the server half +// of the interface. +func (p *GRPCProviderPlugin) GRPCClient(context.Context, *plugin.GRPCBroker, *grpc.ClientConn) (interface{}, error) { + return nil, errors.New("terraform-plugin-go only implements gRPC servers") +} + +// GRPCServer registers the gRPC provider server with the gRPC server that +// go-plugin is standing up. +func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + tfplugin6.RegisterProviderServer(s, New(p.Name, p.GRPCProvider(), p.Opts...)) + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go new file mode 100644 index 0000000000..e8b5eb4dae --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go @@ -0,0 +1,1043 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf6server + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/signal" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "google.golang.org/grpc" + + "github.com/hashicorp/terraform-plugin-go/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-log/tfsdklog" + "github.com/mitchellh/go-testing-interface" +) + +const ( + // protocolVersionMajor represents the major version number of the protocol + // being served. This is used during the plugin handshake to validate the + // server and client are compatible. + // + // In the future, it may be possible to include this information directly + // in the protocol buffers rather than recreating a constant here. + protocolVersionMajor uint = 6 + + // protocolVersionMinor represents the minor version number of the protocol + // being served. Backwards compatible additions are possible in the + // protocol definitions, which is when this may be increased. While it is + // not used in plugin negotiation, it can be helpful to include this value + // for debugging, such as in logs. + // + // In the future, it may be possible to include this information directly + // in the protocol buffers rather than recreating a constant here. + protocolVersionMinor uint = 4 +) + +// protocolVersion represents the combined major and minor version numbers of +// the protocol being served. 
+var protocolVersion string = fmt.Sprintf("%d.%d", protocolVersionMajor, protocolVersionMinor) + +const ( + // envTfReattachProviders is the environment variable used by Terraform CLI + // to directly connect to already running provider processes, such as those + // being inspected by debugging processes. When connecting to providers in + // this manner, Terraform CLI disables certain plugin handshake checks and + // will not stop the provider process. + envTfReattachProviders = "TF_REATTACH_PROVIDERS" +) + +const ( + // grpcMaxMessageSize is the maximum gRPC send and receive message sizes + // for the server. + // + // This 256MB value is arbitrarily raised from the default message sizes of + // 4MB to account for advanced use cases, but arbitrarily lowered from + // MaxInt32 (or similar) to prevent incorrect server implementations from + // exhausting resources in common execution environments. Receiving a gRPC + // message size error is preferable for troubleshooting over determining + // why an execution environment may have terminated the process via its + // memory management processes, such as oom-killer on Linux. + // + // This value is kept as constant over allowing server configurability + // since there are many factors that influence message size, such as + // Terraform configuration and state data. If larger message size use + // cases appear, other gRPC options should be explored, such as + // implementing streaming RPCs and messages. + grpcMaxMessageSize = 256 << 20 +) + +// ServeOpt is an interface for defining options that can be passed to the +// Serve function. Each implementation modifies the ServeConfig being +// generated. A slice of ServeOpts then, cumulatively applied, renders a full +// ServeConfig. +type ServeOpt interface { + ApplyServeOpt(*ServeConfig) error +} + +// ServeConfig contains the configured options for how a provider should be +// served. +type ServeConfig struct { + logger hclog.Logger + debugCtx context.Context + debugCh chan *plugin.ReattachConfig + debugCloseCh chan struct{} + + managedDebug bool + managedDebugReattachConfigTimeout time.Duration + managedDebugStopSignals []os.Signal + + disableLogInitStderr bool + disableLogLocation bool + useLoggingSink testing.T + envVar string +} + +type serveConfigFunc func(*ServeConfig) error + +func (s serveConfigFunc) ApplyServeOpt(in *ServeConfig) error { + return s(in) +} + +// WithDebug returns a ServeOpt that will set the server into debug mode, using +// the passed options to populate the go-plugin ServeTestConfig. +// +// This is an advanced ServeOpt that assumes the caller will fully manage the +// reattach configuration and server lifecycle. Refer to WithManagedDebug for a +// ServeOpt that handles common use cases, such as implementing provider main +// functions. +func WithDebug(ctx context.Context, config chan *plugin.ReattachConfig, closeCh chan struct{}) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + if in.managedDebug { + return errors.New("cannot set both WithDebug and WithManagedDebug") + } + + in.debugCtx = ctx + in.debugCh = config + in.debugCloseCh = closeCh + return nil + }) +} + +// WithManagedDebug returns a ServeOpt that will start the server in debug +// mode, managing the reattach configuration handling and server lifecycle. +// Reattach configuration is output to stdout with human-friendly instructions. +// By default, the server can be stopped with os.Interrupt (SIGINT; ctrl-c).
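+//
+// As an editorial sketch (the provider address and newProviderServer
+// constructor are hypothetical; flag and log are the standard library
+// packages), a provider main function opting into managed debug mode might
+// look like:
+//
+//	func main() {
+//		debug := flag.Bool("debug", false, "run with managed debug support")
+//		flag.Parse()
+//
+//		var opts []tf6server.ServeOpt
+//		if *debug {
+//			opts = append(opts, tf6server.WithManagedDebug())
+//		}
+//
+//		err := tf6server.Serve("registry.terraform.io/examplecorp/example", newProviderServer, opts...)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//	}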
+// +// Refer to the optional WithManagedDebugStopSignals and +// WithManagedDebugReattachConfigTimeout ServeOpt for additional configuration. +// +// The reattach configuration output of this handling is not protected by +// compatibility guarantees. Use the WithDebug ServeOpt for advanced use cases. +func WithManagedDebug() ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + if in.debugCh != nil { + return errors.New("cannot set both WithDebug and WithManagedDebug") + } + + in.managedDebug = true + return nil + }) +} + +// WithManagedDebugStopSignals returns a ServeOpt that will set the stop signals for a +// debug managed process (WithManagedDebug). When not configured, os.Interrupt +// (SIGINT; Ctrl-c) will stop the process. +func WithManagedDebugStopSignals(signals []os.Signal) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.managedDebugStopSignals = signals + return nil + }) +} + +// WithManagedDebugReattachConfigTimeout returns a ServeOpt that will set the timeout +// for a debug managed process to start and return its reattach configuration. +// When not configured, 2 seconds is the default. +func WithManagedDebugReattachConfigTimeout(timeout time.Duration) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.managedDebugReattachConfigTimeout = timeout + return nil + }) +} + +// WithGoPluginLogger returns a ServeOpt that will set the logger that +// go-plugin should use to log messages. +func WithGoPluginLogger(logger hclog.Logger) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.logger = logger + return nil + }) +} + +// WithLoggingSink returns a ServeOpt that will enable the logging sink, which +// is used in test frameworks to control where terraform-plugin-log output is +// written and at what levels, mimicking Terraform's logging sink behaviors. +func WithLoggingSink(t testing.T) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.useLoggingSink = t + return nil + }) +} + +// WithoutLogStderrOverride returns a ServeOpt that will disable the +// terraform-plugin-log behavior of logging to the stderr that existed at +// startup, not the stderr that exists when the logging statement is called. +func WithoutLogStderrOverride() ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.disableLogInitStderr = true + return nil + }) +} + +// WithoutLogLocation returns a ServeOpt that will exclude file names and line +// numbers from log output for the terraform-plugin-log logs generated by the +// SDKs and provider. +func WithoutLogLocation() ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.disableLogLocation = true + return nil + }) +} + +// WithLogEnvVarName sets the name of the provider for the purposes of the +// logging environment variable that controls the provider's log level. It is +// the part following TF_LOG_PROVIDER_ and defaults to the name part of the +// provider's registry address, or disabled if it can't parse the provider's +// registry address. Name must only contain letters, numbers, and hyphens. +func WithLogEnvVarName(name string) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + if !regexp.MustCompile(`^[a-zA-Z0-9-]+$`).MatchString(name) { + return errors.New("environment variable names can only contain a-z, A-Z, 0-9, and -") + } + in.envVar = name + return nil + }) +} + +// Serve starts a tfprotov6.ProviderServer serving, ready for Terraform to +// connect to it. 
The name passed in should be the fully qualified name that +// users will enter in the source field of the required_providers block, like +// "registry.terraform.io/hashicorp/time". +// +// Zero or more options to configure the server may also be passed. The default +// invocation is sufficient, but if the provider wants to run in debug mode or +// modify the logger that go-plugin is using, ServeOpts can be specified to +// support that. +func Serve(name string, serverFactory func() tfprotov6.ProviderServer, opts ...ServeOpt) error { + // Defaults + conf := ServeConfig{ + managedDebugReattachConfigTimeout: 2 * time.Second, + managedDebugStopSignals: []os.Signal{os.Interrupt}, + } + + for _, opt := range opts { + err := opt.ApplyServeOpt(&conf) + if err != nil { + return err + } + } + + serveConfig := &plugin.ServeConfig{ + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: protocolVersionMajor, + MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", + }, + Plugins: plugin.PluginSet{ + "provider": &GRPCProviderPlugin{ + GRPCProvider: serverFactory, + Opts: opts, + Name: name, + }, + }, + GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { + opts = append(opts, grpc.MaxRecvMsgSize(grpcMaxMessageSize)) + opts = append(opts, grpc.MaxSendMsgSize(grpcMaxMessageSize)) + + return grpc.NewServer(opts...) + }, + } + + if conf.logger != nil { + serveConfig.Logger = conf.logger + } + + if conf.managedDebug { + ctx, cancel := context.WithCancel(context.Background()) + signalCh := make(chan os.Signal, len(conf.managedDebugStopSignals)) + + signal.Notify(signalCh, conf.managedDebugStopSignals...) + + defer func() { + signal.Stop(signalCh) + cancel() + }() + + go func() { + select { + case <-signalCh: + cancel() + case <-ctx.Done(): + } + }() + + conf.debugCh = make(chan *plugin.ReattachConfig) + conf.debugCloseCh = make(chan struct{}) + conf.debugCtx = ctx + } + + if conf.debugCh != nil { + serveConfig.Test = &plugin.ServeTestConfig{ + Context: conf.debugCtx, + ReattachConfigCh: conf.debugCh, + CloseCh: conf.debugCloseCh, + } + } + + if !conf.managedDebug { + plugin.Serve(serveConfig) + return nil + } + + go plugin.Serve(serveConfig) + + var pluginReattachConfig *plugin.ReattachConfig + + select { + case pluginReattachConfig = <-conf.debugCh: + case <-time.After(conf.managedDebugReattachConfigTimeout): + return errors.New("timeout waiting on reattach configuration") + } + + if pluginReattachConfig == nil { + return errors.New("nil reattach configuration received") + } + + // Duplicate implementation is required because the go-plugin + // ReattachConfig.Addr implementation is not friendly for JSON encoding + // and to avoid importing terraform-exec. 
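+	// Editorial note: the JSON built below becomes the value of
+	// TF_REATTACH_PROVIDERS and looks roughly like the following (the
+	// address, PID, and socket path are examples only):
+	//
+	//	{"registry.terraform.io/examplecorp/example":{"Protocol":"grpc",
+	//	"ProtocolVersion":6,"Pid":12345,"Test":true,
+	//	"Addr":{"Network":"unix","String":"/tmp/plugin123"}}}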
+ type reattachConfigAddr struct { + Network string + String string + } + + type reattachConfig struct { + Protocol string + ProtocolVersion int + Pid int + Test bool + Addr reattachConfigAddr + } + + reattachBytes, err := json.Marshal(map[string]reattachConfig{ + name: { + Protocol: string(pluginReattachConfig.Protocol), + ProtocolVersion: pluginReattachConfig.ProtocolVersion, + Pid: pluginReattachConfig.Pid, + Test: pluginReattachConfig.Test, + Addr: reattachConfigAddr{ + Network: pluginReattachConfig.Addr.Network(), + String: pluginReattachConfig.Addr.String(), + }, + }, + }) + + if err != nil { + return fmt.Errorf("Error building reattach string: %w", err) + } + + reattachStr := string(reattachBytes) + + // This is currently intended to be executed via provider main function and + // human friendly, so output directly to stdout. + fmt.Printf("Provider started. To attach Terraform CLI, set the %s environment variable with the following:\n\n", envTfReattachProviders) + + switch runtime.GOOS { + case "windows": + fmt.Printf("\tCommand Prompt:\tset \"%s=%s\"\n", envTfReattachProviders, reattachStr) + fmt.Printf("\tPowerShell:\t$env:%s='%s'\n", envTfReattachProviders, strings.ReplaceAll(reattachStr, `'`, `''`)) + default: + fmt.Printf("\t%s='%s'\n", envTfReattachProviders, strings.ReplaceAll(reattachStr, `'`, `'"'"'`)) + } + + fmt.Println("") + + // Wait for the server to be done. + <-conf.debugCloseCh + + return nil +} + +type server struct { + downstream tfprotov6.ProviderServer + tfplugin6.UnimplementedProviderServer + + stopMu sync.Mutex + stopCh chan struct{} + + tflogSDKOpts tfsdklog.Options + tflogOpts tflog.Options + useTFLogSink bool + testHandle testing.T + name string + + // protocolDataDir is a directory to store raw protocol data files for + // debugging purposes. + protocolDataDir string + + // protocolVersion is the protocol version for the server. + protocolVersion string +} + +func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struct{}) { + select { + case <-ctx.Done(): + return + case <-stopCh: + cancel() + } +} + +// stoppableContext returns a context that wraps `ctx` but will be canceled +// when the server's stopCh is closed. +// +// This is used to cancel all in-flight contexts when the Stop method of the +// server is called. +func (s *server) stoppableContext(ctx context.Context) context.Context { + s.stopMu.Lock() + defer s.stopMu.Unlock() + + stoppable, cancel := context.WithCancel(ctx) + go mergeStop(stoppable, cancel, s.stopCh) + return stoppable +} + +// loggingContext returns a context that wraps `ctx` and has +// terraform-plugin-log loggers injected. +func (s *server) loggingContext(ctx context.Context) context.Context { + if s.useTFLogSink { + ctx = tfsdklog.RegisterTestSink(ctx, s.testHandle) + } + + ctx = logging.InitContext(ctx, s.tflogSDKOpts, s.tflogOpts) + ctx = logging.RequestIdContext(ctx) + ctx = logging.ProviderAddressContext(ctx, s.name) + ctx = logging.ProtocolVersionContext(ctx, s.protocolVersion) + + return ctx +} + +// New converts a tfprotov6.ProviderServer into a server capable of handling +// Terraform protocol requests and issuing responses using the gRPC types. 
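+//
+// As an editorial example (the provider address and providerServer value are
+// hypothetical), test harnesses or mux servers can obtain the gRPC-level
+// server directly instead of calling Serve:
+//
+//	grpcProviderServer := tf6server.New(
+//		"registry.terraform.io/examplecorp/example",
+//		providerServer,
+//	)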
+func New(name string, serve tfprotov6.ProviderServer, opts ...ServeOpt) tfplugin6.ProviderServer { + var conf ServeConfig + for _, opt := range opts { + err := opt.ApplyServeOpt(&conf) + if err != nil { + // this should never happen, we already executed all + // this code as part of Serve + panic(err) + } + } + var sdkOptions tfsdklog.Options + var options tflog.Options + if !conf.disableLogInitStderr { + sdkOptions = append(sdkOptions, tfsdklog.WithStderrFromInit()) + options = append(options, tfsdklog.WithStderrFromInit()) + } + if conf.disableLogLocation { + sdkOptions = append(sdkOptions, tfsdklog.WithoutLocation()) + options = append(options, tflog.WithoutLocation()) + } + envVar := conf.envVar + if envVar == "" { + envVar = logging.ProviderLoggerName(name) + } + if envVar != "" { + options = append(options, tfsdklog.WithLogName(envVar), tflog.WithLevelFromEnv(logging.EnvTfLogProvider, envVar)) + } + return &server{ + downstream: serve, + stopCh: make(chan struct{}), + tflogOpts: options, + tflogSDKOpts: sdkOptions, + name: name, + useTFLogSink: conf.useLoggingSink != nil, + testHandle: conf.useLoggingSink, + protocolDataDir: os.Getenv(logging.EnvTfLogSdkProtoDataDir), + protocolVersion: protocolVersion, + } +} + +func (s *server) GetMetadata(ctx context.Context, protoReq *tfplugin6.GetMetadata_Request) (*tfplugin6.GetMetadata_Response, error) { + rpc := "GetMetadata" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.GetMetadataRequest(protoReq) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.GetMetadata(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + tf6serverlogging.ServerCapabilities(ctx, resp.ServerCapabilities) + + protoResp := toproto.GetMetadata_Response(resp) + + return protoResp, nil +} + +func (s *server) GetProviderSchema(ctx context.Context, protoReq *tfplugin6.GetProviderSchema_Request) (*tfplugin6.GetProviderSchema_Response, error) { + rpc := "GetProviderSchema" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.GetProviderSchemaRequest(protoReq) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.GetProviderSchema(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + tf6serverlogging.ServerCapabilities(ctx, resp.ServerCapabilities) + + protoResp := toproto.GetProviderSchema_Response(resp) + + return protoResp, nil +} + +func (s *server) ConfigureProvider(ctx context.Context, protoReq *tfplugin6.ConfigureProvider_Request) (*tfplugin6.ConfigureProvider_Response, error) { + rpc := "ConfigureProvider" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ConfigureProviderRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + + ctx = 
tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ConfigureProvider(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + protoResp := toproto.ConfigureProvider_Response(resp) + + return protoResp, nil +} + +func (s *server) ValidateProviderConfig(ctx context.Context, protoReq *tfplugin6.ValidateProviderConfig_Request) (*tfplugin6.ValidateProviderConfig_Response, error) { + rpc := "ValidateProviderConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ValidateProviderConfigRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ValidateProviderConfig(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + protoResp := toproto.ValidateProviderConfig_Response(resp) + + return protoResp, nil +} + +// stop closes the stopCh associated with the server and replaces it with a new +// one. +// +// This causes all in-flight requests for the server to have their contexts +// canceled. +func (s *server) stop() { + s.stopMu.Lock() + defer s.stopMu.Unlock() + + close(s.stopCh) + s.stopCh = make(chan struct{}) +} + +func (s *server) StopProvider(ctx context.Context, protoReq *tfplugin6.StopProvider_Request) (*tfplugin6.StopProvider_Response, error) { + rpc := "StopProvider" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.StopProviderRequest(protoReq) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.StopProvider(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, nil) + logging.ProtocolTrace(ctx, "Closing all our contexts") + s.stop() + logging.ProtocolTrace(ctx, "Closed all our contexts") + + protoResp := toproto.StopProvider_Response(resp) + + return protoResp, nil +} + +func (s *server) ValidateDataResourceConfig(ctx context.Context, protoReq *tfplugin6.ValidateDataResourceConfig_Request) (*tfplugin6.ValidateDataResourceConfig_Response, error) { + rpc := "ValidateDataResourceConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.DataSourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ValidateDataResourceConfigRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ValidateDataResourceConfig(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + protoResp := 
toproto.ValidateDataResourceConfig_Response(resp) + + return protoResp, nil +} + +func (s *server) ReadDataSource(ctx context.Context, protoReq *tfplugin6.ReadDataSource_Request) (*tfplugin6.ReadDataSource_Response, error) { + rpc := "ReadDataSource" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.DataSourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ReadDataSourceRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", req.ProviderMeta) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ReadDataSource(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "State", resp.State) + + protoResp := toproto.ReadDataSource_Response(resp) + + return protoResp, nil +} + +func (s *server) ValidateResourceConfig(ctx context.Context, protoReq *tfplugin6.ValidateResourceConfig_Request) (*tfplugin6.ValidateResourceConfig_Response, error) { + rpc := "ValidateResourceConfig" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ValidateResourceConfigRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ValidateResourceConfig(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + protoResp := toproto.ValidateResourceConfig_Response(resp) + + return protoResp, nil +} + +func (s *server) UpgradeResourceState(ctx context.Context, protoReq *tfplugin6.UpgradeResourceState_Request) (*tfplugin6.UpgradeResourceState_Response, error) { + rpc := "UpgradeResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.UpgradeResourceStateRequest(protoReq) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.UpgradeResourceState(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "UpgradedState", resp.UpgradedState) + + protoResp := toproto.UpgradeResourceState_Response(resp) + + return protoResp, nil +} + +func (s *server) ReadResource(ctx context.Context, protoReq *tfplugin6.ReadResource_Request) (*tfplugin6.ReadResource_Response, error) { + rpc := "ReadResource" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, 
protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ReadResourceRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "CurrentState", req.CurrentState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", req.ProviderMeta) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Request", "Private", req.Private) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ReadResource(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response", "Private", resp.Private) + + protoResp := toproto.ReadResource_Response(resp) + + return protoResp, nil +} + +func (s *server) PlanResourceChange(ctx context.Context, protoReq *tfplugin6.PlanResourceChange_Request) (*tfplugin6.PlanResourceChange_Response, error) { + rpc := "PlanResourceChange" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.PlanResourceChangeRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PriorState", req.PriorState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProposedNewState", req.ProposedNewState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", req.ProviderMeta) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Request", "PriorPrivate", req.PriorPrivate) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.PlanResourceChange(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "PlannedState", resp.PlannedState) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response", "PlannedPrivate", resp.PlannedPrivate) + + protoResp := toproto.PlanResourceChange_Response(resp) + + return protoResp, nil +} + +func (s *server) ApplyResourceChange(ctx context.Context, protoReq *tfplugin6.ApplyResourceChange_Request) (*tfplugin6.ApplyResourceChange_Response, error) { + rpc := "ApplyResourceChange" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ApplyResourceChangeRequest(protoReq) + + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "Config", req.Config) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PlannedState", req.PlannedState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "PriorState", req.PriorState) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", "ProviderMeta", req.ProviderMeta) + 
logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Request", "PlannedPrivate", req.PlannedPrivate) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ApplyResourceChange(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "NewState", resp.NewState) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response", "Private", resp.Private) + + protoResp := toproto.ApplyResourceChange_Response(resp) + + return protoResp, nil +} + +func (s *server) ImportResourceState(ctx context.Context, protoReq *tfplugin6.ImportResourceState_Request) (*tfplugin6.ImportResourceState_Response, error) { + rpc := "ImportResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, protoReq.TypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + req := fromproto.ImportResourceStateRequest(protoReq) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + resp, err := s.downstream.ImportResourceState(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + for _, importedResource := range resp.ImportedResources { + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "State", importedResource.State) + logging.ProtocolPrivateData(ctx, s.protocolDataDir, rpc, "Response_ImportedResource", "Private", importedResource.Private) + } + + protoResp := toproto.ImportResourceState_Response(resp) + + return protoResp, nil +} + +func (s *server) MoveResourceState(ctx context.Context, protoReq *tfplugin6.MoveResourceState_Request) (*tfplugin6.MoveResourceState_Response, error) { + rpc := "MoveResourceState" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = logging.ResourceContext(ctx, protoReq.TargetTypeName) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + // Remove this check and error in preference of + // s.downstream.MoveResourceState below once ResourceServer interface + // implements the MoveResourceState method. + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/363 + // nolint:staticcheck + resourceServerWMRS, ok := s.downstream.(tfprotov6.ResourceServerWithMoveResourceState) + + if !ok { + logging.ProtocolError(ctx, "ProviderServer does not implement ResourceServerWithMoveResourceState") + + protoResp := &tfplugin6.MoveResourceState_Response{ + Diagnostics: []*tfplugin6.Diagnostic{ + { + Severity: tfplugin6.Diagnostic_ERROR, + Summary: "Provider Move Resource State Not Implemented", + Detail: "A MoveResourceState call was received by the provider, however the provider does not implement the call. 
" + + "Either upgrade the provider to a version that implements move resource state support or this is a bug in Terraform that should be reported to the Terraform maintainers.", + }, + }, + } + + return protoResp, nil + } + + req := fromproto.MoveResourceStateRequest(protoReq) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/363 + // resp, err := s.downstream.MoveResourceState(ctx, req) + resp, err := resourceServerWMRS.MoveResourceState(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]interface{}{logging.KeyError: err}) + + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "TargetState", resp.TargetState) + + protoResp := toproto.MoveResourceState_Response(resp) + + return protoResp, nil +} + +func (s *server) CallFunction(ctx context.Context, protoReq *tfplugin6.CallFunction_Request) (*tfplugin6.CallFunction_Response, error) { + rpc := "CallFunction" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + // Remove this check and error in preference of s.downstream.CallFunction + // below once ProviderServer interface requires FunctionServer. + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + functionServer, ok := s.downstream.(tfprotov6.FunctionServer) + + if !ok { + logging.ProtocolError(ctx, "ProviderServer does not implement FunctionServer") + + text := "Provider Functions Not Implemented: A provider-defined function call was received by the provider, however the provider does not implement functions. " + + "Either upgrade the provider to a version that implements provider-defined functions or this is a bug in Terraform that should be reported to the Terraform maintainers." + + protoResp := &tfplugin6.CallFunction_Response{ + Error: &tfplugin6.FunctionError{ + Text: text, + }, + } + + return protoResp, nil + } + + req := fromproto.CallFunctionRequest(protoReq) + + for position, argument := range req.Arguments { + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", fmt.Sprintf("Arguments_%d", position), argument) + } + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + // resp, err := s.downstream.CallFunction(ctx, req) + resp, err := functionServer.CallFunction(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]any{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponseWithError(ctx, resp.Error) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "Result", resp.Result) + + protoResp := toproto.CallFunction_Response(resp) + + return protoResp, nil +} + +func (s *server) GetFunctions(ctx context.Context, protoReq *tfplugin6.GetFunctions_Request) (*tfplugin6.GetFunctions_Response, error) { + rpc := "GetFunctions" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + // Remove this check and response in preference of s.downstream.GetFunctions + // below once ProviderServer interface requires FunctionServer. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + functionServer, ok := s.downstream.(tfprotov6.FunctionServer) + + if !ok { + logging.ProtocolWarn(ctx, "ProviderServer does not implement FunctionServer") + + protoResp := &tfplugin6.GetFunctions_Response{ + Functions: map[string]*tfplugin6.Function{}, + } + + return protoResp, nil + } + + req := fromproto.GetFunctionsRequest(protoReq) + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + // resp, err := s.downstream.GetFunctions(ctx, req) + resp, err := functionServer.GetFunctions(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]any{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + protoResp := toproto.GetFunctions_Response(resp) + + return protoResp, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path.go new file mode 100644 index 0000000000..809cc99f66 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path.go @@ -0,0 +1,425 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +var ( + // ErrNotAttributePathStepper is returned when a type that doesn't fulfill + // the AttributePathStepper interface is passed to WalkAttributePath. + ErrNotAttributePathStepper = errors.New("doesn't fill tftypes.AttributePathStepper interface") + + // ErrInvalidStep is returned when an AttributePath has the wrong kind + // of AttributePathStep for the type that WalkAttributePath is + // operating on. + ErrInvalidStep = errors.New("step cannot be applied to this value") +) + +// AttributePath is a type that can point to a specific value within an +// aggregate Terraform value. It consists of steps, each identifying one +// element or attribute of the current value, and making that the current +// value. This allows referring to arbitrarily precise values. +type AttributePath struct { + // Steps are the steps that must be followed from the root of the value + // to obtain the value being indicated. + steps []AttributePathStep +} + +// NewAttributePath returns an empty AttributePath, ready to have steps added +// to it using WithElementKeyString, WithElementKeyInt, WithElementKeyValue, or +// WithAttributeName. +func NewAttributePath() *AttributePath { + return &AttributePath{} +} + +// NewAttributePathWithSteps returns an AttributePath populated with the passed +// AttributePathSteps. +func NewAttributePathWithSteps(steps []AttributePathStep) *AttributePath { + return &AttributePath{ + steps: steps, + } +} + +// Steps returns the AttributePathSteps that make up an AttributePath. 
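For orientation, a minimal usage sketch (not part of the vendored file) of how an AttributePath is typically assembled with the With* helpers defined later in this file and rendered with String(); the attribute names here are invented:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	// Each With* call copies the path and appends a single step.
	path := tftypes.NewAttributePath().
		WithAttributeName("disks").
		WithElementKeyInt(0).
		WithAttributeName("size")

	// Prints: AttributeName("disks").ElementKeyInt(0).AttributeName("size")
	fmt.Println(path.String())
}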
+func (a *AttributePath) Steps() []AttributePathStep { + if a == nil { + return nil + } + steps := make([]AttributePathStep, len(a.steps)) + copy(steps, a.steps) + return steps +} + +func (a *AttributePath) String() string { + var res strings.Builder + for pos, step := range a.Steps() { + if pos != 0 { + res.WriteString(".") + } + switch v := step.(type) { + case AttributeName: + res.WriteString(`AttributeName("` + string(v) + `")`) + case ElementKeyString: + res.WriteString(`ElementKeyString("` + string(v) + `")`) + case ElementKeyInt: + res.WriteString(`ElementKeyInt(` + strconv.FormatInt(int64(v), 10) + `)`) + case ElementKeyValue: + res.WriteString(`ElementKeyValue(` + Value(v).String() + `)`) + } + } + return res.String() +} + +// Equal returns true if two AttributePaths should be considered equal. +// AttributePaths are considered equal if they have the same number of steps, +// the steps are all the same types, and the steps have all the same values. +func (a *AttributePath) Equal(o *AttributePath) bool { + if a == nil { + return o == nil || len(o.steps) == 0 + } + + if o == nil { + return len(a.steps) == 0 + } + + if len(a.steps) != len(o.steps) { + return false + } + + for pos, aStep := range a.steps { + oStep := o.steps[pos] + + if !aStep.Equal(oStep) { + return false + } + } + return true +} + +// NewErrorf returns an error associated with the value indicated by `a`. This +// is equivalent to calling a.NewError(fmt.Errorf(f, args...)). +func (a *AttributePath) NewErrorf(f string, args ...interface{}) error { + return a.NewError(fmt.Errorf(f, args...)) +} + +// NewError returns an error that associates `err` with the value indicated by +// `a`. +func (a *AttributePath) NewError(err error) error { + var wrapped AttributePathError + if errors.As(err, &wrapped) { + // TODO: at some point we'll probably want to handle the + // AttributePathError-within-AttributePathError situation, + // either by de-duplicating the paths we're surfacing, or + // privileging one, or something. For now, let's just do the + // naive thing and not add our own path. + return err + } + return AttributePathError{ + Path: a, + err: err, + } +} + +// LastStep returns the last step in the path. If the path is nil or empty, nil +// is returned. +func (a *AttributePath) LastStep() AttributePathStep { + if a == nil || len(a.steps) == 0 { + return nil + } + + return a.steps[len(a.steps)-1] +} + +// NextStep returns the next step in the path. If the path is nil or empty, nil +// is returned. +func (a *AttributePath) NextStep() AttributePathStep { + if a == nil || len(a.steps) == 0 { + return nil + } + + return a.steps[0] +} + +// WithAttributeName adds an AttributeName step to `a`, using `name` as the +// attribute's name. `a` is copied, not modified. +func (a *AttributePath) WithAttributeName(name string) *AttributePath { + if a == nil { + return &AttributePath{ + steps: []AttributePathStep{AttributeName(name)}, + } + } + + // Avoid re-allocating larger slice + steps := make([]AttributePathStep, len(a.steps)+1) + copy(steps, a.steps) + steps[len(steps)-1] = AttributeName(name) + + return &AttributePath{ + steps: steps, + } +} + +// WithElementKeyString adds an ElementKeyString step to `a`, using `key` as +// the element's key. `a` is copied, not modified. 
+func (a *AttributePath) WithElementKeyString(key string) *AttributePath { + if a == nil { + return &AttributePath{ + steps: []AttributePathStep{ElementKeyString(key)}, + } + } + + // Avoid re-allocating larger slice + steps := make([]AttributePathStep, len(a.steps)+1) + copy(steps, a.steps) + steps[len(steps)-1] = ElementKeyString(key) + + return &AttributePath{ + steps: steps, + } +} + +// WithElementKeyInt adds an ElementKeyInt step to `a`, using `key` as the +// element's key. `a` is copied, not modified. +func (a *AttributePath) WithElementKeyInt(key int) *AttributePath { + if a == nil { + return &AttributePath{ + steps: []AttributePathStep{ElementKeyInt(key)}, + } + } + + // Avoid re-allocating larger slice + steps := make([]AttributePathStep, len(a.steps)+1) + copy(steps, a.steps) + steps[len(steps)-1] = ElementKeyInt(key) + + return &AttributePath{ + steps: steps, + } +} + +// WithElementKeyValue adds an ElementKeyValue to `a`, using `key` as the +// element's key. `a` is copied, not modified. +func (a *AttributePath) WithElementKeyValue(key Value) *AttributePath { + if a == nil { + return &AttributePath{ + steps: []AttributePathStep{ElementKeyValue(key)}, + } + } + + // Avoid re-allocating larger slice + steps := make([]AttributePathStep, len(a.steps)+1) + copy(steps, a.steps) + steps[len(steps)-1] = ElementKeyValue(key) + + return &AttributePath{ + steps: steps, + } +} + +// WithoutLastStep removes the last step, whatever kind of step it was, from +// `a`. `a` is copied, not modified. +func (a *AttributePath) WithoutLastStep() *AttributePath { + if a == nil || len(a.steps) == 0 { + return nil + } + + return &AttributePath{ + // Paths are immutable, so this should be safe without copying. + steps: a.steps[:len(a.steps)-1], + } +} + +// AttributePathStep is an intentionally unimplementable interface that +// functions as an enum, allowing us to use different strongly-typed step types +// as a generic "step" type. +// +// An AttributePathStep is meant to indicate a single step in an AttributePath, +// indicating a specific attribute or element that is the next value in the +// path. +type AttributePathStep interface { + // Equal returns true if the AttributePathStep is equal to the other. + Equal(AttributePathStep) bool + + unfulfillable() // make this interface fillable only by this package +} + +var ( + _ AttributePathStep = AttributeName("") + _ AttributePathStep = ElementKeyString("") + _ AttributePathStep = ElementKeyInt(0) +) + +// AttributeName is an AttributePathStep implementation that indicates the next +// step in the AttributePath is to select an attribute. The value of the +// AttributeName is the name of the attribute to be selected. +type AttributeName string + +// Equal returns true if the other AttributePathStep is an AttributeName and +// has the same value. +func (a AttributeName) Equal(other AttributePathStep) bool { + otherA, ok := other.(AttributeName) + + if !ok { + return false + } + + return string(a) == string(otherA) +} + +func (a AttributeName) unfulfillable() {} + +// ElementKeyString is an AttributePathStep implementation that indicates the +// next step in the AttributePath is to select an element using a string key. +// The value of the ElementKeyString is the key of the element to select. +type ElementKeyString string + +// Equal returns true if the other AttributePathStep is an ElementKeyString and +// has the same value. 
+func (e ElementKeyString) Equal(other AttributePathStep) bool { + otherE, ok := other.(ElementKeyString) + + if !ok { + return false + } + + return string(e) == string(otherE) +} + +func (e ElementKeyString) unfulfillable() {} + +// ElementKeyInt is an AttributePathStep implementation that indicates the next +// step in the AttributePath is to select an element using an int64 key. The +// value of the ElementKeyInt is the key of the element to select. +type ElementKeyInt int64 + +// Equal returns true if the other AttributePathStep is an ElementKeyInt and +// has the same value. +func (e ElementKeyInt) Equal(other AttributePathStep) bool { + otherE, ok := other.(ElementKeyInt) + + if !ok { + return false + } + + return int(e) == int(otherE) +} + +func (e ElementKeyInt) unfulfillable() {} + +// ElementKeyValue is an AttributePathStep implementation that indicates the +// next step in the AttributePath is to select an element using the element +// itself as a key. The value of the ElementKeyValue is the key of the element +// to select. +type ElementKeyValue Value + +// Equal returns true if the other AttributePathStep is an ElementKeyValue and +// has the same value. +func (e ElementKeyValue) Equal(other AttributePathStep) bool { + otherE, ok := other.(ElementKeyValue) + + if !ok { + return false + } + + return Value(e).Equal(Value(otherE)) +} + +func (e ElementKeyValue) unfulfillable() {} + +// AttributePathStepper is an interface that types can implement to make them +// traversable by WalkAttributePath, allowing providers to retrieve the +// specific value an AttributePath is pointing to. +type AttributePathStepper interface { + // Return the attribute or element the AttributePathStep is referring + // to, or an error if the AttributePathStep is referring to an + // attribute or element that doesn't exist. + ApplyTerraform5AttributePathStep(AttributePathStep) (interface{}, error) +} + +// WalkAttributePath will return the Type or Value that `path` is pointing to, +// using `in` as the root. If an error is returned, the AttributePath returned +// will indicate the steps that remained to be applied when the error was +// encountered. +// +// map[string]interface{} and []interface{} types have built-in support. Other +// types need to use the AttributePathStepper interface to tell +// WalkAttributePath how to traverse themselves. 
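A hedged sketch of WalkAttributePath using the built-in map and slice support described above (the data is invented; the implementation follows below):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	in := map[string]interface{}{
		"disks": []interface{}{
			map[string]interface{}{"size": "10GB"},
		},
	}

	path := tftypes.NewAttributePath().
		WithAttributeName("disks").
		WithElementKeyInt(0).
		WithAttributeName("size")

	val, remaining, err := tftypes.WalkAttributePath(in, path)
	if err != nil {
		// remaining reports the steps left unapplied when the error occurred.
		fmt.Println(remaining, err)
		return
	}
	fmt.Println(val) // 10GB
}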
+func WalkAttributePath(in interface{}, path *AttributePath) (interface{}, *AttributePath, error) { + if path == nil || len(path.steps) == 0 { + return in, path, nil + } + stepper, ok := in.(AttributePathStepper) + if !ok { + stepper, ok = builtinAttributePathStepper(in) + if !ok { + return in, path, ErrNotAttributePathStepper + } + } + next, err := stepper.ApplyTerraform5AttributePathStep(path.NextStep()) + if err != nil { + return in, path, err + } + return WalkAttributePath(next, NewAttributePathWithSteps(path.steps[1:])) +} + +func builtinAttributePathStepper(in interface{}) (AttributePathStepper, bool) { + switch v := in.(type) { + case map[string]interface{}: + return mapStringInterfaceAttributePathStepper(v), true + case []interface{}: + return interfaceSliceAttributePathStepper(v), true + default: + return nil, false + } +} + +type mapStringInterfaceAttributePathStepper map[string]interface{} + +func (m mapStringInterfaceAttributePathStepper) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + attributeName, isAttributeName := step.(AttributeName) + elementKeyString, isElementKeyString := step.(ElementKeyString) + if !isAttributeName && !isElementKeyString { + return nil, ErrInvalidStep + } + var stepValue string + if isAttributeName { + stepValue = string(attributeName) + } + if isElementKeyString { + stepValue = string(elementKeyString) + } + v, ok := m[stepValue] + if !ok { + return nil, ErrInvalidStep + } + return v, nil +} + +type interfaceSliceAttributePathStepper []interface{} + +func (i interfaceSliceAttributePathStepper) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + eki, isElementKeyInt := step.(ElementKeyInt) + if !isElementKeyInt { + return nil, ErrInvalidStep + } + if eki < 0 { + return nil, ErrInvalidStep + } + // slices can only have items up to the max value of int + // but we get ElementKeyInt as an int64 + // we keep ElementKeyInt as an int64 and cast the length of the slice + // to int64 here because if ElementKeyInt is greater than the max value + // of int, we will always (correctly) error out here. This lets us + // confidently cast ElementKeyInt to an int below, knowing we're not + // truncating data + if int64(eki) >= int64(len(i)) { + return nil, ErrInvalidStep + } + return i[int(eki)], nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path_error.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path_error.go new file mode 100644 index 0000000000..df887c2fd6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/attribute_path_error.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "fmt" +) + +// AttributePathError represents an error associated with part of a +// tftypes.Value, indicated by the Path property. +type AttributePathError struct { + Path *AttributePath + err error +} + +// Equal returns true if two AttributePathErrors are semantically equal. To be +// considered equal, they must have the same path and if errors are set, the +// strings returned by their `Error()` methods must match. 
+func (a AttributePathError) Equal(o AttributePathError) bool { + if !a.Path.Equal(o.Path) { + return false + } + + if (a.err == nil && o.err != nil) || (a.err != nil && o.err == nil) { + return false + } + + if a.err == nil { + return true + } + + return a.err.Error() == o.err.Error() +} + +func (a AttributePathError) Error() string { + var path string + if len(a.Path.Steps()) > 0 { + path = a.Path.String() + ": " + } + return fmt.Sprintf("%s%s", path, a.err) +} + +func (a AttributePathError) Unwrap() error { + return a.err +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/diff.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/diff.go new file mode 100644 index 0000000000..72486b65e5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/diff.go @@ -0,0 +1,298 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "errors" + "fmt" + "math/big" +) + +// ValueDiff expresses a subset of a Value that is different between two +// Values. The Path property indicates where the subset is located within the +// Value, and Value1 and Value2 indicate what the subset is in each of the +// Values. If the Value does not contain a subset at that AttributePath, its +// Value will be nil. This is distinct from a Value with a nil in it (a "null" +// value), which is present in the Value. +type ValueDiff struct { + // The Path these different subsets are located at in the original + // Values. + Path *AttributePath + + // The subset of the first Value passed to Diff found at the + // AttributePath indicated by Path. + Value1 *Value + + // The subset of the second Value passed to Diff found at the + // AttributePath indicated by Path. + Value2 *Value +} + +func (v ValueDiff) String() string { + val1 := "{no value set}" + if v.Value1 != nil { + val1 = v.Value1.String() + } + val2 := "{no value set}" + if v.Value2 != nil { + val2 = v.Value2.String() + } + return fmt.Sprintf("%s: value1: %s, value2: %s", + v.Path.String(), val1, val2) +} + +// Equal returns whether two ValueDiffs should be considered equal or not. +// ValueDiffs are considered equal when their Path, Value1, and Value2 +// properties are considered equal. +func (v ValueDiff) Equal(o ValueDiff) bool { + if !v.Path.Equal(o.Path) { + return false + } + if v.Value1 == nil && o.Value1 != nil { + return false + } + if v.Value1 != nil && o.Value1 == nil { + return false + } + if v.Value1 != nil && o.Value1 != nil && !v.Value1.Equal(*o.Value1) { + return false + } + if v.Value2 == nil && o.Value2 != nil { + return false + } + if v.Value2 != nil && o.Value2 == nil { + return false + } + if v.Value2 != nil && o.Value2 != nil && !v.Value2.Equal(*o.Value2) { + return false + } + return true +} + +// Diff computes the differences between `val1` and `val2` and surfaces them as +// a slice of ValueDiffs. The ValueDiffs in the struct will use `val1`'s values +// as Value1 and `val2`'s values as Value2. An empty or nil slice means the two +// Values can be considered equal. Values must be the same type when passed to +// Diff; passing in Values of two different types will result in an error. If +// both Values are empty, they are considered equal. If one Value is missing +// type, it will result in an error. val1.Type().Is(val2.Type()) is a safe way +// to check that Values can be compared with Diff. 
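A small sketch of Diff in use, assuming the NewValue constructor defined elsewhere in this package; the attribute names and values are invented:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	objType := tftypes.Object{AttributeTypes: map[string]tftypes.Type{
		"name": tftypes.String,
		"port": tftypes.Number,
	}}

	v1 := tftypes.NewValue(objType, map[string]tftypes.Value{
		"name": tftypes.NewValue(tftypes.String, "web"),
		"port": tftypes.NewValue(tftypes.Number, 80),
	})
	v2 := tftypes.NewValue(objType, map[string]tftypes.Value{
		"name": tftypes.NewValue(tftypes.String, "web"),
		"port": tftypes.NewValue(tftypes.Number, 8080),
	})

	diffs, err := v1.Diff(v2)
	if err != nil {
		panic(err)
	}
	// Expect a single ValueDiff at AttributeName("port").
	for _, d := range diffs {
		fmt.Println(d.String())
	}
}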
+func (val1 Value) Diff(val2 Value) ([]ValueDiff, error) { + var diffs []ValueDiff + + if val1.Type() == nil && val2.Type() == nil && val1.value == nil && val2.value == nil { + return diffs, nil + } + if (val1.Type() == nil && val2.Type() != nil) || (val1.Type() != nil && val2.Type() == nil) { + return nil, errors.New("cannot diff value missing type") + } + if !val1.Type().Is(val2.Type()) { + return nil, errors.New("Can't diff values of different types") + } + + // make sure everything in val2 is also in val1 + err := Walk(val2, func(path *AttributePath, value2 Value) (bool, error) { + _, _, err := WalkAttributePath(val1, path) + if err != nil && err != ErrInvalidStep { + return false, fmt.Errorf("Error walking %q: %w", path, err) + } else if err == ErrInvalidStep { + diffs = append(diffs, ValueDiff{ + Path: path, + Value1: nil, + Value2: &value2, + }) + return false, nil + } + return true, nil + }) + if err != nil { + return nil, err + } + + // make sure everything in val1 is also in val2 and also that it all matches + err = Walk(val1, func(path *AttributePath, value1 Value) (bool, error) { + // pull out the Value at the same path in val2 + value2I, _, err := WalkAttributePath(val2, path) + if err != nil && err != ErrInvalidStep { + return false, fmt.Errorf("Error walking %q: %w", path, err) + } else if err == ErrInvalidStep { + diffs = append(diffs, ValueDiff{ + Path: path, + Value1: &value1, + Value2: nil, + }) + return true, nil + } + + value2, ok := value2I.(Value) + if !ok { + return false, fmt.Errorf("unexpected type %T in Diff", value2I) + } + + // if they're both unknown, no need to continue + if !value1.IsKnown() && !value2.IsKnown() { + return false, nil + } + + // if val1 is unknown and val2 not, we have a diff + // no need to continue to recurse into val1, no further to go + if !value1.IsKnown() && value2.IsKnown() { + diffs = append(diffs, ValueDiff{ + Path: path, + Value1: &value1, + Value2: &value2, + }) + return false, nil + } + + // if val2 is unknown and val1 not, we have a diff + // continue to recurse though, so we can surface the elements of val1 + // that are now "missing" as diffs + if value1.IsKnown() && !value2.IsKnown() { + diffs = append(diffs, ValueDiff{ + Path: path, + Value1: &value1, + Value2: &value2, + }) + return true, nil + } + + // if they're both null, no need to continue + if value1.IsNull() && value2.IsNull() { + return false, nil + } + + // if val1 is null and val2 not, we have a diff + // no need to continue to recurse into val1, no further to go + if value1.IsNull() && !value2.IsNull() { + diffs = append(diffs, ValueDiff{ + Path: path, + Value1: &value1, + Value2: &value2, + }) + return false, nil + } + + // if val2 is null and val1 not, we have a diff + // continue to recurse though, so we can surface the elements of val1 + // that are now "missing" as diffs + if !value1.IsNull() && value2.IsNull() { + diffs = append(diffs, ValueDiff{ + Path: path, + Value1: &value1, + Value2: &value2, + }) + return true, nil + } + + // we know there are known, non-null values, time to compare them + switch { + case value1.Type().Is(String): + var s1, s2 string + err := value1.As(&s1) + if err != nil { + return false, fmt.Errorf("Error converting %s (value1) at %q: %w", value1, path, err) + } + err = value2.As(&s2) + if err != nil { + return false, fmt.Errorf("Error converting %s (value2) at %q: %w", value2, path, err) + } + if s1 != s2 { + diffs = append(diffs, ValueDiff{ + Path: path, + Value1: &value1, + Value2: &value2, + }) + } + return false, nil + case 
value1.Type().Is(Number): + n1, n2 := big.NewFloat(0), big.NewFloat(0) + err := value1.As(&n1) + if err != nil { + return false, fmt.Errorf("Error converting %q: %w", path, err) + } + err = value2.As(&n2) + if err != nil { + return false, fmt.Errorf("Error converting %q: %w", path, err) + } + if n1.Cmp(n2) != 0 { + diffs = append(diffs, ValueDiff{ + Path: path, + Value1: &value1, + Value2: &value2, + }) + } + return false, nil + case value1.Type().Is(Bool): + var b1, b2 bool + err := value1.As(&b1) + if err != nil { + return false, fmt.Errorf("Error converting %q: %w", path, err) + } + err = value2.As(&b2) + if err != nil { + return false, fmt.Errorf("Error converting %q: %w", path, err) + } + if b1 != b2 { + diffs = append(diffs, ValueDiff{ + Path: path, + Value1: &value1, + Value2: &value2, + }) + } + return false, nil + case value1.Type().Is(List{}), value1.Type().Is(Set{}), value1.Type().Is(Tuple{}): + var s1, s2 []Value + err := value1.As(&s1) + if err != nil { + return false, fmt.Errorf("Error converting %q: %w", path, err) + } + err = value2.As(&s2) + if err != nil { + return false, fmt.Errorf("Error converting %q: %w", path, err) + } + // we only care about if the lengths match for lists, + // sets, and tuples. If any of the elements differ, + // the recursion of the walk will find them for us. + if len(s1) != len(s2) { + diffs = append(diffs, ValueDiff{ + Path: path, + Value1: &value1, + Value2: &value2, + }) + return true, nil + } + return true, nil + case value1.Type().Is(Map{}), value1.Type().Is(Object{}): + m1 := map[string]Value{} + m2 := map[string]Value{} + err := value1.As(&m1) + if err != nil { + return false, fmt.Errorf("Error converting %q: %w", path, err) + } + err = value2.As(&m2) + if err != nil { + return false, fmt.Errorf("Error converting %q: %w", path, err) + } + // we need maps and objects to have the same exact keys + // as each other + if len(m1) != len(m2) { + diffs = append(diffs, ValueDiff{ + Path: path, + Value1: &value1, + Value2: &value2, + }) + return true, nil + } + // if we have the same keys, we can just let recursion + // from the walk check the sub-values match + return true, nil + case value1.Type().Is(DynamicPseudoType): + // Let recursion from the walk check the sub-values match + return true, nil + } + return false, fmt.Errorf("unexpected type %v in Diff at %s", value1.Type(), path) + }) + return diffs, err +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/doc.go new file mode 100644 index 0000000000..ea82da6e90 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/doc.go @@ -0,0 +1,60 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package tftypes provides a type system for Terraform configuration and state +// values. +// +// Terraform's configuration and state values are stored using a collection of +// types. There are primitive types, such as strings, numbers, and booleans, +// but there are also aggregate types such as lists, sets, tuples, maps, and +// objects, which consist of multiple values of primitive types aggregated into +// a single value. There is also a dynamic pseudo-type that represents an +// unknown type. It is useful for indicating that any type of data is +// acceptable. +// +// Terraform's values map neatly onto either primitives built into Go or types +// in the Go standard library, with one exception. 
Terraform has the concept of +// unknown values, values that may or may not be set at a future date. These +// are distinct from null values, which indicate a value that is known to not +// be set, and are mostly encountered when a user has interpolated a computed +// field into another field; the field that is interpolated into has an unknown +// value, because the field being interpolated won't have its value known until +// apply time. +// +// To address this, the tftypes package wraps all values in a special Value +// type. This Value type is capable of holding known and unknown values, +// interrogating whether the value is known or not, and accessing the concrete +// value that Terraform sent in the cases where the value is known. A common +// pattern is to use the Value.IsKnown() method to confirm that a value is +// known, then to use the Value.As() method to retrieve the underlying data for +// use. +// +// When using the Value.As() method, certain types have built-in behavior to +// support using them as destinations for converted data: +// +// * String values can be converted into strings +// +// * Number values can be converted into *big.Floats +// +// * Boolean values can be converted into bools +// +// * List, Set, and Tuple values can be converted into a slice of Values +// +// * Map and Object values can be converted into a map with string keys and +// Value values. +// +// These defaults were chosen because they're capable of losslessly +// representing all possible values for their Terraform type, with the +// exception of null values. Converting into pointer versions of any of these +// types will correctly surface null values as well. +// +// Custom, provider-defined types can define their own conversion logic that +// will be respected by Value.As(), as well, by implementing the +// FromTerraform5Value method for that type. The FromTerraform5Value method +// accepts a Value as an argument and returns an error. The Value passed in +// will be the same Value that Value.As() was called on. The recommended +// implementation of the FromTerraform5Value method is to call Value.As() on +// the passed Value, converting it into one of the built-in types above, and +// then performing whatever type casting or conversion logic is required to +// assign the data to the provider-supplied type. +package tftypes diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/list.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/list.go new file mode 100644 index 0000000000..fdf9b1db6a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/list.go @@ -0,0 +1,129 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "bytes" + "fmt" +) + +// List is a Terraform type representing an ordered collection of elements, all +// of the same type. +type List struct { + ElementType Type + + // used to make this type uncomparable + // see https://golang.org/ref/spec#Comparison_operators + // this enforces the use of Is, instead + _ []struct{} +} + +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a List, +// returning the Type found at that AttributePath within the List. If the +// AttributePathStep cannot be applied to the List, an ErrInvalidStep error +// will be returned. 
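A short sketch (assuming NewValue and Value.As from elsewhere in this package) of constructing and reading a List value of the type just introduced:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	listType := tftypes.List{ElementType: tftypes.String}
	val := tftypes.NewValue(listType, []tftypes.Value{
		tftypes.NewValue(tftypes.String, "a"),
		tftypes.NewValue(tftypes.String, "b"),
	})

	// Lists convert to []tftypes.Value; elements convert to their own Go types.
	var elems []tftypes.Value
	if err := val.As(&elems); err != nil {
		panic(err)
	}
	for _, e := range elems {
		var s string
		if err := e.As(&s); err != nil {
			panic(err)
		}
		fmt.Println(s)
	}
}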
+func (l List) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch s := step.(type) { + case ElementKeyInt: + if int64(s) < 0 { + return nil, ErrInvalidStep + } + + return l.ElementType, nil + default: + return nil, ErrInvalidStep + } +} + +// Equal returns true if the two Lists are exactly equal. Unlike Is, passing in +// a List with no ElementType will always return false. +func (l List) Equal(o Type) bool { + v, ok := o.(List) + if !ok { + return false + } + if l.ElementType == nil || v.ElementType == nil { + // when doing exact comparisons, we can't compare types that + // don't have element types set, so we just consider them not + // equal + return false + } + return l.ElementType.Equal(v.ElementType) +} + +// UsableAs returns whether the two Lists are type compatible. +// +// If the other type is DynamicPseudoType, it will return true. +// If the other type is not a List, it will return false. +// If the other List does not have a type compatible ElementType, it will +// return false. +func (l List) UsableAs(o Type) bool { + if o.Is(DynamicPseudoType) { + return true + } + v, ok := o.(List) + if !ok { + return false + } + return l.ElementType.UsableAs(v.ElementType) +} + +// Is returns whether `t` is a List type or not. It does not perform any +// ElementType checks. +func (l List) Is(t Type) bool { + _, ok := t.(List) + return ok +} + +func (l List) String() string { + return "tftypes.List[" + l.ElementType.String() + "]" +} + +func (l List) private() {} + +func (l List) supportedGoTypes() []string { + return []string{"[]tftypes.Value"} +} + +func valueFromList(typ Type, in interface{}) (Value, error) { + switch value := in.(type) { + case []Value: + var valType Type + for pos, v := range value { + if !v.Type().UsableAs(typ) { + return Value{}, NewAttributePath().WithElementKeyInt(pos).NewErrorf("can't use %s as %s", v.Type(), typ) + } + if valType == nil { + valType = v.Type() + } + if !v.Type().Equal(valType) { + return Value{}, fmt.Errorf("lists must only contain one type of element, saw %s and %s", valType, v.Type()) + } + } + return Value{ + typ: List{ElementType: typ}, + value: value, + }, nil + default: + return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.List; expected types are: %s", in, formattedSupportedGoTypes(List{})) + } +} + +// MarshalJSON returns a JSON representation of the full type signature of `l`, +// including its ElementType. +// +// Deprecated: this is not meant to be called by third-party code. +func (l List) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + + buf.WriteString(`["list",`) + + // MarshalJSON is always error safe + elementTypeBytes, _ := l.ElementType.MarshalJSON() + + buf.Write(elementTypeBytes) + buf.WriteString(`]`) + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/map.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/map.go new file mode 100644 index 0000000000..cfcab04f59 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/map.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "bytes" + "fmt" + "sort" +) + +// Map is a Terraform type representing an unordered collection of elements, +// all of the same type, each identifiable with a unique string key. 
+type Map struct { + ElementType Type + + // used to make this type uncomparable + // see https://golang.org/ref/spec#Comparison_operators + // this enforces the use of Is, instead + _ []struct{} +} + +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a Map, +// returning the Type found at that AttributePath within the Map. If the +// AttributePathStep cannot be applied to the Map, an ErrInvalidStep error +// will be returned. +func (m Map) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch step.(type) { + case ElementKeyString: + return m.ElementType, nil + default: + return nil, ErrInvalidStep + } +} + +// Equal returns true if the two Maps are exactly equal. Unlike Is, passing in +// a Map with no ElementType will always return false. +func (m Map) Equal(o Type) bool { + v, ok := o.(Map) + if !ok { + return false + } + if v.ElementType == nil || m.ElementType == nil { + // when doing exact comparisons, we can't compare types that + // don't have element types set, so we just consider them not + // equal + return false + } + return m.ElementType.Equal(v.ElementType) +} + +// UsableAs returns whether the two Maps are type compatible. +// +// If the other type is DynamicPseudoType, it will return true. +// If the other type is not a Map, it will return false. +// If the other Map does not have a type compatible ElementType, it will +// return false. +func (m Map) UsableAs(o Type) bool { + if o.Is(DynamicPseudoType) { + return true + } + v, ok := o.(Map) + if !ok { + return false + } + return m.ElementType.UsableAs(v.ElementType) +} + +// Is returns whether `t` is a Map type or not. It does not perform any +// ElementType checks. +func (m Map) Is(t Type) bool { + _, ok := t.(Map) + return ok +} + +func (m Map) String() string { + return "tftypes.Map[" + m.ElementType.String() + "]" +} + +func (m Map) private() {} + +func (m Map) supportedGoTypes() []string { + return []string{"map[string]tftypes.Value"} +} + +// MarshalJSON returns a JSON representation of the full type signature of `m`, +// including its ElementType. +// +// Deprecated: this is not meant to be called by third-party code. 
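A minimal sketch of a Map value, assuming NewValue from elsewhere in this package; the keys and values are invented:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	mapType := tftypes.Map{ElementType: tftypes.Number}
	val := tftypes.NewValue(mapType, map[string]tftypes.Value{
		"min": tftypes.NewValue(tftypes.Number, 1),
		"max": tftypes.NewValue(tftypes.Number, 10),
	})
	fmt.Println(val.String())
}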
+func (m Map) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + + buf.WriteString(`["map",`) + + // MarshalJSON is always error safe + elementTypeBytes, _ := m.ElementType.MarshalJSON() + + buf.Write(elementTypeBytes) + buf.WriteString(`]`) + + return buf.Bytes(), nil +} + +func valueFromMap(typ Type, in interface{}) (Value, error) { + switch value := in.(type) { + case map[string]Value: + keys := make([]string, 0, len(value)) + for k := range value { + keys = append(keys, k) + } + sort.Strings(keys) + var elType Type + for _, k := range keys { + v := value[k] + if !v.Type().UsableAs(typ) { + return Value{}, NewAttributePath().WithElementKeyString(k).NewErrorf("can't use %s as %s", v.Type(), typ) + } + if elType == nil { + elType = v.Type() + } + if !elType.Equal(v.Type()) { + return Value{}, fmt.Errorf("maps must only contain one type of element, saw %s and %s", elType, v.Type()) + } + } + return Value{ + typ: Map{ElementType: typ}, + value: value, + }, nil + default: + return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.Map; expected types are: %s", in, formattedSupportedGoTypes(Map{})) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/object.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/object.go new file mode 100644 index 0000000000..0e340bee01 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/object.go @@ -0,0 +1,319 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" +) + +// Object is a Terraform type representing an unordered collection of +// attributes, potentially of differing types, each identifiable with a unique +// string name. The number of attributes, their names, and their types are part +// of the type signature for the Object, and so two Objects with different +// attribute names or types are considered to be distinct types. +type Object struct { + // AttributeTypes is a map of attributes to their types. The key should + // be the name of the attribute, and the value should be the type of + // the attribute. + AttributeTypes map[string]Type + + // OptionalAttributes is a set of attributes that are optional. This + // allows values of this object type to include or not include those + // attributes without changing the type of the value; other attributes + // are considered part of the type signature, and their absence means a + // value is no longer of that type. + // + // OptionalAttributes is only valid when declaring a type constraint + // (e.g. Schema) and should not be used as part of a Type when creating + // a Value (e.g. NewValue()). When creating a Value, all OptionalAttributes + // must still be defined in the Object by setting each attribute to a null + // or known value for its attribute type. + // + // The key of OptionalAttributes should be the name of the attribute + // that is optional. The value should be an empty struct, used only to + // indicate presence. + // + // OptionalAttributes must also be listed in the AttributeTypes + // property, indicating their types. + OptionalAttributes map[string]struct{} + + // used to make this type uncomparable + // see https://golang.org/ref/spec#Comparison_operators + // this enforces the use of Is, instead + _ []struct{} +} + +// ApplyTerraform5AttributePathStep applies an AttributePathStep to an Object, +// returning the Type found at that AttributePath within the Object. 
If the +// AttributePathStep cannot be applied to the Object, an ErrInvalidStep error +// will be returned. +func (o Object) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch s := step.(type) { + case AttributeName: + if len(o.AttributeTypes) == 0 { + return nil, ErrInvalidStep + } + + attrType, ok := o.AttributeTypes[string(s)] + + if !ok { + return nil, ErrInvalidStep + } + + return attrType, nil + default: + return nil, ErrInvalidStep + } +} + +// Equal returns true if the two Objects are exactly equal. Unlike Is, passing +// in an Object with no AttributeTypes will always return false. +func (o Object) Equal(other Type) bool { + v, ok := other.(Object) + if !ok { + return false + } + if v.AttributeTypes == nil || o.AttributeTypes == nil { + // when doing exact comparisons, we can't compare types that + // don't have attribute types set, so we just consider them not + // equal + return false + } + + // if they don't have the exact same optional attributes, they're not + // the same type. + if len(v.OptionalAttributes) != len(o.OptionalAttributes) { + return false + } + for attr := range o.OptionalAttributes { + if !v.attrIsOptional(attr) { + return false + } + } + + // if they don't have the same attribute types, they're not the + // same type. + if len(v.AttributeTypes) != len(o.AttributeTypes) { + return false + } + for k, typ := range o.AttributeTypes { + if _, ok := v.AttributeTypes[k]; !ok { + return false + } + if !typ.Equal(v.AttributeTypes[k]) { + return false + } + } + return true +} + +// UsableAs returns whether the two Objects are type compatible. +// +// If the other type is DynamicPseudoType, it will return true. +// If the other type is not an Object, it will return false. +// If the other Object does not have matching AttributeTypes length, it will +// return false. +// If the other Object does not have a type compatible ElementType for every +// nested attribute, it will return false. +// +// If the current type contains OptionalAttributes, it will panic. +func (o Object) UsableAs(other Type) bool { + if other.Is(DynamicPseudoType) { + return true + } + v, ok := other.(Object) + if !ok { + return false + } + if len(o.OptionalAttributes) > 0 { + panic("Objects with OptionalAttributes cannot be used.") + } + if len(v.AttributeTypes) != len(o.AttributeTypes) { + return false + } + for k, typ := range o.AttributeTypes { + otherTyp, ok := v.AttributeTypes[k] + if !ok { + return false + } + if !typ.UsableAs(otherTyp) { + return false + } + } + return true +} + +// Is returns whether `t` is an Object type or not. It does not perform any +// AttributeTypes checks. 
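To make the OptionalAttributes rule above concrete, a hedged sketch: the constraint form carries OptionalAttributes, while the value form sets every attribute, using null for the unset optional one (names invented; NewValue is defined elsewhere in this package):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	// Constraint form: "description" may be omitted by conforming values.
	constraint := tftypes.Object{
		AttributeTypes: map[string]tftypes.Type{
			"name":        tftypes.String,
			"description": tftypes.String,
		},
		OptionalAttributes: map[string]struct{}{
			"description": {},
		},
	}

	// Value form: OptionalAttributes is dropped and every attribute is set,
	// with null standing in for the unset optional attribute.
	valType := tftypes.Object{AttributeTypes: constraint.AttributeTypes}
	val := tftypes.NewValue(valType, map[string]tftypes.Value{
		"name":        tftypes.NewValue(tftypes.String, "example"),
		"description": tftypes.NewValue(tftypes.String, nil),
	})
	fmt.Println(val.String())
}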
+func (o Object) Is(t Type) bool { + _, ok := t.(Object) + return ok +} + +func (o Object) attrIsOptional(attr string) bool { + if o.OptionalAttributes == nil { + return false + } + _, ok := o.OptionalAttributes[attr] + return ok +} + +func (o Object) String() string { + var res strings.Builder + res.WriteString("tftypes.Object[") + keys := make([]string, 0, len(o.AttributeTypes)) + for k := range o.AttributeTypes { + keys = append(keys, k) + } + sort.Strings(keys) + for pos, key := range keys { + if pos != 0 { + res.WriteString(", ") + } + res.WriteString(`"` + key + `":`) + res.WriteString(o.AttributeTypes[key].String()) + if o.attrIsOptional(key) { + res.WriteString(`?`) + } + } + res.WriteString("]") + return res.String() +} + +func (o Object) private() {} + +func (o Object) supportedGoTypes() []string { + return []string{"map[string]tftypes.Value"} +} + +func valueFromObject(types map[string]Type, optionalAttrs map[string]struct{}, in interface{}) (Value, error) { + switch value := in.(type) { + case map[string]Value: + // types should only be null if the "Object" is actually a + // DynamicPseudoType being created from a map[string]Value. In + // which case, we don't know what types it should have, or even + // how many there will be, so let's not validate that at all + if types != nil { + for k := range types { + if _, ok := optionalAttrs[k]; ok { + // if it's optional, we don't need to check that it has a value + continue + } + if _, ok := value[k]; !ok { + return Value{}, fmt.Errorf("can't create a tftypes.Value of type %s, required attribute %q not set", Object{AttributeTypes: types}, k) + } + } + for k, v := range value { + typ, ok := types[k] + if !ok { + return Value{}, fmt.Errorf("can't set a value on %q in tftypes.NewValue, key not part of the object type %s", k, Object{AttributeTypes: types}) + } + if v.Type() == nil { + return Value{}, NewAttributePath().WithAttributeName(k).NewErrorf("missing value type") + } + if !v.Type().UsableAs(typ) { + return Value{}, NewAttributePath().WithAttributeName(k).NewErrorf("can't use %s as %s", v.Type(), typ) + } + } + } + return Value{ + typ: Object{AttributeTypes: types, OptionalAttributes: optionalAttrs}, + value: value, + }, nil + default: + return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.Object; expected types are: %s", in, formattedSupportedGoTypes(Object{})) + } +} + +// MarshalJSON returns a JSON representation of the full type signature of `o`, +// including the AttributeTypes and, if present, OptionalAttributes. +// +// Deprecated: this is not meant to be called by third-party code. +func (o Object) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + + buf.WriteString(`["object",{`) + + attributeTypeNames := make([]string, 0, len(o.AttributeTypes)) + + for attributeTypeName := range o.AttributeTypes { + attributeTypeNames = append(attributeTypeNames, attributeTypeName) + } + + // Ensure consistent ordering for human readability and unit testing. + // The slices package was introduced in Go 1.21, so it is not usable until + // this Go module is updated to Go 1.21 minimum. 
+ sort.Strings(attributeTypeNames) + + for index, attributeTypeName := range attributeTypeNames { + if index > 0 { + buf.WriteString(`,`) + } + + buf.Write(marshalJSONObjectAttributeName(attributeTypeName)) + buf.WriteString(`:`) + + // MarshalJSON is always error safe + attributeTypeBytes, _ := o.AttributeTypes[attributeTypeName].MarshalJSON() + + buf.Write(attributeTypeBytes) + } + + buf.WriteString(`}`) + + if len(o.OptionalAttributes) > 0 { + buf.WriteString(`,[`) + + optionalAttributeNames := make([]string, 0, len(o.OptionalAttributes)) + + for optionalAttributeName := range o.OptionalAttributes { + optionalAttributeNames = append(optionalAttributeNames, optionalAttributeName) + } + + // Ensure consistent ordering for human readability and unit testing. + // The slices package was introduced in Go 1.21, so it is not usable + // until this Go module is updated to Go 1.21 minimum. + sort.Strings(optionalAttributeNames) + + for index, optionalAttributeName := range optionalAttributeNames { + if index > 0 { + buf.WriteString(`,`) + } + + buf.Write(marshalJSONObjectAttributeName(optionalAttributeName)) + } + + buf.WriteString(`]`) + } + + buf.WriteString(`]`) + + return buf.Bytes(), nil +} + +// marshalJSONObjectAttributeName marshals an object attribute name string into +// JSON or panics. +// +// JSON encoding a string has some non-trivial rules and go-cty already depends +// on the Go standard library for this, so for now this logic also offloads this +// effort the same way to handle user input. As of Go 1.21, it is not possible +// for a caller to input something that would trigger an encoding error. There +// is FuzzMarshalJSONObjectAttributeName to verify this assertion. +// +// If a panic can be induced, a Type Validate() method or requiring the use of +// Type construction functions that require validation are better solutions than +// handling validation errors at this point. +func marshalJSONObjectAttributeName(name string) []byte { + result, err := json.Marshal(name) + + if err != nil { + panic(fmt.Sprintf("unable to JSON encode object attribute name: %s", name)) + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/primitive.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/primitive.go new file mode 100644 index 0000000000..1a6042814b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/primitive.go @@ -0,0 +1,411 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "fmt" + "math/big" +) + +var ( + // DynamicPseudoType is a pseudo-type in Terraform's type system that + // is used as a wildcard type. It indicates that any Terraform type can + // be used. + DynamicPseudoType = primitive{name: "DynamicPseudoType"} + + // String is a primitive type in Terraform that represents a UTF-8 + // string of bytes. + String = primitive{name: "String"} + + // Number is a primitive type in Terraform that represents a real + // number. + Number = primitive{name: "Number"} + + // Bool is a primitive type in Terraform that represents a true or + // false boolean value. + Bool = primitive{name: "Bool"} +) + +var ( + _ Type = primitive{name: "test"} +) + +type primitive struct { + name string + + // used to make this type uncomparable + // see https://golang.org/ref/spec#Comparison_operators + // this enforces the use of Is, instead + _ []struct{} +} + +// ApplyTerraform5AttributePathStep always returns an ErrInvalidStep error +// as it is invalid to step into a primitive. 
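A brief sketch of the primitive types in use, assuming NewValue and Value.As from elsewhere in this package; Numbers round-trip as *big.Float:

package main

import (
	"fmt"
	"math/big"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	n := tftypes.NewValue(tftypes.Number, big.NewFloat(1.5))

	// A pointer destination also surfaces null values as nil.
	var f *big.Float
	if err := n.As(&f); err != nil {
		panic(err)
	}
	fmt.Println(f.String()) // 1.5

	// DynamicPseudoType acts as a wildcard in conversion checks.
	fmt.Println(tftypes.String.UsableAs(tftypes.DynamicPseudoType)) // true
	fmt.Println(tftypes.String.UsableAs(tftypes.Number))            // false
}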
+func (p primitive) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + return nil, ErrInvalidStep +} + +func (p primitive) Equal(o Type) bool { + v, ok := o.(primitive) + if !ok { + return false + } + return p.name == v.name +} + +func (p primitive) Is(t Type) bool { + return p.Equal(t) +} + +func (p primitive) UsableAs(t Type) bool { + v, ok := t.(primitive) + if !ok { + return false + } + if v.name == DynamicPseudoType.name { + return true + } + return v.name == p.name +} + +func (p primitive) String() string { + return "tftypes." + p.name +} + +func (p primitive) private() {} + +func (p primitive) MarshalJSON() ([]byte, error) { + switch p.name { + case String.name: + return []byte(`"string"`), nil + case Number.name: + return []byte(`"number"`), nil + case Bool.name: + return []byte(`"bool"`), nil + case DynamicPseudoType.name: + return []byte(`"dynamic"`), nil + } + + // MarshalJSON should always be error safe and reaching this panic implies + // a new primitive type was added that needs to be handled above. + panic(fmt.Sprintf("unimplemented tftypes.primitive type: %+v", p)) +} + +func (p primitive) supportedGoTypes() []string { + switch p.name { + case String.name: + return []string{"string", "*string"} + case Number.name: + return []string{ + "*big.Float", + "uint", "*uint", + "uint8", "*uint8", + "uint16", "*uint16", + "uint32", "*uint32", + "uint64", "*uint64", + "int", "*int", + "int8", "*int8", + "int16", "*int16", + "int32", "*int32", + "int64", "*int64", + "float64", "*float64", + } + case Bool.name: + return []string{"bool", "*bool"} + case DynamicPseudoType.name: + // List/Set is covered by Tuple, Map is covered by Object + possibleTypes := []Type{ + String, Bool, Number, + Tuple{}, Object{}, + } + results := []string{} + for _, t := range possibleTypes { + results = append(results, t.supportedGoTypes()...) 
+ } + return results + } + panic(fmt.Sprintf("unknown primitive type %q", p.name)) +} + +func valueFromString(in interface{}) (Value, error) { + switch value := in.(type) { + case *string: + if value == nil { + return Value{ + typ: String, + value: nil, + }, nil + } + return Value{ + typ: String, + value: *value, + }, nil + case string: + return Value{ + typ: String, + value: value, + }, nil + default: + return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.String; expected types are: %s", in, formattedSupportedGoTypes(String)) + } +} + +func valueFromBool(in interface{}) (Value, error) { + switch value := in.(type) { + case *bool: + if value == nil { + return Value{ + typ: Bool, + value: nil, + }, nil + } + return Value{ + typ: Bool, + value: *value, + }, nil + case bool: + return Value{ + typ: Bool, + value: value, + }, nil + default: + return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.Bool; expected types are: %s", in, formattedSupportedGoTypes(Bool)) + } +} + +func valueFromNumber(in interface{}) (Value, error) { + switch value := in.(type) { + case *big.Float: + if value == nil { + return Value{ + typ: Number, + value: nil, + }, nil + } + return Value{ + typ: Number, + value: value, + }, nil + case uint: + return Value{ + typ: Number, + value: new(big.Float).SetUint64(uint64(value)), + }, nil + case *uint: + if value == nil { + return Value{ + typ: Number, + value: nil, + }, nil + } + return Value{ + typ: Number, + value: new(big.Float).SetUint64(uint64(*value)), + }, nil + case uint8: + return Value{ + typ: Number, + value: new(big.Float).SetUint64(uint64(value)), + }, nil + case *uint8: + if value == nil { + return Value{ + typ: Number, + value: nil, + }, nil + } + return Value{ + typ: Number, + value: new(big.Float).SetUint64(uint64(*value)), + }, nil + case uint16: + return Value{ + typ: Number, + value: new(big.Float).SetUint64(uint64(value)), + }, nil + case *uint16: + if value == nil { + return Value{ + typ: Number, + value: nil, + }, nil + } + return Value{ + typ: Number, + value: new(big.Float).SetUint64(uint64(*value)), + }, nil + case uint32: + return Value{ + typ: Number, + value: new(big.Float).SetUint64(uint64(value)), + }, nil + case *uint32: + if value == nil { + return Value{ + typ: Number, + value: nil, + }, nil + } + return Value{ + typ: Number, + value: new(big.Float).SetUint64(uint64(*value)), + }, nil + case uint64: + return Value{ + typ: Number, + value: new(big.Float).SetUint64(value), + }, nil + case *uint64: + if value == nil { + return Value{ + typ: Number, + value: nil, + }, nil + } + return Value{ + typ: Number, + value: new(big.Float).SetUint64(*value), + }, nil + case int: + return Value{ + typ: Number, + value: new(big.Float).SetInt64(int64(value)), + }, nil + case *int: + if value == nil { + return Value{ + typ: Number, + value: nil, + }, nil + } + return Value{ + typ: Number, + value: new(big.Float).SetInt64(int64(*value)), + }, nil + case int8: + return Value{ + typ: Number, + value: new(big.Float).SetInt64(int64(value)), + }, nil + case *int8: + if value == nil { + return Value{ + typ: Number, + value: nil, + }, nil + } + return Value{ + typ: Number, + value: new(big.Float).SetInt64(int64(*value)), + }, nil + case int16: + return Value{ + typ: Number, + value: new(big.Float).SetInt64(int64(value)), + }, nil + case *int16: + if value == nil { + return Value{ + typ: Number, + value: nil, + }, nil + } + return Value{ + typ: Number, + value: new(big.Float).SetInt64(int64(*value)), + }, nil + case int32: + return 
Value{ + typ: Number, + value: new(big.Float).SetInt64(int64(value)), + }, nil + case *int32: + if value == nil { + return Value{ + typ: Number, + value: nil, + }, nil + } + return Value{ + typ: Number, + value: new(big.Float).SetInt64(int64(*value)), + }, nil + case int64: + return Value{ + typ: Number, + value: new(big.Float).SetInt64(value), + }, nil + case *int64: + if value == nil { + return Value{ + typ: Number, + value: nil, + }, nil + } + return Value{ + typ: Number, + value: new(big.Float).SetInt64(*value), + }, nil + case float64: + return Value{ + typ: Number, + value: big.NewFloat(value), + }, nil + case *float64: + if value == nil { + return Value{ + typ: Number, + value: nil, + }, nil + } + return Value{ + typ: Number, + value: big.NewFloat(*value), + }, nil + default: + return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.Number; expected types are: %s", in, formattedSupportedGoTypes(Number)) + } +} + +func valueFromDynamicPseudoType(val interface{}) (Value, error) { + switch val := val.(type) { + case string, *string: + v, err := valueFromString(val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + case *big.Float, float64, *float64, int, *int, int8, *int8, int16, *int16, int32, *int32, int64, *int64, uint, *uint, uint8, *uint8, uint16, *uint16, uint32, *uint32, uint64, *uint64: + v, err := valueFromNumber(val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + case bool, *bool: + v, err := valueFromBool(val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + case map[string]Value: + v, err := valueFromObject(nil, nil, val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + case []Value: + v, err := valueFromTuple(nil, val) + if err != nil { + return Value{}, err + } + v.typ = DynamicPseudoType + return v, nil + default: + return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.DynamicPseudoType; expected types are: %s", val, formattedSupportedGoTypes(DynamicPseudoType)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/set.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/set.go new file mode 100644 index 0000000000..1865c1fa6f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/set.go @@ -0,0 +1,125 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "bytes" + "fmt" +) + +// Set is a Terraform type representing an unordered collection of unique +// elements, all of the same type. +type Set struct { + ElementType Type + + // used to make this type uncomparable + // see https://golang.org/ref/spec#Comparison_operators + // this enforces the use of Is, instead + _ []struct{} +} + +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a Set, +// returning the Type found at that AttributePath within the Set. If the +// AttributePathStep cannot be applied to the Set, an ErrInvalidStep error +// will be returned. +func (s Set) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch step.(type) { + case ElementKeyValue: + return s.ElementType, nil + default: + return nil, ErrInvalidStep + } +} + +// Equal returns true if the two Sets are exactly equal. Unlike Is, passing in +// a Set with no ElementType will always return false. 
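A small sketch of a Set value and of addressing one of its elements by value rather than by index, assuming NewValue from elsewhere in this package; the elements are invented:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	setType := tftypes.Set{ElementType: tftypes.String}
	val := tftypes.NewValue(setType, []tftypes.Value{
		tftypes.NewValue(tftypes.String, "a"),
		tftypes.NewValue(tftypes.String, "b"),
	})

	// Set elements have no index, so paths into sets use the element value.
	path := tftypes.NewAttributePath().
		WithElementKeyValue(tftypes.NewValue(tftypes.String, "a"))

	fmt.Println(val.String(), path.String())
}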
+func (s Set) Equal(o Type) bool { + v, ok := o.(Set) + if !ok { + return false + } + if v.ElementType == nil || s.ElementType == nil { + // when doing exact comparisons, we can't compare types that + // don't have element types set, so we just consider them not + // equal + return false + } + return s.ElementType.Equal(v.ElementType) +} + +// UsableAs returns whether the two Sets are type compatible. +// +// If the other type is DynamicPseudoType, it will return true. +// If the other type is not a Set, it will return false. +// If the other Set does not have a type compatible ElementType, it will +// return false. +func (s Set) UsableAs(o Type) bool { + if o.Is(DynamicPseudoType) { + return true + } + v, ok := o.(Set) + if !ok { + return false + } + return s.ElementType.UsableAs(v.ElementType) +} + +// Is returns whether `t` is a Set type or not. It does not perform any +// ElementType checks. +func (s Set) Is(t Type) bool { + _, ok := t.(Set) + return ok +} + +func (s Set) String() string { + return "tftypes.Set[" + s.ElementType.String() + "]" +} + +func (s Set) private() {} + +func (s Set) supportedGoTypes() []string { + return []string{"[]tftypes.Value"} +} + +func valueFromSet(typ Type, in interface{}) (Value, error) { + switch value := in.(type) { + case []Value: + var elType Type + for _, v := range value { + if !v.Type().UsableAs(typ) { + return Value{}, NewAttributePath().WithElementKeyValue(v).NewErrorf("can't use %s as %s", v.Type(), typ) + } + if elType == nil { + elType = v.Type() + } + if !elType.Equal(v.Type()) { + return Value{}, fmt.Errorf("sets must only contain one type of element, saw %s and %s", elType, v.Type()) + } + } + return Value{ + typ: Set{ElementType: typ}, + value: value, + }, nil + default: + return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.Set; expected types are: %s", in, formattedSupportedGoTypes(Set{})) + } +} + +// MarshalJSON returns a JSON representation of the full type signature of `s`, +// including its ElementType. +// +// Deprecated: this is not meant to be called by third-party code. +func (s Set) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + + buf.WriteString(`["set",`) + + // MarshalJSON is always error safe + elementTypeBytes, _ := s.ElementType.MarshalJSON() + + buf.Write(elementTypeBytes) + buf.WriteString(`]`) + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/tuple.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/tuple.go new file mode 100644 index 0000000000..9e735dcea2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/tuple.go @@ -0,0 +1,169 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "bytes" + "fmt" + "strings" +) + +// Tuple is a Terraform type representing an ordered collection of elements, +// potentially of differing types. The number of elements and their types are +// part of the type signature for the Tuple, and so two Tuples with different +// numbers or types of elements are considered to be distinct types. +type Tuple struct { + ElementTypes []Type + + // used to make this type uncomparable + // see https://golang.org/ref/spec#Comparison_operators + // this enforces the use of Is, instead + _ []struct{} +} + +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a Tuple, +// returning the Type found at that AttributePath within the Tuple. 
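Editor's note: Set's three comparison methods differ in strictness, and the distinction matters when checking schema compatibility. A small illustrative sketch (not part of the diff) of Is vs Equal vs UsableAs on the Set type defined above:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	strSet := tftypes.Set{ElementType: tftypes.String}
	numSet := tftypes.Set{ElementType: tftypes.Number}

	fmt.Println(strSet.Is(numSet))    // true: both are Sets at the root
	fmt.Println(strSet.Equal(numSet)) // false: element types differ

	// Every type is usable where DynamicPseudoType is expected.
	fmt.Println(strSet.UsableAs(tftypes.DynamicPseudoType)) // true
}
```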
If the +// AttributePathStep cannot be applied to the Tuple, an ErrInvalidStep error +// will be returned. +func (tu Tuple) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + switch s := step.(type) { + case ElementKeyInt: + if int64(s) < 0 || int64(s) >= int64(len(tu.ElementTypes)) { + return nil, ErrInvalidStep + } + + return tu.ElementTypes[int64(s)], nil + default: + return nil, ErrInvalidStep + } +} + +// Equal returns true if the two Tuples are exactly equal. Unlike Is, passing +// in a Tuple with no ElementTypes will always return false. +func (tu Tuple) Equal(o Type) bool { + v, ok := o.(Tuple) + if !ok { + return false + } + if v.ElementTypes == nil || tu.ElementTypes == nil { + // when doing exact comparisons, we can't compare types that + // don't have element types set, so we just consider them not + // equal + return false + } + if len(v.ElementTypes) != len(tu.ElementTypes) { + return false + } + for pos, typ := range tu.ElementTypes { + if !typ.Equal(v.ElementTypes[pos]) { + return false + } + } + return true +} + +// UsableAs returns whether the two Tuples are type compatible. +// +// If the other type is DynamicPseudoType, it will return true. +// If the other type is not a Tuple, it will return false. +// If the other Tuple does not have matching ElementTypes length, it will +// return false. +// If the other Tuple does not have type compatible ElementTypes in each +// position, it will return false. +func (tu Tuple) UsableAs(o Type) bool { + if o.Is(DynamicPseudoType) { + return true + } + v, ok := o.(Tuple) + if !ok { + return false + } + if len(v.ElementTypes) != len(tu.ElementTypes) { + return false + } + for pos, typ := range tu.ElementTypes { + if !typ.UsableAs(v.ElementTypes[pos]) { + return false + } + } + return true +} + +// Is returns whether `t` is a Tuple type or not. It does not perform any +// ElementTypes checks. +func (tu Tuple) Is(t Type) bool { + _, ok := t.(Tuple) + return ok +} + +func (tu Tuple) String() string { + var res strings.Builder + res.WriteString("tftypes.Tuple[") + for pos, t := range tu.ElementTypes { + if pos != 0 { + res.WriteString(", ") + } + res.WriteString(t.String()) + } + res.WriteString("]") + return res.String() +} + +func (tu Tuple) private() {} + +func (tu Tuple) supportedGoTypes() []string { + return []string{"[]tftypes.Value"} +} + +func valueFromTuple(types []Type, in interface{}) (Value, error) { + switch value := in.(type) { + case []Value: + // types should only be null if the "Tuple" is actually a + // DynamicPseudoType being created from a []Value. In which + // case, we don't know what types it should have, or even how + // many there will be, so let's not validate that at all + if types != nil { + if len(types) != len(value) { + return Value{}, fmt.Errorf("can't create a tftypes.Value with %d elements, type %s requires %d elements", len(value), Tuple{ElementTypes: types}, len(types)) + } + for pos, v := range value { + typ := types[pos] + if !v.Type().UsableAs(typ) { + return Value{}, NewAttributePath().WithElementKeyInt(pos).NewErrorf("can't use %s as %s", v.Type(), typ) + } + } + } + return Value{ + typ: Tuple{ElementTypes: types}, + value: value, + }, nil + default: + return Value{}, fmt.Errorf("tftypes.NewValue can't use %T as a tftypes.Tuple; expected types are: %s", in, formattedSupportedGoTypes(Tuple{})) + } +} + +// MarshalJSON returns a JSON representation of the full type signature of +// `tu`, including the ElementTypes. 
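Editor's note: valueFromTuple above enforces both the element count and per-position type compatibility. A hedged sketch of how that surfaces through the exported ValidateValue/NewValue entry points (defined later in value.go in this same diff):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	pair := tftypes.Tuple{ElementTypes: []tftypes.Type{tftypes.String, tftypes.Number}}

	ok := []tftypes.Value{
		tftypes.NewValue(tftypes.String, "id"),
		tftypes.NewValue(tftypes.Number, 1),
	}
	fmt.Println(tftypes.ValidateValue(pair, ok)) // <nil>

	// One element against a two-element tuple type: valueFromTuple rejects it.
	short := []tftypes.Value{tftypes.NewValue(tftypes.String, "id")}
	fmt.Println(tftypes.ValidateValue(pair, short) != nil) // true
}
```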
+ +// +// Deprecated: this is not meant to be called by third-party code. +func (tu Tuple) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + + buf.WriteString(`["tuple",[`) + + for index, elementType := range tu.ElementTypes { + if index > 0 { + buf.WriteString(",") + } + + // MarshalJSON is always error safe + elementTypeBytes, _ := elementType.MarshalJSON() + + buf.Write(elementTypeBytes) + } + + buf.WriteString(`]]`) + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/type.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/type.go new file mode 100644 index 0000000000..be0749dc01 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/type.go @@ -0,0 +1,236 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "strings" +) + +// Type is an interface representing a Terraform type. It is only meant to be +// implemented by the tftypes package. Types define the shape and +// characteristics of data coming from or being sent to Terraform. +type Type interface { + // AttributePathStepper requires each Type to implement the + // ApplyTerraform5AttributePathStep method, so Type is compatible with + // WalkAttributePath. The method should return the Type found at that + // AttributePath within the Type or ErrInvalidStep. + AttributePathStepper + + // Is is used to determine what type a Type implementation is. It is + // the recommended method for determining whether two types are + // equivalent or not. + + // Is performs shallow type equality checks, in that the root type is + // compared, but underlying attribute/element types are not. + Is(Type) bool + + // Equal performs deep type equality checks, including attribute/element + // types and whether attributes are optional or not. + Equal(Type) bool + + // UsableAs performs type conformance checks. This primarily checks if the + // target implements DynamicPseudoType in a compatible manner. + UsableAs(Type) bool + + // String returns a string representation of the Type's name. + String() string + + // MarshalJSON returns a JSON representation of the Type's signature. + // It is modeled based on Terraform's requirements for type signature + // JSON representations, and may change over time to match Terraform's + // formatting. The error return should always be nil. + // + // Deprecated: this is not meant to be called by third-party code. + MarshalJSON() ([]byte, error) + + // private is meant to keep this interface from being implemented by + // types from other packages. + private() + + // supportedGoTypes returns a list of string representations of the Go + // types that the Type supports for its values. + supportedGoTypes() []string +} + +// TypeFromElements returns the common type that the passed elements all have +// in common. An error will be returned if the passed elements are not of the +// same type. +func TypeFromElements(elements []Value) (Type, error) { + var typ Type + for _, el := range elements { + if typ == nil { + typ = el.Type() + continue + } + if !typ.Equal(el.Type()) { + return nil, errors.New("elements do not all have the same types") + } + } + if typ == nil { + return DynamicPseudoType, nil + } + return typ, nil +} + +type jsonType struct { + t Type +} + +// ParseJSONType returns a Type from its JSON representation.
The JSON +// representation should come from Terraform or from MarshalJSON as the format +// is not part of this package's API guarantees. +// +// Deprecated: this is not meant to be called by third-party code. +func ParseJSONType(buf []byte) (Type, error) { + var t jsonType + err := json.Unmarshal(buf, &t) + return t.t, err +} + +func (t *jsonType) UnmarshalJSON(buf []byte) error { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + + tok, err := dec.Token() + if err != nil { + return err + } + + switch v := tok.(type) { + case string: + switch v { + case "bool": + t.t = Bool + case "number": + t.t = Number + case "string": + t.t = String + case "dynamic": + t.t = DynamicPseudoType + default: + return fmt.Errorf("invalid primitive type name %q", v) + } + + if dec.More() { + return fmt.Errorf("extraneous data after type description") + } + return nil + case json.Delim: + if rune(v) != '[' { + return fmt.Errorf("invalid complex type description") + } + + tok, err = dec.Token() + if err != nil { + return err + } + + kind, ok := tok.(string) + if !ok { + return fmt.Errorf("invalid complex type kind name") + } + + switch kind { + case "list": + var ety jsonType + err = dec.Decode(&ety) + if err != nil { + return err + } + t.t = List{ + ElementType: ety.t, + } + case "map": + var ety jsonType + err = dec.Decode(&ety) + if err != nil { + return err + } + t.t = Map{ + ElementType: ety.t, + } + case "set": + var ety jsonType + err = dec.Decode(&ety) + if err != nil { + return err + } + t.t = Set{ + ElementType: ety.t, + } + case "object": + var atys map[string]jsonType + err = dec.Decode(&atys) + if err != nil { + return err + } + types := make(map[string]Type, len(atys)) + for k, v := range atys { + types[k] = v.t + } + o := Object{ + AttributeTypes: types, + OptionalAttributes: map[string]struct{}{}, + } + if dec.More() { + var optionals []string + err = dec.Decode(&optionals) + if err != nil { + return err + } + for _, attr := range optionals { + o.OptionalAttributes[attr] = struct{}{} + } + } + t.t = o + case "tuple": + var etys []jsonType + err = dec.Decode(&etys) + if err != nil { + return err + } + types := make([]Type, 0, len(etys)) + for _, ty := range etys { + types = append(types, ty.t) + } + t.t = Tuple{ + ElementTypes: types, + } + default: + return fmt.Errorf("invalid complex type kind name") + } + + tok, err = dec.Token() + if err != nil { + return err + } + if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() { + return fmt.Errorf("unexpected extra data in type description") + } + + return nil + + default: + return fmt.Errorf("invalid type description") + } +} + +func formattedSupportedGoTypes(t Type) string { + sgt := t.supportedGoTypes() + switch len(sgt) { + case 0: + return "no supported Go types" + case 1: + return sgt[0] + case 2: + return sgt[0] + " or " + sgt[1] + default: + sgt[len(sgt)-1] = "or " + sgt[len(sgt)-1] + return strings.Join(sgt, ", ") + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/unknown_value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/unknown_value.go new file mode 100644 index 0000000000..4aefe719e4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/unknown_value.go @@ -0,0 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +const ( + // UnknownValue represents a value that is not yet known. It can be the + // value of any type. 
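Editor's note: the jsonType decoder above handles Terraform's wire-format type signatures. A sketch of what those signatures look like in practice; ParseJSONType and MarshalJSON are deprecated for third parties, so this is for understanding the format only, and the exact String() rendering is an assumption:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	// Terraform's JSON signature for an object with a string attribute
	// and a list-of-strings attribute.
	raw := []byte(`["object",{"name":"string","tags":["list","string"]}]`)

	typ, err := tftypes.ParseJSONType(raw) //nolint:staticcheck // format illustration only
	if err != nil {
		panic(err)
	}
	// Prints something like:
	// tftypes.Object["name":tftypes.String, "tags":tftypes.List[tftypes.String]]
	fmt.Println(typ)

	// Round-tripping back out reproduces an equivalent signature.
	out, _ := typ.MarshalJSON() //nolint:staticcheck
	fmt.Println(string(out))
}
```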
+ UnknownValue = unknown(0) +) + +type unknown byte diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go new file mode 100644 index 0000000000..b845078396 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value.go @@ -0,0 +1,594 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "bytes" + "fmt" + "math/big" + "sort" + "strconv" + "strings" + + msgpack "github.com/vmihailenco/msgpack/v5" +) + +// ValueConverter is an interface that provider-defined types can implement to +// control how Value.As will convert a Value into that type. The passed Value +// is the Value that Value.As is being called on. The intended usage is to call +// Value.As on the passed Value, converting it into a builtin type, and then +// converting or casting that builtin type to the provider-defined type. +type ValueConverter interface { + FromTerraform5Value(Value) error +} + +// ValueCreator is an interface that provider-defined types can implement to +// control how NewValue will convert that type into a Value. The returned +// interface should return one of the builtin Value representations that should +// be used for that Value. +type ValueCreator interface { + ToTerraform5Value() (interface{}, error) +} + +// Value is a piece of data from Terraform or being returned to Terraform. It +// has a Type associated with it, defining its shape and characteristics, and a +// Go representation of that Type containing the data itself. Values are a +// special type and are not represented as pure Go values because they can +// contain UnknownValues, which cannot be losslessly represented in Go's type +// system. +// +// The recommended usage of a Value is to check that it is known, using +// Value.IsKnown, then to convert it to a Go type, using Value.As. The Go type +// can then be manipulated.
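Editor's note: the UnknownValue sentinel defined above is what makes Value more expressive than plain Go values. A quick illustrative sketch (not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	// UnknownValue can stand in for a value of any type.
	pending := tftypes.NewValue(tftypes.String, tftypes.UnknownValue)

	fmt.Println(pending.IsKnown()) // false
	fmt.Println(pending.IsNull())  // false

	// As refuses unknowns, which is why callers check IsKnown first.
	var s string
	fmt.Println(pending.As(&s) != nil) // true
}
```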
+type Value struct { + typ Type + value interface{} +} + +func (val Value) String() string { + typ := val.Type() + + if typ == nil { + return "invalid typeless tftypes.Value<>" + } + + // null and unknown values we use static strings for + if val.IsNull() { + return typ.String() + "<null>" + } + if !val.IsKnown() { + return typ.String() + "<unknown>" + } + + // everything else is built up + var res strings.Builder + switch { + case typ.Is(String): + var s string + err := val.As(&s) + if err != nil { + panic(err) + } + res.WriteString(typ.String() + `<"` + s + `">`) + case typ.Is(Number): + n := big.NewFloat(0) + err := val.As(&n) + if err != nil { + panic(err) + } + res.WriteString(typ.String() + `<"` + n.String() + `">`) + case typ.Is(Bool): + var b bool + err := val.As(&b) + if err != nil { + panic(err) + } + res.WriteString(typ.String() + `<"` + strconv.FormatBool(b) + `">`) + case typ.Is(List{}), typ.Is(Set{}), typ.Is(Tuple{}): + var l []Value + err := val.As(&l) + if err != nil { + panic(err) + } + res.WriteString(typ.String() + `<`) + for pos, el := range l { + if pos != 0 { + res.WriteString(", ") + } + res.WriteString(el.String()) + } + res.WriteString(">") + case typ.Is(Map{}), typ.Is(Object{}): + m := map[string]Value{} + err := val.As(&m) + if err != nil { + panic(err) + } + res.WriteString(typ.String() + `<`) + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + for pos, key := range keys { + if pos != 0 { + res.WriteString(", ") + } + res.WriteString(`"` + key + `":`) + res.WriteString(m[key].String()) + } + res.WriteString(">") + } + return res.String() +} + +// ApplyTerraform5AttributePathStep applies an AttributePathStep to a Value, +// returning the Value found at that AttributePath within the Value. It +// fulfills the AttributePathStepper interface, allowing Values to be passed +// to WalkAttributePath. This allows retrieving a subset of a Value using an +// AttributePath. If the AttributePathStep can't be applied to the Value, +// either because it is the wrong type or because no Value exists at that +// AttributePathStep, an ErrInvalidStep error will be returned. +func (val Value) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + if !val.IsKnown() || val.IsNull() { + return nil, ErrInvalidStep + } + + // Since this logic is very hot path, it is optimized to use Value + // implementation details rather than As() to avoid memory allocations.
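Editor's note: Value.String above renders nested values recursively and sorts map and object keys for stable output. A sketch; the exact type-name rendering for Object is an assumption based on the String methods in this diff:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	val := tftypes.NewValue(
		tftypes.Object{AttributeTypes: map[string]tftypes.Type{
			"name": tftypes.String,
			"port": tftypes.Number,
		}},
		map[string]tftypes.Value{
			"name": tftypes.NewValue(tftypes.String, "web"),
			"port": tftypes.NewValue(tftypes.Number, 8080),
		},
	)

	// Prints the object with attributes sorted by key, along the lines of
	// tftypes.Object[...]<"name":tftypes.String<"web">, "port":tftypes.Number<"8080">>
	fmt.Println(val.String())
}
```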
+ switch s := step.(type) { + case AttributeName: + if _, ok := val.Type().(Object); !ok { + return nil, ErrInvalidStep + } + o, ok := val.value.(map[string]Value) + if !ok { + return nil, fmt.Errorf("cannot convert %T into map[string]tftypes.Value", val.value) + } + res, ok := o[string(s)] + if !ok { + return nil, ErrInvalidStep + } + return res, nil + case ElementKeyString: + if _, ok := val.Type().(Map); !ok { + return nil, ErrInvalidStep + } + m, ok := val.value.(map[string]Value) + if !ok { + return nil, fmt.Errorf("cannot convert %T into map[string]tftypes.Value", val.value) + } + res, ok := m[string(s)] + if !ok { + return nil, ErrInvalidStep + } + return res, nil + case ElementKeyInt: + _, listOk := val.Type().(List) + _, tupleOk := val.Type().(Tuple) + if !listOk && !tupleOk { + return nil, ErrInvalidStep + } + if int64(s) < 0 { + return nil, ErrInvalidStep + } + sl, ok := val.value.([]Value) + if !ok { + return nil, fmt.Errorf("cannot convert %T into []tftypes.Value", val.value) + } + if int64(len(sl)) <= int64(s) { + return nil, ErrInvalidStep + } + return sl[int64(s)], nil + case ElementKeyValue: + if _, ok := val.Type().(Set); !ok { + return nil, ErrInvalidStep + } + sl, ok := val.value.([]Value) + if !ok { + return nil, fmt.Errorf("cannot convert %T into []tftypes.Value", val.value) + } + stepValue := Value(s) + for _, el := range sl { + deepEqual, err := stepValue.deepEqual(el) + if err != nil { + return nil, err + } + if deepEqual { + return el, nil + } + } + return nil, ErrInvalidStep + default: + return nil, fmt.Errorf("unexpected AttributePathStep type %T", step) + } +} + +// Equal returns true if two Values should be considered equal. Values are +// considered equal if their types are considered equal and if they represent +// data that is considered equal. +func (val Value) Equal(o Value) bool { + if val.Type() == nil && o.Type() == nil && val.value == nil && o.value == nil { + return true + } + if val.Type() == nil { + return false + } + if o.Type() == nil { + return false + } + if !val.Type().Equal(o.Type()) { + return false + } + deepEqual, err := val.deepEqual(o) + if err != nil { + panic(err) + } + return deepEqual +} + +// Copy returns a defensively-copied clone of Value that shares no underlying +// data structures with the original Value and can be mutated without +// accidentally mutating the original. +func (val Value) Copy() Value { + newVal := val.value + switch v := val.value.(type) { + case []Value: + newVals := make([]Value, 0, len(v)) + for _, value := range v { + newVals = append(newVals, value.Copy()) + } + newVal = newVals + case map[string]Value: + newVals := make(map[string]Value, len(v)) + for k, value := range v { + newVals[k] = value.Copy() + } + newVal = newVals + } + return NewValue(val.Type(), newVal) +} + +// NewValue returns a Value constructed using the specified Type and stores the +// passed value in it. +// +// The passed value should be in one of the builtin Value representations or +// implement the ValueCreator interface. +// +// If the passed value is not a valid value for the passed type, NewValue will +// panic. Any value and type combination that does not return an error from +// ValidateValue is guaranteed to not panic. When calling NewValue with user +// input with a type not known at compile time, it is recommended to call +// ValidateValue before calling NewValue, to allow graceful handling of the +// error. 
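Editor's note: as the NewValue documentation above recommends, ValidateValue can screen user-supplied type/value combinations before the panicking constructor runs. An illustrative sketch:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	typ := tftypes.List{ElementType: tftypes.Bool}

	// A bare string is not a []tftypes.Value, so this combination fails.
	if err := tftypes.ValidateValue(typ, "not-a-list"); err != nil {
		fmt.Println("rejected:", err)
		return
	}

	// Only reached for combinations ValidateValue accepted, so this
	// constructor call is guaranteed not to panic.
	fmt.Println(tftypes.NewValue(typ, "not-a-list"))
}
```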
+ +// +// The builtin Value representations are: +// +// - String: string, *string +// - Number: *big.Float, int64, *int64, int32, *int32, int16, *int16, int8, +// *int8, int, *int, uint64, *uint64, uint32, *uint32, uint16, +// *uint16, uint8, *uint8, uint, *uint, float64, *float64 +// - Bool: bool, *bool +// - Map and Object: map[string]Value +// - Tuple, List, and Set: []Value +func NewValue(t Type, val interface{}) Value { + v, err := newValue(t, val) + if err != nil { + panic(err) + } + return v +} + +// ValidateValue checks that the Go type passed as `val` can be used as a value +// for the Type passed as `t`. A nil error response indicates that the value is +// valid for the type. +func ValidateValue(t Type, val interface{}) error { + _, err := newValue(t, val) + return err +} + +func newValue(t Type, val interface{}) (Value, error) { + if val == nil || val == UnknownValue { + return Value{ + typ: t, + value: val, + }, nil + } + + if creator, ok := val.(ValueCreator); ok { + var err error + val, err = creator.ToTerraform5Value() + if err != nil { + return Value{}, fmt.Errorf("error creating tftypes.Value: %w", err) + } + } + + switch typ := t.(type) { + case primitive: + switch typ.name { + case String.name: + v, err := valueFromString(val) + if err != nil { + return Value{}, err + } + return v, nil + case Number.name: + v, err := valueFromNumber(val) + if err != nil { + return Value{}, err + } + return v, nil + case Bool.name: + v, err := valueFromBool(val) + if err != nil { + return Value{}, err + } + return v, nil + case DynamicPseudoType.name: + v, err := valueFromDynamicPseudoType(val) + if err != nil { + return Value{}, err + } + return v, nil + default: + return Value{}, fmt.Errorf("unknown primitive type %v passed to tftypes.NewValue", typ) + } + case Map: + v, err := valueFromMap(typ.ElementType, val) + if err != nil { + return Value{}, err + } + return v, nil + case Object: + v, err := valueFromObject(typ.AttributeTypes, typ.OptionalAttributes, val) + if err != nil { + return Value{}, err + } + return v, nil + case List: + v, err := valueFromList(typ.ElementType, val) + if err != nil { + return Value{}, err + } + return v, nil + case Set: + v, err := valueFromSet(typ.ElementType, val) + if err != nil { + return Value{}, err + } + return v, nil + case Tuple: + v, err := valueFromTuple(typ.ElementTypes, val) + if err != nil { + return Value{}, err + } + return v, nil + default: + return Value{}, fmt.Errorf("unknown type %s passed to tftypes.NewValue", t) + } +} + +// As converts a Value into a Go value. `dst` must be set to a pointer to a +// value of a supported type for the Value's type or an implementation of the +// ValueConverter interface. +// +// For Strings, `dst` must be a pointer to a string or a pointer to a pointer +// to a string. If it's a pointer to a pointer to a string, if the Value is +// null, the pointer to the string will be set to nil. If it's a pointer to a +// string, if the Value is null, the string will be set to the empty value. +// +// For Numbers, `dst` must be a pointer to a big.Float or a pointer to a +// pointer to a big.Float. If it's a pointer to a pointer to a big.Float, if +// the Value is null, the pointer to the big.Float will be set to nil. If it's +// a pointer to a big.Float, if the Value is null, the big.Float will be set to +// 0. +// +// For Bools, `dst` must be a pointer to a bool or a pointer to a pointer to a +// bool. If it's a pointer to a pointer to a bool, if the Value is null, the +// pointer to the bool will be set to nil.
If it's a pointer to a bool, if the +// Value is null, the bool will be set to false. +// +// For Maps and Objects, `dst` must be a pointer to a map[string]Value or a +// pointer to a pointer to a map[string]Value. If it's a pointer to a pointer +// to a map[string]Value, if the Value is null, the pointer to the +// map[string]Value will be set to nil. If it's a pointer to a +// map[string]Value, if the Value is null, the map[string]Value will be set to +// an empty map. +// +// For Lists, Sets, and Tuples, `dst` must be a pointer to a []Value or a +// pointer to a pointer to a []Value. If it's a pointer to a pointer to a +// []Value, if the Value is null, the pointer to []Value will be set to nil. If +// it's a pointer to a []Value, if the Value is null, the []Value will be set +// to an empty slice. +// +// Future builtin conversions may be added over time. +// +// If `val` is unknown, an error will be returned, as unknown values can't be +// represented in Go's type system. Providers should check Value.IsKnown before +// calling Value.As. +func (val Value) As(dst interface{}) error { + unmarshaler, ok := dst.(ValueConverter) + if ok { + return unmarshaler.FromTerraform5Value(val) + } + if !val.IsKnown() { + return fmt.Errorf("unmarshaling unknown values is not supported") + } + switch target := dst.(type) { + case *string: + if val.IsNull() { + *target = "" + return nil + } + v, ok := val.value.(string) + if !ok { + return fmt.Errorf("can't unmarshal %s into %T, expected string", val.Type(), dst) + } + *target = v + return nil + case **string: + if val.IsNull() { + *target = nil + return nil + } + if *target == nil { + var s string + *target = &s + } + return val.As(*target) + case *big.Float: + if val.IsNull() { + target.Set(big.NewFloat(0)) + return nil + } + v, ok := val.value.(*big.Float) + if !ok { + return fmt.Errorf("can't unmarshal %s into %T, expected *big.Float", val.Type(), dst) + } + target.Copy(v) + return nil + case **big.Float: + if val.IsNull() { + *target = nil + return nil + } + if *target == nil { + *target = big.NewFloat(0) + } + return val.As(*target) + case *bool: + if val.IsNull() { + *target = false + return nil + } + v, ok := val.value.(bool) + if !ok { + return fmt.Errorf("can't unmarshal %s into %T, expected boolean", val.Type(), dst) + } + *target = v + return nil + case **bool: + if val.IsNull() { + *target = nil + return nil + } + if *target == nil { + var b bool + *target = &b + } + return val.As(*target) + case *map[string]Value: + if val.IsNull() { + *target = map[string]Value{} + return nil + } + v, ok := val.value.(map[string]Value) + if !ok { + return fmt.Errorf("can't unmarshal %s into %T, expected map[string]tftypes.Value", val.Type(), dst) + } + *target = v + return nil + case **map[string]Value: + if val.IsNull() { + *target = nil + return nil + } + if *target == nil { + m := map[string]Value{} + *target = &m + } + return val.As(*target) + case *[]Value: + if val.IsNull() { + *target = []Value{} + return nil + } + v, ok := val.value.([]Value) + if !ok { + return fmt.Errorf("can't unmarshal %s into %T, expected []tftypes.Value", val.Type(), dst) + } + *target = v + return nil + case **[]Value: + if val.IsNull() { + *target = nil + return nil + } + if *target == nil { + l := []Value{} + *target = &l + } + return val.As(*target) + } + return fmt.Errorf("can't unmarshal into %T, needs FromTerraform5Value method", dst) +} + +// Type returns the Type of the Value.
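Editor's note: the single- vs double-pointer distinction in the As documentation above is easy to miss: only double-pointer targets can round-trip null. An illustrative sketch:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	count := tftypes.NewValue(tftypes.Number, 3)

	// Single pointer: a null Value would be flattened to zero.
	n := big.NewFloat(0)
	if err := count.As(n); err != nil {
		panic(err)
	}
	i, _ := n.Int64()
	fmt.Println(i) // 3

	// Double pointer: null is preserved as a nil *big.Float.
	var maybe *big.Float
	if err := tftypes.NewValue(tftypes.Number, nil).As(&maybe); err != nil {
		panic(err)
	}
	fmt.Println(maybe == nil) // true
}
```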
+func (val Value) Type() Type { + return val.typ +} + +// IsKnown returns true if `val` is known. If `val` is an aggregate type, only +// the top level of the aggregate type is checked; elements and attributes are +// not checked. +func (val Value) IsKnown() bool { + return val.value != UnknownValue +} + +// IsFullyKnown returns true if `val` is known. If `val` is an aggregate type, +// IsFullyKnown only returns true if all elements and attributes are known, as +// well. +func (val Value) IsFullyKnown() bool { + if !val.IsKnown() { + return false + } + if val.value == nil { + return true + } + switch val.Type().(type) { + case primitive: + return true + case List, Set, Tuple: + //nolint:forcetypeassert // NewValue func validates the type + for _, v := range val.value.([]Value) { + if !v.IsFullyKnown() { + return false + } + } + return true + case Map, Object: + //nolint:forcetypeassert // NewValue func validates the type + for _, v := range val.value.(map[string]Value) { + if !v.IsFullyKnown() { + return false + } + } + return true + } + panic(fmt.Sprintf("unknown type %T", val.Type())) +} + +// IsNull returns true if the Value is null. +func (val Value) IsNull() bool { + return val.value == nil +} + +// MarshalMsgPack returns a msgpack representation of the Value. This is used +// for constructing tfprotov5.DynamicValues. +// +// Deprecated: this is not meant to be called by third parties. Don't use it. +func (val Value) MarshalMsgPack(t Type) ([]byte, error) { + var buf bytes.Buffer + enc := msgpack.NewEncoder(&buf) + + err := marshalMsgPack(val, t, NewAttributePath(), enc) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func unexpectedValueTypeError(p *AttributePath, expected, got interface{}, typ Type) error { + return p.NewErrorf("unexpected value type %T, %s values must be of type %T", got, typ, expected) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_equal.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_equal.go new file mode 100644 index 0000000000..8d326657a7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_equal.go @@ -0,0 +1,213 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "errors" + "fmt" + "math/big" +) + +// deepEqual walks both Value to ensure any underlying Value are equal. This +// logic is essentially a duplicate of Diff, however it is intended to return +// early on any inequality and avoids memory allocations where possible. +// +// There might be ways to better share the internal logic of this method with +// Diff, however that effort is reserved for a time when the effort is justified +// over resolving the inherent compute and memory performance issues with Diff +// when only checking for inequality. 
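Editor's note: IsKnown and IsFullyKnown above differ only for aggregate values. A short sketch of the distinction:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	listType := tftypes.List{ElementType: tftypes.String}
	val := tftypes.NewValue(listType, []tftypes.Value{
		tftypes.NewValue(tftypes.String, "known"),
		tftypes.NewValue(tftypes.String, tftypes.UnknownValue),
	})

	// The list itself is known, but one of its elements is not.
	fmt.Println(val.IsKnown())      // true
	fmt.Println(val.IsFullyKnown()) // false
}
```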
+func (val1 Value) deepEqual(val2 Value) (bool, error) { + if val1.Type() == nil && val2.Type() == nil && val1.value == nil && val2.value == nil { + return false, nil + } + + if (val1.Type() == nil && val2.Type() != nil) || (val1.Type() != nil && val2.Type() == nil) { + return false, errors.New("cannot diff value missing type") + } + + if !val1.Type().Is(val2.Type()) { + return false, errors.New("Can't diff values of different types") + } + + // Capture walk differences for returning early + var hasDiff bool + + // make sure everything in val2 is also in val1 + err := Walk(val2, func(path *AttributePath, _ Value) (bool, error) { + _, _, err := val1.walkAttributePath(path) + + if err != nil && err != ErrInvalidStep { + return false, fmt.Errorf("Error walking %q: %w", path, err) + } else if err == ErrInvalidStep { + hasDiff = true + + return false, stopWalkError + } + + return true, nil + }) + + if err != nil { + return false, err + } + + if hasDiff { + return false, nil + } + + // make sure everything in val1 is also in val2 and also that it all matches + err = Walk(val1, func(path *AttributePath, value1 Value) (bool, error) { + // pull out the Value at the same path in val2 + value2, _, err := val2.walkAttributePath(path) + + if err != nil && err != ErrInvalidStep { + return false, fmt.Errorf("Error walking %q: %w", path, err) + } else if err == ErrInvalidStep { + hasDiff = true + + return false, stopWalkError + } + + // if they're both unknown, no need to continue + if !value1.IsKnown() && !value2.IsKnown() { + return false, nil + } + + if value1.IsKnown() != value2.IsKnown() { + hasDiff = true + + return false, stopWalkError + } + + // if they're both null, no need to continue + if value1.IsNull() && value2.IsNull() { + return false, nil + } + + if value1.IsNull() != value2.IsNull() { + hasDiff = true + + return false, stopWalkError + } + + // We know there are known, non-null values, time to compare them. + // Since this logic is very hot path, it is optimized to use type and + // value implementation details rather than Equal() and As() + // respectively, since both result in memory allocations. 
+ switch typ := value1.Type().(type) { + case primitive: + switch typ.name { + case String.name: + s1, ok := value1.value.(string) + + if !ok { + return false, fmt.Errorf("cannot convert %T into string", value1.value) + } + + s2, ok := value2.value.(string) + + if !ok { + return false, fmt.Errorf("cannot convert %T into string", value2.value) + } + + if s1 != s2 { + hasDiff = true + + return false, stopWalkError + } + case Number.name: + n1, ok := value1.value.(*big.Float) + + if !ok { + return false, fmt.Errorf("cannot convert %T into *big.Float", value1.value) + } + + n2, ok := value2.value.(*big.Float) + + if !ok { + return false, fmt.Errorf("cannot convert %T into *big.Float", value2.value) + } + + if n1.Cmp(n2) != 0 { + hasDiff = true + + return false, stopWalkError + } + case Bool.name: + b1, ok := value1.value.(bool) + + if !ok { + return false, fmt.Errorf("cannot convert %T into bool", value1.value) + } + + b2, ok := value2.value.(bool) + + if !ok { + return false, fmt.Errorf("cannot convert %T into bool", value2.value) + } + + if b1 != b2 { + hasDiff = true + + return false, stopWalkError + } + case DynamicPseudoType.name: + // Let recursion from the walk check the sub-values match + return true, nil + } + + return false, nil + case List, Set, Tuple: + s1, ok := value1.value.([]Value) + + if !ok { + return false, fmt.Errorf("cannot convert %T into []tftypes.Value", value1.value) + } + + s2, ok := value2.value.([]Value) + + if !ok { + return false, fmt.Errorf("cannot convert %T into []tftypes.Value", value2.value) + } + + // we only care about if the lengths match for lists, + // sets, and tuples. If any of the elements differ, + // the recursion of the walk will find them for us. + if len(s1) != len(s2) { + hasDiff = true + + return false, stopWalkError + } + + return true, nil + case Map, Object: + m1, ok := value1.value.(map[string]Value) + + if !ok { + return false, fmt.Errorf("cannot convert %T into map[string]tftypes.Value", value1.value) + } + + m2, ok := value2.value.(map[string]Value) + + if !ok { + return false, fmt.Errorf("cannot convert %T into map[string]tftypes.Value", value2.value) + } + + // we only care about if the number of keys match for maps and + // objects. If any of the elements differ, the recursion of the walk + // will find them for us. + if len(m1) != len(m2) { + hasDiff = true + + return false, stopWalkError + } + + return true, nil + } + + return false, fmt.Errorf("unexpected type %v in Diff at %s", value1.Type(), path) + }) + + return !hasDiff, err +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_json.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_json.go new file mode 100644 index 0000000000..8a61918e2c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_json.go @@ -0,0 +1,514 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "bytes" + "encoding/json" + "math/big" + "strings" +) + +// ValueFromJSON returns a Value from the JSON-encoded bytes, using the +// provided Type to determine what shape the Value should be. +// DynamicPseudoTypes will be transparently parsed into the types they +// represent. +// +// Deprecated: this function is exported for internal use in +// terraform-plugin-go. Third parties should not use it, and its behavior is +// not covered under the API compatibility guarantees. Don't use this. 
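Editor's note: ValueFromJSON below is internal-use (deprecated for third parties), but it shows how Terraform's JSON value encoding is decoded against a schema type; jsonUnmarshalObject later in this file fills attributes missing from the JSON with typed nulls. A sketch of that behavior:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	typ := tftypes.Object{AttributeTypes: map[string]tftypes.Type{
		"enabled":  tftypes.Bool,
		"replicas": tftypes.Number,
	}}

	// "replicas" is absent from the JSON, so it comes back as a typed null.
	val, err := tftypes.ValueFromJSON([]byte(`{"enabled": true}`), typ) //nolint:staticcheck // internal-use API
	if err != nil {
		panic(err)
	}

	var attrs map[string]tftypes.Value
	if err := val.As(&attrs); err != nil {
		panic(err)
	}
	fmt.Println(attrs["replicas"].IsNull()) // true
}
```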
+func ValueFromJSON(data []byte, typ Type) (Value, error) { + return jsonUnmarshal(data, typ, NewAttributePath(), ValueFromJSONOpts{}) +} + +// ValueFromJSONOpts contains options that can be used to modify the behaviour when +// unmarshalling JSON. +type ValueFromJSONOpts struct { + // IgnoreUndefinedAttributes is used to ignore any attributes which appear in the + // JSON but do not have a corresponding entry in the schema. For example, raw state + // where an attribute has been removed from the schema. + IgnoreUndefinedAttributes bool +} + +// ValueFromJSONWithOpts is identical to ValueFromJSON with the exception that it +// accepts ValueFromJSONOpts which can be used to modify the unmarshalling behaviour, such +// as ignoring undefined attributes, for instance. This can occur when the JSON +// being unmarshalled does not have a corresponding attribute in the schema. +func ValueFromJSONWithOpts(data []byte, typ Type, opts ValueFromJSONOpts) (Value, error) { + return jsonUnmarshal(data, typ, NewAttributePath(), opts) +} + +func jsonByteDecoder(buf []byte) *json.Decoder { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + dec.UseNumber() + return dec +} + +func jsonUnmarshal(buf []byte, typ Type, p *AttributePath, opts ValueFromJSONOpts) (Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + + if tok == nil { + return NewValue(typ, nil), nil + } + + switch { + case typ.Is(String): + return jsonUnmarshalString(buf, typ, p) + case typ.Is(Number): + return jsonUnmarshalNumber(buf, typ, p) + case typ.Is(Bool): + return jsonUnmarshalBool(buf, typ, p) + case typ.Is(DynamicPseudoType): + return jsonUnmarshalDynamicPseudoType(buf, typ, p, opts) + case typ.Is(List{}): + //nolint:forcetypeassert // Is func above guarantees this type assertion + return jsonUnmarshalList(buf, typ.(List).ElementType, p, opts) + case typ.Is(Set{}): + //nolint:forcetypeassert // Is func above guarantees this type assertion + return jsonUnmarshalSet(buf, typ.(Set).ElementType, p, opts) + case typ.Is(Map{}): + //nolint:forcetypeassert // Is func above guarantees this type assertion + return jsonUnmarshalMap(buf, typ.(Map).ElementType, p, opts) + case typ.Is(Tuple{}): + //nolint:forcetypeassert // Is func above guarantees this type assertion + return jsonUnmarshalTuple(buf, typ.(Tuple).ElementTypes, p, opts) + case typ.Is(Object{}): + //nolint:forcetypeassert // Is func above guarantees this type assertion + return jsonUnmarshalObject(buf, typ.(Object).AttributeTypes, p, opts) + } + return Value{}, p.NewErrorf("unknown type %s", typ) +} + +func jsonUnmarshalString(buf []byte, _ Type, p *AttributePath) (Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + switch v := tok.(type) { + case string: + return NewValue(String, v), nil + case json.Number: + return NewValue(String, string(v)), nil + case bool: + if v { + return NewValue(String, "true"), nil + } + return NewValue(String, "false"), nil + } + return Value{}, p.NewErrorf("unsupported type %T sent as %s", tok, String) +} + +func jsonUnmarshalNumber(buf []byte, typ Type, p *AttributePath) (Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + switch numTok := tok.(type) { + case json.Number: + f, _, err := big.ParseFloat(string(numTok), 10, 512, 
big.ToNearestEven) + if err != nil { + return Value{}, p.NewErrorf("error parsing number: %w", err) + } + return NewValue(typ, f), nil + case string: + f, _, err := big.ParseFloat(numTok, 10, 512, big.ToNearestEven) + if err != nil { + return Value{}, p.NewErrorf("error parsing number: %w", err) + } + return NewValue(typ, f), nil + } + return Value{}, p.NewErrorf("unsupported type %T sent as %s", tok, Number) +} + +func jsonUnmarshalBool(buf []byte, _ Type, p *AttributePath) (Value, error) { + dec := jsonByteDecoder(buf) + tok, err := dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + switch v := tok.(type) { + case bool: + return NewValue(Bool, v), nil + case string: + switch v { + case "true", "1": + return NewValue(Bool, true), nil + case "false", "0": + return NewValue(Bool, false), nil + } + switch strings.ToLower(v) { + case "true": + return Value{}, p.NewErrorf("to convert from string, use lowercase \"true\"") + case "false": + return Value{}, p.NewErrorf("to convert from string, use lowercase \"false\"") + } + case json.Number: + switch v { + case "1": + return NewValue(Bool, true), nil + case "0": + return NewValue(Bool, false), nil + } + } + return Value{}, p.NewErrorf("unsupported type %T sent as %s", tok, Bool) +} + +func jsonUnmarshalDynamicPseudoType(buf []byte, _ Type, p *AttributePath, opts ValueFromJSONOpts) (Value, error) { + dec := jsonByteDecoder(buf) + tok, err := dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('{') { + return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('{'), tok) + } + var t Type + var valBody []byte + for dec.More() { + tok, err = dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + key, ok := tok.(string) + if !ok { + return Value{}, p.NewErrorf("expected key to be a string, got %T", tok) + } + var rawVal json.RawMessage + err = dec.Decode(&rawVal) + if err != nil { + return Value{}, p.NewErrorf("error decoding value: %w", err) + } + switch key { + case "type": + t, err = ParseJSONType(rawVal) //nolint:staticcheck + if err != nil { + return Value{}, p.NewErrorf("error decoding type information: %w", err) + } + case "value": + valBody = rawVal + default: + return Value{}, p.NewErrorf("invalid key %q in dynamically-typed value", key) + } + } + tok, err = dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('}') { + return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('}'), tok) + } + if t == nil { + return Value{}, p.NewErrorf("missing type in dynamically-typed value") + } + if valBody == nil { + return Value{}, p.NewErrorf("missing value in dynamically-typed value") + } + return jsonUnmarshal(valBody, t, p, opts) +} + +func jsonUnmarshalList(buf []byte, elementType Type, p *AttributePath, opts ValueFromJSONOpts) (Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('[') { + return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('['), tok) + } + + // we want to have a value for this always, even if there are no + // elements, because no elements is *technically* different than empty, + // and we want to preserve that distinction + // + // var vals []Value + // would evaluate as nil if the list is empty + // + // while generally in Go 
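Editor's note: jsonUnmarshalDynamicPseudoType above expects the {"type": ..., "value": ...} wrapper Terraform uses for dynamically-typed values, with the embedded type signature driving the decode. An illustrative sketch:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	// DynamicPseudoType values arrive wrapped with their concrete type.
	raw := []byte(`{"type": "number", "value": "1.5"}`)

	val, err := tftypes.ValueFromJSON(raw, tftypes.DynamicPseudoType) //nolint:staticcheck // internal-use API
	if err != nil {
		panic(err)
	}

	// The concrete type is recovered from the wrapper.
	fmt.Println(val.Type().Is(tftypes.Number)) // true
	fmt.Println(val)                           // expected: tftypes.Number<"1.5">
}
```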
it's undesirable to treat empty and nil slices + // separately, in this case we're surfacing a non-Go-in-origin + // distinction, so we'll allow it. + vals := []Value{} + + var idx int + for dec.More() { + innerPath := p.WithElementKeyInt(idx) + // update the index + idx++ + + var rawVal json.RawMessage + err = dec.Decode(&rawVal) + if err != nil { + return Value{}, innerPath.NewErrorf("error decoding value: %w", err) + } + val, err := jsonUnmarshal(rawVal, elementType, innerPath, opts) + if err != nil { + return Value{}, err + } + vals = append(vals, val) + } + + tok, err = dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim(']') { + return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim(']'), tok) + } + + elTyp := elementType + if elTyp.Is(DynamicPseudoType) { + elTyp, err = TypeFromElements(vals) + if err != nil { + return Value{}, p.NewErrorf("invalid elements for list: %w", err) + } + } + return NewValue(List{ + ElementType: elTyp, + }, vals), nil +} + +func jsonUnmarshalSet(buf []byte, elementType Type, p *AttributePath, opts ValueFromJSONOpts) (Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('[') { + return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('['), tok) + } + + // we want to have a value for this always, even if there are no + // elements, because no elements is *technically* different than empty, + // and we want to preserve that distinction + // + // var vals []Value + // would evaluate as nil if the set is empty + // + // while generally in Go it's undesirable to treat empty and nil slices + // separately, in this case we're surfacing a non-Go-in-origin + // distinction, so we'll allow it. 
+ vals := []Value{} + + for dec.More() { + innerPath := p.WithElementKeyValue(NewValue(elementType, UnknownValue)) + var rawVal json.RawMessage + err = dec.Decode(&rawVal) + if err != nil { + return Value{}, innerPath.NewErrorf("error decoding value: %w", err) + } + val, err := jsonUnmarshal(rawVal, elementType, innerPath, opts) + if err != nil { + return Value{}, err + } + vals = append(vals, val) + } + tok, err = dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim(']') { + return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim(']'), tok) + } + + elTyp := elementType + if elTyp.Is(DynamicPseudoType) { + elTyp, err = TypeFromElements(vals) + if err != nil { + return Value{}, p.NewErrorf("invalid elements for list: %w", err) + } + } + return NewValue(Set{ + ElementType: elTyp, + }, vals), nil +} + +func jsonUnmarshalMap(buf []byte, attrType Type, p *AttributePath, opts ValueFromJSONOpts) (Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('{') { + return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('{'), tok) + } + + vals := map[string]Value{} + for dec.More() { + innerPath := p.WithElementKeyValue(NewValue(attrType, UnknownValue)) + tok, err := dec.Token() + if err != nil { + return Value{}, innerPath.NewErrorf("error reading token: %w", err) + } + key, ok := tok.(string) + if !ok { + return Value{}, innerPath.NewErrorf("expected map key to be a string, got %T", tok) + } + + //fix the path value, we have an actual key now + innerPath = p.WithElementKeyString(key) + + var rawVal json.RawMessage + err = dec.Decode(&rawVal) + if err != nil { + return Value{}, innerPath.NewErrorf("error decoding value: %w", err) + } + val, err := jsonUnmarshal(rawVal, attrType, innerPath, opts) + if err != nil { + return Value{}, err + } + vals[key] = val + } + tok, err = dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('}') { + return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('}'), tok) + } + + return NewValue(Map{ + ElementType: attrType, + }, vals), nil +} + +func jsonUnmarshalTuple(buf []byte, elementTypes []Type, p *AttributePath, opts ValueFromJSONOpts) (Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('[') { + return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('['), tok) + } + + // we want to have a value for this always, even if there are no + // elements, because no elements is *technically* different than empty, + // and we want to preserve that distinction + // + // var vals []Value + // would evaluate as nil if the tuple is empty + // + // while generally in Go it's undesirable to treat empty and nil slices + // separately, in this case we're surfacing a non-Go-in-origin + // distinction, so we'll allow it. 
+ vals := []Value{} + + var idx int + for dec.More() { + if idx >= len(elementTypes) { + return Value{}, p.NewErrorf("too many tuple elements (only have types for %d)", len(elementTypes)) + } + + innerPath := p.WithElementKeyInt(idx) + elementType := elementTypes[idx] + idx++ + + var rawVal json.RawMessage + err = dec.Decode(&rawVal) + if err != nil { + return Value{}, innerPath.NewErrorf("error decoding value: %w", err) + } + val, err := jsonUnmarshal(rawVal, elementType, innerPath, opts) + if err != nil { + return Value{}, err + } + vals = append(vals, val) + } + + tok, err = dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim(']') { + return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim(']'), tok) + } + + if len(vals) != len(elementTypes) { + return Value{}, p.NewErrorf("not enough tuple elements (only have %d, have types for %d)", len(vals), len(elementTypes)) + } + + return NewValue(Tuple{ + ElementTypes: elementTypes, + }, vals), nil +} + +// jsonUnmarshalObject attempts to decode JSON object structure to tftypes.Value object. +// opts contains fields that can be used to modify the behaviour of JSON unmarshalling. +func jsonUnmarshalObject(buf []byte, attrTypes map[string]Type, p *AttributePath, opts ValueFromJSONOpts) (Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('{') { + return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('{'), tok) + } + + vals := map[string]Value{} + for dec.More() { + tok, err := dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading object attribute key token: %w", err) + } + key, ok := tok.(string) + if !ok { + return Value{}, p.NewErrorf("object attribute key was %T with value %v, not string", tok, tok) + } + innerPath := p.WithAttributeName(key) + attrType, ok := attrTypes[key] + if !ok { + if opts.IgnoreUndefinedAttributes { + // We are trying to ignore the key and value of any unsupported attribute. + _ = dec.Decode(new(json.RawMessage)) + continue + } + + return Value{}, innerPath.NewErrorf("unsupported attribute %q", key) + } + + var rawVal json.RawMessage + err = dec.Decode(&rawVal) + if err != nil { + return Value{}, innerPath.NewErrorf("error decoding value: %w", err) + } + val, err := jsonUnmarshal(rawVal, attrType, innerPath, opts) + if err != nil { + return Value{}, err + } + vals[key] = val + } + + tok, err = dec.Token() + if err != nil { + return Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('}') { + return Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('}'), tok) + } + + // make sure we have a value for every attribute + for k, typ := range attrTypes { + if _, ok := vals[k]; !ok { + vals[k] = NewValue(typ, nil) + } + } + + return NewValue(Object{ + AttributeTypes: attrTypes, + }, vals), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go new file mode 100644 index 0000000000..ed03ef9833 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_msgpack.go @@ -0,0 +1,584 @@ +// Copyright (c) HashiCorp, Inc. 
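Editor's note: the IgnoreUndefinedAttributes option wired through jsonUnmarshalObject above exists for exactly the removed-attribute raw-state case its doc comment mentions. A sketch; the printed error text is approximate:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	typ := tftypes.Object{AttributeTypes: map[string]tftypes.Type{
		"name": tftypes.String,
	}}

	// "legacy" was removed from the schema but is still present in raw
	// state; opting in skips it instead of failing the whole decode.
	raw := []byte(`{"name": "web", "legacy": 1}`)

	_, err := tftypes.ValueFromJSON(raw, typ) //nolint:staticcheck // internal-use API
	fmt.Println(err) // roughly: unsupported attribute "legacy"

	val, err := tftypes.ValueFromJSONWithOpts(raw, typ, tftypes.ValueFromJSONOpts{
		IgnoreUndefinedAttributes: true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(val)
}
```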
+// SPDX-License-Identifier: MPL-2.0 + +package tftypes + +import ( + "bytes" + "fmt" + "math" + "math/big" + "sort" + + msgpack "github.com/vmihailenco/msgpack/v5" + msgpackCodes "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +type msgPackUnknownType struct{} + +var msgPackUnknownVal = msgPackUnknownType{} + +func (u msgPackUnknownType) MarshalMsgpack() ([]byte, error) { + return []byte{0xd4, 0, 0}, nil +} + +// ValueFromMsgPack returns a Value from the MsgPack-encoded bytes, using the +// provided Type to determine what shape the Value should be. +// DynamicPseudoTypes will be transparently parsed into the types they +// represent. +// +// Deprecated: this function is exported for internal use in +// terraform-plugin-go. Third parties should not use it, and its behavior is +// not covered under the API compatibility guarantees. Don't use this. +func ValueFromMsgPack(data []byte, typ Type) (Value, error) { + r := bytes.NewReader(data) + dec := msgpack.NewDecoder(r) + return msgpackUnmarshal(dec, typ, NewAttributePath()) +} + +func msgpackUnmarshal(dec *msgpack.Decoder, typ Type, path *AttributePath) (Value, error) { + peek, err := dec.PeekCode() + if err != nil { + return Value{}, path.NewErrorf("error peeking next byte: %w", err) + } + if msgpackCodes.IsExt(peek) { + // as with go-cty, assume all extensions are unknown values + err := dec.Skip() + if err != nil { + return Value{}, path.NewErrorf("error skipping extension byte: %w", err) + } + return NewValue(typ, UnknownValue), nil + } + if typ.Is(DynamicPseudoType) { + return msgpackUnmarshalDynamic(dec, path) + } + if peek == msgpackCodes.Nil { + err := dec.Skip() + if err != nil { + return Value{}, path.NewErrorf("error skipping nil byte: %w", err) + } + return NewValue(typ, nil), nil + } + + switch { + case typ.Is(String): + rv, err := dec.DecodeString() + if err != nil { + return Value{}, path.NewErrorf("error decoding string: %w", err) + } + return NewValue(String, rv), nil + case typ.Is(Number): + peek, err := dec.PeekCode() + if err != nil { + return Value{}, path.NewErrorf("couldn't peek number: %w", err) + } + if msgpackCodes.IsFixedNum(peek) { + rv, err := dec.DecodeInt64() + if err != nil { + return Value{}, path.NewErrorf("couldn't decode number as int64: %w", err) + } + return NewValue(Number, new(big.Float).SetInt64(rv)), nil + } + switch peek { + case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64: + rv, err := dec.DecodeInt64() + if err != nil { + return Value{}, path.NewErrorf("couldn't decode number as int64: %w", err) + } + return NewValue(Number, new(big.Float).SetInt64(rv)), nil + case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64: + rv, err := dec.DecodeUint64() + if err != nil { + return Value{}, path.NewErrorf("couldn't decode number as uint64: %w", err) + } + return NewValue(Number, new(big.Float).SetUint64(rv)), nil + case msgpackCodes.Float, msgpackCodes.Double: + rv, err := dec.DecodeFloat64() + if err != nil { + return Value{}, path.NewErrorf("couldn't decode number as float64: %w", err) + } + return NewValue(Number, big.NewFloat(rv)), nil + default: + rv, err := dec.DecodeString() + if err != nil { + return Value{}, path.NewErrorf("couldn't decode number as string: %w", err) + } + // according to + // https://github.com/hashicorp/go-cty/blob/85980079f637862fa8e43ddc82dd74315e2f4c85/cty/value_init.go#L49 + // Base 10, precision 512, and rounding to nearest even + // is the standard way to handle numbers arriving as + // strings. 
+ fv, _, err := big.ParseFloat(rv, 10, 512, big.ToNearestEven) + if err != nil { + return Value{}, path.NewErrorf("error parsing %q as number: %w", rv, err) + } + return NewValue(Number, fv), nil + } + case typ.Is(Bool): + rv, err := dec.DecodeBool() + if err != nil { + return Value{}, path.NewErrorf("couldn't decode bool: %w", err) + } + return NewValue(Bool, rv), nil + case typ.Is(List{}): + //nolint:forcetypeassert // Is func above guarantees this type assertion + return msgpackUnmarshalList(dec, typ.(List).ElementType, path) + case typ.Is(Set{}): + //nolint:forcetypeassert // Is func above guarantees this type assertion + return msgpackUnmarshalSet(dec, typ.(Set).ElementType, path) + case typ.Is(Map{}): + //nolint:forcetypeassert // Is func above guarantees this type assertion + return msgpackUnmarshalMap(dec, typ.(Map).ElementType, path) + case typ.Is(Tuple{}): + //nolint:forcetypeassert // Is func above guarantees this type assertion + return msgpackUnmarshalTuple(dec, typ.(Tuple).ElementTypes, path) + case typ.Is(Object{}): + //nolint:forcetypeassert // Is func above guarantees this type assertion + return msgpackUnmarshalObject(dec, typ.(Object).AttributeTypes, path) + } + return Value{}, path.NewErrorf("unsupported type %s", typ.String()) +} + +func msgpackUnmarshalList(dec *msgpack.Decoder, typ Type, path *AttributePath) (Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return Value{}, path.NewErrorf("error decoding list length: %w", err) + } + + switch { + case length < 0: + return NewValue(List{ + ElementType: typ, + }, nil), nil + case length == 0: + return NewValue(List{ + ElementType: typ, + }, []Value{}), nil + } + + vals := make([]Value, 0, length) + for i := 0; i < length; i++ { + innerPath := path.WithElementKeyInt(i) + val, err := msgpackUnmarshal(dec, typ, innerPath) + if err != nil { + return Value{}, err + } + vals = append(vals, val) + } + + elTyp := typ + if elTyp.Is(DynamicPseudoType) { + elTyp, err = TypeFromElements(vals) + if err != nil { + return Value{}, err + } + } + + return NewValue(List{ + ElementType: elTyp, + }, vals), nil +} + +func msgpackUnmarshalSet(dec *msgpack.Decoder, typ Type, path *AttributePath) (Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return Value{}, path.NewErrorf("error decoding set length: %w", err) + } + + switch { + case length < 0: + return NewValue(Set{ + ElementType: typ, + }, nil), nil + case length == 0: + return NewValue(Set{ + ElementType: typ, + }, []Value{}), nil + } + + vals := make([]Value, 0, length) + for i := 0; i < length; i++ { + innerPath := path.WithElementKeyInt(i) + val, err := msgpackUnmarshal(dec, typ, innerPath) + if err != nil { + return Value{}, err + } + vals = append(vals, val) + } + + elTyp, err := TypeFromElements(vals) + if err != nil { + return Value{}, err + } + + return NewValue(Set{ + ElementType: elTyp, + }, vals), nil +} + +func msgpackUnmarshalMap(dec *msgpack.Decoder, typ Type, path *AttributePath) (Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return Value{}, path.NewErrorf("error decoding map length: %w", err) + } + + switch { + case length < 0: + return NewValue(Map{ + ElementType: typ, + }, nil), nil + case length == 0: + return NewValue(Map{ + ElementType: typ, + }, map[string]Value{}), nil + } + + vals := make(map[string]Value, length) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + return Value{}, path.NewErrorf("error decoding map key: %w", err) + } + innerPath := 
path.WithElementKeyString(key) + val, err := msgpackUnmarshal(dec, typ, innerPath) + if err != nil { + return Value{}, err + } + vals[key] = val + } + + return NewValue(Map{ + ElementType: typ, + }, vals), nil +} + +func msgpackUnmarshalTuple(dec *msgpack.Decoder, types []Type, path *AttributePath) (Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return Value{}, path.NewErrorf("error decoding tuple length: %w", err) + } + + switch { + case length < 0: + return NewValue(Tuple{ + ElementTypes: types, + }, nil), nil + case length != len(types): + return Value{}, path.NewErrorf("error decoding tuple; expected %d items, got %d", len(types), length) + } + + vals := make([]Value, 0, length) + for i := 0; i < length; i++ { + innerPath := path.WithElementKeyInt(i) + typ := types[i] + val, err := msgpackUnmarshal(dec, typ, innerPath) + if err != nil { + return Value{}, err + } + vals = append(vals, val) + } + + return NewValue(Tuple{ + ElementTypes: types, + }, vals), nil +} + +func msgpackUnmarshalObject(dec *msgpack.Decoder, types map[string]Type, path *AttributePath) (Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return Value{}, path.NewErrorf("error decoding object length: %w", err) + } + + switch { + case length < 0: + return NewValue(Object{ + AttributeTypes: types, + }, nil), nil + case length != len(types): + return Value{}, path.NewErrorf("error decoding object; expected %d attributes, got %d", len(types), length) + } + + vals := make(map[string]Value, length) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + return Value{}, path.NewErrorf("error decoding object key: %w", err) + } + typ, exists := types[key] + if !exists { + return Value{}, path.NewErrorf("unknown attribute %q", key) + } + innerPath := path.WithAttributeName(key) + val, err := msgpackUnmarshal(dec, typ, innerPath) + if err != nil { + return Value{}, err + } + vals[key] = val + } + + return NewValue(Object{ + AttributeTypes: types, + }, vals), nil +} + +func msgpackUnmarshalDynamic(dec *msgpack.Decoder, path *AttributePath) (Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return Value{}, path.NewErrorf("error checking length of DynamicPseudoType value: %w", err) + } + + switch { + case length == -1: + return newValue(DynamicPseudoType, nil) + case length != 2: + return Value{}, path.NewErrorf("expected %d elements in DynamicPseudoType array, got %d", 2, length) + } + + typeJSON, err := dec.DecodeBytes() + if err != nil { + return Value{}, path.NewErrorf("error decoding bytes: %w", err) + } + typ, err := ParseJSONType(typeJSON) //nolint:staticcheck + if err != nil { + return Value{}, path.NewErrorf("error parsing type information: %w", err) + } + return msgpackUnmarshal(dec, typ, path) +} + +func marshalMsgPack(val Value, typ Type, p *AttributePath, enc *msgpack.Encoder) error { + if typ.Is(DynamicPseudoType) && !val.Type().Is(DynamicPseudoType) { + return marshalMsgPackDynamicPseudoType(val, typ, p, enc) + + } + if !val.IsKnown() { + err := enc.Encode(msgPackUnknownVal) + if err != nil { + return p.NewErrorf("error encoding UnknownValue: %w", err) + } + return nil + } + if val.IsNull() { + err := enc.EncodeNil() + if err != nil { + return p.NewErrorf("error encoding null value: %w", err) + } + return nil + } + switch { + case typ.Is(String): + return marshalMsgPackString(val, typ, p, enc) + case typ.Is(Number): + return marshalMsgPackNumber(val, typ, p, enc) + case typ.Is(Bool): + return marshalMsgPackBool(val, 
typ, p, enc)
+	case typ.Is(List{}):
+		//nolint:forcetypeassert // Is func above guarantees this type assertion
+		return marshalMsgPackList(val, typ.(List), p, enc)
+	case typ.Is(Set{}):
+		//nolint:forcetypeassert // Is func above guarantees this type assertion
+		return marshalMsgPackSet(val, typ.(Set), p, enc)
+	case typ.Is(Map{}):
+		//nolint:forcetypeassert // Is func above guarantees this type assertion
+		return marshalMsgPackMap(val, typ.(Map), p, enc)
+	case typ.Is(Tuple{}):
+		//nolint:forcetypeassert // Is func above guarantees this type assertion
+		return marshalMsgPackTuple(val, typ.(Tuple), p, enc)
+	case typ.Is(Object{}):
+		//nolint:forcetypeassert // Is func above guarantees this type assertion
+		return marshalMsgPackObject(val, typ.(Object), p, enc)
+	}
+	return fmt.Errorf("unknown type %s", typ)
+}
+
+func marshalMsgPackDynamicPseudoType(val Value, _ Type, p *AttributePath, enc *msgpack.Encoder) error {
+	typeJSON, err := val.Type().MarshalJSON()
+	if err != nil {
+		return p.NewErrorf("error generating JSON for type %s: %w", val.Type(), err)
+	}
+	err = enc.EncodeArrayLen(2)
+	if err != nil {
+		return p.NewErrorf("error encoding array length: %w", err)
+	}
+	err = enc.EncodeBytes(typeJSON)
+	if err != nil {
+		return p.NewErrorf("error encoding JSON type info: %w", err)
+	}
+	err = marshalMsgPack(val, val.Type(), p, enc)
+	if err != nil {
+		return p.NewErrorf("error marshaling DynamicPseudoType value: %w", err)
+	}
+	return nil
+}
+
+func marshalMsgPackString(val Value, typ Type, p *AttributePath, enc *msgpack.Encoder) error {
+	s, ok := val.value.(string)
+	if !ok {
+		return unexpectedValueTypeError(p, s, val.value, typ)
+	}
+	err := enc.EncodeString(s)
+	if err != nil {
+		return p.NewErrorf("error encoding string value: %w", err)
+	}
+	return nil
+}
+
+func marshalMsgPackNumber(val Value, typ Type, p *AttributePath, enc *msgpack.Encoder) error {
+	n, ok := val.value.(*big.Float)
+	if !ok {
+		return unexpectedValueTypeError(p, n, val.value, typ)
+	}
+	if n.IsInf() {
+		if n.Sign() == -1 {
+			err := enc.EncodeFloat64(math.Inf(-1))
+			if err != nil {
+				return p.NewErrorf("error encoding negative infinity: %w", err)
+			}
+		} else if n.Sign() == 1 {
+			err := enc.EncodeFloat64(math.Inf(1))
+			if err != nil {
+				return p.NewErrorf("error encoding positive infinity: %w", err)
+			}
+		} else {
+			return p.NewErrorf("error encoding unknown infinity: sign %d is unknown", n.Sign())
+		}
+	} else if iv, acc := n.Int64(); acc == big.Exact {
+		err := enc.EncodeInt(iv)
+		if err != nil {
+			return p.NewErrorf("error encoding int value: %w", err)
+		}
+	} else if fv, acc := n.Float64(); acc == big.Exact {
+		err := enc.EncodeFloat64(fv)
+		if err != nil {
+			return p.NewErrorf("error encoding float value: %w", err)
+		}
+	} else {
+		err := enc.EncodeString(n.Text('f', -1))
+		if err != nil {
+			return p.NewErrorf("error encoding number string value: %w", err)
+		}
+	}
+	return nil
+}
+
+func marshalMsgPackBool(val Value, typ Type, p *AttributePath, enc *msgpack.Encoder) error {
+	b, ok := val.value.(bool)
+	if !ok {
+		return unexpectedValueTypeError(p, b, val.value, typ)
+	}
+	err := enc.EncodeBool(b)
+	if err != nil {
+		return p.NewErrorf("error encoding bool value: %w", err)
+	}
+	return nil
+}
+
+func marshalMsgPackList(val Value, typ List, p *AttributePath, enc *msgpack.Encoder) error {
+	l, ok := val.value.([]Value)
+	if !ok {
+		return unexpectedValueTypeError(p, l, val.value, typ)
+	}
+	err := enc.EncodeArrayLen(len(l))
+	if err != nil {
+		return p.NewErrorf("error encoding list length: %w", err)
+	}
+	for pos, i := range l {
+		err := marshalMsgPack(i, typ.ElementType, p.WithElementKeyInt(pos), enc)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func marshalMsgPackSet(val Value, typ Set, p *AttributePath, enc *msgpack.Encoder) error {
+	s, ok := val.value.([]Value)
+	if !ok {
+		return unexpectedValueTypeError(p, s, val.value, typ)
+	}
+	err := enc.EncodeArrayLen(len(s))
+	if err != nil {
+		return p.NewErrorf("error encoding set length: %w", err)
+	}
+	for _, i := range s {
+		err := marshalMsgPack(i, typ.ElementType, p.WithElementKeyValue(i), enc)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func marshalMsgPackMap(val Value, typ Map, p *AttributePath, enc *msgpack.Encoder) error {
+	m, ok := val.value.(map[string]Value)
+	if !ok {
+		return unexpectedValueTypeError(p, m, val.value, typ)
+	}
+	err := enc.EncodeMapLen(len(m))
+	if err != nil {
+		return p.NewErrorf("error encoding map length: %w", err)
+	}
+	for k, v := range m {
+		err := marshalMsgPack(NewValue(String, k), String, p.WithElementKeyString(k), enc)
+		if err != nil {
+			return p.NewErrorf("error encoding map key: %w", err)
+		}
+		err = marshalMsgPack(v, typ.ElementType, p, enc)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func marshalMsgPackTuple(val Value, typ Tuple, p *AttributePath, enc *msgpack.Encoder) error {
+	t, ok := val.value.([]Value)
+	if !ok {
+		return unexpectedValueTypeError(p, t, val.value, typ)
+	}
+	types := typ.ElementTypes
+	err := enc.EncodeArrayLen(len(types))
+	if err != nil {
+		return p.NewErrorf("error encoding tuple length: %w", err)
+	}
+	for pos, v := range t {
+		ty := types[pos]
+		err := marshalMsgPack(v, ty, p.WithElementKeyInt(pos), enc)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func marshalMsgPackObject(val Value, typ Object, p *AttributePath, enc *msgpack.Encoder) error {
+	o, ok := val.value.(map[string]Value)
+	if !ok {
+		return unexpectedValueTypeError(p, o, val.value, typ)
+	}
+	types := typ.AttributeTypes
+	keys := make([]string, 0, len(types))
+	for k := range types {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	err := enc.EncodeMapLen(len(keys))
+	if err != nil {
+		return p.NewErrorf("error encoding object length: %w", err)
+	}
+	for _, k := range keys {
+		ty := types[k]
+		v, ok := o[k]
+		if !ok {
+			return p.WithAttributeName(k).NewErrorf("no value set")
+		}
+		err := marshalMsgPack(NewValue(String, k), String, p.WithAttributeName(k), enc)
+		if err != nil {
+			return err
+		}
+		err = marshalMsgPack(v, ty, p.WithAttributeName(k), enc)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_walk.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_walk.go
new file mode 100644
index 0000000000..7e6abb6013
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/value_walk.go
@@ -0,0 +1,33 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tftypes
+
+import "fmt"
+
+// walkAttributePath will return the Value that `path` is pointing to within the
+// Value. If an error is returned, the AttributePath returned will indicate
+// the steps that remained to be applied when the error was encountered.
+//
+// This implementation, along with one for Type, could be exported to deprecate
+// the overly generic WalkAttributePath function.
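+//
+// A short sketch of the intended call pattern (the attribute name here is
+// invented for illustration):
+//
+//	attr, remaining, err := val.walkAttributePath(
+//		NewAttributePath().WithAttributeName("name"),
+//	)
+//	// on success, attr is the Value at .name and remaining has no steps left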
+func (v Value) walkAttributePath(path *AttributePath) (Value, *AttributePath, error) { + if path == nil || len(path.steps) == 0 { + return v, path, nil + } + + nextValueI, err := v.ApplyTerraform5AttributePathStep(path.NextStep()) + + if err != nil { + return Value{}, path, err + } + + nextValue, ok := nextValueI.(Value) + + if !ok { + return Value{}, path, fmt.Errorf("unknown type %T returned from tftypes.ApplyTerraform5AttributePathStep", nextValueI) + } + + return nextValue.walkAttributePath(NewAttributePathWithSteps(path.steps[1:])) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/walk.go b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/walk.go new file mode 100644 index 0000000000..e06a540abe --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tftypes/walk.go @@ -0,0 +1,349 @@ +package tftypes + +// these functions are based heavily on github.com/zclconf/go-cty +// used under the MIT License +// +// Copyright (c) 2017-2018 Martin Atkins +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +import ( + "errors" +) + +// stopWalkError is a well-known error for immediately stopping walk() without +// returning an actual error. +// +// The implementation of walk() will continue walking all attributes/elements +// within an object/collection since the boolean return value of the callback +// function is only intended to signal whether to stop descending into the same +// Value. Changing that behavior would be considered a breaking change. +// +// This could be considered for exporting to give external consumers better +// performance. +var stopWalkError = errors.New("walk stop requested") + +// Walk traverses a Value, calling the passed function for every element and +// attribute in the Value. The AttributePath passed to the callback function +// will identify which attribute or element is currently being surfaced by the +// Walk, and the passed Value will be the element or attribute at that +// AttributePath. Returning true from the callback function will indicate that +// any attributes or elements of the surfaced Value should be walked, too; +// returning false short-circuits the walk at that element or attribute, and +// does not visit any of its descendants. The return value of the callback does +// not matter when the Value that has been surfaced has no elements or +// attributes. Walk uses a depth-first traversal. 
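+//
+// A minimal callback sketch (hypothetical, not part of this package) that
+// records the paths of all unknown values within val:
+//
+//	var unknownPaths []*AttributePath
+//	err := Walk(val, func(p *AttributePath, v Value) (bool, error) {
+//		if !v.IsKnown() {
+//			unknownPaths = append(unknownPaths, p)
+//		}
+//		return true, nil
+//	})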
+func Walk(val Value, cb func(*AttributePath, Value) (bool, error)) error { + _, err := walk(NewAttributePath(), val, cb) + + return err +} + +// walk is the internal implementation of Walk(). It includes a bool return for +// whether callers should continue walking any remaining Value. +func walk(path *AttributePath, val Value, cb func(*AttributePath, Value) (bool, error)) (bool, error) { + shouldContinue, err := cb(path, val) + + if errors.Is(err, stopWalkError) { + return false, nil + } + + if err != nil { + return false, path.NewError(err) + } + + if !shouldContinue { + // The callback bool return is intended to signal that this Value should + // no longer be descended. Changing this behavior is a breaking change. + // A stopWalkError can be used to signal that all remaining Value can be + // skipped. + return true, nil + } + + if val.IsNull() || !val.IsKnown() { + return true, nil + } + + switch val.Type().(type) { + case List, Tuple: + v, ok := val.value.([]Value) + + if !ok { + return false, path.NewErrorf("cannot convert %T into []tftypes.Value", val.value) + } + + for pos, el := range v { + elementPath := path.WithElementKeyInt(pos) + shouldContinue, err := walk(elementPath, el, cb) + + if err != nil { + return false, elementPath.NewError(err) + } + + if !shouldContinue { + return false, nil + } + } + case Map: + v, ok := val.value.(map[string]Value) + + if !ok { + return false, path.NewErrorf("cannot convert %T into map[string]tftypes.Value", val.value) + } + + for k, el := range v { + elementPath := path.WithElementKeyString(k) + shouldContinue, err := walk(elementPath, el, cb) + + if err != nil { + return false, elementPath.NewError(err) + } + + if !shouldContinue { + return false, nil + } + } + case Object: + v, ok := val.value.(map[string]Value) + + if !ok { + return false, path.NewErrorf("cannot convert %T into map[string]tftypes.Value", val.value) + } + + for k, el := range v { + attributePath := path.WithAttributeName(k) + shouldContinue, err := walk(attributePath, el, cb) + + if err != nil { + return false, attributePath.NewError(err) + } + + if !shouldContinue { + return false, nil + } + } + case Set: + v, ok := val.value.([]Value) + + if !ok { + return false, path.NewErrorf("cannot convert %T into []tftypes.Value", val.value) + } + + for _, el := range v { + elementPath := path.WithElementKeyValue(el) + shouldContinue, err := walk(elementPath, el, cb) + + if err != nil { + return false, elementPath.NewError(err) + } + + if !shouldContinue { + return false, nil + } + } + } + + return true, nil +} + +// Transform uses a callback to mutate a Value. Each element or attribute will +// be visited in turn, with the AttributePath and Value surfaced to the +// callback, as in Walk. Unlike in Walk, the callback returns a Value instead +// of a boolean; this is the Value that will be stored at that AttributePath. +// The callback must return the passed Value unmodified if it wishes to not +// mutate a Value. Elements and attributes of a Value will be passed to the +// callback prior to the Value they belong to being passed to the callback, +// which means a callback can overwrite its own modifications. Values passed to +// the callback will always reflect the results of earlier callback calls. 
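+//
+// A minimal sketch (illustrative only; the replacement string is invented)
+// that masks every known, non-null string element while keeping the overall
+// shape of val intact:
+//
+//	masked, err := Transform(val, func(p *AttributePath, v Value) (Value, error) {
+//		if v.Type().Is(String) && v.IsKnown() && !v.IsNull() {
+//			return NewValue(String, "***"), nil
+//		}
+//		return v, nil
+//	})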
+func Transform(val Value, cb func(*AttributePath, Value) (Value, error)) (Value, error) { + return transform(NewAttributePath(), val, cb) +} + +func transform(path *AttributePath, val Value, cb func(*AttributePath, Value) (Value, error)) (Value, error) { + switch val.Type().(type) { + case nil: + return val, path.NewError(errors.New("invalid transform: value missing type")) + } + + newVal, err := transformUnderlying(path, val, cb) + + if err != nil { + return val, err + } + + res, err := cb(path, newVal) + + if err != nil { + return res, path.NewError(err) + } + + newTy := newVal.Type() + + if newTy == nil { + return val, path.NewError(errors.New("invalid transform: new value missing type")) + } + + if !newTy.UsableAs(val.Type()) { + return val, path.NewError(errors.New("invalid transform: value changed type")) + } + + return res, err +} + +// transformUnderlying returns the Value with any underlying attribute or +// element transformations completed. +func transformUnderlying(path *AttributePath, val Value, cb func(*AttributePath, Value) (Value, error)) (Value, error) { + // If the Value is null or unknown, there is nothing to descend. + if val.IsNull() || !val.IsKnown() { + return val, nil + } + + switch val.Type().(type) { + case List, Tuple: + elements, ok := val.value.([]Value) + + if !ok { + return val, path.NewErrorf("cannot convert %T into []tftypes.Value", val.value) + } + + if len(elements) == 0 { + return val, nil + } + + newElements := make([]Value, 0, len(elements)) + + for index, element := range elements { + elementPath := path.WithElementKeyInt(index) + + newElement, err := transform(elementPath, element, cb) + + if err != nil { + return val, elementPath.NewError(err) + } + + newElements = append(newElements, newElement) + } + + newVal, err := newValue(val.Type(), newElements) + + if err != nil { + return val, path.NewError(err) + } + + return newVal, nil + case Map: + elements, ok := val.value.(map[string]Value) + + if !ok { + return val, path.NewErrorf("cannot convert %T into map[string]tftypes.Value", val.value) + } + + if len(elements) == 0 { + return val, nil + } + + newElements := make(map[string]Value, len(elements)) + + for key, element := range elements { + elementPath := path.WithElementKeyString(key) + + newElement, err := transform(elementPath, element, cb) + + if err != nil { + return val, elementPath.NewError(err) + } + + newElements[key] = newElement + } + + newVal, err := newValue(val.Type(), newElements) + + if err != nil { + return val, path.NewError(err) + } + + return newVal, nil + case Object: + attributes, ok := val.value.(map[string]Value) + + if !ok { + return val, path.NewErrorf("cannot convert %T into map[string]tftypes.Value", val.value) + } + + if len(attributes) == 0 { + return val, nil + } + + newAttributes := make(map[string]Value, len(attributes)) + + for name, attribute := range attributes { + attributePath := path.WithAttributeName(name) + + newAttribute, err := transform(attributePath, attribute, cb) + + if err != nil { + return val, attributePath.NewError(err) + } + + newAttributes[name] = newAttribute + } + + newVal, err := newValue(val.Type(), newAttributes) + + if err != nil { + return val, path.NewError(err) + } + + return newVal, nil + case Set: + elements, ok := val.value.([]Value) + + if !ok { + return val, path.NewErrorf("cannot convert %T into []tftypes.Value", val.value) + } + + if len(elements) == 0 { + return val, nil + } + + newElements := make([]Value, 0, len(elements)) + + for _, element := range elements { + elementPath 
:= path.WithElementKeyValue(element) + + newElement, err := transform(elementPath, element, cb) + + if err != nil { + return val, elementPath.NewError(err) + } + + newElements = append(newElements, newElement) + } + + newVal, err := newValue(val.Type(), newElements) + + if err != nil { + return val, path.NewError(err) + } + + return newVal, nil + } + + return val, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-log/LICENSE new file mode 100644 index 0000000000..84cd064397 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/LICENSE @@ -0,0 +1,356 @@ +Copyright (c) 2021 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/fieldutils/field_maps.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/fieldutils/field_maps.go new file mode 100644 index 0000000000..ac78d6da73 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/fieldutils/field_maps.go @@ -0,0 +1,45 @@ +package fieldutils + +// MergeFieldMaps takes a slice of field maps, +// and merges all the key/value pairs into a new single field map. +// +// Input order matters: in case two or more maps use the same key, +// the last one to set that key will have the corresponding value +// persisted. 
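+//
+// For example, a sketch of the last-writer-wins behaviour described above:
+//
+//	merged := MergeFieldMaps(
+//		map[string]interface{}{"a": 1, "b": 2},
+//		map[string]interface{}{"b": 3},
+//	)
+//	// merged is map[string]interface{}{"a": 1, "b": 3}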
+func MergeFieldMaps(maps ...map[string]interface{}) map[string]interface{} { + // Pre-allocate a map to merge all the maps into, + // that has at least the capacity equivalent to the number + // of maps to merge + result := make(map[string]interface{}, len(maps)) + + // Merge all the maps into one; + // in case of clash, only the last key is preserved + for _, m := range maps { + for k, v := range m { + result[k] = v + } + } + + return result +} + +// FieldMapsToKeys will extract all the field maps keys, avoiding repetitions +// in case two or more maps contained the same key. +func FieldMapsToKeys(maps ...map[string]interface{}) []string { + switch len(maps) { + case 0: + return nil + case 1: + result := make([]string, 0, len(maps[0])) + + for k := range maps[0] { + result = append(result, k) + } + + return result + default: + // As we merge all maps into one, we can use this + // same function recursively, falling back on the `switch case 1`. + return FieldMapsToKeys(MergeFieldMaps(maps...)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/args.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/args.go new file mode 100644 index 0000000000..44c81ab8e6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/args.go @@ -0,0 +1,26 @@ +package hclogutils + +import ( + "github.com/hashicorp/terraform-plugin-log/internal/fieldutils" +) + +// FieldMapsToArgs will shallow merge field maps into a slice of key/value pairs +// arguments (i.e. `[k1, v1, k2, v2, ...]`) expected by hc-log.Logger methods. +func FieldMapsToArgs(maps ...map[string]interface{}) []interface{} { + switch len(maps) { + case 0: + return nil + case 1: + result := make([]interface{}, 0, len(maps[0])*2) + + for k, v := range maps[0] { + result = append(result, k, v) + } + + return result + default: + // As we merge all maps into one, we can use this + // same function recursively, falling back on the `switch case 1`. + return FieldMapsToArgs(fieldutils.MergeFieldMaps(maps...)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/logger_options.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/logger_options.go new file mode 100644 index 0000000000..a0ec34e20f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/hclogutils/logger_options.go @@ -0,0 +1,29 @@ +package hclogutils + +import ( + "github.com/hashicorp/go-hclog" +) + +// LoggerOptionsCopy will safely copy LoggerOptions. Manually implemented +// to save importing a dependency such as github.com/mitchellh/copystructure. 
+func LoggerOptionsCopy(src *hclog.LoggerOptions) *hclog.LoggerOptions { + if src == nil { + return nil + } + + return &hclog.LoggerOptions{ + AdditionalLocationOffset: src.AdditionalLocationOffset, + Color: src.Color, + DisableTime: src.DisableTime, + Exclude: src.Exclude, + IncludeLocation: src.IncludeLocation, + IndependentLevels: src.IndependentLevels, + JSONFormat: src.JSONFormat, + Level: src.Level, + Mutex: src.Mutex, + Name: src.Name, + Output: src.Output, + TimeFormat: src.TimeFormat, + TimeFn: src.TimeFn, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/filtering.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/filtering.go new file mode 100644 index 0000000000..c7b9c450a7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/filtering.go @@ -0,0 +1,133 @@ +package logging + +import ( + "strings" + + "github.com/hashicorp/terraform-plugin-log/internal/fieldutils" + "github.com/hashicorp/terraform-plugin-log/internal/hclogutils" +) + +const logMaskingReplacementString = "***" + +// ShouldOmit takes a log's *string message and slices of fields, +// and determines, based on the LoggerOpts configuration, if the +// log should be omitted (i.e. prevent it to be printed on the final writer). +func (lo LoggerOpts) ShouldOmit(msg *string, fieldMaps ...map[string]interface{}) bool { + // Omit log if any of the configured keys is found in the given fields + if len(lo.OmitLogWithFieldKeys) > 0 { + fieldsKeys := fieldutils.FieldMapsToKeys(fieldMaps...) + if argKeysContain(fieldsKeys, lo.OmitLogWithFieldKeys) { + return true + } + } + + // Omit log if any of the configured regexp matches the log message + if len(lo.OmitLogWithMessageRegexes) > 0 { + for _, r := range lo.OmitLogWithMessageRegexes { + if r.MatchString(*msg) { + return true + } + } + } + + // Omit log if any of the configured strings is contained in the log message + if len(lo.OmitLogWithMessageStrings) > 0 { + for _, s := range lo.OmitLogWithMessageStrings { + if strings.Contains(*msg, s) { + return true + } + } + } + + return false +} + +// ApplyMask takes a log's *string message and slices of fields, +// and applies masking to fields keys' values and/or to log message, +// based on the LoggerOpts configuration. +// +// Note that the given input is changed-in-place by this method. 
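+//
+// A hypothetical sketch (field names and values are invented for
+// illustration) of both message and field masking:
+//
+//	msg := "connecting to db-primary"
+//	fields := map[string]interface{}{"password": "hunter2"}
+//	opts := LoggerOpts{
+//		MaskFieldValuesWithFieldKeys: []string{"password"},
+//		MaskMessageStrings:           []string{"db-primary"},
+//	}
+//	opts.ApplyMask(&msg, fields)
+//	// msg is now "connecting to ***" and fields["password"] is "***"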
+func (lo LoggerOpts) ApplyMask(msg *string, fieldMaps ...map[string]interface{}) {
+	// Replace any log field value with the corresponding field key equal to the configured strings
+	if len(lo.MaskFieldValuesWithFieldKeys) > 0 {
+		for _, k := range lo.MaskFieldValuesWithFieldKeys {
+			for _, f := range fieldMaps {
+				for fk := range f {
+					if k == fk {
+						f[k] = logMaskingReplacementString
+					}
+				}
+			}
+		}
+	}
+
+	// Replace any part of any log field matching any of the configured regexp
+	if len(lo.MaskAllFieldValuesRegexes) > 0 {
+		for _, r := range lo.MaskAllFieldValuesRegexes {
+			for _, f := range fieldMaps {
+				for fk, fv := range f {
+					// Can apply the regexp replacement only if the field value is indeed a string
+					fvStr, ok := fv.(string)
+					if ok {
+						f[fk] = r.ReplaceAllString(fvStr, logMaskingReplacementString)
+					}
+				}
+			}
+		}
+	}
+
+	// Replace any part of any log field matching any of the configured strings
+	if len(lo.MaskAllFieldValuesStrings) > 0 {
+		for _, s := range lo.MaskAllFieldValuesStrings {
+			for _, f := range fieldMaps {
+				for fk, fv := range f {
+					// Can apply the string replacement only if the field value is indeed a string
+					fvStr, ok := fv.(string)
+					if ok {
+						f[fk] = strings.ReplaceAll(fvStr, s, logMaskingReplacementString)
+					}
+				}
+			}
+		}
+	}
+
+	// Replace any part of the log message matching any of the configured regexp
+	if len(lo.MaskMessageRegexes) > 0 {
+		for _, r := range lo.MaskMessageRegexes {
+			*msg = r.ReplaceAllString(*msg, logMaskingReplacementString)
+		}
+	}
+
+	// Replace any part of the log message equal to any of the configured strings
+	if len(lo.MaskMessageStrings) > 0 {
+		for _, s := range lo.MaskMessageStrings {
+			*msg = strings.ReplaceAll(*msg, s, logMaskingReplacementString)
+		}
+	}
+}
+
+func OmitOrMask(tfLoggerOpts LoggerOpts, msg *string, additionalFields []map[string]interface{}) ([]interface{}, bool) {
+	additionalFieldsMap := fieldutils.MergeFieldMaps(additionalFields...)
+
+	// Apply the provider root LoggerOpts to determine if this log should be omitted
+	if tfLoggerOpts.ShouldOmit(msg, tfLoggerOpts.Fields, additionalFieldsMap) {
+		return nil, true
+	}
+
+	// Apply the provider root LoggerOpts to apply masking to this log
+	tfLoggerOpts.ApplyMask(msg, tfLoggerOpts.Fields, additionalFieldsMap)
+
+	return hclogutils.FieldMapsToArgs(tfLoggerOpts.Fields, additionalFieldsMap), false
+}
+
+func argKeysContain(haystack []string, needles []string) bool {
+	for _, h := range haystack {
+		for _, n := range needles {
+			if n == h {
+				return true
+			}
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/log.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/log.go
new file mode 100644
index 0000000000..4aa80bb98d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/log.go
@@ -0,0 +1,117 @@
+package logging
+
+import (
+	"io"
+	"os"
+)
+
+const (
+	// DefaultProviderRootLoggerName is the default provider root logger name.
+	DefaultProviderRootLoggerName string = "provider"
+
+	// DefaultSDKRootLoggerName is the default SDK root logger name.
+	DefaultSDKRootLoggerName string = "sdk"
+)
+
+// loggerKey defines context keys for locating loggers in context.Context.
+// It's a private type to make sure no other package can override the key.
+type loggerKey string
+
+const (
+	// ProviderRootLoggerKey is the loggerKey that will hold the root
+	// logger for writing logs from within provider code.
+	ProviderRootLoggerKey loggerKey = "provider"
+
+	// ProviderRootLoggerOptionsKey is the loggerKey that will hold the root
+	// logger options when the root provider logger is created. This is to
+	// assist creating subsystem loggers, as most options cannot be fetched and
+	// a logger does not provide set methods for these options.
+	ProviderRootLoggerOptionsKey loggerKey = "provider-options"
+
+	// SDKRootLoggerKey is the loggerKey that will hold the root logger for
+	// writing logs from within SDKs.
+	SDKRootLoggerKey loggerKey = "sdk"
+
+	// SDKRootLoggerOptionsKey is the loggerKey that will hold the root
+	// logger options when the SDK provider logger is created. This is to
+	// assist creating subsystem loggers, as most options cannot be fetched and
+	// a logger does not provide set methods for these options.
+	SDKRootLoggerOptionsKey loggerKey = "sdk-options"
+
+	// SinkKey is the loggerKey that will hold the logging sink used for
+	// test frameworks.
+	SinkKey loggerKey = ""
+
+	// SinkOptionsKey is the loggerKey that will hold the sink
+	// logger options when the SDK provider logger is created. This is to
+	// assist creating subsystem loggers, as most options cannot be fetched and
+	// a logger does not provide set methods for these options.
+	SinkOptionsKey loggerKey = "sink-options"
+
+	// TFLoggerOpts is the loggerKey that will hold the LoggerOpts associated
+	// with the provider root logger (at `provider.tf-logger-opts`), and the
+	// provider sub-system logger (at `provider.SUBSYSTEM.tf-logger-opts`),
+	// in the context.Context.
+	// Note that only some LoggerOpts need to be stored this way,
+	// while others use the underlying *hclog.LoggerOptions of hclog.Logger.
+	TFLoggerOpts loggerKey = "tf-logger-opts"
+)
+
+// providerSubsystemLoggerKey is the loggerKey that will hold the subsystem logger
+// for writing logs from within a provider subsystem.
+func providerSubsystemLoggerKey(subsystem string) loggerKey {
+	return ProviderRootLoggerKey + loggerKey("."+subsystem)
+}
+
+// providerRootTFLoggerOptsKey is the loggerKey that will hold
+// the LoggerOpts of the provider.
+func providerRootTFLoggerOptsKey() loggerKey {
+	return ProviderRootLoggerKey + "." + TFLoggerOpts
+}
+
+// providerSubsystemTFLoggerOptsKey is the loggerKey that will hold
+// the LoggerOpts of a provider subsystem.
+func providerSubsystemTFLoggerOptsKey(subsystem string) loggerKey {
+	return providerSubsystemLoggerKey(subsystem) + "." + TFLoggerOpts
+}
+
+// sdkSubsystemLoggerKey is the loggerKey that will hold the subsystem logger
+// for writing logs from within an SDK subsystem.
+func sdkSubsystemLoggerKey(subsystem string) loggerKey {
+	return SDKRootLoggerKey + loggerKey("."+subsystem)
+}
+
+// sdkRootTFLoggerOptsKey is the loggerKey that will hold
+// the LoggerOpts of the SDK.
+func sdkRootTFLoggerOptsKey() loggerKey {
+	return SDKRootLoggerKey + "." + TFLoggerOpts
+}
+
+// sdkSubsystemTFLoggerOptsKey is the loggerKey that will hold
+// the LoggerOpts of an SDK subsystem.
+func sdkSubsystemTFLoggerOptsKey(subsystem string) loggerKey {
+	return sdkSubsystemLoggerKey(subsystem) + "." + TFLoggerOpts
+}
+
+var (
+	// Stderr caches the original os.Stderr when the process is started.
+	//
+	// When go-plugin.Serve is called, it overwrites our os.Stderr with a
+	// gRPC stream which Terraform ignores. This tends to be before our
+	// loggers get set up, as go-plugin has no way to pass in a base
+	// context, and our loggers are passed around via contexts. This leaves
+	// our loggers writing to an output that is never read by anything,
+	// meaning the logs get blackholed. This isn't ideal for log output,
+	// so this is our workaround: we copy stderr on init, before Serve can
+	// be called, and offer an option to write to that instead of the
+	// os.Stderr available at runtime.
+	//
+	// Ideally, this is a short-term fix until Terraform starts reading
+	// from go-plugin's gRPC-streamed stderr channel, but for the moment it
+	// works.
+	Stderr io.Writer
+)
+
+func init() {
+	Stderr = os.Stderr
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go
new file mode 100644
index 0000000000..583dc2cbf5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go
@@ -0,0 +1,369 @@
+package logging
+
+import (
+	"io"
+	"os"
+	"regexp"
+
+	"github.com/hashicorp/go-hclog"
+)
+
+// Option defines a modification to the configuration for a logger.
+type Option func(LoggerOpts) LoggerOpts
+
+// LoggerOpts is a collection of configuration settings for loggers.
+type LoggerOpts struct {
+	// Name is the name or "@module" of a logger.
+	Name string
+
+	// Level is the most verbose level that a logger will write logs for.
+	Level hclog.Level
+
+	// IncludeLocation indicates whether logs should include the location
+	// of the logging statement or not.
+	IncludeLocation bool
+
+	// AdditionalLocationOffset is the number of additional stack levels to
+	// skip when finding the file and line information for the log line.
+	// Defaults to 1 to account for the tflog and tfsdklog logging functions.
+	AdditionalLocationOffset int
+
+	// Output dictates where logs are written to. Output should only ever
+	// be set by tflog or tfsdklog, never by SDK authors or provider
+	// developers. Where logs get written to is complex and delicate and
+	// requires a deep understanding of Terraform's architecture, and it's
+	// easy to mess up by accident.
+	Output io.Writer
+
+	// IncludeTime indicates whether logs should include the time they were
+	// written or not. It should only be set to false when testing tflog or
+	// tfsdklog; providers and SDKs should always include the time logs
+	// were written as part of the log.
+	IncludeTime bool
+
+	// Fields indicates the key/value pairs to be added to each log output.
+	Fields map[string]interface{}
+
+	// IncludeRootFields indicates whether a new subsystem logger should
+	// copy existing fields from the root logger. This is only performed
+	// at the time of new subsystem creation.
+	IncludeRootFields bool
+
+	// OmitLogWithFieldKeys indicates that the logger should omit writing
+	// any log when any of the given keys is found within the fields.
+	//
+	// Example:
+	//
+	//   OmitLogWithFieldKeys = `['foo', 'baz']`
+	//
+	//   log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted
+	//   log2 = `{ msg = "...", fields = { 'bar': '...' }` -> printed
+	//   log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted
+	//
+	OmitLogWithFieldKeys []string
+
+	// OmitLogWithMessageRegexes indicates that the logger should omit writing
+	// any log that matches any of the given *regexp.Regexp.
+	//
+	// Example:
+	//
+	//   OmitLogWithMessageRegexes = `[regexp.MustCompile("(foo|bar)")]`
+	//
+	//   log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted
+	//   log2 = `{ msg = "pineapple mango", fields = {...}` -> printed
+	//   log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted
+	//
+	OmitLogWithMessageRegexes []*regexp.Regexp
+
+	// OmitLogWithMessageStrings indicates that the logger should omit writing
+	// any log that matches any of the given strings.
+	//
+	// Example:
+	//
+	//   OmitLogWithMessageStrings = `['foo', 'bar']`
+	//
+	//   log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted
+	//   log2 = `{ msg = "pineapple mango", fields = {...}` -> printed
+	//   log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted
+	//
+	OmitLogWithMessageStrings []string
+
+	// MaskFieldValuesWithFieldKeys indicates that the logger should mask with asterisks (`*`)
+	// any field value where the key matches one of the given keys.
+	//
+	// Example:
+	//
+	//   MaskFieldValuesWithFieldKeys = `['foo', 'baz']`
+	//
+	//   log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value
+	//   log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value
+	//   log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value
+	//
+	MaskFieldValuesWithFieldKeys []string
+
+	// MaskAllFieldValuesRegexes indicates that the logger should replace, within
+	// all the log field values, the portion matching one of the given *regexp.Regexp.
+	//
+	// Note that the replacement will happen only for field values that are of type string.
+	//
+	// Example:
+	//
+	//   MaskAllFieldValuesRegexes = `[regexp.MustCompile("(foo|bar)")]`
+	//
+	//   log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value
+	//   log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value
+	//   log3 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value
+	//
+	MaskAllFieldValuesRegexes []*regexp.Regexp
+
+	// MaskAllFieldValuesStrings indicates that the logger should replace, within
+	// all the log field values, the portion equal to one of the given strings.
+	//
+	// Note that the replacement will happen only for field values that are of type string.
+	//
+	// Example:
+	//
+	//   MaskAllFieldValuesStrings = `['foo', 'baz']`
+	//
+	//   log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': 'bar', 'k3': '***' }` -> masked value
+	//   log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': '***' }` -> as-is value
+	//   log3 = `{ msg = "...", fields = { 'k1': '*** bar ***' }` -> masked value
+	MaskAllFieldValuesStrings []string
+
+	// MaskMessageRegexes indicates that the logger should replace, within
+	// a log message, the portion matching one of the given *regexp.Regexp.
+	//
+	// Example:
+	//
+	//   MaskMessageRegexes = `[regexp.MustCompile("(foo|bar)")]`
+	//
+	//   log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion
+	//   log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is
+	//   log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion
+	//
+	MaskMessageRegexes []*regexp.Regexp
+
+	// MaskMessageStrings indicates that the logger should replace, within
+	// a log message, the portion equal to one of the given strings.
+ // + // Example: + // + // MaskMessageStrings = `['foo', 'bar']` + // + // log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion + // log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is + // log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion + // + MaskMessageStrings []string +} + +// Copy creates a duplicate LoggerOpts. This should be used to ensure +// safe LoggerOpts modification when the LoggerOpts could be saved into a +// new context.Context. +func (o LoggerOpts) Copy() LoggerOpts { + result := LoggerOpts{ + AdditionalLocationOffset: o.AdditionalLocationOffset, + Fields: make(map[string]any, len(o.Fields)), + IncludeLocation: o.IncludeLocation, + IncludeRootFields: o.IncludeRootFields, + IncludeTime: o.IncludeTime, + Level: o.Level, + MaskAllFieldValuesRegexes: make([]*regexp.Regexp, len(o.MaskAllFieldValuesRegexes)), + MaskAllFieldValuesStrings: make([]string, len(o.MaskAllFieldValuesStrings)), + MaskFieldValuesWithFieldKeys: make([]string, len(o.MaskFieldValuesWithFieldKeys)), + MaskMessageRegexes: make([]*regexp.Regexp, len(o.MaskMessageRegexes)), + MaskMessageStrings: make([]string, len(o.MaskMessageStrings)), + Name: o.Name, + OmitLogWithFieldKeys: make([]string, len(o.OmitLogWithFieldKeys)), + OmitLogWithMessageRegexes: make([]*regexp.Regexp, len(o.OmitLogWithMessageRegexes)), + OmitLogWithMessageStrings: make([]string, len(o.OmitLogWithMessageStrings)), + Output: o.Output, + } + + // Copy all slice/map contents to prevent leaking memory references + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + for key, value := range o.Fields { + result.Fields[key] = value + } + + copy(result.MaskAllFieldValuesRegexes, o.MaskAllFieldValuesRegexes) + copy(result.MaskAllFieldValuesStrings, o.MaskAllFieldValuesStrings) + copy(result.MaskFieldValuesWithFieldKeys, o.MaskFieldValuesWithFieldKeys) + copy(result.MaskMessageRegexes, o.MaskMessageRegexes) + copy(result.MaskMessageStrings, o.MaskMessageStrings) + copy(result.OmitLogWithFieldKeys, o.OmitLogWithFieldKeys) + copy(result.OmitLogWithMessageRegexes, o.OmitLogWithMessageRegexes) + copy(result.OmitLogWithMessageStrings, o.OmitLogWithMessageStrings) + + return result +} + +// ApplyLoggerOpts generates a LoggerOpts out of a list of Option +// implementations. By default, AdditionalLocationOffset is 1, IncludeLocation +// is true, IncludeTime is true, and Output is os.Stderr. +func ApplyLoggerOpts(opts ...Option) LoggerOpts { + // set some defaults + l := LoggerOpts{ + AdditionalLocationOffset: 1, + IncludeLocation: true, + IncludeTime: true, + Output: os.Stderr, + } + for _, opt := range opts { + l = opt(l) + } + return l +} + +// WithAdditionalLocationOffset sets the WithAdditionalLocationOffset +// configuration option, allowing implementations to fix location information +// when implementing helper functions. The default offset of 1 is automatically +// added to the provided value to account for the tflog and tfsdk logging +// functions. +func WithAdditionalLocationOffset(additionalLocationOffset int) Option { + return func(l LoggerOpts) LoggerOpts { + l.AdditionalLocationOffset = additionalLocationOffset + 1 + return l + } +} + +// WithOutput sets the Output configuration option, controlling where logs get +// written to. This is mostly used for testing (to write to os.Stdout, so the +// test framework can compare it against the example output) and as a helper +// when implementing safe, specific output strategies in tfsdklog. 
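+//
+// For instance, a test might combine options like this (a sketch; assumes
+// the other Option implementations defined in this file):
+//
+//	opts := ApplyLoggerOpts(
+//		WithOutput(os.Stdout),
+//		WithoutTimestamp(),
+//		WithoutLocation(),
+//	)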
+func WithOutput(output io.Writer) Option { + return func(l LoggerOpts) LoggerOpts { + l.Output = output + return l + } +} + +// WithField sets the provided key/value pair onto the LoggerOpts.Fields field. +// +// Behind the scenes, fields are stored in a map[string]interface{}: +// this means that if the same key is used multiple times (key collision), +// the last one set is the one that gets persisted and then outputted with the logs. +func WithField(key string, value interface{}) Option { + return func(l LoggerOpts) LoggerOpts { + // Lazily create this map on first assignment + if l.Fields == nil { + l.Fields = make(map[string]interface{}) + } + + l.Fields[key] = value + return l + } +} + +// WithFields sets all the provided key/value pairs onto the LoggerOpts.Fields field. +// +// Behind the scenes, fields are stored in a map[string]interface{}: +// this means that if the same key is used multiple times (key collision), +// the last one set is the one that gets persisted and then outputted with the logs. +func WithFields(fields map[string]interface{}) Option { + return func(l LoggerOpts) LoggerOpts { + // Lazily create this map on first assignment + if l.Fields == nil { + l.Fields = make(map[string]interface{}) + } + + for k, v := range fields { + l.Fields[k] = v + } + + return l + } +} + +// WithRootFields enables the copying of root logger fields to a new subsystem +// logger during creation. +func WithRootFields() Option { + return func(l LoggerOpts) LoggerOpts { + l.IncludeRootFields = true + return l + } +} + +// WithoutLocation disables the location included with logging statements. It +// should only ever be used to make log output deterministic when testing +// terraform-plugin-log. +func WithoutLocation() Option { + return func(l LoggerOpts) LoggerOpts { + l.IncludeLocation = false + return l + } +} + +// WithoutTimestamp disables the timestamp included with logging statements. It +// should only ever be used to make log output deterministic when testing +// terraform-plugin-log. +func WithoutTimestamp() Option { + return func(l LoggerOpts) LoggerOpts { + l.IncludeTime = false + return l + } +} + +// WithOmitLogWithFieldKeys appends keys to the LoggerOpts.OmitLogWithFieldKeys field. +func WithOmitLogWithFieldKeys(keys ...string) Option { + return func(l LoggerOpts) LoggerOpts { + l.OmitLogWithFieldKeys = append(l.OmitLogWithFieldKeys, keys...) + return l + } +} + +// WithOmitLogWithMessageRegexes appends *regexp.Regexp to the LoggerOpts.OmitLogWithMessageRegexes field. +func WithOmitLogWithMessageRegexes(expressions ...*regexp.Regexp) Option { + return func(l LoggerOpts) LoggerOpts { + l.OmitLogWithMessageRegexes = append(l.OmitLogWithMessageRegexes, expressions...) + return l + } +} + +// WithOmitLogWithMessageStrings appends strings to the LoggerOpts.OmitLogWithMessageStrings field. +func WithOmitLogWithMessageStrings(matchingStrings ...string) Option { + return func(l LoggerOpts) LoggerOpts { + l.OmitLogWithMessageStrings = append(l.OmitLogWithMessageStrings, matchingStrings...) + return l + } +} + +// WithMaskFieldValuesWithFieldKeys appends keys to the LoggerOpts.MaskFieldValuesWithFieldKeys field. +func WithMaskFieldValuesWithFieldKeys(keys ...string) Option { + return func(l LoggerOpts) LoggerOpts { + l.MaskFieldValuesWithFieldKeys = append(l.MaskFieldValuesWithFieldKeys, keys...) + return l + } +} + +// WithMaskAllFieldValuesRegexes appends *regexp.Regexp to the LoggerOpts.MaskAllFieldValuesRegexes field.
+func WithMaskAllFieldValuesRegexes(expressions ...*regexp.Regexp) Option { + return func(l LoggerOpts) LoggerOpts { + l.MaskAllFieldValuesRegexes = append(l.MaskAllFieldValuesRegexes, expressions...) + return l + } +} + +// WithMaskAllFieldValuesStrings appends strings to the LoggerOpts.MaskAllFieldValuesStrings field. +func WithMaskAllFieldValuesStrings(matchingStrings ...string) Option { + return func(l LoggerOpts) LoggerOpts { + l.MaskAllFieldValuesStrings = append(l.MaskAllFieldValuesStrings, matchingStrings...) + return l + } +} + +// WithMaskMessageRegexes appends *regexp.Regexp to the LoggerOpts.MaskMessageRegexes field. +func WithMaskMessageRegexes(expressions ...*regexp.Regexp) Option { + return func(l LoggerOpts) LoggerOpts { + l.MaskMessageRegexes = append(l.MaskMessageRegexes, expressions...) + return l + } +} + +// WithMaskMessageStrings appends strings to the LoggerOpts.MaskMessageStrings field. +func WithMaskMessageStrings(matchingStrings ...string) Option { + return func(l LoggerOpts) LoggerOpts { + l.MaskMessageStrings = append(l.MaskMessageStrings, matchingStrings...) + return l + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go new file mode 100644 index 0000000000..c8f45a86ce --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go @@ -0,0 +1,117 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/go-hclog" +) + +// GetProviderRootLogger returns the root logger used for writing logs +// from a provider. If no root logger has been created, it will return nil. +func GetProviderRootLogger(ctx context.Context) hclog.Logger { + logger := ctx.Value(ProviderRootLoggerKey) + if logger == nil { + return nil + } + + hclogger, ok := logger.(hclog.Logger) + if !ok { + return nil + } + return hclogger +} + +// GetProviderRootLoggerOptions returns the root logger options used for +// creating the root provider logger. If the root logger has not been created +// or the options are not present, it will return nil. +func GetProviderRootLoggerOptions(ctx context.Context) *hclog.LoggerOptions { + if GetProviderRootLogger(ctx) == nil { + return nil + } + + loggerOptionsRaw := ctx.Value(ProviderRootLoggerOptionsKey) + + if loggerOptionsRaw == nil { + return nil + } + + loggerOptions, ok := loggerOptionsRaw.(*hclog.LoggerOptions) + + if !ok { + return nil + } + + return loggerOptions +} + +// SetProviderRootLogger sets `logger` as the root logger used for writing +// logs from a provider. +func SetProviderRootLogger(ctx context.Context, logger hclog.Logger) context.Context { + return context.WithValue(ctx, ProviderRootLoggerKey, logger) +} + +// SetProviderRootLoggerOptions sets `loggerOptions` as the root logger options +// used for creating the provider root logger. +func SetProviderRootLoggerOptions(ctx context.Context, loggerOptions *hclog.LoggerOptions) context.Context { + return context.WithValue(ctx, ProviderRootLoggerOptionsKey, loggerOptions) +} + +// NewProviderSubsystemLoggerWarning is the text included in log output when a +// subsystem is auto-generated by terraform-plugin-log because it was used +// before the provider instantiated it. +const NewProviderSubsystemLoggerWarning = "This log was generated by a subsystem logger that wasn't created before being used. Use tflog.NewSubsystem to create this logger before it is used."
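Editor's note: to make the storage pattern above concrete, here is a rough standalone sketch (not part of the diff) of how a root logger travels through a context.Context. The internal/logging package is not importable from provider code, so the key type and helper names below are illustrative stand-ins for SetProviderRootLogger/GetProviderRootLogger:

package main

import (
	"context"
	"os"

	"github.com/hashicorp/go-hclog"
)

// loggerKey mimics the unexported context keys the logging package uses.
type loggerKey struct{}

func setRootLogger(ctx context.Context, logger hclog.Logger) context.Context {
	return context.WithValue(ctx, loggerKey{}, logger)
}

func getRootLogger(ctx context.Context) hclog.Logger {
	logger, ok := ctx.Value(loggerKey{}).(hclog.Logger)
	if !ok {
		return nil // mirrors the nil-on-missing behavior above
	}
	return logger
}

func main() {
	ctx := setRootLogger(context.Background(), hclog.New(&hclog.LoggerOptions{
		Name:   "provider",
		Output: os.Stderr,
	}))

	if logger := getRootLogger(ctx); logger != nil {
		logger.Info("root logger retrieved from context")
	}
}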
+ +// GetProviderSubsystemLogger returns the subsystem logger for the named +// subsystem in provider space. If no such subsystem logger has been created, +// it will return nil. +func GetProviderSubsystemLogger(ctx context.Context, subsystem string) hclog.Logger { + logger := ctx.Value(providerSubsystemLoggerKey(subsystem)) + if logger == nil { + return nil + } + + hclogger, ok := logger.(hclog.Logger) + if !ok { + return nil + } + + return hclogger +} + +// SetProviderSubsystemLogger sets `logger` as the logger for the named +// subsystem in provider space. +func SetProviderSubsystemLogger(ctx context.Context, subsystem string, logger hclog.Logger) context.Context { + return context.WithValue(ctx, providerSubsystemLoggerKey(subsystem), logger) +} + +// GetProviderRootTFLoggerOpts retrieves the LoggerOpts of the provider root logger. +// The value is stored in the context.Context: if none is found, a new one will be created. +func GetProviderRootTFLoggerOpts(ctx context.Context) LoggerOpts { + lOpts, ok := ctx.Value(providerRootTFLoggerOptsKey()).(LoggerOpts) + if !ok { + lOpts = LoggerOpts{} + } + + return lOpts +} + +// SetProviderRootTFLoggerOpts sets the LoggerOpts of the provider root logger, in the context. +func SetProviderRootTFLoggerOpts(ctx context.Context, lOpts LoggerOpts) context.Context { + return context.WithValue(ctx, providerRootTFLoggerOptsKey(), lOpts) +} + +// GetProviderSubsystemTFLoggerOpts retrieves the LoggerOpts of the logger for the named provider subsystem. +// The value is stored in the context.Context: if none is found, a new one will be created. +func GetProviderSubsystemTFLoggerOpts(ctx context.Context, subsystem string) LoggerOpts { + lOpts, ok := ctx.Value(providerSubsystemTFLoggerOptsKey(subsystem)).(LoggerOpts) + if !ok { + lOpts = LoggerOpts{} + } + + return lOpts +} + +// SetProviderSubsystemTFLoggerOpts sets the LoggerOpts of the logger for the named provider subsystem, in the context. +func SetProviderSubsystemTFLoggerOpts(ctx context.Context, subsystem string, lOpts LoggerOpts) context.Context { + return context.WithValue(ctx, providerSubsystemTFLoggerOptsKey(subsystem), lOpts) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go new file mode 100644 index 0000000000..217c83ed72 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go @@ -0,0 +1,118 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/go-hclog" +) + +// GetSDKRootLogger returns the root logger used for writing logs from an SDK. +// If no root logger has been created, it will return nil. +func GetSDKRootLogger(ctx context.Context) hclog.Logger { + logger := ctx.Value(SDKRootLoggerKey) + if logger == nil { + return nil + } + + hclogger, ok := logger.(hclog.Logger) + if !ok { + return nil + } + + return hclogger +} + +// GetSDKRootLoggerOptions returns the root logger options used for +// creating the root SDK logger. If the root logger has not been created or +// the options are not present, it will return nil. 
+func GetSDKRootLoggerOptions(ctx context.Context) *hclog.LoggerOptions { + if GetSDKRootLogger(ctx) == nil { + return nil + } + + loggerOptionsRaw := ctx.Value(SDKRootLoggerOptionsKey) + + if loggerOptionsRaw == nil { + return nil + } + + loggerOptions, ok := loggerOptionsRaw.(*hclog.LoggerOptions) + + if !ok { + return nil + } + + return loggerOptions +} + +// SetSDKRootLogger sets `logger` as the root logger used for writing logs from +// an SDK. +func SetSDKRootLogger(ctx context.Context, logger hclog.Logger) context.Context { + return context.WithValue(ctx, SDKRootLoggerKey, logger) +} + +// SetSDKRootLoggerOptions sets `loggerOptions` as the root logger options +// used for creating the SDK root logger. +func SetSDKRootLoggerOptions(ctx context.Context, loggerOptions *hclog.LoggerOptions) context.Context { + return context.WithValue(ctx, SDKRootLoggerOptionsKey, loggerOptions) +} + +// GetSDKRootTFLoggerOpts retrieves the LoggerOpts of the SDK root logger. +// The value is stored in the context.Context: if none is found, a new one will be created. +func GetSDKRootTFLoggerOpts(ctx context.Context) LoggerOpts { + lOpts, ok := ctx.Value(sdkRootTFLoggerOptsKey()).(LoggerOpts) + if !ok { + lOpts = LoggerOpts{} + } + + return lOpts +} + +// SetSDKRootTFLoggerOpts sets the LoggerOpts of the SDK root logger, in the context. +func SetSDKRootTFLoggerOpts(ctx context.Context, lOpts LoggerOpts) context.Context { + return context.WithValue(ctx, sdkRootTFLoggerOptsKey(), lOpts) +} + +// NewSDKSubsystemLoggerWarning is the text included in log output when a +// subsystem is auto-generated by terraform-plugin-log because it was used +// before the SDK instantiated it. +const NewSDKSubsystemLoggerWarning = "This log was generated by an SDK subsystem logger that wasn't created before being used. Use tflog.NewSubsystem to create this logger before it is used." + +// GetSDKSubsystemLogger returns the subsystem logger for the named subsystem +// in SDK space. If no such subsystem logger has been created, it will return +// nil. +func GetSDKSubsystemLogger(ctx context.Context, subsystem string) hclog.Logger { + logger := ctx.Value(sdkSubsystemLoggerKey(subsystem)) + if logger == nil { + return nil + } + + hclogger, ok := logger.(hclog.Logger) + if !ok { + return nil + } + + return hclogger +} + +// SetSDKSubsystemLogger sets `logger` as the logger for the named subsystem in +// SDK space. +func SetSDKSubsystemLogger(ctx context.Context, subsystem string, logger hclog.Logger) context.Context { + return context.WithValue(ctx, sdkSubsystemLoggerKey(subsystem), logger) +} + +// GetSDKSubsystemTFLoggerOpts retrieves the LoggerOpts of the logger for the named SDK subsystem. +// The value is stored in the context.Context: if none is found, a new one will be created. +func GetSDKSubsystemTFLoggerOpts(ctx context.Context, subsystem string) LoggerOpts { + lOpts, ok := ctx.Value(sdkSubsystemTFLoggerOptsKey(subsystem)).(LoggerOpts) + if !ok { + lOpts = LoggerOpts{} + } + + return lOpts +} + +// SetSDKSubsystemTFLoggerOpts sets the LoggerOpts of the logger for the named SDK subsystem, in the context. 
+func SetSDKSubsystemTFLoggerOpts(ctx context.Context, subsystem string, lOpts LoggerOpts) context.Context { + return context.WithValue(ctx, sdkSubsystemTFLoggerOptsKey(subsystem), lOpts) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go new file mode 100644 index 0000000000..b56ce8bfe7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go @@ -0,0 +1,57 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/go-hclog" +) + +// GetSink returns the sink logger used for writing logs. +// If no sink logger has been created, it will return nil. +func GetSink(ctx context.Context) hclog.Logger { + logger := ctx.Value(SinkKey) + if logger == nil { + return nil + } + + hclogger, ok := logger.(hclog.Logger) + if !ok { + return nil + } + + return hclogger +} + +// GetSinkOptions returns the sink logger options used for +// creating the sink logger. If the sink logger has not been created or +// the options are not present, it will return nil. +func GetSinkOptions(ctx context.Context) *hclog.LoggerOptions { + if GetSink(ctx) == nil { + return nil + } + + loggerOptionsRaw := ctx.Value(SinkOptionsKey) + + if loggerOptionsRaw == nil { + return nil + } + + loggerOptions, ok := loggerOptionsRaw.(*hclog.LoggerOptions) + + if !ok { + return nil + } + + return loggerOptions +} + +// SetSink sets `logger` as the sink logger used for writing logs. +func SetSink(ctx context.Context, logger hclog.Logger) context.Context { + return context.WithValue(ctx, SinkKey, logger) +} + +// SetSinkOptions sets `loggerOptions` as the sink logger options +// used for creating the sink logger. +func SetSinkOptions(ctx context.Context, loggerOptions *hclog.LoggerOptions) context.Context { + return context.WithValue(ctx, SinkOptionsKey, loggerOptions) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/doc.go b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/doc.go new file mode 100644 index 0000000000..97ca218845 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/doc.go @@ -0,0 +1,13 @@ +// Package tflog provides helper functions for writing log output and creating +// loggers for Terraform plugins. +// +// For most plugin authors, building on an SDK or framework, the SDK or +// framework will take care of injecting a logger using New. +// +// tflog also allows plugin authors to create subsystem loggers, which are +// loggers for sufficiently distinct areas of the codebase or concerns. The +// benefit of using distinct loggers for these concerns is that it allows +// plugin authors and practitioners to configure a different log level for each +// subsystem's log, allowing log output to be turned on or off without +// recompiling. +package tflog diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/options.go b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/options.go new file mode 100644 index 0000000000..750177812c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/options.go @@ -0,0 +1,60 @@ +package tflog + +import ( + "os" + "strings" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-plugin-log/internal/logging" +) + +// Options is a collection of logging options, useful for collecting arguments +// to NewSubsystem prior to calling it.
+type Options []logging.Option + +// WithAdditionalLocationOffset returns an option that allows implementations +// to fix location information when implementing helper functions. The default +// offset of 1 is automatically added to the provided value to account for the +// tflog logging functions. +func WithAdditionalLocationOffset(additionalLocationOffset int) logging.Option { + return logging.WithAdditionalLocationOffset(additionalLocationOffset) +} + +// WithLevelFromEnv returns an option that will set the level of the logger +// based on the string in an environment variable. The environment variable +// checked will be `name` and `subsystems`, joined by _ and in all caps. +func WithLevelFromEnv(name string, subsystems ...string) logging.Option { + return func(l logging.LoggerOpts) logging.LoggerOpts { + envVar := strings.Join(subsystems, "_") + if envVar != "" { + envVar = "_" + envVar + } + envVar = strings.ToUpper(name + envVar) + l.Level = hclog.LevelFromString(os.Getenv(envVar)) + return l + } +} + +// WithLevel returns an option that will set the level of the logger. +func WithLevel(level hclog.Level) logging.Option { + return func(l logging.LoggerOpts) logging.LoggerOpts { + l.Level = level + return l + } +} + +// WithRootFields enables the copying of root logger fields to a new subsystem +// logger during creation. +func WithRootFields() logging.Option { + return logging.WithRootFields() +} + +// WithoutLocation returns an option that disables including the location of +// the log line in the log output, which is on by default. This has no effect +// when used with NewSubsystem. +func WithoutLocation() logging.Option { + return func(l logging.LoggerOpts) logging.LoggerOpts { + l.IncludeLocation = false + return l + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go new file mode 100644 index 0000000000..c1a1572ce0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go @@ -0,0 +1,345 @@ +package tflog + +import ( + "context" + "regexp" + + "github.com/hashicorp/terraform-plugin-log/internal/logging" +) + +// SetField returns a new context.Context that has a modified logger in it which +// will include key and value as fields in all its log output. +// +// If the same key is used multiple times (i.e. key collision), +// the last one set is the one that gets persisted and then outputted with the logs. +func SetField(ctx context.Context, key string, value interface{}) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithField(key, value)(lOpts.Copy()) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// Trace logs `msg` at the trace level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps.
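Editor's note: the env var lookup in WithLevelFromEnv above is easy to compute by hand, since the name is just `name` and `subsystems` upper-cased and joined by underscores. A small standalone sketch that mirrors that joining logic (the input names are made-up examples, not variables the library necessarily checks):

package main

import (
	"fmt"
	"strings"
)

// envVarName mirrors the name-building logic inside WithLevelFromEnv.
func envVarName(name string, subsystems ...string) string {
	envVar := strings.Join(subsystems, "_")
	if envVar != "" {
		envVar = "_" + envVar
	}
	return strings.ToUpper(name + envVar)
}

func main() {
	fmt.Println(envVarName("tf_log_provider"))              // TF_LOG_PROVIDER
	fmt.Println(envVarName("tf_log_provider", "http"))      // TF_LOG_PROVIDER_HTTP
	fmt.Println(envVarName("tf_log_sdk", "proto", "data"))  // TF_LOG_SDK_PROTO_DATA
}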
+func Trace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + logger := logging.GetProviderRootLogger(ctx) + if logger == nil { + // this essentially should never happen in production + // the root logger for provider code should be injected + // by whatever SDK the provider developer is using, so + // really this is only likely in unit tests, at most + // so just making this a no-op is fine + return + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Trace(msg, additionalArgs...) +} + +// Debug logs `msg` at the debug level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Debug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + logger := logging.GetProviderRootLogger(ctx) + if logger == nil { + // this essentially should never happen in production + // the root logger for provider code should be injected + // by whatever SDK the provider developer is using, so + // really this is only likely in unit tests, at most + // so just making this a no-op is fine + return + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Debug(msg, additionalArgs...) +} + +// Info logs `msg` at the info level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Info(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + logger := logging.GetProviderRootLogger(ctx) + if logger == nil { + // this essentially should never happen in production + // the root logger for provider code should be injected + // by whatever SDK the provider developer is using, so + // really this is only likely in unit tests, at most + // so just making this a no-op is fine + return + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Info(msg, additionalArgs...) +} + +// Warn logs `msg` at the warn level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Warn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + logger := logging.GetProviderRootLogger(ctx) + if logger == nil { + // this essentially should never happen in production + // the root logger for provider code should be injected + // by whatever SDK the provider developer is using, so + // really this is only likely in unit tests, at most + // so just making this a no-op is fine + return + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Warn(msg, additionalArgs...) +} + +// Error logs `msg` at the error level to the logger in `ctx`, with optional +// `additionalFields` structured key-value fields in the log output. 
Fields are +// shallow merged with any defined on the logger, e.g. by the `SetField()` function, +// and across multiple maps. +func Error(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + logger := logging.GetProviderRootLogger(ctx) + if logger == nil { + // this essentially should never happen in production + // the root logger for provider code should be injected + // by whatever SDK the provider developer is using, so + // really this is only likely in unit tests, at most + // so just making this a no-op is fine + return + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderRootTFLoggerOpts(ctx), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Error(msg, additionalArgs...) +} + +// OmitLogWithFieldKeys returns a new context.Context that has a modified logger +// that will omit writing any log when any of the given keys is found +// within its fields. +// +// Each call to this function is additive: +// the keys to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'baz']` +// +// log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted +// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> printed +// log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted +func OmitLogWithFieldKeys(ctx context.Context, keys ...string) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts.Copy()) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// OmitLogWithMessageRegexes returns a new context.Context that has a modified logger +// that will omit writing any log that has a message matching any of the +// given *regexp.Regexp. +// +// Each call to this function is additive: +// the regular expressions to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `[regexp.MustCompile("(foo|bar)")]` +// +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +func OmitLogWithMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts.Copy()) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// OmitLogWithMessageStrings returns a new context.Context that has a modified logger +// that will omit writing any log whose message matches any of the given strings. +// +// Each call to this function is additive: +// the strings to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'bar']` +// +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +func OmitLogWithMessageStrings(ctx context.Context, matchingStrings ...string) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + // Copy to prevent slice/map aliasing issues.
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts.Copy()) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// MaskFieldValuesWithFieldKeys returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) any field value where the +// key matches one of the given keys. +// +// Each call to this function is additive: +// the keys to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'baz']` +// +// log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value +// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value +func MaskFieldValuesWithFieldKeys(ctx context.Context, keys ...string) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts.Copy()) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// MaskAllFieldValuesRegexes returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all field value substrings +// matching one of the given *regexp.Regexp. +// +// Note that the replacement will only happen for field values of type string. +// +// Each call to this function is additive: +// the regular expressions to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `[regexp.MustCompile("(foo|bar)")]` +// +// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value +// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value +func MaskAllFieldValuesRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskAllFieldValuesRegexes(expressions...)(lOpts.Copy()) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// MaskAllFieldValuesStrings returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all field value substrings +// equal to one of the given strings. +// +// Note that the replacement will only happen for field values of type string. +// +// Each call to this function is additive: +// the strings to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'bar']` +// +// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value +// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value +func MaskAllFieldValuesStrings(ctx context.Context, matchingStrings ...string) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + // Copy to prevent slice/map aliasing issues.
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskAllFieldValuesStrings(matchingStrings...)(lOpts.Copy()) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// MaskMessageRegexes returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all message substrings +// matching one of the given *regexp.Regexp. +// +// Each call to this function is additive: +// the regular expressions to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `[regexp.MustCompile("(foo|bar)")]` +// +// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +func MaskMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts.Copy()) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// MaskMessageStrings returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all message substrings +// equal to one of the given strings. +// +// Each call to this function is additive: +// the strings to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'bar']` +// +// log1 = `{ msg = "banana apple ***", fields = { 'k1': 'foo, bar, baz' }` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +func MaskMessageStrings(ctx context.Context, matchingStrings ...string) context.Context { + lOpts := logging.GetProviderRootTFLoggerOpts(ctx) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts.Copy()) + + return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) +} + +// MaskLogRegexes is a shortcut to invoke MaskMessageRegexes and MaskAllFieldValuesRegexes using the same input. +// Refer to those functions for details. +func MaskLogRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { + return MaskMessageRegexes(MaskAllFieldValuesRegexes(ctx, expressions...), expressions...) +} + +// MaskLogStrings is a shortcut to invoke MaskMessageStrings and MaskAllFieldValuesStrings using the same input. +// Refer to those functions for details. +func MaskLogStrings(ctx context.Context, matchingStrings ...string) context.Context { + return MaskMessageStrings(MaskAllFieldValuesStrings(ctx, matchingStrings...), matchingStrings...)
+} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go new file mode 100644 index 0000000000..1f66e757ad --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go @@ -0,0 +1,424 @@ +package tflog + +import ( + "context" + "regexp" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-plugin-log/internal/hclogutils" + "github.com/hashicorp/terraform-plugin-log/internal/logging" +) + +// NewSubsystem returns a new context.Context that contains a subsystem logger +// configured with the passed options, named after the subsystem argument. +// +// Subsystem loggers allow different areas of a plugin codebase to use +// different logging levels, giving developers more fine-grained control over +// what is logged and with what verbosity. They're best utilized for logical +// concerns that are sometimes helpful to log, but may generate unwanted noise +// at other times. +// +// The only Options supported for subsystems are the Options for setting the +// level and additional location offset of the logger. +func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Option) context.Context { + logger := logging.GetProviderRootLogger(ctx) + + if logger == nil { + // this essentially should never happen in production + // the root logger for provider code should be injected + // by whatever SDK the provider developer is using, so + // really this is only likely in unit tests, at most + // so just making this a no-op is fine + return ctx + } + + rootLoggerOptions := logging.GetProviderRootLoggerOptions(ctx) + subLoggerTFLoggerOpts := logging.ApplyLoggerOpts(options...) + + // If root logger options are not available, + // fall back to creating a logger named like the given subsystem. + // This will preserve the root logger settings, + // but cannot make changes beyond setting the level + // due to limitations with the hclog.Logger interface. + var subLogger hclog.Logger + if rootLoggerOptions == nil { + subLogger = logger.Named(subsystem) + + if subLoggerTFLoggerOpts.AdditionalLocationOffset != 1 { + logger.Warn("Unable to create logging subsystem with AdditionalLocationOffset due to missing root logger options") + } + } else { + subLoggerOptions := hclogutils.LoggerOptionsCopy(rootLoggerOptions) + subLoggerOptions.Name = subLoggerOptions.Name + "." + subsystem + + if subLoggerTFLoggerOpts.AdditionalLocationOffset != 1 { + subLoggerOptions.AdditionalLocationOffset = subLoggerTFLoggerOpts.AdditionalLocationOffset + } + + subLogger = hclog.New(subLoggerOptions) + } + + // Set the configured log level + if subLoggerTFLoggerOpts.Level != hclog.NoLevel { + subLogger.SetLevel(subLoggerTFLoggerOpts.Level) + } + + // Propagate root fields to the subsystem logger + if subLoggerTFLoggerOpts.IncludeRootFields { + loggerTFOpts := logging.GetProviderRootTFLoggerOpts(ctx) + subLoggerTFLoggerOpts = logging.WithFields(loggerTFOpts.Fields)(subLoggerTFLoggerOpts) + } + + // Set the subsystem LoggerOpts in the context + ctx = logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, subLoggerTFLoggerOpts) + + return logging.SetProviderSubsystemLogger(ctx, subsystem, subLogger) +} + +// SubsystemSetField returns a new context.Context that has a modified logger for +// the specified subsystem in it which will include key and value as fields +// in all its log output. +// +// If the same key is used multiple times (i.e.
key collision), +// the last one set is the one that gets persisted and then outputted with the logs. +func SubsystemSetField(ctx context.Context, subsystem, key string, value interface{}) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithField(key, value)(lOpts.Copy()) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemTrace logs `msg` at the trace level to the subsystem logger +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemTrace(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { + logger := logging.GetProviderSubsystemLogger(ctx, subsystem) + if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } + // create a new logger if one doesn't exist + logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Trace(msg, additionalArgs...) +} + +// SubsystemDebug logs `msg` at the debug level to the subsystem logger +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemDebug(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { + logger := logging.GetProviderSubsystemLogger(ctx, subsystem) + if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } + // create a new logger if one doesn't exist + logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Debug(msg, additionalArgs...) +} + +// SubsystemInfo logs `msg` at the info level to the subsystem logger +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. 
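Editor's note: a rough end-to-end sketch of the subsystem API above (not part of the diff). It uses only the public tflog/tfsdklog functions that appear in this diff; in a real provider the SDK injects the root logger, so the explicit NewRootProviderLogger call is only there to make the example self-contained:

package main

import (
	"context"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	"github.com/hashicorp/terraform-plugin-log/tfsdklog"
)

func main() {
	// Normally the SDK injects the root logger; creating it by hand here
	// keeps the example runnable on its own.
	ctx := tfsdklog.NewRootProviderLogger(context.Background())

	// Give the "http" subsystem its own level, independent of the root.
	ctx = tflog.NewSubsystem(ctx, "http", tflog.WithLevel(hclog.Debug))

	// Fields set on the subsystem appear in all of its subsequent output.
	ctx = tflog.SubsystemSetField(ctx, "http", "endpoint", "/v1/items")

	tflog.SubsystemDebug(ctx, "http", "request sent", map[string]interface{}{
		"method": "GET",
	})
}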
+func SubsystemInfo(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { + logger := logging.GetProviderSubsystemLogger(ctx, subsystem) + if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } + // create a new logger if one doesn't exist + logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Info(msg, additionalArgs...) +} + +// SubsystemWarn logs `msg` at the warn level to the subsystem logger +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemWarn(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { + logger := logging.GetProviderSubsystemLogger(ctx, subsystem) + if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } + // create a new logger if one doesn't exist + logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Warn(msg, additionalArgs...) +} + +// SubsystemError logs `msg` at the error level to the subsystem logger +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemError(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { + logger := logging.GetProviderSubsystemLogger(ctx, subsystem) + if logger == nil { + if logging.GetProviderRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } + // create a new logger if one doesn't exist + logger = logging.GetProviderSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewProviderSubsystemLoggerWarning) + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Error(msg, additionalArgs...) +} + +// SubsystemOmitLogWithFieldKeys returns a new context.Context that has a modified logger +// that will omit writing any log when any of the given keys is found +// within its fields. +// +// Each call to this function is additive: +// the keys to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'baz']` +// +// log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted +// log2 = `{ msg = "...", fields = { 'bar': '...'
}` -> printed +// log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted +func SubsystemOmitLogWithFieldKeys(ctx context.Context, subsystem string, keys ...string) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts.Copy()) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemOmitLogWithMessageRegexes returns a new context.Context that has a modified logger +// that will omit writing any log that has a message matching any of the +// given *regexp.Regexp. +// +// Each call to this function is additive: +// the regular expressions to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `[regexp.MustCompile("(foo|bar)")]` +// +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +func SubsystemOmitLogWithMessageRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts.Copy()) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemOmitLogWithMessageStrings returns a new context.Context that has a modified logger +// that will omit writing any log whose message matches any of the given strings. +// +// Each call to this function is additive: +// the strings to omit by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'bar']` +// +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +func SubsystemOmitLogWithMessageStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts.Copy()) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemMaskFieldValuesWithFieldKeys returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) any field value where the +// key matches one of the given keys. +// +// Each call to this function is additive: +// the keys to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'baz']` +// +// log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value +// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value +func SubsystemMaskFieldValuesWithFieldKeys(ctx context.Context, subsystem string, keys ...string) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + // Copy to prevent slice/map aliasing issues.
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts.Copy()) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemMaskAllFieldValuesRegexes returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all field value substrings +// matching one of the given *regexp.Regexp. +// +// Note that the replacement will only happen for field values of type string. +// +// Each call to this function is additive: +// the regular expressions to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `[regexp.MustCompile("(foo|bar)")]` +// +// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value +// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value +func SubsystemMaskAllFieldValuesRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskAllFieldValuesRegexes(expressions...)(lOpts.Copy()) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemMaskAllFieldValuesStrings returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all field value substrings +// equal to one of the given strings. +// +// Note that the replacement will only happen for field values of type string. +// +// Each call to this function is additive: +// the strings to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'bar']` +// +// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value +// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value +func SubsystemMaskAllFieldValuesStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskAllFieldValuesStrings(matchingStrings...)(lOpts.Copy()) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemMaskMessageRegexes returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all message substrings +// matching one of the given *regexp.Regexp. +// +// Each call to this function is additive: +// the regular expressions to mask by are added to the existing configuration.
+// + // Example: + // + // configuration = `[regexp.MustCompile("(foo|bar)")]` + // + // log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion + // log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is + // log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +func SubsystemMaskMessageRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts.Copy()) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemMaskMessageStrings returns a new context.Context that has a modified logger +// that masks (replaces) with asterisks (`***`) all message substrings +// equal to one of the given strings. +// +// Each call to this function is additive: +// the strings to mask by are added to the existing configuration. +// +// Example: +// +// configuration = `['foo', 'bar']` +// +// log1 = `{ msg = "banana apple ***", fields = { 'k1': 'foo, bar, baz' }` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +func SubsystemMaskMessageStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { + lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) + + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts.Copy()) + + return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) +} + +// SubsystemMaskLogRegexes is a shortcut to invoke SubsystemMaskMessageRegexes and SubsystemMaskAllFieldValuesRegexes using the same input. +// Refer to those functions for details. +func SubsystemMaskLogRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { + return SubsystemMaskMessageRegexes(SubsystemMaskAllFieldValuesRegexes(ctx, subsystem, expressions...), subsystem, expressions...) +} + +// SubsystemMaskLogStrings is a shortcut to invoke SubsystemMaskMessageStrings and SubsystemMaskAllFieldValuesStrings using the same input. +// Refer to those functions for details. +func SubsystemMaskLogStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { + return SubsystemMaskMessageStrings(SubsystemMaskAllFieldValuesStrings(ctx, subsystem, matchingStrings...), subsystem, matchingStrings...) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/doc.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/doc.go new file mode 100644 index 0000000000..2ed6f6fc83 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/doc.go @@ -0,0 +1,10 @@ +// Package tfsdklog provides helper functions for logging from SDKs and +// frameworks for building Terraform plugins. +// +// Plugin authors shouldn't need to use this package; it is meant for authors +// of the frameworks and SDKs for plugins. Plugin authors should use the tflog +// package. +// +// This package provides very similar functionality to tflog, except it uses a +// separate namespace for its logs.
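Editor's note: a short usage sketch for the masking helpers above (illustrative, not part of the diff). It assumes ctx already carries a provider root logger, and the "authorization" key and token value are placeholders:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-log/tflog"
)

// configureRedaction is a hypothetical helper showing how the masking
// functions compose; it assumes the SDK already injected a root logger.
func configureRedaction(ctx context.Context, token string) context.Context {
	// Redact the secret wherever it appears: in messages and in any
	// string-typed field value.
	ctx = tflog.MaskLogStrings(ctx, token)

	// Also mask whole field values stored under the "authorization" key.
	ctx = tflog.MaskFieldValuesWithFieldKeys(ctx, "authorization")

	return ctx
}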
+package tfsdklog diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/levels.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/levels.go new file mode 100644 index 0000000000..ed475a1379 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/levels.go @@ -0,0 +1,81 @@ +package tfsdklog + +import ( + "sync" + + "github.com/hashicorp/go-hclog" +) + +var ( + // rootLevel stores the effective level of the root SDK logger during + // NewRootSDKLogger where the value is deterministically chosen based on + // environment variables, etc. This call generally happens with each new + // provider RPC request. If the environment variable values changed during + // runtime between calls, then inflight provider requests checking this + // value would receive the most up-to-date value which would potentially + // differ from the actual in-context logger level. This tradeoff would only + // affect the inflight requests and should not be an overall performance + // concern in the case of this level causing more context checks until the + // request is over. + rootLevel hclog.Level = hclog.NoLevel + + // rootLevelMutex is a read-write mutex that protects rootLevel from + // concurrent reads and writes that would trip the data race detector. + rootLevelMutex = sync.RWMutex{} + + // subsystemLevels stores the effective level of all subsystem SDK loggers + // during NewSubsystem where the value is deterministically chosen based on + // environment variables, etc. This call generally happens with each new + // provider RPC request. If the environment variable values changed during + // runtime between calls, then inflight provider requests checking this + // value would receive the most up-to-date value which would potentially + // differ from the actual in-context logger level. This tradeoff would only + // affect the inflight requests and should not be an overall performance + // concern in the case of this level causing more context checks until the + // request is over. + subsystemLevels map[string]hclog.Level = make(map[string]hclog.Level) + + // subsystemLevelsMutex is a read-write mutex that protects the + // subsystemLevels map from concurrent read and write panics. + subsystemLevelsMutex = sync.RWMutex{} +) + +// subsystemWouldLog returns true if the subsystem SDK logger would emit a log +// at the given level. This is performed outside the context-based logger for +// performance. +func subsystemWouldLog(subsystem string, level hclog.Level) bool { + subsystemLevelsMutex.RLock() + + setLevel, ok := subsystemLevels[subsystem] + + subsystemLevelsMutex.RUnlock() + + if !ok { + return false + } + + return wouldLog(setLevel, level) +} + +// rootWouldLog returns true if the root SDK logger would emit a log at the +// given level. This is performed outside the context-based logger for +// performance. +func rootWouldLog(level hclog.Level) bool { + rootLevelMutex.RLock() + + setLevel := rootLevel + + rootLevelMutex.RUnlock() + + return wouldLog(setLevel, level) +} + +// wouldLog returns true if the set level would emit a log at the given +// level. This is performed outside the context-based logger for performance.
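Editor's note: for intuition about the comparison in wouldLog (defined just below), hclog levels are ordered integers with Trace lowest and Error highest, so a message is emitted when its level is at or above the configured level. A standalone restatement mirroring the vendored helper, not replacing it:

package main

import (
	"fmt"

	"github.com/hashicorp/go-hclog"
)

// wouldLog mirrors the vendored helper: Off never logs; otherwise a
// message emits when its level is at or above the configured level.
func wouldLog(setLevel, checkLevel hclog.Level) bool {
	if checkLevel == hclog.Off {
		return false
	}
	return checkLevel >= setLevel
}

func main() {
	fmt.Println(wouldLog(hclog.Info, hclog.Debug)) // false: Debug < Info
	fmt.Println(wouldLog(hclog.Info, hclog.Warn))  // true:  Warn >= Info
	fmt.Println(wouldLog(hclog.Info, hclog.Off))   // false: Off never logs
}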
+func wouldLog(setLevel, checkLevel hclog.Level) bool { + if checkLevel == hclog.Off { + return false + } + + return checkLevel >= setLevel +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/options.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/options.go new file mode 100644 index 0000000000..b1ba8e51ef --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/options.go @@ -0,0 +1,78 @@ +package tfsdklog + +import ( + "os" + "strings" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-plugin-log/internal/logging" +) + +// Options is a collection of logging options, useful for collecting arguments +// to NewSubsystem, NewRootSDKLogger, and NewRootProviderLogger before calling +// them. +type Options []logging.Option + +// WithAdditionalLocationOffset returns an option that allows implementations +// to fix location information when implementing helper functions. The default +// offset of 1 is automatically added to the provided value to account for the +// tfsdklog logging functions. +func WithAdditionalLocationOffset(additionalLocationOffset int) logging.Option { + return logging.WithAdditionalLocationOffset(additionalLocationOffset) +} + +// WithLogName returns an option that will set the logger name explicitly to +// `name`. This has no effect when used with NewSubsystem. +func WithLogName(name string) logging.Option { + return func(l logging.LoggerOpts) logging.LoggerOpts { + l.Name = name + return l + } +} + +// WithLevelFromEnv returns an option that will set the level of the logger +// based on the string in an environment variable. The environment variable +// checked will be `name` and `subsystems`, joined by _ and in all caps. +func WithLevelFromEnv(name string, subsystems ...string) logging.Option { + return func(l logging.LoggerOpts) logging.LoggerOpts { + envVar := strings.Join(subsystems, "_") + if envVar != "" { + envVar = "_" + envVar + } + envVar = strings.ToUpper(name + envVar) + l.Level = hclog.LevelFromString(os.Getenv(envVar)) + return l + } +} + +// WithLevel returns an option that will set the level of the logger. +func WithLevel(level hclog.Level) logging.Option { + return func(l logging.LoggerOpts) logging.LoggerOpts { + l.Level = level + return l + } +} + +// WithRootFields enables the copying of root logger fields to a new subsystem +// logger during creation. +func WithRootFields() logging.Option { + return logging.WithRootFields() +} + +// WithoutLocation returns an option that disables including the location of +// the log line in the log output, which is on by default. This has no effect +// when used with NewSubsystem. +func WithoutLocation() logging.Option { + return func(l logging.LoggerOpts) logging.LoggerOpts { + l.IncludeLocation = false + return l + } +} + +// WithStderrFromInit returns an option that tells the logger to write to the +// os.Stderr that was present when the program started, not the one that is +// available at runtime. Some versions of Terraform overwrite os.Stderr with an +// io.Writer that is never read, so any log lines written to it will be lost.
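Editor's note: the reason WithStderrFromInit (documented above, defined just below) can work is that, per its doc comment and the logging.Stderr value it passes to WithOutput, the original stream is captured in a package-level variable before Terraform can reassign os.Stderr. A minimal standalone version of that capture-at-init pattern, assuming this reading of the vendored code:

package main

import (
	"fmt"
	"os"
)

// stderrAtInit captures the process's original stderr before anything can
// reassign the os.Stderr variable at runtime.
var stderrAtInit = os.Stderr

func main() {
	// Simulate a host program swapping out os.Stderr at runtime.
	os.Stderr = nil

	// The handle captured at init still points at the real stream.
	fmt.Fprintln(stderrAtInit, "still reaches the original stderr")
}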
+func WithStderrFromInit() logging.Option { + return logging.WithOutput(logging.Stderr) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go new file mode 100644 index 0000000000..4ffb2cc2f5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go @@ -0,0 +1,436 @@ +package tfsdklog + +import ( + "context" + "regexp" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-plugin-log/internal/hclogutils" + "github.com/hashicorp/terraform-plugin-log/internal/logging" +) + +// NewRootSDKLogger returns a new context.Context that contains an SDK logger +// configured with the passed options. +func NewRootSDKLogger(ctx context.Context, options ...logging.Option) context.Context { + opts := logging.ApplyLoggerOpts(options...) + if opts.Name == "" { + opts.Name = logging.DefaultSDKRootLoggerName + } + if sink := logging.GetSink(ctx); sink != nil { + logger := sink.Named(opts.Name) + sinkLoggerOptions := logging.GetSinkOptions(ctx) + sdkLoggerOptions := hclogutils.LoggerOptionsCopy(sinkLoggerOptions) + sdkLoggerOptions.Name = opts.Name + + if opts.Level != hclog.NoLevel { + logger.SetLevel(opts.Level) + sdkLoggerOptions.Level = opts.Level + } + + ctx = logging.SetSDKRootLogger(ctx, logger) + ctx = logging.SetSDKRootLoggerOptions(ctx, sdkLoggerOptions) + + return ctx + } + if opts.Level == hclog.NoLevel { + opts.Level = hclog.Trace + } + + // Cache root logger level outside context for performance reasons. + rootLevelMutex.Lock() + + rootLevel = opts.Level + + rootLevelMutex.Unlock() + + loggerOptions := &hclog.LoggerOptions{ + Name: opts.Name, + Level: opts.Level, + JSONFormat: true, + IndependentLevels: true, + IncludeLocation: opts.IncludeLocation, + DisableTime: !opts.IncludeTime, + Output: opts.Output, + AdditionalLocationOffset: opts.AdditionalLocationOffset, + } + + ctx = logging.SetSDKRootLogger(ctx, hclog.New(loggerOptions)) + ctx = logging.SetSDKRootLoggerOptions(ctx, loggerOptions) + + return ctx +} + +// NewRootProviderLogger returns a new context.Context that contains a provider +// logger configured with the passed options. +func NewRootProviderLogger(ctx context.Context, options ...logging.Option) context.Context { + opts := logging.ApplyLoggerOpts(options...) 
+	if opts.Name == "" {
+		opts.Name = logging.DefaultProviderRootLoggerName
+	}
+	if sink := logging.GetSink(ctx); sink != nil {
+		logger := sink.Named(opts.Name)
+		sinkLoggerOptions := logging.GetSinkOptions(ctx)
+		providerLoggerOptions := hclogutils.LoggerOptionsCopy(sinkLoggerOptions)
+		providerLoggerOptions.Name = opts.Name
+
+		if opts.Level != hclog.NoLevel {
+			logger.SetLevel(opts.Level)
+			providerLoggerOptions.Level = opts.Level
+		}
+
+		ctx = logging.SetProviderRootLogger(ctx, logger)
+		ctx = logging.SetProviderRootLoggerOptions(ctx, providerLoggerOptions)
+
+		return ctx
+	}
+	if opts.Level == hclog.NoLevel {
+		opts.Level = hclog.Trace
+	}
+	loggerOptions := &hclog.LoggerOptions{
+		Name:                     opts.Name,
+		Level:                    opts.Level,
+		JSONFormat:               true,
+		IndependentLevels:        true,
+		IncludeLocation:          opts.IncludeLocation,
+		DisableTime:              !opts.IncludeTime,
+		Output:                   opts.Output,
+		AdditionalLocationOffset: opts.AdditionalLocationOffset,
+	}
+
+	ctx = logging.SetProviderRootLogger(ctx, hclog.New(loggerOptions))
+	ctx = logging.SetProviderRootLoggerOptions(ctx, loggerOptions)
+
+	return ctx
+}
+
+// SetField returns a new context.Context that has a modified logger in it which
+// will include key and value as fields in all its log output.
+//
+// If the same key is used multiple times (i.e. key collision),
+// the last one set is the one that gets persisted and then output with the logs.
+func SetField(ctx context.Context, key string, value interface{}) context.Context {
+	lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithField(key, value)(lOpts.Copy())
+
+	return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
+}
+
+// Trace logs `msg` at the trace level to the logger in `ctx`, with optional
+// `additionalFields` structured key-value fields in the log output. Fields are
+// shallow merged with any defined on the logger, e.g. by the `SetField()` function,
+// and across multiple maps.
+func Trace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) {
+	logger := logging.GetSDKRootLogger(ctx)
+	if logger == nil {
+		// This should essentially never happen in production: the root
+		// logger for SDK code should be injected by the SDK in question,
+		// so this is really only likely in unit tests; at most, making
+		// this a no-op is fine.
+		return
+	}
+
+	additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKRootTFLoggerOpts(ctx), &msg, additionalFields)
+	if shouldOmit {
+		return
+	}
+
+	logger.Trace(msg, additionalArgs...)
+}
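Debug, Info, Warn, and Error below follow exactly the same shape, so one hedged sketch of the variadic field merging covers all of them (the context wiring and field names are invented for illustration):

```go
package example

import (
	"context"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/terraform-plugin-log/tfsdklog"
)

func logRetry(ctx context.Context) {
	ctx = tfsdklog.NewRootSDKLogger(ctx, tfsdklog.WithLevel(hclog.Trace))
	ctx = tfsdklog.SetField(ctx, "provider", "github")

	// Both maps are shallow merged into the entry's fields; on a key
	// collision the last value set wins, mirroring the SetField note above.
	tfsdklog.Trace(ctx, "retrying request",
		map[string]interface{}{"attempt": 1, "url": "/meta"},
		map[string]interface{}{"attempt": 2},
	)
}
```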
+// Debug logs `msg` at the debug level to the logger in `ctx`, with optional
+// `additionalFields` structured key-value fields in the log output. Fields are
+// shallow merged with any defined on the logger, e.g. by the `SetField()` function,
+// and across multiple maps.
+func Debug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) {
+	logger := logging.GetSDKRootLogger(ctx)
+	if logger == nil {
+		// This should essentially never happen in production: the root
+		// logger for SDK code should be injected by the SDK in question,
+		// so this is really only likely in unit tests; at most, making
+		// this a no-op is fine.
+		return
+	}
+
+	additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKRootTFLoggerOpts(ctx), &msg, additionalFields)
+	if shouldOmit {
+		return
+	}
+
+	logger.Debug(msg, additionalArgs...)
+}
+
+// Info logs `msg` at the info level to the logger in `ctx`, with optional
+// `additionalFields` structured key-value fields in the log output. Fields are
+// shallow merged with any defined on the logger, e.g. by the `SetField()` function,
+// and across multiple maps.
+func Info(ctx context.Context, msg string, additionalFields ...map[string]interface{}) {
+	logger := logging.GetSDKRootLogger(ctx)
+	if logger == nil {
+		// This should essentially never happen in production: the root
+		// logger for SDK code should be injected by the SDK in question,
+		// so this is really only likely in unit tests; at most, making
+		// this a no-op is fine.
+		return
+	}
+
+	additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKRootTFLoggerOpts(ctx), &msg, additionalFields)
+	if shouldOmit {
+		return
+	}
+
+	logger.Info(msg, additionalArgs...)
+}
+
+// Warn logs `msg` at the warn level to the logger in `ctx`, with optional
+// `additionalFields` structured key-value fields in the log output. Fields are
+// shallow merged with any defined on the logger, e.g. by the `SetField()` function,
+// and across multiple maps.
+func Warn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) {
+	logger := logging.GetSDKRootLogger(ctx)
+	if logger == nil {
+		// This should essentially never happen in production: the root
+		// logger for SDK code should be injected by the SDK in question,
+		// so this is really only likely in unit tests; at most, making
+		// this a no-op is fine.
+		return
+	}
+
+	additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKRootTFLoggerOpts(ctx), &msg, additionalFields)
+	if shouldOmit {
+		return
+	}
+
+	logger.Warn(msg, additionalArgs...)
+}
+
+// Error logs `msg` at the error level to the logger in `ctx`, with optional
+// `additionalFields` structured key-value fields in the log output. Fields are
+// shallow merged with any defined on the logger, e.g. by the `SetField()` function,
+// and across multiple maps.
+func Error(ctx context.Context, msg string, additionalFields ...map[string]interface{}) {
+	logger := logging.GetSDKRootLogger(ctx)
+	if logger == nil {
+		// This should essentially never happen in production: the root
+		// logger for SDK code should be injected by the SDK in question,
+		// so this is really only likely in unit tests; at most, making
+		// this a no-op is fine.
+		return
+	}
+
+	additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKRootTFLoggerOpts(ctx), &msg, additionalFields)
+	if shouldOmit {
+		return
+	}
+
+	logger.Error(msg, additionalArgs...)
+}
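The filter helpers that follow are all additive context transforms, so a provider can chain them once during setup; a hedged sketch (the field keys are invented):

```go
package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-log/tfsdklog"
)

// configureLogFilters chains the additive filter helpers defined below.
// Each call returns a derived context, so the filters accumulate.
func configureLogFilters(ctx context.Context) context.Context {
	ctx = tfsdklog.OmitLogWithFieldKeys(ctx, "authorization_header")
	ctx = tfsdklog.MaskFieldValuesWithFieldKeys(ctx, "token")
	return ctx
}
```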
+// OmitLogWithFieldKeys returns a new context.Context that has a modified logger
+// that will omit any log entry when any of the given keys is found
+// within its fields.
+//
+// Each call to this function is additive:
+// the keys to omit by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'baz']`
+//
+// log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted
+// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> printed
+// log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted
+func OmitLogWithFieldKeys(ctx context.Context, keys ...string) context.Context {
+	lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts.Copy())
+
+	return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
+}
+
+// OmitLogWithMessageRegexes returns a new context.Context that has a modified logger
+// that will omit any log whose message matches any of the
+// given *regexp.Regexp.
+//
+// Each call to this function is additive:
+// the regexps to omit by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `[regexp.MustCompile("(foo|bar)")]`
+//
+// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed
+// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted
+func OmitLogWithMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context {
+	lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts.Copy())
+
+	return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
+}
+
+// OmitLogWithMessageStrings returns a new context.Context that has a modified logger
+// that will omit any log whose message contains any of the given strings.
+//
+// Each call to this function is additive:
+// the strings to omit by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'bar']`
+//
+// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed
+// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted
+func OmitLogWithMessageStrings(ctx context.Context, matchingStrings ...string) context.Context {
+	lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts.Copy())
+
+	return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
+}
+
+// MaskFieldValuesWithFieldKeys returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) any field value where the
+// key matches one of the given keys.
+//
+// Each call to this function is additive:
+// the keys to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'baz']`
+//
+// log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value
+// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value
+// log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value
+func MaskFieldValuesWithFieldKeys(ctx context.Context, keys ...string) context.Context {
+	lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts.Copy())
+
+	return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
+}
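Regex-based masking, defined next, is the usual tool when a secret has a recognizable shape rather than a known key; a hedged sketch with an invented token pattern:

```go
package example

import (
	"context"
	"regexp"

	"github.com/hashicorp/terraform-plugin-log/tfsdklog"
)

// tokenPattern is an invented, example-only shape for access tokens.
var tokenPattern = regexp.MustCompile(`ghp_[A-Za-z0-9]{36}`)

func maskTokens(ctx context.Context) context.Context {
	// Any matching substring in a string field value is replaced with
	// `***`, per MaskAllFieldValuesRegexes below.
	return tfsdklog.MaskAllFieldValuesRegexes(ctx, tokenPattern)
}
```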
+// MaskAllFieldValuesRegexes returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) all field value substrings
+// matching one of the given *regexp.Regexp.
+//
+// Note that the replacement happens only for field values of type string.
+//
+// Each call to this function is additive:
+// the regexps to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `[regexp.MustCompile("(foo|bar)")]`
+//
+// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value
+// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value
+// log3 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value
+func MaskAllFieldValuesRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context {
+	lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithMaskAllFieldValuesRegexes(expressions...)(lOpts.Copy())
+
+	return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
+}
+
+// MaskAllFieldValuesStrings returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) all field value substrings
+// equal to one of the given strings.
+//
+// Note that the replacement happens only for field values of type string.
+//
+// Each call to this function is additive:
+// the strings to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'bar']`
+//
+// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value
+// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value
+// log3 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value
+func MaskAllFieldValuesStrings(ctx context.Context, matchingStrings ...string) context.Context {
+	lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithMaskAllFieldValuesStrings(matchingStrings...)(lOpts.Copy())
+
+	return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
+}
+
+// MaskMessageRegexes returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) all message substrings
+// matching one of the given *regexp.Regexp.
+//
+// Each call to this function is additive:
+// the regexps to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `[regexp.MustCompile("(foo|bar)")]`
+//
+// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is
+// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion
+func MaskMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context {
+	lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts.Copy())
+
+	return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
+}
+
+// MaskMessageStrings returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) all message substrings
+// equal to one of the given strings.
+//
+// Each call to this function is additive:
+// the strings to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'bar']`
+//
+// log1 = `{ msg = "banana apple ***", fields = { 'k1': 'foo, bar, baz' }` -> masked portion
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is
+// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion
+func MaskMessageStrings(ctx context.Context, matchingStrings ...string) context.Context {
+	lOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts.Copy())
+
+	return logging.SetSDKRootTFLoggerOpts(ctx, lOpts)
+}
+
+// MaskLogRegexes is a shortcut to invoke MaskMessageRegexes and MaskAllFieldValuesRegexes using the same input.
+// Refer to those functions for details.
+func MaskLogRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context {
+	return MaskMessageRegexes(MaskAllFieldValuesRegexes(ctx, expressions...), expressions...)
+}
+
+// MaskLogStrings is a shortcut to invoke MaskMessageStrings and MaskAllFieldValuesStrings using the same input.
+// Refer to those functions for details.
+func MaskLogStrings(ctx context.Context, matchingStrings ...string) context.Context {
+	return MaskMessageStrings(MaskAllFieldValuesStrings(ctx, matchingStrings...), matchingStrings...)
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sink.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sink.go
new file mode 100644
index 0000000000..6326901f17
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sink.go
@@ -0,0 +1,152 @@
+package tfsdklog
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"sync"
+	"syscall"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/terraform-plugin-log/internal/logging"
+	testing "github.com/mitchellh/go-testing-interface"
+)
+
+const (
+	// envLog is the environment variable that users can set to control the
+	// least-verbose level of logs that will be output during testing. If
+	// this environment variable is not set, the level defaults to off. This
+	// is just the default; specific loggers and sub-loggers can set a lower
+	// or higher verbosity level without a problem right now. In theory,
+	// they should not be able to.
+	//
+	// Valid values are TRACE, DEBUG, INFO, WARN, ERROR, and OFF. A special
+	// pseudo-value, JSON, will set the value to TRACE and output the
+	// results in their JSON format.
+	envLog = "TF_LOG"
+
+	// envLogFile is the environment variable that controls where log
+	// output is written during tests. By default, logs will be written to
+	// standard error. Setting this environment variable to another file
+	// path will write logs there instead during tests.
+	envLogFile = "TF_LOG_PATH"
+
+	// envAccLogFile is the environment variable that controls where log
+	// output from the provider under test and the Terraform binary (and
+	// other providers) will be written during tests. Setting this
+	// environment variable to a file will combine all log output in that
+	// file. If both this environment variable and TF_LOG_PATH are set,
+	// this environment variable will take precedence.
+	envAccLogFile = "TF_ACC_LOG_PATH"
+
+	// envLogPathMask is the environment variable that controls per-test
+	// logging output. It should be set to a fmt-compatible string, where a
+	// single %s will be replaced with the test name, and the log output
+	// for that test (and only that test) will be written to that file.
+	// Setting this environment variable will override TF_LOG_PATH.
+	// Only the logs for the provider under test are included.
+	envLogPathMask = "TF_LOG_PATH_MASK"
+)
+
+// ValidLevels are the string representations of levels that can be set for
+// loggers.
+var ValidLevels = []string{"TRACE", "DEBUG", "INFO", "WARN", "ERROR", "OFF"}
+
+// Only show the invalid log level message once across any number of level lookups.
+var invalidLogLevelMessage sync.Once
+
+// RegisterTestSink sets up a logging sink, for use with test frameworks and
+// other cases where plugin logs don't get routed through Terraform. This
+// applies the same filtering and file output behaviors that Terraform does.
+//
+// RegisterTestSink should only ever be called by test frameworks; providers
+// should never call it.
+//
+// RegisterTestSink must be called prior to any loggers being set up or
+// instantiated.
+func RegisterTestSink(ctx context.Context, t testing.T) context.Context {
+	logger, loggerOptions := newSink(t)
+
+	ctx = logging.SetSink(ctx, logger)
+	ctx = logging.SetSinkOptions(ctx, loggerOptions)
+
+	return ctx
+}
+
+func newSink(t testing.T) (hclog.Logger, *hclog.LoggerOptions) {
+	logOutput := io.Writer(os.Stderr)
+	var json bool
+	var logLevel hclog.Level
+	var logFile string
+
+	envLevel := strings.ToUpper(os.Getenv(envLog))
+
+	// if TF_LOG_PATH is set, output logs there
+	if logPath := os.Getenv(envLogFile); logPath != "" {
+		logFile = logPath
+	}
+
+	// if TF_ACC_LOG_PATH is set, output logs there instead
+	if logPath := os.Getenv(envAccLogFile); logPath != "" {
+		logFile = logPath
+		// helper/resource makes this default to TRACE, so we should,
+		// too
+		envLevel = "TRACE"
+	}
+
+	// if TF_LOG_PATH_MASK is set, use a test-name specific logging file,
+	// instead
+	if logPathMask := os.Getenv(envLogPathMask); logPathMask != "" {
+		testName := strings.Replace(t.Name(), "/", "__", -1)
+		logFile = fmt.Sprintf(logPathMask, testName)
+	}
+
+	if logFile != "" {
+		f, err := os.OpenFile(logFile, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error opening log file: %v\n", err)
+		} else {
+			logOutput = f
+		}
+	}
+
+	// set the level from TF_LOG, defaulting to off when it is unset
+	if envLevel == "" {
+		logLevel = hclog.Off
+	} else if envLevel == "JSON" {
+		logLevel = hclog.Trace
+		json = true
+	} else if isValidLogLevel(envLevel) {
+		logLevel = hclog.LevelFromString(envLevel)
+	} else {
+		invalidLogLevelMessage.Do(func() {
+			fmt.Fprintf(
+				os.Stderr,
+				"[WARN] Invalid log level: %q. Defaulting to level: OFF. Valid levels are: %+v\n",
+				envLevel,
+				ValidLevels,
+			)
+		})
+	}
+
+	loggerOptions := &hclog.LoggerOptions{
+		Level:             logLevel,
+		Output:            logOutput,
+		IndependentLevels: true,
+		JSONFormat:        json,
+	}
+
+	return hclog.New(loggerOptions), loggerOptions
+}
+
+func isValidLogLevel(level string) bool {
+	for _, validLevel := range ValidLevels {
+		if level == validLevel {
+			return true
+		}
+	}
+
+	return false
+}
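For tests that bypass Terraform entirely, the sink above is wired in explicitly; a hedged sketch of the intended call order (the test body is illustrative):

```go
package example

import (
	"context"
	"testing"

	"github.com/hashicorp/terraform-plugin-log/tfsdklog"
)

func TestWithLogSink(t *testing.T) {
	// RegisterTestSink applies the TF_LOG / TF_LOG_PATH handling above and
	// must run before any loggers are created.
	ctx := tfsdklog.RegisterTestSink(context.Background(), t)

	// With no explicit level option, the root SDK logger inherits the
	// sink's level, which was derived from TF_LOG.
	ctx = tfsdklog.NewRootSDKLogger(ctx)
	tfsdklog.Info(ctx, "sink ready")
}
```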
diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go
new file mode 100644
index 0000000000..0aeb5463ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go
@@ -0,0 +1,447 @@
+package tfsdklog
+
+import (
+	"context"
+	"regexp"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/terraform-plugin-log/internal/hclogutils"
+	"github.com/hashicorp/terraform-plugin-log/internal/logging"
+)
+
+// NewSubsystem returns a new context.Context that contains a subsystem logger
+// configured with the passed options, named after the subsystem argument.
+//
+// Subsystem loggers allow different areas of a plugin codebase to use
+// different logging levels, giving developers more fine-grained control over
+// what is logged and with what verbosity. They're best utilized for logical
+// concerns that are sometimes helpful to log, but may generate unwanted noise
+// at other times.
+//
+// The only Options supported for subsystems are the Options for setting the
+// level and additional location offset of the logger.
+func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Option) context.Context {
+	logger := logging.GetSDKRootLogger(ctx)
+
+	if logger == nil {
+		// This should essentially never happen in production: the root
+		// logger for provider code should be injected by whatever the
+		// provider developer is using, so this is really only likely in
+		// unit tests; at most, making this a no-op is fine.
+		return ctx
+	}
+
+	rootLoggerOptions := logging.GetSDKRootLoggerOptions(ctx)
+	subLoggerTFLoggerOpts := logging.ApplyLoggerOpts(options...)
+
+	// If root logger options are not available,
+	// fall back to creating a logger named like the given subsystem.
+	// This will preserve the root logger options,
+	// but cannot make changes beyond setting the level
+	// due to limitations with the hclog.Logger interface.
+	var subLogger hclog.Logger
+	if rootLoggerOptions == nil {
+		subLogger = logger.Named(subsystem)
+
+		if subLoggerTFLoggerOpts.AdditionalLocationOffset != 1 {
+			logger.Warn("Unable to create logging subsystem with AdditionalLocationOffset due to missing root logger options")
+		}
+	} else {
+		subLoggerOptions := hclogutils.LoggerOptionsCopy(rootLoggerOptions)
+		subLoggerOptions.Name = subLoggerOptions.Name + "." + subsystem
+
+		if subLoggerTFLoggerOpts.AdditionalLocationOffset != 1 {
+			subLoggerOptions.AdditionalLocationOffset = subLoggerTFLoggerOpts.AdditionalLocationOffset
+		}
+
+		subLogger = hclog.New(subLoggerOptions)
+	}
+
+	// Cache subsystem logger level outside context for performance reasons.
+	subsystemLevelsMutex.Lock()
+
+	subsystemLevels[subsystem] = subLoggerTFLoggerOpts.Level
+
+	subsystemLevelsMutex.Unlock()
+
+	// Set the configured log level
+	if subLoggerTFLoggerOpts.Level != hclog.NoLevel {
+		subLogger.SetLevel(subLoggerTFLoggerOpts.Level)
+	}
+
+	// Propagate root fields to the subsystem logger
+	if subLoggerTFLoggerOpts.IncludeRootFields {
+		loggerTFOpts := logging.GetSDKRootTFLoggerOpts(ctx)
+		subLoggerTFLoggerOpts = logging.WithFields(loggerTFOpts.Fields)(subLoggerTFLoggerOpts)
+	}
+
+	// Set the subsystem LoggerOpts in the context
+	ctx = logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, subLoggerTFLoggerOpts)
+
+	return logging.SetSDKSubsystemLogger(ctx, subsystem, subLogger)
+}
+
+// SubsystemSetField returns a new context.Context that has a modified logger for
+// the specified subsystem in it which will include key and value as fields
+// in all its log output.
+//
+// If the same key is used multiple times (i.e. key collision),
+// the last one set is the one that gets persisted and then output with the logs.
+func SubsystemSetField(ctx context.Context, subsystem, key string, value interface{}) context.Context {
+	lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithField(key, value)(lOpts.Copy())
+
+	return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts)
+}
+
+// SubsystemTrace logs `msg` at the trace level to the subsystem logger
+// specified in `ctx`, with optional `additionalFields` structured key-value
+// fields in the log output. Fields are shallow merged with any defined on the
+// subsystem logger, e.g. by the `SubsystemSetField()` function, and across
+// multiple maps.
+func SubsystemTrace(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) {
+	if !subsystemWouldLog(subsystem, hclog.Trace) {
+		return
+	}
+
+	logger := logging.GetSDKSubsystemLogger(ctx, subsystem)
+	if logger == nil {
+		if logging.GetSDKRootLogger(ctx) == nil {
+			// logging isn't set up, nothing we can do, just silently fail;
+			// this should basically never happen in production
+			return
+		}
+		// create a new logger if one doesn't exist
+		logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning)
+	}
+
+	additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields)
+	if shouldOmit {
+		return
+	}
+
+	logger.Trace(msg, additionalArgs...)
+}
+
+// SubsystemDebug logs `msg` at the debug level to the subsystem logger
+// specified in `ctx`, with optional `additionalFields` structured key-value
+// fields in the log output. Fields are shallow merged with any defined on the
+// subsystem logger, e.g. by the `SubsystemSetField()` function, and across
+// multiple maps.
+func SubsystemDebug(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { + if !subsystemWouldLog(subsystem, hclog.Debug) { + return + } + + logger := logging.GetSDKSubsystemLogger(ctx, subsystem) + if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } + // create a new logger if one doesn't exist + logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Debug(msg, additionalArgs...) +} + +// SubsystemInfo logs `msg` at the info level to the subsystem logger +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemInfo(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { + if !subsystemWouldLog(subsystem, hclog.Info) { + return + } + + logger := logging.GetSDKSubsystemLogger(ctx, subsystem) + if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } + // create a new logger if one doesn't exist + logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Info(msg, additionalArgs...) +} + +// SubsystemWarn logs `msg` at the warn level to the subsystem logger +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. +func SubsystemWarn(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) { + if !subsystemWouldLog(subsystem, hclog.Warn) { + return + } + + logger := logging.GetSDKSubsystemLogger(ctx, subsystem) + if logger == nil { + if logging.GetSDKRootLogger(ctx) == nil { + // logging isn't set up, nothing we can do, just silently fail + // this should basically never happen in production + return + } + // create a new logger if one doesn't exist + logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning) + } + + additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields) + if shouldOmit { + return + } + + logger.Warn(msg, additionalArgs...) +} + +// SubsystemError logs `msg` at the error level to the subsystem logger +// specified in `ctx`, with optional `additionalFields` structured key-value +// fields in the log output. Fields are shallow merged with any defined on the +// subsystem logger, e.g. by the `SubsystemSetField()` function, and across +// multiple maps. 
+func SubsystemError(ctx context.Context, subsystem, msg string, additionalFields ...map[string]interface{}) {
+	if !subsystemWouldLog(subsystem, hclog.Error) {
+		return
+	}
+
+	logger := logging.GetSDKSubsystemLogger(ctx, subsystem)
+	if logger == nil {
+		if logging.GetSDKRootLogger(ctx) == nil {
+			// logging isn't set up, nothing we can do, just silently fail;
+			// this should basically never happen in production
+			return
+		}
+		// create a new logger if one doesn't exist
+		logger = logging.GetSDKSubsystemLogger(NewSubsystem(ctx, subsystem), subsystem).With("new_logger_warning", logging.NewSDKSubsystemLoggerWarning)
+	}
+
+	additionalArgs, shouldOmit := logging.OmitOrMask(logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem), &msg, additionalFields)
+	if shouldOmit {
+		return
+	}
+
+	logger.Error(msg, additionalArgs...)
+}
+
+// SubsystemOmitLogWithFieldKeys returns a new context.Context that has a modified logger
+// that will omit any log entry when any of the given keys is found
+// within its fields.
+//
+// Each call to this function is additive:
+// the keys to omit by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'baz']`
+//
+// log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted
+// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> printed
+// log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted
+func SubsystemOmitLogWithFieldKeys(ctx context.Context, subsystem string, keys ...string) context.Context {
+	lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts.Copy())
+
+	return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts)
+}
+
+// SubsystemOmitLogWithMessageRegexes returns a new context.Context that has a modified logger
+// that will omit any log whose message matches any of the
+// given *regexp.Regexp.
+//
+// Each call to this function is additive:
+// the regexps to omit by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `[regexp.MustCompile("(foo|bar)")]`
+//
+// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed
+// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted
+func SubsystemOmitLogWithMessageRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context {
+	lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts.Copy())
+
+	return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts)
+}
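Putting the subsystem pieces together: a hedged sketch of a provider-side "http" subsystem (the name, pattern, and messages are invented) combining its own level with one of these filters:

```go
package example

import (
	"context"
	"regexp"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/terraform-plugin-log/tfsdklog"
)

func debugHTTP(ctx context.Context) {
	// A subsystem with its own level, independent of the root SDK logger.
	ctx = tfsdklog.NewSubsystem(ctx, "http", tfsdklog.WithLevel(hclog.Debug))

	// Drop noisy keep-alive chatter while keeping other debug output.
	ctx = tfsdklog.SubsystemOmitLogWithMessageRegexes(ctx, "http",
		regexp.MustCompile("health check"))

	tfsdklog.SubsystemDebug(ctx, "http", "request sent",
		map[string]interface{}{"method": "GET"})
}
```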
+// SubsystemOmitLogWithMessageStrings returns a new context.Context that has a modified logger
+// that will omit any log whose message contains any of the given strings.
+//
+// Each call to this function is additive:
+// the strings to omit by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'bar']`
+//
+// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed
+// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted
+func SubsystemOmitLogWithMessageStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context {
+	lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts.Copy())
+
+	return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts)
+}
+
+// SubsystemMaskFieldValuesWithFieldKeys returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) any field value where the
+// key matches one of the given keys.
+//
+// Each call to this function is additive:
+// the keys to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'baz']`
+//
+// log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value
+// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value
+// log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value
+func SubsystemMaskFieldValuesWithFieldKeys(ctx context.Context, subsystem string, keys ...string) context.Context {
+	lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts.Copy())
+
+	return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts)
+}
+
+// SubsystemMaskAllFieldValuesRegexes returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) all field value substrings
+// matching one of the given *regexp.Regexp.
+//
+// Each call to this function is additive:
+// the regexps to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `[regexp.MustCompile("(foo|bar)")]`
+//
+// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value
+// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value
+// log3 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value
+func SubsystemMaskAllFieldValuesRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context {
+	lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithMaskAllFieldValuesRegexes(expressions...)(lOpts.Copy())
+
+	return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts)
+}
+
+// SubsystemMaskAllFieldValuesStrings returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) all field value substrings
+// equal to one of the given strings.
+//
+// Each call to this function is additive:
+// the strings to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'bar']`
+//
+// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value
+// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value
+// log3 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value
+func SubsystemMaskAllFieldValuesStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context {
+	lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithMaskAllFieldValuesStrings(matchingStrings...)(lOpts.Copy())
+
+	return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts)
+}
+
+// SubsystemMaskMessageRegexes returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) all message substrings
+// matching one of the given *regexp.Regexp.
+//
+// Each call to this function is additive:
+// the regexps to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `[regexp.MustCompile("(foo|bar)")]`
+//
+// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is
+// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion
+func SubsystemMaskMessageRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context {
+	lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts.Copy())
+
+	return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts)
+}
+
+// SubsystemMaskMessageStrings returns a new context.Context that has a modified logger
+// that masks (replaces) with asterisks (`***`) all message substrings
+// equal to one of the given strings.
+//
+// Each call to this function is additive:
+// the strings to mask by are added to the existing configuration.
+//
+// Example:
+//
+// configuration = `['foo', 'bar']`
+//
+// log1 = `{ msg = "banana apple ***", fields = { 'k1': 'foo, bar, baz' }` -> masked portion
+// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is
+// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion
+func SubsystemMaskMessageStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context {
+	lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem)
+
+	// Copy to prevent slice/map aliasing issues.
+	// Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131
+	lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts.Copy())
+
+	return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts)
+}
+
+// SubsystemMaskLogRegexes is a shortcut to invoke SubsystemMaskMessageRegexes and SubsystemMaskAllFieldValuesRegexes using the same input.
+// Refer to those functions for details.
+func SubsystemMaskLogRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context {
+	return SubsystemMaskMessageRegexes(SubsystemMaskAllFieldValuesRegexes(ctx, subsystem, expressions...), subsystem, expressions...)
+}
+
+// SubsystemMaskLogStrings is a shortcut to invoke SubsystemMaskMessageStrings and SubsystemMaskAllFieldValuesStrings using the same input.
+// Refer to those functions for details. +func SubsystemMaskLogStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { + return SubsystemMaskMessageStrings(SubsystemMaskAllFieldValuesStrings(ctx, subsystem, matchingStrings...), subsystem, matchingStrings...) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE deleted file mode 100644 index c33dcc7c92..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. 
such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. 
However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. 
Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/acctest/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/acctest/doc.go deleted file mode 100644 index 08bff3c88b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/acctest/doc.go +++ /dev/null @@ -1,31 +0,0 @@ -// Package acctest provides the ability to opt in to the new binary test driver. The binary -// test driver allows you to run your acceptance tests with a binary of Terraform instead of -// an emulated version packaged inside the SDK. This allows for a number of important -// enhancements, but most notably a more realistic testing experience and matrix testing -// against multiple versions of Terraform CLI. This also allows the SDK to be completely -// separated, at a dependency level, from the Terraform CLI, as long as it is >= 0.12.0 -// -// The new test driver must be enabled by initialising the test helper in your TestMain -// function in all provider packages that run acceptance tests. Most providers have only -// one package. -// -// In v2 of the SDK, the binary test driver will be mandatory. 
-// -// After importing this package, you can add code similar to the following: -// -// func TestMain(m *testing.M) { -// acctest.UseBinaryDriver("provider_name", Provider) -// resource.TestMain(m) -// } -// -// Where `Provider` is the function that returns the instance of a configured `terraform.ResourceProvider` -// Some providers already have a TestMain defined, usually for the purpose of enabling test -// sweepers. These additional occurrences should be removed. -// -// Initialising the binary test helper using UseBinaryDriver causes all tests to be run using -// the new binary driver. Until SDK v2, the DisableBinaryDriver boolean property can be used -// to use the legacy test driver for an individual TestCase. -// -// It is no longer necessary to import other Terraform providers as Go modules: these -// imports should be removed. -package acctest diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/acctest/helper.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/acctest/helper.go deleted file mode 100644 index 4fe1634213..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/acctest/helper.go +++ /dev/null @@ -1,28 +0,0 @@ -package acctest - -import ( - "log" - "os" - - "github.com/hashicorp/terraform-plugin-sdk/plugin" - tftest "github.com/hashicorp/terraform-plugin-test/v2" -) - -var TestHelper *tftest.Helper - -func UseBinaryDriver(name string, providerFunc plugin.ProviderFunc) { - log.Println("[DEBUG] not using binary driver name, it's no longer needed") - sourceDir, err := os.Getwd() - if err != nil { - panic(err) - } - - if tftest.RunningAsPlugin() { - plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: providerFunc, - }) - os.Exit(0) - } else { - TestHelper = tftest.AutoInitProviderHelper(sourceDir) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/acctest.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/acctest.go deleted file mode 100644 index 9d31031a47..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/acctest.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package acctest contains for Terraform Acceptance Tests -package acctest diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/random.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/random.go deleted file mode 100644 index 258e4db70f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/random.go +++ /dev/null @@ -1,176 +0,0 @@ -package acctest - -import ( - "bytes" - crand "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "math/big" - "math/rand" - "net" - "strings" - "time" - - "golang.org/x/crypto/ssh" - - "github.com/apparentlymart/go-cidr/cidr" -) - -func init() { - rand.Seed(time.Now().UTC().UnixNano()) -} - -// Helpers for generating random tidbits for use in identifiers to prevent -// collisions in acceptance tests. 
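For context on the v1 randomization helpers being removed above: acceptance tests used them to randomize fixture names so parallel runs cannot collide. A minimal sketch, assuming a hypothetical resource type and attributes (the helper signatures are those of the deleted code):

package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
)

// randomTestConfig renders a hypothetical Terraform configuration whose
// resource name is randomized so concurrent test runs cannot collide.
func randomTestConfig() string {
	name := acctest.RandomWithPrefix("tf-acc-test") // e.g. "tf-acc-test-61293…"
	desc := acctest.RandString(12)                  // 12 random alphanumeric chars
	return fmt.Sprintf(`
resource "example_thing" "test" {
  name        = %q
  description = %q
}
`, name, desc)
}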
- -// RandInt generates a random integer -func RandInt() int { - return rand.New(rand.NewSource(time.Now().UnixNano())).Int() -} - -// RandomWithPrefix is used to generate a unique name with a prefix, for -// randomizing names in acceptance tests -func RandomWithPrefix(name string) string { - return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int()) -} - -func RandIntRange(min int, max int) int { - source := rand.New(rand.NewSource(time.Now().UnixNano())) - rangeMax := max - min - - return int(source.Int31n(int32(rangeMax))) -} - -// RandString generates a random alphanumeric string of the length specified -func RandString(strlen int) string { - return RandStringFromCharSet(strlen, CharSetAlphaNum) -} - -// RandStringFromCharSet generates a random string by selecting characters from -// the charset provided -func RandStringFromCharSet(strlen int, charSet string) string { - result := make([]byte, strlen) - for i := 0; i < strlen; i++ { - result[i] = charSet[rand.Intn(len(charSet))] - } - return string(result) -} - -// RandSSHKeyPair generates a public and private SSH key pair. The public key is -// returned in OpenSSH format, and the private key is PEM encoded. -func RandSSHKeyPair(comment string) (string, string, error) { - privateKey, privateKeyPEM, err := genPrivateKey() - if err != nil { - return "", "", err - } - - publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey) - if err != nil { - return "", "", err - } - keyMaterial := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(publicKey))) - return fmt.Sprintf("%s %s", keyMaterial, comment), privateKeyPEM, nil -} - -// RandTLSCert generates a self-signed TLS certificate with a newly created -// private key, and returns both the cert and the private key PEM encoded. -func RandTLSCert(orgName string) (string, string, error) { - template := &x509.Certificate{ - SerialNumber: big.NewInt(int64(RandInt())), - Subject: pkix.Name{ - Organization: []string{orgName}, - }, - NotBefore: time.Now(), - NotAfter: time.Now().Add(24 * time.Hour), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - } - - privateKey, privateKeyPEM, err := genPrivateKey() - if err != nil { - return "", "", err - } - - cert, err := x509.CreateCertificate(crand.Reader, template, template, &privateKey.PublicKey, privateKey) - if err != nil { - return "", "", err - } - - certPEM, err := pemEncode(cert, "CERTIFICATE") - if err != nil { - return "", "", err - } - - return certPEM, privateKeyPEM, nil -} - -// RandIpAddress returns a random IP address in the specified CIDR block. -// The prefix length must be less than 31. 
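A brief usage sketch for the fixture generators above; the concrete values shown in comments are illustrative only:

package example

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
)

func exampleFixtures() {
	// Any host address inside the block, e.g. "10.0.0.137".
	ip, err := acctest.RandIpAddress("10.0.0.0/24")
	if err != nil {
		log.Fatal(err)
	}

	// OpenSSH-format public key plus PEM-encoded private key.
	pub, privPEM, err := acctest.RandSSHKeyPair("ci@example.com")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(ip, len(pub) > 0, len(privPEM) > 0)
}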
-func RandIpAddress(s string) (string, error) { - _, network, err := net.ParseCIDR(s) - if err != nil { - return "", err - } - - firstIp, lastIp := cidr.AddressRange(network) - first := &big.Int{} - first.SetBytes([]byte(firstIp)) - last := &big.Int{} - last.SetBytes([]byte(lastIp)) - r := &big.Int{} - r.Sub(last, first) - if len := r.BitLen(); len > 31 { - return "", fmt.Errorf("CIDR range is too large: %d", len) - } - - max := int(r.Int64()) - if max == 0 { - // panic: invalid argument to Int31n - return firstIp.String(), nil - } - - host, err := cidr.Host(network, RandIntRange(0, max)) - if err != nil { - return "", err - } - - return host.String(), nil -} - -func genPrivateKey() (*rsa.PrivateKey, string, error) { - privateKey, err := rsa.GenerateKey(crand.Reader, 1024) - if err != nil { - return nil, "", err - } - - privateKeyPEM, err := pemEncode(x509.MarshalPKCS1PrivateKey(privateKey), "RSA PRIVATE KEY") - if err != nil { - return nil, "", err - } - - return privateKey, privateKeyPEM, nil -} - -func pemEncode(b []byte, block string) (string, error) { - var buf bytes.Buffer - pb := &pem.Block{Type: block, Bytes: b} - if err := pem.Encode(&buf, pb); err != nil { - return "", err - } - - return buf.String(), nil -} - -const ( - // CharSetAlphaNum is the alphanumeric character set for use with - // RandStringFromCharSet - CharSetAlphaNum = "abcdefghijklmnopqrstuvwxyz012346789" - - // CharSetAlpha is the alphabetical character set for use with - // RandStringFromCharSet - CharSetAlpha = "abcdefghijklmnopqrstuvwxyz" -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/remotetests.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/remotetests.go deleted file mode 100644 index 87c60b8be4..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/remotetests.go +++ /dev/null @@ -1,27 +0,0 @@ -package acctest - -import ( - "net/http" - "os" - "testing" -) - -// SkipRemoteTestsEnvVar is an environment variable that can be set by a user -// running the tests in an environment with limited network connectivity. By -// default, tests requiring internet connectivity make an effort to skip if no -// internet is available, but in some cases the smoke test will pass even -// though the test should still be skipped. -const SkipRemoteTestsEnvVar = "TF_SKIP_REMOTE_TESTS" - -// RemoteTestPrecheck is meant to be run by any unit test that requires -// outbound internet connectivity. The test will be skipped if it's -// unavailable. -func RemoteTestPrecheck(t *testing.T) { - if os.Getenv(SkipRemoteTestsEnvVar) != "" { - t.Skipf("skipping test, %s was set", SkipRemoteTestsEnvVar) - } - - if _, err := http.Get("http://google.com"); err != nil { - t.Skipf("skipping, internet seems to not be available: %s", err) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go deleted file mode 100644 index b09199953e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go +++ /dev/null @@ -1,72 +0,0 @@ -package customdiff - -import ( - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// All returns a CustomizeDiffFunc that runs all of the given -// CustomizeDiffFuncs and returns all of the errors produced. -// -// If one function produces an error, functions after it are still run. -// If this is not desirable, use function Sequence instead. 
-// -// If multiple functions returns errors, the result is a multierror. -// -// For example: -// -// &schema.Resource{ -// // ... -// CustomizeDiff: customdiff.All( -// customdiff.ValidateChange("size", func (old, new, meta interface{}) error { -// // If we are increasing "size" then the new value must be -// // a multiple of the old value. -// if new.(int) <= old.(int) { -// return nil -// } -// if (new.(int) % old.(int)) != 0 { -// return fmt.Errorf("new size value must be an integer multiple of old value %d", old.(int)) -// } -// return nil -// }), -// customdiff.ForceNewIfChange("size", func (old, new, meta interface{}) bool { -// // "size" can only increase in-place, so we must create a new resource -// // if it is decreased. -// return new.(int) < old.(int) -// }), -// customdiff.ComputedIf("version_id", func (d *schema.ResourceDiff, meta interface{}) bool { -// // Any change to "content" causes a new "version_id" to be allocated. -// return d.HasChange("content") -// }), -// ), -// } -// -func All(funcs ...schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - var err error - for _, f := range funcs { - thisErr := f(d, meta) - if thisErr != nil { - err = multierror.Append(err, thisErr) - } - } - return err - } -} - -// Sequence returns a CustomizeDiffFunc that runs all of the given -// CustomizeDiffFuncs in sequence, stopping at the first one that returns -// an error and returning that error. -// -// If all functions succeed, the combined function also succeeds. -func Sequence(funcs ...schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - for _, f := range funcs { - err := f(d, meta) - if err != nil { - return err - } - } - return nil - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go deleted file mode 100644 index 54ea5c4020..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go +++ /dev/null @@ -1,16 +0,0 @@ -package customdiff - -import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// ComputedIf returns a CustomizeDiffFunc that sets the given key's new value -// as computed if the given condition function returns true. -func ComputedIf(key string, f ResourceConditionFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - if f(d, meta) { - d.SetNewComputed(key) - } - return nil - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go deleted file mode 100644 index 1d8e2bfd65..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go +++ /dev/null @@ -1,60 +0,0 @@ -package customdiff - -import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// ResourceConditionFunc is a function type that makes a boolean decision based -// on an entire resource diff. -type ResourceConditionFunc func(d *schema.ResourceDiff, meta interface{}) bool - -// ValueChangeConditionFunc is a function type that makes a boolean decision -// by comparing two values. -type ValueChangeConditionFunc func(old, new, meta interface{}) bool - -// ValueConditionFunc is a function type that makes a boolean decision based -// on a given value. 
-type ValueConditionFunc func(value, meta interface{}) bool - -// If returns a CustomizeDiffFunc that calls the given condition -// function and then calls the given CustomizeDiffFunc only if the condition -// function returns true. -// -// This can be used to include conditional customizations when composing -// customizations using All and Sequence, but should generally be used only in -// simple scenarios. Prefer directly writing a CustomizeDiffFunc containing -// a conditional branch if the given CustomizeDiffFunc is already a -// locally-defined function, since this avoids obscuring the control flow. -func If(cond ResourceConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - if cond(d, meta) { - return f(d, meta) - } - return nil - } -} - -// IfValueChange returns a CustomizeDiffFunc that calls the given condition -// function with the old and new values of the given key and then calls the -// given CustomizeDiffFunc only if the condition function returns true. -func IfValueChange(key string, cond ValueChangeConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - old, new := d.GetChange(key) - if cond(old, new, meta) { - return f(d, meta) - } - return nil - } -} - -// IfValue returns a CustomizeDiffFunc that calls the given condition -// function with the new values of the given key and then calls the -// given CustomizeDiffFunc only if the condition function returns true. -func IfValue(key string, cond ValueConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - if cond(d.Get(key), meta) { - return f(d, meta) - } - return nil - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/doc.go deleted file mode 100644 index c6ad1199cd..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package customdiff provides a set of reusable and composable functions -// to enable more "declarative" use of the CustomizeDiff mechanism available -// for resources in package helper/schema. -// -// The intent of these helpers is to make the intent of a set of diff -// customizations easier to see, rather than lost in a sea of Go function -// boilerplate. They should _not_ be used in situations where they _obscure_ -// intent, e.g. by over-using the composition functions where a single -// function containing normal Go control flow statements would be more -// straightforward. -package customdiff diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go deleted file mode 100644 index 26afa8cb69..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go +++ /dev/null @@ -1,40 +0,0 @@ -package customdiff - -import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// ForceNewIf returns a CustomizeDiffFunc that flags the given key as -// requiring a new resource if the given condition function returns true. -// -// The return value of the condition function is ignored if the old and new -// values of the field compare equal, since no attribute diff is generated in -// that case. 
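For orientation, the conditional helpers in this file compose like the following sketch; the schema and the "tier" attribute are hypothetical, while the signatures are those of the v1 code being deleted:

package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/customdiff"
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// resourceExample guards a hypothetical "tier" attribute: the diff is
// rejected only when the value changes from "premium" to "free".
func resourceExample() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"tier": {Type: schema.TypeString, Optional: true},
		},
		CustomizeDiff: customdiff.IfValueChange("tier",
			func(old, new, meta interface{}) bool {
				return old.(string) == "premium" && new.(string) == "free"
			},
			func(d *schema.ResourceDiff, meta interface{}) error {
				return fmt.Errorf("cannot downgrade tier from premium to free")
			},
		),
	}
}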
-func ForceNewIf(key string, f ResourceConditionFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - if f(d, meta) { - d.ForceNew(key) - } - return nil - } -} - -// ForceNewIfChange returns a CustomizeDiffFunc that flags the given key as -// requiring a new resource if the given condition function returns true. -// -// The return value of the condition function is ignored if the old and new -// values compare equal, since no attribute diff is generated in that case. -// -// This function is similar to ForceNewIf but provides the condition function -// only the old and new values of the given key, which leads to more compact -// and explicit code in the common case where the decision can be made with -// only the specific field value. -func ForceNewIfChange(key string, f ValueChangeConditionFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - old, new := d.GetChange(key) - if f(old, new, meta) { - d.ForceNew(key) - } - return nil - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go deleted file mode 100644 index 0bc2c69505..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go +++ /dev/null @@ -1,38 +0,0 @@ -package customdiff - -import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// ValueChangeValidationFunc is a function type that validates the difference -// (or lack thereof) between two values, returning an error if the change -// is invalid. -type ValueChangeValidationFunc func(old, new, meta interface{}) error - -// ValueValidationFunc is a function type that validates a particular value, -// returning an error if the value is invalid. -type ValueValidationFunc func(value, meta interface{}) error - -// ValidateChange returns a CustomizeDiffFunc that applies the given validation -// function to the change for the given key, returning any error produced. -func ValidateChange(key string, f ValueChangeValidationFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - old, new := d.GetChange(key) - return f(old, new, meta) - } -} - -// ValidateValue returns a CustomizeDiffFunc that applies the given validation -// function to value of the given key, returning any error produced. -// -// This should generally not be used since it is functionally equivalent to -// a validation function applied directly to the schema attribute in question, -// but is provided for situations where composing multiple CustomizeDiffFuncs -// together makes intent clearer than spreading that validation across the -// schema. -func ValidateValue(key string, f ValueValidationFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - val := d.Get(key) - return f(val, meta) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/hashcode/hashcode.go deleted file mode 100644 index f0c022dd23..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/hashcode/hashcode.go +++ /dev/null @@ -1,43 +0,0 @@ -package hashcode - -import ( - "bytes" - "fmt" - "hash/crc32" -) - -// String hashes a string to a unique hashcode. -// -// Deprecated: This will be removed in v2 without replacement. 
If you need -// its functionality, you can copy it, import crc32 directly, or reference the -// v1 package. -// -// crc32 returns a uint32, but for our use we need -// and non negative integer. Here we cast to an integer -// and invert it if the result is negative. -func String(s string) int { - v := int(crc32.ChecksumIEEE([]byte(s))) - if v >= 0 { - return v - } - if -v >= 0 { - return -v - } - // v == MinInt - return 0 -} - -// Strings hashes a list of strings to a unique hashcode. -// -// Deprecated: This will be removed in v2 without replacement. If you need -// its functionality, you can copy it, import crc32 directly, or reference the -// v1 package. -func Strings(strings []string) string { - var buf bytes.Buffer - - for _, s := range strings { - buf.WriteString(fmt.Sprintf("%s-", s)) - } - - return fmt.Sprintf("%d", String(buf.String())) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/logging.go deleted file mode 100644 index cc8b50372f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/logging.go +++ /dev/null @@ -1,114 +0,0 @@ -package logging - -import ( - "io" - "io/ioutil" - "log" - "os" - "strings" - "syscall" - - "github.com/hashicorp/logutils" - testing "github.com/mitchellh/go-testing-interface" -) - -// These are the environmental variables that determine if we log, and if -// we log whether or not the log should go to a file. -const ( - EnvLog = "TF_LOG" // Set to True - EnvLogFile = "TF_LOG_PATH" // Set to a file -) - -var ValidLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} - -// LogOutput determines where we should send logs (if anywhere) and the log level. -func LogOutput() (logOutput io.Writer, err error) { - logOutput = ioutil.Discard - - logLevel := LogLevel() - if logLevel == "" { - return - } - - logOutput = os.Stderr - if logPath := os.Getenv(EnvLogFile); logPath != "" { - var err error - logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) - if err != nil { - return nil, err - } - } - - // This was the default since the beginning - logOutput = &logutils.LevelFilter{ - Levels: ValidLevels, - MinLevel: logutils.LogLevel(logLevel), - Writer: logOutput, - } - - return -} - -// SetTestOutput is equivalent to SetOutput, but declares itself a test -// helper so it doesn't show up as the source of errors when testing. -func SetTestOutput(t testing.T) { - setOutput(t) -} - -// SetOutput checks for a log destination with LogOutput, and calls -// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses -// ioutil.Discard. Any error from LogOutout is fatal. -func SetOutput() { - setOutput(nil) -} - -func setOutput(t testing.T) { - if t != nil { - t.Helper() - } - out, err := LogOutput() - if err != nil { - log.Fatal(err) - } - - if out == nil { - out = ioutil.Discard - } - - log.SetOutput(out) -} - -// LogLevel returns the current log level string based the environment vars -func LogLevel() string { - envLevel := os.Getenv(EnvLog) - if envLevel == "" { - return "" - } - - logLevel := "TRACE" - if isValidLogLevel(envLevel) { - // allow following for better ux: info, Info or INFO - logLevel = strings.ToUpper(envLevel) - } else { - log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. 
Valid levels are: %+v", - envLevel, ValidLevels) - } - - return logLevel -} - -// IsDebugOrHigher returns whether or not the current log level is debug or trace -func IsDebugOrHigher() bool { - level := string(LogLevel()) - return level == "DEBUG" || level == "TRACE" -} - -func isValidLogLevel(level string) bool { - for _, l := range ValidLevels { - if strings.ToUpper(level) == string(l) { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/transport.go deleted file mode 100644 index bddabe647a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/transport.go +++ /dev/null @@ -1,70 +0,0 @@ -package logging - -import ( - "bytes" - "encoding/json" - "log" - "net/http" - "net/http/httputil" - "strings" -) - -type transport struct { - name string - transport http.RoundTripper -} - -func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { - if IsDebugOrHigher() { - reqData, err := httputil.DumpRequestOut(req, true) - if err == nil { - log.Printf("[DEBUG] "+logReqMsg, t.name, prettyPrintJsonLines(reqData)) - } else { - log.Printf("[ERROR] %s API Request error: %#v", t.name, err) - } - } - - resp, err := t.transport.RoundTrip(req) - if err != nil { - return resp, err - } - - if IsDebugOrHigher() { - respData, err := httputil.DumpResponse(resp, true) - if err == nil { - log.Printf("[DEBUG] "+logRespMsg, t.name, prettyPrintJsonLines(respData)) - } else { - log.Printf("[ERROR] %s API Response error: %#v", t.name, err) - } - } - - return resp, nil -} - -func NewTransport(name string, t http.RoundTripper) *transport { - return &transport{name, t} -} - -// prettyPrintJsonLines iterates through a []byte line-by-line, -// transforming any lines that are complete json into pretty-printed json. 
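The transport being removed here was installed by wrapping an existing RoundTripper, as in this minimal sketch (the client construction and transport name are hypothetical):

package example

import (
	"net/http"

	"github.com/hashicorp/terraform-plugin-sdk/helper/logging"
)

// newLoggedClient wraps a client so every request and response is dumped
// at DEBUG level (TF_LOG=DEBUG or TRACE), with JSON bodies pretty-printed
// by prettyPrintJsonLines below.
func newLoggedClient() *http.Client {
	client := &http.Client{Transport: http.DefaultTransport}
	client.Transport = logging.NewTransport("Example", client.Transport)
	return client
}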
-func prettyPrintJsonLines(b []byte) string { - parts := strings.Split(string(b), "\n") - for i, p := range parts { - if b := []byte(p); json.Valid(b) { - var out bytes.Buffer - json.Indent(&out, b, "", " ") - parts[i] = out.String() - } - } - return strings.Join(parts, "\n") -} - -const logReqMsg = `%s API Request Details: ----[ REQUEST ]--------------------------------------- -%s ------------------------------------------------------` - -const logRespMsg = `%s API Response Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/error.go deleted file mode 100644 index 7ee21614b9..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/error.go +++ /dev/null @@ -1,79 +0,0 @@ -package resource - -import ( - "fmt" - "strings" - "time" -) - -type NotFoundError struct { - LastError error - LastRequest interface{} - LastResponse interface{} - Message string - Retries int -} - -func (e *NotFoundError) Error() string { - if e.Message != "" { - return e.Message - } - - if e.Retries > 0 { - return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) - } - - return "couldn't find resource" -} - -// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending -type UnexpectedStateError struct { - LastError error - State string - ExpectedState []string -} - -func (e *UnexpectedStateError) Error() string { - return fmt.Sprintf( - "unexpected state '%s', wanted target '%s'. last error: %s", - e.State, - strings.Join(e.ExpectedState, ", "), - e.LastError, - ) -} - -// TimeoutError is returned when WaitForState times out -type TimeoutError struct { - LastError error - LastState string - Timeout time.Duration - ExpectedState []string -} - -func (e *TimeoutError) Error() string { - expectedState := "resource to be gone" - if len(e.ExpectedState) > 0 { - expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) - } - - extraInfo := make([]string, 0) - if e.LastState != "" { - extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) - } - if e.Timeout > 0 { - extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) - } - - suffix := "" - if len(extraInfo) > 0 { - suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) - } - - if e.LastError != nil { - return fmt.Sprintf("timeout while waiting for %s%s: %s", - expectedState, suffix, e.LastError) - } - - return fmt.Sprintf("timeout while waiting for %s%s", - expectedState, suffix) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go deleted file mode 100644 index db12cee202..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go +++ /dev/null @@ -1,43 +0,0 @@ -package resource - -import ( - "context" - "net" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - tfplugin "github.com/hashicorp/terraform-plugin-sdk/plugin" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "google.golang.org/grpc" - "google.golang.org/grpc/test/bufconn" -) - -// 
GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC -// shim and starts it in a grpc server using an inmem connection. It returns a -// GRPCClient for this new server to test the shimmed resource provider. -func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface { - listener := bufconn.Listen(256 * 1024) - grpcServer := grpc.NewServer() - - p := plugin.NewGRPCProviderServerShim(rp) - proto.RegisterProviderServer(grpcServer, p) - - go grpcServer.Serve(listener) - - conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) { - return listener.Dial() - }), grpc.WithInsecure()) - if err != nil { - panic(err) - } - - var pp tfplugin.GRPCProviderPlugin - client, _ := pp.GRPCClient(context.Background(), nil, conn) - - grpcClient := client.(*tfplugin.GRPCProvider) - grpcClient.TestServer = grpcServer - - return grpcClient -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/json.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/json.go deleted file mode 100644 index 345abf7199..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/json.go +++ /dev/null @@ -1,12 +0,0 @@ -package resource - -import ( - "bytes" - "encoding/json" -) - -func unmarshalJSON(data []byte, v interface{}) error { - dec := json.NewDecoder(bytes.NewReader(data)) - dec.UseNumber() - return dec.Decode(v) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go deleted file mode 100644 index 4befdb353f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go +++ /dev/null @@ -1,140 +0,0 @@ -package resource - -import ( - "fmt" - "sort" - - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// Map is a map of resources that are supported, and provides helpers for -// more easily implementing a ResourceProvider. -type Map struct { - Mapping map[string]Resource -} - -func (m *Map) Validate( - t string, c *terraform.ResourceConfig) ([]string, []error) { - r, ok := m.Mapping[t] - if !ok { - return nil, []error{fmt.Errorf("Unknown resource type: %s", t)} - } - - // If there is no validator set, then it is valid - if r.ConfigValidator == nil { - return nil, nil - } - - return r.ConfigValidator.Validate(c) -} - -// Apply performs a create or update depending on the diff, and calls -// the proper function on the matching Resource. -func (m *Map) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - if d.Destroy || d.RequiresNew() { - if s.ID != "" { - // Destroy the resource if it is created - err := r.Destroy(s, meta) - if err != nil { - return s, err - } - - s.ID = "" - } - - // If we're only destroying, and not creating, then return now. - // Otherwise, we continue so that we can create a new resource. 
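Stepping back from Apply's internals for a moment: a provider built on this legacy Map registers resources like the following sketch. The resource type and behavior are hypothetical; the Create/Destroy signatures are the CreateFunc/DestroyFunc types defined in resource.go further below.

package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/terraform"
)

// exampleMap registers one hypothetical resource type; Apply dispatches
// to Create when the state has no ID and to Update otherwise.
var exampleMap = &resource.Map{
	Mapping: map[string]resource.Resource{
		"example_thing": {
			Create: func(s *terraform.InstanceState, d *terraform.InstanceDiff, meta interface{}) (*terraform.InstanceState, error) {
				s.ID = "thing-1" // assigning an ID marks the resource as created
				return s, nil
			},
			Destroy: func(s *terraform.InstanceState, meta interface{}) error {
				return nil // nothing to clean up in this sketch
			},
		},
	},
}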
- if !d.RequiresNew() { - return nil, nil - } - } - - var result *terraform.InstanceState - var err error - if s.ID == "" { - result, err = r.Create(s, d, meta) - } else { - if r.Update == nil { - return s, fmt.Errorf( - "Resource type '%s' doesn't support update", - info.Type) - } - - result, err = r.Update(s, d, meta) - } - if result != nil { - if result.Attributes == nil { - result.Attributes = make(map[string]string) - } - - result.Attributes["id"] = result.ID - } - - return result, err -} - -// Diff performs a diff on the proper resource type. -func (m *Map) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - return r.Diff(s, c, meta) -} - -// Refresh performs a Refresh on the proper resource type. -// -// Refresh on the Resource won't be called if the state represents a -// non-created resource (ID is blank). -// -// An error is returned if the resource isn't registered. -func (m *Map) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the resource isn't created, don't refresh. - if s.ID == "" { - return s, nil - } - - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - return r.Refresh(s, meta) -} - -// Resources returns all the resources that are supported by this -// resource map and can be used to satisfy the Resources method of -// a ResourceProvider. -func (m *Map) Resources() []terraform.ResourceType { - ks := make([]string, 0, len(m.Mapping)) - for k := range m.Mapping { - ks = append(ks, k) - } - sort.Strings(ks) - - rs := make([]terraform.ResourceType, 0, len(m.Mapping)) - for _, k := range ks { - rs = append(rs, terraform.ResourceType{ - Name: k, - }) - } - - return rs -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/plugin.go deleted file mode 100644 index 1e100ad09b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/plugin.go +++ /dev/null @@ -1,174 +0,0 @@ -package resource - -import ( - "context" - "fmt" - "io/ioutil" - "log" - "os" - "strings" - "sync" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/terraform-exec/tfexec" - "github.com/hashicorp/terraform-plugin-sdk/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/logging" - grpcplugin "github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - "github.com/hashicorp/terraform-plugin-sdk/plugin" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - tftest "github.com/hashicorp/terraform-plugin-test/v2" - testing "github.com/mitchellh/go-testing-interface" -) - -func runProviderCommand(t testing.T, f func() error, wd *tftest.WorkingDir, factories map[string]terraform.ResourceProviderFactory) error { - // don't point to this as a test failure location - // point to whatever called it - t.Helper() - - // for backwards compatibility, make this opt-in - if os.Getenv("TF_ACCTEST_REATTACH") != "1" { - log.Println("[DEBUG] TF_ACCTEST_REATTACH not set to 1, not using reattach-based testing") - return f() - } - if acctest.TestHelper == nil { - log.Println("[DEBUG] acctest.TestHelper is nil, assuming we're not using binary 
acceptance testing") - return f() - } - log.Println("[DEBUG] TF_ACCTEST_REATTACH set to 1 and acctest.TestHelper is not nil, using reattach-based testing") - - // Run the providers in the same process as the test runner using the - // reattach behavior in Terraform. This ensures we get test coverage - // and enables the use of delve as a debugger. - // - // This behavior is only available in Terraform 0.12.26 and later. - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // this is needed so Terraform doesn't default to expecting protocol 4; - // we're skipping the handshake because Terraform didn't launch the - // plugins. - os.Setenv("PLUGIN_PROTOCOL_VERSIONS", "5") - - // Terraform 0.12.X and 0.13.X+ treat namespaceless providers - // differently in terms of what namespace they default to. So we're - // going to set both variations, as we don't know which version of - // Terraform we're talking to. We're also going to allow overriding - // the host or namespace using environment variables. - var namespaces []string - host := "registry.terraform.io" - if v := os.Getenv("TF_ACC_PROVIDER_NAMESPACE"); v != "" { - namespaces = append(namespaces, v) - } else { - namespaces = append(namespaces, "-", "hashicorp") - } - if v := os.Getenv("TF_ACC_PROVIDER_HOST"); v != "" { - host = v - } - - // Spin up gRPC servers for every provider factory, start a - // WaitGroup to listen for all of the close channels. - var wg sync.WaitGroup - reattachInfo := map[string]tfexec.ReattachConfig{} - for providerName, factory := range factories { - // providerName may be returned as terraform-provider-foo, and - // we need just foo. So let's fix that. - providerName = strings.TrimPrefix(providerName, "terraform-provider-") - - provider, err := factory() - if err != nil { - return fmt.Errorf("unable to create provider %q from factory: %v", providerName, err) - } - - // keep track of the running factory, so we can make sure it's - // shut down. - wg.Add(1) - - // configure the settings our plugin will be served with - // the GRPCProviderFunc wraps a non-gRPC provider server - // into a gRPC interface, and the logger just discards logs - // from go-plugin. - opts := &plugin.ServeOpts{ - GRPCProviderFunc: func() proto.ProviderServer { - return grpcplugin.NewGRPCProviderServerShim(provider) - }, - Logger: hclog.New(&hclog.LoggerOptions{ - Name: "plugintest", - Level: hclog.Trace, - Output: ioutil.Discard, - }), - } - - // let's actually start the provider server - config, closeCh, err := plugin.DebugServe(ctx, opts) - if err != nil { - return fmt.Errorf("unable to serve provider %q: %v", providerName, err) - } - - tfexecConfig := tfexec.ReattachConfig{ - Protocol: config.Protocol, - Pid: config.Pid, - Test: config.Test, - Addr: tfexec.ReattachConfigAddr{ - Network: config.Addr.Network, - String: config.Addr.String, - }, - } - - // plugin.DebugServe hijacks our log output location, so let's - // reset it - logging.SetTestOutput(t) - - // when the provider exits, remove one from the waitgroup - // so we can track when everything is done - go func(c <-chan struct{}) { - <-c - wg.Done() - }(closeCh) - - // set our provider's reattachinfo in our map, once - // for every namespace that different Terraform versions - // may expect. 
- for _, ns := range namespaces { - reattachInfo[strings.TrimSuffix(host, "/")+"/"+ - strings.TrimSuffix(ns, "/")+"/"+ - providerName] = tfexecConfig - } - } - - // set the working directory reattach info that will tell Terraform how - // to connect to our various running servers. - wd.SetReattachInfo(reattachInfo) - - // ok, let's call whatever Terraform command the test was trying to - // call, now that we know it'll attach back to those servers we just - // started. - err := f() - if err != nil { - log.Printf("[WARN] Got error running Terraform: %s", err) - } - - // cancel the servers so they'll return. Otherwise, this closeCh won't - // get closed, and we'll hang here. - cancel() - - // wait for the servers to actually shut down; it may take a moment for - // them to clean up, or whatever. - // TODO: add a timeout here? - // PC: do we need one? The test will time out automatically... - wg.Wait() - - // once we've run the Terraform command, let's remove the reattach - // information from the WorkingDir's environment. The WorkingDir will - // persist until the next call, but the server in the reattach info - // doesn't exist anymore at this point, so the reattach info is no - // longer valid. In theory it should be overwritten in the next call, - // but just to avoid any confusing bug reports, let's just unset the - // environment variable altogether. - wd.UnsetReattachInfo() - - // return any error returned from the orchestration code running - // Terraform commands - return err -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go deleted file mode 100644 index 80782413b4..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go +++ /dev/null @@ -1,49 +0,0 @@ -package resource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/helper/config" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -type Resource struct { - ConfigValidator *config.Validator - Create CreateFunc - Destroy DestroyFunc - Diff DiffFunc - Refresh RefreshFunc - Update UpdateFunc -} - -// CreateFunc is a function that creates a resource that didn't previously -// exist. -type CreateFunc func( - *terraform.InstanceState, - *terraform.InstanceDiff, - interface{}) (*terraform.InstanceState, error) - -// DestroyFunc is a function that destroys a resource that previously -// exists using the state. -type DestroyFunc func( - *terraform.InstanceState, - interface{}) error - -// DiffFunc is a function that performs a diff of a resource. -type DiffFunc func( - *terraform.InstanceState, - *terraform.ResourceConfig, - interface{}) (*terraform.InstanceDiff, error) - -// RefreshFunc is a function that performs a refresh of a specific type -// of resource. -type RefreshFunc func( - *terraform.InstanceState, - interface{}) (*terraform.InstanceState, error) - -// UpdateFunc is a function that is called to update a resource that -// previously existed. The difference between this and CreateFunc is that -// the diff is guaranteed to only contain attributes that don't require -// a new resource. 
-type UpdateFunc func( - *terraform.InstanceState, - *terraform.InstanceDiff, - interface{}) (*terraform.InstanceState, error) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state.go deleted file mode 100644 index 88a839664c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state.go +++ /dev/null @@ -1,259 +0,0 @@ -package resource - -import ( - "log" - "time" -) - -var refreshGracePeriod = 30 * time.Second - -// StateRefreshFunc is a function type used for StateChangeConf that is -// responsible for refreshing the item being watched for a state change. -// -// It returns three results. `result` is any object that will be returned -// as the final object after waiting for state change. This allows you to -// return the final updated object, for example an EC2 instance after refreshing -// it. -// -// `state` is the latest state of that object. And `err` is any error that -// may have happened while refreshing the state. -type StateRefreshFunc func() (result interface{}, state string, err error) - -// StateChangeConf is the configuration struct used for `WaitForState`. -type StateChangeConf struct { - Delay time.Duration // Wait this time before starting checks - Pending []string // States that are "allowed" and will continue trying - Refresh StateRefreshFunc // Refreshes the current state - Target []string // Target state - Timeout time.Duration // The amount of time to wait before timeout - MinTimeout time.Duration // Smallest time to wait before refreshes - PollInterval time.Duration // Override MinTimeout/backoff and only poll this often - NotFoundChecks int // Number of times to allow not found - - // This is to work around inconsistent APIs - ContinuousTargetOccurence int // Number of times the Target state has to occur continuously -} - -// WaitForState watches an object and waits for it to achieve the state -// specified in the configuration using the specified Refresh() func, -// waiting the number of seconds specified in the timeout configuration. -// -// If the Refresh function returns an error, exit immediately with that error. -// -// If the Refresh function returns a state other than the Target state or one -// listed in Pending, return immediately with an error. -// -// If the Timeout is exceeded before reaching the Target state, return an -// error. -// -// Otherwise, the result is the result of the first call to the Refresh function to -// reach the target state. -func (conf *StateChangeConf) WaitForState() (interface{}, error) { - log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) - - notfoundTick := 0 - targetOccurence := 0 - - // Set a default for times to check for not found - if conf.NotFoundChecks == 0 { - conf.NotFoundChecks = 20 - } - - if conf.ContinuousTargetOccurence == 0 { - conf.ContinuousTargetOccurence = 1 - } - - type Result struct { - Result interface{} - State string - Error error - Done bool - } - - // Read every result from the refresh loop, waiting for a positive result.Done. 
- resCh := make(chan Result, 1) - // cancellation channel for the refresh loop - cancelCh := make(chan struct{}) - - result := Result{} - - go func() { - defer close(resCh) - - time.Sleep(conf.Delay) - - // start with 0 delay for the first loop - var wait time.Duration - - for { - // store the last result - resCh <- result - - // wait and watch for cancellation - select { - case <-cancelCh: - return - case <-time.After(wait): - // first round had no wait - if wait == 0 { - wait = 100 * time.Millisecond - } - } - - res, currentState, err := conf.Refresh() - result = Result{ - Result: res, - State: currentState, - Error: err, - } - - if err != nil { - resCh <- result - return - } - - // If we're waiting for the absence of a thing, then return - if res == nil && len(conf.Target) == 0 { - targetOccurence++ - if conf.ContinuousTargetOccurence == targetOccurence { - result.Done = true - resCh <- result - return - } - continue - } - - if res == nil { - // If we didn't find the resource, check if we have been - // not finding it for awhile, and if so, report an error. - notfoundTick++ - if notfoundTick > conf.NotFoundChecks { - result.Error = &NotFoundError{ - LastError: err, - Retries: notfoundTick, - } - resCh <- result - return - } - } else { - // Reset the counter for when a resource isn't found - notfoundTick = 0 - found := false - - for _, allowed := range conf.Target { - if currentState == allowed { - found = true - targetOccurence++ - if conf.ContinuousTargetOccurence == targetOccurence { - result.Done = true - resCh <- result - return - } - continue - } - } - - for _, allowed := range conf.Pending { - if currentState == allowed { - found = true - targetOccurence = 0 - break - } - } - - if !found && len(conf.Pending) > 0 { - result.Error = &UnexpectedStateError{ - LastError: err, - State: result.State, - ExpectedState: conf.Target, - } - resCh <- result - return - } - } - - // Wait between refreshes using exponential backoff, except when - // waiting for the target state to reoccur. - if targetOccurence == 0 { - wait *= 2 - } - - // If a poll interval has been specified, choose that interval. - // Otherwise bound the default value. - if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { - wait = conf.PollInterval - } else { - if wait < conf.MinTimeout { - wait = conf.MinTimeout - } else if wait > 10*time.Second { - wait = 10 * time.Second - } - } - - log.Printf("[TRACE] Waiting %s before next try", wait) - } - }() - - // store the last value result from the refresh loop - lastResult := Result{} - - timeout := time.After(conf.Timeout) - for { - select { - case r, ok := <-resCh: - // channel closed, so return the last result - if !ok { - return lastResult.Result, lastResult.Error - } - - // we reached the intended state - if r.Done { - return r.Result, r.Error - } - - // still waiting, store the last result - lastResult = r - - case <-timeout: - log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) - log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) - - // cancel the goroutine and start our grace period timer - close(cancelCh) - timeout := time.After(refreshGracePeriod) - - // we need a for loop and a label to break on, because we may have - // an extra response value to read, but still want to wait for the - // channel to close. 
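For reference, callers drive WaitForState like the following sketch; the refresh callback and the state names are hypothetical, while the struct fields match the StateChangeConf defined above:

package example

import (
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

// waitForActive polls the supplied refresh function until the watched
// object reports the "active" state or the timeout elapses.
func waitForActive(refresh resource.StateRefreshFunc) (interface{}, error) {
	conf := &resource.StateChangeConf{
		Pending:    []string{"pending", "provisioning"},
		Target:     []string{"active"},
		Refresh:    refresh,          // returns (object, state, err)
		Timeout:    10 * time.Minute, // overall deadline
		Delay:      10 * time.Second, // initial wait before the first check
		MinTimeout: 3 * time.Second,  // lower bound between polls
	}
	return conf.WaitForState()
}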
- forSelect: - for { - select { - case r, ok := <-resCh: - if r.Done { - // the last refresh loop reached the desired state - return r.Result, r.Error - } - - if !ok { - // the goroutine returned - break forSelect - } - - // target state not reached, save the result for the - // TimeoutError and wait for the channel to close - lastResult = r - case <-timeout: - log.Println("[ERROR] WaitForState exceeded refresh grace period") - break forSelect - } - } - - return nil, &TimeoutError{ - LastError: lastResult.Error, - LastState: lastResult.State, - Timeout: conf.Timeout, - ExpectedState: conf.Target, - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state_shim.go deleted file mode 100644 index ddca4ae3f0..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state_shim.go +++ /dev/null @@ -1,470 +0,0 @@ -package resource - -import ( - "encoding/json" - "fmt" - "strconv" - - tfjson "github.com/hashicorp/terraform-json" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/zclconf/go-cty/cty" -) - -// shimState takes a new *states.State and reverts it to a legacy state for the provider ACC tests -func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) { - state := terraform.NewState() - - // in the odd case of a nil state, let the helper packages handle it - if newState == nil { - return nil, nil - } - - for _, newMod := range newState.Modules { - mod := state.AddModule(newMod.Addr) - - for name, out := range newMod.OutputValues { - outputType := "" - val := hcl2shim.ConfigValueFromHCL2(out.Value) - ty := out.Value.Type() - switch { - case ty == cty.String: - outputType = "string" - case ty.IsTupleType() || ty.IsListType(): - outputType = "list" - case ty.IsMapType(): - outputType = "map" - } - - mod.Outputs[name] = &terraform.OutputState{ - Type: outputType, - Value: val, - Sensitive: out.Sensitive, - } - } - - for _, res := range newMod.Resources { - resType := res.Addr.Type - providerType := res.ProviderConfig.ProviderConfig.Type - - resource := getResource(providers, providerType, res.Addr) - - for key, i := range res.Instances { - resState := &terraform.ResourceState{ - Type: resType, - Provider: res.ProviderConfig.String(), - } - - // We should always have a Current instance here, but be safe about checking. 
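As orientation for the shimming below: the legacy InstanceState stores attributes as a "flatmap", where containers are flattened into dotted keys with "#" (list length) and "%" (map length) markers. An illustrative sketch with hypothetical keys and values:

package example

// flatmapExample shows how a nested attribute value is represented in the
// legacy flatmap form that shimmedAttributes produces.
var flatmapExample = map[string]string{
	"id":        "thing-1",
	"tags.%":    "2", // map length marker
	"tags.env":  "test",
	"tags.team": "tools",
	"ports.#":   "2", // list length marker
	"ports.0":   "80",
	"ports.1":   "443",
}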
- if i.Current != nil { - flatmap, err := shimmedAttributes(i.Current, resource) - if err != nil { - return nil, fmt.Errorf("error decoding state for %q: %s", resType, err) - } - - var meta map[string]interface{} - if i.Current.Private != nil { - err := json.Unmarshal(i.Current.Private, &meta) - if err != nil { - return nil, err - } - } - - resState.Primary = &terraform.InstanceState{ - ID: flatmap["id"], - Attributes: flatmap, - Tainted: i.Current.Status == states.ObjectTainted, - Meta: meta, - } - - if i.Current.SchemaVersion != 0 { - if resState.Primary.Meta == nil { - resState.Primary.Meta = map[string]interface{}{} - } - resState.Primary.Meta["schema_version"] = i.Current.SchemaVersion - } - - for _, dep := range i.Current.Dependencies { - resState.Dependencies = append(resState.Dependencies, dep.String()) - } - - // convert the indexes to the old style flapmap indexes - idx := "" - switch key.(type) { - case addrs.IntKey: - // don't add numeric index values to resources with a count of 0 - if len(res.Instances) > 1 { - idx = fmt.Sprintf(".%d", key) - } - case addrs.StringKey: - idx = "." + key.String() - } - - mod.Resources[res.Addr.String()+idx] = resState - } - - // add any deposed instances - for _, dep := range i.Deposed { - flatmap, err := shimmedAttributes(dep, resource) - if err != nil { - return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err) - } - - var meta map[string]interface{} - if dep.Private != nil { - err := json.Unmarshal(dep.Private, &meta) - if err != nil { - return nil, err - } - } - - deposed := &terraform.InstanceState{ - ID: flatmap["id"], - Attributes: flatmap, - Tainted: dep.Status == states.ObjectTainted, - Meta: meta, - } - if dep.SchemaVersion != 0 { - deposed.Meta = map[string]interface{}{ - "schema_version": dep.SchemaVersion, - } - } - - resState.Deposed = append(resState.Deposed, deposed) - } - } - } - } - - return state, nil -} - -func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource { - p := providers[providerName] - if p == nil { - panic(fmt.Sprintf("provider %q not found in test step", providerName)) - } - - // this is only for tests, so should only see schema.Providers - provider := p.(*schema.Provider) - - switch addr.Mode { - case addrs.ManagedResourceMode: - resource := provider.ResourcesMap[addr.Type] - if resource != nil { - return resource - } - case addrs.DataResourceMode: - resource := provider.DataSourcesMap[addr.Type] - if resource != nil { - return resource - } - } - - panic(fmt.Sprintf("resource %s not found in test step", addr.Type)) -} - -func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) { - flatmap := instance.AttrsFlat - if flatmap != nil { - return flatmap, nil - } - - // if we have json attrs, they need to be decoded - rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType()) - if err != nil { - return nil, err - } - - instanceState, err := res.ShimInstanceStateFromValue(rio.Value) - if err != nil { - return nil, err - } - - return instanceState.Attributes, nil -} - -type shimmedState struct { - state *terraform.State -} - -func shimStateFromJson(jsonState *tfjson.State) (*terraform.State, error) { - state := terraform.NewState() - state.TFVersion = jsonState.TerraformVersion - - if jsonState.Values == nil { - // the state is empty - return state, nil - } - - for key, output := range jsonState.Values.Outputs { - os, err := shimOutputState(output) - if err != nil 
{ - return nil, err - } - state.RootModule().Outputs[key] = os - } - - ss := &shimmedState{state} - err := ss.shimStateModule(jsonState.Values.RootModule) - if err != nil { - return nil, err - } - - return state, nil -} - -func shimOutputState(so *tfjson.StateOutput) (*terraform.OutputState, error) { - os := &terraform.OutputState{ - Sensitive: so.Sensitive, - } - - switch v := so.Value.(type) { - case string: - os.Type = "string" - os.Value = v - return os, nil - case []interface{}: - os.Type = "list" - if len(v) == 0 { - os.Value = v - return os, nil - } - switch firstElem := v[0].(type) { - case string: - elements := make([]interface{}, len(v)) - for i, el := range v { - elements[i] = el.(string) - } - os.Value = elements - case bool: - elements := make([]interface{}, len(v)) - for i, el := range v { - elements[i] = el.(bool) - } - os.Value = elements - // unmarshalled number from JSON will always be json.Number - case json.Number: - elements := make([]interface{}, len(v)) - for i, el := range v { - elements[i] = el.(json.Number) - } - os.Value = elements - case []interface{}: - os.Value = v - case map[string]interface{}: - os.Value = v - default: - return nil, fmt.Errorf("unexpected output list element type: %T", firstElem) - } - return os, nil - case map[string]interface{}: - os.Type = "map" - os.Value = v - return os, nil - case bool: - os.Type = "string" - os.Value = strconv.FormatBool(v) - return os, nil - // unmarshalled number from JSON will always be json.Number - case json.Number: - os.Type = "string" - os.Value = v.String() - return os, nil - } - - return nil, fmt.Errorf("unexpected output type: %T", so.Value) -} - -func (ss *shimmedState) shimStateModule(sm *tfjson.StateModule) error { - var path addrs.ModuleInstance - - if sm.Address == "" { - path = addrs.RootModuleInstance - } else { - var diags tfdiags.Diagnostics - path, diags = addrs.ParseModuleInstanceStr(sm.Address) - if diags.HasErrors() { - return diags.Err() - } - } - - mod := ss.state.AddModule(path) - for _, res := range sm.Resources { - resourceState, err := shimResourceState(res) - if err != nil { - return err - } - - key, err := shimResourceStateKey(res) - if err != nil { - return err - } - - mod.Resources[key] = resourceState - } - - if len(sm.ChildModules) > 0 { - return fmt.Errorf("Modules are not supported. 
Found %d modules.", - len(sm.ChildModules)) - } - return nil -} - -func shimResourceStateKey(res *tfjson.StateResource) (string, error) { - if res.Index == nil { - return res.Address, nil - } - - var mode terraform.ResourceMode - switch res.Mode { - case tfjson.DataResourceMode: - mode = terraform.DataResourceMode - case tfjson.ManagedResourceMode: - mode = terraform.ManagedResourceMode - default: - return "", fmt.Errorf("unexpected resource mode for %q", res.Address) - } - - var index int - switch idx := res.Index.(type) { - case json.Number: - i, err := idx.Int64() - if err != nil { - return "", fmt.Errorf("unexpected index value (%q) for %q, ", - idx, res.Address) - } - index = int(i) - default: - return "", fmt.Errorf("unexpected index type (%T) for %q, "+ - "for_each is not supported", res.Index, res.Address) - } - - rsk := &terraform.ResourceStateKey{ - Mode: mode, - Type: res.Type, - Name: res.Name, - Index: index, - } - - return rsk.String(), nil -} - -func shimResourceState(res *tfjson.StateResource) (*terraform.ResourceState, error) { - sf := &shimmedFlatmap{} - err := sf.FromMap(res.AttributeValues) - if err != nil { - return nil, err - } - attributes := sf.Flatmap() - - if _, ok := attributes["id"]; !ok { - return nil, fmt.Errorf("no %q found in attributes", "id") - } - - return &terraform.ResourceState{ - Provider: res.ProviderName, - Type: res.Type, - Primary: &terraform.InstanceState{ - ID: attributes["id"], - Attributes: attributes, - Meta: map[string]interface{}{ - "schema_version": int(res.SchemaVersion), - }, - Tainted: res.Tainted, - }, - Dependencies: res.DependsOn, - }, nil -} - -type shimmedFlatmap struct { - m map[string]string -} - -func (sf *shimmedFlatmap) FromMap(attributes map[string]interface{}) error { - if sf.m == nil { - sf.m = make(map[string]string, len(attributes)) - } - - return sf.AddMap("", attributes) -} - -func (sf *shimmedFlatmap) AddMap(prefix string, m map[string]interface{}) error { - for key, value := range m { - k := key - if prefix != "" { - k = fmt.Sprintf("%s.%s", prefix, key) - } - - err := sf.AddEntry(k, value) - if err != nil { - return err - } - } - - mapLength := "%" - if prefix != "" { - mapLength = fmt.Sprintf("%s.%s", prefix, "%") - } - - sf.AddEntry(mapLength, strconv.Itoa(len(m))) - - return nil -} - -func (sf *shimmedFlatmap) AddSlice(name string, elements []interface{}) error { - for i, elem := range elements { - key := fmt.Sprintf("%s.%d", name, i) - err := sf.AddEntry(key, elem) - if err != nil { - return err - } - } - - sliceLength := fmt.Sprintf("%s.#", name) - sf.AddEntry(sliceLength, strconv.Itoa(len(elements))) - - return nil -} - -func (sf *shimmedFlatmap) AddEntry(key string, value interface{}) error { - switch el := value.(type) { - case nil: - // omit the entry - return nil - case bool: - sf.m[key] = strconv.FormatBool(el) - case json.Number: - sf.m[key] = el.String() - case string: - sf.m[key] = el - case map[string]interface{}: - err := sf.AddMap(key, el) - if err != nil { - return err - } - case []interface{}: - err := sf.AddSlice(key, el) - if err != nil { - return err - } - default: - // This should never happen unless terraform-json - // changes how attributes (types) are represented. 
- // - // We handle all types which the JSON unmarshaler - // can possibly produce - // https://golang.org/pkg/encoding/json/#Unmarshal - - return fmt.Errorf("%q: unexpected type (%T)", key, el) - } - return nil -} - -func (sf *shimmedFlatmap) Flatmap() map[string]string { - return sf.m -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing.go deleted file mode 100644 index 20a1c9d14d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing.go +++ /dev/null @@ -1,1514 +0,0 @@ -package resource - -import ( - "bytes" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "reflect" - "regexp" - "strconv" - "strings" - "syscall" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/logutils" - "github.com/hashicorp/terraform-plugin-sdk/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/logging" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/command/format" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload" - "github.com/hashicorp/terraform-plugin-sdk/internal/initwd" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/mitchellh/colorstring" -) - -// flagSweep is a flag available when running tests on the command line. It -// contains a comma-separated list of regions for the sweeper functions to -// run in. This flag bypasses the normal Test path and instead runs functions designed to -// clean up any leaked resources a testing environment could have created. It is -// a best effort attempt, and relies on Provider authors to implement "Sweeper" -// methods for resources. - -// Adding Sweeper methods with AddTestSweepers will -// construct a list of sweeper funcs to be called here. We iterate through -// regions provided by the sweep flag, and for each region we iterate through the -// tests, and exit on any errors. At time of writing, sweepers are run -// sequentially, however they can list dependencies to be run first. We track -// the sweepers that have been run, so as to not run a sweeper twice for a given -// region. -// -// WARNING: -// Sweepers are designed to be destructive. You should not use the -sweep flag -// in any environment that is not strictly a test environment. Resources will be -// destroyed. - -var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers") -var flagSweepAllowFailures = flag.Bool("sweep-allow-failures", false, "Enable to allow Sweeper Tests to continue after failures") -var flagSweepRun = flag.String("sweep-run", "", "Comma separated list of Sweeper Tests to run") -var sweeperFuncs map[string]*Sweeper - -// type SweeperFunc is a signature for a function that acts as a sweeper. It -// accepts a string for the region that the sweeper is to be run in. This -// function must be able to construct a valid client for that region. -type SweeperFunc func(r string) error - -type Sweeper struct { - // Name for sweeper.
Must be unique to be run by the Sweeper Runner - Name string - - // Dependencies list the const names of other Sweeper functions that must be run - // prior to running this Sweeper. This is an ordered list that will be invoked - // recursively at the helper/resource level - Dependencies []string - - // Sweeper function that when invoked sweeps the Provider of specific - // resources - F SweeperFunc -} - -func init() { - sweeperFuncs = make(map[string]*Sweeper) -} - -// AddTestSweepers function adds a given name and Sweeper configuration -// pair to the internal sweeperFuncs map. Invoke this function to register a -// resource sweeper to be available for running when the -sweep flag is used -// with `go test`. Sweeper names must be unique to help ensure a given sweeper -// is only run once per run. -func AddTestSweepers(name string, s *Sweeper) { - if _, ok := sweeperFuncs[name]; ok { - log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name) - } - - sweeperFuncs[name] = s -} - -func TestMain(m *testing.M) { - flag.Parse() - if *flagSweep != "" { - // parse flagSweep contents for regions to run - regions := strings.Split(*flagSweep, ",") - - // get filtered list of sweepers to run based on sweep-run flag - sweepers := filterSweepers(*flagSweepRun, sweeperFuncs) - - if _, err := runSweepers(regions, sweepers, *flagSweepAllowFailures); err != nil { - os.Exit(1) - } - } else { - exitCode := m.Run() - - if acctest.TestHelper != nil { - err := acctest.TestHelper.Close() - if err != nil { - log.Printf("Error cleaning up temporary test files: %s", err) - } - } - os.Exit(exitCode) - } -} - -func runSweepers(regions []string, sweepers map[string]*Sweeper, allowFailures bool) (map[string]map[string]error, error) { - var sweeperErrorFound bool - sweeperRunList := make(map[string]map[string]error) - - for _, region := range regions { - region = strings.TrimSpace(region) - - var regionSweeperErrorFound bool - regionSweeperRunList := make(map[string]error) - - log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region) - for _, sweeper := range sweepers { - if err := runSweeperWithRegion(region, sweeper, sweepers, regionSweeperRunList, allowFailures); err != nil { - if allowFailures { - continue - } - - sweeperRunList[region] = regionSweeperRunList - return sweeperRunList, fmt.Errorf("sweeper (%s) for region (%s) failed: %s", sweeper.Name, region, err) - } - } - - log.Printf("Sweeper Tests ran successfully:\n") - for sweeper, sweeperErr := range regionSweeperRunList { - if sweeperErr == nil { - fmt.Printf("\t- %s\n", sweeper) - } else { - regionSweeperErrorFound = true - } - } - - if regionSweeperErrorFound { - sweeperErrorFound = true - log.Printf("Sweeper Tests ran unsuccessfully:\n") - for sweeper, sweeperErr := range regionSweeperRunList { - if sweeperErr != nil { - fmt.Printf("\t- %s: %s\n", sweeper, sweeperErr) - } - } - } - - sweeperRunList[region] = regionSweeperRunList - } - - if sweeperErrorFound { - return sweeperRunList, errors.New("at least one sweeper failed") - } - - return sweeperRunList, nil - } - -// filterSweepers takes a comma-separated string listing the names of sweepers -// to be run, and returns a filtered set from the list of all sweepers to -// run based on the names given.
-func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper { - filterSlice := strings.Split(strings.ToLower(f), ",") - if len(filterSlice) == 1 && filterSlice[0] == "" { - // if the filter slice is a single element of "" then no sweeper list was - // given, so just return the full list - return source - } - - sweepers := make(map[string]*Sweeper) - for name := range source { - for _, s := range filterSlice { - if strings.Contains(strings.ToLower(name), s) { - for foundName, foundSweeper := range filterSweeperWithDependencies(name, source) { - sweepers[foundName] = foundSweeper - } - } - } - } - return sweepers -} - -// filterSweeperWithDependencies recursively returns sweeper and all dependencies. -// Since filterSweepers performs fuzzy matching, this function is used -// to perform exact sweeper and dependency lookup. -func filterSweeperWithDependencies(name string, source map[string]*Sweeper) map[string]*Sweeper { - result := make(map[string]*Sweeper) - - currentSweeper, ok := source[name] - if !ok { - log.Printf("[WARN] Sweeper has dependency (%s), but that sweeper was not found", name) - return result - } - - result[name] = currentSweeper - - for _, dependency := range currentSweeper.Dependencies { - for foundName, foundSweeper := range filterSweeperWithDependencies(dependency, source) { - result[foundName] = foundSweeper - } - } - - return result -} - -// runSweeperWithRegion receives a sweeper and a region, and recursively calls -// itself with that region for every dependency found for that sweeper. If there -// are no dependencies, invoke the contained sweeper func with the region, and -// add the success/fail status to the sweeperRunList. -func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweeper, sweeperRunList map[string]error, allowFailures bool) error { - for _, dep := range s.Dependencies { - if depSweeper, ok := sweepers[dep]; ok { - log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep) - err := runSweeperWithRegion(region, depSweeper, sweepers, sweeperRunList, allowFailures) - - if err != nil { - if allowFailures { - log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", depSweeper.Name, region, err) - continue - } - - return err - } - } else { - log.Printf("[WARN] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) - } - } - - if _, ok := sweeperRunList[s.Name]; ok { - log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region) - return nil - } - - log.Printf("[DEBUG] Running Sweeper (%s) in region (%s)", s.Name, region) - - runE := s.F(region) - - sweeperRunList[s.Name] = runE - - if runE != nil { - log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", s.Name, region, runE) - } - - return runE -} - -const TestEnvVar = "TF_ACC" -const TestDisableBinaryTestingFlagEnvVar = "TF_DISABLE_BINARY_TESTING" - -// TestProvider can be implemented by any ResourceProvider to provide custom -// reset functionality at the start of an acceptance test. -// The helper/schema Provider implements this interface. -type TestProvider interface { - TestReset() error } - -// TestCheckFunc is the callback type used with acceptance tests to check -// the state of a resource. The state passed in is the latest state known, -// or in the case of being after a destroy, it is the last known state when -// it was created.
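
[Reviewer note] Before the filtering internals below, a minimal sketch of how a provider's test package would register and drive sweepers through this v1 API; the sweeper name, log message, and body are hypothetical, not taken from this repository:

    package example

    import (
        "log"
        "testing"

        "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
    )

    func init() {
        // Register a sweeper; names are matched case-insensitively against the
        // comma-separated -sweep-run flag, and Dependencies are swept first.
        resource.AddTestSweepers("example_repository", &resource.Sweeper{
            Name: "example_repository",
            F: func(region string) error {
                // A real sweeper would build a client for region and delete any
                // resources leaked by earlier test runs; stubbed out here.
                log.Printf("[DEBUG] sweeping leaked repositories in %s", region)
                return nil
            },
        })
    }

    // Delegating TestMain to the SDK lets `go test -sweep=<regions>` run the
    // registered sweepers instead of the normal test suite.
    func TestMain(m *testing.M) {
        resource.TestMain(m)
    }
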
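
[Reviewer note] To make the TestCheckFunc contract concrete, a hypothetical check built on the type declared just below; the resource address and error messages are invented for the example:

    package example

    import (
        "fmt"

        "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
        "github.com/hashicorp/terraform-plugin-sdk/terraform"
    )

    // testAccCheckThingExists is a hypothetical TestCheckFunc factory: the
    // returned closure receives the latest known state and fails the test
    // step by returning an error.
    func testAccCheckThingExists(address string) resource.TestCheckFunc {
        return func(s *terraform.State) error {
            rs, ok := s.RootModule().Resources[address]
            if !ok {
                return fmt.Errorf("not found in state: %s", address)
            }
            if rs.Primary == nil || rs.Primary.ID == "" {
                return fmt.Errorf("no ID set for %s", address)
            }
            // A real check would also call the backing API here to confirm
            // that the remote object actually exists.
            return nil
        }
    }
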
-type TestCheckFunc func(*terraform.State) error - -// ImportStateCheckFunc is the check function for ImportState tests -type ImportStateCheckFunc func([]*terraform.InstanceState) error - -// ImportStateIdFunc is an ID generation function to help with complex ID -// generation for ImportState tests. -type ImportStateIdFunc func(*terraform.State) (string, error) - -// TestCase is a single acceptance test case used to test the apply/destroy -// lifecycle of a resource in a specific configuration. -// -// When the destroy plan is executed, the config from the last TestStep -// is used to plan it. -type TestCase struct { - // IsUnitTest allows a test to run regardless of the TF_ACC - // environment variable. This should be used with care - only for - // fast tests on local resources (e.g. remote state with a local - // backend) but can be used to increase confidence in correct - // operation of Terraform without waiting for a full acctest run. - IsUnitTest bool - - // PreCheck, if non-nil, will be called before any test steps are - // executed. It will only be executed in the case that the steps - // would run, so it can be used for some validation before running - // acceptance tests, such as verifying that keys are setup. - PreCheck func() - - // Providers is the ResourceProvider that will be under test. - // - // Alternately, ProviderFactories can be specified for the providers - // that are valid. This takes priority over Providers. - // - // The end effect of each is the same: specifying the providers that - // are used within the tests. - Providers map[string]terraform.ResourceProvider - ProviderFactories map[string]terraform.ResourceProviderFactory - - // ExternalProviders are providers the TestCase relies on that should - // be downloaded from the registry during init. This is only really - // necessary to set if you're using import, as providers in your config - // will be automatically retrieved during init. Import doesn't always - // use a config, however, so we allow manually specifying them here to - // be downloaded for import tests. - // - // ExternalProviders will only be used when using binary acceptance - // testing in reattach mode. - ExternalProviders map[string]ExternalProvider - - // PreventPostDestroyRefresh can be set to true for cases where data sources - // are tested alongside real resources - PreventPostDestroyRefresh bool - - // CheckDestroy is called after the resource is finally destroyed - // to allow the tester to test that the resource is truly gone. - CheckDestroy TestCheckFunc - - // Steps are the apply sequences done within the context of the - // same state. Each step can have its own check to verify correctness. - Steps []TestStep - - // The settings below control the "ID-only refresh test." This is - // an enabled-by-default test that tests that a refresh can be - // refreshed with only an ID to result in the same attributes. - // This validates completeness of Refresh. - // - // IDRefreshName is the name of the resource to check. This will - // default to the first non-nil primary resource in the state. - // - // IDRefreshIgnore is a list of configuration keys that will be ignored. - IDRefreshName string - IDRefreshIgnore []string - - // DisableBinaryDriver forces this test case to run using the legacy test - // driver, even if the binary test driver has been enabled. - // - // Deprecated: This property will be removed in version 2.0.0 of the SDK. 
- DisableBinaryDriver bool -} - -// ExternalProvider holds information about third-party providers that should -// be downloaded by Terraform as part of running the test step. -type ExternalProvider struct { - VersionConstraint string // the version constraint for the provider - Source string // the provider source -} - -// TestStep is a single apply sequence of a test, done within the -// context of a state. -// -// Multiple TestSteps can be sequenced in a Test to allow testing -// potentially complex update logic. In general, simply create/destroy -// tests will only need one step. -type TestStep struct { - // ResourceName should be set to the name of the resource - // that is being tested. Example: "aws_instance.foo". Various test - // modes use this to auto-detect state information. - // - // This is only required if the test mode settings below say it is - // for the mode you're using. - ResourceName string - - // PreConfig is called before the Config is applied to perform any per-step - // setup that needs to happen. This is called regardless of "test mode" - // below. - PreConfig func() - - // Taint is a list of resource addresses to taint prior to the execution of - // the step. Be sure to only include this at a step where the referenced - // address will be present in state, as it will fail the test if the resource - // is missing. - // - // This option is ignored on ImportState tests, and currently only works for - // resources in the root module path. - Taint []string - - //--------------------------------------------------------------- - // Test modes. One of the following groups of settings must be - // set to determine what the test step will do. Ideally we would've - // used Go interfaces here but there are now hundreds of tests we don't - // want to re-type so instead we just determine which step logic - // to run based on what settings below are set. - //--------------------------------------------------------------- - - //--------------------------------------------------------------- - // Plan, Apply testing - //--------------------------------------------------------------- - - // Config a string of the configuration to give to Terraform. If this - // is set, then the TestCase will execute this step with the same logic - // as a `terraform apply`. - Config string - - // Check is called after the Config is applied. Use this step to - // make your own API calls to check the status of things, and to - // inspect the format of the ResourceState itself. - // - // If an error is returned, the test will fail. In this case, a - // destroy plan will still be attempted. - // - // If this is nil, no check is done on this step. - Check TestCheckFunc - - // Destroy will create a destroy plan if set to true. - Destroy bool - - // ExpectNonEmptyPlan can be set to true for specific types of tests that are - // looking to verify that a diff occurs - ExpectNonEmptyPlan bool - - // ExpectError allows the construction of test cases that we expect to fail - // with an error. The specified regexp must match against the error for the - // test to pass. - ExpectError *regexp.Regexp - - // PlanOnly can be set to only run `plan` with this configuration, and not - // actually apply it. This is useful for ensuring config changes result in - // no-op plans - PlanOnly bool - - // PreventDiskCleanup can be set to true for testing terraform modules which - // require access to disk at runtime. 
Note that this will leave files in the - // temp folder - PreventDiskCleanup bool - - // PreventPostDestroyRefresh can be set to true for cases where data sources - // are tested alongside real resources - PreventPostDestroyRefresh bool - - // SkipFunc is called before applying config, but after PreConfig - // This is useful for defining test steps with platform-dependent checks - SkipFunc func() (bool, error) - - //--------------------------------------------------------------- - // ImportState testing - //--------------------------------------------------------------- - - // ImportState, if true, will test the functionality of ImportState - // by importing the resource with ResourceName (must be set) and the - // ID of that resource. - ImportState bool - - // ImportStateId is the ID to perform an ImportState operation with. - // This is optional. If it isn't set, then the resource ID is automatically - // determined by inspecting the state for ResourceName's ID. - ImportStateId string - - // ImportStateIdPrefix is the prefix added in front of ImportStateId. - // This can be useful in complex import cases, where more than one - // attribute needs to be passed on as the Import ID. Mainly in cases - // where the ID is not known, and a known prefix needs to be added to - // the unset ImportStateId field. - ImportStateIdPrefix string - - // ImportStateIdFunc is a function that can be used to dynamically generate - // the ID for the ImportState tests. It is sent the state, which can be - // checked to derive the attributes necessary and generate the string in the - // desired format. - ImportStateIdFunc ImportStateIdFunc - - // ImportStateCheck checks the results of ImportState. It should be - // used to verify that the resulting value of ImportState has the - // proper resources, IDs, and attributes. - ImportStateCheck ImportStateCheckFunc - - // ImportStateVerify, if true, will also check that the state values - // that are finally put into the state after import match for all the - // IDs returned by the Import. Note that this checks for strict equality - // and does not respect DiffSuppressFunc or CustomizeDiff. - // - // ImportStateVerifyIgnore is a list of prefixes of fields that should - // not be verified to be equal. These can be set to ephemeral fields or - // fields that can't be refreshed and don't matter. 
- ImportStateVerify bool - ImportStateVerifyIgnore []string - - // provider s is used internally to maintain a reference to the - // underlying providers during the tests - providers map[string]terraform.ResourceProvider -} - -// Set to a file mask in sprintf format where %s is test name -const EnvLogPathMask = "TF_LOG_PATH_MASK" - -func LogOutput(t TestT) (logOutput io.Writer, err error) { - logOutput = ioutil.Discard - - logLevel := logging.LogLevel() - if logLevel == "" { - return - } - - logOutput = os.Stderr - - if logPath := os.Getenv(logging.EnvLogFile); logPath != "" { - var err error - logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) - if err != nil { - return nil, err - } - } - - if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" { - // Escape special characters which may appear if we have subtests - testName := strings.Replace(t.Name(), "/", "__", -1) - - logPath := fmt.Sprintf(logPathMask, testName) - var err error - logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) - if err != nil { - return nil, err - } - } - - // This was the default since the beginning - logOutput = &logutils.LevelFilter{ - Levels: logging.ValidLevels, - MinLevel: logutils.LogLevel(logLevel), - Writer: logOutput, - } - - return -} - -// ParallelTest performs an acceptance test on a resource, allowing concurrency -// with other ParallelTest. -// -// Tests will fail if they do not properly handle conditions to allow multiple -// tests to occur against the same resource or service (e.g. random naming). -// All other requirements of the Test function also apply to this function. -func ParallelTest(t TestT, c TestCase) { - t.Parallel() - Test(t, c) -} - -// Test performs an acceptance test on a resource. -// -// Tests are not run unless an environmental variable "TF_ACC" is -// set to some non-empty value. This is to avoid test cases surprising -// a user by creating real resources. -// -// Tests will fail unless the verbose flag (`go test -v`, or explicitly -// the "-test.v" flag) is set. Because some acceptance tests take quite -// long, we require the verbose flag so users are able to see progress -// output. -func Test(t TestT, c TestCase) { - // We only run acceptance tests if an env var is set because they're - // slow and generally require some outside configuration. You can opt out - // of this with OverrideEnvVar on individual TestCases. - if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { - t.Skip(fmt.Sprintf( - "Acceptance tests skipped unless env '%s' set", - TestEnvVar)) - return - } - if v := os.Getenv(TestDisableBinaryTestingFlagEnvVar); v != "" { - b, err := strconv.ParseBool(v) - if err != nil { - t.Error(fmt.Errorf("Error parsing EnvVar %q value %q: %s", TestDisableBinaryTestingFlagEnvVar, v, err)) - } - - c.DisableBinaryDriver = b - } - - logWriter, err := LogOutput(t) - if err != nil { - t.Error(fmt.Errorf("error setting up logging: %s", err)) - } - log.SetOutput(logWriter) - - // We require verbose mode so that the user knows what is going on. - if !testTesting && !testing.Verbose() && !c.IsUnitTest { - t.Fatal("Acceptance tests must be run with the -v flag on tests") - } - - // get instances of all providers, so we can use the individual - // resources to shim the state during the tests. 
- providers := make(map[string]terraform.ResourceProvider) - for name, pf := range testProviderFactories(c) { - p, err := pf() - if err != nil { - t.Fatal(err) - } - providers[name] = p - } - - if acctest.TestHelper != nil && c.DisableBinaryDriver == false { - // auto-configure all providers - for _, p := range providers { - err = p.Configure(terraform.NewResourceConfigRaw(nil)) - if err != nil { - t.Fatal(err) - } - } - - // Run the PreCheck if we have it. - // This is done after the auto-configure to allow providers - // to override the default auto-configure parameters. - if c.PreCheck != nil { - c.PreCheck() - } - - // inject providers for ImportStateVerify - RunNewTest(t.(*testing.T), c, providers) - return - } else { - // run the PreCheck if we have it - if c.PreCheck != nil { - c.PreCheck() - } - } - - providerResolver, err := testProviderResolver(c) - if err != nil { - t.Fatal(err) - } - - opts := terraform.ContextOpts{ProviderResolver: providerResolver} - - // A single state variable to track the lifecycle, starting with no state - var state *terraform.State - - // Go through each step and run it - var idRefreshCheck *terraform.ResourceState - idRefresh := c.IDRefreshName != "" - errored := false - for i, step := range c.Steps { - // insert the providers into the step so we can get the resources for - // shimming the state - step.providers = providers - - var err error - log.Printf("[DEBUG] Test: Executing step %d", i) - - if step.SkipFunc != nil { - skip, err := step.SkipFunc() - if err != nil { - t.Fatal(err) - } - if skip { - log.Printf("[WARN] Skipping step %d", i) - continue - } - } - - if step.Config == "" && !step.ImportState { - err = fmt.Errorf( - "unknown test mode for step. Please see TestStep docs\n\n%#v", - step) - } else { - if step.ImportState { - if step.Config == "" { - step.Config, err = testProviderConfig(c) - if err != nil { - t.Fatal("Error setting config for providers: " + err.Error()) - } - } - - // Can optionally set step.Config in addition to - // step.ImportState, to provide config for the import. - state, err = testStepImportState(opts, state, step) - } else { - state, err = testStepConfig(opts, state, step) - } - } - - // If we expected an error, but did not get one, fail - if err == nil && step.ExpectError != nil { - errored = true - t.Error(fmt.Sprintf( - "Step %d, no error received, but expected a match to:\n\n%s\n\n", - i, step.ExpectError)) - break - } - - // If there was an error, exit - if err != nil { - // Perhaps we expected an error? Check if it matches - if step.ExpectError != nil { - if !step.ExpectError.MatchString(err.Error()) { - errored = true - t.Error(fmt.Sprintf( - "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n", - i, err, step.ExpectError)) - break - } - } else { - errored = true - t.Error(fmt.Sprintf("Step %d error: %s", i, detailedErrorMessage(err))) - break - } - } - - // If we've never checked an id-only refresh and our state isn't - // empty, find the first resource and test it. - if idRefresh && idRefreshCheck == nil && !state.Empty() { - // Find the first non-nil resource in the state - for _, m := range state.Modules { - if len(m.Resources) > 0 { - if v, ok := m.Resources[c.IDRefreshName]; ok { - idRefreshCheck = v - } - - break - } - } - - // If we have an instance to check for refreshes, do it - // immediately. We do it in the middle of another test - // because it shouldn't affect the overall state (refresh - // is read-only semantically) and we want to fail early if - // this fails. 
If refresh isn't read-only, then this will have - // caught a different bug. - if idRefreshCheck != nil { - log.Printf( - "[WARN] Test: Running ID-only refresh check on %s", - idRefreshCheck.Primary.ID) - if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil { - log.Printf("[ERROR] Test: ID-only test failed: %s", err) - t.Error(fmt.Sprintf( - "[ERROR] Test: ID-only test failed: %s", err)) - break - } - } - } - } - - // If we never checked an id-only refresh, it is a failure. - if idRefresh { - if !errored && len(c.Steps) > 0 && idRefreshCheck == nil { - t.Error("ID-only refresh check never ran.") - } - } - - // If we have a state, then run the destroy - if state != nil { - lastStep := c.Steps[len(c.Steps)-1] - destroyStep := TestStep{ - Config: lastStep.Config, - Check: c.CheckDestroy, - Destroy: true, - PreventDiskCleanup: lastStep.PreventDiskCleanup, - PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, - providers: providers, - } - - log.Printf("[WARN] Test: Executing destroy step") - state, err := testStep(opts, state, destroyStep) - if err != nil { - t.Error(fmt.Sprintf( - "Error destroying resource! WARNING: Dangling resources\n"+ - "may exist. The full state and error is shown below.\n\n"+ - "Error: %s\n\nState: %s", - err, - state)) - } - } else { - log.Printf("[WARN] Skipping destroy test since there is no state.") - } -} - -// testProviderConfig takes the list of Providers in a TestCase and returns a -// config with only empty provider blocks. This is useful for Import, where no -// config is provided, but the providers must be defined. -func testProviderConfig(c TestCase) (string, error) { - var lines []string - var requiredProviders []string - for p := range c.Providers { - lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) - } - for p, v := range c.ExternalProviders { - if _, ok := c.Providers[p]; ok { - return "", fmt.Errorf("Provider %q set in both Providers and ExternalProviders for TestCase. Must be set in only one.", p) - } - if _, ok := c.ProviderFactories[p]; ok { - return "", fmt.Errorf("Provider %q set in both ProviderFactories and ExternalProviders for TestCase. Must be set in only one.", p) - } - lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) - var providerBlock string - if v.VersionConstraint != "" { - providerBlock = fmt.Sprintf("%s\nversion = %q", providerBlock, v.VersionConstraint) - } - if v.Source != "" { - providerBlock = fmt.Sprintf("%s\nsource = %q", providerBlock, v.Source) - } - if providerBlock != "" { - providerBlock = fmt.Sprintf("%s = {%s\n}\n", p, providerBlock) - } - requiredProviders = append(requiredProviders, providerBlock) - } - - if len(requiredProviders) > 0 { - lines = append([]string{fmt.Sprintf("terraform {\nrequired_providers {\n%s}\n}\n\n", strings.Join(requiredProviders, ""))}, lines...) - } - - return strings.Join(lines, ""), nil -} - -// testProviderFactories combines the fixed Providers and -// ResourceProviderFactory functions into a single map of -// ResourceProviderFactory functions. 
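
[Reviewer note] As a usage sketch tying TestCase, TestStep, and Test together before the factory plumbing below; the provider constructor, resource type, and config are placeholders, not this provider's real schema:

    package example

    import (
        "testing"

        "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
        "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
        "github.com/hashicorp/terraform-plugin-sdk/terraform"
    )

    // exampleProvider stands in for the provider under test; a real provider
    // would populate ResourcesMap and a ConfigureFunc.
    func exampleProvider() *schema.Provider { return &schema.Provider{} }

    func TestAccExampleThing_basic(t *testing.T) {
        resource.Test(t, resource.TestCase{
            // Fixed Providers entries are wrapped into factories by
            // testProviderFactories below.
            Providers: map[string]terraform.ResourceProvider{
                "example": exampleProvider(),
            },
            Steps: []resource.TestStep{
                {
                    Config: `resource "example_thing" "foo" { name = "foo" }`,
                    Check:  resource.TestCheckResourceAttr("example_thing.foo", "name", "foo"),
                },
            },
        })
    }
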
-func testProviderFactories(c TestCase) map[string]terraform.ResourceProviderFactory { - ctxProviders := make(map[string]terraform.ResourceProviderFactory) - for k, pf := range c.ProviderFactories { - ctxProviders[k] = pf - } - - // add any fixed providers - for k, p := range c.Providers { - ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) - } - return ctxProviders -} - -// testProviderResolver is a helper to build a ResourceProviderResolver -// with pre instantiated ResourceProviders, so that we can reset them for the -// test, while only calling the factory function once. -// Any errors are stored so that they can be returned by the factory in -// terraform to match non-test behavior. -func testProviderResolver(c TestCase) (providers.Resolver, error) { - ctxProviders := testProviderFactories(c) - - // wrap the old provider factories in the test grpc server so they can be - // called from terraform. - newProviders := make(map[string]providers.Factory) - - for k, pf := range ctxProviders { - factory := pf // must copy to ensure each closure sees its own value - newProviders[k] = func() (providers.Interface, error) { - p, err := factory() - if err != nil { - return nil, err - } - - // The provider is wrapped in a GRPCTestProvider so that it can be - // passed back to terraform core as a providers.Interface, rather - // than the legacy ResourceProvider. - return GRPCTestProvider(p), nil - } - } - - return providers.ResolverFixed(newProviders), nil -} - -// UnitTest is a helper to force the acceptance testing harness to run in the -// normal unit test suite. This should only be used for resource that don't -// have any external dependencies. -func UnitTest(t TestT, c TestCase) { - c.IsUnitTest = true - Test(t, c) -} - -func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error { - // TODO: We guard by this right now so master doesn't explode. We - // need to remove this eventually to make this part of the normal tests. - if os.Getenv("TF_ACC_IDONLY") == "" { - return nil - } - - addr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: r.Type, - Name: "foo", - }.Instance(addrs.NoKey) - absAddr := addr.Absolute(addrs.RootModuleInstance) - - // Build the state. The state is just the resource with an ID. There - // are no attributes. We only set what is needed to perform a refresh. - state := states.NewState() - state.RootModule().SetResourceInstanceCurrent( - addr, - &states.ResourceInstanceObjectSrc{ - AttrsFlat: r.Primary.Attributes, - Status: states.ObjectReady, - }, - addrs.ProviderConfig{Type: "placeholder"}.Absolute(addrs.RootModuleInstance), - ) - - // Create the config module. We use the full config because Refresh - // doesn't have access to it and we may need things like provider - // configurations. The initial implementation of id-only checks used - // an empty config module, but that caused the aforementioned problems. - cfg, err := testConfig(opts, step) - if err != nil { - return err - } - - // Initialize the context - opts.Config = cfg - opts.State = state - ctx, ctxDiags := terraform.NewContext(&opts) - if ctxDiags.HasErrors() { - return ctxDiags.Err() - } - if diags := ctx.Validate(); len(diags) > 0 { - if diags.HasErrors() { - return errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) - } - - log.Printf("[WARN] Config warnings:\n%s", diags.Err().Error()) - } - - // Refresh! 
- state, refreshDiags := ctx.Refresh() - if refreshDiags.HasErrors() { - return refreshDiags.Err() - } - - // Verify attribute equivalence. - actualR := state.ResourceInstance(absAddr) - if actualR == nil { - return fmt.Errorf("Resource gone!") - } - if actualR.Current == nil { - return fmt.Errorf("Resource has no primary instance") - } - actual := actualR.Current.AttrsFlat - expected := r.Primary.Attributes - // Remove fields we're ignoring - for _, v := range c.IDRefreshIgnore { - for k := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return fmt.Errorf( - "Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - - return nil -} - -func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) { - if step.PreConfig != nil { - step.PreConfig() - } - - cfgPath, err := ioutil.TempDir("", "tf-test") - if err != nil { - return nil, fmt.Errorf("Error creating temporary directory for config: %s", err) - } - - if step.PreventDiskCleanup { - log.Printf("[INFO] Skipping defer os.RemoveAll call") - } else { - defer os.RemoveAll(cfgPath) - } - - // Write the main configuration file - err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm) - if err != nil { - return nil, fmt.Errorf("Error creating temporary file for config: %s", err) - } - - // Create directory for our child modules, if any. - modulesDir := filepath.Join(cfgPath, ".modules") - err = os.Mkdir(modulesDir, os.ModePerm) - if err != nil { - return nil, fmt.Errorf("Error creating child modules directory: %s", err) - } - - inst := initwd.NewModuleInstaller(modulesDir, nil) - _, installDiags := inst.InstallModules(cfgPath, true, initwd.ModuleInstallHooksImpl{}) - if installDiags.HasErrors() { - return nil, installDiags.Err() - } - - loader, err := configload.NewLoader(&configload.Config{ - ModulesDir: modulesDir, - }) - if err != nil { - return nil, fmt.Errorf("failed to create config loader: %s", err) - } - - config, configDiags := loader.LoadConfig(cfgPath) - if configDiags.HasErrors() { - return nil, configDiags - } - - return config, nil -} - -func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { - if c.ResourceName == "" { - return nil, fmt.Errorf("ResourceName must be set in TestStep") - } - - for _, m := range state.Modules { - if len(m.Resources) > 0 { - if v, ok := m.Resources[c.ResourceName]; ok { - return v, nil - } - } - } - - return nil, fmt.Errorf( - "Resource specified by ResourceName couldn't be found: %s", c.ResourceName) -} - -// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into -// a single TestCheckFunc. -// -// As a user testing their provider, this lets you decompose your checks -// into smaller pieces more easily. 
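
[Reviewer note] A typical composition looks like the following sketch, with illustrative addresses and values; ComposeAggregateTestCheckFunc, defined a little further down, is the variant that runs every check and aggregates failures:

    package example

    import (
        "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
    )

    // exampleChecks composes several checks; they run in order, and the first
    // failure is reported with its position, e.g. "Check 2/2 error: ...".
    func exampleChecks() resource.TestCheckFunc {
        return resource.ComposeTestCheckFunc(
            resource.TestCheckResourceAttrSet("example_thing.foo", "id"),
            resource.TestCheckResourceAttr("example_thing.foo", "name", "foo"),
        )
    }
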
-func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { - return func(s *terraform.State) error { - for i, f := range fs { - if err := f(s); err != nil { - return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err) - } - } - - return nil - } -} - -// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into -// a single TestCheckFunc. -// -// As a user testing their provider, this lets you decompose your checks -// into smaller pieces more easily. -// -// Unlike ComposeTestCheckFunc, ComposeAggergateTestCheckFunc runs _all_ of the -// TestCheckFuncs and aggregates failures. -func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { - return func(s *terraform.State) error { - var result *multierror.Error - - for i, f := range fs { - if err := f(s); err != nil { - result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)) - } - } - - return result.ErrorOrNil() - } -} - -// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value -// exists in state for the given name/key combination. It is useful when -// testing that computed values were set, when it is not possible to -// know ahead of time what the values will be. -func TestCheckResourceAttrSet(name, key string) TestCheckFunc { - return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testCheckResourceAttrSet(is, name, key) - }) -} - -// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with -// support for non-root modules -func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testCheckResourceAttrSet(is, name, key) - }) -} - -func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { - if val, ok := is.Attributes[key]; !ok || val == "" { - return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) - } - - return nil -} - -// TestCheckResourceAttr is a TestCheckFunc which validates -// the value in state for the given name/key combination. -func TestCheckResourceAttr(name, key, value string) TestCheckFunc { - return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testCheckResourceAttr(is, name, key, value) - }) -} - -// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with -// support for non-root modules -func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testCheckResourceAttr(is, name, key, value) - }) -} - -func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { - // Empty containers may be elided from the state. - // If the intent here is to check for an empty container, allow the key to - // also be non-existent. 
- emptyCheck := false - if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { - emptyCheck = true - } - - if v, ok := is.Attributes[key]; !ok || v != value { - if emptyCheck && !ok { - return nil - } - - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", name, key) - } - - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - name, - key, - value, - v) - } - return nil -} - -// TestCheckNoResourceAttr is a TestCheckFunc which ensures that -// NO value exists in state for the given name/key combination. -func TestCheckNoResourceAttr(name, key string) TestCheckFunc { - return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testCheckNoResourceAttr(is, name, key) - }) -} - -// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with -// support for non-root modules -func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testCheckNoResourceAttr(is, name, key) - }) -} - -func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { - // Empty containers may sometimes be included in the state. - // If the intent here is to check for an empty container, allow the value to - // also be "0". - emptyCheck := false - if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") { - emptyCheck = true - } - - val, exists := is.Attributes[key] - if emptyCheck && val == "0" { - return nil - } - - if exists { - return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) - } - - return nil -} - -// TestMatchResourceAttr is a TestCheckFunc which checks that the value -// in state for the given name/key combination matches the given regex. -func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { - return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testMatchResourceAttr(is, name, key, r) - }) -} - -// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with -// support for non-root modules -func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testMatchResourceAttr(is, name, key, r) - }) -} - -func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { - if !r.MatchString(is.Attributes[key]) { - return fmt.Errorf( - "%s: Attribute '%s' didn't match %q, got %#v", - name, - key, - r.String(), - is.Attributes[key]) - } - - return nil -} - -// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the -// value is a pointer so that it can be updated while the test is running. -// It will only be dereferenced at the point this step is run. 
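
[Reviewer note] An illustrative use of the pointer variant declared just below; the package-level variable is hypothetical and would be filled in at runtime, for example by an earlier step's Check:

    package example

    import (
        "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
    )

    // expectedID is populated while the test runs, e.g. by a previous step.
    var expectedID string

    // examplePtrCheck dereferences expectedID only when the check executes,
    // so the value may legitimately change between steps.
    func examplePtrCheck() resource.TestCheckFunc {
        return resource.TestCheckResourceAttrPtr("example_thing.foo", "id", &expectedID)
    }
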
-func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc { - return func(s *terraform.State) error { - return TestCheckResourceAttr(name, key, *value)(s) - } -} - -// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with -// support for non-root modules -func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { - return func(s *terraform.State) error { - return TestCheckModuleResourceAttr(mp, name, key, *value)(s) - } -} - -// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values -// in state for a pair of name/key combinations are equal. -func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { - return checkIfIndexesIntoTypeSetPair(keyFirst, keySecond, func(s *terraform.State) error { - isFirst, err := primaryInstanceState(s, nameFirst) - if err != nil { - return err - } - - isSecond, err := primaryInstanceState(s, nameSecond) - if err != nil { - return err - } - - return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) - }) -} - -// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with -// support for non-root modules -func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { - mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() - mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() - return checkIfIndexesIntoTypeSetPair(keyFirst, keySecond, func(s *terraform.State) error { - isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) - if err != nil { - return err - } - - isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond) - if err != nil { - return err - } - - return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) - }) -} - -func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { - vFirst, okFirst := isFirst.Attributes[keyFirst] - vSecond, okSecond := isSecond.Attributes[keySecond] - - // Container count values of 0 should not be relied upon, and not reliably - // maintained by helper/schema. For the purpose of tests, consider unset and - // 0 to be equal. - if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] && - (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) { - // they have the same suffix, and it is a collection count key. - if vFirst == "0" || vFirst == "" { - okFirst = false - } - if vSecond == "0" || vSecond == "" { - okSecond = false - } - } - - if okFirst != okSecond { - if !okFirst { - return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond) - } - return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond) - } - if !(okFirst || okSecond) { - // If they both don't exist then they are equally unset, so that's okay. 
- return nil - } - - if vFirst != vSecond { - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - nameFirst, - keyFirst, - vSecond, - vFirst) - } - - return nil -} - -// TestCheckOutput checks an output in the Terraform configuration -func TestCheckOutput(name, value string) TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Outputs[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Value != value { - return fmt.Errorf( - "Output '%s': expected %#v, got %#v", - name, - value, - rs) - } - - return nil - } -} - -func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Outputs[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if !r.MatchString(rs.Value.(string)) { - return fmt.Errorf( - "Output '%s': %#v didn't match %q", - name, - rs, - r.String()) - } - - return nil - } -} - -// TestT is the interface used to handle the test lifecycle of a test. -// -// Users should just use a *testing.T object, which implements this. -type TestT interface { - Error(args ...interface{}) - Fatal(args ...interface{}) - Skip(args ...interface{}) - Name() string - Parallel() -} - -// This is set to true by unit tests to alter some behavior -var testTesting = false - -// modulePrimaryInstanceState returns the instance state for the given resource -// name in a ModuleState -func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { - rs, ok := ms.Resources[name] - if !ok { - return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) - } - - is := rs.Primary - if is == nil { - return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) - } - - return is, nil -} - -// modulePathPrimaryInstanceState returns the primary instance state for the -// given resource name in a given module path. -func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { - ms := s.ModuleByPath(mp) - if ms == nil { - return nil, fmt.Errorf("No module found at: %s", mp) - } - - return modulePrimaryInstanceState(s, ms, name) -} - -// primaryInstanceState returns the primary instance state for the given -// resource name in the root module. -func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { - ms := s.RootModule() - return modulePrimaryInstanceState(s, ms, name) -} - -// operationError is a specialized implementation of error used to describe -// failures during one of the several operations performed for a particular -// test case. -type operationError struct { - OpName string - Diags tfdiags.Diagnostics -} - -func newOperationError(opName string, diags tfdiags.Diagnostics) error { - return operationError{opName, diags} -} - -// Error returns a terse error string containing just the basic diagnostic -// messages, for situations where normal Go error behavior is appropriate. -func (err operationError) Error() string { - return fmt.Sprintf("errors during %s: %s", err.OpName, err.Diags.Err().Error()) -} - -// ErrorDetail is like Error except it includes verbosely-rendered diagnostics -// similar to what would come from a normal Terraform run, which include -// additional context not included in Error(). 
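
[Reviewer note] Rounding out the state checks, a short sketch composing the output helpers shown above; the output names and URL pattern are invented:

    package example

    import (
        "regexp"

        "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
    )

    // exampleOutputChecks reads from the root module's outputs in state.
    func exampleOutputChecks() resource.TestCheckFunc {
        return resource.ComposeTestCheckFunc(
            resource.TestCheckOutput("repository_name", "foo"),
            resource.TestMatchOutput("repository_url", regexp.MustCompile(`^https://`)),
        )
    }
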
-func (err operationError) ErrorDetail() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "errors during %s:", err.OpName) - clr := &colorstring.Colorize{Disable: true, Colors: colorstring.DefaultColors} - for _, diag := range err.Diags { - diagStr := format.Diagnostic(diag, nil, clr, 78) - buf.WriteByte('\n') - buf.WriteString(diagStr) - } - return buf.String() -} - -// detailedErrorMessage is a helper for calling ErrorDetail on an error if -// it is an operationError or just taking Error otherwise. -func detailedErrorMessage(err error) string { - switch tErr := err.(type) { - case operationError: - return tErr.ErrorDetail() - default: - return err.Error() - } -} - -// indexesIntoTypeSet is a heuristic to try and identify if a flatmap style -// string address uses a precalculated TypeSet hash, which are integers and -// typically are large and obviously not a list index -func indexesIntoTypeSet(key string) bool { - for _, part := range strings.Split(key, ".") { - if i, err := strconv.Atoi(part); err == nil && i > 100 { - return true - } - } - return false -} - -func checkIfIndexesIntoTypeSet(key string, f TestCheckFunc) TestCheckFunc { - return func(s *terraform.State) error { - err := f(s) - if err != nil && s.IsBinaryDrivenTest && indexesIntoTypeSet(key) { - return fmt.Errorf("Error in test check: %s\nTest check address %q likely indexes into TypeSet\nThis is not possible in V1 of the SDK while using the binary driver\nPlease disable the driver for this TestCase with DisableBinaryDriver: true", err, key) - } - return err - } -} - -func checkIfIndexesIntoTypeSetPair(keyFirst, keySecond string, f TestCheckFunc) TestCheckFunc { - return func(s *terraform.State) error { - err := f(s) - if err != nil && s.IsBinaryDrivenTest && (indexesIntoTypeSet(keyFirst) || indexesIntoTypeSet(keySecond)) { - return fmt.Errorf("Error in test check: %s\nTest check address %q or %q likely indexes into TypeSet\nThis is not possible in V1 of the SDK while using the binary driver\nPlease disable the driver for this TestCase with DisableBinaryDriver: true", err, keyFirst, keySecond) - } - return err - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go deleted file mode 100644 index e21525de86..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go +++ /dev/null @@ -1,404 +0,0 @@ -package resource - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "log" - "sort" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// testStepConfig runs a config-mode test step -func testStepConfig( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - return testStep(opts, state, step) -} - -func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { - if !step.Destroy { - if err := testStepTaint(state, step); err != nil { - return state, err - } - } - - cfg, err := testConfig(opts, step) - if err != nil { - return state, err - } - - var stepDiags tfdiags.Diagnostics - - // Build the 
context - opts.Config = cfg - opts.State, err = terraform.ShimLegacyState(state) - if err != nil { - return nil, err - } - - opts.Destroy = step.Destroy - ctx, stepDiags := terraform.NewContext(&opts) - if stepDiags.HasErrors() { - return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err()) - } - if stepDiags := ctx.Validate(); len(stepDiags) > 0 { - if stepDiags.HasErrors() { - return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err()) - } - - log.Printf("[WARN] Config warnings:\n%s", stepDiags) - } - - // Refresh! - newState, stepDiags := ctx.Refresh() - // shim the state first so the test can check the state on errors - - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - if stepDiags.HasErrors() { - return state, newOperationError("refresh", stepDiags) - } - - // If this step is a PlanOnly step, skip over this first Plan and subsequent - // Apply, and use the follow up Plan that checks for perpetual diffs - if !step.PlanOnly { - // Plan! - if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() { - return state, newOperationError("plan", stepDiags) - } else { - log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes)) - } - - // We need to keep a copy of the state prior to destroying - // such that destroy steps can verify their behavior in the check - // function - stateBeforeApplication := state.DeepCopy() - - // Apply the diff, creating real resources. - newState, stepDiags = ctx.Apply() - // shim the state first so the test can check the state on errors - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - if stepDiags.HasErrors() { - return state, newOperationError("apply", stepDiags) - } - - // Run any configured checks - if step.Check != nil { - if step.Destroy { - if err := step.Check(stateBeforeApplication); err != nil { - return state, fmt.Errorf("Check failed: %s", err) - } - } else { - if err := step.Check(state); err != nil { - return state, fmt.Errorf("Check failed: %s", err) - } - } - } - } - - // Now, verify that Plan is now empty and we don't have a perpetual diff issue - // We do this with TWO plans. One without a refresh. - var p *plans.Plan - if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { - return state, newOperationError("follow-up plan", stepDiags) - } - if !p.Changes.Empty() { - if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } else { - return state, fmt.Errorf( - "After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } - } - - // And another after a Refresh. - if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { - newState, stepDiags = ctx.Refresh() - if stepDiags.HasErrors() { - return state, newOperationError("follow-up refresh", stepDiags) - } - - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - } - if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { - return state, newOperationError("second follow-up refresh", stepDiags) - } - empty := p.Changes.Empty() - - // Data resources are tricky because they legitimately get instantiated - // during refresh so that they will be already populated during the - // plan walk. Because of this, if we have any data resources in the - // config we'll end up wanting to destroy them again here. 
This is - // acceptable and expected, and we'll treat it as "empty" for the - // sake of this testing. - if step.Destroy && !empty { - empty = true - for _, change := range p.Changes.Resources { - if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode { - empty = false - break - } - } - } - - if !empty { - if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } else { - return state, fmt.Errorf( - "After applying this step and refreshing, "+ - "the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } - } - - // Made it here, but expected a non-empty plan, fail! - if step.ExpectNonEmptyPlan && empty { - return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!") - } - - // Made it here? Good job test step! - return state, nil -} - -// legacyPlanComparisonString produces a string representation of the changes -// from a plan and a given state togther, as was formerly produced by the -// String method of terraform.Plan. -// -// This is here only for compatibility with existing tests that predate our -// new plan and state types, and should not be used in new tests. Instead, use -// a library like "cmp" to do a deep equality and diff on the two -// data structures. -func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string { - return fmt.Sprintf( - "DIFF:\n\n%s\n\nSTATE:\n\n%s", - legacyDiffComparisonString(changes), - state.String(), - ) -} - -// legacyDiffComparisonString produces a string representation of the changes -// from a planned changes object, as was formerly produced by the String method -// of terraform.Diff. -// -// This is here only for compatibility with existing tests that predate our -// new plan types, and should not be used in new tests. Instead, use a library -// like "cmp" to do a deep equality check and diff on the two data structures. -func legacyDiffComparisonString(changes *plans.Changes) string { - // The old string representation of a plan was grouped by module, but - // our new plan structure is not grouped in that way and so we'll need - // to preprocess it in order to produce that grouping. - type ResourceChanges struct { - Current *plans.ResourceInstanceChangeSrc - Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc - } - byModule := map[string]map[string]*ResourceChanges{} - resourceKeys := map[string][]string{} - requiresReplace := map[string][]string{} - var moduleKeys []string - for _, rc := range changes.Resources { - if rc.Action == plans.NoOp { - // We won't mention no-op changes here at all, since the old plan - // model we are emulating here didn't have such a concept. 
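
For orientation while reading the deleted testStep logic above: a test author opts into a legitimately non-empty follow-up plan with ExpectNonEmptyPlan, otherwise the step fails as a perpetual diff. A minimal sketch, assuming a hypothetical provider map and config fixture:

```go
package example

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/terraform"
)

// Hypothetical stand-ins: a real acceptance test populates these in its setup.
var testAccProviders map[string]terraform.ResourceProvider

const testConfig = `resource "example_thing" "a" {}`

func TestAccExampleThing_nonEmptyPlan(t *testing.T) {
	resource.Test(t, resource.TestCase{
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: testConfig,
				// Without this flag, testStep above fails the step when
				// the follow-up plan still contains changes.
				ExpectNonEmptyPlan: true,
			},
		},
	})
}
```
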
- continue - } - moduleKey := rc.Addr.Module.String() - if _, exists := byModule[moduleKey]; !exists { - moduleKeys = append(moduleKeys, moduleKey) - byModule[moduleKey] = make(map[string]*ResourceChanges) - } - resourceKey := rc.Addr.Resource.String() - if _, exists := byModule[moduleKey][resourceKey]; !exists { - resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey) - byModule[moduleKey][resourceKey] = &ResourceChanges{ - Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc), - } - } - - if rc.DeposedKey == states.NotDeposed { - byModule[moduleKey][resourceKey].Current = rc - } else { - byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc - } - - rr := []string{} - for _, p := range rc.RequiredReplace.List() { - rr = append(rr, hcl2shim.FlatmapKeyFromPath(p)) - } - requiresReplace[resourceKey] = rr - } - sort.Strings(moduleKeys) - for _, ks := range resourceKeys { - sort.Strings(ks) - } - - var buf bytes.Buffer - - for _, moduleKey := range moduleKeys { - rcs := byModule[moduleKey] - var mBuf bytes.Buffer - - for _, resourceKey := range resourceKeys[moduleKey] { - rc := rcs[resourceKey] - - forceNewAttrs := requiresReplace[resourceKey] - - crud := "UPDATE" - if rc.Current != nil { - switch rc.Current.Action { - case plans.DeleteThenCreate: - crud = "DESTROY/CREATE" - case plans.CreateThenDelete: - crud = "CREATE/DESTROY" - case plans.Delete: - crud = "DESTROY" - case plans.Create: - crud = "CREATE" - } - } else { - // We must be working on a deposed object then, in which - // case destroying is the only possible action. - crud = "DESTROY" - } - - extra := "" - if rc.Current == nil && len(rc.Deposed) > 0 { - extra = " (deposed only)" - } - - fmt.Fprintf( - &mBuf, "%s: %s%s\n", - crud, resourceKey, extra, - ) - - attrNames := map[string]bool{} - var oldAttrs map[string]string - var newAttrs map[string]string - if rc.Current != nil { - if before := rc.Current.Before; before != nil { - ty, err := before.ImpliedType() - if err == nil { - val, err := before.Decode(ty) - if err == nil { - oldAttrs = hcl2shim.FlatmapValueFromHCL2(val) - for k := range oldAttrs { - attrNames[k] = true - } - } - } - } - if after := rc.Current.After; after != nil { - ty, err := after.ImpliedType() - if err == nil { - val, err := after.Decode(ty) - if err == nil { - newAttrs = hcl2shim.FlatmapValueFromHCL2(val) - for k := range newAttrs { - attrNames[k] = true - } - } - } - } - } - if oldAttrs == nil { - oldAttrs = make(map[string]string) - } - if newAttrs == nil { - newAttrs = make(map[string]string) - } - - attrNamesOrder := make([]string, 0, len(attrNames)) - keyLen := 0 - for n := range attrNames { - attrNamesOrder = append(attrNamesOrder, n) - if len(n) > keyLen { - keyLen = len(n) - } - } - sort.Strings(attrNamesOrder) - - for _, attrK := range attrNamesOrder { - v := newAttrs[attrK] - u := oldAttrs[attrK] - - if v == hcl2shim.UnknownVariableValue { - v = "" - } - // NOTE: we don't support here because we would - // need schema to do that. Excluding sensitive values - // is now done at the UI layer, and so should not be tested - // at the core layer. - - updateMsg := "" - - // This may not be as precise as in the old diff, as it matches - // everything under the attribute that was originally marked as - // ForceNew, but should help make it easier to determine what - // caused replacement here. 
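
The comment above notes that the "(forces new resource)" annotation is applied by prefix, so every flatmap key underneath a replaced attribute is flagged. A standalone sketch of that matching, with hypothetical attribute names:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Attribute reported in RequiredReplace (hypothetical).
	forceNewAttrs := []string{"subnet_ids"}
	// Flatmap keys from the diff (hypothetical).
	flatmapKeys := []string{"name", "subnet_ids.#", "subnet_ids.0"}

	for _, key := range flatmapKeys {
		msg := ""
		for _, fn := range forceNewAttrs {
			// Prefix matching flags both the container and its elements.
			if strings.HasPrefix(key, fn) {
				msg = " (forces new resource)"
				break
			}
		}
		fmt.Printf("%s:%s\n", key, msg)
	}
}
```
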
- for _, k := range forceNewAttrs { - if strings.HasPrefix(attrK, k) { - updateMsg = " (forces new resource)" - break - } - } - - fmt.Fprintf( - &mBuf, " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, v, - updateMsg, - ) - } - } - - if moduleKey == "" { // root module - buf.Write(mBuf.Bytes()) - buf.WriteByte('\n') - continue - } - - fmt.Fprintf(&buf, "%s:\n", moduleKey) - s := bufio.NewScanner(&mBuf) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - } - - return buf.String() -} - -func testStepTaint(state *terraform.State, step TestStep) error { - for _, p := range step.Taint { - m := state.RootModule() - if m == nil { - return errors.New("no state") - } - rs, ok := m.Resources[p] - if !ok { - return fmt.Errorf("resource %q not found in state", p) - } - log.Printf("[WARN] Test: Explicitly tainting resource %q", p) - rs.Taint() - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go deleted file mode 100644 index 7b7c30a7a2..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go +++ /dev/null @@ -1,233 +0,0 @@ -package resource - -import ( - "fmt" - "log" - "reflect" - "strings" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// testStepImportState runs an imort state test step -func testStepImportState( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - - // Determine the ID to import - var importId string - switch { - case step.ImportStateIdFunc != nil: - var err error - importId, err = step.ImportStateIdFunc(state) - if err != nil { - return state, err - } - case step.ImportStateId != "": - importId = step.ImportStateId - default: - resource, err := testResource(step, state) - if err != nil { - return state, err - } - importId = resource.Primary.ID - } - - importPrefix := step.ImportStateIdPrefix - if importPrefix != "" { - importId = fmt.Sprintf("%s%s", importPrefix, importId) - } - - // Setup the context. We initialize with an empty state. We use the - // full config for provider configurations. - cfg, err := testConfig(opts, step) - if err != nil { - return state, err - } - - opts.Config = cfg - - // import tests start with empty state - opts.State = states.NewState() - - ctx, stepDiags := terraform.NewContext(&opts) - if stepDiags.HasErrors() { - return state, stepDiags.Err() - } - - // The test step provides the resource address as a string, so we need - // to parse it to get an addrs.AbsResourceAddress to pass in to the - // import method. 
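
The switch above resolves the import ID with a fixed precedence: ImportStateIdFunc first, then a literal ImportStateId, then the primary ID already in state, with ImportStateIdPrefix prepended afterwards. A sketch of the three options from a test author's side (resource name and attribute are hypothetical):

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/terraform"
)

func exampleImportSteps() []resource.TestStep {
	return []resource.TestStep{
		// 1. Highest precedence: compute the ID from prior state.
		{
			ResourceName: "example_thing.a",
			ImportState:  true,
			ImportStateIdFunc: func(s *terraform.State) (string, error) {
				rs, ok := s.RootModule().Resources["example_thing.a"]
				if !ok {
					return "", fmt.Errorf("resource not found in state")
				}
				return rs.Primary.Attributes["name"], nil
			},
		},
		// 2. A literal ID, optionally combined with a prefix.
		{
			ResourceName:        "example_thing.a",
			ImportState:         true,
			ImportStateId:       "thing-123",
			ImportStateIdPrefix: "org/", // imported as "org/thing-123"
		},
		// 3. Default: the primary ID already recorded in state.
		{
			ResourceName: "example_thing.a",
			ImportState:  true,
		},
	}
}
```
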
- traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{}) - if hclDiags.HasErrors() { - return nil, hclDiags - } - importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal) - if stepDiags.HasErrors() { - return nil, stepDiags.Err() - } - - // Do the import - importedState, stepDiags := ctx.Import(&terraform.ImportOpts{ - // Set the module so that any provider config is loaded - Config: cfg, - - Targets: []*terraform.ImportTarget{ - { - Addr: importAddr, - ID: importId, - }, - }, - }) - if stepDiags.HasErrors() { - log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err()) - return state, stepDiags.Err() - } - - newState, err := shimNewState(importedState, step.providers) - if err != nil { - return nil, err - } - - // Go through the new state and verify - if step.ImportStateCheck != nil { - var states []*terraform.InstanceState - for _, r := range newState.RootModule().Resources { - if r.Primary != nil { - is := r.Primary.DeepCopy() - is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type - states = append(states, is) - } - } - if err := step.ImportStateCheck(states); err != nil { - return state, err - } - } - - // Verify that all the states match - if step.ImportStateVerify { - new := newState.RootModule().Resources - old := state.RootModule().Resources - for _, r := range new { - // Find the existing resource - var oldR *terraform.ResourceState - for _, r2 := range old { - if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type { - oldR = r2 - break - } - } - if oldR == nil { - return state, fmt.Errorf( - "Failed state verification, resource with ID %s not found", - r.Primary.ID) - } - - // We'll try our best to find the schema for this resource type - // so we can ignore Removed fields during validation. If we fail - // to find the schema then we won't ignore them and so the test - // will need to rely on explicit ImportStateVerifyIgnore, though - // this shouldn't happen in any reasonable case. - var rsrcSchema *schema.Resource - if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() { - providerType := providerAddr.ProviderConfig.Type - if provider, ok := step.providers[providerType]; ok { - if provider, ok := provider.(*schema.Provider); ok { - rsrcSchema = provider.ResourcesMap[r.Type] - } - } - } - - // don't add empty flatmapped containers, so we can more easily - // compare the attributes - skipEmpty := func(k, v string) bool { - if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { - if v == "0" { - return true - } - } - return false - } - - // Compare their attributes - actual := make(map[string]string) - for k, v := range r.Primary.Attributes { - if skipEmpty(k, v) { - continue - } - actual[k] = v - } - - expected := make(map[string]string) - for k, v := range oldR.Primary.Attributes { - if skipEmpty(k, v) { - continue - } - expected[k] = v - } - - // Remove fields we're ignoring - for _, v := range step.ImportStateVerifyIgnore { - for k := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - // Also remove any attributes that are marked as "Removed" in the - // schema, if we have a schema to check that against. 
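
The skipEmpty closure above is small enough to lift out verbatim; this standalone sketch shows which flatmap keys it drops before the attribute comparison (".#" is a list/set count, ".%" a map count):

```go
package main

import (
	"fmt"
	"strings"
)

// skipEmpty is lifted verbatim from the deleted helper above: empty
// container counts are ignored, so a state that never wrote "tags.%"
// compares equal to one that wrote it as 0.
func skipEmpty(k, v string) bool {
	if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") {
		if v == "0" {
			return true
		}
	}
	return false
}

func main() {
	attrs := []struct{ k, v string }{
		{"tags.%", "0"},        // skipped: empty map count
		{"subnet_ids.#", "2"},  // kept: non-empty list count
		{"name", "a"},          // kept: ordinary attribute
	}
	for _, a := range attrs {
		fmt.Printf("%s=%s skipped=%v\n", a.k, a.v, skipEmpty(a.k, a.v))
	}
}
```
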
- if rsrcSchema != nil { - for k := range actual { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(actual, k) - break - } - } - } - for k := range expected { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(expected, k) - break - } - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return state, fmt.Errorf( - "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - } - } - - // Return the old state (non-imported) so we don't change anything. - return state, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new.go deleted file mode 100644 index 30e210bb32..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new.go +++ /dev/null @@ -1,246 +0,0 @@ -package resource - -import ( - "fmt" - "log" - "reflect" - "strings" - "testing" - - "github.com/davecgh/go-spew/spew" - tfjson "github.com/hashicorp/terraform-json" - "github.com/hashicorp/terraform-plugin-sdk/acctest" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - tftest "github.com/hashicorp/terraform-plugin-test/v2" -) - -func runPostTestDestroy(t *testing.T, c TestCase, wd *tftest.WorkingDir, factories map[string]terraform.ResourceProviderFactory, statePreDestroy *terraform.State) error { - t.Helper() - - err := runProviderCommand(t, func() error { - wd.RequireDestroy(t) - return nil - }, wd, factories) - if err != nil { - return err - } - - if c.CheckDestroy != nil { - if err := c.CheckDestroy(statePreDestroy); err != nil { - return err - } - } - - return nil -} - -func RunNewTest(t *testing.T, c TestCase, providers map[string]terraform.ResourceProvider) { - t.Helper() - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - wd := acctest.TestHelper.RequireNewWorkingDir(t) - - defer func() { - var statePreDestroy *terraform.State - err := runProviderCommand(t, func() error { - statePreDestroy = getState(t, wd) - return nil - }, wd, c.ProviderFactories) - if err != nil { - t.Fatalf("Error retrieving state, there may be dangling resources: %s", err.Error()) - return - } - - if !stateIsEmpty(statePreDestroy) { - err := runPostTestDestroy(t, c, wd, c.ProviderFactories, statePreDestroy) - if err != nil { - t.Fatalf("Error running post-test destroy, there may be dangling resources: %s", err.Error()) - } - } - - wd.Close() - }() - - providerCfg, err := testProviderConfig(c) - if err != nil { - t.Fatal(err) - } - - wd.RequireSetConfig(t, providerCfg) - - err = runProviderCommand(t, func() error { - return wd.Init() - }, wd, c.ProviderFactories) - if err != nil { - t.Fatalf("Error running init: %s", err.Error()) - return - } - - // use this to track last step succesfully applied - // acts as default for import tests - var appliedCfg string - - for i, step := range c.Steps { - - if step.PreConfig != nil { - step.PreConfig() - } - - if step.SkipFunc != nil { - skip, err := step.SkipFunc() - if err != nil { - t.Fatal(err) - } - if skip { - log.Printf("[WARN] Skipping step %d/%d", i+1, len(c.Steps)) - 
continue - } - } - - if step.ImportState { - step.providers = providers - err := testStepNewImportState(t, c, wd, step, appliedCfg) - if step.ExpectError != nil { - if err == nil { - t.Fatalf("Step %d/%d error running import: expected an error but got none", i+1, len(c.Steps)) - } - if !step.ExpectError.MatchString(err.Error()) { - t.Fatalf("Step %d/%d error running import, expected an error with pattern (%s), no match on: %s", i+1, len(c.Steps), step.ExpectError.String(), err) - } - } else { - if err != nil { - t.Fatalf("Step %d/%d error running import: %s", i+1, len(c.Steps), err) - } - } - continue - } - - if step.Config != "" { - err := testStepNewConfig(t, c, wd, step) - if step.ExpectError != nil { - if err == nil { - t.Fatalf("Step %d/%d, expected an error but got none", i+1, len(c.Steps)) - } - if !step.ExpectError.MatchString(err.Error()) { - t.Fatalf("Step %d/%d, expected an error with pattern, no match on: %s", i+1, len(c.Steps), err) - } - } else { - if err != nil { - t.Fatalf("Step %d/%d error: %s", i+1, len(c.Steps), err) - } - } - appliedCfg = step.Config - continue - } - - t.Fatal("Unsupported test mode") - } -} - -func getState(t *testing.T, wd *tftest.WorkingDir) *terraform.State { - t.Helper() - - jsonState := wd.RequireState(t) - state, err := shimStateFromJson(jsonState) - if err != nil { - t.Fatal(err) - } - return state -} - -func stateIsEmpty(state *terraform.State) bool { - return state.Empty() || !state.HasResources() -} - -func planIsEmpty(plan *tfjson.Plan) bool { - for _, rc := range plan.ResourceChanges { - if rc.Mode == tfjson.DataResourceMode { - // Skip data sources as the current implementation ignores - // existing state and they are all re-read every time - continue - } - - for _, a := range rc.Change.Actions { - if a != tfjson.ActionNoop { - return false - } - } - } - return true -} - -func testIDRefresh(c TestCase, t *testing.T, wd *tftest.WorkingDir, step TestStep, r *terraform.ResourceState) error { - t.Helper() - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - - // Build the state. The state is just the resource with an ID. There - // are no attributes. We only set what is needed to perform a refresh. - state := terraform.NewState() - state.RootModule().Resources = make(map[string]*terraform.ResourceState) - state.RootModule().Resources[c.IDRefreshName] = &terraform.ResourceState{} - - // Temporarily set the config to a minimal provider config for the refresh - // test. After the refresh we can reset it. - cfg, err := testProviderConfig(c) - if err != nil { - return err - } - wd.RequireSetConfig(t, cfg) - defer wd.RequireSetConfig(t, step.Config) - - // Refresh! - err = runProviderCommand(t, func() error { - wd.RequireRefresh(t) - state = getState(t, wd) - return nil - }, wd, c.ProviderFactories) - if err != nil { - return err - } - - // Verify attribute equivalence. 
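
planIsEmpty above is the crux of the binary-driver perpetual-diff check: data sources are ignored because they are re-read on every run, so only non-no-op actions on managed resources make a plan "non-empty". A self-contained mirror of that logic against hashicorp/terraform-json:

```go
package main

import (
	"fmt"

	tfjson "github.com/hashicorp/terraform-json"
)

// planIsEmpty mirrors the deleted helper above.
func planIsEmpty(plan *tfjson.Plan) bool {
	for _, rc := range plan.ResourceChanges {
		if rc.Mode == tfjson.DataResourceMode {
			// Data sources are re-read every run; they never count.
			continue
		}
		for _, a := range rc.Change.Actions {
			if a != tfjson.ActionNoop {
				return false
			}
		}
	}
	return true
}

func main() {
	plan := &tfjson.Plan{ResourceChanges: []*tfjson.ResourceChange{
		{Mode: tfjson.DataResourceMode, Change: &tfjson.Change{Actions: tfjson.Actions{tfjson.ActionRead}}},
		{Mode: tfjson.ManagedResourceMode, Change: &tfjson.Change{Actions: tfjson.Actions{tfjson.ActionNoop}}},
	}}
	fmt.Println(planIsEmpty(plan)) // true: only a data read and a no-op
}
```
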
- actualR := state.RootModule().Resources[c.IDRefreshName] - if actualR == nil { - return fmt.Errorf("Resource gone!") - } - if actualR.Primary == nil { - return fmt.Errorf("Resource has no primary instance") - } - actual := actualR.Primary.Attributes - expected := r.Primary.Attributes - // Remove fields we're ignoring - for _, v := range c.IDRefreshIgnore { - for k := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return fmt.Errorf( - "Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new_config.go deleted file mode 100644 index 7b0f7455eb..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new_config.go +++ /dev/null @@ -1,226 +0,0 @@ -package resource - -import ( - "fmt" - "testing" - - tfjson "github.com/hashicorp/terraform-json" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - tftest "github.com/hashicorp/terraform-plugin-test/v2" -) - -func testStepNewConfig(t *testing.T, c TestCase, wd *tftest.WorkingDir, step TestStep) error { - t.Helper() - - var idRefreshCheck *terraform.ResourceState - idRefresh := c.IDRefreshName != "" - - if !step.Destroy { - var state *terraform.State - err := runProviderCommand(t, func() error { - state = getState(t, wd) - return nil - }, wd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error retrieving state: %v", err) - } - if err := testStepTaint(state, step); err != nil { - return fmt.Errorf("Error when tainting resources: %s", err) - } - } - - err := wd.SetConfig(step.Config) - if err != nil { - return fmt.Errorf("Error setting config: %s", err) - } - - // require a refresh before applying - // failing to do this will result in data sources not being updated - err = runProviderCommand(t, func() error { - return wd.Refresh() - }, wd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error running pre-apply refresh: %v", err) - } - - // If this step is a PlanOnly step, skip over this first Plan and - // subsequent Apply, and use the follow-up Plan that checks for - // permadiffs - if !step.PlanOnly { - // Plan! 
- err = runProviderCommand(t, func() error { - if step.Destroy { - return wd.CreateDestroyPlan() - } - return wd.CreatePlan() - }, wd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error running pre-apply plan: %s", err) - } - - // We need to keep a copy of the state prior to destroying such - // that the destroy steps can verify their behavior in the - // check function - var stateBeforeApplication *terraform.State - err = runProviderCommand(t, func() error { - stateBeforeApplication = getState(t, wd) - return nil - }, wd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error retrieving pre-apply state: %s", err) - } - - // Apply the diff, creating real resources - err = runProviderCommand(t, func() error { - return wd.Apply() - }, wd, c.ProviderFactories) - if err != nil { - if step.Destroy { - return fmt.Errorf("Error running destroy: %s", err) - } - return fmt.Errorf("Error running apply: %s", err) - } - - var state *terraform.State - err = runProviderCommand(t, func() error { - state = getState(t, wd) - return nil - }, wd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("error retrieving state after apply: %v", err) - } - if step.Check != nil { - state.IsBinaryDrivenTest = true - if step.Destroy { - if err := step.Check(stateBeforeApplication); err != nil { - return fmt.Errorf("Check failed: %s", err) - } - } else { - if err := step.Check(state); err != nil { - return fmt.Errorf("Check failed: %s", err) - } - } - } - } - - // Test for perpetual diffs by performing a plan, a refresh, and another plan - - // do a plan - err = runProviderCommand(t, func() error { - if step.Destroy { - return wd.CreateDestroyPlan() - } - return wd.CreatePlan() - }, wd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error running post-apply plan: %s", err) - } - - var plan *tfjson.Plan - err = runProviderCommand(t, func() error { - var err error - plan, err = wd.SavedPlan() - return err - }, wd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error retrieving post-apply plan: %s", err) - } - - if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { - var stdout string - err = runProviderCommand(t, func() error { - var err error - stdout, err = wd.SavedPlanStdout() - return err - }, wd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error retrieving formatted plan output: %s", err) - } - return fmt.Errorf("After applying this test step, the plan was not empty.\nstdout:\n\n%s", stdout) - } - - // do a refresh - if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { - err := runProviderCommand(t, func() error { - return wd.Refresh() - }, wd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error running post-apply refresh: %s", err) - } - } - - // do another plan - err = runProviderCommand(t, func() error { - if step.Destroy { - return wd.CreateDestroyPlan() - } - return wd.CreatePlan() - }, wd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error running second post-apply plan: %s", err) - } - - err = runProviderCommand(t, func() error { - var err error - plan, err = wd.SavedPlan() - return err - }, wd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error retrieving second post-apply plan: %s", err) - } - - // check if plan is empty - if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { - var stdout string - err = runProviderCommand(t, func() error { - var err error - stdout, err = wd.SavedPlanStdout() - return err - }, wd, c.ProviderFactories) - if err != nil { - return 
fmt.Errorf("Error retrieving formatted second plan output: %s", err) - } - return fmt.Errorf("After applying this test step and performing a `terraform refresh`, the plan was not empty.\nstdout\n\n%s", stdout) - } else if step.ExpectNonEmptyPlan && planIsEmpty(plan) { - return fmt.Errorf("Expected a non-empty plan, but got an empty plan!") - } - - // ID-ONLY REFRESH - // If we've never checked an id-only refresh and our state isn't - // empty, find the first resource and test it. - var state *terraform.State - err = runProviderCommand(t, func() error { - state = getState(t, wd) - return nil - }, wd, c.ProviderFactories) - if err != nil { - return err - } - if idRefresh && idRefreshCheck == nil && !state.Empty() { - // Find the first non-nil resource in the state - for _, m := range state.Modules { - if len(m.Resources) > 0 { - if v, ok := m.Resources[c.IDRefreshName]; ok { - idRefreshCheck = v - } - - break - } - } - - // If we have an instance to check for refreshes, do it - // immediately. We do it in the middle of another test - // because it shouldn't affect the overall state (refresh - // is read-only semantically) and we want to fail early if - // this fails. If refresh isn't read-only, then this will have - // caught a different bug. - if idRefreshCheck != nil { - if err := testIDRefresh(c, t, wd, step, idRefreshCheck); err != nil { - return fmt.Errorf( - "[ERROR] Test: ID-only test failed: %s", err) - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new_import_state.go deleted file mode 100644 index f19153713d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_new_import_state.go +++ /dev/null @@ -1,258 +0,0 @@ -package resource - -import ( - "fmt" - "reflect" - "strings" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/terraform-plugin-sdk/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - tftest "github.com/hashicorp/terraform-plugin-test/v2" -) - -func testStepNewImportState(t *testing.T, c TestCase, wd *tftest.WorkingDir, step TestStep, cfg string) error { - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - - if step.ResourceName == "" { - t.Fatal("ResourceName is required for an import state test") - } - - // get state from check sequence - var state *terraform.State - err := runProviderCommand(t, func() error { - state = getState(t, wd) - return nil - }, wd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error getting state: %v", err) - } - - // Determine the ID to import - var importId string - switch { - case step.ImportStateIdFunc != nil: - var err error - importId, err = step.ImportStateIdFunc(state) - if err != nil { - t.Fatal(err) - } - case step.ImportStateId != "": - importId = step.ImportStateId - default: - resource, err := testResource(step, state) - if err != nil { - t.Fatal(err) - } - importId = resource.Primary.ID - } - importId = step.ImportStateIdPrefix + importId - - // Create working directory for import tests - if step.Config == "" { - step.Config = cfg - if step.Config == "" { - t.Fatal("Cannot import state with no specified config") - } - } - importWd := acctest.TestHelper.RequireNewWorkingDir(t) - defer importWd.Close() - importWd.RequireSetConfig(t, step.Config) - err = 
runProviderCommand(t, func() error { - return importWd.Init() - }, importWd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error running init: %v", err) - } - - err = runProviderCommand(t, func() error { - return importWd.Import(step.ResourceName, importId) - }, importWd, c.ProviderFactories) - if err != nil { - return err - } - - var importState *terraform.State - err = runProviderCommand(t, func() error { - importState = getState(t, importWd) - return nil - }, importWd, c.ProviderFactories) - if err != nil { - return fmt.Errorf("Error getting state after import: %v", err) - } - - // Go through the imported state and verify - if step.ImportStateCheck != nil { - var states []*terraform.InstanceState - for _, r := range importState.RootModule().Resources { - if r.Primary != nil { - is := r.Primary.DeepCopy() - is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type - states = append(states, is) - } - } - if err := step.ImportStateCheck(states); err != nil { - t.Fatal(err) - } - } - - // Verify that all the states match - if step.ImportStateVerify { - new := importState.RootModule().Resources - old := state.RootModule().Resources - - for _, r := range new { - // Find the existing resource - var oldR *terraform.ResourceState - for r2Key, r2 := range old { - // Ensure that we do not match against data sources as they - // cannot be imported and are not what we want to verify. - // Mode is not present in ResourceState so we use the - // stringified ResourceStateKey for comparison. - if strings.HasPrefix(r2Key, "data.") { - continue - } - - if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type && r2.Provider == r.Provider { - oldR = r2 - break - } - } - if oldR == nil { - t.Fatalf( - "Failed state verification, resource with ID %s not found", - r.Primary.ID) - } - - // We'll try our best to find the schema for this resource type - // so we can ignore Removed fields during validation. If we fail - // to find the schema then we won't ignore them and so the test - // will need to rely on explicit ImportStateVerifyIgnore, though - // this shouldn't happen in any reasonable case. - // KEM CHANGE FROM OLD FRAMEWORK: Fail test if this happens. - var rsrcSchema *schema.Resource - - // r.Provider at this point is `registry.terraform.io/hashicorp/blah` but we need `blah` - val, tfdiags := addrs.ParseProviderSourceString(r.Provider) - if tfdiags.HasErrors() { - t.Fatal(tfdiags.Err()) - } - providerName := val.Type - providerAddr, diags := addrs.ParseAbsProviderConfigStr("provider." + providerName + "." 
+ r.Type) - if diags.HasErrors() { - t.Fatalf("Failed to find schema for resource with ID %s", r.Primary) - } - - providerType := providerAddr.ProviderConfig.Type - if provider, ok := step.providers[providerType]; ok { - if provider, ok := provider.(*schema.Provider); ok { - rsrcSchema = provider.ResourcesMap[r.Type] - } - } - - // don't add empty flatmapped containers, so we can more easily - // compare the attributes - skipEmpty := func(k, v string) bool { - if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { - if v == "0" { - return true - } - } - return false - } - - // Compare their attributes - actual := make(map[string]string) - for k, v := range r.Primary.Attributes { - if skipEmpty(k, v) { - continue - } - actual[k] = v - } - - expected := make(map[string]string) - for k, v := range oldR.Primary.Attributes { - if skipEmpty(k, v) { - continue - } - expected[k] = v - } - - // Remove fields we're ignoring - for _, v := range step.ImportStateVerifyIgnore { - for k := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - // Also remove any attributes that are marked as "Removed" in the - // schema, if we have a schema to check that against. - if rsrcSchema != nil { - for k := range actual { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(actual, k) - break - } - } - } - for k := range expected { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(expected, k) - break - } - } - } - } - - // timeouts are only _sometimes_ added to state. To - // account for this, just don't compare timeouts at - // all. - for k := range actual { - if strings.HasPrefix(k, "timeouts.") { - delete(actual, k) - } - if k == "timeouts" { - delete(actual, k) - } - } - for k := range expected { - if strings.HasPrefix(k, "timeouts.") { - delete(expected, k) - } - if k == "timeouts" { - delete(expected, k) - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - t.Fatalf( - "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/wait.go deleted file mode 100644 index e56a5155d1..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/wait.go +++ /dev/null @@ -1,84 +0,0 @@ -package resource - -import ( - "sync" - "time" -) - -// Retry is a basic wrapper around StateChangeConf that will just retry -// a function until it no longer returns an error. -func Retry(timeout time.Duration, f RetryFunc) error { - // These are used to pull the error out of the function; need a mutex to - // avoid a data race. 
- var resultErr error - var resultErrMu sync.Mutex - - c := &StateChangeConf{ - Pending: []string{"retryableerror"}, - Target: []string{"success"}, - Timeout: timeout, - MinTimeout: 500 * time.Millisecond, - Refresh: func() (interface{}, string, error) { - rerr := f() - - resultErrMu.Lock() - defer resultErrMu.Unlock() - - if rerr == nil { - resultErr = nil - return 42, "success", nil - } - - resultErr = rerr.Err - - if rerr.Retryable { - return 42, "retryableerror", nil - } - return nil, "quit", rerr.Err - }, - } - - _, waitErr := c.WaitForState() - - // Need to acquire the lock here to be able to avoid race using resultErr as - // the return value - resultErrMu.Lock() - defer resultErrMu.Unlock() - - // resultErr may be nil because the wait timed out and resultErr was never - // set; this is still an error - if resultErr == nil { - return waitErr - } - // resultErr takes precedence over waitErr if both are set because it is - // more likely to be useful - return resultErr -} - -// RetryFunc is the function retried until it succeeds. -type RetryFunc func() *RetryError - -// RetryError is the required return type of RetryFunc. It forces client code -// to choose whether or not a given error is retryable. -type RetryError struct { - Err error - Retryable bool -} - -// RetryableError is a helper to create a RetryError that's retryable from a -// given error. -func RetryableError(err error) *RetryError { - if err == nil { - return nil - } - return &RetryError{Err: err, Retryable: true} -} - -// NonRetryableError is a helper to create a RetryError that's _not_ retryable -// from a given error. -func NonRetryableError(err error) *RetryError { - if err == nil { - return nil - } - return &RetryError{Err: err, Retryable: false} -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go deleted file mode 100644 index 609c208b36..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go +++ /dev/null @@ -1,200 +0,0 @@ -package schema - -import ( - "context" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - ctyconvert "github.com/zclconf/go-cty/cty/convert" -) - -// Backend represents a partial backend.Backend implementation and simplifies -// the creation of configuration loading and validation. -// -// Unlike other schema structs such as Provider, this struct is meant to be -// embedded within your actual implementation. It provides implementations -// only for Input and Configure and gives you a method for accessing the -// configuration in the form of a ResourceData that you're expected to call -// from the other implementation funcs. -type Backend struct { - // Schema is the schema for the configuration of this backend. If this - // Backend has no configuration this can be omitted. - Schema map[string]*Schema - - // ConfigureFunc is called to configure the backend. Use the - // FromContext* methods to extract information from the context. - // This can be nil, in which case nothing will be called but the - // config will still be stored. 
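
A usage sketch for the Retry/RetryableError helpers deleted above (wait.go); the URL and the status-code policy are illustrative only:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

// fetchWithRetry retries for up to two minutes, treating 5xx responses as
// retryable and everything else as fatal.
func fetchWithRetry(url string) error {
	return resource.Retry(2*time.Minute, func() *resource.RetryError {
		resp, err := http.Get(url)
		if err != nil {
			return resource.NonRetryableError(err)
		}
		defer resp.Body.Close()

		if resp.StatusCode >= 500 {
			// Transient server error: keep polling until the timeout.
			return resource.RetryableError(fmt.Errorf("server error: %s", resp.Status))
		}
		if resp.StatusCode != http.StatusOK {
			return resource.NonRetryableError(fmt.Errorf("unexpected status: %s", resp.Status))
		}
		return nil // success: Retry stops and returns nil
	})
}

func main() {
	if err := fetchWithRetry("https://example.com/health"); err != nil {
		fmt.Println("gave up:", err)
	}
}
```
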
- ConfigureFunc func(context.Context) error - - config *ResourceData -} - -var ( - backendConfigKey = contextKey("backend config") -) - -// FromContextBackendConfig extracts a ResourceData with the configuration -// from the context. This should only be called by Backend functions. -func FromContextBackendConfig(ctx context.Context) *ResourceData { - return ctx.Value(backendConfigKey).(*ResourceData) -} - -func (b *Backend) ConfigSchema() *configschema.Block { - // This is an alias of CoreConfigSchema just to implement the - // backend.Backend interface. - return b.CoreConfigSchema() -} - -func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { - if b == nil { - return configVal, nil - } - var diags tfdiags.Diagnostics - var err error - - // In order to use Transform below, this needs to be filled out completely - // according the schema. - configVal, err = b.CoreConfigSchema().CoerceValue(configVal) - if err != nil { - return configVal, diags.Append(err) - } - - // lookup any required, top-level attributes that are Null, and see if we - // have a Default value available. - configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { - // we're only looking for top-level attributes - if len(path) != 1 { - return val, nil - } - - // nothing to do if we already have a value - if !val.IsNull() { - return val, nil - } - - // get the Schema definition for this attribute - getAttr, ok := path[0].(cty.GetAttrStep) - // these should all exist, but just ignore anything strange - if !ok { - return val, nil - } - - attrSchema := b.Schema[getAttr.Name] - // continue to ignore anything that doesn't match - if attrSchema == nil { - return val, nil - } - - // this is deprecated, so don't set it - if attrSchema.Deprecated != "" || attrSchema.Removed != "" { - return val, nil - } - - // find a default value if it exists - def, err := attrSchema.DefaultValue() - if err != nil { - diags = diags.Append(fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) - return val, err - } - - // no default - if def == nil { - return val, nil - } - - // create a cty.Value and make sure it's the correct type - tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) - - // helper/schema used to allow setting "" to a bool - if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { - // return a warning about the conversion - diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name) - tmpVal = cty.False - } - - val, err = ctyconvert.Convert(tmpVal, val.Type()) - if err != nil { - diags = diags.Append(fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) - } - - return val, err - }) - if err != nil { - // any error here was already added to the diagnostics - return configVal, diags - } - - shimRC := b.shimConfig(configVal) - warns, errs := schemaMap(b.Schema).Validate(shimRC) - for _, warn := range warns { - diags = diags.Append(tfdiags.SimpleWarning(warn)) - } - for _, err := range errs { - diags = diags.Append(err) - } - return configVal, diags -} - -func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics { - if b == nil { - return nil - } - - var diags tfdiags.Diagnostics - sm := schemaMap(b.Schema) - shimRC := b.shimConfig(obj) - - // Get a ResourceData for this configuration. To do this, we actually - // generate an intermediary "diff" although that is never exposed. 
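
The Backend doc above says the struct is meant to be embedded, with ConfigureFunc recovering the validated configuration from the context. A minimal sketch of that pattern against this (v1) package; the backend name and field are hypothetical:

```go
package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// exampleBackend embeds schema.Backend, which supplies PrepareConfig and
// Configure; only the schema and ConfigureFunc are ours.
type exampleBackend struct {
	*schema.Backend

	address string
}

func newExampleBackend() *exampleBackend {
	b := &exampleBackend{}
	b.Backend = &schema.Backend{
		Schema: map[string]*schema.Schema{
			"address": {Type: schema.TypeString, Required: true},
		},
		ConfigureFunc: func(ctx context.Context) error {
			// Pull the stored ResourceData back out of the context.
			data := schema.FromContextBackendConfig(ctx)
			b.address = data.Get("address").(string)
			fmt.Println("configured with", b.address)
			return nil
		},
	}
	return b
}
```
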
- diff, err := sm.Diff(nil, shimRC, nil, nil, true) - if err != nil { - diags = diags.Append(err) - return diags - } - - data, err := sm.Data(nil, diff) - if err != nil { - diags = diags.Append(err) - return diags - } - b.config = data - - if b.ConfigureFunc != nil { - err = b.ConfigureFunc(context.WithValue( - context.Background(), backendConfigKey, data)) - if err != nil { - diags = diags.Append(err) - return diags - } - } - - return diags -} - -// shimConfig turns a new-style cty.Value configuration (which must be of -// an object type) into a minimal old-style *terraform.ResourceConfig object -// that should be populated enough to appease the not-yet-updated functionality -// in this package. This should be removed once everything is updated. -func (b *Backend) shimConfig(obj cty.Value) *terraform.ResourceConfig { - shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{}) - if !ok { - // If the configVal was nil, we still want a non-nil map here. - shimMap = map[string]interface{}{} - } - return &terraform.ResourceConfig{ - Config: shimMap, - Raw: shimMap, - } -} - -// Config returns the configuration. This is available after Configure is -// called. -func (b *Backend) Config() *ResourceData { - return b.config -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/equal.go deleted file mode 100644 index d5e20e0388..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/equal.go +++ /dev/null @@ -1,6 +0,0 @@ -package schema - -// Equal is an interface that checks for deep equality between two objects. -type Equal interface { - Equal(interface{}) bool -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/json.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/json.go deleted file mode 100644 index 265099a6b6..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/json.go +++ /dev/null @@ -1,12 +0,0 @@ -package schema - -import ( - "bytes" - "encoding/json" -) - -func unmarshalJSON(data []byte, v interface{}) error { - dec := json.NewDecoder(bytes.NewReader(data)) - dec.UseNumber() - return dec.Decode(v) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/provider.go deleted file mode 100644 index 2f88f1eab9..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/provider.go +++ /dev/null @@ -1,474 +0,0 @@ -package schema - -import ( - "context" - "errors" - "fmt" - "sort" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -var ReservedProviderFields = []string{ - "alias", - "version", -} - -// Provider represents a resource provider in Terraform, and properly -// implements all of the ResourceProvider API. -// -// By defining a schema for the configuration of the provider, the -// map of supporting resources, and a configuration function, the schema -// framework takes over and handles all the provider operations for you. -// -// After defining the provider structure, it is unlikely that you'll require any -// of the methods on Provider itself. -type Provider struct { - // Schema is the schema for the configuration of this provider. If this - // provider has no configuration, this can be omitted. 
- // - // The keys of this map are the configuration keys, and the value is - // the schema describing the value of the configuration. - Schema map[string]*Schema - - // ResourcesMap is the list of available resources that this provider - // can manage, along with their Resource structure defining their - // own schemas and CRUD operations. - // - // Provider automatically handles routing operations such as Apply, - // Diff, etc. to the proper resource. - ResourcesMap map[string]*Resource - - // DataSourcesMap is the collection of available data sources that - // this provider implements, with a Resource instance defining - // the schema and Read operation of each. - // - // Resource instances for data sources must have a Read function - // and must *not* implement Create, Update or Delete. - DataSourcesMap map[string]*Resource - - // ConfigureFunc is a function for configuring the provider. If the - // provider doesn't need to be configured, this can be omitted. - // - // See the ConfigureFunc documentation for more information. - ConfigureFunc ConfigureFunc - - // MetaReset is called by TestReset to reset any state stored in the meta - // interface. This is especially important if the StopContext is stored by - // the provider. - MetaReset func() error - - meta interface{} - - // a mutex is required because TestReset can directly replace the stopCtx - stopMu sync.Mutex - stopCtx context.Context - stopCtxCancel context.CancelFunc - stopOnce sync.Once - - TerraformVersion string -} - -// ConfigureFunc is the function used to configure a Provider. -// -// The interface{} value returned by this function is stored and passed into -// the subsequent resources as the meta parameter. This return value is -// usually used to pass along a configured API client, a configuration -// structure, etc. -type ConfigureFunc func(*ResourceData) (interface{}, error) - -// InternalValidate should be called to validate the structure -// of the provider. -// -// This should be called in a unit test for any provider to verify -// before release that a provider is properly configured for use with -// this library. -func (p *Provider) InternalValidate() error { - if p == nil { - return errors.New("provider is nil") - } - - var validationErrors error - sm := schemaMap(p.Schema) - if err := sm.InternalValidate(sm); err != nil { - validationErrors = multierror.Append(validationErrors, err) - } - - // Provider-specific checks - for k := range sm { - if isReservedProviderFieldName(k) { - return fmt.Errorf("%s is a reserved field name for a provider", k) - } - } - - for k, r := range p.ResourcesMap { - if err := r.InternalValidate(nil, true); err != nil { - validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err)) - } - } - - for k, r := range p.DataSourcesMap { - if err := r.InternalValidate(nil, false); err != nil { - validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err)) - } - } - - return validationErrors -} - -func isReservedProviderFieldName(name string) bool { - for _, reservedName := range ReservedProviderFields { - if name == reservedName { - return true - } - } - return false -} - -// Meta returns the metadata associated with this provider that was -// returned by the Configure call. It will be nil until Configure is called. -func (p *Provider) Meta() interface{} { - return p.meta -} - -// SetMeta can be used to forcefully set the Meta object of the provider. 
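
A minimal sketch of the Provider shape documented above, wiring Schema, ResourcesMap, and a ConfigureFunc whose return value becomes the meta passed to every resource operation; the resource name and client type are hypothetical:

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// apiClient is a hypothetical configured client handed to resources as meta.
type apiClient struct{ token string }

func exampleProvider() *schema.Provider {
	return &schema.Provider{
		Schema: map[string]*schema.Schema{
			"token": {Type: schema.TypeString, Required: true, Sensitive: true},
		},
		ResourcesMap: map[string]*schema.Resource{
			// Create/Read/Delete funcs omitted in this sketch.
			"example_thing": {},
		},
		// The returned value is stored by Configure and surfaced via
		// p.Meta() to every resource CRUD function.
		ConfigureFunc: func(d *schema.ResourceData) (interface{}, error) {
			return &apiClient{token: d.Get("token").(string)}, nil
		},
	}
}
```
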
-// Note that if Configure is called the return value will override anything -// set here. -func (p *Provider) SetMeta(v interface{}) { - p.meta = v -} - -// Stopped reports whether the provider has been stopped or not. -func (p *Provider) Stopped() bool { - ctx := p.StopContext() - select { - case <-ctx.Done(): - return true - default: - return false - } -} - -// StopCh returns a channel that is closed once the provider is stopped. -func (p *Provider) StopContext() context.Context { - p.stopOnce.Do(p.stopInit) - - p.stopMu.Lock() - defer p.stopMu.Unlock() - - return p.stopCtx -} - -func (p *Provider) stopInit() { - p.stopMu.Lock() - defer p.stopMu.Unlock() - - p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) -} - -// Stop implementation of terraform.ResourceProvider interface. -func (p *Provider) Stop() error { - p.stopOnce.Do(p.stopInit) - - p.stopMu.Lock() - defer p.stopMu.Unlock() - - p.stopCtxCancel() - return nil -} - -// TestReset resets any state stored in the Provider, and will call TestReset -// on Meta if it implements the TestProvider interface. -// This may be used to reset the schema.Provider at the start of a test, and is -// automatically called by resource.Test. -func (p *Provider) TestReset() error { - p.stopInit() - if p.MetaReset != nil { - return p.MetaReset() - } - return nil -} - -// GetSchema implementation of terraform.ResourceProvider interface -func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { - resourceTypes := map[string]*configschema.Block{} - dataSources := map[string]*configschema.Block{} - - for _, name := range req.ResourceTypes { - if r, exists := p.ResourcesMap[name]; exists { - resourceTypes[name] = r.CoreConfigSchema() - } - } - for _, name := range req.DataSources { - if r, exists := p.DataSourcesMap[name]; exists { - dataSources[name] = r.CoreConfigSchema() - } - } - - return &terraform.ProviderSchema{ - Provider: schemaMap(p.Schema).CoreConfigSchema(), - ResourceTypes: resourceTypes, - DataSources: dataSources, - }, nil -} - -// Input implementation of terraform.ResourceProvider interface. -func (p *Provider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - return schemaMap(p.Schema).Input(input, c) -} - -// Validate implementation of terraform.ResourceProvider interface. -func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - if err := p.InternalValidate(); err != nil { - return nil, []error{fmt.Errorf( - "Internal validation of the provider failed! This is always a bug\n"+ - "with the provider itself, and not a user issue. Please report\n"+ - "this bug:\n\n%s", err)} - } - - return schemaMap(p.Schema).Validate(c) -} - -// ValidateResource implementation of terraform.ResourceProvider interface. -func (p *Provider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - r, ok := p.ResourcesMap[t] - if !ok { - return nil, []error{fmt.Errorf( - "Provider doesn't support resource: %s", t)} - } - - return r.Validate(c) -} - -// Configure implementation of terraform.ResourceProvider interface. -func (p *Provider) Configure(c *terraform.ResourceConfig) error { - // No configuration - if p.ConfigureFunc == nil { - return nil - } - - sm := schemaMap(p.Schema) - - // Get a ResourceData for this configuration. To do this, we actually - // generate an intermediary "diff" although that is never exposed. 
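
Stopped and StopContext above exist so long-running operations can bail out when Terraform interrupts a run. A sketch of a poll loop honoring such a context; the polling details are hypothetical, and in a real provider the context would come from provider.StopContext():

```go
package example

import (
	"context"
	"fmt"
	"time"
)

// waitForResource polls until ready reports true or the provider is stopped.
func waitForResource(stopCtx context.Context, ready func() bool) error {
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-stopCtx.Done():
			// Terraform interrupted the run; abandon the wait cleanly.
			return stopCtx.Err()
		case <-ticker.C:
			if ready() {
				return nil
			}
			fmt.Println("not ready yet, polling again")
		}
	}
}
```
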
- diff, err := sm.Diff(nil, c, nil, p.meta, true) - if err != nil { - return err - } - - data, err := sm.Data(nil, diff) - if err != nil { - return err - } - - if p.TerraformVersion == "" { - // Terraform 0.12 introduced this field to the protocol - // We can therefore assume that if it's unconfigured at this point, it's 0.10 or 0.11 - p.TerraformVersion = "0.11+compatible" - } - meta, err := p.ConfigureFunc(data) - if err != nil { - return err - } - - p.meta = meta - return nil -} - -// Apply implementation of terraform.ResourceProvider interface. -func (p *Provider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Apply(s, d, p.meta) -} - -// Diff implementation of terraform.ResourceProvider interface. -func (p *Provider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Diff(s, c, p.meta) -} - -// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't -// attempt to calculate ignore_changes. -func (p *Provider) SimpleDiff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.simpleDiff(s, c, p.meta) -} - -// Refresh implementation of terraform.ResourceProvider interface. -func (p *Provider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Refresh(s, p.meta) -} - -// Resources implementation of terraform.ResourceProvider interface. -func (p *Provider) Resources() []terraform.ResourceType { - keys := make([]string, 0, len(p.ResourcesMap)) - for k := range p.ResourcesMap { - keys = append(keys, k) - } - sort.Strings(keys) - - result := make([]terraform.ResourceType, 0, len(keys)) - for _, k := range keys { - resource := p.ResourcesMap[k] - - // This isn't really possible (it'd fail InternalValidate), but - // we do it anyways to avoid a panic. - if resource == nil { - resource = &Resource{} - } - - result = append(result, terraform.ResourceType{ - Name: k, - Importable: resource.Importer != nil, - - // Indicates that a provider is compiled against a new enough - // version of core to support the GetSchema method. 
- SchemaAvailable: true, - }) - } - - return result -} - -func (p *Provider) ImportState( - info *terraform.InstanceInfo, - id string) ([]*terraform.InstanceState, error) { - // Find the resource - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - // If it doesn't support import, error - if r.Importer == nil { - return nil, fmt.Errorf("resource %s doesn't support import", info.Type) - } - - // Create the data - data := r.Data(nil) - data.SetId(id) - data.SetType(info.Type) - - // Call the import function - results := []*ResourceData{data} - if r.Importer.State != nil { - var err error - results, err = r.Importer.State(data, p.meta) - if err != nil { - return nil, err - } - } - - // Convert the results to InstanceState values and return it - states := make([]*terraform.InstanceState, len(results)) - for i, r := range results { - states[i] = r.State() - } - - // Verify that all are non-nil. If there are any nil the error - // isn't obvious so we circumvent that with a friendlier error. - for _, s := range states { - if s == nil { - return nil, fmt.Errorf( - "nil entry in ImportState results. This is always a bug with\n" + - "the resource that is being imported. Please report this as\n" + - "a bug to Terraform.") - } - } - - return states, nil -} - -// ValidateDataSource implementation of terraform.ResourceProvider interface. -func (p *Provider) ValidateDataSource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - r, ok := p.DataSourcesMap[t] - if !ok { - return nil, []error{fmt.Errorf( - "Provider doesn't support data source: %s", t)} - } - - return r.Validate(c) -} - -// ReadDataDiff implementation of terraform.ResourceProvider interface. -func (p *Provider) ReadDataDiff( - info *terraform.InstanceInfo, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - - r, ok := p.DataSourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown data source: %s", info.Type) - } - - return r.Diff(nil, c, p.meta) -} - -// RefreshData implementation of terraform.ResourceProvider interface. -func (p *Provider) ReadDataApply( - info *terraform.InstanceInfo, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - - r, ok := p.DataSourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown data source: %s", info.Type) - } - - return r.ReadDataApply(d, p.meta) -} - -// DataSources implementation of terraform.ResourceProvider interface. -func (p *Provider) DataSources() []terraform.DataSource { - keys := make([]string, 0, len(p.DataSourcesMap)) - for k := range p.DataSourcesMap { - keys = append(keys, k) - } - sort.Strings(keys) - - result := make([]terraform.DataSource, 0, len(keys)) - for _, k := range keys { - result = append(result, terraform.DataSource{ - Name: k, - - // Indicates that a provider is compiled against a new enough - // version of core to support the GetSchema method. 
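
ImportState above errors unless the resource defines an Importer. For resources whose ID alone lets the subsequent Read rebuild state, the stock passthrough suffices; a sketch with a hypothetical resource:

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func resourceExampleThing() *schema.Resource {
	return &schema.Resource{
		// Create/Read/Update/Delete omitted in this sketch.
		Importer: &schema.ResourceImporter{
			// Returns the ResourceData unchanged so that the follow-up
			// Read populates all attributes from the API.
			State: schema.ImportStatePassthrough,
		},
	}
}
```
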
- SchemaAvailable: true, - }) - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource.go deleted file mode 100644 index bb7a24e5e0..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource.go +++ /dev/null @@ -1,848 +0,0 @@ -package schema - -import ( - "errors" - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/zclconf/go-cty/cty" -) - -var ReservedDataSourceFields = []string{ - "connection", - "count", - "depends_on", - "lifecycle", - "provider", - "provisioner", -} - -var ReservedResourceFields = []string{ - "connection", - "count", - "depends_on", - "id", - "lifecycle", - "provider", - "provisioner", -} - -// Resource represents a thing in Terraform that has a set of configurable -// attributes and a lifecycle (create, read, update, delete). -// -// The Resource schema is an abstraction that allows provider writers to -// worry only about CRUD operations while off-loading validation, diff -// generation, etc. to this higher level library. -// -// In spite of the name, this struct is not used only for terraform resources, -// but also for data sources. In the case of data sources, the Create, -// Update and Delete functions must not be provided. -type Resource struct { - // Schema is the schema for the configuration of this resource. - // - // The keys of this map are the configuration keys, and the values - // describe the schema of the configuration value. - // - // The schema is used to represent both configurable data as well - // as data that might be computed in the process of creating this - // resource. - Schema map[string]*Schema - - // SchemaVersion is the version number for this resource's Schema - // definition. The current SchemaVersion stored in the state for each - // resource. Provider authors can increment this version number - // when Schema semantics change. If the State's SchemaVersion is less than - // the current SchemaVersion, the InstanceState is yielded to the - // MigrateState callback, where the provider can make whatever changes it - // needs to update the state to be compatible to the latest version of the - // Schema. - // - // When unset, SchemaVersion defaults to 0, so provider authors can start - // their Versioning at any integer >= 1 - SchemaVersion int - - // MigrateState is deprecated and any new changes to a resource's schema - // should be handled by StateUpgraders. Existing MigrateState implementations - // should remain for compatibility with existing state. MigrateState will - // still be called if the stored SchemaVersion is less than the - // first version of the StateUpgraders. - // - // MigrateState is responsible for updating an InstanceState with an old - // version to the format expected by the current version of the Schema. - // - // It is called during Refresh if the State's stored SchemaVersion is less - // than the current SchemaVersion of the Resource. - // - // The function is yielded the state's stored SchemaVersion and a pointer to - // the InstanceState that needs updating, as well as the configured - // provider's configured meta interface{}, in case the migration process - // needs to make any remote API calls. - MigrateState StateMigrateFunc - - // StateUpgraders contains the functions responsible for upgrading an - // existing state with an old schema version to a newer schema. 
It is - // called specifically by Terraform when the stored schema version is less - // than the current SchemaVersion of the Resource. - // - // StateUpgraders map specific schema versions to a StateUpgrader - // function. The registered versions are expected to be ordered, - // consecutive values. The initial value may be greater than 0 to account - // for legacy schemas that weren't recorded and can be handled by - // MigrateState. - StateUpgraders []StateUpgrader - - // The functions below are the CRUD operations for this resource. - // - // The only optional operation is Update. If Update is not implemented, - // then updates will not be supported for this resource. - // - // The ResourceData parameter in the functions below are used to - // query configuration and changes for the resource as well as to set - // the ID, computed data, etc. - // - // The interface{} parameter is the result of the ConfigureFunc in - // the provider for this resource. If the provider does not define - // a ConfigureFunc, this will be nil. This parameter should be used - // to store API clients, configuration structures, etc. - // - // If any errors occur during each of the operation, an error should be - // returned. If a resource was partially updated, be careful to enable - // partial state mode for ResourceData and use it accordingly. - // - // Exists is a function that is called to check if a resource still - // exists. If this returns false, then this will affect the diff - // accordingly. If this function isn't set, it will not be called. You - // can also signal existence in the Read method by calling d.SetId("") - // if the Resource is no longer present and should be removed from state. - // The *ResourceData passed to Exists should _not_ be modified. - Create CreateFunc - Read ReadFunc - Update UpdateFunc - Delete DeleteFunc - Exists ExistsFunc - - // CustomizeDiff is a custom function for working with the diff that - // Terraform has created for this resource - it can be used to customize the - // diff that has been created, diff values not controlled by configuration, - // or even veto the diff altogether and abort the plan. It is passed a - // *ResourceDiff, a structure similar to ResourceData but lacking most write - // functions like Set, while introducing new functions that work with the - // diff such as SetNew, SetNewComputed, and ForceNew. - // - // The phases Terraform runs this in, and the state available via functions - // like Get and GetChange, are as follows: - // - // * New resource: One run with no state - // * Existing resource: One run with state - // * Existing resource, forced new: One run with state (before ForceNew), - // then one run without state (as if new resource) - // * Tainted resource: No runs (custom diff logic is skipped) - // * Destroy: No runs (standard diff logic is skipped on destroy diffs) - // - // This function needs to be resilient to support all scenarios. - // - // If this function needs to access external API resources, remember to flag - // the RequiresRefresh attribute mentioned below to ensure that - // -refresh=false is blocked when running plan or apply, as this means that - // this resource requires refresh-like behaviour to work effectively. - // - // For the most part, only computed fields can be customized by this - // function. - // - // This function is only allowed on regular resources (not data sources). - CustomizeDiff CustomizeDiffFunc - - // Importer is the ResourceImporter implementation for this resource. 
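A sketch of a `CustomizeDiff` function using the `ResourceDiff` helpers named above (`GetChange`, `ForceNew`, `SetNewComputed`); the attribute names and the downgrade rule are invented for illustration:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// exampleCustomizeDiff is a hypothetical diff customizer.
func exampleCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
	// Veto the diff entirely: downgrading "tier" is not allowed.
	if d.HasChange("tier") {
		old, new := d.GetChange("tier")
		if old.(string) == "premium" && new.(string) == "free" {
			return fmt.Errorf("tier cannot be downgraded from premium to free")
		}
	}
	// A change to "region" forces replacement of the resource.
	if d.HasChange("region") {
		if err := d.ForceNew("region"); err != nil {
			return err
		}
	}
	// A computed field that will change whenever "name" changes.
	if d.HasChange("name") {
		return d.SetNewComputed("fingerprint")
	}
	return nil
}
```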
- // If this is nil, then this resource does not support importing. If - // this is non-nil, then it supports importing and ResourceImporter - // must be validated. The validity of ResourceImporter is verified - // by InternalValidate on Resource. - Importer *ResourceImporter - - // If non-empty, this string is emitted as a warning during Validate. - DeprecationMessage string - - // Timeouts allow users to specify specific time durations in which an - // operation should time out, to allow them to extend an action to suit their - // usage. For example, a user may specify a large Creation timeout for their - // AWS RDS Instance due to it's size, or restoring from a snapshot. - // Resource implementors must enable Timeout support by adding the allowed - // actions (Create, Read, Update, Delete, Default) to the Resource struct, and - // accessing them in the matching methods. - Timeouts *ResourceTimeout - - // Description is used as the description for docs, the language server and - // other user facing usage. It can be plain-text or markdown depending on the - // global DescriptionKind setting. - Description string - - // UseJSONNumber should be set when state upgraders will expect - // json.Numbers instead of float64s for numbers. This is added as a - // toggle for backwards compatibility for type assertions, but should - // be used in all new resources to avoid bugs with sufficiently large - // user input. - // - // See github.com/hashicorp/terraform-plugin-sdk/issues/655 for more - // details. - UseJSONNumber bool -} - -// ShimInstanceStateFromValue converts a cty.Value to a -// terraform.InstanceState. -func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) { - // Get the raw shimmed value. While this is correct, the set hashes don't - // match those from the Schema. - s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion) - - // We now rebuild the state through the ResourceData, so that the set indexes - // match what helper/schema expects. - data, err := schemaMap(r.Schema).Data(s, nil) - if err != nil { - return nil, err - } - - s = data.State() - if s == nil { - s = &terraform.InstanceState{} - } - return s, nil -} - -// See Resource documentation. -type CreateFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type ReadFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type UpdateFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type DeleteFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type ExistsFunc func(*ResourceData, interface{}) (bool, error) - -// See Resource documentation. -type StateMigrateFunc func( - int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) - -type StateUpgrader struct { - // Version is the version schema that this Upgrader will handle, converting - // it to Version+1. - Version int - - // Type describes the schema that this function can upgrade. Type is - // required to decode the schema if the state was stored in a legacy - // flatmap format. - Type cty.Type - - // Upgrade takes the JSON encoded state and the provider meta value, and - // upgrades the state one single schema version. The provided state is - // deocded into the default json types using a map[string]interface{}. It - // is up to the StateUpgradeFunc to ensure that the returned value can be - // encoded using the new schema. 
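Putting the `StateUpgrader` contract above together, a hypothetical upgrade from schema version 0 to 1 that renames a state attribute; the `Type` field is derived from the old schema, as the decoding note requires:

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// resourceExampleV0 preserves the old (version 0) schema so its implied
// type can decode legacy flatmap state.
func resourceExampleV0() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"location": {Type: schema.TypeString, Optional: true},
		},
	}
}

// upgradeExampleV0ToV1 renames "location" to "region" in the decoded state.
func upgradeExampleV0ToV1(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
	rawState["region"] = rawState["location"]
	delete(rawState, "location")
	return rawState, nil
}

func resourceExample() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		StateUpgraders: []schema.StateUpgrader{
			{
				Version: 0,
				Type:    resourceExampleV0().CoreConfigSchema().ImpliedType(),
				Upgrade: upgradeExampleV0ToV1,
			},
		},
		Schema: map[string]*schema.Schema{
			"region": {Type: schema.TypeString, Optional: true},
		},
	}
}
```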
- Upgrade StateUpgradeFunc -} - -// See StateUpgrader -type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) - -// See Resource documentation. -type CustomizeDiffFunc func(*ResourceDiff, interface{}) error - -// Apply creates, updates, and/or deletes a resource. -func (r *Resource) Apply( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - data, err := schemaMap(r.Schema).Data(s, d) - if err != nil { - return s, err - } - - // Instance Diff shoould have the timeout info, need to copy it over to the - // ResourceData meta - rt := ResourceTimeout{} - if _, ok := d.Meta[TimeoutKey]; ok { - if err := rt.DiffDecode(d); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } else if s != nil { - if _, ok := s.Meta[TimeoutKey]; ok { - if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } - } else { - log.Printf("[DEBUG] No meta timeoutkey found in Apply()") - } - data.timeouts = &rt - - if s == nil { - // The Terraform API dictates that this should never happen, but - // it doesn't hurt to be safe in this case. - s = new(terraform.InstanceState) - } - - if d.Destroy || d.RequiresNew() { - if s.ID != "" { - // Destroy the resource since it is created - if err := r.Delete(data, meta); err != nil { - return r.recordCurrentSchemaVersion(data.State()), err - } - - // Make sure the ID is gone. - data.SetId("") - } - - // If we're only destroying, and not creating, then return - // now since we're done! - if !d.RequiresNew() { - return nil, nil - } - - // Reset the data to be stateless since we just destroyed - data, err = schemaMap(r.Schema).Data(nil, d) - // data was reset, need to re-apply the parsed timeouts - data.timeouts = &rt - if err != nil { - return nil, err - } - } - - err = nil - if data.Id() == "" { - // We're creating, it is a new resource. - data.MarkNewResource() - err = r.Create(data, meta) - } else { - if r.Update == nil { - return s, fmt.Errorf("doesn't support update") - } - - err = r.Update(data, meta) - } - - return r.recordCurrentSchemaVersion(data.State()), err -} - -// Diff returns a diff of this resource. -func (r *Resource) Diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - - t := &ResourceTimeout{} - err := t.ConfigDecode(r, c) - - if err != nil { - return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) - } - - instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true) - if err != nil { - return instanceDiff, err - } - - if instanceDiff != nil { - if err := t.DiffEncode(instanceDiff); err != nil { - log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) - } - } else { - log.Printf("[DEBUG] Instance Diff is nil in Diff()") - } - - return instanceDiff, err -} - -func (r *Resource) simpleDiff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - - instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false) - if err != nil { - return instanceDiff, err - } - - if instanceDiff == nil { - instanceDiff = terraform.NewInstanceDiff() - } - - // Make sure the old value is set in each of the instance diffs. - // This was done by the RequiresNew logic in the full legacy Diff. 
- for k, attr := range instanceDiff.Attributes { - if attr == nil { - continue - } - if s != nil { - attr.Old = s.Attributes[k] - } - } - - return instanceDiff, nil -} - -// Validate validates the resource configuration against the schema. -func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { - warns, errs := schemaMap(r.Schema).Validate(c) - - if r.DeprecationMessage != "" { - warns = append(warns, r.DeprecationMessage) - } - - return warns, errs -} - -// ReadDataApply loads the data for a data source, given a diff that -// describes the configuration arguments and desired computed attributes. -func (r *Resource) ReadDataApply( - d *terraform.InstanceDiff, - meta interface{}, -) (*terraform.InstanceState, error) { - // Data sources are always built completely from scratch - // on each read, so the source state is always nil. - data, err := schemaMap(r.Schema).Data(nil, d) - if err != nil { - return nil, err - } - - err = r.Read(data, meta) - state := data.State() - if state != nil && state.ID == "" { - // Data sources can set an ID if they want, but they aren't - // required to; we'll provide a placeholder if they don't, - // to preserve the invariant that all resources have non-empty - // ids. - state.ID = "-" - } - - return r.recordCurrentSchemaVersion(state), err -} - -// RefreshWithoutUpgrade reads the instance state, but does not call -// MigrateState or the StateUpgraders, since those are now invoked in a -// separate API call. -// RefreshWithoutUpgrade is part of the new plugin shims. -func (r *Resource) RefreshWithoutUpgrade( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the ID is already somehow blank, it doesn't exist - if s.ID == "" { - return nil, nil - } - - rt := ResourceTimeout{} - if _, ok := s.Meta[TimeoutKey]; ok { - if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } - - if r.Exists != nil { - // Make a copy of data so that if it is modified it doesn't - // affect our Read later. - data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - - if err != nil { - return s, err - } - - exists, err := r.Exists(data, meta) - if err != nil { - return s, err - } - if !exists { - return nil, nil - } - } - - data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - if err != nil { - return s, err - } - - err = r.Read(data, meta) - state := data.State() - if state != nil && state.ID == "" { - state = nil - } - - return r.recordCurrentSchemaVersion(state), err -} - -// Refresh refreshes the state of the resource. -func (r *Resource) Refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the ID is already somehow blank, it doesn't exist - if s.ID == "" { - return nil, nil - } - - rt := ResourceTimeout{} - if _, ok := s.Meta[TimeoutKey]; ok { - if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } - - if r.Exists != nil { - // Make a copy of data so that if it is modified it doesn't - // affect our Read later. 
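The two removal signals described above, an `Exists` function returning false and `Read` calling `d.SetId("")`, might look like this in provider code; `lookupExample` stands in for a real API client call:

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

type exampleObject struct{ Name string }

// lookupExample is a hypothetical client call.
func lookupExample(meta interface{}, id string) (*exampleObject, bool, error) {
	return &exampleObject{Name: id}, id != "", nil
}

func resourceExampleExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	_, found, err := lookupExample(meta, d.Id())
	if err != nil {
		return false, err // transient errors leave state untouched
	}
	return found, nil // false removes the resource from state during Refresh
}

func resourceExampleRead(d *schema.ResourceData, meta interface{}) error {
	obj, found, err := lookupExample(meta, d.Id())
	if err != nil {
		return err
	}
	if !found {
		d.SetId("") // equivalent signal: the returned state becomes nil
		return nil
	}
	return d.Set("name", obj.Name)
}
```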
- data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - - if err != nil { - return s, err - } - - exists, err := r.Exists(data, meta) - if err != nil { - return s, err - } - if !exists { - return nil, nil - } - } - - // there may be new StateUpgraders that need to be run - s, err := r.upgradeState(s, meta) - if err != nil { - return s, err - } - - data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - if err != nil { - return s, err - } - - err = r.Read(data, meta) - state := data.State() - if state != nil && state.ID == "" { - state = nil - } - - return r.recordCurrentSchemaVersion(state), err -} - -func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - var err error - - needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) - migrate := needsMigration && r.MigrateState != nil - - if migrate { - s, err = r.MigrateState(stateSchemaVersion, s, meta) - if err != nil { - return s, err - } - } - - if len(r.StateUpgraders) == 0 { - return s, nil - } - - // If we ran MigrateState, then the stateSchemaVersion value is no longer - // correct. We can expect the first upgrade function to be the correct - // schema type version. - if migrate { - stateSchemaVersion = r.StateUpgraders[0].Version - } - - schemaType := r.CoreConfigSchema().ImpliedType() - // find the expected type to convert the state - for _, upgrader := range r.StateUpgraders { - if stateSchemaVersion == upgrader.Version { - schemaType = upgrader.Type - } - } - - // StateUpgraders only operate on the new JSON format state, so the state - // need to be converted. - stateVal, err := StateValueFromInstanceState(s, schemaType) - if err != nil { - return nil, err - } - - jsonState, err := StateValueToJSONMap(stateVal, schemaType) - if err != nil { - return nil, err - } - - for _, upgrader := range r.StateUpgraders { - if stateSchemaVersion != upgrader.Version { - continue - } - - jsonState, err = upgrader.Upgrade(jsonState, meta) - if err != nil { - return nil, err - } - stateSchemaVersion++ - } - - // now we need to re-flatmap the new state - stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema()) - if err != nil { - return nil, err - } - - return r.ShimInstanceStateFromValue(stateVal) -} - -// InternalValidate should be called to validate the structure -// of the resource. -// -// This should be called in a unit test for any resource to verify -// before release that a resource is properly configured for use with -// this library. -// -// Provider.InternalValidate() will automatically call this for all of -// the resources it manages, so you don't need to call this manually if it -// is part of a Provider. 
-func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error { - if r == nil { - return errors.New("resource is nil") - } - - if !writable { - if r.Create != nil || r.Update != nil || r.Delete != nil { - return fmt.Errorf("must not implement Create, Update or Delete") - } - - // CustomizeDiff cannot be defined for read-only resources - if r.CustomizeDiff != nil { - return fmt.Errorf("cannot implement CustomizeDiff") - } - } - - tsm := topSchemaMap - - if r.isTopLevel() && writable { - // All non-Computed attributes must be ForceNew if Update is not defined - if r.Update == nil { - nonForceNewAttrs := make([]string, 0) - for k, v := range r.Schema { - if !v.ForceNew && !v.Computed { - nonForceNewAttrs = append(nonForceNewAttrs, k) - } - } - if len(nonForceNewAttrs) > 0 { - return fmt.Errorf( - "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs) - } - } else { - nonUpdateableAttrs := make([]string, 0) - for k, v := range r.Schema { - if v.ForceNew || v.Computed && !v.Optional { - nonUpdateableAttrs = append(nonUpdateableAttrs, k) - } - } - updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs) - if updateableAttrs == 0 { - return fmt.Errorf( - "All fields are ForceNew or Computed w/out Optional, Update is superfluous") - } - } - - tsm = schemaMap(r.Schema) - - // Destroy, and Read are required - if r.Read == nil { - return fmt.Errorf("Read must be implemented") - } - if r.Delete == nil { - return fmt.Errorf("Delete must be implemented") - } - - // If we have an importer, we need to verify the importer. - if r.Importer != nil { - if err := r.Importer.InternalValidate(); err != nil { - return err - } - } - - for k, f := range tsm { - if isReservedResourceFieldName(k, f) { - return fmt.Errorf("%s is a reserved field name", k) - } - } - } - - lastVersion := -1 - for _, u := range r.StateUpgraders { - if lastVersion >= 0 && u.Version-lastVersion > 1 { - return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version) - } - - if u.Version >= r.SchemaVersion { - return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion) - } - - if !u.Type.IsObjectType() { - return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version) - } - - if u.Upgrade == nil { - return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version) - } - - lastVersion = u.Version - } - - if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 { - return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion) - } - - // Data source - if r.isTopLevel() && !writable { - tsm = schemaMap(r.Schema) - for k := range tsm { - if isReservedDataSourceFieldName(k) { - return fmt.Errorf("%s is a reserved field name", k) - } - } - } - - return schemaMap(r.Schema).InternalValidate(tsm) -} - -func isReservedDataSourceFieldName(name string) bool { - for _, reservedName := range ReservedDataSourceFields { - if name == reservedName { - return true - } - } - return false -} - -func isReservedResourceFieldName(name string, s *Schema) bool { - // Allow phasing out "id" - // See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415 - if name == "id" && (s.Deprecated != "" || s.Removed != "") { - return false - } - - for _, reservedName := range ReservedResourceFields { - if name == reservedName { - return true - } - } - return false -} - -// Data returns a ResourceData struct for this Resource. 
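Providers conventionally exercise this validation from a unit test via `Provider.InternalValidate`, which runs the resource-level check above for every registered resource. A self-contained sketch with an invented resource:

```go
package example

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func TestProviderInternalValidate(t *testing.T) {
	p := &schema.Provider{
		ResourcesMap: map[string]*schema.Resource{
			"example_thing": {
				Schema: map[string]*schema.Schema{
					// No Update func, so this must be ForceNew to pass validation.
					"name": {Type: schema.TypeString, Required: true, ForceNew: true},
				},
				Create: schema.Noop,
				Read:   schema.Noop,
				Delete: schema.RemoveFromState,
			},
		},
	}
	if err := p.InternalValidate(); err != nil {
		t.Fatalf("provider failed InternalValidate: %s", err)
	}
}
```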
Each return value -// is a separate copy and can be safely modified differently. -// -// The data returned from this function has no actual affect on the Resource -// itself (including the state given to this function). -// -// This function is useful for unit tests and ResourceImporter functions. -func (r *Resource) Data(s *terraform.InstanceState) *ResourceData { - result, err := schemaMap(r.Schema).Data(s, nil) - if err != nil { - // At the time of writing, this isn't possible (Data never returns - // non-nil errors). We panic to find this in the future if we have to. - // I don't see a reason for Data to ever return an error. - panic(err) - } - - // load the Resource timeouts - result.timeouts = r.Timeouts - if result.timeouts == nil { - result.timeouts = &ResourceTimeout{} - } - - // Set the schema version to latest by default - result.meta = map[string]interface{}{ - "schema_version": strconv.Itoa(r.SchemaVersion), - } - - return result -} - -// TestResourceData Yields a ResourceData filled with this resource's schema for use in unit testing -// -// TODO: May be able to be removed with the above ResourceData function. -func (r *Resource) TestResourceData() *ResourceData { - return &ResourceData{ - schema: r.Schema, - } -} - -// SchemasForFlatmapPath tries its best to find a sequence of schemas that -// the given dot-delimited attribute path traverses through in the schema -// of the receiving Resource. -// -// Deprecated: This function will be removed in version 2 without replacement. -func (r *Resource) SchemasForFlatmapPath(path string) []*Schema { - return SchemasForFlatmapPath(path, r.Schema) -} - -// Returns true if the resource is "top level" i.e. not a sub-resource. -func (r *Resource) isTopLevel() bool { - // TODO: This is a heuristic; replace with a definitive attribute? - return (r.Create != nil || r.Read != nil) -} - -// Determines if a given InstanceState needs to be migrated by checking the -// stored version number with the current SchemaVersion -func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) { - // Get the raw interface{} value for the schema version. If it doesn't - // exist or is nil then set it to zero. - raw := is.Meta["schema_version"] - if raw == nil { - raw = "0" - } - - // Try to convert it to a string. If it isn't a string then we pretend - // that it isn't set at all. It should never not be a string unless it - // was manually tampered with. - rawString, ok := raw.(string) - if !ok { - rawString = "0" - } - - stateSchemaVersion, _ := strconv.Atoi(rawString) - - // Don't run MigrateState if the version is handled by a StateUpgrader, - // since StateMigrateFuncs are not required to handle unknown versions - maxVersion := r.SchemaVersion - if len(r.StateUpgraders) > 0 { - maxVersion = r.StateUpgraders[0].Version - } - - return stateSchemaVersion < maxVersion, stateSchemaVersion -} - -func (r *Resource) recordCurrentSchemaVersion( - state *terraform.InstanceState) *terraform.InstanceState { - if state != nil && r.SchemaVersion > 0 { - if state.Meta == nil { - state.Meta = make(map[string]interface{}) - } - state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion) - } - return state -} - -// Noop is a convenience implementation of resource function which takes -// no action and returns no error. -func Noop(*ResourceData, interface{}) error { - return nil -} - -// RemoveFromState is a convenience implementation of a resource function -// which sets the resource ID to empty string (to remove it from state) -// and returns no error. 
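The unit-test usage suggested above, with `Data(nil)` producing a blank `ResourceData` backed by the resource's schema:

```go
package example

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func TestNameRoundTrip(t *testing.T) {
	r := &schema.Resource{
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Optional: true},
		},
	}

	// Data(nil) yields an independent copy safe to mutate in tests.
	d := r.Data(nil)
	if err := d.Set("name", "octocat"); err != nil {
		t.Fatal(err)
	}
	if got := d.Get("name").(string); got != "octocat" {
		t.Fatalf("got %q, want %q", got, "octocat")
	}
}
```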
-func RemoveFromState(d *ResourceData, _ interface{}) error { - d.SetId("") - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data.go deleted file mode 100644 index b855801d91..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data.go +++ /dev/null @@ -1,559 +0,0 @@ -package schema - -import ( - "log" - "reflect" - "strings" - "sync" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// ResourceData is used to query and set the attributes of a resource. -// -// ResourceData is the primary argument received for CRUD operations on -// a resource as well as configuration of a provider. It is a powerful -// structure that can be used to not only query data, but check for changes, -// define partial state updates, etc. -// -// The most relevant methods to take a look at are Get, Set, and Partial. -type ResourceData struct { - // Settable (internally) - schema map[string]*Schema - config *terraform.ResourceConfig - state *terraform.InstanceState - diff *terraform.InstanceDiff - meta map[string]interface{} - timeouts *ResourceTimeout - - // Don't set - multiReader *MultiLevelFieldReader - setWriter *MapFieldWriter - newState *terraform.InstanceState - partial bool - partialMap map[string]struct{} - once sync.Once - isNew bool - - panicOnError bool -} - -// getResult is the internal structure that is generated when a Get -// is called that contains some extra data that might be used. -type getResult struct { - Value interface{} - ValueProcessed interface{} - Computed bool - Exists bool - Schema *Schema -} - -// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary -// values, bypassing schema. This MUST NOT be used in normal circumstances - -// it exists only to support the remote_state data source. -// -// Deprecated: Fully define schema attributes and use Set() instead. -func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) { - d.once.Do(d.init) - - d.setWriter.unsafeWriteField(key, value) -} - -// Get returns the data for the given key, or nil if the key doesn't exist -// in the schema. -// -// If the key does exist in the schema but doesn't exist in the configuration, -// then the default value for that type will be returned. For strings, this is -// "", for numbers it is 0, etc. -// -// If you want to test if something is set at all in the configuration, -// use GetOk. -func (d *ResourceData) Get(key string) interface{} { - v, _ := d.GetOk(key) - return v -} - -// GetChange returns the old and new value for a given key. -// -// HasChange should be used to check if a change exists. It is possible -// that both the old and new value are the same if the old value was not -// set and the new value is. This is common, for example, for boolean -// fields which have a zero value of false. -func (d *ResourceData) GetChange(key string) (interface{}, interface{}) { - o, n := d.getChange(key, getSourceState, getSourceDiff) - return o.Value, n.Value -} - -// GetOk returns the data for the given key and whether or not the key -// has been set to a non-zero value at some point. -// -// The first result will not necessarilly be nil if the value doesn't exist. -// The second result should be checked to determine this information. 
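The `Get`/`GetOk` semantics documented above in practice: `Get` returns the type's zero value for unset keys, so `GetOk` is how code distinguishes "unset" from "set to the zero value". The keys here are hypothetical:

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func readExampleSettings(d *schema.ResourceData) (name string, retries int, hasRetries bool) {
	// Always safe: yields "" when the key is unset.
	name = d.Get("name").(string)

	// ok is false when the key is unset or set to the zero value (0 here).
	if v, ok := d.GetOk("retries"); ok {
		retries = v.(int)
		hasRetries = true
	}
	return name, retries, hasRetries
}
```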
-func (d *ResourceData) GetOk(key string) (interface{}, bool) { - r := d.getRaw(key, getSourceSet) - exists := r.Exists && !r.Computed - if exists { - // If it exists, we also want to verify it is not the zero-value. - value := r.Value - zero := r.Schema.Type.Zero() - - if eq, ok := value.(Equal); ok { - exists = !eq.Equal(zero) - } else { - exists = !reflect.DeepEqual(value, zero) - } - } - - return r.Value, exists -} - -// GetOkExists can check if TypeBool attributes that are Optional with -// no Default value have been set. -// -// Deprecated: usage is discouraged due to undefined behaviors and may be -// removed in a future version of the SDK -func (d *ResourceData) GetOkExists(key string) (interface{}, bool) { - r := d.getRaw(key, getSourceSet) - exists := r.Exists && !r.Computed - return r.Value, exists -} - -func (d *ResourceData) getRaw(key string, level getSource) getResult { - var parts []string - if key != "" { - parts = strings.Split(key, ".") - } - - return d.get(parts, level) -} - -// HasChanges returns whether or not any of the given keys has been changed. -func (d *ResourceData) HasChanges(keys ...string) bool { - for _, key := range keys { - if d.HasChange(key) { - return true - } - } - return false -} - -// HasChange returns whether or not the given key has been changed. -func (d *ResourceData) HasChange(key string) bool { - o, n := d.GetChange(key) - - // If the type implements the Equal interface, then call that - // instead of just doing a reflect.DeepEqual. An example where this is - // needed is *Set - if eq, ok := o.(Equal); ok { - return !eq.Equal(n) - } - - return !reflect.DeepEqual(o, n) -} - -// Partial turns partial state mode on/off. -// -// When partial state mode is enabled, then only key prefixes specified -// by SetPartial will be in the final state. This allows providers to return -// partial states for partially applied resources (when errors occur). -func (d *ResourceData) Partial(on bool) { - d.partial = on - if on { - if d.partialMap == nil { - d.partialMap = make(map[string]struct{}) - } - } else { - d.partialMap = nil - } -} - -// Set sets the value for the given key. -// -// If the key is invalid or the value is not a correct type, an error -// will be returned. -func (d *ResourceData) Set(key string, value interface{}) error { - d.once.Do(d.init) - - // If the value is a pointer to a non-struct, get its value and - // use that. This allows Set to take a pointer to primitives to - // simplify the interface. - reflectVal := reflect.ValueOf(value) - if reflectVal.Kind() == reflect.Ptr { - if reflectVal.IsNil() { - // If the pointer is nil, then the value is just nil - value = nil - } else { - // Otherwise, we dereference the pointer as long as its not - // a pointer to a struct, since struct pointers are allowed. - reflectVal = reflect.Indirect(reflectVal) - if reflectVal.Kind() != reflect.Struct { - value = reflectVal.Interface() - } - } - } - - err := d.setWriter.WriteField(strings.Split(key, "."), value) - if err != nil && d.panicOnError { - panic(err) - } - return err -} - -// SetPartial adds the key to the final state output while -// in partial state mode. The key must be a root key in the schema (i.e. -// it cannot be "list.0"). -// -// If partial state mode is disabled, then this has no effect. Additionally, -// whenever partial state mode is toggled, the partial data is cleared. -// -// Deprecated: Partial state has very limited benefit given Terraform refreshes -// before operations by default. 
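A hedged sketch of an Update function combining `HasChange` with the partial state mode described above (note the docs mark `SetPartial` as deprecated); the two update helpers stand in for real API calls:

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func resourceExampleUpdate(d *schema.ResourceData, meta interface{}) error {
	d.Partial(true)

	if d.HasChange("description") {
		_, n := d.GetChange("description")
		if err := updateDescription(meta, d.Id(), n.(string)); err != nil {
			// On error, only keys marked via SetPartial persist to state.
			return err
		}
		d.SetPartial("description")
	}

	if d.HasChange("topics") {
		if err := updateTopics(meta, d.Id(), d.Get("topics")); err != nil {
			return err
		}
		d.SetPartial("topics")
	}

	d.Partial(false)
	return nil
}

// Hypothetical API wrappers.
func updateDescription(meta interface{}, id, desc string) error    { return nil }
func updateTopics(meta interface{}, id string, v interface{}) error { return nil }
```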
-func (d *ResourceData) SetPartial(k string) { - if d.partial { - d.partialMap[k] = struct{}{} - } -} - -func (d *ResourceData) MarkNewResource() { - d.isNew = true -} - -func (d *ResourceData) IsNewResource() bool { - return d.isNew -} - -// Id returns the ID of the resource. -func (d *ResourceData) Id() string { - var result string - - if d.state != nil { - result = d.state.ID - if result == "" { - result = d.state.Attributes["id"] - } - } - - if d.newState != nil { - result = d.newState.ID - if result == "" { - result = d.newState.Attributes["id"] - } - } - - return result -} - -// ConnInfo returns the connection info for this resource. -func (d *ResourceData) ConnInfo() map[string]string { - if d.newState != nil { - return d.newState.Ephemeral.ConnInfo - } - - if d.state != nil { - return d.state.Ephemeral.ConnInfo - } - - return nil -} - -// SetId sets the ID of the resource. If the value is blank, then the -// resource is destroyed. -func (d *ResourceData) SetId(v string) { - d.once.Do(d.init) - d.newState.ID = v - - // once we transition away from the legacy state types, "id" will no longer - // be a special field, and will become a normal attribute. - // set the attribute normally - d.setWriter.unsafeWriteField("id", v) - - // Make sure the newState is also set, otherwise the old value - // may get precedence. - if d.newState.Attributes == nil { - d.newState.Attributes = map[string]string{} - } - d.newState.Attributes["id"] = v -} - -// SetConnInfo sets the connection info for a resource. -func (d *ResourceData) SetConnInfo(v map[string]string) { - d.once.Do(d.init) - d.newState.Ephemeral.ConnInfo = v -} - -// SetType sets the ephemeral type for the data. This is only required -// for importing. -func (d *ResourceData) SetType(t string) { - d.once.Do(d.init) - d.newState.Ephemeral.Type = t -} - -// State returns the new InstanceState after the diff and any Set -// calls. -func (d *ResourceData) State() *terraform.InstanceState { - var result terraform.InstanceState - result.ID = d.Id() - result.Meta = d.meta - - // If we have no ID, then this resource doesn't exist and we just - // return nil. - if result.ID == "" { - return nil - } - - if d.timeouts != nil { - if err := d.timeouts.StateEncode(&result); err != nil { - log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err) - } - } - - // Look for a magic key in the schema that determines we skip the - // integrity check of fields existing in the schema, allowing dynamic - // keys to be created. - hasDynamicAttributes := false - for k := range d.schema { - if k == "__has_dynamic_attributes" { - hasDynamicAttributes = true - log.Printf("[INFO] Resource %s has dynamic attributes", result.ID) - } - } - - // In order to build the final state attributes, we read the full - // attribute set as a map[string]interface{}, write it to a MapFieldWriter, - // and then use that map. 
- rawMap := make(map[string]interface{}) - for k := range d.schema { - source := getSourceSet - if d.partial { - source = getSourceState - if _, ok := d.partialMap[k]; ok { - source = getSourceSet - } - } - - raw := d.get([]string{k}, source) - if raw.Exists && !raw.Computed { - rawMap[k] = raw.Value - if raw.ValueProcessed != nil { - rawMap[k] = raw.ValueProcessed - } - } - } - - mapW := &MapFieldWriter{Schema: d.schema} - if err := mapW.WriteField(nil, rawMap); err != nil { - log.Printf("[ERR] Error writing fields: %s", err) - return nil - } - - result.Attributes = mapW.Map() - - if hasDynamicAttributes { - // If we have dynamic attributes, just copy the attributes map - // one for one into the result attributes. - for k, v := range d.setWriter.Map() { - // Don't clobber schema values. This limits usage of dynamic - // attributes to names which _do not_ conflict with schema - // keys! - if _, ok := result.Attributes[k]; !ok { - result.Attributes[k] = v - } - } - } - - if d.newState != nil { - result.Ephemeral = d.newState.Ephemeral - } - - // TODO: This is hacky and we can remove this when we have a proper - // state writer. We should instead have a proper StateFieldWriter - // and use that. - for k, schema := range d.schema { - if schema.Type != TypeMap { - continue - } - - if result.Attributes[k] == "" { - delete(result.Attributes, k) - } - } - - if v := d.Id(); v != "" { - result.Attributes["id"] = d.Id() - } - - if d.state != nil { - result.Tainted = d.state.Tainted - } - - return &result -} - -// Timeout returns the data for the given timeout key -// Returns a duration of 20 minutes for any key not found, or not found and no default. -func (d *ResourceData) Timeout(key string) time.Duration { - key = strings.ToLower(key) - - // System default of 20 minutes - defaultTimeout := 20 * time.Minute - - if d.timeouts == nil { - return defaultTimeout - } - - var timeout *time.Duration - switch key { - case TimeoutCreate: - timeout = d.timeouts.Create - case TimeoutRead: - timeout = d.timeouts.Read - case TimeoutUpdate: - timeout = d.timeouts.Update - case TimeoutDelete: - timeout = d.timeouts.Delete - } - - if timeout != nil { - return *timeout - } - - if d.timeouts.Default != nil { - return *d.timeouts.Default - } - - return defaultTimeout -} - -func (d *ResourceData) init() { - // Initialize the field that will store our new state - var copyState terraform.InstanceState - if d.state != nil { - copyState = *d.state.DeepCopy() - } - d.newState = ©State - - // Initialize the map for storing set data - d.setWriter = &MapFieldWriter{Schema: d.schema} - - // Initialize the reader for getting data from the - // underlying sources (config, diff, etc.) 
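The `Timeout` accessor pairs naturally with the `helper/resource` retry loop; a sketch where `checkReady` is a hypothetical readiness probe, and unset timeout keys fall back to the 20-minute default described above:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func waitForExample(d *schema.ResourceData, meta interface{}) error {
	// Honors the user-configured create timeout, or 20 minutes by default.
	return resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
		ready, err := checkReady(meta, d.Id())
		if err != nil {
			return resource.NonRetryableError(err)
		}
		if !ready {
			return resource.RetryableError(fmt.Errorf("example %s not ready yet", d.Id()))
		}
		return nil
	})
}

// checkReady is a hypothetical client call.
func checkReady(meta interface{}, id string) (bool, error) { return true, nil }
```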
- readers := make(map[string]FieldReader) - var stateAttributes map[string]string - if d.state != nil { - stateAttributes = d.state.Attributes - readers["state"] = &MapFieldReader{ - Schema: d.schema, - Map: BasicMapReader(stateAttributes), - } - } - if d.config != nil { - readers["config"] = &ConfigFieldReader{ - Schema: d.schema, - Config: d.config, - } - } - if d.diff != nil { - readers["diff"] = &DiffFieldReader{ - Schema: d.schema, - Diff: d.diff, - Source: &MultiLevelFieldReader{ - Levels: []string{"state", "config"}, - Readers: readers, - }, - } - } - readers["set"] = &MapFieldReader{ - Schema: d.schema, - Map: BasicMapReader(d.setWriter.Map()), - } - d.multiReader = &MultiLevelFieldReader{ - Levels: []string{ - "state", - "config", - "diff", - "set", - }, - - Readers: readers, - } -} - -func (d *ResourceData) diffChange( - k string) (interface{}, interface{}, bool, bool, bool) { - // Get the change between the state and the config. - o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) - if !o.Exists { - o.Value = nil - } - if !n.Exists { - n.Value = nil - } - - // Return the old, new, and whether there is a change - return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false -} - -func (d *ResourceData) getChange( - k string, - oldLevel getSource, - newLevel getSource) (getResult, getResult) { - var parts, parts2 []string - if k != "" { - parts = strings.Split(k, ".") - parts2 = strings.Split(k, ".") - } - - o := d.get(parts, oldLevel) - n := d.get(parts2, newLevel) - return o, n -} - -func (d *ResourceData) get(addr []string, source getSource) getResult { - d.once.Do(d.init) - - level := "set" - flags := source & ^getSourceLevelMask - exact := flags&getSourceExact != 0 - source = source & getSourceLevelMask - if source >= getSourceSet { - level = "set" - } else if source >= getSourceDiff { - level = "diff" - } else if source >= getSourceConfig { - level = "config" - } else { - level = "state" - } - - var result FieldReadResult - var err error - if exact { - result, err = d.multiReader.ReadFieldExact(addr, level) - } else { - result, err = d.multiReader.ReadFieldMerge(addr, level) - } - if err != nil { - panic(err) - } - - // If the result doesn't exist, then we set the value to the zero value - var schema *Schema - if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { - schema = schemaL[len(schemaL)-1] - } - - if result.Value == nil && schema != nil { - result.Value = result.ValueOrZero(schema) - } - - // Transform the FieldReadResult into a getResult. It might be worth - // merging these two structures one day. - return getResult{ - Value: result.Value, - ValueProcessed: result.ValueProcessed, - Computed: result.Computed, - Exists: result.Exists, - Schema: schema, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data_get_source.go deleted file mode 100644 index 8bfb079be6..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data_get_source.go +++ /dev/null @@ -1,17 +0,0 @@ -package schema - -//go:generate go run golang.org/x/tools/cmd/stringer -type=getSource resource_data_get_source.go - -// getSource represents the level we want to get for a value (internally). -// Any source less than or equal to the level will be loaded (whichever -// has a value first). 
-type getSource byte - -const ( - getSourceState getSource = 1 << iota - getSourceConfig - getSourceDiff - getSourceSet - getSourceExact // Only get from the _exact_ level - getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_importer.go deleted file mode 100644 index 5dada3caf3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_importer.go +++ /dev/null @@ -1,52 +0,0 @@ -package schema - -// ResourceImporter defines how a resource is imported in Terraform. This -// can be set onto a Resource struct to make it Importable. Not all resources -// have to be importable; if a Resource doesn't have a ResourceImporter then -// it won't be importable. -// -// "Importing" in Terraform is the process of taking an already-created -// resource and bringing it under Terraform management. This can include -// updating Terraform state, generating Terraform configuration, etc. -type ResourceImporter struct { - // The functions below must all be implemented for importing to work. - - // State is called to convert an ID to one or more InstanceState to - // insert into the Terraform state. If this isn't specified, then - // the ID is passed straight through. - State StateFunc -} - -// StateFunc is the function called to import a resource into the -// Terraform state. It is given a ResourceData with only ID set. This -// ID is going to be an arbitrary value given by the user and may not map -// directly to the ID format that the resource expects, so that should -// be validated. -// -// This should return a slice of ResourceData that turn into the state -// that was imported. This might be as simple as returning only the argument -// that was given to the function. In other cases (such as AWS security groups), -// an import may fan out to multiple resources and this will have to return -// multiple. -// -// To create the ResourceData structures for other resource types (if -// you have to), instantiate your resource and call the Data function. -type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error) - -// InternalValidate should be called to validate the structure of this -// importer. This should be called in a unit test. -// -// Resource.InternalValidate() will automatically call this, so this doesn't -// need to be called manually. Further, Resource.InternalValidate() is -// automatically called by Provider.InternalValidate(), so you only need -// to internal validate the provider. -func (r *ResourceImporter) InternalValidate() error { - return nil -} - -// ImportStatePassthrough is an implementation of StateFunc that can be -// used to simply pass the ID directly through. This should be used only -// in the case that an ID-only refresh is possible. -func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) { - return []*ResourceData{d}, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/schema.go deleted file mode 100644 index df0172fa10..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/schema.go +++ /dev/null @@ -1,2037 +0,0 @@ -// schema is a high-level framework for easily writing new providers -// for Terraform. 
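The common case for the importer types above is an ID-only refresh, where `ImportStatePassthrough` suffices; the sketch also sets the `Timeouts` field documented earlier in this file. All names and values are invented:

```go
package example

import (
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func resourceExamplePassthrough() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Required: true, ForceNew: true},
		},
		Create: func(d *schema.ResourceData, m interface{}) error {
			d.SetId(d.Get("name").(string))
			return nil
		},
		Read:   schema.Noop,
		Delete: schema.RemoveFromState,
		Importer: &schema.ResourceImporter{
			// An ID-only refresh is possible, so pass the ID straight through.
			State: schema.ImportStatePassthrough,
		},
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(10 * time.Minute),
		},
	}
}
```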
Usage of schema is recommended over attempting to write -// to the low-level plugin interfaces manually. -// -// schema breaks down provider creation into simple CRUD operations for -// resources. The logic of diffing, destroying before creating, updating -// or creating, etc. is all handled by the framework. The plugin author -// only needs to implement a configuration schema and the CRUD operations and -// everything else is meant to just work. -// -// A good starting point is to view the Provider structure. -package schema - -import ( - "context" - "fmt" - "os" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/mitchellh/copystructure" - "github.com/mitchellh/mapstructure" -) - -// Name of ENV variable which (if not empty) prefers panic over error -const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR" - -// type used for schema package context keys -type contextKey string - -var ( - protoVersionMu sync.Mutex - protoVersion5 = false -) - -func isProto5() bool { - protoVersionMu.Lock() - defer protoVersionMu.Unlock() - return protoVersion5 - -} - -// SetProto5 enables a feature flag for any internal changes required required -// to work with the new plugin protocol. This should not be called by -// provider. -func SetProto5() { - protoVersionMu.Lock() - defer protoVersionMu.Unlock() - protoVersion5 = true -} - -// Schema is used to describe the structure of a value. -// -// Read the documentation of the struct elements for important details. -type Schema struct { - // Type is the type of the value and must be one of the ValueType values. - // - // This type not only determines what type is expected/valid in configuring - // this value, but also what type is returned when ResourceData.Get is - // called. The types returned by Get are: - // - // TypeBool - bool - // TypeInt - int - // TypeFloat - float64 - // TypeString - string - // TypeList - []interface{} - // TypeMap - map[string]interface{} - // TypeSet - *schema.Set - // - Type ValueType - - // ConfigMode allows for overriding the default behaviors for mapping - // schema entries onto configuration constructs. - // - // By default, the Elem field is used to choose whether a particular - // schema is represented in configuration as an attribute or as a nested - // block; if Elem is a *schema.Resource then it's a block and it's an - // attribute otherwise. - // - // If Elem is *schema.Resource then setting ConfigMode to - // SchemaConfigModeAttr will force it to be represented in configuration - // as an attribute, which means that the Computed flag can be used to - // provide default elements when the argument isn't set at all, while still - // allowing the user to force zero elements by explicitly assigning an - // empty list. - // - // When Computed is set without Optional, the attribute is not settable - // in configuration at all and so SchemaConfigModeAttr is the automatic - // behavior, and SchemaConfigModeBlock is not permitted. - ConfigMode SchemaConfigMode - - // If one of these is set, then this item can come from the configuration. - // Both cannot be set. If Optional is set, the value is optional. If - // Required is set, the value is required. - // - // One of these must be set if the value is not computed. That is: - // value either comes from the config, is computed, or is both. 
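The Type-to-Go-type table in the comment above, written out as schema declarations; every `ResourceData.Get` type assertion must match this mapping. The keys are invented:

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

var exampleTypeMapping = map[string]*schema.Schema{
	"enabled": {Type: schema.TypeBool, Optional: true},   // Get => bool
	"retries": {Type: schema.TypeInt, Optional: true},    // Get => int
	"ratio":   {Type: schema.TypeFloat, Optional: true},  // Get => float64
	"name":    {Type: schema.TypeString, Optional: true}, // Get => string
	"items": { // Get => []interface{}
		Type:     schema.TypeList,
		Optional: true,
		Elem:     &schema.Schema{Type: schema.TypeString},
	},
	"labels": { // Get => map[string]interface{}
		Type:     schema.TypeMap,
		Optional: true,
		Elem:     &schema.Schema{Type: schema.TypeString},
	},
	"zones": { // Get => *schema.Set
		Type:     schema.TypeSet,
		Optional: true,
		Elem:     &schema.Schema{Type: schema.TypeString},
	},
}
```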
- Optional bool - Required bool - - // If this is non-nil, the provided function will be used during diff - // of this field. If this is nil, a default diff for the type of the - // schema will be used. - // - // This allows comparison based on something other than primitive, list - // or map equality - for example SSH public keys may be considered - // equivalent regardless of trailing whitespace. - DiffSuppressFunc SchemaDiffSuppressFunc - - // If this is non-nil, then this will be a default value that is used - // when this item is not set in the configuration. - // - // DefaultFunc can be specified to compute a dynamic default. - // Only one of Default or DefaultFunc can be set. If DefaultFunc is - // used then its return value should be stable to avoid generating - // confusing/perpetual diffs. - // - // Changing either Default or the return value of DefaultFunc can be - // a breaking change, especially if the attribute in question has - // ForceNew set. If a default needs to change to align with changing - // assumptions in an upstream API then it may be necessary to also use - // the MigrateState function on the resource to change the state to match, - // or have the Read function adjust the state value to align with the - // new default. - // - // If Required is true above, then Default cannot be set. DefaultFunc - // can be set with Required. If the DefaultFunc returns nil, then there - // will be no default and the user will be asked to fill it in. - // - // If either of these is set, then the user won't be asked for input - // for this key if the default is not nil. - Default interface{} - DefaultFunc SchemaDefaultFunc - - // Description is used as the description for docs, the language server and - // other user facing usage. It can be plain-text or markdown depending on the - // global DescriptionKind setting. - Description string - - // InputDefault is the default value to use for when inputs are requested. - // This differs from Default in that if Default is set, no input is - // asked for. If Input is asked, this will be the default value offered. - InputDefault string - - // The fields below relate to diffs. - // - // If Computed is true, then the result of this value is computed - // (unless specified by config) on creation. - // - // If ForceNew is true, then a change in this resource necessitates - // the creation of a new resource. - // - // StateFunc is a function called to change the value of this before - // storing it in the state (and likewise before comparing for diffs). - // The use for this is for example with large strings, you may want - // to simply store the hash of it. - Computed bool - ForceNew bool - StateFunc SchemaStateFunc - - // The following fields are only set for a TypeList, TypeSet, or TypeMap. - // - // Elem represents the element type. For a TypeMap, it must be a *Schema - // with a Type that is one of the primitives: TypeString, TypeBool, - // TypeInt, or TypeFloat. Otherwise it may be either a *Schema or a - // *Resource. If it is *Schema, the element type is just a simple value. - // If it is *Resource, the element type is a complex structure, - // potentially managed via its own CRUD actions on the API. - Elem interface{} - - // The following fields are only set for a TypeList or TypeSet. - // - // MaxItems defines a maximum amount of items that can exist within a - // TypeSet or TypeList. Specific use cases would be if a TypeSet is being - // used to wrap a complex structure, however more than one instance would - // cause instability. 
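A `DiffSuppressFunc` of the kind described above, treating values that differ only by case as equivalent, alongside a static `Default` (`DefaultFunc` being the dynamic variant); the attribute is hypothetical:

```go
package example

import (
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// suppressCaseDiff reports two values as equivalent when they differ only
// by case, so no diff is produced for such changes.
func suppressCaseDiff(k, old, new string, d *schema.ResourceData) bool {
	return strings.EqualFold(old, new)
}

var exampleRegionSchema = &schema.Schema{
	Type:             schema.TypeString,
	Optional:         true,
	Default:          "us-east-1",
	DiffSuppressFunc: suppressCaseDiff,
}
```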
- // - // MinItems defines a minimum amount of items that can exist within a - // TypeSet or TypeList. Specific use cases would be if a TypeSet is being - // used to wrap a complex structure, however less than one instance would - // cause instability. - // - // If the field Optional is set to true then MinItems is ignored and thus - // effectively zero. - MaxItems int - MinItems int - - // PromoteSingle originally allowed for a single element to be assigned - // where a primitive list was expected, but this no longer works from - // Terraform v0.12 onwards (Terraform Core will require a list to be set - // regardless of what this is set to) and so only applies to Terraform v0.11 - // and earlier, and so should be used only to retain this functionality - // for those still using v0.11 with a provider that formerly used this. - PromoteSingle bool - - // The following fields are only valid for a TypeSet type. - // - // Set defines a function to determine the unique ID of an item so that - // a proper set can be built. - Set SchemaSetFunc - - // ComputedWhen is a set of queries on the configuration. Whenever any - // of these things is changed, it will require a recompute (this requires - // that Computed is set to true). - // - // NOTE: This currently does not work. - ComputedWhen []string - - // ConflictsWith is a set of schema keys that conflict with this schema. - // This will only check that they're set in the _config_. This will not - // raise an error for a malfunctioning resource that sets a conflicting - // key. - // - // ExactlyOneOf is a set of schema keys that, when set, only one of the - // keys in that list can be specified. It will error if none are - // specified as well. - // - // AtLeastOneOf is a set of schema keys that, when set, at least one of - // the keys in that list must be specified. - // - // RequiredWith is a set of schema keys that must be set simultaneously. - ConflictsWith []string - ExactlyOneOf []string - AtLeastOneOf []string - RequiredWith []string - - // When Deprecated is set, this attribute is deprecated. - // - // A deprecated field still works, but will probably stop working in near - // future. This string is the message shown to the user with instructions on - // how to address the deprecation. - Deprecated string - - // When Removed is set, this attribute has been removed from the schema - // - // Deprecated: This field will be removed in version 2 without replacement - // as the functionality is not necessary. - // - // Removed attributes can be left in the Schema to generate informative error - // messages for the user when they show up in resource configurations. - // This string is the message shown to the user with instructions on - // what do to about the removed attribute. - Removed string - - // ValidateFunc allows individual fields to define arbitrary validation - // logic. It is yielded the provided config value as an interface{} that is - // guaranteed to be of the proper Schema type, and it can yield warnings or - // errors based on inspection of that value. - // - // ValidateFunc is honored only when the schema's Type is set to TypeInt, - // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. - ValidateFunc SchemaValidateFunc - - // Sensitive ensures that the attribute's value does not get displayed in - // logs or regular output. It should be used for passwords or other - // secret fields. Future versions of Terraform may encrypt these - // values. 
- Sensitive bool -} - -// SchemaConfigMode is used to influence how a schema item is mapped into a -// corresponding configuration construct, using the ConfigMode field of -// Schema. -type SchemaConfigMode int - -const ( - SchemaConfigModeAuto SchemaConfigMode = iota - SchemaConfigModeAttr - SchemaConfigModeBlock -) - -// SchemaDiffSuppressFunc is a function which can be used to determine -// whether a detected diff on a schema element is "valid" or not, and -// suppress it from the plan if necessary. -// -// Return true if the diff should be suppressed, false to retain it. -type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool - -// SchemaDefaultFunc is a function called to return a default value for -// a field. -type SchemaDefaultFunc func() (interface{}, error) - -// EnvDefaultFunc is a helper function that returns the value of the -// given environment variable, if one exists, or the default value -// otherwise. -func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc { - return func() (interface{}, error) { - if v := os.Getenv(k); v != "" { - return v, nil - } - - return dv, nil - } -} - -// MultiEnvDefaultFunc is a helper function that returns the value of the first -// environment variable in the given list that returns a non-empty value. If -// none of the environment variables return a value, the default value is -// returned. -func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc { - return func() (interface{}, error) { - for _, k := range ks { - if v := os.Getenv(k); v != "" { - return v, nil - } - } - return dv, nil - } -} - -// SchemaSetFunc is a function that must return a unique ID for the given -// element. This unique ID is used to store the element in a hash. -type SchemaSetFunc func(interface{}) int - -// SchemaStateFunc is a function used to convert some type to a string -// to be stored in the state. -type SchemaStateFunc func(interface{}) string - -// SchemaValidateFunc is a function used to validate a single field in the -// schema. -type SchemaValidateFunc func(interface{}, string) ([]string, []error) - -func (s *Schema) GoString() string { - return fmt.Sprintf("*%#v", *s) -} - -// Returns a default value for this schema by either reading Default or -// evaluating DefaultFunc. If neither of these are defined, returns nil. -func (s *Schema) DefaultValue() (interface{}, error) { - if s.Default != nil { - return s.Default, nil - } - - if s.DefaultFunc != nil { - defaultValue, err := s.DefaultFunc() - if err != nil { - return nil, fmt.Errorf("error loading default: %s", err) - } - return defaultValue, nil - } - - return nil, nil -} - -// Returns a zero value for the schema. -func (s *Schema) ZeroValue() interface{} { - // If it's a set then we'll do a bit of extra work to provide the - // right hashing function in our empty value. 
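A sketch tying together `EnvDefaultFunc` as defined above with a `SchemaValidateFunc` and the `Sensitive` flag; the environment variable name and the length rule are invented:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

var exampleTokenSchema = &schema.Schema{
	Type:      schema.TypeString,
	Optional:  true,
	Sensitive: true, // hidden from logs and regular output, per the field docs
	// Falls back to the EXAMPLE_TOKEN environment variable when unset.
	DefaultFunc: schema.EnvDefaultFunc("EXAMPLE_TOKEN", nil),
	ValidateFunc: func(v interface{}, k string) ([]string, []error) {
		if len(v.(string)) < 8 {
			return nil, []error{fmt.Errorf("%s must be at least 8 characters", k)}
		}
		return nil, nil
	},
}
```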
- if s.Type == TypeSet { - setFunc := s.Set - if setFunc == nil { - // Default set function uses the schema to hash the whole value - elem := s.Elem - switch t := elem.(type) { - case *Schema: - setFunc = HashSchema(t) - case *Resource: - setFunc = HashResource(t) - default: - panic("invalid set element type") - } - } - return &Set{F: setFunc} - } else { - return s.Type.Zero() - } -} - -func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { - if d == nil { - return d - } - - if s.Type == TypeBool { - normalizeBoolString := func(s string) string { - switch s { - case "0": - return "false" - case "1": - return "true" - } - return s - } - d.Old = normalizeBoolString(d.Old) - d.New = normalizeBoolString(d.New) - } - - if s.Computed && !d.NewRemoved && d.New == "" { - // Computed attribute without a new value set - d.NewComputed = true - } - - if s.ForceNew { - // ForceNew, mark that this field is requiring new under the - // following conditions, explained below: - // - // * Old != New - There is a change in value. This field - // is therefore causing a new resource. - // - // * NewComputed - This field is being computed, hence a - // potential change in value, mark as causing a new resource. - d.RequiresNew = d.Old != d.New || d.NewComputed - } - - if d.NewRemoved { - return d - } - - if s.Computed { - // FIXME: This is where the customized bool from getChange finally - // comes into play. It allows the previously incorrect behavior - // of an empty string being used as "unset" when the value is - // computed. This should be removed once we can properly - // represent an unset/nil value from the configuration. - if !customized { - if d.Old != "" && d.New == "" { - // This is a computed value with an old value set already, - // just let it go. - return nil - } - } - - if d.New == "" && !d.NewComputed { - // Computed attribute without a new value set - d.NewComputed = true - } - } - - if s.Sensitive { - // Set the Sensitive flag so output is hidden in the UI - d.Sensitive = true - } - - return d -} - -// InternalMap is used to aid in the transition to the new schema types and -// protocol. The name is not meant to convey any usefulness, as this is not to -// be used directly by any providers. -type InternalMap = schemaMap - -// schemaMap is a wrapper that adds nice functions on top of schemas. -type schemaMap map[string]*Schema - -func (m schemaMap) panicOnError() bool { - if os.Getenv(PanicOnErr) != "" { - return true - } - return false -} - -// Data returns a ResourceData for the given schema, state, and diff. -// -// The diff is optional. -func (m schemaMap) Data( - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*ResourceData, error) { - return &ResourceData{ - schema: m, - state: s, - diff: d, - panicOnError: m.panicOnError(), - }, nil -} - -// DeepCopy returns a copy of this schemaMap. The copy can be safely modified -// without affecting the original. -func (m *schemaMap) DeepCopy() schemaMap { - copy, err := copystructure.Config{Lock: true}.Copy(m) - if err != nil { - panic(err) - } - return *copy.(*schemaMap) -} - -// Diff returns the diff for a resource given the schema map, -// state, and configuration. 
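The TypeSet branch of ZeroValue above matters in practice: an empty set still needs a hash function, which is derived from Elem via HashSchema or HashResource when no explicit Set func is given. A minimal sketch of that behavior, using the API shown here:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func main() {
	s := &schema.Schema{
		Type: schema.TypeSet,
		Elem: &schema.Schema{Type: schema.TypeString},
	}

	// With no explicit Set func, ZeroValue derives one from Elem, so items
	// added to the empty set hash consistently.
	set := s.ZeroValue().(*schema.Set)
	set.Add("a")
	set.Add("a") // a duplicate hashes to the same code and is stored once
	fmt.Println(set.Len()) // 1
}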
-func (m schemaMap) Diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - customizeDiff CustomizeDiffFunc, - meta interface{}, - handleRequiresNew bool) (*terraform.InstanceDiff, error) { - result := new(terraform.InstanceDiff) - result.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - // Make sure to mark if the resource is tainted - if s != nil { - result.DestroyTainted = s.Tainted - } - - d := &ResourceData{ - schema: m, - state: s, - config: c, - panicOnError: m.panicOnError(), - } - - for k, schema := range m { - err := m.diff(k, schema, result, d, false) - if err != nil { - return nil, err - } - } - - // Remove any nil diffs just to keep things clean - for k, v := range result.Attributes { - if v == nil { - delete(result.Attributes, k) - } - } - - // If this is a non-destroy diff, call any custom diff logic that has been - // defined. - if !result.DestroyTainted && customizeDiff != nil { - mc := m.DeepCopy() - rd := newResourceDiff(mc, c, s, result) - if err := customizeDiff(rd, meta); err != nil { - return nil, err - } - for _, k := range rd.UpdatedKeys() { - err := m.diff(k, mc[k], result, rd, false) - if err != nil { - return nil, err - } - } - } - - if handleRequiresNew { - // If the diff requires a new resource, then we recompute the diff - // so we have the complete new resource diff, and preserve the - // RequiresNew fields where necessary so the user knows exactly what - // caused that. - if result.RequiresNew() { - // Create the new diff - result2 := new(terraform.InstanceDiff) - result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - // Preserve the DestroyTainted flag - result2.DestroyTainted = result.DestroyTainted - - // Reset the data to not contain state. We have to call init() - // again in order to reset the FieldReaders. - d.state = nil - d.init() - - // Perform the diff again - for k, schema := range m { - err := m.diff(k, schema, result2, d, false) - if err != nil { - return nil, err - } - } - - // Re-run customization - if !result2.DestroyTainted && customizeDiff != nil { - mc := m.DeepCopy() - rd := newResourceDiff(mc, c, d.state, result2) - if err := customizeDiff(rd, meta); err != nil { - return nil, err - } - for _, k := range rd.UpdatedKeys() { - err := m.diff(k, mc[k], result2, rd, false) - if err != nil { - return nil, err - } - } - } - - // Force all the fields to not force a new since we know what we - // want to force new. - for k, attr := range result2.Attributes { - if attr == nil { - continue - } - - if attr.RequiresNew { - attr.RequiresNew = false - } - - if s != nil { - attr.Old = s.Attributes[k] - } - } - - // Now copy in all the requires new diffs... - for k, attr := range result.Attributes { - if attr == nil { - continue - } - - newAttr, ok := result2.Attributes[k] - if !ok { - newAttr = attr - } - - if attr.RequiresNew { - newAttr.RequiresNew = true - } - - result2.Attributes[k] = newAttr - } - - // And set the diff! - result = result2 - } - - } - - // Go through and detect all of the ComputedWhens now that we've - // finished the diff. - // TODO - - if result.Empty() { - // If we don't have any diff elements, just return nil - return nil, nil - } - - return result, nil -} - -// Input implements the terraform.ResourceProvider method by asking -// for input for required configuration keys that don't have a value. 
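CustomizeDiff, invoked twice in the Diff flow above (once on the initial diff and again after the requires-new recompute), is how a resource vetoes or adjusts a planned change. A minimal sketch against the v1 signature vendored here; note that SDK v2 adds a leading context.Context parameter. The resource and attribute are hypothetical:

package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func resourceDisk() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"size_gb": {Type: schema.TypeInt, Optional: true},
		},
		// Returning an error here aborts the Diff pass described above.
		CustomizeDiff: func(d *schema.ResourceDiff, meta interface{}) error {
			o, n := d.GetChange("size_gb")
			if n.(int) < o.(int) {
				return fmt.Errorf("size_gb cannot shrink (%d -> %d)", o.(int), n.(int))
			}
			return nil
		},
	}
}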
-func (m schemaMap) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := m[k] - - // Skip things that don't require config, if that is even valid - // for a provider schema. - // Required XOR Optional must always be true to validate, so we only - // need to check one. - if v.Optional { - continue - } - - // Deprecated fields should never prompt - if v.Deprecated != "" { - continue - } - - // Skip things that have a value of some sort already - if _, ok := c.Raw[k]; ok { - continue - } - - // Skip if it has a default value - defaultValue, err := v.DefaultValue() - if err != nil { - return nil, fmt.Errorf("%s: error loading default: %s", k, err) - } - if defaultValue != nil { - continue - } - - var value interface{} - switch v.Type { - case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList: - continue - case TypeString: - value, err = m.inputString(input, k, v) - default: - panic(fmt.Sprintf("Unknown type for input: %#v", v.Type)) - } - - if err != nil { - return nil, fmt.Errorf( - "%s: %s", k, err) - } - - c.Config[k] = value - } - - return c, nil -} - -// Validate validates the configuration against this schema mapping. -func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) { - return m.validateObject("", m, c) -} - -// InternalValidate validates the format of this schema. This should be called -// from a unit test (and not in user-path code) to verify that a schema -// is properly built. -func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { - return m.internalValidate(topSchemaMap, false) -} - -func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error { - if topSchemaMap == nil { - topSchemaMap = m - } - for k, v := range m { - if v.Type == TypeInvalid { - return fmt.Errorf("%s: Type must be specified", k) - } - - if v.Optional && v.Required { - return fmt.Errorf("%s: Optional or Required must be set, not both", k) - } - - if v.Required && v.Computed { - return fmt.Errorf("%s: Cannot be both Required and Computed", k) - } - - if !v.Required && !v.Optional && !v.Computed { - return fmt.Errorf("%s: One of optional, required, or computed must be set", k) - } - - computedOnly := v.Computed && !v.Optional - - switch v.ConfigMode { - case SchemaConfigModeBlock: - if _, ok := v.Elem.(*Resource); !ok { - return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k) - } - if attrsOnly { - return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k) - } - if computedOnly { - return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k) - } - case SchemaConfigModeAttr: - // anything goes - case SchemaConfigModeAuto: - // Since "Auto" for Elem: *Resource would create a nested block, - // and that's impossible inside an attribute, we require it to be - // explicitly overridden as mode "Attr" for clarity. 
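InternalValidate is intended for unit tests rather than runtime code. Providers commonly pin their entire schema with a test along these lines, assuming a Provider() constructor that returns *schema.Provider (as this provider's does after the v2 migration):

package github

import "testing"

func TestProviderSchemaIsValid(t *testing.T) {
	// InternalValidate walks every resource and attribute, enforcing the
	// rules implemented in internalValidate above (Required xor Optional,
	// Elem set for lists, ConflictsWith targets must exist, and so on).
	if err := Provider().InternalValidate(); err != nil {
		t.Fatalf("provider schema failed validation: %s", err)
	}
}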
- if _, ok := v.Elem.(*Resource); ok { - if attrsOnly { - return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k) - } - } - default: - return fmt.Errorf("%s: invalid ConfigMode value", k) - } - - if v.Computed && v.Default != nil { - return fmt.Errorf("%s: Default must be nil if computed", k) - } - - if v.Required && v.Default != nil { - return fmt.Errorf("%s: Default cannot be set with Required", k) - } - - if len(v.ComputedWhen) > 0 && !v.Computed { - return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k) - } - - if len(v.ConflictsWith) > 0 && v.Required { - return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k) - } - - if len(v.ExactlyOneOf) > 0 && v.Required { - return fmt.Errorf("%s: ExactlyOneOf cannot be set with Required", k) - } - - if len(v.AtLeastOneOf) > 0 && v.Required { - return fmt.Errorf("%s: AtLeastOneOf cannot be set with Required", k) - } - - if len(v.ConflictsWith) > 0 { - err := checkKeysAgainstSchemaFlags(k, v.ConflictsWith, topSchemaMap, v, false) - if err != nil { - return fmt.Errorf("ConflictsWith: %+v", err) - } - } - - if len(v.RequiredWith) > 0 { - err := checkKeysAgainstSchemaFlags(k, v.RequiredWith, topSchemaMap, v, true) - if err != nil { - return fmt.Errorf("RequiredWith: %+v", err) - } - } - - if len(v.ExactlyOneOf) > 0 { - err := checkKeysAgainstSchemaFlags(k, v.ExactlyOneOf, topSchemaMap, v, true) - if err != nil { - return fmt.Errorf("ExactlyOneOf: %+v", err) - } - } - - if len(v.AtLeastOneOf) > 0 { - err := checkKeysAgainstSchemaFlags(k, v.AtLeastOneOf, topSchemaMap, v, true) - if err != nil { - return fmt.Errorf("AtLeastOneOf: %+v", err) - } - } - - if v.Type == TypeList || v.Type == TypeSet { - if v.Elem == nil { - return fmt.Errorf("%s: Elem must be set for lists", k) - } - - if v.Default != nil { - return fmt.Errorf("%s: Default is not valid for lists or sets", k) - } - - if v.Type != TypeSet && v.Set != nil { - return fmt.Errorf("%s: Set can only be set for TypeSet", k) - } - - switch t := v.Elem.(type) { - case *Resource: - attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr - - if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil { - return err - } - case *Schema: - bad := t.Computed || t.Optional || t.Required - if bad { - return fmt.Errorf( - "%s: Elem must have only Type set", k) - } - } - } else { - if v.MaxItems > 0 || v.MinItems > 0 { - return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k) - } - } - - // Computed-only field - if v.Computed && !v.Optional { - if v.ValidateFunc != nil { - return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ - "there's nothing to validate on computed-only field", k) - } - if v.DiffSuppressFunc != nil { - return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ - " between config and state representation. 
"+ - "There is no config for computed-only field, nothing to compare.", k) - } - } - - if v.ValidateFunc != nil { - switch v.Type { - case TypeList, TypeSet: - return fmt.Errorf("%s: ValidateFunc is not yet supported on lists or sets.", k) - } - } - - if v.Deprecated == "" && v.Removed == "" { - if !isValidFieldName(k) { - return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k) - } - } - } - - return nil -} - -func checkKeysAgainstSchemaFlags(k string, keys []string, topSchemaMap schemaMap, self *Schema, allowSelfReference bool) error { - for _, key := range keys { - parts := strings.Split(key, ".") - sm := topSchemaMap - var target *Schema - for idx, part := range parts { - // Skip index fields if 0 - partInt, err := strconv.Atoi(part) - - if err == nil { - if partInt != 0 { - return fmt.Errorf("%s configuration block reference (%s) can only use the .0. index for TypeList and MaxItems: 1 configuration blocks", k, key) - } - - continue - } - - var ok bool - if target, ok = sm[part]; !ok { - return fmt.Errorf("%s references unknown attribute (%s) at part (%s)", k, key, part) - } - - subResource, ok := target.Elem.(*Resource) - - if !ok { - continue - } - - // Skip Type/MaxItems check if not the last element - if (target.Type == TypeSet || target.MaxItems != 1) && idx+1 != len(parts) { - return fmt.Errorf("%s configuration block reference (%s) can only be used with TypeList and MaxItems: 1 configuration blocks", k, key) - } - - sm = schemaMap(subResource.Schema) - } - - if target == nil { - return fmt.Errorf("%s cannot find target attribute (%s), sm: %#v", k, key, sm) - } - - if target == self && !allowSelfReference { - return fmt.Errorf("%s cannot reference self (%s)", k, key) - } - - if target.Required { - return fmt.Errorf("%s cannot contain Required attribute (%s)", k, key) - } - - if len(target.ComputedWhen) > 0 { - return fmt.Errorf("%s cannot contain Computed(When) attribute (%s)", k, key) - } - } - - return nil -} - -func isValidFieldName(name string) bool { - re := regexp.MustCompile("^[a-z0-9_]+$") - return re.MatchString(name) -} - -// resourceDiffer is an interface that is used by the private diff functions. -// This helps facilitate diff logic for both ResourceData and ResoureDiff with -// minimal divergence in code. 
-type resourceDiffer interface { - diffChange(string) (interface{}, interface{}, bool, bool, bool) - Get(string) interface{} - GetChange(string) (interface{}, interface{}) - GetOk(string) (interface{}, bool) - HasChange(string) bool - Id() string -} - -func (m schemaMap) diff( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - - unsupressedDiff := new(terraform.InstanceDiff) - unsupressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - var err error - switch schema.Type { - case TypeBool, TypeInt, TypeFloat, TypeString: - err = m.diffString(k, schema, unsupressedDiff, d, all) - case TypeList: - err = m.diffList(k, schema, unsupressedDiff, d, all) - case TypeMap: - err = m.diffMap(k, schema, unsupressedDiff, d, all) - case TypeSet: - err = m.diffSet(k, schema, unsupressedDiff, d, all) - default: - err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) - } - - for attrK, attrV := range unsupressedDiff.Attributes { - switch rd := d.(type) { - case *ResourceData: - if schema.DiffSuppressFunc != nil && attrV != nil && - schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { - // If this attr diff is suppressed, we may still need it in the - // overall diff if it's contained within a set. Rather than - // dropping the diff, make it a NOOP. - if !all { - continue - } - - attrV = &terraform.ResourceAttrDiff{ - Old: attrV.Old, - New: attrV.Old, - } - } - } - diff.Attributes[attrK] = attrV - } - - return err -} - -func (m schemaMap) diffList( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - o, n, _, computedList, customized := d.diffChange(k) - if computedList { - n = nil - } - nSet := n != nil - - // If we have an old value and no new value is set or will be - // computed once all variables can be interpolated and we're - // computed, then nothing has changed. - if o != nil && n == nil && !computedList && schema.Computed { - return nil - } - - if o == nil { - o = []interface{}{} - } - if n == nil { - n = []interface{}{} - } - if s, ok := o.(*Set); ok { - o = s.List() - } - if s, ok := n.(*Set); ok { - n = s.List() - } - os := o.([]interface{}) - vs := n.([]interface{}) - - // If the new value was set, and the two are equal, then we're done. 
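The DiffSuppressFunc consulted in the loop above is the standard hook for ignoring cosmetic changes; as the code notes, a suppressed diff inside a set is kept as a NOOP rather than dropped. A minimal sketch that suppresses case-only differences (attribute name illustrative):

package example

import (
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func repoNameSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeString,
		Optional: true,
		// Return true to suppress the diff for this attribute.
		DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
			return strings.EqualFold(old, new)
		},
	}
}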
- // We have to do this check here because sets might be NOT - // reflect.DeepEqual so we need to wait until we get the []interface{} - if !all && nSet && reflect.DeepEqual(os, vs) { - return nil - } - - // Get the counts - oldLen := len(os) - newLen := len(vs) - oldStr := strconv.FormatInt(int64(oldLen), 10) - - // If the whole list is computed, then say that the # is computed - if computedList { - diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{ - Old: oldStr, - NewComputed: true, - RequiresNew: schema.ForceNew, - } - return nil - } - - // If the counts are not the same, then record that diff - changed := oldLen != newLen - computed := oldLen == 0 && newLen == 0 && schema.Computed - if changed || computed || all { - countSchema := &Schema{ - Type: TypeInt, - Computed: schema.Computed, - ForceNew: schema.ForceNew, - } - - newStr := "" - if !computed { - newStr = strconv.FormatInt(int64(newLen), 10) - } else { - oldStr = "" - } - - diff.Attributes[k+".#"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }, - customized, - ) - } - - // Figure out the maximum - maxLen := oldLen - if newLen > maxLen { - maxLen = newLen - } - - switch t := schema.Elem.(type) { - case *Resource: - // This is a complex resource - for i := 0; i < maxLen; i++ { - for k2, schema := range t.Schema { - subK := fmt.Sprintf("%s.%d.%s", k, i, k2) - err := m.diff(subK, schema, diff, d, all) - if err != nil { - return err - } - } - } - case *Schema: - // Copy the schema so that we can set Computed/ForceNew from - // the parent schema (the TypeList). - t2 := *t - t2.ForceNew = schema.ForceNew - - // This is just a primitive element, so go through each and - // just diff each. - for i := 0; i < maxLen; i++ { - subK := fmt.Sprintf("%s.%d", k, i) - err := m.diff(subK, &t2, diff, d, all) - if err != nil { - return err - } - } - default: - return fmt.Errorf("%s: unknown element type (internal)", k) - } - - return nil -} - -func (m schemaMap) diffMap( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - prefix := k + "." - - // First get all the values from the state - var stateMap, configMap map[string]string - o, n, _, nComputed, customized := d.diffChange(k) - if err := mapstructure.WeakDecode(o, &stateMap); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - if err := mapstructure.WeakDecode(n, &configMap); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - - // Keep track of whether the state _exists_ at all prior to clearing it - stateExists := o != nil - - // Delete any count values, since we don't use those - delete(configMap, "%") - delete(stateMap, "%") - - // Check if the number of elements has changed. - oldLen, newLen := len(stateMap), len(configMap) - changed := oldLen != newLen - if oldLen != 0 && newLen == 0 && schema.Computed { - changed = false - } - - // It is computed if we have no old value, no new value, the schema - // says it is computed, and it didn't exist in the state before. The - // last point means: if it existed in the state, even empty, then it - // has already been computed. - computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists - - // If the count has changed or we're computed, then add a diff for the - // count. "nComputed" means that the new value _contains_ a value that - // is computed. We don't do granular diffs for this yet, so we mark the - // whole map as computed. 
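The keys written by diffList (and by diffMap below) follow the legacy flatmap encoding: a list stores its length under "k.#" and its elements under "k.<index>", while a map stores its size under "k.%" and its entries under "k.<key>". A purely illustrative rendering of what ends up in state attributes:

package main

import "fmt"

func main() {
	// How a two-element list and a one-entry map are flattened into state
	// attributes by the diff code above; names and values are illustrative.
	attrs := map[string]string{
		"topics.#":     "2", // list length
		"topics.0":     "terraform",
		"topics.1":     "github",
		"labels.%":     "1", // map size
		"labels.owner": "octocat",
	}
	fmt.Println(attrs) // fmt prints map keys in sorted order
}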
- if changed || computed || nComputed { - countSchema := &Schema{ - Type: TypeInt, - Computed: schema.Computed || nComputed, - ForceNew: schema.ForceNew, - } - - oldStr := strconv.FormatInt(int64(oldLen), 10) - newStr := "" - if !computed && !nComputed { - newStr = strconv.FormatInt(int64(newLen), 10) - } else { - oldStr = "" - } - - diff.Attributes[k+".%"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }, - customized, - ) - } - - // If the new map is nil and we're computed, then ignore it. - if n == nil && schema.Computed { - return nil - } - - // Now we compare, preferring values from the config map - for k, v := range configMap { - old, ok := stateMap[k] - delete(stateMap, k) - - if old == v && ok && !all { - continue - } - - diff.Attributes[prefix+k] = schema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: old, - New: v, - }, - customized, - ) - } - for k, v := range stateMap { - diff.Attributes[prefix+k] = schema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: v, - NewRemoved: true, - }, - customized, - ) - } - - return nil -} - -func (m schemaMap) diffSet( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - - o, n, _, computedSet, customized := d.diffChange(k) - if computedSet { - n = nil - } - nSet := n != nil - - // If we have an old value and no new value is set or will be - // computed once all variables can be interpolated and we're - // computed, then nothing has changed. - if o != nil && n == nil && !computedSet && schema.Computed { - return nil - } - - if o == nil { - o = schema.ZeroValue().(*Set) - } - if n == nil { - n = schema.ZeroValue().(*Set) - } - os := o.(*Set) - ns := n.(*Set) - - // If the new value was set, compare the listCode's to determine if - // the two are equal. Comparing listCode's instead of the actual values - // is needed because there could be computed values in the set which - // would result in false positives while comparing. - if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) { - return nil - } - - // Get the counts - oldLen := os.Len() - newLen := ns.Len() - oldStr := strconv.Itoa(oldLen) - newStr := strconv.Itoa(newLen) - - // Build a schema for our count - countSchema := &Schema{ - Type: TypeInt, - Computed: schema.Computed, - ForceNew: schema.ForceNew, - } - - // If the set computed then say that the # is computed - if computedSet || schema.Computed && !nSet { - // If # already exists, equals 0 and no new set is supplied, there - // is nothing to record in the diff - count, ok := d.GetOk(k + ".#") - if ok && count.(int) == 0 && !nSet && !computedSet { - return nil - } - - // Set the count but make sure that if # does not exist, we don't - // use the zeroed value - countStr := strconv.Itoa(count.(int)) - if !ok { - countStr = "" - } - - diff.Attributes[k+".#"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: countStr, - NewComputed: true, - }, - customized, - ) - return nil - } - - // If the counts are not the same, then record that diff - changed := oldLen != newLen - if changed || all { - diff.Attributes[k+".#"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }, - customized, - ) - } - - // Build the list of codes that will make up our set. This is the - // removed codes as well as all the codes in the new codes. 
- codes := make([][]string, 2) - codes[0] = os.Difference(ns).listCode() - codes[1] = ns.listCode() - for _, list := range codes { - for _, code := range list { - switch t := schema.Elem.(type) { - case *Resource: - // This is a complex resource - for k2, schema := range t.Schema { - subK := fmt.Sprintf("%s.%s.%s", k, code, k2) - err := m.diff(subK, schema, diff, d, true) - if err != nil { - return err - } - } - case *Schema: - // Copy the schema so that we can set Computed/ForceNew from - // the parent schema (the TypeSet). - t2 := *t - t2.ForceNew = schema.ForceNew - - // This is just a primitive element, so go through each and - // just diff each. - subK := fmt.Sprintf("%s.%s", k, code) - err := m.diff(subK, &t2, diff, d, true) - if err != nil { - return err - } - default: - return fmt.Errorf("%s: unknown element type (internal)", k) - } - } - } - - return nil -} - -func (m schemaMap) diffString( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - var originalN interface{} - var os, ns string - o, n, _, computed, customized := d.diffChange(k) - if schema.StateFunc != nil && n != nil { - originalN = n - n = schema.StateFunc(n) - } - nraw := n - if nraw == nil && o != nil { - nraw = schema.Type.Zero() - } - if err := mapstructure.WeakDecode(o, &os); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - if err := mapstructure.WeakDecode(nraw, &ns); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - - if os == ns && !all && !computed { - // They're the same value. If there old value is not blank or we - // have an ID, then return right away since we're already setup. - if os != "" || d.Id() != "" { - return nil - } - - // Otherwise, only continue if we're computed - if !schema.Computed { - return nil - } - } - - removed := false - if o != nil && n == nil && !computed { - removed = true - } - if removed && schema.Computed { - return nil - } - - diff.Attributes[k] = schema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: os, - New: ns, - NewExtra: originalN, - NewRemoved: removed, - NewComputed: computed, - }, - customized, - ) - - return nil -} - -func (m schemaMap) inputString( - input terraform.UIInput, - k string, - schema *Schema) (interface{}, error) { - result, err := input.Input(context.Background(), &terraform.InputOpts{ - Id: k, - Query: k, - Description: schema.Description, - Default: schema.InputDefault, - }) - - return result, err -} - -func (m schemaMap) validate( - k string, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - raw, ok := c.Get(k) - if !ok && schema.DefaultFunc != nil { - // We have a dynamic default. Check if we have a value. 
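diffString above routes the new value through StateFunc before comparison and preserves the raw input in NewExtra, so normalization never loses the original. A minimal sketch of a normalizing StateFunc (attribute name illustrative):

package example

import (
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func descriptionSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeString,
		Optional: true,
		// Stored state is the trimmed form, so stray whitespace in the
		// configuration never produces a spurious diff.
		StateFunc: func(v interface{}) string {
			return strings.TrimSpace(v.(string))
		},
	}
}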
- var err error - raw, err = schema.DefaultFunc() - if err != nil { - return nil, []error{fmt.Errorf( - "%q, error loading default: %s", k, err)} - } - - // We're okay as long as we had a value set - ok = raw != nil - } - - err := validateExactlyOneAttribute(k, schema, c) - if err != nil { - return nil, []error{err} - } - - err = validateAtLeastOneAttribute(k, schema, c) - if err != nil { - return nil, []error{err} - } - - if !ok { - if schema.Required { - return nil, []error{fmt.Errorf( - "%q: required field is not set", k)} - } - return nil, nil - } - - if !schema.Required && !schema.Optional { - // This is a computed-only field - return nil, []error{fmt.Errorf( - "%q: this field cannot be set", k)} - } - - err = validateRequiredWithAttribute(k, schema, c) - if err != nil { - return nil, []error{err} - } - - // If the value is unknown then we can't validate it yet. - // In particular, this avoids spurious type errors where downstream - // validation code sees UnknownVariableValue as being just a string. - // The SDK has to allow the unknown value through initially, so that - // Required fields set via an interpolated value are accepted. - if !isWhollyKnown(raw) { - if schema.Deprecated != "" { - return []string{fmt.Sprintf("%q: [DEPRECATED] %s", k, schema.Deprecated)}, nil - } - return nil, nil - } - - err = validateConflictingAttributes(k, schema, c) - if err != nil { - return nil, []error{err} - } - - return m.validateType(k, raw, schema, c) -} - -// isWhollyKnown returns false if the argument contains an UnknownVariableValue -func isWhollyKnown(raw interface{}) bool { - switch raw := raw.(type) { - case string: - if raw == hcl2shim.UnknownVariableValue { - return false - } - case []interface{}: - for _, v := range raw { - if !isWhollyKnown(v) { - return false - } - } - case map[string]interface{}: - for _, v := range raw { - if !isWhollyKnown(v) { - return false - } - } - } - return true -} -func validateConflictingAttributes( - k string, - schema *Schema, - c *terraform.ResourceConfig) error { - - if len(schema.ConflictsWith) == 0 { - return nil - } - - for _, conflictingKey := range schema.ConflictsWith { - if raw, ok := c.Get(conflictingKey); ok { - if raw == hcl2shim.UnknownVariableValue { - // An unknown value might become unset (null) once known, so - // we must defer validation until it's known. 
- continue - } - return fmt.Errorf( - "%q: conflicts with %s", k, conflictingKey) - } - } - - return nil -} - -func removeDuplicates(elements []string) []string { - encountered := make(map[string]struct{}, 0) - result := []string{} - - for v := range elements { - if _, ok := encountered[elements[v]]; !ok { - encountered[elements[v]] = struct{}{} - result = append(result, elements[v]) - } - } - - return result -} - -func validateRequiredWithAttribute( - k string, - schema *Schema, - c *terraform.ResourceConfig) error { - - if len(schema.RequiredWith) == 0 { - return nil - } - - allKeys := removeDuplicates(append(schema.RequiredWith, k)) - sort.Strings(allKeys) - - for _, key := range allKeys { - if _, ok := c.Get(key); !ok { - return fmt.Errorf("%q: all of `%s` must be specified", k, strings.Join(allKeys, ",")) - } - } - - return nil -} - -func validateExactlyOneAttribute( - k string, - schema *Schema, - c *terraform.ResourceConfig) error { - - if len(schema.ExactlyOneOf) == 0 { - return nil - } - - allKeys := removeDuplicates(append(schema.ExactlyOneOf, k)) - sort.Strings(allKeys) - specified := make([]string, 0) - unknownVariableValueCount := 0 - for _, exactlyOneOfKey := range allKeys { - if c.IsComputed(exactlyOneOfKey) { - unknownVariableValueCount++ - continue - } - - _, ok := c.Get(exactlyOneOfKey) - if ok { - specified = append(specified, exactlyOneOfKey) - } - } - - if len(specified) == 0 && unknownVariableValueCount == 0 { - return fmt.Errorf("%q: one of `%s` must be specified", k, strings.Join(allKeys, ",")) - } - - if len(specified) > 1 { - return fmt.Errorf("%q: only one of `%s` can be specified, but `%s` were specified.", k, strings.Join(allKeys, ","), strings.Join(specified, ",")) - } - - return nil -} - -func validateAtLeastOneAttribute( - k string, - schema *Schema, - c *terraform.ResourceConfig) error { - - if len(schema.AtLeastOneOf) == 0 { - return nil - } - - allKeys := removeDuplicates(append(schema.AtLeastOneOf, k)) - sort.Strings(allKeys) - - for _, atLeastOneOfKey := range allKeys { - if _, ok := c.Get(atLeastOneOfKey); ok { - // We can ignore hcl2shim.UnknownVariable by assuming it's been set and additional validation elsewhere - // will uncover this if it is in fact null. - return nil - } - } - - return fmt.Errorf("%q: one of `%s` must be specified", k, strings.Join(allKeys, ",")) -} - -func (m schemaMap) validateList( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - // first check if the list is wholly unknown - if s, ok := raw.(string); ok { - if s == hcl2shim.UnknownVariableValue { - return nil, nil - } - } - - // schemaMap can't validate nil - if raw == nil { - return nil, nil - } - - // We use reflection to verify the slice because you can't - // case to []interface{} unless the slice is exactly that type. - rawV := reflect.ValueOf(raw) - - // If we support promotion and the raw value isn't a slice, wrap - // it in []interface{} and check again. - if schema.PromoteSingle && rawV.Kind() != reflect.Slice { - raw = []interface{}{raw} - rawV = reflect.ValueOf(raw) - } - - if rawV.Kind() != reflect.Slice { - return nil, []error{fmt.Errorf( - "%s: should be a list", k)} - } - - // We can't validate list length if this came from a dynamic block. - // Since there's no way to determine if something was from a dynamic block - // at this point, we're going to skip validation in the new protocol if - // there are any unknowns. Validate will eventually be called again once - // all values are known. 
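The ExactlyOneOf and RequiredWith checks above correspond to schema declarations like the following sketch: ExactlyOneOf lists may include the attribute itself and must reference non-Required keys, while RequiredWith makes a group all-or-nothing once its annotated key is set. Attribute names are hypothetical:

package example

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

func fileSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"content": {
			Type:     schema.TypeString,
			Optional: true,
			// Exactly one of content/source_file must be configured.
			ExactlyOneOf: []string{"content", "source_file"},
		},
		"source_file": {
			Type:         schema.TypeString,
			Optional:     true,
			ExactlyOneOf: []string{"content", "source_file"},
			// Setting source_file without source_branch fails with
			// "all of `source_branch,source_file` must be specified".
			RequiredWith: []string{"source_branch"},
		},
		"source_branch": {
			Type:     schema.TypeString,
			Optional: true,
		},
	}
}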
- if isProto5() && !isWhollyKnown(raw) { - return nil, nil - } - - // Validate length - if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { - return nil, []error{fmt.Errorf( - "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())} - } - - if schema.MinItems > 0 && rawV.Len() < schema.MinItems { - return nil, []error{fmt.Errorf( - "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())} - } - - // Now build the []interface{} - raws := make([]interface{}, rawV.Len()) - for i := range raws { - raws[i] = rawV.Index(i).Interface() - } - - var ws []string - var es []error - for i, raw := range raws { - key := fmt.Sprintf("%s.%d", k, i) - - // Reify the key value from the ResourceConfig. - // If the list was computed we have all raw values, but some of these - // may be known in the config, and aren't individually marked as Computed. - if r, ok := c.Get(key); ok { - raw = r - } - - var ws2 []string - var es2 []error - switch t := schema.Elem.(type) { - case *Resource: - // This is a sub-resource - ws2, es2 = m.validateObject(key, t.Schema, c) - case *Schema: - ws2, es2 = m.validateType(key, raw, t, c) - } - - if len(ws2) > 0 { - ws = append(ws, ws2...) - } - if len(es2) > 0 { - es = append(es, es2...) - } - } - - return ws, es -} - -func (m schemaMap) validateMap( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - // first check if the list is wholly unknown - if s, ok := raw.(string); ok { - if s == hcl2shim.UnknownVariableValue { - return nil, nil - } - } - - // schemaMap can't validate nil - if raw == nil { - return nil, nil - } - // We use reflection to verify the slice because you can't - // case to []interface{} unless the slice is exactly that type. - rawV := reflect.ValueOf(raw) - switch rawV.Kind() { - case reflect.String: - // If raw and reified are equal, this is a string and should - // be rejected. - reified, reifiedOk := c.Get(k) - if reifiedOk && raw == reified && !c.IsComputed(k) { - return nil, []error{fmt.Errorf("%s: should be a map", k)} - } - // Otherwise it's likely raw is an interpolation. 
- return nil, nil - case reflect.Map: - case reflect.Slice: - default: - return nil, []error{fmt.Errorf("%s: should be a map", k)} - } - - // If it is not a slice, validate directly - if rawV.Kind() != reflect.Slice { - mapIface := rawV.Interface() - if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { - return nil, errs - } - if schema.ValidateFunc != nil { - return schema.ValidateFunc(mapIface, k) - } - return nil, nil - } - - // It is a slice, verify that all the elements are maps - raws := make([]interface{}, rawV.Len()) - for i := range raws { - raws[i] = rawV.Index(i).Interface() - } - - for _, raw := range raws { - v := reflect.ValueOf(raw) - if v.Kind() != reflect.Map { - return nil, []error{fmt.Errorf( - "%s: should be a map", k)} - } - mapIface := v.Interface() - if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { - return nil, errs - } - } - - if schema.ValidateFunc != nil { - validatableMap := make(map[string]interface{}) - for _, raw := range raws { - for k, v := range raw.(map[string]interface{}) { - validatableMap[k] = v - } - } - - return schema.ValidateFunc(validatableMap, k) - } - - return nil, nil -} - -func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) { - for key, raw := range m { - valueType, err := getValueType(k, schema) - if err != nil { - return nil, []error{err} - } - - switch valueType { - case TypeBool: - var n bool - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - case TypeInt: - var n int - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - case TypeFloat: - var n float64 - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - case TypeString: - var n string - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - default: - panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) - } - } - return nil, nil -} - -func getValueType(k string, schema *Schema) (ValueType, error) { - if schema.Elem == nil { - return TypeString, nil - } - if vt, ok := schema.Elem.(ValueType); ok { - return vt, nil - } - - // If a Schema is provided to a Map, we use the Type of that schema - // as the type for each element in the Map. - if s, ok := schema.Elem.(*Schema); ok { - return s.Type, nil - } - - if _, ok := schema.Elem.(*Resource); ok { - // TODO: We don't actually support this (yet) - // but silently pass the validation, until we decide - // how to handle nested structures in maps - return TypeString, nil - } - return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem) -} - -func (m schemaMap) validateObject( - k string, - schema map[string]*Schema, - c *terraform.ResourceConfig) ([]string, []error) { - raw, _ := c.Get(k) - - // schemaMap can't validate nil - if raw == nil { - return nil, nil - } - - if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { - return nil, []error{fmt.Errorf( - "%s: expected object, got %s", - k, reflect.ValueOf(raw).Kind())} - } - - var ws []string - var es []error - for subK, s := range schema { - key := subK - if k != "" { - key = fmt.Sprintf("%s.%s", k, subK) - } - - ws2, es2 := m.validate(key, s, c) - if len(ws2) > 0 { - ws = append(ws, ws2...) 
- } - if len(es2) > 0 { - es = append(es, es2...) - } - } - - // Detect any extra/unknown keys and report those as errors. - if m, ok := raw.(map[string]interface{}); ok { - for subk := range m { - if _, ok := schema[subk]; !ok { - if subk == TimeoutsConfigKey { - continue - } - es = append(es, fmt.Errorf( - "%s: invalid or unknown key: %s", k, subk)) - } - } - } - - return ws, es -} - -func (m schemaMap) validatePrimitive( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - - // a nil value shouldn't happen in the old protocol, and in the new - // protocol the types have already been validated. Either way, we can't - // reflect on nil, so don't panic. - if raw == nil { - return nil, nil - } - - // Catch if the user gave a complex type where a primitive was - // expected, so we can return a friendly error message that - // doesn't contain Go type system terminology. - switch reflect.ValueOf(raw).Type().Kind() { - case reflect.Slice: - return nil, []error{ - fmt.Errorf("%s must be a single value, not a list", k), - } - case reflect.Map: - return nil, []error{ - fmt.Errorf("%s must be a single value, not a map", k), - } - default: // ok - } - - if c.IsComputed(k) { - // If the key is being computed, then it is not an error as - // long as it's not a slice or map. - return nil, nil - } - - var decoded interface{} - switch schema.Type { - case TypeBool: - // Verify that we can parse this as the correct type - var n bool - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - case TypeInt: - switch { - case isProto5(): - // We need to verify the type precisely, because WeakDecode will - // decode a float as an integer. - - // the config shims only use int for integral number values - if v, ok := raw.(int); ok { - decoded = v - } else { - return nil, []error{fmt.Errorf("%s: must be a whole number, got %v", k, raw)} - } - default: - // Verify that we can parse this as an int - var n int - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - } - case TypeFloat: - // Verify that we can parse this as an int - var n float64 - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - case TypeString: - // Verify that we can parse this as a string - var n string - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - default: - panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) - } - - if schema.ValidateFunc != nil { - return schema.ValidateFunc(decoded, k) - } - - return nil, nil -} - -func (m schemaMap) validateType( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - var ws []string - var es []error - switch schema.Type { - case TypeSet, TypeList: - ws, es = m.validateList(k, raw, schema, c) - case TypeMap: - ws, es = m.validateMap(k, raw, schema, c) - default: - ws, es = m.validatePrimitive(k, raw, schema, c) - } - - if schema.Deprecated != "" { - ws = append(ws, fmt.Sprintf( - "%q: [DEPRECATED] %s", k, schema.Deprecated)) - } - - if schema.Removed != "" { - es = append(es, fmt.Errorf( - "%q: [REMOVED] %s", k, schema.Removed)) - } - - return ws, es -} - -// Zero returns the zero value for a type. 
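validatePrimitive hands the decoded value to ValidateFunc, whose ([]string, []error) return feeds the warnings and errors reported at plan time. A minimal sketch of a custom validator (the rule itself is made up for illustration):

package example

import (
	"fmt"
	"strings"
)

// validateSlug is a SchemaValidateFunc: it receives the attribute path k for
// message context and may return warnings, errors, or both.
func validateSlug(i interface{}, k string) ([]string, []error) {
	v, ok := i.(string)
	if !ok {
		return nil, []error{fmt.Errorf("expected type of %q to be string", k)}
	}

	var warnings []string
	var errors []error
	if strings.Contains(v, " ") {
		errors = append(errors, fmt.Errorf("%q may not contain spaces, got: %s", k, v))
	}
	if strings.ToLower(v) != v {
		warnings = append(warnings, fmt.Sprintf("%q: uppercase characters will be normalized", k))
	}
	return warnings, errors
}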
-func (t ValueType) Zero() interface{} { - switch t { - case TypeInvalid: - return nil - case TypeBool: - return false - case TypeInt: - return 0 - case TypeFloat: - return 0.0 - case TypeString: - return "" - case TypeList: - return []interface{}{} - case TypeMap: - return map[string]interface{}{} - case TypeSet: - return new(Set) - case typeObject: - return map[string]interface{}{} - default: - panic(fmt.Sprintf("unknown type %s", t)) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/serialize.go deleted file mode 100644 index 945b0b7ed3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/serialize.go +++ /dev/null @@ -1,127 +0,0 @@ -package schema - -import ( - "bytes" - "fmt" - "sort" - "strconv" -) - -func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) { - if val == nil { - buf.WriteRune(';') - return - } - - switch schema.Type { - case TypeBool: - if val.(bool) { - buf.WriteRune('1') - } else { - buf.WriteRune('0') - } - case TypeInt: - buf.WriteString(strconv.Itoa(val.(int))) - case TypeFloat: - buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64)) - case TypeString: - buf.WriteString(val.(string)) - case TypeList: - buf.WriteRune('(') - l := val.([]interface{}) - for _, innerVal := range l { - serializeCollectionMemberForHash(buf, innerVal, schema.Elem) - } - buf.WriteRune(')') - case TypeMap: - - m := val.(map[string]interface{}) - var keys []string - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - buf.WriteRune('[') - for _, k := range keys { - innerVal := m[k] - if innerVal == nil { - continue - } - buf.WriteString(k) - buf.WriteRune(':') - - switch innerVal := innerVal.(type) { - case bool: - buf.WriteString(strconv.FormatBool(innerVal)) - case int: - buf.WriteString(strconv.Itoa(innerVal)) - case float64: - buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64)) - case string: - buf.WriteString(innerVal) - default: - panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal)) - } - - buf.WriteRune(';') - } - buf.WriteRune(']') - case TypeSet: - buf.WriteRune('{') - s := val.(*Set) - for _, innerVal := range s.List() { - serializeCollectionMemberForHash(buf, innerVal, schema.Elem) - } - buf.WriteRune('}') - default: - panic("unknown schema type to serialize") - } - buf.WriteRune(';') -} - -// SerializeResourceForHash appends a serialization of the given resource config -// to the given buffer, guaranteeing deterministic results given the same value -// and schema. -// -// Its primary purpose is as input into a hashing function in order -// to hash complex substructures when used in sets, and so the serialization -// is not reversible. -func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) { - if val == nil { - return - } - sm := resource.Schema - m := val.(map[string]interface{}) - var keys []string - for k := range sm { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - innerSchema := sm[k] - // Skip attributes that are not user-provided. Computed attributes - // do not contribute to the hash since their ultimate value cannot - // be known at plan/diff time.
- if !(innerSchema.Required || innerSchema.Optional) { - continue - } - - buf.WriteString(k) - buf.WriteRune(':') - innerVal := m[k] - SerializeValueForHash(buf, innerVal, innerSchema) - } -} - -func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) { - switch tElem := elem.(type) { - case *Schema: - SerializeValueForHash(buf, val, tElem) - case *Resource: - buf.WriteRune('<') - SerializeResourceForHash(buf, val, tElem) - buf.WriteString(">;") - default: - panic(fmt.Sprintf("invalid element type: %T", tElem)) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/set.go deleted file mode 100644 index 1b39ff639d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/set.go +++ /dev/null @@ -1,279 +0,0 @@ -package schema - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "strconv" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" -) - -// HashString hashes strings. If you want a Set of strings, this is the -// SchemaSetFunc you want. -func HashString(v interface{}) int { - return hashcode.String(v.(string)) } - -// HashInt hashes integers. If you want a Set of integers, this is the -// SchemaSetFunc you want. -func HashInt(v interface{}) int { - return hashcode.String(strconv.Itoa(v.(int))) -} - -// HashResource hashes complex structures that are described using -// a *Resource. This is the default set implementation used when a set's -// element type is a full resource. -func HashResource(resource *Resource) SchemaSetFunc { - return func(v interface{}) int { - var buf bytes.Buffer - SerializeResourceForHash(&buf, v, resource) - return hashcode.String(buf.String()) - } -} - -// HashSchema hashes values that are described using a *Schema. This is the -// default set implementation used when a set's element type is a single -// schema. -func HashSchema(schema *Schema) SchemaSetFunc { - return func(v interface{}) int { - var buf bytes.Buffer - SerializeValueForHash(&buf, v, schema) - return hashcode.String(buf.String()) - } -} - -// Set is a set data structure that is returned for elements of type -// TypeSet. -type Set struct { - F SchemaSetFunc - - m map[string]interface{} - once sync.Once -} - -// NewSet is a convenience method for creating a new set with the given -// items. -func NewSet(f SchemaSetFunc, items []interface{}) *Set { - s := &Set{F: f} - for _, i := range items { - s.Add(i) - } - - return s -} - -// CopySet returns a copy of another set. -func CopySet(otherSet *Set) *Set { - return NewSet(otherSet.F, otherSet.List()) -} - -// Add adds an item to the set if it isn't already in the set. -func (s *Set) Add(item interface{}) { - s.add(item, false) -} - -// Remove removes an item if it's already in the set. Idempotent. -func (s *Set) Remove(item interface{}) { - s.remove(item) -} - -// Contains checks if the set has the given item. -func (s *Set) Contains(item interface{}) bool { - _, ok := s.m[s.hash(item)] - return ok -} - -// Len returns the number of items in the set. -func (s *Set) Len() int { - return len(s.m) -} - -// List returns the elements of this set in slice format. -// -// The order of the returned elements is deterministic. Given the same -// set, the order of this will always be the same.
-func (s *Set) List() []interface{} { - result := make([]interface{}, len(s.m)) - for i, k := range s.listCode() { - result[i] = s.m[k] - } - - return result -} - -// Difference performs a set difference of the two sets, returning -// a new third set that has only the elements unique to this set. -func (s *Set) Difference(other *Set) *Set { - result := &Set{F: s.F} - result.once.Do(result.init) - - for k, v := range s.m { - if _, ok := other.m[k]; !ok { - result.m[k] = v - } - } - - return result -} - -// Intersection performs the set intersection of the two sets -// and returns a new third set. -func (s *Set) Intersection(other *Set) *Set { - result := &Set{F: s.F} - result.once.Do(result.init) - - for k, v := range s.m { - if _, ok := other.m[k]; ok { - result.m[k] = v - } - } - - return result -} - -// Union performs the set union of the two sets and returns a new third -// set. -func (s *Set) Union(other *Set) *Set { - result := &Set{F: s.F} - result.once.Do(result.init) - - for k, v := range s.m { - result.m[k] = v - } - for k, v := range other.m { - result.m[k] = v - } - - return result -} - -func checkSetMapEqual(m1, m2 map[string]interface{}) bool { - if (m1 == nil) != (m2 == nil) { - return false - } - if len(m1) != len(m2) { - return false - } - for k := range m1 { - v1 := m1[k] - v2, ok := m2[k] - if !ok { - return false - } - switch v1.(type) { - case map[string]interface{}: - same := checkSetMapEqual(v1.(map[string]interface{}), v2.(map[string]interface{})) - if !same { - return false - } - case *Set: - same := v1.(*Set).Equal(v2) - if !same { - return false - } - default: - same := reflect.DeepEqual(v1, v2) - if !same { - return false - } - } - } - return true -} - -func (s *Set) Equal(raw interface{}) bool { - other, ok := raw.(*Set) - if !ok { - return false - } - return checkSetMapEqual(s.m, other.m) -} - -// HashEqual simply compares the keys of the top-level map to the keys in the -// other set's top-level map to see if they are equal. This obviously assumes -// you have a properly working hash function - use HashResource if in doubt. -func (s *Set) HashEqual(raw interface{}) bool { - other, ok := raw.(*Set) - if !ok { - return false - } - - ks1 := make([]string, 0) - ks2 := make([]string, 0) - - for k := range s.m { - ks1 = append(ks1, k) - } - for k := range other.m { - ks2 = append(ks2, k) - } - - sort.Strings(ks1) - sort.Strings(ks2) - - return reflect.DeepEqual(ks1, ks2) -} - -func (s *Set) GoString() string { - return fmt.Sprintf("*Set(%#v)", s.m) -} - -func (s *Set) init() { - s.m = make(map[string]interface{}) -} - -func (s *Set) add(item interface{}, computed bool) string { - s.once.Do(s.init) - - code := s.hash(item) - if computed { - code = "~" + code - - if isProto5() { - tmpCode := code - count := 0 - for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] { - count++ - tmpCode = fmt.Sprintf("%s%d", code, count) - } - code = tmpCode - } - } - - if _, ok := s.m[code]; !ok { - s.m[code] = item - } - - return code -} - -func (s *Set) hash(item interface{}) string { - code := s.F(item) - // Always return a nonnegative hashcode.
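The Set operations above can be exercised directly; Difference is exactly what diffSet uses to find removed elements. A minimal sketch built on HashString:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func main() {
	before := schema.NewSet(schema.HashString, []interface{}{"a", "b"})
	after := schema.NewSet(schema.HashString, []interface{}{"b", "c"})

	// Elements present in before but not in after: these are the "removed
	// codes" that diffSet walks when building per-element diffs.
	removed := before.Difference(after)
	fmt.Println(removed.List())            // [a]
	fmt.Println(before.Union(after).Len()) // 3
	fmt.Println(before.Contains("b"))      // true
}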
- if code < 0 { - code = -code - } - return strconv.Itoa(code) -} - -func (s *Set) remove(item interface{}) string { - s.once.Do(s.init) - - code := s.hash(item) - delete(s.m, code) - - return code -} - -func (s *Set) listCode() []string { - // Sort the hash codes so the order of the list is deterministic - keys := make([]string, 0, len(s.m)) - for k := range s.m { - keys = append(keys, k) - } - sort.Sort(sort.StringSlice(keys)) - return keys -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/testing.go deleted file mode 100644 index 4d0fd7365d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/testing.go +++ /dev/null @@ -1,28 +0,0 @@ -package schema - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// TestResourceDataRaw creates a ResourceData from a raw configuration map. -func TestResourceDataRaw( - t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { - t.Helper() - - c := terraform.NewResourceConfigRaw(raw) - - sm := schemaMap(schema) - diff, err := sm.Diff(nil, c, nil, nil, true) - if err != nil { - t.Fatalf("err: %s", err) - } - - result, err := sm.Data(nil, diff) - if err != nil { - t.Fatalf("err: %s", err) - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype.go deleted file mode 100644 index 0f65d692f0..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype.go +++ /dev/null @@ -1,21 +0,0 @@ -package schema - -//go:generate go run golang.org/x/tools/cmd/stringer -type=ValueType valuetype.go - -// ValueType is an enum of the type that can be represented by a schema. -type ValueType int - -const ( - TypeInvalid ValueType = iota - TypeBool - TypeInt - TypeFloat - TypeString - TypeList - TypeMap - TypeSet - typeObject -) - -// NOTE: ValueType has more functions defined on it in schema.go. We can't -// put them here because we reference other files. diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/suppress_json_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/suppress_json_diff.go deleted file mode 100644 index e23707f574..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/suppress_json_diff.go +++ /dev/null @@ -1,21 +0,0 @@ -package structure - -import ( - "reflect" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -func SuppressJsonDiff(k, old, new string, d *schema.ResourceData) bool { - oldMap, err := ExpandJsonFromString(old) - if err != nil { - return false - } - - newMap, err := ExpandJsonFromString(new) - if err != nil { - return false - } - - return reflect.DeepEqual(oldMap, newMap) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/list.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/list.go deleted file mode 100644 index d60a4882f2..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/list.go +++ /dev/null @@ -1,41 +0,0 @@ -package validation - -import "fmt" - -// ValidateListUniqueStrings is a ValidateFunc that ensures a list has no -// duplicate items in it. It's useful for when a list is needed over a set -// because order matters, yet the items still need to be unique. 
-// -// Deprecated: use ListOfUniqueStrings -func ValidateListUniqueStrings(i interface{}, k string) (warnings []string, errors []error) { - return ListOfUniqueStrings(i, k) -} - -// ListOfUniqueStrings is a ValidateFunc that ensures a list has no -// duplicate items in it. It's useful for when a list is needed over a set -// because order matters, yet the items still need to be unique. -func ListOfUniqueStrings(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.([]interface{}) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %q to be List", k)) - return warnings, errors - } - - for _, e := range v { - if _, eok := e.(string); !eok { - errors = append(errors, fmt.Errorf("expected %q to only contain string elements, found :%v", k, e)) - return warnings, errors - } - } - - for n1, i1 := range v { - for n2, i2 := range v { - if i1.(string) == i2.(string) && n1 != n2 { - errors = append(errors, fmt.Errorf("expected %q to not have duplicates: found 2 or more of %v", k, i1)) - return warnings, errors - } - } - } - - return warnings, errors -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/meta.go deleted file mode 100644 index b05557ac2a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/meta.go +++ /dev/null @@ -1,59 +0,0 @@ -package validation - -import ( - "fmt" - "reflect" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// NoZeroValues is a SchemaValidateFunc which tests if the provided value is -// not a zero value. It's useful in situations where you want to catch -// explicit zero values on things like required fields during validation. -func NoZeroValues(i interface{}, k string) (s []string, es []error) { - if reflect.ValueOf(i).Interface() == reflect.Zero(reflect.TypeOf(i)).Interface() { - switch reflect.TypeOf(i).Kind() { - case reflect.String: - es = append(es, fmt.Errorf("%s must not be empty, got %v", k, i)) - case reflect.Int, reflect.Float64: - es = append(es, fmt.Errorf("%s must not be zero, got %v", k, i)) - default: - // this validator should only ever be applied to TypeString, TypeInt and TypeFloat - panic(fmt.Errorf("can't use NoZeroValues with %T attribute %s", i, k)) - } - } - return -} - -// All returns a SchemaValidateFunc which tests if the provided value -// passes all provided SchemaValidateFunc -func All(validators ...schema.SchemaValidateFunc) schema.SchemaValidateFunc { - return func(i interface{}, k string) ([]string, []error) { - var allErrors []error - var allWarnings []string - for _, validator := range validators { - validatorWarnings, validatorErrors := validator(i, k) - allWarnings = append(allWarnings, validatorWarnings...) - allErrors = append(allErrors, validatorErrors...) - } - return allWarnings, allErrors - } -} - -// Any returns a SchemaValidateFunc which tests if the provided value -// passes any of the provided SchemaValidateFunc -func Any(validators ...schema.SchemaValidateFunc) schema.SchemaValidateFunc { - return func(i interface{}, k string) ([]string, []error) { - var allErrors []error - var allWarnings []string - for _, validator := range validators { - validatorWarnings, validatorErrors := validator(i, k) - if len(validatorWarnings) == 0 && len(validatorErrors) == 0 { - return []string{}, []error{} - } - allWarnings = append(allWarnings, validatorWarnings...) - allErrors = append(allErrors, validatorErrors...) 
- } - return allWarnings, allErrors - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/network.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/network.go deleted file mode 100644 index 4fea717987..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/network.go +++ /dev/null @@ -1,194 +0,0 @@ -package validation - -import ( - "bytes" - "fmt" - "net" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// SingleIP returns a SchemaValidateFunc which tests if the provided value -// is of type string, and in valid single Value notation -// -// Deprecated: use IsIPAddress instead -func SingleIP() schema.SchemaValidateFunc { - return IsIPAddress -} - -// IsIPAddress is a SchemaValidateFunc which tests if the provided value is of type string and is a single IP (v4 or v6) -func IsIPAddress(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) - return warnings, errors - } - - ip := net.ParseIP(v) - if ip == nil { - errors = append(errors, fmt.Errorf("expected %s to contain a valid IP, got: %s", k, v)) - } - - return warnings, errors -} - -// IsIPv6Address is a SchemaValidateFunc which tests if the provided value is of type string and a valid IPv6 address -func IsIPv6Address(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) - return warnings, errors - } - - ip := net.ParseIP(v) - if six := ip.To16(); six == nil { - errors = append(errors, fmt.Errorf("expected %s to contain a valid IPv6 address, got: %s", k, v)) - } - - return warnings, errors -} - -// IsIPv4Address is a SchemaValidateFunc which tests if the provided value is of type string and a valid IPv4 address -func IsIPv4Address(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) - return warnings, errors - } - - ip := net.ParseIP(v) - if four := ip.To4(); four == nil { - errors = append(errors, fmt.Errorf("expected %s to contain a valid IPv4 address, got: %s", k, v)) - } - - return warnings, errors -} - -// IPRange returns a SchemaValidateFunc which tests if the provided value is of type string, and in valid IP range -// -// Deprecated: use IsIPv4Range instead -func IPRange() schema.SchemaValidateFunc { - return IsIPv4Range -} - -// IsIPv4Range is a SchemaValidateFunc which tests if the provided value is of type string, and in valid IP range -func IsIPv4Range(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) - return warnings, errors - } - - ips := strings.Split(v, "-") - if len(ips) != 2 { - errors = append(errors, fmt.Errorf("expected %s to contain a valid IP range, got: %s", k, v)) - return warnings, errors - } - - ip1 := net.ParseIP(ips[0]) - ip2 := net.ParseIP(ips[1]) - if ip1 == nil || ip2 == nil || bytes.Compare(ip1, ip2) > 0 { - errors = append(errors, fmt.Errorf("expected %s to contain a valid IP range, got: %s", k, v)) - } - - return warnings, errors -} - -// IsCIDR is a SchemaValidateFunc which tests if the provided value is of type string and a valid CIDR -func IsCIDR(i interface{}, k string) (warnings []string, errors []error) { - v, ok 
:= i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) - return warnings, errors - } - - if _, _, err := net.ParseCIDR(v); err != nil { - errors = append(errors, fmt.Errorf("expected %q to be a valid IPv4 Value, got %v: %v", k, i, err)) - } - - return warnings, errors -} - -// CIDRNetwork returns a SchemaValidateFunc which tests if the provided value -// is of type string, is in valid Value network notation, and has significant bits between min and max (inclusive) -// -// Deprecated: use IsCIDRNetwork instead -func CIDRNetwork(min, max int) schema.SchemaValidateFunc { - return IsCIDRNetwork(min, max) -} - -// IsCIDRNetwork returns a SchemaValidateFunc which tests if the provided value -// is of type string, is in valid Value network notation, and has significant bits between min and max (inclusive) -func IsCIDRNetwork(min, max int) schema.SchemaValidateFunc { - return func(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) - return warnings, errors - } - - _, ipnet, err := net.ParseCIDR(v) - if err != nil { - errors = append(errors, fmt.Errorf("expected %s to contain a valid Value, got: %s with err: %s", k, v, err)) - return warnings, errors - } - - if ipnet == nil || v != ipnet.String() { - errors = append(errors, fmt.Errorf("expected %s to contain a valid network Value, expected %s, got %s", - k, ipnet, v)) - } - - sigbits, _ := ipnet.Mask.Size() - if sigbits < min || sigbits > max { - errors = append(errors, fmt.Errorf("expected %q to contain a network Value with between %d and %d significant bits, got: %d", k, min, max, sigbits)) - } - - return warnings, errors - } -} - -// IsMACAddress is a SchemaValidateFunc which tests if the provided value is of type string and a valid MAC address -func IsMACAddress(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) - return warnings, errors - } - - if _, err := net.ParseMAC(v); err != nil { - errors = append(errors, fmt.Errorf("expected %q to be a valid MAC address, got %v: %v", k, i, err)) - } - - return warnings, errors -} - -// IsPortNumber is a SchemaValidateFunc which tests if the provided value is of type string and a valid TCP Port Number -func IsPortNumber(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(int) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %q to be integer", k)) - return warnings, errors - } - - if 1 > v || v > 65535 { - errors = append(errors, fmt.Errorf("expected %q to be a valid port number, got: %v", k, v)) - } - - return warnings, errors -} - -// IsPortNumberOrZero is a SchemaValidateFunc which tests if the provided value is of type string and a valid TCP Port Number or zero -func IsPortNumberOrZero(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(int) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %q to be integer", k)) - return warnings, errors - } - - if 0 > v || v > 65535 { - errors = append(errors, fmt.Errorf("expected %q to be a valid port number or 0, got: %v", k, v)) - } - - return warnings, errors -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/strings.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/strings.go deleted file mode 100644 index c4e53b5126..0000000000 --- 
a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/strings.go +++ /dev/null @@ -1,253 +0,0 @@ -package validation - -import ( - "encoding/base64" - "fmt" - "regexp" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/structure" -) - -// StringIsNotEmpty is a ValidateFunc that ensures a string is not empty -func StringIsNotEmpty(i interface{}, k string) ([]string, []error) { - v, ok := i.(string) - if !ok { - return nil, []error{fmt.Errorf("expected type of %q to be string", k)} - } - - if v == "" { - return nil, []error{fmt.Errorf("expected %q to not be an empty string, got %v", k, i)} - } - - return nil, nil -} - -// StringIsNotWhiteSpace is a ValidateFunc that ensures a string is not empty or consisting entirely of whitespace characters -func StringIsNotWhiteSpace(i interface{}, k string) ([]string, []error) { - v, ok := i.(string) - if !ok { - return nil, []error{fmt.Errorf("expected type of %q to be string", k)} - } - - if strings.TrimSpace(v) == "" { - return nil, []error{fmt.Errorf("expected %q to not be an empty string or whitespace", k)} - } - - return nil, nil -} - -// StringIsEmpty is a ValidateFunc that ensures a string has no characters -func StringIsEmpty(i interface{}, k string) ([]string, []error) { - v, ok := i.(string) - if !ok { - return nil, []error{fmt.Errorf("expected type of %q to be string", k)} - } - - if v != "" { - return nil, []error{fmt.Errorf("expected %q to be an empty string: got %v", k, v)} - } - - return nil, nil -} - -// StringIsWhiteSpace is a ValidateFunc that ensures a string is composed of entirely whitespace -func StringIsWhiteSpace(i interface{}, k string) ([]string, []error) { - v, ok := i.(string) - if !ok { - return nil, []error{fmt.Errorf("expected type of %q to be string", k)} - } - - if strings.TrimSpace(v) != "" { - return nil, []error{fmt.Errorf("expected %q to be an empty string or whitespace: got %v", k, v)} - } - - return nil, nil -} - -// StringLenBetween returns a SchemaValidateFunc which tests if the provided value -// is of type string and has length between min and max (inclusive) -func StringLenBetween(min, max int) schema.SchemaValidateFunc { - return func(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) - return warnings, errors - } - - if len(v) < min || len(v) > max { - errors = append(errors, fmt.Errorf("expected length of %s to be in the range (%d - %d), got %s", k, min, max, v)) - } - - return warnings, errors - } -} - -// StringMatch returns a SchemaValidateFunc which tests if the provided value -// matches a given regexp. Optionally an error message can be provided to -// return something friendlier than "must match some globby regexp". -func StringMatch(r *regexp.Regexp, message string) schema.SchemaValidateFunc { - return func(i interface{}, k string) ([]string, []error) { - v, ok := i.(string) - if !ok { - return nil, []error{fmt.Errorf("expected type of %s to be string", k)} - } - - if ok := r.MatchString(v); !ok { - if message != "" { - return nil, []error{fmt.Errorf("invalid value for %s (%s)", k, message)} - - } - return nil, []error{fmt.Errorf("expected value of %s to match regular expression %q, got %v", k, r, i)} - } - return nil, nil - } -} - -// StringDoesNotMatch returns a SchemaValidateFunc which tests if the provided value -// does not match a given regexp. 
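The validators in this file share one pattern worth calling out: functions like StringLenBetween and StringMatch capture their parameters in a closure and return a SchemaValidateFunc. A compilable sketch of that pattern, with the SDK's function type stubbed out locally so it builds without the plugin SDK (the local type is an assumption for self-containment):

package main

import (
	"fmt"
	"regexp"
)

// schemaValidateFunc stands in for the SDK's SchemaValidateFunc:
// (value, attribute name) -> (warnings, errors).
type schemaValidateFunc func(i interface{}, k string) ([]string, []error)

// stringMatch follows the deleted StringMatch: the regexp and the
// optional friendly message are captured in a closure, and the
// returned function is what gets attached to a schema attribute.
func stringMatch(r *regexp.Regexp, message string) schemaValidateFunc {
	return func(i interface{}, k string) ([]string, []error) {
		v, ok := i.(string)
		if !ok {
			return nil, []error{fmt.Errorf("expected type of %s to be string", k)}
		}
		if !r.MatchString(v) {
			if message != "" {
				return nil, []error{fmt.Errorf("invalid value for %s (%s)", k, message)}
			}
			return nil, []error{fmt.Errorf("expected value of %s to match %q, got %v", k, r, i)}
		}
		return nil, nil
	}
}

func main() {
	validate := stringMatch(regexp.MustCompile(`^[a-z-]+$`), "must be lowercase letters and hyphens")
	_, errs := validate("Not_Valid", "name")
	fmt.Println(errs)
}
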
Optionally an error message can be provided to -// return something friendlier than "must not match some globby regexp". -func StringDoesNotMatch(r *regexp.Regexp, message string) schema.SchemaValidateFunc { - return func(i interface{}, k string) ([]string, []error) { - v, ok := i.(string) - if !ok { - return nil, []error{fmt.Errorf("expected type of %s to be string", k)} - } - - if ok := r.MatchString(v); ok { - if message != "" { - return nil, []error{fmt.Errorf("invalid value for %s (%s)", k, message)} - - } - return nil, []error{fmt.Errorf("expected value of %s to not match regular expression %q, got %v", k, r, i)} - } - return nil, nil - } -} - -// StringInSlice returns a SchemaValidateFunc which tests if the provided value -// is of type string and matches the value of an element in the valid slice -// will test with in lower case if ignoreCase is true -func StringInSlice(valid []string, ignoreCase bool) schema.SchemaValidateFunc { - return func(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) - return warnings, errors - } - - for _, str := range valid { - if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) { - return warnings, errors - } - } - - errors = append(errors, fmt.Errorf("expected %s to be one of %v, got %s", k, valid, v)) - return warnings, errors - } -} - -// StringNotInSlice returns a SchemaValidateFunc which tests if the provided value -// is of type string and does not match the value of any element in the invalid slice -// will test with in lower case if ignoreCase is true -func StringNotInSlice(invalid []string, ignoreCase bool) schema.SchemaValidateFunc { - return func(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) - return warnings, errors - } - - for _, str := range invalid { - if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) { - errors = append(errors, fmt.Errorf("expected %s to not be any of %v, got %s", k, invalid, v)) - return warnings, errors - } - } - - return warnings, errors - } -} - -// StringDoesNotContainAny returns a SchemaValidateFunc which validates that the -// provided value does not contain any of the specified Unicode code points in chars. 
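StringInSlice and StringNotInSlice above implement their ignoreCase option by lowering both sides with strings.ToLower; strings.EqualFold is the idiomatic standard-library spelling of the same comparison. A reduced predicate version (names illustrative, not SDK API):

package main

import (
	"fmt"
	"strings"
)

// stringInSlice mirrors the deleted StringInSlice as a plain
// predicate: it reports whether v matches one of valid, optionally
// ignoring case via strings.EqualFold.
func stringInSlice(valid []string, v string, ignoreCase bool) bool {
	for _, s := range valid {
		if v == s || (ignoreCase && strings.EqualFold(v, s)) {
			return true
		}
	}
	return false
}

func main() {
	levels := []string{"read", "write", "admin"}
	fmt.Println(stringInSlice(levels, "WRITE", true))  // true
	fmt.Println(stringInSlice(levels, "WRITE", false)) // false
}
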
-func StringDoesNotContainAny(chars string) schema.SchemaValidateFunc { - return func(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) - return warnings, errors - } - - if strings.ContainsAny(v, chars) { - errors = append(errors, fmt.Errorf("expected value of %s to not contain any of %q, got %v", k, chars, i)) - return warnings, errors - } - - return warnings, errors - } -} - -// StringIsBase64 is a ValidateFunc that ensures a string can be parsed as Base64 -func StringIsBase64(i interface{}, k string) (warnings []string, errors []error) { - // Empty string is not allowed - if warnings, errors = StringIsNotEmpty(i, k); len(errors) > 0 { - return - } - - // NoEmptyStrings checks it is a string - v, _ := i.(string) - - if _, err := base64.StdEncoding.DecodeString(v); err != nil { - errors = append(errors, fmt.Errorf("expected %q to be a base64 string, got %v", k, v)) - } - - return warnings, errors -} - -// ValidateJsonString is a SchemaValidateFunc which tests to make sure the -// supplied string is valid JSON. -// -// Deprecated: use StringIsJSON instead -func ValidateJsonString(i interface{}, k string) (warnings []string, errors []error) { - return StringIsJSON(i, k) -} - -// StringIsJSON is a SchemaValidateFunc which tests to make sure the supplied string is valid JSON. -func StringIsJSON(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) - return warnings, errors - } - - if _, err := structure.NormalizeJsonString(v); err != nil { - errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) - } - - return warnings, errors -} - -// ValidateRegexp returns a SchemaValidateFunc which tests to make sure the -// supplied string is a valid regular expression. -// -// Deprecated: use StringIsValidRegExp instead -func ValidateRegexp(i interface{}, k string) (warnings []string, errors []error) { - return StringIsValidRegExp(i, k) -} - -// StringIsValidRegExp returns a SchemaValidateFunc which tests to make sure the supplied string is a valid regular expression. 
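StringIsBase64, StringIsJSON, and StringIsValidRegExp each wrap a single check in the usual (warnings, errors) plumbing. Stripped down to those checks (json.Valid stands in here for the NormalizeJsonString round-trip the SDK performs; a close but not identical test):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"regexp"
)

// Three predicates matching the core of the deleted validators; each
// is one standard-library call.
func isBase64(s string) bool { _, err := base64.StdEncoding.DecodeString(s); return err == nil }
func isJSON(s string) bool   { return json.Valid([]byte(s)) }
func isRegexp(s string) bool { _, err := regexp.Compile(s); return err == nil }

func main() {
	fmt.Println(isBase64("aGVsbG8="))    // true
	fmt.Println(isJSON(`{"a": [1, 2]}`)) // true
	fmt.Println(isRegexp(`([unclosed`))  // false
}
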
-func StringIsValidRegExp(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) - return warnings, errors - } - - if _, err := regexp.Compile(v); err != nil { - errors = append(errors, fmt.Errorf("%q: %s", k, err)) - } - - return warnings, errors -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/testing.go deleted file mode 100644 index 8a2da7f892..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/testing.go +++ /dev/null @@ -1,43 +0,0 @@ -package validation - -import ( - "regexp" - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -type testCase struct { - val interface{} - f schema.SchemaValidateFunc - expectedErr *regexp.Regexp -} - -func runTestCases(t *testing.T, cases []testCase) { - matchErr := func(errs []error, r *regexp.Regexp) bool { - // err must match one provided - for _, err := range errs { - if r.MatchString(err.Error()) { - return true - } - } - - return false - } - - for i, tc := range cases { - _, errs := tc.f(tc.val, "test_property") - - if len(errs) == 0 && tc.expectedErr == nil { - continue - } - - if len(errs) != 0 && tc.expectedErr == nil { - t.Fatalf("expected test case %d to produce no errors, got %v", i, errs) - } - - if !matchErr(errs, tc.expectedErr) { - t.Fatalf("expected test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, errs) - } - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/time.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/time.go deleted file mode 100644 index 1c6788c68f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/time.go +++ /dev/null @@ -1,61 +0,0 @@ -package validation - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// IsDayOfTheWeek id a SchemaValidateFunc which tests if the provided value is of type string and a valid english day of the week -func IsDayOfTheWeek(ignoreCase bool) schema.SchemaValidateFunc { - return StringInSlice([]string{ - "Monday", - "Tuesday", - "Wednesday", - "Thursday", - "Friday", - "Saturday", - "Sunday", - }, ignoreCase) -} - -// IsMonth id a SchemaValidateFunc which tests if the provided value is of type string and a valid english month -func IsMonth(ignoreCase bool) schema.SchemaValidateFunc { - return StringInSlice([]string{ - "January", - "February", - "March", - "April", - "May", - "June", - "July", - "August", - "September", - "October", - "November", - "December", - }, ignoreCase) -} - -// IsRFC3339Time is a SchemaValidateFunc which tests if the provided value is of type string and a valid RFC33349Time -func IsRFC3339Time(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) - return warnings, errors - } - - if _, err := time.Parse(time.RFC3339, v); err != nil { - errors = append(errors, fmt.Errorf("expected %q to be a valid RFC3339 date, got %q: %+v", k, i, err)) - } - - return warnings, errors -} - -// ValidateRFC3339TimeString is a ValidateFunc that ensures a string parses as time.RFC3339 format -// -// Deprecated: use IsRFC3339Time() instead -func ValidateRFC3339TimeString(i interface{}, k string) (warnings []string, errors []error) 
{ - return IsRFC3339Time(i, k) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go deleted file mode 100644 index 158b499e7c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go +++ /dev/null @@ -1,30 +0,0 @@ -package httpclient - -import ( - "fmt" - "log" - "os" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/meta" -) - -const uaEnvVar = "TF_APPEND_USER_AGENT" - -// TerraformUserAgent returns a User-Agent header for a Terraform version string. -// -// Deprecated: This will be removed in v2 without replacement. If you need -// its functionality, you can copy it or reference the v1 package. -func TerraformUserAgent(version string) string { - ua := fmt.Sprintf("HashiCorp Terraform/%s (+https://www.terraform.io) Terraform Plugin SDK/%s", version, meta.SDKVersionString()) - - if add := os.Getenv(uaEnvVar); add != "" { - add = strings.TrimSpace(add) - if len(add) > 0 { - ua += " " + add - log.Printf("[DEBUG] Using modified User-Agent: %s", ua) - } - } - - return ua -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go deleted file mode 100644 index 90a5faf0ed..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// CountAttr is the address of an attribute of the "count" object in -// the interpolation scope, like "count.index". -type CountAttr struct { - referenceable - Name string -} - -func (ca CountAttr) String() string { - return "count." + ca.Name -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/doc.go deleted file mode 100644 index 46093314fe..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Package addrs contains types that represent "addresses", which are -// references to specific objects within a Terraform configuration or -// state. -// -// All addresses have string representations based on HCL traversal syntax -// which should be used in the user-interface, and also in-memory -// representations that can be used internally. -// -// For object types that exist within Terraform modules a pair of types is -// used. The "local" part of the address is represented by a type, and then -// an absolute path to that object in the context of its module is represented -// by a type of the same name with an "Abs" prefix added, for "absolute". -// -// All types within this package should be treated as immutable, even if this -// is not enforced by the Go compiler. It is always an implementation error -// to modify an address object in-place after it is initially constructed. -package addrs diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go deleted file mode 100644 index 7a6385035d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// ForEachAttr is the address of an attribute referencing the current "for_each" object in -// the interpolation scope, addressed using the "each" keyword, ex. 
"each.key" and "each.value" -type ForEachAttr struct { - referenceable - Name string -} - -func (f ForEachAttr) String() string { - return "each." + f.Name -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go deleted file mode 100644 index d2c046c111..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go +++ /dev/null @@ -1,41 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// InputVariable is the address of an input variable. -type InputVariable struct { - referenceable - Name string -} - -func (v InputVariable) String() string { - return "var." + v.Name -} - -// AbsInputVariableInstance is the address of an input variable within a -// particular module instance. -type AbsInputVariableInstance struct { - Module ModuleInstance - Variable InputVariable -} - -// InputVariable returns the absolute address of the input variable of the -// given name inside the receiving module instance. -func (m ModuleInstance) InputVariable(name string) AbsInputVariableInstance { - return AbsInputVariableInstance{ - Module: m, - Variable: InputVariable{ - Name: name, - }, - } -} - -func (v AbsInputVariableInstance) String() string { - if len(v.Module) == 0 { - return v.String() - } - - return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go deleted file mode 100644 index cef8b27964..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go +++ /dev/null @@ -1,123 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// InstanceKey represents the key of an instance within an object that -// contains multiple instances due to using "count" or "for_each" arguments -// in configuration. -// -// IntKey and StringKey are the two implementations of this type. No other -// implementations are allowed. The single instance of an object that _isn't_ -// using "count" or "for_each" is represented by NoKey, which is a nil -// InstanceKey. -type InstanceKey interface { - instanceKeySigil() - String() string -} - -// ParseInstanceKey returns the instance key corresponding to the given value, -// which must be known and non-null. -// -// If an unknown or null value is provided then this function will panic. This -// function is intended to deal with the values that would naturally be found -// in a hcl.TraverseIndex, which (when parsed from source, at least) can never -// contain unknown or null values. -func ParseInstanceKey(key cty.Value) (InstanceKey, error) { - switch key.Type() { - case cty.String: - return StringKey(key.AsString()), nil - case cty.Number: - var idx int - err := gocty.FromCtyValue(key, &idx) - return IntKey(idx), err - default: - return NoKey, fmt.Errorf("either a string or an integer is required") - } -} - -// NoKey represents the absense of an InstanceKey, for the single instance -// of a configuration object that does not use "count" or "for_each" at all. -var NoKey InstanceKey - -// IntKey is the InstanceKey representation representing integer indices, as -// used when the "count" argument is specified or if for_each is used with -// a sequence type. 
-type IntKey int - -func (k IntKey) instanceKeySigil() { -} - -func (k IntKey) String() string { - return fmt.Sprintf("[%d]", int(k)) -} - -// StringKey is the InstanceKey representation representing string indices, as -// used when the "for_each" argument is specified with a map or object type. -type StringKey string - -func (k StringKey) instanceKeySigil() { -} - -func (k StringKey) String() string { - // FIXME: This isn't _quite_ right because Go's quoted string syntax is - // slightly different than HCL's, but we'll accept it for now. - return fmt.Sprintf("[%q]", string(k)) -} - -// InstanceKeyLess returns true if the first given instance key i should sort -// before the second key j, and false otherwise. -func InstanceKeyLess(i, j InstanceKey) bool { - iTy := instanceKeyType(i) - jTy := instanceKeyType(j) - - switch { - case i == j: - return false - case i == NoKey: - return true - case j == NoKey: - return false - case iTy != jTy: - // The ordering here is arbitrary except that we want NoKeyType - // to sort before the others, so we'll just use the enum values - // of InstanceKeyType here (where NoKey is zero, sorting before - // any other). - return uint32(iTy) < uint32(jTy) - case iTy == IntKeyType: - return int(i.(IntKey)) < int(j.(IntKey)) - case iTy == StringKeyType: - return string(i.(StringKey)) < string(j.(StringKey)) - default: - // Shouldn't be possible to get down here in practice, since the - // above is exhaustive. - return false - } -} - -func instanceKeyType(k InstanceKey) InstanceKeyType { - if _, ok := k.(StringKey); ok { - return StringKeyType - } - if _, ok := k.(IntKey); ok { - return IntKeyType - } - return NoKeyType -} - -// InstanceKeyType represents the different types of instance key that are -// supported. Usually it is sufficient to simply type-assert an InstanceKey -// value to either IntKey or StringKey, but this type and its values can be -// used to represent the types themselves, rather than specific values -// of those types. -type InstanceKeyType rune - -const ( - NoKeyType InstanceKeyType = 0 - IntKeyType InstanceKeyType = 'I' - StringKeyType InstanceKeyType = 'S' -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go deleted file mode 100644 index 61a07b9c75..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go +++ /dev/null @@ -1,48 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// LocalValue is the address of a local value. -type LocalValue struct { - referenceable - Name string -} - -func (v LocalValue) String() string { - return "local." + v.Name -} - -// Absolute converts the receiver into an absolute address within the given -// module instance. -func (v LocalValue) Absolute(m ModuleInstance) AbsLocalValue { - return AbsLocalValue{ - Module: m, - LocalValue: v, - } -} - -// AbsLocalValue is the absolute address of a local value within a module instance. -type AbsLocalValue struct { - Module ModuleInstance - LocalValue LocalValue -} - -// LocalValue returns the absolute address of a local value of the given -// name within the receiving module instance. 
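The Absolute/String pairing used by LocalValue (and by the other address types in this package) reduces to: print the local form alone in the root module, otherwise prefix the module path. A toy version with the module path flattened to a plain string (an assumption for brevity; in the SDK it is a slice of steps):

package main

import "fmt"

// localValue and absLocalValue are cut-down stand-ins for the deleted
// types of the same (capitalized) names.
type localValue struct{ Name string }

func (v localValue) String() string { return "local." + v.Name }

type absLocalValue struct {
	Module     string // "" means the root module
	LocalValue localValue
}

func (v absLocalValue) String() string {
	if v.Module == "" {
		return v.LocalValue.String()
	}
	return fmt.Sprintf("%s.%s", v.Module, v.LocalValue.String())
}

func main() {
	fmt.Println(absLocalValue{LocalValue: localValue{Name: "region"}})                       // local.region
	fmt.Println(absLocalValue{Module: "module.network", LocalValue: localValue{Name: "az"}}) // module.network.local.az
}
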
-func (m ModuleInstance) LocalValue(name string) AbsLocalValue { - return AbsLocalValue{ - Module: m, - LocalValue: LocalValue{ - Name: name, - }, - } -} - -func (v AbsLocalValue) String() string { - if len(v.Module) == 0 { - return v.LocalValue.String() - } - return fmt.Sprintf("%s.%s", v.Module.String(), v.LocalValue.String()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go deleted file mode 100644 index 1533f853c5..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go +++ /dev/null @@ -1,51 +0,0 @@ -package addrs - -import ( - "strings" -) - -// Module is an address for a module call within configuration. This is -// the static counterpart of ModuleInstance, representing a traversal through -// the static module call tree in configuration and does not take into account -// the potentially-multiple instances of a module that might be created by -// "count" and "for_each" arguments within those calls. -// -// This type should be used only in very specialized cases when working with -// the static module call tree. Type ModuleInstance is appropriate in more cases. -// -// Although Module is a slice, it should be treated as immutable after creation. -type Module []string - -// IsRoot returns true if the receiver is the address of the root module, -// or false otherwise. -func (m Module) IsRoot() bool { - return len(m) == 0 -} - -func (m Module) String() string { - if len(m) == 0 { - return "" - } - return strings.Join([]string(m), ".") -} - -// Call returns the module call address that corresponds to the given module -// instance, along with the address of the module that contains it. -// -// There is no call for the root module, so this method will panic if called -// on the root module address. -// -// In practice, this just turns the last element of the receiver into a -// ModuleCall and then returns a slice of the receiever that excludes that -// last part. This is just a convenience for situations where a call address -// is required, such as when dealing with *Reference and Referencable values. -func (m Module) Call() (Module, ModuleCall) { - if len(m) == 0 { - panic("cannot produce ModuleCall for root module") - } - - caller, callName := m[:len(m)-1], m[len(m)-1] - return caller, ModuleCall{ - Name: callName, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go deleted file mode 100644 index d138fade76..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go +++ /dev/null @@ -1,63 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// ModuleCall is the address of a call from the current module to a child -// module. -// -// There is no "Abs" version of ModuleCall because an absolute module path -// is represented by ModuleInstance. -type ModuleCall struct { - referenceable - Name string -} - -func (c ModuleCall) String() string { - return "module." + c.Name -} - -// ModuleCallInstance is the address of one instance of a module created from -// a module call, which might create multiple instances using "count" or -// "for_each" arguments. 
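Module.Call above shows the address-splitting convention used throughout this package: peel the last element off the static path and wrap it as a call, panicking for the root module since no call creates it. A self-contained rendition (illustrative names, with the panic behavior preserved):

package main

import (
	"fmt"
	"strings"
)

// module mirrors the deleted Module type: a static path of call names.
type module []string

func (m module) String() string { return strings.Join(m, ".") }

// call splits off the last element as the module call name and returns
// the containing path, as the deleted Module.Call does.
func (m module) call() (module, string) {
	if len(m) == 0 {
		panic("cannot produce call for root module")
	}
	return m[:len(m)-1], m[len(m)-1]
}

func main() {
	m := module{"network", "subnets"}
	parent, name := m.call()
	fmt.Printf("caller=%q call=module.%s\n", parent.String(), name) // caller="network" call=module.subnets
}
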
-type ModuleCallInstance struct { - referenceable - Call ModuleCall - Key InstanceKey -} - -func (c ModuleCallInstance) String() string { - if c.Key == NoKey { - return c.Call.String() - } - return fmt.Sprintf("module.%s%s", c.Call.Name, c.Key) -} - -// ModuleInstance returns the address of the module instance that corresponds -// to the receiving call instance when resolved in the given calling module. -// In other words, it returns the child module instance that the receving -// call instance creates. -func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance { - return caller.Child(c.Call.Name, c.Key) -} - -// ModuleCallOutput is the address of a particular named output produced by -// an instance of a module call. -type ModuleCallOutput struct { - referenceable - Call ModuleCallInstance - Name string -} - -func (co ModuleCallOutput) String() string { - return fmt.Sprintf("%s.%s", co.Call.String(), co.Name) -} - -// AbsOutputValue returns the absolute output value address that corresponds -// to the receving module call output address, once resolved in the given -// calling module. -func (co ModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue { - moduleAddr := co.Call.ModuleInstance(caller) - return moduleAddr.OutputValue(co.Name) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go deleted file mode 100644 index bb0901a26c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go +++ /dev/null @@ -1,388 +0,0 @@ -package addrs - -import ( - "bytes" - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// ModuleInstance is an address for a particular module instance within the -// dynamic module tree. This is an extension of the static traversals -// represented by type Module that deals with the possibility of a single -// module call producing multiple instances via the "count" and "for_each" -// arguments. -// -// Although ModuleInstance is a slice, it should be treated as immutable after -// creation. -type ModuleInstance []ModuleInstanceStep - -var ( - _ Targetable = ModuleInstance(nil) -) - -func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) { - mi, remain, diags := parseModuleInstancePrefix(traversal) - if len(remain) != 0 { - if len(remain) == len(traversal) { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module instance address", - Detail: "A module instance address must begin with \"module.\".", - Subject: remain.SourceRange().Ptr(), - }) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module instance address", - Detail: "The module instance address is followed by additional invalid content.", - Subject: remain.SourceRange().Ptr(), - }) - } - } - return mi, diags -} - -// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance -// that takes a string and parses it with the HCL native syntax traversal parser -// before interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. 
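ParseModuleInstanceStr, like ParseRefStr and ParseTargetStr later in this removal, is string-to-traversal glue: hclsyntax.ParseTraversalAbs does the parsing and the resulting traversal is then interpreted step by step. A runnable sketch of that first phase, assuming the github.com/hashicorp/hcl/v2 module is on the import path:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// Same call the deleted helpers make; the empty filename and 1,1
	// position are why the resulting ranges carry no useful source info.
	traversal, diags := hclsyntax.ParseTraversalAbs(
		[]byte(`module.network["us-east-1"].module.subnets`), "", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		fmt.Println("parse error:", diags.Error())
		return
	}
	// Walk the steps the way parseModuleInstancePrefix does.
	for _, step := range traversal {
		switch tt := step.(type) {
		case hcl.TraverseRoot:
			fmt.Println("root: ", tt.Name)
		case hcl.TraverseAttr:
			fmt.Println("attr: ", tt.Name)
		case hcl.TraverseIndex:
			fmt.Println("index:", tt.Key.AsString()) // string keys only in this example
		}
	}
}
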
-// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseProviderConfigCompact. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// then the returned address is invalid. -func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return nil, diags - } - - addr, addrDiags := ParseModuleInstance(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) { - remain := traversal - var mi ModuleInstance - var diags tfdiags.Diagnostics - - for len(remain) > 0 { - var next string - switch tt := remain[0].(type) { - case hcl.TraverseRoot: - next = tt.Name - case hcl.TraverseAttr: - next = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Module address prefix must be followed by dot and then a name.", - Subject: remain[0].SourceRange().Ptr(), - }) - break - } - - if next != "module" { - break - } - - kwRange := remain[0].SourceRange() - remain = remain[1:] - // If we have the prefix "module" then we should be followed by an - // module call name, as an attribute, and then optionally an index step - // giving the instance key. - if len(remain) == 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Prefix \"module.\" must be followed by a module name.", - Subject: &kwRange, - }) - break - } - - var moduleName string - switch tt := remain[0].(type) { - case hcl.TraverseAttr: - moduleName = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Prefix \"module.\" must be followed by a module name.", - Subject: remain[0].SourceRange().Ptr(), - }) - break - } - remain = remain[1:] - step := ModuleInstanceStep{ - Name: moduleName, - } - - if len(remain) > 0 { - if idx, ok := remain[0].(hcl.TraverseIndex); ok { - remain = remain[1:] - - switch idx.Key.Type() { - case cty.String: - step.InstanceKey = StringKey(idx.Key.AsString()) - case cty.Number: - var idxInt int - err := gocty.FromCtyValue(idx.Key, &idxInt) - if err == nil { - step.InstanceKey = IntKey(idxInt) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: fmt.Sprintf("Invalid module index: %s.", err), - Subject: idx.SourceRange().Ptr(), - }) - } - default: - // Should never happen, because no other types are allowed in traversal indices. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Invalid module key: must be either a string or an integer.", - Subject: idx.SourceRange().Ptr(), - }) - } - } - } - - mi = append(mi, step) - } - - var retRemain hcl.Traversal - if len(remain) > 0 { - retRemain = make(hcl.Traversal, len(remain)) - copy(retRemain, remain) - // The first element here might be either a TraverseRoot or a - // TraverseAttr, depending on whether we had a module address on the - // front. To make life easier for callers, we'll normalize to always - // start with a TraverseRoot. - if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { - retRemain[0] = hcl.TraverseRoot{ - Name: tt.Name, - SrcRange: tt.SrcRange, - } - } - } - - return mi, retRemain, diags -} - -// UnkeyedInstanceShim is a shim method for converting a Module address to the -// equivalent ModuleInstance address that assumes that no modules have -// keyed instances. -// -// This is a temporary allowance for the fact that Terraform does not presently -// support "count" and "for_each" on modules, and thus graph building code that -// derives graph nodes from configuration must just assume unkeyed modules -// in order to construct the graph. At a later time when "count" and "for_each" -// support is added for modules, all callers of this method will need to be -// reworked to allow for keyed module instances. -func (m Module) UnkeyedInstanceShim() ModuleInstance { - path := make(ModuleInstance, len(m)) - for i, name := range m { - path[i] = ModuleInstanceStep{Name: name} - } - return path -} - -// ModuleInstanceStep is a single traversal step through the dynamic module -// tree. It is used only as part of ModuleInstance. -type ModuleInstanceStep struct { - Name string - InstanceKey InstanceKey -} - -// RootModuleInstance is the module instance address representing the root -// module, which is also the zero value of ModuleInstance. -var RootModuleInstance ModuleInstance - -// IsRoot returns true if the receiver is the address of the root module instance, -// or false otherwise. -func (m ModuleInstance) IsRoot() bool { - return len(m) == 0 -} - -// Child returns the address of a child module instance of the receiver, -// identified by the given name and key. -func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance { - ret := make(ModuleInstance, 0, len(m)+1) - ret = append(ret, m...) - return append(ret, ModuleInstanceStep{ - Name: name, - InstanceKey: key, - }) -} - -// Parent returns the address of the parent module instance of the receiver, or -// the receiver itself if there is no parent (if it's the root module address). -func (m ModuleInstance) Parent() ModuleInstance { - if len(m) == 0 { - return m - } - return m[:len(m)-1] -} - -// String returns a string representation of the receiver, in the format used -// within e.g. user-provided resource addresses. -// -// The address of the root module has the empty string as its representation. -func (m ModuleInstance) String() string { - var buf bytes.Buffer - sep := "" - for _, step := range m { - buf.WriteString(sep) - buf.WriteString("module.") - buf.WriteString(step.Name) - if step.InstanceKey != NoKey { - buf.WriteString(step.InstanceKey.String()) - } - sep = "." - } - return buf.String() -} - -// Less returns true if the receiver should sort before the given other value -// in a sorted list of addresses. -func (m ModuleInstance) Less(o ModuleInstance) bool { - if len(m) != len(o) { - // Shorter path sorts first. 
- return len(m) < len(o) - } - - for i := range m { - mS, oS := m[i], o[i] - switch { - case mS.Name != oS.Name: - return mS.Name < oS.Name - case mS.InstanceKey != oS.InstanceKey: - return InstanceKeyLess(mS.InstanceKey, oS.InstanceKey) - } - } - - return false -} - -// Ancestors returns a slice containing the receiver and all of its ancestor -// module instances, all the way up to (and including) the root module. -// The result is ordered by depth, with the root module always first. -// -// Since the result always includes the root module, a caller may choose to -// ignore it by slicing the result with [1:]. -func (m ModuleInstance) Ancestors() []ModuleInstance { - ret := make([]ModuleInstance, 0, len(m)+1) - for i := 0; i <= len(m); i++ { - ret = append(ret, m[:i]) - } - return ret -} - -// Call returns the module call address that corresponds to the given module -// instance, along with the address of the module instance that contains it. -// -// There is no call for the root module, so this method will panic if called -// on the root module address. -// -// A single module call can produce potentially many module instances, so the -// result discards any instance key that might be present on the last step -// of the instance. To retain this, use CallInstance instead. -// -// In practice, this just turns the last element of the receiver into a -// ModuleCall and then returns a slice of the receiever that excludes that -// last part. This is just a convenience for situations where a call address -// is required, such as when dealing with *Reference and Referencable values. -func (m ModuleInstance) Call() (ModuleInstance, ModuleCall) { - if len(m) == 0 { - panic("cannot produce ModuleCall for root module") - } - - inst, lastStep := m[:len(m)-1], m[len(m)-1] - return inst, ModuleCall{ - Name: lastStep.Name, - } -} - -// CallInstance returns the module call instance address that corresponds to -// the given module instance, along with the address of the module instance -// that contains it. -// -// There is no call for the root module, so this method will panic if called -// on the root module address. -// -// In practice, this just turns the last element of the receiver into a -// ModuleCallInstance and then returns a slice of the receiever that excludes -// that last part. This is just a convenience for situations where a call\ -// address is required, such as when dealing with *Reference and Referencable -// values. -func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) { - if len(m) == 0 { - panic("cannot produce ModuleCallInstance for root module") - } - - inst, lastStep := m[:len(m)-1], m[len(m)-1] - return inst, ModuleCallInstance{ - Call: ModuleCall{ - Name: lastStep.Name, - }, - Key: lastStep.InstanceKey, - } -} - -// TargetContains implements Targetable by returning true if the given other -// address either matches the receiver, is a sub-module-instance of the -// receiver, or is a targetable absolute address within a module that -// is contained within the reciever. -func (m ModuleInstance) TargetContains(other Targetable) bool { - switch to := other.(type) { - - case ModuleInstance: - if len(to) < len(m) { - // Can't be contained if the path is shorter - return false - } - // Other is contained if its steps match for the length of our own path. - for i, ourStep := range m { - otherStep := to[i] - if ourStep != otherStep { - return false - } - } - // If we fall out here then the prefixed matched, so it's contained. 
- return true - - case AbsResource: - return m.TargetContains(to.Module) - - case AbsResourceInstance: - return m.TargetContains(to.Module) - - default: - return false - } -} - -func (m ModuleInstance) targetableSigil() { - // ModuleInstance is targetable -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go deleted file mode 100644 index bcd923acb7..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go +++ /dev/null @@ -1,75 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// OutputValue is the address of an output value, in the context of the module -// that is defining it. -// -// This is related to but separate from ModuleCallOutput, which represents -// a module output from the perspective of its parent module. Since output -// values cannot be represented from the module where they are defined, -// OutputValue is not Referenceable, while ModuleCallOutput is. -type OutputValue struct { - Name string -} - -func (v OutputValue) String() string { - return "output." + v.Name -} - -// Absolute converts the receiver into an absolute address within the given -// module instance. -func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue { - return AbsOutputValue{ - Module: m, - OutputValue: v, - } -} - -// AbsOutputValue is the absolute address of an output value within a module instance. -// -// This represents an output globally within the namespace of a particular -// configuration. It is related to but separate from ModuleCallOutput, which -// represents a module output from the perspective of its parent module. -type AbsOutputValue struct { - Module ModuleInstance - OutputValue OutputValue -} - -// OutputValue returns the absolute address of an output value of the given -// name within the receiving module instance. -func (m ModuleInstance) OutputValue(name string) AbsOutputValue { - return AbsOutputValue{ - Module: m, - OutputValue: OutputValue{ - Name: name, - }, - } -} - -func (v AbsOutputValue) String() string { - if v.Module.IsRoot() { - return v.OutputValue.String() - } - return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String()) -} - -// ModuleCallOutput converts an AbsModuleOutput into a ModuleCallOutput, -// returning also the module instance that the ModuleCallOutput is relative -// to. -// -// The root module does not have a call, and so this method cannot be used -// with outputs in the root module, and will panic in that case. -func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallOutput) { - if v.Module.IsRoot() { - panic("ReferenceFromCall used with root module output") - } - - caller, call := v.Module.CallInstance() - return caller, ModuleCallOutput{ - Call: call, - Name: v.OutputValue.Name, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go deleted file mode 100644 index a2ee16441e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go +++ /dev/null @@ -1,346 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Reference describes a reference to an address with source location -// information. 
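parseRef, defined below, is at heart a dispatch on the traversal's root name: known keywords get keyword-specific handling, "data" re-enters the shared resource parser, and anything unrecognized is treated as a managed resource type. A string-keyed caricature of that dispatch (the real code walks hcl.Traversal steps, not strings):

package main

import (
	"fmt"
	"strings"
)

// classifyRef routes a reference by its root name, mirroring the
// switch at the top of the deleted parseRef.
func classifyRef(ref string) string {
	root := strings.SplitN(ref, ".", 2)[0]
	switch root {
	case "count", "each", "local", "module", "path", "self", "terraform", "var":
		return "keyword: " + root
	case "data":
		return "data resource"
	default:
		// Anything else is assumed to be a managed resource type.
		return "managed resource type: " + root
	}
}

func main() {
	for _, ref := range []string{"var.region", "data.github_user.me", "github_repository.example"} {
		fmt.Printf("%-28s -> %s\n", ref, classifyRef(ref))
	}
}
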
-type Reference struct { - Subject Referenceable - SourceRange tfdiags.SourceRange - Remaining hcl.Traversal -} - -// ParseRef attempts to extract a referencable address from the prefix of the -// given traversal, which must be an absolute traversal or this function -// will panic. -// -// If no error diagnostics are returned, the returned reference includes the -// address that was extracted, the source range it was extracted from, and any -// remaining relative traversal that was not consumed as part of the -// reference. -// -// If error diagnostics are returned then the Reference value is invalid and -// must not be used. -func ParseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { - ref, diags := parseRef(traversal) - - // Normalize a little to make life easier for callers. - if ref != nil { - if len(ref.Remaining) == 0 { - ref.Remaining = nil - } - } - - return ref, diags -} - -// ParseRefStr is a helper wrapper around ParseRef that takes a string -// and parses it with the HCL native syntax traversal parser before -// interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseRef. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned reference may be nil or incomplete. -func ParseRefStr(str string) (*Reference, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return nil, diags - } - - ref, targetDiags := ParseRef(traversal) - diags = diags.Append(targetDiags) - return ref, diags -} - -func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - root := traversal.RootName() - rootRange := traversal[0].SourceRange() - - switch root { - - case "count": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: CountAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "each": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: ForEachAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "data": - if len(traversal) < 3 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: `The "data" object must be followed by two attribute names: the data source type and the resource name.`, - Subject: traversal.SourceRange().Ptr(), - }) - return nil, diags - } - remain := traversal[1:] // trim off "data" so we can use our shared resource reference parser - return parseResourceRef(DataResourceMode, rootRange, remain) - - case "local": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: LocalValue{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "module": - callName, callRange, remain, diags := 
parseSingleAttrRef(traversal) - if diags.HasErrors() { - return nil, diags - } - - // A traversal starting with "module" can either be a reference to - // an entire module instance or to a single output from a module - // instance, depending on what we find after this introducer. - - callInstance := ModuleCallInstance{ - Call: ModuleCall{ - Name: callName, - }, - Key: NoKey, - } - - if len(remain) == 0 { - // Reference to an entire module instance. Might alternatively - // be a reference to a collection of instances of a particular - // module, but the caller will need to deal with that ambiguity - // since we don't have enough context here. - return &Reference{ - Subject: callInstance, - SourceRange: tfdiags.SourceRangeFromHCL(callRange), - Remaining: remain, - }, diags - } - - if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { - var err error - callInstance.Key, err = ParseInstanceKey(idxTrav.Key) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid index key", - Detail: fmt.Sprintf("Invalid index for module instance: %s.", err), - Subject: &idxTrav.SrcRange, - }) - return nil, diags - } - remain = remain[1:] - - if len(remain) == 0 { - // Also a reference to an entire module instance, but we have a key - // now. - return &Reference{ - Subject: callInstance, - SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, idxTrav.SrcRange)), - Remaining: remain, - }, diags - } - } - - if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok { - remain = remain[1:] - return &Reference{ - Subject: ModuleCallOutput{ - Name: attrTrav.Name, - Call: callInstance, - }, - SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, attrTrav.SrcRange)), - Remaining: remain, - }, diags - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: "Module instance objects do not support this operation.", - Subject: remain[0].SourceRange().Ptr(), - }) - return nil, diags - - case "path": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: PathAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "self": - return &Reference{ - Subject: Self, - SourceRange: tfdiags.SourceRangeFromHCL(rootRange), - Remaining: traversal[1:], - }, diags - - case "terraform": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: TerraformAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "var": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: InputVariable{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - default: - return parseResourceRef(ManagedResourceMode, rootRange, traversal) - } -} - -func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if len(traversal) < 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: `A reference to a resource type must be followed by at least one attribute access, specifying the resource name.`, - Subject: hcl.RangeBetween(traversal[0].SourceRange(), traversal[len(traversal)-1].SourceRange()).Ptr(), - }) - return nil, diags - } - - var typeName, name string - switch tt := traversal[0].(type) { // Could be either root or attr, depending on 
our resource mode - case hcl.TraverseRoot: - typeName = tt.Name - case hcl.TraverseAttr: - typeName = tt.Name - default: - // If it isn't a TraverseRoot then it must be a "data" reference. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: `The "data" object does not support this operation.`, - Subject: traversal[0].SourceRange().Ptr(), - }) - return nil, diags - } - - attrTrav, ok := traversal[1].(hcl.TraverseAttr) - if !ok { - var what string - switch mode { - case DataResourceMode: - what = "data source" - default: - what = "resource type" - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: fmt.Sprintf(`A reference to a %s must be followed by at least one attribute access, specifying the resource name.`, what), - Subject: traversal[1].SourceRange().Ptr(), - }) - return nil, diags - } - name = attrTrav.Name - rng := hcl.RangeBetween(startRange, attrTrav.SrcRange) - remain := traversal[2:] - - resourceAddr := Resource{ - Mode: mode, - Type: typeName, - Name: name, - } - resourceInstAddr := ResourceInstance{ - Resource: resourceAddr, - Key: NoKey, - } - - if len(remain) == 0 { - // This might actually be a reference to the collection of all instances - // of the resource, but we don't have enough context here to decide - // so we'll let the caller resolve that ambiguity. - return &Reference{ - Subject: resourceAddr, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - }, diags - } - - if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { - var err error - resourceInstAddr.Key, err = ParseInstanceKey(idxTrav.Key) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid index key", - Detail: fmt.Sprintf("Invalid index for resource instance: %s.", err), - Subject: &idxTrav.SrcRange, - }) - return nil, diags - } - remain = remain[1:] - rng = hcl.RangeBetween(rng, idxTrav.SrcRange) - } - - return &Reference{ - Subject: resourceInstAddr, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags -} - -func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Traversal, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - root := traversal.RootName() - rootRange := traversal[0].SourceRange() - - if len(traversal) < 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: fmt.Sprintf("The %q object cannot be accessed directly. 
Instead, access one of its attributes.", root), - Subject: &rootRange, - }) - return "", hcl.Range{}, nil, diags - } - if attrTrav, ok := traversal[1].(hcl.TraverseAttr); ok { - return attrTrav.Name, hcl.RangeBetween(rootRange, attrTrav.SrcRange), traversal[2:], diags - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: fmt.Sprintf("The %q object does not support this operation.", root), - Subject: traversal[1].SourceRange().Ptr(), - }) - return "", hcl.Range{}, nil, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go deleted file mode 100644 index 5b922e8b66..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go +++ /dev/null @@ -1,240 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Target describes a targeted address with source location information. -type Target struct { - Subject Targetable - SourceRange tfdiags.SourceRange -} - -// ParseTarget attempts to interpret the given traversal as a targetable -// address. The given traversal must be absolute, or this function will -// panic. -// -// If no error diagnostics are returned, the returned target includes the -// address that was extracted and the source range it was extracted from. -// -// If error diagnostics are returned then the Target value is invalid and -// must not be used. -func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { - path, remain, diags := parseModuleInstancePrefix(traversal) - if diags.HasErrors() { - return nil, diags - } - - rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange()) - - if len(remain) == 0 { - return &Target{ - Subject: path, - SourceRange: rng, - }, diags - } - - mode := ManagedResourceMode - if remain.RootName() == "data" { - mode = DataResourceMode - remain = remain[1:] - } - - if len(remain) < 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Resource specification must include a resource type and name.", - Subject: remain.SourceRange().Ptr(), - }) - return nil, diags - } - - var typeName, name string - switch tt := remain[0].(type) { - case hcl.TraverseRoot: - typeName = tt.Name - case hcl.TraverseAttr: - typeName = tt.Name - default: - switch mode { - case ManagedResourceMode: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource type name is required.", - Subject: remain[0].SourceRange().Ptr(), - }) - case DataResourceMode: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A data source name is required.", - Subject: remain[0].SourceRange().Ptr(), - }) - default: - panic("unknown mode") - } - return nil, diags - } - - switch tt := remain[1].(type) { - case hcl.TraverseAttr: - name = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource name is required.", - Subject: remain[1].SourceRange().Ptr(), - }) - return nil, diags - } - - var subject Targetable - remain = remain[2:] - switch len(remain) { - case 0: - subject = path.Resource(mode, typeName, name) - case 1: - if tt, ok := remain[0].(hcl.TraverseIndex); ok { - key, err := 
ParseInstanceKey(tt.Key) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: fmt.Sprintf("Invalid resource instance key: %s.", err), - Subject: remain[0].SourceRange().Ptr(), - }) - return nil, diags - } - - subject = path.ResourceInstance(mode, typeName, name, key) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Resource instance key must be given in square brackets.", - Subject: remain[0].SourceRange().Ptr(), - }) - return nil, diags - } - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Unexpected extra operators after address.", - Subject: remain[1].SourceRange().Ptr(), - }) - return nil, diags - } - - return &Target{ - Subject: subject, - SourceRange: rng, - }, diags -} - -// ParseTargetStr is a helper wrapper around ParseTarget that takes a string -// and parses it with the HCL native syntax traversal parser before -// interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a target string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseTarget. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned target may be nil or incomplete. -func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return nil, diags - } - - target, targetDiags := ParseTarget(traversal) - diags = diags.Append(targetDiags) - return target, diags -} - -// ParseAbsResourceInstance attempts to interpret the given traversal as an -// absolute resource instance address, using the same syntax as expected by -// ParseTarget. -// -// If no error diagnostics are returned, the returned target includes the -// address that was extracted and the source range it was extracted from. -// -// If error diagnostics are returned then the AbsResource value is invalid and -// must not be used. -func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) { - addr, diags := ParseTarget(traversal) - if diags.HasErrors() { - return AbsResourceInstance{}, diags - } - - switch tt := addr.Subject.(type) { - - case AbsResource: - return tt.Instance(NoKey), diags - - case AbsResourceInstance: - return tt, diags - - case ModuleInstance: // Catch likely user error with specialized message - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource instance address is required here. 
The module path must be followed by a resource instance specification.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsResourceInstance{}, diags - - default: // Generic message for other address types - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource address is required here.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsResourceInstance{}, diags - - } -} - -// ParseAbsResourceInstanceStr is a helper wrapper around -// ParseAbsResourceInstance that takes a string and parses it with the HCL -// native syntax traversal parser before interpreting it. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned address may be incomplete. -// -// Since this function has no context about the source of the given string, -// any returned diagnostics will not have meaningful source location -// information. -func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return AbsResourceInstance{}, diags - } - - addr, addrDiags := ParseAbsResourceInstance(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go deleted file mode 100644 index cfc13f4bcd..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// PathAttr is the address of an attribute of the "path" object in -// the interpolation scope, like "path.module". -type PathAttr struct { - referenceable - Name string -} - -func (pa PathAttr) String() string { - return "path." + pa.Name -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider.go deleted file mode 100644 index 4fa82517a4..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider.go +++ /dev/null @@ -1,464 +0,0 @@ -package addrs - -import ( - "fmt" - "strings" - - "golang.org/x/net/idna" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - svchost "github.com/hashicorp/terraform-svchost" -) - -// Provider encapsulates a single provider type. In the future this will be -// extended to include additional fields including Namespace and SourceHost -type Provider struct { - Type string - Namespace string - Hostname svchost.Hostname -} - -// DefaultRegistryHost is the hostname used for provider addresses that do -// not have an explicit hostname. -const DefaultRegistryHost = svchost.Hostname("registry.terraform.io") - -// BuiltInProviderHost is the pseudo-hostname used for the "built-in" provider -// namespace. Built-in provider addresses must also have their namespace set -// to BuiltInProviderNamespace in order to be considered as built-in. -const BuiltInProviderHost = svchost.Hostname("terraform.io") - -// BuiltInProviderNamespace is the provider namespace used for "built-in" -// providers. 
Built-in provider addresses must also have their hostname -// set to BuiltInProviderHost in order to be considered as built-in. -// -// The this namespace is literally named "builtin", in the hope that users -// who see FQNs containing this will be able to infer the way in which they are -// special, even if they haven't encountered the concept formally yet. -const BuiltInProviderNamespace = "builtin" - -// LegacyProviderNamespace is the special string used in the Namespace field -// of type Provider to mark a legacy provider address. This special namespace -// value would normally be invalid, and can be used only when the hostname is -// DefaultRegistryHost because that host owns the mapping from legacy name to -// FQN. -const LegacyProviderNamespace = "-" - -// String returns an FQN string, indended for use in machine-readable output. -func (pt Provider) String() string { - if pt.IsZero() { - panic("called String on zero-value addrs.Provider") - } - return pt.Hostname.ForDisplay() + "/" + pt.Namespace + "/" + pt.Type -} - -// ForDisplay returns a user-friendly FQN string, simplified for readability. If -// the provider is using the default hostname, the hostname is omitted. -func (pt Provider) ForDisplay() string { - if pt.IsZero() { - panic("called ForDisplay on zero-value addrs.Provider") - } - - if pt.Hostname == DefaultRegistryHost { - return pt.Namespace + "/" + pt.Type - } - return pt.Hostname.ForDisplay() + "/" + pt.Namespace + "/" + pt.Type -} - -// NewProvider constructs a provider address from its parts, and normalizes -// the namespace and type parts to lowercase using unicode case folding rules -// so that resulting addrs.Provider values can be compared using standard -// Go equality rules (==). -// -// The hostname is given as a svchost.Hostname, which is required by the -// contract of that type to have already been normalized for equality testing. -// -// This function will panic if the given namespace or type name are not valid. -// When accepting namespace or type values from outside the program, use -// ParseProviderPart first to check that the given value is valid. -func NewProvider(hostname svchost.Hostname, namespace, typeName string) Provider { - if namespace == LegacyProviderNamespace { - // Legacy provider addresses must always be created via - // NewLegacyProvider so that we can use static analysis to find - // codepaths still working with those. - panic("attempt to create legacy provider address using NewProvider; use NewLegacyProvider instead") - } - - return Provider{ - Type: MustParseProviderPart(typeName), - Namespace: MustParseProviderPart(namespace), - Hostname: hostname, - } -} - -// ImpliedProviderForUnqualifiedType represents the rules for inferring what -// provider FQN a user intended when only a naked type name is available. -// -// For all except the type name "terraform" this returns a so-called "default" -// provider, which is under the registry.terraform.io/hashicorp/ namespace. -// -// As a special case, the string "terraform" maps to -// "terraform.io/builtin/terraform" because that is the more likely user -// intent than the now-unmaintained "registry.terraform.io/hashicorp/terraform" -// which remains only for compatibility with older Terraform versions. 
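
For reference, a short sketch of the display rules in the removed code above, written as an in-package test (the addrs package is internal, so it can only be exercised from within; the expected strings follow directly from the String and ForDisplay methods shown above):

func TestProviderDisplayForms(t *testing.T) {
	p := NewProvider(DefaultRegistryHost, "hashicorp", "aws")
	// String always renders the full FQN.
	if got := p.String(); got != "registry.terraform.io/hashicorp/aws" {
		t.Errorf("String() = %q", got)
	}
	// ForDisplay omits the hostname when it is the default registry.
	if got := p.ForDisplay(); got != "hashicorp/aws" {
		t.Errorf("ForDisplay() = %q", got)
	}
}
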
-func ImpliedProviderForUnqualifiedType(typeName string) Provider { - switch typeName { - case "terraform": - // Note for future maintainers: any additional strings we add here - // as implied to be builtin must never also be use as provider names - // in the registry.terraform.io/hashicorp/... namespace, because - // otherwise older versions of Terraform could implicitly select - // the registry name instead of the internal one. - return NewBuiltInProvider(typeName) - default: - return NewDefaultProvider(typeName) - } -} - -// NewDefaultProvider returns the default address of a HashiCorp-maintained, -// Registry-hosted provider. -func NewDefaultProvider(name string) Provider { - return Provider{ - Type: MustParseProviderPart(name), - Namespace: "hashicorp", - Hostname: DefaultRegistryHost, - } -} - -// NewBuiltInProvider returns the address of a "built-in" provider. See -// the docs for Provider.IsBuiltIn for more information. -func NewBuiltInProvider(name string) Provider { - return Provider{ - Type: MustParseProviderPart(name), - Namespace: BuiltInProviderNamespace, - Hostname: BuiltInProviderHost, - } -} - -// NewLegacyProvider returns a mock address for a provider. -// This will be removed when ProviderType is fully integrated. -func NewLegacyProvider(name string) Provider { - return Provider{ - // We intentionally don't normalize and validate the legacy names, - // because existing code expects legacy provider names to pass through - // verbatim, even if not compliant with our new naming rules. - Type: name, - Namespace: LegacyProviderNamespace, - Hostname: DefaultRegistryHost, - } -} - -// LegacyString returns the provider type, which is frequently used -// interchangeably with provider name. This function can and should be removed -// when provider type is fully integrated. As a safeguard for future -// refactoring, this function panics if the Provider is not a legacy provider. -func (pt Provider) LegacyString() string { - if pt.IsZero() { - panic("called LegacyString on zero-value addrs.Provider") - } - if pt.Namespace != LegacyProviderNamespace && pt.Namespace != BuiltInProviderNamespace { - panic(pt.String() + " cannot be represented as a legacy string") - } - return pt.Type -} - -// IsZero returns true if the receiver is the zero value of addrs.Provider. -// -// The zero value is not a valid addrs.Provider and calling other methods on -// such a value is likely to either panic or otherwise misbehave. -func (pt Provider) IsZero() bool { - return pt == Provider{} -} - -// IsBuiltIn returns true if the receiver is the address of a "built-in" -// provider. That is, a provider under terraform.io/builtin/ which is -// included as part of the Terraform binary itself rather than one to be -// installed from elsewhere. -// -// These are ignored by the provider installer because they are assumed to -// already be available without any further installation. -func (pt Provider) IsBuiltIn() bool { - return pt.Hostname == BuiltInProviderHost && pt.Namespace == BuiltInProviderNamespace -} - -// LessThan returns true if the receiver should sort before the other given -// address in an ordered list of provider addresses. -// -// This ordering is an arbitrary one just to allow deterministic results from -// functions that would otherwise have no natural ordering. It's subject -// to change in future. 
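
A similar in-package sketch of the constructor mapping above: only the bare name "terraform" resolves to the built-in namespace, while every other unqualified type name lands under the default registry's hashicorp namespace.

func TestImpliedProviderMapping(t *testing.T) {
	if got := ImpliedProviderForUnqualifiedType("terraform").String(); got != "terraform.io/builtin/terraform" {
		t.Errorf("builtin mapping = %q", got)
	}
	if got := ImpliedProviderForUnqualifiedType("random").String(); got != "registry.terraform.io/hashicorp/random" {
		t.Errorf("default mapping = %q", got)
	}
}
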
-func (pt Provider) LessThan(other Provider) bool { - switch { - case pt.Hostname != other.Hostname: - return pt.Hostname < other.Hostname - case pt.Namespace != other.Namespace: - return pt.Namespace < other.Namespace - default: - return pt.Type < other.Type - } -} - -// IsLegacy returns true if the provider is a legacy-style provider -func (pt Provider) IsLegacy() bool { - if pt.IsZero() { - panic("called IsLegacy() on zero-value addrs.Provider") - } - - return pt.Hostname == DefaultRegistryHost && pt.Namespace == LegacyProviderNamespace - -} - -// IsDefault returns true if the provider is a default hashicorp provider -func (pt Provider) IsDefault() bool { - if pt.IsZero() { - panic("called IsDefault() on zero-value addrs.Provider") - } - - return pt.Hostname == DefaultRegistryHost && pt.Namespace == "hashicorp" -} - -// Equals returns true if the receiver and other provider have the same attributes. -func (pt Provider) Equals(other Provider) bool { - return pt == other -} - -// ParseProviderSourceString parses the source attribute and returns a provider. -// This is intended primarily to parse the FQN-like strings returned by -// terraform-config-inspect. -// -// The following are valid source string formats: -// name -// namespace/name -// hostname/namespace/name -func ParseProviderSourceString(str string) (Provider, tfdiags.Diagnostics) { - var ret Provider - var diags tfdiags.Diagnostics - - // split the source string into individual components - parts := strings.Split(str, "/") - if len(parts) == 0 || len(parts) > 3 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider source string", - Detail: `The "source" attribute must be in the format "[hostname/][namespace/]name"`, - }) - return ret, diags - } - - // check for an invalid empty string in any part - for i := range parts { - if parts[i] == "" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider source string", - Detail: `The "source" attribute must be in the format "[hostname/][namespace/]name"`, - }) - return ret, diags - } - } - - // check the 'name' portion, which is always the last part - givenName := parts[len(parts)-1] - name, err := ParseProviderPart(givenName) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider type", - Detail: fmt.Sprintf(`Invalid provider type %q in source %q: %s"`, givenName, str, err), - }) - return ret, diags - } - ret.Type = name - ret.Hostname = DefaultRegistryHost - - if len(parts) == 1 { - return NewDefaultProvider(parts[0]), diags - } - - if len(parts) >= 2 { - // the namespace is always the second-to-last part - givenNamespace := parts[len(parts)-2] - if givenNamespace == LegacyProviderNamespace { - // For now we're tolerating legacy provider addresses until we've - // finished updating the rest of the codebase to no longer use them, - // or else we'd get errors round-tripping through legacy subsystems. 
- ret.Namespace = LegacyProviderNamespace - } else { - namespace, err := ParseProviderPart(givenNamespace) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider namespace", - Detail: fmt.Sprintf(`Invalid provider namespace %q in source %q: %s"`, namespace, str, err), - }) - return Provider{}, diags - } - ret.Namespace = namespace - } - } - - // Final Case: 3 parts - if len(parts) == 3 { - // the namespace is always the first part in a three-part source string - hn, err := svchost.ForComparison(parts[0]) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider source hostname", - Detail: fmt.Sprintf(`Invalid provider source hostname namespace %q in source %q: %s"`, hn, str, err), - }) - return Provider{}, diags - } - ret.Hostname = hn - } - - if ret.Namespace == LegacyProviderNamespace && ret.Hostname != DefaultRegistryHost { - // Legacy provider addresses must always be on the default registry - // host, because the default registry host decides what actual FQN - // each one maps to. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider namespace", - Detail: "The legacy provider namespace \"-\" can be used only with hostname " + DefaultRegistryHost.ForDisplay() + ".", - }) - return Provider{}, diags - } - - // Due to how plugin executables are named and provider git repositories - // are conventionally named, it's a reasonable and - // apparently-somewhat-common user error to incorrectly use the - // "terraform-provider-" prefix in a provider source address. There is - // no good reason for a provider to have the prefix "terraform-" anyway, - // so we've made that invalid from the start both so we can give feedback - // to provider developers about the terraform- prefix being redundant - // and give specialized feedback to folks who incorrectly use the full - // terraform-provider- prefix to help them self-correct. - const redundantPrefix = "terraform-" - const userErrorPrefix = "terraform-provider-" - if strings.HasPrefix(ret.Type, redundantPrefix) { - if strings.HasPrefix(ret.Type, userErrorPrefix) { - // Likely user error. We only return this specialized error if - // whatever is after the prefix would otherwise be a - // syntactically-valid provider type, so we don't end up advising - // the user to try something that would be invalid for another - // reason anyway. - // (This is mainly just for robustness, because the validation - // we already did above should've rejected most/all ways for - // the suggestedType to end up invalid here.) - suggestedType := ret.Type[len(userErrorPrefix):] - if _, err := ParseProviderPart(suggestedType); err == nil { - suggestedAddr := ret - suggestedAddr.Type = suggestedType - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid provider type", - fmt.Sprintf("Provider source %q has a type with the prefix %q, which isn't valid. Although that prefix is often used in the names of version control repositories for Terraform providers, provider source strings should not include it.\n\nDid you mean %q?", ret.ForDisplay(), userErrorPrefix, suggestedAddr.ForDisplay()), - )) - return Provider{}, diags - } - } - // Otherwise, probably instead an incorrectly-named provider, perhaps - // arising from a similar instinct to what causes there to be - // thousands of Python packages on PyPI with "python-"-prefixed - // names. 
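
To make the accepted source formats concrete, a hedged in-package sketch of ParseProviderSourceString (the hostnames and namespaces are hypothetical examples, and it assumes the Err() helper this vendored tfdiags package provides):

func TestParseProviderSourceForms(t *testing.T) {
	cases := map[string]string{
		"aws":                    "registry.terraform.io/hashicorp/aws",
		"mycorp/aws":             "registry.terraform.io/mycorp/aws",
		"example.com/mycorp/aws": "example.com/mycorp/aws",
	}
	for input, want := range cases {
		p, diags := ParseProviderSourceString(input)
		if diags.HasErrors() {
			t.Fatalf("%s: %s", input, diags.Err())
		}
		if got := p.String(); got != want {
			t.Errorf("%s => %q, want %q", input, got, want)
		}
	}
	// The redundant repository-style prefix is rejected with a suggestion.
	if _, diags := ParseProviderSourceString("hashicorp/terraform-provider-aws"); !diags.HasErrors() {
		t.Error("expected the terraform-provider- prefix to be rejected")
	}
}
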
- diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid provider type", - fmt.Sprintf("Provider source %q has a type with the prefix %q, which isn't allowed because it would be redundant to name a Terraform provider with that prefix. If you are the author of this provider, rename it to not include the prefix.", ret, redundantPrefix), - )) - return Provider{}, diags - } - - return ret, diags -} - -// MustParseProviderSourceString is a wrapper around ParseProviderSourceString that panics if -// it returns an error. -func MustParseProviderSourceString(str string) Provider { - result, diags := ParseProviderSourceString(str) - if diags.HasErrors() { - panic(diags.Err().Error()) - } - return result -} - -// ParseProviderPart processes an addrs.Provider namespace or type string -// provided by an end-user, producing a normalized version if possible or -// an error if the string contains invalid characters. -// -// A provider part is processed in the same way as an individual label in a DNS -// domain name: it is transformed to lowercase per the usual DNS case mapping -// and normalization rules and may contain only letters, digits, and dashes. -// Additionally, dashes may not appear at the start or end of the string. -// -// These restrictions are intended to allow these names to appear in fussy -// contexts such as directory/file names on case-insensitive filesystems, -// repository names on GitHub, etc. We're using the DNS rules in particular, -// rather than some similar rules defined locally, because the hostname part -// of an addrs.Provider is already a hostname and it's ideal to use exactly -// the same case folding and normalization rules for all of the parts. -// -// In practice a provider type string conventionally does not contain dashes -// either. Such names are permitted, but providers with such type names will be -// hard to use because their resource type names will not be able to contain -// the provider type name and thus each resource will need an explicit provider -// address specified. (A real-world example of such a provider is the -// "google-beta" variant of the GCP provider, which has resource types that -// start with the "google_" prefix instead.) -// -// It's valid to pass the result of this function as the argument to a -// subsequent call, in which case the result will be identical. -func ParseProviderPart(given string) (string, error) { - if len(given) == 0 { - return "", fmt.Errorf("must have at least one character") - } - - // We're going to process the given name using the same "IDNA" library we - // use for the hostname portion, since it already implements the case - // folding rules we want. - // - // The idna library doesn't expose individual label parsing directly, but - // once we've verified it doesn't contain any dots we can just treat it - // like a top-level domain for this library's purposes. - if strings.ContainsRune(given, '.') { - return "", fmt.Errorf("dots are not allowed") - } - - // We don't allow names containing multiple consecutive dashes, just as - // a matter of preference: they look weird, confusing, or incorrect. - // This also, as a side-effect, prevents the use of the "punycode" - // indicator prefix "xn--" that would cause the IDNA library to interpret - // the given name as punycode, because that would be weird and unexpected. 
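
The label rules above boil down to DNS lookup normalization; a brief in-package sketch (assuming, per the comment above, that idna's lookup profile lowercases ASCII and rejects edge dashes):

func TestParseProviderPartRules(t *testing.T) {
	// Case folding normalizes to lowercase.
	if got, err := ParseProviderPart("AWS"); err != nil || got != "aws" {
		t.Errorf("ParseProviderPart(AWS) = %q, %v", got, err)
	}
	// Empty strings, dots, consecutive dashes, and edge dashes are rejected.
	for _, bad := range []string{"", "registry.io", "not--ok", "-aws", "aws-"} {
		if _, err := ParseProviderPart(bad); err == nil {
			t.Errorf("expected %q to be rejected", bad)
		}
	}
}
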
- if strings.Contains(given, "--") { - return "", fmt.Errorf("cannot use multiple consecutive dashes") - } - - result, err := idna.Lookup.ToUnicode(given) - if err != nil { - return "", fmt.Errorf("must contain only letters, digits, and dashes, and may not use leading or trailing dashes") - } - - return result, nil -} - -// MustParseProviderPart is a wrapper around ParseProviderPart that panics if -// it returns an error. -func MustParseProviderPart(given string) string { - result, err := ParseProviderPart(given) - if err != nil { - panic(err.Error()) - } - return result -} - -// IsProviderPartNormalized compares a given string to the result of ParseProviderPart(string) -func IsProviderPartNormalized(str string) (bool, error) { - normalized, err := ParseProviderPart(str) - if err != nil { - return false, err - } - if str == normalized { - return true, nil - } - return false, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go deleted file mode 100644 index c6fce1a504..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go +++ /dev/null @@ -1,289 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// ProviderConfig is the address of a provider configuration. -type ProviderConfig struct { - Type string - - // If not empty, Alias identifies which non-default (aliased) provider - // configuration this address refers to. - Alias string -} - -// ParseProviderConfigCompact parses the given absolute traversal as a relative -// provider address in compact form. The following are examples of traversals -// that can be successfully parsed as compact relative provider configuration -// addresses: -// -// aws -// aws.foo -// -// This function will panic if given a relative traversal. -// -// If the returned diagnostics contains errors then the result value is invalid -// and must not be used. -func ParseProviderConfigCompact(traversal hcl.Traversal) (ProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := ProviderConfig{ - Type: traversal.RootName(), - } - - if len(traversal) < 2 { - // Just a type name, then. - return ret, diags - } - - aliasStep := traversal[1] - switch ts := aliasStep.(type) { - case hcl.TraverseAttr: - ret.Alias = ts.Name - return ret, diags - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "The provider type name must either stand alone or be followed by an alias name separated with a dot.", - Subject: aliasStep.SourceRange().Ptr(), - }) - } - - if len(traversal) > 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Extraneous extra operators after provider configuration address.", - Subject: traversal[2:].SourceRange().Ptr(), - }) - } - - return ret, diags -} - -// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact -// that takes a string and parses it with the HCL native syntax traversal parser -// before interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. 
-// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseProviderConfigCompact. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// then the returned address is invalid. -func ParseProviderConfigCompactStr(str string) (ProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return ProviderConfig{}, diags - } - - addr, addrDiags := ParseProviderConfigCompact(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -// Absolute returns an AbsProviderConfig from the receiver and the given module -// instance address. -func (pc ProviderConfig) Absolute(module ModuleInstance) AbsProviderConfig { - return AbsProviderConfig{ - Module: module, - ProviderConfig: pc, - } -} - -func (pc ProviderConfig) String() string { - if pc.Type == "" { - // Should never happen; always indicates a bug - return "provider." - } - - if pc.Alias != "" { - return fmt.Sprintf("provider.%s.%s", pc.Type, pc.Alias) - } - - return "provider." + pc.Type -} - -// StringCompact is an alternative to String that returns the form that can -// be parsed by ParseProviderConfigCompact, without the "provider." prefix. -func (pc ProviderConfig) StringCompact() string { - if pc.Alias != "" { - return fmt.Sprintf("%s.%s", pc.Type, pc.Alias) - } - return pc.Type -} - -// AbsProviderConfig is the absolute address of a provider configuration -// within a particular module instance. -type AbsProviderConfig struct { - Module ModuleInstance - ProviderConfig ProviderConfig -} - -// ParseAbsProviderConfig parses the given traversal as an absolute provider -// address. The following are examples of traversals that can be successfully -// parsed as absolute provider configuration addresses: -// -// provider.aws -// provider.aws.foo -// module.bar.provider.aws -// module.bar.module.baz.provider.aws.foo -// module.foo[1].provider.aws.foo -// -// This type of address is used, for example, to record the relationships -// between resources and provider configurations in the state structure. -// This type of address is not generally used in the UI, except in error -// messages that refer to provider configurations. 
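
A sketch exercising the absolute address forms listed above, via the string-parsing wrapper defined below; the expected round-trip follows from the String methods in this file:

func TestParseAbsProviderConfigForms(t *testing.T) {
	addr, diags := ParseAbsProviderConfigStr("module.bar.provider.aws.foo")
	if diags.HasErrors() {
		t.Fatal(diags.Err())
	}
	if addr.ProviderConfig.Type != "aws" || addr.ProviderConfig.Alias != "foo" {
		t.Errorf("unexpected provider config: %#v", addr.ProviderConfig)
	}
	// The parsed address renders back to the same string.
	if got := addr.String(); got != "module.bar.provider.aws.foo" {
		t.Errorf("String() = %q", got)
	}
}
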
-func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) { - modInst, remain, diags := parseModuleInstancePrefix(traversal) - ret := AbsProviderConfig{ - Module: modInst, - } - if len(remain) < 2 || remain.RootName() != "provider" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Provider address must begin with \"provider.\", followed by a provider type name.", - Subject: remain.SourceRange().Ptr(), - }) - return ret, diags - } - if len(remain) > 3 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Extraneous operators after provider configuration alias.", - Subject: hcl.Traversal(remain[3:]).SourceRange().Ptr(), - }) - return ret, diags - } - - if tt, ok := remain[1].(hcl.TraverseAttr); ok { - ret.ProviderConfig.Type = tt.Name - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "The prefix \"provider.\" must be followed by a provider type name.", - Subject: remain[1].SourceRange().Ptr(), - }) - return ret, diags - } - - if len(remain) == 3 { - if tt, ok := remain[2].(hcl.TraverseAttr); ok { - ret.ProviderConfig.Alias = tt.Name - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Provider type name must be followed by a configuration alias name.", - Subject: remain[2].SourceRange().Ptr(), - }) - return ret, diags - } - } - - return ret, diags -} - -// ParseAbsProviderConfigStr is a helper wrapper around ParseAbsProviderConfig -// that takes a string and parses it with the HCL native syntax traversal parser -// before interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseAbsProviderConfig. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned address is invalid. -func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return AbsProviderConfig{}, diags - } - - addr, addrDiags := ParseAbsProviderConfig(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -// ProviderConfigDefault returns the address of the default provider config -// of the given type inside the recieving module instance. -func (m ModuleInstance) ProviderConfigDefault(name string) AbsProviderConfig { - return AbsProviderConfig{ - Module: m, - ProviderConfig: ProviderConfig{ - Type: name, - }, - } -} - -// ProviderConfigAliased returns the address of an aliased provider config -// of with given type and alias inside the recieving module instance. 
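
The two ModuleInstance helpers described here pair naturally; a minimal in-package sketch of their string forms (the zero-value ModuleInstance acts as the root module instance, since it is an empty step list):

func TestModuleProviderConfigAddrs(t *testing.T) {
	var root ModuleInstance
	if got := root.ProviderConfigDefault("aws").String(); got != "provider.aws" {
		t.Errorf("default = %q", got)
	}
	if got := root.ProviderConfigAliased("aws", "west").String(); got != "provider.aws.west" {
		t.Errorf("aliased = %q", got)
	}
}
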
-func (m ModuleInstance) ProviderConfigAliased(name, alias string) AbsProviderConfig { - return AbsProviderConfig{ - Module: m, - ProviderConfig: ProviderConfig{ - Type: name, - Alias: alias, - }, - } -} - -// Inherited returns an address that the receiving configuration address might -// inherit from in a parent module. The second bool return value indicates if -// such inheritance is possible, and thus whether the returned address is valid. -// -// Inheritance is possible only for default (un-aliased) providers in modules -// other than the root module. Even if a valid address is returned, inheritence -// may not be performed for other reasons, such as if the calling module -// provided explicit provider configurations within the call for this module. -// The ProviderTransformer graph transform in the main terraform module has -// the authoritative logic for provider inheritance, and this method is here -// mainly just for its benefit. -func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) { - // Can't inherit if we're already in the root. - if len(pc.Module) == 0 { - return AbsProviderConfig{}, false - } - - // Can't inherit if we have an alias. - if pc.ProviderConfig.Alias != "" { - return AbsProviderConfig{}, false - } - - // Otherwise, we might inherit from a configuration with the same - // provider name in the parent module instance. - parentMod := pc.Module.Parent() - return pc.ProviderConfig.Absolute(parentMod), true -} - -func (pc AbsProviderConfig) String() string { - if len(pc.Module) == 0 { - return pc.ProviderConfig.String() - } - return fmt.Sprintf("%s.%s", pc.Module.String(), pc.ProviderConfig.String()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go deleted file mode 100644 index 64b8ac869c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go +++ /dev/null @@ -1,7 +0,0 @@ -package addrs - -// ProviderType encapsulates a single provider type. In the future this will be -// extended to include additional fields including Namespace and SourceHost -type ProviderType struct { - Name string -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go deleted file mode 100644 index 211083a5f4..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go +++ /dev/null @@ -1,20 +0,0 @@ -package addrs - -// Referenceable is an interface implemented by all address types that can -// appear as references in configuration language expressions. -type Referenceable interface { - // All implementations of this interface must be covered by the type switch - // in lang.Scope.buildEvalContext. - referenceableSigil() - - // String produces a string representation of the address that could be - // parsed as a HCL traversal and passed to ParseRef to produce an identical - // result. 
- String() string -} - -type referenceable struct { -} - -func (r referenceable) referenceableSigil() { -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go deleted file mode 100644 index 103f8a28c3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go +++ /dev/null @@ -1,254 +0,0 @@ -package addrs - -import ( - "fmt" - "strings" -) - -// Resource is an address for a resource block within configuration, which -// contains potentially-multiple resource instances if that configuration -// block uses "count" or "for_each". -type Resource struct { - referenceable - Mode ResourceMode - Type string - Name string -} - -func (r Resource) String() string { - switch r.Mode { - case ManagedResourceMode: - return fmt.Sprintf("%s.%s", r.Type, r.Name) - case DataResourceMode: - return fmt.Sprintf("data.%s.%s", r.Type, r.Name) - default: - // Should never happen, but we'll return a string here rather than - // crashing just in case it does. - return fmt.Sprintf(".%s.%s", r.Type, r.Name) - } -} - -// Instance produces the address for a specific instance of the receiver -// that is idenfied by the given key. -func (r Resource) Instance(key InstanceKey) ResourceInstance { - return ResourceInstance{ - Resource: r, - Key: key, - } -} - -// Absolute returns an AbsResource from the receiver and the given module -// instance address. -func (r Resource) Absolute(module ModuleInstance) AbsResource { - return AbsResource{ - Module: module, - Resource: r, - } -} - -// DefaultProviderConfig returns the address of the provider configuration -// that should be used for the resource identified by the reciever if it -// does not have a provider configuration address explicitly set in -// configuration. -// -// This method is not able to verify that such a configuration exists, nor -// represent the behavior of automatically inheriting certain provider -// configurations from parent modules. It just does a static analysis of the -// receiving address and returns an address to start from, relative to the -// same module that contains the resource. -func (r Resource) DefaultProviderConfig() ProviderConfig { - typeName := r.Type - if under := strings.Index(typeName, "_"); under != -1 { - typeName = typeName[:under] - } - return ProviderConfig{ - Type: typeName, - } -} - -// ResourceInstance is an address for a specific instance of a resource. -// When a resource is defined in configuration with "count" or "for_each" it -// produces zero or more instances, which can be addressed using this type. -type ResourceInstance struct { - referenceable - Resource Resource - Key InstanceKey -} - -func (r ResourceInstance) ContainingResource() Resource { - return r.Resource -} - -func (r ResourceInstance) String() string { - if r.Key == NoKey { - return r.Resource.String() - } - return r.Resource.String() + r.Key.String() -} - -// Absolute returns an AbsResourceInstance from the receiver and the given module -// instance address. -func (r ResourceInstance) Absolute(module ModuleInstance) AbsResourceInstance { - return AbsResourceInstance{ - Module: module, - Resource: r, - } -} - -// AbsResource is an absolute address for a resource under a given module path. -type AbsResource struct { - targetable - Module ModuleInstance - Resource Resource -} - -// Resource returns the address of a particular resource within the receiver. 
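
To ground the resource address syntax above, an in-package sketch of resource and instance rendering (IntKey is assumed to be this package's integer instance key, defined alongside ParseInstanceKey):

func TestResourceAddrStrings(t *testing.T) {
	r := Resource{Mode: ManagedResourceMode, Type: "aws_instance", Name: "web"}
	if got := r.String(); got != "aws_instance.web" {
		t.Errorf("String() = %q", got)
	}
	// The implied provider type is everything before the first underscore.
	if got := r.DefaultProviderConfig().Type; got != "aws" {
		t.Errorf("DefaultProviderConfig().Type = %q", got)
	}
	d := Resource{Mode: DataResourceMode, Type: "aws_ami", Name: "base"}
	if got := d.Instance(IntKey(0)).String(); got != "data.aws_ami.base[0]" {
		t.Errorf("instance String() = %q", got)
	}
}
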
-func (m ModuleInstance) Resource(mode ResourceMode, typeName string, name string) AbsResource { - return AbsResource{ - Module: m, - Resource: Resource{ - Mode: mode, - Type: typeName, - Name: name, - }, - } -} - -// Instance produces the address for a specific instance of the receiver -// that is idenfied by the given key. -func (r AbsResource) Instance(key InstanceKey) AbsResourceInstance { - return AbsResourceInstance{ - Module: r.Module, - Resource: r.Resource.Instance(key), - } -} - -// TargetContains implements Targetable by returning true if the given other -// address is either equal to the receiver or is an instance of the -// receiver. -func (r AbsResource) TargetContains(other Targetable) bool { - switch to := other.(type) { - - case AbsResource: - // We'll use our stringification as a cheat-ish way to test for equality. - return to.String() == r.String() - - case AbsResourceInstance: - return r.TargetContains(to.ContainingResource()) - - default: - return false - - } -} - -func (r AbsResource) String() string { - if len(r.Module) == 0 { - return r.Resource.String() - } - return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) -} - -// AbsResourceInstance is an absolute address for a resource instance under a -// given module path. -type AbsResourceInstance struct { - targetable - Module ModuleInstance - Resource ResourceInstance -} - -// ResourceInstance returns the address of a particular resource instance within the receiver. -func (m ModuleInstance) ResourceInstance(mode ResourceMode, typeName string, name string, key InstanceKey) AbsResourceInstance { - return AbsResourceInstance{ - Module: m, - Resource: ResourceInstance{ - Resource: Resource{ - Mode: mode, - Type: typeName, - Name: name, - }, - Key: key, - }, - } -} - -// ContainingResource returns the address of the resource that contains the -// receving resource instance. In other words, it discards the key portion -// of the address to produce an AbsResource value. -func (r AbsResourceInstance) ContainingResource() AbsResource { - return AbsResource{ - Module: r.Module, - Resource: r.Resource.ContainingResource(), - } -} - -// TargetContains implements Targetable by returning true if the given other -// address is equal to the receiver. -func (r AbsResourceInstance) TargetContains(other Targetable) bool { - switch to := other.(type) { - - case AbsResourceInstance: - // We'll use our stringification as a cheat-ish way to test for equality. - return to.String() == r.String() - - default: - return false - - } -} - -func (r AbsResourceInstance) String() string { - if len(r.Module) == 0 { - return r.Resource.String() - } - return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) -} - -// Less returns true if the receiver should sort before the given other value -// in a sorted list of addresses. 
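
The ordering described above is arbitrary but deterministic; a sketch of sorting with it (assumes the package's IntKey/NoKey instance keys and the standard library sort package):

func TestAbsResourceInstanceOrdering(t *testing.T) {
	var root ModuleInstance
	list := []AbsResourceInstance{
		root.ResourceInstance(ManagedResourceMode, "aws_instance", "web", IntKey(1)),
		root.ResourceInstance(ManagedResourceMode, "aws_instance", "web", IntKey(0)),
		root.ResourceInstance(DataResourceMode, "aws_ami", "base", NoKey),
	}
	sort.Slice(list, func(i, j int) bool { return list[i].Less(list[j]) })
	// Data resources sort before managed ones, then instance keys ascend.
	if got := list[0].String(); got != "data.aws_ami.base" {
		t.Errorf("first = %q", got)
	}
	if got := list[1].String(); got != "aws_instance.web[0]" {
		t.Errorf("second = %q", got)
	}
}
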
-func (r AbsResourceInstance) Less(o AbsResourceInstance) bool { - switch { - - case len(r.Module) != len(o.Module): - return len(r.Module) < len(o.Module) - - case r.Module.String() != o.Module.String(): - return r.Module.Less(o.Module) - - case r.Resource.Resource.Mode != o.Resource.Resource.Mode: - return r.Resource.Resource.Mode == DataResourceMode - - case r.Resource.Resource.Type != o.Resource.Resource.Type: - return r.Resource.Resource.Type < o.Resource.Resource.Type - - case r.Resource.Resource.Name != o.Resource.Resource.Name: - return r.Resource.Resource.Name < o.Resource.Resource.Name - - case r.Resource.Key != o.Resource.Key: - return InstanceKeyLess(r.Resource.Key, o.Resource.Key) - - default: - return false - - } -} - -// ResourceMode defines which lifecycle applies to a given resource. Each -// resource lifecycle has a slightly different address format. -type ResourceMode rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type ResourceMode - -const ( - // InvalidResourceMode is the zero value of ResourceMode and is not - // a valid resource mode. - InvalidResourceMode ResourceMode = 0 - - // ManagedResourceMode indicates a managed resource, as defined by - // "resource" blocks in configuration. - ManagedResourceMode ResourceMode = 'M' - - // DataResourceMode indicates a data resource, as defined by - // "data" blocks in configuration. - DataResourceMode ResourceMode = 'D' -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go deleted file mode 100644 index 9bdbdc421a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go +++ /dev/null @@ -1,105 +0,0 @@ -package addrs - -import "fmt" - -// ResourceInstancePhase is a special kind of reference used only internally -// during graph building to represent resource instances that are in a -// non-primary state. -// -// Graph nodes can declare themselves referenceable via an instance phase -// or can declare that they reference an instance phase in order to accomodate -// secondary graph nodes dealing with, for example, destroy actions. -// -// This special reference type cannot be accessed directly by end-users, and -// should never be shown in the UI. -type ResourceInstancePhase struct { - referenceable - ResourceInstance ResourceInstance - Phase ResourceInstancePhaseType -} - -var _ Referenceable = ResourceInstancePhase{} - -// Phase returns a special "phase address" for the receving instance. See the -// documentation of ResourceInstancePhase for the limited situations where this -// is intended to be used. -func (r ResourceInstance) Phase(rpt ResourceInstancePhaseType) ResourceInstancePhase { - return ResourceInstancePhase{ - ResourceInstance: r, - Phase: rpt, - } -} - -// ContainingResource returns an address for the same phase of the resource -// that this instance belongs to. -func (rp ResourceInstancePhase) ContainingResource() ResourcePhase { - return rp.ResourceInstance.Resource.Phase(rp.Phase) -} - -func (rp ResourceInstancePhase) String() string { - // We use a different separator here than usual to ensure that we'll - // never conflict with any non-phased resource instance string. This - // is intentionally something that would fail parsing with ParseRef, - // because this special address type should never be exposed in the UI. 
- return fmt.Sprintf("%s#%s", rp.ResourceInstance, rp.Phase) -} - -// ResourceInstancePhaseType is an enumeration used with ResourceInstancePhase. -type ResourceInstancePhaseType string - -const ( - // ResourceInstancePhaseDestroy represents the "destroy" phase of a - // resource instance. - ResourceInstancePhaseDestroy ResourceInstancePhaseType = "destroy" - - // ResourceInstancePhaseDestroyCBD is similar to ResourceInstancePhaseDestroy - // but is used for resources that have "create_before_destroy" set, thus - // requiring a different dependency ordering. - ResourceInstancePhaseDestroyCBD ResourceInstancePhaseType = "destroy-cbd" -) - -func (rpt ResourceInstancePhaseType) String() string { - return string(rpt) -} - -// ResourcePhase is a special kind of reference used only internally -// during graph building to represent resources that are in a -// non-primary state. -// -// Graph nodes can declare themselves referenceable via a resource phase -// or can declare that they reference a resource phase in order to accomodate -// secondary graph nodes dealing with, for example, destroy actions. -// -// Since resources (as opposed to instances) aren't actually phased, this -// address type is used only as an approximation during initial construction -// of the resource-oriented plan graph, under the assumption that resource -// instances with ResourceInstancePhase addresses will be created in dynamic -// subgraphs during the graph walk. -// -// This special reference type cannot be accessed directly by end-users, and -// should never be shown in the UI. -type ResourcePhase struct { - referenceable - Resource Resource - Phase ResourceInstancePhaseType -} - -var _ Referenceable = ResourcePhase{} - -// Phase returns a special "phase address" for the receving instance. See the -// documentation of ResourceInstancePhase for the limited situations where this -// is intended to be used. -func (r Resource) Phase(rpt ResourceInstancePhaseType) ResourcePhase { - return ResourcePhase{ - Resource: r, - Phase: rpt, - } -} - -func (rp ResourcePhase) String() string { - // We use a different separator here than usual to ensure that we'll - // never conflict with any non-phased resource instance string. This - // is intentionally something that would fail parsing with ParseRef, - // because this special address type should never be exposed in the UI. - return fmt.Sprintf("%s#%s", rp.Resource, rp.Phase) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go deleted file mode 100644 index 0b5c33f8ee..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by "stringer -type ResourceMode"; DO NOT EDIT. - -package addrs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[InvalidResourceMode-0] - _ = x[ManagedResourceMode-77] - _ = x[DataResourceMode-68] -} - -const ( - _ResourceMode_name_0 = "InvalidResourceMode" - _ResourceMode_name_1 = "DataResourceMode" - _ResourceMode_name_2 = "ManagedResourceMode" -) - -func (i ResourceMode) String() string { - switch { - case i == 0: - return _ResourceMode_name_0 - case i == 68: - return _ResourceMode_name_1 - case i == 77: - return _ResourceMode_name_2 - default: - return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go deleted file mode 100644 index 7f24eaf085..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go +++ /dev/null @@ -1,14 +0,0 @@ -package addrs - -// Self is the address of the special object "self" that behaves as an alias -// for a containing object currently in scope. -const Self selfT = 0 - -type selfT int - -func (s selfT) referenceableSigil() { -} - -func (s selfT) String() string { - return "self" -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go deleted file mode 100644 index 16819a5afb..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go +++ /dev/null @@ -1,26 +0,0 @@ -package addrs - -// Targetable is an interface implemented by all address types that can be -// used as "targets" for selecting sub-graphs of a graph. -type Targetable interface { - targetableSigil() - - // TargetContains returns true if the receiver is considered to contain - // the given other address. Containment, for the purpose of targeting, - // means that if a container address is targeted then all of the - // addresses within it are also implicitly targeted. - // - // A targetable address always contains at least itself. - TargetContains(other Targetable) bool - - // String produces a string representation of the address that could be - // parsed as a HCL traversal and passed to ParseTarget to produce an - // identical result. - String() string -} - -type targetable struct { -} - -func (r targetable) targetableSigil() { -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go deleted file mode 100644 index a880182ae2..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// TerraformAttr is the address of an attribute of the "terraform" object in -// the interpolation scope, like "terraform.workspace". -type TerraformAttr struct { - referenceable - Name string -} - -func (ta TerraformAttr) String() string { - return "terraform." 
+ ta.Name -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go deleted file mode 100644 index c054acf0ac..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go +++ /dev/null @@ -1,295 +0,0 @@ -package format - -import ( - "bufio" - "bytes" - "fmt" - "sort" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcled" - "github.com/hashicorp/hcl/v2/hclparse" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/mitchellh/colorstring" - wordwrap "github.com/mitchellh/go-wordwrap" - "github.com/zclconf/go-cty/cty" -) - -// Diagnostic formats a single diagnostic message. -// -// The width argument specifies at what column the diagnostic messages will -// be wrapped. If set to zero, messages will not be wrapped by this function -// at all. Although the long-form text parts of the message are wrapped, -// not all aspects of the message are guaranteed to fit within the specified -// terminal width. -func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize, width int) string { - if diag == nil { - // No good reason to pass a nil diagnostic in here... - return "" - } - - var buf bytes.Buffer - - switch diag.Severity() { - case tfdiags.Error: - buf.WriteString(color.Color("\n[bold][red]Error: [reset]")) - case tfdiags.Warning: - buf.WriteString(color.Color("\n[bold][yellow]Warning: [reset]")) - default: - // Clear out any coloring that might be applied by Terraform's UI helper, - // so our result is not context-sensitive. - buf.WriteString(color.Color("\n[reset]")) - } - - desc := diag.Description() - sourceRefs := diag.Source() - - // We don't wrap the summary, since we expect it to be terse, and since - // this is where we put the text of a native Go error it may not always - // be pure text that lends itself well to word-wrapping. - fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), desc.Summary) - - if sourceRefs.Subject != nil { - // We'll borrow HCL's range implementation here, because it has some - // handy features to help us produce a nice source code snippet. - highlightRange := sourceRefs.Subject.ToHCL() - snippetRange := highlightRange - if sourceRefs.Context != nil { - snippetRange = sourceRefs.Context.ToHCL() - } - - // Make sure the snippet includes the highlight. This should be true - // for any reasonable diagnostic, but we'll make sure. - snippetRange = hcl.RangeOver(snippetRange, highlightRange) - if snippetRange.Empty() { - snippetRange.End.Byte++ - snippetRange.End.Column++ - } - if highlightRange.Empty() { - highlightRange.End.Byte++ - highlightRange.End.Column++ - } - - var src []byte - if sources != nil { - src = sources[snippetRange.Filename] - } - if src == nil { - // This should generally not happen, as long as sources are always - // loaded through the main loader. We may load things in other - // ways in weird cases, so we'll tolerate it at the expense of - // a not-so-helpful error message. 
- fmt.Fprintf(&buf, " on %s line %d:\n (source code not available)\n", highlightRange.Filename, highlightRange.Start.Line) - } else { - file, offset := parseRange(src, highlightRange) - - headerRange := highlightRange - - contextStr := hcled.ContextString(file, offset-1) - if contextStr != "" { - contextStr = ", in " + contextStr - } - - fmt.Fprintf(&buf, " on %s line %d%s:\n", headerRange.Filename, headerRange.Start.Line, contextStr) - - // Config snippet rendering - sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) - for sc.Scan() { - lineRange := sc.Range() - if !lineRange.Overlaps(snippetRange) { - continue - } - beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) - before := beforeRange.SliceBytes(src) - highlighted := highlightedRange.SliceBytes(src) - after := afterRange.SliceBytes(src) - fmt.Fprintf( - &buf, color.Color("%4d: %s[underline]%s[reset]%s\n"), - lineRange.Start.Line, - before, highlighted, after, - ) - } - - } - - if fromExpr := diag.FromExpr(); fromExpr != nil { - // We may also be able to generate information about the dynamic - // values of relevant variables at the point of evaluation, then. - // This is particularly useful for expressions that get evaluated - // multiple times with different values, such as blocks using - // "count" and "for_each", or within "for" expressions. - expr := fromExpr.Expression - ctx := fromExpr.EvalContext - vars := expr.Variables() - stmts := make([]string, 0, len(vars)) - seen := make(map[string]struct{}, len(vars)) - Traversals: - for _, traversal := range vars { - for len(traversal) > 1 { - val, diags := traversal.TraverseAbs(ctx) - if diags.HasErrors() { - // Skip anything that generates errors, since we probably - // already have the same error in our diagnostics set - // already. - traversal = traversal[:len(traversal)-1] - continue - } - - traversalStr := traversalStr(traversal) - if _, exists := seen[traversalStr]; exists { - continue Traversals // don't show duplicates when the same variable is referenced multiple times - } - switch { - case !val.IsKnown(): - // Can't say anything about this yet, then. - continue Traversals - case val.IsNull(): - stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is null"), traversalStr)) - default: - stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is %s"), traversalStr, compactValueStr(val))) - } - seen[traversalStr] = struct{}{} - } - } - - sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? - - if len(stmts) > 0 { - fmt.Fprint(&buf, color.Color(" [dark_gray]|----------------[reset]\n")) - } - for _, stmt := range stmts { - fmt.Fprintf(&buf, color.Color(" [dark_gray]|[reset] %s\n"), stmt) - } - } - - buf.WriteByte('\n') - } - - if desc.Detail != "" { - detail := desc.Detail - if width != 0 { - detail = wordwrap.WrapString(detail, uint(width)) - } - fmt.Fprintf(&buf, "%s\n", detail) - } - - return buf.String() -} - -func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) { - filename := rng.Filename - offset := rng.Start.Byte - - // We need to re-parse here to get a *hcl.File we can interrogate. This - // is not awesome since we presumably already parsed the file earlier too, - // but this re-parsing is architecturally simpler than retaining all of - // the hcl.File objects and we only do this in the case of an error anyway - // so the overhead here is not a big problem. 
- parser := hclparse.NewParser() - var file *hcl.File - var diags hcl.Diagnostics - if strings.HasSuffix(filename, ".json") { - file, diags = parser.ParseJSON(src, filename) - } else { - file, diags = parser.ParseHCL(src, filename) - } - if diags.HasErrors() { - return file, offset - } - - return file, offset -} - -// traversalStr produces a representation of an HCL traversal that is compact, -// resembles HCL native syntax, and is suitable for display in the UI. -func traversalStr(traversal hcl.Traversal) string { - // This is a specialized subset of traversal rendering tailored to - // producing helpful contextual messages in diagnostics. It is not - // comprehensive nor intended to be used for other purposes. - - var buf bytes.Buffer - for _, step := range traversal { - switch tStep := step.(type) { - case hcl.TraverseRoot: - buf.WriteString(tStep.Name) - case hcl.TraverseAttr: - buf.WriteByte('.') - buf.WriteString(tStep.Name) - case hcl.TraverseIndex: - buf.WriteByte('[') - if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { - buf.WriteString(compactValueStr(tStep.Key)) - } else { - // We'll just use a placeholder for more complex values, - // since otherwise our result could grow ridiculously long. - buf.WriteString("...") - } - buf.WriteByte(']') - } - } - return buf.String() -} - -// compactValueStr produces a compact, single-line summary of a given value -// that is suitable for display in the UI. -// -// For primitives it returns a full representation, while for more complex -// types it instead summarizes the type, size, etc to produce something -// that is hopefully still somewhat useful but not as verbose as a rendering -// of the entire data structure. -func compactValueStr(val cty.Value) string { - // This is a specialized subset of value rendering tailored to producing - // helpful but concise messages in diagnostics. It is not comprehensive - // nor intended to be used for other purposes. - - ty := val.Type() - switch { - case val.IsNull(): - return "null" - case !val.IsKnown(): - // Should never happen here because we should filter before we get - // in here, but we'll do something reasonable rather than panic. - return "(not yet known)" - case ty == cty.Bool: - if val.True() { - return "true" - } - return "false" - case ty == cty.Number: - bf := val.AsBigFloat() - return bf.Text('g', 10) - case ty == cty.String: - // Go string syntax is not exactly the same as HCL native string syntax, - // but we'll accept the minor edge-cases where this is different here - // for now, just to get something reasonable here. 
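The collection branches of compactValueStr below reduce to a cty type's FriendlyName plus its length; for instance:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A two-element list summarizes the way compactValueStr would render it.
	list := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	fmt.Printf("%s with %d elements\n", list.Type().FriendlyName(), list.LengthInt())
	// Output: list of string with 2 elements
}
```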
- return fmt.Sprintf("%q", val.AsString()) - case ty.IsCollectionType() || ty.IsTupleType(): - l := val.LengthInt() - switch l { - case 0: - return "empty " + ty.FriendlyName() - case 1: - return ty.FriendlyName() + " with 1 element" - default: - return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) - } - case ty.IsObjectType(): - atys := ty.AttributeTypes() - l := len(atys) - switch l { - case 0: - return "object with no attributes" - case 1: - var name string - for k := range atys { - name = k - } - return fmt.Sprintf("object with 1 attribute %q", name) - default: - return fmt.Sprintf("object with %d attributes", l) - } - default: - return ty.FriendlyName() - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go deleted file mode 100644 index 0a2aa7d02e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go +++ /dev/null @@ -1,1192 +0,0 @@ -package format - -import ( - "bufio" - "bytes" - "fmt" - "sort" - "strings" - - "github.com/mitchellh/colorstring" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// ResourceChange returns a string representation of a change to a particular -// resource, for inclusion in user-facing plan output. -// -// The resource schema must be provided along with the change so that the -// formatted change can reflect the configuration structure for the associated -// resource. -// -// If "color" is non-nil, it will be used to color the result. Otherwise, -// no color codes will be included. 
-func ResourceChange( - change *plans.ResourceInstanceChangeSrc, - tainted bool, - schema *configschema.Block, - color *colorstring.Colorize, -) string { - addr := change.Addr - var buf bytes.Buffer - - if color == nil { - color = &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Disable: true, - Reset: false, - } - } - - dispAddr := addr.String() - if change.DeposedKey != states.NotDeposed { - dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, change.DeposedKey) - } - - switch change.Action { - case plans.Create: - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be created", dispAddr))) - case plans.Read: - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be read during apply\n # (config refers to values not yet known)", dispAddr))) - case plans.Update: - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be updated in-place", dispAddr))) - case plans.CreateThenDelete, plans.DeleteThenCreate: - if tainted { - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] is tainted, so must be [bold][red]replaced", dispAddr))) - } else { - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] must be [bold][red]replaced", dispAddr))) - } - case plans.Delete: - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]destroyed", dispAddr))) - default: - // should never happen, since the above is exhaustive - buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr)) - } - buf.WriteString(color.Color("[reset]\n")) - - switch change.Action { - case plans.Create: - buf.WriteString(color.Color("[green] +[reset] ")) - case plans.Read: - buf.WriteString(color.Color("[cyan] <=[reset] ")) - case plans.Update: - buf.WriteString(color.Color("[yellow] ~[reset] ")) - case plans.DeleteThenCreate: - buf.WriteString(color.Color("[red]-[reset]/[green]+[reset] ")) - case plans.CreateThenDelete: - buf.WriteString(color.Color("[green]+[reset]/[red]-[reset] ")) - case plans.Delete: - buf.WriteString(color.Color("[red] -[reset] ")) - default: - buf.WriteString(color.Color("??? ")) - } - - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - buf.WriteString(fmt.Sprintf( - "resource %q %q", - addr.Resource.Resource.Type, - addr.Resource.Resource.Name, - )) - case addrs.DataResourceMode: - buf.WriteString(fmt.Sprintf( - "data %q %q ", - addr.Resource.Resource.Type, - addr.Resource.Resource.Name, - )) - default: - // should never happen, since the above is exhaustive - buf.WriteString(addr.String()) - } - - buf.WriteString(" {") - - p := blockBodyDiffPrinter{ - buf: &buf, - color: color, - action: change.Action, - requiredReplace: change.RequiredReplace, - } - - // Most commonly-used resources have nested blocks that result in us - // going at least three traversals deep while we recurse here, so we'll - // start with that much capacity and then grow as needed for deeper - // structures. - path := make(cty.Path, 0, 3) - - changeV, err := change.Decode(schema.ImpliedType()) - if err != nil { - // Should never happen in here, since we've already been through - // loads of layers of encode/decode of the planned changes before now. - panic(fmt.Sprintf("failed to decode plan for %s while rendering diff: %s", addr, err)) - } - - // We currently have an opt-out that permits the legacy SDK to return values - // that defy our usual conventions around handling of nesting blocks. 
To - // avoid the rendering code from needing to handle all of these, we'll - // normalize first. - // (Ideally we'd do this as part of the SDK opt-out implementation in core, - // but we've added it here for now to reduce risk of unexpected impacts - // on other code in core.) - changeV.Change.Before = objchange.NormalizeObjectFromLegacySDK(changeV.Change.Before, schema) - changeV.Change.After = objchange.NormalizeObjectFromLegacySDK(changeV.Change.After, schema) - - bodyWritten := p.writeBlockBodyDiff(schema, changeV.Before, changeV.After, 6, path) - if bodyWritten { - buf.WriteString("\n") - buf.WriteString(strings.Repeat(" ", 4)) - } - buf.WriteString("}\n") - - return buf.String() -} - -type blockBodyDiffPrinter struct { - buf *bytes.Buffer - color *colorstring.Colorize - action plans.Action - requiredReplace cty.PathSet -} - -const forcesNewResourceCaption = " [red]# forces replacement[reset]" - -// writeBlockBodyDiff writes attribute or block differences -// and returns true if any differences were found and written -func (p *blockBodyDiffPrinter) writeBlockBodyDiff(schema *configschema.Block, old, new cty.Value, indent int, path cty.Path) bool { - path = ctyEnsurePathCapacity(path, 1) - - bodyWritten := false - blankBeforeBlocks := false - { - attrNames := make([]string, 0, len(schema.Attributes)) - attrNameLen := 0 - for name := range schema.Attributes { - oldVal := ctyGetAttrMaybeNull(old, name) - newVal := ctyGetAttrMaybeNull(new, name) - if oldVal.IsNull() && newVal.IsNull() { - // Skip attributes where both old and new values are null - // (we do this early here so that we'll do our value alignment - // based on the longest attribute name that has a change, rather - // than the longest attribute name in the full set.) - continue - } - - attrNames = append(attrNames, name) - if len(name) > attrNameLen { - attrNameLen = len(name) - } - } - sort.Strings(attrNames) - if len(attrNames) > 0 { - blankBeforeBlocks = true - } - - for _, name := range attrNames { - attrS := schema.Attributes[name] - oldVal := ctyGetAttrMaybeNull(old, name) - newVal := ctyGetAttrMaybeNull(new, name) - - bodyWritten = true - p.writeAttrDiff(name, attrS, oldVal, newVal, attrNameLen, indent, path) - } - } - - { - blockTypeNames := make([]string, 0, len(schema.BlockTypes)) - for name := range schema.BlockTypes { - blockTypeNames = append(blockTypeNames, name) - } - sort.Strings(blockTypeNames) - - for _, name := range blockTypeNames { - blockS := schema.BlockTypes[name] - oldVal := ctyGetAttrMaybeNull(old, name) - newVal := ctyGetAttrMaybeNull(new, name) - - bodyWritten = true - p.writeNestedBlockDiffs(name, blockS, oldVal, newVal, blankBeforeBlocks, indent, path) - - // Always include a blank for any subsequent block types. 
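The attribute loop above measures attrNameLen across only the changed attributes so that every `=` lines up. The padding trick in isolation, stdlib only (attribute names and values invented):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	attrs := map[string]string{"ami": `"abc123"`, "instance_type": `"t2.micro"`}

	// Measure the longest name, then right-pad each name to that width.
	names := make([]string, 0, len(attrs))
	nameLen := 0
	for name := range attrs {
		names = append(names, name)
		if len(name) > nameLen {
			nameLen = len(name)
		}
	}
	sort.Strings(names)

	for _, name := range names {
		fmt.Printf("  + %s%s = %s\n", name, strings.Repeat(" ", nameLen-len(name)), attrs[name])
	}
	//   + ami           = "abc123"
	//   + instance_type = "t2.micro"
}
```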
- blankBeforeBlocks = true - } - } - - return bodyWritten -} - -func (p *blockBodyDiffPrinter) writeAttrDiff(name string, attrS *configschema.Attribute, old, new cty.Value, nameLen, indent int, path cty.Path) { - path = append(path, cty.GetAttrStep{Name: name}) - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - showJustNew := false - var action plans.Action - switch { - case old.IsNull(): - action = plans.Create - showJustNew = true - case new.IsNull(): - action = plans.Delete - case ctyEqualWithUnknown(old, new): - action = plans.NoOp - showJustNew = true - default: - action = plans.Update - } - - p.writeActionSymbol(action) - - p.buf.WriteString(p.color.Color("[bold]")) - p.buf.WriteString(name) - p.buf.WriteString(p.color.Color("[reset]")) - p.buf.WriteString(strings.Repeat(" ", nameLen-len(name))) - p.buf.WriteString(" = ") - - if attrS.Sensitive { - p.buf.WriteString("(sensitive value)") - } else { - switch { - case showJustNew: - p.writeValue(new, action, indent+2) - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - default: - // We show new even if it is null to emphasize the fact - // that it is being unset, since otherwise it is easy to - // misunderstand that the value is still set to the old value. - p.writeValueDiff(old, new, indent+2, path) - } - } -} - -func (p *blockBodyDiffPrinter) writeNestedBlockDiffs(name string, blockS *configschema.NestedBlock, old, new cty.Value, blankBefore bool, indent int, path cty.Path) { - path = append(path, cty.GetAttrStep{Name: name}) - if old.IsNull() && new.IsNull() { - // Nothing to do if both old and new is null - return - } - - // Where old/new are collections representing a nesting mode other than - // NestingSingle, we assume the collection value can never be unknown - // since we always produce the container for the nested objects, even if - // the objects within are computed. - - switch blockS.Nesting { - case configschema.NestingSingle, configschema.NestingGroup: - var action plans.Action - eqV := new.Equals(old) - switch { - case old.IsNull(): - action = plans.Create - case new.IsNull(): - action = plans.Delete - case !new.IsWhollyKnown() || !old.IsWhollyKnown(): - // "old" should actually always be known due to our contract - // that old values must never be unknown, but we'll allow it - // anyway to be robust. - action = plans.Update - case !eqV.IsKnown() || !eqV.True(): - action = plans.Update - } - - if blankBefore { - p.buf.WriteRune('\n') - } - p.writeNestedBlockDiff(name, nil, &blockS.Block, action, old, new, indent, path) - case configschema.NestingList: - // For the sake of handling nested blocks, we'll treat a null list - // the same as an empty list since the config language doesn't - // distinguish these anyway. - old = ctyNullBlockListAsEmpty(old) - new = ctyNullBlockListAsEmpty(new) - - oldItems := ctyCollectionValues(old) - newItems := ctyCollectionValues(new) - - // Here we intentionally preserve the index-based correspondance - // between old and new, rather than trying to detect insertions - // and removals in the list, because this more accurately reflects - // how Terraform Core and providers will understand the change, - // particularly when the nested block contains computed attributes - // that will themselves maintain correspondance by index. - - // commonLen is number of elements that exist in both lists, which - // will be presented as updates (~). 
Any additional items in one - // of the lists will be presented as either creates (+) or deletes (-) - // depending on which list they belong to. - var commonLen int - switch { - case len(oldItems) < len(newItems): - commonLen = len(oldItems) - default: - commonLen = len(newItems) - } - - if blankBefore && (len(oldItems) > 0 || len(newItems) > 0) { - p.buf.WriteRune('\n') - } - - for i := 0; i < commonLen; i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - oldItem := oldItems[i] - newItem := newItems[i] - action := plans.Update - if oldItem.RawEquals(newItem) { - action = plans.NoOp - } - p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldItem, newItem, indent, path) - } - for i := commonLen; i < len(oldItems); i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - oldItem := oldItems[i] - newItem := cty.NullVal(oldItem.Type()) - p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Delete, oldItem, newItem, indent, path) - } - for i := commonLen; i < len(newItems); i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - newItem := newItems[i] - oldItem := cty.NullVal(newItem.Type()) - p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Create, oldItem, newItem, indent, path) - } - case configschema.NestingSet: - // For the sake of handling nested blocks, we'll treat a null set - // the same as an empty set since the config language doesn't - // distinguish these anyway. - old = ctyNullBlockSetAsEmpty(old) - new = ctyNullBlockSetAsEmpty(new) - - oldItems := ctyCollectionValues(old) - newItems := ctyCollectionValues(new) - - if (len(oldItems) + len(newItems)) == 0 { - // Nothing to do if both sets are empty - return - } - - allItems := make([]cty.Value, 0, len(oldItems)+len(newItems)) - allItems = append(allItems, oldItems...) - allItems = append(allItems, newItems...) - all := cty.SetVal(allItems) - - if blankBefore { - p.buf.WriteRune('\n') - } - - for it := all.ElementIterator(); it.Next(); { - _, val := it.Element() - var action plans.Action - var oldValue, newValue cty.Value - switch { - case !val.IsKnown(): - action = plans.Update - newValue = val - case !old.HasElement(val).True(): - action = plans.Create - oldValue = cty.NullVal(val.Type()) - newValue = val - case !new.HasElement(val).True(): - action = plans.Delete - oldValue = val - newValue = cty.NullVal(val.Type()) - default: - action = plans.NoOp - oldValue = val - newValue = val - } - path := append(path, cty.IndexStep{Key: val}) - p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldValue, newValue, indent, path) - } - - case configschema.NestingMap: - // For the sake of handling nested blocks, we'll treat a null map - // the same as an empty map since the config language doesn't - // distinguish these anyway. 
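The NestingList case above deliberately pairs old and new items by index instead of detecting insertions, since providers maintain correspondence by index for computed attributes. The shape of that commonLen loop, isolated:

```go
package main

import "fmt"

func main() {
	oldItems := []string{"a", "b", "c"}
	newItems := []string{"a", "x"}

	// Shared indexes render as updates (or no-ops when equal); any extra
	// items are deletes from the old list or creates from the new one.
	commonLen := len(oldItems)
	if len(newItems) < commonLen {
		commonLen = len(newItems)
	}

	for i := 0; i < commonLen; i++ {
		if oldItems[i] == newItems[i] {
			fmt.Printf("  [%d] %s\n", i, oldItems[i])
			continue
		}
		fmt.Printf("~ [%d] %s -> %s\n", i, oldItems[i], newItems[i])
	}
	for i := commonLen; i < len(oldItems); i++ {
		fmt.Printf("- [%d] %s\n", i, oldItems[i])
	}
	for i := commonLen; i < len(newItems); i++ {
		fmt.Printf("+ [%d] %s\n", i, newItems[i])
	}
}
```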
- old = ctyNullBlockMapAsEmpty(old) - new = ctyNullBlockMapAsEmpty(new) - - oldItems := old.AsValueMap() - newItems := new.AsValueMap() - if (len(oldItems) + len(newItems)) == 0 { - // Nothing to do if both maps are empty - return - } - - allKeys := make(map[string]bool) - for k := range oldItems { - allKeys[k] = true - } - for k := range newItems { - allKeys[k] = true - } - allKeysOrder := make([]string, 0, len(allKeys)) - for k := range allKeys { - allKeysOrder = append(allKeysOrder, k) - } - sort.Strings(allKeysOrder) - - if blankBefore { - p.buf.WriteRune('\n') - } - - for _, k := range allKeysOrder { - var action plans.Action - oldValue := oldItems[k] - newValue := newItems[k] - switch { - case oldValue == cty.NilVal: - oldValue = cty.NullVal(newValue.Type()) - action = plans.Create - case newValue == cty.NilVal: - newValue = cty.NullVal(oldValue.Type()) - action = plans.Delete - case !newValue.RawEquals(oldValue): - action = plans.Update - default: - action = plans.NoOp - } - - path := append(path, cty.IndexStep{Key: cty.StringVal(k)}) - p.writeNestedBlockDiff(name, &k, &blockS.Block, action, oldValue, newValue, indent, path) - } - } -} - -func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, path cty.Path) { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - p.writeActionSymbol(action) - - if label != nil { - fmt.Fprintf(p.buf, "%s %q {", name, *label) - } else { - fmt.Fprintf(p.buf, "%s {", name) - } - - if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - - bodyWritten := p.writeBlockBodyDiff(blockS, old, new, indent+4, path) - if bodyWritten { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - } - p.buf.WriteString("}") -} - -func (p *blockBodyDiffPrinter) writeValue(val cty.Value, action plans.Action, indent int) { - if !val.IsKnown() { - p.buf.WriteString("(known after apply)") - return - } - if val.IsNull() { - p.buf.WriteString(p.color.Color("[dark_gray]null[reset]")) - return - } - - ty := val.Type() - - switch { - case ty.IsPrimitiveType(): - switch ty { - case cty.String: - { - // Special behavior for JSON strings containing array or object - src := []byte(val.AsString()) - ty, err := ctyjson.ImpliedType(src) - // check for the special case of "null", which decodes to nil, - // and just allow it to be printed out directly - if err == nil && !ty.IsPrimitiveType() && val.AsString() != "null" { - jv, err := ctyjson.Unmarshal(src, ty) - if err == nil { - p.buf.WriteString("jsonencode(") - if jv.LengthInt() == 0 { - p.writeValue(jv, action, 0) - } else { - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.writeValue(jv, action, indent+4) - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteByte(')') - break // don't *also* do the normal behavior below - } - } - } - fmt.Fprintf(p.buf, "%q", val.AsString()) - case cty.Bool: - if val.True() { - p.buf.WriteString("true") - } else { - p.buf.WriteString("false") - } - case cty.Number: - bf := val.AsBigFloat() - p.buf.WriteString(bf.Text('f', -1)) - default: - // should never happen, since the above is exhaustive - fmt.Fprintf(p.buf, "%#v", val) - } - case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): - p.buf.WriteString("[") - - it := val.ElementIterator() - for it.Next() { - _, val 
:= it.Element() - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.writeValue(val, action, indent+4) - p.buf.WriteString(",") - } - - if val.LengthInt() > 0 { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteString("]") - case ty.IsMapType(): - p.buf.WriteString("{") - - keyLen := 0 - for it := val.ElementIterator(); it.Next(); { - key, _ := it.Element() - if keyStr := key.AsString(); len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - - for it := val.ElementIterator(); it.Next(); { - key, val := it.Element() - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.writeValue(key, action, indent+4) - p.buf.WriteString(strings.Repeat(" ", keyLen-len(key.AsString()))) - p.buf.WriteString(" = ") - p.writeValue(val, action, indent+4) - } - - if val.LengthInt() > 0 { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteString("}") - case ty.IsObjectType(): - p.buf.WriteString("{") - - atys := ty.AttributeTypes() - attrNames := make([]string, 0, len(atys)) - nameLen := 0 - for attrName := range atys { - attrNames = append(attrNames, attrName) - if len(attrName) > nameLen { - nameLen = len(attrName) - } - } - sort.Strings(attrNames) - - for _, attrName := range attrNames { - val := val.GetAttr(attrName) - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.buf.WriteString(attrName) - p.buf.WriteString(strings.Repeat(" ", nameLen-len(attrName))) - p.buf.WriteString(" = ") - p.writeValue(val, action, indent+4) - } - - if len(attrNames) > 0 { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteString("}") - } -} - -func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, path cty.Path) { - ty := old.Type() - typesEqual := ctyTypesEqual(ty, new.Type()) - - // We have some specialized diff implementations for certain complex - // values where it's useful to see a visualization of the diff of - // the nested elements rather than just showing the entire old and - // new values verbatim. - // However, these specialized implementations can apply only if both - // values are known and non-null. - if old.IsKnown() && new.IsKnown() && !old.IsNull() && !new.IsNull() && typesEqual { - switch { - case ty == cty.String: - // We have special behavior for both multi-line strings in general - // and for strings that can parse as JSON. For the JSON handling - // to apply, both old and new must be valid JSON. - // For single-line strings that don't parse as JSON we just fall - // out of this switch block and do the default old -> new rendering. - oldS := old.AsString() - newS := new.AsString() - - { - // Special behavior for JSON strings containing object or - // list values. 
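The JSON detection that follows hinges on two go-cty calls: ImpliedType to test whether a string parses as a JSON document, and Unmarshal to compare decoded values so that whitespace-only differences compare equal. On their own:

```go
package main

import (
	"fmt"

	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	a := []byte(`{"ports": [80, 443]}`)
	b := []byte(`{ "ports":[80,443] }`)

	// Non-primitive implied types are what trigger the jsonencode rendering.
	ta, errA := ctyjson.ImpliedType(a)
	tb, errB := ctyjson.ImpliedType(b)
	if errA != nil || errB != nil {
		panic("not valid JSON")
	}

	va, _ := ctyjson.Unmarshal(a, ta)
	vb, _ := ctyjson.Unmarshal(b, tb)
	fmt.Println(va.RawEquals(vb)) // true: only insignificant whitespace differs
}
```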
- oldBytes := []byte(oldS) - newBytes := []byte(newS) - oldType, oldErr := ctyjson.ImpliedType(oldBytes) - newType, newErr := ctyjson.ImpliedType(newBytes) - if oldErr == nil && newErr == nil && !(oldType.IsPrimitiveType() && newType.IsPrimitiveType()) { - oldJV, oldErr := ctyjson.Unmarshal(oldBytes, oldType) - newJV, newErr := ctyjson.Unmarshal(newBytes, newType) - if oldErr == nil && newErr == nil { - if !oldJV.RawEquals(newJV) { // two JSON values may differ only in insignificant whitespace - p.buf.WriteString("jsonencode(") - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(plans.Update) - p.writeValueDiff(oldJV, newJV, indent+4, path) - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteByte(')') - } else { - // if they differ only in insigificant whitespace - // then we'll note that but still expand out the - // effective value. - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color("jsonencode( [red]# whitespace changes force replacement[reset]")) - } else { - p.buf.WriteString(p.color.Color("jsonencode( [dim]# whitespace changes[reset]")) - } - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.writeValue(oldJV, plans.NoOp, indent+4) - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteByte(')') - } - return - } - } - } - - if strings.Index(oldS, "\n") < 0 && strings.Index(newS, "\n") < 0 { - break - } - - p.buf.WriteString("<<~EOT") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - var oldLines, newLines []cty.Value - { - r := strings.NewReader(oldS) - sc := bufio.NewScanner(r) - for sc.Scan() { - oldLines = append(oldLines, cty.StringVal(sc.Text())) - } - } - { - r := strings.NewReader(newS) - sc := bufio.NewScanner(r) - for sc.Scan() { - newLines = append(newLines, cty.StringVal(sc.Text())) - } - } - - diffLines := ctySequenceDiff(oldLines, newLines) - for _, diffLine := range diffLines { - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(diffLine.Action) - - switch diffLine.Action { - case plans.NoOp, plans.Delete: - p.buf.WriteString(diffLine.Before.AsString()) - case plans.Create: - p.buf.WriteString(diffLine.After.AsString()) - default: - // Should never happen since the above covers all - // actions that ctySequenceDiff can return for strings - p.buf.WriteString(diffLine.After.AsString()) - - } - p.buf.WriteString("\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) // +4 here because there's no symbol - p.buf.WriteString("EOT") - - return - - case ty.IsSetType(): - p.buf.WriteString("[") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - var addedVals, removedVals, allVals []cty.Value - for it := old.ElementIterator(); it.Next(); { - _, val := it.Element() - allVals = append(allVals, val) - if new.HasElement(val).False() { - removedVals = append(removedVals, val) - } - } - for it := new.ElementIterator(); it.Next(); { - _, val := it.Element() - allVals = append(allVals, val) - if val.IsKnown() && old.HasElement(val).False() { - addedVals = append(addedVals, val) - } - } - - var all, added, removed cty.Value - if len(allVals) > 0 { - all = cty.SetVal(allVals) - } else { - all = cty.SetValEmpty(ty.ElementType()) - } - if len(addedVals) > 0 { - added = cty.SetVal(addedVals) - } else { - added = cty.SetValEmpty(ty.ElementType()) - } - 
if len(removedVals) > 0 { - removed = cty.SetVal(removedVals) - } else { - removed = cty.SetValEmpty(ty.ElementType()) - } - - for it := all.ElementIterator(); it.Next(); { - _, val := it.Element() - - p.buf.WriteString(strings.Repeat(" ", indent+2)) - - var action plans.Action - switch { - case !val.IsKnown(): - action = plans.Update - case added.HasElement(val).True(): - action = plans.Create - case removed.HasElement(val).True(): - action = plans.Delete - default: - action = plans.NoOp - } - - p.writeActionSymbol(action) - p.writeValue(val, action, indent+4) - p.buf.WriteString(",\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("]") - return - case ty.IsListType() || ty.IsTupleType(): - p.buf.WriteString("[") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - elemDiffs := ctySequenceDiff(old.AsValueSlice(), new.AsValueSlice()) - for _, elemDiff := range elemDiffs { - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(elemDiff.Action) - switch elemDiff.Action { - case plans.NoOp, plans.Delete: - p.writeValue(elemDiff.Before, elemDiff.Action, indent+4) - case plans.Update: - p.writeValueDiff(elemDiff.Before, elemDiff.After, indent+4, path) - case plans.Create: - p.writeValue(elemDiff.After, elemDiff.Action, indent+4) - default: - // Should never happen since the above covers all - // actions that ctySequenceDiff can return. - p.writeValue(elemDiff.After, elemDiff.Action, indent+4) - } - - p.buf.WriteString(",\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("]") - return - - case ty.IsMapType(): - p.buf.WriteString("{") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - var allKeys []string - keyLen := 0 - for it := old.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - for it := new.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - - sort.Strings(allKeys) - - lastK := "" - for i, k := range allKeys { - if i > 0 && lastK == k { - continue // skip duplicates (list is sorted) - } - lastK = k - - p.buf.WriteString(strings.Repeat(" ", indent+2)) - kV := cty.StringVal(k) - var action plans.Action - if old.HasIndex(kV).False() { - action = plans.Create - } else if new.HasIndex(kV).False() { - action = plans.Delete - } else if eqV := old.Index(kV).Equals(new.Index(kV)); eqV.IsKnown() && eqV.True() { - action = plans.NoOp - } else { - action = plans.Update - } - - path := append(path, cty.IndexStep{Key: kV}) - - p.writeActionSymbol(action) - p.writeValue(kV, action, indent+4) - p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) - p.buf.WriteString(" = ") - switch action { - case plans.Create, plans.NoOp: - v := new.Index(kV) - p.writeValue(v, action, indent+4) - case plans.Delete: - oldV := old.Index(kV) - newV := cty.NullVal(oldV.Type()) - p.writeValueDiff(oldV, newV, indent+4, path) - default: - oldV := old.Index(kV) - newV := new.Index(kV) - p.writeValueDiff(oldV, newV, indent+4, path) - } - - p.buf.WriteByte('\n') - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("}") - return - case ty.IsObjectType(): - p.buf.WriteString("{") - p.buf.WriteString("\n") - - forcesNewResource := 
p.pathForcesNewResource(path) - - var allKeys []string - keyLen := 0 - for it := old.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - for it := new.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - - sort.Strings(allKeys) - - lastK := "" - for i, k := range allKeys { - if i > 0 && lastK == k { - continue // skip duplicates (list is sorted) - } - lastK = k - - p.buf.WriteString(strings.Repeat(" ", indent+2)) - kV := k - var action plans.Action - if !old.Type().HasAttribute(kV) { - action = plans.Create - } else if !new.Type().HasAttribute(kV) { - action = plans.Delete - } else if eqV := old.GetAttr(kV).Equals(new.GetAttr(kV)); eqV.IsKnown() && eqV.True() { - action = plans.NoOp - } else { - action = plans.Update - } - - path := append(path, cty.GetAttrStep{Name: kV}) - - p.writeActionSymbol(action) - p.buf.WriteString(k) - p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) - p.buf.WriteString(" = ") - - switch action { - case plans.Create, plans.NoOp: - v := new.GetAttr(kV) - p.writeValue(v, action, indent+4) - case plans.Delete: - oldV := old.GetAttr(kV) - newV := cty.NullVal(oldV.Type()) - p.writeValueDiff(oldV, newV, indent+4, path) - default: - oldV := old.GetAttr(kV) - newV := new.GetAttr(kV) - p.writeValueDiff(oldV, newV, indent+4, path) - } - - p.buf.WriteString("\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("}") - - if forcesNewResource { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - return - } - } - - // In all other cases, we just show the new and old values as-is - p.writeValue(old, plans.Delete, indent) - if new.IsNull() { - p.buf.WriteString(p.color.Color(" [dark_gray]->[reset] ")) - } else { - p.buf.WriteString(p.color.Color(" [yellow]->[reset] ")) - } - - p.writeValue(new, plans.Create, indent) - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } -} - -// writeActionSymbol writes a symbol to represent the given action, followed -// by a space. -// -// It only supports the actions that can be represented with a single character: -// Create, Delete, Update and NoAction. -func (p *blockBodyDiffPrinter) writeActionSymbol(action plans.Action) { - switch action { - case plans.Create: - p.buf.WriteString(p.color.Color("[green]+[reset] ")) - case plans.Delete: - p.buf.WriteString(p.color.Color("[red]-[reset] ")) - case plans.Update: - p.buf.WriteString(p.color.Color("[yellow]~[reset] ")) - case plans.NoOp: - p.buf.WriteString(" ") - default: - // Should never happen - p.buf.WriteString(p.color.Color("? ")) - } -} - -func (p *blockBodyDiffPrinter) pathForcesNewResource(path cty.Path) bool { - if !p.action.IsReplace() { - // "requiredReplace" only applies when the instance is being replaced - return false - } - return p.requiredReplace.Has(path) -} - -func ctyEmptyString(value cty.Value) bool { - if !value.IsNull() && value.IsKnown() { - valueType := value.Type() - if valueType == cty.String && value.AsString() == "" { - return true - } - } - return false -} - -func ctyGetAttrMaybeNull(val cty.Value, name string) cty.Value { - attrType := val.Type().AttributeType(name) - - if val.IsNull() { - return cty.NullVal(attrType) - } - - // We treat "" as null here - // as existing SDK doesn't support null yet. 
- // This allows us to avoid spurious diffs - // until we introduce null to the SDK. - attrValue := val.GetAttr(name) - if ctyEmptyString(attrValue) { - return cty.NullVal(attrType) - } - - return attrValue -} - -func ctyCollectionValues(val cty.Value) []cty.Value { - if !val.IsKnown() || val.IsNull() { - return nil - } - - ret := make([]cty.Value, 0, val.LengthInt()) - for it := val.ElementIterator(); it.Next(); { - _, value := it.Element() - ret = append(ret, value) - } - return ret -} - -// ctySequenceDiff returns differences between given sequences of cty.Value(s) -// in the form of Create, Delete, or Update actions (for objects). -func ctySequenceDiff(old, new []cty.Value) []*plans.Change { - var ret []*plans.Change - lcs := objchange.LongestCommonSubsequence(old, new) - var oldI, newI, lcsI int - for oldI < len(old) || newI < len(new) || lcsI < len(lcs) { - for oldI < len(old) && (lcsI >= len(lcs) || !old[oldI].RawEquals(lcs[lcsI])) { - isObjectDiff := old[oldI].Type().IsObjectType() && (newI >= len(new) || new[newI].Type().IsObjectType()) - if isObjectDiff && newI < len(new) { - ret = append(ret, &plans.Change{ - Action: plans.Update, - Before: old[oldI], - After: new[newI], - }) - oldI++ - newI++ // we also consume the next "new" in this case - continue - } - - ret = append(ret, &plans.Change{ - Action: plans.Delete, - Before: old[oldI], - After: cty.NullVal(old[oldI].Type()), - }) - oldI++ - } - for newI < len(new) && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) { - ret = append(ret, &plans.Change{ - Action: plans.Create, - Before: cty.NullVal(new[newI].Type()), - After: new[newI], - }) - newI++ - } - if lcsI < len(lcs) { - ret = append(ret, &plans.Change{ - Action: plans.NoOp, - Before: lcs[lcsI], - After: lcs[lcsI], - }) - - // All of our indexes advance together now, since the line - // is common to all three sequences. - lcsI++ - oldI++ - newI++ - } - } - return ret -} - -func ctyEqualWithUnknown(old, new cty.Value) bool { - if !old.IsWhollyKnown() || !new.IsWhollyKnown() { - return false - } - return old.Equals(new).True() -} - -// ctyTypesEqual checks equality of two types more loosely -// by avoiding checks of object/tuple elements -// as we render differences on element-by-element basis anyway -func ctyTypesEqual(oldT, newT cty.Type) bool { - if oldT.IsObjectType() && newT.IsObjectType() { - return true - } - if oldT.IsTupleType() && newT.IsTupleType() { - return true - } - return oldT.Equals(newT) -} - -func ctyEnsurePathCapacity(path cty.Path, minExtra int) cty.Path { - if cap(path)-len(path) >= minExtra { - return path - } - newCap := cap(path) * 2 - if newCap < (len(path) + minExtra) { - newCap = len(path) + minExtra - } - newPath := make(cty.Path, len(path), newCap) - copy(newPath, path) - return newPath -} - -// ctyNullBlockListAsEmpty either returns the given value verbatim if it is non-nil -// or returns an empty value of a suitable type to serve as a placeholder for it. -// -// In particular, this function handles the special situation where a "list" is -// actually represented as a tuple type where nested blocks contain -// dynamically-typed values. -func ctyNullBlockListAsEmpty(in cty.Value) cty.Value { - if !in.IsNull() { - return in - } - if ty := in.Type(); ty.IsListType() { - return cty.ListValEmpty(ty.ElementType()) - } - return cty.EmptyTupleVal // must need a tuple, then -} - -// ctyNullBlockMapAsEmpty either returns the given value verbatim if it is non-nil -// or returns an empty value of a suitable type to serve as a placeholder for it. 
-// -// In particular, this function handles the special situation where a "map" is -// actually represented as an object type where nested blocks contain -// dynamically-typed values. -func ctyNullBlockMapAsEmpty(in cty.Value) cty.Value { - if !in.IsNull() { - return in - } - if ty := in.Type(); ty.IsMapType() { - return cty.MapValEmpty(ty.ElementType()) - } - return cty.EmptyObjectVal // must need an object, then -} - -// ctyNullBlockSetAsEmpty either returns the given value verbatim if it is non-nil -// or returns an empty value of a suitable type to serve as a placeholder for it. -func ctyNullBlockSetAsEmpty(in cty.Value) cty.Value { - if !in.IsNull() { - return in - } - // Dynamically-typed attributes are not supported inside blocks backed by - // sets, so our result here is always a set. - return cty.SetValEmpty(in.Type().ElementType()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go deleted file mode 100644 index aa8d7deb2a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package format contains helpers for formatting various Terraform -// structures for human-readabout output. -// -// This package is used by the official Terraform CLI in formatting any -// output and is exported to encourage non-official frontends to mimic the -// output formatting as much as possible so that text formats of Terraform -// structures have a consistent look and feel. -package format diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go deleted file mode 100644 index 85ebbfec5e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go +++ /dev/null @@ -1,123 +0,0 @@ -package format - -import ( - "github.com/zclconf/go-cty/cty" -) - -// ObjectValueID takes a value that is assumed to be an object representation -// of some resource instance object and attempts to heuristically find an -// attribute of it that is likely to be a unique identifier in the remote -// system that it belongs to which will be useful to the user. -// -// If such an attribute is found, its name and string value intended for -// display are returned. Both returned strings are empty if no such attribute -// exists, in which case the caller should assume that the resource instance -// address within the Terraform configuration is the best available identifier. -// -// This is only a best-effort sort of thing, relying on naming conventions in -// our resource type schemas. The result is not guaranteed to be unique, but -// should generally be suitable for display to an end-user anyway. -// -// This function will panic if the given value is not of an object type. -func ObjectValueID(obj cty.Value) (k, v string) { - if obj.IsNull() || !obj.IsKnown() { - return "", "" - } - - atys := obj.Type().AttributeTypes() - - switch { - - case atys["id"] == cty.String: - v := obj.GetAttr("id") - if v.IsKnown() && !v.IsNull() { - return "id", v.AsString() - } - - case atys["name"] == cty.String: - // "name" isn't always globally unique, but if there isn't also an - // "id" then it _often_ is, in practice. 
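ObjectValueID's preference order is easy to exercise directly. A compressed version of the same id-then-name heuristic (a loop instead of the switch), assuming go-cty and an invented instance object:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// displayID mimics ObjectValueID: prefer a known, non-null string "id"
// attribute, fall back to "name", otherwise report nothing usable.
// (Unlike the switch above, this falls through when "id" is null.)
func displayID(obj cty.Value) (string, string) {
	atys := obj.Type().AttributeTypes()
	for _, key := range []string{"id", "name"} {
		if atys[key] == cty.String {
			if v := obj.GetAttr(key); v.IsKnown() && !v.IsNull() {
				return key, v.AsString()
			}
		}
	}
	return "", ""
}

func main() {
	obj := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.StringVal("i-0123456789abcdef0"),
		"name": cty.StringVal("web"),
	})
	k, v := displayID(obj)
	fmt.Printf("%s = %s\n", k, v) // id = i-0123456789abcdef0
}
```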
- v := obj.GetAttr("name") - if v.IsKnown() && !v.IsNull() { - return "name", v.AsString() - } - } - - return "", "" -} - -// ObjectValueName takes a value that is assumed to be an object representation -// of some resource instance object and attempts to heuristically find an -// attribute of it that is likely to be a human-friendly name in the remote -// system that it belongs to which will be useful to the user. -// -// If such an attribute is found, its name and string value intended for -// display are returned. Both returned strings are empty if no such attribute -// exists, in which case the caller should assume that the resource instance -// address within the Terraform configuration is the best available identifier. -// -// This is only a best-effort sort of thing, relying on naming conventions in -// our resource type schemas. The result is not guaranteed to be unique, but -// should generally be suitable for display to an end-user anyway. -// -// Callers that use both ObjectValueName and ObjectValueID at the same time -// should be prepared to get the same attribute key and value from both in -// some cases, since there is overlap betweek the id-extraction and -// name-extraction heuristics. -// -// This function will panic if the given value is not of an object type. -func ObjectValueName(obj cty.Value) (k, v string) { - if obj.IsNull() || !obj.IsKnown() { - return "", "" - } - - atys := obj.Type().AttributeTypes() - - switch { - - case atys["name"] == cty.String: - v := obj.GetAttr("name") - if v.IsKnown() && !v.IsNull() { - return "name", v.AsString() - } - - case atys["tags"].IsMapType() && atys["tags"].ElementType() == cty.String: - tags := obj.GetAttr("tags") - if tags.IsNull() || !tags.IsWhollyKnown() { - break - } - - switch { - case tags.HasIndex(cty.StringVal("name")).RawEquals(cty.True): - v := tags.Index(cty.StringVal("name")) - if v.IsKnown() && !v.IsNull() { - return "tags.name", v.AsString() - } - case tags.HasIndex(cty.StringVal("Name")).RawEquals(cty.True): - // AWS-style naming convention - v := tags.Index(cty.StringVal("Name")) - if v.IsKnown() && !v.IsNull() { - return "tags.Name", v.AsString() - } - } - } - - return "", "" -} - -// ObjectValueIDOrName is a convenience wrapper around both ObjectValueID -// and ObjectValueName (in that preference order) to try to extract some sort -// of human-friendly descriptive string value for an object as additional -// context about an object when it is being displayed in a compact way (where -// not all of the attributes are visible.) -// -// Just as with the two functions it wraps, it is a best-effort and may return -// two empty strings if no suitable attribute can be found for a given object. 
-func ObjectValueIDOrName(obj cty.Value) (k, v string) { - k, v = ObjectValueID(obj) - if k != "" { - return - } - k, v = ObjectValueName(obj) - return -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go deleted file mode 100644 index 14869ad3ca..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go +++ /dev/null @@ -1,208 +0,0 @@ -package format - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/terraform" - "github.com/mitchellh/colorstring" - "github.com/zclconf/go-cty/cty" -) - -// StateOpts are the options for formatting a state. -type StateOpts struct { - // State is the state to format. This is required. - State *states.State - - // Schemas are used to decode attributes. This is required. - Schemas *terraform.Schemas - - // Color is the colorizer. This is optional. - Color *colorstring.Colorize -} - -// State takes a state and returns a string -func State(opts *StateOpts) string { - if opts.Color == nil { - panic("colorize not given") - } - - if opts.Schemas == nil { - panic("schemas not given") - } - - s := opts.State - if len(s.Modules) == 0 { - return "The state file is empty. No resources are represented." - } - - buf := bytes.NewBufferString("[reset]") - p := blockBodyDiffPrinter{ - buf: buf, - color: opts.Color, - action: plans.NoOp, - } - - // Format all the modules - for _, m := range s.Modules { - formatStateModule(p, m, opts.Schemas) - } - - // Write the outputs for the root module - m := s.RootModule() - - if m.OutputValues != nil { - if len(m.OutputValues) > 0 { - p.buf.WriteString("Outputs:\n\n") - } - - // Sort the outputs - ks := make([]string, 0, len(m.OutputValues)) - for k := range m.OutputValues { - ks = append(ks, k) - } - sort.Strings(ks) - - // Output each output k/v pair - for _, k := range ks { - v := m.OutputValues[k] - p.buf.WriteString(fmt.Sprintf("%s = ", k)) - p.writeValue(v.Value, plans.NoOp, 0) - p.buf.WriteString("\n") - } - } - - trimmedOutput := strings.TrimSpace(p.buf.String()) - trimmedOutput += "[reset]" - - return opts.Color.Color(trimmedOutput) - -} - -func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraform.Schemas) { - // First get the names of all the resources so we can show them - // in alphabetical order. - names := make([]string, 0, len(m.Resources)) - for name := range m.Resources { - names = append(names, name) - } - sort.Strings(names) - - // Go through each resource and begin building up the output. 
- for _, key := range names { - for k, v := range m.Resources[key].Instances { - // keep these in order to keep the current object first, and - // provide deterministic output for the deposed objects - type obj struct { - header string - instance *states.ResourceInstanceObjectSrc - } - instances := []obj{} - - addr := m.Resources[key].Addr - - taintStr := "" - if v.Current != nil && v.Current.Status == 'T' { - taintStr = " (tainted)" - } - - instances = append(instances, - obj{fmt.Sprintf("# %s:%s\n", addr.Absolute(m.Addr).Instance(k), taintStr), v.Current}) - - for dk, v := range v.Deposed { - instances = append(instances, - obj{fmt.Sprintf("# %s: (deposed object %s)\n", addr.Absolute(m.Addr).Instance(k), dk), v}) - } - - // Sort the instances for consistent output. - // Starting the sort from the second index, so the current instance - // is always first. - sort.Slice(instances[1:], func(i, j int) bool { - return instances[i+1].header < instances[j+1].header - }) - - for _, obj := range instances { - header := obj.header - instance := obj.instance - p.buf.WriteString(header) - if instance == nil { - // this shouldn't happen, but there's nothing to do here so - // don't panic below. - continue - } - - var schema *configschema.Block - provider := m.Resources[key].ProviderConfig.ProviderConfig.StringCompact() - if _, exists := schemas.Providers[provider]; !exists { - // This should never happen in normal use because we should've - // loaded all of the schemas and checked things prior to this - // point. We can't return errors here, but since this is UI code - // we will try to do _something_ reasonable. - p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider)) - continue - } - - switch addr.Mode { - case addrs.ManagedResourceMode: - schema, _ = schemas.ResourceTypeConfig( - provider, - addr.Mode, - addr.Type, - ) - if schema == nil { - p.buf.WriteString(fmt.Sprintf( - "# missing schema for provider %q resource type %s\n\n", provider, addr.Type)) - continue - } - - p.buf.WriteString(fmt.Sprintf( - "resource %q %q {", - addr.Type, - addr.Name, - )) - case addrs.DataResourceMode: - schema, _ = schemas.ResourceTypeConfig( - provider, - addr.Mode, - addr.Type, - ) - if schema == nil { - p.buf.WriteString(fmt.Sprintf( - "# missing schema for provider %q data source %s\n\n", provider, addr.Type)) - continue - } - - p.buf.WriteString(fmt.Sprintf( - "data %q %q {", - addr.Type, - addr.Name, - )) - default: - // should never happen, since the above is exhaustive - p.buf.WriteString(addr.String()) - } - - val, err := instance.Decode(schema.ImpliedType()) - if err != nil { - fmt.Println(err.Error()) - break - } - - path := make(cty.Path, 0, 3) - bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path) - if bodyWritten { - p.buf.WriteString("\n") - } - - p.buf.WriteString("}\n\n") - } - } - } - p.buf.WriteString("\n") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go deleted file mode 100644 index 76d161d723..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go +++ /dev/null @@ -1,24 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -// Backend represents a "backend" block inside a "terraform" block in a module -// or file. 
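decodeBackendBlock below receives an hcl.Block that has already been matched against a body schema with one label. Getting such a block from source takes a single Content call; a standalone sketch (with the backend block at the top level rather than inside a terraform block, for brevity):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclparse"
)

func main() {
	src := []byte(`
backend "s3" {
  bucket = "my-state"
}
`)
	parser := hclparse.NewParser()
	file, diags := parser.ParseHCL(src, "backend.tf")
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// One label ("type") matches the Backend struct below; the block body
	// stays opaque here, as the Config hcl.Body field in that struct does.
	content, diags := file.Body.Content(&hcl.BodySchema{
		Blocks: []hcl.BlockHeaderSchema{{Type: "backend", LabelNames: []string{"type"}}},
	})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(content.Blocks[0].Labels[0]) // s3
}
```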
-type Backend struct { - Type string - Config hcl.Body - - TypeRange hcl.Range - DeclRange hcl.Range -} - -func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) { - return &Backend{ - Type: block.Labels[0], - TypeRange: block.LabelRanges[0], - Config: block.Body, - DeclRange: block.DefRange, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go deleted file mode 100644 index e594ebd40f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go +++ /dev/null @@ -1,116 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// ------------------------------------------------------------------------- -// Functions in this file are compatibility shims intended to ease conversion -// from the old configuration loader. Any use of these functions that makes -// a change should generate a deprecation warning explaining to the user how -// to update their code for new patterns. -// -// Shims are particularly important for any patterns that have been widely -// documented in books, tutorials, etc. Users will still be starting from -// these examples and we want to help them adopt the latest patterns rather -// than leave them stranded. -// ------------------------------------------------------------------------- - -// shimTraversalInString takes any arbitrary expression and checks if it is -// a quoted string in the native syntax. If it _is_, then it is parsed as a -// traversal and re-wrapped into a synthetic traversal expression and a -// warning is generated. Otherwise, the given expression is just returned -// verbatim. -// -// This function has no effect on expressions from the JSON syntax, since -// traversals in strings are the required pattern in that syntax. -// -// If wantKeyword is set, the generated warning diagnostic will talk about -// keywords rather than references. The behavior is otherwise unchanged, and -// the caller remains responsible for checking that the result is indeed -// a keyword, e.g. using hcl.ExprAsKeyword. -func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) { - // ObjectConsKeyExpr is a special wrapper type used for keys on object - // constructors to deal with the fact that naked identifiers are normally - // handled as "bareword" strings rather than as variable references. Since - // we know we're interpreting as a traversal anyway (and thus it won't - // matter whether it's a string or an identifier) we can safely just unwrap - // here and then process whatever we find inside as normal. - if ocke, ok := expr.(*hclsyntax.ObjectConsKeyExpr); ok { - expr = ocke.Wrapped - } - - if !exprIsNativeQuotedString(expr) { - return expr, nil - } - - strVal, diags := expr.Value(nil) - if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() { - // Since we're not even able to attempt a shim here, we'll discard - // the diagnostics we saw so far and let the caller's own error - // handling take care of reporting the invalid expression. - return expr, nil - } - - // The position handling here isn't _quite_ right because it won't - // take into account any escape sequences in the literal string, but - // it should be close enough for any error reporting to make sense. 
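The heart of the shim is hclsyntax.ParseTraversalAbs applied to the string's contents, with the start position nudged one column and one byte past the opening quote, as the code that follows does. In isolation:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// Re-parse the contents of a quoted string like "aws_instance.web"
	// as a real reference traversal.
	traversal, diags := hclsyntax.ParseTraversalAbs(
		[]byte("aws_instance.web"),
		"example.tf",
		hcl.Pos{Line: 1, Column: 2, Byte: 1}, // one past the opening quote
	)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(traversal.RootName(), len(traversal)) // aws_instance 2
}
```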
- srcRange := expr.Range() - startPos := srcRange.Start // copy - startPos.Column++ // skip initial quote - startPos.Byte++ // skip initial quote - - traversal, tDiags := hclsyntax.ParseTraversalAbs( - []byte(strVal.AsString()), - srcRange.Filename, - startPos, - ) - diags = append(diags, tDiags...) - - // For initial release our deprecation warnings are disabled to allow - // a period where modules can be compatible with both old and new - // conventions. - // FIXME: Re-enable these deprecation warnings in a release prior to - // Terraform 0.13 and then remove the shims altogether for 0.13. - /* - if wantKeyword { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted keywords are deprecated", - Detail: "In this context, keywords are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this keyword to silence this warning.", - Subject: &srcRange, - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted references are deprecated", - Detail: "In this context, references are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this reference to silence this warning.", - Subject: &srcRange, - }) - } - */ - - return &hclsyntax.ScopeTraversalExpr{ - Traversal: traversal, - SrcRange: srcRange, - }, diags -} - -// shimIsIgnoreChangesStar returns true if the given expression seems to be -// a string literal whose value is "*". This is used to support a legacy -// form of ignore_changes = all . -// -// This function does not itself emit any diagnostics, so it's the caller's -// responsibility to emit a warning diagnostic when this function returns true. -func shimIsIgnoreChangesStar(expr hcl.Expression) bool { - val, valDiags := expr.Value(nil) - if valDiags.HasErrors() { - return false - } - if val.Type() != cty.String || val.IsNull() || !val.IsKnown() { - return false - } - return val.AsString() == "*" -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go deleted file mode 100644 index 82c88a10f2..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go +++ /dev/null @@ -1,164 +0,0 @@ -package configs - -import ( - "sort" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// A Config is a node in the tree of modules within a configuration. -// -// The module tree is constructed by following ModuleCall instances recursively -// through the root module transitively into descendent modules. -// -// A module tree described in *this* package represents the static tree -// represented by configuration. During evaluation a static ModuleNode may -// expand into zero or more module instances depending on the use of count and -// for_each configuration attributes within each call. -type Config struct { - // RootModule points to the Config for the root module within the same - // module tree as this module. If this module _is_ the root module then - // this is self-referential. - Root *Config - - // ParentModule points to the Config for the module that directly calls - // this module. If this is the root module then this field is nil. 
- Parent *Config - - // Path is a sequence of module logical names that traverse from the root - // module to this config. Path is empty for the root module. - // - // This should only be used to display paths to the end-user in rare cases - // where we are talking about the static module tree, before module calls - // have been resolved. In most cases, an addrs.ModuleInstance describing - // a node in the dynamic module tree is better, since it will then include - // any keys resulting from evaluating "count" and "for_each" arguments. - Path addrs.Module - - // ChildModules points to the Config for each of the direct child modules - // called from this module. The keys in this map match the keys in - // Module.ModuleCalls. - Children map[string]*Config - - // Module points to the object describing the configuration for the - // various elements (variables, resources, etc) defined by this module. - Module *Module - - // CallRange is the source range for the header of the module block that - // requested this module. - // - // This field is meaningless for the root module, where its contents are undefined. - CallRange hcl.Range - - // SourceAddr is the source address that the referenced module was requested - // from, as specified in configuration. - // - // This field is meaningless for the root module, where its contents are undefined. - SourceAddr string - - // SourceAddrRange is the location in the configuration source where the - // SourceAddr value was set, for use in diagnostic messages. - // - // This field is meaningless for the root module, where its contents are undefined. - SourceAddrRange hcl.Range - - // Version is the specific version that was selected for this module, - // based on version constraints given in configuration. - // - // This field is nil if the module was loaded from a non-registry source, - // since versions are not supported for other sources. - // - // This field is meaningless for the root module, where it will always - // be nil. - Version *version.Version -} - -// NewEmptyConfig constructs a single-node configuration tree with an empty -// root module. This is generally a pretty useless thing to do, so most callers -// should instead use BuildConfig. -func NewEmptyConfig() *Config { - ret := &Config{} - ret.Root = ret - ret.Children = make(map[string]*Config) - ret.Module = &Module{} - return ret -} - -// DeepEach calls the given function once for each module in the tree, starting -// with the receiver. -// -// A parent is always called before its children and children of a particular -// node are visited in lexicographic order by their names. -func (c *Config) DeepEach(cb func(c *Config)) { - cb(c) - - names := make([]string, 0, len(c.Children)) - for name := range c.Children { - names = append(names, name) - } - - for _, name := range names { - c.Children[name].DeepEach(cb) - } -} - -// DescendentForInstance is like Descendent except that it accepts a path -// to a particular module instance in the dynamic module graph, returning -// the node from the static module graph that corresponds to it. -// -// All instances created by a particular module call share the same -// configuration, so the keys within the given path are disregarded. 
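DescendentForInstance below walks child names step by step and ignores instance keys entirely. A toy illustration of the same lookup using hypothetical types (not the SDK's own):

```go
package main

import "fmt"

type node struct {
	children map[string]*node
}

// descendent follows each path step into the children map, returning nil
// as soon as any step is missing -- the same shape as the real lookup.
func descendent(root *node, path []string) *node {
	current := root
	for _, name := range path {
		current = current.children[name]
		if current == nil {
			return nil
		}
	}
	return current
}

func main() {
	leaf := &node{}
	root := &node{children: map[string]*node{
		"network": {children: map[string]*node{"subnet": leaf}},
	}}
	fmt.Println(descendent(root, []string{"network", "subnet"}) == leaf) // true
	fmt.Println(descendent(root, []string{"missing"}))                  // <nil>
}
```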
-func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config { - current := c - for _, step := range path { - current = current.Children[step.Name] - if current == nil { - return nil - } - } - return current -} - -// ProviderTypes returns the names of each distinct provider type referenced -// in the receiving configuration. -// -// This is a helper for easily determining which provider types are required -// to fully interpret the configuration, though it does not include version -// information and so callers are expected to have already dealt with -// provider version selection in an earlier step and have identified suitable -// versions for each provider. -func (c *Config) ProviderTypes() []string { - m := make(map[string]struct{}) - c.gatherProviderTypes(m) - - ret := make([]string, 0, len(m)) - for k := range m { - ret = append(ret, k) - } - sort.Strings(ret) - return ret -} -func (c *Config) gatherProviderTypes(m map[string]struct{}) { - if c == nil { - return - } - - for _, pc := range c.Module.ProviderConfigs { - m[pc.Name] = struct{}{} - } - for _, rc := range c.Module.ManagedResources { - providerAddr := rc.ProviderConfigAddr() - m[providerAddr.Type] = struct{}{} - } - for _, rc := range c.Module.DataResources { - providerAddr := rc.ProviderConfigAddr() - m[providerAddr.Type] = struct{}{} - } - - // Must also visit our child modules, recursively. - for _, cc := range c.Children { - cc.gatherProviderTypes(m) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go deleted file mode 100644 index cb46b65aaa..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go +++ /dev/null @@ -1,160 +0,0 @@ -package configs - -import ( - "sort" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// BuildConfig constructs a Config from a root module by loading all of its -// descendent modules via the given ModuleWalker. -// -// The result is a module tree that has so far only had basic module- and -// file-level invariants validated. If the returned diagnostics contains errors, -// the returned module tree may be incomplete but can still be used carefully -// for static analysis. -func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { - var diags hcl.Diagnostics - cfg := &Config{ - Module: root, - } - cfg.Root = cfg // Root module is self-referential. - cfg.Children, diags = buildChildModules(cfg, walker) - return cfg, diags -} - -func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) { - var diags hcl.Diagnostics - ret := map[string]*Config{} - - calls := parent.Module.ModuleCalls - - // We'll sort the calls by their local names so that they'll appear in a - // predictable order in any logging that's produced during the walk. 
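Both ProviderTypes above and the child-module walk below lean on the same Go idiom: collect names into a map used as a set, then flatten into a sorted slice for deterministic order. A minimal standalone version:

```go
package main

import (
	"fmt"
	"sort"
)

// distinctSorted deduplicates names via a map-as-set, then returns them
// in lexicographic order so callers get stable output.
func distinctSorted(names []string) []string {
	set := make(map[string]struct{})
	for _, n := range names {
		set[n] = struct{}{}
	}
	ret := make([]string, 0, len(set))
	for n := range set {
		ret = append(ret, n)
	}
	sort.Strings(ret)
	return ret
}

func main() {
	fmt.Println(distinctSorted([]string{"aws", "github", "aws"})) // [aws github]
}
```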
- callNames := make([]string, 0, len(calls)) - for k := range calls { - callNames = append(callNames, k) - } - sort.Strings(callNames) - - for _, callName := range callNames { - call := calls[callName] - path := make([]string, len(parent.Path)+1) - copy(path, parent.Path) - path[len(path)-1] = call.Name - - req := ModuleRequest{ - Name: call.Name, - Path: path, - SourceAddr: call.SourceAddr, - SourceAddrRange: call.SourceAddrRange, - VersionConstraint: call.Version, - Parent: parent, - CallRange: call.DeclRange, - } - - mod, ver, modDiags := walker.LoadModule(&req) - diags = append(diags, modDiags...) - if mod == nil { - // nil can be returned if the source address was invalid and so - // nothing could be loaded whatsoever. LoadModule should've - // returned at least one error diagnostic in that case. - continue - } - - child := &Config{ - Parent: parent, - Root: parent.Root, - Path: path, - Module: mod, - CallRange: call.DeclRange, - SourceAddr: call.SourceAddr, - SourceAddrRange: call.SourceAddrRange, - Version: ver, - } - - child.Children, modDiags = buildChildModules(child, walker) - diags = append(diags, modDiags...) - - ret[call.Name] = child - } - - return ret, diags -} - -// A ModuleWalker knows how to find and load a child module given details about -// the module to be loaded and a reference to its partially-loaded parent -// Config. -type ModuleWalker interface { - // LoadModule finds and loads a requested child module. - // - // If errors are detected during loading, implementations should return them - // in the diagnostics object. If the diagnostics object contains any errors - // then the caller will tolerate the returned module being nil or incomplete. - // If no errors are returned, it should be non-nil and complete. - // - // Full validation need not have been performed but an implementation should - // ensure that the basic file- and module-validations performed by the - // LoadConfigDir function (valid syntax, no namespace collisions, etc) have - // been performed before returning a module. - LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) -} - -// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps -// a callback function, for more convenient use of that interface. -type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) - -// LoadModule implements ModuleWalker. -func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { - return f(req) -} - -// ModuleRequest is used with the ModuleWalker interface to describe a child -// module that must be loaded. -type ModuleRequest struct { - // Name is the "logical name" of the module call within configuration. - // This is provided in case the name is used as part of a storage key - // for the module, but implementations must otherwise treat it as an - // opaque string. It is guaranteed to have already been validated as an - // HCL identifier and UTF-8 encoded. - Name string - - // Path is a list of logical names that traverse from the root module to - // this module. This can be used, for example, to form a lookup key for - // each distinct module call in a configuration, allowing for multiple - // calls with the same name at different points in the tree. - Path addrs.Module - - // SourceAddr is the source address string provided by the user in - // configuration. - SourceAddr string - - // SourceAddrRange is the source range for the SourceAddr value as it - // was provided in configuration. 
This can and should be used to generate - // diagnostics about the source address having invalid syntax, referring - // to a non-existent object, etc. - SourceAddrRange hcl.Range - - // VersionConstraint is the version constraint applied to the module in - // configuration. This data structure includes the source range for - // the constraint, which can and should be used to generate diagnostics - // about constraint-related issues, such as constraints that eliminate all - // available versions of a module whose source is otherwise valid. - VersionConstraint VersionConstraint - - // Parent is the partially-constructed module tree node that the loaded - // module will be added to. Callers may refer to any field of this - // structure except Children, which is still under construction when - // ModuleRequest objects are created and thus has undefined content. - // The main reason this is provided is so that full module paths can - // be constructed for uniqueness. - Parent *Config - - // CallRange is the source range for the header of the "module" block - // in configuration that prompted this request. This can be used as the - // subject of an error diagnostic that relates to the module call itself, - // rather than to either its source address or its version number. - CallRange hcl.Range -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go deleted file mode 100644 index ebbeb3b629..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go +++ /dev/null @@ -1,125 +0,0 @@ -package configload - -import ( - "io" - "os" - "path/filepath" - "strings" -) - -// copyDir copies the src directory contents into dst. Both directories -// should already exist. -func copyDir(dst, src string) error { - src, err := filepath.EvalSymlinks(src) - if err != nil { - return err - } - - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if path == src { - return nil - } - - if strings.HasPrefix(filepath.Base(path), ".") { - // Skip any dot files - if info.IsDir() { - return filepath.SkipDir - } else { - return nil - } - } - - // The "path" has the src prefixed to it. We need to join our - // destination with the path without the src on it. - dstPath := filepath.Join(dst, path[len(src):]) - - // we don't want to try and copy the same file over itself. - if eq, err := sameFile(path, dstPath); eq { - return nil - } else if err != nil { - return err - } - - // If we have a directory, make that subdirectory, then continue - // the walk. - if info.IsDir() { - if path == filepath.Join(src, dst) { - // dst is in src; don't walk it. - return nil - } - - if err := os.MkdirAll(dstPath, 0755); err != nil { - return err - } - - return nil - } - - // If the current path is a symlink, recreate the symlink relative to - // the dst directory - if info.Mode()&os.ModeSymlink == os.ModeSymlink { - target, err := os.Readlink(path) - if err != nil { - return err - } - - return os.Symlink(target, dstPath) - } - - // If we have a file, copy the contents. 
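The destination path in copyDir above is computed by stripping the src prefix from the walked path and re-rooting the remainder under dst. A quick standalone check with hypothetical paths (POSIX separators assumed):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	src := "/tmp/modules/vpc"
	dst := "/tmp/snapshot"
	path := "/tmp/modules/vpc/main.tf" // as handed to the walk callback

	// Same expression as the walk callback: drop the src prefix, then
	// join what remains onto dst.
	dstPath := filepath.Join(dst, path[len(src):])
	fmt.Println(dstPath) // /tmp/snapshot/main.tf
}
```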
- srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dstPath) - if err != nil { - return err - } - defer dstF.Close() - - if _, err := io.Copy(dstF, srcF); err != nil { - return err - } - - // Chmod it - return os.Chmod(dstPath, info.Mode()) - } - - return filepath.Walk(src, walkFn) -} - -// sameFile tries to determine if two paths are the same file. -// If the paths don't match, we look up the inode on supported systems. -func sameFile(a, b string) (bool, error) { - if a == b { - return true, nil - } - - aIno, err := inode(a) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - bIno, err := inode(b) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - if aIno > 0 && aIno == bIno { - return true, nil - } - - return false, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go deleted file mode 100644 index 8b615f9026..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package configload knows how to install modules into the .terraform/modules -// directory and to load modules from those installed locations. It is used -// in conjunction with the LoadConfig function in the parent package. -package configload diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go deleted file mode 100644 index 57df04145a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build linux darwin openbsd netbsd solaris dragonfly - -package configload - -import ( - "fmt" - "os" - "syscall" -) - -// lookup the inode of a file on posix systems -func inode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - if st, ok := stat.Sys().(*syscall.Stat_t); ok { - return st.Ino, nil - } - return 0, fmt.Errorf("could not determine file inode") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go deleted file mode 100644 index 4dc28eaa89..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build freebsd - -package configload - -import ( - "fmt" - "os" - "syscall" -) - -// lookup the inode of a file on posix systems -func inode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - if st, ok := stat.Sys().(*syscall.Stat_t); ok { - return uint64(st.Ino), nil - } - return 0, fmt.Errorf("could not determine file inode") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go deleted file mode 100644 index 0d22e67264..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build windows - -package configload - -// no syscall.Stat_t on windows, return 0 for inodes -func inode(path string)
(uint64, error) { - return 0, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go deleted file mode 100644 index 0d12d7d2aa..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go +++ /dev/null @@ -1,101 +0,0 @@ -package configload - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/spf13/afero" -) - -// A Loader instance is the main entry-point for loading configurations via -// this package. -// -// It extends the general config-loading functionality in the parent package -// "configs" to support installation of modules from remote sources and -// loading full configurations using modules that were previously installed. -type Loader struct { - // parser is used to read configuration - parser *configs.Parser - - // modules is used to install and locate descendent modules that are - // referenced (directly or indirectly) from the root module. - modules moduleMgr -} - -// Config is used with NewLoader to specify configuration arguments for the -// loader. -type Config struct { - // ModulesDir is a path to a directory where descendent modules are - // (or should be) installed. (This is usually the - // .terraform/modules directory, in the common case where this package - // is being loaded from the main Terraform CLI package.) - ModulesDir string - - // Services is the service discovery client to use when locating remote - // module registry endpoints. If this is nil then registry sources are - // not supported, which should be true only in specialized circumstances - // such as in tests. - Services *disco.Disco -} - -// NewLoader creates and returns a loader that reads configuration from the -// real OS filesystem. -// -// The loader has some internal state about the modules that are currently -// installed, which is read from disk as part of this function. If that -// manifest cannot be read then an error will be returned. -func NewLoader(config *Config) (*Loader, error) { - fs := afero.NewOsFs() - parser := configs.NewParser(fs) - reg := registry.NewClient(config.Services, nil) - - ret := &Loader{ - parser: parser, - modules: moduleMgr{ - FS: afero.Afero{Fs: fs}, - CanInstall: true, - Dir: config.ModulesDir, - Services: config.Services, - Registry: reg, - }, - } - - err := ret.modules.readModuleManifestSnapshot() - if err != nil { - return nil, fmt.Errorf("failed to read module manifest: %s", err) - } - - return ret, nil -} - -// ModulesDir returns the path to the directory where the loader will look for -// the local cache of remote module packages. -func (l *Loader) ModulesDir() string { - return l.modules.Dir -} - -// RefreshModules updates the in-memory cache of the module manifest from the -// module manifest file on disk. This is not necessary in normal use because -// module installation and configuration loading are separate steps, but it -// can be useful in tests where module installation is done as a part of -// configuration loading by a helper function. -// -// Call this function after any module installation where an existing loader -// is already alive and may be used again later. -// -// An error is returned if the manifest file cannot be read. 
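RefreshModules below is deliberately safe to call on a nil *Loader, a small Go pattern worth calling out: pointer-receiver methods can run on nil receivers as long as they check before dereferencing. A minimal illustration with a hypothetical type:

```go
package main

import "fmt"

type loader struct{ dir string }

// refresh is a no-op on a nil receiver instead of panicking, mirroring
// the nil check in RefreshModules.
func (l *loader) refresh() error {
	if l == nil {
		return nil // nothing to do, then
	}
	fmt.Println("refreshing", l.dir)
	return nil
}

func main() {
	var l *loader                 // nil pointer
	fmt.Println(l.refresh())      // <nil>, no panic
}
```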
-func (l *Loader) RefreshModules() error { - if l == nil { - // Nothing to do, then. - return nil - } - return l.modules.readModuleManifestSnapshot() -} - -// Sources returns the source code cache for the underlying parser of this -// loader. This is a shorthand for l.Parser().Sources(). -func (l *Loader) Sources() map[string][]byte { - return l.parser.Sources() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go deleted file mode 100644 index bcfa733e60..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go +++ /dev/null @@ -1,105 +0,0 @@ -package configload - -import ( - "fmt" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// LoadConfig reads the Terraform module in the given directory and uses it as the -// root module to build the static module tree that represents a configuration, -// assuming that all required descendent modules have already been installed. -// -// If error diagnostics are returned, the returned configuration may be either -// nil or incomplete. In the latter case, cautious static analysis is possible -// in spite of the errors. -// -// LoadConfig performs the basic syntax and uniqueness validations that are -// required to process the individual modules, and also detects -func (l *Loader) LoadConfig(rootDir string) (*configs.Config, hcl.Diagnostics) { - rootMod, diags := l.parser.LoadConfigDir(rootDir) - if rootMod == nil { - return nil, diags - } - - cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad)) - diags = append(diags, cDiags...) - - return cfg, diags -} - -// moduleWalkerLoad is a configs.ModuleWalkerFunc for loading modules that -// are presumed to have already been installed. A different function -// (moduleWalkerInstall) is used for installation. -func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { - // Since we're just loading here, we expect that all referenced modules - // will be already installed and described in our manifest. However, we - // do verify that the manifest and the configuration are in agreement - // so that we can prompt the user to run "terraform init" if not. - - key := l.modules.manifest.ModuleKey(req.Path) - record, exists := l.modules.manifest[key] - - if !exists { - return nil, nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Module not installed", - Detail: "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", - Subject: &req.CallRange, - }, - } - } - - var diags hcl.Diagnostics - - // Check for inconsistencies between manifest and config - if req.SourceAddr != record.SourceAddr { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Module source has changed", - Detail: "The source address was changed since this module was installed. 
Run \"terraform init\" to install all modules required by this configuration.", - Subject: &req.SourceAddrRange, - }) - } - if len(req.VersionConstraint.Required) > 0 && record.Version == nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Module version requirements have changed", - Detail: "The version requirements have changed since this module was installed and the installed version is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.", - Subject: &req.SourceAddrRange, - }) - } - if record.Version != nil && !req.VersionConstraint.Required.Check(record.Version) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Module version requirements have changed", - Detail: fmt.Sprintf( - "The version requirements have changed since this module was installed and the installed version (%s) is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.", - record.Version, - ), - Subject: &req.SourceAddrRange, - }) - } - - mod, mDiags := l.parser.LoadConfigDir(record.Dir) - diags = append(diags, mDiags...) - if mod == nil { - // nil specifically indicates that the directory does not exist or - // cannot be read, so in this case we'll discard any generic diagnostics - // returned from LoadConfigDir and produce our own context-sensitive - // error message. - return nil, nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Module not installed", - Detail: fmt.Sprintf("This module's local cache directory %s could not be read. Run \"terraform init\" to install all modules required by this configuration.", record.Dir), - Subject: &req.CallRange, - }, - } - } - - return mod, record.Version, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go deleted file mode 100644 index 0772edc71f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go +++ /dev/null @@ -1,492 +0,0 @@ -package configload - -import ( - "fmt" - "io" - "os" - "path/filepath" - "sort" - "time" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" - "github.com/spf13/afero" -) - -// LoadConfigWithSnapshot is a variant of LoadConfig that also simultaneously -// creates an in-memory snapshot of the configuration files used, which can -// be later used to create a loader that may read only from this snapshot. -func (l *Loader) LoadConfigWithSnapshot(rootDir string) (*configs.Config, *Snapshot, hcl.Diagnostics) { - rootMod, diags := l.parser.LoadConfigDir(rootDir) - if rootMod == nil { - return nil, nil, diags - } - - snap := &Snapshot{ - Modules: map[string]*SnapshotModule{}, - } - walker := l.makeModuleWalkerSnapshot(snap) - cfg, cDiags := configs.BuildConfig(rootMod, walker) - diags = append(diags, cDiags...) - - addDiags := l.addModuleToSnapshot(snap, "", rootDir, "", nil) - diags = append(diags, addDiags...) - - return cfg, snap, diags -} - -// NewLoaderFromSnapshot creates a Loader that reads files only from the -// given snapshot. -// -// A snapshot-based loader cannot install modules, so calling InstallModules -// on the return value will cause a panic. 
-// -// A snapshot-based loader also has access only to configuration files. Its -// underlying parser does not have access to other files in the native -// filesystem, such as values files. For those, either use a normal loader -// (created by NewLoader) or use the configs.Parser API directly. -func NewLoaderFromSnapshot(snap *Snapshot) *Loader { - fs := snapshotFS{snap} - parser := configs.NewParser(fs) - - ret := &Loader{ - parser: parser, - modules: moduleMgr{ - FS: afero.Afero{Fs: fs}, - CanInstall: false, - manifest: snap.moduleManifest(), - }, - } - - return ret -} - -// Snapshot is an in-memory representation of the source files from a -// configuration, which can be used as an alternative configurations source -// for a loader with NewLoaderFromSnapshot. -// -// The primary purpose of a Snapshot is to build the configuration portion -// of a plan file (see ../../plans/planfile) so that it can later be reloaded -// and used to recover the exact configuration that the plan was built from. -type Snapshot struct { - // Modules is a map from opaque module keys (suitable for use as directory - // names on all supported operating systems) to the snapshot information - // about each module. - Modules map[string]*SnapshotModule -} - -// SnapshotModule represents a single module within a Snapshot. -type SnapshotModule struct { - // Dir is the path, relative to the root directory given when the - // snapshot was created, where the module appears in the snapshot's - // virtual filesystem. - Dir string - - // Files is a map from each configuration file filename for the - // module to a raw byte representation of the source file contents. - Files map[string][]byte - - // SourceAddr is the source address given for this module in configuration. - SourceAddr string `json:"Source"` - - // Version is the version of the module that is installed, or nil if - // the module is installed from a source that does not support versions. - Version *version.Version `json:"-"` -} - -// moduleManifest constructs a module manifest based on the contents of -// the receiving snapshot. -func (s *Snapshot) moduleManifest() modsdir.Manifest { - ret := make(modsdir.Manifest) - - for k, modSnap := range s.Modules { - ret[k] = modsdir.Record{ - Key: k, - Dir: modSnap.Dir, - SourceAddr: modSnap.SourceAddr, - Version: modSnap.Version, - } - } - - return ret -} - -// makeModuleWalkerSnapshot creates a configs.ModuleWalker that will exhibit -// the same lookup behaviors as l.moduleWalkerLoad but will additionally write -// source files from the referenced modules into the given snapshot. -func (l *Loader) makeModuleWalkerSnapshot(snap *Snapshot) configs.ModuleWalker { - return configs.ModuleWalkerFunc( - func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { - mod, v, diags := l.moduleWalkerLoad(req) - if diags.HasErrors() { - return mod, v, diags - } - - key := l.modules.manifest.ModuleKey(req.Path) - record, exists := l.modules.manifest[key] - - if !exists { - // Should never happen, since otherwise moduleWalkerLoader would've - // returned an error and we would've returned already. - panic(fmt.Sprintf("module %s is not present in manifest", key)) - } - - addDiags := l.addModuleToSnapshot(snap, key, record.Dir, record.SourceAddr, record.Version) - diags = append(diags, addDiags...) 
- - return mod, v, diags - }, - ) -} - -func (l *Loader) addModuleToSnapshot(snap *Snapshot, key string, dir string, sourceAddr string, v *version.Version) hcl.Diagnostics { - var diags hcl.Diagnostics - - primaryFiles, overrideFiles, moreDiags := l.parser.ConfigDirFiles(dir) - if moreDiags.HasErrors() { - // Any diagnostics we get here should be already present - // in diags, so it's weird if we get here but we'll allow it - // and return a general error message in that case. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Failed to read directory for module", - Detail: fmt.Sprintf("The source directory %s could not be read", dir), - }) - return diags - } - - snapMod := &SnapshotModule{ - Dir: dir, - Files: map[string][]byte{}, - SourceAddr: sourceAddr, - Version: v, - } - - files := make([]string, 0, len(primaryFiles)+len(overrideFiles)) - files = append(files, primaryFiles...) - files = append(files, overrideFiles...) - sources := l.Sources() // should be populated with all the files we need by now - for _, filePath := range files { - filename := filepath.Base(filePath) - src, exists := sources[filePath] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing source file for snapshot", - Detail: fmt.Sprintf("The source code for file %s could not be found to produce a configuration snapshot.", filePath), - }) - continue - } - snapMod.Files[filepath.Clean(filename)] = src - } - - snap.Modules[key] = snapMod - - return diags -} - -// snapshotFS is an implementation of afero.Fs that reads from a snapshot. -// -// This is not intended as a general-purpose filesystem implementation. Instead, -// it just supports the minimal functionality required to support the -// configuration loader and parser as an implementation detail of creating -// a loader from a snapshot. -type snapshotFS struct { - snap *Snapshot -} - -var _ afero.Fs = snapshotFS{} - -func (fs snapshotFS) Create(name string) (afero.File, error) { - return nil, fmt.Errorf("cannot create file inside configuration snapshot") -} - -func (fs snapshotFS) Mkdir(name string, perm os.FileMode) error { - return fmt.Errorf("cannot create directory inside configuration snapshot") -} - -func (fs snapshotFS) MkdirAll(name string, perm os.FileMode) error { - return fmt.Errorf("cannot create directories inside configuration snapshot") -} - -func (fs snapshotFS) Open(name string) (afero.File, error) { - - // Our "filesystem" is sparsely populated only with the directories - // mentioned by modules in our snapshot, so the high-level process - // for opening a file is: - // - Find the module snapshot corresponding to the containing directory - // - Find the file within that snapshot - // - Wrap the resulting byte slice in a snapshotFile to return - // - // The other possibility handled here is if the given name is for the - // module directory itself, in which case we'll return a snapshotDir - // instead. - // - // This function doesn't try to be incredibly robust in supporting - // different permutations of paths, etc because in practice we only - // need to support the path forms that our own loader and parser will - // generate. - - dir := filepath.Dir(name) - fn := filepath.Base(name) - directDir := filepath.Clean(name) - - // First we'll check to see if this is an exact path for a module directory. 
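The Open lookup below starts from three stdlib path derivations. A quick standalone check of what they produce for a hypothetical module file path:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	name := ".terraform/modules/foo/main.tf" // hypothetical lookup path

	fmt.Println(filepath.Dir(name))   // .terraform/modules/foo
	fmt.Println(filepath.Base(name))  // main.tf
	fmt.Println(filepath.Clean(name)) // .terraform/modules/foo/main.tf
}
```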
- // We need to do this first (rather than as part of the next loop below) - // because a module in a child directory of another module can otherwise - // appear to be a file in that parent directory. - for _, candidate := range fs.snap.Modules { - modDir := filepath.Clean(candidate.Dir) - if modDir == directDir { - // We've matched the module directory itself - filenames := make([]string, 0, len(candidate.Files)) - for n := range candidate.Files { - filenames = append(filenames, n) - } - sort.Strings(filenames) - return snapshotDir{ - filenames: filenames, - }, nil - } - } - - // If we get here then the given path isn't a module directory exactly, so - // we'll treat it as a file path and try to find a module directory it - // could be located in. - var modSnap *SnapshotModule - for _, candidate := range fs.snap.Modules { - modDir := filepath.Clean(candidate.Dir) - if modDir == dir { - modSnap = candidate - break - } - } - if modSnap == nil { - return nil, os.ErrNotExist - } - - src, exists := modSnap.Files[fn] - if !exists { - return nil, os.ErrNotExist - } - - return &snapshotFile{ - src: src, - }, nil -} - -func (fs snapshotFS) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { - return fs.Open(name) -} - -func (fs snapshotFS) Remove(name string) error { - return fmt.Errorf("cannot remove file inside configuration snapshot") -} - -func (fs snapshotFS) RemoveAll(path string) error { - return fmt.Errorf("cannot remove files inside configuration snapshot") -} - -func (fs snapshotFS) Rename(old, new string) error { - return fmt.Errorf("cannot rename file inside configuration snapshot") -} - -func (fs snapshotFS) Stat(name string) (os.FileInfo, error) { - f, err := fs.Open(name) - if err != nil { - return nil, err - } - _, isDir := f.(snapshotDir) - return snapshotFileInfo{ - name: filepath.Base(name), - isDir: isDir, - }, nil -} - -func (fs snapshotFS) Name() string { - return "ConfigSnapshotFS" -} - -func (fs snapshotFS) Chmod(name string, mode os.FileMode) error { - return fmt.Errorf("cannot set file mode inside configuration snapshot") -} - -func (fs snapshotFS) Chtimes(name string, atime, mtime time.Time) error { - return fmt.Errorf("cannot set file times inside configuration snapshot") -} - -type snapshotFile struct { - snapshotFileStub - src []byte - at int64 -} - -var _ afero.File = (*snapshotFile)(nil) - -func (f *snapshotFile) Read(p []byte) (n int, err error) { - if len(p) > 0 && f.at == int64(len(f.src)) { - return 0, io.EOF - } - if f.at > int64(len(f.src)) { - return 0, io.ErrUnexpectedEOF - } - if int64(len(f.src))-f.at >= int64(len(p)) { - n = len(p) - } else { - n = int(int64(len(f.src)) - f.at) - } - copy(p, f.src[f.at:f.at+int64(n)]) - f.at += int64(n) - return -} - -func (f *snapshotFile) ReadAt(p []byte, off int64) (n int, err error) { - f.at = off - return f.Read(p) -} - -func (f *snapshotFile) Seek(offset int64, whence int) (int64, error) { - switch whence { - case 0: - f.at = offset - case 1: - f.at += offset - case 2: - f.at = int64(len(f.src)) + offset - } - return f.at, nil -} - -type snapshotDir struct { - snapshotFileStub - filenames []string - at int -} - -var _ afero.File = snapshotDir{} - -func (f snapshotDir) Readdir(count int) ([]os.FileInfo, error) { - names, err := f.Readdirnames(count) - if err != nil { - return nil, err - } - ret := make([]os.FileInfo, len(names)) - for i, name := range names { - ret[i] = snapshotFileInfo{ - name: name, - isDir: false, - } - } - return ret, nil -} - -func (f snapshotDir) Readdirnames(count int) 
([]string, error) { - var outLen int - names := f.filenames[f.at:] - if count > 0 { - if len(names) < count { - outLen = len(names) - } else { - outLen = count - } - if len(names) == 0 { - return nil, io.EOF - } - } else { - outLen = len(names) - } - f.at += outLen - - return names[:outLen], nil -} - -// snapshotFileInfo is a minimal implementation of os.FileInfo to support our -// virtual filesystem from snapshots. -type snapshotFileInfo struct { - name string - isDir bool -} - -var _ os.FileInfo = snapshotFileInfo{} - -func (fi snapshotFileInfo) Name() string { - return fi.name -} - -func (fi snapshotFileInfo) Size() int64 { - // In practice, our parser and loader never call Size - return -1 -} - -func (fi snapshotFileInfo) Mode() os.FileMode { - return os.ModePerm -} - -func (fi snapshotFileInfo) ModTime() time.Time { - return time.Now() -} - -func (fi snapshotFileInfo) IsDir() bool { - return fi.isDir -} - -func (fi snapshotFileInfo) Sys() interface{} { - return nil -} - -type snapshotFileStub struct{} - -func (f snapshotFileStub) Close() error { - return nil -} - -func (f snapshotFileStub) Read(p []byte) (n int, err error) { - return 0, fmt.Errorf("cannot read") -} - -func (f snapshotFileStub) ReadAt(p []byte, off int64) (n int, err error) { - return 0, fmt.Errorf("cannot read") -} - -func (f snapshotFileStub) Seek(offset int64, whence int) (int64, error) { - return 0, fmt.Errorf("cannot seek") -} - -func (f snapshotFileStub) Write(p []byte) (n int, err error) { - return f.WriteAt(p, 0) -} - -func (f snapshotFileStub) WriteAt(p []byte, off int64) (n int, err error) { - return 0, fmt.Errorf("cannot write to file in snapshot") -} - -func (f snapshotFileStub) WriteString(s string) (n int, err error) { - return 0, fmt.Errorf("cannot write to file in snapshot") -} - -func (f snapshotFileStub) Name() string { - // in practice, the loader and parser never use this - return "" -} - -func (f snapshotFileStub) Readdir(count int) ([]os.FileInfo, error) { - return nil, fmt.Errorf("cannot use Readdir on a file") -} - -func (f snapshotFileStub) Readdirnames(count int) ([]string, error) { - return nil, fmt.Errorf("cannot use Readdir on a file") -} - -func (f snapshotFileStub) Stat() (os.FileInfo, error) { - return nil, fmt.Errorf("cannot stat") -} - -func (f snapshotFileStub) Sync() error { - return nil -} - -func (f snapshotFileStub) Truncate(size int64) error { - return fmt.Errorf("cannot write to file in snapshot") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go deleted file mode 100644 index 797f50d242..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go +++ /dev/null @@ -1,62 +0,0 @@ -package configload - -import ( - "os" - "path/filepath" - - "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/spf13/afero" -) - -type moduleMgr struct { - FS afero.Afero - - // CanInstall is true for a module manager that can support installation. - // - // This must be set only if FS is an afero.OsFs, because the installer - // (which uses go-getter) is not aware of the virtual filesystem - // abstraction and will always write into the "real" filesystem. - CanInstall bool - - // Dir is the path where descendent modules are (or will be) installed. 
- Dir string - - // Services is a service discovery client that will be used to find - // remote module registry endpoints. This object may be pre-loaded with - // cached discovery information. - Services *disco.Disco - - // Registry is a client for the module registry protocol, which is used - // when a module is requested from a registry source. - Registry *registry.Client - - // manifest tracks the currently-installed modules for this manager. - // - // The loader may read this. Only the installer may write to it, and - // after a set of updates are completed the installer must call - // writeModuleManifestSnapshot to persist a snapshot of the manifest - // to disk for use on subsequent runs. - manifest modsdir.Manifest -} - -func (m *moduleMgr) manifestSnapshotPath() string { - return filepath.Join(m.Dir, modsdir.ManifestSnapshotFilename) -} - -// readModuleManifestSnapshot loads a manifest snapshot from the filesystem. -func (m *moduleMgr) readModuleManifestSnapshot() error { - r, err := m.FS.Open(m.manifestSnapshotPath()) - if err != nil { - if os.IsNotExist(err) { - // We'll treat a missing file as an empty manifest - m.manifest = make(modsdir.Manifest) - return nil - } - return err - } - - m.manifest, err = modsdir.ReadManifestSnapshot(r) - return err -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go deleted file mode 100644 index 86ca9d10b7..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go +++ /dev/null @@ -1,43 +0,0 @@ -package configload - -import ( - "io/ioutil" - "os" - "testing" -) - -// NewLoaderForTests is a variant of NewLoader that is intended to be more -// convenient for unit tests. -// -// The loader's modules directory is a separate temporary directory created -// for each call. Along with the created loader, this function returns a -// cleanup function that should be called before the test completes in order -// to remove that temporary directory. -// -// In the case of any errors, t.Fatal (or similar) will be called to halt -// execution of the test, so the calling test does not need to handle errors -// itself. -func NewLoaderForTests(t *testing.T) (*Loader, func()) { - t.Helper() - - modulesDir, err := ioutil.TempDir("", "tf-configs") - if err != nil { - t.Fatalf("failed to create temporary modules dir: %s", err) - return nil, func() {} - } - - cleanup := func() { - os.RemoveAll(modulesDir) - } - - loader, err := NewLoader(&Config{ - ModulesDir: modulesDir, - }) - if err != nil { - cleanup() - t.Fatalf("failed to create config loader: %s", err) - return nil, func() {} - } - - return loader, cleanup -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go deleted file mode 100644 index 2c21ca5e57..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go +++ /dev/null @@ -1,123 +0,0 @@ -package configschema - -import ( - "github.com/hashicorp/hcl/v2/hcldec" -) - -var mapLabelNames = []string{"key"} - -// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body -// using the facilities in the hcldec package. 
-// -// The returned specification is guaranteed to return a value of the same type -// returned by method ImpliedType, but it may contain null values if any of the -// block attributes are defined as optional and/or computed respectively. -func (b *Block) DecoderSpec() hcldec.Spec { - ret := hcldec.ObjectSpec{} - if b == nil { - return ret - } - - for name, attrS := range b.Attributes { - ret[name] = attrS.decoderSpec(name) - } - - for name, blockS := range b.BlockTypes { - if _, exists := ret[name]; exists { - // This indicates an invalid schema, since it's not valid to - // define both an attribute and a block type of the same name. - // However, we don't raise this here since it's checked by - // InternalValidate. - continue - } - - childSpec := blockS.Block.DecoderSpec() - - // We can only validate 0 or 1 for MinItems, because a dynamic block - // may satisfy any number of min items while only having a single - // block in the config. We cannot validate MaxItems because a - // configuration may have any number of dynamic blocks - minItems := 0 - if blockS.MinItems > 1 { - minItems = 1 - } - - switch blockS.Nesting { - case NestingSingle, NestingGroup: - ret[name] = &hcldec.BlockSpec{ - TypeName: name, - Nested: childSpec, - Required: blockS.MinItems == 1, - } - if blockS.Nesting == NestingGroup { - ret[name] = &hcldec.DefaultSpec{ - Primary: ret[name], - Default: &hcldec.LiteralSpec{ - Value: blockS.EmptyValue(), - }, - } - } - case NestingList: - // We prefer to use a list where possible, since it makes our - // implied type more complete, but if there are any - // dynamically-typed attributes inside we must use a tuple - // instead, at the expense of our type then not being predictable. - if blockS.Block.ImpliedType().HasDynamicTypes() { - ret[name] = &hcldec.BlockTupleSpec{ - TypeName: name, - Nested: childSpec, - MinItems: minItems, - } - } else { - ret[name] = &hcldec.BlockListSpec{ - TypeName: name, - Nested: childSpec, - MinItems: minItems, - } - } - case NestingSet: - // We forbid dynamically-typed attributes inside NestingSet in - // InternalValidate, so we don't do anything special to handle - // that here. (There is no set analog to tuple and object types, - // because cty's set implementation depends on knowing the static - // type in order to properly compute its internal hashes.) - ret[name] = &hcldec.BlockSetSpec{ - TypeName: name, - Nested: childSpec, - MinItems: minItems, - } - case NestingMap: - // We prefer to use a list where possible, since it makes our - // implied type more complete, but if there are any - // dynamically-typed attributes inside we must use a tuple - // instead, at the expense of our type then not being predictable. - if blockS.Block.ImpliedType().HasDynamicTypes() { - ret[name] = &hcldec.BlockObjectSpec{ - TypeName: name, - Nested: childSpec, - LabelNames: mapLabelNames, - } - } else { - ret[name] = &hcldec.BlockMapSpec{ - TypeName: name, - Nested: childSpec, - LabelNames: mapLabelNames, - } - } - default: - // Invalid nesting type is just ignored. It's checked by - // InternalValidate. 
- continue - } - } - - return ret -} - -func (a *Attribute) decoderSpec(name string) hcldec.Spec { - return &hcldec.AttrSpec{ - Name: name, - Type: a.Type, - Required: a.Required, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/doc.go deleted file mode 100644 index caf8d730c1..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -// Package configschema contains types for describing the expected structure -// of a configuration block whose shape is not known until runtime. -// -// For example, this is used to describe the expected contents of a resource -// configuration block, which is defined by the corresponding provider plugin -// and thus not compiled into Terraform core. -// -// A configschema primarily describes the shape of configuration, but it is -// also suitable for use with other structures derived from the configuration, -// such as the cached state of a resource or a resource diff. -// -// This package should not be confused with the package helper/schema, which -// is the higher-level helper library used to implement providers themselves. -package configschema diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go deleted file mode 100644 index 51f51cebcc..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go +++ /dev/null @@ -1,21 +0,0 @@ -package configschema - -import ( - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/zclconf/go-cty/cty" -) - -// ImpliedType returns the cty.Type that would result from decoding a -// configuration block using the receiving block schema. -// -// ImpliedType always returns a result, even if the given schema is -// inconsistent. Code that creates configschema.Block objects should be -// tested using the InternalValidate method to detect any inconsistencies -// that would cause this method to fall back on defaults and assumptions. -func (b *Block) ImpliedType() cty.Type { - if b == nil { - return cty.EmptyObject - } - - return hcldec.ImpliedType(b.DecoderSpec()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go deleted file mode 100644 index ebf1abbab1..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go +++ /dev/null @@ -1,105 +0,0 @@ -package configschema - -import ( - "fmt" - "regexp" - - "github.com/zclconf/go-cty/cty" - - multierror "github.com/hashicorp/go-multierror" -) - -var validName = regexp.MustCompile(`^[a-z0-9_]+$`) - -// InternalValidate returns an error if the receiving block and its child -// schema definitions have any inconsistencies with the documented rules for -// valid schema. -// -// This is intended to be used within unit tests to detect when a given -// schema is invalid.
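The validator that follows accumulates every problem it finds with hashicorp/go-multierror rather than stopping at the first. The basic Append-onto-nil pattern it relies on, with made-up messages:

```go
package main

import (
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	// Append works on a nil error, allocating the multierror lazily, so
	// validation code can thread one err variable through every check.
	var err error
	err = multierror.Append(err, fmt.Errorf("name: must set Optional, Required or Computed"))
	err = multierror.Append(err, fmt.Errorf("name: Type must be set to something other than cty.NilType"))

	fmt.Println(err) // "2 errors occurred:" followed by both messages
}
```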
-func (b *Block) InternalValidate() error { - if b == nil { - return fmt.Errorf("top-level block schema is nil") - } - return b.internalValidate("", nil) - -} - -func (b *Block) internalValidate(prefix string, err error) error { - for name, attrS := range b.Attributes { - if attrS == nil { - err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name)) - continue - } - if !validName.MatchString(name) { - err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) - } - if attrS.Optional == false && attrS.Required == false && attrS.Computed == false { - err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name)) - } - if attrS.Optional && attrS.Required { - err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name)) - } - if attrS.Computed && attrS.Required { - err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name)) - } - if attrS.Type == cty.NilType { - err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name)) - } - } - - for name, blockS := range b.BlockTypes { - if blockS == nil { - err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name)) - continue - } - - if _, isAttr := b.Attributes[name]; isAttr { - err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name)) - } else if !validName.MatchString(name) { - err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) - } - - if blockS.MinItems < 0 || blockS.MaxItems < 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than or equal to zero", prefix, name)) - } - - switch blockS.Nesting { - case NestingSingle: - switch { - case blockS.MinItems != blockS.MaxItems: - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name)) - case blockS.MinItems < 0 || blockS.MinItems > 1: - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) - } - case NestingGroup: - if blockS.MinItems != 0 || blockS.MaxItems != 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name)) - } - case NestingList, NestingSet: - if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) - } - if blockS.Nesting == NestingSet { - ety := blockS.Block.ImpliedType() - if ety.HasDynamicTypes() { - // This is not permitted because the HCL (cty) set implementation - // needs to know the exact type of set elements in order to - // properly hash them, and so can't support mixed types. - err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name)) - } - } - case NestingMap: - if blockS.MinItems != 0 || blockS.MaxItems != 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) - } - default: - err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting)) - } - - subPrefix := prefix + name + "."
- err = blockS.Block.internalValidate(subPrefix, err) - } - - return err -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go deleted file mode 100644 index 0be3b8fa35..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go +++ /dev/null @@ -1,38 +0,0 @@ -package configschema - -// NoneRequired returns a deep copy of the receiver with any required -// attributes translated to optional. -func (b *Block) NoneRequired() *Block { - ret := &Block{} - - if b.Attributes != nil { - ret.Attributes = make(map[string]*Attribute, len(b.Attributes)) - } - for name, attrS := range b.Attributes { - ret.Attributes[name] = attrS.forceOptional() - } - - if b.BlockTypes != nil { - ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes)) - } - for name, blockS := range b.BlockTypes { - ret.BlockTypes[name] = blockS.noneRequired() - } - - return ret -} - -func (b *NestedBlock) noneRequired() *NestedBlock { - ret := *b - ret.Block = *(ret.Block.NoneRequired()) - ret.MinItems = 0 - ret.MaxItems = 0 - return &ret -} - -func (a *Attribute) forceOptional() *Attribute { - ret := *a - ret.Optional = true - ret.Required = false - return &ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/schema.go deleted file mode 100644 index b41a309688..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/schema.go +++ /dev/null @@ -1,155 +0,0 @@ -package configschema - -import ( - "github.com/zclconf/go-cty/cty" -) - -// StringKind represents the format a string is in. -type StringKind int - -const ( - // StringPlain indicates a string is plain-text and requires no processing for display. - StringPlain StringKind = iota - // StringMarkdown indicates a string is in markdown format and may - // require additional processing to display. - StringMarkdown -) - -// Block represents a configuration block. -// -// "Block" here is a logical grouping construct, though it happens to map -// directly onto the physical block syntax of Terraform's native configuration -// syntax. It may be a more a matter of convention in other syntaxes, such as -// JSON. -// -// When converted to a value, a Block always becomes an instance of an object -// type derived from its defined attributes and nested blocks -type Block struct { - // Attributes describes any attributes that may appear directly inside - // the block. - Attributes map[string]*Attribute - - // BlockTypes describes any nested block types that may appear directly - // inside the block. - BlockTypes map[string]*NestedBlock - - // Description and DescriptionKind contain a user facing description of the block - // and the format of that string. - Description string - DescriptionKind StringKind - - // Deprecated indicates whether the block has been marked as deprecated in the - // provider and usage should be discouraged. - Deprecated bool -} - -// Attribute represents a configuration attribute, within a block. -type Attribute struct { - // Type is a type specification that the attribute's value must conform to. - Type cty.Type - - // Description is an English-language description of the purpose and - // usage of the attribute. 
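// NoneRequired, deleted above, relaxes every attribute by building a deep
// copy. A standalone sketch of the copy-and-relax pattern it uses, with
// illustrative local types:
package main

import "fmt"

type attribute struct{ Optional, Required bool }

// forceOptional returns a relaxed copy; the original value is untouched,
// mirroring how the deleted method copies before mutating.
func forceOptional(a attribute) attribute {
	a.Optional = true
	a.Required = false
	return a
}

func main() {
	orig := attribute{Required: true}
	relaxed := forceOptional(orig)
	fmt.Printf("orig=%+v relaxed=%+v\n", orig, relaxed)
}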
A description should be concise and use only - // one or two sentences, leaving full definition to longer-form - // documentation defined elsewhere. - Description string - DescriptionKind StringKind - - // Required, if set to true, specifies that an omitted or null value is - // not permitted. - Required bool - - // Optional, if set to true, specifies that an omitted or null value is - // permitted. This field conflicts with Required. - Optional bool - - // Computed, if set to true, specifies that the value comes from the - // provider rather than from configuration. If combined with Optional, - // then the config may optionally provide an overridden value. - Computed bool - - // Sensitive, if set to true, indicates that an attribute may contain - // sensitive information. - // - // At present nothing is done with this information, but callers are - // encouraged to set it where appropriate so that it may be used in the - // future to help Terraform mask sensitive information. (Terraform - // currently achieves this in a limited sense via other mechanisms.) - Sensitive bool - - // Deprecated indicates whether the attribute has been marked as deprecated in the - // provider and usage should be discouraged. - Deprecated bool -} - -// NestedBlock represents the embedding of one block within another. -type NestedBlock struct { - // Block is the description of the block that's nested. - Block - - // Nesting provides the nesting mode for the child block, which determines - // how many instances of the block are allowed, how many labels it expects, - // and how the resulting data will be converted into a data structure. - Nesting NestingMode - - // MinItems and MaxItems set, for the NestingList and NestingSet nesting - // modes, lower and upper limits on the number of child blocks allowed - // of the given type. If both are left at zero, no limit is applied. - // - // As a special case, both values can be set to 1 for NestingSingle in - // order to indicate that a particular single block is required. - // - // These fields are ignored for other nesting modes and must both be left - // at zero. - MinItems, MaxItems int -} - -// NestingMode is an enumeration of modes for nesting blocks inside other -// blocks. -type NestingMode int - -//go:generate go run golang.org/x/tools/cmd/stringer -type=NestingMode - -const ( - nestingModeInvalid NestingMode = iota - - // NestingSingle indicates that only a single instance of a given - // block type is permitted, with no labels, and its content should be - // provided directly as an object value. - NestingSingle - - // NestingGroup is similar to NestingSingle in that it calls for only a - // single instance of a given block type with no labels, but it additonally - // guarantees that its result will never be null, even if the block is - // absent, and instead the nested attributes and blocks will be treated - // as absent in that case. (Any required attributes or blocks within the - // nested block are not enforced unless the block is explicitly present - // in the configuration, so they are all effectively optional when the - // block is not present.) - // - // This is useful for the situation where a remote API has a feature that - // is always enabled but has a group of settings related to that feature - // that themselves have default values. 
By using NestingGroup instead of - // NestingSingle in that case, generated plans will show the block as - // present even when not present in configuration, thus allowing any - // default values within to be displayed to the user. - NestingGroup - - // NestingList indicates that multiple blocks of the given type are - // permitted, with no labels, and that their corresponding objects should - // be provided in a list. - NestingList - - // NestingSet indicates that multiple blocks of the given type are - // permitted, with no labels, and that their corresponding objects should - // be provided in a set. - NestingSet - - // NestingMap indicates that multiple blocks of the given type are - // permitted, each with a single label, and that their corresponding - // objects should be provided in a map whose keys are the labels. - // - // It's an error, therefore, to use the same label value on multiple - // blocks. - NestingMap -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go deleted file mode 100644 index 446705bafb..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go +++ /dev/null @@ -1,173 +0,0 @@ -package configschema - -import ( - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// StaticValidateTraversal checks whether the given traversal (which must be -// relative) refers to a construct in the receiving schema, returning error -// diagnostics if any problems are found. -// -// This method is "optimistic" in that it will not return errors for possible -// problems that cannot be detected statically. It is possible that an -// traversal which passed static validation will still fail when evaluated. -func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics { - if !traversal.IsRelative() { - panic("StaticValidateTraversal on absolute traversal") - } - if len(traversal) == 0 { - return nil - } - - var diags tfdiags.Diagnostics - - next := traversal[0] - after := traversal[1:] - - var name string - switch step := next.(type) { - case hcl.TraverseAttr: - name = step.Name - case hcl.TraverseIndex: - // No other traversal step types are allowed directly at a block. - // If it looks like the user was trying to use index syntax to - // access an attribute then we'll produce a specialized message. - key := step.Key - if key.Type() == cty.String && key.IsKnown() && !key.IsNull() { - maybeName := key.AsString() - if hclsyntax.ValidIdentifier(maybeName) { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid index operation`, - Detail: fmt.Sprintf(`Only attribute access is allowed here. Did you mean to access attribute %q using the dot operator?`, maybeName), - Subject: &step.SrcRange, - }) - return diags - } - } - // If it looks like some other kind of index then we'll use a generic error. 
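// The TraverseIndex case above special-cases string keys that look like
// identifiers, e.g. foo["bar"], and suggests foo.bar instead. A small
// sketch of the identifier test it relies on (hclsyntax is the real HCL
// package; hintForIndexKey is illustrative):
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func hintForIndexKey(key string) string {
	if hclsyntax.ValidIdentifier(key) {
		return fmt.Sprintf("did you mean to access attribute %q using the dot operator?", key)
	}
	return "only attribute access is allowed here, using the dot operator"
}

func main() {
	fmt.Println(hintForIndexKey("bar"))    // suggests .bar
	fmt.Println(hintForIndexKey("not/ok")) // generic message
}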
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid index operation`, - Detail: `Only attribute access is allowed here, using the dot operator.`, - Subject: &step.SrcRange, - }) - return diags - default: - // No other traversal types should appear in a normal valid traversal, - // but we'll handle this with a generic error anyway to be robust. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid operation`, - Detail: `Only attribute access is allowed here, using the dot operator.`, - Subject: next.SourceRange().Ptr(), - }) - return diags - } - - if attrS, exists := b.Attributes[name]; exists { - // For attribute validation we will just apply the rest of the - // traversal to an unknown value of the attribute type and pass - // through HCL's own errors, since we don't want to replicate all of - // HCL's type checking rules here. - val := cty.UnknownVal(attrS.Type) - _, hclDiags := after.TraverseRel(val) - diags = diags.Append(hclDiags) - return diags - } - - if blockS, exists := b.BlockTypes[name]; exists { - moreDiags := blockS.staticValidateTraversal(name, after) - diags = diags.Append(moreDiags) - return diags - } - - // If we get here then the name isn't valid at all. We'll collect up - // all of the names that _are_ valid to use as suggestions. - var suggestions []string - for name := range b.Attributes { - suggestions = append(suggestions, name) - } - for name := range b.BlockTypes { - suggestions = append(suggestions, name) - } - sort.Strings(suggestions) - suggestion := didyoumean.NameSuggestion(name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Unsupported attribute`, - Detail: fmt.Sprintf(`This object has no argument, nested block, or exported attribute named %q.%s`, name, suggestion), - Subject: next.SourceRange().Ptr(), - }) - - return diags -} - -func (b *NestedBlock) staticValidateTraversal(typeName string, traversal hcl.Traversal) tfdiags.Diagnostics { - if b.Nesting == NestingSingle || b.Nesting == NestingGroup { - // Single blocks are easy: just pass right through. - return b.Block.StaticValidateTraversal(traversal) - } - - if len(traversal) == 0 { - // It's always valid to access a nested block's attribute directly. - return nil - } - - var diags tfdiags.Diagnostics - next := traversal[0] - after := traversal[1:] - - switch b.Nesting { - - case NestingSet: - // Can't traverse into a set at all, since it does not have any keys - // to index with. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Cannot index a set value`, - Detail: fmt.Sprintf(`Block type %q is represented by a set of objects, and set elements do not have addressable keys. 
To find elements matching specific criteria, use a "for" expression with an "if" clause.`, typeName), - Subject: next.SourceRange().Ptr(), - }) - return diags - - case NestingList: - if _, ok := next.(hcl.TraverseIndex); ok { - moreDiags := b.Block.StaticValidateTraversal(after) - diags = diags.Append(moreDiags) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid operation`, - Detail: fmt.Sprintf(`Block type %q is represented by a list of objects, so it must be indexed using a numeric key, like .%s[0].`, typeName, typeName), - Subject: next.SourceRange().Ptr(), - }) - } - return diags - - case NestingMap: - // Both attribute and index steps are valid for maps, so we'll just - // pass through here and let normal evaluation catch an - // incorrectly-typed index key later, if present. - moreDiags := b.Block.StaticValidateTraversal(after) - diags = diags.Append(moreDiags) - return diags - - default: - // Invalid nesting type is just ignored. It's checked by - // InternalValidate. (Note that we handled NestingSingle separately - // back at the start of this function.) - return nil - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go deleted file mode 100644 index 036c2d6c30..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go +++ /dev/null @@ -1,23 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) { - var ret []hcl.Traversal - exprs, diags := hcl.ExprList(attr.Expr) - - for _, expr := range exprs { - expr, shimDiags := shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.AbsTraversalForExpr(expr) - diags = append(diags, travDiags...) - if len(traversal) != 0 { - ret = append(ret, traversal) - } - } - - return ret, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go deleted file mode 100644 index f01eb79f40..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Package configs contains types that represent Terraform configurations and -// the different elements thereof. -// -// The functionality in this package can be used for some static analyses of -// Terraform configurations, but this package generally exposes representations -// of the configuration source code rather than the result of evaluating these -// objects. The sibling package "lang" deals with evaluation of structures -// and expressions in the configuration. -// -// Due to its close relationship with HCL, this package makes frequent use -// of types from the HCL API, including raw HCL diagnostic messages. Such -// diagnostics can be converted into Terraform-flavored diagnostics, if needed, -// using functions in the sibling package tfdiags. -// -// The Parser type is the main entry-point into this package. The LoadConfigDir -// method can be used to load a single module directory, and then a full -// configuration (including any descendent modules) can be produced using -// the top-level BuildConfig method. 
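// decodeDependsOn, deleted above, combines two HCL helpers: hcl.ExprList
// to require a list expression, and hcl.AbsTraversalForExpr to turn each
// element into a traversal. A self-contained sketch of that pairing (real
// HCL APIs; the parsed text is arbitrary):
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	expr, parseDiags := hclsyntax.ParseExpression(
		[]byte(`[aws_instance.web]`), "main.tf", hcl.Pos{Line: 1, Column: 1})
	if parseDiags.HasErrors() {
		panic(parseDiags.Error())
	}
	exprs, diags := hcl.ExprList(expr)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	for _, e := range exprs {
		traversal, travDiags := hcl.AbsTraversalForExpr(e)
		if travDiags.HasErrors() {
			panic(travDiags.Error())
		}
		fmt.Println(traversal.RootName()) // aws_instance
	}
}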
-package configs diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/paths.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/paths.go deleted file mode 100644 index 3403c026bf..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/paths.go +++ /dev/null @@ -1,276 +0,0 @@ -package hcl2shim - -import ( - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/zclconf/go-cty/cty" -) - -// RequiresReplace takes a list of flatmapped paths from a -// InstanceDiff.Attributes along with the corresponding cty.Type, and returns -// the list of the cty.Paths that are flagged as causing the resource -// replacement (RequiresNew). -// This will filter out redundant paths, paths that refer to flatmapped indexes -// (e.g. "#", "%"), and will return any changes within a set as the path to the -// set itself. -func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) { - var paths []cty.Path - - for _, attr := range attrs { - p, err := requiresReplacePath(attr, ty) - if err != nil { - return nil, err - } - - paths = append(paths, p) - } - - // now trim off any trailing paths that aren't GetAttrSteps, since only an - // attribute itself can require replacement - paths = trimPaths(paths) - - // There may be redundant paths due to set elements or index attributes - // Do some ugly n^2 filtering, but these are always fairly small sets. - for i := 0; i < len(paths)-1; i++ { - for j := i + 1; j < len(paths); j++ { - if reflect.DeepEqual(paths[i], paths[j]) { - // swap the tail and slice it off - paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j] - paths = paths[:len(paths)-1] - j-- - } - } - } - - return paths, nil -} - -// trimPaths removes any trailing steps that aren't of type GetAttrSet, since -// only an attribute itself can require replacement -func trimPaths(paths []cty.Path) []cty.Path { - var trimmed []cty.Path - for _, path := range paths { - path = trimPath(path) - if len(path) > 0 { - trimmed = append(trimmed, path) - } - } - return trimmed -} - -func trimPath(path cty.Path) cty.Path { - for len(path) > 0 { - _, isGetAttr := path[len(path)-1].(cty.GetAttrStep) - if isGetAttr { - break - } - path = path[:len(path)-1] - } - return path -} - -// requiresReplacePath takes a key from a flatmap along with the cty.Type -// describing the structure, and returns the cty.Path that would be used to -// reference the nested value in the data structure. -// This is used specifically to record the RequiresReplace attributes from a -// ResourceInstanceDiff. 
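// RequiresReplace above maps legacy flatmap keys onto cty paths. The
// target representation, built by hand for a key like
// "network_interface.0.name" (cty is the real library; the attribute
// names are made up):
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Flatmap "network_interface.0.name" expressed as a cty.Path:
	path := cty.Path{
		cty.GetAttrStep{Name: "network_interface"},
		cty.IndexStep{Key: cty.NumberIntVal(0)},
		cty.GetAttrStep{Name: "name"},
	}
	fmt.Printf("%#v\n", path)
}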
-func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) { - if k == "" { - return nil, nil - } - if !ty.IsObjectType() { - panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty)) - } - - path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes()) - if err != nil { - return path, fmt.Errorf("[%s] %s", k, err) - } - return path, nil -} - -func pathSplit(p string) (string, string) { - parts := strings.SplitN(p, ".", 2) - head := parts[0] - rest := "" - if len(parts) > 1 { - rest = parts[1] - } - return head, rest -} - -func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) { - k, rest := pathSplit(key) - - path := cty.Path{cty.GetAttrStep{Name: k}} - - ty, ok := atys[k] - if !ok { - return path, fmt.Errorf("attribute %q not found", k) - } - - if rest == "" { - return path, nil - } - - p, err := pathFromFlatmapKeyValue(rest, ty) - if err != nil { - return path, err - } - - return append(path, p...), nil -} - -func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) { - var path cty.Path - var err error - - switch { - case ty.IsPrimitiveType(): - err = fmt.Errorf("invalid step %q with type %#v", key, ty) - case ty.IsObjectType(): - path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes()) - case ty.IsTupleType(): - path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes()) - case ty.IsMapType(): - path, err = pathFromFlatmapKeyMap(key, ty) - case ty.IsListType(): - path, err = pathFromFlatmapKeyList(key, ty) - case ty.IsSetType(): - path, err = pathFromFlatmapKeySet(key, ty) - default: - err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName()) - } - - if err != nil { - return path, err - } - - return path, nil -} - -func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) { - var path cty.Path - var err error - - k, rest := pathSplit(key) - - // we don't need to convert the index keys to paths - if k == "#" { - return path, nil - } - - idx, err := strconv.Atoi(k) - if err != nil { - return path, err - } - - path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} - - if idx >= len(etys) { - return path, fmt.Errorf("index %s out of range in %#v", key, etys) - } - - if rest == "" { - return path, nil - } - - ty := etys[idx] - - p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) - if err != nil { - return path, err - } - - return append(path, p...), nil -} - -func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) { - var path cty.Path - var err error - - k, rest := key, "" - if !ty.ElementType().IsPrimitiveType() { - k, rest = pathSplit(key) - } - - // we don't need to convert the index keys to paths - if k == "%" { - return path, nil - } - - path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}} - - if rest == "" { - return path, nil - } - - p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) - if err != nil { - return path, err - } - - return append(path, p...), nil -} - -func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) { - var path cty.Path - var err error - - k, rest := pathSplit(key) - - // we don't need to convert the index keys to paths - if key == "#" { - return path, nil - } - - idx, err := strconv.Atoi(k) - if err != nil { - return path, err - } - - path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} - - if rest == "" { - return path, nil - } - - p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) - if err != nil { - return path, err - } - - return append(path, p...), nil -} - -func 
pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) { - // once we hit a set, we can't return consistent paths, so just mark the - // set as a whole changed. - return nil, nil -} - -// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for -// use in generating legacy style diffs. -func FlatmapKeyFromPath(path cty.Path) string { - var parts []string - - for _, step := range path { - switch step := step.(type) { - case cty.GetAttrStep: - parts = append(parts, step.Name) - case cty.IndexStep: - switch ty := step.Key.Type(); { - case ty == cty.String: - parts = append(parts, step.Key.AsString()) - case ty == cty.Number: - i, _ := step.Key.AsBigFloat().Int64() - parts = append(parts, strconv.Itoa(int(i))) - } - } - } - - return strings.Join(parts, ".") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/single_attr_body.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/single_attr_body.go deleted file mode 100644 index 68f48da8f3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/single_attr_body.go +++ /dev/null @@ -1,85 +0,0 @@ -package hcl2shim - -import ( - "fmt" - - hcl2 "github.com/hashicorp/hcl/v2" -) - -// SingleAttrBody is a weird implementation of hcl2.Body that acts as if -// it has a single attribute whose value is the given expression. -// -// This is used to shim Resource.RawCount and Output.RawConfig to behave -// more like they do in the old HCL loader. -type SingleAttrBody struct { - Name string - Expr hcl2.Expression -} - -var _ hcl2.Body = SingleAttrBody{} - -func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) { - content, all, diags := b.content(schema) - if !all { - // This should never happen because this body implementation should only - // be used by code that is aware that it's using a single-attr body. - diags = append(diags, &hcl2.Diagnostic{ - Severity: hcl2.DiagError, - Summary: "Invalid attribute", - Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name), - Subject: b.Expr.Range().Ptr(), - }) - } - return content, diags -} - -func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) { - content, all, diags := b.content(schema) - var remain hcl2.Body - if all { - // If the request matched the one attribute we represent, then the - // remaining body is empty. 
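// FlatmapKeyFromPath above goes the other way, from a cty.Path back to a
// legacy flatmap key. A compact standalone mirror of that loop (real cty
// API; flatmapKey is illustrative):
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/zclconf/go-cty/cty"
)

func flatmapKey(path cty.Path) string {
	var parts []string
	for _, step := range path {
		switch s := step.(type) {
		case cty.GetAttrStep:
			parts = append(parts, s.Name)
		case cty.IndexStep:
			if s.Key.Type() == cty.Number {
				i, _ := s.Key.AsBigFloat().Int64()
				parts = append(parts, strconv.FormatInt(i, 10))
			} else {
				parts = append(parts, s.Key.AsString())
			}
		}
	}
	return strings.Join(parts, ".")
}

func main() {
	p := cty.Path{cty.GetAttrStep{Name: "tags"}, cty.IndexStep{Key: cty.StringVal("env")}}
	fmt.Println(flatmapKey(p)) // tags.env
}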
- remain = hcl2.EmptyBody() - } else { - remain = b - } - return content, remain, diags -} - -func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) { - ret := &hcl2.BodyContent{} - all := false - var diags hcl2.Diagnostics - - for _, attrS := range schema.Attributes { - if attrS.Name == b.Name { - attrs, _ := b.JustAttributes() - ret.Attributes = attrs - all = true - } else if attrS.Required { - diags = append(diags, &hcl2.Diagnostic{ - Severity: hcl2.DiagError, - Summary: "Missing attribute", - Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name), - Subject: b.Expr.Range().Ptr(), - }) - } - } - - return ret, all, diags -} - -func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) { - return hcl2.Attributes{ - b.Name: { - Expr: b.Expr, - Name: b.Name, - NameRange: b.Expr.Range(), - Range: b.Expr.Range(), - }, - }, nil -} - -func (b SingleAttrBody) MissingItemRange() hcl2.Range { - return b.Expr.Range() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go deleted file mode 100644 index 78223c3b86..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go +++ /dev/null @@ -1,404 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// Module is a container for a set of configuration constructs that are -// evaluated within a common namespace. -type Module struct { - // SourceDir is the filesystem directory that the module was loaded from. - // - // This is populated automatically only for configurations loaded with - // LoadConfigDir. If the parser is using a virtual filesystem then the - // path here will be in terms of that virtual filesystem. - - // Any other caller that constructs a module directly with NewModule may - // assign a suitable value to this attribute before using it for other - // purposes. It should be treated as immutable by all consumers of Module - // values. - SourceDir string - - CoreVersionConstraints []VersionConstraint - - Backend *Backend - ProviderConfigs map[string]*Provider - ProviderRequirements map[string][]VersionConstraint - - Variables map[string]*Variable - Locals map[string]*Local - Outputs map[string]*Output - - ModuleCalls map[string]*ModuleCall - - ManagedResources map[string]*Resource - DataResources map[string]*Resource -} - -// File describes the contents of a single configuration file. -// -// Individual files are not usually used alone, but rather combined together -// with other files (conventionally, those in the same directory) to produce -// a *Module, using NewModule. -// -// At the level of an individual file we represent directly the structural -// elements present in the file, without any attempt to detect conflicting -// declarations. A File object can therefore be used for some basic static -// analysis of individual elements, but must be built into a Module to detect -// duplicate declarations. 
-type File struct { - CoreVersionConstraints []VersionConstraint - - Backends []*Backend - ProviderConfigs []*Provider - ProviderRequirements []*ProviderRequirement - - Variables []*Variable - Locals []*Local - Outputs []*Output - - ModuleCalls []*ModuleCall - - ManagedResources []*Resource - DataResources []*Resource -} - -// NewModule takes a list of primary files and a list of override files and -// produces a *Module by combining the files together. -// -// If there are any conflicting declarations in the given files -- for example, -// if the same variable name is defined twice -- then the resulting module -// will be incomplete and error diagnostics will be returned. Careful static -// analysis of the returned Module is still possible in this case, but the -// module will probably not be semantically valid. -func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) { - var diags hcl.Diagnostics - mod := &Module{ - ProviderConfigs: map[string]*Provider{}, - ProviderRequirements: map[string][]VersionConstraint{}, - Variables: map[string]*Variable{}, - Locals: map[string]*Local{}, - Outputs: map[string]*Output{}, - ModuleCalls: map[string]*ModuleCall{}, - ManagedResources: map[string]*Resource{}, - DataResources: map[string]*Resource{}, - } - - for _, file := range primaryFiles { - fileDiags := mod.appendFile(file) - diags = append(diags, fileDiags...) - } - - for _, file := range overrideFiles { - fileDiags := mod.mergeFile(file) - diags = append(diags, fileDiags...) - } - - return mod, diags -} - -// ResourceByAddr returns the configuration for the resource with the given -// address, or nil if there is no such resource. -func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { - key := addr.String() - switch addr.Mode { - case addrs.ManagedResourceMode: - return m.ManagedResources[key] - case addrs.DataResourceMode: - return m.DataResources[key] - default: - return nil - } -} - -func (m *Module) appendFile(file *File) hcl.Diagnostics { - var diags hcl.Diagnostics - - for _, constraint := range file.CoreVersionConstraints { - // If there are any conflicting requirements then we'll catch them - // when we actually check these constraints. - m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) - } - - for _, b := range file.Backends { - if m.Backend != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate backend configuration", - Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange), - Subject: &b.DeclRange, - }) - continue - } - m.Backend = b - } - - for _, pc := range file.ProviderConfigs { - key := pc.moduleUniqueKey() - if existing, exists := m.ProviderConfigs[key]; exists { - if existing.Alias == "" { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider configuration", - Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange), - Subject: &pc.DeclRange, - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider configuration", - Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. 
Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange), - Subject: &pc.DeclRange, - }) - } - continue - } - m.ProviderConfigs[key] = pc - } - - for _, reqd := range file.ProviderRequirements { - m.ProviderRequirements[reqd.Name] = append(m.ProviderRequirements[reqd.Name], reqd.Requirement) - } - - for _, v := range file.Variables { - if existing, exists := m.Variables[v.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate variable declaration", - Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange), - Subject: &v.DeclRange, - }) - } - m.Variables[v.Name] = v - } - - for _, l := range file.Locals { - if existing, exists := m.Locals[l.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate local value definition", - Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange), - Subject: &l.DeclRange, - }) - } - m.Locals[l.Name] = l - } - - for _, o := range file.Outputs { - if existing, exists := m.Outputs[o.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate output definition", - Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange), - Subject: &o.DeclRange, - }) - } - m.Outputs[o.Name] = o - } - - for _, mc := range file.ModuleCalls { - if existing, exists := m.ModuleCalls[mc.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate module call", - Detail: fmt.Sprintf("An module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange), - Subject: &mc.DeclRange, - }) - } - m.ModuleCalls[mc.Name] = mc - } - - for _, r := range file.ManagedResources { - key := r.moduleUniqueKey() - if existing, exists := m.ManagedResources[key]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type), - Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), - Subject: &r.DeclRange, - }) - continue - } - m.ManagedResources[key] = r - } - - for _, r := range file.DataResources { - key := r.moduleUniqueKey() - if existing, exists := m.DataResources[key]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type), - Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), - Subject: &r.DeclRange, - }) - continue - } - m.DataResources[key] = r - } - - return diags -} - -func (m *Module) mergeFile(file *File) hcl.Diagnostics { - var diags hcl.Diagnostics - - if len(file.CoreVersionConstraints) != 0 { - // This is a bit of a strange case for overriding since we normally - // would union together across multiple files anyway, but we'll - // allow it and have each override file clobber any existing list. 
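// The appendFile loops above all follow one duplicate-detection pattern:
// look the name up in a map, report the previous declaration's range, and
// (for resources) keep the first definition. A standalone sketch with
// illustrative types:
package main

import "fmt"

type resource struct{ Name, DeclRange string }

func addResource(m map[string]*resource, r *resource) error {
	if existing, exists := m[r.Name]; exists {
		// Keep the first definition and point the user at it.
		return fmt.Errorf("a resource named %q was already declared at %s", existing.Name, existing.DeclRange)
	}
	m[r.Name] = r
	return nil
}

func main() {
	seen := map[string]*resource{}
	fmt.Println(addResource(seen, &resource{"web", "main.tf:1,1"}))  // <nil>
	fmt.Println(addResource(seen, &resource{"web", "main.tf:20,1"})) // duplicate error
}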
- m.CoreVersionConstraints = nil - for _, constraint := range file.CoreVersionConstraints { - m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) - } - } - - if len(file.Backends) != 0 { - switch len(file.Backends) { - case 1: - m.Backend = file.Backends[0] - default: - // An override file with multiple backends is still invalid, even - // though it can override backends from _other_ files. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate backend configuration", - Detail: fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange), - Subject: &file.Backends[1].DeclRange, - }) - } - } - - for _, pc := range file.ProviderConfigs { - key := pc.moduleUniqueKey() - existing, exists := m.ProviderConfigs[key] - if pc.Alias == "" { - // We allow overriding a non-existing _default_ provider configuration - // because the user model is that an absent provider configuration - // implies an empty provider configuration, which is what the user - // is therefore overriding here. - if exists { - mergeDiags := existing.merge(pc) - diags = append(diags, mergeDiags...) - } else { - m.ProviderConfigs[key] = pc - } - } else { - // For aliased providers, there must be a base configuration to - // override. This allows us to detect and report alias typos - // that might otherwise cause the override to not apply. - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base provider configuration for override", - Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias), - Subject: &pc.DeclRange, - }) - continue - } - mergeDiags := existing.merge(pc) - diags = append(diags, mergeDiags...) - } - } - - if len(file.ProviderRequirements) != 0 { - mergeProviderVersionConstraints(m.ProviderRequirements, file.ProviderRequirements) - } - - for _, v := range file.Variables { - existing, exists := m.Variables[v.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base variable declaration to override", - Detail: fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name), - Subject: &v.DeclRange, - }) - continue - } - mergeDiags := existing.merge(v) - diags = append(diags, mergeDiags...) - } - - for _, l := range file.Locals { - existing, exists := m.Locals[l.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base local value definition to override", - Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name), - Subject: &l.DeclRange, - }) - continue - } - mergeDiags := existing.merge(l) - diags = append(diags, mergeDiags...) - } - - for _, o := range file.Outputs { - existing, exists := m.Outputs[o.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base output definition to override", - Detail: fmt.Sprintf("There is no output named %q. 
An override file can only override an output that was already defined in a primary configuration file.", o.Name), - Subject: &o.DeclRange, - }) - continue - } - mergeDiags := existing.merge(o) - diags = append(diags, mergeDiags...) - } - - for _, mc := range file.ModuleCalls { - existing, exists := m.ModuleCalls[mc.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing module call to override", - Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name), - Subject: &mc.DeclRange, - }) - continue - } - mergeDiags := existing.merge(mc) - diags = append(diags, mergeDiags...) - } - - for _, r := range file.ManagedResources { - key := r.moduleUniqueKey() - existing, exists := m.ManagedResources[key] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing resource to override", - Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name), - Subject: &r.DeclRange, - }) - continue - } - mergeDiags := existing.merge(r) - diags = append(diags, mergeDiags...) - } - - for _, r := range file.DataResources { - key := r.moduleUniqueKey() - existing, exists := m.DataResources[key] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing data resource to override", - Detail: fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name), - Subject: &r.DeclRange, - }) - continue - } - mergeDiags := existing.merge(r) - diags = append(diags, mergeDiags...) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go deleted file mode 100644 index a484ffef9c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go +++ /dev/null @@ -1,188 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// ModuleCall represents a "module" block in a module or file. -type ModuleCall struct { - Name string - - SourceAddr string - SourceAddrRange hcl.Range - SourceSet bool - - Config hcl.Body - - Version VersionConstraint - - Count hcl.Expression - ForEach hcl.Expression - - Providers []PassedProviderConfig - - DependsOn []hcl.Traversal - - DeclRange hcl.Range -} - -func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) { - mc := &ModuleCall{ - Name: block.Labels[0], - DeclRange: block.DefRange, - } - - schema := moduleBlockSchema - if override { - schema = schemaForOverrides(schema) - } - - content, remain, diags := block.Body.PartialContent(schema) - mc.Config = remain - - if !hclsyntax.ValidIdentifier(mc.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module instance name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - - if attr, exists := content.Attributes["source"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr) - diags = append(diags, valDiags...) 
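// gohcl.DecodeExpression, used just above for the "source" attribute,
// evaluates an HCL expression into a Go value. A self-contained sketch
// (real hclsyntax/gohcl APIs; the module path is arbitrary):
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	expr, parseDiags := hclsyntax.ParseExpression(
		[]byte(`"./modules/network"`), "example.tf", hcl.Pos{Line: 1, Column: 1})
	if parseDiags.HasErrors() {
		panic(parseDiags.Error())
	}
	var source string
	if diags := gohcl.DecodeExpression(expr, nil, &source); diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(source) // ./modules/network
}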
- mc.SourceAddrRange = attr.Expr.Range() - mc.SourceSet = true - } - - if attr, exists := content.Attributes["version"]; exists { - var versionDiags hcl.Diagnostics - mc.Version, versionDiags = decodeVersionConstraint(attr) - diags = append(diags, versionDiags...) - } - - if attr, exists := content.Attributes["count"]; exists { - mc.Count = attr.Expr - - // We currently parse this, but don't yet do anything with it. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in module block", - Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), - Subject: &attr.NameRange, - }) - } - - if attr, exists := content.Attributes["for_each"]; exists { - mc.ForEach = attr.Expr - - // We currently parse this, but don't yet do anything with it. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in module block", - Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), - Subject: &attr.NameRange, - }) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - mc.DependsOn = append(mc.DependsOn, deps...) - - // We currently parse this, but don't yet do anything with it. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in module block", - Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), - Subject: &attr.NameRange, - }) - } - - if attr, exists := content.Attributes["providers"]; exists { - seen := make(map[string]hcl.Range) - pairs, pDiags := hcl.ExprMap(attr.Expr) - diags = append(diags, pDiags...) - for _, pair := range pairs { - key, keyDiags := decodeProviderConfigRef(pair.Key, "providers") - diags = append(diags, keyDiags...) - value, valueDiags := decodeProviderConfigRef(pair.Value, "providers") - diags = append(diags, valueDiags...) - if keyDiags.HasErrors() || valueDiags.HasErrors() { - continue - } - - matchKey := key.String() - if prev, exists := seen[matchKey]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider address", - Detail: fmt.Sprintf("A provider configuration was already passed to %s at %s. Each child provider configuration can be assigned only once.", matchKey, prev), - Subject: pair.Value.Range().Ptr(), - }) - continue - } - - rng := hcl.RangeBetween(pair.Key.Range(), pair.Value.Range()) - seen[matchKey] = rng - mc.Providers = append(mc.Providers, PassedProviderConfig{ - InChild: key, - InParent: value, - }) - } - } - - // Reserved block types (all of them) - for _, block := range content.Blocks { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in module block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - - return mc, diags -} - -// PassedProviderConfig represents a provider config explicitly passed down to -// a child module, possibly giving it a new local address in the process. 
-type PassedProviderConfig struct { - InChild *ProviderConfigRef - InParent *ProviderConfigRef -} - -var moduleBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "source", - Required: true, - }, - { - Name: "version", - }, - { - Name: "count", - }, - { - Name: "for_each", - }, - { - Name: "depends_on", - }, - { - Name: "providers", - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - // These are all reserved for future use. - {Type: "lifecycle"}, - {Type: "locals"}, - {Type: "provider", LabelNames: []string{"type"}}, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go deleted file mode 100644 index 6fb82acfbd..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go +++ /dev/null @@ -1,247 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// The methods in this file are used by Module.mergeFile to apply overrides -// to our different configuration elements. These methods all follow the -// pattern of mutating the receiver to incorporate settings from the parameter, -// returning error diagnostics if any aspect of the parameter cannot be merged -// into the receiver for some reason. -// -// User expectation is that anything _explicitly_ set in the given object -// should take precedence over the corresponding settings in the receiver, -// but that anything omitted in the given object should be left unchanged. -// In some cases it may be reasonable to do a "deep merge" of certain nested -// features, if it is possible to unambiguously correlate the nested elements -// and their behaviors are orthogonal to each other. - -func (p *Provider) merge(op *Provider) hcl.Diagnostics { - var diags hcl.Diagnostics - - if op.Version.Required != nil { - p.Version = op.Version - } - - p.Config = MergeBodies(p.Config, op.Config) - - return diags -} - -func mergeProviderVersionConstraints(recv map[string][]VersionConstraint, ovrd []*ProviderRequirement) { - // Any provider name that's mentioned in the override gets nilled out in - // our map so that we'll rebuild it below. Any provider not mentioned is - // left unchanged. - for _, reqd := range ovrd { - delete(recv, reqd.Name) - } - for _, reqd := range ovrd { - recv[reqd.Name] = append(recv[reqd.Name], reqd.Requirement) - } -} - -func (v *Variable) merge(ov *Variable) hcl.Diagnostics { - var diags hcl.Diagnostics - - if ov.DescriptionSet { - v.Description = ov.Description - v.DescriptionSet = ov.DescriptionSet - } - if ov.Default != cty.NilVal { - v.Default = ov.Default - } - if ov.Type != cty.NilType { - v.Type = ov.Type - } - if ov.ParsingMode != 0 { - v.ParsingMode = ov.ParsingMode - } - - // If the override file overrode type without default or vice-versa then - // it may have created an invalid situation, which we'll catch now by - // attempting to re-convert the value. - // - // Note that here we may be re-converting an already-converted base value - // from the base config. This will be a no-op if the type was not changed, - // but in particular might be user-observable in the edge case where the - // literal value in config could've been converted to the overridden type - // constraint but the converted value cannot. 
In practice, this situation - // should be rare since most of our conversions are interchangable. - if v.Default != cty.NilVal { - val, err := convert.Convert(v.Default, v.Type) - if err != nil { - // What exactly we'll say in the error message here depends on whether - // it was Default or Type that was overridden here. - switch { - case ov.Type != cty.NilType && ov.Default == cty.NilVal: - // If only the type was overridden - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("Overriding this variable's type constraint has made its default value invalid: %s.", err), - Subject: &ov.DeclRange, - }) - case ov.Type == cty.NilType && ov.Default != cty.NilVal: - // Only the default was overridden - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("The overridden default value for this variable is not compatible with the variable's type constraint: %s.", err), - Subject: &ov.DeclRange, - }) - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("This variable's default value is not compatible with its type constraint: %s.", err), - Subject: &ov.DeclRange, - }) - } - } else { - v.Default = val - } - } - - return diags -} - -func (l *Local) merge(ol *Local) hcl.Diagnostics { - var diags hcl.Diagnostics - - // Since a local is just a single expression in configuration, the - // override definition entirely replaces the base definition, including - // the source range so that we'll send the user to the right place if - // there is an error. - l.Expr = ol.Expr - l.DeclRange = ol.DeclRange - - return diags -} - -func (o *Output) merge(oo *Output) hcl.Diagnostics { - var diags hcl.Diagnostics - - if oo.Description != "" { - o.Description = oo.Description - } - if oo.Expr != nil { - o.Expr = oo.Expr - } - if oo.SensitiveSet { - o.Sensitive = oo.Sensitive - o.SensitiveSet = oo.SensitiveSet - } - - // We don't allow depends_on to be overridden because that is likely to - // cause confusing misbehavior. - if len(oo.DependsOn) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported override", - Detail: "The depends_on argument may not be overridden.", - Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have - }) - } - - return diags -} - -func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics { - var diags hcl.Diagnostics - - if omc.SourceSet { - mc.SourceAddr = omc.SourceAddr - mc.SourceAddrRange = omc.SourceAddrRange - mc.SourceSet = omc.SourceSet - } - - if omc.Count != nil { - mc.Count = omc.Count - } - - if omc.ForEach != nil { - mc.ForEach = omc.ForEach - } - - if len(omc.Version.Required) != 0 { - mc.Version = omc.Version - } - - mc.Config = MergeBodies(mc.Config, omc.Config) - - // We don't allow depends_on to be overridden because that is likely to - // cause confusing misbehavior. 
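// Variable.merge above re-converts the default after an override so that a
// type overridden without a compatible default is caught early. The core
// check, in isolation (cty and cty/convert are the real libraries):
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	def := cty.StringVal("not-a-number")
	if _, err := convert.Convert(def, cty.Number); err != nil {
		fmt.Println("invalid default value for variable:", err)
	}
}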
- if len(mc.DependsOn) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported override", - Detail: "The depends_on argument may not be overridden.", - Subject: mc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have - }) - } - - return diags -} - -func (r *Resource) merge(or *Resource) hcl.Diagnostics { - var diags hcl.Diagnostics - - if r.Mode != or.Mode { - // This is always a programming error, since managed and data resources - // are kept in separate maps in the configuration structures. - panic(fmt.Errorf("can't merge %s into %s", or.Mode, r.Mode)) - } - - if or.Count != nil { - r.Count = or.Count - } - if or.ForEach != nil { - r.ForEach = or.ForEach - } - if or.ProviderConfigRef != nil { - r.ProviderConfigRef = or.ProviderConfigRef - } - if r.Mode == addrs.ManagedResourceMode { - // or.Managed is always non-nil for managed resource mode - - if or.Managed.Connection != nil { - r.Managed.Connection = or.Managed.Connection - } - if or.Managed.CreateBeforeDestroySet { - r.Managed.CreateBeforeDestroy = or.Managed.CreateBeforeDestroy - r.Managed.CreateBeforeDestroySet = or.Managed.CreateBeforeDestroySet - } - if len(or.Managed.IgnoreChanges) != 0 { - r.Managed.IgnoreChanges = or.Managed.IgnoreChanges - } - if or.Managed.PreventDestroySet { - r.Managed.PreventDestroy = or.Managed.PreventDestroy - r.Managed.PreventDestroySet = or.Managed.PreventDestroySet - } - if len(or.Managed.Provisioners) != 0 { - r.Managed.Provisioners = or.Managed.Provisioners - } - } - - r.Config = MergeBodies(r.Config, or.Config) - - // We don't allow depends_on to be overridden because that is likely to - // cause confusing misbehavior. - if len(or.DependsOn) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported override", - Detail: "The depends_on argument may not be overridden.", - Subject: or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have - }) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go deleted file mode 100644 index 7b51eae85e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go +++ /dev/null @@ -1,143 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -// MergeBodies creates a new HCL body that contains a combination of the -// given base and override bodies. Attributes and blocks defined in the -// override body take precedence over those of the same name defined in -// the base body. -// -// If any block of a particular type appears in "override" then it will -// replace _all_ of the blocks of the same type in "base" in the new -// body. -func MergeBodies(base, override hcl.Body) hcl.Body { - return mergeBody{ - Base: base, - Override: override, - } -} - -// mergeBody is a hcl.Body implementation that wraps a pair of other bodies -// and allows attributes and blocks within the override to take precedence -// over those defined in the base body. -// -// This is used to deal with dynamically-processed bodies in Module.mergeFile. -// It uses a shallow-only merging strategy where direct attributes defined -// in Override will override attributes of the same name in Base, while any -// blocks defined in Override will hide all blocks of the same type in Base. 
-// -// This cannot possibly "do the right thing" in all cases, because we don't -// have enough information about user intent. However, this behavior is intended -// to be reasonable for simple overriding use-cases. -type mergeBody struct { - Base hcl.Body - Override hcl.Body -} - -var _ hcl.Body = mergeBody{} - -func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - var diags hcl.Diagnostics - baseSchema := schemaWithDynamic(schema) - overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) - - baseContent, _, cDiags := b.Base.PartialContent(baseSchema) - diags = append(diags, cDiags...) - overrideContent, _, cDiags := b.Override.PartialContent(overrideSchema) - diags = append(diags, cDiags...) - - content := b.prepareContent(baseContent, overrideContent) - - return content, diags -} - -func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - var diags hcl.Diagnostics - baseSchema := schemaWithDynamic(schema) - overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) - - baseContent, baseRemain, cDiags := b.Base.PartialContent(baseSchema) - diags = append(diags, cDiags...) - overrideContent, overrideRemain, cDiags := b.Override.PartialContent(overrideSchema) - diags = append(diags, cDiags...) - - content := b.prepareContent(baseContent, overrideContent) - - remain := MergeBodies(baseRemain, overrideRemain) - - return content, remain, diags -} - -func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent { - content := &hcl.BodyContent{ - Attributes: make(hcl.Attributes), - } - - // For attributes we just assign from each map in turn and let the override - // map clobber any matching entries from base. - for k, a := range base.Attributes { - content.Attributes[k] = a - } - for k, a := range override.Attributes { - content.Attributes[k] = a - } - - // Things are a little more interesting for blocks because they arrive - // as a flat list. Our merging semantics call for us to suppress blocks - // from base if at least one block of the same type appears in override. - // We explicitly do not try to correlate and deeply merge nested blocks, - // since we don't have enough context here to infer user intent. - - overriddenBlockTypes := make(map[string]bool) - for _, block := range override.Blocks { - if block.Type == "dynamic" { - overriddenBlockTypes[block.Labels[0]] = true - continue - } - overriddenBlockTypes[block.Type] = true - } - for _, block := range base.Blocks { - // We skip over dynamic blocks whose type label is an overridden type - // but note that below we do still leave them as dynamic blocks in - // the result because expanding the dynamic blocks that are left is - // done much later during the core graph walks, where we can safely - // evaluate the expressions. - if block.Type == "dynamic" && overriddenBlockTypes[block.Labels[0]] { - continue - } - if overriddenBlockTypes[block.Type] { - continue - } - content.Blocks = append(content.Blocks, block) - } - for _, block := range override.Blocks { - content.Blocks = append(content.Blocks, block) - } - - return content -} - -func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - var diags hcl.Diagnostics - ret := make(hcl.Attributes) - - baseAttrs, aDiags := b.Base.JustAttributes() - diags = append(diags, aDiags...) - overrideAttrs, aDiags := b.Override.JustAttributes() - diags = append(diags, aDiags...) 
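// prepareContent above merges shallowly: override attributes clobber base
// attributes per name, and any block type present in the override hides
// all base blocks of that type. A standalone sketch of the block rule
// (illustrative types):
package main

import "fmt"

type block struct{ Type string }

func mergeBlocks(base, override []block) []block {
	hidden := map[string]bool{}
	for _, b := range override {
		hidden[b.Type] = true
	}
	var out []block
	for _, b := range base {
		if !hidden[b.Type] {
			out = append(out, b)
		}
	}
	return append(out, override...)
}

func main() {
	base := []block{{"ingress"}, {"ingress"}, {"egress"}}
	override := []block{{"ingress"}}
	fmt.Println(mergeBlocks(base, override)) // [{egress} {ingress}]
}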
- - for k, a := range baseAttrs { - ret[k] = a - } - for k, a := range overrideAttrs { - ret[k] = a - } - - return ret, diags -} - -func (b mergeBody) MissingItemRange() hcl.Range { - return b.Base.MissingItemRange() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go deleted file mode 100644 index 8c8398e0b7..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go +++ /dev/null @@ -1,354 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/typeexpr" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// A consistent detail message for all "not a valid identifier" diagnostics. -const badIdentifierDetail = "A name must start with a letter and may contain only letters, digits, underscores, and dashes." - -// Variable represents a "variable" block in a module or file. -type Variable struct { - Name string - Description string - Default cty.Value - Type cty.Type - ParsingMode VariableParsingMode - - DescriptionSet bool - - DeclRange hcl.Range -} - -func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagnostics) { - v := &Variable{ - Name: block.Labels[0], - DeclRange: block.DefRange, - } - - // Unless we're building an override, we'll set some defaults - // which we might override with attributes below. We leave these - // as zero-value in the override case so we can recognize whether - // or not they are set when we merge. - if !override { - v.Type = cty.DynamicPseudoType - v.ParsingMode = VariableParseLiteral - } - - content, diags := block.Body.Content(variableBlockSchema) - - if !hclsyntax.ValidIdentifier(v.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - - // Don't allow declaration of variables that would conflict with the - // reserved attribute and block type names in a "module" block, since - // these won't be usable for child modules. - for _, attr := range moduleBlockSchema.Attributes { - if attr.Name == v.Name { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable name", - Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", attr.Name), - Subject: &block.LabelRanges[0], - }) - } - } - for _, blockS := range moduleBlockSchema.Blocks { - if blockS.Type == v.Name { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable name", - Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", blockS.Type), - Subject: &block.LabelRanges[0], - }) - } - } - - if attr, exists := content.Attributes["description"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Description) - diags = append(diags, valDiags...) - v.DescriptionSet = true - } - - if attr, exists := content.Attributes["type"]; exists { - ty, parseMode, tyDiags := decodeVariableType(attr.Expr) - diags = append(diags, tyDiags...) - v.Type = ty - v.ParsingMode = parseMode - } - - if attr, exists := content.Attributes["default"]; exists { - val, valDiags := attr.Expr.Value(nil) - diags = append(diags, valDiags...) 
- - // Convert the default to the expected type so we can catch invalid - // defaults early and allow later code to assume validity. - // Note that this depends on us having already processed any "type" - // attribute above. - // However, we can't do this if we're in an override file where - // the type might not be set; we'll catch that during merge. - if v.Type != cty.NilType { - var err error - val, err = convert.Convert(val, v.Type) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err), - Subject: attr.Expr.Range().Ptr(), - }) - val = cty.DynamicVal - } - } - - v.Default = val - } - - return v, diags -} - -func decodeVariableType(expr hcl.Expression) (cty.Type, VariableParsingMode, hcl.Diagnostics) { - if exprIsNativeQuotedString(expr) { - // Here we're accepting the pre-0.12 form of variable type argument where - // the string values "string", "list" and "map" are accepted as a hint - // about the type used primarily for deciding how to parse values - // given on the command line and in environment variables. - // Only the native syntax ends up in this codepath; we handle the - // JSON syntax (which is, of course, quoted even in the new format) - // in the normal codepath below. - val, diags := expr.Value(nil) - if diags.HasErrors() { - return cty.DynamicPseudoType, VariableParseHCL, diags - } - str := val.AsString() - switch str { - case "string": - return cty.String, VariableParseLiteral, diags - case "list": - return cty.List(cty.DynamicPseudoType), VariableParseHCL, diags - case "map": - return cty.Map(cty.DynamicPseudoType), VariableParseHCL, diags - default: - return cty.DynamicPseudoType, VariableParseHCL, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: "Invalid legacy variable type hint", - Detail: `The legacy variable type hint form, using a quoted string, allows only the values "string", "list", and "map". To provide a full type expression, remove the surrounding quotes and give the type expression directly.`, - Subject: expr.Range().Ptr(), - }} - } - } - - // First we'll deal with some shorthand forms that the HCL-level type - // expression parser doesn't include. These both emulate pre-0.12 behavior - // of allowing a list or map of any element type as long as all of the - // elements are consistent. This is the same as list(any) or map(any). - switch hcl.ExprAsKeyword(expr) { - case "list": - return cty.List(cty.DynamicPseudoType), VariableParseHCL, nil - case "map": - return cty.Map(cty.DynamicPseudoType), VariableParseHCL, nil - } - - ty, diags := typeexpr.TypeConstraint(expr) - if diags.HasErrors() { - return cty.DynamicPseudoType, VariableParseHCL, diags - } - - switch { - case ty.IsPrimitiveType(): - // Primitive types use literal parsing. - return ty, VariableParseLiteral, diags - default: - // Everything else uses HCL parsing - return ty, VariableParseHCL, diags - } -} - -// VariableParsingMode defines how values of a particular variable given by -// text-only mechanisms (command line arguments and environment variables) -// should be parsed to produce the final value. -type VariableParsingMode rune - -// VariableParseLiteral is a variable parsing mode that just takes the given -// string directly as a cty.String value.
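decodeVariableType above leans on two public HCL facilities: hcl.ExprAsKeyword for the bare list/map shorthands and typeexpr.TypeConstraint for full type expressions. A minimal, self-contained sketch of that delegation using only the public hcl packages (the expression string and filename are illustrative, not taken from this code):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/typeexpr"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// Parse a type expression from native syntax, as decodeVariableType
	// does for non-legacy input.
	expr, diags := hclsyntax.ParseExpression([]byte("list(string)"), "variables.tf", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// typeexpr turns the expression into a cty type constraint.
	ty, diags := typeexpr.TypeConstraint(expr)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(ty.FriendlyName()) // "list of string"
}
```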
-const VariableParseLiteral VariableParsingMode = 'L' - -// VariableParseHCL is a variable parsing mode that attempts to parse the given -// string as an HCL expression and returns the result. -const VariableParseHCL VariableParsingMode = 'H' - -// Parse uses the receiving parsing mode to process the given variable value -// string, returning the result along with any diagnostics. -// -// A VariableParsingMode does not know the expected type of the corresponding -// variable, so it's the caller's responsibility to attempt to convert the -// result to the appropriate type and return to the user any diagnostics that -// conversion may produce. -// -// The given name is used to create a synthetic filename in case any diagnostics -// must be generated about the given string value. This should be the name -// of the root module variable whose value will be populated from the given -// string. -// -// If the returned diagnostics has errors, the returned value may not be -// valid. -func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnostics) { - switch m { - case VariableParseLiteral: - return cty.StringVal(value), nil - case VariableParseHCL: - fakeFilename := fmt.Sprintf("<value for var.%s>", name) - expr, diags := hclsyntax.ParseExpression([]byte(value), fakeFilename, hcl.Pos{Line: 1, Column: 1}) - if diags.HasErrors() { - return cty.DynamicVal, diags - } - val, valDiags := expr.Value(nil) - diags = append(diags, valDiags...) - return val, diags - default: - // Should never happen - panic(fmt.Errorf("Parse called on invalid VariableParsingMode %#v", m)) - } -} - -// Output represents an "output" block in a module or file. -type Output struct { - Name string - Description string - Expr hcl.Expression - DependsOn []hcl.Traversal - Sensitive bool - - DescriptionSet bool - SensitiveSet bool - - DeclRange hcl.Range -} - -func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) { - o := &Output{ - Name: block.Labels[0], - DeclRange: block.DefRange, - } - - schema := outputBlockSchema - if override { - schema = schemaForOverrides(schema) - } - - content, diags := block.Body.Content(schema) - - if !hclsyntax.ValidIdentifier(o.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid output name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - - if attr, exists := content.Attributes["description"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description) - diags = append(diags, valDiags...) - o.DescriptionSet = true - } - - if attr, exists := content.Attributes["value"]; exists { - o.Expr = attr.Expr - } - - if attr, exists := content.Attributes["sensitive"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive) - diags = append(diags, valDiags...) - o.SensitiveSet = true - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - o.DependsOn = append(o.DependsOn, deps...) - } - - return o, diags -} - -// Local represents a single entry from a "locals" block in a module or file. -// The "locals" block itself is not represented, because it serves only to -// provide context for us to interpret its contents.
-type Local struct { - Name string - Expr hcl.Expression - - DeclRange hcl.Range -} - -func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) { - attrs, diags := block.Body.JustAttributes() - if len(attrs) == 0 { - return nil, diags - } - - locals := make([]*Local, 0, len(attrs)) - for name, attr := range attrs { - if !hclsyntax.ValidIdentifier(name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid local value name", - Detail: badIdentifierDetail, - Subject: &attr.NameRange, - }) - } - - locals = append(locals, &Local{ - Name: name, - Expr: attr.Expr, - DeclRange: attr.Range, - }) - } - return locals, diags -} - -var variableBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "description", - }, - { - Name: "default", - }, - { - Name: "type", - }, - }, -} - -var outputBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "description", - }, - { - Name: "value", - Required: true, - }, - { - Name: "depends_on", - }, - { - Name: "sensitive", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go deleted file mode 100644 index 2a621b5772..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go +++ /dev/null @@ -1,100 +0,0 @@ -package configs - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclparse" - "github.com/spf13/afero" -) - -// Parser is the main interface to read configuration files and other related -// files from disk. -// -// It retains a cache of all files that are loaded so that they can be used -// to create source code snippets in diagnostics, etc. -type Parser struct { - fs afero.Afero - p *hclparse.Parser -} - -// NewParser creates and returns a new Parser that reads files from the given -// filesystem. If a nil filesystem is passed then the system's "real" filesystem -// will be used, via afero.OsFs. -func NewParser(fs afero.Fs) *Parser { - if fs == nil { - fs = afero.OsFs{} - } - - return &Parser{ - fs: afero.Afero{Fs: fs}, - p: hclparse.NewParser(), - } -} - -// LoadHCLFile is a low-level method that reads the file at the given path, -// parses it, and returns the hcl.Body representing its root. In many cases -// it is better to use one of the other Load*File methods on this type, -// which additionally decode the root body in some way and return a higher-level -// construct. -// -// If the file cannot be read at all -- e.g. because it does not exist -- then -// this method will return a nil body and error diagnostics. In this case -// callers may wish to ignore the provided error diagnostics and produce -// a more context-sensitive error instead. -// -// The file will be parsed using the HCL native syntax unless the filename -// ends with ".json", in which case the HCL JSON syntax will be used. 
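The Parser type wraps an afero filesystem together with an hclparse.Parser, so the read-then-parse flow of LoadHCLFile can be reproduced with public APIs alone. A hedged sketch under that assumption (the in-memory filesystem and the main.tf content are placeholders):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclparse"
	"github.com/spf13/afero"
)

func main() {
	// An in-memory filesystem stands in for the afero.Fs given to NewParser;
	// passing nil there would select the real OS filesystem instead.
	fs := afero.Afero{Fs: afero.NewMemMapFs()}
	if err := fs.WriteFile("main.tf", []byte(`foo = "bar"`), 0o644); err != nil {
		panic(err)
	}

	src, err := fs.ReadFile("main.tf")
	if err != nil {
		panic(err)
	}

	// The ".tf" suffix selects HCL native syntax; a ".json" suffix would
	// route through ParseJSON instead.
	file, diags := hclparse.NewParser().ParseHCL(src, "main.tf")
	fmt.Println(file != nil, diags.HasErrors()) // true false
}
```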
-func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) { - src, err := p.fs.ReadFile(path) - - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Failed to read file", - Detail: fmt.Sprintf("The file %q could not be read.", path), - }, - } - } - - var file *hcl.File - var diags hcl.Diagnostics - switch { - case strings.HasSuffix(path, ".json"): - file, diags = p.p.ParseJSON(src, path) - default: - file, diags = p.p.ParseHCL(src, path) - } - - // If the returned file or body is nil, then we'll return a non-nil empty - // body so we'll meet our contract that nil means an error reading the file. - if file == nil || file.Body == nil { - return hcl.EmptyBody(), diags - } - - return file.Body, diags -} - -// Sources returns a map of the cached source buffers for all files that -// have been loaded through this parser, with source filenames (as requested -// when each file was opened) as the keys. -func (p *Parser) Sources() map[string][]byte { - return p.p.Sources() -} - -// ForceFileSource artificially adds source code to the cache of file sources, -// as if it had been loaded from the given filename. -// -// This should be used only in special situations where configuration is loaded -// some other way. Most callers should load configuration via methods of -// Parser, which will update the sources cache automatically. -func (p *Parser) ForceFileSource(filename string, src []byte) { - // We'll make a synthetic hcl.File here just so we can reuse the - // existing cache. - p.p.AddFile(filename, &hcl.File{ - Body: hcl.EmptyBody(), - Bytes: src, - }) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go deleted file mode 100644 index d4cbc945c3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go +++ /dev/null @@ -1,247 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -// LoadConfigFile reads the file at the given path and parses it as a config -// file. -// -// If the file cannot be read -- for example, if it does not exist -- then -// a nil *File will be returned along with error diagnostics. Callers may wish -// to disregard the returned diagnostics in this case and instead generate -// their own error message(s) with additional context. -// -// If the returned diagnostics has errors when a non-nil file is returned -// then the file may be incomplete but should be valid enough for careful -// static analysis. -// -// This method wraps LoadHCLFile, and so it inherits the syntax selection -// behaviors documented for that method. -func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) { - return p.loadConfigFile(path, false) -} - -// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes -// certain required attribute constraints in order to interpret the given -// file as an overrides file. -func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) { - return p.loadConfigFile(path, true) -} - -func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) { - - body, diags := p.LoadHCLFile(path) - if body == nil { - return nil, diags - } - - file := &File{} - - var reqDiags hcl.Diagnostics - file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body) - diags = append(diags, reqDiags...)
- - content, contentDiags := body.Content(configFileSchema) - diags = append(diags, contentDiags...) - - for _, block := range content.Blocks { - switch block.Type { - - case "terraform": - content, contentDiags := block.Body.Content(terraformBlockSchema) - diags = append(diags, contentDiags...) - - // We ignore the "required_version" attribute here because - // sniffCoreVersionRequirements already dealt with that above. - - for _, innerBlock := range content.Blocks { - switch innerBlock.Type { - - case "backend": - backendCfg, cfgDiags := decodeBackendBlock(innerBlock) - diags = append(diags, cfgDiags...) - if backendCfg != nil { - file.Backends = append(file.Backends, backendCfg) - } - - case "required_providers": - reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock) - diags = append(diags, reqsDiags...) - file.ProviderRequirements = append(file.ProviderRequirements, reqs...) - - default: - // Should never happen because the above cases should be exhaustive - // for all block type names in our schema. - continue - - } - } - - case "provider": - cfg, cfgDiags := decodeProviderBlock(block) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.ProviderConfigs = append(file.ProviderConfigs, cfg) - } - - case "variable": - cfg, cfgDiags := decodeVariableBlock(block, override) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.Variables = append(file.Variables, cfg) - } - - case "locals": - defs, defsDiags := decodeLocalsBlock(block) - diags = append(diags, defsDiags...) - file.Locals = append(file.Locals, defs...) - - case "output": - cfg, cfgDiags := decodeOutputBlock(block, override) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.Outputs = append(file.Outputs, cfg) - } - - case "module": - cfg, cfgDiags := decodeModuleBlock(block, override) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.ModuleCalls = append(file.ModuleCalls, cfg) - } - - case "resource": - cfg, cfgDiags := decodeResourceBlock(block) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.ManagedResources = append(file.ManagedResources, cfg) - } - - case "data": - cfg, cfgDiags := decodeDataBlock(block) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.DataResources = append(file.DataResources, cfg) - } - - default: - // Should never happen because the above cases should be exhaustive - // for all block type names in our schema. - continue - - } - } - - return file, diags -} - -// sniffCoreVersionRequirements does minimal parsing of the given body for -// "terraform" blocks with "required_version" attributes, returning the -// requirements found. -// -// This is intended to maximize the chance that we'll be able to read the -// requirements (syntax errors notwithstanding) even if the config file contains -// constructs that might've been added in future Terraform versions. -// -// This is a "best effort" sort of method which will return constraints it is -// able to find, but may return no constraints at all if the given body is -// so invalid that it cannot be decoded at all. -func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) { - rootContent, _, diags := body.PartialContent(configFileVersionSniffRootSchema) - - var constraints []VersionConstraint - - for _, block := range rootContent.Blocks { - content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema) - diags = append(diags, blockDiags...)
- - attr, exists := content.Attributes["required_version"] - if !exists { - continue - } - - constraint, constraintDiags := decodeVersionConstraint(attr) - diags = append(diags, constraintDiags...) - if !constraintDiags.HasErrors() { - constraints = append(constraints, constraint) - } - } - - return constraints, diags -} - -// configFileSchema is the schema for the top-level of a config file. We use -// the low-level HCL API for this level so we can easily deal with each -// block type separately with its own decoding logic. -var configFileSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "terraform", - }, - { - Type: "provider", - LabelNames: []string{"name"}, - }, - { - Type: "variable", - LabelNames: []string{"name"}, - }, - { - Type: "locals", - }, - { - Type: "output", - LabelNames: []string{"name"}, - }, - { - Type: "module", - LabelNames: []string{"name"}, - }, - { - Type: "resource", - LabelNames: []string{"type", "name"}, - }, - { - Type: "data", - LabelNames: []string{"type", "name"}, - }, - }, -} - -// terraformBlockSchema is the schema for a top-level "terraform" block in -// a configuration file. -var terraformBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "required_version", - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "backend", - LabelNames: []string{"type"}, - }, - { - Type: "required_providers", - }, - }, -} - -// configFileVersionSniffRootSchema is a schema for sniffCoreVersionRequirements -var configFileVersionSniffRootSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "terraform", - }, - }, -} - -// configFileVersionSniffBlockSchema is a schema for sniffCoreVersionRequirements -var configFileVersionSniffBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "required_version", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go deleted file mode 100644 index afdd69833f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go +++ /dev/null @@ -1,163 +0,0 @@ -package configs - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/hashicorp/hcl/v2" -) - -// LoadConfigDir reads the .tf and .tf.json files in the given directory -// as config files (using LoadConfigFile) and then combines these files into -// a single Module. -// -// If this method returns nil, that indicates that the given directory does not -// exist at all or could not be opened for some reason. Callers may wish to -// detect this case and ignore the returned diagnostics so that they can -// produce a more context-aware error message in that case. -// -// If this method returns a non-nil module while error diagnostics are returned -// then the module may be incomplete but can be used carefully for static -// analysis. -// -// This file does not consider a directory with no files to be an error, and -// will simply return an empty module in that case. Callers should first call -// Parser.IsConfigDir if they wish to recognize that situation. -// -// .tf files are parsed using the HCL native syntax while .tf.json files are -// parsed using the HCL JSON syntax. 
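The primary/override split that LoadConfigDir relies on comes down to a filename convention: override.tf, override.tf.json, and any base name ending in _override. A small standalone sketch of that rule (the helper name is hypothetical; the real logic lives in dirFiles below):

```go
package main

import (
	"fmt"
	"strings"
)

// isOverrideFile applies the naming rule described above: a base name of
// "override" or a "_override" suffix, with a .tf or .tf.json extension.
func isOverrideFile(name string) bool {
	var ext string
	switch {
	case strings.HasSuffix(name, ".tf.json"):
		ext = ".tf.json"
	case strings.HasSuffix(name, ".tf"):
		ext = ".tf"
	default:
		return false // not a Terraform config file at all
	}
	base := name[:len(name)-len(ext)]
	return base == "override" || strings.HasSuffix(base, "_override")
}

func main() {
	for _, name := range []string{"main.tf", "override.tf", "prod_override.tf.json", "main.tf.json"} {
		fmt.Printf("%s -> override=%v\n", name, isOverrideFile(name))
	}
}
```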
-func (p *Parser) LoadConfigDir(path string) (*Module, hcl.Diagnostics) { - primaryPaths, overridePaths, diags := p.dirFiles(path) - if diags.HasErrors() { - return nil, diags - } - - primary, fDiags := p.loadFiles(primaryPaths, false) - diags = append(diags, fDiags...) - override, fDiags := p.loadFiles(overridePaths, true) - diags = append(diags, fDiags...) - - mod, modDiags := NewModule(primary, override) - diags = append(diags, modDiags...) - - mod.SourceDir = path - - return mod, diags -} - -// ConfigDirFiles returns lists of the primary and override files configuration -// files in the given directory. -// -// If the given directory does not exist or cannot be read, error diagnostics -// are returned. If errors are returned, the resulting lists may be incomplete. -func (p Parser) ConfigDirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { - return p.dirFiles(dir) -} - -// IsConfigDir determines whether the given path refers to a directory that -// exists and contains at least one Terraform config file (with a .tf or -// .tf.json extension.) -func (p *Parser) IsConfigDir(path string) bool { - primaryPaths, overridePaths, _ := p.dirFiles(path) - return (len(primaryPaths) + len(overridePaths)) > 0 -} - -func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) { - var files []*File - var diags hcl.Diagnostics - - for _, path := range paths { - var f *File - var fDiags hcl.Diagnostics - if override { - f, fDiags = p.LoadConfigFileOverride(path) - } else { - f, fDiags = p.LoadConfigFile(path) - } - diags = append(diags, fDiags...) - if f != nil { - files = append(files, f) - } - } - - return files, diags -} - -func (p *Parser) dirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { - infos, err := p.fs.ReadDir(dir) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Failed to read module directory", - Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), - }) - return - } - - for _, info := range infos { - if info.IsDir() { - // We only care about files - continue - } - - name := info.Name() - ext := fileExt(name) - if ext == "" || IsIgnoredFile(name) { - continue - } - - baseName := name[:len(name)-len(ext)] // strip extension - isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") - - fullPath := filepath.Join(dir, name) - if isOverride { - override = append(override, fullPath) - } else { - primary = append(primary, fullPath) - } - } - - return -} - -// fileExt returns the Terraform configuration extension of the given -// path, or a blank string if it is not a recognized extension. -func fileExt(path string) string { - if strings.HasSuffix(path, ".tf") { - return ".tf" - } else if strings.HasSuffix(path, ".tf.json") { - return ".tf.json" - } else { - return "" - } -} - -// IsIgnoredFile returns true if the given filename (which must not have a -// directory path ahead of it) should be ignored as e.g. an editor swap file. -func IsIgnoredFile(name string) bool { - return strings.HasPrefix(name, ".") || // Unix-like hidden files - strings.HasSuffix(name, "~") || // vim - strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs -} - -// IsEmptyDir returns true if the given filesystem path contains no Terraform -// configuration files. 
-// -// Unlike the methods of the Parser type, this function always consults the -// real filesystem, and thus it isn't appropriate to use when working with -// configuration loaded from a plan file. -func IsEmptyDir(path string) (bool, error) { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - return true, nil - } - - p := NewParser(nil) - fs, os, err := p.dirFiles(path) - if err != nil { - return false, err - } - - return len(fs) == 0 && len(os) == 0, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go deleted file mode 100644 index 10d98e5b09..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go +++ /dev/null @@ -1,43 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// LoadValuesFile reads the file at the given path and parses it as a "values -// file", which is an HCL config file whose top-level attributes are treated -// as arbitrary key/value pairs. -// -// If the file cannot be read -- for example, if it does not exist -- then -// a nil map will be returned along with error diagnostics. Callers may wish -// to disregard the returned diagnostics in this case and instead generate -// their own error message(s) with additional context. -// -// If the returned diagnostics has errors when a non-nil map is returned -// then the map may be incomplete but should be valid enough for careful -// static analysis. -// -// This method wraps LoadHCLFile, and so it inherits the syntax selection -// behaviors documented for that method. -func (p *Parser) LoadValuesFile(path string) (map[string]cty.Value, hcl.Diagnostics) { - body, diags := p.LoadHCLFile(path) - if body == nil { - return nil, diags - } - - vals := make(map[string]cty.Value) - attrs, attrDiags := body.JustAttributes() - diags = append(diags, attrDiags...) - if attrs == nil { - return vals, diags - } - - for name, attr := range attrs { - val, valDiags := attr.Expr.Value(nil) - diags = append(diags, valDiags...) - vals[name] = val - } - - return vals, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go deleted file mode 100644 index cb9ba1f3f6..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go +++ /dev/null @@ -1,144 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// Provider represents a "provider" block in a module or file. A provider -// block is a provider configuration, and there can be zero or more -// configurations for each actual provider.
-type Provider struct { - Name string - NameRange hcl.Range - Alias string - AliasRange *hcl.Range // nil if no alias set - - Version VersionConstraint - - Config hcl.Body - - DeclRange hcl.Range -} - -func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { - content, config, diags := block.Body.PartialContent(providerBlockSchema) - - provider := &Provider{ - Name: block.Labels[0], - NameRange: block.LabelRanges[0], - Config: config, - DeclRange: block.DefRange, - } - - if attr, exists := content.Attributes["alias"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) - diags = append(diags, valDiags...) - provider.AliasRange = attr.Expr.Range().Ptr() - - if !hclsyntax.ValidIdentifier(provider.Alias) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration alias", - Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail), - }) - } - } - - if attr, exists := content.Attributes["version"]; exists { - var versionDiags hcl.Diagnostics - provider.Version, versionDiags = decodeVersionConstraint(attr) - diags = append(diags, versionDiags...) - } - - // Reserved attribute names - for _, name := range []string{"count", "depends_on", "for_each", "source"} { - if attr, exists := content.Attributes[name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in provider block", - Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name), - Subject: &attr.NameRange, - }) - } - } - - // Reserved block types (all of them) - for _, block := range content.Blocks { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in provider block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - - return provider, diags -} - -// Addr returns the address of the receiving provider configuration, relative -// to its containing module. -func (p *Provider) Addr() addrs.ProviderConfig { - return addrs.ProviderConfig{ - Type: p.Name, - Alias: p.Alias, - } -} - -func (p *Provider) moduleUniqueKey() string { - if p.Alias != "" { - return fmt.Sprintf("%s.%s", p.Name, p.Alias) - } - return p.Name -} - -// ProviderRequirement represents a declaration of a dependency on a particular -// provider version without actually configuring that provider. This is used in -// child modules that expect a provider to be passed in from their parent. -type ProviderRequirement struct { - Name string - Requirement VersionConstraint -} - -func decodeRequiredProvidersBlock(block *hcl.Block) ([]*ProviderRequirement, hcl.Diagnostics) { - attrs, diags := block.Body.JustAttributes() - var reqs []*ProviderRequirement - for name, attr := range attrs { - req, reqDiags := decodeVersionConstraint(attr) - diags = append(diags, reqDiags...) - if !diags.HasErrors() { - reqs = append(reqs, &ProviderRequirement{ - Name: name, - Requirement: req, - }) - } - } - return reqs, diags -} - -var providerBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "alias", - }, - { - Name: "version", - }, - - // Attribute names reserved for future expansion. - {Name: "count"}, - {Name: "depends_on"}, - {Name: "for_each"}, - {Name: "source"}, - }, - Blocks: []hcl.BlockHeaderSchema{ - // _All_ of these are reserved for future expansion. 
- {Type: "lifecycle"}, - {Type: "locals"}, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go deleted file mode 100644 index 47b6567918..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go +++ /dev/null @@ -1,150 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" -) - -// Provisioner represents a "provisioner" block when used within a -// "resource" block in a module or file. -type Provisioner struct { - Type string - Config hcl.Body - Connection *Connection - When ProvisionerWhen - OnFailure ProvisionerOnFailure - - DeclRange hcl.Range - TypeRange hcl.Range -} - -func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) { - pv := &Provisioner{ - Type: block.Labels[0], - TypeRange: block.LabelRanges[0], - DeclRange: block.DefRange, - When: ProvisionerWhenCreate, - OnFailure: ProvisionerOnFailureFail, - } - - content, config, diags := block.Body.PartialContent(provisionerBlockSchema) - pv.Config = config - - if attr, exists := content.Attributes["when"]; exists { - expr, shimDiags := shimTraversalInString(attr.Expr, true) - diags = append(diags, shimDiags...) - - switch hcl.ExprAsKeyword(expr) { - case "create": - pv.When = ProvisionerWhenCreate - case "destroy": - pv.When = ProvisionerWhenDestroy - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid \"when\" keyword", - Detail: "The \"when\" argument requires one of the following keywords: create or destroy.", - Subject: expr.Range().Ptr(), - }) - } - } - - if attr, exists := content.Attributes["on_failure"]; exists { - expr, shimDiags := shimTraversalInString(attr.Expr, true) - diags = append(diags, shimDiags...) - - switch hcl.ExprAsKeyword(expr) { - case "continue": - pv.OnFailure = ProvisionerOnFailureContinue - case "fail": - pv.OnFailure = ProvisionerOnFailureFail - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid \"on_failure\" keyword", - Detail: "The \"on_failure\" argument requires one of the following keywords: continue or fail.", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - - var seenConnection *hcl.Block - for _, block := range content.Blocks { - switch block.Type { - - case "connection": - if seenConnection != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate connection block", - Detail: fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenConnection = block - - //conn, connDiags := decodeConnectionBlock(block) - //diags = append(diags, connDiags...) - pv.Connection = &Connection{ - Config: block.Body, - DeclRange: block.DefRange, - } - - default: - // Any other block types are ones we've reserved for future use, - // so they get a generic message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in provisioner block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - return pv, diags -} - -// Connection represents a "connection" block when used within either a -// "resource" or "provisioner" block in a module or file. 
-type Connection struct { - Config hcl.Body - - DeclRange hcl.Range -} - -// ProvisionerWhen is an enum for valid values for when to run provisioners. -type ProvisionerWhen int - -//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerWhen - -const ( - ProvisionerWhenInvalid ProvisionerWhen = iota - ProvisionerWhenCreate - ProvisionerWhenDestroy -) - -// ProvisionerOnFailure is an enum for valid values for on_failure options -// for provisioners. -type ProvisionerOnFailure int - -//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerOnFailure - -const ( - ProvisionerOnFailureInvalid ProvisionerOnFailure = iota - ProvisionerOnFailureContinue - ProvisionerOnFailureFail -) - -var provisionerBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - {Name: "when"}, - {Name: "on_failure"}, - }, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "connection"}, - {Type: "lifecycle"}, // reserved for future use - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go deleted file mode 100644 index 7ff5a6e00b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT. - -package configs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ProvisionerOnFailureInvalid-0] - _ = x[ProvisionerOnFailureContinue-1] - _ = x[ProvisionerOnFailureFail-2] -} - -const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail" - -var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79} - -func (i ProvisionerOnFailure) String() string { - if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) { - return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go deleted file mode 100644 index 9f21b3ac63..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT. - -package configs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[ProvisionerWhenInvalid-0] - _ = x[ProvisionerWhenCreate-1] - _ = x[ProvisionerWhenDestroy-2] -} - -const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy" - -var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65} - -func (i ProvisionerWhen) String() string { - if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) { - return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go deleted file mode 100644 index cd9991a389..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go +++ /dev/null @@ -1,490 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// Resource represents a "resource" or "data" block in a module or file. -type Resource struct { - Mode addrs.ResourceMode - Name string - Type string - Config hcl.Body - Count hcl.Expression - ForEach hcl.Expression - - ProviderConfigRef *ProviderConfigRef - - DependsOn []hcl.Traversal - - // Managed is populated only for Mode = addrs.ManagedResourceMode, - // containing the additional fields that apply to managed resources. - // For all other resource modes, this field is nil. - Managed *ManagedResource - - DeclRange hcl.Range - TypeRange hcl.Range -} - -// ManagedResource represents a "resource" block in a module or file. -type ManagedResource struct { - Connection *Connection - Provisioners []*Provisioner - - CreateBeforeDestroy bool - PreventDestroy bool - IgnoreChanges []hcl.Traversal - IgnoreAllChanges bool - - CreateBeforeDestroySet bool - PreventDestroySet bool -} - -func (r *Resource) moduleUniqueKey() string { - return r.Addr().String() -} - -// Addr returns a resource address for the receiver that is relative to the -// resource's containing module. -func (r *Resource) Addr() addrs.Resource { - return addrs.Resource{ - Mode: r.Mode, - Type: r.Type, - Name: r.Name, - } -} - -// ProviderConfigAddr returns the address for the provider configuration -// that should be used for this resource. This function implements the -// default behavior of extracting the type from the resource type name if -// an explicit "provider" argument was not provided. 
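The default behavior described above extracts the provider type as the prefix of the resource type name before the first underscore, so aws_instance implies provider aws. A hedged sketch of that convention (the helper name is hypothetical; the real logic lives in addrs.Resource.DefaultProviderConfig):

```go
package main

import (
	"fmt"
	"strings"
)

// defaultProviderType sketches the convention: everything before the first
// underscore in the resource type names the implied provider.
func defaultProviderType(resourceType string) string {
	if idx := strings.Index(resourceType, "_"); idx > 0 {
		return resourceType[:idx]
	}
	return resourceType
}

func main() {
	fmt.Println(defaultProviderType("aws_instance"))            // aws
	fmt.Println(defaultProviderType("google_compute_instance")) // google
	fmt.Println(defaultProviderType("null_resource"))           // null
}
```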
-func (r *Resource) ProviderConfigAddr() addrs.ProviderConfig { - if r.ProviderConfigRef == nil { - return r.Addr().DefaultProviderConfig() - } - - return addrs.ProviderConfig{ - Type: r.ProviderConfigRef.Name, - Alias: r.ProviderConfigRef.Alias, - } -} - -func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { - r := &Resource{ - Mode: addrs.ManagedResourceMode, - Type: block.Labels[0], - Name: block.Labels[1], - DeclRange: block.DefRange, - TypeRange: block.LabelRanges[0], - Managed: &ManagedResource{}, - } - - content, remain, diags := block.Body.PartialContent(resourceBlockSchema) - r.Config = remain - - if !hclsyntax.ValidIdentifier(r.Type) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource type name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - if !hclsyntax.ValidIdentifier(r.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[1], - }) - } - - if attr, exists := content.Attributes["count"]; exists { - r.Count = attr.Expr - } - - if attr, exists := content.Attributes["for_each"]; exists { - r.ForEach = attr.Expr - // Cannot have count and for_each on the same resource block - if r.Count != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid combination of "count" and "for_each"`, - Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, - Subject: &attr.NameRange, - }) - } - } - - if attr, exists := content.Attributes["provider"]; exists { - var providerDiags hcl.Diagnostics - r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") - diags = append(diags, providerDiags...) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - r.DependsOn = append(r.DependsOn, deps...) - } - - var seenLifecycle *hcl.Block - var seenConnection *hcl.Block - for _, block := range content.Blocks { - switch block.Type { - case "lifecycle": - if seenLifecycle != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate lifecycle block", - Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenLifecycle = block - - lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) - diags = append(diags, lcDiags...) - - if attr, exists := lcContent.Attributes["create_before_destroy"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy) - diags = append(diags, valDiags...) - r.Managed.CreateBeforeDestroySet = true - } - - if attr, exists := lcContent.Attributes["prevent_destroy"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy) - diags = append(diags, valDiags...) - r.Managed.PreventDestroySet = true - } - - if attr, exists := lcContent.Attributes["ignore_changes"]; exists { - - // ignore_changes can either be a list of relative traversals - // or it can be just the keyword "all" to ignore changes to this - // resource entirely. 
- // ignore_changes = [ami, instance_type] - // ignore_changes = all - // We also allow two legacy forms for compatibility with earlier - // versions: - // ignore_changes = ["ami", "instance_type"] - // ignore_changes = ["*"] - - kw := hcl.ExprAsKeyword(attr.Expr) - - switch { - case kw == "all": - r.Managed.IgnoreAllChanges = true - default: - exprs, listDiags := hcl.ExprList(attr.Expr) - diags = append(diags, listDiags...) - - var ignoreAllRange hcl.Range - - for _, expr := range exprs { - - // our expr might be the literal string "*", which - // we accept as a deprecated way of saying "all". - if shimIsIgnoreChangesStar(expr) { - r.Managed.IgnoreAllChanges = true - ignoreAllRange = expr.Range() - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Deprecated ignore_changes wildcard", - Detail: "The [\"*\"] form of ignore_changes wildcard is deprecated. Use \"ignore_changes = all\" to ignore changes to all attributes.", - Subject: attr.Expr.Range().Ptr(), - }) - continue - } - - expr, shimDiags := shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.RelTraversalForExpr(expr) - diags = append(diags, travDiags...) - if len(traversal) != 0 { - r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal) - } - } - - if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid ignore_changes ruleset", - Detail: "Cannot mix wildcard string \"*\" with non-wildcard references.", - Subject: &ignoreAllRange, - Context: attr.Expr.Range().Ptr(), - }) - } - - } - - } - - case "connection": - if seenConnection != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate connection block", - Detail: fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenConnection = block - - r.Managed.Connection = &Connection{ - Config: block.Body, - DeclRange: block.DefRange, - } - - case "provisioner": - pv, pvDiags := decodeProvisionerBlock(block) - diags = append(diags, pvDiags...) - if pv != nil { - r.Managed.Provisioners = append(r.Managed.Provisioners, pv) - } - - default: - // Any other block types are ones we've reserved for future use, - // so they get a generic message. 
- diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in resource block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - return r, diags -} - -func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { - r := &Resource{ - Mode: addrs.DataResourceMode, - Type: block.Labels[0], - Name: block.Labels[1], - DeclRange: block.DefRange, - TypeRange: block.LabelRanges[0], - } - - content, remain, diags := block.Body.PartialContent(dataBlockSchema) - r.Config = remain - - if !hclsyntax.ValidIdentifier(r.Type) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data source name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - if !hclsyntax.ValidIdentifier(r.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data resource name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[1], - }) - } - - if attr, exists := content.Attributes["count"]; exists { - r.Count = attr.Expr - } - - if attr, exists := content.Attributes["for_each"]; exists { - r.ForEach = attr.Expr - // Cannot have count and for_each on the same data block - if r.Count != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid combination of "count" and "for_each"`, - Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, - Subject: &attr.NameRange, - }) - } - } - - if attr, exists := content.Attributes["provider"]; exists { - var providerDiags hcl.Diagnostics - r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") - diags = append(diags, providerDiags...) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - r.DependsOn = append(r.DependsOn, deps...) - } - - for _, block := range content.Blocks { - // All of the block types we accept are just reserved for future use, but some get a specialized error message. - switch block.Type { - case "lifecycle": - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported lifecycle block", - Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.", - Subject: &block.DefRange, - }) - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in data block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - return r, diags -} - -type ProviderConfigRef struct { - Name string - NameRange hcl.Range - Alias string - AliasRange *hcl.Range // nil if alias not set -} - -func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var shimDiags hcl.Diagnostics - expr, shimDiags = shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.AbsTraversalForExpr(expr) - - // AbsTraversalForExpr produces only generic errors, so we'll discard - // the errors given and produce our own with extra context. If we didn't - // get any errors then we might still have warnings, though. 
- if !travDiags.HasErrors() { - diags = append(diags, travDiags...) - } - - if len(traversal) < 1 || len(traversal) > 2 { - // A provider reference was given as a string literal in the legacy - // configuration language and there are lots of examples out there - // showing that usage, so we'll sniff for that situation here and - // produce a specialized error message for it to help users find - // the new correct form. - if exprIsNativeQuotedString(expr) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: "A provider configuration reference must not be given in quotes.", - Subject: expr.Range().Ptr(), - }) - return nil, diags - } - - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName), - Subject: expr.Range().Ptr(), - }) - return nil, diags - } - - ret := &ProviderConfigRef{ - Name: traversal.RootName(), - NameRange: traversal[0].SourceRange(), - } - - if len(traversal) > 1 { - aliasStep, ok := traversal[1].(hcl.TraverseAttr) - if !ok { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.", - Subject: traversal[1].SourceRange().Ptr(), - }) - return ret, diags - } - - ret.Alias = aliasStep.Name - ret.AliasRange = aliasStep.SourceRange().Ptr() - } - - return ret, diags -} - -// Addr returns the provider config address corresponding to the receiving -// config reference. -// -// This is a trivial conversion, essentially just discarding the source -// location information and keeping just the addressing information. 
-func (r *ProviderConfigRef) Addr() addrs.ProviderConfig { - return addrs.ProviderConfig{ - Type: r.Name, - Alias: r.Alias, - } -} - -func (r *ProviderConfigRef) String() string { - if r == nil { - return "" - } - if r.Alias != "" { - return fmt.Sprintf("%s.%s", r.Name, r.Alias) - } - return r.Name -} - -var commonResourceAttributes = []hcl.AttributeSchema{ - { - Name: "count", - }, - { - Name: "for_each", - }, - { - Name: "provider", - }, - { - Name: "depends_on", - }, -} - -var resourceBlockSchema = &hcl.BodySchema{ - Attributes: commonResourceAttributes, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "locals"}, // reserved for future use - {Type: "lifecycle"}, - {Type: "connection"}, - {Type: "provisioner", LabelNames: []string{"type"}}, - }, -} - -var dataBlockSchema = &hcl.BodySchema{ - Attributes: commonResourceAttributes, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "lifecycle"}, // reserved for future use - {Type: "locals"}, // reserved for future use - }, -} - -var resourceLifecycleBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "create_before_destroy", - }, - { - Name: "prevent_destroy", - }, - { - Name: "ignore_changes", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go deleted file mode 100644 index cd914e5dbc..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go +++ /dev/null @@ -1,118 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// SynthBody produces a synthetic hcl.Body that behaves as if it had attributes -// corresponding to the elements given in the values map. -// -// This is useful in situations where, for example, values provided on the -// command line can override values given in configuration, using MergeBodies. -// -// The given filename is used in case any diagnostics are returned. Since -// the created body is synthetic, it is likely that this will not be a "real" -// filename. For example, a body synthesized from a command line argument could use -// a representation of that argument's name, such as "-var=...".
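SynthBody is typically paired with MergeBodies so that command-line values shadow ones from configuration. A simplified model of that precedence using plain cty values (the helper and the example values are hypothetical, not the vendored implementation):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// mergedValue models the override semantics: a value from the synthetic
// (command line) side shadows the one from the base configuration.
func mergedValue(base, override map[string]cty.Value, name string) (cty.Value, bool) {
	if v, ok := override[name]; ok {
		return v, true
	}
	v, ok := base[name]
	return v, ok
}

func main() {
	base := map[string]cty.Value{"region": cty.StringVal("us-west-2")}
	cli := map[string]cty.Value{"region": cty.StringVal("us-east-1")} // e.g. from -var=region=us-east-1

	v, _ := mergedValue(base, cli, "region")
	fmt.Println(v.AsString()) // us-east-1
}
```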
-func SynthBody(filename string, values map[string]cty.Value) hcl.Body { - return synthBody{ - Filename: filename, - Values: values, - } -} - -type synthBody struct { - Filename string - Values map[string]cty.Value -} - -func (b synthBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - content, remain, diags := b.PartialContent(schema) - remainS := remain.(synthBody) - for name := range remainS.Values { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported attribute", - Detail: fmt.Sprintf("An attribute named %q is not expected here.", name), - Subject: b.synthRange().Ptr(), - }) - } - return content, diags -} - -func (b synthBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - var diags hcl.Diagnostics - content := &hcl.BodyContent{ - Attributes: make(hcl.Attributes), - MissingItemRange: b.synthRange(), - } - - remainValues := make(map[string]cty.Value) - for attrName, val := range b.Values { - remainValues[attrName] = val - } - - for _, attrS := range schema.Attributes { - delete(remainValues, attrS.Name) - val, defined := b.Values[attrS.Name] - if !defined { - if attrS.Required { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing required attribute", - Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), - Subject: b.synthRange().Ptr(), - }) - } - continue - } - content.Attributes[attrS.Name] = b.synthAttribute(attrS.Name, val) - } - - // We just ignore blocks altogether, because this body type never has - // nested blocks. - - remain := synthBody{ - Filename: b.Filename, - Values: remainValues, - } - - return content, remain, diags -} - -func (b synthBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - ret := make(hcl.Attributes) - for name, val := range b.Values { - ret[name] = b.synthAttribute(name, val) - } - return ret, nil -} - -func (b synthBody) MissingItemRange() hcl.Range { - return b.synthRange() -} - -func (b synthBody) synthAttribute(name string, val cty.Value) *hcl.Attribute { - rng := b.synthRange() - return &hcl.Attribute{ - Name: name, - Expr: &hclsyntax.LiteralValueExpr{ - Val: val, - SrcRange: rng, - }, - NameRange: rng, - Range: rng, - } -} - -func (b synthBody) synthRange() hcl.Range { - return hcl.Range{ - Filename: b.Filename, - Start: hcl.Pos{Line: 1, Column: 1}, - End: hcl.Pos{Line: 1, Column: 1}, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go deleted file mode 100644 index e135546fb7..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go +++ /dev/null @@ -1,63 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// exprIsNativeQuotedString determines whether the given expression looks like -// it's a quoted string in the HCL native syntax. -// -// This should be used sparingly only for situations where our legacy HCL -// decoding would've expected a keyword or reference in quotes but our new -// decoding expects the keyword or reference to be provided directly as -// an identifier-based expression. 
-func exprIsNativeQuotedString(expr hcl.Expression) bool { - _, ok := expr.(*hclsyntax.TemplateExpr) - return ok -} - -// schemaForOverrides takes a *hcl.BodySchema and produces a new one that is -// equivalent except that any required attributes are forced to not be required. -// -// This is useful for dealing with "override" config files, which are allowed -// to omit things that they don't wish to override from the main configuration. -// -// The returned schema may have some pointers in common with the given schema, -// so neither the given schema nor the returned schema should be modified after -// using this function in order to avoid confusion. -// -// Overrides are rarely used, so it's recommended to just create the override -// schema on the fly only when it's needed, rather than storing it in a global -// variable as we tend to do for a primary schema. -func schemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), - Blocks: schema.Blocks, - } - - for i, attrS := range schema.Attributes { - ret.Attributes[i] = attrS - ret.Attributes[i].Required = false - } - - return ret -} - -// schemaWithDynamic takes a *hcl.BodySchema and produces a new one that -// is equivalent except that it accepts an additional block type "dynamic" with -// a single label, used to recognize usage of the HCL dynamic block extension. -func schemaWithDynamic(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: schema.Attributes, - Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), - } - - copy(ret.Blocks, schema.Blocks) - ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ - Type: "dynamic", - LabelNames: []string{"type"}, - }) - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go deleted file mode 100644 index c02ad4b552..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go +++ /dev/null @@ -1,45 +0,0 @@ -package configs - -// VariableTypeHint is an enumeration used for the Variable.TypeHint field, -// which is an incompletely-specified type for the variable which is used -// as a hint for whether a value provided in an ambiguous context (on the -// command line or in an environment variable) should be taken literally as a -// string or parsed as an HCL expression to produce a data structure. -// -// The type hint is applied to runtime values as well, but since it does not -// accurately describe a precise type it is not fully-sufficient to infer -// the dynamic type of a value passed through a variable. -// -// These hints use inaccurate terminology for historical reasons. Full details -// are in the documentation for each constant in this enumeration, but in -// summary: -// -// TypeHintString requires a primitive type -// TypeHintList requires a type that could be converted to a tuple -// TypeHintMap requires a type that could be converted to an object -type VariableTypeHint rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type VariableTypeHint - -// TypeHintNone indicates the absence of a type hint. Values specified in -// ambiguous contexts will be treated as literal strings, as if TypeHintString -// were selected, but no runtime value checks will be applied. 
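A short sketch of how the two schema helpers deleted above compose, assuming the resourceBlockSchema defined earlier in this package; the body argument is hypothetical.

    // Hypothetical sketch: decode an override file's resource body with a
    // schema where required attributes become optional and "dynamic"
    // blocks are recognized.
    func decodeOverrideResource(body hcl.Body) (*hcl.BodyContent, hcl.Diagnostics) {
        s := schemaWithDynamic(schemaForOverrides(resourceBlockSchema))
        return body.Content(s)
    }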
This is reasonable -// type hint for a module that is never intended to be used at the top-level -// of a configuration, since descendent modules never receive values from -// ambiguous contexts. -const TypeHintNone VariableTypeHint = 0 - -// TypeHintString spec indicates that a value provided in an ambiguous context -// should be treated as a literal string, and additionally requires that the -// runtime value for the variable is of a primitive type (string, number, bool). -const TypeHintString VariableTypeHint = 'S' - -// TypeHintList indicates that a value provided in an ambiguous context should -// be treated as an HCL expression, and additionally requires that the -// runtime value for the variable is of an tuple, list, or set type. -const TypeHintList VariableTypeHint = 'L' - -// TypeHintMap indicates that a value provided in an ambiguous context should -// be treated as an HCL expression, and additionally requires that the -// runtime value for the variable is of an object or map type. -const TypeHintMap VariableTypeHint = 'M' diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go deleted file mode 100644 index 2b50428ce1..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go +++ /dev/null @@ -1,39 +0,0 @@ -// Code generated by "stringer -type VariableTypeHint"; DO NOT EDIT. - -package configs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[TypeHintNone-0] - _ = x[TypeHintString-83] - _ = x[TypeHintList-76] - _ = x[TypeHintMap-77] -} - -const ( - _VariableTypeHint_name_0 = "TypeHintNone" - _VariableTypeHint_name_1 = "TypeHintListTypeHintMap" - _VariableTypeHint_name_2 = "TypeHintString" -) - -var ( - _VariableTypeHint_index_1 = [...]uint8{0, 12, 23} -) - -func (i VariableTypeHint) String() string { - switch { - case i == 0: - return _VariableTypeHint_name_0 - case 76 <= i && i <= 77: - i -= 76 - return _VariableTypeHint_name_1[_VariableTypeHint_index_1[i]:_VariableTypeHint_index_1[i+1]] - case i == 83: - return _VariableTypeHint_name_2 - default: - return "VariableTypeHint(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go deleted file mode 100644 index 0f541dc711..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go +++ /dev/null @@ -1,71 +0,0 @@ -package configs - -import ( - "fmt" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// VersionConstraint represents a version constraint on some resource -// (e.g. Terraform Core, a provider, a module, ...) that carries with it -// a source range so that a helpful diagnostic can be printed in the event -// that a particular constraint does not match. 
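A hedged sketch of how a constraint decoded by decodeVersionConstraint is then checked with go-version; the attribute and the version literal are hypothetical.

    // Hypothetical sketch: decode `required_version = ">= 0.12, < 2.0.0"`
    // and check it against a concrete version, reporting a diagnostic at
    // the constraint's declaration range on mismatch.
    func checkVersion(attr *hcl.Attribute) hcl.Diagnostics {
        vc, diags := decodeVersionConstraint(attr)
        if diags.HasErrors() {
            return diags
        }
        running := version.Must(version.NewVersion("0.12.31"))
        if !vc.Required.Check(running) {
            diags = append(diags, &hcl.Diagnostic{
                Severity: hcl.DiagError,
                Summary:  "Unsupported version",
                Subject:  vc.DeclRange.Ptr(),
            })
        }
        return diags
    }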
-type VersionConstraint struct { - Required version.Constraints - DeclRange hcl.Range -} - -func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagnostics) { - ret := VersionConstraint{ - DeclRange: attr.Range, - } - - val, diags := attr.Expr.Value(nil) - if diags.HasErrors() { - return ret, diags - } - var err error - val, err = convert.Convert(val, cty.String) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - Detail: fmt.Sprintf("A string value is required for %s.", attr.Name), - Subject: attr.Expr.Range().Ptr(), - }) - return ret, diags - } - - if val.IsNull() { - // A null version constraint is strange, but we'll just treat it - // like an empty constraint set. - return ret, diags - } - - if !val.IsWhollyKnown() { - // If there is a syntax error, HCL sets the value of the given attribute - // to cty.DynamicVal. A diagnostic for the syntax error will already - // bubble up, so we will move forward gracefully here. - return ret, diags - } - - constraintStr := val.AsString() - constraints, err := version.NewConstraint(constraintStr) - if err != nil { - // NewConstraint doesn't return user-friendly errors, so we'll just - // ignore the provided error and produce our own generic one. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - Detail: "This string does not use correct version constraint syntax.", // Not very actionable :( - Subject: attr.Expr.Range().Ptr(), - }) - return ret, diags - } - - ret.Required = constraints - return ret, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go deleted file mode 100644 index a150af9619..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go +++ /dev/null @@ -1,301 +0,0 @@ -package dag - -import ( - "fmt" - "sort" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/go-multierror" -) - -// AcyclicGraph is a specialization of Graph that cannot have cycles. With -// this property, we get the property of sane graph traversal. -type AcyclicGraph struct { - Graph -} - -// WalkFunc is the callback used for walking the graph. -type WalkFunc func(Vertex) tfdiags.Diagnostics - -// DepthWalkFunc is a walk function that also receives the current depth of the -// walk as an argument -type DepthWalkFunc func(Vertex, int) error - -func (g *AcyclicGraph) DirectedGraph() Grapher { - return g -} - -// Returns a Set that includes every Vertex yielded by walking down from the -// provided starting Vertex v. -func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) { - s := new(Set) - start := AsVertexList(g.DownEdges(v)) - memoFunc := func(v Vertex, d int) error { - s.Add(v) - return nil - } - - if err := g.DepthFirstWalk(start, memoFunc); err != nil { - return nil, err - } - - return s, nil -} - -// Returns a Set that includes every Vertex yielded by walking up from the -// provided starting Vertex v. -func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) { - s := new(Set) - start := AsVertexList(g.UpEdges(v)) - memoFunc := func(v Vertex, d int) error { - s.Add(v) - return nil - } - - if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil { - return nil, err - } - - return s, nil -} - -// Root returns the root of the DAG, or an error. 
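A small sketch of the two reachability helpers deleted above. Note the naming follows the code: Ancestors walks down-edges from v, while Descendents walks up-edges.

    // Sketch: on the chain 1 -> 2 -> 3,
    //   Ancestors(1)   yields the set {2, 3}
    //   Descendents(3) yields the set {1, 2}
    func exampleReachability() {
        var g AcyclicGraph
        g.Add(1)
        g.Add(2)
        g.Add(3)
        g.Connect(BasicEdge(1, 2))
        g.Connect(BasicEdge(2, 3))
        anc, _ := g.Ancestors(1)
        desc, _ := g.Descendents(3)
        _, _ = anc, desc
    }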
-// -// Complexity: O(V) -func (g *AcyclicGraph) Root() (Vertex, error) { - roots := make([]Vertex, 0, 1) - for _, v := range g.Vertices() { - if g.UpEdges(v).Len() == 0 { - roots = append(roots, v) - } - } - - if len(roots) > 1 { - // TODO(mitchellh): make this error message a lot better - return nil, fmt.Errorf("multiple roots: %#v", roots) - } - - if len(roots) == 0 { - return nil, fmt.Errorf("no roots found") - } - - return roots[0], nil -} - -// TransitiveReduction performs the transitive reduction of graph g in place. -// The transitive reduction of a graph is a graph with as few edges as -// possible with the same reachability as the original graph. This means -// that if there are three nodes A => B => C, and A connects to both -// B and C, and B connects to C, then the transitive reduction is the -// same graph with only a single edge between A and B, and a single edge -// between B and C. -// -// The graph must be valid for this operation to behave properly. If -// Validate() returns an error, the behavior is undefined and the results -// will likely be unexpected. -// -// Complexity: O(V(V+E)), or asymptotically O(VE) -func (g *AcyclicGraph) TransitiveReduction() { - // For each vertex u in graph g, do a DFS starting from each vertex - // v such that the edge (u,v) exists (v is a direct descendant of u). - // - // For each v-prime reachable from v, remove the edge (u, v-prime). - defer g.debug.BeginOperation("TransitiveReduction", "").End("") - - for _, u := range g.Vertices() { - uTargets := g.DownEdges(u) - vs := AsVertexList(g.DownEdges(u)) - - g.depthFirstWalk(vs, false, func(v Vertex, d int) error { - shared := uTargets.Intersection(g.DownEdges(v)) - for _, vPrime := range AsVertexList(shared) { - g.RemoveEdge(BasicEdge(u, vPrime)) - } - - return nil - }) - } -} - -// Validate validates the DAG. A DAG is valid if it has a single root -// with no cycles. -func (g *AcyclicGraph) Validate() error { - if _, err := g.Root(); err != nil { - return err - } - - // Look for cycles of more than 1 component - var err error - cycles := g.Cycles() - if len(cycles) > 0 { - for _, cycle := range cycles { - cycleStr := make([]string, len(cycle)) - for j, vertex := range cycle { - cycleStr[j] = VertexName(vertex) - } - - err = multierror.Append(err, fmt.Errorf( - "Cycle: %s", strings.Join(cycleStr, ", "))) - } - } - - // Look for cycles to self - for _, e := range g.Edges() { - if e.Source() == e.Target() { - err = multierror.Append(err, fmt.Errorf( - "Self reference: %s", VertexName(e.Source()))) - } - } - - return err -} - -func (g *AcyclicGraph) Cycles() [][]Vertex { - var cycles [][]Vertex - for _, cycle := range StronglyConnected(&g.Graph) { - if len(cycle) > 1 { - cycles = append(cycles, cycle) - } - } - return cycles -} - -// Walk walks the graph, calling your callback as each node is visited. -// This will walk nodes in parallel if it can. The resulting diagnostics -// contains problems from all graphs visited, in no particular order. 
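The transitive reduction comment above describes the three-node case; here it is as a runnable package-internal sketch.

    // Sketch: A -> B -> C plus the redundant shortcut A -> C.
    func exampleTransitiveReduction() {
        var g AcyclicGraph
        g.Add("A")
        g.Add("B")
        g.Add("C")
        g.Connect(BasicEdge("A", "B"))
        g.Connect(BasicEdge("B", "C"))
        g.Connect(BasicEdge("A", "C")) // redundant: C is reachable via B
        g.TransitiveReduction()
        // Only A -> B and B -> C remain; Validate() still passes since
        // the graph keeps its single root and stays acyclic.
    }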
-func (g *AcyclicGraph) Walk(cb WalkFunc) tfdiags.Diagnostics { - defer g.debug.BeginOperation(typeWalk, "").End("") - - w := &Walker{Callback: cb, Reverse: true} - w.Update(g) - return w.Wait() -} - -// simple convenience helper for converting a dag.Set to a []Vertex -func AsVertexList(s *Set) []Vertex { - rawList := s.List() - vertexList := make([]Vertex, len(rawList)) - for i, raw := range rawList { - vertexList[i] = raw.(Vertex) - } - return vertexList -} - -type vertexAtDepth struct { - Vertex Vertex - Depth int -} - -// depthFirstWalk does a depth-first walk of the graph starting from -// the vertices in start. -func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error { - return g.depthFirstWalk(start, true, f) -} - -// This internal method provides the option of not sorting the vertices during -// the walk, which we use for the Transitive reduction. -// Some configurations can lead to fully-connected subgraphs, which makes our -// transitive reduction algorithm O(n^3). This is still passable for the size -// of our graphs, but the additional n^2 sort operations would make this -// uncomputable in a reasonable amount of time. -func (g *AcyclicGraph) depthFirstWalk(start []Vertex, sorted bool, f DepthWalkFunc) error { - defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("") - - seen := make(map[Vertex]struct{}) - frontier := make([]*vertexAtDepth, len(start)) - for i, v := range start { - frontier[i] = &vertexAtDepth{ - Vertex: v, - Depth: 0, - } - } - for len(frontier) > 0 { - // Pop the current vertex - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check if we've seen this already and return... - if _, ok := seen[current.Vertex]; ok { - continue - } - seen[current.Vertex] = struct{}{} - - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - - // Visit targets of this in a consistent order. - targets := AsVertexList(g.DownEdges(current.Vertex)) - - if sorted { - sort.Sort(byVertexName(targets)) - } - - for _, t := range targets { - frontier = append(frontier, &vertexAtDepth{ - Vertex: t, - Depth: current.Depth + 1, - }) - } - } - - return nil -} - -// reverseDepthFirstWalk does a depth-first walk _up_ the graph starting from -// the vertices in start. -func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error { - defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("") - - seen := make(map[Vertex]struct{}) - frontier := make([]*vertexAtDepth, len(start)) - for i, v := range start { - frontier[i] = &vertexAtDepth{ - Vertex: v, - Depth: 0, - } - } - for len(frontier) > 0 { - // Pop the current vertex - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check if we've seen this already and return... - if _, ok := seen[current.Vertex]; ok { - continue - } - seen[current.Vertex] = struct{}{} - - // Add next set of targets in a consistent order. 
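A brief sketch of driving the exported depth-first walk deleted above; the callback simply indents output by walk depth.

    // Sketch: print vertices depth-first, indented by depth.
    func exampleDFS(g *AcyclicGraph, root Vertex) error {
        return g.DepthFirstWalk([]Vertex{root}, func(v Vertex, depth int) error {
            fmt.Printf("%*s%s\n", depth*2, "", VertexName(v))
            return nil
        })
    }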
- targets := AsVertexList(g.UpEdges(current.Vertex)) - sort.Sort(byVertexName(targets)) - for _, t := range targets { - frontier = append(frontier, &vertexAtDepth{ - Vertex: t, - Depth: current.Depth + 1, - }) - } - - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - } - - return nil -} - -// byVertexName implements sort.Interface so a list of Vertices can be sorted -// consistently by their VertexName -type byVertexName []Vertex - -func (b byVertexName) Len() int { return len(b) } -func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byVertexName) Less(i, j int) bool { - return VertexName(b[i]) < VertexName(b[j]) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go deleted file mode 100644 index 65a351b6fb..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go +++ /dev/null @@ -1,278 +0,0 @@ -package dag - -import ( - "bytes" - "fmt" - "sort" - "strings" -) - -// DotOpts are the options for generating a dot formatted Graph. -type DotOpts struct { - // Allows some nodes to decide to only show themselves when the user has - // requested the "verbose" graph. - Verbose bool - - // Highlight Cycles - DrawCycles bool - - // How many levels to expand modules as we draw - MaxDepth int - - // use this to keep the cluster_ naming convention from the previous dot writer - cluster bool -} - -// GraphNodeDotter can be implemented by a node to cause it to be included -// in the dot graph. The Dot method will be called which is expected to -// return a representation of this node. -type GraphNodeDotter interface { - // Dot is called to return the dot formatting for the node. - // The first parameter is the title of the node. - // The second parameter includes user-specified options that affect the dot - // graph. See GraphDotOpts below for details. - DotNode(string, *DotOpts) *DotNode -} - -// DotNode provides a structure for Vertices to return in order to specify their -// dot format. -type DotNode struct { - Name string - Attrs map[string]string -} - -// Returns the DOT representation of this Graph. 
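A sketch of opting a node into DOT output via the GraphNodeDotter interface just deleted, then rendering with Graph.Dot (defined in graph.go below); the node type is hypothetical.

    type boxNode struct{ name string }

    func (n *boxNode) Name() string { return n.name }

    // DotNode opts the vertex into dot output with a custom shape.
    func (n *boxNode) DotNode(title string, _ *DotOpts) *DotNode {
        return &DotNode{Name: title, Attrs: map[string]string{"shape": "box"}}
    }

    // Rendering: dot := g.Dot(&DotOpts{DrawCycles: true, MaxDepth: -1})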
-func (g *marshalGraph) Dot(opts *DotOpts) []byte { - if opts == nil { - opts = &DotOpts{ - DrawCycles: true, - MaxDepth: -1, - Verbose: true, - } - } - - var w indentWriter - w.WriteString("digraph {\n") - w.Indent() - - // some dot defaults - w.WriteString(`compound = "true"` + "\n") - w.WriteString(`newrank = "true"` + "\n") - - // the top level graph is written as the first subgraph - w.WriteString(`subgraph "root" {` + "\n") - g.writeBody(opts, &w) - - // cluster isn't really used other than for naming purposes in some graphs - opts.cluster = opts.MaxDepth != 0 - maxDepth := opts.MaxDepth - if maxDepth == 0 { - maxDepth = -1 - } - - for _, s := range g.Subgraphs { - g.writeSubgraph(s, opts, maxDepth, &w) - } - - w.Unindent() - w.WriteString("}\n") - return w.Bytes() -} - -func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte { - var buf bytes.Buffer - graphName := g.Name - if graphName == "" { - graphName = "root" - } - - name := v.Name - attrs := v.Attrs - if v.graphNodeDotter != nil { - node := v.graphNodeDotter.DotNode(name, opts) - if node == nil { - return []byte{} - } - - newAttrs := make(map[string]string) - for k, v := range attrs { - newAttrs[k] = v - } - for k, v := range node.Attrs { - newAttrs[k] = v - } - - name = node.Name - attrs = newAttrs - } - - buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name)) - writeAttrs(&buf, attrs) - buf.WriteByte('\n') - - return buf.Bytes() -} - -func (e *marshalEdge) dot(g *marshalGraph) string { - var buf bytes.Buffer - graphName := g.Name - if graphName == "" { - graphName = "root" - } - - sourceName := g.vertexByID(e.Source).Name - targetName := g.vertexByID(e.Target).Name - s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName) - buf.WriteString(s) - writeAttrs(&buf, e.Attrs) - - return buf.String() -} - -func cycleDot(e *marshalEdge, g *marshalGraph) string { - return e.dot(g) + ` [color = "red", penwidth = "2.0"]` -} - -// Write the subgraph body. The is recursive, and the depth argument is used to -// record the current depth of iteration. 
-func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) { - if depth == 0 { - return - } - depth-- - - name := sg.Name - if opts.cluster { - // we prefix with cluster_ to match the old dot output - name = "cluster_" + name - sg.Attrs["label"] = sg.Name - } - w.WriteString(fmt.Sprintf("subgraph %q {\n", name)) - sg.writeBody(opts, w) - - for _, sg := range sg.Subgraphs { - g.writeSubgraph(sg, opts, depth, w) - } -} - -func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) { - w.Indent() - - for _, as := range attrStrings(g.Attrs) { - w.WriteString(as + "\n") - } - - // list of Vertices that aren't to be included in the dot output - skip := map[string]bool{} - - for _, v := range g.Vertices { - if v.graphNodeDotter == nil { - skip[v.ID] = true - continue - } - - w.Write(v.dot(g, opts)) - } - - var dotEdges []string - - if opts.DrawCycles { - for _, c := range g.Cycles { - if len(c) < 2 { - continue - } - - for i, j := 0, 1; i < len(c); i, j = i+1, j+1 { - if j >= len(c) { - j = 0 - } - src := c[i] - tgt := c[j] - - if skip[src.ID] || skip[tgt.ID] { - continue - } - - e := &marshalEdge{ - Name: fmt.Sprintf("%s|%s", src.Name, tgt.Name), - Source: src.ID, - Target: tgt.ID, - Attrs: make(map[string]string), - } - - dotEdges = append(dotEdges, cycleDot(e, g)) - src = tgt - } - } - } - - for _, e := range g.Edges { - dotEdges = append(dotEdges, e.dot(g)) - } - - // srot these again to match the old output - sort.Strings(dotEdges) - - for _, e := range dotEdges { - w.WriteString(e + "\n") - } - - w.Unindent() - w.WriteString("}\n") -} - -func writeAttrs(buf *bytes.Buffer, attrs map[string]string) { - if len(attrs) > 0 { - buf.WriteString(" [") - buf.WriteString(strings.Join(attrStrings(attrs), ", ")) - buf.WriteString("]") - } -} - -func attrStrings(attrs map[string]string) []string { - strings := make([]string, 0, len(attrs)) - for k, v := range attrs { - strings = append(strings, fmt.Sprintf("%s = %q", k, v)) - } - sort.Strings(strings) - return strings -} - -// Provide a bytes.Buffer like structure, which will indent when starting a -// newline. -type indentWriter struct { - bytes.Buffer - level int -} - -func (w *indentWriter) indent() { - newline := []byte("\n") - if !bytes.HasSuffix(w.Bytes(), newline) { - return - } - for i := 0; i < w.level; i++ { - w.Buffer.WriteString("\t") - } -} - -// Indent increases indentation by 1 -func (w *indentWriter) Indent() { w.level++ } - -// Unindent decreases indentation by 1 -func (w *indentWriter) Unindent() { w.level-- } - -// the following methods intercecpt the byte.Buffer writes and insert the -// indentation when starting a new line. -func (w *indentWriter) Write(b []byte) (int, error) { - w.indent() - return w.Buffer.Write(b) -} - -func (w *indentWriter) WriteString(s string) (int, error) { - w.indent() - return w.Buffer.WriteString(s) -} -func (w *indentWriter) WriteByte(b byte) error { - w.indent() - return w.Buffer.WriteByte(b) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go deleted file mode 100644 index f0d99ee3a6..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go +++ /dev/null @@ -1,37 +0,0 @@ -package dag - -import ( - "fmt" -) - -// Edge represents an edge in the graph, with a source and target vertex. 
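A tiny sketch of the indentWriter behavior deleted above: indentation is inserted only at the start of a fresh line.

    func exampleIndentWriter() []byte {
        var w indentWriter
        w.WriteString("digraph {\n")
        w.Indent()
        w.WriteString("a -> b\n") // emitted as "\ta -> b\n"
        w.Unindent()
        w.WriteString("}\n")
        return w.Bytes()
    }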
-type Edge interface { - Source() Vertex - Target() Vertex - - Hashable -} - -// BasicEdge returns an Edge implementation that simply tracks the source -// and target given as-is. -func BasicEdge(source, target Vertex) Edge { - return &basicEdge{S: source, T: target} -} - -// basicEdge is a basic implementation of Edge that has the source and -// target vertex. -type basicEdge struct { - S, T Vertex -} - -func (e *basicEdge) Hashcode() interface{} { - return fmt.Sprintf("%p-%p", e.S, e.T) -} - -func (e *basicEdge) Source() Vertex { - return e.S -} - -func (e *basicEdge) Target() Vertex { - return e.T -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go deleted file mode 100644 index e7517a2062..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go +++ /dev/null @@ -1,391 +0,0 @@ -package dag - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "sort" -) - -// Graph is used to represent a dependency graph. -type Graph struct { - vertices *Set - edges *Set - downEdges map[interface{}]*Set - upEdges map[interface{}]*Set - - // JSON encoder for recording debug information - debug *encoder -} - -// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher. -type Subgrapher interface { - Subgraph() Grapher -} - -// A Grapher is any type that returns a Grapher, mainly used to identify -// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they -// return themselves. -type Grapher interface { - DirectedGraph() Grapher -} - -// Vertex of the graph. -type Vertex interface{} - -// NamedVertex is an optional interface that can be implemented by Vertex -// to give it a human-friendly name that is used for outputting the graph. -type NamedVertex interface { - Vertex - Name() string -} - -func (g *Graph) DirectedGraph() Grapher { - return g -} - -// Vertices returns the list of all the vertices in the graph. -func (g *Graph) Vertices() []Vertex { - list := g.vertices.List() - result := make([]Vertex, len(list)) - for i, v := range list { - result[i] = v.(Vertex) - } - - return result -} - -// Edges returns the list of all the edges in the graph. -func (g *Graph) Edges() []Edge { - list := g.edges.List() - result := make([]Edge, len(list)) - for i, v := range list { - result[i] = v.(Edge) - } - - return result -} - -// EdgesFrom returns the list of edges from the given source. -func (g *Graph) EdgesFrom(v Vertex) []Edge { - var result []Edge - from := hashcode(v) - for _, e := range g.Edges() { - if hashcode(e.Source()) == from { - result = append(result, e) - } - } - - return result -} - -// EdgesTo returns the list of edges to the given target. -func (g *Graph) EdgesTo(v Vertex) []Edge { - var result []Edge - search := hashcode(v) - for _, e := range g.Edges() { - if hashcode(e.Target()) == search { - result = append(result, e) - } - } - - return result -} - -// HasVertex checks if the given Vertex is present in the graph. -func (g *Graph) HasVertex(v Vertex) bool { - return g.vertices.Include(v) -} - -// HasEdge checks if the given Edge is present in the graph. -func (g *Graph) HasEdge(e Edge) bool { - return g.edges.Include(e) -} - -// Add adds a vertex to the graph. This is safe to call multiple time with -// the same Vertex. -func (g *Graph) Add(v Vertex) Vertex { - g.init() - g.vertices.Add(v) - g.debug.Add(v) - return v -} - -// Remove removes a vertex from the graph. 
This will also remove any -// edges with this vertex as a source or target. -func (g *Graph) Remove(v Vertex) Vertex { - // Delete the vertex itself - g.vertices.Delete(v) - g.debug.Remove(v) - - // Delete the edges to non-existent things - for _, target := range g.DownEdges(v).List() { - g.RemoveEdge(BasicEdge(v, target)) - } - for _, source := range g.UpEdges(v).List() { - g.RemoveEdge(BasicEdge(source, v)) - } - - return nil -} - -// Replace replaces the original Vertex with replacement. If the original -// does not exist within the graph, then false is returned. Otherwise, true -// is returned. -func (g *Graph) Replace(original, replacement Vertex) bool { - // If we don't have the original, we can't do anything - if !g.vertices.Include(original) { - return false - } - - defer g.debug.BeginOperation("Replace", "").End("") - - // If they're the same, then don't do anything - if original == replacement { - return true - } - - // Add our new vertex, then copy all the edges - g.Add(replacement) - for _, target := range g.DownEdges(original).List() { - g.Connect(BasicEdge(replacement, target)) - } - for _, source := range g.UpEdges(original).List() { - g.Connect(BasicEdge(source, replacement)) - } - - // Remove our old vertex, which will also remove all the edges - g.Remove(original) - - return true -} - -// RemoveEdge removes an edge from the graph. -func (g *Graph) RemoveEdge(edge Edge) { - g.init() - g.debug.RemoveEdge(edge) - - // Delete the edge from the set - g.edges.Delete(edge) - - // Delete the up/down edges - if s, ok := g.downEdges[hashcode(edge.Source())]; ok { - s.Delete(edge.Target()) - } - if s, ok := g.upEdges[hashcode(edge.Target())]; ok { - s.Delete(edge.Source()) - } -} - -// DownEdges returns the outward edges from the source Vertex v. -func (g *Graph) DownEdges(v Vertex) *Set { - g.init() - return g.downEdges[hashcode(v)] -} - -// UpEdges returns the inward edges to the destination Vertex v. -func (g *Graph) UpEdges(v Vertex) *Set { - g.init() - return g.upEdges[hashcode(v)] -} - -// Connect adds an edge with the given source and target. This is safe to -// call multiple times with the same value. Note that the same value is -// verified through pointer equality of the vertices, not through the -// value of the edge itself. -func (g *Graph) Connect(edge Edge) { - g.init() - g.debug.Connect(edge) - - source := edge.Source() - target := edge.Target() - sourceCode := hashcode(source) - targetCode := hashcode(target) - - // Do we have this already? If so, don't add it again. - if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) { - return - } - - // Add the edge to the set - g.edges.Add(edge) - - // Add the down edge - s, ok := g.downEdges[sourceCode] - if !ok { - s = new(Set) - g.downEdges[sourceCode] = s - } - s.Add(target) - - // Add the up edge - s, ok = g.upEdges[targetCode] - if !ok { - s = new(Set) - g.upEdges[targetCode] = s - } - s.Add(source) -} - -// String outputs some human-friendly output for the graph structure. -func (g *Graph) StringWithNodeTypes() string { - var buf bytes.Buffer - - // Build the list of node names and a mapping so that we can more - // easily alphabetize the output to remain deterministic. - vertices := g.Vertices() - names := make([]string, 0, len(vertices)) - mapping := make(map[string]Vertex, len(vertices)) - for _, v := range vertices { - name := VertexName(v) - names = append(names, name) - mapping[name] = v - } - sort.Strings(names) - - // Write each node in order... 
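A compact sketch of the edge-maintenance semantics deleted above: Connect is idempotent, and Replace rewires all edges before removing the original vertex.

    func exampleRewire() {
        var g Graph
        g.Add("A")
        g.Add("B")
        g.Connect(BasicEdge("A", "B"))
        g.Connect(BasicEdge("A", "B")) // no-op: the edge already exists
        g.Replace("B", "C")            // A -> C now; B and its edges are gone
    }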
- for _, name := range names { - v := mapping[name] - targets := g.downEdges[hashcode(v)] - - buf.WriteString(fmt.Sprintf("%s - %T\n", name, v)) - - // Alphabetize dependencies - deps := make([]string, 0, targets.Len()) - targetNodes := make(map[string]Vertex) - for _, target := range targets.List() { - dep := VertexName(target) - deps = append(deps, dep) - targetNodes[dep] = target - } - sort.Strings(deps) - - // Write dependencies - for _, d := range deps { - buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d])) - } - } - - return buf.String() -} - -// String outputs some human-friendly output for the graph structure. -func (g *Graph) String() string { - var buf bytes.Buffer - - // Build the list of node names and a mapping so that we can more - // easily alphabetize the output to remain deterministic. - vertices := g.Vertices() - names := make([]string, 0, len(vertices)) - mapping := make(map[string]Vertex, len(vertices)) - for _, v := range vertices { - name := VertexName(v) - names = append(names, name) - mapping[name] = v - } - sort.Strings(names) - - // Write each node in order... - for _, name := range names { - v := mapping[name] - targets := g.downEdges[hashcode(v)] - - buf.WriteString(fmt.Sprintf("%s\n", name)) - - // Alphabetize dependencies - deps := make([]string, 0, targets.Len()) - for _, target := range targets.List() { - deps = append(deps, VertexName(target)) - } - sort.Strings(deps) - - // Write dependencies - for _, d := range deps { - buf.WriteString(fmt.Sprintf(" %s\n", d)) - } - } - - return buf.String() -} - -func (g *Graph) init() { - if g.vertices == nil { - g.vertices = new(Set) - } - if g.edges == nil { - g.edges = new(Set) - } - if g.downEdges == nil { - g.downEdges = make(map[interface{}]*Set) - } - if g.upEdges == nil { - g.upEdges = make(map[interface{}]*Set) - } -} - -// Dot returns a dot-formatted representation of the Graph. -func (g *Graph) Dot(opts *DotOpts) []byte { - return newMarshalGraph("", g).Dot(opts) -} - -// MarshalJSON returns a JSON representation of the entire Graph. -func (g *Graph) MarshalJSON() ([]byte, error) { - dg := newMarshalGraph("root", g) - return json.MarshalIndent(dg, "", " ") -} - -// SetDebugWriter sets the io.Writer where the Graph will record debug -// information. After this is set, the graph will immediately encode itself to -// the stream, and continue to record all subsequent operations. -func (g *Graph) SetDebugWriter(w io.Writer) { - g.debug = &encoder{w: w} - g.debug.Encode(newMarshalGraph("root", g)) -} - -// DebugVertexInfo encodes arbitrary information about a vertex in the graph -// debug logs. -func (g *Graph) DebugVertexInfo(v Vertex, info string) { - va := newVertexInfo(typeVertexInfo, v, info) - g.debug.Encode(va) -} - -// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug -// logs. -func (g *Graph) DebugEdgeInfo(e Edge, info string) { - ea := newEdgeInfo(typeEdgeInfo, e, info) - g.debug.Encode(ea) -} - -// DebugVisitInfo records a visit to a Vertex during a walk operation. -func (g *Graph) DebugVisitInfo(v Vertex, info string) { - vi := newVertexInfo(typeVisitInfo, v, info) - g.debug.Encode(vi) -} - -// DebugOperation marks the start of a set of graph transformations in -// the debug log, and returns a DebugOperationEnd func, which marks the end of -// the operation in the log. Additional information can be added to the log via -// the info parameter. 
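A sketch of the debug-stream hookup described above, assuming an in-memory buffer; the rebuilt graph comes from decodeGraph in marshal.go below.

    // Sketch: record a snapshot plus subsequent operations, then replay.
    func exampleDebugStream(g *Graph) (*marshalGraph, error) {
        var buf bytes.Buffer
        g.SetDebugWriter(&buf) // encodes the current graph immediately
        g.Add("extra")         // recorded as a Transform operation
        return decodeGraph(&buf)
    }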
-// -// The returned func's End method allows this method to be called from a single -// defer statement: -// defer g.DebugOperationBegin("OpName", "operating").End("") -// -// The returned function must be called to properly close the logical operation -// in the logs. -func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd { - return g.debug.BeginOperation(operation, info) -} - -// VertexName returns the name of a vertex. -func VertexName(raw Vertex) string { - switch v := raw.(type) { - case NamedVertex: - return v.Name() - case fmt.Stringer: - return fmt.Sprintf("%s", v) - default: - return fmt.Sprintf("%v", v) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go deleted file mode 100644 index 7b23ea9c12..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go +++ /dev/null @@ -1,460 +0,0 @@ -package dag - -import ( - "encoding/json" - "fmt" - "io" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -const ( - typeOperation = "Operation" - typeTransform = "Transform" - typeWalk = "Walk" - typeDepthFirstWalk = "DepthFirstWalk" - typeReverseDepthFirstWalk = "ReverseDepthFirstWalk" - typeTransitiveReduction = "TransitiveReduction" - typeEdgeInfo = "EdgeInfo" - typeVertexInfo = "VertexInfo" - typeVisitInfo = "VisitInfo" -) - -// the marshal* structs are for serialization of the graph data. -type marshalGraph struct { - // Type is always "Graph", for identification as a top level object in the - // JSON stream. - Type string - - // Each marshal structure requires a unique ID so that it can be referenced - // by other structures. - ID string `json:",omitempty"` - - // Human readable name for this graph. - Name string `json:",omitempty"` - - // Arbitrary attributes that can be added to the output. - Attrs map[string]string `json:",omitempty"` - - // List of graph vertices, sorted by ID. - Vertices []*marshalVertex `json:",omitempty"` - - // List of edges, sorted by Source ID. - Edges []*marshalEdge `json:",omitempty"` - - // Any number of subgraphs. A subgraph itself is considered a vertex, and - // may be referenced by either end of an edge. - Subgraphs []*marshalGraph `json:",omitempty"` - - // Any lists of vertices that are included in cycles. - Cycles [][]*marshalVertex `json:",omitempty"` -} - -// The add, remove, connect, removeEdge methods mirror the basic Graph -// manipulations to reconstruct a marshalGraph from a debug log. -func (g *marshalGraph) add(v *marshalVertex) { - g.Vertices = append(g.Vertices, v) - sort.Sort(vertices(g.Vertices)) -} - -func (g *marshalGraph) remove(v *marshalVertex) { - for i, existing := range g.Vertices { - if v.ID == existing.ID { - g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...) - return - } - } -} - -func (g *marshalGraph) connect(e *marshalEdge) { - g.Edges = append(g.Edges, e) - sort.Sort(edges(g.Edges)) -} - -func (g *marshalGraph) removeEdge(e *marshalEdge) { - for i, existing := range g.Edges { - if e.Source == existing.Source && e.Target == existing.Target { - g.Edges = append(g.Edges[:i], g.Edges[i+1:]...) - return - } - } -} - -func (g *marshalGraph) vertexByID(id string) *marshalVertex { - for _, v := range g.Vertices { - if id == v.ID { - return v - } - } - return nil -} - -type marshalVertex struct { - // Unique ID, used to reference this vertex from other structures. 
- ID string - - // Human readable name - Name string `json:",omitempty"` - - Attrs map[string]string `json:",omitempty"` - - // This is to help transition from the old Dot interfaces. We record if the - // node was a GraphNodeDotter here, so we can call it to get attributes. - graphNodeDotter GraphNodeDotter -} - -func newMarshalVertex(v Vertex) *marshalVertex { - dn, ok := v.(GraphNodeDotter) - if !ok { - dn = nil - } - - return &marshalVertex{ - ID: marshalVertexID(v), - Name: VertexName(v), - Attrs: make(map[string]string), - graphNodeDotter: dn, - } -} - -// vertices is a sort.Interface implementation for sorting vertices by ID -type vertices []*marshalVertex - -func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name } -func (v vertices) Len() int { return len(v) } -func (v vertices) Swap(i, j int) { v[i], v[j] = v[j], v[i] } - -type marshalEdge struct { - // Human readable name - Name string - - // Source and Target Vertices by ID - Source string - Target string - - Attrs map[string]string `json:",omitempty"` -} - -func newMarshalEdge(e Edge) *marshalEdge { - return &marshalEdge{ - Name: fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())), - Source: marshalVertexID(e.Source()), - Target: marshalVertexID(e.Target()), - Attrs: make(map[string]string), - } -} - -// edges is a sort.Interface implementation for sorting edges by Source ID -type edges []*marshalEdge - -func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name } -func (e edges) Len() int { return len(e) } -func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } - -// build a marshalGraph structure from a *Graph -func newMarshalGraph(name string, g *Graph) *marshalGraph { - mg := &marshalGraph{ - Type: "Graph", - Name: name, - Attrs: make(map[string]string), - } - - for _, v := range g.Vertices() { - id := marshalVertexID(v) - if sg, ok := marshalSubgrapher(v); ok { - smg := newMarshalGraph(VertexName(v), sg) - smg.ID = id - mg.Subgraphs = append(mg.Subgraphs, smg) - } - - mv := newMarshalVertex(v) - mg.Vertices = append(mg.Vertices, mv) - } - - sort.Sort(vertices(mg.Vertices)) - - for _, e := range g.Edges() { - mg.Edges = append(mg.Edges, newMarshalEdge(e)) - } - - sort.Sort(edges(mg.Edges)) - - for _, c := range (&AcyclicGraph{*g}).Cycles() { - var cycle []*marshalVertex - for _, v := range c { - mv := newMarshalVertex(v) - cycle = append(cycle, mv) - } - mg.Cycles = append(mg.Cycles, cycle) - } - - return mg -} - -// Attempt to return a unique ID for any vertex. -func marshalVertexID(v Vertex) string { - val := reflect.ValueOf(v) - switch val.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return strconv.Itoa(int(val.Pointer())) - case reflect.Interface: - return strconv.Itoa(int(val.InterfaceData()[1])) - } - - if v, ok := v.(Hashable); ok { - h := v.Hashcode() - if h, ok := h.(string); ok { - return h - } - } - - // fallback to a name, which we hope is unique. - return VertexName(v) - - // we could try harder by attempting to read the arbitrary value from the - // interface, but we shouldn't get here from terraform right now. -} - -// check for a Subgrapher, and return the underlying *Graph. 
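A sketch of the Subgrapher hookup that marshalSubgrapher (below) unwraps: a vertex exposing its own graph is emitted as a nested subgraph; the module-style node is hypothetical.

    type moduleNode struct {
        name string
        sub  *Graph
    }

    func (m *moduleNode) Name() string { return m.name }

    // Subgraph marks this vertex as containing a nested graph.
    func (m *moduleNode) Subgraph() Grapher { return m.sub }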
-func marshalSubgrapher(v Vertex) (*Graph, bool) { - sg, ok := v.(Subgrapher) - if !ok { - return nil, false - } - - switch g := sg.Subgraph().DirectedGraph().(type) { - case *Graph: - return g, true - case *AcyclicGraph: - return &g.Graph, true - } - - return nil, false -} - -// The DebugOperationEnd func type provides a way to call an End function via a -// method call, allowing for the chaining of methods in a defer statement. -type DebugOperationEnd func(string) - -// End calls function e with the info parameter, marking the end of this -// operation in the logs. -func (e DebugOperationEnd) End(info string) { e(info) } - -// encoder provides methods to write debug data to an io.Writer, and is a noop -// when no writer is present -type encoder struct { - sync.Mutex - w io.Writer -} - -// Encode is analogous to json.Encoder.Encode -func (e *encoder) Encode(i interface{}) { - if e == nil || e.w == nil { - return - } - e.Lock() - defer e.Unlock() - - js, err := json.Marshal(i) - if err != nil { - log.Println("[ERROR] dag:", err) - return - } - js = append(js, '\n') - - _, err = e.w.Write(js) - if err != nil { - log.Println("[ERROR] dag:", err) - return - } -} - -func (e *encoder) Add(v Vertex) { - if e == nil { - return - } - e.Encode(marshalTransform{ - Type: typeTransform, - AddVertex: newMarshalVertex(v), - }) -} - -// Remove records the removal of Vertex v. -func (e *encoder) Remove(v Vertex) { - if e == nil { - return - } - e.Encode(marshalTransform{ - Type: typeTransform, - RemoveVertex: newMarshalVertex(v), - }) -} - -func (e *encoder) Connect(edge Edge) { - if e == nil { - return - } - e.Encode(marshalTransform{ - Type: typeTransform, - AddEdge: newMarshalEdge(edge), - }) -} - -func (e *encoder) RemoveEdge(edge Edge) { - if e == nil { - return - } - e.Encode(marshalTransform{ - Type: typeTransform, - RemoveEdge: newMarshalEdge(edge), - }) -} - -// BeginOperation marks the start of set of graph transformations, and returns -// an EndDebugOperation func to be called once the opration is complete. -func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd { - if e == nil { - return func(string) {} - } - - e.Encode(marshalOperation{ - Type: typeOperation, - Begin: op, - Info: info, - }) - - return func(info string) { - e.Encode(marshalOperation{ - Type: typeOperation, - End: op, - Info: info, - }) - } -} - -// structure for recording graph transformations -type marshalTransform struct { - // Type: "Transform" - Type string - AddEdge *marshalEdge `json:",omitempty"` - RemoveEdge *marshalEdge `json:",omitempty"` - AddVertex *marshalVertex `json:",omitempty"` - RemoveVertex *marshalVertex `json:",omitempty"` -} - -func (t marshalTransform) Transform(g *marshalGraph) { - switch { - case t.AddEdge != nil: - g.connect(t.AddEdge) - case t.RemoveEdge != nil: - g.removeEdge(t.RemoveEdge) - case t.AddVertex != nil: - g.add(t.AddVertex) - case t.RemoveVertex != nil: - g.remove(t.RemoveVertex) - } -} - -// this structure allows us to decode any object in the json stream for -// inspection, then re-decode it into a proper struct if needed. -type streamDecode struct { - Type string - Map map[string]interface{} - JSON []byte -} - -func (s *streamDecode) UnmarshalJSON(d []byte) error { - s.JSON = d - err := json.Unmarshal(d, &s.Map) - if err != nil { - return err - } - - if t, ok := s.Map["Type"]; ok { - s.Type, _ = t.(string) - } - return nil -} - -// structure for recording the beginning and end of any multi-step -// transformations. 
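The begin/end pair returned by BeginOperation is shaped for single-statement defer chaining, the pattern used throughout the dag package:

    // Sketch: bracket a multi-step transformation in the debug log.
    func (g *Graph) exampleOperation() {
        defer g.debug.BeginOperation("ExampleOperation", "begin").End("done")
        // ... graph transformations recorded between the two markers ...
    }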
These are informational, and not required to reproduce the -// graph state. -type marshalOperation struct { - Type string - Begin string `json:",omitempty"` - End string `json:",omitempty"` - Info string `json:",omitempty"` -} - -// decodeGraph decodes a marshalGraph from an encoded graph stream. -func decodeGraph(r io.Reader) (*marshalGraph, error) { - dec := json.NewDecoder(r) - - // a stream should always start with a graph - g := &marshalGraph{} - - err := dec.Decode(g) - if err != nil { - return nil, err - } - - // now replay any operations that occurred on the original graph - for dec.More() { - s := &streamDecode{} - err := dec.Decode(s) - if err != nil { - return g, err - } - - // the only Type we're concerned with here is Transform to complete the - // Graph - if s.Type != typeTransform { - continue - } - - t := &marshalTransform{} - err = json.Unmarshal(s.JSON, t) - if err != nil { - return g, err - } - t.Transform(g) - } - return g, nil -} - -// marshalVertexInfo allows encoding arbitrary information about the a single -// Vertex in the logs. These are accumulated for informational display while -// rebuilding the graph. -type marshalVertexInfo struct { - Type string - Vertex *marshalVertex - Info string -} - -func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo { - return &marshalVertexInfo{ - Type: infoType, - Vertex: newMarshalVertex(v), - Info: info, - } -} - -// marshalEdgeInfo allows encoding arbitrary information about the a single -// Edge in the logs. These are accumulated for informational display while -// rebuilding the graph. -type marshalEdgeInfo struct { - Type string - Edge *marshalEdge - Info string -} - -func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo { - return &marshalEdgeInfo{ - Type: infoType, - Edge: newMarshalEdge(e), - Info: info, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go deleted file mode 100644 index 92b42151d7..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go +++ /dev/null @@ -1,123 +0,0 @@ -package dag - -import ( - "sync" -) - -// Set is a set data structure. -type Set struct { - m map[interface{}]interface{} - once sync.Once -} - -// Hashable is the interface used by set to get the hash code of a value. -// If this isn't given, then the value of the item being added to the set -// itself is used as the comparison value. -type Hashable interface { - Hashcode() interface{} -} - -// hashcode returns the hashcode used for set elements. -func hashcode(v interface{}) interface{} { - if h, ok := v.(Hashable); ok { - return h.Hashcode() - } - - return v -} - -// Add adds an item to the set -func (s *Set) Add(v interface{}) { - s.once.Do(s.init) - s.m[hashcode(v)] = v -} - -// Delete removes an item from the set. -func (s *Set) Delete(v interface{}) { - s.once.Do(s.init) - delete(s.m, hashcode(v)) -} - -// Include returns true/false of whether a value is in the set. -func (s *Set) Include(v interface{}) bool { - s.once.Do(s.init) - _, ok := s.m[hashcode(v)] - return ok -} - -// Intersection computes the set intersection with other. -func (s *Set) Intersection(other *Set) *Set { - result := new(Set) - if s == nil { - return result - } - if other != nil { - for _, v := range s.m { - if other.Include(v) { - result.Add(v) - } - } - } - - return result -} - -// Difference returns a set with the elements that s has but -// other doesn't. 
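A small sketch of the Set operations deleted above; the zero value initializes lazily, and the read-only methods tolerate nil receivers.

    func exampleSets() {
        var a, b Set
        a.Add("x")
        a.Add("y")
        b.Add("y")
        b.Add("z")
        inter := a.Intersection(&b) // {y}
        diff := a.Difference(&b)    // {x}
        _, _ = inter, diff
    }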
-func (s *Set) Difference(other *Set) *Set { - result := new(Set) - if s != nil { - for k, v := range s.m { - var ok bool - if other != nil { - _, ok = other.m[k] - } - if !ok { - result.Add(v) - } - } - } - - return result -} - -// Filter returns a set that contains the elements from the receiver -// where the given callback returns true. -func (s *Set) Filter(cb func(interface{}) bool) *Set { - result := new(Set) - - for _, v := range s.m { - if cb(v) { - result.Add(v) - } - } - - return result -} - -// Len is the number of items in the set. -func (s *Set) Len() int { - if s == nil { - return 0 - } - - return len(s.m) -} - -// List returns the list of set elements. -func (s *Set) List() []interface{} { - if s == nil { - return nil - } - - r := make([]interface{}, 0, len(s.m)) - for _, v := range s.m { - r = append(r, v) - } - - return r -} - -func (s *Set) init() { - s.m = make(map[interface{}]interface{}) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go deleted file mode 100644 index 9d8b25ce2c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go +++ /dev/null @@ -1,107 +0,0 @@ -package dag - -// StronglyConnected returns the list of strongly connected components -// within the Graph g. This information is primarily used by this package -// for cycle detection, but strongly connected components have widespread -// use. -func StronglyConnected(g *Graph) [][]Vertex { - vs := g.Vertices() - acct := sccAcct{ - NextIndex: 1, - VertexIndex: make(map[Vertex]int, len(vs)), - } - for _, v := range vs { - // Recurse on any non-visited nodes - if acct.VertexIndex[v] == 0 { - stronglyConnected(&acct, g, v) - } - } - return acct.SCC -} - -func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int { - // Initial vertex visit - index := acct.visit(v) - minIdx := index - - for _, raw := range g.DownEdges(v).List() { - target := raw.(Vertex) - targetIdx := acct.VertexIndex[target] - - // Recurse on successor if not yet visited - if targetIdx == 0 { - minIdx = min(minIdx, stronglyConnected(acct, g, target)) - } else if acct.inStack(target) { - // Check if the vertex is in the stack - minIdx = min(minIdx, targetIdx) - } - } - - // Pop the strongly connected components off the stack if - // this is a root vertex - if index == minIdx { - var scc []Vertex - for { - v2 := acct.pop() - scc = append(scc, v2) - if v2 == v { - break - } - } - - acct.SCC = append(acct.SCC, scc) - } - - return minIdx -} - -func min(a, b int) int { - if a <= b { - return a - } - return b -} - -// sccAcct is used ot pass around accounting information for -// the StronglyConnectedComponents algorithm -type sccAcct struct { - NextIndex int - VertexIndex map[Vertex]int - Stack []Vertex - SCC [][]Vertex -} - -// visit assigns an index and pushes a vertex onto the stack -func (s *sccAcct) visit(v Vertex) int { - idx := s.NextIndex - s.VertexIndex[v] = idx - s.NextIndex++ - s.push(v) - return idx -} - -// push adds a vertex to the stack -func (s *sccAcct) push(n Vertex) { - s.Stack = append(s.Stack, n) -} - -// pop removes a vertex from the stack -func (s *sccAcct) pop() Vertex { - n := len(s.Stack) - if n == 0 { - return nil - } - vertex := s.Stack[n-1] - s.Stack = s.Stack[:n-1] - return vertex -} - -// inStack checks if a vertex is in the stack -func (s *sccAcct) inStack(needle Vertex) bool { - for _, n := range s.Stack { - if n == needle { - return true - } - } - return false -} diff 
--git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go deleted file mode 100644 index 5ddf8ef34c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go +++ /dev/null @@ -1,454 +0,0 @@ -package dag - -import ( - "errors" - "log" - "sync" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Walker is used to walk every vertex of a graph in parallel. -// -// A vertex will only be walked when the dependencies of that vertex have -// been walked. If two vertices can be walked at the same time, they will be. -// -// Update can be called to update the graph. This can be called even during -// a walk, cahnging vertices/edges mid-walk. This should be done carefully. -// If a vertex is removed but has already been executed, the result of that -// execution (any error) is still returned by Wait. Changing or re-adding -// a vertex that has already executed has no effect. Changing edges of -// a vertex that has already executed has no effect. -// -// Non-parallelism can be enforced by introducing a lock in your callback -// function. However, the goroutine overhead of a walk will remain. -// Walker will create V*2 goroutines (one for each vertex, and dependency -// waiter for each vertex). In general this should be of no concern unless -// there are a huge number of vertices. -// -// The walk is depth first by default. This can be changed with the Reverse -// option. -// -// A single walker is only valid for one graph walk. After the walk is complete -// you must construct a new walker to walk again. State for the walk is never -// deleted in case vertices or edges are changed. -type Walker struct { - // Callback is what is called for each vertex - Callback WalkFunc - - // Reverse, if true, causes the source of an edge to depend on a target. - // When false (default), the target depends on the source. - Reverse bool - - // changeLock must be held to modify any of the fields below. Only Update - // should modify these fields. Modifying them outside of Update can cause - // serious problems. - changeLock sync.Mutex - vertices Set - edges Set - vertexMap map[Vertex]*walkerVertex - - // wait is done when all vertices have executed. It may become "undone" - // if new vertices are added. - wait sync.WaitGroup - - // diagsMap contains the diagnostics recorded so far for execution, - // and upstreamFailed contains all the vertices whose problems were - // caused by upstream failures, and thus whose diagnostics should be - // excluded from the final set. - // - // Readers and writers of either map must hold diagsLock. - diagsMap map[Vertex]tfdiags.Diagnostics - upstreamFailed map[Vertex]struct{} - diagsLock sync.Mutex -} - -type walkerVertex struct { - // These should only be set once on initialization and never written again. - // They are not protected by a lock since they don't need to be since - // they are write-once. - - // DoneCh is closed when this vertex has completed execution, regardless - // of success. - // - // CancelCh is closed when the vertex should cancel execution. If execution - // is already complete (DoneCh is closed), this has no effect. Otherwise, - // execution is cancelled as quickly as possible. - DoneCh chan struct{} - CancelCh chan struct{} - - // Dependency information. Any changes to any of these fields requires - // holding DepsLock. 
- // - // DepsCh is sent a single value that denotes whether the upstream deps - // were successful (no errors). Any value sent means that the upstream - // dependencies are complete. No other values will ever be sent again. - // - // DepsUpdateCh is closed when there is a new DepsCh set. - DepsCh chan bool - DepsUpdateCh chan struct{} - DepsLock sync.Mutex - - // Below is not safe to read/write in parallel. This behavior is - // enforced by changes only happening in Update. Nothing else should - // ever modify these. - deps map[Vertex]chan struct{} - depsCancelCh chan struct{} -} - -// Wait waits for the completion of the walk and returns diagnostics describing -// any problems that arose. Update should be called to populate the walk with -// vertices and edges prior to calling this. -// -// Wait will return as soon as all currently known vertices are complete. -// If you plan on calling Update with more vertices in the future, you -// should not call Wait until after this is done. -func (w *Walker) Wait() tfdiags.Diagnostics { - // Wait for completion - w.wait.Wait() - - var diags tfdiags.Diagnostics - w.diagsLock.Lock() - for v, vDiags := range w.diagsMap { - if _, upstream := w.upstreamFailed[v]; upstream { - // Ignore diagnostics for nodes that had failed upstreams, since - // the downstream diagnostics are likely to be redundant. - continue - } - diags = diags.Append(vDiags) - } - w.diagsLock.Unlock() - - return diags -} - -// Update updates the currently executing walk with the given graph. -// This will perform a diff of the vertices and edges and update the walker. -// Already completed vertices remain completed (including any errors during -// their execution). -// -// This returns immediately once the walker is updated; it does not wait -// for completion of the walk. -// -// Multiple Updates can be called in parallel. Update can be called at any -// time during a walk. -func (w *Walker) Update(g *AcyclicGraph) { - log.Print("[TRACE] dag/walk: updating graph") - var v, e *Set - if g != nil { - v, e = g.vertices, g.edges - } - - // Grab the change lock so no more updates happen but also so that - // no new vertices are executed during this time since we may be - // removing them. - w.changeLock.Lock() - defer w.changeLock.Unlock() - - // Initialize fields - if w.vertexMap == nil { - w.vertexMap = make(map[Vertex]*walkerVertex) - } - - // Calculate all our sets - newEdges := e.Difference(&w.edges) - oldEdges := w.edges.Difference(e) - newVerts := v.Difference(&w.vertices) - oldVerts := w.vertices.Difference(v) - - // Add the new vertices - for _, raw := range newVerts.List() { - v := raw.(Vertex) - - // Add to the waitgroup so our walk is not done until everything finishes - w.wait.Add(1) - - // Add to our own set so we know about it already - log.Printf("[TRACE] dag/walk: added new vertex: %q", VertexName(v)) - w.vertices.Add(raw) - - // Initialize the vertex info - info := &walkerVertex{ - DoneCh: make(chan struct{}), - CancelCh: make(chan struct{}), - deps: make(map[Vertex]chan struct{}), - } - - // Add it to the map and kick off the walk - w.vertexMap[v] = info - } - - // Remove the old vertices - for _, raw := range oldVerts.List() { - v := raw.(Vertex) - - // Get the vertex info so we can cancel it - info, ok := w.vertexMap[v] - if !ok { - // This vertex for some reason was never in our map. This - // shouldn't be possible. 
- continue - } - - // Cancel the vertex - close(info.CancelCh) - - // Delete it out of the map - delete(w.vertexMap, v) - - log.Printf("[TRACE] dag/walk: removed vertex: %q", VertexName(v)) - w.vertices.Delete(raw) - } - - // Add the new edges - var changedDeps Set - for _, raw := range newEdges.List() { - edge := raw.(Edge) - waiter, dep := w.edgeParts(edge) - - // Get the info for the waiter - waiterInfo, ok := w.vertexMap[waiter] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Get the info for the dep - depInfo, ok := w.vertexMap[dep] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Add the dependency to our waiter - waiterInfo.deps[dep] = depInfo.DoneCh - - // Record that the deps changed for this waiter - changedDeps.Add(waiter) - - log.Printf( - "[TRACE] dag/walk: added edge: %q waiting on %q", - VertexName(waiter), VertexName(dep)) - w.edges.Add(raw) - } - - // Process removed edges - for _, raw := range oldEdges.List() { - edge := raw.(Edge) - waiter, dep := w.edgeParts(edge) - - // Get the info for the waiter - waiterInfo, ok := w.vertexMap[waiter] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Delete the dependency from the waiter - delete(waiterInfo.deps, dep) - - // Record that the deps changed for this waiter - changedDeps.Add(waiter) - - log.Printf( - "[TRACE] dag/walk: removed edge: %q waiting on %q", - VertexName(waiter), VertexName(dep)) - w.edges.Delete(raw) - } - - // For each vertex with changed dependencies, we need to kick off - // a new waiter and notify the vertex of the changes. - for _, raw := range changedDeps.List() { - v := raw.(Vertex) - info, ok := w.vertexMap[v] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Create a new done channel - doneCh := make(chan bool, 1) - - // Create the channel we close for cancellation - cancelCh := make(chan struct{}) - - // Build a new deps copy - deps := make(map[Vertex]<-chan struct{}) - for k, v := range info.deps { - deps[k] = v - } - - // Update the update channel - info.DepsLock.Lock() - if info.DepsUpdateCh != nil { - close(info.DepsUpdateCh) - } - info.DepsCh = doneCh - info.DepsUpdateCh = make(chan struct{}) - info.DepsLock.Unlock() - - // Cancel the older waiter - if info.depsCancelCh != nil { - close(info.depsCancelCh) - } - info.depsCancelCh = cancelCh - - log.Printf( - "[TRACE] dag/walk: dependencies changed for %q, sending new deps", - VertexName(v)) - - // Start the waiter - go w.waitDeps(v, deps, doneCh, cancelCh) - } - - // Start all the new vertices. We do this at the end so that all - // the edge waiters and changes are set up above. - for _, raw := range newVerts.List() { - v := raw.(Vertex) - go w.walkVertex(v, w.vertexMap[v]) - } -} - -// edgeParts returns the waiter and the dependency, in that order. -// The waiter is waiting on the dependency. -func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) { - if w.Reverse { - return e.Source(), e.Target() - } - - return e.Target(), e.Source() -} - -// walkVertex walks a single vertex, waiting for any dependencies before -// executing the callback. -func (w *Walker) walkVertex(v Vertex, info *walkerVertex) { - // When we're done executing, lower the waitgroup count - defer w.wait.Done() - - // When we're done, always close our done channel - defer close(info.DoneCh) - - // Wait for our dependencies.
We create a [closed] deps channel so - // that we can immediately fall through to load our actual DepsCh. - var depsSuccess bool - var depsUpdateCh chan struct{} - depsCh := make(chan bool, 1) - depsCh <- true - close(depsCh) - for { - select { - case <-info.CancelCh: - // Cancel - return - - case depsSuccess = <-depsCh: - // Deps complete! Mark as nil to trigger completion handling. - depsCh = nil - - case <-depsUpdateCh: - // New deps, reloop - } - - // Check if we have updated dependencies. This can happen if the - // dependencies were satisfied exactly prior to an Update occurring. - // In that case, we'd like to take into account new dependencies - // if possible. - info.DepsLock.Lock() - if info.DepsCh != nil { - depsCh = info.DepsCh - info.DepsCh = nil - } - if info.DepsUpdateCh != nil { - depsUpdateCh = info.DepsUpdateCh - } - info.DepsLock.Unlock() - - // If we still have no deps channel set, then we're done! - if depsCh == nil { - break - } - } - - // If we passed dependencies, we just want to check once more that - // we're not cancelled, since this can happen just as dependencies pass. - select { - case <-info.CancelCh: - // Cancelled during an update while dependencies completed. - return - default: - } - - // Run our callback or note that our upstream failed - var diags tfdiags.Diagnostics - var upstreamFailed bool - if depsSuccess { - log.Printf("[TRACE] dag/walk: visiting %q", VertexName(v)) - diags = w.Callback(v) - } else { - log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v)) - // This won't be displayed to the user because we'll set upstreamFailed, - // but we need to ensure there's at least one error in here so that - // the failures will cascade downstream. - diags = diags.Append(errors.New("upstream dependencies failed")) - upstreamFailed = true - } - - // Record the result (we must do this after execution because we mustn't - // hold diagsLock while visiting a vertex.) - w.diagsLock.Lock() - if w.diagsMap == nil { - w.diagsMap = make(map[Vertex]tfdiags.Diagnostics) - } - w.diagsMap[v] = diags - if w.upstreamFailed == nil { - w.upstreamFailed = make(map[Vertex]struct{}) - } - if upstreamFailed { - w.upstreamFailed[v] = struct{}{} - } - w.diagsLock.Unlock() -} - -func (w *Walker) waitDeps( - v Vertex, - deps map[Vertex]<-chan struct{}, - doneCh chan<- bool, - cancelCh <-chan struct{}) { - - // For each dependency given to us, wait for it to complete - for dep, depCh := range deps { - DepSatisfied: - for { - select { - case <-depCh: - // Dependency satisfied! - break DepSatisfied - - case <-cancelCh: - // Wait cancelled. Note that we didn't satisfy dependencies - // so that anything waiting on us also doesn't run. - doneCh <- false - return - - case <-time.After(time.Second * 5): - log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q", - VertexName(v), VertexName(dep)) - } - } - } - - // Dependencies satisfied! 
We need to check if any errored - w.diagsLock.Lock() - defer w.diagsLock.Unlock() - for dep := range deps { - if w.diagsMap[dep].HasErrors() { - // One of our dependencies failed, so return false - doneCh <- false - return - } - } - - // All dependencies satisfied and successful - doneCh <- true -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go deleted file mode 100644 index b86bd7923a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go +++ /dev/null @@ -1,63 +0,0 @@ -package earlyconfig - -import ( - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// A Config is a node in the tree of modules within a configuration. -// -// The module tree is constructed by following ModuleCall instances recursively -// through the root module transitively into descendent modules. -type Config struct { - // RootModule points to the Config for the root module within the same - // module tree as this module. If this module _is_ the root module then - // this is self-referential. - Root *Config - - // ParentModule points to the Config for the module that directly calls - // this module. If this is the root module then this field is nil. - Parent *Config - - // Path is a sequence of module logical names that traverse from the root - // module to this config. Path is empty for the root module. - // - // This should only be used to display paths to the end-user in rare cases - // where we are talking about the static module tree, before module calls - // have been resolved. In most cases, an addrs.ModuleInstance describing - // a node in the dynamic module tree is better, since it will then include - // any keys resulting from evaluating "count" and "for_each" arguments. - Path addrs.Module - - // ChildModules points to the Config for each of the direct child modules - // called from this module. The keys in this map match the keys in - // Module.ModuleCalls. - Children map[string]*Config - - // Module points to the object describing the configuration for the - // various elements (variables, resources, etc) defined by this module. - Module *tfconfig.Module - - // CallPos is the source position for the header of the module block that - // requested this module. - // - // This field is meaningless for the root module, where its contents are undefined. - CallPos tfconfig.SourcePos - - // SourceAddr is the source address that the referenced module was requested - // from, as specified in configuration. - // - // This field is meaningless for the root module, where its contents are undefined. - SourceAddr string - - // Version is the specific version that was selected for this module, - // based on version constraints given in configuration. - // - // This field is nil if the module was loaded from a non-registry source, - // since versions are not supported for other sources. - // - // This field is meaningless for the root module, where it will always - // be nil. 
- Version *version.Version -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go deleted file mode 100644 index 3707f2738d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go +++ /dev/null @@ -1,144 +0,0 @@ -package earlyconfig - -import ( - "fmt" - "sort" - "strings" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// BuildConfig constructs a Config from a root module by loading all of its -// descendent modules via the given ModuleWalker. -func BuildConfig(root *tfconfig.Module, walker ModuleWalker) (*Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - cfg := &Config{ - Module: root, - } - cfg.Root = cfg // Root module is self-referential. - cfg.Children, diags = buildChildModules(cfg, walker) - return cfg, diags -} - -func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := map[string]*Config{} - calls := parent.Module.ModuleCalls - - // We'll sort the calls by their local names so that they'll appear in a - // predictable order in any logging that's produced during the walk. - callNames := make([]string, 0, len(calls)) - for k := range calls { - callNames = append(callNames, k) - } - sort.Strings(callNames) - - for _, callName := range callNames { - call := calls[callName] - path := make([]string, len(parent.Path)+1) - copy(path, parent.Path) - path[len(path)-1] = call.Name - - var vc version.Constraints - if strings.TrimSpace(call.Version) != "" { - var err error - vc, err = version.NewConstraint(call.Version) - if err != nil { - diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ - Severity: tfconfig.DiagError, - Summary: "Invalid version constraint", - Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid version constraint %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Version, err), - })) - continue - } - } - - req := ModuleRequest{ - Name: call.Name, - Path: path, - SourceAddr: call.Source, - VersionConstraints: vc, - Parent: parent, - CallPos: call.Pos, - } - - mod, ver, modDiags := walker.LoadModule(&req) - diags = append(diags, modDiags...) - if mod == nil { - // nil can be returned if the source address was invalid and so - // nothing could be loaded whatsoever. LoadModule should've - // returned at least one error diagnostic in that case. - continue - } - - child := &Config{ - Parent: parent, - Root: parent.Root, - Path: path, - Module: mod, - CallPos: call.Pos, - SourceAddr: call.Source, - Version: ver, - } - - child.Children, modDiags = buildChildModules(child, walker) - diags = diags.Append(modDiags) - - ret[call.Name] = child - } - - return ret, diags -} - -// ModuleRequest is used as part of the ModuleWalker interface used with -// function BuildConfig. -type ModuleRequest struct { - // Name is the "logical name" of the module call within configuration. - // This is provided in case the name is used as part of a storage key - // for the module, but implementations must otherwise treat it as an - // opaque string. It is guaranteed to have already been validated as an - // HCL identifier and UTF-8 encoded. 
- Name string - - // Path is a list of logical names that traverse from the root module to - // this module. This can be used, for example, to form a lookup key for - // each distinct module call in a configuration, allowing for multiple - // calls with the same name at different points in the tree. - Path addrs.Module - - // SourceAddr is the source address string provided by the user in - // configuration. - SourceAddr string - - // VersionConstraint is the version constraint applied to the module in - // configuration. - VersionConstraints version.Constraints - - // Parent is the partially-constructed module tree node that the loaded - // module will be added to. Callers may refer to any field of this - // structure except Children, which is still under construction when - // ModuleRequest objects are created and thus has undefined content. - // The main reason this is provided is so that full module paths can - // be constructed for uniqueness. - Parent *Config - - // CallRange is the source position for the header of the "module" block - // in configuration that prompted this request. - CallPos tfconfig.SourcePos -} - -// ModuleWalker is an interface used with BuildConfig. -type ModuleWalker interface { - LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) -} - -// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps -// a callback function, for more convenient use of that interface. -type ModuleWalkerFunc func(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) - -func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { - return f(req) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go deleted file mode 100644 index b2e1807eb7..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go +++ /dev/null @@ -1,78 +0,0 @@ -package earlyconfig - -import ( - "fmt" - - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func wrapDiagnostics(diags tfconfig.Diagnostics) tfdiags.Diagnostics { - ret := make(tfdiags.Diagnostics, len(diags)) - for i, diag := range diags { - ret[i] = wrapDiagnostic(diag) - } - return ret -} - -func wrapDiagnostic(diag tfconfig.Diagnostic) tfdiags.Diagnostic { - return wrappedDiagnostic{ - d: diag, - } -} - -type wrappedDiagnostic struct { - d tfconfig.Diagnostic -} - -func (d wrappedDiagnostic) Severity() tfdiags.Severity { - switch d.d.Severity { - case tfconfig.DiagError: - return tfdiags.Error - case tfconfig.DiagWarning: - return tfdiags.Warning - default: - // Should never happen since there are no other severities - return 0 - } -} - -func (d wrappedDiagnostic) Description() tfdiags.Description { - // Since the inspect library doesn't produce precise source locations, - // we include the position information as part of the error message text. - // See the comment inside method "Source" for more information. 
- switch { - case d.d.Pos == nil: - return tfdiags.Description{ - Summary: d.d.Summary, - Detail: d.d.Detail, - } - case d.d.Detail != "": - return tfdiags.Description{ - Summary: d.d.Summary, - Detail: fmt.Sprintf("On %s line %d: %s", d.d.Pos.Filename, d.d.Pos.Line, d.d.Detail), - } - default: - return tfdiags.Description{ - Summary: fmt.Sprintf("%s (on %s line %d)", d.d.Summary, d.d.Pos.Filename, d.d.Pos.Line), - } - } -} - -func (d wrappedDiagnostic) Source() tfdiags.Source { - // Since the inspect library is constrained by the lowest common denominator - // between legacy HCL and modern HCL, it only returns ranges at whole-line - // granularity, and that isn't sufficient to populate a tfdiags.Source - // and so we'll just omit ranges altogether and include the line number in - // the Description text. - // - // Callers that want to return nicer errors should consider reacting to - // earlyconfig errors by attempting a follow-up parse with the normal - // config loader, which can produce more precise source location - // information. - return tfdiags.Source{} -} - -func (d wrappedDiagnostic) FromExpr() *tfdiags.FromExpr { - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go deleted file mode 100644 index a9cf10f37c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Package earlyconfig is a specialized alternative to the top-level "configs" -// package that does only shallow processing of configuration and is therefore -// able to be much more liberal than the full config loader in what it accepts. -// -// In particular, it can accept both current and legacy HCL syntax, and it -// ignores top-level blocks that it doesn't recognize. These two characteristics -// make this package ideal for dependency-checking use-cases so that we are -// more likely to be able to return an error message about an explicit -// incompatibility than to return a less-actionable message about a construct -// not being supported. -// -// However, its liberal approach also means it should be used sparingly. It -// exists primarily for "terraform init", so that it is able to detect -// incompatibilities more robustly when installing dependencies. For most -// other use-cases, use the "configs" and "configs/configload" packages. -// -// Package earlyconfig is a wrapper around the terraform-config-inspect -// codebase, adding to it just some helper functionality for Terraform's own -// use-cases. -package earlyconfig diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go deleted file mode 100644 index 11eff2eb69..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go +++ /dev/null @@ -1,13 +0,0 @@ -package earlyconfig - -import ( - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// LoadModule loads some top-level metadata for the module in the given -// directory. 
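The LoadModule wrapper whose doc comment closes the hunk above (its body follows) delegates straight to terraform-config-inspect, only wrapping the diagnostics. A runnable sketch of the underlying call, under the assumption of a local module directory (the path is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-config-inspect/tfconfig"
)

func main() {
	// tfconfig performs the shallow parse that earlyconfig wraps,
	// accepting both legacy and modern HCL.
	mod, diags := tfconfig.LoadModule("./example-module") // hypothetical path
	if diags.HasErrors() {
		panic(diags.Err())
	}
	for name, call := range mod.ModuleCalls {
		fmt.Printf("module %q -> source %q version %q\n", name, call.Source, call.Version)
	}
}
```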
-func LoadModule(dir string) (*tfconfig.Module, tfdiags.Diagnostics) { - mod, diags := tfconfig.LoadModule(dir) - return mod, wrapDiagnostics(diags) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go deleted file mode 100644 index 1bb7b9f2f9..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go +++ /dev/null @@ -1,152 +0,0 @@ -package flatmap - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" -) - -// Expand takes a map and a key (prefix) and expands that value into -// a more complex structure. This is the reverse of the Flatten operation. -func Expand(m map[string]string, key string) interface{} { - // If the key is exactly a key in the map, just return it - if v, ok := m[key]; ok { - if v == "true" { - return true - } else if v == "false" { - return false - } - - return v - } - - // Check if the key is an array, and if so, expand the array - if v, ok := m[key+".#"]; ok { - // If the count of the key is unknown, then just put the unknown - // value in the value itself. This will be detected by Terraform - // core later. - if v == hcl2shim.UnknownVariableValue { - return v - } - - return expandArray(m, key) - } - - // Check if this is a prefix in the map - prefix := key + "." - for k := range m { - if strings.HasPrefix(k, prefix) { - return expandMap(m, prefix) - } - } - - return nil -} - -func expandArray(m map[string]string, prefix string) []interface{} { - num, err := strconv.ParseInt(m[prefix+".#"], 0, 0) - if err != nil { - panic(err) - } - - // If the number of elements in this array is 0, then return an - // empty slice as there is nothing to expand. Trying to expand it - // anyway could lead to crashes as any child maps, arrays or sets - // that no longer exist are still shown as empty with a count of 0. - if num == 0 { - return []interface{}{} - } - - // NOTE: "num" is not necessarily accurate, e.g. if a user tampers - // with state, so the following code should not crash when given a - // number of items more or less than what's given in num. The - // num key is mainly just a hint that this is a list or set. - - // The Schema "Set" type stores its values in an array format, but - // using numeric hash values instead of ordinal keys. Take the set - // of keys regardless of value, and expand them in numeric order. - // See GH-11042 for more details. 
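Expand, shown above, is driven purely by key shape: a ".#" suffix is a list/set count hint, ordinal segments address elements, and a bare prefix falls through to map expansion. A simplified, self-contained sketch of just the list case (the real expandArray additionally tolerates inaccurate counts, set hash keys, "~" computed markers, and unknown values):

```go
package main

import (
	"fmt"
	"strconv"
)

// expandList is a simplified stand-in for flatmap.Expand on list keys:
// it reads the ".#" count hint and collects the ordinal elements.
func expandList(m map[string]string, key string) []string {
	n, err := strconv.Atoi(m[key+".#"])
	if err != nil {
		return nil
	}
	out := make([]string, 0, n)
	for i := 0; i < n; i++ {
		out = append(out, m[key+"."+strconv.Itoa(i)])
	}
	return out
}

func main() {
	// Flatmap encoding of the list ["a", "b"] under the key "tags".
	m := map[string]string{
		"tags.#": "2",
		"tags.0": "a",
		"tags.1": "b",
	}
	fmt.Println(expandList(m, "tags")) // [a b]
}
```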
- keySet := map[int]bool{} - computed := map[string]bool{} - for k := range m { - if !strings.HasPrefix(k, prefix+".") { - continue - } - - key := k[len(prefix)+1:] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - - // skip the count value - if key == "#" { - continue - } - - // strip the computed flag if there is one - if strings.HasPrefix(key, "~") { - key = key[1:] - computed[key] = true - } - - k, err := strconv.Atoi(key) - if err != nil { - panic(err) - } - keySet[int(k)] = true - } - - keysList := make([]int, 0, num) - for key := range keySet { - keysList = append(keysList, key) - } - sort.Ints(keysList) - - result := make([]interface{}, len(keysList)) - for i, key := range keysList { - keyString := strconv.Itoa(key) - if computed[keyString] { - keyString = "~" + keyString - } - result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString)) - } - - return result -} - -func expandMap(m map[string]string, prefix string) map[string]interface{} { - // Submaps may not have a '%' key, so we can't count on this value being - // here. If we don't have a count, just proceed as if we have a map. - if count, ok := m[prefix+"%"]; ok && count == "0" { - return map[string]interface{}{} - } - - result := make(map[string]interface{}) - for k := range m { - if !strings.HasPrefix(k, prefix) { - continue - } - - key := k[len(prefix):] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - if _, ok := result[key]; ok { - continue - } - - // skip the map count value - if key == "%" { - continue - } - - result[key] = Expand(m, k[:len(prefix)+len(key)]) - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go deleted file mode 100644 index 9ff6e42652..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go +++ /dev/null @@ -1,71 +0,0 @@ -package flatmap - -import ( - "fmt" - "reflect" -) - -// Flatten takes a structure and turns it into a flat map[string]string. - -// - -// Within the "thing" parameter, only primitive values are allowed. Structs are - -// not supported. Therefore, it can only be slices, maps, primitives, and - -// any combination of those together. - -// - -// See the tests for examples of what inputs are turned into.
-func Flatten(thing map[string]interface{}) Map { - result := make(map[string]string) - - for k, raw := range thing { - flatten(result, k, reflect.ValueOf(raw)) - } - - return Map(result) -} - -func flatten(result map[string]string, prefix string, v reflect.Value) { - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - result[prefix] = "true" - } else { - result[prefix] = "false" - } - case reflect.Int: - result[prefix] = fmt.Sprintf("%d", v.Int()) - case reflect.Map: - flattenMap(result, prefix, v) - case reflect.Slice: - flattenSlice(result, prefix, v) - case reflect.String: - result[prefix] = v.String() - default: - panic(fmt.Sprintf("Unknown: %s", v)) - } -} - -func flattenMap(result map[string]string, prefix string, v reflect.Value) { - for _, k := range v.MapKeys() { - if k.Kind() == reflect.Interface { - k = k.Elem() - } - - if k.Kind() != reflect.String { - panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) - } - - flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) - } -} - -func flattenSlice(result map[string]string, prefix string, v reflect.Value) { - prefix = prefix + "." - - result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) - for i := 0; i < v.Len(); i++ { - flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go deleted file mode 100644 index 435e04a39d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go +++ /dev/null @@ -1,82 +0,0 @@ -package flatmap - -import ( - "strings" -) - -// Map is a wrapper around map[string]string that provides some helpers -// above it that assume the map is in the format that flatmap expects -// (the result of Flatten). -// -// All modifying functions such as Delete are done in-place unless -// otherwise noted. -type Map map[string]string - -// Contains returns true if the map contains the given key. -func (m Map) Contains(key string) bool { - for _, k := range m.Keys() { - if k == key { - return true - } - } - - return false -} - -// Delete deletes a key out of the map with the given prefix. -func (m Map) Delete(prefix string) { - for k := range m { - match := k == prefix - if !match { - if !strings.HasPrefix(k, prefix) { - continue - } - - if k[len(prefix):len(prefix)+1] != "." { - continue - } - } - - delete(m, k) - } -} - -// Keys returns all of the top-level keys in this map -func (m Map) Keys() []string { - ks := make(map[string]struct{}) - for k := range m { - idx := strings.Index(k, ".") - if idx == -1 { - idx = len(k) - } - - ks[k[:idx]] = struct{}{} - } - - result := make([]string, 0, len(ks)) - for k := range ks { - result = append(result, k) - } - - return result -} - -// Merge merges the contents of the other Map into this one. -// -// This merge is smarter than a simple map iteration because it -// will fully replace arrays and other complex structures that -// are present in this map with the other map's. For example, if -// this map has a 3 element "foo" list, and m2 has a 2 element "foo" -// list, then the result will be that m has a 2 element "foo" -// list. 
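The Merge doc comment above promises prefix-replacing semantics, and its body follows below. A self-contained sketch of that behavior, reimplemented without the Map helpers (the keys are hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// merge mirrors the documented Map.Merge semantics: for each top-level
// key present in m2, drop the whole prefix from m, then copy m2's entries.
func merge(m, m2 map[string]string) {
	for k2 := range m2 {
		top := k2
		if i := strings.Index(k2, "."); i != -1 {
			top = k2[:i]
		}
		for k := range m {
			if k == top || strings.HasPrefix(k, top+".") {
				delete(m, k)
			}
		}
	}
	for k, v := range m2 {
		m[k] = v
	}
}

func main() {
	m := map[string]string{"foo.#": "3", "foo.0": "a", "foo.1": "b", "foo.2": "c"}
	m2 := map[string]string{"foo.#": "2", "foo.0": "x", "foo.1": "y"}
	merge(m, m2)
	fmt.Println(m) // the 3-element list is fully replaced by the 2-element one
}
```

Deleting the whole prefix first is what lets a shorter incoming list fully replace a longer existing one, rather than leaving stale trailing elements behind.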
-func (m Map) Merge(m2 Map) { - for _, prefix := range m2.Keys() { - m.Delete(prefix) - - for k, v := range m2 { - if strings.HasPrefix(k, prefix) { - m[k] = v - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go deleted file mode 100644 index be5db8b982..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go +++ /dev/null @@ -1,214 +0,0 @@ -package config - -import ( - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/flatmap" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// Validator is a helper for validating the configuration -// of your resource, resource provider, etc. -// -// At the most basic level, set the Required and Optional lists to be -// specifiers of keys that are required or optional. If a key shows up -// that isn't in one of these two lists, then an error is generated. -// -// The "specifiers" allowed here form a fairly rich syntax for describing -// the format of your configuration: -// -// * Basic keys are just strings. For example: "foo" will match the -// "foo" key. -// -// * Nested structure keys can be matched by doing -// "listener.*.foo". This will verify that there is at least one -// listener element that has the "foo" key set. -// -// * The existence of a nested structure can be checked by simply -// doing "listener.*" which will verify that there is at least -// one element in the "listener" structure. This is NOT -// validating that "listener" is an array. It is validating -// that it is a nested structure in the configuration. -// -type Validator struct { - Required []string - Optional []string } - -func (v *Validator) Validate( - c *terraform.ResourceConfig) (ws []string, es []error) { - // Flatten the configuration so it is easier to reason about - flat := flatmap.Flatten(c.Raw) - - keySet := make(map[string]validatorKey) - for i, vs := range [][]string{v.Required, v.Optional} { - req := i == 0 - for _, k := range vs { - vk, err := newValidatorKey(k, req) - if err != nil { - es = append(es, err) - continue - } - - keySet[k] = vk - } - } - - purged := make([]string, 0) - for _, kv := range keySet { - p, w, e := kv.Validate(flat) - if len(w) > 0 { - ws = append(ws, w...) - } - if len(e) > 0 { - es = append(es, e...) - } - - purged = append(purged, p...) - } - - // Delete all the keys we processed in order to find - // the unknown keys. - for _, p := range purged { - delete(flat, p) - } - - // The rest are unknown - for k := range flat { - es = append(es, fmt.Errorf("Unknown configuration: %s", k)) - } - - return -} - -type validatorKey interface { - // Validate validates the given configuration and returns viewed keys, - // warnings, and errors.
- Validate(map[string]string) ([]string, []string, []error) -} - -func newValidatorKey(k string, req bool) (validatorKey, error) { - var result validatorKey - - parts := strings.Split(k, ".") - if len(parts) > 1 && parts[1] == "*" { - result = &nestedValidatorKey{ - Parts: parts, - Required: req, - } - } else { - result = &basicValidatorKey{ - Key: k, - Required: req, - } - } - - return result, nil -} - -// basicValidatorKey validates keys that are basic such as "foo" -type basicValidatorKey struct { - Key string - Required bool -} - -func (v *basicValidatorKey) Validate( - m map[string]string) ([]string, []string, []error) { - for k := range m { - // If we have the exact key, it's a match - if k == v.Key { - return []string{k}, nil, nil - } - } - - if !v.Required { - return nil, nil, nil - } - - return nil, nil, []error{fmt.Errorf( - "Key not found: %s", v.Key)} -} - -type nestedValidatorKey struct { - Parts []string - Required bool -} - -func (v *nestedValidatorKey) validate( - m map[string]string, - prefix string, - offset int) ([]string, []string, []error) { - if offset >= len(v.Parts) { - // We're at the end. Look for a specific key. - v2 := &basicValidatorKey{Key: prefix, Required: v.Required} - return v2.Validate(m) - } - - current := v.Parts[offset] - - // If we're at offset 0, special case to start at the next one. - if offset == 0 { - return v.validate(m, current, offset+1) - } - - // Determine if we're doing a "for all" or a specific key - if current != "*" { - // We're looking at a specific key, continue on. - return v.validate(m, prefix+"."+current, offset+1) - } - - // We're doing a "for all", so we loop over. - countStr, ok := m[prefix+".#"] - if !ok { - if !v.Required { - // It wasn't required, so it's no problem. - return nil, nil, nil - } - - return nil, nil, []error{fmt.Errorf( - "Key not found: %s", prefix)} - } - - count, err := strconv.ParseInt(countStr, 0, 0) - if err != nil { - // This shouldn't happen if flatmap works properly - panic("invalid flatmap array") - } - - var e []error - var w []string - u := make([]string, 1, count+1) - u[0] = prefix + ".#" - for i := 0; i < int(count); i++ { - prefix := fmt.Sprintf("%s.%d", prefix, i) - - // Mark that we saw this specific key - u = append(u, prefix) - - // Mark all prefixes of this - for k := range m { - if !strings.HasPrefix(k, prefix+".") { - continue - } - u = append(u, k) - } - - // If we have more parts, then validate deeper - if offset+1 < len(v.Parts) { - u2, w2, e2 := v.validate(m, prefix, offset+1) - - u = append(u, u2...) - w = append(w, w2...) - e = append(e, e2...) - } - } - - return u, w, e -} - -func (v *nestedValidatorKey) Validate( - m map[string]string) ([]string, []string, []error) { - return v.validate(m, "", 0) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go deleted file mode 100644 index 54899bc652..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go +++ /dev/null @@ -1,24 +0,0 @@ -package didyoumean - -import ( - "github.com/agext/levenshtein" -) - -// NameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string.
-// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func NameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/doc.go deleted file mode 100644 index 82b5937bfe..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package plugin contains types and functions to help Terraform plugins -// implement the plugin rpc interface. -// The primary Provider type will be responsible for converting from the grpc -// wire protocol to the types and methods known to the provider -// implementations. -package plugin diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/grpc_provider.go deleted file mode 100644 index 79375e0ba5..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/grpc_provider.go +++ /dev/null @@ -1,1408 +0,0 @@ -package plugin - -import ( - "encoding/json" - "fmt" - "log" - "strconv" - - "github.com/zclconf/go-cty/cty" - ctyconvert "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/msgpack" - context "golang.org/x/net/context" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -const newExtraKey = "_new_extra_shim" - -// NewGRPCProviderServerShim wraps a terraform.ResourceProvider in a -// proto.ProviderServer implementation. If the provided provider is not a -// *schema.Provider, this will return nil. -func NewGRPCProviderServerShim(p terraform.ResourceProvider) *GRPCProviderServer { - sp, ok := p.(*schema.Provider) - if !ok { - return nil - } - - return &GRPCProviderServer{ - provider: sp, - } -} - -// GRPCProviderServer handles the server, or plugin side of the rpc connection.
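Before the gRPC server shim continues below: the didyoumean helper above is a thin loop over agext/levenshtein with an experimentally chosen edit-distance threshold of 3. A runnable sketch (the misspelled input is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

// nameSuggestion mirrors the deleted helper: return the first suggestion
// within edit distance 3 of the given name, or "" if none is close enough.
func nameSuggestion(given string, suggestions []string) string {
	for _, s := range suggestions {
		if levenshtein.Distance(given, s, nil) < 3 {
			return s
		}
	}
	return ""
}

func main() {
	fmt.Println(nameSuggestion("versoin", []string{"version", "providers"})) // "version"
}
```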
-type GRPCProviderServer struct { - provider *schema.Provider -} - -func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProviderSchema_Request) (*proto.GetProviderSchema_Response, error) { - // Here we are certain that the provider is being called through grpc, so - // make sure the feature flag for helper/schema is set - schema.SetProto5() - - resp := &proto.GetProviderSchema_Response{ - ResourceSchemas: make(map[string]*proto.Schema), - DataSourceSchemas: make(map[string]*proto.Schema), - } - - resp.Provider = &proto.Schema{ - Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()), - } - - for typ, res := range s.provider.ResourcesMap { - resp.ResourceSchemas[typ] = &proto.Schema{ - Version: int64(res.SchemaVersion), - Block: convert.ConfigSchemaToProto(res.CoreConfigSchema()), - } - } - - for typ, dat := range s.provider.DataSourcesMap { - resp.DataSourceSchemas[typ] = &proto.Schema{ - Version: int64(dat.SchemaVersion), - Block: convert.ConfigSchemaToProto(dat.CoreConfigSchema()), - } - } - - return resp, nil -} - -func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block { - return schema.InternalMap(s.provider.Schema).CoreConfigSchema() -} - -func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block { - res := s.provider.ResourcesMap[name] - return res.CoreConfigSchema() -} - -func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema.Block { - dat := s.provider.DataSourcesMap[name] - return dat.CoreConfigSchema() -} - -func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto.PrepareProviderConfig_Request) (*proto.PrepareProviderConfig_Response, error) { - resp := &proto.PrepareProviderConfig_Response{} - - schemaBlock := s.getProviderSchemaBlock() - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // lookup any required, top-level attributes that are Null, and see if we - // have a Default value available. 
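Nearly every handler in this file begins with msgpack.Unmarshal against the schema's implied cty type and ends with the matching Marshal back onto the wire. A self-contained sketch of that round-trip using go-cty directly (the object type and values are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	// Stand-in for schemaBlock.ImpliedType() in the server shim.
	ty := cty.Object(map[string]cty.Type{
		"id":   cty.String,
		"tags": cty.List(cty.String),
	})

	val := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.StringVal("abc123"),
		"tags": cty.ListVal([]cty.Value{cty.StringVal("a")}),
	})

	// Both directions require the type, just like the handlers above.
	b, err := msgpack.Marshal(val, ty)
	if err != nil {
		panic(err)
	}
	back, err := msgpack.Unmarshal(b, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.GetAttr("id")) // cty.StringVal("abc123")
}
```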
- configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { - // we're only looking for top-level attributes - if len(path) != 1 { - return val, nil - } - - // nothing to do if we already have a value - if !val.IsNull() { - return val, nil - } - - // get the Schema definition for this attribute - getAttr, ok := path[0].(cty.GetAttrStep) - // these should all exist, but just ignore anything strange - if !ok { - return val, nil - } - - attrSchema := s.provider.Schema[getAttr.Name] - // continue to ignore anything that doesn't match - if attrSchema == nil { - return val, nil - } - - // this is deprecated, so don't set it - if attrSchema.Deprecated != "" || attrSchema.Removed != "" { - return val, nil - } - - // find a default value if it exists - def, err := attrSchema.DefaultValue() - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) - return val, err - } - - // no default - if def == nil { - return val, nil - } - - // create a cty.Value and make sure it's the correct type - tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) - - // helper/schema used to allow setting "" to a bool - if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { - // return a warning about the conversion - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) - tmpVal = cty.False - } - - val, err = ctyconvert.Convert(tmpVal, val.Type()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) - } - - return val, err - }) - if err != nil { - // any error here was already added to the diagnostics - return resp, nil - } - - configVal, err = schemaBlock.CoerceValue(configVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Ensure there are no nulls that will cause helper/schema to panic. 
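The cty.Transform above backfills null top-level attributes from their declared defaults via DefaultValue. A minimal sketch of that lookup against the SDK v1 public helper/schema API (the attribute and its default are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func main() {
	// A hypothetical optional attribute with a static default; DefaultValue
	// would consult DefaultFunc instead if one were set.
	s := &schema.Schema{
		Type:     schema.TypeString,
		Optional: true,
		Default:  "https://api.github.com/",
	}

	def, err := s.DefaultValue()
	if err != nil {
		panic(err)
	}
	fmt.Println(def) // "https://api.github.com/"
}
```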
- if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - - warns, errs := s.provider.Validate(config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) - - preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - resp.PreparedConfig = &proto.DynamicValue{Msgpack: preparedConfigMP} - - return resp, nil -} - -func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *proto.ValidateResourceTypeConfig_Request) (*proto.ValidateResourceTypeConfig_Response, error) { - resp := &proto.ValidateResourceTypeConfig_Response{} - - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - - warns, errs := s.provider.ValidateResource(req.TypeName, config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) - - return resp, nil -} - -func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *proto.ValidateDataSourceConfig_Request) (*proto.ValidateDataSourceConfig_Response, error) { - resp := &proto.ValidateDataSourceConfig_Response{} - - schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - - warns, errs := s.provider.ValidateDataSource(req.TypeName, config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) - - return resp, nil -} - -func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.UpgradeResourceState_Request) (*proto.UpgradeResourceState_Response, error) { - resp := &proto.UpgradeResourceState_Response{} - - res := s.provider.ResourcesMap[req.TypeName] - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - version := int(req.Version) - - jsonMap := map[string]interface{}{} - var err error - - switch { - // We first need to upgrade a flatmap state if it exists. - // There should never be both a JSON and Flatmap state in the request. - case len(req.RawState.Flatmap) > 0: - jsonMap, version, err = s.upgradeFlatmapState(version, req.RawState.Flatmap, res) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - // if there's a JSON state, we need to decode it. 
- case len(req.RawState.Json) > 0: - if res.UseJSONNumber { - err = unmarshalJSON(req.RawState.Json, &jsonMap) - } else { - err = json.Unmarshal(req.RawState.Json, &jsonMap) - } - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - default: - log.Println("[DEBUG] no state provided to upgrade") - return resp, nil - } - - // complete the upgrade of the JSON states - jsonMap, err = s.upgradeJSONState(version, jsonMap, res) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // The provider isn't required to clean out removed fields - s.removeAttributes(jsonMap, schemaBlock.ImpliedType()) - - // now we need to turn the state into the default json representation, so - // that it can be re-decoded using the actual schema. - val, err := schema.JSONMapToStateValue(jsonMap, schemaBlock) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Now we need to make sure blocks are represented correctly, which means - // that missing blocks are empty collections, rather than null. - // First we need to CoerceValue to ensure that all object types match. - val, err = schemaBlock.CoerceValue(val) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - // Normalize the value and fill in any missing blocks. - val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock) - - // encode the final state to the expected msgpack format - newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - resp.UpgradedState = &proto.DynamicValue{Msgpack: newStateMP} - return resp, nil -} - -// upgradeFlatmapState takes a legacy flatmap state, upgrades it using -// MigrateState if necessary, and converts it to the new JSON state format -// decoded as a map[string]interface{}. -// upgradeFlatmapState returns the json map along with the corresponding schema -// version. -func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) { - // this will be the version we've upgraded to, defaulting to the given - // version in case no migration was called. - upgradedVersion := version - - // first determine if we need to call the legacy MigrateState func - requiresMigrate := version < res.SchemaVersion - - schemaType := res.CoreConfigSchema().ImpliedType() - - // if there are any StateUpgraders, then we need to only compare - // against the first version there - if len(res.StateUpgraders) > 0 { - requiresMigrate = version < res.StateUpgraders[0].Version - } - - if requiresMigrate && res.MigrateState == nil { - // Providers were previously allowed to bump the version - // without declaring MigrateState. - // If there are further upgraders, then we've only updated that far.
- if len(res.StateUpgraders) > 0 { - schemaType = res.StateUpgraders[0].Type - upgradedVersion = res.StateUpgraders[0].Version - } - } else if requiresMigrate { - is := &terraform.InstanceState{ - ID: m["id"], - Attributes: m, - Meta: map[string]interface{}{ - "schema_version": strconv.Itoa(version), - }, - } - - is, err := res.MigrateState(version, is, s.provider.Meta()) - if err != nil { - return nil, 0, err - } - - // re-assign the map in case there was a copy made, making sure to keep - // the ID - m := is.Attributes - m["id"] = is.ID - - // if there are further upgraders, then we've only updated that far - if len(res.StateUpgraders) > 0 { - schemaType = res.StateUpgraders[0].Type - upgradedVersion = res.StateUpgraders[0].Version - } - } else { - // the schema version may be newer than the MigrateState functions - // handled and older than the current, but still stored in the flatmap - // form. If that's the case, we need to find the correct schema type to - // convert the state. - for _, upgrader := range res.StateUpgraders { - if upgrader.Version == version { - schemaType = upgrader.Type - break - } - } - } - - // now we know the state is up to the latest version that handled the - // flatmap format state. Now we can upgrade the format and continue from - // there. - newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType) - if err != nil { - return nil, 0, err - } - - var jsonMap map[string]interface{} - if res.UseJSONNumber { - jsonMap, err = schema.StateValueToJSONMapJSONNumber(newConfigVal, schemaType) - } else { - jsonMap, err = schema.StateValueToJSONMap(newConfigVal, schemaType) - } - - return jsonMap, upgradedVersion, err -} - -func (s *GRPCProviderServer) upgradeJSONState(version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) { - var err error - - for _, upgrader := range res.StateUpgraders { - if version != upgrader.Version { - continue - } - - m, err = upgrader.Upgrade(m, s.provider.Meta()) - if err != nil { - return nil, err - } - version++ - } - - return m, nil -} - -// Remove any attributes no longer present in the schema, so that the json can -// be correctly decoded. -func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { - // we're only concerned with finding maps that correspond to object - // attributes - switch v := v.(type) { - case []interface{}: - // If these aren't blocks the next call will be a noop - if ty.IsListType() || ty.IsSetType() { - eTy := ty.ElementType() - for _, eV := range v { - s.removeAttributes(eV, eTy) - } - } - return - case map[string]interface{}: - // map blocks aren't yet supported, but handle this just in case - if ty.IsMapType() { - eTy := ty.ElementType() - for _, eV := range v { - s.removeAttributes(eV, eTy) - } - return - } - - if ty == cty.DynamicPseudoType { - log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v) - return - } - - if !ty.IsObjectType() { - // This shouldn't happen, and will fail to decode further on, so - // there's no need to handle it here.
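Stepping back to the upgrade path for a moment: upgradeJSONState above applies provider-declared StateUpgraders in version order, bumping the version after each one. For reference, a hedged sketch of what such a declaration looks like on the provider side, using the v1 signatures this shim calls (the resource, its fields, and the title-to-name rename are all hypothetical):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
	"github.com/zclconf/go-cty/cty"
)

// resourceExample declares one upgrader that migrates version 0 state
// (which had a "title" attribute) to version 1 (which names it "name").
func resourceExample() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Required: true},
		},
		StateUpgraders: []schema.StateUpgrader{{
			Version: 0, // runs when the stored state is at version 0
			Type: cty.Object(map[string]cty.Type{
				"id":    cty.String,
				"title": cty.String,
			}),
			Upgrade: func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
				rawState["name"] = rawState["title"] // hypothetical rename
				delete(rawState, "title")
				return rawState, nil
			},
		}},
	}
}

func main() {
	fmt.Println(resourceExample().SchemaVersion) // 1
}
```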
- log.Printf("[WARN] unexpected type %#v for map in json state", ty) - return - } - - attrTypes := ty.AttributeTypes() - for attr, attrV := range v { - attrTy, ok := attrTypes[attr] - if !ok { - log.Printf("[DEBUG] attribute %q no longer present in schema", attr) - delete(v, attr) - continue - } - - s.removeAttributes(attrV, attrTy) - } - } -} - -func (s *GRPCProviderServer) Stop(_ context.Context, _ *proto.Stop_Request) (*proto.Stop_Response, error) { - resp := &proto.Stop_Response{} - - err := s.provider.Stop() - if err != nil { - resp.Error = err.Error() - } - - return resp, nil -} - -func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) { - resp := &proto.Configure_Response{} - - schemaBlock := s.getProviderSchemaBlock() - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - s.provider.TerraformVersion = req.TerraformVersion - - // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - err = s.provider.Configure(config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - - return resp, nil -} - -func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) { - resp := &proto.ReadResource_Response{ - // helper/schema did previously handle private data during refresh, but - // core is now going to expect this to be maintained in order to - // persist it in the state. - Private: req.Private, - } - - res := s.provider.ResourcesMap[req.TypeName] - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - stateVal, err := msgpack.Unmarshal(req.CurrentState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - instanceState, err := res.ShimInstanceStateFromValue(stateVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - private := make(map[string]interface{}) - if len(req.Private) > 0 { - if err := json.Unmarshal(req.Private, &private); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - } - instanceState.Meta = private - - newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - if newInstanceState == nil || newInstanceState.ID == "" { - // The old provider API used an empty id to signal that the remote - // object appears to have been deleted, but our new protocol expects - // to see a null value (in the cty sense) in that case. 
- newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - } - resp.NewState = &proto.DynamicValue{ - Msgpack: newStateMP, - } - return resp, nil - } - - // helper/schema should always copy the ID over, but do it again just to be safe - newInstanceState.Attributes["id"] = newInstanceState.ID - - newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - newStateVal = normalizeNullValues(newStateVal, stateVal, false) - newStateVal = copyTimeoutValues(newStateVal, stateVal) - - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - resp.NewState = &proto.DynamicValue{ - Msgpack: newStateMP, - } - - return resp, nil -} - -func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) { - resp := &proto.PlanResourceChange_Response{} - - // This is a signal to Terraform Core that we're doing the best we can to - // shim the legacy type system of the SDK onto the Terraform type system - // but we need it to cut us some slack. This setting should not be taken - // forward to any new SDK implementations, since setting it prevents us - // from catching certain classes of provider bug that can lead to - // confusing downstream errors. - resp.LegacyTypeSystem = true - - res := s.provider.ResourcesMap[req.TypeName] - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - create := priorStateVal.IsNull() - - proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // We don't usually plan destroys, but this can return early in any case. - if proposedNewStateVal.IsNull() { - resp.PlannedState = req.ProposedNewState - resp.PlannedPrivate = req.PriorPrivate - return resp, nil - } - - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - - priorState, err := res.ShimInstanceStateFromValue(priorStateVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - priorPrivate := make(map[string]interface{}) - if len(req.PriorPrivate) > 0 { - if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - } - - priorState.Meta = priorPrivate - - // Ensure there are no nulls that will cause helper/schema to panic. 
- if err := validateConfigNulls(proposedNewStateVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // turn the proposed state into a legacy configuration - cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock) - - diff, err := s.provider.SimpleDiff(info, priorState, cfg) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // if this is a new instance, we need to make sure ID is going to be computed - if create { - if diff == nil { - diff = terraform.NewInstanceDiff() - } - - diff.Attributes["id"] = &terraform.ResourceAttrDiff{ - NewComputed: true, - } - } - - if diff == nil || len(diff.Attributes) == 0 { - // schema.Provider.Diff returns nil if it ends up making a diff with no - // changes, but our new interface wants us to return an actual change - // description that _shows_ there are no changes. This is always the - // prior state, because we force a diff above if this is a new instance. - resp.PlannedState = req.PriorState - resp.PlannedPrivate = req.PriorPrivate - return resp, nil - } - - if priorState == nil { - priorState = &terraform.InstanceState{} - } - - // now we need to apply the diff to the prior state, so get the planned state - plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock) - - plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false) - - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal) - - // The old SDK code has some imprecisions that cause it to sometimes - // generate differences that the SDK itself does not consider significant - // but Terraform Core would. To avoid producing weird do-nothing diffs - // in that case, we'll check if the provider has produced something we - // think is "equivalent" to the prior state and just return the prior state - // itself if so, thus ensuring that Terraform Core will treat this as - // a no-op. See the docs for ValuesSDKEquivalent for some caveats on its - // accuracy.
- forceNoChanges := false - if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) { - plannedStateVal = priorStateVal - forceNoChanges = true - } - - // if this was creating the resource, we need to set any remaining computed - // fields - if create { - plannedStateVal = SetUnknowns(plannedStateVal, schemaBlock) - } - - plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.PlannedState = &proto.DynamicValue{ - Msgpack: plannedMP, - } - - // encode any timeouts into the diff Meta - t := &schema.ResourceTimeout{} - if err := t.ConfigDecode(res, cfg); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - if err := t.DiffEncode(diff); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Now we need to store any NewExtra values, which are where any actual - // StateFunc modified config fields are hidden. - privateMap := diff.Meta - if privateMap == nil { - privateMap = map[string]interface{}{} - } - - newExtra := map[string]interface{}{} - - for k, v := range diff.Attributes { - if v.NewExtra != nil { - newExtra[k] = v.NewExtra - } - } - privateMap[newExtraKey] = newExtra - - // the Meta field gets encoded into PlannedPrivate - plannedPrivate, err := json.Marshal(privateMap) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.PlannedPrivate = plannedPrivate - - // collect the attributes that require instance replacement, and convert - // them to cty.Paths. - var requiresNew []string - if !forceNoChanges { - for attr, d := range diff.Attributes { - if d.RequiresNew { - requiresNew = append(requiresNew, attr) - } - } - } - - // If anything requires a new resource already, or the "id" field indicates - // that we will be creating a new resource, then we need to add that to - // RequiresReplace so that core can tell if the instance is being replaced - // even if changes are being suppressed via "ignore_changes". 
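Because PlannedPrivate is plain JSON, the NewExtra stash described above is just a nested map under a reserved key; a sketch, with the key value shown illustratively:

package main

import (
	"encoding/json"
	"fmt"
)

// The real constant lives elsewhere in the deleted package; the value here
// is illustrative.
const newExtraKey = "_new_extra_shim"

func main() {
	// Plan side: stash StateFunc-modified raw values under the reserved key.
	private := map[string]interface{}{
		newExtraKey: map[string]interface{}{
			"content": "raw value before the StateFunc ran",
		},
	}
	blob, err := json.Marshal(private)
	if err != nil {
		panic(err)
	}

	// Apply side: decode the blob and recover the stashed values.
	var decoded map[string]interface{}
	if err := json.Unmarshal(blob, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("recovered NewExtra: %v\n", decoded[newExtraKey])
}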
- id := plannedStateVal.GetAttr("id") - if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() { - requiresNew = append(requiresNew, "id") - } - - requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // convert these to the protocol structures - for _, p := range requiresReplace { - resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p)) - } - - return resp, nil -} - -func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) { - resp := &proto.ApplyResourceChange_Response{ - // Start with the existing state as a fallback - NewState: req.PriorState, - } - - res := s.provider.ResourcesMap[req.TypeName] - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - - priorState, err := res.ShimInstanceStateFromValue(priorStateVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - private := make(map[string]interface{}) - if len(req.PlannedPrivate) > 0 { - if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - } - - var diff *terraform.InstanceDiff - destroy := false - - // a null state means we are destroying the instance - if plannedStateVal.IsNull() { - destroy = true - diff = &terraform.InstanceDiff{ - Attributes: make(map[string]*terraform.ResourceAttrDiff), - Meta: make(map[string]interface{}), - Destroy: true, - } - } else { - diff, err = schema.DiffFromValues(priorStateVal, plannedStateVal, stripResourceModifiers(res)) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - } - - if diff == nil { - diff = &terraform.InstanceDiff{ - Attributes: make(map[string]*terraform.ResourceAttrDiff), - Meta: make(map[string]interface{}), - } - } - - // add NewExtra Fields that may have been stored in the private data - if newExtra := private[newExtraKey]; newExtra != nil { - for k, v := range newExtra.(map[string]interface{}) { - d := diff.Attributes[k] - - if d == nil { - d = &terraform.ResourceAttrDiff{} - } - - d.NewExtra = v - diff.Attributes[k] = d - } - } - - if private != nil { - diff.Meta = private - } - - for k, d := range diff.Attributes { - // We need to turn off any RequiresNew. There could be attributes - // without changes in here inserted by helper/schema, but if they have - // RequiresNew then the state will be dropped from the ResourceData. - d.RequiresNew = false - - // Check that any "removed" attributes that don't actually exist in the - // prior state, or helper/schema will confuse itself - if d.NewRemoved { - if _, ok := priorState.Attributes[k]; !ok { - delete(diff.Attributes, k) - } - } - } - - newInstanceState, err := s.provider.Apply(info, priorState, diff) - // we record the error here, but continue processing any returned state. 
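The loop above that clears RequiresNew and prunes stale NewRemoved entries is subtle; a stand-in sketch of the pruning rule, using hypothetical types in place of the internal ones:

package main

import "fmt"

// diffEntry is a hypothetical stand-in for terraform.ResourceAttrDiff.
type diffEntry struct {
	RequiresNew bool
	NewRemoved  bool
}

func pruneDiff(diff map[string]*diffEntry, priorAttrs map[string]string) {
	for k, d := range diff {
		// RequiresNew must be cleared before apply, or state for
		// unchanged attributes would be dropped.
		d.RequiresNew = false

		// A "removed" diff only makes sense if the attribute actually
		// existed in the prior state.
		if d.NewRemoved {
			if _, ok := priorAttrs[k]; !ok {
				delete(diff, k)
			}
		}
	}
}

func main() {
	diff := map[string]*diffEntry{
		"name":  {RequiresNew: true},
		"ghost": {NewRemoved: true}, // never existed in prior state
	}
	pruneDiff(diff, map[string]string{"name": "old"})
	fmt.Println(len(diff)) // 1: "ghost" was pruned
}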
- if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - } - newStateVal := cty.NullVal(schemaBlock.ImpliedType()) - - // Always return a null value for destroy. - // While this is usually indicated by a nil state, check for missing ID or - // attributes in the case of a provider failure. - if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" { - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.NewState = &proto.DynamicValue{ - Msgpack: newStateMP, - } - return resp, nil - } - - // We keep the null val if we destroyed the resource, otherwise build the - // entire object, even if the new state was nil. - newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - newStateVal = normalizeNullValues(newStateVal, plannedStateVal, true) - - newStateVal = copyTimeoutValues(newStateVal, plannedStateVal) - - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.NewState = &proto.DynamicValue{ - Msgpack: newStateMP, - } - - meta, err := json.Marshal(newInstanceState.Meta) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.Private = meta - - // This is a signal to Terraform Core that we're doing the best we can to - // shim the legacy type system of the SDK onto the Terraform type system - // but we need it to cut us some slack. This setting should not be taken - // forward to any new SDK implementations, since setting it prevents us - // from catching certain classes of provider bug that can lead to - // confusing downstream errors. - resp.LegacyTypeSystem = true - - return resp, nil -} - -func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) { - resp := &proto.ImportResourceState_Response{} - - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - - newInstanceStates, err := s.provider.ImportState(info, req.Id) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - for _, is := range newInstanceStates { - // copy the ID again just to be sure it wasn't missed - is.Attributes["id"] = is.ID - - resourceType := is.Ephemeral.Type - if resourceType == "" { - resourceType = req.TypeName - } - - schemaBlock := s.getResourceSchemaBlock(resourceType) - newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Normalize the value and fill in any missing blocks. 
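Two conventions from the import path above are worth seeing in isolation: the instance ID is mirrored into the flatmap attributes under "id", and the instance Meta map becomes the JSON private blob. A sketch with made-up values:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical imported instance.
	id := "example-repo"
	attrs := map[string]string{"name": "example"}
	attrs["id"] = id // mirrored "just to be sure", as above

	// Meta (e.g. a schema version marker) becomes the private blob.
	meta := map[string]interface{}{"schema_version": "1"}
	private, err := json.Marshal(meta)
	if err != nil {
		panic(err)
	}
	fmt.Printf("attrs=%v private=%s\n", attrs, private)
}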
- newStateVal = objchange.NormalizeObjectFromLegacySDK(newStateVal, schemaBlock) - - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - meta, err := json.Marshal(is.Meta) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - importedResource := &proto.ImportResourceState_ImportedResource{ - TypeName: resourceType, - State: &proto.DynamicValue{ - Msgpack: newStateMP, - }, - Private: meta, - } - - resp.ImportedResources = append(resp.ImportedResources, importedResource) - } - - return resp, nil -} - -func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) { - resp := &proto.ReadDataSource_Response{} - - schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - - // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - - // we need to still build the diff separately with the Read method to match - // the old behavior - diff, err := s.provider.ReadDataDiff(info, config) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // now we can get the new complete data source - newInstanceState, err := s.provider.ReadDataApply(info, diff) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - newStateVal, err := schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - newStateVal = copyTimeoutValues(newStateVal, configVal) - - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.State = &proto.DynamicValue{ - Msgpack: newStateMP, - } - return resp, nil -} - -func pathToAttributePath(path cty.Path) *proto.AttributePath { - var steps []*proto.AttributePath_Step - - for _, step := range path { - switch s := step.(type) { - case cty.GetAttrStep: - steps = append(steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: s.Name, - }, - }) - case cty.IndexStep: - ty := s.Key.Type() - switch ty { - case cty.Number: - i, _ := s.Key.AsBigFloat().Int64() - steps = append(steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyInt{ - ElementKeyInt: i, - }, - }) - case cty.String: - steps = append(steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyString{ - ElementKeyString: s.Key.AsString(), - }, - }) - } - } - } - - return &proto.AttributePath{Steps: steps} -} - -// helper/schema throws away timeout values from the config and stores them in -// the Private/Meta fields. 
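The step-by-step switch in pathToAttributePath is a reusable cty pattern; a self-contained variant that renders a path as strings rather than protocol structs:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// describePath walks a cty.Path the same way pathToAttributePath does:
// each step is either an attribute name or an element key (number/string).
func describePath(path cty.Path) []string {
	var out []string
	for _, step := range path {
		switch s := step.(type) {
		case cty.GetAttrStep:
			out = append(out, "attr:"+s.Name)
		case cty.IndexStep:
			switch s.Key.Type() {
			case cty.Number:
				i, _ := s.Key.AsBigFloat().Int64()
				out = append(out, fmt.Sprintf("index:%d", i))
			case cty.String:
				out = append(out, "key:"+s.Key.AsString())
			}
		}
	}
	return out
}

func main() {
	p := cty.Path{}.GetAttr("settings").Index(cty.NumberIntVal(0)).GetAttr("name")
	fmt.Println(describePath(p)) // [attr:settings index:0 attr:name]
}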
we need to copy those values into the planned state -// so that core doesn't see a perpetual diff with the timeout block. -func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value { - // if `to` is null we are planning to remove it altogether. - if to.IsNull() { - return to - } - toAttrs := to.AsValueMap() - // We need to remove the key since the hcl2shims will add a non-null block - // because we can't determine if a single block was null from the flatmapped - // values. This needs to conform to the correct schema for marshaling, so - // change the value to null rather than deleting it from the object map. - timeouts, ok := toAttrs[schema.TimeoutsConfigKey] - if ok { - toAttrs[schema.TimeoutsConfigKey] = cty.NullVal(timeouts.Type()) - } - - // if from is null then there are no timeouts to copy - if from.IsNull() { - return cty.ObjectVal(toAttrs) - } - - fromAttrs := from.AsValueMap() - timeouts, ok = fromAttrs[schema.TimeoutsConfigKey] - - // timeouts shouldn't be unknown, but don't copy possibly invalid values either - if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() { - // no timeouts block to copy - return cty.ObjectVal(toAttrs) - } - - toAttrs[schema.TimeoutsConfigKey] = timeouts - - return cty.ObjectVal(toAttrs) -} - -// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all -// StateFuncs and CustomizeDiffs removed. This will be used during apply to -// create a diff from a planned state where the diff modifications have already -// been applied. -func stripResourceModifiers(r *schema.Resource) *schema.Resource { - if r == nil { - return nil - } - // start with a shallow copy - newResource := new(schema.Resource) - *newResource = *r - - newResource.CustomizeDiff = nil - newResource.Schema = map[string]*schema.Schema{} - - for k, s := range r.Schema { - newResource.Schema[k] = stripSchema(s) - } - - return newResource -} - -func stripSchema(s *schema.Schema) *schema.Schema { - if s == nil { - return nil - } - // start with a shallow copy - newSchema := new(schema.Schema) - *newSchema = *s - - newSchema.StateFunc = nil - - switch e := newSchema.Elem.(type) { - case *schema.Schema: - newSchema.Elem = stripSchema(e) - case *schema.Resource: - newSchema.Elem = stripResourceModifiers(e) - } - - return newSchema -} - -// Zero values and empty containers may be interchanged by the apply process. -// When there is a discrepancy between src and dst value being null or empty, -// prefer the src value. This takes a little more liberty with set types, since -// we can't correlate modified set values. In the case of sets, if the src set -// was wholly known we assume the value was correctly applied and copy that -// entirely to the new value. -// While apply prefers the src value, during plan we prefer dst whenever there -// is an unknown or a set is involved, since the plan can alter the value -// however it sees fit. This however means that a CustomizeDiffFunction may not -// be able to change a null to an empty value or vice versa, but that should be -// very uncommon, nor was it reliable before 0.12 either. -func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value { - ty := dst.Type() - if !src.IsNull() && !src.IsKnown() { - // Return src during plan to retain unknown interpolated placeholders, - // which could be lost if we're only updating a resource. If this is a - // read scenario, then there shouldn't be any unknowns at all.
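The timeout-copying logic above can be reduced to a standalone form; "timeouts" stands in for schema.TimeoutsConfigKey and the object shapes are hypothetical:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// copyTimeouts nulls the destination's "timeouts" attribute, then copies the
// source's value over it when that value is present, non-null, and known.
func copyTimeouts(to, from cty.Value) cty.Value {
	if to.IsNull() {
		return to
	}
	attrs := to.AsValueMap()
	if t, ok := attrs["timeouts"]; ok {
		attrs["timeouts"] = cty.NullVal(t.Type())
	}
	if !from.IsNull() {
		if t, ok := from.AsValueMap()["timeouts"]; ok && !t.IsNull() && t.IsWhollyKnown() {
			attrs["timeouts"] = t
		}
	}
	return cty.ObjectVal(attrs)
}

func main() {
	tTy := cty.Object(map[string]cty.Type{"create": cty.String})
	from := cty.ObjectVal(map[string]cty.Value{
		"id":       cty.StringVal("x"),
		"timeouts": cty.ObjectVal(map[string]cty.Value{"create": cty.StringVal("10m")}),
	})
	to := cty.ObjectVal(map[string]cty.Value{
		"id":       cty.StringVal("x"),
		"timeouts": cty.NullVal(tTy),
	})
	fmt.Println(copyTimeouts(to, from).GetAttr("timeouts").GetAttr("create").AsString()) // 10m
}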
- if dst.IsNull() && !apply { - return src - } - return dst - } - - // Handle null/empty changes for collections during apply. - // A change between null and empty values prefers src to make sure the state - // is consistent between plan and apply. - if ty.IsCollectionType() && apply { - dstEmpty := !dst.IsNull() && dst.IsKnown() && dst.LengthInt() == 0 - srcEmpty := !src.IsNull() && src.IsKnown() && src.LengthInt() == 0 - - if (src.IsNull() && dstEmpty) || (srcEmpty && dst.IsNull()) { - return src - } - } - - // check the invariants that we need below, to ensure we are working with - // non-null and known values. - if src.IsNull() || !src.IsKnown() || !dst.IsKnown() { - return dst - } - - switch { - case ty.IsMapType(), ty.IsObjectType(): - var dstMap map[string]cty.Value - if !dst.IsNull() { - dstMap = dst.AsValueMap() - } - if dstMap == nil { - dstMap = map[string]cty.Value{} - } - - srcMap := src.AsValueMap() - for key, v := range srcMap { - dstVal, ok := dstMap[key] - if !ok && apply && ty.IsMapType() { - // don't transfer old map values to dst during apply - continue - } - - if dstVal == cty.NilVal { - if !apply && ty.IsMapType() { - // let plan shape this map however it wants - continue - } - dstVal = cty.NullVal(v.Type()) - } - - dstMap[key] = normalizeNullValues(dstVal, v, apply) - } - - // you can't call MapVal/ObjectVal with empty maps, but nothing was - // copied in anyway. If the dst is nil, and the src is known, assume the - // src is correct. - if len(dstMap) == 0 { - if dst.IsNull() && src.IsWhollyKnown() && apply { - return src - } - return dst - } - - if ty.IsMapType() { - // helper/schema will populate an optional+computed map with - // unknowns which we have to fixup here. - // It would be preferable to simply prevent any known value from - // becoming unknown, but concessions have to be made to retain the - // broken legacy behavior when possible. - for k, srcVal := range srcMap { - if !srcVal.IsNull() && srcVal.IsKnown() { - dstVal, ok := dstMap[k] - if !ok { - continue - } - - if !dstVal.IsNull() && !dstVal.IsKnown() { - dstMap[k] = srcVal - } - } - } - - return cty.MapVal(dstMap) - } - - return cty.ObjectVal(dstMap) - - case ty.IsSetType(): - // If the original was wholly known, then we expect that is what the - // provider applied. The apply process loses too much information to - // reliably re-create the set. - if src.IsWhollyKnown() && apply { - return src - } - - case ty.IsListType(), ty.IsTupleType(): - // If the dst is null, and the src is known, then we lost an empty value - // so take the original. - if dst.IsNull() { - if src.IsWhollyKnown() && src.LengthInt() == 0 && apply { - return src - } - - // if dst is null and src only contains unknown values, then we lost - // those during a read or plan. - if !apply && !src.IsNull() { - allUnknown := true - for _, v := range src.AsValueSlice() { - if v.IsKnown() { - allUnknown = false - break - } - } - if allUnknown { - return src - } - } - - return dst - } - - // if the lengths are identical, then iterate over each element in succession. 
- srcLen := src.LengthInt() - dstLen := dst.LengthInt() - if srcLen == dstLen && srcLen > 0 { - srcs := src.AsValueSlice() - dsts := dst.AsValueSlice() - - for i := 0; i < srcLen; i++ { - dsts[i] = normalizeNullValues(dsts[i], srcs[i], apply) - } - - if ty.IsTupleType() { - return cty.TupleVal(dsts) - } - return cty.ListVal(dsts) - } - - case ty == cty.String: - // The legacy SDK should not be able to remove a value during plan or - // apply, however we are only going to overwrite this if the source was - // an empty string, since that is what is often equated with unset and - // lost in the diff process. - if dst.IsNull() && src.AsString() == "" { - return src - } - } - - return dst -} - -// validateConfigNulls checks a config value for unsupported nulls before -// attempting to shim the value. While null values can mostly be ignored in the -// configuration, since they're not supported in HCL1, the case where a null -// appears in a list-like attribute (list, set, tuple) will present a nil value -// to helper/schema which can panic. Return an error to the user in this case, -// indicating the attribute with the null value. -func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic { - var diags []*proto.Diagnostic - if v.IsNull() || !v.IsKnown() { - return diags - } - - switch { - case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType(): - it := v.ElementIterator() - for it.Next() { - kv, ev := it.Element() - if ev.IsNull() { - // if this is a set, the kv is also going to be null which - // isn't a valid path element, so we can't append it to the - // diagnostic. - p := path - if !kv.IsNull() { - p = append(p, cty.IndexStep{Key: kv}) - } - - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "Null value found in list", - Detail: "Null values are not allowed for this attribute value.", - Attribute: convert.PathToAttributePath(p), - }) - continue - } - - d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv})) - diags = convert.AppendProtoDiag(diags, d) - } - - case v.Type().IsMapType() || v.Type().IsObjectType(): - it := v.ElementIterator() - for it.Next() { - kv, ev := it.Element() - var step cty.PathStep - switch { - case v.Type().IsMapType(): - step = cty.IndexStep{Key: kv} - case v.Type().IsObjectType(): - step = cty.GetAttrStep{Name: kv.AsString()} - } - d := validateConfigNulls(ev, append(path, step)) - diags = convert.AppendProtoDiag(diags, d) - } - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/json.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/json.go deleted file mode 100644 index a5f7bf2d3e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/json.go +++ /dev/null @@ -1,12 +0,0 @@ -package plugin - -import ( - "bytes" - "encoding/json" -) - -func unmarshalJSON(data []byte, v interface{}) error { - dec := json.NewDecoder(bytes.NewReader(data)) - dec.UseNumber() - return dec.Decode(v) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/unknown.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/unknown.go deleted file mode 100644 index a22a264fa0..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/unknown.go +++ /dev/null @@ -1,131 +0,0 @@ -package plugin - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - 
"github.com/zclconf/go-cty/cty" -) - -// SetUnknowns takes a cty.Value, and compares it to the schema setting any null -// values which are computed to unknown. -func SetUnknowns(val cty.Value, schema *configschema.Block) cty.Value { - if !val.IsKnown() { - return val - } - - // If the object was null, we still need to handle the top level attributes - // which might be computed, but we don't need to expand the blocks. - if val.IsNull() { - objMap := map[string]cty.Value{} - allNull := true - for name, attr := range schema.Attributes { - switch { - case attr.Computed: - objMap[name] = cty.UnknownVal(attr.Type) - allNull = false - default: - objMap[name] = cty.NullVal(attr.Type) - } - } - - // If this object has no unknown attributes, then we can leave it null. - if allNull { - return val - } - - return cty.ObjectVal(objMap) - } - - valMap := val.AsValueMap() - newVals := make(map[string]cty.Value) - - for name, attr := range schema.Attributes { - v := valMap[name] - - if attr.Computed && v.IsNull() { - newVals[name] = cty.UnknownVal(attr.Type) - continue - } - - newVals[name] = v - } - - for name, blockS := range schema.BlockTypes { - blockVal := valMap[name] - if blockVal.IsNull() || !blockVal.IsKnown() { - newVals[name] = blockVal - continue - } - - blockValType := blockVal.Type() - blockElementType := blockS.Block.ImpliedType() - - // This switches on the value type here, so we can correctly switch - // between Tuples/Lists and Maps/Objects. - switch { - case blockS.Nesting == configschema.NestingSingle || blockS.Nesting == configschema.NestingGroup: - // NestingSingle is the only exception here, where we treat the - // block directly as an object - newVals[name] = SetUnknowns(blockVal, &blockS.Block) - - case blockValType.IsSetType(), blockValType.IsListType(), blockValType.IsTupleType(): - listVals := blockVal.AsValueSlice() - newListVals := make([]cty.Value, 0, len(listVals)) - - for _, v := range listVals { - newListVals = append(newListVals, SetUnknowns(v, &blockS.Block)) - } - - switch { - case blockValType.IsSetType(): - switch len(newListVals) { - case 0: - newVals[name] = cty.SetValEmpty(blockElementType) - default: - newVals[name] = cty.SetVal(newListVals) - } - case blockValType.IsListType(): - switch len(newListVals) { - case 0: - newVals[name] = cty.ListValEmpty(blockElementType) - default: - newVals[name] = cty.ListVal(newListVals) - } - case blockValType.IsTupleType(): - newVals[name] = cty.TupleVal(newListVals) - } - - case blockValType.IsMapType(), blockValType.IsObjectType(): - mapVals := blockVal.AsValueMap() - newMapVals := make(map[string]cty.Value) - - for k, v := range mapVals { - newMapVals[k] = SetUnknowns(v, &blockS.Block) - } - - switch { - case blockValType.IsMapType(): - switch len(newMapVals) { - case 0: - newVals[name] = cty.MapValEmpty(blockElementType) - default: - newVals[name] = cty.MapVal(newMapVals) - } - case blockValType.IsObjectType(): - if len(newMapVals) == 0 { - // We need to populate empty values to make a valid object. 
- for attr, ty := range blockElementType.AttributeTypes() { - newMapVals[attr] = cty.NullVal(ty) - } - } - newVals[name] = cty.ObjectVal(newMapVals) - } - - default: - panic(fmt.Sprintf("failed to set unknown values for nested block %q:%#v", name, blockValType)) - } - } - - return cty.ObjectVal(newVals) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go deleted file mode 100644 index ad8d626c66..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go +++ /dev/null @@ -1,53 +0,0 @@ -package httpclient - -import ( - "fmt" - "log" - "net/http" - "os" - "strings" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/terraform-plugin-sdk/internal/version" -) - -const uaEnvVar = "TF_APPEND_USER_AGENT" -const userAgentFormat = "Terraform/%s" - -// New returns the DefaultPooledClient from the cleanhttp -// package that will also send a Terraform User-Agent string. -func New() *http.Client { - cli := cleanhttp.DefaultPooledClient() - cli.Transport = &userAgentRoundTripper{ - userAgent: UserAgentString(), - inner: cli.Transport, - } - return cli -} - -type userAgentRoundTripper struct { - inner http.RoundTripper - userAgent string -} - -func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if _, ok := req.Header["User-Agent"]; !ok { - req.Header.Set("User-Agent", rt.userAgent) - } - log.Printf("[TRACE] HTTP client %s request to %s", req.Method, req.URL.String()) - return rt.inner.RoundTrip(req) -} - -func UserAgentString() string { - ua := fmt.Sprintf(userAgentFormat, version.Version) - - if add := os.Getenv(uaEnvVar); add != "" { - add = strings.TrimSpace(add) - if len(add) > 0 { - ua += " " + add - log.Printf("[DEBUG] Using modified User-Agent: %s", ua) - } - } - - return ua -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go deleted file mode 100644 index 7096ff74f8..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go +++ /dev/null @@ -1,125 +0,0 @@ -package initwd - -import ( - "io" - "os" - "path/filepath" - "strings" -) - -// copyDir copies the src directory contents into dst. Both directories -// should already exist. -func copyDir(dst, src string) error { - src, err := filepath.EvalSymlinks(src) - if err != nil { - return err - } - - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if path == src { - return nil - } - - if strings.HasPrefix(filepath.Base(path), ".") { - // Skip any dot files - if info.IsDir() { - return filepath.SkipDir - } else { - return nil - } - } - - // The "path" has the src prefixed to it. We need to join our - // destination with the path without the src on it. - dstPath := filepath.Join(dst, path[len(src):]) - - // we don't want to try and copy the same file over itself. - if eq, err := sameFile(path, dstPath); eq { - return nil - } else if err != nil { - return err - } - - // If we have a directory, make that subdirectory, then continue - // the walk. - if info.IsDir() { - if path == filepath.Join(src, dst) { - // dst is in src; don't walk it. 
- return nil - } - - if err := os.MkdirAll(dstPath, 0755); err != nil { - return err - } - - return nil - } - - // If the current path is a symlink, recreate the symlink relative to - // the dst directory - if info.Mode()&os.ModeSymlink == os.ModeSymlink { - target, err := os.Readlink(path) - if err != nil { - return err - } - - return os.Symlink(target, dstPath) - } - - // If we have a file, copy the contents. - srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dstPath) - if err != nil { - return err - } - defer dstF.Close() - - if _, err := io.Copy(dstF, srcF); err != nil { - return err - } - - // Chmod it - return os.Chmod(dstPath, info.Mode()) - } - - return filepath.Walk(src, walkFn) -} - -// sameFile tries to determine if two paths are the same file. -// If the paths don't match, we look up the inode on supported systems. -func sameFile(a, b string) (bool, error) { - if a == b { - return true, nil - } - - aIno, err := inode(a) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - bIno, err := inode(b) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - if aIno > 0 && aIno == bIno { - return true, nil - } - - return false, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go deleted file mode 100644 index b9d938dbb0..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package initwd contains various helper functions used by the "terraform init" -// command to initialize a working directory. -// -// These functions may also be used from testing code to simulate the behaviors -// of "terraform init" against test fixtures, but should not be used elsewhere -// in the main code. -package initwd diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go deleted file mode 100644 index 641e71dec8..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go +++ /dev/null @@ -1,363 +0,0 @@ -package initwd - -import ( - "fmt" - "github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strings" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -const initFromModuleRootCallName = "root" -const initFromModuleRootKeyPrefix = initFromModuleRootCallName + "." - -// DirFromModule populates the given directory (which must exist and be -// empty) with the contents of the module at the given source address. -// -// It does this by installing the given module and all of its descendent -// modules in a temporary root directory and then copying the installed -// files into suitable locations. As a consequence, any diagnostics it -// generates will reveal the location of this temporary directory to the -// user. -// -// This rather roundabout installation approach is taken to ensure that -// installation proceeds in a manner identical to normal module installation.
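As an aside on sameFile above: outside this vendored code, the standard library's os.SameFile performs an equivalent check portably. A sketch (not the deleted implementation, which compares raw inodes; the path used in main is a placeholder):

package main

import (
	"fmt"
	"os"
)

// sameFile reports whether two paths refer to the same underlying file,
// treating a missing file as "not the same" rather than an error.
func sameFile(a, b string) (bool, error) {
	ai, err := os.Stat(a)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	bi, err := os.Stat(b)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	return os.SameFile(ai, bi), nil
}

func main() {
	ok, err := sameFile("go.mod", "go.mod") // placeholder paths
	fmt.Println(ok, err)
}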
-// -// If the given source address specifies a sub-directory of the given -// package then only the sub-directory and its descendents will be copied -// into the given root directory, which will cause any relative module -// references using ../ from that module to be unresolvable. Error diagnostics -// are produced in that case, to prompt the user to rewrite the source strings -// to be absolute references to the original remote module. -func DirFromModule(rootDir, modulesDir, sourceAddr string, reg *registry.Client, hooks ModuleInstallHooks) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // The way this function works is pretty ugly, but we accept it because - // -from-module is a less important case than normal module installation - // and so it's better to keep this ugly complexity out here rather than - // adding even more complexity to the normal module installer. - - // The target directory must exist but be empty. - { - entries, err := ioutil.ReadDir(rootDir) - if err != nil { - if os.IsNotExist(err) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Target directory does not exist", - fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir), - )) - } else { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read target directory", - fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err), - )) - } - return diags - } - haveEntries := false - for _, entry := range entries { - if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" { - continue - } - haveEntries = true - } - if haveEntries { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Can't populate non-empty directory", - fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir), - )) - return diags - } - } - - instDir := filepath.Join(rootDir, ".terraform/init-from-module") - inst := NewModuleInstaller(instDir, reg) - log.Printf("[DEBUG] installing modules in %s to initialize working directory from %q", instDir, sourceAddr) - os.RemoveAll(instDir) // if this fails then we'll fail on MkdirAll below too - err := os.MkdirAll(instDir, os.ModePerm) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create temporary directory", - fmt.Sprintf("Failed to create temporary directory %s: %s.", instDir, err), - )) - return diags - } - - instManifest := make(modsdir.Manifest) - retManifest := make(modsdir.Manifest) - - fakeFilename := fmt.Sprintf("-from-module=%q", sourceAddr) - fakePos := tfconfig.SourcePos{ - Filename: fakeFilename, - Line: 1, - } - - // -from-module allows relative paths but it's different than a normal - // module address where it'd be resolved relative to the module call - // (which is synthetic, here.) To address this, we'll just patch up any - // relative paths to be absolute paths before we run, ensuring we'll - // get the right result. This also, as an important side-effect, ensures - // that the result will be "downloaded" with go-getter (copied from the - // source location), rather than just recorded as a relative path. - { - maybePath := filepath.ToSlash(sourceAddr) - if maybePath == "." 
|| strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") { - if wd, err := os.Getwd(); err == nil { - sourceAddr = filepath.Join(wd, sourceAddr) - log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddr) - } - } - } - - // Now we need to create an artificial root module that will seed our - // installation process. - fakeRootModule := &tfconfig.Module{ - ModuleCalls: map[string]*tfconfig.ModuleCall{ - initFromModuleRootCallName: { - Name: initFromModuleRootCallName, - Source: sourceAddr, - Pos: fakePos, - }, - }, - } - - // wrapHooks filters hook notifications to only include Download calls - // and to trim off the initFromModuleRootCallName prefix. We'll produce - // our own Install notifications directly below. - wrapHooks := installHooksInitDir{ - Wrapped: hooks, - } - getter := reusingGetter{} - _, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter) - diags = append(diags, instDiags...) - if instDiags.HasErrors() { - return diags - } - - // If all of that succeeded then we'll now migrate what was installed - // into the final directory structure. - err = os.MkdirAll(modulesDir, os.ModePerm) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create local modules directory", - fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err), - )) - return diags - } - - recordKeys := make([]string, 0, len(instManifest)) - for k := range instManifest { - recordKeys = append(recordKeys, k) - } - sort.Strings(recordKeys) - - for _, recordKey := range recordKeys { - record := instManifest[recordKey] - - if record.Key == initFromModuleRootCallName { - // We've found the module the user requested, which we must - // now copy into rootDir so it can be used directly. - log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir) - err := copyDir(rootDir, record.Dir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to copy root module", - fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err), - )) - continue - } - - // We'll try to load the newly-copied module here just so we can - // sniff for any module calls that ../ out of the root directory - // and must thus be rewritten to be absolute addresses again. - // For now we can't do this rewriting automatically, but we'll - // generate an error to help the user do it manually. - mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway - if mod != nil { - for _, mc := range mod.ModuleCalls { - if pathTraversesUp(mc.Source) { - packageAddr, givenSubdir := splitAddrSubdir(sourceAddr) - newSubdir := filepath.Join(givenSubdir, mc.Source) - if pathTraversesUp(newSubdir) { - // This should never happen in any reasonable - // configuration since this suggests a path that - // traverses up out of the package root. We'll just - // ignore this, since we'll fail soon enough anyway - // trying to resolve this path when this module is - // loaded. - continue - } - - var newAddr = packageAddr - if newSubdir != "" { - newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir)) - } - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Root module references parent directory", - fmt.Sprintf("The requested module %q refers to a module via its parent directory. 
To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr), - )) - continue - } - } - } - - retManifest[""] = modsdir.Record{ - Key: "", - Dir: rootDir, - } - continue - } - - if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) { - // Ignore the *real* root module, whose key is empty, since - // we're only interested in the module named "root" and its - // descendents. - continue - } - - newKey := record.Key[len(initFromModuleRootKeyPrefix):] - instPath := filepath.Join(modulesDir, newKey) - tempPath := filepath.Join(instDir, record.Key) - - // tempPath won't be present for a module that was installed from - // a relative path, so in that case we just record the installation - // directory and assume it was already copied into place as part - // of its parent. - if _, err := os.Stat(tempPath); err != nil { - if !os.IsNotExist(err) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to stat temporary module install directory", - fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err), - )) - continue - } - - var parentKey string - if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 { - parentKey = newKey[:lastDot] - } else { - parentKey = "" // parent is the root module - } - - parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey] - parentNew := retManifest[parentKey] - - // We need to figure out which portion of our directory is the - // parent package path and which portion is the subdirectory - // under that. - baseDirRel, err := filepath.Rel(parentOld.Dir, record.Dir) - if err != nil { - // Should never happen, because we constructed both directories - // from the same base and so they must have a common prefix. - panic(err) - } - - newDir := filepath.Join(parentNew.Dir, baseDirRel) - log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir) - newRecord := record // shallow copy - newRecord.Dir = newDir - newRecord.Key = newKey - retManifest[newKey] = newRecord - hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir) - continue - } - - err = os.MkdirAll(instPath, os.ModePerm) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create module install directory", - fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err), - )) - continue - } - - // We copy rather than "rename" here because renaming between directories - // can be tricky in edge-cases like network filesystems, etc. - log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath) - err := copyDir(instPath, tempPath) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to copy descendent module", - fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, rootDir, err), - )) - continue - } - - subDir, err := filepath.Rel(tempPath, record.Dir) - if err != nil { - // Should never happen, because we constructed both directories - // from the same base and so they must have a common prefix. 
- panic(err) - } - - newRecord := record // shallow copy - newRecord.Dir = filepath.Join(instPath, subDir) - newRecord.Key = newKey - retManifest[newKey] = newRecord - hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir) - } - - retManifest.WriteSnapshotToDir(modulesDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to write module manifest", - fmt.Sprintf("Error writing module manifest: %s.", err), - )) - } - - if !diags.HasErrors() { - // Try to clean up our temporary directory, but don't worry if we don't - // succeed since it shouldn't hurt anything. - os.RemoveAll(instDir) - } - - return diags -} - -func pathTraversesUp(path string) bool { - return strings.HasPrefix(filepath.ToSlash(path), "../") -} - -// installHooksInitDir is an adapter wrapper for an InstallHooks that -// does some fakery to make downloads look like they are happening in their -// final locations, rather than in the temporary loader we use. -// -// It also suppresses "Install" calls entirely, since InitDirFromModule -// does its own installation steps after the initial installation pass -// has completed. -type installHooksInitDir struct { - Wrapped ModuleInstallHooks - ModuleInstallHooksImpl -} - -func (h installHooksInitDir) Download(moduleAddr, packageAddr string, version *version.Version) { - if !strings.HasPrefix(moduleAddr, initFromModuleRootKeyPrefix) { - // We won't announce the root module, since hook implementations - // don't expect to see that and the caller will usually have produced - // its own user-facing notification about what it's doing anyway. - return - } - - trimAddr := moduleAddr[len(initFromModuleRootKeyPrefix):] - h.Wrapped.Download(trimAddr, packageAddr, version) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go deleted file mode 100644 index 8dc0374b1e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go +++ /dev/null @@ -1,204 +0,0 @@ -package initwd - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - getter "github.com/hashicorp/go-getter" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" -) - -// We configure our own go-getter detector and getter sets here, because -// the set of sources we support is part of Terraform's documentation and -// so we don't want any new sources introduced in go-getter to sneak in here -// and work even though they aren't documented. This also insulates us from -// any meddling that might be done by other go-getter callers linked into our -// executable. 
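The rationale above feeds directly into a getter.Client; a minimal sketch of such a locked-down client, with placeholder source and destination:

package main

import (
	"fmt"

	getter "github.com/hashicorp/go-getter"
)

// A locked-down go-getter client in the spirit of the deleted code: no
// detectors, and only the getters we explicitly list. Src/Dst are
// placeholders; calling Get() would require network access.
func main() {
	client := &getter.Client{
		Src:  "git::https://example.com/module.git", // placeholder
		Dst:  ".terraform/example-module",           // placeholder
		Mode: getter.ClientModeDir,

		Detectors: []getter.Detector{}, // detection handled separately
		Getters: map[string]getter.Getter{
			"git": new(getter.GitGetter),
		},
	}
	fmt.Printf("configured getters: %d, detectors: %d\n",
		len(client.Getters), len(client.Detectors))
}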
- -var goGetterNoDetectors = []getter.Detector{} - -var goGetterDecompressors = map[string]getter.Decompressor{ - "bz2": new(getter.Bzip2Decompressor), - "gz": new(getter.GzipDecompressor), - "xz": new(getter.XzDecompressor), - "zip": new(getter.ZipDecompressor), - - "tar.bz2": new(getter.TarBzip2Decompressor), - "tar.tbz2": new(getter.TarBzip2Decompressor), - - "tar.gz": new(getter.TarGzipDecompressor), - "tgz": new(getter.TarGzipDecompressor), - - "tar.xz": new(getter.TarXzDecompressor), - "txz": new(getter.TarXzDecompressor), -} - -var goGetterGetters = map[string]getter.Getter{ - "file": new(getter.FileGetter), - "gcs": new(getter.GCSGetter), - "git": new(getter.GitGetter), - "hg": new(getter.HgGetter), - "s3": new(getter.S3Getter), - "http": getterHTTPGetter, - "https": getterHTTPGetter, -} - -var getterHTTPClient = cleanhttp.DefaultClient() - -var getterHTTPGetter = &getter.HttpGetter{ - Client: getterHTTPClient, - Netrc: true, -} - -// A reusingGetter is a helper for the module installer that remembers -// the final resolved addresses of all of the sources it has already been -// asked to install, and will copy from a prior installation directory if -// it has the same resolved source address. -// -// The keys in a reusingGetter are resolved and trimmed source addresses -// (with a scheme always present, and without any "subdir" component), -// and the values are the paths where each source was previously installed. -type reusingGetter map[string]string - -// getWithGoGetter retrieves the package referenced in the given address -// into the installation path and then returns the full path to any subdir -// indicated in the address. -// -// The errors returned by this function are those surfaced by the underlying -// go-getter library, which have very inconsistent quality as -// end-user-actionable error messages. At this time we do not have any -// reasonable way to improve these error messages at this layer because -// the underlying errors are not separately recognizable. 
-func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) { - packageAddr, subDir := splitAddrSubdir(addr) - - log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath) - - realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors) - if err != nil { - return "", err - } - - if isMaybeRelativeLocalPath(realAddr) { - return "", &MaybeRelativePathErr{addr} - } - - var realSubDir string - realAddr, realSubDir = splitAddrSubdir(realAddr) - if realSubDir != "" { - subDir = filepath.Join(realSubDir, subDir) - } - - if realAddr != packageAddr { - log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr) - } - - if prevDir, exists := g[realAddr]; exists { - log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath) - err := os.Mkdir(instPath, os.ModePerm) - if err != nil { - return "", fmt.Errorf("failed to create directory %s: %s", instPath, err) - } - err = copyDir(instPath, prevDir) - if err != nil { - return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err) - } - } else { - log.Printf("[TRACE] fetching %q to %q", realAddr, instPath) - client := getter.Client{ - Src: realAddr, - Dst: instPath, - Pwd: instPath, - - Mode: getter.ClientModeDir, - - Detectors: goGetterNoDetectors, // we already did detection above - Decompressors: goGetterDecompressors, - Getters: goGetterGetters, - } - err = client.Get() - if err != nil { - return "", err - } - // Remember where we installed this so we might reuse this directory - // on subsequent calls to avoid re-downloading. - g[realAddr] = instPath - } - - // Our subDir string can contain wildcards until this point, so that - // e.g. a subDir of * can expand to one top-level directory in a .tar.gz - // archive. Now that we've expanded the archive successfully we must - // resolve that into a concrete path. - var finalDir string - if subDir != "" { - finalDir, err = getter.SubdirGlob(instPath, subDir) - log.Printf("[TRACE] expanded %q to %q", subDir, finalDir) - if err != nil { - return "", err - } - } else { - finalDir = instPath - } - - // If we got this far then we have apparently succeeded in downloading - // the requested object! - return filepath.Clean(finalDir), nil -} - -// splitAddrSubdir splits the given address (which is assumed to be a -// registry address or go-getter-style address) into a package portion -// and a sub-directory portion. -// -// The package portion defines what should be downloaded and then the -// sub-directory portion, if present, specifies a sub-directory within -// the downloaded object (an archive, VCS repository, etc) that contains -// the module's configuration files. -// -// The subDir portion will be returned as empty if no subdir separator -// ("//") is present in the address. 
-func splitAddrSubdir(addr string) (packageAddr, subDir string) { - return getter.SourceDirSubdir(addr) -} - -var localSourcePrefixes = []string{ - "./", - "../", - ".\\", - "..\\", -} - -func isLocalSourceAddr(addr string) bool { - for _, prefix := range localSourcePrefixes { - if strings.HasPrefix(addr, prefix) { - return true - } - } - return false -} - -func isRegistrySourceAddr(addr string) bool { - _, err := regsrc.ParseModuleSource(addr) - return err == nil -} - -type MaybeRelativePathErr struct { - Addr string -} - -func (e *MaybeRelativePathErr) Error() string { - return fmt.Sprintf("Terraform cannot determine the module source for %s", e.Addr) -} - -func isMaybeRelativeLocalPath(addr string) bool { - if strings.HasPrefix(addr, "file://") { - _, err := os.Stat(addr[7:]) - if err != nil { - return true - } - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go deleted file mode 100644 index 1150b093cc..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build linux darwin openbsd netbsd solaris dragonfly - -package initwd - -import ( - "fmt" - "os" - "syscall" -) - -// lookup the inode of a file on posix systems -func inode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - if st, ok := stat.Sys().(*syscall.Stat_t); ok { - return st.Ino, nil - } - return 0, fmt.Errorf("could not determine file inode") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go deleted file mode 100644 index 30532f54ac..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build freebsd - -package initwd - -import ( - "fmt" - "os" - "syscall" -) - -// lookup the inode of a file on posix systems -func inode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - if st, ok := stat.Sys().(*syscall.Stat_t); ok { - return uint64(st.Ino), nil - } - return 0, fmt.Errorf("could not determine file inode") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go deleted file mode 100644 index 3ed58e4bf9..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build windows - -package initwd - -// no syscall.Stat_t on windows, return 0 for inodes -func inode(path string) (uint64, error) { - return 0, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go deleted file mode 100644 index 8e05575673..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go +++ /dev/null @@ -1,558 +0,0 @@ -package initwd - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig" - 
"github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -type ModuleInstaller struct { - modsDir string - reg *registry.Client -} - -func NewModuleInstaller(modsDir string, reg *registry.Client) *ModuleInstaller { - return &ModuleInstaller{ - modsDir: modsDir, - reg: reg, - } -} - -// InstallModules analyses the root module in the given directory and installs -// all of its direct and transitive dependencies into the given modules -// directory, which must already exist. -// -// Since InstallModules makes possibly-time-consuming calls to remote services, -// a hook interface is supported to allow the caller to be notified when -// each module is installed and, for remote modules, when downloading begins. -// LoadConfig guarantees that two hook calls will not happen concurrently but -// it does not guarantee any particular ordering of hook calls. This mechanism -// is for UI feedback only and does not give the caller any control over the -// process. -// -// If modules are already installed in the target directory, they will be -// skipped unless their source address or version have changed or unless -// the upgrade flag is set. -// -// InstallModules never deletes any directory, except in the case where it -// needs to replace a directory that is already present with a newly-extracted -// package. -// -// If the returned diagnostics contains errors then the module installation -// may have wholly or partially completed. Modules must be loaded in order -// to find their dependencies, so this function does many of the same checks -// as LoadConfig as a side-effect. -// -// If successful (the returned diagnostics contains no errors) then the -// first return value is the early configuration tree that was constructed by -// the installation process. -func (i *ModuleInstaller) InstallModules(rootDir string, upgrade bool, hooks ModuleInstallHooks) (*earlyconfig.Config, tfdiags.Diagnostics) { - log.Printf("[TRACE] ModuleInstaller: installing child modules for %s into %s", rootDir, i.modsDir) - - rootMod, diags := earlyconfig.LoadModule(rootDir) - if rootMod == nil { - return nil, diags - } - - manifest, err := modsdir.ReadManifestSnapshotForDir(i.modsDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read modules manifest file", - fmt.Sprintf("Error reading manifest for %s: %s.", i.modsDir, err), - )) - return nil, diags - } - - getter := reusingGetter{} - cfg, instDiags := i.installDescendentModules(rootMod, rootDir, manifest, upgrade, hooks, getter) - diags = append(diags, instDiags...) - - return cfg, diags -} - -func (i *ModuleInstaller) installDescendentModules(rootMod *tfconfig.Module, rootDir string, manifest modsdir.Manifest, upgrade bool, hooks ModuleInstallHooks, getter reusingGetter) (*earlyconfig.Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if hooks == nil { - // Use our no-op implementation as a placeholder - hooks = ModuleInstallHooksImpl{} - } - - // Create a manifest record for the root module. This will be used if - // there are any relative-pathed modules in the root. 
- manifest[""] = modsdir.Record{ - Key: "", - Dir: rootDir, - } - - cfg, cDiags := earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc( - func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { - - key := manifest.ModuleKey(req.Path) - instPath := i.packageInstallPath(req.Path) - - log.Printf("[DEBUG] Module installer: begin %s", key) - - // First we'll check if we need to upgrade/replace an existing - // installed module, and delete it out of the way if so. - replace := upgrade - if !replace { - record, recorded := manifest[key] - switch { - case !recorded: - log.Printf("[TRACE] ModuleInstaller: %s is not yet installed", key) - replace = true - case record.SourceAddr != req.SourceAddr: - log.Printf("[TRACE] ModuleInstaller: %s source address has changed from %q to %q", key, record.SourceAddr, req.SourceAddr) - replace = true - case record.Version != nil && !req.VersionConstraints.Check(record.Version): - log.Printf("[TRACE] ModuleInstaller: %s version %s no longer compatible with constraints %s", key, record.Version, req.VersionConstraints) - replace = true - } - } - - // If we _are_ planning to replace this module, then we'll remove - // it now so our installation code below won't conflict with any - // existing remnants. - if replace { - if _, recorded := manifest[key]; recorded { - log.Printf("[TRACE] ModuleInstaller: discarding previous record of %s prior to reinstall", key) - } - delete(manifest, key) - // Deleting a module invalidates all of its descendent modules too. - keyPrefix := key + "." - for subKey := range manifest { - if strings.HasPrefix(subKey, keyPrefix) { - if _, recorded := manifest[subKey]; recorded { - log.Printf("[TRACE] ModuleInstaller: also discarding downstream %s", subKey) - } - delete(manifest, subKey) - } - } - } - - record, recorded := manifest[key] - if !recorded { - // Clean up any stale cache directory that might be present. - // If this is a local (relative) source then the dir will - // not exist, but we'll ignore that. - log.Printf("[TRACE] ModuleInstaller: cleaning directory %s prior to install of %s", instPath, key) - err := os.RemoveAll(instPath) - if err != nil && !os.IsNotExist(err) { - log.Printf("[TRACE] ModuleInstaller: failed to remove %s: %s", key, err) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to remove local module cache", - fmt.Sprintf( - "Terraform tried to remove %s in order to reinstall this module, but encountered an error: %s", - instPath, err, - ), - )) - return nil, nil, diags - } - } else { - // If this module is already recorded and its root directory - // exists then we will just load what's already there and - // keep our existing record. - info, err := os.Stat(record.Dir) - if err == nil && info.IsDir() { - mod, mDiags := earlyconfig.LoadModule(record.Dir) - diags = diags.Append(mDiags) - - log.Printf("[TRACE] ModuleInstaller: Module installer: %s %s already installed in %s", key, record.Version, record.Dir) - return mod, record.Version, diags - } - } - - // If we get down here then it's finally time to actually install - // the module. There are some variants to this process depending - // on what type of module source address we have. - switch { - - case isLocalSourceAddr(req.SourceAddr): - log.Printf("[TRACE] ModuleInstaller: %s has local path %q", key, req.SourceAddr) - mod, mDiags := i.installLocalModule(req, key, manifest, hooks) - diags = append(diags, mDiags...) 
- return mod, nil, diags - - case isRegistrySourceAddr(req.SourceAddr): - addr, err := regsrc.ParseModuleSource(req.SourceAddr) - if err != nil { - // Should never happen because isRegistrySourceAddr already validated - panic(err) - } - log.Printf("[TRACE] ModuleInstaller: %s is a registry module at %s", key, addr) - - mod, v, mDiags := i.installRegistryModule(req, key, instPath, addr, manifest, hooks, getter) - diags = append(diags, mDiags...) - return mod, v, diags - - default: - log.Printf("[TRACE] ModuleInstaller: %s address %q will be handled by go-getter", key, req.SourceAddr) - - mod, mDiags := i.installGoGetterModule(req, key, instPath, manifest, hooks, getter) - diags = append(diags, mDiags...) - return mod, nil, diags - } - - }, - )) - diags = append(diags, cDiags...) - - err := manifest.WriteSnapshotToDir(i.modsDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to update module manifest", - fmt.Sprintf("Unable to write the module manifest file: %s", err), - )) - } - - return cfg, diags -} - -func (i *ModuleInstaller) installLocalModule(req *earlyconfig.ModuleRequest, key string, manifest modsdir.Manifest, hooks ModuleInstallHooks) (*tfconfig.Module, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - parentKey := manifest.ModuleKey(req.Parent.Path) - parentRecord, recorded := manifest[parentKey] - if !recorded { - // This is indicative of a bug rather than a user-actionable error - panic(fmt.Errorf("missing manifest record for parent module %s", parentKey)) - } - - if len(req.VersionConstraints) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid version constraint", - fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a relative local path.", req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - } - - // For local sources we don't actually need to modify the - // filesystem at all because the parent already wrote - // the files we need, and so we just load up what's already here. - newDir := filepath.Join(parentRecord.Dir, req.SourceAddr) - - log.Printf("[TRACE] ModuleInstaller: %s uses directory from parent: %s", key, newDir) - // it is possible that the local directory is a symlink - newDir, err := filepath.EvalSymlinks(newDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unreadable module directory", - fmt.Sprintf("Unable to evaluate directory symlink: %s", err.Error()), - )) - } - - mod, mDiags := earlyconfig.LoadModule(newDir) - if mod == nil { - // nil indicates missing or unreadable directory, so we'll - // discard the returned diags and return a more specific - // error message here. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unreadable module directory", - fmt.Sprintf("The directory %s could not be read for module %q at %s:%d.", newDir, req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - } else { - diags = diags.Append(mDiags) - } - - // Note the local location in our manifest. 
- manifest[key] = modsdir.Record{ - Key: key, - Dir: newDir, - SourceAddr: req.SourceAddr, - } - log.Printf("[DEBUG] Module installer: %s installed at %s", key, newDir) - hooks.Install(key, nil, newDir) - - return mod, diags -} - -func (i *ModuleInstaller) installRegistryModule(req *earlyconfig.ModuleRequest, key string, instPath string, addr *regsrc.Module, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - hostname, err := addr.SvcHost() - if err != nil { - // If it looks like the user was trying to use punycode then we'll generate - // a specialized error for that case. We require the unicode form of - // hostname so that hostnames are always human-readable in configuration - // and punycode can't be used to hide a malicious module hostname. - if strings.HasPrefix(addr.RawHost.Raw, "xn--") { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid module registry hostname", - fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not an acceptable hostname. Internationalized domain names must be given in unicode form rather than ASCII (\"punycode\") form.", req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - } else { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid module registry hostname", - fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not a valid hostname.", req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - } - return nil, nil, diags - } - - reg := i.reg - - log.Printf("[DEBUG] %s listing available versions of %s at %s", key, addr, hostname) - resp, err := reg.ModuleVersions(addr) - if err != nil { - if registry.IsModuleNotFound(err) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Module not found", - fmt.Sprintf("Module %q (from %s:%d) cannot be found in the module registry at %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname), - )) - } else { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Error accessing remote module registry", - fmt.Sprintf("Failed to retrieve available versions for module %q (%s:%d) from %s: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname, err), - )) - } - return nil, nil, diags - } - - // The response might contain information about dependencies to allow us - // to potentially optimize future requests, but we don't currently do that - // and so for now we'll just take the first item which is guaranteed to - // be the address we requested. - if len(resp.Modules) < 1 { - // Should never happen, but since this is a remote service that may - // be implemented by third-parties we will handle it gracefully. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid response from remote module registry", - fmt.Sprintf("The registry at %s returned an invalid response when Terraform requested available versions for module %q (%s:%d).", hostname, req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - return nil, nil, diags - } - - modMeta := resp.Modules[0] - - var latestMatch *version.Version - var latestVersion *version.Version - for _, mv := range modMeta.Versions { - v, err := version.NewVersion(mv.Version) - if err != nil { - // Should never happen if the registry server is compliant with - // the protocol, but we'll warn if not to assist someone who - // might be developing a module registry server. 
- diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Invalid response from remote module registry", - fmt.Sprintf("The registry at %s returned an invalid version string %q for module %q (%s:%d), which Terraform ignored.", hostname, mv.Version, req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - continue - } - - // If we've found a pre-release version then we'll ignore it unless - // it was exactly requested. - if v.Prerelease() != "" && req.VersionConstraints.String() != v.String() { - log.Printf("[TRACE] ModuleInstaller: %s ignoring %s because it is a pre-release and was not requested exactly", key, v) - continue - } - - if latestVersion == nil || v.GreaterThan(latestVersion) { - latestVersion = v - } - - if req.VersionConstraints.Check(v) { - if latestMatch == nil || v.GreaterThan(latestMatch) { - latestMatch = v - } - } - } - - if latestVersion == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Module has no versions", - fmt.Sprintf("Module %q (%s:%d) has no versions available on %s.", addr, req.CallPos.Filename, req.CallPos.Line, hostname), - )) - return nil, nil, diags - } - - if latestMatch == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unresolvable module version constraint", - fmt.Sprintf("There is no available version of module %q (%s:%d) which matches the given version constraint. The newest available version is %s.", addr, req.CallPos.Filename, req.CallPos.Line, latestVersion), - )) - return nil, nil, diags - } - - // Report up to the caller that we're about to start downloading. - packageAddr, _ := splitAddrSubdir(req.SourceAddr) - hooks.Download(key, packageAddr, latestMatch) - - // If we manage to get down here then we've found a suitable version to - // install, so we need to ask the registry where we should download it from. - // The response to this is a go-getter-style address string. - dlAddr, err := reg.ModuleLocation(addr, latestMatch.String()) - if err != nil { - log.Printf("[ERROR] %s from %s %s: %s", key, addr, latestMatch, err) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid response from remote module registry", - fmt.Sprintf("The remote registry at %s failed to return a download URL for %s %s.", hostname, addr, latestMatch), - )) - return nil, nil, diags - } - - log.Printf("[TRACE] ModuleInstaller: %s %s %s is available at %q", key, addr, latestMatch, dlAddr) - - modDir, err := getter.getWithGoGetter(instPath, dlAddr) - if err != nil { - // Errors returned by go-getter have very inconsistent quality as - // end-user error messages, but for now we're accepting that because - // we have no way to recognize any specific errors to improve them - // and masking the error entirely would hide valuable diagnostic - // information from the user. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to download module", - fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, dlAddr, err), - )) - return nil, nil, diags - } - - log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, dlAddr, modDir) - - if addr.RawSubmodule != "" { - // Append the user's requested subdirectory to any subdirectory that - // was implied by any of the nested layers we expanded within go-getter. - modDir = filepath.Join(modDir, addr.RawSubmodule) - } - - log.Printf("[TRACE] ModuleInstaller: %s should now be at %s", key, modDir) - - // Finally we are ready to try actually loading the module. 
- mod, mDiags := earlyconfig.LoadModule(modDir) - if mod == nil { - // nil indicates missing or unreadable directory, so we'll - // discard the returned diags and return a more specific - // error message here. For registry modules this actually - // indicates a bug in the code above, since it's not the - // user's responsibility to create the directory in this case. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unreadable module directory", - fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir), - )) - } else { - diags = append(diags, mDiags...) - } - - // Note the local location in our manifest. - manifest[key] = modsdir.Record{ - Key: key, - Version: latestMatch, - Dir: modDir, - SourceAddr: req.SourceAddr, - } - log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) - hooks.Install(key, latestMatch, modDir) - - return mod, latestMatch, diags -} - -func (i *ModuleInstaller) installGoGetterModule(req *earlyconfig.ModuleRequest, key string, instPath string, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Report up to the caller that we're about to start downloading. - packageAddr, _ := splitAddrSubdir(req.SourceAddr) - hooks.Download(key, packageAddr, nil) - - if len(req.VersionConstraints) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid version constraint", - fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a non Registry URL.", req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - return nil, diags - } - - modDir, err := getter.getWithGoGetter(instPath, req.SourceAddr) - if err != nil { - if _, ok := err.(*MaybeRelativePathErr); ok { - log.Printf( - "[TRACE] ModuleInstaller: %s looks like a local path but is missing ./ or ../", - req.SourceAddr, - ) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Module not found", - fmt.Sprintf( - "The module address %q could not be resolved.\n\n"+ - "If you intended this as a path relative to the current "+ - "module, use \"./%s\" instead. The \"./\" prefix "+ - "indicates that the address is a relative filesystem path.", - req.SourceAddr, req.SourceAddr, - ), - )) - } else { - // Errors returned by go-getter have very inconsistent quality as - // end-user error messages, but for now we're accepting that because - // we have no way to recognize any specific errors to improve them - // and masking the error entirely would hide valuable diagnostic - // information from the user. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to download module", - fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s", req.Name, req.CallPos.Filename, req.CallPos.Line, packageAddr, err), - )) - } - return nil, diags - - } - - log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, req.SourceAddr, modDir) - - mod, mDiags := earlyconfig.LoadModule(modDir) - if mod == nil { - // nil indicates missing or unreadable directory, so we'll - // discard the returned diags and return a more specific - // error message here. For go-getter modules this actually - // indicates a bug in the code above, since it's not the - // user's responsibility to create the directory in this case. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unreadable module directory", - fmt.Sprintf("The directory %s could not be read. 
This is a bug in Terraform and should be reported.", modDir), - )) - } else { - diags = append(diags, mDiags...) - } - - // Note the local location in our manifest. - manifest[key] = modsdir.Record{ - Key: key, - Dir: modDir, - SourceAddr: req.SourceAddr, - } - log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) - hooks.Install(key, nil, modDir) - - return mod, diags -} - -func (i *ModuleInstaller) packageInstallPath(modulePath addrs.Module) string { - return filepath.Join(i.modsDir, strings.Join(modulePath, ".")) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go deleted file mode 100644 index 817a6dc832..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go +++ /dev/null @@ -1,36 +0,0 @@ -package initwd - -import ( - version "github.com/hashicorp/go-version" -) - -// ModuleInstallHooks is an interface used to provide notifications about the -// installation process being orchestrated by InstallModules. -// -// This interface may have new methods added in future, so implementers should -// embed InstallHooksImpl to get no-op implementations of any unimplemented -// methods. -type ModuleInstallHooks interface { - // Download is called for modules that are retrieved from a remote source - // before that download begins, to allow a caller to give feedback - // on progress through a possibly-long sequence of downloads. - Download(moduleAddr, packageAddr string, version *version.Version) - - // Install is called for each module that is installed, even if it did - // not need to be downloaded from a remote source. - Install(moduleAddr string, version *version.Version, localPath string) -} - -// ModuleInstallHooksImpl is a do-nothing implementation of InstallHooks that -// can be embedded in another implementation struct to allow only partial -// implementation of the interface. -type ModuleInstallHooksImpl struct { -} - -func (h ModuleInstallHooksImpl) Download(moduleAddr, packageAddr string, version *version.Version) { -} - -func (h ModuleInstallHooksImpl) Install(moduleAddr string, version *version.Version, localPath string) { -} - -var _ ModuleInstallHooks = ModuleInstallHooksImpl{} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go deleted file mode 100644 index 8f89909c6f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package blocktoattr includes some helper functions that can perform -// preprocessing on a HCL body where a configschema.Block schema is available -// in order to allow list and set attributes defined in the schema to be -// optionally written by the user as block syntax. 
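// [Editor's note: illustrative only, not part of the diffed file.] For a
// schema attribute "setting" whose type is list(object({name = string})),
// the fixup described above lets both HCL spellings decode to the same value:
//
//	setting = [{ name = "a" }]   # attribute syntax
//
//	setting { name = "a" }       # block syntax, rewritten by this package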
-package blocktoattr diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go deleted file mode 100644 index f782f6b754..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go +++ /dev/null @@ -1,187 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// FixUpBlockAttrs takes a raw HCL body and adds some additional normalization -// functionality to allow attributes that are specified as having list or set -// type in the schema to be written with HCL block syntax as multiple nested -// blocks with the attribute name as the block type. -// -// This partially restores some of the block/attribute confusion from HCL 1 -// so that existing patterns that depended on that confusion can continue to -// be used in the short term while we settle on a longer-term strategy. -// -// Most of the fixup work is actually done when the returned body is -// subsequently decoded, so while FixUpBlockAttrs always succeeds, the eventual -// decode of the body might not, if the content of the body is so ambiguous -// that there's no safe way to map it to the schema. -func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body { - // The schema should never be nil, but in practice it seems to be sometimes - // in the presence of poorly-configured test mocks, so we'll be robust - // by synthesizing an empty one. - if schema == nil { - schema = &configschema.Block{} - } - - return &fixupBody{ - original: body, - schema: schema, - names: ambiguousNames(schema), - } -} - -type fixupBody struct { - original hcl.Body - schema *configschema.Block - names map[string]struct{} -} - -// Content decodes content from the body. The given schema must be the lower-level -// representation of the same schema that was previously passed to FixUpBlockAttrs, -// or else the result is undefined. -func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - schema = b.effectiveSchema(schema) - content, diags := b.original.Content(schema) - return b.fixupContent(content), diags -} - -func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - schema = b.effectiveSchema(schema) - content, remain, diags := b.original.PartialContent(schema) - remain = &fixupBody{ - original: remain, - schema: b.schema, - names: b.names, - } - return b.fixupContent(content), remain, diags -} - -func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - // FixUpBlockAttrs is not intended to be used in situations where we'd use - // JustAttributes, so we just pass this through verbatim to complete our - // implementation of hcl.Body. - return b.original.JustAttributes() -} - -func (b *fixupBody) MissingItemRange() hcl.Range { - return b.original.MissingItemRange() -} - -// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's -// content to determine whether the author has used attribute or block syntax -// for each of the ambiguous attributes where both are permitted. -// -// The resulting schema will always contain all of the same names that are -// in the given schema, but some attribute schemas may instead be replaced by -// block header schemas.
-func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema { - return effectiveSchema(given, b.original, b.names, true) -} - -func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent { - var ret hcl.BodyContent - ret.Attributes = make(hcl.Attributes) - for name, attr := range content.Attributes { - ret.Attributes[name] = attr - } - blockAttrVals := make(map[string][]*hcl.Block) - for _, block := range content.Blocks { - if _, exists := b.names[block.Type]; exists { - // If we get here then we've found a block type whose instances need - // to be re-interpreted as a list-of-objects attribute. We'll gather - // those up and fix them up below. - blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block) - continue - } - - // We need to now re-wrap our inner body so it will be subject to the - // same attribute-as-block fixup when recursively decoded. - retBlock := *block // shallow copy - if blockS, ok := b.schema.BlockTypes[block.Type]; ok { - // Would be weird if not ok, but we'll allow it for robustness; body just won't be fixed up, then - retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block) - } - - ret.Blocks = append(ret.Blocks, &retBlock) - } - // Now we'll install synthetic attributes for each of our fixups. We can't - // do this exactly because HCL's information model expects an attribute - // to be a single decl but we have multiple separate blocks. We'll - // approximate things, then, by using only our first block for the source - // location information. (We are guaranteed at least one by the above logic.) - for name, blocks := range blockAttrVals { - ret.Attributes[name] = &hcl.Attribute{ - Name: name, - Expr: &fixupBlocksExpr{ - blocks: blocks, - ety: b.schema.Attributes[name].Type.ElementType(), - }, - - Range: blocks[0].DefRange, - NameRange: blocks[0].TypeRange, - } - } - return &ret -} - -type fixupBlocksExpr struct { - blocks hcl.Blocks - ety cty.Type -} - -func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - // In order to produce a suitable value for our expression we need to - // now decode the whole descendent block structure under each of our block - // bodies. - // - // That requires us to do something rather strange: we must construct a - // synthetic block type schema derived from the element type of the - // attribute, thus inverting our usual direction of lowering a schema - // into an implied type. Because a type is less detailed than a schema, - // the result is imprecise and in particular will just consider all - // the attributes to be optional and let the provider eventually decide - // whether to return errors if they turn out to be null when required. - schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety - spec := schema.DecoderSpec() - - vals := make([]cty.Value, len(e.blocks)) - var diags hcl.Diagnostics - for i, block := range e.blocks { - body := FixUpBlockAttrs(block.Body, schema) - val, blockDiags := hcldec.Decode(body, spec, ctx) - diags = append(diags, blockDiags...) - if val == cty.NilVal { - val = cty.UnknownVal(e.ety) - } - vals[i] = val - } - if len(vals) == 0 { - return cty.ListValEmpty(e.ety), diags - } - return cty.ListVal(vals), diags - } - -func (e *fixupBlocksExpr) Variables() []hcl.Traversal { - var ret []hcl.Traversal - schema := SchemaForCtyElementType(e.ety) - spec := schema.DecoderSpec() - for _, block := range e.blocks { - ret = append(ret, hcldec.Variables(block.Body, spec)...)
- } - return ret -} - -func (e *fixupBlocksExpr) Range() hcl.Range { - // This is not really an appropriate range for the expression but it's - // the best we can do from here. - return e.blocks[0].DefRange -} - -func (e *fixupBlocksExpr) StartRange() hcl.Range { - return e.blocks[0].DefRange -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go deleted file mode 100644 index 129ee0e82f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go +++ /dev/null @@ -1,119 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -func ambiguousNames(schema *configschema.Block) map[string]struct{} { - if schema == nil { - return nil - } - ambiguousNames := make(map[string]struct{}) - for name, attrS := range schema.Attributes { - aty := attrS.Type - if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() { - ambiguousNames[name] = struct{}{} - } - } - return ambiguousNames -} - -func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema { - ret := &hcl.BodySchema{} - - appearsAsBlock := make(map[string]struct{}) - { - // We'll construct some throwaway schemas here just to probe for - // whether each of our ambiguous names seems to be being used as - // an attribute or a block. We need to check both because in JSON - // syntax we rely on the schema to decide between attribute or block - // interpretation and so JSON will always answer yes to both of - // these questions and we want to prefer the attribute interpretation - // in that case. - var probeSchema hcl.BodySchema - - for name := range ambiguousNames { - probeSchema = hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: name, - }, - }, - } - content, _, _ := body.PartialContent(&probeSchema) - if _, exists := content.Attributes[name]; exists { - // Can decode as an attribute, so we'll go with that. - continue - } - probeSchema = hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: name, - }, - }, - } - content, _, _ = body.PartialContent(&probeSchema) - if len(content.Blocks) > 0 || dynamicExpanded { - // A dynamic block with an empty iterator returns nothing. - // If there's no attribute and we have either a block or a - // dynamic expansion, we need to rewrite this one as a - // block for a successful result. - appearsAsBlock[name] = struct{}{} - } - } - if !dynamicExpanded { - // If we're deciding for a context where dynamic blocks haven't - // been expanded yet then we need to probe for those too. - probeSchema = hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "dynamic", - LabelNames: []string{"type"}, - }, - }, - } - content, _, _ := body.PartialContent(&probeSchema) - for _, block := range content.Blocks { - if _, exists := ambiguousNames[block.Labels[0]]; exists { - appearsAsBlock[block.Labels[0]] = struct{}{} - } - } - } - } - - for _, attrS := range given.Attributes { - if _, exists := appearsAsBlock[attrS.Name]; exists { - ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ - Type: attrS.Name, - }) - } else { - ret.Attributes = append(ret.Attributes, attrS) - } - } - - // Anything that is specified as a block type in the input schema remains - // that way by just passing through verbatim. 
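// [Editor's note: illustrative only, not part of the diffed file.] With an
// ambiguous name "setting", a body containing setting { ... } blocks and no
// setting = ... attribute makes the derived schema carry a BlockHeaderSchema
// for "setting" in place of its AttributeSchema; in JSON syntax both probes
// succeed and the attribute interpretation wins, as described above.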
- ret.Blocks = append(ret.Blocks, given.Blocks...) - - return ret -} - -// SchemaForCtyElementType converts a cty object type into an -// approximately-equivalent configschema.Block representing the element of -// a list or set. If the given type is not an object type then this -// function will panic. -func SchemaForCtyElementType(ty cty.Type) *configschema.Block { - atys := ty.AttributeTypes() - ret := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute, len(atys)), - } - for name, aty := range atys { - ret.Attributes[name] = &configschema.Attribute{ - Type: aty, - Optional: true, - } - } - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go deleted file mode 100644 index f5ed1c539b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go +++ /dev/null @@ -1,45 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/dynblock" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" -) - -// ExpandedVariables finds all of the global variables referenced in the -// given body with the given schema while taking into account the possibilities -// both of "dynamic" blocks being expanded and the possibility of certain -// attributes being written instead as nested blocks as allowed by the -// FixUpBlockAttrs function. -// -// This function exists to allow variables to be analyzed prior to dynamic -// block expansion while also dealing with the fact that dynamic block expansion -// might in turn produce nested blocks that are subject to FixUpBlockAttrs. -// -// This is intended as a drop-in replacement for dynblock.VariablesHCLDec, -// which is itself a drop-in replacement for hcldec.Variables. -func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal { - rootNode := dynblock.WalkVariables(body) - return walkVariables(rootNode, body, schema) -} - -func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { - givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) - ambiguousNames := ambiguousNames(schema) - effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) - vars, children := node.Visit(effectiveRawSchema) - - for _, child := range children { - if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { - vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) - } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.IsCollectionType() && attrS.Type.ElementType().IsObjectType() { - // ☝️Check for collection type before element type, because if this is a mis-placed reference, - // a panic here will prevent other useful diags from being elevated to show the user what to fix - synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) - vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) 
- } - } - - return vars -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go deleted file mode 100644 index 13f7ed935e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go +++ /dev/null @@ -1,34 +0,0 @@ -package lang - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// Data is an interface whose implementations can provide cty.Value -// representations of objects identified by referenceable addresses from -// the addrs package. -// -// This interface will grow each time a new type of reference is added, and so -// implementations outside of the Terraform codebases are not advised. -// -// Each method returns a suitable value and optionally some diagnostics. If the -// returned diagnostics contains errors then the type of the returned value is -// used to construct an unknown value of the same type which is then used in -// place of the requested object so that type checking can still proceed. In -// cases where it's not possible to even determine a suitable result type, -// cty.DynamicVal is returned along with errors describing the problem. -type Data interface { - StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics - - GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetForEachAttr(addrs.ForEachAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetResource(addrs.Resource, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetModuleInstanceOutput(addrs.ModuleCallOutput, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go deleted file mode 100644 index af5c5cac0d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package lang deals with the runtime aspects of Terraform's configuration -// language, with concerns such as expression evaluation. It is closely related -// to sibling package "configs", which is responsible for configuration -// parsing and static validation. 
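// [Editor's note: hypothetical sketch, not part of the diffed file.] The
// Scope methods in eval.go below are typically used in pairs: expand any
// "dynamic" blocks first, then evaluate the expanded body. Assuming a
// populated Scope value (declared in scope.go, outside this hunk):
//
//	expanded, diags := scope.ExpandBlock(body, schema)
//	val, evalDiags := scope.EvalBlock(expanded, schema)
//	diags = diags.Append(evalDiags)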
-package lang diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go deleted file mode 100644 index ec48a873f0..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go +++ /dev/null @@ -1,473 +0,0 @@ -package lang - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/dynblock" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// ExpandBlock expands any "dynamic" blocks present in the given body. The -// result is a body with those blocks expanded, ready to be evaluated with -// EvalBlock. -// -// If the returned diagnostics contains errors then the result may be -// incomplete or invalid. -func (s *Scope) ExpandBlock(body hcl.Body, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { - spec := schema.DecoderSpec() - - traversals := dynblock.ExpandVariablesHCLDec(body, spec) - refs, diags := References(traversals) - - ctx, ctxDiags := s.EvalContext(refs) - diags = diags.Append(ctxDiags) - - return dynblock.Expand(body, ctx), diags -} - -// EvalBlock evaluates the given body using the given block schema and returns -// a cty object value representing its contents. The type of the result conforms -// to the implied type of the given schema. -// -// This function does not automatically expand "dynamic" blocks within the -// body. If that is desired, first call the ExpandBlock method to obtain -// an expanded body to pass to this method. -// -// If the returned diagnostics contains errors then the result may be -// incomplete or invalid. -func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { - spec := schema.DecoderSpec() - - refs, diags := ReferencesInBlock(body, schema) - - ctx, ctxDiags := s.EvalContext(refs) - diags = diags.Append(ctxDiags) - if diags.HasErrors() { - // We'll stop early if we found problems in the references, because - // it's likely evaluation will produce redundant copies of the same errors. - return cty.UnknownVal(schema.ImpliedType()), diags - } - - // HACK: In order to remain compatible with some assumptions made in - // Terraform v0.11 and earlier about the approximate equivalence of - // attribute vs. block syntax, we do a just-in-time fixup here to allow - // any attribute in the schema that has a list-of-objects or set-of-objects - // kind to potentially be populated instead by one or more nested blocks - // whose type is the attribute name. - body = blocktoattr.FixUpBlockAttrs(body, schema) - - val, evalDiags := hcldec.Decode(body, spec, ctx) - diags = diags.Append(evalDiags) - - return val, diags -} - -// EvalExpr evaluates a single expression in the receiving context and returns -// the resulting value. The value will be converted to the given type before -// it is returned if possible, or else an error diagnostic will be produced -// describing the conversion error. -// -// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion -// and just obtain the returned value directly. 
-// -// If the returned diagnostics contains errors then the result may be -// incomplete, but will always be of the requested type. -func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { - refs, diags := ReferencesInExpr(expr) - - ctx, ctxDiags := s.EvalContext(refs) - diags = diags.Append(ctxDiags) - if diags.HasErrors() { - // We'll stop early if we found problems in the references, because - // it's likely evaluation will produce redundant copies of the same errors. - return cty.UnknownVal(wantType), diags - } - - val, evalDiags := expr.Value(ctx) - diags = diags.Append(evalDiags) - - if wantType != cty.DynamicPseudoType { - var convErr error - val, convErr = convert.Convert(val, wantType) - if convErr != nil { - val = cty.UnknownVal(wantType) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect value type", - Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), - Subject: expr.Range().Ptr(), - }) - } - } - - return val, diags -} - -// EvalReference evaluates the given reference in the receiving scope and -// returns the resulting value. The value will be converted to the given type before -// it is returned if possible, or else an error diagnostic will be produced -// describing the conversion error. -// -// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion -// and just obtain the returned value directly. -// -// If the returned diagnostics contains errors then the result may be -// incomplete, but will always be of the requested type. -func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // We cheat a bit here and just build an EvalContext for our requested - // reference with the "self" address overridden, and then pull the "self" - // result out of it to return. - ctx, ctxDiags := s.evalContext([]*addrs.Reference{ref}, ref.Subject) - diags = diags.Append(ctxDiags) - val := ctx.Variables["self"] - if val == cty.NilVal { - val = cty.DynamicVal - } - - var convErr error - val, convErr = convert.Convert(val, wantType) - if convErr != nil { - val = cty.UnknownVal(wantType) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect value type", - Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - - return val, diags -} - -// EvalContext constructs a HCL expression evaluation context whose variable -// scope contains sufficient values to satisfy the given set of references. -// -// Most callers should prefer to use the evaluation helper methods that -// this type offers, but this is here for less common situations where the -// caller will handle the evaluation calls itself. -func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) { - return s.evalContext(refs, s.SelfAddr) -} - -func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) { - if s == nil { - panic("attempt to construct EvalContext for nil Scope") - } - - var diags tfdiags.Diagnostics - vals := make(map[string]cty.Value) - funcs := s.Functions() - ctx := &hcl.EvalContext{ - Variables: vals, - Functions: funcs, - } - - if len(refs) == 0 { - // Easy path for common case where there are no references at all. - return ctx, diags - } - - // First we'll do static validation of the references. 
This catches things - // early that might otherwise not get caught due to unknown values being - // present in the scope during planning. - if staticDiags := s.Data.StaticValidateReferences(refs, selfAddr); staticDiags.HasErrors() { - diags = diags.Append(staticDiags) - return ctx, diags - } - - // The reference set we are given has not been de-duped, and so there can - // be redundant requests in it for two reasons: - // - The same item is referenced multiple times - // - Both an item and that item's container are separately referenced. - // We will still visit every reference here and ask our data source for - // it, since that allows us to gather a full set of any errors and - // warnings, but once we've gathered all the data we'll then skip anything - // that's redundant in the process of populating our values map. - dataResources := map[string]map[string]cty.Value{} - managedResources := map[string]map[string]cty.Value{} - wholeModules := map[string]map[addrs.InstanceKey]cty.Value{} - moduleOutputs := map[string]map[addrs.InstanceKey]map[string]cty.Value{} - inputVariables := map[string]cty.Value{} - localValues := map[string]cty.Value{} - pathAttrs := map[string]cty.Value{} - terraformAttrs := map[string]cty.Value{} - countAttrs := map[string]cty.Value{} - forEachAttrs := map[string]cty.Value{} - var self cty.Value - - for _, ref := range refs { - rng := ref.SourceRange - - rawSubj := ref.Subject - if rawSubj == addrs.Self { - if selfAddr == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "self" reference`, - // This detail message mentions some current practice that - // this codepath doesn't really "know about". If the "self" - // object starts being supported in more contexts later then - // we'll need to adjust this message. - Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - continue - } - - if selfAddr == addrs.Self { - // Programming error: the self address cannot alias itself. - panic("scope SelfAddr attempting to alias itself") - } - - // self can only be used within a resource instance - subj := selfAddr.(addrs.ResourceInstance) - - val, valDiags := normalizeRefValue(s.Data.GetResource(subj.ContainingResource(), rng)) - - diags = diags.Append(valDiags) - - // Self is an exception in that it must always resolve to a - // particular instance. We will still insert the full resource into - // the context below. - switch k := subj.Key.(type) { - case addrs.IntKey: - self = val.Index(cty.NumberIntVal(int64(k))) - case addrs.StringKey: - self = val.Index(cty.StringVal(string(k))) - default: - self = val - } - - continue - } - - // This type switch must cover all of the "Referenceable" implementations - // in package addrs, however we are removing the possibility of - // ResourceInstance beforehand. 
- if addr, ok := rawSubj.(addrs.ResourceInstance); ok { - rawSubj = addr.ContainingResource() - } - - switch subj := rawSubj.(type) { - case addrs.Resource: - var into map[string]map[string]cty.Value - switch subj.Mode { - case addrs.ManagedResourceMode: - into = managedResources - case addrs.DataResourceMode: - into = dataResources - default: - panic(fmt.Errorf("unsupported ResourceMode %s", subj.Mode)) - } - - val, valDiags := normalizeRefValue(s.Data.GetResource(subj, rng)) - diags = diags.Append(valDiags) - - r := subj - if into[r.Type] == nil { - into[r.Type] = make(map[string]cty.Value) - } - into[r.Type][r.Name] = val - - case addrs.ModuleCallInstance: - val, valDiags := normalizeRefValue(s.Data.GetModuleInstance(subj, rng)) - diags = diags.Append(valDiags) - - if wholeModules[subj.Call.Name] == nil { - wholeModules[subj.Call.Name] = make(map[addrs.InstanceKey]cty.Value) - } - wholeModules[subj.Call.Name][subj.Key] = val - - case addrs.ModuleCallOutput: - val, valDiags := normalizeRefValue(s.Data.GetModuleInstanceOutput(subj, rng)) - diags = diags.Append(valDiags) - - callName := subj.Call.Call.Name - callKey := subj.Call.Key - if moduleOutputs[callName] == nil { - moduleOutputs[callName] = make(map[addrs.InstanceKey]map[string]cty.Value) - } - if moduleOutputs[callName][callKey] == nil { - moduleOutputs[callName][callKey] = make(map[string]cty.Value) - } - moduleOutputs[callName][callKey][subj.Name] = val - - case addrs.InputVariable: - val, valDiags := normalizeRefValue(s.Data.GetInputVariable(subj, rng)) - diags = diags.Append(valDiags) - inputVariables[subj.Name] = val - - case addrs.LocalValue: - val, valDiags := normalizeRefValue(s.Data.GetLocalValue(subj, rng)) - diags = diags.Append(valDiags) - localValues[subj.Name] = val - - case addrs.PathAttr: - val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, rng)) - diags = diags.Append(valDiags) - pathAttrs[subj.Name] = val - - case addrs.TerraformAttr: - val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, rng)) - diags = diags.Append(valDiags) - terraformAttrs[subj.Name] = val - - case addrs.CountAttr: - val, valDiags := normalizeRefValue(s.Data.GetCountAttr(subj, rng)) - diags = diags.Append(valDiags) - countAttrs[subj.Name] = val - - case addrs.ForEachAttr: - val, valDiags := normalizeRefValue(s.Data.GetForEachAttr(subj, rng)) - diags = diags.Append(valDiags) - forEachAttrs[subj.Name] = val - - default: - // Should never happen - panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj)) - } - } - - for k, v := range buildResourceObjects(managedResources) { - vals[k] = v - } - vals["data"] = cty.ObjectVal(buildResourceObjects(dataResources)) - vals["module"] = cty.ObjectVal(buildModuleObjects(wholeModules, moduleOutputs)) - vals["var"] = cty.ObjectVal(inputVariables) - vals["local"] = cty.ObjectVal(localValues) - vals["path"] = cty.ObjectVal(pathAttrs) - vals["terraform"] = cty.ObjectVal(terraformAttrs) - vals["count"] = cty.ObjectVal(countAttrs) - vals["each"] = cty.ObjectVal(forEachAttrs) - if self != cty.NilVal { - vals["self"] = self - } - - return ctx, diags -} - -func buildResourceObjects(resources map[string]map[string]cty.Value) map[string]cty.Value { - vals := make(map[string]cty.Value) - for typeName, nameVals := range resources { - vals[typeName] = cty.ObjectVal(nameVals) - } - return vals -} - -func buildModuleObjects(wholeModules map[string]map[addrs.InstanceKey]cty.Value, moduleOutputs map[string]map[addrs.InstanceKey]map[string]cty.Value) map[string]cty.Value { - vals 
:= make(map[string]cty.Value) - - for name, keys := range wholeModules { - vals[name] = buildInstanceObjects(keys) - } - - for name, keys := range moduleOutputs { - if _, exists := wholeModules[name]; exists { - // If we also have a whole module value for this name then we'll - // skip this since the individual outputs are embedded in that result. - continue - } - - // The shape of this collection isn't compatible with buildInstanceObjects, - // but rather than replicating most of the buildInstanceObjects logic - // here we'll instead first transform the structure to be what that - // function expects and then use it. This is a little wasteful, but - // we do not expect these maps to be large and so the extra work - // here should not hurt too much. - flattened := make(map[addrs.InstanceKey]cty.Value, len(keys)) - for k, vals := range keys { - flattened[k] = cty.ObjectVal(vals) - } - vals[name] = buildInstanceObjects(flattened) - } - - return vals -} - -func buildInstanceObjects(keys map[addrs.InstanceKey]cty.Value) cty.Value { - if val, exists := keys[addrs.NoKey]; exists { - // If present, a "no key" value supersedes all other values, - // since they should be embedded inside it. - return val - } - - // If we only have individual values then we need to construct - // either a list or a map, depending on what sort of keys we - // have. - haveInt := false - haveString := false - maxInt := 0 - - for k := range keys { - switch tk := k.(type) { - case addrs.IntKey: - haveInt = true - if int(tk) > maxInt { - maxInt = int(tk) - } - case addrs.StringKey: - haveString = true - } - } - - // We should either have ints or strings and not both, but - // if we have both then we'll prefer strings and let the - // language interpreter try to convert the int keys into - // strings in a map. - switch { - case haveString: - vals := make(map[string]cty.Value) - for k, v := range keys { - switch tk := k.(type) { - case addrs.StringKey: - vals[string(tk)] = v - case addrs.IntKey: - sk := strconv.Itoa(int(tk)) - vals[sk] = v - } - } - return cty.ObjectVal(vals) - case haveInt: - // We'll make a tuple that is long enough for our maximum - // index value. It doesn't matter if we end up shorter than - // the number of instances because if length(...) were - // being evaluated we would've got a NoKey reference and - // thus not ended up in this codepath at all. - vals := make([]cty.Value, maxInt+1) - for i := range vals { - if v, exists := keys[addrs.IntKey(i)]; exists { - vals[i] = v - } else { - // Just a placeholder, since nothing will access this anyway - vals[i] = cty.DynamicVal - } - } - return cty.TupleVal(vals) - default: - // Should never happen because there are no other key types. - log.Printf("[ERROR] strange buildInstanceObjects call with no supported key types") - return cty.EmptyObjectVal - } -} - -func normalizeRefValue(val cty.Value, diags tfdiags.Diagnostics) (cty.Value, tfdiags.Diagnostics) { - if diags.HasErrors() { - // If there are errors then we will force an unknown result so that - // we can still evaluate and catch type errors but we'll avoid - // producing redundant re-statements of the same errors we've already - // dealt with here.
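// [Editor's note: illustrative only, not part of the diffed file.] For
// example, a reference that failed to resolve but whose type is known to be
// string comes back as cty.UnknownVal(cty.String), so later expressions that
// use it can still be type-checked without re-reporting the same error.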
- return cty.UnknownVal(val.Type()), diags - } - return val, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go deleted file mode 100644 index 8c07514896..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go +++ /dev/null @@ -1,218 +0,0 @@ -package funcs - -import ( - "fmt" - "net" - - "github.com/apparentlymart/go-cidr/cidr" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/gocty" -) - -// CidrHostFunc constructs a function that calculates a full host IP address -// within a given IP network address prefix. -var CidrHostFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - { - Name: "hostnum", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var hostNum int - if err := gocty.FromCtyValue(args[1], &hostNum); err != nil { - return cty.UnknownVal(cty.String), err - } - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) - } - - ip, err := cidr.Host(network, hostNum) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(ip.String()), nil - }, -}) - -// CidrNetmaskFunc constructs a function that converts an IPv4 address prefix given -// in CIDR notation into a subnet mask address. -var CidrNetmaskFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) - } - - return cty.StringVal(net.IP(network.Mask).String()), nil - }, -}) - -// CidrSubnetFunc constructs a function that calculates a subnet address within -// a given IP network address prefix. -var CidrSubnetFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - { - Name: "newbits", - Type: cty.Number, - }, - { - Name: "netnum", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var newbits int - if err := gocty.FromCtyValue(args[1], &newbits); err != nil { - return cty.UnknownVal(cty.String), err - } - var netnum int - if err := gocty.FromCtyValue(args[2], &netnum); err != nil { - return cty.UnknownVal(cty.String), err - } - - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) - } - - // For portability with 32-bit systems where the subnet number - // will be a 32-bit int, we only allow extension of 32 bits in - // one call even if we're running on a 64-bit machine. - // (Of course, this is significant only for IPv6.)
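// [Editor's note: illustrative only, not part of the diffed file.] Worked
// example of the arithmetic this function implements:
// cidrsubnet("10.0.0.0/16", 8, 2) extends the prefix by 8 bits (/16 -> /24)
// and selects subnet number 2, yielding "10.0.2.0/24".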
- if newbits > 32 { - return cty.UnknownVal(cty.String), fmt.Errorf("may not extend prefix by more than 32 bits") - } - - newNetwork, err := cidr.Subnet(network, newbits, netnum) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(newNetwork.String()), nil - }, -}) - -// CidrSubnetsFunc is similar to CidrSubnetFunc but calculates many consecutive -// subnet addresses at once, rather than just a single subnet extension. -var CidrSubnetsFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "newbits", - Type: cty.Number, - }, - Type: function.StaticReturnType(cty.List(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid CIDR expression: %s", err) - } - startPrefixLen, _ := network.Mask.Size() - - prefixLengthArgs := args[1:] - if len(prefixLengthArgs) == 0 { - return cty.ListValEmpty(cty.String), nil - } - - var firstLength int - if err := gocty.FromCtyValue(prefixLengthArgs[0], &firstLength); err != nil { - return cty.UnknownVal(cty.String), function.NewArgError(1, err) - } - firstLength += startPrefixLen - - retVals := make([]cty.Value, len(prefixLengthArgs)) - - current, _ := cidr.PreviousSubnet(network, firstLength) - for i, lengthArg := range prefixLengthArgs { - var length int - if err := gocty.FromCtyValue(lengthArg, &length); err != nil { - return cty.UnknownVal(cty.String), function.NewArgError(i+1, err) - } - - if length < 1 { - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "must extend prefix by at least one bit") - } - // For portability with 32-bit systems where the subnet number - // will be a 32-bit int, we only allow extension of 32 bits in - // one call even if we're running on a 64-bit machine. - // (Of course, this is significant only for IPv6.) - if length > 32 { - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "may not extend prefix by more than 32 bits") - } - length += startPrefixLen - if length > (len(network.IP) * 8) { - protocol := "IP" - switch len(network.IP) * 8 { - case 32: - protocol = "IPv4" - case 128: - protocol = "IPv6" - } - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "would extend prefix to %d bits, which is too long for an %s address", length, protocol) - } - - next, rollover := cidr.NextSubnet(current, length) - if rollover || !network.Contains(next.IP) { - // If we run out of suffix bits in the base CIDR prefix then - // NextSubnet will start incrementing the prefix bits, which - // we don't allow because it would then allocate addresses - // outside of the caller's given prefix. - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "not enough remaining address space for a subnet with a prefix of %d bits after %s", length, current.String()) - } - - current = next - retVals[i] = cty.StringVal(current.String()) - } - - return cty.ListVal(retVals), nil - }, -}) - -// CidrHost calculates a full host IP address within a given IP network address prefix. -func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) { - return CidrHostFunc.Call([]cty.Value{prefix, hostnum}) -} - -// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address. 
-func CidrNetmask(prefix cty.Value) (cty.Value, error) { - return CidrNetmaskFunc.Call([]cty.Value{prefix}) -} - -// CidrSubnet calculates a subnet address within a given IP network address prefix. -func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) { - return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum}) -} - -// CidrSubnets calculates a sequence of consecutive subnet prefixes that may -// be of different prefix lengths under a common base prefix. -func CidrSubnets(prefix cty.Value, newbits ...cty.Value) (cty.Value, error) { - args := make([]cty.Value, len(newbits)+1) - args[0] = prefix - copy(args[1:], newbits) - return CidrSubnetsFunc.Call(args) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go deleted file mode 100644 index e6898457b9..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go +++ /dev/null @@ -1,1519 +0,0 @@ -package funcs - -import ( - "errors" - "fmt" - "sort" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/function/stdlib" - "github.com/zclconf/go-cty/cty/gocty" -) - -var ElementFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "index", - Type: cty.Number, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - list := args[0] - listTy := list.Type() - switch { - case listTy.IsListType(): - return listTy.ElementType(), nil - case listTy.IsTupleType(): - if !args[1].IsKnown() { - // If the index isn't known yet then we can't predict the - // result type since each tuple element can have its own type. - return cty.DynamicPseudoType, nil - } - - etys := listTy.TupleElementTypes() - var index int - err := gocty.FromCtyValue(args[1], &index) - if err != nil { - // e.g. fractional number where whole number is required - return cty.DynamicPseudoType, fmt.Errorf("invalid index: %s", err) - } - if len(etys) == 0 { - return cty.DynamicPseudoType, errors.New("cannot use element function with an empty list") - } - index = index % len(etys) - return etys[index], nil - default: - return cty.DynamicPseudoType, fmt.Errorf("cannot read elements from %s", listTy.FriendlyName()) - } - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - var index int - err := gocty.FromCtyValue(args[1], &index) - if err != nil { - // can't happen because we checked this in the Type function above - return cty.DynamicVal, fmt.Errorf("invalid index: %s", err) - } - - if !args[0].IsKnown() { - return cty.UnknownVal(retType), nil - } - - l := args[0].LengthInt() - if l == 0 { - return cty.DynamicVal, errors.New("cannot use element function with an empty list") - } - index = index % l - - // We did all the necessary type checks in the type function above, - // so this is guaranteed not to fail. 
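// [Editor's note: illustrative only, not part of the diffed file.] The
// modulo above is what gives element() its wrap-around behavior: for
// element(["a", "b", "c"], 4), the index becomes 4 % 3 == 1, so the call
// returns "b".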
- return args[0].Index(cty.NumberIntVal(int64(index))), nil - }, -}) - -var LengthFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowDynamicType: true, - AllowUnknown: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - collTy := args[0].Type() - switch { - case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType: - return cty.Number, nil - default: - return cty.Number, errors.New("argument must be a string, a collection type, or a structural type") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - coll := args[0] - collTy := args[0].Type() - switch { - case collTy == cty.DynamicPseudoType: - return cty.UnknownVal(cty.Number), nil - case collTy.IsTupleType(): - l := len(collTy.TupleElementTypes()) - return cty.NumberIntVal(int64(l)), nil - case collTy.IsObjectType(): - l := len(collTy.AttributeTypes()) - return cty.NumberIntVal(int64(l)), nil - case collTy == cty.String: - // We'll delegate to the cty stdlib strlen function here, because - // it deals with all of the complexities of tokenizing unicode - // grapheme clusters. - return stdlib.Strlen(coll) - case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType(): - return coll.Length(), nil - default: - // Should never happen, because of the checks in our Type func above - return cty.UnknownVal(cty.Number), errors.New("impossible value type for length(...)") - } - }, -}) - -// CoalesceFunc constructs a function that takes any number of arguments and -// returns the first one that isn't empty. This function was copied from go-cty -// stdlib and modified so that it returns the first *non-empty* non-null element -// from a sequence, instead of merely the first non-null. -var CoalesceFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - argTypes := make([]cty.Type, len(args)) - for i, val := range args { - argTypes[i] = val.Type() - } - retType, _ := convert.UnifyUnsafe(argTypes) - if retType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - return retType, nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - for _, argVal := range args { - // We already know this will succeed because of the checks in our Type func above - argVal, _ = convert.Convert(argVal, retType) - if !argVal.IsKnown() { - return cty.UnknownVal(retType), nil - } - if argVal.IsNull() { - continue - } - if retType == cty.String && argVal.RawEquals(cty.StringVal("")) { - continue - } - - return argVal, nil - } - return cty.NilVal, errors.New("no non-null, non-empty-string arguments") - }, -}) - -// CoalesceListFunc constructs a function that takes any number of list arguments -// and returns the first one that isn't empty. 
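Every function in this file follows the go-cty `function.Spec` shape seen in ElementFunc and LengthFunc above: `Params` declares argument types, an optional `Type` callback predicts the result type before values are known, and `Impl` computes the result. A pared-down sketch of the pattern (`byteLenFunc` is a hypothetical name, and it counts bytes, whereas the real LengthFunc delegates to `stdlib.Strlen` to count grapheme clusters):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

// byteLenFunc demonstrates the Spec pattern: declared parameter types,
// a statically known return type, and an Impl over cty values.
var byteLenFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{Name: "str", Type: cty.String},
	},
	Type: function.StaticReturnType(cty.Number),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		return cty.NumberIntVal(int64(len(args[0].AsString()))), nil
	},
})

func main() {
	v, err := byteLenFunc.Call([]cty.Value{cty.StringVal("hello")})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsBigFloat()) // 5
}
```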
-var CoalesceListFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) == 0 { - return cty.NilType, errors.New("at least one argument is required") - } - - argTypes := make([]cty.Type, len(args)) - - for i, arg := range args { - // if any argument is unknown, we can't know for certain which type we will return - if !arg.IsKnown() { - return cty.DynamicPseudoType, nil - } - ty := arg.Type() - - if !ty.IsListType() && !ty.IsTupleType() { - return cty.NilType, errors.New("coalescelist arguments must be lists or tuples") - } - - argTypes[i] = arg.Type() - } - - last := argTypes[0] - // If there are mixed types, we have to return a dynamic type. - for _, next := range argTypes[1:] { - if !next.Equals(last) { - return cty.DynamicPseudoType, nil - } - } - - return last, nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - for _, arg := range args { - if !arg.IsKnown() { - // If we run into an unknown list at some point, we can't - // predict the final result yet. (If there's a known, non-empty - // arg before this then we won't get here.) - return cty.UnknownVal(retType), nil - } - - if arg.LengthInt() > 0 { - return arg, nil - } - } - - return cty.NilVal, errors.New("no non-null arguments") - }, -}) - -// CompactFunc constructs a function that takes a list of strings and returns a new list -// with any empty string elements removed. -var CompactFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.List(cty.String), - }, - }, - Type: function.StaticReturnType(cty.List(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - listVal := args[0] - if !listVal.IsWhollyKnown() { - // If some of the element values aren't known yet then we - // can't yet return a compacted list - return cty.UnknownVal(retType), nil - } - - var outputList []cty.Value - - for it := listVal.ElementIterator(); it.Next(); { - _, v := it.Element() - if v.IsNull() || v.AsString() == "" { - continue - } - outputList = append(outputList, v) - } - - if len(outputList) == 0 { - return cty.ListValEmpty(cty.String), nil - } - - return cty.ListVal(outputList), nil - }, -}) - -// ContainsFunc constructs a function that determines whether a given list or -// set contains a given single value as one of its elements. -var ContainsFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "value", - Type: cty.DynamicPseudoType, - }, - }, - Type: function.StaticReturnType(cty.Bool), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - arg := args[0] - ty := arg.Type() - - if !ty.IsListType() && !ty.IsTupleType() && !ty.IsSetType() { - return cty.NilVal, errors.New("argument must be list, tuple, or set") - } - - _, err = Index(cty.TupleVal(arg.AsValueSlice()), args[1]) - if err != nil { - return cty.False, nil - } - - return cty.True, nil - }, -}) - -// IndexFunc constructs a function that finds the element index for a given value in a list.
-var IndexFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "value", - Type: cty.DynamicPseudoType, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) { - return cty.NilVal, errors.New("argument must be a list or tuple") - } - - if !args[0].IsKnown() { - return cty.UnknownVal(cty.Number), nil - } - - if args[0].LengthInt() == 0 { // Easy path - return cty.NilVal, errors.New("cannot search an empty list") - } - - for it := args[0].ElementIterator(); it.Next(); { - i, v := it.Element() - eq, err := stdlib.Equal(v, args[1]) - if err != nil { - return cty.NilVal, err - } - if !eq.IsKnown() { - return cty.UnknownVal(cty.Number), nil - } - if eq.True() { - return i, nil - } - } - return cty.NilVal, errors.New("item not found") - - }, -}) - -// DistinctFunc constructs a function that takes a list and returns a new list -// with any duplicate elements removed. -var DistinctFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.List(cty.DynamicPseudoType), - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - return args[0].Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - listVal := args[0] - - if !listVal.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - var list []cty.Value - - for it := listVal.ElementIterator(); it.Next(); { - _, v := it.Element() - list, err = appendIfMissing(list, v) - if err != nil { - return cty.NilVal, err - } - } - - if len(list) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(list), nil - }, -}) - -// ChunklistFunc constructs a function that splits a single list into fixed-size chunks, -// returning a list of lists. -var ChunklistFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.List(cty.DynamicPseudoType), - }, - { - Name: "size", - Type: cty.Number, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - return cty.List(args[0].Type()), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - listVal := args[0] - if !listVal.IsKnown() { - return cty.UnknownVal(retType), nil - } - - if listVal.LengthInt() == 0 { - return cty.ListValEmpty(listVal.Type()), nil - } - - var size int - err = gocty.FromCtyValue(args[1], &size) - if err != nil { - return cty.NilVal, fmt.Errorf("invalid index: %s", err) - } - - if size < 0 { - return cty.NilVal, errors.New("the size argument must be positive") - } - - output := make([]cty.Value, 0) - - // if size is 0, returns a list made of the initial list - if size == 0 { - output = append(output, listVal) - return cty.ListVal(output), nil - } - - chunk := make([]cty.Value, 0) - - l := args[0].LengthInt() - i := 0 - - for it := listVal.ElementIterator(); it.Next(); { - _, v := it.Element() - chunk = append(chunk, v) - - // Emit a chunk when the chunk size is reached, or when reaching the value's length - if (i+1)%size == 0 || (i+1) == l { - output = append(output, cty.ListVal(chunk)) - chunk = make([]cty.Value, 0) - } - i++ - } - - return cty.ListVal(output), nil - }, -}) - -// FlattenFunc constructs a function that takes a list and replaces any elements -// that are lists with a flattened sequence of the list contents.
-var FlattenFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - if !args[0].IsWhollyKnown() { - return cty.DynamicPseudoType, nil - } - - argTy := args[0].Type() - if !argTy.IsListType() && !argTy.IsSetType() && !argTy.IsTupleType() { - return cty.NilType, errors.New("can only flatten lists, sets and tuples") - } - - retVal, known := flattener(args[0]) - if !known { - return cty.DynamicPseudoType, nil - } - - tys := make([]cty.Type, len(retVal)) - for i, ty := range retVal { - tys[i] = ty.Type() - } - return cty.Tuple(tys), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - inputList := args[0] - if inputList.LengthInt() == 0 { - return cty.EmptyTupleVal, nil - } - - out, known := flattener(inputList) - if !known { - return cty.UnknownVal(retType), nil - } - - return cty.TupleVal(out), nil - }, -}) - -// Flatten until it's not a cty.List, and return whether the value is known. -// We can flatten lists with unknown values, as long as they are not -// lists themselves. -func flattener(flattenList cty.Value) ([]cty.Value, bool) { - out := make([]cty.Value, 0) - for it := flattenList.ElementIterator(); it.Next(); { - _, val := it.Element() - if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() { - if !val.IsKnown() { - return out, false - } - - res, known := flattener(val) - if !known { - return res, known - } - out = append(out, res...) - } else { - out = append(out, val) - } - } - return out, true -} - -// KeysFunc constructs a function that takes a map and returns a sorted list of the map keys. -var KeysFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "inputMap", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - ty := args[0].Type() - switch { - case ty.IsMapType(): - return cty.List(cty.String), nil - case ty.IsObjectType(): - atys := ty.AttributeTypes() - if len(atys) == 0 { - return cty.EmptyTuple, nil - } - // All of our result elements will be strings, and atys just - // decides how many there are. - etys := make([]cty.Type, len(atys)) - for i := range etys { - etys[i] = cty.String - } - return cty.Tuple(etys), nil - default: - return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - m := args[0] - var keys []cty.Value - - switch { - case m.Type().IsObjectType(): - // In this case we allow unknown values so we must work only with - // the attribute _types_, not with the value itself. - var names []string - for name := range m.Type().AttributeTypes() { - names = append(names, name) - } - sort.Strings(names) // same ordering guaranteed by cty's ElementIterator - if len(names) == 0 { - return cty.EmptyTupleVal, nil - } - keys = make([]cty.Value, len(names)) - for i, name := range names { - keys[i] = cty.StringVal(name) - } - return cty.TupleVal(keys), nil - default: - if !m.IsKnown() { - return cty.UnknownVal(retType), nil - } - - // cty guarantees that ElementIterator will iterate in lexicographical - // order by key. 
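That lexicographical-iteration guarantee is why KeysFunc here (and ValuesFunc later in this file) can return sorted results without sorting map keys explicitly. A small illustrative snippet of the guarantee:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	m := cty.MapVal(map[string]cty.Value{
		"b": cty.StringVal("2"),
		"a": cty.StringVal("1"),
		"c": cty.StringVal("3"),
	})

	// cty's ElementIterator visits map keys in lexicographical order,
	// regardless of Go's randomized map iteration order.
	for it := m.ElementIterator(); it.Next(); {
		k, v := it.Element()
		fmt.Println(k.AsString(), v.AsString()) // a 1, then b 2, then c 3
	}
}
```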
- for it := args[0].ElementIterator(); it.Next(); { - k, _ := it.Element() - keys = append(keys, k) - } - if len(keys) == 0 { - return cty.ListValEmpty(cty.String), nil - } - return cty.ListVal(keys), nil - } - }, -}) - -// ListFunc constructs a function that takes an arbitrary number of arguments -// and returns a list containing those values in the same order. -// -// This function is deprecated in Terraform v0.12 -var ListFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) == 0 { - return cty.NilType, errors.New("at least one argument is required") - } - - argTypes := make([]cty.Type, len(args)) - - for i, arg := range args { - argTypes[i] = arg.Type() - } - - retType, _ := convert.UnifyUnsafe(argTypes) - if retType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - - return cty.List(retType), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - newList := make([]cty.Value, 0, len(args)) - - for _, arg := range args { - // We already know this will succeed because of the checks in our Type func above - arg, _ = convert.Convert(arg, retType.ElementType()) - newList = append(newList, arg) - } - - return cty.ListVal(newList), nil - }, -}) - -// LookupFunc constructs a function that performs dynamic lookups of map types. -var LookupFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "inputMap", - Type: cty.DynamicPseudoType, - }, - { - Name: "key", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "default", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) < 1 || len(args) > 3 { - return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args)) - } - - ty := args[0].Type() - - switch { - case ty.IsObjectType(): - if !args[1].IsKnown() { - return cty.DynamicPseudoType, nil - } - - key := args[1].AsString() - if ty.HasAttribute(key) { - return args[0].GetAttr(key).Type(), nil - } else if len(args) == 3 { - // if the key isn't found but a default is provided, - // return the default type - return args[2].Type(), nil - } - return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key) - case ty.IsMapType(): - if len(args) == 3 { - _, err = convert.Convert(args[2], ty.ElementType()) - if err != nil { - return cty.NilType, function.NewArgErrorf(2, "the default value must have the same type as the map elements") - } - } - return ty.ElementType(), nil - default: - return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var defaultVal cty.Value - defaultValueSet := false - - if len(args) == 3 { - defaultVal = args[2] - defaultValueSet = true - } - - mapVar := args[0] - lookupKey := args[1].AsString() - - if !mapVar.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - if mapVar.Type().IsObjectType() { - if mapVar.Type().HasAttribute(lookupKey) { - return mapVar.GetAttr(lookupKey), nil - } - } else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True { - return mapVar.Index(cty.StringVal(lookupKey)), nil - 
} - - if defaultValueSet { - defaultVal, err = convert.Convert(defaultVal, retType) - if err != nil { - return cty.NilVal, err - } - return defaultVal, nil - } - - return cty.UnknownVal(cty.DynamicPseudoType), fmt.Errorf( - "lookup failed to find '%s'", lookupKey) - }, -}) - -// MapFunc constructs a function that takes an even number of arguments and -// returns a map whose elements are constructed from consecutive pairs of arguments. -// -// This function is deprecated in Terraform v0.12 -var MapFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) < 2 || len(args)%2 != 0 { - return cty.NilType, fmt.Errorf("map requires an even number of two or more arguments, got %d", len(args)) - } - - argTypes := make([]cty.Type, len(args)/2) - index := 0 - - for i := 0; i < len(args); i += 2 { - argTypes[index] = args[i+1].Type() - index++ - } - - valType, _ := convert.UnifyUnsafe(argTypes) - if valType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - - return cty.Map(valType), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - for _, arg := range args { - if !arg.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - } - - outputMap := make(map[string]cty.Value) - - for i := 0; i < len(args); i += 2 { - - key := args[i].AsString() - - err := gocty.FromCtyValue(args[i], &key) - if err != nil { - return cty.NilVal, err - } - - val := args[i+1] - - var variable cty.Value - err = gocty.FromCtyValue(val, &variable) - if err != nil { - return cty.NilVal, err - } - - // We already know this will succeed because of the checks in our Type func above - variable, _ = convert.Convert(variable, retType.ElementType()) - - // Check for duplicate keys - if _, ok := outputMap[key]; ok { - return cty.NilVal, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key) - } - outputMap[key] = variable - } - - return cty.MapVal(outputMap), nil - }, -}) - -// MatchkeysFunc constructs a function that constructs a new list by taking a -// subset of elements from one list whose indexes match the corresponding -// indexes of values in another list. -var MatchkeysFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "values", - Type: cty.List(cty.DynamicPseudoType), - }, - { - Name: "keys", - Type: cty.List(cty.DynamicPseudoType), - }, - { - Name: "searchset", - Type: cty.List(cty.DynamicPseudoType), - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) - if ty == cty.NilType { - return cty.NilType, errors.New("keys and searchset must be of the same type") - } - - // the return type is based on args[0] (values) - return args[0].Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - if !args[0].IsKnown() { - return cty.UnknownVal(cty.List(retType.ElementType())), nil - } - - if args[0].LengthInt() != args[1].LengthInt() { - return cty.ListValEmpty(retType.ElementType()), errors.New("length of keys and values should be equal") - } - - output := make([]cty.Value, 0) - values := args[0] - - // Keys and searchset must be the same type. 
- // We can skip error checking here because we've already verified that - // they can be unified in the Type function - ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) - keys, _ := convert.Convert(args[1], ty) - searchset, _ := convert.Convert(args[2], ty) - - // if searchset is empty, return an empty list. - if searchset.LengthInt() == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - - if !values.IsWhollyKnown() || !keys.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - i := 0 - for it := keys.ElementIterator(); it.Next(); { - _, key := it.Element() - for iter := searchset.ElementIterator(); iter.Next(); { - _, search := iter.Element() - eq, err := stdlib.Equal(key, search) - if err != nil { - return cty.NilVal, err - } - if !eq.IsKnown() { - return cty.ListValEmpty(retType.ElementType()), nil - } - if eq.True() { - v := values.Index(cty.NumberIntVal(int64(i))) - output = append(output, v) - break - } - } - i++ - } - - // if we haven't matched any key, then output is an empty list. - if len(output) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(output), nil - }, -}) - -// MergeFunc constructs a function that takes an arbitrary number of maps and -// returns a single map that contains a merged set of elements from all of the maps. -// -// If more than one given map defines the same key then the one that is later in -// the argument sequence takes precedence. -var MergeFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "maps", - Type: cty.DynamicPseudoType, - AllowDynamicType: true, - }, - Type: function.StaticReturnType(cty.DynamicPseudoType), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - outputMap := make(map[string]cty.Value) - - for _, arg := range args { - if !arg.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - if !arg.Type().IsObjectType() && !arg.Type().IsMapType() { - return cty.NilVal, fmt.Errorf("arguments must be maps or objects, got %#v", arg.Type().FriendlyName()) - } - for it := arg.ElementIterator(); it.Next(); { - k, v := it.Element() - outputMap[k.AsString()] = v - } - } - return cty.ObjectVal(outputMap), nil - }, -}) - -// ReverseFunc takes a sequence and produces a new sequence of the same length -// with all of the same elements as the given sequence but in reverse order. 
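The later-argument-wins behavior MergeFunc documents above boils down to overwriting map entries in argument order; this hypothetical `mergeObjects` helper (not part of the SDK) mirrors the Impl's iteration:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// mergeObjects overwrites earlier keys with later ones, like MergeFunc's Impl.
func mergeObjects(args ...cty.Value) cty.Value {
	out := make(map[string]cty.Value)
	for _, arg := range args {
		for it := arg.ElementIterator(); it.Next(); {
			k, v := it.Element()
			out[k.AsString()] = v
		}
	}
	return cty.ObjectVal(out)
}

func main() {
	a := cty.ObjectVal(map[string]cty.Value{"x": cty.NumberIntVal(1), "y": cty.NumberIntVal(2)})
	b := cty.ObjectVal(map[string]cty.Value{"y": cty.NumberIntVal(99)})
	merged := mergeObjects(a, b)
	fmt.Println(merged.GetAttr("y").AsBigFloat()) // 99: the later argument wins
}
```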
-var ReverseFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - argTy := args[0].Type() - switch { - case argTy.IsTupleType(): - argTys := argTy.TupleElementTypes() - retTys := make([]cty.Type, len(argTys)) - for i, ty := range argTys { - retTys[len(retTys)-i-1] = ty - } - return cty.Tuple(retTys), nil - case argTy.IsListType(), argTy.IsSetType(): // We accept sets here to mimic the usual behavior of auto-converting to list - return cty.List(argTy.ElementType()), nil - default: - return cty.NilType, function.NewArgErrorf(0, "can only reverse list or tuple values, not %s", argTy.FriendlyName()) - } - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - in := args[0].AsValueSlice() - outVals := make([]cty.Value, len(in)) - for i, v := range in { - outVals[len(outVals)-i-1] = v - } - switch { - case retType.IsTupleType(): - return cty.TupleVal(outVals), nil - default: - if len(outVals) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(outVals), nil - } - }, -}) - -// SetProductFunc calculates the cartesian product of two or more sets or -// sequences. If the arguments are all lists then the result is a list of tuples, -// preserving the ordering of all of the input lists. Otherwise the result is a -// set of tuples. -var SetProductFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "sets", - Type: cty.DynamicPseudoType, - }, - Type: func(args []cty.Value) (retType cty.Type, err error) { - if len(args) < 2 { - return cty.NilType, errors.New("at least two arguments are required") - } - - listCount := 0 - elemTys := make([]cty.Type, len(args)) - for i, arg := range args { - aty := arg.Type() - switch { - case aty.IsSetType(): - elemTys[i] = aty.ElementType() - case aty.IsListType(): - elemTys[i] = aty.ElementType() - listCount++ - case aty.IsTupleType(): - // We can accept a tuple type only if there's some common type - // that all of its elements can be converted to. - allEtys := aty.TupleElementTypes() - if len(allEtys) == 0 { - elemTys[i] = cty.DynamicPseudoType - listCount++ - break - } - ety, _ := convert.UnifyUnsafe(allEtys) - if ety == cty.NilType { - return cty.NilType, function.NewArgErrorf(i, "all elements must be of the same type") - } - elemTys[i] = ety - listCount++ - default: - return cty.NilType, function.NewArgErrorf(i, "a set or a list is required") - } - } - - if listCount == len(args) { - return cty.List(cty.Tuple(elemTys)), nil - } - return cty.Set(cty.Tuple(elemTys)), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - ety := retType.ElementType() - - total := 1 - for _, arg := range args { - // Because of our type checking function, we are guaranteed that - // all of the arguments are known, non-null values of types that - // support LengthInt. - total *= arg.LengthInt() - } - - if total == 0 { - // If any of the arguments was an empty collection then our result - // is also an empty collection, which we'll short-circuit here. 
- if retType.IsListType() { - return cty.ListValEmpty(ety), nil - } - return cty.SetValEmpty(ety), nil - } - - subEtys := ety.TupleElementTypes() - product := make([][]cty.Value, total) - - b := make([]cty.Value, total*len(args)) - n := make([]int, len(args)) - s := 0 - argVals := make([][]cty.Value, len(args)) - for i, arg := range args { - argVals[i] = arg.AsValueSlice() - } - - for i := range product { - e := s + len(args) - pi := b[s:e] - product[i] = pi - s = e - - for j, n := range n { - val := argVals[j][n] - ty := subEtys[j] - if !val.Type().Equals(ty) { - var err error - val, err = convert.Convert(val, ty) - if err != nil { - // Should never happen since we checked this in our - // type-checking function. - return cty.NilVal, fmt.Errorf("failed to convert argVals[%d][%d] to %s; this is a bug in Terraform", j, n, ty.FriendlyName()) - } - } - pi[j] = val - } - - for j := len(n) - 1; j >= 0; j-- { - n[j]++ - if n[j] < len(argVals[j]) { - break - } - n[j] = 0 - } - } - - productVals := make([]cty.Value, total) - for i, vals := range product { - productVals[i] = cty.TupleVal(vals) - } - - if retType.IsListType() { - return cty.ListVal(productVals), nil - } - return cty.SetVal(productVals), nil - }, -}) - -// SliceFunc constructs a function that extracts some consecutive elements -// from within a list. -var SliceFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "start_index", - Type: cty.Number, - }, - { - Name: "end_index", - Type: cty.Number, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - arg := args[0] - argTy := arg.Type() - - if argTy.IsSetType() { - return cty.NilType, function.NewArgErrorf(0, "cannot slice a set, because its elements do not have indices; use the tolist function to force conversion to list if the ordering of the result is not important") - } - if !argTy.IsListType() && !argTy.IsTupleType() { - return cty.NilType, function.NewArgErrorf(0, "must be a list or tuple value") - } - - startIndex, endIndex, idxsKnown, err := sliceIndexes(args) - if err != nil { - return cty.NilType, err - } - - if argTy.IsListType() { - return argTy, nil - } - - if !idxsKnown { - // If we don't know our start/end indices then we can't predict - // the result type if we're planning to return a tuple. - return cty.DynamicPseudoType, nil - } - return cty.Tuple(argTy.TupleElementTypes()[startIndex:endIndex]), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - inputList := args[0] - - if retType == cty.DynamicPseudoType { - return cty.DynamicVal, nil - } - - // we ignore idxsKnown return value here because the indices are always - // known here, or else the call would've short-circuited. 
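The sliceIndexes helper below validates a half-open [start, end) range, so SliceFunc behaves exactly like a Go slice expression over the underlying value slice. Shown directly, with illustrative values:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	list := cty.ListVal([]cty.Value{
		cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c"), cty.StringVal("d"),
	})

	// slice(list, 1, 3) keeps indices 1 and 2, like Go's list[1:3].
	for _, v := range list.AsValueSlice()[1:3] {
		fmt.Println(v.AsString()) // b, then c
	}
}
```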
- startIndex, endIndex, _, err := sliceIndexes(args) - if err != nil { - return cty.NilVal, err - } - - if endIndex-startIndex == 0 { - if retType.IsTupleType() { - return cty.EmptyTupleVal, nil - } - return cty.ListValEmpty(retType.ElementType()), nil - } - - outputList := inputList.AsValueSlice()[startIndex:endIndex] - - if retType.IsTupleType() { - return cty.TupleVal(outputList), nil - } - - return cty.ListVal(outputList), nil - }, -}) - -func sliceIndexes(args []cty.Value) (int, int, bool, error) { - var startIndex, endIndex, length int - var startKnown, endKnown, lengthKnown bool - - if args[0].Type().IsTupleType() || args[0].IsKnown() { // if it's a tuple then we always know the length by the type, but lists must be known - length = args[0].LengthInt() - lengthKnown = true - } - - if args[1].IsKnown() { - if err := gocty.FromCtyValue(args[1], &startIndex); err != nil { - return 0, 0, false, function.NewArgErrorf(1, "invalid start index: %s", err) - } - if startIndex < 0 { - return 0, 0, false, function.NewArgErrorf(1, "start index must not be less than zero") - } - if lengthKnown && startIndex > length { - return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than the length of the list") - } - startKnown = true - } - if args[2].IsKnown() { - if err := gocty.FromCtyValue(args[2], &endIndex); err != nil { - return 0, 0, false, function.NewArgErrorf(2, "invalid end index: %s", err) - } - if endIndex < 0 { - return 0, 0, false, function.NewArgErrorf(2, "end index must not be less than zero") - } - if lengthKnown && endIndex > length { - return 0, 0, false, function.NewArgErrorf(2, "end index must not be greater than the length of the list") - } - endKnown = true - } - if startKnown && endKnown { - if startIndex > endIndex { - return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than end index") - } - } - return startIndex, endIndex, startKnown && endKnown, nil -} - -// TransposeFunc constructs a function that takes a map of lists of strings and -// swaps the keys and values to produce a new map of lists of strings.
-var TransposeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "values", - Type: cty.Map(cty.List(cty.String)), - }, - }, - Type: function.StaticReturnType(cty.Map(cty.List(cty.String))), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - inputMap := args[0] - if !inputMap.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - outputMap := make(map[string]cty.Value) - tmpMap := make(map[string][]string) - - for it := inputMap.ElementIterator(); it.Next(); { - inKey, inVal := it.Element() - for iter := inVal.ElementIterator(); iter.Next(); { - _, val := iter.Element() - if !val.Type().Equals(cty.String) { - return cty.MapValEmpty(cty.List(cty.String)), errors.New("input must be a map of lists of strings") - } - - outKey := val.AsString() - if _, ok := tmpMap[outKey]; !ok { - tmpMap[outKey] = make([]string, 0) - } - outVal := tmpMap[outKey] - outVal = append(outVal, inKey.AsString()) - sort.Strings(outVal) - tmpMap[outKey] = outVal - } - } - - for outKey, outVal := range tmpMap { - values := make([]cty.Value, 0) - for _, v := range outVal { - values = append(values, cty.StringVal(v)) - } - outputMap[outKey] = cty.ListVal(values) - } - - return cty.MapVal(outputMap), nil - }, -}) - -// ValuesFunc constructs a function that returns a list of the map values, -// in the order of the sorted keys. -var ValuesFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "values", - Type: cty.DynamicPseudoType, - }, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - ty := args[0].Type() - if ty.IsMapType() { - return cty.List(ty.ElementType()), nil - } else if ty.IsObjectType() { - // The result is a tuple type with all of the same types as our - // object type's attributes, sorted in lexicographical order by the - // keys. (This matches the sort order guaranteed by ElementIterator - // on a cty object value.) - atys := ty.AttributeTypes() - if len(atys) == 0 { - return cty.EmptyTuple, nil - } - attrNames := make([]string, 0, len(atys)) - for name := range atys { - attrNames = append(attrNames, name) - } - sort.Strings(attrNames) - - tys := make([]cty.Type, len(attrNames)) - for i, name := range attrNames { - tys[i] = atys[name] - } - return cty.Tuple(tys), nil - } - return cty.NilType, errors.New("values() requires a map as the first argument") - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - mapVar := args[0] - - // We can just iterate the map/object value here because cty guarantees - // that these types always iterate in key lexicographical order. - var values []cty.Value - for it := mapVar.ElementIterator(); it.Next(); { - _, val := it.Element() - values = append(values, val) - } - - if retType.IsTupleType() { - return cty.TupleVal(values), nil - } - if len(values) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(values), nil - }, -}) - -// ZipmapFunc constructs a function that constructs a map from a list of keys -// and a corresponding list of values. 
-var ZipmapFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "keys", - Type: cty.List(cty.String), - }, - { - Name: "values", - Type: cty.DynamicPseudoType, - }, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - keys := args[0] - values := args[1] - valuesTy := values.Type() - - switch { - case valuesTy.IsListType(): - return cty.Map(values.Type().ElementType()), nil - case valuesTy.IsTupleType(): - if !keys.IsWhollyKnown() { - // Since zipmap with a tuple produces an object, we need to know - // all of the key names before we can predict our result type. - return cty.DynamicPseudoType, nil - } - - keysRaw := keys.AsValueSlice() - valueTypesRaw := valuesTy.TupleElementTypes() - if len(keysRaw) != len(valueTypesRaw) { - return cty.NilType, fmt.Errorf("number of keys (%d) does not match number of values (%d)", len(keysRaw), len(valueTypesRaw)) - } - atys := make(map[string]cty.Type, len(valueTypesRaw)) - for i, keyVal := range keysRaw { - if keyVal.IsNull() { - return cty.NilType, fmt.Errorf("keys list has null value at index %d", i) - } - key := keyVal.AsString() - atys[key] = valueTypesRaw[i] - } - return cty.Object(atys), nil - - default: - return cty.NilType, errors.New("values argument must be a list or tuple value") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - keys := args[0] - values := args[1] - - if !keys.IsWhollyKnown() { - // Unknown map keys and object attributes are not supported, so - // our entire result must be unknown in this case. - return cty.UnknownVal(retType), nil - } - - // both keys and values are guaranteed to be shallowly-known here, - // because our declared params above don't allow unknown or null values. - if keys.LengthInt() != values.LengthInt() { - return cty.NilVal, fmt.Errorf("number of keys (%d) does not match number of values (%d)", keys.LengthInt(), values.LengthInt()) - } - - output := make(map[string]cty.Value) - - i := 0 - for it := keys.ElementIterator(); it.Next(); { - _, v := it.Element() - val := values.Index(cty.NumberIntVal(int64(i))) - output[v.AsString()] = val - i++ - } - - switch { - case retType.IsMapType(): - if len(output) == 0 { - return cty.MapValEmpty(retType.ElementType()), nil - } - return cty.MapVal(output), nil - case retType.IsObjectType(): - return cty.ObjectVal(output), nil - default: - // Should never happen because the type-check function should've - // caught any other case. - return cty.NilVal, fmt.Errorf("internally selected incorrect result type %s (this is a bug)", retType.FriendlyName()) - } - }, -}) - -// helper function to add an element to a list, if it does not already exist -func appendIfMissing(slice []cty.Value, element cty.Value) ([]cty.Value, error) { - for _, ele := range slice { - eq, err := stdlib.Equal(ele, element) - if err != nil { - return slice, err - } - if eq.True() { - return slice, nil - } - } - return append(slice, element), nil -} - -// Element returns a single element from a given list at the given index. If -// index is greater than the length of the list then it is wrapped modulo -// the list length. -func Element(list, index cty.Value) (cty.Value, error) { - return ElementFunc.Call([]cty.Value{list, index}) -} - -// Length returns the number of elements in the given collection or number of -// Unicode characters in the given string. 
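ZipmapFunc's Impl above pairs `keys[i]` with `values[i]` after confirming the two lengths match; a hypothetical distillation (`zip` is not an SDK function):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// zip pairs keys[i] with values[i], mirroring ZipmapFunc's length check.
func zip(keys []string, values []cty.Value) (cty.Value, error) {
	if len(keys) != len(values) {
		return cty.NilVal, fmt.Errorf("number of keys (%d) does not match number of values (%d)", len(keys), len(values))
	}
	out := make(map[string]cty.Value, len(keys))
	for i, k := range keys {
		out[k] = values[i]
	}
	return cty.MapVal(out), nil
}

func main() {
	m, err := zip([]string{"a", "b"}, []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)})
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Index(cty.StringVal("b")).AsBigFloat()) // 2
}
```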
-func Length(collection cty.Value) (cty.Value, error) { - return LengthFunc.Call([]cty.Value{collection}) -} - -// Coalesce takes any number of arguments and returns the first one that isn't empty. -func Coalesce(args ...cty.Value) (cty.Value, error) { - return CoalesceFunc.Call(args) -} - -// CoalesceList takes any number of list arguments and returns the first one that isn't empty. -func CoalesceList(args ...cty.Value) (cty.Value, error) { - return CoalesceListFunc.Call(args) -} - -// Compact takes a list of strings and returns a new list -// with any empty string elements removed. -func Compact(list cty.Value) (cty.Value, error) { - return CompactFunc.Call([]cty.Value{list}) -} - -// Contains determines whether a given list contains a given single value -// as one of its elements. -func Contains(list, value cty.Value) (cty.Value, error) { - return ContainsFunc.Call([]cty.Value{list, value}) -} - -// Index finds the element index for a given value in a list. -func Index(list, value cty.Value) (cty.Value, error) { - return IndexFunc.Call([]cty.Value{list, value}) -} - -// Distinct takes a list and returns a new list with any duplicate elements removed. -func Distinct(list cty.Value) (cty.Value, error) { - return DistinctFunc.Call([]cty.Value{list}) -} - -// Chunklist splits a single list into fixed-size chunks, returning a list of lists. -func Chunklist(list, size cty.Value) (cty.Value, error) { - return ChunklistFunc.Call([]cty.Value{list, size}) -} - -// Flatten takes a list and replaces any elements that are lists with a flattened -// sequence of the list contents. -func Flatten(list cty.Value) (cty.Value, error) { - return FlattenFunc.Call([]cty.Value{list}) -} - -// Keys takes a map and returns a sorted list of the map keys. -func Keys(inputMap cty.Value) (cty.Value, error) { - return KeysFunc.Call([]cty.Value{inputMap}) -} - -// List takes any number of list arguments and returns a list containing those -// values in the same order. -func List(args ...cty.Value) (cty.Value, error) { - return ListFunc.Call(args) -} - -// Lookup performs a dynamic lookup into a map. -// There are two required arguments, map and key, plus an optional default, -// which is a value to return if no key is found in map. -func Lookup(args ...cty.Value) (cty.Value, error) { - return LookupFunc.Call(args) -} - -// Map takes an even number of arguments and returns a map whose elements are constructed -// from consecutive pairs of arguments. -func Map(args ...cty.Value) (cty.Value, error) { - return MapFunc.Call(args) -} - -// Matchkeys constructs a new list by taking a subset of elements from one list -// whose indexes match the corresponding indexes of values in another list. -func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) { - return MatchkeysFunc.Call([]cty.Value{values, keys, searchset}) -} - -// Merge takes an arbitrary number of maps and returns a single map that contains -// a merged set of elements from all of the maps. -// -// If more than one given map defines the same key then the one that is later in -// the argument sequence takes precedence. -func Merge(maps ...cty.Value) (cty.Value, error) { - return MergeFunc.Call(maps) -} - -// Reverse takes a sequence and produces a new sequence of the same length -// with all of the same elements as the given sequence but in reverse order. -func Reverse(list cty.Value) (cty.Value, error) { - return ReverseFunc.Call([]cty.Value{list}) -} - -// SetProduct computes the cartesian product of sets or sequences. 
-func SetProduct(sets ...cty.Value) (cty.Value, error) { - return SetProductFunc.Call(sets) -} - -// Slice extracts some consecutive elements from within a list. -func Slice(list, start, end cty.Value) (cty.Value, error) { - return SliceFunc.Call([]cty.Value{list, start, end}) -} - -// Transpose takes a map of lists of strings and swaps the keys and values to -// produce a new map of lists of strings. -func Transpose(values cty.Value) (cty.Value, error) { - return TransposeFunc.Call([]cty.Value{values}) -} - -// Values returns a list of the map values, in the order of the sorted keys. -// This function only works on flat maps. -func Values(values cty.Value) (cty.Value, error) { - return ValuesFunc.Call([]cty.Value{values}) -} - -// Zipmap constructs a map from a list of keys and a corresponding list of values. -func Zipmap(keys, values cty.Value) (cty.Value, error) { - return ZipmapFunc.Call([]cty.Value{keys, values}) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go deleted file mode 100644 index 83f8597972..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go +++ /dev/null @@ -1,87 +0,0 @@ -package funcs - -import ( - "strconv" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" -) - -// MakeToFunc constructs a "to..." function, like "tostring", which converts -// its argument to a specific type or type kind. -// -// The given type wantTy can be any type constraint that cty's "convert" package -// would accept. In particular, this means that you can pass -// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which -// will then cause cty to attempt to unify all of the element types when given -// a tuple. -func MakeToFunc(wantTy cty.Type) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "v", - // We use DynamicPseudoType rather than wantTy here so that - // all values will pass through the function API verbatim and - // we can handle the conversion logic within the Type and - // Impl functions. This allows us to customize the error - // messages to be more appropriate for an explicit type - // conversion, whereas the cty function system produces - // messages aimed at _implicit_ type conversions. - Type: cty.DynamicPseudoType, - AllowNull: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - gotTy := args[0].Type() - if gotTy.Equals(wantTy) { - return wantTy, nil - } - conv := convert.GetConversionUnsafe(args[0].Type(), wantTy) - if conv == nil { - // We'll use some specialized errors for some trickier cases, - // but most we can handle in a simple way. - switch { - case gotTy.IsTupleType() && wantTy.IsTupleType(): - return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) - case gotTy.IsObjectType() && wantTy.IsObjectType(): - return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) - default: - return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) - } - } - // If a conversion is available then everything is fine. 
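The Type callback above leans on two go-cty entry points: `convert.GetConversionUnsafe` to ask whether any conversion exists at all, and `convert.Convert` to actually perform one. A small illustrative snippet of both:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// A "tostring"-style explicit conversion: number -> string.
	v, err := convert.Convert(cty.NumberIntVal(5), cty.String)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsString()) // "5"

	// GetConversionUnsafe returns nil when no conversion is possible,
	// which is the case MakeToFunc turns into an argument error.
	if conv := convert.GetConversionUnsafe(cty.Bool, cty.List(cty.String)); conv == nil {
		fmt.Println("no conversion from bool to list(string)")
	}
}
```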
- return wantTy, nil - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - // We didn't set "AllowUnknown" on our argument, so it is guaranteed - // to be known here but may still be null. - ret, err := convert.Convert(args[0], retType) - if err != nil { - // Because we used GetConversionUnsafe above, conversion can - // still potentially fail in here. For example, if the user - // asks to convert the string "a" to bool then we'll - // optimistically permit it during type checking but fail here - // once we note that the value isn't either "true" or "false". - gotTy := args[0].Type() - switch { - case gotTy == cty.String && wantTy == cty.Bool: - what := "string" - if !args[0].IsNull() { - what = strconv.Quote(args[0].AsString()) - } - return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what) - case gotTy == cty.String && wantTy == cty.Number: - what := "string" - if !args[0].IsNull() { - what = strconv.Quote(args[0].AsString()) - } - return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what) - default: - return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) - } - } - return ret, nil - }, - }) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go deleted file mode 100644 index 28074fb13e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go +++ /dev/null @@ -1,325 +0,0 @@ -package funcs - -import ( - "crypto/md5" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "crypto/x509" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "fmt" - "hash" - - uuidv5 "github.com/google/uuid" - uuid "github.com/hashicorp/go-uuid" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/gocty" - "golang.org/x/crypto/bcrypt" -) - -var UUIDFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - result, err := uuid.GenerateUUID() - if err != nil { - return cty.UnknownVal(cty.String), err - } - return cty.StringVal(result), nil - }, -}) - -var UUIDV5Func = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "namespace", - Type: cty.String, - }, - { - Name: "name", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var namespace uuidv5.UUID - switch { - case args[0].AsString() == "dns": - namespace = uuidv5.NameSpaceDNS - case args[0].AsString() == "url": - namespace = uuidv5.NameSpaceURL - case args[0].AsString() == "oid": - namespace = uuidv5.NameSpaceOID - case args[0].AsString() == "x500": - namespace = uuidv5.NameSpaceX500 - default: - if namespace, err = uuidv5.Parse(args[0].AsString()); err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%v)", args[0].AsString(), err) - } - } - val := args[1].AsString() - return cty.StringVal(uuidv5.NewSHA1(namespace, []byte(val)).String()), nil - }, -}) - -// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string -// and encodes it with Base64. 
-var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString) - -// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileBase64Sha256Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha256.New, base64.StdEncoding.EncodeToString) -} - -// Base64Sha512Func constructs a function that computes the SHA512 hash of a given string -// and encodes it with Base64. -var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString) - -// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileBase64Sha512Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha512.New, base64.StdEncoding.EncodeToString) -} - -// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher. -var BcryptFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "cost", - Type: cty.Number, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - defaultCost := 10 - - if len(args) > 1 { - var val int - if err := gocty.FromCtyValue(args[1], &val); err != nil { - return cty.UnknownVal(cty.String), err - } - defaultCost = val - } - - if len(args) > 2 { - return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments") - } - - input := args[0].AsString() - out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("error occurred generating password %s", err.Error()) - } - - return cty.StringVal(string(out)), nil - }, -}) - -// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits. -var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString) - -// MakeFileMd5Func constructs a function that is like Md5Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileMd5Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, md5.New, hex.EncodeToString) -} - -// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted ciphertext. -var RsaDecryptFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "ciphertext", - Type: cty.String, - }, - { - Name: "privatekey", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - s := args[0].AsString() - key := args[1].AsString() - - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode input %q: cipher text must be base64-encoded", s) - } - - block, _ := pem.Decode([]byte(key)) - if block == nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to parse key: no key found") - } - if block.Headers["Proc-Type"] == "4,ENCRYPTED" { - return cty.UnknownVal(cty.String), fmt.Errorf( - "failed to parse key: password protected keys are not supported.
Please decrypt the key prior to use", - ) - } - - x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - out, err := rsa.DecryptPKCS1v15(nil, x509Key, b) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(string(out)), nil - }, -}) - -// Sha1Func constructs a function that computes the SHA1 hash of a given string -// and encodes it with hexadecimal digits. -var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString) - -// MakeFileSha1Func constructs a function that is like Sha1Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileSha1Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha1.New, hex.EncodeToString) -} - -// Sha256Func constructs a function that computes the SHA256 hash of a given string -// and encodes it with hexadecimal digits. -var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString) - -// MakeFileSha256Func constructs a function that is like Sha256Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileSha256Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha256.New, hex.EncodeToString) -} - -// Sha512Func constructs a function that computes the SHA512 hash of a given string -// and encodes it with hexadecimal digits. -var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString) - -// MakeFileSha512Func constructs a function that is like Sha512Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileSha512Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha512.New, hex.EncodeToString) -} - -func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - s := args[0].AsString() - h := hf() - h.Write([]byte(s)) - rv := enc(h.Sum(nil)) - return cty.StringVal(rv), nil - }, - }) -} - -func makeFileHashFunction(baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - path := args[0].AsString() - src, err := readFileBytes(baseDir, path) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - h := hf() - h.Write(src) - rv := enc(h.Sum(nil)) - return cty.StringVal(rv), nil - }, - }) -} - -// UUID generates and returns a Type-4 UUID in the standard hexadecimal string -// format. -// -// This is not a pure function: it will generate a different result for each -// call. It must therefore be registered as an impure function in the function -// table in the "lang" package. -func UUID() (cty.Value, error) { - return UUIDFunc.Call(nil) -} - -// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string -// format. -func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) { - return UUIDV5Func.Call([]cty.Value{namespace, name}) -} - -// Base64Sha256 computes the SHA256 hash of a given string and encodes it with -// Base64.
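makeStringHashFunction above is just a `hash.Hash` constructor composed with an encoder, which is the entire story behind the `md5`/`sha*`/`base64sha*` family. The same pipeline using only the standard library (the input string is illustrative):

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256([]byte("hello"))

	// sha256("hello"): hex-encoded digest.
	fmt.Println(hex.EncodeToString(sum[:]))

	// base64sha256("hello"): same digest, standard Base64 alphabet.
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
```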
-// -// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied -// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning. -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. -func Base64Sha256(str cty.Value) (cty.Value, error) { - return Base64Sha256Func.Call([]cty.Value{str}) -} - -// Base64Sha512 computes the SHA512 hash of a given string and encodes it with -// Base64. -// -// The given string is first encoded as UTF-8 and then the SHA512 algorithm is applied -// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning. -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. -func Base64Sha512(str cty.Value) (cty.Value, error) { - return Base64Sha512Func.Call([]cty.Value{str}) -} - -// Bcrypt computes a hash of the given string using the Blowfish cipher, -// returning a string in the Modular Crypt Format -// usually expected in the shadow password file on many Unix systems. -func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) { - args := make([]cty.Value, len(cost)+1) - args[0] = str - copy(args[1:], cost) - return BcryptFunc.Call(args) -} - -// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits. -func Md5(str cty.Value) (cty.Value, error) { - return Md5Func.Call([]cty.Value{str}) -} - -// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding -// cleartext. -func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) { - return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey}) -} - -// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits. -func Sha1(str cty.Value) (cty.Value, error) { - return Sha1Func.Call([]cty.Value{str}) -} - -// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits. -func Sha256(str cty.Value) (cty.Value, error) { - return Sha256Func.Call([]cty.Value{str}) -} - -// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits. -func Sha512(str cty.Value) (cty.Value, error) { - return Sha512Func.Call([]cty.Value{str}) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go deleted file mode 100644 index 5dae198774..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go +++ /dev/null @@ -1,70 +0,0 @@ -package funcs - -import ( - "time" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// TimestampFunc constructs a function that returns a string representation of the current date and time. -var TimestampFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil - }, -}) - -// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp.
- -// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp. -var TimeAddFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "timestamp", - Type: cty.String, - }, - { - Name: "duration", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - ts, err := time.Parse(time.RFC3339, args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), err - } - duration, err := time.ParseDuration(args[1].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil - }, -}) - -// Timestamp returns a string representation of the current date and time. -// -// In the Terraform language, timestamps are conventionally represented as -// strings using RFC 3339 "Date and Time format" syntax, and so timestamp -// returns a string in this format. -func Timestamp() (cty.Value, error) { - return TimestampFunc.Call([]cty.Value{}) -} - -// TimeAdd adds a duration to a timestamp, returning a new timestamp. -// -// In the Terraform language, timestamps are conventionally represented as -// strings using RFC 3339 "Date and Time format" syntax. Timeadd requires -// the timestamp argument to be a string conforming to this syntax. -// -// `duration` is a string representation of a time difference, consisting of -// sequences of number and unit pairs, like `"1.5h"` or `"1h30m"`. The accepted -// units are `"ns"`, `"us"` (or `"µs"`), `"ms"`, `"s"`, `"m"`, and `"h"`. The first -// number may be negative to indicate a negative duration, like `"-2h5m"`. -// -// The result is a string, also in RFC 3339 format, representing the result -// of adding the given duration to the given timestamp. -func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) { - return TimeAddFunc.Call([]cty.Value{timestamp, duration}) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go deleted file mode 100644 index af93f08dc1..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go +++ /dev/null @@ -1,140 +0,0 @@ -package funcs - -import ( - "bytes" - "compress/gzip" - "encoding/base64" - "fmt" - "log" - "net/url" - "unicode/utf8" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence. -var Base64DecodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - s := args[0].AsString() - sDec, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s) - } - if !utf8.Valid([]byte(sDec)) { - log.Printf("[DEBUG] the result of decoding the provided string is not valid UTF-8: %s", sDec) - return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the provided string is not valid UTF-8") - } - return cty.StringVal(string(sDec)), nil - }, -}) - -// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence.
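The decode path above is the interesting half: decoding can succeed at the Base64 level yet still produce bytes that are not a valid Terraform string, which is why the UTF-8 check follows it. A minimal standalone sketch of that two-step validation, illustrative only:

package main

import (
	"encoding/base64"
	"fmt"
	"unicode/utf8"
)

func main() {
	sDec, err := base64.StdEncoding.DecodeString("aGVsbG8=")
	if err != nil {
		panic(err) // not valid Base64 at all
	}
	if !utf8.Valid(sDec) {
		panic("decoded bytes are not valid UTF-8") // valid Base64, but not a usable string
	}
	fmt.Println(string(sDec)) // hello
}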
-var Base64EncodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil - }, -}) - -// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in -// Base64 encoding. -var Base64GzipFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - s := args[0].AsString() - - var b bytes.Buffer - gz := gzip.NewWriter(&b) - if _, err := gz.Write([]byte(s)); err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: '%s'", s) - } - if err := gz.Flush(); err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: '%s'", s) - } - if err := gz.Close(); err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: '%s'", s) - } - return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil - }, -}) - -// URLEncodeFunc constructs a function that applies URL encoding to a given string. -var URLEncodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(url.QueryEscape(args[0].AsString())), nil - }, -}) - -// Base64Decode decodes a string containing a base64 sequence. -// -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. -// -// Strings in the Terraform language are sequences of unicode characters rather -// than bytes, so this function will also interpret the resulting bytes as -// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function -// produces an error. -func Base64Decode(str cty.Value) (cty.Value, error) { - return Base64DecodeFunc.Call([]cty.Value{str}) -} - -// Base64Encode applies Base64 encoding to a string. -// -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. -// -// Strings in the Terraform language are sequences of unicode characters rather -// than bytes, so this function will first encode the characters from the string -// as UTF-8, and then apply Base64 encoding to the result. -func Base64Encode(str cty.Value) (cty.Value, error) { - return Base64EncodeFunc.Call([]cty.Value{str}) -} - -// Base64Gzip compresses a string with gzip and then encodes the result in -// Base64 encoding. -// -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. -// -// Strings in the Terraform language are sequences of unicode characters rather -// than bytes, so this function will first encode the characters from the string -// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding. -func Base64Gzip(str cty.Value) (cty.Value, error) { - return Base64GzipFunc.Call([]cty.Value{str}) -} - -// URLEncode applies URL encoding to a given string. -// -// This function identifies characters in the given string that would have a -// special meaning when included as a query string argument in a URL and -// escapes them using RFC 3986 "percent encoding". 
-// -// If the given string contains non-ASCII characters, these are first encoded as -// UTF-8 and then percent encoding is applied separately to each UTF-8 byte. -func URLEncode(str cty.Value) (cty.Value, error) { - return URLEncodeFunc.Call([]cty.Value{str}) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go deleted file mode 100644 index 786d3e74bc..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go +++ /dev/null @@ -1,360 +0,0 @@ -package funcs - -import ( - "encoding/base64" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "unicode/utf8" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - homedir "github.com/mitchellh/go-homedir" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// MakeFileFunc constructs a function that takes a file path and returns the -// contents of that file, either directly as a string (where valid UTF-8 is -// required) or as a string containing base64 bytes. -func MakeFileFunc(baseDir string, encBase64 bool) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - path := args[0].AsString() - src, err := readFileBytes(baseDir, path) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - switch { - case encBase64: - enc := base64.StdEncoding.EncodeToString(src) - return cty.StringVal(enc), nil - default: - if !utf8.Valid(src) { - return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", path) - } - return cty.StringVal(string(src)), nil - } - }, - }) -} - -// MakeTemplateFileFunc constructs a function that takes a file path and -// an arbitrary object of named values and attempts to render the referenced -// file as a template using HCL template syntax. -// -// The template itself may recursively call other functions so a callback -// must be provided to get access to those functions. The template cannot, -// however, access any variables defined in the scope: it is restricted only to -// those variables provided in the second function argument, to ensure that all -// dependencies on other graph nodes can be seen before executing this function. -// -// As a special exception, a referenced template file may not recursively call -// the templatefile function, since that would risk the same file being -// included into itself indefinitely. -func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function { - - params := []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - { - Name: "vars", - Type: cty.DynamicPseudoType, - }, - } - - loadTmpl := func(fn string) (hcl.Expression, error) { - // We re-use File here to ensure the same filename interpretation - // as it does, along with its other safety checks. 
- tmplVal, err := File(baseDir, cty.StringVal(fn)) - if err != nil { - return nil, err - } - - expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1}) - if diags.HasErrors() { - return nil, diags - } - - return expr, nil - } - - renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) { - if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) { - return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time - } - - ctx := &hcl.EvalContext{ - Variables: varsVal.AsValueMap(), - } - - // We'll pre-check references in the template here so we can give a - // more specialized error message than HCL would by default, so it's - // clearer that this problem is coming from a templatefile call. - for _, traversal := range expr.Variables() { - root := traversal.RootName() - if _, ok := ctx.Variables[root]; !ok { - return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange()) - } - } - - givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems - funcs := make(map[string]function.Function, len(givenFuncs)) - for name, fn := range givenFuncs { - if name == "templatefile" { - // We stub this one out to prevent recursive calls. - funcs[name] = function.New(&function.Spec{ - Params: params, - Type: func(args []cty.Value) (cty.Type, error) { - return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call") - }, - }) - continue - } - funcs[name] = fn - } - ctx.Functions = funcs - - val, diags := expr.Value(ctx) - if diags.HasErrors() { - return cty.DynamicVal, diags - } - return val, nil - } - - return function.New(&function.Spec{ - Params: params, - Type: func(args []cty.Value) (cty.Type, error) { - if !(args[0].IsKnown() && args[1].IsKnown()) { - return cty.DynamicPseudoType, nil - } - - // We'll render our template now to see what result type it produces. - // A template consisting only of a single interpolation can potentially - // return any type. - expr, err := loadTmpl(args[0].AsString()) - if err != nil { - return cty.DynamicPseudoType, err - } - - // This is safe even if args[1] contains unknowns because the HCL - // template renderer itself knows how to short-circuit those.
- val, err := renderTmpl(expr, args[1]) - return val.Type(), err - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - expr, err := loadTmpl(args[0].AsString()) - if err != nil { - return cty.DynamicVal, err - } - return renderTmpl(expr, args[1]) - }, - }) - -} - -// MakeFileExistsFunc constructs a function that takes a path -// and determines whether a file exists at that path. -func MakeFileExistsFunc(baseDir string) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.Bool), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - path := args[0].AsString() - path, err := homedir.Expand(path) - if err != nil { - return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err) - } - - if !filepath.IsAbs(path) { - path = filepath.Join(baseDir, path) - } - - // Ensure that the path is canonical for the host OS - path = filepath.Clean(path) - - fi, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return cty.False, nil - } - return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path) - } - - if fi.Mode().IsRegular() { - return cty.True, nil - } - - return cty.False, fmt.Errorf("%s is not a regular file, but %q", - path, fi.Mode().String()) - }, - }) -} - -// BasenameFunc constructs a function that takes a string containing a filesystem path -// and removes all except the last portion from it. -var BasenameFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(filepath.Base(args[0].AsString())), nil - }, -}) - -// DirnameFunc constructs a function that takes a string containing a filesystem path -// and removes the last portion from it. -var DirnameFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(filepath.Dir(args[0].AsString())), nil - }, -}) - -// AbsPathFunc constructs a function that converts a filesystem path to an absolute path. -var AbsPathFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - absPath, err := filepath.Abs(args[0].AsString()) - return cty.StringVal(filepath.ToSlash(absPath)), err - }, -}) - -// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory.
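The existence check above boils down to: expand a leading ~, resolve relative paths against the module base directory, then require a regular file. A simplified standalone sketch of the same steps, using os.UserHomeDir in place of the SDK's github.com/mitchellh/go-homedir dependency and folding the "not a regular file" error into a plain false; the fileExists helper name exists only for this example:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func fileExists(baseDir, path string) (bool, error) {
	if path == "~" || strings.HasPrefix(path, "~/") {
		home, err := os.UserHomeDir()
		if err != nil {
			return false, err
		}
		path = filepath.Join(home, strings.TrimPrefix(path, "~"))
	}
	if !filepath.IsAbs(path) {
		path = filepath.Join(baseDir, path) // resolve relative to the module directory
	}
	fi, err := os.Stat(filepath.Clean(path))
	if os.IsNotExist(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return fi.Mode().IsRegular(), nil // directories, sockets, etc. do not count
}

func main() {
	ok, err := fileExists(".", "main.go")
	fmt.Println(ok, err)
}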
-var PathExpandFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - - homePath, err := homedir.Expand(args[0].AsString()) - return cty.StringVal(homePath), err - }, -}) - -func readFileBytes(baseDir, path string) ([]byte, error) { - path, err := homedir.Expand(path) - if err != nil { - return nil, fmt.Errorf("failed to expand ~: %s", err) - } - - if !filepath.IsAbs(path) { - path = filepath.Join(baseDir, path) - } - - // Ensure that the path is canonical for the host OS - path = filepath.Clean(path) - - src, err := ioutil.ReadFile(path) - if err != nil { - // ReadFile does not return Terraform-user-friendly error - // messages, so we'll provide our own. - if os.IsNotExist(err) { - return nil, fmt.Errorf("no file exists at %s", path) - } - return nil, fmt.Errorf("failed to read %s", path) - } - - return src, nil -} - -// File reads the contents of the file at the given path. -// -// The file must contain valid UTF-8 bytes, or this function will return an error. -// -// The underlying function implementation works relative to a particular base -// directory, so this wrapper takes a base directory string and uses it to -// construct the underlying function before calling it. -func File(baseDir string, path cty.Value) (cty.Value, error) { - fn := MakeFileFunc(baseDir, false) - return fn.Call([]cty.Value{path}) -} - -// FileExists determines whether a file exists at the given path. -// -// The underlying function implementation works relative to a particular base -// directory, so this wrapper takes a base directory string and uses it to -// construct the underlying function before calling it. -func FileExists(baseDir string, path cty.Value) (cty.Value, error) { - fn := MakeFileExistsFunc(baseDir) - return fn.Call([]cty.Value{path}) -} - -// FileBase64 reads the contents of the file at the given path. -// -// The bytes from the file are encoded as base64 before returning. -// -// The underlying function implementation works relative to a particular base -// directory, so this wrapper takes a base directory string and uses it to -// construct the underlying function before calling it. -func FileBase64(baseDir string, path cty.Value) (cty.Value, error) { - fn := MakeFileFunc(baseDir, true) - return fn.Call([]cty.Value{path}) -} - -// Basename takes a string containing a filesystem path and removes all except the last portion from it. -// -// The underlying function implementation works only with the path string and does not access the filesystem itself. -// It is therefore unable to take into account filesystem features such as symlinks. -// -// If the path is empty then the result is ".", representing the current working directory. -func Basename(path cty.Value) (cty.Value, error) { - return BasenameFunc.Call([]cty.Value{path}) -} - -// Dirname takes a string containing a filesystem path and removes the last portion from it. -// -// The underlying function implementation works only with the path string and does not access the filesystem itself. -// It is therefore unable to take into account filesystem features such as symlinks. -// -// If the path is empty then the result is ".", representing the current working directory. 
-func Dirname(path cty.Value) (cty.Value, error) { - return DirnameFunc.Call([]cty.Value{path}) -} - -// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with -// the current user's home directory path. -// -// The underlying function implementation works only with the path string and does not access the filesystem itself. -// It is therefore unable to take into account filesystem features such as symlinks. -// -// If the leading segment in the path is not `~` then the given path is returned unmodified. -func Pathexpand(path cty.Value) (cty.Value, error) { - return PathExpandFunc.Call([]cty.Value{path}) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go deleted file mode 100644 index c813f47bf6..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go +++ /dev/null @@ -1,217 +0,0 @@ -package funcs - -import ( - "math" - "math/big" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/gocty" -) - -// CeilFunc constructs a function that returns the closest whole number greater -// than or equal to the given value. -var CeilFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "num", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var val float64 - if err := gocty.FromCtyValue(args[0], &val); err != nil { - return cty.UnknownVal(cty.String), err - } - return cty.NumberIntVal(int64(math.Ceil(val))), nil - }, -}) - -// FloorFunc constructs a function that returns the closest whole number less -// than or equal to the given value. -var FloorFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "num", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var val float64 - if err := gocty.FromCtyValue(args[0], &val); err != nil { - return cty.UnknownVal(cty.String), err - } - return cty.NumberIntVal(int64(math.Floor(val))), nil - }, -}) - -// LogFunc constructs a function that returns the logarithm of a given number in a given base. -var LogFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "num", - Type: cty.Number, - }, - { - Name: "base", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var num float64 - if err := gocty.FromCtyValue(args[0], &num); err != nil { - return cty.UnknownVal(cty.String), err - } - - var base float64 - if err := gocty.FromCtyValue(args[1], &base); err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil - }, -})
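ceil, floor, and log above are thin wrappers over the math package; in particular, log with an arbitrary base is computed via the natural-logarithm identity log_b(x) = ln(x)/ln(b). A quick standalone illustration:

package main

import (
	"fmt"
	"math"
)

func main() {
	fmt.Println(math.Ceil(4.1))              // 5
	fmt.Println(math.Floor(4.9))             // 4
	fmt.Println(math.Log(256) / math.Log(2)) // 8 (within floating-point error)
}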
- -// PowFunc constructs a function that raises a given number to a given power. -var PowFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "num", - Type: cty.Number, - }, - { - Name: "power", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var num float64 - if err := gocty.FromCtyValue(args[0], &num); err != nil { - return cty.UnknownVal(cty.String), err - } - - var power float64 - if err := gocty.FromCtyValue(args[1], &power); err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.NumberFloatVal(math.Pow(num, power)), nil - }, -}) - -// SignumFunc constructs a function that determines the sign of a given number, -// returning -1, 0, or +1. -var SignumFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "num", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var num int - if err := gocty.FromCtyValue(args[0], &num); err != nil { - return cty.UnknownVal(cty.String), err - } - switch { - case num < 0: - return cty.NumberIntVal(-1), nil - case num > 0: - return cty.NumberIntVal(+1), nil - default: - return cty.NumberIntVal(0), nil - } - }, -}) - -// ParseIntFunc constructs a function that parses a string argument and returns an integer of the specified base. -var ParseIntFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "number", - Type: cty.DynamicPseudoType, - }, - { - Name: "base", - Type: cty.Number, - }, - }, - - Type: func(args []cty.Value) (cty.Type, error) { - if !args[0].Type().Equals(cty.String) { - return cty.Number, function.NewArgErrorf(0, "first argument must be a string, not %s", args[0].Type().FriendlyName()) - } - return cty.Number, nil - }, - - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - var numstr string - var base int - var err error - - if err = gocty.FromCtyValue(args[0], &numstr); err != nil { - return cty.UnknownVal(cty.String), function.NewArgError(0, err) - } - - if err = gocty.FromCtyValue(args[1], &base); err != nil { - return cty.UnknownVal(cty.Number), function.NewArgError(1, err) - } - - if base < 2 || base > 62 { - return cty.UnknownVal(cty.Number), function.NewArgErrorf( - 1, - "base must be a whole number between 2 and 62 inclusive", - ) - } - - num, ok := (&big.Int{}).SetString(numstr, base) - if !ok { - return cty.UnknownVal(cty.Number), function.NewArgErrorf( - 0, - "cannot parse %q as a base %d integer", - numstr, - base, - ) - } - - parsedNum := cty.NumberVal((&big.Float{}).SetInt(num)) - - return parsedNum, nil - }, -})
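parseint above defers the real parsing work to math/big, which accepts bases up to 62; the cty wrapper only adds the argument checks. For example:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	n, ok := new(big.Int).SetString("ff", 16)
	fmt.Println(n, ok) // 255 true

	_, ok = new(big.Int).SetString("12z", 10)
	fmt.Println(ok) // false: "z" is not a base-10 digit
}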
- -// Ceil returns the closest whole number greater than or equal to the given value. -func Ceil(num cty.Value) (cty.Value, error) { - return CeilFunc.Call([]cty.Value{num}) -} - -// Floor returns the closest whole number less than or equal to the given value. -func Floor(num cty.Value) (cty.Value, error) { - return FloorFunc.Call([]cty.Value{num}) -} - -// Log returns the logarithm of a given number in a given base. -func Log(num, base cty.Value) (cty.Value, error) { - return LogFunc.Call([]cty.Value{num, base}) -} - -// Pow returns the given number raised to the given power. -func Pow(num, power cty.Value) (cty.Value, error) { - return PowFunc.Call([]cty.Value{num, power}) -} - -// Signum determines the sign of a number, returning a number between -1 and -// 1 to represent the sign. -func Signum(num cty.Value) (cty.Value, error) { - return SignumFunc.Call([]cty.Value{num}) -} - -// ParseInt parses a string argument and returns an integer of the specified base. -func ParseInt(num cty.Value, base cty.Value) (cty.Value, error) { - return ParseIntFunc.Call([]cty.Value{num, base}) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go deleted file mode 100644 index c9ddf19e36..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go +++ /dev/null @@ -1,280 +0,0 @@ -package funcs - -import ( - "fmt" - "regexp" - "sort" - "strings" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/gocty" -) - -var JoinFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "separator", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "lists", - Type: cty.List(cty.String), - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - sep := args[0].AsString() - listVals := args[1:] - if len(listVals) < 1 { - return cty.UnknownVal(cty.String), fmt.Errorf("at least one list is required") - } - - l := 0 - for _, list := range listVals { - if !list.IsWhollyKnown() { - return cty.UnknownVal(cty.String), nil - } - l += list.LengthInt() - } - - items := make([]string, 0, l) - for ai, list := range listVals { - ei := 0 - for it := list.ElementIterator(); it.Next(); { - _, val := it.Element() - if val.IsNull() { - if len(listVals) > 1 { - return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d of list %d is null; cannot concatenate null values", ei, ai+1) - } - return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d is null; cannot concatenate null values", ei) - } - items = append(items, val.AsString()) - ei++ - } - } - - return cty.StringVal(strings.Join(items, sep)), nil - }, -})
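join above and the sort/split functions that follow are, once the cty bookkeeping around unknowns, nulls, and element iteration is stripped away, direct counterparts of the standard library calls they wrap:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	items := []string{"b", "c", "a"}
	sort.Strings(items)
	fmt.Println(strings.Join(items, ","))    // a,b,c
	fmt.Println(strings.Split("a,b,c", ",")) // [a b c]
}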
- -var SortFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.List(cty.String), - }, - }, - Type: function.StaticReturnType(cty.List(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - listVal := args[0] - - if !listVal.IsWhollyKnown() { - // If some of the element values aren't known yet then we - // can't yet predict the order of the result. - return cty.UnknownVal(retType), nil - } - if listVal.LengthInt() == 0 { // Easy path - return listVal, nil - } - - list := make([]string, 0, listVal.LengthInt()) - for it := listVal.ElementIterator(); it.Next(); { - iv, v := it.Element() - if v.IsNull() { - return cty.UnknownVal(retType), fmt.Errorf("given list element %s is null; a null string cannot be sorted", iv.AsBigFloat().String()) - } - list = append(list, v.AsString()) - } - - sort.Strings(list) - retVals := make([]cty.Value, len(list)) - for i, s := range list { - retVals[i] = cty.StringVal(s) - } - return cty.ListVal(retVals), nil - }, -}) - -var SplitFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "separator", - Type: cty.String, - }, - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.List(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - sep := args[0].AsString() - str := args[1].AsString() - elems := strings.Split(str, sep) - elemVals := make([]cty.Value, len(elems)) - for i, s := range elems { - elemVals[i] = cty.StringVal(s) - } - if len(elemVals) == 0 { - return cty.ListValEmpty(cty.String), nil - } - return cty.ListVal(elemVals), nil - }, -}) - -// ChompFunc constructs a function that removes newline characters at the end of a string. -var ChompFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`) - return cty.StringVal(newlines.ReplaceAllString(args[0].AsString(), "")), nil - }, -}) - -// IndentFunc constructs a function that adds a given number of spaces to the -// beginnings of all but the first line in a given multi-line string. -var IndentFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "spaces", - Type: cty.Number, - }, - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var spaces int - if err := gocty.FromCtyValue(args[0], &spaces); err != nil { - return cty.UnknownVal(cty.String), err - } - data := args[1].AsString() - pad := strings.Repeat(" ", spaces) - return cty.StringVal(strings.Replace(data, "\n", "\n"+pad, -1)), nil - }, -})
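ReplaceFunc below has a subtle legacy behavior worth calling out: if the search string is wrapped in forward slashes it is compiled as a regular expression rather than treated literally. A standalone sketch of both modes, for illustration only:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Literal mode: plain substring replacement.
	fmt.Println(strings.Replace("a-b-c", "-", "+", -1)) // a+b+c

	// Regex mode: the pattern arrives wrapped in forward slashes.
	substr := "/b+/"
	re, err := regexp.Compile(substr[1 : len(substr)-1])
	if err != nil {
		panic(err)
	}
	fmt.Println(re.ReplaceAllString("abbbc", "B")) // aBc
}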
- -// ReplaceFunc constructs a function that searches a given string for another -// given substring, and replaces each occurrence with a given replacement string. -var ReplaceFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - { - Name: "substr", - Type: cty.String, - }, - { - Name: "replace", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - str := args[0].AsString() - substr := args[1].AsString() - replace := args[2].AsString() - - // We search/replace using a regexp if the string is surrounded - // in forward slashes. - if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' { - re, err := regexp.Compile(substr[1 : len(substr)-1]) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(re.ReplaceAllString(str, replace)), nil - } - - return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil - }, -}) - -// TitleFunc constructs a function that converts the first letter of each word -// in the given string to uppercase. -var TitleFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - return cty.StringVal(strings.Title(args[0].AsString())), nil - }, -}) - -// TrimSpaceFunc constructs a function that removes any space characters from -// the start and end of the given string. -var TrimSpaceFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - return cty.StringVal(strings.TrimSpace(args[0].AsString())), nil - }, -}) - -// Join concatenates together the string elements of one or more lists with a -// given separator. -func Join(sep cty.Value, lists ...cty.Value) (cty.Value, error) { - args := make([]cty.Value, len(lists)+1) - args[0] = sep - copy(args[1:], lists) - return JoinFunc.Call(args) -} - -// Sort re-orders the elements of a given list of strings so that they are -// in ascending lexicographical order. -func Sort(list cty.Value) (cty.Value, error) { - return SortFunc.Call([]cty.Value{list}) -} - -// Split divides a given string by a given separator, returning a list of -// strings containing the characters between the separator sequences. -func Split(sep, str cty.Value) (cty.Value, error) { - return SplitFunc.Call([]cty.Value{sep, str}) -} - -// Chomp removes newline characters at the end of a string. -func Chomp(str cty.Value) (cty.Value, error) { - return ChompFunc.Call([]cty.Value{str}) -} - -// Indent adds a given number of spaces to the beginnings of all but the first -// line in a given multi-line string. -func Indent(spaces, str cty.Value) (cty.Value, error) { - return IndentFunc.Call([]cty.Value{spaces, str}) -} - -// Replace searches a given string for another given substring, -// and replaces all occurrences with a given replacement string. -func Replace(str, substr, replace cty.Value) (cty.Value, error) { - return ReplaceFunc.Call([]cty.Value{str, substr, replace}) -} - -// Title converts the first letter of each word in the given string to uppercase. -func Title(str cty.Value) (cty.Value, error) { - return TitleFunc.Call([]cty.Value{str}) -} - -// TrimSpace removes any space characters from the start and end of the given string.
-func TrimSpace(str cty.Value) (cty.Value, error) { - return TrimSpaceFunc.Call([]cty.Value{str}) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go deleted file mode 100644 index a3c4906646..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go +++ /dev/null @@ -1,146 +0,0 @@ -package lang - -import ( - ctyyaml "github.com/zclconf/go-cty-yaml" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/function/stdlib" - - "github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs" -) - -var impureFunctions = []string{ - "bcrypt", - "timestamp", - "uuid", -} - -// Functions returns the set of functions that should be used when evaluating -// expressions in the receiving scope. -func (s *Scope) Functions() map[string]function.Function { - s.funcsLock.Lock() - if s.funcs == nil { - // Some of our functions are just directly the cty stdlib functions. - // Others are implemented in the subdirectory "funcs" here in this - // repository. New functions should generally start out their lives - // in the "funcs" directory and potentially graduate to cty stdlib - // later if the functionality seems to be something domain-agnostic - // that would be useful to all applications using cty functions. - - s.funcs = map[string]function.Function{ - "abs": stdlib.AbsoluteFunc, - "abspath": funcs.AbsPathFunc, - "basename": funcs.BasenameFunc, - "base64decode": funcs.Base64DecodeFunc, - "base64encode": funcs.Base64EncodeFunc, - "base64gzip": funcs.Base64GzipFunc, - "base64sha256": funcs.Base64Sha256Func, - "base64sha512": funcs.Base64Sha512Func, - "bcrypt": funcs.BcryptFunc, - "ceil": funcs.CeilFunc, - "chomp": funcs.ChompFunc, - "cidrhost": funcs.CidrHostFunc, - "cidrnetmask": funcs.CidrNetmaskFunc, - "cidrsubnet": funcs.CidrSubnetFunc, - "cidrsubnets": funcs.CidrSubnetsFunc, - "coalesce": funcs.CoalesceFunc, - "coalescelist": funcs.CoalesceListFunc, - "compact": funcs.CompactFunc, - "concat": stdlib.ConcatFunc, - "contains": funcs.ContainsFunc, - "csvdecode": stdlib.CSVDecodeFunc, - "dirname": funcs.DirnameFunc, - "distinct": funcs.DistinctFunc, - "element": funcs.ElementFunc, - "chunklist": funcs.ChunklistFunc, - "file": funcs.MakeFileFunc(s.BaseDir, false), - "fileexists": funcs.MakeFileExistsFunc(s.BaseDir), - "filebase64": funcs.MakeFileFunc(s.BaseDir, true), - "filebase64sha256": funcs.MakeFileBase64Sha256Func(s.BaseDir), - "filebase64sha512": funcs.MakeFileBase64Sha512Func(s.BaseDir), - "filemd5": funcs.MakeFileMd5Func(s.BaseDir), - "filesha1": funcs.MakeFileSha1Func(s.BaseDir), - "filesha256": funcs.MakeFileSha256Func(s.BaseDir), - "filesha512": funcs.MakeFileSha512Func(s.BaseDir), - "flatten": funcs.FlattenFunc, - "floor": funcs.FloorFunc, - "format": stdlib.FormatFunc, - "formatdate": stdlib.FormatDateFunc, - "formatlist": stdlib.FormatListFunc, - "indent": funcs.IndentFunc, - "index": funcs.IndexFunc, - "join": funcs.JoinFunc, - "jsondecode": stdlib.JSONDecodeFunc, - "jsonencode": stdlib.JSONEncodeFunc, - "keys": funcs.KeysFunc, - "length": funcs.LengthFunc, - "list": funcs.ListFunc, - "log": funcs.LogFunc, - "lookup": funcs.LookupFunc, - "lower": stdlib.LowerFunc, - "map": funcs.MapFunc, - "matchkeys": funcs.MatchkeysFunc, - "max": stdlib.MaxFunc, - "md5": funcs.Md5Func, - "merge": funcs.MergeFunc, - "min": stdlib.MinFunc, - "parseint": funcs.ParseIntFunc,
- "pathexpand": funcs.PathExpandFunc, - "pow": funcs.PowFunc, - "range": stdlib.RangeFunc, - "regex": stdlib.RegexFunc, - "regexall": stdlib.RegexAllFunc, - "replace": funcs.ReplaceFunc, - "reverse": funcs.ReverseFunc, - "rsadecrypt": funcs.RsaDecryptFunc, - "setintersection": stdlib.SetIntersectionFunc, - "setproduct": funcs.SetProductFunc, - "setunion": stdlib.SetUnionFunc, - "sha1": funcs.Sha1Func, - "sha256": funcs.Sha256Func, - "sha512": funcs.Sha512Func, - "signum": funcs.SignumFunc, - "slice": funcs.SliceFunc, - "sort": funcs.SortFunc, - "split": funcs.SplitFunc, - "strrev": stdlib.ReverseFunc, - "substr": stdlib.SubstrFunc, - "timestamp": funcs.TimestampFunc, - "timeadd": funcs.TimeAddFunc, - "title": funcs.TitleFunc, - "tostring": funcs.MakeToFunc(cty.String), - "tonumber": funcs.MakeToFunc(cty.Number), - "tobool": funcs.MakeToFunc(cty.Bool), - "toset": funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)), - "tolist": funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)), - "tomap": funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)), - "transpose": funcs.TransposeFunc, - "trimspace": funcs.TrimSpaceFunc, - "upper": stdlib.UpperFunc, - "urlencode": funcs.URLEncodeFunc, - "uuid": funcs.UUIDFunc, - "uuidv5": funcs.UUIDV5Func, - "values": funcs.ValuesFunc, - "yamldecode": ctyyaml.YAMLDecodeFunc, - "yamlencode": ctyyaml.YAMLEncodeFunc, - "zipmap": funcs.ZipmapFunc, - } - - s.funcs["templatefile"] = funcs.MakeTemplateFileFunc(s.BaseDir, func() map[string]function.Function { - // The templatefile function prevents recursive calls to itself - // by copying this map and overwriting the "templatefile" entry. - return s.funcs - }) - - if s.PureOnly { - // Force our few impure functions to return unknown so that we - // can defer evaluating them until a later pass. - for _, name := range impureFunctions { - s.funcs[name] = function.Unpredictable(s.funcs[name]) - } - } - } - s.funcsLock.Unlock() - - return s.funcs -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go deleted file mode 100644 index 7923d51135..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go +++ /dev/null @@ -1,81 +0,0 @@ -package lang - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// References finds all of the references in the given set of traversals, -// returning diagnostics if any of the traversals cannot be interpreted as a -// reference. -// -// This function does not do any de-duplication of references, since references -// have source location information embedded in them and so any invalid -// references that are duplicated should have errors reported for each -// occurrence. -// -// If the returned diagnostics contains errors then the result may be -// incomplete or invalid. Otherwise, the returned slice has one reference per -// given traversal, though it is not guaranteed that the references will -// appear in the same order as the given traversals.
-func References(traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnostics) { - if len(traversals) == 0 { - return nil, nil - } - - var diags tfdiags.Diagnostics - refs := make([]*addrs.Reference, 0, len(traversals)) - - for _, traversal := range traversals { - ref, refDiags := addrs.ParseRef(traversal) - diags = diags.Append(refDiags) - if ref == nil { - continue - } - refs = append(refs, ref) - } - - return refs, diags -} - -// ReferencesInBlock is a helper wrapper around References that first searches -// the given body for traversals, before converting those traversals to -// references. -// -// A block schema must be provided so that this function can determine where in -// the body variables are expected. -func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) { - if body == nil { - return nil, nil - } - - // We use blocktoattr.ExpandedVariables instead of hcldec.Variables or - // dynblock.VariablesHCLDec here because when we evaluate a block we'll - // first apply the dynamic block extension and _then_ the blocktoattr - // transform, and so blocktoattr.ExpandedVariables takes into account - // both of those transforms when it analyzes the body to ensure we find - // all of the references as if they'd already moved into their final - // locations, even though we can't expand dynamic blocks yet until we - // already know which variables are required. - // - // The set of cases we want to detect here is covered by the tests for - // the plan graph builder in the main 'terraform' package, since it's - // in a better position to test this due to having mock providers etc - // available. - traversals := blocktoattr.ExpandedVariables(body, schema) - return References(traversals) -} - -// ReferencesInExpr is a helper wrapper around References that first searches -// the given expression for traversals, before converting those traversals -// to references. -func ReferencesInExpr(expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) { - if expr == nil { - return nil, nil - } - traversals := expr.Variables() - return References(traversals) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go deleted file mode 100644 index a720cca682..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go +++ /dev/null @@ -1,34 +0,0 @@ -package lang - -import ( - "sync" - - "github.com/zclconf/go-cty/cty/function" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// Scope is the main type in this package, allowing dynamic evaluation of -// blocks and expressions based on some contextual information that informs -// which variables and functions will be available. -type Scope struct { - // Data is used to resolve references in expressions. - Data Data - - // SelfAddr is the address that the "self" object should be an alias of, - // or nil if the "self" object should not be available at all. - SelfAddr addrs.Referenceable - - // BaseDir is the base directory used by any interpolation functions that - // accept filesystem paths as arguments. - BaseDir string - - // PureOnly can be set to true to request that any non-pure functions - // produce unknown value results rather than actually executing. This is - // important during a plan phase to avoid generating results that could - // then differ during apply. 
- PureOnly bool - - funcs map[string]function.Function - funcsLock sync.Mutex -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go deleted file mode 100644 index 0d7d664fc1..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package modsdir is an internal package containing the model types used to -// represent the manifest of modules in a local modules cache directory. -package modsdir diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go deleted file mode 100644 index 2d45c8520e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go +++ /dev/null @@ -1,138 +0,0 @@ -package modsdir - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - - version "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// Record represents some metadata about an installed module, as part -// of a ModuleManifest. -type Record struct { - // Key is a unique identifier for this particular module, based on its - // position within the static module tree. - Key string `json:"Key"` - - // SourceAddr is the source address given for this module in configuration. - // This is used only to detect if the source was changed in configuration - // since the module was last installed, which means that the installer - // must re-install it. - SourceAddr string `json:"Source"` - - // Version is the exact version of the module, which results from parsing - // VersionStr. nil for un-versioned modules. - Version *version.Version `json:"-"` - - // VersionStr is the version specifier string. This is used only for - // serialization in snapshots and should not be accessed or updated - // by any other codepaths; use "Version" instead. - VersionStr string `json:"Version,omitempty"` - - // Dir is the path to the local directory where the module is installed. - Dir string `json:"Dir"` -} - -// Manifest is a map used to keep track of the filesystem locations -// and other metadata about installed modules. -// -// The configuration loader refers to this, while the module installer updates -// it to reflect any changes to the installed modules. -type Manifest map[string]Record - -func (m Manifest) ModuleKey(path addrs.Module) string { - return path.String() -} - -// manifestSnapshotFile is an internal struct used only to assist in our JSON -// serialization of manifest snapshots. It should not be used for any other -// purpose. -type manifestSnapshotFile struct { - Records []Record `json:"Modules"` -} - -func ReadManifestSnapshot(r io.Reader) (Manifest, error) { - src, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - if len(src) == 0 { - // This should never happen, but we'll tolerate it as if it were - // a valid empty JSON object. 
- return make(Manifest), nil - } - - var read manifestSnapshotFile - err = json.Unmarshal(src, &read) - - new := make(Manifest) - for _, record := range read.Records { - if record.VersionStr != "" { - record.Version, err = version.NewVersion(record.VersionStr) - if err != nil { - return nil, fmt.Errorf("invalid version %q for %s: %s", record.VersionStr, record.Key, err) - } - } - if _, exists := new[record.Key]; exists { - // This should never happen in any valid file, so we'll catch it - // and report it to avoid confusing/undefined behavior if the - // snapshot file was edited incorrectly outside of Terraform. - return nil, fmt.Errorf("snapshot file contains two records for path %s", record.Key) - } - new[record.Key] = record - } - return new, nil -} - -func ReadManifestSnapshotForDir(dir string) (Manifest, error) { - fn := filepath.Join(dir, ManifestSnapshotFilename) - r, err := os.Open(fn) - if err != nil { - if os.IsNotExist(err) { - return make(Manifest), nil // missing file is okay and treated as empty - } - return nil, err - } - return ReadManifestSnapshot(r) -} - -func (m Manifest) WriteSnapshot(w io.Writer) error { - var write manifestSnapshotFile - - for _, record := range m { - // Make sure VersionStr is in sync with Version, since we encourage - // callers to manipulate Version and ignore VersionStr. - if record.Version != nil { - record.VersionStr = record.Version.String() - } else { - record.VersionStr = "" - } - write.Records = append(write.Records, record) - } - - src, err := json.Marshal(write) - if err != nil { - return err - } - - _, err = w.Write(src) - return err -} - -func (m Manifest) WriteSnapshotToDir(dir string) error { - fn := filepath.Join(dir, ManifestSnapshotFilename) - log.Printf("[TRACE] modsdir: writing modules manifest to %s", fn) - w, err := os.Create(fn) - if err != nil { - return err - } - return m.WriteSnapshot(w) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go deleted file mode 100644 index 9ebb52431b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go +++ /dev/null @@ -1,3 +0,0 @@ -package modsdir - -const ManifestSnapshotFilename = "modules.json" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go deleted file mode 100644 index c805887184..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go +++ /dev/null @@ -1,43 +0,0 @@ -package moduledeps - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" -) - -// Providers describes a set of provider dependencies for a given module. -// -// Each named provider instance can have one version constraint. -type Providers map[ProviderInstance]ProviderDependency - -// ProviderDependency describes the dependency for a particular provider -// instance, including both the set of allowed versions and the reason for -// the dependency. -type ProviderDependency struct { - Constraints discovery.Constraints - Reason ProviderDependencyReason -} - -// ProviderDependencyReason is an enumeration of reasons why a dependency might be -// present. -type ProviderDependencyReason int - -const ( - // ProviderDependencyExplicit means that there is an explicit "provider" - // block in the configuration for this module. 
- ProviderDependencyExplicit ProviderDependencyReason = iota - - // ProviderDependencyImplicit means that there is no explicit "provider" - // block but there is at least one resource that uses this provider. - ProviderDependencyImplicit - - // ProviderDependencyInherited is a special case of - // ProviderDependencyImplicit where a parent module has defined a - // configuration for the provider that has been inherited by at least one - // resource in this module. - ProviderDependencyInherited - - // ProviderDependencyFromState means that this provider is not currently - // referenced by configuration at all, but some existing instances in - // the state still depend on it. - ProviderDependencyFromState -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go deleted file mode 100644 index 7eff083157..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package moduledeps contains types that can be used to describe the -// providers required for all of the modules in a module tree. -// -// It does not itself contain the functionality for populating such -// data structures; that's in Terraform core, since this package intentionally -// does not depend on terraform core to avoid package dependency cycles. -package moduledeps diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go deleted file mode 100644 index 5189acfc1c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go +++ /dev/null @@ -1,134 +0,0 @@ -package moduledeps - -import ( - "sort" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" -) - -// Module represents the dependencies of a single module, as well being -// a node in a tree of such structures representing the dependencies of -// an entire configuration. -type Module struct { - Name string - Providers Providers - Children []*Module -} - -// WalkFunc is a callback type for use with Module.WalkTree -type WalkFunc func(path []string, parent *Module, current *Module) error - -// WalkTree calls the given callback once for the receiver and then -// once for each descendent, in an order such that parents are called -// before their children and siblings are called in the order they -// appear in the Children slice. -// -// When calling the callback, parent will be nil for the first call -// for the receiving module, and then set to the direct parent of -// each module for the subsequent calls. -// -// The path given to the callback is valid only until the callback -// returns, after which it will be mutated and reused. Callbacks must -// therefore copy the path slice if they wish to retain it. -// -// If the given callback returns an error, the walk will be aborted at -// that point and that error returned to the caller. -// -// This function is not thread-safe for concurrent modifications of the -// data structure, so it's the caller's responsibility to arrange for that -// should it be needed. -// -// It is safe for a callback to modify the descendents of the "current" -// module, including the ordering of the Children slice itself, but the -// callback MUST NOT modify the parent module. 
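WalkTree below is a straightforward pre-order traversal: the callback sees a module before any of its children, and siblings in slice order, which is also why the path slice is only valid for the duration of each call. A self-contained sketch with stand-in types; the Module and walk names here are reduced illustrations for this example, not the SDK's own types:

package main

import "fmt"

type Module struct {
	Name     string
	Children []*Module
}

func walk(path []string, m *Module, cb func(path []string, m *Module)) {
	path = append(path, m.Name) // path is reused, so callbacks must copy it to retain it
	cb(path, m)
	for _, c := range m.Children {
		walk(path, c, cb)
	}
}

func main() {
	root := &Module{Name: "root", Children: []*Module{
		{Name: "vpc"},
		{Name: "db", Children: []*Module{{Name: "backup"}}},
	}}
	walk(nil, root, func(path []string, m *Module) {
		fmt.Println(path) // [root], [root vpc], [root db], [root db backup]
	})
}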
-func (m *Module) WalkTree(cb WalkFunc) error { - return walkModuleTree(make([]string, 0, 1), nil, m, cb) -} - -func walkModuleTree(path []string, parent *Module, current *Module, cb WalkFunc) error { - path = append(path, current.Name) - err := cb(path, parent, current) - if err != nil { - return err - } - - for _, child := range current.Children { - err := walkModuleTree(path, current, child, cb) - if err != nil { - return err - } - } - return nil -} - -// SortChildren sorts the Children slice into lexicographic order by -// name, in-place. -// -// This is primarily useful prior to calling WalkTree so that the walk -// will proceed in a consistent order. -func (m *Module) SortChildren() { - sort.Sort(sortModules{m.Children}) -} - -type sortModules struct { - modules []*Module -} - -func (s sortModules) Len() int { - return len(s.modules) -} - -func (s sortModules) Less(i, j int) bool { - cmp := strings.Compare(s.modules[i].Name, s.modules[j].Name) - return cmp < 0 -} - -func (s sortModules) Swap(i, j int) { - s.modules[i], s.modules[j] = s.modules[j], s.modules[i] -} - -// PluginRequirements produces a PluginRequirements structure that can -// be used with discovery.PluginMetaSet.ConstrainVersions to identify -// suitable plugins to satisfy the module's provider dependencies. -// -// This method only considers the direct requirements of the receiver. -// Use AllPluginRequirements to flatten the dependencies for the -// entire tree of modules. -// -// Requirements returned by this method include only version constraints, -// and apply no particular SHA256 hash constraint. -func (m *Module) PluginRequirements() discovery.PluginRequirements { - ret := make(discovery.PluginRequirements) - for inst, dep := range m.Providers { - // m.Providers is keyed on provider names, such as "aws.foo". - // a PluginRequirements wants keys to be provider *types*, such - // as "aws". If there are multiple aliases for the same - // provider then we will flatten them into a single requirement - // by combining their constraint sets. - pty := inst.Type() - if existing, exists := ret[pty]; exists { - ret[pty].Versions = existing.Versions.Append(dep.Constraints) - } else { - ret[pty] = &discovery.PluginConstraints{ - Versions: dep.Constraints, - } - } - } - return ret -} - -// AllPluginRequirements calls PluginRequirements for the receiver and all -// of its descendents, and merges the result into a single PluginRequirements -// structure that would satisfy all of the modules together. -// -// Requirements returned by this method include only version constraints, -// and apply no particular SHA256 hash constraint. -func (m *Module) AllPluginRequirements() discovery.PluginRequirements { - var ret discovery.PluginRequirements - m.WalkTree(func(path []string, parent *Module, current *Module) error { - ret = ret.Merge(current.PluginRequirements()) - return nil - }) - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go deleted file mode 100644 index 89ceefb2cf..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go +++ /dev/null @@ -1,30 +0,0 @@ -package moduledeps - -import ( - "strings" -) - -// ProviderInstance describes a particular provider instance by its full name, -// like "null" or "aws.foo". -type ProviderInstance string - -// Type returns the provider type of this instance. 
For example, for an instance -// named "aws.foo" the type is "aws". -func (p ProviderInstance) Type() string { - t := string(p) - if dotPos := strings.Index(t, "."); dotPos != -1 { - t = t[:dotPos] - } - return t -} - -// Alias returns the alias of this provider, if any. An instance named "aws.foo" -// has the alias "foo", while an instance named just "docker" has no alias, -// so the empty string would be returned. -func (p ProviderInstance) Alias() string { - t := string(p) - if dotPos := strings.Index(t, "."); dotPos != -1 { - return t[dotPos+1:] - } - return "" -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go deleted file mode 100644 index c653b106b3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go +++ /dev/null @@ -1,22 +0,0 @@ -package plans - -type Action rune - -const ( - NoOp Action = 0 - Create Action = '+' - Read Action = '←' - Update Action = '~' - DeleteThenCreate Action = '∓' - CreateThenDelete Action = '±' - Delete Action = '-' -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type Action - -// IsReplace returns true if the action is one of the two actions that -// represents replacing an existing object with a new object: -// DeleteThenCreate or CreateThenDelete. -func (a Action) IsReplace() bool { - return a == DeleteThenCreate || a == CreateThenDelete -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go deleted file mode 100644 index be43ab1757..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go +++ /dev/null @@ -1,49 +0,0 @@ -// Code generated by "stringer -type Action"; DO NOT EDIT. - -package plans - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[NoOp-0] - _ = x[Create-43] - _ = x[Read-8592] - _ = x[Update-126] - _ = x[DeleteThenCreate-8723] - _ = x[CreateThenDelete-177] - _ = x[Delete-45] -} - -const ( - _Action_name_0 = "NoOp" - _Action_name_1 = "Create" - _Action_name_2 = "Delete" - _Action_name_3 = "Update" - _Action_name_4 = "CreateThenDelete" - _Action_name_5 = "Read" - _Action_name_6 = "DeleteThenCreate" -) - -func (i Action) String() string { - switch { - case i == 0: - return _Action_name_0 - case i == 43: - return _Action_name_1 - case i == 45: - return _Action_name_2 - case i == 126: - return _Action_name_3 - case i == 177: - return _Action_name_4 - case i == 8592: - return _Action_name_5 - case i == 8723: - return _Action_name_6 - default: - return "Action(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go deleted file mode 100644 index 5c2028c832..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go +++ /dev/null @@ -1,308 +0,0 @@ -package plans - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/zclconf/go-cty/cty" -) - -// Changes describes various actions that Terraform will attempt to take if -// the corresponding plan is applied. 
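For the name-splitting rules just described, a self-contained demo (`ProviderInstance` re-declared locally so the snippet runs by itself):

```go
package main

import (
	"fmt"
	"strings"
)

type ProviderInstance string

// Type returns everything before the first dot, or the whole name if there is none.
func (p ProviderInstance) Type() string {
	t := string(p)
	if dot := strings.Index(t, "."); dot != -1 {
		t = t[:dot]
	}
	return t
}

// Alias returns everything after the first dot, or "" for an unaliased instance.
func (p ProviderInstance) Alias() string {
	if dot := strings.Index(string(p), "."); dot != -1 {
		return string(p)[dot+1:]
	}
	return ""
}

func main() {
	for _, name := range []ProviderInstance{"aws.foo", "docker"} {
		fmt.Printf("%q -> type=%q alias=%q\n", name, name.Type(), name.Alias())
	}
	// "aws.foo" -> type="aws" alias="foo"
	// "docker"  -> type="docker" alias=""
}
```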
-// -// A Changes object can be rendered into a visual diff (by the caller, using -// code in another package) for display to the user. -type Changes struct { - // Resources tracks planned changes to resource instance objects. - Resources []*ResourceInstanceChangeSrc - - // Outputs tracks planned changes output values. - // - // Note that although an in-memory plan contains planned changes for - // outputs throughout the configuration, a plan serialized - // to disk retains only the root outputs because they are - // externally-visible, while other outputs are implementation details and - // can be easily re-calculated during the apply phase. Therefore only root - // module outputs will survive a round-trip through a plan file. - Outputs []*OutputChangeSrc -} - -// NewChanges returns a valid Changes object that describes no changes. -func NewChanges() *Changes { - return &Changes{} -} - -func (c *Changes) Empty() bool { - for _, res := range c.Resources { - if res.Action != NoOp { - return false - } - } - return true -} - -// ResourceInstance returns the planned change for the current object of the -// resource instance of the given address, if any. Returns nil if no change is -// planned. -func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc { - addrStr := addr.String() - for _, rc := range c.Resources { - if rc.Addr.String() == addrStr && rc.DeposedKey == states.NotDeposed { - return rc - } - } - - return nil -} - -// ResourceInstanceDeposed returns the plan change of a deposed object of -// the resource instance of the given address, if any. Returns nil if no change -// is planned. -func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc { - addrStr := addr.String() - for _, rc := range c.Resources { - if rc.Addr.String() == addrStr && rc.DeposedKey == key { - return rc - } - } - - return nil -} - -// OutputValue returns the planned change for the output value with the -// given address, if any. Returns nil if no change is planned. -func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc { - addrStr := addr.String() - for _, oc := range c.Outputs { - if oc.Addr.String() == addrStr { - return oc - } - } - - return nil -} - -// SyncWrapper returns a wrapper object around the receiver that can be used -// to make certain changes to the receiver in a concurrency-safe way, as long -// as all callers share the same wrapper object. -func (c *Changes) SyncWrapper() *ChangesSync { - return &ChangesSync{ - changes: c, - } -} - -// ResourceInstanceChange describes a change to a particular resource instance -// object. -type ResourceInstanceChange struct { - // Addr is the absolute address of the resource instance that the change - // will apply to. - Addr addrs.AbsResourceInstance - - // DeposedKey is the identifier for a deposed object associated with the - // given instance, or states.NotDeposed if this change applies to the - // current object. - // - // A Replace change for a resource with create_before_destroy set will - // create a new DeposedKey temporarily during replacement. In that case, - // DeposedKey in the plan is always states.NotDeposed, representing that - // the current object is being replaced with the deposed. - DeposedKey states.DeposedKey - - // Provider is the address of the provider configuration that was used - // to plan this change, and thus the configuration that must also be - // used to apply it. 
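Note that Empty above does not mean zero entries: a plan whose resource changes are all NoOp also counts as empty. A reduced restatement of that check, with the types pared down to the minimum so it runs standalone:

```go
package main

import "fmt"

type Action rune

const (
	NoOp   Action = 0
	Create Action = '+'
)

type resourceChange struct{ action Action }

type changes struct{ resources []resourceChange }

// empty reports whether every recorded change is a NoOp.
func (c *changes) empty() bool {
	for _, r := range c.resources {
		if r.action != NoOp {
			return false
		}
	}
	return true
}

func main() {
	c := &changes{resources: []resourceChange{{action: NoOp}}}
	fmt.Println(c.empty()) // true: a NoOp-only plan is still "empty"
	c.resources = append(c.resources, resourceChange{action: Create})
	fmt.Println(c.empty()) // false
}
```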
- ProviderAddr addrs.AbsProviderConfig - - // Change is an embedded description of the change. - Change - - // RequiredReplace is a set of paths that caused the change action to be - // Replace rather than Update. Always nil if the change action is not - // Replace. - // - // This is retained only for UI-plan-rendering purposes and so it does not - // currently survive a round-trip through a saved plan file. - RequiredReplace cty.PathSet - - // Private allows a provider to stash any extra data that is opaque to - // Terraform that relates to this change. Terraform will save this - // byte-for-byte and return it to the provider in the apply call. - Private []byte -} - -// Encode produces a variant of the reciever that has its change values -// serialized so it can be written to a plan file. Pass the implied type of the -// corresponding resource type schema for correct operation. -func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) { - cs, err := rc.Change.Encode(ty) - if err != nil { - return nil, err - } - return &ResourceInstanceChangeSrc{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - ProviderAddr: rc.ProviderAddr, - ChangeSrc: *cs, - RequiredReplace: rc.RequiredReplace, - Private: rc.Private, - }, err -} - -// Simplify will, where possible, produce a change with a simpler action than -// the receiever given a flag indicating whether the caller is dealing with -// a normal apply or a destroy. This flag deals with the fact that Terraform -// Core uses a specialized graph node type for destroying; only that -// specialized node should set "destroying" to true. -// -// The following table shows the simplification behavior: -// -// Action Destroying? New Action -// --------+-------------+----------- -// Create true NoOp -// Delete false NoOp -// Replace true Delete -// Replace false Create -// -// For any combination not in the above table, the Simplify just returns the -// receiver as-is. -func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange { - if destroying { - switch rc.Action { - case Delete: - // We'll fall out and just return rc verbatim, then. - case CreateThenDelete, DeleteThenCreate: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: Delete, - Before: rc.Before, - After: cty.NullVal(rc.Before.Type()), - }, - } - default: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: NoOp, - Before: rc.Before, - After: rc.Before, - }, - } - } - } else { - switch rc.Action { - case Delete: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: NoOp, - Before: rc.Before, - After: rc.Before, - }, - } - case CreateThenDelete, DeleteThenCreate: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: Create, - Before: cty.NullVal(rc.After.Type()), - After: rc.After, - }, - } - } - } - - // If we fall out here then our change is already simple enough. - return rc -} - -// OutputChange describes a change to an output value. -type OutputChange struct { - // Addr is the absolute address of the output value that the change - // will apply to. 
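The Simplify table above compresses to a small decision function. A standalone restatement under that table (action constants re-declared locally; the cty before/after bookkeeping is omitted since only the action matters here):

```go
package main

import "fmt"

type Action rune

const (
	NoOp             Action = 0
	Create           Action = '+'
	Delete           Action = '-'
	DeleteThenCreate Action = '∓'
	CreateThenDelete Action = '±'
)

func simplify(a Action, destroying bool) Action {
	switch {
	case destroying && (a == CreateThenDelete || a == DeleteThenCreate):
		return Delete // a replace during destroy reduces to a plain delete
	case destroying && a != Delete:
		return NoOp // e.g. Create has nothing to do on a destroy node
	case !destroying && a == Delete:
		return NoOp
	case !destroying && (a == CreateThenDelete || a == DeleteThenCreate):
		return Create
	}
	return a // any other combination is already simple enough
}

func main() {
	fmt.Println(simplify(Create, true) == NoOp)              // true
	fmt.Println(simplify(DeleteThenCreate, false) == Create) // true
	fmt.Println(simplify(Delete, true) == Delete)            // true
}
```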
- Addr addrs.AbsOutputValue - - // Change is an embedded description of the change. - // - // For output value changes, the type constraint for the DynamicValue - // instances is always cty.DynamicPseudoType. - Change - - // Sensitive, if true, indicates that either the old or new value in the - // change is sensitive and so a rendered version of the plan in the UI - // should elide the actual values while still indicating the action of the - // change. - Sensitive bool -} - -// Encode produces a variant of the reciever that has its change values -// serialized so it can be written to a plan file. -func (oc *OutputChange) Encode() (*OutputChangeSrc, error) { - cs, err := oc.Change.Encode(cty.DynamicPseudoType) - if err != nil { - return nil, err - } - return &OutputChangeSrc{ - Addr: oc.Addr, - ChangeSrc: *cs, - Sensitive: oc.Sensitive, - }, err -} - -// Change describes a single change with a given action. -type Change struct { - // Action defines what kind of change is being made. - Action Action - - // Interpretation of Before and After depend on Action: - // - // NoOp Before and After are the same, unchanged value - // Create Before is nil, and After is the expected value after create. - // Read Before is any prior value (nil if no prior), and After is the - // value that was or will be read. - // Update Before is the value prior to update, and After is the expected - // value after update. - // Replace As with Update. - // Delete Before is the value prior to delete, and After is always nil. - // - // Unknown values may appear anywhere within the Before and After values, - // either as the values themselves or as nested elements within known - // collections/structures. - Before, After cty.Value -} - -// Encode produces a variant of the reciever that has its change values -// serialized so it can be written to a plan file. Pass the type constraint -// that the values are expected to conform to; to properly decode the values -// later an identical type constraint must be provided at that time. -// -// Where a Change is embedded in some other struct, it's generally better -// to call the corresponding Encode method of that struct rather than working -// directly with its embedded Change. -func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) { - beforeDV, err := NewDynamicValue(c.Before, ty) - if err != nil { - return nil, err - } - afterDV, err := NewDynamicValue(c.After, ty) - if err != nil { - return nil, err - } - - return &ChangeSrc{ - Action: c.Action, - Before: beforeDV, - After: afterDV, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go deleted file mode 100644 index 97bc8da7c4..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go +++ /dev/null @@ -1,190 +0,0 @@ -package plans - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/zclconf/go-cty/cty" -) - -// ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange. -// Pass the associated resource type's schema type to method Decode to -// obtain a ResourceInstancChange. -type ResourceInstanceChangeSrc struct { - // Addr is the absolute address of the resource instance that the change - // will apply to. 
- Addr addrs.AbsResourceInstance - - // DeposedKey is the identifier for a deposed object associated with the - // given instance, or states.NotDeposed if this change applies to the - // current object. - // - // A Replace change for a resource with create_before_destroy set will - // create a new DeposedKey temporarily during replacement. In that case, - // DeposedKey in the plan is always states.NotDeposed, representing that - // the current object is being replaced with the deposed. - DeposedKey states.DeposedKey - - // Provider is the address of the provider configuration that was used - // to plan this change, and thus the configuration that must also be - // used to apply it. - ProviderAddr addrs.AbsProviderConfig - - // ChangeSrc is an embedded description of the not-yet-decoded change. - ChangeSrc - - // RequiredReplace is a set of paths that caused the change action to be - // Replace rather than Update. Always nil if the change action is not - // Replace. - // - // This is retained only for UI-plan-rendering purposes and so it does not - // currently survive a round-trip through a saved plan file. - RequiredReplace cty.PathSet - - // Private allows a provider to stash any extra data that is opaque to - // Terraform that relates to this change. Terraform will save this - // byte-for-byte and return it to the provider in the apply call. - Private []byte -} - -// Decode unmarshals the raw representation of the instance object being -// changed. Pass the implied type of the corresponding resource type schema -// for correct operation. -func (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) { - change, err := rcs.ChangeSrc.Decode(ty) - if err != nil { - return nil, err - } - return &ResourceInstanceChange{ - Addr: rcs.Addr, - DeposedKey: rcs.DeposedKey, - ProviderAddr: rcs.ProviderAddr, - Change: *change, - RequiredReplace: rcs.RequiredReplace, - Private: rcs.Private, - }, nil -} - -// DeepCopy creates a copy of the receiver where any pointers to nested mutable -// values are also copied, thus ensuring that future mutations of the receiver -// will not affect the copy. -// -// Some types used within a resource change are immutable by convention even -// though the Go language allows them to be mutated, such as the types from -// the addrs package. These are _not_ copied by this method, under the -// assumption that callers will behave themselves. -func (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc { - if rcs == nil { - return nil - } - ret := *rcs - - ret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...) - - if len(ret.Private) != 0 { - private := make([]byte, len(ret.Private)) - copy(private, ret.Private) - ret.Private = private - } - - ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() - ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() - - return &ret -} - -// OutputChangeSrc describes a change to an output value. -type OutputChangeSrc struct { - // Addr is the absolute address of the output value that the change - // will apply to. - Addr addrs.AbsOutputValue - - // ChangeSrc is an embedded description of the not-yet-decoded change. - // - // For output value changes, the type constraint for the DynamicValue - // instances is always cty.DynamicPseudoType. - ChangeSrc - - // Sensitive, if true, indicates that either the old or new value in the - // change is sensitive and so a rendered version of the plan in the UI - // should elide the actual values while still indicating the action of the - // change. 
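DeepCopy above copies the Private bytes rather than aliasing them. The hazard that avoids, shown in plain Go with throwaway types (nothing here is SDK API):

```go
package main

import "fmt"

type change struct{ private []byte }

// deepCopy gives the copy its own backing array for the opaque bytes.
func (c change) deepCopy() change {
	out := c
	if len(c.private) != 0 {
		out.private = append([]byte(nil), c.private...)
	}
	return out
}

func main() {
	orig := change{private: []byte("opaque")}
	cp := orig.deepCopy()
	cp.private[0] = 'X'
	fmt.Println(string(orig.private), string(cp.private)) // opaque Xpaque
}
```

Without the copy, both values would share one array and the mutation would leak into the original.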
- Sensitive bool -} - -// Decode unmarshals the raw representation of the output value being -// changed. -func (ocs *OutputChangeSrc) Decode() (*OutputChange, error) { - change, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType) - if err != nil { - return nil, err - } - return &OutputChange{ - Addr: ocs.Addr, - Change: *change, - Sensitive: ocs.Sensitive, - }, nil -} - -// DeepCopy creates a copy of the receiver where any pointers to nested mutable -// values are also copied, thus ensuring that future mutations of the receiver -// will not affect the copy. -// -// Some types used within a resource change are immutable by convention even -// though the Go language allows them to be mutated, such as the types from -// the addrs package. These are _not_ copied by this method, under the -// assumption that callers will behave themselves. -func (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc { - if ocs == nil { - return nil - } - ret := *ocs - - ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() - ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() - - return &ret -} - -// ChangeSrc is a not-yet-decoded Change. -type ChangeSrc struct { - // Action defines what kind of change is being made. - Action Action - - // Before and After correspond to the fields of the same name in Change, - // but have not yet been decoded from the serialized value used for - // storage. - Before, After DynamicValue -} - -// Decode unmarshals the raw representations of the before and after values -// to produce a Change object. Pass the type constraint that the result must -// conform to. -// -// Where a ChangeSrc is embedded in some other struct, it's generally better -// to call the corresponding Decode method of that struct rather than working -// directly with its embedded Change. -func (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) { - var err error - before := cty.NullVal(ty) - after := cty.NullVal(ty) - - if len(cs.Before) > 0 { - before, err = cs.Before.Decode(ty) - if err != nil { - return nil, fmt.Errorf("error decoding 'before' value: %s", err) - } - } - if len(cs.After) > 0 { - after, err = cs.After.Decode(ty) - if err != nil { - return nil, fmt.Errorf("error decoding 'after' value: %s", err) - } - } - return &Change{ - Action: cs.Action, - Before: before, - After: after, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go deleted file mode 100644 index 89cc1ab225..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go +++ /dev/null @@ -1,144 +0,0 @@ -package plans - -import ( - "fmt" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// ChangesSync is a wrapper around a Changes that provides a concurrency-safe -// interface to insert new changes and retrieve copies of existing changes. -// -// Each ChangesSync is independent of all others, so all concurrent writers -// to a particular Changes must share a single ChangesSync. Behavior is -// undefined if any other caller makes changes to the underlying Changes -// object or its nested objects concurrently with any of the methods of a -// particular ChangesSync. -type ChangesSync struct { - lock sync.Mutex - changes *Changes -} - -// AppendResourceInstanceChange records the given resource instance change in -// the set of planned resource changes. 
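ChangesSync, introduced above, pairs a mutex with those DeepCopy calls so that concurrent writers stay independent of the values they handed in. The shape of that pattern, reduced to plain Go (the wrapper and its byte-slice payload are stand-ins, not SDK types):

```go
package main

import (
	"fmt"
	"sync"
)

type changesSync struct {
	mu      sync.Mutex
	entries [][]byte
}

// append stores a private copy under the lock, so the caller may keep
// mutating its own slice afterwards without affecting what was recorded.
func (cs *changesSync) append(b []byte) {
	cs.mu.Lock()
	defer cs.mu.Unlock()
	cs.entries = append(cs.entries, append([]byte(nil), b...))
}

func main() {
	var cs changesSync
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i byte) {
			defer wg.Done()
			cs.append([]byte{i})
		}(byte(i))
	}
	wg.Wait()
	fmt.Println(len(cs.entries)) // 4
}
```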
-// -// The caller must ensure that there are no concurrent writes to the given -// change while this method is running, but it is safe to resume mutating -// it after this method returns without affecting the saved change. -func (cs *ChangesSync) AppendResourceInstanceChange(changeSrc *ResourceInstanceChangeSrc) { - if cs == nil { - panic("AppendResourceInstanceChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - s := changeSrc.DeepCopy() - cs.changes.Resources = append(cs.changes.Resources, s) -} - -// GetResourceInstanceChange searches the set of resource instance changes for -// one matching the given address and generation, returning it if it exists. -// -// If no such change exists, nil is returned. -// -// The returned object is a deep copy of the change recorded in the plan, so -// callers may mutate it although it's generally better (less confusing) to -// treat planned changes as immutable after they've been initially constructed. -func (cs *ChangesSync) GetResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) *ResourceInstanceChangeSrc { - if cs == nil { - panic("GetResourceInstanceChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - if gen == states.CurrentGen { - return cs.changes.ResourceInstance(addr).DeepCopy() - } - if dk, ok := gen.(states.DeposedKey); ok { - return cs.changes.ResourceInstanceDeposed(addr, dk).DeepCopy() - } - panic(fmt.Sprintf("unsupported generation value %#v", gen)) -} - -// RemoveResourceInstanceChange searches the set of resource instance changes -// for one matching the given address and generation, and removes it from the -// set if it exists. -func (cs *ChangesSync) RemoveResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) { - if cs == nil { - panic("RemoveResourceInstanceChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - dk := states.NotDeposed - if realDK, ok := gen.(states.DeposedKey); ok { - dk = realDK - } - - addrStr := addr.String() - for i, r := range cs.changes.Resources { - if r.Addr.String() != addrStr || r.DeposedKey != dk { - continue - } - copy(cs.changes.Resources[i:], cs.changes.Resources[i+1:]) - cs.changes.Resources = cs.changes.Resources[:len(cs.changes.Resources)-1] - return - } -} - -// AppendOutputChange records the given output value change in the set of -// planned value changes. -// -// The caller must ensure that there are no concurrent writes to the given -// change while this method is running, but it is safe to resume mutating -// it after this method returns without affecting the saved change. -func (cs *ChangesSync) AppendOutputChange(changeSrc *OutputChangeSrc) { - if cs == nil { - panic("AppendOutputChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - s := changeSrc.DeepCopy() - cs.changes.Outputs = append(cs.changes.Outputs, s) -} - -// GetOutputChange searches the set of output value changes for one matching -// the given address, returning it if it exists. -// -// If no such change exists, nil is returned. -// -// The returned object is a deep copy of the change recorded in the plan, so -// callers may mutate it although it's generally better (less confusing) to -// treat planned changes as immutable after they've been initially constructed. 
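RemoveResourceInstanceChange above (and RemoveOutputChange below) delete from the slice with Go's order-preserving copy-and-truncate idiom; in isolation:

```go
package main

import "fmt"

func main() {
	s := []string{"a", "b", "c", "d"}
	i := 1               // index to delete
	copy(s[i:], s[i+1:]) // shift the tail left by one
	s = s[:len(s)-1]     // drop the now-duplicated final element
	fmt.Println(s)       // [a c d]
}
```

This keeps the remaining elements in their original order, unlike the swap-with-last shortcut.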
-func (cs *ChangesSync) GetOutputChange(addr addrs.AbsOutputValue) *OutputChangeSrc { - if cs == nil { - panic("GetOutputChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - return cs.changes.OutputValue(addr) -} - -// RemoveOutputChange searches the set of output value changes for one matching -// the given address, and removes it from the set if it exists. -func (cs *ChangesSync) RemoveOutputChange(addr addrs.AbsOutputValue) { - if cs == nil { - panic("RemoveOutputChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - addrStr := addr.String() - for i, o := range cs.changes.Outputs { - if o.Addr.String() != addrStr { - continue - } - copy(cs.changes.Outputs[i:], cs.changes.Outputs[i+1:]) - cs.changes.Outputs = cs.changes.Outputs[:len(cs.changes.Outputs)-1] - return - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go deleted file mode 100644 index 01ca389238..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package plans contains the types that are used to represent Terraform plans. -// -// A plan describes a set of changes that Terraform will make to update remote -// objects to match with changes to the configuration. -package plans diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go deleted file mode 100644 index 51fbb24cfb..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go +++ /dev/null @@ -1,96 +0,0 @@ -package plans - -import ( - "github.com/zclconf/go-cty/cty" - ctymsgpack "github.com/zclconf/go-cty/cty/msgpack" -) - -// DynamicValue is the representation in the plan of a value whose type cannot -// be determined at compile time, such as because it comes from a schema -// defined in a plugin. -// -// This type is used as an indirection so that the overall plan structure can -// be decoded without schema available, and then the dynamic values accessed -// at a later time once the appropriate schema has been determined. -// -// Internally, DynamicValue is a serialized version of a cty.Value created -// against a particular type constraint. Callers should not access directly -// the serialized form, whose format may change in future. Values of this -// type must always be created by calling NewDynamicValue. -// -// The zero value of DynamicValue is nil, and represents the absense of a -// value within the Go type system. This is distinct from a cty.NullVal -// result, which represents the absense of a value within the cty type system. -type DynamicValue []byte - -// NewDynamicValue creates a DynamicValue by serializing the given value -// against the given type constraint. The value must conform to the type -// constraint, or the result is undefined. -// -// If the value to be encoded has no predefined schema (for example, for -// module output values and input variables), set the type constraint to -// cty.DynamicPseudoType in order to save type information as part of the -// value, and then also pass cty.DynamicPseudoType to method Decode to recover -// the original value. -// -// cty.NilVal can be used to represent the absense of a value, but callers -// must be careful to distinguish values that are absent at the Go layer -// (cty.NilVal) vs. 
values that are absent at the cty layer (cty.NullVal -// results). -func NewDynamicValue(val cty.Value, ty cty.Type) (DynamicValue, error) { - // If we're given cty.NilVal (the zero value of cty.Value, which is - // distinct from a typed null value created by cty.NullVal) then we'll - // assume the caller is trying to represent the _absense_ of a value, - // and so we'll return a nil DynamicValue. - if val == cty.NilVal { - return DynamicValue(nil), nil - } - - // Currently our internal encoding is msgpack, via ctymsgpack. - buf, err := ctymsgpack.Marshal(val, ty) - if err != nil { - return nil, err - } - - return DynamicValue(buf), nil -} - -// Decode retrieves the effective value from the receiever by interpreting the -// serialized form against the given type constraint. For correct results, -// the type constraint must match (or be consistent with) the one that was -// used to create the receiver. -// -// A nil DynamicValue decodes to cty.NilVal, which is not a valid value and -// instead represents the absense of a value. -func (v DynamicValue) Decode(ty cty.Type) (cty.Value, error) { - if v == nil { - return cty.NilVal, nil - } - - return ctymsgpack.Unmarshal([]byte(v), ty) -} - -// ImpliedType returns the type implied by the serialized structure of the -// receiving value. -// -// This will not necessarily be exactly the type that was given when the -// value was encoded, and in particular must not be used for values that -// were encoded with their static type given as cty.DynamicPseudoType. -// It is however safe to use this method for values that were encoded using -// their runtime type as the conforming type, with the result being -// semantically equivalent but with all lists and sets represented as tuples, -// and maps as objects, due to ambiguities of the serialization. -func (v DynamicValue) ImpliedType() (cty.Type, error) { - return ctymsgpack.ImpliedType([]byte(v)) -} - -// Copy produces a copy of the receiver with a distinct backing array. -func (v DynamicValue) Copy() DynamicValue { - if v == nil { - return nil - } - - ret := make(DynamicValue, len(v)) - copy(ret, v) - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go deleted file mode 100644 index ba9cc9611a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go +++ /dev/null @@ -1,18 +0,0 @@ -package objchange - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// AllAttributesNull constructs a non-null cty.Value of the object type implied -// by the given schema that has all of its leaf attributes set to null and all -// of its nested block collections set to zero-length. -// -// This simulates what would result from decoding an empty configuration block -// with the given schema, except that it does not produce errors -func AllAttributesNull(schema *configschema.Block) cty.Value { - // "All attributes null" happens to be the definition of EmptyValue for - // a Block, so we can just delegate to that. 
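NewDynamicValue and Decode above amount to a msgpack round-trip against a caller-supplied type constraint. That round-trip can be exercised directly with go-cty, the same library the deleted code builds on (a minimal sketch; the object value is an arbitrary example):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctymsgpack "github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("web")})
	ty := val.Type()

	buf, err := ctymsgpack.Marshal(val, ty) // serialize against the type constraint
	if err != nil {
		panic(err)
	}
	back, err := ctymsgpack.Unmarshal(buf, ty) // the same constraint must be supplied to decode
	if err != nil {
		panic(err)
	}
	fmt.Println(back.RawEquals(val)) // true
}
```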
- return schema.EmptyValue() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go deleted file mode 100644 index 36a7d496c2..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go +++ /dev/null @@ -1,447 +0,0 @@ -package objchange - -import ( - "fmt" - "strconv" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" -) - -// AssertObjectCompatible checks whether the given "actual" value is a valid -// completion of the possibly-partially-unknown "planned" value. -// -// This means that any known leaf value in "planned" must be equal to the -// corresponding value in "actual", and various other similar constraints. -// -// Any inconsistencies are reported by returning a non-zero number of errors. -// These errors are usually (but not necessarily) cty.PathError values -// referring to a particular nested value within the "actual" value. -// -// The two values must have types that conform to the given schema's implied -// type, or this function will panic. -func AssertObjectCompatible(schema *configschema.Block, planned, actual cty.Value) []error { - return assertObjectCompatible(schema, planned, actual, nil) -} - -func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Value, path cty.Path) []error { - var errs []error - if planned.IsNull() && !actual.IsNull() { - errs = append(errs, path.NewErrorf("was absent, but now present")) - return errs - } - if actual.IsNull() && !planned.IsNull() { - errs = append(errs, path.NewErrorf("was present, but now absent")) - return errs - } - if planned.IsNull() { - // No further checks possible if both values are null - return errs - } - - for name, attrS := range schema.Attributes { - plannedV := planned.GetAttr(name) - actualV := actual.GetAttr(name) - - path := append(path, cty.GetAttrStep{Name: name}) - moreErrs := assertValueCompatible(plannedV, actualV, path) - if attrS.Sensitive { - if len(moreErrs) > 0 { - // Use a vague placeholder message instead, to avoid disclosing - // sensitive information. - errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute")) - } - } else { - errs = append(errs, moreErrs...) - } - } - for name, blockS := range schema.BlockTypes { - plannedV := planned.GetAttr(name) - actualV := actual.GetAttr(name) - - // As a special case, if there were any blocks whose leaf attributes - // are all unknown then we assume (possibly incorrectly) that the - // HCL dynamic block extension is in use with an unknown for_each - // argument, and so we will do looser validation here that allows - // for those blocks to have expanded into a different number of blocks - // if the for_each value is now known. - maybeUnknownBlocks := couldHaveUnknownBlockPlaceholder(plannedV, blockS, false) - - path := append(path, cty.GetAttrStep{Name: name}) - switch blockS.Nesting { - case configschema.NestingSingle, configschema.NestingGroup: - // If an unknown block placeholder was present then the placeholder - // may have expanded out into zero blocks, which is okay. - if maybeUnknownBlocks && actualV.IsNull() { - continue - } - moreErrs := assertObjectCompatible(&blockS.Block, plannedV, actualV, path) - errs = append(errs, moreErrs...) 
- case configschema.NestingList: - // A NestingList might either be a list or a tuple, depending on - // whether there are dynamically-typed attributes inside. However, - // both support a similar-enough API that we can treat them the - // same for our purposes here. - if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { - continue - } - - if maybeUnknownBlocks { - // When unknown blocks are present the final blocks may be - // at different indices than the planned blocks, so unfortunately - // we can't do our usual checks in this case without generating - // false negatives. - continue - } - - plannedL := plannedV.LengthInt() - actualL := actualV.LengthInt() - if plannedL != actualL { - errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - if !actualV.HasIndex(idx).True() { - continue - } - actualEV := actualV.Index(idx) - moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx})) - errs = append(errs, moreErrs...) - } - case configschema.NestingMap: - // A NestingMap might either be a map or an object, depending on - // whether there are dynamically-typed attributes inside, but - // that's decided statically and so both values will have the same - // kind. - if plannedV.Type().IsObjectType() { - plannedAtys := plannedV.Type().AttributeTypes() - actualAtys := actualV.Type().AttributeTypes() - for k := range plannedAtys { - if _, ok := actualAtys[k]; !ok { - errs = append(errs, path.NewErrorf("block key %q has vanished", k)) - continue - } - - plannedEV := plannedV.GetAttr(k) - actualEV := actualV.GetAttr(k) - moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k})) - errs = append(errs, moreErrs...) - } - if !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan - for k := range actualAtys { - if _, ok := plannedAtys[k]; !ok { - errs = append(errs, path.NewErrorf("new block key %q has appeared", k)) - continue - } - } - } - } else { - if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { - continue - } - plannedL := plannedV.LengthInt() - actualL := actualV.LengthInt() - if plannedL != actualL && !maybeUnknownBlocks { // new blocks may appear if unknown blocks were persent in the plan - errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - if !actualV.HasIndex(idx).True() { - continue - } - actualEV := actualV.Index(idx) - moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx})) - errs = append(errs, moreErrs...) - } - } - case configschema.NestingSet: - if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { - continue - } - - setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool { - errs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV})) - return len(errs) == 0 - }) - errs = append(errs, setErrs...) - - if maybeUnknownBlocks { - // When unknown blocks are present the final number of blocks - // may be different, either because the unknown set values - // become equal and are collapsed, or the count is unknown due - // a dynamic block. 
Unfortunately this means we can't do our - // usual checks in this case without generating false - // negatives. - continue - } - - // There can be fewer elements in a set after its elements are all - // known (values that turn out to be equal will coalesce) but the - // number of elements must never get larger. - plannedL := plannedV.LengthInt() - actualL := actualV.LengthInt() - if plannedL < actualL { - errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL)) - } - default: - panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) - } - } - return errs -} - -func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error { - // NOTE: We don't normally use the GoString rendering of cty.Value in - // user-facing error messages as a rule, but we make an exception - // for this function because we expect the user to pass this message on - // verbatim to the provider development team and so more detail is better. - - var errs []error - if planned.Type() == cty.DynamicPseudoType { - // Anything goes, then - return errs - } - if problems := planned.Type().TestConformance(actual.Type()); len(problems) > 0 { - errs = append(errs, path.NewErrorf("wrong final value type: %s", convert.MismatchMessage(actual.Type(), planned.Type()))) - // If the types don't match then we can't do any other comparisons, - // so we bail early. - return errs - } - - if !planned.IsKnown() { - // We didn't know what were going to end up with during plan, so - // anything goes during apply. - return errs - } - - if actual.IsNull() { - if planned.IsNull() { - return nil - } - errs = append(errs, path.NewErrorf("was %#v, but now null", planned)) - return errs - } - if planned.IsNull() { - errs = append(errs, path.NewErrorf("was null, but now %#v", actual)) - return errs - } - - ty := planned.Type() - switch { - - case !actual.IsKnown(): - errs = append(errs, path.NewErrorf("was known, but now unknown")) - - case ty.IsPrimitiveType(): - if !actual.Equals(planned).True() { - errs = append(errs, path.NewErrorf("was %#v, but now %#v", planned, actual)) - } - - case ty.IsListType() || ty.IsMapType() || ty.IsTupleType(): - for it := planned.ElementIterator(); it.Next(); { - k, plannedV := it.Element() - if !actual.HasIndex(k).True() { - errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(k))) - continue - } - - actualV := actual.Index(k) - moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: k})) - errs = append(errs, moreErrs...) - } - - for it := actual.ElementIterator(); it.Next(); { - k, _ := it.Element() - if !planned.HasIndex(k).True() { - errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(k))) - } - } - - case ty.IsObjectType(): - atys := ty.AttributeTypes() - for name := range atys { - // Because we already tested that the two values have the same type, - // we can assume that the same attributes are present in both and - // focus just on testing their values. - plannedV := planned.GetAttr(name) - actualV := actual.GetAttr(name) - moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.GetAttrStep{Name: name})) - errs = append(errs, moreErrs...) - } - - case ty.IsSetType(): - // We can't really do anything useful for sets here because changing - // an unknown element to known changes the identity of the element, and - // so we can't correlate them properly. 
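The correlation problem described above also explains the length rule enforced just below: set elements have no identity apart from their value, so elements that resolve to equal values coalesce and a set can only shrink. The coalescing is easy to see with go-cty directly (already-known values are used here for brevity, standing in for placeholders that resolved to the same value):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Two distinct planned elements...
	planned := cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	// ...that both turn out to be "a" collapse into a single element.
	actual := cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("a")})
	fmt.Println(planned.LengthInt(), actual.LengthInt()) // 2 1
}
```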
However, we will at least check - // to ensure that the number of elements is consistent, along with - // the general type-match checks we ran earlier in this function. - if planned.IsKnown() && !planned.IsNull() && !actual.IsNull() { - - setErrs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool { - errs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV})) - return len(errs) == 0 - }) - errs = append(errs, setErrs...) - - // There can be fewer elements in a set after its elements are all - // known (values that turn out to be equal will coalesce) but the - // number of elements must never get larger. - - plannedL := planned.LengthInt() - actualL := actual.LengthInt() - if plannedL < actualL { - errs = append(errs, path.NewErrorf("length changed from %d to %d", plannedL, actualL)) - } - } - } - - return errs -} - -func indexStrForErrors(v cty.Value) string { - switch v.Type() { - case cty.Number: - return v.AsBigFloat().Text('f', -1) - case cty.String: - return strconv.Quote(v.AsString()) - default: - // Should be impossible, since no other index types are allowed! - return fmt.Sprintf("%#v", v) - } -} - -// couldHaveUnknownBlockPlaceholder is a heuristic that recognizes how the -// HCL dynamic block extension behaves when it's asked to expand a block whose -// for_each argument is unknown. In such cases, it generates a single placeholder -// block with all leaf attribute values unknown, and once the for_each -// expression becomes known the placeholder may be replaced with any number -// of blocks, so object compatibility checks would need to be more liberal. -// -// Set "nested" if testing a block that is nested inside a candidate block -// placeholder; this changes the interpretation of there being no blocks of -// a type to allow for there being zero nested blocks. -func couldHaveUnknownBlockPlaceholder(v cty.Value, blockS *configschema.NestedBlock, nested bool) bool { - switch blockS.Nesting { - case configschema.NestingSingle, configschema.NestingGroup: - if nested && v.IsNull() { - return true // for nested blocks, a single block being unset doesn't disqualify from being an unknown block placeholder - } - return couldBeUnknownBlockPlaceholderElement(v, &blockS.Block) - default: - // These situations should be impossible for correct providers, but - // we permit the legacy SDK to produce some incorrect outcomes - // for compatibility with its existing logic, and so we must be - // tolerant here. - if !v.IsKnown() { - return true - } - if v.IsNull() { - return false // treated as if the list were empty, so we would see zero iterations below - } - - // For all other nesting modes, our value should be something iterable. - for it := v.ElementIterator(); it.Next(); { - _, ev := it.Element() - if couldBeUnknownBlockPlaceholderElement(ev, &blockS.Block) { - return true - } - } - - // Our default changes depending on whether we're testing the candidate - // block itself or something nested inside of it: zero blocks of a type - // can never contain a dynamic block placeholder, but a dynamic block - // placeholder might contain zero blocks of one of its own nested block - // types, if none were set in the config at all. 
- return nested - } -} - -func couldBeUnknownBlockPlaceholderElement(v cty.Value, schema *configschema.Block) bool { - if v.IsNull() { - return false // null value can never be a placeholder element - } - if !v.IsKnown() { - return true // this should never happen for well-behaved providers, but can happen with the legacy SDK opt-outs - } - for name := range schema.Attributes { - av := v.GetAttr(name) - - // Unknown block placeholders contain only unknown or null attribute - // values, depending on whether or not a particular attribute was set - // explicitly inside the content block. Note that this is imprecise: - // non-placeholders can also match this, so this function can generate - // false positives. - if av.IsKnown() && !av.IsNull() { - return false - } - } - for name, blockS := range schema.BlockTypes { - if !couldHaveUnknownBlockPlaceholder(v.GetAttr(name), blockS, true) { - return false - } - } - return true -} - -// assertSetValuesCompatible checks that each of the elements in a can -// be correlated with at least one equivalent element in b and vice-versa, -// using the given correlation function. -// -// This allows the number of elements in the sets to change as long as all -// elements in both sets can be correlated, making this function safe to use -// with sets that may contain unknown values as long as the unknown case is -// addressed in some reasonable way in the callback function. -// -// The callback always recieves values from set a as its first argument and -// values from set b in its second argument, so it is safe to use with -// non-commutative functions. -// -// As with assertValueCompatible, we assume that the target audience of error -// messages here is a provider developer (via a bug report from a user) and so -// we intentionally violate our usual rule of keeping cty implementation -// details out of error messages. -func assertSetValuesCompatible(planned, actual cty.Value, path cty.Path, f func(aVal, bVal cty.Value) bool) []error { - a := planned - b := actual - - // Our methodology here is a little tricky, to deal with the fact that - // it's impossible to directly correlate two non-equal set elements because - // they don't have identities separate from their values. - // The approach is to count the number of equivalent elements each element - // of a has in b and vice-versa, and then return true only if each element - // in both sets has at least one equivalent. - as := a.AsValueSlice() - bs := b.AsValueSlice() - aeqs := make([]bool, len(as)) - beqs := make([]bool, len(bs)) - for ai, av := range as { - for bi, bv := range bs { - if f(av, bv) { - aeqs[ai] = true - beqs[bi] = true - } - } - } - - var errs []error - for i, eq := range aeqs { - if !eq { - errs = append(errs, path.NewErrorf("planned set element %#v does not correlate with any element in actual", as[i])) - } - } - if len(errs) > 0 { - // Exit early since otherwise we're likely to generate duplicate - // error messages from the other perspective in the subsequent loop. 
- return errs - } - for i, eq := range beqs { - if !eq { - errs = append(errs, path.NewErrorf("actual set element %#v does not correlate with any element in plan", bs[i])) - } - } - return errs -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go deleted file mode 100644 index 2c18a0108f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package objchange deals with the business logic of taking a prior state -// value and a config value and producing a proposed new merged value, along -// with other related rules in this domain. -package objchange diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go deleted file mode 100644 index cbfefddddb..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go +++ /dev/null @@ -1,104 +0,0 @@ -package objchange - -import ( - "github.com/zclconf/go-cty/cty" -) - -// LongestCommonSubsequence finds a sequence of values that are common to both -// x and y, with the same relative ordering as in both collections. This result -// is useful as a first step towards computing a diff showing added/removed -// elements in a sequence. -// -// The approached used here is a "naive" one, assuming that both xs and ys will -// generally be small in most reasonable Terraform configurations. For larger -// lists the time/space usage may be sub-optimal. -// -// A pair of lists may have multiple longest common subsequences. In that -// case, the one selected by this function is undefined. -func LongestCommonSubsequence(xs, ys []cty.Value) []cty.Value { - if len(xs) == 0 || len(ys) == 0 { - return make([]cty.Value, 0) - } - - c := make([]int, len(xs)*len(ys)) - eqs := make([]bool, len(xs)*len(ys)) - w := len(xs) - - for y := 0; y < len(ys); y++ { - for x := 0; x < len(xs); x++ { - eqV := xs[x].Equals(ys[y]) - eq := false - if eqV.IsKnown() && eqV.True() { - eq = true - eqs[(w*y)+x] = true // equality tests can be expensive, so cache it - } - if eq { - // Sequence gets one longer than for the cell at top left, - // since we'd append a new item to the sequence here. - if x == 0 || y == 0 { - c[(w*y)+x] = 1 - } else { - c[(w*y)+x] = c[(w*(y-1))+(x-1)] + 1 - } - } else { - // We follow the longest of the sequence above and the sequence - // to the left of us in the matrix. - l := 0 - u := 0 - if x > 0 { - l = c[(w*y)+(x-1)] - } - if y > 0 { - u = c[(w*(y-1))+x] - } - if l > u { - c[(w*y)+x] = l - } else { - c[(w*y)+x] = u - } - } - } - } - - // The bottom right cell tells us how long our longest sequence will be - seq := make([]cty.Value, c[len(c)-1]) - - // Now we will walk back from the bottom right cell, finding again all - // of the equal pairs to construct our sequence. - x := len(xs) - 1 - y := len(ys) - 1 - i := len(seq) - 1 - - for x > -1 && y > -1 { - if eqs[(w*y)+x] { - // Add the value to our result list and then walk diagonally - // up and to the left. - seq[i] = xs[x] - x-- - y-- - i-- - } else { - // Take the path with the greatest sequence length in the matrix. 
- l := 0 - u := 0 - if x > 0 { - l = c[(w*y)+(x-1)] - } - if y > 0 { - u = c[(w*(y-1))+x] - } - if l > u { - x-- - } else { - y-- - } - } - } - - if i > -1 { - // should never happen if the matrix was constructed properly - panic("not enough elements in sequence") - } - - return seq -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go deleted file mode 100644 index 879fc93a1e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go +++ /dev/null @@ -1,390 +0,0 @@ -package objchange - -import ( - "fmt" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" -) - -// ProposedNewObject constructs a proposed new object value by combining the -// computed attribute values from "prior" with the configured attribute values -// from "config". -// -// Both value must conform to the given schema's implied type, or this function -// will panic. -// -// The prior value must be wholly known, but the config value may be unknown -// or have nested unknown values. -// -// The merging of the two objects includes the attributes of any nested blocks, -// which will be correlated in a manner appropriate for their nesting mode. -// Note in particular that the correlation for blocks backed by sets is a -// heuristic based on matching non-computed attribute values and so it may -// produce strange results with more "extreme" cases, such as a nested set -// block where _all_ attributes are computed. -func ProposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value { - // If the config and prior are both null, return early here before - // populating the prior block. The prevents non-null blocks from appearing - // the proposed state value. - if config.IsNull() && prior.IsNull() { - return prior - } - - if prior.IsNull() { - // In this case, we will construct a synthetic prior value that is - // similar to the result of decoding an empty configuration block, - // which simplifies our handling of the top-level attributes/blocks - // below by giving us one non-null level of object to pull values from. - prior = AllAttributesNull(schema) - } - return proposedNewObject(schema, prior, config) -} - -// PlannedDataResourceObject is similar to ProposedNewObject but tailored for -// planning data resources in particular. Specifically, it replaces the values -// of any Computed attributes not set in the configuration with an unknown -// value, which serves as a placeholder for a value to be filled in by the -// provider when the data resource is finally read. -// -// Data resources are different because the planning of them is handled -// entirely within Terraform Core and not subject to customization by the -// provider. This function is, in effect, producing an equivalent result to -// passing the ProposedNewObject result into a provider's PlanResourceChange -// function, assuming a fixed implementation of PlanResourceChange that just -// fills in unknown values as needed. -func PlannedDataResourceObject(schema *configschema.Block, config cty.Value) cty.Value { - // Our trick here is to run the ProposedNewObject logic with an - // entirely-unknown prior value. 
Because of cty's unknown short-circuit - // behavior, any operation on prior returns another unknown, and so - // unknown values propagate into all of the parts of the resulting value - // that would normally be filled in by preserving the prior state. - prior := cty.UnknownVal(schema.ImpliedType()) - return proposedNewObject(schema, prior, config) -} - -func proposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value { - if config.IsNull() || !config.IsKnown() { - // This is a weird situation, but we'll allow it anyway to free - // callers from needing to specifically check for these cases. - return prior - } - if (!prior.Type().IsObjectType()) || (!config.Type().IsObjectType()) { - panic("ProposedNewObject only supports object-typed values") - } - - // From this point onwards, we can assume that both values are non-null - // object types, and that the config value itself is known (though it - // may contain nested values that are unknown.) - - newAttrs := map[string]cty.Value{} - for name, attr := range schema.Attributes { - priorV := prior.GetAttr(name) - configV := config.GetAttr(name) - var newV cty.Value - switch { - case attr.Computed && attr.Optional: - // This is the trickiest scenario: we want to keep the prior value - // if the config isn't overriding it. Note that due to some - // ambiguity here, setting an optional+computed attribute from - // config and then later switching the config to null in a - // subsequent change causes the initial config value to be "sticky" - // unless the provider specifically overrides it during its own - // plan customization step. - if configV.IsNull() { - newV = priorV - } else { - newV = configV - } - case attr.Computed: - // configV will always be null in this case, by definition. - // priorV may also be null, but that's okay. - newV = priorV - default: - // For non-computed attributes, we always take the config value, - // even if it is null. If it's _required_ then null values - // should've been caught during an earlier validation step, and - // so we don't really care about that here. - newV = configV - } - newAttrs[name] = newV - } - - // Merging nested blocks is a little more complex, since we need to - // correlate blocks between both objects and then recursively propose - // a new object for each. The correlation logic depends on the nesting - // mode for each block type. - for name, blockType := range schema.BlockTypes { - priorV := prior.GetAttr(name) - configV := config.GetAttr(name) - var newV cty.Value - switch blockType.Nesting { - - case configschema.NestingSingle, configschema.NestingGroup: - newV = ProposedNewObject(&blockType.Block, priorV, configV) - - case configschema.NestingList: - // Nested blocks are correlated by index. - configVLen := 0 - if configV.IsKnown() && !configV.IsNull() { - configVLen = configV.LengthInt() - } - if configVLen > 0 { - newVals := make([]cty.Value, 0, configVLen) - for it := configV.ElementIterator(); it.Next(); { - idx, configEV := it.Element() - if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) { - // If there is no corresponding prior element then - // we just take the config value as-is. - newVals = append(newVals, configEV) - continue - } - priorEV := priorV.Index(idx) - - newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) - newVals = append(newVals, newEV) - } - // Despite the name, a NestingList might also be a tuple, if - // its nested schema contains dynamically-typed attributes. 
- if configV.Type().IsTupleType() { - newV = cty.TupleVal(newVals) - } else { - newV = cty.ListVal(newVals) - } - } else { - // Despite the name, a NestingList might also be a tuple, if - // its nested schema contains dynamically-typed attributes. - if configV.Type().IsTupleType() { - newV = cty.EmptyTupleVal - } else { - newV = cty.ListValEmpty(blockType.ImpliedType()) - } - } - - case configschema.NestingMap: - // Despite the name, a NestingMap may produce either a map or - // object value, depending on whether the nested schema contains - // dynamically-typed attributes. - if configV.Type().IsObjectType() { - // Nested blocks are correlated by key. - configVLen := 0 - if configV.IsKnown() && !configV.IsNull() { - configVLen = configV.LengthInt() - } - if configVLen > 0 { - newVals := make(map[string]cty.Value, configVLen) - atys := configV.Type().AttributeTypes() - for name := range atys { - configEV := configV.GetAttr(name) - if !priorV.IsKnown() || priorV.IsNull() || !priorV.Type().HasAttribute(name) { - // If there is no corresponding prior element then - // we just take the config value as-is. - newVals[name] = configEV - continue - } - priorEV := priorV.GetAttr(name) - - newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) - newVals[name] = newEV - } - // Although we call the nesting mode "map", we actually use - // object values so that elements might have different types - // in case of dynamically-typed attributes. - newV = cty.ObjectVal(newVals) - } else { - newV = cty.EmptyObjectVal - } - } else { - configVLen := 0 - if configV.IsKnown() && !configV.IsNull() { - configVLen = configV.LengthInt() - } - if configVLen > 0 { - newVals := make(map[string]cty.Value, configVLen) - for it := configV.ElementIterator(); it.Next(); { - idx, configEV := it.Element() - k := idx.AsString() - if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) { - // If there is no corresponding prior element then - // we just take the config value as-is. - newVals[k] = configEV - continue - } - priorEV := priorV.Index(idx) - - newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) - newVals[k] = newEV - } - newV = cty.MapVal(newVals) - } else { - newV = cty.MapValEmpty(blockType.ImpliedType()) - } - } - - case configschema.NestingSet: - if !configV.Type().IsSetType() { - panic("configschema.NestingSet value is not a set as expected") - } - - // Nested blocks are correlated by comparing the element values - // after eliminating all of the computed attributes. In practice, - // this means that any config change produces an entirely new - // nested object, and we only propagate prior computed values - // if the non-computed attribute values are identical. 
- var cmpVals [][2]cty.Value - if priorV.IsKnown() && !priorV.IsNull() { - cmpVals = setElementCompareValues(&blockType.Block, priorV, false) - } - configVLen := 0 - if configV.IsKnown() && !configV.IsNull() { - configVLen = configV.LengthInt() - } - if configVLen > 0 { - used := make([]bool, len(cmpVals)) // track used elements in case multiple have the same compare value - newVals := make([]cty.Value, 0, configVLen) - for it := configV.ElementIterator(); it.Next(); { - _, configEV := it.Element() - var priorEV cty.Value - for i, cmp := range cmpVals { - if used[i] { - continue - } - if cmp[1].RawEquals(configEV) { - priorEV = cmp[0] - used[i] = true // we can't use this value on a future iteration - break - } - } - if priorEV == cty.NilVal { - priorEV = cty.NullVal(blockType.ImpliedType()) - } - - newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) - newVals = append(newVals, newEV) - } - newV = cty.SetVal(newVals) - } else { - newV = cty.SetValEmpty(blockType.Block.ImpliedType()) - } - - default: - // Should never happen, since the above cases are comprehensive. - panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting)) - } - - newAttrs[name] = newV - } - - return cty.ObjectVal(newAttrs) -} - -// setElementCompareValues takes a known, non-null value of a cty.Set type and -// returns a table -- constructed of two-element arrays -- that maps original -// set element values to corresponding values that have all of the computed -// values removed, making them suitable for comparison with values obtained -// from configuration. The element type of the set must conform to the implied -// type of the given schema, or this function will panic. -// -// In the resulting slice, the zeroth element of each array is the original -// value and the one-indexed element is the corresponding "compare value". -// -// This is intended to help correlate prior elements with configured elements -// in ProposedNewObject. The result is a heuristic rather than an exact science, -// since e.g. two separate elements may reduce to the same value through this -// process. The caller must therefore be ready to deal with duplicates. -func setElementCompareValues(schema *configschema.Block, set cty.Value, isConfig bool) [][2]cty.Value { - ret := make([][2]cty.Value, 0, set.LengthInt()) - for it := set.ElementIterator(); it.Next(); { - _, ev := it.Element() - ret = append(ret, [2]cty.Value{ev, setElementCompareValue(schema, ev, isConfig)}) - } - return ret -} - -// setElementCompareValue creates a new value that has all of the same -// non-computed attribute values as the one given but has all computed -// attribute values forced to null. -// -// If isConfig is true then non-null Optional+Computed attribute values will -// be preserved. Otherwise, they will also be set to null. -// -// The input value must conform to the schema's implied type, and the return -// value is guaranteed to conform to it. 
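A standalone sketch of the compare-value idea described above, using plain cty values instead of the real configschema types; the two-attribute element ("id" computed, "name" configured) is invented for illustration:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Prior element: "id" was computed by the provider, "name" came from config.
	prior := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.StringVal("i-abc123"),
		"name": cty.StringVal("web"),
	})
	// Config element: "id" is null because the user cannot set a computed attribute.
	config := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.NullVal(cty.String),
		"name": cty.StringVal("web"),
	})

	// Forcing the computed attribute to null yields the prior element's
	// "compare value", which now correlates with the config element.
	cmp := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.NullVal(cty.String),
		"name": prior.GetAttr("name"),
	})
	fmt.Println(cmp.RawEquals(config)) // true
}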
-func setElementCompareValue(schema *configschema.Block, v cty.Value, isConfig bool) cty.Value { - if v.IsNull() || !v.IsKnown() { - return v - } - - attrs := map[string]cty.Value{} - for name, attr := range schema.Attributes { - switch { - case attr.Computed && attr.Optional: - if isConfig { - attrs[name] = v.GetAttr(name) - } else { - attrs[name] = cty.NullVal(attr.Type) - } - case attr.Computed: - attrs[name] = cty.NullVal(attr.Type) - default: - attrs[name] = v.GetAttr(name) - } - } - - for name, blockType := range schema.BlockTypes { - switch blockType.Nesting { - - case configschema.NestingSingle, configschema.NestingGroup: - attrs[name] = setElementCompareValue(&blockType.Block, v.GetAttr(name), isConfig) - - case configschema.NestingList, configschema.NestingSet: - cv := v.GetAttr(name) - if cv.IsNull() || !cv.IsKnown() { - attrs[name] = cv - continue - } - if l := cv.LengthInt(); l > 0 { - elems := make([]cty.Value, 0, l) - for it := cv.ElementIterator(); it.Next(); { - _, ev := it.Element() - elems = append(elems, setElementCompareValue(&blockType.Block, ev, isConfig)) - } - if blockType.Nesting == configschema.NestingSet { - // SetValEmpty would panic if given elements that are not - // all of the same type, but that's guaranteed not to - // happen here because our input value was _already_ a - // set and we've not changed the types of any elements here. - attrs[name] = cty.SetVal(elems) - } else { - attrs[name] = cty.TupleVal(elems) - } - } else { - if blockType.Nesting == configschema.NestingSet { - attrs[name] = cty.SetValEmpty(blockType.Block.ImpliedType()) - } else { - attrs[name] = cty.EmptyTupleVal - } - } - - case configschema.NestingMap: - cv := v.GetAttr(name) - if cv.IsNull() || !cv.IsKnown() { - attrs[name] = cv - continue - } - elems := make(map[string]cty.Value) - for it := cv.ElementIterator(); it.Next(); { - kv, ev := it.Element() - elems[kv.AsString()] = setElementCompareValue(&blockType.Block, ev, isConfig) - } - attrs[name] = cty.ObjectVal(elems) - - default: - // Should never happen, since the above cases are comprehensive. - panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting)) - } - } - - return cty.ObjectVal(attrs) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go deleted file mode 100644 index 905a91142a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go +++ /dev/null @@ -1,267 +0,0 @@ -package objchange - -import ( - "fmt" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" -) - -// AssertPlanValid checks whether a planned new state returned by a -// provider's PlanResourceChange method is suitable to achieve a change -// from priorState to config. It returns a slice with nonzero length if -// any problems are detected. Because problems here indicate bugs in the -// provider that generated the plannedState, they are written with provider -// developers as an audience, rather than end-users. -// -// All of the given values must have the same type and must conform to the -// implied type of the given schema, or this function may panic or produce -// garbage results.
-// -// During planning, a provider may only make changes to attributes that are -// null (unset) in the configuration and are marked as "computed" in the -// resource type schema, in order to insert any default values the provider -// may know about. If the default value cannot be determined until apply time, -// the provider can return an unknown value. Providers are forbidden from -// planning a change that disagrees with any non-null argument in the -// configuration. -// -// As a special exception, providers _are_ allowed to provide attribute values -// conflicting with configuration if and only if the planned value exactly -// matches the corresponding attribute value in the prior state. The provider -// can use this to signal that the new value is functionally equivalent to -// the old and thus no change is required. -func AssertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value) []error { - return assertPlanValid(schema, priorState, config, plannedState, nil) -} - -func assertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value, path cty.Path) []error { - var errs []error - if plannedState.IsNull() && !config.IsNull() { - errs = append(errs, path.NewErrorf("planned for absence but config wants existence")) - return errs - } - if config.IsNull() && !plannedState.IsNull() { - errs = append(errs, path.NewErrorf("planned for existence but config wants absence")) - return errs - } - if plannedState.IsNull() { - // No further checks possible if the planned value is null - return errs - } - - impTy := schema.ImpliedType() - - for name, attrS := range schema.Attributes { - plannedV := plannedState.GetAttr(name) - configV := config.GetAttr(name) - priorV := cty.NullVal(attrS.Type) - if !priorState.IsNull() { - priorV = priorState.GetAttr(name) - } - - path := append(path, cty.GetAttrStep{Name: name}) - moreErrs := assertPlannedValueValid(attrS, priorV, configV, plannedV, path) - errs = append(errs, moreErrs...) - } - for name, blockS := range schema.BlockTypes { - path := append(path, cty.GetAttrStep{Name: name}) - plannedV := plannedState.GetAttr(name) - configV := config.GetAttr(name) - priorV := cty.NullVal(impTy.AttributeType(name)) - if !priorState.IsNull() { - priorV = priorState.GetAttr(name) - } - if plannedV.RawEquals(configV) { - // Easy path: nothing has changed at all - continue - } - if !plannedV.IsKnown() { - errs = append(errs, path.NewErrorf("attribute representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - - switch blockS.Nesting { - case configschema.NestingSingle, configschema.NestingGroup: - moreErrs := assertPlanValid(&blockS.Block, priorV, configV, plannedV, path) - errs = append(errs, moreErrs...) - case configschema.NestingList: - // A NestingList might either be a list or a tuple, depending on - // whether there are dynamically-typed attributes inside. However, - // both support a similar-enough API that we can treat them the - // same for our purposes here.
- if plannedV.IsNull() { - errs = append(errs, path.NewErrorf("attribute representing a list of nested blocks must be empty to indicate no blocks, not null")) - continue - } - - plannedL := plannedV.LengthInt() - configL := configV.LengthInt() - if plannedL != configL { - errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - path := append(path, cty.IndexStep{Key: idx}) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - if !configV.HasIndex(idx).True() { - continue // should never happen since we checked the lengths above - } - configEV := configV.Index(idx) - priorEV := cty.NullVal(blockS.ImpliedType()) - if !priorV.IsNull() && priorV.HasIndex(idx).True() { - priorEV = priorV.Index(idx) - } - - moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) - errs = append(errs, moreErrs...) - } - case configschema.NestingMap: - if plannedV.IsNull() { - errs = append(errs, path.NewErrorf("attribute representing a map of nested blocks must be empty to indicate no blocks, not null")) - continue - } - - // A NestingMap might either be a map or an object, depending on - // whether there are dynamically-typed attributes inside, but - // that's decided statically and so all values will have the same - // kind. - if plannedV.Type().IsObjectType() { - plannedAtys := plannedV.Type().AttributeTypes() - configAtys := configV.Type().AttributeTypes() - for k := range plannedAtys { - if _, ok := configAtys[k]; !ok { - errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) - continue - } - path := append(path, cty.GetAttrStep{Name: k}) - - plannedEV := plannedV.GetAttr(k) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - configEV := configV.GetAttr(k) - priorEV := cty.NullVal(blockS.ImpliedType()) - if !priorV.IsNull() && priorV.Type().HasAttribute(k) { - priorEV = priorV.GetAttr(k) - } - moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) - errs = append(errs, moreErrs...) 
- } - for k := range configAtys { - if _, ok := plannedAtys[k]; !ok { - errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", k)) - continue - } - } - } else { - plannedL := plannedV.LengthInt() - configL := configV.LengthInt() - if plannedL != configL { - errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - path := append(path, cty.IndexStep{Key: idx}) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - k := idx.AsString() - if !configV.HasIndex(idx).True() { - errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) - continue - } - configEV := configV.Index(idx) - priorEV := cty.NullVal(blockS.ImpliedType()) - if !priorV.IsNull() && priorV.HasIndex(idx).True() { - priorEV = priorV.Index(idx) - } - moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) - errs = append(errs, moreErrs...) - } - for it := configV.ElementIterator(); it.Next(); { - idx, _ := it.Element() - if !plannedV.HasIndex(idx).True() { - errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", idx.AsString())) - continue - } - } - } - case configschema.NestingSet: - if plannedV.IsNull() { - errs = append(errs, path.NewErrorf("attribute representing a set of nested blocks must be empty to indicate no blocks, not null")) - continue - } - - // Because set elements have no identifier with which to correlate - // them, we can't robustly validate the plan for a nested block - // backed by a set, and so unfortunately we need to just trust the - // provider to do the right thing. :( - // - // (In principle we could correlate elements by matching the - // subset of attributes explicitly set in config, except for the - // special diff suppression rule which allows for there to be a - // planned value that is constructed by mixing part of a prior - // value with part of a config value, creating an entirely new - // element that is not present in either prior nor config.) - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - path := append(path, cty.IndexStep{Key: idx}) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - } - - default: - panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) - } - } - - return errs -} - -func assertPlannedValueValid(attrS *configschema.Attribute, priorV, configV, plannedV cty.Value, path cty.Path) []error { - var errs []error - if plannedV.RawEquals(configV) { - // This is the easy path: provider didn't change anything at all. - return errs - } - if plannedV.RawEquals(priorV) && !priorV.IsNull() { - // Also pretty easy: there is a prior value and the provider has - // returned it unchanged. This indicates that configV and plannedV - // are functionally equivalent and so the provider wishes to disregard - // the configuration value in favor of the prior. - return errs - } - if attrS.Computed && configV.IsNull() { - // The provider is allowed to change the value of any computed - // attribute that isn't explicitly set in the config. 
- return errs - } - - // If none of the above conditions match, the provider has made an invalid - // change to this attribute. - if priorV.IsNull() { - if attrS.Sensitive { - errs = append(errs, path.NewErrorf("sensitive planned value does not match config value")) - } else { - errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v", plannedV, configV)) - } - return errs - } - if attrS.Sensitive { - errs = append(errs, path.NewErrorf("sensitive planned value does not match config value nor prior value")) - } else { - errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v nor prior value %#v", plannedV, configV, priorV)) - } - return errs -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go deleted file mode 100644 index 0abed56a0f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go +++ /dev/null @@ -1,92 +0,0 @@ -package plans - -import ( - "sort" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// Plan is the top-level type representing a planned set of changes. -// -// A plan is a summary of the set of changes required to move from a current -// state to a goal state derived from configuration. The described changes -// are not applied directly, but contain an approximation of the final -// result that will be completed during apply by resolving any values that -// cannot be predicted. -// -// A plan must always be accompanied by the state and configuration it was -// built from, since the plan does not itself include all of the information -// required to make the changes indicated. -type Plan struct { - VariableValues map[string]DynamicValue - Changes *Changes - TargetAddrs []addrs.Targetable - ProviderSHA256s map[string][]byte - Backend Backend -} - -// Backend represents the backend-related configuration and other data as it -// existed when a plan was created. -type Backend struct { - // Type is the type of backend that the plan will apply against. - Type string - - // Config is the configuration of the backend, whose schema is decided by - // the backend Type. - Config DynamicValue - - // Workspace is the name of the workspace that was active when the plan - // was created. It is illegal to apply a plan created for one workspace - // to the state of another workspace. - // (This constraint is already enforced by the statefile lineage mechanism, - // but storing this explicitly allows us to return a better error message - // in the situation where the user has the wrong workspace selected.) - Workspace string -} - -func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) { - dv, err := NewDynamicValue(config, configSchema.ImpliedType()) - if err != nil { - return nil, err - } - - return &Backend{ - Type: typeName, - Config: dv, - Workspace: workspaceName, - }, nil -} - -// ProviderAddrs returns a list of all of the provider configuration addresses -// referenced throughout the receiving plan. -// -// The result is de-duplicated so that each distinct address appears only once. 
-func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig { - if p == nil || p.Changes == nil { - return nil - } - - m := map[string]addrs.AbsProviderConfig{} - for _, rc := range p.Changes.Resources { - m[rc.ProviderAddr.String()] = rc.ProviderAddr - } - if len(m) == 0 { - return nil - } - - // This is mainly just so we'll get stable results for testing purposes. - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - ret := make([]addrs.AbsProviderConfig, len(keys)) - for i, key := range keys { - ret[i] = m[key] - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/diagnostics.go deleted file mode 100644 index f20f0507e9..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/diagnostics.go +++ /dev/null @@ -1,132 +0,0 @@ -package convert - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - "github.com/zclconf/go-cty/cty" -) - -// WarnsAndErrsToProto converts the warnings and errors returned by the legacy -// provider to protobuf diagnostics. -func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) { - for _, w := range warns { - diags = AppendProtoDiag(diags, w) - } - - for _, e := range errs { - diags = AppendProtoDiag(diags, e) - } - - return diags -} - -// AppendProtoDiag appends a new diagnostic from a warning string or an error. -// Values of any other unrecognized type are silently ignored. -func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic { - switch d := d.(type) { - case cty.PathError: - ap := PathToAttributePath(d.Path) - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: d.Error(), - Attribute: ap, - }) - case error: - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: d.Error(), - }) - case string: - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_WARNING, - Summary: d, - }) - case *proto.Diagnostic: - diags = append(diags, d) - case []*proto.Diagnostic: - diags = append(diags, d...) - } - return diags -} - -// ProtoToDiagnostics converts a list of proto.Diagnostics to a tfdiags.Diagnostics.
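A rough standalone sketch of the branching just described, with invented stand-in types in place of the internal proto and tfdiags packages: a diagnostic that carries an attribute path becomes attribute-scoped, anything else applies to the whole body.

package main

import "fmt"

// protoDiagnostic stands in for *proto.Diagnostic.
type protoDiagnostic struct {
	Severity  string
	Summary   string
	Attribute string // empty when no attribute path was recorded
}

// diagnostic stands in for a tfdiags.Diagnostic.
type diagnostic struct {
	Severity string
	Summary  string
	Scope    string
}

// convert mirrors the shape of ProtoToDiagnostics for a single element.
func convert(d protoDiagnostic) diagnostic {
	out := diagnostic{Severity: d.Severity, Summary: d.Summary, Scope: "whole body"}
	if d.Attribute != "" {
		out.Scope = "attribute " + d.Attribute
	}
	return out
}

func main() {
	fmt.Printf("%+v\n", convert(protoDiagnostic{"warning", "deprecated field", "settings.size"}))
	fmt.Printf("%+v\n", convert(protoDiagnostic{"error", "invalid config", ""}))
}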
-func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - for _, d := range ds { - var severity tfdiags.Severity - - switch d.Severity { - case proto.Diagnostic_ERROR: - severity = tfdiags.Error - case proto.Diagnostic_WARNING: - severity = tfdiags.Warning - } - - var newDiag tfdiags.Diagnostic - - // if there's an attribute path, we need to create an AttributeValue diagnostic - if d.Attribute != nil { - path := AttributePathToPath(d.Attribute) - newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path) - } else { - newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail) - } - - diags = diags.Append(newDiag) - } - - return diags -} - -// AttributePathToPath takes the proto encoded path and converts it to a cty.Path -func AttributePathToPath(ap *proto.AttributePath) cty.Path { - var p cty.Path - for _, step := range ap.Steps { - switch selector := step.Selector.(type) { - case *proto.AttributePath_Step_AttributeName: - p = p.GetAttr(selector.AttributeName) - case *proto.AttributePath_Step_ElementKeyString: - p = p.Index(cty.StringVal(selector.ElementKeyString)) - case *proto.AttributePath_Step_ElementKeyInt: - p = p.Index(cty.NumberIntVal(selector.ElementKeyInt)) - } - } - return p -} - -// PathToAttributePath takes a cty.Path and converts it to a proto-encoded path. -func PathToAttributePath(p cty.Path) *proto.AttributePath { - ap := &proto.AttributePath{} - for _, step := range p { - switch selector := step.(type) { - case cty.GetAttrStep: - ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: selector.Name, - }, - }) - case cty.IndexStep: - key := selector.Key - switch key.Type() { - case cty.String: - ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyString{ - ElementKeyString: key.AsString(), - }, - }) - case cty.Number: - v, _ := key.AsBigFloat().Int64() - ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyInt{ - ElementKeyInt: v, - }, - }) - default: - // We'll bail early if we encounter anything else, and just - // return the valid prefix. - return ap - } - } - } - return ap -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/schema.go deleted file mode 100644 index 88b8a9a699..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/schema.go +++ /dev/null @@ -1,192 +0,0 @@ -package convert - -import ( - "encoding/json" - "log" - "reflect" - "sort" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" -) - -// ConfigSchemaToProto takes a *configschema.Block and converts it to a -// proto.Schema_Block for a grpc response.
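The attribute types in this conversion travel as JSON bytes: cty.Type implements json.Marshaler, which is what the json.Marshal(a.Type) call below relies on. A minimal demonstration:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A schema attribute type serializes to a compact JSON form suitable
	// for the protobuf Schema_Attribute.Type field.
	ty, err := json.Marshal(cty.List(cty.String))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(ty)) // ["list","string"]
}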
-func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block { - block := &proto.Schema_Block{ - Description: b.Description, - DescriptionKind: protoStringKind(b.DescriptionKind), - Deprecated: b.Deprecated, - } - - for _, name := range sortedKeys(b.Attributes) { - a := b.Attributes[name] - - attr := &proto.Schema_Attribute{ - Name: name, - Description: a.Description, - DescriptionKind: protoStringKind(a.DescriptionKind), - Optional: a.Optional, - Computed: a.Computed, - Required: a.Required, - Sensitive: a.Sensitive, - Deprecated: a.Deprecated, - } - - ty, err := json.Marshal(a.Type) - if err != nil { - panic(err) - } - - attr.Type = ty - - block.Attributes = append(block.Attributes, attr) - } - - for _, name := range sortedKeys(b.BlockTypes) { - b := b.BlockTypes[name] - block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) - } - - return block -} - -func protoStringKind(k configschema.StringKind) proto.StringKind { - switch k { - default: - log.Printf("[TRACE] unexpected configschema.StringKind: %d", k) - return proto.StringKind_PLAIN - case configschema.StringPlain: - return proto.StringKind_PLAIN - case configschema.StringMarkdown: - return proto.StringKind_MARKDOWN - } -} - -func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock { - var nesting proto.Schema_NestedBlock_NestingMode - switch b.Nesting { - case configschema.NestingSingle: - nesting = proto.Schema_NestedBlock_SINGLE - case configschema.NestingGroup: - nesting = proto.Schema_NestedBlock_GROUP - case configschema.NestingList: - nesting = proto.Schema_NestedBlock_LIST - case configschema.NestingSet: - nesting = proto.Schema_NestedBlock_SET - case configschema.NestingMap: - nesting = proto.Schema_NestedBlock_MAP - default: - nesting = proto.Schema_NestedBlock_INVALID - } - return &proto.Schema_NestedBlock{ - TypeName: name, - Block: ConfigSchemaToProto(&b.Block), - Nesting: nesting, - MinItems: int64(b.MinItems), - MaxItems: int64(b.MaxItems), - } -} - -// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema. -func ProtoToProviderSchema(s *proto.Schema) providers.Schema { - return providers.Schema{ - Version: s.Version, - Block: ProtoToConfigSchema(s.Block), - } -} - -// ProtoToConfigSchema takes the GetSchema_Block from a grpc response and converts it -// to a terraform *configschema.Block.
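The reverse direction decodes those JSON bytes back into a cty.Type, as the json.Unmarshal(a.Type, &attr.Type) call below does; in isolation:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// The wire bytes decode back into a cty.Type when rebuilding an attribute.
	var ty cty.Type
	if err := json.Unmarshal([]byte(`["list","string"]`), &ty); err != nil {
		panic(err)
	}
	fmt.Println(ty.Equals(cty.List(cty.String))) // true
}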
-func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block { - block := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute), - BlockTypes: make(map[string]*configschema.NestedBlock), - - Description: b.Description, - DescriptionKind: schemaStringKind(b.DescriptionKind), - Deprecated: b.Deprecated, - } - - for _, a := range b.Attributes { - attr := &configschema.Attribute{ - Description: a.Description, - DescriptionKind: schemaStringKind(a.DescriptionKind), - Required: a.Required, - Optional: a.Optional, - Computed: a.Computed, - Sensitive: a.Sensitive, - Deprecated: a.Deprecated, - } - - if err := json.Unmarshal(a.Type, &attr.Type); err != nil { - panic(err) - } - - block.Attributes[a.Name] = attr - } - - for _, b := range b.BlockTypes { - block.BlockTypes[b.TypeName] = schemaNestedBlock(b) - } - - return block -} - -func schemaStringKind(k proto.StringKind) configschema.StringKind { - switch k { - default: - log.Printf("[TRACE] unexpected proto.StringKind: %d", k) - return configschema.StringPlain - case proto.StringKind_PLAIN: - return configschema.StringPlain - case proto.StringKind_MARKDOWN: - return configschema.StringMarkdown - } -} - -func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock { - var nesting configschema.NestingMode - switch b.Nesting { - case proto.Schema_NestedBlock_SINGLE: - nesting = configschema.NestingSingle - case proto.Schema_NestedBlock_GROUP: - nesting = configschema.NestingGroup - case proto.Schema_NestedBlock_LIST: - nesting = configschema.NestingList - case proto.Schema_NestedBlock_MAP: - nesting = configschema.NestingMap - case proto.Schema_NestedBlock_SET: - nesting = configschema.NestingSet - default: - // In all other cases we'll leave it as the zero value (invalid) and - // let the caller validate it and deal with this. - } - - nb := &configschema.NestedBlock{ - Nesting: nesting, - MinItems: int(b.MinItems), - MaxItems: int(b.MaxItems), - } - - nested := ProtoToConfigSchema(b.Block) - nb.Block = *nested - return nb -} - -// sortedKeys returns the lexically sorted keys from the given map. This is -// used to make schema conversions deterministic. This panics if map keys -// are not strings. -func sortedKeys(m interface{}) []string { - v := reflect.ValueOf(m) - keys := make([]string, v.Len()) - - mapKeys := v.MapKeys() - for i, k := range mapKeys { - keys[i] = k.Interface().(string) - } - - sort.Strings(keys) - return keys -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go deleted file mode 100644 index 729e97099e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go +++ /dev/null @@ -1,64 +0,0 @@ -package discovery - -// Error is a type used to describe situations that the caller must handle -// since they indicate some form of user error. -// -// The functions and methods that return these specialized errors indicate so -// in their documentation. The Error type should not itself be used directly, -// but rather errors should be compared using the == operator with the -// error constants in this package. -// -// Values of this type are _not_ used when the error being reported is an -// operational error (server unavailable, etc) or indicative of a bug in -// this package or its caller.
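The constant-comparable error pattern this type enables looks like the following in a self-contained form; the constant value and lookup function are invented for the example:

package main

import (
	"errors"
	"fmt"
)

// Error is a string type, so its values can be declared as constants and
// compared with ==, unlike values built with errors.New.
type Error string

func (e Error) Error() string { return string(e) }

const ErrNoSuitableVersion = Error("no suitable version is available")

func lookup() error { return ErrNoSuitableVersion }

func main() {
	err := lookup()
	if err == ErrNoSuitableVersion { // direct comparison works for constants
		fmt.Println("handled:", err)
	}
	fmt.Println(errors.Is(err, ErrNoSuitableVersion)) // also true
}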
-type Error string - -// ErrorNoSuitableVersion indicates that a suitable version (meeting given -// constraints) is not available. -const ErrorNoSuitableVersion = Error("no suitable version is available") - -// ErrorNoVersionCompatible indicates that all of the available versions -// that otherwise met constraints are not compatible with the current -// version of Terraform. -const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform") - -// ErrorVersionIncompatible indicates that all of the versions within the -// constraints are not compatible with the current version of Terraform, though -// there does exist a version outside of the constraints that is compatible. -const ErrorVersionIncompatible = Error("incompatible provider version") - -// ErrorNoSuchProvider indicates that no provider exists with a name given -const ErrorNoSuchProvider = Error("no provider exists with the given name") - -// ErrorNoVersionCompatibleWithPlatform indicates that all of the available -// versions that otherwise met constraints are not compatible with the -// requested platform -const ErrorNoVersionCompatibleWithPlatform = Error("no available version is compatible for the requested platform") - -// ErrorMissingChecksumVerification indicates that either the provider -// distribution is missing the SHA256SUMS file or the checksum file does -// not contain a checksum for the binary plugin -const ErrorMissingChecksumVerification = Error("unable to verify checksum") - -// ErrorChecksumVerification indicates that the current checksum of the -// provider plugin has changed since the initial release and is not trusted -// to download -const ErrorChecksumVerification = Error("unexpected plugin checksum") - -// ErrorSignatureVerification indicates that the digital signature for a -// provider distribution could not be verified for one of the following -// reasons: missing signature file, missing public key, or the signature -// was not signed by any known key for the publisher -const ErrorSignatureVerification = Error("unable to verify signature") - -// ErrorServiceUnreachable indicates that the network was unable to connect -// to the registry service -const ErrorServiceUnreachable = Error("registry service is unreachable") - -// ErrorPublicRegistryUnreachable indicates that the network was unable to connect -// to the public registry in particular, so we can show a link to the statuspage -const ErrorPublicRegistryUnreachable = Error("registry service is unreachable, check https://status.hashicorp.com/ for status updates") - -func (err Error) Error() string { - return string(err) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go deleted file mode 100644 index f053312b00..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go +++ /dev/null @@ -1,191 +0,0 @@ -package discovery - -import ( - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" -) - -// FindPlugins looks in the given directories for files whose filenames -// suggest that they are plugins of the given kind (e.g. "provider") and -// returns a PluginMetaSet representing the discovered potential-plugins. -// -// Currently this supports two different naming schemes. The current -// standard naming scheme is a subdirectory called $GOOS-$GOARCH containing -// files named terraform-$KIND-$NAME_v$VERSION.
The legacy naming scheme is -// files directly in the given directory whose names are like -// terraform-$KIND-$NAME. -// -// Only one plugin will be returned for each unique plugin (name, version) -// pair, with preference given to files found in earlier directories. -// -// This is a convenience wrapper around FindPluginPaths and ResolvePluginsPaths. -func FindPlugins(kind string, dirs []string) PluginMetaSet { - return ResolvePluginPaths(FindPluginPaths(kind, dirs)) -} - -// FindPluginPaths looks in the given directories for files whose filenames -// suggest that they are plugins of the given kind (e.g. "provider"). -// -// The return value is a list of absolute paths that appear to refer to -// plugins in the given directories, based only on what can be inferred -// from the naming scheme. The paths returned are ordered such that files -// in later dirs appear after files in earlier dirs in the given directory -// list. Within the same directory plugins are returned in a consistent but -// undefined order. -func FindPluginPaths(kind string, dirs []string) []string { - // This is just a thin wrapper around findPluginPaths so that we can - // use the latter in tests with a fake machineName so we can use our - // test fixtures. - return findPluginPaths(kind, dirs) -} - -func findPluginPaths(kind string, dirs []string) []string { - prefix := "terraform-" + kind + "-" - - ret := make([]string, 0, len(dirs)) - - for _, dir := range dirs { - items, err := ioutil.ReadDir(dir) - if err != nil { - // Ignore missing dirs, non-dirs, etc - continue - } - - log.Printf("[DEBUG] checking for %s in %q", kind, dir) - - for _, item := range items { - fullName := item.Name() - - if !strings.HasPrefix(fullName, prefix) { - continue - } - - // New-style paths must have a version segment in filename - if strings.Contains(strings.ToLower(fullName), "_v") { - absPath, err := filepath.Abs(filepath.Join(dir, fullName)) - if err != nil { - log.Printf("[ERROR] plugin filepath error: %s", err) - continue - } - - // Check that the file we found is usable - if !pathIsFile(absPath) { - log.Printf("[ERROR] ignoring non-file %s", absPath) - continue - } - - log.Printf("[DEBUG] found %s %q", kind, fullName) - ret = append(ret, filepath.Clean(absPath)) - continue - } - - // Legacy style with files directly in the base directory - absPath, err := filepath.Abs(filepath.Join(dir, fullName)) - if err != nil { - log.Printf("[ERROR] plugin filepath error: %s", err) - continue - } - - // Check that the file we found is usable - if !pathIsFile(absPath) { - log.Printf("[ERROR] ignoring non-file %s", absPath) - continue - } - - log.Printf("[WARN] found legacy %s %q", kind, fullName) - - ret = append(ret, filepath.Clean(absPath)) - } - } - - return ret -} - -// Returns true if and only if the given path refers to a file or a symlink -// to a file. -func pathIsFile(path string) bool { - info, err := os.Stat(path) - if err != nil { - return false - } - - return !info.IsDir() -} - -// ResolvePluginPaths takes a list of paths to plugin executables (as returned -// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the -// referenced plugins. -// -// If the same combination of plugin name and version appears multiple times, -// the earlier reference will be preferred. Several different versions of -// the same plugin name may be returned, in which case the methods of -// PluginMetaSet can be used to filter down. 
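The filename wrangling performed below can be exercised on its own; this sketch mirrors the steps described above, and the example filename is invented:

package main

import (
	"fmt"
	"strings"
)

// parse extracts a plugin name and version from a base filename such as
// "terraform-provider-aws_v2.1.0_x4".
func parse(base string) (name, version string) {
	base = strings.TrimSuffix(strings.ToLower(base), ".exe")
	base = strings.TrimPrefix(base, "terraform-")
	if i := strings.Index(base, "-"); i >= 0 {
		base = base[i+1:] // drop the kind segment, e.g. "provider-"
	}
	parts := strings.SplitN(base, "_v", 2)
	name = parts[0]
	if len(parts) == 2 {
		version = parts[1]
		if underX := strings.Index(version, "_x"); underX != -1 {
			version = version[:underX] // trim the protocol suffix, e.g. "_x4"
		}
	}
	return name, version
}

func main() {
	fmt.Println(parse("terraform-provider-aws_v2.1.0_x4")) // aws 2.1.0
}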
-func ResolvePluginPaths(paths []string) PluginMetaSet { - s := make(PluginMetaSet) - - type nameVersion struct { - Name string - Version string - } - found := make(map[nameVersion]struct{}) - - for _, path := range paths { - baseName := strings.ToLower(filepath.Base(path)) - if !strings.HasPrefix(baseName, "terraform-") { - // Should never happen with reasonable input - continue - } - - baseName = baseName[10:] - firstDash := strings.Index(baseName, "-") - if firstDash == -1 { - // Should never happen with reasonable input - continue - } - - baseName = baseName[firstDash+1:] - if baseName == "" { - // Should never happen with reasonable input - continue - } - - // Trim the .exe suffix used on Windows before we start wrangling - // the remainder of the path. - if strings.HasSuffix(baseName, ".exe") { - baseName = baseName[:len(baseName)-4] - } - - parts := strings.SplitN(baseName, "_v", 2) - name := parts[0] - version := VersionZero - if len(parts) == 2 { - version = parts[1] - } - - // Auto-installed plugins contain an extra name portion representing - // the expected plugin version, which we must trim off. - if underX := strings.Index(version, "_x"); underX != -1 { - version = version[:underX] - } - - if _, ok := found[nameVersion{name, version}]; ok { - // Skip duplicate versions of the same plugin - // (We do this during this step because after this we will be - // dealing with sets and thus lose our ordering with which to - // decide preference.) - continue - } - - s.Add(PluginMeta{ - Name: name, - Version: VersionStr(version), - Path: path, - }) - found[nameVersion{name, version}] = struct{}{} - } - - return s -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go deleted file mode 100644 index 722bb28a2c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go +++ /dev/null @@ -1,669 +0,0 @@ -package discovery - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - - "github.com/hashicorp/errwrap" - getter "github.com/hashicorp/go-getter" - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/httpclient" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/response" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/mitchellh/cli" -) - -// Releases are located by querying the terraform registry. - -var httpClient *http.Client - -func init() { - httpClient = httpclient.New() - - httpGetter := &getter.HttpGetter{ - Client: httpClient, - Netrc: true, - } - - getter.Getters["http"] = httpGetter - getter.Getters["https"] = httpGetter -} - -// ProviderInstaller is an Installer implementation that knows how to -// download Terraform providers from the official HashiCorp releases service -// into a local directory. The files downloaded are compliant with the -// naming scheme expected by FindPlugins, so the target directory of a -// provider installer can be used as one of several plugin discovery sources. 
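When Get (further below) runs, it fills empty OS and Arch fields from the running binary; a simplified stand-in for that defaulting, since the real struct lives in an internal package:

package main

import (
	"fmt"
	"runtime"
)

// installer is a pared-down stand-in for ProviderInstaller.
type installer struct {
	Dir  string
	OS   string
	Arch string
}

// defaults mirrors the empty-field handling at the top of Get.
func (i *installer) defaults() {
	if i.OS == "" {
		i.OS = runtime.GOOS
	}
	if i.Arch == "" {
		i.Arch = runtime.GOARCH
	}
}

func main() {
	i := &installer{Dir: ".terraform/plugins"}
	i.defaults()
	fmt.Println(i.OS, i.Arch) // e.g. linux amd64
}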
-type ProviderInstaller struct { - Dir string - - // Cache is used to access and update a local cache of plugins if non-nil. - // Can be nil to disable caching. - Cache PluginCache - - PluginProtocolVersion uint - - // OS and Arch specify the OS and architecture that should be used when - // installing plugins. These use the same labels as the runtime.GOOS and - // runtime.GOARCH variables respectively, and indeed the values of these - // are used as defaults if either of these is the empty string. - OS string - Arch string - - // Skip checksum and signature verification - SkipVerify bool - - Ui cli.Ui // Ui for output - - // Services is a required *disco.Disco, which may have services and - // credentials pre-loaded. - Services *disco.Disco - - // registry client - registry *registry.Client -} - -// Get is part of an implementation of type Installer, and attempts to download -// and install a Terraform provider matching the given constraints. -// -// This method may return one of a number of sentinel errors from this -// package to indicate issues that are likely to be resolvable via user action: -// -// ErrorNoSuchProvider: no provider with the given name exists in the repository. -// ErrorNoSuitableVersion: the provider exists but no available version matches constraints. -// ErrorNoVersionCompatible: a plugin was found within the constraints but it is -// incompatible with the current Terraform version. -// -// These errors should be recognized and handled as special cases by the caller -// to present a suitable user-oriented error message. -// -// All other errors indicate an internal problem that is likely _not_ solvable -// through user action, or at least not within Terraform's scope. Error messages -// are produced under the assumption that if presented to the user they will -// be presented alongside context about what is being installed, and thus the -// error messages do not redundantly include such information. -func (i *ProviderInstaller) Get(provider addrs.ProviderType, req Constraints) (PluginMeta, tfdiags.Diagnostics, error) { - var diags tfdiags.Diagnostics - - // a little bit of initialization. 
- if i.OS == "" { - i.OS = runtime.GOOS - } - if i.Arch == "" { - i.Arch = runtime.GOARCH - } - if i.registry == nil { - i.registry = registry.NewClient(i.Services, nil) - } - - // get a full listing of versions for the requested provider - allVersions, err := i.listProviderVersions(provider) - - // TODO: return multiple errors - if err != nil { - log.Printf("[DEBUG] %s", err) - if registry.IsServiceUnreachable(err) { - registryHost, err := i.hostname() - if err == nil && registryHost == regsrc.PublicRegistryHost.Raw { - return PluginMeta{}, diags, ErrorPublicRegistryUnreachable - } - return PluginMeta{}, diags, ErrorServiceUnreachable - } - if registry.IsServiceNotProvided(err) { - return PluginMeta{}, diags, err - } - return PluginMeta{}, diags, ErrorNoSuchProvider - } - - // Add any warnings from the response to diags - for _, warning := range allVersions.Warnings { - hostname, err := i.hostname() - if err != nil { - return PluginMeta{}, diags, err - } - diag := tfdiags.SimpleWarning(fmt.Sprintf("%s: %s", hostname, warning)) - diags = diags.Append(diag) - } - - if len(allVersions.Versions) == 0 { - return PluginMeta{}, diags, ErrorNoSuitableVersion - } - providerSource := allVersions.ID - - // Filter the list of plugin versions to those which meet the version constraints - versions := allowedVersions(allVersions, req) - if len(versions) == 0 { - return PluginMeta{}, diags, ErrorNoSuitableVersion - } - - // sort them newest to oldest. The newest version wins! - response.ProviderVersionCollection(versions).Sort() - - // if the chosen provider version does not support the requested platform, - // filter the list of acceptable versions to those that support that platform - if err := i.checkPlatformCompatibility(versions[0]); err != nil { - versions = i.platformCompatibleVersions(versions) - if len(versions) == 0 { - return PluginMeta{}, diags, ErrorNoVersionCompatibleWithPlatform - } - } - - // we now have a winning platform-compatible version - versionMeta := versions[0] - v := VersionStr(versionMeta.Version).MustParse() - - // check protocol compatibility - if err := i.checkPluginProtocol(versionMeta); err != nil { - closestMatch, err := i.findClosestProtocolCompatibleVersion(allVersions.Versions) - if err != nil { - // No operation here if we can't find a version with compatible protocol - return PluginMeta{}, diags, err - } - - // Prompt version suggestion to UI based on closest protocol match - var errMsg string - closestVersion := VersionStr(closestMatch.Version).MustParse() - if v.NewerThan(closestVersion) { - errMsg = providerProtocolTooNew - } else { - errMsg = providerProtocolTooOld - } - - constraintStr := req.String() - if constraintStr == "" { - constraintStr = "(any version)" - } - - return PluginMeta{}, diags, errwrap.Wrap(ErrorVersionIncompatible, fmt.Errorf(fmt.Sprintf( - errMsg, provider, v.String(), tfversion.String(), - closestVersion.String(), closestVersion.MinorUpgradeConstraintStr(), constraintStr))) - } - - downloadURLs, err := i.listProviderDownloadURLs(providerSource, versionMeta.Version) - if err != nil { - return PluginMeta{}, diags, err - } - providerURL := downloadURLs.DownloadURL - - if !i.SkipVerify { - // Terraform verifies the integrity of a provider release before downloading - // the plugin binary. The digital signature (SHA256SUMS.sig) on the - // release distribution (SHA256SUMS) is verified with the public key of the - // publisher provided in the Terraform Registry response, ensuring that - // everything is as intended by the publisher. 
The checksum of the provider - plugin is expected in the SHA256SUMS file and is double checked to match - the checksum of the original published release to the Registry. This - enforces immutability of releases between the Registry and the plugin's - host location. Lastly, the integrity of the binary is verified upon - download to match the Registry's signed checksum. - sha256, err := i.getProviderChecksum(downloadURLs) - if err != nil { - return PluginMeta{}, diags, err - } - - // add the checksum parameter for go-getter to verify the download for us. - if sha256 != "" { - providerURL = providerURL + "?checksum=sha256:" + sha256 - } - } - - printedProviderName := fmt.Sprintf("%q (%s)", provider.Name, providerSource) - i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %s %s...", printedProviderName, versionMeta.Version)) - log.Printf("[DEBUG] getting provider %s version %q", printedProviderName, versionMeta.Version) - err = i.install(provider, v, providerURL) - if err != nil { - return PluginMeta{}, diags, err - } - - // Find what we just installed - // (This is weird, because go-getter doesn't directly return - // information about what was extracted, and we just extracted - // the archive directly into a shared dir here.) - log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider.Name, versionMeta.Version) - metas := FindPlugins("provider", []string{i.Dir}) - log.Printf("[DEBUG] all plugins found %#v", metas) - metas, _ = metas.ValidateVersions() - metas = metas.WithName(provider.Name).WithVersion(v) - log.Printf("[DEBUG] filtered plugins %#v", metas) - if metas.Count() == 0 { - // This should never happen. Suggests that the release archive - // contains an executable file whose name doesn't match the - // expected convention. - return PluginMeta{}, diags, fmt.Errorf( - "failed to find installed plugin version %s; this is a bug in Terraform and should be reported", - versionMeta.Version, - ) - } - - if metas.Count() > 1 { - // This should also never happen, and suggests that a - // particular version was re-released with a different - // executable filename. We consider releases as immutable, so - // this is an error. - return PluginMeta{}, diags, fmt.Errorf( - "multiple plugins installed for version %s; this is a bug in Terraform and should be reported", - versionMeta.Version, - ) - } - - // By now we know we have exactly one meta, and so "Newest" will - // return that one. - return metas.Newest(), diags, nil -} - -func (i *ProviderInstaller) install(provider addrs.ProviderType, version Version, url string) error { - if i.Cache != nil { - log.Printf("[DEBUG] looking for provider %s %s in plugin cache", provider.Name, version) - cached := i.Cache.CachedPluginPath("provider", provider.Name, version) - if cached == "" { - log.Printf("[DEBUG] %s %s not yet in cache, so downloading %s", provider.Name, version, url) - err := getter.Get(i.Cache.InstallDir(), url) - if err != nil { - return err - } - // should now be in cache - cached = i.Cache.CachedPluginPath("provider", provider.Name, version) - if cached == "" { - // should never happen if the getter is behaving properly - // and the plugins are packaged properly. - return fmt.Errorf("failed to find downloaded plugin in cache %s", i.Cache.InstallDir()) - } - } - - // Link or copy the cached binary into our install dir so the - // normal resolution machinery can find it.
- filename := filepath.Base(cached) - targetPath := filepath.Join(i.Dir, filename) - // check if the target dir exists, and create it if not - var err error - if _, StatErr := os.Stat(i.Dir); os.IsNotExist(StatErr) { - err = os.MkdirAll(i.Dir, 0700) - } - if err != nil { - return err - } - - log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider.Name, version, targetPath, cached) - - // Delete if we can. If there's nothing there already then no harm done. - // This is important because we can't create a link if there's - // already a file of the same name present. - // (any other error here we'll catch below when we try to write here) - os.Remove(targetPath) - - // We don't attempt linking on Windows because links are not - // comprehensively supported by all tools/apps in Windows and - // so we choose to be conservative to avoid creating any - // weird issues for Windows users. - linkErr := errors.New("link not supported for Windows") // placeholder error, never actually returned - if runtime.GOOS != "windows" { - // Try hard linking first. Hard links are preferable because this - // creates a self-contained directory that doesn't depend on the - // cache after install. - linkErr = os.Link(cached, targetPath) - - // If that failed, try a symlink. This _does_ depend on the cache - // after install, so the user must manage the cache more carefully - // in this case, but avoids creating redundant copies of the - // plugins on disk. - if linkErr != nil { - linkErr = os.Symlink(cached, targetPath) - } - } - - // If we still have an error then we'll try a copy as a fallback. - // In this case either the OS is Windows or the target filesystem - // can't support symlinks. - if linkErr != nil { - srcFile, err := os.Open(cached) - if err != nil { - return fmt.Errorf("failed to open cached plugin %s: %s", cached, err) - } - defer srcFile.Close() - - destFile, err := os.OpenFile(targetPath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create %s: %s", targetPath, err) - } - - _, err = io.Copy(destFile, srcFile) - if err != nil { - destFile.Close() - return fmt.Errorf("failed to copy cached plugin from %s to %s: %s", cached, targetPath, err) - } - - err = destFile.Close() - if err != nil { - return fmt.Errorf("error creating %s: %s", targetPath, err) - } - } - - // One way or another, by the time we get here we should have either - // a link or a copy of the cached plugin within i.Dir, as expected. - } else { - log.Printf("[DEBUG] plugin cache is disabled, so downloading %s %s from %s", provider.Name, version, url) - err := getter.Get(i.Dir, url) - if err != nil { - return err - } - } - return nil -} - -func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) { - purge := make(PluginMetaSet) - - present := FindPlugins("provider", []string{i.Dir}) - for meta := range present { - chosen, ok := used[meta.Name] - if !ok { - purge.Add(meta) - } - if chosen.Path != meta.Path { - purge.Add(meta) - } - } - - removed := make(PluginMetaSet) - var errs error - for meta := range purge { - path := meta.Path - err := os.Remove(path) - if err != nil { - errs = multierror.Append(errs, fmt.Errorf( - "failed to remove unused provider plugin %s: %s", - path, err, - )) - } else { - removed.Add(meta) - } - } - - return removed, errs -} - -func (i *ProviderInstaller) getProviderChecksum(resp *response.TerraformProviderPlatformLocation) (string, error) { - // Get SHA256SUMS file. 
- shasums, err := getFile(resp.ShasumsURL) - if err != nil { - log.Printf("[ERROR] error fetching checksums from %q: %s", resp.ShasumsURL, err) - return "", ErrorMissingChecksumVerification - } - - // Get SHA256SUMS.sig file. - signature, err := getFile(resp.ShasumsSignatureURL) - if err != nil { - log.Printf("[ERROR] error fetching checksums signature from %q: %s", resp.ShasumsSignatureURL, err) - return "", ErrorSignatureVerification - } - - // Verify the GPG signature returned from the Registry. - asciiArmor := resp.SigningKeys.GPGASCIIArmor() - signer, err := verifySig(shasums, signature, asciiArmor) - if err != nil { - log.Printf("[ERROR] error verifying signature: %s", err) - return "", ErrorSignatureVerification - } - - // Also verify the GPG signature against the HashiCorp public key. This is - // a temporary additional check until a more robust key verification - // process is added in a future release. - _, err = verifySig(shasums, signature, HashicorpPublicKey) - if err != nil { - log.Printf("[ERROR] error verifying signature against HashiCorp public key: %s", err) - return "", ErrorSignatureVerification - } - - // Display identity for GPG key which succeeded verifying the signature. - // This could also be used to display to the user with i.Ui.Info(). - identities := []string{} - for k := range signer.Identities { - identities = append(identities, k) - } - identity := strings.Join(identities, ", ") - log.Printf("[DEBUG] verified GPG signature with key from %s", identity) - - // Extract checksum for this os/arch platform binary and verify against Registry - checksum := checksumForFile(shasums, resp.Filename) - if checksum == "" { - log.Printf("[ERROR] missing checksum for %s from source %s", resp.Filename, resp.ShasumsURL) - return "", ErrorMissingChecksumVerification - } else if checksum != resp.Shasum { - log.Printf("[ERROR] unexpected checksum for %s from source %q", resp.Filename, resp.ShasumsURL) - return "", ErrorChecksumVerification - } - - return checksum, nil -} - -func (i *ProviderInstaller) hostname() (string, error) { - provider := regsrc.NewTerraformProvider("", i.OS, i.Arch) - svchost, err := provider.SvcHost() - if err != nil { - return "", err - } - - return svchost.ForDisplay(), nil -} - -// list all versions available for the named provider -func (i *ProviderInstaller) listProviderVersions(provider addrs.ProviderType) (*response.TerraformProviderVersions, error) { - req := regsrc.NewTerraformProvider(provider.Name, i.OS, i.Arch) - versions, err := i.registry.TerraformProviderVersions(req) - return versions, err -} - -func (i *ProviderInstaller) listProviderDownloadURLs(name, version string) (*response.TerraformProviderPlatformLocation, error) { - urls, err := i.registry.TerraformProviderLocation(regsrc.NewTerraformProvider(name, i.OS, i.Arch), version) - if urls == nil { - return nil, fmt.Errorf("No download urls found for provider %s", name) - } - return urls, err -} - -// findClosestProtocolCompatibleVersion searches for the provider version with the closest protocol match. -// Prerelease versions are filtered. 
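The prerelease filtering mentioned above can be demonstrated with the hashicorp/go-version library that underlies this package's Version type; the version strings are invented:

package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	candidates := []string{"2.0.0", "2.1.0-beta1", "1.9.3"}

	var stable []*version.Version
	for _, s := range candidates {
		v, err := version.NewVersion(s)
		if err != nil || v.Prerelease() != "" {
			continue // skip unparseable and prerelease versions
		}
		stable = append(stable, v)
	}
	fmt.Println(stable) // [2.0.0 1.9.3]
}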
-func (i *ProviderInstaller) findClosestProtocolCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) { - // Loop through all the provider versions to find the earliest and latest - // versions that match the installer protocol to then select the closest of the two - var latest, earliest *response.TerraformProviderVersion - for _, version := range versions { - // Prereleases are filtered and will not be suggested - v, err := VersionStr(version.Version).Parse() - if err != nil || v.IsPrerelease() { - continue - } - - if err := i.checkPluginProtocol(version); err == nil { - if earliest == nil { - // Found the first provider version with compatible protocol - earliest = version - } - // Update the latest protocol compatible version - latest = version - } - } - if earliest == nil { - // No compatible protocol was found for any version - return nil, ErrorNoVersionCompatible - } - - // Convert protocols to comparable types - protoString := strconv.Itoa(int(i.PluginProtocolVersion)) - protocolVersion, err := VersionStr(protoString).Parse() - if err != nil { - return nil, fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion) - } - - earliestVersionProtocol, err := VersionStr(earliest.Protocols[0]).Parse() - if err != nil { - return nil, err - } - - // Compare installer protocol version with the first protocol listed of the earliest match - // [A, B] where A is assumed the earliest compatible major version of the protocol pair - if protocolVersion.NewerThan(earliestVersionProtocol) { - // Provider protocols are too old, the closest version is the earliest compatible version - return earliest, nil - } - - // Provider protocols are too new, the closest version is the latest compatible version - return latest, nil -} - -func (i *ProviderInstaller) checkPluginProtocol(versionMeta *response.TerraformProviderVersion) error { - // TODO: should this be a different error? We should probably differentiate between - // no compatible versions and no protocol versions listed at all - if len(versionMeta.Protocols) == 0 { - return fmt.Errorf("no plugin protocol versions listed") - } - - protoString := strconv.Itoa(int(i.PluginProtocolVersion)) - protocolVersion, err := VersionStr(protoString).Parse() - if err != nil { - return fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion) - } - protocolConstraint, err := protocolVersion.MinorUpgradeConstraintStr().Parse() - if err != nil { - // This should not fail if the preceding function succeeded. - return fmt.Errorf("invalid plugin protocol version: %q", protocolVersion.String()) - } - - for _, p := range versionMeta.Protocols { - proPro, err := VersionStr(p).Parse() - if err != nil { - // invalid protocol reported by the registry. Move along. - log.Printf("[WARN] invalid provider protocol version %q found in the registry", versionMeta.Version) - continue - } - // success! - if protocolConstraint.Allows(proPro) { - return nil - } - } - - return ErrorNoVersionCompatible -} - -// platformCompatibleVersions returns a list of provider versions that are -// compatible with the requested platform. 
-func (i *ProviderInstaller) platformCompatibleVersions(versions []*response.TerraformProviderVersion) []*response.TerraformProviderVersion { - var v []*response.TerraformProviderVersion - for _, version := range versions { - if err := i.checkPlatformCompatibility(version); err == nil { - v = append(v, version) - } - } - return v -} - -func (i *ProviderInstaller) checkPlatformCompatibility(versionMeta *response.TerraformProviderVersion) error { - if len(versionMeta.Platforms) == 0 { - return fmt.Errorf("no supported provider platforms listed") - } - for _, p := range versionMeta.Platforms { - if p.Arch == i.Arch && p.OS == i.OS { - return nil - } - } - return fmt.Errorf("version %s does not support the requested platform %s_%s", versionMeta.Version, i.OS, i.Arch) -} - -// take the list of available versions for a plugin, and filter out those that -// don't fit the constraints. -func allowedVersions(available *response.TerraformProviderVersions, required Constraints) []*response.TerraformProviderVersion { - var allowed []*response.TerraformProviderVersion - - for _, v := range available.Versions { - version, err := VersionStr(v.Version).Parse() - if err != nil { - log.Printf("[WARN] invalid version found for %q: %s", available.ID, err) - continue - } - if required.Allows(version) { - allowed = append(allowed, v) - } - } - return allowed -} - -func checksumForFile(sums []byte, name string) string { - for _, line := range strings.Split(string(sums), "\n") { - parts := strings.Fields(line) - if len(parts) > 1 && parts[1] == name { - return parts[0] - } - } - return "" -} - -func getFile(url string) ([]byte, error) { - resp, err := httpClient.Get(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s", resp.Status) - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return data, err - } - return data, nil -} - -// providerProtocolTooOld is a message sent to the CLI UI if the provider's -// supported protocol versions are too old for the user's version of terraform, -// but an older version of the provider is compatible. -const providerProtocolTooOld = ` -[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] - -Provider version %s is the earliest compatible version. Select it with -the following version constraint: - - version = %q - -Terraform checked all of the plugin versions matching the given constraint: - %s - -Consult the documentation for this provider for more information on -compatibility between provider and Terraform versions. -` - -// providerProtocolTooNew is a message sent to the CLI UI if the provider's -// supported protocol versions are too new for the user's version of terraform, -// and the user could either upgrade terraform or choose an older version of the -// provider -const providerProtocolTooNew = ` -[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] - -Provider version %s is the latest compatible version. Select it with -the following constraint: - - version = %q - -Terraform checked all of the plugin versions matching the given constraint: - %s - -Consult the documentation for this provider for more information on -compatibility between provider and Terraform versions. - -Alternatively, upgrade to the latest version of Terraform for compatibility with newer provider releases. 
-` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go deleted file mode 100644 index 1a10042648..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go +++ /dev/null @@ -1,48 +0,0 @@ -package discovery - -// PluginCache is an interface implemented by objects that are able to maintain -// a cache of plugins. -type PluginCache interface { - // CachedPluginPath returns a path where the requested plugin is already - // cached, or an empty string if the requested plugin is not yet cached. - CachedPluginPath(kind string, name string, version Version) string - - // InstallDir returns the directory that new plugins should be installed into - // in order to populate the cache. This directory should be used as the - // first argument to getter.Get when downloading plugins with go-getter. - // - // After installing into this directory, use CachedPluginPath to obtain the - // path where the plugin was installed. - InstallDir() string -} - -// NewLocalPluginCache returns a PluginCache that caches plugins in a -// given local directory. -func NewLocalPluginCache(dir string) PluginCache { - return &pluginCache{ - Dir: dir, - } -} - -type pluginCache struct { - Dir string -} - -func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string { - allPlugins := FindPlugins(kind, []string{c.Dir}) - plugins := allPlugins.WithName(name).WithVersion(version) - - if plugins.Count() == 0 { - // nothing cached - return "" - } - - // There should generally be only one plugin here; if there's more than - // one match for some reason then we'll just choose one arbitrarily. 
- plugin := plugins.Newest() - return plugin.Path -} - -func (c *pluginCache) InstallDir() string { - return c.Dir -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go deleted file mode 100644 index 4622ca0545..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go +++ /dev/null @@ -1,34 +0,0 @@ -package discovery - -// HashicorpPublicKey is the HashiCorp public key, also available at -// https://www.hashicorp.com/security -const HashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f -W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq -fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA -3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca -KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k -SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 -cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG -CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n -Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i -SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi -psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w -sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO -klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW -WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 -wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j -2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM -skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo -mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y -0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA -CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc -z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP -0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG -unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ -EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ -oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C -=LYpS ------END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go deleted file mode 100644 index bdcebcb9dc..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go +++ /dev/null @@ -1,41 +0,0 @@ -package discovery - -import ( - "crypto/sha256" - "io" - "os" -) - -// PluginMeta is metadata about a plugin, useful for launching the plugin -// and for understanding which plugins are available. -type PluginMeta struct { - // Name is the name of the plugin, e.g. as inferred from the plugin - // binary's filename, or by explicit configuration. - Name string - - // Version is the semver version of the plugin, expressed as a string - // that might not be semver-valid. - Version VersionStr - - // Path is the absolute path of the executable that can be launched - // to provide the RPC server for this plugin. - Path string -} - -// SHA256 returns a SHA256 hash of the content of the referenced executable -// file, or an error if the file's contents cannot be read. 
-func (m PluginMeta) SHA256() ([]byte, error) { - f, err := os.Open(m.Path) - if err != nil { - return nil, err - } - defer f.Close() - - h := sha256.New() - _, err = io.Copy(h, f) - if err != nil { - return nil, err - } - - return h.Sum(nil), nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go deleted file mode 100644 index 3a992892df..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go +++ /dev/null @@ -1,195 +0,0 @@ -package discovery - -// A PluginMetaSet is a set of PluginMeta objects meeting a certain criteria. -// -// Methods on this type allow filtering of the set to produce subsets that -// meet more restrictive criteria. -type PluginMetaSet map[PluginMeta]struct{} - -// Add inserts the given PluginMeta into the receiving set. This is a no-op -// if the given meta is already present. -func (s PluginMetaSet) Add(p PluginMeta) { - s[p] = struct{}{} -} - -// Remove removes the given PluginMeta from the receiving set. This is a no-op -// if the given meta is not already present. -func (s PluginMetaSet) Remove(p PluginMeta) { - delete(s, p) -} - -// Has returns true if the given meta is in the receiving set, or false -// otherwise. -func (s PluginMetaSet) Has(p PluginMeta) bool { - _, ok := s[p] - return ok -} - -// Count returns the number of metas in the set -func (s PluginMetaSet) Count() int { - return len(s) -} - -// ValidateVersions returns two new PluginMetaSets, separating those with -// versions that have syntax-valid semver versions from those that don't. -// -// Eliminating invalid versions from consideration (and possibly warning about -// them) is usually the first step of working with a meta set after discovery -// has completed. -func (s PluginMetaSet) ValidateVersions() (valid, invalid PluginMetaSet) { - valid = make(PluginMetaSet) - invalid = make(PluginMetaSet) - for p := range s { - if _, err := p.Version.Parse(); err == nil { - valid.Add(p) - } else { - invalid.Add(p) - } - } - return -} - -// WithName returns the subset of metas that have the given name. -func (s PluginMetaSet) WithName(name string) PluginMetaSet { - ns := make(PluginMetaSet) - for p := range s { - if p.Name == name { - ns.Add(p) - } - } - return ns -} - -// WithVersion returns the subset of metas that have the given version. -// -// This should be used only with the "valid" result from ValidateVersions; -// it will ignore any plugin metas that have invalid version strings. -func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet { - ns := make(PluginMetaSet) - for p := range s { - gotVersion, err := p.Version.Parse() - if err != nil { - continue - } - if gotVersion.Equal(version) { - ns.Add(p) - } - } - return ns -} - -// ByName groups the metas in the set by their Names, returning a map. -func (s PluginMetaSet) ByName() map[string]PluginMetaSet { - ret := make(map[string]PluginMetaSet) - for p := range s { - if _, ok := ret[p.Name]; !ok { - ret[p.Name] = make(PluginMetaSet) - } - ret[p.Name].Add(p) - } - return ret -} - -// Newest returns the one item from the set that has the newest Version value. -// -// The result is meaningful only if the set is already filtered such that -// all of the metas have the same Name. -// -// If there isn't at least one meta in the set then this function will panic. -// Use Count() to ensure that there is at least one value before calling. 
-//
-// If any of the metas have invalid version strings then this function will
-// panic. Use ValidateVersions() first to filter out metas with invalid
-// versions.
-//
-// If two metas have the same Version then one is arbitrarily chosen. This
-// situation should be avoided by pre-filtering the set.
-func (s PluginMetaSet) Newest() PluginMeta {
-	if len(s) == 0 {
-		panic("can't call Newest on empty PluginMetaSet")
-	}
-
-	var first = true
-	var winner PluginMeta
-	var winnerVersion Version
-	for p := range s {
-		version, err := p.Version.Parse()
-		if err != nil {
-			panic(err)
-		}
-
-		if first == true || version.NewerThan(winnerVersion) {
-			winner = p
-			winnerVersion = version
-			first = false
-		}
-	}
-
-	return winner
-}
-
-// ConstrainVersions takes a set of requirements and attempts to
-// return a map from name to a set of metas that have the matching
-// name and an appropriate version.
-//
-// If any of the given requirements match *no* plugins then its PluginMetaSet
-// in the returned map will be empty.
-//
-// All viable metas are returned, so the caller can apply any desired filtering
-// to reduce down to a single option. For example, calling Newest() to obtain
-// the highest available version.
-//
-// If any of the metas in the set have invalid version strings then this
-// function will panic. Use ValidateVersions() first to filter out metas with
-// invalid versions.
-func (s PluginMetaSet) ConstrainVersions(reqd PluginRequirements) map[string]PluginMetaSet {
-	ret := make(map[string]PluginMetaSet)
-	for p := range s {
-		name := p.Name
-		allowedVersions, ok := reqd[name]
-		if !ok {
-			continue
-		}
-		if _, ok := ret[p.Name]; !ok {
-			ret[p.Name] = make(PluginMetaSet)
-		}
-		version, err := p.Version.Parse()
-		if err != nil {
-			panic(err)
-		}
-		if allowedVersions.Allows(version) {
-			ret[p.Name].Add(p)
-		}
-	}
-	return ret
-}
-
-// OverridePaths returns a new set where any existing plugins with the given
-// names are removed and replaced with the single path given in the map.
-//
-// This is here only to continue to support the legacy way of overriding
-// plugin binaries in the .terraformrc file. It treats all given plugins
-// as pre-versioning (version 0.0.0). This mechanism will eventually be
-// phased out, with vendor directories being the intended replacement.
-func (s PluginMetaSet) OverridePaths(paths map[string]string) PluginMetaSet {
-	ret := make(PluginMetaSet)
-	for p := range s {
-		if _, ok := paths[p.Name]; ok {
-			// Skip plugins that we're overriding
-			continue
-		}
-
-		ret.Add(p)
-	}
-
-	// Now add the metadata for overriding plugins
-	for name, path := range paths {
-		ret.Add(PluginMeta{
-			Name:    name,
-			Version: VersionZero,
-			Path:    path,
-		})
-	}
-
-	return ret
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go
deleted file mode 100644
index 75430fdd60..0000000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package discovery
-
-import (
-	"bytes"
-)
-
-// PluginRequirements describes a set of plugins (assumed to be of a consistent
-// kind) that are required to exist and have versions within the given
-// corresponding sets.
-type PluginRequirements map[string]*PluginConstraints
-
-// PluginConstraints represents an element of PluginRequirements describing
-// the constraints for a single plugin.
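Taken together, the PluginMetaSet methods above (with ValidateVersions and WithName from earlier in the same file) compose into the usual lookup pipeline. A small sketch of how a caller would pick the newest valid build of one plugin, shown only to illustrate the API being removed (newestValid is a hypothetical helper):

// newestValid filters a discovered set down to syntactically valid
// versions of one plugin and returns the newest, if any.
func newestValid(all PluginMetaSet, name string) (PluginMeta, bool) {
	valid, _ := all.ValidateVersions() // drop metas with bad versions
	candidates := valid.WithName(name)
	if candidates.Count() == 0 {
		return PluginMeta{}, false
	}
	// Safe: every remaining meta has a parseable version and the set
	// is non-empty, so Newest will not panic.
	return candidates.Newest(), true
}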
-type PluginConstraints struct { - // Specifies that the plugin's version must be within the given - // constraints. - Versions Constraints - - // If non-nil, the hash of the on-disk plugin executable must exactly - // match the SHA256 hash given here. - SHA256 []byte -} - -// Allows returns true if the given version is within the receiver's version -// constraints. -func (s *PluginConstraints) Allows(v Version) bool { - return s.Versions.Allows(v) -} - -// AcceptsSHA256 returns true if the given executable SHA256 hash is acceptable, -// either because it matches the constraint or because there is no such -// constraint. -func (s *PluginConstraints) AcceptsSHA256(digest []byte) bool { - if s.SHA256 == nil { - return true - } - return bytes.Equal(s.SHA256, digest) -} - -// Merge takes the contents of the receiver and the other given requirements -// object and merges them together into a single requirements structure -// that satisfies both sets of requirements. -// -// Note that it doesn't make sense to merge two PluginRequirements with -// differing required plugin SHA256 hashes, since the result will never -// match any plugin. -func (r PluginRequirements) Merge(other PluginRequirements) PluginRequirements { - ret := make(PluginRequirements) - for n, c := range r { - ret[n] = &PluginConstraints{ - Versions: Constraints{}.Append(c.Versions), - SHA256: c.SHA256, - } - } - for n, c := range other { - if existing, exists := ret[n]; exists { - ret[n].Versions = ret[n].Versions.Append(c.Versions) - - if existing.SHA256 != nil { - if c.SHA256 != nil && !bytes.Equal(c.SHA256, existing.SHA256) { - // If we've been asked to merge two constraints with - // different SHA256 hashes then we'll produce a dummy value - // that can never match anything. This is a silly edge case - // that no reasonable caller should hit. - ret[n].SHA256 = []byte(invalidProviderHash) - } - } else { - ret[n].SHA256 = c.SHA256 // might still be nil - } - } else { - ret[n] = &PluginConstraints{ - Versions: Constraints{}.Append(c.Versions), - SHA256: c.SHA256, - } - } - } - return ret -} - -// LockExecutables applies additional constraints to the receiver that -// require plugin executables with specific SHA256 digests. This modifies -// the receiver in-place, since it's intended to be applied after -// version constraints have been resolved. -// -// The given map must include a key for every plugin that is already -// required. If not, any missing keys will cause the corresponding plugin -// to never match, though the direct caller doesn't necessarily need to -// guarantee this as long as the downstream code _applying_ these constraints -// is able to deal with the non-match in some way. -func (r PluginRequirements) LockExecutables(sha256s map[string][]byte) { - for name, cons := range r { - digest := sha256s[name] - - if digest == nil { - // Prevent any match, which will then presumably cause the - // downstream consumer of this requirements to report an error. 
- cons.SHA256 = []byte(invalidProviderHash) - continue - } - - cons.SHA256 = digest - } -} - -const invalidProviderHash = "" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go deleted file mode 100644 index 7bbae50c38..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go +++ /dev/null @@ -1,19 +0,0 @@ -package discovery - -import ( - "bytes" - "strings" - - "golang.org/x/crypto/openpgp" -) - -// Verify the data using the provided openpgp detached signature and the -// embedded hashicorp public key. -func verifySig(data, sig []byte, armor string) (*openpgp.Entity, error) { - el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armor)) - if err != nil { - return nil, err - } - - return openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig)) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go deleted file mode 100644 index 4311d51076..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go +++ /dev/null @@ -1,77 +0,0 @@ -package discovery - -import ( - "fmt" - "sort" - - version "github.com/hashicorp/go-version" -) - -const VersionZero = "0.0.0" - -// A VersionStr is a string containing a possibly-invalid representation -// of a semver version number. Call Parse on it to obtain a real Version -// object, or discover that it is invalid. -type VersionStr string - -// Parse transforms a VersionStr into a Version if it is -// syntactically valid. If it isn't then an error is returned instead. -func (s VersionStr) Parse() (Version, error) { - raw, err := version.NewVersion(string(s)) - if err != nil { - return Version{}, err - } - return Version{raw}, nil -} - -// MustParse transforms a VersionStr into a Version if it is -// syntactically valid. If it isn't then it panics. -func (s VersionStr) MustParse() Version { - ret, err := s.Parse() - if err != nil { - panic(err) - } - return ret -} - -// Version represents a version number that has been parsed from -// a semver string and known to be valid. -type Version struct { - // We wrap this here just because it avoids a proliferation of - // direct go-version imports all over the place, and keeps the - // version-processing details within this package. - raw *version.Version -} - -func (v Version) String() string { - return v.raw.String() -} - -func (v Version) NewerThan(other Version) bool { - return v.raw.GreaterThan(other.raw) -} - -func (v Version) Equal(other Version) bool { - return v.raw.Equal(other.raw) -} - -// IsPrerelease determines if version is a prerelease -func (v Version) IsPrerelease() bool { - return v.raw.Prerelease() != "" -} - -// MinorUpgradeConstraintStr returns a ConstraintStr that would permit -// minor upgrades relative to the receiving version. -func (v Version) MinorUpgradeConstraintStr() ConstraintStr { - segments := v.raw.Segments() - return ConstraintStr(fmt.Sprintf("~> %d.%d", segments[0], segments[1])) -} - -type Versions []Version - -// Sort sorts version from newest to oldest. 
-func (v Versions) Sort() {
-	sort.Slice(v, func(i, j int) bool {
-		return v[i].NewerThan(v[j])
-	})
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go
deleted file mode 100644
index fc8b6f8bd2..0000000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package discovery
-
-import (
-	"sort"
-
-	version "github.com/hashicorp/go-version"
-)
-
-// A ConstraintStr is a string containing a possibly-invalid representation
-// of a version constraint provided in configuration. Call Parse on it to
-// obtain a real Constraint object, or discover that it is invalid.
-type ConstraintStr string
-
-// Parse transforms a ConstraintStr into a Constraints if it is
-// syntactically valid. If it isn't then an error is returned instead.
-func (s ConstraintStr) Parse() (Constraints, error) {
-	raw, err := version.NewConstraint(string(s))
-	if err != nil {
-		return Constraints{}, err
-	}
-	return Constraints{raw}, nil
-}
-
-// MustParse is like Parse but it panics if the constraint string is invalid.
-func (s ConstraintStr) MustParse() Constraints {
-	ret, err := s.Parse()
-	if err != nil {
-		panic(err)
-	}
-	return ret
-}
-
-// Constraints represents a set of versions which any given Version is either
-// a member of or not.
-type Constraints struct {
-	raw version.Constraints
-}
-
-// NewConstraints creates a Constraints based on a version.Constraints.
-func NewConstraints(c version.Constraints) Constraints {
-	return Constraints{c}
-}
-
-// AllVersions is a Constraints containing all versions.
-var AllVersions Constraints
-
-func init() {
-	AllVersions = Constraints{
-		raw: make(version.Constraints, 0),
-	}
-}
-
-// Allows returns true if the given version is permitted by the receiving
-// constraints set.
-func (s Constraints) Allows(v Version) bool {
-	return s.raw.Check(v.raw)
-}
-
-// Append combines the receiving set with the given other set to produce
-// a set that is the intersection of both sets, which is to say that the
-// resulting constraints contain only the versions that are members of both.
-func (s Constraints) Append(other Constraints) Constraints {
-	raw := make(version.Constraints, 0, len(s.raw)+len(other.raw))
-
-	// Since "raw" is a list of constraints that remove versions from the set,
-	// "Intersection" is implemented by concatenating together those lists,
-	// thus leaving behind only the versions not removed by either list.
-	raw = append(raw, s.raw...)
-	raw = append(raw, other.raw...)
-
-	// while the set is unordered, we sort these lexically for consistent output
-	sort.Slice(raw, func(i, j int) bool {
-		return raw[i].String() < raw[j].String()
-	})
-
-	return Constraints{raw}
-}
-
-// String returns a string representation of the set members as a set
-// of range constraints.
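Because go-version constraint lists are conjunctive (a version must satisfy every element), the concatenation in Append above really is an intersection. A hedged sketch of the same idea at the go-version level (intersect and the package name are illustrative, not SDK API):

package versions

import version "github.com/hashicorp/go-version"

// intersect concatenates two constraint lists; a version passes Check
// only if it satisfies every element, i.e. both original sets.
func intersect(a, b version.Constraints) version.Constraints {
	out := make(version.Constraints, 0, len(a)+len(b))
	out = append(out, a...)
	return append(out, b...)
}

// Example: intersect(">= 1.2", "< 2.0") allows 1.5.0 but not 2.1.0.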
-func (s Constraints) String() string { - return s.raw.String() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go deleted file mode 100644 index 0f48f2447d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go +++ /dev/null @@ -1,47 +0,0 @@ -package providers - -import ( - "sort" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// AddressedTypes is a helper that extracts all of the distinct provider -// types from the given list of relative provider configuration addresses. -func AddressedTypes(providerAddrs []addrs.ProviderConfig) []string { - if len(providerAddrs) == 0 { - return nil - } - m := map[string]struct{}{} - for _, addr := range providerAddrs { - m[addr.Type] = struct{}{} - } - - names := make([]string, 0, len(m)) - for typeName := range m { - names = append(names, typeName) - } - - sort.Strings(names) // Stable result for tests - return names -} - -// AddressedTypesAbs is a helper that extracts all of the distinct provider -// types from the given list of absolute provider configuration addresses. -func AddressedTypesAbs(providerAddrs []addrs.AbsProviderConfig) []string { - if len(providerAddrs) == 0 { - return nil - } - m := map[string]struct{}{} - for _, addr := range providerAddrs { - m[addr.ProviderConfig.Type] = struct{}{} - } - - names := make([]string, 0, len(m)) - for typeName := range m { - names = append(names, typeName) - } - - sort.Strings(names) // Stable result for tests - return names -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go deleted file mode 100644 index 39aa1de60f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package providers contains the interface and primary types required to -// implement a Terraform resource provider. -package providers diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go deleted file mode 100644 index 3d0aa8ec9e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go +++ /dev/null @@ -1,359 +0,0 @@ -package providers - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Interface represents the set of methods required for a complete resource -// provider plugin. -type Interface interface { - // GetSchema returns the complete schema for the provider. - GetSchema() GetSchemaResponse - - // PrepareProviderConfig allows the provider to validate the configuration - // values, and set or override any values with defaults. - PrepareProviderConfig(PrepareProviderConfigRequest) PrepareProviderConfigResponse - - // ValidateResourceTypeConfig allows the provider to validate the resource - // configuration values. - ValidateResourceTypeConfig(ValidateResourceTypeConfigRequest) ValidateResourceTypeConfigResponse - - // ValidateDataSource allows the provider to validate the data source - // configuration values. 
- ValidateDataSourceConfig(ValidateDataSourceConfigRequest) ValidateDataSourceConfigResponse - - // UpgradeResourceState is called when the state loader encounters an - // instance state whose schema version is less than the one reported by the - // currently-used version of the corresponding provider, and the upgraded - // result is used for any further processing. - UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse - - // Configure configures and initialized the provider. - Configure(ConfigureRequest) ConfigureResponse - - // Stop is called when the provider should halt any in-flight actions. - // - // Stop should not block waiting for in-flight actions to complete. It - // should take any action it wants and return immediately acknowledging it - // has received the stop request. Terraform will not make any further API - // calls to the provider after Stop is called. - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - // ReadResource refreshes a resource and returns its current state. - ReadResource(ReadResourceRequest) ReadResourceResponse - - // PlanResourceChange takes the current state and proposed state of a - // resource, and returns the planned final state. - PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse - - // ApplyResourceChange takes the planned state for a resource, which may - // yet contain unknown computed values, and applies the changes returning - // the final state. - ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse - - // ImportResourceState requests that the given resource be imported. - ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse - - // ReadDataSource returns the data source's current state. - ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse - - // Close shuts down the plugin process if applicable. - Close() error -} - -type GetSchemaResponse struct { - // Provider is the schema for the provider itself. - Provider Schema - - // ResourceTypes map the resource type name to that type's schema. - ResourceTypes map[string]Schema - - // DataSources maps the data source name to that data source's schema. - DataSources map[string]Schema - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -// Schema pairs a provider or resource schema with that schema's version. -// This is used to be able to upgrade the schema in UpgradeResourceState. -type Schema struct { - Version int64 - Block *configschema.Block -} - -type PrepareProviderConfigRequest struct { - // Config is the raw configuration value for the provider. - Config cty.Value -} - -type PrepareProviderConfigResponse struct { - // PreparedConfig is the configuration as prepared by the provider. - PreparedConfig cty.Value - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ValidateResourceTypeConfigRequest struct { - // TypeName is the name of the resource type to validate. - TypeName string - - // Config is the configuration value to validate, which may contain unknown - // values. - Config cty.Value -} - -type ValidateResourceTypeConfigResponse struct { - // Diagnostics contains any warnings or errors from the method call. 
-	Diagnostics tfdiags.Diagnostics
-}
-
-type ValidateDataSourceConfigRequest struct {
-	// TypeName is the name of the data source type to validate.
-	TypeName string
-
-	// Config is the configuration value to validate, which may contain unknown
-	// values.
-	Config cty.Value
-}
-
-type ValidateDataSourceConfigResponse struct {
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-}
-
-type UpgradeResourceStateRequest struct {
-	// TypeName is the name of the resource type being upgraded.
-	TypeName string
-
-	// Version is the version of the schema that created the current state.
-	Version int64
-
-	// RawStateJSON and RawStateFlatmap contain the state that needs to be
-	// upgraded to match the current schema version. Because the schema is
-	// unknown, this contains only the raw data as stored in the state.
-	// RawStateJSON is the current JSON state encoding.
-	// RawStateFlatmap is the legacy flatmap encoding.
-	// Only one of these fields may be set for the upgrade request.
-	RawStateJSON    []byte
-	RawStateFlatmap map[string]string
-}
-
-type UpgradeResourceStateResponse struct {
-	// UpgradedState is the newly upgraded resource state.
-	UpgradedState cty.Value
-
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-}
-
-type ConfigureRequest struct {
-	// TerraformVersion is the version string from the running instance of
-	// terraform. Providers can use TerraformVersion to verify compatibility,
-	// and to store for informational purposes.
-	TerraformVersion string
-
-	// Config is the complete configuration value for the provider.
-	Config cty.Value
-}
-
-type ConfigureResponse struct {
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-}
-
-type ReadResourceRequest struct {
-	// TypeName is the name of the resource type being read.
-	TypeName string
-
-	// PriorState contains the previously saved state value for this resource.
-	PriorState cty.Value
-
-	// Private is an opaque blob that will be stored in state along with the
-	// resource. It is intended only for interpretation by the provider itself.
-	Private []byte
-}
-
-type ReadResourceResponse struct {
-	// NewState contains the current state of the resource.
-	NewState cty.Value
-
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-
-	// Private is an opaque blob that will be stored in state along with the
-	// resource. It is intended only for interpretation by the provider itself.
-	Private []byte
-}
-
-type PlanResourceChangeRequest struct {
-	// TypeName is the name of the resource type to plan.
-	TypeName string
-
-	// PriorState is the previously saved state value for this resource.
-	PriorState cty.Value
-
-	// ProposedNewState is the expected state after the new configuration is
-	// applied. This is created by directly applying the configuration to the
-	// PriorState. The provider is then responsible for applying any further
-	// changes required to create the proposed final state.
-	ProposedNewState cty.Value
-
-	// Config is the resource configuration, before being merged with the
-	// PriorState. Any value not explicitly set in the configuration will be
-	// null. Config is supplied for reference, but Provider implementations
-	// should prefer the ProposedNewState in most circumstances.
-	Config cty.Value
-
-	// PriorPrivate is the previously saved private data returned from the
-	// provider during the last apply.
-	PriorPrivate []byte
-}
-
-type PlanResourceChangeResponse struct {
-	// PlannedState is the expected state of the resource once the current
-	// configuration is applied.
-	PlannedState cty.Value
-
-	// RequiresReplace is the list of the attributes that require
-	// resource replacement.
-	RequiresReplace []cty.Path
-
-	// PlannedPrivate is an opaque blob that is not interpreted by terraform
-	// core. This will be saved and relayed back to the provider during
-	// ApplyResourceChange.
-	PlannedPrivate []byte
-
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-
-	// LegacyTypeSystem is set only if the provider is using the legacy SDK
-	// whose type system cannot be precisely mapped into the Terraform type
-	// system. We use this to bypass certain consistency checks that would
-	// otherwise fail due to this imprecise mapping. No other provider or SDK
-	// implementation is permitted to set this.
-	LegacyTypeSystem bool
-}
-
-type ApplyResourceChangeRequest struct {
-	// TypeName is the name of the resource type being applied.
-	TypeName string
-
-	// PriorState is the current state of the resource.
-	PriorState cty.Value
-
-	// PlannedState is the state returned from PlanResourceChange, and should
-	// represent the new state, minus any remaining computed attributes.
-	PlannedState cty.Value
-
-	// Config is the resource configuration, before being merged with the
-	// PriorState. Any value not explicitly set in the configuration will be
-	// null. Config is supplied for reference, but Provider implementations
-	// should prefer the PlannedState in most circumstances.
-	Config cty.Value
-
-	// PlannedPrivate is the same value as returned by PlanResourceChange.
-	PlannedPrivate []byte
-}
-
-type ApplyResourceChangeResponse struct {
-	// NewState is the new complete state after applying the planned change.
-	// In the event of an error, NewState should represent the most recent
-	// known state of the resource, if it exists.
-	NewState cty.Value
-
-	// Private is an opaque blob that will be stored in state along with the
-	// resource. It is intended only for interpretation by the provider itself.
-	Private []byte
-
-	// Diagnostics contains any warnings or errors from the method call.
-	Diagnostics tfdiags.Diagnostics
-
-	// LegacyTypeSystem is set only if the provider is using the legacy SDK
-	// whose type system cannot be precisely mapped into the Terraform type
-	// system. We use this to bypass certain consistency checks that would
-	// otherwise fail due to this imprecise mapping. No other provider or SDK
-	// implementation is permitted to set this.
-	LegacyTypeSystem bool
-}
-
-type ImportResourceStateRequest struct {
-	// TypeName is the name of the resource type to be imported.
-	TypeName string
-
-	// ID is a string with which the provider can identify the resource to be
-	// imported.
-	ID string
-}
-
-type ImportResourceStateResponse struct {
-	// ImportedResources contains one or more state values related to the
-	// imported resource. It is not required that these be complete, only that
-	// there is enough identifying information for the provider to successfully
-	// update the states in ReadResource.
-	ImportedResources []ImportedResource
-
-	// Diagnostics contains any warnings or errors from the method call.
- Diagnostics tfdiags.Diagnostics -} - -// ImportedResource represents an object being imported into Terraform with the -// help of a provider. An ImportedObject is a RemoteObject that has been read -// by the provider's import handler but hasn't yet been committed to state. -type ImportedResource struct { - // TypeName is the name of the resource type associated with the - // returned state. It's possible for providers to import multiple related - // types with a single import request. - TypeName string - - // State is the state of the remote object being imported. This may not be - // complete, but must contain enough information to uniquely identify the - // resource. - State cty.Value - - // Private is an opaque blob that will be stored in state along with the - // resource. It is intended only for interpretation by the provider itself. - Private []byte -} - -// AsInstanceObject converts the receiving ImportedObject into a -// ResourceInstanceObject that has status ObjectReady. -// -// The returned object does not know its own resource type, so the caller must -// retain the ResourceType value from the source object if this information is -// needed. -// -// The returned object also has no dependency addresses, but the caller may -// freely modify the direct fields of the returned object without affecting -// the receiver. -func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject { - return &states.ResourceInstanceObject{ - Status: states.ObjectReady, - Value: ir.State, - Private: ir.Private, - } -} - -type ReadDataSourceRequest struct { - // TypeName is the name of the data source type to Read. - TypeName string - - // Config is the complete configuration for the requested data source. - Config cty.Value -} - -type ReadDataSourceResponse struct { - // State is the current state of the requested data source. - State cty.Value - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go deleted file mode 100644 index b42e492027..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go +++ /dev/null @@ -1,68 +0,0 @@ -package providers - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" -) - -// Resolver is an interface implemented by objects that are able to resolve -// a given set of resource provider version constraints into Factory -// callbacks. -type Resolver interface { - // Given a constraint map, return a Factory for each requested provider. - // If some or all of the constraints cannot be satisfied, return a non-nil - // slice of errors describing the problems. - ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) -} - -// ResolverFunc wraps a callback function and turns it into a Resolver -// implementation, for convenience in situations where a function and its -// associated closure are sufficient as a resolver implementation. -type ResolverFunc func(reqd discovery.PluginRequirements) (map[string]Factory, []error) - -// ResolveProviders implements Resolver by calling the -// wrapped function. -func (f ResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) { - return f(reqd) -} - -// ResolverFixed returns a Resolver that has a fixed set of provider factories -// provided by the caller. 
The returned resolver ignores version constraints -// entirely and just returns the given factory for each requested provider -// name. -// -// This function is primarily used in tests, to provide mock providers or -// in-process providers under test. -func ResolverFixed(factories map[string]Factory) Resolver { - return ResolverFunc(func(reqd discovery.PluginRequirements) (map[string]Factory, []error) { - ret := make(map[string]Factory, len(reqd)) - var errs []error - for name := range reqd { - if factory, exists := factories[name]; exists { - ret[name] = factory - } else { - errs = append(errs, fmt.Errorf("provider %q is not available", name)) - } - } - return ret, errs - }) -} - -// Factory is a function type that creates a new instance of a resource -// provider, or returns an error if that is impossible. -type Factory func() (Interface, error) - -// FactoryFixed is a helper that creates a Factory that just returns some given -// single provider. -// -// Unlike usual factories, the exact same instance is returned for each call -// to the factory and so this must be used in only specialized situations where -// the caller can take care to either not mutate the given provider at all -// or to mutate it in ways that will not cause unexpected behavior for others -// holding the same reference. -func FactoryFixed(p Interface) Factory { - return func() (Interface, error) { - return p, nil - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go deleted file mode 100644 index b03ba9a1bb..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package provisioners contains the interface and primary types to implement a -// Terraform resource provisioner. -package provisioners diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go deleted file mode 100644 index 7a9dca0a08..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go +++ /dev/null @@ -1,5 +0,0 @@ -package provisioners - -// Factory is a function type that creates a new instance of a resource -// provisioner, or returns an error if that is impossible. -type Factory func() (Interface, error) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go deleted file mode 100644 index 7d8f4076ba..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go +++ /dev/null @@ -1,82 +0,0 @@ -package provisioners - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// Interface is the set of methods required for a resource provisioner plugin. -type Interface interface { - // GetSchema returns the schema for the provisioner configuration. - GetSchema() GetSchemaResponse - - // ValidateProvisionerConfig allows the provisioner to validate the - // configuration values. - ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse - - // ProvisionResource runs the provisioner with provided configuration. 
- // ProvisionResource blocks until the execution is complete. - // If the returned diagnostics contain any errors, the resource will be - // left in a tainted state. - ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse - - // Stop is called to interrupt the provisioner. - // - // Stop should not block waiting for in-flight actions to complete. It - // should take any action it wants and return immediately acknowledging it - // has received the stop request. Terraform will not make any further API - // calls to the provisioner after Stop is called. - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - // Close shuts down the plugin process if applicable. - Close() error -} - -type GetSchemaResponse struct { - // Provisioner contains the schema for this provisioner. - Provisioner *configschema.Block - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -// UIOutput provides the Output method for resource provisioner -// plugins to write any output to the UI. -// -// Provisioners may call the Output method multiple times while Apply is in -// progress. It is invalid to call Output after Apply returns. -type UIOutput interface { - Output(string) -} - -type ValidateProvisionerConfigRequest struct { - // Config is the complete configuration to be used for the provisioner. - Config cty.Value -} - -type ValidateProvisionerConfigResponse struct { - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ProvisionResourceRequest struct { - // Config is the complete provisioner configuration. - Config cty.Value - - // Connection contains any information required to access the resource - // instance. - Connection cty.Value - - // UIOutput is used to return output during the Apply operation. - UIOutput UIOutput -} - -type ProvisionResourceResponse struct { - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go deleted file mode 100644 index 4ef22052c3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go +++ /dev/null @@ -1,346 +0,0 @@ -package registry - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "path" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/httpclient" - internalhttpclient "github.com/hashicorp/terraform-plugin-sdk/internal/httpclient" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/response" - "github.com/hashicorp/terraform-plugin-sdk/internal/version" - "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform-svchost/disco" -) - -const ( - xTerraformGet = "X-Terraform-Get" - xTerraformVersion = "X-Terraform-Version" - requestTimeout = 10 * time.Second - modulesServiceID = "modules.v1" - providersServiceID = "providers.v1" -) - -var tfVersion = version.String() - -// Client provides methods to query Terraform Registries. -type Client struct { - // this is the client to be used for all requests. 
-	client *http.Client
-
-	// services is a required *disco.Disco, which may have services and
-	// credentials pre-loaded.
-	services *disco.Disco
-}
-
-// NewClient returns a new initialized registry client.
-func NewClient(services *disco.Disco, client *http.Client) *Client {
-	if services == nil {
-		services = disco.New()
-	}
-
-	if client == nil {
-		client = internalhttpclient.New()
-		client.Timeout = requestTimeout
-	}
-
-	services.Transport = client.Transport
-
-	services.SetUserAgent(httpclient.TerraformUserAgent(version.String()))
-
-	return &Client{
-		client:   client,
-		services: services,
-	}
-}
-
-// Discover queries the host, and returns the URL for the registry.
-func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, error) {
-	service, err := c.services.DiscoverServiceURL(host, serviceID)
-	if err != nil {
-		return nil, &ServiceUnreachableError{err}
-	}
-	if !strings.HasSuffix(service.Path, "/") {
-		service.Path += "/"
-	}
-	return service, nil
-}
-
-// ModuleVersions queries the registry for a module, and returns the available versions.
-func (c *Client) ModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) {
-	host, err := module.SvcHost()
-	if err != nil {
-		return nil, err
-	}
-
-	service, err := c.Discover(host, modulesServiceID)
-	if err != nil {
-		return nil, err
-	}
-
-	p, err := url.Parse(path.Join(module.Module(), "versions"))
-	if err != nil {
-		return nil, err
-	}
-
-	service = service.ResolveReference(p)
-
-	log.Printf("[DEBUG] fetching module versions from %q", service)
-
-	req, err := http.NewRequest("GET", service.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	c.addRequestCreds(host, req)
-	req.Header.Set(xTerraformVersion, tfVersion)
-
-	resp, err := c.client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	switch resp.StatusCode {
-	case http.StatusOK:
-		// OK
-	case http.StatusNotFound:
-		return nil, &errModuleNotFound{addr: module}
-	default:
-		return nil, fmt.Errorf("error looking up module versions: %s", resp.Status)
-	}
-
-	var versions response.ModuleVersions
-
-	dec := json.NewDecoder(resp.Body)
-	if err := dec.Decode(&versions); err != nil {
-		return nil, err
-	}
-
-	for _, mod := range versions.Modules {
-		for _, v := range mod.Versions {
-			log.Printf("[DEBUG] found available version %q for %s", v.Version, mod.Source)
-		}
-	}
-
-	return &versions, nil
-}
-
-func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) {
-	creds, err := c.services.CredentialsForHost(host)
-	if err != nil {
-		log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err)
-		return
-	}
-
-	if creds != nil {
-		creds.PrepareRequest(req)
-	}
-}
-
-// ModuleLocation finds the download location for a specific module version.
-// This returns a string, because the final location may contain special go-getter syntax.
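The trailing slash that Discover forces onto the service path matters for the ResolveReference calls used throughout this client: without it, the last path segment of the base URL is replaced rather than extended. A small self-contained sketch (registry.example.com is a placeholder host):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	base, _ := url.Parse("https://registry.example.com/v1/modules/")
	rel, _ := url.Parse("hashicorp/consul/aws/versions")
	fmt.Println(base.ResolveReference(rel))
	// Prints .../v1/modules/hashicorp/consul/aws/versions; with base
	// ".../v1/modules" (no slash), "modules" would be dropped instead.
}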
-func (c *Client) ModuleLocation(module *regsrc.Module, version string) (string, error) { - host, err := module.SvcHost() - if err != nil { - return "", err - } - - service, err := c.Discover(host, modulesServiceID) - if err != nil { - return "", err - } - - var p *url.URL - if version == "" { - p, err = url.Parse(path.Join(module.Module(), "download")) - } else { - p, err = url.Parse(path.Join(module.Module(), version, "download")) - } - if err != nil { - return "", err - } - download := service.ResolveReference(p) - - log.Printf("[DEBUG] looking up module location from %q", download) - - req, err := http.NewRequest("GET", download.String(), nil) - if err != nil { - return "", err - } - - c.addRequestCreds(host, req) - req.Header.Set(xTerraformVersion, tfVersion) - - resp, err := c.client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - // there should be no body, but save it for logging - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("error reading response body from registry: %s", err) - } - - switch resp.StatusCode { - case http.StatusOK, http.StatusNoContent: - // OK - case http.StatusNotFound: - return "", fmt.Errorf("module %q version %q not found", module, version) - default: - // anything else is an error: - return "", fmt.Errorf("error getting download location for %q: %s resp:%s", module, resp.Status, body) - } - - // the download location is in the X-Terraform-Get header - location := resp.Header.Get(xTerraformGet) - if location == "" { - return "", fmt.Errorf("failed to get download URL for %q: %s resp:%s", module, resp.Status, body) - } - - // If location looks like it's trying to be a relative URL, treat it as - // one. - // - // We don't do this for just _any_ location, since the X-Terraform-Get - // header is a go-getter location rather than a URL, and so not all - // possible values will parse reasonably as URLs.) - // - // When used in conjunction with go-getter we normally require this header - // to be an absolute URL, but we are more liberal here because third-party - // registry implementations may not "know" their own absolute URLs if - // e.g. they are running behind a reverse proxy frontend, or such. - if strings.HasPrefix(location, "/") || strings.HasPrefix(location, "./") || strings.HasPrefix(location, "../") { - locationURL, err := url.Parse(location) - if err != nil { - return "", fmt.Errorf("invalid relative URL for %q: %s", module, err) - } - locationURL = download.ResolveReference(locationURL) - location = locationURL.String() - } - - return location, nil -} - -// TerraformProviderVersions queries the registry for a provider, and returns the available versions. 
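The relative-location handling in ModuleLocation above can be isolated to a few lines: any X-Terraform-Get value beginning with "/", "./" or "../" is resolved against the download endpoint that returned it, while absolute URLs and other go-getter syntax pass through untouched. A hedged sketch (absoluteLocation is an illustrative helper name; assumes net/url and strings are imported):

// absoluteLocation resolves a relative X-Terraform-Get value against
// the download URL; anything else is returned unchanged.
func absoluteLocation(download *url.URL, location string) (string, error) {
	if strings.HasPrefix(location, "/") ||
		strings.HasPrefix(location, "./") ||
		strings.HasPrefix(location, "../") {
		rel, err := url.Parse(location)
		if err != nil {
			return "", err
		}
		return download.ResolveReference(rel).String(), nil
	}
	return location, nil
}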
-func (c *Client) TerraformProviderVersions(provider *regsrc.TerraformProvider) (*response.TerraformProviderVersions, error) { - host, err := provider.SvcHost() - if err != nil { - return nil, err - } - - service, err := c.Discover(host, providersServiceID) - if err != nil { - return nil, err - } - - p, err := url.Parse(path.Join(provider.TerraformProvider(), "versions")) - if err != nil { - return nil, err - } - - service = service.ResolveReference(p) - - log.Printf("[DEBUG] fetching provider versions from %q", service) - - req, err := http.NewRequest("GET", service.String(), nil) - if err != nil { - return nil, err - } - - c.addRequestCreds(host, req) - req.Header.Set(xTerraformVersion, tfVersion) - - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - // OK - case http.StatusNotFound: - return nil, &errProviderNotFound{addr: provider} - default: - return nil, fmt.Errorf("error looking up provider versions: %s", resp.Status) - } - - var versions response.TerraformProviderVersions - - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&versions); err != nil { - return nil, err - } - - return &versions, nil -} - -// TerraformProviderLocation queries the registry for a provider download metadata -func (c *Client) TerraformProviderLocation(provider *regsrc.TerraformProvider, version string) (*response.TerraformProviderPlatformLocation, error) { - host, err := provider.SvcHost() - if err != nil { - return nil, err - } - - service, err := c.Discover(host, providersServiceID) - if err != nil { - return nil, err - } - - p, err := url.Parse(path.Join( - provider.TerraformProvider(), - version, - "download", - provider.OS, - provider.Arch, - )) - if err != nil { - return nil, err - } - - service = service.ResolveReference(p) - - log.Printf("[DEBUG] fetching provider location from %q", service) - - req, err := http.NewRequest("GET", service.String(), nil) - if err != nil { - return nil, err - } - - c.addRequestCreds(host, req) - req.Header.Set(xTerraformVersion, tfVersion) - - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var loc response.TerraformProviderPlatformLocation - - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&loc); err != nil { - return nil, err - } - - switch resp.StatusCode { - case http.StatusOK, http.StatusNoContent: - // OK - case http.StatusNotFound: - return nil, fmt.Errorf("provider %q version %q not found", provider.TerraformProvider(), version) - default: - // anything else is an error: - return nil, fmt.Errorf("error getting download location for %q: %s", provider.TerraformProvider(), resp.Status) - } - - return &loc, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go deleted file mode 100644 index b05438c4db..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go +++ /dev/null @@ -1,55 +0,0 @@ -package registry - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" - "github.com/hashicorp/terraform-svchost/disco" -) - -type errModuleNotFound struct { - addr *regsrc.Module -} - -func (e *errModuleNotFound) Error() string { - return fmt.Sprintf("module %s not found", e.addr) -} - -// IsModuleNotFound returns true only if the given error is a "module not found" -// error. 
This allows callers to recognize this particular error condition
-// as distinct from operational errors such as poor network connectivity.
-func IsModuleNotFound(err error) bool {
-	_, ok := err.(*errModuleNotFound)
-	return ok
-}
-
-type errProviderNotFound struct {
-	addr *regsrc.TerraformProvider
-}
-
-func (e *errProviderNotFound) Error() string {
-	return fmt.Sprintf("provider %s not found", e.addr)
-}
-
-// IsServiceNotProvided returns true only if the given error is a "service not provided"
-// error. This allows callers to recognize this particular error condition
-// as distinct from operational errors such as poor network connectivity.
-func IsServiceNotProvided(err error) bool {
-	_, ok := err.(*disco.ErrServiceNotProvided)
-	return ok
-}
-
-// ServiceUnreachableError indicates that the registry service is unreachable.
-type ServiceUnreachableError struct {
-	err error
-}
-
-func (e *ServiceUnreachableError) Error() string {
-	return e.err.Error()
-}
-
-// IsServiceUnreachable returns true if the registry/discovery service was unreachable.
-func IsServiceUnreachable(err error) bool {
-	_, ok := err.(*ServiceUnreachableError)
-	return ok
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go
deleted file mode 100644
index c9bc40bee8..0000000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package regsrc
-
-import (
-	"regexp"
-	"strings"
-
-	"github.com/hashicorp/terraform-svchost"
-)
-
-var (
-	// InvalidHostString is a placeholder returned when a raw host can't be
-	// converted by the IDNA spec. It will never be returned for any host for which
-	// Valid() is true.
-	InvalidHostString = ""
-
-	// urlLabelEndSubRe is a sub-expression that matches any character that's
-	// allowed at the start or end of a URL label according to RFC 1123.
-	urlLabelEndSubRe = "[0-9A-Za-z]"
-
-	// urlLabelMidSubRe is a sub-expression that matches any character that's
-	// allowed in a non-start, non-end position of a URL label according to
-	// RFC 1123.
-	urlLabelMidSubRe = "[0-9A-Za-z-]"
-
-	// urlLabelUnicodeSubRe is a sub-expression that matches any non-ASCII char
-	// in an IDN (Unicode) display URL. It's not strict - there are only ~15k
-	// valid Unicode points in the IDN RFC (some with conditions). We are just going
-	// with being liberal with matching and then erroring if we fail to convert
-	// to punycode later (which validates chars fully). This at least ensures
-	// ASCII chars disallowed by the RFC 1123 parts above don't become legal
-	// again.
-	urlLabelUnicodeSubRe = "[^[:ascii:]]"
-
-	// hostLabelSubRe is the sub-expression that matches a valid hostname label.
-	// It does not anchor the start or end so it can be composed into more
-	// complex RegExps below. Note that for sanity we don't handle disallowing
-	// raw punycode in this regexp (esp. since re2 doesn't support negative
-	// lookbehind), but we can capture its presence here to check later.
-	hostLabelSubRe = "" +
-		// Match valid initial char, or unicode char
-		"(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" +
-		// Optionally, match 0 to 61 valid URL or Unicode chars,
-		// followed by one valid end char or unicode char
-		"(?:" +
-		"(?:" + urlLabelMidSubRe + "|" + urlLabelUnicodeSubRe + "){0,61}" +
-		"(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" +
-		")?"
-
-	// hostSubRe is the sub-expression that matches a valid host prefix.
-	// Allows custom port.
-	hostSubRe = hostLabelSubRe + "(?:\\." + hostLabelSubRe + ")+(?::\\d+)?"
-
-	// hostRe is a regexp that matches a valid host prefix. Additional
-	// validation of unicode strings is needed for matches.
-	hostRe = regexp.MustCompile("^" + hostSubRe + "$")
-)
-
-// FriendlyHost describes a registry instance identified in source strings by a
-// simple bare hostname like registry.terraform.io.
-type FriendlyHost struct {
-	Raw string
-}
-
-func NewFriendlyHost(host string) *FriendlyHost {
-	return &FriendlyHost{Raw: host}
-}
-
-// ParseFriendlyHost attempts to parse a valid "friendly host" prefix from the
-// given string. If no valid prefix is found, host will be nil and rest will
-// contain the full source string. The host prefix must terminate at the end of
-// the input or at the first / character. If one or more characters exist after
-// the first /, they will be returned as rest (without the / delimiter).
-// Hostnames containing punycode WILL be parsed successfully since they may have
-// come from an internal normalized source string; however, they should be
-// considered invalid if the string came from a user directly. This must be
-// checked explicitly for user-input strings by calling Valid() on the
-// returned host.
-func ParseFriendlyHost(source string) (host *FriendlyHost, rest string) {
-	parts := strings.SplitN(source, "/", 2)
-
-	if hostRe.MatchString(parts[0]) {
-		host = &FriendlyHost{Raw: parts[0]}
-		if len(parts) == 2 {
-			rest = parts[1]
-		}
-		return
-	}
-
-	// No match, return whole string as rest along with nil host
-	rest = source
-	return
-}
-
-// Valid returns whether the host prefix is considered valid in any case.
-// Examples of invalid prefixes include ones that don't conform to the host
-// name specifications. Note that IDN prefixes containing punycode are not valid
-// input, which we expect to always be in user-entered or normalised display form.
-func (h *FriendlyHost) Valid() bool {
-	return svchost.IsValid(h.Raw)
-}
-
-// Display returns the host formatted for display to the user in CLI or web
-// output.
-func (h *FriendlyHost) Display() string {
-	return svchost.ForDisplay(h.Raw)
-}
-
-// Normalized returns the host formatted for internal reference or comparison.
-func (h *FriendlyHost) Normalized() string {
-	host, err := svchost.ForComparison(h.Raw)
-	if err != nil {
-		return InvalidHostString
-	}
-	return string(host)
-}
-
-// String returns the host formatted as the user originally typed it, assuming it
-// was parsed from user input.
-func (h *FriendlyHost) String() string {
-	return h.Raw
-}
-
-// Equal compares the FriendlyHost against another instance taking normalization
-// into account. Invalid hosts cannot be compared and will always return false.
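// Example: how ParseFriendlyHost splits a host prefix from the rest of a
// source string, and the Valid() check the docs above require for raw user
// input. A sketch; the hostnames shown are assumptions.
//
//	host, rest := regsrc.ParseFriendlyHost("registry.example.com/acme/net/aws")
//	// host.Raw == "registry.example.com", rest == "acme/net/aws"
//	if host != nil && !host.Valid() {
//		// reject: parsed, but not acceptable as direct user input
//	}
//
//	host2, rest2 := regsrc.ParseFriendlyHost("acme/net/aws")
//	// host2 == nil, rest2 == "acme/net/aws": a single label with no dot
//	// does not match the host regexp, so there is no host prefix here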
-func (h *FriendlyHost) Equal(other *FriendlyHost) bool { - if other == nil { - return false - } - - otherHost, err := svchost.ForComparison(other.Raw) - if err != nil { - return false - } - - host, err := svchost.ForComparison(h.Raw) - if err != nil { - return false - } - - return otherHost == host -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go deleted file mode 100644 index eb37481ff3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go +++ /dev/null @@ -1,175 +0,0 @@ -package regsrc - -import ( - "errors" - "fmt" - "regexp" - "strings" - - svchost "github.com/hashicorp/terraform-svchost" -) - -var ( - ErrInvalidModuleSource = errors.New("not a valid registry module source") - - // nameSubRe is the sub-expression that matches a valid module namespace or - // name. It's strictly a super-set of what GitHub allows for user/org and - // repo names respectively, but more restrictive than our original repo-name - // regex which allowed periods but could cause ambiguity with hostname - // prefixes. It does not anchor the start or end so it can be composed into - // more complex RegExps below. Alphanumeric with - and _ allowed in non - // leading or trailing positions. Max length 64 chars. (GitHub username is - // 38 max.) - nameSubRe = "[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?" - - // providerSubRe is the sub-expression that matches a valid provider. It - // does not anchor the start or end so it can be composed into more complex - // RegExps below. Only lowercase chars and digits are supported in practice. - // Max length 64 chars. - providerSubRe = "[0-9a-z]{1,64}" - - // moduleSourceRe is a regular expression that matches the basic - // namespace/name/provider[//...] format for registry sources. It assumes - // any FriendlyHost prefix has already been removed if present. - moduleSourceRe = regexp.MustCompile( - fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$", - nameSubRe, nameSubRe, providerSubRe)) - - // these hostnames are not allowed as registry sources, because they are - // already special case module sources in terraform. - disallowed = map[string]bool{ - "github.com": true, - "bitbucket.org": true, - } -) - -// Module describes a Terraform Registry Module source. -type Module struct { - // RawHost is the friendly host prefix if one was present. It might be nil - // if the original source had no host prefix which implies - // PublicRegistryHost but is distinct from having an actual pointer to - // PublicRegistryHost since it encodes the fact the original string didn't - // include a host prefix at all which is significant for recovering actual - // input not just normalized form. Most callers should access it with Host() - // which will return public registry host instance if it's nil. - RawHost *FriendlyHost - RawNamespace string - RawName string - RawProvider string - RawSubmodule string -} - -// ParseModuleSource attempts to parse source as a Terraform registry module -// source. If the string is not found to be in a valid format, -// ErrInvalidModuleSource is returned. Note that this can only be used on -// "input" strings, e.g. either ones supplied by the user or potentially -// normalised but in Display form (unicode). It will fail to parse a source with -// a punycoded domain since this is not permitted input from a user. 
If you have -// an already normalized string internally, you can compare it without parsing -// by comparing with the normalized version of the subject with the normal -// string equality operator. -func ParseModuleSource(source string) (*Module, error) { - // See if there is a friendly host prefix. - host, rest := ParseFriendlyHost(source) - if host != nil { - if !host.Valid() || disallowed[host.Display()] { - return nil, ErrInvalidModuleSource - } - } - - matches := moduleSourceRe.FindStringSubmatch(rest) - if len(matches) < 4 { - return nil, ErrInvalidModuleSource - } - - m := &Module{ - RawHost: host, - RawNamespace: matches[1], - RawName: matches[2], - RawProvider: matches[3], - } - - if len(matches) == 5 { - m.RawSubmodule = matches[4] - } - - return m, nil -} - -// Display returns the source formatted for display to the user in CLI or web -// output. -func (m *Module) Display() string { - return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Display()), false) -} - -// Normalized returns the source formatted for internal reference or comparison. -func (m *Module) Normalized() string { - return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Normalized()), false) -} - -// String returns the source formatted as the user originally typed it assuming -// it was parsed from user input. -func (m *Module) String() string { - // Don't normalize public registry hostname - leave it exactly like the user - // input it. - hostPrefix := "" - if m.RawHost != nil { - hostPrefix = m.RawHost.String() + "/" - } - return m.formatWithPrefix(hostPrefix, true) -} - -// Equal compares the module source against another instance taking -// normalization into account. -func (m *Module) Equal(other *Module) bool { - return m.Normalized() == other.Normalized() -} - -// Host returns the FriendlyHost object describing which registry this module is -// in. If the original source string had not host component this will return the -// PublicRegistryHost. -func (m *Module) Host() *FriendlyHost { - if m.RawHost == nil { - return PublicRegistryHost - } - return m.RawHost -} - -func (m *Module) normalizedHostPrefix(host string) string { - if m.Host().Equal(PublicRegistryHost) { - return "" - } - return host + "/" -} - -func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string { - suffix := "" - if m.RawSubmodule != "" { - suffix = "//" + m.RawSubmodule - } - str := fmt.Sprintf("%s%s/%s/%s%s", hostPrefix, m.RawNamespace, m.RawName, - m.RawProvider, suffix) - - // lower case by default - if !preserveCase { - return strings.ToLower(str) - } - return str -} - -// Module returns just the registry ID of the module, without a hostname or -// suffix. -func (m *Module) Module() string { - return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider) -} - -// SvcHost returns the svchost.Hostname for this module. Since FriendlyHost may -// contain an invalid hostname, this also returns an error indicating if it -// could be converted to a svchost.Hostname. If no host is specified, the -// default PublicRegistryHost is returned. 
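// Example: parsing a registry module source that carries both a host prefix
// and a submodule path, per ParseModuleSource above. Illustrative sketch; the
// address is an assumption.
//
//	mod, err := regsrc.ParseModuleSource("registry.example.com/acme/consul/aws//modules/cluster")
//	if err != nil {
//		// ErrInvalidModuleSource is returned for disallowed hosts
//		// (github.com, bitbucket.org) or strings that don't match the
//		// namespace/name/provider shape
//	}
//	// mod.Module()     == "acme/consul/aws" (no host, no submodule)
//	// mod.RawSubmodule == "modules/cluster"
//	// mod.Normalized() == "registry.example.com/acme/consul/aws//modules/cluster"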
-func (m *Module) SvcHost() (svchost.Hostname, error) { - if m.RawHost == nil { - return svchost.ForComparison(PublicRegistryHost.Raw) - } - return svchost.ForComparison(m.RawHost.Raw) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go deleted file mode 100644 index c430bf1413..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package regsrc provides helpers for working with source strings that identify -// resources within a Terraform registry. -package regsrc - -var ( - // PublicRegistryHost is a FriendlyHost that represents the public registry. - PublicRegistryHost = NewFriendlyHost("registry.terraform.io") -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go deleted file mode 100644 index 7205d03b8c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go +++ /dev/null @@ -1,60 +0,0 @@ -package regsrc - -import ( - "fmt" - "runtime" - "strings" - - "github.com/hashicorp/terraform-svchost" -) - -var ( - // DefaultProviderNamespace represents the namespace for canonical - // HashiCorp-controlled providers. - DefaultProviderNamespace = "-" -) - -// TerraformProvider describes a Terraform Registry Provider source. -type TerraformProvider struct { - RawHost *FriendlyHost - RawNamespace string - RawName string - OS string - Arch string -} - -// NewTerraformProvider constructs a new provider source. -func NewTerraformProvider(name, os, arch string) *TerraformProvider { - if os == "" { - os = runtime.GOOS - } - if arch == "" { - arch = runtime.GOARCH - } - - // separate namespace if included - namespace := DefaultProviderNamespace - if names := strings.SplitN(name, "/", 2); len(names) == 2 { - namespace, name = names[0], names[1] - } - p := &TerraformProvider{ - RawHost: PublicRegistryHost, - RawNamespace: namespace, - RawName: name, - OS: os, - Arch: arch, - } - - return p -} - -// Provider returns just the registry ID of the provider -func (p *TerraformProvider) TerraformProvider() string { - return fmt.Sprintf("%s/%s", p.RawNamespace, p.RawName) -} - -// SvcHost returns the svchost.Hostname for this provider. The -// default PublicRegistryHost is returned. -func (p *TerraformProvider) SvcHost() (svchost.Hostname, error) { - return svchost.ForComparison(PublicRegistryHost.Raw) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go deleted file mode 100644 index 06163963ef..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go +++ /dev/null @@ -1,46 +0,0 @@ -package response - -// ModuleSubmodule is the metadata about a specific submodule within -// a module. This includes the root module as a special case. -type ModuleSubmodule struct { - Path string `json:"path"` - Readme string `json:"readme"` - Empty bool `json:"empty"` - - Inputs []*ModuleInput `json:"inputs"` - Outputs []*ModuleOutput `json:"outputs"` - Dependencies []*ModuleDep `json:"dependencies"` - Resources []*ModuleResource `json:"resources"` -} - -// ModuleInput is an input for a module. 
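// Example: constructing provider sources with defaulted namespace, OS, and
// architecture, per NewTerraformProvider above. Sketch only; the provider
// names are assumptions.
//
//	p := regsrc.NewTerraformProvider("random", "", "")
//	// p.RawNamespace == "-" (DefaultProviderNamespace), p.RawName == "random"
//	// p.OS and p.Arch default to runtime.GOOS and runtime.GOARCH
//	// p.TerraformProvider() == "-/random"
//
//	q := regsrc.NewTerraformProvider("acme/random", "linux", "amd64")
//	// q.TerraformProvider() == "acme/random"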
-type ModuleInput struct {
-	Name        string `json:"name"`
-	Description string `json:"description"`
-	Default     string `json:"default"`
-}
-
-// ModuleOutput is an output for a module.
-type ModuleOutput struct {
-	Name        string `json:"name"`
-	Description string `json:"description"`
-}
-
-// ModuleDep is a dependency of a module.
-type ModuleDep struct {
-	Name    string `json:"name"`
-	Source  string `json:"source"`
-	Version string `json:"version"`
-}
-
-// ModuleProviderDep is the output for a provider dependency.
-type ModuleProviderDep struct {
-	Name    string `json:"name"`
-	Version string `json:"version"`
-}
-
-// ModuleResource is a resource used by a module.
-type ModuleResource struct {
-	Name string `json:"name"`
-	Type string `json:"type"`
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go
deleted file mode 100644
index f69e9750c2..0000000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package response
-
-// ModuleVersions is the response format that contains all metadata about module
-// versions needed for the terraform CLI to resolve version constraints. See RFC
-// TF-042 for details on this format.
-type ModuleVersions struct {
-	Modules []*ModuleProviderVersions `json:"modules"`
-}
-
-// ModuleProviderVersions is the response format for a single module instance,
-// containing metadata about all versions and their dependencies.
-type ModuleProviderVersions struct {
-	Source   string           `json:"source"`
-	Versions []*ModuleVersion `json:"versions"`
-}
-
-// ModuleVersion is the output metadata for a given version needed by the CLI to
-// resolve candidate versions to satisfy requirements.
-type ModuleVersion struct {
-	Version    string              `json:"version"`
-	Root       VersionSubmodule    `json:"root"`
-	Submodules []*VersionSubmodule `json:"submodules"`
-}
-
-// VersionSubmodule is the output metadata for a submodule within a given
-// version needed by the CLI to resolve candidate versions to satisfy
-// requirements. When representing the Root in JSON the path is omitted.
-type VersionSubmodule struct {
-	Path         string               `json:"path,omitempty"`
-	Providers    []*ModuleProviderDep `json:"providers"`
-	Dependencies []*ModuleDep         `json:"dependencies"`
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go
deleted file mode 100644
index 75a925490a..0000000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package response
-
-import (
-	"net/url"
-	"strconv"
-)
-
-// PaginationMeta is a structure included in responses for pagination.
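// Example: the JSON wire shape implied by the struct tags above, decoded into
// response.ModuleVersions with the standard library's encoding/json. A minimal
// sketch; the payload values are made up.
//
//	payload := []byte(`{
//	  "modules": [{
//	    "source": "acme/consul/aws",
//	    "versions": [
//	      {"version": "0.1.0", "root": {"providers": [], "dependencies": []}, "submodules": []}
//	    ]
//	  }]
//	}`)
//	var vs response.ModuleVersions
//	if err := json.Unmarshal(payload, &vs); err != nil {
//		// handle a malformed registry response
//	}
//	// vs.Modules[0].Versions[0].Version == "0.1.0"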
-type PaginationMeta struct { - Limit int `json:"limit"` - CurrentOffset int `json:"current_offset"` - NextOffset *int `json:"next_offset,omitempty"` - PrevOffset *int `json:"prev_offset,omitempty"` - NextURL string `json:"next_url,omitempty"` - PrevURL string `json:"prev_url,omitempty"` -} - -// NewPaginationMeta populates pagination meta data from result parameters -func NewPaginationMeta(offset, limit int, hasMore bool, currentURL string) PaginationMeta { - pm := PaginationMeta{ - Limit: limit, - CurrentOffset: offset, - } - - // Calculate next/prev offsets, leave nil if not valid pages - nextOffset := offset + limit - if hasMore { - pm.NextOffset = &nextOffset - } - - prevOffset := offset - limit - if prevOffset < 0 { - prevOffset = 0 - } - if prevOffset < offset { - pm.PrevOffset = &prevOffset - } - - // If URL format provided, populate URLs. Intentionally swallow URL errors for now, API should - // catch missing URLs if we call with bad URL arg (and we care about them being present). - if currentURL != "" && pm.NextOffset != nil { - pm.NextURL, _ = setQueryParam(currentURL, "offset", *pm.NextOffset, 0) - } - if currentURL != "" && pm.PrevOffset != nil { - pm.PrevURL, _ = setQueryParam(currentURL, "offset", *pm.PrevOffset, 0) - } - - return pm -} - -func setQueryParam(baseURL, key string, val, defaultVal int) (string, error) { - u, err := url.Parse(baseURL) - if err != nil { - return "", err - } - q := u.Query() - if val == defaultVal { - // elide param if it's the default value - q.Del(key) - } else { - q.Set(key, strconv.Itoa(val)) - } - u.RawQuery = q.Encode() - return u.String(), nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go deleted file mode 100644 index c2c333b0dc..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go +++ /dev/null @@ -1,95 +0,0 @@ -package response - -import ( - "sort" - "strings" - - version "github.com/hashicorp/go-version" -) - -// TerraformProvider is the response structure for all required information for -// Terraform to choose a download URL. It must include all versions and all -// platforms for Terraform to perform version and os/arch constraint matching -// locally. -type TerraformProvider struct { - ID string `json:"id"` - - Versions []*TerraformProviderVersion `json:"versions"` -} - -// TerraformProviderVersion is the Terraform-specific response structure for a -// provider version. -type TerraformProviderVersion struct { - Version string `json:"version"` - Protocols []string `json:"protocols"` - - Platforms []*TerraformProviderPlatform `json:"platforms"` -} - -// TerraformProviderVersions is the Terraform-specific response structure for an -// array of provider versions -type TerraformProviderVersions struct { - ID string `json:"id"` - Versions []*TerraformProviderVersion `json:"versions"` - Warnings []string `json:"warnings"` -} - -// TerraformProviderPlatform is the Terraform-specific response structure for a -// provider platform. -type TerraformProviderPlatform struct { - OS string `json:"os"` - Arch string `json:"arch"` -} - -// TerraformProviderPlatformLocation is the Terraform-specific response -// structure for a provider platform with all details required to perform a -// download. 
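// Example: how NewPaginationMeta above fills offsets and URLs, including the
// elision of the default offset from PrevURL. Sketch with assumed values.
//
//	pm := response.NewPaginationMeta(10, 10, true, "/v1/modules?offset=10")
//	// pm.CurrentOffset == 10, *pm.NextOffset == 20, *pm.PrevOffset == 0
//	// pm.NextURL == "/v1/modules?offset=20"
//	// pm.PrevURL == "/v1/modules" (offset elided because 0 is the default)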
-type TerraformProviderPlatformLocation struct { - Protocols []string `json:"protocols"` - OS string `json:"os"` - Arch string `json:"arch"` - Filename string `json:"filename"` - DownloadURL string `json:"download_url"` - ShasumsURL string `json:"shasums_url"` - ShasumsSignatureURL string `json:"shasums_signature_url"` - Shasum string `json:"shasum"` - - SigningKeys SigningKeyList `json:"signing_keys"` -} - -// SigningKeyList is the response structure for a list of signing keys. -type SigningKeyList struct { - GPGKeys []*GPGKey `json:"gpg_public_keys"` -} - -// GPGKey is the response structure for a GPG key. -type GPGKey struct { - ASCIIArmor string `json:"ascii_armor"` - Source string `json:"source"` - SourceURL *string `json:"source_url"` -} - -// Collection type for TerraformProviderVersion -type ProviderVersionCollection []*TerraformProviderVersion - -// GPGASCIIArmor returns an ASCII-armor-formatted string for all of the gpg -// keys in the response. -func (signingKeys *SigningKeyList) GPGASCIIArmor() string { - keys := []string{} - - for _, gpgKey := range signingKeys.GPGKeys { - keys = append(keys, gpgKey.ASCIIArmor) - } - - return strings.Join(keys, "\n") -} - -// Sort sorts versions from newest to oldest. -func (v ProviderVersionCollection) Sort() { - sort.Slice(v, func(i, j int) bool { - versionA, _ := version.NewVersion(v[i].Version) - versionB, _ := version.NewVersion(v[j].Version) - - return versionA.GreaterThan(versionB) - }) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go deleted file mode 100644 index 7dd74ac785..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package states contains the types that are used to represent Terraform -// states. -package states diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go deleted file mode 100644 index 0dc73499a3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go +++ /dev/null @@ -1,35 +0,0 @@ -// Code generated by "stringer -type EachMode"; DO NOT EDIT. - -package states - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[NoEach-0] - _ = x[EachList-76] - _ = x[EachMap-77] -} - -const ( - _EachMode_name_0 = "NoEach" - _EachMode_name_1 = "EachListEachMap" -) - -var ( - _EachMode_index_1 = [...]uint8{0, 8, 15} -) - -func (i EachMode) String() string { - switch { - case i == 0: - return _EachMode_name_0 - case 76 <= i && i <= 77: - i -= 76 - return _EachMode_name_1[_EachMode_index_1[i]:_EachMode_index_1[i+1]] - default: - return "EachMode(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go deleted file mode 100644 index 891adc003c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go +++ /dev/null @@ -1,20 +0,0 @@ -package states - -// Generation is used to represent multiple objects in a succession of objects -// represented by a single resource instance address. 
A resource instance can
-// have multiple generations over its lifetime due to object replacement
-// (when a change can't be applied without destroying and re-creating), and
-// multiple generations can exist at the same time when create_before_destroy
-// is used.
-//
-// A Generation value can either be the value of the variable "CurrentGen" or
-// a value of type DeposedKey. Generation values can be compared for equality
-// using "==" and used as map keys. The zero value of Generation (nil) is not
-// a valid generation and must not be used.
-type Generation interface {
-	generation()
-}
-
-// CurrentGen is the Generation representing the currently-active object for
-// a resource instance.
-var CurrentGen Generation
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go
deleted file mode 100644
index 3bb717d332..0000000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package states
-
-import (
-	"github.com/zclconf/go-cty/cty"
-	ctyjson "github.com/zclconf/go-cty/cty/json"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-)
-
-// ResourceInstanceObject is the local representation of a specific remote
-// object associated with a resource instance. In practice not all remote
-// objects are actually remote in the sense of being accessed over the network,
-// but this is the most common case.
-//
-// It is not valid to mutate a ResourceInstanceObject once it has been created.
-// Instead, create a new object and replace the existing one.
-type ResourceInstanceObject struct {
-	// Value is the object-typed value representing the remote object within
-	// Terraform.
-	Value cty.Value
-
-	// Private is an opaque value set by the provider when this object was
-	// last created or updated. Terraform Core does not use this value in
-	// any way and it is not exposed anywhere in the user interface, so
-	// a provider can use it for retaining any necessary private state.
-	Private []byte
-
-	// Status represents the "readiness" of the object as of the last time
-	// it was updated.
-	Status ObjectStatus
-
-	// Dependencies is a set of other addresses in the same module which
-	// this instance depended on when the given attributes were evaluated.
-	// This is used to construct the dependency relationships for an object
-	// whose configuration is no longer available, such as if it has been
-	// removed from configuration altogether, or is now deposed.
-	Dependencies []addrs.Referenceable
-}
-
-// ObjectStatus represents the status of a remote object.
-type ObjectStatus rune
-
-//go:generate go run golang.org/x/tools/cmd/stringer -type ObjectStatus
-
-const (
-	// ObjectReady is an object status for an object that is ready to use.
-	ObjectReady ObjectStatus = 'R'
-
-	// ObjectTainted is an object status representing an object that is in
-	// an unrecoverable bad state due to a partial failure during a create,
-	// update, or delete operation. Since it cannot be moved into the
-	// ObjectReady state, a tainted object must be replaced.
-	ObjectTainted ObjectStatus = 'T'
-
-	// ObjectPlanned is a special object status used only for the transient
-	// placeholder objects we place into state during the refresh and plan
-	// walks to stand in for objects that will be created during apply.
-	//
-	// Any object of this status must have a corresponding change recorded
-	// in the current plan, whose value must then be used in preference to
-	// the value stored in state when evaluating expressions. A planned
-	// object stored in state will be incomplete if any of its attributes are
-	// not yet known, and the plan must be consulted in order to "see" those
-	// unknown values, because the state is not able to represent them.
-	ObjectPlanned ObjectStatus = 'P'
-)
-
-// Encode marshals the value within the receiver to produce a
-// ResourceInstanceObjectSrc ready to be written to a state file.
-//
-// The given type must be the implied type of the resource type schema, and
-// the given value must conform to it. It is important to pass the schema
-// type and not the object's own type so that dynamically-typed attributes
-// will be stored correctly. The caller must also provide the version number
-// of the schema that the given type was derived from, which will be recorded
-// in the source object so it can be used to detect when schema migration is
-// required on read.
-//
-// The returned object may share internal references with the receiver and
-// so the caller must not mutate the receiver any further once this
-// method is called.
-func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*ResourceInstanceObjectSrc, error) {
-	// Our state serialization can't represent unknown values, so we convert
-	// them to nulls here. This is lossy, but nobody should be writing unknown
-	// values here and expecting to get them out again later.
-	//
-	// We get unknown values here while we're building out a "planned state"
-	// during the plan phase, but the value stored in the plan takes precedence
-	// for expression evaluation. The apply step should never produce unknown
-	// values, but if it does it's the responsibility of the caller to detect
-	// and raise an error about that.
-	val := cty.UnknownAsNull(o.Value)
-
-	src, err := ctyjson.Marshal(val, ty)
-	if err != nil {
-		return nil, err
-	}
-
-	return &ResourceInstanceObjectSrc{
-		SchemaVersion: schemaVersion,
-		AttrsJSON:     src,
-		Private:       o.Private,
-		Status:        o.Status,
-		Dependencies:  o.Dependencies,
-	}, nil
-}
-
-// AsTainted returns a deep copy of the receiver with the status updated to
-// ObjectTainted.
-func (o *ResourceInstanceObject) AsTainted() *ResourceInstanceObject {
-	if o == nil {
-		// A nil object can't be tainted, but we'll allow this anyway to
-		// avoid a crash, since we presumably intend to eventually record
-		// the object as having been deleted anyway.
-		return nil
-	}
-	ret := o.DeepCopy()
-	ret.Status = ObjectTainted
-	return ret
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go
deleted file mode 100644
index 728ad80d12..0000000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package states
-
-import (
-	"github.com/zclconf/go-cty/cty"
-	ctyjson "github.com/zclconf/go-cty/cty/json"
-
-	"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
-	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim"
-)
-
-// ResourceInstanceObjectSrc is a not-fully-decoded version of
-// ResourceInstanceObject.
Decoding of it can be completed by first handling
-// any schema migration steps to get to the latest schema version and then
-// calling method Decode with the implied type of the latest schema.
-type ResourceInstanceObjectSrc struct {
-	// SchemaVersion is the resource-type-specific schema version number that
-	// was current when either AttrsJSON or AttrsFlat was encoded. Migration
-	// steps are required if this is less than the current version number
-	// reported by the corresponding provider.
-	SchemaVersion uint64
-
-	// AttrsJSON is a JSON-encoded representation of the object attributes,
-	// encoding the value (of the object type implied by the associated resource
-	// type schema) that represents this remote object in Terraform Language
-	// expressions, and is compared with configuration when producing a diff.
-	//
-	// This is retained in JSON format here because it may require preprocessing
-	// before decoding if, for example, the stored attributes are for an older
-	// schema version which the provider must upgrade before use. If the
-	// version is current, it is valid to simply decode this using the
-	// type implied by the current schema, without the need for the provider
-	// to perform an upgrade first.
-	//
-	// When writing a ResourceInstanceObject into the state, AttrsJSON should
-	// always be conformant to the current schema version and the current
-	// schema version should be recorded in the SchemaVersion field.
-	AttrsJSON []byte
-
-	// AttrsFlat is a legacy form of attributes used in older state file
-	// formats, and in the new state format for objects that haven't yet been
-	// upgraded. This attribute is mutually exclusive with AttrsJSON: for any
-	// ResourceInstanceObjectSrc, only one of these attributes may be populated
-	// and the other must be nil.
-	//
-	// An instance object with this field populated should be upgraded to use
-	// AttrsJSON at the earliest opportunity, since this legacy flatmap-based
-	// format will be phased out over time. AttrsFlat should not be used when
-	// writing new or updated objects to state; instead, callers must follow
-	// the recommendations in the AttrsJSON documentation above.
-	AttrsFlat map[string]string
-
-	// These fields all correspond to the fields of the same name on
-	// ResourceInstanceObject.
-	Private      []byte
-	Status       ObjectStatus
-	Dependencies []addrs.Referenceable
-}
-
-// Decode unmarshals the raw representation of the object attributes. Pass the
-// implied type of the corresponding resource type schema for correct operation.
-//
-// Before calling Decode, the caller must check that the SchemaVersion field
-// exactly equals the version number of the schema whose implied type is being
-// passed, or else the result is undefined.
-//
-// The returned object may share internal references with the receiver and
-// so the caller must not mutate the receiver any further once this
-// method is called.
-func (os *ResourceInstanceObjectSrc) Decode(ty cty.Type) (*ResourceInstanceObject, error) {
-	var val cty.Value
-	var err error
-	if os.AttrsFlat != nil {
-		// Legacy mode. We'll do our best to unpick this from the flatmap.
- val, err = hcl2shim.HCL2ValueFromFlatmap(os.AttrsFlat, ty) - if err != nil { - return nil, err - } - } else { - val, err = ctyjson.Unmarshal(os.AttrsJSON, ty) - if err != nil { - return nil, err - } - } - - return &ResourceInstanceObject{ - Value: val, - Status: os.Status, - Dependencies: os.Dependencies, - Private: os.Private, - }, nil -} - -// CompleteUpgrade creates a new ResourceInstanceObjectSrc by copying the -// metadata from the receiver and writing in the given new schema version -// and attribute value that are presumed to have resulted from upgrading -// from an older schema version. -func (os *ResourceInstanceObjectSrc) CompleteUpgrade(newAttrs cty.Value, newType cty.Type, newSchemaVersion uint64) (*ResourceInstanceObjectSrc, error) { - new := os.DeepCopy() - new.AttrsFlat = nil // We always use JSON after an upgrade, even if the source used flatmap - - // This is the same principle as ResourceInstanceObject.Encode, but - // avoiding a decode/re-encode cycle because we don't have type info - // available for the "old" attributes. - newAttrs = cty.UnknownAsNull(newAttrs) - src, err := ctyjson.Marshal(newAttrs, newType) - if err != nil { - return nil, err - } - - new.AttrsJSON = src - new.SchemaVersion = newSchemaVersion - return new, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go deleted file mode 100644 index 6b74cbfa6b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go +++ /dev/null @@ -1,268 +0,0 @@ -package states - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// Module is a container for the states of objects within a particular module. -type Module struct { - Addr addrs.ModuleInstance - - // Resources contains the state for each resource. The keys in this map are - // an implementation detail and must not be used by outside callers. - Resources map[string]*Resource - - // OutputValues contains the state for each output value. The keys in this - // map are output value names. - OutputValues map[string]*OutputValue - - // LocalValues contains the value for each named output value. The keys - // in this map are local value names. - LocalValues map[string]cty.Value -} - -// NewModule constructs an empty module state for the given module address. -func NewModule(addr addrs.ModuleInstance) *Module { - return &Module{ - Addr: addr, - Resources: map[string]*Resource{}, - OutputValues: map[string]*OutputValue{}, - LocalValues: map[string]cty.Value{}, - } -} - -// Resource returns the state for the resource with the given address within -// the receiving module state, or nil if the requested resource is not tracked -// in the state. -func (ms *Module) Resource(addr addrs.Resource) *Resource { - return ms.Resources[addr.String()] -} - -// ResourceInstance returns the state for the resource instance with the given -// address within the receiving module state, or nil if the requested instance -// is not tracked in the state. -func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance { - rs := ms.Resource(addr.Resource) - if rs == nil { - return nil - } - return rs.Instance(addr.Key) -} - -// SetResourceMeta updates the resource-level metadata for the resource -// with the given address, creating the resource state for it if it doesn't -// already exist. 
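// Example: round-tripping an instance object through its source form, as the
// Encode and Decode docs above describe. A sketch that assumes a trivial
// object schema; since this is an internal package, code like this can only
// live inside the SDK itself.
//
//	ty := cty.Object(map[string]cty.Type{"id": cty.String})
//	obj := &ResourceInstanceObject{
//		Value:  cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("abc123")}),
//		Status: ObjectReady,
//	}
//	src, err := obj.Encode(ty, 1) // records schema version 1 alongside AttrsJSON
//	if err == nil {
//		decoded, _ := src.Decode(ty) // caller must check SchemaVersion first
//		_ = decoded.Value            // the same object value, round-tripped
//	}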
-func (ms *Module) SetResourceMeta(addr addrs.Resource, eachMode EachMode, provider addrs.AbsProviderConfig) { - rs := ms.Resource(addr) - if rs == nil { - rs = &Resource{ - Addr: addr, - Instances: map[addrs.InstanceKey]*ResourceInstance{}, - } - ms.Resources[addr.String()] = rs - } - - rs.EachMode = eachMode - rs.ProviderConfig = provider -} - -// RemoveResource removes the entire state for the given resource, taking with -// it any instances associated with the resource. This should generally be -// called only for resource objects whose instances have all been destroyed. -func (ms *Module) RemoveResource(addr addrs.Resource) { - delete(ms.Resources, addr.String()) -} - -// SetResourceInstanceCurrent saves the given instance object as the current -// generation of the resource instance with the given address, simulataneously -// updating the recorded provider configuration address, dependencies, and -// resource EachMode. -// -// Any existing current instance object for the given resource is overwritten. -// Set obj to nil to remove the primary generation object altogether. If there -// are no deposed objects then the instance will be removed altogether. -// -// The provider address and "each mode" are resource-wide settings and so they -// are updated for all other instances of the same resource as a side-effect of -// this call. -func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider) - - rs := ms.Resource(addr.Resource) - is := rs.EnsureInstance(addr.Key) - - is.Current = obj - - if !is.HasObjects() { - // If we have no objects at all then we'll clean up. - delete(rs.Instances, addr.Key) - } - if rs.EachMode == NoEach && len(rs.Instances) == 0 { - // Also clean up if we only expect to have one instance anyway - // and there are none. We leave the resource behind if an each mode - // is active because an empty list or map of instances is a valid state. - delete(ms.Resources, addr.Resource.String()) - } -} - -// SetResourceInstanceDeposed saves the given instance object as a deposed -// generation of the resource instance with the given address and deposed key. -// -// Call this method only for pre-existing deposed objects that already have -// a known DeposedKey. For example, this method is useful if reloading objects -// that were persisted to a state file. To mark the current object as deposed, -// use DeposeResourceInstanceObject instead. -// -// The resource that contains the given instance must already exist in the -// state, or this method will panic. Use Resource to check first if its -// presence is not already guaranteed. -// -// Any existing current instance object for the given resource and deposed key -// is overwritten. Set obj to nil to remove the deposed object altogether. If -// the instance is left with no objects after this operation then it will -// be removed from its containing resource altogether. -func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider) - - rs := ms.Resource(addr.Resource) - is := rs.EnsureInstance(addr.Key) - if obj != nil { - is.Deposed[key] = obj - } else { - delete(is.Deposed, key) - } - - if !is.HasObjects() { - // If we have no objects at all then we'll clean up. 
- delete(rs.Instances, addr.Key) - } - if rs.EachMode == NoEach && len(rs.Instances) == 0 { - // Also clean up if we only expect to have one instance anyway - // and there are none. We leave the resource behind if an each mode - // is active because an empty list or map of instances is a valid state. - delete(ms.Resources, addr.Resource.String()) - } -} - -// ForgetResourceInstanceDeposed removes the record of the deposed object with -// the given address and key, if present. If not present, this is a no-op. -func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) { - rs := ms.Resource(addr.Resource) - if rs == nil { - return - } - is := rs.Instance(addr.Key) - if is == nil { - return - } - delete(is.Deposed, key) - - if !is.HasObjects() { - // If we have no objects at all then we'll clean up. - delete(rs.Instances, addr.Key) - } - if rs.EachMode == NoEach && len(rs.Instances) == 0 { - // Also clean up if we only expect to have one instance anyway - // and there are none. We leave the resource behind if an each mode - // is active because an empty list or map of instances is a valid state. - delete(ms.Resources, addr.Resource.String()) - } -} - -// deposeResourceInstanceObject is the real implementation of -// SyncState.DeposeResourceInstanceObject. -func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey { - is := ms.ResourceInstance(addr) - if is == nil { - return NotDeposed - } - return is.deposeCurrentObject(forceKey) -} - -// maybeRestoreResourceInstanceDeposed is the real implementation of -// SyncState.MaybeRestoreResourceInstanceDeposed. -func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool { - rs := ms.Resource(addr.Resource) - if rs == nil { - return false - } - is := rs.Instance(addr.Key) - if is == nil { - return false - } - if is.Current != nil { - return false - } - if len(is.Deposed) == 0 { - return false - } - is.Current = is.Deposed[key] - delete(is.Deposed, key) - return true -} - -// SetOutputValue writes an output value into the state, overwriting any -// existing value of the same name. -func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue { - os := &OutputValue{ - Value: value, - Sensitive: sensitive, - } - ms.OutputValues[name] = os - return os -} - -// RemoveOutputValue removes the output value of the given name from the state, -// if it exists. This method is a no-op if there is no value of the given -// name. -func (ms *Module) RemoveOutputValue(name string) { - delete(ms.OutputValues, name) -} - -// SetLocalValue writes a local value into the state, overwriting any -// existing value of the same name. -func (ms *Module) SetLocalValue(name string, value cty.Value) { - ms.LocalValues[name] = value -} - -// RemoveLocalValue removes the local value of the given name from the state, -// if it exists. This method is a no-op if there is no value of the given -// name. -func (ms *Module) RemoveLocalValue(name string) { - delete(ms.LocalValues, name) -} - -// PruneResourceHusks is a specialized method that will remove any Resource -// objects that do not contain any instances, even if they have an EachMode. -// -// You probably shouldn't call this! See the method of the same name on -// type State for more information on what this is for and the rare situations -// where it is safe to use. 
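// Example: the cleanup invariant described above - writing a nil current
// object removes the instance, and a single-instance resource with no
// remaining instances is dropped entirely. Sketch only; the address
// constructors and values shown are assumptions.
//
//	addr := addrs.Resource{
//		Mode: addrs.ManagedResourceMode,
//		Type: "null_resource",
//		Name: "a",
//	}.Instance(addrs.NoKey)
//	provider := addrs.RootModuleInstance.ProviderConfigDefault("null")
//
//	ms := NewModule(addrs.RootModuleInstance)
//	ms.SetResourceInstanceCurrent(addr, obj, provider) // obj: some non-nil *ResourceInstanceObjectSrc
//	ms.SetResourceInstanceCurrent(addr, nil, provider) // removes the instance again
//	// ms.Resource(addr.Resource) == nil: the husk was cleaned up too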
-func (ms *Module) PruneResourceHusks() { - for _, rs := range ms.Resources { - if len(rs.Instances) == 0 { - ms.RemoveResource(rs.Addr) - } - } -} - -// empty returns true if the receving module state is contributing nothing -// to the state. In other words, it returns true if the module could be -// removed from the state altogether without changing the meaning of the state. -// -// In practice a module containing no objects is the same as a non-existent -// module, and so we can opportunistically clean up once a module becomes -// empty on the assumption that it will be re-added if needed later. -func (ms *Module) empty() bool { - if ms == nil { - return true - } - - // This must be updated to cover any new collections added to Module - // in future. - return (len(ms.Resources) == 0 && - len(ms.OutputValues) == 0 && - len(ms.LocalValues) == 0) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go deleted file mode 100644 index 96a6db2f4c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by "stringer -type ObjectStatus"; DO NOT EDIT. - -package states - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ObjectReady-82] - _ = x[ObjectTainted-84] - _ = x[ObjectPlanned-80] -} - -const ( - _ObjectStatus_name_0 = "ObjectPlanned" - _ObjectStatus_name_1 = "ObjectReady" - _ObjectStatus_name_2 = "ObjectTainted" -) - -func (i ObjectStatus) String() string { - switch { - case i == 80: - return _ObjectStatus_name_0 - case i == 82: - return _ObjectStatus_name_1 - case i == 84: - return _ObjectStatus_name_2 - default: - return "ObjectStatus(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go deleted file mode 100644 index d232b76d40..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go +++ /dev/null @@ -1,14 +0,0 @@ -package states - -import ( - "github.com/zclconf/go-cty/cty" -) - -// OutputValue represents the state of a particular output value. -// -// It is not valid to mutate an OutputValue object once it has been created. -// Instead, create an entirely new OutputValue to replace the previous one. -type OutputValue struct { - Value cty.Value - Sensitive bool -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go deleted file mode 100644 index 32ea638acd..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go +++ /dev/null @@ -1,233 +0,0 @@ -package states - -import ( - "fmt" - "math/rand" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// Resource represents the state of a resource. -type Resource struct { - // Addr is the module-relative address for the resource this state object - // belongs to. - Addr addrs.Resource - - // EachMode is the multi-instance mode currently in use for this resource, - // or NoEach if this is a single-instance resource. 
This dictates what - // type of value is returned when accessing this resource via expressions - // in the Terraform language. - EachMode EachMode - - // Instances contains the potentially-multiple instances associated with - // this resource. This map can contain a mixture of different key types, - // but only the ones of InstanceKeyType are considered current. - Instances map[addrs.InstanceKey]*ResourceInstance - - // ProviderConfig is the absolute address for the provider configuration that - // most recently managed this resource. This is used to connect a resource - // with a provider configuration when the resource configuration block is - // not available, such as if it has been removed from configuration - // altogether. - ProviderConfig addrs.AbsProviderConfig -} - -// Instance returns the state for the instance with the given key, or nil -// if no such instance is tracked within the state. -func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance { - return rs.Instances[key] -} - -// EnsureInstance returns the state for the instance with the given key, -// creating a new empty state for it if one doesn't already exist. -// -// Because this may create and save a new state, it is considered to be -// a write operation. -func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance { - ret := rs.Instance(key) - if ret == nil { - ret = NewResourceInstance() - rs.Instances[key] = ret - } - return ret -} - -// ResourceInstance represents the state of a particular instance of a resource. -type ResourceInstance struct { - // Current, if non-nil, is the remote object that is currently represented - // by the corresponding resource instance. - Current *ResourceInstanceObjectSrc - - // Deposed, if len > 0, contains any remote objects that were previously - // represented by the corresponding resource instance but have been - // replaced and are pending destruction due to the create_before_destroy - // lifecycle mode. - Deposed map[DeposedKey]*ResourceInstanceObjectSrc -} - -// NewResourceInstance constructs and returns a new ResourceInstance, ready to -// use. -func NewResourceInstance() *ResourceInstance { - return &ResourceInstance{ - Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, - } -} - -// HasCurrent returns true if this resource instance has a "current"-generation -// object. Most instances do, but this can briefly be false during a -// create-before-destroy replace operation when the current has been deposed -// but its replacement has not yet been created. -func (i *ResourceInstance) HasCurrent() bool { - return i != nil && i.Current != nil -} - -// HasDeposed returns true if this resource instance has a deposed object -// with the given key. -func (i *ResourceInstance) HasDeposed(key DeposedKey) bool { - return i != nil && i.Deposed[key] != nil -} - -// HasObjects returns true if this resource has any objects at all, whether -// current or deposed. -func (i *ResourceInstance) HasObjects() bool { - return i.Current != nil || len(i.Deposed) != 0 -} - -// deposeCurrentObject is part of the real implementation of -// SyncState.DeposeResourceInstanceObject. The exported method uses a lock -// to ensure that we can safely allocate an unused deposed key without -// collision. 
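// Example: how HasCurrent and HasObjects relate during a
// create_before_destroy replacement, per the docs above. Sketch only; src
// stands for some previously-encoded *ResourceInstanceObjectSrc.
//
//	is := NewResourceInstance()
//	is.HasObjects() // false: neither current nor deposed objects yet
//
//	is.Current = src
//	key := is.FindUnusedDeposedKey()
//	is.Deposed[key] = is.Current // depose the old object...
//	is.Current = nil             // ...while its replacement is being created
//	is.HasCurrent()              // false, but is.HasObjects() is still true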
-func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey { - if !i.HasCurrent() { - return NotDeposed - } - - key := forceKey - if key == NotDeposed { - key = i.findUnusedDeposedKey() - } else { - if _, exists := i.Deposed[key]; exists { - panic(fmt.Sprintf("forced key %s is already in use", forceKey)) - } - } - i.Deposed[key] = i.Current - i.Current = nil - return key -} - -// GetGeneration retrieves the object of the given generation from the -// ResourceInstance, or returns nil if there is no such object. -// -// If the given generation is nil or invalid, this method will panic. -func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc { - if gen == CurrentGen { - return i.Current - } - if dk, ok := gen.(DeposedKey); ok { - return i.Deposed[dk] - } - if gen == nil { - panic(fmt.Sprintf("get with nil Generation")) - } - // Should never fall out here, since the above covers all possible - // Generation values. - panic(fmt.Sprintf("get invalid Generation %#v", gen)) -} - -// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to -// already be in use for this instance at the time of the call. -// -// Note that the validity of this result may change if new deposed keys are -// allocated before it is used. To avoid this risk, instead use the -// DeposeResourceInstanceObject method on the SyncState wrapper type, which -// allocates a key and uses it atomically. -func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey { - return i.findUnusedDeposedKey() -} - -// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to -// already be in use for this instance. -func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey { - for { - key := NewDeposedKey() - if _, exists := i.Deposed[key]; !exists { - return key - } - // Spin until we find a unique one. This shouldn't take long, because - // we have a 32-bit keyspace and there's rarely more than one deposed - // instance. - } -} - -// EachMode specifies the multi-instance mode for a resource. -type EachMode rune - -const ( - NoEach EachMode = 0 - EachList EachMode = 'L' - EachMap EachMode = 'M' -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type EachMode - -func eachModeForInstanceKey(key addrs.InstanceKey) EachMode { - switch key.(type) { - case addrs.IntKey: - return EachList - case addrs.StringKey: - return EachMap - default: - if key == addrs.NoKey { - return NoEach - } - panic(fmt.Sprintf("don't know an each mode for instance key %#v", key)) - } -} - -// DeposedKey is a 8-character hex string used to uniquely identify deposed -// instance objects in the state. -type DeposedKey string - -// NotDeposed is a special invalid value of DeposedKey that is used to represent -// the absense of a deposed key. It must not be used as an actual deposed key. -const NotDeposed = DeposedKey("") - -var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano())) - -// NewDeposedKey generates a pseudo-random deposed key. Because of the short -// length of these keys, uniqueness is not a natural consequence and so the -// caller should test to see if the generated key is already in use and generate -// another if so, until a unique key is found. 
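// Illustrative sketch (hypothetical names, standalone) of the deposed-key
// scheme documented above: keys are random 8-character hex strings, so
// uniqueness is not guaranteed and the caller retries until it draws a key
// that is not already in use.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

type deposedKey string

var keyRand = rand.New(rand.NewSource(time.Now().UnixNano()))

func newDeposedKey() deposedKey {
	return deposedKey(fmt.Sprintf("%08x", keyRand.Uint32()))
}

// findUnusedDeposedKey spins until it finds a key absent from deposed; with a
// 32-bit keyspace and rarely more than one deposed object, this ends quickly.
func findUnusedDeposedKey(deposed map[deposedKey]struct{}) deposedKey {
	for {
		key := newDeposedKey()
		if _, exists := deposed[key]; !exists {
			return key
		}
	}
}

func main() {
	deposed := map[deposedKey]struct{}{}
	for i := 0; i < 3; i++ {
		key := findUnusedDeposedKey(deposed)
		deposed[key] = struct{}{}
		fmt.Println(key)
	}
}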
-func NewDeposedKey() DeposedKey { - v := deposedKeyRand.Uint32() - return DeposedKey(fmt.Sprintf("%08x", v)) -} - -func (k DeposedKey) String() string { - return string(k) -} - -func (k DeposedKey) GoString() string { - ks := string(k) - switch { - case ks == "": - return "states.NotDeposed" - default: - return fmt.Sprintf("states.DeposedKey(%s)", ks) - } -} - -// Generation is a helper method to convert a DeposedKey into a Generation. -// If the reciever is anything other than NotDeposed then the result is -// just the same value as a Generation. If the receiver is NotDeposed then -// the result is CurrentGen. -func (k DeposedKey) Generation() Generation { - if k == NotDeposed { - return CurrentGen - } - return k -} - -// generation is an implementation of Generation. -func (k DeposedKey) generation() {} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go deleted file mode 100644 index 328dd53d5c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go +++ /dev/null @@ -1,229 +0,0 @@ -package states - -import ( - "sort" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// State is the top-level type of a Terraform state. -// -// A state should be mutated only via its accessor methods, to ensure that -// invariants are preserved. -// -// Access to State and the nested values within it is not concurrency-safe, -// so when accessing a State object concurrently it is the caller's -// responsibility to ensure that only one write is in progress at a time -// and that reads only occur when no write is in progress. The most common -// way to acheive this is to wrap the State in a SyncState and use the -// higher-level atomic operations supported by that type. -type State struct { - // Modules contains the state for each module. The keys in this map are - // an implementation detail and must not be used by outside callers. - Modules map[string]*Module -} - -// NewState constructs a minimal empty state, containing an empty root module. -func NewState() *State { - modules := map[string]*Module{} - modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance) - return &State{ - Modules: modules, - } -} - -// BuildState is a helper -- primarily intended for tests -- to build a state -// using imperative code against the StateSync type while still acting as -// an expression of type *State to assign into a containing struct. -func BuildState(cb func(*SyncState)) *State { - s := NewState() - cb(s.SyncWrapper()) - return s -} - -// Empty returns true if there are no resources or populated output values -// in the receiver. In other words, if this state could be safely replaced -// with the return value of NewState and be functionally equivalent. -func (s *State) Empty() bool { - if s == nil { - return true - } - for _, ms := range s.Modules { - if len(ms.Resources) != 0 { - return false - } - if len(ms.OutputValues) != 0 { - return false - } - } - return true -} - -// Module returns the state for the module with the given address, or nil if -// the requested module is not tracked in the state. -func (s *State) Module(addr addrs.ModuleInstance) *Module { - if s == nil { - panic("State.Module on nil *State") - } - return s.Modules[addr.String()] -} - -// RemoveModule removes the module with the given address from the state, -// unless it is the root module. 
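// Illustrative sketch (hypothetical types) of the guard described here: the
// root module must always exist, so attempting to remove it panics rather
// than silently corrupting the state.
package main

import "fmt"

const rootModuleKey = "" // hypothetical: key under which the root module is stored

type state struct{ modules map[string]struct{} }

func (s *state) removeModule(addr string) {
	if addr == rootModuleKey {
		panic("attempted to remove root module")
	}
	delete(s.modules, addr)
}

func main() {
	s := &state{modules: map[string]struct{}{rootModuleKey: {}, "module.a": {}}}
	s.removeModule("module.a")
	fmt.Println(len(s.modules)) // 1: only the root module remains
}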
The root module cannot be deleted, and so -// this method will panic if that is attempted. -// -// Removing a module implicitly discards all of the resources, outputs and -// local values within it, and so this should usually be done only for empty -// modules. For callers accessing the state through a SyncState wrapper, modules -// are automatically pruned if they are empty after one of their contained -// elements is removed. -func (s *State) RemoveModule(addr addrs.ModuleInstance) { - if addr.IsRoot() { - panic("attempted to remove root module") - } - - delete(s.Modules, addr.String()) -} - -// RootModule is a convenient alias for Module(addrs.RootModuleInstance). -func (s *State) RootModule() *Module { - if s == nil { - panic("RootModule called on nil State") - } - return s.Modules[addrs.RootModuleInstance.String()] -} - -// EnsureModule returns the state for the module with the given address, -// creating and adding a new one if necessary. -// -// Since this might modify the state to add a new instance, it is considered -// to be a write operation. -func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module { - ms := s.Module(addr) - if ms == nil { - ms = NewModule(addr) - s.Modules[addr.String()] = ms - } - return ms -} - -// HasResources returns true if there is at least one resource (of any mode) -// present in the receiving state. -func (s *State) HasResources() bool { - if s == nil { - return false - } - for _, ms := range s.Modules { - if len(ms.Resources) > 0 { - return true - } - } - return false -} - -// Resource returns the state for the resource with the given address, or nil -// if no such resource is tracked in the state. -func (s *State) Resource(addr addrs.AbsResource) *Resource { - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.Resource(addr.Resource) -} - -// ResourceInstance returns the state for the resource instance with the given -// address, or nil if no such resource is tracked in the state. -func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { - if s == nil { - panic("State.ResourceInstance on nil *State") - } - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.ResourceInstance(addr.Resource) -} - -// OutputValue returns the state for the output value with the given address, -// or nil if no such output value is tracked in the state. -func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue { - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.OutputValues[addr.OutputValue.Name] -} - -// LocalValue returns the value of the named local value with the given address, -// or cty.NilVal if no such value is tracked in the state. -func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value { - ms := s.Module(addr.Module) - if ms == nil { - return cty.NilVal - } - return ms.LocalValues[addr.LocalValue.Name] -} - -// ProviderAddrs returns a list of all of the provider configuration addresses -// referenced throughout the receiving state. -// -// The result is de-duplicated so that each distinct address appears only once. -func (s *State) ProviderAddrs() []addrs.AbsProviderConfig { - if s == nil { - return nil - } - - m := map[string]addrs.AbsProviderConfig{} - for _, ms := range s.Modules { - for _, rc := range ms.Resources { - m[rc.ProviderConfig.String()] = rc.ProviderConfig - } - } - if len(m) == 0 { - return nil - } - - // This is mainly just so we'll get stable results for testing purposes. 
- keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - ret := make([]addrs.AbsProviderConfig, len(keys)) - for i, key := range keys { - ret[i] = m[key] - } - - return ret -} - -// PruneResourceHusks is a specialized method that will remove any Resource -// objects that do not contain any instances, even if they have an EachMode. -// -// This should generally be used only after a "terraform destroy" operation, -// to finalize the cleanup of the state. It is not correct to use this after -// other operations because if a resource has "count = 0" or "for_each" over -// an empty collection then we want to retain it in the state so that references -// to it, particularly in "strange" contexts like "terraform console", can be -// properly resolved. -// -// This method MUST NOT be called concurrently with other readers and writers -// of the receiving state. -func (s *State) PruneResourceHusks() { - for _, m := range s.Modules { - m.PruneResourceHusks() - if len(m.Resources) == 0 && !m.Addr.IsRoot() { - s.RemoveModule(m.Addr) - } - } -} - -// SyncWrapper returns a SyncState object wrapping the receiver. -func (s *State) SyncWrapper() *SyncState { - return &SyncState{ - state: s, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go deleted file mode 100644 index 6266aca79d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go +++ /dev/null @@ -1,221 +0,0 @@ -package states - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/zclconf/go-cty/cty" -) - -// Taking deep copies of states is an important operation because state is -// otherwise a mutable data structure that is challenging to share across -// many separate callers. It is important that the DeepCopy implementations -// in this file comprehensively copy all parts of the state data structure -// that could be mutated via pointers. - -// DeepCopy returns a new state that contains equivalent data to the reciever -// but shares no backing memory in common. -// -// As with all methods on State, this method is not safe to use concurrently -// with writing to any portion of the recieving data structure. It is the -// caller's responsibility to ensure mutual exclusion for the duration of the -// operation, but may then freely modify the receiver and the returned copy -// independently once this method returns. -func (s *State) DeepCopy() *State { - if s == nil { - return nil - } - - modules := make(map[string]*Module, len(s.Modules)) - for k, m := range s.Modules { - modules[k] = m.DeepCopy() - } - return &State{ - Modules: modules, - } -} - -// DeepCopy returns a new module state that contains equivalent data to the -// receiver but shares no backing memory in common. -// -// As with all methods on Module, this method is not safe to use concurrently -// with writing to any portion of the recieving data structure. It is the -// caller's responsibility to ensure mutual exclusion for the duration of the -// operation, but may then freely modify the receiver and the returned copy -// independently once this method returns. 
-func (ms *Module) DeepCopy() *Module { - if ms == nil { - return nil - } - - resources := make(map[string]*Resource, len(ms.Resources)) - for k, r := range ms.Resources { - resources[k] = r.DeepCopy() - } - outputValues := make(map[string]*OutputValue, len(ms.OutputValues)) - for k, v := range ms.OutputValues { - outputValues[k] = v.DeepCopy() - } - localValues := make(map[string]cty.Value, len(ms.LocalValues)) - for k, v := range ms.LocalValues { - // cty.Value is immutable, so we don't need to copy these. - localValues[k] = v - } - - return &Module{ - Addr: ms.Addr, // technically mutable, but immutable by convention - Resources: resources, - OutputValues: outputValues, - LocalValues: localValues, - } -} - -// DeepCopy returns a new resource state that contains equivalent data to the -// receiver but shares no backing memory in common. -// -// As with all methods on Resource, this method is not safe to use concurrently -// with writing to any portion of the recieving data structure. It is the -// caller's responsibility to ensure mutual exclusion for the duration of the -// operation, but may then freely modify the receiver and the returned copy -// independently once this method returns. -func (rs *Resource) DeepCopy() *Resource { - if rs == nil { - return nil - } - - instances := make(map[addrs.InstanceKey]*ResourceInstance, len(rs.Instances)) - for k, i := range rs.Instances { - instances[k] = i.DeepCopy() - } - - return &Resource{ - Addr: rs.Addr, - EachMode: rs.EachMode, - Instances: instances, - ProviderConfig: rs.ProviderConfig, // technically mutable, but immutable by convention - } -} - -// DeepCopy returns a new resource instance state that contains equivalent data -// to the receiver but shares no backing memory in common. -// -// As with all methods on ResourceInstance, this method is not safe to use -// concurrently with writing to any portion of the recieving data structure. It -// is the caller's responsibility to ensure mutual exclusion for the duration -// of the operation, but may then freely modify the receiver and the returned -// copy independently once this method returns. -func (is *ResourceInstance) DeepCopy() *ResourceInstance { - if is == nil { - return nil - } - - deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(is.Deposed)) - for k, obj := range is.Deposed { - deposed[k] = obj.DeepCopy() - } - - return &ResourceInstance{ - Current: is.Current.DeepCopy(), - Deposed: deposed, - } -} - -// DeepCopy returns a new resource instance object that contains equivalent data -// to the receiver but shares no backing memory in common. -// -// As with all methods on ResourceInstanceObjectSrc, this method is not safe to -// use concurrently with writing to any portion of the recieving data structure. -// It is the caller's responsibility to ensure mutual exclusion for the duration -// of the operation, but may then freely modify the receiver and the returned -// copy independently once this method returns. 
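// Illustrative sketch (hypothetical type, not the SDK's) of the deep-copy
// discipline used throughout this file: every mutable field (maps, byte
// slices) is re-allocated so the copy shares no backing memory with the
// receiver, while immutable values may be copied by reference.
package main

import "fmt"

type objectSrc struct {
	attrsJSON []byte
	attrsFlat map[string]string
}

func (o *objectSrc) deepCopy() *objectSrc {
	if o == nil {
		return nil
	}
	cp := &objectSrc{}
	if o.attrsJSON != nil {
		cp.attrsJSON = append([]byte(nil), o.attrsJSON...) // fresh backing array
	}
	if o.attrsFlat != nil {
		cp.attrsFlat = make(map[string]string, len(o.attrsFlat))
		for k, v := range o.attrsFlat {
			cp.attrsFlat[k] = v
		}
	}
	return cp
}

func main() {
	orig := &objectSrc{attrsJSON: []byte(`{"id":"a"}`)}
	cp := orig.deepCopy()
	cp.attrsJSON[0] = 'X'               // mutate the copy...
	fmt.Println(string(orig.attrsJSON)) // ...the original is untouched
}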
-func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc { - if obj == nil { - return nil - } - - var attrsFlat map[string]string - if obj.AttrsFlat != nil { - attrsFlat = make(map[string]string, len(obj.AttrsFlat)) - for k, v := range obj.AttrsFlat { - attrsFlat[k] = v - } - } - - var attrsJSON []byte - if obj.AttrsJSON != nil { - attrsJSON = make([]byte, len(obj.AttrsJSON)) - copy(attrsJSON, obj.AttrsJSON) - } - - var private []byte - if obj.Private != nil { - private = make([]byte, len(obj.Private)) - copy(private, obj.Private) - } - - // Some addrs.Referencable implementations are technically mutable, but - // we treat them as immutable by convention and so we don't deep-copy here. - dependencies := make([]addrs.Referenceable, len(obj.Dependencies)) - copy(dependencies, obj.Dependencies) - - return &ResourceInstanceObjectSrc{ - Status: obj.Status, - SchemaVersion: obj.SchemaVersion, - Private: private, - AttrsFlat: attrsFlat, - AttrsJSON: attrsJSON, - Dependencies: dependencies, - } -} - -// DeepCopy returns a new resource instance object that contains equivalent data -// to the receiver but shares no backing memory in common. -// -// As with all methods on ResourceInstanceObject, this method is not safe to use -// concurrently with writing to any portion of the recieving data structure. It -// is the caller's responsibility to ensure mutual exclusion for the duration -// of the operation, but may then freely modify the receiver and the returned -// copy independently once this method returns. -func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject { - if obj == nil { - return nil - } - - var private []byte - if obj.Private != nil { - private = make([]byte, len(obj.Private)) - copy(private, obj.Private) - } - - // Some addrs.Referenceable implementations are technically mutable, but - // we treat them as immutable by convention and so we don't deep-copy here. - var dependencies []addrs.Referenceable - if obj.Dependencies != nil { - dependencies = make([]addrs.Referenceable, len(obj.Dependencies)) - copy(dependencies, obj.Dependencies) - } - - return &ResourceInstanceObject{ - Value: obj.Value, - Status: obj.Status, - Private: private, - Dependencies: dependencies, - } -} - -// DeepCopy returns a new output value state that contains equivalent data -// to the receiver but shares no backing memory in common. -// -// As with all methods on OutputValue, this method is not safe to use -// concurrently with writing to any portion of the recieving data structure. It -// is the caller's responsibility to ensure mutual exclusion for the duration -// of the operation, but may then freely modify the receiver and the returned -// copy independently once this method returns. -func (os *OutputValue) DeepCopy() *OutputValue { - if os == nil { - return nil - } - - return &OutputValue{ - Value: os.Value, - Sensitive: os.Sensitive, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go deleted file mode 100644 index ea20967e5b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go +++ /dev/null @@ -1,18 +0,0 @@ -package states - -import ( - "reflect" -) - -// Equal returns true if the receiver is functionally equivalent to other, -// including any ephemeral portions of the state that would not be included -// if the state were saved to files. 
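// Illustrative sketch (standalone) of the equality strategy described here:
// for now a full structural comparison via reflect.DeepEqual is sufficient,
// covering both persistent and ephemeral portions of the state.
package main

import (
	"fmt"
	"reflect"
)

type state struct{ outputs map[string]string }

func (s *state) equal(other *state) bool {
	return reflect.DeepEqual(s, other)
}

func main() {
	a := &state{outputs: map[string]string{"x": "1"}}
	b := &state{outputs: map[string]string{"x": "1"}}
	fmt.Println(a.equal(b)) // true: same structure and contents
}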
-// -// To test only the persistent portions of two states for equality, instead -// use statefile.StatesMarshalEqual. -func (s *State) Equal(other *State) bool { - // For the moment this is sufficient, but we may need to do something - // more elaborate in future if we have any portions of state that require - // more sophisticated comparisons. - return reflect.DeepEqual(s, other) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go deleted file mode 100644 index dffd650d6b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go +++ /dev/null @@ -1,279 +0,0 @@ -package states - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "sort" - "strings" - - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" -) - -// String returns a rather-odd string representation of the entire state. -// -// This is intended to match the behavior of the older terraform.State.String -// method that is used in lots of existing tests. It should not be used in -// new tests: instead, use "cmp" to directly compare the state data structures -// and print out a diff if they do not match. -// -// This method should never be used in non-test code, whether directly by call -// or indirectly via a %s or %q verb in package fmt. -func (s *State) String() string { - if s == nil { - return "" - } - - // sort the modules by name for consistent output - modules := make([]string, 0, len(s.Modules)) - for m := range s.Modules { - modules = append(modules, m) - } - sort.Strings(modules) - - var buf bytes.Buffer - for _, name := range modules { - m := s.Modules[name] - mStr := m.testString() - - // If we're the root module, we just write the output directly. - if m.Addr.IsRoot() { - buf.WriteString(mStr + "\n") - continue - } - - // We need to build out a string that resembles the not-quite-standard - // format that terraform.State.String used to use, where there's a - // "module." prefix but then just a chain of all of the module names - // without any further "module." portions. - buf.WriteString("module") - for _, step := range m.Addr { - buf.WriteByte('.') - buf.WriteString(step.Name) - if step.InstanceKey != addrs.NoKey { - buf.WriteByte('[') - buf.WriteString(step.InstanceKey.String()) - buf.WriteByte(']') - } - } - buf.WriteString(":\n") - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - text := s.Text() - if text != "" { - text = " " + text - } - - buf.WriteString(fmt.Sprintf("%s\n", text)) - } - } - - return strings.TrimSpace(buf.String()) -} - -// testString is used to produce part of the output of State.String. It should -// never be used directly. -func (m *Module) testString() string { - var buf bytes.Buffer - - if len(m.Resources) == 0 { - buf.WriteString("") - } - - // We use AbsResourceInstance here, even though everything belongs to - // the same module, just because we have a sorting behavior defined - // for those but not for just ResourceInstance. 
- addrsOrder := make([]addrs.AbsResourceInstance, 0, len(m.Resources)) - for _, rs := range m.Resources { - for ik := range rs.Instances { - addrsOrder = append(addrsOrder, rs.Addr.Instance(ik).Absolute(addrs.RootModuleInstance)) - } - } - - sort.Slice(addrsOrder, func(i, j int) bool { - return addrsOrder[i].Less(addrsOrder[j]) - }) - - for _, fakeAbsAddr := range addrsOrder { - addr := fakeAbsAddr.Resource - rs := m.Resource(addr.ContainingResource()) - is := m.ResourceInstance(addr) - - // Here we need to fake up a legacy-style address as the old state - // types would've used, since that's what our tests against those - // old types expect. The significant difference is that instancekey - // is dot-separated rather than using index brackets. - k := addr.ContainingResource().String() - if addr.Key != addrs.NoKey { - switch tk := addr.Key.(type) { - case addrs.IntKey: - k = fmt.Sprintf("%s.%d", k, tk) - default: - // No other key types existed for the legacy types, so we - // can do whatever we want here. We'll just use our standard - // syntax for these. - k = k + tk.String() - } - } - - id := LegacyInstanceObjectID(is.Current) - - taintStr := "" - if is.Current != nil && is.Current.Status == ObjectTainted { - taintStr = " (tainted)" - } - - deposedStr := "" - if len(is.Deposed) > 0 { - deposedStr = fmt.Sprintf(" (%d deposed)", len(is.Deposed)) - } - - buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) - buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) - buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.ProviderConfig.String())) - - // Attributes were a flatmap before, but are not anymore. To preserve - // our old output as closely as possible we need to do a conversion - // to flatmap. Normally we'd want to do this with schema for - // accuracy, but for our purposes here it only needs to be approximate. - // This should produce an identical result for most cases, though - // in particular will differ in a few cases: - // - The keys used for elements in a set will be different - // - Values for attributes of type cty.DynamicPseudoType will be - // misinterpreted (but these weren't possible in old world anyway) - var attributes map[string]string - if obj := is.Current; obj != nil { - switch { - case obj.AttrsFlat != nil: - // Easy (but increasingly unlikely) case: the state hasn't - // actually been upgraded to the new form yet. - attributes = obj.AttrsFlat - case obj.AttrsJSON != nil: - ty, err := ctyjson.ImpliedType(obj.AttrsJSON) - if err == nil { - val, err := ctyjson.Unmarshal(obj.AttrsJSON, ty) - if err == nil { - attributes = hcl2shim.FlatmapValueFromHCL2(val) - } - } - } - } - attrKeys := make([]string, 0, len(attributes)) - for ak, val := range attributes { - if ak == "id" { - continue - } - - // don't show empty containers in the output - if val == "0" && (strings.HasSuffix(ak, ".#") || strings.HasSuffix(ak, ".%")) { - continue - } - - attrKeys = append(attrKeys, ak) - } - - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) - } - - // CAUTION: Since deposed keys are now random strings instead of - // incrementing integers, this result will not be deterministic - // if there is more than one deposed object. 
- i := 1 - for _, t := range is.Deposed { - id := LegacyInstanceObjectID(t) - taintStr := "" - if t.Status == ObjectTainted { - taintStr = " (tainted)" - } - buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", i, id, taintStr)) - i++ - } - - if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 { - buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) - for _, dep := range obj.Dependencies { - buf.WriteString(fmt.Sprintf(" %s\n", dep.String())) - } - } - } - - if len(m.OutputValues) > 0 { - buf.WriteString("\nOutputs:\n\n") - - ks := make([]string, 0, len(m.OutputValues)) - for k := range m.OutputValues { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - v := m.OutputValues[k] - lv := hcl2shim.ConfigValueFromHCL2(v.Value) - switch vTyped := lv.(type) { - case string: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case []interface{}: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case map[string]interface{}: - var mapKeys []string - for key := range vTyped { - mapKeys = append(mapKeys, key) - } - sort.Strings(mapKeys) - - var mapBuf bytes.Buffer - mapBuf.WriteString("{") - for _, key := range mapKeys { - mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) - } - mapBuf.WriteString("}") - - buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) - default: - buf.WriteString(fmt.Sprintf("%s = %#v\n", k, lv)) - } - } - } - - return buf.String() -} - -// LegacyInstanceObjectID is a helper for extracting an object id value from -// an instance object in a way that approximates how we used to do this -// for the old state types. ID is no longer first-class, so this is preserved -// only for compatibility with old tests that include the id as part of their -// expected value. -func LegacyInstanceObjectID(obj *ResourceInstanceObjectSrc) string { - if obj == nil { - return "" - } - - if obj.AttrsJSON != nil { - type WithID struct { - ID string `json:"id"` - } - var withID WithID - err := json.Unmarshal(obj.AttrsJSON, &withID) - if err == nil { - return withID.ID - } - } else if obj.AttrsFlat != nil { - if flatID, exists := obj.AttrsFlat["id"]; exists { - return flatID - } - } - - // For resource types created after we removed id as special there may - // not actually be one at all. This is okay because older tests won't - // encounter this, and new tests shouldn't be using ids. - return "" -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go deleted file mode 100644 index 042ce51c14..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go +++ /dev/null @@ -1,62 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -const invalidFormat = "Invalid state file format" - -// jsonUnmarshalDiags is a helper that translates errors returned from -// json.Unmarshal into hopefully-more-helpful diagnostics messages. -func jsonUnmarshalDiags(err error) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - if err == nil { - return diags - } - - switch tErr := err.(type) { - case *json.SyntaxError: - // We've usually already successfully parsed a source file as JSON at - // least once before we'd use jsonUnmarshalDiags with it (to sniff - // the version number) so this particular error should not appear much - // in practice. 
- diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), - )) - case *json.UnmarshalTypeError: - // This is likely to be the most common area, describing a - // non-conformance between the file and the expected file format - // at a semantic level. - if tErr.Field != "" { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value), - )) - break - } else { - // Without a field name, we can't really say anything helpful. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - "The state file does not conform to the expected JSON data structure.", - )) - } - default: - // Fallback for all other types of errors. This can happen only for - // custom UnmarshalJSON implementations, so should be encountered - // only rarely. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()), - )) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go deleted file mode 100644 index 625d0cf429..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package statefile deals with the file format used to serialize states for -// persistent storage and then deserialize them into memory again later. -package statefile diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go deleted file mode 100644 index 70c8ba6cec..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go +++ /dev/null @@ -1,31 +0,0 @@ -package statefile - -import ( - version "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// File is the in-memory representation of a state file. It includes the state -// itself along with various metadata used to track changing state files for -// the same configuration over time. -type File struct { - // TerraformVersion is the version of Terraform that wrote this state file. - TerraformVersion *version.Version - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial uint64 - - // Lineage is set when a new, blank state file is created and then - // never updated. This allows us to determine whether the serials - // of two states can be meaningfully compared. - // Apart from the guarantee that collisions between two lineages - // are very unlikely, this value is opaque and external callers - // should only compare lineage strings byte-for-byte for equality. - Lineage string - - // State is the actual state represented by this file. 
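// Illustrative sketch (hypothetical helper, not the statefile API) of the
// serial/lineage bookkeeping documented above: serials are only comparable
// between snapshots that share a lineage, and every modification increments
// the serial so conflicting updates can be detected.
package main

import "fmt"

type fileMeta struct {
	serial  uint64
	lineage string
}

// supersedes reports whether a is a newer snapshot than b. Serials from
// different lineages are not meaningfully comparable, so that case errors.
func supersedes(a, b fileMeta) (bool, error) {
	if a.lineage != b.lineage {
		return false, fmt.Errorf("lineages %q and %q are unrelated", a.lineage, b.lineage)
	}
	return a.serial > b.serial, nil
}

func main() {
	a := fileMeta{serial: 7, lineage: "3f8a-example"} // hypothetical lineage values
	b := fileMeta{serial: 5, lineage: "3f8a-example"}
	ok, err := supersedes(a, b)
	fmt.Println(ok, err) // true <nil>
}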
- State *states.State -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go deleted file mode 100644 index f1899cd228..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go +++ /dev/null @@ -1,209 +0,0 @@ -package statefile - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - - version "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" -) - -// ErrNoState is returned by ReadState when the state file is empty. -var ErrNoState = errors.New("no state") - -// Read reads a state from the given reader. -// -// Legacy state format versions 1 through 3 are supported, but the result will -// contain object attributes in the deprecated "flatmap" format and so must -// be upgraded by the caller before use. -// -// If the state file is empty, the special error value ErrNoState is returned. -// Otherwise, the returned error might be a wrapper around tfdiags.Diagnostics -// potentially describing multiple errors. -func Read(r io.Reader) (*File, error) { - // Some callers provide us a "typed nil" *os.File here, which would - // cause us to panic below if we tried to use it. - if f, ok := r.(*os.File); ok && f == nil { - return nil, ErrNoState - } - - var diags tfdiags.Diagnostics - - // We actually just buffer the whole thing in memory, because states are - // generally not huge and we need to do be able to sniff for a version - // number before full parsing. - src, err := ioutil.ReadAll(r) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read state file", - fmt.Sprintf("The state file could not be read: %s", err), - )) - return nil, diags.Err() - } - - if len(src) == 0 { - return nil, ErrNoState - } - - state, diags := readState(src) - if diags.HasErrors() { - return nil, diags.Err() - } - - if state == nil { - // Should never happen - panic("readState returned nil state with no errors") - } - - if state.TerraformVersion != nil && state.TerraformVersion.GreaterThan(tfversion.SemVer) { - return state, fmt.Errorf( - "state snapshot was created by Terraform v%s, which is newer than current v%s; upgrade to Terraform v%s or greater to work with this state", - state.TerraformVersion, - tfversion.SemVer, - state.TerraformVersion, - ) - } - - return state, diags.Err() -} - -func readState(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if looksLikeVersion0(src) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state is stored in a legacy binary format that is not supported since Terraform v0.7. To continue, first upgrade the state using Terraform 0.6.16 or earlier.", - )) - return nil, diags - } - - version, versionDiags := sniffJSONStateVersion(src) - diags = diags.Append(versionDiags) - if versionDiags.HasErrors() { - return nil, diags - } - - switch version { - case 0: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state file uses JSON syntax but has a version number of zero. 
There was never a JSON-based state format zero, so this state file is invalid and cannot be processed.", - )) - return nil, diags - case 1: - return readStateV1(src) - case 2: - return readStateV2(src) - case 3: - return readStateV3(src) - case 4: - return readStateV4(src) - default: - thisVersion := tfversion.SemVer.String() - creatingVersion := sniffJSONStateTerraformVersion(src) - switch { - case creatingVersion != "": - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file was created by Terraform %s.", version, thisVersion, creatingVersion), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file may have been created by a newer version of Terraform.", version, thisVersion), - )) - } - return nil, diags - } -} - -func sniffJSONStateVersion(src []byte) (uint64, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - type VersionSniff struct { - Version *uint64 `json:"version"` - } - var sniff VersionSniff - err := json.Unmarshal(src, &sniff) - if err != nil { - switch tErr := err.(type) { - case *json.SyntaxError: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), - )) - case *json.UnmarshalTypeError: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The version in the state file is %s. A positive whole number is required.", tErr.Value), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state file could not be parsed as JSON.", - )) - } - } - - if sniff.Version == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state file does not have a \"version\" attribute, which is required to identify the format version.", - )) - return 0, diags - } - - return *sniff.Version, diags -} - -// sniffJSONStateTerraformVersion attempts to sniff the Terraform version -// specification from the given state file source code. The result is either -// a version string or an empty string if no version number could be extracted. -// -// This is a best-effort function intended to produce nicer error messages. It -// should not be used for any real processing. -func sniffJSONStateTerraformVersion(src []byte) string { - type VersionSniff struct { - Version string `json:"terraform_version"` - } - var sniff VersionSniff - - err := json.Unmarshal(src, &sniff) - if err != nil { - return "" - } - - // Attempt to parse the string as a version so we won't report garbage - // as a version number. - _, err = version.NewVersion(sniff.Version) - if err != nil { - return "" - } - - return sniff.Version -} - -// unsupportedFormat is a diagnostic summary message for when the state file -// seems to not be a state file at all, or is not a supported version. -// -// Use invalidFormat instead for the subtly-different case of "this looks like -// it's intended to be a state file but it's not structured correctly". 
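// Illustrative sketch (standalone, not the statefile package's API) of the
// "sniff before parse" approach above: only the "version" attribute is
// decoded first, so the matching format-specific reader can be selected
// before committing to a full unmarshal.
package main

import (
	"encoding/json"
	"fmt"
)

func sniffJSONStateVersion(src []byte) (uint64, error) {
	var sniff struct {
		Version *uint64 `json:"version"` // pointer distinguishes "absent" from zero
	}
	if err := json.Unmarshal(src, &sniff); err != nil {
		return 0, fmt.Errorf("state file could not be parsed as JSON: %w", err)
	}
	if sniff.Version == nil {
		return 0, fmt.Errorf(`state file has no "version" attribute`)
	}
	return *sniff.Version, nil
}

func main() {
	v, err := sniffJSONStateVersion([]byte(`{"version": 4, "terraform_version": "0.12.24"}`))
	fmt.Println(v, err) // 4 <nil>
}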
-const unsupportedFormat = "Unsupported state file format" - -const upgradeFailed = "State format upgrade failed" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go deleted file mode 100644 index 9b533317bd..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go +++ /dev/null @@ -1,23 +0,0 @@ -package statefile - -// looksLikeVersion0 sniffs for the signature indicating a version 0 state -// file. -// -// Version 0 was the number retroactively assigned to Terraform's initial -// (unversioned) binary state file format, which was later superseded by the -// version 1 format in JSON. -// -// Version 0 is no longer supported, so this is used only to detect it and -// return a nice error to the user. -func looksLikeVersion0(src []byte) bool { - // Version 0 files begin with the magic prefix "tfstate". - const magic = "tfstate" - if len(src) < len(magic) { - // Not even long enough to have the magic prefix - return false - } - if string(src[0:len(magic)]) == magic { - return true - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go deleted file mode 100644 index 85b422ad22..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go +++ /dev/null @@ -1,167 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func readStateV1(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV1 := &stateV1{} - err := json.Unmarshal(src, sV1) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV1(sV1) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV1(sV1 *stateV1) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV2, err := upgradeStateV1ToV2(sV1) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - upgradeFailed, - fmt.Sprintf("Error upgrading state file format from version 1 to version 2: %s.", err), - )) - return nil, diags - } - - file, prepDiags := prepareStateV2(sV2) - diags = diags.Append(prepDiags) - return file, diags -} - -// stateV1 is a representation of the legacy JSON state format version 1. -// -// It is only used to read version 1 JSON files prior to upgrading them to -// the current format. -type stateV1 struct { - // Version is the protocol version. "1" for a StateV1. - Version int `json:"version"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *remoteStateV1 `json:"remote,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*moduleStateV1 `json:"modules"` -} - -type remoteStateV1 struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` -} - -type moduleStateV1 struct { - // Path is the import path from the root module. 
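// Illustrative sketch (standalone) of the version-0 sniff defined above: the
// legacy binary format always began with the magic prefix "tfstate", so a
// cheap prefix check, equivalent to the manual comparison in the deleted
// code, is enough to reject it with a targeted error.
package main

import (
	"bytes"
	"fmt"
)

func looksLikeVersion0(src []byte) bool {
	return bytes.HasPrefix(src, []byte("tfstate"))
}

func main() {
	fmt.Println(looksLikeVersion0([]byte("tfstate\x00\x01"))) // true: legacy binary state
	fmt.Println(looksLikeVersion0([]byte(`{"version": 4}`))) // false: JSON-era state
}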
Modules imports are - // always disjoint, so the path represents amodule tree - Path []string `json:"path"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]string `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*resourceStateV1 `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: an module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on,omitempty"` -} - -type resourceStateV1 struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on,omitempty"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instances on which providers will act. - Primary *instanceStateV1 `json:"primary"` - - // Tainted is used to track any underlying instances that - // have been created but are in a bad or unknown state and - // need to be cleaned up subsequently. In the - // standard case, there is only at most a single instance. - // However, in pathological cases, it is possible for the number - // of instances to accumulate. - Tainted []*instanceStateV1 `json:"tainted,omitempty"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. If there were problems creating the - // replacement, the instance remains in the Deposed list so it can be - // destroyed in a future run. Functionally, Deposed instances are very - // similar to Tainted instances in that Terraform is only tracking them in - // order to remember to destroy them. - Deposed []*instanceStateV1 `json:"deposed,omitempty"` - - // Provider is used when a resource is connected to a provider with an alias. 
- // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider,omitempty"` -} - -type instanceStateV1 struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes,omitempty"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. - Meta map[string]string `json:"meta,omitempty"` -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go deleted file mode 100644 index 0b417e1c40..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go +++ /dev/null @@ -1,172 +0,0 @@ -package statefile - -import ( - "fmt" - "log" - - "github.com/mitchellh/copystructure" -) - -// upgradeStateV1ToV2 is used to upgrade a V1 state representation -// into a V2 state representation -func upgradeStateV1ToV2(old *stateV1) (*stateV2, error) { - log.Printf("[TRACE] statefile.Read: upgrading format from v1 to v2") - if old == nil { - return nil, nil - } - - remote, err := old.Remote.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - - modules := make([]*moduleStateV2, len(old.Modules)) - for i, module := range old.Modules { - upgraded, err := module.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - modules[i] = upgraded - } - if len(modules) == 0 { - modules = nil - } - - newState := &stateV2{ - Version: 2, - Serial: old.Serial, - Remote: remote, - Modules: modules, - } - - return newState, nil -} - -func (old *remoteStateV1) upgradeToV2() (*remoteStateV2, error) { - if old == nil { - return nil, nil - } - - config, err := copystructure.Copy(old.Config) - if err != nil { - return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) - } - - return &remoteStateV2{ - Type: old.Type, - Config: config.(map[string]string), - }, nil -} - -func (old *moduleStateV1) upgradeToV2() (*moduleStateV2, error) { - if old == nil { - return nil, nil - } - - pathRaw, err := copystructure.Copy(old.Path) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - path, ok := pathRaw.([]string) - if !ok { - return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") - } - if len(path) == 0 { - // We found some V1 states with a nil path. Assume root. 
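// Illustrative sketch (hypothetical types) of two normalizations performed at
// this point in the V1-to-V2 upgrade: a nil V1 module path is assumed to be
// the root module, and V1's bare string outputs are wrapped in V2's
// structured form.
package main

import "fmt"

type outputV2 struct {
	Type      string      `json:"type"`
	Value     interface{} `json:"value"`
	Sensitive bool        `json:"sensitive"`
}

func upgradeModule(path []string, outputs map[string]string) ([]string, map[string]*outputV2) {
	if len(path) == 0 {
		path = []string{"root"} // some V1 states carried a nil path; assume root
	}
	upgraded := make(map[string]*outputV2, len(outputs))
	for name, value := range outputs {
		// V1 only supported string outputs, so the type is always "string"
		// and sensitivity defaults to false.
		upgraded[name] = &outputV2{Type: "string", Value: value}
	}
	return path, upgraded
}

func main() {
	path, outs := upgradeModule(nil, map[string]string{"vpc_id": "vpc-123"})
	fmt.Println(path, outs["vpc_id"].Type) // [root] string
}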
- path = []string{"root"} - } - - // Outputs needs upgrading to use the new structure - outputs := make(map[string]*outputStateV2) - for key, output := range old.Outputs { - outputs[key] = &outputStateV2{ - Type: "string", - Value: output, - Sensitive: false, - } - } - - resources := make(map[string]*resourceStateV2) - for key, oldResource := range old.Resources { - upgraded, err := oldResource.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - resources[key] = upgraded - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - - return &moduleStateV2{ - Path: path, - Outputs: outputs, - Resources: resources, - Dependencies: dependencies.([]string), - }, nil -} - -func (old *resourceStateV1) upgradeToV2() (*resourceStateV2, error) { - if old == nil { - return nil, nil - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - primary, err := old.Primary.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - deposed := make([]*instanceStateV2, len(old.Deposed)) - for i, v := range old.Deposed { - upgraded, err := v.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - deposed[i] = upgraded - } - if len(deposed) == 0 { - deposed = nil - } - - return &resourceStateV2{ - Type: old.Type, - Dependencies: dependencies.([]string), - Primary: primary, - Deposed: deposed, - Provider: old.Provider, - }, nil -} - -func (old *instanceStateV1) upgradeToV2() (*instanceStateV2, error) { - if old == nil { - return nil, nil - } - - attributes, err := copystructure.Copy(old.Attributes) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - meta, err := copystructure.Copy(old.Meta) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - newMeta := make(map[string]interface{}) - for k, v := range meta.(map[string]string) { - newMeta[k] = v - } - - return &instanceStateV2{ - ID: old.ID, - Attributes: attributes.(map[string]string), - Meta: newMeta, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go deleted file mode 100644 index 6d10166b2e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go +++ /dev/null @@ -1,204 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func readStateV2(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV2 := &stateV2{} - err := json.Unmarshal(src, sV2) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV2(sV2) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV2(sV2 *stateV2) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV3, err := upgradeStateV2ToV3(sV2) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - upgradeFailed, - fmt.Sprintf("Error upgrading state file format from version 2 to version 3: %s.", err), - )) - return nil, diags - } - - file, prepDiags := prepareStateV3(sV3) - diags 
= diags.Append(prepDiags) - return file, diags -} - -// stateV2 is a representation of the legacy JSON state format version 2. -// -// It is only used to read version 2 JSON files prior to upgrading them to -// the current format. -type stateV2 struct { - // Version is the state file protocol version. - Version int `json:"version"` - - // TFVersion is the version of Terraform that wrote this state. - TFVersion string `json:"terraform_version,omitempty"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Lineage is set when a new, blank state is created and then - // never updated. This allows us to determine whether the serials - // of two states can be meaningfully compared. - // Apart from the guarantee that collisions between two lineages - // are very unlikely, this value is opaque and external callers - // should only compare lineage strings byte-for-byte for equality. - Lineage string `json:"lineage"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *remoteStateV2 `json:"remote,omitempty"` - - // Backend tracks the configuration for the backend in use with - // this state. This is used to track any changes in the backend - // configuration. - Backend *backendStateV2 `json:"backend,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*moduleStateV2 `json:"modules"` -} - -type remoteStateV2 struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` -} - -type outputStateV2 struct { - // Sensitive describes whether the output is considered sensitive, - // which may lead to masking the value on screen in some cases. - Sensitive bool `json:"sensitive"` - // Type describes the structure of Value. Valid values are "string", - // "map" and "list" - Type string `json:"type"` - // Value contains the value of the output, in the structure described - // by the Type field. - Value interface{} `json:"value"` -} - -type moduleStateV2 struct { - // Path is the import path from the root module. Module imports are - // always disjoint, so the path represents a module tree - Path []string `json:"path"` - - // Locals are kept only transiently in-memory, because we can always - // re-compute them. - Locals map[string]interface{} `json:"-"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]*outputStateV2 `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*resourceStateV2 `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: a module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. 
If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` -} - -type resourceStateV2 struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instance on which providers will act. - Primary *instanceStateV2 `json:"primary"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. - // - // If there were problems creating the replacement Primary, the Deposed - // instance and the (now tainted) replacement Primary will be swapped so the - // tainted replacement will be cleaned up instead. - // - // An instance will remain in the Deposed list until it is successfully - // destroyed and purged. - Deposed []*instanceStateV2 `json:"deposed"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider"` -} - -type instanceStateV2 struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. The value here must only contain Go primitives - // and collections. - Meta map[string]interface{} `json:"meta"` - - // Tainted is used to mark a resource for recreation. 
- Tainted bool `json:"tainted"` -} - -type backendStateV2 struct { - Type string `json:"type"` // Backend type - ConfigRaw json.RawMessage `json:"config"` // Backend raw config - Hash uint64 `json:"hash"` // Hash of portion of configuration from config files -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go deleted file mode 100644 index 2d03c07c9d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go +++ /dev/null @@ -1,145 +0,0 @@ -package statefile - -import ( - "fmt" - "log" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/mitchellh/copystructure" -) - -func upgradeStateV2ToV3(old *stateV2) (*stateV3, error) { - if old == nil { - return (*stateV3)(nil), nil - } - - var new *stateV3 - { - copy, err := copystructure.Config{Lock: true}.Copy(old) - if err != nil { - panic(err) - } - newWrongType := copy.(*stateV2) - newRightType := (stateV3)(*newWrongType) - new = &newRightType - } - - // Set the new version number - new.Version = 3 - - // Change the counts for things which look like maps to use the % - // syntax. Remove counts for empty collections - they will be added - // back in later. - for _, module := range new.Modules { - for _, resource := range module.Resources { - // Upgrade Primary - if resource.Primary != nil { - upgradeAttributesV2ToV3(resource.Primary) - } - - // Upgrade Deposed - for _, deposed := range resource.Deposed { - upgradeAttributesV2ToV3(deposed) - } - } - } - - return new, nil -} - -func upgradeAttributesV2ToV3(instanceState *instanceStateV2) error { - collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) - collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) - - // Identify the key prefix of anything which is a collection - var collectionKeyPrefixes []string - for key := range instanceState.Attributes { - if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) - } - } - sort.Strings(collectionKeyPrefixes) - - log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) - - // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not - // run very often. - for _, prefix := range collectionKeyPrefixes { - // First get the actual keys that belong to this prefix - var potentialKeysMatching []string - for key := range instanceState.Attributes { - if strings.HasPrefix(key, prefix) { - potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) - } - } - sort.Strings(potentialKeysMatching) - - var actualKeysMatching []string - for _, key := range potentialKeysMatching { - if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - actualKeysMatching = append(actualKeysMatching, submatches[0][1]) - } else { - if key != "#" { - actualKeysMatching = append(actualKeysMatching, key) - } - } - } - actualKeysMatching = uniqueSortedStrings(actualKeysMatching) - - // Now inspect the keys in order to determine whether this is most likely to be - // a map, list or set. There is room for error here, so we log in each case. If - // there is no method of telling, we remove the key from the InstanceState in - // order that it will be recreated. 
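The collection detection just shown can be exercised in isolation. Below is a minimal, self-contained sketch (hypothetical attribute data, but the same regular expression as the deleted upgrade code) of how count keys identify flatmap collections:

package main

import (
	"fmt"
	"regexp"
	"sort"
)

func main() {
	// Flatmap attributes as stored in a V2 state: each collection records
	// its length under a "<prefix>.#" key.
	attrs := map[string]string{
		"ami":          "ami-abc123",
		"tags.#":       "2",
		"tags.Name":    "web",
		"tags.Env":     "prod",
		"subnet_ids.#": "1",
		"subnet_ids.0": "subnet-123",
	}

	collectionKey := regexp.MustCompile(`^(.*\.)#$`)
	var prefixes []string
	for key := range attrs {
		if m := collectionKey.FindStringSubmatch(key); m != nil {
			prefixes = append(prefixes, m[1])
		}
	}
	sort.Strings(prefixes)

	// "tags" has non-numeric subkeys, so the upgrade would rewrite its
	// count key to "tags.%"; "subnet_ids" keeps "subnet_ids.#".
	fmt.Println(prefixes) // [subnet_ids. tags.]
}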
Again, this could be rolled into fewer loops - but we prefer clarity. - - oldCountKey := fmt.Sprintf("%s#", prefix) - - // First, detect "obvious" maps - which have non-numeric keys (mostly). - hasNonNumericKeys := false - for _, key := range actualKeysMatching { - if _, err := strconv.Atoi(key); err != nil { - hasNonNumericKeys = true - } - } - if hasNonNumericKeys { - newCountKey := fmt.Sprintf("%s%%", prefix) - - instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", - strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) - } - - // Now detect empty collections and remove them from state. - if len(actualKeysMatching) == 0 { - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", - strings.TrimSuffix(prefix, ".")) - } - } - - return nil -} - -// uniqueSortedStrings removes duplicates from a slice of strings and returns -// a sorted slice of the unique strings. -func uniqueSortedStrings(input []string) []string { - uniquemap := make(map[string]struct{}) - for _, str := range input { - uniquemap[str] = struct{}{} - } - - output := make([]string, len(uniquemap)) - - i := 0 - for key := range uniquemap { - output[i] = key - i = i + 1 - } - - sort.Strings(output) - return output -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go deleted file mode 100644 index 1c81e7169e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go +++ /dev/null @@ -1,50 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func readStateV3(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV3 := &stateV3{} - err := json.Unmarshal(src, sV3) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV3(sV3) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV3(sV3 *stateV3) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV4, err := upgradeStateV3ToV4(sV3) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - upgradeFailed, - fmt.Sprintf("Error upgrading state file format from version 3 to version 4: %s.", err), - )) - return nil, diags - } - - file, prepDiags := prepareStateV4(sV4) - diags = diags.Append(prepDiags) - return file, diags -} - -// stateV3 is a representation of the legacy JSON state format version 3. -// -// It is only used to read version 3 JSON files prior to upgrading them to -// the current format. -// -// The differences between version 2 and version 3 are only in the data and -// not in the structure, so stateV3 actually shares the same structs as -// stateV2. Type stateV3 represents that the data within is formatted as -// expected by the V3 format, rather than the V2 format. 
-type stateV3 stateV2 diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go deleted file mode 100644 index f08a62b2d5..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go +++ /dev/null @@ -1,444 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) { - - if old.Serial < 0 { - // The new format is using uint64 here, which should be fine for any - // real state (we only used positive integers in practice) but we'll - // catch this explicitly here to avoid weird behavior if a state file - // has been tampered with in some way. - return nil, fmt.Errorf("state has serial less than zero, which is invalid") - } - - new := &stateV4{ - TerraformVersion: old.TFVersion, - Serial: uint64(old.Serial), - Lineage: old.Lineage, - RootOutputs: map[string]outputStateV4{}, - Resources: []resourceStateV4{}, - } - - if new.TerraformVersion == "" { - // Older formats considered this to be optional, but now it's required - // and so we'll stub it out with something that's definitely older - // than the version that really created this state. - new.TerraformVersion = "0.0.0" - } - - for _, msOld := range old.Modules { - if len(msOld.Path) < 1 || msOld.Path[0] != "root" { - return nil, fmt.Errorf("state contains invalid module path %#v", msOld.Path) - } - - // Convert legacy-style module address into our newer address type. - // Since these old formats are only generated by versions of Terraform - // that don't support count and for_each on modules, we can just assume - // all of the modules are unkeyed. - moduleAddr := make(addrs.ModuleInstance, len(msOld.Path)-1) - for i, name := range msOld.Path[1:] { - moduleAddr[i] = addrs.ModuleInstanceStep{ - Name: name, - InstanceKey: addrs.NoKey, - } - } - - // In a v3 state file, a "resource state" is actually an instance - // state, so we need to fill in a missing level of hierarchy here - // by lazily creating resource states as we encounter them. - // We'll track them in here, keyed on the string representation of - // the resource address. - resourceStates := map[string]*resourceStateV4{} - - for legacyAddr, rsOld := range msOld.Resources { - instAddr, err := parseLegacyResourceAddress(legacyAddr) - if err != nil { - return nil, err - } - - resAddr := instAddr.Resource - rs, exists := resourceStates[resAddr.String()] - if !exists { - var modeStr string - switch resAddr.Mode { - case addrs.ManagedResourceMode: - modeStr = "managed" - case addrs.DataResourceMode: - modeStr = "data" - default: - return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode %#v", resAddr, resAddr.Mode) - } - - // In state versions prior to 4 we allowed each instance of a - // resource to have its own provider configuration address, - // which makes no real sense in practice because providers - // are associated with resources in the configuration. 
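For illustration, here is a small standalone sketch of the path conversion just described, using plain strings rather than the internal addrs package (the legacyModuleAddr helper is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// legacyModuleAddr renders a pre-V4 module path such as
// []string{"root", "app", "db"} in the newer "module.app.module.db" style.
// All steps are unkeyed, matching the assumption made in the upgrade above.
func legacyModuleAddr(path []string) string {
	var parts []string
	for _, name := range path[1:] { // path[0] is always "root"
		parts = append(parts, "module."+name)
	}
	return strings.Join(parts, ".")
}

func main() {
	fmt.Printf("%q\n", legacyModuleAddr([]string{"root"}))              // ""
	fmt.Printf("%q\n", legacyModuleAddr([]string{"root", "app", "db"})) // "module.app.module.db"
}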
We - // elevate that to the resource level during this upgrade, - // implicitly taking the provider address of the first instance - // we encounter for each resource. While this is lossy in - // theory, in practice there is no reason for these values to - // differ between instances. - var providerAddr addrs.AbsProviderConfig - oldProviderAddr := rsOld.Provider - if strings.Contains(oldProviderAddr, "provider.") { - // Smells like a new-style provider address, but we'll test it. - var diags tfdiags.Diagnostics - providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr) - if diags.HasErrors() { - return nil, fmt.Errorf("invalid provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err()) - } - } else { - // Smells like an old-style module-local provider address, - // which we'll need to migrate. We'll assume it's referring - // to the same module the resource is in, which might be - // incorrect but it'll get fixed up next time any updates - // are made to an instance. - if oldProviderAddr != "" { - localAddr, diags := addrs.ParseProviderConfigCompactStr(oldProviderAddr) - if diags.HasErrors() { - return nil, fmt.Errorf("invalid legacy provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err()) - } - providerAddr = localAddr.Absolute(moduleAddr) - } else { - providerAddr = resAddr.DefaultProviderConfig().Absolute(moduleAddr) - } - } - - rs = &resourceStateV4{ - Module: moduleAddr.String(), - Mode: modeStr, - Type: resAddr.Type, - Name: resAddr.Name, - Instances: []instanceObjectStateV4{}, - ProviderConfig: providerAddr.String(), - } - resourceStates[resAddr.String()] = rs - } - - // Now we'll deal with the instance itself, which may either be - // the first instance in a resource we just created or an additional - // instance for a resource added on a prior loop. - instKey := instAddr.Key - if isOld := rsOld.Primary; isOld != nil { - isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, states.NotDeposed) - if err != nil { - return nil, fmt.Errorf("failed to migrate primary generation of %s: %s", instAddr, err) - } - rs.Instances = append(rs.Instances, *isNew) - } - for i, isOld := range rsOld.Deposed { - // When we migrate old instances we'll use sequential deposed - // keys just so that the upgrade result is deterministic. New - // deposed keys allocated moving forward will be pseudorandomly - // selected, but we check for collisions and so these - // non-random ones won't hurt. - deposedKey := states.DeposedKey(fmt.Sprintf("%08x", i+1)) - isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, deposedKey) - if err != nil { - return nil, fmt.Errorf("failed to migrate deposed generation index %d of %s: %s", i, instAddr, err) - } - rs.Instances = append(rs.Instances, *isNew) - } - - if instKey != addrs.NoKey && rs.EachMode == "" { - rs.EachMode = "list" - } - } - - for _, rs := range resourceStates { - new.Resources = append(new.Resources, *rs) - } - - if len(msOld.Path) == 1 && msOld.Path[0] == "root" { - // We'll migrate the outputs for this module too, then. - for name, oldOS := range msOld.Outputs { - newOS := outputStateV4{ - Sensitive: oldOS.Sensitive, - } - - valRaw := oldOS.Value - valSrc, err := json.Marshal(valRaw) - if err != nil { - // Should never happen, because this value came from JSON - // in the first place and so we're just round-tripping here. 
- return nil, fmt.Errorf("failed to serialize output %q value as JSON: %s", name, err) - } - - // The "type" field in state V2 wasn't really that useful - // since it was only able to capture string vs. list vs. map. - // For this reason, during upgrade we'll just discard it - // altogether and use cty's idea of the implied type of - // turning our old value into JSON. - ty, err := ctyjson.ImpliedType(valSrc) - if err != nil { - // REALLY should never happen, because we literally just - // encoded this as JSON above! - return nil, fmt.Errorf("failed to parse output %q value from JSON: %s", name, err) - } - - // ImpliedType tends to produce structural types, but since older - // version of Terraform didn't support those a collection type - // is probably what was intended, so we'll see if we can - // interpret our value as one. - ty = simplifyImpliedValueType(ty) - - tySrc, err := ctyjson.MarshalType(ty) - if err != nil { - return nil, fmt.Errorf("failed to serialize output %q type as JSON: %s", name, err) - } - - newOS.ValueRaw = json.RawMessage(valSrc) - newOS.ValueTypeRaw = json.RawMessage(tySrc) - - new.RootOutputs[name] = newOS - } - } - } - - new.normalize() - - return new, nil -} - -func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, instKey addrs.InstanceKey, deposedKey states.DeposedKey) (*instanceObjectStateV4, error) { - - // Schema versions were, in prior formats, a private concern of the provider - // SDK, and not a first-class concept in the state format. Here we're - // sniffing for the pre-0.12 SDK's way of representing schema versions - // and promoting it to our first-class field if we find it. We'll ignore - // it if it doesn't look like what the SDK would've written. If this - // sniffing fails then we'll assume schema version 0. - var schemaVersion uint64 - migratedSchemaVersion := false - if raw, exists := isOld.Meta["schema_version"]; exists { - switch tv := raw.(type) { - case string: - v, err := strconv.ParseUint(tv, 10, 64) - if err == nil { - schemaVersion = v - migratedSchemaVersion = true - } - case int: - schemaVersion = uint64(tv) - migratedSchemaVersion = true - case float64: - schemaVersion = uint64(tv) - migratedSchemaVersion = true - } - } - - private := map[string]interface{}{} - for k, v := range isOld.Meta { - if k == "schema_version" && migratedSchemaVersion { - // We're gonna promote this into our first-class schema version field - continue - } - private[k] = v - } - var privateJSON []byte - if len(private) != 0 { - var err error - privateJSON, err = json.Marshal(private) - if err != nil { - // This shouldn't happen, because the Meta values all came from JSON - // originally anyway. 
- return nil, fmt.Errorf("cannot serialize private instance object data: %s", err) - } - } - - var status string - if isOld.Tainted { - status = "tainted" - } - - var instKeyRaw interface{} - switch tk := instKey.(type) { - case addrs.IntKey: - instKeyRaw = int(tk) - case addrs.StringKey: - instKeyRaw = string(tk) - default: - if instKeyRaw != nil { - return nil, fmt.Errorf("unsupported instance key: %#v", instKey) - } - } - - var attributes map[string]string - if isOld.Attributes != nil { - attributes = make(map[string]string, len(isOld.Attributes)) - for k, v := range isOld.Attributes { - attributes[k] = v - } - } - if isOld.ID != "" { - // As a special case, if we don't already have an "id" attribute and - // yet there's a non-empty first-class ID on the old object then we'll - // create a synthetic id attribute to avoid losing that first-class id. - // In practice this generally arises only in tests where state literals - // are hand-written in a non-standard way; real code prior to 0.12 - // would always force the first-class ID to be copied into the - // id attribute before storing. - if attributes == nil { - attributes = make(map[string]string, len(isOld.Attributes)) - } - if idVal := attributes["id"]; idVal == "" { - attributes["id"] = isOld.ID - } - } - - dependencies := make([]string, len(rsOld.Dependencies)) - for i, v := range rsOld.Dependencies { - depStr, err := parseLegacyDependency(v) - if err != nil { - return nil, fmt.Errorf("invalid dependency reference %q: %s", v, err) - } - dependencies[i] = depStr - } - - return &instanceObjectStateV4{ - IndexKey: instKeyRaw, - Status: status, - Deposed: string(deposedKey), - AttributesFlat: attributes, - Dependencies: dependencies, - SchemaVersion: schemaVersion, - PrivateRaw: privateJSON, - }, nil -} - -// parseLegacyResourceAddress parses the different identifier format used -// state formats before version 4, like "instance.name.0". -func parseLegacyResourceAddress(s string) (addrs.ResourceInstance, error) { - var ret addrs.ResourceInstance - - // Split based on ".". Every resource address should have at least two - // elements (type and name). - parts := strings.Split(s, ".") - if len(parts) < 2 || len(parts) > 4 { - return ret, fmt.Errorf("invalid internal resource address format: %s", s) - } - - // Data resource if we have at least 3 parts and the first one is data - ret.Resource.Mode = addrs.ManagedResourceMode - if len(parts) > 2 && parts[0] == "data" { - ret.Resource.Mode = addrs.DataResourceMode - parts = parts[1:] - } - - // If we're not a data resource and we have more than 3, then it is an error - if len(parts) > 3 && ret.Resource.Mode != addrs.DataResourceMode { - return ret, fmt.Errorf("invalid internal resource address format: %s", s) - } - - // Build the parts of the resource address that are guaranteed to exist - ret.Resource.Type = parts[0] - ret.Resource.Name = parts[1] - ret.Key = addrs.NoKey - - // If we have more parts, then we have an index. Parse that. - if len(parts) > 2 { - idx, err := strconv.ParseInt(parts[2], 0, 0) - if err != nil { - return ret, fmt.Errorf("error parsing resource address %q: %s", s, err) - } - - ret.Key = addrs.IntKey(idx) - } - - return ret, nil -} - -// simplifyImpliedValueType attempts to heuristically simplify a value type -// derived from a legacy stored output value into something simpler that -// is closer to what would've fitted into the pre-v0.12 value type system. 
-func simplifyImpliedValueType(ty cty.Type) cty.Type { - switch { - case ty.IsTupleType(): - // If all of the element types are the same then we'll make this - // a list instead. This is very likely to be true, since prior versions - // of Terraform did not officially support mixed-type collections. - - if ty.Equals(cty.EmptyTuple) { - // Don't know what the element type would be, then. - return ty - } - - etys := ty.TupleElementTypes() - ety := etys[0] - for _, other := range etys[1:] { - if !other.Equals(ety) { - // inconsistent types - return ty - } - } - ety = simplifyImpliedValueType(ety) - return cty.List(ety) - - case ty.IsObjectType(): - // If all of the attribute types are the same then we'll make this - // a map instead. This is very likely to be true, since prior versions - // of Terraform did not officially support mixed-type collections. - - if ty.Equals(cty.EmptyObject) { - // Don't know what the element type would be, then. - return ty - } - - atys := ty.AttributeTypes() - var ety cty.Type - for _, other := range atys { - if ety == cty.NilType { - ety = other - continue - } - if !other.Equals(ety) { - // inconsistent types - return ty - } - } - ety = simplifyImpliedValueType(ety) - return cty.Map(ety) - - default: - // No other normalizations are possible - return ty - } -} - -func parseLegacyDependency(s string) (string, error) { - parts := strings.Split(s, ".") - ret := parts[0] - for _, part := range parts[1:] { - if part == "*" { - break - } - if i, err := strconv.Atoi(part); err == nil { - ret = ret + fmt.Sprintf("[%d]", i) - break - } - ret = ret + "." + part - } - - // The result must parse as a reference, or else we'll create an invalid - // state file. - var diags tfdiags.Diagnostics - _, diags = addrs.ParseRefStr(ret) - if diags.HasErrors() { - return "", diags.Err() - } - - return ret, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go deleted file mode 100644 index 164b57f827..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go +++ /dev/null @@ -1,604 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - "io" - "sort" - - version "github.com/hashicorp/go-version" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func readStateV4(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV4 := &stateV4{} - err := json.Unmarshal(src, sV4) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV4(sV4) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - var tfVersion *version.Version - if sV4.TerraformVersion != "" { - var err error - tfVersion, err = version.NewVersion(sV4.TerraformVersion) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid Terraform version string", - fmt.Sprintf("State file claims to have been written by Terraform version %q, which is not a valid version string.", sV4.TerraformVersion), - )) - } - } - - file := &File{ - TerraformVersion: tfVersion, - Serial: sV4.Serial, - Lineage: sV4.Lineage, - } - - state := 
states.NewState() - - for _, rsV4 := range sV4.Resources { - rAddr := addrs.Resource{ - Type: rsV4.Type, - Name: rsV4.Name, - } - switch rsV4.Mode { - case "managed": - rAddr.Mode = addrs.ManagedResourceMode - case "data": - rAddr.Mode = addrs.DataResourceMode - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource mode in state", - fmt.Sprintf("State contains a resource with mode %q (%q %q) which is not supported.", rsV4.Mode, rAddr.Type, rAddr.Name), - )) - continue - } - - moduleAddr := addrs.RootModuleInstance - if rsV4.Module != "" { - var addrDiags tfdiags.Diagnostics - moduleAddr, addrDiags = addrs.ParseModuleInstanceStr(rsV4.Module) - diags = diags.Append(addrDiags) - if addrDiags.HasErrors() { - continue - } - } - - providerAddr, addrDiags := addrs.ParseAbsProviderConfigStr(rsV4.ProviderConfig) - diags.Append(addrDiags) - if addrDiags.HasErrors() { - continue - } - - var eachMode states.EachMode - switch rsV4.EachMode { - case "": - eachMode = states.NoEach - case "list": - eachMode = states.EachList - case "map": - eachMode = states.EachMap - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource metadata in state", - fmt.Sprintf("Resource %s has invalid \"each\" value %q in state.", rAddr.Absolute(moduleAddr), eachMode), - )) - continue - } - - ms := state.EnsureModule(moduleAddr) - - // Ensure the resource container object is present in the state. - ms.SetResourceMeta(rAddr, eachMode, providerAddr) - - for _, isV4 := range rsV4.Instances { - keyRaw := isV4.IndexKey - var key addrs.InstanceKey - switch tk := keyRaw.(type) { - case int: - key = addrs.IntKey(tk) - case float64: - // Since JSON only has one number type, reading from encoding/json - // gives us a float64 here even if the number is whole. - // float64 has a smaller integer range than int, but in practice - // we rarely have more than a few tens of instances and so - // it's unlikely that we'll exhaust the 52 bits in a float64. - key = addrs.IntKey(int(tk)) - case string: - key = addrs.StringKey(tk) - default: - if keyRaw != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Resource %s has an instance with the invalid instance key %#v.", rAddr.Absolute(moduleAddr), keyRaw), - )) - continue - } - key = addrs.NoKey - } - - instAddr := rAddr.Instance(key) - - obj := &states.ResourceInstanceObjectSrc{ - SchemaVersion: isV4.SchemaVersion, - } - - { - // Instance attributes - switch { - case isV4.AttributesRaw != nil: - obj.AttrsJSON = isV4.AttributesRaw - case isV4.AttributesFlat != nil: - obj.AttrsFlat = isV4.AttributesFlat - default: - // This is odd, but we'll accept it and just treat the - // object as being empty. In practice this should arise - // only from the contrived sort of state objects we tend - // to hand-write inline in tests. 
- obj.AttrsJSON = []byte{'{', '}'} - } - } - - { - // Status - raw := isV4.Status - switch raw { - case "": - obj.Status = states.ObjectReady - case "tainted": - obj.Status = states.ObjectTainted - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Instance %s has invalid status %q.", instAddr.Absolute(moduleAddr), raw), - )) - continue - } - } - - if raw := isV4.PrivateRaw; len(raw) > 0 { - obj.Private = raw - } - - { - depsRaw := isV4.Dependencies - deps := make([]addrs.Referenceable, 0, len(depsRaw)) - for _, depRaw := range depsRaw { - ref, refDiags := addrs.ParseRefStr(depRaw) - diags = diags.Append(refDiags) - if refDiags.HasErrors() { - continue - } - if len(ref.Remaining) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Instance %s declares dependency on %q, which is not a reference to a dependable object.", instAddr.Absolute(moduleAddr), depRaw), - )) - } - if ref.Subject == nil { - // Should never happen - panic(fmt.Sprintf("parsing dependency %q for instance %s returned a nil address", depRaw, instAddr.Absolute(moduleAddr))) - } - deps = append(deps, ref.Subject) - } - obj.Dependencies = deps - } - - switch { - case isV4.Deposed != "": - dk := states.DeposedKey(isV4.Deposed) - if len(dk) != 8 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Instance %s has an object with deposed key %q, which is not correctly formatted.", instAddr.Absolute(moduleAddr), isV4.Deposed), - )) - continue - } - is := ms.ResourceInstance(instAddr) - if is.HasDeposed(dk) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Duplicate resource instance in state", - fmt.Sprintf("Instance %s deposed object %q appears multiple times in the state file.", instAddr.Absolute(moduleAddr), dk), - )) - continue - } - - ms.SetResourceInstanceDeposed(instAddr, dk, obj, providerAddr) - default: - is := ms.ResourceInstance(instAddr) - if is.HasCurrent() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Duplicate resource instance in state", - fmt.Sprintf("Instance %s appears multiple times in the state file.", instAddr.Absolute(moduleAddr)), - )) - continue - } - - ms.SetResourceInstanceCurrent(instAddr, obj, providerAddr) - } - } - - // We repeat this after creating the instances because - // SetResourceInstanceCurrent automatically resets this metadata based - // on the incoming objects. That behavior is useful when we're making - // piecemeal updates to the state during an apply, but when we're - // reading the state file we want to reflect its contents exactly. - ms.SetResourceMeta(rAddr, eachMode, providerAddr) - } - - // The root module is special in that we persist its attributes and thus - // need to reload them now. (For descendent modules we just re-calculate - // them based on the latest configuration on each run.) 
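The outputs reload that follows is the read half of a value/type round-trip through go-cty's JSON package; a minimal sketch of both halves (real ctyjson API, hypothetical data):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	// Writing: a V4 state stores each output as a JSON value plus a JSON
	// type specification.
	val := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	valRaw, err := ctyjson.Marshal(val, val.Type())
	if err != nil {
		panic(err)
	}
	typeRaw, err := ctyjson.MarshalType(val.Type())
	if err != nil {
		panic(err)
	}

	// Reading: decode the type first, then interpret the value with it,
	// mirroring the loop over sV4.RootOutputs below.
	ty, err := ctyjson.UnmarshalType(typeRaw)
	if err != nil {
		panic(err)
	}
	got, err := ctyjson.Unmarshal(valRaw, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.RawEquals(val)) // true
}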
- { - rootModule := state.RootModule() - for name, fos := range sV4.RootOutputs { - os := &states.OutputValue{} - os.Sensitive = fos.Sensitive - - ty, err := ctyjson.UnmarshalType([]byte(fos.ValueTypeRaw)) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid output value type in state", - fmt.Sprintf("The state file has an invalid type specification for output %q: %s.", name, err), - )) - continue - } - - val, err := ctyjson.Unmarshal([]byte(fos.ValueRaw), ty) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid output value saved in state", - fmt.Sprintf("The state file has an invalid value for output %q: %s.", name, err), - )) - continue - } - - os.Value = val - rootModule.OutputValues[name] = os - } - } - - file.State = state - return file, diags -} - -func writeStateV4(file *File, w io.Writer) tfdiags.Diagnostics { - // Here we'll convert back from the "File" representation to our - // stateV4 struct representation and write that. - // - // While we support legacy state formats for reading, we only support the - // latest for writing and so if a V5 is added in future then this function - // should be deleted and replaced with a writeStateV5, even though the - // read/prepare V4 functions above would stick around. - - var diags tfdiags.Diagnostics - if file == nil || file.State == nil { - panic("attempt to write nil state to file") - } - - var terraformVersion string - if file.TerraformVersion != nil { - terraformVersion = file.TerraformVersion.String() - } - - sV4 := &stateV4{ - TerraformVersion: terraformVersion, - Serial: file.Serial, - Lineage: file.Lineage, - RootOutputs: map[string]outputStateV4{}, - Resources: []resourceStateV4{}, - } - - for name, os := range file.State.RootModule().OutputValues { - src, err := ctyjson.Marshal(os.Value, os.Value.Type()) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize output value in state", - fmt.Sprintf("An error occurred while serializing output value %q: %s.", name, err), - )) - continue - } - - typeSrc, err := ctyjson.MarshalType(os.Value.Type()) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize output value in state", - fmt.Sprintf("An error occurred while serializing the type of output value %q: %s.", name, err), - )) - continue - } - - sV4.RootOutputs[name] = outputStateV4{ - Sensitive: os.Sensitive, - ValueRaw: json.RawMessage(src), - ValueTypeRaw: json.RawMessage(typeSrc), - } - } - - for _, ms := range file.State.Modules { - moduleAddr := ms.Addr - for _, rs := range ms.Resources { - resourceAddr := rs.Addr - - var mode string - switch resourceAddr.Mode { - case addrs.ManagedResourceMode: - mode = "managed" - case addrs.DataResourceMode: - mode = "data" - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize resource in state", - fmt.Sprintf("Resource %s has mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), resourceAddr.Mode), - )) - continue - } - - var eachMode string - switch rs.EachMode { - case states.NoEach: - eachMode = "" - case states.EachList: - eachMode = "list" - case states.EachMap: - eachMode = "map" - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize resource in state", - fmt.Sprintf("Resource %s has \"each\" mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), rs.EachMode), - )) - continue - } - - sV4.Resources 
= append(sV4.Resources, resourceStateV4{ - Module: moduleAddr.String(), - Mode: mode, - Type: resourceAddr.Type, - Name: resourceAddr.Name, - EachMode: eachMode, - ProviderConfig: rs.ProviderConfig.String(), - Instances: []instanceObjectStateV4{}, - }) - rsV4 := &(sV4.Resources[len(sV4.Resources)-1]) - - for key, is := range rs.Instances { - if is.HasCurrent() { - var objDiags tfdiags.Diagnostics - rsV4.Instances, objDiags = appendInstanceObjectStateV4( - rs, is, key, is.Current, states.NotDeposed, - rsV4.Instances, - ) - diags = diags.Append(objDiags) - } - for dk, obj := range is.Deposed { - var objDiags tfdiags.Diagnostics - rsV4.Instances, objDiags = appendInstanceObjectStateV4( - rs, is, key, obj, dk, - rsV4.Instances, - ) - diags = diags.Append(objDiags) - } - } - } - } - - sV4.normalize() - - src, err := json.MarshalIndent(sV4, "", " ") - if err != nil { - // Shouldn't happen if we do our conversion to *stateV4 correctly above. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize state", - fmt.Sprintf("An error occurred while serializing the state to save it. This is a bug in Terraform and should be reported: %s.", err), - )) - return diags - } - src = append(src, '\n') - - _, err = w.Write(src) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to write state", - fmt.Sprintf("An error occurred while writing the serialized state: %s.", err), - )) - return diags - } - - return diags -} - -func appendInstanceObjectStateV4(rs *states.Resource, is *states.ResourceInstance, key addrs.InstanceKey, obj *states.ResourceInstanceObjectSrc, deposed states.DeposedKey, isV4s []instanceObjectStateV4) ([]instanceObjectStateV4, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - var status string - switch obj.Status { - case states.ObjectReady: - status = "" - case states.ObjectTainted: - status = "tainted" - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize resource instance in state", - fmt.Sprintf("Instance %s has status %s, which cannot be saved in state.", rs.Addr.Instance(key), obj.Status), - )) - } - - var privateRaw []byte - if len(obj.Private) > 0 { - privateRaw = obj.Private - } - - deps := make([]string, len(obj.Dependencies)) - for i, depAddr := range obj.Dependencies { - deps[i] = depAddr.String() - } - - var rawKey interface{} - switch tk := key.(type) { - case addrs.IntKey: - rawKey = int(tk) - case addrs.StringKey: - rawKey = string(tk) - default: - if key != addrs.NoKey { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize resource instance in state", - fmt.Sprintf("Instance %s has an unsupported instance key: %#v.", rs.Addr.Instance(key), key), - )) - } - } - - return append(isV4s, instanceObjectStateV4{ - IndexKey: rawKey, - Deposed: string(deposed), - Status: status, - SchemaVersion: obj.SchemaVersion, - AttributesFlat: obj.AttrsFlat, - AttributesRaw: obj.AttrsJSON, - PrivateRaw: privateRaw, - Dependencies: deps, - }), diags -} - -type stateV4 struct { - Version stateVersionV4 `json:"version"` - TerraformVersion string `json:"terraform_version"` - Serial uint64 `json:"serial"` - Lineage string `json:"lineage"` - RootOutputs map[string]outputStateV4 `json:"outputs"` - Resources []resourceStateV4 `json:"resources"` -} - -// normalize makes some in-place changes to normalize the way items are -// stored to ensure that two functionally-equivalent states will be stored -// identically. 
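The ordering that normalize relies on is implemented by the sort types defined below; the same multi-key comparison pattern can be sketched on its own (hypothetical record type):

package main

import (
	"fmt"
	"sort"
)

type record struct {
	Mode, Type, Name string
}

func main() {
	rs := []record{
		{"managed", "github_repository", "b"},
		{"data", "github_user", "me"},
		{"managed", "github_repository", "a"},
	}
	// Stable multi-key sort: compare mode, then type, then name, in the
	// same spirit as sortResourcesV4.Less below.
	sort.SliceStable(rs, func(i, j int) bool {
		switch {
		case rs[i].Mode != rs[j].Mode:
			return rs[i].Mode < rs[j].Mode
		case rs[i].Type != rs[j].Type:
			return rs[i].Type < rs[j].Type
		default:
			return rs[i].Name < rs[j].Name
		}
	})
	fmt.Println(rs) // [{data github_user me} {managed github_repository a} {managed github_repository b}]
}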
-func (s *stateV4) normalize() { - sort.Stable(sortResourcesV4(s.Resources)) - for _, rs := range s.Resources { - sort.Stable(sortInstancesV4(rs.Instances)) - } -} - -type outputStateV4 struct { - ValueRaw json.RawMessage `json:"value"` - ValueTypeRaw json.RawMessage `json:"type"` - Sensitive bool `json:"sensitive,omitempty"` -} - -type resourceStateV4 struct { - Module string `json:"module,omitempty"` - Mode string `json:"mode"` - Type string `json:"type"` - Name string `json:"name"` - EachMode string `json:"each,omitempty"` - ProviderConfig string `json:"provider"` - Instances []instanceObjectStateV4 `json:"instances"` -} - -type instanceObjectStateV4 struct { - IndexKey interface{} `json:"index_key,omitempty"` - Status string `json:"status,omitempty"` - Deposed string `json:"deposed,omitempty"` - - SchemaVersion uint64 `json:"schema_version"` - AttributesRaw json.RawMessage `json:"attributes,omitempty"` - AttributesFlat map[string]string `json:"attributes_flat,omitempty"` - - PrivateRaw []byte `json:"private,omitempty"` - - Dependencies []string `json:"depends_on,omitempty"` -} - -// stateVersionV4 is a weird special type we use to produce our hard-coded -// "version": 4 in the JSON serialization. -type stateVersionV4 struct{} - -func (sv stateVersionV4) MarshalJSON() ([]byte, error) { - return []byte{'4'}, nil -} - -func (sv stateVersionV4) UnmarshalJSON([]byte) error { - // Nothing to do: we already know we're version 4 - return nil -} - -type sortResourcesV4 []resourceStateV4 - -func (sr sortResourcesV4) Len() int { return len(sr) } -func (sr sortResourcesV4) Swap(i, j int) { sr[i], sr[j] = sr[j], sr[i] } -func (sr sortResourcesV4) Less(i, j int) bool { - switch { - case sr[i].Mode != sr[j].Mode: - return sr[i].Mode < sr[j].Mode - case sr[i].Type != sr[j].Type: - return sr[i].Type < sr[j].Type - case sr[i].Name != sr[j].Name: - return sr[i].Name < sr[j].Name - default: - return false - } -} - -type sortInstancesV4 []instanceObjectStateV4 - -func (si sortInstancesV4) Len() int { return len(si) } -func (si sortInstancesV4) Swap(i, j int) { si[i], si[j] = si[j], si[i] } -func (si sortInstancesV4) Less(i, j int) bool { - ki := si[i].IndexKey - kj := si[j].IndexKey - if ki != kj { - if (ki == nil) != (kj == nil) { - return ki == nil - } - if kii, isInt := ki.(int); isInt { - if kji, isInt := kj.(int); isInt { - return kii < kji - } - return true - } - if kis, isStr := ki.(string); isStr { - if kjs, isStr := kj.(string); isStr { - return kis < kjs - } - return true - } - } - if si[i].Deposed != si[j].Deposed { - return si[i].Deposed < si[j].Deposed - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go deleted file mode 100644 index 8fdca45803..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go +++ /dev/null @@ -1,17 +0,0 @@ -package statefile - -import ( - "io" - - tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" -) - -// Write writes the given state to the given writer in the current state -// serialization format. -func Write(s *File, w io.Writer) error { - // Always record the current terraform version in the state. 
- s.TerraformVersion = tfversion.SemVer - - diags := writeStateV4(s, w) - return diags.Err() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go deleted file mode 100644 index 6d23612540..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go +++ /dev/null @@ -1,484 +0,0 @@ -package states - -import ( - "log" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/zclconf/go-cty/cty" -) - -// SyncState is a wrapper around State that provides concurrency-safe access to -// various common operations that occur during a Terraform graph walk, or other -// similar concurrent contexts. -// -// When a SyncState wrapper is in use, no concurrent direct access to the -// underlying objects is permitted unless the caller first acquires an explicit -// lock, using the Lock and Unlock methods. Most callers should _not_ -// explicitly lock, and should instead use the other methods of this type that -// handle locking automatically. -// -// Since SyncState is able to safely consolidate multiple updates into a single -// atomic operation, many of its methods are at a higher level than those -// of the underlying types, and operate on the state as a whole rather than -// on individual sub-structures of the state. -// -// SyncState can only protect against races within its own methods. It cannot -// provide any guarantees about the order in which concurrent operations will -// be processed, so callers may still need to employ higher-level techniques -// for ensuring correct operation sequencing, such as building and walking -// a dependency graph. -type SyncState struct { - state *State - lock sync.RWMutex -} - -// Module returns a snapshot of the state of the module instance with the given -// address, or nil if no such module is tracked. -// -// The return value is a pointer to a copy of the module state, which the -// caller may then freely access and mutate. However, since the module state -// tends to be a large data structure with many child objects, where possible -// callers should prefer to use a more granular accessor to access a child -// module directly, and thus reduce the amount of copying required. -func (s *SyncState) Module(addr addrs.ModuleInstance) *Module { - s.lock.RLock() - ret := s.state.Module(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// OutputValue returns a snapshot of the state of the output value with the -// given address, or nil if no such output value is tracked. -// -// The return value is a pointer to a copy of the output value state, which the -// caller may then freely access and mutate. -func (s *SyncState) OutputValue(addr addrs.AbsOutputValue) *OutputValue { - s.lock.RLock() - ret := s.state.OutputValue(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// SetOutputValue writes a given output value into the state, overwriting -// any existing value of the same name. -// -// If the module containing the output is not yet tracked in state then it -// will be added as a side-effect. -func (s *SyncState) SetOutputValue(addr addrs.AbsOutputValue, value cty.Value, sensitive bool) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetOutputValue(addr.OutputValue.Name, value, sensitive) -} - -// RemoveOutputValue removes the stored value for the output value with the -// given address. 
-// -// If this results in its containing module being empty, the module will be -// pruned from the state as a side-effect. -func (s *SyncState) RemoveOutputValue(addr addrs.AbsOutputValue) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return - } - ms.RemoveOutputValue(addr.OutputValue.Name) - s.maybePruneModule(addr.Module) -} - -// LocalValue returns the current value associated with the given local value -// address. -func (s *SyncState) LocalValue(addr addrs.AbsLocalValue) cty.Value { - s.lock.RLock() - // cty.Value is immutable, so we don't need any extra copying here. - ret := s.state.LocalValue(addr) - s.lock.RUnlock() - return ret -} - -// SetLocalValue writes a given local value into the state, overwriting -// any existing value of the same name. -// -// If the module containing the local value is not yet tracked in state then it -// will be added as a side-effect. -func (s *SyncState) SetLocalValue(addr addrs.AbsLocalValue, value cty.Value) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetLocalValue(addr.LocalValue.Name, value) -} - -// RemoveLocalValue removes the stored value for the local value with the -// given address. -// -// If this results in its containing module being empty, the module will be -// pruned from the state as a side-effect. -func (s *SyncState) RemoveLocalValue(addr addrs.AbsLocalValue) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return - } - ms.RemoveLocalValue(addr.LocalValue.Name) - s.maybePruneModule(addr.Module) -} - -// Resource returns a snapshot of the state of the resource with the given -// address, or nil if no such resource is tracked. -// -// The return value is a pointer to a copy of the resource state, which the -// caller may then freely access and mutate. -func (s *SyncState) Resource(addr addrs.AbsResource) *Resource { - s.lock.RLock() - ret := s.state.Resource(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// ResourceInstance returns a snapshot of the state of the resource instance with -// the given address, or nil if no such instance is tracked. -// -// The return value is a pointer to a copy of the instance state, which the -// caller may then freely access and mutate. -func (s *SyncState) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { - s.lock.RLock() - ret := s.state.ResourceInstance(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// ResourceInstanceObject returns a snapshot of the current instance object -// of the given generation belonging to the instance with the given address, -// or nil if no such object is tracked. -// -// The return value is a pointer to a copy of the object, which the caller may -// then freely access and mutate. -func (s *SyncState) ResourceInstanceObject(addr addrs.AbsResourceInstance, gen Generation) *ResourceInstanceObjectSrc { - s.lock.RLock() - defer s.lock.RUnlock() - - inst := s.state.ResourceInstance(addr) - if inst == nil { - return nil - } - return inst.GetGeneration(gen).DeepCopy() -} - -// SetResourceMeta updates the resource-level metadata for the resource at -// the given address, creating the containing module state and resource state -// as a side-effect if not already present. 
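The accessor pattern used throughout this type — take a read lock, deep-copy, hand the copy to the caller — generalizes; a minimal sketch of the same idea on a hypothetical wrapper (not the SDK type):

package main

import (
	"fmt"
	"sync"
)

type output struct {
	Value     string
	Sensitive bool
}

// syncOutputs guards a map of outputs. Readers receive copies, so they may
// mutate the result freely without holding the lock, as with SyncState.
type syncOutputs struct {
	mu   sync.RWMutex
	vals map[string]*output
}

func (s *syncOutputs) Set(name string, o output) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.vals == nil {
		s.vals = map[string]*output{}
	}
	s.vals[name] = &o
}

func (s *syncOutputs) Get(name string) *output {
	s.mu.RLock()
	defer s.mu.RUnlock()
	o, ok := s.vals[name]
	if !ok {
		return nil
	}
	copied := *o // snapshot, analogous to DeepCopy above
	return &copied
}

func main() {
	var s syncOutputs
	s.Set("vpc_id", output{Value: "vpc-123"})
	snap := s.Get("vpc_id")
	snap.Value = "mutated locally"     // does not affect the stored value
	fmt.Println(s.Get("vpc_id").Value) // vpc-123
}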
-func (s *SyncState) SetResourceMeta(addr addrs.AbsResource, eachMode EachMode, provider addrs.AbsProviderConfig) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetResourceMeta(addr.Resource, eachMode, provider) -} - -// RemoveResourceIfEmpty is similar to RemoveResource but first checks to -// make sure there are no instances or objects left in the resource. -// -// Returns true if the resource was removed, or false if remaining child -// objects prevented its removal. Returns true also if the resource was -// already absent, and thus no action needed to be taken. -func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return true // nothing to do - } - rs := ms.Resource(addr.Resource) - if rs == nil { - return true // nothing to do - } - if len(rs.Instances) != 0 { - // We don't check here for the possibility of instances that exist - // but don't have any objects because it's the responsibility of the - // instance-mutation methods to prune those away automatically. - return false - } - ms.RemoveResource(addr.Resource) - s.maybePruneModule(addr.Module) - return true -} - -// MaybeFixUpResourceInstanceAddressForCount deals with the situation where a -// resource has changed from having "count" set to not set, or vice-versa, and -// so we need to rename the zeroth instance key to no key at all, or vice-versa. -// -// Set countEnabled to true if the resource has count set in its new -// configuration, or false if it does not. -// -// The state is modified in-place if necessary, moving a resource instance -// between the two addresses. The return value is true if a change was made, -// and false otherwise. -func (s *SyncState) MaybeFixUpResourceInstanceAddressForCount(addr addrs.AbsResource, countEnabled bool) bool { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return false - } - - relAddr := addr.Resource - rs := ms.Resource(relAddr) - if rs == nil { - return false - } - huntKey := addrs.NoKey - replaceKey := addrs.InstanceKey(addrs.IntKey(0)) - if !countEnabled { - huntKey, replaceKey = replaceKey, huntKey - } - - is, exists := rs.Instances[huntKey] - if !exists { - return false - } - - if _, exists := rs.Instances[replaceKey]; exists { - // If the replacement key also exists then we'll do nothing and keep both. - return false - } - - // If we get here then we need to "rename" from hunt to replace - rs.Instances[replaceKey] = is - delete(rs.Instances, huntKey) - return true -} - -// SetResourceInstanceCurrent saves the given instance object as the current -// generation of the resource instance with the given address, simultaneously -// updating the recorded provider configuration address, dependencies, and -// resource EachMode. -// -// Any existing current instance object for the given resource is overwritten. -// Set obj to nil to remove the primary generation object altogether. If there -// are no deposed objects then the instance as a whole will be removed, which -// may in turn also remove the containing module if it becomes empty. -// -// The caller must ensure that the given ResourceInstanceObject is not -// concurrently mutated during this call, but may be freely used again once -// this function returns. 
-// -// The provider address and "each mode" are resource-wide settings and so they -// are updated for all other instances of the same resource as a side-effect of -// this call. -// -// If the containing module for this resource or the resource itself are not -// already tracked in state then they will be added as a side-effect. -func (s *SyncState) SetResourceInstanceCurrent(addr addrs.AbsResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetResourceInstanceCurrent(addr.Resource, obj.DeepCopy(), provider) - s.maybePruneModule(addr.Module) -} - -// SetResourceInstanceDeposed saves the given instance object as a deposed -// generation of the resource instance with the given address and deposed key. -// -// Call this method only for pre-existing deposed objects that already have -// a known DeposedKey. For example, this method is useful if reloading objects -// that were persisted to a state file. To mark the current object as deposed, -// use DeposeResourceInstanceObject instead. -// -// The caller must ensure that the given ResourceInstanceObject is not -// concurrently mutated during this call, but may be freely used again once -// this function returns. -// -// The resource that contains the given instance must already exist in the -// state, or this method will panic. Use Resource to check first if its -// presence is not already guaranteed. -// -// Any existing current instance object for the given resource and deposed key -// is overwritten. Set obj to nil to remove the deposed object altogether. If -// the instance is left with no objects after this operation then it will -// be removed from its containing resource altogether. -// -// If the containing module for this resource or the resource itself are not -// already tracked in state then they will be added as a side-effect. -func (s *SyncState) SetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetResourceInstanceDeposed(addr.Resource, key, obj.DeepCopy(), provider) - s.maybePruneModule(addr.Module) -} - -// DeposeResourceInstanceObject moves the current instance object for the -// given resource instance address into the deposed set, leaving the instance -// without a current object. -// -// The return value is the newly-allocated deposed key, or NotDeposed if the -// given instance is already lacking a current object. -// -// If the containing module for this resource or the resource itself are not -// already tracked in state then there cannot be a current object for the -// given instance, and so NotDeposed will be returned without modifying the -// state at all. -func (s *SyncState) DeposeResourceInstanceObject(addr addrs.AbsResourceInstance) DeposedKey { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return NotDeposed - } - - return ms.deposeResourceInstanceObject(addr.Resource, NotDeposed) -} - -// DeposeResourceInstanceObjectForceKey is like DeposeResourceInstanceObject -// but uses a pre-allocated key. It's the caller's responsibility to ensure -// that there aren't any races to use a particular key; this method will panic -// if the given key is already in use. 
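Deposed keys throughout this file are eight lowercase hex digits. A minimal sketch of allocating and validating one (hypothetical helper; the SDK's own allocator additionally checks for collisions against keys already in use):

package main

import (
	"fmt"
	"math/rand"
	"regexp"
)

var deposedKeyFormat = regexp.MustCompile(`^[0-9a-f]{8}$`)

// newDeposedKey returns a pseudorandom key in the format the state reader
// expects: exactly eight hex characters.
func newDeposedKey(r *rand.Rand) string {
	return fmt.Sprintf("%08x", r.Uint32())
}

func main() {
	r := rand.New(rand.NewSource(42))
	key := newDeposedKey(r)
	fmt.Println(deposedKeyFormat.MatchString(key)) // true
	fmt.Println(key)                               // seed-dependent, always 8 hex digits
}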
-func (s *SyncState) DeposeResourceInstanceObjectForceKey(addr addrs.AbsResourceInstance, forcedKey DeposedKey) { - s.lock.Lock() - defer s.lock.Unlock() - - if forcedKey == NotDeposed { - // Usage error: should use DeposeResourceInstanceObject in this case - panic("DeposeResourceInstanceObjectForceKey called without forced key") - } - - ms := s.state.Module(addr.Module) - if ms == nil { - return // Nothing to do, since there can't be any current object either. - } - - ms.deposeResourceInstanceObject(addr.Resource, forcedKey) -} - -// MaybeRestoreResourceInstanceDeposed will restore the deposed object with the -// given key on the specified resource as the current object for that instance -// if and only if that would not cause us to forget an existing current -// object for that instance. -// -// Returns true if the object was restored to current, or false if no change -// was made at all. -func (s *SyncState) MaybeRestoreResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) bool { - s.lock.Lock() - defer s.lock.Unlock() - - if key == NotDeposed { - panic("MaybeRestoreResourceInstanceDeposed called without DeposedKey") - } - - ms := s.state.Module(addr.Module) - if ms == nil { - // Nothing to do, since the specified deposed object cannot exist. - return false - } - - return ms.maybeRestoreResourceInstanceDeposed(addr.Resource, key) -} - -// RemovePlannedResourceInstanceObjects removes from the state any resource -// instance objects that have the status ObjectPlanned, indiciating that they -// are just transient placeholders created during planning. -// -// Note that this does not restore any "ready" or "tainted" object that might -// have been present before the planned object was written. The only real use -// for this method is in preparing the state created during a refresh walk, -// where we run the planning step for certain instances just to create enough -// information to allow correct expression evaluation within provider and -// data resource blocks. Discarding planned instances in that case is okay -// because the refresh phase only creates planned objects to stand in for -// objects that don't exist yet, and thus the planned object must have been -// absent before by definition. -func (s *SyncState) RemovePlannedResourceInstanceObjects() { - // TODO: Merge together the refresh and plan phases into a single walk, - // so we can remove the need to create this "partial plan" during refresh - // that we then need to clean up before proceeding. - - s.lock.Lock() - defer s.lock.Unlock() - - for _, ms := range s.state.Modules { - moduleAddr := ms.Addr - - for _, rs := range ms.Resources { - resAddr := rs.Addr - - for ik, is := range rs.Instances { - instAddr := resAddr.Instance(ik) - - if is.Current != nil && is.Current.Status == ObjectPlanned { - // Setting the current instance to nil removes it from the - // state altogether if there are not also deposed instances. - ms.SetResourceInstanceCurrent(instAddr, nil, rs.ProviderConfig) - } - - for dk, obj := range is.Deposed { - // Deposed objects should never be "planned", but we'll - // do this anyway for the sake of completeness. - if obj.Status == ObjectPlanned { - ms.ForgetResourceInstanceDeposed(instAddr, dk) - } - } - } - } - - // We may have deleted some objects, which means that we may have - // left a module empty, and so we must prune to preserve the invariant - // that only the root module is allowed to be empty. 
- s.maybePruneModule(moduleAddr) - } -} - -// Lock acquires an explicit lock on the state, allowing direct read and write -// access to the returned state object. The caller must call Unlock once -// access is no longer needed, and then immediately discard the state -// pointer. -// -// Most callers should not use this. Instead, use the concurrency-safe -// accessors and mutators provided directly on SyncState. -func (s *SyncState) Lock() *State { - s.lock.Lock() - return s.state -} - -// Unlock releases a lock previously acquired by Lock, at which point the -// caller must cease all use of the state pointer that was returned. -// -// Do not call this method except to end an explicit lock acquired by -// Lock. If a caller calls Unlock without first holding the lock, behavior -// is undefined. -func (s *SyncState) Unlock() { - s.lock.Unlock() -} - -// maybePruneModule will remove a module from the state altogether if it is -// empty, unless it's the root module which must always be present. -// -// This helper method is not concurrency-safe on its own, so must only be -// called while the caller is already holding the lock for writing. -func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) { - if addr.IsRoot() { - // We never prune the root. - return - } - - ms := s.state.Module(addr) - if ms == nil { - return - } - - if ms.empty() { - log.Printf("[TRACE] states.SyncState: pruning %s because it is empty", addr) - s.state.RemoveModule(addr) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/config_traversals.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/config_traversals.go deleted file mode 100644 index 8e41f46ed2..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/config_traversals.go +++ /dev/null @@ -1,68 +0,0 @@ -package tfdiags - -import ( - "bytes" - "fmt" - "strconv" - - "github.com/zclconf/go-cty/cty" -) - -// FormatCtyPath is a helper function to produce a user-friendly string -// representation of a cty.Path. The result uses a syntax similar to the -// HCL expression language in the hope of it being familiar to users. -func FormatCtyPath(path cty.Path) string { - var buf bytes.Buffer - for _, step := range path { - switch ts := step.(type) { - case cty.GetAttrStep: - fmt.Fprintf(&buf, ".%s", ts.Name) - case cty.IndexStep: - buf.WriteByte('[') - key := ts.Key - keyTy := key.Type() - switch { - case key.IsNull(): - buf.WriteString("null") - case !key.IsKnown(): - buf.WriteString("(not yet known)") - case keyTy == cty.Number: - bf := key.AsBigFloat() - buf.WriteString(bf.Text('g', -1)) - case keyTy == cty.String: - buf.WriteString(strconv.Quote(key.AsString())) - default: - buf.WriteString("...") - } - buf.WriteByte(']') - } - } - return buf.String() -} - -// FormatError is a helper function to produce a user-friendly string -// representation of certain special error types that we might want to -// include in diagnostic messages. -// -// This currently has special behavior only for cty.PathError, where a -// non-empty path is rendered in a HCL-like syntax as context.
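For intuition about what the FormatCtyPath helper above produces, here is a short sketch written as if inside package tfdiags (the vendored package is internal, so it is not importable from provider code; the path contents are invented):

```go
// A GetAttr, a numeric index, then another GetAttr render in the
// HCL-like form ".rule[0].ports".
path := cty.Path{}.
	GetAttr("rule").
	Index(cty.NumberIntVal(0)).
	GetAttr("ports")

fmt.Println(FormatCtyPath(path)) // prints: .rule[0].ports
```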
-func FormatError(err error) string { - perr, ok := err.(cty.PathError) - if !ok || len(perr.Path) == 0 { - return err.Error() - } - - return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error()) -} - -// FormatErrorPrefixed is like FormatError except that it presents any path -// information after the given prefix string, which is assumed to contain -// an HCL syntax representation of the value that errors are relative to. -func FormatErrorPrefixed(err error, prefix string) string { - perr, ok := err.(cty.PathError) - if !ok || len(perr.Path) == 0 { - return fmt.Sprintf("%s: %s", prefix, err.Error()) - } - - return fmt.Sprintf("%s%s: %s", prefix, FormatCtyPath(perr.Path), perr.Error()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go deleted file mode 100644 index 59c06b70b5..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go +++ /dev/null @@ -1,372 +0,0 @@ -package tfdiags - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// The "contextual" family of diagnostics are designed to allow separating -// the detection of a problem from placing that problem in context. For -// example, some code that is validating an object extracted from configuration -// may not have access to the configuration that generated it, but can still -// report problems within that object which the caller can then place in -// context by calling InConfigBody on the returned diagnostics. -// -// When contextual diagnostics are used, the documentation for a method must -// be very explicit about what context is implied for any diagnostics returned, -// to help ensure the expected result. - -// contextualFromConfigBody is an interface type implemented by diagnostic types -// that can elaborate themselves when given information about the configuration -// body they are embedded in. -// -// Usually this entails extracting source location information in order to -// populate the "Subject" range. -type contextualFromConfigBody interface { - ElaborateFromConfigBody(hcl.Body) Diagnostic -} - -// InConfigBody returns a copy of the receiver with any config-contextual -// diagnostics elaborated in the context of the given body. -func (d Diagnostics) InConfigBody(body hcl.Body) Diagnostics { - if len(d) == 0 { - return nil - } - - ret := make(Diagnostics, len(d)) - for i, srcDiag := range d { - if cd, isCD := srcDiag.(contextualFromConfigBody); isCD { - ret[i] = cd.ElaborateFromConfigBody(body) - } else { - ret[i] = srcDiag - } - } - - return ret -} - -// AttributeValue returns a diagnostic about an attribute value in an implied current -// configuration context. This should be returned only from functions whose -// interface specifies a clear configuration context that this will be -// resolved in. -// -// The given path is relative to the implied configuration context. To describe -// a top-level attribute, it should be a single-element cty.Path with a -// cty.GetAttrStep. It's assumed that the path is returning into a structure -// that would be produced by our conventions in the configschema package; it -// may return unexpected results for structures that can't be represented by -// configschema. -// -// Since mapping attribute paths back onto configuration is an imprecise -// operation (e.g.
dynamic block generation may cause the same block to be -// evaluated multiple times) the diagnostic detail should include the attribute -// name and other context required to help the user understand what is being -// referenced in case the identified source range is not unique. -// -// The returned diagnostic will not have source location information until -// context is applied to the containing diagnostics using diags.InConfigBody. -// After context is applied, the source location is the value assigned to the -// named attribute, or the containing body's "missing item range" if no -// value is present. -func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic { - return &attributeDiagnostic{ - diagnosticBase: diagnosticBase{ - severity: severity, - summary: summary, - detail: detail, - }, - attrPath: attrPath, - } -} - -// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains -// one. Normally this is not accessed directly, and instead the config body is -// added to the Diagnostic to create a more complete message for the user. In -// some cases however, we may want to know just the name of the attribute that -// generated the Diagnostic message. -// This returns a nil cty.Path if it does not exist in the Diagnostic. -func GetAttribute(d Diagnostic) cty.Path { - if d, ok := d.(*attributeDiagnostic); ok { - return d.attrPath - } - return nil -} - -type attributeDiagnostic struct { - diagnosticBase - attrPath cty.Path - subject *SourceRange // populated only after ElaborateFromConfigBody -} - -// ElaborateFromConfigBody finds the most accurate possible source location -// for a diagnostic's attribute path within the given body. -// -// Backing out from a path to a source location is not always entirely -// possible because we lose some information in the decoding process, so -// if an exact position cannot be found then the returned diagnostic will -// refer to a position somewhere within the containing body, which is assumed -// to be better than no location at all. -// -// If possible it is generally better to report an error at a layer where -// source location information is still available, for more accuracy. This -// is not always possible due to system architecture, so this serves as a -// "best effort" fallback behavior for such situations. -func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { - if len(d.attrPath) < 1 { - // Should never happen, but we'll allow it rather than crashing. - return d - } - - if d.subject != nil { - // Don't modify an already-elaborated diagnostic. - return d - } - - ret := *d - - // This function will often end up re-decoding values that were already - // decoded by an earlier step. This is non-ideal but is architecturally - // more convenient than arranging for source location information to be - // propagated to every place in Terraform, and this happens only in the - // presence of errors where performance isn't a concern.
- - traverse := d.attrPath[:] - final := d.attrPath[len(d.attrPath)-1] - - // Index should never be the first step - // as indexing of top blocks (such as resources & data sources) - // is handled elsewhere - if _, isIdxStep := traverse[0].(cty.IndexStep); isIdxStep { - subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - return &ret - } - - // Process index separately - idxStep, hasIdx := final.(cty.IndexStep) - if hasIdx { - final = d.attrPath[len(d.attrPath)-2] - traverse = d.attrPath[:len(d.attrPath)-1] - } - - // If we have more than one step after removing index - // then we'll first try to traverse to a child body - // corresponding to the requested path. - if len(traverse) > 1 { - body = traversePathSteps(traverse, body) - } - - // Default is to indicate a missing item in the deepest body we reached - // while traversing. - subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - - // Once we get here, "final" should be a GetAttr step that maps to an - // attribute in our current body. - finalStep, isAttr := final.(cty.GetAttrStep) - if !isAttr { - return &ret - } - - content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: finalStep.Name, - Required: true, - }, - }, - }) - if contentDiags.HasErrors() { - return &ret - } - - if attr, ok := content.Attributes[finalStep.Name]; ok { - hclRange := attr.Expr.Range() - if hasIdx { - // Try to be more precise by finding index range - hclRange = hclRangeFromIndexStepAndAttribute(idxStep, attr) - } - subject = SourceRangeFromHCL(hclRange) - ret.subject = &subject - } - - return &ret -} - -func traversePathSteps(traverse []cty.PathStep, body hcl.Body) hcl.Body { - for i := 0; i < len(traverse); i++ { - step := traverse[i] - - switch tStep := step.(type) { - case cty.GetAttrStep: - - var next cty.PathStep - if i < (len(traverse) - 1) { - next = traverse[i+1] - } - - // Will we be indexing into our result here? - var indexType cty.Type - var indexVal cty.Value - if nextIndex, ok := next.(cty.IndexStep); ok { - indexVal = nextIndex.Key - indexType = indexVal.Type() - i++ // skip over the index on subsequent iterations - } - - var blockLabelNames []string - if indexType == cty.String { - // Map traversal means we expect one label for the key. - blockLabelNames = []string{"key"} - } - - // For intermediate steps we expect to be referring to a child - // block, so we'll attempt decoding under that assumption.
- content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: tStep.Name, - LabelNames: blockLabelNames, - }, - }, - }) - if contentDiags.HasErrors() { - return body - } - filtered := make([]*hcl.Block, 0, len(content.Blocks)) - for _, block := range content.Blocks { - if block.Type == tStep.Name { - filtered = append(filtered, block) - } - } - if len(filtered) == 0 { - // Step doesn't refer to a block - continue - } - - switch indexType { - case cty.NilType: // no index at all - if len(filtered) != 1 { - return body - } - body = filtered[0].Body - case cty.Number: - var idx int - err := gocty.FromCtyValue(indexVal, &idx) - if err != nil || idx >= len(filtered) { - return body - } - body = filtered[idx].Body - case cty.String: - key := indexVal.AsString() - var block *hcl.Block - for _, candidate := range filtered { - if candidate.Labels[0] == key { - block = candidate - break - } - } - if block == nil { - // No block with this key, so we'll just indicate a - // missing item in the containing block. - return body - } - body = block.Body - default: - // Should never happen, because only string and numeric indices - // are supported by cty collections. - return body - } - - default: - // For any other kind of step, we'll just return our current body - // as the subject and accept that this is a little inaccurate. - return body - } - } - return body -} - -func hclRangeFromIndexStepAndAttribute(idxStep cty.IndexStep, attr *hcl.Attribute) hcl.Range { - switch idxStep.Key.Type() { - case cty.Number: - var idx int - err := gocty.FromCtyValue(idxStep.Key, &idx) - items, diags := hcl.ExprList(attr.Expr) - if diags.HasErrors() { - return attr.Expr.Range() - } - if err != nil || idx >= len(items) { - return attr.NameRange - } - return items[idx].Range() - case cty.String: - pairs, diags := hcl.ExprMap(attr.Expr) - if diags.HasErrors() { - return attr.Expr.Range() - } - stepKey := idxStep.Key.AsString() - for _, kvPair := range pairs { - key, err := kvPair.Key.Value(nil) - if err != nil { - return attr.Expr.Range() - } - if key.AsString() == stepKey { - startRng := kvPair.Value.StartRange() - return startRng - } - } - return attr.NameRange - } - return attr.Expr.Range() -} - -func (d *attributeDiagnostic) Source() Source { - return Source{ - Subject: d.subject, - } -} - -// WholeContainingBody returns a diagnostic about the body that is an implied -// current configuration context. This should be returned only from -// functions whose interface specifies a clear configuration context that this -// will be resolved in. -// -// The returned diagnostic will not have source location information until -// context is applied to the containing diagnostics using diags.InConfigBody. -// After context is applied, the source location is currently the missing item -// range of the body. In future, this may change to some other suitable -// part of the containing body. -func WholeContainingBody(severity Severity, summary, detail string) Diagnostic { - return &wholeBodyDiagnostic{ - diagnosticBase: diagnosticBase{ - severity: severity, - summary: summary, - detail: detail, - }, - } -} - -type wholeBodyDiagnostic struct { - diagnosticBase - subject *SourceRange // populated only after ElaborateFromConfigBody -} - -func (d *wholeBodyDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { - if d.subject != nil { - // Don't modify an already-elaborated diagnostic.
- return d - } - - ret := *d - rng := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &rng - return &ret -} - -func (d *wholeBodyDiagnostic) Source() Source { - return Source{ - Subject: d.subject, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go deleted file mode 100644 index a7699cf013..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go +++ /dev/null @@ -1,40 +0,0 @@ -package tfdiags - -import ( - "github.com/hashicorp/hcl/v2" -) - -type Diagnostic interface { - Severity() Severity - Description() Description - Source() Source - - // FromExpr returns the expression-related context for the diagnostic, if - // available. Returns nil if the diagnostic is not related to an - // expression evaluation. - FromExpr() *FromExpr -} - -type Severity rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type=Severity - -const ( - Error Severity = 'E' - Warning Severity = 'W' -) - -type Description struct { - Summary string - Detail string -} - -type Source struct { - Subject *SourceRange - Context *SourceRange -} - -type FromExpr struct { - Expression hcl.Expression - EvalContext *hcl.EvalContext -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic_base.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic_base.go deleted file mode 100644 index 50bf9d8eba..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic_base.go +++ /dev/null @@ -1,31 +0,0 @@ -package tfdiags - -// diagnosticBase can be embedded in other diagnostic structs to get -// default implementations of Severity and Description. This type also -// has default implementations of Source and FromExpr that return no source -// location or expression-related information, so embedders should generally -// override those methods to return more useful results where possible. -type diagnosticBase struct { - severity Severity - summary string - detail string -} - -func (d diagnosticBase) Severity() Severity { - return d.severity -} - -func (d diagnosticBase) Description() Description { - return Description{ - Summary: d.summary, - Detail: d.detail, - } -} - -func (d diagnosticBase) Source() Source { - return Source{} -} - -func (d diagnosticBase) FromExpr() *FromExpr { - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostics.go deleted file mode 100644 index a19fa80c40..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostics.go +++ /dev/null @@ -1,318 +0,0 @@ -package tfdiags - -import ( - "bytes" - "fmt" - "path/filepath" - "sort" - "strings" - - "github.com/hashicorp/errwrap" - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl/v2" -) - -// Diagnostics is a list of diagnostics. Diagnostics is intended to be used -// where a Go "error" might normally be used, allowing richer information -// to be conveyed (more context, support for warnings). -// -// A nil Diagnostics is a valid, empty diagnostics list, thus allowing -// heap allocation to be avoided in the common case where there are no -// diagnostics to report at all.
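The embedding pattern that the diagnosticBase comment a few lines up describes looks like this in practice. A minimal in-package sketch; `fileDiagnostic` is a hypothetical type, not part of the vendored code:

```go
// fileDiagnostic gets Severity, Description, and FromExpr from the
// embedded base, and only overrides Source to supply a real location.
type fileDiagnostic struct {
	diagnosticBase
	rng SourceRange
}

func (d fileDiagnostic) Source() Source {
	return Source{Subject: &d.rng}
}
```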
-type Diagnostics []Diagnostic - -// Append is the main interface for constructing Diagnostics lists, taking -// an existing list (which may be nil) and appending the new objects to it -// after normalizing them to be implementations of Diagnostic. -// -// The usual pattern for a function that natively "speaks" diagnostics is: -// -// // Create a nil Diagnostics at the start of the function -// var diags tfdiags.Diagnostics -// -// // At later points, build on it if errors / warnings occur: -// foo, err := DoSomethingRisky() -// if err != nil { -// diags = diags.Append(err) -// } -// -// // Eventually return the result and diagnostics in place of error -// return result, diags -// -// Append accepts a variety of different diagnostic-like types, including -// native Go errors and HCL diagnostics. It also knows how to unwrap -// a multierror.Error into separate error diagnostics. It can be passed -// another Diagnostics to concatenate the two lists. If given something -// it cannot handle, this function will panic. -func (diags Diagnostics) Append(new ...interface{}) Diagnostics { - for _, item := range new { - if item == nil { - continue - } - - switch ti := item.(type) { - case Diagnostic: - diags = append(diags, ti) - case Diagnostics: - diags = append(diags, ti...) // flatten - case diagnosticsAsError: - diags = diags.Append(ti.Diagnostics) // unwrap - case NonFatalError: - diags = diags.Append(ti.Diagnostics) // unwrap - case hcl.Diagnostics: - for _, hclDiag := range ti { - diags = append(diags, hclDiagnostic{hclDiag}) - } - case *hcl.Diagnostic: - diags = append(diags, hclDiagnostic{ti}) - case *multierror.Error: - for _, err := range ti.Errors { - diags = append(diags, nativeError{err}) - } - case error: - switch { - case errwrap.ContainsType(ti, Diagnostics(nil)): - // If we have an errwrap wrapper with a Diagnostics hiding - // inside then we'll unpick it here to get access to the - // individual diagnostics. - diags = diags.Append(errwrap.GetType(ti, Diagnostics(nil))) - case errwrap.ContainsType(ti, hcl.Diagnostics(nil)): - // Likewise, if we have HCL diagnostics we'll unpick that too. - diags = diags.Append(errwrap.GetType(ti, hcl.Diagnostics(nil))) - default: - diags = append(diags, nativeError{ti}) - } - default: - panic(fmt.Errorf("can't construct diagnostic(s) from %T", item)) - } - } - - // Given the above, we should never end up with a non-nil empty slice - // here, but we'll make sure of that so callers can rely on empty == nil - if len(diags) == 0 { - return nil - } - - return diags -} - -// HasErrors returns true if any of the diagnostics in the list have -// a severity of Error. -func (diags Diagnostics) HasErrors() bool { - for _, diag := range diags { - if diag.Severity() == Error { - return true - } - } - return false -} - -// ForRPC returns a version of the receiver that has been simplified so that -// it is friendly to RPC protocols. -// -// Currently this means that it can be serialized with encoding/gob and -// subsequently re-inflated. It may later grow to include other serialization -// formats. -// -// Note that this loses information about the original objects used to -// construct the diagnostics, so e.g. the errwrap API will not work as -// expected on an error-wrapped Diagnostics that came from ForRPC.
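The Append method above names several accepted input kinds; the normalization behavior is easiest to see spelled out. A compact in-package sketch (`doSomething` is hypothetical, and `Err` is defined just below):

```go
var diags Diagnostics

_, err := doSomething()        // plain Go error (hypothetical call)
diags = diags.Append(err)      // nil items are skipped, non-nil ones wrapped

var hclDiags hcl.Diagnostics   // HCL diagnostics from a parse step
diags = diags.Append(hclDiags) // each element is wrapped individually

var otherDiags Diagnostics     // another Diagnostics list
diags = diags.Append(otherDiags) // flattened into the same list

if diags.HasErrors() {
	return diags.Err() // collapse to one error for error-based APIs
}
```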
-func (diags Diagnostics) ForRPC() Diagnostics { - ret := make(Diagnostics, len(diags)) - for i := range diags { - ret[i] = makeRPCFriendlyDiag(diags[i]) - } - return ret -} - -// Err flattens a diagnostics list into a single Go error, or to nil -// if the diagnostics list does not include any error-level diagnostics. -// -// This can be used to smuggle diagnostics through an API that deals in -// native errors, but unfortunately it will lose naked warnings (warnings -// that aren't accompanied by at least one error) since such APIs have no -// mechanism through which to report these. -// -// return result, diags.Err() -func (diags Diagnostics) Err() error { - if !diags.HasErrors() { - return nil - } - return diagnosticsAsError{diags} -} - -// ErrWithWarnings is similar to Err except that it will also return a non-nil -// error if the receiver contains only warnings. -// -// In the warnings-only situation, the result is guaranteed to be of dynamic -// type NonFatalError, allowing diagnostics-aware callers to type-assert -// and unwrap it, treating it as non-fatal. -// -// This should be used only in contexts where the caller is able to recognize -// and handle NonFatalError. For normal callers that expect a lack of errors -// to be signaled by nil, use just Diagnostics.Err. -func (diags Diagnostics) ErrWithWarnings() error { - if len(diags) == 0 { - return nil - } - if diags.HasErrors() { - return diags.Err() - } - return NonFatalError{diags} -} - -// NonFatalErr is similar to Err except that it always returns either nil -// (if there are no diagnostics at all) or NonFatalError. -// -// This allows diagnostics to be returned over an error return channel while -// being explicit that the diagnostics should not halt processing. -// -// This should be used only in contexts where the caller is able to recognize -// and handle NonFatalError. For normal callers that expect a lack of errors -// to be signaled by nil, use just Diagnostics.Err. -func (diags Diagnostics) NonFatalErr() error { - if len(diags) == 0 { - return nil - } - return NonFatalError{diags} -} - -type diagnosticsAsError struct { - Diagnostics -} - -func (dae diagnosticsAsError) Error() string { - diags := dae.Diagnostics - switch { - case len(diags) == 0: - // should never happen, since we don't create this wrapper if - // there are no diagnostics in the list. - return "no errors" - case len(diags) == 1: - desc := diags[0].Description() - if desc.Detail == "" { - return desc.Summary - } - return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) - default: - var ret bytes.Buffer - fmt.Fprintf(&ret, "%d problems:\n", len(diags)) - for _, diag := range dae.Diagnostics { - desc := diag.Description() - if desc.Detail == "" { - fmt.Fprintf(&ret, "\n- %s", desc.Summary) - } else { - fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) - } - } - return ret.String() - } -} - -// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped -// diagnostics object can be picked apart by errwrap-aware code. -func (dae diagnosticsAsError) WrappedErrors() []error { - var errs []error - for _, diag := range dae.Diagnostics { - if wrapper, isErr := diag.(nativeError); isErr { - errs = append(errs, wrapper.err) - } - } - return errs -} - -// NonFatalError is a special error type, returned by -// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr, -// that indicates that the wrapped diagnostics should be treated as non-fatal.
-// Callers can conditionally type-assert an error to this type in order to -// detect the non-fatal scenario and handle it in a different way. -type NonFatalError struct { - Diagnostics -} - -func (woe NonFatalError) Error() string { - diags := woe.Diagnostics - switch { - case len(diags) == 0: - // should never happen, since we don't create this wrapper if - // there are no diagnostics in the list. - return "no errors or warnings" - case len(diags) == 1: - desc := diags[0].Description() - if desc.Detail == "" { - return desc.Summary - } - return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) - default: - var ret bytes.Buffer - if diags.HasErrors() { - fmt.Fprintf(&ret, "%d problems:\n", len(diags)) - } else { - fmt.Fprintf(&ret, "%d warnings:\n", len(diags)) - } - for _, diag := range woe.Diagnostics { - desc := diag.Description() - if desc.Detail == "" { - fmt.Fprintf(&ret, "\n- %s", desc.Summary) - } else { - fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) - } - } - return ret.String() - } -} - -// sortDiagnostics is an implementation of sort.Interface -type sortDiagnostics []Diagnostic - -var _ sort.Interface = sortDiagnostics(nil) - -func (sd sortDiagnostics) Len() int { - return len(sd) -} - -func (sd sortDiagnostics) Less(i, j int) bool { - iD, jD := sd[i], sd[j] - iSev, jSev := iD.Severity(), jD.Severity() - iSrc, jSrc := iD.Source(), jD.Source() - - switch { - - case iSev != jSev: - return iSev == Warning - - case (iSrc.Subject == nil) != (jSrc.Subject == nil): - return iSrc.Subject == nil - - case iSrc.Subject != nil && *iSrc.Subject != *jSrc.Subject: - iSubj := iSrc.Subject - jSubj := jSrc.Subject - switch { - case iSubj.Filename != jSubj.Filename: - // Path with fewer segments goes first if they are different lengths - sep := string(filepath.Separator) - iCount := strings.Count(iSubj.Filename, sep) - jCount := strings.Count(jSubj.Filename, sep) - if iCount != jCount { - return iCount < jCount - } - return iSubj.Filename < jSubj.Filename - case iSubj.Start.Byte != jSubj.Start.Byte: - return iSubj.Start.Byte < jSubj.Start.Byte - case iSubj.End.Byte != jSubj.End.Byte: - return iSubj.End.Byte < jSubj.End.Byte - } - fallthrough - - default: - // The remaining properties do not have a defined ordering, so - // we'll leave it unspecified. Since we use sort.Stable in - // the caller of this, the ordering of remaining items will - // be preserved. - return false - } -} - -func (sd sortDiagnostics) Swap(i, j int) { - sd[i], sd[j] = sd[j], sd[i] -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/doc.go deleted file mode 100644 index c427879ebc..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Package tfdiags is a utility package for representing errors and -// warnings in a manner that allows us to produce good messages for the -// user. -// -// "diag" is short for "diagnostics", and is meant as a general word for -// feedback to a user about potential or actual problems. -// -// A design goal for this package is for it to be able to provide rich -// messaging where possible but to also be pragmatic about dealing with -// generic errors produced by system components that _can't_ provide -// such rich messaging. 
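The caller-side contract for NonFatalError, just described, amounts to one type assertion. A hedged in-package sketch; `runOperation` is a hypothetical function that returns `diags.ErrWithWarnings()`:

```go
if err := runOperation(); err != nil {
	if nonFatal, ok := err.(NonFatalError); ok {
		// Warnings only: surface them and keep going.
		for _, diag := range nonFatal.Diagnostics {
			log.Printf("[WARN] %s", diag.Description().Summary)
		}
	} else {
		return err // genuine errors still abort
	}
}
```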
As a consequence, the main types in this package -- -// Diagnostics and Diagnostic -- are designed so that they can be "smuggled" -// over an error channel and then be unpacked at the other end, so that -// error diagnostics (at least) can transit through APIs that are not -// aware of this package. -package tfdiags diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/error.go deleted file mode 100644 index 13f7a714f4..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/error.go +++ /dev/null @@ -1,28 +0,0 @@ -package tfdiags - -// nativeError is a Diagnostic implementation that wraps a normal Go error -type nativeError struct { - err error -} - -var _ Diagnostic = nativeError{} - -func (e nativeError) Severity() Severity { - return Error -} - -func (e nativeError) Description() Description { - return Description{ - Summary: FormatError(e.err), - } -} - -func (e nativeError) Source() Source { - // No source information available for a native error - return Source{} -} - -func (e nativeError) FromExpr() *FromExpr { - // Native errors are not expression-related - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go deleted file mode 100644 index 8c781611a5..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go +++ /dev/null @@ -1,87 +0,0 @@ -package tfdiags - -import ( - "github.com/hashicorp/hcl/v2" -) - -// hclDiagnostic is a Diagnostic implementation that wraps a HCL Diagnostic -type hclDiagnostic struct { - diag *hcl.Diagnostic -} - -var _ Diagnostic = hclDiagnostic{} - -func (d hclDiagnostic) Severity() Severity { - switch d.diag.Severity { - case hcl.DiagWarning: - return Warning - default: - return Error - } -} - -func (d hclDiagnostic) Description() Description { - return Description{ - Summary: d.diag.Summary, - Detail: d.diag.Detail, - } -} - -func (d hclDiagnostic) Source() Source { - var ret Source - if d.diag.Subject != nil { - rng := SourceRangeFromHCL(*d.diag.Subject) - ret.Subject = &rng - } - if d.diag.Context != nil { - rng := SourceRangeFromHCL(*d.diag.Context) - ret.Context = &rng - } - return ret -} - -func (d hclDiagnostic) FromExpr() *FromExpr { - if d.diag.Expression == nil || d.diag.EvalContext == nil { - return nil - } - return &FromExpr{ - Expression: d.diag.Expression, - EvalContext: d.diag.EvalContext, - } -} - -// SourceRangeFromHCL constructs a SourceRange from the corresponding range -// type within the HCL package. -func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { - return SourceRange{ - Filename: hclRange.Filename, - Start: SourcePos{ - Line: hclRange.Start.Line, - Column: hclRange.Start.Column, - Byte: hclRange.Start.Byte, - }, - End: SourcePos{ - Line: hclRange.End.Line, - Column: hclRange.End.Column, - Byte: hclRange.End.Byte, - }, - } -} - -// ToHCL constructs a HCL Range from the receiving SourceRange. This is the -// opposite of SourceRangeFromHCL. 
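Round-tripping through the two converters above is lossless for the fields both range types share. A small in-package sketch with invented positions:

```go
hclRng := hcl.Range{
	Filename: "main.tf",
	Start:    hcl.Pos{Line: 3, Column: 1, Byte: 40},
	End:      hcl.Pos{Line: 3, Column: 12, Byte: 51},
}

rng := SourceRangeFromHCL(hclRng) // tfdiags representation
back := rng.ToHCL()               // identical to hclRng again
_ = back
```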
-func (r SourceRange) ToHCL() hcl.Range { - return hcl.Range{ - Filename: r.Filename, - Start: hcl.Pos{ - Line: r.Start.Line, - Column: r.Start.Column, - Byte: r.Start.Byte, - }, - End: hcl.Pos{ - Line: r.End.Line, - Column: r.End.Column, - Byte: r.End.Byte, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/rpc_friendly.go deleted file mode 100644 index 485063b0c0..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/rpc_friendly.go +++ /dev/null @@ -1,59 +0,0 @@ -package tfdiags - -import ( - "encoding/gob" -) - -type rpcFriendlyDiag struct { - Severity_ Severity - Summary_ string - Detail_ string - Subject_ *SourceRange - Context_ *SourceRange -} - -// makeRPCFriendlyDiag transforms a given diagnostic so that it is more -// friendly to RPC. -// -// In particular, it currently returns an object that can be serialized and -// later re-inflated using gob. This definition may grow to include other -// serializations later. -func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic { - desc := diag.Description() - source := diag.Source() - return &rpcFriendlyDiag{ - Severity_: diag.Severity(), - Summary_: desc.Summary, - Detail_: desc.Detail, - Subject_: source.Subject, - Context_: source.Context, - } -} - -func (d *rpcFriendlyDiag) Severity() Severity { - return d.Severity_ -} - -func (d *rpcFriendlyDiag) Description() Description { - return Description{ - Summary: d.Summary_, - Detail: d.Detail_, - } -} - -func (d *rpcFriendlyDiag) Source() Source { - return Source{ - Subject: d.Subject_, - Context: d.Context_, - } -} - -func (d rpcFriendlyDiag) FromExpr() *FromExpr { - // RPC-friendly diagnostics cannot preserve expression information because - // expressions themselves are not RPC-friendly. - return nil -} - -func init() { - gob.Register((*rpcFriendlyDiag)(nil)) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/simple_warning.go deleted file mode 100644 index b0f1ecd46c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/simple_warning.go +++ /dev/null @@ -1,30 +0,0 @@ -package tfdiags - -type simpleWarning string - -var _ Diagnostic = simpleWarning("") - -// SimpleWarning constructs a simple (summary-only) warning diagnostic.
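The gob registration in rpc_friendly.go above is what makes the ForRPC conversion shown earlier useful in practice. A minimal in-package sketch of the round trip, assuming the registered *rpcFriendlyDiag concrete type is the only one crossing the wire:

```go
var diags Diagnostics
diags = diags.Append(fmt.Errorf("boom")) // backed by nativeError

var buf bytes.Buffer
// Convert first: nativeError values are not gob-encodable, but the
// registered rpcFriendlyDiag values are.
if err := gob.NewEncoder(&buf).Encode(diags.ForRPC()); err != nil {
	log.Fatal(err)
}

var decoded Diagnostics
if err := gob.NewDecoder(&buf).Decode(&decoded); err != nil {
	log.Fatal(err)
}
```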
-func SimpleWarning(msg string) Diagnostic { - return simpleWarning(msg) -} - -func (e simpleWarning) Severity() Severity { - return Warning -} - -func (e simpleWarning) Description() Description { - return Description{ - Summary: string(e), - } -} - -func (e simpleWarning) Source() Source { - // No source information available for a simple warning - return Source{} -} - -func (e simpleWarning) FromExpr() *FromExpr { - // Simple warnings are not expression-related - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go deleted file mode 100644 index 3031168d6a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go +++ /dev/null @@ -1,35 +0,0 @@ -package tfdiags - -import ( - "fmt" - "os" - "path/filepath" -) - -type SourceRange struct { - Filename string - Start, End SourcePos -} - -type SourcePos struct { - Line, Column, Byte int -} - -// StartString returns a string representation of the start of the range, -// including the filename and the line and column numbers. -func (r SourceRange) StartString() string { - filename := r.Filename - - // We'll try to relative-ize our filename here so it's less verbose - // in the common case of being in the current working directory. If not, - // we'll just show the full path. - wd, err := os.Getwd() - if err == nil { - relFn, err := filepath.Rel(wd, filename) - if err == nil { - filename = relFn - } - } - - return fmt.Sprintf("%s:%d,%d", filename, r.Start.Line, r.Start.Column) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go deleted file mode 100644 index eaa27373db..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go +++ /dev/null @@ -1,13 +0,0 @@ -package tfdiags - -// Sourceless creates and returns a diagnostic with no source location -// information. This is generally used for operational-type errors that are -// caused by or relate to the environment where Terraform is running rather -// than to the provided configuration. -func Sourceless(severity Severity, summary, detail string) Diagnostic { - return diagnosticBase{ - severity: severity, - summary: summary, - detail: detail, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/generate.sh b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/generate.sh deleted file mode 100644 index de1d693ca4..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/generate.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# We do not run protoc under go:generate because we want to ensure that all -# dependencies of go:generate are "go get"-able for general dev environment -# usability. To compile all protobuf files in this repository, run -# "make protobuf" at the top-level. 
- -set -eu - -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -cd "$DIR" - -protoc -I ./ tfplugin5.proto --go_out=plugins=grpc:./ diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go deleted file mode 100644 index 84179725d6..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go +++ /dev/null @@ -1,3634 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: tfplugin5.proto - -package tfplugin5 - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type StringKind int32 - -const ( - StringKind_PLAIN StringKind = 0 - StringKind_MARKDOWN StringKind = 1 -) - -var StringKind_name = map[int32]string{ - 0: "PLAIN", - 1: "MARKDOWN", -} - -var StringKind_value = map[string]int32{ - "PLAIN": 0, - "MARKDOWN": 1, -} - -func (x StringKind) String() string { - return proto.EnumName(StringKind_name, int32(x)) -} - -func (StringKind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{0} -} - -type Diagnostic_Severity int32 - -const ( - Diagnostic_INVALID Diagnostic_Severity = 0 - Diagnostic_ERROR Diagnostic_Severity = 1 - Diagnostic_WARNING Diagnostic_Severity = 2 -) - -var Diagnostic_Severity_name = map[int32]string{ - 0: "INVALID", - 1: "ERROR", - 2: "WARNING", -} - -var Diagnostic_Severity_value = map[string]int32{ - "INVALID": 0, - "ERROR": 1, - "WARNING": 2, -} - -func (x Diagnostic_Severity) String() string { - return proto.EnumName(Diagnostic_Severity_name, int32(x)) -} - -func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{1, 0} -} - -type Schema_NestedBlock_NestingMode int32 - -const ( - Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0 - Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1 - Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2 - Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3 - Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4 - Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5 -) - -var Schema_NestedBlock_NestingMode_name = map[int32]string{ - 0: "INVALID", - 1: "SINGLE", - 2: "LIST", - 3: "SET", - 4: "MAP", - 5: "GROUP", -} - -var Schema_NestedBlock_NestingMode_value = map[string]int32{ - "INVALID": 0, - "SINGLE": 1, - "LIST": 2, - "SET": 3, - "MAP": 4, - "GROUP": 5, -} - -func (x Schema_NestedBlock_NestingMode) String() string { - return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x)) -} - -func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{5, 2, 0} -} - -// DynamicValue is an opaque encoding of terraform data, with the field name -// indicating the 
encoding scheme used. -type DynamicValue struct { - Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` - Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DynamicValue) Reset() { *m = DynamicValue{} } -func (m *DynamicValue) String() string { return proto.CompactTextString(m) } -func (*DynamicValue) ProtoMessage() {} -func (*DynamicValue) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{0} -} - -func (m *DynamicValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DynamicValue.Unmarshal(m, b) -} -func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic) -} -func (m *DynamicValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_DynamicValue.Merge(m, src) -} -func (m *DynamicValue) XXX_Size() int { - return xxx_messageInfo_DynamicValue.Size(m) -} -func (m *DynamicValue) XXX_DiscardUnknown() { - xxx_messageInfo_DynamicValue.DiscardUnknown(m) -} - -var xxx_messageInfo_DynamicValue proto.InternalMessageInfo - -func (m *DynamicValue) GetMsgpack() []byte { - if m != nil { - return m.Msgpack - } - return nil -} - -func (m *DynamicValue) GetJson() []byte { - if m != nil { - return m.Json - } - return nil -} - -type Diagnostic struct { - Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"` - Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` - Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` - Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Diagnostic) Reset() { *m = Diagnostic{} } -func (m *Diagnostic) String() string { return proto.CompactTextString(m) } -func (*Diagnostic) ProtoMessage() {} -func (*Diagnostic) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{1} -} - -func (m *Diagnostic) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Diagnostic.Unmarshal(m, b) -} -func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic) -} -func (m *Diagnostic) XXX_Merge(src proto.Message) { - xxx_messageInfo_Diagnostic.Merge(m, src) -} -func (m *Diagnostic) XXX_Size() int { - return xxx_messageInfo_Diagnostic.Size(m) -} -func (m *Diagnostic) XXX_DiscardUnknown() { - xxx_messageInfo_Diagnostic.DiscardUnknown(m) -} - -var xxx_messageInfo_Diagnostic proto.InternalMessageInfo - -func (m *Diagnostic) GetSeverity() Diagnostic_Severity { - if m != nil { - return m.Severity - } - return Diagnostic_INVALID -} - -func (m *Diagnostic) GetSummary() string { - if m != nil { - return m.Summary - } - return "" -} - -func (m *Diagnostic) GetDetail() string { - if m != nil { - return m.Detail - } - return "" -} - -func (m *Diagnostic) GetAttribute() *AttributePath { - if m != nil { - return m.Attribute - } - return nil -} - -type AttributePath struct { - Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - 
-func (m *AttributePath) Reset() { *m = AttributePath{} } -func (m *AttributePath) String() string { return proto.CompactTextString(m) } -func (*AttributePath) ProtoMessage() {} -func (*AttributePath) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{2} -} - -func (m *AttributePath) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AttributePath.Unmarshal(m, b) -} -func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic) -} -func (m *AttributePath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributePath.Merge(m, src) -} -func (m *AttributePath) XXX_Size() int { - return xxx_messageInfo_AttributePath.Size(m) -} -func (m *AttributePath) XXX_DiscardUnknown() { - xxx_messageInfo_AttributePath.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributePath proto.InternalMessageInfo - -func (m *AttributePath) GetSteps() []*AttributePath_Step { - if m != nil { - return m.Steps - } - return nil -} - -type AttributePath_Step struct { - // Types that are valid to be assigned to Selector: - // *AttributePath_Step_AttributeName - // *AttributePath_Step_ElementKeyString - // *AttributePath_Step_ElementKeyInt - Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AttributePath_Step) Reset() { *m = AttributePath_Step{} } -func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) } -func (*AttributePath_Step) ProtoMessage() {} -func (*AttributePath_Step) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{2, 0} -} - -func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b) -} -func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic) -} -func (m *AttributePath_Step) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributePath_Step.Merge(m, src) -} -func (m *AttributePath_Step) XXX_Size() int { - return xxx_messageInfo_AttributePath_Step.Size(m) -} -func (m *AttributePath_Step) XXX_DiscardUnknown() { - xxx_messageInfo_AttributePath_Step.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributePath_Step proto.InternalMessageInfo - -type isAttributePath_Step_Selector interface { - isAttributePath_Step_Selector() -} - -type AttributePath_Step_AttributeName struct { - AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` -} - -type AttributePath_Step_ElementKeyString struct { - ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` -} - -type AttributePath_Step_ElementKeyInt struct { - ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` -} - -func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} - -func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} - -func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} - -func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { - if m != nil { - return m.Selector - } - return nil -} - -func (m *AttributePath_Step) GetAttributeName() string { - if x, ok := m.GetSelector().(*AttributePath_Step_AttributeName); ok { - return x.AttributeName - } - return "" -} - -func 
(m *AttributePath_Step) GetElementKeyString() string { - if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyString); ok { - return x.ElementKeyString - } - return "" -} - -func (m *AttributePath_Step) GetElementKeyInt() int64 { - if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyInt); ok { - return x.ElementKeyInt - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*AttributePath_Step) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*AttributePath_Step_AttributeName)(nil), - (*AttributePath_Step_ElementKeyString)(nil), - (*AttributePath_Step_ElementKeyInt)(nil), - } -} - -type Stop struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Stop) Reset() { *m = Stop{} } -func (m *Stop) String() string { return proto.CompactTextString(m) } -func (*Stop) ProtoMessage() {} -func (*Stop) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{3} -} - -func (m *Stop) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Stop.Unmarshal(m, b) -} -func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Stop.Marshal(b, m, deterministic) -} -func (m *Stop) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stop.Merge(m, src) -} -func (m *Stop) XXX_Size() int { - return xxx_messageInfo_Stop.Size(m) -} -func (m *Stop) XXX_DiscardUnknown() { - xxx_messageInfo_Stop.DiscardUnknown(m) -} - -var xxx_messageInfo_Stop proto.InternalMessageInfo - -type Stop_Request struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Stop_Request) Reset() { *m = Stop_Request{} } -func (m *Stop_Request) String() string { return proto.CompactTextString(m) } -func (*Stop_Request) ProtoMessage() {} -func (*Stop_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{3, 0} -} - -func (m *Stop_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Stop_Request.Unmarshal(m, b) -} -func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic) -} -func (m *Stop_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stop_Request.Merge(m, src) -} -func (m *Stop_Request) XXX_Size() int { - return xxx_messageInfo_Stop_Request.Size(m) -} -func (m *Stop_Request) XXX_DiscardUnknown() { - xxx_messageInfo_Stop_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_Stop_Request proto.InternalMessageInfo - -type Stop_Response struct { - Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Stop_Response) Reset() { *m = Stop_Response{} } -func (m *Stop_Response) String() string { return proto.CompactTextString(m) } -func (*Stop_Response) ProtoMessage() {} -func (*Stop_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{3, 1} -} - -func (m *Stop_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Stop_Response.Unmarshal(m, b) -} -func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic) -} -func (m *Stop_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stop_Response.Merge(m, src) -} -func (m *Stop_Response) XXX_Size() 
int { - return xxx_messageInfo_Stop_Response.Size(m) -} -func (m *Stop_Response) XXX_DiscardUnknown() { - xxx_messageInfo_Stop_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_Stop_Response proto.InternalMessageInfo - -func (m *Stop_Response) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -// RawState holds the stored state for a resource to be upgraded by the -// provider. It can be in one of two formats, the current json encoded format -// in bytes, or the legacy flatmap format as a map of strings. -type RawState struct { - Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` - Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RawState) Reset() { *m = RawState{} } -func (m *RawState) String() string { return proto.CompactTextString(m) } -func (*RawState) ProtoMessage() {} -func (*RawState) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{4} -} - -func (m *RawState) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RawState.Unmarshal(m, b) -} -func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RawState.Marshal(b, m, deterministic) -} -func (m *RawState) XXX_Merge(src proto.Message) { - xxx_messageInfo_RawState.Merge(m, src) -} -func (m *RawState) XXX_Size() int { - return xxx_messageInfo_RawState.Size(m) -} -func (m *RawState) XXX_DiscardUnknown() { - xxx_messageInfo_RawState.DiscardUnknown(m) -} - -var xxx_messageInfo_RawState proto.InternalMessageInfo - -func (m *RawState) GetJson() []byte { - if m != nil { - return m.Json - } - return nil -} - -func (m *RawState) GetFlatmap() map[string]string { - if m != nil { - return m.Flatmap - } - return nil -} - -// Schema is the configuration schema for a Resource, Provider, or Provisioner. -type Schema struct { - // The version of the schema. - // Schemas are versioned, so that providers can upgrade a saved resource - // state when the schema is changed. - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - // Block is the top level configuration block for this schema. 
-	Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Schema) Reset() { *m = Schema{} }
-func (m *Schema) String() string { return proto.CompactTextString(m) }
-func (*Schema) ProtoMessage() {}
-func (*Schema) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{5}
-}
-
-func (m *Schema) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Schema.Unmarshal(m, b)
-}
-func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Schema.Marshal(b, m, deterministic)
-}
-func (m *Schema) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Schema.Merge(m, src)
-}
-func (m *Schema) XXX_Size() int {
-	return xxx_messageInfo_Schema.Size(m)
-}
-func (m *Schema) XXX_DiscardUnknown() {
-	xxx_messageInfo_Schema.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Schema proto.InternalMessageInfo
-
-func (m *Schema) GetVersion() int64 {
-	if m != nil {
-		return m.Version
-	}
-	return 0
-}
-
-func (m *Schema) GetBlock() *Schema_Block {
-	if m != nil {
-		return m.Block
-	}
-	return nil
-}
-
-type Schema_Block struct {
-	Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
-	Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"`
-	BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"`
-	Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
-	DescriptionKind StringKind `protobuf:"varint,5,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"`
-	Deprecated bool `protobuf:"varint,6,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Schema_Block) Reset() { *m = Schema_Block{} }
-func (m *Schema_Block) String() string { return proto.CompactTextString(m) }
-func (*Schema_Block) ProtoMessage() {}
-func (*Schema_Block) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{5, 0}
-}
-
-func (m *Schema_Block) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Schema_Block.Unmarshal(m, b)
-}
-func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic)
-}
-func (m *Schema_Block) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Schema_Block.Merge(m, src)
-}
-func (m *Schema_Block) XXX_Size() int {
-	return xxx_messageInfo_Schema_Block.Size(m)
-}
-func (m *Schema_Block) XXX_DiscardUnknown() {
-	xxx_messageInfo_Schema_Block.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Schema_Block proto.InternalMessageInfo
-
-func (m *Schema_Block) GetVersion() int64 {
-	if m != nil {
-		return m.Version
-	}
-	return 0
-}
-
-func (m *Schema_Block) GetAttributes() []*Schema_Attribute {
-	if m != nil {
-		return m.Attributes
-	}
-	return nil
-}
-
-func (m *Schema_Block) GetBlockTypes() []*Schema_NestedBlock {
-	if m != nil {
-		return m.BlockTypes
-	}
-	return nil
-}
-
-func (m *Schema_Block) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Schema_Block) GetDescriptionKind() StringKind {
-	if m != nil {
-		return m.DescriptionKind
-	}
-	return StringKind_PLAIN
-}
-
-func (m *Schema_Block) GetDeprecated() bool {
-	if m != nil {
-		return m.Deprecated
-	}
-	return false
-}
-
-type Schema_Attribute struct {
-	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
-	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
-	Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
-	Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"`
-	Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"`
-	Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" json:"sensitive,omitempty"`
-	DescriptionKind StringKind `protobuf:"varint,8,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"`
-	Deprecated bool `protobuf:"varint,9,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Schema_Attribute) Reset() { *m = Schema_Attribute{} }
-func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) }
-func (*Schema_Attribute) ProtoMessage() {}
-func (*Schema_Attribute) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{5, 1}
-}
-
-func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b)
-}
-func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic)
-}
-func (m *Schema_Attribute) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Schema_Attribute.Merge(m, src)
-}
-func (m *Schema_Attribute) XXX_Size() int {
-	return xxx_messageInfo_Schema_Attribute.Size(m)
-}
-func (m *Schema_Attribute) XXX_DiscardUnknown() {
-	xxx_messageInfo_Schema_Attribute.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Schema_Attribute proto.InternalMessageInfo
-
-func (m *Schema_Attribute) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *Schema_Attribute) GetType() []byte {
-	if m != nil {
-		return m.Type
-	}
-	return nil
-}
-
-func (m *Schema_Attribute) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Schema_Attribute) GetRequired() bool {
-	if m != nil {
-		return m.Required
-	}
-	return false
-}
-
-func (m *Schema_Attribute) GetOptional() bool {
-	if m != nil {
-		return m.Optional
-	}
-	return false
-}
-
-func (m *Schema_Attribute) GetComputed() bool {
-	if m != nil {
-		return m.Computed
-	}
-	return false
-}
-
-func (m *Schema_Attribute) GetSensitive() bool {
-	if m != nil {
-		return m.Sensitive
-	}
-	return false
-}
-
-func (m *Schema_Attribute) GetDescriptionKind() StringKind {
-	if m != nil {
-		return m.DescriptionKind
-	}
-	return StringKind_PLAIN
-}
-
-func (m *Schema_Attribute) GetDeprecated() bool {
-	if m != nil {
-		return m.Deprecated
-	}
-	return false
-}
-
-type Schema_NestedBlock struct {
-	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
-	Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"`
-	Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"`
-	MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
-	MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Schema_NestedBlock) Reset() { *m = Schema_NestedBlock{} }
-func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) }
-func (*Schema_NestedBlock) ProtoMessage() {}
-func (*Schema_NestedBlock) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{5, 2}
-}
-
-func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b)
-}
-func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic)
-}
-func (m *Schema_NestedBlock) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Schema_NestedBlock.Merge(m, src)
-}
-func (m *Schema_NestedBlock) XXX_Size() int {
-	return xxx_messageInfo_Schema_NestedBlock.Size(m)
-}
-func (m *Schema_NestedBlock) XXX_DiscardUnknown() {
-	xxx_messageInfo_Schema_NestedBlock.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Schema_NestedBlock proto.InternalMessageInfo
-
-func (m *Schema_NestedBlock) GetTypeName() string {
-	if m != nil {
-		return m.TypeName
-	}
-	return ""
-}
-
-func (m *Schema_NestedBlock) GetBlock() *Schema_Block {
-	if m != nil {
-		return m.Block
-	}
-	return nil
-}
-
-func (m *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode {
-	if m != nil {
-		return m.Nesting
-	}
-	return Schema_NestedBlock_INVALID
-}
-
-func (m *Schema_NestedBlock) GetMinItems() int64 {
-	if m != nil {
-		return m.MinItems
-	}
-	return 0
-}
-
-func (m *Schema_NestedBlock) GetMaxItems() int64 {
-	if m != nil {
-		return m.MaxItems
-	}
-	return 0
-}
-
-type GetProviderSchema struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetProviderSchema) Reset() { *m = GetProviderSchema{} }
-func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) }
-func (*GetProviderSchema) ProtoMessage() {}
-func (*GetProviderSchema) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{6}
-}
-
-func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b)
-}
-func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic)
-}
-func (m *GetProviderSchema) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GetProviderSchema.Merge(m, src)
-}
-func (m *GetProviderSchema) XXX_Size() int {
-	return xxx_messageInfo_GetProviderSchema.Size(m)
-}
-func (m *GetProviderSchema) XXX_DiscardUnknown() {
-	xxx_messageInfo_GetProviderSchema.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetProviderSchema proto.InternalMessageInfo
-
-type GetProviderSchema_Request struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetProviderSchema_Request) Reset() { *m = GetProviderSchema_Request{} }
-func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) }
-func (*GetProviderSchema_Request) ProtoMessage() {}
-func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{6, 0}
-}
-
-func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b)
-}
-func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic)
-}
-func (m *GetProviderSchema_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GetProviderSchema_Request.Merge(m, src)
-}
-func (m *GetProviderSchema_Request) XXX_Size() int {
-	return xxx_messageInfo_GetProviderSchema_Request.Size(m)
-}
-func (m *GetProviderSchema_Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_GetProviderSchema_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetProviderSchema_Request proto.InternalMessageInfo
-
-type GetProviderSchema_Response struct {
-	Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"`
-	ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-	DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-	Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-	ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetProviderSchema_Response) Reset() { *m = GetProviderSchema_Response{} }
-func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) }
-func (*GetProviderSchema_Response) ProtoMessage() {}
-func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{6, 1}
-}
-
-func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b)
-}
-func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic)
-}
-func (m *GetProviderSchema_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GetProviderSchema_Response.Merge(m, src)
-}
-func (m *GetProviderSchema_Response) XXX_Size() int {
-	return xxx_messageInfo_GetProviderSchema_Response.Size(m)
-}
-func (m *GetProviderSchema_Response) XXX_DiscardUnknown() {
-	xxx_messageInfo_GetProviderSchema_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetProviderSchema_Response proto.InternalMessageInfo
-
-func (m *GetProviderSchema_Response) GetProvider() *Schema {
-	if m != nil {
-		return m.Provider
-	}
-	return nil
-}
-
-func (m *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema {
-	if m != nil {
-		return m.ResourceSchemas
-	}
-	return nil
-}
-
-func (m *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema {
-	if m != nil {
-		return m.DataSourceSchemas
-	}
-	return nil
-}
-
-func (m *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic {
-	if m != nil {
-		return m.Diagnostics
-	}
-	return nil
-}
-
-func (m *GetProviderSchema_Response) GetProviderMeta() *Schema {
-	if m != nil {
-		return m.ProviderMeta
-	}
-	return nil
-}
-
-type PrepareProviderConfig struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PrepareProviderConfig) Reset() { *m = PrepareProviderConfig{} }
-func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) }
-func (*PrepareProviderConfig) ProtoMessage() {}
-func (*PrepareProviderConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{7}
-}
-
-func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b)
-}
-func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic)
-}
-func (m *PrepareProviderConfig) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PrepareProviderConfig.Merge(m, src)
-}
-func (m *PrepareProviderConfig) XXX_Size() int {
-	return xxx_messageInfo_PrepareProviderConfig.Size(m)
-}
-func (m *PrepareProviderConfig) XXX_DiscardUnknown() {
-	xxx_messageInfo_PrepareProviderConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PrepareProviderConfig proto.InternalMessageInfo
-
-type PrepareProviderConfig_Request struct {
-	Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PrepareProviderConfig_Request) Reset() { *m = PrepareProviderConfig_Request{} }
-func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) }
-func (*PrepareProviderConfig_Request) ProtoMessage() {}
-func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{7, 0}
-}
-
-func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b)
-}
-func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic)
-}
-func (m *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PrepareProviderConfig_Request.Merge(m, src)
-}
-func (m *PrepareProviderConfig_Request) XXX_Size() int {
-	return xxx_messageInfo_PrepareProviderConfig_Request.Size(m)
-}
-func (m *PrepareProviderConfig_Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_PrepareProviderConfig_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PrepareProviderConfig_Request proto.InternalMessageInfo
-
-func (m *PrepareProviderConfig_Request) GetConfig() *DynamicValue {
-	if m != nil {
-		return m.Config
-	}
-	return nil
-}
-
-type PrepareProviderConfig_Response struct {
-	PreparedConfig *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"`
-	Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PrepareProviderConfig_Response) Reset() { *m = PrepareProviderConfig_Response{} }
-func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) }
-func (*PrepareProviderConfig_Response) ProtoMessage() {}
-func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{7, 1}
-}
-
-func (m *PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b)
-}
-func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic)
-}
-func (m *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PrepareProviderConfig_Response.Merge(m, src)
-}
-func (m *PrepareProviderConfig_Response) XXX_Size() int {
-	return xxx_messageInfo_PrepareProviderConfig_Response.Size(m)
-}
-func (m *PrepareProviderConfig_Response) XXX_DiscardUnknown() {
-	xxx_messageInfo_PrepareProviderConfig_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PrepareProviderConfig_Response proto.InternalMessageInfo
-
-func (m *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue {
-	if m != nil {
-		return m.PreparedConfig
-	}
-	return nil
-}
-
-func (m *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic {
-	if m != nil {
-		return m.Diagnostics
-	}
-	return nil
-}
-
-type UpgradeResourceState struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UpgradeResourceState) Reset() { *m = UpgradeResourceState{} }
-func (m *UpgradeResourceState) String() string { return proto.CompactTextString(m) }
-func (*UpgradeResourceState) ProtoMessage() {}
-func (*UpgradeResourceState) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{8}
-}
-
-func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b)
-}
-func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic)
-}
-func (m *UpgradeResourceState) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UpgradeResourceState.Merge(m, src)
-}
-func (m *UpgradeResourceState) XXX_Size() int {
-	return xxx_messageInfo_UpgradeResourceState.Size(m)
-}
-func (m *UpgradeResourceState) XXX_DiscardUnknown() {
-	xxx_messageInfo_UpgradeResourceState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UpgradeResourceState proto.InternalMessageInfo
-
-type UpgradeResourceState_Request struct {
-	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
-	// version is the schema_version number recorded in the state file
-	Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
-	// raw_state is the raw states as stored for the resource. Core does
-	// not have access to the schema of prior_version, so it's the
-	// provider's responsibility to interpret this value using the
-	// appropriate older schema. The raw_state will be the json encoded
-	// state, or a legacy flat-mapped format.
-	RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UpgradeResourceState_Request) Reset() { *m = UpgradeResourceState_Request{} }
-func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) }
-func (*UpgradeResourceState_Request) ProtoMessage() {}
-func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{8, 0}
-}
-
-func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b)
-}
-func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic)
-}
-func (m *UpgradeResourceState_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UpgradeResourceState_Request.Merge(m, src)
-}
-func (m *UpgradeResourceState_Request) XXX_Size() int {
-	return xxx_messageInfo_UpgradeResourceState_Request.Size(m)
-}
-func (m *UpgradeResourceState_Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_UpgradeResourceState_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UpgradeResourceState_Request proto.InternalMessageInfo
-
-func (m *UpgradeResourceState_Request) GetTypeName() string {
-	if m != nil {
-		return m.TypeName
-	}
-	return ""
-}
-
-func (m *UpgradeResourceState_Request) GetVersion() int64 {
-	if m != nil {
-		return m.Version
-	}
-	return 0
-}
-
-func (m *UpgradeResourceState_Request) GetRawState() *RawState {
-	if m != nil {
-		return m.RawState
-	}
-	return nil
-}
-
-type UpgradeResourceState_Response struct {
-	// new_state is a msgpack-encoded data structure that, when interpreted with
-	// the _current_ schema for this resource type, is functionally equivalent to
-	// that which was given in prior_state_raw.
-	UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"`
-	// diagnostics describes any errors encountered during migration that could not
-	// be safely resolved, and warnings about any possibly-risky assumptions made
-	// in the upgrade process.
-	Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UpgradeResourceState_Response) Reset() { *m = UpgradeResourceState_Response{} }
-func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) }
-func (*UpgradeResourceState_Response) ProtoMessage() {}
-func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{8, 1}
-}
-
-func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b)
-}
-func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic)
-}
-func (m *UpgradeResourceState_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UpgradeResourceState_Response.Merge(m, src)
-}
-func (m *UpgradeResourceState_Response) XXX_Size() int {
-	return xxx_messageInfo_UpgradeResourceState_Response.Size(m)
-}
-func (m *UpgradeResourceState_Response) XXX_DiscardUnknown() {
-	xxx_messageInfo_UpgradeResourceState_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UpgradeResourceState_Response proto.InternalMessageInfo
-
-func (m *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue {
-	if m != nil {
-		return m.UpgradedState
-	}
-	return nil
-}
-
-func (m *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic {
-	if m != nil {
-		return m.Diagnostics
-	}
-	return nil
-}
-
-type ValidateResourceTypeConfig struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ValidateResourceTypeConfig) Reset() { *m = ValidateResourceTypeConfig{} }
-func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) }
-func (*ValidateResourceTypeConfig) ProtoMessage() {}
-func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{9}
-}
-
-func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b)
-}
-func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic)
-}
-func (m *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ValidateResourceTypeConfig.Merge(m, src)
-}
-func (m *ValidateResourceTypeConfig) XXX_Size() int {
-	return xxx_messageInfo_ValidateResourceTypeConfig.Size(m)
-}
-func (m *ValidateResourceTypeConfig) XXX_DiscardUnknown() {
-	xxx_messageInfo_ValidateResourceTypeConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValidateResourceTypeConfig proto.InternalMessageInfo
-
-type ValidateResourceTypeConfig_Request struct {
-	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
-	Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ValidateResourceTypeConfig_Request) Reset() { *m = ValidateResourceTypeConfig_Request{} }
-func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) }
-func (*ValidateResourceTypeConfig_Request) ProtoMessage() {}
-func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{9, 0}
-}
-
-func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b)
-}
-func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic)
-}
-func (m *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(m, src)
-}
-func (m *ValidateResourceTypeConfig_Request) XXX_Size() int {
-	return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m)
-}
-func (m *ValidateResourceTypeConfig_Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_ValidateResourceTypeConfig_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValidateResourceTypeConfig_Request proto.InternalMessageInfo
-
-func (m *ValidateResourceTypeConfig_Request) GetTypeName() string {
-	if m != nil {
-		return m.TypeName
-	}
-	return ""
-}
-
-func (m *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue {
-	if m != nil {
-		return m.Config
-	}
-	return nil
-}
-
-type ValidateResourceTypeConfig_Response struct {
-	Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ValidateResourceTypeConfig_Response) Reset() { *m = ValidateResourceTypeConfig_Response{} }
-func (m *ValidateResourceTypeConfig_Response) String() string { return proto.CompactTextString(m) }
-func (*ValidateResourceTypeConfig_Response) ProtoMessage() {}
-func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{9, 1}
-}
-
-func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b)
-}
-func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic)
-}
-func (m *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(m, src)
-}
-func (m *ValidateResourceTypeConfig_Response) XXX_Size() int {
-	return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m)
-}
-func (m *ValidateResourceTypeConfig_Response) XXX_DiscardUnknown() {
-	xxx_messageInfo_ValidateResourceTypeConfig_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValidateResourceTypeConfig_Response proto.InternalMessageInfo
-
-func (m *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic {
-	if m != nil {
-		return m.Diagnostics
-	}
-	return nil
-}
-
-type ValidateDataSourceConfig struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ValidateDataSourceConfig) Reset() { *m = ValidateDataSourceConfig{} }
-func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) }
-func (*ValidateDataSourceConfig) ProtoMessage() {}
-func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{10}
-}
-
-func (m *ValidateDataSourceConfig) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b)
-}
-func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic)
-}
-func (m *ValidateDataSourceConfig) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ValidateDataSourceConfig.Merge(m, src)
-}
-func (m *ValidateDataSourceConfig) XXX_Size() int {
-	return xxx_messageInfo_ValidateDataSourceConfig.Size(m)
-}
-func (m *ValidateDataSourceConfig) XXX_DiscardUnknown() {
-	xxx_messageInfo_ValidateDataSourceConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValidateDataSourceConfig proto.InternalMessageInfo
-
-type ValidateDataSourceConfig_Request struct {
-	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
-	Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ValidateDataSourceConfig_Request) Reset() { *m = ValidateDataSourceConfig_Request{} }
-func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) }
-func (*ValidateDataSourceConfig_Request) ProtoMessage() {}
-func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{10, 0}
-}
-
-func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b)
-}
-func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic)
-}
-func (m *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(m, src)
-}
-func (m *ValidateDataSourceConfig_Request) XXX_Size() int {
-	return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m)
-}
-func (m *ValidateDataSourceConfig_Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_ValidateDataSourceConfig_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValidateDataSourceConfig_Request proto.InternalMessageInfo
-
-func (m *ValidateDataSourceConfig_Request) GetTypeName() string {
-	if m != nil {
-		return m.TypeName
-	}
-	return ""
-}
-
-func (m *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue {
-	if m != nil {
-		return m.Config
-	}
-	return nil
-}
-
-type ValidateDataSourceConfig_Response struct {
-	Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ValidateDataSourceConfig_Response) Reset() { *m = ValidateDataSourceConfig_Response{} }
-func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) }
-func (*ValidateDataSourceConfig_Response) ProtoMessage() {}
-func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{10, 1}
-}
-
-func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b)
-}
-func (m *ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic)
-}
-func (m *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(m, src)
-}
-func (m *ValidateDataSourceConfig_Response) XXX_Size() int {
-	return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m)
-}
-func (m *ValidateDataSourceConfig_Response) XXX_DiscardUnknown() {
-	xxx_messageInfo_ValidateDataSourceConfig_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValidateDataSourceConfig_Response proto.InternalMessageInfo
-
-func (m *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic {
-	if m != nil {
-		return m.Diagnostics
-	}
-	return nil
-}
-
-type Configure struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Configure) Reset() { *m = Configure{} }
-func (m *Configure) String() string { return proto.CompactTextString(m) }
-func (*Configure) ProtoMessage() {}
-func (*Configure) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{11}
-}
-
-func (m *Configure) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Configure.Unmarshal(m, b)
-}
-func (m *Configure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Configure.Marshal(b, m, deterministic)
-}
-func (m *Configure) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Configure.Merge(m, src)
-}
-func (m *Configure) XXX_Size() int {
-	return xxx_messageInfo_Configure.Size(m)
-}
-func (m *Configure) XXX_DiscardUnknown() {
-	xxx_messageInfo_Configure.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Configure proto.InternalMessageInfo
-
-type Configure_Request struct {
-	TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"`
-	Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Configure_Request) Reset() { *m = Configure_Request{} }
-func (m *Configure_Request) String() string { return proto.CompactTextString(m) }
-func (*Configure_Request) ProtoMessage() {}
-func (*Configure_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{11, 0}
-}
-
-func (m *Configure_Request) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Configure_Request.Unmarshal(m, b)
-}
-func (m *Configure_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Configure_Request.Marshal(b, m, deterministic)
-}
-func (m *Configure_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Configure_Request.Merge(m, src)
-}
-func (m *Configure_Request) XXX_Size() int {
-	return xxx_messageInfo_Configure_Request.Size(m)
-}
-func (m *Configure_Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_Configure_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Configure_Request proto.InternalMessageInfo
-
-func (m *Configure_Request) GetTerraformVersion() string {
-	if m != nil {
-		return m.TerraformVersion
-	}
-	return ""
-}
-
-func (m *Configure_Request) GetConfig() *DynamicValue {
-	if m != nil {
-		return m.Config
-	}
-	return nil
-}
-
-type Configure_Response struct {
-	Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Configure_Response) Reset() { *m = Configure_Response{} }
-func (m *Configure_Response) String() string { return proto.CompactTextString(m) }
-func (*Configure_Response) ProtoMessage() {}
-func (*Configure_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{11, 1}
-}
-
-func (m *Configure_Response) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Configure_Response.Unmarshal(m, b)
-}
-func (m *Configure_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Configure_Response.Marshal(b, m, deterministic)
-}
-func (m *Configure_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Configure_Response.Merge(m, src)
-}
-func (m *Configure_Response) XXX_Size() int {
-	return xxx_messageInfo_Configure_Response.Size(m)
-}
-func (m *Configure_Response) XXX_DiscardUnknown() {
-	xxx_messageInfo_Configure_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Configure_Response proto.InternalMessageInfo
-
-func (m *Configure_Response) GetDiagnostics() []*Diagnostic {
-	if m != nil {
-		return m.Diagnostics
-	}
-	return nil
-}
-
-type ReadResource struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ReadResource) Reset() { *m = ReadResource{} }
-func (m *ReadResource) String() string { return proto.CompactTextString(m) }
-func (*ReadResource) ProtoMessage() {}
-func (*ReadResource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{12}
-}
-
-func (m *ReadResource) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ReadResource.Unmarshal(m, b)
-}
-func (m *ReadResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ReadResource.Marshal(b, m, deterministic)
-}
-func (m *ReadResource) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadResource.Merge(m, src)
-}
-func (m *ReadResource) XXX_Size() int {
-	return xxx_messageInfo_ReadResource.Size(m)
-}
-func (m *ReadResource) XXX_DiscardUnknown() {
-	xxx_messageInfo_ReadResource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ReadResource proto.InternalMessageInfo
-
-type ReadResource_Request struct {
-	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
-	CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"`
-	Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"`
-	ProviderMeta *DynamicValue `protobuf:"bytes,4,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ReadResource_Request) Reset() { *m = ReadResource_Request{} }
-func (m *ReadResource_Request) String() string { return proto.CompactTextString(m) }
-func (*ReadResource_Request) ProtoMessage() {}
-func (*ReadResource_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{12, 0}
-}
-
-func (m *ReadResource_Request) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ReadResource_Request.Unmarshal(m, b)
-}
-func (m *ReadResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ReadResource_Request.Marshal(b, m, deterministic)
-}
-func (m *ReadResource_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadResource_Request.Merge(m, src)
-}
-func (m *ReadResource_Request) XXX_Size() int {
-	return xxx_messageInfo_ReadResource_Request.Size(m)
-}
-func (m *ReadResource_Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_ReadResource_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ReadResource_Request proto.InternalMessageInfo
-
-func (m *ReadResource_Request) GetTypeName() string {
-	if m != nil {
-		return m.TypeName
-	}
-	return ""
-}
-
-func (m *ReadResource_Request) GetCurrentState() *DynamicValue {
-	if m != nil {
-		return m.CurrentState
-	}
-	return nil
-}
-
-func (m *ReadResource_Request) GetPrivate() []byte {
-	if m != nil {
-		return m.Private
-	}
-	return nil
-}
-
-func (m *ReadResource_Request) GetProviderMeta() *DynamicValue {
-	if m != nil {
-		return m.ProviderMeta
-	}
-	return nil
-}
-
-type ReadResource_Response struct {
-	NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
-	Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-	Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ReadResource_Response) Reset() { *m = ReadResource_Response{} }
-func (m *ReadResource_Response) String() string { return proto.CompactTextString(m) }
-func (*ReadResource_Response) ProtoMessage() {}
-func (*ReadResource_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{12, 1}
-}
-
-func (m *ReadResource_Response) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ReadResource_Response.Unmarshal(m, b)
-}
-func (m *ReadResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ReadResource_Response.Marshal(b, m, deterministic)
-}
-func (m *ReadResource_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadResource_Response.Merge(m, src)
-}
-func (m *ReadResource_Response) XXX_Size() int {
-	return xxx_messageInfo_ReadResource_Response.Size(m)
-}
-func (m *ReadResource_Response) XXX_DiscardUnknown() {
-	xxx_messageInfo_ReadResource_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ReadResource_Response proto.InternalMessageInfo
-
-func (m *ReadResource_Response) GetNewState() *DynamicValue {
-	if m != nil {
-		return m.NewState
-	}
-	return nil
-}
-
-func (m *ReadResource_Response) GetDiagnostics() []*Diagnostic {
-	if m != nil {
-		return m.Diagnostics
-	}
-	return nil
-}
-
-func (m *ReadResource_Response) GetPrivate() []byte {
-	if m != nil {
-		return m.Private
-	}
-	return nil
-}
-
-type PlanResourceChange struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PlanResourceChange) Reset() { *m = PlanResourceChange{} }
-func (m *PlanResourceChange) String() string { return proto.CompactTextString(m) }
-func (*PlanResourceChange) ProtoMessage() {}
-func (*PlanResourceChange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{13}
-}
-
-func (m *PlanResourceChange) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PlanResourceChange.Unmarshal(m, b)
-}
-func (m *PlanResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PlanResourceChange.Marshal(b, m, deterministic)
-}
-func (m *PlanResourceChange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PlanResourceChange.Merge(m, src)
-}
-func (m *PlanResourceChange) XXX_Size() int {
-	return xxx_messageInfo_PlanResourceChange.Size(m)
-}
-func (m *PlanResourceChange) XXX_DiscardUnknown() {
-	xxx_messageInfo_PlanResourceChange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PlanResourceChange proto.InternalMessageInfo
-
-type PlanResourceChange_Request struct {
-	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
-	PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"`
-	ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"`
-	Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"`
-	PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"`
-	ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PlanResourceChange_Request) Reset() { *m = PlanResourceChange_Request{} }
-func (m *PlanResourceChange_Request) String() string { return proto.CompactTextString(m) }
-func (*PlanResourceChange_Request) ProtoMessage() {}
-func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{13, 0}
-}
-
-func (m *PlanResourceChange_Request) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PlanResourceChange_Request.Unmarshal(m, b)
-}
-func (m *PlanResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PlanResourceChange_Request.Marshal(b, m, deterministic)
-}
-func (m *PlanResourceChange_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PlanResourceChange_Request.Merge(m, src)
-}
-func (m *PlanResourceChange_Request) XXX_Size() int {
-	return xxx_messageInfo_PlanResourceChange_Request.Size(m)
-}
-func (m *PlanResourceChange_Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_PlanResourceChange_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PlanResourceChange_Request proto.InternalMessageInfo
-
-func (m *PlanResourceChange_Request) GetTypeName() string {
-	if m != nil {
-		return m.TypeName
-	}
-	return ""
-}
-
-func (m *PlanResourceChange_Request) GetPriorState() *DynamicValue {
-	if m != nil {
-		return m.PriorState
-	}
-	return nil
-}
-
-func (m *PlanResourceChange_Request) GetProposedNewState() *DynamicValue {
-	if m != nil {
-		return m.ProposedNewState
-	}
-	return nil
-}
-
-func (m *PlanResourceChange_Request) GetConfig() *DynamicValue {
-	if m != nil {
-		return m.Config
-	}
-	return nil
-}
-
-func (m *PlanResourceChange_Request) GetPriorPrivate() []byte {
-	if m != nil {
-		return m.PriorPrivate
-	}
-	return nil
-}
-
-func (m *PlanResourceChange_Request) GetProviderMeta() *DynamicValue {
-	if m != nil {
-		return m.ProviderMeta
-	}
-	return nil
-}
-
-type PlanResourceChange_Response struct {
-	PlannedState *DynamicValue `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"`
-	RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"`
-	PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"`
-	Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-	// This may be set only by the helper/schema "SDK" in the main Terraform
-	// repository, to request that Terraform Core >=0.12 permit additional
-	// inconsistencies that can result from the legacy SDK type system
-	// and its imprecise mapping to the >=0.12 type system.
-	// The change in behavior implied by this flag makes sense only for the
-	// specific details of the legacy SDK type system, and are not a general
-	// mechanism to avoid proper type handling in providers.
-	//
-	// ==== DO NOT USE THIS ====
-	// ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
-	// ==== DO NOT USE THIS ====
-	LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PlanResourceChange_Response) Reset() { *m = PlanResourceChange_Response{} }
-func (m *PlanResourceChange_Response) String() string { return proto.CompactTextString(m) }
-func (*PlanResourceChange_Response) ProtoMessage() {}
-func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{13, 1}
-}
-
-func (m *PlanResourceChange_Response) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_PlanResourceChange_Response.Unmarshal(m, b)
-}
-func (m *PlanResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_PlanResourceChange_Response.Marshal(b, m, deterministic)
-}
-func (m *PlanResourceChange_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PlanResourceChange_Response.Merge(m, src)
-}
-func (m *PlanResourceChange_Response) XXX_Size() int {
-	return xxx_messageInfo_PlanResourceChange_Response.Size(m)
-}
-func (m *PlanResourceChange_Response) XXX_DiscardUnknown() {
-	xxx_messageInfo_PlanResourceChange_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PlanResourceChange_Response proto.InternalMessageInfo
-
-func (m *PlanResourceChange_Response) GetPlannedState() *DynamicValue {
-	if m != nil {
-		return m.PlannedState
-	}
-	return nil
-}
-
-func (m *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath {
-	if m != nil {
-		return m.RequiresReplace
-	}
-	return nil
-}
-
-func (m *PlanResourceChange_Response) GetPlannedPrivate() []byte {
-	if m != nil {
-		return m.PlannedPrivate
-	}
-	return nil
-}
-
-func (m *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic {
-	if m != nil {
-		return m.Diagnostics
-	}
-	return nil
-}
-
-func (m *PlanResourceChange_Response) GetLegacyTypeSystem() bool {
-	if m != nil {
-		return m.LegacyTypeSystem
-	}
-	return false
-}
-
-type ApplyResourceChange struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ApplyResourceChange) Reset() { *m = ApplyResourceChange{} }
-func (m *ApplyResourceChange) String() string { return proto.CompactTextString(m) }
-func (*ApplyResourceChange) ProtoMessage() {}
-func (*ApplyResourceChange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{14}
-}
-
-func (m *ApplyResourceChange) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ApplyResourceChange.Unmarshal(m, b)
-}
-func (m *ApplyResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ApplyResourceChange.Marshal(b, m, deterministic)
-}
-func (m *ApplyResourceChange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ApplyResourceChange.Merge(m, src)
-}
-func (m *ApplyResourceChange) XXX_Size() int {
-	return xxx_messageInfo_ApplyResourceChange.Size(m)
-}
-func (m *ApplyResourceChange) XXX_DiscardUnknown() {
-	xxx_messageInfo_ApplyResourceChange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ApplyResourceChange proto.InternalMessageInfo
-
-type ApplyResourceChange_Request struct {
-	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
-	PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"`
-	PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"`
-	Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"`
-	PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"`
-	ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ApplyResourceChange_Request) Reset() { *m = ApplyResourceChange_Request{} }
-func (m *ApplyResourceChange_Request) String() string { return proto.CompactTextString(m) }
-func (*ApplyResourceChange_Request) ProtoMessage() {}
-func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{14, 0}
-}
-
-func (m *ApplyResourceChange_Request) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ApplyResourceChange_Request.Unmarshal(m, b)
-}
-func (m *ApplyResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ApplyResourceChange_Request.Marshal(b, m, deterministic)
-}
-func (m *ApplyResourceChange_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ApplyResourceChange_Request.Merge(m, src)
-}
-func (m *ApplyResourceChange_Request) XXX_Size() int {
-	return xxx_messageInfo_ApplyResourceChange_Request.Size(m)
-}
-func (m *ApplyResourceChange_Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_ApplyResourceChange_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ApplyResourceChange_Request proto.InternalMessageInfo
-
-func (m *ApplyResourceChange_Request) GetTypeName() string {
-	if m != nil {
-		return m.TypeName
-	}
-	return ""
-}
-
-func (m *ApplyResourceChange_Request) GetPriorState() *DynamicValue {
-	if m != nil {
-		return m.PriorState
-	}
-	return nil
-}
-
-func (m *ApplyResourceChange_Request) GetPlannedState() *DynamicValue {
-	if m != nil {
-		return m.PlannedState
-	}
-	return nil
-}
-
-func (m *ApplyResourceChange_Request) GetConfig() *DynamicValue {
-	if m != nil {
-		return m.Config
-	}
-	return nil
-}
-
-func (m *ApplyResourceChange_Request) GetPlannedPrivate() []byte {
-	if m != nil {
-		return m.PlannedPrivate
-	}
-	return nil
-}
-
-func (m *ApplyResourceChange_Request) GetProviderMeta() *DynamicValue {
-	if m != nil {
-		return m.ProviderMeta
-	}
-	return nil
-}
-
-type ApplyResourceChange_Response struct {
-	NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
-	Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"`
-	Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-	// This may be set only by the helper/schema "SDK" in the main Terraform
-	// repository, to request that Terraform Core >=0.12 permit additional
-	// inconsistencies that can result from the legacy SDK type system
-	// and its imprecise mapping to the >=0.12 type system.
-	// The change in behavior implied by this flag makes sense only for the
-	// specific details of the legacy SDK type system, and are not a general
-	// mechanism to avoid proper type handling in providers.
-	//
-	// ==== DO NOT USE THIS ====
-	// ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
-	// ==== DO NOT USE THIS ====
-	LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ApplyResourceChange_Response) Reset() { *m = ApplyResourceChange_Response{} }
-func (m *ApplyResourceChange_Response) String() string { return proto.CompactTextString(m) }
-func (*ApplyResourceChange_Response) ProtoMessage() {}
-func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{14, 1}
-}
-
-func (m *ApplyResourceChange_Response) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ApplyResourceChange_Response.Unmarshal(m, b)
-}
-func (m *ApplyResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ApplyResourceChange_Response.Marshal(b, m, deterministic)
-}
-func (m *ApplyResourceChange_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ApplyResourceChange_Response.Merge(m, src)
-}
-func (m *ApplyResourceChange_Response) XXX_Size() int {
-	return xxx_messageInfo_ApplyResourceChange_Response.Size(m)
-}
-func (m *ApplyResourceChange_Response) XXX_DiscardUnknown() {
-	xxx_messageInfo_ApplyResourceChange_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ApplyResourceChange_Response proto.InternalMessageInfo
-
-func (m *ApplyResourceChange_Response) GetNewState() *DynamicValue {
-	if m != nil {
-		return m.NewState
-	}
-	return nil
-}
-
-func (m *ApplyResourceChange_Response) GetPrivate() []byte {
-	if m != nil {
-		return m.Private
-	}
-	return nil
-}
-
-func (m *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic {
-	if m != nil {
-		return m.Diagnostics
-	}
-	return nil
-}
-
-func (m *ApplyResourceChange_Response) GetLegacyTypeSystem() bool {
-	if m != nil {
-		return m.LegacyTypeSystem
-	}
-	return false
-}
-
-type ImportResourceState struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ImportResourceState) Reset() { *m = ImportResourceState{} }
-func (m *ImportResourceState) String() string { return proto.CompactTextString(m) }
-func (*ImportResourceState) ProtoMessage() {}
-func (*ImportResourceState) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{15}
-}
-
-func (m *ImportResourceState) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ImportResourceState.Unmarshal(m, b)
-}
-func (m *ImportResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ImportResourceState.Marshal(b, m, deterministic)
-}
-func (m *ImportResourceState) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ImportResourceState.Merge(m, src)
-}
-func (m *ImportResourceState) XXX_Size() int {
-	return xxx_messageInfo_ImportResourceState.Size(m)
-}
-func (m *ImportResourceState) XXX_DiscardUnknown() {
-	xxx_messageInfo_ImportResourceState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ImportResourceState proto.InternalMessageInfo
-
-type ImportResourceState_Request struct {
-	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
-	Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ImportResourceState_Request) Reset() { *m = ImportResourceState_Request{} }
-func (m *ImportResourceState_Request) String() string { return proto.CompactTextString(m) }
-func (*ImportResourceState_Request) ProtoMessage() {}
-func (*ImportResourceState_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{15, 0}
-}
-
-func (m *ImportResourceState_Request) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ImportResourceState_Request.Unmarshal(m, b)
-}
-func (m *ImportResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ImportResourceState_Request.Marshal(b, m, deterministic)
-}
-func (m *ImportResourceState_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ImportResourceState_Request.Merge(m, src)
-}
-func (m *ImportResourceState_Request) XXX_Size() int {
-	return xxx_messageInfo_ImportResourceState_Request.Size(m)
-}
-func (m *ImportResourceState_Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_ImportResourceState_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ImportResourceState_Request proto.InternalMessageInfo
-
-func (m *ImportResourceState_Request) GetTypeName() string {
-	if m != nil {
-		return m.TypeName
-	}
-	return ""
-}
-
-func (m *ImportResourceState_Request) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-type ImportResourceState_ImportedResource struct {
-	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
-	State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"`
-	Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ImportResourceState_ImportedResource) Reset() { *m = ImportResourceState_ImportedResource{} }
-func (m *ImportResourceState_ImportedResource) String() string { return proto.CompactTextString(m) }
-func (*ImportResourceState_ImportedResource) ProtoMessage() {}
-func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{15, 1}
-}
-
-func (m *ImportResourceState_ImportedResource) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ImportResourceState_ImportedResource.Unmarshal(m, b)
-}
-func (m *ImportResourceState_ImportedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ImportResourceState_ImportedResource.Marshal(b, m, deterministic)
-}
-func (m *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ImportResourceState_ImportedResource.Merge(m, src)
-}
-func (m *ImportResourceState_ImportedResource) XXX_Size() int {
-	return xxx_messageInfo_ImportResourceState_ImportedResource.Size(m)
-}
-func (m *ImportResourceState_ImportedResource) XXX_DiscardUnknown() {
-	xxx_messageInfo_ImportResourceState_ImportedResource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ImportResourceState_ImportedResource proto.InternalMessageInfo
-
-func (m *ImportResourceState_ImportedResource) GetTypeName() string {
-	if m != nil {
-		return m.TypeName
-	}
-	return ""
-}
-
-func (m *ImportResourceState_ImportedResource) GetState() *DynamicValue {
-	if m != nil {
-		return m.State
-	}
-	return nil
-}
-
-func (m *ImportResourceState_ImportedResource) GetPrivate() []byte {
-	if m != nil {
-		return m.Private
-	}
-	return nil
-}
-
-type ImportResourceState_Response struct {
-	ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"`
-	Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ImportResourceState_Response) Reset() { *m = ImportResourceState_Response{} }
-func (m *ImportResourceState_Response) String() string { return proto.CompactTextString(m) }
-func (*ImportResourceState_Response) ProtoMessage() {}
-func (*ImportResourceState_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{15, 2}
-}
-
-func (m *ImportResourceState_Response) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ImportResourceState_Response.Unmarshal(m, b)
-}
-func (m *ImportResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ImportResourceState_Response.Marshal(b, m, deterministic)
-}
-func (m *ImportResourceState_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ImportResourceState_Response.Merge(m, src)
-}
-func (m *ImportResourceState_Response) XXX_Size() int {
-	return xxx_messageInfo_ImportResourceState_Response.Size(m)
-}
-func (m *ImportResourceState_Response) XXX_DiscardUnknown() {
-	xxx_messageInfo_ImportResourceState_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ImportResourceState_Response proto.InternalMessageInfo
-
-func (m *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource {
-	if m != nil {
-		return m.ImportedResources
-	}
-	return nil
-}
-
-func (m *ImportResourceState_Response) GetDiagnostics() []*Diagnostic {
-	if m != nil {
-		return m.Diagnostics
-	}
-	return nil
-}
-
-type ReadDataSource struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ReadDataSource) Reset() { *m = ReadDataSource{} }
-func (m *ReadDataSource) String() string { return proto.CompactTextString(m) }
-func (*ReadDataSource) ProtoMessage() {}
-func (*ReadDataSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_17ae6090ff270234, []int{16}
-}
-
-func (m *ReadDataSource) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ReadDataSource.Unmarshal(m, b)
-}
-func (m *ReadDataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ReadDataSource.Marshal(b, m, deterministic)
-}
-func (m *ReadDataSource) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadDataSource.Merge(m, src)
-}
-func (m *ReadDataSource) XXX_Size() int {
-	return xxx_messageInfo_ReadDataSource.Size(m)
-}
-func (m *ReadDataSource) XXX_DiscardUnknown() {
-	xxx_messageInfo_ReadDataSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ReadDataSource proto.InternalMessageInfo
-
-type
ReadDataSource_Request struct { - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - ProviderMeta *DynamicValue `protobuf:"bytes,3,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadDataSource_Request) Reset() { *m = ReadDataSource_Request{} } -func (m *ReadDataSource_Request) String() string { return proto.CompactTextString(m) } -func (*ReadDataSource_Request) ProtoMessage() {} -func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{16, 0} -} - -func (m *ReadDataSource_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadDataSource_Request.Unmarshal(m, b) -} -func (m *ReadDataSource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadDataSource_Request.Marshal(b, m, deterministic) -} -func (m *ReadDataSource_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadDataSource_Request.Merge(m, src) -} -func (m *ReadDataSource_Request) XXX_Size() int { - return xxx_messageInfo_ReadDataSource_Request.Size(m) -} -func (m *ReadDataSource_Request) XXX_DiscardUnknown() { - xxx_messageInfo_ReadDataSource_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadDataSource_Request proto.InternalMessageInfo - -func (m *ReadDataSource_Request) GetTypeName() string { - if m != nil { - return m.TypeName - } - return "" -} - -func (m *ReadDataSource_Request) GetConfig() *DynamicValue { - if m != nil { - return m.Config - } - return nil -} - -func (m *ReadDataSource_Request) GetProviderMeta() *DynamicValue { - if m != nil { - return m.ProviderMeta - } - return nil -} - -type ReadDataSource_Response struct { - State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` - Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadDataSource_Response) Reset() { *m = ReadDataSource_Response{} } -func (m *ReadDataSource_Response) String() string { return proto.CompactTextString(m) } -func (*ReadDataSource_Response) ProtoMessage() {} -func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{16, 1} -} - -func (m *ReadDataSource_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadDataSource_Response.Unmarshal(m, b) -} -func (m *ReadDataSource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadDataSource_Response.Marshal(b, m, deterministic) -} -func (m *ReadDataSource_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadDataSource_Response.Merge(m, src) -} -func (m *ReadDataSource_Response) XXX_Size() int { - return xxx_messageInfo_ReadDataSource_Response.Size(m) -} -func (m *ReadDataSource_Response) XXX_DiscardUnknown() { - xxx_messageInfo_ReadDataSource_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadDataSource_Response proto.InternalMessageInfo - -func (m *ReadDataSource_Response) GetState() *DynamicValue { - if m != nil { - return m.State - } - return nil -} - -func (m *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return 
m.Diagnostics - } - return nil -} - -type GetProvisionerSchema struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetProvisionerSchema) Reset() { *m = GetProvisionerSchema{} } -func (m *GetProvisionerSchema) String() string { return proto.CompactTextString(m) } -func (*GetProvisionerSchema) ProtoMessage() {} -func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{17} -} - -func (m *GetProvisionerSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetProvisionerSchema.Unmarshal(m, b) -} -func (m *GetProvisionerSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetProvisionerSchema.Marshal(b, m, deterministic) -} -func (m *GetProvisionerSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProvisionerSchema.Merge(m, src) -} -func (m *GetProvisionerSchema) XXX_Size() int { - return xxx_messageInfo_GetProvisionerSchema.Size(m) -} -func (m *GetProvisionerSchema) XXX_DiscardUnknown() { - xxx_messageInfo_GetProvisionerSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_GetProvisionerSchema proto.InternalMessageInfo - -type GetProvisionerSchema_Request struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetProvisionerSchema_Request) Reset() { *m = GetProvisionerSchema_Request{} } -func (m *GetProvisionerSchema_Request) String() string { return proto.CompactTextString(m) } -func (*GetProvisionerSchema_Request) ProtoMessage() {} -func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{17, 0} -} - -func (m *GetProvisionerSchema_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetProvisionerSchema_Request.Unmarshal(m, b) -} -func (m *GetProvisionerSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetProvisionerSchema_Request.Marshal(b, m, deterministic) -} -func (m *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProvisionerSchema_Request.Merge(m, src) -} -func (m *GetProvisionerSchema_Request) XXX_Size() int { - return xxx_messageInfo_GetProvisionerSchema_Request.Size(m) -} -func (m *GetProvisionerSchema_Request) XXX_DiscardUnknown() { - xxx_messageInfo_GetProvisionerSchema_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_GetProvisionerSchema_Request proto.InternalMessageInfo - -type GetProvisionerSchema_Response struct { - Provisioner *Schema `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"` - Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetProvisionerSchema_Response) Reset() { *m = GetProvisionerSchema_Response{} } -func (m *GetProvisionerSchema_Response) String() string { return proto.CompactTextString(m) } -func (*GetProvisionerSchema_Response) ProtoMessage() {} -func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{17, 1} -} - -func (m *GetProvisionerSchema_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetProvisionerSchema_Response.Unmarshal(m, b) -} -func (m *GetProvisionerSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_GetProvisionerSchema_Response.Marshal(b, m, deterministic) -} -func (m *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProvisionerSchema_Response.Merge(m, src) -} -func (m *GetProvisionerSchema_Response) XXX_Size() int { - return xxx_messageInfo_GetProvisionerSchema_Response.Size(m) -} -func (m *GetProvisionerSchema_Response) XXX_DiscardUnknown() { - xxx_messageInfo_GetProvisionerSchema_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_GetProvisionerSchema_Response proto.InternalMessageInfo - -func (m *GetProvisionerSchema_Response) GetProvisioner() *Schema { - if m != nil { - return m.Provisioner - } - return nil -} - -func (m *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -type ValidateProvisionerConfig struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateProvisionerConfig) Reset() { *m = ValidateProvisionerConfig{} } -func (m *ValidateProvisionerConfig) String() string { return proto.CompactTextString(m) } -func (*ValidateProvisionerConfig) ProtoMessage() {} -func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{18} -} - -func (m *ValidateProvisionerConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateProvisionerConfig.Unmarshal(m, b) -} -func (m *ValidateProvisionerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateProvisionerConfig.Marshal(b, m, deterministic) -} -func (m *ValidateProvisionerConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateProvisionerConfig.Merge(m, src) -} -func (m *ValidateProvisionerConfig) XXX_Size() int { - return xxx_messageInfo_ValidateProvisionerConfig.Size(m) -} -func (m *ValidateProvisionerConfig) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateProvisionerConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateProvisionerConfig proto.InternalMessageInfo - -type ValidateProvisionerConfig_Request struct { - Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateProvisionerConfig_Request) Reset() { *m = ValidateProvisionerConfig_Request{} } -func (m *ValidateProvisionerConfig_Request) String() string { return proto.CompactTextString(m) } -func (*ValidateProvisionerConfig_Request) ProtoMessage() {} -func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{18, 0} -} - -func (m *ValidateProvisionerConfig_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateProvisionerConfig_Request.Unmarshal(m, b) -} -func (m *ValidateProvisionerConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateProvisionerConfig_Request.Marshal(b, m, deterministic) -} -func (m *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(m, src) -} -func (m *ValidateProvisionerConfig_Request) XXX_Size() int { - return xxx_messageInfo_ValidateProvisionerConfig_Request.Size(m) -} -func (m *ValidateProvisionerConfig_Request) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateProvisionerConfig_Request.DiscardUnknown(m) -} - -var 
xxx_messageInfo_ValidateProvisionerConfig_Request proto.InternalMessageInfo - -func (m *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue { - if m != nil { - return m.Config - } - return nil -} - -type ValidateProvisionerConfig_Response struct { - Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateProvisionerConfig_Response) Reset() { *m = ValidateProvisionerConfig_Response{} } -func (m *ValidateProvisionerConfig_Response) String() string { return proto.CompactTextString(m) } -func (*ValidateProvisionerConfig_Response) ProtoMessage() {} -func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{18, 1} -} - -func (m *ValidateProvisionerConfig_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateProvisionerConfig_Response.Unmarshal(m, b) -} -func (m *ValidateProvisionerConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateProvisionerConfig_Response.Marshal(b, m, deterministic) -} -func (m *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(m, src) -} -func (m *ValidateProvisionerConfig_Response) XXX_Size() int { - return xxx_messageInfo_ValidateProvisionerConfig_Response.Size(m) -} -func (m *ValidateProvisionerConfig_Response) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateProvisionerConfig_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateProvisionerConfig_Response proto.InternalMessageInfo - -func (m *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -type ProvisionResource struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProvisionResource) Reset() { *m = ProvisionResource{} } -func (m *ProvisionResource) String() string { return proto.CompactTextString(m) } -func (*ProvisionResource) ProtoMessage() {} -func (*ProvisionResource) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{19} -} - -func (m *ProvisionResource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProvisionResource.Unmarshal(m, b) -} -func (m *ProvisionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProvisionResource.Marshal(b, m, deterministic) -} -func (m *ProvisionResource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProvisionResource.Merge(m, src) -} -func (m *ProvisionResource) XXX_Size() int { - return xxx_messageInfo_ProvisionResource.Size(m) -} -func (m *ProvisionResource) XXX_DiscardUnknown() { - xxx_messageInfo_ProvisionResource.DiscardUnknown(m) -} - -var xxx_messageInfo_ProvisionResource proto.InternalMessageInfo - -type ProvisionResource_Request struct { - Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - Connection *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProvisionResource_Request) Reset() { *m = ProvisionResource_Request{} } -func (m *ProvisionResource_Request) String() string { return proto.CompactTextString(m) } -func 
(*ProvisionResource_Request) ProtoMessage() {} -func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{19, 0} -} - -func (m *ProvisionResource_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProvisionResource_Request.Unmarshal(m, b) -} -func (m *ProvisionResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProvisionResource_Request.Marshal(b, m, deterministic) -} -func (m *ProvisionResource_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProvisionResource_Request.Merge(m, src) -} -func (m *ProvisionResource_Request) XXX_Size() int { - return xxx_messageInfo_ProvisionResource_Request.Size(m) -} -func (m *ProvisionResource_Request) XXX_DiscardUnknown() { - xxx_messageInfo_ProvisionResource_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_ProvisionResource_Request proto.InternalMessageInfo - -func (m *ProvisionResource_Request) GetConfig() *DynamicValue { - if m != nil { - return m.Config - } - return nil -} - -func (m *ProvisionResource_Request) GetConnection() *DynamicValue { - if m != nil { - return m.Connection - } - return nil -} - -type ProvisionResource_Response struct { - Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` - Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProvisionResource_Response) Reset() { *m = ProvisionResource_Response{} } -func (m *ProvisionResource_Response) String() string { return proto.CompactTextString(m) } -func (*ProvisionResource_Response) ProtoMessage() {} -func (*ProvisionResource_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{19, 1} -} - -func (m *ProvisionResource_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProvisionResource_Response.Unmarshal(m, b) -} -func (m *ProvisionResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProvisionResource_Response.Marshal(b, m, deterministic) -} -func (m *ProvisionResource_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProvisionResource_Response.Merge(m, src) -} -func (m *ProvisionResource_Response) XXX_Size() int { - return xxx_messageInfo_ProvisionResource_Response.Size(m) -} -func (m *ProvisionResource_Response) XXX_DiscardUnknown() { - xxx_messageInfo_ProvisionResource_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_ProvisionResource_Response proto.InternalMessageInfo - -func (m *ProvisionResource_Response) GetOutput() string { - if m != nil { - return m.Output - } - return "" -} - -func (m *ProvisionResource_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -func init() { - proto.RegisterEnum("tfplugin5.StringKind", StringKind_name, StringKind_value) - proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value) - proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value) - proto.RegisterType((*DynamicValue)(nil), "tfplugin5.DynamicValue") - proto.RegisterType((*Diagnostic)(nil), "tfplugin5.Diagnostic") - proto.RegisterType((*AttributePath)(nil), "tfplugin5.AttributePath") - proto.RegisterType((*AttributePath_Step)(nil), "tfplugin5.AttributePath.Step") - 
-	proto.RegisterType((*Stop)(nil), "tfplugin5.Stop")
-	proto.RegisterType((*Stop_Request)(nil), "tfplugin5.Stop.Request")
-	proto.RegisterType((*Stop_Response)(nil), "tfplugin5.Stop.Response")
-	proto.RegisterType((*RawState)(nil), "tfplugin5.RawState")
-	proto.RegisterMapType((map[string]string)(nil), "tfplugin5.RawState.FlatmapEntry")
-	proto.RegisterType((*Schema)(nil), "tfplugin5.Schema")
-	proto.RegisterType((*Schema_Block)(nil), "tfplugin5.Schema.Block")
-	proto.RegisterType((*Schema_Attribute)(nil), "tfplugin5.Schema.Attribute")
-	proto.RegisterType((*Schema_NestedBlock)(nil), "tfplugin5.Schema.NestedBlock")
-	proto.RegisterType((*GetProviderSchema)(nil), "tfplugin5.GetProviderSchema")
-	proto.RegisterType((*GetProviderSchema_Request)(nil), "tfplugin5.GetProviderSchema.Request")
-	proto.RegisterType((*GetProviderSchema_Response)(nil), "tfplugin5.GetProviderSchema.Response")
-	proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry")
-	proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry")
-	proto.RegisterType((*PrepareProviderConfig)(nil), "tfplugin5.PrepareProviderConfig")
-	proto.RegisterType((*PrepareProviderConfig_Request)(nil), "tfplugin5.PrepareProviderConfig.Request")
-	proto.RegisterType((*PrepareProviderConfig_Response)(nil), "tfplugin5.PrepareProviderConfig.Response")
-	proto.RegisterType((*UpgradeResourceState)(nil), "tfplugin5.UpgradeResourceState")
-	proto.RegisterType((*UpgradeResourceState_Request)(nil), "tfplugin5.UpgradeResourceState.Request")
-	proto.RegisterType((*UpgradeResourceState_Response)(nil), "tfplugin5.UpgradeResourceState.Response")
-	proto.RegisterType((*ValidateResourceTypeConfig)(nil), "tfplugin5.ValidateResourceTypeConfig")
-	proto.RegisterType((*ValidateResourceTypeConfig_Request)(nil), "tfplugin5.ValidateResourceTypeConfig.Request")
-	proto.RegisterType((*ValidateResourceTypeConfig_Response)(nil), "tfplugin5.ValidateResourceTypeConfig.Response")
-	proto.RegisterType((*ValidateDataSourceConfig)(nil), "tfplugin5.ValidateDataSourceConfig")
-	proto.RegisterType((*ValidateDataSourceConfig_Request)(nil), "tfplugin5.ValidateDataSourceConfig.Request")
-	proto.RegisterType((*ValidateDataSourceConfig_Response)(nil), "tfplugin5.ValidateDataSourceConfig.Response")
-	proto.RegisterType((*Configure)(nil), "tfplugin5.Configure")
-	proto.RegisterType((*Configure_Request)(nil), "tfplugin5.Configure.Request")
-	proto.RegisterType((*Configure_Response)(nil), "tfplugin5.Configure.Response")
-	proto.RegisterType((*ReadResource)(nil), "tfplugin5.ReadResource")
-	proto.RegisterType((*ReadResource_Request)(nil), "tfplugin5.ReadResource.Request")
-	proto.RegisterType((*ReadResource_Response)(nil), "tfplugin5.ReadResource.Response")
-	proto.RegisterType((*PlanResourceChange)(nil), "tfplugin5.PlanResourceChange")
-	proto.RegisterType((*PlanResourceChange_Request)(nil), "tfplugin5.PlanResourceChange.Request")
-	proto.RegisterType((*PlanResourceChange_Response)(nil), "tfplugin5.PlanResourceChange.Response")
-	proto.RegisterType((*ApplyResourceChange)(nil), "tfplugin5.ApplyResourceChange")
-	proto.RegisterType((*ApplyResourceChange_Request)(nil), "tfplugin5.ApplyResourceChange.Request")
-	proto.RegisterType((*ApplyResourceChange_Response)(nil), "tfplugin5.ApplyResourceChange.Response")
-	proto.RegisterType((*ImportResourceState)(nil), "tfplugin5.ImportResourceState")
-	proto.RegisterType((*ImportResourceState_Request)(nil), "tfplugin5.ImportResourceState.Request")
-	proto.RegisterType((*ImportResourceState_ImportedResource)(nil), "tfplugin5.ImportResourceState.ImportedResource")
-	proto.RegisterType((*ImportResourceState_Response)(nil), "tfplugin5.ImportResourceState.Response")
-	proto.RegisterType((*ReadDataSource)(nil), "tfplugin5.ReadDataSource")
-	proto.RegisterType((*ReadDataSource_Request)(nil), "tfplugin5.ReadDataSource.Request")
-	proto.RegisterType((*ReadDataSource_Response)(nil), "tfplugin5.ReadDataSource.Response")
-	proto.RegisterType((*GetProvisionerSchema)(nil), "tfplugin5.GetProvisionerSchema")
-	proto.RegisterType((*GetProvisionerSchema_Request)(nil), "tfplugin5.GetProvisionerSchema.Request")
-	proto.RegisterType((*GetProvisionerSchema_Response)(nil), "tfplugin5.GetProvisionerSchema.Response")
-	proto.RegisterType((*ValidateProvisionerConfig)(nil), "tfplugin5.ValidateProvisionerConfig")
-	proto.RegisterType((*ValidateProvisionerConfig_Request)(nil), "tfplugin5.ValidateProvisionerConfig.Request")
-	proto.RegisterType((*ValidateProvisionerConfig_Response)(nil), "tfplugin5.ValidateProvisionerConfig.Response")
-	proto.RegisterType((*ProvisionResource)(nil), "tfplugin5.ProvisionResource")
-	proto.RegisterType((*ProvisionResource_Request)(nil), "tfplugin5.ProvisionResource.Request")
-	proto.RegisterType((*ProvisionResource_Response)(nil), "tfplugin5.ProvisionResource.Response")
-}
-
-func init() {
-	proto.RegisterFile("tfplugin5.proto", fileDescriptor_17ae6090ff270234)
-}
-
-var fileDescriptor_17ae6090ff270234 = []byte{
-	// 2010 bytes of a gzipped FileDescriptorProto
-	// (raw gzipped descriptor bytes elided: machine-generated, not human-readable)
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConnInterface
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion6 - -// ProviderClient is the client API for Provider service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ProviderClient interface { - //////// Information about what a provider supports/expects - GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) - PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) - ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) - ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) - UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) - //////// One-time initialization, called before other functions below - Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) - //////// Managed Resource Lifecycle - ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) - PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) - ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) - ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) - ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) - //////// Graceful Shutdown - Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) -} - -type providerClient struct { - cc grpc.ClientConnInterface -} - -func NewProviderClient(cc grpc.ClientConnInterface) ProviderClient { - return &providerClient{cc} -} - -func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { - out := new(GetProviderSchema_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetSchema", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) { - out := new(PrepareProviderConfig_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PrepareProviderConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) { - out := new(ValidateResourceTypeConfig_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateResourceTypeConfig", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) { - out := new(ValidateDataSourceConfig_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateDataSourceConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { - out := new(UpgradeResourceState_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/UpgradeResourceState", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) { - out := new(Configure_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Configure", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { - out := new(ReadResource_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadResource", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { - out := new(PlanResourceChange_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PlanResourceChange", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { - out := new(ApplyResourceChange_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ApplyResourceChange", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { - out := new(ImportResourceState_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ImportResourceState", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { - out := new(ReadDataSource_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadDataSource", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { - out := new(Stop_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Stop", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ProviderServer is the server API for Provider service. 
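
Before the ProviderServer interface that the comment above introduces, a minimal usage sketch of the client half being deleted here may help. This is not code from the repository: it assumes an already-brokered gRPC connection (in Terraform, hashicorp/go-plugin establishes it) and a hypothetical import path, and it exercises only methods and message fields visible in this file.

```go
package example

import (
	"context"
	"log"

	"google.golang.org/grpc"

	tfplugin5 "example.com/internal/tfplugin5" // hypothetical import path for the generated package
)

// importOne asks a provider to adopt an existing remote object into state
// via the ImportResourceState RPC declared above.
func importOne(conn grpc.ClientConnInterface) {
	client := tfplugin5.NewProviderClient(conn)

	resp, err := client.ImportResourceState(context.Background(), &tfplugin5.ImportResourceState_Request{
		TypeName: "github_repository", // hypothetical resource type
		Id:       "example-repo",      // hypothetical remote object id
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, imported := range resp.GetImportedResources() {
		log.Printf("imported %s", imported.GetTypeName())
	}
}
```
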
-type ProviderServer interface { - //////// Information about what a provider supports/expects - GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) - PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) - ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) - ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) - UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) - //////// One-time initialization, called before other functions below - Configure(context.Context, *Configure_Request) (*Configure_Response, error) - //////// Managed Resource Lifecycle - ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) - PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) - ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) - ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) - ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) - //////// Graceful Shutdown - Stop(context.Context, *Stop_Request) (*Stop_Response, error) -} - -// UnimplementedProviderServer can be embedded to have forward compatible implementations. -type UnimplementedProviderServer struct { -} - -func (*UnimplementedProviderServer) GetSchema(ctx context.Context, req *GetProviderSchema_Request) (*GetProviderSchema_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") -} -func (*UnimplementedProviderServer) PrepareProviderConfig(ctx context.Context, req *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method PrepareProviderConfig not implemented") -} -func (*UnimplementedProviderServer) ValidateResourceTypeConfig(ctx context.Context, req *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ValidateResourceTypeConfig not implemented") -} -func (*UnimplementedProviderServer) ValidateDataSourceConfig(ctx context.Context, req *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ValidateDataSourceConfig not implemented") -} -func (*UnimplementedProviderServer) UpgradeResourceState(ctx context.Context, req *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpgradeResourceState not implemented") -} -func (*UnimplementedProviderServer) Configure(ctx context.Context, req *Configure_Request) (*Configure_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") -} -func (*UnimplementedProviderServer) ReadResource(ctx context.Context, req *ReadResource_Request) (*ReadResource_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReadResource not implemented") -} -func (*UnimplementedProviderServer) PlanResourceChange(ctx context.Context, req *PlanResourceChange_Request) (*PlanResourceChange_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method PlanResourceChange not 
implemented") -} -func (*UnimplementedProviderServer) ApplyResourceChange(ctx context.Context, req *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ApplyResourceChange not implemented") -} -func (*UnimplementedProviderServer) ImportResourceState(ctx context.Context, req *ImportResourceState_Request) (*ImportResourceState_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ImportResourceState not implemented") -} -func (*UnimplementedProviderServer) ReadDataSource(ctx context.Context, req *ReadDataSource_Request) (*ReadDataSource_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") -} -func (*UnimplementedProviderServer) Stop(ctx context.Context, req *Stop_Request) (*Stop_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") -} - -func RegisterProviderServer(s *grpc.Server, srv ProviderServer) { - s.RegisterService(&_Provider_serviceDesc, srv) -} - -func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetProviderSchema_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).GetSchema(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/GetSchema", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PrepareProviderConfig_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).PrepareProviderConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/PrepareProviderConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ValidateResourceTypeConfig_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/ValidateResourceTypeConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ValidateDataSourceConfig_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).ValidateDataSourceConfig(ctx, in) - } - info := 
&grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/ValidateDataSourceConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpgradeResourceState_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).UpgradeResourceState(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/UpgradeResourceState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Configure_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).Configure(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/Configure", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadResource_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).ReadResource(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/ReadResource", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PlanResourceChange_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).PlanResourceChange(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/PlanResourceChange", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ApplyResourceChange_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).ApplyResourceChange(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/ApplyResourceChange", - } - handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { - return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ImportResourceState_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).ImportResourceState(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/ImportResourceState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadDataSource_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).ReadDataSource(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/ReadDataSource", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Stop_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).Stop(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/Stop", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request)) - } - return interceptor(ctx, in, info, handler) -} - -var _Provider_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tfplugin5.Provider", - HandlerType: (*ProviderServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetSchema", - Handler: _Provider_GetSchema_Handler, - }, - { - MethodName: "PrepareProviderConfig", - Handler: _Provider_PrepareProviderConfig_Handler, - }, - { - MethodName: "ValidateResourceTypeConfig", - Handler: _Provider_ValidateResourceTypeConfig_Handler, - }, - { - MethodName: "ValidateDataSourceConfig", - Handler: _Provider_ValidateDataSourceConfig_Handler, - }, - { - MethodName: "UpgradeResourceState", - Handler: _Provider_UpgradeResourceState_Handler, - }, - { - MethodName: "Configure", - Handler: _Provider_Configure_Handler, - }, - { - MethodName: "ReadResource", - Handler: _Provider_ReadResource_Handler, - }, - { - MethodName: "PlanResourceChange", - Handler: _Provider_PlanResourceChange_Handler, - }, - { - MethodName: "ApplyResourceChange", - Handler: _Provider_ApplyResourceChange_Handler, - }, - { - MethodName: "ImportResourceState", - Handler: _Provider_ImportResourceState_Handler, - }, - { - MethodName: "ReadDataSource", - Handler: _Provider_ReadDataSource_Handler, - }, - { - MethodName: "Stop", - Handler: _Provider_Stop_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "tfplugin5.proto", -} - -// ProvisionerClient is the client API for Provisioner service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ProvisionerClient interface { - GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) - ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) - ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) - Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) -} - -type provisionerClient struct { - cc grpc.ClientConnInterface -} - -func NewProvisionerClient(cc grpc.ClientConnInterface) ProvisionerClient { - return &provisionerClient{cc} -} - -func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) { - out := new(GetProvisionerSchema_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/GetSchema", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) { - out := new(ValidateProvisionerConfig_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/ValidateProvisionerConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) { - stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) - if err != nil { - return nil, err - } - x := &provisionerProvisionResourceClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Provisioner_ProvisionResourceClient interface { - Recv() (*ProvisionResource_Response, error) - grpc.ClientStream -} - -type provisionerProvisionResourceClient struct { - grpc.ClientStream -} - -func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response, error) { - m := new(ProvisionResource_Response) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { - out := new(Stop_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/Stop", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ProvisionerServer is the server API for Provisioner service. -type ProvisionerServer interface { - GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) - ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) - ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error - Stop(context.Context, *Stop_Request) (*Stop_Response, error) -} - -// UnimplementedProvisionerServer can be embedded to have forward compatible implementations. 
-type UnimplementedProvisionerServer struct { -} - -func (*UnimplementedProvisionerServer) GetSchema(ctx context.Context, req *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") -} -func (*UnimplementedProvisionerServer) ValidateProvisionerConfig(ctx context.Context, req *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ValidateProvisionerConfig not implemented") -} -func (*UnimplementedProvisionerServer) ProvisionResource(req *ProvisionResource_Request, srv Provisioner_ProvisionResourceServer) error { - return status.Errorf(codes.Unimplemented, "method ProvisionResource not implemented") -} -func (*UnimplementedProvisionerServer) Stop(ctx context.Context, req *Stop_Request) (*Stop_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") -} - -func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) { - s.RegisterService(&_Provisioner_serviceDesc, srv) -} - -func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetProvisionerSchema_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProvisionerServer).GetSchema(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provisioner/GetSchema", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ValidateProvisionerConfig_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provisioner/ValidateProvisionerConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provisioner_ProvisionResource_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ProvisionResource_Request) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ProvisionerServer).ProvisionResource(m, &provisionerProvisionResourceServer{stream}) -} - -type Provisioner_ProvisionResourceServer interface { - Send(*ProvisionResource_Response) error - grpc.ServerStream -} - -type provisionerProvisionResourceServer struct { - grpc.ServerStream -} - -func (x *provisionerProvisionResourceServer) Send(m *ProvisionResource_Response) error { - return x.ServerStream.SendMsg(m) -} - -func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Stop_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProvisionerServer).Stop(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/tfplugin5.Provisioner/Stop", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request)) - } - return interceptor(ctx, in, info, handler) -} - -var _Provisioner_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tfplugin5.Provisioner", - HandlerType: (*ProvisionerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetSchema", - Handler: _Provisioner_GetSchema_Handler, - }, - { - MethodName: "ValidateProvisionerConfig", - Handler: _Provisioner_ValidateProvisionerConfig_Handler, - }, - { - MethodName: "Stop", - Handler: _Provisioner_Stop_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ProvisionResource", - Handler: _Provisioner_ProvisionResource_Handler, - ServerStreams: true, - }, - }, - Metadata: "tfplugin5.proto", -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.proto deleted file mode 100644 index 4f365697a8..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.proto +++ /dev/null @@ -1,368 +0,0 @@ -// Terraform Plugin RPC protocol version 5.2 -// -// This file defines version 5.2 of the RPC protocol. To implement a plugin -// against this protocol, copy this definition into your own codebase and -// use protoc to generate stubs for your target language. -// -// This file will not be updated. Any minor versions of protocol 5 to follow -// should copy this file and modify the copy while maintaing backwards -// compatibility. Breaking changes, if any are required, will come -// in a subsequent major version with its own separate proto definition. -// -// Note that only the proto files included in a release tag of Terraform are -// official protocol releases. Proto files taken from other commits may include -// incomplete changes or features that did not make it into a final release. -// In all reasonable cases, plugin developers should take the proto file from -// the tag of the most recent release of Terraform, and not from the master -// branch or any other development branch. -// -syntax = "proto3"; - -package tfplugin5; - -// DynamicValue is an opaque encoding of terraform data, with the field name -// indicating the encoding scheme used. -message DynamicValue { - bytes msgpack = 1; - bytes json = 2; -} - -message Diagnostic { - enum Severity { - INVALID = 0; - ERROR = 1; - WARNING = 2; - } - Severity severity = 1; - string summary = 2; - string detail = 3; - AttributePath attribute = 4; -} - -message AttributePath { - message Step { - oneof selector { - // Set "attribute_name" to represent looking up an attribute - // in the current object value. - string attribute_name = 1; - // Set "element_key_*" to represent looking up an element in - // an indexable collection type. - string element_key_string = 2; - int64 element_key_int = 3; - } - } - repeated Step steps = 1; -} - -message Stop { - message Request { - } - message Response { - string Error = 1; - } -} - -// RawState holds the stored state for a resource to be upgraded by the -// provider. It can be in one of two formats, the current json encoded format -// in bytes, or the legacy flatmap format as a map of strings. -message RawState { - bytes json = 1; - map flatmap = 2; -} - -enum StringKind { - PLAIN = 0; - MARKDOWN = 1; -} - -// Schema is the configuration schema for a Resource, Provider, or Provisioner. 
-message Schema { - message Block { - int64 version = 1; - repeated Attribute attributes = 2; - repeated NestedBlock block_types = 3; - string description = 4; - StringKind description_kind = 5; - bool deprecated = 6; - } - - message Attribute { - string name = 1; - bytes type = 2; - string description = 3; - bool required = 4; - bool optional = 5; - bool computed = 6; - bool sensitive = 7; - StringKind description_kind = 8; - bool deprecated = 9; - } - - message NestedBlock { - enum NestingMode { - INVALID = 0; - SINGLE = 1; - LIST = 2; - SET = 3; - MAP = 4; - GROUP = 5; - } - - string type_name = 1; - Block block = 2; - NestingMode nesting = 3; - int64 min_items = 4; - int64 max_items = 5; - } - - // The version of the schema. - // Schemas are versioned, so that providers can upgrade a saved resource - // state when the schema is changed. - int64 version = 1; - - // Block is the top level configuration block for this schema. - Block block = 2; -} - -service Provider { - //////// Information about what a provider supports/expects - rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response); - rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response); - rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response); - rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response); - rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response); - - //////// One-time initialization, called before other functions below - rpc Configure(Configure.Request) returns (Configure.Response); - - //////// Managed Resource Lifecycle - rpc ReadResource(ReadResource.Request) returns (ReadResource.Response); - rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response); - rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response); - rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response); - - rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response); - - //////// Graceful Shutdown - rpc Stop(Stop.Request) returns (Stop.Response); -} - -message GetProviderSchema { - message Request { - } - message Response { - Schema provider = 1; - map<string, Schema> resource_schemas = 2; - map<string, Schema> data_source_schemas = 3; - repeated Diagnostic diagnostics = 4; - Schema provider_meta = 5; - } -} - -message PrepareProviderConfig { - message Request { - DynamicValue config = 1; - } - message Response { - DynamicValue prepared_config = 1; - repeated Diagnostic diagnostics = 2; - } -} - -message UpgradeResourceState { - message Request { - string type_name = 1; - - // version is the schema_version number recorded in the state file - int64 version = 2; - - // raw_state is the raw states as stored for the resource. Core does - // not have access to the schema of prior_version, so it's the - // provider's responsibility to interpret this value using the - // appropriate older schema. The raw_state will be the json encoded - // state, or a legacy flat-mapped format. - RawState raw_state = 3; - } - message Response { - // new_state is a msgpack-encoded data structure that, when interpreted with - // the _current_ schema for this resource type, is functionally equivalent to - // that which was given in prior_state_raw.
- DynamicValue upgraded_state = 1; - - // diagnostics describes any errors encountered during migration that could not - // be safely resolved, and warnings about any possibly-risky assumptions made - // in the upgrade process. - repeated Diagnostic diagnostics = 2; - } -} - -message ValidateResourceTypeConfig { - message Request { - string type_name = 1; - DynamicValue config = 2; - } - message Response { - repeated Diagnostic diagnostics = 1; - } -} - -message ValidateDataSourceConfig { - message Request { - string type_name = 1; - DynamicValue config = 2; - } - message Response { - repeated Diagnostic diagnostics = 1; - } -} - -message Configure { - message Request { - string terraform_version = 1; - DynamicValue config = 2; - } - message Response { - repeated Diagnostic diagnostics = 1; - } -} - -message ReadResource { - message Request { - string type_name = 1; - DynamicValue current_state = 2; - bytes private = 3; - DynamicValue provider_meta = 4; - } - message Response { - DynamicValue new_state = 1; - repeated Diagnostic diagnostics = 2; - bytes private = 3; - } -} - -message PlanResourceChange { - message Request { - string type_name = 1; - DynamicValue prior_state = 2; - DynamicValue proposed_new_state = 3; - DynamicValue config = 4; - bytes prior_private = 5; - DynamicValue provider_meta = 6; - } - - message Response { - DynamicValue planned_state = 1; - repeated AttributePath requires_replace = 2; - bytes planned_private = 3; - repeated Diagnostic diagnostics = 4; - - - // This may be set only by the helper/schema "SDK" in the main Terraform - // repository, to request that Terraform Core >=0.12 permit additional - // inconsistencies that can result from the legacy SDK type system - // and its imprecise mapping to the >=0.12 type system. - // The change in behavior implied by this flag makes sense only for the - // specific details of the legacy SDK type system, and are not a general - // mechanism to avoid proper type handling in providers. - // - // ==== DO NOT USE THIS ==== - // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== - // ==== DO NOT USE THIS ==== - bool legacy_type_system = 5; - } -} - -message ApplyResourceChange { - message Request { - string type_name = 1; - DynamicValue prior_state = 2; - DynamicValue planned_state = 3; - DynamicValue config = 4; - bytes planned_private = 5; - DynamicValue provider_meta = 6; - } - message Response { - DynamicValue new_state = 1; - bytes private = 2; - repeated Diagnostic diagnostics = 3; - - // This may be set only by the helper/schema "SDK" in the main Terraform - // repository, to request that Terraform Core >=0.12 permit additional - // inconsistencies that can result from the legacy SDK type system - // and its imprecise mapping to the >=0.12 type system. - // The change in behavior implied by this flag makes sense only for the - // specific details of the legacy SDK type system, and are not a general - // mechanism to avoid proper type handling in providers. 
- // - // ==== DO NOT USE THIS ==== - // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== - // ==== DO NOT USE THIS ==== - bool legacy_type_system = 4; - } -} - -message ImportResourceState { - message Request { - string type_name = 1; - string id = 2; - } - - message ImportedResource { - string type_name = 1; - DynamicValue state = 2; - bytes private = 3; - } - - message Response { - repeated ImportedResource imported_resources = 1; - repeated Diagnostic diagnostics = 2; - } -} - -message ReadDataSource { - message Request { - string type_name = 1; - DynamicValue config = 2; - DynamicValue provider_meta = 3; - } - message Response { - DynamicValue state = 1; - repeated Diagnostic diagnostics = 2; - } -} - -service Provisioner { - rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response); - rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response); - rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response); - rpc Stop(Stop.Request) returns (Stop.Response); -} - -message GetProvisionerSchema { - message Request { - } - message Response { - Schema provisioner = 1; - repeated Diagnostic diagnostics = 2; - } -} - -message ValidateProvisionerConfig { - message Request { - DynamicValue config = 1; - } - message Response { - repeated Diagnostic diagnostics = 1; - } -} - -message ProvisionResource { - message Request { - DynamicValue config = 1; - DynamicValue connection = 2; - } - message Response { - string output = 1; - repeated Diagnostic diagnostics = 2; - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go deleted file mode 100644 index 2d56dab696..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go +++ /dev/null @@ -1,36 +0,0 @@ -// The version package provides a location to set the release versions for all -// packages to consume, without creating import cycles. -// -// This package should not import any other terraform packages. -package version - -import ( - "fmt" - - version "github.com/hashicorp/go-version" -) - -// The main version number that is being run at the moment. -var Version = "0.12.7" - -// A pre-release marker for the version. If this is "" (empty string) -// then it means that it is a final release. Otherwise, this is a pre-release -// such as "dev" (in development), "beta", "rc1", etc. -var Prerelease = "sdk" - -// SemVer is an instance of version.Version. This has the secondary -// benefit of verifying during tests and init time that our version is a -// proper semantic version, which should always be the case. -var SemVer *version.Version - -func init() { - SemVer = version.Must(version.NewVersion(Version)) -} - -// String returns the complete version string, including prerelease -func String() string { - if Prerelease != "" { - return fmt.Sprintf("%s-%s", Version, Prerelease) - } - return Version -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go deleted file mode 100644 index 88a8405af3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go +++ /dev/null @@ -1,36 +0,0 @@ -// The meta package provides a location to set the release version -// and any other relevant metadata for the SDK. -// -// This package should not import any other SDK packages. 
-package meta - -import ( - "fmt" - - version "github.com/hashicorp/go-version" -) - -// The main version number that is being run at the moment. -var SDKVersion = "1.17.2" - -// A pre-release marker for the version. If this is "" (empty string) -// then it means that it is a final release. Otherwise, this is a pre-release -// such as "dev" (in development), "beta", "rc1", etc. -var SDKPrerelease = "" - -// SemVer is an instance of version.Version. This has the secondary -// benefit of verifying during tests and init time that our version is a -// proper semantic version, which should always be the case. -var SemVer *version.Version - -func init() { - SemVer = version.Must(version.NewVersion(SDKVersion)) -} - -// VersionString returns the complete version string, including prerelease -func SDKVersionString() string { - if SDKPrerelease != "" { - return fmt.Sprintf("%s-%s", SDKVersion, SDKPrerelease) - } - return SDKVersion -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go deleted file mode 100644 index 5a99e90064..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go +++ /dev/null @@ -1,35 +0,0 @@ -package plugin - -import ( - "os" - "os/exec" - - hclog "github.com/hashicorp/go-hclog" - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" -) - -// ClientConfig returns a configuration object that can be used to instantiate -// a client for the plugin described by the given metadata. -func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig { - logger := hclog.New(&hclog.LoggerOptions{ - Name: "plugin", - Level: hclog.Trace, - Output: os.Stderr, - }) - - return &plugin.ClientConfig{ - Cmd: exec.Command(m.Path), - HandshakeConfig: Handshake, - VersionedPlugins: VersionedPlugins, - Managed: true, - Logger: logger, - AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, - AutoMTLS: true, - } -} - -// Client returns a plugin client for the plugin described by the given metadata. -func Client(m discovery.PluginMeta) *plugin.Client { - return plugin.NewClient(ClientConfig(m)) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/debug.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/debug.go deleted file mode 100644 index b8c4f29f37..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/debug.go +++ /dev/null @@ -1,102 +0,0 @@ -package plugin - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "os" - "os/signal" - "time" - - "github.com/hashicorp/go-plugin" -) - -// ReattachConfig holds the information Terraform needs to be able to attach -// itself to a provider process, so it can drive the process. -type ReattachConfig struct { - Protocol string - Pid int - Test bool - Addr ReattachConfigAddr -} - -// ReattachConfigAddr is a JSON-encoding friendly version of net.Addr. -type ReattachConfigAddr struct { - Network string - String string -} - -// DebugServe starts a plugin server in debug mode; this should only be used -// when the provider will manage its own lifecycle. It is not recommended for -// normal usage; Serve is the correct function for that. 
-func DebugServe(ctx context.Context, opts *ServeOpts) (ReattachConfig, <-chan struct{}, error) { - reattachCh := make(chan *plugin.ReattachConfig) - closeCh := make(chan struct{}) - - opts.TestConfig = &plugin.ServeTestConfig{ - Context: ctx, - ReattachConfigCh: reattachCh, - CloseCh: closeCh, - } - - go Serve(opts) - - var config *plugin.ReattachConfig - select { - case config = <-reattachCh: - case <-time.After(2 * time.Second): - return ReattachConfig{}, closeCh, errors.New("timeout waiting on reattach config") - } - - if config == nil { - return ReattachConfig{}, closeCh, errors.New("nil reattach config received") - } - - return ReattachConfig{ - Protocol: string(config.Protocol), - Pid: config.Pid, - Test: config.Test, - Addr: ReattachConfigAddr{ - Network: config.Addr.Network(), - String: config.Addr.String(), - }, - }, closeCh, nil -} - -// Debug starts a debug server and controls its lifecycle, printing the -// information needed for Terraform to connect to the provider to stdout. -// os.Interrupt will be captured and used to stop the server. -func Debug(ctx context.Context, providerAddr string, opts *ServeOpts) error { - ctx, cancel := context.WithCancel(ctx) - // Ctrl-C will stop the server - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) - defer func() { - signal.Stop(sigCh) - cancel() - }() - config, closeCh, err := DebugServe(ctx, opts) - if err != nil { - return fmt.Errorf("Error launching debug server: %v", err) - } - go func() { - select { - case <-sigCh: - cancel() - case <-ctx.Done(): - } - }() - reattachStr, err := json.Marshal(map[string]ReattachConfig{ - providerAddr: config, - }) - if err != nil { - return fmt.Errorf("Error building reattach string: %v", err) - } - - fmt.Printf("Provider server started; to attach Terraform, set TF_REATTACH_PROVIDERS to the following:\n%s\n", string(reattachStr)) - - // wait for the server to be done - <-closeCh - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go deleted file mode 100644 index e4520975c1..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go +++ /dev/null @@ -1,563 +0,0 @@ -package plugin - -import ( - "context" - "errors" - "log" - "sync" - - "github.com/zclconf/go-cty/cty" - - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - "github.com/zclconf/go-cty/cty/msgpack" - "google.golang.org/grpc" -) - -// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package. -type GRPCProviderPlugin struct { - plugin.Plugin - GRPCProvider func() proto.ProviderServer -} - -func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCProvider{ - client: proto.NewProviderClient(c), - ctx: ctx, - }, nil -} - -func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - proto.RegisterProviderServer(s, p.GRPCProvider()) - return nil -} - -// GRPCProvider handles the client, or core side of the plugin rpc connection. -// The GRPCProvider methods are mostly a translation layer between the -// terraform provioders types and the grpc proto types, directly converting -// between the two. 
-type GRPCProvider struct { - // PluginClient provides a reference to the plugin.Client which controls the plugin process. - // This allows the GRPCProvider a way to shutdown the plugin process. - PluginClient *plugin.Client - - // TestServer contains a grpc.Server to close when the GRPCProvider is being - // used in an end to end test of a provider. - TestServer *grpc.Server - - // Proto client use to make the grpc service calls. - client proto.ProviderClient - - // this context is created by the plugin package, and is canceled when the - // plugin process ends. - ctx context.Context - - // schema stores the schema for this provider. This is used to properly - // serialize the state for requests. - mu sync.Mutex - schemas providers.GetSchemaResponse -} - -// getSchema is used internally to get the saved provider schema. The schema -// should have already been fetched from the provider, but we have to -// synchronize access to avoid being called concurrently with GetSchema. -func (p *GRPCProvider) getSchema() providers.GetSchemaResponse { - p.mu.Lock() - // unlock inline in case GetSchema needs to be called - if p.schemas.Provider.Block != nil { - p.mu.Unlock() - return p.schemas - } - p.mu.Unlock() - - // the schema should have been fetched already, but give it another shot - // just in case things are being called out of order. This may happen for - // tests. - schemas := p.GetSchema() - if schemas.Diagnostics.HasErrors() { - panic(schemas.Diagnostics.Err()) - } - - return schemas -} - -// getResourceSchema is a helper to extract the schema for a resource, and -// panics if the schema is not available. -func (p *GRPCProvider) getResourceSchema(name string) providers.Schema { - schema := p.getSchema() - resSchema, ok := schema.ResourceTypes[name] - if !ok { - panic("unknown resource type " + name) - } - return resSchema -} - -// gettDatasourceSchema is a helper to extract the schema for a datasource, and -// panics if that schema is not available. -func (p *GRPCProvider) getDatasourceSchema(name string) providers.Schema { - schema := p.getSchema() - dataSchema, ok := schema.DataSources[name] - if !ok { - panic("unknown data source " + name) - } - return dataSchema -} - -func (p *GRPCProvider) GetSchema() (resp providers.GetSchemaResponse) { - log.Printf("[TRACE] GRPCProvider: GetSchema") - p.mu.Lock() - defer p.mu.Unlock() - - if p.schemas.Provider.Block != nil { - return p.schemas - } - - resp.ResourceTypes = make(map[string]providers.Schema) - resp.DataSources = make(map[string]providers.Schema) - - // Some providers may generate quite large schemas, and the internal default - // grpc response size limit is 4MB. 64MB should cover most any use case, and - // if we get providers nearing that we may want to consider a finer-grained - // API to fetch individual resource schemas. - // Note: this option is marked as EXPERIMENTAL in the grpc API. 
- const maxRecvSize = 64 << 20 - protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize}) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - if protoResp.Provider == nil { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema")) - return resp - } - - resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider) - - for name, res := range protoResp.ResourceSchemas { - resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res) - } - - for name, data := range protoResp.DataSourceSchemas { - resp.DataSources[name] = convert.ProtoToProviderSchema(data) - } - - p.schemas = resp - - return resp -} - -func (p *GRPCProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) { - log.Printf("[TRACE] GRPCProvider: PrepareProviderConfig") - - schema := p.getSchema() - ty := schema.Provider.Block.ImpliedType() - - mp, err := msgpack.Marshal(r.Config, ty) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.PrepareProviderConfig_Request{ - Config: &proto.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.PrepareProviderConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - config := cty.NullVal(ty) - if protoResp.PreparedConfig != nil { - config, err = msgpack.Unmarshal(protoResp.PreparedConfig.Msgpack, ty) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.PreparedConfig = config - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) { - log.Printf("[TRACE] GRPCProvider: ValidateResourceTypeConfig") - resourceSchema := p.getResourceSchema(r.TypeName) - - mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ValidateResourceTypeConfig_Request{ - TypeName: r.TypeName, - Config: &proto.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.ValidateResourceTypeConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) { - log.Printf("[TRACE] GRPCProvider: ValidateDataSourceConfig") - - dataSchema := p.getDatasourceSchema(r.TypeName) - - mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ValidateDataSourceConfig_Request{ - TypeName: r.TypeName, - Config: &proto.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.ValidateDataSourceConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} 
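The 64 MB receive limit in GetSchema above deserves a note: gRPC rejects inbound messages larger than 4 MB by default, and a provider's schema response can exceed that, so the deleted code raises the cap for that one call via a CallOption. A minimal sketch of the same idea using the public grpc.MaxCallRecvMsgSize helper, applied connection-wide rather than per call (the function name, target parameter, and use of insecure credentials are illustrative, not part of this diff):

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialWithLargeRecv opens a client connection whose RPCs may receive
// messages up to 64 MB instead of gRPC's 4 MB default. The vendored
// GetSchema above passes the equivalent option on a single call;
// WithDefaultCallOptions applies it to every call on the connection.
func dialWithLargeRecv(target string) (*grpc.ClientConn, error) {
	const maxRecvSize = 64 << 20 // 64 MB, matching the constant above
	return grpc.Dial(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxRecvSize)),
	)
}

The per-call form used in the deleted code keeps the safety of the small default for every other RPC, which is why it is scoped to the one response known to grow large.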
- -func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - log.Printf("[TRACE] GRPCProvider: UpgradeResourceState") - - resSchema := p.getResourceSchema(r.TypeName) - - protoReq := &proto.UpgradeResourceState_Request{ - TypeName: r.TypeName, - Version: int64(r.Version), - RawState: &proto.RawState{ - Json: r.RawStateJSON, - Flatmap: r.RawStateFlatmap, - }, - } - - protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state := cty.NullVal(resSchema.Block.ImpliedType()) - if protoResp.UpgradedState != nil { - state, err = msgpack.Unmarshal(protoResp.UpgradedState.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - - resp.UpgradedState = state - return resp -} - -func (p *GRPCProvider) Configure(r providers.ConfigureRequest) (resp providers.ConfigureResponse) { - log.Printf("[TRACE] GRPCProvider: Configure") - - schema := p.getSchema() - - var mp []byte - - // we don't have anything to marshal if there's no config - mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.Configure_Request{ - TerraformVersion: r.TerraformVersion, - Config: &proto.DynamicValue{ - Msgpack: mp, - }, - } - - protoResp, err := p.client.Configure(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) Stop() error { - log.Printf("[TRACE] GRPCProvider: Stop") - - resp, err := p.client.Stop(p.ctx, new(proto.Stop_Request)) - if err != nil { - return err - } - - if resp.Error != "" { - return errors.New(resp.Error) - } - return nil -} - -func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - log.Printf("[TRACE] GRPCProvider: ReadResource") - - resSchema := p.getResourceSchema(r.TypeName) - - mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ReadResource_Request{ - TypeName: r.TypeName, - CurrentState: &proto.DynamicValue{Msgpack: mp}, - Private: r.Private, - } - - protoResp, err := p.client.ReadResource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state := cty.NullVal(resSchema.Block.ImpliedType()) - if protoResp.NewState != nil { - state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.NewState = state - resp.Private = protoResp.Private - - return resp -} - -func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - log.Printf("[TRACE] GRPCProvider: PlanResourceChange") - - resSchema := p.getResourceSchema(r.TypeName) - - priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - 
resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.PlanResourceChange_Request{ - TypeName: r.TypeName, - PriorState: &proto.DynamicValue{Msgpack: priorMP}, - Config: &proto.DynamicValue{Msgpack: configMP}, - ProposedNewState: &proto.DynamicValue{Msgpack: propMP}, - PriorPrivate: r.PriorPrivate, - } - - protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state := cty.NullVal(resSchema.Block.ImpliedType()) - if protoResp.PlannedState != nil { - state, err = msgpack.Unmarshal(protoResp.PlannedState.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.PlannedState = state - - for _, p := range protoResp.RequiresReplace { - resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) - } - - resp.PlannedPrivate = protoResp.PlannedPrivate - - resp.LegacyTypeSystem = protoResp.LegacyTypeSystem - - return resp -} - -func (p *GRPCProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - log.Printf("[TRACE] GRPCProvider: ApplyResourceChange") - - resSchema := p.getResourceSchema(r.TypeName) - - priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ApplyResourceChange_Request{ - TypeName: r.TypeName, - PriorState: &proto.DynamicValue{Msgpack: priorMP}, - PlannedState: &proto.DynamicValue{Msgpack: plannedMP}, - Config: &proto.DynamicValue{Msgpack: configMP}, - PlannedPrivate: r.PlannedPrivate, - } - - protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - resp.Private = protoResp.Private - - state := cty.NullVal(resSchema.Block.ImpliedType()) - if protoResp.NewState != nil { - state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.NewState = state - - resp.LegacyTypeSystem = protoResp.LegacyTypeSystem - - return resp -} - -func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { - log.Printf("[TRACE] GRPCProvider: ImportResourceState") - - protoReq := &proto.ImportResourceState_Request{ - TypeName: r.TypeName, - Id: r.ID, - } - - protoResp, err := p.client.ImportResourceState(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = 
resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - for _, imported := range protoResp.ImportedResources { - resource := providers.ImportedResource{ - TypeName: imported.TypeName, - Private: imported.Private, - } - - resSchema := p.getResourceSchema(resource.TypeName) - state := cty.NullVal(resSchema.Block.ImpliedType()) - if imported.State != nil { - state, err = msgpack.Unmarshal(imported.State.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resource.State = state - resp.ImportedResources = append(resp.ImportedResources, resource) - } - - return resp -} - -func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - log.Printf("[TRACE] GRPCProvider: ReadDataSource") - - dataSchema := p.getDatasourceSchema(r.TypeName) - - config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ReadDataSource_Request{ - TypeName: r.TypeName, - Config: &proto.DynamicValue{ - Msgpack: config, - }, - } - - protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state := cty.NullVal(dataSchema.Block.ImpliedType()) - if protoResp.State != nil { - state, err = msgpack.Unmarshal(protoResp.State.Msgpack, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.State = state - - return resp -} - -// closing the grpc connection is final, and terraform will call it at the end of every phase. -func (p *GRPCProvider) Close() error { - log.Printf("[TRACE] GRPCProvider: Close") - - // Make sure to stop the server if we're not running within go-plugin. - if p.TestServer != nil { - p.TestServer.Stop() - } - - // Check this since it's not automatically inserted during plugin creation. - // It's currently only inserted by the command package, because that is - // where the factory is built and is the only point with access to the - // plugin.Client. - if p.PluginClient == nil { - log.Println("[DEBUG] provider has no plugin.Client") - return nil - } - - p.PluginClient.Kill() - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go deleted file mode 100644 index c0e6f549ab..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go +++ /dev/null @@ -1,178 +0,0 @@ -package plugin - -import ( - "context" - "errors" - "io" - "log" - "sync" - - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/msgpack" - "google.golang.org/grpc" -) - -// GRPCProvisionerPlugin is the plugin.GRPCPlugin implementation. 
-type GRPCProvisionerPlugin struct { - plugin.Plugin - GRPCProvisioner func() proto.ProvisionerServer -} - -func (p *GRPCProvisionerPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCProvisioner{ - client: proto.NewProvisionerClient(c), - ctx: ctx, - }, nil -} - -func (p *GRPCProvisionerPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - proto.RegisterProvisionerServer(s, p.GRPCProvisioner()) - return nil -} - -// provisioners.Interface grpc implementation -type GRPCProvisioner struct { - // PluginClient provides a reference to the plugin.Client which controls the plugin process. - // This allows the GRPCProvider a way to shutdown the plugin process. - PluginClient *plugin.Client - - client proto.ProvisionerClient - ctx context.Context - - // Cache the schema since we need it for serialization in each method call. - mu sync.Mutex - schema *configschema.Block -} - -func (p *GRPCProvisioner) GetSchema() (resp provisioners.GetSchemaResponse) { - p.mu.Lock() - defer p.mu.Unlock() - - if p.schema != nil { - return provisioners.GetSchemaResponse{ - Provisioner: p.schema, - } - } - - protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request)) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - if protoResp.Provisioner == nil { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provisioner schema")) - return resp - } - - resp.Provisioner = convert.ProtoToConfigSchema(protoResp.Provisioner.Block) - - p.schema = resp.Provisioner - - return resp -} - -func (p *GRPCProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { - schema := p.GetSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) - return resp - } - - mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ValidateProvisionerConfig_Request{ - Config: &proto.DynamicValue{Msgpack: mp}, - } - protoResp, err := p.client.ValidateProvisionerConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - schema := p.GetSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) - return resp - } - - mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - // connection is always assumed to be a simple string map - connMP, err := msgpack.Marshal(r.Connection, cty.Map(cty.String)) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ProvisionResource_Request{ - Config: &proto.DynamicValue{Msgpack: mp}, - Connection: &proto.DynamicValue{Msgpack: connMP}, - } - - outputClient, err := p.client.ProvisionResource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - for { - rcv, 
err := outputClient.Recv() - if rcv != nil { - r.UIOutput.Output(rcv.Output) - } - if err != nil { - if err != io.EOF { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - break - } - - if len(rcv.Diagnostics) > 0 { - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(rcv.Diagnostics)) - break - } - } - - return resp -} - -func (p *GRPCProvisioner) Stop() error { - protoResp, err := p.client.Stop(p.ctx, &proto.Stop_Request{}) - if err != nil { - return err - } - if protoResp.Error != "" { - return errors.New(protoResp.Error) - } - return nil -} - -func (p *GRPCProvisioner) Close() error { - // check this since it's not automatically inserted during plugin creation - if p.PluginClient == nil { - log.Println("[DEBUG] provider has no plugin.Client") - return nil - } - - p.PluginClient.Kill() - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go deleted file mode 100644 index e4fb577619..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go +++ /dev/null @@ -1,14 +0,0 @@ -package plugin - -import ( - "github.com/hashicorp/go-plugin" -) - -// See serve.go for serving plugins - -var VersionedPlugins = map[int]plugin.PluginSet{ - 5: { - "provider": &GRPCProviderPlugin{}, - "provisioner": &GRPCProvisionerPlugin{}, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go deleted file mode 100644 index bfd62e2e9b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go +++ /dev/null @@ -1,620 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// ResourceProviderPlugin is the plugin.Plugin implementation. -type ResourceProviderPlugin struct { - ResourceProvider func() terraform.ResourceProvider -} - -func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { - return &ResourceProviderServer{ - Broker: b, - Provider: p.ResourceProvider(), - }, nil -} - -func (p *ResourceProviderPlugin) Client( - b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { - return &ResourceProvider{Broker: b, Client: c}, nil -} - -// ResourceProvider is an implementation of terraform.ResourceProvider -// that communicates over RPC. 
-type ResourceProvider struct { - Broker *plugin.MuxBroker - Client *rpc.Client -} - -func (p *ResourceProvider) Stop() error { - var resp ResourceProviderStopResponse - err := p.Client.Call("Plugin.Stop", new(interface{}), &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { - var result ResourceProviderGetSchemaResponse - args := &ResourceProviderGetSchemaArgs{ - Req: req, - } - - err := p.Client.Call("Plugin.GetSchema", args, &result) - if err != nil { - return nil, err - } - - if result.Error != nil { - err = result.Error - } - - return result.Schema, err -} - -func (p *ResourceProvider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - id := p.Broker.NextId() - go p.Broker.AcceptAndServe(id, &UIInputServer{ - UIInput: input, - }) - - var resp ResourceProviderInputResponse - args := ResourceProviderInputArgs{ - InputId: id, - Config: c, - } - - err := p.Client.Call("Plugin.Input", &args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - return nil, err - } - - return resp.Config, nil -} - -func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResponse - args := ResourceProviderValidateArgs{ - Config: c, - } - - err := p.Client.Call("Plugin.Validate", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResourceResponse - args := ResourceProviderValidateResourceArgs{ - Config: c, - Type: t, - } - - err := p.Client.Call("Plugin.ValidateResource", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error { - var resp ResourceProviderConfigureResponse - err := p.Client.Call("Plugin.Configure", c, &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - var resp ResourceProviderApplyResponse - args := &ResourceProviderApplyArgs{ - Info: info, - State: s, - Diff: d, - } - - err := p.Client.Call("Plugin.Apply", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - var resp ResourceProviderDiffResponse - args := &ResourceProviderDiffArgs{ - Info: info, - State: s, - Config: c, - } - err := p.Client.Call("Plugin.Diff", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.Diff, err -} - -func (p *ResourceProvider) ValidateDataSource( - t string, c 
*terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResourceResponse - args := ResourceProviderValidateResourceArgs{ - Config: c, - Type: t, - } - - err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - var resp ResourceProviderRefreshResponse - args := &ResourceProviderRefreshArgs{ - Info: info, - State: s, - } - - err := p.Client.Call("Plugin.Refresh", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) ImportState( - info *terraform.InstanceInfo, - id string) ([]*terraform.InstanceState, error) { - var resp ResourceProviderImportStateResponse - args := &ResourceProviderImportStateArgs{ - Info: info, - Id: id, - } - - err := p.Client.Call("Plugin.ImportState", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) Resources() []terraform.ResourceType { - var result []terraform.ResourceType - - err := p.Client.Call("Plugin.Resources", new(interface{}), &result) - if err != nil { - // TODO: panic, log, what? - return nil - } - - return result -} - -func (p *ResourceProvider) ReadDataDiff( - info *terraform.InstanceInfo, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - var resp ResourceProviderReadDataDiffResponse - args := &ResourceProviderReadDataDiffArgs{ - Info: info, - Config: c, - } - - err := p.Client.Call("Plugin.ReadDataDiff", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.Diff, err -} - -func (p *ResourceProvider) ReadDataApply( - info *terraform.InstanceInfo, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - var resp ResourceProviderReadDataApplyResponse - args := &ResourceProviderReadDataApplyArgs{ - Info: info, - Diff: d, - } - - err := p.Client.Call("Plugin.ReadDataApply", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) DataSources() []terraform.DataSource { - var result []terraform.DataSource - - err := p.Client.Call("Plugin.DataSources", new(interface{}), &result) - if err != nil { - // TODO: panic, log, what? - return nil - } - - return result -} - -func (p *ResourceProvider) Close() error { - return p.Client.Close() -} - -// ResourceProviderServer is a net/rpc compatible structure for serving -// a ResourceProvider. This should not be used directly. 
-type ResourceProviderServer struct { - Broker *plugin.MuxBroker - Provider terraform.ResourceProvider -} - -type ResourceProviderStopResponse struct { - Error *plugin.BasicError -} - -type ResourceProviderGetSchemaArgs struct { - Req *terraform.ProviderSchemaRequest -} - -type ResourceProviderGetSchemaResponse struct { - Schema *terraform.ProviderSchema - Error *plugin.BasicError -} - -type ResourceProviderConfigureResponse struct { - Error *plugin.BasicError -} - -type ResourceProviderInputArgs struct { - InputId uint32 - Config *terraform.ResourceConfig -} - -type ResourceProviderInputResponse struct { - Config *terraform.ResourceConfig - Error *plugin.BasicError -} - -type ResourceProviderApplyArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState - Diff *terraform.InstanceDiff -} - -type ResourceProviderApplyResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderDiffArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState - Config *terraform.ResourceConfig -} - -type ResourceProviderDiffResponse struct { - Diff *terraform.InstanceDiff - Error *plugin.BasicError -} - -type ResourceProviderRefreshArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState -} - -type ResourceProviderRefreshResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderImportStateArgs struct { - Info *terraform.InstanceInfo - Id string -} - -type ResourceProviderImportStateResponse struct { - State []*terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderReadDataApplyArgs struct { - Info *terraform.InstanceInfo - Diff *terraform.InstanceDiff -} - -type ResourceProviderReadDataApplyResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderReadDataDiffArgs struct { - Info *terraform.InstanceInfo - Config *terraform.ResourceConfig -} - -type ResourceProviderReadDataDiffResponse struct { - Diff *terraform.InstanceDiff - Error *plugin.BasicError -} - -type ResourceProviderValidateArgs struct { - Config *terraform.ResourceConfig -} - -type ResourceProviderValidateResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -type ResourceProviderValidateResourceArgs struct { - Config *terraform.ResourceConfig - Type string -} - -type ResourceProviderValidateResourceResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -func (s *ResourceProviderServer) Stop( - _ interface{}, - reply *ResourceProviderStopResponse) error { - err := s.Provider.Stop() - *reply = ResourceProviderStopResponse{ - Error: plugin.NewBasicError(err), - } - - return nil -} - -func (s *ResourceProviderServer) GetSchema( - args *ResourceProviderGetSchemaArgs, - result *ResourceProviderGetSchemaResponse, -) error { - schema, err := s.Provider.GetSchema(args.Req) - result.Schema = schema - if err != nil { - result.Error = plugin.NewBasicError(err) - } - return nil -} - -func (s *ResourceProviderServer) Input( - args *ResourceProviderInputArgs, - reply *ResourceProviderInputResponse) error { - conn, err := s.Broker.Dial(args.InputId) - if err != nil { - *reply = ResourceProviderInputResponse{ - Error: plugin.NewBasicError(err), - } - return nil - } - client := rpc.NewClient(conn) - defer client.Close() - - input := &UIInput{Client: client} - - config, err := s.Provider.Input(input, args.Config) - *reply = ResourceProviderInputResponse{ - Config: config, - Error: plugin.NewBasicError(err), - } - 
- return nil -} - -func (s *ResourceProviderServer) Validate( - args *ResourceProviderValidateArgs, - reply *ResourceProviderValidateResponse) error { - warns, errs := s.Provider.Validate(args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) ValidateResource( - args *ResourceProviderValidateResourceArgs, - reply *ResourceProviderValidateResourceResponse) error { - warns, errs := s.Provider.ValidateResource(args.Type, args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResourceResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) Configure( - config *terraform.ResourceConfig, - reply *ResourceProviderConfigureResponse) error { - err := s.Provider.Configure(config) - *reply = ResourceProviderConfigureResponse{ - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Apply( - args *ResourceProviderApplyArgs, - result *ResourceProviderApplyResponse) error { - state, err := s.Provider.Apply(args.Info, args.State, args.Diff) - *result = ResourceProviderApplyResponse{ - State: state, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Diff( - args *ResourceProviderDiffArgs, - result *ResourceProviderDiffResponse) error { - diff, err := s.Provider.Diff(args.Info, args.State, args.Config) - *result = ResourceProviderDiffResponse{ - Diff: diff, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Refresh( - args *ResourceProviderRefreshArgs, - result *ResourceProviderRefreshResponse) error { - newState, err := s.Provider.Refresh(args.Info, args.State) - *result = ResourceProviderRefreshResponse{ - State: newState, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) ImportState( - args *ResourceProviderImportStateArgs, - result *ResourceProviderImportStateResponse) error { - states, err := s.Provider.ImportState(args.Info, args.Id) - *result = ResourceProviderImportStateResponse{ - State: states, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Resources( - nothing interface{}, - result *[]terraform.ResourceType) error { - *result = s.Provider.Resources() - return nil -} - -func (s *ResourceProviderServer) ValidateDataSource( - args *ResourceProviderValidateResourceArgs, - reply *ResourceProviderValidateResourceResponse) error { - warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResourceResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) ReadDataDiff( - args *ResourceProviderReadDataDiffArgs, - result *ResourceProviderReadDataDiffResponse) error { - diff, err := s.Provider.ReadDataDiff(args.Info, args.Config) - *result = ResourceProviderReadDataDiffResponse{ - Diff: diff, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) ReadDataApply( - args *ResourceProviderReadDataApplyArgs, - result *ResourceProviderReadDataApplyResponse) error { - newState, err := 
s.Provider.ReadDataApply(args.Info, args.Diff) - *result = ResourceProviderReadDataApplyResponse{ - State: newState, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) DataSources( - nothing interface{}, - result *[]terraform.DataSource) error { - *result = s.Provider.DataSources() - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go deleted file mode 100644 index 986bf41848..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go +++ /dev/null @@ -1,113 +0,0 @@ -package plugin - -import ( - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" - grpcplugin "github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin" - proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -const ( - // The constants below are the names of the plugins that can be dispensed - // from the plugin server. - ProviderPluginName = "provider" - - // DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify - // a particular version during their handshake. This is the version used when Terraform 0.10 - // and 0.11 launch plugins that were built with support for both versions 4 and 5, and must - // stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and - // 0.11. - DefaultProtocolVersion = 4 -) - -// Handshake is the HandshakeConfig used to configure clients and servers. -var Handshake = plugin.HandshakeConfig{ - // The ProtocolVersion is the version that must match between TF core - // and TF plugins. This should be bumped whenever a change happens in - // one or the other that makes it so that they can't safely communicate. - // This could be adding a new interface value, it could be how - // helper/schema computes diffs, etc. - ProtocolVersion: DefaultProtocolVersion, - - // The magic cookie values should NEVER be changed. - MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", - MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", -} - -type ProviderFunc func() terraform.ResourceProvider -type GRPCProviderFunc func() proto.ProviderServer - -// ServeOpts are the configurations to serve a plugin. -type ServeOpts struct { - ProviderFunc ProviderFunc - - // Wrapped versions of the above plugins will automatically be shimmed and - // added to the GRPC functions when possible. - GRPCProviderFunc GRPCProviderFunc - - // Logger is the logger that go-plugin will use. - Logger hclog.Logger - - // TestConfig should only be set when the provider is being tested; it - // will opt out of go-plugin's lifecycle management and other features, - // and will use the supplied configuration options to control the - // plugin's lifecycle and communicate connection information. See the - // go-plugin GoDoc for more information. - TestConfig *plugin.ServeTestConfig -} - -// Serve serves a plugin. This function never returns and should be the final -// function called in the main function of the plugin. -func Serve(opts *ServeOpts) { - // since the plugins may not yet be aware of the new protocol, we - // automatically wrap the plugins in the grpc shims.
- if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil { - provider := grpcplugin.NewGRPCProviderServerShim(opts.ProviderFunc()) - // this is almost always going to be a *schema.Provider, but check that - // we got back a valid provider just in case. - if provider != nil { - opts.GRPCProviderFunc = func() proto.ProviderServer { - return provider - } - } - } - - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: Handshake, - VersionedPlugins: pluginSet(opts), - GRPCServer: plugin.DefaultGRPCServer, - Logger: opts.Logger, - Test: opts.TestConfig, - }) -} - -// legacyPluginMap returns the legacy map[string]plugin.Plugin to use for configuring -// a plugin server or client. -func legacyPluginMap(opts *ServeOpts) map[string]plugin.Plugin { - return map[string]plugin.Plugin{ - "provider": &ResourceProviderPlugin{ - ResourceProvider: opts.ProviderFunc, - }, - } -} - -func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet { - // Set the legacy netrpc plugins at version 4. - // The oldest version is returned when executed by a legacy go-plugin - // client. - plugins := map[int]plugin.PluginSet{ - 4: legacyPluginMap(opts), - } - - // add the new protocol versions if they're configured - if opts.GRPCProviderFunc != nil { - plugins[5] = plugin.PluginSet{} - if opts.GRPCProviderFunc != nil { - plugins[5]["provider"] = &GRPCProviderPlugin{ - GRPCProvider: opts.GRPCProviderFunc, - } - } - } - return plugins -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go deleted file mode 100644 index b24b03ebfe..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go +++ /dev/null @@ -1,52 +0,0 @@ -package plugin - -import ( - "context" - "net/rpc" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// UIInput is an implementation of terraform.UIInput that communicates -// over RPC. -type UIInput struct { - Client *rpc.Client -} - -func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { - var resp UIInputInputResponse - err := i.Client.Call("Plugin.Input", opts, &resp) - if err != nil { - return "", err - } - if resp.Error != nil { - err = resp.Error - return "", err - } - - return resp.Value, nil -} - -type UIInputInputResponse struct { - Value string - Error *plugin.BasicError -} - -// UIInputServer is a net/rpc compatible structure for serving -// a UIInput. This should not be used directly. -type UIInputServer struct { - UIInput terraform.UIInput -} - -func (s *UIInputServer) Input( - opts *terraform.InputOpts, - reply *UIInputInputResponse) error { - value, err := s.UIInput.Input(context.Background(), opts) - *reply = UIInputInputResponse{ - Value: value, - Error: plugin.NewBasicError(err), - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go deleted file mode 100644 index 07c13d03aa..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go +++ /dev/null @@ -1,29 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// UIOutput is an implementation of terraform.UIOutput that communicates -// over RPC.
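For context, the Serve function being deleted above was the entry point every SDK v1 provider binary called from main, with pluginSet negotiating protocol 4 (net/rpc) or protocol 5 (gRPC) per client. A minimal sketch of that wiring, assuming a hypothetical provider package (the import path and Provider function below are illustrative placeholders, not code from this repository):

```go
package main

import (
	"github.com/hashicorp/terraform-plugin-sdk/plugin"
	"github.com/hashicorp/terraform-plugin-sdk/terraform"

	// Hypothetical provider package, used here only for illustration.
	"example.com/terraform-provider-example/example"
)

func main() {
	// Serve blocks forever: it hands the provider to go-plugin, which
	// wraps it in the gRPC shim for protocol 5 clients and falls back
	// to the legacy net/rpc plugin for protocol 4 clients.
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: func() terraform.ResourceProvider {
			return example.Provider()
		},
	})
}
```

The v2 equivalent lives under the github.com/hashicorp/terraform-plugin-sdk/v2 module path, and its ProviderFunc returns a *schema.Provider rather than the terraform.ResourceProvider interface removed here.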
-type UIOutput struct { - Client *rpc.Client -} - -func (o *UIOutput) Output(v string) { - o.Client.Call("Plugin.Output", v, new(interface{})) -} - -// UIOutputServer is the RPC server for serving UIOutput. -type UIOutputServer struct { - UIOutput terraform.UIOutput -} - -func (s *UIOutputServer) Output( - v string, - reply *interface{}) error { - s.UIOutput.Output(v) - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go deleted file mode 100644 index eb05c68ae3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go +++ /dev/null @@ -1,882 +0,0 @@ -package terraform - -import ( - "bytes" - "context" - "fmt" - "log" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// InputMode defines what sort of input will be asked for when Input -// is called on Context. -type InputMode byte - -const ( - // InputModeVar asks for all variables - InputModeVar InputMode = 1 << iota - - // InputModeVarUnset asks for variables which are not set yet. - // InputModeVar must be set for this to have an effect. - InputModeVarUnset - - // InputModeProvider asks for provider variables - InputModeProvider - - // InputModeStd is the standard operating mode and asks for both variables - // and providers. - InputModeStd = InputModeVar | InputModeProvider -) - -// ContextOpts are the user-configurable options to create a context with -// NewContext. -type ContextOpts struct { - Config *configs.Config - Changes *plans.Changes - State *states.State - Targets []addrs.Targetable - Variables InputValues - Meta *ContextMeta - Destroy bool - - Hooks []Hook - Parallelism int - ProviderResolver providers.Resolver - Provisioners map[string]ProvisionerFactory - - // If non-nil, will apply as additional constraints on the provider - // plugins that will be requested from the provider resolver. - ProviderSHA256s map[string][]byte - SkipProviderVerify bool - - UIInput UIInput -} - -// ContextMeta is metadata about the running context. This is information -// that this package or structure cannot determine on its own but exposes -// into Terraform in various ways. This must be provided by the Context -// initializer. -type ContextMeta struct { - Env string // Env is the state environment -} - -// Context represents all the context that Terraform needs in order to -// perform operations on infrastructure. This structure is built using -// NewContext. 
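The ResourceProvider, UIInput, and UIOutput wrappers above all follow the same net/rpc convention: an exported method taking an argument value and a reply pointer, registered under the name "Plugin". A self-contained sketch of that calling pattern with a toy service (none of these types are SDK code):

```go
package main

import (
	"fmt"
	"log"
	"net"
	"net/rpc"
)

// EchoServer mirrors the shape of UIOutputServer: exported methods with an
// argument value and a reply pointer, dispatched by method name over RPC.
type EchoServer struct{}

func (s *EchoServer) Echo(v string, reply *string) error {
	*reply = "echo: " + v
	return nil
}

func main() {
	srv := rpc.NewServer()
	// Register under "Plugin" so calls use "Plugin.Echo", matching the
	// "Plugin.Input"/"Plugin.Output" naming seen in the SDK wrappers.
	if err := srv.RegisterName("Plugin", &EchoServer{}); err != nil {
		log.Fatal(err)
	}

	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	go srv.Accept(ln)

	client, err := rpc.Dial("tcp", ln.Addr().String())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var out string
	if err := client.Call("Plugin.Echo", "hi", &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // echo: hi
}
```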
-type Context struct { - config *configs.Config - changes *plans.Changes - state *states.State - targets []addrs.Targetable - variables InputValues - meta *ContextMeta - destroy bool - - hooks []Hook - components contextComponentFactory - schemas *Schemas - sh *stopHook - uiInput UIInput - - l sync.Mutex // Lock acquired during any task - parallelSem Semaphore - providerInputConfig map[string]map[string]cty.Value - providerSHA256s map[string][]byte - runCond *sync.Cond - runContext context.Context - runContextCancel context.CancelFunc - shadowErr error -} - -// (additional methods on Context can be found in context_*.go files.) - -// NewContext creates a new Context structure. -// -// Once a Context is created, the caller must not access or mutate any of -// the objects referenced (directly or indirectly) by the ContextOpts fields. -// -// If the returned diagnostics contains errors then the resulting context is -// invalid and must not be used. -func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) { - log.Printf("[TRACE] terraform.NewContext: starting") - diags := CheckCoreVersionRequirements(opts.Config) - // If version constraints are not met then we'll bail early since otherwise - // we're likely to just see a bunch of other errors related to - // incompatibilities, which could be overwhelming for the user. - if diags.HasErrors() { - return nil, diags - } - - // Copy all the hooks and add our stop hook. We don't append directly - // to the Config so that we're not modifying that in-place. - sh := new(stopHook) - hooks := make([]Hook, len(opts.Hooks)+1) - copy(hooks, opts.Hooks) - hooks[len(opts.Hooks)] = sh - - state := opts.State - if state == nil { - state = states.NewState() - } - - // Determine parallelism, default to 10. We do this both to limit - // CPU pressure but also to have an extra guard against rate throttling - // from providers. - par := opts.Parallelism - if par == 0 { - par = 10 - } - - // Set up the variables in the following sequence: - // 0 - Take default values from the configuration - // 1 - Take values from TF_VAR_x environment variables - // 2 - Take values specified in -var flags, overriding values - // set by environment variables if necessary. This includes - // values taken from -var-file in addition. - var variables InputValues - if opts.Config != nil { - // Default variables from the configuration seed our map. - variables = DefaultVariableValues(opts.Config.Module.Variables) - } - // Variables provided by the caller (from CLI, environment, etc) can - // override the defaults. 
- variables = variables.Override(opts.Variables) - - // Bind available provider plugins to the constraints in config - var providerFactories map[string]providers.Factory - if opts.ProviderResolver != nil { - deps := ConfigTreeDependencies(opts.Config, state) - reqd := deps.AllPluginRequirements() - if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify { - reqd.LockExecutables(opts.ProviderSHA256s) - } - log.Printf("[TRACE] terraform.NewContext: resolving provider version selections") - - var providerDiags tfdiags.Diagnostics - providerFactories, providerDiags = resourceProviderFactories(opts.ProviderResolver, reqd) - diags = diags.Append(providerDiags) - - if diags.HasErrors() { - return nil, diags - } - } else { - providerFactories = make(map[string]providers.Factory) - } - - components := &basicComponentFactory{ - providers: providerFactories, - provisioners: opts.Provisioners, - } - - log.Printf("[TRACE] terraform.NewContext: loading provider schemas") - schemas, err := LoadSchemas(opts.Config, opts.State, components) - if err != nil { - diags = diags.Append(err) - return nil, diags - } - - changes := opts.Changes - if changes == nil { - changes = plans.NewChanges() - } - - config := opts.Config - if config == nil { - config = configs.NewEmptyConfig() - } - - log.Printf("[TRACE] terraform.NewContext: complete") - - return &Context{ - components: components, - schemas: schemas, - destroy: opts.Destroy, - changes: changes, - hooks: hooks, - meta: opts.Meta, - config: config, - state: state, - targets: opts.Targets, - uiInput: opts.UIInput, - variables: variables, - - parallelSem: NewSemaphore(par), - providerInputConfig: make(map[string]map[string]cty.Value), - providerSHA256s: opts.ProviderSHA256s, - sh: sh, - }, nil -} - -func (c *Context) Schemas() *Schemas { - return c.schemas -} - -type ContextGraphOpts struct { - // If true, validates the graph structure (checks for cycles). - Validate bool - - // Legacy graphs only: won't prune the graph - Verbose bool -} - -// Graph returns the graph used for the given operation type. -// -// The most extensive or complex graph type is GraphTypePlan. 
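NewContext above resolves variables in the documented order: configuration defaults seed the map, and caller-supplied values (environment, then -var/-var-file) override them. A toy sketch of that precedence, using plain strings in place of the SDK's InputValues type:

```go
package main

import "fmt"

// mergeVars applies later layers over earlier ones, so the last layer that
// defines a key wins -- the same precedence NewContext gets via Override.
func mergeVars(layers ...map[string]string) map[string]string {
	out := map[string]string{}
	for _, layer := range layers {
		for k, v := range layer {
			out[k] = v
		}
	}
	return out
}

func main() {
	defaults := map[string]string{"region": "us-east-1", "size": "small"} // from config
	env := map[string]string{"size": "medium"}                            // TF_VAR_size
	cli := map[string]string{"size": "large"}                             // -var size=large

	fmt.Println(mergeVars(defaults, env, cli))
	// map[region:us-east-1 size:large]
}
```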
-func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) { - if opts == nil { - opts = &ContextGraphOpts{Validate: true} - } - - log.Printf("[INFO] terraform: building graph: %s", typ) - switch typ { - case GraphTypeApply: - return (&ApplyGraphBuilder{ - Config: c.config, - Changes: c.changes, - State: c.state, - Components: c.components, - Schemas: c.schemas, - Targets: c.targets, - Destroy: c.destroy, - Validate: opts.Validate, - }).Build(addrs.RootModuleInstance) - - case GraphTypeValidate: - // The validate graph is just a slightly modified plan graph - fallthrough - case GraphTypePlan: - // Create the plan graph builder - p := &PlanGraphBuilder{ - Config: c.config, - State: c.state, - Components: c.components, - Schemas: c.schemas, - Targets: c.targets, - Validate: opts.Validate, - } - - // Some special cases for other graph types shared with plan currently - var b GraphBuilder = p - switch typ { - case GraphTypeValidate: - b = ValidateGraphBuilder(p) - } - - return b.Build(addrs.RootModuleInstance) - - case GraphTypePlanDestroy: - return (&DestroyPlanGraphBuilder{ - Config: c.config, - State: c.state, - Components: c.components, - Schemas: c.schemas, - Targets: c.targets, - Validate: opts.Validate, - }).Build(addrs.RootModuleInstance) - - case GraphTypeRefresh: - return (&RefreshGraphBuilder{ - Config: c.config, - State: c.state, - Components: c.components, - Schemas: c.schemas, - Targets: c.targets, - Validate: opts.Validate, - }).Build(addrs.RootModuleInstance) - - case GraphTypeEval: - return (&EvalGraphBuilder{ - Config: c.config, - State: c.state, - Components: c.components, - Schemas: c.schemas, - }).Build(addrs.RootModuleInstance) - - default: - // Should never happen, because the above is exhaustive for all graph types. - panic(fmt.Errorf("unsupported graph type %s", typ)) - } -} - -// ShadowError returns any errors caught during a shadow operation. -// -// A shadow operation is an operation run in parallel to a real operation -// that performs the same tasks using new logic on copied state. The results -// are compared to ensure that the new logic works the same as the old logic. -// The shadow never affects the real operation or return values. -// -// The results of the shadow operation are only available through this function -// call after a real operation is complete. -// -// For API consumers of Context, you can safely ignore this function -// completely if you have no interest in helping report experimental feature -// errors to Terraform maintainers. Otherwise, please call this function -// after every operation and report this to the user. -// -// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect -// the real state or result of a real operation. They are purely informational -// to assist in future Terraform versions being more stable. Please message -// this effectively to the end user. -// -// This must be called only when no other operation is running (refresh, -// plan, etc.). The result can be used in parallel to any other operation -// running. -func (c *Context) ShadowError() error { - return c.shadowErr -} - -// State returns a copy of the current state associated with this context. -// -// This cannot safely be called in parallel with any other Context function. -func (c *Context) State() *states.State { - return c.state.DeepCopy() -} - -// Eval produces a scope in which expressions can be evaluated for -// the given module path.
-// -// This method must first evaluate any ephemeral values (input variables, local -// values, and output values) in the configuration. These ephemeral values are -// not included in the persisted state, so they must be re-computed using other -// values in the state before they can be properly evaluated. The updated -// values are retained in the main state associated with the receiving context. -// -// This function takes no action against remote APIs but it does need access -// to all provider and provisioner instances in order to obtain their schemas -// for type checking. -// -// The result is an evaluation scope that can be used to resolve references -// against the root module. If the returned diagnostics contains errors then -// the returned scope may be nil. If it is not nil then it may still be used -// to attempt expression evaluation or other analysis, but some expressions -// may not behave as expected. -func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnostics) { - // This is intended for external callers such as the "terraform console" - // command. Internally, we create an evaluator in c.walk before walking - // the graph, and create scopes in ContextGraphWalker. - - var diags tfdiags.Diagnostics - defer c.acquireRun("eval")() - - // Start with a copy of state so that we don't affect any instances - // that other methods may have already returned. - c.state = c.state.DeepCopy() - var walker *ContextGraphWalker - - graph, graphDiags := c.Graph(GraphTypeEval, nil) - diags = diags.Append(graphDiags) - if !diags.HasErrors() { - var walkDiags tfdiags.Diagnostics - walker, walkDiags = c.walk(graph, walkEval) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - } - - if walker == nil { - // If we skipped walking the graph (due to errors) then we'll just - // use a placeholder graph walker here, which'll refer to the - // unmodified state. - walker = c.graphWalker(walkEval) - } - - // This is a bit weird since we don't normally evaluate outside of - // the context of a walk, but we'll "re-enter" our desired path here - // just to get hold of an EvalContext for it. GraphContextBuiltin - // caches its contexts, so we should get hold of the context that was - // previously used for evaluation here, unless we skipped walking. - evalCtx := walker.EnterPath(path) - return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags -} - -// Apply applies the changes represented by this context and returns -// the resulting state. -// -// Even in the case an error is returned, the state may be returned and will -// potentially be partially updated. In addition to returning the resulting -// state, this context is updated with the latest state. -// -// If the state is required after an error, the caller should call -// Context.State, rather than rely on the return value. -// -// TODO: Apply and Refresh should either always return a state, or rely on the -// State() method. Currently the helper/resource testing framework relies -// on the absence of a returned state to determine if Destroy can be -// called, so that will need to be refactored before this can be changed. -func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) { - defer c.acquireRun("apply")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // Build the graph. 
- graph, diags := c.Graph(GraphTypeApply, nil) - if diags.HasErrors() { - return nil, diags - } - - // Determine the operation - operation := walkApply - if c.destroy { - operation = walkDestroy - } - - // Walk the graph - walker, walkDiags := c.walk(graph, operation) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - - if c.destroy && !diags.HasErrors() { - // If we know we were trying to destroy objects anyway, and we - // completed without any errors, then we'll also prune out any - // leftover empty resource husks (left after all of the instances - // of a resource with "count" or "for_each" are destroyed) to - // help ensure we end up with an _actually_ empty state, assuming - // we weren't destroying with -target here. - // - // (This doesn't actually take into account -target, but that should - // be okay because it doesn't throw away anything we can't recompute - // on a subsequent "terraform plan" run, if the resources are still - // present in the configuration. However, this _will_ cause "count = 0" - // resources to read as unknown during the next refresh walk, which - // may cause some additional churn if used in a data resource or - // provider block, until we remove refreshing as a separate walk and - // just do it as part of the plan walk.) - c.state.PruneResourceHusks() - } - - if len(c.targets) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Applied changes may be incomplete", - `The plan was created with the -target option in effect, so some changes requested in the configuration may have been ignored and the output values may not be fully updated. Run the following command to verify that no other changes are pending: - terraform plan - -Note that the -target option is not suitable for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - )) - } - - return c.state, diags -} - -// Plan generates an execution plan for the given context. -// -// The execution plan encapsulates the context and can be stored -// in order to reinstantiate a context later for Apply. -// -// Plan also updates the diff of this context to be the diff generated -// by the plan, so Apply can be called after. -func (c *Context) Plan() (*plans.Plan, tfdiags.Diagnostics) { - defer c.acquireRun("plan")() - c.changes = plans.NewChanges() - - var diags tfdiags.Diagnostics - - if len(c.targets) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Resource targeting is in effect", - `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. - -The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - )) - } - - varVals := make(map[string]plans.DynamicValue, len(c.variables)) - for k, iv := range c.variables { - // We use cty.DynamicPseudoType here so that we'll save both the - // value _and_ its dynamic type in the plan, so we can recover - // exactly the same value later. 
- dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to prepare variable value for plan", - fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err), - )) - continue - } - varVals[k] = dv - } - - p := &plans.Plan{ - VariableValues: varVals, - TargetAddrs: c.targets, - ProviderSHA256s: c.providerSHA256s, - } - - var operation walkOperation - if c.destroy { - operation = walkPlanDestroy - } else { - // Set our state to be something temporary. We do this so that - // the plan can update a fake state so that variables work, then - // we replace it back with our old state. - old := c.state - if old == nil { - c.state = states.NewState() - } else { - c.state = old.DeepCopy() - } - defer func() { - c.state = old - }() - - operation = walkPlan - } - - // Build the graph. - graphType := GraphTypePlan - if c.destroy { - graphType = GraphTypePlanDestroy - } - graph, graphDiags := c.Graph(graphType, nil) - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - return nil, diags - } - - // Do the walk - walker, walkDiags := c.walk(graph, operation) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return nil, diags - } - p.Changes = c.changes - - return p, diags -} - -// Refresh goes through all the resources in the state and refreshes them -// to their latest state. This will update the state that this context -// works with, along with returning it. -// -// Even in the case an error is returned, the state may be returned and -// will potentially be partially updated. -func (c *Context) Refresh() (*states.State, tfdiags.Diagnostics) { - defer c.acquireRun("refresh")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // Refresh builds a partial changeset as part of its work because it must - // create placeholder stubs for any resource instances that'll be created - // in subsequent plan so that provider configurations and data resources - // can interpolate from them. This plan is always thrown away after - // the operation completes, restoring any existing changeset. - oldChanges := c.changes - defer func() { c.changes = oldChanges }() - c.changes = plans.NewChanges() - - // Build the graph. - graph, diags := c.Graph(GraphTypeRefresh, nil) - if diags.HasErrors() { - return nil, diags - } - - // Do the walk - _, walkDiags := c.walk(graph, walkRefresh) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return nil, diags - } - - // During our walk we will have created planned object placeholders in - // state for resource instances that are in configuration but not yet - // created. These were created only to allow expression evaluation to - // work properly in provider and data blocks during the walk and must - // now be discarded, since a subsequent plan walk is responsible for - // creating these "for real". - // TODO: Consolidate refresh and plan into a single walk, so that the - // refresh walk doesn't need to emulate various aspects of the plan - // walk in order to properly evaluate provider and data blocks. - c.state.SyncWrapper().RemovePlannedResourceInstanceObjects() - - return c.state, diags -} - -// Stop stops the running task. -// -// Stop will block until the task completes. 
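Plan above serializes each variable against cty.DynamicPseudoType so the stored bytes carry the concrete runtime type along with the value, and decoding recovers both exactly. That behavior can be observed directly with go-cty's msgpack package (a small sketch, independent of the plans.DynamicValue wrapper):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	v := cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.NumberIntVal(2)})

	// Marshaling against cty.DynamicPseudoType embeds the concrete type
	// in the encoding rather than assuming the caller knows it.
	b, err := msgpack.Marshal(v, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}

	// Unmarshaling against cty.DynamicPseudoType reads that embedded
	// type back out, so the round trip preserves value and type exactly.
	got, err := msgpack.Unmarshal(b, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Type().FriendlyName()) // tuple
	fmt.Println(got.RawEquals(v))          // true
}
```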
-func (c *Context) Stop() { - log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence") - - c.l.Lock() - defer c.l.Unlock() - - // If we're running, then stop - if c.runContextCancel != nil { - log.Printf("[WARN] terraform: run context exists, stopping") - - // Tell the hook we want to stop - c.sh.Stop() - - // Stop the context - c.runContextCancel() - c.runContextCancel = nil - } - - // Grab the condition var before we exit - if cond := c.runCond; cond != nil { - log.Printf("[INFO] terraform: waiting for graceful stop to complete") - cond.Wait() - } - - log.Printf("[WARN] terraform: stop complete") -} - -// Validate performs semantic validation of the configuration, and returns -// any warnings or errors. -// -// Syntax and structural checks are performed by the configuration loader, -// and so are not repeated here. -func (c *Context) Validate() tfdiags.Diagnostics { - defer c.acquireRun("validate")() - - var diags tfdiags.Diagnostics - - // Validate input variables. We do this only for the values supplied - // by the root module, since child module calls are validated when we - // visit their graph nodes. - if c.config != nil { - varDiags := checkInputVariables(c.config.Module.Variables, c.variables) - diags = diags.Append(varDiags) - } - - // If we have errors at this point then we probably won't be able to - // construct a graph without producing redundant errors, so we'll halt early. - if diags.HasErrors() { - return diags - } - - // Build the graph so we can walk it and run Validate on nodes. - // We also validate the graph generated here, but this graph doesn't - // necessarily match the graph that Plan will generate, so we'll validate the - // graph again later after Planning. - graph, graphDiags := c.Graph(GraphTypeValidate, nil) - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - return diags - } - - // Walk - walker, walkDiags := c.walk(graph, walkValidate) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return diags - } - - return diags -} - -// Config returns the configuration tree associated with this context. -func (c *Context) Config() *configs.Config { - return c.config -} - -// Variables will return the mapping of variables that were defined -// for this Context. If Input was called, this mapping may be different -// than what was given. -func (c *Context) Variables() InputValues { - return c.variables -} - -// SetVariable sets a variable after a context has already been built. -func (c *Context) SetVariable(k string, v cty.Value) { - c.variables[k] = &InputValue{ - Value: v, - SourceType: ValueFromCaller, - } -} - -func (c *Context) acquireRun(phase string) func() { - // With the run lock held, grab the context lock to make changes - // to the run context. - c.l.Lock() - defer c.l.Unlock() - - // Wait until we're no longer running - for c.runCond != nil { - c.runCond.Wait() - } - - // Build our lock - c.runCond = sync.NewCond(&c.l) - - // Create a new run context - c.runContext, c.runContextCancel = context.WithCancel(context.Background()) - - // Reset the stop hook so we're not stopped - c.sh.Reset() - - // Reset the shadow errors - c.shadowErr = nil - - return c.releaseRun -} - -func (c *Context) releaseRun() { - // Grab the context lock so that we can make modifications to fields - c.l.Lock() - defer c.l.Unlock() - - // End our run.
We check if runContext is non-nil because it can be - // set to nil if it was cancelled via Stop() - if c.runContextCancel != nil { - c.runContextCancel() - } - - // Unlock all waiting on our condition - cond := c.runCond - c.runCond = nil - cond.Broadcast() - - // Unset the context - c.runContext = nil -} - -func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) { - log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) - - walker := c.graphWalker(operation) - - // Watch for a stop so we can call the provider Stop() API. - watchStop, watchWait := c.watchStop(walker) - - // Walk the real graph, this will block until it completes - diags := graph.Walk(walker) - - // Close the channel so the watcher stops, and wait for it to return. - close(watchStop) - <-watchWait - - return walker, diags -} - -func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker { - return &ContextGraphWalker{ - Context: c, - State: c.state.SyncWrapper(), - Changes: c.changes.SyncWrapper(), - Operation: operation, - StopContext: c.runContext, - RootVariableValues: c.variables, - } -} - -// watchStop immediately returns a `stop` and a `wait` chan after dispatching -// the watchStop goroutine. This will watch the runContext for cancellation and -// stop the providers accordingly. When the watch is no longer needed, the -// `stop` chan should be closed before waiting on the `wait` chan. -// The `wait` chan is important, because without synchronizing with the end of -// the watchStop goroutine, the runContext may also be closed during the select, -// incorrectly causing providers to be stopped. Even if the graph walk is done -// at that point, stopping a provider permanently cancels its StopContext which -// can cause later actions to fail. -func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) { - stop := make(chan struct{}) - wait := make(chan struct{}) - - // get the runContext cancellation channel now, because releaseRun will - // write to the runContext field. - done := c.runContext.Done() - - go func() { - defer close(wait) - // Wait for a stop or completion - select { - case <-done: - // done means the context was canceled, so we need to try and stop - // providers. - case <-stop: - // our own stop channel was closed. - return - } - - // If we're here, we're stopped, trigger the call. - log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop") - - { - // Copy the providers so that a misbehaved blocking Stop doesn't - // completely hang Terraform. - walker.providerLock.Lock() - ps := make([]providers.Interface, 0, len(walker.providerCache)) - for _, p := range walker.providerCache { - ps = append(ps, p) - } - defer walker.providerLock.Unlock() - - for _, p := range ps { - // We ignore the error for now since there isn't any reasonable - // action to take if there is an error here, since the stop is still - // advisory: Terraform will exit once the graph node completes. - p.Stop() - } - } - - { - // Call stop on all the provisioners - walker.provisionerLock.Lock() - ps := make([]provisioners.Interface, 0, len(walker.provisionerCache)) - for _, p := range walker.provisionerCache { - ps = append(ps, p) - } - defer walker.provisionerLock.Unlock() - - for _, p := range ps { - // We ignore the error for now since there isn't any reasonable - // action to take if there is an error here, since the stop is still - // advisory: Terraform will exit once the graph node completes.
- p.Stop() - } - } - }() - - return stop, wait -} - -// ShimLegacyState is a helper that takes the legacy state type and -// converts it to the new state type. -// -// This is implemented as a state file upgrade, so it will not preserve -// parts of the state structure that are not included in a serialized state, -// such as the resolved results of any local values, outputs in non-root -// modules, etc. -func ShimLegacyState(legacy *State) (*states.State, error) { - if legacy == nil { - return nil, nil - } - var buf bytes.Buffer - err := WriteState(legacy, &buf) - if err != nil { - return nil, err - } - f, err := statefile.Read(&buf) - if err != nil { - return nil, err - } - return f.State, err -} - -// MustShimLegacyState is a wrapper around ShimLegacyState that panics if -// the conversion does not succeed. This is primarily intended for tests where -// the given legacy state is an object constructed within the test. -func MustShimLegacyState(legacy *State) *states.State { - ret, err := ShimLegacyState(legacy) - if err != nil { - panic(err) - } - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go deleted file mode 100644 index a627996e39..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go +++ /dev/null @@ -1,68 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" -) - -// contextComponentFactory is the interface that Context uses -// to initialize various components such as providers and provisioners. -// This factory gets more information than the raw maps used to initialize -// a Context. This information is used for debugging. -type contextComponentFactory interface { - // ResourceProvider creates a new ResourceProvider with the given - // type. The "uid" is a unique identifier for this provider being - // initialized that can be used for internal tracking. - ResourceProvider(typ, uid string) (providers.Interface, error) - ResourceProviders() []string - - // ResourceProvisioner creates a new ResourceProvisioner with the - // given type. The "uid" is a unique identifier for this provisioner - // being initialized that can be used for internal tracking. - ResourceProvisioner(typ, uid string) (provisioners.Interface, error) - ResourceProvisioners() []string -} - -// basicComponentFactory just calls a factory from a map directly.
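The watchStop helper above returns a stop/wait channel pair: closing stop dismisses the watcher, and wait lets the caller synchronize with the goroutine's exit, which is what prevents the spurious provider stops its comment warns about. The same pattern reduced to a standalone sketch:

```go
package main

import (
	"context"
	"fmt"
)

// watch mirrors the shape of watchStop: it returns a channel the caller
// closes to dismiss the watcher, plus a channel that is closed only once
// the watcher goroutine has fully returned.
func watch(ctx context.Context, onCancel func()) (chan struct{}, <-chan struct{}) {
	stop := make(chan struct{})
	wait := make(chan struct{})
	go func() {
		defer close(wait)
		select {
		case <-ctx.Done():
			onCancel() // cancellation observed: run the cleanup hook
		case <-stop:
			// the caller no longer needs the watcher; exit without cleanup
		}
	}()
	return stop, wait
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	stop, wait := watch(ctx, func() { fmt.Println("stopping providers") })

	cancel()    // simulate Stop() cancelling the run context
	<-wait      // synchronize with the watcher's exit before proceeding
	close(stop) // harmless here; in the normal path this dismisses the watcher
}
```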
-type basicComponentFactory struct { - providers map[string]providers.Factory - provisioners map[string]ProvisionerFactory -} - -func (c *basicComponentFactory) ResourceProviders() []string { - result := make([]string, 0, len(c.providers)) - for k := range c.providers { - result = append(result, k) - } - - return result -} - -func (c *basicComponentFactory) ResourceProvisioners() []string { - result := make([]string, 0, len(c.provisioners)) - for k := range c.provisioners { - result = append(result, k) - } - - return result -} - -func (c *basicComponentFactory) ResourceProvider(typ, uid string) (providers.Interface, error) { - f, ok := c.providers[typ] - if !ok { - return nil, fmt.Errorf("unknown provider %q", typ) - } - - return f() -} - -func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (provisioners.Interface, error) { - f, ok := c.provisioners[typ] - if !ok { - return nil, fmt.Errorf("unknown provisioner %q", typ) - } - - return f() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go deleted file mode 100644 index 4448d8706e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go +++ /dev/null @@ -1,32 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=GraphType context_graph_type.go - -// GraphType is an enum of the type of graph to create with a Context. -// The values of the constants may change so they shouldn't be depended on; -// always use the constant name. -type GraphType byte - -const ( - GraphTypeInvalid GraphType = 0 - GraphTypeLegacy GraphType = iota - GraphTypeRefresh - GraphTypePlan - GraphTypePlanDestroy - GraphTypeApply - GraphTypeValidate - GraphTypeEval // only visits in-memory elements such as variables, locals, and outputs. -) - -// GraphTypeMap is a mapping of human-readable string to GraphType. This -// is useful as the mechanism for human input for configurable -// graph types. -var GraphTypeMap = map[string]GraphType{ - "apply": GraphTypeApply, - "plan": GraphTypePlan, - "plan-destroy": GraphTypePlanDestroy, - "refresh": GraphTypeRefresh, - "legacy": GraphTypeLegacy, - "validate": GraphTypeValidate, - "eval": GraphTypeEval, -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go deleted file mode 100644 index 9a9cd9626b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go +++ /dev/null @@ -1,83 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// ImportOpts are used as the configuration for Import. -type ImportOpts struct { - // Targets are the targets to import - Targets []*ImportTarget - - // Config is optional, and specifies a config tree that will be loaded - // into the graph and evaluated. This is the source for provider - // configurations. - Config *configs.Config -} - -// ImportTarget is a single resource to import. -type ImportTarget struct { - // Addr is the address for the resource instance that the new object should - // be imported into. - Addr addrs.AbsResourceInstance - - // ID is the ID of the resource to import.
This is resource-specific. - ID string - - // ProviderAddr is the address of the provider that should handle the import. - ProviderAddr addrs.AbsProviderConfig -} - -// Import takes already-created external resources and brings them -// under Terraform management. Import requires the exact type, name, and ID -// of the resources to import. -// -// This operation is idempotent. If the requested resource is already -// imported, no changes are made to the state. -// -// Further, this operation also gracefully handles partial state. If during -// an import there is a failure, all previously imported resources remain -// imported. -func (c *Context) Import(opts *ImportOpts) (*states.State, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Hold a lock since we can modify our own state here - defer c.acquireRun("import")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // If no module is given, default to the module configured with - // the Context. - config := opts.Config - if config == nil { - config = c.config - } - - // Initialize our graph builder - builder := &ImportGraphBuilder{ - ImportTargets: opts.Targets, - Config: config, - Components: c.components, - Schemas: c.schemas, - } - - // Build the graph! - graph, graphDiags := builder.Build(addrs.RootModuleInstance) - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - return c.state, diags - } - - // Walk it - _, walkDiags := c.walk(graph, walkImport) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return c.state, diags - } - - return c.state, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go deleted file mode 100644 index b99f1afacf..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go +++ /dev/null @@ -1,251 +0,0 @@ -package terraform - -import ( - "context" - "fmt" - "log" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Input asks for input to fill variables and provider configurations. -// This modifies the configuration in-place, so asking for Input twice -// may result in different UI output showing different current values. -func (c *Context) Input(mode InputMode) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - defer c.acquireRun("input")() - - if c.uiInput == nil { - log.Printf("[TRACE] Context.Input: uiInput is nil, so skipping") - return diags - } - - ctx := context.Background() - - if mode&InputModeVar != 0 { - log.Printf("[TRACE] Context.Input: Prompting for variables") - - // Walk the variables first for the root module. We walk them in - // alphabetical order for UX reasons. - configs := c.config.Module.Variables - names := make([]string, 0, len(configs)) - for name := range configs { - names = append(names, name) - } - sort.Strings(names) - Variables: - for _, n := range names { - v := configs[n] - - // If we only care about unset variables, then we should skip any - // variable that is already set.
- if mode&InputModeVarUnset != 0 { - if _, isSet := c.variables[n]; isSet { - continue - } - } - - // this should only happen during tests - if c.uiInput == nil { - log.Println("[WARN] Context.uiInput is nil during input walk") - continue - } - - // Ask the user for a value for this variable - var rawValue string - retry := 0 - for { - var err error - rawValue, err = c.uiInput.Input(ctx, &InputOpts{ - Id: fmt.Sprintf("var.%s", n), - Query: fmt.Sprintf("var.%s", n), - Description: v.Description, - }) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to request interactive input", - fmt.Sprintf("Terraform attempted to request a value for var.%s interactively, but encountered an error: %s.", n, err), - )) - return diags - } - - if rawValue == "" && v.Default == cty.NilVal { - // Redo if it is required, but abort if we keep getting - // blank entries - if retry > 2 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Required variable not assigned", - fmt.Sprintf("The variable %q is required, so Terraform cannot proceed without a defined value for it.", n), - )) - continue Variables - } - retry++ - continue - } - - break - } - - val, valDiags := v.ParsingMode.Parse(n, rawValue) - diags = diags.Append(valDiags) - if diags.HasErrors() { - continue - } - - c.variables[n] = &InputValue{ - Value: val, - SourceType: ValueFromInput, - } - } - } - - if mode&InputModeProvider != 0 { - log.Printf("[TRACE] Context.Input: Prompting for provider arguments") - - // We prompt for input only for provider configurations defined in - // the root module. At the time of writing that is an arbitrary - // restriction, but we have future plans to support "count" and - // "for_each" on modules that will then prevent us from supporting - // input for child module configurations anyway (since we'd need to - // dynamic-expand first), and provider configurations in child modules - // are not recommended since v0.11 anyway, so this restriction allows - // us to keep this relatively simple without significant hardship. - - pcs := make(map[string]*configs.Provider) - pas := make(map[string]addrs.ProviderConfig) - for _, pc := range c.config.Module.ProviderConfigs { - addr := pc.Addr() - pcs[addr.String()] = pc - pas[addr.String()] = addr - log.Printf("[TRACE] Context.Input: Provider %s declared at %s", addr, pc.DeclRange) - } - // We also need to detect _implied_ provider configs from resources. - // These won't have *configs.Provider objects, but they will still - // exist in the map and we'll just treat them as empty below. 
- for _, rc := range c.config.Module.ManagedResources { - pa := rc.ProviderConfigAddr() - if pa.Alias != "" { - continue // alias configurations cannot be implied - } - if _, exists := pcs[pa.String()]; !exists { - pcs[pa.String()] = nil - pas[pa.String()] = pa - log.Printf("[TRACE] Context.Input: Provider %s implied by resource block at %s", pa, rc.DeclRange) - } - } - for _, rc := range c.config.Module.DataResources { - pa := rc.ProviderConfigAddr() - if pa.Alias != "" { - continue // alias configurations cannot be implied - } - if _, exists := pcs[pa.String()]; !exists { - pcs[pa.String()] = nil - pas[pa.String()] = pa - log.Printf("[TRACE] Context.Input: Provider %s implied by data block at %s", pa, rc.DeclRange) - } - } - - for pk, pa := range pas { - pc := pcs[pk] // will be nil if this is an implied config - - // Wrap the input into a namespace - input := &PrefixUIInput{ - IdPrefix: pk, - QueryPrefix: pk + ".", - UIInput: c.uiInput, - } - - schema := c.schemas.ProviderConfig(pa.Type) - if schema == nil { - // Could either be an incorrect config or just an incomplete - // mock in tests. We'll let a later pass decide, and just - // ignore this for the purposes of gathering input. - log.Printf("[TRACE] Context.Input: No schema available for provider type %q", pa.Type) - continue - } - - // For our purposes here we just want to detect if attributes are - // set in config at all, so rather than doing a full decode - // (which would require us to prepare an evalcontext, etc) we'll - // use the low-level HCL API to process only the top-level - // structure. - var attrExprs hcl.Attributes // nil if there is no config - if pc != nil && pc.Config != nil { - lowLevelSchema := schemaForInputSniffing(hcldec.ImpliedSchema(schema.DecoderSpec())) - content, _, diags := pc.Config.PartialContent(lowLevelSchema) - if diags.HasErrors() { - log.Printf("[TRACE] Context.Input: %s has decode error, so ignoring: %s", pa, diags.Error()) - continue - } - attrExprs = content.Attributes - } - - keys := make([]string, 0, len(schema.Attributes)) - for key := range schema.Attributes { - keys = append(keys, key) - } - sort.Strings(keys) - - vals := map[string]cty.Value{} - for _, key := range keys { - attrS := schema.Attributes[key] - if attrS.Optional { - continue - } - if attrExprs != nil { - if _, exists := attrExprs[key]; exists { - continue - } - } - if !attrS.Type.Equals(cty.String) { - continue - } - - log.Printf("[TRACE] Context.Input: Prompting for %s argument %s", pa, key) - rawVal, err := input.Input(ctx, &InputOpts{ - Id: key, - Query: key, - Description: attrS.Description, - }) - if err != nil { - log.Printf("[TRACE] Context.Input: Failed to prompt for %s argument %s: %s", pa, key, err) - continue - } - - vals[key] = cty.StringVal(rawVal) - } - - c.providerInputConfig[pk] = vals - log.Printf("[TRACE] Context.Input: Input for %s: %#v", pk, vals) - } - } - - return diags -} - -// schemaForInputSniffing returns a transformed version of a given schema -// that marks all attributes as optional, which the Context.Input method can -// use to detect whether a required argument is set without missing arguments -// themselves generating errors.
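The sniffing trick above decodes only the top-level structure: every attribute is presented as optional, so PartialContent reports what is actually set without erroring on what is missing. A self-contained illustration of that technique using the HCL API directly (the config snippet and attribute names are invented for the example):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// A config body that sets only one of two known arguments.
	src := []byte("token = \"abc\"\n")

	f, diags := hclsyntax.ParseConfig(src, "example.tf", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// All attributes marked optional, as schemaForInputSniffing does, so
	// the decode cannot fail just because an argument is absent.
	sniff := &hcl.BodySchema{Attributes: []hcl.AttributeSchema{
		{Name: "token", Required: false},
		{Name: "organization", Required: false},
	}}

	content, _, moreDiags := f.Body.PartialContent(sniff)
	if moreDiags.HasErrors() {
		panic(moreDiags.Error())
	}
	for name := range content.Attributes {
		fmt.Printf("%s is set in config\n", name) // token is set in config
	}
}
```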
-func schemaForInputSniffing(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), - Blocks: schema.Blocks, - } - - for i, attrS := range schema.Attributes { - ret.Attributes[i] = attrS - ret.Attributes[i].Required = false - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/diff.go deleted file mode 100644 index fd5b389b06..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/diff.go +++ /dev/null @@ -1,1441 +0,0 @@ -package terraform - -import ( - "bufio" - "bytes" - "fmt" - "log" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/zclconf/go-cty/cty" - - "github.com/mitchellh/copystructure" -) - -// DiffChangeType is an enum with the kind of changes a diff has planned. -type DiffChangeType byte - -const ( - DiffInvalid DiffChangeType = iota - DiffNone - DiffCreate - DiffUpdate - DiffDestroy - DiffDestroyCreate - - // DiffRefresh is only used in the UI for displaying diffs. - // Managed resource reads never appear in plan, and when data source - // reads appear they are represented as DiffCreate in core before - // transforming to DiffRefresh in the UI layer. - DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion -) - -// multiVal matches the index key to a flatmapped set, list or map -var multiVal = regexp.MustCompile(`\.(#|%)$`) - -// Diff tracks the changes that are necessary to apply a configuration -// to an existing infrastructure. -type Diff struct { - // Modules contains all the modules that have a diff - Modules []*ModuleDiff -} - -// Prune cleans out unused structures in the diff without affecting -// the behavior of the diff at all. -// -// This is not safe to call concurrently. This is safe to call on a -// nil Diff. -func (d *Diff) Prune() { - if d == nil { - return - } - - // Prune all empty modules - newModules := make([]*ModuleDiff, 0, len(d.Modules)) - for _, m := range d.Modules { - // If the module isn't empty, we keep it - if !m.Empty() { - newModules = append(newModules, m) - } - } - if len(newModules) == 0 { - newModules = nil - } - d.Modules = newModules -} - -// AddModule adds the module with the given path to the diff. -// -// This should be the preferred method to add module diffs since it -// allows us to optimize lookups later as well as control sorting. -func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff { - // Lower the new-style address into a legacy-style address. - // This requires that none of the steps have instance keys, which is - // true for all addresses at the time of implementing this because - // "count" and "for_each" are not yet implemented for modules. - legacyPath := make([]string, len(path)) - for i, step := range path { - if step.InstanceKey != addrs.NoKey { - // FIXME: Once the rest of Terraform is ready to use count and - // for_each, remove all of this and just write the addrs.ModuleInstance - // value itself into the ModuleState. 
- panic("diff cannot represent modules with count or for_each keys") - } - - legacyPath[i] = step.Name - } - - m := &ModuleDiff{Path: legacyPath} - m.init() - d.Modules = append(d.Modules, m) - return m -} - -// ModuleByPath is used to lookup the module diff for the given path. -// This should be the preferred lookup mechanism as it allows for future -// lookup optimizations. -func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff { - if d == nil { - return nil - } - for _, mod := range d.Modules { - if mod.Path == nil { - panic("missing module path") - } - modPath := normalizeModulePath(mod.Path) - if modPath.String() == path.String() { - return mod - } - } - return nil -} - -// RootModule returns the ModuleState for the root module -func (d *Diff) RootModule() *ModuleDiff { - root := d.ModuleByPath(addrs.RootModuleInstance) - if root == nil { - panic("missing root module") - } - return root -} - -// Empty returns true if the diff has no changes. -func (d *Diff) Empty() bool { - if d == nil { - return true - } - - for _, m := range d.Modules { - if !m.Empty() { - return false - } - } - - return true -} - -// Equal compares two diffs for exact equality. -// -// This is different from the Same comparison that is supported which -// checks for operation equality taking into account computed values. Equal -// instead checks for exact equality. -func (d *Diff) Equal(d2 *Diff) bool { - // If one is nil, they must both be nil - if d == nil || d2 == nil { - return d == d2 - } - - // Sort the modules - sort.Sort(moduleDiffSort(d.Modules)) - sort.Sort(moduleDiffSort(d2.Modules)) - - // Copy since we have to modify the module destroy flag to false so - // we don't compare that. TODO: delete this when we get rid of the - // destroy flag on modules. - dCopy := d.DeepCopy() - d2Copy := d2.DeepCopy() - for _, m := range dCopy.Modules { - m.Destroy = false - } - for _, m := range d2Copy.Modules { - m.Destroy = false - } - - // Use DeepEqual - return reflect.DeepEqual(dCopy, d2Copy) -} - -// DeepCopy performs a deep copy of all parts of the Diff, making the -// resulting Diff safe to use without modifying this one. -func (d *Diff) DeepCopy() *Diff { - copy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - panic(err) - } - - return copy.(*Diff) -} - -func (d *Diff) String() string { - var buf bytes.Buffer - - keys := make([]string, 0, len(d.Modules)) - lookup := make(map[string]*ModuleDiff) - for _, m := range d.Modules { - addr := normalizeModulePath(m.Path) - key := addr.String() - keys = append(keys, key) - lookup[key] = m - } - sort.Strings(keys) - - for _, key := range keys { - m := lookup[key] - mStr := m.String() - - // If we're the root module, we just write the output directly. - if reflect.DeepEqual(m.Path, rootModulePath) { - buf.WriteString(mStr + "\n") - continue - } - - buf.WriteString(fmt.Sprintf("%s:\n", key)) - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - } - - return strings.TrimSpace(buf.String()) -} - -// ModuleDiff tracks the differences between resources to apply within -// a single module. -type ModuleDiff struct { - Path []string - Resources map[string]*InstanceDiff - Destroy bool // Set only by the destroy plan -} - -func (d *ModuleDiff) init() { - if d.Resources == nil { - d.Resources = make(map[string]*InstanceDiff) - } - for _, r := range d.Resources { - r.init() - } -} - -// ChangeType returns the type of changes that the diff for this -// module includes. 
-// -// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or -// DiffCreate. If an instance within the module has a DiffDestroyCreate -// then this will register as a DiffCreate for a module. -func (d *ModuleDiff) ChangeType() DiffChangeType { - result := DiffNone - for _, r := range d.Resources { - change := r.ChangeType() - switch change { - case DiffCreate, DiffDestroy: - if result == DiffNone { - result = change - } - case DiffDestroyCreate, DiffUpdate: - result = DiffUpdate - } - } - - return result -} - -// Empty returns true if the diff has no changes within this module. -func (d *ModuleDiff) Empty() bool { - if d.Destroy { - return false - } - - if len(d.Resources) == 0 { - return true - } - - for _, rd := range d.Resources { - if !rd.Empty() { - return false - } - } - - return true -} - -// Instances returns the instance diffs for the id given. This can return -// multiple instance diffs if there are counts within the resource. -func (d *ModuleDiff) Instances(id string) []*InstanceDiff { - var result []*InstanceDiff - for k, diff := range d.Resources { - if k == id || strings.HasPrefix(k, id+".") { - if !diff.Empty() { - result = append(result, diff) - } - } - } - - return result -} - -// IsRoot says whether or not this module diff is for the root module. -func (d *ModuleDiff) IsRoot() bool { - return reflect.DeepEqual(d.Path, rootModulePath) -} - -// String outputs the diff in a long but command-line friendly output -// format that users can read to quickly inspect a diff. -func (d *ModuleDiff) String() string { - var buf bytes.Buffer - - names := make([]string, 0, len(d.Resources)) - for name := range d.Resources { - names = append(names, name) - } - sort.Strings(names) - - for _, name := range names { - rdiff := d.Resources[name] - - crud := "UPDATE" - switch { - case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): - crud = "DESTROY/CREATE" - case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): - crud = "DESTROY" - case rdiff.RequiresNew(): - crud = "CREATE" - } - - extra := "" - if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { - extra = " (deposed only)" - } - - buf.WriteString(fmt.Sprintf( - "%s: %s%s\n", - crud, - name, - extra)) - - keyLen := 0 - rdiffAttrs := rdiff.CopyAttributes() - keys := make([]string, 0, len(rdiffAttrs)) - for key := range rdiffAttrs { - if key == "id" { - continue - } - - keys = append(keys, key) - if len(key) > keyLen { - keyLen = len(key) - } - } - sort.Strings(keys) - - for _, attrK := range keys { - attrDiff, _ := rdiff.GetAttribute(attrK) - - v := attrDiff.New - u := attrDiff.Old - if attrDiff.NewComputed { - v = "" - } - - if attrDiff.Sensitive { - u = "" - v = "" - } - - updateMsg := "" - if attrDiff.RequiresNew { - updateMsg = " (forces new resource)" - } else if attrDiff.Sensitive { - updateMsg = " (attribute changed)" - } - - buf.WriteString(fmt.Sprintf( - " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, - v, - updateMsg)) - } - } - - return buf.String() -} - -// InstanceDiff is the diff of a resource from some state to another. -type InstanceDiff struct { - mu sync.Mutex - Attributes map[string]*ResourceAttrDiff - Destroy bool - DestroyDeposed bool - DestroyTainted bool - - // Meta is a simple K/V map that is stored in a diff and persisted to - // plans but otherwise is completely ignored by Terraform core. It is - // meant to be used for additional data a resource may want to pass through. 
- // The value here must only contain Go primitives and collections. - Meta map[string]interface{} -} - -func (d *InstanceDiff) Lock() { d.mu.Lock() } -func (d *InstanceDiff) Unlock() { d.mu.Unlock() } - -// ApplyToValue merges the receiver into the given base value, returning a -// new value that incorporates the planned changes. The given value must -// conform to the given schema, or this method will panic. -// -// This method is intended for shimming old subsystems that still use this -// legacy diff type to work with the new-style types. -func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) { - // Create InstanceState attributes from our existing state. - // We can use this to more easily apply the diff changes. - attrs := hcl2shim.FlatmapValueFromHCL2(base) - applied, err := d.Apply(attrs, schema) - if err != nil { - return base, err - } - - val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType()) - if err != nil { - return base, err - } - - return schema.CoerceValue(val) -} - -// Apply applies the diff to the provided flatmapped attributes, -// returning the new instance attributes. -// -// This method is intended for shimming old subsystems that still use this -// legacy diff type to work with the new-style types. -func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { - // We always build a new value here, even if the given diff is "empty", - // because we might be planning to create a new instance that happens - // to have no attributes set, and so we want to produce an empty object - // rather than just echoing back the null old value. - if attrs == nil { - attrs = map[string]string{} - } - - // Rather than applying the diff to mutate the attrs, we'll copy new values into - // here to avoid the possibility of leaving stale values. - result := map[string]string{} - - if d.Destroy || d.DestroyDeposed || d.DestroyTainted { - return result, nil - } - - return d.applyBlockDiff(nil, attrs, schema) -} - -func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) { - result := map[string]string{} - name := "" - if len(path) > 0 { - name = path[len(path)-1] - } - - // localPrefix is used to build the local result map - localPrefix := "" - if name != "" { - localPrefix = name + "." - } - - // iterate over the schema rather than the attributes, so we can handle - // different block types separately from plain attributes - for n, attrSchema := range schema.Attributes { - var err error - newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema) - - if err != nil { - return result, err - } - - for k, v := range newAttrs { - result[localPrefix+k] = v - } - } - - blockPrefix := strings.Join(path, ".") - if blockPrefix != "" { - blockPrefix += "." - } - for n, block := range schema.BlockTypes { - // we need to find the set of all keys that traverse this block - candidateKeys := map[string]bool{} - blockKey := blockPrefix + n + "." - localBlockPrefix := localPrefix + n + "." - - // we can only trust the diff for sets, since the path changes, so don't - // count existing values as candidate keys. If it turns out we're - // keeping the attributes, we will catch it down below with "keepBlock" - // after we check the set count.
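An editorial aside before the block-type loop continues (assumed example keys, not from the original file): the flatmap encoding walked here stores nested blocks under dotted keys, and block indexes are recovered by slicing between the block prefix and the next dot.

    // For blockKey = "ingress.", attributes such as
    //   "ingress.#":      "2"
    //   "ingress.0.port": "80"
    //   "ingress.1.port": "443"
    // yield candidateKeys {"0", "1"}; "ingress.#" itself has no dot after
    // the prefix, so the nextDot < 0 check below skips it.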
- if block.Nesting != configschema.NestingSet { - for k := range attrs { - if strings.HasPrefix(k, blockKey) { - nextDot := strings.Index(k[len(blockKey):], ".") - if nextDot < 0 { - continue - } - nextDot += len(blockKey) - candidateKeys[k[len(blockKey):nextDot]] = true - } - } - } - - for k, diff := range d.Attributes { - // helper/schema should not insert nil diff values, but don't panic - // if it does. - if diff == nil { - continue - } - - if strings.HasPrefix(k, blockKey) { - nextDot := strings.Index(k[len(blockKey):], ".") - if nextDot < 0 { - continue - } - - if diff.NewRemoved { - continue - } - - nextDot += len(blockKey) - candidateKeys[k[len(blockKey):nextDot]] = true - } - } - - // check each set candidate to see if it was removed. - // we need to do this, because when entire sets are removed, they may - // have the wrong key, and only show diffs going to "" - if block.Nesting == configschema.NestingSet { - for k := range candidateKeys { - indexPrefix := strings.Join(append(path, n, k), ".") + "." - keep := false - // now check each set element to see if it's a new diff, or one - // that we're dropping. Since we're only applying the "New" - // portion of the set, we can ignore diffs that only contain "Old" - for attr, diff := range d.Attributes { - // helper/schema should not insert nil diff values, but don't panic - // if it does. - if diff == nil { - continue - } - - if !strings.HasPrefix(attr, indexPrefix) { - continue - } - - // check for empty "count" keys - if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" { - continue - } - - // removed items don't count either - if diff.NewRemoved { - continue - } - - // this must be a diff to keep - keep = true - break - } - if !keep { - delete(candidateKeys, k) - } - } - } - - for k := range candidateKeys { - newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block) - if err != nil { - return result, err - } - - for attr, v := range newAttrs { - result[localBlockPrefix+attr] = v - } - } - - keepBlock := true - // check this block's count diff directly first, since we may not - // have candidates because it was removed and only set to "0" - if diff, ok := d.Attributes[blockKey+"#"]; ok { - if diff.New == "0" || diff.NewRemoved { - keepBlock = false - } - } - - // if there was no diff at all, then we need to keep the block attributes - if len(candidateKeys) == 0 && keepBlock { - for k, v := range attrs { - if strings.HasPrefix(k, blockKey) { - // we need the key relative to this block, so remove the - // entire prefix, then re-insert the block name. - localKey := localBlockPrefix + k[len(blockKey):] - result[localKey] = v - } - } - } - - countAddr := strings.Join(append(path, n, "#"), ".") - if countDiff, ok := d.Attributes[countAddr]; ok { - if countDiff.NewComputed { - result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue - } else { - result[localBlockPrefix+"#"] = countDiff.New - - // While sets are complete, lists are not, and we may not have all the - // information to track removals. If the list was truncated, we need to - // remove the extra items from the result.
- if block.Nesting == configschema.NestingList && - countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue { - length, _ := strconv.Atoi(countDiff.New) - for k := range result { - if !strings.HasPrefix(k, localBlockPrefix) { - continue - } - - index := k[len(localBlockPrefix):] - nextDot := strings.Index(index, ".") - if nextDot < 1 { - continue - } - index = index[:nextDot] - i, err := strconv.Atoi(index) - if err != nil { - // this shouldn't happen since we added these - // ourselves, but make note of it just in case. - log.Printf("[ERROR] bad list index in %q: %s", k, err) - continue - } - if i >= length { - delete(result, k) - } - } - } - } - } else if origCount, ok := attrs[countAddr]; ok && keepBlock { - result[localBlockPrefix+"#"] = origCount - } else { - result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) - } - } - - return result, nil -} - -func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - ty := attrSchema.Type - switch { - case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): - return d.applyCollectionDiff(path, attrs, attrSchema) - case ty.IsSetType(): - return d.applySetDiff(path, attrs, attrSchema) - default: - return d.applySingleAttrDiff(path, attrs, attrSchema) - } -} - -func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - currentKey := strings.Join(path, ".") - - attr := path[len(path)-1] - - result := map[string]string{} - diff := d.Attributes[currentKey] - old, exists := attrs[currentKey] - - if diff != nil && diff.NewComputed { - result[attr] = hcl2shim.UnknownVariableValue - return result, nil - } - - // "id" must exist and not be an empty string, or it must be unknown. - // This only applies to top-level "id" fields. - if attr == "id" && len(path) == 1 { - if old == "" { - result[attr] = hcl2shim.UnknownVariableValue - } else { - result[attr] = old - } - return result, nil - } - - // attribute diffs are sometimes missed, so assume no diff means keep the - // old value - if diff == nil { - if exists { - result[attr] = old - } else { - // We need required values, so set those with an empty value. It - // must be set in the config, since if it were missing it would have - // failed validation. - if attrSchema.Required { - // we only set a missing string here, since bool or number types - // would have a distinct zero value which shouldn't have been - // lost.
- if attrSchema.Type == cty.String { - result[attr] = "" - } - } - } - return result, nil - } - - // check for mismatched diff values - if exists && - old != diff.Old && - old != hcl2shim.UnknownVariableValue && - diff.Old != hcl2shim.UnknownVariableValue { - return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) - } - - if diff.NewRemoved { - // don't set anything in the new value - return map[string]string{}, nil - } - - if diff.Old == diff.New && diff.New == "" { - // this can only be a valid empty string - if attrSchema.Type == cty.String { - result[attr] = "" - } - return result, nil - } - - if attrSchema.Computed && diff.NewComputed { - result[attr] = hcl2shim.UnknownVariableValue - return result, nil - } - - result[attr] = diff.New - - return result, nil -} - -func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - result := map[string]string{} - - prefix := "" - if len(path) > 1 { - prefix = strings.Join(path[:len(path)-1], ".") + "." - } - - name := "" - if len(path) > 0 { - name = path[len(path)-1] - } - - currentKey := prefix + name - - // check the index first for special handling - for k, diff := range d.Attributes { - // check the index value, which can be set, and 0 - if k == currentKey+".#" || k == currentKey+".%" || k == currentKey { - if diff.NewRemoved { - return result, nil - } - - if diff.NewComputed { - result[k[len(prefix):]] = hcl2shim.UnknownVariableValue - return result, nil - } - - // do what the diff tells us to here, so that it's consistent with applies - if diff.New == "0" { - result[k[len(prefix):]] = "0" - return result, nil - } - } - } - - // collect all the keys from the diff and the old state - noDiff := true - keys := map[string]bool{} - for k := range d.Attributes { - if !strings.HasPrefix(k, currentKey+".") { - continue - } - noDiff = false - keys[k] = true - } - - noAttrs := true - for k := range attrs { - if !strings.HasPrefix(k, currentKey+".") { - continue - } - noAttrs = false - keys[k] = true - } - - // If there's no diff and no attrs, then there's no value at all. - // This prevents an unexpected zero-count attribute in the attributes. - if noDiff && noAttrs { - return result, nil - } - - idx := "#" - if attrSchema.Type.IsMapType() { - idx = "%" - } - - for k := range keys { - // generate a schema placeholder for the values - elSchema := &configschema.Attribute{ - Type: attrSchema.Type.ElementType(), - } - - res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema) - if err != nil { - return result, err - } - - for k, v := range res { - result[name+"."+k] = v - } - } - - // Just like in nested list blocks, for simple lists we may need to fill in - // missing empty strings. - countKey := name + "."
+ idx - count := result[countKey] - length, _ := strconv.Atoi(count) - - if count != "" && count != hcl2shim.UnknownVariableValue && - attrSchema.Type.Equals(cty.List(cty.String)) { - // insert empty strings into missing indexes - for i := 0; i < length; i++ { - key := fmt.Sprintf("%s.%d", name, i) - if _, ok := result[key]; !ok { - result[key] = "" - } - } - } - - // now check for truncation in any type of list - if attrSchema.Type.IsListType() { - for key := range result { - if key == countKey { - continue - } - - if len(key) <= len(name)+1 { - // not sure what this is, but don't panic - continue - } - - index := key[len(name)+1:] - - // It is possible to have nested sets or maps, so look for another dot - dot := strings.Index(index, ".") - if dot > 0 { - index = index[:dot] - } - - // This shouldn't have any more dots, since the element type is only string. - num, err := strconv.Atoi(index) - if err != nil { - log.Printf("[ERROR] bad list index in %q: %s", currentKey, err) - continue - } - - if num >= length { - delete(result, key) - } - } - } - - // Fill in the count value if it wasn't present in the diff for some reason, - // or if there is no count at all. - _, countDiff := d.Attributes[countKey] - if result[countKey] == "" || (!countDiff && len(keys) != len(result)) { - result[countKey] = countFlatmapContainerValues(countKey, result) - } - - return result, nil -} - -func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - // We only need this special behavior for sets of object. - if !attrSchema.Type.ElementType().IsObjectType() { - // The normal collection apply behavior will work okay for this one, then. - return d.applyCollectionDiff(path, attrs, attrSchema) - } - - // When we're dealing with a set of an object type we actually want to - // use our normal _block type_ apply behaviors, so we'll construct ourselves - // a synthetic schema that treats the object type as a block type and - // then delegate to our block apply method. - synthSchema := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute), - } - - for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { - // We can safely make everything into an attribute here because in the - // event that there are nested set attributes we'll end up back in - // here again recursively and can then deal with the next level of - // expansion. - synthSchema.Attributes[name] = &configschema.Attribute{ - Type: ty, - Optional: true, - } - } - - parentPath := path[:len(path)-1] - childName := path[len(path)-1] - containerSchema := &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - childName: { - Nesting: configschema.NestingSet, - Block: *synthSchema, - }, - }, - } - - return d.applyBlockDiff(parentPath, attrs, containerSchema) -} - -// countFlatmapContainerValues returns the number of values in the flatmapped container -// (set, map, list) indexed by key. The key argument is expected to include the -// trailing ".#", or ".%". 
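A hedged usage sketch of the counting helper documented above and implemented below (editorial; the attrs map is an invented example): it counts the distinct first-level suffixes under the key's prefix, excluding the count key itself.

    attrs := map[string]string{
        "tags.%":    "2",
        "tags.env":  "prod",
        "tags.team": "core",
    }
    n := countFlatmapContainerValues("tags.%", attrs) // n == "2"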
-func countFlatmapContainerValues(key string, attrs map[string]string) string { - if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { - panic(fmt.Sprintf("invalid index value %q", key)) - } - - prefix := key[:len(key)-1] - items := map[string]int{} - - for k := range attrs { - if k == key { - continue - } - if !strings.HasPrefix(k, prefix) { - continue - } - - suffix := k[len(prefix):] - dot := strings.Index(suffix, ".") - if dot > 0 { - suffix = suffix[:dot] - } - - items[suffix]++ - } - return strconv.Itoa(len(items)) -} - -// ResourceAttrDiff is the diff of a single attribute of a resource. -type ResourceAttrDiff struct { - Old string // Old Value - New string // New Value - NewComputed bool // True if new value is computed (unknown currently) - NewRemoved bool // True if this attribute is being removed - NewExtra interface{} // Extra information for the provider - RequiresNew bool // True if change requires new resource - Sensitive bool // True if the data should not be displayed in UI output - Type DiffAttrType -} - -// Empty returns true if the diff for this attr is neutral -func (d *ResourceAttrDiff) Empty() bool { - return d.Old == d.New && !d.NewComputed && !d.NewRemoved -} - -func (d *ResourceAttrDiff) GoString() string { - return fmt.Sprintf("*%#v", *d) -} - -// DiffAttrType is an enum type that says whether a resource attribute -// diff is an input attribute (comes from the configuration) or an -// output attribute (comes as a result of applying the configuration). An -// example input would be "ami" for AWS and an example output would be -// "private_ip". -type DiffAttrType byte - -const ( - DiffAttrUnknown DiffAttrType = iota - DiffAttrInput - DiffAttrOutput -) - -func (d *InstanceDiff) init() { - if d.Attributes == nil { - d.Attributes = make(map[string]*ResourceAttrDiff) - } -} - -func NewInstanceDiff() *InstanceDiff { - return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} -} - -func (d *InstanceDiff) Copy() (*InstanceDiff, error) { - if d == nil { - return nil, nil - } - - dCopy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - return nil, err - } - - return dCopy.(*InstanceDiff), nil -} - -// ChangeType returns the DiffChangeType represented by the diff -// for this single instance. -func (d *InstanceDiff) ChangeType() DiffChangeType { - if d.Empty() { - return DiffNone - } - - if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { - return DiffDestroyCreate - } - - if d.GetDestroy() || d.GetDestroyDeposed() { - return DiffDestroy - } - - if d.RequiresNew() { - return DiffCreate - } - - return DiffUpdate -} - -// Empty returns true if this diff encapsulates no changes. -func (d *InstanceDiff) Empty() bool { - if d == nil { - return true - } - - d.mu.Lock() - defer d.mu.Unlock() - return !d.Destroy && - !d.DestroyTainted && - !d.DestroyDeposed && - len(d.Attributes) == 0 -} - -// Equal compares two diffs for exact equality. -// -// This is different from the Same comparison that is supported which -// checks for operation equality taking into account computed values. Equal -// instead checks for exact equality. 
-func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { - // If one is nil, they must both be nil - if d == nil || d2 == nil { - return d == d2 - } - - // Use DeepEqual - return reflect.DeepEqual(d, d2) -} - -// DeepCopy performs a deep copy of all parts of the InstanceDiff -func (d *InstanceDiff) DeepCopy() *InstanceDiff { - copy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - panic(err) - } - - return copy.(*InstanceDiff) -} - -func (d *InstanceDiff) GoString() string { - return fmt.Sprintf("*%#v", InstanceDiff{ - Attributes: d.Attributes, - Destroy: d.Destroy, - DestroyTainted: d.DestroyTainted, - DestroyDeposed: d.DestroyDeposed, - }) -} - -// RequiresNew returns true if the diff requires the creation of a new -// resource (implying the destruction of the old). -func (d *InstanceDiff) RequiresNew() bool { - if d == nil { - return false - } - - d.mu.Lock() - defer d.mu.Unlock() - - return d.requiresNew() -} - -func (d *InstanceDiff) requiresNew() bool { - if d == nil { - return false - } - - if d.DestroyTainted { - return true - } - - for _, rd := range d.Attributes { - if rd != nil && rd.RequiresNew { - return true - } - } - - return false -} - -func (d *InstanceDiff) GetDestroyDeposed() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.DestroyDeposed -} - -func (d *InstanceDiff) SetDestroyDeposed(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.DestroyDeposed = b -} - -// These methods are properly locked, for use outside other InstanceDiff -// methods but everywhere else within the terraform package. -// TODO refactor the locking scheme -func (d *InstanceDiff) SetTainted(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.DestroyTainted = b -} - -func (d *InstanceDiff) GetDestroyTainted() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.DestroyTainted -} - -func (d *InstanceDiff) SetDestroy(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.Destroy = b -} - -func (d *InstanceDiff) GetDestroy() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.Destroy -} - -func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) { - d.mu.Lock() - defer d.mu.Unlock() - - d.Attributes[key] = attr -} - -func (d *InstanceDiff) DelAttribute(key string) { - d.mu.Lock() - defer d.mu.Unlock() - - delete(d.Attributes, key) -} - -func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { - d.mu.Lock() - defer d.mu.Unlock() - - attr, ok := d.Attributes[key] - return attr, ok -} -func (d *InstanceDiff) GetAttributesLen() int { - d.mu.Lock() - defer d.mu.Unlock() - - return len(d.Attributes) -} - -// Safely copies the Attributes map -func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { - d.mu.Lock() - defer d.mu.Unlock() - - attrs := make(map[string]*ResourceAttrDiff) - for k, v := range d.Attributes { - attrs[k] = v - } - - return attrs -} - -// Same checks whether or not two InstanceDiff's are the "same". When -// we say "same", it is not necessarily exactly equal. Instead, it is -// just checking that the same attributes are changing, a destroy -// isn't suddenly happening, etc. -func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { - // we can safely compare the pointers without a lock - switch { - case d == nil && d2 == nil: - return true, "" - case d == nil || d2 == nil: - return false, "one nil" - case d == d2: - return true, "" - } - - d.mu.Lock() - defer d.mu.Unlock() - - // If we're going from requiring new to NOT requiring new, then we have - // to see if all required news were computed. 
If so, it is allowed since - // computed may also mean "same value and therefore not new". - oldNew := d.requiresNew() - newNew := d2.RequiresNew() - if oldNew && !newNew { - oldNew = false - - // This section builds a list of ignorable attributes for requiresNew - // by removing any elements of collections going to zero elements. - // For collections going to zero, they may not exist at all in the - // new diff (and hence RequiresNew == false). - ignoreAttrs := make(map[string]struct{}) - for k, diffOld := range d.Attributes { - if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { - continue - } - - // This case is in here as a protection measure. The bug that this - // code originally fixed (GH-11349) didn't have to deal with computed - // so I'm not 100% sure what the correct behavior is. Best to leave - // the old behavior. - if diffOld.NewComputed { - continue - } - - // We're looking for the case a map goes to exactly 0. - if diffOld.New != "0" { - continue - } - - // Found it! Ignore all of these. The prefix here is stripping - // off the "%" so it is just "k." - prefix := k[:len(k)-1] - for k2 := range d.Attributes { - if strings.HasPrefix(k2, prefix) { - ignoreAttrs[k2] = struct{}{} - } - } - } - - for k, rd := range d.Attributes { - if _, ok := ignoreAttrs[k]; ok { - continue - } - - // If the field is requires new and NOT computed, then what - // we have is a diff mismatch for sure. We set that the old - // diff does REQUIRE a ForceNew. - if rd != nil && rd.RequiresNew && !rd.NewComputed { - oldNew = true - break - } - } - } - - if oldNew != newNew { - return false, fmt.Sprintf( - "diff RequiresNew; old: %t, new: %t", oldNew, newNew) - } - - // Verify that destroy matches. The second boolean here allows us to - // have mismatching Destroy if we're moving from RequiresNew true - // to false above. Therefore, the second boolean will only pass if - // we're moving from Destroy: true to false as well. - if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { - return false, fmt.Sprintf( - "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) - } - - // Go through the old diff and make sure the new diff has all the - // same attributes. To start, build up the check map to be all the keys. - checkOld := make(map[string]struct{}) - checkNew := make(map[string]struct{}) - for k := range d.Attributes { - checkOld[k] = struct{}{} - } - for k := range d2.CopyAttributes() { - checkNew[k] = struct{}{} - } - - // Make an ordered list so we are sure the approximated hashes are left - // to process at the end of the loop - keys := make([]string, 0, len(d.Attributes)) - for k := range d.Attributes { - keys = append(keys, k) - } - sort.StringSlice(keys).Sort() - - for _, k := range keys { - diffOld := d.Attributes[k] - - if _, ok := checkOld[k]; !ok { - // We're not checking this key for whatever reason (see where - // check is modified). - continue - } - - // Remove this key since we'll never hit it again - delete(checkOld, k) - delete(checkNew, k) - - _, ok := d2.GetAttribute(k) - if !ok { - // If there's no new attribute, and the old diff expected the attribute - // to be removed, that's just fine. - if diffOld.NewRemoved { - continue - } - - // If the last diff was a computed value then the absence of - // that value is allowed since it may mean the value ended up - // being the same. - if diffOld.NewComputed { - ok = true - } - - // No exact match, but maybe this is a set containing computed - // values.
So check if there is an approximate hash in the key - and if so, try to match the key. - if strings.Contains(k, "~") { - parts := strings.Split(k, ".") - parts2 := append([]string(nil), parts...) - - re := regexp.MustCompile(`^~\d+$`) - for i, part := range parts { - if re.MatchString(part) { - // we're going to consider this the base of a - // computed hash, and remove all longer matching fields - ok = true - - parts2[i] = `\d+` - parts2 = parts2[:i+1] - break - } - } - - re, err := regexp.Compile("^" + strings.Join(parts2, `\.`)) - if err != nil { - return false, fmt.Sprintf("regexp failed to compile; err: %#v", err) - } - - for k2 := range checkNew { - if re.MatchString(k2) { - delete(checkNew, k2) - } - } - } - - // This is a little tricky, but when a diff contains a computed - // list, set, or map that can only be interpolated after the apply - // command has created the dependent resources, it could turn out - // that the result is actually the same as the existing state which - // would remove the key from the diff. - if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { - ok = true - } - - // Similarly, in a RequiresNew scenario, a list that shows up in the plan - // diff can disappear from the apply diff, which is calculated from an - // empty state. - if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { - ok = true - } - - if !ok { - return false, fmt.Sprintf("attribute mismatch: %s", k) - } - } - - // search for the suffix of the base of a [computed] map, list or set. - match := multiVal.FindStringSubmatch(k) - - if diffOld.NewComputed && len(match) == 2 { - matchLen := len(match[1]) - - // This is a computed list, set, or map, so remove any keys with - // this prefix from the check list. - kprefix := k[:len(k)-matchLen] - for k2 := range checkOld { - if strings.HasPrefix(k2, kprefix) { - delete(checkOld, k2) - } - } - for k2 := range checkNew { - if strings.HasPrefix(k2, kprefix) { - delete(checkNew, k2) - } - } - } - - // We don't compare the values because we can't currently actually - // guarantee to generate the same value for two diffs created from - // the same state+config: we have some pesky interpolation functions - // that do not behave as pure functions (uuid, timestamp) and so they - // can be different each time a diff is produced. - // FIXME: Re-organize our config handling so that we don't re-evaluate - // expressions when we produce a second comparison diff during - // apply (for EvalCompareDiff). - } - - // Check for leftover attributes - if len(checkNew) > 0 { - extras := make([]string, 0, len(checkNew)) - for attr := range checkNew { - extras = append(extras, attr) - } - return false, - fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", ")) - } - - return true, "" -} - -// moduleDiffSort implements sort.Interface to sort module diffs by path.
-type moduleDiffSort []*ModuleDiff - -func (s moduleDiffSort) Len() int { return len(s) } -func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s moduleDiffSort) Less(i, j int) bool { - a := s[i] - b := s[j] - - // If the lengths are different, then the shorter one always wins - if len(a.Path) != len(b.Path) { - return len(a.Path) < len(b.Path) - } - - // Otherwise, compare lexically - return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go deleted file mode 100644 index 17464bc063..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go +++ /dev/null @@ -1,17 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// DestroyEdge is an edge that represents a standard "destroy" relationship: -// Target depends on Source because Source is destroying. -type DestroyEdge struct { - S, T dag.Vertex -} - -func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) } -func (e *DestroyEdge) Source() dag.Vertex { return e.S } -func (e *DestroyEdge) Target() dag.Vertex { return e.T } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go deleted file mode 100644 index c490c3bcff..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go +++ /dev/null @@ -1,70 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalNode is the interface that must be implemented by graph nodes to -// evaluate/execute. -type EvalNode interface { - // Eval evaluates this node with the given context, returning the - // result of the evaluation and any error encountered. - Eval(EvalContext) (interface{}, error) -} - -// GraphNodeEvalable is the interface that graph nodes must implement -// to enable evaluation. -type GraphNodeEvalable interface { - EvalTree() EvalNode -} - -// EvalEarlyExitError is a special error return value that can be returned -// by eval nodes to signal an early exit. -type EvalEarlyExitError struct{} - -func (EvalEarlyExitError) Error() string { return "early exit" } - -// Eval evaluates the given EvalNode with the given context, properly -// evaluating all args in the correct order. -func Eval(n EvalNode, ctx EvalContext) (interface{}, error) { - // Call the lower level eval which doesn't understand early exit, - // and if we early exit, it isn't an error. - result, err := EvalRaw(n, ctx) - if err != nil { - if _, ok := err.(EvalEarlyExitError); ok { - return nil, nil - } - } - - return result, err -} - -// EvalRaw is like Eval except that it returns all errors, even if they -// signal something normal such as EvalEarlyExitError.
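For orientation, a minimal sketch of the Eval/EvalRaw contract described above (editorial; noopNode is an invented type, and ctx is assumed to be some EvalContext value):

    // A node whose evaluation requests an early exit.
    type noopNode struct{}

    func (noopNode) Eval(ctx EvalContext) (interface{}, error) {
        return nil, EvalEarlyExitError{}
    }

    // Eval(noopNode{}, ctx) returns (nil, nil): the sentinel is swallowed.
    // EvalRaw(noopNode{}, ctx) returns (nil, EvalEarlyExitError{}).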
-func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) { - path := "unknown" - if ctx != nil { - path = ctx.Path().String() - } - if path == "" { - path = "<root>" - } - - log.Printf("[TRACE] %s: eval: %T", path, n) - output, err := n.Eval(ctx) - if err != nil { - switch err.(type) { - case EvalEarlyExitError: - log.Printf("[TRACE] %s: eval: %T, early exit err: %s", path, n, err) - case tfdiags.NonFatalError: - log.Printf("[WARN] %s: eval: %T, non-fatal err: %s", path, n, err) - default: - log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err) - } - } - - return output, err -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go deleted file mode 100644 index 6beeaea984..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go +++ /dev/null @@ -1,656 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalApply is an EvalNode implementation that applies the planned change -// for a resource instance. -type EvalApply struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Dependencies []addrs.Referenceable - State **states.ResourceInstanceObject - Change **plans.ResourceInstanceChange - ProviderAddr addrs.AbsProviderConfig - Provider *providers.Interface - ProviderSchema **ProviderSchema - Output **states.ResourceInstanceObject - CreateNew *bool - Error *error -} - -// TODO: test -func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - change := *n.Change - provider := *n.Provider - state := *n.State - absAddr := n.Addr.Absolute(ctx.Path()) - - if state == nil { - state = &states.ResourceInstanceObject{} - } - - schema, _ := (*n.ProviderSchema).SchemaForResourceType(n.Addr.Resource.Mode, n.Addr.Resource.Type) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - if n.CreateNew != nil { - *n.CreateNew = (change.Action == plans.Create || change.Action.IsReplace()) - } - - configVal := cty.NullVal(cty.DynamicPseudoType) - if n.Config != nil { - var configDiags tfdiags.Diagnostics - forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - } - - if !configVal.IsWhollyKnown() { - return nil, fmt.Errorf( - "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)", - absAddr, - ) - } - - log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr.Absolute(ctx.Path()), change.Action) - resp := 
provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - PriorState: change.Before, - Config: configVal, - PlannedState: change.After, - PlannedPrivate: change.Private, - }) - applyDiags := resp.Diagnostics - if n.Config != nil { - applyDiags = applyDiags.InConfigBody(n.Config.Config) - } - diags = diags.Append(applyDiags) - - // Even if there are errors in the returned diagnostics, the provider may - // have returned a _partial_ state for an object that already exists but - // failed to fully configure, and so the remaining code must always run - // to completion but must be defensive against the new value being - // incomplete. - newVal := resp.NewState - - if newVal == cty.NilVal { - // Providers are supposed to return a partial new value even when errors - // occur, but sometimes they don't and so in that case we'll patch that up - // by just using the prior state, so we'll at least keep track of the - // object for the user to retry. - newVal = change.Before - - // As a special case, we'll set the new value to null if it looks like - // we were trying to execute a delete, because the provider in this case - // probably left the newVal unset intending it to be interpreted as "null". - if change.After.IsNull() { - newVal = cty.NullVal(schema.ImpliedType()) - } - - // Ideally we'd produce an error or warning here if newVal is nil and - // there are no errors in diags, because that indicates a buggy - // provider not properly reporting its result, but unfortunately many - // of our historical test mocks behave in this way and so producing - // a diagnostic here fails hundreds of tests. Instead, we must just - // silently retain the old value for now. Returning a nil value with - // no errors is still always considered a bug in the provider though, - // and should be fixed for any "real" providers that do it. - } - - var conformDiags tfdiags.Diagnostics - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - conformDiags = conformDiags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid value after apply for %s. The result cannot be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - diags = diags.Append(conformDiags) - if conformDiags.HasErrors() { - // Bail early in this particular case, because an object that doesn't - // conform to the schema can't be saved in the state anyway -- the - // serializer will reject it. - return nil, diags.Err() - } - - // After this point we have a type-conforming result object and so we - // must always run to completion to ensure it can be saved. If n.Error - // is set then we must not return a non-nil error, in order to allow - // evaluation to continue to a later point where our state object will - // be saved. - - // By this point there must not be any unknown values remaining in our - // object, because we've applied the change and we can't save unknowns - // in our persistent state. If any are present then we will indicate an - // error (which is always a bug in the provider) but we will also replace - // them with nulls so that we can successfully save the portions of the - // returned value that are known.
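An editorial note on the normalization performed just below (illustrative values, not from the original file): go-cty's UnknownAsNull converts any remaining unknown leaves to nulls of the same type so the object can be serialized into state.

    val := cty.ObjectVal(map[string]cty.Value{
        "id":  cty.StringVal("i-123"),
        "arn": cty.UnknownVal(cty.String),
    })
    saved := cty.UnknownAsNull(val)
    // saved's "arn" is now cty.NullVal(cty.String), while known values
    // such as "id" are preserved.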
- if !newVal.IsWhollyKnown() { - // To generate better error messages, we'll go for a walk through the - // value and make a separate diagnostic for each unknown value we - // find. - cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) { - if !val.IsKnown() { - pathStr := tfdiags.FormatCtyPath(path) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.", - n.Addr.Absolute(ctx.Path()), pathStr, - ), - )) - } - return true, nil - }) - - // NOTE: This operation can potentially be lossy if there are multiple - // elements in a set that differ only by unknown values: after - // replacing with null these will be merged together into a single set - // element. Since we can only get here in the presence of a provider - // bug, we accept this because storing a result here is always a - // best-effort sort of thing. - newVal = cty.UnknownAsNull(newVal) - } - - if change.Action != plans.Delete && !diags.HasErrors() { - // Only values that were marked as unknown in the planned value are allowed - // to change during the apply operation. (We do this after the unknown-ness - // check above so that we also catch anything that became unknown after - // being known during plan.) - // - // If we are returning other errors anyway then we'll give this - // a pass since the other errors are usually the explanation for - // this one and so it's more helpful to let the user focus on the - // root cause rather than distract with this extra problem. - if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 { - if resp.LegacyTypeSystem { - // The shimming of the old type system in the legacy SDK is not precise - // enough to pass this consistency check, so we'll give it a pass here, - // but we will generate a warning about it so that we are more likely - // to notice in the logs if an inconsistency beyond the type system - // leads to a downstream provider failure. - var buf strings.Builder - fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) - for _, err := range errs { - fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) - } - log.Print(buf.String()) - - // The sort of inconsistency we won't catch here is if a known value - // in the plan is changed during apply. That can cause downstream - // problems because a dependent resource would make its own plan based - // on the planned value, and thus get a different result during the - // apply phase. This will usually lead to a "Provider produced invalid plan" - // error that incorrectly blames the downstream resource for the change. 
- - } else { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent result after apply", - fmt.Sprintf( - "When applying changes to %s, provider %q produced an unexpected new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), - ), - )) - } - } - } - } - - // If a provider returns a null or non-null object at the wrong time then - // we still want to save that but it often causes some confusing behaviors - // where it seems like Terraform is failing to take any action at all, - // so we'll generate some errors to draw attention to it. - if !diags.HasErrors() { - if change.Action == plans.Delete && !newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.", - change.Action, n.Addr.Absolute(ctx.Path()), - ), - )) - } - if change.Action != plans.Delete && newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After applying a %s plan, the provider returned a null object for %s. Only destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.", - change.Action, n.Addr.Absolute(ctx.Path()), - ), - )) - } - } - - // Sometimes providers return a null value when an operation fails for some - // reason, but we'd rather keep the prior state so that the error can be - // corrected on a subsequent run. We must only do this for null new value - // though, or else we may discard partial updates the provider was able to - // complete. - if diags.HasErrors() && newVal.IsNull() { - // Otherwise, we'll continue but using the prior state as the new value, - // making this effectively a no-op. If the item really _has_ been - // deleted then our next refresh will detect that and fix it up. - // If change.Action is Create then change.Before will also be null, - // which is fine. - newVal = change.Before - } - - var newState *states.ResourceInstanceObject - if !newVal.IsNull() { // null value indicates that the object is deleted, so we won't set a new state in that case - newState = &states.ResourceInstanceObject{ - Status: states.ObjectReady, - Value: newVal, - Private: resp.Private, - Dependencies: n.Dependencies, // Should be populated by the caller from the StateDependencies method on the resource instance node - } - } - - // Write the final state - if n.Output != nil { - *n.Output = newState - } - - if diags.HasErrors() { - // If the caller provided an error pointer then they are expected to - // handle the error some other way and we treat our own result as - // success. 
- if n.Error != nil { - err := diags.Err() - *n.Error = err - log.Printf("[DEBUG] %s: apply errored, but we're indicating that via the Error pointer rather than returning it: %s", n.Addr.Absolute(ctx.Path()), err) - return nil, nil - } - } - - return nil, diags.ErrWithWarnings() -} - -// EvalApplyPre is an EvalNode implementation that does the pre-Apply work -type EvalApplyPre struct { - Addr addrs.ResourceInstance - Gen states.Generation - State **states.ResourceInstanceObject - Change **plans.ResourceInstanceChange -} - -// TODO: test -func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { - change := *n.Change - absAddr := n.Addr.Absolute(ctx.Path()) - - if change == nil { - panic(fmt.Sprintf("EvalApplyPre for %s called with nil Change", absAddr)) - } - - if resourceHasUserVisibleApply(n.Addr) { - priorState := change.Before - plannedNewState := change.After - - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreApply(absAddr, n.Gen, change.Action, priorState, plannedNewState) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// EvalApplyPost is an EvalNode implementation that does the post-Apply work -type EvalApplyPost struct { - Addr addrs.ResourceInstance - Gen states.Generation - State **states.ResourceInstanceObject - Error *error -} - -// TODO: test -func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - - if resourceHasUserVisibleApply(n.Addr) { - absAddr := n.Addr.Absolute(ctx.Path()) - var newState cty.Value - if state != nil { - newState = state.Value - } else { - newState = cty.NullVal(cty.DynamicPseudoType) - } - var err error - if n.Error != nil { - err = *n.Error - } - - hookErr := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostApply(absAddr, n.Gen, newState, err) - }) - if hookErr != nil { - return nil, hookErr - } - } - - return nil, *n.Error -} - -// EvalMaybeTainted is an EvalNode that takes the planned change, new value, -// and possible error from an apply operation and produces a new instance -// object marked as tainted if it appears that a create operation has failed. -// -// This EvalNode never returns an error, to ensure that a subsequent EvalNode -// can still record the possibly-tainted object in the state. -type EvalMaybeTainted struct { - Addr addrs.ResourceInstance - Gen states.Generation - Change **plans.ResourceInstanceChange - State **states.ResourceInstanceObject - Error *error - - // If StateOutput is not nil, its referent will be assigned either the same - // pointer as State or a new object with its status set as Tainted, - // depending on whether an error is given and if this was a create action. - StateOutput **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalMaybeTainted) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - change := *n.Change - err := *n.Error - - if state != nil && state.Status == states.ObjectTainted { - log.Printf("[TRACE] EvalMaybeTainted: %s was already tainted, so nothing to do", n.Addr.Absolute(ctx.Path())) - return nil, nil - } - - if n.StateOutput != nil { - if err != nil && change.Action == plans.Create { - // If there are errors during a _create_ then the object is - // in an undefined state, and so we'll mark it as tainted so - // we can try again on the next run. - // - // We don't do this for other change actions because errors - // during updates will often not change the remote object at all. 
- // If there _were_ changes prior to the error, it's the provider's - // responsibility to record the effect of those changes in the - // object value it returned. - log.Printf("[TRACE] EvalMaybeTainted: %s encountered an error during creation, so it is now marked as tainted", n.Addr.Absolute(ctx.Path())) - *n.StateOutput = state.AsTainted() - } else { - *n.StateOutput = state - } - } - - return nil, nil -} - -// resourceHasUserVisibleApply returns true if the given resource is one where -// apply actions should be exposed to the user. -// -// Certain resources do apply actions only as an implementation detail, so -// these should not be advertised to code outside of this package. -func resourceHasUserVisibleApply(addr addrs.ResourceInstance) bool { - // Only managed resources have user-visible apply actions. - // In particular, this excludes data resources since we "apply" these - // only as an implementation detail of removing them from state when - // they are destroyed. (When reading, they don't get here at all because - // we present them as "Refresh" actions.) - return addr.ContainingResource().Mode == addrs.ManagedResourceMode -} - -// EvalApplyProvisioners is an EvalNode implementation that executes -// the provisioners for a resource. -// -// TODO(mitchellh): This should probably be split up into a more fine-grained -// ApplyProvisioner (single) that is looped over. -type EvalApplyProvisioners struct { - Addr addrs.ResourceInstance - State **states.ResourceInstanceObject - ResourceConfig *configs.Resource - CreateNew *bool - Error *error - - // When is the type of provisioner to run at this point - When configs.ProvisionerWhen -} - -// TODO: test -func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := *n.State - if state == nil { - log.Printf("[TRACE] EvalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr) - return nil, nil - } - if n.When == configs.ProvisionerWhenCreate && n.CreateNew != nil && !*n.CreateNew { - // If we're not creating a new resource, then don't run provisioners - log.Printf("[TRACE] EvalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr) - return nil, nil - } - if state.Status == states.ObjectTainted { - // No point in provisioning an object that is already tainted, since - // it's going to get recreated on the next apply anyway. - log.Printf("[TRACE] EvalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr) - return nil, nil - } - - provs := n.filterProvisioners() - if len(provs) == 0 { - // We have no provisioners, so don't do anything - return nil, nil - } - - if n.Error != nil && *n.Error != nil { - // We're already tainted, so just return out - return nil, nil - } - - { - // Call pre hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvisionInstance(absAddr, state.Value) - }) - if err != nil { - return nil, err - } - } - - // If provisioning fails, we append the error to our output error - // if we have one; otherwise we just return it.
- // If provisioning fails, we append the error to the caller's error pointer - // if we have one; otherwise we return it directly. - err := n.apply(ctx, provs) - if err != nil { - if n.Error == nil { - return nil, err - } - *n.Error = multierror.Append(*n.Error, err) - log.Printf("[TRACE] EvalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", absAddr) - return nil, nil - } - - { - // Call post hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvisionInstance(absAddr, state.Value) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// filterProvisioners filters the provisioners on the resource to only -// the provisioners specified by the "when" option. -func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner { - // Fast path the zero case - if n.ResourceConfig == nil || n.ResourceConfig.Managed == nil { - return nil - } - - if len(n.ResourceConfig.Managed.Provisioners) == 0 { - return nil - } - - result := make([]*configs.Provisioner, 0, len(n.ResourceConfig.Managed.Provisioners)) - for _, p := range n.ResourceConfig.Managed.Provisioners { - if p.When == n.When { - result = append(result, p) - } - } - - return result -} - -func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error { - var diags tfdiags.Diagnostics - instanceAddr := n.Addr - absAddr := instanceAddr.Absolute(ctx.Path()) - - // If there's a connection block defined directly inside the resource block - // then it'll serve as a base connection configuration for all of the - // provisioners. - var baseConn hcl.Body - if n.ResourceConfig.Managed != nil && n.ResourceConfig.Managed.Connection != nil { - baseConn = n.ResourceConfig.Managed.Connection.Config - } - - for _, prov := range provs { - log.Printf("[TRACE] EvalApplyProvisioners: provisioning %s with %q", absAddr, prov.Type) - - // Get the provisioner - provisioner := ctx.Provisioner(prov.Type) - schema := ctx.ProvisionerSchema(prov.Type) - - forEach, forEachDiags := evaluateResourceForEachExpression(n.ResourceConfig.ForEach, ctx) - diags = diags.Append(forEachDiags) - keyData := EvalDataForInstanceKey(instanceAddr.Key, forEach) - - // Evaluate the main provisioner configuration. - config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData) - diags = diags.Append(configDiags) - - // If the provisioner block contains a connection block of its own then - // it can override the base connection configuration, if any. - var localConn hcl.Body - if prov.Connection != nil { - localConn = prov.Connection.Config - } - - var connBody hcl.Body - switch { - case baseConn != nil && localConn != nil: - // Our standard merging logic applies here, similar to what we do - // with _override.tf configuration files: arguments from the - // base connection block will be masked by any arguments of the - // same name in the local connection block. - connBody = configs.MergeBodies(baseConn, localConn) - case baseConn != nil: - connBody = baseConn - case localConn != nil: - connBody = localConn - } - - // start with an empty connInfo - connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType()) - - if connBody != nil { - var connInfoDiags tfdiags.Diagnostics - connInfo, _, connInfoDiags = ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData) - diags = diags.Append(connInfoDiags) - if diags.HasErrors() { - // "on failure continue" setting only applies to failures of the - // provisioner itself, not to invalid configuration.
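// A standalone sketch (not from the SDK) of the masking semantics described
// above for connection blocks: arguments from the resource-level (base) block
// are overridden by same-named arguments in the provisioner-local block. The
// real code merges hcl.Body values via configs.MergeBodies; plain maps are
// used here only to illustrate the precedence rule.
package sketch

func mergeConn(base, local map[string]string) map[string]string {
	merged := make(map[string]string, len(base)+len(local))
	for k, v := range base {
		merged[k] = v
	}
	for k, v := range local {
		merged[k] = v // local arguments mask base arguments of the same name
	}
	return merged
}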
- return diags.Err() - } - } - - { - // Call pre hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvisionInstanceStep(absAddr, prov.Type) - }) - if err != nil { - return err - } - } - - // The output function - outputFn := func(msg string) { - ctx.Hook(func(h Hook) (HookAction, error) { - h.ProvisionOutput(absAddr, prov.Type, msg) - return HookActionContinue, nil - }) - } - - output := CallbackUIOutput{OutputFn: outputFn} - resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{ - Config: config, - Connection: connInfo, - UIOutput: &output, - }) - applyDiags := resp.Diagnostics.InConfigBody(prov.Config) - - // Call post hook - hookErr := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvisionInstanceStep(absAddr, prov.Type, applyDiags.Err()) - }) - - switch prov.OnFailure { - case configs.ProvisionerOnFailureContinue: - if applyDiags.HasErrors() { - log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type) - } else { - // Maybe there are warnings that we still want to see - diags = diags.Append(applyDiags) - } - default: - diags = diags.Append(applyDiags) - if applyDiags.HasErrors() { - log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type) - return diags.Err() - } - } - - // Deal with the hook - if hookErr != nil { - return hookErr - } - } - - return diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go deleted file mode 100644 index d13a965291..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go +++ /dev/null @@ -1,47 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalCheckPreventDestroy is an EvalNode implementation that returns an -// error if a resource has PreventDestroy configured and the diff -// would destroy the resource. -type EvalCheckPreventDestroy struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Change **plans.ResourceInstanceChange -} - -func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) { - if n.Change == nil || *n.Change == nil || n.Config == nil || n.Config.Managed == nil { - return nil, nil - } - - change := *n.Change - preventDestroy := n.Config.Managed.PreventDestroy - - if (change.Action == plans.Delete || change.Action.IsReplace()) && preventDestroy { - var diags tfdiags.Diagnostics - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Instance cannot be destroyed", - Detail: fmt.Sprintf( - "Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed.
To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.", - n.Addr.Absolute(ctx.Path()).String(), - ), - Subject: &n.Config.DeclRange, - }) - return nil, diags.Err() - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go deleted file mode 100644 index 4fa011e2b5..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go +++ /dev/null @@ -1,133 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// EvalContext is the interface that is given to eval nodes to execute. -type EvalContext interface { - // Stopped returns a channel that is closed when evaluation is stopped - // via Terraform.Context.Stop() - Stopped() <-chan struct{} - - // Path is the current module path. - Path() addrs.ModuleInstance - - // Hook is used to call hook methods. The callback is called for each - // hook and should return the hook action to take and the error. - Hook(func(Hook) (HookAction, error)) error - - // Input is the UIInput object for interacting with the UI. - Input() UIInput - - // InitProvider initializes the provider with the given type and address, and - // returns the implementation of the resource provider or an error. - // - // It is an error to initialize the same provider more than once. - InitProvider(typ string, addr addrs.ProviderConfig) (providers.Interface, error) - - // Provider gets the provider instance with the given address (already - // initialized) or returns nil if the provider isn't initialized. - // - // This method expects an _absolute_ provider configuration address, since - // resources in one module are able to use providers from other modules. - // InitProvider must've been called on the EvalContext of the module - // that owns the given provider before calling this method. - Provider(addrs.AbsProviderConfig) providers.Interface - - // ProviderSchema retrieves the schema for a particular provider, which - // must have already been initialized with InitProvider. - // - // This method expects an _absolute_ provider configuration address, since - // resources in one module are able to use providers from other modules. - ProviderSchema(addrs.AbsProviderConfig) *ProviderSchema - - // CloseProvider closes provider connections that aren't needed anymore. - CloseProvider(addrs.ProviderConfig) error - - // ConfigureProvider configures the provider with the given - // configuration. This is a separate context call because this call - // is used to store the provider configuration for inheritance lookups - // with ParentProviderConfig(). - ConfigureProvider(addrs.ProviderConfig, cty.Value) tfdiags.Diagnostics - - // ProviderInput and SetProviderInput are used to configure providers - // from user input. 
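// A standalone sketch (not from the SDK) of the prevent_destroy check in the
// deleted eval_check_prevent_destroy.go above: destroy and replace actions
// are rejected whenever the lifecycle flag is set. Action is a simplified
// stand-in for the plans.Action type.
package sketch

import "fmt"

type Action int

const (
	Delete Action = iota
	DeleteThenCreate
	CreateThenDelete
	Update
)

func (a Action) IsReplace() bool {
	return a == DeleteThenCreate || a == CreateThenDelete
}

func checkPreventDestroy(addr string, action Action, preventDestroy bool) error {
	if (action == Delete || action.IsReplace()) && preventDestroy {
		return fmt.Errorf("instance %s cannot be destroyed: lifecycle.prevent_destroy is set", addr)
	}
	return nil
}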
- ProviderInput(addrs.ProviderConfig) map[string]cty.Value - SetProviderInput(addrs.ProviderConfig, map[string]cty.Value) - - // InitProvisioner initializes the provisioner with the given name and - // returns the implementation of the resource provisioner or an error. - // - // It is an error to initialize the same provisioner more than once. - InitProvisioner(string) (provisioners.Interface, error) - - // Provisioner gets the provisioner instance with the given name (already - // initialized) or returns nil if the provisioner isn't initialized. - Provisioner(string) provisioners.Interface - - // ProvisionerSchema retrieves the main configuration schema for a - // particular provisioner, which must have already been initialized with - // InitProvisioner. - ProvisionerSchema(string) *configschema.Block - - // CloseProvisioner closes provisioner connections that aren't needed - // anymore. - CloseProvisioner(string) error - - // EvaluateBlock takes the given raw configuration block and associated - // schema and evaluates it to produce a value of an object type that - // conforms to the implied type of the schema. - // - // The "self" argument is optional. If given, it is the referenceable - // address that the name "self" should behave as an alias for when - // evaluating. Set this to nil if the "self" object should not be available. - // - // The "key" argument is also optional. If given, it is the instance key - // of the current object within the multi-instance container it belongs - // to. For example, on a resource block with "count" set this should be - // set to a different addrs.IntKey for each instance created from that - // block. Set this to addrs.NoKey if not appropriate. - // - // The returned body is an expanded version of the given body, with any - // "dynamic" blocks replaced with zero or more static blocks. This can be - // used to extract correct source location information about attributes of - // the returned object value. - EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) - - // EvaluateExpr takes the given HCL expression and evaluates it to produce - // a value. - // - // The "self" argument is optional. If given, it is the referenceable - // address that the name "self" should behave as an alias for when - // evaluating. Set this to nil if the "self" object should not be available. - EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) - - // EvaluationScope returns a scope that can be used to evaluate reference - // addresses in this context. - EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope - - // SetModuleCallArguments defines values for the variables of a particular - // child module call. - // - // Calling this function multiple times has merging behavior, keeping any - // previously-set keys that are not present in the new map. - SetModuleCallArguments(addrs.ModuleCallInstance, map[string]cty.Value) - - // Changes returns the writer object that can be used to write new proposed - // changes into the global changes set. - Changes() *plans.ChangesSync - - // State returns a wrapper object that provides safe concurrent access to - // the global state. 
- State() *states.SyncState -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go deleted file mode 100644 index bd414a9608..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go +++ /dev/null @@ -1,329 +0,0 @@ -package terraform - -import ( - "context" - "fmt" - "log" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/version" - - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/zclconf/go-cty/cty" -) - -// BuiltinEvalContext is an EvalContext implementation that is used by -// Terraform by default. -type BuiltinEvalContext struct { - // StopContext is the context used to track whether we're complete - StopContext context.Context - - // PathValue is the Path that this context is operating within. - PathValue addrs.ModuleInstance - - // Evaluator is used for evaluating expressions within the scope of this - // eval context. - Evaluator *Evaluator - - // Schemas is a repository of all of the schemas we should need to - // decode configuration blocks and expressions. This must be constructed by - // the caller to include schemas for all of the providers, resource types, - // data sources and provisioners used by the given configuration and - // state. - // - // This must not be mutated during evaluation. - Schemas *Schemas - - // VariableValues contains the variable values across all modules. This - // structure is shared across the entire containing context, and so it - // may be accessed only when holding VariableValuesLock. - // The keys of the first level of VariableValues are the string - // representations of addrs.ModuleInstance values. The second-level keys - // are variable names within each module instance. - VariableValues map[string]map[string]cty.Value - VariableValuesLock *sync.Mutex - - Components contextComponentFactory - Hooks []Hook - InputValue UIInput - ProviderCache map[string]providers.Interface - ProviderInputConfig map[string]map[string]cty.Value - ProviderLock *sync.Mutex - ProvisionerCache map[string]provisioners.Interface - ProvisionerLock *sync.Mutex - ChangesValue *plans.ChangesSync - StateValue *states.SyncState - - once sync.Once -} - -// BuiltinEvalContext implements EvalContext -var _ EvalContext = (*BuiltinEvalContext)(nil) - -func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { - // This can happen during tests. During tests, we just block forever. 
- if ctx.StopContext == nil { - return nil - } - - return ctx.StopContext.Done() -} - -func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { - for _, h := range ctx.Hooks { - action, err := fn(h) - if err != nil { - return err - } - - switch action { - case HookActionContinue: - continue - case HookActionHalt: - // Return an early exit error to trigger an early exit - log.Printf("[WARN] Early exit triggered by hook: %T", h) - return EvalEarlyExitError{} - } - } - - return nil -} - -func (ctx *BuiltinEvalContext) Input() UIInput { - return ctx.InputValue -} - -func (ctx *BuiltinEvalContext) InitProvider(typeName string, addr addrs.ProviderConfig) (providers.Interface, error) { - ctx.once.Do(ctx.init) - absAddr := addr.Absolute(ctx.Path()) - - // If we already initialized, it is an error - if p := ctx.Provider(absAddr); p != nil { - return nil, fmt.Errorf("%s is already initialized", addr) - } - - // Warning: make sure to acquire these locks AFTER the call to Provider - // above, since it also acquires locks. - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - key := absAddr.String() - - p, err := ctx.Components.ResourceProvider(typeName, key) - if err != nil { - return nil, err - } - - log.Printf("[TRACE] BuiltinEvalContext: Initialized %q provider for %s", typeName, absAddr) - ctx.ProviderCache[key] = p - - return p, nil -} - -func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { - ctx.once.Do(ctx.init) - - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - return ctx.ProviderCache[addr.String()] -} - -func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { - ctx.once.Do(ctx.init) - - return ctx.Schemas.ProviderSchema(addr.ProviderConfig.Type) -} - -func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.ProviderConfig) error { - ctx.once.Do(ctx.init) - - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - key := addr.Absolute(ctx.Path()).String() - provider := ctx.ProviderCache[key] - if provider != nil { - delete(ctx.ProviderCache, key) - return provider.Close() - } - - return nil -} - -func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - absAddr := addr.Absolute(ctx.Path()) - p := ctx.Provider(absAddr) - if p == nil { - diags = diags.Append(fmt.Errorf("%s not initialized", addr)) - return diags - } - - providerSchema := ctx.ProviderSchema(absAddr) - if providerSchema == nil { - diags = diags.Append(fmt.Errorf("schema for %s is not available", absAddr)) - return diags - } - - req := providers.ConfigureRequest{ - TerraformVersion: version.String(), - Config: cfg, - } - - resp := p.Configure(req) - return resp.Diagnostics -} - -func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.ProviderConfig) map[string]cty.Value { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - if !ctx.Path().IsRoot() { - // Only root module provider configurations can have input. - return nil - } - - return ctx.ProviderInputConfig[pc.String()] -} - -func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.ProviderConfig, c map[string]cty.Value) { - absProvider := pc.Absolute(ctx.Path()) - - if !ctx.Path().IsRoot() { - // Only root module provider configurations can have input. 
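// A standalone sketch (not from the SDK) of the lock discipline used by
// BuiltinEvalContext above: the cached lookup takes the same mutex as the
// insert, so init must run its duplicate check _before_ acquiring the lock
// itself, exactly as the warning comment in InitProvider says.
// providerCache is a hypothetical simplification of ProviderCache plus
// ProviderLock.
package sketch

import (
	"fmt"
	"sync"
)

type providerCache struct {
	mu    sync.Mutex
	cache map[string]interface{}
}

func (c *providerCache) get(key string) interface{} {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.cache[key]
}

func (c *providerCache) init(key string, newProvider func() interface{}) (interface{}, error) {
	// Duplicate check first; get() takes the lock itself, so we must not
	// already be holding it here.
	if p := c.get(key); p != nil {
		return nil, fmt.Errorf("%s is already initialized", key)
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.cache == nil {
		c.cache = make(map[string]interface{})
	}
	p := newProvider()
	c.cache[key] = p
	return p, nil
}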
- log.Printf("[WARN] BuiltinEvalContext: attempt to SetProviderInput for non-root module") - return - } - - // Save the configuration - ctx.ProviderLock.Lock() - ctx.ProviderInputConfig[absProvider.String()] = c - ctx.ProviderLock.Unlock() -} - -func (ctx *BuiltinEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { - ctx.once.Do(ctx.init) - - // If we already initialized, it is an error - if p := ctx.Provisioner(n); p != nil { - return nil, fmt.Errorf("Provisioner '%s' already initialized", n) - } - - // Warning: make sure to acquire these locks AFTER the call to Provisioner - // above, since it also acquires locks. - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - key := PathObjectCacheKey(ctx.Path(), n) - - p, err := ctx.Components.ResourceProvisioner(n, key) - if err != nil { - return nil, err - } - - ctx.ProvisionerCache[key] = p - - return p, nil -} - -func (ctx *BuiltinEvalContext) Provisioner(n string) provisioners.Interface { - ctx.once.Do(ctx.init) - - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - key := PathObjectCacheKey(ctx.Path(), n) - return ctx.ProvisionerCache[key] -} - -func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) *configschema.Block { - ctx.once.Do(ctx.init) - - return ctx.Schemas.ProvisionerConfig(n) -} - -func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { - ctx.once.Do(ctx.init) - - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - key := PathObjectCacheKey(ctx.Path(), n) - - prov := ctx.ProvisionerCache[key] - if prov != nil { - return prov.Close() - } - - return nil -} - -func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - scope := ctx.EvaluationScope(self, keyData) - body, evalDiags := scope.ExpandBlock(body, schema) - diags = diags.Append(evalDiags) - val, evalDiags := scope.EvalBlock(body, schema) - diags = diags.Append(evalDiags) - return val, body, diags -} - -func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - scope := ctx.EvaluationScope(self, EvalDataForNoInstanceKey) - return scope.EvalExpr(expr, wantType) -} - -func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { - data := &evaluationStateData{ - Evaluator: ctx.Evaluator, - ModulePath: ctx.PathValue, - InstanceKeyData: keyData, - Operation: ctx.Evaluator.Operation, - } - return ctx.Evaluator.Scope(data, self) -} - -func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance { - return ctx.PathValue -} - -func (ctx *BuiltinEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, vals map[string]cty.Value) { - ctx.VariableValuesLock.Lock() - defer ctx.VariableValuesLock.Unlock() - - childPath := n.ModuleInstance(ctx.PathValue) - key := childPath.String() - - args := ctx.VariableValues[key] - if args == nil { - args = make(map[string]cty.Value) - ctx.VariableValues[key] = vals - return - } - - for k, v := range vals { - args[k] = v - } -} - -func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync { - return ctx.ChangesValue -} - -func (ctx *BuiltinEvalContext) State() *states.SyncState { - return ctx.StateValue -} - -func (ctx *BuiltinEvalContext) init() { -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go 
b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go deleted file mode 100644 index 786316fb35..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go +++ /dev/null @@ -1,319 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// MockEvalContext is a mock version of EvalContext that can be used -// for tests. -type MockEvalContext struct { - StoppedCalled bool - StoppedValue <-chan struct{} - - HookCalled bool - HookHook Hook - HookError error - - InputCalled bool - InputInput UIInput - - InitProviderCalled bool - InitProviderType string - InitProviderAddr addrs.ProviderConfig - InitProviderProvider providers.Interface - InitProviderError error - - ProviderCalled bool - ProviderAddr addrs.AbsProviderConfig - ProviderProvider providers.Interface - - ProviderSchemaCalled bool - ProviderSchemaAddr addrs.AbsProviderConfig - ProviderSchemaSchema *ProviderSchema - - CloseProviderCalled bool - CloseProviderAddr addrs.ProviderConfig - CloseProviderProvider providers.Interface - - ProviderInputCalled bool - ProviderInputAddr addrs.ProviderConfig - ProviderInputValues map[string]cty.Value - - SetProviderInputCalled bool - SetProviderInputAddr addrs.ProviderConfig - SetProviderInputValues map[string]cty.Value - - ConfigureProviderCalled bool - ConfigureProviderAddr addrs.ProviderConfig - ConfigureProviderConfig cty.Value - ConfigureProviderDiags tfdiags.Diagnostics - - InitProvisionerCalled bool - InitProvisionerName string - InitProvisionerProvisioner provisioners.Interface - InitProvisionerError error - - ProvisionerCalled bool - ProvisionerName string - ProvisionerProvisioner provisioners.Interface - - ProvisionerSchemaCalled bool - ProvisionerSchemaName string - ProvisionerSchemaSchema *configschema.Block - - CloseProvisionerCalled bool - CloseProvisionerName string - CloseProvisionerProvisioner provisioners.Interface - - EvaluateBlockCalled bool - EvaluateBlockBody hcl.Body - EvaluateBlockSchema *configschema.Block - EvaluateBlockSelf addrs.Referenceable - EvaluateBlockKeyData InstanceKeyEvalData - EvaluateBlockResultFunc func( - body hcl.Body, - schema *configschema.Block, - self addrs.Referenceable, - keyData InstanceKeyEvalData, - ) (cty.Value, hcl.Body, tfdiags.Diagnostics) // overrides the other values below, if set - EvaluateBlockResult cty.Value - EvaluateBlockExpandedBody hcl.Body - EvaluateBlockDiags tfdiags.Diagnostics - - EvaluateExprCalled bool - EvaluateExprExpr hcl.Expression - EvaluateExprWantType cty.Type - EvaluateExprSelf addrs.Referenceable - EvaluateExprResultFunc func( - expr hcl.Expression, - wantType cty.Type, - self addrs.Referenceable, - ) (cty.Value, tfdiags.Diagnostics) // overrides the other values below, if set - EvaluateExprResult cty.Value - EvaluateExprDiags tfdiags.Diagnostics - - EvaluationScopeCalled bool - EvaluationScopeSelf addrs.Referenceable - 
EvaluationScopeKeyData InstanceKeyEvalData - EvaluationScopeScope *lang.Scope - - PathCalled bool - PathPath addrs.ModuleInstance - - SetModuleCallArgumentsCalled bool - SetModuleCallArgumentsModule addrs.ModuleCallInstance - SetModuleCallArgumentsValues map[string]cty.Value - - ChangesCalled bool - ChangesChanges *plans.ChangesSync - - StateCalled bool - StateState *states.SyncState -} - -// MockEvalContext implements EvalContext -var _ EvalContext = (*MockEvalContext)(nil) - -func (c *MockEvalContext) Stopped() <-chan struct{} { - c.StoppedCalled = true - return c.StoppedValue -} - -func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error { - c.HookCalled = true - if c.HookHook != nil { - if _, err := fn(c.HookHook); err != nil { - return err - } - } - - return c.HookError -} - -func (c *MockEvalContext) Input() UIInput { - c.InputCalled = true - return c.InputInput -} - -func (c *MockEvalContext) InitProvider(t string, addr addrs.ProviderConfig) (providers.Interface, error) { - c.InitProviderCalled = true - c.InitProviderType = t - c.InitProviderAddr = addr - return c.InitProviderProvider, c.InitProviderError -} - -func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { - c.ProviderCalled = true - c.ProviderAddr = addr - return c.ProviderProvider -} - -func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { - c.ProviderSchemaCalled = true - c.ProviderSchemaAddr = addr - return c.ProviderSchemaSchema -} - -func (c *MockEvalContext) CloseProvider(addr addrs.ProviderConfig) error { - c.CloseProviderCalled = true - c.CloseProviderAddr = addr - return nil -} - -func (c *MockEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { - c.ConfigureProviderCalled = true - c.ConfigureProviderAddr = addr - c.ConfigureProviderConfig = cfg - return c.ConfigureProviderDiags -} - -func (c *MockEvalContext) ProviderInput(addr addrs.ProviderConfig) map[string]cty.Value { - c.ProviderInputCalled = true - c.ProviderInputAddr = addr - return c.ProviderInputValues -} - -func (c *MockEvalContext) SetProviderInput(addr addrs.ProviderConfig, vals map[string]cty.Value) { - c.SetProviderInputCalled = true - c.SetProviderInputAddr = addr - c.SetProviderInputValues = vals -} - -func (c *MockEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { - c.InitProvisionerCalled = true - c.InitProvisionerName = n - return c.InitProvisionerProvisioner, c.InitProvisionerError -} - -func (c *MockEvalContext) Provisioner(n string) provisioners.Interface { - c.ProvisionerCalled = true - c.ProvisionerName = n - return c.ProvisionerProvisioner -} - -func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block { - c.ProvisionerSchemaCalled = true - c.ProvisionerSchemaName = n - return c.ProvisionerSchemaSchema -} - -func (c *MockEvalContext) CloseProvisioner(n string) error { - c.CloseProvisionerCalled = true - c.CloseProvisionerName = n - return nil -} - -func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - c.EvaluateBlockCalled = true - c.EvaluateBlockBody = body - c.EvaluateBlockSchema = schema - c.EvaluateBlockSelf = self - c.EvaluateBlockKeyData = keyData - if c.EvaluateBlockResultFunc != nil { - return c.EvaluateBlockResultFunc(body, schema, self, keyData) - } - return c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags -} - -func 
(c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - c.EvaluateExprCalled = true - c.EvaluateExprExpr = expr - c.EvaluateExprWantType = wantType - c.EvaluateExprSelf = self - if c.EvaluateExprResultFunc != nil { - return c.EvaluateExprResultFunc(expr, wantType, self) - } - return c.EvaluateExprResult, c.EvaluateExprDiags -} - -// installSimpleEval is a helper to install a simple mock implementation of -// both EvaluateBlock and EvaluateExpr into the receiver. -// -// These default implementations will either evaluate the given input against -// the scope in field EvaluationScopeScope or, if it is nil, with no eval -// context at all so that only constant values may be used. -// -// This function overwrites any existing functions installed in fields -// EvaluateBlockResultFunc and EvaluateExprResultFunc. -func (c *MockEvalContext) installSimpleEval() { - c.EvaluateBlockResultFunc = func(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - if scope := c.EvaluationScopeScope; scope != nil { - // Fully-functional codepath. - var diags tfdiags.Diagnostics - body, diags = scope.ExpandBlock(body, schema) - if diags.HasErrors() { - return cty.DynamicVal, body, diags - } - val, evalDiags := c.EvaluationScopeScope.EvalBlock(body, schema) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return cty.DynamicVal, body, diags - } - return val, body, diags - } - - // Fallback codepath supporting constant values only. - val, hclDiags := hcldec.Decode(body, schema.DecoderSpec(), nil) - return val, body, tfdiags.Diagnostics(nil).Append(hclDiags) - } - c.EvaluateExprResultFunc = func(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - if scope := c.EvaluationScopeScope; scope != nil { - // Fully-functional codepath. - return scope.EvalExpr(expr, wantType) - } - - // Fallback codepath supporting constant values only. 
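// A standalone sketch (not from the SDK) of the constant-only fallback named
// in the comment above: evaluating an HCL expression with a nil
// hcl.EvalContext succeeds only for literal values, and the result is then
// converted to the wanted cty type, mirroring the expr.Value(nil) plus
// convert.Convert sequence below.
package sketch

import (
	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func evalConstant(src string, want cty.Type) (cty.Value, error) {
	expr, diags := hclsyntax.ParseExpression([]byte(src), "sketch.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		return cty.DynamicVal, diags
	}
	val, valDiags := expr.Value(nil) // nil context: constants only, no variables or functions
	if valDiags.HasErrors() {
		return cty.DynamicVal, valDiags
	}
	return convert.Convert(val, want)
}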
- var diags tfdiags.Diagnostics - val, hclDiags := expr.Value(nil) - diags = diags.Append(hclDiags) - if hclDiags.HasErrors() { - return cty.DynamicVal, diags - } - var err error - val, err = convert.Convert(val, wantType) - if err != nil { - diags = diags.Append(err) - return cty.DynamicVal, diags - } - return val, diags - } -} - -func (c *MockEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { - c.EvaluationScopeCalled = true - c.EvaluationScopeSelf = self - c.EvaluationScopeKeyData = keyData - return c.EvaluationScopeScope -} - -func (c *MockEvalContext) Path() addrs.ModuleInstance { - c.PathCalled = true - return c.PathPath -} - -func (c *MockEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, values map[string]cty.Value) { - c.SetModuleCallArgumentsCalled = true - c.SetModuleCallArgumentsModule = n - c.SetModuleCallArgumentsValues = values -} - -func (c *MockEvalContext) Changes() *plans.ChangesSync { - c.ChangesCalled = true - return c.ChangesChanges -} - -func (c *MockEvalContext) State() *states.SyncState { - c.StateCalled = true - return c.StateState -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go deleted file mode 100644 index 7d6fa4919d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go +++ /dev/null @@ -1,120 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// evaluateResourceCountExpression is our standard mechanism for interpreting an -// expression given for a "count" argument on a resource. This should be called -// from the DynamicExpand of a node representing a resource in order to -// determine the final count value. -// -// If the result is zero or positive and no error diagnostics are returned, then -// the result is the literal count value to use. -// -// If the result is -1, this indicates that the given expression is nil and so -// the "count" behavior should not be enabled for this resource at all. -// -// If error diagnostics are returned then the result is always the meaningless -// placeholder value -1. -func evaluateResourceCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) { - count, known, diags := evaluateResourceCountExpressionKnown(expr, ctx) - if !known { - // Currently this is a rather bad outcome from a UX standpoint, since we have - // no real mechanism to deal with this situation and all we can do is produce - // an error message. - // FIXME: In future, implement a built-in mechanism for deferring changes that - // can't yet be predicted, and use it to guide the user through several - // plan/apply steps until the desired configuration is eventually reached. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. 
To work around this, use the -target argument to first apply only the resources that the count depends on.`, - Subject: expr.Range().Ptr(), - }) - } - return count, diags -} - -// evaluateResourceCountExpressionKnown is like evaluateResourceCountExpression -// except that it handles an unknown result by returning count = 0 and -// a known = false, rather than by reporting the unknown value as an error -// diagnostic. -func evaluateResourceCountExpressionKnown(expr hcl.Expression, ctx EvalContext) (count int, known bool, diags tfdiags.Diagnostics) { - if expr == nil { - return -1, true, nil - } - - countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) - diags = diags.Append(countDiags) - if diags.HasErrors() { - return -1, true, diags - } - - switch { - case countVal.IsNull(): - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is null. An integer is required.`, - Subject: expr.Range().Ptr(), - }) - return -1, true, diags - case !countVal.IsKnown(): - return 0, false, diags - } - - err := gocty.FromCtyValue(countVal, &count) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), - Subject: expr.Range().Ptr(), - }) - return -1, true, diags - } - if count < 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is unsuitable: negative numbers are not supported.`, - Subject: expr.Range().Ptr(), - }) - return -1, true, diags - } - - return count, true, diags -} - -// fixResourceCountSetTransition is a helper function to fix up the state when a -// resource transitions its "count" from being set to unset or vice-versa, -// treating a 0-key and a no-key instance as aliases for one another across -// the transition. -// -// The correct time to call this function is in the DynamicExpand method for -// a node representing a resource, just after evaluating the count with -// evaluateResourceCountExpression, and before any other analysis of the -// state such as orphan detection. -// -// This function calls methods on the given EvalContext to update the current -// state in-place, if necessary. It is a no-op if there is no count transition -// taking place. -// -// Since the state is modified in-place, this function must take a writer lock -// on the state. The caller must therefore not also be holding a state lock, -// or this function will block forever awaiting the lock. 
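// A standalone sketch (not from the SDK) of the count validation rules shown
// above in evaluateResourceCountExpressionKnown: a nil/absent value means
// "count not set" (-1), null and negative values are rejected, and an unknown
// value is reported as not-yet-known rather than as an error.
package sketch

import (
	"errors"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/gocty"
)

func validateCount(v cty.Value) (count int, known bool, err error) {
	switch {
	case v == cty.NilVal:
		return -1, true, nil // no count argument at all
	case v.IsNull():
		return -1, true, errors.New(`the "count" value is null; an integer is required`)
	case !v.IsKnown():
		return 0, false, nil // defer: value depends on attributes not yet resolved
	}
	if err := gocty.FromCtyValue(v, &count); err != nil {
		return -1, true, err
	}
	if count < 0 {
		return -1, true, errors.New(`negative "count" values are not supported`)
	}
	return count, true, nil
}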
-func fixResourceCountSetTransition(ctx EvalContext, addr addrs.AbsResource, countEnabled bool) { - state := ctx.State() - changed := state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled) - if changed { - log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go deleted file mode 100644 index aac380632a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go +++ /dev/null @@ -1,77 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state -// when there is a resource count with zero/one boundary, i.e. fixing -// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. -// -// This works on the global state. -type EvalCountFixZeroOneBoundaryGlobal struct { - Config *configs.Config -} - -// TODO: test -func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) { - // We'll temporarily lock the state to grab the modules, then work on each - // one separately while taking a lock again for each separate resource. - // This means that if another caller concurrently adds a module here while - // we're working then we won't update it, but that's no worse than the - // concurrent writer blocking for our entire fixup process and _then_ - // adding a new module, and in practice the graph node associated with - // this eval depends on everything else in the graph anyway, so there - // should not be concurrent writers. - state := ctx.State().Lock() - moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules)) - for _, m := range state.Modules { - moduleAddrs = append(moduleAddrs, m.Addr) - } - ctx.State().Unlock() - - for _, addr := range moduleAddrs { - cfg := n.Config.DescendentForInstance(addr) - if cfg == nil { - log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) - continue - } - if err := n.fixModule(ctx, addr); err != nil { - return nil, err - } - } - - return nil, nil -} - -func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error { - ms := ctx.State().Module(moduleAddr) - cfg := n.Config.DescendentForInstance(moduleAddr) - if ms == nil { - // Theoretically possible for a concurrent writer to delete a module - // while we're running, but in practice the graph node that called us - // depends on everything else in the graph and so there can never - // be a concurrent writer. 
- return fmt.Errorf("[WARN] no state found for %s while trying to fix up EachModes", moduleAddr) - } - if cfg == nil { - return fmt.Errorf("[WARN] no config found for %s while trying to fix up EachModes", moduleAddr) - } - - for _, r := range ms.Resources { - addr := r.Addr.Absolute(moduleAddr) - rCfg := cfg.Module.ResourceByAddr(r.Addr) - if rCfg == nil { - log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) - continue - } - hasCount := rCfg.Count != nil - fixResourceCountSetTransition(ctx, addr, hasCount) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go deleted file mode 100644 index d6f51c9504..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go +++ /dev/null @@ -1,783 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalCheckPlannedChange is an EvalNode implementation that produces errors -// if the _actual_ expected value is not compatible with what was recorded -// in the plan. -// -// Errors here are most often indicative of a bug in the provider, so our -// error messages will report with that in mind. It's also possible that -// there's a bug in Terraform's Core's own "proposed new value" code in -// EvalDiff. -type EvalCheckPlannedChange struct { - Addr addrs.ResourceInstance - ProviderAddr addrs.AbsProviderConfig - ProviderSchema **ProviderSchema - - // We take ResourceInstanceChange objects here just because that's what's - // convenient to pass in from the evaltree implementation, but we really - // only look at the "After" value of each change. - Planned, Actual **plans.ResourceInstanceChange -} - -func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) { - providerSchema := *n.ProviderSchema - plannedChange := *n.Planned - actualChange := *n.Actual - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type) - } - - var diags tfdiags.Diagnostics - absAddr := n.Addr.Absolute(ctx.Path()) - - log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action) - - if plannedChange.Action != actualChange.Action { - switch { - case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp: - // It's okay for an update to become a NoOp once we've filled in - // all of the unknown values, since the final values might actually - // match what was there before after all. 
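// A standalone sketch (not from the SDK) of the consistency rule described
// above in EvalCheckPlannedChange: the only tolerated divergence between the
// planned and final action is an Update collapsing to NoOp once unknown
// values have been filled in. Action is a simplified stand-in for
// plans.Action.
package sketch

type Action string

const (
	NoOp   Action = "no-op"
	Update Action = "update"
)

// actionsConsistent reports whether a provider's final action is acceptable
// given the action recorded in the plan.
func actionsConsistent(planned, actual Action) bool {
	if planned == actual {
		return true
	}
	// An Update may legitimately become a NoOp after unknowns resolve to
	// values matching the prior state.
	return planned == Update && actual == NoOp
}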
- log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.ProviderConfig.Type, - plannedChange.Action, actualChange.Action, - ), - )) - } - } - - errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After) - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), - ), - )) - } - return nil, diags.Err() -} - -// EvalDiff is an EvalNode implementation that detects changes for a given -// resource instance. -type EvalDiff struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Provider *providers.Interface - ProviderAddr addrs.AbsProviderConfig - ProviderSchema **ProviderSchema - State **states.ResourceInstanceObject - PreviousDiff **plans.ResourceInstanceChange - - // CreateBeforeDestroy is set if either the resource's own config sets - // create_before_destroy explicitly or if dependencies have forced the - // resource to be handled as create_before_destroy in order to avoid - // a dependency cycle. 
- CreateBeforeDestroy bool - - OutputChange **plans.ResourceInstanceChange - OutputValue *cty.Value - OutputState **states.ResourceInstanceObject - - Stub bool -} - -// TODO: test -func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - config := *n.Config - provider := *n.Provider - providerSchema := *n.ProviderSchema - - if providerSchema == nil { - return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr) - } - if n.ProviderAddr.ProviderConfig.Type == "" { - panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path()))) - } - - var diags tfdiags.Diagnostics - - // Evaluate the configuration - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - - absAddr := n.Addr.Absolute(ctx.Path()) - var priorVal cty.Value - var priorValTainted cty.Value - var priorPrivate []byte - if state != nil { - if state.Status != states.ObjectTainted { - priorVal = state.Value - priorPrivate = state.Private - } else { - // If the prior state is tainted then we'll proceed below like - // we're creating an entirely new object, but then turn it into - // a synthetic "Replace" change at the end, creating the same - // result as if the provider had marked at least one argument - // change as "requires replacement". - priorValTainted = state.Value - priorVal = cty.NullVal(schema.ImpliedType()) - } - } else { - priorVal = cty.NullVal(schema.ImpliedType()) - } - - proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal) - - // Call pre-diff hook - if !n.Stub { - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) - }) - if err != nil { - return nil, err - } - } - - log.Printf("[TRACE] Re-validating config for %q", n.Addr.Absolute(ctx.Path())) - // Allow the provider to validate the final set of values. - // The config was statically validated early on, but there may have been - // unknown values which the provider could not validate at the time. - validateResp := provider.ValidateResourceTypeConfig( - providers.ValidateResourceTypeConfigRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - }, - ) - if validateResp.Diagnostics.HasErrors() { - return nil, validateResp.Diagnostics.InConfigBody(config.Config).Err() - } - - // The provider gets an opportunity to customize the proposed new value, - // which in turn produces the _planned_ new value. 
But before - // we send back this information, we need to process ignore_changes - // so that CustomizeDiff will not act on them - var ignoreChangeDiags tfdiags.Diagnostics - proposedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, proposedNewVal) - diags = diags.Append(ignoreChangeDiags) - if ignoreChangeDiags.HasErrors() { - return nil, diags.Err() - } - - resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - PriorState: priorVal, - ProposedNewState: proposedNewVal, - PriorPrivate: priorPrivate, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) - if diags.HasErrors() { - return nil, diags.Err() - } - - plannedNewVal := resp.PlannedState - plannedPrivate := resp.PlannedPrivate - - if plannedNewVal == cty.NilVal { - // Should never happen. Since real-world providers return via RPC a nil - // is always a bug in the client-side stub. This is more likely caused - // by an incompletely-configured mock provider in tests, though. - panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String())) - } - - // We allow the planned new value to disagree with configuration _values_ - // here, since that allows the provider to do special logic like a - // DiffSuppressFunc, but we still require that the provider produces - // a value whose type conforms to the schema. - for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - if errs := objchange.AssertPlanValid(schema, priorVal, configVal, plannedNewVal); len(errs) > 0 { - if resp.LegacyTypeSystem { - // The shimming of the old type system in the legacy SDK is not precise - // enough to pass this consistency check, so we'll give it a pass here, - // but we will generate a warning about it so that we are more likely - // to notice in the logs if an inconsistency beyond the type system - // leads to a downstream provider failure.
- var buf strings.Builder - fmt.Fprintf(&buf, "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) - for _, err := range errs { - fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) - } - log.Print(buf.String()) - } else { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - return nil, diags.Err() - } - } - - // TODO: We should be able to remove this repeat of processing ignored changes - // after the plan, which helps providers relying on old behavior "just work" - // in the next major version, such that we can be stricter about ignore_changes - // values - plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, plannedNewVal) - diags = diags.Append(ignoreChangeDiags) - if ignoreChangeDiags.HasErrors() { - return nil, diags.Err() - } - - // The provider produces a list of paths to attributes whose changes mean - // that we must replace rather than update an existing remote object. - // However, we only need to do that if the identified attributes _have_ - // actually changed -- particularly after we may have undone some of the - // changes in processIgnoreChanges -- so now we'll filter that list to - // include only where changes are detected. - reqRep := cty.NewPathSet() - if len(resp.RequiresReplace) > 0 { - for _, path := range resp.RequiresReplace { - if priorVal.IsNull() { - // If prior is null then we don't expect any RequiresReplace at all, - // because this is a Create action. - continue - } - - priorChangedVal, priorPathDiags := hcl.ApplyPath(priorVal, path, nil) - plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil) - if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() { - // This means the path was invalid in both the prior and new - // values, which is an error with the provider itself. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, path, - ), - )) - continue - } - - // Make sure we have valid Values for both values. - // Note: if the opposing value was of the type - // cty.DynamicPseudoType, the type assigned here may not exactly - // match the schema. This is fine here, since we're only going to - // check for equality, but if the NullVal is to be used, we need to - // check the schema for the true type.
- switch { - case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal: - // this should never happen without ApplyPath errors above - panic("requires replace path returned 2 nil values") - case priorChangedVal == cty.NilVal: - priorChangedVal = cty.NullVal(plannedChangedVal.Type()) - case plannedChangedVal == cty.NilVal: - plannedChangedVal = cty.NullVal(priorChangedVal.Type()) - } - - eqV := plannedChangedVal.Equals(priorChangedVal) - if !eqV.IsKnown() || eqV.False() { - reqRep.Add(path) - } - } - if diags.HasErrors() { - return nil, diags.Err() - } - } - - eqV := plannedNewVal.Equals(priorVal) - eq := eqV.IsKnown() && eqV.True() - - var action plans.Action - switch { - case priorVal.IsNull(): - action = plans.Create - case eq: - action = plans.NoOp - case !reqRep.Empty(): - // If there are any "requires replace" paths left _after our filtering - // above_ then this is a replace action. - if n.CreateBeforeDestroy { - action = plans.CreateThenDelete - } else { - action = plans.DeleteThenCreate - } - default: - action = plans.Update - // "Delete" is never chosen here, because deletion plans are always - // created more directly elsewhere, such as in "orphan" handling. - } - - if action.IsReplace() { - // In this strange situation we want to produce a change object that - // shows our real prior object but has a _new_ object that is built - // from a null prior object, since we're going to delete the one - // that has all the computed values on it. - // - // Therefore we'll ask the provider to plan again here, giving it - // a null object for the prior, and then we'll meld that with the - // _actual_ prior state to produce a correctly-shaped replace change. - // The resulting change should show any computed attributes changing - // from known prior values to unknown values, unless the provider is - // able to predict new values for any of these computed attributes. - nullPriorVal := cty.NullVal(schema.ImpliedType()) - - // create a new proposed value from the null state and the config - proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal) - - resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - PriorState: nullPriorVal, - ProposedNewState: proposedNewVal, - PriorPrivate: plannedPrivate, - }) - // We need to tread carefully here, since if there are any warnings - // in here they probably also came out of our previous call to - // PlanResourceChange above, and so we don't want to repeat them. - // Consequently, we break from the usual pattern here and only - // append these new diagnostics if there's at least one error inside. - if resp.Diagnostics.HasErrors() { - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) - return nil, diags.Err() - } - plannedNewVal = resp.PlannedState - plannedPrivate = resp.PlannedPrivate - for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - } - - // If our prior value was tainted then we actually want this to appear - // as a replace change, even though so far we've been treating it as a - // create.
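// A standalone sketch (not from the SDK) of the action selection switch
// above: a null prior means create, equal values mean no-op, any remaining
// "requires replace" path forces a replacement whose ordering depends on
// create_before_destroy, and everything else is an update.
package sketch

type Action string

const (
	Create           Action = "create"
	NoOp             Action = "no-op"
	Update           Action = "update"
	CreateThenDelete Action = "create-then-delete"
	DeleteThenCreate Action = "delete-then-create"
)

func chooseAction(priorIsNull, equal, requiresReplace, createBeforeDestroy bool) Action {
	switch {
	case priorIsNull:
		return Create
	case equal:
		return NoOp
	case requiresReplace:
		if createBeforeDestroy {
			return CreateThenDelete
		}
		return DeleteThenCreate
	default:
		return Update
	}
}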
- if action == plans.Create && priorValTainted != cty.NilVal { - if n.CreateBeforeDestroy { - action = plans.CreateThenDelete - } else { - action = plans.DeleteThenCreate - } - priorVal = priorValTainted - } - - // As a special case, if we have a previous diff (presumably from the plan - // phases, whereas we're now in the apply phase) and it was for a replace, - // we've already deleted the original object from state by the time we - // get here and so we would've ended up with a _create_ action this time, - // which we now need to paper over to get a result consistent with what - // we originally intended. - if n.PreviousDiff != nil { - prevChange := *n.PreviousDiff - if prevChange.Action.IsReplace() && action == plans.Create { - log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action) - action = prevChange.Action - priorVal = prevChange.Before - } - } - - // Call post-refresh hook - if !n.Stub { - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal) - }) - if err != nil { - return nil, err - } - } - - // Update our output if we care - if n.OutputChange != nil { - *n.OutputChange = &plans.ResourceInstanceChange{ - Addr: absAddr, - Private: plannedPrivate, - ProviderAddr: n.ProviderAddr, - Change: plans.Change{ - Action: action, - Before: priorVal, - After: plannedNewVal, - }, - RequiredReplace: reqRep, - } - } - - if n.OutputValue != nil { - *n.OutputValue = configVal - } - - // Update the state if we care - if n.OutputState != nil { - *n.OutputState = &states.ResourceInstanceObject{ - // We use the special "planned" status here to note that this - // object's value is not yet complete. Objects with this status - // cannot be used during expression evaluation, so the caller - // must _also_ record the returned change in the active plan, - // which the expression evaluator will use in preference to this - // incomplete value recorded in the state. - Status: states.ObjectPlanned, - Value: plannedNewVal, - Private: plannedPrivate, - } - } - - return nil, nil -} - -func (n *EvalDiff) processIgnoreChanges(prior, proposed cty.Value) (cty.Value, tfdiags.Diagnostics) { - // ignore_changes only applies when an object already exists, since we - // can't ignore changes to a thing we've not created yet. - if prior.IsNull() { - return proposed, nil - } - - ignoreChanges := n.Config.Managed.IgnoreChanges - ignoreAll := n.Config.Managed.IgnoreAllChanges - - if len(ignoreChanges) == 0 && !ignoreAll { - return proposed, nil - } - if ignoreAll { - return prior, nil - } - if prior.IsNull() || proposed.IsNull() { - // Ignore changes doesn't apply when we're creating for the first time. - // Proposed should never be null here, but if it is then we'll just let it be. - return proposed, nil - } - - return processIgnoreChangesIndividual(prior, proposed, ignoreChanges) -} - -func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) { - // When we walk below we will be using cty.Path values for comparison, so - // we'll convert our traversals here so we can compare more easily. 
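As a toy illustration of what the conversion and transform that follow accomplish, here is a self-contained sketch using only go-cty; restoreIgnored is a hypothetical helper, not the SDK's:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// restoreIgnored walks the proposed value and, wherever the visited path
// matches an ignored address, substitutes the prior value, undoing the
// planned change at exactly that point.
func restoreIgnored(prior, proposed cty.Value, ignored []cty.Path) cty.Value {
	ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) {
		for _, candidate := range ignored {
			if path.Equals(candidate) {
				if priorV, err := candidate.Apply(prior); err == nil {
					return priorV, nil
				}
			}
		}
		return v, nil
	})
	return ret
}

func main() {
	prior := cty.ObjectVal(map[string]cty.Value{"tags": cty.StringVal("old")})
	proposed := cty.ObjectVal(map[string]cty.Value{"tags": cty.StringVal("new")})

	out := restoreIgnored(prior, proposed, []cty.Path{cty.GetAttrPath("tags")})
	fmt.Println(out.GetAttr("tags").AsString()) // "old": the planned change was ignored
}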
- ignoreChangesPath := make([]cty.Path, len(ignoreChanges)) - for i, traversal := range ignoreChanges { - path := make(cty.Path, len(traversal)) - for si, step := range traversal { - switch ts := step.(type) { - case hcl.TraverseRoot: - path[si] = cty.GetAttrStep{ - Name: ts.Name, - } - case hcl.TraverseAttr: - path[si] = cty.GetAttrStep{ - Name: ts.Name, - } - case hcl.TraverseIndex: - path[si] = cty.IndexStep{ - Key: ts.Key, - } - default: - panic(fmt.Sprintf("unsupported traversal step %#v", step)) - } - } - ignoreChangesPath[i] = path - } - - var diags tfdiags.Diagnostics - ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) { - // First we must see if this is a path that's being ignored at all. - // We're looking for an exact match here because this walk will visit - // leaf values first and then their containers, and we want to do - // the "ignore" transform once we reach the point indicated, throwing - // away any deeper values we already produced at that point. - var ignoreTraversal hcl.Traversal - for i, candidate := range ignoreChangesPath { - if path.Equals(candidate) { - ignoreTraversal = ignoreChanges[i] - } - } - if ignoreTraversal == nil { - return v, nil - } - - // If we're able to follow the same path through the prior value, - // we'll take the value there instead, effectively undoing the - // change that was planned. - priorV, diags := hcl.ApplyPath(prior, path, nil) - if diags.HasErrors() { - // We just ignore the errors and move on here, since we assume it's - // just because the prior value was a slightly-different shape. - // It could potentially also be that the traversal doesn't match - // the schema, but we should've caught that during the validate - // walk if so. - return v, nil - } - return priorV, nil - }) - return ret, diags -} - -// EvalDiffDestroy is an EvalNode implementation that returns a plain -// destroy diff. -type EvalDiffDestroy struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - State **states.ResourceInstanceObject - ProviderAddr addrs.AbsProviderConfig - - Output **plans.ResourceInstanceChange - OutputState **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := *n.State - - if n.ProviderAddr.ProviderConfig.Type == "" { - if n.DeposedKey == "" { - panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr)) - } else { - panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey)) - } - } - - // If there is no state or our attributes object is null then we're already - // destroyed. - if state == nil || state.Value.IsNull() { - return nil, nil - } - - // Call pre-diff hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff( - absAddr, n.DeposedKey.Generation(), - state.Value, - cty.NullVal(cty.DynamicPseudoType), - ) - }) - if err != nil { - return nil, err - } - - // Change is always the same for a destroy. We don't need the provider's - // help for this one. - // TODO: Should we give the provider an opportunity to veto this? 
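The change constructed just below always has the same shape regardless of resource type; a minimal sketch of that before/after pair with go-cty:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A destroy change is always "whatever we have now" -> "nothing": the
	// full prior object on the Before side and a typeless null on the
	// After side, with no provider involvement needed to compute it.
	before := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("i-abc123")})
	after := cty.NullVal(cty.DynamicPseudoType)

	fmt.Println(before.IsNull()) // false
	fmt.Println(after.IsNull())  // true
}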
- change := &plans.ResourceInstanceChange{
- Addr: absAddr,
- DeposedKey: n.DeposedKey,
- Change: plans.Change{
- Action: plans.Delete,
- Before: state.Value,
- After: cty.NullVal(cty.DynamicPseudoType),
- },
- Private: state.Private,
- ProviderAddr: n.ProviderAddr,
- }
-
- // Call post-diff hook
- err = ctx.Hook(func(h Hook) (HookAction, error) {
- return h.PostDiff(
- absAddr,
- n.DeposedKey.Generation(),
- change.Action,
- change.Before,
- change.After,
- )
- })
- if err != nil {
- return nil, err
- }
-
- // Update our output
- *n.Output = change
-
- if n.OutputState != nil {
- // Record our proposed new state, which is nil because we're destroying.
- *n.OutputState = nil
- }
-
- return nil, nil
-}
-
-// EvalReduceDiff is an EvalNode implementation that takes a planned resource
-// instance change as might be produced by EvalDiff or EvalDiffDestroy and
-// "simplifies" it to a single atomic action to be performed by a specific
-// graph node.
-//
-// Callers must specify whether they are a destroy node or a regular apply
-// node. If the result is NoOp then the given change requires no action for
-// the specific graph node calling this and so evaluation of that graph
-// node should exit early and take no action.
-//
-// The object written to OutChange may either be identical to InChange or
-// a new change object derived from InChange. Because of the former case, the
-// caller must not mutate the object returned in OutChange.
-type EvalReduceDiff struct {
- Addr addrs.ResourceInstance
- InChange **plans.ResourceInstanceChange
- Destroy bool
- OutChange **plans.ResourceInstanceChange
-}
-
-// TODO: test
-func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) {
- in := *n.InChange
- out := in.Simplify(n.Destroy)
- if n.OutChange != nil {
- *n.OutChange = out
- }
- if out.Action != in.Action {
- if n.Destroy {
- log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, in.Action, out.Action)
- } else {
- log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, in.Action, out.Action)
- }
- }
- return nil, nil
-}
-
-// EvalReadDiff is an EvalNode implementation that retrieves the planned
-// change for a particular resource instance object.
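Before the EvalReadDiff type itself, a brief aside on the Simplify call in EvalReduceDiff above. The following toy model sketches how a single planned change is typically split between the two cooperating nodes; it is a rough approximation under assumed semantics, not the SDK's plans package:

package main

import "fmt"

// simplify is a rough model of in.Simplify(destroyNode): replace actions
// decompose into their destroy half and their create half, and actions
// that don't concern the calling node reduce to a no-op.
func simplify(action string, destroyNode bool) string {
	if destroyNode {
		switch action {
		case "DeleteThenCreate", "CreateThenDelete", "Delete":
			return "Delete"
		default:
			return "NoOp"
		}
	}
	switch action {
	case "DeleteThenCreate", "CreateThenDelete":
		return "Create"
	case "Delete":
		return "NoOp"
	default:
		return action
	}
}

func main() {
	fmt.Println(simplify("DeleteThenCreate", true))  // Delete: destroy node handles the old object
	fmt.Println(simplify("DeleteThenCreate", false)) // Create: apply node handles the new object
}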
-type EvalReadDiff struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - ProviderSchema **ProviderSchema - Change **plans.ResourceInstanceChange -} - -func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) { - providerSchema := *n.ProviderSchema - changes := ctx.Changes() - addr := n.Addr.Absolute(ctx.Path()) - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - gen := states.CurrentGen - if n.DeposedKey != states.NotDeposed { - gen = n.DeposedKey - } - csrc := changes.GetResourceInstanceChange(addr, gen) - if csrc == nil { - log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", addr) - return nil, nil - } - - change, err := csrc.Decode(schema.ImpliedType()) - if err != nil { - return nil, fmt.Errorf("failed to decode planned changes for %s: %s", addr, err) - } - if n.Change != nil { - *n.Change = change - } - - log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, addr) - - return nil, nil -} - -// EvalWriteDiff is an EvalNode implementation that saves a planned change -// for an instance object into the set of global planned changes. -type EvalWriteDiff struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - ProviderSchema **ProviderSchema - Change **plans.ResourceInstanceChange -} - -// TODO: test -func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { - changes := ctx.Changes() - addr := n.Addr.Absolute(ctx.Path()) - if n.Change == nil || *n.Change == nil { - // Caller sets nil to indicate that we need to remove a change from - // the set of changes. - gen := states.CurrentGen - if n.DeposedKey != states.NotDeposed { - gen = n.DeposedKey - } - changes.RemoveResourceInstanceChange(addr, gen) - return nil, nil - } - - providerSchema := *n.ProviderSchema - change := *n.Change - - if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey { - // Should never happen, and indicates a bug in the caller. - panic("inconsistent address and/or deposed key in EvalWriteDiff") - } - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - csrc, err := change.Encode(schema.ImpliedType()) - if err != nil { - return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err) - } - - changes.AppendResourceInstanceChange(csrc) - if n.DeposedKey == states.NotDeposed { - log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr) - } else { - log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go deleted file mode 100644 index 470f798b7f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -// EvalReturnError is an EvalNode implementation that returns an -// error if it is present. 
-//
-// This is useful for scenarios where an error has been captured by
-// another EvalNode (like EvalApply) for special EvalTree-based error
-// handling, and that handling has completed, so the error should be
-// returned normally.
-type EvalReturnError struct {
- Error *error
-}
-
-func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) {
- if n.Error == nil {
- return nil, nil
- }
-
- return nil, *n.Error
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go
deleted file mode 100644
index 711c625c83..0000000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package terraform

-// EvalNodeFilterFunc is the callback used to replace a node with
-// another node. To not do the replacement, just return the input node.
-type EvalNodeFilterFunc func(EvalNode) EvalNode
-
-// EvalNodeFilterable is an interface that can be implemented by
-// EvalNodes to allow filtering of sub-elements. Note that this isn't
-// a common thing to implement and you probably don't need it.
-type EvalNodeFilterable interface {
- EvalNode
- Filter(EvalNodeFilterFunc)
-}
-
-// EvalFilter runs the filter on the given node and returns the
-// final filtered value. This should be called rather than checking
-// the EvalNode directly since this will properly handle EvalNodeFilterables.
-func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode {
- if f, ok := node.(EvalNodeFilterable); ok {
- f.Filter(fn)
- return node
- }
-
- return fn(node)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go
deleted file mode 100644
index 1a55f024a8..0000000000
--- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package terraform

-// EvalNodeOpFilterable is an interface that EvalNodes can implement
-// to be filterable by the operation that is being run on Terraform.
-type EvalNodeOpFilterable interface {
- IncludeInOp(walkOperation) bool
-}
-
-// EvalNodeFilterOp returns a filter function that filters nodes that
-// include themselves in specific operations.
-func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc {
- return func(n EvalNode) EvalNode {
- include := true
- if of, ok := n.(EvalNodeOpFilterable); ok {
- include = of.IncludeInOp(op)
- }
- if include {
- return n
- }
-
- return EvalNoop{}
- }
-}
-
-// EvalOpFilter is an EvalNode implementation that is a proxy to
-// another node but filters based on the operation.
-type EvalOpFilter struct {
- // Ops is the list of operations to include this node in.
- Ops []walkOperation
-
- // Node is the node to execute
- Node EvalNode
-}
-
-// TODO: test
-func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) {
- return EvalRaw(n.Node, ctx)
-}
-
-// EvalNodeOpFilterable impl.
-func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool { - for _, v := range n.Ops { - if v == op { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go deleted file mode 100644 index a63389a914..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go +++ /dev/null @@ -1,95 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// evaluateResourceForEachExpression interprets a "for_each" argument on a resource. -// -// Returns a cty.Value map, and diagnostics if necessary. It will return nil if -// the expression is nil, and is used to distinguish between an unset for_each and an -// empty map -func evaluateResourceForEachExpression(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, diags tfdiags.Diagnostics) { - forEachMap, known, diags := evaluateResourceForEachExpressionKnown(expr, ctx) - if !known { - // Attach a diag as we do with count, with the same downsides - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: `The "for_each" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the for_each depends on.`, - Subject: expr.Range().Ptr(), - }) - } - return forEachMap, diags -} - -// evaluateResourceForEachExpressionKnown is like evaluateResourceForEachExpression -// except that it handles an unknown result by returning an empty map and -// a known = false, rather than by reporting the unknown value as an error -// diagnostic. -func evaluateResourceForEachExpressionKnown(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, known bool, diags tfdiags.Diagnostics) { - if expr == nil { - return nil, true, nil - } - - forEachVal, forEachDiags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) - diags = diags.Append(forEachDiags) - if diags.HasErrors() { - return nil, true, diags - } - - switch { - case forEachVal.IsNull(): - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: `The given "for_each" argument value is unsuitable: the given "for_each" argument value is null. 
A map, or set of strings is allowed.`, - Subject: expr.Range().Ptr(), - }) - return nil, true, diags - case !forEachVal.IsKnown(): - return map[string]cty.Value{}, false, diags - } - - if !forEachVal.CanIterateElements() || forEachVal.Type().IsListType() || forEachVal.Type().IsTupleType() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: the "for_each" argument must be a map, or set of strings, and you have provided a value of type %s.`, forEachVal.Type().FriendlyName()), - Subject: expr.Range().Ptr(), - }) - return nil, true, diags - } - - // If the map is empty ({}), return an empty map, because cty will return nil when representing {} AsValueMap - // This also covers an empty set (toset([])) - if forEachVal.LengthInt() == 0 { - return map[string]cty.Value{}, true, diags - } - - if forEachVal.Type().IsSetType() { - if forEachVal.Type().ElementType() != cty.String { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each set argument", - Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" supports maps and sets of strings, but you have provided a set containing type %s.`, forEachVal.Type().ElementType().FriendlyName()), - Subject: expr.Range().Ptr(), - }) - return nil, true, diags - } - - // A set may contain unknown values that must be - // discovered by checking with IsWhollyKnown (which iterates through the - // structure), while for maps in cty, keys can never be unknown or null, - // thus the earlier IsKnown check suffices for maps - if !forEachVal.IsWhollyKnown() { - return map[string]cty.Value{}, false, diags - } - } - - return forEachVal.AsValueMap(), true, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go deleted file mode 100644 index d6b46a1f22..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go +++ /dev/null @@ -1,26 +0,0 @@ -package terraform - -// EvalIf is an EvalNode that is a conditional. -type EvalIf struct { - If func(EvalContext) (bool, error) - Then EvalNode - Else EvalNode -} - -// TODO: test -func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) { - yes, err := n.If(ctx) - if err != nil { - return nil, err - } - - if yes { - return EvalRaw(n.Then, ctx) - } else { - if n.Else != nil { - return EvalRaw(n.Else, ctx) - } - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go deleted file mode 100644 index 25a2aae06b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go +++ /dev/null @@ -1,95 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalImportState is an EvalNode implementation that performs an -// ImportState operation on a provider. This will return the imported -// states but won't modify any actual state. 
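Before the import node's definition below, a brief aside on the for_each acceptance rules enforced in evaluateResourceForEachExpressionKnown above. The following is a rough summary sketch using go-cty; forEachKind is a hypothetical helper, not the SDK's:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// forEachKind summarizes the rules above: null values are errors, unknown
// values defer the decision to apply, and only maps (or object values) and
// sets of strings are usable; lists and tuples are rejected.
func forEachKind(v cty.Value) string {
	switch {
	case v.IsNull():
		return "error: for_each must not be null"
	case !v.IsKnown():
		return "deferred: value not known until apply"
	case v.Type().IsMapType() || v.Type().IsObjectType():
		return "ok: map"
	case v.Type().IsSetType() && v.Type().ElementType() == cty.String:
		return "ok: set of strings"
	default:
		return "error: must be a map or set of strings"
	}
}

func main() {
	fmt.Println(forEachKind(cty.MapValEmpty(cty.String)))                  // ok: map
	fmt.Println(forEachKind(cty.SetVal([]cty.Value{cty.StringVal("a")}))) // ok: set of strings
	fmt.Println(forEachKind(cty.ListVal([]cty.Value{cty.StringVal("a")}))) // error: must be a map or set of strings
	fmt.Println(forEachKind(cty.UnknownVal(cty.Map(cty.String))))          // deferred: value not known until apply
}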
-type EvalImportState struct { - Addr addrs.ResourceInstance - Provider *providers.Interface - ID string - Output *[]providers.ImportedResource -} - -// TODO: test -func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - provider := *n.Provider - var diags tfdiags.Diagnostics - - { - // Call pre-import hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreImportState(absAddr, n.ID) - }) - if err != nil { - return nil, err - } - } - - resp := provider.ImportResourceState(providers.ImportResourceStateRequest{ - TypeName: n.Addr.Resource.Type, - ID: n.ID, - }) - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.Err() - } - - imported := resp.ImportedResources - - for _, obj := range imported { - log.Printf("[TRACE] EvalImportState: import %s %q produced instance object of type %s", absAddr.String(), n.ID, obj.TypeName) - } - - if n.Output != nil { - *n.Output = imported - } - - { - // Call post-import hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostImportState(absAddr, imported) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// EvalImportStateVerify verifies the state after ImportState and -// after the refresh to make sure it is non-nil and valid. -type EvalImportStateVerify struct { - Addr addrs.ResourceInstance - State **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - state := *n.State - if state.Value.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Cannot import non-existent remote object", - fmt.Sprintf( - "While attempting to import an existing object to %s, the provider detected that no object exists with the given id. Only pre-existing objects can be imported; check that the id is correct and that it is associated with the provider's configured region or endpoint, or use \"terraform apply\" to create a new remote object for this resource.", - n.Addr.String(), - ), - )) - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go deleted file mode 100644 index 5ab6b44f5c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go +++ /dev/null @@ -1,61 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// EvalConfigBlock is an EvalNode implementation that takes a raw -// configuration block and evaluates any expressions within it. -// -// ExpandedConfig is populated with the result of expanding any "dynamic" -// blocks in the given body, which can be useful for extracting correct source -// location information for specific attributes in the result. 
-type EvalConfigBlock struct { - Config *hcl.Body - Schema *configschema.Block - SelfAddr addrs.Referenceable - Output *cty.Value - ExpandedConfig *hcl.Body - ContinueOnErr bool -} - -func (n *EvalConfigBlock) Eval(ctx EvalContext) (interface{}, error) { - val, body, diags := ctx.EvaluateBlock(*n.Config, n.Schema, n.SelfAddr, EvalDataForNoInstanceKey) - if diags.HasErrors() && n.ContinueOnErr { - log.Printf("[WARN] Block evaluation failed: %s", diags.Err()) - return nil, EvalEarlyExitError{} - } - - if n.Output != nil { - *n.Output = val - } - if n.ExpandedConfig != nil { - *n.ExpandedConfig = body - } - - return nil, diags.ErrWithWarnings() -} - -// EvalConfigExpr is an EvalNode implementation that takes a raw configuration -// expression and evaluates it. -type EvalConfigExpr struct { - Expr hcl.Expression - SelfAddr addrs.Referenceable - Output *cty.Value -} - -func (n *EvalConfigExpr) Eval(ctx EvalContext) (interface{}, error) { - val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, n.SelfAddr) - - if n.Output != nil { - *n.Output = val - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go deleted file mode 100644 index 0310193802..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go +++ /dev/null @@ -1,74 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalLocal is an EvalNode implementation that evaluates the -// expression for a local value and writes it into a transient part of -// the state. -type EvalLocal struct { - Addr addrs.LocalValue - Expr hcl.Expression -} - -func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - // We ignore diags here because any problems we might find will be found - // again in EvaluateExpr below. - refs, _ := lang.ReferencesInExpr(n.Expr) - for _, ref := range refs { - if ref.Subject == n.Addr { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Self-referencing local value", - Detail: fmt.Sprintf("Local value %s cannot use its own result as part of its expression.", n.Addr), - Subject: ref.SourceRange.ToHCL().Ptr(), - Context: n.Expr.Range().Ptr(), - }) - } - } - if diags.HasErrors() { - return nil, diags.Err() - } - - val, moreDiags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - return nil, diags.Err() - } - - state := ctx.State() - if state == nil { - return nil, fmt.Errorf("cannot write local value to nil state") - } - - state.SetLocalValue(n.Addr.Absolute(ctx.Path()), val) - - return nil, nil -} - -// EvalDeleteLocal is an EvalNode implementation that deletes a Local value -// from the state. Locals aren't persisted, but we don't need to evaluate them -// during destroy. 
-type EvalDeleteLocal struct { - Addr addrs.LocalValue -} - -func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) { - state := ctx.State() - if state == nil { - return nil, nil - } - - state.RemoveLocalValue(n.Addr.Absolute(ctx.Path())) - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go deleted file mode 100644 index f4bc8225c5..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go +++ /dev/null @@ -1,8 +0,0 @@ -package terraform - -// EvalNoop is an EvalNode that does nothing. -type EvalNoop struct{} - -func (EvalNoop) Eval(EvalContext) (interface{}, error) { - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go deleted file mode 100644 index 9f71e92f65..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go +++ /dev/null @@ -1,135 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// EvalDeleteOutput is an EvalNode implementation that deletes an output -// from the state. -type EvalDeleteOutput struct { - Addr addrs.OutputValue -} - -// TODO: test -func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { - state := ctx.State() - if state == nil { - return nil, nil - } - - state.RemoveOutputValue(n.Addr.Absolute(ctx.Path())) - return nil, nil -} - -// EvalWriteOutput is an EvalNode implementation that writes the output -// for the given name to the current state. -type EvalWriteOutput struct { - Addr addrs.OutputValue - Sensitive bool - Expr hcl.Expression - // ContinueOnErr allows interpolation to fail during Input - ContinueOnErr bool -} - -// TODO: test -func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { - addr := n.Addr.Absolute(ctx.Path()) - - // This has to run before we have a state lock, since evaluation also - // reads the state - val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) - // We'll handle errors below, after we have loaded the module. - - state := ctx.State() - if state == nil { - return nil, nil - } - - changes := ctx.Changes() // may be nil, if we're not working on a changeset - - // handling the interpolation error - if diags.HasErrors() { - if n.ContinueOnErr || flagWarnOutputErrors { - log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err()) - // if we're continuing, make sure the output is included, and - // marked as unknown. If the evaluator was able to find a type - // for the value in spite of the error then we'll use it. - n.setValue(addr, state, changes, cty.UnknownVal(val.Type())) - return nil, EvalEarlyExitError{} - } - return nil, diags.Err() - } - - n.setValue(addr, state, changes, val) - - return nil, nil -} - -func (n *EvalWriteOutput) setValue(addr addrs.AbsOutputValue, state *states.SyncState, changes *plans.ChangesSync, val cty.Value) { - if val.IsKnown() && !val.IsNull() { - // The state itself doesn't represent unknown values, so we null them - // out here and then we'll save the real unknown value in the planned - // changeset below, if we have one on this graph walk. 
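The null-out step this comment describes (and which the code below performs) is cty.UnknownAsNull; a small demonstration of its effect:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// An output whose value is only partially known during plan: the
	// "ip" attribute will be decided by the provider at apply time.
	val := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("web"),
		"ip":   cty.UnknownVal(cty.String),
	})

	// The state file cannot represent unknowns, so they become nulls
	// there; the changeset keeps the real unknown value instead.
	stateVal := cty.UnknownAsNull(val)

	fmt.Println(stateVal.GetAttr("ip").IsNull()) // true
	fmt.Println(val.GetAttr("ip").IsKnown())     // false
}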
- log.Printf("[TRACE] EvalWriteOutput: Saving value for %s in state", addr) - stateVal := cty.UnknownAsNull(val) - state.SetOutputValue(addr, stateVal, n.Sensitive) - } else { - log.Printf("[TRACE] EvalWriteOutput: Removing %s from state (it is now null)", addr) - state.RemoveOutputValue(addr) - } - - // If we also have an active changeset then we'll replicate the value in - // there. This is used in preference to the state where present, since it - // *is* able to represent unknowns, while the state cannot. - if changes != nil { - // For the moment we are not properly tracking changes to output - // values, and just marking them always as "Create" or "Destroy" - // actions. A future release will rework the output lifecycle so we - // can track their changes properly, in a similar way to how we work - // with resource instances. - - var change *plans.OutputChange - if !val.IsNull() { - change = &plans.OutputChange{ - Addr: addr, - Sensitive: n.Sensitive, - Change: plans.Change{ - Action: plans.Create, - Before: cty.NullVal(cty.DynamicPseudoType), - After: val, - }, - } - } else { - change = &plans.OutputChange{ - Addr: addr, - Sensitive: n.Sensitive, - Change: plans.Change{ - // This is just a weird placeholder delete action since - // we don't have an actual prior value to indicate. - // FIXME: Generate real planned changes for output values - // that include the old values. - Action: plans.Delete, - Before: cty.NullVal(cty.DynamicPseudoType), - After: cty.NullVal(cty.DynamicPseudoType), - }, - } - } - - cs, err := change.Encode() - if err != nil { - // Should never happen, since we just constructed this right above - panic(fmt.Sprintf("planned change for %s could not be encoded: %s", addr, err)) - } - log.Printf("[TRACE] EvalWriteOutput: Saving %s change for %s in changeset", change.Action, addr) - changes.RemoveOutputChange(addr) // remove any existing planned change, if present - changes.AppendOutputChange(cs) // add the new planned change - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go deleted file mode 100644 index 7440cff7ad..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go +++ /dev/null @@ -1,147 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func buildProviderConfig(ctx EvalContext, addr addrs.ProviderConfig, config *configs.Provider) hcl.Body { - var configBody hcl.Body - if config != nil { - configBody = config.Config - } - - var inputBody hcl.Body - inputConfig := ctx.ProviderInput(addr) - if len(inputConfig) > 0 { - inputBody = configs.SynthBody("", inputConfig) - } - - switch { - case configBody != nil && inputBody != nil: - log.Printf("[TRACE] buildProviderConfig for %s: merging explicit config and input", addr) - // Note that the inputBody is the _base_ here, because configs.MergeBodies - // expects the base have all of the required fields, while these are - // forced to be optional for the override. The input process should - // guarantee that we have a value for each of the required arguments and - // that in practice the sets of attributes in each body will be - // disjoint. 
- return configs.MergeBodies(inputBody, configBody) - case configBody != nil: - log.Printf("[TRACE] buildProviderConfig for %s: using explicit config only", addr) - return configBody - case inputBody != nil: - log.Printf("[TRACE] buildProviderConfig for %s: using input only", addr) - return inputBody - default: - log.Printf("[TRACE] buildProviderConfig for %s: no configuration at all", addr) - return hcl.EmptyBody() - } -} - -// EvalConfigProvider is an EvalNode implementation that configures -// a provider that is already initialized and retrieved. -type EvalConfigProvider struct { - Addr addrs.ProviderConfig - Provider *providers.Interface - Config *configs.Provider -} - -func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil { - return nil, fmt.Errorf("EvalConfigProvider Provider is nil") - } - - var diags tfdiags.Diagnostics - provider := *n.Provider - config := n.Config - - configBody := buildProviderConfig(ctx, n.Addr, config) - - resp := provider.GetSchema() - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.NonFatalErr() - } - - configSchema := resp.Provider.Block - configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return nil, diags.NonFatalErr() - } - - configDiags := ctx.ConfigureProvider(n.Addr, configVal) - configDiags = configDiags.InConfigBody(configBody) - - return nil, configDiags.ErrWithWarnings() -} - -// EvalInitProvider is an EvalNode implementation that initializes a provider -// and returns nothing. The provider can be retrieved again with the -// EvalGetProvider node. -type EvalInitProvider struct { - TypeName string - Addr addrs.ProviderConfig -} - -func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvider(n.TypeName, n.Addr) -} - -// EvalCloseProvider is an EvalNode implementation that closes provider -// connections that aren't needed anymore. -type EvalCloseProvider struct { - Addr addrs.ProviderConfig -} - -func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { - ctx.CloseProvider(n.Addr) - return nil, nil -} - -// EvalGetProvider is an EvalNode implementation that retrieves an already -// initialized provider instance for the given name. -// -// Unlike most eval nodes, this takes an _absolute_ provider configuration, -// because providers can be passed into and inherited between modules. -// Resource nodes must therefore know the absolute path of the provider they -// will use, which is usually accomplished by implementing -// interface GraphNodeProviderConsumer. -type EvalGetProvider struct { - Addr addrs.AbsProviderConfig - Output *providers.Interface - - // If non-nil, Schema will be updated after eval to refer to the - // schema of the provider. 
- Schema **ProviderSchema -} - -func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { - if n.Addr.ProviderConfig.Type == "" { - // Should never happen - panic("EvalGetProvider used with uninitialized provider configuration address") - } - - result := ctx.Provider(n.Addr) - if result == nil { - return nil, fmt.Errorf("provider %s not initialized", n.Addr) - } - - if n.Output != nil { - *n.Output = result - } - - if n.Schema != nil { - *n.Schema = ctx.ProviderSchema(n.Addr) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go deleted file mode 100644 index 405ce9d0bd..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go +++ /dev/null @@ -1,55 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" -) - -// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner -// and returns nothing. The provisioner can be retrieved again with the -// EvalGetProvisioner node. -type EvalInitProvisioner struct { - Name string -} - -func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvisioner(n.Name) -} - -// EvalCloseProvisioner is an EvalNode implementation that closes provisioner -// connections that aren't needed anymore. -type EvalCloseProvisioner struct { - Name string -} - -func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) { - ctx.CloseProvisioner(n.Name) - return nil, nil -} - -// EvalGetProvisioner is an EvalNode implementation that retrieves an already -// initialized provisioner instance for the given name. -type EvalGetProvisioner struct { - Name string - Output *provisioners.Interface - Schema **configschema.Block -} - -func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { - result := ctx.Provisioner(n.Name) - if result == nil { - return nil, fmt.Errorf("provisioner %s not initialized", n.Name) - } - - if n.Output != nil { - *n.Output = result - } - - if n.Schema != nil { - *n.Schema = ctx.ProvisionerSchema(n.Name) - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go deleted file mode 100644 index 0b734b793f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go +++ /dev/null @@ -1,395 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalReadData is an EvalNode implementation that deals with the main part -// of the data resource lifecycle: either actually reading from the data source -// or generating a plan to do so. 
-type EvalReadData struct {
- Addr addrs.ResourceInstance
- Config *configs.Resource
- Dependencies []addrs.Referenceable
- Provider *providers.Interface
- ProviderAddr addrs.AbsProviderConfig
- ProviderSchema **ProviderSchema
-
- // Planned is set when dealing with data resources that were deferred to
- // the apply walk, to let us see what was planned. If this is set, the
- // evaluation of the config is required to produce a wholly-known
- // configuration which is consistent with the partial object included
- // in this planned change.
- Planned **plans.ResourceInstanceChange
-
- // ForcePlanRead, if true, overrides the usual behavior of immediately
- // reading from the data source where possible, instead forcing us to
- // _always_ generate a plan. This is used during the plan walk, since we
- // mustn't actually apply anything there. (The resulting state doesn't
- // get persisted)
- ForcePlanRead bool
-
- // The result from this EvalNode has a few different possibilities
- // depending on the input:
- // - If Planned is nil then we assume we're aiming to _produce_ the plan,
- // and so the following two outcomes are possible:
- // - OutputChange.Action is plans.NoOp and OutputState is the complete
- // result of reading from the data source. This is the easy path.
- // - OutputChange.Action is plans.Read and OutputState is a planned
- // object placeholder (states.ObjectPlanned). In this case, the
- // returned change must be recorded in the overall changeset and
- // eventually passed to another instance of this struct during the
- // apply walk.
- // - If Planned is non-nil then we assume we're aiming to complete a
- // planned read from an earlier plan walk. In this case the only possible
- // non-error outcome is to set Output.Action (if non-nil) to a plans.NoOp
- // change and put the complete resulting state in OutputState, ready to
- // be saved in the overall state and used for expression evaluation.
- OutputChange **plans.ResourceInstanceChange
- OutputValue *cty.Value
- OutputConfigValue *cty.Value
- OutputState **states.ResourceInstanceObject
-}
-
-func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) {
- absAddr := n.Addr.Absolute(ctx.Path())
- log.Printf("[TRACE] EvalReadData: working on %s", absAddr)
-
- if n.ProviderSchema == nil || *n.ProviderSchema == nil {
- return nil, fmt.Errorf("provider schema not available for %s", n.Addr)
- }
-
- var diags tfdiags.Diagnostics
- var change *plans.ResourceInstanceChange
- var configVal cty.Value
-
- // TODO: Do we need to handle Delete changes here? EvalReadDataDiff and
- // EvalReadDataApply did, but it seems like we should handle that via a
- // separate mechanism since it boils down to just deleting the object from
- // the state... and we do that on every plan anyway, forcing the data
- // resource to re-read.
-
- config := *n.Config
- provider := *n.Provider
- providerSchema := *n.ProviderSchema
- schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
- if schema == nil {
- // Should be caught during validation, so we don't bother with a pretty error here
- return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.ProviderConfig.Type, n.Addr.Resource.Type)
- }
-
- // We'll always start by evaluating the configuration. What we do after
- // that will depend on the evaluation result along with what other inputs
- // we were given.
- objTy := schema.ImpliedType()
- priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time
-
- forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
- keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
-
- var configDiags tfdiags.Diagnostics
- configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData)
- diags = diags.Append(configDiags)
- if configDiags.HasErrors() {
- return nil, diags.Err()
- }
-
- proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal)
-
- // If our configuration contains any unknown values then we must defer the
- // read to the apply phase by producing a "Read" change for this resource,
- // and a placeholder value for it in the state.
- if n.ForcePlanRead || !configVal.IsWhollyKnown() {
- // If the configuration is still unknown when we're applying a planned
- // change then that indicates a bug in Terraform, since we should have
- // everything resolved by now.
- if n.Planned != nil && *n.Planned != nil {
- return nil, fmt.Errorf(
- "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)",
- absAddr,
- )
- }
- if n.ForcePlanRead {
- log.Printf("[TRACE] EvalReadData: %s configuration is fully known, but we're forcing a read plan to be created", absAddr)
- } else {
- log.Printf("[TRACE] EvalReadData: %s configuration not fully known yet, so deferring to apply phase", absAddr)
- }
-
- err := ctx.Hook(func(h Hook) (HookAction, error) {
- return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal)
- })
- if err != nil {
- return nil, err
- }
-
- change = &plans.ResourceInstanceChange{
- Addr: absAddr,
- ProviderAddr: n.ProviderAddr,
- Change: plans.Change{
- Action: plans.Read,
- Before: priorVal,
- After: proposedNewVal,
- },
- }
-
- err = ctx.Hook(func(h Hook) (HookAction, error) {
- return h.PostDiff(absAddr, states.CurrentGen, change.Action, priorVal, proposedNewVal)
- })
- if err != nil {
- return nil, err
- }
-
- if n.OutputChange != nil {
- *n.OutputChange = change
- }
- if n.OutputValue != nil {
- *n.OutputValue = change.After
- }
- if n.OutputConfigValue != nil {
- *n.OutputConfigValue = configVal
- }
- if n.OutputState != nil {
- state := &states.ResourceInstanceObject{
- Value: change.After,
- Status: states.ObjectPlanned, // because the partial value in the plan must be used for now
- Dependencies: n.Dependencies,
- }
- *n.OutputState = state
- }
-
- return nil, diags.ErrWithWarnings()
- }
-
- if n.Planned != nil && *n.Planned != nil && (*n.Planned).Action != plans.Read {
- // If any other action gets in here then that's always a bug; this
- // EvalNode only deals with reading.
- return nil, fmt.Errorf(
- "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)",
- (*n.Planned).Action, absAddr,
- )
- }
-
- log.Printf("[TRACE] Re-validating config for %s", absAddr)
- validateResp := provider.ValidateDataSourceConfig(
- providers.ValidateDataSourceConfigRequest{
- TypeName: n.Addr.Resource.Type,
- Config: configVal,
- },
- )
- if validateResp.Diagnostics.HasErrors() {
- return nil, validateResp.Diagnostics.InConfigBody(n.Config.Config).Err()
- }
-
- // If we get down here then our configuration is complete and we're ready
- // to actually call the provider to read the data.
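For reference, the read-now-versus-defer gate earlier in this function is just a wholly-known check on the evaluated configuration; a minimal illustration with go-cty:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A data source configuration that references a resource attribute
	// which is not yet created, so part of it is unknown during plan.
	cfg := cty.ObjectVal(map[string]cty.Value{
		"bucket": cty.UnknownVal(cty.String),
		"key":    cty.StringVal("state.json"),
	})

	if cfg.IsWhollyKnown() {
		fmt.Println("read the data source now, during plan")
	} else {
		fmt.Println("defer: record a Read change and re-evaluate at apply") // this branch
	}
}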
- log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr) - - err := ctx.Hook(func(h Hook) (HookAction, error) { - // We don't have a state yet, so we'll just give the hook an - // empty one to work with. - return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) - }) - if err != nil { - return nil, err - } - - resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) - if diags.HasErrors() { - return nil, diags.Err() - } - newVal := resp.State - if newVal == cty.NilVal { - // This can happen with incompletely-configured mocks. We'll allow it - // and treat it as an alias for a properly-typed null value. - newVal = cty.NullVal(schema.ImpliedType()) - } - - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - if newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced null object", - fmt.Sprintf( - "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, - ), - )) - } - if !newVal.IsWhollyKnown() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, - ), - )) - - // We'll still save the object, but we need to eliminate any unknown - // values first because we can't serialize them in the state file. - // Note that this may cause set elements to be coalesced if they - // differed only by having unknown values, but we don't worry about - // that here because we're saving the value only for inspection - // purposes; the error we added above will halt the graph walk. - newVal = cty.UnknownAsNull(newVal) - } - - // Since we've completed the read, we actually have no change to make, but - // we'll produce a NoOp one anyway to preserve the usual flow of the - // plan phase and allow it to produce a complete plan. 
- change = &plans.ResourceInstanceChange{ - Addr: absAddr, - ProviderAddr: n.ProviderAddr, - Change: plans.Change{ - Action: plans.NoOp, - Before: newVal, - After: newVal, - }, - } - state := &states.ResourceInstanceObject{ - Value: change.After, - Status: states.ObjectReady, // because we completed the read from the provider - Dependencies: n.Dependencies, - } - - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) - }) - if err != nil { - return nil, err - } - - if n.OutputChange != nil { - *n.OutputChange = change - } - if n.OutputValue != nil { - *n.OutputValue = change.After - } - if n.OutputConfigValue != nil { - *n.OutputConfigValue = configVal - } - if n.OutputState != nil { - *n.OutputState = state - } - - return nil, diags.ErrWithWarnings() -} - -// EvalReadDataApply is an EvalNode implementation that executes a data -// resource's ReadDataApply method to read data from the data source. -type EvalReadDataApply struct { - Addr addrs.ResourceInstance - Provider *providers.Interface - ProviderAddr addrs.AbsProviderConfig - ProviderSchema **ProviderSchema - Output **states.ResourceInstanceObject - Config *configs.Resource - Change **plans.ResourceInstanceChange - StateReferences []addrs.Referenceable -} - -func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { - provider := *n.Provider - change := *n.Change - providerSchema := *n.ProviderSchema - absAddr := n.Addr.Absolute(ctx.Path()) - - var diags tfdiags.Diagnostics - - // If the diff is for *destroying* this resource then we'll - // just drop its state and move on, since data resources don't - // support an actual "destroy" action. - if change != nil && change.Action == plans.Delete { - if n.Output != nil { - *n.Output = nil - } - return nil, nil - } - - // For the purpose of external hooks we present a data apply as a - // "Refresh" rather than an "Apply" because creating a data source - // is presented to users/callers as a "read" operation. - err := ctx.Hook(func(h Hook) (HookAction, error) { - // We don't have a state yet, so we'll just give the hook an - // empty one to work with. - return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) - }) - if err != nil { - return nil, err - } - - resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: n.Addr.Resource.Type, - Config: change.After, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) - if diags.HasErrors() { - return nil, diags.Err() - } - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support data source %q", n.Addr.Resource.Type) - } - - newVal := resp.State - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q planned an invalid value for %s. 
The result could not be saved.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) - }) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = &states.ResourceInstanceObject{ - Value: newVal, - Status: states.ObjectReady, - Dependencies: n.StateReferences, - } - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go deleted file mode 100644 index 6a834445c3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go +++ /dev/null @@ -1,106 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalRefresh is an EvalNode implementation that does a refresh for -// a resource. -type EvalRefresh struct { - Addr addrs.ResourceInstance - ProviderAddr addrs.AbsProviderConfig - Provider *providers.Interface - ProviderSchema **ProviderSchema - State **states.ResourceInstanceObject - Output **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - absAddr := n.Addr.Absolute(ctx.Path()) - - var diags tfdiags.Diagnostics - - // If we have no state, we don't do any refreshing - if state == nil { - log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", n.Addr.Absolute(ctx.Path())) - return nil, diags.ErrWithWarnings() - } - - schema, _ := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - // Call pre-refresh hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreRefresh(absAddr, states.CurrentGen, state.Value) - }) - if err != nil { - return nil, diags.ErrWithWarnings() - } - - // Refresh! - priorVal := state.Value - req := providers.ReadResourceRequest{ - TypeName: n.Addr.Resource.Type, - PriorState: priorVal, - Private: state.Private, - } - - provider := *n.Provider - resp := provider.ReadResource(req) - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.Err() - } - - if resp.NewState == cty.NilVal { - // This ought not to happen in real cases since it's not possible to - // send NilVal over the plugin RPC channel, but it can come up in - // tests due to sloppy mocking. 
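As an aside, the conformance loop used by this and the other nodes comes straight from go-cty; a small sketch of checking a provider result against a schema's implied type:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// The type the schema implies for this resource...
	want := cty.Object(map[string]cty.Type{"id": cty.String})

	// ...and a (buggy) value a provider might return for it.
	got := cty.ObjectVal(map[string]cty.Value{"id": cty.NumberIntVal(1)})

	// TestConformance reports every way the value's type deviates from
	// the wanted type; each error becomes a "provider bug" diagnostic.
	for _, err := range got.Type().TestConformance(want) {
		fmt.Println(err)
	}
}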
- panic("new state is cty.NilVal") - } - - for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - newState := state.DeepCopy() - newState.Value = resp.NewState - newState.Private = resp.Private - - // Call post-refresh hook - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, priorVal, newState.Value) - }) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = newState - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go deleted file mode 100644 index 7d6bb6603b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go +++ /dev/null @@ -1,42 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalSequence is an EvalNode that evaluates in sequence. -type EvalSequence struct { - Nodes []EvalNode -} - -func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - for _, n := range n.Nodes { - if n == nil { - continue - } - - if _, err := EvalRaw(n, ctx); err != nil { - if _, isEarlyExit := err.(EvalEarlyExitError); isEarlyExit { - // In this path we abort early, losing any non-error - // diagnostics we saw earlier. - return nil, err - } - diags = diags.Append(err) - if diags.HasErrors() { - // Halt if we get some errors, but warnings are okay. - break - } - } - } - - return nil, diags.ErrWithWarnings() -} - -// EvalNodeFilterable impl. -func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) { - for i, node := range n.Nodes { - n.Nodes[i] = fn(node) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go deleted file mode 100644 index 70a72bbdbc..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go +++ /dev/null @@ -1,475 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalReadState is an EvalNode implementation that reads the -// current object for a specific instance in the state. -type EvalReadState struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // ProviderSchema is the schema for the provider given in Provider. - ProviderSchema **ProviderSchema - - // Provider is the provider that will subsequently perform actions on - // the the state object. This is used to perform any schema upgrades - // that might be required to prepare the stored data for use. - Provider *providers.Interface - - // Output will be written with a pointer to the retrieved object. 
- Output **states.ResourceInstanceObject -} - -func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil || *n.Provider == nil { - panic("EvalReadState used with no Provider object") - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - panic("EvalReadState used with no ProviderSchema object") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - log.Printf("[TRACE] EvalReadState: reading state for %s", absAddr) - - src := ctx.State().ResourceInstanceObject(absAddr, states.CurrentGen) - if src == nil { - // Presumably we only have deposed objects, then. - log.Printf("[TRACE] EvalReadState: no state present for %s", absAddr) - return nil, nil - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) - } - var diags tfdiags.Diagnostics - src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) - if diags.HasErrors() { - // Note that we don't have any channel to return warnings here. We'll - // accept that for now since warnings during a schema upgrade would - // be pretty weird anyway, since this operation is supposed to seem - // invisible to the user. - return nil, diags.Err() - } - - obj, err := src.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = obj - } - return obj, nil -} - -// EvalReadStateDeposed is an EvalNode implementation that reads the -// deposed InstanceState for a specific resource out of the state -type EvalReadStateDeposed struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // Key identifies which deposed object we will read. - Key states.DeposedKey - - // ProviderSchema is the schema for the provider given in Provider. - ProviderSchema **ProviderSchema - - // Provider is the provider that will subsequently perform actions on - // the state object. This is used to perform any schema upgrades - // that might be required to prepare the stored data for use. - Provider *providers.Interface - - // Output will be written with a pointer to the retrieved object. - Output **states.ResourceInstanceObject -} - -func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil || *n.Provider == nil { - panic("EvalReadStateDeposed used with no Provider object") - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - panic("EvalReadStateDeposed used with no ProviderSchema object") - } - - key := n.Key - if key == states.NotDeposed { - return nil, fmt.Errorf("EvalReadStateDeposed used with no instance key; this is a bug in Terraform and should be reported") - } - absAddr := n.Addr.Absolute(ctx.Path()) - log.Printf("[TRACE] EvalReadStateDeposed: reading state for %s deposed object %s", absAddr, n.Key) - - src := ctx.State().ResourceInstanceObject(absAddr, key) - if src == nil { - // Presumably we only have deposed objects, then.
- log.Printf("[TRACE] EvalReadStateDeposed: no state present for %s deposed object %s", absAddr, n.Key) - return nil, nil - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) - } - var diags tfdiags.Diagnostics - src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) - if diags.HasErrors() { - // Note that we don't have any channel to return warnings here. We'll - // accept that for now since warnings during a schema upgrade would - // be pretty weird anyway, since this operation is supposed to seem - // invisible to the user. - return nil, diags.Err() - } - - obj, err := src.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - if n.Output != nil { - *n.Output = obj - } - return obj, nil -} - -// EvalRequireState is an EvalNode implementation that exits early if the given -// object is null. -type EvalRequireState struct { - State **states.ResourceInstanceObject -} - -func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - return nil, EvalEarlyExitError{} - } - - state := *n.State - if state == nil || state.Value.IsNull() { - return nil, EvalEarlyExitError{} - } - - return nil, nil -} - -// EvalUpdateStateHook is an EvalNode implementation that calls the -// PostStateUpdate hook with the current state. -type EvalUpdateStateHook struct{} - -func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { - // In principle we could grab the lock here just long enough to take a - // deep copy and then pass that to our hooks below, but we'll instead - // hold the hook for the duration to avoid the potential confusing - // situation of us racing to call PostStateUpdate concurrently with - // different state snapshots. - stateSync := ctx.State() - state := stateSync.Lock().DeepCopy() - defer stateSync.Unlock() - - // Call the hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostStateUpdate(state) - }) - if err != nil { - return nil, err - } - - return nil, nil -} - -// EvalWriteState is an EvalNode implementation that saves the given object -// as the current object for the selected resource instance. -type EvalWriteState struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // State is the object state to save. - State **states.ResourceInstanceObject - - // ProviderSchema is the schema for the provider given in ProviderAddr. - ProviderSchema **ProviderSchema - - // ProviderAddr is the address of the provider configuration that - // produced the given object. - ProviderAddr addrs.AbsProviderConfig -} - -func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - // Note that a pointer _to_ nil is valid here, indicating the total - // absense of an object as we'd see during destroy. - panic("EvalWriteState used with no ResourceInstanceObject") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - state := ctx.State() - - if n.ProviderAddr.ProviderConfig.Type == "" { - return nil, fmt.Errorf("failed to write state for %s, missing provider type", absAddr) - } - - obj := *n.State - if obj == nil || obj.Value.IsNull() { - // No need to encode anything: we'll just write it directly. 
- state.SetResourceInstanceCurrent(absAddr, nil, n.ProviderAddr) - log.Printf("[TRACE] EvalWriteState: removing state object for %s", absAddr) - return nil, nil - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - // Should never happen, unless our state object is nil - panic("EvalWriteState used with pointer to nil ProviderSchema object") - } - - if obj != nil { - log.Printf("[TRACE] EvalWriteState: writing current state object for %s", absAddr) - } else { - log.Printf("[TRACE] EvalWriteState: removing current state object for %s", absAddr) - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // It shouldn't be possible to get this far in any real scenario - // without a schema, but we might end up here in contrived tests that - // fail to set up their world properly. - return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) - } - src, err := obj.Encode(schema.ImpliedType(), currentVersion) - if err != nil { - return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) - } - - state.SetResourceInstanceCurrent(absAddr, src, n.ProviderAddr) - return nil, nil -} - -// EvalWriteStateDeposed is an EvalNode implementation that writes -// an InstanceState out to the Deposed list of a resource in the state. -type EvalWriteStateDeposed struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // Key indicates which deposed object to write to. - Key states.DeposedKey - - // State is the object state to save. - State **states.ResourceInstanceObject - - // ProviderSchema is the schema for the provider given in ProviderAddr. - ProviderSchema **ProviderSchema - - // ProviderAddr is the address of the provider configuration that - // produced the given object. - ProviderAddr addrs.AbsProviderConfig -} - -func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - // Note that a pointer _to_ nil is valid here, indicating the total - // absence of an object as we'd see during destroy. - panic("EvalWriteStateDeposed used with no ResourceInstanceObject") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - key := n.Key - state := ctx.State() - - if key == states.NotDeposed { - // should never happen - return nil, fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr) - } - - obj := *n.State - if obj == nil { - // No need to encode anything: we'll just write it directly. - state.SetResourceInstanceDeposed(absAddr, key, nil, n.ProviderAddr) - log.Printf("[TRACE] EvalWriteStateDeposed: removing state object for %s deposed %s", absAddr, key) - return nil, nil - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - // Should never happen, unless our state object is nil - panic("EvalWriteStateDeposed used with no ProviderSchema object") - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // It shouldn't be possible to get this far in any real scenario - // without a schema, but we might end up here in contrived tests that - // fail to set up their world properly.
- return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) - } - src, err := obj.Encode(schema.ImpliedType(), currentVersion) - if err != nil { - return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) - } - - log.Printf("[TRACE] EvalWriteStateDeposed: writing state object for %s deposed %s", absAddr, key) - state.SetResourceInstanceDeposed(absAddr, key, src, n.ProviderAddr) - return nil, nil -} - -// EvalDeposeState is an EvalNode implementation that moves the current object -// for the given instance to instead be a deposed object, leaving the instance -// with no current object. -// This is used at the beginning of a create-before-destroy replace action so -// that the create can create while preserving the old state of the -// to-be-destroyed object. -type EvalDeposeState struct { - Addr addrs.ResourceInstance - - // ForceKey, if a value other than states.NotDeposed, will be used as the - // key for the newly-created deposed object that results from this action. - // If set to states.NotDeposed (the zero value), a new unique key will be - // allocated. - ForceKey states.DeposedKey - - // OutputKey, if non-nil, will be written with the deposed object key that - // was generated for the object. This can then be passed to - // EvalUndeposeState.Key so it knows which deposed instance to forget. - OutputKey *states.DeposedKey -} - -// TODO: test -func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := ctx.State() - - var key states.DeposedKey - if n.ForceKey == states.NotDeposed { - key = state.DeposeResourceInstanceObject(absAddr) - } else { - key = n.ForceKey - state.DeposeResourceInstanceObjectForceKey(absAddr, key) - } - log.Printf("[TRACE] EvalDeposeState: prior object for %s now deposed with key %s", absAddr, key) - - if n.OutputKey != nil { - *n.OutputKey = key - } - - return nil, nil -} - -// EvalMaybeRestoreDeposedObject is an EvalNode implementation that will -// restore a particular deposed object of the specified resource instance -// to be the "current" object if and only if the instance doesn't currently -// have a current object. -// -// This is intended for use when the create leg of a create before destroy -// fails with no partial new object: if we didn't take any action, the user -// would be left in the unfortunate situation of having no current object -// and the previously-workign object now deposed. This EvalNode causes a -// better outcome by restoring things to how they were before the replace -// operation began. -// -// The create operation may have produced a partial result even though it -// failed and it's important that we don't "forget" that state, so in that -// situation the prior object remains deposed and the partial new object -// remains the current object, allowing the situation to hopefully be -// improved in a subsequent run. -type EvalMaybeRestoreDeposedObject struct { - Addr addrs.ResourceInstance - - // Key is a pointer to the deposed object key that should be forgotten - // from the state, which must be non-nil. 
- Key *states.DeposedKey -} - -// TODO: test -func (n *EvalMaybeRestoreDeposedObject) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - dk := *n.Key - state := ctx.State() - - restored := state.MaybeRestoreResourceInstanceDeposed(absAddr, dk) - if restored { - log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s was restored as the current object", absAddr, dk) - } else { - log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s remains deposed", absAddr, dk) - } - - return nil, nil -} - -// EvalWriteResourceState is an EvalNode implementation that ensures that -// a suitable resource-level state record is present in the state, if that's -// required for the "each mode" of that resource. -// -// This is important primarily for the situation where count = 0, since this -// eval is the only chance we get to set the resource "each mode" to list -// in that case, allowing expression evaluation to see it as a zero-element -// list rather than as not set at all. -type EvalWriteResourceState struct { - Addr addrs.Resource - Config *configs.Resource - ProviderAddr addrs.AbsProviderConfig -} - -// TODO: test -func (n *EvalWriteResourceState) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - absAddr := n.Addr.Absolute(ctx.Path()) - state := ctx.State() - - count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - - eachMode := states.NoEach - if count >= 0 { // -1 signals "count not set" - eachMode = states.EachList - } - - forEach, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - diags = diags.Append(forEachDiags) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - - if forEach != nil { - eachMode = states.EachMap - } - - // This method takes care of all of the business logic of updating this - // while ensuring that any existing instances are preserved, etc. - state.SetResourceMeta(absAddr, eachMode, n.ProviderAddr) - - return nil, nil -} - -// EvalForgetResourceState is an EvalNode implementation that prunes out an -// empty resource-level state for a given resource address, or produces an -// error if it isn't empty after all. -// -// This should be the last action taken for a resource that has been removed -// from the configuration altogether, to clean up the leftover husk of the -// resource in the state after other EvalNodes have destroyed and removed -// all of the instances and instance objects beneath it. -type EvalForgetResourceState struct { - Addr addrs.Resource -} - -func (n *EvalForgetResourceState) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := ctx.State() - - pruned := state.RemoveResourceIfEmpty(absAddr) - if !pruned { - // If this produces an error, it indicates a bug elsewhere in Terraform - // -- probably missing graph nodes, graph edges, or - // incorrectly-implemented evaluation steps.
- return nil, fmt.Errorf("orphan resource %s still has a non-empty state after apply; this is a bug in Terraform", absAddr) - } - log.Printf("[TRACE] EvalForgetResourceState: Pruned husk of %s from state", absAddr) - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go deleted file mode 100644 index 27d5f212eb..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go +++ /dev/null @@ -1,106 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// UpgradeResourceState will, if necessary, run the provider-defined upgrade -// logic against the given state object to make it compliant with the -// current schema version. This is a no-op if the given state object is -// already at the latest version. -// -// If any errors occur during upgrade, error diagnostics are returned. In that -// case it is not safe to proceed with using the original state object. -func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) { - if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { - // We only do state upgrading for managed resources. - return src, nil - } - - stateIsFlatmap := len(src.AttrsJSON) == 0 - - providerType := addr.Resource.Resource.DefaultProviderConfig().Type - if src.SchemaVersion > currentVersion { - log.Printf("[TRACE] UpgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion) - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource instance managed by newer provider version", - // This is not a very good error message, but we don't retain enough - // information in state to give good feedback on what provider - // version might be required here. :( - fmt.Sprintf("The current state of %s was created by a newer provider version than is currently selected. Upgrade the %s provider to work with this state.", addr, providerType), - )) - return nil, diags - } - - // If we get down here then we need to upgrade the state, with the - // provider's help. - // If this state was originally created by a version of Terraform prior to - // v0.12, this also includes translating from legacy flatmap to new-style - // representation, since only the provider has enough information to - // understand a flatmap built against an older schema. 
- if src.SchemaVersion != currentVersion { - log.Printf("[TRACE] UpgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType) - } else { - log.Printf("[TRACE] UpgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType) - } - - req := providers.UpgradeResourceStateRequest{ - TypeName: addr.Resource.Resource.Type, - - // TODO: The internal schema version representations are all using - // uint64 instead of int64, but unsigned integers aren't friendly - // to all protobuf target languages so in practice we use int64 - // on the wire. In future we will change all of our internal - // representations to int64 too. - Version: int64(src.SchemaVersion), - } - - if stateIsFlatmap { - req.RawStateFlatmap = src.AttrsFlat - } else { - req.RawStateJSON = src.AttrsJSON - } - - resp := provider.UpgradeResourceState(req) - diags := resp.Diagnostics - if diags.HasErrors() { - return nil, diags - } - - // After upgrading, the new value must conform to the current schema. When - // going over RPC this is actually already ensured by the - // marshaling/unmarshaling of the new value, but we'll check it here - // anyway for robustness, e.g. for in-process providers. - newValue := resp.UpgradedState - if errs := newValue.Type().TestConformance(currentSchema.ImpliedType()); len(errs) > 0 { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource state upgrade", - fmt.Sprintf("The %s provider upgraded the state for %s from a previous version, but produced an invalid result: %s.", providerType, addr, tfdiags.FormatError(err)), - )) - } - return nil, diags - } - - new, err := src.CompleteUpgrade(newValue, currentSchema.ImpliedType(), uint64(currentVersion)) - if err != nil { - // We already checked for type conformance above, so getting into this - // codepath should be rare and is probably a bug somewhere under CompleteUpgrade. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to encode result of resource state upgrade", - fmt.Sprintf("Failed to encode state for %s after resource schema upgrade: %s.", addr, tfdiags.FormatError(err)), - )) - } - return new, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go deleted file mode 100644 index a4f28bd90f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go +++ /dev/null @@ -1,588 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/gocty" -) - -// EvalValidateCount is an EvalNode implementation that validates -// the count of a resource. 
-type EvalValidateCount struct { - Resource *configs.Resource -} - -// TODO: test -func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - var count int - var err error - - val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - goto RETURN - } - if val.IsNull() || !val.IsKnown() { - goto RETURN - } - - err = gocty.FromCtyValue(val, &count) - if err != nil { - // The EvaluateExpr call above already guaranteed us a number value, - // so if we end up here then we have something that is out of range - // for an int, and the error message will include a description of - // the valid range. - rawVal := val.AsBigFloat() - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count value", - Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err), - Subject: n.Resource.Count.Range().Ptr(), - }) - } else if count < 0 { - rawVal := val.AsBigFloat() - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count value", - Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal), - Subject: n.Resource.Count.Range().Ptr(), - }) - } - -RETURN: - return nil, diags.NonFatalErr() -} - -// EvalValidateProvider is an EvalNode implementation that validates -// a provider configuration. -type EvalValidateProvider struct { - Addr addrs.ProviderConfig - Provider *providers.Interface - Config *configs.Provider -} - -func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - provider := *n.Provider - - configBody := buildProviderConfig(ctx, n.Addr, n.Config) - - resp := provider.GetSchema() - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.NonFatalErr() - } - - configSchema := resp.Provider.Block - if configSchema == nil { - // Should never happen in real code, but often comes up in tests where - // mock schemas are being used that tend to be incomplete. - log.Printf("[WARN] EvalValidateProvider: no config schema is available for %s, so using empty schema", n.Addr) - configSchema = &configschema.Block{} - } - - configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return nil, diags.NonFatalErr() - } - - req := providers.PrepareProviderConfigRequest{ - Config: configVal, - } - - validateResp := provider.PrepareProviderConfig(req) - diags = diags.Append(validateResp.Diagnostics) - - return nil, diags.NonFatalErr() -} - -// EvalValidateProvisioner is an EvalNode implementation that validates -// the configuration of a provisioner belonging to a resource. The provisioner -// config is expected to contain the merged connection configurations. 
-type EvalValidateProvisioner struct { - ResourceAddr addrs.Resource - Provisioner *provisioners.Interface - Schema **configschema.Block - Config *configs.Provisioner - ResourceHasCount bool - ResourceHasForEach bool -} - -func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { - provisioner := *n.Provisioner - config := *n.Config - schema := *n.Schema - - var diags tfdiags.Diagnostics - - { - // Validate the provisioner's own config first - - configVal, _, configDiags := n.evaluateBlock(ctx, config.Config, schema) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - - if configVal == cty.NilVal { - // Should never happen for a well-behaved EvaluateBlock implementation - return nil, fmt.Errorf("EvaluateBlock returned nil value") - } - - req := provisioners.ValidateProvisionerConfigRequest{ - Config: configVal, - } - - resp := provisioner.ValidateProvisionerConfig(req) - diags = diags.Append(resp.Diagnostics) - } - - { - // Now validate the connection config, which contains the merged bodies - // of the resource and provisioner connection blocks. - connDiags := n.validateConnConfig(ctx, config.Connection, n.ResourceAddr) - diags = diags.Append(connDiags) - } - - return nil, diags.NonFatalErr() -} - -func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics { - // We can't comprehensively validate the connection config since its - // final structure is decided by the communicator and we can't instantiate - // that until we have a complete instance state. However, we *can* catch - // configuration keys that are not valid for *any* communicator, catching - // typos early rather than waiting until we actually try to run one of - // the resource's provisioners. - - var diags tfdiags.Diagnostics - - if config == nil || config.Config == nil { - // No block to validate - return diags - } - - // We evaluate here just by evaluating the block and returning any - // diagnostics we get, since evaluation alone is enough to check for - // extraneous arguments and incorrectly-typed arguments. - _, _, configDiags := n.evaluateBlock(ctx, config.Config, connectionBlockSupersetSchema) - diags = diags.Append(configDiags) - - return diags -} - -func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - keyData := EvalDataForNoInstanceKey - selfAddr := n.ResourceAddr.Instance(addrs.NoKey) - - if n.ResourceHasCount { - // For a resource that has count, we allow count.index but don't - // know at this stage what it will return. - keyData = InstanceKeyEvalData{ - CountIndex: cty.UnknownVal(cty.Number), - } - - // "self" can't point to an unknown key, but we'll force it to be - // key 0 here, which should return an unknown value of the - // expected type since none of these elements are known at this - // point anyway. - selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0)) - } else if n.ResourceHasForEach { - // For a resource that has for_each, we allow each.value and each.key - // but don't know at this stage what it will return. - keyData = InstanceKeyEvalData{ - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.DynamicVal, - } - - // "self" can't point to an unknown key, but we'll force it to be - // key "" here, which should return an unknown value of the - // expected type since none of these elements are known at - // this point anyway. 
- selfAddr = n.ResourceAddr.Instance(addrs.StringKey("")) - } - - return ctx.EvaluateBlock(body, schema, selfAddr, keyData) -} - -// connectionBlockSupersetSchema is a schema representing the superset of all -// possible arguments for "connection" blocks across all supported connection -// types. -// -// This currently lives here because we've not yet updated our communicator -// subsystem to be aware of schema itself. Once that is done, we can remove -// this and use a type-specific schema from the communicator to validate -// exactly what is expected for a given connection type. -var connectionBlockSupersetSchema = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - // NOTE: "type" is not included here because it's treated special - // by the config loader and stored away in a separate field. - - // Common attributes for both connection types - "host": { - Type: cty.String, - Required: true, - }, - "type": { - Type: cty.String, - Optional: true, - }, - "user": { - Type: cty.String, - Optional: true, - }, - "password": { - Type: cty.String, - Optional: true, - }, - "port": { - Type: cty.String, - Optional: true, - }, - "timeout": { - Type: cty.String, - Optional: true, - }, - "script_path": { - Type: cty.String, - Optional: true, - }, - - // For type=ssh only (enforced in ssh communicator) - "private_key": { - Type: cty.String, - Optional: true, - }, - "certificate": { - Type: cty.String, - Optional: true, - }, - "host_key": { - Type: cty.String, - Optional: true, - }, - "agent": { - Type: cty.Bool, - Optional: true, - }, - "agent_identity": { - Type: cty.String, - Optional: true, - }, - "bastion_host": { - Type: cty.String, - Optional: true, - }, - "bastion_host_key": { - Type: cty.String, - Optional: true, - }, - "bastion_port": { - Type: cty.Number, - Optional: true, - }, - "bastion_user": { - Type: cty.String, - Optional: true, - }, - "bastion_password": { - Type: cty.String, - Optional: true, - }, - "bastion_private_key": { - Type: cty.String, - Optional: true, - }, - "bastion_certificate": { - Type: cty.String, - Optional: true, - }, - - // For type=winrm only (enforced in winrm communicator) - "https": { - Type: cty.Bool, - Optional: true, - }, - "insecure": { - Type: cty.Bool, - Optional: true, - }, - "cacert": { - Type: cty.String, - Optional: true, - }, - "use_ntlm": { - Type: cty.Bool, - Optional: true, - }, - }, -} - -// connectionBlockSupersetSchema is a schema representing the superset of all -// possible arguments for "connection" blocks across all supported connection -// types. -// -// This currently lives here because we've not yet updated our communicator -// subsystem to be aware of schema itself. It's exported only for use in the -// configs/configupgrade package and should not be used from anywhere else. -// The caller may not modify any part of the returned schema data structure. -func ConnectionBlockSupersetSchema() *configschema.Block { - return connectionBlockSupersetSchema -} - -// EvalValidateResource is an EvalNode implementation that validates -// the configuration of a resource. -type EvalValidateResource struct { - Addr addrs.Resource - Provider *providers.Interface - ProviderSchema **ProviderSchema - Config *configs.Resource - - // IgnoreWarnings means that warnings will not be passed through. This allows - // "just-in-time" passes of validation to continue execution through warnings. - IgnoreWarnings bool - - // ConfigVal, if non-nil, will be updated with the value resulting from - // evaluating the given configuration body. 
Since validation is performed - // very early, this value is likely to contain lots of unknown values, - // but its type will conform to the schema of the resource type associated - // with the resource instance being validated. - ConfigVal *cty.Value -} - -func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) { - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("EvalValidateResource has nil schema for %s", n.Addr) - } - - var diags tfdiags.Diagnostics - provider := *n.Provider - cfg := *n.Config - schema := *n.ProviderSchema - mode := cfg.Mode - - keyData := EvalDataForNoInstanceKey - if n.Config.Count != nil { - // If the config block has count, we'll evaluate with an unknown - // number as count.index so we can still type check even though - // we won't expand count until the plan phase. - keyData = InstanceKeyEvalData{ - CountIndex: cty.UnknownVal(cty.Number), - } - - // Basic type-checking of the count argument. More complete validation - // of this will happen when we DynamicExpand during the plan walk. - countDiags := n.validateCount(ctx, n.Config.Count) - diags = diags.Append(countDiags) - } - - if n.Config.ForEach != nil { - keyData = InstanceKeyEvalData{ - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.UnknownVal(cty.DynamicPseudoType), - } - - // Evaluate the for_each expression here so we can expose the diagnostics - forEachDiags := n.validateForEach(ctx, n.Config.ForEach) - diags = diags.Append(forEachDiags) - } - - for _, traversal := range n.Config.DependsOn { - ref, refDiags := addrs.ParseRef(traversal) - diags = diags.Append(refDiags) - if !refDiags.HasErrors() && len(ref.Remaining) != 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid depends_on reference", - Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.", - Subject: ref.Remaining.SourceRange().Ptr(), - }) - } - - // The ref must also refer to something that exists. To test that, - // we'll just eval it and count on the fact that our evaluator will - // detect references to non-existent objects. - if !diags.HasErrors() { - scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey) - if scope != nil { // sometimes nil in tests, due to incomplete mocks - _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType) - diags = diags.Append(refDiags) - } - } - } - - // Provider entry point varies depending on resource mode, because - // managed resources and data resources are two distinct concepts - // in the provider abstraction. 
- switch mode { - case addrs.ManagedResourceMode: - schema, _ := schema.SchemaForResourceType(mode, cfg.Type) - if schema == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource type", - Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type), - Subject: &cfg.TypeRange, - }) - return nil, diags.Err() - } - - configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - return nil, diags.Err() - } - - if cfg.Managed != nil { // can be nil only in tests with poorly-configured mocks - for _, traversal := range cfg.Managed.IgnoreChanges { - moreDiags := schema.StaticValidateTraversal(traversal) - diags = diags.Append(moreDiags) - } - } - - req := providers.ValidateResourceTypeConfigRequest{ - TypeName: cfg.Type, - Config: configVal, - } - - resp := provider.ValidateResourceTypeConfig(req) - diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) - - if n.ConfigVal != nil { - *n.ConfigVal = configVal - } - - case addrs.DataResourceMode: - schema, _ := schema.SchemaForResourceType(mode, cfg.Type) - if schema == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data source", - Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type), - Subject: &cfg.TypeRange, - }) - return nil, diags.Err() - } - - configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - return nil, diags.Err() - } - - req := providers.ValidateDataSourceConfigRequest{ - TypeName: cfg.Type, - Config: configVal, - } - - resp := provider.ValidateDataSourceConfig(req) - diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) - } - - if n.IgnoreWarnings { - // If we _only_ have warnings then we'll return nil. - if diags.HasErrors() { - return nil, diags.NonFatalErr() - } - return nil, nil - } else { - // We'll return an error if there are any diagnostics at all, even if - // some of them are warnings. - return nil, diags.NonFatalErr() - } -} - -func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expression) tfdiags.Diagnostics { - if expr == nil { - return nil - } - - var diags tfdiags.Diagnostics - - countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) - diags = diags.Append(countDiags) - if diags.HasErrors() { - return diags - } - - if countVal.IsNull() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is null. An integer is required.`, - Subject: expr.Range().Ptr(), - }) - return diags - } - - var err error - countVal, err = convert.Convert(countVal, cty.Number) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), - Subject: expr.Range().Ptr(), - }) - return diags - } - - // If the value isn't known then that's the best we can do for now, but - // we'll check more thoroughly during the plan walk. - if !countVal.IsKnown() { - return diags - } - - // If we _do_ know the value, then we can do a few more checks here. - var count int - err = gocty.FromCtyValue(countVal, &count) - if err != nil { - // Isn't a whole number, etc. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), - Subject: expr.Range().Ptr(), - }) - return diags - } - - if count < 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is unsuitable: count cannot be negative.`, - Subject: expr.Range().Ptr(), - }) - return diags - } - - return diags -} - -func (n *EvalValidateResource) validateForEach(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) { - _, known, forEachDiags := evaluateResourceForEachExpressionKnown(expr, ctx) - // If the value isn't known then that's the best we can do for now, but - // we'll check more thoroughly during the plan walk - if !known { - return diags - } - - if forEachDiags.HasErrors() { - diags = diags.Append(forEachDiags) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go deleted file mode 100644 index c9cc0e6daa..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go +++ /dev/null @@ -1,67 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalValidateSelfRef is an EvalNode implementation that checks to ensure that -// expressions within a particular referencable block do not reference that -// same block. -type EvalValidateSelfRef struct { - Addr addrs.Referenceable - Config hcl.Body - ProviderSchema **ProviderSchema -} - -func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - addr := n.Addr - - addrStrs := make([]string, 0, 1) - addrStrs = append(addrStrs, addr.String()) - switch tAddr := addr.(type) { - case addrs.ResourceInstance: - // A resource instance may not refer to its containing resource either. 
- addrStrs = append(addrStrs, tAddr.ContainingResource().String()) - } - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr) - } - - providerSchema := *n.ProviderSchema - var schema *configschema.Block - switch tAddr := addr.(type) { - case addrs.Resource: - schema, _ = providerSchema.SchemaForResourceAddr(tAddr) - case addrs.ResourceInstance: - schema, _ = providerSchema.SchemaForResourceAddr(tAddr.ContainingResource()) - } - - if schema == nil { - return nil, fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr) - } - - refs, _ := lang.ReferencesInBlock(n.Config, schema) - for _, ref := range refs { - for _, addrStr := range addrStrs { - if ref.Subject.String() == addrStr { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Self-referential block", - Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - } - } - - return nil, diags.NonFatalErr() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go deleted file mode 100644 index 79f44b3fe9..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go +++ /dev/null @@ -1,96 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// EvalSetModuleCallArguments is an EvalNode implementation that sets values -// for arguments of a child module call, for later retrieval during -// expression evaluation. -type EvalSetModuleCallArguments struct { - Module addrs.ModuleCallInstance - Values map[string]cty.Value -} - -// TODO: test -func (n *EvalSetModuleCallArguments) Eval(ctx EvalContext) (interface{}, error) { - ctx.SetModuleCallArguments(n.Module, n.Values) - return nil, nil -} - -// EvalModuleCallArgument is an EvalNode implementation that produces the value -// for a particular variable as will be used by a child module instance. -// -// The result is written into the map given in Values, with its key -// set to the local name of the variable, disregarding the module instance -// address. Any existing values in that map are deleted first. This weird -// interface is a result of trying to be convenient for use with -// EvalContext.SetModuleCallArguments, which expects a map to merge in with -// any existing arguments. -type EvalModuleCallArgument struct { - Addr addrs.InputVariable - Config *configs.Variable - Expr hcl.Expression - - // If this flag is set, any diagnostics are discarded and this operation - // will always succeed, though may produce an unknown value in the - // event of an error. - IgnoreDiagnostics bool - - Values map[string]cty.Value -} - -func (n *EvalModuleCallArgument) Eval(ctx EvalContext) (interface{}, error) { - // Clear out the existing mapping - for k := range n.Values { - delete(n.Values, k) - } - - wantType := n.Config.Type - name := n.Addr.Name - expr := n.Expr - - if expr == nil { - // Should never happen, but we'll bail out early here rather than - // crash in case it does. 
We set no value at all in this case, - // making a subsequent call to EvalContext.SetModuleCallArguments - // a no-op. - log.Printf("[ERROR] attempt to evaluate %s with nil expression", n.Addr.String()) - return nil, nil - } - - val, diags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) - - // We intentionally passed DynamicPseudoType to EvaluateExpr above because - // now we can do our own local type conversion and produce an error message - // with better context if it fails. - var convErr error - val, convErr = convert.Convert(val, wantType) - if convErr != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid value for module argument", - Detail: fmt.Sprintf( - "The given value is not suitable for child module variable %q defined at %s: %s.", - name, n.Config.DeclRange.String(), convErr, - ), - Subject: expr.Range().Ptr(), - }) - // We'll return a placeholder unknown value to avoid producing - // redundant downstream errors. - val = cty.UnknownVal(wantType) - } - - n.Values[name] = val - if n.IgnoreDiagnostics { - return nil, nil - } - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go deleted file mode 100644 index d4a8d3cf74..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go +++ /dev/null @@ -1,88 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" -) - -// ProviderEvalTree returns the evaluation tree for initializing and -// configuring providers. -func ProviderEvalTree(n *NodeApplyableProvider, config *configs.Provider) EvalNode { - var provider providers.Interface - - addr := n.Addr - relAddr := addr.ProviderConfig - - seq := make([]EvalNode, 0, 5) - seq = append(seq, &EvalInitProvider{ - TypeName: relAddr.Type, - Addr: addr.ProviderConfig, - }) - - // Input stuff - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: addr, - Output: &provider, - }, - }, - }, - }) - - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkValidate}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: addr, - Output: &provider, - }, - &EvalValidateProvider{ - Addr: relAddr, - Provider: &provider, - Config: config, - }, - }, - }, - }) - - // Apply stuff - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: addr, - Output: &provider, - }, - }, - }, - }) - - // We configure on everything but validate, since validate may - // not have access to all the variables. - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalConfigProvider{ - Addr: relAddr, - Provider: &provider, - Config: config, - }, - }, - }, - }) - - return &EvalSequence{Nodes: seq} -} - -// CloseProviderEvalTree returns the evaluation tree for closing -// provider connections that aren't needed anymore. 
-func CloseProviderEvalTree(addr addrs.AbsProviderConfig) EvalNode { - return &EvalCloseProvider{Addr: addr.ProviderConfig} -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go deleted file mode 100644 index 2d3eabd488..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go +++ /dev/null @@ -1,838 +0,0 @@ -package terraform - -import ( - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/agext/levenshtein" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Evaluator provides the necessary contextual data for evaluating expressions -// for a particular walk operation. -type Evaluator struct { - // Operation defines what type of operation this evaluator is being used - // for. - Operation walkOperation - - // Meta is contextual metadata about the current operation. - Meta *ContextMeta - - // Config is the root node in the configuration tree. - Config *configs.Config - - // VariableValues is a map from variable names to their associated values, - // within the module indicated by ModulePath. VariableValues is modified - // concurrently, and so it must be accessed only while holding - // VariableValuesLock. - // - // The first map level is string representations of addr.ModuleInstance - // values, while the second level is variable names. - VariableValues map[string]map[string]cty.Value - VariableValuesLock *sync.Mutex - - // Schemas is a repository of all of the schemas we should need to - // evaluate expressions. This must be constructed by the caller to - // include schemas for all of the providers, resource types, data sources - // and provisioners used by the given configuration and state. - // - // This must not be mutated during evaluation. - Schemas *Schemas - - // State is the current state, embedded in a wrapper that ensures that - // it can be safely accessed and modified concurrently. - State *states.SyncState - - // Changes is the set of proposed changes, embedded in a wrapper that - // ensures they can be safely accessed and modified concurrently. - Changes *plans.ChangesSync -} - -// Scope creates an evaluation scope for the given module path and optional -// resource. -// -// If the "self" argument is nil then the "self" object is not available -// in evaluated expressions. Otherwise, it behaves as an alias for the given -// address. -func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable) *lang.Scope { - return &lang.Scope{ - Data: data, - SelfAddr: self, - PureOnly: e.Operation != walkApply && e.Operation != walkDestroy, - BaseDir: ".", // Always current working directory for now. - } -} - -// evaluationStateData is an implementation of lang.Data that resolves -// references primarily (but not exclusively) using information from a State. 
-type evaluationStateData struct { - Evaluator *Evaluator - - // ModulePath is the path through the dynamic module tree to the module - // that references will be resolved relative to. - ModulePath addrs.ModuleInstance - - // InstanceKeyData describes the values, if any, that are accessible due - // to repetition of a containing object using "count" or "for_each" - // arguments. (It is _not_ used for the for_each inside "dynamic" blocks, - // since the user specifies in that case which variable name to locally - // shadow.) - InstanceKeyData InstanceKeyEvalData - - // Operation records the type of walk the evaluationStateData is being used - // for. - Operation walkOperation -} - -// InstanceKeyEvalData is used during evaluation to specify which values, -// if any, should be produced for count.index, each.key, and each.value. -type InstanceKeyEvalData struct { - // CountIndex is the value for count.index, or cty.NilVal if evaluating - // in a context where the "count" argument is not active. - // - // For correct operation, this should always be of type cty.Number if not - // nil. - CountIndex cty.Value - - // EachKey and EachValue are the values for each.key and each.value - // respectively, or cty.NilVal if evaluating in a context where the - // "for_each" argument is not active. These must either both be set - // or neither set. - // - // For correct operation, EachKey must always be either of type cty.String - // or cty.Number if not nil. - EachKey, EachValue cty.Value -} - -// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for -// evaluating in a context that has the given instance key. -func EvalDataForInstanceKey(key addrs.InstanceKey, forEachMap map[string]cty.Value) InstanceKeyEvalData { - var countIdx cty.Value - var eachKey cty.Value - var eachVal cty.Value - - if intKey, ok := key.(addrs.IntKey); ok { - countIdx = cty.NumberIntVal(int64(intKey)) - } - - if stringKey, ok := key.(addrs.StringKey); ok { - eachKey = cty.StringVal(string(stringKey)) - eachVal = forEachMap[string(stringKey)] - } - - return InstanceKeyEvalData{ - CountIndex: countIdx, - EachKey: eachKey, - EachValue: eachVal, - } -} - -// EvalDataForNoInstanceKey is a value of InstanceKeyData that sets no instance -// key values at all, suitable for use in contexts where no keyed instance -// is relevant. -var EvalDataForNoInstanceKey = InstanceKeyEvalData{} - -// evaluationStateData must implement lang.Data -var _ lang.Data = (*evaluationStateData)(nil) - -func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "index": - idxVal := d.InstanceKeyData.CountIndex - if idxVal == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to "count" in non-counted context`, - Detail: fmt.Sprintf(`The "count" object can be used only in "resource" and "data" blocks, and only when the "count" argument is set.`), - Subject: rng.ToHCL().Ptr(), - }) - return cty.UnknownVal(cty.Number), diags - } - return idxVal, diags - - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "count" attribute`, - Detail: fmt.Sprintf(`The "count" object does not have an attribute named %q. 
The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var returnVal cty.Value - switch addr.Name { - - case "key": - returnVal = d.InstanceKeyData.EachKey - case "value": - returnVal = d.InstanceKeyData.EachValue - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "each" attribute`, - Detail: fmt.Sprintf(`The "each" object does not have an attribute named %q. The supported attributes are each.key and each.value, the current key and value pair of the "for_each" attribute set.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - if returnVal == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to "each" in context without for_each`, - Detail: fmt.Sprintf(`The "each" object can be used only in "resource" blocks, and only when the "for_each" argument is set.`), - Subject: rng.ToHCL().Ptr(), - }) - return cty.UnknownVal(cty.DynamicPseudoType), diags - } - return returnVal, diags -} - -func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // First we'll make sure the requested value is declared in configuration, - // so we can produce a nice message if not. - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. - panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath)) - } - - config := moduleConfig.Module.Variables[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Variables { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } else { - suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared input variable`, - Detail: fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - wantType := cty.DynamicPseudoType - if config.Type != cty.NilType { - wantType = config.Type - } - - d.Evaluator.VariableValuesLock.Lock() - defer d.Evaluator.VariableValuesLock.Unlock() - - // During the validate walk, input variables are always unknown so - // that we are validating the configuration for all possible input values - // rather than for a specific set. Checking against a specific set of - // input values then happens during the plan walk. 
- // - // This is important because otherwise the validation walk will tend to be - // overly strict, requiring expressions throughout the configuration to - // be complicated to accommodate all possible inputs, whereas returning - // unknown here allows for simpler patterns like using input values as - // guards to broadly enable/disable resources, avoid processing things - // that are disabled, etc. Terraform's static validation leans towards - // being liberal in what it accepts because the subsequent plan walk has - // more information available and so can be more conservative. - if d.Operation == walkValidate { - return cty.UnknownVal(wantType), diags - } - - moduleAddrStr := d.ModulePath.String() - vals := d.Evaluator.VariableValues[moduleAddrStr] - if vals == nil { - return cty.UnknownVal(wantType), diags - } - - val, isSet := vals[addr.Name] - if !isSet { - if config.Default != cty.NilVal { - return config.Default, diags - } - return cty.UnknownVal(wantType), diags - } - - var err error - val, err = convert.Convert(val, wantType) - if err != nil { - // We should never get here because this problem should've been caught - // during earlier validation, but we'll do something reasonable anyway. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Incorrect variable type`, - Detail: fmt.Sprintf(`The resolved value of variable %q is not appropriate: %s.`, addr.Name, err), - Subject: &config.DeclRange, - }) - // Stub out our return value so that the semantic checker doesn't - // produce redundant downstream errors. - val = cty.UnknownVal(wantType) - } - - return val, diags -} - -func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // First we'll make sure the requested value is declared in configuration, - // so we can produce a nice message if not. - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. - panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath)) - } - - config := moduleConfig.Module.Locals[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Locals { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared local value`, - Detail: fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - val := d.Evaluator.State.LocalValue(addr.Absolute(d.ModulePath)) - if val == cty.NilVal { - // Not evaluated yet? - val = cty.DynamicVal - } - - return val, diags -} - -func (d *evaluationStateData) GetModuleInstance(addr addrs.ModuleCallInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Output results live in the module that declares them, which is one of - // the child module instances of our current module path.
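For reference, the convert.Convert call in GetInputVariable above comes from cty's conversion package. A minimal standalone sketch of its behavior, using the same github.com/zclconf/go-cty module (the sample values are invented for illustration, not taken from the code above):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// A value that can be converted to the declared type succeeds...
	v, err := convert.Convert(cty.StringVal("5"), cty.Number)
	fmt.Printf("%#v %v\n", v, err) // cty.NumberIntVal(5) <nil>

	// ...while an incompatible value returns the error that
	// GetInputVariable wraps in an "Incorrect variable type" diagnostic.
	_, err = convert.Convert(cty.ListValEmpty(cty.String), cty.Number)
	fmt.Println(err)
}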
- moduleAddr := addr.ModuleInstance(d.ModulePath) - - // We'll consult the configuration to see what output names we are - // expecting, so we can ensure the resulting object is of the expected - // type even if our data is incomplete for some reason. - moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) - if moduleConfig == nil { - // should never happen, since this should've been caught during - // static validation. - panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr)) - } - outputConfigs := moduleConfig.Module.Outputs - - vals := map[string]cty.Value{} - for n := range outputConfigs { - addr := addrs.OutputValue{Name: n}.Absolute(moduleAddr) - - // If a pending change is present in our current changeset then its value - // takes priority over what's in state. (It will usually be the same but - // will differ if the new value is unknown during planning.) - if changeSrc := d.Evaluator.Changes.GetOutputChange(addr); changeSrc != nil { - change, err := changeSrc.Decode() - if err != nil { - // This should happen only if someone has tampered with a plan - // file, so we won't bother with a pretty error for it. - diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", addr, err)) - vals[n] = cty.DynamicVal - continue - } - // We care only about the "after" value, which is the value this output - // will take on after the plan is applied. - vals[n] = change.After - } else { - os := d.Evaluator.State.OutputValue(addr) - if os == nil { - // Not evaluated yet? - vals[n] = cty.DynamicVal - continue - } - vals[n] = os.Value - } - } - return cty.ObjectVal(vals), diags -} - -func (d *evaluationStateData) GetModuleInstanceOutput(addr addrs.ModuleCallOutput, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Output results live in the module that declares them, which is one of - // the child module instances of our current module path. - absAddr := addr.AbsOutputValue(d.ModulePath) - moduleAddr := absAddr.Module - - // First we'll consult the configuration to see if an output of this - // name is declared at all. - moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) - if moduleConfig == nil { - // this doesn't happen in normal circumstances due to our validation - // pass, but it can turn up in some unusual situations, like in the - // "terraform console" repl where arbitrary expressions can be - // evaluated. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared module`, - Detail: fmt.Sprintf(`The configuration contains no %s.`, moduleAddr), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - config := moduleConfig.Module.Outputs[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Outputs { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared output value`, - Detail: fmt.Sprintf(`An output value with the name %q has not been declared in %s.%s`, addr.Name, moduleDisplayAddr(moduleAddr), suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - // If a pending change is present in our current changeset then its value - // takes priority over what's in state. 
(It will usually be the same but - // will differ if the new value is unknown during planning.) - if changeSrc := d.Evaluator.Changes.GetOutputChange(absAddr); changeSrc != nil { - change, err := changeSrc.Decode() - if err != nil { - // This should happen only if someone has tampered with a plan - // file, so we won't bother with a pretty error for it. - diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", absAddr, err)) - return cty.DynamicVal, diags - } - // We care only about the "after" value, which is the value this output - // will take on after the plan is applied. - return change.After, diags - } - - os := d.Evaluator.State.OutputValue(absAddr) - if os == nil { - // Not evaluated yet? - return cty.DynamicVal, diags - } - - return os.Value, diags -} - -func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "cwd": - wd, err := os.Getwd() - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Failed to get working directory`, - Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - return cty.StringVal(filepath.ToSlash(wd)), diags - - case "module": - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. - panic(fmt.Sprintf("module.path read from module %s, which has no configuration", d.ModulePath)) - } - sourceDir := moduleConfig.Module.SourceDir - return cty.StringVal(filepath.ToSlash(sourceDir)), diags - - case "root": - sourceDir := d.Evaluator.Config.Module.SourceDir - return cty.StringVal(filepath.ToSlash(sourceDir)), diags - - default: - suggestion := nameSuggestion(addr.Name, []string{"cwd", "module", "root"}) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "path" attribute`, - Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - // First we'll consult the configuration to see if a resource of this - // name is declared at all. - moduleAddr := d.ModulePath - moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration.
- panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr)) - } - - config := moduleConfig.Module.ResourceByAddr(addr) - if config == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared resource`, - Detail: fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Type, addr.Name, moduleDisplayAddr(moduleAddr)), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - rs := d.Evaluator.State.Resource(addr.Absolute(d.ModulePath)) - - if rs == nil { - // we must return DynamicVal so that both interpretations - // can proceed without generating errors, and we'll deal with this - // in a later step where more information is gathered. - // (In practice we should only end up here during the validate walk, - // since later walks should have at least partial states populated - // for all resources in the configuration.) - return cty.DynamicVal, diags - } - - // Break out early during validation, because resource may not be expanded - // yet and indexed references may show up as invalid. - if d.Operation == walkValidate { - return cty.DynamicVal, diags - } - - return d.getResourceInstancesAll(addr, rng, config, rs, rs.ProviderConfig) -} - -func (d *evaluationStateData) getResourceInstancesAll(addr addrs.Resource, rng tfdiags.SourceRange, config *configs.Resource, rs *states.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - instAddr := addrs.ResourceInstance{Resource: addr, Key: addrs.NoKey} - - schema := d.getResourceSchema(addr, providerAddr) - if schema == nil { - // This shouldn't happen, since validation before we get here should've - // taken care of it, but we'll show a reasonable error message anyway. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource type schema`, - Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, providerAddr), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - switch rs.EachMode { - case states.NoEach: - ty := schema.ImpliedType() - is := rs.Instances[addrs.NoKey] - if is == nil || is.Current == nil { - // Assume we're dealing with an instance that hasn't been created yet. - return cty.UnknownVal(ty), diags - } - - if is.Current.Status == states.ObjectPlanned { - // If there's a pending change for this instance in our plan, we'll prefer - // that. This is important because the state can't represent unknown values - // and so its data is inaccurate when changes are pending. - if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr.Absolute(d.ModulePath), states.CurrentGen); change != nil { - val, err := change.After.Decode(ty) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in plan", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", addr.Absolute(d.ModulePath), err), - Subject: &config.DeclRange, - }) - return cty.UnknownVal(ty), diags - } - return val, diags - } else { - // If the object is in planned status then we should not - // get here, since we should've found a pending value - // in the plan above instead. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing pending object in plan", - Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. 
This is a bug in Terraform; please report it.", addr), - Subject: &config.DeclRange, - }) - return cty.UnknownVal(ty), diags - } - } - - ios, err := is.Current.Decode(ty) - if err != nil { - // This shouldn't happen, since by the time we get here - // we should've upgraded the state data already. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in state", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", addr.Absolute(d.ModulePath), err), - Subject: &config.DeclRange, - }) - return cty.UnknownVal(ty), diags - } - - return ios.Value, diags - - case states.EachList: - // We need to infer the length of our resulting tuple by searching - // for the max IntKey in our instances map. - length := 0 - for k := range rs.Instances { - if ik, ok := k.(addrs.IntKey); ok { - if int(ik) >= length { - length = int(ik) + 1 - } - } - } - - vals := make([]cty.Value, length) - for i := 0; i < length; i++ { - ty := schema.ImpliedType() - key := addrs.IntKey(i) - is, exists := rs.Instances[key] - if exists && is.Current != nil { - instAddr := addr.Instance(key).Absolute(d.ModulePath) - - // Prefer pending value in plan if present. See getResourceInstanceSingle - // comment for the rationale. - if is.Current.Status == states.ObjectPlanned { - if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { - val, err := change.After.Decode(ty) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in plan", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - vals[i] = val - continue - } else { - // If the object is in planned status then we should not - // get here, since we should've found a pending value - // in the plan above instead. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing pending object in plan", - Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), - Subject: &config.DeclRange, - }) - continue - } - } - - ios, err := is.Current.Decode(ty) - if err != nil { - // This shouldn't happen, since by the time we get here - // we should've upgraded the state data already. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in state", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - vals[i] = ios.Value - } else { - // There shouldn't normally be "gaps" in our list but we'll - // allow it under the assumption that we're in a weird situation - // where e.g. someone has run "terraform state mv" to reorder - // a list and left a hole behind. - vals[i] = cty.UnknownVal(schema.ImpliedType()) - } - } - - // We use a tuple rather than a list here because resource schemas may - // include dynamically-typed attributes, which will then cause each - // instance to potentially have a different runtime type even though - // they all conform to the static schema. 
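The tuple-versus-list rationale in the comment above can be demonstrated directly with the same go-cty library. A small runnable sketch (the attribute name and values are invented for the example):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Two "instances" whose dynamically-typed attribute resolved to
	// different concrete types at runtime.
	a := cty.ObjectVal(map[string]cty.Value{"out": cty.StringVal("x")})
	b := cty.ObjectVal(map[string]cty.Value{"out": cty.NumberIntVal(1)})

	// A tuple tolerates a distinct type per element...
	t := cty.TupleVal([]cty.Value{a, b})
	fmt.Println(t.Type().FriendlyName()) // tuple

	// ...whereas a list requires one homogeneous element type, so
	// cty.ListVal([]cty.Value{a, b}) would panic here.
}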
- return cty.TupleVal(vals), diags - - case states.EachMap: - ty := schema.ImpliedType() - vals := make(map[string]cty.Value, len(rs.Instances)) - for k, is := range rs.Instances { - if sk, ok := k.(addrs.StringKey); ok { - instAddr := addr.Instance(k).Absolute(d.ModulePath) - - // Prefer pending value in plan if present. See getResourceInstanceSingle - // comment for the rationale. - if is.Current.Status == states.ObjectPlanned { - if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { - val, err := change.After.Decode(ty) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in plan", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - vals[string(sk)] = val - continue - } else { - // If the object is in planned status then we should not - // get here, since we should've found a pending value - // in the plan above instead. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing pending object in plan", - Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), - Subject: &config.DeclRange, - }) - continue - } - } - - ios, err := is.Current.Decode(ty) - if err != nil { - // This shouldn't happen, since by the time we get here - // we should've upgraded the state data already. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in state", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - vals[string(sk)] = ios.Value - } - } - - // We use an object rather than a map here because resource schemas may - // include dynamically-typed attributes, which will then cause each - // instance to potentially have a different runtime type even though - // they all conform to the static schema. - return cty.ObjectVal(vals), diags - - default: - // Should never happen since caller should deal with other modes - panic(fmt.Sprintf("unsupported EachMode %s", rs.EachMode)) - } -} - -func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.AbsProviderConfig) *configschema.Block { - providerType := providerAddr.ProviderConfig.Type - schemas := d.Evaluator.Schemas - schema, _ := schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) - return schema -} - -func (d *evaluationStateData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "workspace": - workspaceName := d.Evaluator.Meta.Env - return cty.StringVal(workspaceName), diags - - case "env": - // Prior to Terraform 0.12 there was an attribute "env", which was - // an alias name for "workspace". This was deprecated and is now - // removed. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "terraform" attribute`, - Detail: `The terraform.env attribute was deprecated in v0.10 and removed in v0.12. 
The "state environment" concept was renamed to "workspace" in v0.12, and so the workspace name can now be accessed using the terraform.workspace attribute.`, - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "terraform" attribute`, - Detail: fmt.Sprintf(`The "terraform" object does not have an attribute named %q. The only supported attribute is terraform.workspace, the name of the currently-selected workspace.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -// nameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string. -// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func nameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} - -// moduleDisplayAddr returns a string describing the given module instance -// address that is appropriate for returning to users in situations where the -// root module is possible. Specifically, it returns "the root module" if the -// root module instance is given, or a string representation of the module -// address otherwise. -func moduleDisplayAddr(addr addrs.ModuleInstance) string { - switch { - case addr.IsRoot(): - return "the root module" - default: - return addr.String() - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go deleted file mode 100644 index 35a8be0c96..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go +++ /dev/null @@ -1,299 +0,0 @@ -package terraform - -import ( - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// StaticValidateReferences checks the given references against schemas and -// other statically-checkable rules, producing error diagnostics if any -// problems are found. -// -// If this method returns errors for a particular reference then evaluating -// that reference is likely to generate a very similar error, so callers should -// not run this method and then also evaluate the source expression(s) and -// merge the two sets of diagnostics together, since this will result in -// confusing redundant errors. -// -// This method can find more errors than can be found by evaluating an -// expression with a partially-populated scope, since it checks the referenced -// names directly against the schema rather than relying on evaluation errors. -// -// The result may include warning diagnostics if, for example, deprecated -// features are referenced.
-func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - for _, ref := range refs { - moreDiags := d.staticValidateReference(ref, self) - diags = diags.Append(moreDiags) - } - return diags -} - -func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { - modCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if modCfg == nil { - // This is a bug in the caller rather than a problem with the - // reference, but rather than crashing out here in an unhelpful way - // we'll just ignore it and trust a different layer to catch it. - return nil - } - - if ref.Subject == addrs.Self { - // The "self" address is a special alias for the address given as - // our self parameter here, if present. - if self == nil { - var diags tfdiags.Diagnostics - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "self" reference`, - // This detail message mentions some current practice that - // this codepath doesn't really "know about". If the "self" - // object starts being supported in more contexts later then - // we'll need to adjust this message. - Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - return diags - } - - synthRef := *ref // shallow copy - synthRef.Subject = self - ref = &synthRef - } - - switch addr := ref.Subject.(type) { - - // For static validation we validate both resource and resource instance references the same way. - // We mostly disregard the index, though we do some simple validation of - // its _presence_ in staticValidateSingleResourceReference and - // staticValidateMultiResourceReference respectively. - case addrs.Resource: - var diags tfdiags.Diagnostics - diags = diags.Append(d.staticValidateSingleResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) - diags = diags.Append(d.staticValidateResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) - return diags - case addrs.ResourceInstance: - var diags tfdiags.Diagnostics - diags = diags.Append(d.staticValidateMultiResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) - diags = diags.Append(d.staticValidateResourceReference(modCfg, addr.ContainingResource(), ref.Remaining, ref.SourceRange)) - return diags - - // We also handle all module call references the same way, disregarding index. - case addrs.ModuleCall: - return d.staticValidateModuleCallReference(modCfg, addr, ref.Remaining, ref.SourceRange) - case addrs.ModuleCallInstance: - return d.staticValidateModuleCallReference(modCfg, addr.Call, ref.Remaining, ref.SourceRange) - case addrs.ModuleCallOutput: - // This one is a funny one because we will take the output name referenced - // and use it to fake up a "remaining" that would make sense for the - // module call itself, rather than for the specific output, and then - // we can just re-use our static module call validation logic. - remain := make(hcl.Traversal, len(ref.Remaining)+1) - copy(remain[1:], ref.Remaining) - remain[0] = hcl.TraverseAttr{ - Name: addr.Name, - - // Using the whole reference as the source range here doesn't exactly - // match how HCL would normally generate an attribute traversal, - // but is close enough for our purposes. 
- SrcRange: ref.SourceRange.ToHCL(), - } - return d.staticValidateModuleCallReference(modCfg, addr.Call.Call, remain, ref.SourceRange) - - default: - // Anything else we'll just permit through without any static validation - // and let it be caught during dynamic evaluation, in evaluate.go. - return nil - } -} - -func (d *evaluationStateData) staticValidateSingleResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - // If we have at least one step in "remain" and this resource has - // "count" set then we know for sure this is invalid because we have - // something like: - // aws_instance.foo.bar - // ...when we really need - // aws_instance.foo[count.index].bar - - // It is _not_ safe to do this check when remain is empty, because that - // would also match aws_instance.foo[count.index].bar due to `count.index` - // not being statically-resolvable as part of a reference, and match - // direct references to the whole aws_instance.foo tuple. - if len(remain) == 0 { - return nil - } - - var diags tfdiags.Diagnostics - - cfg := modCfg.Module.ResourceByAddr(addr) - if cfg == nil { - // We'll just bail out here and catch this in our subsequent call to - // staticValidateResourceReference, then. - return diags - } - - if cfg.Count != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource instance key`, - Detail: fmt.Sprintf("Because %s has \"count\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[count.index]", addr, addr), - Subject: rng.ToHCL().Ptr(), - }) - } - if cfg.ForEach != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource instance key`, - Detail: fmt.Sprintf("Because %s has \"for_each\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[each.key]", addr, addr), - Subject: rng.ToHCL().Ptr(), - }) - } - - return diags -} - -func (d *evaluationStateData) staticValidateMultiResourceReference(modCfg *configs.Config, addr addrs.ResourceInstance, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - cfg := modCfg.Module.ResourceByAddr(addr.ContainingResource()) - if cfg == nil { - // We'll just bail out here and catch this in our subsequent call to - // staticValidateResourceReference, then. - return diags - } - - if addr.Key == addrs.NoKey { - // This is a different path into staticValidateSingleResourceReference - return d.staticValidateSingleResourceReference(modCfg, addr.ContainingResource(), remain, rng) - } else { - if cfg.Count == nil && cfg.ForEach == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Unexpected resource instance key`, - Detail: fmt.Sprintf(`Because %s does not have "count" or "for_each" set, references to it must not include an index key. 
Remove the bracketed index to refer to the single instance of this resource.`, addr.ContainingResource()), - Subject: rng.ToHCL().Ptr(), - }) - } - } - - return diags -} - -func (d *evaluationStateData) staticValidateResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - var modeAdjective string - switch addr.Mode { - case addrs.ManagedResourceMode: - modeAdjective = "managed" - case addrs.DataResourceMode: - modeAdjective = "data" - default: - // should never happen - modeAdjective = "" - } - - cfg := modCfg.Module.ResourceByAddr(addr) - if cfg == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared resource`, - Detail: fmt.Sprintf(`A %s resource %q %q has not been declared in %s.`, modeAdjective, addr.Type, addr.Name, moduleConfigDisplayAddr(modCfg.Path)), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - // Normally accessing this directly is wrong because it doesn't take into - // account provider inheritance, etc but it's okay here because we're only - // paying attention to the type anyway. - providerType := cfg.ProviderConfigAddr().Type - schema, _ := d.Evaluator.Schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) - - if schema == nil { - // Prior validation should've taken care of a resource block with an - // unsupported type, so we should never get here but we'll handle it - // here anyway for robustness. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid resource type`, - Detail: fmt.Sprintf(`A %s resource type %q is not supported by provider %q.`, modeAdjective, addr.Type, providerType), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - // As a special case we'll detect attempts to access an attribute called - // "count" and produce a special error for it, since versions of Terraform - // prior to v0.12 offered this as a weird special case that we can no - // longer support. - if len(remain) > 0 { - if step, ok := remain[0].(hcl.TraverseAttr); ok && step.Name == "count" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid resource count attribute`, - Detail: fmt.Sprintf(`The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(%s) to count resource instances.`, addr), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - } - - // If we got this far then we'll try to validate the remaining traversal - // steps against our schema. - moreDiags := schema.StaticValidateTraversal(remain) - diags = diags.Append(moreDiags) - - return diags -} - -func (d *evaluationStateData) staticValidateModuleCallReference(modCfg *configs.Config, addr addrs.ModuleCall, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // For now, our focus here is just in testing that the referenced module - // call exists. All other validation is deferred until evaluation time. 
- _, exists := modCfg.Module.ModuleCalls[addr.Name] - if !exists { - var suggestions []string - for name := range modCfg.Module.ModuleCalls { - suggestions = append(suggestions, name) - } - sort.Strings(suggestions) - suggestion := didyoumean.NameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared module`, - Detail: fmt.Sprintf(`No module call named %q is declared in %s.%s`, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - return diags -} - -// moduleConfigDisplayAddr returns a string describing the given module -// address that is appropriate for returning to users in situations where the -// root module is possible. Specifically, it returns "the root module" if the -// root module instance is given, or a string representation of the module -// address otherwise. -func moduleConfigDisplayAddr(addr addrs.Module) string { - switch { - case addr.IsRoot(): - return "the root module" - default: - return addr.String() - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go deleted file mode 100644 index 97c77bdbd0..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -import "os" - -// This file holds feature flags for the next release - -var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go deleted file mode 100644 index 36e295b6f2..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go +++ /dev/null @@ -1,141 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// Graph represents the graph that Terraform uses to represent resources -// and their dependencies. -type Graph struct { - // Graph is the actual DAG. This is embedded so you can call the DAG - // methods directly. - dag.AcyclicGraph - - // Path is the path in the module tree that this Graph represents. - Path addrs.ModuleInstance - - // debugName is a name for reference in the debug output. This is usually - // to indicate what the topmost builder was, and if this graph is a shadow or - // not. - debugName string -} - -func (g *Graph) DirectedGraph() dag.Grapher { - return &g.AcyclicGraph -} - -// Walk walks the graph with the given walker for callbacks. The graph -// will be walked with full parallelism, so the walker should expect -// to be called concurrently. -func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics { - return g.walk(walker) -} - -func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics { - // The callbacks for entering/exiting a graph - ctx := walker.EnterPath(g.Path) - defer walker.ExitPath(g.Path) - - // Get the path for logs - path := ctx.Path().String() - - debugName := "walk-graph.json" - if g.debugName != "" { - debugName = g.debugName + "-" + debugName - } - - // Walk the graph.
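Walk's contract above (full parallelism, with walker callbacks invoked concurrently) can be illustrated standalone. The dag package is internal to the SDK, so this sketch approximates the idea with plain channels and goroutines; every name in it is invented for the example:

package main

import (
	"fmt"
	"sync"
)

// walk visits every vertex of an acyclic dependency graph in its own
// goroutine, but only after all of its dependencies have finished. This is
// why walker callbacks in the real implementation must be concurrency-safe.
func walk(deps map[string][]string, visit func(string)) {
	done := make(map[string]chan struct{}, len(deps))
	for v := range deps {
		done[v] = make(chan struct{})
	}
	var wg sync.WaitGroup
	for v := range deps {
		wg.Add(1)
		go func(v string) {
			defer wg.Done()
			for _, dep := range deps[v] {
				<-done[dep] // block until the dependency's visit completes
			}
			visit(v)
			close(done[v])
		}(v)
	}
	wg.Wait()
}

func main() {
	var mu sync.Mutex
	walk(map[string][]string{
		"provider": nil,
		"resource": {"provider"},
		"output":   {"resource"},
	}, func(v string) {
		mu.Lock()
		defer mu.Unlock()
		fmt.Println("visit", v)
	})
}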
- var walkFn dag.WalkFunc - walkFn = func(v dag.Vertex) (diags tfdiags.Diagnostics) { - log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v) - g.DebugVisitInfo(v, g.debugName) - - defer func() { - log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v)) - }() - - walker.EnterVertex(v) - defer walker.ExitVertex(v, diags) - - // vertexCtx is the context that we use when evaluating. This - // is normally the context of our graph but can be overridden - // with a GraphNodeSubPath impl. - vertexCtx := ctx - if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 { - vertexCtx = walker.EnterPath(pn.Path()) - defer walker.ExitPath(pn.Path()) - } - - // If the node is eval-able, then evaluate it. - if ev, ok := v.(GraphNodeEvalable); ok { - tree := ev.EvalTree() - if tree == nil { - panic(fmt.Sprintf("%q (%T): nil eval tree", dag.VertexName(v), v)) - } - - // Allow the walker to change our tree if needed. Eval, - // then callback with the output. - log.Printf("[TRACE] vertex %q: evaluating", dag.VertexName(v)) - - g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) - - tree = walker.EnterEvalTree(v, tree) - output, err := Eval(tree, vertexCtx) - diags = diags.Append(walker.ExitEvalTree(v, output, err)) - if diags.HasErrors() { - return - } - } - - // If the node is dynamically expanded, then expand it - if ev, ok := v.(GraphNodeDynamicExpandable); ok { - log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v)) - - g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) - - g, err := ev.DynamicExpand(vertexCtx) - if err != nil { - diags = diags.Append(err) - return - } - if g != nil { - // Walk the subgraph - log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v)) - subDiags := g.walk(walker) - diags = diags.Append(subDiags) - if subDiags.HasErrors() { - log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors", dag.VertexName(v)) - return - } - log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v)) - } else { - log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v)) - } - } - - // If the node has a subgraph, then walk the subgraph - if sn, ok := v.(GraphNodeSubgraph); ok { - log.Printf("[TRACE] vertex %q: entering static subgraph", dag.VertexName(v)) - - g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) - - subDiags := sn.Subgraph().(*Graph).walk(walker) - if subDiags.HasErrors() { - log.Printf("[TRACE] vertex %q: static subgraph encountered errors", dag.VertexName(v)) - return - } - log.Printf("[TRACE] vertex %q: static subgraph completed successfully", dag.VertexName(v)) - } - - return - } - - return g.AcyclicGraph.Walk(walkFn) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go deleted file mode 100644 index ee2c5857af..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go +++ /dev/null @@ -1,85 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// GraphBuilder is an interface that can be implemented and used with -// Terraform to build the graph that Terraform walks. -type GraphBuilder interface { - // Build builds the graph for the given module path. 
It is up to - // the interface implementation whether this build should expand - // the graph or not. - Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) -} - -// BasicGraphBuilder is a GraphBuilder that builds a graph out of a -// series of transforms and (optionally) validates the graph is a valid -// structure. -type BasicGraphBuilder struct { - Steps []GraphTransformer - Validate bool - // Optional name to add to the graph debug log - Name string -} - -func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - g := &Graph{Path: path} - - var lastStepStr string - for _, step := range b.Steps { - if step == nil { - continue - } - log.Printf("[TRACE] Executing graph transform %T", step) - - stepName := fmt.Sprintf("%T", step) - dot := strings.LastIndex(stepName, ".") - if dot >= 0 { - stepName = stepName[dot+1:] - } - - debugOp := g.DebugOperation(stepName, "") - err := step.Transform(g) - - errMsg := "" - if err != nil { - errMsg = err.Error() - } - debugOp.End(errMsg) - - if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { - log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s------", step, thisStepStr) - lastStepStr = thisStepStr - } else { - log.Printf("[TRACE] Completed graph transform %T (no changes)", step) - } - - if err != nil { - if nf, isNF := err.(tfdiags.NonFatalError); isNF { - diags = diags.Append(nf.Diagnostics) - } else { - diags = diags.Append(err) - return g, diags - } - } - } - - // Validate the graph structure - if b.Validate { - if err := g.Validate(); err != nil { - log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String()) - diags = diags.Append(err) - return nil, diags - } - } - - return g, diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go deleted file mode 100644 index 9189876106..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go +++ /dev/null @@ -1,211 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// ApplyGraphBuilder implements GraphBuilder and is responsible for building -// a graph for applying a Terraform diff. -// -// Because the graph is built from the diff (vs. the config or state), -// this helps ensure that the apply-time graph doesn't modify any resources -// that aren't explicitly in the diff. There are other scenarios where the -// diff can deviate, so this is just one layer of protection. -type ApplyGraphBuilder struct { - // Config is the configuration tree that the diff was built from. - Config *configs.Config - - // Changes describes the changes that we need to apply. - Changes *plans.Changes - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target. 
This is only required to make sure - // unnecessary outputs aren't included in the apply graph. The plan - // builder successfully handles targeting resources. In the future, - // outputs should go into the diff so that this is unnecessary. - Targets []addrs.Targetable - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Destroy, if true, represents a pure destroy operation - Destroy bool - - // Validate will do structural validation of the graph. - Validate bool -} - -// See GraphBuilder -func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "ApplyGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *ApplyGraphBuilder) Steps() []GraphTransformer { - // Custom factory for creating providers. - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeApplyableResource{ - NodeAbstractResource: a, - } - } - - concreteOrphanResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeDestroyResource{ - NodeAbstractResource: a, - } - } - - concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodeApplyableResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - steps := []GraphTransformer{ - // Creates all the resources represented in the config. During apply, - // we use this just to ensure that the whole-resource metadata is - // updated to reflect things such as whether the count argument is - // set in config, or which provider configuration manages each resource. - &ConfigTransformer{ - Concrete: concreteResource, - Config: b.Config, - }, - - // Creates all the resource instances represented in the diff, along - // with dependency edges against the whole-resource nodes added by - // ConfigTransformer above. - &DiffTransformer{ - Concrete: concreteResourceInstance, - State: b.State, - Changes: b.Changes, - }, - - // Creates extra cleanup nodes for any entire resources that are - // no longer present in config, so we can make sure we clean up the - // leftover empty resource states after the instances have been - // destroyed. - // (We don't track this particular type of change in the plan because - // it's just cleanup of our own state object, and so doesn't affect - // any real remote objects or consumable outputs.)
- &OrphanResourceTransformer{ - Concrete: concreteOrphanResource, - Config: b.Config, - State: b.State, - }, - - // Create orphan output nodes - &OrphanOutputTransformer{Config: b.Config, State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Provisioner-related transformations - &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, - &ProvisionerTransformer{}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - // add providers - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Remove modules no longer present in the config - &RemovedModuleTransformer{Config: b.Config, State: b.State}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Destruction ordering - &DestroyEdgeTransformer{ - Config: b.Config, - State: b.State, - Schemas: b.Schemas, - }, - - &CBDEdgeTransformer{ - Config: b.Config, - State: b.State, - Schemas: b.Schemas, - Destroy: b.Destroy, - }, - - // Handle destroy time transformations for output and local values. - // Reverse the edges from outputs and locals, so that - // interpolations don't fail during destroy. - // Create a destroy node for outputs to remove them from the state. - // Prune unreferenced values, which may have interpolations that can't - // be resolved. - GraphTransformIf( - func() bool { return b.Destroy }, - GraphTransformMulti( - &DestroyValueReferenceTransformer{}, - &DestroyOutputTransformer{}, - &PruneUnusedValuesTransformer{}, - ), - ), - - // Add the node to fix the state count boundaries - &CountBoundaryTransformer{ - Config: b.Config, - }, - - // Target - &TargetsTransformer{Targets: b.Targets}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - &CloseProvisionerTransformer{}, - - // Single root - &RootTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go deleted file mode 100644 index 32fe5f9735..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go +++ /dev/null @@ -1,97 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for -// planning a pure-destroy. -// -// Planning a pure destroy operation is simple because we can ignore most -// ordering configuration and simply reverse the state. 
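Each of these graph builders delegates to BasicGraphBuilder above, which threads a graph through an ordered list of GraphTransformers and stops on the first fatal error. A minimal standalone sketch of that pattern (Graph and the transformers here are simplified stand-ins, not the real SDK types):

package main

import (
	"errors"
	"fmt"
)

type Graph struct{ Vertices []string }

// GraphTransformer mirrors the SDK's interface: each step mutates the
// graph in place and may fail.
type GraphTransformer interface {
	Transform(*Graph) error
}

type AddVertexTransformer struct{ Name string }

func (t *AddVertexTransformer) Transform(g *Graph) error {
	g.Vertices = append(g.Vertices, t.Name)
	return nil
}

type ValidateTransformer struct{}

func (t *ValidateTransformer) Transform(g *Graph) error {
	if len(g.Vertices) == 0 {
		return errors.New("graph has no vertices")
	}
	return nil
}

// build applies each transform in order, stopping at the first error,
// just as BasicGraphBuilder.Build does for fatal errors.
func build(steps []GraphTransformer) (*Graph, error) {
	g := &Graph{}
	for _, step := range steps {
		if err := step.Transform(g); err != nil {
			return nil, err
		}
	}
	return g, nil
}

func main() {
	g, err := build([]GraphTransformer{
		&AddVertexTransformer{Name: "root"},
		&ValidateTransformer{},
	})
	fmt.Println(g, err)
}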
-type DestroyPlanGraphBuilder struct { - // Config is the configuration tree to build the plan from. - Config *configs.Config - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target - Targets []addrs.Targetable - - // Validate will do structural validation of the graph. - Validate bool -} - -// See GraphBuilder -func (b *DestroyPlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "DestroyPlanGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { - concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodePlanDestroyableResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { - return &NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: a, - DeposedKey: key, - } - } - - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Creates nodes for the resource instances tracked in the state. - &StateTransformer{ - ConcreteCurrent: concreteResourceInstance, - ConcreteDeposed: concreteResourceInstanceDeposed, - State: b.State, - }, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Destruction ordering. We require this only so that - // targeting below will prune the correct things. - &DestroyEdgeTransformer{ - Config: b.Config, - State: b.State, - Schemas: b.Schemas, - }, - - // Target. Note we don't set "Destroy: true" here since we already - // created proper destroy ordering. - &TargetsTransformer{Targets: b.Targets}, - - // Single root - &RootTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go deleted file mode 100644 index 8a0bcf5ba9..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go +++ /dev/null @@ -1,108 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// EvalGraphBuilder implements GraphBuilder and constructs a graph suitable -// for evaluating in-memory values (input variables, local values, output -// values) in the state without any other side-effects. -// -// This graph is used only in weird cases, such as the "terraform console" -// CLI command, where we need to evaluate expressions against the state -// without taking any other actions. 
-// -// The generated graph will include nodes for providers, resources, etc -// just to allow indirect dependencies to be resolved, but these nodes will -// not take any actions themselves since we assume that their parts of the -// state, if any, are already complete. -// -// Although the providers are never configured, they must still be available -// in order to obtain schema information used for type checking, etc. -type EvalGraphBuilder struct { - // Config is the configuration tree. - Config *configs.Config - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas -} - -// See GraphBuilder -func (b *EvalGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: true, - Name: "EvalGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *EvalGraphBuilder) Steps() []GraphTransformer { - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeEvalableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Creates all the resources represented in the config. - &ConfigTransformer{ - Concrete: nil, // just use the abstract type - Config: b.Config, - Unique: true, - }, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Although we don't configure providers, we do still start them up - // to get their schemas, and so we must shut them down again here. - &CloseProviderTransformer{}, - - // Single root - &RootTransformer{}, - - // Remove redundant edges to simplify the graph. - &TransitiveReductionTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go deleted file mode 100644 index dcbb10e608..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go +++ /dev/null @@ -1,100 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// ImportGraphBuilder implements GraphBuilder and is responsible for building -// a graph for importing resources into Terraform. 
This is a much, much -// simpler graph than a normal configuration graph. -type ImportGraphBuilder struct { - // ImportTargets is the list of resources to import. - ImportTargets []*ImportTarget - - // Config is the configuration to build the graph from. See ImportOpts.Config. - Config *configs.Config - - // Components is the factory for our available plugin components. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas -} - -// Build builds the graph according to the steps returned by Steps. -func (b *ImportGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: true, - Name: "ImportGraphBuilder", - }).Build(path) -} - -// Steps returns the ordered list of GraphTransformers that must be executed -// to build a complete graph. -func (b *ImportGraphBuilder) Steps() []GraphTransformer { - // Get the module. If we don't have one, we just use an empty tree - // so that the transform still works but does nothing. - config := b.Config - if config == nil { - config = configs.NewEmptyConfig() - } - - // Custom factory for creating providers. - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Create all our resources from the configuration and state - &ConfigTransformer{Config: config}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Add the import steps - &ImportStateTransformer{Targets: b.ImportTargets}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, config), - - // This validates that the providers only depend on variables - &ImportProviderValidateTransformer{}, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - - // Single root - &RootTransformer{}, - - // Optimize - &TransitiveReductionTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go deleted file mode 100644 index bcd119b39d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go +++ /dev/null @@ -1,204 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// PlanGraphBuilder implements GraphBuilder and is responsible for building -// a graph for planning (creating a Terraform Diff). 
-// -// The primary difference between this graph and others: -// -// * Based on the config since it represents the target state -// -// * Ignores lifecycle options since no lifecycle events occur here. This -// simplifies the graph significantly since complex transforms such as -// create-before-destroy can be completely ignored. -// -type PlanGraphBuilder struct { - // Config is the configuration tree to build a plan from. - Config *configs.Config - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target - Targets []addrs.Targetable - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Validate will do structural validation of the graph. - Validate bool - - // CustomConcrete can be set to customize the node types created - // for various parts of the plan. This is useful in order to customize - // the plan behavior. - CustomConcrete bool - ConcreteProvider ConcreteProviderNodeFunc - ConcreteResource ConcreteResourceNodeFunc - ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc - - once sync.Once -} - -// See GraphBuilder -func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "PlanGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *PlanGraphBuilder) Steps() []GraphTransformer { - b.once.Do(b.init) - - concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { - return &NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: a, - DeposedKey: key, - } - } - - steps := []GraphTransformer{ - // Creates all the resources represented in the config - &ConfigTransformer{ - Concrete: b.ConcreteResource, - Config: b.Config, - }, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add orphan resources - &OrphanResourceInstanceTransformer{ - Concrete: b.ConcreteResourceOrphan, - State: b.State, - Config: b.Config, - }, - - // We also need nodes for any deposed instance objects present in the - // state, so we can plan to destroy them. (This intentionally - // skips creating nodes for _current_ objects, since ConfigTransformer - // created nodes that will do that during DynamicExpand.) 
- &StateTransformer{ - ConcreteDeposed: concreteResourceInstanceDeposed, - State: b.State, - }, - - // Create orphan output nodes - &OrphanOutputTransformer{ - Config: b.Config, - State: b.State, - }, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, - &ProvisionerTransformer{}, - - // Add module variables - &ModuleVariableTransformer{ - Config: b.Config, - }, - - TransformProviders(b.Components.ResourceProviders(), b.ConcreteProvider, b.Config), - - // Remove modules no longer present in the config - &RemovedModuleTransformer{Config: b.Config, State: b.State}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Add the node to fix the state count boundaries - &CountBoundaryTransformer{ - Config: b.Config, - }, - - // Target - &TargetsTransformer{ - Targets: b.Targets, - - // Resource nodes from config have not yet been expanded for - // "count", so we must apply targeting without indices. Exact - // targeting will be dealt with later when these resources - // DynamicExpand. - IgnoreIndices: true, - }, - - // Detect when create_before_destroy must be forced on for a particular - // node due to dependency edges, to avoid graph cycles during apply. - &ForcedCBDTransformer{}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - &CloseProvisionerTransformer{}, - - // Single root - &RootTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). 
- steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} - -func (b *PlanGraphBuilder) init() { - // Do nothing if the user requests customizing the fields - if b.CustomConcrete { - return - } - - b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { - return &NodePlannableResource{ - NodeAbstractResource: a, - } - } - - b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - } - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go deleted file mode 100644 index fad7bf161f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go +++ /dev/null @@ -1,194 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// RefreshGraphBuilder implements GraphBuilder and is responsible for building -// a graph for refreshing (updating the Terraform state). -// -// The primary difference between this graph and others: -// -// * Based on the state since it represents the only resources that -// need to be refreshed. -// -// * Ignores lifecycle options since no lifecycle events occur here. This -// simplifies the graph significantly since complex transforms such as -// create-before-destroy can be completely ignored. -// -type RefreshGraphBuilder struct { - // Config is the configuration tree. - Config *configs.Config - - // State is the prior state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target - Targets []addrs.Targetable - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Validate will do structural validation of the graph. - Validate bool -} - -// See GraphBuilder -func (b *RefreshGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "RefreshGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *RefreshGraphBuilder) Steps() []GraphTransformer { - // Custom factory for creating providers. 
- concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeRefreshableManagedResource{ - NodeAbstractResource: a, - } - } - - concreteManagedResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodeRefreshableManagedResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { - // The "Plan" node type also handles refreshing behavior. - return &NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: a, - DeposedKey: key, - } - } - - concreteDataResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeRefreshableDataResource{ - NodeAbstractResource: a, - } - } - - steps := []GraphTransformer{ - // Creates all the managed resources that aren't in the state, but only if - // we have a state already. No resources in state means there's not - // anything to refresh. - func() GraphTransformer { - if b.State.HasResources() { - return &ConfigTransformer{ - Concrete: concreteManagedResource, - Config: b.Config, - Unique: true, - ModeFilter: true, - Mode: addrs.ManagedResourceMode, - } - } - log.Println("[TRACE] No managed resources in state during refresh; skipping managed resource transformer") - return nil - }(), - - // Creates all the data resources that aren't in the state. This will also - // add any orphans from scaling in as destroy nodes. - &ConfigTransformer{ - Concrete: concreteDataResource, - Config: b.Config, - Unique: true, - ModeFilter: true, - Mode: addrs.DataResourceMode, - }, - - // Add any fully-orphaned resources from config (ones that have been - // removed completely, not ones that are just orphaned due to a scaled-in - // count. - &OrphanResourceInstanceTransformer{ - Concrete: concreteManagedResourceInstance, - State: b.State, - Config: b.Config, - }, - - // We also need nodes for any deposed instance objects present in the - // state, so we can check if they still exist. (This intentionally - // skips creating nodes for _current_ objects, since ConfigTransformer - // created nodes that will do that during DynamicExpand.) - &StateTransformer{ - ConcreteDeposed: concreteResourceInstanceDeposed, - State: b.State, - }, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Target - &TargetsTransformer{ - Targets: b.Targets, - - // Resource nodes from config have not yet been expanded for - // "count", so we must apply targeting without indices. Exact - // targeting will be dealt with later when these resources - // DynamicExpand. 
- IgnoreIndices: true, - }, - - // Close opened plugin connections - &CloseProviderTransformer{}, - - // Single root - &RootTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go deleted file mode 100644 index 0aa8b915a9..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go +++ /dev/null @@ -1,34 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// ValidateGraphBuilder creates the graph for the validate operation. -// -// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that -// we only have to validate what we'd normally plan anyways. The -// PlanGraphBuilder given will be modified so it shouldn't be used for anything -// else after calling this function. -func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder { - // We're going to customize the concrete functions - p.CustomConcrete = true - - // Set the provider to the normal provider. This will ask for input. - p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { - return &NodeValidatableResource{ - NodeAbstractResource: a, - } - } - - // We purposely don't set any other concrete types since they don't - // require validation. - - return p -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go deleted file mode 100644 index 5dbf415ffd..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -import "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - -// GraphDot returns the dot formatting of a visual representation of -// the given Terraform graph. -func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) { - return string(g.Dot(opts)), nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go deleted file mode 100644 index a005ea5a0a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go +++ /dev/null @@ -1,11 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// GraphNodeSubPath says that a node is part of a graph with a -// different path, and the context should be adjusted accordingly. 
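The GraphDot helper deleted just above is the usual debugging aid for these builders; rendering a built graph for graphviz might look like this sketch (fmt from the standard library):

    dot, err := GraphDot(graph, &dag.DotOpts{})
    if err != nil {
        return err
    }
    fmt.Println(dot) // pipe into `dot -Tsvg` to visualize the dependency graph
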
-type GraphNodeSubPath interface { - Path() addrs.ModuleInstance -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go deleted file mode 100644 index d699376f2f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go +++ /dev/null @@ -1,32 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// GraphWalker is an interface that can be implemented that when used -// with Graph.Walk will invoke the given callbacks under certain events. -type GraphWalker interface { - EnterPath(addrs.ModuleInstance) EvalContext - ExitPath(addrs.ModuleInstance) - EnterVertex(dag.Vertex) - ExitVertex(dag.Vertex, tfdiags.Diagnostics) - EnterEvalTree(dag.Vertex, EvalNode) EvalNode - ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics -} - -// NullGraphWalker is a GraphWalker implementation that does nothing. -// This can be embedded within other GraphWalker implementations for easily -// implementing all the required functions. -type NullGraphWalker struct{} - -func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) } -func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {} -func (NullGraphWalker) EnterVertex(dag.Vertex) {} -func (NullGraphWalker) ExitVertex(dag.Vertex, tfdiags.Diagnostics) {} -func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n } -func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics { - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go deleted file mode 100644 index 11fb2fd01e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go +++ /dev/null @@ -1,157 +0,0 @@ -package terraform - -import ( - "context" - "log" - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// ContextGraphWalker is the GraphWalker implementation used with the -// Context struct to walk and evaluate the graph. -type ContextGraphWalker struct { - NullGraphWalker - - // Configurable values - Context *Context - State *states.SyncState // Used for safe concurrent access to state - Changes *plans.ChangesSync // Used for safe concurrent writes to changes - Operation walkOperation - StopContext context.Context - RootVariableValues InputValues - - // This is an output. Do not set this, nor read it while a graph walk - // is in progress. 
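NullGraphWalker above is consumed the same way the NilHook later in this diff is: embed it and override only the callbacks you care about. A hypothetical walker that just logs failed vertices (loggingWalker is an invented name; log is the standard library logger):

    type loggingWalker struct {
        NullGraphWalker
    }

    func (w loggingWalker) ExitVertex(v dag.Vertex, diags tfdiags.Diagnostics) {
        if diags.HasErrors() {
            log.Printf("[ERROR] %s: %s", dag.VertexName(v), diags.Err())
        }
    }
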
- NonFatalDiagnostics tfdiags.Diagnostics - - errorLock sync.Mutex - once sync.Once - contexts map[string]*BuiltinEvalContext - contextLock sync.Mutex - variableValues map[string]map[string]cty.Value - variableValuesLock sync.Mutex - providerCache map[string]providers.Interface - providerSchemas map[string]*ProviderSchema - providerLock sync.Mutex - provisionerCache map[string]provisioners.Interface - provisionerSchemas map[string]*configschema.Block - provisionerLock sync.Mutex -} - -func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext { - w.once.Do(w.init) - - w.contextLock.Lock() - defer w.contextLock.Unlock() - - // If we already have a context for this path cached, use that - key := path.String() - if ctx, ok := w.contexts[key]; ok { - return ctx - } - - // Our evaluator shares some locks with the main context and the walker - // so that we can safely run multiple evaluations at once across - // different modules. - evaluator := &Evaluator{ - Meta: w.Context.meta, - Config: w.Context.config, - Operation: w.Operation, - State: w.State, - Changes: w.Changes, - Schemas: w.Context.schemas, - VariableValues: w.variableValues, - VariableValuesLock: &w.variableValuesLock, - } - - ctx := &BuiltinEvalContext{ - StopContext: w.StopContext, - PathValue: path, - Hooks: w.Context.hooks, - InputValue: w.Context.uiInput, - Components: w.Context.components, - Schemas: w.Context.schemas, - ProviderCache: w.providerCache, - ProviderInputConfig: w.Context.providerInputConfig, - ProviderLock: &w.providerLock, - ProvisionerCache: w.provisionerCache, - ProvisionerLock: &w.provisionerLock, - ChangesValue: w.Changes, - StateValue: w.State, - Evaluator: evaluator, - VariableValues: w.variableValues, - VariableValuesLock: &w.variableValuesLock, - } - - w.contexts[key] = ctx - return ctx -} - -func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { - log.Printf("[TRACE] [%s] Entering eval tree: %s", w.Operation, dag.VertexName(v)) - - // Acquire a lock on the semaphore - w.Context.parallelSem.Acquire() - - // We want to filter the evaluation tree to only include operations - // that belong in this operation. - return EvalFilter(n, EvalNodeFilterOp(w.Operation)) -} - -func (w *ContextGraphWalker) ExitEvalTree(v dag.Vertex, output interface{}, err error) tfdiags.Diagnostics { - log.Printf("[TRACE] [%s] Exiting eval tree: %s", w.Operation, dag.VertexName(v)) - - // Release the semaphore - w.Context.parallelSem.Release() - - if err == nil { - return nil - } - - // Acquire the lock because anything is going to require a lock. - w.errorLock.Lock() - defer w.errorLock.Unlock() - - // If the error is non-fatal then we'll accumulate its diagnostics in our - // non-fatal list, rather than returning it directly, so that the graph - // walk can continue. - if nferr, ok := err.(tfdiags.NonFatalError); ok { - log.Printf("[WARN] %s: %s", dag.VertexName(v), nferr) - w.NonFatalDiagnostics = w.NonFatalDiagnostics.Append(nferr.Diagnostics) - return nil - } - - // Otherwise, we'll let our usual diagnostics machinery figure out how to - // unpack this as one or more diagnostic messages and return that. If we - // get down here then the returned diagnostics will contain at least one - // error, causing the graph walk to halt. 
- var diags tfdiags.Diagnostics - diags = diags.Append(err) - return diags -} - -func (w *ContextGraphWalker) init() { - w.contexts = make(map[string]*BuiltinEvalContext) - w.providerCache = make(map[string]providers.Interface) - w.providerSchemas = make(map[string]*ProviderSchema) - w.provisionerCache = make(map[string]provisioners.Interface) - w.provisionerSchemas = make(map[string]*configschema.Block) - w.variableValues = make(map[string]map[string]cty.Value) - - // Populate root module variable values. Other modules will be populated - // during the graph walk. - w.variableValues[""] = make(map[string]cty.Value) - for k, iv := range w.RootVariableValues { - w.variableValues[""][k] = iv.Value - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go deleted file mode 100644 index 859f6fb121..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go +++ /dev/null @@ -1,18 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=walkOperation graph_walk_operation.go - -// walkOperation is an enum which tells the walkContext what to do. -type walkOperation byte - -const ( - walkInvalid walkOperation = iota - walkApply - walkPlan - walkPlanDestroy - walkRefresh - walkValidate - walkDestroy - walkImport - walkEval // used just to prepare EvalContext for expression evaluation, with no other actions -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go deleted file mode 100644 index b51e1a2661..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[GraphTypeInvalid-0] - _ = x[GraphTypeLegacy-1] - _ = x[GraphTypeRefresh-2] - _ = x[GraphTypePlan-3] - _ = x[GraphTypePlanDestroy-4] - _ = x[GraphTypeApply-5] - _ = x[GraphTypeValidate-6] - _ = x[GraphTypeEval-7] -} - -const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeValidateGraphTypeEval" - -var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 111, 124} - -func (i GraphType) String() string { - if i >= GraphType(len(_GraphType_index)-1) { - return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go deleted file mode 100644 index b5be948243..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go +++ /dev/null @@ -1,145 +0,0 @@ -package terraform - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// HookAction is an enum of actions that can be taken as a result of a hook -// callback. This allows you to modify the behavior of Terraform at runtime. -type HookAction byte - -const ( - // HookActionContinue continues with processing as usual. - HookActionContinue HookAction = iota - - // HookActionHalt halts immediately: no more hooks are processed - // and the action that Terraform was about to take is cancelled. - HookActionHalt -) - -// Hook is the interface that must be implemented to hook into various -// parts of Terraform, allowing you to inspect or change behavior at runtime. -// -// There are MANY hook points into Terraform. If you only want to implement -// some hook points, but not all (which is the likely case), then embed the -// NilHook into your struct, which implements all of the interface but does -// nothing. Then, override only the functions you want to implement. -type Hook interface { - // PreApply and PostApply are called before and after an action for a - // single instance is applied. The error argument in PostApply is the - // error, if any, that was returned from the provider Apply call itself. - PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) - PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) - - // PreDiff and PostDiff are called before and after a provider is given - // the opportunity to customize the proposed new state to produce the - // planned new state. - PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) - PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) - - // The provisioning hooks signal both the overall start end end of - // provisioning for a particular instance and of each of the individual - // configured provisioners for each instance. The sequence of these - // for a given instance might look something like this: - // - // PreProvisionInstance(aws_instance.foo[1], ...) 
- // PreProvisionInstanceStep(aws_instance.foo[1], "file") - // PostProvisionInstanceStep(aws_instance.foo[1], "file", nil) - // PreProvisionInstanceStep(aws_instance.foo[1], "remote-exec") - // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Installing foo...") - // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Configuring bar...") - // PostProvisionInstanceStep(aws_instance.foo[1], "remote-exec", nil) - // PostProvisionInstance(aws_instance.foo[1], ...) - // - // ProvisionOutput is called with output sent back by the provisioners. - // This will be called multiple times as output comes in, with each call - // representing one line of output. It cannot control whether the - // provisioner continues running. - PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) - PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) - PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) - PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) - ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) - - // PreRefresh and PostRefresh are called before and after a single - // resource state is refreshed, respectively. - PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) - PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) - - // PreImportState and PostImportState are called before and after - // (respectively) each state import operation for a given resource address. - PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) - PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) - - // PostStateUpdate is called each time the state is updated. It receives - // a deep copy of the state, which it may therefore access freely without - // any need for locks to protect from concurrent writes from the caller. - PostStateUpdate(new *states.State) (HookAction, error) -} - -// NilHook is a Hook implementation that does nothing. It exists only to -// simplify implementing hooks. You can embed this into your Hook implementation -// and only implement the functions you are interested in. 
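The embedding pattern that comment describes might look like the following sketch: a hook that only records apply completions (applyLogger is an invented name; log is the standard library logger):

    type applyLogger struct {
        NilHook
    }

    func (h *applyLogger) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) {
        log.Printf("applied %s (err=%v)", addr, err)
        return HookActionContinue, nil
    }

    var _ Hook = (*applyLogger)(nil)
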
-type NilHook struct{} - -var _ Hook = (*NilHook)(nil) - -func (*NilHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { -} - -func (*NilHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) { - return HookActionContinue, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go deleted file mode 100644 index 74a29bde0e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go +++ /dev/null @@ -1,274 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// MockHook is an implementation of Hook that can be used for tests. -// It records all of its function calls. 
-type MockHook struct { - sync.Mutex - - PreApplyCalled bool - PreApplyAddr addrs.AbsResourceInstance - PreApplyGen states.Generation - PreApplyAction plans.Action - PreApplyPriorState cty.Value - PreApplyPlannedState cty.Value - PreApplyReturn HookAction - PreApplyError error - - PostApplyCalled bool - PostApplyAddr addrs.AbsResourceInstance - PostApplyGen states.Generation - PostApplyNewState cty.Value - PostApplyError error - PostApplyReturn HookAction - PostApplyReturnError error - PostApplyFn func(addrs.AbsResourceInstance, states.Generation, cty.Value, error) (HookAction, error) - - PreDiffCalled bool - PreDiffAddr addrs.AbsResourceInstance - PreDiffGen states.Generation - PreDiffPriorState cty.Value - PreDiffProposedState cty.Value - PreDiffReturn HookAction - PreDiffError error - - PostDiffCalled bool - PostDiffAddr addrs.AbsResourceInstance - PostDiffGen states.Generation - PostDiffAction plans.Action - PostDiffPriorState cty.Value - PostDiffPlannedState cty.Value - PostDiffReturn HookAction - PostDiffError error - - PreProvisionInstanceCalled bool - PreProvisionInstanceAddr addrs.AbsResourceInstance - PreProvisionInstanceState cty.Value - PreProvisionInstanceReturn HookAction - PreProvisionInstanceError error - - PostProvisionInstanceCalled bool - PostProvisionInstanceAddr addrs.AbsResourceInstance - PostProvisionInstanceState cty.Value - PostProvisionInstanceReturn HookAction - PostProvisionInstanceError error - - PreProvisionInstanceStepCalled bool - PreProvisionInstanceStepAddr addrs.AbsResourceInstance - PreProvisionInstanceStepProvisionerType string - PreProvisionInstanceStepReturn HookAction - PreProvisionInstanceStepError error - - PostProvisionInstanceStepCalled bool - PostProvisionInstanceStepAddr addrs.AbsResourceInstance - PostProvisionInstanceStepProvisionerType string - PostProvisionInstanceStepErrorArg error - PostProvisionInstanceStepReturn HookAction - PostProvisionInstanceStepError error - - ProvisionOutputCalled bool - ProvisionOutputAddr addrs.AbsResourceInstance - ProvisionOutputProvisionerType string - ProvisionOutputMessage string - - PreRefreshCalled bool - PreRefreshAddr addrs.AbsResourceInstance - PreRefreshGen states.Generation - PreRefreshPriorState cty.Value - PreRefreshReturn HookAction - PreRefreshError error - - PostRefreshCalled bool - PostRefreshAddr addrs.AbsResourceInstance - PostRefreshGen states.Generation - PostRefreshPriorState cty.Value - PostRefreshNewState cty.Value - PostRefreshReturn HookAction - PostRefreshError error - - PreImportStateCalled bool - PreImportStateAddr addrs.AbsResourceInstance - PreImportStateID string - PreImportStateReturn HookAction - PreImportStateError error - - PostImportStateCalled bool - PostImportStateAddr addrs.AbsResourceInstance - PostImportStateNewStates []providers.ImportedResource - PostImportStateReturn HookAction - PostImportStateError error - - PostStateUpdateCalled bool - PostStateUpdateState *states.State - PostStateUpdateReturn HookAction - PostStateUpdateError error -} - -var _ Hook = (*MockHook)(nil) - -func (h *MockHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreApplyCalled = true - h.PreApplyAddr = addr - h.PreApplyGen = gen - h.PreApplyAction = action - h.PreApplyPriorState = priorState - h.PreApplyPlannedState = plannedNewState - return h.PreApplyReturn, h.PreApplyError -} - -func (h *MockHook) PostApply(addr addrs.AbsResourceInstance, gen 
states.Generation, newState cty.Value, err error) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostApplyCalled = true - h.PostApplyAddr = addr - h.PostApplyGen = gen - h.PostApplyNewState = newState - h.PostApplyError = err - - if h.PostApplyFn != nil { - return h.PostApplyFn(addr, gen, newState, err) - } - - return h.PostApplyReturn, h.PostApplyReturnError -} - -func (h *MockHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreDiffCalled = true - h.PreDiffAddr = addr - h.PreDiffGen = gen - h.PreDiffPriorState = priorState - h.PreDiffProposedState = proposedNewState - return h.PreDiffReturn, h.PreDiffError -} - -func (h *MockHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostDiffCalled = true - h.PostDiffAddr = addr - h.PostDiffGen = gen - h.PostDiffAction = action - h.PostDiffPriorState = priorState - h.PostDiffPlannedState = plannedNewState - return h.PostDiffReturn, h.PostDiffError -} - -func (h *MockHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreProvisionInstanceCalled = true - h.PreProvisionInstanceAddr = addr - h.PreProvisionInstanceState = state - return h.PreProvisionInstanceReturn, h.PreProvisionInstanceError -} - -func (h *MockHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostProvisionInstanceCalled = true - h.PostProvisionInstanceAddr = addr - h.PostProvisionInstanceState = state - return h.PostProvisionInstanceReturn, h.PostProvisionInstanceError -} - -func (h *MockHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreProvisionInstanceStepCalled = true - h.PreProvisionInstanceStepAddr = addr - h.PreProvisionInstanceStepProvisionerType = typeName - return h.PreProvisionInstanceStepReturn, h.PreProvisionInstanceStepError -} - -func (h *MockHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostProvisionInstanceStepCalled = true - h.PostProvisionInstanceStepAddr = addr - h.PostProvisionInstanceStepProvisionerType = typeName - h.PostProvisionInstanceStepErrorArg = err - return h.PostProvisionInstanceStepReturn, h.PostProvisionInstanceStepError -} - -func (h *MockHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { - h.Lock() - defer h.Unlock() - - h.ProvisionOutputCalled = true - h.ProvisionOutputAddr = addr - h.ProvisionOutputProvisionerType = typeName - h.ProvisionOutputMessage = line -} - -func (h *MockHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreRefreshCalled = true - h.PreRefreshAddr = addr - h.PreRefreshGen = gen - h.PreRefreshPriorState = priorState - return h.PreRefreshReturn, h.PreRefreshError -} - -func (h *MockHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostRefreshCalled = true - h.PostRefreshAddr = addr - h.PostRefreshPriorState = priorState - h.PostRefreshNewState = newState - 
return h.PostRefreshReturn, h.PostRefreshError -} - -func (h *MockHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreImportStateCalled = true - h.PreImportStateAddr = addr - h.PreImportStateID = importID - return h.PreImportStateReturn, h.PreImportStateError -} - -func (h *MockHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostImportStateCalled = true - h.PostImportStateAddr = addr - h.PostImportStateNewStates = imported - return h.PostImportStateReturn, h.PostImportStateError -} - -func (h *MockHook) PostStateUpdate(new *states.State) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostStateUpdateCalled = true - h.PostStateUpdateState = new - return h.PostStateUpdateReturn, h.PostStateUpdateError -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go deleted file mode 100644 index 42c3d20cb9..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go +++ /dev/null @@ -1,100 +0,0 @@ -package terraform - -import ( - "sync/atomic" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// stopHook is a private Hook implementation that Terraform uses to -// signal when to stop or cancel actions. -type stopHook struct { - stop uint32 -} - -var _ Hook = (*stopHook)(nil) - -func (h *stopHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { -} - -func (h *stopHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, 
error) { - return h.hook() -} - -func (h *stopHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) hook() (HookAction, error) { - if h.Stopped() { - // FIXME: This should really return an error since stopping partway - // through is not a successful run-to-completion, but we'll need to - // introduce that cautiously since existing automation solutions may - // be depending on this behavior. - return HookActionHalt, nil - } - - return HookActionContinue, nil -} - -// reset should be called within the lock context -func (h *stopHook) Reset() { - atomic.StoreUint32(&h.stop, 0) -} - -func (h *stopHook) Stop() { - atomic.StoreUint32(&h.stop, 1) -} - -func (h *stopHook) Stopped() bool { - return atomic.LoadUint32(&h.stop) == 1 -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go deleted file mode 100644 index 375a8638a8..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go +++ /dev/null @@ -1,13 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=InstanceType instancetype.go - -// InstanceType is an enum of the various types of instances store in the State -type InstanceType int - -const ( - TypeInvalid InstanceType = iota - TypePrimary - TypeTainted - TypeDeposed -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go deleted file mode 100644 index 95b7a9802e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go +++ /dev/null @@ -1,26 +0,0 @@ -// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
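Both generated stringer files in this diff follow the same shape: a single concatenated name string plus an index array yields a constant-time String method, so the constants print by name and out-of-range values degrade gracefully. For example:

    fmt.Println(TypePrimary)      // "TypePrimary", via the generated String method
    fmt.Println(InstanceType(42)) // "InstanceType(42)" for values outside the known range
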
- var x [1]struct{} - _ = x[TypeInvalid-0] - _ = x[TypePrimary-1] - _ = x[TypeTainted-2] - _ = x[TypeDeposed-3] -} - -const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" - -var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} - -func (i InstanceType) String() string { - if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { - return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go deleted file mode 100644 index f1434e6252..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go +++ /dev/null @@ -1,202 +0,0 @@ -package terraform - -import ( - version "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps" - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// ConfigTreeDependencies returns the dependencies of the tree of modules -// described by the given configuration and state. -// -// Both configuration and state are required because there can be resources -// implied by instances in the state that no longer exist in config. -func ConfigTreeDependencies(root *configs.Config, state *states.State) *moduledeps.Module { - // First we walk the configuration tree to build the overall structure - // and capture the explicit/implicit/inherited provider dependencies. - deps := configTreeConfigDependencies(root, nil) - - // Next we walk over the resources in the state to catch any additional - // dependencies created by existing resources that are no longer in config. - // Most things we find in state will already be present in 'deps', but - // we're interested in the rare thing that isn't. - configTreeMergeStateDependencies(deps, state) - - return deps -} - -func configTreeConfigDependencies(root *configs.Config, inheritProviders map[string]*configs.Provider) *moduledeps.Module { - if root == nil { - // If no config is provided, we'll make a synthetic root. - // This isn't necessarily correct if we're called with a nil that - // *isn't* at the root, but in practice that can never happen. - return &moduledeps.Module{ - Name: "root", - Providers: make(moduledeps.Providers), - } - } - - name := "root" - if len(root.Path) != 0 { - name = root.Path[len(root.Path)-1] - } - - ret := &moduledeps.Module{ - Name: name, - } - - module := root.Module - - // Provider dependencies - { - providers := make(moduledeps.Providers) - - // The main way to declare a provider dependency is explicitly inside - // the "terraform" block, which allows declaring a requirement without - // also creating a configuration. - for fullName, constraints := range module.ProviderRequirements { - inst := moduledeps.ProviderInstance(fullName) - - // The handling here is a bit fiddly because the moduledeps package - // was designed around the legacy (pre-0.12) configuration model - // and hasn't yet been revised to handle the new model. As a result, - // we need to do some translation here. 
- // FIXME: Eventually we should adjust the underlying model so we - // can also retain the source location of each constraint, for - // more informative output from the "terraform providers" command. - var rawConstraints version.Constraints - for _, constraint := range constraints { - rawConstraints = append(rawConstraints, constraint.Required...) - } - discoConstraints := discovery.NewConstraints(rawConstraints) - - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discoConstraints, - Reason: moduledeps.ProviderDependencyExplicit, - } - } - - // Provider configurations can also include version constraints, - // allowing for more terse declaration in situations where both a - // configuration and a constraint are defined in the same module. - for fullName, pCfg := range module.ProviderConfigs { - inst := moduledeps.ProviderInstance(fullName) - discoConstraints := discovery.AllVersions - if pCfg.Version.Required != nil { - discoConstraints = discovery.NewConstraints(pCfg.Version.Required) - } - if existing, exists := providers[inst]; exists { - existing.Constraints = existing.Constraints.Append(discoConstraints) - } else { - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discoConstraints, - Reason: moduledeps.ProviderDependencyExplicit, - } - } - } - - // Each resource in the configuration creates an *implicit* provider - // dependency, though we'll only record it if there isn't already - // an explicit dependency on the same provider. - for _, rc := range module.ManagedResources { - addr := rc.ProviderConfigAddr() - inst := moduledeps.ProviderInstance(addr.StringCompact()) - if _, exists := providers[inst]; exists { - // Explicit dependency already present - continue - } - - reason := moduledeps.ProviderDependencyImplicit - if _, inherited := inheritProviders[addr.StringCompact()]; inherited { - reason = moduledeps.ProviderDependencyInherited - } - - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discovery.AllVersions, - Reason: reason, - } - } - for _, rc := range module.DataResources { - addr := rc.ProviderConfigAddr() - inst := moduledeps.ProviderInstance(addr.StringCompact()) - if _, exists := providers[inst]; exists { - // Explicit dependency already present - continue - } - - reason := moduledeps.ProviderDependencyImplicit - if _, inherited := inheritProviders[addr.String()]; inherited { - reason = moduledeps.ProviderDependencyInherited - } - - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discovery.AllVersions, - Reason: reason, - } - } - - ret.Providers = providers - } - - childInherit := make(map[string]*configs.Provider) - for k, v := range inheritProviders { - childInherit[k] = v - } - for k, v := range module.ProviderConfigs { - childInherit[k] = v - } - for _, c := range root.Children { - ret.Children = append(ret.Children, configTreeConfigDependencies(c, childInherit)) - } - - return ret -} - -func configTreeMergeStateDependencies(root *moduledeps.Module, state *states.State) { - if state == nil { - return - } - - findModule := func(path addrs.ModuleInstance) *moduledeps.Module { - module := root - for _, step := range path { - var next *moduledeps.Module - for _, cm := range module.Children { - if cm.Name == step.Name { - next = cm - break - } - } - - if next == nil { - // If we didn't find a next node, we'll need to make one - next = &moduledeps.Module{ - Name: step.Name, - Providers: make(moduledeps.Providers), - } - module.Children = append(module.Children, next) - } - - module = next - } - return module - 
} - - for _, ms := range state.Modules { - module := findModule(ms.Addr) - - for _, rs := range ms.Resources { - inst := moduledeps.ProviderInstance(rs.ProviderConfig.ProviderConfig.StringCompact()) - if _, exists := module.Providers[inst]; !exists { - module.Providers[inst] = moduledeps.ProviderDependency{ - Constraints: discovery.AllVersions, - Reason: moduledeps.ProviderDependencyFromState, - } - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go deleted file mode 100644 index acd8262b06..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go +++ /dev/null @@ -1,22 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// NodeCountBoundary fixes up any transitions between "each modes" in objects -// saved in state, such as switching from NoEach to EachInt. -type NodeCountBoundary struct { - Config *configs.Config -} - -func (n *NodeCountBoundary) Name() string { - return "meta.count-boundary (EachMode fixup)" -} - -// GraphNodeEvalable -func (n *NodeCountBoundary) EvalTree() EvalNode { - return &EvalCountFixZeroOneBoundaryGlobal{ - Config: n.Config, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go deleted file mode 100644 index 56a33bce2c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go +++ /dev/null @@ -1,40 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// NodeDestroyableDataResourceInstance represents a resource that is "destroyable": -// it is ready to be destroyed. -type NodeDestroyableDataResourceInstance struct { - *NodeAbstractResourceInstance -} - -// GraphNodeEvalable -func (n *NodeDestroyableDataResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - var providerSchema *ProviderSchema - // We don't need the provider, but we're calling EvalGetProvider to load the - // schema. - var provider providers.Interface - - // Just destroy it. - var state *states.ResourceInstanceObject - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalWriteState{ - Addr: addr.Resource, - State: &state, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go deleted file mode 100644 index 56283c0ac3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go +++ /dev/null @@ -1,229 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// NodeRefreshableDataResource represents a resource that is "refreshable". 
-type NodeRefreshableDataResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeSubPath = (*NodeRefreshableDataResource)(nil) - _ GraphNodeDynamicExpandable = (*NodeRefreshableDataResource)(nil) - _ GraphNodeReferenceable = (*NodeRefreshableDataResource)(nil) - _ GraphNodeReferencer = (*NodeRefreshableDataResource)(nil) - _ GraphNodeResource = (*NodeRefreshableDataResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeRefreshableDataResource)(nil) -) - -// GraphNodeDynamicExpandable -func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - count, countKnown, countDiags := evaluateResourceCountExpressionKnown(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - if !countKnown { - // If the count isn't known yet, we'll skip refreshing and try expansion - // again during the plan walk. - return nil, nil - } - - forEachMap, forEachKnown, forEachDiags := evaluateResourceForEachExpressionKnown(n.Config.ForEach, ctx) - diags = diags.Append(forEachDiags) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - if !forEachKnown { - // If the for_each isn't known yet, we'll skip refreshing and try expansion - // again during the plan walk. - return nil, nil - } - - // Next we need to potentially rename an instance address in the state - // if we're transitioning whether "count" is set at all. - fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - - return &NodeRefreshableDataResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - // We also need a destroyable resource for orphans that are a result of a - // scaled-in count. - concreteResourceDestroyable := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and provider since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - - return &NodeDestroyableDataResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - }, - - // Add the count orphans. As these are orphaned refresh nodes, we add them - // directly as NodeDestroyableDataResource. 
- &OrphanResourceCountTransformer{ - Concrete: concreteResourceDestroyable, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeRefreshableDataResource", - } - - graph, diags := b.Build(ctx.Path()) - return graph, diags.ErrWithWarnings() -} - -// NodeRefreshableDataResourceInstance represents a single resource instance -// that is refreshable. -type NodeRefreshableDataResourceInstance struct { - *NodeAbstractResourceInstance -} - -// GraphNodeEvalable -func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // These variables are the state for the eval sequence below, and are - // updated through pointers. - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - var configVal cty.Value - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Always destroy the existing state first, since we must - // make sure that values from a previous read will not - // get interpolated if we end up needing to defer our - // loading until apply time. - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, // a pointer to nil, here - ProviderSchema: &providerSchema, - }, - - // EvalReadData will _attempt_ to read the data source, but may - // generate an incomplete planned object if the configuration - // includes values that won't be known until apply. - &EvalReadData{ - Addr: addr.Resource, - Config: n.Config, - Dependencies: n.StateReferences(), - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - OutputChange: &change, - OutputConfigValue: &configVal, - OutputState: &state, - // If the config explicitly has a depends_on for this data - // source, assume the intention is to prevent refreshing ahead - // of that dependency, and therefore we need to deal with this - // resource during the apply phase. We do that by forcing this - // read to result in a plan. - ForcePlanRead: len(n.Config.DependsOn) > 0, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return (*state).Status != states.ObjectPlanned, nil - }, - Then: &EvalSequence{ - Nodes: []EvalNode{ - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - ProviderSchema: &providerSchema, - }, - &EvalUpdateStateHook{}, - }, - }, - Else: &EvalSequence{ - // We can't deal with this yet, so we'll repeat this step - // during the plan walk to produce a planned change to read - // this during the apply walk. However, we do still need to - // save the generated change and partial state so that - // results from it can be included in other data resources - // or provider configurations during the refresh walk. - // (The planned object we save in the state here will be - // pruned out at the end of the refresh walk, returning - // it back to being unset again for subsequent walks.) 
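- // The two writes below record the planned change and the planned-status - // object so that later nodes in this same walk can still refer to them.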
- Nodes: []EvalNode{ - &EvalWriteDiff{ - Addr: addr.Resource, - Change: &change, - ProviderSchema: &providerSchema, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - ProviderSchema: &providerSchema, - }, - }, - }, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go deleted file mode 100644 index 38681d83db..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go +++ /dev/null @@ -1,70 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" -) - -// NodeLocal represents a named local value in a particular module. -// -// Local value nodes only have one operation, common to all walk types: -// evaluate the result and place it in state. -type NodeLocal struct { - Addr addrs.AbsLocalValue - Config *configs.Local -} - -var ( - _ GraphNodeSubPath = (*NodeLocal)(nil) - _ RemovableIfNotTargeted = (*NodeLocal)(nil) - _ GraphNodeReferenceable = (*NodeLocal)(nil) - _ GraphNodeReferencer = (*NodeLocal)(nil) - _ GraphNodeEvalable = (*NodeLocal)(nil) - _ dag.GraphNodeDotter = (*NodeLocal)(nil) -) - -func (n *NodeLocal) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeLocal) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeLocal) RemoveIfNotTargeted() bool { - return true -} - -// GraphNodeReferenceable -func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.LocalValue} -} - -// GraphNodeReferencer -func (n *NodeLocal) References() []*addrs.Reference { - refs, _ := lang.ReferencesInExpr(n.Config.Expr) - return appendResourceDestroyReferences(refs) -} - -// GraphNodeEvalable -func (n *NodeLocal) EvalTree() EvalNode { - return &EvalLocal{ - Addr: n.Addr.LocalValue, - Expr: n.Config.Expr, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeLocal) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go deleted file mode 100644 index 6e3cb41dc2..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go +++ /dev/null @@ -1,89 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// NodeModuleRemoved represents a module that is no longer in the -// config. 
-type NodeModuleRemoved struct { - Addr addrs.ModuleInstance -} - -var ( - _ GraphNodeSubPath = (*NodeModuleRemoved)(nil) - _ RemovableIfNotTargeted = (*NodeModuleRemoved)(nil) - _ GraphNodeEvalable = (*NodeModuleRemoved)(nil) - _ GraphNodeReferencer = (*NodeModuleRemoved)(nil) - _ GraphNodeReferenceOutside = (*NodeModuleRemoved)(nil) -) - -func (n *NodeModuleRemoved) Name() string { - return fmt.Sprintf("%s (removed)", n.Addr.String()) -} - -// GraphNodeSubPath -func (n *NodeModuleRemoved) Path() addrs.ModuleInstance { - return n.Addr -} - -// GraphNodeEvalable -func (n *NodeModuleRemoved) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, - Node: &EvalCheckModuleRemoved{ - Addr: n.Addr, - }, - } -} - -func (n *NodeModuleRemoved) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { - // Our "References" implementation indicates that this node depends on - // the call to the module it represents, which implicitly depends on - // everything inside the module. That reference must therefore be - // interpreted in terms of our parent module. - return n.Addr, n.Addr.Parent() -} - -func (n *NodeModuleRemoved) References() []*addrs.Reference { - // We depend on the call to the module we represent, because that - // implicitly then depends on everything inside that module. - // Our ReferenceOutside implementation causes this to be interpreted - // within the parent module. - - _, call := n.Addr.CallInstance() - return []*addrs.Reference{ - { - Subject: call, - - // No source range here, because there's nothing reasonable for - // us to return. - }, - } -} - -// RemovableIfNotTargeted -func (n *NodeModuleRemoved) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// EvalCheckModuleRemoved is an EvalNode implementation that verifies that -// a module has been removed from the state as expected. -type EvalCheckModuleRemoved struct { - Addr addrs.ModuleInstance -} - -func (n *EvalCheckModuleRemoved) Eval(ctx EvalContext) (interface{}, error) { - mod := ctx.State().Module(n.Addr) - if mod != nil { - // If we get here then that indicates a bug either in the states - // module or in an earlier step of the graph walk, since we should've - // pruned out the module when the last resource was removed from it. - return nil, fmt.Errorf("leftover module %s in state that should have been removed; this is a bug in Terraform and should be reported", n.Addr) - } - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go deleted file mode 100644 index 76311a56d3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go +++ /dev/null @@ -1,142 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/zclconf/go-cty/cty" -) - -// NodeApplyableModuleVariable represents a module variable input during -// the apply step. 
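- // Its value expression is evaluated in the parent module's scope, which is - // why Path below returns the parent of the variable's own module.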
-type NodeApplyableModuleVariable struct { - Addr addrs.AbsInputVariableInstance - Config *configs.Variable // Config is the var in the config - Expr hcl.Expression // Expr is the value expression given in the call -} - -// Ensure that we are implementing all of the interfaces we think we are -// implementing. -var ( - _ GraphNodeSubPath = (*NodeApplyableModuleVariable)(nil) - _ RemovableIfNotTargeted = (*NodeApplyableModuleVariable)(nil) - _ GraphNodeReferenceOutside = (*NodeApplyableModuleVariable)(nil) - _ GraphNodeReferenceable = (*NodeApplyableModuleVariable)(nil) - _ GraphNodeReferencer = (*NodeApplyableModuleVariable)(nil) - _ GraphNodeEvalable = (*NodeApplyableModuleVariable)(nil) - _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) -) - -func (n *NodeApplyableModuleVariable) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeApplyableModuleVariable) Path() addrs.ModuleInstance { - // We execute in the parent scope (above our own module) because - // expressions in our value are resolved in that context. - return n.Addr.Module.Parent() -} - -// RemovableIfNotTargeted -func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeReferenceOutside implementation -func (n *NodeApplyableModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { - - // Module input variables have their value expressions defined in the - // context of their calling (parent) module, and so references from - // a node of this type should be resolved in the parent module instance. - referencePath = n.Addr.Module.Parent() - - // Input variables are _referenced_ from their own module, though. - selfPath = n.Addr.Module - - return // uses named return values -} - -// GraphNodeReferenceable -func (n *NodeApplyableModuleVariable) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.Variable} -} - -// GraphNodeReferencer -func (n *NodeApplyableModuleVariable) References() []*addrs.Reference { - - // If we have no value expression, we cannot depend on anything. - if n.Expr == nil { - return nil - } - - // Variables in the root don't depend on anything, because their values - // are gathered prior to the graph walk and recorded in the context. - if len(n.Addr.Module) == 0 { - return nil - } - - // Otherwise, we depend on anything referenced by our value expression. - // We ignore diagnostics here under the assumption that we'll re-eval - // all these things later and catch them then; for our purposes here, - // we only care about valid references. - // - // Due to our GraphNodeReferenceOutside implementation, the addresses - // returned by this function are interpreted in the _parent_ module from - // where our associated variable was declared, which is correct because - // our value expression is assigned within a "module" block in the parent - // module. - refs, _ := lang.ReferencesInExpr(n.Expr) - return refs -} - -// GraphNodeEvalable -func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { - // If we have no value, do nothing - if n.Expr == nil { - return &EvalNoop{} - } - - // Otherwise, interpolate the value of this variable and set it - // within the variables mapping. 
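- // vals is shared by the two eval nodes below: EvalModuleCallArgument writes - // the evaluated value into it, and EvalSetModuleCallArguments then records - // it against the module call.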
- vals := make(map[string]cty.Value) - - _, call := n.Addr.Module.CallInstance() - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, - walkDestroy, walkValidate}, - Node: &EvalModuleCallArgument{ - Addr: n.Addr.Variable, - Config: n.Config, - Expr: n.Expr, - Values: vals, - - IgnoreDiagnostics: false, - }, - }, - - &EvalSetModuleCallArguments{ - Module: call, - Values: vals, - }, - }, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeApplyableModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go deleted file mode 100644 index 7530571234..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go +++ /dev/null @@ -1,200 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" -) - -// NodeApplyableOutput represents an output that is "applyable": -// it is ready to be applied. -type NodeApplyableOutput struct { - Addr addrs.AbsOutputValue - Config *configs.Output // Config is the output in the config -} - -var ( - _ GraphNodeSubPath = (*NodeApplyableOutput)(nil) - _ RemovableIfNotTargeted = (*NodeApplyableOutput)(nil) - _ GraphNodeTargetDownstream = (*NodeApplyableOutput)(nil) - _ GraphNodeReferenceable = (*NodeApplyableOutput)(nil) - _ GraphNodeReferencer = (*NodeApplyableOutput)(nil) - _ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil) - _ GraphNodeEvalable = (*NodeApplyableOutput)(nil) - _ dag.GraphNodeDotter = (*NodeApplyableOutput)(nil) -) - -func (n *NodeApplyableOutput) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeApplyableOutput) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeTargetDownstream -func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { - // If any of the direct dependencies of an output are targeted then - // the output must always be targeted as well, so its value will always - // be up-to-date at the completion of an apply walk. - return true -} - -func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.ModuleInstance) { - - // Output values have their expressions resolved in the context of the - // module where they are defined. - referencePath = addr.Module - - // ...but they are referenced in the context of their calling module. - selfPath = addr.Module.Parent() - - return // uses named return values - -} - -// GraphNodeReferenceOutside implementation -func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { - return referenceOutsideForOutput(n.Addr) -} - -func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable { - // An output in the root module can't be referenced at all. 
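- // (Root outputs are exposed to the user rather than to a calling module, - // so there is no module call through which to refer to them.)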
- if addr.Module.IsRoot() { - return nil - } - - // Otherwise, we can be referenced via a reference to our output name - // on the parent module's call, or via a reference to the entire call. - // e.g. module.foo.bar or just module.foo . - // Note that our ReferenceOutside method causes these addresses to be - // relative to the calling module, not the module where the output - // was declared. - _, outp := addr.ModuleCallOutput() - _, call := addr.Module.CallInstance() - return []addrs.Referenceable{outp, call} - -} - -// GraphNodeReferenceable -func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable { - return referenceableAddrsForOutput(n.Addr) -} - -func referencesForOutput(c *configs.Output) []*addrs.Reference { - impRefs, _ := lang.ReferencesInExpr(c.Expr) - expRefs, _ := lang.References(c.DependsOn) - l := len(impRefs) + len(expRefs) - if l == 0 { - return nil - } - refs := make([]*addrs.Reference, 0, l) - refs = append(refs, impRefs...) - refs = append(refs, expRefs...) - return refs - -} - -// GraphNodeReferencer -func (n *NodeApplyableOutput) References() []*addrs.Reference { - return appendResourceDestroyReferences(referencesForOutput(n.Config)) -} - -// GraphNodeEvalable -func (n *NodeApplyableOutput) EvalTree() EvalNode { - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy}, - Node: &EvalWriteOutput{ - Addr: n.Addr.OutputValue, - Sensitive: n.Config.Sensitive, - Expr: n.Config.Expr, - }, - }, - }, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} - -// NodeDestroyableOutput represents an output that is "destroyable": -// its application will remove the output from the state. -type NodeDestroyableOutput struct { - Addr addrs.AbsOutputValue - Config *configs.Output // Config is the output in the config -} - -var ( - _ GraphNodeSubPath = (*NodeDestroyableOutput)(nil) - _ RemovableIfNotTargeted = (*NodeDestroyableOutput)(nil) - _ GraphNodeTargetDownstream = (*NodeDestroyableOutput)(nil) - _ GraphNodeReferencer = (*NodeDestroyableOutput)(nil) - _ GraphNodeEvalable = (*NodeDestroyableOutput)(nil) - _ dag.GraphNodeDotter = (*NodeDestroyableOutput)(nil) -) - -func (n *NodeDestroyableOutput) Name() string { - return fmt.Sprintf("%s (destroy)", n.Addr.String()) -} - -// GraphNodeSubPath -func (n *NodeDestroyableOutput) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeDestroyableOutput) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// This will keep the destroy node in the graph if its corresponding output -// node is also in the destroy graph. -func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { - return true -} - -// GraphNodeReferencer -func (n *NodeDestroyableOutput) References() []*addrs.Reference { - return referencesForOutput(n.Config) -} - -// GraphNodeEvalable -func (n *NodeDestroyableOutput) EvalTree() EvalNode { - return &EvalDeleteOutput{ - Addr: n.Addr.OutputValue, - } -} - -// dag.GraphNodeDotter impl.
-func (n *NodeDestroyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go deleted file mode 100644 index a76d1742ce..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go +++ /dev/null @@ -1,48 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// NodeOutputOrphan represents an output that is an orphan. -type NodeOutputOrphan struct { - Addr addrs.AbsOutputValue -} - -var ( - _ GraphNodeSubPath = (*NodeOutputOrphan)(nil) - _ GraphNodeReferenceable = (*NodeOutputOrphan)(nil) - _ GraphNodeReferenceOutside = (*NodeOutputOrphan)(nil) - _ GraphNodeEvalable = (*NodeOutputOrphan)(nil) -) - -func (n *NodeOutputOrphan) Name() string { - return fmt.Sprintf("%s (orphan)", n.Addr.String()) -} - -// GraphNodeReferenceOutside implementation -func (n *NodeOutputOrphan) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { - return referenceOutsideForOutput(n.Addr) -} - -// GraphNodeReferenceable -func (n *NodeOutputOrphan) ReferenceableAddrs() []addrs.Referenceable { - return referenceableAddrsForOutput(n.Addr) -} - -// GraphNodeSubPath -func (n *NodeOutputOrphan) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeEvalable -func (n *NodeOutputOrphan) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, - Node: &EvalDeleteOutput{ - Addr: n.Addr.OutputValue, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go deleted file mode 100644 index 2071ab168f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go +++ /dev/null @@ -1,11 +0,0 @@ -package terraform - -// NodeApplyableProvider represents a provider during an apply. -type NodeApplyableProvider struct { - *NodeAbstractProvider -} - -// GraphNodeEvalable -func (n *NodeApplyableProvider) EvalTree() EvalNode { - return ProviderEvalTree(n, n.ProviderConfig()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go deleted file mode 100644 index afdd4741d2..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go +++ /dev/null @@ -1,96 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// ConcreteProviderNodeFunc is a callback type used to convert an -// abstract provider to a concrete one of some type. -type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex - -// NodeAbstractProvider represents a provider that has no associated operations. -// It registers all the common interfaces across operations for providers. 
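- // Concrete node types embed this and add operation-specific behavior; the - // ConcreteProviderNodeFunc callback above performs that conversion.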
-type NodeAbstractProvider struct { - Addr addrs.AbsProviderConfig - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. - - Config *configs.Provider - Schema *configschema.Block -} - -var ( - _ GraphNodeSubPath = (*NodeAbstractProvider)(nil) - _ RemovableIfNotTargeted = (*NodeAbstractProvider)(nil) - _ GraphNodeReferencer = (*NodeAbstractProvider)(nil) - _ GraphNodeProvider = (*NodeAbstractProvider)(nil) - _ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil) - _ GraphNodeAttachProviderConfigSchema = (*NodeAbstractProvider)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil) -) - -func (n *NodeAbstractProvider) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeAbstractProvider) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeReferencer -func (n *NodeAbstractProvider) References() []*addrs.Reference { - if n.Config == nil || n.Schema == nil { - return nil - } - - return ReferencesFromConfig(n.Config.Config, n.Schema) -} - -// GraphNodeProvider -func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig { - return n.Addr -} - -// GraphNodeProvider -func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider { - if n.Config == nil { - return nil - } - - return n.Config -} - -// GraphNodeAttachProvider -func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) { - n.Config = c -} - -// GraphNodeAttachProviderConfigSchema impl. -func (n *NodeAbstractProvider) AttachProviderConfigSchema(schema *configschema.Block) { - n.Schema = schema -} - -// GraphNodeDotter impl. -func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "diamond", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go deleted file mode 100644 index 51335654bd..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go +++ /dev/null @@ -1,27 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// NodeDisabledProvider represents a provider that is disabled. A disabled -// provider does nothing. It exists to properly set inheritance information -// for child providers. 
-type NodeDisabledProvider struct { - *NodeAbstractProvider -} - -var ( - _ GraphNodeSubPath = (*NodeDisabledProvider)(nil) - _ RemovableIfNotTargeted = (*NodeDisabledProvider)(nil) - _ GraphNodeReferencer = (*NodeDisabledProvider)(nil) - _ GraphNodeProvider = (*NodeDisabledProvider)(nil) - _ GraphNodeAttachProvider = (*NodeDisabledProvider)(nil) - _ dag.GraphNodeDotter = (*NodeDisabledProvider)(nil) -) - -func (n *NodeDisabledProvider) Name() string { - return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name()) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go deleted file mode 100644 index 580e60cb7e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -// NodeEvalableProvider represents a provider during an "eval" walk. -// This special provider node type just initializes a provider and -// fetches its schema, without configuring it or otherwise interacting -// with it. -type NodeEvalableProvider struct { - *NodeAbstractProvider -} - -// GraphNodeEvalable -func (n *NodeEvalableProvider) EvalTree() EvalNode { - addr := n.Addr - relAddr := addr.ProviderConfig - - return &EvalInitProvider{ - TypeName: relAddr.Type, - Addr: addr.ProviderConfig, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go deleted file mode 100644 index 573f030d70..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// NodeProvisioner represents a provisioner that has no associated operations. -// It registers all the common interfaces across operations for provisioners. -type NodeProvisioner struct { - NameValue string - PathValue addrs.ModuleInstance -} - -var ( - _ GraphNodeSubPath = (*NodeProvisioner)(nil) - _ GraphNodeProvisioner = (*NodeProvisioner)(nil) - _ GraphNodeEvalable = (*NodeProvisioner)(nil) -) - -func (n *NodeProvisioner) Name() string { - result := fmt.Sprintf("provisioner.%s", n.NameValue) - if len(n.PathValue) > 0 { - result = fmt.Sprintf("%s.%s", n.PathValue.String(), result) - } - - return result -} - -// GraphNodeSubPath -func (n *NodeProvisioner) Path() addrs.ModuleInstance { - return n.PathValue -} - -// GraphNodeProvisioner -func (n *NodeProvisioner) ProvisionerName() string { - return n.NameValue -} - -// GraphNodeEvalable impl.
-func (n *NodeProvisioner) EvalTree() EvalNode { - return &EvalInitProvisioner{Name: n.NameValue} -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go deleted file mode 100644 index c7b0e3c8e7..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go +++ /dev/null @@ -1,446 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "sort" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// ConcreteResourceNodeFunc is a callback type used to convert an -// abstract resource to a concrete one of some type. -type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex - -// GraphNodeResource is implemented by any nodes that represent a resource. -// The type of operation cannot be assumed, only that this node represents -// the given resource. -type GraphNodeResource interface { - ResourceAddr() addrs.AbsResource -} - -// ConcreteResourceInstanceNodeFunc is a callback type used to convert an -// abstract resource instance to a concrete one of some type. -type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex - -// GraphNodeResourceInstance is implemented by any nodes that represent -// a resource instance. A single resource may have multiple instances if, -// for example, the "count" or "for_each" argument is used for it in -// configuration. -type GraphNodeResourceInstance interface { - ResourceInstanceAddr() addrs.AbsResourceInstance -} - -// NodeAbstractResource represents a resource that has no associated -// operations. It registers all the interfaces for a resource that are common -// across multiple operation types. -type NodeAbstractResource struct { - Addr addrs.AbsResource // Addr is the address for this resource - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information.
- - Schema *configschema.Block // Schema for processing the configuration body - SchemaVersion uint64 // Schema version of "Schema", as decided by the provider - Config *configs.Resource // Config is the resource in the config - - ProvisionerSchemas map[string]*configschema.Block - - Targets []addrs.Targetable // Set from GraphNodeTargetable - - // The address of the provider this resource will use - ResolvedProvider addrs.AbsProviderConfig -} - -var ( - _ GraphNodeSubPath = (*NodeAbstractResource)(nil) - _ GraphNodeReferenceable = (*NodeAbstractResource)(nil) - _ GraphNodeReferencer = (*NodeAbstractResource)(nil) - _ GraphNodeProviderConsumer = (*NodeAbstractResource)(nil) - _ GraphNodeProvisionerConsumer = (*NodeAbstractResource)(nil) - _ GraphNodeResource = (*NodeAbstractResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeAbstractResource)(nil) - _ GraphNodeAttachResourceSchema = (*NodeAbstractResource)(nil) - _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResource)(nil) - _ GraphNodeTargetable = (*NodeAbstractResource)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractResource)(nil) -) - -// NewNodeAbstractResource creates an abstract resource graph node for -// the given absolute resource address. -func NewNodeAbstractResource(addr addrs.AbsResource) *NodeAbstractResource { - return &NodeAbstractResource{ - Addr: addr, - } -} - -// NodeAbstractResourceInstance represents a resource instance with no -// associated operations. It embeds NodeAbstractResource but additionally -// contains an instance key, used to identify one of potentially many -// instances that were created from a resource in configuration, e.g. using -// the "count" or "for_each" arguments. -type NodeAbstractResourceInstance struct { - NodeAbstractResource - InstanceKey addrs.InstanceKey - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. - - ResourceState *states.Resource -} - -var ( - _ GraphNodeSubPath = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeProvisionerConsumer = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeResource = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachResourceSchema = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeTargetable = (*NodeAbstractResourceInstance)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractResourceInstance)(nil) -) - -// NewNodeAbstractResourceInstance creates an abstract resource instance graph -// node for the given absolute resource instance address. -func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance { - // Due to the fact that we embed NodeAbstractResource, the given address - // actually ends up split between the resource address in the embedded - // object and the InstanceKey field in our own struct. The - // ResourceInstanceAddr method will stick these back together again on - // request. 
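- // That is, ContainingResource strips the instance key from the address, - // and the key is carried separately in InstanceKey.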
- return &NodeAbstractResourceInstance{ - NodeAbstractResource: NodeAbstractResource{ - Addr: addr.ContainingResource(), - }, - InstanceKey: addr.Resource.Key, - } -} - -func (n *NodeAbstractResource) Name() string { - return n.ResourceAddr().String() -} - -func (n *NodeAbstractResourceInstance) Name() string { - return n.ResourceInstanceAddr().String() -} - -// GraphNodeSubPath -func (n *NodeAbstractResource) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeReferenceable -func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.Resource} -} - -// GraphNodeReferenceable -func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable { - addr := n.ResourceInstanceAddr() - return []addrs.Referenceable{ - addr.Resource, - - // A resource instance can also be referenced by the address of its - // containing resource, so that e.g. a reference to aws_instance.foo - // would match both aws_instance.foo[0] and aws_instance.foo[1]. - addr.ContainingResource().Resource, - } -} - -// GraphNodeReferencer -func (n *NodeAbstractResource) References() []*addrs.Reference { - // If we have a config then we prefer to use that. - if c := n.Config; c != nil { - var result []*addrs.Reference - - for _, traversal := range c.DependsOn { - ref, err := addrs.ParseRef(traversal) - if err != nil { - // We ignore this here, because this isn't a suitable place to return - // errors. This situation should be caught and rejected during - // validation. - log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, err) - continue - } - - result = append(result, ref) - } - - if n.Schema == nil { - // Should never happen, but we'll log if it does so that we can - // see this easily when debugging. - log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) - } - - refs, _ := lang.ReferencesInExpr(c.Count) - result = append(result, refs...) - refs, _ = lang.ReferencesInExpr(c.ForEach) - result = append(result, refs...) - refs, _ = lang.ReferencesInBlock(c.Config, n.Schema) - result = append(result, refs...) - if c.Managed != nil { - for _, p := range c.Managed.Provisioners { - if p.When != configs.ProvisionerWhenCreate { - continue - } - if p.Connection != nil { - refs, _ = lang.ReferencesInBlock(p.Connection.Config, connectionBlockSupersetSchema) - result = append(result, refs...) - } - - schema := n.ProvisionerSchemas[p.Type] - if schema == nil { - log.Printf("[WARN] no schema for provisioner %q is attached to %s, so provisioner block references cannot be detected", p.Type, n.Name()) - } - refs, _ = lang.ReferencesInBlock(p.Config, schema) - result = append(result, refs...) - } - } - return result - } - - // Otherwise, we have no references. - return nil -} - -// GraphNodeReferencer -func (n *NodeAbstractResourceInstance) References() []*addrs.Reference { - // If we have a configuration attached then we'll delegate to our - // embedded abstract resource, which knows how to extract dependencies - // from configuration. - if n.Config != nil { - if n.Schema == nil { - // We'll produce a log message about this out here so that - // we can include the full instance address, since the equivalent - // message in NodeAbstractResource.References cannot see it.
- log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) - return nil - } - return n.NodeAbstractResource.References() - } - - // Otherwise, if we have state then we'll use the values stored in state - // as a fallback. - if rs := n.ResourceState; rs != nil { - if s := rs.Instance(n.InstanceKey); s != nil { - // State is still storing dependencies as old-style strings, so we'll - // need to do a little work here to massage this to the form we now - // want. - var result []*addrs.Reference - - // It is (apparently) possible for s.Current to be nil. This proved - // difficult to reproduce, so we will fix the symptom here and hope - // to find the root cause another time. - // - // https://github.com/hashicorp/terraform-plugin-sdk/issues/21407 - if s.Current == nil { - log.Printf("[WARN] no current state found for %s", n.Name()) - } else { - for _, addr := range s.Current.Dependencies { - if addr == nil { - // Should never happen; indicates a bug in the state loader - panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr())) - } - - // This is a little weird: we need to manufacture an addrs.Reference - // with a fake range here because the state isn't something we can - // make source references into. - result = append(result, &addrs.Reference{ - Subject: addr, - SourceRange: tfdiags.SourceRange{ - Filename: "(state file)", - }, - }) - } - } - return result - } - } - - // If we have neither config nor state then we have no references. - return nil -} - -// StateReferences returns the dependencies to put into the state for -// this resource. -func (n *NodeAbstractResourceInstance) StateReferences() []addrs.Referenceable { - selfAddrs := n.ReferenceableAddrs() - - // Since we don't include the source location references in our - // results from this method, we'll also filter out duplicates: - // there's no point in listing the same object twice without - // that additional context. - seen := map[string]struct{}{} - - // Pretend that we've already "seen" all of our own addresses so that we - // won't record self-references in the state. This can arise if, for - // example, a provisioner for a resource refers to the resource itself, - // which is valid (since provisioners always run after apply) but should - // not create an explicit dependency edge. - for _, selfAddr := range selfAddrs { - seen[selfAddr.String()] = struct{}{} - if riAddr, ok := selfAddr.(addrs.ResourceInstance); ok { - seen[riAddr.ContainingResource().String()] = struct{}{} - } - } - - depsRaw := n.References() - deps := make([]addrs.Referenceable, 0, len(depsRaw)) - for _, d := range depsRaw { - subj := d.Subject - if mco, isOutput := subj.(addrs.ModuleCallOutput); isOutput { - // For state dependencies, we simplify outputs to just refer - // to the module as a whole. It's not really clear why we do this, - // but this logic is preserved from before the 0.12 rewrite of - // this function. - subj = mco.Call - } - - k := subj.String() - if _, exists := seen[k]; exists { - continue - } - seen[k] = struct{}{} - switch tr := subj.(type) { - case addrs.ResourceInstance: - deps = append(deps, tr) - case addrs.Resource: - deps = append(deps, tr) - case addrs.ModuleCallInstance: - deps = append(deps, tr) - default: - // No other reference types are recorded in the state. - } - } - - // We'll also sort them, since that'll avoid creating changes in the - // serialized state that make no semantic difference. 
- sort.Slice(deps, func(i, j int) bool { - // Simple string-based sort because we just care about consistency, - // not user-friendliness. - return deps[i].String() < deps[j].String() - }) - - return deps -} - -func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) { - n.ResolvedProvider = p -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResource) ProvidedBy() (addrs.AbsProviderConfig, bool) { - // If we have a config we prefer that above all else - if n.Config != nil { - relAddr := n.Config.ProviderConfigAddr() - return relAddr.Absolute(n.Path()), false - } - - // Use our type and containing module path to guess a provider configuration address - return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Addr.Module), false -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResourceInstance) ProvidedBy() (addrs.AbsProviderConfig, bool) { - // If we have a config we prefer that above all else - if n.Config != nil { - relAddr := n.Config.ProviderConfigAddr() - return relAddr.Absolute(n.Path()), false - } - - // If we have state, then we will use the provider from there - if n.ResourceState != nil { - // An address from the state must match exactly, since we must ensure - // we refresh/destroy a resource with the same provider configuration - // that created it. - return n.ResourceState.ProviderConfig, true - } - - // Use our type and containing module path to guess a provider configuration address - return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Path()), false -} - -// GraphNodeProvisionerConsumer -func (n *NodeAbstractResource) ProvisionedBy() []string { - // If we have no configuration, then we have no provisioners - if n.Config == nil || n.Config.Managed == nil { - return nil - } - - // Build the list of provisioners we need based on the configuration. - // It is okay to have duplicates here. - result := make([]string, len(n.Config.Managed.Provisioners)) - for i, p := range n.Config.Managed.Provisioners { - result[i] = p.Type - } - - return result -} - -// GraphNodeProvisionerConsumer -func (n *NodeAbstractResource) AttachProvisionerSchema(name string, schema *configschema.Block) { - if n.ProvisionerSchemas == nil { - n.ProvisionerSchemas = make(map[string]*configschema.Block) - } - n.ProvisionerSchemas[name] = schema -} - -// GraphNodeResource -func (n *NodeAbstractResource) ResourceAddr() addrs.AbsResource { - return n.Addr -} - -// GraphNodeResourceInstance -func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance { - return n.NodeAbstractResource.Addr.Instance(n.InstanceKey) -} - -// GraphNodeAddressable, TODO: remove, used by target, should unify -func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress { - return NewLegacyResourceAddress(n.Addr) -} - -// GraphNodeTargetable -func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) { - n.Targets = targets -} - -// GraphNodeAttachResourceState -func (n *NodeAbstractResourceInstance) AttachResourceState(s *states.Resource) { - n.ResourceState = s -} - -// GraphNodeAttachResourceConfig -func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) { - n.Config = c -} - -// GraphNodeAttachResourceSchema impl -func (n *NodeAbstractResource) AttachResourceSchema(schema *configschema.Block, version uint64) { - n.Schema = schema - n.SchemaVersion = version -} - -// GraphNodeDotter impl. 
-func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "box", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go deleted file mode 100644 index 68d438d7bc..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go +++ /dev/null @@ -1,71 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" -) - -// NodeApplyableResource represents a resource that is "applyable": -// it may need to have its record in the state adjusted to match configuration. -// -// Unlike in the plan walk, this resource node does not DynamicExpand. Instead, -// it should be inserted into the same graph as any instances of the resource, -// with dependency edges ensuring that the resource is evaluated before any -// of its instances, which will in turn ensure that the whole-resource record -// in the state is suitably prepared to receive any updates to instances. -type NodeApplyableResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeResource = (*NodeApplyableResource)(nil) - _ GraphNodeEvalable = (*NodeApplyableResource)(nil) - _ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil) - _ GraphNodeReferencer = (*NodeApplyableResource)(nil) -) - -func (n *NodeApplyableResource) Name() string { - return n.NodeAbstractResource.Name() + " (prepare state)" -} - -func (n *NodeApplyableResource) References() []*addrs.Reference { - if n.Config == nil { - log.Printf("[WARN] NodeApplyableResource %q: no configuration, so can't determine References", dag.VertexName(n)) - return nil - } - - var result []*addrs.Reference - - // Since this node type only updates resource-level metadata, we only - // need to worry about the parts of the configuration that affect - // our "each mode": the count and for_each meta-arguments. - refs, _ := lang.ReferencesInExpr(n.Config.Count) - result = append(result, refs...) - refs, _ = lang.ReferencesInExpr(n.Config.ForEach) - result = append(result, refs...) - - return result -} - -// GraphNodeEvalable -func (n *NodeApplyableResource) EvalTree() EvalNode { - addr := n.ResourceAddr() - config := n.Config - providerAddr := n.ResolvedProvider - - if config == nil { - // Nothing to do, then.
- log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) - return &EvalNoop{} - } - - return &EvalWriteResourceState{ - Addr: addr.Resource, - Config: config, - ProviderAddr: providerAddr, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go deleted file mode 100644 index acdda45e40..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go +++ /dev/null @@ -1,426 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// NodeApplyableResourceInstance represents a resource instance that is -// "applyable": it is ready to be applied and is represented by a diff. -// -// This node is for a specific instance of a resource. It will usually be -// accompanied in the graph by a NodeApplyableResource representing its -// containing resource, and should depend on that node to ensure that the -// state is properly prepared to receive changes to instances. -type NodeApplyableResourceInstance struct { - *NodeAbstractResourceInstance - - destroyNode GraphNodeDestroyerCBD - graphNodeDeposer // implementation of GraphNodeDeposer -} - -var ( - _ GraphNodeResource = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeCreator = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeDeposer = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeEvalable = (*NodeApplyableResourceInstance)(nil) -) - -// GraphNodeAttachDestroyer -func (n *NodeApplyableResourceInstance) AttachDestroyNode(d GraphNodeDestroyerCBD) { - n.destroyNode = d -} - -// createBeforeDestroy checks this nodes config status and the status af any -// companion destroy node for CreateBeforeDestroy. -func (n *NodeApplyableResourceInstance) createBeforeDestroy() bool { - cbd := false - - if n.Config != nil && n.Config.Managed != nil { - cbd = n.Config.Managed.CreateBeforeDestroy - } - - if n.destroyNode != nil { - cbd = cbd || n.destroyNode.CreateBeforeDestroy() - } - - return cbd -} - -// GraphNodeCreator -func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeReferencer, overriding NodeAbstractResourceInstance -func (n *NodeApplyableResourceInstance) References() []*addrs.Reference { - // Start with the usual resource instance implementation - ret := n.NodeAbstractResourceInstance.References() - - // Applying a resource must also depend on the destruction of any of its - // dependencies, since this may for example affect the outcome of - // evaluating an entire list of resources with "count" set (by reducing - // the count). - // - // However, we can't do this in create_before_destroy mode because that - // would create a dependency cycle. We make a compromise here of requiring - // changes to be updated across two applies in this case, since the first - // plan will use the old values. 
- if !n.createBeforeDestroy() { - for _, ref := range ret { - switch tr := ref.Subject.(type) { - case addrs.ResourceInstance: - newRef := *ref // shallow copy so we can mutate - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - newRef.Remaining = nil // can't access attributes of something being destroyed - ret = append(ret, &newRef) - case addrs.Resource: - newRef := *ref // shallow copy so we can mutate - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - newRef.Remaining = nil // can't access attributes of something being destroyed - ret = append(ret, &newRef) - } - } - } - - return ret -} - -// GraphNodeEvalable -func (n *NodeApplyableResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - if n.Config == nil { - // This should not be possible, but we've got here in at least one - // case as discussed in the following issue: - // https://github.com/hashicorp/terraform-plugin-sdk/issues/21258 - // To avoid an outright crash here, we'll instead return an explicit - // error. - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource node has no configuration attached", - fmt.Sprintf( - "The graph node for %s has no configuration attached to it. This suggests a bug in Terraform's apply graph builder; please report it!", - addr, - ), - )) - err := diags.Err() - return &EvalReturnError{ - Error: &err, - } - } - - // Eval info is different depending on what kind of resource this is - switch n.Config.Mode { - case addrs.ManagedResourceMode: - return n.evalTreeManagedResource(addr) - case addrs.DataResourceMode: - return n.evalTreeDataResource(addr) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode { - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - - // Stop early if we don't actually have a diff - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if change == nil { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - // In this particular call to EvalReadData we include our planned - // change, which signals that we expect this read to complete fully - // with no unknown values; it'll produce an error if not. - &EvalReadData{ - Addr: addr.Resource, - Config: n.Config, - Dependencies: n.StateReferences(), - Planned: &change, // setting this indicates that the result must be complete - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - OutputState: &state, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - - // Clear the diff now that we've applied it, so - // later nodes won't see a diff that's now a no-op. 
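- // (Writing a nil change via EvalWriteDiff is how a saved diff is removed.)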
- &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: nil, - }, - - &EvalUpdateStateHook{}, - }, - } -} - -func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode { - // Declare a bunch of variables that are used for state during - // evaluation. Most of these are written to by address below. - var provider providers.Interface - var providerSchema *ProviderSchema - var diff, diffApply *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - var err error - var createNew bool - var createBeforeDestroyEnabled bool - var configVal cty.Value - var deposedKey states.DeposedKey - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &diffApply, - }, - - // We don't want to do any destroys - // (these are handled by NodeDestroyResourceInstance instead) - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply == nil { - return true, EvalEarlyExitError{} - } - if diffApply.Action == plans.Delete { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - destroy := false - if diffApply != nil { - destroy = (diffApply.Action == plans.Delete || diffApply.Action.IsReplace()) - } - if destroy && n.createBeforeDestroy() { - createBeforeDestroyEnabled = true - } - return createBeforeDestroyEnabled, nil - }, - Then: &EvalDeposeState{ - Addr: addr.Resource, - ForceKey: n.PreallocatedDeposedKey, - OutputKey: &deposedKey, - }, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - // Get the saved diff - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &diff, - }, - - // Make a new diff, in case we've learned new values in the state - // during apply which we can now incorporate. - &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - PreviousDiff: &diff, - OutputChange: &diffApply, - OutputValue: &configVal, - OutputState: &state, - }, - - // Compare the diffs - &EvalCheckPlannedChange{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Planned: &diff, - Actual: &diffApply, - }, - - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalReduceDiff{ - Addr: addr.Resource, - InChange: &diffApply, - Destroy: false, - OutChange: &diffApply, - }, - - // EvalReduceDiff may have simplified our planned change - // into a NoOp if it only requires destroying, since destroying - // is handled by NodeDestroyResourceInstance.
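- // EvalEarlyExitError is a sentinel that stops evaluation of this node's - // tree without being reported as a real error.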
- &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply == nil || diffApply.Action == plans.NoOp { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - // Call pre-apply hook - &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &diffApply, - }, - &EvalApply{ - Addr: addr.Resource, - Config: n.Config, - Dependencies: n.StateReferences(), - State: &state, - Change: &diffApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - Error: &err, - CreateNew: &createNew, - }, - &EvalMaybeTainted{ - Addr: addr.Resource, - State: &state, - Change: &diffApply, - Error: &err, - StateOutput: &state, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - &EvalApplyProvisioners{ - Addr: addr.Resource, - State: &state, // EvalApplyProvisioners will skip if already tainted - ResourceConfig: n.Config, - CreateNew: &createNew, - Error: &err, - When: configs.ProvisionerWhenCreate, - }, - &EvalMaybeTainted{ - Addr: addr.Resource, - State: &state, - Change: &diffApply, - Error: &err, - StateOutput: &state, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return createBeforeDestroyEnabled && err != nil, nil - }, - Then: &EvalMaybeRestoreDeposedObject{ - Addr: addr.Resource, - Key: &deposedKey, - }, - }, - - // We clear the diff out here so that future nodes - // don't see a diff that is already complete. There - // is no longer a diff! - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if !diff.Action.IsReplace() { - return true, nil - } - if !n.createBeforeDestroy() { - return true, nil - } - return false, nil - }, - Then: &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: nil, - }, - }, - - &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go deleted file mode 100644 index 049e5e9907..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go +++ /dev/null @@ -1,321 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// NodeDestroyResourceInstance represents a resource instance that is to be -// destroyed. -type NodeDestroyResourceInstance struct { - *NodeAbstractResourceInstance - - // If DeposedKey is set to anything other than states.NotDeposed then - // this node destroys a deposed object of the associated instance - // rather than its current object. 
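For create-before-destroy resources, the apply tree above deposes the current object (`EvalDeposeState`), applies the replacement, and restores the deposed object (`EvalMaybeRestoreDeposedObject`) if the apply failed. A compressed, hypothetical sketch of that control flow, modelling the state of one instance as a single string for brevity:

```go
package main

import (
	"errors"
	"fmt"
)

// applyWithCBD compresses the sequence above: depose the current object,
// create the replacement, and put the deposed object back if the create
// failed, so state still records something to destroy or recover.
func applyWithCBD(state *string, create func() (string, error)) error {
	deposed := *state // EvalDeposeState
	*state = ""

	newObj, err := create() // EvalApply
	if err != nil {
		*state = deposed // EvalMaybeRestoreDeposedObject
		return err
	}
	*state = newObj
	// On success, the deposed object is destroyed by its own graph node.
	return nil
}

func main() {
	s := "i-old"
	err := applyWithCBD(&s, func() (string, error) {
		return "", errors.New("create failed")
	})
	fmt.Println(err, "state:", s) // old object restored
}
```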
- DeposedKey states.DeposedKey - - CreateBeforeDestroyOverride *bool -} - -var ( - _ GraphNodeResource = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeDestroyerCBD = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeEvalable = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil) -) - -func (n *NodeDestroyResourceInstance) Name() string { - if n.DeposedKey != states.NotDeposed { - return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) - } - return n.ResourceInstanceAddr().String() + " (destroy)" -} - -// GraphNodeDestroyer -func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool { - if n.CreateBeforeDestroyOverride != nil { - return *n.CreateBeforeDestroyOverride - } - - // If we have no config, we just assume no - if n.Config == nil || n.Config.Managed == nil { - return false - } - - return n.Config.Managed.CreateBeforeDestroy -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error { - n.CreateBeforeDestroyOverride = &v - return nil -} - -// GraphNodeReferenceable, overriding NodeAbstractResource -func (n *NodeDestroyResourceInstance) ReferenceableAddrs() []addrs.Referenceable { - normalAddrs := n.NodeAbstractResourceInstance.ReferenceableAddrs() - destroyAddrs := make([]addrs.Referenceable, len(normalAddrs)) - - phaseType := addrs.ResourceInstancePhaseDestroy - if n.CreateBeforeDestroy() { - phaseType = addrs.ResourceInstancePhaseDestroyCBD - } - - for i, normalAddr := range normalAddrs { - switch ta := normalAddr.(type) { - case addrs.Resource: - destroyAddrs[i] = ta.Phase(phaseType) - case addrs.ResourceInstance: - destroyAddrs[i] = ta.Phase(phaseType) - default: - destroyAddrs[i] = normalAddr - } - } - - return destroyAddrs -} - -// GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeDestroyResourceInstance) References() []*addrs.Reference { - // If we have a config, then we need to include destroy-time dependencies - if c := n.Config; c != nil && c.Managed != nil { - var result []*addrs.Reference - - // We include conn info and config for destroy time provisioners - // as dependencies that we have. - for _, p := range c.Managed.Provisioners { - schema := n.ProvisionerSchemas[p.Type] - - if p.When == configs.ProvisionerWhenDestroy { - if p.Connection != nil { - result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...) - } - result = append(result, ReferencesFromConfig(p.Config, schema)...) 
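`CreateBeforeDestroyOverride` is a `*bool` rather than a `bool` so the graph builder can distinguish "no override" (nil, fall back to configuration) from an explicitly forced value. A short sketch of this tri-state pattern, using hypothetical names:

```go
package main

import "fmt"

type node struct {
	override  *bool // nil means "use the configuration"
	configCBD bool  // value from the resource's lifecycle block
}

func (n *node) createBeforeDestroy() bool {
	if n.override != nil {
		return *n.override // graph builder forced a decision
	}
	return n.configCBD
}

func (n *node) modifyCreateBeforeDestroy(v bool) {
	n.override = &v
}

func main() {
	n := &node{configCBD: false}
	fmt.Println(n.createBeforeDestroy()) // false: from config
	n.modifyCreateBeforeDestroy(true)    // dependency forces CBD on
	fmt.Println(n.createBeforeDestroy()) // true: override wins
}
```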
- } - } - - return result - } - - return nil -} - -// GraphNodeEvalable -func (n *NodeDestroyResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Get our state - rs := n.ResourceState - var is *states.ResourceInstance - if rs != nil { - is = rs.Instance(n.InstanceKey) - } - if is == nil { - log.Printf("[WARN] NodeDestroyResourceInstance for %s with no state", addr) - } - - var changeApply *plans.ResourceInstanceChange - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - var err error - return &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &changeApply, - }, - - &EvalReduceDiff{ - Addr: addr.Resource, - InChange: &changeApply, - Destroy: true, - OutChange: &changeApply, - }, - - // EvalReduceDiff may have simplified our planned change - // into a NoOp if it does not require destroying. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if changeApply == nil || changeApply.Action == plans.NoOp { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalReadState{ - Addr: addr.Resource, - Output: &state, - Provider: &provider, - ProviderSchema: &providerSchema, - }, - &EvalRequireState{ - State: &state, - }, - - // Call pre-apply hook - &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &changeApply, - }, - - // Run destroy provisioners if not tainted - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if state != nil && state.Status == states.ObjectTainted { - return false, nil - } - - return true, nil - }, - - Then: &EvalApplyProvisioners{ - Addr: addr.Resource, - State: &state, - ResourceConfig: n.Config, - Error: &err, - When: configs.ProvisionerWhenDestroy, - }, - }, - - // If we have a provisioning error, then we just call - // the post-apply hook now. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return err != nil, nil - }, - - Then: &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - }, - - // Make sure we handle data sources properly. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return addr.Resource.Resource.Mode == addrs.DataResourceMode, nil - }, - - Then: &EvalReadDataApply{ - Addr: addr.Resource, - Config: n.Config, - Change: &changeApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - }, - Else: &EvalApply{ - Addr: addr.Resource, - Config: nil, // No configuration because we are destroying - State: &state, - Change: &changeApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - Error: &err, - }, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - }, - } -} - -// NodeDestroyResourceInstance represents a resource that is to be destroyed. -// -// Destroying a resource is a state-only operation: it is the individual -// instances being destroyed that affects remote objects. 
During graph -// construction, NodeDestroyResource should always depend on any other node -// related to the given resource, since it's just a final cleanup to avoid -// leaving skeleton resource objects in state after their instances have -// all been destroyed. -type NodeDestroyResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeResource = (*NodeDestroyResource)(nil) - _ GraphNodeReferenceable = (*NodeDestroyResource)(nil) - _ GraphNodeReferencer = (*NodeDestroyResource)(nil) - _ GraphNodeEvalable = (*NodeDestroyResource)(nil) -) - -func (n *NodeDestroyResource) Name() string { - return n.ResourceAddr().String() + " (clean up state)" -} - -// GraphNodeReferenceable, overriding NodeAbstractResource -func (n *NodeDestroyResource) ReferenceableAddrs() []addrs.Referenceable { - // NodeDestroyResource doesn't participate in references: the graph - // builder that created it should ensure directly that it already depends - // on every other node related to its resource, without relying on - // references. - return nil -} - -// GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeDestroyResource) References() []*addrs.Reference { - // NodeDestroyResource doesn't participate in references: the graph - // builder that created it should ensure directly that it already depends - // on every other node related to its resource, without relying on - // references. - return nil -} - -// GraphNodeEvalable -func (n *NodeDestroyResource) EvalTree() EvalNode { - // This EvalNode will produce an error if the resource isn't already - // empty by the time it is called, since it should just be pruning the - // leftover husk of a resource in state after all of the child instances - // and their objects were destroyed. - return &EvalForgetResourceState{ - Addr: n.ResourceAddr().Resource, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go deleted file mode 100644 index 269c798080..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go +++ /dev/null @@ -1,313 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert -// an abstract resource instance to a concrete one of some type that has -// an associated deposed object key. -type ConcreteResourceInstanceDeposedNodeFunc func(*NodeAbstractResourceInstance, states.DeposedKey) dag.Vertex - -type GraphNodeDeposedResourceInstanceObject interface { - DeposedInstanceObjectKey() states.DeposedKey -} - -// NodePlanDeposedResourceInstanceObject represents deposed resource -// instance objects during plan. These are distinct from the primary object -// for each resource instance since the only valid operation to do with them -// is to destroy them. -// -// This node type is also used during the refresh walk to ensure that the -// record of a deposed object is up-to-date before we plan to destroy it. 
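A "deposed" object is a former current object that was set aside (typically during create-before-destroy) and now exists only to be destroyed. The toy model below sketches the bookkeeping; it assumes only the 8-hex-digit key format used by the real `states` package, nothing else of its API:

```go
package main

import (
	"fmt"
	"math/rand"
)

// DeposedKey identifies one deposed object of a resource instance.
type DeposedKey string

// instanceState is a toy model: one current object plus any number of
// deposed objects awaiting destruction.
type instanceState struct {
	Current string
	Deposed map[DeposedKey]string
}

// depose moves the current object aside under a fresh key, making room
// for a create-before-destroy replacement.
func (s *instanceState) depose() DeposedKey {
	k := DeposedKey(fmt.Sprintf("%08x", rand.Uint32()))
	if s.Deposed == nil {
		s.Deposed = map[DeposedKey]string{}
	}
	s.Deposed[k] = s.Current
	s.Current = ""
	return k
}

func main() {
	s := &instanceState{Current: "i-abc123"}
	k := s.depose()
	s.Current = "i-def456" // replacement created first
	fmt.Printf("current=%s deposed[%s]=%s\n", s.Current, k, s.Deposed[k])
}
```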
-type NodePlanDeposedResourceInstanceObject struct { - *NodeAbstractResourceInstance - DeposedKey states.DeposedKey -} - -var ( - _ GraphNodeDeposedResourceInstanceObject = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeResource = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeResourceInstance = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeReferenceable = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeReferencer = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeEvalable = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeProviderConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeProvisionerConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) -) - -func (n *NodePlanDeposedResourceInstanceObject) Name() string { - return fmt.Sprintf("%s (deposed %s)", n.ResourceInstanceAddr().String(), n.DeposedKey) -} - -func (n *NodePlanDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { - return n.DeposedKey -} - -// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodePlanDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { - // Deposed objects don't participate in references. - return nil -} - -// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference { - // We don't evaluate configuration for deposed objects, so they effectively - // make no references. - return nil -} - -// GraphNodeEvalable impl. -func (n *NodePlanDeposedResourceInstanceObject) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - - seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} - - // During the refresh walk we will ensure that our record of the deposed - // object is up-to-date. If it was already deleted outside of Terraform - // then this will remove it from state and thus avoid us planning a - // destroy for it during the subsequent plan walk. - seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadStateDeposed{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - Key: n.DeposedKey, - Output: &state, - }, - &EvalRefresh{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - }, - &EvalWriteStateDeposed{ - Addr: addr.Resource, - Key: n.DeposedKey, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - }, - }) - - // During the plan walk we always produce a planned destroy change, because - // destroying is the only supported action for deposed objects. 
- var change *plans.ResourceInstanceChange - seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkPlan, walkPlanDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadStateDeposed{ - Addr: addr.Resource, - Output: &state, - Key: n.DeposedKey, - Provider: &provider, - ProviderSchema: &providerSchema, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - DeposedKey: n.DeposedKey, - State: &state, - Output: &change, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - DeposedKey: n.DeposedKey, - ProviderSchema: &providerSchema, - Change: &change, - }, - // Since deposed objects cannot be referenced by expressions - // elsewhere, we don't need to also record the planned new - // state in this case. - }, - }, - }) - - return seq -} - -// NodeDestroyDeposedResourceInstanceObject represents deposed resource -// instance objects during apply. Nodes of this type are inserted by -// DiffTransformer when the planned changeset contains "delete" changes for -// deposed instance objects, and its only supported operation is to destroy -// and then forget the associated object. -type NodeDestroyDeposedResourceInstanceObject struct { - *NodeAbstractResourceInstance - DeposedKey states.DeposedKey -} - -var ( - _ GraphNodeDeposedResourceInstanceObject = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeResource = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeResourceInstance = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeDestroyer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeDestroyerCBD = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeReferenceable = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeReferencer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeEvalable = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeProviderConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeProvisionerConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) -) - -func (n *NodeDestroyDeposedResourceInstanceObject) Name() string { - return fmt.Sprintf("%s (destroy deposed %s)", n.Addr.String(), n.DeposedKey) -} - -func (n *NodeDestroyDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { - return n.DeposedKey -} - -// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodeDestroyDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { - // Deposed objects don't participate in references. - return nil -} - -// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodeDestroyDeposedResourceInstanceObject) References() []*addrs.Reference { - // We don't evaluate configuration for deposed objects, so they effectively - // make no references. - return nil -} - -// GraphNodeDestroyer -func (n *NodeDestroyDeposedResourceInstanceObject) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyDeposedResourceInstanceObject) CreateBeforeDestroy() bool { - // A deposed instance is always CreateBeforeDestroy by definition, since - // we use deposed only to handle create-before-destroy. 
- return true -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v bool) error { - if !v { - // Should never happen: deposed instances are _always_ create_before_destroy. - return fmt.Errorf("can't deactivate create_before_destroy for a deposed instance") - } - return nil -} - -// GraphNodeEvalable impl. -func (n *NodeDestroyDeposedResourceInstanceObject) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - var change *plans.ResourceInstanceChange - var err error - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadStateDeposed{ - Addr: addr.Resource, - Output: &state, - Key: n.DeposedKey, - Provider: &provider, - ProviderSchema: &providerSchema, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - Output: &change, - }, - // Call pre-apply hook - &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &change, - }, - &EvalApply{ - Addr: addr.Resource, - Config: nil, // No configuration because we are destroying - State: &state, - Change: &change, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - Error: &err, - }, - // Always write the resource back to the state deposed... if it - // was successfully destroyed it will be pruned. If it was not, it will - // be caught on the next run. - &EvalWriteStateDeposed{ - Addr: addr.Resource, - Key: n.DeposedKey, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - &EvalReturnError{ - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - } -} - -// GraphNodeDeposer is an optional interface implemented by graph nodes that -// might create a single new deposed object for a specific associated resource -// instance, allowing a caller to optionally pre-allocate a DeposedKey for -// it. -type GraphNodeDeposer interface { - // SetPreallocatedDeposedKey will be called during graph construction - // if a particular node must use a pre-allocated deposed key if/when it - // "deposes" the current object of its associated resource instance. - SetPreallocatedDeposedKey(key states.DeposedKey) -} - -// graphNodeDeposer is an embeddable implementation of GraphNodeDeposer. -// Embed it in a node type to get automatic support for it, and then access -// the field PreallocatedDeposedKey to access any pre-allocated key. -type graphNodeDeposer struct { - PreallocatedDeposedKey states.DeposedKey -} - -func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) { - n.PreallocatedDeposedKey = key -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go deleted file mode 100644 index 2dc0df908e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go +++ /dev/null @@ -1,166 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// NodePlannableResource represents a resource that is "plannable": -// it is ready to be planned in order to create a diff. 
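`graphNodeDeposer` above illustrates the SDK's embeddable-implementation idiom: a node type satisfies `GraphNodeDeposer` simply by embedding the struct, and later reads `PreallocatedDeposedKey`. A runnable sketch of the idiom (`applyNode` and the trivial `DeposedKey` type are hypothetical):

```go
package main

import "fmt"

type DeposedKey string

// GraphNodeDeposer mirrors the optional interface from the code above.
type GraphNodeDeposer interface {
	SetPreallocatedDeposedKey(key DeposedKey)
}

// graphNodeDeposer provides the implementation for embedding.
type graphNodeDeposer struct {
	PreallocatedDeposedKey DeposedKey
}

func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key DeposedKey) {
	n.PreallocatedDeposedKey = key
}

// applyNode gets GraphNodeDeposer support for free via embedding.
type applyNode struct {
	graphNodeDeposer
	name string
}

func main() {
	var d GraphNodeDeposer = &applyNode{name: "aws_instance.web"}
	d.SetPreallocatedDeposedKey("00000001")
	fmt.Println(d.(*applyNode).PreallocatedDeposedKey)
}
```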
-type NodePlannableResource struct { - *NodeAbstractResource - - // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD - // during graph construction, if dependencies require us to force this - // on regardless of what the configuration says. - ForceCreateBeforeDestroy *bool -} - -var ( - _ GraphNodeSubPath = (*NodePlannableResource)(nil) - _ GraphNodeDestroyerCBD = (*NodePlannableResource)(nil) - _ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil) - _ GraphNodeReferenceable = (*NodePlannableResource)(nil) - _ GraphNodeReferencer = (*NodePlannableResource)(nil) - _ GraphNodeResource = (*NodePlannableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil) -) - -// GraphNodeEvalable -func (n *NodePlannableResource) EvalTree() EvalNode { - addr := n.ResourceAddr() - config := n.Config - - if config == nil { - // Nothing to do, then. - log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) - return &EvalNoop{} - } - - // this ensures we can reference the resource even if the count is 0 - return &EvalWriteResourceState{ - Addr: addr.Resource, - Config: config, - ProviderAddr: n.ResolvedProvider, - } -} - -// GraphNodeDestroyerCBD -func (n *NodePlannableResource) CreateBeforeDestroy() bool { - if n.ForceCreateBeforeDestroy != nil { - return *n.ForceCreateBeforeDestroy - } - - // If we have no config, we just assume no - if n.Config == nil || n.Config.Managed == nil { - return false - } - - return n.Config.Managed.CreateBeforeDestroy -} - -// GraphNodeDestroyerCBD -func (n *NodePlannableResource) ModifyCreateBeforeDestroy(v bool) error { - n.ForceCreateBeforeDestroy = &v - return nil -} - -// GraphNodeDynamicExpandable -func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - - forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - - // Next we need to potentially rename an instance address in the state - // if we're transitioning whether "count" is set at all. - fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - - return &NodePlannableResourceInstance{ - NodeAbstractResourceInstance: a, - - // By the time we're walking, we've figured out whether we need - // to force on CreateBeforeDestroy due to dependencies on other - // nodes that have it. 
- ForceCreateBeforeDestroy: n.CreateBeforeDestroy(), - } - } - - // The concrete resource factory we'll use for orphans - concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count or for_each (if present) - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - }, - - // Add the count/for_each orphans - &OrphanResourceCountTransformer{ - Concrete: concreteResourceOrphan, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodePlannableResource", - } - graph, diags := b.Build(ctx.Path()) - return graph, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go deleted file mode 100644 index 2c3a7012b9..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go +++ /dev/null @@ -1,88 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// NodePlanDestroyableResourceInstance represents a resource that is ready -// to be planned for destruction. -type NodePlanDestroyableResourceInstance struct { - *NodeAbstractResourceInstance -} - -var ( - _ GraphNodeSubPath = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeResource = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeEvalable = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodePlanDestroyableResourceInstance)(nil) -) - -// GraphNodeDestroyer -func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeEvalable -func (n *NodePlanDestroyableResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. These are written to by address in the EvalNodes we - // declare below. 
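`DynamicExpand` above turns one configured resource into per-instance graph nodes by evaluating `count`/`for_each` and then running a transformer pipeline. The sketch below reduces this to the key-expansion step alone; the transformers, state locking, and `for_each` handling are omitted. The `-1` convention for "count not set" follows `evaluateResourceCountExpression` as used above.

```go
package main

import "fmt"

// expandCount mirrors the ResourceCountTransformer's core job: one
// configuration address becomes count (or one) instance addresses.
// count == -1 means "count not set", which yields a keyless instance.
func expandCount(addr string, count int) []string {
	if count == -1 {
		return []string{addr}
	}
	out := make([]string, 0, count)
	for i := 0; i < count; i++ {
		out = append(out, fmt.Sprintf("%s[%d]", addr, i))
	}
	return out
}

func main() {
	fmt.Println(expandCount("aws_instance.web", -1)) // keyless single instance
	fmt.Println(expandCount("aws_instance.web", 3))  // indices 0, 1, 2
	fmt.Println(expandCount("aws_instance.web", 0))  // none (orphans handled separately)
}
```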
- var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - if n.ResolvedProvider.ProviderConfig.Type == "" { - // Should never happen; indicates that the graph was not constructed - // correctly since we didn't get our provider attached. - panic(fmt.Sprintf("%T %q was not assigned a resolved provider", n, dag.VertexName(n))) - } - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - Output: &change, - }, - &EvalCheckPreventDestroy{ - Addr: addr.Resource, - Config: n.Config, - Change: &change, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go deleted file mode 100644 index ac4b24cf22..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go +++ /dev/null @@ -1,201 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/zclconf/go-cty/cty" -) - -// NodePlannableResourceInstance represents a _single_ resource -// instance that is plannable. This means this represents a single -// count index, for example. 
-type NodePlannableResourceInstance struct { - *NodeAbstractResourceInstance - ForceCreateBeforeDestroy bool -} - -var ( - _ GraphNodeSubPath = (*NodePlannableResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil) - _ GraphNodeResource = (*NodePlannableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil) - _ GraphNodeEvalable = (*NodePlannableResourceInstance)(nil) -) - -// GraphNodeEvalable -func (n *NodePlannableResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Eval info is different depending on what kind of resource this is - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - return n.evalTreeManagedResource(addr) - case addrs.DataResourceMode: - return n.evalTreeDataResource(addr) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode { - config := n.Config - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - var configVal cty.Value - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - // If we already have a non-planned state then we already dealt - // with this during the refresh walk and so we have nothing to do - // here. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - depChanges := false - - // Check and see if any of our dependencies have changes. - changes := ctx.Changes() - for _, d := range n.StateReferences() { - ri, ok := d.(addrs.ResourceInstance) - if !ok { - continue - } - change := changes.GetResourceInstanceChange(ri.Absolute(ctx.Path()), states.CurrentGen) - if change != nil && change.Action != plans.NoOp { - depChanges = true - break - } - } - - refreshed := state != nil && state.Status != states.ObjectPlanned - - // If there are no dependency changes, and it's not a forced - // read because there was no Refresh, then we don't need - // to re-read. If any dependencies have changes, it means - // our config may also have changes and we need to Read the - // data source again. 
- if !depChanges && refreshed { - return false, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalValidateSelfRef{ - Addr: addr.Resource, - Config: config.Config, - ProviderSchema: &providerSchema, - }, - - &EvalReadData{ - Addr: addr.Resource, - Config: n.Config, - Dependencies: n.StateReferences(), - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - ForcePlanRead: true, // _always_ produce a Read change, even if the config seems ready - OutputChange: &change, - OutputValue: &configVal, - OutputState: &state, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - }, - } -} - -func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode { - config := n.Config - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalValidateSelfRef{ - Addr: addr.Resource, - Config: config.Config, - ProviderSchema: &providerSchema, - }, - - &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - CreateBeforeDestroy: n.ForceCreateBeforeDestroy, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - OutputChange: &change, - OutputState: &state, - }, - &EvalCheckPreventDestroy{ - Addr: addr.Resource, - Config: n.Config, - Change: &change, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - ProviderSchema: &providerSchema, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go deleted file mode 100644 index 8e4f7148ff..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go +++ /dev/null @@ -1,84 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// NodePlannableResourceInstanceOrphan represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. 
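The `EvalIf` condition in `evalTreeDataResource` above re-reads a data source only when a dependency has a real planned change or when the stored state is still merely "planned". A distilled sketch of that decision, using hypothetical types in place of the SDK's change set:

```go
package main

import "fmt"

type action int

const (
	noOp action = iota
	update
	replace
)

// depsChanged mirrors the check above: scan planned changes for each
// dependency and report whether any is doing real work this plan.
func depsChanged(deps []string, planned map[string]action) bool {
	for _, d := range deps {
		if a, ok := planned[d]; ok && a != noOp {
			return true
		}
	}
	return false
}

func main() {
	planned := map[string]action{
		"aws_vpc.main": noOp,
		"aws_subnet.a": update,
	}
	deps := []string{"aws_vpc.main", "aws_subnet.a"}
	refreshed := true // state exists and is not merely "planned"

	// Matches the early-exit condition above: skip the re-read only when
	// nothing changed and the object was already refreshed.
	fmt.Println("re-read needed:", depsChanged(deps, planned) || !refreshed)
}
```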
-type NodePlannableResourceInstanceOrphan struct { - *NodeAbstractResourceInstance -} - -var ( - _ GraphNodeSubPath = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeReferenceable = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeReferencer = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeResource = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeResourceInstance = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeAttachResourceState = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil) -) - -var ( - _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil) -) - -func (n *NodePlannableResourceInstanceOrphan) Name() string { - return n.ResourceInstanceAddr().String() + " (orphan)" -} - -// GraphNodeEvalable -func (n *NodePlannableResourceInstanceOrphan) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. Most of these are written to by address below. - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - var provider providers.Interface - var providerSchema *ProviderSchema - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - State: &state, - ProviderAddr: n.ResolvedProvider, - Output: &change, - OutputState: &state, // Will point to a nil state after this completes, signalling destroyed - }, - &EvalCheckPreventDestroy{ - Addr: addr.Resource, - Config: n.Config, - Change: &change, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go deleted file mode 100644 index dcab37270c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go +++ /dev/null @@ -1,296 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// NodeRefreshableManagedResource represents a resource that is expandable into -// NodeRefreshableManagedResourceInstance. Resource count orphans are also added. 
-type NodeRefreshableManagedResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeSubPath = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeDynamicExpandable = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeReferenceable = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeReferencer = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeResource = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResource)(nil) -) - -// GraphNodeDynamicExpandable -func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - - forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - - // Next we need to potentially rename an instance address in the state - // if we're transitioning whether "count" is set at all. - fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - - return &NodeRefreshableManagedResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - }, - - // Add the count orphans to make sure these resources are accounted for - // during a scale in. - &OrphanResourceCountTransformer{ - Concrete: concreteResource, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeRefreshableManagedResource", - } - - graph, diags := b.Build(ctx.Path()) - return graph, diags.ErrWithWarnings() -} - -// NodeRefreshableManagedResourceInstance represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. 
-type NodeRefreshableManagedResourceInstance struct { - *NodeAbstractResourceInstance -} - -var ( - _ GraphNodeSubPath = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeResource = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeEvalable = (*NodeRefreshableManagedResourceInstance)(nil) -) - -// GraphNodeDestroyer -func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeEvalable -func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Eval info is different depending on what kind of resource this is - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - if n.ResourceState == nil { - log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s has no existing state to refresh", addr) - return n.evalTreeManagedResourceNoState() - } - log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s will be refreshed", addr) - return n.evalTreeManagedResource() - - case addrs.DataResourceMode: - // Get the data source node. If we don't have a configuration - // then it is an orphan so we destroy it (remove it from the state). - var dn GraphNodeEvalable - if n.Config != nil { - dn = &NodeRefreshableDataResourceInstance{ - NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, - } - } else { - dn = &NodeDestroyableDataResourceInstance{ - NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, - } - } - - return dn.EvalTree() - default: - panic(fmt.Errorf("unsupported resource mode %s", addr.Resource.Resource.Mode)) - } -} - -func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - - // This happened during initial development. All known cases were - // fixed and tested but as a sanity check let's assert here. - if n.ResourceState == nil { - err := fmt.Errorf( - "No resource state attached for addr: %s\n\n"+ - "This is a bug. Please report this to Terraform with your configuration\n"+ - "and state attached. 
Please be careful to scrub any sensitive information.", - addr) - return &EvalReturnError{Error: &err} - } - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalRefresh{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - } -} - -// evalTreeManagedResourceNoState produces an EvalSequence for refresh resource -// nodes that don't have state attached. An example of where this functionality -// is useful is when a resource that already exists in state is being scaled -// out, ie: has its resource count increased. In this case, the scaled out node -// needs to be available to other nodes (namely data sources) that may depend -// on it for proper interpolation, or confusing "index out of range" errors can -// occur. -// -// The steps in this sequence are very similar to the steps carried out in -// plan, but nothing is done with the diff after it is created - it is dropped, -// and its changes are not counted in the UI. -func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - OutputChange: &change, - OutputState: &state, - Stub: true, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - - // We must also save the planned change, so that expressions in - // other nodes, such as provider configurations and data resources, - // can work with the planned new value. - // - // This depends on the fact that Context.Refresh creates a - // temporary new empty changeset for the duration of its graph - // walk, and so this recorded change will be discarded immediately - // after the refresh walk completes. 
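The comment above describes `Context.Refresh` recording stub planned changes in a temporary changeset that is discarded once the refresh walk returns, so interpolation works during the walk without persisting anything. A loose sketch of that scoping pattern (the changeset type here is hypothetical):

```go
package main

import "fmt"

// changes is a toy changeset keyed by resource address.
type changes map[string]string

// withTemporaryChanges mirrors the pattern described above: the refresh
// walk records planned values in a scratch changeset so other nodes can
// read them, then the scratch set is discarded when the walk returns.
func withTemporaryChanges(walk func(cs changes)) {
	scratch := changes{}
	walk(scratch)
	// scratch goes out of scope here; nothing is persisted.
}

func main() {
	withTemporaryChanges(func(cs changes) {
		cs["aws_instance.web[1]"] = "planned: create (stub)"
		fmt.Println("visible during walk:", cs["aws_instance.web[1]"])
	})
	fmt.Println("after walk: changeset discarded")
}
```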
- &EvalWriteDiff{ - Addr: addr.Resource, - Change: &change, - ProviderSchema: &providerSchema, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go deleted file mode 100644 index f0eb18a065..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go +++ /dev/null @@ -1,90 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" - "github.com/zclconf/go-cty/cty" -) - -// NodeValidatableResource represents a resource that is used for validation -// only. -type NodeValidatableResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeSubPath = (*NodeValidatableResource)(nil) - _ GraphNodeEvalable = (*NodeValidatableResource)(nil) - _ GraphNodeReferenceable = (*NodeValidatableResource)(nil) - _ GraphNodeReferencer = (*NodeValidatableResource)(nil) - _ GraphNodeResource = (*NodeValidatableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil) -) - -// GraphNodeEvalable -func (n *NodeValidatableResource) EvalTree() EvalNode { - addr := n.ResourceAddr() - config := n.Config - - // Declare the variables will be used are used to pass values along - // the evaluation sequence below. These are written to via pointers - // passed to the EvalNodes. - var provider providers.Interface - var providerSchema *ProviderSchema - var configVal cty.Value - - seq := &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalValidateResource{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - Config: config, - ConfigVal: &configVal, - }, - }, - } - - if managed := n.Config.Managed; managed != nil { - hasCount := n.Config.Count != nil - hasForEach := n.Config.ForEach != nil - - // Validate all the provisioners - for _, p := range managed.Provisioners { - var provisioner provisioners.Interface - var provisionerSchema *configschema.Block - - if p.Connection == nil { - p.Connection = config.Managed.Connection - } else if config.Managed.Connection != nil { - p.Connection.Config = configs.MergeBodies(config.Managed.Connection.Config, p.Connection.Config) - } - - seq.Nodes = append( - seq.Nodes, - &EvalGetProvisioner{ - Name: p.Type, - Output: &provisioner, - Schema: &provisionerSchema, - }, - &EvalValidateProvisioner{ - ResourceAddr: addr.Resource, - Provisioner: &provisioner, - Schema: &provisionerSchema, - Config: p, - ResourceHasCount: hasCount, - ResourceHasForEach: hasForEach, - }, - ) - } - } - - return seq -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go deleted file mode 100644 index 844d060c9f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// NodeRootVariable represents a root variable input. 
-type NodeRootVariable struct { - Addr addrs.InputVariable - Config *configs.Variable -} - -var ( - _ GraphNodeSubPath = (*NodeRootVariable)(nil) - _ GraphNodeReferenceable = (*NodeRootVariable)(nil) - _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) -) - -func (n *NodeRootVariable) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeRootVariable) Path() addrs.ModuleInstance { - return addrs.RootModuleInstance -} - -// GraphNodeReferenceable -func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr} -} - -// dag.GraphNodeDotter impl. -func (n *NodeRootVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go deleted file mode 100644 index 19e3469cb3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go +++ /dev/null @@ -1,17 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// PathObjectCacheKey is like PathCacheKey but includes an additional name -// to be included in the key, for module-namespaced objects. -// -// The result of this function is guaranteed unique for any distinct pair -// of path and name, but is not guaranteed to be in any particular format -// and in particular should never be shown to end-users. -func PathObjectCacheKey(path addrs.ModuleInstance, objectName string) string { - return fmt.Sprintf("%s|%s", path.String(), objectName) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go deleted file mode 100644 index 5c19f6e7cf..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go +++ /dev/null @@ -1,94 +0,0 @@ -package terraform - -import ( - "bytes" - "encoding/gob" - "fmt" - "io" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/zclconf/go-cty/cty" -) - -func init() { - gob.Register(make([]interface{}, 0)) - gob.Register(make([]map[string]interface{}, 0)) - gob.Register(make(map[string]interface{})) - gob.Register(make(map[string]string)) -} - -// Plan represents a single Terraform execution plan, which contains -// all the information necessary to make an infrastructure change. -// -// A plan has to contain basically the entire state of the world -// necessary to make a change: the state, diff, config, backend config, etc. -// This is so that it can run alone without any other data. -type Plan struct { - // Diff describes the resource actions that must be taken when this - // plan is applied. - Diff *Diff - - // Config represents the entire configuration that was present when this - // plan was created. - Config *configs.Config - - // State is the Terraform state that was current when this plan was - // created. - // - // It is not allowed to apply a plan that has a stale state, since its - // diff could be outdated. - State *State - - // Vars retains the variables that were set when creating the plan, so - // that the same variables can be applied during apply. - Vars map[string]cty.Value - - // Targets, if non-empty, contains a set of resource address strings that - // identify graph nodes that were selected as targets for plan. 
- // - // When targets are set, any graph node that is not directly targeted or - // indirectly targeted via dependencies is excluded from the graph. - Targets []string - - // TerraformVersion is the version of Terraform that was used to create - // this plan. - // - // It is not allowed to apply a plan created with a different version of - // Terraform, since the other fields of this structure may be interpreted - // in different ways between versions. - TerraformVersion string - - // ProviderSHA256s is a map giving the SHA256 hashes of the exact binaries - // used as plugins for each provider during plan. - // - // These must match between plan and apply to ensure that the diff is - // correctly interpreted, since different provider versions may have - // different attributes or attribute value constraints. - ProviderSHA256s map[string][]byte - - // Backend is the backend that this plan should use and store data with. - Backend *BackendState - - // Destroy indicates that this plan was created for a full destroy operation - Destroy bool -} - -func (p *Plan) String() string { - buf := new(bytes.Buffer) - buf.WriteString("DIFF:\n\n") - buf.WriteString(p.Diff.String()) - buf.WriteString("\n\nSTATE:\n\n") - buf.WriteString(p.State.String()) - return buf.String() -} - -// ReadPlan reads a plan structure out of a reader in the format that -// was written by WritePlan. -func ReadPlan(src io.Reader) (*Plan, error) { - return nil, fmt.Errorf("terraform.ReadPlan is no longer in use; use planfile.Open instead") -} - -// WritePlan writes a plan somewhere in a binary format. -func WritePlan(d *Plan, dst io.Writer) error { - return fmt.Errorf("terraform.WritePlan is no longer in use; use planfile.Create instead") -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go deleted file mode 100644 index 7e401f33eb..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go +++ /dev/null @@ -1,521 +0,0 @@ -package terraform - -import ( - "encoding/json" - "fmt" - "sync" - - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -var _ providers.Interface = (*MockProvider)(nil) - -// MockProvider implements providers.Interface but mocks out all the -// calls for testing purposes. -type MockProvider struct { - sync.Mutex - - // Anything you want, in case you need to store extra data with the mock. 
- Meta interface{} - - GetSchemaCalled bool - GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests - - PrepareProviderConfigCalled bool - PrepareProviderConfigResponse providers.PrepareProviderConfigResponse - PrepareProviderConfigRequest providers.PrepareProviderConfigRequest - PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse - - ValidateResourceTypeConfigCalled bool - ValidateResourceTypeConfigTypeName string - ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse - ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest - ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse - - ValidateDataSourceConfigCalled bool - ValidateDataSourceConfigTypeName string - ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse - ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest - ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse - - UpgradeResourceStateCalled bool - UpgradeResourceStateTypeName string - UpgradeResourceStateResponse providers.UpgradeResourceStateResponse - UpgradeResourceStateRequest providers.UpgradeResourceStateRequest - UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse - - ConfigureCalled bool - ConfigureResponse providers.ConfigureResponse - ConfigureRequest providers.ConfigureRequest - ConfigureNewFn func(providers.ConfigureRequest) providers.ConfigureResponse // Named ConfigureNewFn so we can still have the legacy ConfigureFn declared below - - StopCalled bool - StopFn func() error - StopResponse error - - ReadResourceCalled bool - ReadResourceResponse providers.ReadResourceResponse - ReadResourceRequest providers.ReadResourceRequest - ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse - - PlanResourceChangeCalled bool - PlanResourceChangeResponse providers.PlanResourceChangeResponse - PlanResourceChangeRequest providers.PlanResourceChangeRequest - PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse - - ApplyResourceChangeCalled bool - ApplyResourceChangeResponse providers.ApplyResourceChangeResponse - ApplyResourceChangeRequest providers.ApplyResourceChangeRequest - ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse - - ImportResourceStateCalled bool - ImportResourceStateResponse providers.ImportResourceStateResponse - ImportResourceStateRequest providers.ImportResourceStateRequest - ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse - // Legacy return type for existing tests, which will be shimmed into an - // ImportResourceStateResponse if set - ImportStateReturn []*InstanceState - - ReadDataSourceCalled bool - ReadDataSourceResponse providers.ReadDataSourceResponse - ReadDataSourceRequest providers.ReadDataSourceRequest - ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse - - CloseCalled bool - CloseError error - - // Legacy callbacks: if these are set, we will shim incoming calls for - // new-style methods to these old-fashioned terraform.ResourceProvider - // mock callbacks, for the benefit of older tests that were written 
against - // the old mock API. - ValidateFn func(c *ResourceConfig) (ws []string, es []error) - ConfigureFn func(c *ResourceConfig) error - DiffFn func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) - ApplyFn func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error) -} - -func (p *MockProvider) GetSchema() providers.GetSchemaResponse { - p.Lock() - defer p.Unlock() - p.GetSchemaCalled = true - return p.getSchema() -} - -func (p *MockProvider) getSchema() providers.GetSchemaResponse { - // This version of getSchema doesn't do any locking, so it's suitable to - // call from other methods of this mock as long as they are already - // holding the lock. - - ret := providers.GetSchemaResponse{ - Provider: providers.Schema{}, - DataSources: map[string]providers.Schema{}, - ResourceTypes: map[string]providers.Schema{}, - } - if p.GetSchemaReturn != nil { - ret.Provider.Block = p.GetSchemaReturn.Provider - for n, s := range p.GetSchemaReturn.DataSources { - ret.DataSources[n] = providers.Schema{ - Block: s, - } - } - for n, s := range p.GetSchemaReturn.ResourceTypes { - ret.ResourceTypes[n] = providers.Schema{ - Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), - Block: s, - } - } - } - - return ret -} - -func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse { - p.Lock() - defer p.Unlock() - - p.PrepareProviderConfigCalled = true - p.PrepareProviderConfigRequest = r - if p.PrepareProviderConfigFn != nil { - return p.PrepareProviderConfigFn(r) - } - return p.PrepareProviderConfigResponse -} - -func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateResourceTypeConfigCalled = true - p.ValidateResourceTypeConfigRequest = r - - if p.ValidateFn != nil { - resp := p.getSchema() - schema := resp.Provider.Block - rc := NewResourceConfigShimmed(r.Config, schema) - warns, errs := p.ValidateFn(rc) - ret := providers.ValidateResourceTypeConfigResponse{} - for _, warn := range warns { - ret.Diagnostics = ret.Diagnostics.Append(tfdiags.SimpleWarning(warn)) - } - for _, err := range errs { - ret.Diagnostics = ret.Diagnostics.Append(err) - } - // Return the diagnostics collected from ValidateFn rather than dropping them. - return ret - } - if p.ValidateResourceTypeConfigFn != nil { - return p.ValidateResourceTypeConfigFn(r) - } - - return p.ValidateResourceTypeConfigResponse -} - -func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateDataSourceConfigCalled = true - p.ValidateDataSourceConfigRequest = r - - if p.ValidateDataSourceConfigFn != nil { - return p.ValidateDataSourceConfigFn(r) - } - - return p.ValidateDataSourceConfigResponse -} - -func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { - p.Lock() - defer p.Unlock() - - schemas := p.getSchema() - schema := schemas.ResourceTypes[r.TypeName] - schemaType := schema.Block.ImpliedType() - - p.UpgradeResourceStateCalled = true - p.UpgradeResourceStateRequest = r - - if p.UpgradeResourceStateFn != nil { - return p.UpgradeResourceStateFn(r) - } - - resp := p.UpgradeResourceStateResponse - - if resp.UpgradedState == cty.NilVal { - switch { - case r.RawStateFlatmap != nil: - v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) - if err != nil { - resp.Diagnostics =
resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = v - case len(r.RawStateJSON) > 0: - v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType) - - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = v - } - } - return resp -} - -func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse { - p.Lock() - defer p.Unlock() - - p.ConfigureCalled = true - p.ConfigureRequest = r - - if p.ConfigureFn != nil { - resp := p.getSchema() - schema := resp.Provider.Block - rc := NewResourceConfigShimmed(r.Config, schema) - ret := providers.ConfigureResponse{} - - err := p.ConfigureFn(rc) - if err != nil { - ret.Diagnostics = ret.Diagnostics.Append(err) - } - return ret - } - if p.ConfigureNewFn != nil { - return p.ConfigureNewFn(r) - } - - return p.ConfigureResponse -} - -func (p *MockProvider) Stop() error { - // We intentionally don't lock in this one because the whole point of this - // method is to be called concurrently with another operation that can - // be cancelled. The provider itself is responsible for handling - // any concurrency concerns in this case. - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopResponse -} - -func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse { - p.Lock() - defer p.Unlock() - - p.ReadResourceCalled = true - p.ReadResourceRequest = r - - if p.ReadResourceFn != nil { - return p.ReadResourceFn(r) - } - - // make sure the NewState fits the schema - newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(p.ReadResourceResponse.NewState) - if err != nil { - panic(err) - } - resp := p.ReadResourceResponse - resp.NewState = newState - - return resp -} - -func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - p.Lock() - defer p.Unlock() - - p.PlanResourceChangeCalled = true - p.PlanResourceChangeRequest = r - - if p.DiffFn != nil { - ps := p.getSchema() - if ps.ResourceTypes == nil || ps.ResourceTypes[r.TypeName].Block == nil { - return providers.PlanResourceChangeResponse{ - Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("mock provider has no schema for resource type %s", r.TypeName)), - } - } - schema := ps.ResourceTypes[r.TypeName].Block - info := &InstanceInfo{ - Type: r.TypeName, - } - priorState := NewInstanceStateShimmedFromValue(r.PriorState, 0) - cfg := NewResourceConfigShimmed(r.Config, schema) - - legacyDiff, err := p.DiffFn(info, priorState, cfg) - - var res providers.PlanResourceChangeResponse - res.PlannedState = r.ProposedNewState - if err != nil { - res.Diagnostics = res.Diagnostics.Append(err) - } - if legacyDiff != nil { - newVal, err := legacyDiff.ApplyToValue(r.PriorState, schema) - if err != nil { - res.Diagnostics = res.Diagnostics.Append(err) - } - - res.PlannedState = newVal - - var requiresNew []string - for attr, d := range legacyDiff.Attributes { - if d.RequiresNew { - requiresNew = append(requiresNew, attr) - } - } - requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schema.ImpliedType()) - if err != nil { - res.Diagnostics = res.Diagnostics.Append(err) - } - res.RequiresReplace = requiresReplace - } - return res - } - if p.PlanResourceChangeFn != nil { - return p.PlanResourceChangeFn(r) - } - - return p.PlanResourceChangeResponse -} - -func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { -
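// Note: unlike the other methods on this mock, ApplyResourceChange holds the lock only while recording the call below, so the user-supplied ApplyFn or ApplyResourceChangeFn runs without the lock held and may call back into the mock.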
p.Lock() - p.ApplyResourceChangeCalled = true - p.ApplyResourceChangeRequest = r - p.Unlock() - - if p.ApplyFn != nil { - // ApplyFn is a special callback fashioned after our old provider - // interface, which expected to be given an actual diff rather than - // separate old/new values to apply. Therefore we need to approximate - // a diff here well enough that _most_ of our legacy ApplyFns in old - // tests still see the behavior they are expecting. New tests should - // not use this, and should instead use ApplyResourceChangeFn directly. - providerSchema := p.getSchema() - schema, ok := providerSchema.ResourceTypes[r.TypeName] - if !ok { - return providers.ApplyResourceChangeResponse{ - Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("no mocked schema available for resource type %s", r.TypeName)), - } - } - - info := &InstanceInfo{ - Type: r.TypeName, - } - - priorVal := r.PriorState - plannedVal := r.PlannedState - priorMap := hcl2shim.FlatmapValueFromHCL2(priorVal) - plannedMap := hcl2shim.FlatmapValueFromHCL2(plannedVal) - s := NewInstanceStateShimmedFromValue(priorVal, 0) - d := &InstanceDiff{ - Attributes: make(map[string]*ResourceAttrDiff), - } - if plannedMap == nil { // destroying, then - d.Destroy = true - // Destroy diffs don't have any attribute diffs - } else { - if priorMap == nil { // creating, then - // We'll just make an empty prior map to make things easier below. - priorMap = make(map[string]string) - } - - for k, new := range plannedMap { - old := priorMap[k] - newComputed := false - if new == hcl2shim.UnknownVariableValue { - new = "" - newComputed = true - } - d.Attributes[k] = &ResourceAttrDiff{ - Old: old, - New: new, - NewComputed: newComputed, - Type: DiffAttrInput, // not generally used in tests, so just hard-coded - } - } - // Also need any attributes that were removed in "planned" - for k, old := range priorMap { - if _, ok := plannedMap[k]; ok { - continue - } - d.Attributes[k] = &ResourceAttrDiff{ - Old: old, - NewRemoved: true, - Type: DiffAttrInput, - } - } - } - newState, err := p.ApplyFn(info, s, d) - resp := providers.ApplyResourceChangeResponse{} - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - var newVal cty.Value - if newState != nil { - var err error - newVal, err = newState.AttrsAsObjectValue(schema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - } else { - // If apply returned a nil new state then that's the old way to - // indicate that the object was destroyed. Our new interface calls - // for that to be signalled as a null value. - newVal = cty.NullVal(schema.Block.ImpliedType()) - } - resp.NewState = newVal - - return resp - } - if p.ApplyResourceChangeFn != nil { - return p.ApplyResourceChangeFn(r) - } - - return p.ApplyResourceChangeResponse -} - -func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { - p.Lock() - defer p.Unlock() - - if p.ImportStateReturn != nil { - for _, is := range p.ImportStateReturn { - if is.Attributes == nil { - is.Attributes = make(map[string]string) - } - is.Attributes["id"] = is.ID - - typeName := is.Ephemeral.Type - // Use the requested type if the resource has no type of its own. - // We still return the empty type, which will error, but this prevents a panic.
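// Illustrative (hypothetical values): a test seeding ImportStateReturn with &InstanceState{ID: "i-abc123"} and leaving Ephemeral.Type empty falls back to the type named in the import request here.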
- if typeName == "" { - typeName = r.TypeName - } - - schema := p.GetSchemaReturn.ResourceTypes[typeName] - if schema == nil { - panic("no schema found for " + typeName) - } - - private, err := json.Marshal(is.Meta) - if err != nil { - panic(err) - } - - state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType()) - if err != nil { - panic(err) - } - - state, err = schema.CoerceValue(state) - if err != nil { - panic(err) - } - - p.ImportResourceStateResponse.ImportedResources = append( - p.ImportResourceStateResponse.ImportedResources, - providers.ImportedResource{ - TypeName: is.Ephemeral.Type, - State: state, - Private: private, - }) - } - } - - p.ImportResourceStateCalled = true - p.ImportResourceStateRequest = r - if p.ImportResourceStateFn != nil { - return p.ImportResourceStateFn(r) - } - - return p.ImportResourceStateResponse -} - -func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - p.Lock() - defer p.Unlock() - - p.ReadDataSourceCalled = true - p.ReadDataSourceRequest = r - - if p.ReadDataSourceFn != nil { - return p.ReadDataSourceFn(r) - } - - return p.ReadDataSourceResponse -} - -func (p *MockProvider) Close() error { - p.CloseCalled = true - return p.CloseError -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go deleted file mode 100644 index 93b19be57d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go +++ /dev/null @@ -1,154 +0,0 @@ -package terraform - -import ( - "fmt" - "sync" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" -) - -var _ provisioners.Interface = (*MockProvisioner)(nil) - -// MockProvisioner implements provisioners.Interface but mocks out all the -// calls for testing purposes. -type MockProvisioner struct { - sync.Mutex - // Anything you want, in case you need to store extra data with the mock. - Meta interface{} - - GetSchemaCalled bool - GetSchemaResponse provisioners.GetSchemaResponse - - ValidateProvisionerConfigCalled bool - ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest - ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse - ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse - - ProvisionResourceCalled bool - ProvisionResourceRequest provisioners.ProvisionResourceRequest - ProvisionResourceResponse provisioners.ProvisionResourceResponse - ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse - - StopCalled bool - StopResponse error - StopFn func() error - - CloseCalled bool - CloseResponse error - CloseFn func() error - - // Legacy callbacks: if these are set, we will shim incoming calls for - // new-style methods to these old-fashioned terraform.ResourceProvider - // mock callbacks, for the benefit of older tests that were written against - // the old mock API. - ApplyFn func(rs *InstanceState, c *ResourceConfig) error -} - -func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse { - p.Lock() - defer p.Unlock() - - p.GetSchemaCalled = true - return p.getSchema() -} - -// getSchema is the implementation of GetSchema, which can be called from other -// methods on MockProvisioner that may already be holding the lock. 
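// Splitting the exported, locking GetSchema from this unexported, non-locking helper is the usual way to avoid self-deadlock: sync.Mutex in Go is not reentrant, so methods that already hold p's lock must call getSchema directly.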
-func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse { - return p.GetSchemaResponse -} - -func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateProvisionerConfigCalled = true - p.ValidateProvisionerConfigRequest = r - if p.ValidateProvisionerConfigFn != nil { - return p.ValidateProvisionerConfigFn(r) - } - return p.ValidateProvisionerConfigResponse -} - -func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse { - p.Lock() - defer p.Unlock() - - p.ProvisionResourceCalled = true - p.ProvisionResourceRequest = r - if p.ApplyFn != nil { - if !r.Config.IsKnown() { - panic(fmt.Sprintf("cannot provision with unknown value: %#v", r.Config)) - } - - schema := p.getSchema() - rc := NewResourceConfigShimmed(r.Config, schema.Provisioner) - connVal := r.Connection - connMap := map[string]string{} - - if !connVal.IsNull() && connVal.IsKnown() { - for it := connVal.ElementIterator(); it.Next(); { - ak, av := it.Element() - name := ak.AsString() - - if !av.IsKnown() || av.IsNull() { - continue - } - - av, _ = convert.Convert(av, cty.String) - connMap[name] = av.AsString() - } - } - - // We no longer pass the full instance state to a provisioner, so we'll - // construct a partial one that should be good enough for what existing - // test mocks need. - is := &InstanceState{ - Ephemeral: EphemeralState{ - ConnInfo: connMap, - }, - } - var resp provisioners.ProvisionResourceResponse - err := p.ApplyFn(is, rc) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - return resp - } - if p.ProvisionResourceFn != nil { - // Call the hook under the lock, like the other Fn hooks on this mock; - // a manual Unlock here would be released a second time by the deferred - // Unlock above. - return p.ProvisionResourceFn(r) - } - - return p.ProvisionResourceResponse -} - -func (p *MockProvisioner) Stop() error { - // We intentionally don't lock in this one because the whole point of this - // method is to be called concurrently with another operation that can - // be cancelled. The provisioner itself is responsible for handling - // any concurrency concerns in this case. - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopResponse -} - -func (p *MockProvisioner) Close() error { - p.Lock() - defer p.Unlock() - - p.CloseCalled = true - if p.CloseFn != nil { - return p.CloseFn() - } - - return p.CloseResponse -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource.go deleted file mode 100644 index bd5774600e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource.go +++ /dev/null @@ -1,510 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/mitchellh/copystructure" - "github.com/mitchellh/reflectwalk" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" -) - -// Resource is a legacy way to identify a particular resource instance. -// -// New code should use addrs.ResourceInstance instead. This is still here -// only for codepaths that haven't been updated yet. -type Resource struct { - // These are all used by the new EvalNode stuff.
- Name string - Type string - CountIndex int - - // These aren't really used anymore anywhere, but we keep them around - // since we haven't done a proper cleanup yet. - Id string - Info *InstanceInfo - Config *ResourceConfig - Dependencies []string - Diff *InstanceDiff - Provider ResourceProvider - State *InstanceState - Flags ResourceFlag -} - -// NewResource constructs a legacy Resource object from an -// addrs.ResourceInstance value. -// -// This is provided to shim to old codepaths that haven't been updated away -// from this type yet. Since this old type is not able to represent instances -// that have string keys, this function will panic if given a resource address -// that has a string key. -func NewResource(addr addrs.ResourceInstance) *Resource { - ret := &Resource{ - Name: addr.Resource.Name, - Type: addr.Resource.Type, - } - - if addr.Key != addrs.NoKey { - switch tk := addr.Key.(type) { - case addrs.IntKey: - ret.CountIndex = int(tk) - default: - panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key)) - } - } - - return ret -} - -// ResourceKind specifies what kind of instance we're working with, whether -// its a primary instance, a tainted instance, or an orphan. -type ResourceFlag byte - -// InstanceInfo is used to hold information about the instance and/or -// resource being modified. -type InstanceInfo struct { - // Id is a unique name to represent this instance. This is not related - // to InstanceState.ID in any way. - Id string - - // ModulePath is the complete path of the module containing this - // instance. - ModulePath []string - - // Type is the resource type of this instance - Type string -} - -// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance. -// -// InstanceInfo is a legacy type, and uses of it should be gradually replaced -// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as -// appropriate. -// -// The legacy InstanceInfo type cannot represent module instances with instance -// keys, so this function will panic if given such a path. Uses of this type -// should all be removed or replaced before implementing "count" and "for_each" -// arguments on modules in order to avoid such panics. -// -// This legacy type also cannot represent resource instances with string -// instance keys. It will panic if the given key is not either NoKey or an -// IntKey. -func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo { - // We need an old-style []string module path for InstanceInfo. - path := make([]string, len(addr.Module)) - for i, step := range addr.Module { - if step.InstanceKey != addrs.NoKey { - panic("NewInstanceInfo cannot convert module instance with key") - } - path[i] = step.Name - } - - // This is a funny old meaning of "id" that is no longer current. It should - // not be used for anything users might see. Note that it does not include - // a representation of the resource mode, and so it's impossible to - // determine from an InstanceInfo alone whether it is a managed or data - // resource that is being referred to. - id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name) - if addr.Resource.Resource.Mode == addrs.DataResourceMode { - id = "data." 
+ id - } - if addr.Resource.Key != addrs.NoKey { - switch k := addr.Resource.Key.(type) { - case addrs.IntKey: - id = id + fmt.Sprintf(".%d", int(k)) - default: - panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key)) - } - } - - return &InstanceInfo{ - Id: id, - ModulePath: path, - Type: addr.Resource.Resource.Type, - } -} - -// ResourceAddress returns the address of the resource that the receiver is describing. -func (i *InstanceInfo) ResourceAddress() *ResourceAddress { - // GROSS: for tainted and deposed instances, their status gets appended - // to i.Id to create a unique id for the graph node. Historically these - // ids were displayed to the user, so it's designed to be human-readable: - // "aws_instance.bar.0 (deposed #0)" - // - // So here we detect such suffixes and try to interpret them back to - // their original meaning so we can then produce a ResourceAddress - // with a suitable InstanceType. - id := i.Id - instanceType := TypeInvalid - if idx := strings.Index(id, " ("); idx != -1 { - remain := id[idx:] - id = id[:idx] - - switch { - case strings.Contains(remain, "tainted"): - instanceType = TypeTainted - case strings.Contains(remain, "deposed"): - instanceType = TypeDeposed - } - } - - addr, err := parseResourceAddressInternal(id) - if err != nil { - // should never happen, since that would indicate a bug in the - // code that constructed this InstanceInfo. - panic(fmt.Errorf("InstanceInfo has invalid Id %s", id)) - } - if len(i.ModulePath) > 1 { - addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied - } - if instanceType != TypeInvalid { - addr.InstanceTypeSet = true - addr.InstanceType = instanceType - } - return addr -} - -// ResourceConfig is a legacy type that was formerly used to represent -// interpolatable configuration blocks. It is now only used to shim to old -// APIs that still use this type, via NewResourceConfigShimmed. -type ResourceConfig struct { - ComputedKeys []string - Raw map[string]interface{} - Config map[string]interface{} -} - -// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly -// the given value. -// -// The given value may contain hcl2shim.UnknownVariableValue to signal that -// something is computed, but it must not contain unprocessed interpolation -// sequences as we might've seen in Terraform v0.11 and prior. -func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig { - v := hcl2shim.HCL2ValueFromConfigValue(raw) - - // This is a little weird but we round-trip the value through the hcl2shim - // package here for two reasons: firstly, because that reduces the risk - // of it including something unlike what NewResourceConfigShimmed would - // produce, and secondly because it creates a copy of "raw" just in case - // something is relying on the fact that in the old world the raw and - // config maps were always distinct, and thus you could in principle mutate - // one without affecting the other. (I sure hope nobody was doing that, though!) - cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{}) - - return &ResourceConfig{ - Raw: raw, - Config: cfg, - - ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""), - } -} - -// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy -// ResourceConfig object, so that it can be passed to older APIs that expect -// this wrapping. -// -// The returned ResourceConfig is already interpolated and cannot be -// re-interpolated. 
It is, therefore, useful only to functions that expect -// an already-populated ResourceConfig which they then treat as read-only. -// -// If the given value is not of an object type that conforms to the given -// schema then this function will panic. -func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig { - if !val.Type().IsObjectType() { - panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type())) - } - ret := &ResourceConfig{} - - legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema) - if legacyVal != nil { - ret.Config = legacyVal - - // Now we need to walk through our structure and find any unknown values, - // producing the separate list ComputedKeys to represent these. We use the - // schema here so that we can preserve the expected invariant - // that an attribute is always either wholly known or wholly unknown, while - // a child block can be partially unknown. - ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "") - } else { - ret.Config = make(map[string]interface{}) - } - ret.Raw = ret.Config - - return ret -} - -// Record any config values in ComputedKeys. This field had been unused in -// helper/schema, but in the new protocol we're using this so that the SDK can -// now handle having an unknown collection. The legacy diff code doesn't -// properly handle the unknown, because it can't be expressed in the same way -// between the config and diff. -func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string { - var ret []string - ty := val.Type() - - if val.IsNull() { - return ret - } - - if !val.IsKnown() { - // we shouldn't have an entirely unknown resource, but prevent empty - // strings just in case - if len(path) > 0 { - ret = append(ret, path) - } - return ret - } - - if path != "" { - path += "." - } - switch { - case ty.IsListType(), ty.IsTupleType(), ty.IsSetType(): - i := 0 - for it := val.ElementIterator(); it.Next(); i++ { - _, subVal := it.Element() - keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i)) - ret = append(ret, keys...) - } - - case ty.IsMapType(), ty.IsObjectType(): - for it := val.ElementIterator(); it.Next(); { - subK, subVal := it.Element() - keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString())) - ret = append(ret, keys...) - } - } - - return ret -} - -// DeepCopy performs a deep copy of the configuration. This makes it safe -// to modify any of the structures that are part of the resource config without -// affecting the original configuration. -func (c *ResourceConfig) DeepCopy() *ResourceConfig { - // DeepCopying a nil should return a nil to avoid panics - if c == nil { - return nil - } - - // Copy, this will copy all the exported attributes - copy, err := copystructure.Config{Lock: true}.Copy(c) - if err != nil { - panic(err) - } - - // Force the type - result := copy.(*ResourceConfig) - - return result -} - -// Equal checks the equality of two resource configs. -func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { - // If either are nil, then they're only equal if they're both nil - if c == nil || c2 == nil { - return c == c2 - } - - // Sort the computed keys so they're deterministic - sort.Strings(c.ComputedKeys) - sort.Strings(c2.ComputedKeys) - - // Two resource configs are equal if their exported properties are equal.
- // We don't compare "raw" because it is never used again after - // initialization and for all intents and purposes they are equal - // if the exported properties are equal. - check := [][2]interface{}{ - {c.ComputedKeys, c2.ComputedKeys}, - {c.Raw, c2.Raw}, - {c.Config, c2.Config}, - } - for _, pair := range check { - if !reflect.DeepEqual(pair[0], pair[1]) { - return false - } - } - - return true -} - -// CheckSet checks that the given list of configuration keys is -// properly set. If not, errors are returned for each unset key. -// -// This is useful to be called in the Validate method of a ResourceProvider. -func (c *ResourceConfig) CheckSet(keys []string) []error { - var errs []error - - for _, k := range keys { - if !c.IsSet(k) { - errs = append(errs, fmt.Errorf("%s must be set", k)) - } - } - - return errs -} - -// Get looks up a configuration value by key and returns the value. -// -// The second return value is true if the get was successful. Get will -// return the raw value if the key is computed, so you should pair this -// with IsComputed. -func (c *ResourceConfig) Get(k string) (interface{}, bool) { - // We aim to get a value from the configuration. If it is computed, - // then we return the pure raw value. - source := c.Config - if c.IsComputed(k) { - source = c.Raw - } - - return c.get(k, source) -} - -// GetRaw looks up a configuration value by key and returns the value, -// from the raw, uninterpolated config. -// -// The second return value is true if the get was successful. Get will -// not succeed if the value is being computed. -func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { - return c.get(k, c.Raw) -} - -// IsComputed returns whether the given key is computed or not. -func (c *ResourceConfig) IsComputed(k string) bool { - // The next thing we do is check the config if we get a computed - // value out of it. - v, ok := c.get(k, c.Config) - if !ok { - return false - } - - // If value is nil, then it isn't computed - if v == nil { - return false - } - - // Test if the value contains an unknown value - var w unknownCheckWalker - if err := reflectwalk.Walk(v, &w); err != nil { - panic(err) - } - - return w.Unknown -} - -// IsSet checks if the key in the configuration is set. A key is set if -// it has a value or the value is being computed (is unknown currently). -// -// This function should be used rather than checking the keys of the -// raw configuration itself, since a key may be omitted from the raw -// configuration if it is being computed. 
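// Illustrative sketch (hypothetical values): for a config whose "name" is "web" and whose "count" is still unknown, Get("name") yields ("web", true), IsComputed("count") reports true, and IsSet("count") also reports true, because a computed key counts as set.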
-func (c *ResourceConfig) IsSet(k string) bool { - if c == nil { - return false - } - - if c.IsComputed(k) { - return true - } - - if _, ok := c.Get(k); ok { - return true - } - - return false -} - -func (c *ResourceConfig) get( - k string, raw map[string]interface{}) (interface{}, bool) { - parts := strings.Split(k, ".") - if len(parts) == 1 && parts[0] == "" { - parts = nil - } - - var current interface{} = raw - var previous interface{} = nil - for i, part := range parts { - if current == nil { - return nil, false - } - - cv := reflect.ValueOf(current) - switch cv.Kind() { - case reflect.Map: - previous = current - v := cv.MapIndex(reflect.ValueOf(part)) - if !v.IsValid() { - if i > 0 && i != (len(parts)-1) { - tryKey := strings.Join(parts[i:], ".") - v := cv.MapIndex(reflect.ValueOf(tryKey)) - if !v.IsValid() { - return nil, false - } - - return v.Interface(), true - } - - return nil, false - } - - current = v.Interface() - case reflect.Slice: - previous = current - - if part == "#" { - // If any value in a list is computed, this whole thing - // is computed and we can't read any part of it. - for i := 0; i < cv.Len(); i++ { - if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue { - return v, true - } - } - - current = cv.Len() - } else { - i, err := strconv.ParseInt(part, 0, 0) - if err != nil { - return nil, false - } - if int(i) < 0 || int(i) >= cv.Len() { - return nil, false - } - current = cv.Index(int(i)).Interface() - } - case reflect.String: - // This happens when map keys contain "." and have a common - // prefix so were split as path components above. - actualKey := strings.Join(parts[i-1:], ".") - if prevMap, ok := previous.(map[string]interface{}); ok { - v, ok := prevMap[actualKey] - return v, ok - } - - return nil, false - default: - panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) - } - } - - return current, true -} - -// unknownCheckWalker -type unknownCheckWalker struct { - Unknown bool -} - -func (w *unknownCheckWalker) Primitive(v reflect.Value) error { - if v.Interface() == hcl2shim.UnknownVariableValue { - w.Unknown = true - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go deleted file mode 100644 index 8a683012d2..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go +++ /dev/null @@ -1,618 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// ResourceAddress is a way of identifying an individual resource (or, -// eventually, a subset of resources) within the state. It is used for Targets. 
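// Illustrative address forms this type can represent (resource names are hypothetical): "aws_instance.web" (a managed resource), "data.aws_ami.ubuntu" (a data resource), and "module.foo.aws_instance.web[3]" (an indexed resource inside a module).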
-type ResourceAddress struct { - // Addresses a resource falling somewhere in the module path - // When specified alone, addresses all resources within a module path - Path []string - - // Addresses a specific resource that occurs in a list - Index int - - InstanceType InstanceType - InstanceTypeSet bool - Name string - Type string - Mode ResourceMode // significant only if InstanceTypeSet -} - -// Copy returns a copy of this ResourceAddress -func (r *ResourceAddress) Copy() *ResourceAddress { - if r == nil { - return nil - } - - n := &ResourceAddress{ - Path: make([]string, 0, len(r.Path)), - Index: r.Index, - InstanceType: r.InstanceType, - Name: r.Name, - Type: r.Type, - Mode: r.Mode, - } - - n.Path = append(n.Path, r.Path...) - - return n -} - -// String outputs the address that parses into this address. -func (r *ResourceAddress) String() string { - var result []string - for _, p := range r.Path { - result = append(result, "module", p) - } - - switch r.Mode { - case ManagedResourceMode: - // nothing to do - case DataResourceMode: - result = append(result, "data") - default: - panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) - } - - if r.Type != "" { - result = append(result, r.Type) - } - - if r.Name != "" { - name := r.Name - if r.InstanceTypeSet { - switch r.InstanceType { - case TypePrimary: - name += ".primary" - case TypeDeposed: - name += ".deposed" - case TypeTainted: - name += ".tainted" - } - } - - if r.Index >= 0 { - name += fmt.Sprintf("[%d]", r.Index) - } - result = append(result, name) - } - - return strings.Join(result, ".") -} - -// HasResourceSpec returns true if the address has a resource spec, as -// defined in the documentation: -// https://www.terraform.io/docs/internals/resource-addressing.html -// In particular, this returns false if the address contains only -// a module path, thus addressing the entire module. -func (r *ResourceAddress) HasResourceSpec() bool { - return r.Type != "" && r.Name != "" -} - -// WholeModuleAddress returns the resource address that refers to all -// resources in the same module as the receiver address. -func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress { - return &ResourceAddress{ - Path: r.Path, - Index: -1, - InstanceTypeSet: false, - } -} - -// MatchesResourceConfig returns true if the receiver matches the given -// configuration resource within the given _static_ module path. Note that -// the module path in a resource address is a _dynamic_ module path, and -// multiple dynamic resource paths may map to a single static path if -// count and for_each are in use on module calls. -// -// Since resource configuration blocks represent all of the instances of -// a multi-instance resource, the index of the address (if any) is not -// considered. -func (r *ResourceAddress) MatchesResourceConfig(path addrs.Module, rc *configs.Resource) bool { - if r.HasResourceSpec() { - // FIXME: Some ugliness while we are between worlds. Functionality - // in "addrs" should eventually replace this ResourceAddress idea - // completely, but for now we'll need to translate to the old - // way of representing resource modes. 
- switch r.Mode { - case ManagedResourceMode: - if rc.Mode != addrs.ManagedResourceMode { - return false - } - case DataResourceMode: - if rc.Mode != addrs.DataResourceMode { - return false - } - } - if r.Type != rc.Type || r.Name != rc.Name { - return false - } - } - - addrPath := r.Path - - // normalize - if len(addrPath) == 0 { - addrPath = nil - } - if len(path) == 0 { - path = nil - } - rawPath := []string(path) - return reflect.DeepEqual(addrPath, rawPath) -} - -// stateId returns the ID that this resource should be entered with -// in the state. This is also used for diffs. In the future, we'd like to -// move away from this string field so I don't export this. -func (r *ResourceAddress) stateId() string { - result := fmt.Sprintf("%s.%s", r.Type, r.Name) - switch r.Mode { - case ManagedResourceMode: - // Done - case DataResourceMode: - result = fmt.Sprintf("data.%s", result) - default: - panic(fmt.Errorf("unknown resource mode: %s", r.Mode)) - } - if r.Index >= 0 { - result += fmt.Sprintf(".%d", r.Index) - } - - return result -} - -// parseResourceAddressInternal parses the somewhat bespoke resource -// identifier used in states and diffs, such as "instance.name.0". -func parseResourceAddressInternal(s string) (*ResourceAddress, error) { - // Split based on ".". Every resource address should have at least two - // elements (type and name). - parts := strings.Split(s, ".") - if len(parts) < 2 || len(parts) > 4 { - return nil, fmt.Errorf("Invalid internal resource address format: %s", s) - } - - // Data resource if we have at least 3 parts and the first one is data - mode := ManagedResourceMode - if len(parts) > 2 && parts[0] == "data" { - mode = DataResourceMode - parts = parts[1:] - } - - // If we're not a data resource and we have more than 3, then it is an error - if len(parts) > 3 && mode != DataResourceMode { - return nil, fmt.Errorf("Invalid internal resource address format: %s", s) - } - - // Build the parts of the resource address that are guaranteed to exist - addr := &ResourceAddress{ - Type: parts[0], - Name: parts[1], - Index: -1, - InstanceType: TypePrimary, - Mode: mode, - } - - // If we have more parts, then we have an index. Parse that. - if len(parts) > 2 { - idx, err := strconv.ParseInt(parts[2], 0, 0) - if err != nil { - return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err) - } - - addr.Index = int(idx) - } - - return addr, nil -} - -func ParseResourceAddress(s string) (*ResourceAddress, error) { - matches, err := tokenizeResourceAddress(s) - if err != nil { - return nil, err - } - mode := ManagedResourceMode - if matches["data_prefix"] != "" { - mode = DataResourceMode - } - resourceIndex, err := ParseResourceIndex(matches["index"]) - if err != nil { - return nil, err - } - instanceType, err := ParseInstanceType(matches["instance_type"]) - if err != nil { - return nil, err - } - path := ParseResourcePath(matches["path"]) - - // not allowed to say "data." without a type following - if mode == DataResourceMode && matches["type"] == "" { - return nil, fmt.Errorf( - "invalid resource address %q: must target specific data instance", - s, - ) - } - - return &ResourceAddress{ - Path: path, - Index: resourceIndex, - InstanceType: instanceType, - InstanceTypeSet: matches["instance_type"] != "", - Name: matches["name"], - Type: matches["type"], - Mode: mode, - }, nil -} - -// ParseResourceAddressForInstanceDiff creates a ResourceAddress for a -// resource name as described in a module diff. 
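// (Illustrative: the internal diff key "aws_instance.web.0" parses to the same address that ParseResourceAddress would produce for "aws_instance.web[0]"; only the textual format differs.)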
-// -// For historical reasons a different addressing format is used in this -// context. The internal format should not be shown in the UI and instead -// this function should be used to translate to a ResourceAddress and -// then, where appropriate, use the String method to produce a canonical -// resource address string for display in the UI. -// -// The given path slice must be empty (or nil) for the root module, and -// otherwise consist of a sequence of module names traversing down into -// the module tree. If a non-nil path is provided, the caller must not -// modify its underlying array after passing it to this function. -func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAddress, error) { - addr, err := parseResourceAddressInternal(key) - if err != nil { - return nil, err - } - addr.Path = path - return addr, nil -} - -// NewLegacyResourceAddress creates a ResourceAddress from a new-style -// addrs.AbsResource value. -// -// This is provided for shimming purposes so that we can still easily call into -// older functions that expect the ResourceAddress type. -func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress { - ret := &ResourceAddress{ - Type: addr.Resource.Type, - Name: addr.Resource.Name, - } - - switch addr.Resource.Mode { - case addrs.ManagedResourceMode: - ret.Mode = ManagedResourceMode - case addrs.DataResourceMode: - ret.Mode = DataResourceMode - default: - panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Mode)) - } - - path := make([]string, len(addr.Module)) - for i, step := range addr.Module { - if step.InstanceKey != addrs.NoKey { - // At the time of writing this can't happen because we don't - // yet generate keyed module instances. This legacy codepath must - // be removed before we can support "count" and "for_each" for - // modules. - panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) - } - - path[i] = step.Name - } - ret.Path = path - ret.Index = -1 - - return ret -} - -// NewLegacyResourceInstanceAddress creates a ResourceAddress from a new-style -// addrs.AbsResourceInstance value. -// -// This is provided for shimming purposes so that we can still easily call into -// older functions that expect the ResourceAddress type. -func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceAddress { - ret := &ResourceAddress{ - Type: addr.Resource.Resource.Type, - Name: addr.Resource.Resource.Name, - } - - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - ret.Mode = ManagedResourceMode - case addrs.DataResourceMode: - ret.Mode = DataResourceMode - default: - panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Resource.Mode)) - } - - path := make([]string, len(addr.Module)) - for i, step := range addr.Module { - if step.InstanceKey != addrs.NoKey { - // At the time of writing this can't happen because we don't - // yet generate keyed module instances. This legacy codepath must - // be removed before we can support "count" and "for_each" for - // modules.
- panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) - } - - path[i] = step.Name - } - ret.Path = path - - if addr.Resource.Key == addrs.NoKey { - ret.Index = -1 - } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok { - ret.Index = int(ik) - } else if _, ok := addr.Resource.Key.(addrs.StringKey); ok { - ret.Index = -1 - } else { - panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key)) - } - - return ret -} - -// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to -// the new resource address type addrs.AbsResourceInstance. -// -// This method can be used only on an address that has a resource specification. -// It will panic if called on a module-path-only ResourceAddress. Use -// method HasResourceSpec to check before calling, in contexts where it is -// unclear. -// -// addrs.AbsResourceInstance does not represent the "tainted" and "deposed" -// states, and so if these are present on the receiver then they are discarded. -// -// This is provided for shimming purposes so that we can easily adapt functions -// that are returning the legacy ResourceAddress type, for situations where -// the new type is required. -func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance { - if !addr.HasResourceSpec() { - panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec") - } - - ret := addrs.AbsResourceInstance{ - Module: addr.ModuleInstanceAddr(), - Resource: addrs.ResourceInstance{ - Resource: addrs.Resource{ - Type: addr.Type, - Name: addr.Name, - }, - }, - } - - switch addr.Mode { - case ManagedResourceMode: - ret.Resource.Resource.Mode = addrs.ManagedResourceMode - case DataResourceMode: - ret.Resource.Resource.Mode = addrs.DataResourceMode - default: - panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode)) - } - - if addr.Index != -1 { - ret.Resource.Key = addrs.IntKey(addr.Index) - } - - return ret -} - -// ModuleInstanceAddr returns the module path portion of the receiver as an -// addrs.ModuleInstance value. -func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance { - path := make(addrs.ModuleInstance, len(addr.Path)) - for i, name := range addr.Path { - path[i] = addrs.ModuleInstanceStep{Name: name} - } - return path -} - -// Contains returns true if and only if the given node is contained within -// the receiver. -// -// Containment is defined in terms of the module and resource hierarchy: -// a resource is contained within its module and any ancestor modules, -// an indexed resource instance is contained within the unindexed resource, etc. -func (addr *ResourceAddress) Contains(other *ResourceAddress) bool { - ourPath := addr.Path - givenPath := other.Path - if len(givenPath) < len(ourPath) { - return false - } - for i := range ourPath { - if ourPath[i] != givenPath[i] { - return false - } - } - - // If the receiver is a whole-module address then the path prefix - // matching is all we need. - if !addr.HasResourceSpec() { - return true - } - - if addr.Type != other.Type || addr.Name != other.Name || addr.Mode != other.Mode { - return false - } - - if addr.Index != -1 && addr.Index != other.Index { - return false - } - - if addr.InstanceTypeSet && (addr.InstanceTypeSet != other.InstanceTypeSet || addr.InstanceType != other.InstanceType) { - return false - } - - return true -} - -// Equals returns true if the receiver matches the given address.
-// -// The name of this method is a misnomer, since it doesn't test for exact -// equality. Instead, it tests that the _specified_ parts of each -// address match, treating any unspecified parts as wildcards. -// -// See also Contains, which takes a more hierarchical approach to comparing -// addresses. -func (addr *ResourceAddress) Equals(raw interface{}) bool { - other, ok := raw.(*ResourceAddress) - if !ok { - return false - } - - pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 || - reflect.DeepEqual(addr.Path, other.Path) - - indexMatch := addr.Index == -1 || - other.Index == -1 || - addr.Index == other.Index - - nameMatch := addr.Name == "" || - other.Name == "" || - addr.Name == other.Name - - typeMatch := addr.Type == "" || - other.Type == "" || - addr.Type == other.Type - - // mode is significant only when type is set - modeMatch := addr.Type == "" || - other.Type == "" || - addr.Mode == other.Mode - - return pathMatch && - indexMatch && - addr.InstanceType == other.InstanceType && - nameMatch && - typeMatch && - modeMatch -} - -// Less returns true if and only if the receiver should be sorted before -// the given address when presenting a list of resource addresses to -// an end-user. -// -// This sort uses lexicographic sorting for most components, but uses -// numeric sort for indices, thus causing index 10 to sort after -// index 9, rather than after index 1. -func (addr *ResourceAddress) Less(other *ResourceAddress) bool { - - switch { - - case len(addr.Path) != len(other.Path): - return len(addr.Path) < len(other.Path) - - case !reflect.DeepEqual(addr.Path, other.Path): - // If the two paths are the same length but don't match, we'll just - // cheat and compare the string forms since it's easier than - // comparing all of the path segments in turn, and lexicographic - // comparison is correct for the module path portion. - addrStr := addr.String() - otherStr := other.String() - return addrStr < otherStr - - case addr.Mode != other.Mode: - return addr.Mode == DataResourceMode - - case addr.Type != other.Type: - return addr.Type < other.Type - - case addr.Name != other.Name: - return addr.Name < other.Name - - case addr.Index != other.Index: - // Since "Index" is -1 for an un-indexed address, this also conveniently - // sorts unindexed addresses before indexed ones, should they both - // appear for some reason. - return addr.Index < other.Index - - case addr.InstanceTypeSet != other.InstanceTypeSet: - return !addr.InstanceTypeSet - - case addr.InstanceType != other.InstanceType: - // InstanceType is actually an enum, so this is just an arbitrary - // sort based on the enum numeric values, and thus not particularly - // meaningful.
- return addr.InstanceType < other.InstanceType - - default: - return false - - } -} - -func ParseResourceIndex(s string) (int, error) { - if s == "" { - return -1, nil - } - return strconv.Atoi(s) } - -func ParseResourcePath(s string) []string { - if s == "" { - return nil - } - parts := strings.Split(s, ".") - path := make([]string, 0, len(parts)) - for _, s := range parts { - // Due to the limitations of the regexp match below, the path match has - // some noise in it we have to filter out :| - if s == "" || s == "module" { - continue - } - path = append(path, s) - } - return path -} - -func ParseInstanceType(s string) (InstanceType, error) { - switch s { - case "", "primary": - return TypePrimary, nil - case "deposed": - return TypeDeposed, nil - case "tainted": - return TypeTainted, nil - default: - return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s) - } -} - -func tokenizeResourceAddress(s string) (map[string]string, error) { - // Example of portions of the regexp below using the - // string "aws_instance.web.tainted[1]" - re := regexp.MustCompile(`\A` + - // "module.foo.module.bar" (optional) - `(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` + - // possibly "data.", if targeting is a data resource - `(?P<data_prefix>(?:data\.)?)` + - // "aws_instance.web" (optional when module path specified) - `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` + - // "tainted" (optional, omission implies: "primary") - `(?:\.(?P<instance_type>\w+))?` + - // "1" (optional, omission implies: "0") - `(?:\[(?P<index>\d+)\])?` + - `\z`) - - groupNames := re.SubexpNames() - rawMatches := re.FindAllStringSubmatch(s, -1) - if len(rawMatches) != 1 { - return nil, fmt.Errorf("invalid resource address %q", s) - } - - matches := make(map[string]string) - for i, m := range rawMatches[0] { - matches[groupNames[i]] = m - } - - return matches, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode.go deleted file mode 100644 index c83643a65c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode.go +++ /dev/null @@ -1,12 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go - -// ResourceMode is deprecated, use addrs.ResourceMode instead. -// It has been preserved for backwards compatibility. -type ResourceMode int - -const ( - ManagedResourceMode ResourceMode = iota - DataResourceMode -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go deleted file mode 100644 index fec45967fb..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go +++ /dev/null @@ -1,319 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" -) - -// ResourceProvider is an interface that must be implemented by any -// resource provider: the thing that creates and manages the resources in -// a Terraform configuration. -// -// Important implementation note: All returned pointers, such as -// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to -// shared data.
Terraform is highly parallel and assumes that this data is safe -// to read/write in parallel so it must be unique references. Note that it is -// safe to return arguments as results, however. -type ResourceProvider interface { - /********************************************************************* - * Functions related to the provider - *********************************************************************/ - - // GetSchema returns the config schema for the main provider - // configuration, as would appear in a "provider" block in the - // configuration files. - // - // Currently not all providers support schema. Callers must therefore - // first call Resources and DataSources and ensure that at least one - // resource or data source has the SchemaAvailable flag set. - GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) - - // Input was used prior to v0.12 to ask the provider to prompt the user - // for input to complete the configuration. - // - // From v0.12 onwards this method is never called because Terraform Core - // is able to handle the necessary input logic itself based on the - // schema returned from GetSchema. - Input(UIInput, *ResourceConfig) (*ResourceConfig, error) - - // Validate is called once at the beginning with the raw configuration - // (no interpolation done) and can return a list of warnings and/or - // errors. - // - // This is called once with the provider configuration only. It may not - // be called at all if no provider configuration is given. - // - // This should not assume that any values of the configurations are valid. - // The primary use case of this call is to check that required keys are - // set. - Validate(*ResourceConfig) ([]string, []error) - - // Configure configures the provider itself with the configuration - // given. This is useful for setting things like access keys. - // - // This won't be called at all if no provider configuration is given. - // - // Configure returns an error if it occurred. - Configure(*ResourceConfig) error - - // Resources returns all the available resource types that this provider - // knows how to manage. - Resources() []ResourceType - - // Stop is called when the provider should halt any in-flight actions. - // - // This can be used to make a nicer Ctrl-C experience for Terraform. - // Even if this isn't implemented to do anything (just returns nil), - // Terraform will still cleanly stop after the currently executing - // graph node is complete. However, this API can be used to make more - // efficient halts. - // - // Stop doesn't have to and shouldn't block waiting for in-flight actions - // to complete. It should take any action it wants and return immediately - // acknowledging it has received the stop request. Terraform core will - // automatically not make any further API calls to the provider soon - // after Stop is called (technically exactly once the currently executing - // graph nodes are complete). - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - /********************************************************************* - * Functions related to individual resources - *********************************************************************/ - - // ValidateResource is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per resource. 
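// (Illustrative: a single managed resource block would be validated with a call like ValidateResource("aws_instance", cfg), where the type name is a hypothetical example; the call happens once per resource block, not per instance.)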
- // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - ValidateResource(string, *ResourceConfig) ([]string, []error) - - // Apply applies a diff to a specific resource and returns the new - // resource state along with an error. - // - // If the resource state given has an empty ID, then a new resource - // is expected to be created. - Apply( - *InstanceInfo, - *InstanceState, - *InstanceDiff) (*InstanceState, error) - - // Diff diffs a resource versus a desired state and returns - // a diff. - Diff( - *InstanceInfo, - *InstanceState, - *ResourceConfig) (*InstanceDiff, error) - - // Refresh refreshes a resource and updates all of its attributes - // with the latest information. - Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) - - /********************************************************************* - * Functions related to importing - *********************************************************************/ - - // ImportState requests that the given resource be imported. - // - // The returned InstanceState only requires ID be set. Importing - // will always call Refresh after the state to complete it. - // - // IMPORTANT: InstanceState doesn't have the resource type attached - // to it. A type must be specified on the state via the Ephemeral - // field on the state. - // - // This function can return multiple states. Normally, an import - // will map 1:1 to a physical resource. However, some resources map - // to multiple. For example, an AWS security group may contain many rules. - // Each rule is represented by a separate resource in Terraform, - // therefore multiple states are returned. - ImportState(*InstanceInfo, string) ([]*InstanceState, error) - - /********************************************************************* - * Functions related to data resources - *********************************************************************/ - - // ValidateDataSource is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per data source instance. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - ValidateDataSource(string, *ResourceConfig) ([]string, []error) - - // DataSources returns all of the available data sources that this - // provider implements. - DataSources() []DataSource - - // ReadDataDiff produces a diff that represents the state that will - // be produced when the given data source is read using a later call - // to ReadDataApply. - ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) - - // ReadDataApply initializes a data instance using the configuration - // in a diff produced by ReadDataDiff. - ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) -} - -// ResourceProviderCloser is an interface that providers that can close -// connections that aren't needed anymore must implement. -type ResourceProviderCloser interface { - Close() error -} - -// ResourceType is a type of resource that a resource provider can manage. 
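Note the Ephemeral requirement in the ImportState comment above: imported states carry their resource type out-of-band. A hypothetical ImportState-style helper for the security-group example might look like the following sketch (the resource type names are invented for illustration):

// Sketch only: one InstanceState per underlying object, each with its
// ID set and the resource type attached via the Ephemeral field.
func importSecurityGroup(groupID string, ruleIDs []string) []*InstanceState {
	states := []*InstanceState{{
		ID:        groupID,
		Ephemeral: EphemeralState{Type: "example_security_group"},
	}}
	for _, id := range ruleIDs {
		states = append(states, &InstanceState{
			ID:        id,
			Ephemeral: EphemeralState{Type: "example_security_group_rule"},
		})
	}
	return states
}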
-type ResourceType struct { - Name string // Name of the resource, example "instance" (no provider prefix) - Importable bool // Whether this resource supports importing - - // SchemaAvailable is set if the provider supports the ProviderSchema, - // ResourceTypeSchema and DataSourceSchema methods. Although it is - // included on each resource type, it's actually a provider-wide setting - // that's smuggled here only because that avoids a breaking change to - // the plugin protocol. - SchemaAvailable bool -} - -// DataSource is a data source that a resource provider implements. -type DataSource struct { - Name string - - // SchemaAvailable is set if the provider supports the ProviderSchema, - // ResourceTypeSchema and DataSourceSchema methods. Although it is - // included on each resource type, it's actually a provider-wide setting - // that's smuggled here only because that avoids a breaking change to - // the plugin protocol. - SchemaAvailable bool -} - -// ResourceProviderResolver is an interface implemented by objects that are -// able to resolve a given set of resource provider version constraints -// into ResourceProviderFactory callbacks. -type ResourceProviderResolver interface { - // Given a constraint map, return a ResourceProviderFactory for each - // requested provider. If some or all of the constraints cannot be - // satisfied, return a non-nil slice of errors describing the problems. - ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) -} - -// ResourceProviderResolverFunc wraps a callback function and turns it into -// a ResourceProviderResolver implementation, for convenience in situations -// where a function and its associated closure are sufficient as a resolver -// implementation. -type ResourceProviderResolverFunc func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) - -// ResolveProviders implements ResourceProviderResolver by calling the -// wrapped function. -func (f ResourceProviderResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) { - return f(reqd) -} - -// ResourceProviderResolverFixed returns a ResourceProviderResolver that -// has a fixed set of provider factories provided by the caller. The returned -// resolver ignores version constraints entirely and just returns the given -// factory for each requested provider name. -// -// This function is primarily used in tests, to provide mock providers or -// in-process providers under test. -func ResourceProviderResolverFixed(factories map[string]ResourceProviderFactory) ResourceProviderResolver { - return ResourceProviderResolverFunc(func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) { - ret := make(map[string]ResourceProviderFactory, len(reqd)) - var errs []error - for name := range reqd { - if factory, exists := factories[name]; exists { - ret[name] = factory - } else { - errs = append(errs, fmt.Errorf("provider %q is not available", name)) - } - } - return ret, errs - }) -} - -// ResourceProviderFactory is a function type that creates a new instance -// of a resource provider. -type ResourceProviderFactory func() (ResourceProvider, error) - -// ResourceProviderFactoryFixed is a helper that creates a -// ResourceProviderFactory that just returns some fixed provider. 
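In tests, the fixed resolver above is typically combined with the factory helper defined just below to inject an in-process provider. A minimal sketch, with the provider name chosen arbitrarily:

// Hypothetical test wiring: version constraints are ignored and the
// same provider instance is returned for every request of "github".
func newTestResolver(mock ResourceProvider) ResourceProviderResolver {
	return ResourceProviderResolverFixed(map[string]ResourceProviderFactory{
		"github": ResourceProviderFactoryFixed(mock),
	})
}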
-func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { - return func() (ResourceProvider, error) { - return p, nil - } -} - -func ProviderHasResource(p ResourceProvider, n string) bool { - for _, rt := range p.Resources() { - if rt.Name == n { - return true - } - } - - return false -} - -func ProviderHasDataSource(p ResourceProvider, n string) bool { - for _, rt := range p.DataSources() { - if rt.Name == n { - return true - } - } - - return false -} - -// resourceProviderFactories matches available plugins to the given version -// requirements to produce a map of compatible provider plugins if possible, -// or an error if the currently-available plugins are insufficient. -// -// This should be called only with configurations that have passed calls -// to config.Validate(), which ensures that all of the given version -// constraints are valid. It will panic if any invalid constraints are present. -func resourceProviderFactories(resolver providers.Resolver, reqd discovery.PluginRequirements) (map[string]providers.Factory, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret, errs := resolver.ResolveProviders(reqd) - if errs != nil { - diags = diags.Append( - tfdiags.Sourceless(tfdiags.Error, - "Could not satisfy plugin requirements", - errPluginInit, - ), - ) - - for _, err := range errs { - diags = diags.Append(err) - } - - return nil, diags - } - - return ret, nil -} - -const errPluginInit = ` -Plugin reinitialization required. Please run "terraform init". - -Plugins are external binaries that Terraform uses to access and manipulate -resources. The configuration provided requires plugins which can't be located, -don't satisfy the version constraints, or are otherwise incompatible. - -Terraform automatically discovers provider requirements from your -configuration, including providers used in child modules. To see the -requirements and constraints from each module, run "terraform providers". -` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go deleted file mode 100644 index 4000e3d214..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go +++ /dev/null @@ -1,315 +0,0 @@ -package terraform - -import ( - "sync" -) - -// MockResourceProvider implements ResourceProvider but mocks out all the -// calls for testing purposes. -type MockResourceProvider struct { - sync.Mutex - - // Anything you want, in case you need to store extra data with the mock. 
- Meta interface{} - - CloseCalled bool - CloseError error - GetSchemaCalled bool - GetSchemaRequest *ProviderSchemaRequest - GetSchemaReturn *ProviderSchema - GetSchemaReturnError error - InputCalled bool - InputInput UIInput - InputConfig *ResourceConfig - InputReturnConfig *ResourceConfig - InputReturnError error - InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error) - ApplyCalled bool - ApplyInfo *InstanceInfo - ApplyState *InstanceState - ApplyDiff *InstanceDiff - ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) - ApplyReturn *InstanceState - ApplyReturnError error - ConfigureCalled bool - ConfigureConfig *ResourceConfig - ConfigureFn func(*ResourceConfig) error - ConfigureReturnError error - DiffCalled bool - DiffInfo *InstanceInfo - DiffState *InstanceState - DiffDesired *ResourceConfig - DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error) - DiffReturn *InstanceDiff - DiffReturnError error - RefreshCalled bool - RefreshInfo *InstanceInfo - RefreshState *InstanceState - RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error) - RefreshReturn *InstanceState - RefreshReturnError error - ResourcesCalled bool - ResourcesReturn []ResourceType - ReadDataApplyCalled bool - ReadDataApplyInfo *InstanceInfo - ReadDataApplyDiff *InstanceDiff - ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error) - ReadDataApplyReturn *InstanceState - ReadDataApplyReturnError error - ReadDataDiffCalled bool - ReadDataDiffInfo *InstanceInfo - ReadDataDiffDesired *ResourceConfig - ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) - ReadDataDiffReturn *InstanceDiff - ReadDataDiffReturnError error - StopCalled bool - StopFn func() error - StopReturnError error - DataSourcesCalled bool - DataSourcesReturn []DataSource - ValidateCalled bool - ValidateConfig *ResourceConfig - ValidateFn func(*ResourceConfig) ([]string, []error) - ValidateReturnWarns []string - ValidateReturnErrors []error - ValidateResourceFn func(string, *ResourceConfig) ([]string, []error) - ValidateResourceCalled bool - ValidateResourceType string - ValidateResourceConfig *ResourceConfig - ValidateResourceReturnWarns []string - ValidateResourceReturnErrors []error - ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error) - ValidateDataSourceCalled bool - ValidateDataSourceType string - ValidateDataSourceConfig *ResourceConfig - ValidateDataSourceReturnWarns []string - ValidateDataSourceReturnErrors []error - - ImportStateCalled bool - ImportStateInfo *InstanceInfo - ImportStateID string - ImportStateReturn []*InstanceState - ImportStateReturnError error - ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error) -} - -func (p *MockResourceProvider) Close() error { - p.CloseCalled = true - return p.CloseError -} - -func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) (*ProviderSchema, error) { - p.Lock() - defer p.Unlock() - - p.GetSchemaCalled = true - p.GetSchemaRequest = req - return p.GetSchemaReturn, p.GetSchemaReturnError -} - -func (p *MockResourceProvider) Input( - input UIInput, c *ResourceConfig) (*ResourceConfig, error) { - p.Lock() - defer p.Unlock() - p.InputCalled = true - p.InputInput = input - p.InputConfig = c - if p.InputFn != nil { - return p.InputFn(input, c) - } - return p.InputReturnConfig, p.InputReturnError -} - -func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateCalled = true - 
p.ValidateConfig = c - if p.ValidateFn != nil { - return p.ValidateFn(c) - } - return p.ValidateReturnWarns, p.ValidateReturnErrors -} - -func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateResourceCalled = true - p.ValidateResourceType = t - p.ValidateResourceConfig = c - - if p.ValidateResourceFn != nil { - return p.ValidateResourceFn(t, c) - } - - return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors -} - -func (p *MockResourceProvider) Configure(c *ResourceConfig) error { - p.Lock() - defer p.Unlock() - - p.ConfigureCalled = true - p.ConfigureConfig = c - - if p.ConfigureFn != nil { - return p.ConfigureFn(c) - } - - return p.ConfigureReturnError -} - -func (p *MockResourceProvider) Stop() error { - p.Lock() - defer p.Unlock() - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopReturnError -} - -func (p *MockResourceProvider) Apply( - info *InstanceInfo, - state *InstanceState, - diff *InstanceDiff) (*InstanceState, error) { - // We only lock while writing data. Reading is fine - p.Lock() - p.ApplyCalled = true - p.ApplyInfo = info - p.ApplyState = state - p.ApplyDiff = diff - p.Unlock() - - if p.ApplyFn != nil { - return p.ApplyFn(info, state, diff) - } - - return p.ApplyReturn.DeepCopy(), p.ApplyReturnError -} - -func (p *MockResourceProvider) Diff( - info *InstanceInfo, - state *InstanceState, - desired *ResourceConfig) (*InstanceDiff, error) { - p.Lock() - defer p.Unlock() - - p.DiffCalled = true - p.DiffInfo = info - p.DiffState = state - p.DiffDesired = desired - - if p.DiffFn != nil { - return p.DiffFn(info, state, desired) - } - - return p.DiffReturn.DeepCopy(), p.DiffReturnError -} - -func (p *MockResourceProvider) Refresh( - info *InstanceInfo, - s *InstanceState) (*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.RefreshCalled = true - p.RefreshInfo = info - p.RefreshState = s - - if p.RefreshFn != nil { - return p.RefreshFn(info, s) - } - - return p.RefreshReturn.DeepCopy(), p.RefreshReturnError -} - -func (p *MockResourceProvider) Resources() []ResourceType { - p.Lock() - defer p.Unlock() - - p.ResourcesCalled = true - return p.ResourcesReturn -} - -func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.ImportStateCalled = true - p.ImportStateInfo = info - p.ImportStateID = id - if p.ImportStateFn != nil { - return p.ImportStateFn(info, id) - } - - var result []*InstanceState - if p.ImportStateReturn != nil { - result = make([]*InstanceState, len(p.ImportStateReturn)) - for i, v := range p.ImportStateReturn { - result[i] = v.DeepCopy() - } - } - - return result, p.ImportStateReturnError -} - -func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateDataSourceCalled = true - p.ValidateDataSourceType = t - p.ValidateDataSourceConfig = c - - if p.ValidateDataSourceFn != nil { - return p.ValidateDataSourceFn(t, c) - } - - return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors -} - -func (p *MockResourceProvider) ReadDataDiff( - info *InstanceInfo, - desired *ResourceConfig) (*InstanceDiff, error) { - p.Lock() - defer p.Unlock() - - p.ReadDataDiffCalled = true - p.ReadDataDiffInfo = info - p.ReadDataDiffDesired = desired - if p.ReadDataDiffFn != nil { - return p.ReadDataDiffFn(info, desired) - } - - return p.ReadDataDiffReturn.DeepCopy(), 
p.ReadDataDiffReturnError -} - -func (p *MockResourceProvider) ReadDataApply( - info *InstanceInfo, - d *InstanceDiff) (*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.ReadDataApplyCalled = true - p.ReadDataApplyInfo = info - p.ReadDataApplyDiff = d - - if p.ReadDataApplyFn != nil { - return p.ReadDataApplyFn(info, d) - } - - return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError -} - -func (p *MockResourceProvider) DataSources() []DataSource { - p.Lock() - defer p.Unlock() - - p.DataSourcesCalled = true - return p.DataSourcesReturn -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go deleted file mode 100644 index 74ee2a940d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go +++ /dev/null @@ -1,70 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" -) - -// ResourceProvisioner is an interface that must be implemented by any -// resource provisioner: the thing that initializes resources in -// a Terraform configuration. -type ResourceProvisioner interface { - // GetConfigSchema returns the schema for the provisioner type's main - // configuration block. This is called prior to Validate to enable some - // basic structural validation to be performed automatically and to allow - // the configuration to be properly extracted from potentially-ambiguous - // configuration file formats. - GetConfigSchema() (*configschema.Block, error) - - // Validate is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per resource. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - Validate(*ResourceConfig) ([]string, []error) - - // Apply runs the provisioner on a specific resource and returns the new - // resource state along with an error. Instead of a diff, the ResourceConfig - // is provided since provisioners only run after a resource has been - // newly created. - Apply(UIOutput, *InstanceState, *ResourceConfig) error - - // Stop is called when the provisioner should halt any in-flight actions. - // - // This can be used to make a nicer Ctrl-C experience for Terraform. - // Even if this isn't implemented to do anything (just returns nil), - // Terraform will still cleanly stop after the currently executing - // graph node is complete. However, this API can be used to make more - // efficient halts. - // - // Stop doesn't have to and shouldn't block waiting for in-flight actions - // to complete. It should take any action it wants and return immediately - // acknowledging it has received the stop request. Terraform core will - // automatically not make any further API calls to the provider soon - // after Stop is called (technically exactly once the currently executing - // graph nodes are complete). - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. 
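A common way to satisfy this non-blocking Stop contract is a closed channel that in-flight work polls. The following is a hedged, standalone sketch; the type and names are invented, not SDK code:

package example

import (
	"errors"
	"sync"
)

type stoppable struct {
	mu      sync.Mutex
	stopCh  chan struct{}
	stopped bool
}

func newStoppable() *stoppable { return &stoppable{stopCh: make(chan struct{})} }

// Stop signals workers and returns immediately, never waiting for
// in-flight actions, matching the contract described above.
func (s *stoppable) Stop() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.stopped {
		s.stopped = true
		close(s.stopCh)
	}
	return nil
}

// step is one unit of in-flight work that observes the stop signal.
func (s *stoppable) step() error {
	select {
	case <-s.stopCh:
		return errors.New("halted by Stop")
	default:
		return nil // ... do the actual work here ...
	}
}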
- Stop() error -} - -// ResourceProvisionerCloser is an interface that provisioners that can close -// connections that aren't needed anymore must implement. -type ResourceProvisionerCloser interface { - Close() error -} - -// ResourceProvisionerFactory is a function type that creates a new instance -// of a resource provisioner. -type ResourceProvisionerFactory func() (ResourceProvisioner, error) - -// ProvisionerFactory is a function type that creates a new instance -// of a provisioners.Interface. -type ProvisionerFactory = provisioners.Factory diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go deleted file mode 100644 index ed6f241bc8..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go +++ /dev/null @@ -1,87 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" -) - -// MockResourceProvisioner implements ResourceProvisioner but mocks out all the -// calls for testing purposes. -type MockResourceProvisioner struct { - sync.Mutex - // Anything you want, in case you need to store extra data with the mock. - Meta interface{} - - GetConfigSchemaCalled bool - GetConfigSchemaReturnSchema *configschema.Block - GetConfigSchemaReturnError error - - ApplyCalled bool - ApplyOutput UIOutput - ApplyState *InstanceState - ApplyConfig *ResourceConfig - ApplyFn func(*InstanceState, *ResourceConfig) error - ApplyReturnError error - - ValidateCalled bool - ValidateConfig *ResourceConfig - ValidateFn func(c *ResourceConfig) ([]string, []error) - ValidateReturnWarns []string - ValidateReturnErrors []error - - StopCalled bool - StopFn func() error - StopReturnError error -} - -var _ ResourceProvisioner = (*MockResourceProvisioner)(nil) - -func (p *MockResourceProvisioner) GetConfigSchema() (*configschema.Block, error) { - p.GetConfigSchemaCalled = true - return p.GetConfigSchemaReturnSchema, p.GetConfigSchemaReturnError -} - -func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateCalled = true - p.ValidateConfig = c - if p.ValidateFn != nil { - return p.ValidateFn(c) - } - return p.ValidateReturnWarns, p.ValidateReturnErrors -} - -func (p *MockResourceProvisioner) Apply( - output UIOutput, - state *InstanceState, - c *ResourceConfig) error { - p.Lock() - - p.ApplyCalled = true - p.ApplyOutput = output - p.ApplyState = state - p.ApplyConfig = c - if p.ApplyFn != nil { - fn := p.ApplyFn - p.Unlock() - return fn(state, c) - } - - defer p.Unlock() - return p.ApplyReturnError -} - -func (p *MockResourceProvisioner) Stop() error { - p.Lock() - defer p.Unlock() - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopReturnError -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go deleted file mode 100644 index 8bc3b017b1..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go +++ /dev/null @@ -1,278 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" 
- "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// Schemas is a container for various kinds of schema that Terraform needs -// during processing. -type Schemas struct { - Providers map[string]*ProviderSchema - Provisioners map[string]*configschema.Block -} - -// ProviderSchema returns the entire ProviderSchema object that was produced -// by the plugin for the given provider, or nil if no such schema is available. -// -// It's usually better to go use the more precise methods offered by type -// Schemas to handle this detail automatically. -func (ss *Schemas) ProviderSchema(typeName string) *ProviderSchema { - if ss.Providers == nil { - return nil - } - return ss.Providers[typeName] -} - -// ProviderConfig returns the schema for the provider configuration of the -// given provider type, or nil if no such schema is available. -func (ss *Schemas) ProviderConfig(typeName string) *configschema.Block { - ps := ss.ProviderSchema(typeName) - if ps == nil { - return nil - } - return ps.Provider -} - -// ResourceTypeConfig returns the schema for the configuration of a given -// resource type belonging to a given provider type, or nil of no such -// schema is available. -// -// In many cases the provider type is inferrable from the resource type name, -// but this is not always true because users can override the provider for -// a resource using the "provider" meta-argument. Therefore it's important to -// always pass the correct provider name, even though it many cases it feels -// redundant. -func (ss *Schemas) ResourceTypeConfig(providerType string, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { - ps := ss.ProviderSchema(providerType) - if ps == nil || ps.ResourceTypes == nil { - return nil, 0 - } - return ps.SchemaForResourceType(resourceMode, resourceType) -} - -// ProvisionerConfig returns the schema for the configuration of a given -// provisioner, or nil of no such schema is available. -func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { - return ss.Provisioners[name] -} - -// LoadSchemas searches the given configuration, state and plan (any of which -// may be nil) for constructs that have an associated schema, requests the -// necessary schemas from the given component factory (which must _not_ be nil), -// and returns a single object representing all of the necessary schemas. -// -// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing -// errors across multiple separate objects. Errors here will usually indicate -// either misbehavior on the part of one of the providers or of the provider -// protocol itself. When returned with errors, the returned schemas object is -// still valid but may be incomplete. 
-func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) { - schemas := &Schemas{ - Providers: map[string]*ProviderSchema{}, - Provisioners: map[string]*configschema.Block{}, - } - var diags tfdiags.Diagnostics - - newDiags := loadProviderSchemas(schemas.Providers, config, state, components) - diags = diags.Append(newDiags) - newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components) - diags = diags.Append(newDiags) - - return schemas, diags.Err() -} - -func loadProviderSchemas(schemas map[string]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - ensure := func(typeName string) { - if _, exists := schemas[typeName]; exists { - return - } - - log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", typeName) - provider, err := components.ResourceProvider(typeName, "early/"+typeName) - if err != nil { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[typeName] = &ProviderSchema{} - diags = diags.Append( - fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", typeName, err), - ) - return - } - defer func() { - provider.Close() - }() - - resp := provider.GetSchema() - if resp.Diagnostics.HasErrors() { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[typeName] = &ProviderSchema{} - diags = diags.Append( - fmt.Errorf("Failed to retrieve schema from provider %q: %s", typeName, resp.Diagnostics.Err()), - ) - return - } - - s := &ProviderSchema{ - Provider: resp.Provider.Block, - ResourceTypes: make(map[string]*configschema.Block), - DataSources: make(map[string]*configschema.Block), - - ResourceTypeSchemaVersions: make(map[string]uint64), - } - - if resp.Provider.Version < 0 { - // We're not using the version numbers here yet, but we'll check - // for validity anyway in case we start using them in future. - diags = diags.Append( - fmt.Errorf("invalid negative schema version provider configuration for provider %q", typeName), - ) - } - - for t, r := range resp.ResourceTypes { - s.ResourceTypes[t] = r.Block - s.ResourceTypeSchemaVersions[t] = uint64(r.Version) - if r.Version < 0 { - diags = diags.Append( - fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, typeName), - ) - } - } - - for t, d := range resp.DataSources { - s.DataSources[t] = d.Block - if d.Version < 0 { - // We're not using the version numbers here yet, but we'll check - // for validity anyway in case we start using them in future. 
- diags = diags.Append( - fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, typeName), - ) - } - } - - schemas[typeName] = s - } - - if config != nil { - for _, typeName := range config.ProviderTypes() { - ensure(typeName) - } - } - - if state != nil { - needed := providers.AddressedTypesAbs(state.ProviderAddrs()) - for _, typeName := range needed { - ensure(typeName) - } - } - - return diags -} - -func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - ensure := func(name string) { - if _, exists := schemas[name]; exists { - return - } - - log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) - provisioner, err := components.ResourceProvisioner(name, "early/"+name) - if err != nil { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[name] = &configschema.Block{} - diags = diags.Append( - fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err), - ) - return - } - defer func() { - if closer, ok := provisioner.(ResourceProvisionerCloser); ok { - closer.Close() - } - }() - - resp := provisioner.GetSchema() - if resp.Diagnostics.HasErrors() { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[name] = &configschema.Block{} - diags = diags.Append( - fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), - ) - return - } - - schemas[name] = resp.Provisioner - } - - if config != nil { - for _, rc := range config.Module.ManagedResources { - for _, pc := range rc.Managed.Provisioners { - ensure(pc.Type) - } - } - - // Must also visit our child modules, recursively. - for _, cc := range config.Children { - childDiags := loadProvisionerSchemas(schemas, cc, components) - diags = diags.Append(childDiags) - } - } - - return diags -} - -// ProviderSchema represents the schema for a provider's own configuration -// and the configuration for some or all of its resources and data sources. -// -// The completeness of this structure depends on how it was constructed. -// When constructed for a configuration, it will generally include only -// resource types and data sources used by that configuration. -type ProviderSchema struct { - Provider *configschema.Block - ResourceTypes map[string]*configschema.Block - DataSources map[string]*configschema.Block - - ResourceTypeSchemaVersions map[string]uint64 -} - -// SchemaForResourceType attempts to find a schema for the given mode and type. -// Returns nil if no such schema is available. -func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { - switch mode { - case addrs.ManagedResourceMode: - return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName] - case addrs.DataResourceMode: - // Data resources don't have schema versions right now, since state is discarded for each refresh - return ps.DataSources[typeName], 0 - default: - // Shouldn't happen, because the above cases are comprehensive. - return nil, 0 - } -} - -// SchemaForResourceAddr attempts to find a schema for the mode and type from -// the given resource address. Returns nil if no such schema is available. 
-func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { - return ps.SchemaForResourceType(addr.Mode, addr.Type) -} - -// ProviderSchemaRequest is used to describe to a ResourceProvider which -// aspects of schema are required, when calling the GetSchema method. -type ProviderSchemaRequest struct { - ResourceTypes []string - DataSources []string -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state.go deleted file mode 100644 index 98b20be7cb..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state.go +++ /dev/null @@ -1,2221 +0,0 @@ -package terraform - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/errwrap" - multierror "github.com/hashicorp/go-multierror" - uuid "github.com/hashicorp/go-uuid" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" - "github.com/mitchellh/copystructure" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" -) - -const ( - // StateVersion is the current version for our state file - StateVersion = 3 -) - -// rootModulePath is the path of the root module -var rootModulePath = []string{"root"} - -// normalizeModulePath transforms a legacy module path (which may or may not -// have a redundant "root" label at the start of it) into an -// addrs.ModuleInstance representing the same module. -// -// For legacy reasons, different parts of Terraform disagree about whether the -// root module has the path []string{} or []string{"root"}, and so this -// function accepts both and trims off the "root". An implication of this is -// that it's not possible to actually have a module call in the root module -// that is itself named "root", since that would be ambiguous. -// -// normalizeModulePath takes a raw module path and returns a path that -// has the rootModulePath prepended to it. If I could go back in time I -// would've never had a rootModulePath (empty path would be root). We can -// still fix this but thats a big refactor that my branch doesn't make sense -// for. Instead, this function normalizes paths. -func normalizeModulePath(p []string) addrs.ModuleInstance { - // FIXME: Remove this once everyone is using addrs.ModuleInstance. - - if len(p) > 0 && p[0] == "root" { - p = p[1:] - } - - ret := make(addrs.ModuleInstance, len(p)) - for i, name := range p { - // For now we don't actually support modules with multiple instances - // identified by keys, so we just treat every path element as a - // step with no key. - ret[i] = addrs.ModuleInstanceStep{ - Name: name, - } - } - return ret -} - -// State keeps track of a snapshot state-of-the-world that Terraform -// can use to keep track of what real world resources it is actually -// managing. 
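As a quick illustration of the path normalization described above, here is a sketch of the expected behavior (hypothetical helper calls, fmt assumed imported):

func normalizeExamples() {
	a := normalizeModulePath([]string{"root", "network"})
	b := normalizeModulePath([]string{"network"})
	fmt.Println(a.String()) // "module.network"
	fmt.Println(b.String()) // "module.network": the redundant "root" label is trimmed
	fmt.Println(len(normalizeModulePath([]string{"root"}))) // 0, i.e. the root module
}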
-type State struct { - // Version is the state file protocol version. - Version int `json:"version"` - - // TFVersion is the version of Terraform that wrote this state. - TFVersion string `json:"terraform_version,omitempty"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Lineage is set when a new, blank state is created and then - // never updated. This allows us to determine whether the serials - // of two states can be meaningfully compared. - // Apart from the guarantee that collisions between two lineages - // are very unlikely, this value is opaque and external callers - // should only compare lineage strings byte-for-byte for equality. - Lineage string `json:"lineage"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *RemoteState `json:"remote,omitempty"` - - // Backend tracks the configuration for the backend in use with - // this state. This is used to track any changes in the backend - // configuration. - Backend *BackendState `json:"backend,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*ModuleState `json:"modules"` - - mu sync.Mutex - - // IsBinaryDrivenTest is a special flag that assists with a binary driver - // heuristic, it should not be set externally - IsBinaryDrivenTest bool -} - -func (s *State) Lock() { s.mu.Lock() } -func (s *State) Unlock() { s.mu.Unlock() } - -// NewState is used to initialize a blank state -func NewState() *State { - s := &State{} - s.init() - return s -} - -// Children returns the ModuleStates that are direct children of -// the given path. If the path is "root", for example, then children -// returned might be "root.child", but not "root.child.grandchild". -func (s *State) Children(path []string) []*ModuleState { - s.Lock() - defer s.Unlock() - // TODO: test - - return s.children(path) -} - -func (s *State) children(path []string) []*ModuleState { - result := make([]*ModuleState, 0) - for _, m := range s.Modules { - if m == nil { - continue - } - - if len(m.Path) != len(path)+1 { - continue - } - if !reflect.DeepEqual(path, m.Path[:len(path)]) { - continue - } - - result = append(result, m) - } - - return result -} - -// AddModule adds the module with the given path to the state. -// -// This should be the preferred method to add module states since it -// allows us to optimize lookups later as well as control sorting. -func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { - s.Lock() - defer s.Unlock() - - return s.addModule(path) -} - -func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { - // check if the module exists first - m := s.moduleByPath(path) - if m != nil { - return m - } - - // Lower the new-style address into a legacy-style address. - // This requires that none of the steps have instance keys, which is - // true for all addresses at the time of implementing this because - // "count" and "for_each" are not yet implemented for modules. - // For the purposes of state, the legacy address format also includes - // a redundant extra prefix element "root". It is important to include - // this because the "prune" method will remove any module that has a - // path length less than one, and other parts of the state code will - // trim off the first element indiscriminately. 
- legacyPath := make([]string, len(path)+1) - legacyPath[0] = "root" - for i, step := range path { - if step.InstanceKey != addrs.NoKey { - // FIXME: Once the rest of Terraform is ready to use count and - // for_each, remove all of this and just write the addrs.ModuleInstance - // value itself into the ModuleState. - panic("state cannot represent modules with count or for_each keys") - } - - legacyPath[i+1] = step.Name - } - - m = &ModuleState{Path: legacyPath} - m.init() - s.Modules = append(s.Modules, m) - s.sort() - return m -} - -// ModuleByPath is used to lookup the module state for the given path. -// This should be the preferred lookup mechanism as it allows for future -// lookup optimizations. -func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { - if s == nil { - return nil - } - s.Lock() - defer s.Unlock() - - return s.moduleByPath(path) -} - -func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { - for _, mod := range s.Modules { - if mod == nil { - continue - } - if mod.Path == nil { - panic("missing module path") - } - modPath := normalizeModulePath(mod.Path) - if modPath.String() == path.String() { - return mod - } - } - return nil -} - -// Empty returns true if the state is empty. -func (s *State) Empty() bool { - if s == nil { - return true - } - s.Lock() - defer s.Unlock() - - return len(s.Modules) == 0 -} - -// HasResources returns true if the state contains any resources. -// -// This is similar to !s.Empty, but returns true also in the case where the -// state has modules but all of them are devoid of resources. -func (s *State) HasResources() bool { - if s.Empty() { - return false - } - - for _, mod := range s.Modules { - if len(mod.Resources) > 0 { - return true - } - } - - return false -} - -// IsRemote returns true if State represents a state that exists and is -// remote. -func (s *State) IsRemote() bool { - if s == nil { - return false - } - s.Lock() - defer s.Unlock() - - if s.Remote == nil { - return false - } - if s.Remote.Type == "" { - return false - } - - return true -} - -// Validate validates the integrity of this state file. -// -// Certain properties of the statefile are expected by Terraform in order -// to behave properly. The core of Terraform will assume that once it -// receives a State structure that it has been validated. This validation -// check should be called to ensure that. -// -// If this returns an error, then the user should be notified. The error -// response will include detailed information on the nature of the error. -func (s *State) Validate() error { - s.Lock() - defer s.Unlock() - - var result error - - // !!!! FOR DEVELOPERS !!!! - // - // Any errors returned from this Validate function will BLOCK TERRAFORM - // from loading a state file. Therefore, this should only contain checks - // that are only resolvable through manual intervention. - // - // !!!! FOR DEVELOPERS !!!! - - // Make sure there are no duplicate module states. We open a new - // block here so we can use basic variable names and future validations - // can do the same. - { - found := make(map[string]struct{}) - for _, ms := range s.Modules { - if ms == nil { - continue - } - - key := strings.Join(ms.Path, ".") - if _, ok := found[key]; ok { - result = multierror.Append(result, fmt.Errorf( - strings.TrimSpace(stateValidateErrMultiModule), key)) - continue - } - - found[key] = struct{}{} - } - } - - return result -} - -// Remove removes the item in the state at the given address, returning -// any errors that may have occurred. 
-// -// If the address references a module state or resource, it will delete -// all children as well. To check what will be deleted, use a StateFilter -// first. -func (s *State) Remove(addr ...string) error { - s.Lock() - defer s.Unlock() - - // Filter out what we need to delete - filter := &StateFilter{State: s} - results, err := filter.Filter(addr...) - if err != nil { - return err - } - - // If we have no results, just exit early, we're not going to do anything. - // While what happens below is fairly fast, this is an important early - // exit since the prune below might modify the state more and we don't - // want to modify the state if we don't have to. - if len(results) == 0 { - return nil - } - - // Go through each result and grab what we need - removed := make(map[interface{}]struct{}) - for _, r := range results { - // Convert the path to our own type - path := append([]string{"root"}, r.Path...) - - // If we removed this already, then ignore - if _, ok := removed[r.Value]; ok { - continue - } - - // If we removed the parent already, then ignore - if r.Parent != nil { - if _, ok := removed[r.Parent.Value]; ok { - continue - } - } - - // Add this to the removed list - removed[r.Value] = struct{}{} - - switch v := r.Value.(type) { - case *ModuleState: - s.removeModule(path, v) - case *ResourceState: - s.removeResource(path, v) - case *InstanceState: - s.removeInstance(path, r.Parent.Value.(*ResourceState), v) - default: - return fmt.Errorf("unknown type to delete: %T", r.Value) - } - } - - // Prune since the removal functions often do the bare minimum to - // remove a thing and may leave around dangling empty modules, resources, - // etc. Prune will clean that all up. - s.prune() - - return nil -} - -func (s *State) removeModule(path []string, v *ModuleState) { - for i, m := range s.Modules { - if m == v { - s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil - return - } - } -} - -func (s *State) removeResource(path []string, v *ResourceState) { - // Get the module this resource lives in. If it doesn't exist, we're done. - mod := s.moduleByPath(normalizeModulePath(path)) - if mod == nil { - return - } - - // Find this resource. This is a O(N) lookup when if we had the key - // it could be O(1) but even with thousands of resources this shouldn't - // matter right now. We can easily up performance here when the time comes. - for k, r := range mod.Resources { - if r == v { - // Found it - delete(mod.Resources, k) - return - } - } -} - -func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { - // Go through the resource and find the instance that matches this - // (if any) and remove it. - - // Check primary - if r.Primary == v { - r.Primary = nil - return - } - - // Check lists - lists := [][]*InstanceState{r.Deposed} - for _, is := range lists { - for i, instance := range is { - if instance == v { - // Found it, remove it - is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil - - // Done - return - } - } - } -} - -// RootModule returns the ModuleState for the root module -func (s *State) RootModule() *ModuleState { - root := s.ModuleByPath(addrs.RootModuleInstance) - if root == nil { - panic("missing root module") - } - return root -} - -// Equal tests if one state is equal to another. 
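Because Remove above resolves addresses through StateFilter and cascades to children, callers can target whole modules or single resources alike. A brief hypothetical usage (addresses invented for illustration):

// Sketch: deleting a whole module (children included) and then a
// single resource; prune runs inside Remove to clean up empties.
func removeExamples(s *State) error {
	if err := s.Remove("module.network"); err != nil {
		return err
	}
	return s.Remove("aws_instance.web")
}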
-func (s *State) Equal(other *State) bool { - // If one is nil, we do a direct check - if s == nil || other == nil { - return s == other - } - - s.Lock() - defer s.Unlock() - return s.equal(other) -} - -func (s *State) equal(other *State) bool { - if s == nil || other == nil { - return s == other - } - - // If the versions are different, they're certainly not equal - if s.Version != other.Version { - return false - } - - // If any of the modules are not equal, then this state isn't equal - if len(s.Modules) != len(other.Modules) { - return false - } - for _, m := range s.Modules { - // This isn't very optimal currently but works. - otherM := other.moduleByPath(normalizeModulePath(m.Path)) - if otherM == nil { - return false - } - - // If they're not equal, then we're not equal! - if !m.Equal(otherM) { - return false - } - } - - return true -} - -// MarshalEqual is similar to Equal but provides a stronger definition of -// "equal", where two states are equal if and only if their serialized form -// is byte-for-byte identical. -// -// This is primarily useful for callers that are trying to save snapshots -// of state to persistent storage, allowing them to detect when a new -// snapshot must be taken. -// -// Note that the serial number and lineage are included in the serialized form, -// so it's the caller's responsibility to properly manage these attributes -// so that this method is only called on two states that have the same -// serial and lineage, unless detecting such differences is desired. -func (s *State) MarshalEqual(other *State) bool { - if s == nil && other == nil { - return true - } else if s == nil || other == nil { - return false - } - - recvBuf := &bytes.Buffer{} - otherBuf := &bytes.Buffer{} - - err := WriteState(s, recvBuf) - if err != nil { - // should never happen, since we're writing to a buffer - panic(err) - } - - err = WriteState(other, otherBuf) - if err != nil { - // should never happen, since we're writing to a buffer - panic(err) - } - - return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes()) -} - -type StateAgeComparison int - -const ( - StateAgeEqual StateAgeComparison = 0 - StateAgeReceiverNewer StateAgeComparison = 1 - StateAgeReceiverOlder StateAgeComparison = -1 -) - -// CompareAges compares one state with another for which is "older". -// -// This is a simple check using the state's serial, and is thus only as -// reliable as the serial itself. In the normal case, only one state -// exists for a given combination of lineage/serial, but Terraform -// does not guarantee this and so the result of this method should be -// used with care. -// -// Returns an integer that is negative if the receiver is older than -// the argument, positive if the converse, and zero if they are equal. -// An error is returned if the two states are not of the same lineage, -// in which case the integer returned has no meaning. 
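Before the implementation, here is a sketch of how a caller syncing local and remote snapshots might act on CompareAges (a hypothetical helper; a lineage mismatch aborts the decision):

// shouldPush reports whether the local snapshot should overwrite the
// remote one, per the CompareAges semantics documented above.
func shouldPush(local, remote *State) (bool, error) {
	cmp, err := local.CompareAges(remote)
	if err != nil {
		return false, err // differing lineage: the comparison is meaningless
	}
	return cmp == StateAgeReceiverNewer, nil
}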
-func (s *State) CompareAges(other *State) (StateAgeComparison, error) { - // nil states are "older" than actual states - switch { - case s != nil && other == nil: - return StateAgeReceiverNewer, nil - case s == nil && other != nil: - return StateAgeReceiverOlder, nil - case s == nil && other == nil: - return StateAgeEqual, nil - } - - if !s.SameLineage(other) { - return StateAgeEqual, fmt.Errorf( - "can't compare two states of differing lineage", - ) - } - - s.Lock() - defer s.Unlock() - - switch { - case s.Serial < other.Serial: - return StateAgeReceiverOlder, nil - case s.Serial > other.Serial: - return StateAgeReceiverNewer, nil - default: - return StateAgeEqual, nil - } -} - -// SameLineage returns true only if the state given in argument belongs -// to the same "lineage" of states as the receiver. -func (s *State) SameLineage(other *State) bool { - s.Lock() - defer s.Unlock() - - // If one of the states has no lineage then it is assumed to predate - // this concept, and so we'll accept it as belonging to any lineage - // so that a lineage string can be assigned to newer versions - // without breaking compatibility with older versions. - if s.Lineage == "" || other.Lineage == "" { - return true - } - - return s.Lineage == other.Lineage -} - -// DeepCopy performs a deep copy of the state structure and returns -// a new structure. -func (s *State) DeepCopy() *State { - if s == nil { - return nil - } - - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*State) -} - -// FromFutureTerraform checks if this state was written by a Terraform -// version from the future. -func (s *State) FromFutureTerraform() bool { - s.Lock() - defer s.Unlock() - - // No TF version means it is certainly from the past - if s.TFVersion == "" { - return false - } - - v := version.Must(version.NewVersion(s.TFVersion)) - return tfversion.SemVer.LessThan(v) -} - -func (s *State) Init() { - s.Lock() - defer s.Unlock() - s.init() -} - -func (s *State) init() { - if s.Version == 0 { - s.Version = StateVersion - } - - if s.moduleByPath(addrs.RootModuleInstance) == nil { - s.addModule(addrs.RootModuleInstance) - } - s.ensureHasLineage() - - for _, mod := range s.Modules { - if mod != nil { - mod.init() - } - } - - if s.Remote != nil { - s.Remote.init() - } - -} - -func (s *State) EnsureHasLineage() { - s.Lock() - defer s.Unlock() - - s.ensureHasLineage() -} - -func (s *State) ensureHasLineage() { - if s.Lineage == "" { - lineage, err := uuid.GenerateUUID() - if err != nil { - panic(fmt.Errorf("Failed to generate lineage: %v", err)) - } - s.Lineage = lineage - log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) - } else { - log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) - } -} - -// AddModuleState insert this module state and override any existing ModuleState -func (s *State) AddModuleState(mod *ModuleState) { - mod.init() - s.Lock() - defer s.Unlock() - - s.addModuleState(mod) -} - -func (s *State) addModuleState(mod *ModuleState) { - for i, m := range s.Modules { - if reflect.DeepEqual(m.Path, mod.Path) { - s.Modules[i] = mod - return - } - } - - s.Modules = append(s.Modules, mod) - s.sort() -} - -// prune is used to remove any resources that are no longer required -func (s *State) prune() { - if s == nil { - return - } - - // Filter out empty modules. - // A module is always assumed to have a path, and it's length isn't always - // bounds checked later on. 
Modules may be "emptied" during destroy, but we - never want to store those in the state. - for i := 0; i < len(s.Modules); i++ { - if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { - s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) - i-- - } - } - - for _, mod := range s.Modules { - mod.prune() - } - if s.Remote != nil && s.Remote.Empty() { - s.Remote = nil - } -} - -// sort sorts the modules -func (s *State) sort() { - sort.Sort(moduleStateSort(s.Modules)) - - // Allow modules to be sorted - for _, m := range s.Modules { - if m != nil { - m.sort() - } - } -} - -func (s *State) String() string { - if s == nil { - return "<nil>" - } - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - for _, m := range s.Modules { - mStr := m.String() - - // If we're the root module, we just write the output directly. - if reflect.DeepEqual(m.Path, rootModulePath) { - buf.WriteString(mStr + "\n") - continue - } - - buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - text := s.Text() - if text != "" { - text = " " + text - } - - buf.WriteString(fmt.Sprintf("%s\n", text)) - } - } - - return strings.TrimSpace(buf.String()) -} - -// BackendState stores the configuration to connect to a remote backend. -type BackendState struct { - Type string `json:"type"` // Backend type - ConfigRaw json.RawMessage `json:"config"` // Backend raw config - Hash uint64 `json:"hash"` // Hash of portion of configuration from config files -} - -// Empty returns true if BackendState has no state. -func (s *BackendState) Empty() bool { - return s == nil || s.Type == "" -} - -// Config decodes the type-specific configuration object using the provided -// schema and returns the result as a cty.Value. -// -// An error is returned if the stored configuration does not conform to the -// given schema. -func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) { - ty := schema.ImpliedType() - if s == nil { - return cty.NullVal(ty), nil - } - return ctyjson.Unmarshal(s.ConfigRaw, ty) -} - -// SetConfig replaces (in-place) the type-specific configuration object using -// the provided value and associated schema. -// -// An error is returned if the given value does not conform to the implied -// type of the schema. -func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error { - ty := schema.ImpliedType() - buf, err := ctyjson.Marshal(val, ty) - if err != nil { - return err - } - s.ConfigRaw = buf - return nil -} - -// ForPlan produces an alternative representation of the receiver that is -// suitable for storing in a plan. The current workspace must additionally -// be provided, to be stored alongside the backend configuration. -- -// The backend configuration schema is required in order to properly -// encode the backend-specific configuration settings. -func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) { - if s == nil { - return nil, nil - } - - configVal, err := s.Config(schema) - if err != nil { - return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err) - } - return plans.NewBackend(s.Type, configVal, schema, workspaceName) -} - -// RemoteState is used to track the information about a remote -// state store that we push/pull state to.
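Stepping back to BackendState above: Config and SetConfig round-trip the backend settings through JSON against the schema's implied type. A minimal sketch, assuming a hypothetical backend schema whose only attribute is "bucket" (cty assumed imported):

func backendConfigRoundTrip(schema *configschema.Block) error {
	val := cty.ObjectVal(map[string]cty.Value{
		"bucket": cty.StringVal("my-state-bucket"),
	})
	bs := &BackendState{Type: "s3"}
	if err := bs.SetConfig(val, schema); err != nil {
		return err // value did not conform to the schema's implied type
	}
	decoded, err := bs.Config(schema)
	if err != nil {
		return err
	}
	_ = decoded // the same value, recovered from bs.ConfigRaw
	return nil
}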
-type RemoteState struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` - - mu sync.Mutex -} - -func (s *RemoteState) Lock() { s.mu.Lock() } -func (s *RemoteState) Unlock() { s.mu.Unlock() } - -func (r *RemoteState) init() { - r.Lock() - defer r.Unlock() - - if r.Config == nil { - r.Config = make(map[string]string) - } -} - -func (r *RemoteState) Empty() bool { - if r == nil { - return true - } - r.Lock() - defer r.Unlock() - - return r.Type == "" -} - -func (r *RemoteState) Equals(other *RemoteState) bool { - r.Lock() - defer r.Unlock() - - if r.Type != other.Type { - return false - } - if len(r.Config) != len(other.Config) { - return false - } - for k, v := range r.Config { - if other.Config[k] != v { - return false - } - } - return true -} - -// OutputState is used to track the state relevant to a single output. -type OutputState struct { - // Sensitive describes whether the output is considered sensitive, - // which may lead to masking the value on screen in some cases. - Sensitive bool `json:"sensitive"` - // Type describes the structure of Value. Valid values are "string", - // "map" and "list" - Type string `json:"type"` - // Value contains the value of the output, in the structure described - // by the Type field. - Value interface{} `json:"value"` - - mu sync.Mutex -} - -func (s *OutputState) Lock() { s.mu.Lock() } -func (s *OutputState) Unlock() { s.mu.Unlock() } - -func (s *OutputState) String() string { - return fmt.Sprintf("%#v", s.Value) -} - -// Equal compares two OutputState structures for equality. nil values are -// considered equal. -func (s *OutputState) Equal(other *OutputState) bool { - if s == nil && other == nil { - return true - } - - if s == nil || other == nil { - return false - } - s.Lock() - defer s.Unlock() - - if s.Type != other.Type { - return false - } - - if s.Sensitive != other.Sensitive { - return false - } - - if !reflect.DeepEqual(s.Value, other.Value) { - return false - } - - return true -} - -// ModuleState is used to track all the state relevant to a single -// module. Previous to Terraform 0.3, all state belonged to the "root" -// module. -type ModuleState struct { - // Path is the import path from the root module. Module imports are - // always disjoint, so the path represents a module tree - Path []string `json:"path"` - - // Locals are kept only transiently in-memory, because we can always - // re-compute them. - Locals map[string]interface{} `json:"-"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]*OutputState `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*ResourceState `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: a module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on.
- // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` - - mu sync.Mutex -} - -func (s *ModuleState) Lock() { s.mu.Lock() } -func (s *ModuleState) Unlock() { s.mu.Unlock() } - -// Equal tests whether one module state is equal to another. -func (m *ModuleState) Equal(other *ModuleState) bool { - m.Lock() - defer m.Unlock() - - // Paths must be equal - if !reflect.DeepEqual(m.Path, other.Path) { - return false - } - - // Outputs must be equal - if len(m.Outputs) != len(other.Outputs) { - return false - } - for k, v := range m.Outputs { - if !other.Outputs[k].Equal(v) { - return false - } - } - - // Dependencies must be equal. This sorts these in place but - // this shouldn't cause any problems. - sort.Strings(m.Dependencies) - sort.Strings(other.Dependencies) - if len(m.Dependencies) != len(other.Dependencies) { - return false - } - for i, d := range m.Dependencies { - if other.Dependencies[i] != d { - return false - } - } - - // Resources must be equal - if len(m.Resources) != len(other.Resources) { - return false - } - for k, r := range m.Resources { - otherR, ok := other.Resources[k] - if !ok { - return false - } - - if !r.Equal(otherR) { - return false - } - } - - return true -} - -// IsRoot says whether or not this module state is for the root module. -func (m *ModuleState) IsRoot() bool { - m.Lock() - defer m.Unlock() - return reflect.DeepEqual(m.Path, rootModulePath) -} - -// IsDescendent returns true if other is a descendant of this module. -func (m *ModuleState) IsDescendent(other *ModuleState) bool { - m.Lock() - defer m.Unlock() - - i := len(m.Path) - return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path) -} - -// Orphans returns a list of keys of resources that are in the State -// but aren't present in the configuration itself. Hence, these keys -// represent the state of resources that are orphans. -func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance { - m.Lock() - defer m.Unlock() - - inConfig := make(map[string]struct{}) - if c != nil { - for _, r := range c.ManagedResources { - inConfig[r.Addr().String()] = struct{}{} - } - for _, r := range c.DataResources { - inConfig[r.Addr().String()] = struct{}{} - } - } - - var result []addrs.ResourceInstance - for k := range m.Resources { - // Since we've not yet updated state to use our new address format, - // we need to do some shimming here. - legacyAddr, err := parseResourceAddressInternal(k) - if err != nil { - // Suggests that the user tampered with the state, since we always - // generate valid internal addresses. - log.Printf("ModuleState has invalid resource key %q. Ignoring.", k) - continue - } - - addr := legacyAddr.AbsResourceInstanceAddr().Resource - compareKey := addr.Resource.String() // compare by resource address, ignoring instance key - if _, exists := inConfig[compareKey]; !exists { - result = append(result, addr) - } - } - return result -} - -// RemovedOutputs returns a list of outputs that are in the State but aren't -// present in the configuration itself. -func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue { - if outputs == nil { - // If we got no output map at all then we'll just treat our set of - // configured outputs as empty, since that suggests that they've all - // been removed by removing their containing module.
- outputs = make(map[string]*configs.Output) - } - - s.Lock() - defer s.Unlock() - - var ret []addrs.OutputValue - for n := range s.Outputs { - if _, declared := outputs[n]; !declared { - ret = append(ret, addrs.OutputValue{ - Name: n, - }) - } - } - - return ret -} - -// View returns a view with the given resource prefix. -func (m *ModuleState) View(id string) *ModuleState { - if m == nil { - return m - } - - r := m.deepcopy() - for k := range r.Resources { - if id == k || strings.HasPrefix(k, id+".") { - continue - } - - delete(r.Resources, k) - } - - return r -} - -func (m *ModuleState) init() { - m.Lock() - defer m.Unlock() - - if m.Path == nil { - m.Path = []string{} - } - if m.Outputs == nil { - m.Outputs = make(map[string]*OutputState) - } - if m.Resources == nil { - m.Resources = make(map[string]*ResourceState) - } - - if m.Dependencies == nil { - m.Dependencies = make([]string, 0) - } - - for _, rs := range m.Resources { - rs.init() - } -} - -func (m *ModuleState) deepcopy() *ModuleState { - if m == nil { - return nil - } - - stateCopy, err := copystructure.Config{Lock: true}.Copy(m) - if err != nil { - panic(err) - } - - return stateCopy.(*ModuleState) -} - -// prune is used to remove any resources that are no longer required -func (m *ModuleState) prune() { - m.Lock() - defer m.Unlock() - - for k, v := range m.Resources { - if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { - delete(m.Resources, k) - continue - } - - v.prune() - } - - for k, v := range m.Outputs { - if v.Value == hcl2shim.UnknownVariableValue { - delete(m.Outputs, k) - } - } - - m.Dependencies = uniqueStrings(m.Dependencies) -} - -func (m *ModuleState) sort() { - for _, v := range m.Resources { - v.sort() - } -} - -func (m *ModuleState) String() string { - m.Lock() - defer m.Unlock() - - var buf bytes.Buffer - - if len(m.Resources) == 0 { - buf.WriteString("<no state>") - } - - names := make([]string, 0, len(m.Resources)) - for name := range m.Resources { - names = append(names, name) - } - - sort.Sort(resourceNameSort(names)) - - for _, k := range names { - rs := m.Resources[k] - var id string - if rs.Primary != nil { - id = rs.Primary.ID - } - if id == "" { - id = "<not created>" - } - - taintStr := "" - if rs.Primary.Tainted { - taintStr = " (tainted)" - } - - deposedStr := "" - if len(rs.Deposed) > 0 { - deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) - } - - buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) - buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) - if rs.Provider != "" { - buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) - } - - var attributes map[string]string - if rs.Primary != nil { - attributes = rs.Primary.Attributes - } - attrKeys := make([]string, 0, len(attributes)) - for ak := range attributes { - if ak == "id" { - continue - } - - attrKeys = append(attrKeys, ak) - } - - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) - } - - for idx, t := range rs.Deposed { - taintStr := "" - if t.Tainted { - taintStr = " (tainted)" - } - buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) - } - - if len(rs.Dependencies) > 0 { - buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) - for _, dep := range rs.Dependencies { - buf.WriteString(fmt.Sprintf(" %s\n", dep)) - } - } - } - - if len(m.Outputs) > 0 { - buf.WriteString("\nOutputs:\n\n") - - ks := make([]string, 0, len(m.Outputs)) - for k := range m.Outputs { - ks = append(ks, k) - } - -
sort.Strings(ks) - - for _, k := range ks { - v := m.Outputs[k] - switch vTyped := v.Value.(type) { - case string: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case []interface{}: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case map[string]interface{}: - var mapKeys []string - for key := range vTyped { - mapKeys = append(mapKeys, key) - } - sort.Strings(mapKeys) - - var mapBuf bytes.Buffer - mapBuf.WriteString("{") - for _, key := range mapKeys { - mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) - } - mapBuf.WriteString("}") - - buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) - } - } - } - - return buf.String() -} - -func (m *ModuleState) Empty() bool { - return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0 -} - -// ResourceStateKey is a structured representation of the key used for the -// ModuleState.Resources mapping -type ResourceStateKey struct { - Name string - Type string - Mode ResourceMode - Index int -} - -// Equal determines whether two ResourceStateKeys are the same -func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { - if rsk == nil || other == nil { - return false - } - if rsk.Mode != other.Mode { - return false - } - if rsk.Type != other.Type { - return false - } - if rsk.Name != other.Name { - return false - } - if rsk.Index != other.Index { - return false - } - return true -} - -func (rsk *ResourceStateKey) String() string { - if rsk == nil { - return "" - } - var prefix string - switch rsk.Mode { - case ManagedResourceMode: - prefix = "" - case DataResourceMode: - prefix = "data." - default: - panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) - } - if rsk.Index == -1 { - return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) - } - return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) -} - -// ParseResourceStateKey accepts a key in the format used by -// ModuleState.Resources and returns a resource name and resource index. In the -// state, a resource has the format "type.name.index" or "type.name". In the -// latter case, the index is returned as -1. -func ParseResourceStateKey(k string) (*ResourceStateKey, error) { - parts := strings.Split(k, ".") - mode := ManagedResourceMode - if len(parts) > 0 && parts[0] == "data" { - mode = DataResourceMode - // Don't need the constant "data" prefix for parsing - // now that we've figured out the mode. - parts = parts[1:] - } - if len(parts) < 2 || len(parts) > 3 { - return nil, fmt.Errorf("Malformed resource state key: %s", k) - } - rsk := &ResourceStateKey{ - Mode: mode, - Type: parts[0], - Name: parts[1], - Index: -1, - } - if len(parts) == 3 { - index, err := strconv.Atoi(parts[2]) - if err != nil { - return nil, fmt.Errorf("Malformed resource state key index: %s", k) - } - rsk.Index = index - } - return rsk, nil -} - -// ResourceState holds the state of a resource that is used so that -// a provider can find and manage an existing resource as well as for -// storing attributes that are used to populate variables of child -// resources. -// -// Attributes has attributes about the created resource that are -// queryable in interpolation: "${type.id.attr}" -// -// Extra is just extra data that a provider can return that we store -// for later, but is not exposed in any way to the user. -// -type ResourceState struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. 
- Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instance on which providers will act. - Primary *InstanceState `json:"primary"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. - // - // If there were problems creating the replacement Primary, the Deposed - // instance and the (now tainted) replacement Primary will be swapped so the - // tainted replacement will be cleaned up instead. - // - // An instance will remain in the Deposed list until it is successfully - // destroyed and purged. - Deposed []*InstanceState `json:"deposed"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider"` - - mu sync.Mutex -} - -func (s *ResourceState) Lock() { s.mu.Lock() } -func (s *ResourceState) Unlock() { s.mu.Unlock() } - -// Equal tests whether two ResourceStates are equal. -func (s *ResourceState) Equal(other *ResourceState) bool { - s.Lock() - defer s.Unlock() - - if s.Type != other.Type { - return false - } - - if s.Provider != other.Provider { - return false - } - - // Dependencies must be equal - sort.Strings(s.Dependencies) - sort.Strings(other.Dependencies) - if len(s.Dependencies) != len(other.Dependencies) { - return false - } - for i, d := range s.Dependencies { - if other.Dependencies[i] != d { - return false - } - } - - // States must be equal - if !s.Primary.Equal(other.Primary) { - return false - } - - return true -} - -// Taint marks a resource as tainted. -func (s *ResourceState) Taint() { - s.Lock() - defer s.Unlock() - - if s.Primary != nil { - s.Primary.Tainted = true - } -} - -// Untaint unmarks a resource as tainted. -func (s *ResourceState) Untaint() { - s.Lock() - defer s.Unlock() - - if s.Primary != nil { - s.Primary.Tainted = false - } -} - -// ProviderAddr returns the provider address for the receiver, by parsing the -// string representation saved in state. An error can be returned if the -// value in state is corrupt.
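// NOTE (editorial sketch, not part of the original file): the ResourceStateKey
// helpers above (ParseResourceStateKey and String) round-trip the keys used in
// ModuleState.Resources. The key here is hypothetical; this assumes the
// surrounding package scope plus "fmt".
//
//	rsk, err := ParseResourceStateKey("data.aws_ami.web.0")
//	if err != nil {
//		return err // only "type.name" and "type.name.index" forms parse
//	}
//	// rsk.Mode == DataResourceMode, rsk.Type == "aws_ami",
//	// rsk.Name == "web", rsk.Index == 0
//	fmt.Println(rsk.String()) // "data.aws_ami.web.0"
//
// A key without an index ("aws_instance.web") parses with Index == -1, and
// String then omits the index again, so the round trip is lossless.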
-func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) { - var diags tfdiags.Diagnostics - - str := s.Provider - traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(travDiags) - if travDiags.HasErrors() { - return addrs.AbsProviderConfig{}, diags.Err() - } - - addr, addrDiags := addrs.ParseAbsProviderConfig(traversal) - diags = diags.Append(addrDiags) - return addr, diags.Err() -} - -func (s *ResourceState) init() { - s.Lock() - defer s.Unlock() - - if s.Primary == nil { - s.Primary = &InstanceState{} - } - s.Primary.init() - - if s.Dependencies == nil { - s.Dependencies = []string{} - } - - if s.Deposed == nil { - s.Deposed = make([]*InstanceState, 0) - } -} - -// prune is used to remove any instances that are no longer required -func (s *ResourceState) prune() { - s.Lock() - defer s.Unlock() - - n := len(s.Deposed) - for i := 0; i < n; i++ { - inst := s.Deposed[i] - if inst == nil || inst.ID == "" { - copy(s.Deposed[i:], s.Deposed[i+1:]) - s.Deposed[n-1] = nil - n-- - i-- - } - } - s.Deposed = s.Deposed[:n] - - s.Dependencies = uniqueStrings(s.Dependencies) -} - -func (s *ResourceState) sort() { - s.Lock() - defer s.Unlock() - - sort.Strings(s.Dependencies) -} - -func (s *ResourceState) String() string { - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) - return buf.String() -} - -// InstanceState is used to track the unique state information belonging -// to a given instance. -type InstanceState struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes"` - - // Ephemeral is used to store any state associated with this instance - // that is necessary for the Terraform run to complete, but is not - // persisted to a state file. - Ephemeral EphemeralState `json:"-"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. The value here must only contain Go primitives - // and collections. - Meta map[string]interface{} `json:"meta"` - - // Tainted is used to mark a resource for recreation. - Tainted bool `json:"tainted"` - - mu sync.Mutex -} - -func (s *InstanceState) Lock() { s.mu.Lock() } -func (s *InstanceState) Unlock() { s.mu.Unlock() } - -func (s *InstanceState) init() { - s.Lock() - defer s.Unlock() - - if s.Attributes == nil { - s.Attributes = make(map[string]string) - } - if s.Meta == nil { - s.Meta = make(map[string]interface{}) - } - s.Ephemeral.init() -} - -// NewInstanceStateShimmedFromValue is a shim method to lower a new-style -// object value representing the attributes of an instance object into the -// legacy InstanceState representation. -// -// This is for shimming to old components only and should not be used in new code. 
-func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { - attrs := hcl2shim.FlatmapValueFromHCL2(state) - return &InstanceState{ - ID: attrs["id"], - Attributes: attrs, - Meta: map[string]interface{}{ - "schema_version": schemaVersion, - }, - } -} - -// AttrsAsObjectValue shims from the legacy InstanceState representation to -// a new-style cty object value representation of the state attributes, using -// the given type for guidance. -// -// The given type must be the implied type of the schema of the resource type -// of the object whose state is being converted, or the result is undefined. -// -// This is for shimming from old components only and should not be used in -// new code. -func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { - if s == nil { - // if the state is nil, we need to construct a complete cty.Value with - // null attributes, rather than a single cty.NullVal(ty) - s = &InstanceState{} - } - - if s.Attributes == nil { - s.Attributes = map[string]string{} - } - - // make sure ID is included in the attributes. The InstanceState.ID value - // takes precedence. - if s.ID != "" { - s.Attributes["id"] = s.ID - } - - return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) -} - -// Copy all the Fields from another InstanceState -func (s *InstanceState) Set(from *InstanceState) { - s.Lock() - defer s.Unlock() - - from.Lock() - defer from.Unlock() - - s.ID = from.ID - s.Attributes = from.Attributes - s.Ephemeral = from.Ephemeral - s.Meta = from.Meta - s.Tainted = from.Tainted -} - -func (s *InstanceState) DeepCopy() *InstanceState { - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*InstanceState) -} - -func (s *InstanceState) Empty() bool { - if s == nil { - return true - } - s.Lock() - defer s.Unlock() - - return s.ID == "" -} - -func (s *InstanceState) Equal(other *InstanceState) bool { - // Short circuit some nil checks - if s == nil || other == nil { - return s == other - } - s.Lock() - defer s.Unlock() - - // IDs must be equal - if s.ID != other.ID { - return false - } - - // Attributes must be equal - if len(s.Attributes) != len(other.Attributes) { - return false - } - for k, v := range s.Attributes { - otherV, ok := other.Attributes[k] - if !ok { - return false - } - - if v != otherV { - return false - } - } - - // Meta must be equal - if len(s.Meta) != len(other.Meta) { - return false - } - if s.Meta != nil && other.Meta != nil { - // We only do the deep check if both are non-nil. If one is nil - // we treat it as equal since their lengths are both zero (check - // above). - // - // Since this can contain numeric values that may change types during - // serialization, let's compare the serialized values. - sMeta, err := json.Marshal(s.Meta) - if err != nil { - // marshaling primitives shouldn't ever error out - panic(err) - } - otherMeta, err := json.Marshal(other.Meta) - if err != nil { - panic(err) - } - - if !bytes.Equal(sMeta, otherMeta) { - return false - } - } - - if s.Tainted != other.Tainted { - return false - } - - return true -} - -// MergeDiff takes a ResourceDiff and merges the attributes into -// this resource state in order to generate a new state. This new -// state can be used to provide updated attribute lookups for -// variable interpolation. -// -// If the diff attribute requires computing the value, and hence -// won't be available until apply, the value is replaced with the -// computeID. 
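// NOTE (editorial sketch, not part of the original file): what MergeDiff
// (below) produces for the three attribute-diff flavors: removed attributes
// are deleted, computed ones get the unknown-value placeholder, and ordinary
// changes take the new value. The values are hypothetical; InstanceDiff and
// ResourceAttrDiff are defined elsewhere in this same package.
//
//	state := &InstanceState{
//		ID:         "i-abc123",
//		Attributes: map[string]string{"ami": "ami-old", "tags.env": "dev"},
//	}
//	diff := &InstanceDiff{
//		Attributes: map[string]*ResourceAttrDiff{
//			"ami":      {Old: "ami-old", New: "ami-new"},
//			"tags.env": {NewRemoved: true},
//			"ip":       {NewComputed: true},
//		},
//	}
//	merged := state.MergeDiff(diff)
//	// merged.Attributes: "ami" => "ami-new",
//	// "ip" => hcl2shim.UnknownVariableValue, and "tags.env" is gone.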
-func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { - result := s.DeepCopy() - if result == nil { - result = new(InstanceState) - } - result.init() - - if s != nil { - s.Lock() - defer s.Unlock() - for k, v := range s.Attributes { - result.Attributes[k] = v - } - } - if d != nil { - for k, diff := range d.CopyAttributes() { - if diff.NewRemoved { - delete(result.Attributes, k) - continue - } - if diff.NewComputed { - result.Attributes[k] = hcl2shim.UnknownVariableValue - continue - } - - result.Attributes[k] = diff.New - } - } - - return result -} - -func (s *InstanceState) String() string { - notCreated := "<not created>" - - if s == nil { - return notCreated - } - - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - - if s.ID == "" { - return notCreated - } - - buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) - - attributes := s.Attributes - attrKeys := make([]string, 0, len(attributes)) - for ak := range attributes { - if ak == "id" { - continue - } - - attrKeys = append(attrKeys, ak) - } - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) - } - - buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) - - return buf.String() -} - -// EphemeralState is used for transient state that is only kept in-memory -type EphemeralState struct { - // ConnInfo is used for the providers to export information which is - // used to connect to the resource for provisioning. For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` - - // Type is used to specify the resource type for this instance. This is only - // required for import operations (as documented). If the documentation - // doesn't state that you need to set this, then don't worry about - // setting it. - Type string `json:"-"` -} - -func (e *EphemeralState) init() { - if e.ConnInfo == nil { - e.ConnInfo = make(map[string]string) - } -} - -func (e *EphemeralState) DeepCopy() *EphemeralState { - copy, err := copystructure.Config{Lock: true}.Copy(e) - if err != nil { - panic(err) - } - - return copy.(*EphemeralState) -} - -type jsonStateVersionIdentifier struct { - Version int `json:"version"` -} - -// Check if this is a V0 format - the magic bytes at the start of the file -// should be "tfstate" if so. We no longer support upgrading this type of -// state but return an error message explaining to a user how they can -// upgrade via the 0.6.x series. -func testForV0State(buf *bufio.Reader) error { - start, err := buf.Peek(len("tfstate")) - if err != nil { - return fmt.Errorf("Failed to check for magic bytes: %v", err) - } - if string(start) == "tfstate" { - return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" + - "format which was used prior to Terraform 0.3. Please upgrade\n" + - "this state file using Terraform 0.6.16 prior to using it with\n" + - "Terraform 0.7.") - } - - return nil -} - -// ErrNoState is returned by ReadState when the io.Reader contains no data -var ErrNoState = errors.New("no state") - -// ReadState reads a state structure out of a reader in the format that -// was written by WriteState. -func ReadState(src io.Reader) (*State, error) { - // check for a nil file specifically, since that produces a platform - // specific error if we try to use it in a bufio.Reader.
- if f, ok := src.(*os.File); ok && f == nil { - return nil, ErrNoState - } - - buf := bufio.NewReader(src) - - if _, err := buf.Peek(1); err != nil { - if err == io.EOF { - return nil, ErrNoState - } - return nil, err - } - - if err := testForV0State(buf); err != nil { - return nil, err - } - - // If we are JSON we buffer the whole thing in memory so we can read it twice. - // This is suboptimal, but will work for now. - jsonBytes, err := ioutil.ReadAll(buf) - if err != nil { - return nil, fmt.Errorf("Reading state file failed: %v", err) - } - - versionIdentifier := &jsonStateVersionIdentifier{} - if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil { - return nil, fmt.Errorf("Decoding state file version failed: %v", err) - } - - var result *State - switch versionIdentifier.Version { - case 0: - return nil, fmt.Errorf("State version 0 is not supported as JSON.") - case 1: - v1State, err := ReadStateV1(jsonBytes) - if err != nil { - return nil, err - } - - v2State, err := upgradeStateV1ToV2(v1State) - if err != nil { - return nil, err - } - - v3State, err := upgradeStateV2ToV3(v2State) - if err != nil { - return nil, err - } - - // increment the Serial whenever we upgrade state - v3State.Serial++ - result = v3State - case 2: - v2State, err := ReadStateV2(jsonBytes) - if err != nil { - return nil, err - } - v3State, err := upgradeStateV2ToV3(v2State) - if err != nil { - return nil, err - } - - v3State.Serial++ - result = v3State - case 3: - v3State, err := ReadStateV3(jsonBytes) - if err != nil { - return nil, err - } - - result = v3State - default: - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), versionIdentifier.Version) - } - - // If we reached this place we must have a result set - if result == nil { - panic("resulting state in load not set, assertion failed") - } - - // Prune the state when we read it. It's possible to write unpruned states or - // for a user to make a state unpruned (nil-ing a module state for example). - result.prune() - - // Validate the state file is valid - if err := result.Validate(); err != nil { - return nil, err - } - - return result, nil -} - -func ReadStateV1(jsonBytes []byte) (*stateV1, error) { - v1State := &stateV1{} - if err := json.Unmarshal(jsonBytes, v1State); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - if v1State.Version != 1 { - return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+ - "read %d, expected 1", v1State.Version) - } - - return v1State, nil -} - -func ReadStateV2(jsonBytes []byte) (*State, error) { - state := &State{} - if err := json.Unmarshal(jsonBytes, state); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - // Check the version, this is to ensure we don't read a future - // version that we don't understand - if state.Version > StateVersion { - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), state.Version) - } - - // Make sure the version is semantic - if state.TFVersion != "" { - if _, err := version.NewVersion(state.TFVersion); err != nil { - return nil, fmt.Errorf( - "State contains invalid version: %s\n\n"+ - "Terraform validates the version format prior to writing it. This\n"+ - "means that this is indicative of the state becoming corrupted through\n"+ - "some external means. 
Please manually modify the Terraform version\n"+ - "field to be a proper semantic version.", - state.TFVersion) - } - } - - // catch any uninitialized fields in the state - state.init() - - // Sort it - state.sort() - - return state, nil -} - -func ReadStateV3(jsonBytes []byte) (*State, error) { - state := &State{} - if err := json.Unmarshal(jsonBytes, state); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - // Check the version, this is to ensure we don't read a future - // version that we don't understand - if state.Version > StateVersion { - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), state.Version) - } - - // Make sure the version is semantic - if state.TFVersion != "" { - if _, err := version.NewVersion(state.TFVersion); err != nil { - return nil, fmt.Errorf( - "State contains invalid version: %s\n\n"+ - "Terraform validates the version format prior to writing it. This\n"+ - "means that this is indicative of the state becoming corrupted through\n"+ - "some external means. Please manually modify the Terraform version\n"+ - "field to be a proper semantic version.", - state.TFVersion) - } - } - - // catch any uninitialized fields in the state - state.init() - - // Sort it - state.sort() - - // Now we write the state back out to detect any changes in normalization. - // If our state is now written out differently, bump the serial number to - // prevent conflicts. - var buf bytes.Buffer - err := WriteState(state, &buf) - if err != nil { - return nil, err - } - - if !bytes.Equal(jsonBytes, buf.Bytes()) { - log.Println("[INFO] state modified during read or write. incrementing serial number") - state.Serial++ - } - - return state, nil -} - -// WriteState writes a state in JSON format to the given writer. -func WriteState(d *State, dst io.Writer) error { - // writing a nil state is a noop. - if d == nil { - return nil - } - - // make sure we have no uninitialized fields - d.init() - - // Make sure it is sorted - d.sort() - - // Ensure the version is set - d.Version = StateVersion - - // If the TFVersion is set, verify it. We used to just set the version - // here, but this isn't safe since it changes the MD5 sum on some remote - // state storage backends such as Atlas. We now leave it be if needed. - if d.TFVersion != "" { - if _, err := version.NewVersion(d.TFVersion); err != nil { - return fmt.Errorf( - "Error writing state, invalid version: %s\n\n"+ - "The Terraform version when writing the state must be a semantic\n"+ - "version.", - d.TFVersion) - } - } - - // Encode the data in a human-friendly way - data, err := json.MarshalIndent(d, "", " ") - if err != nil { - return fmt.Errorf("Failed to encode state: %s", err) - } - - // We append a newline to the data because MarshalIndent doesn't - data = append(data, '\n') - - // Write the data out to the dst - if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil { - return fmt.Errorf("Failed to write state: %v", err) - } - - return nil -} - -// resourceNameSort implements the sort.Interface to sort name parts lexically for -// strings and numerically for integer indexes.
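// NOTE (editorial sketch, not part of the original file): the comparator below
// is what makes ModuleState.String print "web.2" before "web.10" — index parts
// compare numerically, everything else lexically, and numbers sort before
// strings. Hypothetical input:
//
//	names := []string{"aws_instance.web.10", "aws_instance.web.2", "aws_instance.db"}
//	sort.Sort(resourceNameSort(names))
//	// => ["aws_instance.db", "aws_instance.web.2", "aws_instance.web.10"]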
-type resourceNameSort []string - -func (r resourceNameSort) Len() int { return len(r) } -func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } - -func (r resourceNameSort) Less(i, j int) bool { - iParts := strings.Split(r[i], ".") - jParts := strings.Split(r[j], ".") - - end := len(iParts) - if len(jParts) < end { - end = len(jParts) - } - - for idx := 0; idx < end; idx++ { - if iParts[idx] == jParts[idx] { - continue - } - - // sort on the first non-matching part - iInt, iIntErr := strconv.Atoi(iParts[idx]) - jInt, jIntErr := strconv.Atoi(jParts[idx]) - - switch { - case iIntErr == nil && jIntErr == nil: - // sort numerically if both parts are integers - return iInt < jInt - case iIntErr == nil: - // numbers sort before strings - return true - case jIntErr == nil: - return false - default: - return iParts[idx] < jParts[idx] - } - } - - return r[i] < r[j] -} - -// moduleStateSort implements sort.Interface to sort module states -type moduleStateSort []*ModuleState - -func (s moduleStateSort) Len() int { - return len(s) -} - -func (s moduleStateSort) Less(i, j int) bool { - a := s[i] - b := s[j] - - // If either is nil, then the nil one is "less" than the other - if a == nil || b == nil { - return a == nil - } - - // If the lengths are different, then the shorter one always wins - if len(a.Path) != len(b.Path) { - return len(a.Path) < len(b.Path) - } - - // Otherwise, compare lexically - return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") -} - -func (s moduleStateSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -const stateValidateErrMultiModule = ` -Multiple modules with the same path: %s - -This means that there are multiple entries in the "modules" field -in your state file that point to the same module. This will cause Terraform -to behave in unexpected and error prone ways and is invalid. Please back up -and modify your state file manually to resolve this. 
-` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go deleted file mode 100644 index aa13cce803..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go +++ /dev/null @@ -1,189 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/mitchellh/copystructure" -) - -// upgradeStateV1ToV2 is used to upgrade a V1 state representation -// into a V2 state representation -func upgradeStateV1ToV2(old *stateV1) (*State, error) { - if old == nil { - return nil, nil - } - - remote, err := old.Remote.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - - modules := make([]*ModuleState, len(old.Modules)) - for i, module := range old.Modules { - upgraded, err := module.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - modules[i] = upgraded - } - if len(modules) == 0 { - modules = nil - } - - newState := &State{ - Version: 2, - Serial: old.Serial, - Remote: remote, - Modules: modules, - } - - newState.sort() - newState.init() - - return newState, nil -} - -func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) { - if old == nil { - return nil, nil - } - - config, err := copystructure.Copy(old.Config) - if err != nil { - return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) - } - - return &RemoteState{ - Type: old.Type, - Config: config.(map[string]string), - }, nil -} - -func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) { - if old == nil { - return nil, nil - } - - pathRaw, err := copystructure.Copy(old.Path) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - path, ok := pathRaw.([]string) - if !ok { - return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") - } - if len(path) == 0 { - // We found some V1 states with a nil path. Assume root and catch - // duplicate path errors later (as part of Validate). 
- path = rootModulePath - } - - // Outputs needs upgrading to use the new structure - outputs := make(map[string]*OutputState) - for key, output := range old.Outputs { - outputs[key] = &OutputState{ - Type: "string", - Value: output, - Sensitive: false, - } - } - - resources := make(map[string]*ResourceState) - for key, oldResource := range old.Resources { - upgraded, err := oldResource.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - resources[key] = upgraded - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - - return &ModuleState{ - Path: path, - Outputs: outputs, - Resources: resources, - Dependencies: dependencies.([]string), - }, nil -} - -func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) { - if old == nil { - return nil, nil - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - primary, err := old.Primary.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - deposed := make([]*InstanceState, len(old.Deposed)) - for i, v := range old.Deposed { - upgraded, err := v.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - deposed[i] = upgraded - } - if len(deposed) == 0 { - deposed = nil - } - - return &ResourceState{ - Type: old.Type, - Dependencies: dependencies.([]string), - Primary: primary, - Deposed: deposed, - Provider: old.Provider, - }, nil -} - -func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) { - if old == nil { - return nil, nil - } - - attributes, err := copystructure.Copy(old.Attributes) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - ephemeral, err := old.Ephemeral.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - meta, err := copystructure.Copy(old.Meta) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - newMeta := make(map[string]interface{}) - for k, v := range meta.(map[string]string) { - newMeta[k] = v - } - - return &InstanceState{ - ID: old.ID, - Attributes: attributes.(map[string]string), - Ephemeral: *ephemeral, - Meta: newMeta, - }, nil -} - -func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) { - connInfo, err := copystructure.Copy(old.ConnInfo) - if err != nil { - return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err) - } - return &EphemeralState{ - ConnInfo: connInfo.(map[string]string), - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go deleted file mode 100644 index e52d35fcd1..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go +++ /dev/null @@ -1,142 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "regexp" - "sort" - "strconv" - "strings" -) - -// The upgrade process from V2 to V3 state does not affect the structure, -// so we do not need to redeclare all of the structs involved - we just -// take a deep copy of the old structure and assert the version number is -// as we expect. 
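// NOTE (editorial sketch, not part of the original file): what the attribute
// rewrite in upgradeAttributesV2ToV3 (below) actually changes. Flatmap counts
// for map-like collections switch from "#" to "%", list-like collections
// (all-numeric subkeys) keep "#", and counts for empty collections are
// dropped. A hypothetical instance:
//
//	// v2 attributes                 // v3 attributes
//	"tags.#":    "2"                 "tags.%":    "2"
//	"tags.env":  "dev"               "tags.env":  "dev"
//	"tags.team": "core"              "tags.team": "core"
//	"ports.#":   "2"                 "ports.#":   "2"
//	"ports.0":   "80"                "ports.0":   "80"
//	"ports.1":   "443"               "ports.1":   "443"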
-func upgradeStateV2ToV3(old *State) (*State, error) { - new := old.DeepCopy() - - // Ensure the copied version is v2 before attempting to upgrade - if new.Version != 2 { - return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " + - "a state which is not version 2.") - } - - // Set the new version number - new.Version = 3 - - // Change the counts for things which look like maps to use the % - // syntax. Remove counts for empty collections - they will be added - // back in later. - for _, module := range new.Modules { - for _, resource := range module.Resources { - // Upgrade Primary - if resource.Primary != nil { - upgradeAttributesV2ToV3(resource.Primary) - } - - // Upgrade Deposed - if resource.Deposed != nil { - for _, deposed := range resource.Deposed { - upgradeAttributesV2ToV3(deposed) - } - } - } - } - - return new, nil -} - -func upgradeAttributesV2ToV3(instanceState *InstanceState) error { - collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) - collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) - - // Identify the key prefix of anything which is a collection - var collectionKeyPrefixes []string - for key := range instanceState.Attributes { - if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) - } - } - sort.Strings(collectionKeyPrefixes) - - log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) - - // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not - // run very often. - for _, prefix := range collectionKeyPrefixes { - // First get the actual keys that belong to this prefix - var potentialKeysMatching []string - for key := range instanceState.Attributes { - if strings.HasPrefix(key, prefix) { - potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) - } - } - sort.Strings(potentialKeysMatching) - - var actualKeysMatching []string - for _, key := range potentialKeysMatching { - if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - actualKeysMatching = append(actualKeysMatching, submatches[0][1]) - } else { - if key != "#" { - actualKeysMatching = append(actualKeysMatching, key) - } - } - } - actualKeysMatching = uniqueSortedStrings(actualKeysMatching) - - // Now inspect the keys in order to determine whether this is most likely to be - // a map, list or set. There is room for error here, so we log in each case. If - // there is no method of telling, we remove the key from the InstanceState in - // order that it will be recreated. Again, this could be rolled into fewer loops - // but we prefer clarity. - - oldCountKey := fmt.Sprintf("%s#", prefix) - - // First, detect "obvious" maps - which have non-numeric keys (mostly). - hasNonNumericKeys := false - for _, key := range actualKeysMatching { - if _, err := strconv.Atoi(key); err != nil { - hasNonNumericKeys = true - } - } - if hasNonNumericKeys { - newCountKey := fmt.Sprintf("%s%%", prefix) - - instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", - strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) - } - - // Now detect empty collections and remove them from state. 
- if len(actualKeysMatching) == 0 { - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", - strings.TrimSuffix(prefix, ".")) - } - } - - return nil -} - -// uniqueSortedStrings removes duplicates from a slice of strings and returns -// a sorted slice of the unique strings. -func uniqueSortedStrings(input []string) []string { - uniquemap := make(map[string]struct{}) - for _, str := range input { - uniquemap[str] = struct{}{} - } - - output := make([]string, len(uniquemap)) - - i := 0 - for key := range uniquemap { - output[i] = key - i = i + 1 - } - - sort.Strings(output) - return output -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go deleted file mode 100644 index 68cffb41b5..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go +++ /dev/null @@ -1,145 +0,0 @@ -package terraform - -// stateV1 keeps track of a snapshot state-of-the-world that Terraform -// can use to keep track of what real world resources it is actually -// managing. -// -// stateV1 is only used for the purposes of backwards compatibility -// and is no longer used in Terraform. -// -// For the upgrade process, see state_upgrade_v1_to_v2.go -type stateV1 struct { - // Version is the protocol version. "1" for a StateV1. - Version int `json:"version"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *remoteStateV1 `json:"remote,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*moduleStateV1 `json:"modules"` -} - -type remoteStateV1 struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` -} - -type moduleStateV1 struct { - // Path is the import path from the root module. Module imports are - // always disjoint, so the path represents a module tree - Path []string `json:"path"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]string `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*resourceStateV1 `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: a module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. 
- Dependencies []string `json:"depends_on,omitempty"` -} - -type resourceStateV1 struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on,omitempty"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instance on which providers will act. - Primary *instanceStateV1 `json:"primary"` - - // Tainted is used to track any underlying instances that - // have been created but are in a bad or unknown state and - // need to be cleaned up subsequently. In the - // standard case, there is at most a single instance. - // However, in pathological cases, it is possible for the number - // of instances to accumulate. - Tainted []*instanceStateV1 `json:"tainted,omitempty"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. If there were problems creating the - // replacement, the instance remains in the Deposed list so it can be - // destroyed in a future run. Functionally, Deposed instances are very - // similar to Tainted instances in that Terraform is only tracking them in - // order to remember to destroy them. - Deposed []*instanceStateV1 `json:"deposed,omitempty"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider,omitempty"` -} - -type instanceStateV1 struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes,omitempty"` - - // Ephemeral is used to store any state associated with this instance - // that is necessary for the Terraform run to complete, but is not - // persisted to a state file. - Ephemeral ephemeralStateV1 `json:"-"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. 
- Meta map[string]string `json:"meta,omitempty"` -} - -type ephemeralStateV1 struct { - // ConnInfo is used for the providers to export information which is - // used to connect to the resource for provisioning. For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go deleted file mode 100644 index 3f0418d927..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "os" - "testing" -) - -// TestStateFile writes the given state to the path. -func TestStateFile(t *testing.T, path string, state *State) { - f, err := os.Create(path) - if err != nil { - t.Fatalf("err: %s", err) - } - defer f.Close() - - if err := WriteState(state, f); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go deleted file mode 100644 index f9559f41b6..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go +++ /dev/null @@ -1,62 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// GraphTransformer is the interface that transformers implement. This -// interface is only for transforms that need entire graph visibility. -type GraphTransformer interface { - Transform(*Graph) error } - -// GraphVertexTransformer is an interface that transforms a single -// Vertex within the graph. This is a specialization of GraphTransformer -// that makes it easy to do vertex replacement. -// -// The GraphTransformer that runs through the GraphVertexTransformers is -// VertexTransformer. -type GraphVertexTransformer interface { - Transform(dag.Vertex) (dag.Vertex, error) -} - -// GraphTransformIf is a helper function that conditionally returns the -// given GraphTransformer. This is useful for calling inline a sequence -// of transforms without having to split it up into multiple append() calls. -func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer { - if f() { - return then - } - - return nil -} - -type graphTransformerMulti struct { - Transforms []GraphTransformer -} - -func (t *graphTransformerMulti) Transform(g *Graph) error { - var lastStepStr string - for _, t := range t.Transforms { - log.Printf("[TRACE] (graphTransformerMulti) Executing graph transform %T", t) - if err := t.Transform(g); err != nil { - return err - } - if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { - log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T with new graph:\n%s------", t, thisStepStr) - lastStepStr = thisStepStr - } else { - log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T (no changes)", t) - } - } - - return nil -} - -// GraphTransformMulti combines multiple graph transformers into a single -// GraphTransformer that runs all the individual graph transformers. 
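// NOTE (editorial sketch, not part of the original file): a typical composition
// of the transformers in this package — build one GraphTransformer out of
// several and run it once. The cfg, schemas, and g values are hypothetical;
// AttachResourceConfigTransformer and AttachSchemaTransformer are defined in
// the files deleted below.
//
//	transform := GraphTransformMulti(
//		&AttachResourceConfigTransformer{Config: cfg},
//		&AttachSchemaTransformer{Schemas: schemas},
//	)
//	if err := transform.Transform(g); err != nil {
//		return err
//	}
//
// Observe that GraphTransformIf returns nil when its predicate is false, and
// graphTransformerMulti above does not skip nil entries, so conditional steps
// are better gated before being appended to the multi-transformer.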
-func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer { - return &graphTransformerMulti{Transforms: ts} -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go deleted file mode 100644 index cbac13387b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// GraphNodeAttachProvider is an interface that must be implemented by nodes -// that want provider configurations attached. -type GraphNodeAttachProvider interface { - // Must be implemented to determine the path for the configuration - GraphNodeSubPath - - // ProviderAddr with no module prefix. Example: "aws". - ProviderAddr() addrs.AbsProviderConfig - - // Sets the configuration - AttachProvider(*configs.Provider) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go deleted file mode 100644 index 23578c7846..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go +++ /dev/null @@ -1,74 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes -// that want resource configurations attached. -type GraphNodeAttachResourceConfig interface { - GraphNodeResource - - // Sets the configuration - AttachResourceConfig(*configs.Resource) -} - -// AttachResourceConfigTransformer goes through the graph and attaches -// resource configuration structures to nodes that implement -// GraphNodeAttachResourceConfig. -// -// The attached configuration structures are directly from the configuration. -// If they're going to be modified, a copy should be made. -type AttachResourceConfigTransformer struct { - Config *configs.Config // Config is the root node in the config tree -} - -func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { - - // Go through and find GraphNodeAttachResource - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachResource implementations - arn, ok := v.(GraphNodeAttachResourceConfig) - if !ok { - continue - } - - // Determine what we're looking for - addr := arn.ResourceAddr() - - // Get the configuration. 
- config := t.Config.DescendentForInstance(addr.Module) - if config == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: %q (%T) has no configuration available", dag.VertexName(v), v) - continue - } - - for _, r := range config.Module.ManagedResources { - rAddr := r.Addr() - - if rAddr != addr.Resource { - // Not the same resource - continue - } - - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %s", dag.VertexName(v), v, r.DeclRange) - arn.AttachResourceConfig(r) - } - for _, r := range config.Module.DataResources { - rAddr := r.Addr() - - if rAddr != addr.Resource { - // Not the same resource - continue - } - - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %#v", dag.VertexName(v), v, r.DeclRange) - arn.AttachResourceConfig(r) - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go deleted file mode 100644 index fee220b52b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go +++ /dev/null @@ -1,99 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// GraphNodeAttachResourceSchema is an interface implemented by node types -// that need a resource schema attached. -type GraphNodeAttachResourceSchema interface { - GraphNodeResource - GraphNodeProviderConsumer - - AttachResourceSchema(schema *configschema.Block, version uint64) -} - -// GraphNodeAttachProviderConfigSchema is an interface implemented by node types -// that need a provider configuration schema attached. -type GraphNodeAttachProviderConfigSchema interface { - GraphNodeProvider - - AttachProviderConfigSchema(*configschema.Block) -} - -// GraphNodeAttachProvisionerSchema is an interface implemented by node types -// that need one or more provisioner schemas attached. -type GraphNodeAttachProvisionerSchema interface { - ProvisionedBy() []string - - // AttachProvisionerSchema is called during transform for each provisioner - // type returned from ProvisionedBy, providing the configuration schema - // for each provisioner in turn. The implementer should save these for - // later use in evaluating provisioner configuration blocks. - AttachProvisionerSchema(name string, schema *configschema.Block) -} - -// AttachSchemaTransformer finds nodes that implement -// GraphNodeAttachResourceSchema, GraphNodeAttachProviderConfigSchema, or -// GraphNodeAttachProvisionerSchema, looks up the needed schemas for each -// and then passes them to a method implemented by the node. -type AttachSchemaTransformer struct { - Schemas *Schemas -} - -func (t *AttachSchemaTransformer) Transform(g *Graph) error { - if t.Schemas == nil { - // Should never happen with a reasonable caller, but we'll return a - // proper error here anyway so that we'll fail gracefully. 
- return fmt.Errorf("AttachSchemaTransformer used with nil Schemas") - } - - for _, v := range g.Vertices() { - - if tv, ok := v.(GraphNodeAttachResourceSchema); ok { - addr := tv.ResourceAddr() - mode := addr.Resource.Mode - typeName := addr.Resource.Type - providerAddr, _ := tv.ProvidedBy() - providerType := providerAddr.ProviderConfig.Type - - schema, version := t.Schemas.ResourceTypeConfig(providerType, mode, typeName) - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No resource schema available for %s", addr) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching resource schema to %s", dag.VertexName(v)) - tv.AttachResourceSchema(schema, version) - } - - if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok { - providerAddr := tv.ProviderAddr() - schema := t.Schemas.ProviderConfig(providerAddr.ProviderConfig.Type) - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No provider config schema available for %s", providerAddr) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching provider config schema to %s", dag.VertexName(v)) - tv.AttachProviderConfigSchema(schema) - } - - if tv, ok := v.(GraphNodeAttachProvisionerSchema); ok { - names := tv.ProvisionedBy() - for _, name := range names { - schema := t.Schemas.ProvisionerConfig(name) - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No schema available for provisioner %q on %q", name, dag.VertexName(v)) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching provisioner %q config schema to %s", name, dag.VertexName(v)) - tv.AttachProvisionerSchema(name, schema) - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go deleted file mode 100644 index f874948798..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go +++ /dev/null @@ -1,68 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// GraphNodeAttachResourceState is an interface that can be implemented -// to request that a ResourceState is attached to the node. -// -// Due to a historical naming inconsistency, the type ResourceState actually -// represents the state for a particular _instance_, while InstanceState -// represents the values for that instance during a particular phase -// (e.g. primary vs. deposed). Consequently, GraphNodeAttachResourceState -// is supported only for nodes that represent resource instances, even though -// the name might suggest it is for containing resources. -type GraphNodeAttachResourceState interface { - GraphNodeResourceInstance - - // Sets the state - AttachResourceState(*states.Resource) -} - -// AttachStateTransformer goes through the graph and attaches -// state to nodes that implement the interfaces above. -type AttachStateTransformer struct { - State *states.State // State is the root state -} - -func (t *AttachStateTransformer) Transform(g *Graph) error { - // If no state, then nothing to do - if t.State == nil { - log.Printf("[DEBUG] Not attaching any node states: overall state is nil") - return nil - } - - for _, v := range g.Vertices() { - // Nodes implement this interface to request state attachment. 
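These attach-style transformers all share one shape: walk every vertex, type-assert against a narrow capability interface, and hand the matching nodes their data. A minimal, self-contained sketch of that pattern, with toy Graph and config types standing in for the SDK's internal ones:

```go
package main

import "fmt"

// Toy stand-ins for the SDK's internal graph types.
type Vertex interface{}

type Graph struct{ vertices []Vertex }

func (g *Graph) Add(v Vertex)       { g.vertices = append(g.vertices, v) }
func (g *Graph) Vertices() []Vertex { return g.vertices }

type ResourceConfig struct{ Name string }

// ConfigAttacher is the capability interface: nodes opt in to
// attachment simply by implementing it.
type ConfigAttacher interface {
	ResourceName() string
	AttachResourceConfig(*ResourceConfig)
}

// AttachTransformer walks the graph and attaches config to opted-in nodes.
type AttachTransformer struct {
	Configs map[string]*ResourceConfig // keyed by resource name
}

func (t *AttachTransformer) Transform(g *Graph) error {
	for _, v := range g.Vertices() {
		an, ok := v.(ConfigAttacher) // only nodes implementing the interface
		if !ok {
			continue
		}
		if cfg, ok := t.Configs[an.ResourceName()]; ok {
			an.AttachResourceConfig(cfg)
		}
	}
	return nil
}

type resourceNode struct {
	name   string
	config *ResourceConfig
}

func (n *resourceNode) ResourceName() string                    { return n.name }
func (n *resourceNode) AttachResourceConfig(c *ResourceConfig) { n.config = c }

func main() {
	g := &Graph{}
	n := &resourceNode{name: "aws_instance.web"}
	g.Add(n)

	t := &AttachTransformer{Configs: map[string]*ResourceConfig{
		"aws_instance.web": {Name: "aws_instance.web"},
	}}
	if err := t.Transform(g); err != nil {
		panic(err)
	}
	fmt.Println(n.config != nil) // true: config was attached
}
```

The SDK's versions differ only in what gets attached (config, schema, or state) and how the lookup key is derived from the node's address.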
- an, ok := v.(GraphNodeAttachResourceState) - if !ok { - continue - } - addr := an.ResourceInstanceAddr() - - rs := t.State.Resource(addr.ContainingResource()) - if rs == nil { - log.Printf("[DEBUG] Resource state not found for node %q, instance %s", dag.VertexName(v), addr) - continue - } - - is := rs.Instance(addr.Resource.Key) - if is == nil { - // We don't actually need this here, since we'll attach the whole - // resource state, but we still check because it'd be weird - // for the specific instance we're attaching to not to exist. - log.Printf("[DEBUG] Resource instance state not found for node %q, instance %s", dag.VertexName(v), addr) - continue - } - - // make sure to attach a copy of the state, so instances can modify the - // same ResourceState. - an.AttachResourceState(rs.DeepCopy()) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go deleted file mode 100644 index 8920761ea2..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go +++ /dev/null @@ -1,133 +0,0 @@ -package terraform - -import ( - "log" - "sync" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// ConfigTransformer is a GraphTransformer that adds all the resources -// from the configuration to the graph. -// -// The module used to configure this transformer must be the root module. -// -// Only resources are added to the graph. Variables, outputs, and -// providers must be added via other transforms. -// -// Unlike ConfigTransformerOld, this transformer creates a graph with -// all resources including module resources, rather than creating module -// nodes that are then "flattened". -type ConfigTransformer struct { - Concrete ConcreteResourceNodeFunc - - // Module is the module to add resources from. - Config *configs.Config - - // Unique will only add resources that aren't already present in the graph. - Unique bool - - // Mode will only add resources that match the given mode - ModeFilter bool - Mode addrs.ResourceMode - - l sync.Mutex - uniqueMap map[string]struct{} -} - -func (t *ConfigTransformer) Transform(g *Graph) error { - // Lock since we use some internal state - t.l.Lock() - defer t.l.Unlock() - - // If no configuration is available, we don't do anything - if t.Config == nil { - return nil - } - - // Reset the uniqueness map. If we're tracking uniques, then populate - // it with addresses. - t.uniqueMap = make(map[string]struct{}) - defer func() { t.uniqueMap = nil }() - if t.Unique { - for _, v := range g.Vertices() { - if rn, ok := v.(GraphNodeResource); ok { - t.uniqueMap[rn.ResourceAddr().String()] = struct{}{} - } - } - } - - // Start the transformation process - return t.transform(g, t.Config) -} - -func (t *ConfigTransformer) transform(g *Graph, config *configs.Config) error { - // If no config, do nothing - if config == nil { - return nil - } - - // Add our resources - if err := t.transformSingle(g, config); err != nil { - return err - } - - // Transform all the children. 
- for _, c := range config.Children { - if err := t.transform(g, c); err != nil { - return err - } - } - - return nil -} - -func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config) error { - path := config.Path - module := config.Module - log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", path) - - // For now we assume that each module call produces only one module - // instance with no key, since we don't yet support "count" and "for_each" - // on modules. - // FIXME: As part of supporting "count" and "for_each" on modules, rework - // this so that we'll "expand" the module call first and then create graph - // nodes for each module instance separately. - instPath := path.UnkeyedInstanceShim() - - allResources := make([]*configs.Resource, 0, len(module.ManagedResources)+len(module.DataResources)) - for _, r := range module.ManagedResources { - allResources = append(allResources, r) - } - for _, r := range module.DataResources { - allResources = append(allResources, r) - } - - for _, r := range allResources { - relAddr := r.Addr() - - if t.ModeFilter && relAddr.Mode != t.Mode { - // Skip non-matching modes - continue - } - - addr := relAddr.Absolute(instPath) - if _, ok := t.uniqueMap[addr.String()]; ok { - // We've already seen a resource with this address. This should - // never happen, because we enforce uniqueness in the config loader. - continue - } - - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go deleted file mode 100644 index 892f75ec17..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go +++ /dev/null @@ -1,33 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// CountBoundaryTransformer adds a node that depends on everything else -// so that it runs last in order to clean up the state for nodes that -// are on the "count boundary": "foo.0" when only one exists becomes "foo" -type CountBoundaryTransformer struct { - Config *configs.Config -} - -func (t *CountBoundaryTransformer) Transform(g *Graph) error { - node := &NodeCountBoundary{ - Config: t.Config, - } - g.Add(node) - - // Depends on everything - for _, v := range g.Vertices() { - // Don't connect to ourselves - if v == node { - continue - } - - // Connect! - g.Connect(dag.BasicEdge(node, v)) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go deleted file mode 100644 index 98e088eee7..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go +++ /dev/null @@ -1,297 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// GraphNodeDestroyerCBD must be implemented by nodes that might be -// create-before-destroy destroyers, or might plan a create-before-destroy -// action. 
-type GraphNodeDestroyerCBD interface { - // CreateBeforeDestroy returns true if this node represents a node - // that is doing a CBD. - CreateBeforeDestroy() bool - - // ModifyCreateBeforeDestroy is called when the CBD state of a node - // is changed dynamically. This can return an error if this isn't - // allowed. - ModifyCreateBeforeDestroy(bool) error -} - -// GraphNodeAttachDestroyer is implemented by applyable nodes that have a -// companion destroy node. This allows the creation node to look up the status -// of the destroy node and determine if it needs to depose the existing state, -// or replace it. -// If a node is not marked as create-before-destroy in the configuration, but a -// dependency forces that status, only the destroy node will be aware of that -// status. -type GraphNodeAttachDestroyer interface { - // AttachDestroyNode takes a destroy node and saves a reference to that - // node in the receiver, so it can later check the status of - // CreateBeforeDestroy(). - AttachDestroyNode(n GraphNodeDestroyerCBD) -} - -// ForcedCBDTransformer detects when a particular CBD-able graph node has -// dependencies with another that has create_before_destroy set that require -// it to be forced on, and forces it on. -// -// This must be used in the plan graph builder to ensure that -// create_before_destroy settings are properly propagated before constructing -// the planned changes. This requires that the plannable resource nodes -// implement GraphNodeDestroyerCBD. -type ForcedCBDTransformer struct { -} - -func (t *ForcedCBDTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - dn, ok := v.(GraphNodeDestroyerCBD) - if !ok { - continue - } - - if !dn.CreateBeforeDestroy() { - // If there are no CBD descendents (dependent nodes), then we - // do nothing here. - if !t.hasCBDDescendent(g, v) { - log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) has no CBD descendent, so skipping", dag.VertexName(v), v) - continue - } - - // If this isn't naturally a CBD node, this means that a descendent is - // and we need to auto-upgrade this node to CBD. We do this because - // a CBD node depending on non-CBD will result in cycles. To avoid this, - // we always attempt to upgrade it. - log.Printf("[TRACE] ForcedCBDTransformer: forcing create_before_destroy on for %q (%T)", dag.VertexName(v), v) - if err := dn.ModifyCreateBeforeDestroy(true); err != nil { - return fmt.Errorf( - "%s: must have create before destroy enabled because "+ - "a dependent resource has CBD enabled. However, when "+ - "attempting to automatically do this, an error occurred: %s", - dag.VertexName(v), err) - } - } else { - log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) already has create_before_destroy set", dag.VertexName(v), v) - } - } - return nil -} - -// hasCBDDescendent returns true if any descendent (node that depends on this) -// has CBD set. -func (t *ForcedCBDTransformer) hasCBDDescendent(g *Graph, v dag.Vertex) bool { - s, _ := g.Descendents(v) - if s == nil { - return true - } - - for _, ov := range s.List() { - dn, ok := ov.(GraphNodeDestroyerCBD) - if !ok { - continue - } - - if dn.CreateBeforeDestroy() { - // some descendent is CreateBeforeDestroy, so we need to follow suit - log.Printf("[TRACE] ForcedCBDTransformer: %q has CBD descendent %q", dag.VertexName(v), dag.VertexName(ov)) - return true - } - } - - return false -} - -// CBDEdgeTransformer modifies the edges of CBD nodes that went through -// the DestroyEdgeTransformer to have the right dependencies.
There are -// two real tasks here: -// -// 1. With CBD, the destroy edge is inverted: the destroy depends on -// the creation. -// -// 2. A_d must depend on resources that depend on A. This is to enable -// the destroy to only happen once nodes that depend on A successfully -// update to A. Example: adding a web server updates the load balancer -// before deleting the old web server. -// -// This transformer requires that a previous transformer has already forced -// create_before_destroy on for nodes that are depended on by explicit CBD -// nodes. This is the logic in ForcedCBDTransformer, though in practice we -// will get here by recording the CBD-ness of each change in the plan during -// the plan walk and then forcing the nodes into the appropriate setting during -// DiffTransformer when building the apply graph. -type CBDEdgeTransformer struct { - // Module and State are only needed to look up dependencies in - // any way possible. Either can be nil if not available. - Config *configs.Config - State *states.State - - // If configuration is present then Schemas is required in order to - // obtain schema information from providers and provisioners so we can - // properly resolve implicit dependencies. - Schemas *Schemas - - // If the operation is a simple destroy, no transformation is done. - Destroy bool -} - -func (t *CBDEdgeTransformer) Transform(g *Graph) error { - if t.Destroy { - return nil - } - - // Go through and reverse any destroy edges - destroyMap := make(map[string][]dag.Vertex) - for _, v := range g.Vertices() { - dn, ok := v.(GraphNodeDestroyerCBD) - if !ok { - continue - } - dern, ok := v.(GraphNodeDestroyer) - if !ok { - continue - } - - if !dn.CreateBeforeDestroy() { - continue - } - - // Find the destroy edge. There should only be one. - for _, e := range g.EdgesTo(v) { - // Not a destroy edge, ignore it - de, ok := e.(*DestroyEdge) - if !ok { - continue - } - - log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s", - dag.VertexName(de.Source()), dag.VertexName(de.Target())) - - // Found it! Invert. - g.RemoveEdge(de) - applyNode := de.Source() - destroyNode := de.Target() - g.Connect(&DestroyEdge{S: destroyNode, T: applyNode}) - break - } - - // If the address has an index, we strip that. Our depMap creation - // graph doesn't expand counts so we don't currently get _exact_ - // dependencies. One day when we limit dependencies more exactly - // this will have to change. We have a test case covering this - // (depNonCBDCountBoth) so it'll be caught. - addr := dern.DestroyAddr() - key := addr.ContainingResource().String() - - // Add this to the list of nodes that we need to fix up - // the edges for (step 2 above in the docs). - destroyMap[key] = append(destroyMap[key], v) - } - - // If we have no CBD nodes, then our work here is done - if len(destroyMap) == 0 { - return nil - } - - // We have CBD nodes. We now have to move on to the much more difficult - // task of connecting dependencies of the creation side of the destroy - // to the destruction node. The easiest way to explain this is an example: - // - // Given a pre-destroy dependence of: A => B - // And A has CBD set. - // - // The resulting graph should be: A => B => A_d - // - // The key here is that B happens before A is destroyed. This is to - // facilitate the primary purpose for CBD: making sure that downstreams - // are properly updated to avoid downtime before the resource is destroyed.
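The A => B => A_d shape can be checked with a toy ordering exercise. A minimal sketch, not SDK code, using a plain dependency table and a small topological sort:

```go
package main

import "fmt"

// deps maps each step to the steps that must finish before it may run.
type deps map[string][]string

// order is a tiny Kahn-style topological sort, good enough for these
// small acyclic examples.
func order(d deps) []string {
	done := map[string]bool{}
	var out []string
	for len(out) < len(d) {
		for step, pre := range d {
			if done[step] {
				continue
			}
			ready := true
			for _, p := range pre {
				if !done[p] {
					ready = false
					break
				}
			}
			if ready {
				done[step] = true
				out = append(out, step)
			}
		}
	}
	return out
}

func main() {
	// Plain replacement of A (B depends on A): destroy first, then create.
	plain := deps{
		"destroy A": {},
		"create A":  {"destroy A"},
		"update B":  {"create A"},
	}
	// With create_before_destroy, the destroy edge is inverted and the
	// old A is destroyed only after B has moved over: A => B => A_d.
	cbd := deps{
		"create A":  {},
		"update B":  {"create A"},
		"destroy A": {"create A", "update B"},
	}
	fmt.Println(order(plain)) // [destroy A create A update B]
	fmt.Println(order(cbd))   // [create A update B destroy A]
}
```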
- depMap, err := t.depMap(g, destroyMap) - if err != nil { - return err - } - - // We now have the mapping of resource addresses to the destroy - // nodes they need to depend on. We now go through our own vertices to - // find any matching these addresses and make the connection. - for _, v := range g.Vertices() { - // We're looking for creators - rn, ok := v.(GraphNodeCreator) - if !ok { - continue - } - - // Get the address - addr := rn.CreateAddr() - - // If the address has an index, we strip that. Our depMap creation - // graph doesn't expand counts so we don't currently get _exact_ - // dependencies. One day when we limit dependencies more exactly - // this will have to change. We have a test case covering this - // (depNonCBDCount) so it'll be caught. - key := addr.ContainingResource().String() - - // If there is nothing this resource should depend on, ignore it - dns, ok := depMap[key] - if !ok { - continue - } - - // We have nodes! Make the connection - for _, dn := range dns { - log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s", - dag.VertexName(dn), dag.VertexName(v)) - g.Connect(dag.BasicEdge(dn, v)) - } - } - - return nil -} - -func (t *CBDEdgeTransformer) depMap(g *Graph, destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) { - // Build the list of destroy nodes that each resource address should depend - // on. For example, when we find B, we map the address of B to A_d in the - // "depMap" variable below. - depMap := make(map[string][]dag.Vertex) - for _, v := range g.Vertices() { - // We're looking for resources. - rn, ok := v.(GraphNodeResource) - if !ok { - continue - } - - // Get the address - addr := rn.ResourceAddr() - key := addr.String() - - // Get the destroy nodes that are destroying this resource. - // If there aren't any, then we don't need to worry about - // any connections. - dns, ok := destroyMap[key] - if !ok { - continue - } - - // Get the nodes that depend on this one. In the example above: - // finding B in A => B. Since dependencies can span modules, walk all - // descendents of the resource. - des, _ := g.Descendents(v) - for _, v := range des.List() { - // We're looking for resources. - rn, ok := v.(GraphNodeResource) - if !ok { - continue - } - - // Keep track of the destroy nodes that this address - // needs to depend on. - key := rn.ResourceAddr().String() - depMap[key] = append(depMap[key], dns...) - } - } - - return depMap, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go deleted file mode 100644 index 1d211570fc..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go +++ /dev/null @@ -1,323 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// GraphNodeDestroyer must be implemented by nodes that destroy resources. -type GraphNodeDestroyer interface { - dag.Vertex - - // DestroyAddr is the address of the resource that is being - // destroyed by this node. If this returns nil, then this node - // is not destroying anything. - DestroyAddr() *addrs.AbsResourceInstance -} - -// GraphNodeCreator must be implemented by nodes that create OR update resources.
-type GraphNodeCreator interface { - // CreateAddr is the address of the resource being created or updated - CreateAddr() *addrs.AbsResourceInstance -} - -// DestroyEdgeTransformer is a GraphTransformer that creates the proper -// references for destroy resources. Destroy resources are more complex -// in that they must depend on the destruction of resources that -// in turn depend on the CREATION of the node being destroyed. -// -// That is complicated. Visually: -// -// B_d -> A_d -> A -> B -// -// Notice that A destroy depends on B destroy, while B create depends on -// A create. They're inverted. This must be done for example because often -// dependent resources will block parent resources from deleting. Concrete -// example: VPC with subnets, the VPC can't be deleted while there are -// still subnets. -type DestroyEdgeTransformer struct { - // These are needed to properly build the graph of dependencies - // to determine what a destroy node depends on. Any of these can be nil. - Config *configs.Config - State *states.State - - // If configuration is present then Schemas is required in order to - // obtain schema information from providers and provisioners in order - // to properly resolve implicit dependencies. - Schemas *Schemas -} - -func (t *DestroyEdgeTransformer) Transform(g *Graph) error { - // Build a map of what is being destroyed (by address string) to - // the list of destroyers. Usually there will be at most one destroyer - // per node, but we allow multiple if present for completeness. - destroyers := make(map[string][]GraphNodeDestroyer) - destroyerAddrs := make(map[string]addrs.AbsResourceInstance) - for _, v := range g.Vertices() { - dn, ok := v.(GraphNodeDestroyer) - if !ok { - continue - } - - addrP := dn.DestroyAddr() - if addrP == nil { - continue - } - addr := *addrP - - key := addr.String() - log.Printf("[TRACE] DestroyEdgeTransformer: %q (%T) destroys %s", dag.VertexName(dn), v, key) - destroyers[key] = append(destroyers[key], dn) - destroyerAddrs[key] = addr - } - - // If we aren't destroying anything, there will be no edges to make - // so just exit early and avoid future work. - if len(destroyers) == 0 { - return nil - } - - // Go through and connect creators to destroyers. Going along with - // our example, this makes: A_d => A - for _, v := range g.Vertices() { - cn, ok := v.(GraphNodeCreator) - if !ok { - continue - } - - addr := cn.CreateAddr() - if addr == nil { - continue - } - - key := addr.String() - ds := destroyers[key] - if len(ds) == 0 { - continue - } - - for _, d := range ds { - // For illustrating our example - a_d := d.(dag.Vertex) - a := v - - log.Printf( - "[TRACE] DestroyEdgeTransformer: connecting creator %q with destroyer %q", - dag.VertexName(a), dag.VertexName(a_d)) - - g.Connect(&DestroyEdge{S: a, T: a_d}) - - // Attach the destroy node to the creator - // There really shouldn't be more than one destroyer, but even if - // there are, any of them will represent the correct - // CreateBeforeDestroy status. - if n, ok := cn.(GraphNodeAttachDestroyer); ok { - if d, ok := d.(GraphNodeDestroyerCBD); ok { - n.AttachDestroyNode(d) - } - } - } - } - - // This is strange but is the easiest way to get the dependencies - // of a node that is being destroyed. We use another graph to make sure - // the resource is in the graph and ask for references. We have to do this - // because the node that is being destroyed may NOT be in the graph. - // - // Example: resource A is force new, then destroy A AND create A are - // in the graph.
BUT if resource A is just pure destroy, then only - // destroy A is in the graph, and create A is not. - providerFn := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{NodeAbstractProvider: a} - } - steps := []GraphTransformer{ - // Add the local values - &LocalTransformer{Config: t.Config}, - - // Add outputs and metadata - &OutputTransformer{Config: t.Config}, - &AttachResourceConfigTransformer{Config: t.Config}, - &AttachStateTransformer{State: t.State}, - - // Add all the variables. We can depend on resources through - // variables due to module parameters, and we need to properly - // determine that. - &RootVariableTransformer{Config: t.Config}, - &ModuleVariableTransformer{Config: t.Config}, - - TransformProviders(nil, providerFn, t.Config), - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: t.Schemas}, - - &ReferenceTransformer{}, - } - - // Go through all the nodes being destroyed and create a graph. - // The resulting graph is only of things being CREATED. For example, - // following our example, the resulting graph would be: - // - // A, B (with no edges) - // - var tempG Graph - var tempDestroyed []dag.Vertex - for d := range destroyers { - // d is the string key for the resource being destroyed. We actually - // want the address value, which we stashed earlier. - addr := destroyerAddrs[d] - - // This part is a little bit weird but is the best way to - // find the dependencies we need to: build a graph and use the - // attach config and state transformers then ask for references. - abstract := NewNodeAbstractResourceInstance(addr) - tempG.Add(abstract) - tempDestroyed = append(tempDestroyed, abstract) - - // We also add the destroy version here since the destroy can - // depend on things that the creation doesn't (destroy provisioners). - destroy := &NodeDestroyResourceInstance{NodeAbstractResourceInstance: abstract} - tempG.Add(destroy) - tempDestroyed = append(tempDestroyed, destroy) - } - - // Run the graph transforms so we have the information we need to - // build references. - log.Printf("[TRACE] DestroyEdgeTransformer: constructing temporary graph for analysis of references, starting from:\n%s", tempG.StringWithNodeTypes()) - for _, s := range steps { - log.Printf("[TRACE] DestroyEdgeTransformer: running %T on temporary graph", s) - if err := s.Transform(&tempG); err != nil { - log.Printf("[TRACE] DestroyEdgeTransformer: %T failed: %s", s, err) - return err - } - } - log.Printf("[TRACE] DestroyEdgeTransformer: temporary reference graph:\n%s", tempG.String()) - - // Go through all the nodes in the graph and determine what they - // depend on. - for _, v := range tempDestroyed { - // Find all ancestors of this to determine the edges we'll depend on - vs, err := tempG.Ancestors(v) - if err != nil { - return err - } - - refs := make([]dag.Vertex, 0, vs.Len()) - for _, raw := range vs.List() { - refs = append(refs, raw.(dag.Vertex)) - } - - refNames := make([]string, len(refs)) - for i, ref := range refs { - refNames[i] = dag.VertexName(ref) - } - log.Printf( - "[TRACE] DestroyEdgeTransformer: creation node %q references %s", - dag.VertexName(v), refNames) - - // If we have no references, then we won't need to do anything - if len(refs) == 0 { - continue - } - - // Get the destroy node for this. In the example of our struct, - // we are currently at B and we're looking for B_d. 
- rn, ok := v.(GraphNodeResourceInstance) - if !ok { - log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s, since it's not a resource", dag.VertexName(v)) - continue - } - - addr := rn.ResourceInstanceAddr() - dns := destroyers[addr.String()] - - // We have dependencies, check if any are being destroyed - // to build the list of things that we must depend on! - // - // In the example of the struct, if we have: - // - // B_d => A_d => A => B - // - // Then at this point in the algorithm we started with B_d, - // we built B (to get dependencies), and we found A. We're now looking - // to see if A_d exists. - var depDestroyers []dag.Vertex - for _, v := range refs { - rn, ok := v.(GraphNodeResourceInstance) - if !ok { - continue - } - - addr := rn.ResourceInstanceAddr() - key := addr.String() - if ds, ok := destroyers[key]; ok { - for _, d := range ds { - depDestroyers = append(depDestroyers, d.(dag.Vertex)) - log.Printf( - "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s", - key, dag.VertexName(d)) - } - } - } - - // Go through and make the connections. Use the variable - // names "a_d" and "b_d" to reference our example. - for _, a_d := range dns { - for _, b_d := range depDestroyers { - if b_d != a_d { - log.Printf("[TRACE] DestroyEdgeTransformer: %q depends on %q", dag.VertexName(b_d), dag.VertexName(a_d)) - g.Connect(dag.BasicEdge(b_d, a_d)) - } - } - } - } - - return t.pruneResources(g) -} - -// If there are only destroy instances for a particular resource, there's no -// reason for the resource node to prepare the state. Remove Resource nodes so -// that they don't fail by trying to evaluate a resource that is only being -// destroyed along with its dependencies. -func (t *DestroyEdgeTransformer) pruneResources(g *Graph) error { - for _, v := range g.Vertices() { - n, ok := v.(*NodeApplyableResource) - if !ok { - continue - } - - // if there are only destroy dependencies, we don't need this node - des, err := g.Descendents(n) - if err != nil { - return err - } - - descendents := des.List() - nonDestroyInstanceFound := false - for _, v := range descendents { - if _, ok := v.(*NodeApplyableResourceInstance); ok { - nonDestroyInstanceFound = true - break - } - } - - if nonDestroyInstanceFound { - continue - } - - // connect all the through-edges, then delete the node - for _, d := range g.DownEdges(n).List() { - for _, u := range g.UpEdges(n).List() { - g.Connect(dag.BasicEdge(u, d)) - } - } - log.Printf("DestroyEdgeTransformer: pruning unused resource node %s", dag.VertexName(n)) - g.Remove(n) - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go deleted file mode 100644 index b7a237fce3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go +++ /dev/null @@ -1,184 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/plans" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// DiffTransformer is a GraphTransformer that adds graph nodes representing -// each of the resource changes described in the given Changes object. 
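Reduced to its core, the transformer below maps each planned action onto the nodes it needs: an apply node, a destroy node, or both for a replacement. A hedged sketch of that dispatch (action names echo the plans package; everything else is a simplified stand-in):

```go
package main

import "fmt"

type action string

const (
	planNoOp             action = "no-op"
	planUpdate           action = "update"
	planDelete           action = "delete"
	planDeleteThenCreate action = "delete-then-create"
	planCreateThenDelete action = "create-then-delete"
)

// nodesFor mirrors the action switch in DiffTransformer.Transform:
// replacements need both an apply node and a destroy node, and only
// CreateThenDelete marks the destroy as create-before-destroy.
func nodesFor(a action) (update, destroy, createBeforeDestroy bool) {
	switch a {
	case planNoOp:
		// no nodes at all
	case planDelete:
		destroy = true
	case planDeleteThenCreate, planCreateThenDelete:
		update = true
		destroy = true
		createBeforeDestroy = a == planCreateThenDelete
	default:
		update = true
	}
	return
}

func main() {
	u, d, cbd := nodesFor(planCreateThenDelete)
	fmt.Printf("update=%v destroy=%v cbd=%v\n", u, d, cbd) // all true
}
```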
-type DiffTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - State *states.State - Changes *plans.Changes -} - -func (t *DiffTransformer) Transform(g *Graph) error { - if t.Changes == nil || len(t.Changes.Resources) == 0 { - // Nothing to do! - return nil - } - - // Go through all the modules in the diff. - log.Printf("[TRACE] DiffTransformer starting") - - var diags tfdiags.Diagnostics - state := t.State - changes := t.Changes - - // DiffTransformer creates resource _instance_ nodes. If there are any - // whole-resource nodes already in the graph, we must ensure that they - // get evaluated before any of the corresponding instances by creating - // dependency edges, so we'll do some prep work here to ensure we'll only - // create connections to nodes that existed before we started here. - resourceNodes := map[string][]GraphNodeResource{} - for _, node := range g.Vertices() { - rn, ok := node.(GraphNodeResource) - if !ok { - continue - } - // We ignore any instances that _also_ implement - // GraphNodeResourceInstance, since in the unlikely event that they - // do exist we'd probably end up creating cycles by connecting them. - if _, ok := node.(GraphNodeResourceInstance); ok { - continue - } - - addr := rn.ResourceAddr().String() - resourceNodes[addr] = append(resourceNodes[addr], rn) - } - - for _, rc := range changes.Resources { - addr := rc.Addr - dk := rc.DeposedKey - - log.Printf("[TRACE] DiffTransformer: found %s change for %s %s", rc.Action, addr, dk) - - // Depending on the action we'll need some different combinations of - // nodes, because destroying uses a special node type separate from - // other actions. - var update, delete, createBeforeDestroy bool - switch rc.Action { - case plans.NoOp: - continue - case plans.Delete: - delete = true - case plans.DeleteThenCreate, plans.CreateThenDelete: - update = true - delete = true - createBeforeDestroy = (rc.Action == plans.CreateThenDelete) - default: - update = true - } - - if dk != states.NotDeposed && update { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid planned change for deposed object", - fmt.Sprintf("The plan contains a non-delete change for %s deposed object %s. The only valid action for a deposed object is to destroy it, so this is a bug in Terraform.", addr, dk), - )) - continue - } - - // If we're going to do a create_before_destroy Replace operation then - // we need to allocate a DeposedKey to use to retain the - // not-yet-destroyed prior object, so that the delete node can destroy - // _that_ rather than the newly-created node, which will be current - // by the time the delete node is visited. - if update && delete && createBeforeDestroy { - // In this case, variable dk will be the _pre-assigned_ DeposedKey - // that must be used if the update graph node deposes the current - // instance, which will then align with the same key we pass - // into the destroy node to ensure we destroy exactly the deposed - // object we expect. - if state != nil { - ris := state.ResourceInstance(addr) - if ris == nil { - // Should never happen, since we don't plan to replace an - // instance that doesn't exist yet. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid planned change", - fmt.Sprintf("The plan contains a replace change for %s, which doesn't exist yet. 
This is a bug in Terraform.", addr), - )) - continue - } - - // Allocating a deposed key separately from using it can be racy - // in general, but we assume here that nothing except the apply - // node we instantiate below will actually make new deposed objects - // in practice, and so the set of already-used keys will not change - // between now and then. - dk = ris.FindUnusedDeposedKey() - } else { - // If we have no state at all yet then we can use _any_ - // DeposedKey. - dk = states.NewDeposedKey() - } - } - - if update { - // All actions except destroying use the node type chosen by t.Concrete - abstract := NewNodeAbstractResourceInstance(addr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - if createBeforeDestroy { - // We'll attach our pre-allocated DeposedKey to the node if - // it supports that. NodeApplyableResourceInstance is the - // specific concrete node type we are looking for here really, - // since that's the only node type that might depose objects. - if dn, ok := node.(GraphNodeDeposer); ok { - dn.SetPreallocatedDeposedKey(dk) - } - log.Printf("[TRACE] DiffTransformer: %s will be represented by %s, deposing prior object to %s", addr, dag.VertexName(node), dk) - } else { - log.Printf("[TRACE] DiffTransformer: %s will be represented by %s", addr, dag.VertexName(node)) - } - - g.Add(node) - rsrcAddr := addr.ContainingResource().String() - for _, rsrcNode := range resourceNodes[rsrcAddr] { - g.Connect(dag.BasicEdge(node, rsrcNode)) - } - } - - if delete { - // Destroying always uses a destroy-specific node type, though - // which one depends on whether we're destroying a current object - // or a deposed object. - var node GraphNodeResourceInstance - abstract := NewNodeAbstractResourceInstance(addr) - if dk == states.NotDeposed { - node = &NodeDestroyResourceInstance{ - NodeAbstractResourceInstance: abstract, - DeposedKey: dk, - } - node.(*NodeDestroyResourceInstance).ModifyCreateBeforeDestroy(createBeforeDestroy) - } else { - node = &NodeDestroyDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: abstract, - DeposedKey: dk, - } - } - if dk == states.NotDeposed { - log.Printf("[TRACE] DiffTransformer: %s will be represented for destruction by %s", addr, dag.VertexName(node)) - } else { - log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for destruction by %s", addr, dk, dag.VertexName(node)) - } - g.Add(node) - } - - } - - log.Printf("[TRACE] DiffTransformer complete") - - return diags.Err() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go deleted file mode 100644 index 03eac685eb..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go +++ /dev/null @@ -1,48 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// GraphNodeExpandable is an interface that nodes can implement to -// signal that they can be expanded. Expanded nodes turn into -// GraphNodeSubgraph nodes within the graph. -type GraphNodeExpandable interface { - Expand(GraphBuilder) (GraphNodeSubgraph, error) -} - -// GraphNodeDynamicExpandable is an interface that nodes can implement -// to signal that they can be expanded at eval-time (hence dynamic). -// These nodes are given the eval context and are expected to return -// a new subgraph.
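Dynamic expansion boils down to a walker that, on reaching such a node, builds the subgraph and walks it in place. A minimal sketch with toy types (the real walk threads a full EvalContext rather than a string):

```go
package main

import "fmt"

type Graph struct{ nodes []interface{} }

func (g *Graph) Add(n interface{}) { g.nodes = append(g.nodes, n) }

// DynamicExpander mirrors GraphNodeDynamicExpandable: the node builds a
// subgraph at walk time, using whatever context it is given.
type DynamicExpander interface {
	DynamicExpand(ctx string) (*Graph, error)
}

type leaf string

type expander struct{ children []leaf }

func (e *expander) DynamicExpand(ctx string) (*Graph, error) {
	g := &Graph{}
	for _, c := range e.children {
		g.Add(c)
	}
	return g, nil
}

// walk visits every node; when a node can expand, its subgraph is
// walked recursively in the node's place.
func walk(g *Graph, ctx string) error {
	for _, n := range g.nodes {
		if ex, ok := n.(DynamicExpander); ok {
			sub, err := ex.DynamicExpand(ctx)
			if err != nil {
				return err
			}
			if err := walk(sub, ctx); err != nil {
				return err
			}
			continue
		}
		fmt.Println("visit:", n)
	}
	return nil
}

func main() {
	g := &Graph{}
	g.Add(leaf("root"))
	g.Add(&expander{children: []leaf{"a[0]", "a[1]"}})
	_ = walk(g, "eval-ctx")
}
```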
-type GraphNodeDynamicExpandable interface { - DynamicExpand(EvalContext) (*Graph, error) -} - -// GraphNodeSubgraph is an interface a node can implement if it has -// a larger subgraph that should be walked. -type GraphNodeSubgraph interface { - Subgraph() dag.Grapher -} - -// ExpandTransform is a transformer that does a subgraph expansion -// at graph transform time (vs. at eval time). The benefit of earlier -// subgraph expansion is that errors with the graph build can be detected -// at an earlier stage. -type ExpandTransform struct { - Builder GraphBuilder -} - -func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) { - ev, ok := v.(GraphNodeExpandable) - if !ok { - // This isn't an expandable vertex, so just ignore it. - return v, nil - } - - // Expand the subgraph! - log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev)) - return ev.Expand(t.Builder) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go deleted file mode 100644 index 2ce23ddbec..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// ImportProviderValidateTransformer is a GraphTransformer that goes through -// the providers in the graph and validates that they only depend on variables. -type ImportProviderValidateTransformer struct{} - -func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { - var diags tfdiags.Diagnostics - - for _, v := range g.Vertices() { - // We only care about providers - pv, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // We only care about providers that reference things - rn, ok := pv.(GraphNodeReferencer) - if !ok { - continue - } - - for _, ref := range rn.References() { - if _, ok := ref.Subject.(addrs.InputVariable); !ok { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider dependency for import", - Detail: fmt.Sprintf("The configuration for %s depends on %s. Providers used with import must either have literal configuration or refer only to input variables.", pv.ProviderAddr(), ref.Subject.String()), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - } - } - - return diags.Err() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go deleted file mode 100644 index 7dd2c4876d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go +++ /dev/null @@ -1,239 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/providers" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// ImportStateTransformer is a GraphTransformer that adds nodes to the -// graph to represent the imports we want to do for resources. -type ImportStateTransformer struct { - Targets []*ImportTarget -} - -func (t *ImportStateTransformer) Transform(g *Graph) error { - for _, target := range t.Targets { - // The ProviderAddr may not be supplied for non-aliased providers. 
- // This will be populated if the targets come from the cli, but tests - // may not specify implied provider addresses. - providerAddr := target.ProviderAddr - if providerAddr.ProviderConfig.Type == "" { - providerAddr = target.Addr.Resource.Resource.DefaultProviderConfig().Absolute(target.Addr.Module) - } - - node := &graphNodeImportState{ - Addr: target.Addr, - ID: target.ID, - ProviderAddr: providerAddr, - } - g.Add(node) - } - return nil -} - -type graphNodeImportState struct { - Addr addrs.AbsResourceInstance // Addr is the resource address to import into - ID string // ID is the ID to import as - ProviderAddr addrs.AbsProviderConfig // Provider address given by the user, or implied by the resource type - ResolvedProvider addrs.AbsProviderConfig // provider node address after resolution - - states []providers.ImportedResource -} - -var ( - _ GraphNodeSubPath = (*graphNodeImportState)(nil) - _ GraphNodeEvalable = (*graphNodeImportState)(nil) - _ GraphNodeProviderConsumer = (*graphNodeImportState)(nil) - _ GraphNodeDynamicExpandable = (*graphNodeImportState)(nil) -) - -func (n *graphNodeImportState) Name() string { - return fmt.Sprintf("%s (import id %q)", n.Addr, n.ID) -} - -// GraphNodeProviderConsumer -func (n *graphNodeImportState) ProvidedBy() (addrs.AbsProviderConfig, bool) { - // We assume that n.ProviderAddr has been properly populated here. - // It's the responsibility of the code creating a graphNodeImportState - // to populate this, possibly by calling DefaultProviderConfig() on the - // resource address to infer an implied provider from the resource type - // name. - return n.ProviderAddr, false -} - -// GraphNodeProviderConsumer -func (n *graphNodeImportState) SetProvider(addr addrs.AbsProviderConfig) { - n.ResolvedProvider = addr -} - -// GraphNodeSubPath -func (n *graphNodeImportState) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeEvalable impl. -func (n *graphNodeImportState) EvalTree() EvalNode { - var provider providers.Interface - - // Reset our states - n.states = nil - - // Return our sequence - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - }, - &EvalImportState{ - Addr: n.Addr.Resource, - Provider: &provider, - ID: n.ID, - Output: &n.states, - }, - }, - } -} - -// GraphNodeDynamicExpandable impl. -// -// We use DynamicExpand as a way to generate the subgraph of refreshes -// and state inserts we need to do for our import state. Since they're new -// resources they don't depend on anything else and refreshes are isolated -// so this is nearly a perfect use case for dynamic expand. -func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - g := &Graph{Path: ctx.Path()} - - // nameCounter is used to de-dup names in the state. - nameCounter := make(map[string]int) - - // Compile the list of addresses that we'll be inserting into the state. - // We do this ahead of time so we can verify that we aren't importing - // something that already exists. 
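The de-duplication in the loop that follows is a small counter trick worth seeing in isolation. A standalone sketch (dedupe is a hypothetical helper, not SDK API):

```go
package main

import "fmt"

// dedupe appends "-N" to repeated names, keeping the first occurrence
// unchanged, the same way graphNodeImportState.DynamicExpand suffixes
// duplicate resource addresses.
func dedupe(names []string) []string {
	seen := make(map[string]int)
	out := make([]string, len(names))
	for i, name := range names {
		count, ok := seen[name]
		if ok {
			count++
			out[i] = fmt.Sprintf("%s-%d", name, count)
		} else {
			out[i] = name
		}
		seen[name] = count
	}
	return out
}

func main() {
	fmt.Println(dedupe([]string{"aws_instance.a", "aws_instance.a", "aws_instance.a"}))
	// [aws_instance.a aws_instance.a-1 aws_instance.a-2]
}
```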
- addrs := make([]addrs.AbsResourceInstance, len(n.states)) - for i, state := range n.states { - addr := n.Addr - if t := state.TypeName; t != "" { - addr.Resource.Resource.Type = t - } - - // Determine if we need to suffix the name to de-dup - key := addr.String() - count, ok := nameCounter[key] - if ok { - count++ - addr.Resource.Resource.Name += fmt.Sprintf("-%d", count) - } - nameCounter[key] = count - - // Add it to our list - addrs[i] = addr - } - - // Verify that all the addresses are clear - state := ctx.State() - for _, addr := range addrs { - existing := state.ResourceInstance(addr) - if existing != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource already managed by Terraform", - fmt.Sprintf("Terraform is already managing a remote object for %s. To import to this address you must first remove the existing object from the state.", addr), - )) - continue - } - } - if diags.HasErrors() { - // Bail out early, then. - return nil, diags.Err() - } - - // For each of the states, we add a node to handle the refresh/add to state. - // "n.states" is populated by our own EvalTree with the result of - // ImportState. Since DynamicExpand is always called after EvalTree, this - // is safe. - for i, state := range n.states { - g.Add(&graphNodeImportStateSub{ - TargetAddr: addrs[i], - State: state, - ResolvedProvider: n.ResolvedProvider, - }) - } - - // Root transform for a single root - t := &RootTransformer{} - if err := t.Transform(g); err != nil { - return nil, err - } - - // Done! - return g, diags.Err() -} - -// graphNodeImportStateSub is the sub-node of graphNodeImportState -// and is part of the subgraph. This node is responsible for refreshing -// and adding a resource to the state once it is imported. -type graphNodeImportStateSub struct { - TargetAddr addrs.AbsResourceInstance - State providers.ImportedResource - ResolvedProvider addrs.AbsProviderConfig -} - -var ( - _ GraphNodeSubPath = (*graphNodeImportStateSub)(nil) - _ GraphNodeEvalable = (*graphNodeImportStateSub)(nil) -) - -func (n *graphNodeImportStateSub) Name() string { - return fmt.Sprintf("import %s result", n.TargetAddr) -} - -func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance { - return n.TargetAddr.Module -} - -// GraphNodeEvalable impl. 
-func (n *graphNodeImportStateSub) EvalTree() EvalNode { - // If the Ephemeral type isn't set, then it is an error - if n.State.TypeName == "" { - err := fmt.Errorf("import of %s didn't set type", n.TargetAddr.String()) - return &EvalReturnError{Error: &err} - } - - state := n.State.AsInstanceObject() - - var provider providers.Interface - var providerSchema *ProviderSchema - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalRefresh{ - Addr: n.TargetAddr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - }, - &EvalImportStateVerify{ - Addr: n.TargetAddr.Resource, - State: &state, - }, - &EvalWriteState{ - Addr: n.TargetAddr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go deleted file mode 100644 index b97dea2abd..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go +++ /dev/null @@ -1,48 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// LocalTransformer is a GraphTransformer that adds all the local values -// from the configuration to the graph. -type LocalTransformer struct { - Config *configs.Config -} - -func (t *LocalTransformer) Transform(g *Graph) error { - return t.transformModule(g, t.Config) -} - -func (t *LocalTransformer) transformModule(g *Graph, c *configs.Config) error { - if c == nil { - // Can't have any locals if there's no config - return nil - } - - // Our addressing system distinguishes between modules and module instances, - // but we're not yet ready to make that distinction here (since we don't - // support "count"/"for_each" on modules) and so we just do a naive - // transform of the module path into a module instance path, assuming that - // no keys are in use. This should be removed when "count" and "for_each" - // are implemented for modules. - path := c.Path.UnkeyedInstanceShim() - - for _, local := range c.Module.Locals { - addr := path.LocalValue(local.Name) - node := &NodeLocal{ - Addr: addr, - Config: local, - } - g.Add(node) - } - - // Also populate locals for child modules - for _, cc := range c.Children { - if err := t.transformModule(g, cc); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go deleted file mode 100644 index caa4b6a63c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go +++ /dev/null @@ -1,126 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// ModuleVariableTransformer is a GraphTransformer that adds all the variables -// in the configuration to the graph. -// -// Any "variable" block present in any non-root module is included here, even -// if a particular variable is not referenced from anywhere. 
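One detail of the call-site decoding below is worth isolating: a module variable with no default becomes a required attribute of the call block. A compact sketch of that schema construction, using the real hcl and cty packages but a simplified variable struct:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/zclconf/go-cty/cty"
)

// variable is a simplified stand-in for configs.Variable.
type variable struct {
	Name    string
	Default cty.Value // cty.NilVal means "no default given"
}

// callSchema builds the schema used to decode a module call block:
// every declared variable becomes an attribute, required exactly when
// it has no default.
func callSchema(vars []variable) *hcl.BodySchema {
	schema := &hcl.BodySchema{}
	for _, v := range vars {
		schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{
			Name:     v.Name,
			Required: v.Default == cty.NilVal,
		})
	}
	return schema
}

func main() {
	s := callSchema([]variable{
		{Name: "region", Default: cty.StringVal("us-east-1")},
		{Name: "name"}, // no default: required at the call site
	})
	for _, a := range s.Attributes {
		fmt.Printf("%s required=%v\n", a.Name, a.Required)
	}
}
```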
-// -// The transform will produce errors if a call to a module does not conform -// to the expected set of arguments, but this transformer is not in a good -// position to return errors and so the validate walk should include specific -// steps for validating module blocks, separate from this transform. -type ModuleVariableTransformer struct { - Config *configs.Config -} - -func (t *ModuleVariableTransformer) Transform(g *Graph) error { - return t.transform(g, nil, t.Config) -} - -func (t *ModuleVariableTransformer) transform(g *Graph, parent, c *configs.Config) error { - // We can have no variables if we have no configuration. - if c == nil { - return nil - } - - // Transform all the children first. - for _, cc := range c.Children { - if err := t.transform(g, c, cc); err != nil { - return err - } - } - - // If we're processing anything other than the root module then we'll - // add graph nodes for variables defined inside. (Variables for the root - // module are dealt with in RootVariableTransformer). - // If we have a parent, we can determine if a module variable is being - // used, so we transform this. - if parent != nil { - if err := t.transformSingle(g, parent, c); err != nil { - return err - } - } - - return nil -} - -func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, c *configs.Config) error { - - // Our addressing system distinguishes between modules and module instances, - // but we're not yet ready to make that distinction here (since we don't - // support "count"/"for_each" on modules) and so we just do a naive - // transform of the module path into a module instance path, assuming that - // no keys are in use. This should be removed when "count" and "for_each" - // are implemented for modules. - path := c.Path.UnkeyedInstanceShim() - _, call := path.Call() - - // Find the call in the parent module configuration, so we can get the - // expressions given for each input variable at the call site. - callConfig, exists := parent.Module.ModuleCalls[call.Name] - if !exists { - // This should never happen, since it indicates an improperly-constructed - // configuration tree. - panic(fmt.Errorf("no module call block found for %s", path)) - } - - // We need to construct a schema for the expected call arguments based on - // the configured variables in our config, which we can then use to - // decode the content of the call block. - schema := &hcl.BodySchema{} - for _, v := range c.Module.Variables { - schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{ - Name: v.Name, - Required: v.Default == cty.NilVal, - }) - } - - content, contentDiags := callConfig.Config.Content(schema) - if contentDiags.HasErrors() { - // Validation code elsewhere should deal with any errors before we - // get in here, but we'll report them out here just in case, to - // avoid crashes. - var diags tfdiags.Diagnostics - diags = diags.Append(contentDiags) - return diags.Err() - } - - for _, v := range c.Module.Variables { - var expr hcl.Expression - if attr := content.Attributes[v.Name]; attr != nil { - expr = attr.Expr - } else { - // No expression provided for this variable, so we'll make a - // synthetic one using the variable's default value. - expr = &hclsyntax.LiteralValueExpr{ - Val: v.Default, - SrcRange: v.DeclRange, // This is not exact, but close enough - } - } - - // For now we treat all module variables as "applyable", even though - // such nodes are valid to use on other walks too. 
We may specialize - // this in future if we find reasons to employ different behaviors - // in different scenarios. - node := &NodeApplyableModuleVariable{ - Addr: path.InputVariable(v.Name), - Config: v, - Expr: expr, - } - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go deleted file mode 100644 index 4d1323fb09..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go +++ /dev/null @@ -1,175 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" - "github.com/zclconf/go-cty/cty" -) - -// OrphanResourceCountTransformer is a GraphTransformer that adds orphans -// for an expanded count to the graph. The determination of this depends -// on the count argument given. -// -// Orphans are found by comparing the count to what is found in the state. -// This transform assumes that if an element in the state is within the count -// bounds given, that it is not an orphan. -type OrphanResourceCountTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - - Count int // Actual count of the resource, or -1 if count is not set at all - ForEach map[string]cty.Value // The ForEach map on the resource - Addr addrs.AbsResource // Addr of the resource to look for orphans - State *states.State // Full global state -} - -func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { - rs := t.State.Resource(t.Addr) - if rs == nil { - return nil // Resource doesn't exist in state, so nothing to do! - } - - haveKeys := make(map[addrs.InstanceKey]struct{}) - for key := range rs.Instances { - haveKeys[key] = struct{}{} - } - - // if for_each is set, use that transformer - if t.ForEach != nil { - return t.transformForEach(haveKeys, g) - } - if t.Count < 0 { - return t.transformNoCount(haveKeys, g) - } - if t.Count == 0 { - return t.transformZeroCount(haveKeys, g) - } - return t.transformCount(haveKeys, g) -} - -func (t *OrphanResourceCountTransformer) transformForEach(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { - // If there is a NoKey node, add this to the graph first, - // so that we can create edges to it in subsequent (StringKey) nodes. 
- // This is because the last item determines the resource mode for the whole resource, - // (see SetResourceInstanceCurrent for more information) and we need to evaluate - // an orphaned (NoKey) resource before the in-memory state is updated - // to deal with a new for_each resource - _, hasNoKeyNode := haveKeys[addrs.NoKey] - var noKeyNode dag.Vertex - if hasNoKeyNode { - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(addrs.NoKey)) - noKeyNode = abstract - if f := t.Concrete; f != nil { - noKeyNode = f(abstract) - } - g.Add(noKeyNode) - } - - for key := range haveKeys { - // If the key is no-key, we have already added it, so skip - if key == addrs.NoKey { - continue - } - - s, _ := key.(addrs.StringKey) - // If the key is present in our current for_each, carry on - if _, ok := t.ForEach[string(s)]; ok { - continue - } - - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node) - g.Add(node) - - // Add edge to noKeyNode if it exists - if hasNoKeyNode { - g.Connect(dag.BasicEdge(node, noKeyNode)) - } - } - return nil -} - -func (t *OrphanResourceCountTransformer) transformCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { - // Due to the logic in Transform, we only get in here if our count is - // at least one. - - _, have0Key := haveKeys[addrs.IntKey(0)] - - for key := range haveKeys { - if key == addrs.NoKey && !have0Key { - // If we have no 0-key then we will accept a no-key instance - // as an alias for it. - continue - } - - i, isInt := key.(addrs.IntKey) - if isInt && int(i) < t.Count { - continue - } - - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node) - g.Add(node) - } - - return nil -} - -func (t *OrphanResourceCountTransformer) transformZeroCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { - // This case is easy: we need to orphan any keys we have at all. - - for key := range haveKeys { - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceCount(zero): adding %s as %T", t.Addr, node) - g.Add(node) - } - - return nil -} - -func (t *OrphanResourceCountTransformer) transformNoCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { - // Negative count indicates that count is not set at all, in which - // case we expect to have a single instance with no key set at all. - // However, we'll also accept an instance with key 0 set as an alias - // for it, in case the user has just deleted the "count" argument and - // so wants to keep the first instance in the set. - - _, haveNoKey := haveKeys[addrs.NoKey] - _, have0Key := haveKeys[addrs.IntKey(0)] - keepKey := addrs.NoKey - if have0Key && !haveNoKey { - // If we don't have a no-key instance then we can use the 0-key instance - // instead. 
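Stripped of graph plumbing, transformCount above reduces to a pure function over the instance keys. A minimal sketch using ints, with -1 standing in for addrs.NoKey:

```go
package main

import (
	"fmt"
	"sort"
)

const noKey = -1 // stands in for addrs.NoKey

// countOrphans mirrors transformCount: with count >= 1, an instance is
// kept if its index is below count; a no-key instance is kept only as an
// alias for index 0 when no explicit 0 exists. Everything else is orphaned.
func countOrphans(haveKeys map[int]bool, count int) []int {
	var orphans []int
	for key := range haveKeys {
		if key == noKey && !haveKeys[0] {
			continue // no-key accepted as the 0 instance
		}
		if key != noKey && key < count {
			continue // still within count bounds
		}
		orphans = append(orphans, key)
	}
	sort.Ints(orphans)
	return orphans
}

func main() {
	// count = 2 with instances [no-key, 0, 1, 2, 3]: the no-key instance
	// is orphaned (an explicit 0 exists), as are 2 and 3.
	have := map[int]bool{noKey: true, 0: true, 1: true, 2: true, 3: true}
	fmt.Println(countOrphans(have, 2)) // [-1 2 3]
}
```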
- keepKey = addrs.IntKey(0) - } - - for key := range haveKeys { - if key == keepKey { - continue - } - - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceCount(no-count): adding %s as %T", t.Addr, node) - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go deleted file mode 100644 index cab10da124..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go +++ /dev/null @@ -1,60 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// OrphanOutputTransformer finds the outputs that aren't present -// in the given config that are in the state and adds them to the graph -// for deletion. -type OrphanOutputTransformer struct { - Config *configs.Config // Root of config tree - State *states.State // State is the root state -} - -func (t *OrphanOutputTransformer) Transform(g *Graph) error { - if t.State == nil { - log.Printf("[DEBUG] No state, no orphan outputs") - return nil - } - - for _, ms := range t.State.Modules { - if err := t.transform(g, ms); err != nil { - return err - } - } - return nil -} - -func (t *OrphanOutputTransformer) transform(g *Graph, ms *states.Module) error { - if ms == nil { - return nil - } - - moduleAddr := ms.Addr - - // Get the config for this path, which is nil if the entire module has been - // removed. - var outputs map[string]*configs.Output - if c := t.Config.DescendentForInstance(moduleAddr); c != nil { - outputs = c.Module.Outputs - } - - // An output is "orphaned" if it's present in the state but not declared - // in the configuration. - for name := range ms.OutputValues { - if _, exists := outputs[name]; exists { - continue - } - - g.Add(&NodeOutputOrphan{ - Addr: addrs.OutputValue{Name: name}.Absolute(moduleAddr), - }) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go deleted file mode 100644 index f927b10864..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go +++ /dev/null @@ -1,179 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// OrphanResourceInstanceTransformer is a GraphTransformer that adds orphaned -// resource instances to the graph. An "orphan" is an instance that is present -// in the state but belongs to a resource that is no longer present in the -// configuration. -// -// This is not the transformer that deals with "count orphans" (instances that -// are no longer covered by a resource's "count" or "for_each" setting); that's -// handled instead by OrphanResourceCountTransformer. -type OrphanResourceInstanceTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - - // State is the global state. We require the global state to - // properly find module orphans at our path. 
- State *states.State - - // Config is the root node in the configuration tree. We'll look up - // the appropriate note in this tree using the path in each node. - Config *configs.Config -} - -func (t *OrphanResourceInstanceTransformer) Transform(g *Graph) error { - if t.State == nil { - // If the entire state is nil, there can't be any orphans - return nil - } - if t.Config == nil { - // Should never happen: we can't be doing any Terraform operations - // without at least an empty configuration. - panic("OrphanResourceInstanceTransformer used without setting Config") - } - - // Go through the modules and for each module transform in order - // to add the orphan. - for _, ms := range t.State.Modules { - if err := t.transform(g, ms); err != nil { - return err - } - } - - return nil -} - -func (t *OrphanResourceInstanceTransformer) transform(g *Graph, ms *states.Module) error { - if ms == nil { - return nil - } - - moduleAddr := ms.Addr - - // Get the configuration for this module. The configuration might be - // nil if the module was removed from the configuration. This is okay, - // this just means that every resource is an orphan. - var m *configs.Module - if c := t.Config.DescendentForInstance(moduleAddr); c != nil { - m = c.Module - } - - // An "orphan" is a resource that is in the state but not the configuration, - // so we'll walk the state resources and try to correlate each of them - // with a configuration block. Each orphan gets a node in the graph whose - // type is decided by t.Concrete. - // - // We don't handle orphans related to changes in the "count" and "for_each" - // pseudo-arguments here. They are handled by OrphanResourceCountTransformer. - for _, rs := range ms.Resources { - if m != nil { - if r := m.ResourceByAddr(rs.Addr); r != nil { - continue - } - } - - for key := range rs.Instances { - addr := rs.Addr.Instance(key).Absolute(moduleAddr) - abstract := NewNodeAbstractResourceInstance(addr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceInstanceTransformer: adding single-instance orphan node for %s", addr) - g.Add(node) - } - } - - return nil -} - -// OrphanResourceTransformer is a GraphTransformer that adds orphaned -// resources to the graph. An "orphan" is a resource that is present in -// the state but no longer present in the config. -// -// This is separate to OrphanResourceInstanceTransformer in that it deals with -// whole resources, rather than individual instances of resources. Orphan -// resource nodes are only used during apply to clean up leftover empty -// resource state skeletons, after all of the instances inside have been -// removed. -// -// This transformer will also create edges in the graph to any pre-existing -// node that creates or destroys the entire orphaned resource or any of its -// instances, to ensure that the "orphan-ness" of a resource is always dealt -// with after all other aspects of it. -type OrphanResourceTransformer struct { - Concrete ConcreteResourceNodeFunc - - // State is the global state. - State *states.State - - // Config is the root node in the configuration tree. - Config *configs.Config -} - -func (t *OrphanResourceTransformer) Transform(g *Graph) error { - if t.State == nil { - // If the entire state is nil, there can't be any orphans - return nil - } - if t.Config == nil { - // Should never happen: we can't be doing any Terraform operations - // without at least an empty configuration. 
- panic("OrphanResourceTransformer used without setting Config") - } - - // We'll first collect up the existing nodes for each resource so we can - // create dependency edges for any new nodes we create. - deps := map[string][]dag.Vertex{} - for _, v := range g.Vertices() { - switch tv := v.(type) { - case GraphNodeResourceInstance: - k := tv.ResourceInstanceAddr().ContainingResource().String() - deps[k] = append(deps[k], v) - case GraphNodeResource: - k := tv.ResourceAddr().String() - deps[k] = append(deps[k], v) - case GraphNodeDestroyer: - k := tv.DestroyAddr().ContainingResource().String() - deps[k] = append(deps[k], v) - } - } - - for _, ms := range t.State.Modules { - moduleAddr := ms.Addr - - mc := t.Config.DescendentForInstance(moduleAddr) // might be nil if whole module has been removed - - for _, rs := range ms.Resources { - if mc != nil { - if r := mc.Module.ResourceByAddr(rs.Addr); r != nil { - // It's in the config, so nothing to do for this one. - continue - } - } - - addr := rs.Addr.Absolute(moduleAddr) - abstract := NewNodeAbstractResource(addr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceTransformer: adding whole-resource orphan node for %s", addr) - g.Add(node) - for _, dn := range deps[addr.String()] { - log.Printf("[TRACE] OrphanResourceTransformer: node %q depends on %q", dag.VertexName(node), dag.VertexName(dn)) - g.Connect(dag.BasicEdge(node, dn)) - } - } - } - - return nil - -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go deleted file mode 100644 index e2979ac5c4..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go +++ /dev/null @@ -1,95 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// OutputTransformer is a GraphTransformer that adds all the outputs -// in the configuration to the graph. -// -// This is done for the apply graph builder even if dependent nodes -// aren't changing since there is no downside: the state will be available -// even if the dependent items aren't changing. -type OutputTransformer struct { - Config *configs.Config -} - -func (t *OutputTransformer) Transform(g *Graph) error { - return t.transform(g, t.Config) -} - -func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error { - // If we have no config then there can be no outputs. - if c == nil { - return nil - } - - // Transform all the children. We must do this first because - // we can reference module outputs and they must show up in the - // reference map. - for _, cc := range c.Children { - if err := t.transform(g, cc); err != nil { - return err - } - } - - // Our addressing system distinguishes between modules and module instances, - // but we're not yet ready to make that distinction here (since we don't - // support "count"/"for_each" on modules) and so we just do a naive - // transform of the module path into a module instance path, assuming that - // no keys are in use. This should be removed when "count" and "for_each" - // are implemented for modules. 
- path := c.Path.UnkeyedInstanceShim() - - for _, o := range c.Module.Outputs { - addr := path.OutputValue(o.Name) - node := &NodeApplyableOutput{ - Addr: addr, - Config: o, - } - g.Add(node) - } - - return nil -} - -// DestroyOutputTransformer is a GraphTransformer that adds nodes to delete -// outputs during destroy. We need to do this to ensure that no stale outputs -// are ever left in the state. -type DestroyOutputTransformer struct { -} - -func (t *DestroyOutputTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - output, ok := v.(*NodeApplyableOutput) - if !ok { - continue - } - - // create the destroy node for this output - node := &NodeDestroyableOutput{ - Addr: output.Addr, - Config: output.Config, - } - - log.Printf("[TRACE] creating %s", node.Name()) - g.Add(node) - - deps, err := g.Descendents(v) - if err != nil { - return err - } - - // the destroy node must depend on the eval node - deps.Add(v) - - for _, d := range deps.List() { - log.Printf("[TRACE] %s depends on %s", node.Name(), dag.VertexName(d)) - g.Connect(dag.BasicEdge(node, d)) - } - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go deleted file mode 100644 index 9c8966face..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go +++ /dev/null @@ -1,705 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer { - return GraphTransformMulti( - // Add providers from the config - &ProviderConfigTransformer{ - Config: config, - Providers: providers, - Concrete: concrete, - }, - // Add any remaining missing providers - &MissingProviderTransformer{ - Providers: providers, - Concrete: concrete, - }, - // Connect the providers - &ProviderTransformer{ - Config: config, - }, - // Remove unused providers and proxies - &PruneProviderTransformer{}, - // Connect provider to their parent provider nodes - &ParentProviderTransformer{}, - ) -} - -// GraphNodeProvider is an interface that nodes that can be a provider -// must implement. -// -// ProviderAddr returns the address of the provider configuration this -// satisfies, which is relative to the path returned by method Path(). -// -// Name returns the full name of the provider in the config. -type GraphNodeProvider interface { - GraphNodeSubPath - ProviderAddr() addrs.AbsProviderConfig - Name() string -} - -// GraphNodeCloseProvider is an interface that nodes that can be a close -// provider must implement. The CloseProviderName returned is the name of -// the provider they satisfy. -type GraphNodeCloseProvider interface { - GraphNodeSubPath - CloseProviderAddr() addrs.AbsProviderConfig -} - -// GraphNodeProviderConsumer is an interface that nodes that require -// a provider must implement. ProvidedBy must return the address of the provider -// to use, which will be resolved to a configuration either in the same module -// or in an ancestor module, with the resulting absolute address passed to -// SetProvider. 
-type GraphNodeProviderConsumer interface { - // ProvidedBy returns the address of the provider configuration the node - // refers to. If the returned "exact" value is true, this address will - // be taken exactly. If "exact" is false, a provider configuration from - // an ancestor module may be selected instead. - ProvidedBy() (addr addrs.AbsProviderConfig, exact bool) - // Set the resolved provider address for this resource. - SetProvider(addrs.AbsProviderConfig) -} - -// ProviderTransformer is a GraphTransformer that maps resources to -// providers within the graph. This will error if there are any resources -// that don't map to proper resources. -type ProviderTransformer struct { - Config *configs.Config -} - -func (t *ProviderTransformer) Transform(g *Graph) error { - // We need to find a provider configuration address for each resource - // either directly represented by a node or referenced by a node in - // the graph, and then create graph edges from provider to provider user - // so that the providers will get initialized first. - - var diags tfdiags.Diagnostics - - // To start, we'll collect the _requested_ provider addresses for each - // node, which we'll then resolve (handling provider inheritence, etc) in - // the next step. - // Our "requested" map is from graph vertices to string representations of - // provider config addresses (for deduping) to requests. - type ProviderRequest struct { - Addr addrs.AbsProviderConfig - Exact bool // If true, inheritence from parent modules is not attempted - } - requested := map[dag.Vertex]map[string]ProviderRequest{} - needConfigured := map[string]addrs.AbsProviderConfig{} - for _, v := range g.Vertices() { - - // Does the vertex _directly_ use a provider? - if pv, ok := v.(GraphNodeProviderConsumer); ok { - requested[v] = make(map[string]ProviderRequest) - - p, exact := pv.ProvidedBy() - if exact { - log.Printf("[TRACE] ProviderTransformer: %s is provided by %s exactly", dag.VertexName(v), p) - } else { - log.Printf("[TRACE] ProviderTransformer: %s is provided by %s or inherited equivalent", dag.VertexName(v), p) - } - - requested[v][p.String()] = ProviderRequest{ - Addr: p, - Exact: exact, - } - - // Direct references need the provider configured as well as initialized - needConfigured[p.String()] = p - } - } - - // Now we'll go through all the requested addresses we just collected and - // figure out which _actual_ config address each belongs to, after resolving - // for provider inheritance and passing. - m := providerVertexMap(g) - for v, reqs := range requested { - for key, req := range reqs { - p := req.Addr - target := m[key] - - _, ok := v.(GraphNodeSubPath) - if !ok && target == nil { - // No target and no path to traverse up from - diags = diags.Append(fmt.Errorf("%s: provider %s couldn't be found", dag.VertexName(v), p)) - continue - } - - if target != nil { - log.Printf("[TRACE] ProviderTransformer: exact match for %s serving %s", p, dag.VertexName(v)) - } - - // if we don't have a provider at this level, walk up the path looking for one, - // unless we were told to be exact. 
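// Illustrative sketch (not from the original file): for a resource in
// module.child.grandchild requesting provider "aws", the walk below
// consults the provider vertex map m for, in order,
//
//   module.child.grandchild.provider.aws
//   module.child.provider.aws
//   provider.aws   (root module)
//
// stopping at the first level where a configuration vertex exists.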
- if target == nil && !req.Exact { - for pp, ok := p.Inherited(); ok; pp, ok = pp.Inherited() { - key := pp.String() - target = m[key] - if target != nil { - log.Printf("[TRACE] ProviderTransformer: %s uses inherited configuration %s", dag.VertexName(v), pp) - break - } - log.Printf("[TRACE] ProviderTransformer: looking for %s to serve %s", pp, dag.VertexName(v)) - } - } - - // If this provider doesn't need to be configured then we can just - // stub it out with an init-only provider node, which will just - // start up the provider and fetch its schema. - if _, exists := needConfigured[key]; target == nil && !exists { - stubAddr := p.ProviderConfig.Absolute(addrs.RootModuleInstance) - stub := &NodeEvalableProvider{ - &NodeAbstractProvider{ - Addr: stubAddr, - }, - } - m[stubAddr.String()] = stub - log.Printf("[TRACE] ProviderTransformer: creating init-only node for %s", stubAddr) - target = stub - g.Add(target) - } - - if target == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider configuration not present", - fmt.Sprintf( - "To work with %s its original provider configuration at %s is required, but it has been removed. This occurs when a provider configuration is removed while objects created by that provider still exist in the state. Re-add the provider configuration to destroy %s, after which you can remove the provider configuration again.", - dag.VertexName(v), p, dag.VertexName(v), - ), - )) - break - } - - // see if this in an inherited provider - if p, ok := target.(*graphNodeProxyProvider); ok { - g.Remove(p) - target = p.Target() - key = target.(GraphNodeProvider).ProviderAddr().String() - } - - log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target)) - if pv, ok := v.(GraphNodeProviderConsumer); ok { - pv.SetProvider(target.ProviderAddr()) - } - g.Connect(dag.BasicEdge(v, target)) - } - } - - return diags.Err() -} - -// CloseProviderTransformer is a GraphTransformer that adds nodes to the -// graph that will close open provider connections that aren't needed anymore. -// A provider connection is not needed anymore once all depended resources -// in the graph are evaluated. -type CloseProviderTransformer struct{} - -func (t *CloseProviderTransformer) Transform(g *Graph) error { - pm := providerVertexMap(g) - cpm := make(map[string]*graphNodeCloseProvider) - var err error - - for _, v := range pm { - p := v.(GraphNodeProvider) - key := p.ProviderAddr().String() - - // get the close provider of this type if we alread created it - closer := cpm[key] - - if closer == nil { - // create a closer for this provider type - closer = &graphNodeCloseProvider{Addr: p.ProviderAddr()} - g.Add(closer) - cpm[key] = closer - } - - // Close node depends on the provider itself - // this is added unconditionally, so it will connect to all instances - // of the provider. Extra edges will be removed by transitive - // reduction. - g.Connect(dag.BasicEdge(closer, p)) - - // connect all the provider's resources to the close node - for _, s := range g.UpEdges(p).List() { - if _, ok := s.(GraphNodeProviderConsumer); ok { - g.Connect(dag.BasicEdge(closer, s)) - } - } - } - - return err -} - -// MissingProviderTransformer is a GraphTransformer that adds to the graph -// a node for each default provider configuration that is referenced by another -// node but not already present in the graph. -// -// These "default" nodes are always added to the root module, regardless of -// where they are requested. 
This is important because our inheritance -// resolution behavior in ProviderTransformer will then treat these as a -// last-ditch fallback after walking up the tree, rather than preferring them -// as it would if they were placed in the same module as the requester. -// -// This transformer may create extra nodes that are not needed in practice, -// due to overriding provider configurations in child modules. -// PruneProviderTransformer can then remove these once ProviderTransformer -// has resolved all of the inheritence, etc. -type MissingProviderTransformer struct { - // Providers is the list of providers we support. - Providers []string - - // Concrete, if set, overrides how the providers are made. - Concrete ConcreteProviderNodeFunc -} - -func (t *MissingProviderTransformer) Transform(g *Graph) error { - // Initialize factory - if t.Concrete == nil { - t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { - return a - } - } - - var err error - m := providerVertexMap(g) - for _, v := range g.Vertices() { - pv, ok := v.(GraphNodeProviderConsumer) - if !ok { - continue - } - - // For our work here we actually care only about the provider type and - // we plan to place all default providers in the root module, and so - // it's safe for us to rely on ProvidedBy here rather than waiting for - // the later proper resolution of provider inheritance done by - // ProviderTransformer. - p, _ := pv.ProvidedBy() - if p.ProviderConfig.Alias != "" { - // We do not create default aliased configurations. - log.Println("[TRACE] MissingProviderTransformer: skipping implication of aliased config", p) - continue - } - - // We're going to create an implicit _default_ configuration for the - // referenced provider type in the _root_ module, ignoring all other - // aspects of the resource's declared provider address. - defaultAddr := addrs.RootModuleInstance.ProviderConfigDefault(p.ProviderConfig.Type) - key := defaultAddr.String() - provider := m[key] - - if provider != nil { - // There's already an explicit default configuration for this - // provider type in the root module, so we have nothing to do. - continue - } - - log.Printf("[DEBUG] adding implicit provider configuration %s, implied first by %s", defaultAddr, dag.VertexName(v)) - - // create the missing top-level provider - provider = t.Concrete(&NodeAbstractProvider{ - Addr: defaultAddr, - }).(GraphNodeProvider) - - g.Add(provider) - m[key] = provider - } - - return err -} - -// ParentProviderTransformer connects provider nodes to their parents. -// -// This works by finding nodes that are both GraphNodeProviders and -// GraphNodeSubPath. It then connects the providers to their parent -// path. The parent provider is always at the root level. -type ParentProviderTransformer struct{} - -func (t *ParentProviderTransformer) Transform(g *Graph) error { - pm := providerVertexMap(g) - for _, v := range g.Vertices() { - // Only care about providers - pn, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // Also require non-empty path, since otherwise we're in the root - // module and so cannot have a parent. 
- if len(pn.Path()) <= 1 { - continue - } - - // this provider may be disabled, but we can only get it's name from - // the ProviderName string - addr := pn.ProviderAddr() - parentAddr, ok := addr.Inherited() - if ok { - parent := pm[parentAddr.String()] - if parent != nil { - g.Connect(dag.BasicEdge(v, parent)) - } - } - } - return nil -} - -// PruneProviderTransformer removes any providers that are not actually used by -// anything, and provider proxies. This avoids the provider being initialized -// and configured. This both saves resources but also avoids errors since -// configuration may imply initialization which may require auth. -type PruneProviderTransformer struct{} - -func (t *PruneProviderTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - // We only care about providers - _, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // ProxyProviders will have up edges, but we're now done with them in the graph - if _, ok := v.(*graphNodeProxyProvider); ok { - log.Printf("[DEBUG] pruning proxy %s", dag.VertexName(v)) - g.Remove(v) - } - - // Remove providers with no dependencies. - if g.UpEdges(v).Len() == 0 { - log.Printf("[DEBUG] pruning unused %s", dag.VertexName(v)) - g.Remove(v) - } - } - - return nil -} - -func providerVertexMap(g *Graph) map[string]GraphNodeProvider { - m := make(map[string]GraphNodeProvider) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvider); ok { - addr := pv.ProviderAddr() - m[addr.String()] = pv - } - } - - return m -} - -type graphNodeCloseProvider struct { - Addr addrs.AbsProviderConfig -} - -var ( - _ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil) -) - -func (n *graphNodeCloseProvider) Name() string { - return n.Addr.String() + " (close)" -} - -// GraphNodeSubPath impl. -func (n *graphNodeCloseProvider) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeEvalable impl. -func (n *graphNodeCloseProvider) EvalTree() EvalNode { - return CloseProviderEvalTree(n.Addr) -} - -// GraphNodeDependable impl. -func (n *graphNodeCloseProvider) DependableName() []string { - return []string{n.Name()} -} - -func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig { - return n.Addr -} - -// GraphNodeDotter impl. -func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - if !opts.Verbose { - return nil - } - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "diamond", - }, - } -} - -// RemovableIfNotTargeted -func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to -// store the name and value of a provider node for inheritance between modules. -// These nodes are only used to store the data while loading the provider -// configurations, and are removed after all the resources have been connected -// to their providers. 
-type graphNodeProxyProvider struct { - addr addrs.AbsProviderConfig - target GraphNodeProvider -} - -var ( - _ GraphNodeProvider = (*graphNodeProxyProvider)(nil) -) - -func (n *graphNodeProxyProvider) ProviderAddr() addrs.AbsProviderConfig { - return n.addr -} - -func (n *graphNodeProxyProvider) Path() addrs.ModuleInstance { - return n.addr.Module -} - -func (n *graphNodeProxyProvider) Name() string { - return n.addr.String() + " (proxy)" -} - -// find the concrete provider instance -func (n *graphNodeProxyProvider) Target() GraphNodeProvider { - switch t := n.target.(type) { - case *graphNodeProxyProvider: - return t.Target() - default: - return n.target - } -} - -// ProviderConfigTransformer adds all provider nodes from the configuration and -// attaches the configs. -type ProviderConfigTransformer struct { - Providers []string - Concrete ConcreteProviderNodeFunc - - // each provider node is stored here so that the proxy nodes can look up - // their targets by name. - providers map[string]GraphNodeProvider - // record providers that can be overriden with a proxy - proxiable map[string]bool - - // Config is the root node of the configuration tree to add providers from. - Config *configs.Config -} - -func (t *ProviderConfigTransformer) Transform(g *Graph) error { - // If no configuration is given, we don't do anything - if t.Config == nil { - return nil - } - - t.providers = make(map[string]GraphNodeProvider) - t.proxiable = make(map[string]bool) - - // Start the transformation process - if err := t.transform(g, t.Config); err != nil { - return err - } - - // finally attach the configs to the new nodes - return t.attachProviderConfigs(g) -} - -func (t *ProviderConfigTransformer) transform(g *Graph, c *configs.Config) error { - // If no config, do nothing - if c == nil { - return nil - } - - // Add our resources - if err := t.transformSingle(g, c); err != nil { - return err - } - - // Transform all the children. - for _, cc := range c.Children { - if err := t.transform(g, cc); err != nil { - return err - } - } - return nil -} - -func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) error { - // Get the module associated with this configuration tree node - mod := c.Module - staticPath := c.Path - - // We actually need a dynamic module path here, but we've not yet updated - // our graph builders enough to support expansion of module calls with - // "count" and "for_each" set, so for now we'll shim this by converting to - // a dynamic path with no keys. At the time of writing this is the only - // possible kind of dynamic path anyway. - path := make(addrs.ModuleInstance, len(staticPath)) - for i, name := range staticPath { - path[i] = addrs.ModuleInstanceStep{ - Name: name, - } - } - - // add all providers from the configuration - for _, p := range mod.ProviderConfigs { - relAddr := p.Addr() - addr := relAddr.Absolute(path) - - abstract := &NodeAbstractProvider{ - Addr: addr, - } - var v dag.Vertex - if t.Concrete != nil { - v = t.Concrete(abstract) - } else { - v = abstract - } - - // Add it to the graph - g.Add(v) - key := addr.String() - t.providers[key] = v.(GraphNodeProvider) - - // A provider configuration is "proxyable" if its configuration is - // entirely empty. This means it's standing in for a provider - // configuration that must be passed in from the parent module. - // We decide this by evaluating the config with an empty schema; - // if this succeeds, then we know there's nothing in the body. 
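// Illustrative sketch (not from the original file): decoding against an
// empty hcl.BodySchema yields a diagnostic for every attribute or block
// present, so diags.HasErrors() is false only for an empty body:
//
//   provider "aws" {}                       -> proxyable
//   provider "aws" { region = "us-east-1" } -> not proxyable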
- _, diags := p.Config.Content(&hcl.BodySchema{}) - t.proxiable[key] = !diags.HasErrors() - } - - // Now replace the provider nodes with proxy nodes if a provider was being - // passed in, and create implicit proxies if there was no config. Any extra - // proxies will be removed in the prune step. - return t.addProxyProviders(g, c) -} - -func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, c *configs.Config) error { - path := c.Path - - // can't add proxies at the root - if len(path) == 0 { - return nil - } - - parentPath, callAddr := path.Call() - parent := c.Parent - if parent == nil { - return nil - } - - callName := callAddr.Name - var parentCfg *configs.ModuleCall - for name, mod := range parent.Module.ModuleCalls { - if name == callName { - parentCfg = mod - break - } - } - - // We currently don't support count/for_each for modules and so we must - // shim our path and parentPath into module instances here so that the - // rest of Terraform can behave as if we do. This shimming should be - // removed later as part of implementing count/for_each for modules. - instPath := make(addrs.ModuleInstance, len(path)) - for i, name := range path { - instPath[i] = addrs.ModuleInstanceStep{Name: name} - } - parentInstPath := make(addrs.ModuleInstance, len(parentPath)) - for i, name := range parentPath { - parentInstPath[i] = addrs.ModuleInstanceStep{Name: name} - } - - if parentCfg == nil { - // this can't really happen during normal execution. - return fmt.Errorf("parent module config not found for %s", c.Path.String()) - } - - // Go through all the providers the parent is passing in, and add proxies to - // the parent provider nodes. - for _, pair := range parentCfg.Providers { - fullAddr := pair.InChild.Addr().Absolute(instPath) - fullParentAddr := pair.InParent.Addr().Absolute(parentInstPath) - fullName := fullAddr.String() - fullParentName := fullParentAddr.String() - - parentProvider := t.providers[fullParentName] - - if parentProvider == nil { - return fmt.Errorf("missing provider %s", fullParentName) - } - - proxy := &graphNodeProxyProvider{ - addr: fullAddr, - target: parentProvider, - } - - concreteProvider := t.providers[fullName] - - // replace the concrete node with the provider passed in - if concreteProvider != nil && t.proxiable[fullName] { - g.Replace(concreteProvider, proxy) - t.providers[fullName] = proxy - continue - } - - // aliased configurations can't be implicitly passed in - if fullAddr.ProviderConfig.Alias != "" { - continue - } - - // There was no concrete provider, so add this as an implicit provider. - // The extra proxy will be pruned later if it's unused. - g.Add(proxy) - t.providers[fullName] = proxy - } - return nil -} - -func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachProvider implementations - apn, ok := v.(GraphNodeAttachProvider) - if !ok { - continue - } - - // Determine what we're looking for - addr := apn.ProviderAddr() - - // Get the configuration. 
- mc := t.Config.DescendentForInstance(addr.Module) - if mc == nil { - log.Printf("[TRACE] ProviderConfigTransformer: no configuration available for %s", addr.String()) - continue - } - - // Go through the provider configs to find the matching config - for _, p := range mc.Module.ProviderConfigs { - if p.Name == addr.ProviderConfig.Type && p.Alias == addr.ProviderConfig.Alias { - log.Printf("[TRACE] ProviderConfigTransformer: attaching to %q provider configuration from %s", dag.VertexName(v), p.DeclRange) - apn.AttachProvider(p) - break - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go deleted file mode 100644 index e6fe25dac0..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go +++ /dev/null @@ -1,205 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// GraphNodeProvisioner is an interface that nodes that can be a provisioner -// must implement. The ProvisionerName returned is the name of the provisioner -// they satisfy. -type GraphNodeProvisioner interface { - ProvisionerName() string -} - -// GraphNodeCloseProvisioner is an interface that nodes that can be a close -// provisioner must implement. The CloseProvisionerName returned is the name -// of the provisioner they satisfy. -type GraphNodeCloseProvisioner interface { - CloseProvisionerName() string -} - -// GraphNodeProvisionerConsumer is an interface that nodes that require -// a provisioner must implement. ProvisionedBy must return the names of the -// provisioners to use. -type GraphNodeProvisionerConsumer interface { - ProvisionedBy() []string -} - -// ProvisionerTransformer is a GraphTransformer that maps resources to -// provisioners within the graph. This will error if there are any resources -// that don't map to proper resources. -type ProvisionerTransformer struct{} - -func (t *ProvisionerTransformer) Transform(g *Graph) error { - // Go through the other nodes and match them to provisioners they need - var err error - m := provisionerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisionerConsumer); ok { - for _, p := range pv.ProvisionedBy() { - key := provisionerMapKey(p, pv) - if m[key] == nil { - err = multierror.Append(err, fmt.Errorf( - "%s: provisioner %s couldn't be found", - dag.VertexName(v), p)) - continue - } - - log.Printf("[TRACE] ProvisionerTransformer: %s is provisioned by %s (%q)", dag.VertexName(v), key, dag.VertexName(m[key])) - g.Connect(dag.BasicEdge(v, m[key])) - } - } - } - - return err -} - -// MissingProvisionerTransformer is a GraphTransformer that adds nodes -// for missing provisioners into the graph. -type MissingProvisionerTransformer struct { - // Provisioners is the list of provisioners we support. - Provisioners []string -} - -func (t *MissingProvisionerTransformer) Transform(g *Graph) error { - // Create a set of our supported provisioners - supported := make(map[string]struct{}, len(t.Provisioners)) - for _, v := range t.Provisioners { - supported[v] = struct{}{} - } - - // Get the map of provisioners we already have in our graph - m := provisionerVertexMap(g) - - // Go through all the provisioner consumers and make sure we add - // that provisioner if it is missing. 
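// Illustrative sketch (not from the original file): provisionerMapKey,
// defined later in this file, prefixes the provisioner name with the
// consumer's module path, so a "local-exec" provisioner consumed inside
// module.child is tracked under a key like
//
//   module.child.local-exec
//
// keeping it distinct from the same provisioner type in other modules.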
- for _, v := range g.Vertices() { - pv, ok := v.(GraphNodeProvisionerConsumer) - if !ok { - continue - } - - // If this node has a subpath, then we use that as a prefix - // into our map to check for an existing provider. - path := addrs.RootModuleInstance - if sp, ok := pv.(GraphNodeSubPath); ok { - path = sp.Path() - } - - for _, p := range pv.ProvisionedBy() { - // Build the key for storing in the map - key := provisionerMapKey(p, pv) - - if _, ok := m[key]; ok { - // This provisioner already exists as a configure node - continue - } - - if _, ok := supported[p]; !ok { - // If we don't support the provisioner type, we skip it. - // Validation later will catch this as an error. - continue - } - - // Build the vertex - var newV dag.Vertex = &NodeProvisioner{ - NameValue: p, - PathValue: path, - } - - // Add the missing provisioner node to the graph - m[key] = g.Add(newV) - log.Printf("[TRACE] MissingProviderTransformer: added implicit provisioner %s, first implied by %s", key, dag.VertexName(v)) - } - } - - return nil -} - -// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the -// graph that will close open provisioner connections that aren't needed -// anymore. A provisioner connection is not needed anymore once all depended -// resources in the graph are evaluated. -type CloseProvisionerTransformer struct{} - -func (t *CloseProvisionerTransformer) Transform(g *Graph) error { - m := closeProvisionerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisionerConsumer); ok { - for _, p := range pv.ProvisionedBy() { - source := m[p] - - if source == nil { - // Create a new graphNodeCloseProvisioner and add it to the graph - source = &graphNodeCloseProvisioner{ProvisionerNameValue: p} - g.Add(source) - - // Make sure we also add the new graphNodeCloseProvisioner to the map - // so we don't create and add any duplicate graphNodeCloseProvisioners. - m[p] = source - } - - g.Connect(dag.BasicEdge(source, v)) - } - } - } - - return nil -} - -// provisionerMapKey is a helper that gives us the key to use for the -// maps returned by things such as provisionerVertexMap. -func provisionerMapKey(k string, v dag.Vertex) string { - pathPrefix := "" - if sp, ok := v.(GraphNodeSubPath); ok { - pathPrefix = sp.Path().String() + "." - } - - return pathPrefix + k -} - -func provisionerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisioner); ok { - key := provisionerMapKey(pv.ProvisionerName(), v) - m[key] = v - } - } - - return m -} - -func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeCloseProvisioner); ok { - m[pv.CloseProvisionerName()] = v - } - } - - return m -} - -type graphNodeCloseProvisioner struct { - ProvisionerNameValue string -} - -func (n *graphNodeCloseProvisioner) Name() string { - return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue) -} - -// GraphNodeEvalable impl. 
-func (n *graphNodeCloseProvisioner) EvalTree() EvalNode { - return &EvalCloseProvisioner{Name: n.ProvisionerNameValue} -} - -func (n *graphNodeCloseProvisioner) CloseProvisionerName() string { - return n.ProvisionerNameValue -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go deleted file mode 100644 index 54f9829c75..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go +++ /dev/null @@ -1,446 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/hashicorp/terraform-plugin-sdk/internal/lang" -) - -// GraphNodeReferenceable must be implemented by any node that represents -// a Terraform thing that can be referenced (resource, module, etc.). -// -// Even if the thing has no name, this should return an empty list. By -// implementing this and returning a non-nil result, you say that this CAN -// be referenced and other methods of referencing may still be possible (such -// as by path!) -type GraphNodeReferenceable interface { - GraphNodeSubPath - - // ReferenceableAddrs returns a list of addresses through which this can be - // referenced. - ReferenceableAddrs() []addrs.Referenceable -} - -// GraphNodeReferencer must be implemented by nodes that reference other -// Terraform items and therefore depend on them. -type GraphNodeReferencer interface { - GraphNodeSubPath - - // References returns a list of references made by this node, which - // include both a referenced address and source location information for - // the reference. - References() []*addrs.Reference -} - -// GraphNodeReferenceOutside is an interface that can optionally be implemented. -// A node that implements it can specify that its own referenceable addresses -// and/or the addresses it references are in a different module than the -// node itself. -// -// Any referenceable addresses returned by ReferenceableAddrs are interpreted -// relative to the returned selfPath. -// -// Any references returned by References are interpreted relative to the -// returned referencePath. -// -// It is valid but not required for either of these paths to match what is -// returned by method Path, though if both match the main Path then there -// is no reason to implement this method. -// -// The primary use-case for this is the nodes representing module input -// variables, since their expressions are resolved in terms of their calling -// module, but they are still referenced from their own module. -type GraphNodeReferenceOutside interface { - // ReferenceOutside returns a path in which any references from this node - // are resolved. - ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) -} - -// ReferenceTransformer is a GraphTransformer that connects all the -// nodes that reference each other in order to form the proper ordering. 
-type ReferenceTransformer struct{} - -func (t *ReferenceTransformer) Transform(g *Graph) error { - // Build a reference map so we can efficiently look up the references - vs := g.Vertices() - m := NewReferenceMap(vs) - - // Find the things that reference things and connect them - for _, v := range vs { - parents, _ := m.References(v) - parentsDbg := make([]string, len(parents)) - for i, v := range parents { - parentsDbg[i] = dag.VertexName(v) - } - log.Printf( - "[DEBUG] ReferenceTransformer: %q references: %v", - dag.VertexName(v), parentsDbg) - - for _, parent := range parents { - g.Connect(dag.BasicEdge(v, parent)) - } - } - - return nil -} - -// DestroyReferenceTransformer is a GraphTransformer that reverses the edges -// for locals and outputs that depend on other nodes which will be -// removed during destroy. If a destroy node is evaluated before the local or -// output value, it will be removed from the state, and the later interpolation -// will fail. -type DestroyValueReferenceTransformer struct{} - -func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error { - vs := g.Vertices() - for _, v := range vs { - switch v.(type) { - case *NodeApplyableOutput, *NodeLocal: - // OK - default: - continue - } - - // reverse any outgoing edges so that the value is evaluated first. - for _, e := range g.EdgesFrom(v) { - target := e.Target() - - // only destroy nodes will be evaluated in reverse - if _, ok := target.(GraphNodeDestroyer); !ok { - continue - } - - log.Printf("[TRACE] output dep: %s", dag.VertexName(target)) - - g.RemoveEdge(e) - g.Connect(&DestroyEdge{S: target, T: v}) - } - } - - return nil -} - -// PruneUnusedValuesTransformer is s GraphTransformer that removes local and -// output values which are not referenced in the graph. Since outputs and -// locals always need to be evaluated, if they reference a resource that is not -// available in the state the interpolation could fail. -type PruneUnusedValuesTransformer struct{} - -func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error { - // this might need multiple runs in order to ensure that pruning a value - // doesn't effect a previously checked value. - for removed := 0; ; removed = 0 { - for _, v := range g.Vertices() { - switch v.(type) { - case *NodeApplyableOutput, *NodeLocal: - // OK - default: - continue - } - - dependants := g.UpEdges(v) - - switch dependants.Len() { - case 0: - // nothing at all depends on this - g.Remove(v) - removed++ - case 1: - // because an output's destroy node always depends on the output, - // we need to check for the case of a single destroy node. - d := dependants.List()[0] - if _, ok := d.(*NodeDestroyableOutput); ok { - g.Remove(v) - removed++ - } - } - } - if removed == 0 { - break - } - } - - return nil -} - -// ReferenceMap is a structure that can be used to efficiently check -// for references on a graph. -type ReferenceMap struct { - // vertices is a map from internal reference keys (as produced by the - // mapKey method) to one or more vertices that are identified by each key. - // - // A particular reference key might actually identify multiple vertices, - // e.g. in situations where one object is contained inside another. - vertices map[string][]dag.Vertex - - // edges is a map whose keys are a subset of the internal reference keys - // from "vertices", and whose values are the nodes that refer to each - // key. The values in this map are the referrers, while values in - // "verticies" are the referents. The keys in both cases are referents. 
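// Illustrative sketch (not from the original file): both maps are keyed
// by mapKey below, which joins the module path and the referenceable
// address with "|". A resource aws_instance.web declared in module.child
// is therefore stored under
//
//   module.child|aws_instance.web
//
// so every lookup is scoped to one module instance.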
- edges map[string][]dag.Vertex -} - -// References returns the set of vertices that the given vertex refers to, -// and any referenced addresses that do not have corresponding vertices. -func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) { - rn, ok := v.(GraphNodeReferencer) - if !ok { - return nil, nil - } - if _, ok := v.(GraphNodeSubPath); !ok { - return nil, nil - } - - var matches []dag.Vertex - var missing []addrs.Referenceable - - for _, ref := range rn.References() { - subject := ref.Subject - - key := m.referenceMapKey(v, subject) - if _, exists := m.vertices[key]; !exists { - // If what we were looking for was a ResourceInstance then we - // might be in a resource-oriented graph rather than an - // instance-oriented graph, and so we'll see if we have the - // resource itself instead. - switch ri := subject.(type) { - case addrs.ResourceInstance: - subject = ri.ContainingResource() - case addrs.ResourceInstancePhase: - subject = ri.ContainingResource() - } - key = m.referenceMapKey(v, subject) - } - - vertices := m.vertices[key] - for _, rv := range vertices { - // don't include self-references - if rv == v { - continue - } - matches = append(matches, rv) - } - if len(vertices) == 0 { - missing = append(missing, ref.Subject) - } - } - - return matches, missing -} - -// Referrers returns the set of vertices that refer to the given vertex. -func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex { - rn, ok := v.(GraphNodeReferenceable) - if !ok { - return nil - } - sp, ok := v.(GraphNodeSubPath) - if !ok { - return nil - } - - var matches []dag.Vertex - for _, addr := range rn.ReferenceableAddrs() { - key := m.mapKey(sp.Path(), addr) - referrers, ok := m.edges[key] - if !ok { - continue - } - - // If the referrer set includes our own given vertex then we skip, - // since we don't want to return self-references. - selfRef := false - for _, p := range referrers { - if p == v { - selfRef = true - break - } - } - if selfRef { - continue - } - - matches = append(matches, referrers...) - } - - return matches -} - -func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string { - return fmt.Sprintf("%s|%s", path.String(), addr.String()) -} - -// vertexReferenceablePath returns the path in which the given vertex can be -// referenced. This is the path that its results from ReferenceableAddrs -// are considered to be relative to. -// -// Only GraphNodeSubPath implementations can be referenced, so this method will -// panic if the given vertex does not implement that interface. -func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance { - sp, ok := v.(GraphNodeSubPath) - if !ok { - // Only nodes with paths can participate in a reference map. - panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp)) - } - - if outside, ok := v.(GraphNodeReferenceOutside); ok { - // Vertex is referenced from a different module than where it was - // declared. - path, _ := outside.ReferenceOutside() - return path - } - - // Vertex is referenced from the same module as where it was declared. - return sp.Path() -} - -// vertexReferencePath returns the path in which references _from_ the given -// vertex must be interpreted. -// -// Only GraphNodeSubPath implementations can have references, so this method -// will panic if the given vertex does not implement that interface. 
-func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance { - sp, ok := referrer.(GraphNodeSubPath) - if !ok { - // Only nodes with paths can participate in a reference map. - panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp)) - } - - var path addrs.ModuleInstance - if outside, ok := referrer.(GraphNodeReferenceOutside); ok { - // Vertex makes references to objects in a different module than where - // it was declared. - _, path = outside.ReferenceOutside() - return path - } - - // Vertex makes references to objects in the same module as where it - // was declared. - return sp.Path() -} - -// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex -// that the reference is from, and "addr" is the address of the object being -// referenced. -// -// The result is an opaque string that includes both the address of the given -// object and the address of the module instance that object belongs to. -// -// Only GraphNodeSubPath implementations can be referrers, so this method will -// panic if the given vertex does not implement that interface. -func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string { - path := vertexReferencePath(referrer) - return m.mapKey(path, addr) -} - -// NewReferenceMap is used to create a new reference map for the -// given set of vertices. -func NewReferenceMap(vs []dag.Vertex) *ReferenceMap { - var m ReferenceMap - - // Build the lookup table - vertices := make(map[string][]dag.Vertex) - for _, v := range vs { - _, ok := v.(GraphNodeSubPath) - if !ok { - // Only nodes with paths can participate in a reference map. - continue - } - - // We're only looking for referenceable nodes - rn, ok := v.(GraphNodeReferenceable) - if !ok { - continue - } - - path := m.vertexReferenceablePath(v) - - // Go through and cache them - for _, addr := range rn.ReferenceableAddrs() { - key := m.mapKey(path, addr) - vertices[key] = append(vertices[key], v) - } - - // Any node can be referenced by the address of the module it belongs - // to or any of that module's ancestors. - for _, addr := range path.Ancestors()[1:] { - // Can be referenced either as the specific call instance (with - // an instance key) or as the bare module call itself (the "module" - // block in the parent module that created the instance). - callPath, call := addr.Call() - callInstPath, callInst := addr.CallInstance() - callKey := m.mapKey(callPath, call) - callInstKey := m.mapKey(callInstPath, callInst) - vertices[callKey] = append(vertices[callKey], v) - vertices[callInstKey] = append(vertices[callInstKey], v) - } - } - - // Build the lookup table for referenced by - edges := make(map[string][]dag.Vertex) - for _, v := range vs { - _, ok := v.(GraphNodeSubPath) - if !ok { - // Only nodes with paths can participate in a reference map. - continue - } - - rn, ok := v.(GraphNodeReferencer) - if !ok { - // We're only looking for referenceable nodes - continue - } - - // Go through and cache them - for _, ref := range rn.References() { - if ref.Subject == nil { - // Should never happen - panic(fmt.Sprintf("%T.References returned reference with nil subject", rn)) - } - key := m.referenceMapKey(v, ref.Subject) - edges[key] = append(edges[key], v) - } - } - - m.vertices = vertices - m.edges = edges - return &m -} - -// ReferencesFromConfig returns the references that a configuration has -// based on the interpolated variables in a configuration. 
-func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference { - if body == nil { - return nil - } - refs, _ := lang.ReferencesInBlock(body, schema) - return refs -} - -// appendResourceDestroyReferences identifies resource and resource instance -// references in the given slice and appends to it the "destroy-phase" -// equivalents of those references, returning the result. -// -// This can be used in the References implementation for a node which must also -// depend on the destruction of anything it references. -func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference { - given := refs - for _, ref := range given { - switch tr := ref.Subject.(type) { - case addrs.Resource: - newRef := *ref // shallow copy - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - refs = append(refs, &newRef) - case addrs.ResourceInstance: - newRef := *ref // shallow copy - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - refs = append(refs, &newRef) - } - } - return refs -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go deleted file mode 100644 index 327950d885..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go +++ /dev/null @@ -1,33 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// RemovedModuleTransformer implements GraphTransformer to add nodes indicating -// when a module was removed from the configuration. -type RemovedModuleTransformer struct { - Config *configs.Config // root node in the config tree - State *states.State -} - -func (t *RemovedModuleTransformer) Transform(g *Graph) error { - // nothing to remove if there's no state! - if t.State == nil { - return nil - } - - for _, m := range t.State.Modules { - cc := t.Config.DescendentForInstance(m.Addr) - if cc != nil { - continue - } - - log.Printf("[DEBUG] %s is no longer in configuration\n", m.Addr) - g.Add(&NodeModuleRemoved{Addr: m.Addr}) - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go deleted file mode 100644 index 51d9466a2c..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go +++ /dev/null @@ -1,71 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - "github.com/zclconf/go-cty/cty" -) - -// ResourceCountTransformer is a GraphTransformer that expands the count -// out for a specific resource. -// -// This assumes that the count is already interpolated. -type ResourceCountTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - Schema *configschema.Block - - // Count is either the number of indexed instances to create, or -1 to - // indicate that count is not set at all and thus a no-key instance should - // be created. 
- Count int - ForEach map[string]cty.Value - Addr addrs.AbsResource -} - -func (t *ResourceCountTransformer) Transform(g *Graph) error { - if t.Count < 0 && t.ForEach == nil { - // Negative count indicates that count is not set at all. - addr := t.Addr.Instance(addrs.NoKey) - - abstract := NewNodeAbstractResourceInstance(addr) - abstract.Schema = t.Schema - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - return nil - } - - // Add nodes related to the for_each expression - for key := range t.ForEach { - addr := t.Addr.Instance(addrs.StringKey(key)) - abstract := NewNodeAbstractResourceInstance(addr) - abstract.Schema = t.Schema - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - } - - // For each count, build and add the node - for i := 0; i < t.Count; i++ { - key := addrs.IntKey(i) - addr := t.Addr.Instance(key) - - abstract := NewNodeAbstractResourceInstance(addr) - abstract.Schema = t.Schema - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go deleted file mode 100644 index 485c1c8a04..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go +++ /dev/null @@ -1,38 +0,0 @@ -package terraform - -import "github.com/hashicorp/terraform-plugin-sdk/internal/dag" - -const rootNodeName = "root" - -// RootTransformer is a GraphTransformer that adds a root to the graph. -type RootTransformer struct{} - -func (t *RootTransformer) Transform(g *Graph) error { - // If we already have a good root, we're done - if _, err := g.Root(); err == nil { - return nil - } - - // Add a root - var root graphNodeRoot - g.Add(root) - - // Connect the root to all the edges that need it - for _, v := range g.Vertices() { - if v == root { - continue - } - - if g.UpEdges(v).Len() == 0 { - g.Connect(dag.BasicEdge(root, v)) - } - } - - return nil -} - -type graphNodeRoot struct{} - -func (n graphNodeRoot) Name() string { - return rootNodeName -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go deleted file mode 100644 index e7d95be978..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go +++ /dev/null @@ -1,74 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/states" -) - -// StateTransformer is a GraphTransformer that adds the elements of -// the state to the graph. -// -// This transform is used for example by the DestroyPlanGraphBuilder to ensure -// that only resources that are in the state are represented in the graph. -type StateTransformer struct { - // ConcreteCurrent and ConcreteDeposed are used to specialize the abstract - // resource instance nodes that this transformer will create. - // - // If either of these is nil, the objects of that type will be skipped and - // not added to the graph at all. It doesn't make sense to use this - // transformer without setting at least one of these, since that would - // skip everything and thus be a no-op. 
- ConcreteCurrent ConcreteResourceInstanceNodeFunc - ConcreteDeposed ConcreteResourceInstanceDeposedNodeFunc - - State *states.State -} - -func (t *StateTransformer) Transform(g *Graph) error { - if !t.State.HasResources() { - log.Printf("[TRACE] StateTransformer: state is empty, so nothing to do") - return nil - } - - switch { - case t.ConcreteCurrent != nil && t.ConcreteDeposed != nil: - log.Printf("[TRACE] StateTransformer: creating nodes for both current and deposed instance objects") - case t.ConcreteCurrent != nil: - log.Printf("[TRACE] StateTransformer: creating nodes for current instance objects only") - case t.ConcreteDeposed != nil: - log.Printf("[TRACE] StateTransformer: creating nodes for deposed instance objects only") - default: - log.Printf("[TRACE] StateTransformer: pointless no-op call, creating no nodes at all") - } - - for _, ms := range t.State.Modules { - moduleAddr := ms.Addr - - for _, rs := range ms.Resources { - resourceAddr := rs.Addr.Absolute(moduleAddr) - - for key, is := range rs.Instances { - addr := resourceAddr.Instance(key) - - if obj := is.Current; obj != nil && t.ConcreteCurrent != nil { - abstract := NewNodeAbstractResourceInstance(addr) - node := t.ConcreteCurrent(abstract) - g.Add(node) - log.Printf("[TRACE] StateTransformer: added %T for %s current object", node, addr) - } - - if t.ConcreteDeposed != nil { - for dk := range is.Deposed { - abstract := NewNodeAbstractResourceInstance(addr) - node := t.ConcreteDeposed(abstract, dk) - g.Add(node) - log.Printf("[TRACE] StateTransformer: added %T for %s deposed object %s", node, addr, dk) - } - } - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go deleted file mode 100644 index beb1eed9e3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go +++ /dev/null @@ -1,267 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// GraphNodeTargetable is an interface for graph nodes to implement when they -// need to be told about incoming targets. This is useful for nodes that need -// to respect targets as they dynamically expand. Note that the list of targets -// provided will contain every target provided, and each implementing graph -// node must filter this list to targets considered relevant. -type GraphNodeTargetable interface { - SetTargets([]addrs.Targetable) -} - -// GraphNodeTargetDownstream is an interface for graph nodes that need to -// remain present under targeting if any of their dependencies are targeted. -// TargetDownstream is called with the set of vertices that are direct -// dependencies for the node, and it should return true if the node must remain -// in the graph in support of those dependencies. -// -// This is used in situations where the dependency edges represent an -// ordering relationship but the dependency must still be visited if its -// dependencies are visited. This is true for outputs, for example, since -// they must get updated if any of their dependent resources get updated, -// which would not normally be true if one of their dependencies were targeted.
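The output-chain rule just described ("output -> output -> targeted-resource: both outputs stay") is implemented by a multi-pass expansion further down in addDependencies. A standalone sketch of that fixed-point loop, with `node` and `expandTargets` as simplified stand-ins for the SDK's dag types:

```go
package main

import "fmt"

type node struct {
	downstream bool     // stands in for implementing GraphNodeTargetDownstream
	deps       []string // names of nodes this node depends on
}

// expandTargets keeps adding opted-in dependers of targeted nodes until the
// targeted set stops growing, so a chain of outputs above a targeted
// resource is pulled in one pass at a time.
func expandTargets(nodes map[string]node, targeted map[string]bool) {
	queue := make([]string, 0, len(targeted))
	for name := range targeted {
		queue = append(queue, name)
	}
	for len(queue) > 0 {
		next := queue
		queue = nil
		for _, t := range next {
			for name, n := range nodes {
				if targeted[name] || !n.downstream {
					continue
				}
				for _, d := range n.deps {
					if d == t {
						targeted[name] = true
						queue = append(queue, name) // revisit for transitive dependers
						break
					}
				}
			}
		}
	}
}

func main() {
	nodes := map[string]node{
		"output.a": {downstream: true, deps: []string{"output.b"}},
		"output.b": {downstream: true, deps: []string{"aws_instance.web"}},
	}
	targeted := map[string]bool{"aws_instance.web": true}
	expandTargets(nodes, targeted)
	fmt.Println(targeted) // map[aws_instance.web:true output.a:true output.b:true]
}
```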
-type GraphNodeTargetDownstream interface { - TargetDownstream(targeted, untargeted *dag.Set) bool -} - -// TargetsTransformer is a GraphTransformer that, when the user specifies a -// list of resources to target, limits the graph to only those resources and -// their dependencies. -type TargetsTransformer struct { - // List of targeted resource names specified by the user - Targets []addrs.Targetable - - // If set, the index portions of resource addresses will be ignored - // for comparison. This is used when transforming a graph where - // counted resources have not yet been expanded, since otherwise - // the unexpanded nodes (which never have indices) would not match. - IgnoreIndices bool - - // Set to true when we're in a `terraform destroy` or a - // `terraform plan -destroy` - Destroy bool -} - -func (t *TargetsTransformer) Transform(g *Graph) error { - if len(t.Targets) > 0 { - targetedNodes, err := t.selectTargetedNodes(g, t.Targets) - if err != nil { - return err - } - - for _, v := range g.Vertices() { - removable := false - if _, ok := v.(GraphNodeResource); ok { - removable = true - } - - if vr, ok := v.(RemovableIfNotTargeted); ok { - removable = vr.RemoveIfNotTargeted() - } - - if removable && !targetedNodes.Include(v) { - log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v)) - g.Remove(v) - } - } - } - - return nil -} - -// Returns a set of targeted nodes. A targeted node is either addressed -// directly, addressed indirectly via its container, or it's a dependency of a -// targeted node. Destroy mode keeps dependents instead of dependencies. -func (t *TargetsTransformer) selectTargetedNodes(g *Graph, addrs []addrs.Targetable) (*dag.Set, error) { - targetedNodes := new(dag.Set) - - vertices := g.Vertices() - - for _, v := range vertices { - if t.nodeIsTarget(v, addrs) { - targetedNodes.Add(v) - - // We inform nodes that ask about the list of targets - helps for nodes - // that need to dynamically expand. Note that this only occurs for nodes - // that are already directly targeted. - if tn, ok := v.(GraphNodeTargetable); ok { - tn.SetTargets(addrs) - } - - var deps *dag.Set - var err error - if t.Destroy { - deps, err = g.Descendents(v) - } else { - deps, err = g.Ancestors(v) - } - if err != nil { - return nil, err - } - - for _, d := range deps.List() { - targetedNodes.Add(d) - } - } - } - return t.addDependencies(targetedNodes, g) -} - -func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (*dag.Set, error) { - // Handle nodes that need to be included if their dependencies are included. - // This requires multiple passes since we need to catch transitive - // dependencies if and only if they are via other nodes that also - // support TargetDownstream. For example: - // output -> output -> targeted-resource: both outputs need to be targeted - // output -> non-targeted-resource -> targeted-resource: output not targeted - // - // We'll keep looping until we stop targeting more nodes. - queue := targetedNodes.List() - for len(queue) > 0 { - vertices := queue - queue = nil // ready to append for next iteration if necessary - for _, v := range vertices { - // providers don't cause transitive dependencies, so don't target - // downstream from them. - if _, ok := v.(GraphNodeProvider); ok { - continue - } - - dependers := g.UpEdges(v) - if dependers == nil { - // indicates that there are no up edges for this node, so - // we have nothing to do here.
- continue - } - - dependers = dependers.Filter(func(dv interface{}) bool { - _, ok := dv.(GraphNodeTargetDownstream) - return ok - }) - - if dependers.Len() == 0 { - continue - } - - for _, dv := range dependers.List() { - if targetedNodes.Include(dv) { - // Already present, so nothing to do - continue - } - - // We'll give the node some information about what it's - // depending on in case that informs its decision about whether - // it is safe to be targeted. - deps := g.DownEdges(v) - - depsTargeted := deps.Intersection(targetedNodes) - depsUntargeted := deps.Difference(depsTargeted) - - if dv.(GraphNodeTargetDownstream).TargetDownstream(depsTargeted, depsUntargeted) { - targetedNodes.Add(dv) - // Need to visit this node on the next pass to see if it - // has any transitive dependers. - queue = append(queue, dv) - } - } - } - } - - return targetedNodes.Filter(func(dv interface{}) bool { - return filterPartialOutputs(dv, targetedNodes, g) - }), nil -} - -// Outputs may have been included transitively, but if any of their -// dependencies have been pruned they won't be resolvable. -// If nothing depends on the output, and the output is missing any -// dependencies, remove it from the graph. -// This essentially maintains the previous behavior where interpolation in -// outputs would fail silently, but can now surface errors where the output -// is required. -func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool { - // should this just be done with TargetDownstream? - if _, ok := v.(*NodeApplyableOutput); !ok { - return true - } - - dependers := g.UpEdges(v) - for _, d := range dependers.List() { - if _, ok := d.(*NodeCountBoundary); ok { - continue - } - - if !targetedNodes.Include(d) { - // this one is going to be removed, so it doesn't count - continue - } - - // as soon as we see a real dependency, we mark this as - // non-removable - return true - } - - depends := g.DownEdges(v) - - for _, d := range depends.List() { - if !targetedNodes.Include(d) { - log.Printf("[WARN] %s missing targeted dependency %s, removing from the graph", - dag.VertexName(v), dag.VertexName(d)) - return false - } - } - return true -} - -func (t *TargetsTransformer) nodeIsTarget(v dag.Vertex, targets []addrs.Targetable) bool { - var vertexAddr addrs.Targetable - switch r := v.(type) { - case GraphNodeResourceInstance: - vertexAddr = r.ResourceInstanceAddr() - case GraphNodeResource: - vertexAddr = r.ResourceAddr() - default: - // Only resource and resource instance nodes can be targeted. - return false - } - _, ok := v.(GraphNodeResource) - if !ok { - return false - } - - for _, targetAddr := range targets { - if t.IgnoreIndices { - // If we're ignoring indices then we'll convert any resource instance - // addresses into resource addresses. We don't need to convert - // vertexAddr because instance addresses are contained within - // their associated resources, and so .TargetContains will take - // care of this for us. - if instance, isInstance := targetAddr.(addrs.AbsResourceInstance); isInstance { - targetAddr = instance.ContainingResource() - } - } - if targetAddr.TargetContains(vertexAddr) { - return true - } - } - - return false -} - -// RemovableIfNotTargeted is a special interface for graph nodes that -// aren't directly addressable, but need to be removed from the graph when they -// are not targeted. (Nodes that are not directly targeted end up in the set of -// targeted nodes because something that _is_ targeted depends on them.) 
The -// initial use case for this interface is GraphNodeConfigVariable, which was -// having trouble interpolating for module variables in targeted scenarios that -// filtered out the resource node being referenced. -type RemovableIfNotTargeted interface { - RemoveIfNotTargeted() bool -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go deleted file mode 100644 index 21842789cf..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -// TransitiveReductionTransformer is a GraphTransformer that finds -// the transitive reduction of the graph. For a definition of -// transitive reduction, see Wikipedia. -type TransitiveReductionTransformer struct{} - -func (t *TransitiveReductionTransformer) Transform(g *Graph) error { - // If the graph isn't valid, skip the transitive reduction. - // We don't error here because Terraform itself handles graph - // validation in a better way, or we assume it does. - if err := g.Validate(); err != nil { - return nil - } - - // Do it - g.TransitiveReduction() - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go deleted file mode 100644 index 3afce56608..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go +++ /dev/null @@ -1,40 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" -) - -// RootVariableTransformer is a GraphTransformer that adds all the root -// variables to the graph. -// -// Root variables are currently no-ops but they must be added to the -// graph since downstream things that depend on them must be able to -// reach them. -type RootVariableTransformer struct { - Config *configs.Config -} - -func (t *RootVariableTransformer) Transform(g *Graph) error { - // We can have no variables if we have no config. - if t.Config == nil { - return nil - } - - // We're only considering root module variables here, since child - // module variables are handled by ModuleVariableTransformer. - vars := t.Config.Module.Variables - - // Add all variables here - for _, v := range vars { - node := &NodeRootVariable{ - Addr: addrs.InputVariable{ - Name: v.Name, - }, - Config: v, - } - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go deleted file mode 100644 index 6b3c62d1f3..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/internal/dag" -) - -// VertexTransformer is a GraphTransformer that transforms vertices -// using the GraphVertexTransformers. The Transforms are run in sequential -// order. If a transform replaces a vertex then the next transform will see -// the new vertex.
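That replace-and-continue contract can be shown with plain functions. A standalone sketch (`transformFn` and `applyAll` are illustrative stand-ins, not the SDK's GraphVertexTransformer): each transform runs in order, and once a vertex is replaced, later transforms operate on the replacement.

```go
package main

import "fmt"

type transformFn func(v string) (string, error)

// applyAll mirrors the loop in VertexTransformer.Transform: run each
// transform in order and let later transforms see any replacement.
func applyAll(vertices []string, transforms []transformFn) ([]string, error) {
	for i, v := range vertices {
		for _, t := range transforms {
			newV, err := t(v)
			if err != nil {
				return nil, err
			}
			if newV == v {
				continue // unchanged, try the next transform
			}
			vertices[i] = newV // replace within the "graph"
			v = newV           // future transforms use the proper vertex
		}
	}
	return vertices, nil
}

func main() {
	rename := func(v string) (string, error) {
		if v == "a" {
			return "a-replaced", nil
		}
		return v, nil
	}
	suffix := func(v string) (string, error) { return v + "!", nil }

	out, err := applyAll([]string{"a", "b"}, []transformFn{rename, suffix})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // [a-replaced! b!]
}
```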
-type VertexTransformer struct { - Transforms []GraphVertexTransformer -} - -func (t *VertexTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - for _, vt := range t.Transforms { - newV, err := vt.Transform(v) - if err != nil { - return err - } - - // If the vertex didn't change, then don't do anything more - if newV == v { - continue - } - - // Vertex changed, replace it within the graph - if ok := g.Replace(v, newV); !ok { - // This should never happen, big problem - return fmt.Errorf( - "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v", - dag.VertexName(v), dag.VertexName(newV), v, newV) - } - - // Replace v so that future transforms use the proper vertex - v = newV - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go deleted file mode 100644 index f6790d9e5f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go +++ /dev/null @@ -1,28 +0,0 @@ -package terraform - -import "context" - -// UIInput is the interface that must be implemented to ask for input -// from this user. This should forward the request to wherever the user -// inputs things to ask for values. -type UIInput interface { - Input(context.Context, *InputOpts) (string, error) -} - -// InputOpts are options for asking for input. -type InputOpts struct { - // Id is a unique ID for the question being asked that might be - // used for logging or to look up a prior answered question. - Id string - - // Query is a human-friendly question for inputting this value. - Query string - - // Description is a description about what this option is. Be wary - // that this will probably be in a terminal so split lines as you see - // necessary. - Description string - - // Default will be the value returned if no data is entered. - Default string -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go deleted file mode 100644 index e2d9c38481..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go +++ /dev/null @@ -1,25 +0,0 @@ -package terraform - -import "context" - -// MockUIInput is an implementation of UIInput that can be used for tests. -type MockUIInput struct { - InputCalled bool - InputOpts *InputOpts - InputReturnMap map[string]string - InputReturnString string - InputReturnError error - InputFn func(*InputOpts) (string, error) -} - -func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { - i.InputCalled = true - i.InputOpts = opts - if i.InputFn != nil { - return i.InputFn(opts) - } - if i.InputReturnMap != nil { - return i.InputReturnMap[opts.Id], i.InputReturnError - } - return i.InputReturnString, i.InputReturnError -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go deleted file mode 100644 index b5d32b1e85..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -import ( - "context" - "fmt" -) - -// PrefixUIInput is an implementation of UIInput that prefixes the ID -// with a string, allowing queries to be namespaced. 
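The interface and mock deleted above compose in the obvious way in tests. A standalone sketch, where the type definitions are trimmed copies of the deleted code and `askRegion` is a hypothetical call site, not SDK API:

```go
package main

import (
	"context"
	"fmt"
)

// Trimmed copies of the deleted UIInput, InputOpts, and MockUIInput.
type InputOpts struct {
	Id      string
	Query   string
	Default string
}

type UIInput interface {
	Input(context.Context, *InputOpts) (string, error)
}

type MockUIInput struct {
	InputReturnString string
}

func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) {
	return i.InputReturnString, nil
}

// askRegion prompts through whatever UIInput implementation it is handed.
func askRegion(ctx context.Context, in UIInput) (string, error) {
	return in.Input(ctx, &InputOpts{
		Id:      "provider.aws.region",
		Query:   "Enter the AWS region",
		Default: "us-east-1",
	})
}

func main() {
	v, err := askRegion(context.Background(), &MockUIInput{InputReturnString: "us-west-2"})
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // us-west-2
}
```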
-type PrefixUIInput struct { - IdPrefix string - QueryPrefix string - UIInput UIInput -} - -func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { - opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) - opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) - return i.UIInput.Input(ctx, opts) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go deleted file mode 100644 index 84427c63de..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -// UIOutput is the interface that must be implemented to output -// data to the end user. -type UIOutput interface { - Output(string) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go deleted file mode 100644 index 135a91c5f0..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -type CallbackUIOutput struct { - OutputFn func(string) -} - -func (o *CallbackUIOutput) Output(v string) { - o.OutputFn(v) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go deleted file mode 100644 index d828c921ca..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go +++ /dev/null @@ -1,21 +0,0 @@ -package terraform - -import "sync" - -// MockUIOutput is an implementation of UIOutput that can be used for tests. -type MockUIOutput struct { - sync.Mutex - OutputCalled bool - OutputMessage string - OutputFn func(string) -} - -func (o *MockUIOutput) Output(v string) { - o.Lock() - defer o.Unlock() - o.OutputCalled = true - o.OutputMessage = v - if o.OutputFn != nil { - o.OutputFn(v) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go deleted file mode 100644 index 0d7d4ce032..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" -) - -// ProvisionerUIOutput is an implementation of UIOutput that calls a hook -// for the output so that the hooks can handle it. 
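The hook fan-out implemented below is the same pattern in miniature here, as a standalone sketch (`hook` and `fanOut` are illustrative stand-ins for the SDK's Hook plumbing): every registered hook receives each output line.

```go
package main

import "fmt"

type hook func(msg string)

type fanOut struct{ hooks []hook }

// Output forwards the message to every registered hook, mirroring
// ProvisionerUIOutput.Output.
func (o fanOut) Output(msg string) {
	for _, h := range o.hooks {
		h(msg)
	}
}

func main() {
	o := fanOut{hooks: []hook{
		func(m string) { fmt.Println("log:", m) },
		func(m string) { fmt.Println("ui: ", m) },
	}}
	o.Output("provisioning complete")
}
```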
-type ProvisionerUIOutput struct { - InstanceAddr addrs.AbsResourceInstance - ProvisionerType string - Hooks []Hook -} - -func (o *ProvisionerUIOutput) Output(msg string) { - for _, h := range o.Hooks { - h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go deleted file mode 100644 index 5428cd5a0a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go +++ /dev/null @@ -1,75 +0,0 @@ -package terraform - -import ( - "sort" -) - -// Semaphore is a wrapper around a channel to provide -// utility methods to clarify that we are treating the -// channel as a semaphore -type Semaphore chan struct{} - -// NewSemaphore creates a semaphore that allows up -// to a given limit of simultaneous acquisitions -func NewSemaphore(n int) Semaphore { - if n == 0 { - panic("semaphore with limit 0") - } - ch := make(chan struct{}, n) - return Semaphore(ch) -} - -// Acquire is used to acquire an available slot. -// Blocks until available. -func (s Semaphore) Acquire() { - s <- struct{}{} -} - -// TryAcquire is used to do a non-blocking acquire. -// Returns a bool indicating success -func (s Semaphore) TryAcquire() bool { - select { - case s <- struct{}{}: - return true - default: - return false - } -} - -// Release is used to return a slot. Acquire must -// be called as a pre-condition. -func (s Semaphore) Release() { - select { - case <-s: - default: - panic("release without an acquire") - } -} - -// strSliceContains checks if a given string is contained in a slice -// When anybody asks why Go needs generics, here you go. -func strSliceContains(haystack []string, needle string) bool { - for _, s := range haystack { - if s == needle { - return true - } - } - return false -} - -// deduplicate a slice of strings -func uniqueStrings(s []string) []string { - if len(s) < 2 { - return s - } - - sort.Strings(s) - result := make([]string, 1, len(s)) - result[0] = s[0] - for i := 1; i < len(s); i++ { - if s[i] != result[len(result)-1] { - result = append(result, s[i]) - } - } - return result -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go deleted file mode 100644 index 627593d762..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by "stringer -type ValueSourceType"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[ValueFromUnknown-0] - _ = x[ValueFromConfig-67] - _ = x[ValueFromAutoFile-70] - _ = x[ValueFromNamedFile-78] - _ = x[ValueFromCLIArg-65] - _ = x[ValueFromEnvVar-69] - _ = x[ValueFromInput-73] - _ = x[ValueFromPlan-80] - _ = x[ValueFromCaller-83] -} - -const ( - _ValueSourceType_name_0 = "ValueFromUnknown" - _ValueSourceType_name_1 = "ValueFromCLIArg" - _ValueSourceType_name_2 = "ValueFromConfig" - _ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile" - _ValueSourceType_name_4 = "ValueFromInput" - _ValueSourceType_name_5 = "ValueFromNamedFile" - _ValueSourceType_name_6 = "ValueFromPlan" - _ValueSourceType_name_7 = "ValueFromCaller" -) - -var ( - _ValueSourceType_index_3 = [...]uint8{0, 15, 32} -) - -func (i ValueSourceType) String() string { - switch { - case i == 0: - return _ValueSourceType_name_0 - case i == 65: - return _ValueSourceType_name_1 - case i == 67: - return _ValueSourceType_name_2 - case 69 <= i && i <= 70: - i -= 69 - return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]] - case i == 73: - return _ValueSourceType_name_4 - case i == 78: - return _ValueSourceType_name_5 - case i == 80: - return _ValueSourceType_name_6 - case i == 83: - return _ValueSourceType_name_7 - default: - return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go deleted file mode 100644 index 4ae9c92cfc..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go +++ /dev/null @@ -1,313 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" -) - -// InputValue represents a value for a variable in the root module, provided -// as part of the definition of an operation. -type InputValue struct { - Value cty.Value - SourceType ValueSourceType - - // SourceRange provides source location information for values whose - // SourceType is either ValueFromConfig or ValueFromFile. It is not - // populated for other source types, and so should not be used. - SourceRange tfdiags.SourceRange -} - -// ValueSourceType describes what broad category of source location provided -// a particular value. -type ValueSourceType rune - -const ( - // ValueFromUnknown is the zero value of ValueSourceType and is not valid. - ValueFromUnknown ValueSourceType = 0 - - // ValueFromConfig indicates that a value came from a .tf or .tf.json file, - // e.g. the default value defined for a variable. - ValueFromConfig ValueSourceType = 'C' - - // ValueFromAutoFile indicates that a value came from a "values file", like - // a .tfvars file, that was implicitly loaded by naming convention. - ValueFromAutoFile ValueSourceType = 'F' - - // ValueFromNamedFile indicates that a value came from a named "values file", - // like a .tfvars file, that was passed explicitly on the command line (e.g. - // -var-file=foo.tfvars). - ValueFromNamedFile ValueSourceType = 'N' - - // ValueFromCLIArg indicates that the value was provided directly in - // a CLI argument. The name of this argument is not recorded and so it must - // be inferred from context. 
- ValueFromCLIArg ValueSourceType = 'A' - - // ValueFromEnvVar indicates that the value was provided via an environment - // variable. The name of the variable is not recorded and so it must be - // inferred from context. - ValueFromEnvVar ValueSourceType = 'E' - - // ValueFromInput indicates that the value was provided at an interactive - // input prompt. - ValueFromInput ValueSourceType = 'I' - - // ValueFromPlan indicates that the value was retrieved from a stored plan. - ValueFromPlan ValueSourceType = 'P' - - // ValueFromCaller indicates that the value was explicitly overridden by - // a caller to Context.SetVariable after the context was constructed. - ValueFromCaller ValueSourceType = 'S' -) - -func (v *InputValue) GoString() string { - if (v.SourceRange != tfdiags.SourceRange{}) { - return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v, SourceRange: %#v}", v.Value, v.SourceType, v.SourceRange) - } else { - return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v}", v.Value, v.SourceType) - } -} - -func (v ValueSourceType) GoString() string { - return fmt.Sprintf("terraform.%s", v) -} - -//go:generate go run golang.org/x/tools/cmd/stringer -type ValueSourceType - -// InputValues is a map of InputValue instances. -type InputValues map[string]*InputValue - -// InputValuesFromCaller turns the given map of naked values into an -// InputValues that attributes each value to "a caller", using the source -// type ValueFromCaller. This is primarily useful for testing purposes. -// -// This should not be used as a general way to convert map[string]cty.Value -// into InputValues, since in most real cases we want to set a suitable -// other SourceType and possibly SourceRange value. -func InputValuesFromCaller(vals map[string]cty.Value) InputValues { - ret := make(InputValues, len(vals)) - for k, v := range vals { - ret[k] = &InputValue{ - Value: v, - SourceType: ValueFromCaller, - } - } - return ret -} - -// Override merges the given value maps with the receiver, overriding any -// conflicting keys so that the latest definition wins. -func (vv InputValues) Override(others ...InputValues) InputValues { - // FIXME: This should check to see if any of the values are maps and - // merge them if so, in order to preserve the behavior from prior to - // Terraform 0.12. - ret := make(InputValues) - for k, v := range vv { - ret[k] = v - } - for _, other := range others { - for k, v := range other { - ret[k] = v - } - } - return ret -} - -// JustValues returns a map that just includes the values, discarding the -// source information. -func (vv InputValues) JustValues() map[string]cty.Value { - ret := make(map[string]cty.Value, len(vv)) - for k, v := range vv { - ret[k] = v.Value - } - return ret -} - -// DefaultVariableValues returns an InputValues map representing the default -// values specified for variables in the given configuration map. -func DefaultVariableValues(configs map[string]*configs.Variable) InputValues { - ret := make(InputValues) - for k, c := range configs { - if c.Default == cty.NilVal { - continue - } - ret[k] = &InputValue{ - Value: c.Default, - SourceType: ValueFromConfig, - SourceRange: tfdiags.SourceRangeFromHCL(c.DeclRange), - } - } - return ret -} - -// SameValues returns true if the given InputValues has the same values as -// the receiver, disregarding the source types and source ranges.
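Override, defined earlier in this file, is a plain last-writer-wins merge. A minimal standalone illustration with string maps (`override` here is a sketch that ignores the source metadata InputValues carries, not the SDK function):

```go
package main

import "fmt"

// override copies the base map and then applies each later map in order,
// so conflicting keys take the value from the latest source.
func override(base map[string]string, others ...map[string]string) map[string]string {
	ret := make(map[string]string, len(base))
	for k, v := range base {
		ret[k] = v
	}
	for _, other := range others {
		for k, v := range other {
			ret[k] = v // conflicting keys: the later definition wins
		}
	}
	return ret
}

func main() {
	defaults := map[string]string{"region": "us-east-1", "env": "dev"}
	cli := map[string]string{"region": "eu-west-1"}
	fmt.Println(override(defaults, cli)) // map[env:dev region:eu-west-1]
}
```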
-// -// Values are compared using the cty "RawEquals" method, which means that -// unknown values can be considered equal to one another if they are of the -// same type. -func (vv InputValues) SameValues(other InputValues) bool { - if len(vv) != len(other) { - return false - } - - for k, v := range vv { - ov, exists := other[k] - if !exists { - return false - } - if !v.Value.RawEquals(ov.Value) { - return false - } - } - - return true -} - -// HasValues returns true if the receiver has the same values as in the given -// map, disregarding the source types and source ranges. -// -// Values are compared using the cty "RawEquals" method, which means that -// unknown values can be considered equal to one another if they are of the -// same type. -func (vv InputValues) HasValues(vals map[string]cty.Value) bool { - if len(vv) != len(vals) { - return false - } - - for k, v := range vv { - oVal, exists := vals[k] - if !exists { - return false - } - if !v.Value.RawEquals(oVal) { - return false - } - } - - return true -} - -// Identical returns true if the given InputValues has the same values, -// source types, and source ranges as the receiver. -// -// Values are compared using the cty "RawEquals" method, which means that -// unknown values can be considered equal to one another if they are of the -// same type. -// -// This method is primarily for testing. For most practical purposes, it's -// better to use SameValues or HasValues. -func (vv InputValues) Identical(other InputValues) bool { - if len(vv) != len(other) { - return false - } - - for k, v := range vv { - ov, exists := other[k] - if !exists { - return false - } - if !v.Value.RawEquals(ov.Value) { - return false - } - if v.SourceType != ov.SourceType { - return false - } - if v.SourceRange != ov.SourceRange { - return false - } - } - - return true -} - -// checkInputVariables ensures that variable values supplied at the UI conform -// to their corresponding declarations in configuration. -// -// The set of values is considered valid only if the returned diagnostics -// does not contain errors. A valid set of values may still produce warnings, -// which should be returned to the user. -func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - for name, vc := range vcs { - val, isSet := vs[name] - if !isSet { - // Always an error, since the caller should already have included - // default values from the configuration in the values map. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unassigned variable", - fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in Terraform; please report it in a GitHub issue.", name), - )) - continue - } - - wantType := vc.Type - - // A given value is valid if it can convert to the desired type. - _, err := convert.Convert(val.Value, wantType) - if err != nil { - switch val.SourceType { - case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile: - // We have source location information for these.
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid value for input variable", - Detail: fmt.Sprintf("The given value is not valid for variable %q: %s.", name, err), - Subject: val.SourceRange.ToHCL().Ptr(), - }) - case ValueFromEnvVar: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The environment variable TF_VAR_%s does not contain a valid value for variable %q: %s.", name, name, err), - )) - case ValueFromCLIArg: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The argument -var=\"%s=...\" does not contain a valid value for variable %q: %s.", name, name, err), - )) - case ValueFromInput: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The value entered for variable %q is not valid: %s.", name, err), - )) - default: - // The above gets us good coverage for the situations users - // are likely to encounter with their own inputs. The other - // cases are generally implementation bugs, so we'll just - // use a generic error for these. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The value provided for variable %q is not valid: %s.", name, err), - )) - } - } - } - - // Check for any variables that are assigned without being configured. - // This is always an implementation error in the caller, because we - // expect undefined variables to be caught during context construction - // where there is better context to report it well. - for name := range vs { - if _, defined := vcs[name]; !defined { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Value assigned to undeclared variable", - fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name), - )) - } - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go deleted file mode 100644 index 4cc3bbba6a..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go +++ /dev/null @@ -1,62 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" - - "github.com/hashicorp/terraform-plugin-sdk/internal/configs" - - tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" -) - -// CheckCoreVersionRequirements visits each of the modules in the given -// configuration tree and verifies that any given Core version constraints -// match with the version of Terraform Core that is being used. -// -// The returned diagnostics will contain errors if any constraints do not match. -// The returned diagnostics might also return warnings, which should be -// displayed to the user. -func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics { - if config == nil { - return nil - } - - var diags tfdiags.Diagnostics - module := config.Module - - for _, constraint := range module.CoreVersionConstraints { - if !constraint.Required.Check(tfversion.SemVer) { - switch { - case len(config.Path) == 0: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported Terraform Core version", - Detail: fmt.Sprintf( - "This configuration does not support Terraform version %s. 
To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - tfversion.String(), - ), - Subject: &constraint.DeclRange, - }) - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported Terraform Core version", - Detail: fmt.Sprintf( - "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - config.Path, config.SourceAddr, tfversion.String(), - ), - Subject: &constraint.DeclRange, - }) - } - } - } - - for _, c := range config.Children { - childDiags := CheckCoreVersionRequirements(c) - diags = diags.Append(childDiags) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go deleted file mode 100644 index 0666aa5f3f..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go +++ /dev/null @@ -1,31 +0,0 @@ -// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[walkInvalid-0] - _ = x[walkApply-1] - _ = x[walkPlan-2] - _ = x[walkPlanDestroy-3] - _ = x[walkRefresh-4] - _ = x[walkValidate-5] - _ = x[walkDestroy-6] - _ = x[walkImport-7] - _ = x[walkEval-8] -} - -const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImportwalkEval" - -var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 54, 66, 77, 87, 95} - -func (i walkOperation) String() string { - if i >= walkOperation(len(_walkOperation_index)-1) { - return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/LICENSE new file mode 100644 index 0000000000..fba4c76f70 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/LICENSE @@ -0,0 +1,356 @@ +Copyright (c) 2019 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. 
that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go new file mode 100644 index 0000000000..5a53304269 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go @@ -0,0 +1,107 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package diag + +import ( + "errors" + "fmt" + + "github.com/hashicorp/go-cty/cty" +) + +// Diagnostics is a collection of Diagnostic. +// +// Developers should append and build the list of diagnostics up until a fatal +// error is reached, at which point they should return the Diagnostics to the +// SDK. +type Diagnostics []Diagnostic + +// HasError returns true if Diagnostics contains an instance of +// Severity == Error. +// +// This helper aims to mimic the Go error practices of if err != nil. After any +// operation that returns Diagnostics, check that it HasError and bubble up the +// stack. +func (diags Diagnostics) HasError() bool { + for i := range diags { + if diags[i].Severity == Error { + return true + } + } + return false +} + +// Diagnostic is a contextual message intended at outlining problems in user +// configuration. +// +// It supports multiple levels of severity (Error or Warning), a short Summary +// of the problem, an optional longer Detail message that can assist the user in +// fixing the problem, as well as an AttributePath representation which +// Terraform uses to indicate where the issue took place in the user's +// configuration. +// +// A Diagnostic will typically be used to pinpoint a problem with user +// configuration, however it can still be used to present warnings or errors +// to the user without any AttributePath set. +type Diagnostic struct { + // Severity indicates the level of the Diagnostic. Currently can be set to + // either Error or Warning + Severity Severity + + // Summary is a short description of the problem, rendered above location + // information + Summary string + + // Detail is an optional second message rendered below location information + // typically used to communicate a potential fix to the user. + Detail string + + // AttributePath is a representation of the path starting from the root of + // block (resource, datasource, provider) under evaluation by the SDK, to + // the attribute that the Diagnostic should be associated to.
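As a usage sketch for the Diagnostics type introduced in this file (`validatePort` is a hypothetical helper; the diag identifiers are the ones defined here): collect warnings and errors, then let the caller branch on HasError.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
)

// validatePort accumulates diagnostics instead of returning on the first
// problem, so warnings and errors can be reported together.
func validatePort(port int) diag.Diagnostics {
	var diags diag.Diagnostics
	if port == 80 {
		diags = append(diags, diag.Diagnostic{
			Severity: diag.Warning,
			Summary:  "port 80 is unencrypted",
		})
	}
	if port < 1 || port > 65535 {
		diags = append(diags, diag.Diagnostic{
			Severity: diag.Error,
			Summary:  fmt.Sprintf("port out of range: %d", port),
		})
	}
	return diags
}

func main() {
	diags := validatePort(80)
	fmt.Println(diags.HasError()) // false: only a warning was recorded
}
```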
Terraform will + // use this information to render information on where the problem took + // place in the user's configuration. + // + // It is represented with cty.Path, which is a list of steps of either + // cty.GetAttrStep (an actual attribute) or cty.IndexStep (a step with Key + // of cty.StringVal for map indexes, and cty.NumberVal for list indexes). + // + // PLEASE NOTE: While cty can support indexing into sets, the SDK and + // protocol currently do not. For any Diagnostic related to a schema.TypeSet + // or a child of that type, please terminate the path at the schema.TypeSet + // and opt for more verbose Summary and Detail to help guide the user. + // + // Validity of the AttributePath is currently the responsibility of the + // developer, Terraform should render the root block (provider, resource, + // datasource) in cases where the attribute path is invalid. + AttributePath cty.Path +} + +// Validate ensures a valid Severity and a non-empty Summary are set. +func (d Diagnostic) Validate() error { + var validSev bool + for _, sev := range severities { + if d.Severity == sev { + validSev = true + break + } + } + if !validSev { + return fmt.Errorf("invalid severity: %v", d.Severity) + } + if d.Summary == "" { + return errors.New("empty summary") + } + return nil +} + +// Severity is an enum type marking the severity level of a Diagnostic +type Severity int + +const ( + Error Severity = iota + Warning +) + +var severities = []Severity{Error, Warning} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go new file mode 100644 index 0000000000..4f2b7a8833 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go @@ -0,0 +1,42 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package diag + +import "fmt" + +// FromErr will convert an error into a Diagnostics. This returns Diagnostics +// as the most common use case in Go will be handling a single error +// returned from a function. +// +// if err != nil { +// return diag.FromErr(err) +// } +func FromErr(err error) Diagnostics { + if err == nil { + return nil + } + return Diagnostics{ + Diagnostic{ + Severity: Error, + Summary: err.Error(), + }, + } +} + +// Errorf creates a Diagnostics with a single Error level Diagnostic entry. +// The summary is populated by performing a fmt.Sprintf with the supplied +// values. This returns a single error in a Diagnostics as errors typically +// do not occur in multiples as warnings may. +// +// if unexpectedCondition { +// return diag.Errorf("unexpected: %s", someValue) +// } +func Errorf(format string, a ...interface{}) Diagnostics { + return Diagnostics{ + Diagnostic{ + Severity: Error, + Summary: fmt.Sprintf(format, a...), + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go new file mode 100644 index 0000000000..c26303eb60 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go @@ -0,0 +1,212 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package acctest + +import ( + "bytes" + crand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "math/rand" + "net/netip" + "strings" + "time" + + "golang.org/x/crypto/ssh" +) + +// Helpers for generating random tidbits for use in identifiers to prevent +// collisions in acceptance tests. + +// RandInt generates a random integer +func RandInt() int { + return rand.Int() +} + +// RandomWithPrefix is used to generate a unique name with a prefix, for +// randomizing names in acceptance tests +func RandomWithPrefix(name string) string { + return fmt.Sprintf("%s-%d", name, RandInt()) +} + +// RandIntRange returns a random integer between min (inclusive) and max (exclusive) +func RandIntRange(min int, max int) int { + return rand.Intn(max-min) + min +} + +// RandString generates a random alphanumeric string of the length specified +func RandString(strlen int) string { + return RandStringFromCharSet(strlen, CharSetAlphaNum) +} + +// RandStringFromCharSet generates a random string by selecting characters from +// the charset provided +func RandStringFromCharSet(strlen int, charSet string) string { + result := make([]byte, strlen) + for i := 0; i < strlen; i++ { + result[i] = charSet[RandIntRange(0, len(charSet))] + } + return string(result) +} + +// RandSSHKeyPair generates a random public and private SSH key pair. +// +// The public key is returned in OpenSSH authorized key format, for example: +// +// ssh-rsa XXX comment +// +// The private key is RSA algorithm, 1024 bits, PEM encoded, and has no +// passphrase. Testing with different or stricter security requirements should +// use the standard library [crypto] and [golang.org/x/crypto/ssh] packages +// directly. +func RandSSHKeyPair(comment string) (string, string, error) { + privateKey, privateKeyPEM, err := genPrivateKey() + if err != nil { + return "", "", err + } + + publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + return "", "", err + } + keyMaterial := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(publicKey))) + return fmt.Sprintf("%s %s", keyMaterial, comment), privateKeyPEM, nil +} + +// RandTLSCert generates a self-signed TLS certificate with a newly created +// private key, and returns both the cert and the private key PEM encoded. +// +// The private key uses RSA algorithm, 1024 bits, and has no passphrase. +// +// The certificate expires in 24 hours, has a random serial number, and is +// set for Encipherment, Digital Signature, and Server Auth key usage. +// Only the organization name of the subject is configurable. +// +// Testing with different or stricter security requirements should +// use the standard library [crypto] and [golang.org/x/crypto] packages +// directly. 
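As a usage sketch of the naming helpers above (the wrapper functions and prefix here are hypothetical, not part of the vendored file), acceptance tests typically seed unique fixture identifiers like so:

	package example_test

	import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"

	// uniqueName builds a collision-resistant fixture name,
	// e.g. "tf-acc-test-5577006791947779410".
	func uniqueName() string {
		return acctest.RandomWithPrefix("tf-acc-test")
	}

	// randToken returns a 16-character alphanumeric string.
	func randToken() string {
		return acctest.RandString(16)
	}
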
+func RandTLSCert(orgName string) (string, string, error) { + template := &x509.Certificate{ + SerialNumber: big.NewInt(int64(RandInt())), + Subject: pkix.Name{ + Organization: []string{orgName}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(24 * time.Hour), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + privateKey, privateKeyPEM, err := genPrivateKey() + if err != nil { + return "", "", err + } + + cert, err := x509.CreateCertificate(crand.Reader, template, template, &privateKey.PublicKey, privateKey) + if err != nil { + return "", "", err + } + + certPEM, err := pemEncode(cert, "CERTIFICATE") + if err != nil { + return "", "", err + } + + return certPEM, privateKeyPEM, nil +} + +// RandIpAddress returns a random IP address in the specified CIDR block. +// The prefix length must be less than 31. +func RandIpAddress(s string) (string, error) { + prefix, err := netip.ParsePrefix(s) + + if err != nil { + return "", err + } + + if prefix.IsSingleIP() { + return prefix.Addr().String(), nil + } + + prefixSizeExponent := uint(prefix.Addr().BitLen() - prefix.Bits()) + + if prefix.Addr().Is4() && prefixSizeExponent > 31 { + return "", fmt.Errorf("CIDR range is too large: %d", prefixSizeExponent) + } + + // Prevent panics with rand.Int63n(). + if prefix.Addr().Is6() && prefixSizeExponent > 63 { + return "", fmt.Errorf("CIDR range is too large: %d", prefixSizeExponent) + } + + // Calculate max random integer based on the prefix. + // Bit shift 1< 0 { + return fmt.Errorf("Modules are not supported. Found %d modules.", + len(sm.ChildModules)) + } + return nil +} + +func shimResourceStateKey(res *tfjson.StateResource) (string, error) { + if res.Index == nil { + return res.Address, nil + } + + var mode terraform.ResourceMode + switch res.Mode { + case tfjson.DataResourceMode: + mode = terraform.DataResourceMode + case tfjson.ManagedResourceMode: + mode = terraform.ManagedResourceMode + default: + return "", fmt.Errorf("unexpected resource mode for %q", res.Address) + } + + var index int + switch idx := res.Index.(type) { + case json.Number: + i, err := idx.Int64() + if err != nil { + return "", fmt.Errorf("unexpected index value (%q) for %q, ", + idx, res.Address) + } + index = int(i) + default: + return "", fmt.Errorf("unexpected index type (%T) for %q, "+ + "for_each is not supported", res.Index, res.Address) + } + + rsk := &terraform.ResourceStateKey{ + Mode: mode, + Type: res.Type, + Name: res.Name, + Index: index, + } + + return rsk.String(), nil +} + +func shimResourceState(res *tfjson.StateResource) (*terraform.ResourceState, error) { + sf := &shimmedFlatmap{} + err := sf.FromMap(res.AttributeValues) + if err != nil { + return nil, err + } + attributes := sf.Flatmap() + + if _, ok := attributes["id"]; !ok { + return nil, fmt.Errorf("no %q found in attributes", "id") + } + + return &terraform.ResourceState{ + Provider: res.ProviderName, + Type: res.Type, + Primary: &terraform.InstanceState{ + ID: attributes["id"], + Attributes: attributes, + Meta: map[string]interface{}{ + "schema_version": int(res.SchemaVersion), + }, + Tainted: res.Tainted, + }, + Dependencies: res.DependsOn, + }, nil +} + +type shimmedFlatmap struct { + m map[string]string +} + +func (sf *shimmedFlatmap) FromMap(attributes map[string]interface{}) error { + if sf.m == nil { + sf.m = make(map[string]string, len(attributes)) + } + + return sf.AddMap("", attributes) +} + +func (sf 
*shimmedFlatmap) AddMap(prefix string, m map[string]interface{}) error { + for key, value := range m { + k := key + if prefix != "" { + k = fmt.Sprintf("%s.%s", prefix, key) + } + + err := sf.AddEntry(k, value) + if err != nil { + return fmt.Errorf("unable to add map key %q entry: %w", k, err) + } + } + + mapLength := "%" + if prefix != "" { + mapLength = fmt.Sprintf("%s.%s", prefix, "%") + } + + if err := sf.AddEntry(mapLength, strconv.Itoa(len(m))); err != nil { + return fmt.Errorf("unable to add map length %q entry: %w", mapLength, err) + } + + return nil +} + +func (sf *shimmedFlatmap) AddSlice(name string, elements []interface{}) error { + for i, elem := range elements { + key := fmt.Sprintf("%s.%d", name, i) + err := sf.AddEntry(key, elem) + if err != nil { + return fmt.Errorf("unable to add slice key %q entry: %w", key, err) + } + } + + sliceLength := fmt.Sprintf("%s.#", name) + if err := sf.AddEntry(sliceLength, strconv.Itoa(len(elements))); err != nil { + return fmt.Errorf("unable to add slice length %q entry: %w", sliceLength, err) + } + + return nil +} + +func (sf *shimmedFlatmap) AddEntry(key string, value interface{}) error { + switch el := value.(type) { + case nil: + // omit the entry + return nil + case bool: + sf.m[key] = strconv.FormatBool(el) + case json.Number: + sf.m[key] = el.String() + case string: + sf.m[key] = el + case map[string]interface{}: + err := sf.AddMap(key, el) + if err != nil { + return err + } + case []interface{}: + err := sf.AddSlice(key, el) + if err != nil { + return err + } + default: + // This should never happen unless terraform-json + // changes how attributes (types) are represented. + // + // We handle all types which the JSON unmarshaler + // can possibly produce + // https://golang.org/pkg/encoding/json/#Unmarshal + + return fmt.Errorf("%q: unexpected type (%T)", key, el) + } + return nil +} + +func (sf *shimmedFlatmap) Flatmap() map[string]string { + return sf.m +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go new file mode 100644 index 0000000000..9639cb04df --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_providers.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "context" + "fmt" + "strings" +) + +// providerConfig takes the list of providers in a TestCase and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. +func (c TestCase) providerConfig(_ context.Context, skipProviderBlock bool) string { + var providerBlocks, requiredProviderBlocks strings.Builder + + // [BF] The Providers field handling predates the logic being moved to this + // method. It's not entirely clear to me at this time why this field + // is being used and not the others, but leaving it here just in case + // it does have a special purpose that wasn't being unit tested prior. 
+ for name := range c.Providers { + providerBlocks.WriteString(fmt.Sprintf("provider %q {}\n", name)) + } + + for name, externalProvider := range c.ExternalProviders { + if !skipProviderBlock { + providerBlocks.WriteString(fmt.Sprintf("provider %q {}\n", name)) + } + + if externalProvider.Source == "" && externalProvider.VersionConstraint == "" { + continue + } + + requiredProviderBlocks.WriteString(fmt.Sprintf(" %s = {\n", name)) + + if externalProvider.Source != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" source = %q\n", externalProvider.Source)) + } + + if externalProvider.VersionConstraint != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" version = %q\n", externalProvider.VersionConstraint)) + } + + requiredProviderBlocks.WriteString(" }\n") + } + + if requiredProviderBlocks.Len() > 0 { + return fmt.Sprintf(` +terraform { + required_providers { +%[1]s + } +} + +%[2]s +`, strings.TrimSuffix(requiredProviderBlocks.String(), "\n"), providerBlocks.String()) + } + + return providerBlocks.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go new file mode 100644 index 0000000000..8eb85a14ab --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testcase_validate.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +// hasProviders returns true if the TestCase has set any of the +// ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, +// ProviderFactories, or Providers fields. +func (c TestCase) hasProviders(_ context.Context) bool { + if len(c.ExternalProviders) > 0 { + return true + } + + if len(c.ProtoV5ProviderFactories) > 0 { + return true + } + + if len(c.ProtoV6ProviderFactories) > 0 { + return true + } + + if len(c.ProviderFactories) > 0 { + return true + } + + if len(c.Providers) > 0 { + return true + } + + return false +} + +// validate ensures the TestCase is valid based on the following criteria: +// +// - No overlapping ExternalProviders and Providers entries +// - No overlapping ExternalProviders and ProviderFactories entries +// - TestStep validations performed by the (TestStep).validate() method. 
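To make the overlap rules above concrete, here is a hypothetical TestCase (provider name, variable, and config invented for illustration) that this validation rejects, because the "github" key appears in both Providers and ExternalProviders:

	var _ = resource.TestCase{
		Providers: map[string]*schema.Provider{
			"github": exampleProvider, // hypothetical *schema.Provider
		},
		ExternalProviders: map[string]resource.ExternalProvider{
			"github": {Source: "integrations/github"},
		},
		Steps: []resource.TestStep{
			{Config: `resource "github_repository" "example" {}`},
		},
	}

validate() fails this case before any step runs, with: TestCase provider "github" set in both ExternalProviders and Providers.
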
+func (c TestCase) validate(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Validating TestCase") + + if len(c.Steps) == 0 { + err := fmt.Errorf("TestCase missing Steps") + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + for name := range c.ExternalProviders { + if _, ok := c.Providers[name]; ok { + err := fmt.Errorf("TestCase provider %q set in both ExternalProviders and Providers", name) + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if _, ok := c.ProviderFactories[name]; ok { + err := fmt.Errorf("TestCase provider %q set in both ExternalProviders and ProviderFactories", name) + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + testCaseHasProviders := c.hasProviders(ctx) + + for stepIndex, step := range c.Steps { + stepNumber := stepIndex + 1 // Use 1-based index for humans + stepValidateReq := testStepValidateRequest{ + StepNumber: stepNumber, + TestCaseHasProviders: testCaseHasProviders, + } + + err := step.validate(ctx, stepValidateReq) + + if err != nil { + err := fmt.Errorf("TestStep %d/%d validation error: %w", stepNumber, len(c.Steps), err) + logging.HelperResourceError(ctx, "TestCase validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go new file mode 100644 index 0000000000..9bde8e22aa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go @@ -0,0 +1,1524 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "context" + "errors" + "flag" + "fmt" + "log" + "os" + "regexp" + "strconv" + "strings" + "time" + + "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// flagSweep is a flag available when running tests on the command line. It +// contains a comma separated list of regions for the sweeper functions to +// run in. This flag bypasses the normal Test path and instead runs functions designed to +// clean up any leaked resources a testing environment could have created. It is +// a best effort attempt, and relies on Provider authors to implement "Sweeper" +// methods for resources. + +// Adding Sweeper methods with AddTestSweepers will +// construct a list of sweeper funcs to be called here. We iterate through +// regions provided by the sweep flag, and for each region we iterate through the +// tests, and exit on any errors. At time of writing, sweepers are run +// sequentially, however they can list dependencies to be run first. We track +// the sweepers that have been run, so as to not run a sweeper twice for a given +// region. +// +// WARNING: +// Sweepers are designed to be destructive.
You should not use the -sweep flag +// in any environment that is not strictly a test environment. Resources will be +// destroyed. + +var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers") +var flagSweepAllowFailures = flag.Bool("sweep-allow-failures", false, "Enable to allow Sweeper Tests to continue after failures") +var flagSweepRun = flag.String("sweep-run", "", "Comma separated list of Sweeper Tests to run") +var sweeperFuncs map[string]*Sweeper + +// SweeperFunc is a signature for a function that acts as a sweeper. It +// accepts a string for the region that the sweeper is to be run in. This +// function must be able to construct a valid client for that region. +type SweeperFunc func(r string) error + +type Sweeper struct { + // Name for sweeper. Must be unique to be run by the Sweeper Runner + Name string + + // Dependencies list the const names of other Sweeper functions that must be run + // prior to running this Sweeper. This is an ordered list that will be invoked + // recursively at the helper/resource level + Dependencies []string + + // Sweeper function that when invoked sweeps the Provider of specific + // resources + F SweeperFunc +} + +func init() { + sweeperFuncs = make(map[string]*Sweeper) +} + +// AddTestSweepers function adds a given name and Sweeper configuration +// pair to the internal sweeperFuncs map. Invoke this function to register a +// resource sweeper to be available for running when the -sweep flag is used +// with `go test`. Sweeper names must be unique to help ensure a given sweeper +// is only run once per run. +func AddTestSweepers(name string, s *Sweeper) { + if _, ok := sweeperFuncs[name]; ok { + log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name) + } + + sweeperFuncs[name] = s +} + +// TestMain adds sweeper functionality to the "go test" command, otherwise +// tests are executed as normal. Most provider acceptance tests are written +// using the Test() function of this package, which imposes its own +// requirements and Terraform CLI behavior. Refer to that function's +// documentation for additional details. +// +// Sweepers enable infrastructure cleanup functions to be included with +// resource definitions, typically so developers can remove all resources of +// that resource type from testing infrastructure in case of failures that +// prevented the normal resource destruction behavior of acceptance tests. +// Use the AddTestSweepers() function to configure available sweepers. +// +// Sweeper flags added to the "go test" command: +// +// -sweep: Comma-separated list of locations/regions to run available sweepers. +// -sweep-allow-failures: Enable to allow other sweepers to run after failures. +// -sweep-run: Comma-separated list of resource type sweepers to run. Defaults +// to all sweepers. +// +// Refer to the Env prefixed constants for environment variables that further +// control testing functionality.
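The typical wiring in a provider's test package is a one-line delegation (a minimal sketch; the package name is hypothetical):

	package github_test

	import (
		"testing"

		"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
	)

	// Delegating to resource.TestMain enables the -sweep, -sweep-run, and
	// -sweep-allow-failures flags for this package's `go test` runs.
	func TestMain(m *testing.M) {
		resource.TestMain(m)
	}
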
+func TestMain(m interface { + Run() int +}) { + flag.Parse() + if *flagSweep != "" { + // parse flagSweep contents for regions to run + regions := strings.Split(*flagSweep, ",") + + // get filtered list of sweepers to run based on sweep-run flag + sweepers := filterSweepers(*flagSweepRun, sweeperFuncs) + + if _, err := runSweepers(regions, sweepers, *flagSweepAllowFailures); err != nil { + os.Exit(1) + } + } else { + exitCode := m.Run() + os.Exit(exitCode) + } +} + +func runSweepers(regions []string, sweepers map[string]*Sweeper, allowFailures bool) (map[string]map[string]error, error) { + var sweeperErrorFound bool + sweeperRunList := make(map[string]map[string]error) + + for _, region := range regions { + region = strings.TrimSpace(region) + + var regionSweeperErrorFound bool + regionSweeperRunList := make(map[string]error) + + start := time.Now() + log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region) + for _, sweeper := range sweepers { + if err := runSweeperWithRegion(region, sweeper, sweepers, regionSweeperRunList, allowFailures); err != nil { + if allowFailures { + continue + } + + sweeperRunList[region] = regionSweeperRunList + return sweeperRunList, fmt.Errorf("sweeper (%s) for region (%s) failed: %s", sweeper.Name, region, err) + } + } + elapsed := time.Since(start) + log.Printf("Completed Sweepers for region (%s) in %s", region, elapsed) + + log.Printf("Sweeper Tests for region (%s) ran successfully:\n", region) + for sweeper, sweeperErr := range regionSweeperRunList { + if sweeperErr == nil { + fmt.Printf("\t- %s\n", sweeper) + } else { + regionSweeperErrorFound = true + } + } + + if regionSweeperErrorFound { + sweeperErrorFound = true + log.Printf("Sweeper Tests for region (%s) ran unsuccessfully:\n", region) + for sweeper, sweeperErr := range regionSweeperRunList { + if sweeperErr != nil { + fmt.Printf("\t- %s: %s\n", sweeper, sweeperErr) + } + } + } + + sweeperRunList[region] = regionSweeperRunList + } + + if sweeperErrorFound { + return sweeperRunList, errors.New("at least one sweeper failed") + } + + return sweeperRunList, nil +} + +// filterSweepers takes a comma separated string listing the names of sweepers +// to be run, and returns a filtered set from the list of all sweepers to +// run based on the names given. +func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper { + filterSlice := strings.Split(strings.ToLower(f), ",") + if len(filterSlice) == 1 && filterSlice[0] == "" { + // if the filter slice is a single element of "" then no sweeper list was + // given, so just return the full list + return source + } + + sweepers := make(map[string]*Sweeper) + for name := range source { + for _, s := range filterSlice { + if strings.Contains(strings.ToLower(name), s) { + for foundName, foundSweeper := range filterSweeperWithDependencies(name, source) { + sweepers[foundName] = foundSweeper + } + } + } + } + return sweepers +} + +// filterSweeperWithDependencies recursively returns sweeper and all dependencies. +// Since filterSweepers performs fuzzy matching, this function is used +// to perform exact sweeper and dependency lookup.
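As a sketch of how dependencies drive ordering (the sweeper names and cleanup bodies below are hypothetical):

	func init() {
		resource.AddTestSweepers("example_branch", &resource.Sweeper{
			Name: "example_branch",
			F: func(region string) error {
				// delete leaked test branches here
				return nil
			},
		})

		// Sweeping "example_repository" first runs "example_branch" for the
		// same region, per runSweeperWithRegion below.
		resource.AddTestSweepers("example_repository", &resource.Sweeper{
			Name:         "example_repository",
			Dependencies: []string{"example_branch"},
			F: func(region string) error {
				// delete leaked test repositories here
				return nil
			},
		})
	}
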
+func filterSweeperWithDependencies(name string, source map[string]*Sweeper) map[string]*Sweeper { + result := make(map[string]*Sweeper) + + currentSweeper, ok := source[name] + if !ok { + log.Printf("[WARN] Sweeper has dependency (%s), but that sweeper was not found", name) + return result + } + + result[name] = currentSweeper + + for _, dependency := range currentSweeper.Dependencies { + for foundName, foundSweeper := range filterSweeperWithDependencies(dependency, source) { + result[foundName] = foundSweeper + } + } + + return result +} + +// runSweeperWithRegion receives a sweeper and a region, and recursively calls +// itself with that region for every dependency found for that sweeper. If there +// are no dependencies, invoke the contained sweeper func with the region, and +// add the success/fail status to the sweeperRunList. +func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweeper, sweeperRunList map[string]error, allowFailures bool) error { + for _, dep := range s.Dependencies { + depSweeper, ok := sweepers[dep] + + if !ok { + log.Printf("[ERROR] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) + return fmt.Errorf("sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) + } + + log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running...", s.Name, dep) + err := runSweeperWithRegion(region, depSweeper, sweepers, sweeperRunList, allowFailures) + + if err != nil { + if allowFailures { + log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", depSweeper.Name, region, err) + continue + } + + return err + } + } + + if _, ok := sweeperRunList[s.Name]; ok { + log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region) + return nil + } + + log.Printf("[DEBUG] Running Sweeper (%s) in region (%s)", s.Name, region) + + start := time.Now() + runE := s.F(region) + elapsed := time.Since(start) + + log.Printf("[DEBUG] Completed Sweeper (%s) in region (%s) in %s", s.Name, region, elapsed) + + sweeperRunList[s.Name] = runE + + if runE != nil { + log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", s.Name, region, runE) + } + + return runE +} + +// Deprecated: Use EnvTfAcc instead. +const TestEnvVar = EnvTfAcc + +// TestCheckFunc is the callback type used with acceptance tests to check +// the state of a resource. The state passed in is the latest state known, +// or in the case of being after a destroy, it is the last known state when +// it was created. +type TestCheckFunc func(*terraform.State) error + +// ImportStateCheckFunc is the check function for ImportState tests +type ImportStateCheckFunc func([]*terraform.InstanceState) error + +// ImportStateIdFunc is an ID generation function to help with complex ID +// generation for ImportState tests. +type ImportStateIdFunc func(*terraform.State) (string, error) + +// ErrorCheckFunc is a function providers can use to handle errors. +type ErrorCheckFunc func(error) error + +// TestCase is a single acceptance test case used to test the apply/destroy +// lifecycle of a resource in a specific configuration. +// +// When the destroy plan is executed, the config from the last TestStep +// is used to plan it. +// +// Refer to the Env prefixed constants for environment variables that further +// control testing functionality. +type TestCase struct { + // IsUnitTest allows a test to run regardless of the TF_ACC + // environment variable. This should be used with care - only for + // fast tests on local resources (e.g.
remote state with a local + // backend) but can be used to increase confidence in correct + // operation of Terraform without waiting for a full acctest run. + IsUnitTest bool + + // PreCheck, if non-nil, will be called before any test steps are + // executed. It will only be executed in the case that the steps + // would run, so it can be used for some validation before running + // acceptance tests, such as verifying that keys are set up. + PreCheck func() + + // ProviderFactories can be specified for the providers that are valid. + // + // This can also be specified at the TestStep level to enable per-step + // differences in providers, however all provider specifications must + // be done either at the TestCase level or TestStep level, otherwise the + // testing framework will raise an error and fail the test. + // + // These are the providers that can be referenced within the test. Each key + // is an individually addressable provider. Typically you will only pass a + // single value here for the provider you are testing. Aliases are not + // supported by the test framework, so to use multiple provider instances, + // you should add additional copies to this map with unique names. To set + // their configuration, you would reference them similar to the following: + // + // provider "my_factory_key" { + // # ... + // } + // + // resource "my_resource" "mr" { + // provider = my_factory_key + // + // # ... + // } + ProviderFactories map[string]func() (*schema.Provider, error) + + // ProtoV5ProviderFactories serves the same purpose as ProviderFactories, + // but for protocol v5 providers defined using the terraform-plugin-go + // ProviderServer interface. + // + // This can also be specified at the TestStep level to enable per-step + // differences in providers, however all provider specifications must + // be done either at the TestCase level or TestStep level, otherwise the + // testing framework will raise an error and fail the test. + ProtoV5ProviderFactories map[string]func() (tfprotov5.ProviderServer, error) + + // ProtoV6ProviderFactories serves the same purpose as ProviderFactories, + // but for protocol v6 providers defined using the terraform-plugin-go + // ProviderServer interface. + // The version of Terraform used in acceptance testing must be greater + // than or equal to v0.15.4 to use ProtoV6ProviderFactories. + // + // This can also be specified at the TestStep level to enable per-step + // differences in providers, however all provider specifications must + // be done either at the TestCase level or TestStep level, otherwise the + // testing framework will raise an error and fail the test. + ProtoV6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error) + + // Providers is the ResourceProvider that will be under test. + // + // Deprecated: Providers is deprecated, please use ProviderFactories + Providers map[string]*schema.Provider + + // ExternalProviders are providers the TestCase relies on that should + // be downloaded from the registry during init. + // + // This can also be specified at the TestStep level to enable per-step + // differences in providers, however all provider specifications must + // be done either at the TestCase level or TestStep level, otherwise the + // testing framework will raise an error and fail the test.
+ // + // This is generally unnecessary to set at the TestCase level, however + // it has existed in the testing framework prior to the introduction of + // TestStep level specification and was only necessary for performing + // import testing where the configuration contained a provider outside the + // one under test. + ExternalProviders map[string]ExternalProvider + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // CheckDestroy is called after the resource is finally destroyed + // to allow the tester to test that the resource is truly gone. + CheckDestroy TestCheckFunc + + // ErrorCheck allows providers the option to handle errors such as skipping + // tests based on certain errors. + ErrorCheck ErrorCheckFunc + + // Steps are the apply sequences done within the context of the + // same state. Each step can have its own check to verify correctness. + Steps []TestStep + + // IDRefreshName is the name of the resource to check during ID-only + // refresh testing, which ensures that a resource can be refreshed solely + // by its identifier. This will default to the first non-nil primary + // resource in the state. It runs every TestStep. + // + // While not deprecated, most resource tests should instead prefer using + // TestStep.ImportState based testing as it works with multiple attribute + // identifiers and also verifies resource import functionality. + IDRefreshName string + + // IDRefreshIgnore is a list of configuration keys that will be ignored + // during ID-only refresh testing. + IDRefreshIgnore []string +} + +// ExternalProvider holds information about third-party providers that should +// be downloaded by Terraform as part of running the test step. +type ExternalProvider struct { + VersionConstraint string // the version constraint for the provider + Source string // the provider source +} + +// TestStep is a single apply sequence of a test, done within the +// context of a state. +// +// Multiple TestSteps can be sequenced in a Test to allow testing +// potentially complex update logic. In general, simple create/destroy +// tests will only need one step. +// +// Refer to the Env prefixed constants for environment variables that further +// control testing functionality. +type TestStep struct { + // ResourceName should be set to the name of the resource + // that is being tested. Example: "aws_instance.foo". Various test + // modes use this to auto-detect state information. + // + // This is only required if the test mode settings below say it is + // for the mode you're using. + ResourceName string + + // PreConfig is called before the Config is applied to perform any per-step + // setup that needs to happen. This is called regardless of "test mode" + // below. + PreConfig func() + + // Taint is a list of resource addresses to taint prior to the execution of + // the step. Be sure to only include this at a step where the referenced + // address will be present in state, as it will fail the test if the resource + // is missing. + // + // This option is ignored on ImportState tests, and currently only works for + // resources in the root module path. + Taint []string + + //--------------------------------------------------------------- + // Test modes. One of the following groups of settings must be + // set to determine what the test step will do.
Ideally we would've + // used Go interfaces here but there are now hundreds of tests we don't + // want to re-type so instead we just determine which step logic + // to run based on what settings below are set. + //--------------------------------------------------------------- + + //--------------------------------------------------------------- + // Plan, Apply testing + //--------------------------------------------------------------- + + // Config a string of the configuration to give to Terraform. If this + // is set, then the TestCase will execute this step with the same logic + // as a `terraform apply`. + // + // JSON Configuration Syntax can be used and is assumed whenever Config + // contains valid JSON. + Config string + + // Check is called after the Config is applied. Use this step to + // make your own API calls to check the status of things, and to + // inspect the format of the ResourceState itself. + // + // If an error is returned, the test will fail. In this case, a + // destroy plan will still be attempted. + // + // If this is nil, no check is done on this step. + Check TestCheckFunc + + // Destroy will create a destroy plan if set to true. + Destroy bool + + // ExpectNonEmptyPlan can be set to true for specific types of tests that are + // looking to verify that a diff occurs + ExpectNonEmptyPlan bool + + // ExpectError allows the construction of test cases that we expect to fail + // with an error. The specified regexp must match against the error for the + // test to pass. + ExpectError *regexp.Regexp + + // PlanOnly can be set to only run `plan` with this configuration, and not + // actually apply it. This is useful for ensuring config changes result in + // no-op plans + PlanOnly bool + + // PreventDiskCleanup can be set to true for testing terraform modules which + // require access to disk at runtime. Note that this will leave files in the + // temp folder + PreventDiskCleanup bool + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // SkipFunc enables skipping the TestStep, based on environment criteria. + // For example, this can prevent running certain steps that may be runtime + // platform or API configuration dependent. + // + // Return true with no error to skip the test step. The error return + // should be used to signify issues that prevented the function from + // completing as expected. + // + // SkipFunc is called after PreConfig but before applying the Config. + SkipFunc func() (bool, error) + + //--------------------------------------------------------------- + // ImportState testing + //--------------------------------------------------------------- + + // ImportState, if true, will test the functionality of ImportState + // by importing the resource with ResourceName (must be set) and the + // ID of that resource. + ImportState bool + + // ImportStateId is the ID to perform an ImportState operation with. + // This is optional. If it isn't set, then the resource ID is automatically + // determined by inspecting the state for ResourceName's ID. + ImportStateId string + + // ImportStateIdPrefix is the prefix added in front of ImportStateId. + // This can be useful in complex import cases, where more than one + // attribute needs to be passed on as the Import ID. Mainly in cases + // where the ID is not known, and a known prefix needs to be added to + // the unset ImportStateId field. 
+ ImportStateIdPrefix string + + // ImportStateIdFunc is a function that can be used to dynamically generate + // the ID for the ImportState tests. It is sent the state, which can be + // checked to derive the attributes necessary and generate the string in the + // desired format. + ImportStateIdFunc ImportStateIdFunc + + // ImportStateCheck checks the results of ImportState. It should be + // used to verify that the resulting value of ImportState has the + // proper resources, IDs, and attributes. + // + // Prefer ImportStateVerify over ImportStateCheck, unless the resource + // import explicitly is expected to create multiple resources (not a + // recommended resource implementation) or if attributes are imported with + // syntactically different but semantically/functionally equivalent values + // where special logic is needed. + // + // Terraform versions 1.3 and later can include data source states during + // import, which the testing framework will skip to prevent the need for + // Terraform version specific logic in provider testing. + ImportStateCheck ImportStateCheckFunc + + // ImportStateVerify, if true, will also check that the state values + // that are finally put into the state after import match for all the + // IDs returned by the Import. Note that this checks for strict equality + // and does not respect DiffSuppressFunc or CustomizeDiff. + // + // ImportStateVerifyIgnore is a list of prefixes of fields that should + // not be verified to be equal. These can be set to ephemeral fields or + // fields that can't be refreshed and don't matter. + ImportStateVerify bool + ImportStateVerifyIgnore []string + + // ImportStatePersist, if true, will update the persisted state with the + // state generated by the import operation (i.e., terraform import). When + // false (default) the state generated by the import operation is discarded + // at the end of the test step that is verifying import behavior. + ImportStatePersist bool + + //--------------------------------------------------------------- + // RefreshState testing + //--------------------------------------------------------------- + + // RefreshState, if true, will test the functionality of `terraform + // refresh` by refreshing the state, running any checks against the + // refreshed state, and running a plan to verify against unexpected plan + // differences. + // + // If the refresh is expected to result in a non-empty plan + // ExpectNonEmptyPlan should be set to true in the same TestStep. + // + // RefreshState cannot be the first TestStep and, it is mutually exclusive + // with ImportState. + RefreshState bool + + // ProviderFactories can be specified for the providers that are valid for + // this TestStep. When providers are specified at the TestStep level, all + // TestStep within a TestCase must declare providers. + // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. + // + // These are the providers that can be referenced within the test. Each key + // is an individually addressable provider. Typically you will only pass a + // single value here for the provider you are testing. Aliases are not + // supported by the test framework, so to use multiple provider instances, + // you should add additional copies to this map with unique names. 
To set + // their configuration, you would reference them similar to the following: + // + // provider "my_factory_key" { + // # ... + // } + // + // resource "my_resource" "mr" { + // provider = my_factory_key + // + // # ... + // } + ProviderFactories map[string]func() (*schema.Provider, error) + + // ProtoV5ProviderFactories serves the same purpose as ProviderFactories, + // but for protocol v5 providers defined using the terraform-plugin-go + // ProviderServer interface. When providers are specified at the TestStep + // level, all TestStep within a TestCase must declare providers. + // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. + ProtoV5ProviderFactories map[string]func() (tfprotov5.ProviderServer, error) + + // ProtoV6ProviderFactories serves the same purpose as ProviderFactories, + // but for protocol v6 providers defined using the terraform-plugin-go + // ProviderServer interface. + // The version of Terraform used in acceptance testing must be greater + // than or equal to v0.15.4 to use ProtoV6ProviderFactories. When providers + // are specified at the TestStep level, all TestStep within a TestCase must + // declare providers. + // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. + ProtoV6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error) + + // ExternalProviders are providers the TestStep relies on that should + // be downloaded from the registry during init. When providers are + // specified at the TestStep level, all TestStep within a TestCase must + // declare providers. + // + // This can also be specified at the TestCase level for all TestStep, + // however all provider specifications must be done either at the TestCase + // level or TestStep level, otherwise the testing framework will raise an + // error and fail the test. + // + // Outside specifying an earlier version of the provider under test, + // typically for state upgrader testing, this is generally only necessary + // for performing import testing where the prior TestStep configuration + // contained a provider outside the one under test. + ExternalProviders map[string]ExternalProvider +} + +// ParallelTest performs an acceptance test on a resource, allowing concurrency +// with other ParallelTest. The number of concurrent tests is controlled by the +// "go test" command -parallel flag. +// +// Tests will fail if they do not properly handle conditions to allow multiple +// tests to occur against the same resource or service (e.g. random naming). +// +// Test() function requirements and documentation also apply to this function. +func ParallelTest(t testing.T, c TestCase) { + t.Helper() + t.Parallel() + Test(t, c) +} + +// Test performs an acceptance test on a resource. +// +// Tests are not run unless an environment variable "TF_ACC" is +// set to some non-empty value. This is to avoid test cases surprising +// a user by creating real resources. +// +// Tests will fail unless the verbose flag (`go test -v`, or explicitly +// the "-test.v" flag) is set. Because some acceptance tests take quite +// a long time, we require the verbose flag so users are able to see progress +// output.
+// +// Use the ParallelTest() function to automatically set (*testing.T).Parallel() +// to enable testing concurrency. Use the UnitTest() function to automatically +// set the TestCase type IsUnitTest field. +// +// This function will automatically find or install Terraform CLI into a +// temporary directory, based on the following behavior: +// +// - If the TF_ACC_TERRAFORM_PATH environment variable is set, that +// Terraform CLI binary is used if found and executable. If not found or +// executable, an error will be returned unless the +// TF_ACC_TERRAFORM_VERSION environment variable is also set. +// - If the TF_ACC_TERRAFORM_VERSION environment variable is set, install +// and use that Terraform CLI version. +// - If both the TF_ACC_TERRAFORM_PATH and TF_ACC_TERRAFORM_VERSION +// environment variables are unset, perform a lookup for the Terraform +// CLI binary based on the operating system PATH. If not found, the +// latest available Terraform CLI binary is installed. +// +// Refer to the Env prefixed constants for additional details about these +// environment variables, and others, that control testing functionality. +func Test(t testing.T, c TestCase) { + t.Helper() + + ctx := context.Background() + ctx = logging.InitTestContext(ctx, t) + + err := c.validate(ctx) + + if err != nil { + logging.HelperResourceError(ctx, + "Test validation error", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Test validation error: %s", err) + } + + // We only run acceptance tests if an env var is set because they're + // slow and generally require some outside configuration. You can opt out + // of this by setting IsUnitTest on individual TestCases. + if os.Getenv(EnvTfAcc) == "" && !c.IsUnitTest { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", + EnvTfAcc)) + return + } + + // Copy any explicitly passed providers to factories, this is for backwards compatibility. + if len(c.Providers) > 0 { + c.ProviderFactories = map[string]func() (*schema.Provider, error){} + + for name, p := range c.Providers { + prov := p + c.ProviderFactories[name] = func() (*schema.Provider, error) { //nolint:unparam // required signature + return prov, nil + } + } + } + + logging.HelperResourceDebug(ctx, "Starting TestCase") + + // Run the PreCheck if we have it. + // This is done after the auto-configure to allow providers + // to override the default auto-configure parameters. + if c.PreCheck != nil { + logging.HelperResourceDebug(ctx, "Calling TestCase PreCheck") + + c.PreCheck() + + logging.HelperResourceDebug(ctx, "Called TestCase PreCheck") + } + + sourceDir, err := os.Getwd() + if err != nil { + t.Fatalf("Error getting working dir: %s", err) + } + helper := plugintest.AutoInitProviderHelper(ctx, sourceDir) + defer func(helper *plugintest.Helper) { + err := helper.Close() + if err != nil { + logging.HelperResourceError(ctx, "Unable to clean up temporary test files", map[string]interface{}{logging.KeyError: err}) + } + }(helper) + + runNewTest(ctx, t, c, helper) + + logging.HelperResourceDebug(ctx, "Finished TestCase") +} + +// UnitTest is a helper to force the acceptance testing harness to run in the +// normal unit test suite. This should only be used for resources that don't +// have any external dependencies. +// +// Test() function requirements and documentation also apply to this function.
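Putting the harness together, a minimal acceptance test built on Test() might look like this sketch (the provider key, factory, and resource names are hypothetical):

	func TestAccExampleThing_basic(t *testing.T) {
		resource.Test(t, resource.TestCase{
			ProviderFactories: map[string]func() (*schema.Provider, error){
				// exampleProvider() is a hypothetical constructor for the
				// provider under test.
				"example": func() (*schema.Provider, error) { return exampleProvider(), nil },
			},
			Steps: []resource.TestStep{
				{
					Config: `resource "example_thing" "a" { name = "a" }`,
					Check:  resource.TestCheckResourceAttr("example_thing.a", "name", "a"),
				},
			},
		})
	}
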
+func UnitTest(t testing.T, c TestCase) { + t.Helper() + + c.IsUnitTest = true + Test(t, c) +} + +func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[c.ResourceName]; ok { + return v, nil + } + } + } + + return nil, fmt.Errorf( + "Resource specified by ResourceName couldn't be found: %s", c.ResourceName) +} + +// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into +// a single TestCheckFunc. +// +// As a user testing their provider, this lets you decompose your checks +// into smaller pieces more easily. +// +// ComposeTestCheckFunc returns immediately on the first TestCheckFunc error. +// To aggregate all errors, use ComposeAggregateTestCheckFunc instead. +func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + for i, f := range fs { + if err := f(s); err != nil { + return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err) + } + } + + return nil + } +} + +// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into +// a single TestCheckFunc. +// +// As a user testing their provider, this lets you decompose your checks +// into smaller pieces more easily. +// +// Unlike ComposeTestCheckFunc, ComposeAggregateTestCheckFunc runs _all_ of the +// TestCheckFuncs and aggregates failures. +func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + var result []error + + for i, f := range fs { + if err := f(s); err != nil { + result = append(result, fmt.Errorf("Check %d/%d error: %w", i+1, len(fs), err)) + } + } + + return errors.Join(result...) + } +} + +// TestCheckResourceAttrSet ensures any value exists in the state for the +// given name and key combination. The opposite of this TestCheckFunc is +// TestCheckNoResourceAttr. State value checking is only recommended for +// testing Computed attributes and attribute defaults. +// +// Use this as a last resort when a more specific TestCheckFunc cannot be +// implemented, such as: +// +// - TestCheckResourceAttr: Equality checking of non-TypeSet state value. +// - TestCheckResourceAttrPair: Equality checking of non-TypeSet state +// value, based on another state value. +// - TestCheckTypeSet*: Equality checking of TypeSet state values. +// - TestMatchResourceAttr: Regular expression checking of non-TypeSet +// state value. +// - TestMatchTypeSet*: Regular expression checking on TypeSet state values. +// +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the following special key syntax to inspect underlying +// values of a list or map attribute: +// +// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element +// - .{KEY}: Map value at key, e.g.
.example to inspect the example key +// value +// +// While it is possible to check nested attributes under list and map +// attributes using the special key syntax, checking a list, map, or set +// attribute directly is not supported. Use TestCheckResourceAttr with +// the special .# or .% key syntax for those situations instead. +func TestCheckResourceAttrSet(name, key string) TestCheckFunc { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckResourceAttrSet(is, name, key) + }) +} + +// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with +// support for non-root modules +func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckResourceAttrSet(is, name, key) + }) +} + +func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { + val, ok := is.Attributes[key] + + if ok && val != "" { + return nil + } + + if _, ok := is.Attributes[key+".#"]; ok { + return fmt.Errorf( + "%s: list or set attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s). Set element value checks should use TestCheckTypeSet functions instead.", + name, + key, + key+".#", + key+".0", + ) + } + + if _, ok := is.Attributes[key+".%"]; ok { + return fmt.Errorf( + "%s: map attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s).", + name, + key, + key+".%", + key+".examplekey", + ) + } + + return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) +} + +// TestCheckResourceAttr ensures a specific value is stored in state for the +// given name and key combination. State value checking is only recommended for +// testing Computed attributes and attribute defaults. +// +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the following special key syntax to inspect list, map, and +// set attributes: +// +// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element. +// Use the TestCheckTypeSet* and TestMatchTypeSet* functions instead +// for sets. +// - .{KEY}: Map value at key, e.g. .example to inspect the example key +// value. +// - .#: Number of elements in list or set. +// - .%: Number of elements in map. +// +// The value parameter is the stringified data to check at the given key. Use +// the following attribute type rules to set the value: +// +// - Boolean: "false" or "true". +// - Float/Integer: Stringified number, such as "1.2" or "123". +// - String: No conversion necessary.
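A sketch of the key syntax above in practice (the resource, attribute names, and values are hypothetical):

	Check: resource.ComposeTestCheckFunc(
		resource.TestCheckResourceAttr("myprovider_thing.example", "name", "example"),
		// Count keys: .# for list/set length, .% for map size.
		resource.TestCheckResourceAttr("myprovider_thing.example", "ports.#", "2"),
		resource.TestCheckResourceAttr("myprovider_thing.example", "tags.%", "1"),
		// Element keys: list index (.0) and map key (.environment).
		resource.TestCheckResourceAttr("myprovider_thing.example", "ports.0", "80"),
		resource.TestCheckResourceAttr("myprovider_thing.example", "tags.environment", "test"),
	),
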
+func TestCheckResourceAttr(name, key, value string) TestCheckFunc { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckResourceAttr(is, name, key, value) + }) +} + +// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with +// support for non-root modules +func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckResourceAttr(is, name, key, value) + }) +} + +func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { + v, ok := is.Attributes[key] + + if !ok { + // Empty containers may be elided from the state. + // If the intent here is to check for an empty container, allow the key to + // also be non-existent. + if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + return nil + } + + if _, ok := is.Attributes[key+".#"]; ok { + return fmt.Errorf( + "%s: list or set attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s). Set element value checks should use TestCheckTypeSet functions instead.", + name, + key, + key+".#", + key+".0", + ) + } + + if _, ok := is.Attributes[key+".%"]; ok { + return fmt.Errorf( + "%s: map attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s).", + name, + key, + key+".%", + key+".examplekey", + ) + } + + return fmt.Errorf("%s: Attribute '%s' not found", name, key) + } + + if v != value { + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + name, + key, + value, + v) + } + + return nil +} + +// CheckResourceAttrWithFunc is the callback type used to apply custom checking logic +// when using TestCheckResourceAttrWith and a value is found for the given name and key. +// +// When this function returns an error, TestCheckResourceAttrWith will fail the check. +type CheckResourceAttrWithFunc func(value string) error + +// TestCheckResourceAttrWith ensures a value stored in state for the +// given name and key combination is checked against custom logic. +// State value checking is only recommended for testing Computed attributes +// and attribute defaults. +// +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the following special key syntax to inspect list, map, and +// set attributes: +// +// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element. +// Use the TestCheckTypeSet* and TestMatchTypeSet* functions instead +// for sets. +// - .{KEY}: Map value at key, e.g. .example to inspect the example key +// value.
+// - .#: Number of elements in list or set. +// - .%: Number of elements in map. +// +// The checkValueFunc parameter is a CheckResourceAttrWithFunc, +// and it's provided with the attribute value to apply custom checking logic, +// if it was found in the state. The function must return an error for the +// check to fail, or `nil` to succeed. +func TestCheckResourceAttrWith(name, key string, checkValueFunc CheckResourceAttrWithFunc) TestCheckFunc { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + err = testCheckResourceAttrSet(is, name, key) + if err != nil { + return err + } + + err = checkValueFunc(is.Attributes[key]) + if err != nil { + return fmt.Errorf("%s: Attribute %q value: %w", name, key, err) + } + + return nil + }) +} + +// TestCheckNoResourceAttr ensures no value exists in the state for the +// given name and key combination. The opposite of this TestCheckFunc is +// TestCheckResourceAttrSet. State value checking is only recommended for +// testing Computed attributes and attribute defaults. +// +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the following special key syntax to inspect underlying +// values of a list or map attribute: +// +// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element. +// - .{KEY}: Map value at key, e.g. .example to inspect the example key +// value. +// +// While it is possible to check nested attributes under list and map +// attributes using the special key syntax, checking a list, map, or set +// attribute directly is not supported. Use TestCheckResourceAttr with +// the special .# or .% key syntax for those situations instead. +func TestCheckNoResourceAttr(name, key string) TestCheckFunc { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckNoResourceAttr(is, name, key) + }) +} + +// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with +// support for non-root modules +func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckNoResourceAttr(is, name, key) + }) +} + +func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { + v, ok := is.Attributes[key] + + // Empty containers may sometimes be included in the state. + // If the intent here is to check for an empty container, allow the value to + // also be "0".
+	if v == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) {
+		return nil
+	}
+
+	if ok {
+		return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key)
+	}
+
+	if _, ok := is.Attributes[key+".#"]; ok {
+		return fmt.Errorf(
+			"%s: list or set attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s). Set element value checks should use TestCheckTypeSet functions instead.",
+			name,
+			key,
+			key+".#",
+			key+".0",
+		)
+	}
+
+	if _, ok := is.Attributes[key+".%"]; ok {
+		return fmt.Errorf(
+			"%s: map attribute '%s' must be checked by element count key (%s) or element value keys (e.g. %s).",
+			name,
+			key,
+			key+".%",
+			key+".examplekey",
+		)
+	}
+
+	return nil
+}
+
+// TestMatchResourceAttr ensures a value matching a regular expression is
+// stored in state for the given name and key combination. State value checking
+// is only recommended for testing Computed attributes and attribute defaults.
+//
+// For managed resources, the name parameter is a combination of the resource
+// type, a period (.), and the name label. The name for the below example
+// configuration would be "myprovider_thing.example".
+//
+// resource "myprovider_thing" "example" { ... }
+//
+// For data sources, the name parameter is a combination of the keyword "data",
+// a period (.), the data source type, a period (.), and the name label. The
+// name for the below example configuration would be
+// "data.myprovider_thing.example".
+//
+// data "myprovider_thing" "example" { ... }
+//
+// The key parameter is an attribute path in Terraform CLI 0.11 and earlier
+// "flatmap" syntax. Keys start with the attribute name of a top-level
+// attribute. Use the following special key syntax to inspect list, map, and
+// set attributes:
+//
+// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element.
+// Use the TestCheckTypeSet* and TestMatchTypeSet* functions instead
+// for sets.
+// - .{KEY}: Map value at key, e.g. .example to inspect the example key
+// value.
+// - .#: Number of elements in list or set.
+// - .%: Number of elements in map.
+//
+// The value parameter is a compiled regular expression. A typical pattern is
+// using the regexp.MustCompile() function, which will automatically ensure the
+// regular expression is supported by the Go regular expression handlers during
+// compilation.
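+//
+// An illustrative call (the resource name, attribute, and pattern here are
+// hypothetical, not part of the SDK):
+//
+//	TestMatchResourceAttr("myprovider_thing.example", "id",
+//		regexp.MustCompile(`^th-[0-9a-f]+$`))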
+func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
+	return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error {
+		is, err := primaryInstanceState(s, name)
+		if err != nil {
+			return err
+		}
+
+		return testMatchResourceAttr(is, name, key, r)
+	})
+}
+
+// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with
+// support for non-root modules
+func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc {
+	mpt := addrs.Module(mp).UnkeyedInstanceShim()
+	return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error {
+		is, err := modulePathPrimaryInstanceState(s, mpt, name)
+		if err != nil {
+			return err
+		}
+
+		return testMatchResourceAttr(is, name, key, r)
+	})
+}
+
+func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error {
+	if !r.MatchString(is.Attributes[key]) {
+		return fmt.Errorf(
+			"%s: Attribute '%s' didn't match %q, got %#v",
+			name,
+			key,
+			r.String(),
+			is.Attributes[key])
+	}
+
+	return nil
+}
+
+// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the
+// value is a pointer so that it can be updated while the test is running.
+// It will only be dereferenced at the point this step is run.
+//
+// Refer to the TestCheckResourceAttr documentation for more information about
+// setting the name, key, and value parameters.
+func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc {
+	return func(s *terraform.State) error {
+		return TestCheckResourceAttr(name, key, *value)(s)
+	}
+}
+
+// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with
+// support for non-root modules
+func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc {
+	return func(s *terraform.State) error {
+		return TestCheckModuleResourceAttr(mp, name, key, *value)(s)
+	}
+}
+
+// TestCheckResourceAttrPair ensures value equality in state between the first
+// given name and key combination and the second name and key combination.
+// State value checking is only recommended for testing Computed attributes
+// and attribute defaults.
+//
+// For managed resources, the name parameter is a combination of the resource
+// type, a period (.), and the name label. The name for the below example
+// configuration would be "myprovider_thing.example".
+//
+// resource "myprovider_thing" "example" { ... }
+//
+// For data sources, the name parameter is a combination of the keyword "data",
+// a period (.), the data source type, a period (.), and the name label. The
+// name for the below example configuration would be
+// "data.myprovider_thing.example".
+//
+// data "myprovider_thing" "example" { ... }
+//
+// The first and second names may use any combination of managed resources
+// and/or data sources.
+//
+// The key parameter is an attribute path in Terraform CLI 0.11 and earlier
+// "flatmap" syntax. Keys start with the attribute name of a top-level
+// attribute. Use the following special key syntax to inspect list, map, and
+// set attributes:
+//
+// - .{NUMBER}: List value at index, e.g. .0 to inspect the first element.
+// Use the TestCheckTypeSet* and TestMatchTypeSet* functions instead
+// for sets.
+// - .{KEY}: Map value at key, e.g. .example to inspect the example key
+// value.
+// - .#: Number of elements in list or set.
+// - .%: Number of elements in map.
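+//
+// An illustrative call comparing a managed resource attribute against the
+// same attribute read back through a data source (names are hypothetical):
+//
+//	TestCheckResourceAttrPair(
+//		"myprovider_thing.example", "name",
+//		"data.myprovider_thing.example", "name",
+//	)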
+func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { + return checkIfIndexesIntoTypeSetPair(keyFirst, keySecond, func(s *terraform.State) error { + isFirst, err := primaryInstanceState(s, nameFirst) + if err != nil { + return err + } + + isSecond, err := primaryInstanceState(s, nameSecond) + if err != nil { + return err + } + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + }) +} + +// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with +// support for non-root modules +func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { + mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() + mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSetPair(keyFirst, keySecond, func(s *terraform.State) error { + isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) + if err != nil { + return err + } + + isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond) + if err != nil { + return err + } + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + }) +} + +func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { + if nameFirst == nameSecond && keyFirst == keySecond { + return fmt.Errorf( + "comparing self: resource %s attribute %s", + nameFirst, + keyFirst, + ) + } + + vFirst, okFirst := isFirst.Attributes[keyFirst] + vSecond, okSecond := isSecond.Attributes[keySecond] + + // Container count values of 0 should not be relied upon, and not reliably + // maintained by helper/schema. For the purpose of tests, consider unset and + // 0 to be equal. + if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] && + (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) { + // they have the same suffix, and it is a collection count key. + if vFirst == "0" || vFirst == "" { + okFirst = false + } + if vSecond == "0" || vSecond == "" { + okSecond = false + } + } + + if okFirst != okSecond { + if !okFirst { + return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond) + } + return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond) + } + if !(okFirst || okSecond) { + // If they both don't exist then they are equally unset, so that's okay. 
+ return nil + } + + if vFirst != vSecond { + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + nameFirst, + keyFirst, + vSecond, + vFirst) + } + + return nil +} + +// TestCheckOutput checks an output in the Terraform configuration +func TestCheckOutput(name, value string) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if rs.Value != value { + return fmt.Errorf( + "Output '%s': expected %#v, got %#v", + name, + value, + rs) + } + + return nil + } +} + +func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if !r.MatchString(rs.Value.(string)) { + return fmt.Errorf( + "Output '%s': %#v didn't match %q", + name, + rs, + r.String()) + } + + return nil + } +} + +// modulePrimaryInstanceState returns the instance state for the given resource +// name in a ModuleState +func modulePrimaryInstanceState(ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { + rs, ok := ms.Resources[name] + if !ok { + return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) + } + + is := rs.Primary + if is == nil { + return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) + } + + return is, nil +} + +// modulePathPrimaryInstanceState returns the primary instance state for the +// given resource name in a given module path. +func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { + ms := s.ModuleByPath(mp) + if ms == nil { + return nil, fmt.Errorf("No module found at: %s", mp) + } + + return modulePrimaryInstanceState(ms, name) +} + +// primaryInstanceState returns the primary instance state for the given +// resource name in the root module. 
+func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) {
+	ms := s.RootModule()
+	return modulePrimaryInstanceState(ms, name)
+}
+
+// indexesIntoTypeSet is a heuristic to try to identify whether a flatmap-style
+// string address uses a precalculated TypeSet hash; such hashes are integers
+// that are typically large and obviously not a list index
+func indexesIntoTypeSet(key string) bool {
+	for _, part := range strings.Split(key, ".") {
+		if i, err := strconv.Atoi(part); err == nil && i > 100 {
+			return true
+		}
+	}
+	return false
+}
+
+func checkIfIndexesIntoTypeSet(key string, f TestCheckFunc) TestCheckFunc {
+	return func(s *terraform.State) error {
+		err := f(s)
+		if err != nil && s.IsBinaryDrivenTest && indexesIntoTypeSet(key) {
+			return fmt.Errorf("Error in test check: %s\nTest check address %q likely indexes into TypeSet\nThis is currently not possible in the SDK", err, key)
+		}
+		return err
+	}
+}
+
+func checkIfIndexesIntoTypeSetPair(keyFirst, keySecond string, f TestCheckFunc) TestCheckFunc {
+	return func(s *terraform.State) error {
+		err := f(s)
+		if err != nil && s.IsBinaryDrivenTest && (indexesIntoTypeSet(keyFirst) || indexesIntoTypeSet(keySecond)) {
+			return fmt.Errorf("Error in test check: %s\nTest check address %q or %q likely indexes into TypeSet\nThis is currently not possible in the SDK", err, keyFirst, keySecond)
+		}
+		return err
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go
new file mode 100644
index 0000000000..f56c885be3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go
@@ -0,0 +1,28 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package resource
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest"
+)
+
+func testStepTaint(ctx context.Context, step TestStep, wd *plugintest.WorkingDir) error {
+	if len(step.Taint) == 0 {
+		return nil
+	}
+
+	logging.HelperResourceTrace(ctx, fmt.Sprintf("Using TestStep Taint: %v", step.Taint))
+
+	for _, p := range step.Taint {
+		err := wd.Taint(ctx, p)
+		if err != nil {
+			return fmt.Errorf("error tainting resource: %s", err)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go
new file mode 100644
index 0000000000..14b247306a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go
@@ -0,0 +1,443 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp" + tfjson "github.com/hashicorp/terraform-json" + "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func runPostTestDestroy(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, providers *providerFactories, statePreDestroy *terraform.State) error { + t.Helper() + + err := runProviderCommand(ctx, t, func() error { + return wd.Destroy(ctx) + }, wd, providers) + if err != nil { + return err + } + + if c.CheckDestroy != nil { + logging.HelperResourceTrace(ctx, "Using TestCase CheckDestroy") + logging.HelperResourceDebug(ctx, "Calling TestCase CheckDestroy") + + if err := c.CheckDestroy(statePreDestroy); err != nil { + return err + } + + logging.HelperResourceDebug(ctx, "Called TestCase CheckDestroy") + } + + return nil +} + +func runNewTest(ctx context.Context, t testing.T, c TestCase, helper *plugintest.Helper) { + t.Helper() + + wd := helper.RequireNewWorkingDir(ctx, t) + + ctx = logging.TestTerraformPathContext(ctx, wd.GetHelper().TerraformExecPath()) + ctx = logging.TestWorkingDirectoryContext(ctx, wd.GetHelper().WorkingDirectory()) + + providers := &providerFactories{ + legacy: c.ProviderFactories, + protov5: c.ProtoV5ProviderFactories, + protov6: c.ProtoV6ProviderFactories, + } + + defer func() { + var statePreDestroy *terraform.State + var err error + err = runProviderCommand(ctx, t, func() error { + statePreDestroy, err = getState(ctx, t, wd) + if err != nil { + return err + } + return nil + }, wd, providers) + if err != nil { + logging.HelperResourceError(ctx, + "Error retrieving state, there may be dangling resources", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Error retrieving state, there may be dangling resources: %s", err.Error()) + return + } + + if !stateIsEmpty(statePreDestroy) { + err := runPostTestDestroy(ctx, t, c, wd, providers, statePreDestroy) + if err != nil { + logging.HelperResourceError(ctx, + "Error running post-test destroy, there may be dangling resources", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Error running post-test destroy, there may be dangling resources: %s", err.Error()) + } + } + + wd.Close() + }() + + if c.hasProviders(ctx) { + err := wd.SetConfig(ctx, c.providerConfig(ctx, false)) + + if err != nil { + logging.HelperResourceError(ctx, + "TestCase error setting provider configuration", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestCase error setting provider configuration: %s", err) + } + + err = runProviderCommand(ctx, t, func() error { + return wd.Init(ctx) + }, wd, providers) + + if err != nil { + logging.HelperResourceError(ctx, + "TestCase error running init", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestCase error running init: %s", err.Error()) + } + } + + logging.HelperResourceDebug(ctx, "Starting TestSteps") + + // use this to track last step successfully applied + // acts as default for import tests + var appliedCfg string + + for stepIndex, step := range c.Steps { + stepNumber := stepIndex + 1 // 1-based indexing for humans + ctx = logging.TestStepNumberContext(ctx, stepNumber) + + logging.HelperResourceDebug(ctx, "Starting TestStep") + + if step.PreConfig != nil { + 
logging.HelperResourceDebug(ctx, "Calling TestStep PreConfig") + step.PreConfig() + logging.HelperResourceDebug(ctx, "Called TestStep PreConfig") + } + + if step.SkipFunc != nil { + logging.HelperResourceDebug(ctx, "Calling TestStep SkipFunc") + + skip, err := step.SkipFunc() + if err != nil { + logging.HelperResourceError(ctx, + "Error calling TestStep SkipFunc", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Error calling TestStep SkipFunc: %s", err.Error()) + } + + logging.HelperResourceDebug(ctx, "Called TestStep SkipFunc") + + if skip { + t.Logf("Skipping step %d/%d due to SkipFunc", stepNumber, len(c.Steps)) + logging.HelperResourceWarn(ctx, "Skipping TestStep due to SkipFunc") + continue + } + } + + if step.Config != "" && !step.Destroy && len(step.Taint) > 0 { + err := testStepTaint(ctx, step, wd) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error tainting resources", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error tainting resources: %s", stepNumber, len(c.Steps), err) + } + } + + if step.hasProviders(ctx) { + providers = &providerFactories{ + legacy: sdkProviderFactories(c.ProviderFactories).merge(step.ProviderFactories), + protov5: protov5ProviderFactories(c.ProtoV5ProviderFactories).merge(step.ProtoV5ProviderFactories), + protov6: protov6ProviderFactories(c.ProtoV6ProviderFactories).merge(step.ProtoV6ProviderFactories), + } + + providerCfg := step.providerConfig(ctx, step.configHasProviderBlock(ctx)) + + err := wd.SetConfig(ctx, providerCfg) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error setting provider configuration", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d error setting test provider configuration: %s", stepNumber, len(c.Steps), err) + } + + err = runProviderCommand( + ctx, + t, + func() error { + return wd.Init(ctx) + }, + wd, + providers, + ) + + if err != nil { + logging.HelperResourceError(ctx, + "TestStep error running init", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("TestStep %d/%d running init: %s", stepNumber, len(c.Steps), err.Error()) + return + } + } + + if step.ImportState { + logging.HelperResourceTrace(ctx, "TestStep is ImportState mode") + + err := testStepNewImportState(ctx, t, helper, wd, step, appliedCfg, providers) + if step.ExpectError != nil { + logging.HelperResourceDebug(ctx, "Checking TestStep ExpectError") + if err == nil { + logging.HelperResourceError(ctx, + "Error running import: expected an error but got none", + ) + t.Fatalf("Step %d/%d error running import: expected an error but got none", stepNumber, len(c.Steps)) + } + if !step.ExpectError.MatchString(err.Error()) { + logging.HelperResourceError(ctx, + fmt.Sprintf("Error running import: expected an error with pattern (%s)", step.ExpectError.String()), + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Step %d/%d error running import, expected an error with pattern (%s), no match on: %s", stepNumber, len(c.Steps), step.ExpectError.String(), err) + } + } else { + if err != nil && c.ErrorCheck != nil { + logging.HelperResourceDebug(ctx, "Calling TestCase ErrorCheck") + err = c.ErrorCheck(err) + logging.HelperResourceDebug(ctx, "Called TestCase ErrorCheck") + } + if err != nil { + logging.HelperResourceError(ctx, + "Error running import", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Step %d/%d error running import: %s", stepNumber, len(c.Steps), err) + } + } + + logging.HelperResourceDebug(ctx, 
"Finished TestStep") + + continue + } + + if step.RefreshState { + logging.HelperResourceTrace(ctx, "TestStep is RefreshState mode") + + err := testStepNewRefreshState(ctx, t, wd, step, providers) + if step.ExpectError != nil { + logging.HelperResourceDebug(ctx, "Checking TestStep ExpectError") + if err == nil { + logging.HelperResourceError(ctx, + "Error running refresh: expected an error but got none", + ) + t.Fatalf("Step %d/%d error running refresh: expected an error but got none", stepNumber, len(c.Steps)) + } + if !step.ExpectError.MatchString(err.Error()) { + logging.HelperResourceError(ctx, + fmt.Sprintf("Error running refresh: expected an error with pattern (%s)", step.ExpectError.String()), + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Step %d/%d error running refresh, expected an error with pattern (%s), no match on: %s", stepNumber, len(c.Steps), step.ExpectError.String(), err) + } + } else { + if err != nil && c.ErrorCheck != nil { + logging.HelperResourceDebug(ctx, "Calling TestCase ErrorCheck") + err = c.ErrorCheck(err) + logging.HelperResourceDebug(ctx, "Called TestCase ErrorCheck") + } + if err != nil { + logging.HelperResourceError(ctx, + "Error running refresh", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Step %d/%d error running refresh: %s", stepNumber, len(c.Steps), err) + } + } + + logging.HelperResourceDebug(ctx, "Finished TestStep") + + continue + } + + if step.Config != "" { + logging.HelperResourceTrace(ctx, "TestStep is Config mode") + + err := testStepNewConfig(ctx, t, c, wd, step, providers) + if step.ExpectError != nil { + logging.HelperResourceDebug(ctx, "Checking TestStep ExpectError") + + if err == nil { + logging.HelperResourceError(ctx, + "Expected an error but got none", + ) + t.Fatalf("Step %d/%d, expected an error but got none", stepNumber, len(c.Steps)) + } + if !step.ExpectError.MatchString(err.Error()) { + logging.HelperResourceError(ctx, + fmt.Sprintf("Expected an error with pattern (%s)", step.ExpectError.String()), + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Step %d/%d, expected an error with pattern, no match on: %s", stepNumber, len(c.Steps), err) + } + } else { + if err != nil && c.ErrorCheck != nil { + logging.HelperResourceDebug(ctx, "Calling TestCase ErrorCheck") + + err = c.ErrorCheck(err) + + logging.HelperResourceDebug(ctx, "Called TestCase ErrorCheck") + } + if err != nil { + logging.HelperResourceError(ctx, + "Unexpected error", + map[string]interface{}{logging.KeyError: err}, + ) + t.Fatalf("Step %d/%d error: %s", stepNumber, len(c.Steps), err) + } + } + + appliedCfg = step.mergedConfig(ctx, c) + + logging.HelperResourceDebug(ctx, "Finished TestStep") + + continue + } + + t.Fatalf("Step %d/%d, unsupported test mode", stepNumber, len(c.Steps)) + } +} + +func getState(ctx context.Context, t testing.T, wd *plugintest.WorkingDir) (*terraform.State, error) { + t.Helper() + + jsonState, err := wd.State(ctx) + if err != nil { + return nil, err + } + state, err := shimStateFromJson(jsonState) + if err != nil { + t.Fatal(err) + } + return state, nil +} + +func stateIsEmpty(state *terraform.State) bool { + return state.Empty() || !state.HasResources() +} + +func planIsEmpty(plan *tfjson.Plan) bool { + for _, rc := range plan.ResourceChanges { + for _, a := range rc.Change.Actions { + if a != tfjson.ActionNoop { + return false + } + } + } + return true +} + +func testIDRefresh(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, r 
*terraform.ResourceState, providers *providerFactories) error { + t.Helper() + + // Build the state. The state is just the resource with an ID. There + // are no attributes. We only set what is needed to perform a refresh. + state := terraform.NewState() + state.RootModule().Resources = make(map[string]*terraform.ResourceState) + state.RootModule().Resources[c.IDRefreshName] = &terraform.ResourceState{} + + // Temporarily set the config to a minimal provider config for the refresh + // test. After the refresh we can reset it. + err := wd.SetConfig(ctx, c.providerConfig(ctx, step.configHasProviderBlock(ctx))) + if err != nil { + t.Fatalf("Error setting import test config: %s", err) + } + defer func() { + err = wd.SetConfig(ctx, step.Config) + if err != nil { + t.Fatalf("Error resetting test config: %s", err) + } + }() + + // Refresh! + err = runProviderCommand(ctx, t, func() error { + err = wd.Refresh(ctx) + if err != nil { + t.Fatalf("Error running terraform refresh: %s", err) + } + state, err = getState(ctx, t, wd) + if err != nil { + return err + } + return nil + }, wd, providers) + if err != nil { + return err + } + + // Verify attribute equivalence. + actualR := state.RootModule().Resources[c.IDRefreshName] + if actualR == nil { + return fmt.Errorf("Resource gone!") + } + if actualR.Primary == nil { + return fmt.Errorf("Resource has no primary instance") + } + actual := actualR.Primary.Attributes + expected := r.Primary.Attributes + + if len(c.IDRefreshIgnore) > 0 { + logging.HelperResourceTrace(ctx, fmt.Sprintf("Using TestCase IDRefreshIgnore: %v", c.IDRefreshIgnore)) + } + + // Remove fields we're ignoring + for _, v := range c.IDRefreshIgnore { + for k := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + if diff := cmp.Diff(expected, actual); diff != "" { + return fmt.Errorf("IDRefreshName attributes not equivalent. Difference is shown below. The - symbol indicates attributes missing after refresh.\n\n%s", diff) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go new file mode 100644 index 0000000000..a52008768f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go @@ -0,0 +1,244 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "context" + "errors" + "fmt" + + tfjson "github.com/hashicorp/terraform-json" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testStepNewConfig(ctx context.Context, t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep, providers *providerFactories) error { + t.Helper() + + err := wd.SetConfig(ctx, step.mergedConfig(ctx, c)) + if err != nil { + return fmt.Errorf("Error setting config: %w", err) + } + + // require a refresh before applying + // failing to do this will result in data sources not being updated + err = runProviderCommand(ctx, t, func() error { + return wd.Refresh(ctx) + }, wd, providers) + if err != nil { + return fmt.Errorf("Error running pre-apply refresh: %w", err) + } + + // If this step is a PlanOnly step, skip over this first Plan and + // subsequent Apply, and use the follow-up Plan that checks for + // permadiffs + if !step.PlanOnly { + logging.HelperResourceDebug(ctx, "Running Terraform CLI plan and apply") + + // Plan! + err := runProviderCommand(ctx, t, func() error { + if step.Destroy { + return wd.CreateDestroyPlan(ctx) + } + return wd.CreatePlan(ctx) + }, wd, providers) + if err != nil { + return fmt.Errorf("Error running pre-apply plan: %w", err) + } + + // We need to keep a copy of the state prior to destroying such + // that the destroy steps can verify their behavior in the + // check function + var stateBeforeApplication *terraform.State + err = runProviderCommand(ctx, t, func() error { + stateBeforeApplication, err = getState(ctx, t, wd) + if err != nil { + return err + } + return nil + }, wd, providers) + if err != nil { + return fmt.Errorf("Error retrieving pre-apply state: %w", err) + } + + // Apply the diff, creating real resources + err = runProviderCommand(ctx, t, func() error { + return wd.Apply(ctx) + }, wd, providers) + if err != nil { + if step.Destroy { + return fmt.Errorf("Error running destroy: %w", err) + } + return fmt.Errorf("Error running apply: %w", err) + } + + // Get the new state + var state *terraform.State + err = runProviderCommand(ctx, t, func() error { + state, err = getState(ctx, t, wd) + if err != nil { + return err + } + return nil + }, wd, providers) + if err != nil { + return fmt.Errorf("Error retrieving state after apply: %w", err) + } + + // Run any configured checks + if step.Check != nil { + logging.HelperResourceTrace(ctx, "Using TestStep Check") + + state.IsBinaryDrivenTest = true + if step.Destroy { + if err := step.Check(stateBeforeApplication); err != nil { + return fmt.Errorf("Check failed: %w", err) + } + } else { + if err := step.Check(state); err != nil { + return fmt.Errorf("Check failed: %w", err) + } + } + } + } + + // Test for perpetual diffs by performing a plan, a refresh, and another plan + logging.HelperResourceDebug(ctx, "Running Terraform CLI plan to check for perpetual differences") + + // do a plan + err = runProviderCommand(ctx, t, func() error { + if step.Destroy { + return wd.CreateDestroyPlan(ctx) + } + return wd.CreatePlan(ctx) + }, wd, providers) + if err != nil { + return fmt.Errorf("Error running post-apply plan: %w", err) + } + + var plan *tfjson.Plan + err = runProviderCommand(ctx, t, func() error { + var err error + plan, err = wd.SavedPlan(ctx) + return err + }, wd, providers) + if err != nil { + 
return fmt.Errorf("Error retrieving post-apply plan: %w", err) + } + + if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { + var stdout string + err = runProviderCommand(ctx, t, func() error { + var err error + stdout, err = wd.SavedPlanRawStdout(ctx) + return err + }, wd, providers) + if err != nil { + return fmt.Errorf("Error retrieving formatted plan output: %w", err) + } + return fmt.Errorf("After applying this test step, the plan was not empty.\nstdout:\n\n%s", stdout) + } + + // do a refresh + if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { + err := runProviderCommand(ctx, t, func() error { + return wd.Refresh(ctx) + }, wd, providers) + if err != nil { + return fmt.Errorf("Error running post-apply refresh: %w", err) + } + } + + // do another plan + err = runProviderCommand(ctx, t, func() error { + if step.Destroy { + return wd.CreateDestroyPlan(ctx) + } + return wd.CreatePlan(ctx) + }, wd, providers) + if err != nil { + return fmt.Errorf("Error running second post-apply plan: %w", err) + } + + err = runProviderCommand(ctx, t, func() error { + var err error + plan, err = wd.SavedPlan(ctx) + return err + }, wd, providers) + if err != nil { + return fmt.Errorf("Error retrieving second post-apply plan: %w", err) + } + + // check if plan is empty + if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { + var stdout string + err = runProviderCommand(ctx, t, func() error { + var err error + stdout, err = wd.SavedPlanRawStdout(ctx) + return err + }, wd, providers) + if err != nil { + return fmt.Errorf("Error retrieving formatted second plan output: %w", err) + } + return fmt.Errorf("After applying this test step and performing a `terraform refresh`, the plan was not empty.\nstdout\n\n%s", stdout) + } else if step.ExpectNonEmptyPlan && planIsEmpty(plan) { + return errors.New("Expected a non-empty plan, but got an empty plan") + } + + // ID-ONLY REFRESH + // If we've never checked an id-only refresh and our state isn't + // empty, find the first resource and test it. + if c.IDRefreshName != "" { + logging.HelperResourceTrace(ctx, "Using TestCase IDRefreshName") + + var state *terraform.State + + err = runProviderCommand(ctx, t, func() error { + state, err = getState(ctx, t, wd) + if err != nil { + return err + } + return nil + }, wd, providers) + + if err != nil { + return err + } + + if state.Empty() { + return nil + } + + var idRefreshCheck *terraform.ResourceState + + // Find the first non-nil resource in the state + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[c.IDRefreshName]; ok { + idRefreshCheck = v + } + + break + } + } + + // If we have an instance to check for refreshes, do it + // immediately. We do it in the middle of another test + // because it shouldn't affect the overall state (refresh + // is read-only semantically) and we want to fail early if + // this fails. If refresh isn't read-only, then this will have + // caught a different bug. 
+ if idRefreshCheck != nil { + if err := testIDRefresh(ctx, t, c, wd, step, idRefreshCheck, providers); err != nil { + return fmt.Errorf( + "[ERROR] Test: ID-only test failed: %s", err) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go new file mode 100644 index 0000000000..4ddf56c5b2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go @@ -0,0 +1,281 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testStepNewImportState(ctx context.Context, t testing.T, helper *plugintest.Helper, wd *plugintest.WorkingDir, step TestStep, cfg string, providers *providerFactories) error { + t.Helper() + + if step.ResourceName == "" { + t.Fatal("ResourceName is required for an import state test") + } + + // get state from check sequence + var state *terraform.State + var err error + err = runProviderCommand(ctx, t, func() error { + state, err = getState(ctx, t, wd) + if err != nil { + return err + } + return nil + }, wd, providers) + if err != nil { + t.Fatalf("Error getting state: %s", err) + } + + // Determine the ID to import + var importId string + switch { + case step.ImportStateIdFunc != nil: + logging.HelperResourceTrace(ctx, "Using TestStep ImportStateIdFunc for import identifier") + + var err error + + logging.HelperResourceDebug(ctx, "Calling TestStep ImportStateIdFunc") + + importId, err = step.ImportStateIdFunc(state) + + if err != nil { + t.Fatal(err) + } + + logging.HelperResourceDebug(ctx, "Called TestStep ImportStateIdFunc") + case step.ImportStateId != "": + logging.HelperResourceTrace(ctx, "Using TestStep ImportStateId for import identifier") + + importId = step.ImportStateId + default: + logging.HelperResourceTrace(ctx, "Using resource identifier for import identifier") + + resource, err := testResource(step, state) + if err != nil { + t.Fatal(err) + } + importId = resource.Primary.ID + } + + if step.ImportStateIdPrefix != "" { + logging.HelperResourceTrace(ctx, "Prepending TestStep ImportStateIdPrefix for import identifier") + + importId = step.ImportStateIdPrefix + importId + } + + logging.HelperResourceTrace(ctx, fmt.Sprintf("Using import identifier: %s", importId)) + + // Create working directory for import tests + if step.Config == "" { + logging.HelperResourceTrace(ctx, "Using prior TestStep Config for import") + + step.Config = cfg + if step.Config == "" { + t.Fatal("Cannot import state with no specified config") + } + } + + var importWd *plugintest.WorkingDir + + // Use the same working directory to persist the state from import + if step.ImportStatePersist { + importWd = wd + } else { + importWd = helper.RequireNewWorkingDir(ctx, t) + defer importWd.Close() + } + + err = importWd.SetConfig(ctx, step.Config) + if err != nil { + t.Fatalf("Error setting test config: %s", err) + } + + logging.HelperResourceDebug(ctx, "Running Terraform CLI init and import") + + if !step.ImportStatePersist { + err = runProviderCommand(ctx, t, func() error { + return 
importWd.Init(ctx) + }, importWd, providers) + if err != nil { + t.Fatalf("Error running init: %s", err) + } + } + + err = runProviderCommand(ctx, t, func() error { + return importWd.Import(ctx, step.ResourceName, importId) + }, importWd, providers) + if err != nil { + return err + } + + var importState *terraform.State + err = runProviderCommand(ctx, t, func() error { + importState, err = getState(ctx, t, importWd) + if err != nil { + return err + } + return nil + }, importWd, providers) + if err != nil { + t.Fatalf("Error getting state: %s", err) + } + + // Go through the imported state and verify + if step.ImportStateCheck != nil { + logging.HelperResourceTrace(ctx, "Using TestStep ImportStateCheck") + + var states []*terraform.InstanceState + for address, r := range importState.RootModule().Resources { + if strings.HasPrefix(address, "data.") { + continue + } + + if r.Primary == nil { + continue + } + + is := r.Primary.DeepCopy() + is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type + states = append(states, is) + } + + logging.HelperResourceDebug(ctx, "Calling TestStep ImportStateCheck") + + if err := step.ImportStateCheck(states); err != nil { + t.Fatal(err) + } + + logging.HelperResourceDebug(ctx, "Called TestStep ImportStateCheck") + } + + // Verify that all the states match + if step.ImportStateVerify { + logging.HelperResourceTrace(ctx, "Using TestStep ImportStateVerify") + + // Ensure that we do not match against data sources as they + // cannot be imported and are not what we want to verify. + // Mode is not present in ResourceState so we use the + // stringified ResourceStateKey for comparison. + newResources := make(map[string]*terraform.ResourceState) + for k, v := range importState.RootModule().Resources { + if !strings.HasPrefix(k, "data.") { + newResources[k] = v + } + } + oldResources := make(map[string]*terraform.ResourceState) + for k, v := range state.RootModule().Resources { + if !strings.HasPrefix(k, "data.") { + oldResources[k] = v + } + } + + for _, r := range newResources { + // Find the existing resource + var oldR *terraform.ResourceState + for _, r2 := range oldResources { + + if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type && r2.Provider == r.Provider { + oldR = r2 + break + } + } + if oldR == nil || oldR.Primary == nil { + t.Fatalf( + "Failed state verification, resource with ID %s not found", + r.Primary.ID) + } + + // don't add empty flatmapped containers, so we can more easily + // compare the attributes + skipEmpty := func(k, v string) bool { + if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { + if v == "0" { + return true + } + } + return false + } + + // Compare their attributes + actual := make(map[string]string) + for k, v := range r.Primary.Attributes { + if skipEmpty(k, v) { + continue + } + actual[k] = v + } + + expected := make(map[string]string) + for k, v := range oldR.Primary.Attributes { + if skipEmpty(k, v) { + continue + } + expected[k] = v + } + + // Remove fields we're ignoring + for _, v := range step.ImportStateVerifyIgnore { + for k := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + // timeouts are only _sometimes_ added to state. To + // account for this, just don't compare timeouts at + // all. 
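+			// e.g. both a nested key such as "timeouts.create" and a bare
+			// "timeouts" key are removed from each side before the
+			// comparison below ("create" here is illustrative).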
+ for k := range actual { + if strings.HasPrefix(k, "timeouts.") { + delete(actual, k) + } + if k == "timeouts" { + delete(actual, k) + } + } + for k := range expected { + if strings.HasPrefix(k, "timeouts.") { + delete(expected, k) + } + if k == "timeouts" { + delete(expected, k) + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + // go-cmp tries to show surrounding identical map key/value for + // context of differences, which may be confusing. + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + if diff := cmp.Diff(expected, actual); diff != "" { + return fmt.Errorf("ImportStateVerify attributes not equivalent. Difference is shown below. The - symbol indicates attributes missing after import.\n\n%s", diff) + } + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_refresh_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_refresh_state.go new file mode 100644 index 0000000000..627190a9d1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_refresh_state.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "context" + "fmt" + + tfjson "github.com/hashicorp/terraform-json" + "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testStepNewRefreshState(ctx context.Context, t testing.T, wd *plugintest.WorkingDir, step TestStep, providers *providerFactories) error { + t.Helper() + + var err error + // Explicitly ensure prior state exists before refresh. 
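+	// Note: TestStep validation rejects RefreshState as the first step, so a
+	// prior step is expected to have produced state by the time this runs.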
+	err = runProviderCommand(ctx, t, func() error {
+		_, err = getState(ctx, t, wd)
+		if err != nil {
+			return err
+		}
+		return nil
+	}, wd, providers)
+	if err != nil {
+		t.Fatalf("Error getting state: %s", err)
+	}
+
+	err = runProviderCommand(ctx, t, func() error {
+		return wd.Refresh(ctx)
+	}, wd, providers)
+	if err != nil {
+		return err
+	}
+
+	var refreshState *terraform.State
+	err = runProviderCommand(ctx, t, func() error {
+		refreshState, err = getState(ctx, t, wd)
+		if err != nil {
+			return err
+		}
+		return nil
+	}, wd, providers)
+	if err != nil {
+		t.Fatalf("Error getting state: %s", err)
+	}
+
+	// Go through the refreshed state and verify
+	if step.Check != nil {
+		logging.HelperResourceDebug(ctx, "Calling TestStep Check for RefreshState")
+
+		if err := step.Check(refreshState); err != nil {
+			t.Fatal(err)
+		}
+
+		logging.HelperResourceDebug(ctx, "Called TestStep Check for RefreshState")
+	}
+
+	// do a plan
+	err = runProviderCommand(ctx, t, func() error {
+		return wd.CreatePlan(ctx)
+	}, wd, providers)
+	if err != nil {
+		return fmt.Errorf("Error running post-apply plan: %w", err)
+	}
+
+	var plan *tfjson.Plan
+	err = runProviderCommand(ctx, t, func() error {
+		var err error
+		plan, err = wd.SavedPlan(ctx)
+		return err
+	}, wd, providers)
+	if err != nil {
+		return fmt.Errorf("Error retrieving post-apply plan: %w", err)
+	}
+
+	if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan {
+		var stdout string
+		err = runProviderCommand(ctx, t, func() error {
+			var err error
+			stdout, err = wd.SavedPlanRawStdout(ctx)
+			return err
+		}, wd, providers)
+		if err != nil {
+			return fmt.Errorf("Error retrieving formatted plan output: %w", err)
+		}
+		return fmt.Errorf("After refreshing state during this test step, a followup plan was not empty.\nstdout:\n\n%s", stdout)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go
new file mode 100644
index 0000000000..8f5a731c32
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go
@@ -0,0 +1,361 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// These test helpers were developed by the AWS provider team at HashiCorp.
+
+package resource
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+)
+
+const (
+	sentinelIndex = "*"
+)
+
+// TestCheckTypeSetElemNestedAttrs ensures a subset map of values is stored in
+// state for the given name and key combination of attributes nested under a
+// list or set block. Use this TestCheckFunc in preference over non-set
+// variants to simplify testing code and ensure compatibility with indices,
+// which can easily change with schema changes. State value checking is only
+// recommended for testing Computed attributes and attribute defaults.
+//
+// For managed resources, the name parameter is a combination of the resource
+// type, a period (.), and the name label. The name for the below example
+// configuration would be "myprovider_thing.example".
+//
+// resource "myprovider_thing" "example" { ... }
+//
+// For data sources, the name parameter is a combination of the keyword "data",
+// a period (.), the data source type, a period (.), and the name label. The
+// name for the below example configuration would be
+// "data.myprovider_thing.example".
+//
+// data "myprovider_thing" "example" { ... }
+//
+// The key parameter is an attribute path in Terraform CLI 0.11 and earlier
+// "flatmap" syntax. Keys start with the attribute name of a top-level
+// attribute. Use the sentinel value '*' to replace the element indexing into
+// a list or set. The sentinel value can be used for each list or set index, if
+// there are multiple lists or sets in the attribute path.
+//
+// The values parameter is the map of attribute names to attribute values
+// expected to be nested under the list or set.
+//
+// You may check for unset nested attributes; however, this will also match
+// keys set to an empty string. Use a map with at least 1 non-empty value.
+//
+// map[string]string{
+// "key1": "value",
+// "key2": "",
+// }
+//
+// If the values map is not granular enough, it is possible to match an element
+// you were not intending to in the set. Provide the most complete mapping of
+// attributes possible to be sure the unique element exists.
func TestCheckTypeSetElemNestedAttrs(name, attr string, values map[string]string) TestCheckFunc {
+	return func(s *terraform.State) error {
+		is, err := primaryInstanceState(s, name)
+		if err != nil {
+			return err
+		}
+
+		attrParts := strings.Split(attr, ".")
+		if attrParts[len(attrParts)-1] != sentinelIndex {
+			return fmt.Errorf("%q does not end with the special value %q", attr, sentinelIndex)
+		}
+		// Account for cases where the user is trying to see if the value is
+		// unset/empty. There may be ambiguous scenarios where a field was
+		// deliberately unset vs. set to the empty string; this will match
+		// both, which may be a false positive.
+		var matchCount int
+		for _, v := range values {
+			if v != "" {
+				matchCount++
+			}
+		}
+		if matchCount == 0 {
+			return fmt.Errorf("%#v has no non-empty values", values)
+		}
+
+		if testCheckTypeSetElemNestedAttrsInState(is, attrParts, matchCount, values) {
+			return nil
+		}
+		return fmt.Errorf("%q no TypeSet element %q, with nested attrs %#v in state: %#v", name, attr, values, is.Attributes)
+	}
+}
+
+// TestMatchTypeSetElemNestedAttrs ensures a subset map of values, compared by
+// regular expressions, is stored in state for the given name and key
+// combination of attributes nested under a list or set block. Use this
+// TestCheckFunc in preference over non-set variants to simplify testing code
+// and ensure compatibility with indices, which can easily change with schema
+// changes. State value checking is only recommended for testing Computed
+// attributes and attribute defaults.
+//
+// For managed resources, the name parameter is a combination of the resource
+// type, a period (.), and the name label. The name for the below example
+// configuration would be "myprovider_thing.example".
+//
+// resource "myprovider_thing" "example" { ... }
+//
+// For data sources, the name parameter is a combination of the keyword "data",
+// a period (.), the data source type, a period (.), and the name label. The
+// name for the below example configuration would be
+// "data.myprovider_thing.example".
+//
+// data "myprovider_thing" "example" { ... }
+//
+// The key parameter is an attribute path in Terraform CLI 0.11 and earlier
+// "flatmap" syntax. Keys start with the attribute name of a top-level
+// attribute. Use the sentinel value '*' to replace the element indexing into
+// a list or set. The sentinel value can be used for each list or set index, if
+// there are multiple lists or sets in the attribute path.
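+//
+// For example (the attribute names are hypothetical), the path "rule.*"
+// addresses any element of a "rule" set, and "rule.*.filter.*" any element
+// of a "filter" set nested under it.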
+//
+// The values parameter is the map of attribute names to regular expressions
+// for matching attribute values expected to be nested under the list or set.
+//
+// You may check for unset nested attributes; however, this will also match
+// keys set to an empty string. Use a map with at least 1 non-empty value.
+//
+// map[string]*regexp.Regexp{
+// "key1": regexp.MustCompile(`^value`),
+// "key2": regexp.MustCompile(`^$`),
+// }
+//
+// If the values map is not granular enough, it is possible to match an element
+// you were not intending to in the set. Provide the most complete mapping of
+// attributes possible to be sure the unique element exists.
+func TestMatchTypeSetElemNestedAttrs(name, attr string, values map[string]*regexp.Regexp) TestCheckFunc {
+	return func(s *terraform.State) error {
+		is, err := primaryInstanceState(s, name)
+		if err != nil {
+			return err
+		}
+
+		attrParts := strings.Split(attr, ".")
+		if attrParts[len(attrParts)-1] != sentinelIndex {
+			return fmt.Errorf("%q does not end with the special value %q", attr, sentinelIndex)
+		}
+		// Account for cases where the user is trying to see if the value is
+		// unset/empty. There may be ambiguous scenarios where a field was
+		// deliberately unset vs. set to the empty string; this will match
+		// both, which may be a false positive.
+		var matchCount int
+		for _, v := range values {
+			if v != nil {
+				matchCount++
+			}
+		}
+		if matchCount == 0 {
+			return fmt.Errorf("%#v has no non-empty values", values)
+		}
+
+		if testCheckTypeSetElemNestedAttrsInState(is, attrParts, matchCount, values) {
+			return nil
+		}
+		return fmt.Errorf("%q no TypeSet element %q, with the regex provided, match in state: %#v", name, attr, is.Attributes)
+	}
+}
+
+// TestCheckTypeSetElemAttr ensures a specific value is stored in state for the
+// given name and key combination under a list or set whose elements are simple
+// values. The key should use the sentinel value '*' for indexing into the
+// TypeSet. Use this TestCheckFunc in preference over non-set variants to
+// simplify testing code and ensure compatibility with indices, which can
+// easily change with schema changes. State value checking is only recommended
+// for testing Computed attributes and attribute defaults.
+//
+// For managed resources, the name parameter is a combination of the resource
+// type, a period (.), and the name label. The name for the below example
+// configuration would be "myprovider_thing.example".
+//
+// resource "myprovider_thing" "example" { ... }
+//
+// For data sources, the name parameter is a combination of the keyword "data",
+// a period (.), the data source type, a period (.), and the name label. The
+// name for the below example configuration would be
+// "data.myprovider_thing.example".
+//
+// data "myprovider_thing" "example" { ... }
+//
+// The key parameter is an attribute path in Terraform CLI 0.11 and earlier
+// "flatmap" syntax. Keys start with the attribute name of a top-level
+// attribute. Use the sentinel value '*' to replace the element indexing into
+// a list or set. The sentinel value can be used for each list or set index, if
+// there are multiple lists or sets in the attribute path.
+//
+// The value parameter is the stringified data to check at the given key.
Use +// the following attribute type rules to set the value: +// +// - Boolean: "false" or "true". +// - Float/Integer: Stringified number, such as "1.2" or "123". +// - String: No conversion necessary. +func TestCheckTypeSetElemAttr(name, attr, value string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + err = testCheckTypeSetElem(is, attr, value) + if err != nil { + return fmt.Errorf("%q error: %s", name, err) + } + + return nil + } +} + +// TestCheckTypeSetElemAttrPair ensures value equality in state between the +// first given name and key combination and the second name and key +// combination. State value checking is only recommended for testing Computed +// attributes and attribute defaults. +// +// For managed resources, the name parameter is a combination of the resource +// type, a period (.), and the name label. The name for the below example +// configuration would be "myprovider_thing.example". +// +// resource "myprovider_thing" "example" { ... } +// +// For data sources, the name parameter is a combination of the keyword "data", +// a period (.), the data source type, a period (.), and the name label. The +// name for the below example configuration would be +// "data.myprovider_thing.example". +// +// data "myprovider_thing" "example" { ... } +// +// The first and second names may use any combination of managed resources +// and/or data sources. +// +// The key parameter is an attribute path in Terraform CLI 0.11 and earlier +// "flatmap" syntax. Keys start with the attribute name of a top-level +// attribute. Use the sentinel value '*' to replace the element indexing into +// a list or set. The sentinel value can be used for each list or set index, if +// there are multiple lists or sets in the attribute path. 
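+//
+// An illustrative call (the resource and attribute names are hypothetical),
+// asserting that the ID of one resource appears as an element of another
+// resource's set attribute:
+//
+//	TestCheckTypeSetElemAttrPair(
+//		"myprovider_thing.example", "security_group_ids.*",
+//		"myprovider_group.example", "id",
+//	)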
+func TestCheckTypeSetElemAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { + return func(s *terraform.State) error { + isFirst, err := primaryInstanceState(s, nameFirst) + if err != nil { + return err + } + + isSecond, err := primaryInstanceState(s, nameSecond) + if err != nil { + return err + } + + vSecond, okSecond := isSecond.Attributes[keySecond] + if !okSecond { + return fmt.Errorf("%s: Attribute %q not set, cannot be checked against TypeSet", nameSecond, keySecond) + } + + return testCheckTypeSetElemPair(isFirst, keyFirst, vSecond) + } +} + +func testCheckTypeSetElem(is *terraform.InstanceState, attr, value string) error { + attrParts := strings.Split(attr, ".") + if attrParts[len(attrParts)-1] != sentinelIndex { + return fmt.Errorf("%q does not end with the special value %q", attr, sentinelIndex) + } + for stateKey, stateValue := range is.Attributes { + if stateValue == value { + stateKeyParts := strings.Split(stateKey, ".") + if len(stateKeyParts) == len(attrParts) { + for i := range attrParts { + if attrParts[i] != stateKeyParts[i] && attrParts[i] != sentinelIndex { + break + } + if i == len(attrParts)-1 { + return nil + } + } + } + } + } + + return fmt.Errorf("no TypeSet element %q, with value %q in state: %#v", attr, value, is.Attributes) +} + +func testCheckTypeSetElemPair(is *terraform.InstanceState, attr, value string) error { + attrParts := strings.Split(attr, ".") + for stateKey, stateValue := range is.Attributes { + if stateValue == value { + stateKeyParts := strings.Split(stateKey, ".") + if len(stateKeyParts) == len(attrParts) { + for i := range attrParts { + if attrParts[i] != stateKeyParts[i] && attrParts[i] != sentinelIndex { + break + } + if i == len(attrParts)-1 { + return nil + } + } + } + } + } + + return fmt.Errorf("no TypeSet element %q, with value %q in state: %#v", attr, value, is.Attributes) +} + +// testCheckTypeSetElemNestedAttrsInState is a helper function +// to determine if nested attributes and their values are equal to those +// in the instance state. Currently, the function accepts a "values" param of type +// map[string]string or map[string]*regexp.Regexp. +// Returns true if all attributes match, else false. 
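+// For example, with attrParts = ["rule", "*"] and matchCount = 2, state keys
+// "rule.2634997188.name" and "rule.2634997188.port" must both match their
+// expected values under the same set hash ("2634997188") for the lookup to
+// succeed. (The hash and attribute names here are illustrative.)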
+func testCheckTypeSetElemNestedAttrsInState(is *terraform.InstanceState, attrParts []string, matchCount int, values interface{}) bool { + matches := make(map[string]int) + + for stateKey, stateValue := range is.Attributes { + stateKeyParts := strings.Split(stateKey, ".") + // a Set/List item with nested attrs would have a flatmap address of + // at least length 3 + // foo.0.name = "bar" + if len(stateKeyParts) < 3 || len(attrParts) > len(stateKeyParts) { + continue + } + var pathMatch bool + for i := range attrParts { + if attrParts[i] != stateKeyParts[i] && attrParts[i] != sentinelIndex { + break + } + if i == len(attrParts)-1 { + pathMatch = true + } + } + if !pathMatch { + continue + } + id := stateKeyParts[len(attrParts)-1] + nestedAttr := strings.Join(stateKeyParts[len(attrParts):], ".") + + var match bool + switch t := values.(type) { + case map[string]string: + if v, keyExists := t[nestedAttr]; keyExists && v == stateValue { + match = true + } + case map[string]*regexp.Regexp: + if v, keyExists := t[nestedAttr]; keyExists && v != nil && v.MatchString(stateValue) { + match = true + } + } + if match { + matches[id] = matches[id] + 1 + if matches[id] == matchCount { + return true + } + } + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go new file mode 100644 index 0000000000..9b759bde03 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_providers.go @@ -0,0 +1,97 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "context" + "fmt" + "regexp" + "strings" +) + +var configProviderBlockRegex = regexp.MustCompile(`provider "?[a-zA-Z0-9_-]+"? {`) + +// configHasProviderBlock returns true if the Config has declared a provider +// configuration block, e.g. provider "examplecloud" {...} +func (s TestStep) configHasProviderBlock(_ context.Context) bool { + return configProviderBlockRegex.MatchString(s.Config) +} + +// configHasTerraformBlock returns true if the Config has declared a terraform +// configuration block, e.g. terraform {...} +func (s TestStep) configHasTerraformBlock(_ context.Context) bool { + return strings.Contains(s.Config, "terraform {") +} + +// mergedConfig prepends any necessary terraform configuration blocks to the +// TestStep Config. +// +// If there are ExternalProviders configurations in either the TestCase or +// TestStep, the terraform configuration block should be included with the +// step configuration to prevent errors with providers outside the +// registry.terraform.io hostname or outside the hashicorp namespace. +func (s TestStep) mergedConfig(ctx context.Context, testCase TestCase) string { + var config strings.Builder + + // Prevent issues with existing configurations containing the terraform + // configuration block. + if s.configHasTerraformBlock(ctx) { + config.WriteString(s.Config) + + return config.String() + } + + if testCase.hasProviders(ctx) { + config.WriteString(testCase.providerConfig(ctx, s.configHasProviderBlock(ctx))) + } else { + config.WriteString(s.providerConfig(ctx, s.configHasProviderBlock(ctx))) + } + + config.WriteString(s.Config) + + return config.String() +} + +// providerConfig takes the list of providers in a TestStep and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. 
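+//
+// For example, a hypothetical entry under the name "random", written as a
+// user of the package would write it,
+//
+//	ExternalProviders: map[string]resource.ExternalProvider{
+//		"random": {
+//			Source:            "registry.terraform.io/hashicorp/random",
+//			VersionConstraint: "3.5.1",
+//		},
+//	},
+//
+// renders roughly as:
+//
+//	terraform {
+//	  required_providers {
+//	    random = {
+//	      source  = "registry.terraform.io/hashicorp/random"
+//	      version = "3.5.1"
+//	    }
+//	  }
+//	}
+//
+//	provider "random" {}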
+func (s TestStep) providerConfig(_ context.Context, skipProviderBlock bool) string { + var providerBlocks, requiredProviderBlocks strings.Builder + + for name, externalProvider := range s.ExternalProviders { + if !skipProviderBlock { + providerBlocks.WriteString(fmt.Sprintf("provider %q {}\n", name)) + } + + if externalProvider.Source == "" && externalProvider.VersionConstraint == "" { + continue + } + + requiredProviderBlocks.WriteString(fmt.Sprintf(" %s = {\n", name)) + + if externalProvider.Source != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" source = %q\n", externalProvider.Source)) + } + + if externalProvider.VersionConstraint != "" { + requiredProviderBlocks.WriteString(fmt.Sprintf(" version = %q\n", externalProvider.VersionConstraint)) + } + + requiredProviderBlocks.WriteString(" }\n") + } + + if requiredProviderBlocks.Len() > 0 { + return fmt.Sprintf(` +terraform { + required_providers { +%[1]s + } +} + +%[2]s +`, strings.TrimSuffix(requiredProviderBlocks.String(), "\n"), providerBlocks.String()) + } + + return providerBlocks.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go new file mode 100644 index 0000000000..7dbf883b50 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/teststep_validate.go @@ -0,0 +1,128 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package resource + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +// testStepValidateRequest contains data for the (TestStep).validate() method. +type testStepValidateRequest struct { + // StepNumber is the index of the TestStep in the TestCase.Steps. + StepNumber int + + // TestCaseHasProviders is enabled if the TestCase has set any of + // ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, + // or ProviderFactories. + TestCaseHasProviders bool +} + +// hasProviders returns true if the TestStep has set any of the +// ExternalProviders, ProtoV5ProviderFactories, ProtoV6ProviderFactories, or +// ProviderFactories fields. +func (s TestStep) hasProviders(_ context.Context) bool { + if len(s.ExternalProviders) > 0 { + return true + } + + if len(s.ProtoV5ProviderFactories) > 0 { + return true + } + + if len(s.ProtoV6ProviderFactories) > 0 { + return true + } + + if len(s.ProviderFactories) > 0 { + return true + } + + return false +} + +// validate ensures the TestStep is valid based on the following criteria: +// +// - Config or ImportState or RefreshState is set. +// - Config and RefreshState are not both set. +// - RefreshState and Destroy are not both set. +// - RefreshState is not the first TestStep. +// - Providers are not specified (ExternalProviders, +// ProtoV5ProviderFactories, ProtoV6ProviderFactories, ProviderFactories) +// if specified at the TestCase level. +// - Providers are specified (ExternalProviders, ProtoV5ProviderFactories, +// ProtoV6ProviderFactories, ProviderFactories) if not specified at the +// TestCase level. +// - No overlapping ExternalProviders and ProviderFactories entries +// - ResourceName is not empty when ImportState is true, ImportStateIdFunc +// is not set, and ImportStateId is not set. 
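To make those ordering rules concrete before the implementation that follows, a small standalone sketch; the toy step type below is hypothetical and restates just two of the checks listed above, not the SDK's actual validation.

package main

import "fmt"

// step is a toy stand-in for TestStep, carrying only what the two rules need.
type step struct {
	Config       string
	RefreshState bool
}

func validateSteps(steps []step) error {
	for i, s := range steps {
		num := i + 1 // step numbers are 1-based, as in the validation below
		if s.Config != "" && s.RefreshState {
			return fmt.Errorf("step %d: cannot have Config and RefreshState", num)
		}
		if s.RefreshState && num == 1 {
			return fmt.Errorf("step %d: cannot have RefreshState as first step", num)
		}
	}
	return nil
}

func main() {
	err := validateSteps([]step{{RefreshState: true}})
	fmt.Println(err) // step 1: cannot have RefreshState as first step
}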
+func (s TestStep) validate(ctx context.Context, req testStepValidateRequest) error { + ctx = logging.TestStepNumberContext(ctx, req.StepNumber) + + logging.HelperResourceTrace(ctx, "Validating TestStep") + + if s.Config == "" && !s.ImportState && !s.RefreshState { + err := fmt.Errorf("TestStep missing Config or ImportState or RefreshState") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.Config != "" && s.RefreshState { + err := fmt.Errorf("TestStep cannot have Config and RefreshState") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.RefreshState && s.Destroy { + err := fmt.Errorf("TestStep cannot have RefreshState and Destroy") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.RefreshState && req.StepNumber == 1 { + err := fmt.Errorf("TestStep cannot have RefreshState as first step") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.ImportState && s.RefreshState { + err := fmt.Errorf("TestStep cannot have ImportState and RefreshState in same step") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + for name := range s.ExternalProviders { + if _, ok := s.ProviderFactories[name]; ok { + err := fmt.Errorf("TestStep provider %q set in both ExternalProviders and ProviderFactories", name) + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + hasProviders := s.hasProviders(ctx) + + if req.TestCaseHasProviders && hasProviders { + err := fmt.Errorf("Providers must only be specified either at the TestCase or TestStep level") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if !req.TestCaseHasProviders && !hasProviders { + err := fmt.Errorf("Providers must be specified at the TestCase level or in all TestStep") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + + if s.ImportState { + if s.ImportStateId == "" && s.ImportStateIdFunc == nil && s.ResourceName == "" { + err := fmt.Errorf("TestStep ImportState must be specified with ImportStateId, ImportStateIdFunc, or ResourceName") + logging.HelperResourceError(ctx, "TestStep validation error", map[string]interface{}{logging.KeyError: err}) + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/error.go new file mode 100644 index 0000000000..789c712f51 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/error.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package retry + +import ( + "fmt" + "strings" + "time" +) + +type NotFoundError struct { + LastError error + LastRequest interface{} + LastResponse interface{} + Message string + Retries int +} + +func (e *NotFoundError) Error() string { + if e.Message != "" { + return e.Message + } + + if e.Retries > 0 { + return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) + } + + return "couldn't find resource" +} + +func (e *NotFoundError) Unwrap() error { + return e.LastError +} + +// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending +type UnexpectedStateError struct { + LastError error + State string + ExpectedState []string +} + +func (e *UnexpectedStateError) Error() string { + return fmt.Sprintf( + "unexpected state '%s', wanted target '%s'. last error: %s", + e.State, + strings.Join(e.ExpectedState, ", "), + e.LastError, + ) +} + +func (e *UnexpectedStateError) Unwrap() error { + return e.LastError +} + +// TimeoutError is returned when WaitForState times out +type TimeoutError struct { + LastError error + LastState string + Timeout time.Duration + ExpectedState []string +} + +func (e *TimeoutError) Error() string { + expectedState := "resource to be gone" + if len(e.ExpectedState) > 0 { + expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) + } + + extraInfo := make([]string, 0) + if e.LastState != "" { + extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) + } + if e.Timeout > 0 { + extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) + } + + suffix := "" + if len(extraInfo) > 0 { + suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) + } + + if e.LastError != nil { + return fmt.Sprintf("timeout while waiting for %s%s: %s", + expectedState, suffix, e.LastError) + } + + return fmt.Sprintf("timeout while waiting for %s%s", + expectedState, suffix) +} + +func (e *TimeoutError) Unwrap() error { + return e.LastError +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/state.go new file mode 100644 index 0000000000..4780090d95 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/state.go @@ -0,0 +1,283 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package retry + +import ( + "context" + "log" + "time" +) + +var refreshGracePeriod = 30 * time.Second + +// StateRefreshFunc is a function type used for StateChangeConf that is +// responsible for refreshing the item being watched for a state change. +// +// It returns three results. `result` is any object that will be returned +// as the final object after waiting for state change. This allows you to +// return the final updated object, for example an EC2 instance after refreshing +// it. A nil result represents not found. +// +// `state` is the latest state of that object. And `err` is any error that +// may have happened while refreshing the state. +type StateRefreshFunc func() (result interface{}, state string, err error) + +// StateChangeConf is the configuration struct used for `WaitForState`. 
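A brief aside on the retry error types above: they implement Unwrap, so the standard errors helpers see through them. A minimal sketch of how a TimeoutError renders, derived from the Error() implementation shown (the message values are hypothetical):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
)

func main() {
	err := &retry.TimeoutError{
		LastError:     errors.New("API returned 502"),
		LastState:     "pending",
		Timeout:       5 * time.Minute,
		ExpectedState: []string{"ready"},
	}

	// timeout while waiting for state to become 'ready'
	// (last state: 'pending', timeout: 5m0s): API returned 502
	fmt.Println(err)

	// Unwrap exposes the underlying error to errors.Is / errors.As.
	fmt.Println(errors.Unwrap(err)) // API returned 502
}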
+type StateChangeConf struct { + Delay time.Duration // Wait this time before starting checks + Pending []string // States that are "allowed" and will continue trying + Refresh StateRefreshFunc // Refreshes the current state + Target []string // Target state + Timeout time.Duration // The amount of time to wait before timeout + MinTimeout time.Duration // Smallest time to wait before refreshes + PollInterval time.Duration // Override MinTimeout/backoff and only poll this often + NotFoundChecks int // Number of times to allow not found (nil result from Refresh) + + // This is to work around inconsistent APIs + ContinuousTargetOccurence int // Number of times the Target state has to occur continuously +} + +// WaitForStateContext watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. +// +// If the Refresh function returns an error, exit immediately with that error. +// +// If the Refresh function returns a state other than the Target state or one +// listed in Pending, return immediately with an error. +// +// If the Timeout is exceeded before reaching the Target state, return an +// error. +// +// Otherwise, the result is the result of the first call to the Refresh function to +// reach the target state. +// +// Cancellation from the passed in context will cancel the refresh loop +func (conf *StateChangeConf) WaitForStateContext(ctx context.Context) (interface{}, error) { + log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) + + notfoundTick := 0 + targetOccurence := 0 + + // Set a default for times to check for not found + if conf.NotFoundChecks == 0 { + conf.NotFoundChecks = 20 + } + + if conf.ContinuousTargetOccurence == 0 { + conf.ContinuousTargetOccurence = 1 + } + + type Result struct { + Result interface{} + State string + Error error + Done bool + } + + // Read every result from the refresh loop, waiting for a positive result.Done. + resCh := make(chan Result, 1) + // cancellation channel for the refresh loop + cancelCh := make(chan struct{}) + + result := Result{} + + go func() { + defer close(resCh) + + select { + case <-time.After(conf.Delay): + case <-cancelCh: + return + } + + // start with 0 delay for the first loop + var wait time.Duration + + for { + // store the last result + resCh <- result + + // wait and watch for cancellation + select { + case <-cancelCh: + return + case <-time.After(wait): + // first round had no wait + if wait == 0 { + wait = 100 * time.Millisecond + } + } + + res, currentState, err := conf.Refresh() + result = Result{ + Result: res, + State: currentState, + Error: err, + } + + if err != nil { + resCh <- result + return + } + + // If we're waiting for the absence of a thing, then return + if res == nil && len(conf.Target) == 0 { + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + + if res == nil { + // If we didn't find the resource, check if we have been + // not finding it for a while, and if so, report an error. 
+ notfoundTick++ + if notfoundTick > conf.NotFoundChecks { + result.Error = &NotFoundError{ + LastError: err, + Retries: notfoundTick, + } + resCh <- result + return + } + } else { + // Reset the counter for when a resource isn't found + notfoundTick = 0 + found := false + + for _, allowed := range conf.Target { + if currentState == allowed { + found = true + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + } + + for _, allowed := range conf.Pending { + if currentState == allowed { + found = true + targetOccurence = 0 + break + } + } + + if !found && len(conf.Pending) > 0 { + result.Error = &UnexpectedStateError{ + LastError: err, + State: result.State, + ExpectedState: conf.Target, + } + resCh <- result + return + } + } + + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if targetOccurence == 0 { + wait *= 2 + } + + // If a poll interval has been specified, choose that interval. + // Otherwise bound the default value. + if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { + wait = conf.PollInterval + } else { + if wait < conf.MinTimeout { + wait = conf.MinTimeout + } else if wait > 10*time.Second { + wait = 10 * time.Second + } + } + + log.Printf("[TRACE] Waiting %s before next try", wait) + } + }() + + // store the last value result from the refresh loop + lastResult := Result{} + + timeout := time.After(conf.Timeout) + for { + select { + case r, ok := <-resCh: + // channel closed, so return the last result + if !ok { + return lastResult.Result, lastResult.Error + } + + // we reached the intended state + if r.Done { + return r.Result, r.Error + } + + // still waiting, store the last result + lastResult = r + case <-ctx.Done(): + close(cancelCh) + return nil, ctx.Err() + case <-timeout: + log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) + log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) + + // cancel the goroutine and start our grace period timer + close(cancelCh) + timeout := time.After(refreshGracePeriod) + + // we need a for loop and a label to break on, because we may have + // an extra response value to read, but still want to wait for the + // channel to close. + forSelect: + for { + select { + case r, ok := <-resCh: + if r.Done { + // the last refresh loop reached the desired state + return r.Result, r.Error + } + + if !ok { + // the goroutine returned + break forSelect + } + + // target state not reached, save the result for the + // TimeoutError and wait for the channel to close + lastResult = r + case <-ctx.Done(): + log.Println("[ERROR] Context cancelation detected, abandoning grace period") + break forSelect + case <-timeout: + log.Println("[ERROR] WaitForState exceeded refresh grace period") + break forSelect + } + } + + return nil, &TimeoutError{ + LastError: lastResult.Error, + LastState: lastResult.State, + Timeout: conf.Timeout, + ExpectedState: conf.Target, + } + } + } +} + +// WaitForState watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. 
+// +// Deprecated: Please use WaitForStateContext to ensure proper plugin shutdown +func (conf *StateChangeConf) WaitForState() (interface{}, error) { + return conf.WaitForStateContext(context.Background()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/wait.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/wait.go new file mode 100644 index 0000000000..c8d2de1439 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/wait.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package retry + +import ( + "context" + "errors" + "sync" + "time" +) + +// RetryContext is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. +// +// Cancellation from the passed in context will propagate through to the +// underlying StateChangeConf +func RetryContext(ctx context.Context, timeout time.Duration, f RetryFunc) error { + // These are used to pull the error out of the function; need a mutex to + // avoid a data race. + var resultErr error + var resultErrMu sync.Mutex + + c := &StateChangeConf{ + Pending: []string{"retryableerror"}, + Target: []string{"success"}, + Timeout: timeout, + MinTimeout: 500 * time.Millisecond, + Refresh: func() (interface{}, string, error) { + rerr := f() + + resultErrMu.Lock() + defer resultErrMu.Unlock() + + if rerr == nil { + resultErr = nil + return 42, "success", nil + } + + resultErr = rerr.Err + + if rerr.Retryable { + return 42, "retryableerror", nil + } + return nil, "quit", rerr.Err + }, + } + + _, waitErr := c.WaitForStateContext(ctx) + + // Need to acquire the lock here to be able to avoid race using resultErr as + // the return value + resultErrMu.Lock() + defer resultErrMu.Unlock() + + // resultErr may be nil because the wait timed out and resultErr was never + // set; this is still an error + if resultErr == nil { + return waitErr + } + // resultErr takes precedence over waitErr if both are set because it is + // more likely to be useful + return resultErr +} + +// Retry is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. +// +// Deprecated: Please use RetryContext to ensure proper plugin shutdown +func Retry(timeout time.Duration, f RetryFunc) error { + return RetryContext(context.Background(), timeout, f) +} + +// RetryFunc is the function retried until it succeeds. +type RetryFunc func() *RetryError + +// RetryError is the required return type of RetryFunc. It forces client code +// to choose whether or not a given error is retryable. +type RetryError struct { + Err error + Retryable bool +} + +func (e *RetryError) Unwrap() error { + return e.Err +} + +// RetryableError is a helper to create a RetryError that's retryable from a +// given error. To prevent logic errors, will return an error when passed a +// nil error. +func RetryableError(err error) *RetryError { + if err == nil { + return &RetryError{ + Err: errors.New("empty retryable error received. " + + "This is a bug with the Terraform provider and should be " + + "reported as a GitHub issue in the provider repository."), + Retryable: false, + } + } + return &RetryError{Err: err, Retryable: true} +} + +// NonRetryableError is a helper to create a RetryError that's _not_ retryable +// from a given error. To prevent logic errors, will return an error when +// passed a nil error. 
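As a usage note for the retry helpers above, a minimal sketch of the RetryContext pattern; the failure condition is simulated (standing in for, e.g., a transient 502 from an API):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
)

func main() {
	attempts := 0
	err := retry.RetryContext(context.Background(), 30*time.Second, func() *retry.RetryError {
		attempts++
		if attempts < 3 {
			// RetryableError keeps the StateChangeConf loop in "retryableerror".
			return retry.RetryableError(fmt.Errorf("transient failure, attempt %d", attempts))
		}
		return nil // a nil return transitions the loop to "success"
	})
	fmt.Println(attempts, err) // 3 <nil>
}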
+func NonRetryableError(err error) *RetryError { + if err == nil { + return &RetryError{ + Err: errors.New("empty non-retryable error received. " + + "This is a bug with the Terraform provider and should be " + + "reported as a GitHub issue in the provider repository."), + Retryable: false, + } + } + return &RetryError{Err: err, Retryable: false} +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/README.md b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/README.md similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/README.md rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/README.md diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/context.go new file mode 100644 index 0000000000..bced9ead54 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/context.go @@ -0,0 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +type Key string + +var ( + StopContextKey = Key("StopContext") +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/core_schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go similarity index 93% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/core_schema.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go index d16abef88f..736af218da 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/core_schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go @@ -1,16 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package schema import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// StringKind represents the format a string is in. +type StringKind configschema.StringKind + +const ( + // StringPlain indicates a string is plain-text and requires no processing for display. + StringPlain = StringKind(configschema.StringPlain) + + // StringMarkdown indicates a string is in markdown format and may + // require additional processing to display. + StringMarkdown = StringKind(configschema.StringMarkdown) ) var ( // DescriptionKind is the default StringKind of descriptions in this provider. // It defaults to StringPlain but can be globally switched to StringMarkdown. - DescriptionKind = configschema.StringPlain + DescriptionKind = StringPlain // SchemaDescriptionBuilder converts helper/schema.Schema Descriptions to configschema.Attribute // and Block Descriptions. 
This method can be used to modify the description text prior to it @@ -136,7 +152,7 @@ func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute { } desc := SchemaDescriptionBuilder(s) - descKind := DescriptionKind + descKind := configschema.StringKind(DescriptionKind) if desc == "" { // fallback to plain text if empty descKind = configschema.StringPlain @@ -163,7 +179,7 @@ func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock { ret.Block = *nested desc := SchemaDescriptionBuilder(s) - descKind := DescriptionKind + descKind := configschema.StringKind(DescriptionKind) if desc == "" { // fallback to plain text if empty descKind = configschema.StringPlain @@ -272,7 +288,7 @@ func (r *Resource) CoreConfigSchema() *configschema.Block { block := r.coreConfigSchema() desc := ResourceDescriptionBuilder(r) - descKind := DescriptionKind + descKind := configschema.StringKind(DescriptionKind) if desc == "" { // fallback to plain text if empty descKind = configschema.StringPlain @@ -351,11 +367,5 @@ func (r *Resource) CoreConfigSchema() *configschema.Block { } func (r *Resource) coreConfigSchema() *configschema.Block { - return schemaMap(r.Schema).CoreConfigSchema() -} - -// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema -// on the backends's schema. -func (r *Backend) CoreConfigSchema() *configschema.Block { - return schemaMap(r.Schema).CoreConfigSchema() + return schemaMap(r.SchemaMap()).CoreConfigSchema() } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/data_source_resource_shim.go similarity index 96% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/data_source_resource_shim.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/data_source_resource_shim.go index 8d93750aed..3a01c0f32f 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/data_source_resource_shim.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/data_source_resource_shim.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package schema import ( diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/equal.go new file mode 100644 index 0000000000..92a02b3b3a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/equal.go @@ -0,0 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +// Equal is an interface that checks for deep equality between two objects. +type Equal interface { + Equal(interface{}) bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go similarity index 94% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go index b3c023d19f..6aae74d95b 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go @@ -1,9 +1,11 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package schema import ( "fmt" "strconv" - "strings" ) // FieldReaders are responsible for decoding fields out of data into @@ -42,15 +44,6 @@ func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} { return s.ZeroValue() } -// SchemasForFlatmapPath tries its best to find a sequence of schemas that -// the given dot-delimited attribute path traverses through. -// -// Deprecated: This function will be removed in version 2 without replacement. -func SchemasForFlatmapPath(path string, schemaMap map[string]*Schema) []*Schema { - parts := strings.Split(path, ".") - return addrToSchema(parts, schemaMap) -} - // addrToSchema finds the final element schema for the given address // and the given schema. It returns all the schemas that led to the final // schema. These are in order of the address (out to in). @@ -91,7 +84,7 @@ func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema { case *Resource: current = &Schema{ Type: typeObject, - Elem: v.Schema, + Elem: v.SchemaMap(), } case *Schema: current = v @@ -181,7 +174,7 @@ func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema { // "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc. // after that point. func readListField( - r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) { + r FieldReader, addr []string) (FieldReadResult, error) { addrPadded := make([]string, len(addr)+1) copy(addrPadded, addr) addrPadded[len(addrPadded)-1] = "#" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go similarity index 83% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_config.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go index dc2ae1af5d..df317c20bb 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package schema import ( @@ -7,8 +10,9 @@ import ( "strings" "sync" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // ConfigFieldReader reads fields out of an untyped map[string]string to the @@ -81,31 +85,16 @@ func (r *ConfigFieldReader) readField( k := strings.Join(address, ".") schema := schemaList[len(schemaList)-1] - // If we're getting the single element of a promoted list, then - // check to see if we have a single element we need to promote. - if address[len(address)-1] == "0" && len(schemaList) > 1 { - lastSchema := schemaList[len(schemaList)-2] - if lastSchema.Type == TypeList && lastSchema.PromoteSingle { - k := strings.Join(address[:len(address)-1], ".") - result, err := r.readPrimitive(k, schema) - if err == nil { - return result, nil - } - } - } - - if protoVersion5 { - switch schema.Type { - case TypeList, TypeSet, TypeMap, typeObject: - // Check if the value itself is unknown. - // The new protocol shims will add unknown values to this list of - // ComputedKeys. 
This is the only way we have to indicate that a - // collection is unknown in the config - for _, unknown := range r.Config.ComputedKeys { - if k == unknown { - log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k) - return FieldReadResult{Computed: true, Exists: true}, nil - } + switch schema.Type { + case TypeList, TypeSet, TypeMap, typeObject: + // Check if the value itself is unknown. + // The new protocol shims will add unknown values to this list of + // ComputedKeys. This is the only way we have to indicate that a + // collection is unknown in the config + for _, unknown := range r.Config.ComputedKeys { + if k == unknown { + log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k) + return FieldReadResult{Computed: true, Exists: true}, nil } } } @@ -114,18 +103,7 @@ func (r *ConfigFieldReader) readField( case TypeBool, TypeFloat, TypeInt, TypeString: return r.readPrimitive(k, schema) case TypeList: - // If we support promotion then we first check if we have a lone - // value that we must promote. - // a value that is alone. - if schema.PromoteSingle { - result, err := r.readPrimitive(k, schema.Elem.(*Schema)) - if err == nil && result.Exists { - result.Value = []interface{}{result.Value} - return result, nil - } - } - - return readListField(&nestedConfigFieldReader{r}, address, schema) + return readListField(&nestedConfigFieldReader{r}, address) case TypeMap: return r.readMap(k, schema) case TypeSet: @@ -228,7 +206,7 @@ func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, err := mapValuesToPrimitive(k, result, schema) if err != nil { - return FieldReadResult{}, nil + return FieldReadResult{}, nil //nolint:nilerr // Leave legacy flatmap handling } var value interface{} @@ -283,7 +261,7 @@ func (r *ConfigFieldReader) readSet( // Create the set that will be our result set := schema.ZeroValue().(*Set) - raw, err := readListField(&nestedConfigFieldReader{r}, address, schema) + raw, err := readListField(&nestedConfigFieldReader{r}, address) if err != nil { return FieldReadResult{}, err } @@ -325,7 +303,7 @@ func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool switch t := schema.Elem.(type) { case *Resource: - for k, schema := range t.Schema { + for k, schema := range t.SchemaMap() { if r.Config.IsComputed(prefix + k) { return true } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go similarity index 96% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_diff.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go index c099029afa..c9da00e911 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go @@ -1,11 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package schema import ( "fmt" "strings" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // DiffFieldReader reads fields out of a diff structures. 
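For readers new to the flatmap form these field readers decode, a standalone sketch of the convention described above ("foo.#" holds the element count; "foo.0", "foo.1", ... hold the elements); the attribute name and values are hypothetical:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// A list attribute in flatmap form, as stored in legacy state/diffs.
	state := map[string]string{
		"ports.#": "2",
		"ports.0": "80",
		"ports.1": "443",
	}

	count, err := strconv.Atoi(state["ports.#"])
	if err != nil {
		panic(err)
	}

	ports := make([]string, 0, count)
	for i := 0; i < count; i++ {
		ports = append(ports, state["ports."+strconv.Itoa(i)])
	}
	fmt.Println(ports) // [80 443]
}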
@@ -66,7 +70,7 @@ func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) { case TypeBool, TypeInt, TypeFloat, TypeString: res, err = r.readPrimitive(address, schema) case TypeList: - res, err = readListField(r, address, schema) + res, err = readListField(r, address) case TypeMap: res, err = r.readMap(address, schema) case TypeSet: @@ -127,7 +131,7 @@ func (r *DiffFieldReader) readMap( key := address[len(address)-1] err = mapValuesToPrimitive(key, result, schema) if err != nil { - return FieldReadResult{}, nil + return FieldReadResult{}, nil //nolint:nilerr // Leave legacy flatmap handling } var resultVal interface{} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go similarity index 85% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_map.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go index 53f73b71bb..6697606c6f 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package schema import ( @@ -24,7 +27,7 @@ func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) { case TypeBool, TypeInt, TypeFloat, TypeString: return r.readPrimitive(address, schema) case TypeList: - return readListField(r, address, schema) + return readListField(r, address) case TypeMap: return r.readMap(k, schema) case TypeSet: @@ -63,7 +66,7 @@ func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, err err := mapValuesToPrimitive(k, result, schema) if err != nil { - return FieldReadResult{}, nil + return FieldReadResult{}, nil //nolint:nilerr // Leave legacy flatmap handling } var resultVal interface{} @@ -159,11 +162,8 @@ func (r *MapFieldReader) readSet( // "ports.1", but the "state" map might have those plus "ports.2". // We don't want "ports.2" countActual[idx] = struct{}{} - if len(countActual) >= countExpected { - return false - } - return true + return len(countActual) < countExpected }) if !completed && err != nil { return FieldReadResult{}, err @@ -200,36 +200,3 @@ func (r BasicMapReader) Range(f func(string, string) bool) bool { return true } - -// MultiMapReader reads over multiple maps, preferring keys that are -// founder earlier (lower number index) vs. 
later (higher number index) -type MultiMapReader []map[string]string - -func (r MultiMapReader) Access(k string) (string, bool) { - for _, m := range r { - if v, ok := m[k]; ok { - return v, ok - } - } - - return "", false -} - -func (r MultiMapReader) Range(f func(string, string) bool) bool { - done := make(map[string]struct{}) - for _, m := range r { - for k, v := range m { - if _, ok := done[k]; ok { - continue - } - - if cont := f(k, v); !cont { - return false - } - - done[k] = struct{}{} - } - } - - return true -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_multi.go similarity index 95% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_multi.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_multi.go index 89ad3a86f2..da4c9c8150 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_multi.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_multi.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package schema import ( diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer.go similarity index 78% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer.go index 9abc41b54f..be4fae5060 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package schema // FieldWriters are responsible for writing fields by address into diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go similarity index 94% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer_map.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go index 85d05be4c3..c9147cec19 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer_map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package schema import ( @@ -100,13 +103,13 @@ func (w *MapFieldWriter) set(addr []string, value interface{}) error { case TypeBool, TypeInt, TypeFloat, TypeString: return w.setPrimitive(addr, value, schema) case TypeList: - return w.setList(addr, value, schema) + return w.setList(addr, value) case TypeMap: - return w.setMap(addr, value, schema) + return w.setMap(addr, value) case TypeSet: return w.setSet(addr, value, schema) case typeObject: - return w.setObject(addr, value, schema) + return w.setObject(addr, value) default: panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) } @@ -114,8 +117,7 @@ func (w *MapFieldWriter) set(addr []string, value interface{}) error { func (w *MapFieldWriter) setList( addr []string, - v interface{}, - schema *Schema) error { + v interface{}) error { k := strings.Join(addr, ".") setElement := func(idx string, value interface{}) error { addrCopy := make([]string, len(addr), len(addr)+1) @@ -148,7 +150,7 @@ func (w *MapFieldWriter) setList( if err != nil { for i := range vs { is := strconv.FormatInt(int64(i), 10) - setElement(is, nil) + _ = setElement(is, nil) // best effort; error returned below } return err @@ -160,8 +162,7 @@ func (w *MapFieldWriter) setList( func (w *MapFieldWriter) setMap( addr []string, - value interface{}, - schema *Schema) error { + value interface{}) error { k := strings.Join(addr, ".") v := reflect.ValueOf(value) vs := make(map[string]interface{}) @@ -176,7 +177,7 @@ func (w *MapFieldWriter) setMap( return fmt.Errorf("%s: must be a map", k) } if v.Type().Key().Kind() != reflect.String { - return fmt.Errorf("%s: keys must strings", k) + return fmt.Errorf("%s: keys must be strings", k) } for _, mk := range v.MapKeys() { mv := v.MapIndex(mk) @@ -207,8 +208,7 @@ func (w *MapFieldWriter) setMap( func (w *MapFieldWriter) setObject( addr []string, - value interface{}, - schema *Schema) error { + value interface{}) error { // Set the entire object. First decode into a proper structure var v map[string]interface{} if err := mapstructure.Decode(value, &v); err != nil { @@ -228,11 +228,13 @@ func (w *MapFieldWriter) setObject( } if err != nil { for k1 := range v { - w.set(append(addrCopy, k1), nil) + _ = w.set(append(addrCopy, k1), nil) // best effort; error returned below } + + return err } - return err + return nil } func (w *MapFieldWriter) setPrimitive( @@ -271,7 +273,7 @@ func (w *MapFieldWriter) setPrimitive( if err := mapstructure.Decode(v, &n); err != nil { return fmt.Errorf("%s: %s", k, err) } - set = strconv.FormatFloat(float64(n), 'G', -1, 64) + set = strconv.FormatFloat(n, 'G', -1, 64) default: return fmt.Errorf("Unknown type: %#v", schema.Type) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/getsource_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/getsource_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/getsource_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go new file mode 100644 index 0000000000..70477da45a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go @@ -0,0 +1,1676 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "sync" + + "github.com/hashicorp/go-cty/cty" + ctyconvert "github.com/hashicorp/go-cty/cty/convert" + "github.com/hashicorp/go-cty/cty/msgpack" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +const ( + newExtraKey = "_new_extra_shim" +) + +// Verify provider server interface implementation. +var _ tfprotov5.ProviderServer = (*GRPCProviderServer)(nil) + +func NewGRPCProviderServer(p *Provider) *GRPCProviderServer { + return &GRPCProviderServer{ + provider: p, + stopCh: make(chan struct{}), + } +} + +// GRPCProviderServer handles the server, or plugin side of the rpc connection. +type GRPCProviderServer struct { + provider *Provider + stopCh chan struct{} + stopMu sync.Mutex +} + +// mergeStop is called in a goroutine and waits for the global stop signal +// and propagates cancellation to the passed in ctx/cancel func. The ctx is +// also passed to this function and waited upon so no goroutine leak is caused. +func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struct{}) { + select { + case <-ctx.Done(): + return + case <-stopCh: + cancel() + } +} + +// StopContext derives a new context from the passed in grpc context. +// It creates a goroutine to wait for the server stop and propagates +// cancellation to the derived grpc context. 
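The stop propagation described above is easier to see in isolation. A standalone restatement of the mergeStop pattern, simulating StopProvider by closing the shared channel:

package main

import (
	"context"
	"fmt"
	"time"
)

// One goroutine per request-scoped context: it exits when the request finishes
// on its own, or cancels the context when the provider-wide stop channel closes.
func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struct{}) {
	select {
	case <-ctx.Done():
	case <-stopCh:
		cancel()
	}
}

func main() {
	stopCh := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go mergeStop(ctx, cancel, stopCh)

	// Simulate the StopProvider RPC closing the shared stop channel.
	time.AfterFunc(50*time.Millisecond, func() { close(stopCh) })

	<-ctx.Done()
	fmt.Println("derived context cancelled:", ctx.Err()) // context canceled
}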
+func (s *GRPCProviderServer) StopContext(ctx context.Context) context.Context { + ctx = logging.InitContext(ctx) + s.stopMu.Lock() + defer s.stopMu.Unlock() + + stoppable, cancel := context.WithCancel(ctx) + go mergeStop(stoppable, cancel, s.stopCh) + return stoppable +} + +func (s *GRPCProviderServer) serverCapabilities() *tfprotov5.ServerCapabilities { + return &tfprotov5.ServerCapabilities{ + GetProviderSchemaOptional: true, + } +} + +func (s *GRPCProviderServer) GetMetadata(ctx context.Context, req *tfprotov5.GetMetadataRequest) (*tfprotov5.GetMetadataResponse, error) { + ctx = logging.InitContext(ctx) + + logging.HelperSchemaTrace(ctx, "Getting provider metadata") + + resp := &tfprotov5.GetMetadataResponse{ + DataSources: make([]tfprotov5.DataSourceMetadata, 0, len(s.provider.DataSourcesMap)), + Functions: make([]tfprotov5.FunctionMetadata, 0), + Resources: make([]tfprotov5.ResourceMetadata, 0, len(s.provider.ResourcesMap)), + ServerCapabilities: s.serverCapabilities(), + } + + for typeName := range s.provider.DataSourcesMap { + resp.DataSources = append(resp.DataSources, tfprotov5.DataSourceMetadata{ + TypeName: typeName, + }) + } + + for typeName := range s.provider.ResourcesMap { + resp.Resources = append(resp.Resources, tfprotov5.ResourceMetadata{ + TypeName: typeName, + }) + } + + return resp, nil +} + +func (s *GRPCProviderServer) GetProviderSchema(ctx context.Context, req *tfprotov5.GetProviderSchemaRequest) (*tfprotov5.GetProviderSchemaResponse, error) { + ctx = logging.InitContext(ctx) + + logging.HelperSchemaTrace(ctx, "Getting provider schema") + + resp := &tfprotov5.GetProviderSchemaResponse{ + DataSourceSchemas: make(map[string]*tfprotov5.Schema, len(s.provider.DataSourcesMap)), + Functions: make(map[string]*tfprotov5.Function, 0), + ResourceSchemas: make(map[string]*tfprotov5.Schema, len(s.provider.ResourcesMap)), + ServerCapabilities: s.serverCapabilities(), + } + + resp.Provider = &tfprotov5.Schema{ + Block: convert.ConfigSchemaToProto(ctx, s.getProviderSchemaBlock()), + } + + resp.ProviderMeta = &tfprotov5.Schema{ + Block: convert.ConfigSchemaToProto(ctx, s.getProviderMetaSchemaBlock()), + } + + for typ, res := range s.provider.ResourcesMap { + logging.HelperSchemaTrace(ctx, "Found resource type", map[string]interface{}{logging.KeyResourceType: typ}) + + resp.ResourceSchemas[typ] = &tfprotov5.Schema{ + Version: int64(res.SchemaVersion), + Block: convert.ConfigSchemaToProto(ctx, res.CoreConfigSchema()), + } + } + + for typ, dat := range s.provider.DataSourcesMap { + logging.HelperSchemaTrace(ctx, "Found data source type", map[string]interface{}{logging.KeyDataSourceType: typ}) + + resp.DataSourceSchemas[typ] = &tfprotov5.Schema{ + Version: int64(dat.SchemaVersion), + Block: convert.ConfigSchemaToProto(ctx, dat.CoreConfigSchema()), + } + } + + return resp, nil +} + +func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block { + return InternalMap(s.provider.Schema).CoreConfigSchema() +} + +func (s *GRPCProviderServer) getProviderMetaSchemaBlock() *configschema.Block { + return InternalMap(s.provider.ProviderMetaSchema).CoreConfigSchema() +} + +func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block { + res := s.provider.ResourcesMap[name] + return res.CoreConfigSchema() +} + +func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema.Block { + dat := s.provider.DataSourcesMap[name] + return dat.CoreConfigSchema() +} + +func (s *GRPCProviderServer) PrepareProviderConfig(ctx context.Context, req 
*tfprotov5.PrepareProviderConfigRequest) (*tfprotov5.PrepareProviderConfigResponse, error) { + ctx = logging.InitContext(ctx) + resp := &tfprotov5.PrepareProviderConfigResponse{} + + logging.HelperSchemaTrace(ctx, "Preparing provider configuration") + + schemaBlock := s.getProviderSchemaBlock() + + configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // lookup any required, top-level attributes that are Null, and see if we + // have a Default value available. + configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := s.provider.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + return val, fmt.Errorf("error getting default for %q: %w", getAttr.Name, err) + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + return val, fmt.Errorf("error setting default for %q: %w", getAttr.Name, err) + } + + return val, nil + }) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + configVal, err = schemaBlock.CoerceValue(configVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. 
+ if err := validateConfigNulls(ctx, configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + logging.HelperSchemaTrace(ctx, "Calling downstream") + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, s.provider.Validate(config)) + logging.HelperSchemaTrace(ctx, "Called downstream") + + preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + resp.PreparedConfig = &tfprotov5.DynamicValue{MsgPack: preparedConfigMP} + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateResourceTypeConfig(ctx context.Context, req *tfprotov5.ValidateResourceTypeConfigRequest) (*tfprotov5.ValidateResourceTypeConfigResponse, error) { + ctx = logging.InitContext(ctx) + resp := &tfprotov5.ValidateResourceTypeConfigResponse{} + + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + logging.HelperSchemaTrace(ctx, "Calling downstream") + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, s.provider.ValidateResource(req.TypeName, config)) + logging.HelperSchemaTrace(ctx, "Called downstream") + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateDataSourceConfig(ctx context.Context, req *tfprotov5.ValidateDataSourceConfigRequest) (*tfprotov5.ValidateDataSourceConfigResponse, error) { + ctx = logging.InitContext(ctx) + resp := &tfprotov5.ValidateDataSourceConfigResponse{} + + schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(ctx, configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + logging.HelperSchemaTrace(ctx, "Calling downstream") + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, s.provider.ValidateDataSource(req.TypeName, config)) + logging.HelperSchemaTrace(ctx, "Called downstream") + + return resp, nil +} + +func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfprotov5.UpgradeResourceStateRequest) (*tfprotov5.UpgradeResourceStateResponse, error) { + ctx = logging.InitContext(ctx) + resp := &tfprotov5.UpgradeResourceStateResponse{} + + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + version := int(req.Version) + + jsonMap := map[string]interface{}{} + var err error + + switch { + // We first need to upgrade a flatmap state if it exists. + // There should never be both a JSON and Flatmap state in the request. 
+ case len(req.RawState.Flatmap) > 0: + logging.HelperSchemaTrace(ctx, "Upgrading flatmap state") + + jsonMap, version, err = s.upgradeFlatmapState(ctx, version, req.RawState.Flatmap, res) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + // if there's a JSON state, we need to decode it. + case len(req.RawState.JSON) > 0: + if res.UseJSONNumber { + err = unmarshalJSON(req.RawState.JSON, &jsonMap) + } else { + err = json.Unmarshal(req.RawState.JSON, &jsonMap) + } + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + default: + logging.HelperSchemaDebug(ctx, "no state provided to upgrade") + return resp, nil + } + + // complete the upgrade of the JSON states + logging.HelperSchemaTrace(ctx, "Upgrading JSON state") + + jsonMap, err = s.upgradeJSONState(ctx, version, jsonMap, res) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // The provider isn't required to clean out removed fields + s.removeAttributes(ctx, jsonMap, schemaBlock.ImpliedType()) + + // now we need to turn the state into the default json representation, so + // that it can be re-decoded using the actual schema. + val, err := JSONMapToStateValue(jsonMap, schemaBlock) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // Now we need to make sure blocks are represented correctly, which means + // that missing blocks are empty collections, rather than null. + // First we need to CoerceValue to ensure that all object types match. + val, err = schemaBlock.CoerceValue(val) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + // Normalize the value and fill in any missing blocks. + val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock) + + // encode the final state to the expected msgpack format + newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + resp.UpgradedState = &tfprotov5.DynamicValue{MsgPack: newStateMP} + return resp, nil +} + +// upgradeFlatmapState takes a legacy flatmap state, upgrades it using Migrate +// state if necessary, and converts it to the new JSON state format decoded as a +// map[string]interface{}. +// upgradeFlatmapState returns the json map along with the corresponding schema +// version. +func (s *GRPCProviderServer) upgradeFlatmapState(_ context.Context, version int, m map[string]string, res *Resource) (map[string]interface{}, int, error) { + // this will be the version we've upgraded so, defaulting to the given + // version in case no migration was called. + upgradedVersion := version + + // first determine if we need to call the legacy MigrateState func + requiresMigrate := version < res.SchemaVersion + + schemaType := res.CoreConfigSchema().ImpliedType() + + // if there are any StateUpgraders, then we need to only compare + // against the first version there + if len(res.StateUpgraders) > 0 { + requiresMigrate = version < res.StateUpgraders[0].Version + } + + if requiresMigrate && res.MigrateState == nil { + // Providers were previously allowed to bump the version + // without declaring MigrateState. + // If there are further upgraders, then we've only updated that far. 
+ if len(res.StateUpgraders) > 0 { + schemaType = res.StateUpgraders[0].Type + upgradedVersion = res.StateUpgraders[0].Version + } + } else if requiresMigrate { + is := &terraform.InstanceState{ + ID: m["id"], + Attributes: m, + Meta: map[string]interface{}{ + "schema_version": strconv.Itoa(version), + }, + } + is, err := res.MigrateState(version, is, s.provider.Meta()) + if err != nil { + return nil, 0, err + } + + // re-assign the map in case there was a copy made, making sure to keep + // the ID + m := is.Attributes + m["id"] = is.ID + + // if there are further upgraders, then we've only updated that far + if len(res.StateUpgraders) > 0 { + schemaType = res.StateUpgraders[0].Type + upgradedVersion = res.StateUpgraders[0].Version + } + } else { + // the schema version may be newer than the MigrateState functions + // handled and older than the current, but still stored in the flatmap + // form. If that's the case, we need to find the correct schema type to + // convert the state. + for _, upgrader := range res.StateUpgraders { + if upgrader.Version == version { + schemaType = upgrader.Type + break + } + } + } + + // now we know the state is up to the latest version that handled the + // flatmap format state. Now we can upgrade the format and continue from + // there. + newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType) + if err != nil { + return nil, 0, err + } + + jsonMap, err := stateValueToJSONMap(newConfigVal, schemaType, res.UseJSONNumber) + return jsonMap, upgradedVersion, err +} + +func (s *GRPCProviderServer) upgradeJSONState(ctx context.Context, version int, m map[string]interface{}, res *Resource) (map[string]interface{}, error) { + var err error + + for _, upgrader := range res.StateUpgraders { + if version != upgrader.Version { + continue + } + + m, err = upgrader.Upgrade(ctx, m, s.provider.Meta()) + if err != nil { + return nil, err + } + version++ + } + + return m, nil +} + +// Remove any attributes no longer present in the schema, so that the json can +// be correctly decoded. +func (s *GRPCProviderServer) removeAttributes(ctx context.Context, v interface{}, ty cty.Type) { + // we're only concerned with finding maps that correspond to object + attributes + switch v := v.(type) { + case []interface{}: + // If these aren't blocks the next call will be a noop + if ty.IsListType() || ty.IsSetType() { + eTy := ty.ElementType() + for _, eV := range v { + s.removeAttributes(ctx, eV, eTy) + } + } + return + case map[string]interface{}: + // map blocks aren't yet supported, but handle this just in case + if ty.IsMapType() { + eTy := ty.ElementType() + for _, eV := range v { + s.removeAttributes(ctx, eV, eTy) + } + return + } + + if ty == cty.DynamicPseudoType { + logging.HelperSchemaDebug(ctx, "ignoring dynamic block", map[string]interface{}{"block": v}) + return + } + + if !ty.IsObjectType() { + // This shouldn't happen, and will fail to decode further on, so + // there's no need to handle it here. 
+ logging.HelperSchemaWarn(ctx, "unexpected type for map in JSON state", map[string]interface{}{"type": ty}) + return + } + + attrTypes := ty.AttributeTypes() + for attr, attrV := range v { + attrTy, ok := attrTypes[attr] + if !ok { + logging.HelperSchemaDebug(ctx, "attribute no longer present in schema", map[string]interface{}{"attribute": attr}) + delete(v, attr) + continue + } + + s.removeAttributes(ctx, attrV, attrTy) + } + } +} + +func (s *GRPCProviderServer) StopProvider(ctx context.Context, _ *tfprotov5.StopProviderRequest) (*tfprotov5.StopProviderResponse, error) { + ctx = logging.InitContext(ctx) + + logging.HelperSchemaTrace(ctx, "Stopping provider") + + s.stopMu.Lock() + defer s.stopMu.Unlock() + + // stop + close(s.stopCh) + // reset the stop signal + s.stopCh = make(chan struct{}) + + logging.HelperSchemaTrace(ctx, "Stopped provider") + + return &tfprotov5.StopProviderResponse{}, nil +} + +func (s *GRPCProviderServer) ConfigureProvider(ctx context.Context, req *tfprotov5.ConfigureProviderRequest) (*tfprotov5.ConfigureProviderResponse, error) { + ctx = logging.InitContext(ctx) + resp := &tfprotov5.ConfigureProviderResponse{} + + schemaBlock := s.getProviderSchemaBlock() + + configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + s.provider.TerraformVersion = req.TerraformVersion + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(ctx, configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + // CtyValue is the raw protocol configuration data from newer APIs. + // + // This field was only added as a targeted fix for passing raw protocol data + // through the existing (helper/schema.Provider).Configure() exported method + // and is only populated in that situation. The data could theoretically be + // set in the NewResourceConfigShimmed() function; however, the consequences + // of doing this were not investigated at the time the fix was introduced. + // + // Reference: https://github.com/hashicorp/terraform-plugin-sdk/issues/1270 + config.CtyValue = configVal + + // TODO: remove global stop context hack + // This attaches a global stop-synchronized context onto the provider.Configure + // request scoped context. This provides a substitute for the removed provider.StopContext() + // function. Ideally a provider should migrate to the context aware API that receives + // request scoped contexts; however, this is a large undertaking for very large providers. + ctxHack := context.WithValue(ctx, StopContextKey, s.StopContext(context.Background())) + + logging.HelperSchemaTrace(ctx, "Calling downstream") + diags := s.provider.Configure(ctxHack, config) + logging.HelperSchemaTrace(ctx, "Called downstream") + + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diags) + + return resp, nil +} + +func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.ReadResourceRequest) (*tfprotov5.ReadResourceResponse, error) { + ctx = logging.InitContext(ctx) + resp := &tfprotov5.ReadResourceResponse{ + // helper/schema did previously handle private data during refresh, but + // core is now going to expect this to be maintained in order to + // persist it in the state.
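// (A sketch of what flows through here, values hypothetical: helper/schema
// keeps instance metadata such as {"schema_version": "2"} plus any stored
// timeouts as a JSON object in Private; pre-seeding the response with
// req.Private ensures that metadata survives a refresh unchanged.)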
+ Private: req.Private, + } + + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + stateVal, err := msgpack.Unmarshal(req.CurrentState.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + instanceState, err := res.ShimInstanceStateFromValue(stateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + instanceState.RawState = stateVal + + private := make(map[string]interface{}) + if len(req.Private) > 0 { + if err := json.Unmarshal(req.Private, &private); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + } + instanceState.Meta = private + + pmSchemaBlock := s.getProviderMetaSchemaBlock() + if pmSchemaBlock != nil && req.ProviderMeta != nil { + providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.MsgPack, pmSchemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + instanceState.ProviderMeta = providerSchemaVal + } + + newInstanceState, diags := res.RefreshWithoutUpgrade(ctx, instanceState, s.provider.Meta()) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diags) + if diags.HasError() { + return resp, nil + } + + if newInstanceState == nil || newInstanceState.ID == "" { + // The old provider API used an empty id to signal that the remote + // object appears to have been deleted, but our new protocol expects + // to see a null value (in the cty sense) in that case. 
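// As a concrete illustration, a resource Read that detects a deleted remote
// object signals this case like so (sketch; the not-found helper is
// hypothetical):
//
//	if isNotFound(err) {
//		d.SetId("") // empty ID -> empty InstanceState -> null value below
//		return nil
//	}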
+ newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + } + resp.NewState = &tfprotov5.DynamicValue{ + MsgPack: newStateMP, + } + return resp, nil + } + + // helper/schema should always copy the ID over, but do it again just to be safe + newInstanceState.Attributes["id"] = newInstanceState.ID + + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, stateVal, false) + newStateVal = copyTimeoutValues(newStateVal, stateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + resp.NewState = &tfprotov5.DynamicValue{ + MsgPack: newStateMP, + } + + return resp, nil +} + +func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprotov5.PlanResourceChangeRequest) (*tfprotov5.PlanResourceChangeResponse, error) { + ctx = logging.InitContext(ctx) + resp := &tfprotov5.PlanResourceChangeResponse{} + + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors. + if !res.EnableLegacyTypeSystemPlanErrors { + //nolint:staticcheck // explicitly for this SDK + resp.UnsafeToUseLegacyTypeSystem = true + } + + priorStateVal, err := msgpack.Unmarshal(req.PriorState.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + create := priorStateVal.IsNull() + + proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // We don't usually plan destroys, but this can return early in any case. 
+ if proposedNewStateVal.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp, nil + } + + configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + priorState.RawState = priorStateVal + priorState.RawPlan = proposedNewStateVal + priorState.RawConfig = configVal + priorPrivate := make(map[string]interface{}) + if len(req.PriorPrivate) > 0 { + if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + } + + priorState.Meta = priorPrivate + + pmSchemaBlock := s.getProviderMetaSchemaBlock() + if pmSchemaBlock != nil && req.ProviderMeta != nil { + providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.MsgPack, pmSchemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + priorState.ProviderMeta = providerSchemaVal + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(ctx, proposedNewStateVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // turn the proposed state into a legacy configuration + cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock) + + diff, err := res.SimpleDiff(ctx, priorState, cfg, s.provider.Meta()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // if this is a new instance, we need to make sure ID is going to be computed + if create { + if diff == nil { + diff = terraform.NewInstanceDiff() + } + + diff.Attributes["id"] = &terraform.ResourceAttrDiff{ + NewComputed: true, + } + } + + if diff == nil || len(diff.Attributes) == 0 { + // schema.Provider.Diff returns nil if it ends up making a diff with no + // changes, but our new interface wants us to return an actual change + // description that _shows_ there are no changes. This is always the + // prior state, because we force a diff above if this is a new instance. 
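// As a concrete trace of the create path (hypothetical resource): on first
// plan priorStateVal is null, so create is true, the diff above gains
// Attributes["id"].NewComputed = true, and once SetUnknowns runs further
// below, the planned state carries id = cty.UnknownVal(cty.String), which
// Terraform renders as "(known after apply)".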
+ resp.PlannedState = req.PriorState + resp.PlannedPrivate = req.PriorPrivate + return resp, nil + } + + if priorState == nil { + priorState = &terraform.InstanceState{} + } + + // now we need to apply the diff to the prior state, so get the planned state + plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock) + + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false) + + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal) + + // The old SDK code has some imprecisions that cause it to sometimes + // generate differences that the SDK itself does not consider significant + // but Terraform Core would. To avoid producing weird do-nothing diffs + // in that case, we'll check if the provider has produced something we + // think is "equivalent" to the prior state and just return the prior state + // itself if so, thus ensuring that Terraform Core will treat this as + // a no-op. See the docs for ValuesSDKEquivalent for some caveats on its + // accuracy. + forceNoChanges := false + if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) { + plannedStateVal = priorStateVal + forceNoChanges = true + } + + // if this was creating the resource, we need to set any remaining computed + // fields + if create { + plannedStateVal = SetUnknowns(plannedStateVal, schemaBlock) + } + + plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + resp.PlannedState = &tfprotov5.DynamicValue{ + MsgPack: plannedMP, + } + + // encode any timeouts into the diff Meta + t := &ResourceTimeout{} + if err := t.ConfigDecode(res, cfg); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + if err := t.DiffEncode(diff); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // Now we need to store any NewExtra values, which are where any actual + // StateFunc modified config fields are hidden. + privateMap := diff.Meta + if privateMap == nil { + privateMap = map[string]interface{}{} + } + + newExtra := map[string]interface{}{} + + for k, v := range diff.Attributes { + if v.NewExtra != nil { + newExtra[k] = v.NewExtra + } + } + privateMap[newExtraKey] = newExtra + + // the Meta field gets encoded into PlannedPrivate + plannedPrivate, err := json.Marshal(privateMap) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + resp.PlannedPrivate = plannedPrivate + + // collect the attributes that require instance replacement, and convert + // them to cty.Paths.
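// For instance (hypothetical schema): a changed ForceNew attribute and a
// changed nested ForceNew value would be collected below as the flatmap keys
//
//	["instance_type", "disk.0.size"]
//
// which hcl2shim.RequiresReplace translates into the cty.Paths
// instance_type and disk[0].size before they are encoded for the protocol.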
+ var requiresNew []string + if !forceNoChanges { + for attr, d := range diff.Attributes { + if d.RequiresNew { + requiresNew = append(requiresNew, attr) + } + } + } + + // If anything requires a new resource already, or the "id" field indicates + // that we will be creating a new resource, then we need to add that to + // RequiresReplace so that core can tell if the instance is being replaced + // even if changes are being suppressed via "ignore_changes". + id := plannedStateVal.GetAttr("id") + if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() { + requiresNew = append(requiresNew, "id") + } + + requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // convert these to the protocol structures + for _, p := range requiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p)) + } + + return resp, nil +} + +func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfprotov5.ApplyResourceChangeRequest) (*tfprotov5.ApplyResourceChangeResponse, error) { + ctx = logging.InitContext(ctx) + resp := &tfprotov5.ApplyResourceChangeResponse{ + // Start with the existing state as a fallback + NewState: req.PriorState, + } + + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + priorStateVal, err := msgpack.Unmarshal(req.PriorState.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + private := make(map[string]interface{}) + if len(req.PlannedPrivate) > 0 { + if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + } + + var diff *terraform.InstanceDiff + destroy := false + + // a null state means we are destroying the instance + if plannedStateVal.IsNull() { + destroy = true + diff = &terraform.InstanceDiff{ + Attributes: make(map[string]*terraform.ResourceAttrDiff), + Meta: make(map[string]interface{}), + Destroy: true, + RawPlan: plannedStateVal, + RawState: priorStateVal, + RawConfig: configVal, + } + } else { + diff, err = DiffFromValues(ctx, priorStateVal, plannedStateVal, configVal, stripResourceModifiers(res)) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + } + + if diff == nil { + diff = &terraform.InstanceDiff{ + Attributes: make(map[string]*terraform.ResourceAttrDiff), + Meta: make(map[string]interface{}), + RawPlan: plannedStateVal, + RawState: priorStateVal, + RawConfig: configVal, + } + } + + // add NewExtra Fields 
that may have been stored in the private data + if newExtra := private[newExtraKey]; newExtra != nil { + for k, v := range newExtra.(map[string]interface{}) { + d := diff.Attributes[k] + + if d == nil { + d = &terraform.ResourceAttrDiff{} + } + + d.NewExtra = v + diff.Attributes[k] = d + } + } + + if private != nil { + diff.Meta = private + } + + for k, d := range diff.Attributes { + // We need to turn off any RequiresNew. There could be attributes + // without changes in here inserted by helper/schema, but if they have + // RequiresNew then the state will be dropped from the ResourceData. + d.RequiresNew = false + + // Drop any "removed" attributes that don't actually exist in the + // prior state, or helper/schema will confuse itself + if d.NewRemoved { + if _, ok := priorState.Attributes[k]; !ok { + delete(diff.Attributes, k) + } + } + } + + pmSchemaBlock := s.getProviderMetaSchemaBlock() + if pmSchemaBlock != nil && req.ProviderMeta != nil { + providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.MsgPack, pmSchemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + priorState.ProviderMeta = providerSchemaVal + } + + newInstanceState, diags := res.Apply(ctx, priorState, diff, s.provider.Meta()) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diags) + + newStateVal := cty.NullVal(schemaBlock.ImpliedType()) + + // Always return a null value for destroy. + // While this is usually indicated by a nil state, check for missing ID or + // attributes in the case of a provider failure. + if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" { + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + resp.NewState = &tfprotov5.DynamicValue{ + MsgPack: newStateMP, + } + return resp, nil + } + + // We keep the null val if we destroyed the resource, otherwise build the + // entire object, even if the new state was nil. + newStateVal, err = StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, plannedStateVal, true) + + newStateVal = copyTimeoutValues(newStateVal, plannedStateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + resp.NewState = &tfprotov5.DynamicValue{ + MsgPack: newStateMP, + } + + meta, err := json.Marshal(newInstanceState.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + resp.Private = meta + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors.
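// Resources can opt out of this slack individually; a sketch of the strict
// configuration (whether it is appropriate depends on the resource):
//
//	&schema.Resource{
//		EnableLegacyTypeSystemPlanErrors:  true,
//		EnableLegacyTypeSystemApplyErrors: true,
//	}
//
// leaves UnsafeToUseLegacyTypeSystem unset here, so Terraform Core reports
// value-consistency problems as hard errors instead of tolerating them.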
+ if !res.EnableLegacyTypeSystemApplyErrors { + //nolint:staticcheck // explicitly for this SDK + resp.UnsafeToUseLegacyTypeSystem = true + } + + return resp, nil +} + +func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *tfprotov5.ImportResourceStateRequest) (*tfprotov5.ImportResourceStateResponse, error) { + ctx = logging.InitContext(ctx) + resp := &tfprotov5.ImportResourceStateResponse{} + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + newInstanceStates, err := s.provider.ImportState(ctx, info, req.ID) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + for _, is := range newInstanceStates { + // copy the ID again just to be sure it wasn't missed + is.Attributes["id"] = is.ID + + resourceType := is.Ephemeral.Type + if resourceType == "" { + resourceType = req.TypeName + } + + schemaBlock := s.getResourceSchemaBlock(resourceType) + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // Normalize the value and fill in any missing blocks. + newStateVal = objchange.NormalizeObjectFromLegacySDK(newStateVal, schemaBlock) + + // Ensure any timeouts block is null in the imported state. There is no + // configuration to read from during import, so it is never valid to + // return a known value for the block. + // + // This is done without modifying HCL2ValueFromFlatmap or + // NormalizeObjectFromLegacySDK to prevent other unexpected changes. + // + // Reference: https://github.com/hashicorp/terraform-plugin-sdk/issues/1145 + newStateType := newStateVal.Type() + + if newStateVal != cty.NilVal && !newStateVal.IsNull() && newStateType.IsObjectType() && newStateType.HasAttribute(TimeoutsConfigKey) { + newStateValueMap := newStateVal.AsValueMap() + newStateValueMap[TimeoutsConfigKey] = cty.NullVal(newStateType.AttributeType(TimeoutsConfigKey)) + newStateVal = cty.ObjectVal(newStateValueMap) + } + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + meta, err := json.Marshal(is.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + importedResource := &tfprotov5.ImportedResource{ + TypeName: resourceType, + State: &tfprotov5.DynamicValue{ + MsgPack: newStateMP, + }, + Private: meta, + } + + resp.ImportedResources = append(resp.ImportedResources, importedResource) + } + + return resp, nil +} + +func (s *GRPCProviderServer) MoveResourceState(ctx context.Context, req *tfprotov5.MoveResourceStateRequest) (*tfprotov5.MoveResourceStateResponse, error) { + if req == nil { + return nil, fmt.Errorf("MoveResourceState request is nil") + } + + ctx = logging.InitContext(ctx) + + logging.HelperSchemaTrace(ctx, "Returning error for MoveResourceState") + + resp := &tfprotov5.MoveResourceStateResponse{} + + _, ok := s.provider.ResourcesMap[req.TargetTypeName] + + if !ok { + resp.Diagnostics = []*tfprotov5.Diagnostic{ + { + Severity: tfprotov5.DiagnosticSeverityError, + Summary: "Unknown Resource Type", + Detail: fmt.Sprintf("The %q resource type is not supported by this provider.", req.TargetTypeName), + }, + } + + return resp, nil + } + + resp.Diagnostics = []*tfprotov5.Diagnostic{ + { + Severity: tfprotov5.DiagnosticSeverityError, + Summary: "Move 
Resource State Not Supported", + Detail: fmt.Sprintf("The %q resource type does not support moving resource state across resource types.", req.TargetTypeName), + }, + } + + return resp, nil +} + +func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *tfprotov5.ReadDataSourceRequest) (*tfprotov5.ReadDataSourceResponse, error) { + ctx = logging.InitContext(ctx) + resp := &tfprotov5.ReadDataSourceResponse{} + + schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(ctx, configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + // we need to still build the diff separately with the Read method to match + // the old behavior + res, ok := s.provider.DataSourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, fmt.Errorf("unknown data source: %s", req.TypeName)) + return resp, nil + } + diff, err := res.Diff(ctx, nil, config, s.provider.Meta()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + // Not setting RawConfig here is okay, as ResourceData.GetRawConfig() + // will return a NullVal of the schema if there is no InstanceDiff. + if diff != nil { + diff.RawConfig = configVal + } + + // now we can get the new complete data source + newInstanceState, diags := res.ReadDataApply(ctx, diff, s.provider.Meta()) + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diags) + if diags.HasError() { + return resp, nil + } + + newStateVal, err := StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + + newStateVal = copyTimeoutValues(newStateVal, configVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) + return resp, nil + } + resp.State = &tfprotov5.DynamicValue{ + MsgPack: newStateMP, + } + return resp, nil +} + +func (s *GRPCProviderServer) CallFunction(ctx context.Context, req *tfprotov5.CallFunctionRequest) (*tfprotov5.CallFunctionResponse, error) { + ctx = logging.InitContext(ctx) + + logging.HelperSchemaTrace(ctx, "Returning error for provider function call") + + resp := &tfprotov5.CallFunctionResponse{ + Error: &tfprotov5.FunctionError{ + Text: fmt.Sprintf("Function Not Found: No function named %q was found in the provider.", req.Name), + }, + } + + return resp, nil +} + +func (s *GRPCProviderServer) GetFunctions(ctx context.Context, req *tfprotov5.GetFunctionsRequest) (*tfprotov5.GetFunctionsResponse, error) { + ctx = logging.InitContext(ctx) + + logging.HelperSchemaTrace(ctx, "Getting provider functions") + + resp := &tfprotov5.GetFunctionsResponse{ + Functions: make(map[string]*tfprotov5.Function, 0), + } + + return resp, nil +} + +func pathToAttributePath(path cty.Path) *tftypes.AttributePath { + var steps []tftypes.AttributePathStep + + for _, step := range path { + switch s := step.(type) { + case cty.GetAttrStep: + steps = append(steps, + 
tftypes.AttributeName(s.Name), + ) + case cty.IndexStep: + ty := s.Key.Type() + switch ty { + case cty.Number: + i, _ := s.Key.AsBigFloat().Int64() + steps = append(steps, + tftypes.ElementKeyInt(i), + ) + case cty.String: + steps = append(steps, + tftypes.ElementKeyString(s.Key.AsString()), + ) + } + } + } + + if len(steps) < 1 { + return nil + } + return tftypes.NewAttributePathWithSteps(steps) +} + +// helper/schema throws away timeout values from the config and stores them in +// the Private/Meta fields. We need to copy those values into the planned state +// so that core doesn't see a perpetual diff with the timeout block. +func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value { + // if `to` is null we are planning to remove it altogether. + if to.IsNull() { + return to + } + toAttrs := to.AsValueMap() + // We need to remove the key since the hcl2shims will add a non-null block + // because we can't determine if a single block was null from the flatmapped + // values. This needs to conform to the correct schema for marshaling, so + // change the value to null rather than deleting it from the object map. + timeouts, ok := toAttrs[TimeoutsConfigKey] + if ok { + toAttrs[TimeoutsConfigKey] = cty.NullVal(timeouts.Type()) + } + + // if from is null then there are no timeouts to copy + if from.IsNull() { + return cty.ObjectVal(toAttrs) + } + + fromAttrs := from.AsValueMap() + timeouts, ok = fromAttrs[TimeoutsConfigKey] + + // timeouts shouldn't be unknown, but don't copy possibly invalid values either + if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() { + // no timeouts block to copy + return cty.ObjectVal(toAttrs) + } + + toAttrs[TimeoutsConfigKey] = timeouts + + return cty.ObjectVal(toAttrs) +} + +// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all +// StateFuncs and CustomizeDiffs removed. This will be used during apply to +// create a diff from a planned state where the diff modifications have already +// been applied. +func stripResourceModifiers(r *Resource) *Resource { + if r == nil { + return nil + } + // start with a shallow copy + newResource := new(Resource) + *newResource = *r + + newResource.CustomizeDiff = nil + newResource.Schema = map[string]*Schema{} + + for k, s := range r.SchemaMap() { + newResource.Schema[k] = stripSchema(s) + } + + return newResource +} + +func stripSchema(s *Schema) *Schema { + if s == nil { + return nil + } + // start with a shallow copy + newSchema := new(Schema) + *newSchema = *s + + newSchema.StateFunc = nil + + switch e := newSchema.Elem.(type) { + case *Schema: + newSchema.Elem = stripSchema(e) + case *Resource: + newSchema.Elem = stripResourceModifiers(e) + } + + return newSchema +} + +// Zero values and empty containers may be interchanged by the apply process. +// When there is a discrepancy between src and dst value being null or empty, +// prefer the src value. This takes a little more liberty with set types, since +// we can't correlate modified set values. In the case of sets, if the src set +// was wholly known we assume the value was correctly applied and copy that +// entirely to the new value. +// While apply prefers the src value, during plan we prefer dst whenever there +// is an unknown or a set is involved, since the plan can alter the value +// however it sees fit. This however means that a CustomizeDiffFunction may not +// be able to change a null to an empty value or vice versa, but that should be +// very uncommon, and it was not reliable before 0.12 either.
+func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value { + ty := dst.Type() + if !src.IsNull() && !src.IsKnown() { + // Return src during plan to retain unknown interpolated placeholders, + // which could be lost if we're only updating a resource. If this is a + // read scenario, then there shouldn't be any unknowns at all. + if dst.IsNull() && !apply { + return src + } + return dst + } + + // Handle null/empty changes for collections during apply. + // A change between null and empty values prefers src to make sure the state + // is consistent between plan and apply. + if ty.IsCollectionType() && apply { + dstEmpty := !dst.IsNull() && dst.IsKnown() && dst.LengthInt() == 0 + srcEmpty := !src.IsNull() && src.IsKnown() && src.LengthInt() == 0 + + if (src.IsNull() && dstEmpty) || (srcEmpty && dst.IsNull()) { + return src + } + } + + // check the invariants that we need below, to ensure we are working with + // non-null and known values. + if src.IsNull() || !src.IsKnown() || !dst.IsKnown() { + return dst + } + + switch { + case ty.IsMapType(), ty.IsObjectType(): + var dstMap map[string]cty.Value + if !dst.IsNull() { + dstMap = dst.AsValueMap() + } + if dstMap == nil { + dstMap = map[string]cty.Value{} + } + + srcMap := src.AsValueMap() + for key, v := range srcMap { + dstVal, ok := dstMap[key] + if !ok && apply && ty.IsMapType() { + // don't transfer old map values to dst during apply + continue + } + + if dstVal == cty.NilVal { + if !apply && ty.IsMapType() { + // let plan shape this map however it wants + continue + } + dstVal = cty.NullVal(v.Type()) + } + + dstMap[key] = normalizeNullValues(dstVal, v, apply) + } + + // you can't call MapVal/ObjectVal with empty maps, but nothing was + // copied in anyway. If the dst is nil, and the src is known, assume the + // src is correct. + if len(dstMap) == 0 { + if dst.IsNull() && src.IsWhollyKnown() && apply { + return src + } + return dst + } + + if ty.IsMapType() { + // helper/schema will populate an optional+computed map with + // unknowns which we have to fixup here. + // It would be preferable to simply prevent any known value from + // becoming unknown, but concessions have to be made to retain the + // broken legacy behavior when possible. + for k, srcVal := range srcMap { + if !srcVal.IsNull() && srcVal.IsKnown() { + dstVal, ok := dstMap[k] + if !ok { + continue + } + + if !dstVal.IsNull() && !dstVal.IsKnown() { + dstMap[k] = srcVal + } + } + } + + return cty.MapVal(dstMap) + } + + return cty.ObjectVal(dstMap) + + case ty.IsSetType(): + // If the original was wholly known, then we expect that is what the + // provider applied. The apply process loses too much information to + // reliably re-create the set. + if src.IsWhollyKnown() && apply { + return src + } + + case ty.IsListType(), ty.IsTupleType(): + // If the dst is null, and the src is known, then we lost an empty value + // so take the original. + if dst.IsNull() { + if src.IsWhollyKnown() && src.LengthInt() == 0 && apply { + return src + } + + // if dst is null and src only contains unknown values, then we lost + // those during a read or plan. + if !apply && !src.IsNull() { + allUnknown := true + for _, v := range src.AsValueSlice() { + if v.IsKnown() { + allUnknown = false + break + } + } + if allUnknown { + return src + } + } + + return dst + } + + // if the lengths are identical, then iterate over each element in succession. 
+ srcLen := src.LengthInt() + dstLen := dst.LengthInt() + if srcLen == dstLen && srcLen > 0 { + srcs := src.AsValueSlice() + dsts := dst.AsValueSlice() + + for i := 0; i < srcLen; i++ { + dsts[i] = normalizeNullValues(dsts[i], srcs[i], apply) + } + + if ty.IsTupleType() { + return cty.TupleVal(dsts) + } + return cty.ListVal(dsts) + } + + case ty == cty.String: + // The legacy SDK should not be able to remove a value during plan or + // apply, however we are only going to overwrite this if the source was + // an empty string, since that is what is often equated with unset and + // lost in the diff process. + if dst.IsNull() && src.AsString() == "" { + return src + } + } + + return dst +} + +// validateConfigNulls checks a config value for unsupported nulls before +// attempting to shim the value. While null values can mostly be ignored in the +// configuration, since they're not supported in HCL1, the case where a null +// appears in a list-like attribute (list, set, tuple) will present a nil value +// to helper/schema which can panic. Return an error to the user in this case, +// indicating the attribute with the null value. +func validateConfigNulls(ctx context.Context, v cty.Value, path cty.Path) []*tfprotov5.Diagnostic { + var diags []*tfprotov5.Diagnostic + if v.IsNull() || !v.IsKnown() { + return diags + } + + switch { + case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + if ev.IsNull() { + // if this is a set, the kv is also going to be null which + // isn't a valid path element, so we can't append it to the + // diagnostic. + p := path + if !kv.IsNull() { + p = append(p, cty.IndexStep{Key: kv}) + } + + diags = append(diags, &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: "Null value found in list", + Detail: "Null values are not allowed for this attribute value.", + Attribute: convert.PathToAttributePath(p), + }) + continue + } + + d := validateConfigNulls(ctx, ev, append(path, cty.IndexStep{Key: kv})) + diags = convert.AppendProtoDiag(ctx, diags, d) + } + + case v.Type().IsMapType() || v.Type().IsObjectType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + var step cty.PathStep + switch { + case v.Type().IsMapType(): + step = cty.IndexStep{Key: kv} + case v.Type().IsObjectType(): + step = cty.GetAttrStep{Name: kv.AsString()} + } + d := validateConfigNulls(ctx, ev, append(path, step)) + diags = convert.AppendProtoDiag(ctx, diags, d) + } + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/json.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/json.go new file mode 100644 index 0000000000..8cf22a5185 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/json.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "bytes" + "encoding/json" +) + +func unmarshalJSON(data []byte, v interface{}) error { + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + return dec.Decode(v) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go new file mode 100644 index 0000000000..55ba6e2ce8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go @@ -0,0 +1,519 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "context" + "errors" + "fmt" + "log" + "os" + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/meta" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +const uaEnvVar = "TF_APPEND_USER_AGENT" + +var ReservedProviderFields = []string{ + "alias", + "version", +} + +// StopContext returns a context safe for global use that will cancel +// when Terraform requests a stop. This function should only be called +// within a ConfigureContextFunc, passing in the request scoped context +// received in that method. +// +// Deprecated: The use of a global context is discouraged. Please use the new +// context aware CRUD methods. +func StopContext(ctx context.Context) (context.Context, bool) { + stopContext, ok := ctx.Value(StopContextKey).(context.Context) + return stopContext, ok +} + +// Provider represents a resource provider in Terraform, and properly +// implements all of the ResourceProvider API. +// +// By defining a schema for the configuration of the provider, the +// map of supporting resources, and a configuration function, the schema +// framework takes over and handles all the provider operations for you. +// +// After defining the provider structure, it is unlikely that you'll require any +// of the methods on Provider itself. +type Provider struct { + // Schema is the schema for the configuration of this provider. If this + // provider has no configuration, this can be omitted. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + Schema map[string]*Schema + + // ResourcesMap is the list of available resources that this provider + // can manage, along with their Resource structure defining their + // own schemas and CRUD operations. + // + // Provider automatically handles routing operations such as Apply, + // Diff, etc. to the proper resource. + ResourcesMap map[string]*Resource + + // DataSourcesMap is the collection of available data sources that + // this provider implements, with a Resource instance defining + // the schema and Read operation of each. + // + // Resource instances for data sources must have a Read function + // and must *not* implement Create, Update or Delete. + DataSourcesMap map[string]*Resource + + // ProviderMetaSchema is the schema for the configuration of the meta + // information for this provider. If this provider has no meta info, + // this can be omitted. This functionality is currently experimental + // and subject to change or break without warning; it should only be + // used by providers that are collaborating on its use with the + // Terraform team. + ProviderMetaSchema map[string]*Schema + + // ConfigureFunc is a function for configuring the provider. If the + // provider doesn't need to be configured, this can be omitted. + // + // Deprecated: Please use ConfigureContextFunc instead. + ConfigureFunc ConfigureFunc + + // ConfigureContextFunc is a function for configuring the provider. If the + // provider doesn't need to be configured, this can be omitted. This function + // receives a context.Context that will cancel when Terraform sends a + // cancellation signal. This function can yield Diagnostics. 
+ ConfigureContextFunc ConfigureContextFunc + + // configured is enabled after a Configure() call + configured bool + + meta interface{} + + TerraformVersion string +} + +// ConfigureFunc is the function used to configure a Provider. +// +// Deprecated: Please use ConfigureContextFunc +type ConfigureFunc func(*ResourceData) (interface{}, error) + +// ConfigureContextFunc is the function used to configure a Provider. +// +// The interface{} value returned by this function is stored and passed into +// the subsequent resources as the meta parameter. This return value is +// usually used to pass along a configured API client, a configuration +// structure, etc. +type ConfigureContextFunc func(context.Context, *ResourceData) (interface{}, diag.Diagnostics) + +// InternalValidate should be called to validate the structure +// of the provider. +// +// This should be called in a unit test for any provider to verify +// before release that a provider is properly configured for use with +// this library. +func (p *Provider) InternalValidate() error { + if p == nil { + return errors.New("provider is nil") + } + + if p.ConfigureFunc != nil && p.ConfigureContextFunc != nil { + return errors.New("ConfigureFunc and ConfigureContextFunc must not both be set") + } + + var validationErrors []error + sm := schemaMap(p.Schema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = append(validationErrors, err) + } + + // Provider-specific checks + for k := range sm { + if isReservedProviderFieldName(k) { + return fmt.Errorf("%s is a reserved field name for a provider", k) + } + } + + for k, r := range p.ResourcesMap { + if err := r.InternalValidate(nil, true); err != nil { + validationErrors = append(validationErrors, fmt.Errorf("resource %s: %s", k, err)) + } + } + + for k, r := range p.DataSourcesMap { + if err := r.InternalValidate(nil, false); err != nil { + validationErrors = append(validationErrors, fmt.Errorf("data source %s: %s", k, err)) + } + } + + return errors.Join(validationErrors...) +} + +func isReservedProviderFieldName(name string) bool { + for _, reservedName := range ReservedProviderFields { + if name == reservedName { + return true + } + } + return false +} + +// Meta returns the metadata associated with this provider that was +// returned by the Configure call. It will be nil until Configure is called. +func (p *Provider) Meta() interface{} { + return p.meta +} + +// SetMeta can be used to forcefully set the Meta object of the provider. +// Note that if Configure is called the return value will override anything +// set here. +func (p *Provider) SetMeta(v interface{}) { + p.meta = v +} + +// GetSchema returns the config schema for the main provider +// configuration, as would appear in a "provider" block in the +// configuration files. +// +// Currently not all providers support schema. Callers must therefore +// first call Resources and DataSources and ensure that at least one +// resource or data source has the SchemaAvailable flag set. 
+func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + resourceTypes := map[string]*configschema.Block{} + dataSources := map[string]*configschema.Block{} + + for _, name := range req.ResourceTypes { + if r, exists := p.ResourcesMap[name]; exists { + resourceTypes[name] = r.CoreConfigSchema() + } + } + for _, name := range req.DataSources { + if r, exists := p.DataSourcesMap[name]; exists { + dataSources[name] = r.CoreConfigSchema() + } + } + + return &terraform.ProviderSchema{ + Provider: schemaMap(p.Schema).CoreConfigSchema(), + ResourceTypes: resourceTypes, + DataSources: dataSources, + }, nil +} + +// Validate is called once at the beginning with the raw configuration +// (no interpolation done) and can return diagnostics +// +// This is called once with the provider configuration only. It may not +// be called at all if no provider configuration is given. +// +// This should not assume that any values of the configurations are valid. +// The primary use case of this call is to check that required keys are +// set. +func (p *Provider) Validate(c *terraform.ResourceConfig) diag.Diagnostics { + if err := p.InternalValidate(); err != nil { + return []diag.Diagnostic{ + { + Severity: diag.Error, + Summary: "InternalValidate", + Detail: fmt.Sprintf("Internal validation of the provider failed! This is always a bug\n"+ + "with the provider itself, and not a user issue. Please report\n"+ + "this bug:\n\n%s", err), + }, + } + } + + return schemaMap(p.Schema).Validate(c) +} + +// ValidateResource is called once at the beginning with the raw +// configuration (no interpolation done) and can return diagnostics. +// +// This is called once per resource. +// +// This should not assume any of the values in the resource configuration +// are valid since it is possible they have to be interpolated still. +// The primary use case of this call is to check that the required keys +// are set and that the general structure is correct. +func (p *Provider) ValidateResource( + t string, c *terraform.ResourceConfig) diag.Diagnostics { + r, ok := p.ResourcesMap[t] + if !ok { + return []diag.Diagnostic{ + { + Severity: diag.Error, + Summary: fmt.Sprintf("Provider doesn't support resource: %s", t), + }, + } + } + + return r.Validate(c) +} + +// Configure configures the provider itself with the configuration +// given. This is useful for setting things like access keys. +// +// This won't be called at all if no provider configuration is given. +func (p *Provider) Configure(ctx context.Context, c *terraform.ResourceConfig) diag.Diagnostics { + // No configuration + if p.ConfigureFunc == nil && p.ConfigureContextFunc == nil { + return nil + } + + if p.configured { + logging.HelperSchemaWarn(ctx, "Previously configured provider being re-configured. This can cause issues in concurrent testing if the configurations are not equal.") + } + + sm := schemaMap(p.Schema) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(ctx, nil, c, nil, p.meta, true) + if err != nil { + return diag.FromErr(err) + } + + data, err := sm.Data(nil, diff) + if err != nil { + return diag.FromErr(err) + } + + // Modify the ResourceData to contain the original ResourceConfig to support + // GetOkExists() and GetRawConfig(). 
+ // + // Reference: https://github.com/hashicorp/terraform-plugin-sdk/issues/1270 + if data != nil { + data.config = c + } + + if p.ConfigureFunc != nil { + meta, err := p.ConfigureFunc(data) + if err != nil { + return diag.FromErr(err) + } + p.meta = meta + } + + var diags diag.Diagnostics + + if p.ConfigureContextFunc != nil { + meta, configureDiags := p.ConfigureContextFunc(ctx, data) + diags = append(diags, configureDiags...) + + if diags.HasError() { + return diags + } + + p.meta = meta + } + + p.configured = true + + return diags +} + +// Resources returns all the available resource types that this provider +// knows how to manage. +func (p *Provider) Resources() []terraform.ResourceType { + keys := make([]string, 0, len(p.ResourcesMap)) + for k := range p.ResourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.ResourceType, 0, len(keys)) + for _, k := range keys { + resource := p.ResourcesMap[k] + + // This isn't really possible (it'd fail InternalValidate), but + // we do it anyways to avoid a panic. + if resource == nil { + resource = &Resource{} + } + + result = append(result, terraform.ResourceType{ + Name: k, + Importable: resource.Importer != nil, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, + }) + } + + return result +} + +// ImportState requests that the given resource be imported. +// +// The returned InstanceState only requires ID be set. Importing +// will always call Refresh after the state to complete it. +// +// IMPORTANT: InstanceState doesn't have the resource type attached +// to it. A type must be specified on the state via the Ephemeral +// field on the state. +// +// This function can return multiple states. Normally, an import +// will map 1:1 to a physical resource. However, some resources map +// to multiple. For example, an AWS security group may contain many rules. +// Each rule is represented by a separate resource in Terraform, +// therefore multiple states are returned. +func (p *Provider) ImportState( + ctx context.Context, + info *terraform.InstanceInfo, + id string) ([]*terraform.InstanceState, error) { + // Find the resource + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + // If it doesn't support import, error + if r.Importer == nil { + return nil, fmt.Errorf("resource %s doesn't support import", info.Type) + } + + // Create the data + data := r.Data(nil) + data.SetId(id) + data.SetType(info.Type) + + // Call the import function + results := []*ResourceData{data} + if r.Importer.State != nil || r.Importer.StateContext != nil { + var err error + logging.HelperSchemaTrace(ctx, "Calling downstream") + + if r.Importer.StateContext != nil { + results, err = r.Importer.StateContext(ctx, data, p.meta) + } else { + results, err = r.Importer.State(data, p.meta) + } + logging.HelperSchemaTrace(ctx, "Called downstream") + + if err != nil { + return nil, err + } + } + + // Convert the results to InstanceState values and return it + states := make([]*terraform.InstanceState, len(results)) + for i, r := range results { + if r == nil { + return nil, fmt.Errorf("The provider returned a missing resource during ImportResourceState. " + + "This is generally a bug in the resource implementation for import. " + + "Resource import code should return an error for missing resources and skip returning a missing or empty ResourceData. 
" + + "Please report this to the provider developers.") + } + + if r.Id() == "" { + return nil, fmt.Errorf("The provider returned a resource missing an identifier during ImportResourceState. " + + "This is generally a bug in the resource implementation for import. " + + "Resource import code should not call d.SetId(\"\") or create an empty ResourceData. " + + "If the resource is missing, instead return an error. " + + "Please report this to the provider developers.") + } + + states[i] = r.State() + } + + // Verify that all are non-nil. If there are any nil the error + // isn't obvious so we circumvent that with a friendlier error. + for _, s := range states { + if s == nil { + return nil, fmt.Errorf("The provider returned a missing resource during ImportResourceState. " + + "This is generally a bug in the resource implementation for import. " + + "Resource import code should return an error for missing resources. " + + "Please report this to the provider developers.") + } + } + + return states, nil +} + +// ValidateDataSource is called once at the beginning with the raw +// configuration (no interpolation done) and can return diagnostics. +// +// This is called once per data source instance. +// +// This should not assume any of the values in the resource configuration +// are valid since it is possible they have to be interpolated still. +// The primary use case of this call is to check that the required keys +// are set and that the general structure is correct. +func (p *Provider) ValidateDataSource( + t string, c *terraform.ResourceConfig) diag.Diagnostics { + r, ok := p.DataSourcesMap[t] + if !ok { + return []diag.Diagnostic{ + { + Severity: diag.Error, + Summary: fmt.Sprintf("Provider doesn't support data source: %s", t), + }, + } + } + + return r.Validate(c) +} + +// DataSources returns all of the available data sources that this +// provider implements. +func (p *Provider) DataSources() []terraform.DataSource { + keys := make([]string, 0, len(p.DataSourcesMap)) + for k := range p.DataSourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.DataSource, 0, len(keys)) + for _, k := range keys { + result = append(result, terraform.DataSource{ + Name: k, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, + }) + } + + return result +} + +// UserAgent returns a string suitable for use in the User-Agent header of +// requests generated by the provider. The generated string contains the +// version of Terraform, the Plugin SDK, and the provider used to generate the +// request. `name` should be the hyphen-separated reporting name of the +// provider, and `version` should be the version of the provider. +// +// If TF_APPEND_USER_AGENT is set, its value will be appended to the returned +// string. +func (p *Provider) UserAgent(name, version string) string { + //nolint:staticcheck // best effort usage + ua := fmt.Sprintf("Terraform/%s (+https://www.terraform.io) Terraform-Plugin-SDK/%s", p.TerraformVersion, meta.SDKVersionString()) + if name != "" { + ua += " " + name + if version != "" { + ua += "/" + version + } + } + + if add := os.Getenv(uaEnvVar); add != "" { + add = strings.TrimSpace(add) + if len(add) > 0 { + ua += " " + add + log.Printf("[DEBUG] Using modified User-Agent: %s", ua) + } + } + + return ua +} + +// GRPCProvider returns a gRPC server, for use with terraform-plugin-mux. 
+func (p *Provider) GRPCProvider() tfprotov5.ProviderServer { + return NewGRPCProviderServer(p) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go new file mode 100644 index 0000000000..7564a0aff2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go @@ -0,0 +1,1399 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "context" + "errors" + "fmt" + "strconv" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +var ReservedDataSourceFields = []string{ + "connection", + "count", + "depends_on", + "lifecycle", + "provider", + "provisioner", +} + +var ReservedResourceFields = []string{ + "connection", + "count", + "depends_on", + "lifecycle", + "provider", + "provisioner", +} + +// Resource is an abstraction for multiple Terraform concepts: +// +// - Managed Resource: An infrastructure component with a schema, lifecycle +// operations such as create, read, update, and delete +// (CRUD), and optional implementation details such as +// import support, upgrade state support, and difference +// customization. +// - Data Resource: Also known as a data source. An infrastructure component +// with a schema and only the read lifecycle operation. +// - Block: When implemented within a Schema type Elem field, a configuration +// block that contains nested schema information such as attributes +// and blocks. +// +// To fully implement managed resources, the Provider type ResourcesMap field +// should include a reference to an implementation of this type. To fully +// implement data resources, the Provider type DataSourcesMap field should +// include a reference to an implementation of this type. +// +// Each field further documents any constraints based on the Terraform concept +// being implemented. +type Resource struct { + // Schema is the structure and type information for this component. This + // field, or SchemaFunc, is required for all Resource concepts. To prevent + // storing all schema information in memory for the lifecycle of a provider, + // use SchemaFunc instead. + // + // The keys of this map are the names used in a practitioner configuration, + // such as the attribute or block name. The values describe the structure + // and type information of that attribute or block. + Schema map[string]*Schema + + // SchemaFunc is the structure and type information for this component. This + // field, or Schema, is required for all Resource concepts. Use this field + // instead of Schema on top level Resource declarations to prevent storing + // all schema information in memory for the lifecycle of a provider. + // + // The keys of this map are the names used in a practitioner configuration, + // such as the attribute or block name. The values describe the structure + // and type information of that attribute or block. + SchemaFunc func() map[string]*Schema + + // SchemaVersion is the version number for this resource's Schema + // definition. This field is only valid when the Resource is a managed + // resource. + // + // The current SchemaVersion stored in the state for each resource. + // Provider authors can increment this version number when Schema semantics + // change in an incompatible manner. 
If the state's SchemaVersion is less + // than the current SchemaVersion, the MigrateState and StateUpgraders + // functionality is executed to upgrade the state information. + // + // When unset, SchemaVersion defaults to 0, so provider authors can start + // their versioning at any integer >= 1. + SchemaVersion int + + // MigrateState is responsible for updating an InstanceState with an old + // version to the format expected by the current version of the Schema. + // This field is only valid when the Resource is a managed resource. + // + // It is called during Refresh if the State's stored SchemaVersion is less + // than the current SchemaVersion of the Resource. + // + // The function is yielded the state's stored SchemaVersion and a pointer to + // the InstanceState that needs updating, as well as the configured + // provider's meta interface{}, in case the migration process + // needs to make any remote API calls. + // + // Deprecated: MigrateState is deprecated and any new changes to a resource's schema + // should be handled by StateUpgraders. Existing MigrateState implementations + // should remain for compatibility with existing state. MigrateState will + // still be called if the stored SchemaVersion is less than the + // first version of the StateUpgraders. + MigrateState StateMigrateFunc + + // StateUpgraders contains the functions responsible for upgrading an + // existing state with an old schema version to a newer schema. It is + // called specifically by Terraform when the stored schema version is less + // than the current SchemaVersion of the Resource. This field is only valid + // when the Resource is a managed resource. + // + // StateUpgraders map specific schema versions to a StateUpgrader + // function. The registered versions are expected to be ordered, + // consecutive values. The initial value may be greater than 0 to account + // for legacy schemas that weren't recorded and can be handled by + // MigrateState. + StateUpgraders []StateUpgrader + + // Create is called when the provider must create a new instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Create, CreateContext, or + // CreateWithoutTimeout should be implemented. + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is the + // proposed state, which is the merged data of the practitioner + // configuration and any CustomizeDiff field logic. + // + // The SetId method must be called with a non-empty value for the managed + // resource instance to be properly saved into the Terraform state and + // avoid an "inconsistent result after apply" error. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. + // + // Deprecated: Use CreateContext or CreateWithoutTimeout instead. This + // implementation does not support request cancellation initiated by + // Terraform, such as a system or practitioner sending SIGINT (Ctrl-c). + // This implementation also does not support warning diagnostics.
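+ // + // A hedged migration sketch (illustrative; createWidget and the "name" + // attribute are hypothetical, not part of this SDK): an equivalent + // CreateContext implementation sets the ID and returns diagnostics: + // + //	CreateContext: func(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + //		id, err := createWidget(ctx, meta, d.Get("name").(string)) + //		if err != nil { + //			return diag.FromErr(err) + //		} + //		d.SetId(id) + //		return nil + //	},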
+ Create CreateFunc + + // Read is called when the provider must refresh the state of a managed + // resource instance or data resource instance. This field is only valid + // when the Resource is a managed resource or data resource. Only one of + // Read, ReadContext, or ReadWithoutTimeout should be implemented. + // + // The *ResourceData parameter contains the state data for this managed + // resource instance or data resource instance. + // + // Managed resources can signal to Terraform that the managed resource + // instance no longer exists and potentially should be recreated by calling + // the SetId method with an empty string ("") parameter and without + // returning an error. + // + // Data resources that are designed to return state for a singular + // infrastructure component should conventionally return an error if that + // infrastructure does not exist and omit any calls to the + // SetId method. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. + // + // Deprecated: Use ReadContext or ReadWithoutTimeout instead. This + // implementation does not support request cancellation initiated by + // Terraform, such as a system or practitioner sending SIGINT (Ctrl-c). + // This implementation also does not support warning diagnostics. + Read ReadFunc + + // Update is called when the provider must update an instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Update, UpdateContext, or + // UpdateWithoutTimeout should be implemented. + // + // This implementation is optional. If omitted, all Schema must enable + // the ForceNew field and any practitioner changes that would have + // caused an update will instead destroy and recreate the infrastructure + // component. + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is the + // proposed state, which is the merged data of the prior state, + // practitioner configuration, and any CustomizeDiff field logic. The + // available data for the GetChange* and HasChange* methods is the prior + // state and proposed state. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. + // + // Deprecated: Use UpdateContext or UpdateWithoutTimeout instead. This + // implementation does not support request cancellation initiated by + // Terraform, such as a system or practitioner sending SIGINT (Ctrl-c). + // This implementation also does not support warning diagnostics. + Update UpdateFunc + + // Delete is called when the provider must destroy the instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Delete, DeleteContext, or + // DeleteWithoutTimeout should be implemented.
+ // + // The *ResourceData parameter contains the state data for this managed + // resource instance. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. + // + // Deprecated: Use DeleteContext or DeleteWithoutTimeout instead. This + // implementation does not support request cancellation initiated by + // Terraform, such as a system or practitioner sending SIGINT (Ctrl-c). + // This implementation also does not support warning diagnostics. + Delete DeleteFunc + + // Exists is a function that is called to check if a resource still + // exists. This field is only valid when the Resource is a managed + // resource. + // + // If this returns false, then this will affect the diff + // accordingly. If this function isn't set, it will not be called. You + // can also signal existence in the Read method by calling d.SetId("") + // if the Resource is no longer present and should be removed from state. + // The *ResourceData passed to Exists should _not_ be modified. + // + // Deprecated: Remove in preference of ReadContext or ReadWithoutTimeout. + Exists ExistsFunc + + // CreateContext is called when the provider must create a new instance of + // a managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Create, CreateContext, or + // CreateWithoutTimeout should be implemented. + // + // The Context parameter stores SDK information, such as loggers and + // timeout deadlines. It also is wired to receive any cancellation from + // Terraform such as a system or practitioner sending SIGINT (Ctrl-c). + // + // By default, CreateContext has a 20 minute timeout. Use the Timeouts + // field to control the default duration or implement CreateWithoutTimeout + // instead of CreateContext to remove the default timeout. + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is the + // proposed state, which is the merged data of the practitioner + // configuration and any CustomizeDiff field logic. + // + // The SetId method must be called with a non-empty value for the managed + // resource instance to be properly saved into the Terraform state and + // avoid an "inconsistent result after apply" error. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. + CreateContext CreateContextFunc + + // ReadContext is called when the provider must refresh the state of a managed + // resource instance or data resource instance. This field is only valid + // when the Resource is a managed resource or data resource. Only one of + // Read, ReadContext, or ReadWithoutTimeout should be implemented. + // + // The Context parameter stores SDK information, such as loggers and + // timeout deadlines.
It also is wired to receive any cancellation from + // Terraform such as a system or practitioner sending SIGINT (Ctrl-c). + // + // By default, ReadContext has a 20 minute timeout. Use the Timeouts + // field to control the default duration or implement ReadWithoutTimeout + // instead of ReadContext to remove the default timeout. + // + // The *ResourceData parameter contains the state data for this managed + // resource instance or data resource instance. + // + // Managed resources can signal to Terraform that the managed resource + // instance no longer exists and potentially should be recreated by calling + // the SetId method with an empty string ("") parameter and without + // returning an error. + // + // Data resources that are designed to return state for a singular + // infrastructure component should conventionally return an error if that + // infrastructure does not exist and omit any calls to the + // SetId method. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. + ReadContext ReadContextFunc + + // UpdateContext is called when the provider must update an instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Update, UpdateContext, or + // UpdateWithoutTimeout should be implemented. + // + // This implementation is optional. If omitted, all Schema must enable + // the ForceNew field and any practitioner changes that would have + // caused an update will instead destroy and recreate the infrastructure + // component. + // + // The Context parameter stores SDK information, such as loggers and + // timeout deadlines. It also is wired to receive any cancellation from + // Terraform such as a system or practitioner sending SIGINT (Ctrl-c). + // + // By default, UpdateContext has a 20 minute timeout. Use the Timeouts + // field to control the default duration or implement UpdateWithoutTimeout + // instead of UpdateContext to remove the default timeout. + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is the + // proposed state, which is the merged data of the prior state, + // practitioner configuration, and any CustomizeDiff field logic. The + // available data for the GetChange* and HasChange* methods is the prior + // state and proposed state. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. + UpdateContext UpdateContextFunc + + // DeleteContext is called when the provider must destroy the instance of a + // managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Delete, DeleteContext, or + // DeleteWithoutTimeout should be implemented.
+ // + // The Context parameter stores SDK information, such as loggers and + // timeout deadlines. It also is wired to receive any cancellation from + // Terraform such as a system or practitioner sending SIGINT (Ctrl-c). + // + // By default, DeleteContext has a 20 minute timeout. Use the Timeouts + // field to control the default duration or implement DeleteWithoutTimeout + // instead of DeleteContext to remove the default timeout. + // + // The *ResourceData parameter contains the state data for this managed + // resource instance. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. + DeleteContext DeleteContextFunc + + // CreateWithoutTimeout is called when the provider must create a new + // instance of a managed resource. This field is only valid when the + // Resource is a managed resource. Only one of Create, CreateContext, or + // CreateWithoutTimeout should be implemented. + // + // Most resources should prefer CreateContext with properly implemented + // operation timeout values, however there are cases where operation + // synchronization across concurrent resources is necessary in the resource + // logic, such as a mutex, to prevent remote system errors. Since these + // operations would have an indeterminate timeout that scales with the + // number of resources, this allows resources to control timeout behavior. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is the + // proposed state, which is the merged data of the practitioner + // configuration and any CustomizeDiff field logic. + // + // The SetId method must be called with a non-empty value for the managed + // resource instance to be properly saved into the Terraform state and + // avoid an "inconsistent result after apply" error. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. + CreateWithoutTimeout CreateContextFunc + + // ReadWithoutTimeout is called when the provider must refresh the state of + // a managed resource instance or data resource instance. This field is + // only valid when the Resource is a managed resource or data resource. + // Only one of Read, ReadContext, or ReadWithoutTimeout should be + // implemented. + // + // Most resources should prefer ReadContext with properly implemented + // operation timeout values, however there are cases where operation + // synchronization across concurrent resources is necessary in the resource + // logic, such as a mutex, to prevent remote system errors.
Since these + // operations would have an indeterminate timeout that scales with the + // number of resources, this allows resources to control timeout behavior. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceData parameter contains the state data for this managed + // resource instance or data resource instance. + // + // Managed resources can signal to Terraform that the managed resource + // instance no longer exists and potentially should be recreated by calling + // the SetId method with an empty string ("") parameter and without + // returning an error. + // + // Data resources that are designed to return state for a singular + // infrastructure component should conventionally return an error if that + // infrastructure does not exist and omit any calls to the + // SetId method. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. + ReadWithoutTimeout ReadContextFunc + + // UpdateWithoutTimeout is called when the provider must update an instance + // of a managed resource. This field is only valid when the Resource is a + // managed resource. Only one of Update, UpdateContext, or + // UpdateWithoutTimeout should be implemented. + // + // Most resources should prefer UpdateContext with properly implemented + // operation timeout values, however there are cases where operation + // synchronization across concurrent resources is necessary in the resource + // logic, such as a mutex, to prevent remote system errors. Since these + // operations would have an indeterminate timeout that scales with the + // number of resources, this allows resources to control timeout behavior. + // + // This implementation is optional. If omitted, all Schema must enable + // the ForceNew field and any practitioner changes that would have + // caused an update will instead destroy and recreate the infrastructure + // component. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceData parameter contains the plan and state data for this + // managed resource instance. The available data in the Get* methods is the + // proposed state, which is the merged data of the prior state, + // practitioner configuration, and any CustomizeDiff field logic. The + // available data for the GetChange* and HasChange* methods is the prior + // state and proposed state. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics.
+ UpdateWithoutTimeout UpdateContextFunc + + // DeleteWithoutTimeout is called when the provider must destroy the + // instance of a managed resource. This field is only valid when the + // Resource is a managed resource. Only one of Delete, DeleteContext, or + // DeleteWithoutTimeout should be implemented. + // + // Most resources should prefer DeleteContext with properly implemented + // operation timeout values, however there are cases where operation + // synchronization across concurrent resources is necessary in the resource + // logic, such as a mutex, to prevent remote system errors. Since these + // operations would have an indeterminate timeout that scales with the + // number of resources, this allows resources to control timeout behavior. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceData parameter contains the state data for this managed + // resource instance. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The diagnostics return parameter, if not nil, can contain any + // combination and multiple of warning and/or error diagnostics. + DeleteWithoutTimeout DeleteContextFunc + + // CustomizeDiff is called after a difference (plan) has been generated + // for the Resource and allows for customizations, such as setting values + // not controlled by configuration, conditionally triggering resource + // recreation, or implementing additional validation logic to abort a plan. + // This field is only valid when the Resource is a managed resource. + // + // The Context parameter stores SDK information, such as loggers. It also + // is wired to receive any cancellation from Terraform such as a system or + // practitioner sending SIGINT (Ctrl-c). + // + // The *ResourceDiff parameter is similar to ResourceData but replaces the + // Set method with other difference handling methods, such as SetNew, + // SetNewComputed, and ForceNew. In general, only Schema with Computed + // enabled can have those methods executed against them. + // + // The phases Terraform runs this in, and the state available via functions + // like Get and GetChange, are as follows: + // + // * New resource: One run with no state + // * Existing resource: One run with state + // * Existing resource, forced new: One run with state (before ForceNew), + // then one run without state (as if new resource) + // * Tainted resource: No runs (custom diff logic is skipped) + // * Destroy: No runs (standard diff logic is skipped on destroy diffs) + // + // This function needs to be resilient to support all scenarios. + // + // The interface{} parameter is the result of the Provider type + // ConfigureFunc field execution. If the Provider does not define + // a ConfigureFunc, this will be nil. This parameter is conventionally + // used to store API clients and other provider instance specific data. + // + // The error return parameter, if not nil, will be converted into an error + // diagnostic when passed back to Terraform. + CustomizeDiff CustomizeDiffFunc + + // Importer is called when the provider must import an instance of a + // managed resource. 
This field is only valid when the Resource is a + // managed resource. + // + // If this is nil, then this resource does not support importing. If + // this is non-nil, then it supports importing and ResourceImporter + // must be validated. The validity of ResourceImporter is verified + // by InternalValidate on Resource. + Importer *ResourceImporter + + // If non-empty, this string is emitted as the details of a warning + // diagnostic during validation (validate, plan, and apply operations). + // This field is only valid when the Resource is a managed resource or + // data resource. + DeprecationMessage string + + // Timeouts configures the default time duration allowed before a create, + // read, update, or delete operation is considered timed out, which returns + // an error to practitioners. This field is only valid when the Resource is + // a managed resource or data resource. + // + // When implemented, practitioners can add a timeouts configuration block + // within their managed resource or data resource configuration to further + // customize the create, read, update, or delete operation timeouts. For + // example, a configuration may specify a longer create timeout for a + // database resource due to its data size. + // + // The ResourceData that is passed to create, read, update, and delete + // functionality can access the merged time duration of the Resource + // default timeouts configured in this field and the practitioner timeouts + // configuration via the Timeout method. Practitioner configuration + // always overrides any default values set here, whether shorter or longer. + Timeouts *ResourceTimeout + + // Description is used as the description for docs, the language server and + // other user facing usage. It can be plain-text or markdown depending on the + // global DescriptionKind setting. This field is valid for any Resource. + Description string + + // UseJSONNumber should be set when state upgraders will expect + // json.Numbers instead of float64s for numbers. This is added as a + // toggle for backwards compatibility for type assertions, but should + // be used in all new resources to avoid bugs with sufficiently large + // user input. This field is only valid when the Resource is a managed + // resource. + // + // See github.com/hashicorp/terraform-plugin-sdk/issues/655 for more + // details. + UseJSONNumber bool + + // EnableLegacyTypeSystemApplyErrors when enabled will prevent the SDK from + // setting the legacy type system flag in the protocol during + // ApplyResourceChange (Create, Update, and Delete) operations. Before + // enabling this setting in a production release for a resource, the + // resource should be exhaustively acceptance tested with the setting + // enabled in an environment where it is easy to clean up resources, + // potentially outside of Terraform, since these errors may be unavoidable + // in certain cases. + // + // Disabling the legacy type system protocol flag is an unsafe operation + // when using this SDK as there are certain unavoidable behaviors imposed + // by the SDK, however this option is surfaced to allow provider developers + // to try to discover fixable data inconsistency errors more easily. + // Terraform, when encountering an enabled legacy type system protocol flag, + // will demote certain schema and data consistency errors into warning logs + // containing the text "legacy plugin SDK". 
Some errors for errant schema + // definitions, such as when an attribute is not marked as Computed as + // expected by Terraform, can only be resolved by migrating to + // terraform-plugin-framework since that SDK does not impose behavior + // changes with it enabled. However, data-based errors typically require + // logic fixes that should be applicable for both SDKs to be resolved. + EnableLegacyTypeSystemApplyErrors bool + + // EnableLegacyTypeSystemPlanErrors when enabled will prevent the SDK from + // setting the legacy type system flag in the protocol during + // PlanResourceChange operations. Before enabling this setting in a + // production release for a resource, the resource should be exhaustively + // acceptance tested with the setting enabled in an environment where it is + // easy to clean up resources, potentially outside of Terraform, since these + // errors may be unavoidable in certain cases. + // + // Disabling the legacy type system protocol flag is an unsafe operation + // when using this SDK as there are certain unavoidable behaviors imposed + // by the SDK, however this option is surfaced to allow provider developers + // to try to discover fixable data inconsistency errors more easily. + // Terraform, when encountering an enabled legacy type system protocol flag, + // will demote certain schema and data consistency errors into warning logs + // containing the text "legacy plugin SDK". Some errors for errant schema + // definitions, such as when an attribute is not marked as Computed as + // expected by Terraform, can only be resolved by migrating to + // terraform-plugin-framework since that SDK does not impose behavior + // changes with it enabled. However, data-based errors typically require + // logic fixes that should be applicable for both SDKs to be resolved. + EnableLegacyTypeSystemPlanErrors bool +} + +// SchemaMap returns the schema information for this Resource whether it is +// defined via the SchemaFunc field or Schema field. The SchemaFunc field, if +// defined, takes precedence over the Schema field. +func (r *Resource) SchemaMap() map[string]*Schema { + if r.SchemaFunc != nil { + return r.SchemaFunc() + } + + return r.Schema +} + +// ShimInstanceStateFromValue converts a cty.Value to a +// terraform.InstanceState. +func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) { + // Get the raw shimmed value. While this is correct, the set hashes don't + // match those from the Schema. + s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion) + + // We now rebuild the state through the ResourceData, so that the set indexes + // match what helper/schema expects. + data, err := schemaMap(r.SchemaMap()).Data(s, nil) + if err != nil { + return nil, err + } + + s = data.State() + if s == nil { + s = &terraform.InstanceState{} + } + return s, nil +} + +// The following function types are the signatures of the legacy CRUD operations. +// +// Deprecated: Please use the context aware equivalents instead. +type CreateFunc func(*ResourceData, interface{}) error + +// Deprecated: Please use the context aware equivalents instead. +type ReadFunc func(*ResourceData, interface{}) error + +// Deprecated: Please use the context aware equivalents instead. +type UpdateFunc func(*ResourceData, interface{}) error + +// Deprecated: Please use the context aware equivalents instead. +type DeleteFunc func(*ResourceData, interface{}) error + +// Deprecated: Please use the context aware equivalents instead.
+type ExistsFunc func(*ResourceData, interface{}) (bool, error) + +// See Resource documentation. +type CreateContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics + +// See Resource documentation. +type ReadContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics + +// See Resource documentation. +type UpdateContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics + +// See Resource documentation. +type DeleteContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics + +// See Resource documentation. +type StateMigrateFunc func( + int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) + +// Implementation of a single schema version state upgrade. +type StateUpgrader struct { + // Version is the schema version that this Upgrader will handle, converting + // it to Version+1. + Version int + + // Type describes the schema that this function can upgrade. Type is + // required to decode the schema if the state was stored in a legacy + // flatmap format. + Type cty.Type + + // Upgrade takes the JSON encoded state and the provider meta value, and + // upgrades the state one single schema version. The provided state is + // decoded into the default JSON types using a map[string]interface{}. It + // is up to the StateUpgradeFunc to ensure that the returned value can be + // encoded using the new schema. + Upgrade StateUpgradeFunc +} + +// Function signature for a schema version state upgrade handler. +// +// The Context parameter stores SDK information, such as loggers. It also +// is wired to receive any cancellation from Terraform such as a system or +// practitioner sending SIGINT (Ctrl-c). +// +// The map[string]interface{} parameter contains the previous schema version +// state data for a managed resource instance. The keys are top level attribute +// or block names mapped to values that can be type asserted similar to +// fetching values using the ResourceData Get* methods: +// +// - TypeBool: bool +// - TypeFloat: float64 +// - TypeInt: int +// - TypeList: []interface{} +// - TypeMap: map[string]interface{} +// - TypeSet: *Set +// - TypeString: string +// +// In certain scenarios, the map may be nil, so checking for that condition +// upfront is recommended to prevent potential panics. +// +// The interface{} parameter is the result of the Provider type +// ConfigureFunc field execution. If the Provider does not define +// a ConfigureFunc, this will be nil. This parameter is conventionally +// used to store API clients and other provider instance specific data. +// +// The map[string]interface{} return parameter should contain the upgraded +// schema version state data for a managed resource instance. Values must +// align to the typing mentioned above. +type StateUpgradeFunc func(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) + +// See Resource documentation.
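+// +// An illustrative sketch (the "size" attribute and its grow-only rule are +// hypothetical, not from the SDK): a CustomizeDiffFunc that forces resource +// recreation when the planned value shrinks: +// +//	func customizeWidgetDiff(ctx context.Context, d *ResourceDiff, meta interface{}) error { +//		if d.Id() == "" { +//			return nil // new resource, nothing to compare +//		} +//		o, n := d.GetChange("size") +//		if o.(int) > n.(int) { +//			return d.ForceNew("size") +//		} +//		return nil +//	}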
+type CustomizeDiffFunc func(context.Context, *ResourceDiff, interface{}) error + +func (r *Resource) create(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Create != nil { + if err := r.Create(d, meta); err != nil { + return diag.FromErr(err) + } + return nil + } + + if r.CreateWithoutTimeout != nil { + return r.CreateWithoutTimeout(ctx, d, meta) + } + + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutCreate)) + defer cancel() + return r.CreateContext(ctx, d, meta) +} + +func (r *Resource) read(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Read != nil { + if err := r.Read(d, meta); err != nil { + return diag.FromErr(err) + } + return nil + } + + if r.ReadWithoutTimeout != nil { + return r.ReadWithoutTimeout(ctx, d, meta) + } + + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutRead)) + defer cancel() + return r.ReadContext(ctx, d, meta) +} + +func (r *Resource) update(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Update != nil { + if err := r.Update(d, meta); err != nil { + return diag.FromErr(err) + } + return nil + } + + if r.UpdateWithoutTimeout != nil { + return r.UpdateWithoutTimeout(ctx, d, meta) + } + + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutUpdate)) + defer cancel() + return r.UpdateContext(ctx, d, meta) +} + +func (r *Resource) delete(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Delete != nil { + if err := r.Delete(d, meta); err != nil { + return diag.FromErr(err) + } + return nil + } + + if r.DeleteWithoutTimeout != nil { + return r.DeleteWithoutTimeout(ctx, d, meta) + } + + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutDelete)) + defer cancel() + return r.DeleteContext(ctx, d, meta) +} + +// Apply creates, updates, and/or deletes a resource. +func (r *Resource) Apply( + ctx context.Context, + s *terraform.InstanceState, + d *terraform.InstanceDiff, + meta interface{}) (*terraform.InstanceState, diag.Diagnostics) { + schema := schemaMap(r.SchemaMap()) + data, err := schema.Data(s, d) + if err != nil { + return s, diag.FromErr(err) + } + + if s != nil && data != nil { + data.providerMeta = s.ProviderMeta + } + + // The InstanceDiff should have the timeout info; it needs to be copied + // over to the ResourceData meta + rt := ResourceTimeout{} + if _, ok := d.Meta[TimeoutKey]; ok { + if err := rt.DiffDecode(d); err != nil { + logging.HelperSchemaError(ctx, "Error decoding ResourceTimeout", map[string]interface{}{logging.KeyError: err}) + } + } else if s != nil { + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + logging.HelperSchemaError(ctx, "Error decoding ResourceTimeout", map[string]interface{}{logging.KeyError: err}) + } + } + } else { + logging.HelperSchemaDebug(ctx, "No meta timeoutkey found in Apply()") + } + data.timeouts = &rt + + if s == nil { + // The Terraform API dictates that this should never happen, but + // it doesn't hurt to be safe in this case. + s = new(terraform.InstanceState) + } + + var diags diag.Diagnostics + + if d.Destroy || d.RequiresNew() { + if s.ID != "" { + // Destroy the resource since it is created + logging.HelperSchemaTrace(ctx, "Calling downstream") + diags = append(diags, r.delete(ctx, data, meta)...) + logging.HelperSchemaTrace(ctx, "Called downstream") + + if diags.HasError() { + return r.recordCurrentSchemaVersion(data.State()), diags + } + + // Make sure the ID is gone.
+ data.SetId("") + } + + // If we're only destroying, and not creating, then return + // now since we're done! + if !d.RequiresNew() { + return nil, diags + } + + // Reset the data to be stateless since we just destroyed + data, err = schema.Data(nil, d) + if err != nil { + return nil, append(diags, diag.FromErr(err)...) + } + + // data was reset, need to re-apply the parsed timeouts + data.timeouts = &rt + } + + if data.Id() == "" { + // We're creating, it is a new resource. + data.MarkNewResource() + logging.HelperSchemaTrace(ctx, "Calling downstream") + diags = append(diags, r.create(ctx, data, meta)...) + logging.HelperSchemaTrace(ctx, "Called downstream") + } else { + if !r.updateFuncSet() { + return s, append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "doesn't support update", + }) + } + logging.HelperSchemaTrace(ctx, "Calling downstream") + diags = append(diags, r.update(ctx, data, meta)...) + logging.HelperSchemaTrace(ctx, "Called downstream") + } + + return r.recordCurrentSchemaVersion(data.State()), diags +} + +// Diff returns a diff of this resource. +func (r *Resource) Diff( + ctx context.Context, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + t := &ResourceTimeout{} + err := t.ConfigDecode(r, c) + + if err != nil { + return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) + } + + instanceDiff, err := schemaMap(r.SchemaMap()).Diff(ctx, s, c, r.CustomizeDiff, meta, true) + if err != nil { + return instanceDiff, err + } + + if instanceDiff != nil { + if err := t.DiffEncode(instanceDiff); err != nil { + logging.HelperSchemaError(ctx, "Error encoding timeout to instance diff", map[string]interface{}{logging.KeyError: err}) + } + } else { + logging.HelperSchemaDebug(ctx, "Instance Diff is nil in Diff()") + } + + return instanceDiff, err +} + +func (r *Resource) SimpleDiff( + ctx context.Context, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + instanceDiff, err := schemaMap(r.SchemaMap()).Diff(ctx, s, c, r.CustomizeDiff, meta, false) + if err != nil { + return instanceDiff, err + } + + if instanceDiff == nil { + instanceDiff = terraform.NewInstanceDiff() + } + + // Make sure the old value is set in each of the instance diffs. + // This was done by the RequiresNew logic in the full legacy Diff. + for k, attr := range instanceDiff.Attributes { + if attr == nil { + continue + } + if s != nil { + attr.Old = s.Attributes[k] + } + } + + return instanceDiff, nil +} + +// Validate validates the resource configuration against the schema. +func (r *Resource) Validate(c *terraform.ResourceConfig) diag.Diagnostics { + diags := schemaMap(r.SchemaMap()).Validate(c) + + if r.DeprecationMessage != "" { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Deprecated Resource", + Detail: r.DeprecationMessage, + }) + } + + return diags +} + +// ReadDataApply loads the data for a data source, given a diff that +// describes the configuration arguments and desired computed attributes. +func (r *Resource) ReadDataApply( + ctx context.Context, + d *terraform.InstanceDiff, + meta interface{}, +) (*terraform.InstanceState, diag.Diagnostics) { + // Data sources are always built completely from scratch + // on each read, so the source state is always nil. 
+ data, err := schemaMap(r.SchemaMap()).Data(nil, d) + if err != nil { + return nil, diag.FromErr(err) + } + + logging.HelperSchemaTrace(ctx, "Calling downstream") + diags := r.read(ctx, data, meta) + logging.HelperSchemaTrace(ctx, "Called downstream") + + state := data.State() + if state != nil && state.ID == "" { + // Data sources can set an ID if they want, but they aren't + // required to; we'll provide a placeholder if they don't, + // to preserve the invariant that all resources have non-empty + // ids. + state.ID = "-" + } + + return r.recordCurrentSchemaVersion(state), diags +} + +// RefreshWithoutUpgrade reads the instance state, but does not call +// MigrateState or the StateUpgraders, since those are now invoked in a +// separate API call. +// RefreshWithoutUpgrade is part of the new plugin shims. +func (r *Resource) RefreshWithoutUpgrade( + ctx context.Context, + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, diag.Diagnostics) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + logging.HelperSchemaError(ctx, "Error decoding ResourceTimeout", map[string]interface{}{logging.KeyError: err}) + } + } + + schema := schemaMap(r.SchemaMap()) + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. + data, err := schema.Data(s, nil) + if err != nil { + return s, diag.FromErr(err) + } + data.timeouts = &rt + + if s != nil { + data.providerMeta = s.ProviderMeta + } + + logging.HelperSchemaTrace(ctx, "Calling downstream") + exists, err := r.Exists(data, meta) + logging.HelperSchemaTrace(ctx, "Called downstream") + + if err != nil { + return s, diag.FromErr(err) + } + + if !exists { + return nil, nil + } + } + + data, err := schema.Data(s, nil) + if err != nil { + return s, diag.FromErr(err) + } + data.timeouts = &rt + + if s != nil { + data.providerMeta = s.ProviderMeta + } + + logging.HelperSchemaTrace(ctx, "Calling downstream") + diags := r.read(ctx, data, meta) + logging.HelperSchemaTrace(ctx, "Called downstream") + + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + schema.handleDiffSuppressOnRefresh(ctx, s, state) + return r.recordCurrentSchemaVersion(state), diags +} + +func (r *Resource) createFuncSet() bool { + return (r.Create != nil || r.CreateContext != nil || r.CreateWithoutTimeout != nil) +} + +func (r *Resource) readFuncSet() bool { + return (r.Read != nil || r.ReadContext != nil || r.ReadWithoutTimeout != nil) +} + +func (r *Resource) updateFuncSet() bool { + return (r.Update != nil || r.UpdateContext != nil || r.UpdateWithoutTimeout != nil) +} + +func (r *Resource) deleteFuncSet() bool { + return (r.Delete != nil || r.DeleteContext != nil || r.DeleteWithoutTimeout != nil) +} + +// InternalValidate should be called to validate the structure +// of the resource. +// +// This should be called in a unit test for any resource to verify +// before release that a resource is properly configured for use with +// this library. +// +// Provider.InternalValidate() will automatically call this for all of +// the resources it manages, so you don't need to call this manually if it +// is part of a Provider. 
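+// +// A minimal test sketch (illustrative; resourceWidget is a hypothetical +// constructor returning a *Resource): +// +//	func TestResourceWidgetInternalValidate(t *testing.T) { +//		if err := resourceWidget().InternalValidate(nil, true); err != nil { +//			t.Fatalf("schema validation failed: %s", err) +//		} +//	}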
+func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error { + if r == nil { + return errors.New("resource is nil") + } + + if !writable { + if r.createFuncSet() || r.updateFuncSet() || r.deleteFuncSet() { + return fmt.Errorf("must not implement Create, Update or Delete") + } + + // CustomizeDiff cannot be defined for read-only resources + if r.CustomizeDiff != nil { + return fmt.Errorf("cannot implement CustomizeDiff") + } + } + + schema := schemaMap(r.SchemaMap()) + tsm := topSchemaMap + + if r.isTopLevel() && writable { + // All non-Computed attributes must be ForceNew if Update is not defined + if !r.updateFuncSet() { + nonForceNewAttrs := make([]string, 0) + for k, v := range schema { + if !v.ForceNew && !v.Computed { + nonForceNewAttrs = append(nonForceNewAttrs, k) + } + } + if len(nonForceNewAttrs) > 0 { + return fmt.Errorf( + "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs) + } + } else { + nonUpdateableAttrs := make([]string, 0) + for k, v := range schema { + if v.ForceNew || v.Computed && !v.Optional { + nonUpdateableAttrs = append(nonUpdateableAttrs, k) + } + } + updateableAttrs := len(schema) - len(nonUpdateableAttrs) + if updateableAttrs == 0 { + return fmt.Errorf( + "All fields are ForceNew or Computed w/out Optional, Update is superfluous") + } + } + + tsm = schema + + // Delete and Read are required + if !r.readFuncSet() { + return fmt.Errorf("Read must be implemented") + } + if !r.deleteFuncSet() { + return fmt.Errorf("Delete must be implemented") + } + + // If we have an importer, we need to verify the importer. + if r.Importer != nil { + if err := r.Importer.InternalValidate(); err != nil { + return err + } + } + + if f, ok := tsm["id"]; ok { + // if there is an explicit ID, validate it...
+ err := validateResourceID(f) + if err != nil { + return err + } + } + + for k := range tsm { + if isReservedResourceFieldName(k) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + lastVersion := -1 + for _, u := range r.StateUpgraders { + if lastVersion >= 0 && u.Version-lastVersion > 1 { + return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version) + } + + if u.Version >= r.SchemaVersion { + return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion) + } + + if !u.Type.IsObjectType() { + return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version) + } + + if u.Upgrade == nil { + return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version) + } + + lastVersion = u.Version + } + + if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 { + return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion) + } + + // Data source + if r.isTopLevel() && !writable { + tsm = schema + for k := range tsm { + if isReservedDataSourceFieldName(k) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + if r.SchemaFunc != nil && r.Schema != nil { + return fmt.Errorf("SchemaFunc and Schema should not both be set") + } + + // check context funcs are not set alongside their nonctx counterparts + if r.CreateContext != nil && r.Create != nil { + return fmt.Errorf("CreateContext and Create should not both be set") + } + if r.ReadContext != nil && r.Read != nil { + return fmt.Errorf("ReadContext and Read should not both be set") + } + if r.UpdateContext != nil && r.Update != nil { + return fmt.Errorf("UpdateContext and Update should not both be set") + } + if r.DeleteContext != nil && r.Delete != nil { + return fmt.Errorf("DeleteContext and Delete should not both be set") + } + + // check context funcs are not set alongside their without timeout counterparts + if r.CreateContext != nil && r.CreateWithoutTimeout != nil { + return fmt.Errorf("CreateContext and CreateWithoutTimeout should not both be set") + } + if r.ReadContext != nil && r.ReadWithoutTimeout != nil { + return fmt.Errorf("ReadContext and ReadWithoutTimeout should not both be set") + } + if r.UpdateContext != nil && r.UpdateWithoutTimeout != nil { + return fmt.Errorf("UpdateContext and UpdateWithoutTimeout should not both be set") + } + if r.DeleteContext != nil && r.DeleteWithoutTimeout != nil { + return fmt.Errorf("DeleteContext and DeleteWithoutTimeout should not both be set") + } + + // check non-context funcs are not set alongside the context without timeout counterparts + if r.Create != nil && r.CreateWithoutTimeout != nil { + return fmt.Errorf("Create and CreateWithoutTimeout should not both be set") + } + if r.Read != nil && r.ReadWithoutTimeout != nil { + return fmt.Errorf("Read and ReadWithoutTimeout should not both be set") + } + if r.Update != nil && r.UpdateWithoutTimeout != nil { + return fmt.Errorf("Update and UpdateWithoutTimeout should not both be set") + } + if r.Delete != nil && r.DeleteWithoutTimeout != nil { + return fmt.Errorf("Delete and DeleteWithoutTimeout should not both be set") + } + + return schema.InternalValidate(tsm) +} + +func isReservedDataSourceFieldName(name string) bool { + for _, reservedName := range ReservedDataSourceFields { + if name == reservedName { + return true + } + } + return false +} + +func validateResourceID(s *Schema) error { + if s.Type != TypeString { + return fmt.Errorf(`the "id" attribute must be of TypeString`) + } + + if s.Required { + 
return fmt.Errorf(`the "id" attribute cannot be marked Required`) + } + + // ID should at least be computed. If unspecified it will be set to Computed and Optional, + // but Optional is unnecessary if undesired. + if !s.Computed { + return fmt.Errorf(`the "id" attribute must be marked Computed`) + } + return nil +} + +func isReservedResourceFieldName(name string) bool { + for _, reservedName := range ReservedResourceFields { + if name == reservedName { + return true + } + } + + return false +} + +// Data returns a ResourceData struct for this Resource. Each return value +// is a separate copy and can be safely modified differently. +// +// The data returned from this function has no actual effect on the Resource +// itself (including the state given to this function). +// +// This function is useful for unit tests and ResourceImporter functions. +func (r *Resource) Data(s *terraform.InstanceState) *ResourceData { + result, err := schemaMap(r.SchemaMap()).Data(s, nil) + if err != nil { + // At the time of writing, this isn't possible (Data never returns + // non-nil errors). We panic to find this in the future if we have to. + // I don't see a reason for Data to ever return an error. + panic(err) + } + + // load the Resource timeouts + result.timeouts = r.Timeouts + if result.timeouts == nil { + result.timeouts = &ResourceTimeout{} + } + + // Set the schema version to latest by default + result.meta = map[string]interface{}{ + "schema_version": strconv.Itoa(r.SchemaVersion), + } + + return result +} + +// TestResourceData yields a ResourceData filled with this resource's schema, for use in unit testing. +// +// TODO: May be able to be removed with the above ResourceData function. +func (r *Resource) TestResourceData() *ResourceData { + return &ResourceData{ + schema: r.SchemaMap(), + } +} + +// Returns true if the resource is "top level", i.e. not a sub-resource. +func (r *Resource) isTopLevel() bool { + // TODO: This is a heuristic; replace with a definitive attribute? + return (r.createFuncSet() || r.readFuncSet()) +} + +func (r *Resource) recordCurrentSchemaVersion( + state *terraform.InstanceState) *terraform.InstanceState { + if state != nil && r.SchemaVersion > 0 { + if state.Meta == nil { + state.Meta = make(map[string]interface{}) + } + state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion) + } + return state +} + +// Noop is a convenience implementation of a resource function which takes +// no action and returns no error. +func Noop(*ResourceData, interface{}) error { + return nil +} + +// NoopContext is a convenience implementation of a context aware resource function which takes +// no action and returns no error. +func NoopContext(context.Context, *ResourceData, interface{}) diag.Diagnostics { + return nil +} + +// RemoveFromState is a convenience implementation of a resource function +// which sets the resource ID to empty string (to remove it from state) +// and returns no error. +func RemoveFromState(d *ResourceData, _ interface{}) error { + d.SetId("") + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go new file mode 100644 index 0000000000..4380db7e1a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go @@ -0,0 +1,639 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "log" + "reflect" + "strings" + "sync" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/gocty" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// ResourceData is used to query and set the attributes of a resource. +// +// ResourceData is the primary argument received for CRUD operations on +// a resource as well as configuration of a provider. It is a powerful +// structure that can be used to not only query data, but also check for changes. +// +// The most relevant methods to take a look at are Get and Set. +type ResourceData struct { + // Settable (internally) + schema map[string]*Schema + config *terraform.ResourceConfig + state *terraform.InstanceState + diff *terraform.InstanceDiff + meta map[string]interface{} + timeouts *ResourceTimeout + providerMeta cty.Value + + // Don't set + multiReader *MultiLevelFieldReader + setWriter *MapFieldWriter + newState *terraform.InstanceState + partial bool + once sync.Once + isNew bool + + panicOnError bool +} + +// getResult is the internal structure that is generated when a Get +// is called that contains some extra data that might be used. +type getResult struct { + Value interface{} + ValueProcessed interface{} + Computed bool + Exists bool + Schema *Schema +} + +// Get returns the data for the given key, or nil if the key doesn't exist +// in the schema. +// +// If the key does exist in the schema but doesn't exist in the configuration, +// then the default value for that type will be returned. For strings, this is +// "", for numbers it is 0, etc. +// +// If you want to test if something is set at all in the configuration, +// use GetOk. +func (d *ResourceData) Get(key string) interface{} { + v, _ := d.GetOk(key) + return v +} + +// GetChange returns the old and new value for a given key. +// +// HasChange should be used to check if a change exists. It is possible +// that both the old and new value are the same if the old value was not +// set and the new value is. This is common, for example, for boolean +// fields which have a zero value of false. +func (d *ResourceData) GetChange(key string) (interface{}, interface{}) { + o, n := d.getChange(key, getSourceState, getSourceDiff) + return o.Value, n.Value +} + +// GetOk returns the data for the given key and whether or not the key +// has been set to a non-zero value at some point. +// +// The first result will not necessarily be nil if the value doesn't exist. +// The second result should be checked to determine this information. +func (d *ResourceData) GetOk(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + if exists { + // If it exists, we also want to verify it is not the zero-value. + value := r.Value + zero := r.Schema.Type.Zero() + + if eq, ok := value.(Equal); ok { + exists = !eq.Equal(zero) + } else { + exists = !reflect.DeepEqual(value, zero) + } + } + + return r.Value, exists +} + +// GetOkExists can check if TypeBool attributes that are Optional with +// no Default value have been set.
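+// +// An illustrative sketch (the "force_destroy" attribute is hypothetical): +// distinguishing an explicit false from an unset value for an Optional +// TypeBool without a Default: +// +//	if v, ok := d.GetOkExists("force_destroy"); ok { +//		forceDestroy = v.(bool) +//	}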
+// +// Deprecated: usage is discouraged due to undefined behaviors and may be +// removed in a future version of the SDK. +func (d *ResourceData) GetOkExists(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + return r.Value, exists +} + +func (d *ResourceData) getRaw(key string, level getSource) getResult { + var parts []string + if key != "" { + parts = strings.Split(key, ".") + } + + return d.get(parts, level) +} + +// HasChanges returns whether or not any of the given keys has been changed. +func (d *ResourceData) HasChanges(keys ...string) bool { + for _, key := range keys { + if d.HasChange(key) { + return true + } + } + return false +} + +// HasChangesExcept returns whether any keys outside the given keys have been changed. +// +// This function only works with root attribute keys. +func (d *ResourceData) HasChangesExcept(keys ...string) bool { + if d == nil || d.diff == nil { + return false + } + for attr := range d.diff.Attributes { + rootAttr := strings.Split(attr, ".")[0] + var skipAttr bool + + for _, key := range keys { + if rootAttr == key { + skipAttr = true + break + } + } + + if !skipAttr && d.HasChange(rootAttr) { + return true + } + } + + return false +} + +// HasChange returns whether or not the given key has been changed. +func (d *ResourceData) HasChange(key string) bool { + o, n := d.GetChange(key) + + return !cmp.Equal(n, o) +} + +// HasChangeExcept returns whether any keys outside the given key have been changed. +// +// This function only works with root attribute keys. +func (d *ResourceData) HasChangeExcept(key string) bool { + if d == nil || d.diff == nil { + return false + } + for attr := range d.diff.Attributes { + rootAttr := strings.Split(attr, ".")[0] + + if rootAttr == key { + continue + } + + if d.HasChange(rootAttr) { + return true + } + } + + return false +} + +// Partial is a legacy function that was used for capturing state of specific +// attributes if an update only partially worked. Enabling this flag without +// setting any specific keys with the now removed SetPartial has a useful side +// effect of preserving all of the resource's previous state. Although confusing, +// it has been discovered that during an update when an error is returned, the +// proposed config is set into state, even without any calls to d.Set. +// +// In practice this default behavior goes mostly unnoticed since Terraform +// refreshes between operations by default. The state situation discussed is +// subject to further investigation and potential change. Until then, this +// function has been preserved for the specific use case. +func (d *ResourceData) Partial(on bool) { + d.partial = on +} + +// Set sets the value for the given key. +// +// If the key is invalid or the value is not a correct type, an error +// will be returned. +func (d *ResourceData) Set(key string, value interface{}) error { + d.once.Do(d.init) + + // If the value is a pointer to a non-struct, get its value and + // use that. This allows Set to take a pointer to primitives to + // simplify the interface. + reflectVal := reflect.ValueOf(value) + if reflectVal.Kind() == reflect.Ptr { + if reflectVal.IsNil() { + // If the pointer is nil, then the value is just nil + value = nil + } else { + // Otherwise, we dereference the pointer as long as it's not + // a pointer to a struct, since struct pointers are allowed.
+ reflectVal = reflect.Indirect(reflectVal) + if reflectVal.Kind() != reflect.Struct { + value = reflectVal.Interface() + } + } + } + + err := d.setWriter.WriteField(strings.Split(key, "."), value) + if err != nil { + if d.panicOnError { + panic(err) + } else { + log.Printf("[ERROR] setting state: %s", err) + } + } + return err +} + +func (d *ResourceData) MarkNewResource() { + d.isNew = true +} + +func (d *ResourceData) IsNewResource() bool { + return d.isNew +} + +// Id returns the ID of the resource. +func (d *ResourceData) Id() string { + var result string + + if d.state != nil { + result = d.state.ID + if result == "" { + result = d.state.Attributes["id"] + } + } + + if d.newState != nil { + result = d.newState.ID + if result == "" { + result = d.newState.Attributes["id"] + } + } + + return result +} + +// ConnInfo returns the connection info for this resource. +func (d *ResourceData) ConnInfo() map[string]string { + if d.newState != nil { + return d.newState.Ephemeral.ConnInfo + } + + if d.state != nil { + return d.state.Ephemeral.ConnInfo + } + + return nil +} + +// SetId sets the ID of the resource. If the value is blank, then the +// resource is destroyed. +func (d *ResourceData) SetId(v string) { + d.once.Do(d.init) + d.newState.ID = v + + // once we transition away from the legacy state types, "id" will no longer + // be a special field, and will become a normal attribute. + // set the attribute normally + d.setWriter.unsafeWriteField("id", v) + + // Make sure the newState is also set, otherwise the old value + // may get precedence. + if d.newState.Attributes == nil { + d.newState.Attributes = map[string]string{} + } + d.newState.Attributes["id"] = v +} + +// SetConnInfo sets the connection info for a resource. +func (d *ResourceData) SetConnInfo(v map[string]string) { + d.once.Do(d.init) + d.newState.Ephemeral.ConnInfo = v +} + +// SetType sets the ephemeral type for the data. This is only required +// for importing. +func (d *ResourceData) SetType(t string) { + d.once.Do(d.init) + d.newState.Ephemeral.Type = t +} + +// State returns the new InstanceState after the diff and any Set +// calls. +func (d *ResourceData) State() *terraform.InstanceState { + var result terraform.InstanceState + result.ID = d.Id() + result.Meta = d.meta + + // If we have no ID, then this resource doesn't exist and we just + // return nil. + if result.ID == "" { + return nil + } + + if d.timeouts != nil { + if err := d.timeouts.StateEncode(&result); err != nil { + log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err) + } + } + + // Look for a magic key in the schema that determines we skip the + // integrity check of fields existing in the schema, allowing dynamic + // keys to be created. + hasDynamicAttributes := false + for k := range d.schema { + if k == "__has_dynamic_attributes" { + hasDynamicAttributes = true + log.Printf("[INFO] Resource %s has dynamic attributes", result.ID) + } + } + + // In order to build the final state attributes, we read the full + // attribute set as a map[string]interface{}, write it to a MapFieldWriter, + // and then use that map. 
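A sketch of how a create function uses SetId and the pointer-dereferencing behavior of Set just shown; the "description" attribute and the fake remote ID are illustrative only:

```go
package example

import (
	"context"
	"strconv"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func createSketch(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	remoteID := int64(42) // pretend the remote API returned this

	// A non-empty ID is what marks the resource as created; calling
	// SetId("") would instead mark it destroyed.
	d.SetId(strconv.FormatInt(remoteID, 10))

	// Set accepts pointers to primitives and dereferences them; a nil
	// pointer stores nil. The returned error surfaces schema mismatches
	// (and, per panicOnError, panics instead under TF_ACC).
	var description *string
	if err := d.Set("description", description); err != nil {
		return diag.FromErr(err)
	}
	return nil
}
```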
+ rawMap := make(map[string]interface{}) + for k := range d.schema { + source := getSourceSet + if d.partial { + source = getSourceState + } + raw := d.get([]string{k}, source) + if raw.Exists && !raw.Computed { + rawMap[k] = raw.Value + if raw.ValueProcessed != nil { + rawMap[k] = raw.ValueProcessed + } + } + } + + mapW := &MapFieldWriter{Schema: d.schema} + if err := mapW.WriteField(nil, rawMap); err != nil { + log.Printf("[ERR] Error writing fields: %s", err) + return nil + } + + result.Attributes = mapW.Map() + + if hasDynamicAttributes { + // If we have dynamic attributes, just copy the attributes map + // one for one into the result attributes. + for k, v := range d.setWriter.Map() { + // Don't clobber schema values. This limits usage of dynamic + // attributes to names which _do not_ conflict with schema + // keys! + if _, ok := result.Attributes[k]; !ok { + result.Attributes[k] = v + } + } + } + + if d.newState != nil { + result.Ephemeral = d.newState.Ephemeral + } + + // TODO: This is hacky and we can remove this when we have a proper + // state writer. We should instead have a proper StateFieldWriter + // and use that. + for k, schema := range d.schema { + if schema.Type != TypeMap { + continue + } + + if result.Attributes[k] == "" { + delete(result.Attributes, k) + } + } + + if v := d.Id(); v != "" { + result.Attributes["id"] = d.Id() + } + + if d.state != nil { + result.Tainted = d.state.Tainted + } + + return &result +} + +// Timeout returns the data for the given timeout key +// Returns a duration of 20 minutes for any key not found, or not found and no default. +func (d *ResourceData) Timeout(key string) time.Duration { + key = strings.ToLower(key) + + // System default of 20 minutes + defaultTimeout := 20 * time.Minute + + if d.timeouts == nil { + return defaultTimeout + } + + var timeout *time.Duration + switch key { + case TimeoutCreate: + timeout = d.timeouts.Create + case TimeoutRead: + timeout = d.timeouts.Read + case TimeoutUpdate: + timeout = d.timeouts.Update + case TimeoutDelete: + timeout = d.timeouts.Delete + } + + if timeout != nil { + return *timeout + } + + if d.timeouts.Default != nil { + return *d.timeouts.Default + } + + return defaultTimeout +} + +func (d *ResourceData) init() { + // Initialize the field that will store our new state + var copyState terraform.InstanceState + if d.state != nil { + copyState = *d.state.DeepCopy() + } + d.newState = ©State + + // Initialize the map for storing set data + d.setWriter = &MapFieldWriter{Schema: d.schema} + + // Initialize the reader for getting data from the + // underlying sources (config, diff, etc.) 
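For context on the Timeout lookup above, a sketch of declaring timeouts and consuming them inside a create function. It assumes a recent v2 SDK release where the helper/retry package exists; resource and attribute names are invented:

```go
package example

import (
	"context"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func resourceWithTimeouts() *schema.Resource {
	return &schema.Resource{
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(5 * time.Minute),
			// No other keys and no Default: Timeout() falls back to the
			// 20-minute system default for the remaining operations.
		},
		CreateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
			// d.Timeout resolves the per-operation value configured above.
			err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError {
				// Poll the remote API here; return retry.RetryableError(err)
				// to keep waiting within the timeout budget.
				return nil
			})
			if err != nil {
				return diag.FromErr(err)
			}
			d.SetId("example")
			return nil
		},
		Schema: map[string]*schema.Schema{},
	}
}
```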
+ readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["set"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.setWriter.Map()), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "set", + }, + + Readers: readers, + } +} + +func (d *ResourceData) diffChange( + k string) (interface{}, interface{}, bool, bool, bool) { + // Get the change between the state and the config. + o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) + if !o.Exists { + o.Value = nil + } + if !n.Exists { + n.Value = nil + } + + // Return the old, new, and whether there is a change + return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false +} + +func (d *ResourceData) getChange( + k string, + oldLevel getSource, + newLevel getSource) (getResult, getResult) { + var parts, parts2 []string + if k != "" { + parts = strings.Split(k, ".") + parts2 = strings.Split(k, ".") + } + + o := d.get(parts, oldLevel) + n := d.get(parts2, newLevel) + return o, n +} + +func (d *ResourceData) get(addr []string, source getSource) getResult { + d.once.Do(d.init) + + var level string + flags := source & ^getSourceLevelMask + exact := flags&getSourceExact != 0 + source = source & getSourceLevelMask + if source >= getSourceSet { + level = "set" + } else if source >= getSourceDiff { + level = "diff" + } else if source >= getSourceConfig { + level = "config" + } else { + level = "state" + } + + var result FieldReadResult + var err error + if exact { + result, err = d.multiReader.ReadFieldExact(addr, level) + } else { + result, err = d.multiReader.ReadFieldMerge(addr, level) + } + if err != nil { + panic(err) + } + + // If the result doesn't exist, then we set the value to the zero value + var schema *Schema + if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + + if result.Value == nil && schema != nil { + result.Value = result.ValueOrZero(schema) + } + + // Transform the FieldReadResult into a getResult. It might be worth + // merging these two structures one day. + return getResult{ + Value: result.Value, + ValueProcessed: result.ValueProcessed, + Computed: result.Computed, + Exists: result.Exists, + Schema: schema, + } +} + +func (d *ResourceData) GetProviderMeta(dst interface{}) error { + if d.providerMeta.IsNull() { + return nil + } + return gocty.FromCtyValue(d.providerMeta, &dst) +} + +// GetRawConfig returns the cty.Value that Terraform sent the SDK for the +// config. If no value was sent, or if a null value was sent, the value will be +// a null value of the resource's type. +// +// GetRawConfig is considered experimental and advanced functionality, and +// familiarity with the Terraform protocol is suggested when using it. +func (d *ResourceData) GetRawConfig() cty.Value { + // These methods follow the field readers preference order. 
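A sketch of why GetRawConfig is useful despite its experimental status: the cty value preserves null-ness that the flatmap-based Get cannot. It assumes a string attribute named "description" exists in the schema; names are illustrative only:

```go
package example

import (
	"context"
	"log"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func rawConfigSketch(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	raw := d.GetRawConfig()

	// Unlike Get, the raw config distinguishes an omitted attribute
	// from an explicit empty string.
	if !raw.IsNull() && raw.GetAttr("description").IsNull() {
		log.Printf("[DEBUG] description omitted from configuration")
	}
	return nil
}
```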
+ if d.diff != nil && !d.diff.RawConfig.IsNull() { + return d.diff.RawConfig + } + if d.config != nil && !d.config.CtyValue.IsNull() { + return d.config.CtyValue + } + if d.state != nil && !d.state.RawConfig.IsNull() { + return d.state.RawConfig + } + return cty.NullVal(schemaMap(d.schema).CoreConfigSchema().ImpliedType()) +} + +// GetRawState returns the cty.Value that Terraform sent the SDK for the state. +// If no value was sent, or if a null value was sent, the value will be a null +// value of the resource's type. +// +// GetRawState is considered experimental and advanced functionality, and +// familiarity with the Terraform protocol is suggested when using it. +func (d *ResourceData) GetRawState() cty.Value { + // These methods follow the field readers preference order. + if d.diff != nil && !d.diff.RawState.IsNull() { + return d.diff.RawState + } + if d.state != nil && !d.state.RawState.IsNull() { + return d.state.RawState + } + return cty.NullVal(schemaMap(d.schema).CoreConfigSchema().ImpliedType()) +} + +// GetRawPlan returns the cty.Value that Terraform sent the SDK for the plan. +// If no value was sent, or if a null value was sent, the value will be a null +// value of the resource's type. +// +// GetRawPlan is considered experimental and advanced functionality, and +// familiarity with the Terraform protocol is suggested when using it. +func (d *ResourceData) GetRawPlan() cty.Value { + // These methods follow the field readers preference order. + if d.diff != nil && !d.diff.RawPlan.IsNull() { + return d.diff.RawPlan + } + if d.state != nil && !d.state.RawPlan.IsNull() { + return d.state.RawPlan + } + return cty.NullVal(schemaMap(d.schema).CoreConfigSchema().ImpliedType()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data_get_source.go new file mode 100644 index 0000000000..0639540b47 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data_get_source.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +// This code was previously generated with a go:generate directive calling: +// go run golang.org/x/tools/cmd/stringer -type=getSource resource_data_get_source.go +// However, it is now considered frozen and the tooling dependency has been +// removed. The String method can be manually updated if necessary. + +// getSource represents the level we want to get for a value (internally). +// Any source less than or equal to the level will be loaded (whichever +// has a value first). 
+type getSource byte + +const ( + getSourceState getSource = 1 << iota + getSourceConfig + getSourceDiff + getSourceSet + getSourceExact // Only get from the _exact_ level + getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go similarity index 83% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_diff.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go index f55a66e14f..6af9490b9e 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package schema import ( @@ -7,7 +10,8 @@ import ( "strings" "sync" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // newValueWriter is a minor re-implementation of MapFieldWriter to include @@ -246,20 +250,20 @@ func (d *ResourceDiff) clear(key string) error { } for k := range d.diff.Attributes { - if strings.HasPrefix(k, key) { + if k == key || childAddrOf(k, key) { delete(d.diff.Attributes, k) } } return nil } -// GetChangedKeysPrefix helps to implement Resource.CustomizeDiff -// where we need to act on all nested fields -// without calling out each one separately +// GetChangedKeysPrefix helps to implement Resource.CustomizeDiff where we need to act +// on all nested fields without calling out each one separately. +// An empty prefix is supported, returning all changed keys. func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string { keys := make([]string, 0) for k := range d.diff.Attributes { - if strings.HasPrefix(k, prefix) { + if k == prefix || childAddrOf(k, prefix) || prefix == "" { keys = append(keys, k) } } @@ -269,16 +273,16 @@ func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string { // diffChange helps to implement resourceDiffer and derives its change values // from ResourceDiff's own change data, in addition to existing diff, config, and state. func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) { - old, new, customized := d.getChange(key) + oldValue, newValue, customized := d.getChange(key) - if !old.Exists { - old.Value = nil + if !oldValue.Exists { + oldValue.Value = nil } - if !new.Exists || d.removed(key) { - new.Value = nil + if !newValue.Exists || d.removed(key) { + newValue.Value = nil } - return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized + return oldValue.Value, newValue.Value, !reflect.DeepEqual(oldValue.Value, newValue.Value), newValue.Computed, customized } // SetNew is used to set a new diff value for the mentioned key. The value must @@ -307,12 +311,12 @@ func (d *ResourceDiff) SetNewComputed(key string) error { } // setDiff performs common diff setting behaviour. 
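A sketch of the ResourceDiff surface touched by this hunk, combining GetChangedKeysPrefix with marking a derived attribute as "known after apply". The "settings" block, "content", and "etag" attributes are invented; "etag" is assumed to be Computed in the schema:

```go
package example

import (
	"context"
	"log"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func customizeDiffSketch(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error {
	// Enumerate every changed key under the nested "settings" block
	// without naming each child attribute; an empty prefix would return
	// all changed keys.
	for _, key := range d.GetChangedKeysPrefix("settings.0") {
		oldV, newV := d.GetChange(key)
		log.Printf("[DEBUG] %s changed: %v -> %v", key, oldV, newV)
	}

	// If "content" changed, the server will assign a new "etag", so plan
	// it as unknown rather than carrying the stale value forward.
	if d.HasChange("content") && d.NewValueKnown("content") {
		if err := d.SetNewComputed("etag"); err != nil {
			return err
		}
	}
	return nil
}
```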
-func (d *ResourceDiff) setDiff(key string, new interface{}, computed bool) error { +func (d *ResourceDiff) setDiff(key string, newValue interface{}, computed bool) error { if err := d.clear(key); err != nil { return err } - if err := d.newWriter.WriteField(strings.Split(key, "."), new, computed); err != nil { + if err := d.newWriter.WriteField(strings.Split(key, "."), newValue, computed); err != nil { return fmt.Errorf("Cannot set new diff value for key %s: %s", key, err) } @@ -373,8 +377,8 @@ func (d *ResourceDiff) Get(key string) interface{} { // results from the exact levels for the new diff, then from state and diff as // per normal. func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) { - old, new, _ := d.getChange(key) - return old.Value, new.Value + oldValue, newValue, _ := d.getChange(key) + return oldValue.Value, newValue.Value } // GetOk functions the same way as ResourceData.GetOk, but it also checks the @@ -421,19 +425,29 @@ func (d *ResourceDiff) NewValueKnown(key string) bool { return !r.Computed } +// HasChanges returns whether or not any of the given keys has been changed. +func (d *ResourceDiff) HasChanges(keys ...string) bool { + for _, key := range keys { + if d.HasChange(key) { + return true + } + } + return false +} + // HasChange checks to see if there is a change between state and the diff, or // in the overridden diff. func (d *ResourceDiff) HasChange(key string) bool { - old, new := d.GetChange(key) + oldValue, newValue := d.GetChange(key) // If the type implements the Equal interface, then call that // instead of just doing a reflect.DeepEqual. An example where this is // needed is *Set - if eq, ok := old.(Equal); ok { - return !eq.Equal(new) + if eq, ok := oldValue.(Equal); ok { + return !eq.Equal(newValue) } - return !reflect.DeepEqual(old, new) + return !reflect.DeepEqual(oldValue, newValue) } // Id returns the ID of this resource. @@ -450,6 +464,54 @@ func (d *ResourceDiff) Id() string { return result } +// GetRawConfig returns the cty.Value that Terraform sent the SDK for the +// config. If no value was sent, or if a null value was sent, the value will be +// a null value of the resource's type. +// +// GetRawConfig is considered experimental and advanced functionality, and +// familiarity with the Terraform protocol is suggested when using it. +func (d *ResourceDiff) GetRawConfig() cty.Value { + if d.diff != nil { + return d.diff.RawConfig + } + if d.state != nil { + return d.state.RawConfig + } + return cty.NullVal(schemaMap(d.schema).CoreConfigSchema().ImpliedType()) +} + +// GetRawState returns the cty.Value that Terraform sent the SDK for the state. +// If no value was sent, or if a null value was sent, the value will be a null +// value of the resource's type. +// +// GetRawState is considered experimental and advanced functionality, and +// familiarity with the Terraform protocol is suggested when using it. +func (d *ResourceDiff) GetRawState() cty.Value { + if d.diff != nil { + return d.diff.RawState + } + if d.state != nil { + return d.state.RawState + } + return cty.NullVal(schemaMap(d.schema).CoreConfigSchema().ImpliedType()) +} + +// GetRawPlan returns the cty.Value that Terraform sent the SDK for the plan. +// If no value was sent, or if a null value was sent, the value will be a null +// value of the resource's type. +// +// GetRawPlan is considered experimental and advanced functionality, and +// familiarity with the Terraform protocol is suggested when using it. 
+func (d *ResourceDiff) GetRawPlan() cty.Value {
+	if d.diff != nil {
+		return d.diff.RawPlan
+	}
+	if d.state != nil {
+		return d.state.RawPlan
+	}
+	return cty.NullVal(schemaMap(d.schema).CoreConfigSchema().ImpliedType())
+}
+
 // getChange gets values from two different levels, designed for use in
 // diffChange, HasChange, and GetChange.
 //
@@ -457,16 +519,16 @@ func (d *ResourceDiff) Id() string {
 // results from the exact levels for the new diff, then from state and diff as
 // per normal.
 func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) {
-	old := d.get(strings.Split(key, "."), "state")
-	var new getResult
+	oldValue := d.get(strings.Split(key, "."), "state")
+	var newValue getResult
 	for p := range d.updatedKeys {
 		if childAddrOf(key, p) {
-			new = d.getExact(strings.Split(key, "."), "newDiff")
-			return old, new, true
+			newValue = d.getExact(strings.Split(key, "."), "newDiff")
+			return oldValue, newValue, true
 		}
 	}
-	new = d.get(strings.Split(key, "."), "newDiff")
-	return old, new, false
+	newValue = d.get(strings.Split(key, "."), "newDiff")
+	return oldValue, newValue, false
 }
 
 // removed checks to see if the key is present in the existing, pre-customized
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go
new file mode 100644
index 0000000000..ad9a5c3b9c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go
@@ -0,0 +1,82 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package schema
+
+import (
+	"context"
+	"errors"
+)
+
+// ResourceImporter defines how a resource is imported in Terraform. This
+// can be set onto a Resource struct to make it Importable. Not all resources
+// have to be importable; if a Resource doesn't have a ResourceImporter then
+// it won't be importable.
+//
+// "Importing" in Terraform is the process of taking an already-created
+// resource and bringing it under Terraform management. This can include
+// updating Terraform state, generating Terraform configuration, etc.
+type ResourceImporter struct {
+	// State is called to convert an ID to one or more InstanceState to
+	// insert into the Terraform state.
+	//
+	// Deprecated: State is deprecated in favor of StateContext.
+	// Only one of the two functions can be set.
+	State StateFunc
+
+	// StateContext is called to convert an ID to one or more InstanceState to
+	// insert into the Terraform state. If this isn't specified, then
+	// the ID is passed straight through. This function receives a context
+	// that will cancel if Terraform sends a cancellation signal.
+	StateContext StateContextFunc
+}
+
+// StateFunc is the function called to import a resource into the Terraform state.
+//
+// Deprecated: Please use the context aware equivalent StateContextFunc.
+type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error)
+
+// StateContextFunc is the function called to import a resource into the
+// Terraform state. It is given a ResourceData with only ID set. This
+// ID is going to be an arbitrary value given by the user and may not map
+// directly to the ID format that the resource expects, so that should
+// be validated.
+//
+// This should return a slice of ResourceData that turn into the state
+// that was imported. This might be as simple as returning only the argument
+// that was given to the function.
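A sketch of a StateContext importer that validates a two-part user-supplied ID, as the doc comment describes; the resource, its "owner" attribute, and the ID format are invented for illustration:

```go
package example

import (
	"context"
	"fmt"
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func resourceWithImporter() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"owner": {Type: schema.TypeString, Required: true, ForceNew: true},
		},
		Importer: &schema.ResourceImporter{
			// Validate and normalize the user-supplied ID, then let the
			// subsequent Read fill in the rest of the state.
			StateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
				parts := strings.SplitN(d.Id(), "/", 2)
				if len(parts) != 2 {
					return nil, fmt.Errorf("expected <owner>/<name>, got %q", d.Id())
				}
				if err := d.Set("owner", parts[0]); err != nil {
					return nil, err
				}
				d.SetId(parts[1])
				return []*schema.ResourceData{d}, nil
			},
		},
		// For IDs usable as-is, schema.ImportStatePassthroughContext suffices.
	}
}
```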
In other cases (such as AWS security groups), +// an import may fan out to multiple resources and this will have to return +// multiple. +// +// To create the ResourceData structures for other resource types (if +// you have to), instantiate your resource and call the Data function. +type StateContextFunc func(context.Context, *ResourceData, interface{}) ([]*ResourceData, error) + +// InternalValidate should be called to validate the structure of this +// importer. This should be called in a unit test. +// +// Resource.InternalValidate() will automatically call this, so this doesn't +// need to be called manually. Further, Resource.InternalValidate() is +// automatically called by Provider.InternalValidate(), so you only need +// to internal validate the provider. +func (r *ResourceImporter) InternalValidate() error { + if r.State != nil && r.StateContext != nil { + return errors.New("Both State and StateContext cannot be set.") + } + return nil +} + +// ImportStatePassthrough is an implementation of StateFunc that can be +// used to simply pass the ID directly through. +// +// Deprecated: Please use the context aware ImportStatePassthroughContext instead +func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) { + return []*ResourceData{d}, nil +} + +// ImportStatePassthroughContext is an implementation of StateContextFunc that can be +// used to simply pass the ID directly through. This should be used only +// in the case that an ID-only refresh is possible. +func ImportStatePassthroughContext(ctx context.Context, d *ResourceData, m interface{}) ([]*ResourceData, error) { + return []*ResourceData{d}, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go similarity index 92% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_timeout.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go index f12bf72596..90d29e6259 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_timeout.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package schema import ( @@ -5,9 +8,10 @@ import ( "log" "time" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/mitchellh/copystructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0" @@ -136,7 +140,13 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) *timeout = rt } - return nil + + // This early return, which makes this function handle a single + // timeout configuration block, should likely not be here but the + // SDK has never raised an error for multiple blocks nor made any + // precedence decisions for them in the past. + // It is left here for compatibility reasons. 
+ return nil //nolint:staticcheck } } @@ -153,7 +163,6 @@ func unsupportedTimeoutKeyError(key string) error { // // StateEncode encodes the timeout into the ResourceData's InstanceState for // saving to state -// func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error { return t.metaEncode(id) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go new file mode 100644 index 0000000000..176288b0cd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go @@ -0,0 +1,2380 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// schema is a high-level framework for easily writing new providers +// for Terraform. Usage of schema is recommended over attempting to write +// to the low-level plugin interfaces manually. +// +// schema breaks down provider creation into simple CRUD operations for +// resources. The logic of diffing, destroying before creating, updating +// or creating, etc. is all handled by the framework. The plugin author +// only needs to implement a configuration schema and the CRUD operations and +// everything else is meant to just work. +// +// A good starting point is to view the Provider structure. +package schema + +import ( + "context" + "fmt" + "log" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-log/tfsdklog" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// Schema describes the structure and type information of a value, whether +// sourced from configuration, plan, or state data. Schema is used in Provider +// and Resource types (for managed resources and data resources) and is +// fundamental to the implementations of ResourceData and ResourceDiff. +// +// The Type field must always be set. At least one of Required, Optional, +// Optional and Computed, or Computed must be enabled unless the Schema is +// directly an implementation of an Elem field of another Schema. +type Schema struct { + // Type is the type of the value and must be one of the ValueType values. + // + // This type not only determines what type is expected/valid in configuring + // this value, but also what type is returned when ResourceData.Get is + // called. The types returned by Get are: + // + // TypeBool - bool + // TypeInt - int + // TypeFloat - float64 + // TypeString - string + // TypeList - []interface{} + // TypeMap - map[string]interface{} + // TypeSet - *schema.Set + // + Type ValueType + + // ConfigMode allows for overriding the default behaviors for mapping + // schema entries onto configuration constructs. + // + // By default, the Elem field is used to choose whether a particular + // schema is represented in configuration as an attribute or as a nested + // block; if Elem is a *schema.Resource then it's a block and it's an + // attribute otherwise. 
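To make the Schema struct documented here concrete, a minimal schema map showing the basic Required/Optional/Computed combinations and both Elem forms (a *Schema Elem renders as an attribute, a *Resource Elem as a nested block); all attribute names are invented:

```go
package example

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

func exampleSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"name":        {Type: schema.TypeString, Required: true, ForceNew: true},
		"description": {Type: schema.TypeString, Optional: true},
		"node_id":     {Type: schema.TypeString, Computed: true}, // provider-set only
		"topics": {
			// Attribute form: topics = ["a", "b"] in configuration.
			Type:     schema.TypeSet,
			Optional: true,
			Elem:     &schema.Schema{Type: schema.TypeString},
		},
		"pages": {
			// Block form: repeated pages { ... } blocks in configuration.
			Type:     schema.TypeList,
			Optional: true,
			MaxItems: 1,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"branch": {Type: schema.TypeString, Required: true},
				},
			},
		},
	}
}
```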
+ // + // If Elem is *schema.Resource then setting ConfigMode to + // SchemaConfigModeAttr will force it to be represented in configuration + // as an attribute, which means that the Computed flag can be used to + // provide default elements when the argument isn't set at all, while still + // allowing the user to force zero elements by explicitly assigning an + // empty list. + // + // When Computed is set without Optional, the attribute is not settable + // in configuration at all and so SchemaConfigModeAttr is the automatic + // behavior, and SchemaConfigModeBlock is not permitted. + ConfigMode SchemaConfigMode + + // Required indicates whether the practitioner must enter a value in the + // configuration for this attribute. Required cannot be used with Computed + // Default, DefaultFunc, DiffSuppressFunc, DiffSuppressOnRefresh, + // InputDefault, Optional, or StateFunc. At least one of Required, + // Optional, Optional and Computed, or Computed must be enabled. + Required bool + + // Optional indicates whether the practitioner can choose to not enter + // a value in the configuration for this attribute. Optional cannot be used + // with Required. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this attribute or not. Computed cannot be used with Required. If + // Required and Optional are both false, the attribute will be considered + // "read only" for the practitioner, with only the provider able to set + // its value. + Computed bool + + // ForceNew indicates whether a change in this value requires the + // replacement (destroy and create) of the managed resource instance, + // rather than an in-place update. This field is only valid when the + // encapsulating Resource is a managed resource. + // + // If conditional replacement logic is needed, use the Resource type + // CustomizeDiff field to call the ResourceDiff type ForceNew method. + ForceNew bool + + // If this is non-nil, the provided function will be used during diff + // of this field. If this is nil, a default diff for the type of the + // schema will be used. + // + // This allows comparison based on something other than primitive, list + // or map equality - for example SSH public keys may be considered + // equivalent regardless of trailing whitespace. + // + // If CustomizeDiffFunc makes this field ForceNew=true, the + // following DiffSuppressFunc will come in with the value of old being + // empty, as if creating a new resource. + // + // By default, DiffSuppressFunc is considered only when deciding whether + // a configuration value is significantly different than the prior state + // value during planning. Set DiffSuppressOnRefresh to opt in to checking + // this also during the refresh step. + DiffSuppressFunc SchemaDiffSuppressFunc + + // DiffSuppressOnRefresh enables using the DiffSuppressFunc to ignore + // normalization-classified changes returned by the resource type's + // "Read" or "ReadContext" function, in addition to the default behavior of + // doing so during planning. + // + // This is a particularly good choice for attributes which take strings + // containing "microsyntaxes" where various different values are packed + // together in some serialization where there are many ways to express the + // same information. For example, attributes which accept JSON data can + // include different whitespace characters without changing meaning, and + // case-insensitive identifiers may refer to the same object using different + // characters. 
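A sketch of a DiffSuppressFunc matching the case-insensitive-identifier scenario mentioned above; returning true hides the diff from the plan:

```go
package example

import (
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Suppress diffs that differ only by letter case, e.g. for identifiers
// the remote API normalizes.
func caseInsensitiveSuppress(k, oldValue, newValue string, d *schema.ResourceData) bool {
	return strings.EqualFold(oldValue, newValue)
}

// Compile-time check that the signature matches the SDK's type.
var _ schema.SchemaDiffSuppressFunc = caseInsensitiveSuppress
```

This would be attached via the DiffSuppressFunc field of a primitive-typed schema; pairing it with DiffSuppressOnRefresh extends the suppression to refresh results, per the field documentation that follows.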
+	//
+	// This is valid only for attributes of primitive types, because
+	// DiffSuppressFunc itself is only compatible with primitive types.
+	//
+	// The key benefit of activating this flag is that the result of Read or
+	// ReadContext will be cleaned of normalization-only changes in the same
+	// way as the planning result would normally be, which therefore prevents
+	// churn for downstream expressions deriving from this attribute and
+	// prevents incorrect "Values changed outside of Terraform" messages
+	// when the remote API returns values which have the same meaning as the
+	// prior state but in a different serialization.
+	//
+	// This is an opt-in because it was a later addition to the DiffSuppressFunc
+	// functionality which would cause some significant changes in behavior
+	// for existing providers if activated everywhere all at once.
+	DiffSuppressOnRefresh bool
+
+	// Default indicates a value to set if this attribute is not set in the
+	// configuration. Default cannot be used with DefaultFunc or Required.
+	// Default is only supported if the Type is TypeBool, TypeFloat, TypeInt,
+	// or TypeString. Default cannot be used if the Schema is directly an
+	// implementation of an Elem field of another Schema, such as trying to
+	// set a default value for a TypeList or TypeSet.
+	//
+	// Changing either Default can be a breaking change, especially if the
+	// attribute has ForceNew enabled. If a default needs to change to align
+	// with changing assumptions in an upstream API, then it may be necessary
+	// to also implement resource state upgrade functionality to change the
+	// state to match or update read operation logic to align with the new
+	// default.
+	Default interface{}
+
+	// DefaultFunc can be specified to compute a dynamic default when this
+	// attribute is not set in the configuration. DefaultFunc cannot be used
+	// with Default. For legacy reasons, DefaultFunc can be used with Required
+	// attributes in a Provider schema, which will prompt practitioners for
+	// input if the result of this function is nil.
+	//
+	// The return value should be stable to avoid generating confusing
+	// plan differences. Changing the return value can be a breaking change,
+	// especially if ForceNew is enabled. If a default needs to change to align
+	// with changing assumptions in an upstream API, then it may be necessary
+	// to also implement resource state upgrade functionality to change the
+	// state to match or update read operation logic to align with the new
+	// default.
+	DefaultFunc SchemaDefaultFunc
+
+	// Description is used as the description for docs, the language server and
+	// other user facing usage. It can be plain-text or markdown depending on the
+	// global DescriptionKind setting.
+	Description string
+
+	// InputDefault is the default value to use for when inputs are requested.
+	// This differs from Default in that if Default is set, no input is
+	// asked for. If Input is asked, this will be the default value offered.
+	InputDefault string
+
+	// StateFunc is a function called to change the value of this before
+	// storing it in the state (and likewise before comparing for diffs).
+	// The use for this is for example with large strings, where you may want
+	// to simply store the hash of it.
+	StateFunc SchemaStateFunc
+
+	// Elem represents the element type for a TypeList, TypeSet, or TypeMap
+	// attribute or block. The only valid types are *Schema and *Resource.
+	// Only TypeList and TypeSet support *Resource.
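A sketch combining Default and StateFunc as just documented: a static default for one attribute, and hashing a large value before it reaches state for another. Attribute names are invented:

```go
package example

import (
	"crypto/sha256"
	"encoding/hex"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func exampleAttrs() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		// Static default: omitted in configuration, planned as "main".
		"branch": {Type: schema.TypeString, Optional: true, Default: "main"},
		// Large value: persist only its digest in state, per the
		// StateFunc guidance above. The same transformed value is used
		// when comparing for diffs.
		"content": {
			Type:     schema.TypeString,
			Optional: true,
			StateFunc: func(v interface{}) string {
				sum := sha256.Sum256([]byte(v.(string)))
				return hex.EncodeToString(sum[:])
			},
		},
	}
}
```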
+ // + // If the Elem is a *Schema, the surrounding Schema represents a single + // attribute with a single element type for underlying elements. In + // practitioner configurations, an equals sign (=) is required to set + // the value. Refer to the following documentation: + // + // https://www.terraform.io/docs/language/syntax/configuration.html + // + // The underlying *Schema is only required to implement Type. ValidateFunc + // or ValidateDiagFunc can be used to validate each element value. + // + // If the Elem is a *Resource, the surrounding Schema represents a + // configuration block. Blocks can contain underlying attributes or blocks. + // In practitioner configurations, an equals sign (=) cannot be used to + // set the value. Blocks are instead repeated as necessary, or require + // the use of dynamic block expressions. Refer to the following + // documentation: + // + // https://www.terraform.io/docs/language/syntax/configuration.html + // https://www.terraform.io/docs/language/expressions/dynamic-blocks.html + // + // The underlying *Resource must only implement the Schema field. + Elem interface{} + + // MaxItems defines a maximum amount of items that can exist within a + // TypeSet or TypeList. + MaxItems int + + // MinItems defines a minimum amount of items that can exist within a + // TypeSet or TypeList. + // + // If the field Optional is set to true then MinItems is ignored and thus + // effectively zero. + MinItems int + + // Set defines custom hash algorithm for each TypeSet element. If not + // defined, the SDK implements a default hash algorithm based on the + // underlying structure and type information of the Elem field. + Set SchemaSetFunc + + // ComputedWhen is a set of queries on the configuration. Whenever any + // of these things is changed, it will require a recompute (this requires + // that Computed is set to true). + // + // Deprecated: This functionality is not implemented and this field + // declaration should be removed. + ComputedWhen []string + + // ConflictsWith is a set of attribute paths, including this attribute, + // whose configurations cannot be set simultaneously. This implements the + // validation logic declaratively within the schema and can trigger earlier + // in Terraform operations, rather than using create or update logic which + // only triggers during apply. + // + // Only absolute attribute paths, ones starting with top level attribute + // names, are supported. Attribute paths cannot be accurately declared + // for TypeList (if MaxItems is greater than 1), TypeMap, or TypeSet + // attributes. To reference an attribute under a single configuration block + // (TypeList with Elem of *Resource and MaxItems of 1), the syntax is + // "parent_block_name.0.child_attribute_name". + ConflictsWith []string + + // ExactlyOneOf is a set of attribute paths, including this attribute, + // where only one attribute out of all specified can be configured. It will + // return a validation error if none are specified as well. This implements + // the validation logic declaratively within the schema and can trigger + // earlier in Terraform operations, rather than using create or update + // logic which only triggers during apply. + // + // Only absolute attribute paths, ones starting with top level attribute + // names, are supported. Attribute paths cannot be accurately declared + // for TypeList (if MaxItems is greater than 1), TypeMap, or TypeSet + // attributes. 
To reference an attribute under a single configuration block + // (TypeList with Elem of *Resource and MaxItems of 1), the syntax is + // "parent_block_name.0.child_attribute_name". + ExactlyOneOf []string + + // AtLeastOneOf is a set of attribute paths, including this attribute, + // in which at least one of the attributes must be configured. This + // implements the validation logic declaratively within the schema and can + // trigger earlier in Terraform operations, rather than using create or + // update logic which only triggers during apply. + // + // Only absolute attribute paths, ones starting with top level attribute + // names, are supported. Attribute paths cannot be accurately declared + // for TypeList (if MaxItems is greater than 1), TypeMap, or TypeSet + // attributes. To reference an attribute under a single configuration block + // (TypeList with Elem of *Resource and MaxItems of 1), the syntax is + // "parent_block_name.0.child_attribute_name". + AtLeastOneOf []string + + // RequiredWith is a set of attribute paths, including this attribute, + // that must be set simultaneously. This implements the validation logic + // declaratively within the schema and can trigger earlier in Terraform + // operations, rather than using create or update logic which only triggers + // during apply. + // + // Only absolute attribute paths, ones starting with top level attribute + // names, are supported. Attribute paths cannot be accurately declared + // for TypeList (if MaxItems is greater than 1), TypeMap, or TypeSet + // attributes. To reference an attribute under a single configuration block + // (TypeList with Elem of *Resource and MaxItems of 1), the syntax is + // "parent_block_name.0.child_attribute_name". + RequiredWith []string + + // Deprecated defines warning diagnostic details to display when + // practitioner configurations use this attribute or block. The warning + // diagnostic summary is automatically set to "Argument is deprecated" + // along with configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a known value for this + // attribute and certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the attribute is Required or Optional, and if the + // practitioner configuration attempts to set the attribute value to a + // known value. It cannot detect practitioner configuration values that + // are unknown ("known after apply"). + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + Deprecated string + + // ValidateFunc allows individual fields to define arbitrary validation + // logic. It is yielded the provided config value as an interface{} that is + // guaranteed to be of the proper Schema type, and it can yield warnings or + // errors based on inspection of that value. 
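A sketch of the declarative cross-attribute validation just described, using ExactlyOneOf so that exactly one of two authentication styles must be configured; the attribute names echo a common provider pattern but are illustrative here:

```go
package example

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

func authSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"token": {
			Type:      schema.TypeString,
			Optional:  true,
			Sensitive: true,
			// Paths are absolute from the schema root; the rule fires at
			// plan time rather than deep inside apply logic.
			ExactlyOneOf: []string{"token", "app_auth"},
		},
		"app_auth": {
			Type:         schema.TypeList,
			Optional:     true,
			MaxItems:     1,
			ExactlyOneOf: []string{"token", "app_auth"},
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"id": {Type: schema.TypeString, Required: true},
				},
			},
		},
	}
}
```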
+ // + // ValidateFunc is honored only when the schema's Type is set to TypeInt, + // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. + ValidateFunc SchemaValidateFunc + + // ValidateDiagFunc allows individual fields to define arbitrary validation + // logic. It is yielded the provided config value as an interface{} that is + // guaranteed to be of the proper Schema type, and it can yield diagnostics + // based on inspection of that value. + // + // ValidateDiagFunc is honored only when the schema's Type is set to TypeInt, + // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. + // + // ValidateDiagFunc is also yielded the cty.Path the SDK has built up to this + // attribute. The SDK will automatically set the AttributePath of any returned + // Diagnostics to this path. Therefore the developer does not need to set + // the AttributePath for primitive types. + // + // In the case of TypeMap to provide the most precise information, please + // set an AttributePath with the additional cty.IndexStep: + // + // AttributePath: cty.IndexStringPath("key_name") + // + // Or alternatively use the passed in path to create the absolute path: + // + // AttributePath: append(path, cty.IndexStep{Key: cty.StringVal("key_name")}) + ValidateDiagFunc SchemaValidateDiagFunc + + // Sensitive ensures that the attribute's value does not get displayed in + // the Terraform user interface output. It should be used for password or + // other values which should be hidden. + // + // Terraform does not support conditional sensitivity, so if the value may + // only be sensitive in certain scenarios, a pragmatic choice will be + // necessary upfront of whether or not to always hide the value. Some + // providers may opt to split up resources based on sensitivity, to ensure + // that practitioners without sensitive values do not have values + // unnecessarily hidden. + // + // Terraform does not support passing sensitivity from configurations to + // providers. For example, if a sensitive value is configured via another + // attribute, this attribute is not marked Sensitive, and the value is used + // in this attribute value, the sensitivity is not transitive. The value + // will be displayed as normal. + // + // Sensitive values propagate when referenced in other parts of a + // configuration unless the nonsensitive() configuration function is used. + // Certain configuration usage may also expand the sensitivity. For + // example, including the sensitive value in a set may mark the whole set + // as sensitive. Any outputs containing a sensitive value must enable the + // output sensitive argument. + Sensitive bool +} + +// SchemaConfigMode is used to influence how a schema item is mapped into a +// corresponding configuration construct, using the ConfigMode field of +// Schema. +type SchemaConfigMode int + +const ( + SchemaConfigModeAuto SchemaConfigMode = iota + SchemaConfigModeAttr + SchemaConfigModeBlock +) + +// SchemaDiffSuppressFunc is a function which can be used to determine +// whether a detected diff on a schema element is "valid" or not, and +// suppress it from the plan if necessary. +// +// Return true if the diff should be suppressed, false to retain it. +type SchemaDiffSuppressFunc func(k, oldValue, newValue string, d *ResourceData) bool + +// SchemaDefaultFunc is a function called to return a default value for +// a field. 
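A sketch of a ValidateDiagFunc as documented above, returning rich diagnostics with the path the SDK hands in; the "visibility" semantics are invented:

```go
package example

import (
	"github.com/hashicorp/go-cty/cty"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// For primitive types the SDK sets AttributePath automatically, but
// attaching the provided path explicitly is harmless and self-documenting.
func validateVisibility(v interface{}, path cty.Path) diag.Diagnostics {
	switch v.(string) {
	case "public", "private", "internal":
		return nil
	}
	return diag.Diagnostics{{
		Severity:      diag.Error,
		Summary:       "invalid visibility",
		Detail:        "expected one of: public, private, internal",
		AttributePath: path,
	}}
}

// Compile-time check against the SDK's type.
var _ schema.SchemaValidateDiagFunc = validateVisibility
```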
+type SchemaDefaultFunc func() (interface{}, error) + +// EnvDefaultFunc is a helper function that returns the value of the +// given environment variable, if one exists, or the default value +// otherwise. +func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return dv, nil + } +} + +// MultiEnvDefaultFunc is a helper function that returns the value of the first +// environment variable in the given list that returns a non-empty value. If +// none of the environment variables return a value, the default value is +// returned. +func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v, nil + } + } + return dv, nil + } +} + +// SchemaSetFunc is a function that must return a unique ID for the given +// element. This unique ID is used to store the element in a hash. +type SchemaSetFunc func(interface{}) int + +// SchemaStateFunc is a function used to convert some type to a string +// to be stored in the state. +type SchemaStateFunc func(interface{}) string + +// SchemaValidateFunc is a function used to validate a single field in the +// schema. +// +// Deprecated: please use SchemaValidateDiagFunc +type SchemaValidateFunc func(interface{}, string) ([]string, []error) + +// SchemaValidateDiagFunc is a function used to validate a single field in the +// schema and has Diagnostic support. +type SchemaValidateDiagFunc func(interface{}, cty.Path) diag.Diagnostics + +func (s *Schema) GoString() string { + return fmt.Sprintf("*%#v", *s) +} + +// Returns a default value for this schema by either reading Default or +// evaluating DefaultFunc. If neither of these are defined, returns nil. +func (s *Schema) DefaultValue() (interface{}, error) { + if s.Default != nil { + return s.Default, nil + } + + if s.DefaultFunc != nil { + defaultValue, err := s.DefaultFunc() + if err != nil { + return nil, fmt.Errorf("error loading default: %s", err) + } + return defaultValue, nil + } + + return nil, nil +} + +// Returns a zero value for the schema. +func (s *Schema) ZeroValue() interface{} { + // If it's a set then we'll do a bit of extra work to provide the + // right hashing function in our empty value. + if s.Type == TypeSet { + setFunc := s.Set + if setFunc == nil { + // Default set function uses the schema to hash the whole value + elem := s.Elem + switch t := elem.(type) { + case *Schema: + setFunc = HashSchema(t) + case *Resource: + setFunc = HashResource(t) + default: + panic("invalid set element type") + } + } + return &Set{F: setFunc} + } else { + return s.Type.Zero() + } +} + +func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { + if d == nil { + return d + } + + if s.Type == TypeBool { + normalizeBoolString := func(s string) string { + switch s { + case "0": + return "false" + case "1": + return "true" + } + return s + } + d.Old = normalizeBoolString(d.Old) + d.New = normalizeBoolString(d.New) + } + + if s.Computed && !d.NewRemoved && d.New == "" { + // Computed attribute without a new value set + d.NewComputed = true + } + + if s.ForceNew { + // ForceNew, mark that this field is requiring new under the + // following conditions, explained below: + // + // * Old != New - There is a change in value. This field + // is therefore causing a new resource. 
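A sketch of the environment-variable default helpers defined above, as commonly used in a provider schema; GITHUB_API_URL is an invented variable name here:

```go
package example

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

func providerSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"token": {
			Type:        schema.TypeString,
			Optional:    true,
			Sensitive:   true,
			DefaultFunc: schema.EnvDefaultFunc("GITHUB_TOKEN", nil),
		},
		"base_url": {
			Type:     schema.TypeString,
			Optional: true,
			// The first non-empty variable wins; otherwise the literal
			// default is used.
			DefaultFunc: schema.MultiEnvDefaultFunc(
				[]string{"GITHUB_BASE_URL", "GITHUB_API_URL"},
				"https://api.github.com/"),
		},
	}
}
```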
+ // + // * NewComputed - This field is being computed, hence a + // potential change in value, mark as causing a new resource. + d.RequiresNew = d.Old != d.New || d.NewComputed + } + + if d.NewRemoved { + return d + } + + if s.Computed { + // FIXME: This is where the customized bool from getChange finally + // comes into play. It allows the previously incorrect behavior + // of an empty string being used as "unset" when the value is + // computed. This should be removed once we can properly + // represent an unset/nil value from the configuration. + if !customized { + if d.Old != "" && d.New == "" { + // This is a computed value with an old value set already, + // just let it go. + log.Println("[DEBUG] A computed value with the empty string as the new value and a non-empty old value was found. Interpreting the empty string as \"unset\" to align with legacy behavior.") + return nil + } + } + + if d.New == "" && !d.NewComputed { + // Computed attribute without a new value set + d.NewComputed = true + } + } + + if s.Sensitive { + // Set the Sensitive flag so output is hidden in the UI + d.Sensitive = true + } + + return d +} + +func (s *Schema) validateFunc(decoded interface{}, k string, path cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + + if s.ValidateDiagFunc != nil { + diags = s.ValidateDiagFunc(decoded, path) + for i := range diags { + if !diags[i].AttributePath.HasPrefix(path) { + diags[i].AttributePath = append(path, diags[i].AttributePath...) + } + } + } else if s.ValidateFunc != nil { + ws, es := s.ValidateFunc(decoded, k) + for _, w := range ws { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: w, + AttributePath: path, + }) + } + for _, e := range es { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: e.Error(), + AttributePath: path, + }) + } + } + + return diags +} + +// InternalMap is used to aid in the transition to the new schema types and +// protocol. The name is not meant to convey any usefulness, as this is not to +// be used directly by any providers. +type InternalMap = schemaMap + +// schemaMap is a wrapper that adds nice functions on top of schemas. +type schemaMap map[string]*Schema + +func (m schemaMap) panicOnError() bool { + return os.Getenv("TF_ACC") != "" +} + +// Data returns a ResourceData for the given schema, state, and diff. +// +// The diff is optional. +func (m schemaMap) Data( + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*ResourceData, error) { + return &ResourceData{ + schema: m, + state: s, + diff: d, + panicOnError: m.panicOnError(), + }, nil +} + +// DeepCopy returns a copy of this schemaMap. The copy can be safely modified +// without affecting the original. +func (m *schemaMap) DeepCopy() schemaMap { + copiedMap, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + return *copiedMap.(*schemaMap) +} + +// Diff returns the diff for a resource given the schema map, +// state, and configuration. 
+func (m schemaMap) Diff( + ctx context.Context, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + customizeDiff CustomizeDiffFunc, + meta interface{}, + handleRequiresNew bool) (*terraform.InstanceDiff, error) { + result := new(terraform.InstanceDiff) + result.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Make sure to mark if the resource is tainted + if s != nil { + result.DestroyTainted = s.Tainted + result.RawConfig = s.RawConfig + result.RawState = s.RawState + result.RawPlan = s.RawPlan + } + + d := &ResourceData{ + schema: m, + state: s, + config: c, + panicOnError: m.panicOnError(), + } + + for k, schema := range m { + err := m.diff(ctx, k, schema, result, d, false) + if err != nil { + return nil, err + } + } + + // Remove any nil diffs just to keep things clean + for k, v := range result.Attributes { + if v == nil { + delete(result.Attributes, k) + } + } + + // If this is a non-destroy diff, call any custom diff logic that has been + // defined. + if !result.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, s, result) + + logging.HelperSchemaTrace(ctx, "Calling downstream") + err := customizeDiff(ctx, rd, meta) + logging.HelperSchemaTrace(ctx, "Called downstream") + + if err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(ctx, k, mc[k], result, rd, false) + if err != nil { + return nil, err + } + } + } + + if handleRequiresNew { + // If the diff requires a new resource, then we recompute the diff + // so we have the complete new resource diff, and preserve the + // RequiresNew fields where necessary so the user knows exactly what + // caused that. + if result.RequiresNew() { + // Create the new diff + result2 := new(terraform.InstanceDiff) + result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Preserve the DestroyTainted flag + result2.DestroyTainted = result.DestroyTainted + result2.RawConfig = result.RawConfig + result2.RawPlan = result.RawPlan + result2.RawState = result.RawState + + // Reset the data to not contain state. We have to call init() + // again in order to reset the FieldReaders. + d.state = nil + d.init() + + // Perform the diff again + for k, schema := range m { + err := m.diff(ctx, k, schema, result2, d, false) + if err != nil { + return nil, err + } + } + + // Re-run customization + if !result2.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, d.state, result2) + if err := customizeDiff(ctx, rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(ctx, k, mc[k], result2, rd, false) + if err != nil { + return nil, err + } + } + } + + // Force all the fields to not force a new since we know what we + // want to force new. + for k, attr := range result2.Attributes { + if attr == nil { + continue + } + + if attr.RequiresNew { + attr.RequiresNew = false + } + + if s != nil { + attr.Old = s.Attributes[k] + } + } + + // Now copy in all the requires new diffs... + for k, attr := range result.Attributes { + if attr == nil { + continue + } + + newAttr, ok := result2.Attributes[k] + if !ok { + newAttr = attr + } + + if attr.RequiresNew { + newAttr.RequiresNew = true + } + + result2.Attributes[k] = newAttr + } + + // And set the diff! + result = result2 + } + + } + + // Go through and detect all of the ComputedWhens now that we've + // finished the diff. 
+ // TODO + + if result.Empty() { + // If we don't have any diff elements, just return nil + return nil, nil + } + + return result, nil +} + +// Validate validates the configuration against this schema mapping. +func (m schemaMap) Validate(c *terraform.ResourceConfig) diag.Diagnostics { + return m.validateObject("", m, c, cty.Path{}) +} + +// InternalValidate validates the format of this schema. This should be called +// from a unit test (and not in user-path code) to verify that a schema +// is properly built. +func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { + return m.internalValidate(topSchemaMap, false) +} + +func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error { + if topSchemaMap == nil { + topSchemaMap = m + } + for k, v := range m { + if v.Type == TypeInvalid { + return fmt.Errorf("%s: Type must be specified", k) + } + + if v.Optional && v.Required { + return fmt.Errorf("%s: Optional or Required must be set, not both", k) + } + + if v.Required && v.Computed { + return fmt.Errorf("%s: Cannot be both Required and Computed", k) + } + + if !v.Required && !v.Optional && !v.Computed { + return fmt.Errorf("%s: One of optional, required, or computed must be set", k) + } + + computedOnly := v.Computed && !v.Optional + + switch v.ConfigMode { + case SchemaConfigModeBlock: + if _, ok := v.Elem.(*Resource); !ok { + return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k) + } + if attrsOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k) + } + if computedOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k) + } + case SchemaConfigModeAttr: + // anything goes + case SchemaConfigModeAuto: + // Since "Auto" for Elem: *Resource would create a nested block, + // and that's impossible inside an attribute, we require it to be + // explicitly overridden as mode "Attr" for clarity. 
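A sketch of exercising InternalValidate from a unit test, as its doc comment recommends. To my understanding it also validates resource plumbing, so the fake resource carries stub CRUD functions and marks its one Required attribute ForceNew since no Update is defined; all names are invented:

```go
package example

import (
	"context"
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func TestProviderSchemaIsValid(t *testing.T) {
	noop := func(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
		return nil
	}
	p := &schema.Provider{
		ResourcesMap: map[string]*schema.Resource{
			"example_thing": {
				CreateContext: noop,
				ReadContext:   noop,
				DeleteContext: noop,
				Schema: map[string]*schema.Schema{
					"name": {Type: schema.TypeString, Required: true, ForceNew: true},
				},
			},
		},
	}
	if err := p.InternalValidate(); err != nil {
		t.Fatalf("schema is internally inconsistent: %s", err)
	}
}
```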
+ if _, ok := v.Elem.(*Resource); ok { + if attrsOnly { + return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k) + } + } + default: + return fmt.Errorf("%s: invalid ConfigMode value", k) + } + + if v.Computed && v.Default != nil { + return fmt.Errorf("%s: Default must be nil if computed", k) + } + + if v.Required && v.Default != nil { + return fmt.Errorf("%s: Default cannot be set with Required", k) + } + + if len(v.ComputedWhen) > 0 && !v.Computed { + return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k) + } + + if len(v.ConflictsWith) > 0 && v.Required { + return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k) + } + + if len(v.ExactlyOneOf) > 0 && v.Required { + return fmt.Errorf("%s: ExactlyOneOf cannot be set with Required", k) + } + + if len(v.AtLeastOneOf) > 0 && v.Required { + return fmt.Errorf("%s: AtLeastOneOf cannot be set with Required", k) + } + + if len(v.ConflictsWith) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.ConflictsWith, topSchemaMap, v, false) + if err != nil { + return fmt.Errorf("ConflictsWith: %+v", err) + } + } + + if len(v.RequiredWith) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.RequiredWith, topSchemaMap, v, true) + if err != nil { + return fmt.Errorf("RequiredWith: %+v", err) + } + } + + if len(v.ExactlyOneOf) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.ExactlyOneOf, topSchemaMap, v, true) + if err != nil { + return fmt.Errorf("ExactlyOneOf: %+v", err) + } + } + + if len(v.AtLeastOneOf) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.AtLeastOneOf, topSchemaMap, v, true) + if err != nil { + return fmt.Errorf("AtLeastOneOf: %+v", err) + } + } + + if v.DiffSuppressOnRefresh && v.DiffSuppressFunc == nil { + return fmt.Errorf("%s: cannot set DiffSuppressOnRefresh without DiffSuppressFunc", k) + } + + if v.Type == TypeList || v.Type == TypeSet { + if v.Elem == nil { + return fmt.Errorf("%s: Elem must be set for lists", k) + } + + if v.Default != nil { + return fmt.Errorf("%s: Default is not valid for lists or sets", k) + } + + if v.Type != TypeSet && v.Set != nil { + return fmt.Errorf("%s: Set can only be set for TypeSet", k) + } + + switch t := v.Elem.(type) { + case *Resource: + attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr + + if err := schemaMap(t.SchemaMap()).internalValidate(topSchemaMap, attrsOnly); err != nil { + return err + } + case *Schema: + bad := t.Computed || t.Optional || t.Required + if bad { + return fmt.Errorf( + "%s: Elem must have only Type set", k) + } + } + } else { + if v.MaxItems > 0 || v.MinItems > 0 { + return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k) + } + } + + if v.Type == TypeMap && v.Elem != nil { + switch v.Elem.(type) { + case *Resource: + return fmt.Errorf("%s: TypeMap with Elem *Resource not supported,"+ + "use TypeList/TypeSet with Elem *Resource or TypeMap with Elem *Schema", k) + } + } + + if computedOnly { + if len(v.AtLeastOneOf) > 0 { + return fmt.Errorf("%s: AtLeastOneOf is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if len(v.ConflictsWith) > 0 { + return fmt.Errorf("%s: ConflictsWith is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.Default != nil { + return fmt.Errorf("%s: Default is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.DefaultFunc != nil { + return fmt.Errorf("%s: DefaultFunc is for 
configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.DiffSuppressFunc != nil { + return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ + " between config and state representation. "+ + "There is no config for computed-only field, nothing to compare.", k) + } + if len(v.ExactlyOneOf) > 0 { + return fmt.Errorf("%s: ExactlyOneOf is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.InputDefault != "" { + return fmt.Errorf("%s: InputDefault is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.MaxItems > 0 { + return fmt.Errorf("%s: MaxItems is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.MinItems > 0 { + return fmt.Errorf("%s: MinItems is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.StateFunc != nil { + return fmt.Errorf("%s: StateFunc is extraneous, "+ + "value should just be changed before setting on computed-only field", k) + } + if v.ValidateFunc != nil { + return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } + if v.ValidateDiagFunc != nil { + return fmt.Errorf("%s: ValidateDiagFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } + } + + if v.ValidateFunc != nil || v.ValidateDiagFunc != nil { + switch v.Type { + case TypeList, TypeSet: + return fmt.Errorf("%s: ValidateFunc and ValidateDiagFunc are not yet supported on lists or sets.", k) + } + } + + if v.ValidateFunc != nil && v.ValidateDiagFunc != nil { + return fmt.Errorf("%s: ValidateFunc and ValidateDiagFunc cannot both be set", k) + } + + if v.Deprecated == "" { + if !isValidFieldName(k) { + return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k) + } + } + } + + return nil +} + +func checkKeysAgainstSchemaFlags(k string, keys []string, topSchemaMap schemaMap, self *Schema, allowSelfReference bool) error { + for _, key := range keys { + parts := strings.Split(key, ".") + sm := topSchemaMap + var target *Schema + for idx, part := range parts { + // Skip index fields if 0 + partInt, err := strconv.Atoi(part) + + if err == nil { + if partInt != 0 { + return fmt.Errorf("%s configuration block reference (%s) can only use the .0. 
index for TypeList and MaxItems: 1 configuration blocks", k, key) + } + + continue + } + + var ok bool + if target, ok = sm[part]; !ok { + return fmt.Errorf("%s references unknown attribute (%s) at part (%s)", k, key, part) + } + + subResource, ok := target.Elem.(*Resource) + + if !ok { + continue + } + + // Skip Type/MaxItems check if not the last element + if (target.Type == TypeSet || target.MaxItems != 1) && idx+1 != len(parts) { + return fmt.Errorf("%s configuration block reference (%s) can only be used with TypeList and MaxItems: 1 configuration blocks", k, key) + } + + sm = subResource.SchemaMap() + } + + if target == nil { + return fmt.Errorf("%s cannot find target attribute (%s), sm: %#v", k, key, sm) + } + + if target == self && !allowSelfReference { + return fmt.Errorf("%s cannot reference self (%s)", k, key) + } + + if target.Required { + return fmt.Errorf("%s cannot contain Required attribute (%s)", k, key) + } + + if len(target.ComputedWhen) > 0 { + return fmt.Errorf("%s cannot contain Computed(When) attribute (%s)", k, key) + } + } + + return nil +} + +var validFieldNameRe = regexp.MustCompile("^[a-z0-9_]+$") + +func isValidFieldName(name string) bool { + return validFieldNameRe.MatchString(name) +} + +// resourceDiffer is an interface that is used by the private diff functions. +// This helps facilitate diff logic for both ResourceData and ResoureDiff with +// minimal divergence in code. +type resourceDiffer interface { + diffChange(string) (interface{}, interface{}, bool, bool, bool) + Get(string) interface{} + GetChange(string) (interface{}, interface{}) + GetOk(string) (interface{}, bool) + HasChange(string) bool + HasChanges(...string) bool + Id() string +} + +func (m schemaMap) diff( + ctx context.Context, + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + + unsupressedDiff := new(terraform.InstanceDiff) + unsupressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + var err error + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + err = m.diffString(k, schema, unsupressedDiff, d, all) + case TypeList: + err = m.diffList(ctx, k, schema, unsupressedDiff, d, all) + case TypeMap: + err = m.diffMap(k, schema, unsupressedDiff, d, all) + case TypeSet: + err = m.diffSet(ctx, k, schema, unsupressedDiff, d, all) + default: + err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) + } + + for attrK, attrV := range unsupressedDiff.Attributes { + switch rd := d.(type) { + case *ResourceData: + if schema.DiffSuppressFunc != nil && attrV != nil && + schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { + // If this attr diff is suppressed, we may still need it in the + // overall diff if it's contained within a set. Rather than + // dropping the diff, make it a NOOP. 
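// Illustrative sketch, not part of the vendored file: a typical
// provider-supplied DiffSuppressFunc consulted by the loop above, here
// suppressing case-only changes (attribute name is hypothetical; assumes the
// standard library strings package).
//
//   "availability_zone": {
//       Type:     schema.TypeString,
//       Optional: true,
//       DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool {
//           return strings.EqualFold(oldValue, newValue)
//       },
//   },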
+ if !all { + continue + } + + logging.HelperSchemaDebug(ctx, "Ignoring change due to DiffSuppressFunc", map[string]interface{}{logging.KeyAttributePath: attrK}) + attrV = &terraform.ResourceAttrDiff{ + Old: attrV.Old, + New: attrV.Old, + } + } + } + diff.Attributes[attrK] = attrV + } + + return err +} + +func (m schemaMap) diffList( + ctx context.Context, + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + o, n, _, computedList, customized := d.diffChange(k) + if computedList { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. + if o != nil && n == nil && !computedList && schema.Computed { + return nil + } + + if o == nil { + o = []interface{}{} + } + if n == nil { + n = []interface{}{} + } + if s, ok := o.(*Set); ok { + o = s.List() + } + if s, ok := n.(*Set); ok { + n = s.List() + } + os := o.([]interface{}) + vs := n.([]interface{}) + + // If the new value was set, and the two are equal, then we're done. + // We have to do this check here because sets might be NOT + // reflect.DeepEqual so we need to wait until we get the []interface{} + if !all && nSet && reflect.DeepEqual(os, vs) { + return nil + } + + // Get the counts + oldLen := len(os) + newLen := len(vs) + oldStr := strconv.FormatInt(int64(oldLen), 10) + + // If the whole list is computed, then say that the # is computed + if computedList { + diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{ + Old: oldStr, + NewComputed: true, + RequiresNew: schema.ForceNew, + } + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + computed := oldLen == 0 && newLen == 0 && schema.Computed + if changed || computed || all { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + newStr := "" + if !computed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + finalizedAttr := countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[k+".#"] = finalizedAttr + } else { + delete(diff.Attributes, k+".#") + } + } + + // Figure out the maximum + maxLen := oldLen + if newLen > maxLen { + maxLen = newLen + } + + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for i := 0; i < maxLen; i++ { + for k2, schema := range t.SchemaMap() { + subK := fmt.Sprintf("%s.%d.%s", k, i, k2) + err := m.diff(ctx, subK, schema, diff, d, all) + if err != nil { + return err + } + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeList). + t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + for i := 0; i < maxLen; i++ { + subK := fmt.Sprintf("%s.%d", k, i) + err := m.diff(ctx, subK, &t2, diff, d, all) + if err != nil { + return err + } + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + + return nil +} + +func (m schemaMap) diffMap( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + prefix := k + "." 
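// Illustrative sketch, not part of the vendored file: in the flatmap encoding
// these diff helpers emit, lists are keyed by index, maps by key, and sets by
// element hash code. A list growing from ["a"] to ["a", "b"], a map gaining
// one entry, and a set gaining a port would produce attribute diffs shaped
// like (hypothetical attribute names, illustrative hash value):
//
//   "tags.#":           {Old: "1", New: "2"},
//   "tags.1":           {Old: "",  New: "b"},
//   "labels.%":         {Old: "0", New: "1"},
//   "labels.env":       {Old: "",  New: "prod"},
//   "ports.1889633094": {Old: "",  New: "443"},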
+ + // First get all the values from the state + var stateMap, configMap map[string]string + o, n, _, nComputed, customized := d.diffChange(k) + if err := mapstructure.WeakDecode(o, &stateMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + if err := mapstructure.WeakDecode(n, &configMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + // Keep track of whether the state _exists_ at all prior to clearing it + stateExists := o != nil + + // Delete any count values, since we don't use those + delete(configMap, "%") + delete(stateMap, "%") + + // Check if the number of elements has changed. + oldLen, newLen := len(stateMap), len(configMap) + changed := oldLen != newLen + if oldLen != 0 && newLen == 0 && schema.Computed { + changed = false + } + + // It is computed if we have no old value, no new value, the schema + // says it is computed, and it didn't exist in the state before. The + // last point means: if it existed in the state, even empty, then it + // has already been computed. + computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists + + // If the count has changed or we're computed, then add a diff for the + // count. "nComputed" means that the new value _contains_ a value that + // is computed. We don't do granular diffs for this yet, so we mark the + // whole map as computed. + if changed || computed || nComputed { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed || nComputed, + ForceNew: schema.ForceNew, + } + + oldStr := strconv.FormatInt(int64(oldLen), 10) + newStr := "" + if !computed && !nComputed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + finalizedAttr := countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[k+".%"] = finalizedAttr + } else { + delete(diff.Attributes, k+".%") + } + } + + // If the new map is nil and we're computed, then ignore it. + if n == nil && schema.Computed { + return nil + } + + // Now we compare, preferring values from the config map + for k, v := range configMap { + old, ok := stateMap[k] + delete(stateMap, k) + + if old == v && ok && !all { + continue + } + + finalizedAttr := schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: old, + New: v, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[prefix+k] = finalizedAttr + } + } + for k, v := range stateMap { + finalizedAttr := schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: v, + NewRemoved: true, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[prefix+k] = finalizedAttr + } + } + + return nil +} + +func (m schemaMap) diffSet( + ctx context.Context, + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + + o, n, _, computedSet, customized := d.diffChange(k) + if computedSet { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. + if o != nil && n == nil && !computedSet && schema.Computed { + return nil + } + + if o == nil { + o = schema.ZeroValue().(*Set) + } + if n == nil { + n = schema.ZeroValue().(*Set) + } + os := o.(*Set) + ns := n.(*Set) + + // If the new value was set, compare the listCode's to determine if + // the two are equal. 
Comparing listCode's instead of the actual values + // is needed because there could be computed values in the set which + // would result in false positives while comparing. + if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) { + return nil + } + + // Get the counts + oldLen := os.Len() + newLen := ns.Len() + oldStr := strconv.Itoa(oldLen) + newStr := strconv.Itoa(newLen) + + // Build a schema for our count + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + // If the set computed then say that the # is computed + if computedSet || schema.Computed && !nSet { + // If # already exists, equals 0 and no new set is supplied, there + // is nothing to record in the diff + count, ok := d.GetOk(k + ".#") + if ok && count.(int) == 0 && !nSet && !computedSet { + return nil + } + + // Set the count but make sure that if # does not exist, we don't + // use the zeroed value + countStr := strconv.Itoa(count.(int)) + if !ok { + countStr = "" + } + + finalizedAttr := countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: countStr, + NewComputed: true, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[k+".#"] = finalizedAttr + } + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + if changed || all { + finalizedAttr := countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[k+".#"] = finalizedAttr + } + } + + // Build the list of codes that will make up our set. This is the + // removed codes as well as all the codes in the new codes. + codes := make([][]string, 2) + codes[0] = os.Difference(ns).listCode() + codes[1] = ns.listCode() + for _, list := range codes { + for _, code := range list { + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for k2, schema := range t.SchemaMap() { + subK := fmt.Sprintf("%s.%s.%s", k, code, k2) + err := m.diff(ctx, subK, schema, diff, d, true) + if err != nil { + return err + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeSet). + t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + subK := fmt.Sprintf("%s.%s", k, code) + err := m.diff(ctx, subK, &t2, diff, d, true) + if err != nil { + return err + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + } + } + + return nil +} + +func (m schemaMap) diffString( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + var originalN interface{} + var os, ns string + o, n, _, computed, customized := d.diffChange(k) + if schema.StateFunc != nil && n != nil { + originalN = n + n = schema.StateFunc(n) + } + nraw := n + if nraw == nil && o != nil { + nraw = schema.Type.Zero() + } + if err := mapstructure.WeakDecode(o, &os); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + if err := mapstructure.WeakDecode(nraw, &ns); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + if os == ns && !all && !computed { + // They're the same value. If there old value is not blank or we + // have an ID, then return right away since we're already setup. 
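// Illustrative sketch, not part of the vendored file: a StateFunc like the
// one applied to the new value above typically normalizes user input before
// comparison (attribute name is hypothetical).
//
//   "policy": {
//       Type:     schema.TypeString,
//       Optional: true,
//       StateFunc: func(v interface{}) string {
//           normalized, _ := structure.NormalizeJsonString(v)
//           return normalized
//       },
//   },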
+ if os != "" || d.Id() != "" { + return nil + } + + // Otherwise, only continue if we're computed + if !schema.Computed { + return nil + } + } + + removed := false + if o != nil && n == nil && !computed { + removed = true + } + if removed && schema.Computed { + return nil + } + + finalizedAttr := schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: os, + New: ns, + NewExtra: originalN, + NewRemoved: removed, + NewComputed: computed, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[k] = finalizedAttr + } + + return nil +} + +// handleDiffSuppressOnRefresh visits each of the attributes set in "new" and, +// if the corresponding schema sets both DiffSuppressFunc and +// DiffSuppressOnRefresh, checks whether the new value is materially different +// than the old and if not it overwrites the new value with the old one, +// in-place. +func (m schemaMap) handleDiffSuppressOnRefresh(ctx context.Context, oldState, newState *terraform.InstanceState) { + if newState == nil || oldState == nil { + return // nothing to do, then + } + + // We'll populate this in the loop below only if we find at least one + // attribute which needs this analysis. + var d *ResourceData + + oldAttrs := oldState.Attributes + newAttrs := newState.Attributes + for k, newV := range newAttrs { + oldV, ok := oldAttrs[k] + if !ok { + continue // no old value to compare with + } + if newV == oldV { + continue // no change to test + } + + schemaList := addrToSchema(strings.Split(k, "."), m) + if len(schemaList) == 0 { + continue // no schema? weird, but not our responsibility to handle + } + schema := schemaList[len(schemaList)-1] + if !schema.DiffSuppressOnRefresh || schema.DiffSuppressFunc == nil { + continue // not relevant + } + + if d == nil { + // We populate "d" only on demand, to avoid the cost for most + // existing schemas where DiffSuppressOnRefresh won't be set. + var err error + d, err = m.Data(newState, nil) + if err != nil { + // Should not happen if we got far enough to be doing this + // analysis, but if it does then we'll bail out. + tfsdklog.Warn(ctx, fmt.Sprintf("schemaMap.handleDiffSuppressOnRefresh failed to construct ResourceData: %s", err)) + return + } + } + + if schema.DiffSuppressFunc(k, oldV, newV, d) { + tfsdklog.Debug(ctx, fmt.Sprintf("ignoring change of %q due to DiffSuppressFunc", k)) + newState.Attributes[k] = oldV // keep the old value, then + } + } +} + +func (m schemaMap) validate( + k string, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + raw, ok := c.Get(k) + if !ok && schema.DefaultFunc != nil { + // We have a dynamic default. Check if we have a value. 
+ var err error + raw, err = schema.DefaultFunc() + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to determine default value", + Detail: err.Error(), + AttributePath: path, + }) + } + + // We're okay as long as we had a value set + ok = raw != nil + } + + err := validateExactlyOneAttribute(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Invalid combination of arguments", + Detail: err.Error(), + AttributePath: path, + }) + } + + err = validateAtLeastOneAttribute(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Missing required argument", + Detail: err.Error(), + AttributePath: path, + }) + } + + if !ok { + if schema.Required { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Missing required argument", + Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", k), + AttributePath: path, + }) + } + return diags + } + + if !schema.Required && !schema.Optional { + // This is a computed-only field + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Value for unconfigurable attribute", + Detail: fmt.Sprintf("Can't configure a value for %q: its value will be decided automatically based on the result of applying this configuration.", k), + AttributePath: path, + }) + } + + err = validateRequiredWithAttribute(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Missing required argument", + Detail: err.Error(), + AttributePath: path, + }) + } + + // If the value is unknown then we can't validate it yet. + // In particular, this avoids spurious type errors where downstream + // validation code sees UnknownVariableValue as being just a string. + // The SDK has to allow the unknown value through initially, so that + // Required fields set via an interpolated value are accepted. + if !isWhollyKnown(raw) { + return nil + } + + err = validateConflictingAttributes(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Conflicting configuration arguments", + Detail: err.Error(), + AttributePath: path, + }) + } + + return m.validateType(k, raw, schema, c, path) +} + +// isWhollyKnown returns false if the argument contains an UnknownVariableValue +func isWhollyKnown(raw interface{}) bool { + switch raw := raw.(type) { + case string: + if raw == hcl2shim.UnknownVariableValue { + return false + } + case []interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + case map[string]interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + } + return true +} +func validateConflictingAttributes( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.ConflictsWith) == 0 { + return nil + } + + for _, conflictingKey := range schema.ConflictsWith { + if raw, ok := c.Get(conflictingKey); ok { + if raw == hcl2shim.UnknownVariableValue { + // An unknown value might become unset (null) once known, so + // we must defer validation until it's known. 
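// Illustrative sketch, not part of the vendored file: the ConflictsWith
// relation enforced by this loop, declared on two mutually exclusive
// arguments (hypothetical names).
//
//   "ip_address": {Type: schema.TypeString, Optional: true, ConflictsWith: []string{"cidr_block"}},
//   "cidr_block": {Type: schema.TypeString, Optional: true, ConflictsWith: []string{"ip_address"}},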
+ continue + } + return fmt.Errorf( + "%q: conflicts with %s", k, conflictingKey) + } + } + + return nil +} + +func removeDuplicates(elements []string) []string { + encountered := make(map[string]struct{}, 0) + result := []string{} + + for v := range elements { + if _, ok := encountered[elements[v]]; !ok { + encountered[elements[v]] = struct{}{} + result = append(result, elements[v]) + } + } + + return result +} + +func validateRequiredWithAttribute( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.RequiredWith) == 0 { + return nil + } + + allKeys := removeDuplicates(append(schema.RequiredWith, k)) + sort.Strings(allKeys) + + for _, key := range allKeys { + if _, ok := c.Get(key); !ok { + return fmt.Errorf("%q: all of `%s` must be specified", k, strings.Join(allKeys, ",")) + } + } + + return nil +} + +func validateExactlyOneAttribute( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.ExactlyOneOf) == 0 { + return nil + } + + allKeys := removeDuplicates(append(schema.ExactlyOneOf, k)) + sort.Strings(allKeys) + specified := make([]string, 0) + unknownVariableValueCount := 0 + for _, exactlyOneOfKey := range allKeys { + if c.IsComputed(exactlyOneOfKey) { + unknownVariableValueCount++ + continue + } + + _, ok := c.Get(exactlyOneOfKey) + if ok { + specified = append(specified, exactlyOneOfKey) + } + } + + if len(specified) == 0 && unknownVariableValueCount == 0 { + return fmt.Errorf("%q: one of `%s` must be specified", k, strings.Join(allKeys, ",")) + } + + if len(specified) > 1 { + return fmt.Errorf("%q: only one of `%s` can be specified, but `%s` were specified.", k, strings.Join(allKeys, ","), strings.Join(specified, ",")) + } + + return nil +} + +func validateAtLeastOneAttribute( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.AtLeastOneOf) == 0 { + return nil + } + + allKeys := removeDuplicates(append(schema.AtLeastOneOf, k)) + sort.Strings(allKeys) + + for _, atLeastOneOfKey := range allKeys { + if _, ok := c.Get(atLeastOneOfKey); ok { + // We can ignore hcl2shim.UnknownVariable by assuming it's been set and additional validation elsewhere + // will uncover this if it is in fact null. + return nil + } + } + + return fmt.Errorf("%q: one of `%s` must be specified", k, strings.Join(allKeys, ",")) +} + +func (m schemaMap) validateList( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return diags + } + } + + // schemaMap can't validate nil + if raw == nil { + return diags + } + + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + + if rawV.Kind() != reflect.Slice { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute must be a list", + AttributePath: path, + }) + } + + // We can't validate list length if this came from a dynamic block. + // Since there's no way to determine if something was from a dynamic block + // at this point, we're going to skip validation in the new protocol if + // there are any unknowns. Validate will eventually be called again once + // all values are known. 
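// Illustrative sketch, not part of the vendored file: the MinItems/MaxItems
// checks below enforce declarations such as (hypothetical block name):
//
//   "listener": {
//       Type:     schema.TypeList,
//       Optional: true,
//       MinItems: 1,
//       MaxItems: 5,
//       Elem: &schema.Resource{
//           Schema: map[string]*schema.Schema{
//               "port": {Type: schema.TypeInt, Required: true},
//           },
//       },
//   },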
+ if !isWhollyKnown(raw) { + return diags + } + + // Validate length + if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Too many list items", + Detail: fmt.Sprintf("Attribute %s supports %d item maximum, but config has %d declared.", k, schema.MaxItems, rawV.Len()), + AttributePath: path, + }) + } + + if schema.MinItems > 0 && rawV.Len() < schema.MinItems { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Not enough list items", + Detail: fmt.Sprintf("Attribute %s requires %d item minimum, but config has only %d declared.", k, schema.MinItems, rawV.Len()), + AttributePath: path, + }) + } + + // Now build the []interface{} + raws := make([]interface{}, rawV.Len()) + for i := range raws { + raws[i] = rawV.Index(i).Interface() + } + + for i, raw := range raws { + key := fmt.Sprintf("%s.%d", k, i) + + // Reify the key value from the ResourceConfig. + // If the list was computed we have all raw values, but some of these + // may be known in the config, and aren't individually marked as Computed. + if r, ok := c.Get(key); ok { + raw = r + } + + p := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + + switch t := schema.Elem.(type) { + case *Resource: + // This is a sub-resource + diags = append(diags, m.validateObject(key, t.SchemaMap(), c, p)...) + case *Schema: + diags = append(diags, m.validateType(key, raw, t, c, p)...) + } + + } + + return diags +} + +func (m schemaMap) validateMap( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return diags + } + } + + // schemaMap can't validate nil + if raw == nil { + return diags + } + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + switch rawV.Kind() { + case reflect.String: + // If raw and reified are equal, this is a string and should + // be rejected. + reified, reifiedOk := c.Get(k) + if reifiedOk && raw == reified && !c.IsComputed(k) { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute must be a map", + AttributePath: path, + }) + } + // Otherwise it's likely raw is an interpolation. + return diags + case reflect.Map: + case reflect.Slice: + default: + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute must be a map", + AttributePath: path, + }) + } + + // If it is not a slice, validate directly + if rawV.Kind() != reflect.Slice { + mapIface := rawV.Interface() + diags = append(diags, validateMapValues(k, mapIface.(map[string]interface{}), schema, path)...) + if diags.HasError() { + return diags + } + + return schema.validateFunc(mapIface, k, path) + } + + // It is a slice, verify that all the elements are maps + raws := make([]interface{}, rawV.Len()) + for i := range raws { + raws[i] = rawV.Index(i).Interface() + } + + for _, raw := range raws { + v := reflect.ValueOf(raw) + if v.Kind() != reflect.Map { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute must be a map", + AttributePath: path, + }) + } + mapIface := v.Interface() + diags = append(diags, validateMapValues(k, mapIface.(map[string]interface{}), schema, path)...) 
+ if diags.HasError() { + return diags + } + } + + validatableMap := make(map[string]interface{}) + for _, raw := range raws { + for k, v := range raw.(map[string]interface{}) { + validatableMap[k] = v + } + } + + return schema.validateFunc(validatableMap, k, path) +} + +func validateMapValues(k string, m map[string]interface{}, schema *Schema, path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + for key, raw := range m { + valueType, err := getValueType(k, schema) + p := append(path, cty.IndexStep{Key: cty.StringVal(key)}) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + + switch valueType { + case TypeBool: + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + case TypeInt: + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + case TypeFloat: + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + case TypeString: + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + default: + panic(fmt.Sprintf("Unknown validation type: %#v", valueType)) + } + } + return diags +} + +func getValueType(k string, schema *Schema) (ValueType, error) { + if schema.Elem == nil { + return TypeString, nil + } + if vt, ok := schema.Elem.(ValueType); ok { + return vt, nil + } + + // If a Schema is provided to a Map, we use the Type of that schema + // as the type for each element in the Map. + if s, ok := schema.Elem.(*Schema); ok { + return s.Type, nil + } + + if _, ok := schema.Elem.(*Resource); ok { + // TODO: We don't actually support this (yet) + // but silently pass the validation, until we decide + // how to handle nested structures in maps + return TypeString, nil + } + return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem) +} + +func (m schemaMap) validateObject( + k string, + schema map[string]*Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + raw, _ := c.Get(k) + + // schemaMap can't validate nil + if raw == nil { + return diags + } + + if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Expected Object Type", + Detail: fmt.Sprintf("Expected object, got %s", reflect.ValueOf(raw).Kind()), + AttributePath: path, + }) + } + + for subK, s := range schema { + key := subK + if k != "" { + key = fmt.Sprintf("%s.%s", k, subK) + } + diags = append(diags, m.validate(key, s, c, append(path, cty.GetAttrStep{Name: subK}))...) + } + + // Detect any extra/unknown keys and report those as errors. 
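// Illustrative sketch, not part of the vendored file: getValueType above lets
// a TypeMap declare uniformly typed values, which validateMapValues then
// weak-decodes element by element (attribute name is hypothetical).
//
//   "instance_limits": {
//       Type:     schema.TypeMap,
//       Optional: true,
//       Elem:     &schema.Schema{Type: schema.TypeInt},
//   },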
+ if m, ok := raw.(map[string]interface{}); ok { + for subk := range m { + if _, ok := schema[subk]; !ok { + if subk == TimeoutsConfigKey { + continue + } + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Invalid or unknown key", + AttributePath: append(path, cty.GetAttrStep{Name: subk}), + }) + } + } + } + + return diags +} + +func (m schemaMap) validatePrimitive( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + // a nil value shouldn't happen in the old protocol, and in the new + // protocol the types have already been validated. Either way, we can't + // reflect on nil, so don't panic. + if raw == nil { + return diags + } + + // Catch if the user gave a complex type where a primitive was + // expected, so we can return a friendly error message that + // doesn't contain Go type system terminology. + switch reflect.ValueOf(raw).Type().Kind() { + case reflect.Slice: + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute must be a single value, not a list", + AttributePath: path, + }) + case reflect.Map: + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute must be a single value, not a map", + AttributePath: path, + }) + default: // ok + } + + if c.IsComputed(k) { + // If the key is being computed, then it is not an error as + // long as it's not a slice or map. + return diags + } + + var decoded interface{} + switch schema.Type { + case TypeBool: + // Verify that we can parse this as the correct type + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) + } + decoded = n + case TypeInt: + // We need to verify the type precisely, because WeakDecode will + // decode a float as an integer. + + // the config shims only use int for integral number values + // also accept a string, just as the TypeBool and TypeFloat cases do + if v, ok := raw.(int); ok { + decoded = v + } else if _, ok := raw.(string); ok { + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) + } + decoded = n + } else { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: fmt.Sprintf("Attribute must be a whole number, got %v", raw), + AttributePath: path, + }) + } + case TypeFloat: + // Verify that we can parse this as a float + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) + } + decoded = n + case TypeString: + // Verify that we can parse this as a string + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) + } + decoded = n + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + + return append(diags, schema.validateFunc(decoded, k, path)...) 
+} + +func (m schemaMap) validateType( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + switch schema.Type { + case TypeList: + diags = m.validateList(k, raw, schema, c, path) + case TypeSet: + // indexing into sets is not representable in the current protocol + // best we can do is associate the path up to this attribute. + diags = m.validateList(k, raw, schema, c, path) + if len(diags) > 0 { + log.Printf("[WARN] Truncating attribute path of %d diagnostics for TypeSet", len(diags)) + for i := range diags { + diags[i].AttributePath = path + } + } + case TypeMap: + diags = m.validateMap(k, raw, schema, c, path) + default: + diags = m.validatePrimitive(k, raw, schema, c, path) + } + + if schema.Deprecated != "" { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Argument is deprecated", + Detail: schema.Deprecated, + AttributePath: path, + }) + } + + return diags +} + +// Zero returns the zero value for a type. +func (t ValueType) Zero() interface{} { + switch t { + case TypeInvalid: + return nil + case TypeBool: + return false + case TypeInt: + return 0 + case TypeFloat: + return 0.0 + case TypeString: + return "" + case TypeList: + return []interface{}{} + case TypeMap: + return map[string]interface{}{} + case TypeSet: + return new(Set) + case typeObject: + return map[string]interface{}{} + default: + panic(fmt.Sprintf("unknown type %s", t)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go new file mode 100644 index 0000000000..d629240fd3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go @@ -0,0 +1,133 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "bytes" + "fmt" + "sort" + "strconv" +) + +func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) { + if val == nil { + buf.WriteRune(';') + return + } + + switch schema.Type { + case TypeBool: + if val.(bool) { + buf.WriteRune('1') + } else { + buf.WriteRune('0') + } + case TypeInt: + buf.WriteString(strconv.Itoa(val.(int))) + case TypeFloat: + buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64)) + case TypeString: + buf.WriteString(val.(string)) + case TypeList: + buf.WriteRune('(') + l := val.([]interface{}) + for _, innerVal := range l { + serializeCollectionMemberForHash(buf, innerVal, schema.Elem) + } + buf.WriteRune(')') + case TypeMap: + + m := val.(map[string]interface{}) + var keys []string + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + buf.WriteRune('[') + for _, k := range keys { + innerVal := m[k] + if innerVal == nil { + continue + } + buf.WriteString(k) + buf.WriteRune(':') + + switch innerVal := innerVal.(type) { + case int: + buf.WriteString(strconv.Itoa(innerVal)) + case float64: + buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64)) + case string: + buf.WriteString(innerVal) + default: + panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal)) + } + + buf.WriteRune(';') + } + buf.WriteRune(']') + case TypeSet: + buf.WriteRune('{') + s := val.(*Set) + for _, innerVal := range s.List() { + serializeCollectionMemberForHash(buf, innerVal, schema.Elem) + } + buf.WriteRune('}') + default: + panic("unknown schema type to serialize") + } + buf.WriteRune(';') +} + +// SerializeValueForHash appends a serialization of the given resource config +// to the given buffer, guaranteeing deterministic results given the same value +// and schema. +// +// Its primary purpose is as input into a hashing function in order +// to hash complex substructures when used in sets, and so the serialization +// is not reversible. +func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) { + if val == nil { + return + } + sm := resource.SchemaMap() + m := val.(map[string]interface{}) + var keys []string + allComputed := true + for k, v := range sm { + if v.Optional || v.Required { + allComputed = false + } + + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + innerSchema := sm[k] + // Skip attributes that are not user-provided. Computed attributes + // do not contribute to the hash since their ultimate value cannot + // be known at plan/diff time. + if !allComputed && !(innerSchema.Required || innerSchema.Optional) { + continue + } + + buf.WriteString(k) + buf.WriteRune(':') + innerVal := m[k] + SerializeValueForHash(buf, innerVal, innerSchema) + } +} + +func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) { + switch tElem := elem.(type) { + case *Schema: + SerializeValueForHash(buf, val, tElem) + case *Resource: + buf.WriteRune('<') + SerializeResourceForHash(buf, val, tElem) + buf.WriteString(">;") + default: + panic(fmt.Sprintf("invalid element type: %T", tElem)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go new file mode 100644 index 0000000000..e897817fd3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go @@ -0,0 +1,246 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" + "sync" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode" +) + +// HashString hashes strings. If you want a Set of strings, this is the +// SchemaSetFunc you want. +func HashString(v interface{}) int { + return hashcode.String(v.(string)) +} + +// HashInt hashes integers. If you want a Set of integers, this is the +// SchemaSetFunc you want. +func HashInt(v interface{}) int { + return hashcode.String(strconv.Itoa(v.(int))) +} + +// HashResource hashes complex structures that are described using +// a *Resource. This is the default set implementation used when a set's +// element type is a full resource. +func HashResource(resource *Resource) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeResourceForHash(&buf, v, resource) + return hashcode.String(buf.String()) + } +} + +// HashSchema hashes values that are described using a *Schema. This is the +// default set implementation used when a set's element type is a single +// schema. +func HashSchema(schema *Schema) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeValueForHash(&buf, v, schema) + return hashcode.String(buf.String()) + } +} + +// Set is a set data structure that is returned for elements of type +// TypeSet. +type Set struct { + F SchemaSetFunc + + m map[string]interface{} + once sync.Once +} + +// NewSet is a convenience method for creating a new set with the given +// items. +func NewSet(f SchemaSetFunc, items []interface{}) *Set { + s := &Set{F: f} + for _, i := range items { + s.Add(i) + } + + return s +} + +// CopySet returns a copy of another set. +func CopySet(otherSet *Set) *Set { + return NewSet(otherSet.F, otherSet.List()) +} + +// Add adds an item to the set if it isn't already in the set. +func (s *Set) Add(item interface{}) { + s.add(item, false) +} + +// Remove removes an item if it's already in the set. Idempotent. +func (s *Set) Remove(item interface{}) { + s.remove(item) +} + +// Contains checks if the set has the given item. +func (s *Set) Contains(item interface{}) bool { + _, ok := s.m[s.hash(item)] + return ok +} + +// Len returns the amount of items in the set. +func (s *Set) Len() int { + return len(s.m) +} + +// List returns the elements of this set in slice format. +// +// The order of the returned elements is deterministic. Given the same +// set, the order of this will always be the same. +func (s *Set) List() []interface{} { + result := make([]interface{}, len(s.m)) + for i, k := range s.listCode() { + result[i] = s.m[k] + } + + return result +} + +// Difference performs a set difference of the two sets, returning +// a new third set that has only the elements unique to this set. +func (s *Set) Difference(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; !ok { + result.m[k] = v + } + } + + return result +} + +// Intersection performs the set intersection of the two sets +// and returns a new third set. +func (s *Set) Intersection(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; ok { + result.m[k] = v + } + } + + return result +} + +// Union performs the set union of the two sets and returns a new third +// set. 
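// Illustrative sketch, not part of the vendored file: constructing and
// combining sets with the helpers above, from a provider's perspective
// (values are hypothetical).
//
//   a := schema.NewSet(schema.HashString, []interface{}{"http", "https"})
//   b := schema.NewSet(schema.HashString, []interface{}{"https", "ssh"})
//   onlyA := a.Difference(b).List()   // []interface{}{"http"}
//   shared := a.Intersection(b).List() // []interface{}{"https"}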
+func (s *Set) Union(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + result.m[k] = v + } + for k, v := range other.m { + result.m[k] = v + } + + return result +} + +func (s *Set) Equal(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + return cmp.Equal(s.m, other.m) +} + +// HashEqual simply checks to the keys the top-level map to the keys in the +// other set's top-level map to see if they are equal. This obviously assumes +// you have a properly working hash function - use HashResource if in doubt. +func (s *Set) HashEqual(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + + ks1 := make([]string, 0) + ks2 := make([]string, 0) + + for k := range s.m { + ks1 = append(ks1, k) + } + for k := range other.m { + ks2 = append(ks2, k) + } + + sort.Strings(ks1) + sort.Strings(ks2) + + return reflect.DeepEqual(ks1, ks2) +} + +func (s *Set) GoString() string { + return fmt.Sprintf("*Set(%#v)", s.m) +} + +func (s *Set) init() { + s.m = make(map[string]interface{}) +} + +func (s *Set) add(item interface{}, computed bool) string { + s.once.Do(s.init) + + code := s.hash(item) + if computed { + code = "~" + code + tmpCode := code + count := 0 + for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] { + count++ + tmpCode = fmt.Sprintf("%s%d", code, count) + } + code = tmpCode + } + + if _, ok := s.m[code]; !ok { + s.m[code] = item + } + + return code +} + +func (s *Set) hash(item interface{}) string { + code := s.F(item) + // Always return a nonnegative hashcode. + if code < 0 { + code = -code + } + return strconv.Itoa(code) +} + +func (s *Set) remove(item interface{}) string { + s.once.Do(s.init) + + code := s.hash(item) + delete(s.m, code) + + return code +} + +func (s *Set) listCode() []string { + // Sort the hash codes so the order of the list is deterministic + keys := make([]string, 0, len(s.m)) + for k := range s.m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/shims.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go similarity index 79% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/shims.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go index 2a745afd42..e8baebd70c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/shims.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go @@ -1,41 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package schema import ( + "context" "encoding/json" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" + "github.com/hashicorp/go-cty/cty" + ctyjson "github.com/hashicorp/go-cty/cty/json" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // DiffFromValues takes the current state and desired state as cty.Values and // derives a terraform.InstanceDiff to give to the legacy providers. 
This is // used to take the states provided by the new ApplyResourceChange method and // convert them to a state+diff required for the legacy Apply method. -func DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) { - return diffFromValues(prior, planned, res, nil) +func DiffFromValues(ctx context.Context, prior, planned, config cty.Value, res *Resource) (*terraform.InstanceDiff, error) { + return diffFromValues(ctx, prior, planned, config, res, nil) } // diffFromValues takes an additional CustomizeDiffFunc, so we can generate our // test fixtures from the legacy tests. In the new provider protocol the diff // only needs to be created for the apply operation, and any customizations // have already been done. -func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) { +func diffFromValues(ctx context.Context, prior, planned, config cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) { instanceState, err := res.ShimInstanceStateFromValue(prior) if err != nil { return nil, err } + instanceState.RawConfig = config + instanceState.RawPlan = planned + instanceState.RawState = prior + configSchema := res.CoreConfigSchema() cfg := terraform.NewResourceConfigShimmed(planned, configSchema) removeConfigUnknowns(cfg.Config) removeConfigUnknowns(cfg.Raw) - diff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false) + diff, err := schemaMap(res.SchemaMap()).Diff(ctx, instanceState, cfg, cust, nil, false) if err != nil { return nil, err } @@ -79,10 +87,6 @@ func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, er return stateValueToJSONMap(val, ty, false) } -func StateValueToJSONMapJSONNumber(val cty.Value, ty cty.Type) (map[string]interface{}, error) { - return stateValueToJSONMap(val, ty, true) -} - func stateValueToJSONMap(val cty.Value, ty cty.Type, useJSONNumber bool) (map[string]interface{}, error) { js, err := ctyjson.Marshal(val, ty) if err != nil { diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go new file mode 100644 index 0000000000..bdf56d9012 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go @@ -0,0 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "context" + + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// TestResourceDataRaw creates a ResourceData from a raw configuration map. +func TestResourceDataRaw(t testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { + t.Helper() + + c := terraform.NewResourceConfigRaw(raw) + + sm := schemaMap(schema) + diff, err := sm.Diff(context.Background(), nil, c, nil, nil, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + result, err := sm.Data(nil, diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/unknown.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/unknown.go new file mode 100644 index 0000000000..1089e4d2d7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/unknown.go @@ -0,0 +1,135 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// SetUnknowns takes a cty.Value, and compares it to the schema setting any null +// values which are computed to unknown. +func SetUnknowns(val cty.Value, schema *configschema.Block) cty.Value { + if !val.IsKnown() { + return val + } + + // If the object was null, we still need to handle the top level attributes + // which might be computed, but we don't need to expand the blocks. + if val.IsNull() { + objMap := map[string]cty.Value{} + allNull := true + for name, attr := range schema.Attributes { + switch { + case attr.Computed: + objMap[name] = cty.UnknownVal(attr.Type) + allNull = false + default: + objMap[name] = cty.NullVal(attr.Type) + } + } + + // If this object has no unknown attributes, then we can leave it null. + if allNull { + return val + } + + return cty.ObjectVal(objMap) + } + + valMap := val.AsValueMap() + newVals := make(map[string]cty.Value) + + for name, attr := range schema.Attributes { + v := valMap[name] + + if attr.Computed && v.IsNull() { + newVals[name] = cty.UnknownVal(attr.Type) + continue + } + + newVals[name] = v + } + + for name, blockS := range schema.BlockTypes { + blockVal := valMap[name] + if blockVal.IsNull() || !blockVal.IsKnown() { + newVals[name] = blockVal + continue + } + + blockValType := blockVal.Type() + blockElementType := blockS.Block.ImpliedType() + + // This switches on the value type here, so we can correctly switch + // between Tuples/Lists and Maps/Objects. + switch { + case blockS.Nesting == configschema.NestingSingle || blockS.Nesting == configschema.NestingGroup: + // NestingSingle is the only exception here, where we treat the + // block directly as an object + newVals[name] = SetUnknowns(blockVal, &blockS.Block) + + case blockValType.IsSetType(), blockValType.IsListType(), blockValType.IsTupleType(): + listVals := blockVal.AsValueSlice() + newListVals := make([]cty.Value, 0, len(listVals)) + + for _, v := range listVals { + newListVals = append(newListVals, SetUnknowns(v, &blockS.Block)) + } + + switch { + case blockValType.IsSetType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.SetValEmpty(blockElementType) + default: + newVals[name] = cty.SetVal(newListVals) + } + case blockValType.IsListType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.ListValEmpty(blockElementType) + default: + newVals[name] = cty.ListVal(newListVals) + } + case blockValType.IsTupleType(): + newVals[name] = cty.TupleVal(newListVals) + } + + case blockValType.IsMapType(), blockValType.IsObjectType(): + mapVals := blockVal.AsValueMap() + newMapVals := make(map[string]cty.Value) + + for k, v := range mapVals { + newMapVals[k] = SetUnknowns(v, &blockS.Block) + } + + switch { + case blockValType.IsMapType(): + switch len(newMapVals) { + case 0: + newVals[name] = cty.MapValEmpty(blockElementType) + default: + newVals[name] = cty.MapVal(newMapVals) + } + case blockValType.IsObjectType(): + if len(newMapVals) == 0 { + // We need to populate empty values to make a valid object. 
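// Illustrative sketch, not part of the vendored file: given a hypothetical
// *configschema.Block with an optional "name" attribute and a computed "arn"
// attribute, SetUnknowns promotes the null computed attribute to unknown so
// Terraform can plan it.
//
//   val := cty.ObjectVal(map[string]cty.Value{
//       "name": cty.StringVal("example"),
//       "arn":  cty.NullVal(cty.String),
//   })
//   planned := SetUnknowns(val, blockSchema)
//   // planned.GetAttr("arn").IsKnown() == false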
+ for attr, ty := range blockElementType.AttributeTypes() { + newMapVals[attr] = cty.NullVal(ty) + } + } + newVals[name] = cty.ObjectVal(newMapVals) + } + + default: + panic(fmt.Sprintf("failed to set unknown values for nested block %q:%#v", name, blockValType)) + } + } + + return cty.ObjectVal(newVals) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype.go new file mode 100644 index 0000000000..2563ff8410 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +// This code was previously generated with a go:generate directive calling: +// go run golang.org/x/tools/cmd/stringer -type=ValueType valuetype.go +// However, it is now considered frozen and the tooling dependency has been +// removed. The String method can be manually updated if necessary. + +// ValueType is an enum of the type that can be represented by a schema. +type ValueType int + +const ( + TypeInvalid ValueType = iota + TypeBool + TypeInt + TypeFloat + TypeString + TypeList + TypeMap + TypeSet + typeObject +) + +// NOTE: ValueType has more functions defined on it in schema.go. We can't +// put them here because we reference other files. diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/expand_json.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/expand_json.go similarity index 76% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/expand_json.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/expand_json.go index b3eb90fdff..520afeffcb 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/expand_json.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/expand_json.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package structure import "encoding/json" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/flatten_json.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/flatten_json.go similarity index 78% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/flatten_json.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/flatten_json.go index 578ad2eade..d0913ac65f 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/flatten_json.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/flatten_json.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package structure import "encoding/json" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/normalize_json.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/normalize_json.go similarity index 88% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/normalize_json.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/normalize_json.go index 3256b476dd..0521420849 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/structure/normalize_json.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/normalize_json.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package structure import "encoding/json" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/suppress_json_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/suppress_json_diff.go new file mode 100644 index 0000000000..c610616374 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure/suppress_json_diff.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package structure + +import ( + "reflect" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func SuppressJsonDiff(k, oldValue, newValue string, d *schema.ResourceData) bool { + oldMap, err := ExpandJsonFromString(oldValue) + if err != nil { + return false + } + + newMap, err := ExpandJsonFromString(newValue) + if err != nil { + return false + } + + return reflect.DeepEqual(oldMap, newMap) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/float.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/float.go similarity index 92% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/float.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/float.go index f0bfbfa530..dfc261842d 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/float.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/float.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package validation import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // FloatBetween returns a SchemaValidateFunc which tests if the provided value diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/int.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/int.go similarity index 96% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/int.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/int.go index 8ade5b1e68..2873897f27 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/int.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/int.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package validation import ( "fmt" "math" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // IntBetween returns a SchemaValidateFunc which tests if the provided value diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/list.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/list.go new file mode 100644 index 0000000000..9f0eb4b65d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/list.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package validation + +import "fmt" + +// ListOfUniqueStrings is a ValidateFunc that ensures a list has no +// duplicate items in it. It's useful for when a list is needed over a set +// because order matters, yet the items still need to be unique. +func ListOfUniqueStrings(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.([]interface{}) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be List", k)) + return warnings, errors + } + + for _, e := range v { + if _, eok := e.(string); !eok { + errors = append(errors, fmt.Errorf("expected %q to only contain string elements, found :%v", k, e)) + return warnings, errors + } + } + + for n1, i1 := range v { + for n2, i2 := range v { + if i1.(string) == i2.(string) && n1 != n2 { + errors = append(errors, fmt.Errorf("expected %q to not have duplicates: found 2 or more of %v", k, i1)) + return warnings, errors + } + } + } + + return warnings, errors +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/map.go new file mode 100644 index 0000000000..7c92509054 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/map.go @@ -0,0 +1,158 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package validation + +import ( + "fmt" + "regexp" + "sort" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// MapKeyLenBetween returns a SchemaValidateDiagFunc which tests if the provided value +// is of type map and the lengths of all keys are between min and max (inclusive) +func MapKeyLenBetween(min, max int) schema.SchemaValidateDiagFunc { + return func(v interface{}, path cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + + for _, key := range sortedKeys(v.(map[string]interface{})) { + keyLen := len(key) + if keyLen < min || keyLen > max { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Bad map key length", + Detail: fmt.Sprintf("Map key lengths should be in the range (%d - %d): %s (length = %d)", min, max, key, keyLen), + AttributePath: append(path, cty.IndexStep{Key: cty.StringVal(key)}), + }) + } + } + + return diags + } +} + +// MapValueLenBetween returns a SchemaValidateDiagFunc which tests if the provided value +// is of type map and the lengths of all values are between min and max (inclusive) +func MapValueLenBetween(min, max int) schema.SchemaValidateDiagFunc { + return func(v interface{}, path cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + + m := v.(map[string]interface{}) + + for _, key := range sortedKeys(m) { + val := m[key] + + if _, ok := val.(string); !ok { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Bad map value type", + Detail: fmt.Sprintf("Map values should be strings: %s => %v (type = %T)", key, val, val), + AttributePath: append(path, cty.IndexStep{Key: cty.StringVal(key)}), + }) + continue + } + + valLen := len(val.(string)) + if valLen < min || valLen > max { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Bad map value length", + Detail: fmt.Sprintf("Map value lengths should be in the range (%d - %d): %s => %v (length = %d)", min, max, key, val, valLen), + AttributePath: append(path, cty.IndexStep{Key: cty.StringVal(key)}), + }) + } + } + + return diags + } +} + +// MapKeyMatch returns a SchemaValidateDiagFunc which tests if the provided value +// is of type map and all keys match a given regexp. Optionally an error message +// can be provided to return something friendlier than "expected to match some globby regexp". +func MapKeyMatch(r *regexp.Regexp, message string) schema.SchemaValidateDiagFunc { + return func(v interface{}, path cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + + for _, key := range sortedKeys(v.(map[string]interface{})) { + if ok := r.MatchString(key); !ok { + var detail string + if message == "" { + detail = fmt.Sprintf("Map key expected to match regular expression %q: %s", r, key) + } else { + detail = fmt.Sprintf("%s: %s", message, key) + } + + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Invalid map key", + Detail: detail, + AttributePath: append(path, cty.IndexStep{Key: cty.StringVal(key)}), + }) + } + } + + return diags + } +} + +// MapValueMatch returns a SchemaValidateDiagFunc which tests if the provided value +// is of type map and all values match a given regexp. Optionally an error message +// can be provided to return something friendlier than "expected to match some globby regexp".
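+// +// A hypothetical wiring sketch (not part of the upstream SDK; the attribute +// name "labels" and the pattern are invented for illustration): +// +// "labels": { +// Type: schema.TypeMap, +// Optional: true, +// Elem: &schema.Schema{Type: schema.TypeString}, +// ValidateDiagFunc: validation.MapValueMatch(regexp.MustCompile(`^[a-z]+$`), "values must be lowercase letters"), +// },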
+func MapValueMatch(r *regexp.Regexp, message string) schema.SchemaValidateDiagFunc { + return func(v interface{}, path cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + + m := v.(map[string]interface{}) + + for _, key := range sortedKeys(m) { + val := m[key] + + if _, ok := val.(string); !ok { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Bad map value type", + Detail: fmt.Sprintf("Map values should be strings: %s => %v (type = %T)", key, val, val), + AttributePath: append(path, cty.IndexStep{Key: cty.StringVal(key)}), + }) + continue + } + + if ok := r.MatchString(val.(string)); !ok { + var detail string + if message == "" { + detail = fmt.Sprintf("Map value expected to match regular expression %q: %s => %v", r, key, val) + } else { + detail = fmt.Sprintf("%s: %s => %v", message, key, val) + } + + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Invalid map value", + Detail: detail, + AttributePath: append(path, cty.IndexStep{Key: cty.StringVal(key)}), + }) + } + } + + return diags + } +} + +func sortedKeys(m map[string]interface{}) []string { + keys := make([]string, len(m)) + + i := 0 + for key := range m { + keys[i] = key + i++ + } + + sort.Strings(keys) + + return keys +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go new file mode 100644 index 0000000000..941476429c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package validation + +import ( + "fmt" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// NoZeroValues is a SchemaValidateFunc which tests if the provided value is +// not a zero value. It's useful in situations where you want to catch +// explicit zero values on things like required fields during validation. +func NoZeroValues(i interface{}, k string) (s []string, es []error) { + if reflect.ValueOf(i).Interface() == reflect.Zero(reflect.TypeOf(i)).Interface() { + switch reflect.TypeOf(i).Kind() { + case reflect.String: + es = append(es, fmt.Errorf("%s must not be empty, got %v", k, i)) + case reflect.Int, reflect.Float64: + es = append(es, fmt.Errorf("%s must not be zero, got %v", k, i)) + default: + // this validator should only ever be applied to TypeString, TypeInt and TypeFloat + panic(fmt.Errorf("can't use NoZeroValues with %T attribute %s", i, k)) + } + } + return +} + +// All returns a SchemaValidateFunc which tests if the provided value +// passes all provided SchemaValidateFunc +func All(validators ...schema.SchemaValidateFunc) schema.SchemaValidateFunc { + return func(i interface{}, k string) ([]string, []error) { + var allErrors []error + var allWarnings []string + for _, validator := range validators { + validatorWarnings, validatorErrors := validator(i, k) + allWarnings = append(allWarnings, validatorWarnings...) + allErrors = append(allErrors, validatorErrors...) 
+ } + return allWarnings, allErrors + } +} + +// AllDiag returns a SchemaValidateDiagFunc which tests if the provided value +// passes all provided SchemaValidateDiagFunc +func AllDiag(validators ...schema.SchemaValidateDiagFunc) schema.SchemaValidateDiagFunc { + return func(i interface{}, k cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + for _, validator := range validators { + diags = append(diags, validator(i, k)...) + } + return diags + } +} + +// Any returns a SchemaValidateFunc which tests if the provided value +// passes any of the provided SchemaValidateFunc +func Any(validators ...schema.SchemaValidateFunc) schema.SchemaValidateFunc { + return func(i interface{}, k string) ([]string, []error) { + var allErrors []error + var allWarnings []string + for _, validator := range validators { + validatorWarnings, validatorErrors := validator(i, k) + if len(validatorWarnings) == 0 && len(validatorErrors) == 0 { + return []string{}, []error{} + } + allWarnings = append(allWarnings, validatorWarnings...) + allErrors = append(allErrors, validatorErrors...) + } + return allWarnings, allErrors + } +} + +// AnyDiag returns a SchemaValidateDiagFunc which tests if the provided value +// passes any of the provided SchemaValidateDiagFunc +func AnyDiag(validators ...schema.SchemaValidateDiagFunc) schema.SchemaValidateDiagFunc { + return func(i interface{}, k cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + for _, validator := range validators { + validatorDiags := validator(i, k) + if len(validatorDiags) == 0 { + return diag.Diagnostics{} + } + diags = append(diags, validatorDiags...) + } + return diags + } +} + +// ToDiagFunc is a wrapper for legacy schema.SchemaValidateFunc +// converting it to schema.SchemaValidateDiagFunc +func ToDiagFunc(validator schema.SchemaValidateFunc) schema.SchemaValidateDiagFunc { + return func(i interface{}, p cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + + // A practitioner-friendly key for any SchemaValidateFunc output. + // Generally this should be the last attribute name on the path. + // If not found for some unexpected reason, an empty string is fine + // as the diagnostic will have the full attribute path anyways. + var key string + + // Reverse search for last cty.GetAttrStep + for i := len(p) - 1; i >= 0; i-- { + if pathStep, ok := p[i].(cty.GetAttrStep); ok { + key = pathStep.Name + break + } + } + + ws, es := validator(i, key) + + for _, w := range ws { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: w, + AttributePath: p, + }) + } + for _, e := range es { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: e.Error(), + AttributePath: p, + }) + } + return diags + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/network.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/network.go new file mode 100644 index 0000000000..9bc6da2b8e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/network.go @@ -0,0 +1,174 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package validation + +import ( + "bytes" + "fmt" + "net" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// IsIPAddress is a SchemaValidateFunc which tests if the provided value is of type string and is a single IP (v4 or v6) +func IsIPAddress(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) + return warnings, errors + } + + ip := net.ParseIP(v) + if ip == nil { + errors = append(errors, fmt.Errorf("expected %s to contain a valid IP, got: %s", k, v)) + } + + return warnings, errors +} + +// IsIPv6Address is a SchemaValidateFunc which tests if the provided value is of type string and a valid IPv6 address +func IsIPv6Address(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) + return warnings, errors + } + + ip := net.ParseIP(v) + if six := ip.To16(); six == nil { + errors = append(errors, fmt.Errorf("expected %s to contain a valid IPv6 address, got: %s", k, v)) + } + + return warnings, errors +} + +// IsIPv4Address is a SchemaValidateFunc which tests if the provided value is of type string and a valid IPv4 address +func IsIPv4Address(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) + return warnings, errors + } + + ip := net.ParseIP(v) + if four := ip.To4(); four == nil { + errors = append(errors, fmt.Errorf("expected %s to contain a valid IPv4 address, got: %s", k, v)) + } + + return warnings, errors +} + +// IsIPv4Range is a SchemaValidateFunc which tests if the provided value is of type string and is in a valid IP range +func IsIPv4Range(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return warnings, errors + } + + ips := strings.Split(v, "-") + if len(ips) != 2 { + errors = append(errors, fmt.Errorf("expected %s to contain a valid IP range, got: %s", k, v)) + return warnings, errors + } + + ip1 := net.ParseIP(ips[0]) + ip2 := net.ParseIP(ips[1]) + if ip1 == nil || ip2 == nil || bytes.Compare(ip1, ip2) > 0 { + errors = append(errors, fmt.Errorf("expected %s to contain a valid IP range, got: %s", k, v)) + } + + return warnings, errors +} + +// IsCIDR is a SchemaValidateFunc which tests if the provided value is of type string and a valid CIDR +func IsCIDR(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return warnings, errors + } + + if _, _, err := net.ParseCIDR(v); err != nil { + errors = append(errors, fmt.Errorf("expected %q to be a valid CIDR Value, got %v: %v", k, i, err)) + } + + return warnings, errors +} + +// IsCIDRNetwork returns a SchemaValidateFunc which tests if the provided value +// is of type string, is in valid CIDR network notation, and has significant bits between min and max (inclusive) +func IsCIDRNetwork(min, max int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return warnings, errors + } + + _, ipnet, err :=
net.ParseCIDR(v) + if err != nil { + errors = append(errors, fmt.Errorf("expected %s to contain a valid Value, got: %s with err: %s", k, v, err)) + return warnings, errors + } + + if ipnet == nil || v != ipnet.String() { + errors = append(errors, fmt.Errorf("expected %s to contain a valid network Value, expected %s, got %s", + k, ipnet, v)) + } + + sigbits, _ := ipnet.Mask.Size() + if sigbits < min || sigbits > max { + errors = append(errors, fmt.Errorf("expected %q to contain a network Value with between %d and %d significant bits, got: %d", k, min, max, sigbits)) + } + + return warnings, errors + } +} + +// IsMACAddress is a SchemaValidateFunc which tests if the provided value is of type string and a valid MAC address +func IsMACAddress(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) + return warnings, errors + } + + if _, err := net.ParseMAC(v); err != nil { + errors = append(errors, fmt.Errorf("expected %q to be a valid MAC address, got %v: %v", k, i, err)) + } + + return warnings, errors +} + +// IsPortNumber is a SchemaValidateFunc which tests if the provided value is of type int and a valid TCP Port Number +func IsPortNumber(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(int) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be integer", k)) + return warnings, errors + } + + if 1 > v || v > 65535 { + errors = append(errors, fmt.Errorf("expected %q to be a valid port number, got: %v", k, v)) + } + + return warnings, errors +} + +// IsPortNumberOrZero is a SchemaValidateFunc which tests if the provided value is of type int and a valid TCP Port Number or zero +func IsPortNumberOrZero(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(int) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be integer", k)) + return warnings, errors + } + + if 0 > v || v > 65535 { + errors = append(errors, fmt.Errorf("expected %q to be a valid port number or 0, got: %v", k, v)) + } + + return warnings, errors +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/strings.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/strings.go new file mode 100644 index 0000000000..375a698f2c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/strings.go @@ -0,0 +1,240 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package validation + +import ( + "encoding/base64" + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" +) + +// StringIsNotEmpty is a ValidateFunc that ensures a string is not empty +func StringIsNotEmpty(i interface{}, k string) ([]string, []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %q to be string", k)} + } + + if v == "" { + return nil, []error{fmt.Errorf("expected %q to not be an empty string, got %v", k, i)} + } + + return nil, nil +} + +// StringIsNotWhiteSpace is a ValidateFunc that ensures a string is not empty or consisting entirely of whitespace characters +func StringIsNotWhiteSpace(i interface{}, k string) ([]string, []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %q to be string", k)} + } + + if strings.TrimSpace(v) == "" { + return nil, []error{fmt.Errorf("expected %q to not be an empty string or whitespace", k)} + } + + return nil, nil +} + +// StringIsEmpty is a ValidateFunc that ensures a string has no characters +func StringIsEmpty(i interface{}, k string) ([]string, []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %q to be string", k)} + } + + if v != "" { + return nil, []error{fmt.Errorf("expected %q to be an empty string: got %v", k, v)} + } + + return nil, nil +} + +// StringIsWhiteSpace is a ValidateFunc that ensures a string is composed entirely of whitespace +func StringIsWhiteSpace(i interface{}, k string) ([]string, []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %q to be string", k)} + } + + if strings.TrimSpace(v) != "" { + return nil, []error{fmt.Errorf("expected %q to be an empty string or whitespace: got %v", k, v)} + } + + return nil, nil +} + +// StringLenBetween returns a SchemaValidateFunc which tests if the provided value +// is of type string and has length between min and max (inclusive) +func StringLenBetween(min, max int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return warnings, errors + } + + if len(v) < min || len(v) > max { + errors = append(errors, fmt.Errorf("expected length of %s to be in the range (%d - %d), got %s", k, min, max, v)) + } + + return warnings, errors + } +} + +// StringMatch returns a SchemaValidateFunc which tests if the provided value +// matches a given regexp. Optionally an error message can be provided to +// return something friendlier than "must match some globby regexp". +func StringMatch(r *regexp.Regexp, message string) schema.SchemaValidateFunc { + return func(i interface{}, k string) ([]string, []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %s to be string", k)} + } + + if ok := r.MatchString(v); !ok { + if message != "" { + return nil, []error{fmt.Errorf("invalid value for %s (%s)", k, message)} + + } + return nil, []error{fmt.Errorf("expected value of %s to match regular expression %q, got %v", k, r, i)} + } + return nil, nil + } +} + +// StringDoesNotMatch returns a SchemaValidateFunc which tests if the provided value +// does not match a given regexp.
Optionally an error message can be provided to +// return something friendlier than "must not match some globby regexp". +func StringDoesNotMatch(r *regexp.Regexp, message string) schema.SchemaValidateFunc { + return func(i interface{}, k string) ([]string, []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %s to be string", k)} + } + + if ok := r.MatchString(v); ok { + if message != "" { + return nil, []error{fmt.Errorf("invalid value for %s (%s)", k, message)} + + } + return nil, []error{fmt.Errorf("expected value of %s to not match regular expression %q, got %v", k, r, i)} + } + return nil, nil + } +} + +// StringInSlice returns a SchemaValidateFunc which tests if the provided value +// is of type string and matches the value of an element in the valid slice. +// The comparison is case-insensitive if ignoreCase is true. +func StringInSlice(valid []string, ignoreCase bool) schema.SchemaValidateFunc { + return func(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return warnings, errors + } + + for _, str := range valid { + if v == str || (ignoreCase && strings.EqualFold(v, str)) { + return warnings, errors + } + } + + errors = append(errors, fmt.Errorf("expected %s to be one of %q, got %s", k, valid, v)) + return warnings, errors + } +} + +// StringNotInSlice returns a SchemaValidateFunc which tests if the provided value +// is of type string and does not match the value of any element in the invalid slice. +// The comparison is case-insensitive if ignoreCase is true. +func StringNotInSlice(invalid []string, ignoreCase bool) schema.SchemaValidateFunc { + return func(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return warnings, errors + } + + for _, str := range invalid { + if v == str || (ignoreCase && strings.EqualFold(v, str)) { + errors = append(errors, fmt.Errorf("expected %s to not be any of %v, got %s", k, invalid, v)) + return warnings, errors + } + } + + return warnings, errors + } +} + +// StringDoesNotContainAny returns a SchemaValidateFunc which validates that the +// provided value does not contain any of the specified Unicode code points in chars. +func StringDoesNotContainAny(chars string) schema.SchemaValidateFunc { + return func(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return warnings, errors + } + + if strings.ContainsAny(v, chars) { + errors = append(errors, fmt.Errorf("expected value of %s to not contain any of %q, got %v", k, chars, i)) + return warnings, errors + } + + return warnings, errors + } +} + +// StringIsBase64 is a ValidateFunc that ensures a string can be parsed as Base64 +func StringIsBase64(i interface{}, k string) (warnings []string, errors []error) { + // Empty string is not allowed + if warnings, errors = StringIsNotEmpty(i, k); len(errors) > 0 { + return + } + + // StringIsNotEmpty already checked that it is a string + v, _ := i.(string) + + if _, err := base64.StdEncoding.DecodeString(v); err != nil { + errors = append(errors, fmt.Errorf("expected %q to be a base64 string, got %v", k, v)) + } + + return warnings, errors +} + +// StringIsJSON is a SchemaValidateFunc which tests to make sure the supplied string is valid JSON.
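+// +// A hypothetical usage sketch (the attribute name "policy" is invented for +// illustration; any schema.SchemaValidateFunc can be attached the same way): +// +// "policy": { +// Type: schema.TypeString, +// Required: true, +// ValidateFunc: validation.StringIsJSON, +// },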
+func StringIsJSON(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return warnings, errors + } + + if _, err := structure.NormalizeJsonString(v); err != nil { + errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) + } + + return warnings, errors +} + +// StringIsValidRegExp returns a SchemaValidateFunc which tests to make sure the supplied string is a valid regular expression. +func StringIsValidRegExp(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return warnings, errors + } + + if _, err := regexp.Compile(v); err != nil { + errors = append(errors, fmt.Errorf("%q: %s", k, err)) + } + + return warnings, errors +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go new file mode 100644 index 0000000000..a5aa6e0454 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go @@ -0,0 +1,90 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package validation + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +type testCase struct { + val interface{} + f schema.SchemaValidateFunc + expectedErr *regexp.Regexp +} + +func runTestCases(t *testing.T, cases []testCase) { + t.Helper() + + for i, tc := range cases { + t.Run(fmt.Sprintf("TestCase_%d", i), func(t *testing.T) { + _, errs := tc.f(tc.val, "test_property") + + if len(errs) == 0 && tc.expectedErr == nil { + return + } + + if len(errs) != 0 && tc.expectedErr == nil { + t.Fatalf("expected test case %d to produce no errors, got %v", i, errs) + } + + if !matchAnyError(errs, tc.expectedErr) { + t.Fatalf("expected test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, errs) + } + }) + } +} + +type diagTestCase struct { + val interface{} + f schema.SchemaValidateDiagFunc + expectedDiagSummary *regexp.Regexp +} + +func runDiagTestCases(t *testing.T, cases []diagTestCase) { + t.Helper() + + for i, tc := range cases { + t.Run(fmt.Sprintf("TestCase_%d", i), func(t *testing.T) { + diags := tc.f(tc.val, cty.GetAttrPath("test_property")) + + if len(diags) == 0 && tc.expectedDiagSummary == nil { + return + } + + if len(diags) != 0 && tc.expectedDiagSummary == nil { + t.Fatalf("expected test case %d to produce no diagnostics, got %v", i, diags) + } + + if !matchAnyDiagSummary(diags, tc.expectedDiagSummary) { + t.Fatalf("expected test case %d to produce diagnostic summary matching \"%s\", got %v", i, tc.expectedDiagSummary, diags) + } + }) + } +} + +func matchAnyError(errs []error, r *regexp.Regexp) bool { + // err must match one provided + for _, err := range errs { + if r.MatchString(err.Error()) { + return true + } + } + return false +} + +func matchAnyDiagSummary(ds diag.Diagnostics, r *regexp.Regexp) bool { + for _, d := range ds { + if r.MatchString(d.Summary) { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/time.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/time.go new file mode 100644 index 0000000000..940a9ec1cf 
--- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/time.go @@ -0,0 +1,57 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package validation + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// IsDayOfTheWeek is a SchemaValidateFunc which tests if the provided value is of type string and a valid English day of the week +func IsDayOfTheWeek(ignoreCase bool) schema.SchemaValidateFunc { + return StringInSlice([]string{ + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sunday", + }, ignoreCase) +} + +// IsMonth is a SchemaValidateFunc which tests if the provided value is of type string and a valid English month +func IsMonth(ignoreCase bool) schema.SchemaValidateFunc { + return StringInSlice([]string{ + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + }, ignoreCase) +} + +// IsRFC3339Time is a SchemaValidateFunc which tests if the provided value is of type string and a valid RFC3339 time +func IsRFC3339Time(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) + return warnings, errors + } + + if _, err := time.Parse(time.RFC3339, v); err != nil { + errors = append(errors, fmt.Errorf("expected %q to be a valid RFC3339 date, got %q: %+v", k, i, err)) + } + + return warnings, errors +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/uuid.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/uuid.go similarity index 87% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/uuid.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/uuid.go index 00783fafce..91122ce714 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/uuid.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/uuid.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package validation import ( diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/web.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/web.go similarity index 92% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/web.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/web.go index eb5437f14b..2e875442f4 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/validation/web.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/web.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package validation import ( @@ -5,7 +8,7 @@ import ( "net/url" "strings" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // IsURLWithHTTPS is a SchemaValidateFunc which tests if the provided value is of type string and a valid HTTPS URL diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/doc.go new file mode 100644 index 0000000000..0d29d9f456 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/doc.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package addrs contains types that represent "addresses", which are +// references to specific objects within a Terraform configuration or +// state. +// +// All addresses have string representations based on HCL traversal syntax +// which should be used in the user-interface, and also in-memory +// representations that can be used internally. +// +// For object types that exist within Terraform modules a pair of types is +// used. The "local" part of the address is represented by a type, and then +// an absolute path to that object in the context of its module is represented +// by a type of the same name with an "Abs" prefix added, for "absolute". +// +// All types within this package should be treated as immutable, even if this +// is not enforced by the Go compiler. It is always an implementation error +// to modify an address object in-place after it is initially constructed. +package addrs diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go new file mode 100644 index 0000000000..8373297f87 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" +) + +// instanceKey represents the key of an instance within an object that +// contains multiple instances due to using "count" or "for_each" arguments +// in configuration. +// +// intKey and stringKey are the two implementations of this type. No other +// implementations are allowed. The single instance of an object that _isn't_ +// using "count" or "for_each" is represented by NoKey, which is a nil +// InstanceKey. +type instanceKey interface { + instanceKeySigil() + String() string +} + +// NoKey represents the absence of an instanceKey, for the single instance +// of a configuration object that does not use "count" or "for_each" at all. +var NoKey instanceKey + +// intKey is the InstanceKey representation representing integer indices, as +// used when the "count" argument is specified or if for_each is used with +// a sequence type. +type intKey int + +func (k intKey) instanceKeySigil() { +} + +func (k intKey) String() string { + return fmt.Sprintf("[%d]", int(k)) +} + +// stringKey is the InstanceKey representation representing string indices, as +// used when the "for_each" argument is specified with a map or object type. +type stringKey string + +func (k stringKey) instanceKeySigil() { +} + +func (k stringKey) String() string { + // FIXME: This isn't _quite_ right because Go's quoted string syntax is + // slightly different than HCL's, but we'll accept it for now.
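+ // For example, stringKey("web") renders as ["web"]; quotes and other + // special characters in the key are escaped using Go's %q rules.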
+ return fmt.Sprintf("[%q]", string(k)) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go new file mode 100644 index 0000000000..8dbbb469d4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +// Module is an address for a module call within configuration. This is +// the static counterpart of ModuleInstance, representing a traversal through +// the static module call tree in configuration and does not take into account +// the potentially-multiple instances of a module that might be created by +// "count" and "for_each" arguments within those calls. +// +// This type should be used only in very specialized cases when working with +// the static module call tree. Type ModuleInstance is appropriate in more cases. +// +// Although Module is a slice, it should be treated as immutable after creation. +type Module []string diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go new file mode 100644 index 0000000000..12cd83051a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go @@ -0,0 +1,242 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags" +) + +// ModuleInstance is an address for a particular module instance within the +// dynamic module tree. This is an extension of the static traversals +// represented by type Module that deals with the possibility of a single +// module call producing multiple instances via the "count" and "for_each" +// arguments. +// +// Although ModuleInstance is a slice, it should be treated as immutable after +// creation. +type ModuleInstance []ModuleInstanceStep + +func parseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) { + mi, remain, diags := parseModuleInstancePrefix(traversal) + if len(remain) != 0 { + if len(remain) == len(traversal) { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid module instance address", + "A module instance address must begin with \"module.\".", + )) + } else { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid module instance address", + "The module instance address is followed by additional invalid content.", + )) + } + } + return mi, diags +} + +// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseProviderConfigCompact. 
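+// +// For example (illustrative), ParseModuleInstanceStr("module.web[0]") yields +// a one-step ModuleInstance whose step has Name "web" and InstanceKey +// intKey(0).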
+// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// then the returned address is invalid. +func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + for _, err := range parseDiags.Errs() { + // ignore warnings, they don't matter in this case + diags = append(diags, tfdiags.FromError(err)) + } + if parseDiags.HasErrors() { + return nil, diags + } + + addr, addrDiags := parseModuleInstance(traversal) + diags = append(diags, addrDiags...) + return addr, diags +} + +func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) { + remain := traversal + var mi ModuleInstance + var diags tfdiags.Diagnostics + + for len(remain) > 0 { + var next string + switch tt := remain[0].(type) { + case hcl.TraverseRoot: + next = tt.Name + case hcl.TraverseAttr: + next = tt.Name + default: + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Module address prefix must be followed by dot and then a name.", + )) + } + + if next != "module" { + break + } + + remain = remain[1:] + // If we have the prefix "module" then we should be followed by an + // module call name, as an attribute, and then optionally an index step + // giving the instance key. + if len(remain) == 0 { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Prefix \"module.\" must be followed by a module name.", + )) + break + } + + var moduleName string + switch tt := remain[0].(type) { + case hcl.TraverseAttr: + moduleName = tt.Name + default: + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Prefix \"module.\" must be followed by a module name.", + )) + } + remain = remain[1:] + step := ModuleInstanceStep{ + Name: moduleName, + } + + if len(remain) > 0 { + if idx, ok := remain[0].(hcl.TraverseIndex); ok { + remain = remain[1:] + + switch idx.Key.Type() { + case cty.String: + step.InstanceKey = stringKey(idx.Key.AsString()) + case cty.Number: + var idxInt int + err := gocty.FromCtyValue(idx.Key, &idxInt) + if err == nil { + step.InstanceKey = intKey(idxInt) + } else { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + fmt.Sprintf("Invalid module index: %s.", err), + )) + } + default: + // Should never happen, because no other types are allowed in traversal indices. + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Invalid module key: must be either a string or an integer.", + )) + } + } + } + + mi = append(mi, step) + } + + var retRemain hcl.Traversal + if len(remain) > 0 { + retRemain = make(hcl.Traversal, len(remain)) + copy(retRemain, remain) + // The first element here might be either a TraverseRoot or a + // TraverseAttr, depending on whether we had a module address on the + // front. To make life easier for callers, we'll normalize to always + // start with a TraverseRoot. 
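+ // For example, in "module.child.aws_instance.foo" the "aws_instance" part + // reaches this point as an hcl.TraverseAttr and is rewritten to an + // equivalent hcl.TraverseRoot.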
+ if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { + retRemain[0] = hcl.TraverseRoot{ + Name: tt.Name, + SrcRange: tt.SrcRange, + } + } + } + + return mi, retRemain, diags +} + +// UnkeyedInstanceShim is a shim method for converting a Module address to the +// equivalent ModuleInstance address that assumes that no modules have +// keyed instances. +// +// This is a temporary allowance for the fact that Terraform does not presently +// support "count" and "for_each" on modules, and thus graph building code that +// derives graph nodes from configuration must just assume unkeyed modules +// in order to construct the graph. At a later time when "count" and "for_each" +// support is added for modules, all callers of this method will need to be +// reworked to allow for keyed module instances. +func (m Module) UnkeyedInstanceShim() ModuleInstance { + path := make(ModuleInstance, len(m)) + for i, name := range m { + path[i] = ModuleInstanceStep{Name: name} + } + return path +} + +// ModuleInstanceStep is a single traversal step through the dynamic module +// tree. It is used only as part of ModuleInstance. +type ModuleInstanceStep struct { + Name string + InstanceKey instanceKey +} + +// RootModuleInstance is the module instance address representing the root +// module, which is also the zero value of ModuleInstance. +var RootModuleInstance ModuleInstance + +// Child returns the address of a child module instance of the receiver, +// identified by the given name and key. +func (m ModuleInstance) Child(name string, key instanceKey) ModuleInstance { + ret := make(ModuleInstance, 0, len(m)+1) + ret = append(ret, m...) + return append(ret, ModuleInstanceStep{ + Name: name, + InstanceKey: key, + }) +} + +// String returns a string representation of the receiver, in the format used +// within e.g. user-provided resource addresses. +// +// The address of the root module has the empty string as its representation. +func (m ModuleInstance) String() string { + var buf bytes.Buffer + sep := "" + for _, step := range m { + buf.WriteString(sep) + buf.WriteString("module.") + buf.WriteString(step.Name) + if step.InstanceKey != NoKey { + buf.WriteString(step.InstanceKey.String()) + } + sep = "." + } + return buf.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/coerce_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go similarity index 97% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/coerce_value.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go index 41a533745c..d12ff8cced 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/coerce_value.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configschema import ( "fmt" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" ) // CoerceValue attempts to force the given value to conform to the type diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/doc.go new file mode 100644 index 0000000000..d96be9c7f0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/doc.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package configschema contains types for describing the expected structure +// of a configuration block whose shape is not known until runtime. +// +// For example, this is used to describe the expected contents of a resource +// configuration block, which is defined by the corresponding provider plugin +// and thus not compiled into Terraform core. +// +// A configschema primarily describes the shape of configuration, but it is +// also suitable for use with other structures derived from the configuration, +// such as the cached state of a resource or a resource diff. +// +// This package should not be confused with the package helper/schema, which +// is the higher-level helper library used to implement providers themselves. +package configschema diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/empty_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go similarity index 94% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/empty_value.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go index 005da56bf5..3c9573bc56 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/empty_value.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package configschema import ( - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" ) // EmptyValue returns the "empty value" for the receiving block, which for diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go new file mode 100644 index 0000000000..4de413519f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// ImpliedType returns the cty.Type that would result from decoding a +// configuration block using the receiving block schema. +// +// ImpliedType always returns a result, even if the given schema is +// inconsistent.
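+// +// As an illustrative sketch (not upstream documentation), a block with one +// optional string attribute "name" and a NestingList block type "rule" whose +// body holds a single bool attribute "enabled" implies: +// +// cty.Object(map[string]cty.Type{ +// "name": cty.String, +// "rule": cty.List(cty.Object(map[string]cty.Type{ +// "enabled": cty.Bool, +// })), +// })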
+func (b *Block) ImpliedType() cty.Type { + if b == nil { + return cty.EmptyObject + } + + atys := make(map[string]cty.Type) + + for name, attrS := range b.Attributes { + atys[name] = attrS.Type + } + + for name, blockS := range b.BlockTypes { + if _, exists := atys[name]; exists { + panic("invalid schema, blocks and attributes cannot have the same name") + } + + childType := blockS.Block.ImpliedType() + + switch blockS.Nesting { + case NestingSingle, NestingGroup: + atys[name] = childType + case NestingList: + // We prefer to use a list where possible, since it makes our + // implied type more complete, but if there are any + // dynamically-typed attributes inside we must use a tuple + // instead, which means our type _constraint_ must be + // cty.DynamicPseudoType to allow the tuple type to be decided + // separately for each value. + if childType.HasDynamicTypes() { + atys[name] = cty.DynamicPseudoType + } else { + atys[name] = cty.List(childType) + } + case NestingSet: + if childType.HasDynamicTypes() { + panic("can't use cty.DynamicPseudoType inside a block type with NestingSet") + } + atys[name] = cty.Set(childType) + case NestingMap: + // We prefer to use a map where possible, since it makes our + // implied type more complete, but if there are any + // dynamically-typed attributes inside we must use an object + // instead, which means our type _constraint_ must be + // cty.DynamicPseudoType to allow the object type to be decided + // separately for each value. + if childType.HasDynamicTypes() { + atys[name] = cty.DynamicPseudoType + } else { + atys[name] = cty.Map(childType) + } + default: + panic("invalid nesting type") + } + } + + return cty.Object(atys) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/nestingmode_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/nestingmode_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/nestingmode_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go new file mode 100644 index 0000000000..c445b4ba55 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go @@ -0,0 +1,161 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// StringKind represents the format a string is in. +type StringKind int + +const ( + // StringPlain indicates a string is plain-text and requires no processing for display. + StringPlain StringKind = iota + // StringMarkdown indicates a string is in markdown format and may + // require additional processing to display. + StringMarkdown +) + +// Block represents a configuration block. +// +// "Block" here is a logical grouping construct, though it happens to map +// directly onto the physical block syntax of Terraform's native configuration +// syntax. It may be more a matter of convention in other syntaxes, such as +// JSON.
+// +// When converted to a value, a Block always becomes an instance of an object +// type derived from its defined attributes and nested blocks. +type Block struct { + // Attributes describes any attributes that may appear directly inside + // the block. + Attributes map[string]*Attribute + + // BlockTypes describes any nested block types that may appear directly + // inside the block. + BlockTypes map[string]*NestedBlock + + // Description and DescriptionKind contain a user-facing description of the block + // and the format of that string. + Description string + DescriptionKind StringKind + + // Deprecated indicates whether the block has been marked as deprecated in the + // provider and usage should be discouraged. + Deprecated bool +} + +// Attribute represents a configuration attribute, within a block. +type Attribute struct { + // Type is a type specification that the attribute's value must conform to. + Type cty.Type + + // Description is an English-language description of the purpose and + // usage of the attribute. A description should be concise and use only + // one or two sentences, leaving full definition to longer-form + // documentation defined elsewhere. + Description string + DescriptionKind StringKind + + // Required, if set to true, specifies that an omitted or null value is + // not permitted. + Required bool + + // Optional, if set to true, specifies that an omitted or null value is + // permitted. This field conflicts with Required. + Optional bool + + // Computed, if set to true, specifies that the value comes from the + // provider rather than from configuration. If combined with Optional, + // then the config may optionally provide an overridden value. + Computed bool + + // Sensitive, if set to true, indicates that an attribute may contain + // sensitive information. + // + // At present nothing is done with this information, but callers are + // encouraged to set it where appropriate so that it may be used in the + // future to help Terraform mask sensitive information. (Terraform + // currently achieves this in a limited sense via other mechanisms.) + Sensitive bool + + // Deprecated indicates whether the attribute has been marked as deprecated in the + // provider and usage should be discouraged. + Deprecated bool +} + +// NestedBlock represents the embedding of one block within another. +type NestedBlock struct { + // Block is the description of the block that's nested. + Block + + // Nesting provides the nesting mode for the child block, which determines + // how many instances of the block are allowed, how many labels it expects, + // and how the resulting data will be converted into a data structure. + Nesting NestingMode + + // MinItems and MaxItems set, for the NestingList and NestingSet nesting + // modes, lower and upper limits on the number of child blocks allowed + // of the given type. If both are left at zero, no limit is applied. + // + // As a special case, both values can be set to 1 for NestingSingle in + // order to indicate that a particular single block is required. + // + // These fields are ignored for other nesting modes and must both be left + // at zero. + MinItems, MaxItems int +} + +// NestingMode is an enumeration of modes for nesting blocks inside other +// blocks. +type NestingMode int + +// This code was previously generated with a go:generate directive calling: +// go run golang.org/x/tools/cmd/stringer -type=NestingMode +// However, it is now considered frozen and the tooling dependency has been +// removed.
The String method can be manually updated if necessary. + +const ( + nestingModeInvalid NestingMode = iota + + // NestingSingle indicates that only a single instance of a given + // block type is permitted, with no labels, and its content should be + // provided directly as an object value. + NestingSingle + + // NestingGroup is similar to NestingSingle in that it calls for only a + // single instance of a given block type with no labels, but it additionally + // guarantees that its result will never be null, even if the block is + // absent, and instead the nested attributes and blocks will be treated + // as absent in that case. (Any required attributes or blocks within the + // nested block are not enforced unless the block is explicitly present + // in the configuration, so they are all effectively optional when the + // block is not present.) + // + // This is useful for the situation where a remote API has a feature that + // is always enabled but has a group of settings related to that feature + // that themselves have default values. By using NestingGroup instead of + // NestingSingle in that case, generated plans will show the block as + // present even when not present in configuration, thus allowing any + // default values within to be displayed to the user. + NestingGroup + + // NestingList indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a list. + NestingList + + // NestingSet indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a set. + NestingSet + + // NestingMap indicates that multiple blocks of the given type are + // permitted, each with a single label, and that their corresponding + // objects should be provided in a map whose keys are the labels. + // + // It's an error, therefore, to use the same label value on multiple + // blocks. + NestingMap +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/flatmap.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go similarity index 98% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/flatmap.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go index bb4228d98c..2bad034de9 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/flatmap.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + package hcl2shim import ( @@ -5,9 +8,8 @@ import ( "strconv" "strings" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" ) // FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic @@ -86,15 +88,15 @@ func flatmapValueFromHCL2Map(m map[string]string, prefix string, val cty.Value) return } - len := 0 + valLen := 0 for it := val.ElementIterator(); it.Next(); { ak, av := it.Element() name := ak.AsString() flatmapValueFromHCL2Value(m, prefix+name, av) - len++ + valLen++ } if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed - m[prefix+"%"] = strconv.Itoa(len) + m[prefix+"%"] = strconv.Itoa(valLen) } } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go new file mode 100644 index 0000000000..628a8bf686 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go @@ -0,0 +1,279 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/hashicorp/go-cty/cty" +) + +// RequiresReplace takes a list of flatmapped paths from a +// InstanceDiff.Attributes along with the corresponding cty.Type, and returns +// the list of the cty.Paths that are flagged as causing the resource +// replacement (RequiresNew). +// This will filter out redundant paths, paths that refer to flatmapped indexes +// (e.g. "#", "%"), and will return any changes within a set as the path to the +// set itself. +func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) { + var paths []cty.Path + + for _, attr := range attrs { + p, err := requiresReplacePath(attr, ty) + if err != nil { + return nil, err + } + + paths = append(paths, p) + } + + // now trim off any trailing paths that aren't GetAttrSteps, since only an + // attribute itself can require replacement + paths = trimPaths(paths) + + // There may be redundant paths due to set elements or index attributes + // Do some ugly n^2 filtering, but these are always fairly small sets. + for i := 0; i < len(paths)-1; i++ { + for j := i + 1; j < len(paths); j++ { + if reflect.DeepEqual(paths[i], paths[j]) { + // swap the tail and slice it off + paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j] + paths = paths[:len(paths)-1] + j-- + } + } + } + + return paths, nil +} + +// trimPaths removes any trailing steps that aren't of type GetAttrStep, since +// only an attribute itself can require replacement +func trimPaths(paths []cty.Path) []cty.Path { + var trimmed []cty.Path + for _, path := range paths { + path = trimPath(path) + if len(path) > 0 { + trimmed = append(trimmed, path) + } + } + return trimmed +} + +func trimPath(path cty.Path) cty.Path { + for len(path) > 0 { + _, isGetAttr := path[len(path)-1].(cty.GetAttrStep) + if isGetAttr { + break + } + path = path[:len(path)-1] + } + return path +} + +// requiresReplacePath takes a key from a flatmap along with the cty.Type +// describing the structure, and returns the cty.Path that would be used to +// reference the nested value in the data structure. +// This is used specifically to record the RequiresReplace attributes from a +// ResourceInstanceDiff.
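To make the flatmap-to-path conversion concrete before the implementation that follows: a key such as "rule.0.port" (hypothetical names) walks one attribute, one list index, and one nested attribute. A minimal sketch of the resulting cty.Path, built from the same step types the helpers below use:

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	// Expected result of converting the flatmap key "rule.0.port" against
	// an assumed type object({rule=list(object({port=number}))}).
	path := cty.Path{
		cty.GetAttrStep{Name: "rule"},
		cty.IndexStep{Key: cty.NumberIntVal(0)},
		cty.GetAttrStep{Name: "port"},
	}
	fmt.Printf("%#v\n", path)
}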
+func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) { + if k == "" { + return nil, nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty)) + } + + path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes()) + if err != nil { + return path, fmt.Errorf("[%s] %s", k, err) + } + return path, nil +} + +func pathSplit(p string) (string, string) { + parts := strings.SplitN(p, ".", 2) + head := parts[0] + rest := "" + if len(parts) > 1 { + rest = parts[1] + } + return head, rest +} + +func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) { + k, rest := pathSplit(key) + + path := cty.Path{cty.GetAttrStep{Name: k}} + + ty, ok := atys[k] + if !ok { + return path, fmt.Errorf("attribute %q not found", k) + } + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + switch { + case ty.IsPrimitiveType(): + err = fmt.Errorf("invalid step %q with type %#v", key, ty) + case ty.IsObjectType(): + path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes()) + case ty.IsTupleType(): + path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes()) + case ty.IsMapType(): + path, err = pathFromFlatmapKeyMap(key, ty) + case ty.IsListType(): + path, err = pathFromFlatmapKeyList(key, ty) + case ty.IsSetType(): + path, err = pathFromFlatmapKeySet(key, ty) + default: + err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName()) + } + + if err != nil { + return path, err + } + + return path, nil +} + +func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if k == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if idx >= len(etys) { + return path, fmt.Errorf("index %s out of range in %#v", key, etys) + } + + if rest == "" { + return path, nil + } + + ty := etys[idx] + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := key, "" + if !ty.ElementType().IsPrimitiveType() { + k, rest = pathSplit(key) + } + + // we don't need to convert the index keys to paths + if k == "%" { + return path, nil + } + + path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if key == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func 
pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) { + // once we hit a set, we can't return consistent paths, so just mark the + // set as a whole changed. + return nil, nil +} + +// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for +// use in generating legacy style diffs. +func FlatmapKeyFromPath(path cty.Path) string { + var parts []string + + for _, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + parts = append(parts, step.Name) + case cty.IndexStep: + switch ty := step.Key.Type(); { + case ty == cty.String: + parts = append(parts, step.Key.AsString()) + case ty == cty.Number: + i, _ := step.Key.AsBigFloat().Int64() + parts = append(parts, strconv.Itoa(int(i))) + } + } + } + + return strings.Join(parts, ".") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go similarity index 97% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go index a074c749d5..f5f5de3b25 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go @@ -1,12 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl2shim import ( "fmt" "math/big" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" ) // UnknownVariableValue is a sentinel value that can be used diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values_equiv.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go similarity index 98% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values_equiv.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go index 92f0213d72..6b2be2239d 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values_equiv.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl2shim import ( - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" ) // ValuesSDKEquivalent returns true if both of the given values seem equivalent diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode/hashcode.go new file mode 100644 index 0000000000..97bc709b2a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode/hashcode.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hashcode + +import ( + "bytes" + "fmt" + "hash/crc32" +) + +// String hashes a string to a unique hashcode. +// +// crc32 returns a uint32, but for our use we need +// a non-negative integer. Here we cast to an integer +// and invert it if the result is negative.
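Before the implementation, a standalone sketch of this sign handling (illustrative only; it mirrors the hashcode.String logic that follows):

package main

import (
	"fmt"
	"hash/crc32"
)

// hashString mirrors hashcode.String below: checksum the input, then map a
// negative result back into the non-negative range, returning 0 for the one
// value (the minimum int) whose negation still overflows.
func hashString(s string) int {
	v := int(crc32.ChecksumIEEE([]byte(s)))
	if v >= 0 {
		return v
	}
	if -v >= 0 {
		return -v
	}
	// v == MinInt
	return 0
}

func main() {
	fmt.Println(hashString("port"), hashString("address"))
}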
+func String(s string) int { + v := int(crc32.ChecksumIEEE([]byte(s))) + if v >= 0 { + return v + } + if -v >= 0 { + return -v + } + // v == MinInt + return 0 +} + +// Strings hashes a list of strings to a unique hashcode. +func Strings(strings []string) string { + var buf bytes.Buffer + + for _, s := range strings { + buf.WriteString(fmt.Sprintf("%s-", s)) + } + + return fmt.Sprintf("%d", String(buf.String())) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/context.go new file mode 100644 index 0000000000..0fe8002aa7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/context.go @@ -0,0 +1,78 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" + helperlogging "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + testing "github.com/mitchellh/go-testing-interface" +) + +// InitContext creates SDK logger contexts when the provider is running in +// "production" (not under acceptance testing). The incoming context will +// already have the root SDK logger and root provider logger setup from +// terraform-plugin-go tf5server RPC handlers. +func InitContext(ctx context.Context) context.Context { + ctx = tfsdklog.NewSubsystem(ctx, SubsystemHelperSchema, + // All calls are through the HelperSchema* helper functions + tfsdklog.WithAdditionalLocationOffset(1), + tfsdklog.WithLevelFromEnv(EnvTfLogSdkHelperSchema), + // Propagate tf_req_id, tf_rpc, etc. fields + tfsdklog.WithRootFields(), + ) + + return ctx +} + +// InitTestContext registers the terraform-plugin-log/tfsdklog test sink, +// configures the standard library log package, and creates SDK logger +// contexts. The incoming context is expected to be devoid of logging setup. +// +// The standard library log package handling is important as provider code +// under test may be using that package or another logging library outside of +// terraform-plugin-log. +func InitTestContext(ctx context.Context, t testing.T) context.Context { + helperlogging.SetOutput(t) + + ctx = tfsdklog.RegisterTestSink(ctx, t) + ctx = tfsdklog.NewRootSDKLogger(ctx, tfsdklog.WithLevelFromEnv(EnvTfLogSdk)) + ctx = tfsdklog.NewSubsystem(ctx, SubsystemHelperResource, + // All calls are through the HelperResource* helper functions + tfsdklog.WithAdditionalLocationOffset(1), + tfsdklog.WithLevelFromEnv(EnvTfLogSdkHelperResource), + ) + ctx = TestNameContext(ctx, t.Name()) + + return ctx +} + +// TestNameContext adds the current test name to loggers. +func TestNameContext(ctx context.Context, testName string) context.Context { + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestName, testName) + + return ctx +} + +// TestStepNumberContext adds the current test step number to loggers. +func TestStepNumberContext(ctx context.Context, stepNumber int) context.Context { + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestStepNumber, stepNumber) + + return ctx +} + +// TestTerraformPathContext adds the current test Terraform CLI path to loggers. +func TestTerraformPathContext(ctx context.Context, terraformPath string) context.Context { + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestTerraformPath, terraformPath) + + return ctx +} + +// TestWorkingDirectoryContext adds the current test working directory to loggers. 
+func TestWorkingDirectoryContext(ctx context.Context, workingDirectory string) context.Context { + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestWorkingDirectory, workingDirectory) + + return ctx +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/environment_variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/environment_variables.go new file mode 100644 index 0000000000..2ffc73eee6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/environment_variables.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +// Environment variables. +const ( + // EnvTfLogSdk is an environment variable that sets the logging level of + // the root SDK logger, while the provider is under test. In "production" + // usage, this environment variable is handled by terraform-plugin-go. + // + // Terraform CLI's logging must be explicitly turned on before this + // environment variable can be used to reduce the SDK logging levels. It + // cannot be used to show only SDK logging unless all other logging levels + // are turned off. + EnvTfLogSdk = "TF_LOG_SDK" + + // EnvTfLogSdkHelperResource is an environment variable that sets the logging + // level of SDK helper/resource loggers. Infers root SDK logging level, if + // unset. + EnvTfLogSdkHelperResource = "TF_LOG_SDK_HELPER_RESOURCE" + + // EnvTfLogSdkHelperSchema is an environment variable that sets the logging + // level of SDK helper/schema loggers. Infers root SDK logging level, if + // unset. + EnvTfLogSdkHelperSchema = "TF_LOG_SDK_HELPER_SCHEMA" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_resource.go new file mode 100644 index 0000000000..1b1459f246 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_resource.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +const ( + // SubsystemHelperResource is the tfsdklog subsystem name for helper/resource. + SubsystemHelperResource = "helper_resource" +) + +// HelperResourceTrace emits a helper/resource subsystem log at TRACE level. +func HelperResourceTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemTrace(ctx, SubsystemHelperResource, msg, additionalFields...) +} + +// HelperResourceDebug emits a helper/resource subsystem log at DEBUG level. +func HelperResourceDebug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemDebug(ctx, SubsystemHelperResource, msg, additionalFields...) +} + +// HelperResourceWarn emits a helper/resource subsystem log at WARN level. +func HelperResourceWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemWarn(ctx, SubsystemHelperResource, msg, additionalFields...) +} + +// HelperResourceError emits a helper/resource subsystem log at ERROR level. +func HelperResourceError(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemError(ctx, SubsystemHelperResource, msg, additionalFields...)
+} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_schema.go new file mode 100644 index 0000000000..0ecf6bf2e4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/helper_schema.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +const ( + // SubsystemHelperSchema is the tfsdklog subsystem name for helper/schema. + SubsystemHelperSchema = "helper_schema" +) + +// HelperSchemaDebug emits a helper/schema subsystem log at DEBUG level. +func HelperSchemaDebug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemDebug(ctx, SubsystemHelperSchema, msg, additionalFields...) +} + +// HelperSchemaError emits a helper/schema subsystem log at ERROR level. +func HelperSchemaError(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemError(ctx, SubsystemHelperSchema, msg, additionalFields...) +} + +// HelperSchemaTrace emits a helper/schema subsystem log at TRACE level. +func HelperSchemaTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemTrace(ctx, SubsystemHelperSchema, msg, additionalFields...) +} + +// HelperSchemaWarn emits a helper/schema subsystem log at WARN level. +func HelperSchemaWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemWarn(ctx, SubsystemHelperSchema, msg, additionalFields...) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go new file mode 100644 index 0000000000..983fde437a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging/keys.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +// Structured logging keys. +// +// Practitioners or tooling reading logs may be depending on these keys, so be +// conscious of that when changing them. +// +// Refer to the terraform-plugin-go logging keys as well, which should be +// equivalent to these when possible. +const ( + // Attribute path representation, which is typically in flatmap form such + // as parent.0.child in this project. + KeyAttributePath = "tf_attribute_path" + + // The type of data source being operated on, such as "archive_file" + KeyDataSourceType = "tf_data_source_type" + + // Underlying Go error string when logging an error. + KeyError = "error" + + // The full address of the provider, such as + // registry.terraform.io/hashicorp/random + KeyProviderAddress = "tf_provider_addr" + + // The type of resource being operated on, such as "random_pet" + KeyResourceType = "tf_resource_type" + + // The name of the test being executed. + KeyTestName = "test_name" + + // The TestStep number of the test being executed. Starts at 1. + KeyTestStepNumber = "test_step_number" + + // Terraform configuration used during acceptance testing Terraform operations. + KeyTestTerraformConfiguration = "test_terraform_configuration" + + // The Terraform CLI logging level (TF_LOG) used for an acceptance test. 
+ KeyTestTerraformLogLevel = "test_terraform_log_level" + + // The Terraform CLI logging level (TF_LOG_CORE) used for an acceptance test. + KeyTestTerraformLogCoreLevel = "test_terraform_log_core_level" + + // The Terraform CLI logging level (TF_LOG_PROVIDER) used for an acceptance test. + KeyTestTerraformLogProviderLevel = "test_terraform_log_provider_level" + + // The path to the Terraform CLI logging file used for an acceptance test. + // + // This should match where the rest of the acceptance test logs are going + // already, but is provided for troubleshooting in case it does not. + KeyTestTerraformLogPath = "test_terraform_log_path" + + // The path to the Terraform CLI used for an acceptance test. + KeyTestTerraformPath = "test_terraform_path" + + // Terraform plan output generated during a TestStep. + KeyTestTerraformPlan = "test_terraform_plan" + + // The working directory of the acceptance test. + KeyTestWorkingDirectory = "test_working_directory" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/normalize_obj.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go similarity index 95% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/normalize_obj.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go index a8629046ca..b888237fc2 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/normalize_obj.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go @@ -1,13 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package objchange import ( - "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" ) // NormalizeObjectFromLegacySDK takes an object that may have been generated // by the legacy Terraform SDK (i.e. returned from a provider with the -// LegacyTypeSystem opt-out set) and does its best to normalize it for the +// UnsafeToUseLegacyTypeSystem opt-out set) and does its best to normalize it for the // assumptions we would normally enforce if the provider had not opted out. // // In particular, this function guarantees that a value representing a nested diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go new file mode 100644 index 0000000000..672f75e6d8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package convert + +import ( + "context" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +// AppendProtoDiag appends a new diagnostic from a warning string or an error. +// It also accepts cty.PathError, diag.Diagnostics, and tfprotov5 diagnostic +// values; unrecognized types are ignored.
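Since convert is an internal package, callers see its behavior through the diagnostics it produces. A minimal sketch of what the error branch of the function below yields (the error text is an illustrative value):

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
)

func main() {
	// A non-nil error becomes a severity-error diagnostic whose summary is
	// the error text, as in the error case of AppendProtoDiag below.
	err := errors.New("repository not found")
	diagnostic := &tfprotov5.Diagnostic{
		Severity: tfprotov5.DiagnosticSeverityError,
		Summary:  err.Error(),
	}
	fmt.Println(diagnostic.Severity, diagnostic.Summary)
}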
+func AppendProtoDiag(ctx context.Context, diags []*tfprotov5.Diagnostic, d interface{}) []*tfprotov5.Diagnostic { + switch d := d.(type) { + case cty.PathError: + ap := PathToAttributePath(d.Path) + diagnostic := &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: d.Error(), + Attribute: ap, + } + + if diagnostic.Summary == "" { + logging.HelperSchemaWarn(ctx, "detected empty error string for diagnostic in AppendProtoDiag for cty.PathError type") + diagnostic.Summary = "Empty Error String" + diagnostic.Detail = "This is always a bug in the provider and should be reported to the provider developers." + } + + diags = append(diags, diagnostic) + case diag.Diagnostics: + diags = append(diags, DiagsToProto(d)...) + case error: + if d == nil { + logging.HelperSchemaDebug(ctx, "skipping diagnostic for nil error in AppendProtoDiag") + return diags + } + + diagnostic := &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: d.Error(), + } + + if diagnostic.Summary == "" { + logging.HelperSchemaWarn(ctx, "detected empty error string for diagnostic in AppendProtoDiag for error type") + diagnostic.Summary = "Error Missing Message" + diagnostic.Detail = "This is always a bug in the provider and should be reported to the provider developers." + } + + diags = append(diags, diagnostic) + case string: + if d == "" { + logging.HelperSchemaDebug(ctx, "skipping diagnostic for empty string in AppendProtoDiag") + return diags + } + + diags = append(diags, &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityWarning, + Summary: d, + }) + case *tfprotov5.Diagnostic: + diags = append(diags, d) + case []*tfprotov5.Diagnostic: + diags = append(diags, d...) + } + return diags +} + +// ProtoToDiags converts a list of tfprotov5.Diagnostics to a diag.Diagnostics. +func ProtoToDiags(ds []*tfprotov5.Diagnostic) diag.Diagnostics { + var diags diag.Diagnostics + for _, d := range ds { + var severity diag.Severity + + switch d.Severity { + case tfprotov5.DiagnosticSeverityError: + severity = diag.Error + case tfprotov5.DiagnosticSeverityWarning: + severity = diag.Warning + } + + diags = append(diags, diag.Diagnostic{ + Severity: severity, + Summary: d.Summary, + Detail: d.Detail, + AttributePath: AttributePathToPath(d.Attribute), + }) + } + + return diags +} + +func DiagsToProto(diags diag.Diagnostics) []*tfprotov5.Diagnostic { + var ds []*tfprotov5.Diagnostic + for _, d := range diags { + protoDiag := &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: d.Summary, + Detail: d.Detail, + Attribute: PathToAttributePath(d.AttributePath), + } + if d.Severity == diag.Warning { + protoDiag.Severity = tfprotov5.DiagnosticSeverityWarning + } + if d.Summary == "" { + protoDiag.Summary = "Empty Summary: This is always a bug in the provider and should be reported to the provider developers." + } + ds = append(ds, protoDiag) + } + return ds +} + +// AttributePathToPath takes the proto encoded path and converts it to a cty.Path +func AttributePathToPath(ap *tftypes.AttributePath) cty.Path { + var p cty.Path + if ap == nil { + return p + } + for _, step := range ap.Steps() { + switch step := step.(type) { + case tftypes.AttributeName: + p = p.GetAttr(string(step)) + case tftypes.ElementKeyString: + p = p.Index(cty.StringVal(string(step))) + case tftypes.ElementKeyInt: + p = p.Index(cty.NumberIntVal(int64(step))) + } + } + return p +} + +// PathToAttributePath takes a cty.Path and converts it to a proto-encoded path. 
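The two path representations correspond step-for-step. A small sketch of the proto-side value that AttributePathToPath consumes and the function below produces, using terraform-plugin-go's path builders with hypothetical attribute names:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-go/tftypes"
)

func main() {
	// Protocol-side equivalent of the cty path
	// cty.GetAttrStep{"rule"}, cty.IndexStep{0}, cty.GetAttrStep{"port"}.
	ap := tftypes.NewAttributePath().
		WithAttributeName("rule").
		WithElementKeyInt(0).
		WithAttributeName("port")
	fmt.Println(ap)
}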
+func PathToAttributePath(p cty.Path) *tftypes.AttributePath { + if p == nil || len(p) < 1 { + return nil + } + ap := tftypes.NewAttributePath() + for _, step := range p { + switch selector := step.(type) { + case cty.GetAttrStep: + ap = ap.WithAttributeName(selector.Name) + + case cty.IndexStep: + key := selector.Key + switch key.Type() { + case cty.String: + ap = ap.WithElementKeyString(key.AsString()) + case cty.Number: + v, _ := key.AsBigFloat().Int64() + ap = ap.WithElementKeyInt(int(v)) + default: + // We'll bail early if we encounter anything else, and just + // return the valid prefix. + return ap + } + } + } + return ap +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go new file mode 100644 index 0000000000..e2b4e431ce --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go @@ -0,0 +1,304 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package convert + +import ( + "context" + "fmt" + "reflect" + "sort" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +func tftypeFromCtyType(in cty.Type) (tftypes.Type, error) { + switch { + case in.Equals(cty.String): + return tftypes.String, nil + case in.Equals(cty.Number): + return tftypes.Number, nil + case in.Equals(cty.Bool): + return tftypes.Bool, nil + case in.Equals(cty.DynamicPseudoType): + return tftypes.DynamicPseudoType, nil + case in.IsSetType(): + elemType, err := tftypeFromCtyType(in.ElementType()) + if err != nil { + return nil, err + } + return tftypes.Set{ + ElementType: elemType, + }, nil + case in.IsListType(): + elemType, err := tftypeFromCtyType(in.ElementType()) + if err != nil { + return nil, err + } + return tftypes.List{ + ElementType: elemType, + }, nil + case in.IsTupleType(): + elemTypes := make([]tftypes.Type, 0, in.Length()) + for _, typ := range in.TupleElementTypes() { + elemType, err := tftypeFromCtyType(typ) + if err != nil { + return nil, err + } + elemTypes = append(elemTypes, elemType) + } + return tftypes.Tuple{ + ElementTypes: elemTypes, + }, nil + case in.IsMapType(): + elemType, err := tftypeFromCtyType(in.ElementType()) + if err != nil { + return nil, err + } + return tftypes.Map{ + ElementType: elemType, + }, nil + case in.IsObjectType(): + attrTypes := make(map[string]tftypes.Type) + for key, typ := range in.AttributeTypes() { + attrType, err := tftypeFromCtyType(typ) + if err != nil { + return nil, err + } + attrTypes[key] = attrType + } + return tftypes.Object{ + AttributeTypes: attrTypes, + }, nil + } + return nil, fmt.Errorf("unknown cty type %s", in.GoString()) +} + +func ctyTypeFromTFType(in tftypes.Type) (cty.Type, error) { + switch { + case in.Is(tftypes.String): + return cty.String, nil + case in.Is(tftypes.Bool): + return cty.Bool, nil + case in.Is(tftypes.Number): + return cty.Number, nil + case in.Is(tftypes.DynamicPseudoType): + return cty.DynamicPseudoType, nil + case in.Is(tftypes.List{}): + elemType, err := ctyTypeFromTFType(in.(tftypes.List).ElementType) + if err != nil { + return cty.Type{}, err + } + return cty.List(elemType), nil + case in.Is(tftypes.Set{}): + elemType, err := ctyTypeFromTFType(in.(tftypes.Set).ElementType) + 
if err != nil { + return cty.Type{}, err + } + return cty.Set(elemType), nil + case in.Is(tftypes.Map{}): + elemType, err := ctyTypeFromTFType(in.(tftypes.Map).ElementType) + if err != nil { + return cty.Type{}, err + } + return cty.Map(elemType), nil + case in.Is(tftypes.Tuple{}): + elemTypes := make([]cty.Type, 0, len(in.(tftypes.Tuple).ElementTypes)) + for _, typ := range in.(tftypes.Tuple).ElementTypes { + elemType, err := ctyTypeFromTFType(typ) + if err != nil { + return cty.Type{}, err + } + elemTypes = append(elemTypes, elemType) + } + return cty.Tuple(elemTypes), nil + case in.Is(tftypes.Object{}): + attrTypes := make(map[string]cty.Type, len(in.(tftypes.Object).AttributeTypes)) + for k, v := range in.(tftypes.Object).AttributeTypes { + attrType, err := ctyTypeFromTFType(v) + if err != nil { + return cty.Type{}, err + } + attrTypes[k] = attrType + } + return cty.Object(attrTypes), nil + } + return cty.Type{}, fmt.Errorf("unknown tftypes.Type %s", in) +} + +// ConfigSchemaToProto takes a *configschema.Block and converts it to a +// tfprotov5.SchemaBlock for a grpc response. +func ConfigSchemaToProto(ctx context.Context, b *configschema.Block) *tfprotov5.SchemaBlock { + block := &tfprotov5.SchemaBlock{ + Description: b.Description, + DescriptionKind: protoStringKind(ctx, b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, name := range sortedKeys(b.Attributes) { + a := b.Attributes[name] + + attr := &tfprotov5.SchemaAttribute{ + Name: name, + Description: a.Description, + DescriptionKind: protoStringKind(ctx, a.DescriptionKind), + Optional: a.Optional, + Computed: a.Computed, + Required: a.Required, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + var err error + attr.Type, err = tftypeFromCtyType(a.Type) + if err != nil { + panic(err) + } + + block.Attributes = append(block.Attributes, attr) + } + + for _, name := range sortedKeys(b.BlockTypes) { + b := b.BlockTypes[name] + block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(ctx, name, b)) + } + + return block +} + +func protoStringKind(ctx context.Context, k configschema.StringKind) tfprotov5.StringKind { + switch k { + default: + logging.HelperSchemaTrace(ctx, fmt.Sprintf("Unexpected configschema.StringKind: %d", k)) + return tfprotov5.StringKindPlain + case configschema.StringPlain: + return tfprotov5.StringKindPlain + case configschema.StringMarkdown: + return tfprotov5.StringKindMarkdown + } +} + +func protoSchemaNestedBlock(ctx context.Context, name string, b *configschema.NestedBlock) *tfprotov5.SchemaNestedBlock { + var nesting tfprotov5.SchemaNestedBlockNestingMode + switch b.Nesting { + case configschema.NestingSingle: + nesting = tfprotov5.SchemaNestedBlockNestingModeSingle + case configschema.NestingGroup: + nesting = tfprotov5.SchemaNestedBlockNestingModeGroup + case configschema.NestingList: + nesting = tfprotov5.SchemaNestedBlockNestingModeList + case configschema.NestingSet: + nesting = tfprotov5.SchemaNestedBlockNestingModeSet + case configschema.NestingMap: + nesting = tfprotov5.SchemaNestedBlockNestingModeMap + default: + nesting = tfprotov5.SchemaNestedBlockNestingModeInvalid + } + return &tfprotov5.SchemaNestedBlock{ + TypeName: name, + Block: ConfigSchemaToProto(ctx, &b.Block), + Nesting: nesting, + MinItems: int64(b.MinItems), + MaxItems: int64(b.MaxItems), + } +} + +// ProtoToConfigSchema takes the GetSchema_Block from a grpc response and converts it +// to a terraform *configschema.Block. 
+func ProtoToConfigSchema(ctx context.Context, b *tfprotov5.SchemaBlock) *configschema.Block { + block := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + BlockTypes: make(map[string]*configschema.NestedBlock), + + Description: b.Description, + DescriptionKind: schemaStringKind(ctx, b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, a := range b.Attributes { + attr := &configschema.Attribute{ + Description: a.Description, + DescriptionKind: schemaStringKind(ctx, a.DescriptionKind), + Required: a.Required, + Optional: a.Optional, + Computed: a.Computed, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + var err error + attr.Type, err = ctyTypeFromTFType(a.Type) + if err != nil { + panic(err) + } + + block.Attributes[a.Name] = attr + } + + for _, b := range b.BlockTypes { + block.BlockTypes[b.TypeName] = schemaNestedBlock(ctx, b) + } + + return block +} + +func schemaStringKind(ctx context.Context, k tfprotov5.StringKind) configschema.StringKind { + switch k { + default: + logging.HelperSchemaTrace(ctx, fmt.Sprintf("Unexpected tfprotov5.StringKind: %d", k)) + return configschema.StringPlain + case tfprotov5.StringKindPlain: + return configschema.StringPlain + case tfprotov5.StringKindMarkdown: + return configschema.StringMarkdown + } +} + +func schemaNestedBlock(ctx context.Context, b *tfprotov5.SchemaNestedBlock) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch b.Nesting { + case tfprotov5.SchemaNestedBlockNestingModeSingle: + nesting = configschema.NestingSingle + case tfprotov5.SchemaNestedBlockNestingModeGroup: + nesting = configschema.NestingGroup + case tfprotov5.SchemaNestedBlockNestingModeList: + nesting = configschema.NestingList + case tfprotov5.SchemaNestedBlockNestingModeMap: + nesting = configschema.NestingMap + case tfprotov5.SchemaNestedBlockNestingModeSet: + nesting = configschema.NestingSet + default: + // In all other cases we'll leave it as the zero value (invalid) and + // let the caller validate it and deal with this. + } + + nb := &configschema.NestedBlock{ + Nesting: nesting, + MinItems: int(b.MinItems), + MaxItems: int(b.MaxItems), + } + + nested := ProtoToConfigSchema(ctx, b.Block) + nb.Block = *nested + return nb +} + +// sortedKeys returns the lexically sorted keys from the given map. This is +// used to make schema conversions deterministic. This panics if map keys +// are not strings. +func sortedKeys(m interface{}) []string { + v := reflect.ValueOf(m) + keys := make([]string, v.Len()) + + mapKeys := v.MapKeys() + for i, k := range mapKeys { + keys[i] = k.Interface().(string) + } + + sort.Strings(keys) + return keys +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go new file mode 100644 index 0000000000..d3cb35bcec --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go @@ -0,0 +1,95 @@ +// Copyright (c) HashiCorp, Inc.
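The sortedKeys helper at the end of the conversion file above is what keeps attribute and block ordering stable across runs. A self-contained sketch of the same reflection approach:

package main

import (
	"fmt"
	"reflect"
	"sort"
)

// sortedKeys mirrors the vendored helper: reflect over any map with string
// keys and return the keys lexically sorted; the type assertion panics if a
// key is not a string.
func sortedKeys(m interface{}) []string {
	v := reflect.ValueOf(m)
	keys := make([]string, v.Len())
	for i, k := range v.MapKeys() {
		keys[i] = k.Interface().(string)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	fmt.Println(sortedKeys(map[string]int{"b": 2, "a": 1, "c": 3}))
}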
+// SPDX-License-Identifier: MPL-2.0 + +package plugintest + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/hashicorp/go-version" + install "github.com/hashicorp/hc-install" + "github.com/hashicorp/hc-install/checkpoint" + "github.com/hashicorp/hc-install/fs" + "github.com/hashicorp/hc-install/product" + "github.com/hashicorp/hc-install/releases" + "github.com/hashicorp/hc-install/src" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +// Config is used to configure the test helper. In most normal test programs +// the configuration is discovered automatically by an Init* function using +// DiscoverConfig, but this is exposed so that more complex scenarios can be +// implemented by direct configuration. +type Config struct { + SourceDir string + TerraformExec string + execTempDir string + PreviousPluginExec string +} + +// DiscoverConfig uses environment variables and other means to automatically +// discover a reasonable test helper configuration. +func DiscoverConfig(ctx context.Context, sourceDir string) (*Config, error) { + tfVersion := strings.TrimPrefix(os.Getenv(EnvTfAccTerraformVersion), "v") + tfPath := os.Getenv(EnvTfAccTerraformPath) + + tempDir := os.Getenv(EnvTfAccTempDir) + tfDir, err := os.MkdirTemp(tempDir, "plugintest-terraform") + if err != nil { + return nil, fmt.Errorf("failed to create temp dir: %w", err) + } + + var sources []src.Source + switch { + case tfPath != "": + logging.HelperResourceTrace(ctx, fmt.Sprintf("Adding potential Terraform CLI source of exact path: %s", tfPath)) + + sources = append(sources, &fs.AnyVersion{ + ExactBinPath: tfPath, + }) + case tfVersion != "": + tfVersion, err := version.NewVersion(tfVersion) + + if err != nil { + return nil, fmt.Errorf("invalid Terraform version: %w", err) + } + + logging.HelperResourceTrace(ctx, fmt.Sprintf("Adding potential Terraform CLI source of releases.hashicorp.com exact version %q for installation in: %s", tfVersion, tfDir)) + + sources = append(sources, &releases.ExactVersion{ + InstallDir: tfDir, + Product: product.Terraform, + Version: tfVersion, + }) + default: + logging.HelperResourceTrace(ctx, "Adding potential Terraform CLI source of local filesystem PATH lookup") + logging.HelperResourceTrace(ctx, fmt.Sprintf("Adding potential Terraform CLI source of checkpoint.hashicorp.com latest version for installation in: %s", tfDir)) + + sources = append(sources, &fs.AnyVersion{ + Product: &product.Terraform, + }) + sources = append(sources, &checkpoint.LatestVersion{ + InstallDir: tfDir, + Product: product.Terraform, + }) + } + + installer := install.NewInstaller() + tfExec, err := installer.Ensure(context.Background(), sources) + if err != nil { + return nil, fmt.Errorf("failed to find or install Terraform CLI from %+v: %w", sources, err) + } + + ctx = logging.TestTerraformPathContext(ctx, tfExec) + + logging.HelperResourceDebug(ctx, "Found Terraform CLI") + + return &Config{ + SourceDir: sourceDir, + TerraformExec: tfExec, + execTempDir: tfDir, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/doc.go new file mode 100644 index 0000000000..1b34a0b233 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/doc.go @@ -0,0 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package plugintest contains utilities to help with writing tests for +// Terraform plugins. 
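The version branch of DiscoverConfig above boils down to hc-install's ExactVersion source. A minimal sketch of that flow in isolation (the version string and install directory are illustrative values, not defaults taken from the SDK):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/go-version"
	install "github.com/hashicorp/hc-install"
	"github.com/hashicorp/hc-install/product"
	"github.com/hashicorp/hc-install/releases"
	"github.com/hashicorp/hc-install/src"
)

func main() {
	// Install an exact Terraform CLI release and report the binary path,
	// mirroring the TF_ACC_TERRAFORM_VERSION branch of DiscoverConfig.
	installer := install.NewInstaller()
	execPath, err := installer.Ensure(context.Background(), []src.Source{
		&releases.ExactVersion{
			Product:    product.Terraform,
			Version:    version.Must(version.NewVersion("1.5.7")),
			InstallDir: "/tmp/plugintest-terraform",
		},
	})
	if err != nil {
		log.Fatalf("failed to install Terraform CLI: %s", err)
	}
	fmt.Println(execPath)
}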
+// +// This is not a package for testing configurations or modules written in the +// Terraform language. It is for testing the plugins that allow Terraform to +// manage various cloud services and other APIs. +package plugintest diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go new file mode 100644 index 0000000000..6df86f89f8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/environment_variables.go @@ -0,0 +1,111 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugintest + +// Environment variables +const ( + // Environment variable with acceptance testing temporary directory for + // testing files and Terraform CLI installation, if installation is + // required. By default, the operating system temporary directory is used. + // + // Setting TF_ACC_TERRAFORM_PATH does not override this value for Terraform + // CLI installation, if installation is required. + EnvTfAccTempDir = "TF_ACC_TEMP_DIR" + + // Environment variable with level to filter Terraform logs during + // acceptance testing. This value sets TF_LOG in a safe manner when + // executing Terraform CLI commands, which would otherwise interfere + // with the testing framework using TF_LOG to set the Go standard library + // log package level. + // + // This value takes precedence over TF_LOG_CORE, due to precedence rules + // in the Terraform core code, so it is not possible to set this to a level + // and also TF_LOG_CORE=OFF. Use TF_LOG_CORE and TF_LOG_PROVIDER in that + // case instead. + // + // If not set, but TF_ACC_LOG_PATH or TF_LOG_PATH_MASK is set, it defaults + // to TRACE. If Terraform CLI is version 0.14 or earlier, it will have no + // separate effect from the TF_ACC_LOG_PATH or TF_LOG_PATH_MASK behavior, + // as those earlier versions of Terraform are unreliable with the logging + // level being outside TRACE. + EnvTfAccLog = "TF_ACC_LOG" + + // Environment variable with path to save Terraform logs during acceptance + // testing. This value sets TF_LOG_PATH in a safe manner when executing + // Terraform CLI commands, which would otherwise be ignored since it could + // interfere with how the underlying execution is performed. + // + // If TF_LOG_PATH_MASK is set, it takes precedence over this value. + EnvTfAccLogPath = "TF_ACC_LOG_PATH" + + // Environment variable with level to filter Terraform core logs during + // acceptance testing. This value sets TF_LOG_CORE separate from + // TF_LOG_PROVIDER when calling Terraform. + // + // This value has no effect when TF_ACC_LOG is set (which sets Terraform's + // TF_LOG), due to precedence rules in the Terraform core code. Use + // TF_LOG_CORE and TF_LOG_PROVIDER in that case instead. + // + // If not set, defaults to TF_ACC_LOG behaviors. + EnvTfLogCore = "TF_LOG_CORE" + + // Environment variable with path containing the string %s, which is + // replaced with the test name, to save separate Terraform logs during + // acceptance testing. This value sets TF_LOG_PATH in a safe manner when + // executing Terraform CLI commands, which would otherwise be ignored since + // it could interfere with how the underlying execution is performed. + // + // Takes precedence over TF_ACC_LOG_PATH. + EnvTfLogPathMask = "TF_LOG_PATH_MASK" + + // Environment variable with level to filter Terraform provider logs during + // acceptance testing.
This value sets TF_LOG_PROVIDER separate from + // TF_LOG_CORE. + // + // During testing, this only affects external providers whose logging goes + // through Terraform. The logging for the provider under test is controlled + // by the testing framework as it is running the provider code. Provider + // code using the Go standard library log package is controlled by TF_LOG + // for historical compatibility. + // + // This value takes precedence over TF_ACC_LOG for external provider logs, + // due to rules in the Terraform core code. + // + // If not set, defaults to TF_ACC_LOG behaviors. + EnvTfLogProvider = "TF_LOG_PROVIDER" + + // Environment variable with acceptance testing Terraform CLI version to + // download from releases.hashicorp.com, checksum verify, and install. The + // value can be any valid Terraform CLI version, such as 1.1.6, with or + // without a prepended v character. + // + // Setting this value takes precedence over using an available Terraform + // binary in the operating system PATH, or if not found, installing the + // latest version according to checkpoint.hashicorp.com. + // + // By default, the binary is installed in the operating system temporary + // directory, however that directory can be overridden with the + // TF_ACC_TEMP_DIR environment variable. + // + // If TF_ACC_TERRAFORM_PATH is also set, this installation method is + // only invoked when a binary does not exist at that path. No version + // checks are performed against an existing TF_ACC_TERRAFORM_PATH. + EnvTfAccTerraformVersion = "TF_ACC_TERRAFORM_VERSION" + + // Acceptance testing path to Terraform CLI binary. + // + // Setting this value takes precedence over using an available Terraform + // binary in the operating system PATH, or if not found, installing the + // latest version according to checkpoint.hashicorp.com. This value does + // not override TF_ACC_TEMP_DIR for Terraform CLI installation, if + // installation is required. + // + // If TF_ACC_TERRAFORM_VERSION is not set, the binary must exist and be + // executable, or an error will be returned. + // + // If TF_ACC_TERRAFORM_VERSION is also set, that Terraform CLI version + // will be installed if a binary is not found at the given path. No version + // checks are performed against an existing binary. + EnvTfAccTerraformPath = "TF_ACC_TERRAFORM_PATH" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go new file mode 100644 index 0000000000..77f8739800 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugintest + +import ( + "fmt" +) + +// TestControl is an interface requiring a subset of *testing.T which is used +// by the test guards and helpers in this package. Most callers can simply +// pass their *testing.T value here, but the interface allows other +// implementations to potentially be provided instead, for example to allow +// meta-testing (testing of the test utilities themselves). +// +// This interface also describes the subset of normal test functionality the +// guards and helpers can perform: they can only create log lines, fail tests, +// and skip tests. All other test control is the responsibility of the main +// test code.
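Because *testing.T already satisfies this interface, most callers pass their test value straight through; the abstraction mainly enables meta-testing. A small sketch of a stand-in implementation, with the interface restated locally so the snippet compiles on its own:

package main

import "fmt"

// testControl restates the TestControl subset above for this sketch.
type testControl interface {
	Helper()
	Log(args ...interface{})
	FailNow()
	SkipNow()
	Name() string
}

// fakeT is a minimal stand-in, e.g. for testing the guards themselves.
type fakeT struct{ failed bool }

func (f *fakeT) Helper()                 {}
func (f *fakeT) Log(args ...interface{}) { fmt.Println(args...) }
func (f *fakeT) FailNow()                { f.failed = true }
func (f *fakeT) SkipNow()                {}
func (f *fakeT) Name() string            { return "fakeT" }

func main() {
	var tc testControl = &fakeT{}
	tc.Log("running guard for", tc.Name())
}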
+type TestControl interface { + Helper() + Log(args ...interface{}) + FailNow() + SkipNow() + Name() string +} + +// testingT wraps a TestControl to recover some of the convenience behaviors +// that would normally come from a real *testing.T, so we can keep TestControl +// small while still having these conveniences. This is an abstraction +// inversion, but accepted because it makes the public API more convenient +// without any considerable disadvantage. +type testingT struct { + TestControl +} + +func (t testingT) Logf(f string, args ...interface{}) { + t.Helper() + t.Log(fmt.Sprintf(f, args...)) +} + +func (t testingT) Fatalf(f string, args ...interface{}) { + t.Helper() + t.Log(fmt.Sprintf(f, args...)) + t.FailNow() +} + +func (t testingT) Skipf(f string, args ...interface{}) { + t.Helper() + t.Log(fmt.Sprintf(f, args...)) + t.SkipNow() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go new file mode 100644 index 0000000000..f961788721 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go @@ -0,0 +1,290 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugintest + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/hashicorp/terraform-exec/tfexec" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +// AutoInitProviderHelper is the main entrypoint for testing provider plugins +// using this package. It is intended to be called during TestMain to prepare +// for provider testing. +// +// AutoInitProviderHelper will discover the location of a current Terraform CLI +// executable to test against, detect whether a prior version of the plugin is +// available for upgrade tests, and then will return an object containing the +// results of that initialization which can then be stored in a global variable +// for use in other tests. +func AutoInitProviderHelper(ctx context.Context, sourceDir string) *Helper { + helper, err := AutoInitHelper(ctx, sourceDir) + if err != nil { + fmt.Fprintf(os.Stderr, "cannot run Terraform provider tests: %s\n", err) + os.Exit(1) + } + return helper +} + +// Helper is intended as a per-package singleton created in TestMain which +// other tests in a package can use to create Terraform execution contexts +type Helper struct { + baseDir string + + // sourceDir is the dir containing the provider source code, needed + // for tests that use fixture files. + sourceDir string + terraformExec string + + // execTempDir is created during DiscoverConfig to store any downloaded + // binaries + execTempDir string +} + +// AutoInitHelper uses the auto-discovery behavior of DiscoverConfig to prepare +// a configuration and then calls InitHelper with it. This is a convenient +// way to get the standard init behavior based on environment variables, and +// callers should use this unless they have an unusual requirement that calls +// for constructing a config in a different way. +func AutoInitHelper(ctx context.Context, sourceDir string) (*Helper, error) { + config, err := DiscoverConfig(ctx, sourceDir) + if err != nil { + return nil, err + } + + return InitHelper(ctx, config) +} + +// InitHelper prepares a testing helper with the given configuration. +// +// For most callers it is sufficient to call AutoInitHelper instead, which +// will construct a configuration automatically based on certain environment +// variables. 
+// +// If this function returns an error then it may have left some temporary files +// behind in the system's temporary directory. There is currently no way to +// automatically clean those up. +func InitHelper(ctx context.Context, config *Config) (*Helper, error) { + tempDir := os.Getenv(EnvTfAccTempDir) + baseDir, err := os.MkdirTemp(tempDir, "plugintest") + if err != nil { + return nil, fmt.Errorf("failed to create temporary directory for test helper: %s", err) + } + + return &Helper{ + baseDir: baseDir, + sourceDir: config.SourceDir, + terraformExec: config.TerraformExec, + execTempDir: config.execTempDir, + }, nil +} + +// Close cleans up temporary files and directories created to support this +// helper, returning an error if any of the cleanup fails. +// +// Call this before returning from TestMain to minimize the amount of detritus +// left behind in the filesystem after the tests complete. +func (h *Helper) Close() error { + if h.execTempDir != "" { + err := os.RemoveAll(h.execTempDir) + if err != nil { + return err + } + } + return os.RemoveAll(h.baseDir) +} + +// NewWorkingDir creates a new working directory for use in the implementation +// of a single test, returning a WorkingDir object representing that directory. +// +// If the working directory object is not itself closed by the time the test +// program exits, the Close method on the helper itself will attempt to +// delete it. +func (h *Helper) NewWorkingDir(ctx context.Context, t TestControl) (*WorkingDir, error) { + dir, err := os.MkdirTemp(h.baseDir, "work") + if err != nil { + return nil, err + } + + ctx = logging.TestWorkingDirectoryContext(ctx, dir) + + // symlink the provider source files into the config directory + // e.g. testdata + logging.HelperResourceTrace(ctx, "Symlinking source directories to work directory") + err = symlinkDirectoriesOnly(h.sourceDir, dir) + if err != nil { + return nil, err + } + + tf, err := tfexec.NewTerraform(dir, h.terraformExec) + + if err != nil { + return nil, fmt.Errorf("unable to create terraform-exec instance: %w", err) + } + + err = tf.SetDisablePluginTLS(true) + + if err != nil { + return nil, fmt.Errorf("unable to disable terraform-exec plugin TLS: %w", err) + } + + err = tf.SetSkipProviderVerify(true) // Only required for Terraform CLI 0.12.x + + var mismatch *tfexec.ErrVersionMismatch + if err != nil && !errors.As(err, &mismatch) { + return nil, fmt.Errorf("unable to disable terraform-exec provider verification: %w", err) + } + + tfAccLog := os.Getenv(EnvTfAccLog) + tfAccLogPath := os.Getenv(EnvTfAccLogPath) + tfLogCore := os.Getenv(EnvTfLogCore) + tfLogPathMask := os.Getenv(EnvTfLogPathMask) + tfLogProvider := os.Getenv(EnvTfLogProvider) + + if tfAccLog != "" && tfLogCore != "" { + err = fmt.Errorf( + "Invalid environment variable configuration. Cannot set both TF_ACC_LOG and TF_LOG_CORE. " + + "Use TF_LOG_CORE and TF_LOG_PROVIDER to separately control the Terraform CLI logging subsystems. 
" + + "To control the Go standard library log package for the provider under test, use TF_LOG.", + ) + logging.HelperResourceError(ctx, err.Error()) + return nil, err + } + + if tfAccLog != "" { + logging.HelperResourceTrace( + ctx, + fmt.Sprintf("Setting terraform-exec log level via %s environment variable, if Terraform CLI is version 0.15 or later", EnvTfAccLog), + map[string]interface{}{logging.KeyTestTerraformLogLevel: tfAccLog}, + ) + + err := tf.SetLog(tfAccLog) + + if err != nil { + if !errors.As(err, new(*tfexec.ErrVersionMismatch)) { + logging.HelperResourceError( + ctx, + "Unable to set terraform-exec log level", + map[string]interface{}{logging.KeyError: err.Error()}, + ) + return nil, fmt.Errorf("unable to set terraform-exec log level (%s): %w", tfAccLog, err) + } + + logging.HelperResourceWarn( + ctx, + fmt.Sprintf("Unable to set terraform-exec log level via %s environment variable, as Terraform CLI is version 0.14 or earlier. It will default to TRACE.", EnvTfAccLog), + map[string]interface{}{logging.KeyTestTerraformLogLevel: "TRACE"}, + ) + } + } + + if tfLogCore != "" { + logging.HelperResourceTrace( + ctx, + fmt.Sprintf("Setting terraform-exec core log level via %s environment variable, if Terraform CLI is version 0.15 or later", EnvTfLogCore), + map[string]interface{}{ + logging.KeyTestTerraformLogCoreLevel: tfLogCore, + }, + ) + + err := tf.SetLogCore(tfLogCore) + + if err != nil { + logging.HelperResourceError( + ctx, + "Unable to set terraform-exec core log level", + map[string]interface{}{logging.KeyError: err.Error()}, + ) + return nil, fmt.Errorf("unable to set terraform-exec core log level (%s): %w", tfLogCore, err) + } + } + + if tfLogProvider != "" { + logging.HelperResourceTrace( + ctx, + fmt.Sprintf("Setting terraform-exec provider log level via %s environment variable, if Terraform CLI is version 0.15 or later", EnvTfLogProvider), + map[string]interface{}{ + logging.KeyTestTerraformLogCoreLevel: tfLogProvider, + }, + ) + + err := tf.SetLogProvider(tfLogProvider) + + if err != nil { + logging.HelperResourceError( + ctx, + "Unable to set terraform-exec provider log level", + map[string]interface{}{logging.KeyError: err.Error()}, + ) + return nil, fmt.Errorf("unable to set terraform-exec provider log level (%s): %w", tfLogProvider, err) + } + } + + var logPath, logPathEnvVar string + + if tfAccLogPath != "" { + logPath = tfAccLogPath + logPathEnvVar = EnvTfAccLogPath + } + + // Similar to helper/logging.LogOutput() and + // terraform-plugin-log/tfsdklog.RegisterTestSink(), the TF_LOG_PATH_MASK + // environment variable should take precedence over TF_ACC_LOG_PATH. + if tfLogPathMask != "" { + // Escape special characters which may appear if we have subtests + testName := strings.Replace(t.Name(), "/", "__", -1) + logPath = fmt.Sprintf(tfLogPathMask, testName) + logPathEnvVar = EnvTfLogPathMask + } + + if logPath != "" { + logging.HelperResourceTrace( + ctx, + fmt.Sprintf("Setting terraform-exec log path via %s environment variable", logPathEnvVar), + map[string]interface{}{logging.KeyTestTerraformLogPath: logPath}, + ) + + if err := tf.SetLogPath(logPath); err != nil { + return nil, fmt.Errorf("unable to set terraform-exec log path (%s): %w", logPath, err) + } + } + + return &WorkingDir{ + h: h, + tf: tf, + baseDir: dir, + terraformExec: h.terraformExec, + }, nil +} + +// RequireNewWorkingDir is a variant of NewWorkingDir that takes a TestControl +// object and will immediately fail the running test if the creation of the +// working directory fails. 
+func (h *Helper) RequireNewWorkingDir(ctx context.Context, t TestControl) *WorkingDir { + t.Helper() + + wd, err := h.NewWorkingDir(ctx, t) + if err != nil { + t := testingT{t} + t.Fatalf("failed to create new working directory: %s", err) + return nil + } + return wd +} + +// WorkingDirectory returns the working directory being used when running tests. +func (h *Helper) WorkingDirectory() string { + return h.baseDir +} + +// TerraformExecPath returns the location of the Terraform CLI executable that +// should be used when running tests. +func (h *Helper) TerraformExecPath() string { + return h.terraformExec +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go new file mode 100644 index 0000000000..0d4bbe5266 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugintest + +import ( + "fmt" + "os" + "path/filepath" +) + +func symlinkFile(src string, dest string) error { + err := os.Symlink(src, dest) + + if err != nil { + return fmt.Errorf("unable to symlink %q to %q: %w", src, dest, err) + } + + srcInfo, err := os.Stat(src) + + if err != nil { + return fmt.Errorf("unable to stat %q: %w", src, err) + } + + err = os.Chmod(dest, srcInfo.Mode()) + + if err != nil { + return fmt.Errorf("unable to set %q permissions: %w", dest, err) + } + + return nil +} + +// symlinkDirectoriesOnly finds only the first-level child directories in srcDir +// and symlinks them into destDir. +// Unlike symlinkDir, this is done non-recursively in order to limit the number +// of file descriptors used. +func symlinkDirectoriesOnly(srcDir string, destDir string) error { + srcInfo, err := os.Stat(srcDir) + if err != nil { + return fmt.Errorf("unable to stat source directory %q: %w", srcDir, err) + } + + err = os.MkdirAll(destDir, srcInfo.Mode()) + if err != nil { + return fmt.Errorf("unable to make destination directory %q: %w", destDir, err) + } + + dirEntries, err := os.ReadDir(srcDir) + + if err != nil { + return fmt.Errorf("unable to read source directory %q: %w", srcDir, err) + } + + for _, dirEntry := range dirEntries { + if !dirEntry.IsDir() { + continue + } + + srcPath := filepath.Join(srcDir, dirEntry.Name()) + destPath := filepath.Join(destDir, dirEntry.Name()) + err := symlinkFile(srcPath, destPath) + + if err != nil { + return fmt.Errorf("unable to symlink directory %q to %q: %w", srcPath, destPath, err) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go new file mode 100644 index 0000000000..05b0284420 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go @@ -0,0 +1,371 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package plugintest + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/hashicorp/terraform-exec/tfexec" + tfjson "github.com/hashicorp/terraform-json" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging" +) + +const ( + ConfigFileName = "terraform_plugin_test.tf" + ConfigFileNameJSON = ConfigFileName + ".json" + PlanFileName = "tfplan" +) + +// WorkingDir represents a distinct working directory that can be used for +// running tests. Each test should construct its own WorkingDir by calling +// NewWorkingDir or RequireNewWorkingDir on its package's singleton +// plugintest.Helper. +type WorkingDir struct { + h *Helper + + // baseDir is the root of the working directory tree + baseDir string + + // configFilename is the full filename where the latest configuration + // was stored; empty until SetConfig is called. + configFilename string + + // tf is the instance of tfexec.Terraform used for running Terraform commands + tf *tfexec.Terraform + + // terraformExec is a path to a terraform binary, inherited from Helper + terraformExec string + + // reattachInfo stores the gRPC socket info required for Terraform's + // plugin reattach functionality + reattachInfo tfexec.ReattachInfo +} + +// Close deletes the directories and files created to represent the receiving +// working directory. After this method is called, the working directory object +// is invalid and may no longer be used. +func (wd *WorkingDir) Close() error { + return os.RemoveAll(wd.baseDir) +} + +func (wd *WorkingDir) SetReattachInfo(ctx context.Context, reattachInfo tfexec.ReattachInfo) { + logging.HelperResourceTrace(ctx, "Setting Terraform CLI reattach configuration", map[string]interface{}{"tf_reattach_config": reattachInfo}) + wd.reattachInfo = reattachInfo +} + +func (wd *WorkingDir) UnsetReattachInfo() { + wd.reattachInfo = nil +} + +// GetHelper returns the Helper set on the WorkingDir. +func (wd *WorkingDir) GetHelper() *Helper { + return wd.h +} + +// SetConfig sets a new configuration for the working directory. +// +// This must be called at least once before any call to Init, Plan, Apply, or +// Destroy to establish the configuration. Any previously-set configuration is +// discarded and any saved plan is cleared. +func (wd *WorkingDir) SetConfig(ctx context.Context, cfg string) error { + logging.HelperResourceTrace(ctx, "Setting Terraform configuration", map[string]any{logging.KeyTestTerraformConfiguration: cfg}) + + outFilename := filepath.Join(wd.baseDir, ConfigFileName) + rmFilename := filepath.Join(wd.baseDir, ConfigFileNameJSON) + bCfg := []byte(cfg) + if json.Valid(bCfg) { + outFilename, rmFilename = rmFilename, outFilename + } + if err := os.Remove(rmFilename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to remove %q: %w", rmFilename, err) + } + err := os.WriteFile(outFilename, bCfg, 0700) + if err != nil { + return err + } + wd.configFilename = outFilename + + // Changing configuration invalidates any saved plan. + err = wd.ClearPlan(ctx) + if err != nil { + return err + } + return nil +} + +// ClearState deletes any Terraform state present in the working directory. +// +// Any remote objects tracked by the state are not destroyed first, so this +// will leave them dangling in the remote system. 
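A short sketch of the file-selection behavior in SetConfig above: a configuration body that parses as JSON is written to terraform_plugin_test.tf.json, anything else to terraform_plugin_test.tf. This restates the json.Valid branch shown in the diff; the sample configurations are invented:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	for _, cfg := range []string{
		`resource "null_resource" "a" {}`,         // HCL syntax
		`{"resource":{"null_resource":{"a":{}}}}`, // JSON syntax
	} {
		name := "terraform_plugin_test.tf"
		if json.Valid([]byte(cfg)) {
			name += ".json"
		}
		fmt.Printf("%s -> %s\n", cfg, name)
	}
}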
+func (wd *WorkingDir) ClearState(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Clearing Terraform state") + + err := os.Remove(filepath.Join(wd.baseDir, "terraform.tfstate")) + + if os.IsNotExist(err) { + logging.HelperResourceTrace(ctx, "No Terraform state to clear") + return nil + } + + if err != nil { + return err + } + + logging.HelperResourceTrace(ctx, "Cleared Terraform state") + + return nil +} + +// ClearPlan deletes any saved plan present in the working directory. +func (wd *WorkingDir) ClearPlan(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Clearing Terraform plan") + + err := os.Remove(wd.planFilename()) + + if os.IsNotExist(err) { + logging.HelperResourceTrace(ctx, "No Terraform plan to clear") + return nil + } + + if err != nil { + return err + } + + logging.HelperResourceTrace(ctx, "Cleared Terraform plan") + + return nil +} + +var errWorkingDirSetConfigNotCalled = fmt.Errorf("must call SetConfig before Init") + +// Init runs "terraform init" for the given working directory, forcing Terraform +// to use the current version of the plugin under test. +func (wd *WorkingDir) Init(ctx context.Context) error { + if wd.configFilename == "" { + return errWorkingDirSetConfigNotCalled + } + if _, err := os.Stat(wd.configFilename); err != nil { + return errWorkingDirSetConfigNotCalled + } + + logging.HelperResourceTrace(ctx, "Calling Terraform CLI init command") + + // -upgrade=true is required for per-TestStep provider version changes + // e.g. TestTest_TestStep_ExternalProviders_DifferentVersions + err := wd.tf.Init(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Upgrade(true)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI init command") + + return err +} + +func (wd *WorkingDir) planFilename() string { + return filepath.Join(wd.baseDir, PlanFileName) +} + +// CreatePlan runs "terraform plan" to create a saved plan file, which if successful +// will then be used for the next call to Apply. +func (wd *WorkingDir) CreatePlan(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI plan command") + + hasChanges, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out(PlanFileName)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI plan command") + + if err != nil { + return err + } + + if !hasChanges { + logging.HelperResourceTrace(ctx, "Created plan with no changes") + + return nil + } + + stdout, err := wd.SavedPlanRawStdout(ctx) + + if err != nil { + return fmt.Errorf("error retrieving formatted plan output: %w", err) + } + + logging.HelperResourceTrace(ctx, "Created plan with changes", map[string]any{logging.KeyTestTerraformPlan: stdout}) + + return nil +} + +// CreateDestroyPlan runs "terraform plan -destroy" to create a saved plan +// file, which if successful will then be used for the next call to Apply. 
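Taken together, the methods above support the usual config/init/plan/apply cycle. A sketch of that ordering, treating the internal plugintest package as importable for illustration (the applyFixture helper and its configuration string are hypothetical):

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest"
)

// applyFixture shows the required order of operations: SetConfig must precede
// Init, and Apply consumes the plan saved by CreatePlan when one exists.
func applyFixture(ctx context.Context, wd *plugintest.WorkingDir) error {
	if err := wd.SetConfig(ctx, `resource "null_resource" "example" {}`); err != nil {
		return err
	}
	if err := wd.Init(ctx); err != nil {
		return err
	}
	if err := wd.CreatePlan(ctx); err != nil {
		return err
	}
	return wd.Apply(ctx)
}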
+func (wd *WorkingDir) CreateDestroyPlan(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI plan -destroy command") + + hasChanges, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out(PlanFileName), tfexec.Destroy(true)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI plan -destroy command") + + if err != nil { + return err + } + + if !hasChanges { + logging.HelperResourceTrace(ctx, "Created destroy plan with no changes") + + return nil + } + + stdout, err := wd.SavedPlanRawStdout(ctx) + + if err != nil { + return fmt.Errorf("error retrieving formatted plan output: %w", err) + } + + logging.HelperResourceTrace(ctx, "Created destroy plan with changes", map[string]any{logging.KeyTestTerraformPlan: stdout}) + + return nil +} + +// Apply runs "terraform apply". If CreatePlan has previously completed +// successfully and the saved plan has not been cleared in the meantime then +// this will apply the saved plan. Otherwise, it will implicitly create a new +// plan and apply it. +func (wd *WorkingDir) Apply(ctx context.Context) error { + args := []tfexec.ApplyOption{tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)} + if wd.HasSavedPlan() { + args = append(args, tfexec.DirOrPlan(PlanFileName)) + } + + logging.HelperResourceTrace(ctx, "Calling Terraform CLI apply command") + + err := wd.tf.Apply(context.Background(), args...) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI apply command") + + return err +} + +// Destroy runs "terraform destroy". It does not consider or modify any saved +// plan, and is primarily for cleaning up at the end of a test run. +// +// If destroy fails then remote objects might still exist, and continue to +// exist after a particular test is concluded. +func (wd *WorkingDir) Destroy(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI destroy command") + + err := wd.tf.Destroy(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI destroy command") + + return err +} + +// HasSavedPlan returns true if there is a saved plan in the working directory. If +// so, a subsequent call to Apply will apply that saved plan. +func (wd *WorkingDir) HasSavedPlan() bool { + _, err := os.Stat(wd.planFilename()) + return err == nil +} + +// SavedPlan returns an object describing the current saved plan file, if any. +// +// If no plan is saved or if the plan file cannot be read, SavedPlan returns +// an error. +func (wd *WorkingDir) SavedPlan(ctx context.Context) (*tfjson.Plan, error) { + if !wd.HasSavedPlan() { + return nil, fmt.Errorf("there is no current saved plan") + } + + logging.HelperResourceTrace(ctx, "Calling Terraform CLI show command for JSON plan") + + plan, err := wd.tf.ShowPlanFile(context.Background(), wd.planFilename(), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI show command for JSON plan") + + return plan, err +} + +// SavedPlanRawStdout returns a human readable stdout capture of the current saved plan file, if any. +// +// If no plan is saved or if the plan file cannot be read, SavedPlanRawStdout returns +// an error.
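Since SavedPlan above returns a terraform-json value, callers can inspect planned changes directly. A sketch (describePlan is a hypothetical helper; ResourceChanges, Address, and Change.Actions are part of the tfjson.Plan API):

package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest"
)

// describePlan lists the resource addresses and actions in the saved plan,
// if one exists.
func describePlan(ctx context.Context, wd *plugintest.WorkingDir) error {
	if !wd.HasSavedPlan() {
		return fmt.Errorf("no saved plan to describe")
	}
	plan, err := wd.SavedPlan(ctx)
	if err != nil {
		return err
	}
	for _, rc := range plan.ResourceChanges {
		fmt.Printf("%s: %v\n", rc.Address, rc.Change.Actions)
	}
	return nil
}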
+func (wd *WorkingDir) SavedPlanRawStdout(ctx context.Context) (string, error) { + if !wd.HasSavedPlan() { + return "", fmt.Errorf("there is no current saved plan") + } + + logging.HelperResourceTrace(ctx, "Calling Terraform CLI show command for stdout plan") + + stdout, err := wd.tf.ShowPlanFileRaw(context.Background(), wd.planFilename(), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI show command for stdout plan") + + if err != nil { + return "", err + } + + return stdout, nil +} + +// State returns an object describing the current state. +// +// If the state cannot be read, State returns an error. +func (wd *WorkingDir) State(ctx context.Context) (*tfjson.State, error) { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI show command for JSON state") + + state, err := wd.tf.Show(context.Background(), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI show command for JSON state") + + return state, err +} + +// Import runs terraform import +func (wd *WorkingDir) Import(ctx context.Context, resource, id string) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI import command") + + err := wd.tf.Import(context.Background(), resource, id, tfexec.Config(wd.baseDir), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI import command") + + return err +} + +// Taint runs terraform taint +func (wd *WorkingDir) Taint(ctx context.Context, address string) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI taint command") + + err := wd.tf.Taint(context.Background(), address) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI taint command") + + return err +} + +// Refresh runs terraform refresh +func (wd *WorkingDir) Refresh(ctx context.Context) error { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI refresh command") + + err := wd.tf.Refresh(context.Background(), tfexec.Reattach(wd.reattachInfo)) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI refresh command") + + return err +} + +// Schemas returns an object describing the provider schemas. +// +// If the schemas cannot be read, Schemas returns an error. +func (wd *WorkingDir) Schemas(ctx context.Context) (*tfjson.ProviderSchemas, error) { + logging.HelperResourceTrace(ctx, "Calling Terraform CLI providers schema command") + + providerSchemas, err := wd.tf.ProvidersSchema(context.Background()) + + logging.HelperResourceTrace(ctx, "Called Terraform CLI providers schema command") + + return providerSchemas, err +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go new file mode 100644 index 0000000000..6208117cbf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/hashicorp/go-cty/cty" +) + +// FormatCtyPath is a helper function to produce a user-friendly string +// representation of a cty.Path. The result uses a syntax similar to the +// HCL expression language in the hope of it being familiar to users.
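For example, given the function below, a path through an attribute, a list index, and a map key renders in the HCL-like form this comment describes (a sketch; the attribute names are invented, and the internal tfdiags package is treated as importable for illustration):

package example

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"

	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags"
)

func pathExample() {
	path := cty.Path{
		cty.GetAttrStep{Name: "network"},
		cty.IndexStep{Key: cty.NumberIntVal(0)},
		cty.GetAttrStep{Name: "tags"},
		cty.IndexStep{Key: cty.StringVal("env")},
	}
	// Prints: .network[0].tags["env"]
	fmt.Println(tfdiags.FormatCtyPath(path))
}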
+func FormatCtyPath(path cty.Path) string { + var buf bytes.Buffer + for _, step := range path { + switch ts := step.(type) { + case cty.GetAttrStep: + fmt.Fprintf(&buf, ".%s", ts.Name) + case cty.IndexStep: + buf.WriteByte('[') + key := ts.Key + keyTy := key.Type() + switch { + case key.IsNull(): + buf.WriteString("null") + case !key.IsKnown(): + buf.WriteString("(not yet known)") + case keyTy == cty.Number: + bf := key.AsBigFloat() + buf.WriteString(bf.Text('g', -1)) + case keyTy == cty.String: + buf.WriteString(strconv.Quote(key.AsString())) + default: + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +// FormatError is a helper function to produce a user-friendly string +// representation of certain special error types that we might want to +// include in diagnostic messages. +// +// This currently has special behavior only for cty.PathError, where a +// non-empty path is rendered in an HCL-like syntax as context. +func FormatError(err error) string { + perr, ok := err.(cty.PathError) + if !ok || len(perr.Path) == 0 { + return err.Error() + } + + return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go new file mode 100644 index 0000000000..a9b5c7e83e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// AttributeValue returns a diagnostic about an attribute value in an implied current +// configuration context. This should be returned only from functions whose +// interface specifies a clear configuration context that this will be +// resolved in. +// +// The given path is relative to the implied configuration context. To describe +// a top-level attribute, it should be a single-element cty.Path with a +// cty.GetAttrStep. It's assumed that the path is returning into a structure +// that would be produced by our conventions in the configschema package; it +// may return unexpected results for structures that can't be represented by +// configschema. +// +// Since mapping attribute paths back onto configuration is an imprecise +// operation (e.g. dynamic block generation may cause the same block to be +// evaluated multiple times) the diagnostic detail should include the attribute +// name and other context required to help the user understand what is being +// referenced in case the identified source range is not unique. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. +// After context is applied, the source location is the value assigned to the +// named attribute, or the containing body's "missing item range" if no +// value is present. +func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic { + return &attributeDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + attrPath: attrPath, + } +} + +// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains +// one. Normally this is not accessed directly, and instead the config body is +// added to the Diagnostic to create a more complete message for the user.
In + some cases however, we may want to know just the name of the attribute that + generated the Diagnostic message. +// This returns a nil cty.Path if it does not exist in the Diagnostic. +func GetAttribute(d Diagnostic) cty.Path { + if d, ok := d.(*attributeDiagnostic); ok { + return d.attrPath + } + return nil +} + +type attributeDiagnostic struct { + diagnosticBase + attrPath cty.Path +} + +// WholeContainingBody returns a diagnostic about the body that is an implied +// current configuration context. This should be returned only from +// functions whose interface specifies a clear configuration context that this +// will be resolved in. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. +// After context is applied, the source location is currently the missing item +// range of the body. In future, this may change to some other suitable +// part of the containing body. +func WholeContainingBody(severity Severity, summary, detail string) Diagnostic { + return &wholeBodyDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + } +} + +type wholeBodyDiagnostic struct { + diagnosticBase +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go new file mode 100644 index 0000000000..547271346a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +type Diagnostic interface { + Severity() Severity + Description() Description +} + +type Severity rune + +// This code was previously generated with a go:generate directive calling: +// go run golang.org/x/tools/cmd/stringer -type=Severity +// However, it is now considered frozen and the tooling dependency has been +// removed. The String method can be manually updated if necessary. + +const ( + Error Severity = 'E' + Warning Severity = 'W' +) + +type Description struct { + Summary string + Detail string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go new file mode 100644 index 0000000000..505692ce51 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +// diagnosticBase can be embedded in other diagnostic structs to get +// default implementations of Severity and Description. This type also +// has default implementations of Source that return no source +// location or expression-related information, so embedders should generally +// override those methods to return more useful results where possible.
+type diagnosticBase struct { + severity Severity + summary string + detail string +} + +func (d diagnosticBase) Severity() Severity { + return d.severity +} + +func (d diagnosticBase) Description() Description { + return Description{ + Summary: d.summary, + Detail: d.detail, + } +} + +func Diag(sev Severity, summary, detail string) Diagnostic { + return &diagnosticBase{ + severity: sev, + summary: summary, + detail: detail, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go new file mode 100644 index 0000000000..4fc99c1bb7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go @@ -0,0 +1,196 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "bytes" + "fmt" + "sort" +) + +// Diagnostics is a list of diagnostics. Diagnostics is intended to be used +// where a Go "error" might normally be used, allowing richer information +// to be conveyed (more context, support for warnings). +// +// A nil Diagnostics is a valid, empty diagnostics list, thus allowing +// heap allocation to be avoided in the common case where there are no +// diagnostics to report at all. +type Diagnostics []Diagnostic + +// HasErrors returns true if any of the diagnostics in the list have +// a severity of Error. +func (diags Diagnostics) HasErrors() bool { + for _, diag := range diags { + if diag.Severity() == Error { + return true + } + } + return false +} + +// Err flattens a diagnostics list into a single Go error, or to nil +// if the diagnostics list does not include any error-level diagnostics. +// +// This can be used to smuggle diagnostics through an API that deals in +// native errors, but unfortunately it will lose naked warnings (warnings +// that aren't accompanied by at least one error) since such APIs have no +// mechanism through which to report these. +// +// return result, diags.Err() +func (diags Diagnostics) Err() error { + if !diags.HasErrors() { + return nil + } + return diagnosticsAsError{diags} +} + +// ErrWithWarnings is similar to Err except that it will also return a non-nil +// error if the receiver contains only warnings. +// +// In the warnings-only situation, the result is guaranteed to be of dynamic +// type NonFatalError, allowing diagnostics-aware callers to type-assert +// and unwrap it, treating it as non-fatal. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. +func (diags Diagnostics) ErrWithWarnings() error { + if len(diags) == 0 { + return nil + } + if diags.HasErrors() { + return diags.Err() + } + return NonFatalError{diags} +} + +// NonFatalErr is similar to Err except that it always returns either nil +// (if there are no diagnostics at all) or NonFatalError. +// +// This allows diagnostics to be returned over an error return channel while +// being explicit that the diagnostics should not halt processing. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err.
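A sketch of the caller-side handling these comments describe, again treating the internal tfdiags package as importable for illustration (SimpleWarning is the constructor that appears later in this diff; the warning text is invented):

package example

import (
	"log"

	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags"
)

func warningsOnly() {
	diags := tfdiags.Diagnostics{tfdiags.SimpleWarning("field is deprecated")}

	err := diags.ErrWithWarnings()
	if nf, ok := err.(tfdiags.NonFatalError); ok {
		// Warnings only: report them but keep going.
		log.Printf("[WARN] %s", nf.Error())
		return
	}
	if err != nil {
		log.Fatalf("fatal diagnostics: %s", err)
	}
}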
+func (diags Diagnostics) NonFatalErr() error { + if len(diags) == 0 { + return nil + } + return NonFatalError{diags} +} + +type diagnosticsAsError struct { + Diagnostics +} + +func (dae diagnosticsAsError) Error() string { + diags := dae.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. + return "no errors" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + for _, diag := range dae.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped +// diagnostics object can be picked apart by errwrap-aware code. +func (dae diagnosticsAsError) WrappedErrors() []error { + var errs []error + for _, diag := range dae.Diagnostics { + if wrapper, isErr := diag.(nativeError); isErr { + errs = append(errs, wrapper.err) + } + } + return errs +} + +// NonFatalError is a special error type, returned by +// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr, +// that indicates that the wrapped diagnostics should be treated as non-fatal. +// Callers can conditionally type-assert an error to this type in order to +// detect the non-fatal scenario and handle it in a different way. +type NonFatalError struct { + Diagnostics +} + +func (woe NonFatalError) Error() string { + diags := woe.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. + return "no errors or warnings" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + if diags.HasErrors() { + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + } else { + fmt.Fprintf(&ret, "%d warnings:\n", len(diags)) + } + for _, diag := range woe.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// sortDiagnostics is an implementation of sort.Interface +type sortDiagnostics []Diagnostic + +var _ sort.Interface = sortDiagnostics(nil) + +func (sd sortDiagnostics) Len() int { + return len(sd) +} + +func (sd sortDiagnostics) Less(i, j int) bool { + iD, jD := sd[i], sd[j] + iSev, jSev := iD.Severity(), jD.Severity() + + switch { + case iSev != jSev: + return iSev == Warning + default: + // The remaining properties do not have a defined ordering, so + // we'll leave it unspecified. Since we use sort.Stable in + // the caller of this, the ordering of remaining items will + // be preserved. 
+ return false + } +} + +func (sd sortDiagnostics) Swap(i, j int) { + sd[i], sd[j] = sd[j], sd[i] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/doc.go new file mode 100644 index 0000000000..23be0a8bec --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/doc.go @@ -0,0 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package tfdiags is a utility package for representing errors and +// warnings in a manner that allows us to produce good messages for the +// user. +// +// "diag" is short for "diagnostics", and is meant as a general word for +// feedback to a user about potential or actual problems. +// +// A design goal for this package is for it to be able to provide rich +// messaging where possible but to also be pragmatic about dealing with +// generic errors produced by system components that _can't_ provide +// such rich messaging. As a consequence, the main types in this package -- +// Diagnostics and Diagnostic -- are designed so that they can be "smuggled" +// over an error channel and then be unpacked at the other end, so that +// error diagnostics (at least) can transit through APIs that are not +// aware of this package. +package tfdiags diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go new file mode 100644 index 0000000000..f7c9c65d38 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +// nativeError is a Diagnostic implementation that wraps a normal Go error +type nativeError struct { + err error +} + +var _ Diagnostic = nativeError{} + +func (e nativeError) Severity() Severity { + return Error +} + +func (e nativeError) Description() Description { + return Description{ + Summary: FormatError(e.err), + } +} + +func FromError(err error) Diagnostic { + return &nativeError{ + err: err, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/severity_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/severity_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/severity_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go new file mode 100644 index 0000000000..0c90c47889 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +type simpleWarning string + +var _ Diagnostic = simpleWarning("") + +// SimpleWarning constructs a simple (summary-only) warning diagnostic. 
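Combining the pieces above, a sketch of how a mixed list flattens through Err (FromError and SimpleWarning are the constructors shown in this diff; the messages are invented):

package example

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags"
)

func flattenExample() {
	diags := tfdiags.Diagnostics{
		tfdiags.FromError(errors.New("connection refused")),
		tfdiags.SimpleWarning("retrying with backoff"),
	}

	fmt.Println(diags.HasErrors()) // true
	// Err returns a single error whose message enumerates both entries:
	//   2 problems:
	//
	//   - connection refused
	//   - retrying with backoff
	fmt.Println(diags.Err())
}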
+func SimpleWarning(msg string) Diagnostic { + return simpleWarning(msg) +} + +func (e simpleWarning) Severity() Severity { + return Warning +} + +func (e simpleWarning) Description() Description { + return Description{ + Summary: string(e), + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go new file mode 100644 index 0000000000..7c62ee704b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go @@ -0,0 +1,48 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// The meta package provides a location to set the release version +// and any other relevant metadata for the SDK. +// +// This package should not import any other SDK packages. +package meta + +import ( + "fmt" + + version "github.com/hashicorp/go-version" +) + +// The main version number that is being run at the moment. +// +// Deprecated: Use Go standard library [runtime/debug] package build information +// instead. +var SDKVersion = "2.33.0" + +// A pre-release marker for the version. If this is "" (empty string) +// then it means that it is a final release. Otherwise, this is a pre-release +// such as "dev" (in development), "beta", "rc1", etc. +// +// Deprecated: Use Go standard library [runtime/debug] package build information +// instead. +var SDKPrerelease = "" + +// SemVer is an instance of version.Version. This has the secondary +// benefit of verifying during tests and init time that our version is a +// proper semantic version, which should always be the case. +var SemVer *version.Version + +func init() { + SemVer = version.Must(version.NewVersion(SDKVersion)) +} + +// SDKVersionString returns the complete version string, including prerelease +// +// Deprecated: Use Go standard library [runtime/debug] package build information +// instead. +func SDKVersionString() string { + if SDKPrerelease != "" { + return fmt.Sprintf("%s-%s", SDKVersion, SDKPrerelease) + } + return SDKVersion +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go new file mode 100644 index 0000000000..3e33dfed6b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "errors" + "time" + + "github.com/hashicorp/go-plugin" +) + +// ReattachConfig holds the information Terraform needs to be able to attach +// itself to a provider process, so it can drive the process. +type ReattachConfig struct { + Protocol string + ProtocolVersion int + Pid int + Test bool + Addr ReattachConfigAddr +} + +// ReattachConfigAddr is a JSON-encoding friendly version of net.Addr. +type ReattachConfigAddr struct { + Network string + String string +} + +// DebugServe starts a plugin server in debug mode; this should only be used +// when the provider will manage its own lifecycle. It is not recommended for +// normal usage; Serve is the correct function for that.
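Serve and ServeOpts appear later in this diff; the conventional provider entrypoint they support looks roughly like the following sketch (the provider address and the empty schema.Provider are placeholders for a real provider):

package main

import (
	"flag"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/plugin"
)

func main() {
	// -debug switches Serve into managed debug mode, printing the
	// TF_REATTACH_PROVIDERS value Terraform needs to attach to this process.
	debug := flag.Bool("debug", false, "run the provider in debug mode")
	flag.Parse()

	plugin.Serve(&plugin.ServeOpts{
		Debug:        *debug,
		ProviderAddr: "registry.terraform.io/example/example", // placeholder
		ProviderFunc: func() *schema.Provider {
			return &schema.Provider{} // placeholder provider
		},
	})
}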
+func DebugServe(ctx context.Context, opts *ServeOpts) (ReattachConfig, <-chan struct{}, error) { + reattachCh := make(chan *plugin.ReattachConfig) + closeCh := make(chan struct{}) + + if opts == nil { + return ReattachConfig{}, closeCh, errors.New("ServeOpts must be passed in with at least GRPCProviderFunc, GRPCProviderV6Func, or ProviderFunc") + } + + opts.TestConfig = &plugin.ServeTestConfig{ + Context: ctx, + ReattachConfigCh: reattachCh, + CloseCh: closeCh, + } + + go Serve(opts) + + var config *plugin.ReattachConfig + select { + case config = <-reattachCh: + case <-time.After(2 * time.Second): + return ReattachConfig{}, closeCh, errors.New("timeout waiting on reattach config") + } + + if config == nil { + return ReattachConfig{}, closeCh, errors.New("nil reattach config received") + } + + return ReattachConfig{ + Protocol: string(config.Protocol), + ProtocolVersion: config.ProtocolVersion, + Pid: config.Pid, + Test: config.Test, + Addr: ReattachConfigAddr{ + Network: config.Addr.Network(), + String: config.Addr.String(), + }, + }, closeCh, nil +} + +// Debug starts a debug server and controls its lifecycle, printing the +// information needed for Terraform to connect to the provider to stdout. +// os.Interrupt will be captured and used to stop the server. +// +// Deprecated: Use the Serve function with the ServeOpts Debug field instead. +func Debug(ctx context.Context, providerAddr string, opts *ServeOpts) error { + if opts == nil { + return errors.New("ServeOpts must be passed in with at least GRPCProviderFunc, GRPCProviderV6Func, or ProviderFunc") + } + + opts.Debug = true + + if opts.ProviderAddr == "" { + opts.ProviderAddr = providerAddr + } + + Serve(opts) + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go new file mode 100644 index 0000000000..f089ab5215 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go @@ -0,0 +1,238 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "errors" + "log" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +const ( + // The constants below are the names of the plugins that can be dispensed + // from the plugin server. + // + // Deprecated: This is no longer used, but left for backwards compatibility + // since it is exported. It will be removed in the next major version. + ProviderPluginName = "provider" +) + +// Handshake is the HandshakeConfig used to configure clients and servers. +// +// Deprecated: This is no longer used, but left for backwards compatibility +// since it is exported. It will be removed in the next major version. +var Handshake = plugin.HandshakeConfig{ + // The magic cookie values should NEVER be changed. 
MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", +} + +type ProviderFunc func() *schema.Provider +type GRPCProviderFunc func() tfprotov5.ProviderServer +type GRPCProviderV6Func func() tfprotov6.ProviderServer + +// ServeOpts are the configurations to serve a plugin. +type ServeOpts struct { + ProviderFunc ProviderFunc + + // Wrapped versions of the above plugins will automatically be shimmed and + // added to the GRPC functions when possible. + GRPCProviderFunc GRPCProviderFunc + + GRPCProviderV6Func GRPCProviderV6Func + + // Logger is the logger that go-plugin will use. + Logger hclog.Logger + + // Debug starts a debug server and controls its lifecycle, printing the + // information needed for Terraform to connect to the provider to stdout. + // os.Interrupt will be captured and used to stop the server. + // + // Ensure the ProviderAddr field is correctly set when this is enabled, + // otherwise the TF_REATTACH_PROVIDERS environment variable will not + // correctly point Terraform to the running provider binary. + // + // This option cannot be combined with TestConfig. + Debug bool + + // TestConfig should only be set when the provider is being tested; it + // will opt out of go-plugin's lifecycle management and other features, + // and will use the supplied configuration options to control the + // plugin's lifecycle and communicate connection information. See the + // go-plugin GoDoc for more information. + // + // This option cannot be combined with Debug. + TestConfig *plugin.ServeTestConfig + + // Set NoLogOutputOverride to not override the log output with an hclog + // adapter. This should only be used when running the plugin in + // acceptance tests. + NoLogOutputOverride bool + + // UseTFLogSink is the testing.T for a test function that will turn on + // the terraform-plugin-log logging sink. + UseTFLogSink testing.T + + // ProviderAddr is the address of the provider under test or debugging, + // such as registry.terraform.io/hashicorp/random. This value is used in + // the TF_REATTACH_PROVIDERS environment variable during debugging so + // Terraform can correctly match the provider address in the Terraform + // configuration to the running provider binary. + ProviderAddr string +} + +// Serve serves a plugin. This function never returns and should be the final +// function called in the main function of the plugin. +func Serve(opts *ServeOpts) { + if opts.Debug && opts.TestConfig != nil { + log.Printf("[ERROR] Error starting provider: cannot set both Debug and TestConfig") + return + } + + if !opts.NoLogOutputOverride { + // In order to allow go-plugin to correctly pass log-levels through to + // terraform, we need to use an hclog.Logger with JSON output. We can + // inject this into the std `log` package here, so existing providers will + // make use of it automatically. + logger := hclog.New(&hclog.LoggerOptions{ + // We send all output to terraform. Go-plugin will take the output and + // pass it through another hclog.Logger on the client side where it can + // be filtered.
Level: hclog.Trace, + JSONFormat: true, + }) + log.SetOutput(logger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true})) + } + + if opts.ProviderAddr == "" { + opts.ProviderAddr = "provider" + } + + var err error + + switch { + case opts.ProviderFunc != nil && opts.GRPCProviderFunc == nil: + opts.GRPCProviderFunc = func() tfprotov5.ProviderServer { + return schema.NewGRPCProviderServer(opts.ProviderFunc()) + } + err = tf5serverServe(opts) + case opts.GRPCProviderFunc != nil: + err = tf5serverServe(opts) + case opts.GRPCProviderV6Func != nil: + err = tf6serverServe(opts) + default: + err = errors.New("no provider server defined in ServeOpts") + } + + if err != nil { + log.Printf("[ERROR] Error starting provider: %s", err) + } +} + +func tf5serverServe(opts *ServeOpts) error { + var tf5serveOpts []tf5server.ServeOpt + + if opts.Debug { + tf5serveOpts = append(tf5serveOpts, tf5server.WithManagedDebug()) + } + + if opts.Logger != nil { + tf5serveOpts = append(tf5serveOpts, tf5server.WithGoPluginLogger(opts.Logger)) + } + + if opts.TestConfig != nil { + // Convert send-only channels to bi-directional channels to appease + // the compiler. WithDebug is errantly defined to require + // bi-directional when send-only is actually needed, which may be + // fixed in the future so the opts.TestConfig channels can be passed + // through directly. + closeCh := make(chan struct{}) + reattachConfigCh := make(chan *plugin.ReattachConfig) + + go func() { + // Always forward close channel receive, since it's signaling that + // the channel is closed. + val := <-closeCh + opts.TestConfig.CloseCh <- val + }() + + go func() { + val, ok := <-reattachConfigCh + + if ok { + opts.TestConfig.ReattachConfigCh <- val + } + }() + + tf5serveOpts = append(tf5serveOpts, tf5server.WithDebug( + opts.TestConfig.Context, + reattachConfigCh, + closeCh), + ) + } + + if opts.UseTFLogSink != nil { + tf5serveOpts = append(tf5serveOpts, tf5server.WithLoggingSink(opts.UseTFLogSink)) + } + + return tf5server.Serve(opts.ProviderAddr, opts.GRPCProviderFunc, tf5serveOpts...) +} + +func tf6serverServe(opts *ServeOpts) error { + var tf6serveOpts []tf6server.ServeOpt + + if opts.Debug { + tf6serveOpts = append(tf6serveOpts, tf6server.WithManagedDebug()) + } + + if opts.Logger != nil { + tf6serveOpts = append(tf6serveOpts, tf6server.WithGoPluginLogger(opts.Logger)) + } + + if opts.TestConfig != nil { + // Convert send-only channels to bi-directional channels to appease + // the compiler. WithDebug is errantly defined to require + // bi-directional when send-only is actually needed, which may be + // fixed in the future so the opts.TestConfig channels can be passed + // through directly. + closeCh := make(chan struct{}) + reattachConfigCh := make(chan *plugin.ReattachConfig) + + go func() { + // Always forward close channel receive, since it's signaling that + // the channel is closed. + val := <-closeCh + opts.TestConfig.CloseCh <- val + }() + + go func() { + val, ok := <-reattachConfigCh + + if ok { + opts.TestConfig.ReattachConfigCh <- val + } + }() + + tf6serveOpts = append(tf6serveOpts, tf6server.WithDebug( + opts.TestConfig.Context, + reattachConfigCh, + closeCh), + ) + } + + if opts.UseTFLogSink != nil { + tf6serveOpts = append(tf6serveOpts, tf6server.WithLoggingSink(opts.UseTFLogSink)) + } + + return tf6server.Serve(opts.ProviderAddr, opts.GRPCProviderV6Func, tf6serveOpts...)
+} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go new file mode 100644 index 0000000000..7b988d9f3d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go @@ -0,0 +1,997 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package terraform + +import ( + "fmt" + "log" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" +) + +// diffChangeType is an enum with the kind of changes a diff has planned. +type diffChangeType byte + +const ( + diffInvalid diffChangeType = iota //nolint:deadcode,varcheck + diffNone + diffCreate + diffUpdate + diffDestroy + diffDestroyCreate +) + +// multiVal matches the index key to a flatmapped set, list or map +var multiVal = regexp.MustCompile(`\.(#|%)$`) + +// InstanceDiff is the diff of a resource from some state to another. +type InstanceDiff struct { + mu sync.Mutex + Attributes map[string]*ResourceAttrDiff + Destroy bool + DestroyDeposed bool + DestroyTainted bool + + RawConfig cty.Value + RawState cty.Value + RawPlan cty.Value + + // Meta is a simple K/V map that is stored in a diff and persisted to + // plans but otherwise is completely ignored by Terraform core. It is + // meant to be used for additional data a resource may want to pass through. + // The value here must only contain Go primitives and collections. + Meta map[string]interface{} +} + +func (d *InstanceDiff) Lock() { d.mu.Lock() } +func (d *InstanceDiff) Unlock() { d.mu.Unlock() } + +// ApplyToValue merges the receiver into the given base value, returning a +// new value that incorporates the planned changes. The given value must +// conform to the given schema, or this method will panic. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) { + // Create InstanceState attributes from our existing state. + // We can use this to more easily apply the diff changes. + attrs := hcl2shim.FlatmapValueFromHCL2(base) + applied, err := d.Apply(attrs, schema) + if err != nil { + return base, err + } + + val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType()) + if err != nil { + return base, err + } + + return schema.CoerceValue(val) +} + +// Apply applies the diff to the provided flatmapped attributes, +// returning the new instance attributes. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + // We always build a new value here, even if the given diff is "empty", + // because we might be planning to create a new instance that happens + // to have no attributes set, and so we want to produce an empty object + // rather than just echoing back the null old value. + if attrs == nil { + attrs = map[string]string{} + } + + // Rather than applying the diff to mutate the attrs, we'll copy new values into + // here to avoid the possibility of leaving stale values.
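	// For orientation, the flatmap encoding being manipulated here stores
	// container sizes under "#" (lists/sets) and "%" (maps) suffixes, as
	// matched by multiVal above. An illustrative (hypothetical) attribute
	// map:
	//
	//   "tags.#"     = "2"      // list length
	//   "tags.0"     = "a"
	//   "tags.1"     = "b"
	//   "labels.%"   = "1"      // map length
	//   "labels.env" = "dev"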
result := map[string]string{} + + if d.Destroy || d.DestroyDeposed || d.DestroyTainted { + return result, nil + } + + return d.applyBlockDiff(nil, attrs, schema) +} + +func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + result := map[string]string{} + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + // localPrefix is used to build the local result map + localPrefix := "" + if name != "" { + localPrefix = name + "." + } + + // iterate over the schema rather than the attributes, so we can handle + // different block types separately from plain attributes + for n, attrSchema := range schema.Attributes { + var err error + newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema) + + if err != nil { + return result, err + } + + for k, v := range newAttrs { + result[localPrefix+k] = v + } + } + + blockPrefix := strings.Join(path, ".") + if blockPrefix != "" { + blockPrefix += "." + } + for n, block := range schema.BlockTypes { + // we need to find the set of all keys that traverse this block + candidateKeys := map[string]bool{} + blockKey := blockPrefix + n + "." + localBlockPrefix := localPrefix + n + "." + + // we can only trust the diff for sets, since the path changes, so don't + // count existing values as candidate keys. If it turns out we're + // keeping the attributes, we will catch it down below with "keepBlock" + // after we check the set count. + if block.Nesting != configschema.NestingSet { + for k := range attrs { + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + } + + for k, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. + if diff == nil { + continue + } + + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + + if diff.NewRemoved { + continue + } + + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + + // check each set candidate to see if it was removed. + // we need to do this, because when entire sets are removed, they may + // have the wrong key, and only show diffs going to "" + if block.Nesting == configschema.NestingSet { + for k := range candidateKeys { + indexPrefix := strings.Join(append(path, n, k), ".") + "." + keep := false + // now check each set element to see if it's a new diff, or one + // that we're dropping. Since we're only applying the "New" + // portion of the set, we can ignore diffs that only contain "Old" + for attr, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does.
+ if diff == nil { + continue + } + + if !strings.HasPrefix(attr, indexPrefix) { + continue + } + + // check for empty "count" keys + if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" { + continue + } + + // removed items don't count either + if diff.NewRemoved { + continue + } + + // this must be a diff to keep + keep = true + break + } + if !keep { + delete(candidateKeys, k) + } + } + } + + for k := range candidateKeys { + newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block) + if err != nil { + return result, err + } + + for attr, v := range newAttrs { + result[localBlockPrefix+attr] = v + } + } + + keepBlock := true + // check this block's count diff directly first, since we may not + // have candidates because it was removed and only set to "0" + if diff, ok := d.Attributes[blockKey+"#"]; ok { + if diff.New == "0" || diff.NewRemoved { + keepBlock = false + } + } + + // if there was no diff at all, then we need to keep the block attributes + if len(candidateKeys) == 0 && keepBlock { + for k, v := range attrs { + if strings.HasPrefix(k, blockKey) { + // we need the key relative to this block, so remove the + // entire prefix, then re-insert the block name. + localKey := localBlockPrefix + k[len(blockKey):] + result[localKey] = v + } + } + } + + countAddr := strings.Join(append(path, n, "#"), ".") + if countDiff, ok := d.Attributes[countAddr]; ok { + if countDiff.NewComputed { + result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue + } else { + result[localBlockPrefix+"#"] = countDiff.New + + // While sets are complete, list are not, and we may not have all the + // information to track removals. If the list was truncated, we need to + // remove the extra items from the result. + if block.Nesting == configschema.NestingList && + countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue { + length, _ := strconv.Atoi(countDiff.New) + for k := range result { + if !strings.HasPrefix(k, localBlockPrefix) { + continue + } + + index := k[len(localBlockPrefix):] + nextDot := strings.Index(index, ".") + if nextDot < 1 { + continue + } + index = index[:nextDot] + i, err := strconv.Atoi(index) + if err != nil { + // this shouldn't happen since we added these + // ourself, but make note of it just in case. 
+ log.Printf("[ERROR] bad list index in %q: %s", k, err) + continue + } + if i >= length { + delete(result, k) + } + } + } + } + } else if origCount, ok := attrs[countAddr]; ok && keepBlock { + result[localBlockPrefix+"#"] = origCount + } else { + result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) + } + } + + return result, nil +} + +func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + ty := attrSchema.Type + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): + return d.applyCollectionDiff(path, attrs, attrSchema) + case ty.IsSetType(): + return d.applySetDiff(path, attrs, attrSchema) + default: + return d.applySingleAttrDiff(path, attrs, attrSchema) + } +} + +func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + currentKey := strings.Join(path, ".") + + attr := path[len(path)-1] + + result := map[string]string{} + diff := d.Attributes[currentKey] + old, exists := attrs[currentKey] + + if diff != nil && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + // "id" must exist and not be an empty string, or it must be unknown. + // This only applied to top-level "id" fields. + if attr == "id" && len(path) == 1 { + if old == "" { + result[attr] = hcl2shim.UnknownVariableValue + } else { + result[attr] = old + } + return result, nil + } + + // attribute diffs are sometimes missed, so assume no diff means keep the + // old value + if diff == nil { + if exists { + result[attr] = old + } else { + // We need required values, so set those with an empty value. It + // must be set in the config, since if it were missing it would have + // failed validation. + if attrSchema.Required { + // we only set a missing string here, since bool or number types + // would have distinct zero value which shouldn't have been + // lost. + if attrSchema.Type == cty.String { + result[attr] = "" + } + } + } + return result, nil + } + + // check for missmatched diff values + if exists && + old != diff.Old && + old != hcl2shim.UnknownVariableValue && + diff.Old != hcl2shim.UnknownVariableValue { + return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) + } + + if diff.NewRemoved { + // don't set anything in the new value + return map[string]string{}, nil + } + + if diff.Old == diff.New && diff.New == "" { + // this can only be a valid empty string + if attrSchema.Type == cty.String { + result[attr] = "" + } + return result, nil + } + + if attrSchema.Computed && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + result[attr] = diff.New + + return result, nil +} + +func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + result := map[string]string{} + + prefix := "" + if len(path) > 1 { + prefix = strings.Join(path[:len(path)-1], ".") + "." 
+ } + + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + currentKey := prefix + name + + // check the index first for special handling + for k, diff := range d.Attributes { + // check the index value, which can be set, and 0 + if k == currentKey+".#" || k == currentKey+".%" || k == currentKey { + if diff.NewRemoved { + return result, nil + } + + if diff.NewComputed { + result[k[len(prefix):]] = hcl2shim.UnknownVariableValue + return result, nil + } + + // do what the diff tells us to here, so that it's consistent with applies + if diff.New == "0" { + result[k[len(prefix):]] = "0" + return result, nil + } + } + } + + // collect all the keys from the diff and the old state + noDiff := true + keys := map[string]bool{} + for k := range d.Attributes { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noDiff = false + keys[k] = true + } + + noAttrs := true + for k := range attrs { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noAttrs = false + keys[k] = true + } + + // If there's no diff and no attrs, then there's no value at all. + // This prevents an unexpected zero-count attribute in the attributes. + if noDiff && noAttrs { + return result, nil + } + + idx := "#" + if attrSchema.Type.IsMapType() { + idx = "%" + } + + for k := range keys { + // generate an schema placeholder for the values + elSchema := &configschema.Attribute{ + Type: attrSchema.Type.ElementType(), + } + + res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema) + if err != nil { + return result, err + } + + for k, v := range res { + result[name+"."+k] = v + } + } + + // Just like in nested list blocks, for simple lists we may need to fill in + // missing empty strings. + countKey := name + "." + idx + count := result[countKey] + length, _ := strconv.Atoi(count) + + if count != "" && count != hcl2shim.UnknownVariableValue && + attrSchema.Type.Equals(cty.List(cty.String)) { + // insert empty strings into missing indexes + for i := 0; i < length; i++ { + key := fmt.Sprintf("%s.%d", name, i) + if _, ok := result[key]; !ok { + result[key] = "" + } + } + } + + // now check for truncation in any type of list + if attrSchema.Type.IsListType() { + for key := range result { + if key == countKey { + continue + } + + if len(key) <= len(name)+1 { + // not sure what this is, but don't panic + continue + } + + index := key[len(name)+1:] + + // It is possible to have nested sets or maps, so look for another dot + dot := strings.Index(index, ".") + if dot > 0 { + index = index[:dot] + } + + // This shouldn't have any more dots, since the element type is only string. + num, err := strconv.Atoi(index) + if err != nil { + log.Printf("[ERROR] bad list index in %q: %s", currentKey, err) + continue + } + + if num >= length { + delete(result, key) + } + } + } + + // Fill in the count value if it wasn't present in the diff for some reason, + // or if there is no count at all. + _, countDiff := d.Attributes[countKey] + if result[countKey] == "" || (!countDiff && len(keys) != len(result)) { + result[countKey] = countFlatmapContainerValues(countKey, result) + } + + return result, nil +} + +func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + // We only need this special behavior for sets of object. + if !attrSchema.Type.ElementType().IsObjectType() { + // The normal collection apply behavior will work okay for this one, then. 
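+ // (Editor's note, illustrative:) e.g. a cty.Set(cty.String) attribute
+ // takes the collection path below, whereas a set of objects is
+ // re-expressed as a synthetic NestingSet block so the block-diff logic
+ // can recurse into each element's attributes.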
+ return d.applyCollectionDiff(path, attrs, attrSchema) + } + + // When we're dealing with a set of an object type we actually want to + // use our normal _block type_ apply behaviors, so we'll construct ourselves + // a synthetic schema that treats the object type as a block type and + // then delegate to our block apply method. + synthSchema := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + } + + for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { + // We can safely make everything into an attribute here because in the + // event that there are nested set attributes we'll end up back in + // here again recursively and can then deal with the next level of + // expansion. + synthSchema.Attributes[name] = &configschema.Attribute{ + Type: ty, + Optional: true, + } + } + + parentPath := path[:len(path)-1] + childName := path[len(path)-1] + containerSchema := &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + childName: { + Nesting: configschema.NestingSet, + Block: *synthSchema, + }, + }, + } + + return d.applyBlockDiff(parentPath, attrs, containerSchema) +} + +// countFlatmapContainerValues returns the number of values in the flatmapped container +// (set, map, list) indexed by key. The key argument is expected to include the +// trailing ".#", or ".%". +func countFlatmapContainerValues(key string, attrs map[string]string) string { + if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + panic(fmt.Sprintf("invalid index value %q", key)) + } + + prefix := key[:len(key)-1] + items := map[string]int{} + + for k := range attrs { + if k == key { + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + + suffix := k[len(prefix):] + dot := strings.Index(suffix, ".") + if dot > 0 { + suffix = suffix[:dot] + } + + items[suffix]++ + } + return strconv.Itoa(len(items)) +} + +// ResourceAttrDiff is the diff of a single attribute of a resource. +type ResourceAttrDiff struct { + Old string // Old Value + New string // New Value + NewComputed bool // True if new value is computed (unknown currently) + NewRemoved bool // True if this attribute is being removed + NewExtra interface{} // Extra information for the provider + RequiresNew bool // True if change requires new resource + Sensitive bool // True if the data should not be displayed in UI output + Type diffAttrType +} + +func (d *ResourceAttrDiff) GoString() string { + return fmt.Sprintf("*%#v", *d) +} + +// DiffAttrType is an enum type that says whether a resource attribute +// diff is an input attribute (comes from the configuration) or an +// output attribute (comes as a result of applying the configuration). An +// example input would be "ami" for AWS and an example output would be +// "private_ip". +type diffAttrType byte + +func NewInstanceDiff() *InstanceDiff { + return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} +} + +// ChangeType returns the diffChangeType represented by the diff +// for this single instance. +func (d *InstanceDiff) ChangeType() diffChangeType { + if d.Empty() { + return diffNone + } + + if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { + return diffDestroyCreate + } + + if d.GetDestroy() || d.GetDestroyDeposed() { + return diffDestroy + } + + if d.RequiresNew() { + return diffCreate + } + + return diffUpdate +} + +// Empty returns true if this diff encapsulates no changes. 
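+//
+// (Editor's sketch, assuming in-package use:)
+//
+//	d := NewInstanceDiff()
+//	d.Empty() // true
+//	d.Attributes["name"] = &ResourceAttrDiff{Old: "a", New: "b"}
+//	d.Empty() // false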
+func (d *InstanceDiff) Empty() bool { + if d == nil { + return true + } + + d.mu.Lock() + defer d.mu.Unlock() + return !d.Destroy && + !d.DestroyTainted && + !d.DestroyDeposed && + len(d.Attributes) == 0 +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +// TODO: investigate why removing this unused method causes panic in tests +func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Use DeepEqual + return reflect.DeepEqual(d, d2) +} + +func (d *InstanceDiff) GoString() string { + return fmt.Sprintf("*%#v", InstanceDiff{ + Attributes: d.Attributes, + Destroy: d.Destroy, + DestroyTainted: d.DestroyTainted, + DestroyDeposed: d.DestroyDeposed, + }) +} + +// RequiresNew returns true if the diff requires the creation of a new +// resource (implying the destruction of the old). +func (d *InstanceDiff) RequiresNew() bool { + if d == nil { + return false + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.requiresNew() +} + +func (d *InstanceDiff) requiresNew() bool { + if d == nil { + return false + } + + if d.DestroyTainted { + return true + } + + for _, rd := range d.Attributes { + if rd != nil && rd.RequiresNew { + return true + } + } + + return false +} + +func (d *InstanceDiff) GetDestroyDeposed() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyDeposed +} + +func (d *InstanceDiff) GetDestroyTainted() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyTainted +} + +func (d *InstanceDiff) GetDestroy() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.Destroy +} + +func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { + d.mu.Lock() + defer d.mu.Unlock() + + attr, ok := d.Attributes[key] + return attr, ok +} + +// Safely copies the Attributes map +func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { + d.mu.Lock() + defer d.mu.Unlock() + + attrs := make(map[string]*ResourceAttrDiff) + for k, v := range d.Attributes { + attrs[k] = v + } + + return attrs +} + +// Same checks whether or not two InstanceDiff's are the "same". When +// we say "same", it is not necessarily exactly equal. Instead, it is +// just checking that the same attributes are changing, a destroy +// isn't suddenly happening, etc. +func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { + // we can safely compare the pointers without a lock + switch { + case d == nil && d2 == nil: + return true, "" + case d == nil || d2 == nil: + return false, "one nil" + case d == d2: + return true, "" + } + + d.mu.Lock() + defer d.mu.Unlock() + + // If we're going from requiring new to NOT requiring new, then we have + // to see if all required news were computed. If so, it is allowed since + // computed may also mean "same value and therefore not new". + oldNew := d.requiresNew() + newNew := d2.RequiresNew() + if oldNew && !newNew { + oldNew = false + + // This section builds a list of ignorable attributes for requiresNew + // by removing off any elements of collections going to zero elements. + // For collections going to zero, they may not exist at all in the + // new diff (and hence RequiresNew == false). 
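+ // (Editor's note, illustrative:) e.g. if the plan diff recorded
+ // "tags.%" going to "0", the per-key diffs such as "tags.a" (which
+ // may have carried RequiresNew) can be absent from the apply diff
+ // entirely, so they are collected below and ignored.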
+ ignoreAttrs := make(map[string]struct{}) + for k, diffOld := range d.Attributes { + if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { + continue + } + + // This case is in here as a protection measure. The bug that this + // code originally fixed (GH-11349) didn't have to deal with computed + // so I'm not 100% sure what the correct behavior is. Best to leave + // the old behavior. + if diffOld.NewComputed { + continue + } + + // We're looking for the case a map goes to exactly 0. + if diffOld.New != "0" { + continue + } + + // Found it! Ignore all of these. The prefix here is stripping + // off the "%" so it is just "k." + prefix := k[:len(k)-1] + for k2 := range d.Attributes { + if strings.HasPrefix(k2, prefix) { + ignoreAttrs[k2] = struct{}{} + } + } + } + + for k, rd := range d.Attributes { + if _, ok := ignoreAttrs[k]; ok { + continue + } + + // If the field is requires new and NOT computed, then what + // we have is a diff mismatch for sure. We set that the old + // diff does REQUIRE a ForceNew. + if rd != nil && rd.RequiresNew && !rd.NewComputed { + oldNew = true + break + } + } + } + + if oldNew != newNew { + return false, fmt.Sprintf( + "diff RequiresNew; old: %t, new: %t", oldNew, newNew) + } + + // Verify that destroy matches. The second boolean here allows us to + // have mismatching Destroy if we're moving from RequiresNew true + // to false above. Therefore, the second boolean will only pass if + // we're moving from Destroy: true to false as well. + if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { + return false, fmt.Sprintf( + "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) + } + + // Go through the old diff and make sure the new diff has all the + // same attributes. To start, build up the check map to be all the keys. + checkOld := make(map[string]struct{}) + checkNew := make(map[string]struct{}) + for k := range d.Attributes { + checkOld[k] = struct{}{} + } + for k := range d2.CopyAttributes() { + checkNew[k] = struct{}{} + } + + // Make an ordered list so we are sure the approximated hashes are left + // to process at the end of the loop + keys := make([]string, 0, len(d.Attributes)) + for k := range d.Attributes { + keys = append(keys, k) + } + sort.StringSlice(keys).Sort() + + for _, k := range keys { + diffOld := d.Attributes[k] + + if _, ok := checkOld[k]; !ok { + // We're not checking this key for whatever reason (see where + // check is modified). + continue + } + + // Remove this key since we'll never hit it again + delete(checkOld, k) + delete(checkNew, k) + + _, ok := d2.GetAttribute(k) + if !ok { + // If there's no new attribute, and the old diff expected the attribute + // to be removed, that's just fine. + if diffOld.NewRemoved { + continue + } + + // If the last diff was a computed value then the absense of + // that value is allowed since it may mean the value ended up + // being the same. + if diffOld.NewComputed { + ok = true + } + + // No exact match, but maybe this is a set containing computed + // values. So check if there is an approximate hash in the key + // and if so, try to match the key. + if strings.Contains(k, "~") { + parts := strings.Split(k, ".") + parts2 := append([]string(nil), parts...) 
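+ // (Editor's note, illustrative:) flatmap set keys embed an element
+ // hash, e.g. "ingress.1234.port". When an element contains computed
+ // values the hash cannot be finalized and is written as an
+ // approximation such as "ingress.~2173344179.port", which is what
+ // the pattern below detects.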
+ + re := regexp.MustCompile(`^~\d+$`) + for i, part := range parts { + if re.MatchString(part) { + // we're going to consider this the base of a + // computed hash, and remove all longer matching fields + ok = true + + parts2[i] = `\d+` + parts2 = parts2[:i+1] + break + } + } + + re, err := regexp.Compile("^" + strings.Join(parts2, `\.`)) + if err != nil { + return false, fmt.Sprintf("regexp failed to compile; err: %#v", err) + } + + for k2 := range checkNew { + if re.MatchString(k2) { + delete(checkNew, k2) + } + } + } + + // This is a little tricky, but when a diff contains a computed + // list, set, or map that can only be interpolated after the apply + // command has created the dependent resources, it could turn out + // that the result is actually the same as the existing state which + // would remove the key from the diff. + if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + // Similarly, in a RequiresNew scenario, a list that shows up in the plan + // diff can disappear from the apply diff, which is calculated from an + // empty state. + if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + if !ok { + return false, fmt.Sprintf("attribute mismatch: %s", k) + } + } + + // search for the suffix of the base of a [computed] map, list or set. + match := multiVal.FindStringSubmatch(k) + + if diffOld.NewComputed && len(match) == 2 { + matchLen := len(match[1]) + + // This is a computed list, set, or map, so remove any keys with + // this prefix from the check list. + kprefix := k[:len(k)-matchLen] + for k2 := range checkOld { + if strings.HasPrefix(k2, kprefix) { + delete(checkOld, k2) + } + } + for k2 := range checkNew { + if strings.HasPrefix(k2, kprefix) { + delete(checkNew, k2) + } + } + } + + // We don't compare the values because we can't currently actually + // guarantee to generate the same value two two diffs created from + // the same state+config: we have some pesky interpolation functions + // that do not behave as pure functions (uuid, timestamp) and so they + // can be different each time a diff is produced. + // FIXME: Re-organize our config handling so that we don't re-evaluate + // expressions when we produce a second comparison diff during + // apply (for EvalCompareDiff). + } + + // Check for leftover attributes + if len(checkNew) > 0 { + extras := make([]string, 0, len(checkNew)) + for attr := range checkNew { + extras = append(extras, attr) + } + return false, + fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", ")) + } + + return true, "" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go new file mode 100644 index 0000000000..1871445819 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go @@ -0,0 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package terraform + +// This code was previously generated with a go:generate directive calling: +// go run golang.org/x/tools/cmd/stringer -type=instanceType instancetype.go +// However, it is now considered frozen and the tooling dependency has been +// removed. The String method can be manually updated if necessary. 
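+
+// (Editor's note, illustrative:) with the generated table in
+// instancetype_string.go below, typeTainted.String() returns "typeTainted",
+// and an out-of-range value such as instanceType(9) renders as
+// "instanceType(9)".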
+ +// instanceType is an enum of the various types of instances store in the State +type instanceType int + +const ( + typeInvalid instanceType = iota + typePrimary + typeTainted + typeDeposed +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go new file mode 100644 index 0000000000..782ef90c05 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=instanceType instancetype.go"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[typeInvalid-0] + _ = x[typePrimary-1] + _ = x[typeTainted-2] + _ = x[typeDeposed-3] +} + +const _instanceType_name = "typeInvalidtypePrimarytypeTaintedtypeDeposed" + +var _instanceType_index = [...]uint8{0, 11, 22, 33, 44} + +func (i instanceType) String() string { + if i < 0 || i >= instanceType(len(_instanceType_index)-1) { + return "instanceType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _instanceType_name[_instanceType_index[i]:_instanceType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go new file mode 100644 index 0000000000..2c1fa4ec7d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go @@ -0,0 +1,355 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package terraform + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/go-cty/cty" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" +) + +// InstanceInfo is used to hold information about the instance and/or +// resource being modified. +type InstanceInfo struct { + // Id is a unique name to represent this instance. This is not related + // to InstanceState.ID in any way. + Id string + + // ModulePath is the complete path of the module containing this + // instance. + ModulePath []string + + // Type is the resource type of this instance + Type string +} + +// ResourceConfig is a legacy type that was formerly used to represent +// interpolatable configuration blocks. It is now only used to shim to old +// APIs that still use this type, via NewResourceConfigShimmed. +type ResourceConfig struct { + ComputedKeys []string + Raw map[string]interface{} + Config map[string]interface{} + + // CtyValue is the raw protocol configuration data from newer APIs. + // + // This field was only added as a targeted fix for passing raw protocol data + // through the existing (helper/schema.Provider).Configure() exported method + // and is only populated in that situation. The data could theoretically be + // set in the NewResourceConfigShimmed() function, however the consequences + // of doing this were not investigated at the time the fix was introduced. + // + // This field is ignored in the Equal() method to prevent a breaking + // behavior change since the entirety of the terraform package and this type + // are unintentionally exported in v2. 
+ // + // Reference: https://github.com/hashicorp/terraform-plugin-sdk/issues/1270 + CtyValue cty.Value +} + +// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly +// the given value. +// +// The given value may contain hcl2shim.UnknownVariableValue to signal that +// something is computed, but it must not contain unprocessed interpolation +// sequences as we might've seen in Terraform v0.11 and prior. +func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig { + v := hcl2shim.HCL2ValueFromConfigValue(raw) + + // This is a little weird but we round-trip the value through the hcl2shim + // package here for two reasons: firstly, because that reduces the risk + // of it including something unlike what NewResourceConfigShimmed would + // produce, and secondly because it creates a copy of "raw" just in case + // something is relying on the fact that in the old world the raw and + // config maps were always distinct, and thus you could in principle mutate + // one without affecting the other. (I sure hope nobody was doing that, though!) + cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{}) + + return &ResourceConfig{ + Raw: raw, + Config: cfg, + + ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""), + } +} + +// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy +// ResourceConfig object, so that it can be passed to older APIs that expect +// this wrapping. +// +// The returned ResourceConfig is already interpolated and cannot be +// re-interpolated. It is, therefore, useful only to functions that expect +// an already-populated ResourceConfig which they then treat as read-only. +// +// If the given value is not of an object type that conforms to the given +// schema then this function will panic. +func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig { + if !val.Type().IsObjectType() { + panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type())) + } + ret := &ResourceConfig{} + + legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema) + if legacyVal != nil { + ret.Config = legacyVal + + // Now we need to walk through our structure and find any unknown values, + // producing the separate list ComputedKeys to represent these. We use the + // schema here so that we can preserve the expected invariant + // that an attribute is always either wholly known or wholly unknown, while + // a child block can be partially unknown. + ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "") + } else { + ret.Config = make(map[string]interface{}) + } + ret.Raw = ret.Config + + return ret +} + +// Record the any config values in ComputedKeys. This field had been unused in +// helper/schema, but in the new protocol we're using this so that the SDK can +// now handle having an unknown collection. The legacy diff code doesn't +// properly handle the unknown, because it can't be expressed in the same way +// between the config and diff. +func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string { + var ret []string + ty := val.Type() + + if val.IsNull() { + return ret + } + + if !val.IsKnown() { + // we shouldn't have an entirely unknown resource, but prevent empty + // strings just in case + if len(path) > 0 { + ret = append(ret, path) + } + return ret + } + + if path != "" { + path += "." 
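+ // (Editor's note, illustrative:) the accumulated dotted path produces
+ // ComputedKeys entries such as "tags.a" or "ports.0" for unknown leaf
+ // values nested inside the object.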
+ } + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsSetType(): + i := 0 + for it := val.ElementIterator(); it.Next(); i++ { + _, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i)) + ret = append(ret, keys...) + } + + case ty.IsMapType(), ty.IsObjectType(): + for it := val.ElementIterator(); it.Next(); { + subK, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString())) + ret = append(ret, keys...) + } + } + + return ret +} + +// DeepCopy performs a deep copy of the configuration. This makes it safe +// to modify any of the structures that are part of the resource config without +// affecting the original configuration. +func (c *ResourceConfig) DeepCopy() *ResourceConfig { + // DeepCopying a nil should return a nil to avoid panics + if c == nil { + return nil + } + + // Copy, this will copy all the exported attributes + copiedConfig, err := copystructure.Config{Lock: true}.Copy(c) + if err != nil { + panic(err) + } + + // Force the type + result := copiedConfig.(*ResourceConfig) + + return result +} + +// Equal checks the equality of two resource configs. +// +// This method intentionally ignores the CtyValue field as a major version +// compatibility concern, as this exported field was later added to the type. +// Reference: https://github.com/hashicorp/terraform-plugin-sdk/issues/1270 +func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { + // If either are nil, then they're only equal if they're both nil + if c == nil || c2 == nil { + return c == c2 + } + + // Sort the computed keys so they're deterministic + sort.Strings(c.ComputedKeys) + sort.Strings(c2.ComputedKeys) + + // Two resource configs if their exported properties are equal. + // We don't compare "raw" because it is never used again after + // initialization and for all intents and purposes they are equal + // if the exported properties are equal. + check := [][2]interface{}{ + {c.ComputedKeys, c2.ComputedKeys}, + {c.Raw, c2.Raw}, + {c.Config, c2.Config}, + } + for _, pair := range check { + if !reflect.DeepEqual(pair[0], pair[1]) { + return false + } + } + + return true +} + +// Get looks up a configuration value by key and returns the value. +// +// The second return value is true if the get was successful. Get will +// return the raw value if the key is computed, so you should pair this +// with IsComputed. +func (c *ResourceConfig) Get(k string) (interface{}, bool) { + // We aim to get a value from the configuration. If it is computed, + // then we return the pure raw value. + source := c.Config + if c.IsComputed(k) { + source = c.Raw + } + + return c.get(k, source) +} + +// GetRaw looks up a configuration value by key and returns the value, +// from the raw, uninterpolated config. +// +// The second return value is true if the get was successful. Get will +// not succeed if the value is being computed. +func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { + return c.get(k, c.Raw) +} + +// IsComputed returns whether the given key is computed or not. +func (c *ResourceConfig) IsComputed(k string) bool { + // The next thing we do is check the config if we get a computed + // value out of it. 
+ v, ok := c.get(k, c.Config) + if !ok { + return false + } + + // If value is nil, then it isn't computed + if v == nil { + return false + } + + // Test if the value contains an unknown value + var w unknownCheckWalker + if err := reflectwalk.Walk(v, &w); err != nil { + panic(err) + } + + return w.Unknown +} + +func (c *ResourceConfig) get( + k string, raw map[string]interface{}) (interface{}, bool) { + parts := strings.Split(k, ".") + if len(parts) == 1 && parts[0] == "" { + parts = nil + } + + var current interface{} = raw + var previous interface{} = nil + for i, part := range parts { + if current == nil { + return nil, false + } + + cv := reflect.ValueOf(current) + switch cv.Kind() { + case reflect.Map: + previous = current + v := cv.MapIndex(reflect.ValueOf(part)) + if !v.IsValid() { + if i > 0 && i != (len(parts)-1) { + tryKey := strings.Join(parts[i:], ".") + v := cv.MapIndex(reflect.ValueOf(tryKey)) + if !v.IsValid() { + return nil, false + } + + return v.Interface(), true + } + + return nil, false + } + + current = v.Interface() + case reflect.Slice: + previous = current + + if part == "#" { + // If any value in a list is computed, this whole thing + // is computed and we can't read any part of it. + for i := 0; i < cv.Len(); i++ { + if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue { + return v, true + } + } + + current = cv.Len() + } else { + i, err := strconv.ParseInt(part, 0, 0) + if err != nil { + return nil, false + } + if int(i) < 0 || int(i) >= cv.Len() { + return nil, false + } + current = cv.Index(int(i)).Interface() + } + case reflect.String: + // This happens when map keys contain "." and have a common + // prefix so were split as path components above. + actualKey := strings.Join(parts[i-1:], ".") + if prevMap, ok := previous.(map[string]interface{}); ok { + v, ok := prevMap[actualKey] + return v, ok + } + + return nil, false + default: + panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) + } + } + + return current, true +} + +// unknownCheckWalker +type unknownCheckWalker struct { + Unknown bool +} + +// TODO: investigate why deleting this causes odd runtime test failures +// must be some kind of interface implementation +func (w *unknownCheckWalker) Primitive(v reflect.Value) error { + if v.Interface() == hcl2shim.UnknownVariableValue { + w.Unknown = true + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go new file mode 100644 index 0000000000..8d92fbb5e4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go @@ -0,0 +1,229 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package terraform + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// resourceAddress is a way of identifying an individual resource (or, +// eventually, a subset of resources) within the state. It is used for Targets. +type resourceAddress struct { + // Addresses a resource falling somewhere in the module path + // When specified alone, addresses all resources within a module path + Path []string + + // Addresses a specific resource that occurs in a list + Index int + + InstanceType instanceType + InstanceTypeSet bool + Name string + Type string + Mode ResourceMode // significant only if InstanceTypeSet +} + +// String outputs the address that parses into this address. 
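+//
+// (Editor's sketch:) e.g. it renders addresses such as
+//
+//	aws_instance.web
+//	data.aws_ami.ubuntu
+//	module.foo.aws_instance.web[1]
+//	aws_instance.web.tainted[1]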
+func (r *resourceAddress) String() string { + var result []string + for _, p := range r.Path { + result = append(result, "module", p) + } + + switch r.Mode { + case ManagedResourceMode: + // nothing to do + case DataResourceMode: + result = append(result, "data") + default: + panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) + } + + if r.Type != "" { + result = append(result, r.Type) + } + + if r.Name != "" { + name := r.Name + if r.InstanceTypeSet { + switch r.InstanceType { + case typePrimary: + name += ".primary" + case typeDeposed: + name += ".deposed" + case typeTainted: + name += ".tainted" + } + } + + if r.Index >= 0 { + name += fmt.Sprintf("[%d]", r.Index) + } + result = append(result, name) + } + + return strings.Join(result, ".") +} + +func parseResourceAddress(s string) (*resourceAddress, error) { + matches, err := tokenizeResourceAddress(s) + if err != nil { + return nil, err + } + mode := ManagedResourceMode + if matches["data_prefix"] != "" { + mode = DataResourceMode + } + resourceIndex, err := parseResourceIndex(matches["index"]) + if err != nil { + return nil, err + } + instanceType, err := parseInstanceType(matches["instance_type"]) + if err != nil { + return nil, err + } + path := parseResourcePath(matches["path"]) + + // not allowed to say "data." without a type following + if mode == DataResourceMode && matches["type"] == "" { + return nil, fmt.Errorf( + "invalid resource address %q: must target specific data instance", + s, + ) + } + + return &resourceAddress{ + Path: path, + Index: resourceIndex, + InstanceType: instanceType, + InstanceTypeSet: matches["instance_type"] != "", + Name: matches["name"], + Type: matches["type"], + Mode: mode, + }, nil +} + +// Less returns true if and only if the receiver should be sorted before +// the given address when presenting a list of resource addresses to +// an end-user. +// +// This sort uses lexicographic sorting for most components, but uses +// numeric sort for indices, thus causing index 10 to sort after +// index 9, rather than after index 1. +func (addr *resourceAddress) Less(other *resourceAddress) bool { + + switch { + + case len(addr.Path) != len(other.Path): + return len(addr.Path) < len(other.Path) + + case !reflect.DeepEqual(addr.Path, other.Path): + // If the two paths are the same length but don't match, we'll just + // cheat and compare the string forms since it's easier than + // comparing all of the path segments in turn, and lexicographic + // comparison is correct for the module path portion. + addrStr := addr.String() + otherStr := other.String() + return addrStr < otherStr + + case addr.Mode != other.Mode: + return addr.Mode == DataResourceMode + + case addr.Type != other.Type: + return addr.Type < other.Type + + case addr.Name != other.Name: + return addr.Name < other.Name + + case addr.Index != other.Index: + // Since "Index" is -1 for an un-indexed address, this also conveniently + // sorts unindexed addresses before indexed ones, should they both + // appear for some reason. + return addr.Index < other.Index + + case addr.InstanceTypeSet != other.InstanceTypeSet: + return !addr.InstanceTypeSet + + case addr.InstanceType != other.InstanceType: + // InstanceType is actually an enum, so this is just an arbitrary + // sort based on the enum numeric values, and thus not particularly + // meaningful. 
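+ // (Editor's note, illustrative:) the numeric Index comparison above is
+ // why aws_instance.web[9] sorts before aws_instance.web[10], unlike a
+ // lexicographic sort. The final InstanceType comparison below is an
+ // arbitrary but stable tie-breaker.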
+ return addr.InstanceType < other.InstanceType
+
+ default:
+ return false
+
+ }
+}
+
+func parseResourceIndex(s string) (int, error) {
+ if s == "" {
+ return -1, nil
+ }
+ return strconv.Atoi(s)
+}
+
+func parseResourcePath(s string) []string {
+ if s == "" {
+ return nil
+ }
+ parts := strings.Split(s, ".")
+ path := make([]string, 0, len(parts))
+ for _, s := range parts {
+ // Due to the limitations of the regexp match below, the path match has
+ // some noise in it we have to filter out :|
+ if s == "" || s == "module" {
+ continue
+ }
+ path = append(path, s)
+ }
+ return path
+}
+
+func parseInstanceType(s string) (instanceType, error) {
+ switch s {
+ case "", "primary":
+ return typePrimary, nil
+ case "deposed":
+ return typeDeposed, nil
+ case "tainted":
+ return typeTainted, nil
+ default:
+ return typeInvalid, fmt.Errorf("Unexpected value for instanceType field: %q", s)
+ }
+}
+
+func tokenizeResourceAddress(s string) (map[string]string, error) {
+ // Example of portions of the regexp below using the
+ // string "aws_instance.web.tainted[1]"
+ re := regexp.MustCompile(`\A` +
+ // "module.foo.module.bar" (optional)
+ `(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` +
+ // possibly "data.", if targeting is a data resource
+ `(?P<data_prefix>(?:data\.)?)` +
+ // "aws_instance.web" (optional when module path specified)
+ `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
+ // "tainted" (optional, omission implies: "primary")
+ `(?:\.(?P<instance_type>\w+))?` +
+ // "1" (optional, omission implies: "0")
+ `(?:\[(?P<index>\d+)\])?` +
+ `\z`)
+
+ groupNames := re.SubexpNames()
+ rawMatches := re.FindAllStringSubmatch(s, -1)
+ if len(rawMatches) != 1 {
+ return nil, fmt.Errorf("invalid resource address %q", s)
+ }
+
+ matches := make(map[string]string)
+ for i, m := range rawMatches[0] {
+ matches[groupNames[i]] = m
+ }
+
+ return matches, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go
new file mode 100644
index 0000000000..2d7b10bcff
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go
@@ -0,0 +1,18 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package terraform
+
+// This code was previously generated with a go:generate directive calling:
+// go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
+// However, it is now considered frozen and the tooling dependency has been
+// removed. The String method can be manually updated if necessary.
+
+// ResourceMode is deprecated, use addrs.ResourceMode instead.
+// It has been preserved for backwards compatibility.
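+//
+// (Editor's sketch, assuming in-package use:) the mode distinguishes
+// managed resources from data sources when parsing addresses, e.g.:
+//
+//	addr, _ := parseResourceAddress("data.aws_ami.ubuntu")
+//	addr.Mode == DataResourceMode // true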
+type ResourceMode int + +const ( + ManagedResourceMode ResourceMode = iota + DataResourceMode +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go new file mode 100644 index 0000000000..c8e7008c0a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go @@ -0,0 +1,29 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package terraform + +// ResourceType is a type of resource that a resource provider can manage. +type ResourceType struct { + Name string // Name of the resource, example "instance" (no provider prefix) + Importable bool // Whether this resource supports importing + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// DataSource is a data source that a resource provider implements. +type DataSource struct { + Name string + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go new file mode 100644 index 0000000000..86ad0e7f1c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go @@ -0,0 +1,29 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// ProviderSchema represents the schema for a provider's own configuration +// and the configuration for some or all of its resources and data sources. +// +// The completeness of this structure depends on how it was constructed. +// When constructed for a configuration, it will generally include only +// resource types and data sources used by that configuration. +type ProviderSchema struct { + Provider *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// ProviderSchemaRequest is used to describe to a ResourceProvider which +// aspects of schema are required, when calling the GetSchema method. 
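+//
+// (Editor's sketch, assuming in-package use; the names are only examples:)
+//
+//	req := &ProviderSchemaRequest{
+//		ResourceTypes: []string{"github_repository"},
+//		DataSources:   []string{"github_user"},
+//	}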
+type ProviderSchemaRequest struct { + ResourceTypes []string + DataSources []string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go new file mode 100644 index 0000000000..7d2179358a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go @@ -0,0 +1,1653 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package terraform + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-uuid" + "github.com/mitchellh/copystructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" +) + +const ( + // StateVersion is the current version for our state file + stateVersion = 3 +) + +// rootModulePath is the path of the root module +var rootModulePath = []string{"root"} + +// normalizeModulePath transforms a legacy module path (which may or may not +// have a redundant "root" label at the start of it) into an +// addrs.ModuleInstance representing the same module. +// +// For legacy reasons, different parts of Terraform disagree about whether the +// root module has the path []string{} or []string{"root"}, and so this +// function accepts both and trims off the "root". An implication of this is +// that it's not possible to actually have a module call in the root module +// that is itself named "root", since that would be ambiguous. +// +// normalizeModulePath takes a raw module path and returns a path that +// has the rootModulePath prepended to it. If I could go back in time I +// would've never had a rootModulePath (empty path would be root). We can +// still fix this but thats a big refactor that my branch doesn't make sense +// for. Instead, this function normalizes paths. +func normalizeModulePath(p []string) addrs.ModuleInstance { + // FIXME: Remove this once everyone is using addrs.ModuleInstance. + + if len(p) > 0 && p[0] == "root" { + p = p[1:] + } + + ret := make(addrs.ModuleInstance, len(p)) + for i, name := range p { + // For now we don't actually support modules with multiple instances + // identified by keys, so we just treat every path element as a + // step with no key. + ret[i] = addrs.ModuleInstanceStep{ + Name: name, + } + } + return ret +} + +// State keeps track of a snapshot state-of-the-world that Terraform +// can use to keep track of what real world resources it is actually +// managing. +type State struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. 
+ Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *RemoteState `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *BackendState `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*ModuleState `json:"modules"` + + mu sync.Mutex + + // IsBinaryDrivenTest is a special flag that assists with a binary driver + // heuristic, it should not be set externally + IsBinaryDrivenTest bool +} + +func (s *State) Lock() { s.mu.Lock() } +func (s *State) Unlock() { s.mu.Unlock() } + +// NewState is used to initialize a blank state +func NewState() *State { + s := &State{} + s.init() + return s +} + +// Children returns the ModuleStates that are direct children of +// the given path. If the path is "root", for example, then children +// returned might be "root.child", but not "root.child.grandchild". +func (s *State) Children(path []string) []*ModuleState { + s.Lock() + defer s.Unlock() + // TODO: test + + return s.children(path) +} + +func (s *State) children(path []string) []*ModuleState { + result := make([]*ModuleState, 0) + for _, m := range s.Modules { + if m == nil { + continue + } + + if len(m.Path) != len(path)+1 { + continue + } + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + result = append(result, m) + } + + return result +} + +// AddModule adds the module with the given path to the state. +// +// This should be the preferred method to add module states since it +// allows us to optimize lookups later as well as control sorting. +func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { + s.Lock() + defer s.Unlock() + + return s.addModule(path) +} + +func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { + // check if the module exists first + m := s.moduleByPath(path) + if m != nil { + return m + } + + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + // For the purposes of state, the legacy address format also includes + // a redundant extra prefix element "root". It is important to include + // this because the "prune" method will remove any module that has a + // path length less than one, and other parts of the state code will + // trim off the first element indiscriminately. + legacyPath := make([]string, len(path)+1) + legacyPath[0] = "root" + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. + panic("state cannot represent modules with count or for_each keys") + } + + legacyPath[i+1] = step.Name + } + + m = &ModuleState{Path: legacyPath} + m.init() + s.Modules = append(s.Modules, m) + s.sort() + return m +} + +// ModuleByPath is used to lookup the module state for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. 
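+//
+// (Editor's sketch:) the root module, for example, is looked up as
+//
+//	root := s.ModuleByPath(addrs.RootModuleInstance)
+//
+// which is exactly what RootModule below does.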
+func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { + if s == nil { + return nil + } + s.Lock() + defer s.Unlock() + + return s.moduleByPath(path) +} + +func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { + for _, mod := range s.Modules { + if mod == nil { + continue + } + if mod.Path == nil { + panic("missing module path") + } + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { + return mod + } + } + return nil +} + +// Empty returns true if the state is empty. +func (s *State) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return len(s.Modules) == 0 +} + +// HasResources returns true if the state contains any resources. +// +// This is similar to !s.Empty, but returns true also in the case where the +// state has modules but all of them are devoid of resources. +func (s *State) HasResources() bool { + if s.Empty() { + return false + } + + for _, mod := range s.Modules { + if len(mod.Resources) > 0 { + return true + } + } + + return false +} + +// IsRemote returns true if State represents a state that exists and is +// remote. +func (s *State) IsRemote() bool { + if s == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Remote == nil { + return false + } + if s.Remote.Type == "" { + return false + } + + return true +} + +// Validate validates the integrity of this state file. +// +// Certain properties of the statefile are expected by Terraform in order +// to behave properly. The core of Terraform will assume that once it +// receives a State structure that it has been validated. This validation +// check should be called to ensure that. +// +// If this returns an error, then the user should be notified. The error +// response will include detailed information on the nature of the error. +func (s *State) Validate() error { + s.Lock() + defer s.Unlock() + + var result []error + + // !!!! FOR DEVELOPERS !!!! + // + // Any errors returned from this Validate function will BLOCK TERRAFORM + // from loading a state file. Therefore, this should only contain checks + // that are only resolvable through manual intervention. + // + // !!!! FOR DEVELOPERS !!!! + + // Make sure there are no duplicate module states. We open a new + // block here so we can use basic variable names and future validations + // can do the same. + { + found := make(map[string]struct{}) + for _, ms := range s.Modules { + if ms == nil { + continue + } + + key := strings.Join(ms.Path, ".") + if _, ok := found[key]; ok { + result = append(result, fmt.Errorf( + strings.TrimSpace(stateValidateErrMultiModule), key)) + continue + } + + found[key] = struct{}{} + } + } + + return errors.Join(result...) +} + +// Remove removes the item in the state at the given address, returning +// any errors that may have occurred. +// +// If the address references a module state or resource, it will delete +// all children as well. To check what will be deleted, use a StateFilter +// first. +func (s *State) Remove(addr ...string) error { + s.Lock() + defer s.Unlock() + + // Filter out what we need to delete + filter := &stateFilter{State: s} + results, err := filter.filter(addr...) + if err != nil { + return err + } + + // If we have no results, just exit early, we're not going to do anything. + // While what happens below is fairly fast, this is an important early + // exit since the prune below might modify the state more and we don't + // want to modify the state if we don't have to. 
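+ // (Editor's note, illustrative:) Remove accepts legacy address strings,
+ // e.g. s.Remove("aws_instance.web") deletes that resource and its
+ // instances, while s.Remove("module.foo") deletes the whole module
+ // subtree.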
+ if len(results) == 0 { + return nil + } + + // Go through each result and grab what we need + removed := make(map[interface{}]struct{}) + for _, r := range results { + // Convert the path to our own type + path := append([]string{"root"}, r.Path...) + + // If we removed this already, then ignore + if _, ok := removed[r.Value]; ok { + continue + } + + // If we removed the parent already, then ignore + if r.Parent != nil { + if _, ok := removed[r.Parent.Value]; ok { + continue + } + } + + // Add this to the removed list + removed[r.Value] = struct{}{} + + switch v := r.Value.(type) { + case *ModuleState: + s.removeModule(v) + case *ResourceState: + s.removeResource(path, v) + case *InstanceState: + s.removeInstance(r.Parent.Value.(*ResourceState), v) + default: + return fmt.Errorf("unknown type to delete: %T", r.Value) + } + } + + // Prune since the removal functions often do the bare minimum to + // remove a thing and may leave around dangling empty modules, resources, + // etc. Prune will clean that all up. + s.prune() + + return nil +} + +func (s *State) removeModule(v *ModuleState) { + for i, m := range s.Modules { + if m == v { + s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil + return + } + } +} + +func (s *State) removeResource(path []string, v *ResourceState) { + // Get the module this resource lives in. If it doesn't exist, we're done. + mod := s.moduleByPath(normalizeModulePath(path)) + if mod == nil { + return + } + + // Find this resource. This is a O(N) lookup when if we had the key + // it could be O(1) but even with thousands of resources this shouldn't + // matter right now. We can easily up performance here when the time comes. + for k, r := range mod.Resources { + if r == v { + // Found it + delete(mod.Resources, k) + return + } + } +} + +func (s *State) removeInstance(r *ResourceState, v *InstanceState) { + // Go through the resource and find the instance that matches this + // (if any) and remove it. + + // Check primary + if r.Primary == v { + r.Primary = nil + return + } +} + +// RootModule returns the ModuleState for the root module +func (s *State) RootModule() *ModuleState { + root := s.ModuleByPath(addrs.RootModuleInstance) + if root == nil { + panic("missing root module") + } + return root +} + +// Equal tests if one state is equal to another. +func (s *State) Equal(other *State) bool { + // If one is nil, we do a direct check + if s == nil || other == nil { + return s == other + } + + s.Lock() + defer s.Unlock() + return s.equal(other) +} + +func (s *State) equal(other *State) bool { + if s == nil || other == nil { + return s == other + } + + // If the versions are different, they're certainly not equal + if s.Version != other.Version { + return false + } + + // If any of the modules are not equal, then this state isn't equal + if len(s.Modules) != len(other.Modules) { + return false + } + for _, m := range s.Modules { + // This isn't very optimal currently but works. + otherM := other.moduleByPath(normalizeModulePath(m.Path)) + if otherM == nil { + return false + } + + // If they're not equal, then we're not equal! + if !m.Equal(otherM) { + return false + } + } + + return true +} + +type StateAgeComparison int + +const ( + StateAgeEqual StateAgeComparison = 0 + StateAgeReceiverNewer StateAgeComparison = 1 + StateAgeReceiverOlder StateAgeComparison = -1 +) + +// CompareAges compares one state with another for which is "older". 
+// +// This is a simple check using the state's serial, and is thus only as +// reliable as the serial itself. In the normal case, only one state +// exists for a given combination of lineage/serial, but Terraform +// does not guarantee this and so the result of this method should be +// used with care. +// +// Returns an integer that is negative if the receiver is older than +// the argument, positive if the converse, and zero if they are equal. +// An error is returned if the two states are not of the same lineage, +// in which case the integer returned has no meaning. +func (s *State) CompareAges(other *State) (StateAgeComparison, error) { + // nil states are "older" than actual states + switch { + case s != nil && other == nil: + return StateAgeReceiverNewer, nil + case s == nil && other != nil: + return StateAgeReceiverOlder, nil + case s == nil && other == nil: + return StateAgeEqual, nil + } + + if !s.SameLineage(other) { + return StateAgeEqual, fmt.Errorf( + "can't compare two states of differing lineage", + ) + } + + s.Lock() + defer s.Unlock() + + switch { + case s.Serial < other.Serial: + return StateAgeReceiverOlder, nil + case s.Serial > other.Serial: + return StateAgeReceiverNewer, nil + default: + return StateAgeEqual, nil + } +} + +// SameLineage returns true only if the state given in argument belongs +// to the same "lineage" of states as the receiver. +func (s *State) SameLineage(other *State) bool { + s.Lock() + defer s.Unlock() + + // If one of the states has no lineage then it is assumed to predate + // this concept, and so we'll accept it as belonging to any lineage + // so that a lineage string can be assigned to newer versions + // without breaking compatibility with older versions. + if s.Lineage == "" || other.Lineage == "" { + return true + } + + return s.Lineage == other.Lineage +} + +// DeepCopy performs a deep copy of the state structure and returns +// a new structure. 
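+//
+// Illustrative use (a sketch, not upstream code): taking a snapshot before
+// mutating a state, so the two can later be compared with Equal:
+//
+//	snapshot := s.DeepCopy()
+//	mutate(s)                 // hypothetical helper that edits the state
+//	changed := !s.Equal(snapshot)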
+func (s *State) DeepCopy() *State {
+	if s == nil {
+		return nil
+	}
+
+	copiedState, err := copystructure.Config{Lock: true}.Copy(s)
+	if err != nil {
+		panic(err)
+	}
+
+	return copiedState.(*State)
+}
+
+func (s *State) Init() {
+	s.Lock()
+	defer s.Unlock()
+	s.init()
+}
+
+func (s *State) init() {
+	if s.Version == 0 {
+		s.Version = stateVersion
+	}
+
+	if s.moduleByPath(addrs.RootModuleInstance) == nil {
+		s.addModule(addrs.RootModuleInstance)
+	}
+	s.ensureHasLineage()
+
+	for _, mod := range s.Modules {
+		if mod != nil {
+			mod.init()
+		}
+	}
+
+	if s.Remote != nil {
+		s.Remote.init()
+	}
+}
+
+func (s *State) EnsureHasLineage() {
+	s.Lock()
+	defer s.Unlock()
+
+	s.ensureHasLineage()
+}
+
+func (s *State) ensureHasLineage() {
+	if s.Lineage == "" {
+		lineage, err := uuid.GenerateUUID()
+		if err != nil {
+			panic(fmt.Errorf("Failed to generate lineage: %v", err))
+		}
+		s.Lineage = lineage
+		if os.Getenv("TF_ACC") == "" || os.Getenv("TF_ACC_STATE_LINEAGE") == "1" {
+			log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
+		}
+	} else {
+		if os.Getenv("TF_ACC") == "" || os.Getenv("TF_ACC_STATE_LINEAGE") == "1" {
+			log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
+		}
+	}
+}
+
+// AddModuleState inserts this module state, overriding any existing ModuleState
+func (s *State) AddModuleState(mod *ModuleState) {
+	mod.init()
+	s.Lock()
+	defer s.Unlock()
+
+	s.addModuleState(mod)
+}
+
+func (s *State) addModuleState(mod *ModuleState) {
+	for i, m := range s.Modules {
+		if reflect.DeepEqual(m.Path, mod.Path) {
+			s.Modules[i] = mod
+			return
+		}
+	}
+
+	s.Modules = append(s.Modules, mod)
+	s.sort()
+}
+
+// prune is used to remove any resources that are no longer required
+func (s *State) prune() {
+	if s == nil {
+		return
+	}
+
+	// Filter out empty modules.
+	// A module is always assumed to have a path, and its length isn't always
+	// bounds checked later on. Modules may be "emptied" during destroy, but we
+	// never want to store those in the state.
+	for i := 0; i < len(s.Modules); i++ {
+		if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 {
+			s.Modules = append(s.Modules[:i], s.Modules[i+1:]...)
+			i--
+		}
+	}
+
+	for _, mod := range s.Modules {
+		mod.prune()
+	}
+	if s.Remote != nil && s.Remote.Empty() {
+		s.Remote = nil
+	}
+}
+
+// sort sorts the modules
+func (s *State) sort() {
+	sort.Sort(moduleStateSort(s.Modules))
+
+	// Allow modules to be sorted
+	for _, m := range s.Modules {
+		if m != nil {
+			m.sort()
+		}
+	}
+}
+
+func (s *State) String() string {
+	if s == nil {
+		return ""
+	}
+	s.Lock()
+	defer s.Unlock()
+
+	var buf bytes.Buffer
+	for _, m := range s.Modules {
+		mStr := m.String()
+
+		// If we're the root module, we just write the output directly.
+		if reflect.DeepEqual(m.Path, rootModulePath) {
+			buf.WriteString(mStr + "\n")
+			continue
+		}
+
+		buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], ".")))
+
+		s := bufio.NewScanner(strings.NewReader(mStr))
+		for s.Scan() {
+			text := s.Text()
+			if text != "" {
+				text = "  " + text
+			}
+
+			buf.WriteString(fmt.Sprintf("%s\n", text))
+		}
+	}
+
+	return strings.TrimSpace(buf.String())
+}
+
+// BackendState stores the configuration to connect to a remote backend.
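+//
+// A sketch of the serialized JSON shape, with assumed example values that
+// are not part of the upstream source:
+//
+//	{"type": "s3", "config": {"bucket": "tf-state"}, "hash": 1234567890}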
+type BackendState struct {
+	Type      string          `json:"type"`   // Backend type
+	ConfigRaw json.RawMessage `json:"config"` // Backend raw config
+	Hash      uint64          `json:"hash"`   // Hash of portion of configuration from config files
+}
+
+// RemoteState is used to track the information about a remote
+// state store that we push/pull state to.
+type RemoteState struct {
+	// Type controls the client we use for the remote state
+	Type string `json:"type"`
+
+	// Config is used to store arbitrary configuration that
+	// is type specific
+	Config map[string]string `json:"config"`
+
+	mu sync.Mutex
+}
+
+func (s *RemoteState) Lock()   { s.mu.Lock() }
+func (s *RemoteState) Unlock() { s.mu.Unlock() }
+
+func (r *RemoteState) init() {
+	r.Lock()
+	defer r.Unlock()
+
+	if r.Config == nil {
+		r.Config = make(map[string]string)
+	}
+}
+
+func (r *RemoteState) Empty() bool {
+	if r == nil {
+		return true
+	}
+	r.Lock()
+	defer r.Unlock()
+
+	return r.Type == ""
+}
+
+// OutputState is used to track the state relevant to a single output.
+type OutputState struct {
+	// Sensitive describes whether the output is considered sensitive,
+	// which may lead to masking the value on screen in some cases.
+	Sensitive bool `json:"sensitive"`
+	// Type describes the structure of Value. Valid values are "string",
+	// "map" and "list"
+	Type string `json:"type"`
+	// Value contains the value of the output, in the structure described
+	// by the Type field.
+	Value interface{} `json:"value"`
+
+	mu sync.Mutex
+}
+
+func (s *OutputState) Lock()   { s.mu.Lock() }
+func (s *OutputState) Unlock() { s.mu.Unlock() }
+
+func (s *OutputState) String() string {
+	return fmt.Sprintf("%#v", s.Value)
+}
+
+// Equal compares two OutputState structures for equality. nil values are
+// considered equal.
+func (s *OutputState) Equal(other *OutputState) bool {
+	if s == nil && other == nil {
+		return true
+	}
+
+	if s == nil || other == nil {
+		return false
+	}
+	s.Lock()
+	defer s.Unlock()
+
+	if s.Type != other.Type {
+		return false
+	}
+
+	if s.Sensitive != other.Sensitive {
+		return false
+	}
+
+	if !reflect.DeepEqual(s.Value, other.Value) {
+		return false
+	}
+
+	return true
+}
+
+// ModuleState is used to track all the state relevant to a single
+// module. Previous to Terraform 0.3, all state belonged to the "root"
+// module.
+type ModuleState struct {
+	// Path is the import path from the root module. Module imports are
+	// always disjoint, so the path represents a module tree
+	Path []string `json:"path"`
+
+	// Locals are kept only transiently in-memory, because we can always
+	// re-compute them.
+	Locals map[string]interface{} `json:"-"`
+
+	// Outputs declared by the module and maintained for each module
+	// even though only the root module technically needs to be kept.
+	// This allows operators to inspect values at the boundaries.
+	Outputs map[string]*OutputState `json:"outputs"`
+
+	// Resources is a mapping of the logically named resource to
+	// the state of the resource. Each resource may actually have
+	// N instances underneath, although a user only needs to think
+	// about the 1:1 case.
+	Resources map[string]*ResourceState `json:"resources"`
+
+	// Dependencies are a list of things that this module relies on
+	// existing to remain intact. For example: a module may depend
+	// on a VPC ID given by an aws_vpc resource.
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a module that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on"`
+
+	mu sync.Mutex
+}
+
+func (s *ModuleState) Lock()   { s.mu.Lock() }
+func (s *ModuleState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether one module state is equal to another.
+func (m *ModuleState) Equal(other *ModuleState) bool {
+	m.Lock()
+	defer m.Unlock()
+
+	// Paths must be equal
+	if !reflect.DeepEqual(m.Path, other.Path) {
+		return false
+	}
+
+	// Outputs must be equal
+	if len(m.Outputs) != len(other.Outputs) {
+		return false
+	}
+	for k, v := range m.Outputs {
+		if !other.Outputs[k].Equal(v) {
+			return false
+		}
+	}
+
+	// Dependencies must be equal. This sorts these in place but
+	// this shouldn't cause any problems.
+	sort.Strings(m.Dependencies)
+	sort.Strings(other.Dependencies)
+	if len(m.Dependencies) != len(other.Dependencies) {
+		return false
+	}
+	for i, d := range m.Dependencies {
+		if other.Dependencies[i] != d {
+			return false
+		}
+	}
+
+	// Resources must be equal
+	if len(m.Resources) != len(other.Resources) {
+		return false
+	}
+	for k, r := range m.Resources {
+		otherR, ok := other.Resources[k]
+		if !ok {
+			return false
+		}
+
+		if !r.Equal(otherR) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (m *ModuleState) init() {
+	m.Lock()
+	defer m.Unlock()
+
+	if m.Path == nil {
+		m.Path = []string{}
+	}
+	if m.Outputs == nil {
+		m.Outputs = make(map[string]*OutputState)
+	}
+	if m.Resources == nil {
+		m.Resources = make(map[string]*ResourceState)
+	}
+
+	if m.Dependencies == nil {
+		m.Dependencies = make([]string, 0)
+	}
+
+	for _, rs := range m.Resources {
+		rs.init()
+	}
+}
+
+// prune is used to remove any resources that are no longer required
+func (m *ModuleState) prune() {
+	m.Lock()
+	defer m.Unlock()
+
+	for k, v := range m.Resources {
+		if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 {
+			delete(m.Resources, k)
+			continue
+		}
+
+		v.prune()
+	}
+
+	for k, v := range m.Outputs {
+		if v.Value == hcl2shim.UnknownVariableValue {
+			delete(m.Outputs, k)
+		}
+	}
+
+	m.Dependencies = uniqueStrings(m.Dependencies)
+}
+
+func (m *ModuleState) sort() {
+	for _, v := range m.Resources {
+		v.sort()
+	}
+}
+
+func (m *ModuleState) String() string {
+	m.Lock()
+	defer m.Unlock()
+
+	var buf bytes.Buffer
+
+	if len(m.Resources) == 0 {
+		buf.WriteString("<no state>")
+	}
+
+	names := make([]string, 0, len(m.Resources))
+	for name := range m.Resources {
+		names = append(names, name)
+	}
+
+	sort.Sort(resourceNameSort(names))
+
+	for _, k := range names {
+		rs := m.Resources[k]
+		var id string
+		if rs.Primary != nil {
+			id = rs.Primary.ID
+		}
+		if id == "" {
+			id = "<not created>"
+		}
+
+		taintStr := ""
+		if rs.Primary.Tainted {
+			taintStr = " (tainted)"
+		}
+
+		deposedStr := ""
+		if len(rs.Deposed) > 0 {
+			deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed))
+		}
+
+		buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
+		buf.WriteString(fmt.Sprintf("  ID = %s\n", id))
+		if rs.Provider != "" {
+			buf.WriteString(fmt.Sprintf("  provider = %s\n", rs.Provider))
+		}
+
+		var attributes map[string]string
+		if rs.Primary != nil {
+			attributes = rs.Primary.Attributes
+		}
+		attrKeys := make([]string, 0, len(attributes))
+		for ak := range attributes {
+			if ak == "id" {
+				continue
+			}
+
+			attrKeys = append(attrKeys, ak)
+		}
+
+		sort.Strings(attrKeys)
+
+		for _, ak := range attrKeys {
+			av := attributes[ak]
+			buf.WriteString(fmt.Sprintf("  %s = %s\n", ak, av))
+		}
+
+		for idx, t := range rs.Deposed {
+			taintStr := ""
+			if t.Tainted {
+				taintStr = " (tainted)"
+			}
+			buf.WriteString(fmt.Sprintf("  Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr))
+		}
+
+		if len(rs.Dependencies) > 0 {
+			buf.WriteString("\n  Dependencies:\n")
+			for _, dep := range rs.Dependencies {
+				buf.WriteString(fmt.Sprintf("    %s\n", dep))
+			}
+		}
+	}
+
+	if len(m.Outputs) > 0 {
+		buf.WriteString("\nOutputs:\n\n")
+
+		ks := make([]string, 0, len(m.Outputs))
+		for k := range m.Outputs {
+			ks = append(ks, k)
+		}
+
+		sort.Strings(ks)
+
+		for _, k := range ks {
+			v := m.Outputs[k]
+			switch vTyped := v.Value.(type) {
+			case string:
+				buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+			case []interface{}:
+				buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+			case map[string]interface{}:
+				var mapKeys []string
+				for key := range vTyped {
+					mapKeys = append(mapKeys, key)
+				}
+				sort.Strings(mapKeys)
+
+				var mapBuf bytes.Buffer
+				mapBuf.WriteString("{")
+				for _, key := range mapKeys {
+					mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
+				}
+				mapBuf.WriteString("}")
+
+				buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
+			}
+		}
+	}
+
+	return buf.String()
+}
+
+// ResourceStateKey is a structured representation of the key used for the
+// ModuleState.Resources mapping
+type ResourceStateKey struct {
+	Name  string
+	Type  string
+	Mode  ResourceMode
+	Index int
+}
+
+// Equal determines whether two ResourceStateKeys are the same
+func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool {
+	if rsk == nil || other == nil {
+		return false
+	}
+	if rsk.Mode != other.Mode {
+		return false
+	}
+	if rsk.Type != other.Type {
+		return false
+	}
+	if rsk.Name != other.Name {
+		return false
+	}
+	if rsk.Index != other.Index {
+		return false
+	}
+	return true
+}
+
+func (rsk *ResourceStateKey) String() string {
+	if rsk == nil {
+		return ""
+	}
+	var prefix string
+	switch rsk.Mode {
+	case ManagedResourceMode:
+		prefix = ""
+	case DataResourceMode:
+		prefix = "data."
+	default:
+		panic(fmt.Errorf("unknown resource mode %s", rsk.Mode))
+	}
+	if rsk.Index == -1 {
+		return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name)
+	}
+	return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index)
+}
+
+// parseResourceStateKey accepts a key in the format used by
+// ModuleState.Resources and returns a resource name and resource index. In the
+// state, a resource has the format "type.name.index" or "type.name". In the
+// latter case, the index is returned as -1.
+func parseResourceStateKey(k string) (*ResourceStateKey, error) {
+	parts := strings.Split(k, ".")
+	mode := ManagedResourceMode
+	if len(parts) > 0 && parts[0] == "data" {
+		mode = DataResourceMode
+		// Don't need the constant "data" prefix for parsing
+		// now that we've figured out the mode.
+		parts = parts[1:]
+	}
+	if len(parts) < 2 || len(parts) > 3 {
+		return nil, fmt.Errorf("Malformed resource state key: %s", k)
+	}
+	rsk := &ResourceStateKey{
+		Mode:  mode,
+		Type:  parts[0],
+		Name:  parts[1],
+		Index: -1,
+	}
+	if len(parts) == 3 {
+		index, err := strconv.Atoi(parts[2])
+		if err != nil {
+			return nil, fmt.Errorf("Malformed resource state key index: %s", k)
+		}
+		rsk.Index = index
+	}
+	return rsk, nil
+}
+
+// ResourceState holds the state of a resource that is used so that
+// a provider can find and manage an existing resource as well as for
+// storing attributes that are used to populate variables of child
+// resources.
+//
+// Attributes contains the attributes of the created resource that are
+// queryable in interpolation: "${type.id.attr}"
+//
+// Extra is just extra data that a provider can return that we store
+// for later, but is not exposed in any way to the user.
+type ResourceState struct {
+	// This is filled in and managed by Terraform, and is the resource
+	// type itself such as "mycloud_instance". If a resource provider sets
+	// this value, it won't be persisted.
+	Type string `json:"type"`
+
+	// Dependencies are a list of things that this resource relies on
+	// existing to remain intact. For example: an AWS instance might
+	// depend on a subnet (which itself might depend on a VPC, and so
+	// on).
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a resource that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on"`
+
+	// Primary is the current active instance for this resource.
+	// It can be replaced but only after a successful creation.
+	// This is the instance on which providers will act.
+	Primary *InstanceState `json:"primary"`
+
+	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+	// Primary is Deposed to get it out of the way for the replacement Primary to
+	// be created by Apply. If the replacement Primary creates successfully, the
+	// Deposed instance is cleaned up.
+	//
+	// If there were problems creating the replacement Primary, the Deposed
+	// instance and the (now tainted) replacement Primary will be swapped so the
+	// tainted replacement will be cleaned up instead.
+	//
+	// An instance will remain in the Deposed list until it is successfully
+	// destroyed and purged.
+	Deposed []*InstanceState `json:"deposed"`
+
+	// Provider is used when a resource is connected to a provider with an alias.
+	// If this string is empty, the resource is connected to the default provider,
+	// e.g. "aws_instance" goes with the "aws" provider.
+	// If the resource block contained a "provider" key, that value will be set here.
+	Provider string `json:"provider"`
+
+	mu sync.Mutex
+}
+
+func (s *ResourceState) Lock()   { s.mu.Lock() }
+func (s *ResourceState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether two ResourceStates are equal.
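+//
+// For illustration (a sketch, not upstream code), two minimal states with
+// the same type and primary ID compare as equal, since equality covers
+// Type, Provider, the sorted Dependencies, and Primary:
+//
+//	a := &ResourceState{Type: "null_resource", Primary: &InstanceState{ID: "x"}}
+//	b := &ResourceState{Type: "null_resource", Primary: &InstanceState{ID: "x"}}
+//	_ = a.Equal(b) // true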
+func (s *ResourceState) Equal(other *ResourceState) bool { + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Provider != other.Provider { + return false + } + + // Dependencies must be equal + sort.Strings(s.Dependencies) + sort.Strings(other.Dependencies) + if len(s.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range s.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // States must be equal + return s.Primary.Equal(other.Primary) +} + +func (s *ResourceState) init() { + s.Lock() + defer s.Unlock() + + if s.Primary == nil { + s.Primary = &InstanceState{} + } + s.Primary.init() + + if s.Dependencies == nil { + s.Dependencies = []string{} + } + + if s.Deposed == nil { + s.Deposed = make([]*InstanceState, 0) + } +} + +// prune is used to remove any instances that are no longer required +func (s *ResourceState) prune() { + s.Lock() + defer s.Unlock() + + n := len(s.Deposed) + for i := 0; i < n; i++ { + inst := s.Deposed[i] + if inst == nil || inst.ID == "" { + copy(s.Deposed[i:], s.Deposed[i+1:]) + s.Deposed[n-1] = nil + n-- + i-- + } + } + s.Deposed = s.Deposed[:n] + + s.Dependencies = uniqueStrings(s.Dependencies) +} + +func (s *ResourceState) sort() { + s.Lock() + defer s.Unlock() + + sort.Strings(s.Dependencies) +} + +func (s *ResourceState) String() string { + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) + return buf.String() +} + +// InstanceState is used to track the unique state information belonging +// to a given instance. +type InstanceState struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the Terraform run to complete, but is not + // persisted to a state file. + Ephemeral EphemeralState `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + ProviderMeta cty.Value + + RawConfig cty.Value + RawState cty.Value + RawPlan cty.Value + + // Tainted is used to mark a resource for recreation. + Tainted bool `json:"tainted"` + + mu sync.Mutex +} + +func (s *InstanceState) Lock() { s.mu.Lock() } +func (s *InstanceState) Unlock() { s.mu.Unlock() } + +func (s *InstanceState) init() { + s.Lock() + defer s.Unlock() + + if s.Attributes == nil { + s.Attributes = make(map[string]string) + } + if s.Meta == nil { + s.Meta = make(map[string]interface{}) + } + s.Ephemeral.init() +} + +// NewInstanceStateShimmedFromValue is a shim method to lower a new-style +// object value representing the attributes of an instance object into the +// legacy InstanceState representation. +// +// This is for shimming to old components only and should not be used in new code. 
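+//
+// A usage sketch with assumed values (not part of the upstream source):
+//
+//	v := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("i-abc123")})
+//	is := NewInstanceStateShimmedFromValue(v, 1)
+//	// is.ID == "i-abc123"; is.Meta["schema_version"] == 1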
+func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { + attrs := hcl2shim.FlatmapValueFromHCL2(state) + return &InstanceState{ + ID: attrs["id"], + Attributes: attrs, + Meta: map[string]interface{}{ + "schema_version": schemaVersion, + }, + } +} + +// AttrsAsObjectValue shims from the legacy InstanceState representation to +// a new-style cty object value representation of the state attributes, using +// the given type for guidance. +// +// The given type must be the implied type of the schema of the resource type +// of the object whose state is being converted, or the result is undefined. +// +// This is for shimming from old components only and should not be used in +// new code. +func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { + if s == nil { + // if the state is nil, we need to construct a complete cty.Value with + // null attributes, rather than a single cty.NullVal(ty) + s = &InstanceState{} + } + + if s.Attributes == nil { + s.Attributes = map[string]string{} + } + + // make sure ID is included in the attributes. The InstanceState.ID value + // takes precedence. + if s.ID != "" { + s.Attributes["id"] = s.ID + } + + return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) +} + +// Copy all the Fields from another InstanceState +func (s *InstanceState) Set(from *InstanceState) { + s.Lock() + defer s.Unlock() + + from.Lock() + defer from.Unlock() + + s.ID = from.ID + s.Attributes = from.Attributes + s.Ephemeral = from.Ephemeral + s.Meta = from.Meta + s.Tainted = from.Tainted +} + +func (s *InstanceState) DeepCopy() *InstanceState { + copiedState, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copiedState.(*InstanceState) +} + +func (s *InstanceState) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return s.ID == "" +} + +func (s *InstanceState) Equal(other *InstanceState) bool { + // Short circuit some nil checks + if s == nil || other == nil { + return s == other + } + s.Lock() + defer s.Unlock() + + // IDs must be equal + if s.ID != other.ID { + return false + } + + // Attributes must be equal + if len(s.Attributes) != len(other.Attributes) { + return false + } + for k, v := range s.Attributes { + otherV, ok := other.Attributes[k] + if !ok { + return false + } + + if v != otherV { + return false + } + } + + // Meta must be equal + if len(s.Meta) != len(other.Meta) { + return false + } + if s.Meta != nil && other.Meta != nil { + // We only do the deep check if both are non-nil. If one is nil + // we treat it as equal since their lengths are both zero (check + // above). + // + // Since this can contain numeric values that may change types during + // serialization, let's compare the serialized values. + sMeta, err := json.Marshal(s.Meta) + if err != nil { + // marshaling primitives shouldn't ever error out + panic(err) + } + otherMeta, err := json.Marshal(other.Meta) + if err != nil { + panic(err) + } + + if !bytes.Equal(sMeta, otherMeta) { + return false + } + } + + if s.Tainted != other.Tainted { + return false + } + + return true +} + +// MergeDiff takes a ResourceDiff and merges the attributes into +// this resource state in order to generate a new state. This new +// state can be used to provide updated attribute lookups for +// variable interpolation. +// +// If the diff attribute requires computing the value, and hence +// won't be available until apply, the value is replaced with the +// computeID. 
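+//
+// An illustrative sketch (not upstream code): merging a diff that marks an
+// attribute as computed replaces its value with the unknown placeholder
+// described above:
+//
+//	s := &InstanceState{Attributes: map[string]string{"ami": "ami-123"}}
+//	d := &InstanceDiff{Attributes: map[string]*ResourceAttrDiff{
+//		"ami": {NewComputed: true},
+//	}}
+//	merged := s.MergeDiff(d)
+//	// merged.Attributes["ami"] == hcl2shim.UnknownVariableValue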
+func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
+	result := s.DeepCopy()
+	if result == nil {
+		result = new(InstanceState)
+	}
+	result.init()
+
+	if s != nil {
+		s.Lock()
+		defer s.Unlock()
+		for k, v := range s.Attributes {
+			result.Attributes[k] = v
+		}
+	}
+	if d != nil {
+		for k, diff := range d.CopyAttributes() {
+			if diff.NewRemoved {
+				delete(result.Attributes, k)
+				continue
+			}
+			if diff.NewComputed {
+				result.Attributes[k] = hcl2shim.UnknownVariableValue
+				continue
+			}
+
+			result.Attributes[k] = diff.New
+		}
+	}
+
+	return result
+}
+
+func (s *InstanceState) String() string {
+	notCreated := "<not created>"
+
+	if s == nil {
+		return notCreated
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	var buf bytes.Buffer
+
+	if s.ID == "" {
+		return notCreated
+	}
+
+	buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
+
+	attributes := s.Attributes
+	attrKeys := make([]string, 0, len(attributes))
+	for ak := range attributes {
+		if ak == "id" {
+			continue
+		}
+
+		attrKeys = append(attrKeys, ak)
+	}
+	sort.Strings(attrKeys)
+
+	for _, ak := range attrKeys {
+		av := attributes[ak]
+		buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av))
+	}
+
+	buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted))
+
+	return buf.String()
+}
+
+// EphemeralState is used for transient state that is only kept in-memory
+type EphemeralState struct {
+	// ConnInfo is used for the providers to export information which is
+	// used to connect to the resource for provisioning. For example,
+	// this could contain SSH or WinRM credentials.
+	ConnInfo map[string]string `json:"-"`
+
+	// Type is used to specify the resource type for this instance. This is only
+	// required for import operations (as documented). If the documentation
+	// doesn't state that you need to set this, then don't worry about
+	// setting it.
+	Type string `json:"-"`
+}
+
+func (e *EphemeralState) init() {
+	if e.ConnInfo == nil {
+		e.ConnInfo = make(map[string]string)
+	}
+}
+
+// resourceNameSort implements the sort.Interface to sort name parts lexically for
+// strings and numerically for integer indexes.
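+//
+// An illustrative consequence (not upstream text): under this ordering,
+// "aws_instance.foo.9" sorts before "aws_instance.foo.10", whereas a plain
+// lexical comparison would put the ".10" key first.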
+type resourceNameSort []string + +func (r resourceNameSort) Len() int { return len(r) } +func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +func (r resourceNameSort) Less(i, j int) bool { + iParts := strings.Split(r[i], ".") + jParts := strings.Split(r[j], ".") + + end := len(iParts) + if len(jParts) < end { + end = len(jParts) + } + + for idx := 0; idx < end; idx++ { + if iParts[idx] == jParts[idx] { + continue + } + + // sort on the first non-matching part + iInt, iIntErr := strconv.Atoi(iParts[idx]) + jInt, jIntErr := strconv.Atoi(jParts[idx]) + + switch { + case iIntErr == nil && jIntErr == nil: + // sort numerically if both parts are integers + return iInt < jInt + case iIntErr == nil: + // numbers sort before strings + return true + case jIntErr == nil: + return false + default: + return iParts[idx] < jParts[idx] + } + } + + return r[i] < r[j] +} + +// moduleStateSort implements sort.Interface to sort module states +type moduleStateSort []*ModuleState + +func (s moduleStateSort) Len() int { + return len(s) +} + +func (s moduleStateSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If either is nil, then the nil one is "less" than + if a == nil || b == nil { + return a == nil + } + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} + +func (s moduleStateSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +const stateValidateErrMultiModule = ` +Multiple modules with the same path: %s + +This means that there are multiple entries in the "modules" field +in your state file that point to the same module. This will cause Terraform +to behave in unexpected and error prone ways and is invalid. Please back up +and modify your state file manually to resolve this. +` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go similarity index 75% rename from vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_filter.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go index 2dcb11b76b..caf2c79674 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_filter.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package terraform import ( @@ -5,7 +8,7 @@ import ( "sort" ) -// StateFilter is responsible for filtering and searching a state. +// stateFilter is responsible for filtering and searching a state. // // This is a separate struct from State rather than a method on State // because StateFilter might create sidecar data structures to optimize @@ -15,18 +18,26 @@ import ( // Reset should be called or a new one should be allocated. StateFilter // will not watch State for changes and do this for you. If you filter after // changing the State without calling Reset, the behavior is not defined. -type StateFilter struct { +type stateFilter struct { State *State } // Filter takes the addresses specified by fs and finds all the matches. // The values of fs are resource addressing syntax that can be parsed by -// ParseResourceAddress. -func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { +// parseResourceAddress. 
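+//
+// A usage sketch (not upstream code): with an empty filter list, every
+// module, resource, and instance in the state is returned:
+//
+//	results, err := (&stateFilter{State: s}).filter()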
+func (f *stateFilter) filter(fs ...string) ([]*stateFilterResult, error) { // Parse all the addresses - as := make([]*ResourceAddress, len(fs)) + var as []*resourceAddress + + if len(fs) == 0 { + // If we weren't given any filters, then we list all + as = []*resourceAddress{{Index: -1}} + } else { + as = make([]*resourceAddress, len(fs)) + } + for i, v := range fs { - a, err := ParseResourceAddress(v) + a, err := parseResourceAddress(v) if err != nil { return nil, fmt.Errorf("Error parsing address '%s': %s", v, err) } @@ -34,14 +45,9 @@ func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { as[i] = a } - // If we weren't given any filters, then we list all - if len(fs) == 0 { - as = append(as, &ResourceAddress{Index: -1}) - } - // Filter each of the address. We keep track of this in a map to // strip duplicates. - resultSet := make(map[string]*StateFilterResult) + resultSet := make(map[string]*stateFilterResult) for _, a := range as { for _, r := range f.filterSingle(a) { resultSet[r.String()] = r @@ -49,19 +55,19 @@ func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { } // Make the result list - results := make([]*StateFilterResult, 0, len(resultSet)) + results := make([]*stateFilterResult, 0, len(resultSet)) for _, v := range resultSet { results = append(results, v) } // Sort them and return - sort.Sort(StateFilterResultSlice(results)) + sort.Sort(stateFilterResultSlice(results)) return results, nil } -func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { +func (f *stateFilter) filterSingle(a *resourceAddress) []*stateFilterResult { // The slice to keep track of results - var results []*StateFilterResult + var results []*stateFilterResult // Go through modules first. modules := make([]*ModuleState, 0, len(f.State.Modules)) @@ -72,9 +78,9 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { // Only add the module to the results if we haven't specified a type. // We also ignore the root module. if a.Type == "" && len(m.Path) > 1 { - results = append(results, &StateFilterResult{ + results = append(results, &stateFilterResult{ Path: m.Path[1:], - Address: (&ResourceAddress{Path: m.Path[1:]}).String(), + Address: (&resourceAddress{Path: m.Path[1:]}).String(), Value: m, }) } @@ -86,7 +92,7 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { for _, m := range modules { for n, r := range m.Resources { // The name in the state contains valuable information. Parse. - key, err := ParseResourceStateKey(n) + key, err := parseResourceStateKey(n) if err != nil { // If we get an error parsing, then just ignore it // out of the state. 
@@ -116,7 +122,7 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { } // Build the address for this resource - addr := &ResourceAddress{ + addr := &resourceAddress{ Path: m.Path[1:], Name: key.Name, Type: key.Type, @@ -124,7 +130,7 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { } // Add the resource level result - resourceResult := &StateFilterResult{ + resourceResult := &stateFilterResult{ Path: addr.Path, Address: addr.String(), Value: r, @@ -135,9 +141,9 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { // Add the instances if r.Primary != nil { - addr.InstanceType = TypePrimary + addr.InstanceType = typePrimary addr.InstanceTypeSet = false - results = append(results, &StateFilterResult{ + results = append(results, &stateFilterResult{ Path: addr.Path, Address: addr.String(), Parent: resourceResult, @@ -147,9 +153,9 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { for _, instance := range r.Deposed { if f.relevant(a, instance) { - addr.InstanceType = TypeDeposed + addr.InstanceType = typeDeposed addr.InstanceTypeSet = true - results = append(results, &StateFilterResult{ + results = append(results, &stateFilterResult{ Path: addr.Path, Address: addr.String(), Parent: resourceResult, @@ -165,7 +171,7 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { } // relevant checks for relevance of this address against the given value. -func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { +func (f *stateFilter) relevant(addr *resourceAddress, raw interface{}) bool { switch v := raw.(type) { case *ModuleState: path := v.Path[1:] @@ -202,10 +208,10 @@ func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { } } -// StateFilterResult is a single result from a filter operation. Filter +// stateFilterResult is a single result from a filter operation. Filter // can match multiple things within a state (module, resource, instance, etc.) // and this unifies that. -type StateFilterResult struct { +type stateFilterResult struct { // Module path of the result Path []string @@ -215,7 +221,7 @@ type StateFilterResult struct { // Parent, if non-nil, is a parent of this result. For instances, the // parent would be a resource. For resources, the parent would be // a module. For modules, this is currently nil. - Parent *StateFilterResult + Parent *stateFilterResult // Value is the actual value. This must be type switched on. It can be // any data structures that `State` can hold: `ModuleState`, @@ -223,11 +229,11 @@ type StateFilterResult struct { Value interface{} } -func (r *StateFilterResult) String() string { +func (r *stateFilterResult) String() string { return fmt.Sprintf("%T: %s", r.Value, r.Address) } -func (r *StateFilterResult) sortedType() int { +func (r *stateFilterResult) sortedType() int { switch r.Value.(type) { case *ModuleState: return 0 @@ -240,19 +246,19 @@ func (r *StateFilterResult) sortedType() int { } } -// StateFilterResultSlice is a slice of results that implements +// stateFilterResultSlice is a slice of results that implements // sort.Interface. The sorting goal is what is most appealing to // human output. 
-type StateFilterResultSlice []*StateFilterResult +type stateFilterResultSlice []*stateFilterResult -func (s StateFilterResultSlice) Len() int { return len(s) } -func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s StateFilterResultSlice) Less(i, j int) bool { +func (s stateFilterResultSlice) Len() int { return len(s) } +func (s stateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s stateFilterResultSlice) Less(i, j int) bool { a, b := s[i], s[j] // if these address contain an index, we want to sort by index rather than name - addrA, errA := ParseResourceAddress(a.Address) - addrB, errB := ParseResourceAddress(b.Address) + addrA, errA := parseResourceAddress(a.Address) + addrB, errB := parseResourceAddress(b.Address) if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index { return addrA.Index < addrB.Index } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go new file mode 100644 index 0000000000..6353ad27d9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package terraform + +import ( + "sort" +) + +// deduplicate a slice of strings +func uniqueStrings(s []string) []string { + if len(s) < 2 { + return s + } + + sort.Strings(s) + result := make([]string, 1, len(s)) + result[0] = s[0] + for i := 1; i < len(s); i++ { + if s[i] != result[len(result)-1] { + result = append(result, s[i]) + } + } + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/CHANGELOG.md b/vendor/github.com/hashicorp/terraform-plugin-test/v2/CHANGELOG.md deleted file mode 100644 index 513e76a394..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-test/v2/CHANGELOG.md +++ /dev/null @@ -1,74 +0,0 @@ -# 2.2.1 (April 27, 2021) - -SECURITY: - - - Upgraded to terraform-exec v0.13.3 to address GPG key rotation. See [terraform-exec's CHANGELOG](https://github.com/hashicorp/terraform-exec/blob/main/CHANGELOG.md#0133-april-23-2021). - -# 2.2.0 (April 01, 2021) - -NOTES: - -In this release, we upgraded to a version of terraform-exec that surfaces numbers in state as json.Number instead of float64. You may need to update your type assertions against numbers in state. - -ENHANCEMENTS: - - - Added support for Terraform 0.15 ([#45](https://github.com/hashicorp/terraform-plugin-test/pull/45)) - -# 2.1.3 (February 22, 2021) - -BUG FIXES: - - - Fix compilation error from go-getter ([#44](https://github.com/hashicorp/terraform-plugin-test/pull/44)) - -# 2.1.2 (September 15, 2020) - -BUG FIXES: - - - Fix plan output to be in a human-friendly format ([#40](https://github.com/hashicorp/terraform-plugin-test/pull/40)) - -# 2.1.1 (September 9, 2020) - -BUG FIXES: - - - Fix propagation of plugin reattach information ([#38](https://github.com/hashicorp/terraform-plugin-test/pull/38)) - -# 2.1.0 (September 2, 2020) - -FEATURES: - - - Added the ability to create destroy plans. ([#37](https://github.com/hashicorp/terraform-plugin-test/pull/37)) - -ENHANCEMENTS: - - - Normalised internal Terraform CLI commands using github.com/hashicorp/terraform-exec module. 
([#35](https://github.com/hashicorp/terraform-plugin-test/pull/35)) - -# 2.0.0 (August 10, 2020) - -FEATURES: - - - Simplified API signatures to reflect no longer needing provider name ([#32](https://github.com/hashicorp/terraform-plugin-test/pull/32)) - - Implement SavedPlanStdout which captures a non-json stdout run of `terraform show` of a planfile ([#34](https://github.com/hashicorp/terraform-plugin-test/pull/34)) - -# 1.4.4 (July 10, 2020) - -BUG FIXES: - - - Fix Windows bug in versions of Terraform below 0.13.0-beta2 ([#30](https://github.com/hashicorp/terraform-plugin-test/pull/30)) - -# 1.4.3 (July 7, 2020) - -DEPENDENCIES: - - - `github.com/hashicorp/go-getter@v1.4.0` ([#29](https://github.com/hashicorp/terraform-plugin-test/pull/29)) - -# 1.4.2 (July 7, 2020) - -DEPENDENCIES: - - - `github.com/hashicorp/terraform-exec@v0.1.1` ([#28](https://github.com/hashicorp/terraform-plugin-test/pull/28)) - -# 1.4.1 (July 7, 2020) - -BUG FIXES: - - - Fix auto-install Terraform feature ([#26](https://github.com/hashicorp/terraform-plugin-test/pull/26)) diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-test/v2/LICENSE deleted file mode 100644 index 82b4de97c7..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-test/v2/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. 
“Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. 
Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. 
Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. 
Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/README.md b/vendor/github.com/hashicorp/terraform-plugin-test/v2/README.md deleted file mode 100644 index 4f1d5487e6..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-test/v2/README.md +++ /dev/null @@ -1,7 +0,0 @@ -**ARCHIVED: This project has been merged into [terraform-plugin-sdk](github.com/hashicorp/terraform-plugin-sdk) as the `plugintest` package.** - - -# Terraform Plugin Test Helper Library - -This is an **experimental** library for testing Terraform plugins in their -natural habitat as child processes of a real `terraform` executable. 
diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/config.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/config.go deleted file mode 100644 index e9c4d58c7b..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-test/v2/config.go +++ /dev/null @@ -1,54 +0,0 @@ -package tftest - -import ( - "context" - "fmt" - "io/ioutil" - "os" - - "github.com/hashicorp/terraform-exec/tfinstall" -) - -// Config is used to configure the test helper. In most normal test programs -// the configuration is discovered automatically by an Init* function using -// DiscoverConfig, but this is exposed so that more complex scenarios can be -// implemented by direct configuration. -type Config struct { - SourceDir string - TerraformExec string - execTempDir string - PreviousPluginExec string -} - -// DiscoverConfig uses environment variables and other means to automatically -// discover a reasonable test helper configuration. -func DiscoverConfig(sourceDir string) (*Config, error) { - tfVersion := os.Getenv("TF_ACC_TERRAFORM_VERSION") - tfPath := os.Getenv("TF_ACC_TERRAFORM_PATH") - - tempDir := os.Getenv("TF_ACC_TEMP_DIR") - tfDir, err := ioutil.TempDir(tempDir, "tftest-terraform") - if err != nil { - return nil, fmt.Errorf("failed to create temp dir: %w", err) - } - - finders := []tfinstall.ExecPathFinder{} - switch { - case tfPath != "": - finders = append(finders, tfinstall.ExactPath(tfPath)) - case tfVersion != "": - finders = append(finders, tfinstall.ExactVersion(tfVersion, tfDir)) - default: - finders = append(finders, tfinstall.LookPath(), tfinstall.LatestVersion(tfDir, true)) - } - tfExec, err := tfinstall.Find(context.Background(), finders...) - if err != nil { - return nil, err - } - - return &Config{ - SourceDir: sourceDir, - TerraformExec: tfExec, - execTempDir: tfDir, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/doc.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/doc.go deleted file mode 100644 index 3b120c679d..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-test/v2/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package tftest contains utilities to help with writing tests for -// Terraform plugins. -// -// This is not a package for testing configurations or modules written in the -// Terraform language. It is for testing the plugins that allow Terraform to -// manage various cloud services and other APIs. -package tftest diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/guard.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/guard.go deleted file mode 100644 index 819937b38e..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-test/v2/guard.go +++ /dev/null @@ -1,94 +0,0 @@ -package tftest - -import ( - "fmt" - "os" - "testing" -) - -// AcceptanceTest is a test guard that will produce a log and call SkipNow on -// the given TestControl if the environment variable TF_ACC isn't set to -// indicate that the caller wants to run acceptance tests. -// -// Call this immediately at the start of each acceptance test function to -// signal that it may cost money and thus requires this opt-in environment -// variable. -// -// For the purpose of this function, an "acceptance test" is any test that -// reaches out to services that are not directly controlled by the test program -// itself, particularly if those requests may lead to service charges. For any -// system where it is possible and realistic to run a local instance of the -// service for testing (e.g.
in a daemon launched by the test program itself), -// prefer to do this and _don't_ call AcceptanceTest, thus allowing tests to be -// run more easily and without external cost by contributors. -func AcceptanceTest(t TestControl) { - t.Helper() - if os.Getenv("TF_ACC") == "" { - t.Log("TF_ACC is not set") - t.SkipNow() - } -} - -// LongTest is a test guard that will produce a log and call SkipNow on the -// given TestControl if the test harness is currently running in "short mode". -// -// What is considered a "long test" will always be pretty subjective, but test -// implementers should think of this in terms of what seems like it'd be -// inconvenient to run repeatedly for quick feedback while testing a new feature -// under development. -// -// When testing resource types that always take several minutes to complete -// operations, consider having a single general test that covers the basic -// functionality and then mark any other more specific tests as long tests so -// that developers can quickly smoke-test a particular feature while working -// on it but can still run the full set of tests for a feature when needed. -func LongTest(t TestControl) { - t.Helper() - if testing.Short() { - t.Log("skipping long test because of short mode") - t.SkipNow() - } -} - -// TestControl is an interface requiring a subset of *testing.T which is used -// by the test guards and helpers in this package. Most callers can simply -// pass their *testing.T value here, but the interface allows other -// implementations to potentially be provided instead, for example to allow -// meta-testing (testing of the test utilities themselves). -// -// This interface also describes the subset of normal test functionality the -// guards and helpers can perform: they can only create log lines, fail tests, -// and skip tests. All other test control is the responsibility of the main -// test code. -type TestControl interface { - Helper() - Log(args ...interface{}) - FailNow() - SkipNow() -} - -// testingT wraps a TestControl to recover some of the convenience behaviors -// that would normally come from a real *testing.T, so we can keep TestControl -// small while still having these conveniences. This is an abstraction -// inversion, but accepted because it makes the public API more convenient -// without any considerable disadvantage. -type testingT struct { - TestControl -} - -func (t testingT) Logf(f string, args ...interface{}) { - t.Helper() - t.Log(fmt.Sprintf(f, args...)) -} - -func (t testingT) Fatalf(f string, args ...interface{}) { - t.Helper() - t.Log(fmt.Sprintf(f, args...)) - t.FailNow() -} - -func (t testingT) Skipf(f string, args ...interface{}) { - t.Helper() - t.Log(fmt.Sprintf(f, args...)) - t.SkipNow() -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/helper.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/helper.go deleted file mode 100644 index 06b8f76bd8..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-test/v2/helper.go +++ /dev/null @@ -1,230 +0,0 @@ -package tftest - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - - getter "github.com/hashicorp/go-getter" - "github.com/hashicorp/terraform-exec/tfexec" -) - -const subprocessCurrentSigil = "4acd63807899403ca4859f5bb948d2c6" -const subprocessPreviousSigil = "2279afb8cf71423996be1fd65d32f13b" - -// AutoInitProviderHelper is the main entrypoint for testing provider plugins -// using this package. It is intended to be called during TestMain to prepare -// for provider testing.
-// -// AutoInitProviderHelper will discover the location of a current Terraform CLI -// executable to test against, detect whether a prior version of the plugin is -// available for upgrade tests, and then will return an object containing the -// results of that initialization which can then be stored in a global variable -// for use in other tests. -func AutoInitProviderHelper(sourceDir string) *Helper { - helper, err := AutoInitHelper(sourceDir) - if err != nil { - fmt.Fprintf(os.Stderr, "cannot run Terraform provider tests: %s\n", err) - os.Exit(1) - } - return helper -} - -// Helper is intended as a per-package singleton created in TestMain which -// other tests in a package can use to create Terraform execution contexts -type Helper struct { - baseDir string - - // sourceDir is the dir containing the provider source code, needed - // for tests that use fixture files. - sourceDir string - terraformExec string - - // execTempDir is created during DiscoverConfig to store any downloaded - // binaries - execTempDir string -} - -// AutoInitHelper uses the auto-discovery behavior of DiscoverConfig to prepare -// a configuration and then calls InitHelper with it. This is a convenient -// way to get the standard init behavior based on environment variables, and -// callers should use this unless they have an unusual requirement that calls -// for constructing a config in a different way. -func AutoInitHelper(sourceDir string) (*Helper, error) { - config, err := DiscoverConfig(sourceDir) - if err != nil { - return nil, err - } - - return InitHelper(config) -} - -// InitHelper prepares a testing helper with the given configuration. -// -// For most callers it is sufficient to call AutoInitHelper instead, which -// will construct a configuration automatically based on certain environment -// variables. -// -// If this function returns an error then it may have left some temporary files -// behind in the system's temporary directory. There is currently no way to -// automatically clean those up. -func InitHelper(config *Config) (*Helper, error) { - tempDir := os.Getenv("TF_ACC_TEMP_DIR") - baseDir, err := ioutil.TempDir(tempDir, "tftest") - if err != nil { - return nil, fmt.Errorf("failed to create temporary directory for test helper: %s", err) - } - - return &Helper{ - baseDir: baseDir, - sourceDir: config.SourceDir, - terraformExec: config.TerraformExec, - execTempDir: config.execTempDir, - }, nil -} - -// symlinkAuxiliaryProviders discovers auxiliary provider binaries, used in -// multi-provider tests, and symlinks them to the plugin directory. -// -// Auxiliary provider binaries should be included in the provider source code -// directory, under the path terraform.d/plugins/$GOOS_$GOARCH/provider-name. -// -// The environment variable TF_ACC_PROVIDER_ROOT_DIR must be set to the path of -// the provider source code directory root in order to use this feature. -func symlinkAuxiliaryProviders(pluginDir string) error { - providerRootDir := os.Getenv("TF_ACC_PROVIDER_ROOT_DIR") - if providerRootDir == "" { - // common case; assume intentional and do not log - return nil - } - - _, err := os.Stat(filepath.Join(providerRootDir, "terraform.d", "plugins")) - if os.IsNotExist(err) { - fmt.Printf("No terraform.d/plugins directory found: continuing. 
Unset TF_ACC_PROVIDER_ROOT_DIR or supply provider binaries in terraform.d/plugins/$GOOS_$GOARCH to disable this message.") - return nil - } else if err != nil { - return fmt.Errorf("Unexpected error: %s", err) - } - - auxiliaryProviderDir := filepath.Join(providerRootDir, "terraform.d", "plugins", runtime.GOOS+"_"+runtime.GOARCH) - - // If we can't os.Stat() terraform.d/plugins/$GOOS_$GOARCH, however, - // assume the omission was unintentional, and error. - _, err = os.Stat(auxiliaryProviderDir) - if os.IsNotExist(err) { - return fmt.Errorf("error finding auxiliary provider dir %s: %s", auxiliaryProviderDir, err) - } else if err != nil { - return fmt.Errorf("Unexpected error: %s", err) - } - - // now find all the providers in that dir and symlink them to the plugin dir - providers, err := ioutil.ReadDir(auxiliaryProviderDir) - if err != nil { - return fmt.Errorf("error reading auxiliary providers: %s", err) - } - - zipDecompressor := new(getter.ZipDecompressor) - - for _, provider := range providers { - filename := provider.Name() - filenameExt := filepath.Ext(filename) - name := strings.TrimSuffix(filename, filenameExt) - path := filepath.Join(auxiliaryProviderDir, name) - symlinkPath := filepath.Join(pluginDir, name) - - // exit early if we have already symlinked this provider - _, err := os.Stat(symlinkPath) - if err == nil { - continue - } - - // if filename ends in .zip, assume it is a zip and extract it - // otherwise assume it is a provider binary - if filenameExt == ".zip" { - _, err = os.Stat(path) - if os.IsNotExist(err) { - zipDecompressor.Decompress(path, filepath.Join(auxiliaryProviderDir, filename), false, 0) - } else if err != nil { - return fmt.Errorf("Unexpected error: %s", err) - } - } - - err = symlinkFile(path, symlinkPath) - if err != nil { - return fmt.Errorf("error symlinking auxiliary provider %s: %s", name, err) - } - } - - return nil -} - -// Close cleans up temporary files and directories created to support this -// helper, returning an error if any of the cleanup fails. -// -// Call this before returning from TestMain to minimize the amount of detritus -// left behind in the filesystem after the tests complete. -func (h *Helper) Close() error { - if h.execTempDir != "" { - err := os.RemoveAll(h.execTempDir) - if err != nil { - return err - } - } - return os.RemoveAll(h.baseDir) -} - -// NewWorkingDir creates a new working directory for use in the implementation -// of a single test, returning a WorkingDir object representing that directory. -// -// If the working directory object is not itself closed by the time the test -// program exits, the Close method on the helper itself will attempt to -// delete it. -func (h *Helper) NewWorkingDir() (*WorkingDir, error) { - dir, err := ioutil.TempDir(h.baseDir, "work") - if err != nil { - return nil, err - } - - // symlink the provider source files into the config directory - // e.g. testdata - err = symlinkDirectoriesOnly(h.sourceDir, dir) - if err != nil { - return nil, err - } - - tf, err := tfexec.NewTerraform(dir, h.terraformExec) - if err != nil { - return nil, err - } - - return &WorkingDir{ - h: h, - tf: tf, - baseDir: dir, - terraformExec: h.terraformExec, - }, nil -} - -// RequireNewWorkingDir is a variant of NewWorkingDir that takes a TestControl -// object and will immediately fail the running test if the creation of the -// working directory fails. 
-func (h *Helper) RequireNewWorkingDir(t TestControl) *WorkingDir { - t.Helper() - - wd, err := h.NewWorkingDir() - if err != nil { - t := testingT{t} - t.Fatalf("failed to create new working directory: %s", err) - return nil - } - return wd -} - -// TerraformExecPath returns the location of the Terraform CLI executable that -// should be used when running tests. -func (h *Helper) TerraformExecPath() string { - return h.terraformExec -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/plugin.go deleted file mode 100644 index 4764c6b4c7..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-test/v2/plugin.go +++ /dev/null @@ -1,15 +0,0 @@ -package tftest - -import ( - "os" -) - -// RunningAsPlugin returns true if it detects the usual Terraform plugin -// detection environment variables, suggesting that the current process is -// being launched as a plugin server. -func RunningAsPlugin() bool { - const cookieVar = "TF_PLUGIN_MAGIC_COOKIE" - const cookieVal = "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2" - - return os.Getenv(cookieVar) == cookieVal -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/util.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/util.go deleted file mode 100644 index 57bc84f2dc..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-test/v2/util.go +++ /dev/null @@ -1,95 +0,0 @@ -package tftest - -import ( - "os" - "path/filepath" -) - -// symlinkFile symlinks src to dest and, if the link succeeds, applies the -// source file's mode to the destination. -func symlinkFile(src string, dest string) (err error) { - err = os.Symlink(src, dest) - if err == nil { - var srcInfo os.FileInfo - srcInfo, err = os.Stat(src) - if err == nil { - err = os.Chmod(dest, srcInfo.Mode()) - } - } - - return -} - -// symlinkDir is a simplistic function for recursively symlinking all files in a directory to a new path. -// It is intended only for limited internal use and does not cover all edge cases. -func symlinkDir(srcDir string, destDir string) (err error) { - srcInfo, err := os.Stat(srcDir) - if err != nil { - return err - } - - err = os.MkdirAll(destDir, srcInfo.Mode()) - if err != nil { - return err - } - - directory, _ := os.Open(srcDir) - defer directory.Close() - objects, err := directory.Readdir(-1) - - for _, obj := range objects { - srcPath := filepath.Join(srcDir, obj.Name()) - destPath := filepath.Join(destDir, obj.Name()) - - if obj.IsDir() { - err = symlinkDir(srcPath, destPath) - if err != nil { - return err - } - } else { - err = symlinkFile(srcPath, destPath) - if err != nil { - return err - } - } - - } - return -} - -// symlinkDirectoriesOnly finds only the first-level child directories in srcDir -// and symlinks them into destDir. -// Unlike symlinkDir, this is done non-recursively in order to limit the number -// of file descriptors used.
-func symlinkDirectoriesOnly(srcDir string, destDir string) (err error) { - srcInfo, err := os.Stat(srcDir) - if err != nil { - return err - } - - err = os.MkdirAll(destDir, srcInfo.Mode()) - if err != nil { - return err - } - - directory, err := os.Open(srcDir) - if err != nil { - return err - } - defer directory.Close() - objects, err := directory.Readdir(-1) - if err != nil { - return err - } - - for _, obj := range objects { - srcPath := filepath.Join(srcDir, obj.Name()) - destPath := filepath.Join(destDir, obj.Name()) - - if obj.IsDir() { - err = symlinkFile(srcPath, destPath) - if err != nil { - return err - } - } - - } - return -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/working_dir.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/working_dir.go deleted file mode 100644 index bcf4c5a0ae..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-test/v2/working_dir.go +++ /dev/null @@ -1,394 +0,0 @@ -package tftest - -import ( - "bytes" - "context" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/hashicorp/terraform-exec/tfexec" - tfjson "github.com/hashicorp/terraform-json" -) - -const ( - ConfigFileName = "terraform_plugin_test.tf" - PlanFileName = "tfplan" -) - -// WorkingDir represents a distinct working directory that can be used for -// running tests. Each test should construct its own WorkingDir by calling -// NewWorkingDir or RequireNewWorkingDir on its package's singleton -// tftest.Helper. -type WorkingDir struct { - h *Helper - - // baseDir is the root of the working directory tree - baseDir string - - // baseArgs is arguments that should be appended to all commands - baseArgs []string - - // tf is the instance of tfexec.Terraform used for running Terraform commands - tf *tfexec.Terraform - - // terraformExec is a path to a terraform binary, inherited from Helper - terraformExec string - - // reattachInfo stores the gRPC socket info required for Terraform's - // plugin reattach functionality - reattachInfo tfexec.ReattachInfo - - env map[string]string -} - -// Close deletes the directories and files created to represent the receiving -// working directory. After this method is called, the working directory object -// is invalid and may no longer be used. -func (wd *WorkingDir) Close() error { - return os.RemoveAll(wd.baseDir) -} - -// Setenv sets an environment variable on the WorkingDir. -func (wd *WorkingDir) Setenv(envVar, val string) { - if wd.env == nil { - wd.env = map[string]string{} - } - wd.env[envVar] = val -} - -// Unsetenv removes an environment variable from the WorkingDir. -func (wd *WorkingDir) Unsetenv(envVar string) { - delete(wd.env, envVar) -} - -func (wd *WorkingDir) SetReattachInfo(reattachInfo tfexec.ReattachInfo) { - wd.reattachInfo = reattachInfo -} - -func (wd *WorkingDir) UnsetReattachInfo() { - wd.reattachInfo = nil -} - -// GetHelper returns the Helper set on the WorkingDir. -func (wd *WorkingDir) GetHelper() *Helper { - return wd.h -} - -// SetConfig sets a new configuration for the working directory. -// -// This must be called at least once before any call to Init, Plan, Apply, or -// Destroy to establish the configuration. Any previously-set configuration is -// discarded and any saved plan is cleared. 
-func (wd *WorkingDir) SetConfig(cfg string) error { - configFilename := filepath.Join(wd.baseDir, ConfigFileName) - err := ioutil.WriteFile(configFilename, []byte(cfg), 0700) - if err != nil { - return err - } - - var mismatch *tfexec.ErrVersionMismatch - err = wd.tf.SetDisablePluginTLS(true) - if err != nil && !errors.As(err, &mismatch) { - return err - } - err = wd.tf.SetSkipProviderVerify(true) - if err != nil && !errors.As(err, &mismatch) { - return err - } - - if p := os.Getenv("TF_ACC_LOG_PATH"); p != "" { - wd.tf.SetLogPath(p) - } - - // Changing configuration invalidates any saved plan. - err = wd.ClearPlan() - if err != nil { - return err - } - return nil -} - -// RequireSetConfig is a variant of SetConfig that will fail the test via the -// given TestControl if the configuration cannot be set. -func (wd *WorkingDir) RequireSetConfig(t TestControl, cfg string) { - t.Helper() - if err := wd.SetConfig(cfg); err != nil { - t := testingT{t} - t.Fatalf("failed to set config: %s", err) - } -} - -// ClearState deletes any Terraform state present in the working directory. -// -// Any remote objects tracked by the state are not destroyed first, so this -// will leave them dangling in the remote system. -func (wd *WorkingDir) ClearState() error { - err := os.Remove(filepath.Join(wd.baseDir, "terraform.tfstate")) - if os.IsNotExist(err) { - return nil - } - return err -} - -// RequireClearState is a variant of ClearState that will fail the test via the -// given TestControl if the state cannot be cleared. -func (wd *WorkingDir) RequireClearState(t TestControl) { - t.Helper() - if err := wd.ClearState(); err != nil { - t := testingT{t} - t.Fatalf("failed to clear state: %s", err) - } -} - -// ClearPlan deletes any saved plan present in the working directory. -func (wd *WorkingDir) ClearPlan() error { - err := os.Remove(wd.planFilename()) - if os.IsNotExist(err) { - return nil - } - return err -} - -// RequireClearPlan is a variant of ClearPlan that will fail the test via the -// given TestControl if the plan cannot be cleared. -func (wd *WorkingDir) RequireClearPlan(t TestControl) { - t.Helper() - if err := wd.ClearPlan(); err != nil { - t := testingT{t} - t.Fatalf("failed to clear plan: %s", err) - } -} - -// Init runs "terraform init" for the given working directory, forcing Terraform -// to use the current version of the plugin under test. -func (wd *WorkingDir) Init() error { - if _, err := os.Stat(wd.configFilename()); err != nil { - return fmt.Errorf("must call SetConfig before Init") - } - - return wd.tf.Init(context.Background(), tfexec.Reattach(wd.reattachInfo)) -} - -func (wd *WorkingDir) configFilename() string { - return filepath.Join(wd.baseDir, ConfigFileName) -} - -// RequireInit is a variant of Init that will fail the test via the given -// TestControl if init fails. -func (wd *WorkingDir) RequireInit(t TestControl) { - t.Helper() - if err := wd.Init(); err != nil { - t := testingT{t} - t.Fatalf("init failed: %s", err) - } -} - -func (wd *WorkingDir) planFilename() string { - return filepath.Join(wd.baseDir, PlanFileName) -} - -// CreatePlan runs "terraform plan" to create a saved plan file, which if successful -// will then be used for the next call to Apply. -func (wd *WorkingDir) CreatePlan() error { - _, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out(PlanFileName)) - return err -} - -// RequireCreatePlan is a variant of CreatePlan that will fail the test via -// the given TestControl if plan creation fails. 
-func (wd *WorkingDir) RequireCreatePlan(t TestControl) { - t.Helper() - if err := wd.CreatePlan(); err != nil { - t := testingT{t} - t.Fatalf("failed to create plan: %s", err) - } -} - -// CreateDestroyPlan runs "terraform plan -destroy" to create a saved plan -// file, which if successful will then be used for the next call to Apply. -func (wd *WorkingDir) CreateDestroyPlan() error { - _, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out(PlanFileName), tfexec.Destroy(true)) - return err -} - -// Apply runs "terraform apply". If CreatePlan has previously completed -// successfully and the saved plan has not been cleared in the meantime then -// this will apply the saved plan. Otherwise, it will implicitly create a new -// plan and apply it. -func (wd *WorkingDir) Apply() error { - args := []tfexec.ApplyOption{tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)} - if wd.HasSavedPlan() { - args = append(args, tfexec.DirOrPlan(PlanFileName)) - } - - return wd.tf.Apply(context.Background(), args...) -} - -// RequireApply is a variant of Apply that will fail the test via -// the given TestControl if the apply operation fails. -func (wd *WorkingDir) RequireApply(t TestControl) { - t.Helper() - if err := wd.Apply(); err != nil { - t := testingT{t} - t.Fatalf("failed to apply: %s", err) - } -} - -// Destroy runs "terraform destroy". It does not consider or modify any saved -// plan, and is primarily for cleaning up at the end of a test run. -// -// If destroy fails then remote objects might still exist, and continue to -// exist after a particular test is concluded. -func (wd *WorkingDir) Destroy() error { - return wd.tf.Destroy(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)) -} - -// RequireDestroy is a variant of Destroy that will fail the test via -// the given TestControl if the destroy operation fails. -// -// If destroy fails then remote objects might still exist, and continue to -// exist after a particular test is concluded. -func (wd *WorkingDir) RequireDestroy(t TestControl) { - t.Helper() - if err := wd.Destroy(); err != nil { - t := testingT{t} - t.Logf("WARNING: destroy failed, so remote objects may still exist and be subject to billing") - t.Fatalf("failed to destroy: %s", err) - } -} - -// HasSavedPlan returns true if there is a saved plan in the working directory. If -// so, a subsequent call to Apply will apply that saved plan. -func (wd *WorkingDir) HasSavedPlan() bool { - _, err := os.Stat(wd.planFilename()) - return err == nil -} - -// SavedPlan returns an object describing the current saved plan file, if any. -// -// If no plan is saved or if the plan file cannot be read, SavedPlan returns -// an error. -func (wd *WorkingDir) SavedPlan() (*tfjson.Plan, error) { - if !wd.HasSavedPlan() { - return nil, fmt.Errorf("there is no current saved plan") - } - - return wd.tf.ShowPlanFile(context.Background(), wd.planFilename(), tfexec.Reattach(wd.reattachInfo)) -} - -// RequireSavedPlan is a variant of SavedPlan that will fail the test via -// the given TestControl if the plan cannot be read. -func (wd *WorkingDir) RequireSavedPlan(t TestControl) *tfjson.Plan { - t.Helper() - ret, err := wd.SavedPlan() - if err != nil { - t := testingT{t} - t.Fatalf("failed to read saved plan: %s", err) - } - return ret -} - -// SavedPlanStdout returns a stdout capture of the current saved plan file, if any. -// -// If no plan is saved or if the plan file cannot be read, SavedPlanStdout returns -// an error. 
-func (wd *WorkingDir) SavedPlanStdout() (string, error) { - if !wd.HasSavedPlan() { - return "", fmt.Errorf("there is no current saved plan") - } - - var ret bytes.Buffer - - wd.tf.SetStdout(&ret) - defer wd.tf.SetStdout(ioutil.Discard) - _, err := wd.tf.ShowPlanFileRaw(context.Background(), wd.planFilename(), tfexec.Reattach(wd.reattachInfo)) - if err != nil { - return "", err - } - - return ret.String(), nil -} - -// RequireSavedPlanStdout is a variant of SavedPlanStdout that will fail the test via -// the given TestControl if the plan cannot be read. -func (wd *WorkingDir) RequireSavedPlanStdout(t TestControl) string { - t.Helper() - ret, err := wd.SavedPlanStdout() - if err != nil { - t := testingT{t} - t.Fatalf("failed to read saved plan: %s", err) - } - return ret -} - -// State returns an object describing the current state. -// -// If the state cannot be read, State returns an error. -func (wd *WorkingDir) State() (*tfjson.State, error) { - return wd.tf.Show(context.Background(), tfexec.Reattach(wd.reattachInfo)) -} - -// RequireState is a variant of State that will fail the test via -// the given TestControl if the state cannot be read. -func (wd *WorkingDir) RequireState(t TestControl) *tfjson.State { - t.Helper() - ret, err := wd.State() - if err != nil { - t := testingT{t} - t.Fatalf("failed to read state: %s", err) - } - return ret -} - -// Import runs terraform import -func (wd *WorkingDir) Import(resource, id string) error { - return wd.tf.Import(context.Background(), resource, id, tfexec.Config(wd.baseDir), tfexec.Reattach(wd.reattachInfo)) -} - -// RequireImport is a variant of Import that will fail the test via -// the given TestControl if the import is unsuccessful. -func (wd *WorkingDir) RequireImport(t TestControl, resource, id string) { - t.Helper() - if err := wd.Import(resource, id); err != nil { - t := testingT{t} - t.Fatalf("failed to import: %s", err) - } -} - -// Refresh runs terraform refresh -func (wd *WorkingDir) Refresh() error { - return wd.tf.Refresh(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.State(filepath.Join(wd.baseDir, "terraform.tfstate"))) -} - -// RequireRefresh is a variant of Refresh that will fail the test via -// the given TestControl if the refresh is unsuccessful. -func (wd *WorkingDir) RequireRefresh(t TestControl) { - t.Helper() - if err := wd.Refresh(); err != nil { - t := testingT{t} - t.Fatalf("failed to refresh: %s", err) - } -} - -// Schemas returns an object describing the provider schemas. -// -// If the schemas cannot be read, Schemas returns an error. -func (wd *WorkingDir) Schemas() (*tfjson.ProviderSchemas, error) { - return wd.tf.ProvidersSchema(context.Background()) -} - -// RequireSchemas is a variant of Schemas that will fail the test via -// the given TestControl if the schemas cannot be read.
-func (wd *WorkingDir) RequireSchemas(t TestControl) *tfjson.ProviderSchemas { - t.Helper() - - ret, err := wd.Schemas() - if err != nil { - t := testingT{t} - t.Fatalf("failed to read schemas: %s", err) - } - return ret -} diff --git a/vendor/github.com/hashicorp/terraform-registry-address/.copywrite.hcl b/vendor/github.com/hashicorp/terraform-registry-address/.copywrite.hcl new file mode 100644 index 0000000000..235a80dc46 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-registry-address/.copywrite.hcl @@ -0,0 +1,7 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2021 + header_ignore = [] +} diff --git a/vendor/github.com/hashicorp/terraform-registry-address/.go-version b/vendor/github.com/hashicorp/terraform-registry-address/.go-version new file mode 100644 index 0000000000..bc4493477a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-registry-address/.go-version @@ -0,0 +1 @@ +1.19 diff --git a/vendor/github.com/hashicorp/terraform-registry-address/LICENSE b/vendor/github.com/hashicorp/terraform-registry-address/LICENSE new file mode 100644 index 0000000000..84cd064397 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-registry-address/LICENSE @@ -0,0 +1,356 @@ +Copyright (c) 2021 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. 
“Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. 
Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/terraform-registry-address/README.md b/vendor/github.com/hashicorp/terraform-registry-address/README.md new file mode 100644 index 0000000000..27db81f7c1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-registry-address/README.md @@ -0,0 +1,181 @@ +# terraform-registry-address + +This module enables parsing, comparison and canonical representation of +[Terraform Registry](https://registry.terraform.io/) **provider** addresses +(such as `registry.terraform.io/grafana/grafana` or `hashicorp/aws`) +and **module** addresses (such as `hashicorp/subnets/cidr`). 
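The canonical-representation claim above is what makes comparison straightforward: two spellings of the same provider parse to the same value. A small sketch of that round trip, assuming `Provider`'s `String()` and `ForDisplay()` methods (analogous to the `Module` methods shown later in this diff); since `Provider` is a plain comparable struct, `==` suffices for comparison:

```go
package main

import (
	"fmt"

	tfaddr "github.com/hashicorp/terraform-registry-address"
)

func main() {
	// The short and fully-qualified spellings of the same provider
	// parse to the same canonical Provider value.
	short, err := tfaddr.ParseProviderSource("hashicorp/aws")
	if err != nil {
		panic(err)
	}
	full, err := tfaddr.ParseProviderSource("registry.terraform.io/hashicorp/aws")
	if err != nil {
		panic(err)
	}

	fmt.Println(short.String())     // registry.terraform.io/hashicorp/aws
	fmt.Println(short.ForDisplay()) // hashicorp/aws
	fmt.Println(short == full)      // true: Provider is a comparable struct
}
```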
+ +**Provider** addresses can be found in + + - [`terraform show -json `](https://www.terraform.io/internals/json-format#configuration-representation) (`full_name`) + - [`terraform version -json`](https://www.terraform.io/cli/commands/version#example) (`provider_selections`) + - [`terraform providers schema -json`](https://www.terraform.io/cli/commands/providers/schema#providers-schema-representation) (keys of `provider_schemas`) + - within `required_providers` block in Terraform configuration (`*.tf`) + - Terraform [CLI configuration file](https://www.terraform.io/cli/config/config-file#provider-installation) + - Plugin [reattach configurations](https://www.terraform.io/plugin/debugging#running-terraform-with-a-provider-in-debug-mode) + +**Module** addresses can be found within the `source` argument +of a `module` block in Terraform configuration (`*.tf`) +and parts of the address (namespace and name) in the Registry API. + +## Compatibility + +The module assumes compatibility with Terraform v0.12 and later, +which produce the JSON output mentioned above via the corresponding CLI flags. + +We recommend carefully reading the [ambiguous provider addresses](#Ambiguous-Provider-Addresses) +section below, which may impact versions `0.12` and `0.13`. + +## Related Libraries + +Other libraries which may help with consuming most of the above Terraform +outputs in automation: + + - [`hashicorp/terraform-exec`](https://github.com/hashicorp/terraform-exec) + - [`hashicorp/terraform-json`](https://github.com/hashicorp/terraform-json) + +## Usage + +### Provider + +```go +pAddr, err := ParseProviderSource("hashicorp/aws") +if err != nil { + // deal with error +} + +// pAddr == Provider{ +// Type: "aws", +// Namespace: "hashicorp", +// Hostname: DefaultProviderRegistryHost, +// } +``` + +### Module + +```go +mAddr, err := ParseModuleSource("hashicorp/consul/aws//modules/consul-cluster") +if err != nil { + // deal with error +} + +// mAddr == Module{ +// Package: ModulePackage{ +// Host: DefaultProviderRegistryHost, +// Namespace: "hashicorp", +// Name: "consul", +// TargetSystem: "aws", +// }, +// Subdir: "modules/consul-cluster", +// } +``` + +## Other Module Address Formats + +Modules can also be sourced from [other sources](https://www.terraform.io/language/modules/sources) +and these other sources (outside of Terraform Registry) +have different address formats, such as `./local` or +`github.com/hashicorp/example`. + +This library does _not_ recognize such other address formats +and it will return an error upon parsing these. + +## Ambiguous Provider Addresses + +Qualified addresses with namespace (such as `hashicorp/aws`) +are used exclusively in all recent versions (`0.14+`) of Terraform. +If you only work with Terraform `v0.14.0+` configuration/output, you may +safely ignore the rest of this section and related part of the API. + +There are a few types of ambiguous addresses you may come across: + + - Terraform `v0.12` uses "namespace-less address", such as `aws`. + - Terraform `v0.13` may use `-` as a placeholder for the unknown namespace, + resulting in an address such as `-/aws`. + - Terraform `v0.14+` _configuration_ still allows ambiguous providers + through `provider "" {}` block _without_ corresponding + entry inside `required_providers`, but these providers are always + resolved as `hashicorp/` and all JSON outputs only use that + resolved address.
+ +Both ambiguous address formats are accepted by `ParseProviderSource()`: + +```go +pAddr, err := ParseProviderSource("aws") +if err != nil { + // deal with error +} + +// pAddr == Provider{ +// Type: "aws", +// Namespace: UnknownProviderNamespace, // "?" +// Hostname: DefaultProviderRegistryHost, // "registry.terraform.io" +// } +pAddr.HasKnownNamespace() // == false +pAddr.IsLegacy() // == false +``` +```go +pAddr, err := ParseProviderSource("-/aws") +if err != nil { + // deal with error +} + +// pAddr == Provider{ +// Type: "aws", +// Namespace: LegacyProviderNamespace, // "-" +// Hostname: DefaultProviderRegistryHost, // "registry.terraform.io" +// } +pAddr.HasKnownNamespace() // == true +pAddr.IsLegacy() // == true +``` + +However, `NewProvider()` will panic if you pass an empty namespace +or any placeholder indicating an unknown namespace. + +```go +NewProvider(DefaultProviderRegistryHost, "", "aws") // panic +NewProvider(DefaultProviderRegistryHost, "-", "aws") // panic +NewProvider(DefaultProviderRegistryHost, "?", "aws") // panic +``` + +If you come across an ambiguous address, you should resolve +it to a fully qualified one and use that one instead. + +### Resolving Ambiguous Address + +The Registry API provides the safest way of resolving an ambiguous address. + +```sh +# grafana (redirected to its own namespace) +$ curl -s https://registry.terraform.io/v1/providers/-/grafana/versions | jq '(.id, .moved_to)' +"terraform-providers/grafana" +"grafana/grafana" + +# aws (provider without redirection) +$ curl -s https://registry.terraform.io/v1/providers/-/aws/versions | jq '(.id, .moved_to)' +"hashicorp/aws" +null +``` + +When you cache results, ensure you have an invalidation +mechanism in place, as the target (migrated) namespace may change. + +#### `terraform` provider + +Like any other legacy address, `terraform` is also ambiguous. Such an address may +(however unlikely) represent a custom-built provider called `terraform`, +or the now archived [`hashicorp/terraform` provider in the registry](https://registry.terraform.io/providers/hashicorp/terraform/latest), +or (most likely) the `terraform` provider built into 0.11+, which is +represented via a dedicated FQN of `terraform.io/builtin/terraform` in 0.13+. + +You may be able to differentiate between these different providers if you +know the version of Terraform. + +Alternatively, you may just treat the address as the builtin provider, +i.e. assume all of its logic including schema is contained within +Terraform Core. + +In such a case you should construct the address in the following way: +```go +pAddr := NewProvider(BuiltInProviderHost, BuiltInProviderNamespace, "terraform") +``` diff --git a/vendor/github.com/hashicorp/terraform-registry-address/errors.go b/vendor/github.com/hashicorp/terraform-registry-address/errors.go new file mode 100644 index 0000000000..cf977115a5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-registry-address/errors.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package tfaddr + +import ( + "fmt" +) + +type ParserError struct { + Summary string + Detail string +} + +func (pe *ParserError) Error() string { + return fmt.Sprintf("%s: %s", pe.Summary, pe.Detail) +} diff --git a/vendor/github.com/hashicorp/terraform-registry-address/module.go b/vendor/github.com/hashicorp/terraform-registry-address/module.go new file mode 100644 index 0000000000..f000a7410e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-registry-address/module.go @@ -0,0 +1,254 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfaddr + +import ( + "fmt" + "path" + "regexp" + "strings" + + svchost "github.com/hashicorp/terraform-svchost" +) + +// Module represents a module listed in a Terraform module +// registry. +type Module struct { + // Package is the registry package that the target module belongs to. + // The module installer must translate this into a ModuleSourceRemote + // using the registry API and then take that underlying address's + // Package in order to find the actual package location. + Package ModulePackage + + // If Subdir is non-empty then it represents a sub-directory within the + // remote package that the registry address eventually resolves to. + // This will ultimately become the suffix of the Subdir of the + // ModuleSourceRemote that the registry address translates to. + // + // Subdir uses a normalized forward-slash-based path syntax within the + // virtual filesystem represented by the final package. It will never + // include `../` or `./` sequences. + Subdir string +} + +// DefaultModuleRegistryHost is the hostname used for registry-based module +// source addresses that do not have an explicit hostname. +const DefaultModuleRegistryHost = svchost.Hostname("registry.terraform.io") + +var moduleRegistryNamePattern = regexp.MustCompile("^[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?$") +var moduleRegistryTargetSystemPattern = regexp.MustCompile("^[0-9a-z]{1,64}$") + +// ParseModuleSource only accepts module registry addresses, and +// will reject any other address type. +func ParseModuleSource(raw string) (Module, error) { + var err error + + var subDir string + raw, subDir = splitPackageSubdir(raw) + if strings.HasPrefix(subDir, "../") { + return Module{}, fmt.Errorf("subdirectory path %q leads outside of the module package", subDir) + } + + parts := strings.Split(raw, "/") + // A valid registry address has either three or four parts, because the + // leading hostname part is optional. + if len(parts) != 3 && len(parts) != 4 { + return Module{}, fmt.Errorf("a module registry source address must have either three or four slash-separated components") + } + + host := DefaultModuleRegistryHost + if len(parts) == 4 { + host, err = svchost.ForComparison(parts[0]) + if err != nil { + // The svchost library doesn't produce very good error messages to + // return to an end-user, so we'll use some custom ones here. + switch { + case strings.Contains(parts[0], "--"): + // Looks like possibly punycode, which we don't allow here + // to ensure that source addresses are written readably.
+ return Module{}, fmt.Errorf("invalid module registry hostname %q; internationalized domain names must be given as direct unicode characters, not in punycode", parts[0]) + default: + return Module{}, fmt.Errorf("invalid module registry hostname %q", parts[0]) + } + } + if !strings.Contains(host.String(), ".") { + return Module{}, fmt.Errorf("invalid module registry hostname: must contain at least one dot") + } + // Discard the hostname prefix now that we've processed it + parts = parts[1:] + } + + ret := Module{ + Package: ModulePackage{ + Host: host, + }, + + Subdir: subDir, + } + + if host == svchost.Hostname("github.com") || host == svchost.Hostname("bitbucket.org") { + return ret, fmt.Errorf("can't use %q as a module registry host, because it's reserved for installing directly from version control repositories", host) + } + + if ret.Package.Namespace, err = parseModuleRegistryName(parts[0]); err != nil { + if strings.Contains(parts[0], ".") { + // Seems like the user omitted one of the latter components in + // an address with an explicit hostname. + return ret, fmt.Errorf("source address must have three more components after the hostname: the namespace, the name, and the target system") + } + return ret, fmt.Errorf("invalid namespace %q: %s", parts[0], err) + } + if ret.Package.Name, err = parseModuleRegistryName(parts[1]); err != nil { + return ret, fmt.Errorf("invalid module name %q: %s", parts[1], err) + } + if ret.Package.TargetSystem, err = parseModuleRegistryTargetSystem(parts[2]); err != nil { + if strings.Contains(parts[2], "?") { + // The user was trying to include a query string, probably? + return ret, fmt.Errorf("module registry addresses may not include a query string portion") + } + return ret, fmt.Errorf("invalid target system %q: %s", parts[2], err) + } + + return ret, nil +} + +// MustParseModuleSource is a wrapper around ParseModuleSource that panics if +// it returns an error. +func MustParseModuleSource(raw string) (Module) { + mod, err := ParseModuleSource(raw) + if err != nil { + panic(err) + } + return mod +} + +// parseModuleRegistryName validates and normalizes a string in either the +// "namespace" or "name" position of a module registry source address. +func parseModuleRegistryName(given string) (string, error) { + // Similar to the names in provider source addresses, we defined these + // to be compatible with what filesystems and typical remote systems + // like GitHub allow in names. Unfortunately we didn't end up defining + // these exactly equivalently: provider names can only use dashes as + // punctuation, whereas module names can use underscores. So here we're + // using some regular expressions from the original module source + // implementation, rather than using the IDNA rules as we do in + // ParseProviderPart. + + if !moduleRegistryNamePattern.MatchString(given) { + return "", fmt.Errorf("must be between one and 64 characters, including ASCII letters, digits, dashes, and underscores, where dashes and underscores may not be the prefix or suffix") + } + + // We also skip normalizing the name to lowercase, because we historically + // didn't do that and so existing module registries might be doing + // case-sensitive matching. + return given, nil +} + +// parseModuleRegistryTargetSystem validates and normalizes a string in the +// "target system" position of a module registry source address. 
This is
+// what we historically called "provider" but never actually enforced as
+// being a provider address, and now _cannot_ be a provider address because
+// provider addresses have three slash-separated components of their own.
+func parseModuleRegistryTargetSystem(given string) (string, error) {
+	// Similar to the names in provider source addresses, we defined these
+	// to be compatible with what filesystems and typical remote systems
+	// like GitHub allow in names. Unfortunately we didn't end up defining
+	// these exactly equivalently: provider names can't use dashes or
+	// underscores. So here we're using some regular expressions from the
+	// original module source implementation, rather than using the IDNA rules
+	// as we do in ParseProviderPart.
+
+	if !moduleRegistryTargetSystemPattern.MatchString(given) {
+		return "", fmt.Errorf("must be between one and 64 ASCII letters or digits")
+	}
+
+	// We also skip normalizing the name to lowercase, because we historically
+	// didn't do that and so existing module registries might be doing
+	// case-sensitive matching.
+	return given, nil
+}
+
+// String returns a full representation of the address, including any
+// additional components that are typically implied by omission in
+// user-written addresses.
+//
+// We typically use this longer representation in error messages, in case
+// the inclusion of normally-omitted components is helpful in debugging
+// unexpected behavior.
+func (s Module) String() string {
+	if s.Subdir != "" {
+		return s.Package.String() + "//" + s.Subdir
+	}
+	return s.Package.String()
+}
+
+// ForDisplay is similar to String but instead returns a representation of
+// the idiomatic way to write the address in configuration, omitting
+// components that are commonly just implied in addresses written by
+// users.
+//
+// We typically use this shorter representation in informational messages,
+// such as the note that we're about to start downloading a package.
+func (s Module) ForDisplay() string {
+	if s.Subdir != "" {
+		return s.Package.ForDisplay() + "//" + s.Subdir
+	}
+	return s.Package.ForDisplay()
+}
+
+// splitPackageSubdir detects whether the given address string has a
+// subdirectory portion, and if so returns a non-empty subDir string
+// along with the trimmed package address.
+//
+// If the given string doesn't have a subdirectory portion then it'll
+// just be returned verbatim in packageAddr, with an empty subDir value.
+func splitPackageSubdir(given string) (packageAddr, subDir string) {
+	packageAddr, subDir = sourceDirSubdir(given)
+	if subDir != "" {
+		subDir = path.Clean(subDir)
+	}
+	return packageAddr, subDir
+}
+
+// sourceDirSubdir takes a source URL and returns a tuple of the URL without
+// the subdir and the subdir.
+//
+// ex:
+//	dom.com/path/?q=p               => dom.com/path/?q=p, ""
+//	proto://dom.com/path//*?q=p     => proto://dom.com/path?q=p, "*"
+//	proto://dom.com/path//path2?q=p => proto://dom.com/path?q=p, "path2"
+func sourceDirSubdir(src string) (string, string) {
+	// The URL might contain another URL in its query parameters.
+	stop := len(src)
+	if idx := strings.Index(src, "?"); idx > -1 {
+		stop = idx
+	}
+
+	// Calculate an offset to avoid accidentally marking the scheme
+	// as the dir.
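+	// (Without this, the "//" inside a scheme separator like "proto://"
+	// would itself be mistaken for the package/subdir delimiter that we
+	// search for below.)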
+	var offset int
+	if idx := strings.Index(src[:stop], "://"); idx > -1 {
+		offset = idx + 3
+	}
+
+	// First see if we even have an explicit subdir
+	idx := strings.Index(src[offset:stop], "//")
+	if idx == -1 {
+		return src, ""
+	}
+
+	idx += offset
+	subdir := src[idx+2:]
+	src = src[:idx]
+
+	// Next, check if we have query parameters and push them onto the
+	// URL.
+	if idx = strings.Index(subdir, "?"); idx > -1 {
+		query := subdir[idx:]
+		subdir = subdir[:idx]
+		src += query
+	}
+
+	return src, subdir
+}
diff --git a/vendor/github.com/hashicorp/terraform-registry-address/module_package.go b/vendor/github.com/hashicorp/terraform-registry-address/module_package.go
new file mode 100644
index 0000000000..be06596613
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-registry-address/module_package.go
@@ -0,0 +1,61 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfaddr
+
+import (
+	"strings"
+
+	svchost "github.com/hashicorp/terraform-svchost"
+)
+
+// A ModulePackage is an extra indirection over a remote module package,
+// where we use a module registry to translate a more symbolic address (and
+// an associated version constraint given out of band) into a physical source
+// location.
+//
+// A registry package address is distinct from the remote package address it
+// resolves to because the two have disjoint use-cases: registry package
+// addresses are only used to query a registry in order to find a real module
+// package address. Keeping these distinct is intended to help future
+// maintainers more easily follow the series of steps in the module installer,
+// with the help of the type checker.
+type ModulePackage struct {
+	Host         svchost.Hostname
+	Namespace    string
+	Name         string
+	TargetSystem string
+}
+
+func (s ModulePackage) String() string {
+	// Note: we're using the "display" form of the hostname here because
+	// for our service hostnames "for display" means something different:
+	// it means to render non-ASCII characters directly as Unicode
+	// characters, rather than using the "punycode" representation we
+	// use for internal processing, and so the "display" representation
+	// is actually what users would write in their configurations.
+	return s.Host.ForDisplay() + "/" + s.ForRegistryProtocol()
+}
+
+func (s ModulePackage) ForDisplay() string {
+	if s.Host == DefaultModuleRegistryHost {
+		return s.ForRegistryProtocol()
+	}
+	return s.Host.ForDisplay() + "/" + s.ForRegistryProtocol()
+}
+
+// ForRegistryProtocol returns a string representation of just the namespace,
+// name, and target system portions of the address, always omitting the
+// registry hostname and the subdirectory portion, if any.
+//
+// This is primarily intended for generating addresses to send to the
+// registry in question via the registry protocol, since the protocol
+// skips sending the registry its own hostname as part of identifiers.
+func (s ModulePackage) ForRegistryProtocol() string {
+	var buf strings.Builder
+	buf.WriteString(s.Namespace)
+	buf.WriteByte('/')
+	buf.WriteString(s.Name)
+	buf.WriteByte('/')
+	buf.WriteString(s.TargetSystem)
+	return buf.String()
+}
diff --git a/vendor/github.com/hashicorp/terraform-registry-address/provider.go b/vendor/github.com/hashicorp/terraform-registry-address/provider.go
new file mode 100644
index 0000000000..7fb252ea22
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-registry-address/provider.go
@@ -0,0 +1,446 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfaddr
+
+import (
+	"fmt"
+	"strings"
+
+	svchost "github.com/hashicorp/terraform-svchost"
+	"golang.org/x/net/idna"
+)
+
+// Provider encapsulates a single provider type, qualified by its registry
+// namespace and source hostname.
+type Provider struct {
+	Type      string
+	Namespace string
+	Hostname  svchost.Hostname
+}
+
+// DefaultProviderRegistryHost is the hostname used for provider addresses that do
+// not have an explicit hostname.
+const DefaultProviderRegistryHost = svchost.Hostname("registry.terraform.io")
+
+// BuiltInProviderHost is the pseudo-hostname used for the "built-in" provider
+// namespace. Built-in provider addresses must also have their namespace set
+// to BuiltInProviderNamespace in order to be considered as built-in.
+const BuiltInProviderHost = svchost.Hostname("terraform.io")
+
+// BuiltInProviderNamespace is the provider namespace used for "built-in"
+// providers. Built-in provider addresses must also have their hostname
+// set to BuiltInProviderHost in order to be considered as built-in.
+//
+// This namespace is literally named "builtin", in the hope that users
+// who see FQNs containing this will be able to infer the way in which they are
+// special, even if they haven't encountered the concept formally yet.
+const BuiltInProviderNamespace = "builtin"
+
+// UnknownProviderNamespace is the special string used to indicate an
+// unknown namespace, e.g. in "aws". This is equivalent to
+// LegacyProviderNamespace for <0.12 style addresses. This namespace
+// would never be produced by Terraform itself explicitly; it is
+// only an internal placeholder.
+const UnknownProviderNamespace = "?"
+
+// LegacyProviderNamespace is the special string used in the Namespace field
+// of type Provider to mark a legacy provider address. This special namespace
+// value would normally be invalid, and can be used only when the hostname is
+// DefaultProviderRegistryHost because that host owns the mapping from legacy name to
+// FQN. This may be produced by Terraform 0.13.
+const LegacyProviderNamespace = "-"
+
+// String returns an FQN string, intended for use in machine-readable output.
+func (pt Provider) String() string {
+	if pt.IsZero() {
+		panic("called String on zero-value addrs.Provider")
+	}
+	return pt.Hostname.ForDisplay() + "/" + pt.Namespace + "/" + pt.Type
+}
+
+// ForDisplay returns a user-friendly FQN string, simplified for readability. If
+// the provider is using the default hostname, the hostname is omitted.
+func (pt Provider) ForDisplay() string {
+	if pt.IsZero() {
+		panic("called ForDisplay on zero-value addrs.Provider")
+	}
+
+	if pt.Hostname == DefaultProviderRegistryHost {
+		return pt.Namespace + "/" + pt.Type
+	}
+	return pt.Hostname.ForDisplay() + "/" + pt.Namespace + "/" + pt.Type
+}
+
+// NewProvider constructs a provider address from its parts, and normalizes
+// the namespace and type parts to lowercase using unicode case folding rules
+// so that resulting addrs.Provider values can be compared using standard
+// Go equality rules (==).
+//
+// The hostname is given as a svchost.Hostname, which is required by the
+// contract of that type to have already been normalized for equality testing.
+//
+// This function will panic if the given namespace or type name are not valid.
+// When accepting namespace or type values from outside the program, use
+// ParseProviderPart first to check that the given value is valid.
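+//
+// For example (an illustrative sketch, not a call site taken from this
+// codebase):
+//
+//	p := NewProvider(DefaultProviderRegistryHost, "HashiCorp", "GitHub")
+//	_ = p.String() // "registry.terraform.io/hashicorp/github"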
+func NewProvider(hostname svchost.Hostname, namespace, typeName string) Provider {
+	if namespace == LegacyProviderNamespace {
+		// Legacy provider addresses must always be created via struct
+		panic("attempt to create legacy provider address using NewProvider; use Provider{} instead")
+	}
+	if namespace == UnknownProviderNamespace {
+		// Provider addresses with unknown namespace must always
+		// be created via struct
+		panic("attempt to create provider address with unknown namespace using NewProvider; use Provider{} instead")
+	}
+	if namespace == "" {
+		// This case is already handled by MustParseProviderPart() below,
+		// but we catch it early to provide a more helpful message.
+		panic("attempt to create provider address with empty namespace")
+	}
+
+	return Provider{
+		Type:      MustParseProviderPart(typeName),
+		Namespace: MustParseProviderPart(namespace),
+		Hostname:  hostname,
+	}
+}
+
+// LegacyString returns the provider type, which is frequently used
+// interchangeably with provider name. This function can and should be removed
+// when provider type is fully integrated. As a safeguard for future
+// refactoring, this function panics if the Provider is not a legacy provider.
+func (pt Provider) LegacyString() string {
+	if pt.IsZero() {
+		panic("called LegacyString on zero-value addrs.Provider")
+	}
+	if pt.Namespace != LegacyProviderNamespace && pt.Namespace != BuiltInProviderNamespace {
+		panic(pt.String() + " cannot be represented as a legacy string")
+	}
+	return pt.Type
+}
+
+// IsZero returns true if the receiver is the zero value of addrs.Provider.
+//
+// The zero value is not a valid addrs.Provider and calling other methods on
+// such a value is likely to either panic or otherwise misbehave.
+func (pt Provider) IsZero() bool {
+	return pt == Provider{}
+}
+
+// HasKnownNamespace returns true if the provider namespace is known
+// (including the legacy namespace).
+func (pt Provider) HasKnownNamespace() bool {
+	return pt.Namespace != UnknownProviderNamespace
+}
+
+// IsBuiltIn returns true if the receiver is the address of a "built-in"
+// provider. That is, a provider under terraform.io/builtin/ which is
+// included as part of the Terraform binary itself rather than one to be
+// installed from elsewhere.
+//
+// These are ignored by the provider installer because they are assumed to
+// already be available without any further installation.
+func (pt Provider) IsBuiltIn() bool {
+	return pt.Hostname == BuiltInProviderHost && pt.Namespace == BuiltInProviderNamespace
+}
+
+// LessThan returns true if the receiver should sort before the other given
+// address in an ordered list of provider addresses.
+//
+// This ordering is an arbitrary one just to allow deterministic results from
+// functions that would otherwise have no natural ordering. It's subject
+// to change in the future.
+func (pt Provider) LessThan(other Provider) bool {
+	switch {
+	case pt.Hostname != other.Hostname:
+		return pt.Hostname < other.Hostname
+	case pt.Namespace != other.Namespace:
+		return pt.Namespace < other.Namespace
+	default:
+		return pt.Type < other.Type
+	}
+}
+
+// IsLegacy returns true if the provider is a legacy-style provider.
+func (pt Provider) IsLegacy() bool {
+	if pt.IsZero() {
+		panic("called IsLegacy() on zero-value addrs.Provider")
+	}
+
+	return pt.Hostname == DefaultProviderRegistryHost && pt.Namespace == LegacyProviderNamespace
+}
+
+// Equals returns true if the receiver and other provider have the same attributes.
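+//
+// Because NewProvider normalizes the namespace and type parts to lowercase,
+// addresses that differ only in case compare as equal (illustrative sketch):
+//
+//	a := NewProvider(DefaultProviderRegistryHost, "hashicorp", "github")
+//	b := NewProvider(DefaultProviderRegistryHost, "HashiCorp", "GitHub")
+//	_ = a.Equals(b) // true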
+func (pt Provider) Equals(other Provider) bool {
+	return pt == other
+}
+
+// ParseProviderSource parses the source attribute and returns a provider.
+// This is intended primarily to parse the FQN-like strings returned by
+// terraform-config-inspect.
+//
+// The following are valid source string formats:
+//
+//	name
+//	namespace/name
+//	hostname/namespace/name
+//
+// The "name"-only format is parsed as ?/name (i.e. an unknown namespace),
+// requiring further identification of the namespace via the Registry API.
+func ParseProviderSource(str string) (Provider, error) {
+	var ret Provider
+	parts, err := parseSourceStringParts(str)
+	if err != nil {
+		return ret, err
+	}
+
+	name := parts[len(parts)-1]
+	ret.Type = name
+	ret.Hostname = DefaultProviderRegistryHost
+
+	if len(parts) == 1 {
+		return Provider{
+			Hostname:  DefaultProviderRegistryHost,
+			Namespace: UnknownProviderNamespace,
+			Type:      name,
+		}, nil
+	}
+
+	if len(parts) >= 2 {
+		// the namespace is always the second-to-last part
+		givenNamespace := parts[len(parts)-2]
+		if givenNamespace == LegacyProviderNamespace {
+			// For now we're tolerating legacy provider addresses until we've
+			// finished updating the rest of the codebase to no longer use them,
+			// or else we'd get errors round-tripping through legacy subsystems.
+			ret.Namespace = LegacyProviderNamespace
+		} else {
+			namespace, err := ParseProviderPart(givenNamespace)
+			if err != nil {
+				return Provider{}, &ParserError{
+					Summary: "Invalid provider namespace",
+					Detail:  fmt.Sprintf(`Invalid provider namespace %q in source %q: %s`, givenNamespace, str, err),
+				}
+			}
+			ret.Namespace = namespace
+		}
+	}
+
+	// Final case: 3 parts
+	if len(parts) == 3 {
+		// the hostname is always the first part in a three-part source string
+		hn, err := svchost.ForComparison(parts[0])
+		if err != nil {
+			return Provider{}, &ParserError{
+				Summary: "Invalid provider source hostname",
+				Detail:  fmt.Sprintf(`Invalid provider source hostname %q in source %q: %s`, parts[0], str, err),
+			}
+		}
+		ret.Hostname = hn
+	}
+
+	if ret.Namespace == LegacyProviderNamespace && ret.Hostname != DefaultProviderRegistryHost {
+		// Legacy provider addresses must always be on the default registry
+		// host, because the default registry host decides what actual FQN
+		// each one maps to.
+		return Provider{}, &ParserError{
+			Summary: "Invalid provider namespace",
+			Detail:  "The legacy provider namespace \"-\" can be used only with hostname " + DefaultProviderRegistryHost.ForDisplay() + ".",
+		}
+	}
+
+	// Due to how plugin executables are named and provider git repositories
+	// are conventionally named, it's a reasonable and
+	// apparently-somewhat-common user error to incorrectly use the
+	// "terraform-provider-" prefix in a provider source address. There is
+	// no good reason for a provider to have the prefix "terraform-" anyway,
+	// so we've made that invalid from the start both so we can give feedback
+	// to provider developers about the terraform- prefix being redundant
+	// and give specialized feedback to folks who incorrectly use the full
+	// terraform-provider- prefix to help them self-correct.
+	const redundantPrefix = "terraform-"
+	const userErrorPrefix = "terraform-provider-"
+	if strings.HasPrefix(ret.Type, redundantPrefix) {
+		if strings.HasPrefix(ret.Type, userErrorPrefix) {
+			// Likely user error.
We only return this specialized error if
+			// whatever is after the prefix would otherwise be a
+			// syntactically-valid provider type, so we don't end up advising
+			// the user to try something that would be invalid for another
+			// reason anyway.
+			// (This is mainly just for robustness, because the validation
+			// we already did above should've rejected most/all ways for
+			// the suggestedType to end up invalid here.)
+			suggestedType := ret.Type[len(userErrorPrefix):]
+			if _, err := ParseProviderPart(suggestedType); err == nil {
+				suggestedAddr := ret
+				suggestedAddr.Type = suggestedType
+				return Provider{}, &ParserError{
+					Summary: "Invalid provider type",
+					Detail:  fmt.Sprintf("Provider source %q has a type with the prefix %q, which isn't valid. Although that prefix is often used in the names of version control repositories for Terraform providers, provider source strings should not include it.\n\nDid you mean %q?", ret.ForDisplay(), userErrorPrefix, suggestedAddr.ForDisplay()),
+				}
+			}
+		}
+		// Otherwise, probably instead an incorrectly-named provider, perhaps
+		// arising from a similar instinct to what causes there to be
+		// thousands of Python packages on PyPI with "python-"-prefixed
+		// names.
+		return Provider{}, &ParserError{
+			Summary: "Invalid provider type",
+			Detail:  fmt.Sprintf("Provider source %q has a type with the prefix %q, which isn't allowed because it would be redundant to name a Terraform provider with that prefix. If you are the author of this provider, rename it to not include the prefix.", ret, redundantPrefix),
+		}
+	}
+
+	return ret, nil
+}
+
+// MustParseProviderSource is a wrapper around ParseProviderSource that panics
+// if it returns an error.
+func MustParseProviderSource(raw string) Provider {
+	p, err := ParseProviderSource(raw)
+	if err != nil {
+		panic(err)
+	}
+	return p
+}
+
+// ValidateProviderAddress returns an error if the given address is not a
+// fully-qualified name (FQN), that is, if it is missing any of the three
+// components hostname/namespace/name.
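+//
+// For example (illustrative only):
+//
+//	ValidateProviderAddress("registry.terraform.io/integrations/github") // nil
+//	ValidateProviderAddress("integrations/github")                       // error: not an FQN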
+func ValidateProviderAddress(raw string) error {
+	parts, err := parseSourceStringParts(raw)
+	if err != nil {
+		return err
+	}
+
+	if len(parts) != 3 {
+		return &ParserError{
+			Summary: "Invalid provider address format",
+			Detail:  `Expected FQN in the format "hostname/namespace/name"`,
+		}
+	}
+
+	p, err := ParseProviderSource(raw)
+	if err != nil {
+		return err
+	}
+
+	if !p.HasKnownNamespace() {
+		return &ParserError{
+			Summary: "Unknown provider namespace",
+			Detail:  `Expected FQN in the format "hostname/namespace/name"`,
+		}
+	}
+
+	if p.IsLegacy() {
+		return &ParserError{
+			Summary: "Invalid legacy provider namespace",
+			Detail:  `Expected FQN in the format "hostname/namespace/name"`,
+		}
+	}
+
+	return nil
+}
+
+func parseSourceStringParts(str string) ([]string, error) {
+	// split the source string into individual components
+	parts := strings.Split(str, "/")
+	if len(parts) == 0 || len(parts) > 3 {
+		return nil, &ParserError{
+			Summary: "Invalid provider source string",
+			Detail:  `The "source" attribute must be in the format "[hostname/][namespace/]name"`,
+		}
+	}
+
+	// check for an invalid empty string in any part
+	for i := range parts {
+		if parts[i] == "" {
+			return nil, &ParserError{
+				Summary: "Invalid provider source string",
+				Detail:  `The "source" attribute must be in the format "[hostname/][namespace/]name"`,
+			}
+		}
+	}
+
+	// check the 'name' portion, which is always the last part
+	givenName := parts[len(parts)-1]
+	name, err := ParseProviderPart(givenName)
+	if err != nil {
+		return nil, &ParserError{
+			Summary: "Invalid provider type",
+			Detail:  fmt.Sprintf(`Invalid provider type %q in source %q: %s`, givenName, str, err),
+		}
+	}
+	parts[len(parts)-1] = name
+
+	return parts, nil
+}
+
+// ParseProviderPart processes an addrs.Provider namespace or type string
+// provided by an end-user, producing a normalized version if possible or
+// an error if the string contains invalid characters.
+//
+// A provider part is processed in the same way as an individual label in a DNS
+// domain name: it is transformed to lowercase per the usual DNS case mapping
+// and normalization rules and may contain only letters, digits, and dashes.
+// Additionally, dashes may not appear at the start or end of the string.
+//
+// These restrictions are intended to allow these names to appear in fussy
+// contexts such as directory/file names on case-insensitive filesystems,
+// repository names on GitHub, etc. We're using the DNS rules in particular,
+// rather than some similar rules defined locally, because the hostname part
+// of an addrs.Provider is already a hostname and it's ideal to use exactly
+// the same case folding and normalization rules for all of the parts.
+//
+// In practice a provider type string conventionally does not contain dashes
+// either. Such names are permitted, but providers with such type names will be
+// hard to use because their resource type names will not be able to contain
+// the provider type name and thus each resource will need an explicit provider
+// address specified. (A real-world example of such a provider is the
+// "google-beta" variant of the GCP provider, which has resource types that
+// start with the "google_" prefix instead.)
+//
+// It's valid to pass the result of this function as the argument to a
+// subsequent call, in which case the result will be identical.
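+//
+// For example (illustrative only):
+//
+//	ParseProviderPart("GitHub")  // "github", nil
+//	ParseProviderPart("my--bad") // "", error: consecutive dashes not allowed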
+func ParseProviderPart(given string) (string, error) { + if len(given) == 0 { + return "", fmt.Errorf("must have at least one character") + } + + // We're going to process the given name using the same "IDNA" library we + // use for the hostname portion, since it already implements the case + // folding rules we want. + // + // The idna library doesn't expose individual label parsing directly, but + // once we've verified it doesn't contain any dots we can just treat it + // like a top-level domain for this library's purposes. + if strings.ContainsRune(given, '.') { + return "", fmt.Errorf("dots are not allowed") + } + + // We don't allow names containing multiple consecutive dashes, just as + // a matter of preference: they look weird, confusing, or incorrect. + // This also, as a side-effect, prevents the use of the "punycode" + // indicator prefix "xn--" that would cause the IDNA library to interpret + // the given name as punycode, because that would be weird and unexpected. + if strings.Contains(given, "--") { + return "", fmt.Errorf("cannot use multiple consecutive dashes") + } + + result, err := idna.Lookup.ToUnicode(given) + if err != nil { + return "", fmt.Errorf("must contain only letters, digits, and dashes, and may not use leading or trailing dashes") + } + + return result, nil +} + +// MustParseProviderPart is a wrapper around ParseProviderPart that panics if +// it returns an error. +func MustParseProviderPart(given string) string { + result, err := ParseProviderPart(given) + if err != nil { + panic(err.Error()) + } + return result +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/CHANGELOG.md b/vendor/github.com/hashicorp/terraform-svchost/CHANGELOG.md new file mode 100644 index 0000000000..ed9f9932a9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/CHANGELOG.md @@ -0,0 +1,13 @@ +## v0.1.1 + +The `disco.Disco` and `auth.CachingCredentialsSource` implementations are now safe for concurrent calls. Previously concurrent calls could potentially corrupt the internal cache maps or cause the Go runtime to panic. + +## v0.1.0 + +#### Features: + +- Adds hostname `Alias` method to service discovery, making it possible to interpret one hostname as another. + +## v0.0.1 + +Initial release diff --git a/vendor/github.com/hashicorp/terraform-svchost/CONTRIBUTING.md b/vendor/github.com/hashicorp/terraform-svchost/CONTRIBUTING.md new file mode 100644 index 0000000000..c12e3bb41a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/CONTRIBUTING.md @@ -0,0 +1,3 @@ +# Contributing to the svchost library + +If you find an issue or would like to add a feature, please add an issue in GitHub. We welcome your contributions - fork the repo and submit a pull request. diff --git a/vendor/github.com/hashicorp/terraform-svchost/LICENSE b/vendor/github.com/hashicorp/terraform-svchost/LICENSE index 82b4de97c7..342bbb5bb9 100644 --- a/vendor/github.com/hashicorp/terraform-svchost/LICENSE +++ b/vendor/github.com/hashicorp/terraform-svchost/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2019 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. 
Definitions diff --git a/vendor/github.com/hashicorp/terraform-svchost/README.md b/vendor/github.com/hashicorp/terraform-svchost/README.md new file mode 100644 index 0000000000..3a12f013be --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/README.md @@ -0,0 +1,9 @@ +# terraform-svchost + +[![CI Tests](https://github.com/hashicorp/terraform-svchost/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/hashicorp/terraform-svchost/actions/workflows/ci.yml) +[![GitHub license](https://img.shields.io/github/license/hashicorp/terraform-svchost.svg)](https://github.com/hashicorp/terraform-svchost/blob/main/LICENSE) +[![GoDoc](https://godoc.org/github.com/hashicorp/terraform-svchost?status.svg)](https://godoc.org/github.com/hashicorp/terraform-svchost) +[![Go Report Card](https://goreportcard.com/badge/github.com/hashicorp/terraform-svchost)](https://goreportcard.com/report/github.com/hashicorp/terraform-svchost) +[![GitHub issues](https://img.shields.io/github/issues/hashicorp/terraform-svchost.svg)](https://github.com/hashicorp/terraform-svchost/issues) + +This package provides friendly hostnames, and is used by [terraform](https://github.com/hashicorp/terraform). diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go b/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go deleted file mode 100644 index 0dae567db9..0000000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go +++ /dev/null @@ -1,61 +0,0 @@ -package auth - -import ( - "github.com/hashicorp/terraform-svchost" -) - -// CachingCredentialsSource creates a new credentials source that wraps another -// and caches its results in memory, on a per-hostname basis. -// -// No means is provided for expiration of cached credentials, so a caching -// credentials source should have a limited lifetime (one Terraform operation, -// for example) to ensure that time-limited credentials don't expire before -// their cache entries do. -func CachingCredentialsSource(source CredentialsSource) CredentialsSource { - return &cachingCredentialsSource{ - source: source, - cache: map[svchost.Hostname]HostCredentials{}, - } -} - -type cachingCredentialsSource struct { - source CredentialsSource - cache map[svchost.Hostname]HostCredentials -} - -// ForHost passes the given hostname on to the wrapped credentials source and -// caches the result to return for future requests with the same hostname. -// -// Both credentials and non-credentials (nil) responses are cached. -// -// No cache entry is created if the wrapped source returns an error, to allow -// the caller to retry the failing operation. -func (s *cachingCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { - if cache, cached := s.cache[host]; cached { - return cache, nil - } - - result, err := s.source.ForHost(host) - if err != nil { - return result, err - } - - s.cache[host] = result - return result, nil -} - -func (s *cachingCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - // We'll delete the cache entry even if the store fails, since that just - // means that the next read will go to the real store and get a chance to - // see which object (old or new) is actually present. 
- delete(s.cache, host) - return s.source.StoreForHost(host, credentials) -} - -func (s *cachingCredentialsSource) ForgetForHost(host svchost.Hostname) error { - // We'll delete the cache entry even if the store fails, since that just - // means that the next read will go to the real store and get a chance to - // see if the object is still present. - delete(s.cache, host) - return s.source.ForgetForHost(host) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go b/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go deleted file mode 100644 index 36441cd115..0000000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go +++ /dev/null @@ -1,118 +0,0 @@ -// Package auth contains types and functions to manage authentication -// credentials for service hosts. -package auth - -import ( - "fmt" - "net/http" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-svchost" -) - -// Credentials is a list of CredentialsSource objects that can be tried in -// turn until one returns credentials for a host, or one returns an error. -// -// A Credentials is itself a CredentialsSource, wrapping its members. -// In principle one CredentialsSource can be nested inside another, though -// there is no good reason to do so. -// -// The write operations on a Credentials are tried only on the first object, -// under the assumption that it is the primary store. -type Credentials []CredentialsSource - -// NoCredentials is an empty CredentialsSource that always returns nil -// when asked for credentials. -var NoCredentials CredentialsSource = Credentials{} - -// A CredentialsSource is an object that may be able to provide credentials -// for a given host. -// -// Credentials lookups are not guaranteed to be concurrency-safe. Callers -// using these facilities in concurrent code must use external concurrency -// primitives to prevent race conditions. -type CredentialsSource interface { - // ForHost returns a non-nil HostCredentials if the source has credentials - // available for the host, and a nil HostCredentials if it does not. - // - // If an error is returned, progress through a list of CredentialsSources - // is halted and the error is returned to the user. - ForHost(host svchost.Hostname) (HostCredentials, error) - - // StoreForHost takes a HostCredentialsWritable and saves it as the - // credentials for the given host. - // - // If credentials are already stored for the given host, it will try to - // replace those credentials but may produce an error if such replacement - // is not possible. - StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error - - // ForgetForHost discards any stored credentials for the given host. It - // does nothing and returns successfully if no credentials are saved - // for that host. - ForgetForHost(host svchost.Hostname) error -} - -// HostCredentials represents a single set of credentials for a particular -// host. -type HostCredentials interface { - // PrepareRequest modifies the given request in-place to apply the - // receiving credentials. The usual behavior of this method is to - // add some sort of Authorization header to the request. - PrepareRequest(req *http.Request) - - // Token returns the authentication token. - Token() string -} - -// HostCredentialsWritable is an extension of HostCredentials for credentials -// objects that can be serialized as a JSON-compatible object value for -// storage. 
-type HostCredentialsWritable interface { - HostCredentials - - // ToStore returns a cty.Value, always of an object type, - // representing data that can be serialized to represent this object - // in persistent storage. - // - // The resulting value may uses only cty values that can be accepted - // by the cty JSON encoder, though the caller may elect to instead store - // it in some other format that has a JSON-compatible type system. - ToStore() cty.Value -} - -// ForHost iterates over the contained CredentialsSource objects and -// tries to obtain credentials for the given host from each one in turn. -// -// If any source returns either a non-nil HostCredentials or a non-nil error -// then this result is returned. Otherwise, the result is nil, nil. -func (c Credentials) ForHost(host svchost.Hostname) (HostCredentials, error) { - for _, source := range c { - creds, err := source.ForHost(host) - if creds != nil || err != nil { - return creds, err - } - } - return nil, nil -} - -// StoreForHost passes the given arguments to the same operation on the -// first CredentialsSource in the receiver. -func (c Credentials) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - if len(c) == 0 { - return fmt.Errorf("no credentials store is available") - } - - return c[0].StoreForHost(host, credentials) -} - -// ForgetForHost passes the given arguments to the same operation on the -// first CredentialsSource in the receiver. -func (c Credentials) ForgetForHost(host svchost.Hostname) error { - if len(c) == 0 { - return fmt.Errorf("no credentials store is available") - } - - return c[0].ForgetForHost(host) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go b/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go deleted file mode 100644 index 7198c6744b..0000000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go +++ /dev/null @@ -1,48 +0,0 @@ -package auth - -import ( - "github.com/zclconf/go-cty/cty" -) - -// HostCredentialsFromMap converts a map of key-value pairs from a credentials -// definition provided by the user (e.g. in a config file, or via a credentials -// helper) into a HostCredentials object if possible, or returns nil if -// no credentials could be extracted from the map. -// -// This function ignores map keys it is unfamiliar with, to allow for future -// expansion of the credentials map format for new credential types. -func HostCredentialsFromMap(m map[string]interface{}) HostCredentials { - if m == nil { - return nil - } - if token, ok := m["token"].(string); ok { - return HostCredentialsToken(token) - } - return nil -} - -// HostCredentialsFromObject converts a cty.Value of an object type into a -// HostCredentials object if possible, or returns nil if no credentials could -// be extracted from the map. -// -// This function ignores object attributes it is unfamiliar with, to allow for -// future expansion of the credentials object structure for new credential types. -// -// If the given value is not of an object type, this function will panic. -func HostCredentialsFromObject(obj cty.Value) HostCredentials { - if !obj.Type().HasAttribute("token") { - return nil - } - - tokenV := obj.GetAttr("token") - if tokenV.IsNull() || !tokenV.IsKnown() { - return nil - } - if !cty.String.Equals(tokenV.Type()) { - // Weird, but maybe some future Terraform version accepts an object - // here for some reason, so we'll be resilient. 
- return nil - } - - return HostCredentialsToken(tokenV.AsString()) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go b/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go deleted file mode 100644 index 76505f2099..0000000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go +++ /dev/null @@ -1,149 +0,0 @@ -package auth - -import ( - "bytes" - "encoding/json" - "fmt" - "os/exec" - "path/filepath" - - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-svchost" -) - -type helperProgramCredentialsSource struct { - executable string - args []string -} - -// HelperProgramCredentialsSource returns a CredentialsSource that runs the -// given program with the given arguments in order to obtain credentials. -// -// The given executable path must be an absolute path; it is the caller's -// responsibility to validate and process a relative path or other input -// provided by an end-user. If the given path is not absolute, this -// function will panic. -// -// When credentials are requested, the program will be run in a child process -// with the given arguments along with two additional arguments added to the -// end of the list: the literal string "get", followed by the requested -// hostname in ASCII compatibility form (punycode form). -func HelperProgramCredentialsSource(executable string, args ...string) CredentialsSource { - if !filepath.IsAbs(executable) { - panic("NewCredentialsSourceHelperProgram requires absolute path to executable") - } - - fullArgs := make([]string, len(args)+1) - fullArgs[0] = executable - copy(fullArgs[1:], args) - - return &helperProgramCredentialsSource{ - executable: executable, - args: fullArgs, - } -} - -func (s *helperProgramCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { - args := make([]string, len(s.args), len(s.args)+2) - copy(args, s.args) - args = append(args, "get") - args = append(args, string(host)) - - outBuf := bytes.Buffer{} - errBuf := bytes.Buffer{} - - cmd := exec.Cmd{ - Path: s.executable, - Args: args, - Stdin: nil, - Stdout: &outBuf, - Stderr: &errBuf, - } - err := cmd.Run() - if _, isExitErr := err.(*exec.ExitError); isExitErr { - errText := errBuf.String() - if errText == "" { - // Shouldn't happen for a well-behaved helper program - return nil, fmt.Errorf("error in %s, but it produced no error message", s.executable) - } - return nil, fmt.Errorf("error in %s: %s", s.executable, errText) - } else if err != nil { - return nil, fmt.Errorf("failed to run %s: %s", s.executable, err) - } - - var m map[string]interface{} - err = json.Unmarshal(outBuf.Bytes(), &m) - if err != nil { - return nil, fmt.Errorf("malformed output from %s: %s", s.executable, err) - } - - return HostCredentialsFromMap(m), nil -} - -func (s *helperProgramCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - args := make([]string, len(s.args), len(s.args)+2) - copy(args, s.args) - args = append(args, "store") - args = append(args, string(host)) - - toStore := credentials.ToStore() - toStoreRaw, err := ctyjson.Marshal(toStore, toStore.Type()) - if err != nil { - return fmt.Errorf("can't serialize credentials to store: %s", err) - } - - inReader := bytes.NewReader(toStoreRaw) - errBuf := bytes.Buffer{} - - cmd := exec.Cmd{ - Path: s.executable, - Args: args, - Stdin: inReader, - Stderr: &errBuf, - Stdout: nil, - } - err = cmd.Run() - if _, isExitErr := err.(*exec.ExitError); isExitErr { - errText := 
errBuf.String() - if errText == "" { - // Shouldn't happen for a well-behaved helper program - return fmt.Errorf("error in %s, but it produced no error message", s.executable) - } - return fmt.Errorf("error in %s: %s", s.executable, errText) - } else if err != nil { - return fmt.Errorf("failed to run %s: %s", s.executable, err) - } - - return nil -} - -func (s *helperProgramCredentialsSource) ForgetForHost(host svchost.Hostname) error { - args := make([]string, len(s.args), len(s.args)+2) - copy(args, s.args) - args = append(args, "forget") - args = append(args, string(host)) - - errBuf := bytes.Buffer{} - - cmd := exec.Cmd{ - Path: s.executable, - Args: args, - Stdin: nil, - Stderr: &errBuf, - Stdout: nil, - } - err := cmd.Run() - if _, isExitErr := err.(*exec.ExitError); isExitErr { - errText := errBuf.String() - if errText == "" { - // Shouldn't happen for a well-behaved helper program - return fmt.Errorf("error in %s, but it produced no error message", s.executable) - } - return fmt.Errorf("error in %s: %s", s.executable, errText) - } else if err != nil { - return fmt.Errorf("failed to run %s: %s", s.executable, err) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/static.go b/vendor/github.com/hashicorp/terraform-svchost/auth/static.go deleted file mode 100644 index f8b0b076e8..0000000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/static.go +++ /dev/null @@ -1,38 +0,0 @@ -package auth - -import ( - "fmt" - - "github.com/hashicorp/terraform-svchost" -) - -// StaticCredentialsSource is a credentials source that retrieves credentials -// from the provided map. It returns nil if a requested hostname is not -// present in the map. -// -// The caller should not modify the given map after passing it to this function. -func StaticCredentialsSource(creds map[svchost.Hostname]map[string]interface{}) CredentialsSource { - return staticCredentialsSource(creds) -} - -type staticCredentialsSource map[svchost.Hostname]map[string]interface{} - -func (s staticCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { - if s == nil { - return nil, nil - } - - if m, exists := s[host]; exists { - return HostCredentialsFromMap(m), nil - } - - return nil, nil -} - -func (s staticCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - return fmt.Errorf("can't store new credentials in a static credentials source") -} - -func (s staticCredentialsSource) ForgetForHost(host svchost.Hostname) error { - return fmt.Errorf("can't discard credentials from a static credentials source") -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go b/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go deleted file mode 100644 index 1d36553aee..0000000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go +++ /dev/null @@ -1,43 +0,0 @@ -package auth - -import ( - "net/http" - - "github.com/zclconf/go-cty/cty" -) - -// HostCredentialsToken is a HostCredentials implementation that represents a -// single "bearer token", to be sent to the server via an Authorization header -// with the auth type set to "Bearer". -// -// To save a token as the credentials for a host, convert the token string to -// this type and use the result as a HostCredentialsWritable implementation. -type HostCredentialsToken string - -// Interface implementation assertions. 
Compilation will fail here if -// HostCredentialsToken does not fully implement these interfaces. -var _ HostCredentials = HostCredentialsToken("") -var _ HostCredentialsWritable = HostCredentialsToken("") - -// PrepareRequest alters the given HTTP request by setting its Authorization -// header to the string "Bearer " followed by the encapsulated authentication -// token. -func (tc HostCredentialsToken) PrepareRequest(req *http.Request) { - if req.Header == nil { - req.Header = http.Header{} - } - req.Header.Set("Authorization", "Bearer "+string(tc)) -} - -// Token returns the authentication token. -func (tc HostCredentialsToken) Token() string { - return string(tc) -} - -// ToStore returns a credentials object with a single attribute "token" whose -// value is the token string. -func (tc HostCredentialsToken) ToStore() cty.Value { - return cty.ObjectVal(map[string]cty.Value{ - "token": cty.StringVal(string(tc)), - }) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go b/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go deleted file mode 100644 index 978313633f..0000000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go +++ /dev/null @@ -1,275 +0,0 @@ -// Package disco handles Terraform's remote service discovery protocol. -// -// This protocol allows mapping from a service hostname, as produced by the -// svchost package, to a set of services supported by that host and the -// endpoint information for each supported service. -package disco - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "mime" - "net/http" - "net/url" - "time" - - "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform-svchost/auth" -) - -const ( - // Fixed path to the discovery manifest. - discoPath = "/.well-known/terraform.json" - - // Arbitrary-but-small number to prevent runaway redirect loops. - maxRedirects = 3 - - // Arbitrary-but-small time limit to prevent UI "hangs" during discovery. - discoTimeout = 11 * time.Second - - // 1MB - to prevent abusive services from using loads of our memory. - maxDiscoDocBytes = 1 * 1024 * 1024 -) - -// httpTransport is overridden during tests, to skip TLS verification. -var httpTransport = defaultHttpTransport() - -// Disco is the main type in this package, which allows discovery on given -// hostnames and caches the results by hostname to avoid repeated requests -// for the same information. -type Disco struct { - hostCache map[svchost.Hostname]*Host - credsSrc auth.CredentialsSource - - // Transport is a custom http.RoundTripper to use. - Transport http.RoundTripper -} - -// New returns a new initialized discovery object. -func New() *Disco { - return NewWithCredentialsSource(nil) -} - -// NewWithCredentialsSource returns a new discovery object initialized with -// the given credentials source. -func NewWithCredentialsSource(credsSrc auth.CredentialsSource) *Disco { - return &Disco{ - hostCache: make(map[svchost.Hostname]*Host), - credsSrc: credsSrc, - Transport: httpTransport, - } -} - -func (d *Disco) SetUserAgent(uaString string) { - d.Transport = &userAgentRoundTripper{ - innerRt: d.Transport, - userAgent: uaString, - } -} - -// SetCredentialsSource provides a credentials source that will be used to -// add credentials to outgoing discovery requests, where available. -// -// If this method is never called, no outgoing discovery requests will have -// credentials. 
-func (d *Disco) SetCredentialsSource(src auth.CredentialsSource) { - d.credsSrc = src -} - -// CredentialsSource returns the credentials source associated with the receiver, -// or an empty credentials source if none is associated. -func (d *Disco) CredentialsSource() auth.CredentialsSource { - if d.credsSrc == nil { - // We'll return an empty one just to save the caller from having to - // protect against the nil case, since this interface already allows - // for the possibility of there being no credentials at all. - return auth.StaticCredentialsSource(nil) - } - return d.credsSrc -} - -// CredentialsForHost returns a non-nil HostCredentials if the embedded source has -// credentials available for the host, and a nil HostCredentials if it does not. -func (d *Disco) CredentialsForHost(hostname svchost.Hostname) (auth.HostCredentials, error) { - if d.credsSrc == nil { - return nil, nil - } - return d.credsSrc.ForHost(hostname) -} - -// ForceHostServices provides a pre-defined set of services for a given -// host, which prevents the receiver from attempting network-based discovery -// for the given host. Instead, the given services map will be returned -// verbatim. -// -// When providing "forced" services, any relative URLs are resolved against -// the initial discovery URL that would have been used for network-based -// discovery, yielding the same results as if the given map were published -// at the host's default discovery URL, though using absolute URLs is strongly -// recommended to make the configured behavior more explicit. -func (d *Disco) ForceHostServices(hostname svchost.Hostname, services map[string]interface{}) { - if services == nil { - services = map[string]interface{}{} - } - - d.hostCache[hostname] = &Host{ - discoURL: &url.URL{ - Scheme: "https", - Host: string(hostname), - Path: discoPath, - }, - hostname: hostname.ForDisplay(), - services: services, - transport: d.Transport, - } -} - -// Discover runs the discovery protocol against the given hostname (which must -// already have been validated and prepared with svchost.ForComparison) and -// returns an object describing the services available at that host. -// -// If a given hostname supports no Terraform services at all, a non-nil but -// empty Host object is returned. When giving feedback to the end user about -// such situations, we say "host does not provide a service", -// regardless of whether that is due to that service specifically being absent -// or due to the host not providing Terraform services at all, since we don't -// wish to expose the detail of whole-host discovery to an end-user. -func (d *Disco) Discover(hostname svchost.Hostname) (*Host, error) { - if host, cached := d.hostCache[hostname]; cached { - return host, nil - } - - host, err := d.discover(hostname) - if err != nil { - return nil, err - } - d.hostCache[hostname] = host - - return host, nil -} - -// DiscoverServiceURL is a convenience wrapper for discovery on a given -// hostname and then looking up a particular service in the result. -func (d *Disco) DiscoverServiceURL(hostname svchost.Hostname, serviceID string) (*url.URL, error) { - host, err := d.Discover(hostname) - if err != nil { - return nil, err - } - return host.ServiceURL(serviceID) -} - -// discover implements the actual discovery process, with its result cached -// by the public-facing Discover method. 
-func (d *Disco) discover(hostname svchost.Hostname) (*Host, error) { - discoURL := &url.URL{ - Scheme: "https", - Host: hostname.String(), - Path: discoPath, - } - - client := &http.Client{ - Transport: d.Transport, - Timeout: discoTimeout, - - CheckRedirect: func(req *http.Request, via []*http.Request) error { - log.Printf("[DEBUG] Service discovery redirected to %s", req.URL) - if len(via) > maxRedirects { - return errors.New("too many redirects") // this error will never actually be seen - } - return nil - }, - } - - req := &http.Request{ - Header: make(http.Header), - Method: "GET", - URL: discoURL, - } - req.Header.Set("Accept", "application/json") - - creds, err := d.CredentialsForHost(hostname) - if err != nil { - log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", hostname, err) - } - if creds != nil { - // Update the request to include credentials. - creds.PrepareRequest(req) - } - - log.Printf("[DEBUG] Service discovery for %s at %s", hostname, discoURL) - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("Failed to request discovery document: %v", err) - } - defer resp.Body.Close() - - host := &Host{ - // Use the discovery URL from resp.Request in - // case the client followed any redirects. - discoURL: resp.Request.URL, - hostname: hostname.ForDisplay(), - transport: d.Transport, - } - - // Return the host without any services. - if resp.StatusCode == 404 { - return host, nil - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("Failed to request discovery document: %s", resp.Status) - } - - contentType := resp.Header.Get("Content-Type") - mediaType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return nil, fmt.Errorf("Discovery URL has a malformed Content-Type %q", contentType) - } - if mediaType != "application/json" { - return nil, fmt.Errorf("Discovery URL returned an unsupported Content-Type %q", mediaType) - } - - // This doesn't catch chunked encoding, because ContentLength is -1 in that case. - if resp.ContentLength > maxDiscoDocBytes { - // Size limit here is not a contractual requirement and so we may - // adjust it over time if we find a different limit is warranted. - return nil, fmt.Errorf( - "Discovery doc response is too large (got %d bytes; limit %d)", - resp.ContentLength, maxDiscoDocBytes, - ) - } - - // If the response is using chunked encoding then we can't predict its - // size, but we'll at least prevent reading the entire thing into memory. - lr := io.LimitReader(resp.Body, maxDiscoDocBytes) - - servicesBytes, err := ioutil.ReadAll(lr) - if err != nil { - return nil, fmt.Errorf("Error reading discovery document body: %v", err) - } - - var services map[string]interface{} - err = json.Unmarshal(servicesBytes, &services) - if err != nil { - return nil, fmt.Errorf("Failed to decode discovery document as a JSON object: %v", err) - } - host.services = services - - return host, nil -} - -// Forget invalidates any cached record of the given hostname. If the host -// has no cache entry then this is a no-op. -func (d *Disco) Forget(hostname svchost.Hostname) { - delete(d.hostCache, hostname) -} - -// ForgetAll is like Forget, but for all of the hostnames that have cache entries. 
-func (d *Disco) ForgetAll() { - d.hostCache = make(map[svchost.Hostname]*Host) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/host.go b/vendor/github.com/hashicorp/terraform-svchost/disco/host.go deleted file mode 100644 index d0ec8ee672..0000000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/host.go +++ /dev/null @@ -1,423 +0,0 @@ -package disco - -import ( - "encoding/json" - "fmt" - "log" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-version" -) - -const versionServiceID = "versions.v1" - -// Host represents a service discovered host. -type Host struct { - discoURL *url.URL - hostname string - services map[string]interface{} - transport http.RoundTripper -} - -// Constraints represents the version constraints of a service. -type Constraints struct { - Service string `json:"service"` - Product string `json:"product"` - Minimum string `json:"minimum"` - Maximum string `json:"maximum"` - Excluding []string `json:"excluding"` -} - -// ErrServiceNotProvided is returned when the service is not provided. -type ErrServiceNotProvided struct { - hostname string - service string -} - -// Error returns a customized error message. -func (e *ErrServiceNotProvided) Error() string { - if e.hostname == "" { - return fmt.Sprintf("host does not provide a %s service", e.service) - } - return fmt.Sprintf("host %s does not provide a %s service", e.hostname, e.service) -} - -// ErrVersionNotSupported is returned when the version is not supported. -type ErrVersionNotSupported struct { - hostname string - service string - version string -} - -// Error returns a customized error message. -func (e *ErrVersionNotSupported) Error() string { - if e.hostname == "" { - return fmt.Sprintf("host does not support %s version %s", e.service, e.version) - } - return fmt.Sprintf("host %s does not support %s version %s", e.hostname, e.service, e.version) -} - -// ErrNoVersionConstraints is returned when checkpoint was disabled -// or the endpoint to query for version constraints was unavailable. -type ErrNoVersionConstraints struct { - disabled bool -} - -// Error returns a customized error message. -func (e *ErrNoVersionConstraints) Error() string { - if e.disabled { - return "checkpoint disabled" - } - return "unable to contact versions service" -} - -// ServiceURL returns the URL associated with the given service identifier, -// which should be of the form "servicename.vN". -// -// A non-nil result is always an absolute URL with a scheme of either HTTPS -// or HTTP. -func (h *Host) ServiceURL(id string) (*url.URL, error) { - svc, ver, err := parseServiceID(id) - if err != nil { - return nil, err - } - - // No services supported for an empty Host. - if h == nil || h.services == nil { - return nil, &ErrServiceNotProvided{service: svc} - } - - urlStr, ok := h.services[id].(string) - if !ok { - // See if we have a matching service as that would indicate - // the service is supported, but not the requested version. - for serviceID := range h.services { - if strings.HasPrefix(serviceID, svc+".") { - return nil, &ErrVersionNotSupported{ - hostname: h.hostname, - service: svc, - version: ver.Original(), - } - } - } - - // No discovered services match the requested service. 
- return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} - } - - u, err := h.parseURL(urlStr) - if err != nil { - return nil, fmt.Errorf("Failed to parse service URL: %v", err) - } - - return u, nil -} - -// ServiceOAuthClient returns the OAuth client configuration associated with the -// given service identifier, which should be of the form "servicename.vN". -// -// This is an alternative to ServiceURL for unusual services that require -// a full OAuth2 client definition rather than just a URL. Use this only -// for services whose specification calls for this sort of definition. -func (h *Host) ServiceOAuthClient(id string) (*OAuthClient, error) { - svc, ver, err := parseServiceID(id) - if err != nil { - return nil, err - } - - // No services supported for an empty Host. - if h == nil || h.services == nil { - return nil, &ErrServiceNotProvided{service: svc} - } - - if _, ok := h.services[id]; !ok { - // See if we have a matching service as that would indicate - // the service is supported, but not the requested version. - for serviceID := range h.services { - if strings.HasPrefix(serviceID, svc+".") { - return nil, &ErrVersionNotSupported{ - hostname: h.hostname, - service: svc, - version: ver.Original(), - } - } - } - - // No discovered services match the requested service. - return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} - } - - var raw map[string]interface{} - switch v := h.services[id].(type) { - case map[string]interface{}: - raw = v // Great! - case []map[string]interface{}: - // An absolutely infuriating legacy HCL ambiguity. - raw = v[0] - default: - // Debug message because raw Go types don't belong in our UI. - log.Printf("[DEBUG] The definition for %s has Go type %T", id, h.services[id]) - return nil, fmt.Errorf("Service %s must be declared with an object value in the service discovery document", id) - } - - var grantTypes OAuthGrantTypeSet - if rawGTs, ok := raw["grant_types"]; ok { - if gts, ok := rawGTs.([]interface{}); ok { - var kws []string - for _, gtI := range gts { - gt, ok := gtI.(string) - if !ok { - // We'll ignore this so that we can potentially introduce - // other types into this array later if we need to. - continue - } - kws = append(kws, gt) - } - grantTypes = NewOAuthGrantTypeSet(kws...) 
- } else { - return nil, fmt.Errorf("Service %s is defined with invalid grant_types property: must be an array of grant type strings", id) - } - } else { - grantTypes = NewOAuthGrantTypeSet("authz_code") - } - - ret := &OAuthClient{ - SupportedGrantTypes: grantTypes, - } - if clientIDStr, ok := raw["client"].(string); ok { - ret.ID = clientIDStr - } else { - return nil, fmt.Errorf("Service %s definition is missing required property \"client\"", id) - } - if urlStr, ok := raw["authz"].(string); ok { - u, err := h.parseURL(urlStr) - if err != nil { - return nil, fmt.Errorf("Failed to parse authorization URL: %v", err) - } - ret.AuthorizationURL = u - } else { - if grantTypes.RequiresAuthorizationEndpoint() { - return nil, fmt.Errorf("Service %s definition is missing required property \"authz\"", id) - } - } - if urlStr, ok := raw["token"].(string); ok { - u, err := h.parseURL(urlStr) - if err != nil { - return nil, fmt.Errorf("Failed to parse token URL: %v", err) - } - ret.TokenURL = u - } else { - if grantTypes.RequiresTokenEndpoint() { - return nil, fmt.Errorf("Service %s definition is missing required property \"token\"", id) - } - } - if portsRaw, ok := raw["ports"].([]interface{}); ok { - if len(portsRaw) != 2 { - return nil, fmt.Errorf("Invalid \"ports\" definition for service %s: must be a two-element array", id) - } - invalidPortsErr := fmt.Errorf("Invalid \"ports\" definition for service %s: both ports must be whole numbers between 1024 and 65535", id) - ports := make([]uint16, 2) - for i := range ports { - switch v := portsRaw[i].(type) { - case float64: - // JSON unmarshaling always produces float64. HCL 2 might, if - // an invalid fractional number were given. - if float64(uint16(v)) != v || v < 1024 { - return nil, invalidPortsErr - } - ports[i] = uint16(v) - case int: - // Legacy HCL produces int. HCL 2 will too, if the given number - // is a whole number. - if v < 1024 || v > 65535 { - return nil, invalidPortsErr - } - ports[i] = uint16(v) - default: - // Debug message because raw Go types don't belong in our UI. - log.Printf("[DEBUG] Port value %d has Go type %T", i, portsRaw[i]) - return nil, invalidPortsErr - } - } - if ports[1] < ports[0] { - return nil, fmt.Errorf("Invalid \"ports\" definition for service %s: minimum port cannot be greater than maximum port", id) - } - ret.MinPort = ports[0] - ret.MaxPort = ports[1] - } else { - // Default is to accept any port in the range, for a client that is - // able to call back to any localhost port. - ret.MinPort = 1024 - ret.MaxPort = 65535 - } - if scopesRaw, ok := raw["scopes"].([]interface{}); ok { - var scopes []string - for _, scopeI := range scopesRaw { - scope, ok := scopeI.(string) - if !ok { - return nil, fmt.Errorf("Invalid \"scopes\" for service %s: all scopes must be strings", id) - } - scopes = append(scopes, scope) - } - ret.Scopes = scopes - } - - return ret, nil -} - -func (h *Host) parseURL(urlStr string) (*url.URL, error) { - u, err := url.Parse(urlStr) - if err != nil { - return nil, err - } - - // Make relative URLs absolute using our discovery URL. - if !u.IsAbs() { - u = h.discoURL.ResolveReference(u) - } - - if u.Scheme != "https" && u.Scheme != "http" { - return nil, fmt.Errorf("unsupported scheme %s", u.Scheme) - } - if u.User != nil { - return nil, fmt.Errorf("embedded username/password information is not permitted") - } - - // Fragment part is irrelevant, since we're not a browser. 
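For reference, the object form parsed above corresponds to a discovery-document entry shaped like this hypothetical "login.v1" service (field names follow the parsing code; all values are invented):

```Go
// Hypothetical discovery document fragment. "client" is required; "authz"
// and "token" are required only when a listed grant type needs them;
// "ports" bounds the localhost redirect ports (1024-65535); "scopes" is
// an optional list of strings.
const exampleDiscovery = `{
  "login.v1": {
    "client":      "terraform-cli",
    "authz":       "/oauth/authorization",
    "token":       "/oauth/token",
    "grant_types": ["authz_code"],
    "ports":       [10000, 10010],
    "scopes":      ["openid"]
  }
}`
```

Relative "authz" and "token" values like these are resolved against the discovery URL by parseURL above.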
- u.Fragment = "" - - return u, nil -} - -// VersionConstraints returns the contraints for a given service identifier -// (which should be of the form "servicename.vN") and product. -// -// When an exact (service and version) match is found, the constraints for -// that service are returned. -// -// When the requested version is not provided but the service is, we will -// search for all alternative versions. If mutliple alternative versions -// are found, the contrains of the latest available version are returned. -// -// When a service is not provided at all an error will be returned instead. -// -// When checkpoint is disabled or when a 404 is returned after making the -// HTTP call, an ErrNoVersionConstraints error will be returned. -func (h *Host) VersionConstraints(id, product string) (*Constraints, error) { - svc, _, err := parseServiceID(id) - if err != nil { - return nil, err - } - - // Return early if checkpoint is disabled. - if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { - return nil, &ErrNoVersionConstraints{disabled: true} - } - - // No services supported for an empty Host. - if h == nil || h.services == nil { - return nil, &ErrServiceNotProvided{service: svc} - } - - // Try to get the service URL for the version service and - // return early if the service isn't provided by the host. - u, err := h.ServiceURL(versionServiceID) - if err != nil { - return nil, err - } - - // Check if we have an exact (service and version) match. - if _, ok := h.services[id].(string); !ok { - // If we don't have an exact match, we search for all matching - // services and then use the service ID of the latest version. - var services []string - for serviceID := range h.services { - if strings.HasPrefix(serviceID, svc+".") { - services = append(services, serviceID) - } - } - - if len(services) == 0 { - // No discovered services match the requested service. - return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} - } - - // Set id to the latest service ID we found. - var latest *version.Version - for _, serviceID := range services { - if _, ver, err := parseServiceID(serviceID); err == nil { - if latest == nil || latest.LessThan(ver) { - id = serviceID - latest = ver - } - } - } - } - - // Set a default timeout of 1 sec for the versions request (in milliseconds) - timeout := 1000 - if v, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { - timeout = v - } - - client := &http.Client{ - Transport: h.transport, - Timeout: time.Duration(timeout) * time.Millisecond, - } - - // Prepare the service URL by setting the service and product. - v := u.Query() - v.Set("product", product) - u.Path += id - u.RawQuery = v.Encode() - - // Create a new request. - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, fmt.Errorf("Failed to create version constraints request: %v", err) - } - req.Header.Set("Accept", "application/json") - - log.Printf("[DEBUG] Retrieve version constraints for service %s and product %s", id, product) - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("Failed to request version constraints: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode == 404 { - return nil, &ErrNoVersionConstraints{disabled: false} - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("Failed to request version constraints: %s", resp.Status) - } - - // Parse the constraints from the response body. 
- result := &Constraints{} - if err := json.NewDecoder(resp.Body).Decode(result); err != nil { - return nil, fmt.Errorf("Error parsing version constraints: %v", err) - } - - return result, nil -} - -func parseServiceID(id string) (string, *version.Version, error) { - parts := strings.SplitN(id, ".", 2) - if len(parts) != 2 { - return "", nil, fmt.Errorf("Invalid service ID format (i.e. service.vN): %s", id) - } - - version, err := version.NewVersion(parts[1]) - if err != nil { - return "", nil, fmt.Errorf("Invalid service version: %v", err) - } - - return parts[0], version, nil -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go b/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go deleted file mode 100644 index 7e4a38567e..0000000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go +++ /dev/null @@ -1,30 +0,0 @@ -package disco - -import ( - "net/http" - - "github.com/hashicorp/go-cleanhttp" -) - -const DefaultUserAgent = "terraform-svchost/1.0" - -func defaultHttpTransport() http.RoundTripper { - t := cleanhttp.DefaultPooledTransport() - return &userAgentRoundTripper{ - innerRt: t, - userAgent: DefaultUserAgent, - } -} - -type userAgentRoundTripper struct { - innerRt http.RoundTripper - userAgent string -} - -func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if _, ok := req.Header["User-Agent"]; !ok { - req.Header.Set("User-Agent", rt.userAgent) - } - - return rt.innerRt.RoundTrip(req) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go b/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go deleted file mode 100644 index 9df16ebc3a..0000000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go +++ /dev/null @@ -1,183 +0,0 @@ -package disco - -import ( - "fmt" - "net/url" - "strings" - - "golang.org/x/oauth2" -) - -// OAuthClient represents an OAuth client configuration, which is used for -// unusual services that require an entire OAuth client configuration as part -// of their service discovery, rather than just a URL. -type OAuthClient struct { - // ID is the identifier for the client, to be used as "client_id" in - // OAuth requests. - ID string - - // Authorization URL is the URL of the authorization endpoint that must - // be used for this OAuth client, as defined in the OAuth2 specifications. - // - // Not all grant types use the authorization endpoint, so it may be omitted - // if none of the grant types in SupportedGrantTypes require it. - AuthorizationURL *url.URL - - // Token URL is the URL of the token endpoint that must be used for this - // OAuth client, as defined in the OAuth2 specifications. - // - // Not all grant types use the token endpoint, so it may be omitted - // if none of the grant types in SupportedGrantTypes require it. - TokenURL *url.URL - - // MinPort and MaxPort define a range of TCP ports on localhost that this - // client is able to use as redirect_uri in an authorization request. - // Terraform will select a port from this range for the temporary HTTP - // server it creates to receive the authorization response, giving - // a URL like http://localhost:NNN/ where NNN is the selected port number. - // - // Terraform will reject any port numbers in this range less than 1024, - // to respect the common convention (enforced on some operating systems) - // that lower port numbers are reserved for "privileged" services. 
- MinPort, MaxPort uint16 - - // SupportedGrantTypes is a set of the grant types that the client may - // choose from. This includes an entry for each distinct type advertised - // by the server, even if a particular keyword is not supported by the - // current version of Terraform. - SupportedGrantTypes OAuthGrantTypeSet - - // Oauth2 does not require scopes for the authorization endpoint, however - // OIDC does. Optional list of scopes to include in auth code and token - // requests. - Scopes []string -} - -// Endpoint returns an oauth2.Endpoint value ready to be used with the oauth2 -// library, representing the URLs from the receiver. -func (c *OAuthClient) Endpoint() oauth2.Endpoint { - ep := oauth2.Endpoint{ - // We don't actually auth because we're not a server-based OAuth client, - // so this instead just means that we include client_id as an argument - // in our requests. - AuthStyle: oauth2.AuthStyleInParams, - } - - if c.AuthorizationURL != nil { - ep.AuthURL = c.AuthorizationURL.String() - } - if c.TokenURL != nil { - ep.TokenURL = c.TokenURL.String() - } - - return ep -} - -// OAuthGrantType is an enumeration of grant type strings that a host can -// advertise support for. -// -// Values of this type don't necessarily match with a known constant of the -// type, because they may represent grant type keywords defined in a later -// version of Terraform which this version doesn't yet know about. -type OAuthGrantType string - -const ( - // OAuthAuthzCodeGrant represents an authorization code grant, as - // defined in IETF RFC 6749 section 4.1. - OAuthAuthzCodeGrant = OAuthGrantType("authz_code") - - // OAuthOwnerPasswordGrant represents a resource owner password - // credentials grant, as defined in IETF RFC 6749 section 4.3. - OAuthOwnerPasswordGrant = OAuthGrantType("password") -) - -// UsesAuthorizationEndpoint returns true if the receiving grant type makes -// use of the authorization endpoint from the client configuration, and thus -// if the authorization endpoint ought to be required. -func (t OAuthGrantType) UsesAuthorizationEndpoint() bool { - switch t { - case OAuthAuthzCodeGrant: - return true - case OAuthOwnerPasswordGrant: - return false - default: - // We'll default to false so that we don't impose any requirements - // on any grant type keywords that might be defined for future - // versions of Terraform. - return false - } -} - -// UsesTokenEndpoint returns true if the receiving grant type makes -// use of the token endpoint from the client configuration, and thus -// if the authorization endpoint ought to be required. -func (t OAuthGrantType) UsesTokenEndpoint() bool { - switch t { - case OAuthAuthzCodeGrant: - return true - case OAuthOwnerPasswordGrant: - return true - default: - // We'll default to false so that we don't impose any requirements - // on any grant type keywords that might be defined for future - // versions of Terraform. - return false - } -} - -// OAuthGrantTypeSet represents a set of OAuthGrantType values. -type OAuthGrantTypeSet map[OAuthGrantType]struct{} - -// NewOAuthGrantTypeSet constructs a new grant type set from the given list -// of grant type keyword strings. Any duplicates in the list are ignored. -func NewOAuthGrantTypeSet(keywords ...string) OAuthGrantTypeSet { - ret := make(OAuthGrantTypeSet, len(keywords)) - for _, kw := range keywords { - ret[OAuthGrantType(kw)] = struct{}{} - } - return ret -} - -// Has returns true if the given grant type is in the receiving set. 
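Endpoint above returns a value ready for golang.org/x/oauth2, with client_id passed as a request parameter rather than HTTP basic auth. A hypothetical wiring, assuming a discovered host advertising a "login.v1" service (the grant-type set helpers follow just below):

```Go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-svchost/disco"
	"golang.org/x/oauth2"
)

func oauthConfig(host *disco.Host) (*oauth2.Config, error) {
	oc, err := host.ServiceOAuthClient("login.v1")
	if err != nil {
		return nil, err
	}
	if !oc.SupportedGrantTypes.Has(disco.OAuthAuthzCodeGrant) {
		return nil, fmt.Errorf("host does not support the authorization code grant")
	}
	return &oauth2.Config{
		ClientID: oc.ID,
		Endpoint: oc.Endpoint(), // AuthStyleInParams, per the method above
		Scopes:   oc.Scopes,
	}, nil
}
```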
-func (s OAuthGrantTypeSet) Has(t OAuthGrantType) bool { - _, ok := s[t] - return ok -} - -// RequiresAuthorizationEndpoint returns true if any of the grant types in -// the set are known to require an authorization endpoint. -func (s OAuthGrantTypeSet) RequiresAuthorizationEndpoint() bool { - for t := range s { - if t.UsesAuthorizationEndpoint() { - return true - } - } - return false -} - -// RequiresTokenEndpoint returns true if any of the grant types in -// the set are known to require a token endpoint. -func (s OAuthGrantTypeSet) RequiresTokenEndpoint() bool { - for t := range s { - if t.UsesTokenEndpoint() { - return true - } - } - return false -} - -// GoString implements fmt.GoStringer. -func (s OAuthGrantTypeSet) GoString() string { - var buf strings.Builder - i := 0 - buf.WriteString("disco.NewOAuthGrantTypeSet(") - for t := range s { - if i > 0 { - buf.WriteString(", ") - } - fmt.Fprintf(&buf, "%q", string(t)) - i++ - } - buf.WriteString(")") - return buf.String() -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/label_iter.go b/vendor/github.com/hashicorp/terraform-svchost/label_iter.go index af8ccbab20..eb87568144 100644 --- a/vendor/github.com/hashicorp/terraform-svchost/label_iter.go +++ b/vendor/github.com/hashicorp/terraform-svchost/label_iter.go @@ -1,3 +1,5 @@ +// Copyright (c) HashiCorp, Inc. + package svchost import ( diff --git a/vendor/github.com/hashicorp/terraform-svchost/svchost.go b/vendor/github.com/hashicorp/terraform-svchost/svchost.go index 4060b767e5..45a702978e 100644 --- a/vendor/github.com/hashicorp/terraform-svchost/svchost.go +++ b/vendor/github.com/hashicorp/terraform-svchost/svchost.go @@ -1,3 +1,5 @@ +// Copyright (c) HashiCorp, Inc. + // Package svchost deals with the representations of the so-called "friendly // hostnames" that we use to represent systems that provide Terraform-native // remote services, such as module registry, remote operations, etc. @@ -101,6 +103,18 @@ func ForComparison(given string) (Hostname, error) { var err error portPortion, err = normalizePortPortion(portPortion) if err != nil { + // We can get in here if someone has incorrectly specified a URL + // instead of a hostname, because normalizePortPortion will try to + // treat the colon after the scheme as the port number separator. + // We'll return a more specific error message for that situation. + given = strings.ToLower(given) + if given == "https" || given == "http" { + // Technically it's valid to have a host called "https" or "http" + // which would generate a false positive here with input like + // "http:foo", but we can only get here if the hostname exactly + // matches one of the schemes _and_ the port number is also invalid. 
+ return Hostname(""), fmt.Errorf("need just a hostname and optional port number, not a full URL") + } return Hostname(""), err } diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go index be6ebca9c7..f6a00199cd 100644 --- a/vendor/github.com/hashicorp/yamux/addr.go +++ b/vendor/github.com/hashicorp/yamux/addr.go @@ -54,7 +54,7 @@ func (s *Stream) LocalAddr() net.Addr { return s.session.LocalAddr() } -// LocalAddr returns the remote address +// RemoteAddr returns the remote address func (s *Stream) RemoteAddr() net.Addr { return s.session.RemoteAddr() } diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go index 4f52938287..2fdbf844a8 100644 --- a/vendor/github.com/hashicorp/yamux/const.go +++ b/vendor/github.com/hashicorp/yamux/const.go @@ -5,6 +5,25 @@ import ( "fmt" ) +// NetError implements net.Error +type NetError struct { + err error + timeout bool + temporary bool +} + +func (e *NetError) Error() string { + return e.err.Error() +} + +func (e *NetError) Timeout() bool { + return e.timeout +} + +func (e *NetError) Temporary() bool { + return e.temporary +} + var ( // ErrInvalidVersion means we received a frame with an // invalid version @@ -30,7 +49,13 @@ var ( ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded") // ErrTimeout is used when we reach an IO deadline - ErrTimeout = fmt.Errorf("i/o deadline reached") + ErrTimeout = &NetError{ + err: fmt.Errorf("i/o deadline reached"), + + // Error should meet net.Error interface for timeouts for compatability + // with standard library expectations, such as http servers. + timeout: true, + } // ErrStreamClosed is returned when using a closed stream ErrStreamClosed = fmt.Errorf("stream closed") diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go index 18a078c8ad..0c3e67b022 100644 --- a/vendor/github.com/hashicorp/yamux/mux.go +++ b/vendor/github.com/hashicorp/yamux/mux.go @@ -31,6 +31,20 @@ type Config struct { // window size that we allow for a stream. MaxStreamWindowSize uint32 + // StreamOpenTimeout is the maximum amount of time that a stream will + // be allowed to remain in pending state while waiting for an ack from the peer. + // Once the timeout is reached the session will be gracefully closed. + // A zero value disables the StreamOpenTimeout allowing unbounded + // blocking on OpenStream calls. + StreamOpenTimeout time.Duration + + // StreamCloseTimeout is the maximum time that a stream will allowed to + // be in a half-closed state when `Close` is called before forcibly + // closing the connection. Forcibly closed connections will empty the + // receive buffer, drop any future packets received for that stream, + // and send a RST to the remote side. + StreamCloseTimeout time.Duration + // LogOutput is used to control the log destination. Either Logger or // LogOutput can be set, not both. 
LogOutput io.Writer @@ -48,6 +62,8 @@ func DefaultConfig() *Config { KeepAliveInterval: 30 * time.Second, ConnectionWriteTimeout: 10 * time.Second, MaxStreamWindowSize: initialStreamWindow, + StreamCloseTimeout: 5 * time.Minute, + StreamOpenTimeout: 75 * time.Second, LogOutput: os.Stderr, } } diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go index a80ddec35e..38fe3ed1f0 100644 --- a/vendor/github.com/hashicorp/yamux/session.go +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -2,6 +2,7 @@ package yamux import ( "bufio" + "bytes" "fmt" "io" "io/ioutil" @@ -63,24 +64,27 @@ type Session struct { // sendCh is used to mark a stream as ready to send, // or to send a header out directly. - sendCh chan sendReady + sendCh chan *sendReady // recvDoneCh is closed when recv() exits to avoid a race // between stream registration and stream shutdown recvDoneCh chan struct{} + sendDoneCh chan struct{} // shutdown is used to safely close a session - shutdown bool - shutdownErr error - shutdownCh chan struct{} - shutdownLock sync.Mutex + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex + shutdownErrLock sync.Mutex } // sendReady is used to either mark a stream as ready // or to directly send a header type sendReady struct { Hdr []byte - Body io.Reader + mu sync.Mutex // Protects Body from unsafe reads. + Body []byte Err chan error } @@ -101,8 +105,9 @@ func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { inflight: make(map[uint32]struct{}), synCh: make(chan struct{}, config.AcceptBacklog), acceptCh: make(chan *Stream, config.AcceptBacklog), - sendCh: make(chan sendReady, 64), + sendCh: make(chan *sendReady, 64), recvDoneCh: make(chan struct{}), + sendDoneCh: make(chan struct{}), shutdownCh: make(chan struct{}), } if client { @@ -184,6 +189,10 @@ GET_ID: s.inflight[id] = struct{}{} s.streamLock.Unlock() + if s.config.StreamOpenTimeout > 0 { + go s.setOpenTimeout(stream) + } + // Send the window update to create if err := stream.sendWindowUpdate(); err != nil { select { @@ -196,6 +205,27 @@ GET_ID: return stream, nil } +// setOpenTimeout implements a timeout for streams that are opened but not established. +// If the StreamOpenTimeout is exceeded we assume the peer is unable to ACK, +// and close the session. +// The number of running timers is bounded by the capacity of the synCh. +func (s *Session) setOpenTimeout(stream *Stream) { + timer := time.NewTimer(s.config.StreamOpenTimeout) + defer timer.Stop() + + select { + case <-stream.establishCh: + return + case <-s.shutdownCh: + return + case <-timer.C: + // Timeout reached while waiting for ACK. + // Close the session to force connection re-establishment. + s.logger.Printf("[ERR] yamux: aborted stream open (destination=%s): %v", s.RemoteAddr().String(), ErrTimeout.err) + s.Close() + } +} + // Accept is used to block until the next available stream // is ready to be accepted. func (s *Session) Accept() (net.Conn, error) { @@ -230,10 +260,15 @@ func (s *Session) Close() error { return nil } s.shutdown = true + + s.shutdownErrLock.Lock() if s.shutdownErr == nil { s.shutdownErr = ErrSessionShutdown } + s.shutdownErrLock.Unlock() + close(s.shutdownCh) + s.conn.Close() <-s.recvDoneCh @@ -242,17 +277,18 @@ func (s *Session) Close() error { for _, stream := range s.streams { stream.forceClose() } + <-s.sendDoneCh return nil } // exitErr is used to handle an error that is causing the // session to terminate. 
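DefaultConfig above seeds the two new timeouts (75s open, 5m close). A sketch of tuning them when dialing a client session; the values are illustrative only:

```Go
package main

import (
	"log"
	"net"
	"time"

	"github.com/hashicorp/yamux"
)

func dialSession(conn net.Conn) *yamux.Session {
	cfg := yamux.DefaultConfig()
	cfg.StreamOpenTimeout = 30 * time.Second // abort the session if a SYN is never ACKed
	cfg.StreamCloseTimeout = time.Minute     // reap half-closed streams after one minute

	session, err := yamux.Client(conn, cfg)
	if err != nil {
		log.Fatal(err)
	}
	return session
}
```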
func (s *Session) exitErr(err error) { - s.shutdownLock.Lock() + s.shutdownErrLock.Lock() if s.shutdownErr == nil { s.shutdownErr = err } - s.shutdownLock.Unlock() + s.shutdownErrLock.Unlock() s.Close() } @@ -327,7 +363,7 @@ func (s *Session) keepalive() { } // waitForSendErr waits to send a header, checking for a potential shutdown -func (s *Session) waitForSend(hdr header, body io.Reader) error { +func (s *Session) waitForSend(hdr header, body []byte) error { errCh := make(chan error, 1) return s.waitForSendErr(hdr, body, errCh) } @@ -335,7 +371,7 @@ func (s *Session) waitForSend(hdr header, body io.Reader) error { // waitForSendErr waits to send a header with optional data, checking for a // potential shutdown. Since there's the expectation that sends can happen // in a timely manner, we enforce the connection write timeout here. -func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { +func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) error { t := timerPool.Get() timer := t.(*time.Timer) timer.Reset(s.config.ConnectionWriteTimeout) @@ -348,7 +384,7 @@ func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) e timerPool.Put(t) }() - ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + ready := &sendReady{Hdr: hdr, Body: body, Err: errCh} select { case s.sendCh <- ready: case <-s.shutdownCh: @@ -357,12 +393,34 @@ func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) e return ErrConnectionWriteTimeout } + bodyCopy := func() { + if body == nil { + return // A nil body is ignored. + } + + // In the event of session shutdown or connection write timeout, + // we need to prevent `send` from reading the body buffer after + // returning from this function since the caller may re-use the + // underlying array. + ready.mu.Lock() + defer ready.mu.Unlock() + + if ready.Body == nil { + return // Body was already copied in `send`. + } + newBody := make([]byte, len(body)) + copy(newBody, body) + ready.Body = newBody + } + select { case err := <-errCh: return err case <-s.shutdownCh: + bodyCopy() return ErrSessionShutdown case <-timer.C: + bodyCopy() return ErrConnectionWriteTimeout } } @@ -384,7 +442,7 @@ func (s *Session) sendNoWait(hdr header) error { }() select { - case s.sendCh <- sendReady{Hdr: hdr}: + case s.sendCh <- &sendReady{Hdr: hdr}: return nil case <-s.shutdownCh: return ErrSessionShutdown @@ -395,39 +453,59 @@ func (s *Session) sendNoWait(hdr header) error { // send is a long running goroutine that sends data func (s *Session) send() { + if err := s.sendLoop(); err != nil { + s.exitErr(err) + } +} + +func (s *Session) sendLoop() error { + defer close(s.sendDoneCh) + var bodyBuf bytes.Buffer for { + bodyBuf.Reset() + select { case ready := <-s.sendCh: // Send a header if ready if ready.Hdr != nil { - sent := 0 - for sent < len(ready.Hdr) { - n, err := s.conn.Write(ready.Hdr[sent:]) - if err != nil { - s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) - asyncSendErr(ready.Err, err) - s.exitErr(err) - return - } - sent += n + _, err := s.conn.Write(ready.Hdr) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + return err } } - // Send data from a body if given + ready.mu.Lock() if ready.Body != nil { - _, err := io.Copy(s.conn, ready.Body) + // Copy the body into the buffer to avoid + // holding a mutex lock during the write. 
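The bodyCopy closure above guards a subtle ownership hand-off: Stream.Write now queues the caller's own byte slice (a []byte instead of an io.Reader), and if waitForSendErr returns early on shutdown or write timeout, the caller is free to reuse that slice while sendLoop may still be draining the queue. A sketch of the caller-side pattern this makes safe (stream and fillNextChunk are hypothetical):

```Go
buf := make([]byte, 32*1024)
fillNextChunk(buf)          // hypothetical producer
_, err := stream.Write(buf) // may return ErrConnectionWriteTimeout
if err != nil {
	// Reusing buf immediately is safe: waitForSendErr copied the queued
	// body under ready.mu before returning, so sendLoop never reads
	// this (now mutated) buffer.
	fillNextChunk(buf)
}
```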
+ _, err := bodyBuf.Write(ready.Body) + if err != nil { + ready.Body = nil + ready.mu.Unlock() + s.logger.Printf("[ERR] yamux: Failed to copy body into buffer: %v", err) + asyncSendErr(ready.Err, err) + return err + } + ready.Body = nil + } + ready.mu.Unlock() + + if bodyBuf.Len() > 0 { + // Send data from a body if given + _, err := s.conn.Write(bodyBuf.Bytes()) if err != nil { s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) asyncSendErr(ready.Err, err) - s.exitErr(err) - return + return err } } // No error, successful send asyncSendErr(ready.Err, nil) case <-s.shutdownCh: - return + return nil } } } @@ -614,8 +692,9 @@ func (s *Session) incomingStream(id uint32) error { // Backlog exceeded! RST the stream s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") delete(s.streams, id) - stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0) - return s.sendNoWait(stream.sendHdr) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) } } diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go index aa23919739..23d08fcc8d 100644 --- a/vendor/github.com/hashicorp/yamux/stream.go +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -2,6 +2,7 @@ package yamux import ( "bytes" + "errors" "io" "sync" "sync/atomic" @@ -49,6 +50,13 @@ type Stream struct { readDeadline atomic.Value // time.Time writeDeadline atomic.Value // time.Time + + // establishCh is notified if the stream is established or being closed. + establishCh chan struct{} + + // closeTimer is set with stateLock held to honor the StreamCloseTimeout + // setting on Session. + closeTimer *time.Timer } // newStream is used to construct a new stream within @@ -66,6 +74,7 @@ func newStream(session *Session, id uint32, state streamState) *Stream { sendWindow: initialStreamWindow, recvNotifyCh: make(chan struct{}, 1), sendNotifyCh: make(chan struct{}, 1), + establishCh: make(chan struct{}, 1), } s.readDeadline.Store(time.Time{}) s.writeDeadline.Store(time.Time{}) @@ -119,6 +128,9 @@ START: // Send a window update potentially err = s.sendWindowUpdate() + if err == ErrSessionShutdown { + err = nil + } return n, err WAIT: @@ -161,7 +173,7 @@ func (s *Stream) Write(b []byte) (n int, err error) { func (s *Stream) write(b []byte) (n int, err error) { var flags uint16 var max uint32 - var body io.Reader + var body []byte START: s.stateLock.Lock() switch s.state { @@ -187,11 +199,15 @@ START: // Send up to our send window max = min(window, uint32(len(b))) - body = bytes.NewReader(b[:max]) + body = b[:max] // Send the header s.sendHdr.encode(typeData, flags, s.id, max) if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. + s.sendHdr = header(make([]byte, headerSize)) + } return 0, err } @@ -265,6 +281,10 @@ func (s *Stream) sendWindowUpdate() error { // Send the header s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. 
+ s.controlHdr = header(make([]byte, headerSize)) + } return err } return nil @@ -279,6 +299,10 @@ func (s *Stream) sendClose() error { flags |= flagFIN s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. + s.controlHdr = header(make([]byte, headerSize)) + } return err } return nil @@ -312,6 +336,27 @@ func (s *Stream) Close() error { s.stateLock.Unlock() return nil SEND_CLOSE: + // This shouldn't happen (the more realistic scenario to cancel the + // timer is via processFlags) but just in case this ever happens, we + // cancel the timer to prevent dangling timers. + if s.closeTimer != nil { + s.closeTimer.Stop() + s.closeTimer = nil + } + + // If we have a StreamCloseTimeout set we start the timeout timer. + // We do this only if we're not already closing the stream since that + // means this was a graceful close. + // + // This prevents memory leaks if one side (this side) closes and the + // remote side poorly behaves and never responds with a FIN to complete + // the close. After the specified timeout, we clean our resources up no + // matter what. + if !closeStream && s.session.config.StreamCloseTimeout > 0 { + s.closeTimer = time.AfterFunc( + s.session.config.StreamCloseTimeout, s.closeTimeout) + } + s.stateLock.Unlock() s.sendClose() s.notifyWaiting() @@ -321,6 +366,23 @@ SEND_CLOSE: return nil } +// closeTimeout is called after StreamCloseTimeout during a close to +// close this stream. +func (s *Stream) closeTimeout() { + // Close our side forcibly + s.forceClose() + + // Free the stream from the session map + s.session.closeStream(s.id) + + // Send a RST so the remote side closes too. + s.sendLock.Lock() + defer s.sendLock.Unlock() + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, s.id, 0) + s.session.sendNoWait(hdr) +} + // forceClose is used for when the session is exiting func (s *Stream) forceClose() { s.stateLock.Lock() @@ -332,20 +394,27 @@ func (s *Stream) forceClose() { // processFlags is used to update the state of the stream // based on set flags, if any. 
Lock must be held func (s *Stream) processFlags(flags uint16) error { + s.stateLock.Lock() + defer s.stateLock.Unlock() + // Close the stream without holding the state lock closeStream := false defer func() { if closeStream { + if s.closeTimer != nil { + // Stop our close timeout timer since we gracefully closed + s.closeTimer.Stop() + } + s.session.closeStream(s.id) } }() - s.stateLock.Lock() - defer s.stateLock.Unlock() if flags&flagACK == flagACK { if s.state == streamSYNSent { s.state = streamEstablished } + asyncNotify(s.establishCh) s.session.establishStream(s.id) } if flags&flagFIN == flagFIN { @@ -378,6 +447,7 @@ func (s *Stream) processFlags(flags uint16) error { func (s *Stream) notifyWaiting() { asyncNotify(s.recvNotifyCh) asyncNotify(s.sendNotifyCh) + asyncNotify(s.establishCh) } // incrSendWindow updates the size of our send window @@ -412,6 +482,7 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { if length > s.recvWindow { s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length) + s.recvLock.Unlock() return ErrRecvWindowExceeded } @@ -420,14 +491,15 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { // This way we can read in the whole packet without further allocations. s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) } - if _, err := io.Copy(s.recvBuf, conn); err != nil { + copiedLength, err := io.Copy(s.recvBuf, conn) + if err != nil { s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) s.recvLock.Unlock() return err } // Decrement the receive window - s.recvWindow -= length + s.recvWindow -= uint32(copiedLength) s.recvLock.Unlock() // Unblock any readers @@ -446,15 +518,17 @@ func (s *Stream) SetDeadline(t time.Time) error { return nil } -// SetReadDeadline sets the deadline for future Read calls. +// SetReadDeadline sets the deadline for blocked and future Read calls. func (s *Stream) SetReadDeadline(t time.Time) error { s.readDeadline.Store(t) + asyncNotify(s.recvNotifyCh) return nil } -// SetWriteDeadline sets the deadline for future Write calls +// SetWriteDeadline sets the deadline for blocked and future Write calls func (s *Stream) SetWriteDeadline(t time.Time) error { s.writeDeadline.Store(t) + asyncNotify(s.sendNotifyCh) return nil } diff --git a/vendor/github.com/hexops/gotextdiff/LICENSE b/vendor/github.com/hexops/gotextdiff/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hexops/gotextdiff/README.md b/vendor/github.com/hexops/gotextdiff/README.md new file mode 100644 index 0000000000..bfd49a0c97 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/README.md @@ -0,0 +1,54 @@ +# gotextdiff - unified text diffing in Go + +This is a copy of the Go text diffing packages that [the official Go language server gopls uses internally](https://github.com/golang/tools/tree/master/internal/lsp/diff) to generate unified diffs. + +If you've previously tried to generate unified text diffs in Go (like the ones you see in Git and on GitHub), you may have found [github.com/sergi/go-diff](https://github.com/sergi/go-diff) which is a Go port of Neil Fraser's google-diff-match-patch code - however it [does not support unified diffs](https://github.com/sergi/go-diff/issues/57). + +This is arguably one of the best (and most maintained) unified text diffing packages in Go as of at least 2020. + +(All credit goes to [the Go authors](http://tip.golang.org/AUTHORS), I am merely re-publishing their work so others can use it.) + +## Example usage + +Import the packages: + +```Go +import ( + "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/myers" +) +``` + +Assuming you want to diff `a.txt` and `b.txt`, whose contents are stored in `aString` and `bString` then: + +```Go +edits := myers.ComputeEdits(span.URIFromPath("a.txt"), aString, bString) +diff := fmt.Sprint(gotextdiff.ToUnified("a.txt", "b.txt", aString, edits)) +``` + +`diff` will be a string like: + +```diff +--- a.txt ++++ b.txt +@@ -1,13 +1,28 @@ +-foo ++bar +``` + +## API compatibility + +We will publish a new major version anytime the API changes in a backwards-incompatible way. Because the upstream is not being developed with this being a public package in mind, API breakages may occur more often than in other Go packages (but you can always continue using the old version thanks to Go modules.) + +## Alternatives + +- [github.com/andreyvit/diff](https://github.com/andreyvit/diff): Quick'n'easy string diffing functions for Golang based on github.com/sergi/go-diff. +- [github.com/kylelemons/godebug/diff](https://github.com/kylelemons/godebug/tree/master/diff): implements a linewise diff algorithm ([inactive](https://github.com/kylelemons/godebug/issues/22#issuecomment-524573477)). + +## Contributing + +We will only accept changes made [upstream](https://github.com/golang/tools/tree/master/internal/lsp/diff), please send any contributions to the upstream instead! Compared to the upstream, only import paths will be modified (to be non-`internal` so they are importable.) The only thing we add here is this README.
+ +## License + +See https://github.com/golang/tools/blob/master/LICENSE diff --git a/vendor/github.com/hexops/gotextdiff/diff.go b/vendor/github.com/hexops/gotextdiff/diff.go new file mode 100644 index 0000000000..53e499bc0c --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/diff.go @@ -0,0 +1,159 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package gotextdiff supports a pluggable diff algorithm. +package gotextdiff + +import ( + "sort" + "strings" + + "github.com/hexops/gotextdiff/span" +) + +// TextEdit represents a change to a section of a document. +// The text within the specified span should be replaced by the supplied new text. +type TextEdit struct { + Span span.Span + NewText string +} + +// ComputeEdits is the type for a function that produces a set of edits that +// convert from the before content to the after content. +type ComputeEdits func(uri span.URI, before, after string) []TextEdit + +// SortTextEdits attempts to order all edits by their starting points. +// The sort is stable so that edits with the same starting point will not +// be reordered. +func SortTextEdits(d []TextEdit) { + // Use a stable sort to maintain the order of edits inserted at the same position. + sort.SliceStable(d, func(i int, j int) bool { + return span.Compare(d[i].Span, d[j].Span) < 0 + }) +} + +// ApplyEdits applies the set of edits to the before and returns the resulting +// content. +// It may panic or produce garbage if the edits are not valid for the provided +// before content. +func ApplyEdits(before string, edits []TextEdit) string { + // Preconditions: + // - all of the edits apply to before + // - and all the spans for each TextEdit have the same URI + if len(edits) == 0 { + return before + } + _, edits, _ = prepareEdits(before, edits) + after := strings.Builder{} + last := 0 + for _, edit := range edits { + start := edit.Span.Start().Offset() + if start > last { + after.WriteString(before[last:start]) + last = start + } + after.WriteString(edit.NewText) + last = edit.Span.End().Offset() + } + if last < len(before) { + after.WriteString(before[last:]) + } + return after.String() +} + +// LineEdits takes a set of edits and expands and merges them as necessary +// to ensure that there are only full line edits left when it is done. 
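ApplyEdits above pairs with any ComputeEdits implementation: applying the produced edits to the before text must reproduce the after text. A small round-trip sketch using the myers package added later in this diff:

```Go
package main

import (
	"fmt"

	"github.com/hexops/gotextdiff"
	"github.com/hexops/gotextdiff/myers"
	"github.com/hexops/gotextdiff/span"
)

func main() {
	before := "foo\nbar\n"
	after := "foo\nbaz\n"

	edits := myers.ComputeEdits(span.URIFromPath("a.txt"), before, after)
	if got := gotextdiff.ApplyEdits(before, edits); got != after {
		panic("round trip failed")
	}
	fmt.Print(gotextdiff.ToUnified("a.txt", "b.txt", before, edits))
}
```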
+func LineEdits(before string, edits []TextEdit) []TextEdit { + if len(edits) == 0 { + return nil + } + c, edits, partial := prepareEdits(before, edits) + if partial { + edits = lineEdits(before, c, edits) + } + return edits +} + +// prepareEdits returns a sorted copy of the edits +func prepareEdits(before string, edits []TextEdit) (*span.TokenConverter, []TextEdit, bool) { + partial := false + c := span.NewContentConverter("", []byte(before)) + copied := make([]TextEdit, len(edits)) + for i, edit := range edits { + edit.Span, _ = edit.Span.WithAll(c) + copied[i] = edit + partial = partial || + edit.Span.Start().Offset() >= len(before) || + edit.Span.Start().Column() > 1 || edit.Span.End().Column() > 1 + } + SortTextEdits(copied) + return c, copied, partial +} + +// lineEdits rewrites the edits to always be full line edits +func lineEdits(before string, c *span.TokenConverter, edits []TextEdit) []TextEdit { + adjusted := make([]TextEdit, 0, len(edits)) + current := TextEdit{Span: span.Invalid} + for _, edit := range edits { + if current.Span.IsValid() && edit.Span.Start().Line() <= current.Span.End().Line() { + // overlaps with the current edit, need to combine + // first get the gap from the previous edit + gap := before[current.Span.End().Offset():edit.Span.Start().Offset()] + // now add the text of this edit + current.NewText += gap + edit.NewText + // and then adjust the end position + current.Span = span.New(current.Span.URI(), current.Span.Start(), edit.Span.End()) + } else { + // does not overlap, add previous run (if there is one) + adjusted = addEdit(before, adjusted, current) + // and then remember this edit as the start of the next run + current = edit + } + } + // add the current pending run if there is one + return addEdit(before, adjusted, current) +} + +func addEdit(before string, edits []TextEdit, edit TextEdit) []TextEdit { + if !edit.Span.IsValid() { + return edits + } + // if edit is partial, expand it to full line now + start := edit.Span.Start() + end := edit.Span.End() + if start.Column() > 1 { + // prepend the text and adjust to start of line + delta := start.Column() - 1 + start = span.NewPoint(start.Line(), 1, start.Offset()-delta) + edit.Span = span.New(edit.Span.URI(), start, end) + edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText + } + if start.Offset() >= len(before) && start.Line() > 1 && before[len(before)-1] != '\n' { + // after end of file that does not end in eol, so join to last line of file + // to do this we need to know where the start of the last line was + eol := strings.LastIndex(before, "\n") + if eol < 0 { + // file is one non terminated line + eol = 0 + } + delta := len(before) - eol + start = span.NewPoint(start.Line()-1, 1, start.Offset()-delta) + edit.Span = span.New(edit.Span.URI(), start, end) + edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText + } + if end.Column() > 1 { + remains := before[end.Offset():] + eol := strings.IndexRune(remains, '\n') + if eol < 0 { + eol = len(remains) + } else { + eol++ + } + end = span.NewPoint(end.Line()+1, 1, end.Offset()+eol) + edit.Span = span.New(edit.Span.URI(), start, end) + edit.NewText = edit.NewText + remains[:eol] + } + edits = append(edits, edit) + return edits +} diff --git a/vendor/github.com/hexops/gotextdiff/myers/diff.go b/vendor/github.com/hexops/gotextdiff/myers/diff.go new file mode 100644 index 0000000000..5e3e923648 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/myers/diff.go @@ -0,0 +1,205 @@ +// Copyright 2019 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package myers implements the Myers diff algorithm. +package myers + +import ( + "strings" + + diff "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/span" +) + +// Sources: +// https://blog.jcoglan.com/2017/02/17/the-myers-diff-algorithm-part-3/ +// https://www.codeproject.com/Articles/42279/%2FArticles%2F42279%2FInvestigating-Myers-diff-algorithm-Part-1-of-2 + +func ComputeEdits(uri span.URI, before, after string) []diff.TextEdit { + ops := operations(splitLines(before), splitLines(after)) + edits := make([]diff.TextEdit, 0, len(ops)) + for _, op := range ops { + s := span.New(uri, span.NewPoint(op.I1+1, 1, 0), span.NewPoint(op.I2+1, 1, 0)) + switch op.Kind { + case diff.Delete: + // Delete: unformatted[i1:i2] is deleted. + edits = append(edits, diff.TextEdit{Span: s}) + case diff.Insert: + // Insert: formatted[j1:j2] is inserted at unformatted[i1:i1]. + if content := strings.Join(op.Content, ""); content != "" { + edits = append(edits, diff.TextEdit{Span: s, NewText: content}) + } + } + } + return edits +} + +type operation struct { + Kind diff.OpKind + Content []string // content from b + I1, I2 int // indices of the line in a + J1 int // indices of the line in b, J2 implied by len(Content) +} + +// operations returns the list of operations to convert a into b, consolidating +// operations for multiple lines and not including equal lines. +func operations(a, b []string) []*operation { + if len(a) == 0 && len(b) == 0 { + return nil + } + + trace, offset := shortestEditSequence(a, b) + snakes := backtrack(trace, len(a), len(b), offset) + + M, N := len(a), len(b) + + var i int + solution := make([]*operation, len(a)+len(b)) + + add := func(op *operation, i2, j2 int) { + if op == nil { + return + } + op.I2 = i2 + if op.Kind == diff.Insert { + op.Content = b[op.J1:j2] + } + solution[i] = op + i++ + } + x, y := 0, 0 + for _, snake := range snakes { + if len(snake) < 2 { + continue + } + var op *operation + // delete (horizontal) + for snake[0]-snake[1] > x-y { + if op == nil { + op = &operation{ + Kind: diff.Delete, + I1: x, + J1: y, + } + } + x++ + if x == M { + break + } + } + add(op, x, y) + op = nil + // insert (vertical) + for snake[0]-snake[1] < x-y { + if op == nil { + op = &operation{ + Kind: diff.Insert, + I1: x, + J1: y, + } + } + y++ + } + add(op, x, y) + op = nil + // equal (diagonal) + for x < snake[0] { + x++ + y++ + } + if x >= M && y >= N { + break + } + } + return solution[:i] +} + +// backtrack uses the trace for the edit sequence computation and returns the +// "snakes" that make up the solution. A "snake" is a single deletion or +// insertion followed by zero or more diagonals. +func backtrack(trace [][]int, x, y, offset int) [][]int { + snakes := make([][]int, len(trace)) + d := len(trace) - 1 + for ; x > 0 && y > 0 && d > 0; d-- { + V := trace[d] + if len(V) == 0 { + continue + } + snakes[d] = []int{x, y} + + k := x - y + + var kPrev int + if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) { + kPrev = k + 1 + } else { + kPrev = k - 1 + } + + x = V[kPrev+offset] + y = x - kPrev + } + if x < 0 || y < 0 { + return snakes + } + snakes[d] = []int{x, y} + return snakes +} + +// shortestEditSequence returns the shortest edit sequence that converts a into b.
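A tiny worked case for the machinery above, assuming we are inside package myers (operations is unexported): diffing a = ["a\n", "b\n", "c\n"] against b = ["a\n", "c\n"] finishes at d = 1, and backtracking produces the snakes {1,1} and {3,2}, one horizontal (delete) move bracketed by diagonal runs, so operations collapses the walk into a single delete:

```Go
// Hypothetical snippet inside package myers; expected result worked out above.
ops := operations(
	[]string{"a\n", "b\n", "c\n"},
	[]string{"a\n", "c\n"},
)
for _, op := range ops {
	// Prints one operation: Kind == diff.Delete covering a[1:2] ("b\n").
	fmt.Printf("%v a[%d:%d]\n", op.Kind, op.I1, op.I2)
}
```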
+func shortestEditSequence(a, b []string) ([][]int, int) { + M, N := len(a), len(b) + V := make([]int, 2*(N+M)+1) + offset := N + M + trace := make([][]int, N+M+1) + + // Iterate through the maximum possible length of the SES (N+M). + for d := 0; d <= N+M; d++ { + copyV := make([]int, len(V)) + // k lines are represented by the equation y = x - k. We move in + // increments of 2 because end points for even d are on even k lines. + for k := -d; k <= d; k += 2 { + // At each point, we either go down or to the right. We go down if + // k == -d, and we go to the right if k == d. We also prioritize + // the maximum x value, because we prefer deletions to insertions. + var x int + if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) { + x = V[k+1+offset] // down + } else { + x = V[k-1+offset] + 1 // right + } + + y := x - k + + // Diagonal moves while we have equal contents. + for x < M && y < N && a[x] == b[y] { + x++ + y++ + } + + V[k+offset] = x + + // Return if we've exceeded the maximum values. + if x == M && y == N { + // Makes sure to save the state of the array before returning. + copy(copyV, V) + trace[d] = copyV + return trace, offset + } + } + + // Save the state of the array. + copy(copyV, V) + trace[d] = copyV + } + return nil, 0 +} + +func splitLines(text string) []string { + lines := strings.SplitAfter(text, "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} diff --git a/vendor/github.com/hexops/gotextdiff/span/parse.go b/vendor/github.com/hexops/gotextdiff/span/parse.go new file mode 100644 index 0000000000..aa17c84ec1 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/parse.go @@ -0,0 +1,100 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "strconv" + "strings" + "unicode/utf8" +) + +// Parse returns the location represented by the input. +// Only file paths are accepted, not URIs. +// The returned span will be normalized, and thus if printed may produce a +// different string. 
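The compact location syntax Parse accepts is easiest to see by example; a sketch whose results follow from the parsing code below:

```Go
package main

import (
	"fmt"

	"github.com/hexops/gotextdiff/span"
)

func main() {
	s := span.Parse("a.go:5:2-5:10") // line 5 col 2 through line 5 col 10
	fmt.Println(s.Start().Line(), s.Start().Column(), s.End().Column()) // 5 2 10

	o := span.Parse("a.go:#15") // offset-only form: byte offset 15
	fmt.Println(o.Start().Offset()) // 15
}
```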
+func Parse(input string) Span { + // :0:0#0-0:0#0 + valid := input + var hold, offset int + hadCol := false + suf := rstripSuffix(input) + if suf.sep == "#" { + offset = suf.num + suf = rstripSuffix(suf.remains) + } + if suf.sep == ":" { + valid = suf.remains + hold = suf.num + hadCol = true + suf = rstripSuffix(suf.remains) + } + switch { + case suf.sep == ":": + return New(URIFromPath(suf.remains), NewPoint(suf.num, hold, offset), Point{}) + case suf.sep == "-": + // we have a span, fall out of the case to continue + default: + // separator not valid, rewind to either the : or the start + return New(URIFromPath(valid), NewPoint(hold, 0, offset), Point{}) + } + // only the span form can get here + // at this point we still don't know what the numbers we have mean + // if have not yet seen a : then we might have either a line or a column depending + // on whether start has a column or not + // we build an end point and will fix it later if needed + end := NewPoint(suf.num, hold, offset) + hold, offset = 0, 0 + suf = rstripSuffix(suf.remains) + if suf.sep == "#" { + offset = suf.num + suf = rstripSuffix(suf.remains) + } + if suf.sep != ":" { + // turns out we don't have a span after all, rewind + return New(URIFromPath(valid), end, Point{}) + } + valid = suf.remains + hold = suf.num + suf = rstripSuffix(suf.remains) + if suf.sep != ":" { + // line#offset only + return New(URIFromPath(valid), NewPoint(hold, 0, offset), end) + } + // we have a column, so if end only had one number, it is also the column + if !hadCol { + end = NewPoint(suf.num, end.v.Line, end.v.Offset) + } + return New(URIFromPath(suf.remains), NewPoint(suf.num, hold, offset), end) +} + +type suffix struct { + remains string + sep string + num int +} + +func rstripSuffix(input string) suffix { + if len(input) == 0 { + return suffix{"", "", -1} + } + remains := input + num := -1 + // first see if we have a number at the end + last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' }) + if last >= 0 && last < len(remains)-1 { + number, err := strconv.ParseInt(remains[last+1:], 10, 64) + if err == nil { + num = int(number) + remains = remains[:last+1] + } + } + // now see if we have a trailing separator + r, w := utf8.DecodeLastRuneInString(remains) + if r != ':' && r != '#' { + return suffix{input, "", -1} + } + remains = remains[:len(remains)-w] + return suffix{remains, string(r), num} +} diff --git a/vendor/github.com/hexops/gotextdiff/span/span.go b/vendor/github.com/hexops/gotextdiff/span/span.go new file mode 100644 index 0000000000..4d2ad09866 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/span.go @@ -0,0 +1,285 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package span contains support for representing positions and ranges in +// text files. +package span + +import ( + "encoding/json" + "fmt" + "path" +) + +// Span represents a source code range in standardized form. +type Span struct { + v span +} + +// Point represents a single point within a file. +// In general this should only be used as part of a Span, as on its own it +// does not carry enough information.
+type Point struct { + v point +} + +type span struct { + URI URI `json:"uri"` + Start point `json:"start"` + End point `json:"end"` +} + +type point struct { + Line int `json:"line"` + Column int `json:"column"` + Offset int `json:"offset"` +} + +// Invalid is a span that reports false from IsValid +var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}} + +var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}} + +// Converter is the interface to an object that can convert between line:column +// and offset forms for a single file. +type Converter interface { + //ToPosition converts from an offset to a line:column pair. + ToPosition(offset int) (int, int, error) + //ToOffset converts from a line:column pair to an offset. + ToOffset(line, col int) (int, error) +} + +func New(uri URI, start Point, end Point) Span { + s := Span{v: span{URI: uri, Start: start.v, End: end.v}} + s.v.clean() + return s +} + +func NewPoint(line, col, offset int) Point { + p := Point{v: point{Line: line, Column: col, Offset: offset}} + p.v.clean() + return p +} + +func Compare(a, b Span) int { + if r := CompareURI(a.URI(), b.URI()); r != 0 { + return r + } + if r := comparePoint(a.v.Start, b.v.Start); r != 0 { + return r + } + return comparePoint(a.v.End, b.v.End) +} + +func ComparePoint(a, b Point) int { + return comparePoint(a.v, b.v) +} + +func comparePoint(a, b point) int { + if !a.hasPosition() { + if a.Offset < b.Offset { + return -1 + } + if a.Offset > b.Offset { + return 1 + } + return 0 + } + if a.Line < b.Line { + return -1 + } + if a.Line > b.Line { + return 1 + } + if a.Column < b.Column { + return -1 + } + if a.Column > b.Column { + return 1 + } + return 0 +} + +func (s Span) HasPosition() bool { return s.v.Start.hasPosition() } +func (s Span) HasOffset() bool { return s.v.Start.hasOffset() } +func (s Span) IsValid() bool { return s.v.Start.isValid() } +func (s Span) IsPoint() bool { return s.v.Start == s.v.End } +func (s Span) URI() URI { return s.v.URI } +func (s Span) Start() Point { return Point{s.v.Start} } +func (s Span) End() Point { return Point{s.v.End} } +func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) } +func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) } + +func (p Point) HasPosition() bool { return p.v.hasPosition() } +func (p Point) HasOffset() bool { return p.v.hasOffset() } +func (p Point) IsValid() bool { return p.v.isValid() } +func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) } +func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) } +func (p Point) Line() int { + if !p.v.hasPosition() { + panic(fmt.Errorf("position not set in %v", p.v)) + } + return p.v.Line +} +func (p Point) Column() int { + if !p.v.hasPosition() { + panic(fmt.Errorf("position not set in %v", p.v)) + } + return p.v.Column +} +func (p Point) Offset() int { + if !p.v.hasOffset() { + panic(fmt.Errorf("offset not set in %v", p.v)) + } + return p.v.Offset +} + +func (p point) hasPosition() bool { return p.Line > 0 } +func (p point) hasOffset() bool { return p.Offset >= 0 } +func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() } +func (p point) isZero() bool { + return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0) +} + +func (s *span) clean() { + //this presumes the points are already clean + if !s.End.isValid() || (s.End == point{}) { + s.End = s.Start + } +} + +func (p *point) clean() { + if p.Line < 0 { + p.Line = 0 + } + if p.Column <= 0 { + if 
p.Line > 0 { + p.Column = 1 + } else { + p.Column = 0 + } + } + if p.Offset == 0 && (p.Line > 1 || p.Column > 1) { + p.Offset = -1 + } +} + +// Format implements fmt.Formatter to print the Location in a standard form. +// The format produced is one that can be read back in using Parse. +func (s Span) Format(f fmt.State, c rune) { + fullForm := f.Flag('+') + preferOffset := f.Flag('#') + // we should always have a uri, simplify if it is file format + //TODO: make sure the end of the uri is unambiguous + uri := string(s.v.URI) + if c == 'f' { + uri = path.Base(uri) + } else if !fullForm { + uri = s.v.URI.Filename() + } + fmt.Fprint(f, uri) + if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) { + return + } + // see which bits of start to write + printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition()) + printLine := s.HasPosition() && (fullForm || !printOffset) + printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1)) + fmt.Fprint(f, ":") + if printLine { + fmt.Fprintf(f, "%d", s.v.Start.Line) + } + if printColumn { + fmt.Fprintf(f, ":%d", s.v.Start.Column) + } + if printOffset { + fmt.Fprintf(f, "#%d", s.v.Start.Offset) + } + // start is written, do we need end? + if s.IsPoint() { + return + } + // we don't print the line if it did not change + printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line) + fmt.Fprint(f, "-") + if printLine { + fmt.Fprintf(f, "%d", s.v.End.Line) + } + if printColumn { + if printLine { + fmt.Fprint(f, ":") + } + fmt.Fprintf(f, "%d", s.v.End.Column) + } + if printOffset { + fmt.Fprintf(f, "#%d", s.v.End.Offset) + } +} + +func (s Span) WithPosition(c Converter) (Span, error) { + if err := s.update(c, true, false); err != nil { + return Span{}, err + } + return s, nil +} + +func (s Span) WithOffset(c Converter) (Span, error) { + if err := s.update(c, false, true); err != nil { + return Span{}, err + } + return s, nil +} + +func (s Span) WithAll(c Converter) (Span, error) { + if err := s.update(c, true, true); err != nil { + return Span{}, err + } + return s, nil +} + +func (s *Span) update(c Converter, withPos, withOffset bool) error { + if !s.IsValid() { + return fmt.Errorf("cannot add information to an invalid span") + } + if withPos && !s.HasPosition() { + if err := s.v.Start.updatePosition(c); err != nil { + return err + } + if s.v.End.Offset == s.v.Start.Offset { + s.v.End = s.v.Start + } else if err := s.v.End.updatePosition(c); err != nil { + return err + } + } + if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) { + if err := s.v.Start.updateOffset(c); err != nil { + return err + } + if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column { + s.v.End.Offset = s.v.Start.Offset + } else if err := s.v.End.updateOffset(c); err != nil { + return err + } + } + return nil +} + +func (p *point) updatePosition(c Converter) error { + line, col, err := c.ToPosition(p.Offset) + if err != nil { + return err + } + p.Line = line + p.Column = col + return nil +} + +func (p *point) updateOffset(c Converter) error { + offset, err := c.ToOffset(p.Line, p.Column) + if err != nil { + return err + } + p.Offset = offset + return nil +} diff --git a/vendor/github.com/hexops/gotextdiff/span/token.go b/vendor/github.com/hexops/gotextdiff/span/token.go new file mode 100644 index 0000000000..6f8b9b570c --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/token.go @@ -0,0 +1,194 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "fmt" + "go/token" +) + +// Range represents a source code range in token.Pos form. +// It also carries the FileSet that produced the positions, so that it is +// self contained. +type Range struct { + FileSet *token.FileSet + Start token.Pos + End token.Pos + Converter Converter +} + +type FileConverter struct { + file *token.File +} + +// TokenConverter is a Converter backed by a token file set and file. +// It uses the file set methods to work out the conversions, which +// makes it fast and does not require the file contents. +type TokenConverter struct { + FileConverter + fset *token.FileSet +} + +// NewRange creates a new Range from a FileSet and two positions. +// To represent a point pass a 0 as the end pos. +func NewRange(fset *token.FileSet, start, end token.Pos) Range { + return Range{ + FileSet: fset, + Start: start, + End: end, + } +} + +// NewTokenConverter returns an implementation of Converter backed by a +// token.File. +func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter { + return &TokenConverter{fset: fset, FileConverter: FileConverter{file: f}} +} + +// NewContentConverter returns an implementation of Converter for the +// given file content. +func NewContentConverter(filename string, content []byte) *TokenConverter { + fset := token.NewFileSet() + f := fset.AddFile(filename, -1, len(content)) + f.SetLinesForContent(content) + return NewTokenConverter(fset, f) +} + +// IsPoint returns true if the range represents a single point. +func (r Range) IsPoint() bool { + return r.Start == r.End +} + +// Span converts a Range to a Span that represents the Range. +// It will fill in all the members of the Span, calculating the line and column +// information. +func (r Range) Span() (Span, error) { + if !r.Start.IsValid() { + return Span{}, fmt.Errorf("start pos is not valid") + } + f := r.FileSet.File(r.Start) + if f == nil { + return Span{}, fmt.Errorf("file not found in FileSet") + } + return FileSpan(f, r.Converter, r.Start, r.End) +} + +// FileSpan returns a span within tok, using converter to translate between +// offsets and positions. +func FileSpan(tok *token.File, converter Converter, start, end token.Pos) (Span, error) { + var s Span + var err error + var startFilename string + startFilename, s.v.Start.Line, s.v.Start.Column, err = position(tok, start) + if err != nil { + return Span{}, err + } + s.v.URI = URIFromPath(startFilename) + if end.IsValid() { + var endFilename string + endFilename, s.v.End.Line, s.v.End.Column, err = position(tok, end) + if err != nil { + return Span{}, err + } + // In the presence of line directives, a single File can have sections from + // multiple file names. 
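(Aside: the Converter implementations in this file can also be driven directly. Below is a minimal, self-contained sketch using the exported NewContentConverter helper; the file name and content are made up for illustration, and the printed values follow from the go/token position rules used in this package.)

```go
package main

import (
	"fmt"

	"github.com/hexops/gotextdiff/span"
)

func main() {
	content := []byte("hello\nworld\n")

	// Build a converter from raw content; the helper creates the
	// *token.FileSet it needs internally.
	conv := span.NewContentConverter("greeting.txt", content)

	// Byte offset 7 is the 'o' in "world": line 2, column 2.
	line, col, err := conv.ToPosition(7)
	if err != nil {
		panic(err)
	}
	fmt.Println(line, col) // 2 2

	// And back again: line 2, column 2 is byte offset 7.
	off, err := conv.ToOffset(2, 2)
	if err != nil {
		panic(err)
	}
	fmt.Println(off) // 7
}
```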
+ if endFilename != startFilename { + return Span{}, fmt.Errorf("span begins in file %q but ends in %q", startFilename, endFilename) + } + } + s.v.Start.clean() + s.v.End.clean() + s.v.clean() + if converter != nil { + return s.WithOffset(converter) + } + if startFilename != tok.Name() { + return Span{}, fmt.Errorf("must supply Converter for file %q containing lines from %q", tok.Name(), startFilename) + } + return s.WithOffset(&FileConverter{tok}) +} + +func position(f *token.File, pos token.Pos) (string, int, int, error) { + off, err := offset(f, pos) + if err != nil { + return "", 0, 0, err + } + return positionFromOffset(f, off) +} + +func positionFromOffset(f *token.File, offset int) (string, int, int, error) { + if offset > f.Size() { + return "", 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, f.Size()) + } + pos := f.Pos(offset) + p := f.Position(pos) + // TODO(golang/go#41029): Consider returning line, column instead of line+1, 1 if + // the file's last character is not a newline. + if offset == f.Size() { + return p.Filename, p.Line + 1, 1, nil + } + return p.Filename, p.Line, p.Column, nil +} + +// offset is a copy of the Offset function in go/token, but with the adjustment +// that it does not panic on invalid positions. +func offset(f *token.File, pos token.Pos) (int, error) { + if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() { + return 0, fmt.Errorf("invalid pos") + } + return int(pos) - f.Base(), nil +} + +// Range converts a Span to a Range that represents the Span for the supplied +// File. +func (s Span) Range(converter *TokenConverter) (Range, error) { + s, err := s.WithOffset(converter) + if err != nil { + return Range{}, err + } + // go/token will panic if the offset is larger than the file's size, + // so check here to avoid panicking. + if s.Start().Offset() > converter.file.Size() { + return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size()) + } + if s.End().Offset() > converter.file.Size() { + return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size()) + } + return Range{ + FileSet: converter.fset, + Start: converter.file.Pos(s.Start().Offset()), + End: converter.file.Pos(s.End().Offset()), + Converter: converter, + }, nil +} + +func (l *FileConverter) ToPosition(offset int) (int, int, error) { + _, line, col, err := positionFromOffset(l.file, offset) + return line, col, err +} + +func (l *FileConverter) ToOffset(line, col int) (int, error) { + if line < 0 { + return -1, fmt.Errorf("line is not valid") + } + lineMax := l.file.LineCount() + 1 + if line > lineMax { + return -1, fmt.Errorf("line is beyond end of file %v", lineMax) + } else if line == lineMax { + if col > 1 { + return -1, fmt.Errorf("column is beyond end of file") + } + // at the end of the file, allowing for a trailing eol + return l.file.Size(), nil + } + pos := lineStart(l.file, line) + if !pos.IsValid() { + return -1, fmt.Errorf("line is not in file") + } + // we assume that column is in bytes here, and that the first byte of a + // line is at column 1 + pos += token.Pos(col - 1) + return offset(l.file, pos) +} diff --git a/vendor/github.com/hexops/gotextdiff/span/token111.go b/vendor/github.com/hexops/gotextdiff/span/token111.go new file mode 100644 index 0000000000..bf7a5406b6 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/token111.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.12 + +package span + +import ( + "go/token" +) + +// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go +// versions <= 1.11, we borrow logic from the analysisutil package. +// TODO(rstambler): Delete this file when we no longer support Go 1.11. +func lineStart(f *token.File, line int) token.Pos { + // Use binary search to find the start offset of this line. + + min := 0 // inclusive + max := f.Size() // exclusive + for { + offset := (min + max) / 2 + pos := f.Pos(offset) + posn := f.Position(pos) + if posn.Line == line { + return pos - (token.Pos(posn.Column) - 1) + } + + if min+1 >= max { + return token.NoPos + } + + if posn.Line < line { + min = offset + } else { + max = offset + } + } +} diff --git a/vendor/github.com/hexops/gotextdiff/span/token112.go b/vendor/github.com/hexops/gotextdiff/span/token112.go new file mode 100644 index 0000000000..017aec9c13 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/token112.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.12 + +package span + +import ( + "go/token" +) + +// TODO(rstambler): Delete this file when we no longer support Go 1.11. +func lineStart(f *token.File, line int) token.Pos { + return f.LineStart(line) +} diff --git a/vendor/github.com/hexops/gotextdiff/span/uri.go b/vendor/github.com/hexops/gotextdiff/span/uri.go new file mode 100644 index 0000000000..2504921356 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/uri.go @@ -0,0 +1,169 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "fmt" + "net/url" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "unicode" +) + +const fileScheme = "file" + +// URI represents the full URI for a file. +type URI string + +func (uri URI) IsFile() bool { + return strings.HasPrefix(string(uri), "file://") +} + +// Filename returns the file path for the given URI. +// It is an error to call this on a URI that is not a valid filename. +func (uri URI) Filename() string { + filename, err := filename(uri) + if err != nil { + panic(err) + } + return filepath.FromSlash(filename) +} + +func filename(uri URI) (string, error) { + if uri == "" { + return "", nil + } + u, err := url.ParseRequestURI(string(uri)) + if err != nil { + return "", err + } + if u.Scheme != fileScheme { + return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri) + } + // If the URI is a Windows URI, we trim the leading "/" and lowercase + // the drive letter, which will never be case sensitive. + if isWindowsDriveURIPath(u.Path) { + u.Path = strings.ToUpper(string(u.Path[1])) + u.Path[2:] + } + return u.Path, nil +} + +func URIFromURI(s string) URI { + if !strings.HasPrefix(s, "file://") { + return URI(s) + } + + if !strings.HasPrefix(s, "file:///") { + // VS Code sends URLs with only two slashes, which are invalid. golang/go#39789. + s = "file:///" + s[len("file://"):] + } + // Even though the input is a URI, it may not be in canonical form. VS Code + // in particular over-escapes :, @, etc. Unescape and re-encode to canonicalize. 
+ path, err := url.PathUnescape(s[len("file://"):]) + if err != nil { + panic(err) + } + + // File URIs from Windows may have lowercase drive letters. + // Since drive letters are guaranteed to be case insensitive, + // we change them to uppercase to remain consistent. + // For example, file:///c:/x/y/z becomes file:///C:/x/y/z. + if isWindowsDriveURIPath(path) { + path = path[:1] + strings.ToUpper(string(path[1])) + path[2:] + } + u := url.URL{Scheme: fileScheme, Path: path} + return URI(u.String()) +} + +func CompareURI(a, b URI) int { + if equalURI(a, b) { + return 0 + } + if a < b { + return -1 + } + return 1 +} + +func equalURI(a, b URI) bool { + if a == b { + return true + } + // If we have the same URI basename, we may still have the same file URIs. + if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) { + return false + } + fa, err := filename(a) + if err != nil { + return false + } + fb, err := filename(b) + if err != nil { + return false + } + // Stat the files to check if they are equal. + infoa, err := os.Stat(filepath.FromSlash(fa)) + if err != nil { + return false + } + infob, err := os.Stat(filepath.FromSlash(fb)) + if err != nil { + return false + } + return os.SameFile(infoa, infob) +} + +// URIFromPath returns a span URI for the supplied file path. +// It will always have the file scheme. +func URIFromPath(path string) URI { + if path == "" { + return "" + } + // Handle standard library paths that contain the literal "$GOROOT". + // TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT. + const prefix = "$GOROOT" + if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) { + suffix := path[len(prefix):] + path = runtime.GOROOT() + suffix + } + if !isWindowsDrivePath(path) { + if abs, err := filepath.Abs(path); err == nil { + path = abs + } + } + // Check the file path again, in case it became absolute. + if isWindowsDrivePath(path) { + path = "/" + strings.ToUpper(string(path[0])) + path[1:] + } + path = filepath.ToSlash(path) + u := url.URL{ + Scheme: fileScheme, + Path: path, + } + return URI(u.String()) +} + +// isWindowsDrivePath returns true if the file path is of the form used by +// Windows. We check if the path begins with a drive letter, followed by a ":". +// For example: C:/x/y/z. +func isWindowsDrivePath(path string) bool { + if len(path) < 3 { + return false + } + return unicode.IsLetter(rune(path[0])) && path[1] == ':' +} + +// isWindowsDriveURI returns true if the file URI is of the format used by +// Windows URIs. The url.Parse package does not specially handle Windows paths +// (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:"). +func isWindowsDriveURIPath(uri string) bool { + if len(uri) < 4 { + return false + } + return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' +} diff --git a/vendor/github.com/hexops/gotextdiff/span/utf16.go b/vendor/github.com/hexops/gotextdiff/span/utf16.go new file mode 100644 index 0000000000..f06a2468b6 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/utf16.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "fmt" + "unicode/utf16" + "unicode/utf8" +) + +// ToUTF16Column calculates the utf16 column expressed by the point given the +// supplied file contents. 
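Before the full implementation that follows, a quick illustration of the byte-column versus UTF-16-column distinction these helpers bridge; the content and point below are invented for the example.

```go
package main

import (
	"fmt"

	"github.com/hexops/gotextdiff/span"
)

func main() {
	content := []byte("héllo") // 'é' is 2 bytes in UTF-8, 1 code unit in UTF-16

	// The 'l' sits at 1-based byte column 4 (byte offset 3), but editors
	// that count UTF-16 code units see it at column 3.
	p := span.NewPoint(1, 4, 3)

	col, err := span.ToUTF16Column(p, content)
	if err != nil {
		panic(err)
	}
	fmt.Println(col) // 3
}
```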
+// This is used to convert from the native (always in bytes) column +// representation and the utf16 counts used by some editors. +func ToUTF16Column(p Point, content []byte) (int, error) { + if !p.HasPosition() { + return -1, fmt.Errorf("ToUTF16Column: point is missing position") + } + if !p.HasOffset() { + return -1, fmt.Errorf("ToUTF16Column: point is missing offset") + } + offset := p.Offset() // 0-based + colZero := p.Column() - 1 // 0-based + if colZero == 0 { + // 0-based column 0, so it must be chr 1 + return 1, nil + } else if colZero < 0 { + return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero) + } + // work out the offset at the start of the line using the column + lineOffset := offset - colZero + if lineOffset < 0 || offset > len(content) { + return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content)) + } + // Use the offset to pick out the line start. + // This cannot panic: offset > len(content) and lineOffset < offset. + start := content[lineOffset:] + + // Now, truncate down to the supplied column. + start = start[:colZero] + + // and count the number of utf16 characters + // in theory we could do this by hand more efficiently... + return len(utf16.Encode([]rune(string(start)))) + 1, nil +} + +// FromUTF16Column advances the point by the utf16 character offset given the +// supplied line contents. +// This is used to convert from the utf16 counts used by some editors to the +// native (always in bytes) column representation. +func FromUTF16Column(p Point, chr int, content []byte) (Point, error) { + if !p.HasOffset() { + return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset") + } + // if chr is 1 then no adjustment needed + if chr <= 1 { + return p, nil + } + if p.Offset() >= len(content) { + return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content)) + } + remains := content[p.Offset():] + // scan forward the specified number of characters + for count := 1; count < chr; count++ { + if len(remains) <= 0 { + return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content") + } + r, w := utf8.DecodeRune(remains) + if r == '\n' { + // Per the LSP spec: + // + // > If the character value is greater than the line length it + // > defaults back to the line length. + break + } + remains = remains[w:] + if r >= 0x10000 { + // a two point rune + count++ + // if we finished in a two point rune, do not advance past the first + if count >= chr { + break + } + } + p.v.Column += w + p.v.Offset += w + } + return p, nil +} diff --git a/vendor/github.com/hexops/gotextdiff/unified.go b/vendor/github.com/hexops/gotextdiff/unified.go new file mode 100644 index 0000000000..b7d85cfccf --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/unified.go @@ -0,0 +1,210 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gotextdiff + +import ( + "fmt" + "strings" +) + +// Unified represents a set of edits as a unified diff. +type Unified struct { + // From is the name of the original file. + From string + // To is the name of the modified file. + To string + // Hunks is the set of edit hunks needed to transform the file content. + Hunks []*Hunk +} + +// Hunk represents a contiguous set of line edits to apply. +type Hunk struct { + // The line in the original source where the hunk starts. 
+ FromLine int + // The line in the original source where the hunk finishes. + ToLine int + // The set of line based edits to apply. + Lines []Line +} + +// Line represents a single line operation to apply as part of a Hunk. +type Line struct { + // Kind is the type of line this represents, deletion, insertion or copy. + Kind OpKind + // Content is the content of this line. + // For deletion it is the line being removed, for all others it is the line + // to put in the output. + Content string +} + +// OpKind is used to denote the type of operation a line represents. +type OpKind int + +const ( + // Delete is the operation kind for a line that is present in the input + // but not in the output. + Delete OpKind = iota + // Insert is the operation kind for a line that is new in the output. + Insert + // Equal is the operation kind for a line that is the same in the input and + // output, often used to provide context around edited lines. + Equal +) + +// String returns a human readable representation of an OpKind. It is not +// intended for machine processing. +func (k OpKind) String() string { + switch k { + case Delete: + return "delete" + case Insert: + return "insert" + case Equal: + return "equal" + default: + panic("unknown operation kind") + } +} + +const ( + edge = 3 + gap = edge * 2 +) + +// ToUnified takes a file contents and a sequence of edits, and calculates +// a unified diff that represents those edits. +func ToUnified(from, to string, content string, edits []TextEdit) Unified { + u := Unified{ + From: from, + To: to, + } + if len(edits) == 0 { + return u + } + c, edits, partial := prepareEdits(content, edits) + if partial { + edits = lineEdits(content, c, edits) + } + lines := splitLines(content) + var h *Hunk + last := 0 + toLine := 0 + for _, edit := range edits { + start := edit.Span.Start().Line() - 1 + end := edit.Span.End().Line() - 1 + switch { + case h != nil && start == last: + //direct extension + case h != nil && start <= last+gap: + //within range of previous lines, add the joiners + addEqualLines(h, lines, last, start) + default: + //need to start a new hunk + if h != nil { + // add the edge to the previous hunk + addEqualLines(h, lines, last, last+edge) + u.Hunks = append(u.Hunks, h) + } + toLine += start - last + h = &Hunk{ + FromLine: start + 1, + ToLine: toLine + 1, + } + // add the edge to the new hunk + delta := addEqualLines(h, lines, start-edge, start) + h.FromLine -= delta + h.ToLine -= delta + } + last = start + for i := start; i < end; i++ { + h.Lines = append(h.Lines, Line{Kind: Delete, Content: lines[i]}) + last++ + } + if edit.NewText != "" { + for _, line := range splitLines(edit.NewText) { + h.Lines = append(h.Lines, Line{Kind: Insert, Content: line}) + toLine++ + } + } + } + if h != nil { + // add the edge to the final hunk + addEqualLines(h, lines, last, last+edge) + u.Hunks = append(u.Hunks, h) + } + return u +} + +func splitLines(text string) []string { + lines := strings.SplitAfter(text, "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} + +func addEqualLines(h *Hunk, lines []string, start, end int) int { + delta := 0 + for i := start; i < end; i++ { + if i < 0 { + continue + } + if i >= len(lines) { + return delta + } + h.Lines = append(h.Lines, Line{Kind: Equal, Content: lines[i]}) + delta++ + } + return delta +} + +// Format converts a unified diff to the standard textual form for that diff. +// The output of this function can be passed to tools like patch. 
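ToUnified above and the Format method below are the package's rendering entry points. A minimal sketch of the typical call chain, pairing them with the edit computation from the sibling myers package; the file names and contents are illustrative.

```go
package main

import (
	"fmt"

	"github.com/hexops/gotextdiff"
	"github.com/hexops/gotextdiff/myers"
	"github.com/hexops/gotextdiff/span"
)

func main() {
	before := "hello\nworld\n"
	after := "hello\nthere\nworld\n"

	// Compute line-level edits, then render them as a unified diff.
	edits := myers.ComputeEdits(span.URIFromPath("a.txt"), before, after)
	fmt.Print(gotextdiff.ToUnified("a.txt", "b.txt", before, edits))
}
```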
+func (u Unified) Format(f fmt.State, r rune) { + if len(u.Hunks) == 0 { + return + } + fmt.Fprintf(f, "--- %s\n", u.From) + fmt.Fprintf(f, "+++ %s\n", u.To) + for _, hunk := range u.Hunks { + fromCount, toCount := 0, 0 + for _, l := range hunk.Lines { + switch l.Kind { + case Delete: + fromCount++ + case Insert: + toCount++ + default: + fromCount++ + toCount++ + } + } + fmt.Fprint(f, "@@") + if fromCount > 1 { + fmt.Fprintf(f, " -%d,%d", hunk.FromLine, fromCount) + } else { + fmt.Fprintf(f, " -%d", hunk.FromLine) + } + if toCount > 1 { + fmt.Fprintf(f, " +%d,%d", hunk.ToLine, toCount) + } else { + fmt.Fprintf(f, " +%d", hunk.ToLine) + } + fmt.Fprint(f, " @@\n") + for _, l := range hunk.Lines { + switch l.Kind { + case Delete: + fmt.Fprintf(f, "-%s", l.Content) + case Insert: + fmt.Fprintf(f, "+%s", l.Content) + default: + fmt.Fprintf(f, " %s", l.Content) + } + if !strings.HasSuffix(l.Content, "\n") { + fmt.Fprintf(f, "\n\\ No newline at end of file\n") + } + } + } +} diff --git a/vendor/github.com/huandu/xstrings/.gitignore b/vendor/github.com/huandu/xstrings/.gitignore deleted file mode 100644 index daf913b1b3..0000000000 --- a/vendor/github.com/huandu/xstrings/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/huandu/xstrings/.travis.yml b/vendor/github.com/huandu/xstrings/.travis.yml deleted file mode 100644 index d6460be411..0000000000 --- a/vendor/github.com/huandu/xstrings/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -install: - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - go test -v -covermode=count -coverprofile=coverage.out - - 'if [ "$TRAVIS_PULL_REQUEST" = "false" ] && [ ! -z "$COVERALLS_TOKEN" ]; then $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN; fi' diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md deleted file mode 100644 index d7b4b8d584..0000000000 --- a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md +++ /dev/null @@ -1,23 +0,0 @@ -# Contributing # - -Thanks for your contribution in advance. No matter what you will contribute to this project, pull request or bug report or feature discussion, it's always highly appreciated. - -## New API or feature ## - -I want to speak more about how to add new functions to this package. - -Package `xstring` is a collection of useful string functions which should be implemented in Go. It's a bit subject to say which function should be included and which should not. I set up following rules in order to make it clear and as objective as possible. - -* Rule 1: Only string algorithm, which takes string as input, can be included. -* Rule 2: If a function has been implemented in package `string`, it must not be included. -* Rule 3: If a function is not language neutral, it must not be included. -* Rule 4: If a function is a part of standard library in other languages, it can be included. -* Rule 5: If a function is quite useful in some famous framework or library, it can be included. - -New function must be discussed in project issues before submitting any code. If a pull request with new functions is sent without any ref issue, it will be rejected. 
- -## Pull request ## - -Pull request is always welcome. Just make sure you have run `go fmt` and all test cases passed before submit. - -If the pull request is to add a new API or feature, don't forget to update README.md and add new API in function list. diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE deleted file mode 100644 index 2701772593..0000000000 --- a/vendor/github.com/huandu/xstrings/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Huan Du - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md deleted file mode 100644 index 292bf2f39e..0000000000 --- a/vendor/github.com/huandu/xstrings/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# xstrings # - -[![Build Status](https://travis-ci.org/huandu/xstrings.svg?branch=master)](https://travis-ci.org/huandu/xstrings) -[![GoDoc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://godoc.org/github.com/huandu/xstrings) -[![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings) -[![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master) - -Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions, which are widely used in other languages but absent in Go package [strings](http://golang.org/pkg/strings). - -All functions are well tested and carefully tuned for performance. - -## Propose a new function ## - -Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](https://github.com/huandu/xstrings/issues) to state why it should be included. - -## Install ## - -Use `go get` to install this library. - - go get github.com/huandu/xstrings - -## API document ## - -See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for full document. - -## Function list ## - -Go functions have a unique naming style. One, who has experience in other language but new in Go, may have difficulties to find out right string function to use. - -Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hope this list could be helpful for fresh gophers. 
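Since this change drops the vendored copy, a hedged sketch for context of how the removed helpers in the list below were typically called; the expected outputs mirror the samples in the package's own doc comments.

```go
package main

import (
	"fmt"

	"github.com/huandu/xstrings"
)

func main() {
	// Case conversion, per the doc-comment samples in convert.go.
	fmt.Println(xstrings.ToCamelCase("some_words")) // SomeWords
	fmt.Println(xstrings.ToSnakeCase("HTTPServer")) // http_server
	fmt.Println(xstrings.ToKebabCase("FirstName"))  // first-name
	fmt.Println(xstrings.SwapCase("Hello"))         // hELLO
}
```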
- -### Package `xstrings` functions ### - -*Keep this table sorted by Function in ascending order.* - -| Function | Friends | # | -| -------- | ------- | --- | -| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | -| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | -| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | -| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | -| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | -| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | -| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | -| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | -| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | -| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | -| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | -| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | -| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | -| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | -| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | -| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | -| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | 
[#12](https://github.com/huandu/xstrings/issues/12) | -| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | -| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | -| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | -| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | -| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | -| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | -| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | - -### Package `strings` functions ### - -*Keep this table sorted by Function in ascending order.* - -| Function | Friends | -| -------- | ------- | -| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | -| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | -| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | -| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | -| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | -| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | -| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | -| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | -| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | -| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | -| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | -| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | -| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | -| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | -| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | -| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | -| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | -| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | -| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | -| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | -| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | -| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | -| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | -| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | -| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` 
in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | -| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | -| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | -| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | -| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | -| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | -| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | -| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | -| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | -| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | -| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | -| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | -| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | -| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | -| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | -| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | -| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | - -## License ## - -This library is licensed under MIT license. See LICENSE for details. diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go deleted file mode 100644 index f427cc84e2..0000000000 --- a/vendor/github.com/huandu/xstrings/common.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -const bufferMaxInitGrowSize = 2048 - -// Lazy initialize a buffer. -func allocBuffer(orig, cur string) *stringBuilder { - output := &stringBuilder{} - maxSize := len(orig) * 4 - - // Avoid to reserve too much memory at once. - if maxSize > bufferMaxInitGrowSize { - maxSize = bufferMaxInitGrowSize - } - - output.Grow(maxSize) - output.WriteString(orig[:len(orig)-len(cur)]) - return output -} diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go deleted file mode 100644 index 3d5a34950b..0000000000 --- a/vendor/github.com/huandu/xstrings/convert.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "math/rand" - "unicode" - "unicode/utf8" -) - -// ToCamelCase is to convert words separated by space, underscore and hyphen to camel case. -// -// Some samples. -// "some_words" => "SomeWords" -// "http_server" => "HttpServer" -// "no_https" => "NoHttps" -// "_complex__case_" => "_Complex_Case_" -// "some words" => "SomeWords" -func ToCamelCase(str string) string { - if len(str) == 0 { - return "" - } - - buf := &stringBuilder{} - var r0, r1 rune - var size int - - // leading connector will appear in output. 
- for len(str) > 0 { - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if !isConnector(r0) { - r0 = unicode.ToUpper(r0) - break - } - - buf.WriteRune(r0) - } - - if len(str) == 0 { - // A special case for a string contains only 1 rune. - if size != 0 { - buf.WriteRune(r0) - } - - return buf.String() - } - - for len(str) > 0 { - r1 = r0 - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if isConnector(r0) && isConnector(r1) { - buf.WriteRune(r1) - continue - } - - if isConnector(r1) { - r0 = unicode.ToUpper(r0) - } else { - r0 = unicode.ToLower(r0) - buf.WriteRune(r1) - } - } - - buf.WriteRune(r0) - return buf.String() -} - -// ToSnakeCase can convert all upper case characters in a string to -// snake case format. -// -// Some samples. -// "FirstName" => "first_name" -// "HTTPServer" => "http_server" -// "NoHTTPS" => "no_https" -// "GO_PATH" => "go_path" -// "GO PATH" => "go_path" // space is converted to underscore. -// "GO-PATH" => "go_path" // hyphen is converted to underscore. -// "http2xx" => "http_2xx" // insert an underscore before a number and after an alphabet. -// "HTTP20xOK" => "http_20x_ok" -// "Duration2m3s" => "duration_2m3s" -// "Bld4Floor3rd" => "bld4_floor_3rd" -func ToSnakeCase(str string) string { - return camelCaseToLowerCase(str, '_') -} - -// ToKebabCase can convert all upper case characters in a string to -// kebab case format. -// -// Some samples. -// "FirstName" => "first-name" -// "HTTPServer" => "http-server" -// "NoHTTPS" => "no-https" -// "GO_PATH" => "go-path" -// "GO PATH" => "go-path" // space is converted to '-'. -// "GO-PATH" => "go-path" // hyphen is converted to '-'. -// "http2xx" => "http-2xx" // insert an underscore before a number and after an alphabet. -// "HTTP20xOK" => "http-20x-ok" -// "Duration2m3s" => "duration-2m3s" -// "Bld4Floor3rd" => "bld4-floor-3rd" -func ToKebabCase(str string) string { - return camelCaseToLowerCase(str, '-') -} - -func camelCaseToLowerCase(str string, connector rune) string { - if len(str) == 0 { - return "" - } - - buf := &stringBuilder{} - wt, word, remaining := nextWord(str) - - for len(remaining) > 0 { - if wt != connectorWord { - toLower(buf, wt, word, connector) - } - - prev := wt - last := word - wt, word, remaining = nextWord(remaining) - - switch prev { - case numberWord: - for wt == alphabetWord || wt == numberWord { - toLower(buf, wt, word, connector) - wt, word, remaining = nextWord(remaining) - } - - if wt != invalidWord && wt != punctWord { - buf.WriteRune(connector) - } - - case connectorWord: - toLower(buf, prev, last, connector) - - case punctWord: - // nothing. - - default: - if wt != numberWord { - if wt != connectorWord && wt != punctWord { - buf.WriteRune(connector) - } - - break - } - - if len(remaining) == 0 { - break - } - - last := word - wt, word, remaining = nextWord(remaining) - - // consider number as a part of previous word. - // e.g. "Bld4Floor" => "bld4_floor" - if wt != alphabetWord { - toLower(buf, numberWord, last, connector) - - if wt != connectorWord && wt != punctWord { - buf.WriteRune(connector) - } - - break - } - - // if there are some lower case letters following a number, - // add connector before the number. - // e.g. 
"HTTP2xx" => "http_2xx" - buf.WriteRune(connector) - toLower(buf, numberWord, last, connector) - - for wt == alphabetWord || wt == numberWord { - toLower(buf, wt, word, connector) - wt, word, remaining = nextWord(remaining) - } - - if wt != invalidWord && wt != connectorWord && wt != punctWord { - buf.WriteRune(connector) - } - } - } - - toLower(buf, wt, word, connector) - return buf.String() -} - -func isConnector(r rune) bool { - return r == '-' || r == '_' || unicode.IsSpace(r) -} - -type wordType int - -const ( - invalidWord wordType = iota - numberWord - upperCaseWord - alphabetWord - connectorWord - punctWord - otherWord -) - -func nextWord(str string) (wt wordType, word, remaining string) { - if len(str) == 0 { - return - } - - var offset int - remaining = str - r, size := nextValidRune(remaining, utf8.RuneError) - offset += size - - if r == utf8.RuneError { - wt = invalidWord - word = str[:offset] - remaining = str[offset:] - return - } - - switch { - case isConnector(r): - wt = connectorWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !isConnector(r) { - break - } - - offset += size - remaining = remaining[size:] - } - - case unicode.IsPunct(r): - wt = punctWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !unicode.IsPunct(r) { - break - } - - offset += size - remaining = remaining[size:] - } - - case unicode.IsUpper(r): - wt = upperCaseWord - remaining = remaining[size:] - - if len(remaining) == 0 { - break - } - - r, size = nextValidRune(remaining, r) - - switch { - case unicode.IsUpper(r): - prevSize := size - offset += size - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !unicode.IsUpper(r) { - break - } - - prevSize = size - offset += size - remaining = remaining[size:] - } - - // it's a bit complex when dealing with a case like "HTTPStatus". - // it's expected to be splitted into "HTTP" and "Status". - // Therefore "S" should be in remaining instead of word. 
- if len(remaining) > 0 && isAlphabet(r) { - offset -= prevSize - remaining = str[offset:] - } - - case isAlphabet(r): - offset += size - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !isAlphabet(r) || unicode.IsUpper(r) { - break - } - - offset += size - remaining = remaining[size:] - } - } - - case isAlphabet(r): - wt = alphabetWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !isAlphabet(r) || unicode.IsUpper(r) { - break - } - - offset += size - remaining = remaining[size:] - } - - case unicode.IsNumber(r): - wt = numberWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !unicode.IsNumber(r) { - break - } - - offset += size - remaining = remaining[size:] - } - - default: - wt = otherWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if size == 0 || isConnector(r) || isAlphabet(r) || unicode.IsNumber(r) || unicode.IsPunct(r) { - break - } - - offset += size - remaining = remaining[size:] - } - } - - word = str[:offset] - return -} - -func nextValidRune(str string, prev rune) (r rune, size int) { - var sz int - - for len(str) > 0 { - r, sz = utf8.DecodeRuneInString(str) - size += sz - - if r != utf8.RuneError { - return - } - - str = str[sz:] - } - - r = prev - return -} - -func toLower(buf *stringBuilder, wt wordType, str string, connector rune) { - buf.Grow(buf.Len() + len(str)) - - if wt != upperCaseWord && wt != connectorWord { - buf.WriteString(str) - return - } - - for len(str) > 0 { - r, size := utf8.DecodeRuneInString(str) - str = str[size:] - - if isConnector(r) { - buf.WriteRune(connector) - } else if unicode.IsUpper(r) { - buf.WriteRune(unicode.ToLower(r)) - } else { - buf.WriteRune(r) - } - } -} - -// SwapCase will swap characters case from upper to lower or lower to upper. -func SwapCase(str string) string { - var r rune - var size int - - buf := &stringBuilder{} - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - switch { - case unicode.IsUpper(r): - buf.WriteRune(unicode.ToLower(r)) - - case unicode.IsLower(r): - buf.WriteRune(unicode.ToUpper(r)) - - default: - buf.WriteRune(r) - } - - str = str[size:] - } - - return buf.String() -} - -// FirstRuneToUpper converts first rune to upper case if necessary. -func FirstRuneToUpper(str string) string { - if str == "" { - return str - } - - r, size := utf8.DecodeRuneInString(str) - - if !unicode.IsLower(r) { - return str - } - - buf := &stringBuilder{} - buf.WriteRune(unicode.ToUpper(r)) - buf.WriteString(str[size:]) - return buf.String() -} - -// FirstRuneToLower converts first rune to lower case if necessary. -func FirstRuneToLower(str string) string { - if str == "" { - return str - } - - r, size := utf8.DecodeRuneInString(str) - - if !unicode.IsUpper(r) { - return str - } - - buf := &stringBuilder{} - buf.WriteRune(unicode.ToLower(r)) - buf.WriteString(str[size:]) - return buf.String() -} - -// Shuffle randomizes runes in a string and returns the result. -// It uses default random source in `math/rand`. -func Shuffle(str string) string { - if str == "" { - return str - } - - runes := []rune(str) - index := 0 - - for i := len(runes) - 1; i > 0; i-- { - index = rand.Intn(i + 1) - - if i != index { - runes[i], runes[index] = runes[index], runes[i] - } - } - - return string(runes) -} - -// ShuffleSource randomizes runes in a string with given random source. 
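A short sketch of the Shuffle/ShuffleSource pair defined here. ShuffleSource takes an explicit math/rand source so a caller can make the permutation reproducible; the seed is arbitrary and the outputs depend on math/rand internals, so none are asserted in the comments.

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/huandu/xstrings"
)

func main() {
	// With an explicit source, the permutation is reproducible for a
	// given seed.
	src := rand.NewSource(42)
	fmt.Println(xstrings.ShuffleSource("hello", src))

	// Shuffle uses the default math/rand source instead.
	fmt.Println(xstrings.Shuffle("hello"))
}
```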
-func ShuffleSource(str string, src rand.Source) string { - if str == "" { - return str - } - - runes := []rune(str) - index := 0 - r := rand.New(src) - - for i := len(runes) - 1; i > 0; i-- { - index = r.Intn(i + 1) - - if i != index { - runes[i], runes[index] = runes[index], runes[i] - } - } - - return string(runes) -} - -// Successor returns the successor to string. -// -// If there is one alphanumeric rune is found in string, increase the rune by 1. -// If increment generates a "carry", the rune to the left of it is incremented. -// This process repeats until there is no carry, adding an additional rune if necessary. -// -// If there is no alphanumeric rune, the rightmost rune will be increased by 1 -// regardless whether the result is a valid rune or not. -// -// Only following characters are alphanumeric. -// * a - z -// * A - Z -// * 0 - 9 -// -// Samples (borrowed from ruby's String#succ document): -// "abcd" => "abce" -// "THX1138" => "THX1139" -// "<>" => "<>" -// "1999zzz" => "2000aaa" -// "ZZZ9999" => "AAAA0000" -// "***" => "**+" -func Successor(str string) string { - if str == "" { - return str - } - - var r rune - var i int - carry := ' ' - runes := []rune(str) - l := len(runes) - lastAlphanumeric := l - - for i = l - 1; i >= 0; i-- { - r = runes[i] - - if ('a' <= r && r <= 'y') || - ('A' <= r && r <= 'Y') || - ('0' <= r && r <= '8') { - runes[i]++ - carry = ' ' - lastAlphanumeric = i - break - } - - switch r { - case 'z': - runes[i] = 'a' - carry = 'a' - lastAlphanumeric = i - - case 'Z': - runes[i] = 'A' - carry = 'A' - lastAlphanumeric = i - - case '9': - runes[i] = '0' - carry = '0' - lastAlphanumeric = i - } - } - - // Needs to add one character for carry. - if i < 0 && carry != ' ' { - buf := &stringBuilder{} - buf.Grow(l + 4) // Reserve enough space for write. - - if lastAlphanumeric != 0 { - buf.WriteString(str[:lastAlphanumeric]) - } - - buf.WriteRune(carry) - - for _, r = range runes[lastAlphanumeric:] { - buf.WriteRune(r) - } - - return buf.String() - } - - // No alphanumeric character. Simply increase last rune's value. - if lastAlphanumeric == l { - runes[l-1]++ - } - - return string(runes) -} diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go deleted file mode 100644 index f96e38703a..0000000000 --- a/vendor/github.com/huandu/xstrings/count.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "unicode" - "unicode/utf8" -) - -// Len returns str's utf8 rune length. -func Len(str string) int { - return utf8.RuneCountInString(str) -} - -// WordCount returns number of words in a string. -// -// Word is defined as a locale dependent string containing alphabetic characters, -// which may also contain but not start with `'` and `-` characters. -func WordCount(str string) int { - var r rune - var size, n int - - inWord := false - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - switch { - case isAlphabet(r): - if !inWord { - inWord = true - n++ - } - - case inWord && (r == '\'' || r == '-'): - // Still in word. - - default: - inWord = false - } - - str = str[size:] - } - - return n -} - -const minCJKCharacter = '\u3400' - -// Checks r is a letter but not CJK character. -func isAlphabet(r rune) bool { - if !unicode.IsLetter(r) { - return false - } - - switch { - // Quick check for non-CJK character. 
- case r < minCJKCharacter: - return true - - // Common CJK characters. - case r >= '\u4E00' && r <= '\u9FCC': - return false - - // Rare CJK characters. - case r >= '\u3400' && r <= '\u4D85': - return false - - // Rare and historic CJK characters. - case r >= '\U00020000' && r <= '\U0002B81D': - return false - } - - return true -} - -// Width returns string width in monotype font. -// Multi-byte characters are usually twice the width of single byte characters. -// -// Algorithm comes from `mb_strwidth` in PHP. -// http://php.net/manual/en/function.mb-strwidth.php -func Width(str string) int { - var r rune - var size, n int - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - n += RuneWidth(r) - str = str[size:] - } - - return n -} - -// RuneWidth returns character width in monotype font. -// Multi-byte characters are usually twice the width of single byte characters. -// -// Algorithm comes from `mb_strwidth` in PHP. -// http://php.net/manual/en/function.mb-strwidth.php -func RuneWidth(r rune) int { - switch { - case r == utf8.RuneError || r < '\x20': - return 0 - - case '\x20' <= r && r < '\u2000': - return 1 - - case '\u2000' <= r && r < '\uFF61': - return 2 - - case '\uFF61' <= r && r < '\uFFA0': - return 1 - - case '\uFFA0' <= r: - return 2 - } - - return 0 -} diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go deleted file mode 100644 index 1a6ef069f6..0000000000 --- a/vendor/github.com/huandu/xstrings/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -// Package xstrings is to provide string algorithms which are useful but not included in `strings` package. -// See project home page for details. https://github.com/huandu/xstrings -// -// Package xstrings assumes all strings are encoded in utf8. -package xstrings diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go deleted file mode 100644 index 8cd76c525c..0000000000 --- a/vendor/github.com/huandu/xstrings/format.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "unicode/utf8" -) - -// ExpandTabs can expand tabs ('\t') rune in str to one or more spaces dpending on -// current column and tabSize. -// The column number is reset to zero after each newline ('\n') occurring in the str. -// -// ExpandTabs uses RuneWidth to decide rune's width. -// For example, CJK characters will be treated as two characters. -// -// If tabSize <= 0, ExpandTabs panics with error. 
-// -// Samples: -// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k" -// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l" -// ExpandTabs("z中\t文\tw", 4) => "z中 文 w" -func ExpandTabs(str string, tabSize int) string { - if tabSize <= 0 { - panic("tab size must be positive") - } - - var r rune - var i, size, column, expand int - var output *stringBuilder - - orig := str - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - if r == '\t' { - expand = tabSize - column%tabSize - - if output == nil { - output = allocBuffer(orig, str) - } - - for i = 0; i < expand; i++ { - output.WriteRune(' ') - } - - column += expand - } else { - if r == '\n' { - column = 0 - } else { - column += RuneWidth(r) - } - - if output != nil { - output.WriteRune(r) - } - } - - str = str[size:] - } - - if output == nil { - return orig - } - - return output.String() -} - -// LeftJustify returns a string with pad string at right side if str's rune length is smaller than length. -// If str's rune length is larger than length, str itself will be returned. -// -// If pad is an empty string, str will be returned. -// -// Samples: -// LeftJustify("hello", 4, " ") => "hello" -// LeftJustify("hello", 10, " ") => "hello " -// LeftJustify("hello", 10, "123") => "hello12312" -func LeftJustify(str string, length int, pad string) string { - l := Len(str) - - if l >= length || pad == "" { - return str - } - - remains := length - l - padLen := Len(pad) - - output := &stringBuilder{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - output.WriteString(str) - writePadString(output, pad, padLen, remains) - return output.String() -} - -// RightJustify returns a string with pad string at left side if str's rune length is smaller than length. -// If str's rune length is larger than length, str itself will be returned. -// -// If pad is an empty string, str will be returned. -// -// Samples: -// RightJustify("hello", 4, " ") => "hello" -// RightJustify("hello", 10, " ") => " hello" -// RightJustify("hello", 10, "123") => "12312hello" -func RightJustify(str string, length int, pad string) string { - l := Len(str) - - if l >= length || pad == "" { - return str - } - - remains := length - l - padLen := Len(pad) - - output := &stringBuilder{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - writePadString(output, pad, padLen, remains) - output.WriteString(str) - return output.String() -} - -// Center returns a string with pad string at both side if str's rune length is smaller than length. -// If str's rune length is larger than length, str itself will be returned. -// -// If pad is an empty string, str will be returned. 
-// -// Samples: -// Center("hello", 4, " ") => "hello" -// Center("hello", 10, " ") => " hello " -// Center("hello", 10, "123") => "12hello123" -func Center(str string, length int, pad string) string { - l := Len(str) - - if l >= length || pad == "" { - return str - } - - remains := length - l - padLen := Len(pad) - - output := &stringBuilder{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - writePadString(output, pad, padLen, remains/2) - output.WriteString(str) - writePadString(output, pad, padLen, (remains+1)/2) - return output.String() -} - -func writePadString(output *stringBuilder, pad string, padLen, remains int) { - var r rune - var size int - - repeats := remains / padLen - - for i := 0; i < repeats; i++ { - output.WriteString(pad) - } - - remains = remains % padLen - - if remains != 0 { - for i := 0; i < remains; i++ { - r, size = utf8.DecodeRuneInString(pad) - output.WriteRune(r) - pad = pad[size:] - } - } -} diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go deleted file mode 100644 index 64075f9bb8..0000000000 --- a/vendor/github.com/huandu/xstrings/manipulate.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "strings" - "unicode/utf8" -) - -// Reverse a utf8 encoded string. -func Reverse(str string) string { - var size int - - tail := len(str) - buf := make([]byte, tail) - s := buf - - for len(str) > 0 { - _, size = utf8.DecodeRuneInString(str) - tail -= size - s = append(s[:tail], []byte(str[:size])...) - str = str[size:] - } - - return string(buf) -} - -// Slice a string by rune. -// -// Start must satisfy 0 <= start <= rune length. -// -// End can be positive, zero or negative. -// If end >= 0, start and end must satisfy start <= end <= rune length. -// If end < 0, it means slice to the end of string. -// -// Otherwise, Slice will panic as out of range. -func Slice(str string, start, end int) string { - var size, startPos, endPos int - - origin := str - - if start < 0 || end > len(str) || (end >= 0 && start > end) { - panic("out of range") - } - - if end >= 0 { - end -= start - } - - for start > 0 && len(str) > 0 { - _, size = utf8.DecodeRuneInString(str) - start-- - startPos += size - str = str[size:] - } - - if end < 0 { - return origin[startPos:] - } - - endPos = startPos - - for end > 0 && len(str) > 0 { - _, size = utf8.DecodeRuneInString(str) - end-- - endPos += size - str = str[size:] - } - - if len(str) == 0 && (start > 0 || end > 0) { - panic("out of range") - } - - return origin[startPos:endPos] -} - -// Partition splits a string by sep into three parts. -// The return value is a slice of strings with head, match and tail. -// -// If str contains sep, for example "hello" and "l", Partition returns -// "he", "l", "lo" -// -// If str doesn't contain sep, for example "hello" and "x", Partition returns -// "hello", "", "" -func Partition(str, sep string) (head, match, tail string) { - index := strings.Index(str, sep) - - if index == -1 { - head = str - return - } - - head = str[:index] - match = str[index : index+len(sep)] - tail = str[index+len(sep):] - return -} - -// LastPartition splits a string by last instance of sep into three parts. -// The return value is a slice of strings with head, match and tail. 
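Center, Reverse, Slice, and Partition from the sections above compose the same way; a brief sketch whose outputs follow the Samples and the rune-based semantics documented above.

```go
package main

import (
	"fmt"

	"github.com/huandu/xstrings"
)

func main() {
	fmt.Println(xstrings.Center("hello", 10, "123")) // 12hello123
	fmt.Println(xstrings.Reverse("héllo"))           // olléh (reversed by rune, not by byte)
	fmt.Println(xstrings.Slice("héllo", 1, 3))       // él (indices count runes)

	head, match, tail := xstrings.Partition("hello", "l")
	fmt.Println(head, match, tail) // he l lo
}
```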
-// -// If str contains sep, for example "hello" and "l", LastPartition returns -// "hel", "l", "o" -// -// If str doesn't contain sep, for example "hello" and "x", LastPartition returns -// "", "", "hello" -func LastPartition(str, sep string) (head, match, tail string) { - index := strings.LastIndex(str, sep) - - if index == -1 { - tail = str - return - } - - head = str[:index] - match = str[index : index+len(sep)] - tail = str[index+len(sep):] - return -} - -// Insert src into dst at given rune index. -// Index is counted by runes instead of bytes. -// -// If index is out of range of dst, panic with out of range. -func Insert(dst, src string, index int) string { - return Slice(dst, 0, index) + src + Slice(dst, index, -1) -} - -// Scrub scrubs invalid utf8 bytes with repl string. -// Adjacent invalid bytes are replaced only once. -func Scrub(str, repl string) string { - var buf *stringBuilder - var r rune - var size, pos int - var hasError bool - - origin := str - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - if r == utf8.RuneError { - if !hasError { - if buf == nil { - buf = &stringBuilder{} - } - - buf.WriteString(origin[:pos]) - hasError = true - } - } else if hasError { - hasError = false - buf.WriteString(repl) - - origin = origin[pos:] - pos = 0 - } - - pos += size - str = str[size:] - } - - if buf != nil { - buf.WriteString(origin) - return buf.String() - } - - // No invalid byte. - return origin -} - -// WordSplit splits a string into words. Returns a slice of words. -// If there is no word in a string, return nil. -// -// Word is defined as a locale dependent string containing alphabetic characters, -// which may also contain but not start with `'` and `-` characters. -func WordSplit(str string) []string { - var word string - var words []string - var r rune - var size, pos int - - inWord := false - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - switch { - case isAlphabet(r): - if !inWord { - inWord = true - word = str - pos = 0 - } - - case inWord && (r == '\'' || r == '-'): - // Still in word. - - default: - if inWord { - inWord = false - words = append(words, word[:pos]) - } - } - - pos += size - str = str[size:] - } - - if inWord { - words = append(words, word[:pos]) - } - - return words -} diff --git a/vendor/github.com/huandu/xstrings/stringbuilder.go b/vendor/github.com/huandu/xstrings/stringbuilder.go deleted file mode 100644 index bb0919d32f..0000000000 --- a/vendor/github.com/huandu/xstrings/stringbuilder.go +++ /dev/null @@ -1,7 +0,0 @@ -//+build go1.10 - -package xstrings - -import "strings" - -type stringBuilder = strings.Builder diff --git a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go deleted file mode 100644 index dac389d139..0000000000 --- a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go +++ /dev/null @@ -1,9 +0,0 @@ -//+build !go1.10 - -package xstrings - -import "bytes" - -type stringBuilder struct { - bytes.Buffer -} diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go deleted file mode 100644 index 42e694fb17..0000000000 --- a/vendor/github.com/huandu/xstrings/translate.go +++ /dev/null @@ -1,546 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "unicode" - "unicode/utf8" -) - -type runeRangeMap struct { - FromLo rune // Lower bound of range map. 
- FromHi rune // An inclusive higher bound of range map. - ToLo rune - ToHi rune -} - -type runeDict struct { - Dict [unicode.MaxASCII + 1]rune -} - -type runeMap map[rune]rune - -// Translator can translate string with pre-compiled from and to patterns. -// If a from/to pattern pair needs to be used more than once, it's recommended -// to create a Translator and reuse it. -type Translator struct { - quickDict *runeDict // A quick dictionary to look up rune by index. Only available for latin runes. - runeMap runeMap // Rune map for translation. - ranges []*runeRangeMap // Ranges of runes. - mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. - reverted bool // If to pattern is empty, all matched characters will be deleted. - hasPattern bool -} - -// NewTranslator creates new Translator through a from/to pattern pair. -func NewTranslator(from, to string) *Translator { - tr := &Translator{} - - if from == "" { - return tr - } - - reverted := from[0] == '^' - deletion := len(to) == 0 - - if reverted { - from = from[1:] - } - - var fromStart, fromEnd, fromRangeStep rune - var toStart, toEnd, toRangeStep rune - var fromRangeSize, toRangeSize rune - var singleRunes []rune - - // Update the to rune range. - updateRange := func() { - // No more rune to read in the to rune pattern. - if toEnd == utf8.RuneError { - return - } - - if toRangeStep == 0 { - to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd) - return - } - - // Current range is not empty. Consume 1 rune from start. - if toStart != toEnd { - toStart += toRangeStep - return - } - - // No more rune. Repeat the last rune. - if to == "" { - toEnd = utf8.RuneError - return - } - - // Both start and end are used. Read two more runes from the to pattern. - to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) - } - - if deletion { - toStart = utf8.RuneError - toEnd = utf8.RuneError - } else { - // If from pattern is reverted, only the last rune in the to pattern will be used. - if reverted { - var size int - - for len(to) > 0 { - toStart, size = utf8.DecodeRuneInString(to) - to = to[size:] - } - - toEnd = utf8.RuneError - } else { - to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) - } - } - - fromEnd = utf8.RuneError - - for len(from) > 0 { - from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd) - - // fromStart is a single character. Just map it with a rune in the to pattern. - if fromRangeStep == 0 { - singleRunes = tr.addRune(fromStart, toStart, singleRunes) - updateRange() - continue - } - - for toEnd != utf8.RuneError && fromStart != fromEnd { - // If mapped rune is a single character instead of a range, simply shift first - // rune in the range. - if toRangeStep == 0 { - singleRunes = tr.addRune(fromStart, toStart, singleRunes) - updateRange() - fromStart += fromRangeStep - continue - } - - fromRangeSize = (fromEnd - fromStart) * fromRangeStep - toRangeSize = (toEnd - toStart) * toRangeStep - - // Not enough runes in the to pattern. Need to read more. - if fromRangeSize > toRangeSize { - fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes) - fromStart += fromRangeStep - updateRange() - - // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs be considered - // as a single rune. 
- if fromStart == fromEnd { - singleRunes = tr.addRune(fromStart, toStart, singleRunes) - updateRange() - } - - continue - } - - fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes) - updateRange() - break - } - - if fromStart == fromEnd { - fromEnd = utf8.RuneError - continue - } - - _, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) - fromEnd = utf8.RuneError - } - - if fromEnd != utf8.RuneError { - tr.addRune(fromEnd, toStart, singleRunes) - } - - tr.reverted = reverted - tr.mappedRune = -1 - tr.hasPattern = true - - // Translate RuneError only if in deletion or reverted mode. - if deletion || reverted { - tr.mappedRune = toStart - } - - return tr -} - -func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune { - if from <= unicode.MaxASCII { - if tr.quickDict == nil { - tr.quickDict = &runeDict{} - } - - tr.quickDict.Dict[from] = to - } else { - if tr.runeMap == nil { - tr.runeMap = make(runeMap) - } - - tr.runeMap[from] = to - } - - singleRunes = append(singleRunes, from) - return singleRunes -} - -func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) { - var r rune - var rrm *runeRangeMap - - if fromLo < fromHi { - rrm = &runeRangeMap{ - FromLo: fromLo, - FromHi: fromHi, - ToLo: toLo, - ToHi: toHi, - } - } else { - rrm = &runeRangeMap{ - FromLo: fromHi, - FromHi: fromLo, - ToLo: toHi, - ToHi: toLo, - } - } - - // If there is any single rune conflicts with this rune range, clear single rune record. - for _, r = range singleRunes { - if rrm.FromLo <= r && r <= rrm.FromHi { - if r <= unicode.MaxASCII { - tr.quickDict.Dict[r] = 0 - } else { - delete(tr.runeMap, r) - } - } - } - - tr.ranges = append(tr.ranges, rrm) - return fromHi, toHi -} - -func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) { - var r rune - var size int - - remaining = str - escaping := false - isRange := false - - for len(remaining) > 0 { - r, size = utf8.DecodeRuneInString(remaining) - remaining = remaining[size:] - - // Parse special characters. - if !escaping { - if r == '\\' { - escaping = true - continue - } - - if r == '-' { - // Ignore slash at beginning of string. - if last == utf8.RuneError { - continue - } - - start = last - isRange = true - continue - } - } - - escaping = false - - if last != utf8.RuneError { - // This is a range which start and end are the same. - // Considier it as a normal character. - if isRange && last == r { - isRange = false - continue - } - - start = last - end = r - - if isRange { - if start < end { - rangeStep = 1 - } else { - rangeStep = -1 - } - } - - return - } - - last = r - } - - start = last - end = utf8.RuneError - return -} - -// Translate str with a from/to pattern pair. -// -// See comment in Translate function for usage and samples. -func (tr *Translator) Translate(str string) string { - if !tr.hasPattern || str == "" { - return str - } - - var r rune - var size int - var needTr bool - - orig := str - - var output *stringBuilder - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - r, needTr = tr.TranslateRune(r) - - if needTr && output == nil { - output = allocBuffer(orig, str) - } - - if r != utf8.RuneError && output != nil { - output.WriteRune(r) - } - - str = str[size:] - } - - // No character is translated. - if output == nil { - return orig - } - - return output.String() -} - -// TranslateRune return translated rune and true if r matches the from pattern. 
-// If r doesn't match the pattern, original r is returned and translated is false. -func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) { - switch { - case tr.quickDict != nil: - if r <= unicode.MaxASCII { - result = tr.quickDict.Dict[r] - - if result != 0 { - translated = true - - if tr.mappedRune >= 0 { - result = tr.mappedRune - } - - break - } - } - - fallthrough - - case tr.runeMap != nil: - var ok bool - - if result, ok = tr.runeMap[r]; ok { - translated = true - - if tr.mappedRune >= 0 { - result = tr.mappedRune - } - - break - } - - fallthrough - - default: - var rrm *runeRangeMap - ranges := tr.ranges - - for i := len(ranges) - 1; i >= 0; i-- { - rrm = ranges[i] - - if rrm.FromLo <= r && r <= rrm.FromHi { - translated = true - - if tr.mappedRune >= 0 { - result = tr.mappedRune - break - } - - if rrm.ToLo < rrm.ToHi { - result = rrm.ToLo + r - rrm.FromLo - } else if rrm.ToLo > rrm.ToHi { - // ToHi can be smaller than ToLo if range is from higher to lower. - result = rrm.ToLo - r + rrm.FromLo - } else { - result = rrm.ToLo - } - - break - } - } - } - - if tr.reverted { - if !translated { - result = tr.mappedRune - } - - translated = !translated - } - - if !translated { - result = r - } - - return -} - -// HasPattern returns true if Translator has one pattern at least. -func (tr *Translator) HasPattern() bool { - return tr.hasPattern -} - -// Translate str with the characters defined in from replaced by characters defined in to. -// -// From and to are patterns representing a set of characters. Pattern is defined as following. -// -// * Special characters -// * '-' means a range of runes, e.g. -// * "a-z" means all characters from 'a' to 'z' inclusive; -// * "z-a" means all characters from 'z' to 'a' inclusive. -// * '^' as first character means a set of all runes excepted listed, e.g. -// * "^a-z" means all characters except 'a' to 'z' inclusive. -// * '\' escapes special characters. -// * Normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'. -// -// Translate will try to find a 1:1 mapping from from to to. -// If to is smaller than from, last rune in to will be used to map "out of range" characters in from. -// -// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern. -// -// If the to pattern is an empty string, Translate works exactly the same as Delete. -// -// Samples: -// Translate("hello", "aeiou", "12345") => "h2ll4" -// Translate("hello", "a-z", "A-Z") => "HELLO" -// Translate("hello", "z-a", "a-z") => "svool" -// Translate("hello", "aeiou", "*") => "h*ll*" -// Translate("hello", "^l", "*") => "**ll*" -// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d" -func Translate(str, from, to string) string { - tr := NewTranslator(from, to) - return tr.Translate(str) -} - -// Delete runes in str matching the pattern. -// Pattern is defined in Translate function. -// -// Samples: -// Delete("hello", "aeiou") => "hll" -// Delete("hello", "a-k") => "llo" -// Delete("hello", "^a-k") => "he" -func Delete(str, pattern string) string { - tr := NewTranslator(pattern, "") - return tr.Translate(str) -} - -// Count how many runes in str match the pattern. -// Pattern is defined in Translate function. 
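A usage sketch for the pattern-translation API above: the one-shot outputs are the ones listed in the Translate and Delete Samples, and the reuse pattern follows the Translator type's own doc comment.

```go
package main

import (
	"fmt"

	"github.com/huandu/xstrings"
)

func main() {
	// One-shot helpers compile the pattern on every call.
	fmt.Println(xstrings.Translate("hello", "a-z", "A-Z")) // HELLO
	fmt.Println(xstrings.Translate("hello", "aeiou", "*")) // h*ll*
	fmt.Println(xstrings.Delete("hello", "aeiou"))         // hll

	// For repeated use, compile the from/to pattern pair once and reuse it.
	tr := xstrings.NewTranslator("aeiou", "*")
	fmt.Println(tr.Translate("world")) // w*rld
}
```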
-// -// Samples: -// Count("hello", "aeiou") => 3 -// Count("hello", "a-k") => 3 -// Count("hello", "^a-k") => 2 -func Count(str, pattern string) int { - if pattern == "" || str == "" { - return 0 - } - - var r rune - var size int - var matched bool - - tr := NewTranslator(pattern, "") - cnt := 0 - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if _, matched = tr.TranslateRune(r); matched { - cnt++ - } - } - - return cnt -} - -// Squeeze deletes adjacent repeated runes in str. -// If pattern is not empty, only runes matching the pattern will be squeezed. -// -// Samples: -// Squeeze("hello", "") => "helo" -// Squeeze("hello", "m-z") => "hello" -// Squeeze("hello world", " ") => "hello world" -func Squeeze(str, pattern string) string { - var last, r rune - var size int - var skipSqueeze, matched bool - var tr *Translator - var output *stringBuilder - - orig := str - last = -1 - - if len(pattern) > 0 { - tr = NewTranslator(pattern, "") - } - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - // Need to squeeze the str. - if last == r && !skipSqueeze { - if tr != nil { - if _, matched = tr.TranslateRune(r); !matched { - skipSqueeze = true - } - } - - if output == nil { - output = allocBuffer(orig, str) - } - - if skipSqueeze { - output.WriteRune(r) - } - } else { - if output != nil { - output.WriteRune(r) - } - - last = r - skipSqueeze = false - } - - str = str[size:] - } - - if output == nil { - return orig - } - - return output.String() -} diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml deleted file mode 100644 index 8a0681af85..0000000000 --- a/vendor/github.com/imdario/mergo/.deepsource.toml +++ /dev/null @@ -1,12 +0,0 @@ -version = 1 - -test_patterns = [ - "*_test.go" -] - -[[analyzers]] -name = "go" -enabled = true - - [analyzers.meta] - import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore deleted file mode 100644 index 529c3412ba..0000000000 --- a/vendor/github.com/imdario/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index d324c43ba4..0000000000 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - go test -race -v ./... 
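Rounding out the pattern helpers, Count and Squeeze (both shown above) behave as follows. A small sketch; the expected outputs are derived from the implementations above rather than from the upstream Samples, so treat them as illustrative.

```go
package main

import (
	"fmt"

	"github.com/huandu/xstrings"
)

func main() {
	// Count tallies runes matching the pattern: both 'l' runes here.
	fmt.Println(xstrings.Count("hello", "l")) // 2

	// With an empty pattern, all adjacent repeats are squeezed; with a
	// pattern, runes outside it (like 'l' vs. "m-z") are left alone.
	fmt.Println(xstrings.Squeeze("hello", ""))    // helo
	fmt.Println(xstrings.Squeeze("hello", "m-z")) // hello
}
```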
-after_script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b44907a..0000000000 --- a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298d..0000000000 --- a/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index aa8cbd7ce6..0000000000 --- a/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,247 +0,0 @@ -# Mergo - - -[![GoDoc][3]][4] -[![GitHub release][5]][6] -[![GoCard][7]][8] -[![Build Status][1]][2] -[![Coverage Status][9]][10] -[![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] - -[![GoCenter Kudos][15]][16] - -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://img.shields.io/github/release/imdario/mergo.svg -[6]: https://github.com/imdario/mergo/releases -[7]: https://goreportcard.com/badge/imdario/mergo -[8]: https://goreportcard.com/report/github.com/imdario/mergo -[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[10]: https://coveralls.io/github/imdario/mergo?branch=master -[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[12]: https://sourcegraph.com/github.com/imdario/mergo?badge -[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield -[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo -[16]: https://search.gocenter.io/github.com/imdario/mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - -### Important note - -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. - -Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. 
:heart_eyes: - -Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) -Donate using Liberapay - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) - -## Install - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... 
-} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. - -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -Here is a nice example: - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v2 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" -) - -type timeTransformer struct { -} - -func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). 
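The warning above, that mapping a struct into a map is not recursive, is easiest to see in the struct-to-map direction, which the examples do not spell out. A minimal sketch, reusing the Foo type from the example above:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Foo struct {
	A string
	B int64
}

func main() {
	src := Foo{A: "one", B: 2}
	dst := map[string]interface{}{}

	// Exported field names become lower-camel-case keys; any struct
	// fields would be assigned as whole values, not mapped recursively.
	if err := mergo.Map(&dst, src); err != nil {
		panic(err)
	}
	fmt.Println(dst) // map[a:one b:2]
}
```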
- -## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). - - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index fcd985f995..0000000000 --- a/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Status - -It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. - -Important note - -Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. - -Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -Install - -Do your usual installation procedure: - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. 
Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - if err := mergo.Merge(&dst, src); err != nil { - // ... - } - -Also, you can merge overwriting values using the transformer WithOverride. - - if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... - } - -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. - - if err := mergo.Map(&dst, srcMap); err != nil { - // ... - } - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. - -Here is a nice example: - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - ) - - type Foo struct { - A string - B int64 - } - - func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} - } - -Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" - ) - - type timeTransformer struct { - } - - func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil - } - - type Snapshot struct { - Time time.Time - // ... - } - - func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } - } - -Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario - -About - -Written by Dario Castañé: https://da.rio.hn - -License - -BSD 3-Clause license, as Go language. - -*/ -package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index a13a7ee46c..0000000000 --- a/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. 
-// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. 
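As the deprecation note just below states, MapWithOverwrite is simply Map with the WithOverride option appended. A sketch of the equivalence; the struct and key names here are illustrative.

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	dst := struct{ A string }{A: "old"}
	src := map[string]interface{}{"a": "new"}

	// The deprecated spelling...
	if err := mergo.MapWithOverwrite(&dst, src); err != nil {
		panic(err)
	}
	fmt.Println(dst.A) // new

	// ...and its option-based equivalent.
	dst.A = "old"
	if err := mergo.Map(&dst, src, mergo.WithOverride); err != nil {
		panic(err)
	}
	fmt.Println(dst.A) // new
}
```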
-// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) -} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index 8c2a8fcd90..0000000000 --- a/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" -) - -func hasMergeableFields(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasMergeableFields(dst.Field(i)) - } else if isExportedComponent(&field) { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -func isExportedComponent(field *reflect.StructField) bool { - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } - c := field.Name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } - return true -} - -type Config struct { - Overwrite bool - AppendSlice bool - TypeCheck bool - Transformers Transformers - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool - sliceDeepCopy bool - debug bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - sliceDeepCopy := config.sliceDeepCopy - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} - } - - if config.Transformers != nil && !isEmptyValue(dst) { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasMergeableFields(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - if dst.CanSet() { - dst.Set(reflect.MakeMap(dst.Type())) - } else { - dst = src - return - } - } - - if src.Kind() != reflect.Map { - if overwrite { - dst.Set(src) - } - return - } - - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - if overwrite { - dst.SetMapIndex(key, srcElement) - } - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - srcMapElm := srcElement - dstMapElm := dstElement - if srcMapElm.CanInterface() { - srcMapElm = reflect.ValueOf(srcMapElm.Interface()) - if dstMapElm.IsValid() { - dstMapElm = reflect.ValueOf(dstMapElm.Interface()) - } - } - if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { - if typeCheck && srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = srcSlice - } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } else if sliceDeepCopy { - i := 0 - for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { - srcElement := srcSlice.Index(i) - dstElement := dstSlice.Index(i) - - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - - } - dst.SetMapIndex(key, dstSlice) - } - } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue - } - - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, srcElement) - } - } - 
case reflect.Slice: - if !dst.CanSet() { - break - } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { - dst.Set(src) - } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } - dst.Set(reflect.AppendSlice(dst, src)) - } else if sliceDeepCopy { - for i := 0; i < src.Len() && i < dst.Len(); i++ { - srcElement := src.Index(i) - dstElement := dst.Index(i) - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if isReflectNil(src) { - if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - break - } - - if src.Kind() != reflect.Interface { - if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return ErrDifferentArgumentsTypes - } - break - } - - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - break - } - - if dst.Elem().Kind() == src.Elem().Kind() { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - break - } - default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) - if mustSet { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } - - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) -} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. 
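The reflect.Slice branch above either overwrites, appends, or deep-copies depending on the options; the append path is driven by WithAppendSlice, defined just below. A minimal sketch (the Lists type is illustrative):

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Lists struct {
	Items []string
}

func main() {
	dst := Lists{Items: []string{"a"}}
	src := Lists{Items: []string{"b", "c"}}

	// Both slices must have the same type, or deepMerge returns an error.
	if err := mergo.Merge(&dst, src, mergo.WithAppendSlice); err != nil {
		panic(err)
	}
	fmt.Println(dst.Items) // [a b c]
}
```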
-func WithOverwriteWithEmptyValue(config *Config) { - config.Overwrite = true - config.overwriteWithEmptyValue = true -} - -// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. -func WithOverrideEmptySlice(config *Config) { - config.overwriteSliceWithEmptyValue = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). -func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. -func WithSliceDeepCopy(config *Config) { - config.sliceDeepCopy = true - config.Overwrite = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} - -// IsReflectNil is the reflect value provided nil -func isReflectNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: - // Both interface and slice are nil if first word is 0. - // Both are always bigger than a word; assume flagIndir. - return v.IsNil() - default: - return false - } -} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index 3cc926c7f6..0000000000 --- a/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. -var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - ptr uintptr - typ reflect.Type - next *visit -} - -// From src/pkg/encoding/json/encode.go. 
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - return isEmptyValue(v.Elem()) - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go index 9d2d8a4bab..06a91f0868 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package mousetrap diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go index 336142a5e3..0c56880216 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -1,81 +1,32 @@ -// +build windows -// +build !go1.4 - package mousetrap import ( - "fmt" - "os" "syscall" "unsafe" ) -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { + snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return + defer syscall.CloseHandle(snapshot) + var procEntry syscall.ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = syscall.Process32First(snapshot, &procEntry); err != nil { + return nil, err } - 
for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil } - - ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: %v", e1) - return + err = syscall.Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err } } } -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) - if err != nil { - return - } - - pid = int(pe.th32ParentProcessID) - return -} - // StartedByExplorer returns true if the program was invoked by the user double-clicking // on the executable from explorer.exe // @@ -83,16 +34,9 @@ func getppid() (pid int, err error) { // It does not guarantee that the program was run from a terminal. It only can tell you // whether it was launched from explorer.exe func StartedByExplorer() bool { - ppid, err := getppid() + pe, err := getProcessEntry(syscall.Getppid()) if err != nil { return false } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" + return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) } diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c3c..0000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. 
It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/vendor/github.com/jgautheron/goconst/.gitignore b/vendor/github.com/jgautheron/goconst/.gitignore new file mode 100644 index 0000000000..a9d34d9c41 --- /dev/null +++ b/vendor/github.com/jgautheron/goconst/.gitignore @@ -0,0 +1,2 @@ +.idea +goconst \ No newline at end of file diff --git a/vendor/github.com/jgautheron/goconst/README.md b/vendor/github.com/jgautheron/goconst/README.md index 8dd093baf0..c671eb5412 100644 --- a/vendor/github.com/jgautheron/goconst/README.md +++ b/vendor/github.com/jgautheron/goconst/README.md @@ -23,6 +23,7 @@ Usage: Flags: -ignore exclude files matching the given regular expression + -ignore-strings exclude strings matching the given regular expression -ignore-tests exclude tests from the search (default: true) -min-occurrences report from how many occurrences (default: 2) -min-length only report strings with the minimum given length (default: 3) diff --git a/vendor/github.com/jgautheron/goconst/api.go b/vendor/github.com/jgautheron/goconst/api.go index d56fcd6c25..b838e035f6 100644 --- a/vendor/github.com/jgautheron/goconst/api.go +++ b/vendor/github.com/jgautheron/goconst/api.go @@ -14,6 +14,7 @@ type Issue struct { } type Config struct { + IgnoreStrings string IgnoreTests bool MatchWithConstants bool MinStringLength int @@ -28,6 +29,7 @@ func Run(files []*ast.File, fset *token.FileSet, cfg *Config) ([]Issue, error) { p := New( "", "", + cfg.IgnoreStrings, cfg.IgnoreTests, cfg.MatchWithConstants, cfg.ParseNumbers, diff --git a/vendor/github.com/jgautheron/goconst/parser.go b/vendor/github.com/jgautheron/goconst/parser.go index 2ed7a9a909..2f32740b96 100644 --- a/vendor/github.com/jgautheron/goconst/parser.go +++ b/vendor/github.com/jgautheron/goconst/parser.go @@ -24,11 +24,11 @@ const ( type Parser struct { // Meant to be passed via New() - path, ignore string - ignoreTests, matchConstant bool - minLength, minOccurrences int - numberMin, numberMax int - excludeTypes map[Type]bool + path, ignore, ignoreStrings string + ignoreTests, matchConstant bool + minLength, minOccurrences int + numberMin, numberMax int + excludeTypes map[Type]bool supportedTokens []token.Token @@ -39,7 +39,7 @@ type Parser struct { // New creates a new instance of the parser. // This is your entry point if you'd like to use goconst as an API. 
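+// The ignoreStrings parameter introduced below is a regular expression; +// strings matching it are dropped from the results in ProcessResults.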
-func New(path, ignore string, ignoreTests, matchConstant, numbers bool, numberMin, numberMax, minLength, minOccurrences int, excludeTypes map[Type]bool) *Parser { +func New(path, ignore, ignoreStrings string, ignoreTests, matchConstant, numbers bool, numberMin, numberMax, minLength, minOccurrences int, excludeTypes map[Type]bool) *Parser { supportedTokens := []token.Token{token.STRING} if numbers { supportedTokens = append(supportedTokens, token.INT, token.FLOAT) @@ -48,6 +48,7 @@ func New(path, ignore string, ignoreTests, matchConstant, numbers bool, numberMi return &Parser{ path: path, ignore: ignore, + ignoreStrings: ignoreStrings, ignoreTests: ignoreTests, matchConstant: matchConstant, minLength: minLength, @@ -98,12 +99,22 @@ func (p *Parser) ProcessResults() { delete(p.strs, str) } + if p.ignoreStrings != "" { + match, err := regexp.MatchString(p.ignoreStrings, str) + if err != nil { + log.Println(err) + } + if match { + delete(p.strs, str) + } + } + // If the value is a number - if i, err := strconv.Atoi(str); err == nil { - if p.numberMin != 0 && i < p.numberMin { + if i, err := strconv.ParseInt(str, 0, 0); err == nil { + if p.numberMin != 0 && i < int64(p.numberMin) { delete(p.strs, str) } - if p.numberMax != 0 && i > p.numberMax { + if p.numberMax != 0 && i > int64(p.numberMax) { delete(p.strs, str) } } diff --git a/vendor/github.com/jgautheron/goconst/visitor.go b/vendor/github.com/jgautheron/goconst/visitor.go index c0974da8fd..a553814f5c 100644 --- a/vendor/github.com/jgautheron/goconst/visitor.go +++ b/vendor/github.com/jgautheron/goconst/visitor.go @@ -62,10 +62,6 @@ func (v *treeVisitor) Visit(node ast.Node) ast.Visitor { // if foo == "moo" case *ast.BinaryExpr: - if t.Op != token.EQL && t.Op != token.NEQ { - return v - } - var lit *ast.BasicLit var ok bool diff --git a/vendor/github.com/jjti/go-spancheck/.gitignore b/vendor/github.com/jjti/go-spancheck/.gitignore new file mode 100644 index 0000000000..1f83be414c --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/.gitignore @@ -0,0 +1,19 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ +src/ diff --git a/vendor/github.com/jjti/go-spancheck/.golangci.yml b/vendor/github.com/jjti/go-spancheck/.golangci.yml new file mode 100644 index 0000000000..15d8513d68 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/.golangci.yml @@ -0,0 +1,103 @@ +## A good ref for this: https://gist.github.com/maratori/47a4d00457a92aa426dbd48a18776322 + +run: + timeout: 5m + tests: true +linters: + enable: + - asasalint # checks for pass []any as any in variadic func(...any) + - asciicheck # checks that your code does not contain non-ASCII identifiers + - bidichk # checks for dangerous unicode character sequences + - bodyclose + - containedctx + - decorder # checks declaration order and count of types, constants, variables and functions + - dogsled + - dupword # checks for duplicate words in the source code + - durationcheck # checks for two durations multiplied together + - errcheck + - errname + - errorlint + - exhaustive # checks exhaustiveness of enum switch statements + - exportloopref # checks for pointers to enclosing 
loop variables + - gci + - gochecknoinits # checks that no init functions are present in Go code + - gocritic + - gomnd + - gosimple + - govet + - importas # enforces consistent import aliases + - ineffassign + - loggercheck + - makezero # finds slice declarations with non-zero initial length + - mirror + - misspell + - musttag # enforces field tags in (un)marshaled structs + - nakedret + - nestif # reports deeply nested if statements + - nilerr # finds the code that returns nil even if it checks that the error is not nil + - noctx # finds sending http request without context.Context + - nolintlint # reports ill-formed or insufficient nolint directives + - predeclared # finds code that shadows one of Go's predeclared identifiers + - promlinter + - reassign # checks that package variables are not reassigned + - revive # fast, configurable, extensible, flexible, and beautiful linter for Go, drop-in replacement of golint + - staticcheck + - stylecheck + - tenv + - thelper # detects golang test helpers without t.Helper() call and checks the consistency of test helpers + - unconvert # removes unnecessary type conversions + - unparam # reports unused function parameters + - unused + - usestdlibvars # detects the possibility to use variables/constants from the Go standard library + - wastedassign # finds wasted assignment statements + - whitespace # detects leading and trailing whitespace +linters-settings: + gci: + skip-generated: true + custom-order: true + sections: + - standard # Standard section: captures all standard packages. + - default # Default section: contains all imports that could not be matched to another section type. + - prefix(github.com/jjti) + exhaustive: + # Program elements to check for exhaustiveness. + # Default: [ switch ] + check: + - switch + - map + gocritic: + settings: + captLocal: + # Whether to restrict checker to params only. + # Default: true + paramsOnly: false + underef: + # Whether to skip (*x).method() calls where x is a pointer receiver. + # Default: true + skipRecvDeref: false + govet: + enable-all: true + disable: + - fieldalignment # too strict + - shadow # bunch of false positive, doesn't realize when we return from a func + misspell: + locale: US + nakedret: + max-func-lines: 0 + nestif: + # Minimal complexity of if statements to report. + # Default: 5 + min-complexity: 4 + nolintlint: + # Enable to require an explanation of nonzero length after each nolint directive. + # Default: false + require-explanation: true + stylecheck: + checks: ["all"] +issues: + include: + - EXC0001 # Error return value of x is not checked + - EXC0013 # package comment should be of the form "(.+)... + - EXC0014 # comment on exported (.+) should be of the form "(.+)..." + exclude: + - ifElseChain diff --git a/vendor/github.com/jjti/go-spancheck/CONTRIBUTING.md b/vendor/github.com/jjti/go-spancheck/CONTRIBUTING.md new file mode 100644 index 0000000000..32932fae10 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/CONTRIBUTING.md @@ -0,0 +1,51 @@ +# Contributing guideline + +Contributions are welcome + appreciated. + +## Open Requests + +These are a couple contributions I would especially appreciate: + +1. Add check for SetAttributes: https://github.com/jjti/go-spancheck/issues/1 +1. Add SuggestedFix(es): https://github.com/jjti/go-spancheck/issues/2 + +## Steps + +### 1. Create an Issue + +If one does not exist already, open a bug report or feature request in [https://github.com/jjti/go-spancheck/issues](https://github.com/jjti/go-spancheck/issues). + +### 2. 
Add a test case + +Test cases are in `/testdata`. + +If fixing a bug, you can add it to `testdata/enableall/enable_all.go` (for example): + +```go +func _() { + ctx, span := otel.Tracer("foo").Start(context.Background(), "bar") // want "span.End is not called on all paths, possible memory leak" + print(ctx.Done(), span.IsRecording()) +} // want "return can be reached without calling span.End" +``` + +If adding a new feature with a new combination of flags, create a new module within `testdata`: + +1. Create a new module, e.g. `testdata/setattributes` +1. Copy/paste `go.mod`/`go.sum` into the new module directory and update the module definition, e.g. `module github.com/jjti/go-spancheck/testdata/setattributes` +1. Add the module to the workspace in [go.work](./go.work) +1. Add the module's directory to the `testvendor` Make target in [Makefile](./Makefile) + +### 3. Run tests + +```bash +make test +``` + +### 4. Open a PR + +For example, a GitHub CLI alias for opening PRs: + +```bash +alias gpr='gh pr view --web 2>/dev/null || gh pr create --web --fill' +gpr +``` diff --git a/vendor/github.com/jjti/go-spancheck/LICENSE b/vendor/github.com/jjti/go-spancheck/LICENSE new file mode 100644 index 0000000000..552ddf2dc5 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Joshua Timmons + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/jjti/go-spancheck/Makefile b/vendor/github.com/jjti/go-spancheck/Makefile new file mode 100644 index 0000000000..39d80f7c61 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/Makefile @@ -0,0 +1,27 @@ +.PHONY: fmt +fmt: + golangci-lint run --fix --config ./.golangci.yml + +.PHONY: test +test: testvendor + go test -v ./... + +# note: I'm copying https://github.com/ghostiam/protogetter/blob/main/testdata/Makefile +# +# x/tools/go/analysis/analysistest does not support go modules. To work around this issue +# we need to vendor any external modules to `./src`. +# +# Follow https://github.com/golang/go/issues/37054 for more details.
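+# +# In short: `testvendor` vendors testdata/base's dependencies and copies them +# into each test module's src/ directory so analysistest can resolve imports.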
+.PHONY: testvendor +testvendor: + @rm -rf base/src + @cd testdata/base && go mod vendor + @cp -r testdata/base/vendor testdata/base/src + @cp -r testdata/base/vendor testdata/disableerrorchecks/src + @cp -r testdata/base/vendor testdata/enableall/src + @rm -rf testdata/base/vendor + +.PHONY: install +install: + go install ./cmd/spancheck + @echo "Installed in $(shell which spancheck)" \ No newline at end of file diff --git a/vendor/github.com/jjti/go-spancheck/README.md b/vendor/github.com/jjti/go-spancheck/README.md new file mode 100644 index 0000000000..953489d7a3 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/README.md @@ -0,0 +1,241 @@ +# go-spancheck + +![Latest release](https://img.shields.io/github/v/release/jjti/go-spancheck) +[![ci](https://github.com/jjti/go-spancheck/actions/workflows/ci.yaml/badge.svg)](https://github.com/jjti/go-spancheck/actions/workflows/ci.yaml) +[![Go Report Card](https://goreportcard.com/badge/github.com/jjti/go-spancheck)](https://goreportcard.com/report/github.com/jjti/go-spancheck) +[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](LICENSE) + +Checks usage of: + +- [OpenTelemetry spans](https://opentelemetry.io/docs/instrumentation/go/manual/) from [go.opentelemetry.io/otel/trace](go.opentelemetry.io/otel/trace) +- [OpenCensus spans](https://opencensus.io/quickstart/go/tracing/) from [go.opencensus.io/trace](https://pkg.go.dev/go.opencensus.io/trace#Span) + +## Example + +```bash +spancheck -checks 'end,set-status,record-error' ./... +``` + +```go +func _() error { + // span.End is not called on all paths, possible memory leak + // span.SetStatus is not called on all paths + // span.RecordError is not called on all paths + _, span := otel.Tracer("foo").Start(context.Background(), "bar") + + if true { + // return can be reached without calling span.End + // return can be reached without calling span.SetStatus + // return can be reached without calling span.RecordError + return errors.New("err") + } + + return nil // return can be reached without calling span.End +} +``` + +## Configuration + +### golangci-lint + +Docs on configuring the linter are also available at [https://golangci-lint.run/usage/linters/#spancheck](https://golangci-lint.run/usage/linters/#spancheck): + +```yaml +linters: + enable: + - spancheck + +linters-settings: + spancheck: + # Checks to enable. + # Options include: + # - `end`: check that `span.End()` is called + # - `record-error`: check that `span.RecordError(err)` is called when an error is returned + # - `set-status`: check that `span.SetStatus(codes.Error, msg)` is called when an error is returned + # Default: ["end"] + checks: + - end + - record-error + - set-status + # A list of regexes for function signatures that silence `record-error` and `set-status` reports + # if found in the call path to a returned error. + # https://github.com/jjti/go-spancheck#ignore-check-signatures + # Default: [] + ignore-check-signatures: + - "telemetry.RecordError" +``` + +### CLI + +To install the linter as a CLI: + +```bash +go install github.com/jjti/go-spancheck/cmd/spancheck@latest +spancheck ./... +``` + +Only the `span.End()` check is enabled by default. The others can be enabled with `-checks 'end,set-status,record-error'`. + +```txt +$ spancheck -h +... 
+Flags: + -checks string + comma-separated list of checks to enable (options: end, set-status, record-error) (default "end") + -ignore-check-signatures string + comma-separated list of regex for function signatures that disable checks on errors +``` + +### Ignore Check Signatures + +The `span.SetStatus()` and `span.RecordError()` checks warn when there is: + +1. a path to a return statement +1. that returns an error +1. without a call (to `SetStatus` or `RecordError`, respectively) + +But it's convenient to call `SetStatus` and `RecordError` from utility methods [[1](https://andydote.co.uk/2023/09/19/tracing-is-better/#step-2-wrap-the-errors)]. To support that, the `ignore-*-check-signatures` settings will suppress warnings if the configured function is present in the path. + +For example, by default, the code below would have warnings as shown: + +```go +func task(ctx context.Context) error { + ctx, span := otel.Tracer("foo").Start(ctx, "bar") // span.SetStatus is not called on all paths + defer span.End() + + if err := subTask(ctx); err != nil { + return recordErr(span, err) // return can be reached without calling span.SetStatus + } + + return nil +} + +func recordErr(span trace.Span, err error) error { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + return err +} +``` + +The warnings can be ignored by setting the `-ignore-check-signatures` flag to `recordErr`: + +```bash +spancheck -checks 'end,set-status,record-error' -ignore-check-signatures 'recordErr' ./... +``` + +## Problem Statement + +Tracing is a celebrated [[1](https://andydote.co.uk/2023/09/19/tracing-is-better/),[2](https://charity.wtf/2022/08/15/live-your-best-life-with-structured-events/)] and well-marketed [[3](https://docs.datadoghq.com/tracing/),[4](https://www.honeycomb.io/distributed-tracing)] pillar of observability. But self-instrumented tracing requires a lot of easy-to-forget boilerplate: + +```go +import ( + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" +) + +func task(ctx context.Context) error { + ctx, span := otel.Tracer("foo").Start(ctx, "bar") + defer span.End() // call `.End()` + + if err := subTask(ctx); err != nil { + span.SetStatus(codes.Error, err.Error()) // call SetStatus(codes.Error, msg) to set status:error + span.RecordError(err) // call RecordError(err) to record an error event + return err + } + + return nil +} +``` + +For spans to be _really_ useful, developers need to: + +1. call `span.End()` always +1. call `span.SetStatus(codes.Error, msg)` on error +1. call `span.RecordError(err)` on error +1. call `span.SetAttributes()` liberally + +- OpenTelemetry: [Creating spans](https://opentelemetry.io/docs/instrumentation/go/manual/#creating-spans) +- Uptrace: [OpenTelemetry Go Tracing API](https://uptrace.dev/opentelemetry/go-tracing.html#quickstart) + +This linter helps developers with steps 1-3. + +## Checks + +This linter supports three checks, each documented below. Only the check for `span.End()` is enabled by default. See [Configuration](#configuration) for instructions on enabling the others. + +### `span.End()` + +Enabled by default. + +Not calling `End` can cause memory leaks and prevents spans from being closed. + +> Any Span that is created MUST also be ended. This is the responsibility of the user. Implementations of this API may leak memory or other resources if Spans are not ended.
+ +[source: trace.go](https://github.com/open-telemetry/opentelemetry-go/blob/98b32a6c3a87fbee5d34c063b9096f416b250897/trace/trace.go#L523) + +```go +func task(ctx context.Context) error { + otel.Tracer("app").Start(ctx, "foo") // span is unassigned, probable memory leak + _, span := otel.Tracer("app").Start(ctx, "foo") // span.End is not called on all paths, possible memory leak + return nil // return can be reached without calling span.End +} +``` + +### `span.SetStatus(codes.Error, "msg")` + +Disabled by default. Enable with `-checks 'set-status'`. + +Developers should call `SetStatus` on spans. The status is an important, first-class attribute: + +1. observability platforms and APMs differentiate "success" vs "failure" using [span's status codes](https://docs.datadoghq.com/tracing/metrics/). +1. telemetry collector agents, like the [Open Telemetry Collector's Tail Sampling Processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/tailsamplingprocessor/README.md#:~:text=Sampling%20Processor.-,status_code,-%3A%20Sample%20based%20upon), are configurable to sample `Error` spans at a higher rate than `OK` spans. +1. observability platforms, like [DataDog, have trace retention filters that use spans' status](https://docs.datadoghq.com/tracing/trace_pipeline/trace_retention/). In other words, `status:error` spans often receive special treatment with the assumption they are more useful for debugging. Forgetting to set the status can lead to spans with useful debugging information being dropped. + +```go +func _() error { + _, span := otel.Tracer("foo").Start(context.Background(), "bar") // span.SetStatus is not called on all paths + defer span.End() + + if err := subTask(); err != nil { + span.RecordError(err) + return err // return can be reached without calling span.SetStatus + } + + return nil +} +``` + +OpenTelemetry docs: [Set span status](https://opentelemetry.io/docs/instrumentation/go/manual/#set-span-status). + +### `span.RecordError(err)` + +Disabled by default. Enable with `-checks 'record-error'`. + +Calling `RecordError` creates a new exception-type [event (structured log message)](https://opentelemetry.io/docs/concepts/signals/traces/#span-events) on the span. This is recommended to capture the error's stack trace. + +```go +func _() error { + _, span := otel.Tracer("foo").Start(context.Background(), "bar") // span.RecordError is not called on all paths + defer span.End() + + if err := subTask(); err != nil { + span.SetStatus(codes.Error, err.Error()) + return err // return can be reached without calling span.RecordError + } + + return nil +} +``` + +OpenTelemetry docs: [Record errors](https://opentelemetry.io/docs/instrumentation/go/manual/#record-errors). + +Note: this check is not applied to [OpenCensus spans](https://pkg.go.dev/go.opencensus.io/trace#SpanInterface) because they have no `RecordError` method.
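+ +For reference, a sketch of the `task`/`subTask` pattern from the Problem Statement above that passes all three checks: + +```go +func task(ctx context.Context) error { + ctx, span := otel.Tracer("foo").Start(ctx, "bar") + defer span.End() // `end`: End is deferred, so it is called on every path + + if err := subTask(ctx); err != nil { + span.SetStatus(codes.Error, err.Error()) // `set-status` + span.RecordError(err) // `record-error` + return err + } + + return nil +} +```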
+ +## Attribution + +This linter is the product of liberal copying of: + +- [github.com/golang/tools/go/analysis/passes/lostcancel](https://github.com/golang/tools/tree/master/go/analysis/passes/lostcancel) (half the linter) +- [github.com/tomarrell/wrapcheck](https://github.com/tomarrell/wrapcheck) (error type checking and config) +- [github.com/Antonboom/testifylint](https://github.com/Antonboom/testifylint) (README) +- [github.com/ghostiam/protogetter](https://github.com/ghostiam/protogetter/blob/main/testdata/Makefile) (test setup) diff --git a/vendor/github.com/jjti/go-spancheck/config.go b/vendor/github.com/jjti/go-spancheck/config.go new file mode 100644 index 0000000000..4005f49e01 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/config.go @@ -0,0 +1,141 @@ +package spancheck + +import ( + "flag" + "fmt" + "log" + "regexp" + "strings" +) + +// Check is a type of check that can be enabled or disabled. +type Check int + +const ( + // EndCheck, if enabled, checks that span.End() is called after span creation and before the function returns. + EndCheck Check = iota + + // SetStatusCheck, if enabled, checks that `span.SetStatus(codes.Error, msg)` is called when returning an error. + SetStatusCheck + + // RecordErrorCheck, if enabled, checks that span.RecordError(err) is called when returning an error. + RecordErrorCheck +) + +func (c Check) String() string { + switch c { + case EndCheck: + return "end" + case SetStatusCheck: + return "set-status" + case RecordErrorCheck: + return "record-error" + default: + return "" + } +} + +var ( + // Checks is a map of all checks by name. + Checks = map[string]Check{ + EndCheck.String(): EndCheck, + SetStatusCheck.String(): SetStatusCheck, + RecordErrorCheck.String(): RecordErrorCheck, + } +) + +// Config is a configuration for the spancheck analyzer. +type Config struct { + fs flag.FlagSet + + // EnabledChecks is a list of checks to enable by name. + EnabledChecks []string + + // IgnoreChecksSignaturesSlice is a slice of strings that are turned into + // the ignoreChecksSignatures regex. + IgnoreChecksSignaturesSlice []string + + endCheckEnabled bool + setStatusEnabled bool + recordErrorEnabled bool + + // ignoreChecksSignatures is a regex that, if matched, disables the + // SetStatus and RecordError checks on error. + ignoreChecksSignatures *regexp.Regexp +} + +// NewDefaultConfig returns a new Config with default values. +func NewDefaultConfig() *Config { + return &Config{ + EnabledChecks: []string{EndCheck.String()}, + } +} + +// finalize parses checks and signatures from the public string slices of Config. +func (c *Config) finalize() { + c.parseSignatures() + + checks := parseChecks(c.EnabledChecks) + c.endCheckEnabled = contains(checks, EndCheck) + c.setStatusEnabled = contains(checks, SetStatusCheck) + c.recordErrorEnabled = contains(checks, RecordErrorCheck) +} + +// parseSignatures sets the ignoreChecksSignatures regex from the string slices.
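+// It is a no-op if the regex has already been set or the slice is empty +// (or contains only an empty string).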
+func (c *Config) parseSignatures() { + if c.ignoreChecksSignatures == nil && len(c.IgnoreChecksSignaturesSlice) > 0 { + if len(c.IgnoreChecksSignaturesSlice) == 1 && c.IgnoreChecksSignaturesSlice[0] == "" { + return + } + + c.ignoreChecksSignatures = createRegex(c.IgnoreChecksSignaturesSlice) + } +} + +func parseChecks(checksSlice []string) []Check { + if len(checksSlice) == 0 { + return nil + } + + checks := []Check{} + for _, check := range checksSlice { + checkName := strings.TrimSpace(check) + if checkName == "" { + continue + } + + check, ok := Checks[checkName] + if !ok { + continue + } + + checks = append(checks, check) + } + + return checks +} + +func createRegex(sigs []string) *regexp.Regexp { + if len(sigs) == 0 { + return nil + } + + regex := fmt.Sprintf("(%s)", strings.Join(sigs, "|")) + regexCompiled, err := regexp.Compile(regex) + if err != nil { + log.Default().Print("[WARN] failed to compile regex from signature flag", "regex", regex, "err", err) + return nil + } + + return regexCompiled +} + +func contains(s []Check, e Check) bool { + for _, a := range s { + if a == e { + return true + } + } + + return false +} diff --git a/vendor/github.com/jjti/go-spancheck/doc.go b/vendor/github.com/jjti/go-spancheck/doc.go new file mode 100644 index 0000000000..f9dec043f6 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/doc.go @@ -0,0 +1,37 @@ +// Package spancheck defines a linter that checks for mistakes with OTEL trace spans. +// +// # Analyzer spancheck +// +// spancheck: check for mistakes with OpenTelemetry trace spans. +// +// Common mistakes with OTEL trace spans include forgetting to call End: +// +// func(ctx context.Context) { +// ctx, span := otel.Tracer("app").Start(ctx, "span") +// // defer span.End() should be here +// +// // do stuff +// } +// +// Forgetting to set an Error status: +// +// ctx, span := otel.Tracer("app").Start(ctx, "span") +// defer span.End() +// +// if err := task(); err != nil { +// // span.SetStatus(codes.Error, err.Error()) should be here +// span.RecordError(err) +// return fmt.Errorf("failed to run task: %w", err) +// } +// +// Forgetting to record the Error: +// +// ctx, span := otel.Tracer("app").Start(ctx, "span") +// defer span.End() +// +// if err := task(); err != nil { +// span.SetStatus(codes.Error, err.Error()) +// // span.RecordError(err) should be here +// return fmt.Errorf("failed to run task: %w", err) +// } +package spancheck diff --git a/vendor/github.com/jjti/go-spancheck/go.work b/vendor/github.com/jjti/go-spancheck/go.work new file mode 100644 index 0000000000..7d0a87b9e1 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/go.work @@ -0,0 +1,8 @@ +go 1.20 + +use ( + . 
+ ./testdata/base + ./testdata/disableerrorchecks + ./testdata/enableall +) diff --git a/vendor/github.com/jjti/go-spancheck/go.work.sum b/vendor/github.com/jjti/go-spancheck/go.work.sum new file mode 100644 index 0000000000..f3cdef790d --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/go.work.sum @@ -0,0 +1,3 @@ +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= diff --git a/vendor/github.com/jjti/go-spancheck/spancheck.go b/vendor/github.com/jjti/go-spancheck/spancheck.go new file mode 100644 index 0000000000..6f069a0335 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/spancheck.go @@ -0,0 +1,441 @@ +package spancheck + +import ( + "go/ast" + "go/types" + "regexp" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/ctrlflow" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/cfg" +) + +const stackLen = 32 + +// spanType differentiates span types. +type spanType int + +const ( + spanUnset spanType = iota // not a span + spanOpenTelemetry // from go.opentelemetry.io/otel + spanOpenCensus // from go.opencensus.io/trace +) + +var ( + // this approach stolen from errcheck + // https://github.com/kisielk/errcheck/blob/7f94c385d0116ccc421fbb4709e4a484d98325ee/errcheck/errcheck.go#L22 + errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) +) + +// NewAnalyzerWithConfig returns a new analyzer configured with the Config passed in. +// Its config can be set for testing. +func NewAnalyzerWithConfig(config *Config) *analysis.Analyzer { + return newAnalyzer(config) +} + +func newAnalyzer(config *Config) *analysis.Analyzer { + config.finalize() + + return &analysis.Analyzer{ + Name: "spancheck", + Doc: "Checks for mistakes with OpenTelemetry/Census spans.", + Flags: config.fs, + Run: run(config), + Requires: []*analysis.Analyzer{ + ctrlflow.Analyzer, + inspect.Analyzer, + }, + } +} + +func run(config *Config) func(*analysis.Pass) (interface{}, error) { + return func(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncLit)(nil), // f := func() {} + (*ast.FuncDecl)(nil), // func foo() {} + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + runFunc(pass, n, config) + }) + + return nil, nil + } +} + +type spanVar struct { + stmt ast.Node + id *ast.Ident + vr *types.Var + spanType spanType +} + +// runFunc checks whether the node is a function that creates a span and reports +// any path on which a configured call (End, SetStatus, RecordError) is missing. +func runFunc(pass *analysis.Pass, node ast.Node, config *Config) { + // copying https://cs.opensource.google/go/x/tools/+/master:go/analysis/passes/lostcancel/lostcancel.go + + // Find scope of function node + var funcScope *types.Scope + switch v := node.(type) { + case *ast.FuncLit: + funcScope = pass.TypesInfo.Scopes[v.Type] + case *ast.FuncDecl: + funcScope = pass.TypesInfo.Scopes[v.Type] + } + + // Maps each span variable to its defining ValueSpec/AssignStmt. + spanVars := make(map[*ast.Ident]spanVar) + + // Find the set of span vars to analyze.
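+ // The walk below keeps an explicit stack of ancestor nodes: when a + // span-start selector is found, stack[len(stack)-2] must be the CallExpr + // and stack[len(stack)-3] is the AssignStmt/ValueSpec that binds the span, + // which is then checked for the identifier the span is assigned to.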
+ stack := make([]ast.Node, 0, stackLen) + ast.Inspect(node, func(n ast.Node) bool { + switch n.(type) { + case *ast.FuncLit: + if len(stack) > 0 { + return false // don't stray into nested functions + } + case nil: + stack = stack[:len(stack)-1] // pop + return true + } + stack = append(stack, n) // push + + // Look for [{AssignStmt,ValueSpec} CallExpr SelectorExpr]: + // + // ctx, span := otel.Tracer("app").Start(...) + // ctx, span = otel.Tracer("app").Start(...) + // var ctx, span = otel.Tracer("app").Start(...) + sType, sStart := isSpanStart(pass.TypesInfo, n) + if !sStart || !isCall(stack[len(stack)-2]) { + return true + } + + stmt := stack[len(stack)-3] + id := getID(stmt) + if id == nil { + pass.ReportRangef(n, "span is unassigned, probable memory leak") + return true + } + + if id.Name == "_" { + pass.ReportRangef(id, "span is unassigned, probable memory leak") + } else if v, ok := pass.TypesInfo.Uses[id].(*types.Var); ok { + // If the span variable is defined outside function scope, + // do not analyze it. + if funcScope.Contains(v.Pos()) { + spanVars[id] = spanVar{ + vr: v, + stmt: stmt, + id: id, + spanType: sType, + } + } + } else if v, ok := pass.TypesInfo.Defs[id].(*types.Var); ok { + spanVars[id] = spanVar{ + vr: v, + stmt: stmt, + id: id, + spanType: sType, + } + } + + return true + }) + + if len(spanVars) == 0 { + return // no need to inspect CFG + } + + // Obtain the CFG. + cfgs := pass.ResultOf[ctrlflow.Analyzer].(*ctrlflow.CFGs) + var g *cfg.CFG + var sig *types.Signature + switch node := node.(type) { + case *ast.FuncDecl: + sig, _ = pass.TypesInfo.Defs[node.Name].Type().(*types.Signature) + g = cfgs.FuncDecl(node) + case *ast.FuncLit: + sig, _ = pass.TypesInfo.Types[node.Type].Type.(*types.Signature) + g = cfgs.FuncLit(node) + } + if sig == nil { + return // missing type information + } + + // Check for missing calls. + for _, sv := range spanVars { + if config.endCheckEnabled { + // Check if there's no End to the span. + if ret := getMissingSpanCalls(pass, g, sv, "End", func(pass *analysis.Pass, ret *ast.ReturnStmt) *ast.ReturnStmt { return ret }, nil); ret != nil { + pass.ReportRangef(sv.stmt, "%s.End is not called on all paths, possible memory leak", sv.vr.Name()) + pass.ReportRangef(ret, "return can be reached without calling %s.End", sv.vr.Name()) + } + } + + if config.setStatusEnabled { + // Check if there's no SetStatus to the span setting an error. + if ret := getMissingSpanCalls(pass, g, sv, "SetStatus", getErrorReturn, config.ignoreChecksSignatures); ret != nil { + pass.ReportRangef(sv.stmt, "%s.SetStatus is not called on all paths", sv.vr.Name()) + pass.ReportRangef(ret, "return can be reached without calling %s.SetStatus", sv.vr.Name()) + } + } + + if config.recordErrorEnabled && sv.spanType == spanOpenTelemetry { // RecordError only exists in OpenTelemetry + // Check if there's no RecordError to the span setting an error. 
+ if ret := getMissingSpanCalls(pass, g, sv, "RecordError", getErrorReturn, config.ignoreChecksSignatures); ret != nil { + pass.ReportRangef(sv.stmt, "%s.RecordError is not called on all paths", sv.vr.Name()) + pass.ReportRangef(ret, "return can be reached without calling %s.RecordError", sv.vr.Name()) + } + } + } +} + +// isSpanStart reports whether n is tracer.Start() +func isSpanStart(info *types.Info, n ast.Node) (spanType, bool) { + sel, ok := n.(*ast.SelectorExpr) + if !ok { + return spanUnset, false + } + + switch sel.Sel.Name { + case "Start": // https://github.com/open-telemetry/opentelemetry-go/blob/98b32a6c3a87fbee5d34c063b9096f416b250897/trace/trace.go#L523 + obj, ok := info.Uses[sel.Sel] + return spanOpenTelemetry, ok && obj.Pkg().Path() == "go.opentelemetry.io/otel/trace" + case "StartSpan": // https://pkg.go.dev/go.opencensus.io/trace#StartSpan + obj, ok := info.Uses[sel.Sel] + return spanOpenCensus, ok && obj.Pkg().Path() == "go.opencensus.io/trace" + case "StartSpanWithRemoteParent": // https://github.com/census-instrumentation/opencensus-go/blob/v0.24.0/trace/trace_api.go#L66 + obj, ok := info.Uses[sel.Sel] + return spanOpenCensus, ok && obj.Pkg().Path() == "go.opencensus.io/trace" + default: + return spanUnset, false + } +} + +func isCall(n ast.Node) bool { + _, ok := n.(*ast.CallExpr) + return ok +} + +func getID(node ast.Node) *ast.Ident { + switch stmt := node.(type) { + case *ast.ValueSpec: + if len(stmt.Names) > 1 { + return stmt.Names[1] + } + case *ast.AssignStmt: + if len(stmt.Lhs) > 1 { + id, _ := stmt.Lhs[1].(*ast.Ident) + return id + } + } + return nil +} + +// getMissingSpanCalls finds a path through the CFG, from stmt (which defines +// the 'span' variable v) to a return statement, that doesn't call the passed selector on the span. +func getMissingSpanCalls( + pass *analysis.Pass, + g *cfg.CFG, + sv spanVar, + selName string, + checkErr func(pass *analysis.Pass, ret *ast.ReturnStmt) *ast.ReturnStmt, + ignoreCheckSig *regexp.Regexp, +) *ast.ReturnStmt { + // blockUses computes "uses" for each block, caching the result. + memo := make(map[*cfg.Block]bool) + blockUses := func(pass *analysis.Pass, b *cfg.Block) bool { + res, ok := memo[b] + if !ok { + res = usesCall(pass, b.Nodes, sv, selName, ignoreCheckSig, 0) + memo[b] = res + } + return res + } + + // Find the var's defining block in the CFG, + // plus the rest of the statements of that block. + var defBlock *cfg.Block + var rest []ast.Node +outer: + for _, b := range g.Blocks { + for i, n := range b.Nodes { + if n == sv.stmt { + defBlock = b + rest = b.Nodes[i+1:] + break outer + } + } + } + + // Is the call "used" in the remainder of its defining block? + if usesCall(pass, rest, sv, selName, ignoreCheckSig, 0) { + return nil + } + + // Does the defining block return without making the call? + if ret := defBlock.Return(); ret != nil { + return checkErr(pass, ret) + } + + // Search the CFG depth-first for a path, from defblock to a + // return block, in which v is never "used". + seen := make(map[*cfg.Block]bool) + var search func(blocks []*cfg.Block) *ast.ReturnStmt + search = func(blocks []*cfg.Block) *ast.ReturnStmt { + for _, b := range blocks { + if seen[b] { + continue + } + seen[b] = true + + // Prune the search if the block uses v. + if blockUses(pass, b) { + continue + } + + // Found path to return statement? 
+ if ret := getErrorReturn(pass, b.Return()); ret != nil { + return ret // found + } + + // Recur + if ret := getErrorReturn(pass, search(b.Succs)); ret != nil { + return ret + } + } + return nil + } + + return search(defBlock.Succs) +} + +// usesCall reports whether stmts contain a use of the selName call on variable v. +func usesCall(pass *analysis.Pass, stmts []ast.Node, sv spanVar, selName string, ignoreCheckSig *regexp.Regexp, depth int) bool { + if depth > 1 { // for perf reasons, do not dive too deep thru func literals, just one level deep check. + return false + } + + found, reAssigned := false, false + for _, subStmt := range stmts { + stack := []ast.Node{} + ast.Inspect(subStmt, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.FuncLit: + if len(stack) > 0 { + cfgs := pass.ResultOf[ctrlflow.Analyzer].(*ctrlflow.CFGs) + g := cfgs.FuncLit(n) + if g != nil && len(g.Blocks) > 0 { + return usesCall(pass, g.Blocks[0].Nodes, sv, selName, ignoreCheckSig, depth+1) + } + + return false + } + case *ast.CallExpr: + if ident, ok := n.Fun.(*ast.Ident); ok { + fnSig := pass.TypesInfo.ObjectOf(ident).String() + if ignoreCheckSig != nil && ignoreCheckSig.MatchString(fnSig) { + found = true + return false + } + } + case nil: + if len(stack) > 0 { + stack = stack[:len(stack)-1] // pop + return true + } + return false + } + stack = append(stack, n) // push + + // Check whether the span was assigned over top of its old value. + _, spanStart := isSpanStart(pass.TypesInfo, n) + if spanStart { + if id := getID(stack[len(stack)-3]); id != nil && id.Obj.Decl == sv.id.Obj.Decl { + reAssigned = true + return false + } + } + + if n, ok := n.(*ast.SelectorExpr); ok { + // Selector (End, SetStatus, RecordError) hit. + if n.Sel.Name == selName { + id, ok := n.X.(*ast.Ident) + found = ok && id.Obj.Decl == sv.id.Obj.Decl + } + + // Check if an ignore signature matches. 
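+ // A match (e.g. a recordErr-style helper) counts as a use and + // suppresses the report.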
+ fnSig := pass.TypesInfo.ObjectOf(n.Sel).String() + if ignoreCheckSig != nil && ignoreCheckSig.MatchString(fnSig) { + found = true + } + } + + return !found + }) + } + + return found && !reAssigned +} + +func getErrorReturn(pass *analysis.Pass, ret *ast.ReturnStmt) *ast.ReturnStmt { + if ret == nil { + return nil + } + + for _, r := range ret.Results { + if isErrorType(pass.TypesInfo.TypeOf(r)) { + return ret + } + + if r, ok := r.(*ast.CallExpr); ok { + for _, err := range errorsByArg(pass, r) { + if err { + return ret + } + } + } + } + + return nil +} + +// errorsByArg returns a slice s such that +// len(s) == number of return types of call +// s[i] == true iff return type at position i from left is an error type +// +// copied from https://github.com/kisielk/errcheck/blob/master/errcheck/errcheck.go +func errorsByArg(pass *analysis.Pass, call *ast.CallExpr) []bool { + switch t := pass.TypesInfo.Types[call].Type.(type) { + case *types.Named: + // Single return + return []bool{isErrorType(t)} + case *types.Pointer: + // Single return via pointer + return []bool{isErrorType(t)} + case *types.Tuple: + // Multiple returns + s := make([]bool, t.Len()) + for i := 0; i < t.Len(); i++ { + switch et := t.At(i).Type().(type) { + case *types.Named: + // Single return + s[i] = isErrorType(et) + case *types.Pointer: + // Single return via pointer + s[i] = isErrorType(et) + default: + s[i] = false + } + } + return s + } + return []bool{false} +} + +func isErrorType(t types.Type) bool { + return types.Implements(t, errorType) +} diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore deleted file mode 100644 index 5091fb0736..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/jpgo -jmespath-fuzz.zip -cpu.out -go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml deleted file mode 100644 index c56f37c0c9..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go - -sudo: false - -go: - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x - - 1.14.x - - 1.15.x - - tip - -allow_failures: - - go: tip - -script: make build - -matrix: - include: - - language: go - go: 1.15.x - script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE deleted file mode 100644 index b03310a91f..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2015 James Saryerwinnie - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile deleted file mode 100644 index fb38ec2760..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/Makefile +++ /dev/null @@ -1,51 +0,0 @@ - -CMD = jpgo - -SRC_PKGS=./ ./cmd/... ./fuzz/... 
- -help: - @echo "Please use \`make ' where is one of" - @echo " test to run all the tests" - @echo " build to build the library and jp executable" - @echo " generate to run codegen" - - -generate: - go generate ${SRC_PKGS} - -build: - rm -f $(CMD) - go build ${SRC_PKGS} - rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... - mv cmd/$(CMD)/$(CMD) . - -test: test-internal-testify - echo "making tests ${SRC_PKGS}" - go test -v ${SRC_PKGS} - -check: - go vet ${SRC_PKGS} - @echo "golint ${SRC_PKGS}" - @lint=`golint ${SRC_PKGS}`; \ - lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ - echo "$$lint"; \ - if [ "$$lint" != "" ]; then exit 1; fi - -htmlc: - go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov - -buildfuzz: - go-fuzz-build github.com/jmespath/go-jmespath/fuzz - -fuzz: buildfuzz - go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata - -bench: - go test -bench . -cpuprofile cpu.out - -pprof-cpu: - go tool pprof ./go-jmespath.test ./cpu.out - -test-internal-testify: - cd internal/testify && go test ./... - diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md deleted file mode 100644 index 110ad79997..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# go-jmespath - A JMESPath implementation in Go - -[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) - - - -go-jmespath is a GO implementation of JMESPath, -which is a query language for JSON. It will take a JSON -document and transform it into another JSON document -through a JMESPath expression. - -Using go-jmespath is really easy. There's a single function -you use, `jmespath.search`: - - -```go -> import "github.com/jmespath/go-jmespath" -> -> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.Search("foo.bar.baz[2]", data) -result = 2 -``` - -In the example we gave the ``search`` function input data of -`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath -expression `foo.bar.baz[2]`, and the `search` function evaluated -the expression against the input data to produce the result ``2``. - -The JMESPath language can do a lot more than select an element -from a list. Here are a few more examples: - -```go -> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.search("foo.bar", data) -result = { "baz": [ 0, 1, 2, 3, 4 ] } - - -> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"}, - {"first": "c", "last": "d"}]}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.search({"foo[*].first", data) -result [ 'a', 'c' ] - - -> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25}, - {"age": 30}, {"age": 35}, - {"age": 40}]}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.search("foo[?age > `30`]") -result = [ { age: 35 }, { age: 40 } ] -``` - -You can also pre-compile your query. 
This is usefull if -you are going to run multiple searches with it: - -```go - > var jsondata = []byte(`{"foo": "bar"}`) - > var data interface{} - > err := json.Unmarshal(jsondata, &data) - > precompiled, err := Compile("foo") - > if err != nil{ - > // ... handle the error - > } - > result, err := precompiled.Search(data) - result = "bar" -``` - -## More Resources - -The example above only show a small amount of what -a JMESPath expression can do. If you want to take a -tour of the language, the *best* place to go is the -[JMESPath Tutorial](http://jmespath.org/tutorial.html). - -One of the best things about JMESPath is that it is -implemented in many different programming languages including -python, ruby, php, lua, etc. To see a complete list of libraries, -check out the [JMESPath libraries page](http://jmespath.org/libraries.html). - -And finally, the full JMESPath specification can be found -on the [JMESPath site](http://jmespath.org/specification.html). diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go deleted file mode 100644 index 010efe9bfb..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ /dev/null @@ -1,49 +0,0 @@ -package jmespath - -import "strconv" - -// JMESPath is the representation of a compiled JMES path query. A JMESPath is -// safe for concurrent use by multiple goroutines. -type JMESPath struct { - ast ASTNode - intr *treeInterpreter -} - -// Compile parses a JMESPath expression and returns, if successful, a JMESPath -// object that can be used to match against data. -func Compile(expression string) (*JMESPath, error) { - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - jmespath := &JMESPath{ast: ast, intr: newInterpreter()} - return jmespath, nil -} - -// MustCompile is like Compile but panics if the expression cannot be parsed. -// It simplifies safe initialization of global variables holding compiled -// JMESPaths. -func MustCompile(expression string) *JMESPath { - jmespath, err := Compile(expression) - if err != nil { - panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) - } - return jmespath -} - -// Search evaluates a JMESPath expression against input data and returns the result. -func (jp *JMESPath) Search(data interface{}) (interface{}, error) { - return jp.intr.Execute(jp.ast, data) -} - -// Search evaluates a JMESPath expression against input data and returns the result. 
-func Search(expression string, data interface{}) (interface{}, error) { - intr := newInterpreter() - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - return intr.Execute(ast, data) -} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go deleted file mode 100644 index 1cd2d239c9..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type astNodeType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" - -var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} - -func (i astNodeType) String() string { - if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { - return fmt.Sprintf("astNodeType(%d)", i) - } - return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go deleted file mode 100644 index 9b7cd89b4b..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/functions.go +++ /dev/null @@ -1,842 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "unicode/utf8" -) - -type jpFunction func(arguments []interface{}) (interface{}, error) - -type jpType string - -const ( - jpUnknown jpType = "unknown" - jpNumber jpType = "number" - jpString jpType = "string" - jpArray jpType = "array" - jpObject jpType = "object" - jpArrayNumber jpType = "array[number]" - jpArrayString jpType = "array[string]" - jpExpref jpType = "expref" - jpAny jpType = "any" -) - -type functionEntry struct { - name string - arguments []argSpec - handler jpFunction - hasExpRef bool -} - -type argSpec struct { - types []jpType - variadic bool -} - -type byExprString struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprString) Len() int { - return len(a.items) -} -func (a *byExprString) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprString) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(string) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(string) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type byExprFloat struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprFloat) Len() int { - return len(a.items) -} -func (a *byExprFloat) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprFloat) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. 
- return true - } - ith, ok := first.(float64) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(float64) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type functionCaller struct { - functionTable map[string]functionEntry -} - -func newFunctionCaller() *functionCaller { - caller := &functionCaller{} - caller.functionTable = map[string]functionEntry{ - "length": { - name: "length", - arguments: []argSpec{ - {types: []jpType{jpString, jpArray, jpObject}}, - }, - handler: jpfLength, - }, - "starts_with": { - name: "starts_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfStartsWith, - }, - "abs": { - name: "abs", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfAbs, - }, - "avg": { - name: "avg", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfAvg, - }, - "ceil": { - name: "ceil", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfCeil, - }, - "contains": { - name: "contains", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - {types: []jpType{jpAny}}, - }, - handler: jpfContains, - }, - "ends_with": { - name: "ends_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfEndsWith, - }, - "floor": { - name: "floor", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfFloor, - }, - "map": { - name: "map", - arguments: []argSpec{ - {types: []jpType{jpExpref}}, - {types: []jpType{jpArray}}, - }, - handler: jpfMap, - hasExpRef: true, - }, - "max": { - name: "max", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMax, - }, - "merge": { - name: "merge", - arguments: []argSpec{ - {types: []jpType{jpObject}, variadic: true}, - }, - handler: jpfMerge, - }, - "max_by": { - name: "max_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMaxBy, - hasExpRef: true, - }, - "sum": { - name: "sum", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfSum, - }, - "min": { - name: "min", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMin, - }, - "min_by": { - name: "min_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMinBy, - hasExpRef: true, - }, - "type": { - name: "type", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfType, - }, - "keys": { - name: "keys", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfKeys, - }, - "values": { - name: "values", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfValues, - }, - "sort": { - name: "sort", - arguments: []argSpec{ - {types: []jpType{jpArrayString, jpArrayNumber}}, - }, - handler: jpfSort, - }, - "sort_by": { - name: "sort_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfSortBy, - hasExpRef: true, - }, - "join": { - name: "join", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpArrayString}}, - }, - handler: jpfJoin, - }, - "reverse": { - name: "reverse", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - }, - handler: jpfReverse, - }, - "to_array": {
name: "to_array", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToArray, - }, - "to_string": { - name: "to_string", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToString, - }, - "to_number": { - name: "to_number", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToNumber, - }, - "not_null": { - name: "not_null", - arguments: []argSpec{ - {types: []jpType{jpAny}, variadic: true}, - }, - handler: jpfNotNull, - }, - } - return caller -} - -func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { - if len(e.arguments) == 0 { - return arguments, nil - } - if !e.arguments[len(e.arguments)-1].variadic { - if len(e.arguments) != len(arguments) { - return nil, errors.New("incorrect number of args") - } - for i, spec := range e.arguments { - userArg := arguments[i] - err := spec.typeCheck(userArg) - if err != nil { - return nil, err - } - } - return arguments, nil - } - if len(arguments) < len(e.arguments) { - return nil, errors.New("Invalid arity.") - } - return arguments, nil -} - -func (a *argSpec) typeCheck(arg interface{}) error { - for _, t := range a.types { - switch t { - case jpNumber: - if _, ok := arg.(float64); ok { - return nil - } - case jpString: - if _, ok := arg.(string); ok { - return nil - } - case jpArray: - if isSliceType(arg) { - return nil - } - case jpObject: - if _, ok := arg.(map[string]interface{}); ok { - return nil - } - case jpArrayNumber: - if _, ok := toArrayNum(arg); ok { - return nil - } - case jpArrayString: - if _, ok := toArrayStr(arg); ok { - return nil - } - case jpAny: - return nil - case jpExpref: - if _, ok := arg.(expRef); ok { - return nil - } - } - } - return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) -} - -func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { - entry, ok := f.functionTable[name] - if !ok { - return nil, errors.New("unknown function: " + name) - } - resolvedArgs, err := entry.resolveArgs(arguments) - if err != nil { - return nil, err - } - if entry.hasExpRef { - var extra []interface{} - extra = append(extra, intr) - resolvedArgs = append(extra, resolvedArgs...) - } - return entry.handler(resolvedArgs) -} - -func jpfAbs(arguments []interface{}) (interface{}, error) { - num := arguments[0].(float64) - return math.Abs(num), nil -} - -func jpfLength(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if c, ok := arg.(string); ok { - return float64(utf8.RuneCountInString(c)), nil - } else if isSliceType(arg) { - v := reflect.ValueOf(arg) - return float64(v.Len()), nil - } else if c, ok := arg.(map[string]interface{}); ok { - return float64(len(c)), nil - } - return nil, errors.New("could not compute length()") -} - -func jpfStartsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - prefix := arguments[1].(string) - return strings.HasPrefix(search, prefix), nil -} - -func jpfAvg(arguments []interface{}) (interface{}, error) { - // We've already type checked the value so we can safely use - // type assertions. 
- args := arguments[0].([]interface{}) - length := float64(len(args)) - numerator := 0.0 - for _, n := range args { - numerator += n.(float64) - } - return numerator / length, nil -} -func jpfCeil(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Ceil(val), nil -} -func jpfContains(arguments []interface{}) (interface{}, error) { - search := arguments[0] - el := arguments[1] - if searchStr, ok := search.(string); ok { - if elStr, ok := el.(string); ok { - return strings.Index(searchStr, elStr) != -1, nil - } - return false, nil - } - // Otherwise this is a generic contains for []interface{} - general := search.([]interface{}) - for _, item := range general { - if item == el { - return true, nil - } - } - return false, nil -} -func jpfEndsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - suffix := arguments[1].(string) - return strings.HasSuffix(search, suffix), nil -} -func jpfFloor(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Floor(val), nil -} -func jpfMap(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - exp := arguments[1].(expRef) - node := exp.ref - arr := arguments[2].([]interface{}) - mapped := make([]interface{}, 0, len(arr)) - for _, value := range arr { - current, err := intr.Execute(node, value) - if err != nil { - return nil, err - } - mapped = append(mapped, current) - } - return mapped, nil -} -func jpfMax(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil - } - // Otherwise we're dealing with a max() of strings. 
- items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil -} -func jpfMerge(arguments []interface{}) (interface{}, error) { - final := make(map[string]interface{}) - for _, m := range arguments { - mapped := m.(map[string]interface{}) - for key, value := range mapped { - final[key] = value - } - } - return final, nil -} -func jpfMaxBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - switch t := start.(type) { - case float64: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - case string: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - default: - return nil, errors.New("invalid type, must be number or string") - } -} -func jpfSum(arguments []interface{}) (interface{}, error) { - items, _ := toArrayNum(arguments[0]) - sum := 0.0 - for _, item := range items { - sum += item - } - return sum, nil -} - -func jpfMin(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil - } - items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil -} - -func jpfMinBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if t, ok := start.(float64); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current < bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else if t, ok := start.(string); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current <
bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else { - return nil, errors.New("invalid type, must be number or string") - } -} -func jpfType(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if _, ok := arg.(float64); ok { - return "number", nil - } - if _, ok := arg.(string); ok { - return "string", nil - } - if _, ok := arg.([]interface{}); ok { - return "array", nil - } - if _, ok := arg.(map[string]interface{}); ok { - return "object", nil - } - if arg == nil { - return "null", nil - } - if arg == true || arg == false { - return "boolean", nil - } - return nil, errors.New("unknown type") -} -func jpfKeys(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for key := range arg { - collected = append(collected, key) - } - return collected, nil -} -func jpfValues(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for _, value := range arg { - collected = append(collected, value) - } - return collected, nil -} -func jpfSort(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - d := sort.Float64Slice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil - } - // Otherwise we're dealing with sort()'ing strings. - items, _ := toArrayStr(arguments[0]) - d := sort.StringSlice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil -} -func jpfSortBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return arr, nil - } else if len(arr) == 1 { - return arr, nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if _, ok := start.(float64); ok { - sortable := &byExprFloat{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else if _, ok := start.(string); ok { - sortable := &byExprString{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else { - return nil, errors.New("invalid type, must be number or string") - } -} -func jpfJoin(arguments []interface{}) (interface{}, error) { - sep := arguments[0].(string) - // We can't just do arguments[1].([]string), we have to - // manually convert each item to a string.
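`max_by`, `min_by`, and `sort_by` above all share one shape: evaluate the key expression against the first element to fix a type (number or string), then require every other key to match it. With Go generics the numeric and string halves collapse into one function; a sketch under that assumption (`maxBy` is a made-up helper, not part of this library):

```go
package main

import (
	"cmp"
	"errors"
	"fmt"
)

// maxBy returns the element whose key is largest, mirroring the shape
// of the removed jpfMaxBy but letting the type system enforce that all
// keys are of one ordered type.
func maxBy[T any, K cmp.Ordered](items []T, key func(T) (K, error)) (T, error) {
	var zero T
	if len(items) == 0 {
		return zero, errors.New("empty input")
	}
	best := items[0]
	bestKey, err := key(best)
	if err != nil {
		return zero, err
	}
	for _, item := range items[1:] {
		k, err := key(item)
		if err != nil {
			return zero, err
		}
		if k > bestKey {
			bestKey, best = k, item
		}
	}
	return best, nil
}

func main() {
	people := []map[string]interface{}{{"age": 30.0}, {"age": 50.0}, {"age": 40.0}}
	oldest, err := maxBy(people, func(p map[string]interface{}) (float64, error) {
		age, ok := p["age"].(float64)
		if !ok {
			return 0, errors.New("age is not a number")
		}
		return age, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(oldest["age"]) // 50
}
```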
- arrayStr := []string{} - for _, item := range arguments[1].([]interface{}) { - arrayStr = append(arrayStr, item.(string)) - } - return strings.Join(arrayStr, sep), nil -} -func jpfReverse(arguments []interface{}) (interface{}, error) { - if s, ok := arguments[0].(string); ok { - r := []rune(s) - for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r), nil - } - items := arguments[0].([]interface{}) - length := len(items) - reversed := make([]interface{}, length) - for i, item := range items { - reversed[length-(i+1)] = item - } - return reversed, nil -} -func jpfToArray(arguments []interface{}) (interface{}, error) { - if _, ok := arguments[0].([]interface{}); ok { - return arguments[0], nil - } - return arguments[:1:1], nil -} -func jpfToString(arguments []interface{}) (interface{}, error) { - if v, ok := arguments[0].(string); ok { - return v, nil - } - result, err := json.Marshal(arguments[0]) - if err != nil { - return nil, err - } - return string(result), nil -} -func jpfToNumber(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if v, ok := arg.(float64); ok { - return v, nil - } - if v, ok := arg.(string); ok { - conv, err := strconv.ParseFloat(v, 64) - if err != nil { - return nil, nil - } - return conv, nil - } - if _, ok := arg.([]interface{}); ok { - return nil, nil - } - if _, ok := arg.(map[string]interface{}); ok { - return nil, nil - } - if arg == nil { - return nil, nil - } - if arg == true || arg == false { - return nil, nil - } - return nil, errors.New("unknown type") -} -func jpfNotNull(arguments []interface{}) (interface{}, error) { - for _, arg := range arguments { - if arg != nil { - return arg, nil - } - } - return nil, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go deleted file mode 100644 index 13c74604c2..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/interpreter.go +++ /dev/null @@ -1,418 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" - "unicode" - "unicode/utf8" -) - -/* This is a tree based interpreter. It walks the AST and directly - interprets the AST to search through a JSON document. -*/ - -type treeInterpreter struct { - fCall *functionCaller -} - -func newInterpreter() *treeInterpreter { - interpreter := treeInterpreter{} - interpreter.fCall = newFunctionCaller() - return &interpreter -} - -type expRef struct { - ref ASTNode -} - -// Execute takes an ASTNode and input data and interprets the AST directly. -// It will produce the result of applying the JMESPath expression associated -// with the ASTNode to the input data "value". 
-func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { - switch node.nodeType { - case ASTComparator: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - right, err := intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - switch node.value { - case tEQ: - return objsEqual(left, right), nil - case tNE: - return !objsEqual(left, right), nil - } - leftNum, ok := left.(float64) - if !ok { - return nil, nil - } - rightNum, ok := right.(float64) - if !ok { - return nil, nil - } - switch node.value { - case tGT: - return leftNum > rightNum, nil - case tGTE: - return leftNum >= rightNum, nil - case tLT: - return leftNum < rightNum, nil - case tLTE: - return leftNum <= rightNum, nil - } - case ASTExpRef: - return expRef{ref: node.children[0]}, nil - case ASTFunctionExpression: - resolvedArgs := []interface{}{} - for _, arg := range node.children { - current, err := intr.Execute(arg, value) - if err != nil { - return nil, err - } - resolvedArgs = append(resolvedArgs, current) - } - return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) - case ASTField: - if m, ok := value.(map[string]interface{}); ok { - key := node.value.(string) - return m[key], nil - } - return intr.fieldFromStruct(node.value.(string), value) - case ASTFilterProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.filterProjectionWithReflection(node, left) - } - return nil, nil - } - compareNode := node.children[2] - collected := []interface{}{} - for _, element := range sliceType { - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil - case ASTFlatten: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - // If we can't type convert to []interface{}, there's - // a chance this could still work via reflection if we're - // dealing with user provided types. - if isSliceType(left) { - return intr.flattenWithReflection(left) - } - return nil, nil - } - flattened := []interface{}{} - for _, element := range sliceType { - if elementSlice, ok := element.([]interface{}); ok { - flattened = append(flattened, elementSlice...) - } else if isSliceType(element) { - reflectFlat := []interface{}{} - v := reflect.ValueOf(element) - for i := 0; i < v.Len(); i++ { - reflectFlat = append(reflectFlat, v.Index(i).Interface()) - } - flattened = append(flattened, reflectFlat...) - } else { - flattened = append(flattened, element) - } - } - return flattened, nil - case ASTIdentity, ASTCurrentNode: - return value, nil - case ASTIndex: - if sliceType, ok := value.([]interface{}); ok { - index := node.value.(int) - if index < 0 { - index += len(sliceType) - } - if index < len(sliceType) && index >= 0 { - return sliceType[index], nil - } - return nil, nil - } - // Otherwise try via reflection. 
- rv := reflect.ValueOf(value) - if rv.Kind() == reflect.Slice { - index := node.value.(int) - if index < 0 { - index += rv.Len() - } - if index < rv.Len() && index >= 0 { - v := rv.Index(index) - return v.Interface(), nil - } - } - return nil, nil - case ASTKeyValPair: - return intr.Execute(node.children[0], value) - case ASTLiteral: - return node.value, nil - case ASTMultiSelectHash: - if value == nil { - return nil, nil - } - collected := make(map[string]interface{}) - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - key := child.value.(string) - collected[key] = current - } - return collected, nil - case ASTMultiSelectList: - if value == nil { - return nil, nil - } - collected := []interface{}{} - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - collected = append(collected, current) - } - return collected, nil - case ASTOrExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - matched, err = intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - } - return matched, nil - case ASTAndExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return matched, nil - } - return intr.Execute(node.children[1], value) - case ASTNotExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return true, nil - } - return false, nil - case ASTPipe: - result := value - var err error - for _, child := range node.children { - result, err = intr.Execute(child, result) - if err != nil { - return nil, err - } - } - return result, nil - case ASTProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.projectWithReflection(node, left) - } - return nil, nil - } - collected := []interface{}{} - var current interface{} - for _, element := range sliceType { - current, err = intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, nil - case ASTSubexpression, ASTIndexExpression: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - return intr.Execute(node.children[1], left) - case ASTSlice: - sliceType, ok := value.([]interface{}) - if !ok { - if isSliceType(value) { - return intr.sliceWithReflection(node, value) - } - return nil, nil - } - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - return slice(sliceType, sliceParams) - case ASTValueProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - mapType, ok := left.(map[string]interface{}) - if !ok { - return nil, nil - } - values := make([]interface{}, len(mapType)) - for _, value := range mapType { - values = append(values, value) - } - collected := []interface{}{} - for _, element := range values { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, 
nil - } - return nil, errors.New("Unknown AST node: " + node.nodeType.String()) -} - -func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { - rv := reflect.ValueOf(value) - first, n := utf8.DecodeRuneInString(key) - fieldName := string(unicode.ToUpper(first)) + key[n:] - if rv.Kind() == reflect.Struct { - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } else if rv.Kind() == reflect.Ptr { - // Handle multiple levels of indirection? - if rv.IsNil() { - return nil, nil - } - rv = rv.Elem() - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } - return nil, nil -} - -func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - flattened := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - if reflect.TypeOf(element).Kind() == reflect.Slice { - // Then insert the contents of the element - // slice into the flattened slice, - // i.e flattened = append(flattened, mySlice...) - elementV := reflect.ValueOf(element) - for j := 0; j < elementV.Len(); j++ { - flattened = append( - flattened, elementV.Index(j).Interface()) - } - } else { - flattened = append(flattened, element) - } - } - return flattened, nil -} - -func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - final := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - final = append(final, element) - } - return slice(final, sliceParams) -} - -func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { - compareNode := node.children[2] - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil -} - -func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if result != nil { - collected = append(collected, result) - } - } - return collected, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go deleted file mode 100644 index 817900c8f5..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/lexer.go +++ /dev/null @@ -1,420 +0,0 @@ -package jmespath - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -type token struct { - tokenType tokType - value string - position int - length int -} - -type tokType int - -const eof = -1 - -// Lexer contains information about the expression being tokenized. -type Lexer struct { - expression string // The expression provided by the user. 
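`fieldFromStruct` above is what lets a lowercase JMESPath identifier such as `name` resolve to the exported Go field `Name`: uppercase the first rune (Unicode-aware, via `utf8.DecodeRuneInString`), then look the field up by reflection, dereferencing one level of pointer. The same trick in isolation, as a hedged sketch with an invented helper name:

```go
package main

import (
	"fmt"
	"reflect"
	"unicode"
	"unicode/utf8"
)

// fieldByJSONishName resolves "name" to the exported field "Name",
// dereferencing one level of pointer, much like the deleted
// fieldFromStruct.
func fieldByJSONishName(value interface{}, key string) (interface{}, bool) {
	// Capitalize the first rune only; the rest of the key is untouched.
	first, n := utf8.DecodeRuneInString(key)
	fieldName := string(unicode.ToUpper(first)) + key[n:]

	rv := reflect.ValueOf(value)
	if rv.Kind() == reflect.Ptr {
		if rv.IsNil() {
			return nil, false
		}
		rv = rv.Elem()
	}
	if rv.Kind() != reflect.Struct {
		return nil, false
	}
	f := rv.FieldByName(fieldName)
	if !f.IsValid() {
		return nil, false
	}
	return f.Interface(), true
}

func main() {
	type user struct{ Name string }
	v, ok := fieldByJSONishName(&user{Name: "octocat"}, "name")
	fmt.Println(v, ok) // octocat true
}
```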
- currentPos int // The current position in the string. - lastWidth int // The width of the current rune. This is what back() uses to step back one rune. - buf bytes.Buffer // Internal buffer used for building up values. -} - -// SyntaxError is the main error used whenever a lexing or parsing error occurs. -type SyntaxError struct { - msg string // Error message displayed to user - Expression string // Expression that generated a SyntaxError - Offset int // The location in the string where the error occurred -} - -func (e SyntaxError) Error() string { - // In the future, it would be good to underline the specific - // location where the error occurred. - return "SyntaxError: " + e.msg -} - -// HighlightLocation will show where the syntax error occurred. -// It will place a "^" character on a line below the expression -// at the point where the syntax error occurred. -func (e SyntaxError) HighlightLocation() string { - return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" -} - -//go:generate stringer -type=tokType -const ( - tUnknown tokType = iota - tStar - tDot - tFilter - tFlatten - tLparen - tRparen - tLbracket - tRbracket - tLbrace - tRbrace - tOr - tPipe - tNumber - tUnquotedIdentifier - tQuotedIdentifier - tComma - tColon - tLT - tLTE - tGT - tGTE - tEQ - tNE - tJSONLiteral - tStringLiteral - tCurrent - tExpref - tAnd - tNot - tEOF -) - -var basicTokens = map[rune]tokType{ - '.': tDot, - '*': tStar, - ',': tComma, - ':': tColon, - '{': tLbrace, - '}': tRbrace, - ']': tRbracket, // tLbracket not included because it could be "[]" - '(': tLparen, - ')': tRparen, - '@': tCurrent, -} - -// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. -// When using this bitmask just be sure to shift the rune down 64 bits -// before checking against identifierStartBits. -const identifierStartBits uint64 = 576460745995190270 - -// Bit mask for [a-zA-Z0-9_], 128 bits -> 2 uint64s. -var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} - -var whiteSpace = map[rune]bool{ - ' ': true, '\t': true, '\n': true, '\r': true, -} - -func (t token) String() string { - return fmt.Sprintf("Token{%+v, %s, %d, %d}", - t.tokenType, t.value, t.position, t.length) -} - -// NewLexer creates a new JMESPath lexer. -func NewLexer() *Lexer { - lexer := Lexer{} - return &lexer -} - -func (lexer *Lexer) next() rune { - if lexer.currentPos >= len(lexer.expression) { - lexer.lastWidth = 0 - return eof - } - r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) - lexer.lastWidth = w - lexer.currentPos += w - return r -} - -func (lexer *Lexer) back() { - lexer.currentPos -= lexer.lastWidth -} - -func (lexer *Lexer) peek() rune { - t := lexer.next() - lexer.back() - return t -} - -// tokenize takes an expression and returns corresponding tokens. -func (lexer *Lexer) tokenize(expression string) ([]token, error) { - var tokens []token - lexer.expression = expression - lexer.currentPos = 0 - lexer.lastWidth = 0 -loop: - for { - r := lexer.next() - if identifierStartBits&(1<<(uint64(r)-64)) > 0 { - t := lexer.consumeUnquotedIdentifier() - tokens = append(tokens, t) - } else if val, ok := basicTokens[r]; ok { - // Basic single char token.
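The `identifierStartBits` constant above packs the character class `[a-zA-Z_]` (all code points between 64 and 127) into a single `uint64` by shifting rune values down by 64, so a membership test is one shift and one AND rather than a chain of range comparisons. The sketch below rebuilds that constant from scratch so the magic number can be verified:

```go
package main

import "fmt"

// buildStartMask packs [A-Za-z_] (all in the range 64..127) into one
// uint64, setting bit (r - 64) for each allowed rune r.
func buildStartMask() uint64 {
	var mask uint64
	for r := 'A'; r <= 'Z'; r++ {
		mask |= 1 << (uint64(r) - 64)
	}
	for r := 'a'; r <= 'z'; r++ {
		mask |= 1 << (uint64(r) - 64)
	}
	mask |= 1 << (uint64('_') - 64)
	return mask
}

func isIdentifierStart(r rune, mask uint64) bool {
	// Runes below 64 or above 127 fall outside the packed range, so
	// guard explicitly here. (The original relies on Go's shift
	// semantics, where a shift of 64 or more yields zero.)
	if r < 64 || r > 127 {
		return false
	}
	return mask&(1<<(uint64(r)-64)) != 0
}

func main() {
	mask := buildStartMask()
	fmt.Println(mask) // 576460745995190270, matching identifierStartBits
	fmt.Println(isIdentifierStart('f', mask), isIdentifierStart('1', mask)) // true false
}
```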
- t := token{ - tokenType: val, - value: string(r), - position: lexer.currentPos - lexer.lastWidth, - length: 1, - } - tokens = append(tokens, t) - } else if r == '-' || (r >= '0' && r <= '9') { - t := lexer.consumeNumber() - tokens = append(tokens, t) - } else if r == '[' { - t := lexer.consumeLBracket() - tokens = append(tokens, t) - } else if r == '"' { - t, err := lexer.consumeQuotedIdentifier() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '\'' { - t, err := lexer.consumeRawStringLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '`' { - t, err := lexer.consumeLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '|' { - t := lexer.matchOrElse(r, '|', tOr, tPipe) - tokens = append(tokens, t) - } else if r == '<' { - t := lexer.matchOrElse(r, '=', tLTE, tLT) - tokens = append(tokens, t) - } else if r == '>' { - t := lexer.matchOrElse(r, '=', tGTE, tGT) - tokens = append(tokens, t) - } else if r == '!' { - t := lexer.matchOrElse(r, '=', tNE, tNot) - tokens = append(tokens, t) - } else if r == '=' { - t := lexer.matchOrElse(r, '=', tEQ, tUnknown) - tokens = append(tokens, t) - } else if r == '&' { - t := lexer.matchOrElse(r, '&', tAnd, tExpref) - tokens = append(tokens, t) - } else if r == eof { - break loop - } else if _, ok := whiteSpace[r]; ok { - // Ignore whitespace - } else { - return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) - } - } - tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) - return tokens, nil -} - -// Consume characters until the ending rune "r" is reached. -// If the end of the expression is reached before seeing the -// terminating rune "r", then an error is returned. -// If no error occurs then the matching substring is returned. -// The returned string will not include the ending rune. -func (lexer *Lexer) consumeUntil(end rune) (string, error) { - start := lexer.currentPos - current := lexer.next() - for current != end && current != eof { - if current == '\\' && lexer.peek() != eof { - lexer.next() - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. - return "", SyntaxError{ - msg: "Unclosed delimiter: " + string(end), - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil -} - -func (lexer *Lexer) consumeLiteral() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('`') - if err != nil { - return token{}, err - } - value = strings.Replace(value, "\\`", "`", -1) - return token{ - tokenType: tJSONLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) consumeRawStringLiteral() (token, error) { - start := lexer.currentPos - currentIndex := start - current := lexer.next() - for current != '\'' && lexer.peek() != eof { - if current == '\\' && lexer.peek() == '\'' { - chunk := lexer.expression[currentIndex : lexer.currentPos-1] - lexer.buf.WriteString(chunk) - lexer.buf.WriteString("'") - lexer.next() - currentIndex = lexer.currentPos - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. 
- return token{}, SyntaxError{ - msg: "Unclosed delimiter: '", - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - if currentIndex < lexer.currentPos { - lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) - } - value := lexer.buf.String() - // Reset the buffer so it can be reused again. - lexer.buf.Reset() - return token{ - tokenType: tStringLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: lexer.expression, - Offset: lexer.currentPos - 1, - } -} - -// Checks for a two char token, otherwise matches a single character -// token. This is used whenever a two char token overlaps a single -// char token, e.g. "||" -> tOr, "|" -> tPipe. -func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == second { - t = token{ - tokenType: matchedType, - value: string(first) + string(second), - position: start, - length: 2, - } - } else { - lexer.back() - t = token{ - tokenType: singleCharType, - value: string(first), - position: start, - length: 1, - } - } - return t -} - -func (lexer *Lexer) consumeLBracket() token { - // There are three options here: - // 1. A filter expression "[?" - // 2. A flatten operator "[]" - // 3. A bare rbracket "[" - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == '?' { - t = token{ - tokenType: tFilter, - value: "[?", - position: start, - length: 2, - } - } else if nextRune == ']' { - t = token{ - tokenType: tFlatten, - value: "[]", - position: start, - length: 2, - } - } else { - t = token{ - tokenType: tLbracket, - value: "[", - position: start, - length: 1, - } - lexer.back() - } - return t -} - -func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('"') - if err != nil { - return token{}, err - } - var decoded string - asJSON := []byte("\"" + value + "\"") - if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { - return token{}, err - } - return token{ - tokenType: tQuotedIdentifier, - value: decoded, - position: start - 1, - length: len(decoded), - }, nil -} - -func (lexer *Lexer) consumeUnquotedIdentifier() token { - // Consume runes until we reach the end of an unquoted - // identifier. - start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tUnquotedIdentifier, - value: value, - position: start, - length: lexer.currentPos - start, - } -} - -func (lexer *Lexer) consumeNumber() token { - // Consume runes until we reach something that's not a number.
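`matchOrElse` above resolves the usual one-versus-two character operator ambiguity (`|` vs `||`, `<` vs `<=`) by reading one extra rune and backing up on a miss, i.e. maximal munch. A freestanding, ASCII-only sketch of the idea over a plain byte cursor (the `scanner` type is illustrative):

```go
package main

import "fmt"

type scanner struct {
	input string
	pos   int
}

// matchOrElse consumes one extra byte when it completes a two-character
// operator, otherwise leaves the cursor where it was, mirroring the
// deleted lexer helper.
func (s *scanner) matchOrElse(second byte, matched, single string) string {
	if s.pos < len(s.input) && s.input[s.pos] == second {
		s.pos++
		return matched
	}
	return single
}

func (s *scanner) next() (byte, bool) {
	if s.pos >= len(s.input) {
		return 0, false
	}
	b := s.input[s.pos]
	s.pos++
	return b, true
}

func main() {
	s := &scanner{input: "<=|x"}
	for {
		b, ok := s.next()
		if !ok {
			break
		}
		switch b {
		case '<':
			fmt.Println(s.matchOrElse('=', "LTE", "LT"))
		case '|':
			fmt.Println(s.matchOrElse('|', "OR", "PIPE"))
		default:
			fmt.Println("IDENT", string(b))
		}
	}
	// Prints LTE, then PIPE, then IDENT x.
}
```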
- start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < '0' || r > '9' { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tNumber, - value: value, - position: start, - length: lexer.currentPos - start, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go deleted file mode 100644 index 4abc303ab4..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ /dev/null @@ -1,603 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -type astNodeType int - -//go:generate stringer -type astNodeType -const ( - ASTEmpty astNodeType = iota - ASTComparator - ASTCurrentNode - ASTExpRef - ASTFunctionExpression - ASTField - ASTFilterProjection - ASTFlatten - ASTIdentity - ASTIndex - ASTIndexExpression - ASTKeyValPair - ASTLiteral - ASTMultiSelectHash - ASTMultiSelectList - ASTOrExpression - ASTAndExpression - ASTNotExpression - ASTPipe - ASTProjection - ASTSubexpression - ASTSlice - ASTValueProjection -) - -// ASTNode represents the abstract syntax tree of a JMESPath expression. -type ASTNode struct { - nodeType astNodeType - value interface{} - children []ASTNode -} - -func (node ASTNode) String() string { - return node.PrettyPrint(0) -} - -// PrettyPrint will pretty print the parsed AST. -// The AST is an implementation detail and this pretty print -// function is provided as a convenience method to help with -// debugging. You should not rely on its output as the internal -// structure of the AST may change at any time. -func (node ASTNode) PrettyPrint(indent int) string { - spaces := strings.Repeat(" ", indent) - output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) - nextIndent := indent + 2 - if node.value != nil { - if converted, ok := node.value.(fmt.Stringer); ok { - // Account for things like comparator nodes - // that are enums with a String() method. - output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) - } else { - output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) - } - } - lastIndex := len(node.children) - if lastIndex > 0 { - output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) - childIndent := nextIndent + 2 - for _, elem := range node.children { - output += elem.PrettyPrint(childIndent) - } - } - output += fmt.Sprintf("%s}\n", spaces) - return output -} - -var bindingPowers = map[tokType]int{ - tEOF: 0, - tUnquotedIdentifier: 0, - tQuotedIdentifier: 0, - tRbracket: 0, - tRparen: 0, - tComma: 0, - tRbrace: 0, - tNumber: 0, - tCurrent: 0, - tExpref: 0, - tColon: 0, - tPipe: 1, - tOr: 2, - tAnd: 3, - tEQ: 5, - tLT: 5, - tLTE: 5, - tGT: 5, - tGTE: 5, - tNE: 5, - tFlatten: 9, - tStar: 20, - tFilter: 21, - tDot: 40, - tNot: 45, - tLbrace: 50, - tLbracket: 55, - tLparen: 60, -} - -// Parser holds state about the current expression being parsed. -type Parser struct { - expression string - tokens []token - index int -} - -// NewParser creates a new JMESPath parser. -func NewParser() *Parser { - p := Parser{} - return &p -} - -// Parse will compile a JMESPath expression. 
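The `bindingPowers` table above drives a Pratt (top-down operator-precedence) parser: `parseExpression` consumes a prefix ("nud") node, then keeps folding infix ("led") constructs into it while the next token binds more tightly than the binding power the caller passed in. The skeleton, reduced to two arithmetic operators over string tokens:

```go
package main

import "fmt"

type parser struct {
	tokens []string
	index  int
}

var bindingPowers = map[string]int{"": 0, "+": 10, "*": 20}

func (p *parser) current() string {
	if p.index >= len(p.tokens) {
		return ""
	}
	return p.tokens[p.index]
}

// parseExpression is the heart of a Pratt parser: take a prefix node
// (every prefix here is a bare operand), then loop while the upcoming
// operator binds tighter than the caller's binding power.
func (p *parser) parseExpression(bindingPower int) string {
	left := p.tokens[p.index]
	p.index++
	for bindingPower < bindingPowers[p.current()] {
		op := p.current()
		p.index++
		right := p.parseExpression(bindingPowers[op])
		left = "(" + left + " " + op + " " + right + ")"
	}
	return left
}

func main() {
	p := &parser{tokens: []string{"1", "+", "2", "*", "3"}}
	fmt.Println(p.parseExpression(0)) // (1 + (2 * 3))
}
```

Because each recursive call receives the operator's own binding power, `*` groups under `+` and equal-precedence operators associate to the left, exactly the behavior the table encodes for the JMESPath grammar.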
-func (p *Parser) Parse(expression string) (ASTNode, error) { - lexer := NewLexer() - p.expression = expression - p.index = 0 - tokens, err := lexer.tokenize(expression) - if err != nil { - return ASTNode{}, err - } - p.tokens = tokens - parsed, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() != tEOF { - return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expression: %s", p.current())) - } - return parsed, nil -} - -func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { - var err error - leftToken := p.lookaheadToken(0) - p.advance() - leftNode, err := p.nud(leftToken) - if err != nil { - return ASTNode{}, err - } - currentToken := p.current() - for bindingPower < bindingPowers[currentToken] { - p.advance() - leftNode, err = p.led(currentToken, leftNode) - if err != nil { - return ASTNode{}, err - } - currentToken = p.current() - } - return leftNode, nil -} - -func (p *Parser) parseIndexExpression() (ASTNode, error) { - if p.lookahead(0) == tColon || p.lookahead(1) == tColon { - return p.parseSliceExpression() - } - indexStr := p.lookaheadToken(0).value - parsedInt, err := strconv.Atoi(indexStr) - if err != nil { - return ASTNode{}, err - } - indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} - p.advance() - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return indexNode, nil -} - -func (p *Parser) parseSliceExpression() (ASTNode, error) { - parts := []*int{nil, nil, nil} - index := 0 - current := p.current() - for current != tRbracket && index < 3 { - if current == tColon { - index++ - p.advance() - } else if current == tNumber { - parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) - if err != nil { - return ASTNode{}, err - } - parts[index] = &parsedInt - p.advance() - } else { - return ASTNode{}, p.syntaxError( - "Expected tColon or tNumber" + ", received: " + p.current().String()) - } - current = p.current() - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTSlice, - value: parts, - }, nil -} - -func (p *Parser) match(tokenType tokType) error { - if p.current() == tokenType { - p.advance() - return nil - } - return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) -} - -func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { - switch tokenType { - case tDot: - if p.current() != tStar { - right, err := p.parseDotRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTSubexpression, - children: []ASTNode{node, right}, - }, err - } - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTValueProjection, - children: []ASTNode{node, right}, - }, err - case tPipe: - right, err := p.parseExpression(bindingPowers[tPipe]) - return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err - case tOr: - right, err := p.parseExpression(bindingPowers[tOr]) - return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err - case tAnd: - right, err := p.parseExpression(bindingPowers[tAnd]) - return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err - case tLparen: - name := node.value - var args []ASTNode - for p.current() != tRparen { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() == tComma { - if err := p.match(tComma); err != nil { - return ASTNode{}, err - } - } - args = append(args, expression) - } - 
if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTFunctionExpression, - value: name, - children: args, - }, nil - case tFilter: - return p.parseFilter(node) - case tFlatten: - left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{left, right}, - }, err - case tEQ, tNE, tGT, tGTE, tLT, tLTE: - right, err := p.parseExpression(bindingPowers[tokenType]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTComparator, - value: tokenType, - children: []ASTNode{node, right}, - }, nil - case tLbracket: - tokenType := p.current() - var right ASTNode - var err error - if tokenType == tNumber || tokenType == tColon { - right, err = p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(node, right) - } - // Otherwise this is a projection. - if err := p.match(tStar); err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{node, right}, - }, nil - } - return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) -} - -func (p *Parser) nud(token token) (ASTNode, error) { - switch token.tokenType { - case tJSONLiteral: - var parsed interface{} - err := json.Unmarshal([]byte(token.value), &parsed) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTLiteral, value: parsed}, nil - case tStringLiteral: - return ASTNode{nodeType: ASTLiteral, value: token.value}, nil - case tUnquotedIdentifier: - return ASTNode{ - nodeType: ASTField, - value: token.value, - }, nil - case tQuotedIdentifier: - node := ASTNode{nodeType: ASTField, value: token.value} - if p.current() == tLparen { - return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) - } - return node, nil - case tStar: - left := ASTNode{nodeType: ASTIdentity} - var right ASTNode - var err error - if p.current() == tRbracket { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - } - return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err - case tFilter: - return p.parseFilter(ASTNode{nodeType: ASTIdentity}) - case tLbrace: - return p.parseMultiSelectHash() - case tFlatten: - left := ASTNode{ - nodeType: ASTFlatten, - children: []ASTNode{{nodeType: ASTIdentity}}, - } - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil - case tLbracket: - tokenType := p.current() - if tokenType == tNumber || tokenType == tColon { - right, err := p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) - } else if tokenType == tStar && p.lookahead(1) == tRbracket { - p.advance() - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{{nodeType: ASTIdentity}, right}, - }, nil - } else { - return p.parseMultiSelectList() - } - case tCurrent: - return ASTNode{nodeType:
ASTCurrentNode}, nil - case tExpref: - expression, err := p.parseExpression(bindingPowers[tExpref]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil - case tNot: - expression, err := p.parseExpression(bindingPowers[tNot]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil - case tLparen: - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return expression, nil - case tEOF: - return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) - } - - return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) -} - -func (p *Parser) parseMultiSelectList() (ASTNode, error) { - var expressions []ASTNode - for { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - expressions = append(expressions, expression) - if p.current() == tRbracket { - break - } - err = p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } - err := p.match(tRbracket) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTMultiSelectList, - children: expressions, - }, nil -} - -func (p *Parser) parseMultiSelectHash() (ASTNode, error) { - var children []ASTNode - for { - keyToken := p.lookaheadToken(0) - if err := p.match(tUnquotedIdentifier); err != nil { - if err := p.match(tQuotedIdentifier); err != nil { - return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") - } - } - keyName := keyToken.value - err := p.match(tColon) - if err != nil { - return ASTNode{}, err - } - value, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - node := ASTNode{ - nodeType: ASTKeyValPair, - value: keyName, - children: []ASTNode{value}, - } - children = append(children, node) - if p.current() == tComma { - err := p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } else if p.current() == tRbrace { - err := p.match(tRbrace) - if err != nil { - return ASTNode{}, err - } - break - } - } - return ASTNode{ - nodeType: ASTMultiSelectHash, - children: children, - }, nil -} - -func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { - indexExpr := ASTNode{ - nodeType: ASTIndexExpression, - children: []ASTNode{left, right}, - } - if right.nodeType == ASTSlice { - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{indexExpr, right}, - }, err - } - return indexExpr, nil -} -func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { - var right, condition ASTNode - var err error - condition, err = p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - if p.current() == tFlatten { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tFilter]) - if err != nil { - return ASTNode{}, err - } - } - - return ASTNode{ - nodeType: ASTFilterProjection, - children: []ASTNode{node, right, condition}, - }, nil -} - -func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { - lookahead := p.current() - if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { - return p.parseExpression(bindingPower) - } else if lookahead == tLbracket { - if err := p.match(tLbracket); err != nil
{ - return ASTNode{}, err - } - return p.parseMultiSelectList() - } else if lookahead == tLbrace { - if err := p.match(tLbrace); err != nil { - return ASTNode{}, err - } - return p.parseMultiSelectHash() - } - return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") -} - -func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { - current := p.current() - if bindingPowers[current] < 10 { - return ASTNode{nodeType: ASTIdentity}, nil - } else if current == tLbracket { - return p.parseExpression(bindingPower) - } else if current == tFilter { - return p.parseExpression(bindingPower) - } else if current == tDot { - err := p.match(tDot) - if err != nil { - return ASTNode{}, err - } - return p.parseDotRHS(bindingPower) - } else { - return ASTNode{}, p.syntaxError("Error") - } -} - -func (p *Parser) lookahead(number int) tokType { - return p.lookaheadToken(number).tokenType -} - -func (p *Parser) current() tokType { - return p.lookahead(0) -} - -func (p *Parser) lookaheadToken(number int) token { - return p.tokens[p.index+number] -} - -func (p *Parser) advance() { - p.index++ -} - -func tokensOneOf(elements []tokType, token tokType) bool { - for _, elem := range elements { - if elem == token { - return true - } - } - return false -} - -func (p *Parser) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: p.lookaheadToken(0).position, - } -} - -// Create a SyntaxError based on the provided token. -// This differs from syntaxError() which creates a SyntaxError -// based on the current lookahead token. -func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: t.position, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go deleted file mode 100644 index dae79cbdf3..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/toktype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=tokType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" - -var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} - -func (i tokType) String() string { - if i < 0 || i >= tokType(len(_tokType_index)-1) { - return fmt.Sprintf("tokType(%d)", i) - } - return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go deleted file mode 100644 index ddc1b7d7d4..0000000000 --- a/vendor/github.com/jmespath/go-jmespath/util.go +++ /dev/null @@ -1,185 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" -) - -// IsFalse determines if an object is false based on the JMESPath spec. -// JMESPath defines false values to be any of: -// - An empty string, array, or hash. -// - The boolean value false.
-// - nil -func isFalse(value interface{}) bool { - switch v := value.(type) { - case bool: - return !v - case []interface{}: - return len(v) == 0 - case map[string]interface{}: - return len(v) == 0 - case string: - return len(v) == 0 - case nil: - return true - } - // Try the reflection cases before returning false. - rv := reflect.ValueOf(value) - switch rv.Kind() { - case reflect.Struct: - // A struct type will never be false, even if - // all of its values are the zero type. - return false - case reflect.Slice, reflect.Map: - return rv.Len() == 0 - case reflect.Ptr: - if rv.IsNil() { - return true - } - // If it's a pointer type, we'll try to deref the pointer - // and evaluate the pointer value for isFalse. - element := rv.Elem() - return isFalse(element.Interface()) - } - return false -} - -// ObjsEqual is a generic object equality check. -// It will take two arbitrary objects and recursively determine -// if they are equal. -func objsEqual(left interface{}, right interface{}) bool { - return reflect.DeepEqual(left, right) -} - -// SliceParam refers to a single part of a slice. -// A slice consists of a start, a stop, and a step, similar to -// python slices. -type sliceParam struct { - N int - Specified bool -} - -// Slice supports [start:stop:step] style slicing that's supported in JMESPath. -func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { - computed, err := computeSliceParams(len(slice), parts) - if err != nil { - return nil, err - } - start, stop, step := computed[0], computed[1], computed[2] - result := []interface{}{} - if step > 0 { - for i := start; i < stop; i += step { - result = append(result, slice[i]) - } - } else { - for i := start; i > stop; i += step { - result = append(result, slice[i]) - } - } - return result, nil -} - -func computeSliceParams(length int, parts []sliceParam) ([]int, error) { - var start, stop, step int - if !parts[2].Specified { - step = 1 - } else if parts[2].N == 0 { - return nil, errors.New("Invalid slice, step cannot be 0") - } else { - step = parts[2].N - } - var stepValueNegative bool - if step < 0 { - stepValueNegative = true - } else { - stepValueNegative = false - } - - if !parts[0].Specified { - if stepValueNegative { - start = length - 1 - } else { - start = 0 - } - } else { - start = capSlice(length, parts[0].N, step) - } - - if !parts[1].Specified { - if stepValueNegative { - stop = -1 - } else { - stop = length - } - } else { - stop = capSlice(length, parts[1].N, step) - } - return []int{start, stop, step}, nil -} - -func capSlice(length int, actual int, step int) int { - if actual < 0 { - actual += length - if actual < 0 { - if step < 0 { - actual = -1 - } else { - actual = 0 - } - } - } else if actual >= length { - if step < 0 { - actual = length - 1 - } else { - actual = length - } - } - return actual -} - -// ToArrayNum converts an empty interface type to a slice of float64. -// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. -func toArrayNum(data interface{}) ([]float64, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]float64, len(d)) - for i, el := range d { - item, ok := el.(float64) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -// ToArrayStr converts an empty interface type to a slice of strings. 
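`computeSliceParams` and `capSlice` above implement Python-style slice semantics: negative indices count from the end, out-of-range indices are clamped rather than panicking, and the defaults for start and stop depend on the sign of the step. A sketch of just the clamping rule (the `normalize` name is mine, not the library's):

```go
package main

import "fmt"

// normalize clamps a possibly negative slice index into [0, length]
// for positive steps, or [-1, length-1] for negative steps, the same
// rule the deleted capSlice applies.
func normalize(length, idx, step int) int {
	if idx < 0 {
		idx += length
		if idx < 0 {
			if step < 0 {
				return -1
			}
			return 0
		}
		return idx
	}
	if idx >= length {
		if step < 0 {
			return length - 1
		}
		return length
	}
	return idx
}

func main() {
	data := []int{10, 20, 30, 40, 50}
	// Equivalent of the JMESPath slice [-3:] with step 1:
	start := normalize(len(data), -3, 1)
	fmt.Println(data[start:]) // [30 40 50]
	// An out-of-range start is clamped rather than panicking:
	fmt.Println(data[normalize(len(data), 99, 1):]) // []
}
```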
-// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. If the input data could be entirely -// converted, then the converted data, along with a second value of true, -// will be returned. -func toArrayStr(data interface{}) ([]string, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]string, len(d)) - for i, el := range d { - item, ok := el.(string) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -func isSliceType(v interface{}) bool { - if v == nil { - return false - } - return reflect.TypeOf(v).Kind() == reflect.Slice -} diff --git a/vendor/github.com/karamaru-alpha/copyloopvar/.gitignore b/vendor/github.com/karamaru-alpha/copyloopvar/.gitignore new file mode 100644 index 0000000000..816abbd923 --- /dev/null +++ b/vendor/github.com/karamaru-alpha/copyloopvar/.gitignore @@ -0,0 +1,2 @@ +.idea/ +copyloopvar diff --git a/vendor/github.com/karamaru-alpha/copyloopvar/LICENSE b/vendor/github.com/karamaru-alpha/copyloopvar/LICENSE new file mode 100644 index 0000000000..e2567fd0c5 --- /dev/null +++ b/vendor/github.com/karamaru-alpha/copyloopvar/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Ryosei Karaki + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/karamaru-alpha/copyloopvar/README.md b/vendor/github.com/karamaru-alpha/copyloopvar/README.md new file mode 100644 index 0000000000..d31d1abd97 --- /dev/null +++ b/vendor/github.com/karamaru-alpha/copyloopvar/README.md @@ -0,0 +1,27 @@ +# copyloopvar + +copyloopvar is a linter detects places where loop variables are copied. + +cf. [Fixing For Loops in Go 1.22](https://go.dev/blog/loopvar-preview) + +## Example + +```go +for i, v := range []int{1, 2, 3} { + i := i // The copy of the 'for' variable "i" can be deleted (Go 1.22+) + v := v // The copy of the 'for' variable "v" can be deleted (Go 1.22+) + _, _ = i, v +} + +for i := 1; i <= 3; i++ { + i := i // The copy of the 'for' variable "i" can be deleted (Go 1.22+) + _ = i +} +``` + +## Install + +```bash +go install github.com/karamaru-alpha/copyloopvar/cmd/copyloopvar@latest +go vet -vettool=`which copyloopvar` ./... 
+``` diff --git a/vendor/github.com/karamaru-alpha/copyloopvar/copyloopvar.go b/vendor/github.com/karamaru-alpha/copyloopvar/copyloopvar.go new file mode 100644 index 0000000000..dfb0e7d673 --- /dev/null +++ b/vendor/github.com/karamaru-alpha/copyloopvar/copyloopvar.go @@ -0,0 +1,135 @@ +package copyloopvar + +import ( + "fmt" + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var ignoreAlias bool + +func NewAnalyzer() *analysis.Analyzer { + analyzer := &analysis.Analyzer{ + Name: "copyloopvar", + Doc: "copyloopvar is a linter detects places where loop variables are copied", + Run: run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + } + analyzer.Flags.BoolVar(&ignoreAlias, "ignore-alias", false, "ignore aliasing of loop variables") + return analyzer +} + +func run(pass *analysis.Pass) (any, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.RangeStmt)(nil), + (*ast.ForStmt)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch node := n.(type) { + case *ast.RangeStmt: + checkRangeStmt(pass, node) + case *ast.ForStmt: + checkForStmt(pass, node) + } + }) + + return nil, nil +} + +func checkRangeStmt(pass *analysis.Pass, rangeStmt *ast.RangeStmt) { + key, ok := rangeStmt.Key.(*ast.Ident) + if !ok { + return + } + var value *ast.Ident + if rangeStmt.Value != nil { + value = rangeStmt.Value.(*ast.Ident) + } + for _, stmt := range rangeStmt.Body.List { + assignStmt, ok := stmt.(*ast.AssignStmt) + if !ok { + continue + } + if assignStmt.Tok != token.DEFINE { + continue + } + for i, rh := range assignStmt.Rhs { + right, ok := rh.(*ast.Ident) + if !ok { + continue + } + if right.Name != key.Name && (value != nil && right.Name != value.Name) { + continue + } + if ignoreAlias { + left, ok := assignStmt.Lhs[i].(*ast.Ident) + if !ok { + continue + } + if left.Name != right.Name { + continue + } + } + pass.Report(analysis.Diagnostic{ + Pos: assignStmt.Pos(), + Message: fmt.Sprintf(`The copy of the 'for' variable "%s" can be deleted (Go 1.22+)`, right.Name), + }) + } + } +} + +func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { + if forStmt.Init == nil { + return + } + initAssignStmt, ok := forStmt.Init.(*ast.AssignStmt) + if !ok { + return + } + initVarNameMap := make(map[string]interface{}, len(initAssignStmt.Lhs)) + for _, lh := range initAssignStmt.Lhs { + if initVar, ok := lh.(*ast.Ident); ok { + initVarNameMap[initVar.Name] = struct{}{} + } + } + for _, stmt := range forStmt.Body.List { + assignStmt, ok := stmt.(*ast.AssignStmt) + if !ok { + continue + } + if assignStmt.Tok != token.DEFINE { + continue + } + for i, rh := range assignStmt.Rhs { + right, ok := rh.(*ast.Ident) + if !ok { + continue + } + if _, ok := initVarNameMap[right.Name]; !ok { + continue + } + if ignoreAlias { + left, ok := assignStmt.Lhs[i].(*ast.Ident) + if !ok { + continue + } + if left.Name != right.Name { + continue + } + } + pass.Report(analysis.Diagnostic{ + Pos: assignStmt.Pos(), + Message: fmt.Sprintf(`The copy of the 'for' variable "%s" can be deleted (Go 1.22+)`, right.Name), + }) + } + } +} diff --git a/vendor/github.com/kisielk/errcheck/errcheck/analyzer.go b/vendor/github.com/kisielk/errcheck/errcheck/analyzer.go index 68593cc9ad..82ab6298a9 100644 --- a/vendor/github.com/kisielk/errcheck/errcheck/analyzer.go +++ b/vendor/github.com/kisielk/errcheck/errcheck/analyzer.go @@ -31,7 +31,6 @@ func init() { } 
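The errcheck hunks that follow make three behavioral changes: diagnostics now carry the category "errcheck", package loading is narrowed from the deprecated packages.LoadAllSyntax to the specific NeedSyntax|NeedTypes|NeedTypesInfo bits the checker actually uses, and the visitor gains an *ast.GenDecl case so that unchecked errors in var declarations with initializers are reported too. A minimal sketch of the pattern the new GenDecl case catches — the file name and identifiers are hypothetical, not part of this diff:

```go
package main

import "os"

// Before this change, errcheck only inspected assignment statements
// (e.g. config, _ := os.Open(...)), so the same pattern written as a
// top-level var declaration was never visited. With blank-identifier
// checking enabled (errcheck -blank), the declaration below is now
// flagged, because the error return of os.Open is discarded via "_".
var config, _ = os.Open("config.json")

func main() {
	if config != nil {
		_ = config.Close()
	}
}
```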
func runAnalyzer(pass *analysis.Pass) (interface{}, error) { - exclude := map[string]bool{} if !argExcludeOnly { for _, name := range DefaultExcludedSymbols { @@ -65,8 +64,9 @@ func runAnalyzer(pass *analysis.Pass) (interface{}, error) { for _, err := range v.errors { pass.Report(analysis.Diagnostic{ - Pos: pass.Fset.File(f.Pos()).Pos(err.Pos.Offset), - Message: "unchecked error", + Pos: pass.Fset.File(f.Pos()).Pos(err.Pos.Offset), + Message: "unchecked error", + Category: "errcheck", }) } diff --git a/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go b/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go index 0a4067f928..d61d348f77 100644 --- a/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go +++ b/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go @@ -167,7 +167,7 @@ func (c *Checker) LoadPackages(paths ...string) ([]*packages.Package, error) { buildFlags = append(buildFlags, fmt.Sprintf("-mod=%s", c.Mod)) } cfg := &packages.Config{ - Mode: packages.LoadAllSyntax, + Mode: packages.NeedSyntax | packages.NeedTypes | packages.NeedTypesInfo, Tests: !c.Exclusions.TestFiles, BuildFlags: buildFlags, } @@ -205,7 +205,7 @@ func (c *Checker) CheckPackage(pkg *packages.Package) Result { ignore := map[string]*regexp.Regexp{} // Apply SymbolRegexpsByPackage first so that if the same path appears in - // Packages, a more narrow regexp will be superceded by dotStar below. + // Packages, a more narrow regexp will be superseded by dotStar below. if regexps := c.Exclusions.SymbolRegexpsByPackage; regexps != nil { for pkg, re := range regexps { // TODO warn if previous entry overwritten? @@ -337,7 +337,7 @@ func (v *visitor) selectorName(call *ast.CallExpr) string { // names are returned. If the function is package-qualified (like "fmt.Printf()") // then just that function's fullName is returned. // -// Otherwise, we walk through all the potentially embeddded interfaces of the receiver +// Otherwise, we walk through all the potentially embedded interfaces of the receiver // the collect a list of type-qualified function names that we will check. func (v *visitor) namesForExcludeCheck(call *ast.CallExpr) []string { sel, fn, ok := v.selectorAndFunc(call) @@ -569,73 +569,98 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { if !v.ignoreCall(stmt.Call) && v.callReturnsError(stmt.Call) { v.addErrorAtPosition(stmt.Call.Lparen, stmt.Call) } + case *ast.GenDecl: + if stmt.Tok != token.VAR { + break + } + + for _, spec := range stmt.Specs { + vspec := spec.(*ast.ValueSpec) + + if len(vspec.Values) == 0 { + // ignore declarations w/o assignments + continue + } + + var lhs []ast.Expr + for _, name := range vspec.Names { + lhs = append(lhs, ast.Expr(name)) + } + v.checkAssignment(lhs, vspec.Values) + } + case *ast.AssignStmt: - if len(stmt.Rhs) == 1 { - // single value on rhs; check against lhs identifiers - if call, ok := stmt.Rhs[0].(*ast.CallExpr); ok { - if !v.blank { - break - } - if v.ignoreCall(call) { - break - } - isError := v.errorsByArg(call) - for i := 0; i < len(stmt.Lhs); i++ { - if id, ok := stmt.Lhs[i].(*ast.Ident); ok { - // We shortcut calls to recover() because errorsByArg can't - // check its return types for errors since it returns interface{}. 
- if id.Name == "_" && (v.isRecover(call) || isError[i]) { - v.addErrorAtPosition(id.NamePos, call) - } + v.checkAssignment(stmt.Lhs, stmt.Rhs) + + default: + } + return v +} + +func (v *visitor) checkAssignment(lhs, rhs []ast.Expr) { + if len(rhs) == 1 { + // single value on rhs; check against lhs identifiers + if call, ok := rhs[0].(*ast.CallExpr); ok { + if !v.blank { + return + } + if v.ignoreCall(call) { + return + } + isError := v.errorsByArg(call) + for i := 0; i < len(lhs); i++ { + if id, ok := lhs[i].(*ast.Ident); ok { + // We shortcut calls to recover() because errorsByArg can't + // check its return types for errors since it returns interface{}. + if id.Name == "_" && (v.isRecover(call) || isError[i]) { + v.addErrorAtPosition(id.NamePos, call) } } - } else if assert, ok := stmt.Rhs[0].(*ast.TypeAssertExpr); ok { - if !v.asserts { - break - } - if assert.Type == nil { - // type switch - break - } - if len(stmt.Lhs) < 2 { - // assertion result not read - v.addErrorAtPosition(stmt.Rhs[0].Pos(), nil) - } else if id, ok := stmt.Lhs[1].(*ast.Ident); ok && v.blank && id.Name == "_" { - // assertion result ignored - v.addErrorAtPosition(id.NamePos, nil) - } } - } else { - // multiple value on rhs; in this case a call can't return - // multiple values. Assume len(stmt.Lhs) == len(stmt.Rhs) - for i := 0; i < len(stmt.Lhs); i++ { - if id, ok := stmt.Lhs[i].(*ast.Ident); ok { - if call, ok := stmt.Rhs[i].(*ast.CallExpr); ok { - if !v.blank { - continue - } - if v.ignoreCall(call) { - continue - } - if id.Name == "_" && v.callReturnsError(call) { - v.addErrorAtPosition(id.NamePos, call) - } - } else if assert, ok := stmt.Rhs[i].(*ast.TypeAssertExpr); ok { - if !v.asserts { - continue - } - if assert.Type == nil { - // Shouldn't happen anyway, no multi assignment in type switches - continue - } - v.addErrorAtPosition(id.NamePos, nil) + } else if assert, ok := rhs[0].(*ast.TypeAssertExpr); ok { + if !v.asserts { + return + } + if assert.Type == nil { + // type switch + return + } + if len(lhs) < 2 { + // assertion result not read + v.addErrorAtPosition(rhs[0].Pos(), nil) + } else if id, ok := lhs[1].(*ast.Ident); ok && v.blank && id.Name == "_" { + // assertion result ignored + v.addErrorAtPosition(id.NamePos, nil) + } + } + } else { + // multiple value on rhs; in this case a call can't return + // multiple values. Assume len(lhs) == len(rhs) + for i := 0; i < len(lhs); i++ { + if id, ok := lhs[i].(*ast.Ident); ok { + if call, ok := rhs[i].(*ast.CallExpr); ok { + if !v.blank { + continue + } + if v.ignoreCall(call) { + continue + } + if id.Name == "_" && v.callReturnsError(call) { + v.addErrorAtPosition(id.NamePos, call) + } + } else if assert, ok := rhs[i].(*ast.TypeAssertExpr); ok { + if !v.asserts { + continue } + if assert.Type == nil { + // Shouldn't happen anyway, no multi assignment in type switches + continue + } + v.addErrorAtPosition(id.NamePos, nil) } } } - default: } - return v } func isErrorType(t types.Type) bool { diff --git a/vendor/github.com/kisielk/errcheck/errcheck/excludes.go b/vendor/github.com/kisielk/errcheck/errcheck/excludes.go index 22db9fe11d..a783b5a763 100644 --- a/vendor/github.com/kisielk/errcheck/errcheck/excludes.go +++ b/vendor/github.com/kisielk/errcheck/errcheck/excludes.go @@ -3,64 +3,60 @@ package errcheck import ( "bufio" "bytes" - "io/ioutil" + "os" "strings" ) -var ( - // DefaultExcludedSymbols is a list of symbol names that are usually excluded from checks by default. 
- // - // Note, that they still need to be explicitly copied to Checker.Exclusions.Symbols - DefaultExcludedSymbols = []string{ - // bytes - "(*bytes.Buffer).Write", - "(*bytes.Buffer).WriteByte", - "(*bytes.Buffer).WriteRune", - "(*bytes.Buffer).WriteString", +// DefaultExcludedSymbols is a list of symbol names that are usually excluded from checks by default. +// +// Note, that they still need to be explicitly copied to Checker.Exclusions.Symbols +var DefaultExcludedSymbols = []string{ + // bytes + "(*bytes.Buffer).Write", + "(*bytes.Buffer).WriteByte", + "(*bytes.Buffer).WriteRune", + "(*bytes.Buffer).WriteString", - // fmt - "fmt.Errorf", - "fmt.Print", - "fmt.Printf", - "fmt.Println", - "fmt.Fprint(*bytes.Buffer)", - "fmt.Fprintf(*bytes.Buffer)", - "fmt.Fprintln(*bytes.Buffer)", - "fmt.Fprint(*strings.Builder)", - "fmt.Fprintf(*strings.Builder)", - "fmt.Fprintln(*strings.Builder)", - "fmt.Fprint(os.Stderr)", - "fmt.Fprintf(os.Stderr)", - "fmt.Fprintln(os.Stderr)", + // fmt + "fmt.Print", + "fmt.Printf", + "fmt.Println", + "fmt.Fprint(*bytes.Buffer)", + "fmt.Fprintf(*bytes.Buffer)", + "fmt.Fprintln(*bytes.Buffer)", + "fmt.Fprint(*strings.Builder)", + "fmt.Fprintf(*strings.Builder)", + "fmt.Fprintln(*strings.Builder)", + "fmt.Fprint(os.Stderr)", + "fmt.Fprintf(os.Stderr)", + "fmt.Fprintln(os.Stderr)", - // io - "(*io.PipeReader).CloseWithError", - "(*io.PipeWriter).CloseWithError", + // io + "(*io.PipeReader).CloseWithError", + "(*io.PipeWriter).CloseWithError", - // math/rand - "math/rand.Read", - "(*math/rand.Rand).Read", + // math/rand + "math/rand.Read", + "(*math/rand.Rand).Read", - // strings - "(*strings.Builder).Write", - "(*strings.Builder).WriteByte", - "(*strings.Builder).WriteRune", - "(*strings.Builder).WriteString", + // strings + "(*strings.Builder).Write", + "(*strings.Builder).WriteByte", + "(*strings.Builder).WriteRune", + "(*strings.Builder).WriteString", - // hash - "(hash.Hash).Write", - } -) + // hash + "(hash.Hash).Write", +} // ReadExcludes reads an excludes file, a newline delimited file that lists // patterns for which to allow unchecked errors. // // Lines that start with two forward slashes are considered comments and are ignored. -// func ReadExcludes(path string) ([]string, error) { var excludes []string - buf, err := ioutil.ReadFile(path) + buf, err := os.ReadFile(path) if err != nil { return nil, err } diff --git a/vendor/github.com/kisielk/gotool/.travis.yml b/vendor/github.com/kisielk/gotool/.travis.yml deleted file mode 100644 index d1784e1e23..0000000000 --- a/vendor/github.com/kisielk/gotool/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -sudo: false -language: go -go: - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Skip. -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/kisielk/gotool/LEGAL b/vendor/github.com/kisielk/gotool/LEGAL deleted file mode 100644 index 72b859cd62..0000000000 --- a/vendor/github.com/kisielk/gotool/LEGAL +++ /dev/null @@ -1,32 +0,0 @@ -All the files in this distribution are covered under either the MIT -license (see the file LICENSE) except some files mentioned below. - -match.go, match_test.go: - - Copyright (c) 2009 The Go Authors. All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/kisielk/gotool/LICENSE b/vendor/github.com/kisielk/gotool/LICENSE deleted file mode 100644 index 1cbf651e2f..0000000000 --- a/vendor/github.com/kisielk/gotool/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2013 Kamil Kisiel - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/kisielk/gotool/README.md b/vendor/github.com/kisielk/gotool/README.md deleted file mode 100644 index 6e4e92b2f6..0000000000 --- a/vendor/github.com/kisielk/gotool/README.md +++ /dev/null @@ -1,6 +0,0 @@ -gotool -====== -[![GoDoc](https://godoc.org/github.com/kisielk/gotool?status.svg)](https://godoc.org/github.com/kisielk/gotool) -[![Build Status](https://travis-ci.org/kisielk/gotool.svg?branch=master)](https://travis-ci.org/kisielk/gotool) - -Package gotool contains utility functions used to implement the standard "cmd/go" tool, provided as a convenience to developers who want to write tools with similar semantics. 
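For context on the gotool removal above: the package's entry point was ImportPaths, which expanded cmd/go-style package patterns ("all", "std", "cmd", and paths containing the "..." wildcard) into concrete import paths, as documented in the tool.go deletion further down. A minimal, illustrative sketch of how a caller used the now-removed API:

```go
package main

import (
	"fmt"

	"github.com/kisielk/gotool"
)

func main() {
	// ImportPaths expands wildcards against the default build context.
	// Relative patterns are not converted to full import paths
	// ("./..." yields "./foo", not a GOPATH-rooted path), and an
	// empty argument list yields ["."].
	for _, pkg := range gotool.ImportPaths([]string{"./...", "std"}) {
		fmt.Println(pkg)
	}
}
```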
diff --git a/vendor/github.com/kisielk/gotool/go13.go b/vendor/github.com/kisielk/gotool/go13.go deleted file mode 100644 index 2dd9b3fdf0..0000000000 --- a/vendor/github.com/kisielk/gotool/go13.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.4 - -package gotool - -import ( - "go/build" - "path/filepath" - "runtime" -) - -var gorootSrc = filepath.Join(runtime.GOROOT(), "src", "pkg") - -func shouldIgnoreImport(p *build.Package) bool { - return true -} diff --git a/vendor/github.com/kisielk/gotool/go14-15.go b/vendor/github.com/kisielk/gotool/go14-15.go deleted file mode 100644 index aa99a32270..0000000000 --- a/vendor/github.com/kisielk/gotool/go14-15.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build go1.4,!go1.6 - -package gotool - -import ( - "go/build" - "path/filepath" - "runtime" -) - -var gorootSrc = filepath.Join(runtime.GOROOT(), "src") - -func shouldIgnoreImport(p *build.Package) bool { - return true -} diff --git a/vendor/github.com/kisielk/gotool/go16-18.go b/vendor/github.com/kisielk/gotool/go16-18.go deleted file mode 100644 index f25cec14a8..0000000000 --- a/vendor/github.com/kisielk/gotool/go16-18.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build go1.6,!go1.9 - -package gotool - -import ( - "go/build" - "path/filepath" - "runtime" -) - -var gorootSrc = filepath.Join(runtime.GOROOT(), "src") - -func shouldIgnoreImport(p *build.Package) bool { - return p == nil || len(p.InvalidGoFiles) == 0 -} diff --git a/vendor/github.com/kisielk/gotool/internal/load/path.go b/vendor/github.com/kisielk/gotool/internal/load/path.go deleted file mode 100644 index 74e15b9d32..0000000000 --- a/vendor/github.com/kisielk/gotool/internal/load/path.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package load - -import ( - "strings" -) - -// hasPathPrefix reports whether the path s begins with the -// elements in prefix. -func hasPathPrefix(s, prefix string) bool { - switch { - default: - return false - case len(s) == len(prefix): - return s == prefix - case len(s) > len(prefix): - if prefix != "" && prefix[len(prefix)-1] == '/' { - return strings.HasPrefix(s, prefix) - } - return s[len(prefix)] == '/' && s[:len(prefix)] == prefix - } -} diff --git a/vendor/github.com/kisielk/gotool/internal/load/pkg.go b/vendor/github.com/kisielk/gotool/internal/load/pkg.go deleted file mode 100644 index b937ede759..0000000000 --- a/vendor/github.com/kisielk/gotool/internal/load/pkg.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -// Package load loads packages. -package load - -import ( - "strings" -) - -// isStandardImportPath reports whether $GOROOT/src/path should be considered -// part of the standard distribution. For historical reasons we allow people to add -// their own code to $GOROOT instead of using $GOPATH, but we assume that -// code will start with a domain name (dot in the first element). 
-func isStandardImportPath(path string) bool { - i := strings.Index(path, "/") - if i < 0 { - i = len(path) - } - elem := path[:i] - return !strings.Contains(elem, ".") -} diff --git a/vendor/github.com/kisielk/gotool/internal/load/search.go b/vendor/github.com/kisielk/gotool/internal/load/search.go deleted file mode 100644 index 17ed62ddae..0000000000 --- a/vendor/github.com/kisielk/gotool/internal/load/search.go +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package load - -import ( - "fmt" - "go/build" - "log" - "os" - "path" - "path/filepath" - "regexp" - "strings" -) - -// Context specifies values for operation of ImportPaths that would -// otherwise come from cmd/go/internal/cfg package. -// -// This is a construct added for gotool purposes and doesn't have -// an equivalent upstream in cmd/go. -type Context struct { - // BuildContext is the build context to use. - BuildContext build.Context - - // GOROOTsrc is the location of the src directory in GOROOT. - // At this time, it's used only in MatchPackages to skip - // GOOROOT/src entry from BuildContext.SrcDirs output. - GOROOTsrc string -} - -// allPackages returns all the packages that can be found -// under the $GOPATH directories and $GOROOT matching pattern. -// The pattern is either "all" (all packages), "std" (standard packages), -// "cmd" (standard commands), or a path including "...". -func (c *Context) allPackages(pattern string) []string { - pkgs := c.MatchPackages(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -// allPackagesInFS is like allPackages but is passed a pattern -// beginning ./ or ../, meaning it should scan the tree rooted -// at the given directory. There are ... in the pattern too. -func (c *Context) allPackagesInFS(pattern string) []string { - pkgs := c.MatchPackagesInFS(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -// MatchPackages returns a list of package paths matching pattern -// (see go help packages for pattern syntax). -func (c *Context) MatchPackages(pattern string) []string { - match := func(string) bool { return true } - treeCanMatch := func(string) bool { return true } - if !IsMetaPackage(pattern) { - match = matchPattern(pattern) - treeCanMatch = treeCanMatchPattern(pattern) - } - - have := map[string]bool{ - "builtin": true, // ignore pseudo-package that exists only for documentation - } - if !c.BuildContext.CgoEnabled { - have["runtime/cgo"] = true // ignore during walk - } - var pkgs []string - - for _, src := range c.BuildContext.SrcDirs() { - if (pattern == "std" || pattern == "cmd") && src != c.GOROOTsrc { - continue - } - src = filepath.Clean(src) + string(filepath.Separator) - root := src - if pattern == "cmd" { - root += "cmd" + string(filepath.Separator) - } - filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil || path == src { - return nil - } - - want := true - // Avoid .foo, _foo, and testdata directory trees. - _, elem := filepath.Split(path) - if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { - want = false - } - - name := filepath.ToSlash(path[len(src):]) - if pattern == "std" && (!isStandardImportPath(name) || name == "cmd") { - // The name "std" is only the standard library. 
- // If the name is cmd, it's the root of the command tree. - want = false - } - if !treeCanMatch(name) { - want = false - } - - if !fi.IsDir() { - if fi.Mode()&os.ModeSymlink != 0 && want { - if target, err := os.Stat(path); err == nil && target.IsDir() { - fmt.Fprintf(os.Stderr, "warning: ignoring symlink %s\n", path) - } - } - return nil - } - if !want { - return filepath.SkipDir - } - - if have[name] { - return nil - } - have[name] = true - if !match(name) { - return nil - } - pkg, err := c.BuildContext.ImportDir(path, 0) - if err != nil { - if _, noGo := err.(*build.NoGoError); noGo { - return nil - } - } - - // If we are expanding "cmd", skip main - // packages under cmd/vendor. At least as of - // March, 2017, there is one there for the - // vendored pprof tool. - if pattern == "cmd" && strings.HasPrefix(pkg.ImportPath, "cmd/vendor") && pkg.Name == "main" { - return nil - } - - pkgs = append(pkgs, name) - return nil - }) - } - return pkgs -} - -// MatchPackagesInFS returns a list of package paths matching pattern, -// which must begin with ./ or ../ -// (see go help packages for pattern syntax). -func (c *Context) MatchPackagesInFS(pattern string) []string { - // Find directory to begin the scan. - // Could be smarter but this one optimization - // is enough for now, since ... is usually at the - // end of a path. - i := strings.Index(pattern, "...") - dir, _ := path.Split(pattern[:i]) - - // pattern begins with ./ or ../. - // path.Clean will discard the ./ but not the ../. - // We need to preserve the ./ for pattern matching - // and in the returned import paths. - prefix := "" - if strings.HasPrefix(pattern, "./") { - prefix = "./" - } - match := matchPattern(pattern) - - var pkgs []string - filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() { - return nil - } - if path == dir { - // filepath.Walk starts at dir and recurses. For the recursive case, - // the path is the result of filepath.Join, which calls filepath.Clean. - // The initial case is not Cleaned, though, so we do this explicitly. - // - // This converts a path like "./io/" to "io". Without this step, running - // "cd $GOROOT/src; go list ./io/..." would incorrectly skip the io - // package, because prepending the prefix "./" to the unclean path would - // result in "././io", and match("././io") returns false. - path = filepath.Clean(path) - } - - // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". - _, elem := filepath.Split(path) - dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." - if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := prefix + filepath.ToSlash(path) - if !match(name) { - return nil - } - - // We keep the directory if we can import it, or if we can't import it - // due to invalid Go source files. This means that directories containing - // parse errors will be built (and fail) instead of being silently skipped - // as not matching the pattern. Go 1.5 and earlier skipped, but that - // behavior means people miss serious mistakes. - // See golang.org/issue/11407. - if p, err := c.BuildContext.ImportDir(path, 0); err != nil && (p == nil || len(p.InvalidGoFiles) == 0) { - if _, noGo := err.(*build.NoGoError); !noGo { - log.Print(err) - } - return nil - } - pkgs = append(pkgs, name) - return nil - }) - return pkgs -} - -// treeCanMatchPattern(pattern)(name) reports whether -// name or children of name can possibly match pattern. 
-// Pattern is the same limited glob accepted by matchPattern. -func treeCanMatchPattern(pattern string) func(name string) bool { - wildCard := false - if i := strings.Index(pattern, "..."); i >= 0 { - wildCard = true - pattern = pattern[:i] - } - return func(name string) bool { - return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || - wildCard && strings.HasPrefix(name, pattern) - } -} - -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -// Unfortunately, there are two special cases. Quoting "go help packages": -// -// First, /... at the end of the pattern can match an empty string, -// so that net/... matches both net and packages in its subdirectories, like net/http. -// Second, any slash-separted pattern element containing a wildcard never -// participates in a match of the "vendor" element in the path of a vendored -// package, so that ./... does not match packages in subdirectories of -// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. -// Note, however, that a directory named vendor that itself contains code -// is not a vendored package: cmd/vendor would be a command named vendor, -// and the pattern cmd/... matches it. -func matchPattern(pattern string) func(name string) bool { - // Convert pattern to regular expression. - // The strategy for the trailing /... is to nest it in an explicit ? expression. - // The strategy for the vendor exclusion is to change the unmatchable - // vendor strings to a disallowed code point (vendorChar) and to use - // "(anything but that codepoint)*" as the implementation of the ... wildcard. - // This is a bit complicated but the obvious alternative, - // namely a hand-written search like in most shell glob matchers, - // is too easy to make accidentally exponential. - // Using package regexp guarantees linear-time matching. - - const vendorChar = "\x00" - - if strings.Contains(pattern, vendorChar) { - return func(name string) bool { return false } - } - - re := regexp.QuoteMeta(pattern) - re = replaceVendor(re, vendorChar) - switch { - case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): - re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` - case re == vendorChar+`/\.\.\.`: - re = `(/vendor|/` + vendorChar + `/\.\.\.)` - case strings.HasSuffix(re, `/\.\.\.`): - re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` - } - re = strings.Replace(re, `\.\.\.`, `[^`+vendorChar+`]*`, -1) - - reg := regexp.MustCompile(`^` + re + `$`) - - return func(name string) bool { - if strings.Contains(name, vendorChar) { - return false - } - return reg.MatchString(replaceVendor(name, vendorChar)) - } -} - -// replaceVendor returns the result of replacing -// non-trailing vendor path elements in x with repl. -func replaceVendor(x, repl string) string { - if !strings.Contains(x, "vendor") { - return x - } - elem := strings.Split(x, "/") - for i := 0; i < len(elem)-1; i++ { - if elem[i] == "vendor" { - elem[i] = repl - } - } - return strings.Join(elem, "/") -} - -// ImportPaths returns the import paths to use for the given command line. -func (c *Context) ImportPaths(args []string) []string { - args = c.ImportPathsNoDotExpansion(args) - var out []string - for _, a := range args { - if strings.Contains(a, "...") { - if build.IsLocalImport(a) { - out = append(out, c.allPackagesInFS(a)...) - } else { - out = append(out, c.allPackages(a)...) 
- } - continue - } - out = append(out, a) - } - return out -} - -// ImportPathsNoDotExpansion returns the import paths to use for the given -// command line, but it does no ... expansion. -func (c *Context) ImportPathsNoDotExpansion(args []string) []string { - if len(args) == 0 { - return []string{"."} - } - var out []string - for _, a := range args { - // Arguments are supposed to be import paths, but - // as a courtesy to Windows developers, rewrite \ to / - // in command-line arguments. Handles .\... and so on. - if filepath.Separator == '\\' { - a = strings.Replace(a, `\`, `/`, -1) - } - - // Put argument in canonical form, but preserve leading ./. - if strings.HasPrefix(a, "./") { - a = "./" + path.Clean(a) - if a == "./." { - a = "." - } - } else { - a = path.Clean(a) - } - if IsMetaPackage(a) { - out = append(out, c.allPackages(a)...) - continue - } - out = append(out, a) - } - return out -} - -// IsMetaPackage checks if name is a reserved package name that expands to multiple packages. -func IsMetaPackage(name string) bool { - return name == "std" || name == "cmd" || name == "all" -} diff --git a/vendor/github.com/kisielk/gotool/match.go b/vendor/github.com/kisielk/gotool/match.go deleted file mode 100644 index 4dbdbff47f..0000000000 --- a/vendor/github.com/kisielk/gotool/match.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2009 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build go1.9 - -package gotool - -import ( - "path/filepath" - - "github.com/kisielk/gotool/internal/load" -) - -// importPaths returns the import paths to use for the given command line. -func (c *Context) importPaths(args []string) []string { - lctx := load.Context{ - BuildContext: c.BuildContext, - GOROOTsrc: c.joinPath(c.BuildContext.GOROOT, "src"), - } - return lctx.ImportPaths(args) -} - -// joinPath calls c.BuildContext.JoinPath (if not nil) or else filepath.Join. -// -// It's a copy of the unexported build.Context.joinPath helper. 
-func (c *Context) joinPath(elem ...string) string { - if f := c.BuildContext.JoinPath; f != nil { - return f(elem...) - } - return filepath.Join(elem...) -} diff --git a/vendor/github.com/kisielk/gotool/match18.go b/vendor/github.com/kisielk/gotool/match18.go deleted file mode 100644 index 6d6b1368c8..0000000000 --- a/vendor/github.com/kisielk/gotool/match18.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright (c) 2009 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !go1.9 - -package gotool - -import ( - "fmt" - "go/build" - "log" - "os" - "path" - "path/filepath" - "regexp" - "strings" -) - -// This file contains code from the Go distribution. - -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -func matchPattern(pattern string) func(name string) bool { - re := regexp.QuoteMeta(pattern) - re = strings.Replace(re, `\.\.\.`, `.*`, -1) - // Special case: foo/... matches foo too. - if strings.HasSuffix(re, `/.*`) { - re = re[:len(re)-len(`/.*`)] + `(/.*)?` - } - reg := regexp.MustCompile(`^` + re + `$`) - return reg.MatchString -} - -// matchPackages returns a list of package paths matching pattern -// (see go help packages for pattern syntax). 
-func (c *Context) matchPackages(pattern string) []string { - match := func(string) bool { return true } - treeCanMatch := func(string) bool { return true } - if !isMetaPackage(pattern) { - match = matchPattern(pattern) - treeCanMatch = treeCanMatchPattern(pattern) - } - - have := map[string]bool{ - "builtin": true, // ignore pseudo-package that exists only for documentation - } - if !c.BuildContext.CgoEnabled { - have["runtime/cgo"] = true // ignore during walk - } - var pkgs []string - - for _, src := range c.BuildContext.SrcDirs() { - if (pattern == "std" || pattern == "cmd") && src != gorootSrc { - continue - } - src = filepath.Clean(src) + string(filepath.Separator) - root := src - if pattern == "cmd" { - root += "cmd" + string(filepath.Separator) - } - filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() || path == src { - return nil - } - - // Avoid .foo, _foo, and testdata directory trees. - _, elem := filepath.Split(path) - if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := filepath.ToSlash(path[len(src):]) - if pattern == "std" && (!isStandardImportPath(name) || name == "cmd") { - // The name "std" is only the standard library. - // If the name is cmd, it's the root of the command tree. - return filepath.SkipDir - } - if !treeCanMatch(name) { - return filepath.SkipDir - } - if have[name] { - return nil - } - have[name] = true - if !match(name) { - return nil - } - _, err = c.BuildContext.ImportDir(path, 0) - if err != nil { - if _, noGo := err.(*build.NoGoError); noGo { - return nil - } - } - pkgs = append(pkgs, name) - return nil - }) - } - return pkgs -} - -// importPathsNoDotExpansion returns the import paths to use for the given -// command line, but it does no ... expansion. -func (c *Context) importPathsNoDotExpansion(args []string) []string { - if len(args) == 0 { - return []string{"."} - } - var out []string - for _, a := range args { - // Arguments are supposed to be import paths, but - // as a courtesy to Windows developers, rewrite \ to / - // in command-line arguments. Handles .\... and so on. - if filepath.Separator == '\\' { - a = strings.Replace(a, `\`, `/`, -1) - } - - // Put argument in canonical form, but preserve leading ./. - if strings.HasPrefix(a, "./") { - a = "./" + path.Clean(a) - if a == "./." { - a = "." - } - } else { - a = path.Clean(a) - } - if isMetaPackage(a) { - out = append(out, c.allPackages(a)...) - continue - } - out = append(out, a) - } - return out -} - -// importPaths returns the import paths to use for the given command line. -func (c *Context) importPaths(args []string) []string { - args = c.importPathsNoDotExpansion(args) - var out []string - for _, a := range args { - if strings.Contains(a, "...") { - if build.IsLocalImport(a) { - out = append(out, c.allPackagesInFS(a)...) - } else { - out = append(out, c.allPackages(a)...) - } - continue - } - out = append(out, a) - } - return out -} - -// allPackages returns all the packages that can be found -// under the $GOPATH directories and $GOROOT matching pattern. -// The pattern is either "all" (all packages), "std" (standard packages), -// "cmd" (standard commands), or a path including "...". 
-func (c *Context) allPackages(pattern string) []string { - pkgs := c.matchPackages(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -// allPackagesInFS is like allPackages but is passed a pattern -// beginning ./ or ../, meaning it should scan the tree rooted -// at the given directory. There are ... in the pattern too. -func (c *Context) allPackagesInFS(pattern string) []string { - pkgs := c.matchPackagesInFS(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -// matchPackagesInFS returns a list of package paths matching pattern, -// which must begin with ./ or ../ -// (see go help packages for pattern syntax). -func (c *Context) matchPackagesInFS(pattern string) []string { - // Find directory to begin the scan. - // Could be smarter but this one optimization - // is enough for now, since ... is usually at the - // end of a path. - i := strings.Index(pattern, "...") - dir, _ := path.Split(pattern[:i]) - - // pattern begins with ./ or ../. - // path.Clean will discard the ./ but not the ../. - // We need to preserve the ./ for pattern matching - // and in the returned import paths. - prefix := "" - if strings.HasPrefix(pattern, "./") { - prefix = "./" - } - match := matchPattern(pattern) - - var pkgs []string - filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() { - return nil - } - if path == dir { - // filepath.Walk starts at dir and recurses. For the recursive case, - // the path is the result of filepath.Join, which calls filepath.Clean. - // The initial case is not Cleaned, though, so we do this explicitly. - // - // This converts a path like "./io/" to "io". Without this step, running - // "cd $GOROOT/src; go list ./io/..." would incorrectly skip the io - // package, because prepending the prefix "./" to the unclean path would - // result in "././io", and match("././io") returns false. - path = filepath.Clean(path) - } - - // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". - _, elem := filepath.Split(path) - dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." - if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := prefix + filepath.ToSlash(path) - if !match(name) { - return nil - } - - // We keep the directory if we can import it, or if we can't import it - // due to invalid Go source files. This means that directories containing - // parse errors will be built (and fail) instead of being silently skipped - // as not matching the pattern. Go 1.5 and earlier skipped, but that - // behavior means people miss serious mistakes. - // See golang.org/issue/11407. - if p, err := c.BuildContext.ImportDir(path, 0); err != nil && shouldIgnoreImport(p) { - if _, noGo := err.(*build.NoGoError); !noGo { - log.Print(err) - } - return nil - } - pkgs = append(pkgs, name) - return nil - }) - return pkgs -} - -// isMetaPackage checks if name is a reserved package name that expands to multiple packages. -func isMetaPackage(name string) bool { - return name == "std" || name == "cmd" || name == "all" -} - -// isStandardImportPath reports whether $GOROOT/src/path should be considered -// part of the standard distribution. For historical reasons we allow people to add -// their own code to $GOROOT instead of using $GOPATH, but we assume that -// code will start with a domain name (dot in the first element). 
-func isStandardImportPath(path string) bool { - i := strings.Index(path, "/") - if i < 0 { - i = len(path) - } - elem := path[:i] - return !strings.Contains(elem, ".") -} - -// hasPathPrefix reports whether the path s begins with the -// elements in prefix. -func hasPathPrefix(s, prefix string) bool { - switch { - default: - return false - case len(s) == len(prefix): - return s == prefix - case len(s) > len(prefix): - if prefix != "" && prefix[len(prefix)-1] == '/' { - return strings.HasPrefix(s, prefix) - } - return s[len(prefix)] == '/' && s[:len(prefix)] == prefix - } -} - -// treeCanMatchPattern(pattern)(name) reports whether -// name or children of name can possibly match pattern. -// Pattern is the same limited glob accepted by matchPattern. -func treeCanMatchPattern(pattern string) func(name string) bool { - wildCard := false - if i := strings.Index(pattern, "..."); i >= 0 { - wildCard = true - pattern = pattern[:i] - } - return func(name string) bool { - return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || - wildCard && strings.HasPrefix(name, pattern) - } -} diff --git a/vendor/github.com/kisielk/gotool/tool.go b/vendor/github.com/kisielk/gotool/tool.go deleted file mode 100644 index c7409e11e6..0000000000 --- a/vendor/github.com/kisielk/gotool/tool.go +++ /dev/null @@ -1,48 +0,0 @@ -// Package gotool contains utility functions used to implement the standard -// "cmd/go" tool, provided as a convenience to developers who want to write -// tools with similar semantics. -package gotool - -import "go/build" - -// Export functions here to make it easier to keep the implementations up to date with upstream. - -// DefaultContext is the default context that uses build.Default. -var DefaultContext = Context{ - BuildContext: build.Default, -} - -// A Context specifies the supporting context. -type Context struct { - // BuildContext is the build.Context that is used when computing import paths. - BuildContext build.Context -} - -// ImportPaths returns the import paths to use for the given command line. -// -// The path "all" is expanded to all packages in $GOPATH and $GOROOT. -// The path "std" is expanded to all packages in the Go standard library. -// The path "cmd" is expanded to all Go standard commands. -// The string "..." is treated as a wildcard within a path. -// When matching recursively, directories are ignored if they are prefixed with -// a dot or an underscore (such as ".foo" or "_foo"), or are named "testdata". -// Relative import paths are not converted to full import paths. -// If args is empty, a single element "." is returned. -func (c *Context) ImportPaths(args []string) []string { - return c.importPaths(args) -} - -// ImportPaths returns the import paths to use for the given command line -// using default context. -// -// The path "all" is expanded to all packages in $GOPATH and $GOROOT. -// The path "std" is expanded to all packages in the Go standard library. -// The path "cmd" is expanded to all Go standard commands. -// The string "..." is treated as a wildcard within a path. -// When matching recursively, directories are ignored if they are prefixed with -// a dot or an underscore (such as ".foo" or "_foo"), or are named "testdata". -// Relative import paths are not converted to full import paths. -// If args is empty, a single element "." is returned. 
-func ImportPaths(args []string) []string { - return DefaultContext.importPaths(args) -} diff --git a/vendor/github.com/kkHAIKE/contextcheck/.gitignore b/vendor/github.com/kkHAIKE/contextcheck/.gitignore new file mode 100644 index 0000000000..fc1b400c8a --- /dev/null +++ b/vendor/github.com/kkHAIKE/contextcheck/.gitignore @@ -0,0 +1,18 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +.idea +.DS_Store diff --git a/vendor/github.com/kkHAIKE/contextcheck/LICENSE b/vendor/github.com/kkHAIKE/contextcheck/LICENSE new file mode 100644 index 0000000000..99e1c482a1 --- /dev/null +++ b/vendor/github.com/kkHAIKE/contextcheck/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2021 sylvia.wang
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/kkHAIKE/contextcheck/Makefile b/vendor/github.com/kkHAIKE/contextcheck/Makefile
new file mode 100644
index 0000000000..9321e9de39
--- /dev/null
+++ b/vendor/github.com/kkHAIKE/contextcheck/Makefile
@@ -0,0 +1,5 @@
+build:
+	@GO111MODULE=on go build -ldflags '-s -w' -o contextcheck ./cmd/contextcheck/main.go
+
+install:
+	@GO111MODULE=on go install -ldflags '-s -w' ./cmd/contextcheck
diff --git a/vendor/github.com/kkHAIKE/contextcheck/README.md b/vendor/github.com/kkHAIKE/contextcheck/README.md
new file mode 100644
index 0000000000..2cc7b2e489
--- /dev/null
+++ b/vendor/github.com/kkHAIKE/contextcheck/README.md
@@ -0,0 +1,157 @@
+[![CircleCI](https://circleci.com/gh/sylvia7788/contextcheck.svg?style=svg)](https://circleci.com/gh/sylvia7788/contextcheck)
+
+
+# contextcheck
+
+`contextcheck` is a static analysis tool that checks whether a function uses a non-inherited context, which would break the call chain.
+
+For example:
+
+```go
+func call1(ctx context.Context) {
+	...
+
+	ctx = getNewCtx(ctx)
+	call2(ctx) // OK
+
+	call2(context.Background()) // Non-inherited new context, use function like `context.WithXXX` instead
+
+	call3() // Function `call3` should pass the context parameter
+	call4() // Function `call4->call3` should pass the context parameter
+	...
+}
+
+func call2(ctx context.Context) {
+	...
+}
+
+func call3() {
+	ctx := context.TODO()
+	call2(ctx)
+}
+
+func call4() {
+	call3()
+}
+
+
+// if you want a non-inherited ctx, use a helper like this
+func getNewCtx(ctx context.Context) (newCtx context.Context) {
+	...
+	return
+}
+
+/* ---------- check net/http.HandleFunc ---------- */
+
+func call5(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+}
+
+func call6(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	call5(ctx, w, r)
+	call5(context.Background(), w, r) // Non-inherited new context, use function like `context.WithXXX` or `r.Context` instead
+}
+
+func call7(in bool, w http.ResponseWriter, r *http.Request) {
+	call5(r.Context(), w, r)
+	call5(context.Background(), w, r)
+}
+
+func call8() {
+	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		call5(r.Context(), w, r)
+		call5(context.Background(), w, r) // Non-inherited new context, use function like `context.WithXXX` or `r.Context` instead
+
+		call6(w, r)
+
+		// call7 should be like `func call7(ctx context.Context, in bool, w http.ResponseWriter, r *http.Request)`
+		call7(true, w, r) // Function `call7` should pass the context parameter
+	})
+}
+```
+
+## Tips
+### When you need to break ctx inheritance
+For example: [issue](https://github.com/kkHAIKE/contextcheck/issues/2).
+
+```go
+func call1(ctx context.Context) {
+	...
+
+	newCtx, cancel := NoInheritCancel(ctx)
+	defer cancel()
+
+	call2(newCtx)
+	...
+}
+
+func call2(ctx context.Context) {
+	...
+}
+
+func NoInheritCancel(_ context.Context) (context.Context, context.CancelFunc) {
+	return context.WithCancel(context.Background())
+}
+```
+
+### Skipping the check for a specific function
+You can add `// nolint: contextcheck` to a function declaration's doc comment to skip this linter in false-positive cases.
+
+```go
+// nolint: contextcheck
+func call1() {
+	doSomeThing(context.Background()) // with nolint, no issue is reported for this call
+}
+
+func call2(ctx context.Context) {
+	call1()
+}
+
+func call3() {
+	call2(context.Background())
+}
+```
+
+### Force-marking a function as having a server-side http.Request parameter
+By default, a function is marked only if it is an http.HandlerFunc or uses r.Context().
+
+```go
+// @contextcheck(req_has_ctx)
+func writeErr(w http.ResponseWriter, r *http.Request, err error) {
+	doSomeThing(r.Context())
+}
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	...
+	if err != nil {
+		writeErr(w, r, err)
+		return
+	}
+	...
+}
+```
+
+## Installation
+
+You can install `contextcheck` with the `go get` command:
+
+```bash
+$ go get -u github.com/kkHAIKE/contextcheck
+```
+
+or build it yourself:
+
+```bash
+$ make build
+$ make install
+```
+
+## Usage
+
+Invoke `contextcheck` with your package path:
+
+```bash
+$ contextcheck ./...
+$ # or
+$ go vet -vettool=`which contextcheck` ./...
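+$ # or via golangci-lint -- assuming a release that bundles contextcheck
+$ # (recent golangci-lint versions do):
+$ golangci-lint run --disable-all -E contextcheck ./...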
+``` diff --git a/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go b/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go new file mode 100644 index 0000000000..c9ad0101fe --- /dev/null +++ b/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go @@ -0,0 +1,835 @@ +package contextcheck + +import ( + "go/ast" + "go/types" + "regexp" + "strconv" + "strings" + "sync" + + "github.com/gostaticanalysis/analysisutil" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/go/ssa" +) + +type Configuration struct { + DisableFact bool +} + +var pkgprefix string + +func NewAnalyzer(cfg Configuration) *analysis.Analyzer { + analyzer := &analysis.Analyzer{ + Name: "contextcheck", + Doc: "check whether the function uses a non-inherited context", + Run: NewRun(nil, cfg.DisableFact), + Requires: []*analysis.Analyzer{ + buildssa.Analyzer, + }, + } + analyzer.Flags.StringVar(&pkgprefix, "pkgprefix", "", "filter init pkgs (only for cmd)") + + if !cfg.DisableFact { + analyzer.FactTypes = append(analyzer.FactTypes, (*ctxFact)(nil)) + } + + return analyzer +} + +const ( + ctxPkg = "context" + ctxName = "Context" + + httpPkg = "net/http" + httpRes = "ResponseWriter" + httpReq = "Request" +) + +const ( + CtxIn int = 1 << iota // ctx in function's param + CtxOut // ctx in function's results + CtxInField // ctx in function's field param +) + +type entryType int + +const ( + EntryNone entryType = iota + EntryNormal // without ctx in + EntryWithCtx // has ctx in + EntryWithHttpHandler // is http handler +) + +var ( + pkgFactMap = make(map[*types.Package]ctxFact) + pkgFactMu sync.RWMutex +) + +type resInfo struct { + Valid bool + Funcs []string + + // reuse for doc + ReqCtx bool + Skip bool + + EntryType entryType +} + +type ctxFact map[string]resInfo + +func (*ctxFact) String() string { return "ctxCheck" } +func (*ctxFact) AFact() {} + +type runner struct { + pass *analysis.Pass + ctxTyp *types.Named + ctxPTyp *types.Pointer + skipFile map[*ast.File]bool + + httpResTyps []types.Type + httpReqTyps []types.Type + + currentFact ctxFact + disableFact bool +} + +func getPkgRoot(pkg string) string { + arr := strings.Split(pkg, "/") + if len(arr) < 3 { + return arr[0] + } + if strings.IndexByte(arr[0], '.') == -1 { + return arr[0] + } + return strings.Join(arr[:3], "/") +} + +func NewRun(pkgs []*packages.Package, disableFact bool) func(pass *analysis.Pass) (interface{}, error) { + m := make(map[string]bool) + for _, pkg := range pkgs { + m[getPkgRoot(pkg.PkgPath)] = true + } + return func(pass *analysis.Pass) (interface{}, error) { + // skip different repo + if len(m) > 0 && !m[getPkgRoot(pass.Pkg.Path())] { + return nil, nil + } + if len(m) == 0 && pkgprefix != "" && !strings.HasPrefix(pass.Pkg.Path(), pkgprefix) { + return nil, nil + } + + r := &runner{disableFact: disableFact} + r.run(pass) + return nil, nil + } +} + +func (r *runner) run(pass *analysis.Pass) { + r.pass = pass + pssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + funcs := pssa.SrcFuncs + + // collect ctx obj + var ok bool + r.ctxTyp, r.ctxPTyp, ok = r.getRequiedType(pssa, ctxPkg, ctxName) + if !ok { + return + } + + // collect http obj + r.collectHttpTyps(pssa) + + r.skipFile = make(map[*ast.File]bool) + r.currentFact = make(ctxFact) + + type entryInfo struct { + f *ssa.Function // entryfunc + tp entryType // entrytype + } + var tmpFuncs []entryInfo + for _, f := range funcs { + // skip checked function + key := f.RelString(nil) + if _, ok := 
r.currentFact[key]; ok {
+			continue
+		}
+
+		if entryType := r.checkIsEntry(f); entryType == EntryNormal {
+			if _, ok := r.getValue(key, f); ok {
+				continue
+			}
+			// record the result of normal function
+			checkingMap := make(map[string]bool)
+			checkingMap[key] = true
+			r.setFact(key, r.checkFuncWithoutCtx(f, checkingMap), f.Name())
+			continue
+		} else if entryType == EntryWithCtx || entryType == EntryWithHttpHandler {
+			tmpFuncs = append(tmpFuncs, entryInfo{f: f, tp: entryType})
+		}
+	}
+
+	for _, v := range tmpFuncs {
+		r.checkFuncWithCtx(v.f, v.tp)
+	}
+
+	if len(r.currentFact) > 0 {
+		if r.disableFact {
+			setPkgFact(pass.Pkg, r.currentFact)
+		} else {
+			pass.ExportPackageFact(&r.currentFact)
+		}
+	}
+}
+
+func (r *runner) getRequiedType(pssa *buildssa.SSA, path, name string) (obj *types.Named, pobj *types.Pointer, ok bool) {
+	pkg := pssa.Pkg.Prog.ImportedPackage(path)
+	if pkg == nil {
+		return
+	}
+
+	objTyp := pkg.Type(name)
+	if objTyp == nil {
+		return
+	}
+	obj, ok = objTyp.Object().Type().(*types.Named)
+	if !ok {
+		return
+	}
+	pobj = types.NewPointer(obj)
+
+	return
+}
+
+func (r *runner) collectHttpTyps(pssa *buildssa.SSA) {
+	objRes, _, ok := r.getRequiedType(pssa, httpPkg, httpRes)
+	if ok {
+		r.httpResTyps = append(r.httpResTyps, objRes)
+	}
+
+	_, pobjReq, ok := r.getRequiedType(pssa, httpPkg, httpReq)
+	if ok {
+		r.httpReqTyps = append(r.httpReqTyps, pobjReq)
+	}
+}
+
+func (r *runner) noImportedContextAndHttp(f *ssa.Function) (ret bool) {
+	if !f.Pos().IsValid() {
+		return false
+	}
+
+	file := analysisutil.File(r.pass, f.Pos())
+	if file == nil {
+		return false
+	}
+
+	if skip, has := r.skipFile[file]; has {
+		return skip
+	}
+	defer func() {
+		r.skipFile[file] = ret
+	}()
+
+	for _, impt := range file.Imports {
+		path, err := strconv.Unquote(impt.Path.Value)
+		if err != nil {
+			continue
+		}
+		path = analysisutil.RemoveVendor(path)
+		if path == ctxPkg || path == httpPkg {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (r *runner) checkIsEntry(f *ssa.Function) (ret entryType) {
+	// if r.noImportedContextAndHttp(f) {
+	// 	return EntryNormal
+	// }
+	key := "entry:" + f.RelString(nil)
+	res, ok := r.getValue(key, f)
+	if ok {
+		return res.EntryType
+	}
+	defer func() {
+		r.currentFact[key] = resInfo{EntryType: ret}
+	}()
+
+	ctxIn, ctxOut := r.checkIsCtx(f)
+	if ctxOut {
+		// skip the function which generates ctx
+		return EntryNone
+	} else if ctxIn {
+		// has ctx in, ignore *http.Request.Context()
+		return EntryWithCtx
+	}
+
+	reqctx, skip := r.docFlag(f)
+
+	// check is `func handler(w http.ResponseWriter, r *http.Request) {}`
+	// or use '// @contextcheck(req_has_ctx)'
+	if r.checkIsHttpHandler(f, reqctx) {
+		return EntryWithHttpHandler
+	}
+
+	if skip {
+		return EntryNone
+	}
+
+	return EntryNormal
+}
+
+func (r *runner) docFlag(f *ssa.Function) (reqctx, skip bool) {
+	for _, v := range r.getDocFromFunc(f) {
+		if len(nolintRe.FindString(v.Text)) > 0 && strings.Contains(v.Text, "contextcheck") {
+			skip = true
+		} else if strings.HasPrefix(v.Text, "// @contextcheck(req_has_ctx)") {
+			reqctx = true
+		}
+	}
+	return
+}
+
+var nolintRe = regexp.MustCompile(`^//\s?nolint:`)
+
+func (r *runner) getDocFromFunc(f *ssa.Function) []*ast.Comment {
+	file := analysisutil.File(r.pass, f.Pos())
+	if file == nil {
+		return nil
+	}
+
+	// only support FuncDecl comment
+	var fd *ast.FuncDecl
+	for _, v := range file.Decls {
+		if tmp, ok := v.(*ast.FuncDecl); ok && tmp.Name.Pos() == f.Pos() {
+			fd = tmp
+			break
+		}
+	}
+	if fd == nil || fd.Doc == nil || len(fd.Doc.List) == 0 {
+		return 
nil
+	}
+	return fd.Doc.List
+}
+
+func (r *runner) checkIsCtx(f *ssa.Function) (in, out bool) {
+	// check params
+	tuple := f.Signature.Params()
+	for i := 0; i < tuple.Len(); i++ {
+		if r.isCtxType(tuple.At(i).Type()) {
+			in = true
+			break
+		}
+	}
+
+	// check freevars
+	for _, param := range f.FreeVars {
+		if r.isCtxType(param.Type()) {
+			in = true
+			break
+		}
+	}
+
+	// check results
+	tuple = f.Signature.Results()
+	for i := 0; i < tuple.Len(); i++ {
+		if r.isCtxType(tuple.At(i).Type()) {
+			out = true
+			break
+		}
+	}
+	return
+}
+
+func (r *runner) checkIsHttpHandler(f *ssa.Function, reqctx bool) bool {
+	var hasReq bool
+	tuple := f.Signature.Params()
+	for i := 0; i < tuple.Len(); i++ {
+		if r.isHttpReqType(tuple.At(i).Type()) {
+			hasReq = true
+			break
+		}
+	}
+	if !hasReq {
+		return false
+	}
+	if reqctx {
+		return true
+	}
+
+	// must be `func f(w http.ResponseWriter, r *http.Request) {}`
+	if f.Signature.Results().Len() == 0 && tuple.Len() == 2 &&
+		r.isHttpResType(tuple.At(0).Type()) && r.isHttpReqType(tuple.At(1).Type()) {
+		return true
+	}
+
+	// check if it uses r.Context()
+	return f.Blocks != nil && len(r.getHttpReqCtx(f, true)) > 0
+}
+
+func (r *runner) collectCtxRef(f *ssa.Function, isHttpHandler bool) (refMap map[ssa.Instruction]bool, ok bool) {
+	ok = true
+	refMap = make(map[ssa.Instruction]bool)
+	checkedRefMap := make(map[ssa.Value]bool)
+	storeInstrs := make(map[*ssa.Store]bool)
+	phiInstrs := make(map[*ssa.Phi]bool)
+
+	var checkRefs func(val ssa.Value, fromAddr bool)
+	var checkInstr func(instr ssa.Instruction, fromAddr bool)
+
+	checkRefs = func(val ssa.Value, fromAddr bool) {
+		if val == nil || val.Referrers() == nil {
+			return
+		}
+
+		if checkedRefMap[val] {
+			return
+		}
+		checkedRefMap[val] = true
+
+		for _, instr := range *val.Referrers() {
+			checkInstr(instr, fromAddr)
+		}
+	}
+
+	checkInstr = func(instr ssa.Instruction, fromAddr bool) {
+		switch i := instr.(type) {
+		case ssa.CallInstruction:
+			refMap[i] = true
+			tp := r.getCallInstrCtxType(i)
+			if tp&CtxOut != 0 {
+				// collect referrers of the results
+				checkRefs(i.Value(), false)
+				return
+			}
+		case *ssa.Store:
+			if fromAddr {
+				// collect all stores to judge whether their right-hand values are valid
+				storeInstrs[i] = true
+			} else {
+				checkRefs(i.Addr, true)
+			}
+		case *ssa.UnOp:
+			checkRefs(i, false)
+		case *ssa.MakeClosure:
+			for _, param := range i.Bindings {
+				if r.isCtxType(param.Type()) {
+					refMap[i] = true
+					break
+				}
+			}
+		case *ssa.Extract:
+			// only care about ctx
+			if r.isCtxType(i.Type()) {
+				checkRefs(i, false)
+			}
+		case *ssa.Phi:
+			phiInstrs[i] = true
+			checkRefs(i, false)
+		case *ssa.TypeAssert:
+			// ctx.(*bm.Context)
+		}
+	}
+
+	if isHttpHandler {
+		for _, v := range r.getHttpReqCtx(f, false) {
+			checkRefs(v, false)
+		}
+	} else {
+		for _, param := range f.Params {
+			if r.isCtxType(param.Type()) {
+				checkRefs(param, false)
+			}
+		}
+
+		for _, param := range f.FreeVars {
+			if r.isCtxType(param.Type()) {
+				checkRefs(param, false)
+			}
+		}
+	}
+
+	for instr := range storeInstrs {
+		if !checkedRefMap[instr.Val] {
+			r.pass.Reportf(instr.Pos(), "Non-inherited new context, use function like `context.WithXXX` instead")
+			ok = false
+		}
+	}
+
+	for instr := range phiInstrs {
+		for _, v := range instr.Edges {
+			if !checkedRefMap[v] {
+				r.pass.Reportf(instr.Pos(), "Non-inherited new context, use function like `context.WithXXX` instead")
+				ok = false
+			}
+		}
+	}
+
+	return
+}
+
+func (r *runner) getHttpReqCtx(f *ssa.Function, least1 bool) (rets []ssa.Value) {
+	checkedRefMap := make(map[ssa.Value]bool)
+
+	var 
checkRefs func(val ssa.Value, fromAddr bool) + var checkInstr func(instr ssa.Instruction, fromAddr bool) + + checkRefs = func(val ssa.Value, fromAddr bool) { + if val == nil || val.Referrers() == nil { + return + } + + if checkedRefMap[val] { + return + } + checkedRefMap[val] = true + + for _, instr := range *val.Referrers() { + checkInstr(instr, fromAddr) + } + } + + checkInstr = func(instr ssa.Instruction, fromAddr bool) { + switch i := instr.(type) { + case ssa.CallInstruction: + // r.Context() only has one recv + if len(i.Common().Args) != 1 { + break + } + + // find r.Context() + if r.getCallInstrCtxType(i)&CtxOut != CtxOut { + break + } + + // check is r.Context + f := r.getFunction(instr) + if f == nil || f.Name() != ctxName { + break + } + if f.Signature.Recv() != nil { + // collect the return of r.Context + rets = append(rets, i.Value()) + if least1 { + return + } + } + case *ssa.Store: + if !fromAddr { + checkRefs(i.Addr, true) + } + case *ssa.UnOp: + checkRefs(i, false) + case *ssa.Phi: + checkRefs(i, false) + case *ssa.MakeClosure: + case *ssa.Extract: + // http.Request can only be input + } + } + + for _, param := range f.Params { + if r.isHttpReqType(param.Type()) { + checkRefs(param, false) + } + } + + return +} + +func (r *runner) checkFuncWithCtx(f *ssa.Function, tp entryType) { + isHttpHandler := tp == EntryWithHttpHandler + refMap, ok := r.collectCtxRef(f, isHttpHandler) + if !ok { + return + } + + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + tp, ok := r.getCtxType(instr) + if !ok { + continue + } + + // checked in collectCtxRef, skipped + if tp&CtxOut != 0 { + continue + } + + if tp&CtxIn != 0 { + if !refMap[instr] { + if isHttpHandler { + r.pass.Reportf(instr.Pos(), "Non-inherited new context, use function like `context.WithXXX` or `r.Context` instead") + } else { + r.pass.Reportf(instr.Pos(), "Non-inherited new context, use function like `context.WithXXX` instead") + } + } + } + + ff := r.getFunction(instr) + if ff == nil { + continue + } + + key := ff.RelString(nil) + res, ok := r.getValue(key, ff) + if ok { + if !res.Valid { + r.pass.Reportf(instr.Pos(), "Function `%s` should pass the context parameter", strings.Join(reverse(res.Funcs), "->")) + } + } + } + } +} + +func (r *runner) checkFuncWithoutCtx(f *ssa.Function, checkingMap map[string]bool) (ret bool) { + ret = true + orgKey := f.RelString(nil) + var seted bool + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + tp, ok := r.getCtxType(instr) + if !ok { + continue + } + + if tp&CtxOut != 0 { + continue + } + + // it is considered illegal as long as ctx is in the input and not in *struct X + if tp&CtxIn != 0 { + if tp&CtxInField == 0 { + ret = false + } + } + + ff := r.getFunction(instr) + if ff == nil { + continue + } + + key := ff.RelString(nil) + res, ok := r.getValue(key, ff) + if ok { + if !res.Valid { + ret = false + + // save the call link + if !seted { + seted = true + r.setFact(orgKey, res.Valid, res.Funcs...) 
+
+				}
+			}
+			continue
+		}
+
+		// skip synthetic $thunk and $bound wrappers
+		if strings.HasSuffix(key, "$thunk") || strings.HasSuffix(key, "$bound") {
+			continue
+		}
+
+		if entryType := r.checkIsEntry(ff); entryType == EntryNormal {
+			// cannot get info from fact, skip
+			if ff.Blocks == nil {
+				continue
+			}
+
+			// handle cyclic calls
+			if checkingMap[key] {
+				continue
+			}
+			checkingMap[key] = true
+
+			valid := r.checkFuncWithoutCtx(ff, checkingMap)
+			r.setFact(key, valid, ff.Name())
+			if res, ok := r.getValue(key, ff); ok && !valid && !seted {
+				seted = true
+				r.setFact(orgKey, valid, res.Funcs...)
+			}
+			if !valid {
+				ret = false
+			}
+		}
+	}
+	}
+	return ret
+}
+
+func (r *runner) getCtxType(instr ssa.Instruction) (tp int, ok bool) {
+	switch i := instr.(type) {
+	case ssa.CallInstruction:
+		tp = r.getCallInstrCtxType(i)
+		ok = true
+	case *ssa.MakeClosure:
+		tp = r.getMakeClosureCtxType(i)
+		ok = true
+	}
+	return
+}
+
+func (r *runner) getCallInstrCtxType(c ssa.CallInstruction) (tp int) {
+	// check params
+	for _, v := range c.Common().Args {
+		if r.isCtxType(v.Type()) {
+			if vv, ok := v.(*ssa.UnOp); ok {
+				if _, ok := vv.X.(*ssa.FieldAddr); ok {
+					tp |= CtxInField
+				}
+			}
+
+			tp |= CtxIn
+			break
+		}
+	}
+
+	// check results
+	if v := c.Value(); v != nil {
+		if r.isCtxType(v.Type()) {
+			tp |= CtxOut
+		} else {
+			tuple, ok := v.Type().(*types.Tuple)
+			if !ok {
+				return
+			}
+			for i := 0; i < tuple.Len(); i++ {
+				if r.isCtxType(tuple.At(i).Type()) {
+					tp |= CtxOut
+					break
+				}
+			}
+		}
+	}
+
+	return
+}
+
+func (r *runner) getMakeClosureCtxType(c *ssa.MakeClosure) (tp int) {
+	for _, v := range c.Bindings {
+		if r.isCtxType(v.Type()) {
+			if vv, ok := v.(*ssa.UnOp); ok {
+				if _, ok := vv.X.(*ssa.FieldAddr); ok {
+					tp |= CtxInField
+				}
+			}
+
+			tp |= CtxIn
+			break
+		}
+	}
+	return
+}
+
+func (r *runner) getFunction(instr ssa.Instruction) (f *ssa.Function) {
+	switch i := instr.(type) {
+	case ssa.CallInstruction:
+		if i.Common().IsInvoke() {
+			return
+		}
+
+		switch c := i.Common().Value.(type) {
+		case *ssa.Function:
+			f = c
+		case *ssa.MakeClosure:
+			// captured in the outer layer
+		case *ssa.Builtin, *ssa.UnOp, *ssa.Lookup, *ssa.Phi:
+			// skipped
+		case *ssa.Extract, *ssa.Call:
+			// function is a result of a call, skipped
+		case *ssa.Parameter:
+			// function is a param, skipped
+		}
+	case *ssa.MakeClosure:
+		f = i.Fn.(*ssa.Function)
+	}
+	return
+}
+
+func (r *runner) isCtxType(tp types.Type) bool {
+	return types.Identical(tp, r.ctxTyp) || types.Identical(tp, r.ctxPTyp)
+}
+
+func (r *runner) isHttpResType(tp types.Type) bool {
+	for _, v := range r.httpResTyps {
+		if ok := types.Identical(tp, v); ok {
+			return true
+		}
+	}
+	return false
+}
+
+func (r *runner) isHttpReqType(tp types.Type) bool {
+	for _, v := range r.httpReqTyps {
+		if ok := types.Identical(tp, v); ok {
+			return true
+		}
+	}
+	return false
+}
+
+func (r *runner) getValue(key string, f *ssa.Function) (res resInfo, ok bool) {
+	res, ok = r.currentFact[key]
+	if ok {
+		return
+	}
+
+	if f.Pkg == nil {
+		return
+	}
+
+	var fact ctxFact
+	var got bool
+	if r.disableFact {
+		fact, got = getPkgFact(f.Pkg.Pkg)
+	} else {
+		got = r.pass.ImportPackageFact(f.Pkg.Pkg, &fact)
+	}
+	if got {
+		res, ok = fact[key]
+	}
+	return
+}
+
+func (r *runner) setFact(key string, valid bool, funcs ...string) {
+	var names []string
+	if !valid {
+		names = append(r.currentFact[key].Funcs, funcs...)
+ } + r.currentFact[key] = resInfo{ + Valid: valid, + Funcs: names, + } +} + +// setPkgFact save fact to mem +func setPkgFact(pkg *types.Package, fact ctxFact) { + pkgFactMu.Lock() + pkgFactMap[pkg] = fact + pkgFactMu.Unlock() +} + +// getPkgFact get fact from mem +func getPkgFact(pkg *types.Package) (fact ctxFact, ok bool) { + pkgFactMu.RLock() + fact, ok = pkgFactMap[pkg] + pkgFactMu.RUnlock() + return +} + +func reverse(arr1 []string) (arr2 []string) { + l := len(arr1) + if l == 0 { + return + } + arr2 = make([]string, l) + for i := 0; i <= l/2; i++ { + arr2[i] = arr1[l-1-i] + arr2[l-1-i] = arr1[i] + } + return +} diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes deleted file mode 100644 index 402433593c..0000000000 --- a/vendor/github.com/klauspost/compress/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -* -text -*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore deleted file mode 100644 index d31b378152..0000000000 --- a/vendor/github.com/klauspost/compress/.gitignore +++ /dev/null @@ -1,32 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -/s2/cmd/_s2sx/sfx-exe - -# Linux perf files -perf.data -perf.data.old - -# gdb history -.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml deleted file mode 100644 index 0af08e65e6..0000000000 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ /dev/null @@ -1,141 +0,0 @@ -# This is an example goreleaser.yaml file with some sane defaults. 
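Since the vendored `NewAnalyzer` above returns a standard `*analysis.Analyzer`, the linter can also be driven standalone. A minimal sketch of such a driver, assuming only the `singlechecker` helper from x/tools (the upstream `cmd/contextcheck` uses its own package loader, so this is an illustration, not the shipped binary):

```go
package main

import (
	"github.com/kkHAIKE/contextcheck"

	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	// The zero-value Configuration leaves fact export enabled, so call
	// chains that cross package boundaries are still reported.
	singlechecker.Main(contextcheck.NewAnalyzer(contextcheck.Configuration{}))
}
```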
-# Make sure to check the documentation at http://goreleaser.com -before: - hooks: - - ./gen.sh - - go install mvdan.cc/garble@latest - -builds: - - - id: "s2c" - binary: s2c - main: ./s2/cmd/s2c/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - gobinary: garble - - - id: "s2d" - binary: s2d - main: ./s2/cmd/s2d/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - gobinary: garble - - - id: "s2sx" - binary: s2sx - main: ./s2/cmd/_s2sx/main.go - flags: - - -modfile=s2sx.mod - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - gobinary: garble - -archives: - - - id: s2-binaries - name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" - replacements: - aix: AIX - darwin: OSX - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 - freebsd: FreeBSD - netbsd: NetBSD - format_overrides: - - goos: windows - format: zip - files: - - unpack/* - - s2/LICENSE - - s2/README.md -checksum: - name_template: 'checksums.txt' -snapshot: - name_template: "{{ .Tag }}-next" -changelog: - sort: asc - filters: - exclude: - - '^doc:' - - '^docs:' - - '^test:' - - '^tests:' - - '^Update\sREADME.md' - -nfpms: - - - file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" - vendor: Klaus Post - homepage: https://github.com/klauspost/compress - maintainer: Klaus Post - description: S2 Compression Tool - license: BSD 3-Clause - formats: - - deb - - rpm - replacements: - darwin: Darwin - linux: Linux - freebsd: FreeBSD - amd64: x86_64 diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE deleted file mode 100644 index 87d5574777..0000000000 --- a/vendor/github.com/klauspost/compress/LICENSE +++ /dev/null @@ -1,304 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------- - -Files: gzhttp/* - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-2017 The New York Times Company - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md deleted file mode 100644 index 3c00c1af96..0000000000 --- a/vendor/github.com/klauspost/compress/README.md +++ /dev/null @@ -1,578 +0,0 @@ -# compress - -This package provides various compression algorithms. - -* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. -* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). -* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. -* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. -* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. -* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. -* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. 
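The "drop-in replacement" claims in the list above are literal: only the import path changes. A minimal sketch using the gzip replacement, assuming nothing beyond the stdlib-compatible `compress/gzip` surface the README advertises:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	gzip "github.com/klauspost/compress/gzip" // swap-in for "compress/gzip"
)

func main() {
	var buf bytes.Buffer

	w := gzip.NewWriter(&buf) // same constructor as the standard library
	if _, err := w.Write(bytes.Repeat([]byte("hello gopher "), 100)); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil { // Close flushes and writes the gzip footer
		log.Fatal(err)
	}
	fmt.Println("compressed size:", buf.Len())
}
```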
- -[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) -[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) - -# changelog - -* Sept 16, 2022 (v1.15.10) - - * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 - * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 - * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 - * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 - * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635 - * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 - * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 - -* July 21, 2022 (v1.15.9) - - * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 - * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 - * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 - -* July 13, 2022 (v1.15.8) - - * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 - * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 - * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 - * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 - * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 - * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 - * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 - -* June 29, 2022 (v1.15.7) - - * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 - * zip: Merge upstream https://github.com/klauspost/compress/pull/631 - * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 - * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 - * flate: Faster histograms https://github.com/klauspost/compress/pull/620 - * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 - -* June 3, 2022 (v1.15.6) - * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 - * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 - * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 - * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 - * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 - * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 - * s2: Add ReaderIgnoreCRC 
https://github.com/klauspost/compress/pull/609 - * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 - * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 - * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 - -* May 25, 2022 (v1.15.5) - * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 - * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 - * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 - * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 - * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 - * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 - * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 - * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 - - -* May 11, 2022 (v1.15.4) - * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) - * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) - * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) - * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) - -* May 5, 2022 (v1.15.3) - * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) - * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) - -* Apr 26, 2022 (v1.15.2) - * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) - * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) - * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) - * Minimum version is Go 1.16, added CI test on 1.18. 
- -* Mar 11, 2022 (v1.15.1) - * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) - * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) - * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) - * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) - * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) - -* Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) - * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) - -Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. - -Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. - -While the release has been extensively tested, it is recommended to testing when upgrading. - -
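To make the synchronous-mode claim concrete: with concurrency set to 1 through the encoder and decoder options, no goroutines are spawned at all. A minimal round-trip sketch, assuming the documented `WithEncoderConcurrency`/`WithDecoderConcurrency` options:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Concurrency 1: the encoder runs fully synchronously.
	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("some payload")); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil { // flush the final frame
		log.Fatal(err)
	}

	// Same for the decoder; nothing is left running afterwards, so
	// decoders can be pooled and garbage collected safely.
	dec, err := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	plain, err := io.ReadAll(dec)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("round-tripped: %s\n", plain)
}
```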
- See changes to v1.14.x - -* Feb 22, 2022 (v1.14.4) - * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) - * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) - * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 - * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) - -* Feb 17, 2022 (v1.14.3) - * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) - * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) - * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) - -* Jan 25, 2022 (v1.14.2) - * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) - * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) - * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) - * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) - * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) - * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) - -* Jan 11, 2022 (v1.14.1) - * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) - * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) - * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) - * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) - * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) -
- -
- See changes to v1.13.x - -* Aug 30, 2021 (v1.13.5) - * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) - * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) - * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) - * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) - -* Aug 12, 2021 (v1.13.4) - * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). - * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) - -* Aug 3, 2021 (v1.13.3) - * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) - * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) - * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) - * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) - * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) - * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) - -* Jun 14, 2021 (v1.13.1) - * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) - * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) - * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) - * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) - -* Jun 3, 2021 (v1.13.0) - * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. - * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) - * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) -
- - -
- See changes to v1.12.x - -* May 25, 2021 (v1.12.3) - * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) - * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) - * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) - -* Apr 27, 2021 (v1.12.2) - * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) - * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) - * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) - * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359) - * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) - * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) - -* Apr 14, 2021 (v1.12.1) - * snappy package removed. Upstream added as dependency. - * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) - * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) - * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) - * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) - * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) - * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) -
- -
- See changes to v1.11.x - -* Mar 26, 2021 (v1.11.13) - * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) - * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) - * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) - * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) - -* Mar 5, 2021 (v1.11.12) - * s2: Add `s2sx` binary that creates [self-extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). - * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) - -* Mar 1, 2021 (v1.11.9) - * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) - * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) - * s2: Fix binaries. - -* Feb 25, 2021 (v1.11.8) - * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. - * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) - * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) - * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) - * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) - -* Jan 14, 2021 (v1.11.7) - * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) - * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) - * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) - * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) - * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) - -* Jan 7, 2021 (v1.11.6) - * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) - * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) - -* Dec 20, 2020 (v1.11.4) - * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) - * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) - * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) - * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) - * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) - -* Nov 15, 2020 (v1.11.3) - * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) - * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) - -* Oct 11, 2020 (v1.11.2) - * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) - -* Oct 1, 2020 (v1.11.1) - * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) - -* Sept 8, 2020 (v1.11.0) - * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) - * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) - * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) -
- -
- See changes to v1.10.x - -* July 8, 2020 (v1.10.11) - * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) - * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) - -* June 23, 2020 (v1.10.10) - * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) - -* June 16, 2020 (v1.10.9): - * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) - * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) - * Fuzzit tests removed. The service has been purchased and is no longer available. - -* June 5, 2020 (v1.10.8): - * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) - -* June 1, 2020 (v1.10.7): - * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) - * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) - * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) - -* May 21, 2020: (v1.10.6) - * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) - * zstd: Stricter decompression checks. - -* April 12, 2020: (v1.10.5) - * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) - -* Apr 8, 2020: (v1.10.4) - * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) -* Mar 11, 2020: (v1.10.3) - * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) - * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) - * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) - * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) - * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) - -* Feb 27, 2020: (v1.10.2) - * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) - * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) - -* Feb 18, 2020: (v1.10.1) - * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) - * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) - * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) - -* Feb 4, 2020: (v1.10.0) - * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) - * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) - * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) - * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) - -
- -
- See changes prior to v1.10.0 - -* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). -* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) -* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. -* Jan 4, 2020: (v1.9.6) Fixed a regression from v1.9.5 that caused corrupt zstd encodes in rare cases. -* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) -* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. -* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) -* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features -* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197) -* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) -* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit. -* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) -* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) -* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) -* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. -* Nov 28, 2019 (v1.9.3) Fewer allocations in stateless deflate. -* Nov 28, 2019: 5-20% faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) -* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. -* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) -* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. -* Nov 11, 2019: Reduce inflate memory use by 1KB. -* Nov 10, 2019: Fewer allocations in deflate bit writer. -* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. -* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) -* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) -* Oct 24, 2019 zstd: Fix huff0 out-of-buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) -* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) -
- -
- See changes prior to v1.9.0 - -* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) -* Oct 3, 2019: Fix inconsistent results on broken zstd streams. -* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) -* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). -* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). -* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). -* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds a full zero payload block encoding option. -* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. -* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. -* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. -* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high-performance replacement for Snappy. -* Aug 21, 2019: (v1.7.6) Fixed minor issues found by the fuzzer. One could lead to zstd not decompressing. -* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. -* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) -* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) -* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) -* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) -* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. -* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. -* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. -* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. -* June 17, 2019: zstd decompression bugfix. -* June 17, 2019: Fix 32-bit builds. -* June 17, 2019: Easier use in modules (fewer dependencies). -* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. -* June 5, 2019: 20-40% higher throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression, and better compression. -* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. -* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! -* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. -* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. -* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
-* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. -* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). -* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. -* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. -* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. -* May 28, 2017: Reduce allocations when resetting decoder. -* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. -* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). -* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before. -* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. -* Oct 16, 2016: Go 1.7 changes merged. Apples to apples, this package is a few percent faster, but has a significantly better balance between speed and compression per level. -* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base64-encoded data compression. -* Mar 24, 2016: Small speedup for level 1-3. -* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. -* Feb 19, 2016: Handle small payloads faster in level 1-3. -* Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. -* Feb 14, 2016: Snappy: Merge upstream changes. -* Feb 14, 2016: Snappy: Fix aggressive skipping. -* Feb 14, 2016: Snappy: Update benchmark. -* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. -* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%. -* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard-to-compress content. -* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. -* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. -* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. -* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. -* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. -* Dec 8 2015: Fixed rare [one-byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update! -* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). -* Nov 20 2015: Small optimization to bit writer on 64 bit systems. -* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). -* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
-* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file. -* Oct 15 2015: Added skipping on uncompressible data. Random data speedup >5x. - -
- -# deflate usage - -The packages are drop-in replacements for the standard libraries. Simply replace the import path to use them: - -| old import | new import | Documentation -|--------------------|-----------------------------------------|--------------------| -| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) -| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) -| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) -| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) - -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). - -You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. - -The packages contain the same functionality as the standard library, so you can use its godoc: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). - -Currently there is only a minor speedup on decompression (mostly CRC32 calculation). - -Memory usage is typically 1MB for a Writer; the standard library is in the same range. -If you expect to have a lot of concurrently allocated Writers, consider using -the stateless compression described below. - -For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). - -# Stateless compression - -This package offers stateless compression as a special option for gzip/deflate. -It performs compression without maintaining any state between Write calls. - -This means no memory is kept between Write calls, but compression and speed will be suboptimal. - -This is only relevant in cases where you expect to run many thousands of compressors concurrently, -but with very little activity. This is *not* intended for regular web servers serving individual requests. - -Because of this, the size of actual Write calls will affect output size. - -In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. - -For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter). - -A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: - -``` - // replace 'ioutil.Discard' with your output. - gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) - if err != nil { - return err - } - defer gzw.Close() - - w := bufio.NewWriterSize(gzw, 4096) - defer w.Flush() - - // Write to 'w' -``` - -This will only use up to 4KB in memory when the writer is idle. - -Compression is almost always worse than with the fastest conventional compression level, -and each write will allocate (a little) memory. 
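For the direct deflate route mentioned above, here is a minimal sketch built around `NewStatelessWriter`; the surrounding helper is illustrative, not part of the package:

```
// Sketch: direct stateless deflate, per the section above.
package example

import (
	"bytes"

	"github.com/klauspost/compress/flate"
)

func statelessExample(data []byte) ([]byte, error) {
	var buf bytes.Buffer

	// NewStatelessWriter keeps no state between Write calls,
	// so each Write is compressed as an independent unit.
	w := flate.NewStatelessWriter(&buf)
	if _, err := w.Write(data); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```

As with the gzip variant, batching writes (for example through a `bufio.Writer`) keeps the per-Write overhead and output size penalty down.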
- -# Performance Update 2018 - -It has been a while since we have looked at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. - -The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. - -The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. - -The `gzstd` (standard library gzip) and `gzkp` (this package gzip) use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). - - -## Overall differences. - -There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. - -The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library. - -This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off, "9" is the "give me the best compression" setting, and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression. - -There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. - -## Web Content - -This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. - -Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big. - -Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. - -## Object files - -This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. - -The picture is similar to the web content, but with small differences since this is very compressible. 
Levels 2-3 offer good speed, but sacrifice quite a bit of compression. - -The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 & 7 of this package respectively. - -## Highly Compressible File - -This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data, etc. - -It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression. - -So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". - -## Medium-High Compressible - -This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams. - -We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. - -## Medium Compressible - -I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. - -The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. - - -## Un-compressible Content - -This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. - - -## Huffman only compression - -This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear-time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character. - -This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). - -Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. - -The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). - -The linear-time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and the encode speed is 144MB/s (4.5x level 1). 
So in this case you trade a 30% size increase for a 4 times speedup. - -For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). - -This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. - -# Other packages - -Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): - -* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. -* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. -* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. - -# license - -This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go deleted file mode 100644 index ea5a692d51..0000000000 --- a/vendor/github.com/klauspost/compress/compressible.go +++ /dev/null @@ -1,85 +0,0 @@ -package compress - -import "math" - -// Estimate returns a normalized compressibility estimate of block b. -// Values close to zero are likely uncompressible. -// Values above 0.1 are likely to be compressible. -// Values above 0.5 are very compressible. -// Very small lengths will return 0. -func Estimate(b []byte) float64 { - if len(b) < 16 { - return 0 - } - - // Correctly predicted order 1 - hits := 0 - lastMatch := false - var o1 [256]byte - var hist [256]int - c1 := byte(0) - for _, c := range b { - if c == o1[c1] { - // We only count a hit if there was two correct predictions in a row. - if lastMatch { - hits++ - } - lastMatch = true - } else { - lastMatch = false - } - o1[c1] = c - c1 = c - hist[c]++ - } - - // Use x^0.6 to give better spread - prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) - - // Calculate histogram distribution - variance := float64(0) - avg := float64(len(b)) / 256 - - for _, v := range hist { - Δ := float64(v) - avg - variance += Δ * Δ - } - - stddev := math.Sqrt(float64(variance)) / float64(len(b)) - exp := math.Sqrt(1 / float64(len(b))) - - // Subtract expected stddev - stddev -= exp - if stddev < 0 { - stddev = 0 - } - stddev *= 1 + exp - - // Use x^0.4 to give better spread - entropy := math.Pow(stddev, 0.4) - - // 50/50 weight between prediction and histogram distribution - return math.Pow((prediction+entropy)/2, 0.9) -} - -// ShannonEntropyBits returns the number of bits minimum required to represent -// an entropy encoding of the input bytes. -// https://en.wiktionary.org/wiki/Shannon_entropy -func ShannonEntropyBits(b []byte) int { - if len(b) == 0 { - return 0 - } - var hist [256]int - for _, c := range b { - hist[c]++ - } - shannon := float64(0) - invTotal := 1.0 / float64(len(b)) - for _, v := range hist[:] { - if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - } - } - return int(math.Ceil(shannon)) -} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md deleted file mode 100644 index ea7324da67..0000000000 --- a/vendor/github.com/klauspost/compress/fse/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# Finite State Entropy - -This package provides Finite State Entropy encoding and decoding. 
- -Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) -encoding provides a fast, near-optimal symbol encoding/decoding -for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). - -This can be used for compressing input with a lot of similar input values into the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do, -but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) - -## News - - * Feb 2018: First implementation released. Consider this beta software for now. - -# Usage - -This package provides a low-level interface that allows compressing single, independent blocks. - -Each block is separate, and there are no built-in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `nil` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `(error)` | An internal error occurred. | - -As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object -that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same -object can be used for both. - -Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). - -# Performance - -Many factors affect speed. Block sizes and the compressibility of the material are primary factors. -All compression functions currently run only on the calling goroutine, so only one core will be used per block. - -The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input -is used to reduce some of the processing, so if all your input is above byte value 64, for instance, it may be -beneficial to transpose all your input values down by 64. 
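Putting the usage notes above together, here is a minimal round-trip sketch; `Compress`, `Decompress`, `Scratch` and the error values are as documented above, while the sample data and handling are illustrative:

```
// Sketch: FSE block round trip with a re-used Scratch.
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	in := bytes.Repeat([]byte("abcabcabd"), 100)

	var s fse.Scratch
	comp, err := fse.Compress(in, &s)
	switch err {
	case nil:
		// Compressed block returned (backed by s.Out).
	case fse.ErrIncompressible, fse.ErrUseRLE:
		// Normal outcomes: store the block raw or as RLE instead.
		fmt.Println("not entropy coded:", err)
		return
	default:
		panic(err)
	}

	// comp still references s.Out, so detach it before re-using the
	// scratch for decompression (see the note on Out above).
	s.Out = nil

	// The compressed block must be passed back at exactly the returned
	// size; remember there are no built-in integrity checks.
	out, err := fse.Decompress(comp, &s)
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(in, out))
}
```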
- -With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and -around 300MB/s for decompression. - -The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. - -# Plans - -At some point, more internals will be exposed to facilitate more "expert" usage of the components. - -A streaming interface is also likely to be implemented, likely compatible with the [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking -changes will likely not be accepted. If in doubt, open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go deleted file mode 100644 index f65eb3909c..0000000000 --- a/vendor/github.com/klauspost/compress/fse/bitreader.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "encoding/binary" - "errors" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil - } - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) uint16 { - if n == 0 || b.bitsRead >= 64 { - return 0 - } - return b.getBitsFast(n) -} - -// getBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fill() will make sure at least 32 bits are available. 
-func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.bitsRead >= 64 && b.off == 0 -} - -// close closes the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go deleted file mode 100644 index 43e463611b..0000000000 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 holds bitmasks. It has extra entries to avoid bounds checks. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() error { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() - return nil -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go deleted file mode 100644 index abade2d605..0000000000 --- a/vendor/github.com/klauspost/compress/fse/bytereader.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b by n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// Uint32 returns a little endian uint32 starting at current offset. 
-func (b byteReader) Uint32() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go deleted file mode 100644 index 6f341914c6..0000000000 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "errors" - "fmt" -) - -// Compress the input bytes. Input must be < 2GB. -// Provide a Scratch buffer to avoid memory allocations. -// Note that the output is also kept in the scratch buffer. -// If input is too hard to compress, ErrIncompressible is returned. -// If input is a single byte value repeated ErrUseRLE is returned. -func Compress(in []byte, s *Scratch) ([]byte, error) { - if len(in) <= 1 { - return nil, ErrIncompressible - } - if len(in) > (2<<30)-1 { - return nil, errors.New("input too big, must be < 2GB") - } - s, err := s.prepare(in) - if err != nil { - return nil, err - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - if maxCount == 0 { - maxCount = s.countSimple(in) - } - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount == len(in) { - // One symbol, use RLE - return nil, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, ErrIncompressible - } - s.optimalTableLog() - err = s.normalizeCount() - if err != nil { - return nil, err - } - err = s.writeCount() - if err != nil { - return nil, err - } - - if false { - err = s.validateNorm() - if err != nil { - return nil, err - } - } - - err = s.buildCTable() - if err != nil { - return nil, err - } - err = s.compress(in) - if err != nil { - return nil, err - } - s.Out = s.bw.out - // Check if we compressed. - if len(s.Out) >= len(in) { - return nil, ErrIncompressible - } - return s.Out, nil -} - -// cState contains the compression state of a stream. -type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + first.deltaFindState - c.state = c.stateTable[lu] -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// encode the output symbol provided and write it to the bitstream. 
-func (c *cState) encodeZero(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) - c.bw.flush() -} - -// compress is the main compression loop that will encode the input from the last byte to the first. -func (s *Scratch) compress(src []byte) error { - if len(src) <= 2 { - return errors.New("compress: src too small") - } - tt := s.ct.symbolTT[:256] - s.bw.reset(s.Out) - - // Our two states each encodes every second byte. - // Last byte encoded (first byte decoded) will always be encoded by c1. - var c1, c2 cState - - // Encode so remaining size is divisible by 4. - ip := len(src) - if ip&1 == 1 { - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - c1.encodeZero(tt[src[ip-3]]) - ip -= 3 - } else { - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - ip -= 2 - } - if ip&2 != 0 { - c2.encodeZero(tt[src[ip-1]]) - c1.encodeZero(tt[src[ip-2]]) - ip -= 2 - } - - // Main compression loop. - switch { - case !s.zeroBits && s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush. - // We do not need to check if any output is 0 bits. - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - c2.encode(tt[v2]) - c1.encode(tt[v3]) - ip -= 4 - } - case !s.zeroBits: - // We do not need to check if any output is 0 bits. - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - s.bw.flush32() - c2.encode(tt[v2]) - c1.encode(tt[v3]) - ip -= 4 - } - case s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - ip -= 4 - } - default: - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - s.bw.flush32() - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - ip -= 4 - } - } - - // Flush final state. - // Used to initialize state when decoding. - c2.flush(s.actualTableLog) - c1.flush(s.actualTableLog) - - return s.bw.close() -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. 
-func (s *Scratch) writeCount() error { - var ( - tableLog = s.actualTableLog - tableSize = 1 << tableLog - previous0 bool - charnum uint16 - - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 - - // Write Table Size - bitStream = uint32(tableLog - minTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - ) - if cap(s.Out) < maxHeaderSize { - s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) - } - outP := uint(0) - out := s.Out[:maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return errors.New("internal error: remaining<1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += (bitCount + 7) / 8 - - if charnum > s.symbolLen { - return errors.New("internal error: charnum > s.symbolLen") - } - s.Out = out[:outP] - return nil -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaFindState int32 - deltaNbBits uint32 -} - -// String prints values as a human-readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// allocCtable will allocate tables needed for compression. -// If existing tables are big enough, they are simply re-used. -func (s *Scratch) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. 
-func (s *Scratch) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [maxSymbolValue + 2]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. - largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = int32(total - 1) - total++ - default: - maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = int32(total - v) - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int) { - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - for i, v := range s.count[:] { - if v > m { - m = v - } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } - } - return int(m) -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
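// The spreading loop in buildCTable above relies on tableStep (defined near the
// end of fse.go in this diff) producing a step that is odd for every
// power-of-two table size the package uses, so stepping modulo tableSize visits
// each cell exactly once. A standalone sketch verifying that property for the
// smallest table:
package main

import "fmt"

func main() {
	const tableSize = 32                            // 1 << minTablelog
	step := (tableSize >> 1) + (tableSize >> 3) + 3 // 23, odd, hence coprime with 32
	seen := make([]bool, tableSize)
	pos := 0
	for i := 0; i < tableSize; i++ {
		seen[pos] = true
		pos = (pos + step) & (tableSize - 1)
	}
	for i, ok := range seen {
		if !ok {
			fmt.Println("cell never visited:", i)
			return
		}
	}
	fmt.Println("all cells visited once; position wrapped back to", pos) // 0
}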
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 - minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > maxTableLog { - tableLog = maxTableLog - } - s.actualTableLog = tableLog -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -func (s *Scratch) normalizeCount() error { - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(s.br.remain()) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(s.br.remain() >> tableLog) - ) - - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - return s.normalizeCount2() - } - s.norm[largest] += stillToDistribute - return nil -} - -// Secondary normalization method. -// To be used when primary method fails. 
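// A worked example of the tableLog selection above, with illustrative numbers:
// for 1000 input bytes and 40 distinct symbols, minTableLog picks the smaller
// of the two lower bounds, and optimalTableLog then clamps the result into
// [minTablelog, maxTableLog] (5..12 with the constants defined in fse.go).
package main

import (
	"fmt"
	"math/bits"
)

func highBits(v uint32) uint32 { return uint32(bits.Len32(v) - 1) }

func main() {
	srcLen, symbols := uint32(1000), uint32(40)
	minBitsSrc := highBits(srcLen-1) + 1      // 10 for 1000 bytes
	minBitsSymbols := highBits(symbols-1) + 2 // 7 for 40 symbols
	minLog := minBitsSymbols
	if minBitsSrc < minBitsSymbols {
		minLog = minBitsSrc
	}
	fmt.Println("minimum tableLog:", minLog) // 7
}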
-func (s *Scratch) normalizeCount2() error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(s.br.remain()) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// validateNorm validates the normalized histogram table. 
-func (s *Scratch) validateNorm() (err error) {
-	var total int
-	for _, v := range s.norm[:s.symbolLen] {
-		if v >= 0 {
-			total += int(v)
-		} else {
-			total -= int(v)
-		}
-	}
-	defer func() {
-		if err == nil {
-			return
-		}
-		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
-		for i, v := range s.norm[:s.symbolLen] {
-			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
-		}
-	}()
-	if total != (1 << s.actualTableLog) {
-		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
-	}
-	return nil
-}
-
-// readNCount will read the symbol distribution so decoding tables can be constructed.
-func (s *Scratch) readNCount() error {
-	var (
-		charnum   uint16
-		previous0 bool
-		b         = &s.br
-	)
-	iend := b.remain()
-	if iend < 4 {
-		return errors.New("input too small")
-	}
-	bitStream := b.Uint32()
-	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
-	if nbBits > tablelogAbsoluteMax {
-		return errors.New("tableLog too large")
-	}
-	bitStream >>= 4
-	bitCount := uint(4)
-
-	s.actualTableLog = uint8(nbBits)
-	remaining := int32((1 << nbBits) + 1)
-	threshold := int32(1 << nbBits)
-	gotTotal := int32(0)
-	nbBits++
-
-	for remaining > 1 {
-		if previous0 {
-			n0 := charnum
-			for (bitStream & 0xFFFF) == 0xFFFF {
-				n0 += 24
-				if b.off < iend-5 {
-					b.advance(2)
-					bitStream = b.Uint32() >> bitCount
-				} else {
-					bitStream >>= 16
-					bitCount += 16
-				}
-			}
-			for (bitStream & 3) == 3 {
-				n0 += 3
-				bitStream >>= 2
-				bitCount += 2
-			}
-			n0 += uint16(bitStream & 3)
-			bitCount += 2
-			if n0 > maxSymbolValue {
-				return errors.New("maxSymbolValue too small")
-			}
-			for charnum < n0 {
-				s.norm[charnum&0xff] = 0
-				charnum++
-			}
-
-			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
-				b.advance(bitCount >> 3)
-				bitCount &= 7
-				bitStream = b.Uint32() >> bitCount
-			} else {
-				bitStream >>= 2
-			}
-		}
-
-		max := (2*(threshold) - 1) - (remaining)
-		var count int32
-
-		if (int32(bitStream) & (threshold - 1)) < max {
-			count = int32(bitStream) & (threshold - 1)
-			bitCount += nbBits - 1
-		} else {
-			count = int32(bitStream) & (2*threshold - 1)
-			if count >= threshold {
-				count -= max
-			}
-			bitCount += nbBits
-		}
-
-		count-- // extra accuracy
-		if count < 0 {
-			// -1 means +1
-			remaining += count
-			gotTotal -= count
-		} else {
-			remaining -= count
-			gotTotal += count
-		}
-		s.norm[charnum&0xff] = int16(count)
-		charnum++
-		previous0 = count == 0
-		for remaining < threshold {
-			nbBits--
-			threshold >>= 1
-		}
-		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
-			b.advance(bitCount >> 3)
-			bitCount &= 7
-		} else {
-			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
-			b.off = len(b.b) - 4
-		}
-		bitStream = b.Uint32() >> (bitCount & 31)
-	}
-	s.symbolLen = charnum
-
-	if s.symbolLen <= 1 {
-		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
-	}
-	if s.symbolLen > maxSymbolValue+1 {
-		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
-	}
-	if remaining != 1 {
-		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
-	}
-	if bitCount > 32 {
-		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
-	}
-	if gotTotal != 1<<s.actualTableLog {
-		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
-	}
-	b.advance((bitCount + 7) >> 3)
-	return nil
-}
-
-// decSymbol contains information about a state entry,
-// Including the state offset base, the output symbol and
-// the number of bits to read for the low part of the destination state.
-type decSymbol struct {
-	newState uint16
-	symbol   uint8
-	nbBits   uint8
-}
-
-// allocDtable will allocate decoding tables if they are not big enough.
-func (s *Scratch) allocDtable() { - tableSize := 1 << s.actualTableLog - if cap(s.decTable) < tableSize { - s.decTable = make([]decSymbol, tableSize) - } - s.decTable = s.decTable[:tableSize] - - if cap(s.ct.tableSymbol) < 256 { - s.ct.tableSymbol = make([]byte, 256) - } - s.ct.tableSymbol = s.ct.tableSymbol[:256] - - if cap(s.ct.stateTable) < 256 { - s.ct.stateTable = make([]uint16, 256) - } - s.ct.stateTable = s.ct.stateTable[:256] -} - -// buildDtable will build the decoding table. -func (s *Scratch) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - s.allocDtable() - symbolNext := s.ct.stateTable[:256] - - // Init, lay down lowprob symbols - s.zeroBits = false - { - largeLimit := int16(1 << (s.actualTableLog - 1)) - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.decTable[highThreshold].symbol = uint8(i) - highThreshold-- - symbolNext[i] = 1 - } else { - if v >= largeLimit { - s.zeroBits = true - } - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.decTable[position].symbol = uint8(ss) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.decTable { - symbol := v.symbol - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.decTable[u].nbBits = nBits - newState := (nextState << nBits) - tableSize - if newState >= tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.decTable[u].newState = newState - } - } - return nil -} - -// decompress will decompress the bitstream. -// If the buffer is over-read an error is returned. -func (s *Scratch) decompress() error { - br := &s.bits - br.init(s.br.unread()) - - var s1, s2 decoder - // Initialize and decode first state and symbol. - s1.init(br, s.decTable, s.actualTableLog) - s2.init(br, s.decTable, s.actualTableLog) - - // Use temp table to avoid bound checks/append penalty. - var tmp = s.ct.tableSymbol[:256] - var off uint8 - - // Main part - if !s.zeroBits { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.nextFast() - tmp[off+1] = s2.nextFast() - br.fillFast() - tmp[off+2] = s1.nextFast() - tmp[off+3] = s2.nextFast() - off += 4 - // When off is 0, we have overflowed and should write. - if off == 0 { - s.Out = append(s.Out, tmp...) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } else { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.next() - tmp[off+1] = s2.next() - br.fillFast() - tmp[off+2] = s1.next() - tmp[off+3] = s2.next() - off += 4 - if off == 0 { - s.Out = append(s.Out, tmp...) - // When off is 0, we have overflowed and should write. 
- if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } - s.Out = append(s.Out, tmp[:off]...) - - // Final bits, a bit more expensive check - for { - if s1.finished() { - s.Out = append(s.Out, s1.final(), s2.final()) - break - } - br.fill() - s.Out = append(s.Out, s1.next()) - if s2.finished() { - s.Out = append(s.Out, s2.final(), s1.final()) - break - } - s.Out = append(s.Out, s2.next()) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - return br.close() -} - -// decoder keeps track of the current state and updates it from the bitstream. -type decoder struct { - state uint16 - br *bitReader - dt []decSymbol -} - -// init will initialize the decoder and read the first state from the stream. -func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { - d.dt = dt - d.br = in - d.state = in.getBits(tableLog) -} - -// next returns the next symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) next() uint8 { - n := &d.dt[d.state] - lowBits := d.br.getBits(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (d *decoder) finished() bool { - return d.br.finished() && d.dt[d.state].nbBits > 0 -} - -// final returns the current state symbol without decoding the next. -func (d *decoder) final() uint8 { - return d.dt[d.state].symbol -} - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) nextFast() uint8 { - n := d.dt[d.state] - lowBits := d.br.getBitsFast(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go deleted file mode 100644 index 535cbadfde..0000000000 --- a/vendor/github.com/klauspost/compress/fse/fse.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -// Package fse provides Finite State Entropy encoding and decoding. -// -// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding -// for byte blocks as implemented in zstd. -// -// See https://github.com/klauspost/compress/tree/master/fse for more information. -package fse - -import ( - "errors" - "fmt" - "math/bits" -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = 14 - defaultMemoryUsage = 13 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - defaultTablelog = defaultMemoryUsage - 2 - minTablelog = 5 - maxSymbolValue = 255 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. 
- ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") -) - -// Scratch provides temporary storage for compression and decompression. -type Scratch struct { - // Private - count [maxSymbolValue + 1]uint32 - norm [maxSymbolValue + 1]int16 - br byteReader - bits bitReader - bw bitWriter - ct cTable // Compression tables. - decTable []decSymbol // Decompression table. - maxCount int // count of the most probable symbol - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // DecompressLimit limits the maximum decoded size acceptable. - // If > 0 decompression will stop when approximately this many bytes - // has been decoded. - // If 0, maximum size will be 2GB. - DecompressLimit int - - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - TableLog uint8 -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *Scratch) Histogram() []uint32 { - return s.count[:] -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = 255 - } - if s.TableLog == 0 { - s.TableLog = defaultTablelog - } - if s.TableLog > maxTableLog { - return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - s.br.init(in) - if s.DecompressLimit == 0 { - // Max size 2GB. - s.DecompressLimit = (2 << 30) - 1 - } - - return s, nil -} - -// tableStep returns the next table index. 
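// A hedged usage sketch of the histogram hooks defined above: a caller that has
// already counted symbols can fill Histogram() and report the result through
// HistogramFinished, letting the encoder skip its own counting pass. The
// top-level fse.Compress entry point used here belongs to this package but is
// not shown in this diff; the input data is illustrative.
package main

import (
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	in := []byte("aaaaaabbbcaaaaaabbbcaaaaaabbbc")
	var s fse.Scratch

	hist := s.Histogram() // always length 256, backed by the scratch
	for _, b := range in {
		hist[b]++
	}
	maxSym, maxCount := uint8(0), 0
	for i, c := range hist {
		if c > 0 {
			maxSym = uint8(i)
		}
		if int(c) > maxCount {
			maxCount = int(c)
		}
	}
	s.HistogramFinished(maxSym, maxCount)

	out, err := fse.Compress(in, &s)
	switch err {
	case fse.ErrIncompressible, fse.ErrUseRLE:
		fmt.Println("store the block unmodified:", err)
	case nil:
		fmt.Println("compressed", len(in), "bytes to", len(out))
	default:
		fmt.Println("internal error:", err)
	}
}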
-func tableStep(tableSize uint32) uint32 {
-	return (tableSize >> 1) + (tableSize >> 3) + 3
-}
-
-func highBits(val uint32) (n uint32) {
-	return uint32(bits.Len32(val) - 1)
-}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
deleted file mode 100644
index aff942205f..0000000000
--- a/vendor/github.com/klauspost/compress/gen.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-
-cd s2/cmd/_s2sx/ || exit 1
-go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
deleted file mode 100644
index b3d262958f..0000000000
--- a/vendor/github.com/klauspost/compress/huff0/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
deleted file mode 100644
index 8b6e5c6638..0000000000
--- a/vendor/github.com/klauspost/compress/huff0/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# Huff0 entropy compression
-
-This package provides Huff0 encoding and decoding as used in zstd.
-
-[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
-a Huffman codec designed for modern CPUs. Featuring OoO (Out of Order) operations on multiple ALUs
-(Arithmetic Logic Units), it achieves extremely fast compression and decompression speeds.
-
-This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
-This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
-but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
-
-* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
-
-## News
-
-This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
-
-This ensures that most functionality is well tested.
-
-# Usage
-
-This package provides a low level interface that allows you to compress single independent blocks.
-
-Each block is separate, and there are no built-in integrity checks.
-This means that the caller should keep track of block sizes and also do checksums if needed.
-
-Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
-[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
-You must provide input and will receive the output and maybe an error.
-
-These error values can be returned:
-
-| Error               | Description                                                                  |
-|---------------------|------------------------------------------------------------------------------|
-| `<nil>`             | Everything ok, output is returned                                            |
-| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
-| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
-| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
-| `(error)`           | An internal error occurred.                                                  |
-
-As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
-
-To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
-that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
-object can be used for both.
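As a rough illustration (this snippet is not from the package itself, and the input is invented), a single block could be handled like this, covering the sentinel errors from the table above:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	input := []byte("an illustrative payload with repeated repeated repeated bytes")
	var s huff0.Scratch // re-use across blocks to reduce allocations
	out, reused, err := huff0.Compress1X(input, &s)
	switch err {
	case huff0.ErrIncompressible:
		fmt.Println("store the block raw")
	case huff0.ErrUseRLE:
		fmt.Println("store the block as RLE")
	case nil:
		fmt.Printf("compressed to %d bytes (table re-used: %v)\n", len(out), reused)
	default:
		fmt.Println("error:", err) // includes ErrTooBig for oversized blocks
	}
}
```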
-
-Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
-you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
-
-The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
-
-## Tables and re-use
-
-Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
-
-The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
-that controls this behaviour. See the documentation for details. This can be altered between each block.
-
-Note, however, that this information is *not* stored in the output block, and it is up to the users of the package to
-record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
-based on the boolean reported back from the CompressXX call.
-
-If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the
-[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
-
-## Decompressing
-
-The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
-You can supply the complete block to `ReadTable` and it will return the data part of the block
-which can be given to the decompressor.
-
-Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
-or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
-
-For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
-
-You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
-your input was likely corrupted.
-
-It is important to note that a successful decoding does *not* mean your output matches your original input.
-There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
-
-# Contributing
-
-Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
-changes will likely not be accepted. If in doubt, open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
deleted file mode 100644
index 504a7be9da..0000000000
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2018 Klaus Post. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
-
-package huff0
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-)
-
-// bitReader reads a bitstream in reverse.
-// The last set bit indicates the start of the stream and is used
-// for aligning the input.
-type bitReaderBytes struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReaderBytes) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderBytes) peekByteFast() uint8 { - got := uint8(b.value >> 56) - return got -} - -func (b *bitReaderBytes) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderBytes) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. -func (b *bitReaderBytes) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderBytes) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderBytes) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -func (b *bitReaderBytes) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderBytes) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -// bitReaderShifted reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderShifted struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. 
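// A standalone sketch of the end-of-stream convention the init functions above
// and below rely on: the writer terminates a stream with a single 1 bit (see
// bitWriter.close further down), so a reader skips the zero padding plus that
// marker bit before the first payload bits. The byte values are illustrative.
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	for _, last := range []byte{0x80, 0x20, 0x01} {
		// init does b.advance(8 - highBit32(last)): padding zeros plus marker.
		skip := 8 - (bits.Len8(last) - 1)
		fmt.Printf("last byte %#02x: skip %d bit(s) before decoding\n", last, skip)
	}
}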
-func (b *bitReaderShifted) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { - return uint16(b.value >> ((64 - n) & 63)) -} - -func (b *bitReaderShifted) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderShifted) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. -func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderShifted) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) - b.bitsRead -= 8 - b.off-- - } -} - -func (b *bitReaderShifted) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderShifted) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go deleted file mode 100644 index ec71f7a349..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. 
-// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encSymbol(ct cTable, symbol byte) { - enc := ct[symbol] - b.bitContainer |= uint64(enc.val) << (b.nBits & 63) - if false { - if enc.nBits == 0 { - panic("nbits 0") - } - } - b.nBits += enc.nBits -} - -// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { - encA := ct[av] - encB := ct[bv] - sh := b.nBits & 63 - combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) - b.bitContainer |= combined << sh - if false { - if encA.nBits == 0 { - panic("nbitsA 0") - } - if encB.nBits == 0 { - panic("nbitsB 0") - } - } - b.nBits += encA.nBits + encB.nBits -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() error { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() - return nil -} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go deleted file mode 100644 index 4dcab8d232..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// Int32 returns a little endian int32 starting at current offset. -func (b byteReader) Int32() int32 { - v3 := int32(b.b[b.off+3]) - v2 := int32(b.b[b.off+2]) - v1 := int32(b.b[b.off+1]) - v0 := int32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// Uint32 returns a little endian uint32 starting at current offset. 
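// A minimal standalone sketch of the LSB-first packing performed by
// addBits16Clean, close and flushAlign above: the first value written ends up
// in the low bits of the first output byte, and close appends a single 1 bit
// as the end-of-stream marker. Values are illustrative.
package main

import "fmt"

func main() {
	var container uint64
	var nBits uint8

	add := func(value uint16, bits uint8) {
		container |= uint64(value) << (nBits & 63)
		nBits += bits
	}
	add(0b101, 3) // written first -> occupies bits 0..2
	add(0b11, 2)  // occupies bits 3..4
	add(1, 1)     // end-of-stream marker, as bitWriter.close does

	var out []byte
	for i := uint8(0); i < (nBits+7)>>3; i++ { // flushAlign: emit full bytes
		out = append(out, byte(container>>(i*8)))
	}
	fmt.Printf("%08b\n", out[0]) // 00111101: marker at bit 5, then 11, then 101
}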
-func (b byteReader) Uint32() uint32 { - v3 := uint32(b.b[b.off+3]) - v2 := uint32(b.b[b.off+2]) - v1 := uint32(b.b[b.off+1]) - v0 := uint32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go deleted file mode 100644 index 4d14542fac..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ /dev/null @@ -1,730 +0,0 @@ -package huff0 - -import ( - "fmt" - "math" - "runtime" - "sync" -) - -// Compress1X will compress the input. -// The output can be decoded using Decompress1X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - return compress(in, s, s.compress1X) -} - -// Compress4X will compress the input. The input is split into 4 independent blocks -// and compressed similar to Compress1X. -// The output can be decoded using Decompress4X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - if false { - // TODO: compress4Xp only slightly faster. - const parallelThreshold = 8 << 10 - if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { - return compress(in, s, s.compress4X) - } - return compress(in, s, s.compress4Xp) - } - return compress(in, s, s.compress4X) -} - -func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { - // Nuke previous table if we cannot reuse anyway. - if s.Reuse == ReusePolicyNone { - s.prevTable = s.prevTable[:0] - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return nil, false, ErrIncompressible - } - // One symbol, use RLE - return nil, false, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, false, ErrIncompressible - } - if s.Reuse == ReusePolicyMust && !canReuse { - // We must reuse, but we can't. 
- return nil, false, ErrIncompressible - } - if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { - keepTable := s.cTable - keepTL := s.actualTableLog - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - s.cTable = keepTable - s.actualTableLog = keepTL - if err == nil && len(s.Out) < wantSize { - s.OutData = s.Out - return s.Out, true, nil - } - if s.Reuse == ReusePolicyMust { - return nil, false, ErrIncompressible - } - // Do not attempt to re-use later. - s.prevTable = s.prevTable[:0] - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return nil, false, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - if s.Reuse == ReusePolicyAllow && canReuse { - hSize := len(s.Out) - oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) - newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) - if oldSize <= hSize+newSize || hSize+12 >= wantSize { - // Retain cTable even if we re-use. - keepTable := s.cTable - keepTL := s.actualTableLog - - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - - // Restore ctable. - s.cTable = keepTable - s.actualTableLog = keepTL - if err != nil { - return nil, false, err - } - if len(s.Out) >= wantSize { - return nil, false, ErrIncompressible - } - s.OutData = s.Out - return s.Out, true, nil - } - } - - // Use new table - err = s.cTable.write(s) - if err != nil { - s.OutTable = nil - return nil, false, err - } - s.OutTable = s.Out - - // Compress using new table - s.Out, err = compressor(in) - if err != nil { - s.OutTable = nil - return nil, false, err - } - if len(s.Out) >= wantSize { - s.OutTable = nil - return nil, false, ErrIncompressible - } - // Move current table into previous. - s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] - s.OutData = s.Out[len(s.OutTable):] - return s.Out, false, nil -} - -// EstimateSizes will estimate the data sizes -func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { - s, err = s.prepare(in) - if err != nil { - return 0, 0, 0, err - } - - // Create histogram, if none was provided. - tableSz, dataSz, reuseSz = -1, -1, -1 - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return 0, 0, 0, ErrIncompressible - } - // One symbol, use RLE - return 0, 0, 0, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return 0, 0, 0, ErrIncompressible - } - - // Calculate new table. 
- err = s.buildCTable() - if err != nil { - return 0, 0, 0, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - tableSz, err = s.cTable.estTableSize(s) - if err != nil { - return 0, 0, 0, err - } - if canReuse { - reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) - } - dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) - - // Restore - return tableSz, dataSz, reuseSz, nil -} - -func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src) -} - -func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { - var bw = bitWriter{out: dst} - - // N is length divisible by 4. - n := len(src) - n -= n & 3 - cTable := s.cTable[:256] - - // Encode last bytes. - for i := len(src) & 3; i > 0; i-- { - bw.encSymbol(cTable, src[n+i-1]) - } - n -= 4 - if s.actualTableLog <= 8 { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } else { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.flush32() - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } - err := bw.close() - return bw.out, err -} - -var sixZeros [6]byte - -func (s *Scratch) compress4X(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - segmentSize := (len(src) + 3) / 4 - - // Add placeholder for output length - offsetIdx := len(s.Out) - s.Out = append(s.Out, sixZeros[:]...) - - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - var err error - idx := len(s.Out) - s.Out, err = s.compress1xDo(s.Out, toDo) - if err != nil { - return nil, err - } - if len(s.Out)-idx > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - length := len(s.Out) - idx - s.Out[i*2+offsetIdx] = byte(length) - s.Out[i*2+offsetIdx+1] = byte(length >> 8) - } - } - - return s.Out, nil -} - -// compress4Xp will compress 4 streams using separate goroutines. -func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - // Add placeholder for output length - s.Out = s.Out[:6] - - segmentSize := (len(src) + 3) / 4 - var wg sync.WaitGroup - var errs [4]error - wg.Add(4) - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - // Separate goroutine for each block. - go func(i int) { - s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) - wg.Done() - }(i) - } - wg.Wait() - for i := 0; i < 4; i++ { - if errs[i] != nil { - return nil, errs[i] - } - o := s.tmpOut[i] - if len(o) > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - s.Out[i*2] = byte(len(o)) - s.Out[i*2+1] = byte(len(o) >> 8) - } - - // Write output. - s.Out = append(s.Out, o...) - } - return s.Out, nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. 
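// A hedged sketch using the exported EstimateSizes above to decide whether a
// new table is worth emitting before committing to a full compression pass.
// The input and the decision rule are illustrative, not the package's own.
package main

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := []byte("estimate me, estimate me, estimate me")
	var s huff0.Scratch
	tableSz, dataSz, reuseSz, err := huff0.EstimateSizes(in, &s)
	if err != nil {
		fmt.Println("not worth entropy coding:", err)
		return
	}
	// reuseSz is -1 when no previous table is available for re-use.
	if reuseSz >= 0 && reuseSz < tableSz+dataSz {
		fmt.Println("re-using the previous table is cheaper")
	} else {
		fmt.Printf("new table: %d table bytes + %d data bytes\n", tableSz, dataSz)
	}
}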
-func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { - reuse = true - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - if len(s.prevTable) > 0 { - for i, v := range s.count[:] { - if v > m { - m = v - } - if v > 0 { - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else { - if s.prevTable[i].nBits == 0 { - reuse = false - } - } - } - } - return int(m), reuse - } - for i, v := range s.count[:] { - if v > m { - m = v - } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } - } - return int(m), false -} - -func (s *Scratch) canUseTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 && c[i].nBits == 0 { - return false - } - } - return true -} - -//lint:ignore U1000 used for debugging -func (s *Scratch) validateTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 { - if c[i].nBits == 0 { - return false - } - if c[i].nBits > s.actualTableLog { - return false - } - } - } - return true -} - -// minTableLog provides the minimum logSize to safely represent a distribution. -func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.br.remain())) + 1 - minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > tableLogMax { - tableLog = tableLogMax - } - s.actualTableLog = tableLog -} - -type cTableEntry struct { - val uint16 - nBits uint8 - // We have 8 bits extra -} - -const huffNodesMask = huffNodesLen - 1 - -func (s *Scratch) buildCTable() error { - s.optimalTableLog() - s.huffSort() - if cap(s.cTable) < maxSymbolValue+1 { - s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) - } else { - s.cTable = s.cTable[:s.symbolLen] - for i := range s.cTable { - s.cTable[i] = cTableEntry{} - } - } - - var startNode = int16(s.symbolLen) - nonNullRank := s.symbolLen - 1 - - nodeNb := startNode - huffNode := s.nodes[1 : huffNodesLen+1] - - // This overlays the slice above, but allows "-1" index lookups. - // Different from reference implementation. 
- huffNode0 := s.nodes[0 : huffNodesLen+1] - - for huffNode[nonNullRank].count == 0 { - nonNullRank-- - } - - lowS := int16(nonNullRank) - nodeRoot := nodeNb + lowS - 1 - lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) - nodeNb++ - lowS -= 2 - for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 - } - // fake entry, strong barrier - huffNode0[0].count = 1 << 31 - - // create parents - for nodeNb <= nodeRoot { - var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { - n1 = lowS - lowS-- - } else { - n1 = lowN - lowN++ - } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { - n2 = lowS - lowS-- - } else { - n2 = lowN - lowN++ - } - - huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) - nodeNb++ - } - - // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 - for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 - } - for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 - } - s.actualTableLog = s.setMaxHeight(int(nonNullRank)) - maxNbBits := s.actualTableLog - - // fill result into tree (val, nbBits) - if maxNbBits > tableLogMax { - return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) - } - var nbPerRank [tableLogMax + 1]uint16 - var valPerRank [16]uint16 - for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ - } - // determine stating value per rank - { - min := uint16(0) - for n := maxNbBits; n > 0; n-- { - // get starting value within each rank - valPerRank[n] = min - min += nbPerRank[n] - min >>= 1 - } - } - - // push nbBits per symbol, symbol order - for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits - } - - // assign value within rank, symbol order - t := s.cTable[:s.symbolLen] - for n, val := range t { - nbits := val.nBits & 15 - v := valPerRank[nbits] - t[n].val = v - valPerRank[nbits] = v + 1 - } - - return nil -} - -// huffSort will sort symbols, decreasing order. -func (s *Scratch) huffSort() { - type rankPos struct { - base uint32 - current uint32 - } - - // Clear nodes - nodes := s.nodes[:huffNodesLen+1] - s.nodes = nodes - nodes = nodes[1 : huffNodesLen+1] - - // Sort into buckets based on length of symbol count. 
- var rank [32]rankPos - for _, v := range s.count[:s.symbolLen] { - r := highBit32(v+1) & 31 - rank[r].base++ - } - // maxBitLength is log2(BlockSizeMax) + 1 - const maxBitLength = 18 + 1 - for n := maxBitLength; n > 0; n-- { - rank[n-1].base += rank[n].base - } - for n := range rank[:maxBitLength] { - rank[n].current = rank[n].base - } - for n, c := range s.count[:s.symbolLen] { - r := (highBit32(c+1) + 1) & 31 - pos := rank[r].current - rank[r].current++ - prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { - nodes[pos&huffNodesMask] = prev - pos-- - prev = nodes[(pos-1)&huffNodesMask] - } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} - } -} - -func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { - maxNbBits := s.actualTableLog - huffNode := s.nodes[1 : huffNodesLen+1] - //huffNode = huffNode[: huffNodesLen] - - largestBits := huffNode[lastNonNull].nbBits - - // early exit : no elt > maxNbBits - if largestBits <= maxNbBits { - return largestBits - } - totalCost := int(0) - baseCost := int(1) << (largestBits - maxNbBits) - n := uint32(lastNonNull) - - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits - n-- - } - // n stops at huffNode[n].nbBits <= maxNbBits - - for huffNode[n].nbBits == maxNbBits { - n-- - } - // n end at index of smallest symbol using < maxNbBits - - // renorm totalCost - totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ - - // repay normalized cost - { - const noSymbol = 0xF0F0F0F0 - var rankLast [tableLogMax + 2]uint32 - - for i := range rankLast[:] { - rankLast[i] = noSymbol - } - - // Get pos of last (smallest) symbol per rank - { - currentNbBits := maxNbBits - for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { - continue - } - currentNbBits = huffNode[pos].nbBits // < maxNbBits - rankLast[maxNbBits-currentNbBits] = uint32(pos) - } - } - - for totalCost > 0 { - nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 - - for ; nBitsToDecrease > 1; nBitsToDecrease-- { - highPos := rankLast[nBitsToDecrease] - lowPos := rankLast[nBitsToDecrease-1] - if highPos == noSymbol { - continue - } - if lowPos == noSymbol { - break - } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count - if highTotal <= lowTotal { - break - } - } - // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
- // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary - // FIXME: try to remove - for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { - nBitsToDecrease++ - } - totalCost -= 1 << (nBitsToDecrease - 1) - if rankLast[nBitsToDecrease-1] == noSymbol { - // this rank is no longer empty - rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] - } - huffNode[rankLast[nBitsToDecrease]].nbBits++ - if rankLast[nBitsToDecrease] == 0 { - /* special case, reached largest symbol */ - rankLast[nBitsToDecrease] = noSymbol - } else { - rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { - rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ - } - } - } - - for totalCost < 0 { /* Sometimes, cost correction overshoot */ - if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { - n-- - } - huffNode[n+1].nbBits-- - rankLast[1] = n + 1 - totalCost++ - continue - } - huffNode[rankLast[1]+1].nbBits-- - rankLast[1]++ - totalCost++ - } - } - return maxNbBits -} - -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go deleted file mode 100644 index 42a237eac4..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ /dev/null @@ -1,1167 +0,0 @@ -package huff0 - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/klauspost/compress/fse" -) - -type dTable struct { - single []dEntrySingle -} - -// single-symbols decoding -type dEntrySingle struct { - entry uint16 -} - -// Uses special code for all tables that are < 8 bits. -const use8BitTables = true - -// ReadTable will read a table from the input. -// The size of the input may be larger than the table definition. -// Any content remaining after the table definition will be returned. -// If no Scratch is provided a new one is allocated. -// The returned Scratch can be used for encoding or decoding input using this table. 
-func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(nil) - if err != nil { - return s, nil, err - } - if len(in) <= 1 { - return s, nil, errors.New("input too small for table") - } - iSize := in[0] - in = in[1:] - if iSize >= 128 { - // Uncompressed - oSize := iSize - 127 - iSize = (oSize + 1) / 2 - if int(iSize) > len(in) { - return s, nil, errors.New("input too small for table") - } - for n := uint8(0); n < oSize; n += 2 { - v := in[n/2] - s.huffWeight[n] = v >> 4 - s.huffWeight[n+1] = v & 15 - } - s.symbolLen = uint16(oSize) - in = in[iSize:] - } else { - if len(in) < int(iSize) { - return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) - } - // FSE compressed weights - s.fse.DecompressLimit = 255 - hw := s.huffWeight[:] - s.fse.Out = hw - b, err := fse.Decompress(in[:iSize], s.fse) - s.fse.Out = nil - if err != nil { - return s, nil, err - } - if len(b) > 255 { - return s, nil, errors.New("corrupt input: output table too large") - } - s.symbolLen = uint16(len(b)) - in = in[iSize:] - } - - // collect weight stats - var rankStats [16]uint32 - weightTotal := uint32(0) - for _, v := range s.huffWeight[:s.symbolLen] { - if v > tableLogMax { - return s, nil, errors.New("corrupt input: weight too large") - } - v2 := v & 15 - rankStats[v2]++ - // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. - weightTotal += (1 << v2) >> 1 - } - if weightTotal == 0 { - return s, nil, errors.New("corrupt input: weights zero") - } - - // get last non-null symbol weight (implied, total must be 2^n) - { - tableLog := highBit32(weightTotal) + 1 - if tableLog > tableLogMax { - return s, nil, errors.New("corrupt input: tableLog too big") - } - s.actualTableLog = uint8(tableLog) - // determine last weight - { - total := uint32(1) << tableLog - rest := total - weightTotal - verif := uint32(1) << highBit32(rest) - lastWeight := highBit32(rest) + 1 - if verif != rest { - // last value must be a clean power of 2 - return s, nil, errors.New("corrupt input: last value not power of two") - } - s.huffWeight[s.symbolLen] = uint8(lastWeight) - s.symbolLen++ - rankStats[lastWeight]++ - } - } - - if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { - // by construction : at least 2 elts of rank 1, must be even - return s, nil, errors.New("corrupt input: min elt size, even check failed ") - } - - // TODO: Choose between single/double symbol decoding - - // Calculate starting value for each rank - { - var nextRankStart uint32 - for n := uint8(1); n < s.actualTableLog+1; n++ { - current := nextRankStart - nextRankStart += rankStats[n] << (n - 1) - rankStats[n] = current - } - } - - // fill DTable (always full size) - tSize := 1 << tableLogMax - if len(s.dt.single) != tSize { - s.dt.single = make([]dEntrySingle, tSize) - } - cTable := s.prevTable - if cap(cTable) < maxSymbolValue+1 { - cTable = make([]cTableEntry, 0, maxSymbolValue+1) - } - cTable = cTable[:maxSymbolValue+1] - s.prevTable = cTable[:s.symbolLen] - s.prevTableLog = s.actualTableLog - - for n, w := range s.huffWeight[:s.symbolLen] { - if w == 0 { - cTable[n] = cTableEntry{ - val: 0, - nBits: 0, - } - continue - } - length := (uint32(1) << w) >> 1 - d := dEntrySingle{ - entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), - } - - rank := &rankStats[w] - cTable[n] = cTableEntry{ - val: uint16(*rank >> (w - 1)), - nBits: uint8(d.entry), - } - - single := s.dt.single[*rank : *rank+length] - for i := range single { - single[i] = d - } - *rank += length - } - - 
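Aside on the fill loop just above: every symbol of weight w owns a run of 1&lt;&lt;(w-1) consecutive slots, all holding the same dEntrySingle, whose 16-bit entry packs the bit count in the low byte and the symbol in the high byte. A sketch of that packing (helper names are illustrative):

```go
package main

import "fmt"

// packEntry mirrors the dEntrySingle layout used above: the low byte holds
// the number of bits the decoder must consume, the high byte the symbol.
func packEntry(nBits uint8, symbol byte) uint16 {
	return uint16(nBits) | uint16(symbol)<<8
}

func unpackEntry(e uint16) (nBits uint8, symbol byte) {
	return uint8(e), byte(e >> 8)
}

func main() {
	e := packEntry(5, 'A')
	n, s := unpackEntry(e)
	fmt.Printf("entry=%#04x nBits=%d symbol=%c\n", e, n, s) // entry=0x4105 nBits=5 symbol=A
}
```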
return s, in, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { - if cap(s.Out) < s.MaxDecodedSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:s.MaxDecodedSize] - s.Out, err = s.Decoder().Decompress1X(s.Out, in) - return s.Out, err -} - -// Decompress4X will decompress a 4X encoded stream. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// The length of the supplied input must match the end of a block exactly. -// The destination size of the uncompressed data must be known and provided. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { - if dstSize > s.MaxDecodedSize { - return nil, ErrMaxDecodedSizeExceeded - } - if cap(s.Out) < dstSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:dstSize] - s.Out, err = s.Decoder().Decompress4X(s.Out, in) - return s.Out, err -} - -// Decoder will return a stateless decoder that can be used by multiple -// decompressors concurrently. -// Before this is called, the table must be initialized with ReadTable. -// The Decoder is still linked to the scratch buffer so that cannot be reused. -// However, it is safe to discard the scratch. -func (s *Scratch) Decoder() *Decoder { - return &Decoder{ - dt: s.dt, - actualTableLog: s.actualTableLog, - bufs: &s.decPool, - } -} - -// Decoder provides stateless decoding. -type Decoder struct { - dt dTable - actualTableLog uint8 - bufs *sync.Pool -} - -func (d *Decoder) buffer() *[4][256]byte { - buf, ok := d.bufs.Get().(*[4][256]byte) - if ok { - return buf - } - return &[4][256]byte{} -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress1X8BitExactly(dst, src) - } - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - switch d.actualTableLog { - case 8: - const shift = 8 - 8 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
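Stepping back from the hot loop for a moment: the deprecated Scratch.Decompress1X above delegates to the stateless Decoder, and ReadTable hands back the payload that follows the serialized table. A sketch of how these public huff0 entry points compose, written against the API as documented above (not part of this diff):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	// Skewed, multi-symbol input so Compress1X neither rejects it as
	// incompressible nor falls back to RLE.
	in := bytes.Repeat([]byte("aaaabbc"), 200)

	comp, _, err := huff0.Compress1X(in, nil)
	if err != nil {
		panic(err)
	}

	// The compressed block starts with the serialized table; ReadTable
	// consumes it and returns the remaining entropy-coded payload.
	s, payload, err := huff0.ReadTable(comp, nil)
	if err != nil {
		panic(err)
	}

	// Decompress1X uses cap(dst) as the decoded-size limit.
	out, err := s.Decoder().Decompress1X(make([]byte, 0, len(in)), payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(in, out)) // true
}
```

Compress1X with a nil Scratch writes a fresh table, so ReadTable can recover it from the front of the compressed block.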
- } - } - case 7: - const shift = 8 - 7 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 6: - const shift = 8 - 6 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 5: - const shift = 8 - 5 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 4: - const shift = 8 - 4 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 3: - const shift = 8 - 3 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
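On the eight near-identical switch cases: each one exists so that shift is a compile-time constant inside its loop; presumably this lets the compiler fold the shift amount into the instruction instead of keeping it in a register. A toy illustration of the same specialization:

```go
package main

import "fmt"

// decodeVar takes the shift as a runtime value; decode6 fixes it as a
// constant, the way each case of the switch above does for its table log.
func decodeVar(v uint64, shift uint8) uint8 { return uint8(v >> (56 + shift)) }

func decode6(v uint64) uint8 {
	const shift = 8 - 6 // constant: folded into the shift instruction
	return uint8(v >> (56 + shift))
}

func main() {
	v := uint64(0xCAFE) << 48
	fmt.Println(decodeVar(v, 2), decode6(v)) // identical results
}
```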
- } - } - case 2: - const shift = 8 - 2 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 1: - const shift = 8 - 1 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - default: - d.bufs.Put(bufs) - return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - shift := (8 - d.actualTableLog) & 7 - - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()>>shift] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - const shift = 56 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
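The append just above fires only when off wraps: off is a uint8 that grows by 4, so off == 0 signals that exactly 256 symbols were buffered, replacing an explicit counter and bounds check. A self-contained sketch of the trick:

```go
package main

import "fmt"

// The decode loops above add to a uint8 offset and flush the 256-byte
// scratch buffer when it wraps to zero: "off == 0" after the increment
// means exactly 256 bytes were produced since the last flush.
func main() {
	var buf [256]byte
	var out []byte
	var off uint8

	for i := 0; i < 1000; i++ {
		buf[off] = byte(i) // stand-in for one decoded symbol
		off++
		if off == 0 { // uint8 wrapped: buffer is full
			out = append(out, buf[:]...)
		}
	}
	out = append(out, buf[:off]...) // drain the partial tail, as above
	fmt.Println(len(out))           // 1000
}
```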
- } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress4X8bitExactly(dst, src) - } - - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - shift := (56 + (8 - d.actualTableLog)) & 63 - - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
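Before the interleaved hot loop, a note on the block layout parsed at the top of decompress4X8bit: the first six bytes are a jump table of three little-endian uint16 stream lengths, and the fourth stream is whatever remains after them. A sketch of that header parse (splitStreams is a hypothetical helper):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// splitStreams mirrors the 4X jump table parsed above: bytes 0-5 hold three
// little-endian uint16 lengths for streams 0-2; stream 3 is the remainder.
func splitStreams(block []byte) (streams [4][]byte, err error) {
	if len(block) < 6+4 { // jump table plus at least one byte per stream
		return streams, errors.New("input too small")
	}
	start := 6
	for i := 0; i < 3; i++ {
		length := int(binary.LittleEndian.Uint16(block[i*2:]))
		if start+length >= len(block) {
			return streams, errors.New("truncated input (or invalid offset)")
		}
		streams[i] = block[start : start+length]
		start += length
	}
	streams[3] = block[start:]
	return streams, nil
}

func main() {
	// Hypothetical block: lengths 2, 3, 1, then 2+3+1+2 bytes of stream data.
	block := []byte{2, 0, 3, 0, 1, 0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'}
	streams, err := splitStreams(block)
	fmt.Println(streams, err)
}
```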
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - // Decode remaining. 
- remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - if br.finished() { - d.bufs.Put(buf) - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[uint8(br.value>>shift)].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - d.bufs.Put(buf) - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const shift = 56 - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
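On the remainder loop shown above: the destination is cut into four quarters of dstEvery = (dstSize+3)/4 bytes, and only the last quarter may be short, hence the endsAt clamp. A simplified sketch of the bounds arithmetic (it ignores the bytes already decoded by the main loop):

```go
package main

import "fmt"

// quarterBounds computes the per-stream output windows used by the 4X
// remainder loop above: four slices of dstEvery bytes, with the final
// stream's end clamped to the real buffer length.
func quarterBounds(dstSize int) [4][2]int {
	dstEvery := (dstSize + 3) / 4
	var b [4][2]int
	for i := 0; i < 4; i++ {
		offset := dstEvery * i
		endsAt := offset + dstEvery
		if endsAt > dstSize {
			endsAt = dstSize
		}
		b[i] = [2]int{offset, endsAt}
	}
	return b
}

func main() {
	fmt.Println(quarterBounds(10)) // [[0 3] [3 6] [6 9] [9 10]]
}
```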
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - // copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. 
-	remainBytes := dstEvery - (decoded / 4)
-	for i := range br {
-		offset := dstEvery * i
-		endsAt := offset + remainBytes
-		if endsAt > len(out) {
-			endsAt = len(out)
-		}
-		br := &br[i]
-		bitsLeft := br.remaining()
-		for bitsLeft > 0 {
-			if br.finished() {
-				d.bufs.Put(buf)
-				return nil, io.ErrUnexpectedEOF
-			}
-			if br.bitsRead >= 56 {
-				if br.off >= 4 {
-					v := br.in[br.off-4:]
-					v = v[:4]
-					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-					br.value |= uint64(low) << (br.bitsRead - 32)
-					br.bitsRead -= 32
-					br.off -= 4
-				} else {
-					for br.off > 0 {
-						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
-						br.bitsRead -= 8
-						br.off--
-					}
-				}
-			}
-			// end inline...
-			if offset >= endsAt {
-				d.bufs.Put(buf)
-				return nil, errors.New("corruption detected: stream overrun 4")
-			}
-
-			// Read value and increment offset.
-			v := single[br.peekByteFast()].entry
-			nBits := uint8(v)
-			br.advance(nBits)
-			bitsLeft -= uint(nBits)
-			out[offset] = uint8(v >> 8)
-			offset++
-		}
-		if offset != endsAt {
-			d.bufs.Put(buf)
-			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
-		}
-
-		decoded += offset - dstEvery*i
-		err = br.close()
-		if err != nil {
-			d.bufs.Put(buf)
-			return nil, err
-		}
-	}
-	d.bufs.Put(buf)
-	if dstSize != decoded {
-		return nil, errors.New("corruption detected: short output block")
-	}
-	return dst, nil
-}
-
-// matches will compare a decoding table to a coding table.
-// Errors are written to the writer.
-// Nothing will be written if table is ok.
-func (s *Scratch) matches(ct cTable, w io.Writer) {
-	if s == nil || len(s.dt.single) == 0 {
-		return
-	}
-	dt := s.dt.single[:1<<s.actualTableLog]
-	tablelog := s.actualTableLog
-	ok := 0
-	broken := 0
-	for sym, enc := range ct {
-		errs := 0
-		broken++
-		if enc.nBits == 0 {
-			for _, dec := range dt {
-				if uint8(dec.entry>>8) == byte(sym) {
-					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
-					errs++
-					break
-				}
-			}
-			if errs == 0 {
-				broken--
-			}
-			continue
-		}
-		// Unused bits in input
-		ub := tablelog - enc.nBits
-		top := enc.val << ub
-		// decoder looks at top bits.
-		dec := dt[top]
-		if uint8(dec.entry) != enc.nBits {
-			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
-			errs++
-		}
-		if uint8(dec.entry>>8) != uint8(sym) {
-			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
-			errs++
-		}
-		if errs > 0 {
-			fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
-			continue
-		}
-		// Ensure that all combinations are covered.
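The coverage check that matches performs ("all combinations are covered") is what makes single-table decoding valid: a code of nBits bits must own every slot that shares its top bits, so peeking tableLog bits always lands on a correct entry. A toy end-to-end illustration with a hypothetical 3-bit code (real streams track exact bit counts rather than relying on zero padding):

```go
package main

import "fmt"

func main() {
	const tableLog = 3
	type entry struct {
		nBits  uint8
		symbol byte
	}
	var table [1 << tableLog]entry

	// Hypothetical canonical code: a=0 (1 bit), b=10, c=110, d=111.
	codes := []struct {
		val    uint16 // code value, left-aligned within tableLog bits
		nBits  uint8
		symbol byte
	}{{0b000, 1, 'a'}, {0b100, 2, 'b'}, {0b110, 3, 'c'}, {0b111, 3, 'd'}}
	for _, c := range codes {
		// A code of nBits owns every slot sharing its top bits; this is the
		// replication that matches() verifies exhaustively above.
		for i := uint16(0); i < 1<<(tableLog-c.nBits); i++ {
			table[c.val|i] = entry{c.nBits, c.symbol}
		}
	}

	// Decode "0 10 110 0" -> "abca"; bits kept left-aligned in a uint16.
	bits, remaining := uint16(0b0101100)<<(16-7), 7
	var out []byte
	for remaining > 0 {
		e := table[bits>>(16-tableLog)] // peek top tableLog bits
		out = append(out, e.symbol)
		bits <<= e.nBits
		remaining -= int(e.nBits)
	}
	fmt.Println(string(out)) // abca
}
```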
- for i := uint16(0); i < (1 << ub); i++ { - vval := top | i - dec := dt[vval] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) - break - } - } - if errs == 0 { - ok++ - broken-- - } - } - if broken > 0 { - fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) - } -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go deleted file mode 100644 index ba7e8e6b02..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ /dev/null @@ -1,226 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// This file contains the specialisation of Decoder.Decompress4X -// and Decoder.Decompress1X that use an asm implementation of thir main loops. -package huff0 - -import ( - "errors" - "fmt" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog > 8. -// -//go:noescape -func decompress4x_main_loop_amd64(ctx *decompress4xContext) - -// decompress4x_8b_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog <= 8 which decodes 4 entries -// per loop. -// -//go:noescape -func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) - -// fallback8BitSize is the size where using Go version is faster. -const fallback8BitSize = 800 - -type decompress4xContext struct { - pbr *[4]bitReaderShifted - peekBits uint8 - out *byte - dstEvery int - tbl *dEntrySingle - decoded int - limit *byte -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - - use8BitTables := d.actualTableLog <= 8 - if cap(dst) < fallback8BitSize && use8BitTables { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - var decoded int - - if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { - ctx := decompress4xContext{ - pbr: &br, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - out: &out[0], - dstEvery: dstEvery, - tbl: &single[0], - limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. - } - if use8BitTables { - decompress4x_8b_main_loop_amd64(&ctx) - } else { - decompress4x_main_loop_amd64(&ctx) - } - - decoded = ctx.decoded - out = out[decoded/4:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress1X when tablelog > 8. -// -//go:noescape -func decompress1x_main_loop_amd64(ctx *decompress1xContext) - -// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation -// of Decompress1X when tablelog > 8. -// -//go:noescape -func decompress1x_main_loop_bmi2(ctx *decompress1xContext) - -type decompress1xContext struct { - pbr *bitReaderShifted - peekBits uint8 - out *byte - outCap int - tbl *dEntrySingle - decoded int -} - -// Error reported by asm implementations -const error_max_decoded_size_exeeded = -1 - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. 
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:maxDecodedSize] - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - - if maxDecodedSize >= 4 { - ctx := decompress1xContext{ - pbr: &br, - out: &dst[0], - outCap: maxDecodedSize, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - tbl: &d.dt.single[0], - } - - if cpuinfo.HasBMI2() { - decompress1x_main_loop_bmi2(&ctx) - } else { - decompress1x_main_loop_amd64(&ctx) - } - if ctx.decoded == error_max_decoded_size_exeeded { - return nil, ErrMaxDecodedSizeExceeded - } - - dst = dst[:ctx.decoded] - } - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s deleted file mode 100644 index 8d2187a2ce..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ /dev/null @@ -1,846 +0,0 @@ -// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. - -//go:build amd64 && !appengine && !noasm && gc - -// func decompress4x_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - XORQ DX, DX - - // Preload values - MOVQ ctx+0(FP), AX - MOVBQZX 8(AX), DI - MOVQ 16(AX), SI - MOVQ 48(AX), BX - MOVQ 24(AX), R9 - MOVQ 32(AX), R10 - MOVQ (AX), R11 - - // Main loop -main_loop: - MOVQ SI, R8 - CMPQ R8, BX - SETGE DL - - // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill0 - MOVQ 24(R11), AX - SUBQ $0x20, R13 - SUBQ $0x04, AX - MOVQ (R11), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 24(R11) - ORQ R14, R12 - - // exhausted = exhausted || (br0.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br0.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 - - // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX - - // br0.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) - - // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 - - // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill1 - MOVQ 72(R11), AX - SUBQ $0x20, R13 - SUBQ $0x04, AX - MOVQ 48(R11), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 72(R11) - ORQ R14, R12 - - // exhausted = exhausted || (br1.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL - -skip_fill1: - // val0 := 
br1.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br1.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 - - // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX - - // br1.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) - - // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 - - // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill2 - MOVQ 120(R11), AX - SUBQ $0x20, R13 - SUBQ $0x04, AX - MOVQ 96(R11), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 120(R11) - ORQ R14, R12 - - // exhausted = exhausted || (br2.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br2.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 - - // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX - - // br2.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) - - // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 - - // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill3 - MOVQ 168(R11), AX - SUBQ $0x20, R13 - SUBQ $0x04, AX - MOVQ 144(R11), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 168(R11) - ORQ R14, R12 - - // exhausted = exhausted || (br3.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br3.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 - - // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX - - // br3.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) - - // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) - ADDQ $0x02, SI - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), SI - SHLQ $0x02, SI - MOVQ SI, 40(AX) - RET - -// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - XORQ DX, DX - - // Preload values - MOVQ ctx+0(FP), CX - MOVBQZX 8(CX), DI - MOVQ 16(CX), BX - MOVQ 48(CX), SI - MOVQ 24(CX), R9 - MOVQ 32(CX), R10 - MOVQ (CX), R11 - - // Main loop -main_loop: - MOVQ BX, R8 - CMPQ R8, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill0 - MOVQ 24(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ (R11), R15 - - // b.value |= 
uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 24(R11) - ORQ R15, R12 - - // exhausted = exhausted || (br0.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br0.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br0.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // val2 := br0.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br0.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // val3 := br0.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br0.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) - - // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 - - // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill1 - MOVQ 72(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 48(R11), R15 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 72(R11) - ORQ R15, R12 - - // exhausted = exhausted || (br1.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br1.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br1.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // val2 := br1.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br1.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // val3 := br1.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br1.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) - - // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 - - // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill2 - MOVQ 120(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 96(R11), R15 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 120(R11) - ORQ R15, R12 - - // exhausted = exhausted || (br2.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, 
DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br2.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br2.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // val2 := br2.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br2.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // val3 := br2.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br2.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) - - // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 - - // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill3 - MOVQ 168(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 144(R11), R15 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 168(R11) - ORQ R15, R12 - - // exhausted = exhausted || (br3.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br3.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br3.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // val2 := br3.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br3.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // val3 := br3.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br3.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) - - // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) - ADDQ $0x04, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress1x_main_loop_amd64(ctx *decompress1xContext) -TEXT ·decompress1x_main_loop_amd64(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ 
CX, BX - JGE error_max_decoded_size_exeeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - -bitReader_fillFast_1_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - -bitReader_fillFast_2_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exeeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET - -// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) -// Requires: BMI2 -TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exeeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_1_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_2_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exeeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go deleted file mode 100644 index 908c17de63..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ /dev/null @@ -1,299 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// This file contains a generic 
implementation of Decoder.Decompress4X. -package huff0 - -import ( - "errors" - "fmt" -) - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. 
- if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - //copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) 
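The generic implementations above compile only when the //go:build line at the top of decompress_generic.go excludes the assembly build; within an amd64 build, the deleted Decompress1X wrapper further selects a BMI2 loop at runtime. A runnable sketch of that runtime dispatch pattern, with stand-in function names (golang.org/x/sys/cpu plays the role of the vendored cpuinfo package):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// Runtime dispatch in the style of Decompress1X above, which picks a BMI2
// assembly loop when the CPU supports it. Both candidates here are plain
// Go stand-ins for illustration.
var mainLoop = mainLoopGeneric

func init() {
	if cpu.X86.HasBMI2 { // false on non-x86, so the generic path stays
		mainLoop = mainLoopBMI2
	}
}

func mainLoopGeneric(n int) int { return n }
func mainLoopBMI2(n int) int    { return n } // stand-in for the tuned loop

func main() {
	fmt.Println(mainLoop(42))
}
```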
- - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go deleted file mode 100644 index e8ad17ad08..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ /dev/null @@ -1,337 +0,0 @@ -// Package huff0 provides fast huffman encoding as used in zstd. -// -// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. -package huff0 - -import ( - "errors" - "fmt" - "math" - "math/bits" - "sync" - - "github.com/klauspost/compress/fse" -) - -const ( - maxSymbolValue = 255 - - // zstandard limits tablelog to 11, see: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description - tableLogMax = 11 - tableLogDefault = 11 - minTablelog = 5 - huffNodesLen = 512 - - // BlockSizeMax is the maximum input size for a single block, uncompressed. - BlockSizeMax = 1<<18 - 1 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. - ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") - - // ErrTooBig is returned if input is too large for a single block. - ErrTooBig = errors.New("input too big") - - // ErrMaxDecodedSizeExceeded is returned if the maximum output size is exceeded. - ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") -) - -type ReusePolicy uint8 - -const ( - // ReusePolicyAllow will allow reuse if it produces smaller output. - ReusePolicyAllow ReusePolicy = iota - - // ReusePolicyPrefer will re-use aggressively if possible. - // This will not check if a new table will produce smaller output, - // except if the current table is impossible to use or - // compressed output is bigger than input. - ReusePolicyPrefer - - // ReusePolicyNone will disable re-use of tables. - // This is slightly faster than ReusePolicyAllow but may produce larger output. - ReusePolicyNone - - // ReusePolicyMust must allow reuse and produce smaller output. - ReusePolicyMust -) - -type Scratch struct { - count [maxSymbolValue + 1]uint32 - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is the output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // OutTable will contain the table data only, if a new table has been generated.
- // Slice of the returned data. - OutTable []byte - - // OutData will contain the compressed data. - // Slice of the returned data. - OutData []byte - - // MaxDecodedSize will set the maximum allowed output size. - // This value will automatically be set to BlockSizeMax if not set. - // Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded. - MaxDecodedSize int - - br byteReader - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - // Must be <= 11 and >= 5. - TableLog uint8 - - // Reuse will specify the reuse policy. - Reuse ReusePolicy - - // WantLogLess allows specifying a log 2 reduction that should at least be achieved, - // otherwise the block will be returned as incompressible. - // The reduction should then at least be (input size >> WantLogLess). - // If WantLogLess == 0 any improvement will do. - WantLogLess uint8 - - symbolLen uint16 // Length of active part of the symbol table. - maxCount int // count of the most probable symbol - clearCount bool // clear count - actualTableLog uint8 // Selected tablelog. - prevTableLog uint8 // Tablelog for previous table - prevTable cTable // Table used for previous compression. - cTable cTable // compression table - dt dTable // decompression table - nodes []nodeElt - tmpOut [4][]byte - fse *fse.Scratch - decPool sync.Pool // *[4][256]byte buffers. - huffWeight [maxSymbolValue + 1]byte -} - -// TransferCTable will transfer the previously used compression table. -func (s *Scratch) TransferCTable(src *Scratch) { - if cap(s.prevTable) < len(src.prevTable) { - s.prevTable = make(cTable, 0, maxSymbolValue+1) - } - s.prevTable = s.prevTable[:len(src.prevTable)] - copy(s.prevTable, src.prevTable) - s.prevTableLog = src.prevTableLog -} - -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if len(in) > BlockSizeMax { - return nil, ErrTooBig - } - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = maxSymbolValue - } - if s.TableLog == 0 { - s.TableLog = tableLogDefault - } - if s.TableLog > tableLogMax || s.TableLog < minTablelog { - return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) - } - if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { - s.MaxDecodedSize = BlockSizeMax - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - s.Out = s.Out[:0] - - s.OutTable = nil - s.OutData = nil - if cap(s.nodes) < huffNodesLen+1 { - s.nodes = make([]nodeElt, 0, huffNodesLen+1) - } - s.nodes = s.nodes[:0] - if s.fse == nil { - s.fse = &fse.Scratch{} - } - s.br.init(in) - - return s, nil -} - -type cTable []cTableEntry - -func (c cTable) write(s *Scratch) error { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE.
- hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - s.Out = append(s.Out, uint8(len(b))) - s.Out = append(s.Out, b...) - return nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return ErrIncompressible - } - op := s.Out - // special case, pack weights 4 bits/weight. - op = append(op, 128|(maxSymbolValue-1)) - // be sure it doesn't cause msan issue in final combination - huffWeight[maxSymbolValue] = 0 - for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { - op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) - } - s.Out = op - return nil -} - -func (c cTable) estTableSize(s *Scratch) (sz int, err error) { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. - hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - sz += 1 + len(b) - return sz, nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return 0, ErrIncompressible - } - // special case, pack weights 4 bits/weight. - sz += 1 + int(maxSymbolValue/2) - return sz, nil -} - -// estimateSize returns the estimated size in bytes of the input represented in the -// histogram supplied. -func (c cTable) estimateSize(hist []uint32) int { - nbBits := uint32(7) - for i, v := range c[:len(hist)] { - nbBits += uint32(v.nBits) * hist[i] - } - return int(nbBits >> 3) -} - -// minSize returns the minimum possible size considering the shannon limit. 
-func (s *Scratch) minSize(total int) int { - nbBits := float64(7) - fTotal := float64(total) - for _, v := range s.count[:s.symbolLen] { - n := float64(v) - if n > 0 { - nbBits += math.Log2(fTotal/n) * n - } - } - return int(nbBits) >> 3 -} - -func highBit32(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go deleted file mode 100644 index 3954c51219..0000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -// Package cpuinfo gives runtime info about the current CPU. -// -// This is a very limited module meant for use internally -// in this project. For a more versatile solution check -// https://github.com/klauspost/cpuid. -package cpuinfo - -// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. -func HasBMI1() bool { - return hasBMI1 -} - -// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. -func HasBMI2() bool { - return hasBMI2 -} - -// DisableBMI2 will disable BMI2, for testing purposes. -// Call the returned function to restore the previous state. -func DisableBMI2() func() { - old := hasBMI2 - hasBMI2 = false - return func() { - hasBMI2 = old - } -} - -// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. -func HasBMI() bool { - return HasBMI1() && HasBMI2() -} - -var hasBMI1 bool -var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go deleted file mode 100644 index e802579c4f..0000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package cpuinfo - -// go:noescape -func x86extensions() (bmi1, bmi2 bool) - -func init() { - hasBMI1, hasBMI2 = x86extensions() -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s deleted file mode 100644 index 4465fbe9e9..0000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s +++ /dev/null @@ -1,36 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" -#include "funcdata.h" -#include "go_asm.h" - -TEXT ·x86extensions(SB), NOSPLIT, $0 - // 1. determine max EAX value - XORQ AX, AX - CPUID - - CMPQ AX, $7 - JB unsupported - - // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" - MOVQ $7, AX - MOVQ $0, CX - CPUID - - BTQ $3, BX // bit 3 = BMI1 - SETCS AL - - BTQ $8, BX // bit 8 = BMI2 - SETCS AH - - MOVB AL, bmi1+0(FP) - MOVB AH, bmi2+1(FP) - RET - -unsupported: - XORQ AX, AX - MOVB AL, bmi1+0(FP) - MOVB AL, bmi2+1(FP) - RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE deleted file mode 100644 index 6050c10f4c..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go deleted file mode 100644 index 40796a49d6..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. 
-func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. 
Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. -func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go deleted file mode 100644 index 77395a6b8b..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go deleted file mode 100644 index 13c6040a5d..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. 
-const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. 
-func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. 
- w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go deleted file mode 100644 index 298c4f8e97..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes - 1 <= offset && offset <= 65535 - 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - 3+3 bytes).
The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. 
This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go deleted file mode 100644 index 34d01f4aa6..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snapref implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. 
-// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snapref - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. 
- maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod deleted file mode 100644 index 2263853fca..0000000000 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ /dev/null @@ -1,4 +0,0 @@ -module github.com/klauspost/compress - -go 1.16 - diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md deleted file mode 100644 index 65b38abed8..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ /dev/null @@ -1,441 +0,0 @@ -# zstd - -[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. -It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. -A high performance compression algorithm is implemented. For now focused on speed. - -This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. - -This package is pure Go and without use of "unsafe". - -The `zstd` package is provided as open source software using a Go standard license. - -Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. - -For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). - -## Installation - -Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. - -[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) - -## Compressor - -### Status: - -STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively -used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. - -There may still be specific combinations of data types/size/settings that could lead to edge cases, -so as always, testing is recommended. - -For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. - -* The "Fastest" compression ratio is roughly equivalent to zstd level 1. -* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). -* The "Better" compression ratio is roughly equivalent to zstd level 7. -* The "Best" compression ratio is roughly equivalent to zstd level 11. - -In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. -The compression ratio compared to stdlib is around level 3, but usually 3x as fast. 
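To get a feel for these trade-offs on your own data, a minimal sketch could compress the same payload at each named level. This assumes the `WithEncoderLevel()` option covered below and the level constants `SpeedFastest`, `SpeedDefault`, `SpeedBetterCompression` and `SpeedBestCompression`, which are not shown in this README excerpt:

```Go
import "github.com/klauspost/compress/zstd"

// CompressAt compresses src at the given level. The four named levels
// (assumed constants: zstd.SpeedFastest, zstd.SpeedDefault,
// zstd.SpeedBetterCompression, zstd.SpeedBestCompression) correspond
// roughly to zstd levels 1, 3, 7 and 11.
func CompressAt(src []byte, level zstd.EncoderLevel) ([]byte, error) {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(level))
	if err != nil {
		return nil, err
	}
	defer enc.Close()
	// Supplying a pre-sized destination buffer avoids an allocation.
	return enc.EncodeAll(src, make([]byte, 0, len(src))), nil
}
```

Comparing the output sizes and timings from a loop over the four levels is usually enough to pick a trade-off for a given workload.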
- - -### Usage - -An Encoder can be used either for compressing a stream via the -`io.WriteCloser` interface supported by the Encoder or for multiple independent -tasks via the `EncodeAll` function. -Smaller encodes are encouraged to use the EncodeAll function. -Use `NewWriter` to create a new instance that can be used for both. - -To create a writer with default options, do like this: - -```Go -// Compress input to output. -func Compress(in io.Reader, out io.Writer) error { - enc, err := zstd.NewWriter(out) - if err != nil { - return err - } - _, err = io.Copy(enc, in) - if err != nil { - enc.Close() - return err - } - return enc.Close() -} -``` - -Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. -Even if your encode fails, you should still call `Close()` to release any resources that may be held up. - -The above is fine for big encodes. However, whenever possible try to *reuse* the writer. - -To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. -This will allow the encoder to reuse all resources and avoid wasteful allocations. - -Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part -of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change -in the future. So if you want to limit concurrency for future updates, specify the concurrency -you would like. - -If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` -which will compress input as each block is completed, blocking on writes until each has completed. - -You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined -compression settings can be specified. - -#### Future Compatibility Guarantees - -This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change. - -The goal will be to keep the default efficiency at the default zstd (level 3). -However the encoding should never be assumed to remain the same, -and you should not use hashes of compressed output for similarity checks. - -The Encoder can be assumed to produce the same output from the exact same code version. -However, there may be modes in the future that break this, -although they will not be enabled without an explicit option. - -This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder. - -Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59), -[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) -and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames). - -#### Blocks - -For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. - -`EncodeAll` will encode all input in src and append it to dst. -This function can be called concurrently. -Each call will only run on the same goroutine as the caller. - -Encoded blocks can be concatenated and the result will be the combined input stream. -Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
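A minimal round-trip sketch of the concatenation property, using only `EncodeAll` and `DecodeAll` as described in this document:

```Go
import "github.com/klauspost/compress/zstd"

// ConcatRoundTrip encodes two blocks independently; appended together
// they decode as the single combined input stream "hello, world".
func ConcatRoundTrip() ([]byte, error) {
	enc, _ := zstd.NewWriter(nil)
	defer enc.Close()
	dec, _ := zstd.NewReader(nil)
	defer dec.Close()

	a := enc.EncodeAll([]byte("hello, "), nil)
	b := enc.EncodeAll([]byte("world"), nil)
	return dec.DecodeAll(append(a, b...), nil)
}
```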
- -Especially when encoding blocks you should take special care to reuse the encoder. -This will effectively make it run without allocations after a warmup period. -To make it run completely without allocations, supply a destination buffer with space for all content. - -```Go -import "github.com/klauspost/compress/zstd" - -// Create a writer that caches compressors. -// For this operation type we supply a nil Writer. -var encoder, _ = zstd.NewWriter(nil) - -// Compress a buffer. -// If you have a destination buffer, the allocation in the call can also be eliminated. -func Compress(src []byte) []byte { - return encoder.EncodeAll(src, make([]byte, 0, len(src))) -} -``` - -You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` -option when creating the writer. - -Using the Encoder for both a stream and individual blocks concurrently is safe. - -### Performance - -I have collected some speed examples to compare speed and compression against other compressors. - -* `file` is the input file. -* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. -* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". -* `insize`/`outsize` is the input/output size. -* `millis` is the number of milliseconds used for compression. -* `mb/s` is megabytes (2^20 bytes) per second. - -``` -Silesia Corpus: -http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip - -This package: -file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73821326 634 318.47 -silesia.tar zskp 2 211947520 67655404 1508 133.96 -silesia.tar zskp 3 211947520 64746933 3000 67.37 -silesia.tar zskp 4 211947520 60073508 16926 11.94 - -cgo zstd: -silesia.tar zstd 1 211947520 73605392 543 371.56 -silesia.tar zstd 3 211947520 66793289 864 233.68 -silesia.tar zstd 6 211947520 62916450 1913 105.66 -silesia.tar zstd 9 211947520 60212393 5063 39.92 - -gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1498 134.87 -silesia.tar gzkp 1 211947520 80088272 1009 200.31 - -GOB stream of binary data. Highly compressible. -https://files.klauspost.com/compress/gob-stream.7z - -file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 233948096 3230 564.34 -gob-stream zskp 2 1911399616 203997694 4997 364.73 -gob-stream zskp 3 1911399616 173526523 13435 135.68 -gob-stream zskp 4 1911399616 162195235 47559 38.33 - -gob-stream zstd 1 1911399616 249810424 2637 691.26 -gob-stream zstd 3 1911399616 208192146 3490 522.31 -gob-stream zstd 6 1911399616 193632038 6687 272.56 -gob-stream zstd 9 1911399616 177620386 16175 112.70 - -gob-stream gzstd 1 1911399616 357382013 9046 201.49 -gob-stream gzkp 1 1911399616 359136669 4885 373.08 - -The test data for the Large Text Compression Benchmark is the first -10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
-http://mattmahoney.net/dc/textdata.html - -file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343833605 3687 258.64 -enwik9 zskp 2 1000000000 317001237 7672 124.29 -enwik9 zskp 3 1000000000 291915823 15923 59.89 -enwik9 zskp 4 1000000000 261710291 77697 12.27 - -enwik9 zstd 1 1000000000 358072021 3110 306.65 -enwik9 zstd 3 1000000000 313734672 4784 199.35 -enwik9 zstd 6 1000000000 295138875 10290 92.68 -enwik9 zstd 9 1000000000 278348700 28549 33.40 - -enwik9 gzstd 1 1000000000 382578136 8608 110.78 -enwik9 gzkp 1 1000000000 382781160 5628 169.45 - -Highly compressible JSON file. -https://files.klauspost.com/compress/github-june-2days-2019.json.zst - -file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 -github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 -github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 -github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 - -github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 -github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 -github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 -github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 - -github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 -github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 - -VM Image, Linux mint with a few installed applications: -https://files.klauspost.com/compress/rawstudio-mint14.7z - -file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 -rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 -rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 -rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 - -rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 -rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 -rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 -rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 - -rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 -rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 - -CSV data: -https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst - -file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 -nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 -nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 -nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 - -nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 -nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 -nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 -nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 - -nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 -nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 -``` - -## Decompressor - -Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. - -This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), -kindly supplied by [fuzzit.dev](https://fuzzit.dev/). -The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, -or run it past its limits with ANY input provided. - -### Usage - -The package has been designed for two main usages: big streams of data and smaller in-memory buffers.
-Both are accessed by creating a `Decoder`. - -For streaming use, a simple setup could look like this: - -```Go -import "github.com/klauspost/compress/zstd" - -func Decompress(in io.Reader, out io.Writer) error { - d, err := zstd.NewReader(in) - if err != nil { - return err - } - defer d.Close() - - // Copy content... - _, err = io.Copy(out, d) - return err -} -``` - -When running with default settings, it is important to call the `Close` function when you no longer need the Reader, so that its goroutines are stopped. -Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. - -Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. -However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will only decompress data as it is requested. - -For decoding buffers, it could look something like this: - -```Go -import "github.com/klauspost/compress/zstd" - -// Create a reader that caches decompressors. -// For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) - -// Decompress a buffer. We don't supply a destination buffer, -// so it will be allocated by the decoder. -func Decompress(src []byte) ([]byte, error) { - return decoder.DecodeAll(src, nil) -} -``` - -Both of these cases should provide the functionality needed. -The decoder can be used for *concurrent* decompression of multiple buffers. -By default 4 decompressors will be created. - -It will only allow a certain number of concurrent operations to run. -To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder. -It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. - -### Dictionaries - -Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. - -Dictionaries are added individually to Decoders. -Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder. -To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. -Several dictionaries can be added at once. - -A dictionary will be used automatically for the data that specifies it. -A re-used Decoder will still contain the dictionaries registered. - -When registering multiple dictionaries with the same ID, the last one will be used. - -It is possible to use dictionaries when compressing data. - -To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used, -and it will likely be used even if it doesn't improve compression. - -The same dictionary must be used to decompress the content. - -For any real gains, the dictionary should be built with similar data. -If an unsuitable dictionary is used, the output may be slightly larger than with no dictionary. -Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. -For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). - -For now there is a fixed startup performance penalty for compressing content with dictionaries. -This will likely be improved over time. Just be sure to test performance when implementing.
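Putting the two options together, here is a minimal sketch of a dictionary round trip. It assumes `dict` holds a dictionary built with `zstd --train` on representative samples; the function name is illustrative:

```Go
import "github.com/klauspost/compress/zstd"

// RoundTripWithDict compresses src using a dictionary and decompresses it again.
func RoundTripWithDict(dict, src []byte) ([]byte, error) {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		return nil, err
	}
	defer enc.Close()

	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dict))
	if err != nil {
		return nil, err
	}
	defer dec.Close()

	compressed := enc.EncodeAll(src, nil)
	// The decoder selects the registered dictionary via the ID stored in the frame.
	return dec.DecodeAll(compressed, nil)
}
```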
- -### Allocation-less operation - -The decoder has been designed to operate without allocations after a warmup. - -This means that you should *store* the decoder for best performance. -To re-use a stream decoder, use the `Reset(r io.Reader) error` function to switch to another stream. -A decoder can safely be re-used even if the previous stream failed. - -To release the resources, you must call the `Close()` function on a decoder. -After this it can *no longer be reused*, but all running goroutines will be stopped. -So you *must* call this if you will no longer need the Reader. - -For decompressing smaller buffers a single decoder can be used. -When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. -In this case no unneeded allocations should be made. - -### Concurrency - -The buffer decoder does everything on the same goroutine and does nothing concurrently. -It can, however, decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. - -The stream decoder will create goroutines that: - -1) Read input and split it into blocks. -2) Decompress literals. -3) Decompress sequences. -4) Reconstruct the output stream. - -So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. - -The concurrency level will, for streams, determine how many blocks ahead the decompression will start. - -Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency. - -In practice this means that concurrency is often limited to utilizing about 3 cores effectively. - -### Benchmarks - -The first two are streaming decodes and the last are smaller inputs. - -Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. - -``` -BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op -BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op - -Concurrent blocks, performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op -``` - -This reflects the performance around May 2022, but this may be out of date. - -## Zstd inside ZIP files - -It is possible to use zstandard to compress individual files inside zip archives.
-While this isn't widely supported, it can be useful for internal files. - -To support the compression and decompression of these files, you must register a compressor and decompressor. - -It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT -to use the global registration functions. The main reason for this is that two registrations from -different packages will result in a panic. - -It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip -files concurrently, and using a single instance will allow reusing some resources. - -See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for -how to compress and decompress files inside zip archives. - -# Contributions - -Contributions are always welcome. -For new features/fixes, remember to add tests, and for performance enhancements include benchmarks. - -For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). - -This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go deleted file mode 100644 index 97299d499c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math/bits" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 // Maybe use [16]byte, but shifting is awkward. - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) int { - if n == 0 /*|| b.bitsRead >= 64 */ { - return 0 - } - return int(b.get32BitsFast(n)) -} - -// get32BitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) get32BitsFast(n uint8) uint32 { - const regMask = 64 - 1 - v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks.
- v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off >= 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// overread returns true if more bits have been requested than is on the stream. -func (b *bitReader) overread() bool { - return b.bitsRead > 64 -} - -// remain returns the number of bits remaining. -func (b *bitReader) remain() uint { - return b.off*8 + 64 - uint(b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if !b.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go deleted file mode 100644 index 78b3c61be3..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package zstd - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits32NC will add up to 31 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. 
-func (b *bitWriter) addBits32NC(value uint32, bits uint8) { - b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits64NC will add up to 64 bits. -// There must be space for 32 bits. -func (b *bitWriter) addBits64NC(value uint64, bits uint8) { - if bits <= 31 { - b.addBits32Clean(uint32(value), bits) - return - } - b.addBits32Clean(uint32(value), 32) - b.flush32() - b.addBits32Clean(uint32(value>>32), bits-32) -} - -// addBits32Clean will add up to 32 bits. -// It will not check if there is space for them. -// The input must not contain more bits than specified. -func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() error { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() - return nil -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go deleted file mode 100644 index f52d1aed6f..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ /dev/null @@ -1,720 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "sync" - - "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type blockType uint8 - -//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex - -const ( - blockTypeRaw blockType = iota - blockTypeRLE - blockTypeCompressed - blockTypeReserved -) - -type literalsBlockType uint8 - -const ( - literalsBlockRaw literalsBlockType = iota - literalsBlockRLE - literalsBlockCompressed - literalsBlockTreeless -) - -const ( - // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) - maxCompressedBlockSize = 128 << 10 - - compressedBlockOverAlloc = 16 - maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc - - // Maximum possible block size (all Raw+Uncompressed). 
- maxBlockSize = (1 << 21) - 1 - - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff - - // We support slightly less than the reference decoder to be able to - // use ints on 32 bit archs. - maxOffsetBits = 30 -) - -var ( - huffDecoderPool = sync.Pool{New: func() interface{} { - return &huff0.Scratch{} - }} - - fseDecoderPool = sync.Pool{New: func() interface{} { - return &fseDecoder{} - }} -) - -type blockDec struct { - // Raw source data of the block. - data []byte - dataStorage []byte - - // Destination of the decoded data. - dst []byte - - // Buffer for literals data. - literalBuf []byte - - // Window size of the block. - WindowSize uint64 - - err error - - // Check against this crc - checkCRC []byte - - // Frame to use for singlethreaded decoding. - // Should not be used by the decoder itself since parent may be another frame. - localFrame *frameDec - - sequence []seqVals - - async struct { - newHist *history - literals []byte - seqData []byte - seqSize int // Size of uncompressed sequences - fcs uint64 - } - - // Block is RLE, this is the size. - RLESize uint32 - - Type blockType - - // Is this the last block of a frame? - Last bool - - // Use less memory - lowMem bool -} - -func (b *blockDec) String() string { - if b == nil { - return "" - } - return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) -} - -func newBlockDec(lowMem bool) *blockDec { - b := blockDec{ - lowMem: lowMem, - } - return &b -} - -// reset will reset the block. -// Input must be a start of a block and will be at the end of the block when returned. -func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { - b.WindowSize = windowSize - tmp, err := br.readSmall(3) - if err != nil { - println("Reading block header:", err) - return err - } - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - b.Last = bh&1 != 0 - b.Type = blockType((bh >> 1) & 3) - // find size. - cSize := int(bh >> 3) - maxSize := maxCompressedBlockSizeAlloc - switch b.Type { - case blockTypeReserved: - return ErrReservedBlockType - case blockTypeRLE: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - b.RLESize = uint32(cSize) - if b.lowMem { - maxSize = cSize - } - cSize = 1 - case blockTypeCompressed: - if debugDecoder { - println("Data size on stream:", cSize) - } - b.RLESize = 0 - maxSize = maxCompressedBlockSizeAlloc - if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + compressedBlockOverAlloc - } - if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debugDecoder { - printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrCompressedSizeTooBig - } - // Empty compressed blocks must at least be 2 bytes - // for Literals_Block_Type and one for Sequences_Section_Header. - if cSize < 2 { - return ErrBlockTooSmall - } - case blockTypeRaw: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - - b.RLESize = 0 - // We do not need a destination for raw blocks. - maxSize = -1 - default: - panic("Invalid block type") - } - - // Read block data. 
- if cap(b.dataStorage) < cSize { - if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) - } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) - } - } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } - b.data, err = br.readBig(cSize, b.dataStorage) - if err != nil { - if debugDecoder { - println("Reading block:", err, "(", cSize, ")", len(b.data)) - printf("%T", br) - } - return err - } - return nil -} - -// sendEOF will make the decoder send EOF on this frame. -func (b *blockDec) sendErr(err error) { - b.Last = true - b.Type = blockTypeReserved - b.err = err -} - -// Close will release resources. -// Closed blockDec cannot be reset. -func (b *blockDec) Close() { -} - -// decodeBuf -func (b *blockDec) decodeBuf(hist *history) error { - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - b.dst = b.dst[:b.RLESize] - v := b.data[0] - for i := range b.dst { - b.dst[i] = v - } - hist.appendKeep(b.dst) - return nil - case blockTypeRaw: - hist.appendKeep(b.data) - return nil - case blockTypeCompressed: - saved := b.dst - // Append directly to history - if hist.ignoreBuffer == 0 { - b.dst = hist.b - hist.b = nil - } else { - b.dst = b.dst[:0] - } - err := b.decodeCompressed(hist) - if debugDecoder { - println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) - } - if hist.ignoreBuffer == 0 { - hist.b = b.dst - b.dst = saved - } else { - hist.appendKeep(b.dst) - } - return err - case blockTypeReserved: - // Used for returning errors. - return b.err - default: - panic("Invalid block type") - } -} - -func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { - // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header - if len(in) < 2 { - return in, ErrBlockTooSmall - } - - litType := literalsBlockType(in[0] & 3) - var litRegenSize int - var litCompSize int - sizeFormat := (in[0] >> 2) & 3 - var fourStreams bool - var literals []byte - switch litType { - case literalsBlockRaw, literalsBlockRLE: - switch sizeFormat { - case 0, 2: - // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. - litRegenSize = int(in[0] >> 3) - in = in[1:] - case 1: - // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) - in = in[2:] - case 3: - // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. - if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) - in = in[3:] - } - case literalsBlockCompressed, literalsBlockTreeless: - switch sizeFormat { - case 0, 1: - // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
- if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) - litRegenSize = int(n & 1023) - litCompSize = int(n >> 10) - fourStreams = sizeFormat == 1 - in = in[3:] - case 2: - fourStreams = true - if len(in) < 4 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) - litRegenSize = int(n & 16383) - litCompSize = int(n >> 14) - in = in[4:] - case 3: - fourStreams = true - if len(in) < 5 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) - litRegenSize = int(n & 262143) - litCompSize = int(n >> 18) - in = in[5:] - } - } - if debugDecoder { - println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) - } - if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { - return in, ErrWindowSizeExceeded - } - - switch litType { - case literalsBlockRaw: - if len(in) < litRegenSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return in, ErrBlockTooSmall - } - literals = in[:litRegenSize] - in = in[litRegenSize:] - //printf("Found %d uncompressed literals\n", litRegenSize) - case literalsBlockRLE: - if len(in) < 1 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return in, ErrBlockTooSmall - } - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - literals = b.literalBuf[:litRegenSize] - v := in[0] - for i := range literals { - literals[i] = v - } - in = in[1:] - if debugDecoder { - printf("Found %d RLE compressed literals\n", litRegenSize) - } - case literalsBlockTreeless: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - // Store compressed literals, so we defer decoding until we get history. - literals = in[:litCompSize] - in = in[litCompSize:] - if debugDecoder { - printf("Found %d compressed literals\n", litCompSize) - } - huff := hist.huffTree - if huff == nil { - return in, errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - var err error - // Use our out buffer. 
- huff.MaxDecodedSize = litRegenSize - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return in, err - } - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - - case literalsBlockCompressed: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - literals = in[:litCompSize] - in = in[litCompSize:] - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - huff := hist.huffTree - if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { - huff = huffDecoderPool.Get().(*huff0.Scratch) - if huff == nil { - huff = &huff0.Scratch{} - } - } - var err error - huff, literals, err = huff0.ReadTable(literals, huff) - if err != nil { - println("reading huffman table:", err) - return in, err - } - hist.huffTree = huff - huff.MaxDecodedSize = litRegenSize - // Use our out buffer. - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - if err != nil { - println("decoding compressed literals:", err) - return in, err - } - // Make sure we don't leak our literals buffer - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - // Re-cap to get extra size. - literals = b.literalBuf[:len(literals)] - if debugDecoder { - printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) - } - } - hist.decoders.literals = literals - return in, nil -} - -// decodeCompressed will start decompressing a block. -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - in, err := b.decodeLiterals(in, hist) - if err != nil { - return err - } - err = b.prepareSequences(in, hist) - if err != nil { - return err - } - if hist.decoders.nSeqs == 0 { - b.dst = append(b.dst, hist.decoders.literals...) 
- return nil - } - before := len(hist.decoders.out) - err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) - if err != nil { - return err - } - if hist.decoders.maxSyncLen > 0 { - hist.decoders.maxSyncLen += uint64(before) - hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) - } - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - return nil -} - -func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { - if debugDecoder { - printf("prepareSequences: %d byte(s) input\n", len(in)) - } - // Decode Sequences - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section - if len(in) < 1 { - return ErrBlockTooSmall - } - var nSeqs int - seqHeader := in[0] - switch { - case seqHeader < 128: - nSeqs = int(seqHeader) - in = in[1:] - case seqHeader < 255: - if len(in) < 2 { - return ErrBlockTooSmall - } - nSeqs = int(seqHeader-128)<<8 | int(in[1]) - in = in[2:] - case seqHeader == 255: - if len(in) < 3 { - return ErrBlockTooSmall - } - nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) - in = in[3:] - } - if nSeqs == 0 && len(in) != 0 { - // When no sequences, there should not be any more data... - if debugDecoder { - printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) - } - return ErrUnexpectedBlockSize - } - - var seqs = &hist.decoders - seqs.nSeqs = nSeqs - if nSeqs > 0 { - if len(in) < 1 { - return ErrBlockTooSmall - } - br := byteReader{b: in, off: 0} - compMode := br.Uint8() - br.advance(1) - if debugDecoder { - printf("Compression modes: 0b%b", compMode) - } - for i := uint(0); i < 3; i++ { - mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debugDecoder { - println("Table", tableIndex(i), "is", mode) - } - var seq *sequenceDec - switch tableIndex(i) { - case tableLiteralLengths: - seq = &seqs.litLengths - case tableOffsets: - seq = &seqs.offsets - case tableMatchLengths: - seq = &seqs.matchLengths - default: - panic("unknown table") - } - switch mode { - case compModePredefined: - if seq.fse != nil && !seq.fse.preDefined { - fseDecoderPool.Put(seq.fse) - } - seq.fse = &fsePredef[i] - case compModeRLE: - if br.remain() < 1 { - return ErrBlockTooSmall - } - v := br.Uint8() - br.advance(1) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - symb, err := decSymbolValue(v, symbolTableX[i]) - if err != nil { - printf("RLE Transform table (%v) error: %v", tableIndex(i), err) - return err - } - seq.fse.setRLE(symb) - if debugDecoder { - printf("RLE set to %+v, code: %v", symb, v) - } - case compModeFSE: - println("Reading table for", tableIndex(i)) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) - if err != nil { - println("Read table error:", err) - return err - } - err = seq.fse.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder { - println("Read table ok", "symbolLen:", seq.fse.symbolLen) - } - case compModeRepeat: - seq.repeat = true - } - if br.overread() { - return io.ErrUnexpectedEOF - } - } - in = br.unread() - } - if debugDecoder { - println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") - } - - if nSeqs == 0 { - if len(b.sequence) > 0 { - b.sequence = b.sequence[:0] - } - return nil - } - br := seqs.br - if br == nil { - br = &bitReader{} - } - if err := br.init(in); err != nil { - return err - 
} - - if err := seqs.initialize(br, hist, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - // Extract blocks... - if false && hist.dict == nil { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) - var buf bytes.Buffer - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) - buf.Write(in) - os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) - } - - return nil -} - -func (b *blockDec) decodeSequences(hist *history) error { - if cap(b.sequence) < hist.decoders.nSeqs { - if b.lowMem { - b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) - } else { - b.sequence = make([]seqVals, 0, 0x7F00+0xffff) - } - } - b.sequence = b.sequence[:hist.decoders.nSeqs] - if hist.decoders.nSeqs == 0 { - hist.decoders.seqSize = len(hist.decoders.literals) - return nil - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.prevOffset = hist.recentOffsets - - err := hist.decoders.decode(b.sequence) - hist.recentOffsets = hist.decoders.prevOffset - return err -} - -func (b *blockDec) executeSequences(hist *history) error { - hbytes := hist.b - if len(hbytes) > hist.windowSize { - hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history anymore. - if hist.dict != nil { - hist.dict.content = nil - } - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.out = b.dst[:0] - err := hist.decoders.execute(b.sequence, hbytes) - if err != nil { - return err - } - return b.updateHistory(hist) -} - -func (b *blockDec) updateHistory(hist *history) error { - if len(b.data) > maxCompressedBlockSize { - return fmt.Errorf("compressed block size too large (%d)", len(b.data)) - } - // Set output and release references. - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - - if b.Last { - // if last block we don't care about history. - println("Last block, no history returned") - hist.b = hist.b[:0] - return nil - } else { - hist.append(b.dst) - if debugDecoder { - println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) - } - } - hist.decoders.out, hist.decoders.literals = nil, nil - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go deleted file mode 100644 index 12e8f6f0b6..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ /dev/null @@ -1,871 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - - "github.com/klauspost/compress/huff0" -) - -type blockEnc struct { - size int - literals []byte - sequences []seq - coders seqCoders - litEnc *huff0.Scratch - dictLitEnc *huff0.Scratch - wr bitWriter - - extraLits int - output []byte - recentOffsets [3]uint32 - prevRecentOffsets [3]uint32 - - last bool - lowMem bool -} - -// init should be used once the block has been created. -// If called more than once, the effect is the same as calling reset. 
-func (b *blockEnc) init() { - if b.lowMem { - // 1K literals - if cap(b.literals) < 1<<10 { - b.literals = make([]byte, 0, 1<<10) - } - const defSeqs = 20 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - // 1K - if cap(b.output) < 1<<10 { - b.output = make([]byte, 0, 1<<10) - } - } else { - if cap(b.literals) < maxCompressedBlockSize { - b.literals = make([]byte, 0, maxCompressedBlockSize) - } - const defSeqs = 2000 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) - } - } - - if b.coders.mlEnc == nil { - b.coders.mlEnc = &fseEncoder{} - b.coders.mlPrev = &fseEncoder{} - b.coders.ofEnc = &fseEncoder{} - b.coders.ofPrev = &fseEncoder{} - b.coders.llEnc = &fseEncoder{} - b.coders.llPrev = &fseEncoder{} - } - b.litEnc = &huff0.Scratch{WantLogLess: 4} - b.reset(nil) -} - -// initNewEncode can be used to reset offsets and encoders to the initial state. -func (b *blockEnc) initNewEncode() { - b.recentOffsets = [3]uint32{1, 4, 8} - b.litEnc.Reuse = huff0.ReusePolicyNone - b.coders.setPrev(nil, nil, nil) -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) reset(prev *blockEnc) { - b.extraLits = 0 - b.literals = b.literals[:0] - b.size = 0 - b.sequences = b.sequences[:0] - b.output = b.output[:0] - b.last = false - if prev != nil { - b.recentOffsets = prev.prevRecentOffsets - } - b.dictLitEnc = nil -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) swapEncoders(prev *blockEnc) { - b.coders.swap(&prev.coders) - b.litEnc, prev.litEnc = prev.litEnc, b.litEnc -} - -// blockHeader contains the information for a block header. -type blockHeader uint32 - -// setLast sets the 'last' indicator on a block. -func (h *blockHeader) setLast(b bool) { - if b { - *h = *h | 1 - } else { - const mask = (1 << 24) - 2 - *h = *h & mask - } -} - -// setSize will store the compressed size of a block. -func (h *blockHeader) setSize(v uint32) { - const mask = 7 - *h = (*h)&mask | blockHeader(v<<3) -} - -// setType sets the block type. -func (h *blockHeader) setType(t blockType) { - const mask = 1 | (((1 << 24) - 1) ^ 7) - *h = (*h & mask) | blockHeader(t<<1) -} - -// appendTo will append the block header to a slice. -func (h blockHeader) appendTo(b []byte) []byte { - return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) -} - -// String returns a string representation of the block. -func (h blockHeader) String() string { - return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) -} - -// literalsHeader contains literals header information. -type literalsHeader uint64 - -// setType can be used to set the type of literal block. -func (h *literalsHeader) setType(t literalsBlockType) { - const mask = math.MaxUint64 - 3 - *h = (*h & mask) | literalsHeader(t) -} - -// setSize can be used to set a single size, for uncompressed and RLE content. 
-func (h *literalsHeader) setSize(regenLen int) { - inBits := bits.Len32(uint32(regenLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case inBits < 5: - lh |= (uint64(regenLen) << 3) | (1 << 60) - if debugEncoder { - got := int(lh>>3) & 0xff - if got != regenLen { - panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) - } - } - case inBits < 12: - lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) - case inBits < 20: - lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) - default: - panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) - } - *h = literalsHeader(lh) -} - -// setSizes will set the size of a compressed literals section and the input length. -func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { - compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case compBits <= 10 && inBits <= 10: - if !single { - lh |= 1 << 2 - } - lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debugEncoder { - const mmask = (1 << 24) - 1 - n := (lh >> 4) & mmask - if int(n&1023) != inLen { - panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) - } - if int(n>>10) != compLen { - panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) - } - } - case compBits <= 14 && inBits <= 14: - lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - case compBits <= 18 && inBits <= 18: - lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - default: - panic("internal error: block too big") - } - *h = literalsHeader(lh) -} - -// appendTo will append the literals header to a byte slice. -func (h literalsHeader) appendTo(b []byte) []byte { - size := uint8(h >> 60) - switch size { - case 1: - b = append(b, uint8(h)) - case 2: - b = append(b, uint8(h), uint8(h>>8)) - case 3: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) - case 4: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) - case 5: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) - default: - panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) - } - return b -} - -// size returns the output size with currently set values. -func (h literalsHeader) size() int { - return int(h >> 60) -} - -func (h literalsHeader) String() string { - return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) pushOffsets() { - b.prevRecentOffsets = b.recentOffsets -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) popOffsets() { - b.recentOffsets = b.prevRecentOffsets -} - -// matchOffset will adjust recent offsets and return the adjusted one, -// if it matches a previous offset. -func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
- if true { - if lits > 0 { - switch offset { - case b.recentOffsets[0]: - offset = 1 - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } else { - switch offset { - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 1 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[0] - 1: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } - } else { - offset += 3 - } - return offset -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRaw(a []byte) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(a))) - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output[:0]) - b.output = append(b.output, a...) - if debugEncoder { - println("Adding RAW block, length", len(a), "last:", b.last) - } -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(src))) - bh.setType(blockTypeRaw) - dst = bh.appendTo(dst) - dst = append(dst, src...) - if debugEncoder { - println("Adding RAW block, length", len(src), "last:", b.last) - } - return dst -} - -// encodeLits can be used if the block is only litLen. -func (b *blockEnc) encodeLits(lits []byte, raw bool) error { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(lits))) - - // Don't compress extremely small blocks - if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - } - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(lits) >= 1024 { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 32 { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(lits, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - switch err { - case huff0.ErrIncompressible: - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - case huff0.ErrUseRLE: - if debugEncoder { - println("Adding RLE block, length", len(lits)) - } - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits[0]) - return nil - case nil: - default: - return err - } - // Compressed... 
- // Now, allow reuse - b.litEnc.Reuse = huff0.ReusePolicyAllow - bh.setType(blockTypeCompressed) - var lh literalsHeader - if reUsed { - if debugEncoder { - println("Reused tree, compressed to", len(out)) - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - } - // Set sizes - lh.setSizes(len(out), len(lits), single) - bh.setSize(uint32(len(out) + lh.size() + 1)) - - // Write block headers. - b.output = bh.appendTo(b.output) - b.output = lh.appendTo(b.output) - // Add compressed data. - b.output = append(b.output, out...) - // No sequences. - b.output = append(b.output, 0) - return nil -} - -// fuzzFseEncoder can be used to fuzz the FSE encoder. -func fuzzFseEncoder(data []byte) int { - if len(data) > maxSequences || len(data) < 2 { - return 0 - } - enc := fseEncoder{} - hist := enc.Histogram() - maxSym := uint8(0) - for i, v := range data { - v = v & 63 - data[i] = v - hist[v]++ - if v > maxSym { - maxSym = v - } - } - if maxSym == 0 { - // All 0 - return 0 - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) - if cnt == len(data) { - // RLE - return 0 - } - enc.HistogramFinished(maxSym, cnt) - err := enc.normalizeCount(len(data)) - if err != nil { - return 0 - } - _, err = enc.writeCount(nil) - if err != nil { - panic(err) - } - return 1 -} - -// encode will encode the block and append the output in b.output. -// Previous offset codes must be pushed if more blocks are expected. -func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { - if len(b.sequences) == 0 { - return b.encodeLits(b.literals, rawAllLits) - } - // We want some difference to at least account for the headers. - saved := b.size - len(b.literals) - (b.size >> 5) - if saved < 16 { - if org == nil { - return errIncompressible - } - b.popOffsets() - return b.encodeLits(org, rawAllLits) - } - - var bh blockHeader - var lh literalsHeader - bh.setLast(b.last) - bh.setType(blockTypeCompressed) - // Store offset of the block header. Needed when we know the size. - bhOffset := len(b.output) - b.output = bh.appendTo(b.output) - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(b.literals) >= 1024 && !raw { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 32 && !raw { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - switch err { - case huff0.ErrIncompressible: - lh.setType(literalsBlockRaw) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals...) - if debugEncoder { - println("Adding literals RAW, length", len(b.literals)) - } - case huff0.ErrUseRLE: - lh.setType(literalsBlockRLE) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals[0]) - if debugEncoder { - println("Adding literals RLE") - } - case nil: - // Compressed litLen... 
- if reUsed { - if debugEncoder { - println("reused tree") - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("new tree, size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - if debugEncoder { - _, _, err := huff0.ReadTable(out, nil) - if err != nil { - panic(err) - } - } - } - lh.setSizes(len(out), len(b.literals), single) - if debugEncoder { - printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) - println("Adding literal header:", lh) - } - b.output = lh.appendTo(b.output) - b.output = append(b.output, out...) - b.litEnc.Reuse = huff0.ReusePolicyAllow - if debugEncoder { - println("Adding literals compressed") - } - default: - if debugEncoder { - println("Adding literals ERROR:", err) - } - return err - } - // Sequence compression - - // Write the number of sequences - switch { - case len(b.sequences) < 128: - b.output = append(b.output, uint8(len(b.sequences))) - case len(b.sequences) < 0x7f00: // TODO: this could be wrong - n := len(b.sequences) - b.output = append(b.output, 128+uint8(n>>8), uint8(n)) - default: - n := len(b.sequences) - 0x7f00 - b.output = append(b.output, 255, uint8(n), uint8(n>>8)) - } - if debugEncoder { - println("Encoding", len(b.sequences), "sequences") - } - b.genCodes() - llEnc := b.coders.llEnc - ofEnc := b.coders.ofEnc - mlEnc := b.coders.mlEnc - err = llEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = ofEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = mlEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - - // Choose the best compression mode for each type. - // Will evaluate the new vs predefined and previous. - chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { - // See if predefined/previous is better - hist := cur.count[:cur.symbolLen] - nSize := cur.approxSize(hist) + cur.maxHeaderSize() - predefSize := preDef.approxSize(hist) - prevSize := prev.approxSize(hist) - - // Add a small penalty for new encoders. - // Don't bother with extremely small (<2 byte gains). - nSize = nSize + (nSize+2*8*16)>>4 - switch { - case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debugEncoder { - println("Using predefined", predefSize>>3, "<=", nSize>>3) - } - return preDef, compModePredefined - case prevSize <= nSize: - if debugEncoder { - println("Using previous", prevSize>>3, "<=", nSize>>3) - } - return prev, compModeRepeat - default: - if debugEncoder { - println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") - println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) - } - return cur, compModeFSE - } - } - - // Write compression mode - var mode uint8 - if llEnc.useRLE { - mode |= uint8(compModeRLE) << 6 - llEnc.setRLE(b.sequences[0].llCode) - if debugEncoder { - println("llEnc.useRLE") - } - } else { - var m seqCompMode - llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) - mode |= uint8(m) << 6 - } - if ofEnc.useRLE { - mode |= uint8(compModeRLE) << 4 - ofEnc.setRLE(b.sequences[0].ofCode) - if debugEncoder { - println("ofEnc.useRLE") - } - } else { - var m seqCompMode - ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) - mode |= uint8(m) << 4 - } - - if mlEnc.useRLE { - mode |= uint8(compModeRLE) << 2 - mlEnc.setRLE(b.sequences[0].mlCode) - if debugEncoder { - println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) - } - } else { - var m seqCompMode - mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) - mode |= uint8(m) << 2 - } - b.output = append(b.output, mode) - if debugEncoder { - printf("Compression modes: 0b%b", mode) - } - b.output, err = llEnc.writeCount(b.output) - if err != nil { - return err - } - start := len(b.output) - b.output, err = ofEnc.writeCount(b.output) - if err != nil { - return err - } - if false { - println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) - for i, v := range ofEnc.norm[:ofEnc.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) - } - } - b.output, err = mlEnc.writeCount(b.output) - if err != nil { - return err - } - - // Maybe in block? - wr := &b.wr - wr.reset(b.output) - - var ll, of, ml cState - - // Current sequence - seq := len(b.sequences) - 1 - s := b.sequences[seq] - llEnc.setBits(llBitsTable[:]) - mlEnc.setBits(mlBitsTable[:]) - ofEnc.setBits(nil) - - llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] - - // We have 3 bounds checks here (and in the loop). - // Since we are iterating backwards it is kinda hard to avoid. - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - ll.init(wr, &llEnc.ct, llB) - of.init(wr, &ofEnc.ct, ofB) - wr.flush32() - ml.init(wr, &mlEnc.ct, mlB) - - // Each of these lookups also generates a bounds check. - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - if debugSequences { - println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) - } - seq-- - // Store sequences in reverse... - for seq >= 0 { - s = b.sequences[seq] - - ofB := ofTT[s.ofCode] - wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. - //of.encode(ofB) - nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 - dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) - wr.addBits16NC(of.state, uint8(nbBitsOut)) - of.state = of.stateTable[dstState] - - // Accumulate extra bits. 
- outBits := ofB.outBits & 31
- extraBits := uint64(s.offset & bitMask32[outBits])
- extraBitsN := outBits
-
- mlB := mlTT[s.mlCode]
- //ml.encode(mlB)
- nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
- dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
- wr.addBits16NC(ml.state, uint8(nbBitsOut))
- ml.state = ml.stateTable[dstState]
-
- outBits = mlB.outBits & 31
- extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
- extraBitsN += outBits
-
- llB := llTT[s.llCode]
- //ll.encode(llB)
- nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
- dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
- wr.addBits16NC(ll.state, uint8(nbBitsOut))
- ll.state = ll.stateTable[dstState]
-
- outBits = llB.outBits & 31
- extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
- extraBitsN += outBits
-
- wr.flush32()
- wr.addBits64NC(extraBits, extraBitsN)
-
- if debugSequences {
- println("Encoded seq", seq, s)
- }
-
- seq--
- }
- ml.flush(mlEnc.actualTableLog)
- of.flush(ofEnc.actualTableLog)
- ll.flush(llEnc.actualTableLog)
- err = wr.close()
- if err != nil {
- return err
- }
- b.output = wr.out
-
- if len(b.output)-3-bhOffset >= b.size {
- // Maybe even add a bigger margin.
- b.litEnc.Reuse = huff0.ReusePolicyNone
- return errIncompressible
- }
-
- // Size is output minus block header.
- bh.setSize(uint32(len(b.output)-bhOffset) - 3)
- if debugEncoder {
- println("Rewriting block header", bh)
- }
- _ = bh.appendTo(b.output[bhOffset:bhOffset])
- b.coders.setPrev(llEnc, mlEnc, ofEnc)
- return nil
-}
-
-var errIncompressible = errors.New("incompressible")
-
-func (b *blockEnc) genCodes() {
- if len(b.sequences) == 0 {
- // nothing to do
- return
- }
- if len(b.sequences) > math.MaxUint16 {
- panic("can only encode up to 64K sequences")
- }
- // No bounds checks after here:
- llH := b.coders.llEnc.Histogram()
- ofH := b.coders.ofEnc.Histogram()
- mlH := b.coders.mlEnc.Histogram()
- for i := range llH {
- llH[i] = 0
- }
- for i := range ofH {
- ofH[i] = 0
- }
- for i := range mlH {
- mlH[i] = 0
- }
-
- var llMax, ofMax, mlMax uint8
- for i := range b.sequences {
- seq := &b.sequences[i]
- v := llCode(seq.litLen)
- seq.llCode = v
- llH[v]++
- if v > llMax {
- llMax = v
- }
-
- v = ofCode(seq.offset)
- seq.ofCode = v
- ofH[v]++
- if v > ofMax {
- ofMax = v
- }
-
- v = mlCode(seq.matchLen)
- seq.mlCode = v
- mlH[v]++
- if v > mlMax {
- mlMax = v
- if debugAsserts && mlMax > maxMatchLengthSymbol {
- panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
- }
- }
- }
- maxCount := func(a []uint32) int {
- var max uint32
- for _, v := range a {
- if v > max {
- max = v
- }
- }
- return int(max)
- }
- if debugAsserts && mlMax > maxMatchLengthSymbol {
- panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
- }
- if debugAsserts && ofMax > maxOffsetBits {
- panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
- }
- if debugAsserts && llMax > maxLiteralLengthSymbol {
- panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
- }
-
- b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
- b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
- b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
deleted file mode 100644
index 01a01e486e..0000000000
--- a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT.
-
-package zstd
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{} - _ = x[blockTypeRaw-0] - _ = x[blockTypeRLE-1] - _ = x[blockTypeCompressed-2] - _ = x[blockTypeReserved-3] -} - -const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" - -var _blockType_index = [...]uint8{0, 12, 24, 43, 60} - -func (i blockType) String() string { - if i >= blockType(len(_blockType_index)-1) { - return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[literalsBlockRaw-0] - _ = x[literalsBlockRLE-1] - _ = x[literalsBlockCompressed-2] - _ = x[literalsBlockTreeless-3] -} - -const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" - -var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} - -func (i literalsBlockType) String() string { - if i >= literalsBlockType(len(_literalsBlockType_index)-1) { - return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[compModePredefined-0] - _ = x[compModeRLE-1] - _ = x[compModeFSE-2] - _ = x[compModeRepeat-3] -} - -const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" - -var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} - -func (i seqCompMode) String() string { - if i >= seqCompMode(len(_seqCompMode_index)-1) { - return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[tableLiteralLengths-0] - _ = x[tableOffsets-1] - _ = x[tableMatchLengths-2] -} - -const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" - -var _tableIndex_index = [...]uint8{0, 19, 31, 48} - -func (i tableIndex) String() string { - if i >= tableIndex(len(_tableIndex_index)-1) { - return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go deleted file mode 100644 index 176788f259..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "io" -) - -type byteBuffer interface { - // Read up to 8 bytes. - // Returns io.ErrUnexpectedEOF if this cannot be satisfied. - readSmall(n int) ([]byte, error) - - // Read >8 bytes. - // MAY use the destination slice. - readBig(n int, dst []byte) ([]byte, error) - - // Read a single byte. - readByte() (byte, error) - - // Skip n bytes. 
- skipN(n int64) error -} - -// in-memory buffer -type byteBuf []byte - -func (b *byteBuf) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readByte() (byte, error) { - bb := *b - if len(bb) < 1 { - return 0, nil - } - r := bb[0] - *b = bb[1:] - return r, nil -} - -func (b *byteBuf) skipN(n int64) error { - bb := *b - if n < 0 { - return fmt.Errorf("negative skip (%d) requested", n) - } - if int64(len(bb)) < n { - return io.ErrUnexpectedEOF - } - *b = bb[n:] - return nil -} - -// wrapper around a reader. -type readerWrapper struct { - r io.Reader - tmp [8]byte -} - -func (r *readerWrapper) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - n2, err := io.ReadFull(r.r, r.tmp[:n]) - // We only really care about the actual bytes read. - if err != nil { - if err == io.EOF { - return nil, io.ErrUnexpectedEOF - } - if debugDecoder { - println("readSmall: got", n2, "want", n, "err", err) - } - return nil, err - } - return r.tmp[:n], nil -} - -func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { - if cap(dst) < n { - dst = make([]byte, n) - } - n2, err := io.ReadFull(r.r, dst[:n]) - if err == io.EOF && n > 0 { - err = io.ErrUnexpectedEOF - } - return dst[:n2], err -} - -func (r *readerWrapper) readByte() (byte, error) { - n2, err := r.r.Read(r.tmp[:1]) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, err - } - if n2 != 1 { - return 0, io.ErrUnexpectedEOF - } - return r.tmp[0], nil -} - -func (r *readerWrapper) skipN(n int64) error { - n2, err := io.CopyN(io.Discard, r.r, n) - if n2 != n { - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go deleted file mode 100644 index 0e59a242d8..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// overread returns whether we have advanced too far. -func (b *byteReader) overread() bool { - return b.off > len(b.b) -} - -// Int32 returns a little endian int32 starting at current offset. -func (b byteReader) Int32() int32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := int32(b2[3]) - v2 := int32(b2[2]) - v1 := int32(b2[1]) - v0 := int32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint8 returns the next byte -func (b *byteReader) Uint8() uint8 { - v := b.b[b.off] - return v -} - -// Uint32 returns a little endian uint32 starting at current offset. 
-func (b byteReader) Uint32() uint32 { - if r := b.remain(); r < 4 { - // Very rare - v := uint32(0) - for i := 1; i <= r; i++ { - v = (v << 8) | uint32(b.b[len(b.b)-i]) - } - return v - } - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint32NC returns a little endian uint32 starting at current offset. -// The caller must be sure if there are at least 4 bytes left. -func (b byteReader) Uint32NC() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go deleted file mode 100644 index 5022e71c83..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2020+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "io" -) - -// HeaderMaxSize is the maximum size of a Frame and Block Header. -// If less is sent to Header.Decode it *may* still contain enough information. -const HeaderMaxSize = 14 + 3 - -// Header contains information about the first frame and block within that. -type Header struct { - // SingleSegment specifies whether the data is to be decompressed into a - // single contiguous memory segment. - // It implies that WindowSize is invalid and that FrameContentSize is valid. - SingleSegment bool - - // WindowSize is the window of data to keep while decoding. - // Will only be set if SingleSegment is false. - WindowSize uint64 - - // Dictionary ID. - // If 0, no dictionary. - DictionaryID uint32 - - // HasFCS specifies whether FrameContentSize has a valid value. - HasFCS bool - - // FrameContentSize is the expected uncompressed size of the entire frame. - FrameContentSize uint64 - - // Skippable will be true if the frame is meant to be skipped. - // This implies that FirstBlock.OK is false. - Skippable bool - - // SkippableID is the user-specific ID for the skippable frame. - // Valid values are between 0 to 15, inclusive. - SkippableID int - - // SkippableSize is the length of the user data to skip following - // the header. - SkippableSize uint32 - - // HeaderSize is the raw size of the frame header. - // - // For normal frames, it includes the size of the magic number and - // the size of the header (per section 3.1.1.1). - // It does not include the size for any data blocks (section 3.1.1.2) nor - // the size for the trailing content checksum. - // - // For skippable frames, this counts the size of the magic number - // along with the size of the size field of the payload. - // It does not include the size of the skippable payload itself. - // The total frame size is the HeaderSize plus the SkippableSize. - HeaderSize int - - // First block information. - FirstBlock struct { - // OK will be set if first block could be decoded. - OK bool - - // Is this the last block of a frame? - Last bool - - // Is the data compressed? - // If true CompressedSize will be populated. 
- // Unfortunately DecompressedSize cannot be determined - // without decoding the blocks. - Compressed bool - - // DecompressedSize is the expected decompressed size of the block. - // Will be 0 if it cannot be determined. - DecompressedSize int - - // CompressedSize of the data in the block. - // Does not include the block header. - // Will be equal to DecompressedSize if not Compressed. - CompressedSize int - } - - // If set there is a checksum present for the block content. - // The checksum field at the end is always 4 bytes long. - HasCheckSum bool -} - -// Decode the header from the beginning of the stream. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. -func (h *Header) Decode(in []byte) error { - *h = Header{} - if len(in) < 4 { - return io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - b, in := in[:4], in[4:] - if !bytes.Equal(b, frameMagic) { - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch - } - if len(in) < 4 { - return io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - h.Skippable = true - h.SkippableID = int(b[0] & 0xf) - h.SkippableSize = binary.LittleEndian.Uint32(in) - return nil - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - if len(in) < 1 { - return io.ErrUnexpectedEOF - } - fhd, in := in[0], in[1:] - h.HeaderSize++ - h.SingleSegment = fhd&(1<<5) != 0 - h.HasCheckSum = fhd&(1<<2) != 0 - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - if !h.SingleSegment { - if len(in) < 1 { - return io.ErrUnexpectedEOF - } - var wd byte - wd, in = in[0], in[1:] - h.HeaderSize++ - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - h.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - if len(in) < int(size) { - return io.ErrUnexpectedEOF - } - b, in = in[:size], in[size:] - h.HeaderSize += int(size) - switch size { - case 1: - h.DictionaryID = uint32(b[0]) - case 2: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if h.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - - if fcsSize > 0 { - h.HasFCS = true - if len(in) < fcsSize { - return io.ErrUnexpectedEOF - } - b, in = in[:fcsSize], in[fcsSize:] - h.HeaderSize += int(fcsSize) - switch fcsSize { - case 1: - h.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. 
- h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - } - - // Frame Header done, we will not fail from now on. - if len(in) < 3 { - return nil - } - tmp := in[:3] - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - h.FirstBlock.Last = bh&1 != 0 - blockType := blockType((bh >> 1) & 3) - // find size. - cSize := int(bh >> 3) - switch blockType { - case blockTypeReserved: - return nil - case blockTypeRLE: - h.FirstBlock.Compressed = true - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = 1 - case blockTypeCompressed: - h.FirstBlock.Compressed = true - h.FirstBlock.CompressedSize = cSize - case blockTypeRaw: - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = cSize - default: - panic("Invalid block type") - } - - h.FirstBlock.OK = true - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go deleted file mode 100644 index 78c10755f8..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ /dev/null @@ -1,950 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "context" - "encoding/binary" - "io" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Decoder provides decoding of zstandard streams. -// The decoder has been designed to operate without allocations after a warmup. -// This means that you should store the decoder for best performance. -// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. -// A decoder can safely be re-used even if the previous stream failed. -// To release the resources, you must call the Close() function on a decoder. -type Decoder struct { - o decoderOptions - - // Unreferenced decoders, ready for use. - decoders chan *blockDec - - // Current read position used for Reader functionality. - current decoderState - - // sync stream decoding - syncStream struct { - decodedFrame uint64 - br readerWrapper - enabled bool - inFrame bool - dstBuf []byte - } - - frame *frameDec - - // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict - - // streamWg is the waitgroup for all streams - streamWg sync.WaitGroup -} - -// decoderState is used for maintaining state when the decoder -// is used for streaming. -type decoderState struct { - // current block being written to stream. - decodeOutput - - // output in order to be written to stream. - output chan decodeOutput - - // cancel remaining output. - cancel context.CancelFunc - - // crc of current frame - crc *xxhash.Digest - - flushed bool -} - -var ( - // Check the interfaces we want to support. - _ = io.WriterTo(&Decoder{}) - _ = io.Reader(&Decoder{}) -) - -// NewReader creates a new decoder. -// A nil Reader can be provided in which case Reset can be used to start a decode. -// -// A Decoder can be used in two modes: -// -// 1) As a stream, or -// 2) For stateless decoding using DecodeAll. 
-//
-// Only a single stream can be decoded concurrently, but the same decoder
-// can run multiple concurrent stateless decodes. It is even possible to
-// use stateless decodes while a stream is being decoded.
-//
-// The Reset function can be used to initiate a new stream, which will considerably
-// reduce the allocations normally caused by NewReader.
-func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
- initPredefined()
- var d Decoder
- d.o.setDefault()
- for _, o := range opts {
- err := o(&d.o)
- if err != nil {
- return nil, err
- }
- }
- d.current.crc = xxhash.New()
- d.current.flushed = true
-
- if r == nil {
- d.current.err = ErrDecoderNilInput
- }
-
- // Transfer option dicts.
- d.dicts = make(map[uint32]dict, len(d.o.dicts))
- for _, dc := range d.o.dicts {
- d.dicts[dc.id] = dc
- }
- d.o.dicts = nil
-
- // Create decoders
- d.decoders = make(chan *blockDec, d.o.concurrent)
- for i := 0; i < d.o.concurrent; i++ {
- dec := newBlockDec(d.o.lowMem)
- dec.localFrame = newFrameDec(d.o)
- d.decoders <- dec
- }
-
- if r == nil {
- return &d, nil
- }
- return &d, d.Reset(r)
-}
-
-// Read bytes from the decompressed stream into p.
-// Returns the number of bytes read and any error that occurred.
-// When the stream is done, io.EOF will be returned.
-func (d *Decoder) Read(p []byte) (int, error) {
- var n int
- for {
- if len(d.current.b) > 0 {
- filled := copy(p, d.current.b)
- p = p[filled:]
- d.current.b = d.current.b[filled:]
- n += filled
- }
- if len(p) == 0 {
- break
- }
- if len(d.current.b) == 0 {
- // We have an error and no more data
- if d.current.err != nil {
- break
- }
- if !d.nextBlock(n == 0) {
- return n, d.current.err
- }
- }
- }
- if len(d.current.b) > 0 {
- if debugDecoder {
- println("returning", n, "still bytes left:", len(d.current.b))
- }
- // Only return error at end of block
- return n, nil
- }
- if d.current.err != nil {
- d.drainOutput()
- }
- if debugDecoder {
- println("returning", n, d.current.err, len(d.decoders))
- }
- return n, d.current.err
-}
-
-// Reset will reset the decoder to the supplied stream after the current one has finished processing.
-// Note that this functionality cannot be used after Close has been called.
-// Reset can be called with a nil reader to release references to the previous reader.
-// After being called with a nil reader, no other operations than Reset or DecodeAll or Close
-// should be used.
-func (d *Decoder) Reset(r io.Reader) error {
- if d.current.err == ErrDecoderClosed {
- return d.current.err
- }
-
- d.drainOutput()
-
- d.syncStream.br.r = nil
- if r == nil {
- d.current.err = ErrDecoderNilInput
- if len(d.current.b) > 0 {
- d.current.b = d.current.b[:0]
- }
- d.current.flushed = true
- return nil
- }
-
- // If the input is a bytes buffer below the decode threshold, do sync decoding anyway.
- if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
- bb2 := bb
- if debugDecoder {
- println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
- }
- b := bb2.Bytes()
- var dst []byte
- if cap(d.syncStream.dstBuf) > 0 {
- dst = d.syncStream.dstBuf[:0]
- }
-
- dst, err := d.DecodeAll(b, dst)
- if err == nil {
- err = io.EOF
- }
- // Save output buffer
- d.syncStream.dstBuf = dst
- d.current.b = dst
- d.current.err = err
- d.current.flushed = true
- if debugDecoder {
- println("sync decode to", len(dst), "bytes, err:", err)
- }
- return nil
- }
- // Remove current block.
- d.stashDecoder() - d.current.decodeOutput = decodeOutput{} - d.current.err = nil - d.current.flushed = false - d.current.d = nil - d.syncStream.dstBuf = nil - - // Ensure no-one else is still running... - d.streamWg.Wait() - if d.frame == nil { - d.frame = newFrameDec(d.o) - } - - if d.o.concurrent == 1 { - return d.startSyncDecoder(r) - } - - d.current.output = make(chan decodeOutput, d.o.concurrent) - ctx, cancel := context.WithCancel(context.Background()) - d.current.cancel = cancel - d.streamWg.Add(1) - go d.startStreamDecoder(ctx, r, d.current.output) - - return nil -} - -// drainOutput will drain the output until errEndOfStream is sent. -func (d *Decoder) drainOutput() { - if d.current.cancel != nil { - if debugDecoder { - println("cancelling current") - } - d.current.cancel() - d.current.cancel = nil - } - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) - } - d.decoders <- d.current.d - d.current.d = nil - d.current.b = nil - } - if d.current.output == nil || d.current.flushed { - println("current already flushed") - return - } - for v := range d.current.output { - if v.d != nil { - if debugDecoder { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - } - d.current.output = nil - d.current.flushed = true -} - -// WriteTo writes data to w until there's no more data to write or when an error occurs. -// The return value n is the number of bytes written. -// Any error encountered during the write is also returned. -func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - var n int64 - for { - if len(d.current.b) > 0 { - n2, err2 := w.Write(d.current.b) - n += int64(n2) - if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { - d.current.err = err2 - } else if n2 != len(d.current.b) { - d.current.err = io.ErrShortWrite - } - } - if d.current.err != nil { - break - } - d.nextBlock(true) - } - err := d.current.err - if err != nil { - d.drainOutput() - } - if err == io.EOF { - err = nil - } - return n, err -} - -// DecodeAll allows stateless decoding of a blob of bytes. -// Output will be appended to dst, so if the destination size is known -// you can pre-allocate the destination slice to avoid allocations. -// DecodeAll can be used concurrently. -// The Decoder concurrency limits will be respected. -func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.decoders == nil { - return dst, ErrDecoderClosed - } - - // Grab a block decoder and frame decoder. 
- block := <-d.decoders - frame := block.localFrame - initialSize := len(dst) - defer func() { - if debugDecoder { - printf("re-adding decoder: %p", block) - } - frame.rawInput = nil - frame.bBuf = nil - if frame.history.decoders.br != nil { - frame.history.decoders.br.in = nil - } - d.decoders <- block - }() - frame.bBuf = input - - for { - frame.history.reset() - err := frame.reset(&frame.bBuf) - if err != nil { - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") - } - return dst, nil - } - return dst, err - } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(&dict) - } - if frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) - } - return dst, ErrWindowSizeExceeded - } - if frame.FrameContentSize != fcsUnknown { - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - - if cap(dst) == 0 && !d.o.limitToCap { - // Allocate len(input) * 2 by default if nothing is provided - // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } - if uint64(size) > d.o.maxDecodedSize { - size = int(d.o.maxDecodedSize) - } - dst = make([]byte, 0, size) - } - - dst, err = frame.runDecoder(dst, block) - if err != nil { - return dst, err - } - if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { - return dst, ErrDecoderSizeExceeded - } - if len(frame.bBuf) == 0 { - if debugDecoder { - println("frame dbuf empty") - } - break - } - } - return dst, nil -} - -// nextBlock returns the next block. -// If an error occurs d.err will be set. -// Optionally the function can block for new output. -// If non-blocking mode is used the returned boolean will be false -// if no data was available without blocking. -func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.err != nil { - // Keep error state. - return false - } - d.current.b = d.current.b[:0] - - // SYNC: - if d.syncStream.enabled { - if !blocking { - return false - } - ok = d.nextBlockSync() - if !ok { - d.stashDecoder() - } - return ok - } - - //ASYNC: - d.stashDecoder() - if blocking { - d.current.decodeOutput, ok = <-d.current.output - } else { - select { - case d.current.decodeOutput, ok = <-d.current.output: - default: - return false - } - } - if !ok { - // This should not happen, so signal error state... 
- d.current.err = io.ErrUnexpectedEOF - return false - } - next := d.current.decodeOutput - if next.d != nil && next.d.async.newHist != nil { - d.current.crc.Reset() - } - if debugDecoder { - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) - println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) - } - - if !d.o.ignoreChecksum && len(next.b) > 0 { - n, err := d.current.crc.Write(next.b) - if err == nil { - if n != len(next.b) { - d.current.err = io.ErrShortWrite - } - } - } - if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { - got := d.current.crc.Sum64() - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(got)) - if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) { - if debugDecoder { - println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") - } - d.current.err = ErrCRCMismatch - } else { - if debugDecoder { - println("CRC ok", tmp[:]) - } - } - } - - return true -} - -func (d *Decoder) nextBlockSync() (ok bool) { - if d.current.d == nil { - d.current.d = <-d.decoders - } - for len(d.current.b) == 0 { - if !d.syncStream.inFrame { - d.frame.history.reset() - d.current.err = d.frame.reset(&d.syncStream.br) - if d.current.err != nil { - return false - } - if d.frame.DictionaryID != nil { - dict, ok := d.dicts[*d.frame.DictionaryID] - if !ok { - d.current.err = ErrUnknownDictionary - return false - } else { - d.frame.history.setDict(&dict) - } - } - if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { - d.current.err = ErrDecoderSizeExceeded - return false - } - - d.syncStream.decodedFrame = 0 - d.syncStream.inFrame = true - } - d.current.err = d.frame.next(d.current.d) - if d.current.err != nil { - return false - } - d.frame.history.ensureBlock() - if debugDecoder { - println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) - } - histBefore := len(d.frame.history.b) - d.current.err = d.current.d.decodeBuf(&d.frame.history) - - if d.current.err != nil { - println("error after:", d.current.err) - return false - } - d.current.b = d.frame.history.b[histBefore:] - if debugDecoder { - println("history after:", len(d.frame.history.b)) - } - - // Check frame size (before CRC) - d.syncStream.decodedFrame += uint64(len(d.current.b)) - if d.syncStream.decodedFrame > d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeExceeded - return false - } - - // Check FCS - if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeMismatch - return false - } - - // Update/Check CRC - if d.frame.HasCheckSum { - if !d.o.ignoreChecksum { - d.frame.crc.Write(d.current.b) - } - if d.current.d.Last { - if !d.o.ignoreChecksum { - d.current.err = d.frame.checkCRC() - } else { - d.current.err = d.frame.consumeCRC() - } - if d.current.err != nil { - println("CRC error:", d.current.err) - return false - } - } - } - d.syncStream.inFrame = !d.current.d.Last - } - return true -} - -func (d *Decoder) stashDecoder() { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = 
nil - } -} - -// Close will release all resources. -// It is NOT possible to reuse the decoder after this. -func (d *Decoder) Close() { - if d.current.err == ErrDecoderClosed { - return - } - d.drainOutput() - if d.current.cancel != nil { - d.current.cancel() - d.streamWg.Wait() - d.current.cancel = nil - } - if d.decoders != nil { - close(d.decoders) - for dec := range d.decoders { - dec.Close() - } - d.decoders = nil - } - if d.current.d != nil { - d.current.d.Close() - d.current.d = nil - } - d.current.err = ErrDecoderClosed -} - -// IOReadCloser returns the decoder as an io.ReadCloser for convenience. -// Any changes to the decoder will be reflected, so the returned ReadCloser -// can be reused along with the decoder. -// io.WriterTo is also supported by the returned ReadCloser. -func (d *Decoder) IOReadCloser() io.ReadCloser { - return closeWrapper{d: d} -} - -// closeWrapper wraps a function call as a closer. -type closeWrapper struct { - d *Decoder -} - -// WriteTo forwards WriteTo calls to the decoder. -func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { - return c.d.WriteTo(w) -} - -// Read forwards read calls to the decoder. -func (c closeWrapper) Read(p []byte) (n int, err error) { - return c.d.Read(p) -} - -// Close closes the decoder. -func (c closeWrapper) Close() error { - c.d.Close() - return nil -} - -type decodeOutput struct { - d *blockDec - b []byte - err error -} - -func (d *Decoder) startSyncDecoder(r io.Reader) error { - d.frame.history.reset() - d.syncStream.br = readerWrapper{r: r} - d.syncStream.inFrame = false - d.syncStream.enabled = true - d.syncStream.decodedFrame = 0 - return nil -} - -// Create Decoder: -// ASYNC: -// Spawn 3 go routines. -// 0: Read frames and decode block literals. -// 1: Decode sequences. -// 2: Execute sequences, send to output. -func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { - defer d.streamWg.Done() - br := readerWrapper{r: r} - - var seqDecode = make(chan *blockDec, d.o.concurrent) - var seqExecute = make(chan *blockDec, d.o.concurrent) - - // Async 1: Decode sequences... - go func() { - var hist history - var hasErr bool - - for block := range seqDecode { - if hasErr { - if block != nil { - seqExecute <- block - } - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 1: new history, recent:", block.async.newHist.recentOffsets) - } - hist.reset() - hist.decoders = block.async.newHist.decoders - hist.recentOffsets = block.async.newHist.recentOffsets - hist.windowSize = block.async.newHist.windowSize - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqExecute <- block - continue - } - - hist.decoders.literals = block.async.literals - block.err = block.prepareSequences(block.async.seqData, &hist) - if debugDecoder && block.err != nil { - println("prepareSequences returned:", block.err) - } - hasErr = block.err != nil - if block.err == nil { - block.err = block.decodeSequences(&hist) - if debugDecoder && block.err != nil { - println("decodeSequences returned:", block.err) - } - hasErr = block.err != nil - // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] - block.async.seqSize = hist.decoders.seqSize - } - seqExecute <- block - } - close(seqExecute) - hist.reset() - }() - - var wg sync.WaitGroup - wg.Add(1) - - // Async 3: Execute sequences... 
- frameHistCache := d.frame.history.b - go func() { - var hist history - var decodedFrame uint64 - var fcs uint64 - var hasErr bool - for block := range seqExecute { - out := decodeOutput{err: block.err, d: block} - if block.err != nil || hasErr { - hasErr = true - output <- out - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 2: new history") - } - hist.reset() - hist.windowSize = block.async.newHist.windowSize - hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - - if cap(hist.b) < hist.allocFrameBuffer { - if cap(frameHistCache) >= hist.allocFrameBuffer { - hist.b = frameHistCache - } else { - hist.b = make([]byte, 0, hist.allocFrameBuffer) - println("Alloc history sized", hist.allocFrameBuffer) - } - } - hist.b = hist.b[:0] - fcs = block.async.fcs - decodedFrame = 0 - } - do := decodeOutput{err: block.err, d: block} - switch block.Type { - case blockTypeRLE: - if debugDecoder { - println("add rle block length:", block.RLESize) - } - - if cap(block.dst) < int(block.RLESize) { - if block.lowMem { - block.dst = make([]byte, block.RLESize) - } else { - block.dst = make([]byte, maxBlockSize) - } - } - block.dst = block.dst[:block.RLESize] - v := block.data[0] - for i := range block.dst { - block.dst[i] = v - } - hist.append(block.dst) - do.b = block.dst - case blockTypeRaw: - if debugDecoder { - println("add raw block length:", len(block.data)) - } - hist.append(block.data) - do.b = block.data - case blockTypeCompressed: - if debugDecoder { - println("execute with history length:", len(hist.b), "window:", hist.windowSize) - } - hist.decoders.seqSize = block.async.seqSize - hist.decoders.literals = block.async.literals - do.err = block.executeSequences(&hist) - hasErr = do.err != nil - if debugDecoder && hasErr { - println("executeSequences returned:", do.err) - } - do.b = block.dst - } - if !hasErr { - decodedFrame += uint64(len(do.b)) - if decodedFrame > fcs { - println("fcs exceeded", block.Last, fcs, decodedFrame) - do.err = ErrFrameSizeExceeded - hasErr = true - } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { - do.err = ErrFrameSizeMismatch - hasErr = true - } else { - if debugDecoder { - println("fcs ok", block.Last, fcs, decodedFrame) - } - } - } - output <- do - } - close(output) - frameHistCache = hist.b - wg.Done() - if debugDecoder { - println("decoder goroutines finished") - } - hist.reset() - }() - - var hist history -decodeStream: - for { - var hasErr bool - hist.reset() - decodeBlock := func(block *blockDec) { - if hasErr { - if block != nil { - seqDecode <- block - } - return - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqDecode <- block - return - } - - remain, err := block.decodeLiterals(block.data, &hist) - block.err = err - hasErr = block.err != nil - if err == nil { - block.async.literals = hist.decoders.literals - block.async.seqData = remain - } else if debugDecoder { - println("decodeLiterals error:", err) - } - seqDecode <- block - } - frame := d.frame - if debugDecoder { - println("New frame...") - } - var historySent bool - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) - } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) - } - } - if err == nil && d.frame.WindowSize > 
d.o.maxWindowSize { - if debugDecoder { - println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize) - } - - err = ErrDecoderSizeExceeded - } - if err != nil { - select { - case <-ctx.Done(): - case dec := <-d.decoders: - dec.sendErr(err) - decodeBlock(dec) - } - break decodeStream - } - - // Go through all blocks of the frame. - for { - var dec *blockDec - select { - case <-ctx.Done(): - break decodeStream - case dec = <-d.decoders: - // Once we have a decoder, we MUST return it. - } - err := frame.next(dec) - if !historySent { - h := frame.history - if debugDecoder { - println("Alloc History:", h.allocFrameBuffer) - } - hist.reset() - if h.dict != nil { - hist.setDict(h.dict) - } - dec.async.newHist = &h - dec.async.fcs = frame.FrameContentSize - historySent = true - } else { - dec.async.newHist = nil - } - if debugDecoder && err != nil { - println("next block returned error:", err) - } - dec.err = err - dec.checkCRC = nil - if dec.Last && frame.HasCheckSum && err == nil { - crc, err := frame.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - dec.err = err - } - var tmp [4]byte - copy(tmp[:], crc) - dec.checkCRC = tmp[:] - if debugDecoder { - println("found crc to check:", dec.checkCRC) - } - } - err = dec.err - last := dec.Last - decodeBlock(dec) - if err != nil { - break decodeStream - } - if last { - break - } - } - } - close(seqDecode) - wg.Wait() - hist.reset() - d.frame.history.b = frameHistCache -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go deleted file mode 100644 index f42448e69c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "runtime" -) - -// DOption is an option for creating a decoder. -type DOption func(*decoderOptions) error - -// options retains accumulated state of multiple options. -type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - maxWindowSize uint64 - dicts []dict - ignoreChecksum bool - limitToCap bool - decodeBufsBelow int -} - -func (o *decoderOptions) setDefault() { - *o = decoderOptions{ - // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - maxWindowSize: MaxWindowSize, - decodeBufsBelow: 128 << 10, - } - if o.concurrent > 4 { - o.concurrent = 4 - } - o.maxDecodedSize = 64 << 30 -} - -// WithDecoderLowmem will set whether to use a lower amount of memory, -// but possibly have to allocate more while running. -func WithDecoderLowmem(b bool) DOption { - return func(o *decoderOptions) error { o.lowMem = b; return nil } -} - -// WithDecoderConcurrency sets the number of created decoders. -// When decoding block with DecodeAll, this will limit the number -// of possible concurrently running decodes. -// When decoding streams, this will limit the number of -// inflight blocks. -// When decoding streams and setting maximum to 1, -// no async decoding will be done. -// When a value of 0 is provided GOMAXPROCS will be used. -// By default this will be set to 4 or GOMAXPROCS, whatever is lower. 
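The deleted option setters below follow the functional-options pattern: each DOption mutates a private decoderOptions struct when NewReader applies it, so limits compose freely. A minimal sketch of combining them, using the klauspost/compress/zstd API as vendored here; the limit values are arbitrary illustrations, not recommendations:

```go
// Sketch only: composing the DOption setters defined in this file.
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Produce a small frame so the decoder has something to decode.
	enc, _ := zstd.NewWriter(nil) // nil writer is fine for EncodeAll-only use
	frame := enc.EncodeAll([]byte("hello zstd"), nil)
	_ = enc.Close()

	dec, err := zstd.NewReader(nil, // nil reader: stateless DecodeAll use only
		zstd.WithDecoderConcurrency(0),   // 0 selects GOMAXPROCS
		zstd.WithDecoderMaxMemory(1<<30), // refuse outputs above 1 GiB
		zstd.WithDecoderMaxWindow(8<<20), // reject frames needing >8 MiB windows
		zstd.WithDecoderLowmem(true),     // favor smaller allocations
	)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(frame, nil)
	fmt.Println(string(out), err) // hello zstd <nil>
}
```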
-func WithDecoderConcurrency(n int) DOption {
- return func(o *decoderOptions) error {
- if n < 0 {
- return errors.New("concurrency must be at least 1")
- }
- if n == 0 {
- o.concurrent = runtime.GOMAXPROCS(0)
- } else {
- o.concurrent = n
- }
- return nil
- }
-}
-
-// WithDecoderMaxMemory allows setting a maximum decoded size for in-memory
-// non-streaming operations or maximum window size for streaming operations.
-// This can be used to control memory usage of potentially hostile content.
-// Maximum is 1 << 63 bytes. Default is 64GiB.
-func WithDecoderMaxMemory(n uint64) DOption {
- return func(o *decoderOptions) error {
- if n == 0 {
- return errors.New("WithDecoderMaxMemory must be at least 1")
- }
- if n > 1<<63 {
- return errors.New("WithDecoderMaxmemory must be less than 1 << 63")
- }
- o.maxDecodedSize = n
- return nil
- }
-}
-
-// WithDecoderDicts allows registering one or more dictionaries for the decoder.
-// If several dictionaries with the same ID are provided, the last one will be used.
-func WithDecoderDicts(dicts ...[]byte) DOption {
- return func(o *decoderOptions) error {
- for _, b := range dicts {
- d, err := loadDict(b)
- if err != nil {
- return err
- }
- o.dicts = append(o.dicts, *d)
- }
- return nil
- }
-}
-
-// WithDecoderMaxWindow allows setting a maximum window size for decodes.
-// This allows rejecting inputs that would cause excessive memory usage.
-// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
-// If WithDecoderMaxMemory is set to a lower value, that will be used.
-// Default is 512MB; maximum is ~3.75TB as per the zstandard spec.
-func WithDecoderMaxWindow(size uint64) DOption {
- return func(o *decoderOptions) error {
- if size < MinWindowSize {
- return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes")
- }
- if size > (1<<41)+7*(1<<38) {
- return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB")
- }
- o.maxWindowSize = size
- return nil
- }
-}
-
-// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
-// or any size set in WithDecoderMaxMemory.
-// This can be used to limit decoding to a specific maximum output size.
-// Disabled by default.
-func WithDecodeAllCapLimit(b bool) DOption {
- return func(o *decoderOptions) error {
- o.limitToCap = b
- return nil
- }
-}
-
-// WithDecodeBuffersBelow will fully decode readers that have a
-// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
-// This typically uses fewer allocations but will have the full decompressed object in memory.
-// Note that WithDecodeAllCapLimit disables this, as does setting a size of 0 or less.
-// Default is 128KiB.
-func WithDecodeBuffersBelow(size int) DOption {
- return func(o *decoderOptions) error {
- o.decodeBufsBelow = size
- return nil
- }
-}
-
-// IgnoreChecksum allows forcibly ignoring checksum checking.
-func IgnoreChecksum(b bool) DOption { - return func(o *decoderOptions) error { - o.ignoreChecksum = b - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go deleted file mode 100644 index a36ae83ef5..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ /dev/null @@ -1,122 +0,0 @@ -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/klauspost/compress/huff0" -) - -type dict struct { - id uint32 - - litEnc *huff0.Scratch - llDec, ofDec, mlDec sequenceDec - //llEnc, ofEnc, mlEnc []*fseEncoder - offsets [3]int - content []byte -} - -var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} - -// ID returns the dictionary id or 0 if d is nil. -func (d *dict) ID() uint32 { - if d == nil { - return 0 - } - return d.id -} - -// DictContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) DictContentSize() int { - if d == nil { - return 0 - } - return len(d.content) -} - -// Load a dictionary as described in -// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format -func loadDict(b []byte) (*dict, error) { - // Check static field size. - if len(b) <= 8+(3*4) { - return nil, io.ErrUnexpectedEOF - } - d := dict{ - llDec: sequenceDec{fse: &fseDecoder{}}, - ofDec: sequenceDec{fse: &fseDecoder{}}, - mlDec: sequenceDec{fse: &fseDecoder{}}, - } - if !bytes.Equal(b[:4], dictMagic[:]) { - return nil, ErrMagicMismatch - } - d.id = binary.LittleEndian.Uint32(b[4:8]) - if d.id == 0 { - return nil, errors.New("dictionaries cannot have ID 0") - } - - // Read literal table - var err error - d.litEnc, b, err = huff0.ReadTable(b[8:], nil) - if err != nil { - return nil, err - } - d.litEnc.Reuse = huff0.ReusePolicyMust - - br := byteReader{ - b: b, - off: 0, - } - readDec := func(i tableIndex, dec *fseDecoder) error { - if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { - return err - } - if br.overread() { - return io.ErrUnexpectedEOF - } - err = dec.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder || debugEncoder { - println("Read table ok", "symbolLen:", dec.symbolLen) - } - // Set decoders as predefined so they aren't reused. 
- dec.preDefined = true - return nil - } - - if err := readDec(tableOffsets, d.ofDec.fse); err != nil { - return nil, err - } - if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { - return nil, err - } - if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { - return nil, err - } - if br.remain() < 12 { - return nil, io.ErrUnexpectedEOF - } - - d.offsets[0] = int(br.Uint32()) - br.advance(4) - d.offsets[1] = int(br.Uint32()) - br.advance(4) - d.offsets[2] = int(br.Uint32()) - br.advance(4) - if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { - return nil, errors.New("invalid offset in dictionary") - } - d.content = make([]byte, br.remain()) - copy(d.content, br.unread()) - if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { - return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) - } - - return &d, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go deleted file mode 100644 index 15ae8ee807..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ /dev/null @@ -1,188 +0,0 @@ -package zstd - -import ( - "fmt" - "math/bits" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -const ( - dictShardBits = 6 -) - -type fastBase struct { - // cur is the offset at the start of hist - cur int32 - // maximum offset. Should be at least 2x block size. - maxMatchOff int32 - hist []byte - crc *xxhash.Digest - tmp [8]byte - blk *blockEnc - lastDictID uint32 - lowMem bool -} - -// CRC returns the underlying CRC writer. -func (e *fastBase) CRC() *xxhash.Digest { - return e.crc -} - -// AppendCRC will append the CRC to the destination slice and return it. -func (e *fastBase) AppendCRC(dst []byte) []byte { - crc := e.crc.Sum(e.tmp[:0]) - dst = append(dst, crc[7], crc[6], crc[5], crc[4]) - return dst -} - -// WindowSize returns the window size of the encoder, -// or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int64) int32 { - if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } - return b - } - return e.maxMatchOff -} - -// Block returns the current block. -func (e *fastBase) Block() *blockEnc { - return e.blk -} - -func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) - } - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.ensureHist(len(src)) - } else { - if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { - panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) - } - // Move down - offset := int32(len(e.hist)) - e.maxMatchOff - copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:e.maxMatchOff] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -// ensureHist will ensure that history can keep at least this many bytes. 
-func (e *fastBase) ensureHist(n int) { - if cap(e.hist) >= n { - return - } - l := e.maxMatchOff - if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { - l += maxCompressedBlockSize - } else { - l += e.maxMatchOff - } - // Make it at least 1MB. - if l < 1<<20 && !e.lowMem { - l = 1 << 20 - } - // Make it at least the requested size. - if l < int32(n) { - l = int32(n) - } - e.hist = make([]byte, 0, l) -} - -// useBlock will replace the block with the provided one, -// but transfer recent offsets from the previous. -func (e *fastBase) UseBlock(enc *blockEnc) { - enc.reset(e.blk) - e.blk = enc -} - -func (e *fastBase) matchlen(s, t int32, src []byte) int32 { - if debugAsserts { - if s < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if s-t > e.maxMatchOff { - err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) - panic(err) - } - if len(src)-int(s) > maxCompressedBlockSize { - panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) - } - } - a := src[s:] - b := src[t:] - b = b[:len(a)] - end := int32((len(a) >> 3) << 3) - for i := int32(0); i < end; i += 8 { - if diff := load6432(a, i) ^ load6432(b, i); diff != 0 { - return i + int32(bits.TrailingZeros64(diff)>>3) - } - } - - a = a[end:] - b = b[end:] - for i := range a { - if a[i] != b[i] { - return int32(i) + end - } - } - return int32(len(a)) + end -} - -// Reset the encoding table. -func (e *fastBase) resetBase(d *dict, singleBlock bool) { - if e.blk == nil { - e.blk = &blockEnc{lowMem: e.lowMem} - e.blk.init() - } else { - e.blk.reset(nil) - } - e.blk.initNewEncode() - if e.crc == nil { - e.crc = xxhash.New() - } else { - e.crc.Reset() - } - if d != nil { - low := e.lowMem - if singleBlock { - e.lowMem = true - } - e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) - e.lowMem = low - } - - // We offset current position so everything will be out of reach. - // If above reset line, history will be purged. - if e.cur < bufferReset { - e.cur += e.maxMatchOff + int32(len(e.hist)) - } - e.hist = e.hist[:0] - if d != nil { - // Set offsets (currently not used) - for i, off := range d.offsets { - e.blk.recentOffsets[i] = uint32(off) - e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] - } - // Transfer litenc. - e.blk.dictLitEnc = d.litEnc - e.hist = append(e.hist, d.content...) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go deleted file mode 100644 index dbbb88d92b..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ /dev/null @@ -1,559 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "fmt" - - "github.com/klauspost/compress" -) - -const ( - bestLongTableBits = 22 // Bits used in the long match table - bestLongTableSize = 1 << bestLongTableBits // Size of the table - bestLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. 
- bestShortTableBits = 18 // Bits used in the short match table - bestShortTableSize = 1 << bestShortTableBits // Size of the table - bestShortLen = 4 // Bytes used for table hash - -) - -type match struct { - offset int32 - s int32 - length int32 - rep int32 - est int32 - _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes -} - -const highScore = 25000 - -// estBits will estimate output bits from predefined tables. -func (m *match) estBits(bitsPerByte int32) { - mlc := mlCode(uint32(m.length - zstdMinMatch)) - var ofc uint8 - if m.rep < 0 { - ofc = ofCode(uint32(m.s-m.offset) + 3) - } else { - ofc = ofCode(uint32(m.rep)) - } - // Cost, excluding - ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] - - // Add cost of match encoding... - m.est = int32(ofTT.outBits + mlTT.outBits) - m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) - // Subtract savings compared to literal encoding... - m.est -= (m.length * bitsPerByte) >> 10 - if m.est > 0 { - // Unlikely gain.. - m.length = 0 - m.est = highScore - } -} - -// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type bestFastEncoder struct { - fastBase - table [bestShortTableSize]prevEntry - longTable [bestLongTableSize]prevEntry - dictTable []prevEntry - dictLongTable []prevEntry -} - -// Encode improves compression... -func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 4 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = prevEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - v2 := e.table[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.table[i] = prevEntry{ - offset: v, - prev: v2, - } - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Use this to estimate literal cost. - // Scaled by 10 bits. 
- bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) - // Huffman can never go < 1 bit/byte - if bitsPerByte < 1024 { - bitsPerByte = 1024 - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - const kSearchStrength = 10 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - offset3 := int32(blk.recentOffsets[2]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - _ = addLiterals - - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - bestOf := func(a, b match) match { - if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { - return a - } - return b - } - const goodEnough = 100 - - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - matchAt := func(offset int32, s int32, first uint32, rep int32) match { - if s-offset >= e.maxMatchOff || load3232(src, offset) != first { - return match{s: s, est: highScore} - } - if debugAsserts { - if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { - panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } - m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} - m.estBits(bitsPerByte) - return m - } - - best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) - - if canRepeat && best.length < goodEnough { - cv32 := uint32(cv >> 8) - spp := s + 1 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) - if best.length > 0 { - cv32 = uint32(cv >> 24) - spp += 2 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) - } - } - // Load next and check... - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} - - // Look far ahead, unless we have a really long match already... - if best.length < goodEnough { - // No match found, move forward on input, no need to check forward... 
- if best.length < 4 { - s += 1 + (s-nextEmit)>>(kSearchStrength-1) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - continue - } - - s++ - candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s) - cv2 := load6432(src, s+1) - candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] - candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] - - // Short at s+1 - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - // Long at s+1, s+2 - best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) - best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) - if false { - // Short at s+3. - // Too often worse... - best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) - } - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. - if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { - bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) - if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { - bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) - } - best = bestEnd - } - } - } - - if debugAsserts { - if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { - panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) - } - } - - // We have a match, we can store the forward value - if best.rep > 0 { - s = best.s - var seq seq - seq.matchLen = uint32(best.length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := best.s - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - repIndex := best.offset - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = uint32(best.rep) - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s - s = best.s + best.length - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - - } - break encodeLoop - } - // Index skipped... - off := index0 + e.cur - for index0 < s-1 { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - off++ - index0++ - } - switch best.rep { - case 2: - offset1, offset2 = offset2, offset1 - case 3: - offset1, offset2, offset3 = offset3, offset1, offset2 - } - cv = load6432(src, s) - continue - } - - // A 4-byte match has been found. 
Update recent offsets. - // We'll later see if more than 4 bytes. - s = best.s - t := best.offset - offset1, offset2, offset3 = s-t, offset1, offset2 - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := best.length - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - // every entry - for index0 < s-1 { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - index0++ - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - blk.recentOffsets[2] = uint32(offset3) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. 
-func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Reset will reset and set a dictionary if not nil -func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]prevEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = bestShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 - e.dictTable[nextHash] = prevEntry{ - prev: e.dictTable[nextHash].offset, - offset: i, - } - e.dictTable[nextHash1] = prevEntry{ - prev: e.dictTable[nextHash1].offset, - offset: i + 1, - } - e.dictTable[nextHash2] = prevEntry{ - prev: e.dictTable[nextHash2].offset, - offset: i + 2, - } - e.dictTable[nextHash3] = prevEntry{ - prev: e.dictTable[nextHash3].offset, - offset: i + 3, - } - } - e.lastDictID = d.id - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - } - // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) - - e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go deleted file mode 100644 index d70e3fd3d3..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ /dev/null @@ -1,1246 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - betterLongTableBits = 19 // Bits used in the long match table - betterLongTableSize = 1 << betterLongTableBits // Size of the table - betterLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. 
- betterShortTableBits = 13 // Bits used in the short match table - betterShortTableSize = 1 << betterShortTableBits // Size of the table - betterShortLen = 5 // Bytes used for table hash - - betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table - betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard - - betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table - betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard -) - -type prevEntry struct { - offset int32 - prev int32 -} - -// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type betterFastEncoder struct { - fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry -} - -type betterFastEncoderDict struct { - betterFastEncoder - dictTable []tableEntry - dictLongTable []prevEntry - shortTableShardDirty [betterShortTableShardCnt]bool - longTableShardDirty [betterLongTableShardCnt]bool - allDirty bool -} - -// Encode improves compression... -func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) 
-		s.litLen = uint32(until - nextEmit)
-	}
-	if debugEncoder {
-		println("recent offsets:", blk.recentOffsets)
-	}
-
-encodeLoop:
-	for {
-		var t int32
-		// We allow the encoder to optionally turn off repeat offsets across blocks
-		canRepeat := len(blk.sequences) > 2
-		var matched int32
-
-		for {
-			if debugAsserts && canRepeat && offset1 == 0 {
-				panic("offset0 was 0")
-			}
-
-			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
-			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
-			candidateL := e.longTable[nextHashL]
-			candidateS := e.table[nextHashS]
-
-			const repOff = 1
-			repIndex := s - offset1 + repOff
-			off := s + e.cur
-			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
-			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
-
-			if canRepeat {
-				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
-					// Consider history as well.
-					var seq seq
-					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
-
-					seq.matchLen = uint32(length - zstdMinMatch)
-
-					// We might be able to match backwards.
-					// Extend as long as we can.
-					start := s + repOff
-					// We end the search early, so we don't risk 0 literals
-					// and have to do special offset treatment.
-					startLimit := nextEmit + 1
-
-					tMin := s - e.maxMatchOff
-					if tMin < 0 {
-						tMin = 0
-					}
-					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
-						repIndex--
-						start--
-						seq.matchLen++
-					}
-					addLiterals(&seq, start)
-
-					// rep 0
-					seq.offset = 1
-					if debugSequences {
-						println("repeat sequence", seq, "next s:", s)
-					}
-					blk.sequences = append(blk.sequences, seq)
-
-					// Index match start+1 (long) -> s - 1
-					index0 := s + repOff
-					s += length + repOff
-
-					nextEmit = s
-					if s >= sLimit {
-						if debugEncoder {
-							println("repeat ended", s, length)
-						}
-						break encodeLoop
-					}
-					// Index skipped...
-					for index0 < s-1 {
-						cv0 := load6432(src, index0)
-						cv1 := cv0 >> 8
-						h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-						off := index0 + e.cur
-						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
-						e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
-						index0 += 2
-					}
-					cv = load6432(src, s)
-					continue
-				}
-				const repOff2 = 1
-
-				// We deviate from the reference encoder and also check offset 2.
-				// Still slower and not much better, so disabled.
-				// repIndex = s - offset2 + repOff2
-				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
-					// Consider history as well.
-					var seq seq
-					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
-
-					seq.matchLen = uint32(length - zstdMinMatch)
-
-					// We might be able to match backwards.
-					// Extend as long as we can.
-					start := s + repOff2
-					// We end the search early, so we don't risk 0 literals
-					// and have to do special offset treatment.
-					startLimit := nextEmit + 1
-
-					tMin := s - e.maxMatchOff
-					if tMin < 0 {
-						tMin = 0
-					}
-					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
-						repIndex--
-						start--
-						seq.matchLen++
-					}
-					addLiterals(&seq, start)
-
-					// rep 2
-					seq.offset = 2
-					if debugSequences {
-						println("repeat sequence 2", seq, "next s:", s)
-					}
-					blk.sequences = append(blk.sequences, seq)
-
-					index0 := s + repOff2
-					s += length + repOff2
-					nextEmit = s
-					if s >= sLimit {
-						if debugEncoder {
-							println("repeat ended", s, length)
-						}
-						break encodeLoop
-					}
-
-					// Index skipped...
- for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. 
- matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is around 3 bytes, but depends on input. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 3 - - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - s2 := s + skipBeginning - cv := load3232(src, s2) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Encode improves compression... -func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - e.allDirty = true - break - } - // Shift down everything in the table that isn't already too far away. 
-		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
-		for i := range e.table[:] {
-			v := e.table[i].offset
-			if v < minOff {
-				v = 0
-			} else {
-				v = v - e.cur + e.maxMatchOff
-			}
-			e.table[i].offset = v
-		}
-		for i := range e.longTable[:] {
-			v := e.longTable[i].offset
-			v2 := e.longTable[i].prev
-			if v < minOff {
-				v = 0
-				v2 = 0
-			} else {
-				v = v - e.cur + e.maxMatchOff
-				if v2 < minOff {
-					v2 = 0
-				} else {
-					v2 = v2 - e.cur + e.maxMatchOff
-				}
-			}
-			e.longTable[i] = prevEntry{
-				offset: v,
-				prev:   v2,
-			}
-		}
-		e.allDirty = true
-		e.cur = e.maxMatchOff
-		break
-	}
-
-	s := e.addBlock(src)
-	blk.size = len(src)
-	if len(src) < minNonLiteralBlockSize {
-		blk.extraLits = len(src)
-		blk.literals = blk.literals[:len(src)]
-		copy(blk.literals, src)
-		return
-	}
-
-	// Override src
-	src = e.hist
-	sLimit := int32(len(src)) - inputMargin
-	// stepSize is the number of bytes to skip on every main loop iteration.
-	// It should be >= 1.
-	const stepSize = 1
-
-	const kSearchStrength = 9
-
-	// nextEmit is where in src the next emitLiteral should start from.
-	nextEmit := s
-	cv := load6432(src, s)
-
-	// Relative offsets
-	offset1 := int32(blk.recentOffsets[0])
-	offset2 := int32(blk.recentOffsets[1])
-
-	addLiterals := func(s *seq, until int32) {
-		if until == nextEmit {
-			return
-		}
-		blk.literals = append(blk.literals, src[nextEmit:until]...)
-		s.litLen = uint32(until - nextEmit)
-	}
-	if debugEncoder {
-		println("recent offsets:", blk.recentOffsets)
-	}
-
-encodeLoop:
-	for {
-		var t int32
-		// We allow the encoder to optionally turn off repeat offsets across blocks
-		canRepeat := len(blk.sequences) > 2
-		var matched int32
-
-		for {
-			if debugAsserts && canRepeat && offset1 == 0 {
-				panic("offset0 was 0")
-			}
-
-			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
-			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
-			candidateL := e.longTable[nextHashL]
-			candidateS := e.table[nextHashS]
-
-			const repOff = 1
-			repIndex := s - offset1 + repOff
-			off := s + e.cur
-			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
-			e.markLongShardDirty(nextHashL)
-			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
-			e.markShortShardDirty(nextHashS)
-
-			if canRepeat {
-				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
-					// Consider history as well.
-					var seq seq
-					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
-
-					seq.matchLen = uint32(length - zstdMinMatch)
-
-					// We might be able to match backwards.
-					// Extend as long as we can.
-					start := s + repOff
-					// We end the search early, so we don't risk 0 literals
-					// and have to do special offset treatment.
-					startLimit := nextEmit + 1
-
-					tMin := s - e.maxMatchOff
-					if tMin < 0 {
-						tMin = 0
-					}
-					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
-						repIndex--
-						start--
-						seq.matchLen++
-					}
-					addLiterals(&seq, start)
-
-					// rep 0
-					seq.offset = 1
-					if debugSequences {
-						println("repeat sequence", seq, "next s:", s)
-					}
-					blk.sequences = append(blk.sequences, seq)
-
-					// Index match start+1 (long) -> s - 1
-					index0 := s + repOff
-					s += length + repOff
-
-					nextEmit = s
-					if s >= sLimit {
-						if debugEncoder {
-							println("repeat ended", s, length)
-						}
-						break encodeLoop
-					}
-					// Index skipped...
-					for index0 < s-1 {
-						cv0 := load6432(src, index0)
-						cv1 := cv0 >> 8
-						h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-						off := index0 + e.cur
-						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
-						e.markLongShardDirty(h0)
-						h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
-						e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
-						e.markShortShardDirty(h1)
-						index0 += 2
-					}
-					cv = load6432(src, s)
-					continue
-				}
-				const repOff2 = 1
-
-				// We deviate from the reference encoder and also check offset 2.
-				// Still slower and not much better, so disabled.
-				// repIndex = s - offset2 + repOff2
-				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
-					// Consider history as well.
-					var seq seq
-					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
-
-					seq.matchLen = uint32(length - zstdMinMatch)
-
-					// We might be able to match backwards.
-					// Extend as long as we can.
-					start := s + repOff2
-					// We end the search early, so we don't risk 0 literals
-					// and have to do special offset treatment.
-					startLimit := nextEmit + 1
-
-					tMin := s - e.maxMatchOff
-					if tMin < 0 {
-						tMin = 0
-					}
-					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
-						repIndex--
-						start--
-						seq.matchLen++
-					}
-					addLiterals(&seq, start)
-
-					// rep 2
-					seq.offset = 2
-					if debugSequences {
-						println("repeat sequence 2", seq, "next s:", s)
-					}
-					blk.sequences = append(blk.sequences, seq)
-
-					index0 := s + repOff2
-					s += length + repOff2
-					nextEmit = s
-					if s >= sLimit {
-						if debugEncoder {
-							println("repeat ended", s, length)
-						}
-						break encodeLoop
-					}
-
-					// Index skipped...
-					for index0 < s-1 {
-						cv0 := load6432(src, index0)
-						cv1 := cv0 >> 8
-						h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-						off := index0 + e.cur
-						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
-						e.markLongShardDirty(h0)
-						h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
-						e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
-						e.markShortShardDirty(h1)
-						index0 += 2
-					}
-					cv = load6432(src, s)
-					// Swap offsets
-					offset1, offset2 = offset2, offset1
-					continue
-				}
-			}
-			// Find the offsets of our two matches.
-			coffsetL := candidateL.offset - e.cur
-			coffsetLP := candidateL.prev - e.cur
-
-			// Check if we have a long match.
-			if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
-				// Found a long match, at least 8 bytes.
-				matched = e.matchlen(s+8, coffsetL+8, src) + 8
-				t = coffsetL
-				if debugAsserts && s <= t {
-					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
-				}
-				if debugAsserts && s-t > e.maxMatchOff {
-					panic("s - t >e.maxMatchOff")
-				}
-				if debugMatches {
-					println("long match")
-				}
-
-				if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
-					// Found a long match, at least 8 bytes.
-					prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
-					if prevMatch > matched {
-						matched = prevMatch
-						t = coffsetLP
-					}
-					if debugAsserts && s <= t {
-						panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
-					}
-					if debugAsserts && s-t > e.maxMatchOff {
-						panic("s - t >e.maxMatchOff")
-					}
-					if debugMatches {
-						println("long match")
-					}
-				}
-				break
-			}
-
-			// Check if we have a long match on prev.
-			if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
-				// Found a long match, at least 8 bytes.
- matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. 
- l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
-		blk.extraLits = len(src) - int(nextEmit)
-	}
-	blk.recentOffsets[0] = uint32(offset1)
-	blk.recentOffsets[1] = uint32(offset2)
-	if debugEncoder {
-		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
-	}
-}
-
-// Reset will reset and set a dictionary if not nil
-func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
-	e.resetBase(d, singleBlock)
-	if d != nil {
-		panic("betterFastEncoder: Reset with dict")
-	}
-}
-
-// Reset will reset and set a dictionary if not nil
-func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
-	e.resetBase(d, singleBlock)
-	if d == nil {
-		return
-	}
-	// Init or copy dict table
-	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
-		if len(e.dictTable) != len(e.table) {
-			e.dictTable = make([]tableEntry, len(e.table))
-		}
-		end := int32(len(d.content)) - 8 + e.maxMatchOff
-		for i := e.maxMatchOff; i < end; i += 4 {
-			const hashLog = betterShortTableBits
-
-			cv := load6432(d.content, i-e.maxMatchOff)
-			nextHash := hashLen(cv, hashLog, betterShortLen)      // 0 -> 4
-			nextHash1 := hashLen(cv>>8, hashLog, betterShortLen)  // 1 -> 5
-			nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
-			nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
-			e.dictTable[nextHash] = tableEntry{
-				val:    uint32(cv),
-				offset: i,
-			}
-			e.dictTable[nextHash1] = tableEntry{
-				val:    uint32(cv >> 8),
-				offset: i + 1,
-			}
-			e.dictTable[nextHash2] = tableEntry{
-				val:    uint32(cv >> 16),
-				offset: i + 2,
-			}
-			e.dictTable[nextHash3] = tableEntry{
-				val:    uint32(cv >> 24),
-				offset: i + 3,
-			}
-		}
-		e.lastDictID = d.id
-		e.allDirty = true
-	}
-
-	// Init or copy dict table
-	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
-		if len(e.dictLongTable) != len(e.longTable) {
-			e.dictLongTable = make([]prevEntry, len(e.longTable))
-		}
-		if len(d.content) >= 8 {
-			cv := load6432(d.content, 0)
-			h := hashLen(cv, betterLongTableBits, betterLongLen)
-			e.dictLongTable[h] = prevEntry{
-				offset: e.maxMatchOff,
-				prev:   e.dictLongTable[h].offset,
-			}
-
-			end := int32(len(d.content)) - 8 + e.maxMatchOff
-			off := 8 // First to read
-			for i := e.maxMatchOff + 1; i < end; i++ {
-				cv = cv>>8 | (uint64(d.content[off]) << 56)
-				h := hashLen(cv, betterLongTableBits, betterLongLen)
-				e.dictLongTable[h] = prevEntry{
-					offset: i,
-					prev:   e.dictLongTable[h].offset,
-				}
-				off++
-			}
-		}
-		e.lastDictID = d.id
-		e.allDirty = true
-	}
-
-	// Reset table to initial state
-	{
-		dirtyShardCnt := 0
-		if !e.allDirty {
-			for i := range e.shortTableShardDirty {
-				if e.shortTableShardDirty[i] {
-					dirtyShardCnt++
-				}
-			}
-		}
-		const shardCnt = betterShortTableShardCnt
-		const shardSize = betterShortTableShardSize
-		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
-			copy(e.table[:], e.dictTable)
-			for i := range e.shortTableShardDirty {
-				e.shortTableShardDirty[i] = false
-			}
-		} else {
-			for i := range e.shortTableShardDirty {
-				if !e.shortTableShardDirty[i] {
-					continue
-				}
-
-				copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
-				e.shortTableShardDirty[i] = false
-			}
-		}
-	}
-	{
-		dirtyShardCnt := 0
-		if !e.allDirty {
-			for i := range e.longTableShardDirty {
-				if e.longTableShardDirty[i] {
-					dirtyShardCnt++
-				}
-			}
-		}
-		const shardCnt = betterLongTableShardCnt
-		const shardSize = betterLongTableShardSize
-		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
-			copy(e.longTable[:], e.dictLongTable)
-			for i := range e.longTableShardDirty {
-				e.longTableShardDirty[i] = false
-			}
-		} else {
-			for i := range e.longTableShardDirty {
-				if !e.longTableShardDirty[i] {
-					continue
-				}
-
-				copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
-				e.longTableShardDirty[i] = false
-			}
-		}
-	}
-	e.cur = e.maxMatchOff
-	e.allDirty = false
-}
-
-func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
-	e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
-}
-
-func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
-	e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
deleted file mode 100644
index 1f4a9a2455..0000000000
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ /dev/null
@@ -1,1127 +0,0 @@
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-// Based on work by Yann Collet, released under BSD License.
-
-package zstd
-
-import "fmt"
-
-const (
-	dFastLongTableBits = 17                      // Bits used in the long match table
-	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
-	dFastLongTableMask = dFastLongTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
-	dFastLongLen       = 8                       // Bytes used for table hash
-
-	dLongTableShardCnt  = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
-	dLongTableShardSize = dFastLongTableSize / tableShardCnt        // Size of an individual shard
-
-	dFastShortTableBits = tableBits                // Bits used in the short match table
-	dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
-	dFastShortTableMask = dFastShortTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
-	dFastShortLen       = 5                        // Bytes used for table hash
-
-)
-
-type doubleFastEncoder struct {
-	fastEncoder
-	longTable [dFastLongTableSize]tableEntry
-}
-
-type doubleFastEncoderDict struct {
-	fastEncoderDict
-	longTable           [dFastLongTableSize]tableEntry
-	dictLongTable       []tableEntry
-	longTableShardDirty [dLongTableShardCnt]bool
-}
-
-// Encode mimics functionality in zstd_dfast.c
-func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
-	const (
-		// Input margin is the number of bytes we read (8)
-		// and the maximum we will read ahead (2)
-		inputMargin            = 8 + 2
-		minNonLiteralBlockSize = 16
-	)
-
-	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
-		if len(e.hist) == 0 {
-			for i := range e.table[:] {
-				e.table[i] = tableEntry{}
-			}
-			for i := range e.longTable[:] {
-				e.longTable[i] = tableEntry{}
-			}
-			e.cur = e.maxMatchOff
-			break
-		}
-		// Shift down everything in the table that isn't already too far away.
-		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
-		for i := range e.table[:] {
-			v := e.table[i].offset
-			if v < minOff {
-				v = 0
-			} else {
-				v = v - e.cur + e.maxMatchOff
-			}
-			e.table[i].offset = v
-		}
-		for i := range e.longTable[:] {
-			v := e.longTable[i].offset
-			if v < minOff {
-				v = 0
-			} else {
-				v = v - e.cur + e.maxMatchOff
-			}
-			e.longTable[i].offset = v
-		}
-		e.cur = e.maxMatchOff
-		break
-	}
-
-	s := e.addBlock(src)
-	blk.size = len(src)
-	if len(src) < minNonLiteralBlockSize {
-		blk.extraLits = len(src)
-		blk.literals = blk.literals[:len(src)]
-		copy(blk.literals, src)
-		return
-	}
-
-	// Override src
-	src = e.hist
-	sLimit := int32(len(src)) - inputMargin
-	// stepSize is the number of bytes to skip on every main loop iteration.
-	// It should be >= 1.
-	const stepSize = 1
-
-	const kSearchStrength = 8
-
-	// nextEmit is where in src the next emitLiteral should start from.
-	nextEmit := s
-	cv := load6432(src, s)
-
-	// Relative offsets
-	offset1 := int32(blk.recentOffsets[0])
-	offset2 := int32(blk.recentOffsets[1])
-
-	addLiterals := func(s *seq, until int32) {
-		if until == nextEmit {
-			return
-		}
-		blk.literals = append(blk.literals, src[nextEmit:until]...)
-		s.litLen = uint32(until - nextEmit)
-	}
-	if debugEncoder {
-		println("recent offsets:", blk.recentOffsets)
-	}
-
-encodeLoop:
-	for {
-		var t int32
-		// We allow the encoder to optionally turn off repeat offsets across blocks
-		canRepeat := len(blk.sequences) > 2
-
-		for {
-			if debugAsserts && canRepeat && offset1 == 0 {
-				panic("offset0 was 0")
-			}
-
-			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
-			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
-			candidateL := e.longTable[nextHashL]
-			candidateS := e.table[nextHashS]
-
-			const repOff = 1
-			repIndex := s - offset1 + repOff
-			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
-			e.longTable[nextHashL] = entry
-			e.table[nextHashS] = entry
-
-			if canRepeat {
-				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
-					// Consider history as well.
-					var seq seq
-					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
-
-					seq.matchLen = uint32(length - zstdMinMatch)
-
-					// We might be able to match backwards.
-					// Extend as long as we can.
-					start := s + repOff
-					// We end the search early, so we don't risk 0 literals
-					// and have to do special offset treatment.
-					startLimit := nextEmit + 1
-
-					tMin := s - e.maxMatchOff
-					if tMin < 0 {
-						tMin = 0
-					}
-					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
-						repIndex--
-						start--
-						seq.matchLen++
-					}
-					addLiterals(&seq, start)
-
-					// rep 0
-					seq.offset = 1
-					if debugSequences {
-						println("repeat sequence", seq, "next s:", s)
-					}
-					blk.sequences = append(blk.sequences, seq)
-					s += length + repOff
-					nextEmit = s
-					if s >= sLimit {
-						if debugEncoder {
-							println("repeat ended", s, length)
-						}
-						break encodeLoop
-					}
-					cv = load6432(src, s)
-					continue
-				}
-			}
-			// Find the offsets of our two matches.
-			coffsetL := s - (candidateL.offset - e.cur)
-			coffsetS := s - (candidateS.offset - e.cur)
-
-			// Check if we have a long match.
-			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
-				// Found a long match, likely at least 8 bytes.
-				// Reference encoder checks all 8 bytes, we only check 4,
-				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
-				t = candidateL.offset - e.cur
-				if debugAsserts && s <= t {
-					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
-				}
-				if debugAsserts && s-t > e.maxMatchOff {
-					panic("s - t >e.maxMatchOff")
-				}
-				if debugMatches {
-					println("long match")
-				}
-				break
-			}
-
-			// Check if we have a short match.
-			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
-				// found a regular match
-				// See if we can find a long match at s+1
-				const checkAt = 1
-				cv := load6432(src, s+checkAt)
-				nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
-				candidateL = e.longTable[nextHashL]
-				coffsetL = s - (candidateL.offset - e.cur) + checkAt
-
-				// We can store it, since we have at least a 4 byte match.
-				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
-				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
-					// Found a long match, likely at least 8 bytes.
- // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. 
- offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - if e.cur >= bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - for { - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if len(blk.sequences) > 2 { - if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if len(blk.sequences) <= 2 { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
- t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - e.markLongShardDirty(nextHashL) - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) - e.longTable[longHash1] = te0 - e.longTable[longHash2] = te1 - e.markLongShardDirty(longHash1) - e.markLongShardDirty(longHash2) - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) - hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) - e.table[hashVal1] = te0 - e.markShardDirty(hashVal1) - e.table[hashVal2] = te1 - e.markShardDirty(hashVal2) - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // If we encoded more than 64K mark all dirty. 
- if len(src) > 64<<10 { - e.markAllShardsDirty() - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { - e.fastEncoder.Reset(d, singleBlock) - if d != nil { - panic("doubleFastEncoder: Reset with dict not supported") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { - allDirty := e.allDirty - e.fastEncoderDict.Reset(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]tableEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: e.maxMatchOff, - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: i, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - // Reset table to initial state - e.cur = e.maxMatchOff - - dirtyShardCnt := 0 - if !allDirty { - for i := range e.longTableShardDirty { - if e.longTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - //copy(e.longTable[:], e.dictLongTable) - e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - return - } - for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) - *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) - - e.longTableShardDirty[i] = false - } -} - -func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/dLongTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go deleted file mode 100644 index 181edc02b6..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ /dev/null @@ -1,900 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" -) - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table - tableShardSize = tableSize / tableShardCnt // Size of an individual shard - tableFastHashLen = 6 - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
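The dictionary-aware encoders above avoid re-copying their full hash tables on every Reset: each table write marks a fixed-size shard dirty, and Reset restores only the dirty shards from the pristine dictionary-initialized copy, falling back to a whole-table copy when most shards are dirty. A simplified sketch of that bookkeeping, with a hypothetical shard size:

// shardSize is hypothetical here; the real encoders derive it from the table bits.
const shardSize = 512

type shardedTable struct {
	table []tableEntry // live hash table
	dict  []tableEntry // dictionary-initialized copy, never mutated
	dirty []bool       // one flag per shard of shardSize entries
}

func (t *shardedTable) markDirty(idx uint32) {
	t.dirty[idx/shardSize] = true
}

// reset restores only the shards written to since the last reset.
func (t *shardedTable) reset() {
	for i, d := range t.dirty {
		if !d {
			continue
		}
		copy(t.table[i*shardSize:(i+1)*shardSize], t.dict[i*shardSize:(i+1)*shardSize])
		t.dirty[i] = false
	}
}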
- maxMatchLength = 131074
-)
-
-type tableEntry struct {
- val uint32
- offset int32
-}
-
-type fastEncoder struct {
- fastBase
- table [tableSize]tableEntry
-}
-
-type fastEncoderDict struct {
- fastEncoder
- dictTable []tableEntry
- tableShardDirty [tableShardCnt]bool
- allDirty bool
-}
-
-// Encode mimics functionality in zstd_fast.c
-func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
- const (
- inputMargin = 8
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- e.cur = e.maxMatchOff
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
- for i := range e.table[:] {
- v := e.table[i].offset
- if v < minOff {
- v = 0
- } else {
- v = v - e.cur + e.maxMatchOff
- }
- e.table[i].offset = v
- }
- e.cur = e.maxMatchOff
- break
- }
-
- s := e.addBlock(src)
- blk.size = len(src)
- if len(src) < minNonLiteralBlockSize {
- blk.extraLits = len(src)
- blk.literals = blk.literals[:len(src)]
- copy(blk.literals, src)
- return
- }
-
- // Override src
- src = e.hist
- sLimit := int32(len(src)) - inputMargin
- // stepSize is the number of bytes to skip on every main loop iteration.
- // It should be >= 2.
- const stepSize = 2
-
- // TEMPLATE
- const hashLog = tableBits
- // seems global, but would be nice to tweak.
- const kSearchStrength = 6
-
- // nextEmit is where in src the next emitLiteral should start from.
- nextEmit := s
- cv := load6432(src, s)
-
- // Relative offsets
- offset1 := int32(blk.recentOffsets[0])
- offset2 := int32(blk.recentOffsets[1])
-
- addLiterals := func(s *seq, until int32) {
- if until == nextEmit {
- return
- }
- blk.literals = append(blk.literals, src[nextEmit:until]...)
- s.litLen = uint32(until - nextEmit)
- }
- if debugEncoder {
- println("recent offsets:", blk.recentOffsets)
- }
-
-encodeLoop:
- for {
- // t will contain the match offset when we find one.
- // When exiting the search loop, we have already checked 4 bytes.
- var t int32
-
- // We will not use repeat offsets across blocks,
- // by not using them for the first 3 matches.
- canRepeat := len(blk.sequences) > 2
-
- for {
- if debugAsserts && canRepeat && offset1 == 0 {
- panic("offset0 was 0")
- }
-
- nextHash := hashLen(cv, hashLog, tableFastHashLen)
- nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
- candidate := e.table[nextHash]
- candidate2 := e.table[nextHash2]
- repIndex := s - offset1 + 2
-
- e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
- e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
-
- if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
- // Consider history as well.
- var seq seq
- length := 4 + e.matchlen(s+6, repIndex+4, src)
- seq.matchLen = uint32(length - zstdMinMatch)
-
- // We might be able to match backwards.
- // Extend as long as we can.
- start := s + 2
- // We end the search early, so we don't risk 0 literals
- // and have to do special offset treatment.
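The `e.matchlen` and `matchLen` helpers used in these loops count how many leading bytes two slices have in common. One common implementation compares eight bytes at a time and uses the trailing-zero count of the XOR to locate the first differing byte; a self-contained sketch (consistent with, but not copied from, this package):

import (
	"encoding/binary"
	"math/bits"
)

// matchLen returns the number of leading bytes a and b share.
func matchLen(a, b []byte) (n int) {
	for len(a) >= 8 && len(b) >= 8 {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			// Each trailing zero byte of the XOR is a matching byte.
			return n + bits.TrailingZeros64(diff)>>3
		}
		a, b = a[8:], b[8:]
		n += 8
	}
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}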
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if debugEncoder { - if len(src) > maxBlockSize { - panic("src too big") - } - } - - // Protect against e.cur wraparound. - if e.cur >= bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - - for { - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
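The step expression `s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))` used in all of these search loops makes the encoder accelerate through unmatchable input: the farther the scan gets from the last emitted position without finding a match, the larger the forward jump. With stepSize = 2 and kSearchStrength = 6, for example, the per-iteration skip grows like this (illustration only):

package main

import "fmt"

func main() {
	const stepSize, kSearchStrength = 2, 6
	for _, gap := range []int32{0, 32, 64, 128, 256} {
		fmt.Println(gap, "->", stepSize+(gap>>(kSearchStrength-1)))
	}
	// Output:
	// 0 -> 2
	// 32 -> 3
	// 64 -> 4
	// 128 -> 6
	// 256 -> 10
}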
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0 ", t)) - } - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if e.allDirty || len(src) > 32<<10 { - e.fastEncoder.Encode(blk, src) - e.allDirty = true - return - } - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 7 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - e.markShardDirty(nextHash2) - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("fastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - if true { - end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 3 { - const hashLog = tableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 - nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 - nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - - e.cur = e.maxMatchOff - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.tableShardDirty { - if e.tableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - const shardCnt = tableShardCnt - const shardSize = tableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - //copy(e.table[:], e.dictTable) - e.table = *(*[tableSize]tableEntry)(e.dictTable) - for i := range e.tableShardDirty { - e.tableShardDirty[i] = false - } - e.allDirty = false - return - } - for i := range e.tableShardDirty { - if !e.tableShardDirty[i] { - continue - } - - //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) - e.tableShardDirty[i] = false - } - e.allDirty = false -} - -func (e *fastEncoderDict) markAllShardsDirty() { - e.allDirty = true -} - -func (e *fastEncoderDict) markShardDirty(entryNum uint32) { - e.tableShardDirty[entryNum/tableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go deleted file mode 100644 index 7aaaedb23e..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "crypto/rand" - "fmt" - "io" - rdebug "runtime/debug" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Encoder provides encoding to Zstandard. -// An Encoder can be used for either compressing a stream via the -// io.WriteCloser interface supported by the Encoder or as multiple independent -// tasks via the EncodeAll function. -// Smaller encodes are encouraged to use the EncodeAll function. -// Use NewWriter to create a new instance. 
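A minimal usage sketch for the streaming interface described in this comment, assuming the package is imported as zstd; io.Copy picks up the Encoder's ReadFrom automatically:

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// compressStream compresses everything read from src into dst.
func compressStream(dst io.Writer, src io.Reader) error {
	enc, err := zstd.NewWriter(dst)
	if err != nil {
		return err
	}
	if _, err := io.Copy(enc, src); err != nil {
		enc.Close() // best effort; report the copy error
		return err
	}
	// Close flushes buffered data and writes the checksum, so check its error.
	return enc.Close()
}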
-type Encoder struct { - o encoderOptions - encoders chan encoder - state encoderState - init sync.Once -} - -type encoder interface { - Encode(blk *blockEnc, src []byte) - EncodeNoHist(blk *blockEnc, src []byte) - Block() *blockEnc - CRC() *xxhash.Digest - AppendCRC([]byte) []byte - WindowSize(size int64) int32 - UseBlock(*blockEnc) - Reset(d *dict, singleBlock bool) -} - -type encoderState struct { - w io.Writer - filling []byte - current []byte - previous []byte - encoder encoder - writing *blockEnc - err error - writeErr error - nWritten int64 - nInput int64 - frameContentSize int64 - headerWritten bool - eofWritten bool - fullFrameWritten bool - - // This waitgroup indicates an encode is running. - wg sync.WaitGroup - // This waitgroup indicates we have a block encoding/writing. - wWg sync.WaitGroup -} - -// NewWriter will create a new Zstandard encoder. -// If the encoder will be used for encoding blocks a nil writer can be used. -func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { - initPredefined() - var e Encoder - e.o.setDefault() - for _, o := range opts { - err := o(&e.o) - if err != nil { - return nil, err - } - } - if w != nil { - e.Reset(w) - } - return &e, nil -} - -func (e *Encoder) initialize() { - if e.o.concurrent == 0 { - e.o.setDefault() - } - e.encoders = make(chan encoder, e.o.concurrent) - for i := 0; i < e.o.concurrent; i++ { - enc := e.o.encoder() - e.encoders <- enc - } -} - -// Reset will re-initialize the writer and new writes will encode to the supplied writer -// as a new, independent stream. -func (e *Encoder) Reset(w io.Writer) { - s := &e.state - s.wg.Wait() - s.wWg.Wait() - if cap(s.filling) == 0 { - s.filling = make([]byte, 0, e.o.blockSize) - } - if e.o.concurrent > 1 { - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) - } - s.current = s.current[:0] - s.previous = s.previous[:0] - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() - } - if s.encoder == nil { - s.encoder = e.o.encoder() - } - s.filling = s.filling[:0] - s.encoder.Reset(e.o.dict, false) - s.headerWritten = false - s.eofWritten = false - s.fullFrameWritten = false - s.w = w - s.err = nil - s.nWritten = 0 - s.nInput = 0 - s.writeErr = nil - s.frameContentSize = 0 -} - -// ResetContentSize will reset and set a content size for the next stream. -// If the bytes written does not match the size given an error will be returned -// when calling Close(). -// This is removed when Reset is called. -// Sizes <= 0 results in no content size set. -func (e *Encoder) ResetContentSize(w io.Writer, size int64) { - e.Reset(w) - if size >= 0 { - e.state.frameContentSize = size - } -} - -// Write data to the encoder. -// Input data will be buffered and as the buffer fills up -// content will be compressed and written to the output. -// When done writing, use Close to flush the remaining output -// and write CRC if requested. -func (e *Encoder) Write(p []byte) (n int, err error) { - s := &e.state - for len(p) > 0 { - if len(p)+len(s.filling) < e.o.blockSize { - if e.o.crc { - _, _ = s.encoder.CRC().Write(p) - } - s.filling = append(s.filling, p...) - return n + len(p), nil - } - add := p - if len(p)+len(s.filling) > e.o.blockSize { - add = add[:e.o.blockSize-len(s.filling)] - } - if e.o.crc { - _, _ = s.encoder.CRC().Write(add) - } - s.filling = append(s.filling, add...) 
- p = p[len(add):] - n += len(add) - if len(s.filling) < e.o.blockSize { - return n, nil - } - err := e.nextBlock(false) - if err != nil { - return n, err - } - if debugAsserts && len(s.filling) > 0 { - panic(len(s.filling)) - } - } - return n, nil -} - -// nextBlock will synchronize and start compressing input in e.state.filling. -// If an error has occurred during encoding it will be returned. -func (e *Encoder) nextBlock(final bool) error { - s := &e.state - // Wait for current block. - s.wg.Wait() - if s.err != nil { - return s.err - } - if len(s.filling) > e.o.blockSize { - return fmt.Errorf("block > maxStoreBlockSize") - } - if !s.headerWritten { - // If we have a single block encode, do a sync compression. - if final && len(s.filling) == 0 && !e.o.fullZero { - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) - var n2 int - n2, s.err = s.w.Write(s.current) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - s.nInput += int64(len(s.filling)) - s.current = s.current[:0] - s.filling = s.filling[:0] - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - - var tmp [maxHeaderSize]byte - fh := frameHeader{ - ContentSize: uint64(s.frameContentSize), - WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), - SingleSegment: false, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - dst, err := fh.appendTo(tmp[:0]) - if err != nil { - return err - } - s.headerWritten = true - s.wWg.Wait() - var n2 int - n2, s.err = s.w.Write(dst) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - } - if s.eofWritten { - // Ensure we only write it once. - final = false - } - - if len(s.filling) == 0 { - // Final block, but no data. - if final { - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - blk.last = true - blk.encodeRaw(nil) - s.wWg.Wait() - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.eofWritten = true - } - return s.err - } - - // SYNC: - if e.o.concurrent == 1 { - src := s.filling - s.nInput += int64(len(s.filling)) - if debugEncoder { - println("Adding sync block,", len(src), "bytes, final:", final) - } - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.err = err - return err - } - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.filling = s.filling[:0] - return s.err - } - - // Move blocks forward. 
- s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current - s.nInput += int64(len(s.current)) - s.wg.Add(1) - go func(src []byte) { - if debugEncoder { - println("Adding block,", len(src), "bytes, final:", final) - } - defer func() { - if r := recover(); r != nil { - s.err = fmt.Errorf("panic while encoding: %v", r) - rdebug.PrintStack() - } - s.wg.Done() - }() - enc := s.encoder - blk := enc.Block() - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - // Wait for pending writes. - s.wWg.Wait() - if s.writeErr != nil { - s.err = s.writeErr - return - } - // Transfer encoders from previous write block. - blk.swapEncoders(s.writing) - // Transfer recent offsets to next. - enc.UseBlock(s.writing) - s.writing = blk - s.wWg.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) - rdebug.PrintStack() - } - s.wWg.Done() - }() - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.writeErr = err - return - } - _, s.writeErr = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - }() - }(s.current) - return nil -} - -// ReadFrom reads data from r until EOF or error. -// The return value n is the number of bytes read. -// Any error except io.EOF encountered during the read is also returned. -// -// The Copy function uses ReaderFrom if available. -func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debugEncoder { - println("Using ReadFrom") - } - - // Flush any current writes. - if len(e.state.filling) > 0 { - if err := e.nextBlock(false); err != nil { - return 0, err - } - } - e.state.filling = e.state.filling[:e.o.blockSize] - src := e.state.filling - for { - n2, err := r.Read(src) - if e.o.crc { - _, _ = e.state.encoder.CRC().Write(src[:n2]) - } - // src is now the unfilled part... - src = src[n2:] - n += int64(n2) - switch err { - case io.EOF: - e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debugEncoder { - println("ReadFrom: got EOF final block:", len(e.state.filling)) - } - return n, nil - case nil: - default: - if debugEncoder { - println("ReadFrom: got error:", err) - } - e.state.err = err - return n, err - } - if len(src) > 0 { - if debugEncoder { - println("ReadFrom: got space left in source:", len(src)) - } - continue - } - err = e.nextBlock(false) - if err != nil { - return n, err - } - e.state.filling = e.state.filling[:e.o.blockSize] - src = e.state.filling - } -} - -// Flush will send the currently written data to output -// and block until everything has been written. -// This should only be used on rare occasions where pushing the currently queued data is critical. -func (e *Encoder) Flush() error { - s := &e.state - if len(s.filling) > 0 { - err := e.nextBlock(false) - if err != nil { - return err - } - } - s.wg.Wait() - s.wWg.Wait() - if s.err != nil { - return s.err - } - return s.writeErr -} - -// Close will flush the final output and close the stream. -// The function will block until everything has been written. 
-// The Encoder can still be re-used after calling this. -func (e *Encoder) Close() error { - s := &e.state - if s.encoder == nil { - return nil - } - err := e.nextBlock(true) - if err != nil { - return err - } - if s.frameContentSize > 0 { - if s.nInput != s.frameContentSize { - return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) - } - } - if e.state.fullFrameWritten { - return s.err - } - s.wg.Wait() - s.wWg.Wait() - - if s.err != nil { - return s.err - } - if s.writeErr != nil { - return s.writeErr - } - - // Write CRC - if e.o.crc && s.err == nil { - // heap alloc. - var tmp [4]byte - _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) - s.nWritten += 4 - } - - // Add padding with content from crypto/rand.Reader - if s.err == nil && e.o.pad > 0 { - add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) - frame, err := skippableFrame(s.filling[:0], add, rand.Reader) - if err != nil { - return err - } - _, s.err = s.w.Write(frame) - } - return s.err -} - -// EncodeAll will encode all input in src and append it to dst. -// This function can be called concurrently, but each call will only run on a single goroutine. -// If empty input is given, nothing is returned, unless WithZeroFrames is specified. -// Encoded blocks can be concatenated and the result will be the combined input stream. -// Data compressed with EncodeAll can be decoded with the Decoder, -// using either a stream or DecodeAll. -func (e *Encoder) EncodeAll(src, dst []byte) []byte { - if len(src) == 0 { - if e.o.fullZero { - // Add frame header. - fh := frameHeader{ - ContentSize: 0, - WindowSize: MinWindowSize, - SingleSegment: true, - // Adding a checksum would be a waste of space. - Checksum: false, - DictID: 0, - } - dst, _ = fh.appendTo(dst) - - // Write raw block as last one only. - var blk blockHeader - blk.setSize(0) - blk.setType(blockTypeRaw) - blk.setLast(true) - dst = blk.appendTo(dst) - } - return dst - } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() - // Use single segments when above minimum window and below window size. - single := len(src) <= e.o.windowSize && len(src) > MinWindowSize - if e.o.single != nil { - single = *e.o.single - } - fh := frameHeader{ - ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(int64(len(src)))), - SingleSegment: single, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - // If less than 1MB, allocate a buffer up front. - if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { - dst = make([]byte, 0, len(src)) - } - dst, err := fh.appendTo(dst) - if err != nil { - panic(err) - } - - // If we can do everything in one block, prefer that. - if len(src) <= e.o.blockSize { - enc.Reset(e.o.dict, true) - // Slightly faster with no history and everything in one block. - if e.o.crc { - _, _ = enc.CRC().Write(src) - } - blk := enc.Block() - blk.last = true - if e.o.dict == nil { - enc.EncodeNoHist(blk, src) - } else { - enc.Encode(blk, src) - } - - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. 
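For many small, independent payloads, the EncodeAll path documented above is preferred over streaming. A typical pattern (a sketch, not prescribed by the package) is one long-lived Encoder shared by all callers, since EncodeAll may be called concurrently:

import "github.com/klauspost/compress/zstd"

// encoder is created once; a nil writer is fine when only EncodeAll is used.
var encoder, _ = zstd.NewWriter(nil)

// compress returns src compressed into a freshly allocated buffer.
func compress(src []byte) []byte {
	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
}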
- err := errIncompressible - oldout := blk.output - if len(blk.literals) != len(src) || len(src) != e.o.blockSize { - // Output directly to dst - blk.output = dst - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, src) - case nil: - dst = blk.output - default: - panic(err) - } - blk.output = oldout - } else { - enc.Reset(e.o.dict, false) - blk := enc.Block() - for len(src) > 0 { - todo := src - if len(todo) > e.o.blockSize { - todo = todo[:e.o.blockSize] - } - src = src[len(todo):] - if e.o.crc { - _, _ = enc.CRC().Write(todo) - } - blk.pushOffsets() - enc.Encode(blk, todo) - if len(src) == 0 { - blk.last = true - } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { - err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, todo) - blk.popOffsets() - case nil: - dst = append(dst, blk.output...) - default: - panic(err) - } - blk.reset(nil) - } - } - if e.o.crc { - dst = enc.AppendCRC(dst) - } - // Add padding with content from crypto/rand.Reader - if e.o.pad > 0 { - add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) - dst, err = skippableFrame(dst, add, rand.Reader) - if err != nil { - panic(err) - } - } - return dst -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go deleted file mode 100644 index a7c5e1aac4..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ /dev/null @@ -1,317 +0,0 @@ -package zstd - -import ( - "errors" - "fmt" - "runtime" - "strings" -) - -// EOption is an option for creating a encoder. -type EOption func(*encoderOptions) error - -// options retains accumulated state of multiple options. -type encoderOptions struct { - concurrent int - level EncoderLevel - single *bool - pad int - blockSize int - windowSize int - crc bool - fullZero bool - noEntropy bool - allLitEntropy bool - customWindow bool - customALEntropy bool - customBlockSize bool - lowMem bool - dict *dict -} - -func (o *encoderOptions) setDefault() { - *o = encoderOptions{ - concurrent: runtime.GOMAXPROCS(0), - crc: true, - single: nil, - blockSize: maxCompressedBlockSize, - windowSize: 8 << 20, - level: SpeedDefault, - allLitEntropy: true, - lowMem: false, - } -} - -// encoder returns an encoder with the selected options. 
-func (o encoderOptions) encoder() encoder { - switch o.level { - case SpeedFastest: - if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} - } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} - - case SpeedDefault: - if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} - } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} - case SpeedBetterCompression: - if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} - } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} - case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} - } - panic("unknown compression level") -} - -// WithEncoderCRC will add CRC value to output. -// Output will be 4 bytes larger. -func WithEncoderCRC(b bool) EOption { - return func(o *encoderOptions) error { o.crc = b; return nil } -} - -// WithEncoderConcurrency will set the concurrency, -// meaning the maximum number of encoders to run concurrently. -// The value supplied must be at least 1. -// For streams, setting a value of 1 will disable async compression. -// By default this will be set to GOMAXPROCS. -func WithEncoderConcurrency(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("concurrency must be at least 1") - } - o.concurrent = n - return nil - } -} - -// WithWindowSize will set the maximum allowed back-reference distance. -// The value must be a power of two between MinWindowSize and MaxWindowSize. -// A larger value will enable better compression but allocate more memory and, -// for above-default values, take considerably longer. -// The default value is determined by the compression level. -func WithWindowSize(n int) EOption { - return func(o *encoderOptions) error { - switch { - case n < MinWindowSize: - return fmt.Errorf("window size must be at least %d", MinWindowSize) - case n > MaxWindowSize: - return fmt.Errorf("window size must be at most %d", MaxWindowSize) - case (n & (n - 1)) != 0: - return errors.New("window size must be a power of 2") - } - - o.windowSize = n - o.customWindow = true - if o.blockSize > o.windowSize { - o.blockSize = o.windowSize - o.customBlockSize = true - } - return nil - } -} - -// WithEncoderPadding will add padding to all output so the size will be a multiple of n. -// This can be used to obfuscate the exact output size or make blocks of a certain size. -// The contents will be a skippable frame, so it will be invisible to the decoder. -// n must be > 0 and <= 1GB, 1<<30 bytes. -// The padded area will be filled with data from crypto/rand.Reader. -// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this. -func WithEncoderPadding(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("padding must be at least 1") - } - // No need to waste our time.
- if n == 1 { - o.pad = 0 - } - if n > 1<<30 { - return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)") - } - o.pad = n - return nil - } -} - -// EncoderLevel predefines encoder compression levels. -// Only use the constants made available, since the actual mapping -// of these values is very likely to change and your compression could change -// unpredictably when upgrading the library. -type EncoderLevel int - -const ( - speedNotSet EncoderLevel = iota - - // SpeedFastest will choose the fastest reasonable compression. - // This is roughly equivalent to the fastest Zstandard mode. - SpeedFastest - - // SpeedDefault is the default "pretty fast" compression option. - // This is roughly equivalent to the default Zstandard mode (level 3). - SpeedDefault - - // SpeedBetterCompression will yield better compression than the default. - // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. - // By using this, notice that CPU usage may go up in the future. - SpeedBetterCompression - - // SpeedBestCompression will choose the best available compression option. - // This will offer the best compression no matter the CPU cost. - SpeedBestCompression - - // speedLast should be kept as the last actual compression option. - // This is not for external usage, but is used to keep track of the valid options. - speedLast -) - -// EncoderLevelFromString will convert a string representation of an encoding level back -// to a compression level. The comparison is not case sensitive. -// If the string wasn't recognized, (false, SpeedDefault) will be returned. -func EncoderLevelFromString(s string) (bool, EncoderLevel) { - for l := speedNotSet + 1; l < speedLast; l++ { - if strings.EqualFold(s, l.String()) { - return true, l - } - } - return false, SpeedDefault -} - -// EncoderLevelFromZstd will return an encoder level that most closely matches the compression -// ratio of a specific zstd compression level. -// Many input values will provide the same compression level. -func EncoderLevelFromZstd(level int) EncoderLevel { - switch { - case level < 3: - return SpeedFastest - case level >= 3 && level < 6: - return SpeedDefault - case level >= 6 && level < 10: - return SpeedBetterCompression - default: - return SpeedBestCompression - } -} - -// String provides a string representation of the compression level. -func (e EncoderLevel) String() string { - switch e { - case SpeedFastest: - return "fastest" - case SpeedDefault: - return "default" - case SpeedBetterCompression: - return "better" - case SpeedBestCompression: - return "best" - default: - return "invalid" - } -} - -// WithEncoderLevel specifies a predefined compression level. -func WithEncoderLevel(l EncoderLevel) EOption { - return func(o *encoderOptions) error { - switch { - case l <= speedNotSet || l >= speedLast: - return fmt.Errorf("unknown encoder level") - } - o.level = l - if !o.customWindow { - switch o.level { - case SpeedFastest: - o.windowSize = 4 << 20 - if !o.customBlockSize { - o.blockSize = 1 << 16 - } - case SpeedDefault: - o.windowSize = 8 << 20 - case SpeedBetterCompression: - o.windowSize = 16 << 20 - case SpeedBestCompression: - o.windowSize = 32 << 20 - } - } - if !o.customALEntropy { - o.allLitEntropy = l > SpeedFastest - } - - return nil - } -} - -// WithZeroFrames will encode 0 length input as full frames. -// This can be needed for compatibility with zstandard usage, -// but is not needed for this package.
-func WithZeroFrames(b bool) EOption { - return func(o *encoderOptions) error { - o.fullZero = b - return nil - } -} - -// WithAllLitEntropyCompression will apply entropy compression if no matches are found. -// Disabling this will skip incompressible data faster, but in cases with no matches and -// a skewed character distribution, compression is lost. -// Default value depends on the compression level selected. -func WithAllLitEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.customALEntropy = true - o.allLitEntropy = b - return nil - } -} - -// WithNoEntropyCompression will always skip entropy compression of literals. -// This can be useful if content has matches, but is unlikely to benefit from entropy -// compression. Usually the slight speed improvement is not worth enabling this. -func WithNoEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.noEntropy = b - return nil - } -} - -// WithSingleSegment will set the "single segment" flag when EncodeAll is used. -// If this flag is set, data must be regenerated within a single continuous memory segment. -// In this case, the Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. -// As a consequence, the decoder must allocate a memory segment of size equal to or larger than the size of your content. -// In order to protect the decoder from unreasonable memory requirements, -// a decoder is allowed to reject a compressed frame which requests a memory size beyond the decoder's authorized range. -// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. -// This is only a recommendation; each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size and the window size. -// This setting has no effect on streamed encodes. -func WithSingleSegment(b bool) EOption { - return func(o *encoderOptions) error { - o.single = &b - return nil - } -} - -// WithLowerEncoderMem will, in some cases, trade lower memory usage for -// slower encoding speed. -// This will not change the window size, which is the primary knob for reducing -// memory usage. See WithWindowSize. -func WithLowerEncoderMem(b bool) EOption { - return func(o *encoderOptions) error { - o.lowMem = b - return nil - } -} - -// WithEncoderDict allows registering a dictionary that will be used for the encode. -// The encoder *may* choose to use no dictionary instead for certain payloads. -func WithEncoderDict(dict []byte) EOption { - return func(o *encoderOptions) error { - d, err := loadDict(dict) - if err != nil { - return err - } - o.dict = d - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go deleted file mode 100644 index b6c5054176..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License.
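For reference, the encoder API removed above (EncodeAll plus the EOption helpers) composes like this. A minimal sketch against the public github.com/klauspost/compress/zstd package; the level string, window size, and padding values are illustrative only:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A user-facing level string can be mapped back to an EncoderLevel.
	ok, level := zstd.EncoderLevelFromString("better")
	if !ok {
		level = zstd.SpeedDefault
	}

	// A nil writer yields an encoder intended for EncodeAll use only.
	enc, err := zstd.NewWriter(nil,
		zstd.WithEncoderLevel(level),
		zstd.WithEncoderCRC(true),     // 4 extra bytes, verified on decode
		zstd.WithWindowSize(1<<20),    // power of two in [MinWindowSize, MaxWindowSize]
		zstd.WithEncoderPadding(4096), // output padded to 4 KiB via skippable frames
	)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	src := bytes.Repeat([]byte("hello zstd "), 1<<10)
	frame := enc.EncodeAll(src, nil) // appends one complete frame to dst (nil here)

	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(frame, nil)
	fmt.Println(len(frame)%4096 == 0, bytes.Equal(out, src), err)
}
```

Note that EncodeAll takes (src, dst) in that order, unlike most append-style Go APIs.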
- -package zstd - -import ( - "bytes" - "encoding/hex" - "errors" - "io" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type frameDec struct { - o decoderOptions - crc *xxhash.Digest - - WindowSize uint64 - - // Frame history passed between blocks - history history - - rawInput byteBuffer - - // Byte buffer that can be reused for small input blocks. - bBuf byteBuf - - FrameContentSize uint64 - - DictionaryID *uint32 - HasCheckSum bool - SingleSegment bool -} - -const ( - // MinWindowSize is the minimum Window Size, which is 1 KB. - MinWindowSize = 1 << 10 - - // MaxWindowSize is the maximum encoder window size - // and the default decoder maximum window size. - MaxWindowSize = 1 << 29 -) - -var ( - frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} - skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} -) - -func newFrameDec(o decoderOptions) *frameDec { - if o.maxWindowSize > o.maxDecodedSize { - o.maxWindowSize = o.maxDecodedSize - } - d := frameDec{ - o: o, - } - return &d -} - -// reset will read the frame header and prepare for block decoding. -// If nothing can be read from the input, io.EOF will be returned. -// Any other error indicates that the stream contained data, but -// there was a problem. -func (d *frameDec) reset(br byteBuffer) error { - d.HasCheckSum = false - d.WindowSize = 0 - var signature [4]byte - for { - var err error - // Check if we can read more... - b, err := br.readSmall(1) - switch err { - case io.EOF, io.ErrUnexpectedEOF: - return io.EOF - default: - return err - case nil: - signature[0] = b[0] - } - // Read the rest, don't allow io.ErrUnexpectedEOF - b, err = br.readSmall(3) - switch err { - case io.EOF: - return io.EOF - default: - return err - case nil: - copy(signature[1:], b) - } - - if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { - if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) - } - // Break if not skippable frame.
- break - } - // Read size to skip - b, err = br.readSmall(4) - if err != nil { - if debugDecoder { - println("Reading Frame Size", err) - } - return err - } - n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - println("Skipping frame with", n, "bytes.") - err = br.skipN(int64(n)) - if err != nil { - if debugDecoder { - println("Reading discarded frame", err) - } - return err - } - } - if !bytes.Equal(signature[:], frameMagic) { - if debugDecoder { - println("Got magic numbers: ", signature, "want:", frameMagic) - } - return ErrMagicMismatch - } - - // Read Frame_Header_Descriptor - fhd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Frame_Header_Descriptor", err) - } - return err - } - d.SingleSegment = fhd&(1<<5) != 0 - - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - d.WindowSize = 0 - if !d.SingleSegment { - wd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Window_Descriptor", err) - } - return err - } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - d.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - - b, err := br.readSmall(int(size)) - if err != nil { - println("Reading Dictionary_ID", err) - return err - } - var id uint32 - switch size { - case 1: - id = uint32(b[0]) - case 2: - id = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - if debugDecoder { - println("Dict size", size, "ID:", id) - } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if d.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - d.FrameContentSize = fcsUnknown - if fcsSize > 0 { - b, err := br.readSmall(fcsSize) - if err != nil { - println("Reading Frame content", err) - return err - } - switch fcsSize { - case 1: - d.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - if debugDecoder { - println("Read FCS:", d.FrameContentSize) - } - } - - // Move this to shared. 
- d.HasCheckSum = fhd&(1<<2) != 0 - if d.HasCheckSum { - if d.crc == nil { - d.crc = xxhash.New() - } - d.crc.Reset() - } - - if d.WindowSize > d.o.maxWindowSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrWindowSizeExceeded - } - - if d.WindowSize == 0 && d.SingleSegment { - // We may not need window in this case. - d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } - if d.WindowSize > d.o.maxDecodedSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrDecoderSizeExceeded - } - } - - // The minimum Window_Size is 1 KB. - if d.WindowSize < MinWindowSize { - if debugDecoder { - println("got window size: ", d.WindowSize) - } - return ErrWindowSizeTooSmall - } - d.history.windowSize = int(d.WindowSize) - if !d.o.lowMem || d.history.windowSize < maxBlockSize { - // Alloc 2x window size if not low-mem, or very small window size. - d.history.allocFrameBuffer = d.history.windowSize * 2 - } else { - // Alloc with one additional block - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize - } - - if debugDecoder { - println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) - } - - // history contains input - maybe we do something - d.rawInput = br - return nil -} - -// next will start decoding the next block from stream. -func (d *frameDec) next(block *blockDec) error { - if debugDecoder { - println("decoding new block") - } - err := block.reset(d.rawInput, d.WindowSize) - if err != nil { - println("block error:", err) - // Signal the frame decoder we have a problem. - block.sendErr(err) - return err - } - return nil -} - -// checkCRC will check the checksum if the frame has one. -// Will return ErrCRCMismatch if crc check failed, otherwise nil. -func (d *frameDec) checkCRC() error { - if !d.HasCheckSum { - return nil - } - - // We can overwrite upper tmp now - want, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } - - if d.o.ignoreChecksum { - return nil - } - - var tmp [4]byte - got := d.crc.Sum64() - // Flip to match file order. - tmp[0] = byte(got >> 0) - tmp[1] = byte(got >> 8) - tmp[2] = byte(got >> 16) - tmp[3] = byte(got >> 24) - - if !bytes.Equal(tmp[:], want) { - if debugDecoder { - println("CRC Check Failed:", tmp[:], "!=", want) - } - return ErrCRCMismatch - } - if debugDecoder { - println("CRC ok", tmp[:]) - } - return nil -} - -// consumeCRC reads the checksum data if the frame has one. -func (d *frameDec) consumeCRC() error { - if d.HasCheckSum { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } - } - - return nil -} - -// runDecoder will run the decoder for the remainder of the frame. -func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { - saved := d.history.b - - // We use the history for output to avoid copying it. - d.history.b = dst - d.history.ignoreBuffer = len(dst) - // Store input length, so we only check new data. 
- crcStart := len(dst) - d.history.decoders.maxSyncLen = 0 - if d.o.limitToCap { - d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) - } - if d.FrameContentSize != fcsUnknown { - if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { - d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) - } - if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) - } - return dst, ErrDecoderSizeExceeded - } - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen) - } - if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { - // Alloc for output - dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - var err error - for { - err = dec.reset(d.rawInput, d.WindowSize) - if err != nil { - break - } - if debugDecoder { - println("next block:", dec) - } - err = dec.decodeBuf(&d.history) - if err != nil { - break - } - if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { - println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) - err = ErrDecoderSizeExceeded - break - } - if d.o.limitToCap && len(d.history.b) > cap(dst) { - println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) - err = ErrDecoderSizeExceeded - break - } - if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { - println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) - err = ErrFrameSizeExceeded - break - } - if dec.Last { - break - } - if debugDecoder { - println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) - } - } - dst = d.history.b - if err == nil { - if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { - err = ErrFrameSizeMismatch - } else if d.HasCheckSum { - if d.o.ignoreChecksum { - err = d.consumeCRC() - } else { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() - } - } - } - } - } - d.history.b = saved - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go deleted file mode 100644 index 4ef7f5a3e3..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "math/bits" -) - -type frameHeader struct { - ContentSize uint64 - WindowSize uint32 - SingleSegment bool - Checksum bool - DictID uint32 -} - -const maxHeaderSize = 14 - -func (f frameHeader) appendTo(dst []byte) ([]byte, error) { - dst = append(dst, frameMagic...) 
- var fhd uint8 - if f.Checksum { - fhd |= 1 << 2 - } - if f.SingleSegment { - fhd |= 1 << 5 - } - - var dictIDContent []byte - if f.DictID > 0 { - var tmp [4]byte - if f.DictID < 256 { - fhd |= 1 - tmp[0] = uint8(f.DictID) - dictIDContent = tmp[:1] - } else if f.DictID < 1<<16 { - fhd |= 2 - binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) - dictIDContent = tmp[:2] - } else { - fhd |= 3 - binary.LittleEndian.PutUint32(tmp[:4], f.DictID) - dictIDContent = tmp[:4] - } - } - var fcs uint8 - if f.ContentSize >= 256 { - fcs++ - } - if f.ContentSize >= 65536+256 { - fcs++ - } - if f.ContentSize >= 0xffffffff { - fcs++ - } - - fhd |= fcs << 6 - - dst = append(dst, fhd) - if !f.SingleSegment { - const winLogMin = 10 - windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 - dst = append(dst, uint8(windowLog)) - } - if f.DictID > 0 { - dst = append(dst, dictIDContent...) - } - switch fcs { - case 0: - if f.SingleSegment { - dst = append(dst, uint8(f.ContentSize)) - } - // Unless SingleSegment is set, frame sizes < 256 are not stored. - case 1: - f.ContentSize -= 256 - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) - case 2: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) - case 3: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), - uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) - default: - panic("invalid fcs") - } - return dst, nil -} - -const skippableFrameHeader = 4 + 4 - -// calcSkippableFrame will return the total size to be added for written -// to be divisible by wantMultiple. -// The value will always be > skippableFrameHeader. -// The function will panic if written < 0 or wantMultiple <= 0. -func calcSkippableFrame(written, wantMultiple int64) int { - if wantMultiple <= 0 { - panic("wantMultiple <= 0") - } - if written < 0 { - panic("written < 0") - } - leftOver := written % wantMultiple - if leftOver == 0 { - return 0 - } - toAdd := wantMultiple - leftOver - for toAdd < skippableFrameHeader { - toAdd += wantMultiple - } - return int(toAdd) -} - -// skippableFrame will add a skippable frame with a total size of total bytes. -// total should be >= skippableFrameHeader and < math.MaxUint32. -func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { - if total == 0 { - return dst, nil - } - if total < skippableFrameHeader { - return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) - } - if int64(total) > math.MaxUint32 { - return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) - } - dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) - f := uint32(total - skippableFrameHeader) - dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) - start := len(dst) - dst = append(dst, make([]byte, f)...) - _, err := io.ReadFull(r, dst[start:]) - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go deleted file mode 100644 index 2f8860a722..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License.
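The padding and magic-number machinery that just ended (calcSkippableFrame, skippableFrame, and the frame magics from framedec.go) is simple enough to exercise standalone. A self-contained sketch, reimplemented here for illustration rather than taken verbatim from the vendored code:

```go
package main

import (
	"bytes"
	"fmt"
)

const skippableFrameHeader = 4 + 4 // magic + length field

// calcPad mirrors calcSkippableFrame: the smallest amount that makes
// written a multiple of wantMultiple while fitting the 8-byte header.
func calcPad(written, wantMultiple int64) int {
	leftOver := written % wantMultiple
	if leftOver == 0 {
		return 0
	}
	toAdd := wantMultiple - leftOver
	for toAdd < skippableFrameHeader {
		toAdd += wantMultiple
	}
	return int(toAdd)
}

// classify distinguishes real frames from skippable padding frames,
// using the magic values shown in the deleted framedec.go.
func classify(b []byte) string {
	frameMagic := []byte{0x28, 0xb5, 0x2f, 0xfd}
	skippableFrameMagic := []byte{0x2a, 0x4d, 0x18}
	switch {
	case len(b) < 4:
		return "too short"
	case bytes.Equal(b[:4], frameMagic):
		return "zstd frame"
	case bytes.Equal(b[1:4], skippableFrameMagic) && b[0]&0xf0 == 0x50:
		return "skippable frame"
	default:
		return "not zstd"
	}
}

func main() {
	// 13000 % 4096 = 712, so 3384 padding bytes bring the total to 16384.
	fmt.Println(calcPad(13000, 4096))                     // 3384
	fmt.Println(classify([]byte{0x28, 0xb5, 0x2f, 0xfd})) // zstd frame
	fmt.Println(classify([]byte{0x50, 0x2a, 0x4d, 0x18})) // skippable frame
}
```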
- -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" -) - -const ( - tablelogAbsoluteMax = 9 -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = tablelogAbsoluteMax + 2 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - maxTableMask = (1 << maxTableLog) - 1 - minTablelog = 5 - maxSymbolValue = 255 -) - -// fseDecoder provides temporary storage for compression and decompression. -type fseDecoder struct { - dt [maxTablesize]decSymbol // Decompression table. - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - maxBits uint8 // Maximum number of additional bits - - // used for table creation to avoid allocations. - stateTable [256]uint16 - norm [maxSymbolValue + 1]int16 - preDefined bool -} - -// tableStep returns the next table index. -func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -// readNCount will read the symbol distribution so decoding tables can be constructed. -func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { - var ( - charnum uint16 - previous0 bool - ) - if b.remain() < 4 { - return errors.New("input too small") - } - bitStream := b.Uint32NC() - nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog - if nbBits > tablelogAbsoluteMax { - println("Invalid tablelog:", nbBits) - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 && charnum <= maxSymbol { - if previous0 { - //println("prev0") - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - //println("24 x 0") - n0 += 24 - if r := b.remain(); r > 5 { - b.advance(2) - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - // end of bit stream - bitStream >>= 16 - bitCount += 16 - } - } - //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) - for charnum < n0 { - s.norm[uint8(charnum)] = 0 - charnum++ - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*threshold - 1) - remaining - var count int32 - - if int32(bitStream)&(threshold-1) < max { - count = int32(bitStream) & (threshold - 1) - if debugAsserts && nbBits < 1 { - panic("nbBits underflow") - } - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - // extra accuracy - count-- - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - 
charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> (bitCount & 31) - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - bitStream = b.Uint32() >> (bitCount & 31) - } - } - s.symbolLen = charnum - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<<s.actualTableLog { - return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog) - } - b.advance((bitCount + 7) >> 3) - return s.buildDtable() -} - -func (s *fseDecoder) mustReadFrom(r io.Reader) { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - // dt [maxTablesize]decSymbol // Decompression table. - // symbolLen uint16 // Length of active part of the symbol table. - // actualTableLog uint8 // Selected tablelog. - // maxBits uint8 // Maximum number of additional bits - // // used for table creation to avoid allocations. - // stateTable [256]uint16 - // norm [maxSymbolValue + 1]int16 - // preDefined bool - fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) -} - -// decSymbol contains information about a state entry, -// including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -// Using a composite uint64 is faster than a struct with separate members. -type decSymbol uint64 - -func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { - return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - -func (d decSymbol) nbBits() uint8 { - return uint8(d) -} - -func (d decSymbol) addBits() uint8 { - return uint8(d >> 8) -} - -func (d decSymbol) newState() uint16 { - return uint16(d >> 16) -} - -func (d decSymbol) baselineInt() int { - return int(d >> 32) -} - -func (d *decSymbol) setNBits(nBits uint8) { - const mask = 0xffffffffffffff00 - *d = (*d & mask) | decSymbol(nBits) -} - -func (d *decSymbol) setAddBits(addBits uint8) { - const mask = 0xffffffffffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) -} - -func (d *decSymbol) setNewState(state uint16) { - const mask = 0xffffffff0000ffff - *d = (*d & mask) | decSymbol(state)<<16 -} - -func (d *decSymbol) setExt(addBits uint8, baseline uint32) { - const mask = 0xffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) -} - -// decSymbolValue returns the transformed decSymbol for the given symbol. -func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { - if int(symb) >= len(t) { - return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) - } - lu := t[symb] - return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil -} - -// setRLE will set the decoder to RLE mode.
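The composite decSymbol layout above trades struct fields for shifts; a quick standalone check of the packing (the deleted setRLE body continues below):

```go
package main

import "fmt"

// decSymbol packs nbBits | addBits<<8 | newState<<16 | baseline<<32,
// mirroring the layout in the deleted fse_decoder.go.
type decSymbol uint64

func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
}

func main() {
	d := newDecSymbol(5, 3, 1024, 77)
	// Each accessor is a single shift-and-truncate.
	fmt.Println(uint8(d), uint8(d>>8), uint16(d>>16), uint32(d>>32)) // 5 3 1024 77
}
```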
-func (s *fseDecoder) setRLE(symbol decSymbol) { - s.actualTableLog = 0 - s.maxBits = symbol.addBits() - s.dt[0] = symbol -} - -// transform will transform the decoder table into a table usable for -// decoding without having to apply the transformation while decoding. -// The state will contain the base value and the number of bits to read. -func (s *fseDecoder) transform(t []baseOffset) error { - tableSize := uint16(1 << s.actualTableLog) - s.maxBits = 0 - for i, v := range s.dt[:tableSize] { - add := v.addBits() - if int(add) >= len(t) { - return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) - } - lu := t[add] - if lu.addBits > s.maxBits { - s.maxBits = lu.addBits - } - v.setExt(lu.addBits, lu.baseLine) - s.dt[i] = v - } - return nil -} - -type fseState struct { - dt []decSymbol - state decSymbol -} - -// Initialize and decodeAsync first state and symbol. -func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { - s.dt = dt - br.fill() - s.state = dt[br.getBits(tableLog)] -} - -// final returns the current state symbol without decoding the next. -func (s decSymbol) final() (int, uint8) { - return s.baselineInt(), s.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go deleted file mode 100644 index d04a829b0a..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ /dev/null @@ -1,65 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" -) - -type buildDtableAsmContext struct { - // inputs - stateTable *uint16 - norm *int16 - dt *uint64 - - // outputs --- set by the procedure in the case of error; - // for interpretation please see the error handling part below - errParam1 uint64 - errParam2 uint64 -} - -// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. -// Function returns non-zero exit code on error. -// -//go:noescape -func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int - -// please keep in sync with _generate/gen_fse.go -const ( - errorCorruptedNormalizedCounter = 1 - errorNewStateTooBig = 2 - errorNewStateNoBits = 3 -) - -// buildDtable will build the decoding table. 
-func (s *fseDecoder) buildDtable() error { - ctx := buildDtableAsmContext{ - stateTable: &s.stateTable[0], - norm: &s.norm[0], - dt: (*uint64)(&s.dt[0]), - } - code := buildDtable_asm(s, &ctx) - - if code != 0 { - switch code { - case errorCorruptedNormalizedCounter: - position := ctx.errParam1 - return fmt.Errorf("corrupted input (position=%d, expected 0)", position) - - case errorNewStateTooBig: - newState := decSymbol(ctx.errParam1) - size := ctx.errParam2 - return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) - - case errorNewStateNoBits: - newState := decSymbol(ctx.errParam1) - oldState := decSymbol(ctx.errParam2) - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) - - default: - return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s deleted file mode 100644 index bcde398695..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s +++ /dev/null @@ -1,126 +0,0 @@ -// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int -TEXT ·buildDtable_asm(SB), $0-24 - MOVQ ctx+8(FP), CX - MOVQ s+0(FP), DI - - // Load values - MOVBQZX 4098(DI), DX - XORQ AX, AX - BTSQ DX, AX - MOVQ (CX), BX - MOVQ 16(CX), SI - LEAQ -1(AX), R8 - MOVQ 8(CX), CX - MOVWQZX 4096(DI), DI - - // End load values - // Init, lay down lowprob symbols - XORQ R9, R9 - JMP init_main_loop_condition - -init_main_loop: - MOVWQSX (CX)(R9*2), R10 - CMPW R10, $-1 - JNE do_not_update_high_threshold - MOVB R9, 1(SI)(R8*8) - DECQ R8 - MOVQ $0x0000000000000001, R10 - -do_not_update_high_threshold: - MOVW R10, (BX)(R9*2) - INCQ R9 - -init_main_loop_condition: - CMPQ R9, DI - JL init_main_loop - - // Spread symbols - // Calculate table step - MOVQ AX, R9 - SHRQ $0x01, R9 - MOVQ AX, R10 - SHRQ $0x03, R10 - LEAQ 3(R9)(R10*1), R9 - - // Fill add bits values - LEAQ -1(AX), R10 - XORQ R11, R11 - XORQ R12, R12 - JMP spread_main_loop_condition - -spread_main_loop: - XORQ R13, R13 - MOVWQSX (CX)(R12*2), R14 - JMP spread_inner_loop_condition - -spread_inner_loop: - MOVB R12, 1(SI)(R11*8) - -adjust_position: - ADDQ R9, R11 - ANDQ R10, R11 - CMPQ R11, R8 - JG adjust_position - INCQ R13 - -spread_inner_loop_condition: - CMPQ R13, R14 - JL spread_inner_loop - INCQ R12 - -spread_main_loop_condition: - CMPQ R12, DI - JL spread_main_loop - TESTQ R11, R11 - JZ spread_check_ok - MOVQ ctx+8(FP), AX - MOVQ R11, 24(AX) - MOVQ $+1, ret+16(FP) - RET - -spread_check_ok: - // Build Decoding table - XORQ DI, DI - -build_table_main_table: - MOVBQZX 1(SI)(DI*8), CX - MOVWQZX (BX)(CX*2), R8 - LEAQ 1(R8), R9 - MOVW R9, (BX)(CX*2) - MOVQ R8, R9 - BSRQ R9, R9 - MOVQ DX, CX - SUBQ R9, CX - SHLQ CL, R8 - SUBQ AX, R8 - MOVB CL, (SI)(DI*8) - MOVW R8, 2(SI)(DI*8) - CMPQ R8, AX - JLE build_table_check1_ok - MOVQ ctx+8(FP), CX - MOVQ R8, 24(CX) - MOVQ AX, 32(CX) - MOVQ $+2, ret+16(FP) - RET - -build_table_check1_ok: - TESTB CL, CL - JNZ build_table_check2_ok - CMPW R8, DI - JNE build_table_check2_ok - MOVQ ctx+8(FP), AX - MOVQ R8, 24(AX) - MOVQ DI, 32(AX) - MOVQ $+3, ret+16(FP) - RET - -build_table_check2_ok: - INCQ DI - CMPQ DI, AX - JL build_table_main_table - MOVQ $+0, ret+16(FP) - RET diff --git 
a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go deleted file mode 100644 index 332e51fe44..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "errors" - "fmt" -) - -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) - } - } - } - - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go deleted file mode 100644 index ab26326a8f..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" -) - -const ( - // For encoding we only support up to - maxEncTableLog = 8 - maxEncTablesize = 1 << maxTableLog - maxEncTableMask = (1 << maxTableLog) - 1 - minEncTablelog = 5 - maxEncSymbolValue = maxMatchLengthSymbol -) - -// Scratch provides temporary storage for compression and decompression. -type fseEncoder struct { - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - ct cTable // Compression tables. - maxCount int // count of the most probable symbol - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - useRLE bool // This encoder is for RLE - preDefined bool // This encoder is predefined. - reUsed bool // Set to know when the encoder has been reused. - rleVal uint8 // RLE Symbol - maxBits uint8 // Maximum output bits after transform. - - // TODO: Technically zstd should be fine with 64 bytes. 
- count [256]uint32 - norm [256]int16 -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaNbBits uint32 - deltaFindState int16 - outBits uint8 -} - -// String prints values as a human-readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) -} - -// Histogram allows populating the histogram and skipping that step in the compression. -// It otherwise allows inspecting the histogram when compression is done. -// To indicate that you have populated the histogram, call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -func (s *fseEncoder) Histogram() *[256]uint32 { - return &s.count -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// allocCtable will allocate tables needed for compression. -// If existing tables are big enough, they are simply re-used. -func (s *fseEncoder) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. -func (s *fseEncoder) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [256]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output.
- largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = total - 1 - total++ - default: - maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = total - v - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -func (s *fseEncoder) setRLE(val byte) { - s.allocCtable() - s.actualTableLog = 0 - s.ct.stateTable = s.ct.stateTable[:1] - s.ct.symbolTT[val] = symbolTransform{ - deltaFindState: 0, - deltaNbBits: 0, - } - if debugEncoder { - println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) - } - s.rleVal = val - s.useRLE = true -} - -// setBits will set output bits for the transform. -// if nil is provided, the number of bits is equal to the index. -func (s *fseEncoder) setBits(transform []byte) { - if s.reUsed || s.preDefined { - return - } - if s.useRLE { - if transform == nil { - s.ct.symbolTT[s.rleVal].outBits = s.rleVal - s.maxBits = s.rleVal - return - } - s.maxBits = transform[s.rleVal] - s.ct.symbolTT[s.rleVal].outBits = s.maxBits - return - } - if transform == nil { - for i := range s.ct.symbolTT[:s.symbolLen] { - s.ct.symbolTT[i].outBits = uint8(i) - } - s.maxBits = uint8(s.symbolLen - 1) - return - } - s.maxBits = 0 - for i, v := range transform[:s.symbolLen] { - s.ct.symbolTT[i].outBits = v - if v > s.maxBits { - // We could assume bits always going up, but we play safe. - s.maxBits = v - } - } -} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -// If successful, compression tables will also be made ready. 
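normalizeCount, whose deleted body follows, scales a symbol histogram so the probabilities sum to exactly 1<<tableLog. A deliberately simplified sketch of that invariant — not the library's rounding, nor its -1 low-probability handling:

```go
package main

import "fmt"

// normalize scales counts to sum to 1<<tableLog, giving every present
// symbol at least one slot and dumping the rounding remainder on the
// most probable symbol (a crude stand-in for "stillToDistribute").
func normalize(counts []int, tableLog uint) []int16 {
	total, tableSize := 0, 1<<tableLog
	for _, c := range counts {
		total += c
	}
	norm := make([]int16, len(counts))
	assigned, largest := 0, 0
	for i, c := range counts {
		if c == 0 {
			continue
		}
		p := c * tableSize / total
		if p == 0 {
			p = 1
		}
		norm[i] = int16(p)
		assigned += p
		if norm[i] > norm[largest] {
			largest = i
		}
	}
	norm[largest] += int16(tableSize - assigned)
	return norm
}

func main() {
	fmt.Println(normalize([]int{900, 90, 9, 1}, 6)) // [57 5 1 1], sums to 64
}
```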
-func (s *fseEncoder) normalizeCount(length int) error { - if s.reUsed { - return nil - } - s.optimalTableLog(length) - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(length) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(length >> tableLog) - ) - if s.maxCount == length { - s.useRLE = true - return nil - } - s.useRLE = false - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - err := s.normalizeCount2(length) - if err != nil { - return err - } - if debugAsserts { - err = s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() - } - s.norm[largest] += stillToDistribute - if debugAsserts { - err := s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() -} - -// Secondary normalization method. -// To be used when primary method fails. -func (s *fseEncoder) normalizeCount2(length int) error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(length) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - 
sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *fseEncoder) optimalTableLog(length int) { - tableLog := uint8(maxEncTableLog) - minBitsSrc := highBit(uint32(length)) + 1 - minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 - minBits := uint8(minBitsSymbols) - if minBitsSrc < minBitsSymbols { - minBits = uint8(minBitsSrc) - } - - maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minEncTablelog { - tableLog = minEncTablelog - } - if tableLog > maxEncTableLog { - tableLog = maxEncTableLog - } - s.actualTableLog = tableLog -} - -// validateNorm validates the normalized histogram table. -func (s *fseEncoder) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) - } - return nil -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. -func (s *fseEncoder) writeCount(out []byte) ([]byte, error) { - if s.useRLE { - return append(out, s.rleVal), nil - } - if s.preDefined || s.reUsed { - // Never write predefined. - return out, nil - } - - var ( - tableLog = s.actualTableLog - tableSize = 1 << tableLog - previous0 bool - charnum uint16 - - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2 - - // Write Table Size - bitStream = uint32(tableLog - minEncTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - outP = len(out) - ) - if cap(out) < outP+maxHeaderSize { - out = append(out, make([]byte, maxHeaderSize*3)...) - out = out[:len(out)-maxHeaderSize*3] - } - out = out[:outP+maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return nil, errors.New("internal error: remaining < 1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - if outP+2 > len(out) { - return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) - } - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += int((bitCount + 7) / 8) - - if charnum > s.symbolLen { - return nil, errors.New("internal error: charnum > s.symbolLen") - } - return out[:outP], nil -} - -// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) -// note 1 : assume symbolValue is valid (<= maxSymbolValue) -// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * -func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { - minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 - threshold := (minNbBits + 1) << 16 - if debugAsserts { - if !(s.actualTableLog < 16) { - panic("!s.actualTableLog < 16") - } - // ensure enough room for renormalization double shift - if !(uint8(accuracyLog) < 31-s.actualTableLog) { - panic("!uint8(accuracyLog) < 31-s.actualTableLog") - } - } - tableSize := uint32(1) << s.actualTableLog - deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) - // linear interpolation (very approximate) - normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog - bitMultiplier := uint32(1) << accuracyLog - if debugAsserts { - if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { - panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") - } - if normalizedDeltaFromThreshold > bitMultiplier { - panic("normalizedDeltaFromThreshold > bitMultiplier") - } - } - return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold -} - -// Returns the cost in bits of encoding the distribution in count using ctable. -// Histogram should only be up to the last non-zero symbol. -// Returns an -1 if ctable cannot represent all the symbols in count. -func (s *fseEncoder) approxSize(hist []uint32) uint32 { - if int(s.symbolLen) < len(hist) { - // More symbols than we have. - return math.MaxUint32 - } - if s.useRLE { - // We will never reuse RLE encoders. - return math.MaxUint32 - } - const kAccuracyLog = 8 - badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog - var cost uint32 - for i, v := range hist { - if v == 0 { - continue - } - if s.norm[i] == 0 { - return math.MaxUint32 - } - bitCost := s.bitCost(uint8(i), kAccuracyLog) - if bitCost > badCost { - return math.MaxUint32 - } - cost += v * bitCost - } - return cost >> kAccuracyLog -} - -// maxHeaderSize returns the maximum header size in bits. -// This is not exact size, but we want a penalty for new tables anyway. -func (s *fseEncoder) maxHeaderSize() uint32 { - if s.preDefined { - return 0 - } - if s.useRLE { - return 8 - } - return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 -} - -// cState contains the compression state of a stream. 
-type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - if len(c.stateTable) == 1 { - // RLE - c.stateTable[0] = uint16(0) - c.state = 0 - return - } - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + int32(first.deltaFindState) - c.state = c.stateTable[lu] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go deleted file mode 100644 index 474cb77d2b..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math" - "sync" -) - -var ( - // fsePredef are the predefined fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredef [3]fseDecoder - - // fsePredefEnc are the predefined encoder based on fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredefEnc [3]fseEncoder - - // symbolTableX contain the transformations needed for each type as defined in - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - symbolTableX [3][]baseOffset - - // maxTableSymbol is the biggest supported symbol for each table type - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} - - // bitTables is the bits table for each table. - bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} -) - -type tableIndex uint8 - -const ( - // indexes for fsePredef and symbolTableX - tableLiteralLengths tableIndex = 0 - tableOffsets tableIndex = 1 - tableMatchLengths tableIndex = 2 - - maxLiteralLengthSymbol = 35 - maxOffsetLengthSymbol = 30 - maxMatchLengthSymbol = 52 -) - -// baseOffset is used for calculating transformations. -type baseOffset struct { - baseLine uint32 - addBits uint8 -} - -// fillBase will precalculate base offsets with the given bit distributions. 
-func fillBase(dst []baseOffset, base uint32, bits ...uint8) { - if len(bits) != len(dst) { - panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) - } - for i, bit := range bits { - if base > math.MaxInt32 { - panic("invalid decoding table, base overflows int32") - } - - dst[i] = baseOffset{ - baseLine: base, - addBits: bit, - } - base += 1 << bit - } -} - -var predef sync.Once - -func initPredefined() { - predef.Do(func() { - // Literals length codes - tmp := make([]baseOffset, 36) - for i := range tmp[:16] { - tmp[i] = baseOffset{ - baseLine: uint32(i), - addBits: 0, - } - } - fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableLiteralLengths] = tmp - - // Match length codes - tmp = make([]baseOffset, 53) - for i := range tmp[:32] { - tmp[i] = baseOffset{ - // The transformation adds the 3 length. - baseLine: uint32(i) + 3, - addBits: 0, - } - } - fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableMatchLengths] = tmp - - // Offset codes - tmp = make([]baseOffset, maxOffsetBits+1) - tmp[1] = baseOffset{ - baseLine: 1, - addBits: 1, - } - fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) - symbolTableX[tableOffsets] = tmp - - // Fill predefined tables and transform them. - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - for i := range fsePredef[:] { - f := &fsePredef[i] - switch tableIndex(i) { - case tableLiteralLengths: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 - f.actualTableLog = 6 - copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, - -1, -1, -1, -1}) - f.symbolLen = 36 - case tableOffsets: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 - f.actualTableLog = 5 - copy(f.norm[:], []int16{ - 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) - f.symbolLen = 29 - case tableMatchLengths: - //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 - f.actualTableLog = 6 - copy(f.norm[:], []int16{ - 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, - -1, -1, -1, -1, -1}) - f.symbolLen = 53 - } - if err := f.buildDtable(); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - if err := f.transform(symbolTableX[i]); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - f.preDefined = true - - // Create encoder as well - enc := &fsePredefEnc[i] - copy(enc.norm[:], f.norm[:]) - enc.symbolLen = f.symbolLen - enc.actualTableLog = f.actualTableLog - if err := enc.buildCTable(); err != nil { - panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) - } - enc.setBits(bitTables[i]) - enc.preDefined = true - } - }) -} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go deleted file mode 100644 index 5d73c21ebd..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. 
-// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go deleted file mode 100644 index 09164856d2..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "github.com/klauspost/compress/huff0" -) - -// history contains the information transferred between blocks. -type history struct { - // Literal decompression - huffTree *huff0.Scratch - - // Sequence decompression - decoders sequenceDecs - recentOffsets [3]int - - // History buffer... - b []byte - - // ignoreBuffer is meant to ignore a number of bytes - // when checking for matches in history - ignoreBuffer int - - windowSize int - allocFrameBuffer int // needed? - error bool - dict *dict -} - -// reset will reset the history to initial state of a frame. -// The history must already have been initialized to the desired size. -func (h *history) reset() { - h.b = h.b[:0] - h.ignoreBuffer = 0 - h.error = false - h.recentOffsets = [3]int{1, 4, 8} - h.decoders.freeDecoders() - h.decoders = sequenceDecs{br: h.decoders.br} - h.freeHuffDecoder() - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) -} - -func (h *history) freeHuffDecoder() { - if h.huffTree != nil { - if h.dict == nil || h.dict.litEnc != h.huffTree { - huffDecoderPool.Put(h.huffTree) - h.huffTree = nil - } - } -} - -func (h *history) setDict(dict *dict) { - if dict == nil { - return - } - h.dict = dict - h.decoders.litLengths = dict.llDec - h.decoders.offsets = dict.ofDec - h.decoders.matchLengths = dict.mlDec - h.decoders.dict = dict.content - h.recentOffsets = dict.offsets - h.huffTree = dict.litEnc -} - -// append bytes to history. -// This function will make sure there is space for it, -// if the buffer has been allocated with enough extra space. -func (h *history) append(b []byte) { - if len(b) >= h.windowSize { - // Discard all history by simply overwriting - h.b = h.b[:h.windowSize] - copy(h.b, b[len(b)-h.windowSize:]) - return - } - - // If there is space, append it. - if len(b) < cap(h.b)-len(h.b) { - h.b = append(h.b, b...) - return - } - - // Move data down so we only have window size left. 
- // We know we have less than window size in b at this point. - discard := len(b) + len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] - copy(h.b[h.windowSize-len(b):], b) -} - -// ensureBlock will ensure there is space for at least one block... -func (h *history) ensureBlock() { - if cap(h.b) < h.allocFrameBuffer { - h.b = make([]byte, 0, h.allocFrameBuffer) - return - } - - avail := cap(h.b) - len(h.b) - if avail >= h.windowSize || avail > maxCompressedBlockSize { - return - } - // Move data down so we only have window size left. - // We know we have less than window size in b at this point. - discard := len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] -} - -// append bytes to history without ever discarding anything. -func (h *history) appendKeep(b []byte) { - h.b = append(h.b, b...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065f4..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md deleted file mode 100644 index 69aa3bb587..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# xxhash - -VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. 
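The vendored copy keeps the same call surface as the upstream package, so a minimal usage sketch looks like this (using the upstream import path the README links to; illustrative only, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash"
)

func main() {
	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64String("hello"))

	// Streaming: Digest implements hash.Hash64 and can be fed incrementally.
	d := xxhash.New()
	d.WriteString("hel")
	d.WriteString("lo")
	fmt.Println(d.Sum64() == xxhash.Sum64String("hello")) // true
}
```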
Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | - -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: - -``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go deleted file mode 100644 index 2c112a0ab1..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ /dev/null @@ -1,237 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. - -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = prime1v + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -prime1v - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. 
- copy(d.mem[d.n:], b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(d.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
-} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s deleted file mode 100644 index cea1785619..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,216 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI - - // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. 
- MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX - JGE finalize - -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX - - // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) - - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s deleted file mode 100644 index 4d64a17d69..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ /dev/null @@ -1,186 +0,0 @@ -// +build gc,!purego,!noasm - -#include "textflag.h" - -// Register allocation. -#define digest R1 -#define h R2 // Return value. -#define p R3 // Input pointer. -#define len R4 -#define nblocks R5 // len / 32. -#define prime1 R7 -#define prime2 R8 -#define prime3 R9 -#define prime4 R10 -#define prime5 R11 -#define v1 R12 -#define v2 R13 -#define v3 R14 -#define v4 R15 -#define x1 R20 -#define x2 R21 -#define x3 R22 -#define x4 R23 - -#define round(acc, x) \ - MADD prime2, acc, x, acc \ - ROR $64-31, acc \ - MUL prime1, acc \ - -// x = round(0, x). -#define round0(x) \ - MUL prime2, x \ - ROR $64-31, x \ - MUL prime1, x \ - -#define mergeRound(x) \ - round0(x) \ - EOR x, h \ - MADD h, prime4, prime1, h \ - -// Update v[1-4] with 32-byte blocks. Assumes len >= 32. -#define blocksLoop() \ - LSR $5, len, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 32(p), (x1, x2) \ - round(v1, x1) \ - LDP -16(p), (x3, x4) \ - round(v2, x2) \ - SUB $1, nblocks \ - round(v3, x3) \ - round(v4, x4) \ - CBNZ nblocks, loop \ - -// The primes are repeated here to ensure that they're stored -// in a contiguous array, so we can load them with LDP. 
-DATA primes<> +0(SB)/8, $11400714785074694791 -DATA primes<> +8(SB)/8, $14029467366897019727 -DATA primes<>+16(SB)/8, $1609587929392839161 -DATA primes<>+24(SB)/8, $9650029242287828579 -DATA primes<>+32(SB)/8, $2870177450012600261 -GLOBL primes<>(SB), NOPTR+RODATA, $40 - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 - LDP b_base+0(FP), (p, len) - - LDP primes<> +0(SB), (prime1, prime2) - LDP primes<>+16(SB), (prime3, prime4) - MOVD primes<>+32(SB), prime5 - - CMP $32, len - CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } - BLO afterLoop - - ADD prime1, prime2, v1 - MOVD prime2, v2 - MOVD $0, v3 - NEG prime1, v4 - - blocksLoop() - - ROR $64-1, v1, x1 - ROR $64-7, v2, x2 - ADD x1, x2 - ROR $64-12, v3, x3 - ROR $64-18, v4, x4 - ADD x3, x4 - ADD x2, x4, h - - mergeRound(v1) - mergeRound(v2) - mergeRound(v3) - mergeRound(v4) - -afterLoop: - ADD len, h - - TBZ $4, len, try8 - LDP.P 16(p), (x1, x2) - - round0(x1) - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - - round0(x2) - ROR $64-27, h - EOR x2 @> 64-27, h - MADD h, prime4, prime1, h - -try8: - TBZ $3, len, try4 - MOVD.P 8(p), x1 - - round0(x1) - ROR $64-27, h - EOR x1 @> 64-27, h - MADD h, prime4, prime1, h - -try4: - TBZ $2, len, try2 - MOVWU.P 4(p), x2 - - MUL prime1, x2 - ROR $64-23, h - EOR x2 @> 64-23, h - MADD h, prime3, prime2, h - -try2: - TBZ $1, len, try1 - MOVHU.P 2(p), x3 - AND $255, x3, x1 - LSR $8, x3, x2 - - MUL prime5, x1 - ROR $64-11, h - EOR x1 @> 64-11, h - MUL prime1, h - - MUL prime5, x2 - ROR $64-11, h - EOR x2 @> 64-11, h - MUL prime1, h - -try1: - TBZ $0, len, end - MOVBU (p), x4 - - MUL prime5, x4 - ROR $64-11, h - EOR x4 @> 64-11, h - MUL prime1, h - -end: - EOR h >> 33, h - MUL prime2, h - EOR h >> 29, h - MUL prime3, h - EOR h >> 32, h - - MOVD h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -// -// Assumes len(b) >= 32. -TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40 - LDP primes<>(SB), (prime1, prime2) - - // Load state. Assume v[1-4] are stored contiguously. - MOVD d+0(FP), digest - LDP 0(digest), (v1, v2) - LDP 16(digest), (v3, v4) - - LDP b_base+8(FP), (p, len) - - blocksLoop() - - // Store updated state. - STP (v1, v2), 0(digest) - STP (v3, v4), 16(digest) - - BIC $31, len - MOVD len, ret+32(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go deleted file mode 100644 index 1a1fac9c26..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm -// +build amd64 arm64 -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go deleted file mode 100644 index 209cb4a999..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ /dev/null @@ -1,77 +0,0 @@ -//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go deleted file mode 100644 index 6f3b0cb102..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go +++ /dev/null @@ -1,11 +0,0 @@ -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go deleted file mode 100644 index f833d1541f..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ /dev/null @@ -1,509 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "io" -) - -type seq struct { - litLen uint32 - matchLen uint32 - offset uint32 - - // Codes are stored here for the encoder - // so they only have to be looked up once. - llCode, mlCode, ofCode uint8 -} - -type seqVals struct { - ll, ml, mo int -} - -func (s seq) String() string { - if s.offset <= 3 { - if s.offset == 0 { - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") -} - -type seqCompMode uint8 - -const ( - compModePredefined seqCompMode = iota - compModeRLE - compModeFSE - compModeRepeat -) - -type sequenceDec struct { - // decoder keeps track of the current state and updates it from the bitstream. 
-	fse    *fseDecoder
-	state  fseState
-	repeat bool
-}
-
-// init the state of the decoder with input from stream.
-func (s *sequenceDec) init(br *bitReader) error {
-	if s.fse == nil {
-		return errors.New("sequence decoder not defined")
-	}
-	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
-	return nil
-}
-
-// execute the decoded sequences with the provided history.
-func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
-	// Ensure we have enough output size...
-	if len(s.out)+s.seqSize > cap(s.out) {
-		addBytes := s.seqSize + len(s.out)
-		s.out = append(s.out, make([]byte, addBytes)...)
-		s.out = s.out[:len(s.out)-addBytes]
-	}
-
-	if debugDecoder {
-		printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
-	}
-
-	var t = len(s.out)
-	out := s.out[:t+s.seqSize]
-
-	for _, seq := range seqs {
-		// Add literals
-		copy(out[t:], s.literals[:seq.ll])
-		t += seq.ll
-		s.literals = s.literals[seq.ll:]
-
-		// Copy from dictionary...
-		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
-			if len(s.dict) == 0 {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
-			}
-
-			// we may be in dictionary.
-			dictO := len(s.dict) - (seq.mo - (t + len(hist)))
-			if dictO < 0 || dictO >= len(s.dict) {
-				return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
-			}
-			end := dictO + seq.ml
-			if end > len(s.dict) {
-				n := len(s.dict) - dictO
-				copy(out[t:], s.dict[dictO:])
-				t += n
-				seq.ml -= n
-			} else {
-				copy(out[t:], s.dict[dictO:end])
-				t += end - dictO
-				continue
-			}
-		}
-
-		// Copy from history.
-		if v := seq.mo - t; v > 0 {
-			// v is the start position in history from end.
-			start := len(hist) - v
-			if seq.ml > v {
-				// Some goes into current block.
-				// Copy remainder of history
-				copy(out[t:], hist[start:])
-				t += v
-				seq.ml -= v
-			} else {
-				copy(out[t:], hist[start:start+seq.ml])
-				t += seq.ml
-				continue
-			}
-		}
-		// We must be in current buffer now
-		if seq.ml > 0 {
-			start := t - seq.mo
-			if seq.ml <= t-start {
-				// No overlap
-				copy(out[t:], out[start:start+seq.ml])
-				t += seq.ml
-				continue
-			} else {
-				// Overlapping copy
-				// Extend destination slice and copy one byte at the time.
-				src := out[start : start+seq.ml]
-				dst := out[t:]
-				dst = dst[:len(src)]
-				t += len(src)
-				// Destination is the space we just added.
-				for i := range src {
-					dst[i] = src[i]
-				}
-			}
-		}
-	}
-
-	// Add final literals
-	copy(out[t:], s.literals)
-	if debugDecoder {
-		t += len(s.literals)
-		if t != len(out) {
-			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
-		}
-	}
-	s.out = out
-
-	return nil
-}
-
-// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decodeSync(hist []byte) error {
-	supported, err := s.decodeSyncSimple(hist)
-	if supported {
-		return err
-	}
-
-	br := s.br
-	seqs := s.nSeqs
-	startSize := len(s.out)
-	// Grab full sizes tables, to avoid bounds checks.
- llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - for i := seqs - 1; i >= 0; i-- { - if br.overread() { - printf("reading sequence %d, exceeded available data\n", seqs-i) - return io.ErrUnexpectedEOF - } - var ll, mo, ml int - if br.off > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - - if ll > len(s.literals) { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) - } - size := ll + ml + len(out) - if size-startSize > maxBlockSize { - if size-startSize == 424242 { - panic("here") - } - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - if size > cap(out) { - // Not enough size, which can happen under high volume block streaming conditions - // but could be if destination slice is too small for sync operations. - // over-allocating here can create a large amount of GC pressure so we try to keep - // it as contained as possible - used := len(out) - startSize - addBytes := 256 + ll + ml + used>>2 - // Clamp to max block size. - if used+addBytes > maxBlockSize { - addBytes = maxBlockSize - used - } - out = append(out, make([]byte, addBytes)...) - out = out[:len(out)-addBytes] - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - - // Add literals - out = append(out, s.literals[:ll]...) - s.literals = s.literals[ll:] - - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - - if mo > len(out)+len(hist) || mo > s.windowSize { - if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) - } - - // we may be in dictionary. 
- dictO := len(s.dict) - (mo - (len(out) + len(hist))) - if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) - } - end := dictO + ml - if end > len(s.dict) { - out = append(out, s.dict[dictO:]...) - ml -= len(s.dict) - dictO - } else { - out = append(out, s.dict[dictO:end]...) - mo = 0 - ml = 0 - } - } - - // Copy from history. - // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(out); v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if ml > v { - // Some goes into current block. - // Copy remainder of history - out = append(out, hist[start:]...) - ml -= v - } else { - out = append(out, hist[start:start+ml]...) - ml = 0 - } - } - // We must be in current buffer now - if ml > 0 { - start := len(out) - mo - if ml <= len(out)-start { - // No overlap - out = append(out, out[start:start+ml]...) - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - out = out[:len(out)+ml] - src := out[start : start+ml] - // Destination is the space we just added. - dst := out[len(out)-ml:] - dst = dst[:len(src)] - for i := range src { - dst[i] = src[i] - } - } - } - if i == 0 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - - // Check if space for literals - if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - - // Add final literals - s.out = append(out, s.literals...) - return br.close() -} - -var bitMask [16]uint16 - -func init() { - for i := range bitMask[:] { - bitMask[i] = uint16((1 << uint(i)) - 1) - } -} - -func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. - br.fill() - if s.maxBits <= 32 { - mo += br.getBits(moB) - ml += br.getBits(mlB) - ll += br.getBits(llB) - } else { - mo += br.getBits(moB) - br.fill() - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) - - } - mo = s.adjustOffset(mo, ll, moB) - return -} - -func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { - if offsetB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = offset - return offset - } - - if litLen == 0 { - // There is an exception though, when current sequence's literals_length = 0. 
- // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - offset++ - } - - if offset == 0 { - return s.prevOffset[0] - } - var temp int - if offset == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[offset] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if offset != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - return temp -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go deleted file mode 100644 index 191384adfd..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ /dev/null @@ -1,379 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -type decodeSyncAsmContext struct { - llTable []decSymbol - mlTable []decSymbol - ofTable []decSymbol - llState uint64 - mlState uint64 - ofState uint64 - iteration int - litRemain int - out []byte - outPosition int - literals []byte - litPosition int - history []byte - windowSize int - ll int // set on error (not for all errors, please refer to _generate/gen.go) - ml int // set on error (not for all errors, please refer to _generate/gen.go) - mo int // set on error (not for all errors, please refer to _generate/gen.go) -} - -// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. -// -//go:noescape -func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. -// -//go:noescape -func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// decode sequences from the stream with the provided history but without a dictionary. -func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - if len(s.dict) > 0 { - return false, nil - } - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { - return false, nil - } - - // FIXME: Using unsafe memory copies leads to rare, random crashes - // with fuzz testing. It is therefore disabled for now. 
-	const useSafe = true
-	/*
-		useSafe := false
-		if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
-			useSafe = true
-		}
-		if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
-			useSafe = true
-		}
-		if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
-			useSafe = true
-		}
-	*/
-
-	br := s.br
-
-	maxBlockSize := maxCompressedBlockSize
-	if s.windowSize < maxBlockSize {
-		maxBlockSize = s.windowSize
-	}
-
-	ctx := decodeSyncAsmContext{
-		llTable:     s.litLengths.fse.dt[:maxTablesize],
-		mlTable:     s.matchLengths.fse.dt[:maxTablesize],
-		ofTable:     s.offsets.fse.dt[:maxTablesize],
-		llState:     uint64(s.litLengths.state.state),
-		mlState:     uint64(s.matchLengths.state.state),
-		ofState:     uint64(s.offsets.state.state),
-		iteration:   s.nSeqs - 1,
-		litRemain:   len(s.literals),
-		out:         s.out,
-		outPosition: len(s.out),
-		literals:    s.literals,
-		windowSize:  s.windowSize,
-		history:     hist,
-	}
-
-	s.seqSize = 0
-	startSize := len(s.out)
-
-	var errCode int
-	if cpuinfo.HasBMI2() {
-		if useSafe {
-			errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
-		} else {
-			errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
-		}
-	} else {
-		if useSafe {
-			errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
-		} else {
-			errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
-		}
-	}
-	switch errCode {
-	case noError:
-		break
-
-	case errorMatchLenOfsMismatch:
-		return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
-
-	case errorMatchLenTooBig:
-		return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
-
-	case errorMatchOffTooBig:
-		return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
-			ctx.mo, ctx.outPosition+len(hist)-startSize)
-
-	case errorNotEnoughLiterals:
-		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
-			ctx.ll, ctx.litRemain+ctx.ll)
-
-	case errorNotEnoughSpace:
-		size := ctx.outPosition + ctx.ll + ctx.ml
-		if debugDecoder {
-			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
-		}
-		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
-
-	default:
-		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
-	}
-
-	s.seqSize += ctx.litRemain
-	if s.seqSize > maxBlockSize {
-		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
-
-	}
-	err := br.close()
-	if err != nil {
-		printf("Closing sequences: %v, %+v\n", err, *br)
-		return true, err
-	}
-
-	s.literals = s.literals[ctx.litPosition:]
-	t := ctx.outPosition
-	s.out = s.out[:t]
-
-	// Add final literals
-	s.out = append(s.out, s.literals...)
-	if debugDecoder {
-		t += len(s.literals)
-		if t != len(s.out) {
-			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
-		}
-	}
-
-	return true, nil
-}
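Since this PR drops the vendored zstd tree wholesale, a quick orientation example may help: the sequence decoders above sit behind the package's public Encoder/Decoder API. A minimal round-trip sketch against the upstream module (illustrative only, not part of the diff):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Stateless compression: a nil writer is fine when only EncodeAll is used.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	src := bytes.Repeat([]byte("terraform-provider-github "), 1024)
	blob := enc.EncodeAll(src, nil)

	// Decompression runs through the sequence decoders
	// (decodeSync / decodeSyncSimple) that this hunk removes from vendor/.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(blob, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(out, src)) // true
}
```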
-
-// --------------------------------------------------------------------------------
-
-type decodeAsmContext struct {
-	llTable   []decSymbol
-	mlTable   []decSymbol
-	ofTable   []decSymbol
-	llState   uint64
-	mlState   uint64
-	ofState   uint64
-	iteration int
-	seqs      []seqVals
-	litRemain int
-}
-
-const noError = 0
-
-// error reported when mo == 0 && ml > 0
-const errorMatchLenOfsMismatch = 1
-
-// error reported when ml > maxMatchLen
-const errorMatchLenTooBig = 2
-
-// error reported when mo > available history or mo > s.windowSize
-const errorMatchOffTooBig = 3
-
-// error reported when the sum of literal lengths exceeds the literal buffer size
-const errorNotEnoughLiterals = 4
-
-// error reported when capacity of `out` is too small
-const errorNotEnoughSpace = 5
-
-// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
-//
-// Please refer to seqdec_generic.go for the reference implementation.
-//
-//go:noescape
-func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
-//
-// Please refer to seqdec_generic.go for the reference implementation.
-//
-//go:noescape
-func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
-//
-//go:noescape
-func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
-//
-//go:noescape
-func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
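The repeat-offset handling these assembly routines implement is easy to lose in the inlined Go fallbacks above and below; as a reference, here is a standalone transcription of the deleted `sequenceDecs.adjustOffset` (receiver replaced by an explicit queue parameter; illustrative only):

```go
package main

import "fmt"

// adjustOffset transcribes the deleted sequenceDecs.adjustOffset. prev holds
// the three most recent offsets. offsetB is the number of extra bits read for
// the offset code: values decoded with more than one extra bit are real
// offsets, everything else indexes the repeat-offset queue (shifted by one
// when litLen == 0, with the shifted value 3 meaning prev[0] - 1).
func adjustOffset(prev *[3]int, offset, litLen int, offsetB uint8) int {
	if offsetB > 1 {
		prev[2], prev[1], prev[0] = prev[1], prev[0], offset
		return offset
	}
	if litLen == 0 {
		offset++
	}
	if offset == 0 {
		return prev[0]
	}
	var temp int
	if offset == 3 {
		temp = prev[0] - 1
	} else {
		temp = prev[offset]
	}
	if temp == 0 {
		temp = 1 // 0 is invalid; input is corrupted, force offset 1
	}
	if offset != 1 {
		prev[2] = prev[1]
	}
	prev[1] = prev[0]
	prev[0] = temp
	return temp
}

func main() {
	prev := [3]int{1, 4, 8} // initial recent offsets, per the zstd format defaults
	fmt.Println(adjustOffset(&prev, 1, 3, 0), prev) // repeat offset 2 -> 4, queue [4 1 8]
}
```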
-
-// decode sequences from the stream without the provided history.
-func (s *sequenceDecs) decode(seqs []seqVals) error {
-	br := s.br
-
-	maxBlockSize := maxCompressedBlockSize
-	if s.windowSize < maxBlockSize {
-		maxBlockSize = s.windowSize
-	}
-
-	ctx := decodeAsmContext{
-		llTable:   s.litLengths.fse.dt[:maxTablesize],
-		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
-		ofTable:   s.offsets.fse.dt[:maxTablesize],
-		llState:   uint64(s.litLengths.state.state),
-		mlState:   uint64(s.matchLengths.state.state),
-		ofState:   uint64(s.offsets.state.state),
-		seqs:      seqs,
-		iteration: len(seqs) - 1,
-		litRemain: len(s.literals),
-	}
-
-	s.seqSize = 0
-	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
-	var errCode int
-	if cpuinfo.HasBMI2() {
-		if lte56bits {
-			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
-		} else {
-			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
-		}
-	} else {
-		if lte56bits {
-			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
-		} else {
-			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
-		}
-	}
-	if errCode != 0 {
-		i := len(seqs) - ctx.iteration - 1
-		switch errCode {
-		case errorMatchLenOfsMismatch:
-			ml := ctx.seqs[i].ml
-			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
-
-		case errorMatchLenTooBig:
-			ml := ctx.seqs[i].ml
-			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
-
-		case errorNotEnoughLiterals:
-			ll := ctx.seqs[i].ll
-			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
-		}
-
-		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
-	}
-
-	if ctx.litRemain < 0 {
-		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
-			len(s.literals), len(s.literals)-ctx.litRemain)
-	}
-
-	s.seqSize += ctx.litRemain
-	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
-	}
-	err := br.close()
-	if err != nil {
-		printf("Closing sequences: %v, %+v\n", err, *br)
-	}
-	return err
-}
-
-// --------------------------------------------------------------------------------
-
-type executeAsmContext struct {
-	seqs        []seqVals
-	seqIndex    int
-	out         []byte
-	history     []byte
-	literals    []byte
-	outPosition int
-	litPosition int
-	windowSize  int
-}
-
-// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
-//
-// Returns false if a match offset is too big.
-//
-// Please refer to seqdec_generic.go for the reference implementation.
-//
-//go:noescape
-func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
-
-// Same as above, but with safe memcopies
-//
-//go:noescape
-func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
-
-// executeSimple handles cases when dictionary is not used.
-func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
-	// Ensure we have enough output size...
-	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
-		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
-		s.out = append(s.out, make([]byte, addBytes)...)
- s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - ctx := executeAsmContext{ - seqs: seqs, - seqIndex: 0, - out: out, - history: hist, - outPosition: t, - litPosition: 0, - literals: s.literals, - windowSize: s.windowSize, - } - var ok bool - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - ok = sequenceDecs_executeSimple_safe_amd64(&ctx) - } else { - ok = sequenceDecs_executeSimple_amd64(&ctx) - } - if !ok { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", - seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) - } - s.literals = s.literals[ctx.litPosition:] - t = ctx.outPosition - - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s deleted file mode 100644 index 52e5703c26..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ /dev/null @@ -1,4099 +0,0 @@ -// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_amd64(SB), $8-32 - MOVQ br+8(FP), AX - MOVQ 32(AX), DX - MOVBQZX 40(AX), BX - MOVQ 24(AX), SI - MOVQ (AX), AX - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset and match length. 
[… generated amd64 assembly elided: the remainder of sequenceDecs_decode_amd64 and the bodies of sequenceDecs_decode_56_amd64, sequenceDecs_decode_bmi2, sequenceDecs_decode_56_bmi2, sequenceDecs_executeSimple_amd64, sequenceDecs_executeSimple_safe_amd64, sequenceDecs_decodeSync_amd64, sequenceDecs_decodeSync_bmi2, and most of sequenceDecs_decodeSync_safe_amd64, all machine-generated ("DO NOT EDIT") and regenerated from gen.go …]
-8(AX)(R13*1), R14 - MOVQ CX, (R10) - MOVQ R14, -8(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_safe_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_safe_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. 
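The refill this comment introduces has two paths: a single unaligned load when enough input bytes remain, and a byte-at-a-time loop near the start of the buffer (the `*_fill_byte_by_byte` labels). A hedged Go sketch of the equivalent logic, reconstructed from the assembly rather than copied from the package, with illustrative field names:

```go
package sketch

import "encoding/binary"

// bitReader mirrors the state the assembly keeps in registers: input is
// consumed back-to-front, value is the bit buffer, and bitsRead counts
// bits already consumed from its top. Names are reconstructions.
type bitReader struct {
	in       []byte
	off      int    // bytes of input not yet loaded into value
	value    uint64 // bit buffer; bits are taken from the top
	bitsRead uint8
}

// fill tops up the bit buffer, like the fast/byte-by-byte pair of paths
// in the fill blocks below.
func (b *bitReader) fill() {
	if b.bitsRead < 32 {
		return // plenty of valid bits left
	}
	if b.off >= 4 {
		// Fast path: one little-endian load shifts 32 fresh bits in.
		v := binary.LittleEndian.Uint32(b.in[b.off-4 : b.off])
		b.value = b.value<<32 | uint64(v)
		b.bitsRead -= 32
		b.off -= 4
		return
	}
	// Slow path near the start of the input: one byte at a time.
	for b.off > 0 && b.bitsRead > 7 {
		b.off--
		b.value = b.value<<8 | uint64(b.in[b.off])
		b.bitsRead -= 8
	}
}
```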
- CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_end - -sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end - -sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_safe_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_safe_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_safe_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - MOVQ CX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R10), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R10 - ADDQ $0x10, R9 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R10)(R14*1), R10 - LEAQ 16(R9)(R14*1), R9 - MOVUPS -16(R10), X0 - MOVUPS X0, -16(R9) - JMP copy_1_end - -copy_1_small: - CMPQ CX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ CX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (R10), R14 - MOVB -1(R10)(CX*1), R15 - MOVB R14, (R9) - MOVB R15, -1(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_3: - MOVW (R10), R14 - MOVB 2(R10), R15 - MOVW R14, (R9) - MOVB R15, 2(R9) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R10), R14 - MOVL -4(R10)(CX*1), R15 - MOVL R14, (R9) - MOVL R15, -4(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R10), R14 - MOVQ -8(R10)(CX*1), R15 - MOVQ R14, (R9) - MOVQ R15, -8(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - -copy_1_end: - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - 
MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - -copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (CX), X0 - MOVUPS X0, (R9) - ADDQ $0x10, CX - ADDQ $0x10, R9 - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(CX)(R12*1), CX - LEAQ 16(R9)(R12*1), R9 - MOVUPS -16(CX), X0 - MOVUPS X0, -16(R9) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (CX), R12 - MOVB -1(CX)(R13*1), R14 - MOVB R12, (R9) - MOVB R14, -1(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_3: - MOVW (CX), R12 - MOVB 2(CX), R14 - MOVW R12, (R9) - MOVB R14, 2(R9) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (CX), R12 - MOVL -4(CX)(R13*1), R14 - MOVL R12, (R9) - MOVL R14, -4(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (CX), R12 - MOVQ -8(CX)(R13*1), R14 - MOVQ R12, (R9) - MOVQ R14, -8(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_safe_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, 
ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go deleted file mode 100644 index ac2a80d291..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ /dev/null @@ -1,237 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "fmt" - "io" -) - -// decode sequences from the stream with the provided history but without dictionary. -func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - return false, nil -} - -// decode sequences from the stream without the provided history. -func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - s.seqSize = 0 - litRemain := len(s.literals) - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - for i := range seqs { - var ll, mo, ml int - if br.off > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
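That repeat-offset rule, together with the surrounding branches, restated as a standalone Go sketch for readability (prevOffset stands in for s.prevOffset; this is a transcription of the inlined logic here, not an exported API):

```go
// adjustOffset applies the zstd repeat-offset rule: mo is the raw offset
// value, ll the literal length, moB the number of extra offset bits.
func adjustOffset(mo, ll int, moB uint8, prevOffset *[3]int) int {
	if moB > 1 {
		// A real offset: shift the recent-offset history and use it as-is.
		prevOffset[2] = prevOffset[1]
		prevOffset[1] = prevOffset[0]
		prevOffset[0] = mo
		return mo
	}
	if ll == 0 {
		// literals_length == 0 shifts the repeat offsets by one:
		// 1 -> Repeated_Offset2, 2 -> Repeated_Offset3,
		// 3 -> Repeated_Offset1 - 1.
		mo++
	}
	if mo == 0 {
		return prevOffset[0] // Repeated_Offset1, history unchanged
	}
	var temp int
	if mo == 3 {
		temp = prevOffset[0] - 1
	} else {
		temp = prevOffset[mo]
	}
	if temp == 0 {
		temp = 1 // offset 0 is invalid: corrupted input, force 1
	}
	if mo != 1 {
		prevOffset[2] = prevOffset[1]
	}
	prevOffset[1] = prevOffset[0]
	prevOffset[0] = temp
	return temp
}
```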
- mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - if br.overread() { - if debugDecoder { - printf("reading sequence %d, exceeded available data\n", i) - } - return io.ErrUnexpectedEOF - } - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - // Evaluate. - // We might be doing this async, so do it early. - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - s.seqSize += ll + ml - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - litRemain -= ll - if litRemain < 0 { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) - } - seqs[i] = seqVals{ - ll: ll, - ml: ml, - mo: mo, - } - if i == len(seqs)-1 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - s.seqSize += litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// executeSimple handles cases when a dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize > cap(s.out) { - addBytes := s.seqSize + len(s.out) - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - for _, seq := range seqs { - // Add literals - copy(out[t:], s.literals[:seq.ll]) - t += seq.ll - s.literals = s.literals[seq.ll:] - - // Malformed input - if seq.mo > t+len(hist) || seq.mo > s.windowSize { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) - } - - // Copy from history. - if v := seq.mo - t; v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if seq.ml > v { - // Some goes into the current block. 
- // Copy remainder of history - copy(out[t:], hist[start:]) - t += v - seq.ml -= v - } else { - copy(out[t:], hist[start:start+seq.ml]) - t += seq.ml - continue - } - } - - // We must be in the current buffer now - if seq.ml > 0 { - start := t - seq.mo - if seq.ml <= t-start { - // No overlap - copy(out[t:], out[start:start+seq.ml]) - t += seq.ml - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - src := out[start : start+seq.ml] - dst := out[t:] - dst = dst[:len(src)] - t += len(src) - // Destination is the space we just added. - for i := range src { - dst[i] = src[i] - } - } - } - } - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go deleted file mode 100644 index 8014174a77..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "math/bits" - -type seqCoders struct { - llEnc, ofEnc, mlEnc *fseEncoder - llPrev, ofPrev, mlPrev *fseEncoder -} - -// swap coders with another (block). -func (s *seqCoders) swap(other *seqCoders) { - *s, *other = *other, *s -} - -// setPrev will update the previous encoders to the actually used ones -// and make sure a fresh one is in the main slot. -func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { - compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { - // We used the new one, more current to history and reuse the previous history - if *current == used { - *prev, *current = *current, *prev - c := *current - p := *prev - c.reUsed = false - p.reUsed = true - return - } - if used == *prev { - return - } - // Ensure we cannot reuse by accident - prevEnc := *prev - prevEnc.symbolLen = 0 - } - compareSwap(ll, &s.llEnc, &s.llPrev) - compareSwap(ml, &s.mlEnc, &s.mlPrev) - compareSwap(of, &s.ofEnc, &s.ofPrev) -} - -func highBit(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} - -var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 16, 17, 17, 18, 18, 19, 19, - 20, 20, 20, 20, 21, 21, 21, 21, - 22, 22, 22, 22, 22, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, - 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24} - -// Up to 6 bits -const maxLLCode = 35 - -// llBitsTable translates from ll code to number of bits. -var llBitsTable = [maxLLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 16} - -// llCode returns the code that represents the literal length requested. 
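A few worked values for the function defined next (assuming llCode, highBit, llCodeTable, and llBitsTable from this file are in scope; the baselines 48 and 64 come from the zstd format specification, not from this file):

```go
_ = llCode(2)   // == 2:  direct llCodeTable lookup; llBitsTable[2] == 0 extra bits
_ = llCode(63)  // == 24: last direct entry; llBitsTable[24] == 4 extra bits over baseline 48
_ = llCode(100) // == 25: highBit(100) == 6, plus llDeltaCode == 19; 6 extra bits over baseline 64
```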
-func llCode(litLength uint32) uint8 { - const llDeltaCode = 19 - if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) - return llCodeTable[litLength&63] - } - return uint8(highBit(litLength)) + llDeltaCode -} - -var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, - 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} - -// Up to 6 bits -const maxMLCode = 52 - -// mlBitsTable translates from ml code to number of bits. -var mlBitsTable = [maxMLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16} - -// note : mlBase = matchLength - MINMATCH; -// because it's the format it's stored in seqStore->sequences -func mlCode(mlBase uint32) uint8 { - const mlDeltaCode = 36 - if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) - return mlCodeTable[mlBase&127] - } - return uint8(highBit(mlBase)) + mlDeltaCode -} - -func ofCode(offset uint32) uint8 { - // A valid offset will always be > 0. - return uint8(bits.Len32(offset) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go deleted file mode 100644 index 9e1baad73b..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "hash/crc32" - "io" - - "github.com/klauspost/compress/huff0" - snappy "github.com/klauspost/compress/internal/snapref" -) - -const ( - snappyTagLiteral = 0x00 - snappyTagCopy1 = 0x01 - snappyTagCopy2 = 0x02 - snappyTagCopy4 = 0x03 -) - -const ( - snappyChecksumSize = 4 - snappyMagicBody = "sNaPpY" - - // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - snappyMaxBlockSize = 65536 - - // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - snappyMaxEncodedLenOfMaxBlockSize = 76490 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var ( - // ErrSnappyCorrupt reports that the input is invalid. - ErrSnappyCorrupt = errors.New("snappy: corrupt input") - // ErrSnappyTooLarge reports that the uncompressed length is too large. 
- ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") - // ErrSnappyUnsupported reports that the input isn't supported. - ErrSnappyUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. -// Conversion is done by converting the stream directly from Snappy without intermediate -// full decoding. -// Therefore the compression ratio is much less than what can be done by a full decompression -// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without -// any errors being generated. -// No CRC value is being generated and not all CRC values of the Snappy stream are checked. -// However, it provides really fast recompression of Snappy streams. -// The converter can be reused to avoid allocations, even after errors. -type SnappyConverter struct { - r io.Reader - err error - buf []byte - block *blockEnc -} - -// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. -// If any error is detected on the Snappy stream it is returned. -// The number of bytes written is returned. -func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { - initPredefined() - r.err = nil - r.r = in - if r.block == nil { - r.block = &blockEnc{} - r.block.init() - } - r.block.initNewEncode() - if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { - r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) - } - r.block.litEnc.Reuse = huff0.ReusePolicyNone - var written int64 - var readHeader bool - { - var header []byte - var n int - header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) - - n, r.err = w.Write(header) - if r.err != nil { - return written, r.err - } - written += int64(n) - } - - for { - if !r.readFull(r.buf[:4], true) { - // Add empty last block - r.block.reset(nil) - r.block.last = true - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, err := w.Write(r.block.output) - if err != nil { - return written, err - } - written += int64(n) - - return written, r.err - } - chunkType := r.buf[0] - if !readHeader { - if chunkType != chunkTypeStreamIdentifier { - println("chunkType != chunkTypeStreamIdentifier", chunkType) - r.err = ErrSnappyCorrupt - return written, r.err - } - readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - println("chunkLen > len(r.buf)", chunkType) - r.err = ErrSnappyUnsupported - return written, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). 
- if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return written, r.err - } - //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[snappyChecksumSize:] - - n, hdr, err := snappyDecodedLen(buf) - if err != nil { - r.err = err - return written, r.err - } - buf = buf[hdr:] - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - r.block.pushOffsets() - if err := decodeSnappy(r.block, buf); err != nil { - r.err = err - return written, r.err - } - if r.block.size+r.block.extraLits != n { - printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) - r.err = ErrSnappyCorrupt - return written, r.err - } - err = r.block.encode(nil, false, false) - switch err { - case errIncompressible: - r.block.popOffsets() - r.block.reset(nil) - r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) - if err != nil { - return written, err - } - err = r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - case nil: - default: - return written, err - } - - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - case chunkTypeUncompressedData: - if debugEncoder { - println("Uncompressed, chunklen", chunkLen) - } - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - buf := r.buf[:snappyChecksumSize] - if !r.readFull(buf, false) { - return written, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - snappyChecksumSize - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.literals = r.block.literals[:n] - if !r.readFull(r.block.literals, false) { - return written, r.err - } - if snappyCRC(r.block.literals) != checksum { - println("literals crc mismatch") - r.err = ErrSnappyCorrupt - return written, r.err - } - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - - case chunkTypeStreamIdentifier: - if debugEncoder { - println("stream id", chunkLen, len(snappyMagicBody)) - } - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(snappyMagicBody) { - println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) - r.err = ErrSnappyCorrupt - return written, r.err - } - if !r.readFull(r.buf[:len(snappyMagicBody)], false) { - return written, r.err - } - for i := 0; i < len(snappyMagicBody); i++ { - if r.buf[i] != snappyMagicBody[i] { - println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) - r.err = ErrSnappyCorrupt - return written, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
- println("chunkType <= 0x7f") - r.err = ErrSnappyUnsupported - return written, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return written, r.err - } - } -} - -// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read. -func decodeSnappy(blk *blockEnc, src []byte) error { - //decodeRef(make([]byte, snappyMaxBlockSize), src) - var s, length int - lits := blk.extraLits - var offset uint32 - for s < len(src) { - switch src[s] & 0x03 { - case snappyTagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - if x > snappyMaxBlockSize { - println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) - return ErrSnappyCorrupt - } - length = int(x) + 1 - if length <= 0 { - println("length <= 0 ", length) - - return errUnsupportedLiteralLength - } - //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { - // return ErrSnappyCorrupt - //} - - blk.literals = append(blk.literals, src[s:s+length]...) - //println(length, "litLen") - lits += length - s += length - continue - - case snappyTagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) - - case snappyTagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = uint32(src[s-2]) | uint32(src[s-1])<<8 - - case snappyTagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - - if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { - println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) - - return ErrSnappyCorrupt - } - - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
- if false { - offset = blk.matchOffset(offset, uint32(lits)) - } else { - offset += 3 - } - - blk.sequences = append(blk.sequences, seq{ - litLen: uint32(lits), - offset: offset, - matchLen: uint32(length) - zstdMinMatch, - }) - blk.size += length + lits - lits = 0 - } - blk.extraLits = lits - return nil -} - -func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrSnappyCorrupt - } - return false - } - return true -} - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func snappyCRC(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return c>>15 | c<<17 + 0xa282ead8 -} - -// snappyDecodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrSnappyCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrSnappyTooLarge - } - return int(v), n, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go deleted file mode 100644 index 29c15c8c4e..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "errors" - "io" - "sync" -) - -// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. -// See https://www.winzip.com/win/en/comp_info.html -const ZipMethodWinZip = 93 - -// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. -// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. -// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT -const ZipMethodPKWare = 20 - -// zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { - z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) - if err != nil { - panic(err) - } - return z -}} - -// newZipReader creates a pooled zip decompressor. -func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { - pool := &zipReaderPool - if len(opts) > 0 { - opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) - // Force concurrency 1 - opts = append(opts, WithDecoderConcurrency(1)) - // Create our own pool - pool = &sync.Pool{} - } - return func(r io.Reader) io.ReadCloser { - dec, ok := pool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, opts...) 
- if err != nil { - panic(err) - } - dec = d - } - return &pooledZipReader{dec: dec, pool: pool} - } -} - -type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - pool *sync.Pool - dec *Decoder -} - -func (r *pooledZipReader) Read(p []byte) (n int, err error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.dec == nil { - return 0, errors.New("read after close or EOF") - } - dec, err := r.dec.Read(p) - if err == io.EOF { - r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return dec, err -} - -func (r *pooledZipReader) Close() error { - r.mu.Lock() - defer r.mu.Unlock() - var err error - if r.dec != nil { - err = r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return err -} - -type pooledZipWriter struct { - mu sync.Mutex // guards Close and Read - enc *Encoder - pool *sync.Pool -} - -func (w *pooledZipWriter) Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - if w.enc == nil { - return 0, errors.New("Write after Close") - } - return w.enc.Write(p) -} - -func (w *pooledZipWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - var err error - if w.enc != nil { - err = w.enc.Close() - w.pool.Put(w.enc) - w.enc = nil - } - return err -} - -// ZipCompressor returns a compressor that can be registered with zip libraries. -// The provided encoder options will be used on all encodes. -func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { - var pool sync.Pool - return func(w io.Writer) (io.WriteCloser, error) { - enc, ok := pool.Get().(*Encoder) - if ok { - enc.Reset(w) - } else { - var err error - enc, err = NewWriter(w, opts...) - if err != nil { - return nil, err - } - } - return &pooledZipWriter{enc: enc, pool: &pool}, nil - } -} - -// ZipDecompressor returns a decompressor that can be registered with zip libraries. -// See ZipCompressor for example. -// Options can be specified. WithDecoderConcurrency(1) is forced, -// and by default a 128MB maximum decompression window is specified. -// The window size can be overridden if required. -func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { - return newZipReader(opts...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go deleted file mode 100644 index 3eb3f1c826..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ /dev/null @@ -1,152 +0,0 @@ -// Package zstd provides decompression of zstandard files. -// -// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "log" - "math" - "math/bits" -) - -// enable debug printing -const debug = false - -// enable encoding debug printing -const debugEncoder = debug - -// enable decoding debug printing -const debugDecoder = debug - -// Enable extra assertions. -const debugAsserts = debug || false - -// print sequence details -const debugSequences = false - -// print detailed matching information -const debugMatches = false - -// force encoder to use predefined tables. -const forcePreDef = false - -// zstdMinMatch is the minimum zstd match length. -const zstdMinMatch = 3 - -// Reset the buffer offset when reaching this. -const bufferReset = math.MaxInt32 - MaxWindowSize - -// fcsUnknown is used for unknown frame content size. -const fcsUnknown = math.MaxUint64 - -var ( - // ErrReservedBlockType is returned when a reserved block type is found. - // Typically this indicates wrong or corrupted input. 
- ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") - - // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. - // Typically this indicates wrong or corrupted input. - ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") - - // ErrBlockTooSmall is returned when a block is too small to be decoded. - // Typically returned on invalid input. - ErrBlockTooSmall = errors.New("block too small") - - // ErrUnexpectedBlockSize is returned when a block has unexpected size. - // Typically returned on invalid input. - ErrUnexpectedBlockSize = errors.New("unexpected block size") - - // ErrMagicMismatch is returned when a "magic" number isn't what is expected. - // Typically this indicates wrong or corrupted input. - ErrMagicMismatch = errors.New("invalid input: magic number mismatch") - - // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeExceeded = errors.New("window size exceeded") - - // ErrWindowSizeTooSmall is returned when no window size is specified. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") - - // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. - ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") - - // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. - ErrUnknownDictionary = errors.New("unknown dictionary") - - // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeExceeded = errors.New("frame size exceeded") - - // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") - - // ErrCRCMismatch is returned if CRC mismatches. - ErrCRCMismatch = errors.New("CRC check failed") - - // ErrDecoderClosed will be returned if the Decoder was used after - // Close has been called. - ErrDecoderClosed = errors.New("decoder used after Close") - - // ErrDecoderNilInput is returned when a nil Reader was provided - // and an operation other than Reset/DecodeAll/Close was attempted. - ErrDecoderNilInput = errors.New("nil input provided as reader") -) - -func println(a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Println(a...) - } -} - -func printf(format string, a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Printf(format, a...) - } -} - -// matchLen returns the maximum length. -// a must be the shortest of the two. -// The function also returns whether all bytes matched. 
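A worked example of the 8-byte probe the function defined next relies on: XORing equal prefixes yields zero bits, so for little-endian loads the index of the first differing byte is TrailingZeros64(diff)/8.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	a := []byte("abcdefgh")
	b := []byte("abcXefgh")
	// Bytes 0-2 match and XOR to zero; byte 3 ('d'^'X') is the first
	// nonzero byte, occupying bits 24-31 of the little-endian word.
	diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
	fmt.Println(bits.TrailingZeros64(diff) >> 3) // 3: first mismatch at index 3
}
```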
-func matchLen(a, b []byte) int { - b = b[:len(a)] - for i := 0; i < len(a)-7; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + (bits.TrailingZeros64(diff) >> 3) - } - } - - checked := (len(a) >> 3) << 3 - a = a[checked:] - b = b[checked:] - for i := range a { - if a[i] != b[i] { - return i + checked - } - } - return len(a) + checked -} - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - -func load64(b []byte, i int) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - -type byter interface { - Bytes() []byte - Len() int -} - -var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/kunwardeep/paralleltest/pkg/paralleltest/paralleltest.go b/vendor/github.com/kunwardeep/paralleltest/pkg/paralleltest/paralleltest.go index c7da52a200..e9187d6fdb 100644 --- a/vendor/github.com/kunwardeep/paralleltest/pkg/paralleltest/paralleltest.go +++ b/vendor/github.com/kunwardeep/paralleltest/pkg/paralleltest/paralleltest.go @@ -7,7 +7,6 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" ) @@ -16,28 +15,38 @@ It also checks that the t.Parallel is used if multiple tests cases are run as pa As part of ensuring parallel tests works as expected it checks for reinitialising of the range value over the test cases.(https://tinyurl.com/y6555cy6)` -var Analyzer = &analysis.Analyzer{ - Name: "paralleltest", - Doc: Doc, - Run: run, - Flags: flags(), - Requires: []*analysis.Analyzer{inspect.Analyzer}, +func NewAnalyzer() *analysis.Analyzer { + return newParallelAnalyzer().analyzer } -const ignoreMissingFlag = "i" - -func flags() flag.FlagSet { - options := flag.NewFlagSet("", flag.ExitOnError) - options.Bool(ignoreMissingFlag, false, "ignore missing calls to t.Parallel") - return *options +// parallelAnalyzer is an internal analyzer that makes options available to a +// run pass. It wraps an `analysis.Analyzer` that should be returned for +// linters. 
+type parallelAnalyzer struct { + analyzer *analysis.Analyzer + ignoreMissing bool + ignoreMissingSubtests bool + ignoreLoopVar bool } -type boolValue bool +func newParallelAnalyzer() *parallelAnalyzer { + a := &parallelAnalyzer{} -func run(pass *analysis.Pass) (interface{}, error) { + var flags flag.FlagSet + flags.BoolVar(&a.ignoreMissing, "i", false, "ignore missing calls to t.Parallel") + flags.BoolVar(&a.ignoreMissingSubtests, "ignoremissingsubtests", false, "ignore missing calls to t.Parallel in subtests") + flags.BoolVar(&a.ignoreLoopVar, "ignoreloopVar", false, "ignore loop variable detection") - ignoreMissing := pass.Analyzer.Flags.Lookup(ignoreMissingFlag).Value.(flag.Getter).Get().(bool) + a.analyzer = &analysis.Analyzer{ + Name: "paralleltest", + Doc: Doc, + Run: a.run, + Flags: flags, + } + return a +} +func (a *parallelAnalyzer) run(pass *analysis.Pass) (interface{}, error) { inspector := inspector.New(pass.Files) nodeFilter := []ast.Node{ @@ -47,8 +56,10 @@ func run(pass *analysis.Pass) (interface{}, error) { inspector.Preorder(nodeFilter, func(node ast.Node) { funcDecl := node.(*ast.FuncDecl) var funcHasParallelMethod, + funcCantParallelMethod, rangeStatementOverTestCasesExists, - rangeStatementHasParallelMethod bool + rangeStatementHasParallelMethod, + rangeStatementCantParallelMethod bool var loopVariableUsedInRun *string var numberOfTestRun int var positionOfTestRunNode []ast.Node @@ -70,20 +81,29 @@ func run(pass *analysis.Pass) (interface{}, error) { funcHasParallelMethod = methodParallelIsCalledInTestFunction(n, testVar) } + // Check if the test calls t.Setenv, which cannot be used in parallel tests or in tests with parallel ancestors + if !funcCantParallelMethod { + funcCantParallelMethod = methodSetenvIsCalledInTestFunction(n, testVar) + } + // Check if the t.Run within the test function is calling t.Parallel if methodRunIsCalledInTestFunction(n, testVar) { // n is a call to t.Run; find out the name of the subtest's *testing.T parameter.
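For context on the Setenv handling added in this hunk: t.Setenv mutates process-wide state, and the standard testing package panics if a test that calls it also calls t.Parallel (or has a parallel ancestor). A minimal, hypothetical test of the shape the analyzer must now leave alone:

```go
package example

import (
	"os"
	"testing"
)

// TestPathOverride is the kind of test the funcCantParallelMethod check
// exists for: it may not call t.Parallel, so the analyzer should not
// report it as missing one.
func TestPathOverride(t *testing.T) {
	t.Setenv("PATH", "/tmp/fake") // affects the whole process
	// t.Parallel() // would panic: Setenv cannot be used in parallel tests
	if got := os.Getenv("PATH"); got != "/tmp/fake" {
		t.Fatalf("unexpected PATH %q", got)
	}
}
```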
innerTestVar := getRunCallbackParameterName(n) hasParallel := false + cantParallel := false numberOfTestRun++ ast.Inspect(v, func(p ast.Node) bool { if !hasParallel { hasParallel = methodParallelIsCalledInTestFunction(p, innerTestVar) } + if !cantParallel { + cantParallel = methodSetenvIsCalledInTestFunction(p, innerTestVar) + } return true }) - if !hasParallel { + if !hasParallel && !cantParallel { positionOfTestRunNode = append(positionOfTestRunNode, n) } } @@ -115,7 +135,11 @@ func run(pass *analysis.Pass) (interface{}, error) { rangeStatementHasParallelMethod = methodParallelIsCalledInMethodRun(r.X, innerTestVar) } - if loopVariableUsedInRun == nil { + if !rangeStatementCantParallelMethod { + rangeStatementCantParallelMethod = methodSetenvIsCalledInMethodRun(r.X, innerTestVar) + } + + if !a.ignoreLoopVar && loopVariableUsedInRun == nil { if run, ok := r.X.(*ast.CallExpr); ok { loopVariableUsedInRun = loopVarReferencedInRun(run, loopVars, pass.TypesInfo) } @@ -127,13 +151,18 @@ func run(pass *analysis.Pass) (interface{}, error) { } } - if !ignoreMissing && !funcHasParallelMethod { + // Descendants that call Setenv also prevent tests from calling Parallel + if rangeStatementCantParallelMethod { + funcCantParallelMethod = true + } + + if !a.ignoreMissing && !funcHasParallelMethod && !funcCantParallelMethod { pass.Reportf(node.Pos(), "Function %s missing the call to method parallel\n", funcDecl.Name.Name) } if rangeStatementOverTestCasesExists && rangeNode != nil { - if !rangeStatementHasParallelMethod { - if !ignoreMissing { + if !rangeStatementHasParallelMethod && !rangeStatementCantParallelMethod { + if !a.ignoreMissing && !a.ignoreMissingSubtests { pass.Reportf(rangeNode.Pos(), "Range statement for test %s missing the call to method parallel in test Run\n", funcDecl.Name.Name) } } else if loopVariableUsedInRun != nil { @@ -142,10 +171,10 @@ func run(pass *analysis.Pass) (interface{}, error) { } // Check if the t.Run is more than one as there is no point making one test parallel - if !ignoreMissing { + if !a.ignoreMissing && !a.ignoreMissingSubtests { if numberOfTestRun > 1 && len(positionOfTestRunNode) > 0 { for _, n := range positionOfTestRunNode { - pass.Reportf(n.Pos(), "Function %s has missing the call to method parallel in the test run\n", funcDecl.Name.Name) + pass.Reportf(n.Pos(), "Function %s missing the call to method parallel in the test run\n", funcDecl.Name.Name) } } } @@ -155,15 +184,23 @@ func run(pass *analysis.Pass) (interface{}, error) { } func methodParallelIsCalledInMethodRun(node ast.Node, testVar string) bool { - var methodParallelCalled bool + return targetMethodIsCalledInMethodRun(node, testVar, "Parallel") +} + +func methodSetenvIsCalledInMethodRun(node ast.Node, testVar string) bool { + return targetMethodIsCalledInMethodRun(node, testVar, "Setenv") +} + +func targetMethodIsCalledInMethodRun(node ast.Node, testVar, targetMethod string) bool { + var called bool // nolint: gocritic switch callExp := node.(type) { case *ast.CallExpr: for _, arg := range callExp.Args { - if !methodParallelCalled { + if !called { ast.Inspect(arg, func(n ast.Node) bool { - if !methodParallelCalled { - methodParallelCalled = methodParallelIsCalledInRunMethod(n, testVar) + if !called { + called = exprCallHasMethod(n, testVar, targetMethod) return true } return false @@ -171,11 +208,7 @@ func methodParallelIsCalledInMethodRun(node ast.Node, testVar string) bool { } } } - return methodParallelCalled -} - -func methodParallelIsCalledInRunMethod(node ast.Node, testVar string) bool { -
return exprCallHasMethod(node, testVar, "Parallel") + return called } func methodParallelIsCalledInTestFunction(node ast.Node, testVar string) bool { @@ -189,6 +222,11 @@ func methodRunIsCalledInRangeStatement(node ast.Node, testVar string) bool { func methodRunIsCalledInTestFunction(node ast.Node, testVar string) bool { return exprCallHasMethod(node, testVar, "Run") } + +func methodSetenvIsCalledInTestFunction(node ast.Node, testVar string) bool { + return exprCallHasMethod(node, testVar, "Setenv") +} + func exprCallHasMethod(node ast.Node, receiverName, methodName string) bool { // nolint: gocritic switch n := node.(type) { @@ -249,7 +287,9 @@ func isTestFunction(funcDecl *ast.FuncDecl) (bool, string) { if selectExpr, ok := starExp.X.(*ast.SelectorExpr); ok { if selectExpr.Sel.Name == testMethodStruct { if s, ok := selectExpr.X.(*ast.Ident); ok { - return s.Name == testMethodPackageType, param.Names[0].Name + if len(param.Names) > 0 { + return s.Name == testMethodPackageType, param.Names[0].Name + } } } } diff --git a/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml b/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml index 22ff44040c..95d44aaac3 100644 --- a/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml +++ b/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml @@ -1,43 +1,51 @@ +# yaml-language-server: $schema=https://goreleaser.com/static/schema.json + project_name: exportloopref -release: - github: - owner: kyoh86 - name: exportloopref -brews: -- install: | - bin.install "exportloopref" - github: - owner: kyoh86 - name: homebrew-tap - folder: Formula - homepage: https://github.com/kyoh86/exportloopref - description: An analyzer that finds exporting pointers for loop variables. builds: -- goos: - - linux - - darwin - - windows - goarch: - - amd64 - - "386" - main: ./cmd/exportloopref - ldflags: -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}} - binary: exportloopref + - id: default + goos: + - linux + - darwin + - windows + goarch: + - amd64 + - arm64 + - "386" + main: ./cmd/exportloopref + binary: exportloopref +brews: + - install: | + bin.install "exportloopref" + tap: + owner: kyoh86 + name: homebrew-tap + folder: Formula + homepage: https://github.com/kyoh86/exportloopref + description: An analyzer that finds exporting pointers for loop variables. + license: MIT +nfpms: + - builds: + - default + maintainer: kyoh86 + homepage: https://github.com/kyoh86/exportloopref + description: An analyzer that finds exporting pointers for loop variables. + license: MIT + formats: + - apk + - deb + - rpm archives: -- id: gzip - format: tar.gz - format_overrides: - - goos: windows - format: zip - name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" - files: - - licence* - - LICENCE* - - license* - - LICENSE* - - readme* - - README* - - changelog* - - CHANGELOG* -snapshot: - name_template: SNAPSHOT-{{ .Commit }} + - id: gzip + format: tar.gz + format_overrides: + - goos: windows + format: zip + files: + - licence* + - LICENCE* + - license* + - LICENSE* + - readme* + - README* + - changelog* + - CHANGELOG* diff --git a/vendor/github.com/kyoh86/exportloopref/README.md b/vendor/github.com/kyoh86/exportloopref/README.md index 5c019c7380..0f581ffcee 100644 --- a/vendor/github.com/kyoh86/exportloopref/README.md +++ b/vendor/github.com/kyoh86/exportloopref/README.md @@ -1,6 +1,8 @@ # exportloopref An analyzer that finds exporting pointers for loop variables. 
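Before Go 1.22, a `for` loop reused a single variable across iterations, so taking its address inside the loop and storing it outside exported a pointer shared by every iteration. A minimal sketch of the bug class this analyzer reports, and of the `p := p` pin its suggested fix inserts (an illustrative example; the names are not taken from the vendored testdata):

```go
package main

import "fmt"

func main() {
	// BUG (pre-Go 1.22 loop semantics): &p is the address of the one loop
	// variable, so every stored pointer refers to the same int (13).
	var refs []*int
	for _, p := range []int{10, 11, 12, 13} {
		refs = append(refs, &p)
	}
	fmt.Println(*refs[0], *refs[3]) // prints "13 13", not "10 13"

	// The analyzer's suggested fix "pins" the variable by shadowing it,
	// giving each iteration its own copy whose address is safe to export.
	var pinned []*int
	for _, p := range []int{10, 11, 12, 13} {
		p := p // pin
		pinned = append(pinned, &p)
	}
	fmt.Println(*pinned[0], *pinned[3]) // prints "10 13"
}
```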
+![](https://repository-images.githubusercontent.com/256768552/a1c5bb80-dd73-11eb-9453-e520f517e730) +Pin them all! [![PkgGoDev](https://pkg.go.dev/badge/kyoh86/exportloopref)](https://pkg.go.dev/kyoh86/exportloopref) [![Go Report Card](https://goreportcard.com/badge/github.com/kyoh86/exportloopref)](https://goreportcard.com/report/github.com/kyoh86/exportloopref) @@ -9,7 +11,7 @@ An analyzer that finds exporting pointers for loop variables. ## What's this? -Sample problem code from: https://github.com/kyoh86/exportloopref/blob/master/testdata/src/simple/simple.go +Sample problem code from: https://github.com/kyoh86/exportloopref/blob/main/testdata/src/simple/simple.go ```go package main @@ -109,7 +111,7 @@ func printp(p *int) { } ``` -ref: https://github.com/kyoh86/exportloopref/blob/master/testdata/src/fixed/fixed.go +ref: https://github.com/kyoh86/exportloopref/blob/main/testdata/src/fixed/fixed.go ## Sensing policy @@ -120,7 +122,7 @@ e.g. ```go var s Foo -for _, p := []int{10, 11, 12, 13} { +for _, p := range []int{10, 11, 12, 13} { s.Bar(&p) // If s stores the pointer, it will be a bug. } ``` diff --git a/vendor/github.com/kyoh86/exportloopref/exportloopref.go b/vendor/github.com/kyoh86/exportloopref/exportloopref.go index 4d1671a060..d071d5c35f 100644 --- a/vendor/github.com/kyoh86/exportloopref/exportloopref.go +++ b/vendor/github.com/kyoh86/exportloopref/exportloopref.go @@ -17,21 +17,15 @@ var Analyzer = &analysis.Analyzer{ Run: run, RunDespiteErrors: true, Requires: []*analysis.Analyzer{inspect.Analyzer}, - // ResultType reflect.Type - // FactTypes []Fact -} - -func init() { - // Analyzer.Flags.StringVar(&v, "name", "default", "description") } func run(pass *analysis.Pass) (interface{}, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) search := &Searcher{ - Stats: map[token.Pos]struct{}{}, - Vars: map[token.Pos]map[token.Pos]struct{}{}, - Types: pass.TypesInfo.Types, + LoopVars: map[token.Pos]struct{}{}, + LocalVars: map[token.Pos]map[token.Pos]struct{}{}, + Pass: pass, } nodeFilter := []ast.Node{ @@ -42,50 +36,60 @@ func run(pass *analysis.Pass) (interface{}, error) { (*ast.UnaryExpr)(nil), } - inspect.WithStack(nodeFilter, func(n ast.Node, push bool, stack []ast.Node) bool { - id, insert, digg := search.Check(n, stack) - if id != nil { - dMsg := fmt.Sprintf("exporting a pointer for the loop variable %s", id.Name) - fMsg := fmt.Sprintf("loop variable %s should be pinned", id.Name) - var suggest []analysis.SuggestedFix - if insert != token.NoPos { - suggest = []analysis.SuggestedFix{{ - Message: fMsg, - TextEdits: []analysis.TextEdit{{ - Pos: insert, - End: insert, - NewText: []byte(fmt.Sprintf("%[1]s := %[1]s\n", id.Name)), - }}, - }} - } - d := analysis.Diagnostic{Pos: id.Pos(), - End: id.End(), - Message: dMsg, - Category: "exportloopref", - SuggestedFixes: suggest, - } - pass.Report(d) - } - return digg - }) + inspect.WithStack(nodeFilter, search.CheckAndReport) return nil, nil } type Searcher struct { - // Statement variables : map to collect positions that - // variables are declared like below. + // LoopVars is positions that loop-variables are declared like below. // - for <KEY>, <VALUE> := range ... - // - var <X> int - // - D := ... - Stats map[token.Pos]struct{} - // Local variables maps loop-position, decl-location to ignore - // safe pointers for variable which declared in the loop.
- Vars map[token.Pos]map[token.Pos]struct{} - Types map[ast.Expr]types.TypeAndValue + // - for <VAR> := <LH>; <COND>; <POST> + LoopVars map[token.Pos]struct{} + // LocalVars is positions of loops and the variables declared in them. + // Use this to determine if a pointer assignment is an export outside the loop. + LocalVars map[token.Pos]map[token.Pos]struct{} + + Pass *analysis.Pass +} + +// CheckAndReport inspects each node with stack. +// It implements the callback interface expected by "golang.org/x/tools/go/analysis/passes/inspect".Analysis.WithStack. +func (s *Searcher) CheckAndReport(n ast.Node, push bool, stack []ast.Node) bool { + id, insert, digg := s.Check(n, stack) + if id == nil { + // no prob. + return digg + } + + // suggest a fix + var suggest []analysis.SuggestedFix + if insert != token.NoPos { + suggest = []analysis.SuggestedFix{{ + Message: fmt.Sprintf("loop variable %s should be pinned", id.Name), + TextEdits: []analysis.TextEdit{{ + Pos: insert, + End: insert, + NewText: []byte(fmt.Sprintf("%[1]s := %[1]s\n", id.Name)), + }}, + }} + } + + // report a diagnostic + d := analysis.Diagnostic{Pos: id.Pos(), + End: id.End(), + Message: fmt.Sprintf("exporting a pointer for the loop variable %s", id.Name), + Category: "exportloopref", + SuggestedFixes: suggest, + } + s.Pass.Report(d) + return digg } -func (s *Searcher) Check(n ast.Node, stack []ast.Node) (*ast.Ident, token.Pos, bool) { +// Check inspects each node and stack to see whether it exports loop variables. +// On finding an export, it reports the *ast.Ident of the exported loop variable, +// and the token.Pos at which to insert an assignment that fixes the diagnostic. +func (s *Searcher) Check(n ast.Node, stack []ast.Node) (loopVar *ast.Ident, insertPos token.Pos, digg bool) { switch typed := n.(type) { case *ast.RangeStmt: s.parseRangeStmt(typed) @@ -102,72 +106,92 @@ func (s *Searcher) Check(n ast.Node, stack []ast.Node) (*ast.Ident, token.Pos, b return nil, token.NoPos, true } +// parseRangeStmt will check a range statement (i.e. `for <KEY>, <VALUE> := range ...`), +// and collect positions of <KEY> and <VALUE>. func (s *Searcher) parseRangeStmt(n *ast.RangeStmt) { - s.addStat(n.Key) - s.addStat(n.Value) + s.storeLoopVars(n.Key) + s.storeLoopVars(n.Value) } +// parseForStmt will check a for statement (i.e. `for <VAR> := <LH>; <COND>; <POST>`), +// and collect positions of <VAR>. func (s *Searcher) parseForStmt(n *ast.ForStmt) { switch post := n.Post.(type) { case *ast.AssignStmt: // e.g. for p = head; p != nil; p = p.next for _, lhs := range post.Lhs { - s.addStat(lhs) + s.storeLoopVars(lhs) } case *ast.IncDecStmt: // e.g. for i := 0; i < n; i++ - s.addStat(post.X) + s.storeLoopVars(post.X) } } -func (s *Searcher) addStat(expr ast.Expr) { +func (s *Searcher) storeLoopVars(expr ast.Expr) { if id, ok := expr.(*ast.Ident); ok { - s.Stats[id.Pos()] = struct{}{} + s.LoopVars[id.Pos()] = struct{}{} } } +// parseDeclStmt will parse a declaring statement (i.e. `var`, `type`, `const`), +// and store the position if it is a "var" declaration inside any loop. func (s *Searcher) parseDeclStmt(n *ast.DeclStmt, stack []ast.Node) { + genDecl, ok := n.Decl.(*ast.GenDecl) + if !ok { + // (dead branch) + // if the Decl is not GenDecl (i.e. 
`var`, `type` or `const` statement), it is ignored + return + } + if genDecl.Tok != token.VAR { + // if the Decl is not `var` (may be `type` or `const`), it is ignored + return + } + loop, _ := s.innermostLoop(stack) if loop == nil { return } - // Register declaring variables - if genDecl, ok := n.Decl.(*ast.GenDecl); ok && genDecl.Tok == token.VAR { - for _, spec := range genDecl.Specs { - for _, name := range spec.(*ast.ValueSpec).Names { - s.addVar(loop, name) - } + // Register declared variables + for _, spec := range genDecl.Specs { + for _, name := range spec.(*ast.ValueSpec).Names { + s.storeLocalVar(loop, name) } } } +// parseAssignStmt will parse an assignment statement (i.e. `<VAR> = <VALUE>`), +// and store the position if it is a definition (`<VAR> := <VALUE>`) inside a loop. func (s *Searcher) parseAssignStmt(n *ast.AssignStmt, stack []ast.Node) { + if n.Tok != token.DEFINE { + // if the statement is a simple assignment (without definition), it is ignored + return + } + loop, _ := s.innermostLoop(stack) if loop == nil { return } // Find statements declaring local variable - if n.Tok == token.DEFINE { - for _, h := range n.Lhs { - s.addVar(loop, h) - } + for _, h := range n.Lhs { + s.storeLocalVar(loop, h) } } -func (s *Searcher) addVar(loop ast.Node, expr ast.Expr) { +func (s *Searcher) storeLocalVar(loop ast.Node, expr ast.Expr) { loopPos := loop.Pos() id, ok := expr.(*ast.Ident) if !ok { return } - vars, ok := s.Vars[loopPos] + vars, ok := s.LocalVars[loopPos] if !ok { vars = map[token.Pos]struct{}{} } vars[id.Obj.Pos()] = struct{}{} - s.Vars[loopPos] = vars + s.LocalVars[loopPos] = vars } func insertionPosition(block *ast.BlockStmt) token.Pos { @@ -189,13 +213,15 @@ func (s *Searcher) innermostLoop(stack []ast.Node) (ast.Node, token.Pos) { return nil, token.NoPos } +// checkUnaryExpr checks a unary expression (i.e. like `-x`, `*p` or `&v`) and its stack. +// THIS IS THE ESSENTIAL PART OF THIS PARSER. func (s *Searcher) checkUnaryExpr(n *ast.UnaryExpr, stack []ast.Node) (*ast.Ident, token.Pos, bool) { - loop, insert := s.innermostLoop(stack) - if loop == nil { + if n.Op != token.AND { return nil, token.NoPos, true } - if n.Op != token.AND { + loop, insert := s.innermostLoop(stack) + if loop == nil { return nil, token.NoPos, true } @@ -207,7 +233,7 @@ func (s *Searcher) checkUnaryExpr(n *ast.UnaryExpr, stack []ast.Iden // If the identity is not the loop statement variable, // it will not be reported. 
- if _, isStat := s.Stats[id.Obj.Pos()]; !isStat { + if _, isDecl := s.LoopVars[id.Obj.Pos()]; !isDecl { return nil, token.NoPos, true } @@ -266,12 +292,15 @@ func (s *Searcher) checkUnaryExpr(n *ast.UnaryExpr, stack []ast.Node) (*ast.Iden } func (s *Searcher) isVar(loop ast.Node, expr ast.Expr) bool { - vars := s.Vars[loop.Pos()] // map[token.Pos]struct{} + vars := s.LocalVars[loop.Pos()] // map[token.Pos]struct{} if vars == nil { return false } switch typed := expr.(type) { case (*ast.Ident): + if typed.Obj == nil { + return false // global var in another file (ref: #13) + } _, isVar := vars[typed.Obj.Pos()] return isVar case (*ast.IndexExpr): // like X[Y], check X @@ -287,7 +316,7 @@ func (s *Searcher) getIdentity(expr ast.Expr) *ast.Ident { switch typed := expr.(type) { case *ast.SelectorExpr: // Ignore if the parent is pointer ref (fix for #2) - if _, ok := s.Types[typed.X].Type.(*types.Pointer); ok { + if _, ok := s.Pass.TypesInfo.Types[typed.X].Type.(*types.Pointer); ok { return nil } diff --git a/vendor/github.com/ldez/tagliatelle/.golangci.yml b/vendor/github.com/ldez/tagliatelle/.golangci.yml index 53313e308e..ec5c5c7661 100644 --- a/vendor/github.com/ldez/tagliatelle/.golangci.yml +++ b/vendor/github.com/ldez/tagliatelle/.golangci.yml @@ -24,7 +24,7 @@ linters-settings: gofumpt: extra-rules: true depguard: - list-type: blacklist + list-type: denylist include-go-root: false packages: - github.com/sirupsen/logrus @@ -46,12 +46,19 @@ linters-settings: linters: enable-all: true disable: + - deadcode # deprecated + - exhaustivestruct # deprecated - golint # deprecated - - maligned # deprecated + - ifshort # deprecated - interfacer # deprecated + - maligned # deprecated + - nosnakecase # deprecated - scopelint # deprecated + - structcheck # deprecated + - varcheck # deprecated - sqlclosecheck # not relevant (SQL) - rowserrcheck # not relevant (SQL) + - execinquery # not relevant (SQL) - cyclop # duplicate of gocyclo - lll - dupl @@ -61,7 +68,7 @@ linters: - goerr113 - wrapcheck - exhaustive - - exhaustivestruct + - exhaustruct - testpackage - tparallel - paralleltest @@ -71,9 +78,11 @@ linters: - varnamelen - nilnil - errchkjson + - nonamedreturns issues: exclude-use-default: false max-per-linter: 0 max-same-issues: 0 - exclude: [] + exclude: + - 'package-comments: should have a package comment' diff --git a/vendor/github.com/ldez/tagliatelle/readme.md b/vendor/github.com/ldez/tagliatelle/readme.md index 85849eab43..55a544db81 100644 --- a/vendor/github.com/ldez/tagliatelle/readme.md +++ b/vendor/github.com/ldez/tagliatelle/readme.md @@ -11,10 +11,12 @@ Supported string casing: - `pascal` - `kebab` - `snake` +- `upperSnake` - `goCamel` Respects [Go's common initialisms](https://github.com/golang/lint/blob/83fdc39ff7b56453e3793356bcff3070b9b96445/lint.go#L770-L809) (e.g. HttpResponse -> HTTPResponse). - `goPascal` Respects [Go's common initialisms](https://github.com/golang/lint/blob/83fdc39ff7b56453e3793356bcff3070b9b96445/lint.go#L770-L809) (e.g. HttpResponse -> HTTPResponse). - `goKebab` Respects [Go's common initialisms](https://github.com/golang/lint/blob/83fdc39ff7b56453e3793356bcff3070b9b96445/lint.go#L770-L809) (e.g. HttpResponse -> HTTPResponse). - `goSnake` Respects [Go's common initialisms](https://github.com/golang/lint/blob/83fdc39ff7b56453e3793356bcff3070b9b96445/lint.go#L770-L809) (e.g. HttpResponse -> HTTPResponse). 
+- `header` - `upper` - `lower` @@ -44,18 +46,18 @@ Supported string casing: | NameJSON | NameJson | NameJSON | | UneTête | UneTête | UneTête | -| Source | Snake Case | Go Snake Case | -|----------------|------------------|------------------| -| GooID | goo_id | goo_ID | -| HTTPStatusCode | http_status_code | HTTP_status_code | -| FooBAR | foo_bar | foo_bar | -| URL | url | URL | -| ID | id | ID | -| hostIP | host_ip | host_IP | -| JSON | json | JSON | -| JSONName | json_name | JSON_name | -| NameJSON | name_json | name_JSON | -| UneTête | une_tête | une_tête | +| Source | Snake Case | Upper Snake Case | Go Snake Case | +|----------------|------------------|------------------|------------------| +| GooID | goo_id | GOO_ID | goo_ID | +| HTTPStatusCode | http_status_code | HTTP_STATUS_CODE | HTTP_status_code | +| FooBAR | foo_bar | FOO_BAR | foo_bar | +| URL | url | URL | URL | +| ID | id | ID | ID | +| hostIP | host_ip | HOST_IP | host_IP | +| JSON | json | JSON | JSON | +| JSONName | json_name | JSON_NAME | JSON_name | +| NameJSON | name_json | NAME_JSON | name_JSON | +| UneTête | une_tête | UNE_TÊTE | une_tête | | Source | Kebab Case | Go KebabCase | |----------------|------------------|------------------| @@ -70,6 +72,18 @@ Supported string casing: | NameJSON | name-json | name-JSON | | UneTête | une-tête | une-tête | +| Source | Header Case | +|----------------|------------------| +| GooID | Goo-Id | +| HTTPStatusCode | Http-Status-Code | +| FooBAR | Foo-Bar | +| URL | Url | +| ID | Id | +| hostIP | Host-Ip | +| JSON | Json | +| JSONName | Json-Name | +| NameJSON | Name-Json | +| UneTête | Une-Tête | ## Examples @@ -82,3 +96,82 @@ type Foo struct { Value string `json:"val,omitempty"`// must be "value" } ``` + +## What this tool is about + +This tool is about validating tags according to rules you define. +The tool also allows you to fix tags according to the rules you defined. + +This tool is not intended to validate whether a tag itself is valid or not. +To do that, you can use `go vet`, or the [golangci-lint](https://golangci-lint.run) ["go vet"](https://golangci-lint.run/usage/linters/#govet) linter. + +## How to use the tool + +### As a golangci-lint linter + +Define the rules you want via your [golangci-lint](https://golangci-lint.run) configuration file: + +```yaml +linters-settings: + tagliatelle: + # Check the struct tag name case. + case: + # Use the struct field name to check the name of the struct tag. + # Default: false + use-field-name: true + rules: + # Any struct tag type can be used. + # Support string case: `camel`, `pascal`, `kebab`, `snake`, `upperSnake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header`. + json: camel + yaml: camel + xml: camel +``` + +More information here: https://golangci-lint.run/usage/linters/#tagliatelle + +### Install and run it from the binary + +Not recommended. + +```shell +go install github.com/ldez/tagliatelle/cmd/tagliatelle@latest +``` + +Then launch it manually. + +## Rules + +Here are the default rules for the well-known and commonly used tags, when using tagliatelle as a binary or [golangci-lint linter](https://golangci-lint.run/usage/linters/#tagliatelle): + +- `json`: `camel` +- `yaml`: `camel` +- `xml`: `camel` +- `bson`: `camel` +- `avro`: `snake` +- `header`: `header` +- `envconfig`: `upperSnake` + +### Custom Rules + +The tool is not limited to the tags used in the examples; you can use it to validate any tag. + +You can add your own tag, for example `whatever`, and tell the tool you want to use `kebab`.
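For instance, under a `whatever: kebab` rule like the one configured below, the analyzer would accept the first tag and flag the second (an illustrative struct, not taken from the tagliatelle docs):

```go
type Release struct {
	// OK: "release-name" is kebab case.
	Name string `whatever:"release-name"`
	// Flagged: "releaseDate" is camel case; the kebab rule wants "release-date".
	Date string `whatever:"releaseDate"`
}
```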
+ +This option is only available via [golangci-lint](https://golangci-lint.run). + +```yaml +linters-settings: + tagliatelle: + # Check the struct tag name case. + case: + # Use the struct field name to check the name of the struct tag. + # Default: false + use-field-name: true + rules: + # Any struct tag type can be used. + # Support string case: `camel`, `pascal`, `kebab`, `snake`, `upperSnake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header` + json: camel + yaml: camel + xml: camel + whatever: kebab +``` diff --git a/vendor/github.com/ldez/tagliatelle/tagliatelle.go b/vendor/github.com/ldez/tagliatelle/tagliatelle.go index 53e77d1cb0..22c5feb3d8 100644 --- a/vendor/github.com/ldez/tagliatelle/tagliatelle.go +++ b/vendor/github.com/ldez/tagliatelle/tagliatelle.go @@ -200,11 +200,19 @@ func getConverter(c string) (func(s string) string, error) { return strcase.ToGoKebab, nil case "goSnake": return strcase.ToGoSnake, nil + case "header": + return toHeader, nil case "upper": return strings.ToUpper, nil + case "upperSnake": + return strcase.ToSNAKE, nil case "lower": return strings.ToLower, nil default: return nil, fmt.Errorf("unsupported case: %s", c) } } + +func toHeader(s string) string { + return strcase.ToCase(s, strcase.TitleCase, '-') +} diff --git a/vendor/github.com/leonklingele/grouper/LICENSE b/vendor/github.com/leonklingele/grouper/LICENSE new file mode 100644 index 0000000000..f288702d2f --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights.
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/analyzer.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/analyzer.go new file mode 100644 index 0000000000..9852c78380 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/analyzer.go @@ -0,0 +1,89 @@ +package analyzer + +import ( + "fmt" + "go/ast" + + "github.com/leonklingele/grouper/pkg/analyzer/consts" + "github.com/leonklingele/grouper/pkg/analyzer/imports" + "github.com/leonklingele/grouper/pkg/analyzer/types" + "github.com/leonklingele/grouper/pkg/analyzer/vars" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" +) + +const ( + Name = "grouper" + Doc = `expression group analyzer: require 'import', 'const', 'var' and/or 'type' declaration groups` +) + +func New() *analysis.Analyzer { + return &analysis.Analyzer{ //nolint:exhaustivestruct // we do not need all fields + Name: Name, + Doc: Doc, + Flags: Flags(), + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } +} + +func run(p *analysis.Pass) (interface{}, error) { + flagLookupBool := func(name string) bool { + return p.Analyzer.Flags.Lookup(name).Value.String() == "true" + } + + c := &Config{ + ConstsConfig: &consts.Config{ + RequireSingleConst: flagLookupBool(FlagNameConstRequireSingleConst), + RequireGrouping: flagLookupBool(FlagNameConstRequireGrouping), + }, + + ImportsConfig: &imports.Config{ + RequireSingleImport: flagLookupBool(FlagNameImportRequireSingleImport), + RequireGrouping: flagLookupBool(FlagNameImportRequireGrouping), + }, + + TypesConfig: &types.Config{ + RequireSingleType: flagLookupBool(FlagNameTypeRequireSingleType), + RequireGrouping: flagLookupBool(FlagNameTypeRequireGrouping), + }, + + VarsConfig: &vars.Config{ + RequireSingleVar: flagLookupBool(FlagNameVarRequireSingleVar), + RequireGrouping: flagLookupBool(FlagNameVarRequireGrouping), + }, + } + + return nil, pass(c, p) +} + +func pass(c *Config, p *analysis.Pass) error { + for _, f := range p.Files { + if err := filepass(c, p, f); err != nil { + return err + } + } + + return nil +} + +func filepass(c *Config, p *analysis.Pass, f *ast.File) error { + if err := consts.Filepass(c.ConstsConfig, p, f); err != nil { + return fmt.Errorf("failed to consts.Filepass: %w", err) + } + + if err := imports.Filepass(c.ImportsConfig, p, f); err != nil { + return fmt.Errorf("failed to imports.Filepass: %w", err) + } + + if err := types.Filepass(c.TypesConfig, p, f); err != nil { + return fmt.Errorf("failed to types.Filepass: %w", err) + } + + if err := vars.Filepass(c.VarsConfig, p, f); err != nil { + return fmt.Errorf("failed to vars.Filepass: %w", err) + } + + return nil +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/config.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/config.go new file mode 100644 index 0000000000..b00595f9a5 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/config.go @@ -0,0 +1,15 @@ +package analyzer + +import ( + "github.com/leonklingele/grouper/pkg/analyzer/consts" + "github.com/leonklingele/grouper/pkg/analyzer/imports" + "github.com/leonklingele/grouper/pkg/analyzer/types" + "github.com/leonklingele/grouper/pkg/analyzer/vars" +) + +type Config struct { + ConstsConfig *consts.Config + ImportsConfig *imports.Config + TypesConfig *types.Config + VarsConfig *vars.Config +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/consts/analyzer.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/consts/analyzer.go new file mode 100644 index 
0000000000..e4e04c1270 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/consts/analyzer.go @@ -0,0 +1,19 @@ +package consts + +import ( + "go/ast" + "go/token" + + "github.com/leonklingele/grouper/pkg/analyzer/globals" + + "golang.org/x/tools/go/analysis" +) + +// https://go.dev/ref/spec#Constant_declarations + +func Filepass(c *Config, p *analysis.Pass, f *ast.File) error { + return globals.Filepass( + p, f, + token.CONST, c.RequireSingleConst, c.RequireGrouping, + ) +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/consts/config.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/consts/config.go new file mode 100644 index 0000000000..aeeab40c78 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/consts/config.go @@ -0,0 +1,6 @@ +package consts + +type Config struct { + RequireSingleConst bool // Require the use of a single global 'const' declaration only + RequireGrouping bool // Require the use of grouped global 'const' declarations +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/flags.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/flags.go new file mode 100644 index 0000000000..42447cbefe --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/flags.go @@ -0,0 +1,37 @@ +package analyzer + +import ( + "flag" +) + +const ( + FlagNameConstRequireSingleConst = "const-require-single-const" + FlagNameConstRequireGrouping = "const-require-grouping" + + FlagNameImportRequireSingleImport = "import-require-single-import" + FlagNameImportRequireGrouping = "import-require-grouping" + + FlagNameTypeRequireSingleType = "type-require-single-type" + FlagNameTypeRequireGrouping = "type-require-grouping" + + FlagNameVarRequireSingleVar = "var-require-single-var" + FlagNameVarRequireGrouping = "var-require-grouping" +) + +func Flags() flag.FlagSet { + fs := flag.NewFlagSet(Name, flag.ExitOnError) + + fs.Bool(FlagNameConstRequireSingleConst, false, "require the use of a single global 'const' declaration only") + fs.Bool(FlagNameConstRequireGrouping, false, "require the use of grouped global 'const' declarations") + + fs.Bool(FlagNameImportRequireSingleImport, false, "require the use of a single 'import' declaration only") + fs.Bool(FlagNameImportRequireGrouping, false, "require the use of grouped 'import' declarations") + + fs.Bool(FlagNameTypeRequireSingleType, false, "require the use of a single global 'type' declaration only") + fs.Bool(FlagNameTypeRequireGrouping, false, "require the use of grouped global 'type' declarations") + + fs.Bool(FlagNameVarRequireSingleVar, false, "require the use of a single global 'var' declaration only") + fs.Bool(FlagNameVarRequireGrouping, false, "require the use of grouped global 'var' declarations") + + return *fs +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/globals/analyzer.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/globals/analyzer.go new file mode 100644 index 0000000000..15940a4807 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/globals/analyzer.go @@ -0,0 +1,105 @@ +package globals + +import ( + "fmt" + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" +) + +type Global struct { + Decl *ast.GenDecl + IsGroup bool +} + +func Filepass( + p *analysis.Pass, f *ast.File, + tkn token.Token, requireSingle, requireGrouping bool, +) error { + var globals []*Global + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + + if genDecl.Tok == tkn { + 
globals = append(globals, &Global{ + Decl: genDecl, + IsGroup: genDecl.Lparen != 0, + }) + } + } + + numGlobals := len(globals) + if numGlobals == 0 { + // Bail out early + return nil + } + + if requireSingle && numGlobals > 1 { + msg := fmt.Sprintf("should only use a single global '%s' declaration, %d found", tkn.String(), numGlobals) + dups := globals[1:] + firstdup := dups[0] + decl := firstdup.Decl + + report := analysis.Diagnostic{ //nolint:exhaustivestruct // we do not need all fields + Pos: decl.Pos(), + End: decl.End(), + Message: msg, + // TODO(leon): Suggest fix + } + + if len(dups) > 1 { + report.Related = toRelated(dups[1:]) + } + + p.Report(report) + } + + if requireGrouping { + var ungrouped []*Global + for _, g := range globals { + if !g.IsGroup { + ungrouped = append(ungrouped, g) + } + } + + if numUngrouped := len(ungrouped); numUngrouped != 0 { + msg := fmt.Sprintf("should only use grouped global '%s' declarations", tkn.String()) + firstmatch := ungrouped[0] + decl := firstmatch.Decl + + report := analysis.Diagnostic{ //nolint:exhaustivestruct // we do not need all fields + Pos: decl.Pos(), + End: decl.End(), + Message: msg, + // TODO(leon): Suggest fix + } + + if numUngrouped > 1 { + report.Related = toRelated(ungrouped[1:]) + } + + p.Report(report) + } + } + + return nil +} + +func toRelated(globals []*Global) []analysis.RelatedInformation { + related := make([]analysis.RelatedInformation, 0, len(globals)) + for _, g := range globals { + decl := g.Decl + + related = append(related, analysis.RelatedInformation{ + Pos: decl.Pos(), + End: decl.End(), + Message: "found here", + }) + } + + return related +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/imports/analyzer.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/imports/analyzer.go new file mode 100644 index 0000000000..b545f00c05 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/imports/analyzer.go @@ -0,0 +1,103 @@ +package imports + +import ( + "fmt" + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" +) + +// https://go.dev/ref/spec#Import_declarations + +type Import struct { + Decl *ast.GenDecl + IsGroup bool +} + +func Filepass(c *Config, p *analysis.Pass, f *ast.File) error { + var imports []*Import + ast.Inspect(f, func(n ast.Node) bool { + if decl, ok := n.(*ast.GenDecl); ok { + if decl.Tok == token.IMPORT { + imports = append(imports, &Import{ + Decl: decl, + IsGroup: decl.Lparen != 0, + }) + } + } + + return true + }) + + numImports := len(imports) + if numImports == 0 { + // Bail out early + return nil + } + + if c.RequireSingleImport && numImports > 1 { + msg := fmt.Sprintf("should only use a single 'import' declaration, %d found", numImports) + dups := imports[1:] + firstdup := dups[0] + decl := firstdup.Decl + + report := analysis.Diagnostic{ //nolint:exhaustivestruct // we do not need all fields + Pos: decl.Pos(), + End: decl.End(), + Message: msg, + // TODO(leon): Suggest fix + } + + if len(dups) > 1 { + report.Related = toRelated(dups[1:]) + } + + p.Report(report) + } + + if c.RequireGrouping { + var ungroupedImports []*Import + for _, imp := range imports { + if !imp.IsGroup { + ungroupedImports = append(ungroupedImports, imp) + } + } + + if numUngroupedImports := len(ungroupedImports); numUngroupedImports != 0 { + msg := "should only use grouped 'import' declarations" + firstmatch := ungroupedImports[0] + decl := firstmatch.Decl + + report := analysis.Diagnostic{ //nolint:exhaustivestruct // we do not need all fields + Pos: decl.Pos(), + End: 
decl.End(), + Message: msg, + // TODO(leon): Suggest fix + } + + if numUngroupedImports > 1 { + report.Related = toRelated(ungroupedImports[1:]) + } + + p.Report(report) + } + } + + return nil +} + +func toRelated(imports []*Import) []analysis.RelatedInformation { + related := make([]analysis.RelatedInformation, 0, len(imports)) + for _, imp := range imports { + decl := imp.Decl + + related = append(related, analysis.RelatedInformation{ + Pos: decl.Pos(), + End: decl.End(), + Message: "found here", + }) + } + + return related +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/imports/config.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/imports/config.go new file mode 100644 index 0000000000..6a6971b4ad --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/imports/config.go @@ -0,0 +1,6 @@ +package imports + +type Config struct { + RequireSingleImport bool // Require the use of a single 'import' declaration only + RequireGrouping bool // Require the use of grouped 'import' declarations +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/types/analyzer.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/types/analyzer.go new file mode 100644 index 0000000000..63bbab33b7 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/types/analyzer.go @@ -0,0 +1,19 @@ +package types + +import ( + "go/ast" + "go/token" + + "github.com/leonklingele/grouper/pkg/analyzer/globals" + + "golang.org/x/tools/go/analysis" +) + +// https://go.dev/ref/spec#Type_declarations + +func Filepass(c *Config, p *analysis.Pass, f *ast.File) error { + return globals.Filepass( + p, f, + token.TYPE, c.RequireSingleType, c.RequireGrouping, + ) +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/types/config.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/types/config.go new file mode 100644 index 0000000000..e24cef9dae --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/types/config.go @@ -0,0 +1,6 @@ +package types + +type Config struct { + RequireSingleType bool // Require the use of a single global 'type' declaration only + RequireGrouping bool // Require the use of grouped global 'type' declarations +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/vars/analyzer.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/vars/analyzer.go new file mode 100644 index 0000000000..20c7812233 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/vars/analyzer.go @@ -0,0 +1,19 @@ +package vars + +import ( + "go/ast" + "go/token" + + "github.com/leonklingele/grouper/pkg/analyzer/globals" + + "golang.org/x/tools/go/analysis" +) + +// https://go.dev/ref/spec#Variable_declarations + +func Filepass(c *Config, p *analysis.Pass, f *ast.File) error { + return globals.Filepass( + p, f, + token.VAR, c.RequireSingleVar, c.RequireGrouping, + ) +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/vars/config.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/vars/config.go new file mode 100644 index 0000000000..4c7c1d8384 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/vars/config.go @@ -0,0 +1,6 @@ +package vars + +type Config struct { + RequireSingleVar bool // Require the use of a single global 'var' declaration only + RequireGrouping bool // Require the use of grouped global 'var' declarations +} diff --git a/vendor/github.com/lufeee/execinquery/.gitignore b/vendor/github.com/lufeee/execinquery/.gitignore new file mode 100644 index 
0000000000..00e1abc31f --- /dev/null +++ b/vendor/github.com/lufeee/execinquery/.gitignore @@ -0,0 +1 @@ +execinquery diff --git a/vendor/github.com/lufeee/execinquery/LICENSE b/vendor/github.com/lufeee/execinquery/LICENSE new file mode 100644 index 0000000000..b6ab14aec3 --- /dev/null +++ b/vendor/github.com/lufeee/execinquery/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 lufe + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lufeee/execinquery/README.md b/vendor/github.com/lufeee/execinquery/README.md new file mode 100644 index 0000000000..38fa7c8b96 --- /dev/null +++ b/vendor/github.com/lufeee/execinquery/README.md @@ -0,0 +1,76 @@ +# execinquery - a simple query string checker in Query function +[![Go Matrix](https://github.com/lufeee/execinquery/actions/workflows/go-cross.yml/badge.svg?branch=main)](https://github.com/lufeee/execinquery/actions/workflows/go-cross.yml) +[![Go lint](https://github.com/lufeee/execinquery/actions/workflows/lint.yml/badge.svg?branch=main)](https://github.com/lufeee/execinquery/actions/workflows/lint.yml) +[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](LICENSE) +## About + +execinquery is a linter that checks the query string passed to a Query function; it reads your Go source files and +warns about any misuse it finds. + +## Installation + +```sh +go install github.com/lufeee/execinquery/cmd/execinquery +``` + +## Usage +```go +package main + +import ( + "database/sql" + "log" +) + +func main() { + db, err := sql.Open("mysql", "test:test@tcp(test:3306)/test") + if err != nil { + log.Fatal("Database Connect Error: ", err) + } + defer db.Close() + + test := "a" + _, err = db.Query("Update * FROM hoge where id = ?", test) + if err != nil { + log.Fatal("Query Error: ", err) + } + +} +``` + +```console +go vet -vettool=$(which execinquery) ./... + +# command-line-arguments +./a.go:16:11: Use Exec instead of Query to execute `UPDATE` query +``` + +## CI + +### CircleCI + +```yaml +- run: + name: install execinquery + command: go install github.com/lufeee/execinquery + +- run: + name: run execinquery + command: go vet -vettool=`which execinquery` ./... +``` + +### GitHub Actions + +```yaml +- name: install execinquery + run: go install github.com/lufeee/execinquery + +- name: run execinquery + run: go vet -vettool=`which execinquery` ./... +``` + +### License + +MIT license. + 
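Beyond literal arguments, the analyzer can resolve a query that was assembled earlier in the function: `getQueryString` in `execinquery.go` below follows identifiers back through assignments and string concatenations. A minimal sketch of such a case (hypothetical user code, assuming the same `go vet -vettool` invocation shown above):

```go
package main

import "database/sql"

func rename(db *sql.DB) error {
	// The query is built by concatenation; execinquery follows the
	// identifier back to this assignment and still classifies it as UPDATE.
	query := "UPDATE users " +
		"SET name = ? WHERE id = ?"

	// Reported as: Use Exec instead of Query to execute `UPDATE` query
	_, err := db.Query(query, "alice", 1)
	return err
}
```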
diff --git a/vendor/github.com/lufeee/execinquery/execinquery.go b/vendor/github.com/lufeee/execinquery/execinquery.go new file mode 100644 index 0000000000..c37dc17010 --- /dev/null +++ b/vendor/github.com/lufeee/execinquery/execinquery.go @@ -0,0 +1,135 @@ +package execinquery + +import ( + "go/ast" + "regexp" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const doc = "execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds" + +// Analyzer is checking database/sql pkg Query's function +var Analyzer = &analysis.Analyzer{ + Name: "execinquery", + Doc: doc, + Run: newLinter().run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +type linter struct { + commentExp *regexp.Regexp + multilineCommentExp *regexp.Regexp +} + +func newLinter() *linter { + return &linter{ + commentExp: regexp.MustCompile(`--[^\n]*\n`), + multilineCommentExp: regexp.MustCompile(`(?s)/\*.*?\*/`), + } +} + +func (l linter) run(pass *analysis.Pass) (interface{}, error) { + result := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + + result.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.CallExpr: + selector, ok := n.Fun.(*ast.SelectorExpr) + if !ok { + return + } + + if pass.TypesInfo == nil || pass.TypesInfo.Uses[selector.Sel] == nil || pass.TypesInfo.Uses[selector.Sel].Pkg() == nil { + return + } + + if "database/sql" != pass.TypesInfo.Uses[selector.Sel].Pkg().Path() { + return + } + + if !strings.Contains(selector.Sel.Name, "Query") { + return + } + + replacement := "Exec" + var i int // the index of the query argument + if strings.Contains(selector.Sel.Name, "Context") { + replacement += "Context" + i = 1 + } + + if len(n.Args) <= i { + return + } + + query := l.getQueryString(n.Args[i]) + if query == "" { + return + } + + query = strings.TrimSpace(l.cleanValue(query)) + parts := strings.SplitN(query, " ", 2) + cmd := strings.ToUpper(parts[0]) + + if strings.HasPrefix(cmd, "SELECT") { + return + } + + pass.Reportf(n.Fun.Pos(), "Use %s instead of %s to execute `%s` query", replacement, selector.Sel.Name, cmd) + } + }) + + return nil, nil +} + +func (l linter) cleanValue(s string) string { + v := strings.NewReplacer(`"`, "", "`", "").Replace(s) + + v = l.multilineCommentExp.ReplaceAllString(v, "") + + return l.commentExp.ReplaceAllString(v, "") +} + +func (l linter) getQueryString(exp interface{}) string { + switch e := exp.(type) { + case *ast.AssignStmt: + var v string + for _, stmt := range e.Rhs { + v += l.cleanValue(l.getQueryString(stmt)) + } + return v + + case *ast.BasicLit: + return e.Value + + case *ast.ValueSpec: + var v string + for _, value := range e.Values { + v += l.cleanValue(l.getQueryString(value)) + } + return v + + case *ast.Ident: + if e.Obj == nil { + return "" + } + return l.getQueryString(e.Obj.Decl) + + case *ast.BinaryExpr: + v := l.cleanValue(l.getQueryString(e.X)) + v += l.cleanValue(l.getQueryString(e.Y)) + return v + } + + return "" +} diff --git a/vendor/github.com/macabu/inamedparam/.gitignore b/vendor/github.com/macabu/inamedparam/.gitignore new file mode 100644 index 0000000000..f8d51e94cb --- /dev/null +++ b/vendor/github.com/macabu/inamedparam/.gitignore @@ -0,0 +1,22 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# 
https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +inamedparam + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work diff --git a/vendor/github.com/macabu/inamedparam/.golangci.yml b/vendor/github.com/macabu/inamedparam/.golangci.yml new file mode 100644 index 0000000000..f0efa1cb6c --- /dev/null +++ b/vendor/github.com/macabu/inamedparam/.golangci.yml @@ -0,0 +1,33 @@ +run: + deadline: 30s + +linters: + enable-all: true + disable: + - cyclop + - deadcode + - depguard + - exhaustivestruct + - exhaustruct + - forcetypeassert + - gochecknoglobals + - gocognit + - golint + - ifshort + - interfacer + - maligned + - nilnil + - nosnakecase + - paralleltest + - scopelint + - structcheck + - varcheck + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/macabu/inamedparam) + section-separators: + - newLine diff --git a/vendor/github.com/macabu/inamedparam/LICENSE-MIT b/vendor/github.com/macabu/inamedparam/LICENSE-MIT new file mode 100644 index 0000000000..b95f480ee5 --- /dev/null +++ b/vendor/github.com/macabu/inamedparam/LICENSE-MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Matheus Macabu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/macabu/inamedparam/README.md b/vendor/github.com/macabu/inamedparam/README.md new file mode 100644 index 0000000000..3336cb9504 --- /dev/null +++ b/vendor/github.com/macabu/inamedparam/README.md @@ -0,0 +1,38 @@ +# inamedparam + +A linter that reports interfaces with unnamed method parameters. + +## Flags/Config +```sh +-skip-single-param + skip interfaces with a single unnamed parameter +``` + +## Usage + +### Standalone +You can run it standalone through `go vet`. + +You must install the binary to your `$GOBIN` folder like so: +```sh +$ go install github.com/macabu/inamedparam/cmd/inamedparam +``` + +And then navigate to your Go project's root folder, where you can run `go vet` in the following way: +```sh +$ go vet -vettool=$(which inamedparam) ./... +``` + +### golangci-lint
`inamedparam` was added as a linter to `golangci-lint` on version `v1.55.0`. It is disabled by default. 
+ +To enable it, you can add it to your `.golangci.yml` file, as such: +```yaml +run: + deadline: 30s + +linters: + disable-all: true + enable: + - inamedparam +``` diff --git a/vendor/github.com/macabu/inamedparam/inamedparam.go b/vendor/github.com/macabu/inamedparam/inamedparam.go new file mode 100644 index 0000000000..8ba7fe1882 --- /dev/null +++ b/vendor/github.com/macabu/inamedparam/inamedparam.go @@ -0,0 +1,94 @@ +package inamedparam + +import ( + "flag" + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const ( + analyzerName = "inamedparam" + + flagSkipSingleParam = "skip-single-param" +) + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: "reports interfaces with unnamed method parameters", + Run: run, + Flags: flags(), + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +func flags() flag.FlagSet { + flags := flag.NewFlagSet(analyzerName, flag.ExitOnError) + + flags.Bool(flagSkipSingleParam, false, "skip interface methods with a single unnamed parameter") + + return *flags +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + types := []ast.Node{ + &ast.InterfaceType{}, + } + + skipSingleParam := pass.Analyzer.Flags.Lookup(flagSkipSingleParam).Value.(flag.Getter).Get().(bool) + + inspect.Preorder(types, func(n ast.Node) { + interfaceType, ok := n.(*ast.InterfaceType) + if !ok || interfaceType == nil || interfaceType.Methods == nil { + return + } + + for _, method := range interfaceType.Methods.List { + interfaceFunc, ok := method.Type.(*ast.FuncType) + if !ok || interfaceFunc == nil || interfaceFunc.Params == nil { + continue + } + + // Improvement: add test case to reproduce this. Help wanted. + if len(method.Names) == 0 { + continue + } + + methodName := method.Names[0].Name + + if skipSingleParam && len(interfaceFunc.Params.List) == 1 { + continue + } + + for _, param := range interfaceFunc.Params.List { + if param.Names == nil { + var builtParamType string + + switch paramType := param.Type.(type) { + case *ast.SelectorExpr: + if ident := paramType.X.(*ast.Ident); ident != nil { + builtParamType += ident.Name + "." + } + + builtParamType += paramType.Sel.Name + case *ast.Ident: + builtParamType = paramType.Name + } + + if builtParamType != "" { + pass.Reportf(param.Pos(), "interface method %v must have named param for type %v", methodName, builtParamType) + } else { + pass.Reportf(param.Pos(), "interface method %v must have all named params", methodName) + } + } + } + } + }) + + return nil, nil +} diff --git a/vendor/github.com/maratori/testableexamples/LICENSE b/vendor/github.com/maratori/testableexamples/LICENSE new file mode 100644 index 0000000000..e8b68be3eb --- /dev/null +++ b/vendor/github.com/maratori/testableexamples/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Marat Reymers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/maratori/testableexamples/pkg/testableexamples/testableexamples.go b/vendor/github.com/maratori/testableexamples/pkg/testableexamples/testableexamples.go new file mode 100644 index 0000000000..26d22c703a --- /dev/null +++ b/vendor/github.com/maratori/testableexamples/pkg/testableexamples/testableexamples.go @@ -0,0 +1,34 @@ +package testableexamples + +import ( + "go/ast" + "go/doc" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// NewAnalyzer returns Analyzer that checks if examples are testable. +func NewAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "testableexamples", + Doc: "linter checks if examples are testable (have an expected output)", + Run: func(pass *analysis.Pass) (interface{}, error) { + testFiles := make([]*ast.File, 0, len(pass.Files)) + for _, file := range pass.Files { + fileName := pass.Fset.File(file.Pos()).Name() + if strings.HasSuffix(fileName, "_test.go") { + testFiles = append(testFiles, file) + } + } + + for _, example := range doc.Examples(testFiles...) { + if example.Output == "" && !example.EmptyOutput { + pass.Reportf(example.Code.Pos(), "missing output for example, go test can't validate it") + } + } + + return nil, nil + }, + } +} diff --git a/vendor/github.com/matoous/godox/godox.go b/vendor/github.com/matoous/godox/godox.go index 6d7104b09d..3903525c80 100644 --- a/vendor/github.com/matoous/godox/godox.go +++ b/vendor/github.com/matoous/godox/godox.go @@ -8,31 +8,23 @@ import ( "go/token" "path/filepath" "strings" + "unicode" + "unicode/utf8" ) -var ( - defaultKeywords = []string{"TODO", "BUG", "FIXME"} -) +var defaultKeywords = []string{"TODO", "BUG", "FIXME"} -// Message contains a message and position +// Message contains a message and position. 
type Message struct { Pos token.Position Message string } -func getMessages(c *ast.Comment, fset *token.FileSet, keywords []string) []Message { - commentText := c.Text - switch commentText[1] { - case '/': - commentText = commentText[2:] - if len(commentText) > 0 && commentText[0] == ' ' { - commentText = commentText[1:] - } - case '*': - commentText = commentText[2 : len(commentText)-2] - } +func getMessages(comment *ast.Comment, fset *token.FileSet, keywords []string) []Message { + commentText := extractComment(comment.Text) b := bufio.NewReader(bytes.NewBufferString(commentText)) + var comments []Message for lineNum := 0; ; lineNum++ { @@ -40,45 +32,88 @@ func getMessages(c *ast.Comment, fset *token.FileSet, keywords []string) []Messa if err != nil { break } + + const minimumSize = 4 + sComment := bytes.TrimSpace(line) - if len(sComment) < 4 { + if len(sComment) < minimumSize { continue } + for _, kw := range keywords { - if bytes.EqualFold([]byte(kw), sComment[0:len(kw)]) { - pos := fset.Position(c.Pos()) - // trim the comment - if len(sComment) > 40 { - sComment = []byte(fmt.Sprintf("%.40s...", sComment)) - } - comments = append(comments, Message{ - Pos: pos, - Message: fmt.Sprintf( - "%s:%d: Line contains %s: \"%s\"", - filepath.Join(pos.Filename), - pos.Line+lineNum, - strings.Join(keywords, "/"), - sComment, - ), - }) - break + if lkw := len(kw); !(bytes.EqualFold([]byte(kw), sComment[0:lkw]) && + !hasAlphanumRuneAdjacent(sComment[lkw:])) { + continue + } + + pos := fset.Position(comment.Pos()) + // trim the comment + const commentLimit = 40 + if len(sComment) > commentLimit { + sComment = []byte(fmt.Sprintf("%.40s...", sComment)) } + + comments = append(comments, Message{ + Pos: pos, + Message: fmt.Sprintf( + "%s:%d: Line contains %s: %q", + filepath.Clean(pos.Filename), + pos.Line+lineNum, + strings.Join(keywords, "/"), + sComment, + ), + }) + + break } } + return comments } +func extractComment(commentText string) string { + switch commentText[1] { + case '/': + commentText = commentText[2:] + if len(commentText) > 0 && commentText[0] == ' ' { + commentText = commentText[1:] + } + case '*': + commentText = commentText[2 : len(commentText)-2] + } + + return commentText +} + +func hasAlphanumRuneAdjacent(rest []byte) bool { + if len(rest) == 0 { + return false + } + + switch rest[0] { // most common cases + case ':', ' ', '(': + return false + } + + r, _ := utf8.DecodeRune(rest) + + return unicode.IsLetter(r) || unicode.IsNumber(r) || unicode.IsDigit(r) +} + // Run runs the godox linter on given file. // Godox searches for comments starting with given keywords and reports them. func Run(file *ast.File, fset *token.FileSet, keywords ...string) []Message { if len(keywords) == 0 { keywords = defaultKeywords } + var messages []Message + for _, c := range file.Comments { for _, ci := range c.List { messages = append(messages, getMessages(ci, fset, keywords)...) 
} } + return messages } diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 39bbcf00f0..d0ea68f408 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -1,6 +1,7 @@ -//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine -// +build darwin freebsd openbsd netbsd dragonfly +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine && !tinygo +// +build darwin freebsd openbsd netbsd dragonfly hurd // +build !appengine +// +build !tinygo package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go index 31503226f6..7402e0618a 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -1,5 +1,6 @@ -//go:build appengine || js || nacl || wasm -// +build appengine js nacl wasm +//go:build (appengine || js || nacl || tinygo || wasm) && !windows +// +build appengine js nacl tinygo wasm +// +build !windows package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go index 67787657fb..0337d8cf6d 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -1,6 +1,7 @@ -//go:build (linux || aix || zos) && !appengine +//go:build (linux || aix || zos) && !appengine && !tinygo // +build linux aix zos // +build !appengine +// +build !tinygo package isatty diff --git a/vendor/github.com/mbilski/exhaustivestruct/LICENSE b/vendor/github.com/mbilski/exhaustivestruct/LICENSE deleted file mode 100644 index 893eb73b9f..0000000000 --- a/vendor/github.com/mbilski/exhaustivestruct/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2020 Mateusz Bilski - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
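The `godox` hunk above adds `hasAlphanumRuneAdjacent` so that a keyword only matches at a word boundary: comments whose first word merely starts with a keyword are now skipped. Hypothetical comments illustrating the behavior implied by that hunk:

```go
package example

// TODO: split this function    (still reported: ':' follows the keyword)
// TODO(alice) drop the flag    (still reported: '(' follows the keyword)
// TODOS live in the wiki       (now skipped: 'S' is a letter adjacent to "TODO")
func f() {}
```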
diff --git a/vendor/github.com/mbilski/exhaustivestruct/pkg/analyzer/analyzer.go b/vendor/github.com/mbilski/exhaustivestruct/pkg/analyzer/analyzer.go deleted file mode 100644 index 0dfb713c5a..0000000000 --- a/vendor/github.com/mbilski/exhaustivestruct/pkg/analyzer/analyzer.go +++ /dev/null @@ -1,187 +0,0 @@ -package analyzer - -import ( - "flag" - "fmt" - "go/ast" - "go/types" - "path" - "strings" - - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" - - "golang.org/x/tools/go/analysis" -) - -// Analyzer that checks if all struct's fields are initialized -var Analyzer = &analysis.Analyzer{ - Name: "exhaustivestruct", - Doc: "Checks if all struct's fields are initialized", - Run: run, - Requires: []*analysis.Analyzer{inspect.Analyzer}, - Flags: newFlagSet(), -} - -// StructPatternList is a comma separated list of expressions to match struct packages and names -// The struct packages have the form example.com/package.ExampleStruct -// The matching patterns can use matching syntax from https://pkg.go.dev/path#Match -// If this list is empty, all structs are tested. -var StructPatternList string - -func newFlagSet() flag.FlagSet { - fs := flag.NewFlagSet("", flag.PanicOnError) - fs.StringVar(&StructPatternList, "struct_patterns", "", "This is a comma separated list of expressions to match struct packages and names") - return *fs -} - -func run(pass *analysis.Pass) (interface{}, error) { - splitFn := func(c rune) bool { return c == ',' } - inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - structPatterns := strings.FieldsFunc(StructPatternList, splitFn) - // validate the pattern syntax - for _, pattern := range structPatterns { - _, err := path.Match(pattern, "") - if err != nil { - return nil, fmt.Errorf("invalid struct pattern %s: %w", pattern, err) - } - } - - nodeFilter := []ast.Node{ - (*ast.CompositeLit)(nil), - (*ast.ReturnStmt)(nil), - } - - var returnStmt *ast.ReturnStmt - - inspector.Preorder(nodeFilter, func(node ast.Node) { - var name string - - compositeLit, ok := node.(*ast.CompositeLit) - if !ok { - // Keep track of the last return statement whilte iterating - retLit, ok := node.(*ast.ReturnStmt) - if ok { - returnStmt = retLit - } - return - } - - i, ok := compositeLit.Type.(*ast.Ident) - - if ok { - name = i.Name - } else { - s, ok := compositeLit.Type.(*ast.SelectorExpr) - - if !ok { - return - } - - name = s.Sel.Name - } - - if compositeLit.Type == nil { - return - } - - t := pass.TypesInfo.TypeOf(compositeLit.Type) - - if t == nil { - return - } - - if len(structPatterns) > 0 { - shouldLint := false - for _, pattern := range structPatterns { - // We check the patterns for vailidy ahead of time, so we don't need to check the error here - if match, _ := path.Match(pattern, t.String()); match { - shouldLint = true - break - } - } - if !shouldLint { - return - } - } - - str, ok := t.Underlying().(*types.Struct) - - if !ok { - return - } - - // Don't report an error if: - // 1. This composite literal contains no fields and - // 2. It's in a return statement and - // 3. 
The return statement contains a non-nil error - if len(compositeLit.Elts) == 0 { - // Check if this composite is one of the results the last return statement - isInResults := false - if returnStmt != nil { - for _, result := range returnStmt.Results { - compareComposite, ok := result.(*ast.CompositeLit) - if ok { - if compareComposite == compositeLit { - isInResults = true - } - } - } - } - nonNilError := false - if isInResults { - // Check if any of the results has an error type and if that error is set to non-nil (if it's set to nil, the type would be "untyped nil") - for _, result := range returnStmt.Results { - if pass.TypesInfo.TypeOf(result).String() == "error" { - nonNilError = true - } - } - } - - if nonNilError { - return - } - } - - samePackage := strings.HasPrefix(t.String(), pass.Pkg.Path()+".") - - missing := []string{} - - for i := 0; i < str.NumFields(); i++ { - fieldName := str.Field(i).Name() - exists := false - - if !samePackage && !str.Field(i).Exported() { - continue - } - - for eIndex, e := range compositeLit.Elts { - if k, ok := e.(*ast.KeyValueExpr); ok { - if i, ok := k.Key.(*ast.Ident); ok { - if i.Name == fieldName { - exists = true - break - } - } - } else { - if eIndex == i { - exists = true - break - } - } - } - - if !exists { - missing = append(missing, fieldName) - } - } - - if len(missing) == 1 { - pass.Reportf(node.Pos(), "%s is missing in %s", missing[0], name) - } else if len(missing) > 1 { - pass.Reportf(node.Pos(), "%s are missing in %s", strings.Join(missing, ", "), name) - } - }) - - return nil, nil -} diff --git a/vendor/github.com/mgechev/dots/.travis.yml b/vendor/github.com/mgechev/dots/.travis.yml deleted file mode 100644 index f4a4a7363c..0000000000 --- a/vendor/github.com/mgechev/dots/.travis.yml +++ /dev/null @@ -1,2 +0,0 @@ -language: go -go: master diff --git a/vendor/github.com/mgechev/dots/LICENSE b/vendor/github.com/mgechev/dots/LICENSE deleted file mode 100644 index c617c7e012..0000000000 --- a/vendor/github.com/mgechev/dots/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2018 Minko Gechev - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
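For context on this removal: the `exhaustivestruct` analyzer deleted above reported composite literals that leave struct fields unset (skipping unexported fields of structs from other packages, and empty literals returned alongside a non-nil error). A hypothetical example of code it used to flag:

```go
package example

type Config struct {
	Host string
	Port int
}

func defaultConfig() Config {
	// exhaustivestruct reported: "Port is missing in Config"
	return Config{Host: "localhost"}
}
```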
diff --git a/vendor/github.com/mgechev/dots/README.md b/vendor/github.com/mgechev/dots/README.md deleted file mode 100644 index 1203aef5f7..0000000000 --- a/vendor/github.com/mgechev/dots/README.md +++ /dev/null @@ -1,100 +0,0 @@ -[![Build Status](https://travis-ci.org/mgechev/dots.svg?branch=master)](https://travis-ci.org/mgechev/dots) - -# Dots - -Implements the wildcard file matching in Go used by golint, go test etc. - -## Usage - -```go -import "github.com/mgechev/dots" - -func main() { - result, err := dots.Resolve([]string{"./fixtures/..."}, []string{"./fixtures/foo"}) - for _, f := range result { - fmt.Println(f); - } -} -``` - -If we suppose that we have the following directory structure: - -```text -├── README.md -├── fixtures -│   ├── bar -│   │   ├── bar1.go -│   │   └── bar2.go -│   ├── baz -│   │   ├── baz1.go -│   │   ├── baz2.go -│   │   └── baz3.go -│   └── foo -│   ├── foo1.go -│   ├── foo2.go -│   └── foo3.go -└── main.go -``` - -The result will be: - -```text -fixtures/bar/bar1.go -fixtures/bar/bar2.go -fixtures/baz/baz1.go -fixtures/baz/baz2.go -fixtures/baz/baz3.go -``` - -`dots` supports wildcard in both - the first and the last argument of `Resolve`, which means that you can ignore files based on a wildcard: - -```go -dots.Resolve([]string{"github.com/mgechev/dots"}, []string{"./..."}) // empty list -dots.Resolve([]string{"./fixtures/bar/..."}, []string{"./fixture/foo/...", "./fixtures/baz/..."}) // bar1.go, bar2.go -``` - -## Preserve package structure - -`dots` allow you to receive a slice of slices where each nested slice represents an individual package: - -```go -dots.ResolvePackages([]string{"github.com/mgechev/dots/..."}, []string{}) -``` - -So we will get the result: - -```text -[ - [ - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/bar/bar1.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/bar/bar2.go" - ], - [ - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/baz/baz1.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/baz/baz2.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/baz/baz3.go" - ], - [ - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/foo/foo1.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/foo/foo2.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/foo/foo3.go" - ], - [ - "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/baz/baz1.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/baz/baz2.go" - ], - [ - "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/foo/foo1.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/foo/foo2.go" - ], - [ - "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/foo/bar/bar1.go" - ] -] -``` - -This method is especially useful, when you want to perform type checking over given package from the result. - -## License - -MIT diff --git a/vendor/github.com/mgechev/dots/resolve.go b/vendor/github.com/mgechev/dots/resolve.go deleted file mode 100644 index 114534be87..0000000000 --- a/vendor/github.com/mgechev/dots/resolve.go +++ /dev/null @@ -1,468 +0,0 @@ -package dots - -import ( - "go/build" - "log" - "os" - "path" - "path/filepath" - "regexp" - "runtime" - "strings" -) - -var ( - buildContext = build.Default - goroot = filepath.Clean(runtime.GOROOT()) - gorootSrc = filepath.Join(goroot, "src") -) - -func flatten(arr [][]string) []string { - var res []string - for _, e := range arr { - res = append(res, e...) - } - return res -} - -// Resolve accepts a slice of paths with optional "..." placeholder and a slice with paths to be skipped. 
-// The final result is the set of all files from the selected directories subtracted with -// the files in the skip slice. -func Resolve(includePatterns, skipPatterns []string) ([]string, error) { - skip, err := resolvePatternsIgnoringErrors(skipPatterns) - filter := newPathFilter(flatten(skip)) - if err != nil { - return nil, err - } - - pathSet := map[string]bool{} - includePackages, err := resolvePatterns(includePatterns) - include := flatten(includePackages) - if err != nil { - return nil, err - } - - var result []string - for _, i := range include { - if _, ok := pathSet[i]; !ok && !filter(i) { - pathSet[i] = true - result = append(result, i) - } - } - return result, err -} - -// ResolvePackages accepts a slice of paths with optional "..." placeholder and a slice with paths to be skipped. -// The final result is the set of all files from the selected directories subtracted with -// the files in the skip slice. The difference between `Resolve` and `ResolvePackages` -// is that `ResolvePackages` preserves the package structure in the nested slices. -func ResolvePackages(includePatterns, skipPatterns []string) ([][]string, error) { - skip, err := resolvePatternsIgnoringErrors(skipPatterns) - filter := newPathFilter(flatten(skip)) - if err != nil { - return nil, err - } - - pathSet := map[string]bool{} - include, err := resolvePatterns(includePatterns) - if err != nil { - return nil, err - } - - var result [][]string - for _, p := range include { - var packageFiles []string - for _, f := range p { - if _, ok := pathSet[f]; !ok && !filter(f) { - pathSet[f] = true - packageFiles = append(packageFiles, f) - } - } - result = append(result, packageFiles) - } - return result, err -} - -func isDir(filename string) bool { - fi, err := os.Stat(filename) - return err == nil && fi.IsDir() -} - -func exists(filename string) bool { - _, err := os.Stat(filename) - return err == nil -} - -func resolveDir(dirname string) ([]string, error) { - pkg, err := build.ImportDir(dirname, 0) - return resolveImportedPackage(pkg, err) -} - -func resolvePackage(pkgname string) ([]string, error) { - pkg, err := build.Import(pkgname, ".", 0) - return resolveImportedPackage(pkg, err) -} - -func resolveImportedPackage(pkg *build.Package, err error) ([]string, error) { - if err != nil { - if _, nogo := err.(*build.NoGoError); nogo { - // Don't complain if the failure is due to no Go source files. - return nil, nil - } - return nil, err - } - - var files []string - files = append(files, pkg.GoFiles...) - files = append(files, pkg.CgoFiles...) - files = append(files, pkg.TestGoFiles...) - if pkg.Dir != "." { - for i, f := range files { - files[i] = filepath.Join(pkg.Dir, f) - } - } - return files, nil -} - -func resolvePatterns(patterns []string) ([][]string, error) { - var files [][]string - for _, pattern := range patterns { - f, err := resolvePattern(pattern) - if err != nil { - return nil, err - } - files = append(files, f...) - } - return files, nil -} - -func resolvePatternsIgnoringErrors(patterns []string) ([][]string, error) { - var files [][]string - for _, pattern := range patterns { - f, err := resolvePattern(pattern) - if err != nil { - continue - } - files = append(files, f...) - } - return files, nil -} - -func resolvePattern(pattern string) ([][]string, error) { - // dirsRun, filesRun, and pkgsRun indicate whether golint is applied to - // directory, file or package targets. The distinction affects which - // checks are run. It is no valid to mix target types. 
- var dirsRun, filesRun, pkgsRun int - var matches []string - - if strings.HasSuffix(pattern, "/...") && isDir(pattern[:len(pattern)-len("/...")]) { - dirsRun = 1 - for _, dirname := range matchPackagesInFS(pattern) { - matches = append(matches, dirname) - } - } else if isDir(pattern) { - dirsRun = 1 - matches = append(matches, pattern) - } else if exists(pattern) { - filesRun = 1 - matches = append(matches, pattern) - } else { - pkgsRun = 1 - matches = append(matches, pattern) - } - - result := [][]string{} - switch { - case dirsRun == 1: - for _, dir := range matches { - res, err := resolveDir(dir) - if err != nil { - return nil, err - } - result = append(result, res) - } - case filesRun == 1: - return [][]string{matches}, nil - case pkgsRun == 1: - for _, pkg := range importPaths(matches) { - res, err := resolvePackage(pkg) - if err != nil { - return nil, err - } - result = append(result, res) - } - } - return result, nil -} - -func newPathFilter(skip []string) func(string) bool { - filter := map[string]bool{} - for _, name := range skip { - filter[name] = true - } - - return func(path string) bool { - base := filepath.Base(path) - if filter[base] || filter[path] { - return true - } - return base != "." && base != ".." && strings.ContainsAny(base[0:1], "_.") - } -} - -// importPathsNoDotExpansion returns the import paths to use for the given -// command line, but it does no ... expansion. -func importPathsNoDotExpansion(args []string) []string { - if len(args) == 0 { - return []string{"."} - } - var out []string - for _, a := range args { - // Arguments are supposed to be import paths, but - // as a courtesy to Windows developers, rewrite \ to / - // in command-line arguments. Handles .\... and so on. - if filepath.Separator == '\\' { - a = strings.Replace(a, `\`, `/`, -1) - } - - // Put argument in canonical form, but preserve leading ./. - if strings.HasPrefix(a, "./") { - a = "./" + path.Clean(a) - if a == "./." { - a = "." - } - } else { - a = path.Clean(a) - } - if a == "all" || a == "std" { - out = append(out, matchPackages(a)...) - continue - } - out = append(out, a) - } - return out -} - -// importPaths returns the import paths to use for the given command line. -func importPaths(args []string) []string { - args = importPathsNoDotExpansion(args) - var out []string - for _, a := range args { - if strings.Contains(a, "...") { - if build.IsLocalImport(a) { - out = append(out, matchPackagesInFS(a)...) - } else { - out = append(out, matchPackages(a)...) - } - continue - } - out = append(out, a) - } - return out -} - -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -func matchPattern(pattern string) func(name string) bool { - re := regexp.QuoteMeta(pattern) - re = strings.Replace(re, `\.\.\.`, `.*`, -1) - // Special case: foo/... matches foo too. - if strings.HasSuffix(re, `/.*`) { - re = re[:len(re)-len(`/.*`)] + `(/.*)?` - } - reg := regexp.MustCompile(`^` + re + `$`) - return func(name string) bool { - return reg.MatchString(name) - } -} - -// hasPathPrefix reports whether the path s begins with the -// elements in prefix. 
-func hasPathPrefix(s, prefix string) bool { - switch { - default: - return false - case len(s) == len(prefix): - return s == prefix - case len(s) > len(prefix): - if prefix != "" && prefix[len(prefix)-1] == '/' { - return strings.HasPrefix(s, prefix) - } - return s[len(prefix)] == '/' && s[:len(prefix)] == prefix - } -} - -// treeCanMatchPattern(pattern)(name) reports whether -// name or children of name can possibly match pattern. -// Pattern is the same limited glob accepted by matchPattern. -func treeCanMatchPattern(pattern string) func(name string) bool { - wildCard := false - if i := strings.Index(pattern, "..."); i >= 0 { - wildCard = true - pattern = pattern[:i] - } - return func(name string) bool { - return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || - wildCard && strings.HasPrefix(name, pattern) - } -} - -func matchPackages(pattern string) []string { - match := func(string) bool { return true } - treeCanMatch := func(string) bool { return true } - if pattern != "all" && pattern != "std" { - match = matchPattern(pattern) - treeCanMatch = treeCanMatchPattern(pattern) - } - - have := map[string]bool{ - "builtin": true, // ignore pseudo-package that exists only for documentation - } - if !buildContext.CgoEnabled { - have["runtime/cgo"] = true // ignore during walk - } - var pkgs []string - - // Commands - cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator) - filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() || path == cmd { - return nil - } - name := path[len(cmd):] - if !treeCanMatch(name) { - return filepath.SkipDir - } - // Commands are all in cmd/, not in subdirectories. - if strings.Contains(name, string(filepath.Separator)) { - return filepath.SkipDir - } - - // We use, e.g., cmd/gofmt as the pseudo import path for gofmt. - name = "cmd/" + name - if have[name] { - return nil - } - have[name] = true - if !match(name) { - return nil - } - _, err = buildContext.ImportDir(path, 0) - if err != nil { - if _, noGo := err.(*build.NoGoError); !noGo { - log.Print(err) - } - return nil - } - pkgs = append(pkgs, name) - return nil - }) - - for _, src := range buildContext.SrcDirs() { - if (pattern == "std" || pattern == "cmd") && src != gorootSrc { - continue - } - src = filepath.Clean(src) + string(filepath.Separator) - root := src - if pattern == "cmd" { - root += "cmd" + string(filepath.Separator) - } - filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() || path == src { - return nil - } - - // Avoid .foo, _foo, and testdata directory trees. - _, elem := filepath.Split(path) - if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := filepath.ToSlash(path[len(src):]) - if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") { - // The name "std" is only the standard library. - // If the name is cmd, it's the root of the command tree. - return filepath.SkipDir - } - if !treeCanMatch(name) { - return filepath.SkipDir - } - if have[name] { - return nil - } - have[name] = true - if !match(name) { - return nil - } - _, err = buildContext.ImportDir(path, 0) - if err != nil { - if _, noGo := err.(*build.NoGoError); noGo { - return nil - } - } - pkgs = append(pkgs, name) - return nil - }) - } - return pkgs -} - -func matchPackagesInFS(pattern string) []string { - // Find directory to begin the scan. 
- // Could be smarter but this one optimization - // is enough for now, since ... is usually at the - // end of a path. - i := strings.Index(pattern, "...") - dir, _ := path.Split(pattern[:i]) - - // pattern begins with ./ or ../. - // path.Clean will discard the ./ but not the ../. - // We need to preserve the ./ for pattern matching - // and in the returned import paths. - prefix := "" - if strings.HasPrefix(pattern, "./") { - prefix = "./" - } - match := matchPattern(pattern) - - var pkgs []string - filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() { - return nil - } - if path == dir { - // filepath.Walk starts at dir and recurses. For the recursive case, - // the path is the result of filepath.Join, which calls filepath.Clean. - // The initial case is not Cleaned, though, so we do this explicitly. - // - // This converts a path like "./io/" to "io". Without this step, running - // "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io - // package, because prepending the prefix "./" to the unclean path would - // result in "././io", and match("././io") returns false. - path = filepath.Clean(path) - } - - // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". - _, elem := filepath.Split(path) - dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." - if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := prefix + filepath.ToSlash(path) - if !match(name) { - return nil - } - if _, err = build.ImportDir(path, 0); err != nil { - if _, noGo := err.(*build.NoGoError); !noGo { - log.Print(err) - } - return nil - } - pkgs = append(pkgs, name) - return nil - }) - return pkgs -} diff --git a/vendor/github.com/mgechev/revive/config/config.go b/vendor/github.com/mgechev/revive/config/config.go index 5c63f35b3e..50a2b8966f 100644 --- a/vendor/github.com/mgechev/revive/config/config.go +++ b/vendor/github.com/mgechev/revive/config/config.go @@ -1,13 +1,14 @@ +// Package config implements revive's configuration data structures and related methods package config import ( "errors" "fmt" - "io/ioutil" - - "github.com/mgechev/revive/formatter" + "os" "github.com/BurntSushi/toml" + + "github.com/mgechev/revive/formatter" "github.com/mgechev/revive/lint" "github.com/mgechev/revive/rule" ) @@ -31,21 +32,22 @@ var defaultRules = []lint.Rule{ &rule.TimeNamingRule{}, &rule.ContextKeysType{}, &rule.ContextAsArgumentRule{}, + &rule.EmptyBlockRule{}, + &rule.SuperfluousElseRule{}, + &rule.UnusedParamRule{}, + &rule.UnreachableCodeRule{}, + &rule.RedefinesBuiltinIDRule{}, } var allRules = append([]lint.Rule{ &rule.ArgumentsLimitRule{}, &rule.CyclomaticRule{}, &rule.FileHeaderRule{}, - &rule.EmptyBlockRule{}, - &rule.SuperfluousElseRule{}, &rule.ConfusingNamingRule{}, &rule.GetReturnRule{}, &rule.ModifiesParamRule{}, &rule.ConfusingResultsRule{}, &rule.DeepExitRule{}, - &rule.UnusedParamRule{}, - &rule.UnreachableCodeRule{}, &rule.AddConstantRule{}, &rule.FlagParamRule{}, &rule.UnnecessaryStmtRule{}, @@ -53,8 +55,7 @@ var allRules = append([]lint.Rule{ &rule.ModifiesValRecRule{}, &rule.ConstantLogicalExprRule{}, &rule.BoolLiteralRule{}, - &rule.RedefinesBuiltinIDRule{}, - &rule.ImportsBlacklistRule{}, + &rule.ImportsBlocklistRule{}, &rule.FunctionResultsLimitRule{}, &rule.MaxPublicStructsRule{}, &rule.RangeValInClosureRule{}, @@ -79,13 +80,21 @@ var allRules = append([]lint.Rule{ &rule.UnexportedNamingRule{}, &rule.FunctionLength{}, &rule.NestedStructs{}, - 
&rule.IfReturnRule{}, &rule.UselessBreak{}, + &rule.UncheckedTypeAssertionRule{}, &rule.TimeEqualRule{}, &rule.BannedCharsRule{}, &rule.OptimizeOperandsOrderRule{}, &rule.UseAnyRule{}, &rule.DataRaceRule{}, + &rule.CommentSpacingsRule{}, + &rule.IfReturnRule{}, + &rule.RedundantImportAlias{}, + &rule.ImportAliasNamingRule{}, + &rule.EnforceMapStyleRule{}, + &rule.EnforceRepeatedArgTypeStyleRule{}, + &rule.EnforceSliceStyleRule{}, + &rule.MaxControlNestingRule{}, }, defaultRules...) var allFormatters = []lint.Formatter{ @@ -123,7 +132,8 @@ func GetLintingRules(config *lint.Config, extraRules []lint.Rule) ([]lint.Rule, var lintingRules []lint.Rule for name, ruleConfig := range config.Rules { - r, ok := rulesMap[name] + actualName := actualRuleName(name) + r, ok := rulesMap[actualName] if !ok { return nil, fmt.Errorf("cannot find rule: %s", name) } @@ -138,8 +148,17 @@ func GetLintingRules(config *lint.Config, extraRules []lint.Rule) ([]lint.Rule, return lintingRules, nil } +func actualRuleName(name string) string { + switch name { + case "imports-blacklist": + return "imports-blocklist" + default: + return name + } +} + func parseConfig(path string, config *lint.Config) error { - file, err := ioutil.ReadFile(path) + file, err := os.ReadFile(path) if err != nil { return errors.New("cannot read the config file") } @@ -147,6 +166,14 @@ func parseConfig(path string, config *lint.Config) error { if err != nil { return fmt.Errorf("cannot parse the config file: %v", err) } + for k, r := range config.Rules { + err := r.Initialize() + if err != nil { + return fmt.Errorf("error in config of rule [%s] : [%v]", k, err) + } + config.Rules[k] = r + } + return nil } diff --git a/vendor/github.com/mgechev/revive/formatter/checkstyle.go b/vendor/github.com/mgechev/revive/formatter/checkstyle.go index 33a3b2ca17..f45b63c925 100644 --- a/vendor/github.com/mgechev/revive/formatter/checkstyle.go +++ b/vendor/github.com/mgechev/revive/formatter/checkstyle.go @@ -3,7 +3,7 @@ package formatter import ( "bytes" "encoding/xml" - plainTemplate "text/template" + plain "text/template" "github.com/mgechev/revive/lint" ) @@ -50,7 +50,7 @@ func (*Checkstyle) Format(failures <-chan lint.Failure, config lint.Config) (str issues[fn] = append(issues[fn], iss) } - t, err := plainTemplate.New("revive").Parse(checkstyleTemplate) + t, err := plain.New("revive").Parse(checkstyleTemplate) if err != nil { return "", err } diff --git a/vendor/github.com/mgechev/revive/formatter/default.go b/vendor/github.com/mgechev/revive/formatter/default.go index f76a7b29ab..2d5a04434f 100644 --- a/vendor/github.com/mgechev/revive/formatter/default.go +++ b/vendor/github.com/mgechev/revive/formatter/default.go @@ -1,6 +1,7 @@ package formatter import ( + "bytes" "fmt" "github.com/mgechev/revive/lint" @@ -19,8 +20,9 @@ func (*Default) Name() string { // Format formats the failures gotten from the lint. func (*Default) Format(failures <-chan lint.Failure, _ lint.Config) (string, error) { + var buf bytes.Buffer for failure := range failures { - fmt.Printf("%v: %s\n", failure.Position.Start, failure.Failure) + fmt.Fprintf(&buf, "%v: %s\n", failure.Position.Start, failure.Failure) } - return "", nil + return buf.String(), nil } diff --git a/vendor/github.com/mgechev/revive/formatter/doc.go b/vendor/github.com/mgechev/revive/formatter/doc.go new file mode 100644 index 0000000000..bb89f20ea6 --- /dev/null +++ b/vendor/github.com/mgechev/revive/formatter/doc.go @@ -0,0 +1,2 @@ +// Package formatter implements the linter output formatters. 
+package formatter diff --git a/vendor/github.com/mgechev/revive/formatter/friendly.go b/vendor/github.com/mgechev/revive/formatter/friendly.go index ced8fa46c9..5ff329a23c 100644 --- a/vendor/github.com/mgechev/revive/formatter/friendly.go +++ b/vendor/github.com/mgechev/revive/formatter/friendly.go @@ -3,6 +3,7 @@ package formatter import ( "bytes" "fmt" + "io" "sort" "github.com/fatih/color" @@ -31,13 +32,14 @@ func (*Friendly) Name() string { // Format formats the failures gotten from the lint. func (f *Friendly) Format(failures <-chan lint.Failure, config lint.Config) (string, error) { + var buf bytes.Buffer errorMap := map[string]int{} warningMap := map[string]int{} totalErrors := 0 totalWarnings := 0 for failure := range failures { sev := severity(config, failure) - f.printFriendlyFailure(failure, sev) + f.printFriendlyFailure(&buf, failure, sev) if sev == lint.SeverityWarning { warningMap[failure.RuleName]++ totalWarnings++ @@ -47,29 +49,29 @@ func (f *Friendly) Format(failures <-chan lint.Failure, config lint.Config) (str totalErrors++ } } - f.printSummary(totalErrors, totalWarnings) - f.printStatistics(color.RedString("Errors:"), errorMap) - f.printStatistics(color.YellowString("Warnings:"), warningMap) - return "", nil + f.printSummary(&buf, totalErrors, totalWarnings) + f.printStatistics(&buf, color.RedString("Errors:"), errorMap) + f.printStatistics(&buf, color.YellowString("Warnings:"), warningMap) + return buf.String(), nil } -func (f *Friendly) printFriendlyFailure(failure lint.Failure, severity lint.Severity) { - f.printHeaderRow(failure, severity) - f.printFilePosition(failure) - fmt.Println() - fmt.Println() +func (f *Friendly) printFriendlyFailure(w io.Writer, failure lint.Failure, severity lint.Severity) { + f.printHeaderRow(w, failure, severity) + f.printFilePosition(w, failure) + fmt.Fprintln(w) + fmt.Fprintln(w) } -func (f *Friendly) printHeaderRow(failure lint.Failure, severity lint.Severity) { +func (f *Friendly) printHeaderRow(w io.Writer, failure lint.Failure, severity lint.Severity) { emoji := getWarningEmoji() if severity == lint.SeverityError { emoji = getErrorEmoji() } - fmt.Print(f.table([][]string{{emoji, "https://revive.run/r#" + failure.RuleName, color.GreenString(failure.Failure)}})) + fmt.Fprint(w, f.table([][]string{{emoji, "https://revive.run/r#" + failure.RuleName, color.GreenString(failure.Failure)}})) } -func (*Friendly) printFilePosition(failure lint.Failure) { - fmt.Printf(" %s:%d:%d", failure.GetFilename(), failure.Position.Start.Line, failure.Position.Start.Column) +func (*Friendly) printFilePosition(w io.Writer, failure lint.Failure) { + fmt.Fprintf(w, " %s:%d:%d", failure.GetFilename(), failure.Position.Start.Line, failure.Position.Start.Column) } type statEntry struct { @@ -77,7 +79,7 @@ type statEntry struct { failures int } -func (*Friendly) printSummary(errors, warnings int) { +func (*Friendly) printSummary(w io.Writer, errors, warnings int) { emoji := getWarningEmoji() if errors > 0 { emoji = getErrorEmoji() @@ -96,18 +98,18 @@ func (*Friendly) printSummary(errors, warnings int) { } str := fmt.Sprintf("%d %s (%d %s, %d %s)", errors+warnings, problemsLabel, errors, errorsLabel, warnings, warningsLabel) if errors > 0 { - fmt.Printf("%s %s\n", emoji, color.RedString(str)) - fmt.Println() + fmt.Fprintf(w, "%s %s\n", emoji, color.RedString(str)) + fmt.Fprintln(w) return } if warnings > 0 { - fmt.Printf("%s %s\n", emoji, color.YellowString(str)) - fmt.Println() + fmt.Fprintf(w, "%s %s\n", emoji, color.YellowString(str)) + fmt.Fprintln(w) 
return } } -func (f *Friendly) printStatistics(header string, stats map[string]int) { +func (f *Friendly) printStatistics(w io.Writer, header string, stats map[string]int) { if len(stats) == 0 { return } @@ -122,8 +124,8 @@ func (f *Friendly) printStatistics(header string, stats map[string]int) { for _, entry := range data { formatted = append(formatted, []string{color.GreenString(fmt.Sprintf("%d", entry.failures)), entry.name}) } - fmt.Println(header) - fmt.Println(f.table(formatted)) + fmt.Fprintln(w, header) + fmt.Fprintln(w, f.table(formatted)) } func (*Friendly) table(rows [][]string) string { diff --git a/vendor/github.com/mgechev/revive/formatter/ndjson.go b/vendor/github.com/mgechev/revive/formatter/ndjson.go index a02d9c80fa..58b35dc44d 100644 --- a/vendor/github.com/mgechev/revive/formatter/ndjson.go +++ b/vendor/github.com/mgechev/revive/formatter/ndjson.go @@ -1,8 +1,8 @@ package formatter import ( + "bytes" "encoding/json" - "os" "github.com/mgechev/revive/lint" ) @@ -20,7 +20,8 @@ func (*NDJSON) Name() string { // Format formats the failures gotten from the lint. func (*NDJSON) Format(failures <-chan lint.Failure, config lint.Config) (string, error) { - enc := json.NewEncoder(os.Stdout) + var buf bytes.Buffer + enc := json.NewEncoder(&buf) for failure := range failures { obj := jsonObject{} obj.Severity = severity(config, failure) @@ -30,5 +31,5 @@ func (*NDJSON) Format(failures <-chan lint.Failure, config lint.Config) (string, return "", err } } - return "", nil + return buf.String(), nil } diff --git a/vendor/github.com/mgechev/revive/formatter/plain.go b/vendor/github.com/mgechev/revive/formatter/plain.go index 6e083bcfd0..09ebf6cdc8 100644 --- a/vendor/github.com/mgechev/revive/formatter/plain.go +++ b/vendor/github.com/mgechev/revive/formatter/plain.go @@ -1,6 +1,7 @@ package formatter import ( + "bytes" "fmt" "github.com/mgechev/revive/lint" @@ -19,8 +20,9 @@ func (*Plain) Name() string { // Format formats the failures gotten from the lint. 
func (*Plain) Format(failures <-chan lint.Failure, _ lint.Config) (string, error) { + var buf bytes.Buffer for failure := range failures { - fmt.Printf("%v: %s %s\n", failure.Position.Start, failure.Failure, "https://revive.run/r#"+failure.RuleName) + fmt.Fprintf(&buf, "%v: %s %s\n", failure.Position.Start, failure.Failure, "https://revive.run/r#"+failure.RuleName) } - return "", nil + return buf.String(), nil } diff --git a/vendor/github.com/mgechev/revive/formatter/sarif.go b/vendor/github.com/mgechev/revive/formatter/sarif.go index ee62adcc02..c42da73eb0 100644 --- a/vendor/github.com/mgechev/revive/formatter/sarif.go +++ b/vendor/github.com/mgechev/revive/formatter/sarif.go @@ -81,14 +81,14 @@ func (l *reviveRunLog) AddResult(failure lint.Failure) { } position := failure.Position filename := position.Start.Filename - line := positiveOrZero(position.Start.Line - 1) // https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html#def_line - column := positiveOrZero(position.Start.Column - 1) // https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html#def_column + line := positiveOrZero(position.Start.Line) // https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html#def_line + column := positiveOrZero(position.Start.Column) // https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html#def_column result := garif.NewResult(garif.NewMessageFromText(failure.Failure)) location := garif.NewLocation().WithURI(filename).WithLineColumn(line, column) result.Locations = append(result.Locations, location) result.RuleId = failure.RuleName - result.Level = l.rules[failure.RuleName].Severity + result.Level = garif.ResultLevel(l.rules[failure.RuleName].Severity) l.run.Results = append(l.run.Results, result) } diff --git a/vendor/github.com/mgechev/revive/formatter/unix.go b/vendor/github.com/mgechev/revive/formatter/unix.go index ef2f1613ac..e46f3c275f 100644 --- a/vendor/github.com/mgechev/revive/formatter/unix.go +++ b/vendor/github.com/mgechev/revive/formatter/unix.go @@ -1,6 +1,7 @@ package formatter import ( + "bytes" "fmt" "github.com/mgechev/revive/lint" @@ -8,7 +9,8 @@ import ( // Unix is an implementation of the Formatter interface // which formats the errors to a simple line based error format -// main.go:24:9: [errorf] should replace errors.New(fmt.Sprintf(...)) with fmt.Errorf(...) +// +// main.go:24:9: [errorf] should replace errors.New(fmt.Sprintf(...)) with fmt.Errorf(...) type Unix struct { Metadata lint.FormatterMetadata } @@ -20,8 +22,9 @@ func (*Unix) Name() string { // Format formats the failures gotten from the lint. 
func (*Unix) Format(failures <-chan lint.Failure, _ lint.Config) (string, error) { + var buf bytes.Buffer for failure := range failures { - fmt.Printf("%v: [%s] %s\n", failure.Position.Start, failure.RuleName, failure.Failure) + fmt.Fprintf(&buf, "%v: [%s] %s\n", failure.Position.Start, failure.RuleName, failure.Failure) } - return "", nil + return buf.String(), nil } diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/args.go b/vendor/github.com/mgechev/revive/internal/ifelse/args.go new file mode 100644 index 0000000000..c6e647e697 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/args.go @@ -0,0 +1,11 @@ +package ifelse + +// PreserveScope is a configuration argument that prevents suggestions +// that would enlarge variable scope +const PreserveScope = "preserveScope" + +// Args contains arguments common to the early-return, indent-error-flow +// and superfluous-else rules (currently just preserveScope) +type Args struct { + PreserveScope bool +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/branch.go b/vendor/github.com/mgechev/revive/internal/ifelse/branch.go new file mode 100644 index 0000000000..6e6036b899 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/branch.go @@ -0,0 +1,93 @@ +package ifelse + +import ( + "fmt" + "go/ast" + "go/token" +) + +// Branch contains information about a branch within an if-else chain. +type Branch struct { + BranchKind + Call // The function called at the end for kind Panic or Exit. + HasDecls bool // The branch has one or more declarations (at the top level block) +} + +// BlockBranch gets the Branch of an ast.BlockStmt. +func BlockBranch(block *ast.BlockStmt) Branch { + blockLen := len(block.List) + if blockLen == 0 { + return Empty.Branch() + } + + branch := StmtBranch(block.List[blockLen-1]) + branch.HasDecls = hasDecls(block) + return branch +} + +// StmtBranch gets the Branch of an ast.Stmt. +func StmtBranch(stmt ast.Stmt) Branch { + switch stmt := stmt.(type) { + case *ast.ReturnStmt: + return Return.Branch() + case *ast.BlockStmt: + return BlockBranch(stmt) + case *ast.BranchStmt: + switch stmt.Tok { + case token.BREAK: + return Break.Branch() + case token.CONTINUE: + return Continue.Branch() + case token.GOTO: + return Goto.Branch() + } + case *ast.ExprStmt: + fn, ok := ExprCall(stmt) + if !ok { + break + } + kind, ok := DeviatingFuncs[fn] + if ok { + return Branch{BranchKind: kind, Call: fn} + } + case *ast.EmptyStmt: + return Empty.Branch() + case *ast.LabeledStmt: + return StmtBranch(stmt.Stmt) + } + return Regular.Branch() +} + +// String returns a brief string representation +func (b Branch) String() string { + switch b.BranchKind { + case Panic, Exit: + return fmt.Sprintf("... 
%v()", b.Call) + default: + return b.BranchKind.String() + } +} + +// LongString returns a longer form string representation +func (b Branch) LongString() string { + switch b.BranchKind { + case Panic, Exit: + return fmt.Sprintf("call to %v function", b.Call) + default: + return b.BranchKind.LongString() + } +} + +func hasDecls(block *ast.BlockStmt) bool { + for _, stmt := range block.List { + switch stmt := stmt.(type) { + case *ast.DeclStmt: + return true + case *ast.AssignStmt: + if stmt.Tok == token.DEFINE { + return true + } + } + } + return false +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/branch_kind.go b/vendor/github.com/mgechev/revive/internal/ifelse/branch_kind.go new file mode 100644 index 0000000000..41601d1e1d --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/branch_kind.go @@ -0,0 +1,101 @@ +package ifelse + +// BranchKind is a classifier for if-else branches. It says whether the branch is empty, +// and whether the branch ends with a statement that deviates control flow. +type BranchKind int + +const ( + // Empty branches do nothing + Empty BranchKind = iota + + // Return branches return from the current function + Return + + // Continue branches continue a surrounding "for" loop + Continue + + // Break branches break a surrounding "for" loop + Break + + // Goto branches conclude with a "goto" statement + Goto + + // Panic branches panic the current function + Panic + + // Exit branches end the program + Exit + + // Regular branches do not fit any category above + Regular +) + +// IsEmpty tests if the branch is empty +func (k BranchKind) IsEmpty() bool { return k == Empty } + +// Returns tests if the branch returns from the current function +func (k BranchKind) Returns() bool { return k == Return } + +// Deviates tests if the control does not flow to the first +// statement following the if-else chain. +func (k BranchKind) Deviates() bool { + switch k { + case Empty, Regular: + return false + case Return, Continue, Break, Goto, Panic, Exit: + return true + default: + panic("invalid kind") + } +} + +// Branch returns a Branch with the given kind +func (k BranchKind) Branch() Branch { return Branch{BranchKind: k} } + +// String returns a brief string representation +func (k BranchKind) String() string { + switch k { + case Empty: + return "" + case Regular: + return "..." + case Return: + return "... return" + case Continue: + return "... continue" + case Break: + return "... break" + case Goto: + return "... goto" + case Panic: + return "... panic()" + case Exit: + return "... os.Exit()" + default: + panic("invalid kind") + } +} + +// LongString returns a longer form string representation +func (k BranchKind) LongString() string { + switch k { + case Empty: + return "an empty block" + case Regular: + return "a regular statement" + case Return: + return "a return statement" + case Continue: + return "a continue statement" + case Break: + return "a break statement" + case Goto: + return "a goto statement" + case Panic: + return "a function call that panics" + case Exit: + return "a function call that exits the program" + default: + panic("invalid kind") + } +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/chain.go b/vendor/github.com/mgechev/revive/internal/ifelse/chain.go new file mode 100644 index 0000000000..9891635ee1 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/chain.go @@ -0,0 +1,10 @@ +package ifelse + +// Chain contains information about an if-else chain. 
+type Chain struct { + If Branch // what happens at the end of the "if" block + Else Branch // what happens at the end of the "else" block + HasInitializer bool // is there an "if"-initializer somewhere in the chain? + HasPriorNonDeviating bool // is there a prior "if" block that does NOT deviate control flow? + AtBlockEnd bool // whether the chain is placed at the end of the surrounding block +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/doc.go b/vendor/github.com/mgechev/revive/internal/ifelse/doc.go new file mode 100644 index 0000000000..0aa2c98175 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/doc.go @@ -0,0 +1,6 @@ +// Package ifelse provides helpers for analysing the control flow in if-else chains, +// presently used by the following rules: +// - early-return +// - indent-error-flow +// - superfluous-else +package ifelse diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/func.go b/vendor/github.com/mgechev/revive/internal/ifelse/func.go new file mode 100644 index 0000000000..7ba3519184 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/func.go @@ -0,0 +1,51 @@ +package ifelse + +import ( + "fmt" + "go/ast" +) + +// Call contains the name of a function that deviates control flow. +type Call struct { + Pkg string // The package qualifier of the function, if not built-in. + Name string // The function name. +} + +// DeviatingFuncs lists known control flow deviating function calls. +var DeviatingFuncs = map[Call]BranchKind{ + {"os", "Exit"}: Exit, + {"log", "Fatal"}: Exit, + {"log", "Fatalf"}: Exit, + {"log", "Fatalln"}: Exit, + {"", "panic"}: Panic, + {"log", "Panic"}: Panic, + {"log", "Panicf"}: Panic, + {"log", "Panicln"}: Panic, +} + +// ExprCall gets the Call of an ExprStmt, if any. +func ExprCall(expr *ast.ExprStmt) (Call, bool) { + call, ok := expr.X.(*ast.CallExpr) + if !ok { + return Call{}, false + } + switch v := call.Fun.(type) { + case *ast.Ident: + return Call{Name: v.Name}, true + case *ast.SelectorExpr: + if ident, ok := v.X.(*ast.Ident); ok { + return Call{Name: v.Sel.Name, Pkg: ident.Name}, true + } + } + return Call{}, false +} + +// String returns the function name with package qualifier (if any) +func (f Call) String() string { + switch { + case f.Pkg != "": + return fmt.Sprintf("%s.%s", f.Pkg, f.Name) + default: + return f.Name + } +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/rule.go b/vendor/github.com/mgechev/revive/internal/ifelse/rule.go new file mode 100644 index 0000000000..07ad456b65 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/rule.go @@ -0,0 +1,105 @@ +package ifelse + +import ( + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" +) + +// Rule is an interface for linters operating on if-else chains +type Rule interface { + CheckIfElse(chain Chain, args Args) (failMsg string) +} + +// Apply evaluates the given Rule on if-else chains found within the given AST, +// and returns the failures. +// +// Note that in if-else chain with multiple "if" blocks, only the *last* one is checked, +// that is to say, given: +// +// if foo { +// ... +// } else if bar { +// ... +// } else { +// ... +// } +// +// Only the block following "bar" is linted. This is because the rules that use this function +// do not presently have anything to say about earlier blocks in the chain. 
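+//
+// A typical caller (editor's sketch, mirroring how the early-return rule below uses it)
+// delegates from its own Apply method:
+//
+//	return ifelse.Apply(r, file.AST, ifelse.TargetIf, args)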
+func Apply(rule Rule, node ast.Node, target Target, args lint.Arguments) []lint.Failure { + v := &visitor{rule: rule, target: target} + for _, arg := range args { + if arg == PreserveScope { + v.args.PreserveScope = true + } + } + ast.Walk(v, node) + return v.failures +} + +type visitor struct { + failures []lint.Failure + target Target + rule Rule + args Args +} + +func (v *visitor) Visit(node ast.Node) ast.Visitor { + block, ok := node.(*ast.BlockStmt) + if !ok { + return v + } + + for i, stmt := range block.List { + if ifStmt, ok := stmt.(*ast.IfStmt); ok { + v.visitChain(ifStmt, Chain{AtBlockEnd: i == len(block.List)-1}) + continue + } + ast.Walk(v, stmt) + } + return nil +} + +func (v *visitor) visitChain(ifStmt *ast.IfStmt, chain Chain) { + // look for other if-else chains nested inside this if { } block + ast.Walk(v, ifStmt.Body) + + if ifStmt.Else == nil { + // no else branch + return + } + + if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { + chain.HasInitializer = true + } + chain.If = BlockBranch(ifStmt.Body) + + switch elseBlock := ifStmt.Else.(type) { + case *ast.IfStmt: + if !chain.If.Deviates() { + chain.HasPriorNonDeviating = true + } + v.visitChain(elseBlock, chain) + case *ast.BlockStmt: + // look for other if-else chains nested inside this else { } block + ast.Walk(v, elseBlock) + + chain.Else = BlockBranch(elseBlock) + if failMsg := v.rule.CheckIfElse(chain, v.args); failMsg != "" { + if chain.HasInitializer { + // if statement has a := initializer, so we might need to move the assignment + // onto its own line in case the body references it + failMsg += " (move short variable declaration to its own line if necessary)" + } + v.failures = append(v.failures, lint.Failure{ + Confidence: 1, + Node: v.target.node(ifStmt), + Failure: failMsg, + }) + } + default: + panic("invalid node type for else") + } +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/target.go b/vendor/github.com/mgechev/revive/internal/ifelse/target.go new file mode 100644 index 0000000000..81ff1c3037 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/target.go @@ -0,0 +1,25 @@ +package ifelse + +import "go/ast" + +// Target decides what line/column should be indicated by the rule in question. +type Target int + +const ( + // TargetIf means the text refers to the "if" + TargetIf Target = iota + + // TargetElse means the text refers to the "else" + TargetElse +) + +func (t Target) node(ifStmt *ast.IfStmt) ast.Node { + switch t { + case TargetIf: + return ifStmt + case TargetElse: + return ifStmt.Else + default: + panic("bad target") + } +} diff --git a/vendor/github.com/mgechev/revive/lint/config.go b/vendor/github.com/mgechev/revive/lint/config.go index 2763058046..7e51a93c28 100644 --- a/vendor/github.com/mgechev/revive/lint/config.go +++ b/vendor/github.com/mgechev/revive/lint/config.go @@ -3,16 +3,45 @@ package lint // Arguments is type used for the arguments of a rule. type Arguments = []interface{} +// FileFilters is type used for modeling file filters to apply to rules. +type FileFilters = []*FileFilter + // RuleConfig is type used for the rule configuration. 
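// In TOML, a per-rule exclude might look like this (editor's sketch; BurntSushi/toml
// matches the key to the Exclude field below, and "TEST" is the special marker for
// _test.go files understood by ParseFileFilter):
//
//	[rule.add-constant]
//	Exclude = ["TEST", "**/*.pb.go"]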
type RuleConfig struct { Arguments Arguments Severity Severity Disabled bool + // Exclude - rule-level file excludes, TOML related (strings) + Exclude []string + // excludeFilters - regex-based file filters, initialized from Exclude + excludeFilters []*FileFilter +} + +// Initialize - should be called after reading from TOML file +func (rc *RuleConfig) Initialize() error { + for _, f := range rc.Exclude { + ff, err := ParseFileFilter(f) + if err != nil { + return err + } + rc.excludeFilters = append(rc.excludeFilters, ff) + } + return nil } // RulesConfig defines the config for all rules. type RulesConfig = map[string]RuleConfig +// MustExclude - checks if given filename `name` must be excluded +func (rc *RuleConfig) MustExclude(name string) bool { + for _, exclude := range rc.excludeFilters { + if exclude.MatchFileName(name) { + return true + } + } + return false +} + // DirectiveConfig is type used for the linter directive configuration. type DirectiveConfig struct { Severity Severity diff --git a/vendor/github.com/mgechev/revive/lint/doc.go b/vendor/github.com/mgechev/revive/lint/doc.go new file mode 100644 index 0000000000..7048adf4b6 --- /dev/null +++ b/vendor/github.com/mgechev/revive/lint/doc.go @@ -0,0 +1,2 @@ +// Package lint implements the linting machinery. +package lint diff --git a/vendor/github.com/mgechev/revive/lint/file.go b/vendor/github.com/mgechev/revive/lint/file.go index dcf0e608f6..23255304c5 100644 --- a/vendor/github.com/mgechev/revive/lint/file.go +++ b/vendor/github.com/mgechev/revive/lint/file.go @@ -102,6 +102,9 @@ func (f *File) lint(rules []Rule, config Config, failures chan Failure) { disabledIntervals := f.disabledIntervals(rules, mustSpecifyDisableReason, failures) for _, currentRule := range rules { ruleConfig := rulesConfig[currentRule.Name()] + if ruleConfig.MustExclude(f.Name) { + continue + } currentFailures := currentRule.Apply(f, ruleConfig.Arguments) for idx, failure := range currentFailures { if failure.RuleName == "" { diff --git a/vendor/github.com/mgechev/revive/lint/filefilter.go b/vendor/github.com/mgechev/revive/lint/filefilter.go new file mode 100644 index 0000000000..8da090b9cc --- /dev/null +++ b/vendor/github.com/mgechev/revive/lint/filefilter.go @@ -0,0 +1,128 @@ +package lint + +import ( + "fmt" + "regexp" + "strings" +) + +// FileFilter - file filter to exclude some files for rule +// supports whole +// 1. file/dir names : pkg/mypkg/my.go, +// 2. globs: **/*.pb.go, +// 3. regexes (~ prefix) ~-tmp\.\d+\.go +// 4. 
special test marker `TEST` - treats as `~_test\.go` +type FileFilter struct { + // raw definition of filter inside config + raw string + // don't care what was at start, will use regexes inside + rx *regexp.Regexp + // marks filter as matching everything + matchesAll bool + // marks filter as matching nothing + matchesNothing bool +} + +// ParseFileFilter - creates [FileFilter] for given raw filter +// if empty string, it matches nothing +// if `*`, or `~`, it matches everything +// since the regexp could be invalid, it may return its compilation error +func ParseFileFilter(rawFilter string) (*FileFilter, error) { + rawFilter = strings.TrimSpace(rawFilter) + result := new(FileFilter) + result.raw = rawFilter + result.matchesNothing = len(result.raw) == 0 + result.matchesAll = result.raw == "*" || result.raw == "~" + if !result.matchesAll && !result.matchesNothing { + if err := result.prepareRegexp(); err != nil { + return nil, err + } + } + return result, nil +} + +func (ff *FileFilter) String() string { return ff.raw } + +// MatchFileName - checks if file name matches filter +func (ff *FileFilter) MatchFileName(name string) bool { + if ff.matchesAll { + return true + } + if ff.matchesNothing { + return false + } + name = strings.ReplaceAll(name, "\\", "/") + return ff.rx.MatchString(name) +} + +var fileFilterInvalidGlobRegexp = regexp.MustCompile(`[^/]\*\*[^/]`) +var escapeRegexSymbols = ".+{}()[]^$" + +func (ff *FileFilter) prepareRegexp() error { + var err error + var src = ff.raw + if src == "TEST" { + src = "~_test\\.go" + } + if strings.HasPrefix(src, "~") { + ff.rx, err = regexp.Compile(src[1:]) + if err != nil { + return fmt.Errorf("invalid file filter [%s], regexp compile error: [%v]", ff.raw, err) + } + return nil + } + /* globs */ + if strings.Contains(src, "*") { + if fileFilterInvalidGlobRegexp.MatchString(src) { + return fmt.Errorf("invalid file filter [%s], invalid glob pattern", ff.raw) + } + var rxBuild strings.Builder + rxBuild.WriteByte('^') + wasStar := false + justDirGlob := false + for _, c := range src { + if c == '*' { + if wasStar { + rxBuild.WriteString(`[\s\S]*`) + wasStar = false + justDirGlob = true + continue + } + wasStar = true + continue + } + if wasStar { + rxBuild.WriteString("[^/]*") + wasStar = false + } + if strings.ContainsRune(escapeRegexSymbols, c) { + rxBuild.WriteByte('\\') + } + rxBuild.WriteRune(c) + if c == '/' && justDirGlob { + rxBuild.WriteRune('?') + } + justDirGlob = false + } + if wasStar { + rxBuild.WriteString("[^/]*") + } + rxBuild.WriteByte('$') + ff.rx, err = regexp.Compile(rxBuild.String()) + if err != nil { + return fmt.Errorf("invalid file filter [%s], regexp compile error after glob expand: [%v]", ff.raw, err) + } + return nil + } + + // it's a whole file mask, just escape dots and normalize separators + fillRx := src + fillRx = strings.ReplaceAll(fillRx, "\\", "/") + fillRx = strings.ReplaceAll(fillRx, ".", `\.`) + fillRx = "^" + fillRx + "$" + ff.rx, err = regexp.Compile(fillRx) + if err != nil { + return fmt.Errorf("invalid file filter [%s], regexp compile full path: [%v]", ff.raw, err) + } + return nil +} diff --git a/vendor/github.com/mgechev/revive/lint/utils.go b/vendor/github.com/mgechev/revive/lint/utils.go index 28657c6df0..6ccfb0ef29 100644 --- a/vendor/github.com/mgechev/revive/lint/utils.go +++ b/vendor/github.com/mgechev/revive/lint/utils.go @@ -6,7 +6,7 @@ import ( ) // Name returns a different name if it should be different.
-func Name(name string, whitelist, blacklist []string) (should string) { +func Name(name string, allowlist, blocklist []string) (should string) { // Fast path for simple cases: "_" and all lowercase. if name == "_" { return name @@ -57,12 +57,12 @@ func Name(name string, whitelist, blacklist []string) (should string) { // [w,i) is a word. word := string(runes[w:i]) ignoreInitWarnings := map[string]bool{} - for _, i := range whitelist { + for _, i := range allowlist { ignoreInitWarnings[i] = true } extraInits := map[string]bool{} - for _, i := range blacklist { + for _, i := range blocklist { extraInits[i] = true } @@ -71,6 +71,10 @@ func Name(name string, whitelist, blacklist []string) (should string) { if w == 0 && unicode.IsLower(runes[w]) { u = strings.ToLower(u) } + // Keep lowercase s for IDs + if u == "IDS" { + u = "IDs" + } // All the common initialisms are ASCII, // so we can replace the bytes exactly. copy(runes[w:], []rune(u)) @@ -99,6 +103,7 @@ var commonInitialisms = map[string]bool{ "HTTP": true, "HTTPS": true, "ID": true, + "IDS": true, "IP": true, "JSON": true, "LHS": true, diff --git a/vendor/github.com/mgechev/revive/rule/add-constant.go b/vendor/github.com/mgechev/revive/rule/add-constant.go index 414be38c39..86182623a9 100644 --- a/vendor/github.com/mgechev/revive/rule/add-constant.go +++ b/vendor/github.com/mgechev/revive/rule/add-constant.go @@ -3,6 +3,7 @@ package rule import ( "fmt" "go/ast" + "regexp" "strconv" "strings" "sync" @@ -17,13 +18,13 @@ const ( kindSTRING = "STRING" ) -type whiteList map[string]map[string]bool +type allowList map[string]map[string]bool -func newWhiteList() whiteList { +func newAllowList() allowList { return map[string]map[string]bool{kindINT: {}, kindFLOAT: {}, kindSTRING: {}} } -func (wl whiteList) add(kind, list string) { +func (wl allowList) add(kind, list string) { elems := strings.Split(list, ",") for _, e := range elems { wl[kind][e] = true @@ -32,8 +33,9 @@ func (wl whiteList) add(kind, list string) { // AddConstantRule suggests using constants instead of magic numbers and repeated string literals.
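// For example (editor's illustration): in fmt.Println("attempt", 3) the literal 3 is
// reported unless "3" is in the rule's allow list or fmt.Println matches one of the
// ignoreFuncs patterns handled below.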
type AddConstantRule struct { - whiteList whiteList - strLitLimit int + allowList allowList + ignoreFunctions []*regexp.Regexp + strLitLimit int sync.Mutex } @@ -47,7 +49,14 @@ func (r *AddConstantRule) Apply(file *lint.File, arguments lint.Arguments) []lin failures = append(failures, failure) } - w := lintAddConstantRule{onFailure: onFailure, strLits: make(map[string]int), strLitLimit: r.strLitLimit, whiteLst: r.whiteList} + w := &lintAddConstantRule{ + onFailure: onFailure, + strLits: make(map[string]int), + strLitLimit: r.strLitLimit, + allowList: r.allowList, + ignoreFunctions: r.ignoreFunctions, + structTags: make(map[*ast.BasicLit]struct{}), + } ast.Walk(w, file.AST) @@ -60,30 +69,93 @@ func (*AddConstantRule) Name() string { } type lintAddConstantRule struct { - onFailure func(lint.Failure) - strLits map[string]int - strLitLimit int - whiteLst whiteList + onFailure func(lint.Failure) + strLits map[string]int + strLitLimit int + allowList allowList + ignoreFunctions []*regexp.Regexp + structTags map[*ast.BasicLit]struct{} } -func (w lintAddConstantRule) Visit(node ast.Node) ast.Visitor { +func (w *lintAddConstantRule) Visit(node ast.Node) ast.Visitor { + if node == nil { + return nil + } + switch n := node.(type) { + case *ast.CallExpr: + w.checkFunc(n) + return nil case *ast.GenDecl: return nil // skip declarations case *ast.BasicLit: - switch kind := n.Kind.String(); kind { - case kindFLOAT, kindINT: - w.checkNumLit(kind, n) - case kindSTRING: - w.checkStrLit(n) + if !w.isStructTag(n) { + w.checkLit(n) + } + case *ast.StructType: + if n.Fields != nil { + for _, field := range n.Fields.List { + if field.Tag != nil { + w.structTags[field.Tag] = struct{}{} + } + } } } return w } -func (w lintAddConstantRule) checkStrLit(n *ast.BasicLit) { - if w.whiteLst[kindSTRING][n.Value] { +func (w *lintAddConstantRule) checkFunc(expr *ast.CallExpr) { + fName := w.getFuncName(expr) + + for _, arg := range expr.Args { + switch t := arg.(type) { + case *ast.CallExpr: + w.checkFunc(t) + case *ast.BasicLit: + if w.isIgnoredFunc(fName) { + continue + } + w.checkLit(t) + } + } +} + +func (*lintAddConstantRule) getFuncName(expr *ast.CallExpr) string { + switch f := expr.Fun.(type) { + case *ast.SelectorExpr: + switch prefix := f.X.(type) { + case *ast.Ident: + return prefix.Name + "." 
+ f.Sel.Name + } + case *ast.Ident: + return f.Name + } + + return "" +} + +func (w *lintAddConstantRule) checkLit(n *ast.BasicLit) { + switch kind := n.Kind.String(); kind { + case kindFLOAT, kindINT: + w.checkNumLit(kind, n) + case kindSTRING: + w.checkStrLit(n) + } +} + +func (w *lintAddConstantRule) isIgnoredFunc(fName string) bool { + for _, pattern := range w.ignoreFunctions { + if pattern.MatchString(fName) { + return true + } + } + + return false +} + +func (w *lintAddConstantRule) checkStrLit(n *ast.BasicLit) { + if w.allowList[kindSTRING][n.Value] { return } @@ -102,8 +174,8 @@ func (w lintAddConstantRule) checkStrLit(n *ast.BasicLit) { } } -func (w lintAddConstantRule) checkNumLit(kind string, n *ast.BasicLit) { - if w.whiteLst[kind][n.Value] { +func (w *lintAddConstantRule) checkNumLit(kind string, n *ast.BasicLit) { + if w.allowList[kind][n.Value] { return } @@ -115,15 +187,20 @@ func (w lintAddConstantRule) checkNumLit(kind string, n *ast.BasicLit) { }) } +func (w *lintAddConstantRule) isStructTag(n *ast.BasicLit) bool { + _, ok := w.structTags[n] + return ok +} + func (r *AddConstantRule) configure(arguments lint.Arguments) { r.Lock() defer r.Unlock() - if r.whiteList == nil { + if r.allowList == nil { r.strLitLimit = defaultStrLitLimit - r.whiteList = newWhiteList() + r.allowList = newAllowList() if len(arguments) > 0 { - args, ok := arguments[0].(map[string]interface{}) + args, ok := arguments[0].(map[string]any) if !ok { panic(fmt.Sprintf("Invalid argument to the add-constant rule. Expecting a k,v map, got %T", arguments[0])) } @@ -146,7 +223,7 @@ func (r *AddConstantRule) configure(arguments lint.Arguments) { if !ok { panic(fmt.Sprintf("Invalid argument to the add-constant rule, string expected. Got '%v' (%T)", v, v)) } - r.whiteList.add(kind, list) + r.allowList.add(kind, list) case "maxLitCount": sl, ok := v.(string) if !ok { @@ -158,6 +235,25 @@ func (r *AddConstantRule) configure(arguments lint.Arguments) { panic(fmt.Sprintf("Invalid argument to the add-constant rule, expecting string representation of an integer. Got '%v'", v)) } r.strLitLimit = limit + case "ignoreFuncs": + excludes, ok := v.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument to the ignoreFuncs parameter of add-constant rule, string expected. Got '%v' (%T)", v, v)) + } + + for _, exclude := range strings.Split(excludes, ",") { + exclude = strings.Trim(exclude, " ") + if exclude == "" { + panic("Invalid argument to the ignoreFuncs parameter of add-constant rule, expected regular expression must not be empty.") + } + + exp, err := regexp.Compile(exclude) + if err != nil { + panic(fmt.Sprintf("Invalid argument to the ignoreFuncs parameter of add-constant rule: regexp %q does not compile: %v", exclude, err)) + } + + r.ignoreFunctions = append(r.ignoreFunctions, exp) + } } } } diff --git a/vendor/github.com/mgechev/revive/rule/argument-limit.go b/vendor/github.com/mgechev/revive/rule/argument-limit.go index 8042da15e3..8120288fd5 100644 --- a/vendor/github.com/mgechev/revive/rule/argument-limit.go +++ b/vendor/github.com/mgechev/revive/rule/argument-limit.go @@ -14,10 +14,16 @@ type ArgumentsLimitRule struct { sync.Mutex } +const defaultArgumentsLimit = 8 + func (r *ArgumentsLimitRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.total == 0 { - checkNumberOfArguments(1, arguments, r.Name()) + if len(arguments) < 1 { + r.total = defaultArgumentsLimit + return + } total, ok := arguments[0].(int64) // Alt. 
non panicking version if !ok { @@ -25,7 +31,6 @@ func (r *ArgumentsLimitRule) configure(arguments lint.Arguments) { } r.total = int(total) } - r.Unlock() } // Apply applies the rule to given file. diff --git a/vendor/github.com/mgechev/revive/rule/banned-characters.go b/vendor/github.com/mgechev/revive/rule/banned-characters.go index 76fa2235a9..12997bae11 100644 --- a/vendor/github.com/mgechev/revive/rule/banned-characters.go +++ b/vendor/github.com/mgechev/revive/rule/banned-characters.go @@ -19,11 +19,11 @@ const bannedCharsRuleName = "banned-characters" func (r *BannedCharsRule) configure(arguments lint.Arguments) { r.Lock() - if r.bannedCharList == nil { + defer r.Unlock() + if r.bannedCharList == nil && len(arguments) > 0 { checkNumberOfArguments(1, arguments, bannedCharsRuleName) r.bannedCharList = r.getBannedCharsList(arguments) } - r.Unlock() } // Apply applies the rule to the given file. diff --git a/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go b/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go index a9c11a7d0b..1973faef87 100644 --- a/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go +++ b/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go @@ -16,10 +16,17 @@ type CognitiveComplexityRule struct { sync.Mutex } +const defaultMaxCognitiveComplexity = 7 + func (r *CognitiveComplexityRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.maxComplexity == 0 { - checkNumberOfArguments(1, arguments, r.Name()) + + if len(arguments) < 1 { + r.maxComplexity = defaultMaxCognitiveComplexity + return + } complexity, ok := arguments[0].(int64) if !ok { @@ -27,7 +34,6 @@ func (r *CognitiveComplexityRule) configure(arguments lint.Arguments) { } r.maxComplexity = int(complexity) } - r.Unlock() } // Apply applies the rule to given file. diff --git a/vendor/github.com/mgechev/revive/rule/comment-spacings.go b/vendor/github.com/mgechev/revive/rule/comment-spacings.go new file mode 100644 index 0000000000..2b8240ca58 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/comment-spacings.go @@ -0,0 +1,91 @@ +package rule + +import ( + "fmt" + "strings" + "sync" + + "github.com/mgechev/revive/lint" +) + +// CommentSpacingsRule checks whether there is a space between +// the comment symbol ( // ) and the start of the comment text +type CommentSpacingsRule struct { + allowList []string + sync.Mutex +} + +func (r *CommentSpacingsRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.allowList == nil { + r.allowList = []string{ + "//go:", + "//revive:", + "//nolint:", + } + + for _, arg := range arguments { + allow, ok := arg.(string) // Alt. non panicking version + if !ok { + panic(fmt.Sprintf("invalid argument %v for %s; expected string but got %T", arg, r.Name(), arg)) + } + r.allowList = append(r.allowList, `//`+allow+`:`) + } + } +} + +// Apply the rule.
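+// (Editor's illustration: a comment written //likeThis is flagged, while // likeThis
+// and //go:generate are accepted: a space or tab after // satisfies the rule, and
+// //go: is on the default allow list.)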
+func (r *CommentSpacingsRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + r.configure(args) + + var failures []lint.Failure + + for _, cg := range file.AST.Comments { + for _, comment := range cg.List { + commentLine := comment.Text + if len(commentLine) < 3 { + continue // nothing to do + } + + isMultiLineComment := commentLine[1] == '*' + isOK := commentLine[2] == '\n' + if isMultiLineComment && isOK { + continue + } + + isOK = (commentLine[2] == ' ') || (commentLine[2] == '\t') + if isOK { + continue + } + + if r.isAllowed(commentLine) { + continue + } + + failures = append(failures, lint.Failure{ + Node: comment, + Confidence: 1, + Category: "style", + Failure: "no space between comment delimiter and comment text", + }) + } + } + return failures +} + +// Name yields this rule name. +func (*CommentSpacingsRule) Name() string { + return "comment-spacings" +} + +func (r *CommentSpacingsRule) isAllowed(line string) bool { + for _, allow := range r.allowList { + if strings.HasPrefix(line, allow) { + return true + } + } + + return false +} diff --git a/vendor/github.com/mgechev/revive/rule/confusing-naming.go b/vendor/github.com/mgechev/revive/rule/confusing-naming.go index 34cdb907a8..febfd88245 100644 --- a/vendor/github.com/mgechev/revive/rule/confusing-naming.go +++ b/vendor/github.com/mgechev/revive/rule/confusing-naming.go @@ -27,10 +27,10 @@ type packages struct { func (ps *packages) methodNames(lp *lint.Package) pkgMethods { ps.mu.Lock() + defer ps.mu.Unlock() for _, pkg := range ps.pkgs { if pkg.pkg == lp { - ps.mu.Unlock() return pkg } } @@ -38,7 +38,6 @@ func (ps *packages) methodNames(lp *lint.Package) pkgMethods { pkgm := pkgMethods{pkg: lp, methods: make(map[string]map[string]*referenceMethod), mu: &sync.Mutex{}} ps.pkgs = append(ps.pkgs, pkgm) - ps.mu.Unlock() return pkgm } @@ -112,7 +111,7 @@ func checkMethodName(holder string, id *ast.Ident, w *lintConfusingNames) { pkgm.methods[holder] = make(map[string]*referenceMethod, 1) } - // update the black list + // update the block list if pkgm.methods[holder] == nil { println("no entry for '", holder, "'") } @@ -137,8 +136,11 @@ func getStructName(r *ast.FieldList) string { t := r.List[0].Type - if p, _ := t.(*ast.StarExpr); p != nil { // if a pointer receiver => dereference pointer receiver types - t = p.X + switch v := t.(type) { + case *ast.StarExpr: + t = v.X + case *ast.IndexExpr: + t = v.X } if p, _ := t.(*ast.Ident); p != nil { diff --git a/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go b/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go index 9abc95d67c..36cd641f74 100644 --- a/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go +++ b/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go @@ -11,7 +11,7 @@ import ( type ConstantLogicalExprRule struct{} // Apply applies the rule to given file. 
-func (r *ConstantLogicalExprRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { +func (*ConstantLogicalExprRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure onFailure := func(failure lint.Failure) { @@ -63,7 +63,7 @@ func (w *lintConstantLogicalExpr) Visit(node ast.Node) ast.Visitor { return w } -func (w *lintConstantLogicalExpr) isOperatorWithLogicalResult(t token.Token) bool { +func (*lintConstantLogicalExpr) isOperatorWithLogicalResult(t token.Token) bool { switch t { case token.LAND, token.LOR, token.EQL, token.LSS, token.GTR, token.NEQ, token.LEQ, token.GEQ: return true @@ -72,7 +72,7 @@ func (w *lintConstantLogicalExpr) isOperatorWithLogicalResult(t token.Token) boo return false } -func (w *lintConstantLogicalExpr) isEqualityOperator(t token.Token) bool { +func (*lintConstantLogicalExpr) isEqualityOperator(t token.Token) bool { switch t { case token.EQL, token.LEQ, token.GEQ: return true @@ -81,7 +81,7 @@ func (w *lintConstantLogicalExpr) isEqualityOperator(t token.Token) bool { return false } -func (w *lintConstantLogicalExpr) isInequalityOperator(t token.Token) bool { +func (*lintConstantLogicalExpr) isInequalityOperator(t token.Token) bool { switch t { case token.LSS, token.GTR, token.NEQ: return true diff --git a/vendor/github.com/mgechev/revive/rule/context-as-argument.go b/vendor/github.com/mgechev/revive/rule/context-as-argument.go index 3c400065e1..e0c8cfa5e9 100644 --- a/vendor/github.com/mgechev/revive/rule/context-as-argument.go +++ b/vendor/github.com/mgechev/revive/rule/context-as-argument.go @@ -82,7 +82,7 @@ func (w lintContextArguments) Visit(n ast.Node) ast.Visitor { func getAllowTypesFromArguments(args lint.Arguments) map[string]struct{} { allowTypesBefore := []string{} if len(args) >= 1 { - argKV, ok := args[0].(map[string]interface{}) + argKV, ok := args[0].(map[string]any) if !ok { panic(fmt.Sprintf("Invalid argument to the context-as-argument rule. Expecting a k,v map, got %T", args[0])) } diff --git a/vendor/github.com/mgechev/revive/rule/cyclomatic.go b/vendor/github.com/mgechev/revive/rule/cyclomatic.go index afd41818b8..9f6d50043d 100644 --- a/vendor/github.com/mgechev/revive/rule/cyclomatic.go +++ b/vendor/github.com/mgechev/revive/rule/cyclomatic.go @@ -17,10 +17,16 @@ type CyclomaticRule struct { sync.Mutex } +const defaultMaxCyclomaticComplexity = 10 + func (r *CyclomaticRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.maxComplexity == 0 { - checkNumberOfArguments(1, arguments, r.Name()) + if len(arguments) < 1 { + r.maxComplexity = defaultMaxCyclomaticComplexity + return + } complexity, ok := arguments[0].(int64) // Alt. non panicking version if !ok { @@ -28,7 +34,6 @@ func (r *CyclomaticRule) configure(arguments lint.Arguments) { } r.maxComplexity = int(complexity) } - r.Unlock() } // Apply applies the rule to given file. 
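A minimal sketch of the configure idiom now shared by argument-limit, cognitive-complexity, and cyclomatic above: unlock via defer, fall back to a package default when no argument is supplied, and only panic on a malformed argument (exampleRule and defaultLimit are illustrative names, not upstream identifiers):

package main

import (
	"fmt"
	"sync"
)

const defaultLimit = 10

type exampleRule struct {
	limit int
	sync.Mutex
}

func (r *exampleRule) configure(args []interface{}) {
	r.Lock()
	defer r.Unlock() // deferred so every return path unlocks

	if r.limit != 0 {
		return // already configured while linting an earlier file
	}
	if len(args) < 1 {
		r.limit = defaultLimit // default instead of panicking
		return
	}
	v, ok := args[0].(int64) // TOML integers decode as int64
	if !ok {
		panic(fmt.Sprintf("invalid argument %v (%T); expected int64", args[0], args[0]))
	}
	r.limit = int(v)
}

func main() {
	r := &exampleRule{}
	r.configure(nil)     // no configuration: falls back to defaultLimit
	fmt.Println(r.limit) // prints 10
}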
diff --git a/vendor/github.com/mgechev/revive/rule/datarace.go b/vendor/github.com/mgechev/revive/rule/datarace.go index 26fcadcdc9..39e96696ad 100644 --- a/vendor/github.com/mgechev/revive/rule/datarace.go +++ b/vendor/github.com/mgechev/revive/rule/datarace.go @@ -53,7 +53,7 @@ func (w lintDataRaces) Visit(n ast.Node) ast.Visitor { return nil } -func (w lintDataRaces) ExtractReturnIDs(fields []*ast.Field) map[*ast.Object]struct{} { +func (lintDataRaces) ExtractReturnIDs(fields []*ast.Field) map[*ast.Object]struct{} { r := map[*ast.Object]struct{}{} for _, f := range fields { for _, id := range f.Names { @@ -111,7 +111,7 @@ func (w lintFunctionForDataRaces) Visit(node ast.Node) ast.Visitor { return ok } - ids := pick(funcLit.Body, selectIDs, nil) + ids := pick(funcLit.Body, selectIDs) for _, id := range ids { id := id.(*ast.Ident) _, isRangeID := w.rangeIDs[id.Obj] diff --git a/vendor/github.com/mgechev/revive/rule/defer.go b/vendor/github.com/mgechev/revive/rule/defer.go index f8224fd4d1..adc6478aee 100644 --- a/vendor/github.com/mgechev/revive/rule/defer.go +++ b/vendor/github.com/mgechev/revive/rule/defer.go @@ -56,7 +56,7 @@ func (*DeferRule) allowFromArgs(args lint.Arguments) map[string]bool { return allow } - aa, ok := args[0].([]interface{}) + aa, ok := args[0].([]any) if !ok { panic(fmt.Sprintf("Invalid argument '%v' for 'defer' rule. Expecting []string, got %T", args[0], args[0])) } @@ -97,18 +97,21 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { w.newFailure("return in a defer function has no effect", n, 1.0, "logic", "return") } case *ast.CallExpr: - if !w.inADefer && isIdent(n.Fun, "recover") { + isCallToRecover := isIdent(n.Fun, "recover") + switch { + case !w.inADefer && isCallToRecover: // func fn() { recover() } // // confidence is not 1 because recover can be in a function that is deferred elsewhere w.newFailure("recover must be called inside a deferred function", n, 0.8, "logic", "recover") - } else if w.inADefer && !w.inAFuncLit && isIdent(n.Fun, "recover") { + case w.inADefer && !w.inAFuncLit && isCallToRecover: // defer helper(recover()) // // confidence is not truly 1 because this could be in a correctly-deferred func, // but it is very likely to be a misunderstanding of defer's behavior around arguments. w.newFailure("recover must be called inside a deferred function, this is executing recover immediately", n, 1, "logic", "immediate-recover") } + case *ast.DeferStmt: if isIdent(n.Call.Fun, "recover") { // defer recover() @@ -119,7 +122,12 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { } w.visitSubtree(n.Call.Fun, true, false, false) for _, a := range n.Call.Args { - w.visitSubtree(a, true, false, false) // check arguments, they should not contain recover() + switch a.(type) { + case *ast.FuncLit: + continue // too hard to analyze deferred calls with func literals args + default: + w.visitSubtree(a, true, false, false) // check arguments, they should not contain recover() + } } if w.inALoop { @@ -137,6 +145,7 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { } } } + return nil } diff --git a/vendor/github.com/mgechev/revive/rule/doc.go b/vendor/github.com/mgechev/revive/rule/doc.go new file mode 100644 index 0000000000..55bf6caa6f --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/doc.go @@ -0,0 +1,2 @@ +// Package rule implements revive's linting rules. 
+package rule diff --git a/vendor/github.com/mgechev/revive/rule/dot-imports.go b/vendor/github.com/mgechev/revive/rule/dot-imports.go index 25ff526cb5..6b877677db 100644 --- a/vendor/github.com/mgechev/revive/rule/dot-imports.go +++ b/vendor/github.com/mgechev/revive/rule/dot-imports.go @@ -1,16 +1,23 @@ package rule import ( + "fmt" "go/ast" + "sync" "github.com/mgechev/revive/lint" ) // DotImportsRule forbids dot imports except for allowed packages. -type DotImportsRule struct{} +type DotImportsRule struct { + sync.Mutex + allowedPackages allowPackages +} // Apply applies the rule to given file. -func (*DotImportsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { +func (r *DotImportsRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + var failures []lint.Failure fileAst := file.AST @@ -20,6 +27,7 @@ func (*DotImportsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { onFailure: func(failure lint.Failure) { failures = append(failures, failure) }, + allowPackages: r.allowedPackages, } ast.Walk(walker, fileAst) @@ -32,16 +40,49 @@ func (*DotImportsRule) Name() string { return "dot-imports" } +func (r *DotImportsRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.allowedPackages != nil { + return + } + + r.allowedPackages = make(allowPackages) + if len(arguments) == 0 { + return + } + + args, ok := arguments[0].(map[string]any) + if !ok { + panic(fmt.Sprintf("Invalid argument to the dot-imports rule. Expecting a k,v map, got %T", arguments[0])) + } + + if allowedPkgArg, ok := args["allowedPackages"]; ok { + if pkgs, ok := allowedPkgArg.([]any); ok { + for _, p := range pkgs { + if pkg, ok := p.(string); ok { + r.allowedPackages.add(pkg) + } else { + panic(fmt.Sprintf("Invalid argument to the dot-imports rule, string expected. Got '%v' (%T)", p, p)) + } + } + } else { + panic(fmt.Sprintf("Invalid argument to the dot-imports rule, []string expected. Got '%v' (%T)", allowedPkgArg, allowedPkgArg)) + } + } +} + type lintImports struct { - file *lint.File - fileAst *ast.File - onFailure func(lint.Failure) + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) + allowPackages allowPackages } func (w lintImports) Visit(_ ast.Node) ast.Visitor { - for i, is := range w.fileAst.Imports { - _ = i - if is.Name != nil && is.Name.Name == "." && !w.file.IsTest() { + for _, is := range w.fileAst.Imports { + if is.Name != nil && is.Name.Name == "." && !w.allowPackages.isAllowedPackage(is.Path.Value) { w.onFailure(lint.Failure{ Confidence: 1, Failure: "should not use dot imports", @@ -52,3 +93,14 @@ func (w lintImports) Visit(_ ast.Node) ast.Visitor { } return nil } + +type allowPackages map[string]struct{} + +func (ap allowPackages) add(pkg string) { + ap[fmt.Sprintf(`"%s"`, pkg)] = struct{}{} // import path strings are with double quotes +} + +func (ap allowPackages) isAllowedPackage(pkg string) bool { + _, allowed := ap[pkg] + return allowed +} diff --git a/vendor/github.com/mgechev/revive/rule/early-return.go b/vendor/github.com/mgechev/revive/rule/early-return.go index bfbf6717c8..9c04a1dbe9 100644 --- a/vendor/github.com/mgechev/revive/rule/early-return.go +++ b/vendor/github.com/mgechev/revive/rule/early-return.go @@ -1,25 +1,19 @@ package rule import ( - "go/ast" + "fmt" + "github.com/mgechev/revive/internal/ifelse" "github.com/mgechev/revive/lint" ) -// EarlyReturnRule lints given else constructs. +// EarlyReturnRule finds opportunities to reduce nesting by inverting +// the condition of an "if" block.
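+//
+// For example (editor's illustration), it reports
+//
+//	if cond { total += n } else { return err }
+//
+// and suggests inverting the condition:
+//
+//	if !cond { return err }
+//	total += n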
type EarlyReturnRule struct{} // Apply applies the rule to given file. -func (*EarlyReturnRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { - var failures []lint.Failure - - onFailure := func(failure lint.Failure) { - failures = append(failures, failure) - } - - w := lintEarlyReturnRule{onFailure: onFailure} - ast.Walk(w, file.AST) - return failures +func (e *EarlyReturnRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + return ifelse.Apply(e, file.AST, ifelse.TargetIf, args) } // Name returns the rule name. @@ -27,52 +21,31 @@ func (*EarlyReturnRule) Name() string { return "early-return" } -type lintEarlyReturnRule struct { - onFailure func(lint.Failure) -} - -func (w lintEarlyReturnRule) Visit(node ast.Node) ast.Visitor { - switch n := node.(type) { - case *ast.IfStmt: - if n.Else == nil { - // no else branch - return w - } - - elseBlock, ok := n.Else.(*ast.BlockStmt) - if !ok { - // is if-else-if - return w - } - - lenElseBlock := len(elseBlock.List) - if lenElseBlock < 1 { - // empty else block, continue (there is another rule that warns on empty blocks) - return w - } +// CheckIfElse evaluates the rule against an ifelse.Chain. +func (*EarlyReturnRule) CheckIfElse(chain ifelse.Chain, args ifelse.Args) (failMsg string) { + if !chain.Else.Deviates() { + // this rule only applies if the else-block deviates control flow + return + } - lenThenBlock := len(n.Body.List) - if lenThenBlock < 1 { - // then block is empty thus the stmt can be simplified - w.onFailure(lint.Failure{ - Confidence: 1, - Node: n, - Failure: "if c { } else {... return} can be simplified to if !c { ... return }", - }) + if chain.HasPriorNonDeviating && !chain.If.IsEmpty() { + // if we de-indent this block then a previous branch + // might flow into it, affecting program behaviour + return + } - return w - } + if chain.If.Deviates() { + // avoid overlapping with superfluous-else + return + } - _, lastThenStmtIsReturn := n.Body.List[lenThenBlock-1].(*ast.ReturnStmt) - _, lastElseStmtIsReturn := elseBlock.List[lenElseBlock-1].(*ast.ReturnStmt) - if lastElseStmtIsReturn && !lastThenStmtIsReturn { - w.onFailure(lint.Failure{ - Confidence: 1, - Node: n, - Failure: "if c {...} else {... return } can be simplified to if !c { ... return } ...", - }) - } + if args.PreserveScope && !chain.AtBlockEnd && (chain.HasInitializer || chain.If.HasDecls) { + // avoid increasing variable scope + return } - return w + if chain.If.IsEmpty() { + return fmt.Sprintf("if c { } else { %[1]v } can be simplified to if !c { %[1]v }", chain.Else) + } + return fmt.Sprintf("if c { ... 
} else { %[1]v } can be simplified to if !c { %[1]v } ...", chain.Else) } diff --git a/vendor/github.com/mgechev/revive/rule/empty-block.go b/vendor/github.com/mgechev/revive/rule/empty-block.go index 8a4a0fef19..25a052a0ef 100644 --- a/vendor/github.com/mgechev/revive/rule/empty-block.go +++ b/vendor/github.com/mgechev/revive/rule/empty-block.go @@ -40,6 +40,16 @@ func (w lintEmptyBlock) Visit(node ast.Node) ast.Visitor { case *ast.FuncLit: w.ignore[n.Body] = true return w + case *ast.SelectStmt: + w.ignore[n.Body] = true + return w + case *ast.ForStmt: + if len(n.Body.List) == 0 && n.Init == nil && n.Post == nil && n.Cond != nil { + if _, isCall := n.Cond.(*ast.CallExpr); isCall { + w.ignore[n.Body] = true + return w + } + } case *ast.RangeStmt: if len(n.Body.List) == 0 { w.onFailure(lint.Failure{ diff --git a/vendor/github.com/mgechev/revive/rule/enforce-map-style.go b/vendor/github.com/mgechev/revive/rule/enforce-map-style.go new file mode 100644 index 0000000000..36ac2374c2 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/enforce-map-style.go @@ -0,0 +1,164 @@ +package rule + +import ( + "fmt" + "go/ast" + "sync" + + "github.com/mgechev/revive/lint" +) + +type enforceMapStyleType string + +const ( + enforceMapStyleTypeAny enforceMapStyleType = "any" + enforceMapStyleTypeMake enforceMapStyleType = "make" + enforceMapStyleTypeLiteral enforceMapStyleType = "literal" +) + +func mapStyleFromString(s string) (enforceMapStyleType, error) { + switch s { + case string(enforceMapStyleTypeAny), "": + return enforceMapStyleTypeAny, nil + case string(enforceMapStyleTypeMake): + return enforceMapStyleTypeMake, nil + case string(enforceMapStyleTypeLiteral): + return enforceMapStyleTypeLiteral, nil + default: + return enforceMapStyleTypeAny, fmt.Errorf( + "invalid map style: %s (expecting one of %v)", + s, + []enforceMapStyleType{ + enforceMapStyleTypeAny, + enforceMapStyleTypeMake, + enforceMapStyleTypeLiteral, + }, + ) + } +} + +// EnforceMapStyleRule implements a rule to enforce `make(map[type]type)` over `map[type]type{}`. +type EnforceMapStyleRule struct { + configured bool + enforceMapStyle enforceMapStyleType + sync.Mutex +} + +func (r *EnforceMapStyleRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.configured { + return + } + r.configured = true + + if len(arguments) < 1 { + r.enforceMapStyle = enforceMapStyleTypeAny + return + } + + enforceMapStyle, ok := arguments[0].(string) + if !ok { + panic(fmt.Sprintf("Invalid argument '%v' for 'enforce-map-style' rule. Expecting string, got %T", arguments[0], arguments[0])) + } + + var err error + r.enforceMapStyle, err = mapStyleFromString(enforceMapStyle) + + if err != nil { + panic(fmt.Sprintf("Invalid argument to the enforce-map-style rule: %v", err)) + } +} + +// Apply applies the rule to given file. 
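+// (Editor's illustration: with style "make", m := map[string]int{} is flagged and
+// make(map[string]int) is suggested; with style "literal" the reverse holds. A sized
+// call such as make(map[string]int, 10) is never flagged, because only single-argument
+// make calls are inspected below.)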
+func (r *EnforceMapStyleRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + + if r.enforceMapStyle == enforceMapStyleTypeAny { + // this linter is not configured + return nil + } + + var failures []lint.Failure + + astFile := file.AST + ast.Inspect(astFile, func(n ast.Node) bool { + switch v := n.(type) { + case *ast.CompositeLit: + if r.enforceMapStyle != enforceMapStyleTypeMake { + return true + } + + if !r.isMapType(v.Type) { + return true + } + + if len(v.Elts) > 0 { + // not an empty map + return true + } + + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: v, + Category: "style", + Failure: "use make(map[type]type) instead of map[type]type{}", + }) + case *ast.CallExpr: + if r.enforceMapStyle != enforceMapStyleTypeLiteral { + // skip any function calls, even if it's make(map[type]type) + // we don't want to report it if literals are not enforced + return true + } + + ident, ok := v.Fun.(*ast.Ident) + if !ok || ident.Name != "make" { + return true + } + + if len(v.Args) != 1 { + // skip make(map[type]type, size) and invalid empty declarations + return true + } + + if !r.isMapType(v.Args[0]) { + // not a map type + return true + } + + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: v.Args[0], + Category: "style", + Failure: "use map[type]type{} instead of make(map[type]type)", + }) + } + return true + }) + + return failures +} + +// Name returns the rule name. +func (*EnforceMapStyleRule) Name() string { + return "enforce-map-style" +} + +func (r *EnforceMapStyleRule) isMapType(v ast.Expr) bool { + switch t := v.(type) { + case *ast.MapType: + return true + case *ast.Ident: + if t.Obj == nil { + return false + } + typeSpec, ok := t.Obj.Decl.(*ast.TypeSpec) + if !ok { + return false + } + return r.isMapType(typeSpec.Type) + default: + return false + } +} diff --git a/vendor/github.com/mgechev/revive/rule/enforce-repeated-arg-type-style.go b/vendor/github.com/mgechev/revive/rule/enforce-repeated-arg-type-style.go new file mode 100644 index 0000000000..067082b1b0 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/enforce-repeated-arg-type-style.go @@ -0,0 +1,191 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/types" + "sync" + + "github.com/mgechev/revive/lint" +) + +type enforceRepeatedArgTypeStyleType string + +const ( + enforceRepeatedArgTypeStyleTypeAny enforceRepeatedArgTypeStyleType = "any" + enforceRepeatedArgTypeStyleTypeShort enforceRepeatedArgTypeStyleType = "short" + enforceRepeatedArgTypeStyleTypeFull enforceRepeatedArgTypeStyleType = "full" +) + +func repeatedArgTypeStyleFromString(s string) enforceRepeatedArgTypeStyleType { + switch s { + case string(enforceRepeatedArgTypeStyleTypeAny), "": + return enforceRepeatedArgTypeStyleTypeAny + case string(enforceRepeatedArgTypeStyleTypeShort): + return enforceRepeatedArgTypeStyleTypeShort + case string(enforceRepeatedArgTypeStyleTypeFull): + return enforceRepeatedArgTypeStyleTypeFull + default: + err := fmt.Errorf( + "invalid repeated arg type style: %s (expecting one of %v)", + s, + []enforceRepeatedArgTypeStyleType{ + enforceRepeatedArgTypeStyleTypeAny, + enforceRepeatedArgTypeStyleTypeShort, + enforceRepeatedArgTypeStyleTypeFull, + }, + ) + + panic(fmt.Sprintf("Invalid argument to the enforce-repeated-arg-type-style rule: %v", err)) + } +} + +// EnforceRepeatedArgTypeStyleRule implements a rule to enforce repeated argument type style. 
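+// (Editor's illustration: with funcArgStyle "full", func f(a, b int) is flagged because
+// each argument should repeat its type; with "short", func f(a int, b int) is flagged
+// because the repeated int can be collapsed into func f(a, b int).)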
+type EnforceRepeatedArgTypeStyleRule struct { + configured bool + funcArgStyle enforceRepeatedArgTypeStyleType + funcRetValStyle enforceRepeatedArgTypeStyleType + + sync.Mutex +} + +func (r *EnforceRepeatedArgTypeStyleRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.configured { + return + } + r.configured = true + + r.funcArgStyle = enforceRepeatedArgTypeStyleTypeAny + r.funcRetValStyle = enforceRepeatedArgTypeStyleTypeAny + + if len(arguments) == 0 { + return + } + + switch funcArgStyle := arguments[0].(type) { + case string: + r.funcArgStyle = repeatedArgTypeStyleFromString(funcArgStyle) + r.funcRetValStyle = repeatedArgTypeStyleFromString(funcArgStyle) + case map[string]any: // expecting map[string]string + for k, v := range funcArgStyle { + switch k { + case "funcArgStyle": + val, ok := v.(string) + if !ok { + panic(fmt.Sprintf("Invalid map value type for 'enforce-repeated-arg-type-style' rule. Expecting string, got %T", v)) + } + r.funcArgStyle = repeatedArgTypeStyleFromString(val) + case "funcRetValStyle": + val, ok := v.(string) + if !ok { + panic(fmt.Sprintf("Invalid map value '%v' for 'enforce-repeated-arg-type-style' rule. Expecting string, got %T", v, v)) + } + r.funcRetValStyle = repeatedArgTypeStyleFromString(val) + default: + panic(fmt.Sprintf("Invalid map key for 'enforce-repeated-arg-type-style' rule. Expecting 'funcArgStyle' or 'funcRetValStyle', got %v", k)) + } + } + default: + panic(fmt.Sprintf("Invalid argument '%v' for 'enforce-repeated-arg-type-style' rule. Expecting string or map[string]string, got %T", arguments[0], arguments[0])) + } +} + +// Apply applies the rule to a given file. +func (r *EnforceRepeatedArgTypeStyleRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + + if r.funcArgStyle == enforceRepeatedArgTypeStyleTypeAny && r.funcRetValStyle == enforceRepeatedArgTypeStyleTypeAny { + // This linter is not configured, return no failures.
+ return nil + } + + var failures []lint.Failure + + err := file.Pkg.TypeCheck() + if err != nil { + // the file has other issues + return nil + } + typesInfo := file.Pkg.TypesInfo() + + astFile := file.AST + ast.Inspect(astFile, func(n ast.Node) bool { + switch fn := n.(type) { + case *ast.FuncDecl: + if r.funcArgStyle == enforceRepeatedArgTypeStyleTypeFull { + if fn.Type.Params != nil { + for _, field := range fn.Type.Params.List { + if len(field.Names) > 1 { + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: field, + Category: "style", + Failure: "argument types should not be omitted", + }) + } + } + } + } + + if r.funcArgStyle == enforceRepeatedArgTypeStyleTypeShort { + var prevType ast.Expr + if fn.Type.Params != nil { + for _, field := range fn.Type.Params.List { + if types.Identical(typesInfo.Types[field.Type].Type, typesInfo.Types[prevType].Type) { + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: field, + Category: "style", + Failure: "repeated argument type can be omitted", + }) + } + prevType = field.Type + } + } + } + + if r.funcRetValStyle == enforceRepeatedArgTypeStyleTypeFull { + if fn.Type.Results != nil { + for _, field := range fn.Type.Results.List { + if len(field.Names) > 1 { + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: field, + Category: "style", + Failure: "return types should not be omitted", + }) + } + } + } + } + + if r.funcRetValStyle == enforceRepeatedArgTypeStyleTypeShort { + var prevType ast.Expr + if fn.Type.Results != nil { + for _, field := range fn.Type.Results.List { + if field.Names != nil && types.Identical(typesInfo.Types[field.Type].Type, typesInfo.Types[prevType].Type) { + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: field, + Category: "style", + Failure: "repeated return type can be omitted", + }) + } + prevType = field.Type + } + } + } + } + return true + }) + + return failures +} + +// Name returns the name of the linter rule. +func (*EnforceRepeatedArgTypeStyleRule) Name() string { + return "enforce-repeated-arg-type-style" +} diff --git a/vendor/github.com/mgechev/revive/rule/enforce-slice-style.go b/vendor/github.com/mgechev/revive/rule/enforce-slice-style.go new file mode 100644 index 0000000000..abaf20be0e --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/enforce-slice-style.go @@ -0,0 +1,193 @@ +package rule + +import ( + "fmt" + "go/ast" + "sync" + + "github.com/mgechev/revive/lint" +) + +type enforceSliceStyleType string + +const ( + enforceSliceStyleTypeAny enforceSliceStyleType = "any" + enforceSliceStyleTypeMake enforceSliceStyleType = "make" + enforceSliceStyleTypeLiteral enforceSliceStyleType = "literal" +) + +func sliceStyleFromString(s string) (enforceSliceStyleType, error) { + switch s { + case string(enforceSliceStyleTypeAny), "": + return enforceSliceStyleTypeAny, nil + case string(enforceSliceStyleTypeMake): + return enforceSliceStyleTypeMake, nil + case string(enforceSliceStyleTypeLiteral): + return enforceSliceStyleTypeLiteral, nil + default: + return enforceSliceStyleTypeAny, fmt.Errorf( + "invalid slice style: %s (expecting one of %v)", + s, + []enforceSliceStyleType{ + enforceSliceStyleTypeAny, + enforceSliceStyleTypeMake, + enforceSliceStyleTypeLiteral, + }, + ) + } +} + +// EnforceSliceStyleRule implements a rule to enforce `make([]type)` over `[]type{}`. 
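As with the map-style rule above, a short sketch on assumed example code of what enforce-slice-style reports; both of its failure messages also point at a nil slice declaration as the neutral alternative.

package main

import "fmt"

func main() {
	a := []int{}        // flagged when enforce-slice-style is "make"
	b := make([]int, 0) // flagged when enforce-slice-style is "literal"
	var c []int         // nil slice declaration, acceptable in both modes
	fmt.Println(len(a), len(b), len(c))
}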
+type EnforceSliceStyleRule struct { + configured bool + enforceSliceStyle enforceSliceStyleType + sync.Mutex +} + +func (r *EnforceSliceStyleRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.configured { + return + } + r.configured = true + + if len(arguments) < 1 { + r.enforceSliceStyle = enforceSliceStyleTypeAny + return + } + + enforceSliceStyle, ok := arguments[0].(string) + if !ok { + panic(fmt.Sprintf("Invalid argument '%v' for 'enforce-slice-style' rule. Expecting string, got %T", arguments[0], arguments[0])) + } + + var err error + r.enforceSliceStyle, err = sliceStyleFromString(enforceSliceStyle) + + if err != nil { + panic(fmt.Sprintf("Invalid argument to the enforce-slice-style rule: %v", err)) + } +} + +// Apply applies the rule to given file. +func (r *EnforceSliceStyleRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + + if r.enforceSliceStyle == enforceSliceStyleTypeAny { + // this linter is not configured + return nil + } + + var failures []lint.Failure + + astFile := file.AST + ast.Inspect(astFile, func(n ast.Node) bool { + switch v := n.(type) { + case *ast.CompositeLit: + if r.enforceSliceStyle != enforceSliceStyleTypeMake { + return true + } + + if !r.isSliceType(v.Type) { + return true + } + + if len(v.Elts) > 0 { + // not an empty slice + return true + } + + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: v, + Category: "style", + Failure: "use make([]type) instead of []type{} (or declare nil slice)", + }) + case *ast.CallExpr: + if r.enforceSliceStyle != enforceSliceStyleTypeLiteral { + // skip any function calls, even if it's make([]type) + // we don't want to report it if literals are not enforced + return true + } + + ident, ok := v.Fun.(*ast.Ident) + if !ok || ident.Name != "make" { + return true + } + + if len(v.Args) < 2 { + // skip invalid make declarations + return true + } + + if !r.isSliceType(v.Args[0]) { + // not a slice type + return true + } + + arg, ok := v.Args[1].(*ast.BasicLit) + if !ok { + // skip invalid make declarations + return true + } + + if arg.Value != "0" { + // skip slice with non-zero size + return true + } + + if len(v.Args) > 2 { + arg, ok := v.Args[2].(*ast.BasicLit) + if !ok { + // skip invalid make declarations + return true + } + + if arg.Value != "0" { + // skip non-zero capacity slice + return true + } + } + + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: v.Args[0], + Category: "style", + Failure: "use []type{} instead of make([]type, 0) (or declare nil slice)", + }) + } + return true + }) + + return failures +} + +// Name returns the rule name. 
+func (*EnforceSliceStyleRule) Name() string { + return "enforce-slice-style" +} + +func (r *EnforceSliceStyleRule) isSliceType(v ast.Expr) bool { + switch t := v.(type) { + case *ast.ArrayType: + if t.Len != nil { + // array + return false + } + // slice + return true + case *ast.Ident: + if t.Obj == nil { + return false + } + typeSpec, ok := t.Obj.Decl.(*ast.TypeSpec) + if !ok { + return false + } + return r.isSliceType(typeSpec.Type) + default: + return false + } +} diff --git a/vendor/github.com/mgechev/revive/rule/file-header.go b/vendor/github.com/mgechev/revive/rule/file-header.go index 76f548f51f..a7d69ff2b1 100644 --- a/vendor/github.com/mgechev/revive/rule/file-header.go +++ b/vendor/github.com/mgechev/revive/rule/file-header.go @@ -21,21 +21,28 @@ var ( func (r *FileHeaderRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.header == "" { - checkNumberOfArguments(1, arguments, r.Name()) + if len(arguments) < 1 { + return + } + var ok bool r.header, ok = arguments[0].(string) if !ok { - panic(fmt.Sprintf("invalid argument for \"file-header\" rule: first argument should be a string, got %T", arguments[0])) + panic(fmt.Sprintf("invalid argument for \"file-header\" rule: argument should be a string, got %T", arguments[0])) } } - r.Unlock() } // Apply applies the rule to given file. func (r *FileHeaderRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { r.configure(arguments) + if r.header == "" { + return nil + } + failure := []lint.Failure{ { Node: file.AST, diff --git a/vendor/github.com/mgechev/revive/rule/flag-param.go b/vendor/github.com/mgechev/revive/rule/flag-param.go index 19a05f9fea..f9bfb712c4 100644 --- a/vendor/github.com/mgechev/revive/rule/flag-param.go +++ b/vendor/github.com/mgechev/revive/rule/flag-param.go @@ -88,7 +88,7 @@ func (w conditionVisitor) Visit(node ast.Node) ast.Visitor { return false } - uses := pick(ifStmt.Cond, fselect, nil) + uses := pick(ifStmt.Cond, fselect) if len(uses) < 1 { return w diff --git a/vendor/github.com/mgechev/revive/rule/function-length.go b/vendor/github.com/mgechev/revive/rule/function-length.go index d600d7a2a1..fd65884e97 100644 --- a/vendor/github.com/mgechev/revive/rule/function-length.go +++ b/vendor/github.com/mgechev/revive/rule/function-length.go @@ -19,13 +19,13 @@ type FunctionLength struct { func (r *FunctionLength) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if !r.configured { maxStmt, maxLines := r.parseArguments(arguments) r.maxStmt = int(maxStmt) r.maxLines = int(maxLines) r.configured = true } - r.Unlock() } // Apply applies the rule to given file. 
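Several of the hunks that follow (function-length here, then function-result-limit, line-length-limit, max-control-nesting, max-public-structs) share one behavioral change: a rule left unconfigured now falls back to a built-in default instead of panicking in checkNumberOfArguments. A minimal sketch of the shared pattern, with hypothetical names:

package main

import "fmt"

const defaultLimit = 3 // hypothetical stand-in for e.g. defaultResultsLimit

func parseLimit(args []any) int64 {
	if len(args) == 0 {
		return defaultLimit // new behavior: default instead of panic
	}
	limit, ok := args[0].(int64)
	if !ok {
		panic(fmt.Sprintf("need int64 but got %T", args[0]))
	}
	return limit
}

func main() {
	fmt.Println(parseLimit(nil))             // 3
	fmt.Println(parseLimit([]any{int64(5)})) // 5
}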
@@ -53,7 +53,14 @@ func (*FunctionLength) Name() string { return "function-length" } +const defaultFuncStmtsLimit = 50 +const defaultFuncLinesLimit = 75 + func (*FunctionLength) parseArguments(arguments lint.Arguments) (maxStmt, maxLines int64) { + if len(arguments) == 0 { + return defaultFuncStmtsLimit, defaultFuncLinesLimit + } + if len(arguments) != 2 { panic(fmt.Sprintf(`invalid configuration for "function-length" rule, expected 2 arguments but got %d`, len(arguments))) } @@ -164,7 +171,7 @@ func (w lintFuncLength) countFuncLitStmts(stmt ast.Expr) int { return 0 } -func (w lintFuncLength) countBodyListStmts(t interface{}) int { +func (w lintFuncLength) countBodyListStmts(t any) int { i := reflect.ValueOf(t).Elem().FieldByName(`Body`).Elem().FieldByName(`List`).Interface() return w.countStmts(i.([]ast.Stmt)) } diff --git a/vendor/github.com/mgechev/revive/rule/function-result-limit.go b/vendor/github.com/mgechev/revive/rule/function-result-limit.go index 5d2b87316a..6a0748011d 100644 --- a/vendor/github.com/mgechev/revive/rule/function-result-limit.go +++ b/vendor/github.com/mgechev/revive/rule/function-result-limit.go @@ -14,11 +14,16 @@ type FunctionResultsLimitRule struct { sync.Mutex } +const defaultResultsLimit = 3 + func (r *FunctionResultsLimitRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.max == 0 { - checkNumberOfArguments(1, arguments, r.Name()) - + if len(arguments) < 1 { + r.max = defaultResultsLimit + return + } max, ok := arguments[0].(int64) // Alt. non panicking version if !ok { panic(fmt.Sprintf(`invalid value passed as return results number to the "function-result-limit" rule; need int64 but got %T`, arguments[0])) @@ -28,7 +33,6 @@ func (r *FunctionResultsLimitRule) configure(arguments lint.Arguments) { } r.max = int(max) } - r.Unlock() } // Apply applies the rule to given file. diff --git a/vendor/github.com/mgechev/revive/rule/identical-branches.go b/vendor/github.com/mgechev/revive/rule/identical-branches.go index b1a69097f6..9222c8a9c5 100644 --- a/vendor/github.com/mgechev/revive/rule/identical-branches.go +++ b/vendor/github.com/mgechev/revive/rule/identical-branches.go @@ -63,8 +63,10 @@ func (lintIdenticalBranches) identicalBranches(branches []*ast.BlockStmt) bool { } ref := gofmt(branches[0]) + refSize := len(branches[0].List) for i := 1; i < len(branches); i++ { - if gofmt(branches[i]) != ref { + currentSize := len(branches[i].List) + if currentSize != refSize || gofmt(branches[i]) != ref { return false } } diff --git a/vendor/github.com/mgechev/revive/rule/import-alias-naming.go b/vendor/github.com/mgechev/revive/rule/import-alias-naming.go new file mode 100644 index 0000000000..a6d096c8b2 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/import-alias-naming.go @@ -0,0 +1,126 @@ +package rule + +import ( + "fmt" + "regexp" + "sync" + + "github.com/mgechev/revive/lint" +) + +// ImportAliasNamingRule lints import alias naming. 
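A usage sketch, with assumed example imports, for the new import-alias-naming rule declared below: under the default allow pattern ^[a-z][a-z0-9]{0,}$, lowercase alphanumeric aliases pass, anything else is reported, and blank and dot imports are deliberately left to other rules.

package main

import (
	gourl "net/url"    // passes the default allow regexp
	url_pkg "net/http" // would be flagged: '_' is outside [a-z][a-z0-9]*
)

var (
	_ = gourl.QueryEscape
	_ = url_pkg.StatusOK
)

func main() {}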
+type ImportAliasNamingRule struct { + configured bool + allowRegexp *regexp.Regexp + denyRegexp *regexp.Regexp + sync.Mutex +} + +const defaultImportAliasNamingAllowRule = "^[a-z][a-z0-9]{0,}$" + +var defaultImportAliasNamingAllowRegexp = regexp.MustCompile(defaultImportAliasNamingAllowRule) + +func (r *ImportAliasNamingRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + if r.configured { + return + } + + if len(arguments) == 0 { + r.allowRegexp = defaultImportAliasNamingAllowRegexp + return + } + + switch namingRule := arguments[0].(type) { + case string: + r.setAllowRule(namingRule) + case map[string]any: // expecting map[string]string + for k, v := range namingRule { + switch k { + case "allowRegex": + r.setAllowRule(v) + case "denyRegex": + r.setDenyRule(v) + default: + panic(fmt.Sprintf("Invalid map key for 'import-alias-naming' rule. Expecting 'allowRegex' or 'denyRegex', got %v", k)) + } + } + default: + panic(fmt.Sprintf("Invalid argument '%v' for 'import-alias-naming' rule. Expecting string or map[string]string, got %T", arguments[0], arguments[0])) + } + + if r.allowRegexp == nil && r.denyRegexp == nil { + r.allowRegexp = defaultImportAliasNamingAllowRegexp + } +} + +// Apply applies the rule to given file. +func (r *ImportAliasNamingRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + + var failures []lint.Failure + + for _, is := range file.AST.Imports { + path := is.Path + if path == nil { + continue + } + + alias := is.Name + if alias == nil || alias.Name == "_" || alias.Name == "." { // "_" and "." are special types of import aliases and should be processed by another linter rule + continue + } + + if r.allowRegexp != nil && !r.allowRegexp.MatchString(alias.Name) { + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("import name (%s) must match the regular expression: %s", alias.Name, r.allowRegexp.String()), + Node: alias, + Category: "imports", + }) + } + + if r.denyRegexp != nil && r.denyRegexp.MatchString(alias.Name) { + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("import name (%s) must NOT match the regular expression: %s", alias.Name, r.denyRegexp.String()), + Node: alias, + Category: "imports", + }) + } + } + + return failures +} + +// Name returns the rule name. +func (*ImportAliasNamingRule) Name() string { + return "import-alias-naming" +} + +func (r *ImportAliasNamingRule) setAllowRule(value any) { + namingRule, ok := value.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument '%v' for import-alias-naming allowRegexp rule. Expecting string, got %T", value, value)) + } + + namingRuleRegexp, err := regexp.Compile(namingRule) + if err != nil { + panic(fmt.Sprintf("Invalid argument to the import-alias-naming allowRegexp rule. Expecting %q to be a valid regular expression, got: %v", namingRule, err)) + } + r.allowRegexp = namingRuleRegexp +} + +func (r *ImportAliasNamingRule) setDenyRule(value any) { + namingRule, ok := value.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument '%v' for import-alias-naming denyRegexp rule. Expecting string, got %T", value, value)) + } + + namingRuleRegexp, err := regexp.Compile(namingRule) + if err != nil { + panic(fmt.Sprintf("Invalid argument to the import-alias-naming denyRegexp rule.
Expecting %q to be a valid regular expression, got: %v", namingRule, err)) + } + r.denyRegexp = namingRuleRegexp +} diff --git a/vendor/github.com/mgechev/revive/rule/import-shadowing.go b/vendor/github.com/mgechev/revive/rule/import-shadowing.go index 2bab704d02..046aeb688e 100644 --- a/vendor/github.com/mgechev/revive/rule/import-shadowing.go +++ b/vendor/github.com/mgechev/revive/rule/import-shadowing.go @@ -29,6 +29,7 @@ func (*ImportShadowingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Fail failures = append(failures, failure) }, alreadySeen: map[*ast.Object]struct{}{}, + skipIdents: map[*ast.Ident]struct{}{}, } ast.Walk(walker, fileAst) @@ -62,6 +63,7 @@ type importShadowing struct { importNames map[string]struct{} onFailure func(lint.Failure) alreadySeen map[*ast.Object]struct{} + skipIdents map[*ast.Ident]struct{} } // Visit visits AST nodes and checks if id nodes (ast.Ident) shadow an import name @@ -80,6 +82,10 @@ func (w importShadowing) Visit(n ast.Node) ast.Visitor { *ast.SelectorExpr, // skip analysis of selector expressions (anId.otherId): because if anId shadows an import name, it was already detected, and otherId does not shadows the import name *ast.StructType: // skip analysis of struct type because struct fields can not shadow an import name return nil + case *ast.FuncDecl: + if n.Recv != nil { + w.skipIdents[n.Name] = struct{}{} + } case *ast.Ident: if n == w.packageNameIdent { return nil // skip the ident corresponding to the package name of this file @@ -92,11 +98,12 @@ func (w importShadowing) Visit(n ast.Node) ast.Visitor { _, isImportName := w.importNames[id] _, alreadySeen := w.alreadySeen[n.Obj] - if isImportName && !alreadySeen { + _, skipIdent := w.skipIdents[n] + if isImportName && !alreadySeen && !skipIdent { w.onFailure(lint.Failure{ Confidence: 1, Node: n, - Category: "namming", + Category: "naming", Failure: fmt.Sprintf("The name '%s' shadows an import name", id), }) diff --git a/vendor/github.com/mgechev/revive/rule/imports-blacklist.go b/vendor/github.com/mgechev/revive/rule/imports-blacklist.go deleted file mode 100644 index 7106628155..0000000000 --- a/vendor/github.com/mgechev/revive/rule/imports-blacklist.go +++ /dev/null @@ -1,77 +0,0 @@ -package rule - -import ( - "fmt" - "regexp" - "sync" - - "github.com/mgechev/revive/lint" -) - -// ImportsBlacklistRule lints given else constructs. -type ImportsBlacklistRule struct { - blacklist []*regexp.Regexp - sync.Mutex -} - -var replaceRegexp = regexp.MustCompile(`/?\*\*/?`) - -func (r *ImportsBlacklistRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.blacklist == nil { - r.blacklist = make([]*regexp.Regexp, 0) - - for _, arg := range arguments { - argStr, ok := arg.(string) - if !ok { - panic(fmt.Sprintf("Invalid argument to the imports-blacklist rule. Expecting a string, got %T", arg)) - } - regStr, err := regexp.Compile(fmt.Sprintf(`(?m)"%s"$`, replaceRegexp.ReplaceAllString(argStr, `(\W|\w)*`))) - if err != nil { - panic(fmt.Sprintf("Invalid argument to the imports-blacklist rule. Expecting %q to be a valid regular expression, got: %v", argStr, err)) - } - r.blacklist = append(r.blacklist, regStr) - } - } -} - -func (r *ImportsBlacklistRule) isBlacklisted(path string) bool { - for _, regex := range r.blacklist { - if regex.MatchString(path) { - return true - } - } - return false -} - -// Apply applies the rule to given file. 
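The imports-blacklist rule being deleted here is re-added below as imports-blocklist with the matching logic intact (and the old test-file exemption dropped). A sketch of how a configured pattern with a ** wildcard is translated into the regexp the rule matches against quoted import paths:

package main

import (
	"fmt"
	"regexp"
)

var replaceImportRegexp = regexp.MustCompile(`/?\*\*/?`)

func main() {
	arg := "github.com/**/types" // example blocklist entry
	// "/**/" is replaced, yielding (?m)"github.com(\W|\w)*types"$
	re := regexp.MustCompile(fmt.Sprintf(`(?m)"%s"$`, replaceImportRegexp.ReplaceAllString(arg, `(\W|\w)*`)))
	fmt.Println(re.MatchString(`"github.com/foo/types"`)) // true
}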
-func (r *ImportsBlacklistRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) - - var failures []lint.Failure - - if file.IsTest() { - return failures // skip, test file - } - - for _, is := range file.AST.Imports { - path := is.Path - if path != nil && r.isBlacklisted(path.Value) { - failures = append(failures, lint.Failure{ - Confidence: 1, - Failure: "should not use the following blacklisted import: " + path.Value, - Node: is, - Category: "imports", - }) - } - } - - return failures -} - -// Name returns the rule name. -func (*ImportsBlacklistRule) Name() string { - return "imports-blacklist" -} diff --git a/vendor/github.com/mgechev/revive/rule/imports-blocklist.go b/vendor/github.com/mgechev/revive/rule/imports-blocklist.go new file mode 100644 index 0000000000..431066403a --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/imports-blocklist.go @@ -0,0 +1,73 @@ +package rule + +import ( + "fmt" + "regexp" + "sync" + + "github.com/mgechev/revive/lint" +) + +// ImportsBlocklistRule lints given else constructs. +type ImportsBlocklistRule struct { + blocklist []*regexp.Regexp + sync.Mutex +} + +var replaceImportRegexp = regexp.MustCompile(`/?\*\*/?`) + +func (r *ImportsBlocklistRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.blocklist == nil { + r.blocklist = make([]*regexp.Regexp, 0) + + for _, arg := range arguments { + argStr, ok := arg.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument to the imports-blocklist rule. Expecting a string, got %T", arg)) + } + regStr, err := regexp.Compile(fmt.Sprintf(`(?m)"%s"$`, replaceImportRegexp.ReplaceAllString(argStr, `(\W|\w)*`))) + if err != nil { + panic(fmt.Sprintf("Invalid argument to the imports-blocklist rule. Expecting %q to be a valid regular expression, got: %v", argStr, err)) + } + r.blocklist = append(r.blocklist, regStr) + } + } +} + +func (r *ImportsBlocklistRule) isBlocklisted(path string) bool { + for _, regex := range r.blocklist { + if regex.MatchString(path) { + return true + } + } + return false +} + +// Apply applies the rule to given file. +func (r *ImportsBlocklistRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + + var failures []lint.Failure + + for _, is := range file.AST.Imports { + path := is.Path + if path != nil && r.isBlocklisted(path.Value) { + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: "should not use the following blocklisted import: " + path.Value, + Node: is, + Category: "imports", + }) + } + } + + return failures +} + +// Name returns the rule name. +func (*ImportsBlocklistRule) Name() string { + return "imports-blocklist" +} diff --git a/vendor/github.com/mgechev/revive/rule/indent-error-flow.go b/vendor/github.com/mgechev/revive/rule/indent-error-flow.go index e455801c47..294ceef842 100644 --- a/vendor/github.com/mgechev/revive/rule/indent-error-flow.go +++ b/vendor/github.com/mgechev/revive/rule/indent-error-flow.go @@ -1,9 +1,7 @@ package rule import ( - "go/ast" - "go/token" - + "github.com/mgechev/revive/internal/ifelse" "github.com/mgechev/revive/lint" ) @@ -11,16 +9,8 @@ import ( type IndentErrorFlowRule struct{} // Apply applies the rule to given file. 
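The hand-rolled lintElse visitor removed below is replaced by the shared internal/ifelse walker; the pattern indent-error-flow reports is unchanged. A sketch on assumed example code:

package main

import (
	"errors"
	"fmt"
)

func lookup(ok bool) (string, error) {
	if !ok {
		return "", errors.New("missing") // the if block ends with a return...
	} else { // ...so the rule says: drop this else and outdent its block
		return "value", nil
	}
}

func main() {
	v, err := lookup(true)
	fmt.Println(v, err)
}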
-func (*IndentErrorFlowRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { - var failures []lint.Failure - - onFailure := func(failure lint.Failure) { - failures = append(failures, failure) - } - - w := lintElse{make(map[*ast.IfStmt]bool), onFailure} - ast.Walk(w, file.AST) - return failures +func (e *IndentErrorFlowRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + return ifelse.Apply(e, file.AST, ifelse.TargetElse, args) } // Name returns the rule name. @@ -28,51 +18,28 @@ func (*IndentErrorFlowRule) Name() string { return "indent-error-flow" } -type lintElse struct { - ignore map[*ast.IfStmt]bool - onFailure func(lint.Failure) -} - -func (w lintElse) Visit(node ast.Node) ast.Visitor { - ifStmt, ok := node.(*ast.IfStmt) - if !ok || ifStmt.Else == nil { - return w - } - if w.ignore[ifStmt] { - if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { - w.ignore[elseif] = true - } - return w +// CheckIfElse evaluates the rule against an ifelse.Chain. +func (*IndentErrorFlowRule) CheckIfElse(chain ifelse.Chain, args ifelse.Args) (failMsg string) { + if !chain.If.Deviates() { + // this rule only applies if the if-block deviates control flow + return } - if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { - w.ignore[elseif] = true - return w - } - if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { - // only care about elses without conditions - return w - } - if len(ifStmt.Body.List) == 0 { - return w + + if chain.HasPriorNonDeviating { + // if we de-indent the "else" block then a previous branch + // might flow into it, affecting program behaviour + return } - shortDecl := false // does the if statement have a ":=" initialization statement? - if ifStmt.Init != nil { - if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { - shortDecl = true - } + + if !chain.If.Returns() { + // avoid overlapping with superfluous-else + return } - lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] - if _, ok := lastStmt.(*ast.ReturnStmt); ok { - extra := "" - if shortDecl { - extra = " (move short variable declaration to its own line if necessary)" - } - w.onFailure(lint.Failure{ - Confidence: 1, - Node: ifStmt.Else, - Category: "indent", - Failure: "if block ends with a return statement, so drop this else and outdent its block" + extra, - }) + + if args.PreserveScope && !chain.AtBlockEnd && (chain.HasInitializer || chain.Else.HasDecls) { + // avoid increasing variable scope + return } - return w + + return "if block ends with a return statement, so drop this else and outdent its block" } diff --git a/vendor/github.com/mgechev/revive/rule/line-length-limit.go b/vendor/github.com/mgechev/revive/rule/line-length-limit.go index 9e512c1c2c..1a414f6914 100644 --- a/vendor/github.com/mgechev/revive/rule/line-length-limit.go +++ b/vendor/github.com/mgechev/revive/rule/line-length-limit.go @@ -18,10 +18,16 @@ type LineLengthLimitRule struct { sync.Mutex } +const defaultLineLengthLimit = 80 + func (r *LineLengthLimitRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.max == 0 { - checkNumberOfArguments(1, arguments, r.Name()) + if len(arguments) < 1 { + r.max = defaultLineLengthLimit + return + } max, ok := arguments[0].(int64) // Alt. non panicking version if !ok || max < 0 { @@ -30,7 +36,6 @@ func (r *LineLengthLimitRule) configure(arguments lint.Arguments) { r.max = int(max) } - r.Unlock() } // Apply applies the rule to given file. 
diff --git a/vendor/github.com/mgechev/revive/rule/max-control-nesting.go b/vendor/github.com/mgechev/revive/rule/max-control-nesting.go new file mode 100644 index 0000000000..c4eb361937 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/max-control-nesting.go @@ -0,0 +1,128 @@ +package rule + +import ( + "fmt" + "go/ast" + "sync" + + "github.com/mgechev/revive/lint" +) + +// MaxControlNestingRule lints given else constructs. +type MaxControlNestingRule struct { + max int64 + sync.Mutex +} + +const defaultMaxControlNesting = 5 + +// Apply applies the rule to given file. +func (r *MaxControlNestingRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + + var failures []lint.Failure + + fileAst := file.AST + + walker := &lintMaxControlNesting{ + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + max: int(r.max), + } + + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (*MaxControlNestingRule) Name() string { + return "max-control-nesting" +} + +type lintMaxControlNesting struct { + max int + onFailure func(lint.Failure) + nestingLevelAcc int + lastCtrlStmt ast.Node +} + +func (w *lintMaxControlNesting) Visit(n ast.Node) ast.Visitor { + if w.nestingLevelAcc > w.max { // we are visiting a node beyond the max nesting level + w.onFailure(lint.Failure{ + Failure: fmt.Sprintf("control flow nesting exceeds %d", w.max), + Confidence: 1, + Node: w.lastCtrlStmt, + Category: "complexity", + }) + return nil // stop visiting deeper + } + + switch v := n.(type) { + case *ast.IfStmt: + w.lastCtrlStmt = v + w.walkControlledBlock(v.Body) // "then" branch block + if v.Else != nil { + w.walkControlledBlock(v.Else) // "else" branch block + } + return nil // stop re-visiting nesting blocks (already visited by w.walkControlledBlock) + + case *ast.ForStmt: + w.lastCtrlStmt = v + w.walkControlledBlock(v.Body) + return nil // stop re-visiting nesting blocks (already visited by w.walkControlledBlock) + + case *ast.CaseClause: // switch case + w.lastCtrlStmt = v + for _, s := range v.Body { // visit each statement in the case clause + w.walkControlledBlock(s) + } + return nil // stop re-visiting nesting blocks (already visited by w.walkControlledBlock) + + case *ast.CommClause: // select case + w.lastCtrlStmt = v + for _, s := range v.Body { // visit each statement in the select case clause + w.walkControlledBlock(s) + } + return nil // stop re-visiting nesting blocks (already visited by w.walkControlledBlock) + + case *ast.FuncLit: + walker := &lintMaxControlNesting{ + onFailure: w.onFailure, + max: w.max, + } + ast.Walk(walker, v.Body) + return nil + } + + return w +} + +func (w *lintMaxControlNesting) walkControlledBlock(b ast.Node) { + oldNestingLevel := w.nestingLevelAcc + w.nestingLevelAcc++ + ast.Walk(w, b) + w.nestingLevelAcc = oldNestingLevel +} + +func (r *MaxControlNestingRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + if !(r.max < 1) { + return // max already set + } + + if len(arguments) < 1 { + r.max = defaultMaxControlNesting + return + } + + checkNumberOfArguments(1, arguments, r.Name()) + + max, ok := arguments[0].(int64) // Alt. 
non panicking version + if !ok { + panic(`invalid value passed as argument number to the "max-control-nesting" rule`) + } + r.max = max +} diff --git a/vendor/github.com/mgechev/revive/rule/max-public-structs.go b/vendor/github.com/mgechev/revive/rule/max-public-structs.go index e39f49c698..25be3e676f 100644 --- a/vendor/github.com/mgechev/revive/rule/max-public-structs.go +++ b/vendor/github.com/mgechev/revive/rule/max-public-structs.go @@ -14,9 +14,17 @@ type MaxPublicStructsRule struct { sync.Mutex } +const defaultMaxPublicStructs = 5 + func (r *MaxPublicStructsRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.max < 1 { + if len(arguments) < 1 { + r.max = defaultMaxPublicStructs + return + } + checkNumberOfArguments(1, arguments, r.Name()) max, ok := arguments[0].(int64) // Alt. non panicking version @@ -25,7 +33,6 @@ func (r *MaxPublicStructsRule) configure(arguments lint.Arguments) { } r.max = max } - r.Unlock() } // Apply applies the rule to given file. diff --git a/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go b/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go index 34e6515570..e9e64b9a6a 100644 --- a/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go +++ b/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go @@ -78,11 +78,6 @@ func (w lintModifiesValRecRule) Visit(node ast.Node) ast.Visitor { if name == "" || name != receiverName { continue } - - if w.skipType(ast.Expr(e.Sel)) { - continue - } - case *ast.Ident: // receiver := ... if e.Name != receiverName { continue @@ -97,7 +92,7 @@ func (w lintModifiesValRecRule) Visit(node ast.Node) ast.Visitor { return false } - assignmentsToReceiver := pick(n.Body, fselect, nil) + assignmentsToReceiver := pick(n.Body, fselect) for _, assignment := range assignmentsToReceiver { w.onFailure(lint.Failure{ diff --git a/vendor/github.com/mgechev/revive/rule/nested-structs.go b/vendor/github.com/mgechev/revive/rule/nested-structs.go index 968511f2ec..147bd482b1 100644 --- a/vendor/github.com/mgechev/revive/rule/nested-structs.go +++ b/vendor/github.com/mgechev/revive/rule/nested-structs.go @@ -14,7 +14,6 @@ func (*NestedStructs) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure walker := &lintNestedStructs{ - fileAST: file.AST, onFailure: func(failure lint.Failure) { failures = append(failures, failure) }, @@ -31,42 +30,46 @@ func (*NestedStructs) Name() string { } type lintNestedStructs struct { - fileAST *ast.File onFailure func(lint.Failure) } func (l *lintNestedStructs) Visit(n ast.Node) ast.Visitor { - switch v := n.(type) { - case *ast.FuncDecl: - if v.Body != nil { - ast.Walk(l, v.Body) - } - return nil - case *ast.Field: - _, isChannelField := v.Type.(*ast.ChanType) - if isChannelField { - return nil - } + if v, ok := n.(*ast.StructType); ok { + ls := &lintStruct{l.onFailure} + ast.Walk(ls, v.Fields) + } - filter := func(n ast.Node) bool { - switch n.(type) { - case *ast.StructType: - return true - default: - return false - } - } - structs := pick(v, filter, nil) - for _, s := range structs { - l.onFailure(lint.Failure{ - Failure: "no nested structs are allowed", - Category: "style", - Node: s, - Confidence: 1, - }) + return l +} + +type lintStruct struct { + onFailure func(lint.Failure) +} + +func (l *lintStruct) Visit(n ast.Node) ast.Visitor { + switch s := n.(type) { + case *ast.StructType: + l.fail(s) + return nil + case *ast.ArrayType: + if _, ok := s.Elt.(*ast.StructType); ok { + l.fail(s) } - return nil // no 
need to visit (again) the field + return nil + case *ast.ChanType: + return nil + case *ast.MapType: + return nil + default: + return l } +} - return l +func (l *lintStruct) fail(n ast.Node) { + l.onFailure(lint.Failure{ + Failure: "no nested structs are allowed", + Category: "style", + Node: n, + Confidence: 1, + }) } diff --git a/vendor/github.com/mgechev/revive/rule/optimize-operands-order.go b/vendor/github.com/mgechev/revive/rule/optimize-operands-order.go index 88928bb98c..841bde56c0 100644 --- a/vendor/github.com/mgechev/revive/rule/optimize-operands-order.go +++ b/vendor/github.com/mgechev/revive/rule/optimize-operands-order.go @@ -54,13 +54,13 @@ func (w lintOptimizeOperandsOrderlExpr) Visit(node ast.Node) ast.Visitor { } // check if the left sub-expression contains a function call - nodes := pick(binExpr.X, isCaller, nil) + nodes := pick(binExpr.X, isCaller) if len(nodes) < 1 { return w } // check if the right sub-expression does not contain a function call - nodes = pick(binExpr.Y, isCaller, nil) + nodes = pick(binExpr.Y, isCaller) if len(nodes) > 0 { return w } diff --git a/vendor/github.com/mgechev/revive/rule/package-comments.go b/vendor/github.com/mgechev/revive/rule/package-comments.go index 33963ab976..02f246be08 100644 --- a/vendor/github.com/mgechev/revive/rule/package-comments.go +++ b/vendor/github.com/mgechev/revive/rule/package-comments.go @@ -58,12 +58,14 @@ func (l *lintPackageComments) checkPackageComment() []lint.Failure { var packageFile *ast.File // which name is $package.go var firstFile *ast.File var firstFileName string + var fileSource string for name, file := range l.file.Pkg.Files() { if file.AST.Doc != nil { return nil } if name == "doc.go" { docFile = file.AST + fileSource = "doc.go" } if name == file.AST.Name.String()+".go" { packageFile = file.AST @@ -76,14 +78,21 @@ func (l *lintPackageComments) checkPackageComment() []lint.Failure { // prefer warning on doc.go, $package.go over first file if docFile == nil { docFile = packageFile + fileSource = l.fileAst.Name.String() + ".go" } if docFile == nil { docFile = firstFile + fileSource = firstFileName } + if docFile != nil { + pkgFile := l.file.Pkg.Files()[fileSource] return []lint.Failure{{ - Category: "comments", - Node: docFile, + Category: "comments", + Position: lint.FailurePosition{ + Start: pkgFile.ToPosition(docFile.Pos()), + End: pkgFile.ToPosition(docFile.Name.End()), + }, Confidence: 1, Failure: "should have a package comment", }} diff --git a/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go b/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go index 23dd85a7ac..b3ff084563 100644 --- a/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go +++ b/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go @@ -89,6 +89,9 @@ func (w *lintRedefinesBuiltinID) Visit(node ast.Node) ast.Visitor { case *ast.GenDecl: switch n.Tok { case token.TYPE: + if len(n.Specs) < 1 { + return nil + } typeSpec, ok := n.Specs[0].(*ast.TypeSpec) if !ok { return nil diff --git a/vendor/github.com/mgechev/revive/rule/redundant-import-alias.go b/vendor/github.com/mgechev/revive/rule/redundant-import-alias.go new file mode 100644 index 0000000000..fa5281f24b --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/redundant-import-alias.go @@ -0,0 +1,52 @@ +package rule + +import ( + "fmt" + "go/ast" + "strings" + + "github.com/mgechev/revive/lint" +) + +// RedundantImportAlias lints given else constructs. +type RedundantImportAlias struct{} + +// Apply applies the rule to given file. 
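What the new redundant-import-alias rule reports, sketched on assumed imports: an alias is redundant when it equals the last path segment of the import it names.

package main

import (
	fmt "fmt"            // flagged: alias "fmt" is redundant
	json "encoding/json" // flagged: alias repeats the path's last segment
)

func main() { fmt.Println(json.Valid([]byte("{}"))) }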
+func (*RedundantImportAlias) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + for _, imp := range file.AST.Imports { + if imp.Name == nil { + continue + } + + if getImportPackageName(imp) == imp.Name.Name { + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("Import alias \"%s\" is redundant", imp.Name.Name), + Node: imp, + Category: "imports", + }) + } + } + + return failures +} + +// Name returns the rule name. +func (*RedundantImportAlias) Name() string { + return "redundant-import-alias" +} + +func getImportPackageName(imp *ast.ImportSpec) string { + const pathSep = "/" + const strDelim = `"` + + path := imp.Path.Value + i := strings.LastIndex(path, pathSep) + if i == -1 { + return strings.Trim(path, strDelim) + } + + return strings.Trim(path[i+1:], strDelim) +} diff --git a/vendor/github.com/mgechev/revive/rule/string-format.go b/vendor/github.com/mgechev/revive/rule/string-format.go index e7841e8c3d..70edf7387c 100644 --- a/vendor/github.com/mgechev/revive/rule/string-format.go +++ b/vendor/github.com/mgechev/revive/rule/string-format.go @@ -38,7 +38,7 @@ func (*StringFormatRule) Name() string { // ParseArgumentsTest is a public wrapper around w.parseArguments used for testing. Returns the error message provided to panic, or nil if no error was encountered func (StringFormatRule) ParseArgumentsTest(arguments lint.Arguments) *string { w := lintStringFormatRule{} - c := make(chan interface{}) + c := make(chan any) // Parse the arguments in a goroutine, defer a recover() call, return the error encountered (or nil if there was no error) go func() { defer func() { @@ -68,6 +68,7 @@ type stringFormatSubrule struct { parent *lintStringFormatRule scope stringFormatSubruleScope regexp *regexp.Regexp + negated bool errorMessage string } @@ -89,18 +90,19 @@ var parseStringFormatScope = regexp.MustCompile( func (w *lintStringFormatRule) parseArguments(arguments lint.Arguments) { for i, argument := range arguments { - scope, regex, errorMessage := w.parseArgument(argument, i) + scope, regex, negated, errorMessage := w.parseArgument(argument, i) w.rules = append(w.rules, stringFormatSubrule{ parent: w, scope: scope, regexp: regex, + negated: negated, errorMessage: errorMessage, }) } } -func (w lintStringFormatRule) parseArgument(argument interface{}, ruleNum int) (scope stringFormatSubruleScope, regex *regexp.Regexp, errorMessage string) { - g, ok := argument.([]interface{}) // Cast to generic slice first +func (w lintStringFormatRule) parseArgument(argument any, ruleNum int) (scope stringFormatSubruleScope, regex *regexp.Regexp, negated bool, errorMessage string) { + g, ok := argument.([]any) // Cast to generic slice first if !ok { w.configError("argument is not a slice", ruleNum, 0) } @@ -146,7 +148,12 @@ func (w lintStringFormatRule) parseArgument(argument interface{}, ruleNum int) ( } // Strip / characters from the beginning and end of rule[1] before compiling - regex, err := regexp.Compile(rule[1][1 : len(rule[1])-1]) + negated = rule[1][0] == '!' 
+ offset := 1 + if negated { + offset++ + } + regex, err := regexp.Compile(rule[1][offset : len(rule[1])-1]) if err != nil { w.parseError(fmt.Sprintf("unable to compile %s as regexp", rule[1]), ruleNum, 1) } @@ -155,7 +162,7 @@ func (w lintStringFormatRule) parseArgument(argument interface{}, ruleNum int) ( if len(rule) == 3 { errorMessage = rule[2] } - return scope, regex, errorMessage + return scope, regex, negated, errorMessage } // Report an invalid config, this is specifically the user's fault @@ -204,10 +211,14 @@ func (lintStringFormatRule) getCallName(call *ast.CallExpr) (callName string, ok if selector, ok := call.Fun.(*ast.SelectorExpr); ok { // Scoped function call scope, ok := selector.X.(*ast.Ident) - if !ok { - return "", false + if ok { + return scope.Name + "." + selector.Sel.Name, true + } + // Scoped function call inside structure + recv, ok := selector.X.(*ast.SelectorExpr) + if ok { + return recv.Sel.Name + "." + selector.Sel.Name, true } - return scope.Name + "." + selector.Sel.Name, true } return "", false @@ -261,7 +272,26 @@ func (r *stringFormatSubrule) Apply(call *ast.CallExpr) { } func (r *stringFormatSubrule) lintMessage(s string, node ast.Node) { - // Fail if the string doesn't match the user's regex + if r.negated { + if !r.regexp.MatchString(s) { + return + } + // Fail if the string does match the user's regex + var failure string + if len(r.errorMessage) > 0 { + failure = r.errorMessage + } else { + failure = fmt.Sprintf("string literal matches user defined regex /%s/", r.regexp.String()) + } + r.parent.onFailure(lint.Failure{ + Confidence: 1, + Failure: failure, + Node: node, + }) + return + } + + // Fail if the string does NOT match the user's regex if r.regexp.MatchString(s) { return } diff --git a/vendor/github.com/mgechev/revive/rule/struct-tag.go b/vendor/github.com/mgechev/revive/rule/struct-tag.go index 3accf58fb6..f6ee47a731 100644 --- a/vendor/github.com/mgechev/revive/rule/struct-tag.go +++ b/vendor/github.com/mgechev/revive/rule/struct-tag.go @@ -5,23 +5,55 @@ import ( "go/ast" "strconv" "strings" + "sync" "github.com/fatih/structtag" "github.com/mgechev/revive/lint" ) // StructTagRule lints struct tags. -type StructTagRule struct{} +type StructTagRule struct { + userDefined map[string][]string // map: key -> []option + sync.Mutex +} + +func (r *StructTagRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + if r.userDefined == nil && len(arguments) > 0 { + checkNumberOfArguments(1, arguments, r.Name()) + r.userDefined = make(map[string][]string, len(arguments)) + for _, arg := range arguments { + item, ok := arg.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument to the %s rule. Expecting a string, got %v (of type %T)", r.Name(), arg, arg)) + } + parts := strings.Split(item, ",") + if len(parts) < 2 { + panic(fmt.Sprintf("Invalid argument to the %s rule. Expecting a string of the form key[,option]+, got %s", r.Name(), item)) + } + key := strings.TrimSpace(parts[0]) + for i := 1; i < len(parts); i++ { + option := strings.TrimSpace(parts[i]) + r.userDefined[key] = append(r.userDefined[key], option) + } + } + } +} // Apply applies the rule to given file. 
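The struct-tag changes below thread a userDefined map through every check so that options beyond the built-in sets can be allowed per key, via rule arguments of the form "key,option[,option]". For example, configured with the (assumed) argument "json,inline", a tag like the one in this sketch would no longer be reported:

package main

import "fmt"

type payload struct {
	// "inline" is not a built-in JSON option; it is accepted here only
	// because the assumed configuration declared it under the "json" key.
	Meta map[string]string `json:"meta,inline"`
}

func main() { fmt.Println(payload{}) }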
-func (*StructTagRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { - var failures []lint.Failure +func (r *StructTagRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + r.configure(args) + var failures []lint.Failure onFailure := func(failure lint.Failure) { failures = append(failures, failure) } - w := lintStructTagRule{onFailure: onFailure} + w := lintStructTagRule{ + onFailure: onFailure, + userDefined: r.userDefined, + } ast.Walk(w, file.AST) @@ -35,8 +67,9 @@ func (*StructTagRule) Name() string { type lintStructTagRule struct { onFailure func(lint.Failure) - usedTagNbr map[int]bool // list of used tag numbers - usedTagName map[string]bool // list of used tag keys + userDefined map[string][]string // map: key -> []option + usedTagNbr map[int]bool // list of used tag numbers + usedTagName map[string]bool // list of used tag keys } func (w lintStructTagRule) Visit(node ast.Node) ast.Visitor { @@ -57,17 +90,26 @@ func (w lintStructTagRule) Visit(node ast.Node) ast.Visitor { return w } +const keyASN1 = "asn1" +const keyBSON = "bson" +const keyDefault = "default" +const keyJSON = "json" +const keyProtobuf = "protobuf" +const keyRequired = "required" +const keyXML = "xml" +const keyYAML = "yaml" + func (w lintStructTagRule) checkTagNameIfNeed(tag *structtag.Tag) (string, bool) { isUnnamedTag := tag.Name == "" || tag.Name == "-" if isUnnamedTag { return "", true } - needsToCheckTagName := tag.Key == "bson" || - tag.Key == "json" || - tag.Key == "xml" || - tag.Key == "yaml" || - tag.Key == "protobuf" + needsToCheckTagName := tag.Key == keyBSON || + tag.Key == keyJSON || + tag.Key == keyXML || + tag.Key == keyYAML || + tag.Key == keyProtobuf if !needsToCheckTagName { return "", true @@ -92,13 +134,13 @@ func (w lintStructTagRule) checkTagNameIfNeed(tag *structtag.Tag) (string, bool) func (lintStructTagRule) getTagName(tag *structtag.Tag) string { switch tag.Key { - case "protobuf": + case keyProtobuf: for _, option := range tag.Options { if strings.HasPrefix(option, "name=") { - return strings.TrimLeft(option, "name=") + return strings.TrimPrefix(option, "name=") } } - return "" //protobuf tag lacks 'name' option + return "" // protobuf tag lacks 'name' option default: return tag.Name } @@ -123,40 +165,40 @@ func (w lintStructTagRule) checkTaggedField(f *ast.Field) { } switch key := tag.Key; key { - case "asn1": + case keyASN1: msg, ok := w.checkASN1Tag(f.Type, tag) if !ok { w.addFailure(f.Tag, msg) } - case "bson": + case keyBSON: msg, ok := w.checkBSONTag(tag.Options) if !ok { w.addFailure(f.Tag, msg) } - case "default": + case keyDefault: if !w.typeValueMatch(f.Type, tag.Name) { w.addFailure(f.Tag, "field's type and default value's type mismatch") } - case "json": + case keyJSON: msg, ok := w.checkJSONTag(tag.Name, tag.Options) if !ok { w.addFailure(f.Tag, msg) } - case "protobuf": + case keyProtobuf: msg, ok := w.checkProtobufTag(tag) if !ok { w.addFailure(f.Tag, msg) } - case "required": + case keyRequired: if tag.Name != "true" && tag.Name != "false" { w.addFailure(f.Tag, "required should be 'true' or 'false'") } - case "xml": + case keyXML: msg, ok := w.checkXMLTag(tag.Options) if !ok { w.addFailure(f.Tag, msg) } - case "yaml": + case keyYAML: msg, ok := w.checkYAMLTag(tag.Options) if !ok { w.addFailure(f.Tag, msg) @@ -201,6 +243,10 @@ func (w lintStructTagRule) checkASN1Tag(t ast.Expr, tag *structtag.Tag) (string, continue } + if w.isUserDefined(keyASN1, opt) { + continue + } + return fmt.Sprintf("unknown option '%s' in ASN1 tag", opt), false } } @@ -208,11 
+254,14 @@ func (w lintStructTagRule) checkASN1Tag(t ast.Expr, tag *structtag.Tag) (string, return "", true } -func (lintStructTagRule) checkBSONTag(options []string) (string, bool) { +func (w lintStructTagRule) checkBSONTag(options []string) (string, bool) { for _, opt := range options { switch opt { case "inline", "minsize", "omitempty": default: + if w.isUserDefined(keyBSON, opt) { + continue + } return fmt.Sprintf("unknown option '%s' in BSON tag", opt), false } } @@ -220,7 +269,7 @@ func (lintStructTagRule) checkBSONTag(options []string) (string, bool) { return "", true } -func (lintStructTagRule) checkJSONTag(name string, options []string) (string, bool) { +func (w lintStructTagRule) checkJSONTag(name string, options []string) (string, bool) { for _, opt := range options { switch opt { case "omitempty", "string": @@ -230,6 +279,9 @@ func (lintStructTagRule) checkJSONTag(name string, options []string) (string, bo return "option can not be empty in JSON tag", false } default: + if w.isUserDefined(keyJSON, opt) { + continue + } return fmt.Sprintf("unknown option '%s' in JSON tag", opt), false } } @@ -237,11 +289,14 @@ func (lintStructTagRule) checkJSONTag(name string, options []string) (string, bo return "", true } -func (lintStructTagRule) checkXMLTag(options []string) (string, bool) { +func (w lintStructTagRule) checkXMLTag(options []string) (string, bool) { for _, opt := range options { switch opt { case "any", "attr", "cdata", "chardata", "comment", "innerxml", "omitempty", "typeattr": default: + if w.isUserDefined(keyXML, opt) { + continue + } return fmt.Sprintf("unknown option '%s' in XML tag", opt), false } } @@ -249,11 +304,14 @@ func (lintStructTagRule) checkXMLTag(options []string) (string, bool) { return "", true } -func (lintStructTagRule) checkYAMLTag(options []string) (string, bool) { +func (w lintStructTagRule) checkYAMLTag(options []string) (string, bool) { for _, opt := range options { switch opt { case "flow", "inline", "omitempty": default: + if w.isUserDefined(keyYAML, opt) { + continue + } return fmt.Sprintf("unknown option '%s' in YAML tag", opt), false } } @@ -330,6 +388,9 @@ func (w lintStructTagRule) checkProtobufTag(tag *structtag.Tag) (string, bool) { case "name", "json": // do nothing default: + if w.isUserDefined(keyProtobuf, k) { + continue + } return fmt.Sprintf("unknown option '%s' in protobuf tag", k), false } } @@ -344,3 +405,17 @@ func (w lintStructTagRule) addFailure(n ast.Node, msg string) { Confidence: 1, }) } + +func (w lintStructTagRule) isUserDefined(key, opt string) bool { + if w.userDefined == nil { + return false + } + + options := w.userDefined[key] + for _, o := range options { + if opt == o { + return true + } + } + return false +} diff --git a/vendor/github.com/mgechev/revive/rule/superfluous-else.go b/vendor/github.com/mgechev/revive/rule/superfluous-else.go index a9e4380c90..2aa1b6b2ca 100644 --- a/vendor/github.com/mgechev/revive/rule/superfluous-else.go +++ b/vendor/github.com/mgechev/revive/rule/superfluous-else.go @@ -2,9 +2,7 @@ package rule import ( "fmt" - "go/ast" - "go/token" - + "github.com/mgechev/revive/internal/ifelse" "github.com/mgechev/revive/lint" ) @@ -12,27 +10,8 @@ import ( type SuperfluousElseRule struct{} // Apply applies the rule to given file. 
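For orientation while reading the rewrite below: superfluous-else covers if blocks that deviate control flow without returning (return-ending blocks stay with indent-error-flow). A sketch on assumed code:

package main

import "fmt"

func main() {
	for _, n := range []int{1, 2, 3} {
		if n%2 == 0 {
			continue // the if block ends with continue...
		} else { // ...so drop this else and outdent its block
			fmt.Println(n)
		}
	}
}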
-func (*SuperfluousElseRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { - var failures []lint.Failure - onFailure := func(failure lint.Failure) { - failures = append(failures, failure) - } - - branchingFunctions := map[string]map[string]bool{ - "os": {"Exit": true}, - "log": { - "Fatal": true, - "Fatalf": true, - "Fatalln": true, - "Panic": true, - "Panicf": true, - "Panicln": true, - }, - } - - w := lintSuperfluousElse{make(map[*ast.IfStmt]bool), onFailure, branchingFunctions} - ast.Walk(w, file.AST) - return failures +func (e *SuperfluousElseRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + return ifelse.Apply(e, file.AST, ifelse.TargetElse, args) } // Name returns the rule name. @@ -40,75 +19,28 @@ func (*SuperfluousElseRule) Name() string { return "superfluous-else" } -type lintSuperfluousElse struct { - ignore map[*ast.IfStmt]bool - onFailure func(lint.Failure) - branchingFunctions map[string]map[string]bool -} - -func (w lintSuperfluousElse) Visit(node ast.Node) ast.Visitor { - ifStmt, ok := node.(*ast.IfStmt) - if !ok || ifStmt.Else == nil { - return w - } - if w.ignore[ifStmt] { - if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { - w.ignore[elseif] = true - } - return w - } - if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { - w.ignore[elseif] = true - return w - } - if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { - // only care about elses without conditions - return w - } - if len(ifStmt.Body.List) == 0 { - return w - } - shortDecl := false // does the if statement have a ":=" initialization statement? - if ifStmt.Init != nil { - if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { - shortDecl = true - } - } - extra := "" - if shortDecl { - extra = " (move short variable declaration to its own line if necessary)" +// CheckIfElse evaluates the rule against an ifelse.Chain. 
+func (*SuperfluousElseRule) CheckIfElse(chain ifelse.Chain, args ifelse.Args) (failMsg string) { + if !chain.If.Deviates() { + // this rule only applies if the if-block deviates control flow + return } - lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] - switch stmt := lastStmt.(type) { - case *ast.BranchStmt: - tok := stmt.Tok.String() - if tok != "fallthrough" { - w.onFailure(newFailure(ifStmt.Else, "if block ends with a "+tok+" statement, so drop this else and outdent its block"+extra)) - } - case *ast.ExprStmt: - if ce, ok := stmt.X.(*ast.CallExpr); ok { // it's a function call - if fc, ok := ce.Fun.(*ast.SelectorExpr); ok { - if id, ok := fc.X.(*ast.Ident); ok { - fn := fc.Sel.Name - pkg := id.Name - if w.branchingFunctions[pkg][fn] { // it's a call to a branching function - w.onFailure( - newFailure(ifStmt.Else, fmt.Sprintf("if block ends with call to %s.%s function, so drop this else and outdent its block%s", pkg, fn, extra))) - } - } - } - } + if chain.HasPriorNonDeviating { + // if we de-indent the "else" block then a previous branch + // might flow into it, affecting program behaviour + return } - return w -} + if chain.If.Returns() { + // avoid overlapping with indent-error-flow + return + } -func newFailure(node ast.Node, msg string) lint.Failure { - return lint.Failure{ - Confidence: 1, - Node: node, - Category: "indent", - Failure: msg, + if args.PreserveScope && !chain.AtBlockEnd && (chain.HasInitializer || chain.Else.HasDecls) { + // avoid increasing variable scope + return } + + return fmt.Sprintf("if block ends with %v, so drop this else and outdent its block", chain.If.LongString()) } diff --git a/vendor/github.com/mgechev/revive/rule/time-equal.go b/vendor/github.com/mgechev/revive/rule/time-equal.go index 72ecf26fe4..3b85e18a8e 100644 --- a/vendor/github.com/mgechev/revive/rule/time-equal.go +++ b/vendor/github.com/mgechev/revive/rule/time-equal.go @@ -60,9 +60,9 @@ func (l *lintTimeEqual) Visit(node ast.Node) ast.Visitor { var failure string switch expr.Op { case token.EQL: - failure = fmt.Sprintf("use %s.Equal(%s) instead of %q operator", expr.X, expr.Y, expr.Op) + failure = fmt.Sprintf("use %s.Equal(%s) instead of %q operator", gofmt(expr.X), gofmt(expr.Y), expr.Op) case token.NEQ: - failure = fmt.Sprintf("use !%s.Equal(%s) instead of %q operator", expr.X, expr.Y, expr.Op) + failure = fmt.Sprintf("use !%s.Equal(%s) instead of %q operator", gofmt(expr.X), gofmt(expr.Y), expr.Op) } l.onFailure(lint.Failure{ diff --git a/vendor/github.com/mgechev/revive/rule/unchecked-type-assertion.go b/vendor/github.com/mgechev/revive/rule/unchecked-type-assertion.go new file mode 100644 index 0000000000..df27743cbd --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/unchecked-type-assertion.go @@ -0,0 +1,194 @@ +package rule + +import ( + "fmt" + "go/ast" + "sync" + + "github.com/mgechev/revive/lint" +) + +const ( + ruleUTAMessagePanic = "type assertion will panic if not matched" + ruleUTAMessageIgnored = "type assertion result ignored" +) + +// UncheckedTypeAssertionRule lints missing or ignored `ok`-value in dynamic type casts. +type UncheckedTypeAssertionRule struct { + sync.Mutex + acceptIgnoredAssertionResult bool + configured bool +} + +func (u *UncheckedTypeAssertionRule) configure(arguments lint.Arguments) { + u.Lock() + defer u.Unlock() + + if len(arguments) == 0 || u.configured { + return + } + + u.configured = true + + args, ok := arguments[0].(map[string]any) + if !ok { + panic("Unable to get arguments.
Expected object of key-value-pairs.") + } + + for k, v := range args { + switch k { + case "acceptIgnoredAssertionResult": + u.acceptIgnoredAssertionResult, ok = v.(bool) + if !ok { + panic(fmt.Sprintf("Unable to parse argument '%s'. Expected boolean.", k)) + } + default: + panic(fmt.Sprintf("Unknown argument: %s", k)) + } + } +} + +// Apply applies the rule to given file. +func (u *UncheckedTypeAssertionRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + u.configure(args) + + var failures []lint.Failure + + walker := &lintUnchekedTypeAssertion{ + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + acceptIgnoredTypeAssertionResult: u.acceptIgnoredAssertionResult, + } + + ast.Walk(walker, file.AST) + + return failures +} + +// Name returns the rule name. +func (*UncheckedTypeAssertionRule) Name() string { + return "unchecked-type-assertion" +} + +type lintUnchekedTypeAssertion struct { + onFailure func(lint.Failure) + acceptIgnoredTypeAssertionResult bool +} + +func isIgnored(e ast.Expr) bool { + ident, ok := e.(*ast.Ident) + if !ok { + return false + } + + return ident.Name == "_" +} + +func isTypeSwitch(e *ast.TypeAssertExpr) bool { + return e.Type == nil +} + +func (w *lintUnchekedTypeAssertion) requireNoTypeAssert(expr ast.Expr) { + e, ok := expr.(*ast.TypeAssertExpr) + if ok && !isTypeSwitch(e) { + w.addFailure(e, ruleUTAMessagePanic) + } +} + +func (w *lintUnchekedTypeAssertion) handleIfStmt(n *ast.IfStmt) { + ifCondition, ok := n.Cond.(*ast.BinaryExpr) + if ok { + w.requireNoTypeAssert(ifCondition.X) + w.requireNoTypeAssert(ifCondition.Y) + } +} + +func (w *lintUnchekedTypeAssertion) requireBinaryExpressionWithoutTypeAssertion(expr ast.Expr) { + binaryExpr, ok := expr.(*ast.BinaryExpr) + if ok { + w.requireNoTypeAssert(binaryExpr.X) + w.requireNoTypeAssert(binaryExpr.Y) + } +} + +func (w *lintUnchekedTypeAssertion) handleCaseClause(n *ast.CaseClause) { + for _, expr := range n.List { + w.requireNoTypeAssert(expr) + w.requireBinaryExpressionWithoutTypeAssertion(expr) + } +} + +func (w *lintUnchekedTypeAssertion) handleSwitch(n *ast.SwitchStmt) { + w.requireNoTypeAssert(n.Tag) + w.requireBinaryExpressionWithoutTypeAssertion(n.Tag) +} + +func (w *lintUnchekedTypeAssertion) handleAssignment(n *ast.AssignStmt) { + if len(n.Rhs) == 0 { + return + } + + e, ok := n.Rhs[0].(*ast.TypeAssertExpr) + if !ok || e == nil { + return + } + + if isTypeSwitch(e) { + return + } + + if len(n.Lhs) == 1 { + w.addFailure(e, ruleUTAMessagePanic) + } + + if !w.acceptIgnoredTypeAssertionResult && len(n.Lhs) == 2 && isIgnored(n.Lhs[1]) { + w.addFailure(e, ruleUTAMessageIgnored) + } +} + +// handles "return foo(.*bar)" - one of them is enough to fail as golang does not forward the type cast tuples in return statements +func (w *lintUnchekedTypeAssertion) handleReturn(n *ast.ReturnStmt) { + for _, r := range n.Results { + w.requireNoTypeAssert(r) + } +} + +func (w *lintUnchekedTypeAssertion) handleRange(n *ast.RangeStmt) { + w.requireNoTypeAssert(n.X) +} + +func (w *lintUnchekedTypeAssertion) handleChannelSend(n *ast.SendStmt) { + w.requireNoTypeAssert(n.Value) +} + +func (w *lintUnchekedTypeAssertion) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.RangeStmt: + w.handleRange(n) + case *ast.SwitchStmt: + w.handleSwitch(n) + case *ast.ReturnStmt: + w.handleReturn(n) + case *ast.AssignStmt: + w.handleAssignment(n) + case *ast.IfStmt: + w.handleIfStmt(n) + case *ast.CaseClause: + w.handleCaseClause(n) + case *ast.SendStmt: + 
w.handleChannelSend(n) + } + + return w +} + +func (w *lintUnchekedTypeAssertion) addFailure(n *ast.TypeAssertExpr, why string) { + s := fmt.Sprintf("type cast result is unchecked in %v - %s", gofmt(n), why) + w.onFailure(lint.Failure{ + Category: "bad practice", + Confidence: 1, + Node: n, + Failure: s, + }) +} diff --git a/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go b/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go index bad9075338..9ac2648cdd 100644 --- a/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go +++ b/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go @@ -45,8 +45,9 @@ type funcStatus struct { } type lintUnconditionalRecursionRule struct { - onFailure func(lint.Failure) - currentFunc *funcStatus + onFailure func(lint.Failure) + currentFunc *funcStatus + inGoStatement bool } // Visit will traverse the file AST. @@ -68,9 +69,13 @@ func (w lintUnconditionalRecursionRule) Visit(node ast.Node) ast.Visitor { default: rec = n.Recv.List[0].Names[0] } - w.currentFunc = &funcStatus{&funcDesc{rec, n.Name}, false} case *ast.CallExpr: + // check if call arguments has a recursive call + for _, arg := range n.Args { + ast.Walk(w, arg) + } + var funcID *ast.Ident var selector *ast.Ident switch c := n.Fun.(type) { @@ -84,6 +89,9 @@ func (w lintUnconditionalRecursionRule) Visit(node ast.Node) ast.Visitor { return nil } funcID = c.Sel + case *ast.FuncLit: + ast.Walk(w, c.Body) // analyze the body of the function literal + return nil default: return w } @@ -93,11 +101,12 @@ func (w lintUnconditionalRecursionRule) Visit(node ast.Node) ast.Visitor { w.currentFunc.funcDesc.equal(&funcDesc{selector, funcID}) { w.onFailure(lint.Failure{ Category: "logic", - Confidence: 1, + Confidence: 0.8, Node: n, Failure: "unconditional recursive call", }) } + return nil case *ast.IfStmt: w.updateFuncStatus(n.Body) w.updateFuncStatus(n.Else) @@ -115,16 +124,21 @@ func (w lintUnconditionalRecursionRule) Visit(node ast.Node) ast.Visitor { w.updateFuncStatus(n.Body) return nil case *ast.GoStmt: - for _, a := range n.Call.Args { - ast.Walk(w, a) // check if arguments have a recursive call - } - return nil // recursive async call is not an issue + w.inGoStatement = true + ast.Walk(w, n.Call) + w.inGoStatement = false + return nil case *ast.ForStmt: if n.Cond != nil { return nil } // unconditional loop return w + case *ast.FuncLit: + if w.inGoStatement { + return w + } + return nil // literal call (closure) is not necessarily an issue } return w @@ -181,5 +195,5 @@ func (lintUnconditionalRecursionRule) hasControlExit(node ast.Node) bool { return false } - return len(pick(node, isExit, nil)) != 0 + return len(pick(node, isExit)) != 0 } diff --git a/vendor/github.com/mgechev/revive/rule/unhandled-error.go b/vendor/github.com/mgechev/revive/rule/unhandled-error.go index 6cde24b7f7..ce6fa38641 100644 --- a/vendor/github.com/mgechev/revive/rule/unhandled-error.go +++ b/vendor/github.com/mgechev/revive/rule/unhandled-error.go @@ -4,6 +4,8 @@ import ( "fmt" "go/ast" "go/types" + "regexp" + "strings" "sync" "github.com/mgechev/revive/lint" @@ -11,24 +13,30 @@ import ( // UnhandledErrorRule lints given else constructs. 
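Aside, before the `UnhandledErrorRule` changes continue below: the new `unchecked-type-assertion` rule added above is easiest to see with a small example. This is my own sketch, not part of the diff; it shows the assignment shapes `handleAssignment` reports versus the comma-ok form it accepts.

```go
package main

import "fmt"

func main() {
	var v interface{} = "hello"

	// Flagged ("type assertion will panic if not matched"): a single-result
	// assertion panics at runtime when v does not hold a string.
	s := v.(string)
	fmt.Println(s)

	// Flagged ("type assertion result ignored") unless the
	// acceptIgnoredAssertionResult option is set: ok is discarded via _.
	t, _ := v.(string)
	fmt.Println(t)

	// Accepted: the comma-ok form checks the assertion before use.
	if u, ok := v.(string); ok {
		fmt.Println(u)
	}
}
```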
type UnhandledErrorRule struct { - ignoreList ignoreListType + ignoreList []*regexp.Regexp sync.Mutex } -type ignoreListType map[string]struct{} - func (r *UnhandledErrorRule) configure(arguments lint.Arguments) { r.Lock() if r.ignoreList == nil { - r.ignoreList = make(ignoreListType, len(arguments)) - for _, arg := range arguments { argStr, ok := arg.(string) if !ok { panic(fmt.Sprintf("Invalid argument to the unhandled-error rule. Expecting a string, got %T", arg)) } - r.ignoreList[argStr] = struct{}{} + argStr = strings.Trim(argStr, " ") + if argStr == "" { + panic("Invalid argument to the unhandled-error rule, expected regular expression must not be empty.") + } + + exp, err := regexp.Compile(argStr) + if err != nil { + panic(fmt.Sprintf("Invalid argument to the unhandled-error rule: regexp %q does not compile: %v", argStr, err)) + } + + r.ignoreList = append(r.ignoreList, exp) } } r.Unlock() @@ -60,7 +68,7 @@ func (*UnhandledErrorRule) Name() string { } type lintUnhandledErrors struct { - ignoreList ignoreListType + ignoreList []*regexp.Regexp pkg *lint.Package onFailure func(lint.Failure) } @@ -102,8 +110,8 @@ func (w *lintUnhandledErrors) Visit(node ast.Node) ast.Visitor { } func (w *lintUnhandledErrors) addFailure(n *ast.CallExpr) { - funcName := gofmt(n.Fun) - if _, mustIgnore := w.ignoreList[funcName]; mustIgnore { + name := w.funcName(n) + if w.isIgnoredFunc(name) { return } @@ -111,10 +119,34 @@ func (w *lintUnhandledErrors) addFailure(n *ast.CallExpr) { Category: "bad practice", Confidence: 1, Node: n, - Failure: fmt.Sprintf("Unhandled error in call to function %v", funcName), + Failure: fmt.Sprintf("Unhandled error in call to function %v", name), }) } +func (w *lintUnhandledErrors) funcName(call *ast.CallExpr) string { + fn, ok := w.getFunc(call) + if !ok { + return gofmt(call.Fun) + } + + name := fn.FullName() + name = strings.Replace(name, "(", "", -1) + name = strings.Replace(name, ")", "", -1) + name = strings.Replace(name, "*", "", -1) + + return name +} + +func (w *lintUnhandledErrors) isIgnoredFunc(funcName string) bool { + for _, pattern := range w.ignoreList { + if len(pattern.FindString(funcName)) == len(funcName) { + return true + } + } + + return false +} + func (*lintUnhandledErrors) isTypeError(t *types.Named) bool { const errorTypeName = "_.error" @@ -130,3 +162,17 @@ func (w *lintUnhandledErrors) returnsAnError(tt *types.Tuple) bool { } return false } + +func (w *lintUnhandledErrors) getFunc(call *ast.CallExpr) (*types.Func, bool) { + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return nil, false + } + + fn, ok := w.pkg.TypesInfo().ObjectOf(sel.Sel).(*types.Func) + if !ok { + return nil, false + } + + return fn, true +} diff --git a/vendor/github.com/mgechev/revive/rule/unused-param.go b/vendor/github.com/mgechev/revive/rule/unused-param.go index ab3da453ee..4b04ee916b 100644 --- a/vendor/github.com/mgechev/revive/rule/unused-param.go +++ b/vendor/github.com/mgechev/revive/rule/unused-param.go @@ -3,22 +3,72 @@ package rule import ( "fmt" "go/ast" + "regexp" + "sync" "github.com/mgechev/revive/lint" ) // UnusedParamRule lints unused params in functions. 
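Note on the `unhandled-error` change just above: the ignore list is now a list of compiled regular expressions, and `isIgnoredFunc` only ignores a call when the pattern's match covers the entire function name. A minimal standalone sketch of that full-match semantics (`matchesWhole` and the pattern are my own, hypothetical names):

```go
package main

import (
	"fmt"
	"regexp"
)

// matchesWhole mirrors the isIgnoredFunc check: a function is ignored only
// when the leftmost match spans the whole name, so a pattern that covers a
// mere prefix or substring does not count.
func matchesWhole(pattern *regexp.Regexp, name string) bool {
	return len(pattern.FindString(name)) == len(name)
}

func main() {
	p := regexp.MustCompile(`fmt\.Print.*`)
	fmt.Println(matchesWhole(p, "fmt.Println")) // true: whole name matched
	fmt.Println(matchesWhole(p, "fmt.Print"))   // true: exact match
	fmt.Println(matchesWhole(p, "os.Remove"))   // false: no match at all
}
```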
-type UnusedParamRule struct{} +type UnusedParamRule struct { + configured bool + // regex to check if some name is valid for unused parameter, "^_$" by default + allowRegex *regexp.Regexp + failureMsg string + sync.Mutex +} + +func (r *UnusedParamRule) configure(args lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.configured { + return + } + r.configured = true + + // while by default args is an array, i think it's good to provide structures inside it by default, not arrays or primitives + // it's more compatible to JSON nature of configurations + var allowedRegexStr string + if len(args) == 0 { + allowedRegexStr = "^_$" + r.failureMsg = "parameter '%s' seems to be unused, consider removing or renaming it as _" + } else { + // Arguments = [{}] + options := args[0].(map[string]any) + // Arguments = [{allowedRegex="^_"}] + + if allowedRegexParam, ok := options["allowRegex"]; ok { + allowedRegexStr, ok = allowedRegexParam.(string) + if !ok { + panic(fmt.Errorf("error configuring %s rule: allowedRegex is not string but [%T]", r.Name(), allowedRegexParam)) + } + } + } + var err error + r.allowRegex, err = regexp.Compile(allowedRegexStr) + if err != nil { + panic(fmt.Errorf("error configuring %s rule: allowedRegex is not valid regex [%s]: %v", r.Name(), allowedRegexStr, err)) + } + + if r.failureMsg == "" { + r.failureMsg = "parameter '%s' seems to be unused, consider removing or renaming it to match " + r.allowRegex.String() + } +} // Apply applies the rule to given file. -func (*UnusedParamRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { +func (r *UnusedParamRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + r.configure(args) var failures []lint.Failure onFailure := func(failure lint.Failure) { failures = append(failures, failure) } - - w := lintUnusedParamRule{onFailure: onFailure} + w := lintUnusedParamRule{ + onFailure: onFailure, + allowRegex: r.allowRegex, + failureMsg: r.failureMsg, + } ast.Walk(w, file.AST) @@ -31,55 +81,70 @@ func (*UnusedParamRule) Name() string { } type lintUnusedParamRule struct { - onFailure func(lint.Failure) + onFailure func(lint.Failure) + allowRegex *regexp.Regexp + failureMsg string } func (w lintUnusedParamRule) Visit(node ast.Node) ast.Visitor { + var ( + funcType *ast.FuncType + funcBody *ast.BlockStmt + ) switch n := node.(type) { + case *ast.FuncLit: + funcType = n.Type + funcBody = n.Body case *ast.FuncDecl: - params := retrieveNamedParams(n.Type.Params) - if len(params) < 1 { - return nil // skip, func without parameters - } - if n.Body == nil { return nil // skip, is a function prototype } - // inspect the func body looking for references to parameters - fselect := func(n ast.Node) bool { - ident, isAnID := n.(*ast.Ident) + funcType = n.Type + funcBody = n.Body + default: + return w // skip, not a function + } - if !isAnID { - return false - } + params := retrieveNamedParams(funcType.Params) + if len(params) < 1 { + return w // skip, func without parameters + } - _, isAParam := params[ident.Obj] - if isAParam { - params[ident.Obj] = false // mark as used - } + // inspect the func body looking for references to parameters + fselect := func(n ast.Node) bool { + ident, isAnID := n.(*ast.Ident) + if !isAnID { return false } - _ = pick(n.Body, fselect, nil) - - for _, p := range n.Type.Params.List { - for _, n := range p.Names { - if params[n.Obj] { - w.onFailure(lint.Failure{ - Confidence: 1, - Node: n, - Category: "bad practice", - Failure: fmt.Sprintf("parameter '%s' seems to be unused, consider removing or 
renaming it as _", n.Name), - }) - } - } + + _, isAParam := params[ident.Obj] + if isAParam { + params[ident.Obj] = false // mark as used } - return nil // full method body already inspected + return false + } + _ = pick(funcBody, fselect) + + for _, p := range funcType.Params.List { + for _, n := range p.Names { + if w.allowRegex.FindStringIndex(n.Name) != nil { + continue + } + if params[n.Obj] { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: n, + Category: "bad practice", + Failure: fmt.Sprintf(w.failureMsg, n.Name), + }) + } + } } - return w + return w // full method body was inspected } func retrieveNamedParams(params *ast.FieldList) map[*ast.Object]bool { diff --git a/vendor/github.com/mgechev/revive/rule/unused-receiver.go b/vendor/github.com/mgechev/revive/rule/unused-receiver.go index 2289a517e5..715dba3383 100644 --- a/vendor/github.com/mgechev/revive/rule/unused-receiver.go +++ b/vendor/github.com/mgechev/revive/rule/unused-receiver.go @@ -3,22 +3,72 @@ package rule import ( "fmt" "go/ast" + "regexp" + "sync" "github.com/mgechev/revive/lint" ) // UnusedReceiverRule lints unused params in functions. -type UnusedReceiverRule struct{} +type UnusedReceiverRule struct { + configured bool + // regex to check if some name is valid for unused parameter, "^_$" by default + allowRegex *regexp.Regexp + failureMsg string + sync.Mutex +} + +func (r *UnusedReceiverRule) configure(args lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.configured { + return + } + r.configured = true + + // while by default args is an array, i think it's good to provide structures inside it by default, not arrays or primitives + // it's more compatible to JSON nature of configurations + var allowedRegexStr string + if len(args) == 0 { + allowedRegexStr = "^_$" + r.failureMsg = "method receiver '%s' is not referenced in method's body, consider removing or renaming it as _" + } else { + // Arguments = [{}] + options := args[0].(map[string]any) + // Arguments = [{allowedRegex="^_"}] + + if allowedRegexParam, ok := options["allowRegex"]; ok { + allowedRegexStr, ok = allowedRegexParam.(string) + if !ok { + panic(fmt.Errorf("error configuring [unused-receiver] rule: allowedRegex is not string but [%T]", allowedRegexParam)) + } + } + } + var err error + r.allowRegex, err = regexp.Compile(allowedRegexStr) + if err != nil { + panic(fmt.Errorf("error configuring [unused-receiver] rule: allowedRegex is not valid regex [%s]: %v", allowedRegexStr, err)) + } + if r.failureMsg == "" { + r.failureMsg = "method receiver '%s' is not referenced in method's body, consider removing or renaming it to match " + r.allowRegex.String() + } +} // Apply applies the rule to given file. 
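Both `unused-param` above and `unused-receiver` below now take an optional `allowRegex` option (defaulting to `^_$`) that exempts matching names. A hedged illustration of the effect, assuming the rule is configured with `allowRegex: "^_"`; the function and names are mine:

```go
package demo

// With allowRegex = "^_", parameters whose names match the pattern are
// exempt from the unused-param rule.

// _ctx matches ^_ and is skipped; unused does not match and is reported,
// since the body references neither parameter.
func handler(_ctx string, unused int) {
	println("body never touches either parameter")
}
```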
-func (*UnusedReceiverRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { +func (r *UnusedReceiverRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + r.configure(args) var failures []lint.Failure onFailure := func(failure lint.Failure) { failures = append(failures, failure) } - w := lintUnusedReceiverRule{onFailure: onFailure} + w := lintUnusedReceiverRule{ + onFailure: onFailure, + allowRegex: r.allowRegex, + failureMsg: r.failureMsg, + } ast.Walk(w, file.AST) @@ -31,7 +81,9 @@ func (*UnusedReceiverRule) Name() string { } type lintUnusedReceiverRule struct { - onFailure func(lint.Failure) + onFailure func(lint.Failure) + allowRegex *regexp.Regexp + failureMsg string } func (w lintUnusedReceiverRule) Visit(node ast.Node) ast.Visitor { @@ -51,13 +103,17 @@ func (w lintUnusedReceiverRule) Visit(node ast.Node) ast.Visitor { return nil // the receiver is already named _ } + if w.allowRegex != nil && w.allowRegex.FindStringIndex(recID.Name) != nil { + return nil + } + // inspect the func body looking for references to the receiver id fselect := func(n ast.Node) bool { ident, isAnID := n.(*ast.Ident) return isAnID && ident.Obj == recID.Obj } - refs2recID := pick(n.Body, fselect, nil) + refs2recID := pick(n.Body, fselect) if len(refs2recID) > 0 { return nil // the receiver is referenced in the func body @@ -67,7 +123,7 @@ func (w lintUnusedReceiverRule) Visit(node ast.Node) ast.Visitor { Confidence: 1, Node: recID, Category: "bad practice", - Failure: fmt.Sprintf("method receiver '%s' is not referenced in method's body, consider removing or renaming it as _", recID.Name), + Failure: fmt.Sprintf(w.failureMsg, recID.Name), }) return nil // full method body already inspected diff --git a/vendor/github.com/mgechev/revive/rule/utils.go b/vendor/github.com/mgechev/revive/rule/utils.go index dca1674ca5..5778e76963 100644 --- a/vendor/github.com/mgechev/revive/rule/utils.go +++ b/vendor/github.com/mgechev/revive/rule/utils.go @@ -93,21 +93,15 @@ func srcLine(src []byte, p token.Position) string { // pick yields a list of nodes by picking them from a sub-ast with root node n. // Nodes are selected by applying the fselect function -// f function is applied to each selected node before inserting it in the final result. -// If f==nil then it defaults to the identity function (ie it returns the node itself) -func pick(n ast.Node, fselect func(n ast.Node) bool, f func(n ast.Node) []ast.Node) []ast.Node { +func pick(n ast.Node, fselect func(n ast.Node) bool) []ast.Node { var result []ast.Node if n == nil { return result } - if f == nil { - f = func(n ast.Node) []ast.Node { return []ast.Node{n} } - } - onSelect := func(n ast.Node) { - result = append(result, f(n)...) + result = append(result, n) } p := picker{fselect: fselect, onSelect: onSelect} ast.Walk(p, n) @@ -158,7 +152,7 @@ func isExprABooleanLit(n ast.Node) (lexeme string, ok bool) { } // gofmt returns a string representation of an AST subtree. 
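The `utils.go` hunk below drops `pick`'s per-node mapping function, since every caller used the identity behaviour. As a self-contained analogue (using `ast.Inspect` rather than revive's internal `picker` visitor), the simplified shape looks roughly like this:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

// pick walks the AST rooted at n and collects every node for which fselect
// returns true; there is no longer a mapping function to apply per node.
func pick(n ast.Node, fselect func(ast.Node) bool) []ast.Node {
	var result []ast.Node
	ast.Inspect(n, func(n ast.Node) bool {
		if n != nil && fselect(n) {
			result = append(result, n)
		}
		return true
	})
	return result
}

func main() {
	src := `package p; func f(a int) int { return a + 1 }`
	f, err := parser.ParseFile(token.NewFileSet(), "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	idents := pick(f, func(n ast.Node) bool { _, ok := n.(*ast.Ident); return ok })
	fmt.Println(len(idents), "identifiers picked")
}
```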
-func gofmt(x interface{}) string { +func gofmt(x any) string { buf := bytes.Buffer{} fs := token.NewFileSet() printer.Fprint(&buf, fs, x) diff --git a/vendor/github.com/mgechev/revive/rule/var-naming.go b/vendor/github.com/mgechev/revive/rule/var-naming.go index 3c0c19cdf3..e91c22dc21 100644 --- a/vendor/github.com/mgechev/revive/rule/var-naming.go +++ b/vendor/github.com/mgechev/revive/rule/var-naming.go @@ -4,33 +4,82 @@ import ( "fmt" "go/ast" "go/token" + "regexp" "strings" "sync" "github.com/mgechev/revive/lint" ) +var anyCapsRE = regexp.MustCompile(`[A-Z]`) + +// regexp for constant names like `SOME_CONST`, `SOME_CONST_2`, `X123_3`, `_SOME_PRIVATE_CONST` (#851, #865) +var upperCaseConstRE = regexp.MustCompile(`^_?[A-Z][A-Z\d]*(_[A-Z\d]+)*$`) + // VarNamingRule lints given else constructs. type VarNamingRule struct { - configured bool - whitelist []string - blacklist []string + configured bool + allowlist []string + blocklist []string + upperCaseConst bool // if true - allows to use UPPER_SOME_NAMES for constants + skipPackageNameChecks bool sync.Mutex } func (r *VarNamingRule) configure(arguments lint.Arguments) { r.Lock() - if !r.configured { - if len(arguments) >= 1 { - r.whitelist = getList(arguments[0], "whitelist") - } + defer r.Unlock() + if r.configured { + return + } + + r.configured = true + if len(arguments) >= 1 { + r.allowlist = getList(arguments[0], "allowlist") + } + + if len(arguments) >= 2 { + r.blocklist = getList(arguments[1], "blocklist") + } - if len(arguments) >= 2 { - r.blacklist = getList(arguments[1], "blacklist") + if len(arguments) >= 3 { + // not pretty code because should keep compatibility with TOML (no mixed array types) and new map parameters + thirdArgument := arguments[2] + asSlice, ok := thirdArgument.([]any) + if !ok { + panic(fmt.Sprintf("Invalid third argument to the var-naming rule. Expecting a %s of type slice, got %T", "options", arguments[2])) } - r.configured = true + if len(asSlice) != 1 { + panic(fmt.Sprintf("Invalid third argument to the var-naming rule. Expecting a %s of type slice, of len==1, but %d", "options", len(asSlice))) + } + args, ok := asSlice[0].(map[string]any) + if !ok { + panic(fmt.Sprintf("Invalid third argument to the var-naming rule. Expecting a %s of type slice, of len==1, with map, but %T", "options", asSlice[0])) + } + r.upperCaseConst = fmt.Sprint(args["upperCaseConst"]) == "true" + r.skipPackageNameChecks = fmt.Sprint(args["skipPackageNameChecks"]) == "true" + } +} + +func (r *VarNamingRule) applyPackageCheckRules(walker *lintNames) { + // Package names need slightly different handling than other names. + if strings.Contains(walker.fileAst.Name.Name, "_") && !strings.HasSuffix(walker.fileAst.Name.Name, "_test") { + walker.onFailure(lint.Failure{ + Failure: "don't use an underscore in package name", + Confidence: 1, + Node: walker.fileAst.Name, + Category: "naming", + }) + } + if anyCapsRE.MatchString(walker.fileAst.Name.Name) { + walker.onFailure(lint.Failure{ + Failure: fmt.Sprintf("don't use MixedCaps in package name; %s should be %s", walker.fileAst.Name.Name, strings.ToLower(walker.fileAst.Name.Name)), + Confidence: 1, + Node: walker.fileAst.Name, + Category: "naming", + }) } - r.Unlock() + } // Apply applies the rule to given file. @@ -44,21 +93,16 @@ func (r *VarNamingRule) Apply(file *lint.File, arguments lint.Arguments) []lint. 
walker := lintNames{ file: file, fileAst: fileAst, - whitelist: r.whitelist, - blacklist: r.blacklist, + allowlist: r.allowlist, + blocklist: r.blocklist, onFailure: func(failure lint.Failure) { failures = append(failures, failure) }, + upperCaseConst: r.upperCaseConst, } - // Package names need slightly different handling than other names. - if strings.Contains(walker.fileAst.Name.Name, "_") && !strings.HasSuffix(walker.fileAst.Name.Name, "_test") { - walker.onFailure(lint.Failure{ - Failure: "don't use an underscore in package name", - Confidence: 1, - Node: walker.fileAst.Name, - Category: "naming", - }) + if !r.skipPackageNameChecks { + r.applyPackageCheckRules(&walker) } ast.Walk(&walker, fileAst) @@ -71,18 +115,18 @@ func (*VarNamingRule) Name() string { return "var-naming" } -func checkList(fl *ast.FieldList, thing string, w *lintNames) { +func (w *lintNames) checkList(fl *ast.FieldList, thing string) { if fl == nil { return } for _, f := range fl.List { for _, id := range f.Names { - check(id, thing, w) + w.check(id, thing) } } } -func check(id *ast.Ident, thing string, w *lintNames) { +func (w *lintNames) check(id *ast.Ident, thing string) { if id.Name == "_" { return } @@ -90,6 +134,12 @@ func check(id *ast.Ident, thing string, w *lintNames) { return } + // #851 upperCaseConst support + // if it's const + if thing == token.CONST.String() && w.upperCaseConst && upperCaseConstRE.MatchString(id.Name) { + return + } + // Handle two common styles from other languages that don't belong in Go. if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") { w.onFailure(lint.Failure{ @@ -100,17 +150,8 @@ func check(id *ast.Ident, thing string, w *lintNames) { }) return } - if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' { - should := string(id.Name[1]+'a'-'A') + id.Name[2:] - w.onFailure(lint.Failure{ - Failure: fmt.Sprintf("don't use leading k in Go names; %s %s should be %s", thing, id.Name, should), - Confidence: 0.8, - Node: id, - Category: "naming", - }) - } - should := lint.Name(id.Name, w.whitelist, w.blacklist) + should := lint.Name(id.Name, w.allowlist, w.blocklist) if id.Name == should { return } @@ -133,11 +174,12 @@ func check(id *ast.Ident, thing string, w *lintNames) { } type lintNames struct { - file *lint.File - fileAst *ast.File - onFailure func(lint.Failure) - whitelist []string - blacklist []string + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) + allowlist []string + blocklist []string + upperCaseConst bool } func (w *lintNames) Visit(n ast.Node) ast.Visitor { @@ -148,7 +190,7 @@ func (w *lintNames) Visit(n ast.Node) ast.Visitor { } for _, exp := range v.Lhs { if id, ok := exp.(*ast.Ident); ok { - check(id, "var", w) + w.check(id, "var") } } case *ast.FuncDecl: @@ -170,31 +212,24 @@ func (w *lintNames) Visit(n ast.Node) ast.Visitor { // not exported in the Go API. // See https://github.com/golang/lint/issues/144. 
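For reference, the `upperCaseConstRE` pattern introduced above only exempts names when the identifier is a `const` and the `upperCaseConst` option is enabled. A quick check of which names it accepts (my own test harness, same pattern as the diff):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as upperCaseConstRE in var-naming.go above.
	re := regexp.MustCompile(`^_?[A-Z][A-Z\d]*(_[A-Z\d]+)*$`)

	for _, name := range []string{
		"SOME_CONST",          // accepted
		"_SOME_PRIVATE_CONST", // accepted: optional leading underscore
		"X123_3",              // accepted: digits allowed after the first letter
		"someConst",           // rejected: not SCREAMING_SNAKE_CASE
		"SOME__CONST",         // rejected: empty segment between underscores
	} {
		fmt.Printf("%-22s %v\n", name, re.MatchString(name))
	}
}
```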
if ast.IsExported(v.Name.Name) || !isCgoExported(v) { - check(v.Name, thing, w) + w.check(v.Name, thing) } - checkList(v.Type.Params, thing+" parameter", w) - checkList(v.Type.Results, thing+" result", w) + w.checkList(v.Type.Params, thing+" parameter") + w.checkList(v.Type.Results, thing+" result") case *ast.GenDecl: if v.Tok == token.IMPORT { return w } - var thing string - switch v.Tok { - case token.CONST: - thing = "const" - case token.TYPE: - thing = "type" - case token.VAR: - thing = "var" - } + + thing := v.Tok.String() for _, spec := range v.Specs { switch s := spec.(type) { case *ast.TypeSpec: - check(s.Name, thing, w) + w.check(s.Name, thing) case *ast.ValueSpec: for _, id := range s.Names { - check(id, thing, w) + w.check(id, thing) } } } @@ -206,31 +241,31 @@ func (w *lintNames) Visit(n ast.Node) ast.Visitor { if !ok { // might be an embedded interface name continue } - checkList(ft.Params, "interface method parameter", w) - checkList(ft.Results, "interface method result", w) + w.checkList(ft.Params, "interface method parameter") + w.checkList(ft.Results, "interface method result") } case *ast.RangeStmt: if v.Tok == token.ASSIGN { return w } if id, ok := v.Key.(*ast.Ident); ok { - check(id, "range var", w) + w.check(id, "range var") } if id, ok := v.Value.(*ast.Ident); ok { - check(id, "range var", w) + w.check(id, "range var") } case *ast.StructType: for _, f := range v.Fields.List { for _, id := range f.Names { - check(id, "struct field", w) + w.check(id, "struct field") } } } return w } -func getList(arg interface{}, argName string) []string { - temp, ok := arg.([]interface{}) +func getList(arg any, argName string) []string { + temp, ok := arg.([]any) if !ok { panic(fmt.Sprintf("Invalid argument to the var-naming rule. Expecting a %s of type slice with initialisms, got %T", argName, arg)) } diff --git a/vendor/github.com/mitchellh/cli/.travis.yml b/vendor/github.com/mitchellh/cli/.travis.yml deleted file mode 100644 index 155ebfa6d4..0000000000 --- a/vendor/github.com/mitchellh/cli/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -sudo: false - -language: go - -env: - - GO111MODULE=on - -go: - - "1.14" - - "1.15" - -branches: - only: - - master - -script: make updatedeps test testrace diff --git a/vendor/github.com/mitchellh/cli/LICENSE b/vendor/github.com/mitchellh/cli/LICENSE deleted file mode 100644 index c33dcc7c92..0000000000 --- a/vendor/github.com/mitchellh/cli/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. 
“Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. 
under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. 
Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. 
This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. 
- -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/mitchellh/cli/Makefile b/vendor/github.com/mitchellh/cli/Makefile deleted file mode 100644 index 89c0a12097..0000000000 --- a/vendor/github.com/mitchellh/cli/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -TEST?=./... - -default: test - -# test runs the test suite and vets the code -test: - go list $(TEST) | xargs -n1 go test -timeout=60s -parallel=10 $(TESTARGS) - -# testrace runs the race checker -testrace: - go list $(TEST) | xargs -n1 go test -race $(TESTARGS) - -# updatedeps installs all the dependencies to run and build -updatedeps: - go mod download - -.PHONY: test testrace updatedeps diff --git a/vendor/github.com/mitchellh/cli/README.md b/vendor/github.com/mitchellh/cli/README.md deleted file mode 100644 index 8f02cdd0a3..0000000000 --- a/vendor/github.com/mitchellh/cli/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# Go CLI Library [![GoDoc](https://godoc.org/github.com/mitchellh/cli?status.png)](https://godoc.org/github.com/mitchellh/cli) - -cli is a library for implementing powerful command-line interfaces in Go. -cli is the library that powers the CLI for -[Packer](https://github.com/mitchellh/packer), -[Serf](https://github.com/hashicorp/serf), -[Consul](https://github.com/hashicorp/consul), -[Vault](https://github.com/hashicorp/vault), -[Terraform](https://github.com/hashicorp/terraform), and -[Nomad](https://github.com/hashicorp/nomad). - -## Features - -* Easy sub-command based CLIs: `cli foo`, `cli bar`, etc. - -* Support for nested subcommands such as `cli foo bar`. - -* Optional support for default subcommands so `cli` does something - other than error. - -* Support for shell autocompletion of subcommands, flags, and arguments - with callbacks in Go. You don't need to write any shell code. - -* Automatic help generation for listing subcommands - -* Automatic help flag recognition of `-h`, `--help`, etc. - -* Automatic version flag recognition of `-v`, `--version`. - -* Helpers for interacting with the terminal, such as outputting information, - asking for input, etc. These are optional, you can always interact with the - terminal however you choose. - -* Use of Go interfaces/types makes augmenting various parts of the library a - piece of cake. 
- -## Example - -Below is a simple example of creating and running a CLI - -```go -package main - -import ( - "log" - "os" - - "github.com/mitchellh/cli" -) - -func main() { - c := cli.NewCLI("app", "1.0.0") - c.Args = os.Args[1:] - c.Commands = map[string]cli.CommandFactory{ - "foo": fooCommandFactory, - "bar": barCommandFactory, - } - - exitStatus, err := c.Run() - if err != nil { - log.Println(err) - } - - os.Exit(exitStatus) -} -``` - diff --git a/vendor/github.com/mitchellh/cli/autocomplete.go b/vendor/github.com/mitchellh/cli/autocomplete.go deleted file mode 100644 index 3bec6258f0..0000000000 --- a/vendor/github.com/mitchellh/cli/autocomplete.go +++ /dev/null @@ -1,43 +0,0 @@ -package cli - -import ( - "github.com/posener/complete/cmd/install" -) - -// autocompleteInstaller is an interface to be implemented to perform the -// autocomplete installation and uninstallation with a CLI. -// -// This interface is not exported because it only exists for unit tests -// to be able to test that the installation is called properly. -type autocompleteInstaller interface { - Install(string) error - Uninstall(string) error -} - -// realAutocompleteInstaller uses the real install package to do the -// install/uninstall. -type realAutocompleteInstaller struct{} - -func (i *realAutocompleteInstaller) Install(cmd string) error { - return install.Install(cmd) -} - -func (i *realAutocompleteInstaller) Uninstall(cmd string) error { - return install.Uninstall(cmd) -} - -// mockAutocompleteInstaller is used for tests to record the install/uninstall. -type mockAutocompleteInstaller struct { - InstallCalled bool - UninstallCalled bool -} - -func (i *mockAutocompleteInstaller) Install(cmd string) error { - i.InstallCalled = true - return nil -} - -func (i *mockAutocompleteInstaller) Uninstall(cmd string) error { - i.UninstallCalled = true - return nil -} diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go deleted file mode 100644 index 31fafa0509..0000000000 --- a/vendor/github.com/mitchellh/cli/cli.go +++ /dev/null @@ -1,741 +0,0 @@ -package cli - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "regexp" - "sort" - "strings" - "sync" - "text/template" - - "github.com/Masterminds/sprig" - "github.com/armon/go-radix" - "github.com/posener/complete" -) - -// CLI contains the state necessary to run subcommands and parse the -// command line arguments. -// -// CLI also supports nested subcommands, such as "cli foo bar". To use -// nested subcommands, the key in the Commands mapping below contains the -// full subcommand. In this example, it would be "foo bar". -// -// If you use a CLI with nested subcommands, some semantics change due to -// ambiguities: -// -// * We use longest prefix matching to find a matching subcommand. This -// means if you register "foo bar" and the user executes "cli foo qux", -// the "foo" command will be executed with the arg "qux". It is up to -// you to handle these args. One option is to just return the special -// help return code `RunResultHelp` to display help and exit. -// -// * The help flag "-h" or "-help" will look at all args to determine -// the help function. For example: "otto apps list -h" will show the -// help for "apps list" but "otto apps -h" will show it for "apps". -// In the normal CLI, only the first subcommand is used. -// -// * The help flag will list any subcommands that a command takes -// as well as the command's help itself. If there are no subcommands, -// it will note this. 
If the CLI itself has no subcommands, this entire -// section is omitted. -// -// * Any parent commands that don't exist are automatically created as -// no-op commands that just show help for other subcommands. For example, -// if you only register "foo bar", then "foo" is automatically created. -// -type CLI struct { - // Args is the list of command-line arguments received excluding - // the name of the app. For example, if the command "./cli foo bar" - // was invoked, then Args should be []string{"foo", "bar"}. - Args []string - - // Commands is a mapping of subcommand names to a factory function - // for creating that Command implementation. If there is a command - // with a blank string "", then it will be used as the default command - // if no subcommand is specified. - // - // If the key has a space in it, this will create a nested subcommand. - // For example, if the key is "foo bar", then to access it our CLI - // must be accessed with "./cli foo bar". See the docs for CLI for - // notes on how this changes some other behavior of the CLI as well. - // - // The factory should be as cheap as possible, ideally only allocating - // a struct. The factory may be called multiple times in the course - // of a command execution and certain events such as help require the - // instantiation of all commands. Expensive initialization should be - // deferred to function calls within the interface implementation. - Commands map[string]CommandFactory - - // HiddenCommands is a list of commands that are "hidden". Hidden - // commands are not given to the help function callback and do not - // show up in autocomplete. The values in the slice should be equivalent - // to the keys in the command map. - HiddenCommands []string - - // Name defines the name of the CLI. - Name string - - // Version of the CLI. - Version string - - // Autocomplete enables or disables subcommand auto-completion support. - // This is enabled by default when NewCLI is called. Otherwise, this - // must enabled explicitly. - // - // Autocomplete requires the "Name" option to be set on CLI. This name - // should be set exactly to the binary name that is autocompleted. - // - // Autocompletion is supported via the github.com/posener/complete - // library. This library supports bash, zsh and fish. To add support - // for other shells, please see that library. - // - // AutocompleteInstall and AutocompleteUninstall are the global flag - // names for installing and uninstalling the autocompletion handlers - // for the user's shell. The flag should omit the hyphen(s) in front of - // the value. Both single and double hyphens will automatically be supported - // for the flag name. These default to `autocomplete-install` and - // `autocomplete-uninstall` respectively. - // - // AutocompleteNoDefaultFlags is a boolean which controls if the default auto- - // complete flags like -help and -version are added to the output. - // - // AutocompleteGlobalFlags are a mapping of global flags for - // autocompletion. The help and version flags are automatically added. - Autocomplete bool - AutocompleteInstall string - AutocompleteUninstall string - AutocompleteNoDefaultFlags bool - AutocompleteGlobalFlags complete.Flags - autocompleteInstaller autocompleteInstaller // For tests - - // HelpFunc is the function called to generate the generic help - // text that is shown if help must be shown for the CLI that doesn't - // pertain to a specific command. - HelpFunc HelpFunc - - // HelpWriter is used to print help text and version when requested. 
- // Defaults to os.Stderr for backwards compatibility. - // It is recommended that you set HelpWriter to os.Stdout, and - // ErrorWriter to os.Stderr. - HelpWriter io.Writer - - // ErrorWriter used to output errors when a command can not be run. - // Defaults to the value of HelpWriter for backwards compatibility. - // It is recommended that you set HelpWriter to os.Stdout, and - // ErrorWriter to os.Stderr. - ErrorWriter io.Writer - - //--------------------------------------------------------------- - // Internal fields set automatically - - once sync.Once - autocomplete *complete.Complete - commandTree *radix.Tree - commandNested bool - commandHidden map[string]struct{} - subcommand string - subcommandArgs []string - topFlags []string - - // These are true when special global flags are set. We can/should - // probably use a bitset for this one day. - isHelp bool - isVersion bool - isAutocompleteInstall bool - isAutocompleteUninstall bool -} - -// NewClI returns a new CLI instance with sensible defaults. -func NewCLI(app, version string) *CLI { - return &CLI{ - Name: app, - Version: version, - HelpFunc: BasicHelpFunc(app), - Autocomplete: true, - } - -} - -// IsHelp returns whether or not the help flag is present within the -// arguments. -func (c *CLI) IsHelp() bool { - c.once.Do(c.init) - return c.isHelp -} - -// IsVersion returns whether or not the version flag is present within the -// arguments. -func (c *CLI) IsVersion() bool { - c.once.Do(c.init) - return c.isVersion -} - -// Run runs the actual CLI based on the arguments given. -func (c *CLI) Run() (int, error) { - c.once.Do(c.init) - - // If this is a autocompletion request, satisfy it. This must be called - // first before anything else since its possible to be autocompleting - // -help or -version or other flags and we want to show completions - // and not actually write the help or version. - if c.Autocomplete && c.autocomplete.Complete() { - return 0, nil - } - - // Just show the version and exit if instructed. - if c.IsVersion() && c.Version != "" { - c.HelpWriter.Write([]byte(c.Version + "\n")) - return 0, nil - } - - // Just print the help when only '-h' or '--help' is passed. - if c.IsHelp() && c.Subcommand() == "" { - c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.Subcommand())) + "\n")) - return 0, nil - } - - // If we're attempting to install or uninstall autocomplete then handle - if c.Autocomplete { - // Autocomplete requires the "Name" to be set so that we know what - // command to setup the autocomplete on. - if c.Name == "" { - return 1, fmt.Errorf( - "internal error: CLI.Name must be specified for autocomplete to work") - } - - // If both install and uninstall flags are specified, then error - if c.isAutocompleteInstall && c.isAutocompleteUninstall { - return 1, fmt.Errorf( - "Either the autocomplete install or uninstall flag may " + - "be specified, but not both.") - } - - // If the install flag is specified, perform the install or uninstall - if c.isAutocompleteInstall { - if err := c.autocompleteInstaller.Install(c.Name); err != nil { - return 1, err - } - - return 0, nil - } - - if c.isAutocompleteUninstall { - if err := c.autocompleteInstaller.Uninstall(c.Name); err != nil { - return 1, err - } - - return 0, nil - } - } - - // Attempt to get the factory function for creating the command - // implementation. If the command is invalid or blank, it is an error. 
- raw, ok := c.commandTree.Get(c.Subcommand()) - if !ok { - c.ErrorWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n")) - return 127, nil - } - - command, err := raw.(CommandFactory)() - if err != nil { - return 1, err - } - - // If we've been instructed to just print the help, then print it - if c.IsHelp() { - c.commandHelp(c.HelpWriter, command) - return 0, nil - } - - // If there is an invalid flag, then error - if len(c.topFlags) > 0 { - c.ErrorWriter.Write([]byte( - "Invalid flags before the subcommand. If these flags are for\n" + - "the subcommand, please put them after the subcommand.\n\n")) - c.commandHelp(c.ErrorWriter, command) - return 1, nil - } - - code := command.Run(c.SubcommandArgs()) - if code == RunResultHelp { - // Requesting help - c.commandHelp(c.ErrorWriter, command) - return 1, nil - } - - return code, nil -} - -// Subcommand returns the subcommand that the CLI would execute. For -// example, a CLI from "--version version --help" would return a Subcommand -// of "version" -func (c *CLI) Subcommand() string { - c.once.Do(c.init) - return c.subcommand -} - -// SubcommandArgs returns the arguments that will be passed to the -// subcommand. -func (c *CLI) SubcommandArgs() []string { - c.once.Do(c.init) - return c.subcommandArgs -} - -// subcommandParent returns the parent of this subcommand, if there is one. -// If there isn't on, "" is returned. -func (c *CLI) subcommandParent() string { - // Get the subcommand, if it is "" alread just return - sub := c.Subcommand() - if sub == "" { - return sub - } - - // Clear any trailing spaces and find the last space - sub = strings.TrimRight(sub, " ") - idx := strings.LastIndex(sub, " ") - - if idx == -1 { - // No space means our parent is root - return "" - } - - return sub[:idx] -} - -func (c *CLI) init() { - if c.HelpFunc == nil { - c.HelpFunc = BasicHelpFunc("app") - - if c.Name != "" { - c.HelpFunc = BasicHelpFunc(c.Name) - } - } - - if c.HelpWriter == nil { - c.HelpWriter = os.Stderr - } - if c.ErrorWriter == nil { - c.ErrorWriter = c.HelpWriter - } - - // Build our hidden commands - if len(c.HiddenCommands) > 0 { - c.commandHidden = make(map[string]struct{}) - for _, h := range c.HiddenCommands { - c.commandHidden[h] = struct{}{} - } - } - - // Build our command tree - c.commandTree = radix.New() - c.commandNested = false - for k, v := range c.Commands { - k = strings.TrimSpace(k) - c.commandTree.Insert(k, v) - if strings.ContainsRune(k, ' ') { - c.commandNested = true - } - } - - // Go through the key and fill in any missing parent commands - if c.commandNested { - var walkFn radix.WalkFn - toInsert := make(map[string]struct{}) - walkFn = func(k string, raw interface{}) bool { - idx := strings.LastIndex(k, " ") - if idx == -1 { - // If there is no space, just ignore top level commands - return false - } - - // Trim up to that space so we can get the expected parent - k = k[:idx] - if _, ok := c.commandTree.Get(k); ok { - // Yay we have the parent! - return false - } - - // We're missing the parent, so let's insert this - toInsert[k] = struct{}{} - - // Call the walk function recursively so we check this one too - return walkFn(k, nil) - } - - // Walk! 
- c.commandTree.Walk(walkFn) - - // Insert any that we're missing - for k := range toInsert { - var f CommandFactory = func() (Command, error) { - return &MockCommand{ - HelpText: "This command is accessed by using one of the subcommands below.", - RunResult: RunResultHelp, - }, nil - } - - c.commandTree.Insert(k, f) - } - } - - // Setup autocomplete if we have it enabled. We have to do this after - // the command tree is setup so we can use the radix tree to easily find - // all subcommands. - if c.Autocomplete { - c.initAutocomplete() - } - - // Process the args - c.processArgs() -} - -func (c *CLI) initAutocomplete() { - if c.AutocompleteInstall == "" { - c.AutocompleteInstall = defaultAutocompleteInstall - } - - if c.AutocompleteUninstall == "" { - c.AutocompleteUninstall = defaultAutocompleteUninstall - } - - if c.autocompleteInstaller == nil { - c.autocompleteInstaller = &realAutocompleteInstaller{} - } - - // We first set c.autocomplete to a noop autocompleter that outputs - // to nul so that we can detect if we're autocompleting or not. If we're - // not, then we do nothing. This saves a LOT of compute cycles since - // initAutoCompleteSub has to walk every command. - c.autocomplete = complete.New(c.Name, complete.Command{}) - c.autocomplete.Out = ioutil.Discard - if !c.autocomplete.Complete() { - return - } - - // Build the root command - cmd := c.initAutocompleteSub("") - - // For the root, we add the global flags to the "Flags". This way - // they don't show up on every command. - if !c.AutocompleteNoDefaultFlags { - cmd.Flags = map[string]complete.Predictor{ - "-" + c.AutocompleteInstall: complete.PredictNothing, - "-" + c.AutocompleteUninstall: complete.PredictNothing, - "-help": complete.PredictNothing, - "-version": complete.PredictNothing, - } - } - cmd.GlobalFlags = c.AutocompleteGlobalFlags - - c.autocomplete = complete.New(c.Name, cmd) -} - -// initAutocompleteSub creates the complete.Command for a subcommand with -// the given prefix. This will continue recursively for all subcommands. -// The prefix "" (empty string) can be used for the root command. -func (c *CLI) initAutocompleteSub(prefix string) complete.Command { - var cmd complete.Command - walkFn := func(k string, raw interface{}) bool { - // Ignore the empty key which can be present for default commands. - if k == "" { - return false - } - - // Keep track of the full key so that we can nest further if necessary - fullKey := k - - if len(prefix) > 0 { - // If we have a prefix, trim the prefix + 1 (for the space) - // Example: turns "sub one" to "one" with prefix "sub" - k = k[len(prefix)+1:] - } - - if idx := strings.Index(k, " "); idx >= 0 { - // If there is a space, we trim up to the space. This turns - // "sub sub2 sub3" into "sub". The prefix trim above will - // trim our current depth properly. - k = k[:idx] - } - - if _, ok := cmd.Sub[k]; ok { - // If we already tracked this subcommand then ignore - return false - } - - // If the command is hidden, don't record it at all - if _, ok := c.commandHidden[fullKey]; ok { - return false - } - - if cmd.Sub == nil { - cmd.Sub = complete.Commands(make(map[string]complete.Command)) - } - subCmd := c.initAutocompleteSub(fullKey) - - // Instantiate the command so that we can check if the command is - // a CommandAutocomplete implementation. If there is an error - // creating the command, we just ignore it since that will be caught - // later. - impl, err := raw.(CommandFactory)() - if err != nil { - impl = nil - } - - // Check if it implements ComandAutocomplete. 
If so, setup the autocomplete - if c, ok := impl.(CommandAutocomplete); ok { - subCmd.Args = c.AutocompleteArgs() - subCmd.Flags = c.AutocompleteFlags() - } - - cmd.Sub[k] = subCmd - return false - } - - walkPrefix := prefix - if walkPrefix != "" { - walkPrefix += " " - } - - c.commandTree.WalkPrefix(walkPrefix, walkFn) - return cmd -} - -func (c *CLI) commandHelp(out io.Writer, command Command) { - // Get the template to use - tpl := strings.TrimSpace(defaultHelpTemplate) - if t, ok := command.(CommandHelpTemplate); ok { - tpl = t.HelpTemplate() - } - if !strings.HasSuffix(tpl, "\n") { - tpl += "\n" - } - - // Parse it - t, err := template.New("root").Funcs(sprig.TxtFuncMap()).Parse(tpl) - if err != nil { - t = template.Must(template.New("root").Parse(fmt.Sprintf( - "Internal error! Failed to parse command help template: %s\n", err))) - } - - // Template data - data := map[string]interface{}{ - "Name": c.Name, - "SubcommandName": c.Subcommand(), - "Help": command.Help(), - } - - // Build subcommand list if we have it - var subcommandsTpl []map[string]interface{} - if c.commandNested { - // Get the matching keys - subcommands := c.helpCommands(c.Subcommand()) - keys := make([]string, 0, len(subcommands)) - for k := range subcommands { - keys = append(keys, k) - } - - // Sort the keys - sort.Strings(keys) - - // Figure out the padding length - var longest int - for _, k := range keys { - if v := len(k); v > longest { - longest = v - } - } - - // Go through and create their structures - subcommandsTpl = make([]map[string]interface{}, 0, len(subcommands)) - for _, k := range keys { - // Get the command - raw, ok := subcommands[k] - if !ok { - c.ErrorWriter.Write([]byte(fmt.Sprintf( - "Error getting subcommand %q", k))) - } - sub, err := raw() - if err != nil { - c.ErrorWriter.Write([]byte(fmt.Sprintf( - "Error instantiating %q: %s", k, err))) - } - - // Find the last space and make sure we only include that last part - name := k - if idx := strings.LastIndex(k, " "); idx > -1 { - name = name[idx+1:] - } - - subcommandsTpl = append(subcommandsTpl, map[string]interface{}{ - "Name": name, - "NameAligned": name + strings.Repeat(" ", longest-len(k)), - "Help": sub.Help(), - "Synopsis": sub.Synopsis(), - }) - } - } - data["Subcommands"] = subcommandsTpl - - // Write - err = t.Execute(out, data) - if err == nil { - return - } - - // An error, just output... - c.ErrorWriter.Write([]byte(fmt.Sprintf( - "Internal error rendering help: %s", err))) -} - -// helpCommands returns the subcommands for the HelpFunc argument. -// This will only contain immediate subcommands. -func (c *CLI) helpCommands(prefix string) map[string]CommandFactory { - // If our prefix isn't empty, make sure it ends in ' ' - if prefix != "" && prefix[len(prefix)-1] != ' ' { - prefix += " " - } - - // Get all the subkeys of this command - var keys []string - c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool { - // Ignore any sub-sub keys, i.e. 
"foo bar baz" when we want "foo bar" - if !strings.Contains(k[len(prefix):], " ") { - keys = append(keys, k) - } - - return false - }) - - // For each of the keys return that in the map - result := make(map[string]CommandFactory, len(keys)) - for _, k := range keys { - raw, ok := c.commandTree.Get(k) - if !ok { - // We just got it via WalkPrefix above, so we just panic - panic("not found: " + k) - } - - // If this is a hidden command, don't show it - if _, ok := c.commandHidden[k]; ok { - continue - } - - result[k] = raw.(CommandFactory) - } - - return result -} - -func (c *CLI) processArgs() { - for i, arg := range c.Args { - if arg == "--" { - break - } - - // Check for help flags. - if arg == "-h" || arg == "-help" || arg == "--help" { - c.isHelp = true - continue - } - - // Check for autocomplete flags - if c.Autocomplete { - if arg == "-"+c.AutocompleteInstall || arg == "--"+c.AutocompleteInstall { - c.isAutocompleteInstall = true - continue - } - - if arg == "-"+c.AutocompleteUninstall || arg == "--"+c.AutocompleteUninstall { - c.isAutocompleteUninstall = true - continue - } - } - - if c.subcommand == "" { - // Check for version flags if not in a subcommand. - if arg == "-v" || arg == "-version" || arg == "--version" { - c.isVersion = true - continue - } - - if arg != "" && arg[0] == '-' { - // Record the arg... - c.topFlags = append(c.topFlags, arg) - } - } - - // If we didn't find a subcommand yet and this is the first non-flag - // argument, then this is our subcommand. - if c.subcommand == "" && arg != "" && arg[0] != '-' { - c.subcommand = arg - if c.commandNested { - // If the command has a space in it, then it is invalid. - // Set a blank command so that it fails. - if strings.ContainsRune(arg, ' ') { - c.subcommand = "" - return - } - - // Determine the argument we look to to end subcommands. - // We look at all arguments until one has a space. This - // disallows commands like: ./cli foo "bar baz". An argument - // with a space is always an argument. - j := 0 - for k, v := range c.Args[i:] { - if strings.ContainsRune(v, ' ') { - break - } - - j = i + k + 1 - } - - // Nested CLI, the subcommand is actually the entire - // arg list up to a flag that is still a valid subcommand. - searchKey := strings.Join(c.Args[i:j], " ") - k, _, ok := c.commandTree.LongestPrefix(searchKey) - if ok { - // k could be a prefix that doesn't contain the full - // command such as "foo" instead of "foobar", so we - // need to verify that we have an entire key. To do that, - // we look for an ending in a space or an end of string. - reVerify := regexp.MustCompile(regexp.QuoteMeta(k) + `( |$)`) - if reVerify.MatchString(searchKey) { - c.subcommand = k - i += strings.Count(k, " ") - } - } - } - - // The remaining args the subcommand arguments - c.subcommandArgs = c.Args[i+1:] - } - } - - // If we never found a subcommand and support a default command, then - // switch to using that. - if c.subcommand == "" { - if _, ok := c.Commands[""]; ok { - args := c.topFlags - args = append(args, c.subcommandArgs...) - c.topFlags = nil - c.subcommandArgs = args - } - } -} - -// defaultAutocompleteInstall and defaultAutocompleteUninstall are the -// default values for the autocomplete install and uninstall flags. 
-const defaultAutocompleteInstall = "autocomplete-install" -const defaultAutocompleteUninstall = "autocomplete-uninstall" - -const defaultHelpTemplate = ` -{{.Help}}{{if gt (len .Subcommands) 0}} - -Subcommands: -{{- range $value := .Subcommands }} - {{ $value.NameAligned }} {{ $value.Synopsis }}{{ end }} -{{- end }} -` diff --git a/vendor/github.com/mitchellh/cli/command.go b/vendor/github.com/mitchellh/cli/command.go deleted file mode 100644 index bed11faf57..0000000000 --- a/vendor/github.com/mitchellh/cli/command.go +++ /dev/null @@ -1,67 +0,0 @@ -package cli - -import ( - "github.com/posener/complete" -) - -const ( - // RunResultHelp is a value that can be returned from Run to signal - // to the CLI to render the help output. - RunResultHelp = -18511 -) - -// A command is a runnable sub-command of a CLI. -type Command interface { - // Help should return long-form help text that includes the command-line - // usage, a brief few sentences explaining the function of the command, - // and the complete list of flags the command accepts. - Help() string - - // Run should run the actual command with the given CLI instance and - // command-line arguments. It should return the exit status when it is - // finished. - // - // There are a handful of special exit codes this can return documented - // above that change behavior. - Run(args []string) int - - // Synopsis should return a one-line, short synopsis of the command. - // This should be less than 50 characters ideally. - Synopsis() string -} - -// CommandAutocomplete is an extension of Command that enables fine-grained -// autocompletion. Subcommand autocompletion will work even if this interface -// is not implemented. By implementing this interface, more advanced -// autocompletion is enabled. -type CommandAutocomplete interface { - // AutocompleteArgs returns the argument predictor for this command. - // If argument completion is not supported, this should return - // complete.PredictNothing. - AutocompleteArgs() complete.Predictor - - // AutocompleteFlags returns a mapping of supported flags and autocomplete - // options for this command. The map key for the Flags map should be the - // complete flag such as "-foo" or "--foo". - AutocompleteFlags() complete.Flags -} - -// CommandHelpTemplate is an extension of Command that also has a function -// for returning a template for the help rather than the help itself. In -// this scenario, both Help and HelpTemplate should be implemented. -// -// If CommandHelpTemplate isn't implemented, the Help is output as-is. -type CommandHelpTemplate interface { - // HelpTemplate is the template in text/template format to use for - // displaying the Help. The keys available are: - // - // * ".Help" - The help text itself - // * ".Subcommands" - // - HelpTemplate() string -} - -// CommandFactory is a type of function that is a factory for commands. -// We need a factory because we may need to setup some state on the -// struct that implements the command itself. -type CommandFactory func() (Command, error) diff --git a/vendor/github.com/mitchellh/cli/command_mock.go b/vendor/github.com/mitchellh/cli/command_mock.go deleted file mode 100644 index 7a584b7e9b..0000000000 --- a/vendor/github.com/mitchellh/cli/command_mock.go +++ /dev/null @@ -1,63 +0,0 @@ -package cli - -import ( - "github.com/posener/complete" -) - -// MockCommand is an implementation of Command that can be used for tests. -// It is publicly exported from this package in case you want to use it -// externally. 
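Aside (not part of this patch): a minimal sketch of the kind of test the exported mock supported, assuming only the field names in the declaration that follows:

```go
package cli_test

import (
	"testing"

	"github.com/mitchellh/cli"
)

func TestNoopCommand(t *testing.T) {
	mock := &cli.MockCommand{RunResult: 0}

	c := &cli.CLI{
		Args: []string{"noop"},
		Commands: map[string]cli.CommandFactory{
			"noop": func() (cli.Command, error) { return mock, nil },
		},
	}

	code, err := c.Run()
	if err != nil || code != 0 {
		t.Fatalf("got code=%d err=%v", code, err)
	}
	if !mock.RunCalled {
		t.Fatal("expected the mock's Run to be called")
	}
}
```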
-type MockCommand struct { - // Settable - HelpText string - RunResult int - SynopsisText string - - // Set by the command - RunCalled bool - RunArgs []string -} - -func (c *MockCommand) Help() string { - return c.HelpText -} - -func (c *MockCommand) Run(args []string) int { - c.RunCalled = true - c.RunArgs = args - - return c.RunResult -} - -func (c *MockCommand) Synopsis() string { - return c.SynopsisText -} - -// MockCommandAutocomplete is an implementation of CommandAutocomplete. -type MockCommandAutocomplete struct { - MockCommand - - // Settable - AutocompleteArgsValue complete.Predictor - AutocompleteFlagsValue complete.Flags -} - -func (c *MockCommandAutocomplete) AutocompleteArgs() complete.Predictor { - return c.AutocompleteArgsValue -} - -func (c *MockCommandAutocomplete) AutocompleteFlags() complete.Flags { - return c.AutocompleteFlagsValue -} - -// MockCommandHelpTemplate is an implementation of CommandHelpTemplate. -type MockCommandHelpTemplate struct { - MockCommand - - // Settable - HelpTemplateText string -} - -func (c *MockCommandHelpTemplate) HelpTemplate() string { - return c.HelpTemplateText -} diff --git a/vendor/github.com/mitchellh/cli/help.go b/vendor/github.com/mitchellh/cli/help.go deleted file mode 100644 index f5ca58f595..0000000000 --- a/vendor/github.com/mitchellh/cli/help.go +++ /dev/null @@ -1,79 +0,0 @@ -package cli - -import ( - "bytes" - "fmt" - "log" - "sort" - "strings" -) - -// HelpFunc is the type of the function that is responsible for generating -// the help output when the CLI must show the general help text. -type HelpFunc func(map[string]CommandFactory) string - -// BasicHelpFunc generates some basic help output that is usually good enough -// for most CLI applications. -func BasicHelpFunc(app string) HelpFunc { - return func(commands map[string]CommandFactory) string { - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf( - "Usage: %s [--version] [--help] []\n\n", - app)) - buf.WriteString("Available commands are:\n") - - // Get the list of keys so we can sort them, and also get the maximum - // key length so they can be aligned properly. - keys := make([]string, 0, len(commands)) - maxKeyLen := 0 - for key := range commands { - if len(key) > maxKeyLen { - maxKeyLen = len(key) - } - - keys = append(keys, key) - } - sort.Strings(keys) - - for _, key := range keys { - commandFunc, ok := commands[key] - if !ok { - // This should never happen since we JUST built the list of - // keys. - panic("command not found: " + key) - } - - command, err := commandFunc() - if err != nil { - log.Printf("[ERR] cli: Command '%s' failed to load: %s", - key, err) - continue - } - - key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key))) - buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis())) - } - - return buf.String() - } -} - -// FilteredHelpFunc will filter the commands to only include the keys -// in the include parameter. 
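Aside (not part of this patch): a sketch of composing the two help helpers per the doc comment above — only the named commands appear in the top-level help listing:

```go
package main

import "github.com/mitchellh/cli"

func newCLI(commands map[string]cli.CommandFactory) *cli.CLI {
	return &cli.CLI{
		Name:     "app",
		Commands: commands,
		// Hide everything except "serve" and "version" from the
		// generated top-level help output.
		HelpFunc: cli.FilteredHelpFunc(
			[]string{"serve", "version"},
			cli.BasicHelpFunc("app"),
		),
	}
}
```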
-func FilteredHelpFunc(include []string, f HelpFunc) HelpFunc { - return func(commands map[string]CommandFactory) string { - set := make(map[string]struct{}) - for _, k := range include { - set[k] = struct{}{} - } - - filtered := make(map[string]CommandFactory) - for k, f := range commands { - if _, ok := set[k]; ok { - filtered[k] = f - } - } - - return f(filtered) - } -} diff --git a/vendor/github.com/mitchellh/cli/ui.go b/vendor/github.com/mitchellh/cli/ui.go deleted file mode 100644 index a2d6f94f45..0000000000 --- a/vendor/github.com/mitchellh/cli/ui.go +++ /dev/null @@ -1,187 +0,0 @@ -package cli - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "os/signal" - "strings" - - "github.com/bgentry/speakeasy" - "github.com/mattn/go-isatty" -) - -// Ui is an interface for interacting with the terminal, or "interface" -// of a CLI. This abstraction doesn't have to be used, but helps provide -// a simple, layerable way to manage user interactions. -type Ui interface { - // Ask asks the user for input using the given query. The response is - // returned as the given string, or an error. - Ask(string) (string, error) - - // AskSecret asks the user for input using the given query, but does not echo - // the keystrokes to the terminal. - AskSecret(string) (string, error) - - // Output is called for normal standard output. - Output(string) - - // Info is called for information related to the previous output. - // In general this may be the exact same as Output, but this gives - // Ui implementors some flexibility with output formats. - Info(string) - - // Error is used for any error messages that might appear on standard - // error. - Error(string) - - // Warn is used for any warning messages that might appear on standard - // error. - Warn(string) -} - -// BasicUi is an implementation of Ui that just outputs to the given -// writer. This UI is not threadsafe by default, but you can wrap it -// in a ConcurrentUi to make it safe. -type BasicUi struct { - Reader io.Reader - Writer io.Writer - ErrorWriter io.Writer -} - -func (u *BasicUi) Ask(query string) (string, error) { - return u.ask(query, false) -} - -func (u *BasicUi) AskSecret(query string) (string, error) { - return u.ask(query, true) -} - -func (u *BasicUi) ask(query string, secret bool) (string, error) { - if _, err := fmt.Fprint(u.Writer, query+" "); err != nil { - return "", err - } - - // Register for interrupts so that we can catch it and immediately - // return... - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) - defer signal.Stop(sigCh) - - // Ask for input in a go-routine so that we can ignore it. - errCh := make(chan error, 1) - lineCh := make(chan string, 1) - go func() { - var line string - var err error - if secret && isatty.IsTerminal(os.Stdin.Fd()) { - line, err = speakeasy.Ask("") - } else { - r := bufio.NewReader(u.Reader) - line, err = r.ReadString('\n') - } - if err != nil { - errCh <- err - return - } - - lineCh <- strings.TrimRight(line, "\r\n") - }() - - select { - case err := <-errCh: - return "", err - case line := <-lineCh: - return line, nil - case <-sigCh: - // Print a newline so that any further output starts properly - // on a new line. 
- fmt.Fprintln(u.Writer) - - return "", errors.New("interrupted") - } -} - -func (u *BasicUi) Error(message string) { - w := u.Writer - if u.ErrorWriter != nil { - w = u.ErrorWriter - } - - fmt.Fprint(w, message) - fmt.Fprint(w, "\n") -} - -func (u *BasicUi) Info(message string) { - u.Output(message) -} - -func (u *BasicUi) Output(message string) { - fmt.Fprint(u.Writer, message) - fmt.Fprint(u.Writer, "\n") -} - -func (u *BasicUi) Warn(message string) { - u.Error(message) -} - -// PrefixedUi is an implementation of Ui that prefixes messages. -type PrefixedUi struct { - AskPrefix string - AskSecretPrefix string - OutputPrefix string - InfoPrefix string - ErrorPrefix string - WarnPrefix string - Ui Ui -} - -func (u *PrefixedUi) Ask(query string) (string, error) { - if query != "" { - query = fmt.Sprintf("%s%s", u.AskPrefix, query) - } - - return u.Ui.Ask(query) -} - -func (u *PrefixedUi) AskSecret(query string) (string, error) { - if query != "" { - query = fmt.Sprintf("%s%s", u.AskSecretPrefix, query) - } - - return u.Ui.AskSecret(query) -} - -func (u *PrefixedUi) Error(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.ErrorPrefix, message) - } - - u.Ui.Error(message) -} - -func (u *PrefixedUi) Info(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.InfoPrefix, message) - } - - u.Ui.Info(message) -} - -func (u *PrefixedUi) Output(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.OutputPrefix, message) - } - - u.Ui.Output(message) -} - -func (u *PrefixedUi) Warn(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.WarnPrefix, message) - } - - u.Ui.Warn(message) -} diff --git a/vendor/github.com/mitchellh/cli/ui_colored.go b/vendor/github.com/mitchellh/cli/ui_colored.go deleted file mode 100644 index b0ec44840e..0000000000 --- a/vendor/github.com/mitchellh/cli/ui_colored.go +++ /dev/null @@ -1,73 +0,0 @@ -package cli - -import ( - "github.com/fatih/color" -) - -const ( - noColor = -1 -) - -// UiColor is a posix shell color code to use. -type UiColor struct { - Code int - Bold bool -} - -// A list of colors that are useful. These are all non-bolded by default. -var ( - UiColorNone UiColor = UiColor{noColor, false} - UiColorRed = UiColor{int(color.FgHiRed), false} - UiColorGreen = UiColor{int(color.FgHiGreen), false} - UiColorYellow = UiColor{int(color.FgHiYellow), false} - UiColorBlue = UiColor{int(color.FgHiBlue), false} - UiColorMagenta = UiColor{int(color.FgHiMagenta), false} - UiColorCyan = UiColor{int(color.FgHiCyan), false} -) - -// ColoredUi is a Ui implementation that colors its output according -// to the given color schemes for the given type of output. 
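Aside (not part of this patch): a sketch of layering the wrapper below over a BasicUi; per the colorize implementation that follows, UiColorNone short-circuits coloring, so those streams stay plain:

```go
package main

import (
	"os"

	"github.com/mitchellh/cli"
)

func newUi() cli.Ui {
	return &cli.ColoredUi{
		// Leave normal output uncolored; highlight warnings and errors.
		OutputColor: cli.UiColorNone,
		InfoColor:   cli.UiColorNone,
		WarnColor:   cli.UiColorYellow,
		ErrorColor:  cli.UiColorRed,
		Ui: &cli.BasicUi{
			Reader:      os.Stdin,
			Writer:      os.Stdout,
			ErrorWriter: os.Stderr,
		},
	}
}
```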
-type ColoredUi struct { - OutputColor UiColor - InfoColor UiColor - ErrorColor UiColor - WarnColor UiColor - Ui Ui -} - -func (u *ColoredUi) Ask(query string) (string, error) { - return u.Ui.Ask(u.colorize(query, u.OutputColor)) -} - -func (u *ColoredUi) AskSecret(query string) (string, error) { - return u.Ui.AskSecret(u.colorize(query, u.OutputColor)) -} - -func (u *ColoredUi) Output(message string) { - u.Ui.Output(u.colorize(message, u.OutputColor)) -} - -func (u *ColoredUi) Info(message string) { - u.Ui.Info(u.colorize(message, u.InfoColor)) -} - -func (u *ColoredUi) Error(message string) { - u.Ui.Error(u.colorize(message, u.ErrorColor)) -} - -func (u *ColoredUi) Warn(message string) { - u.Ui.Warn(u.colorize(message, u.WarnColor)) -} - -func (u *ColoredUi) colorize(message string, uc UiColor) string { - if uc.Code == noColor { - return message - } - - attr := []color.Attribute{color.Attribute(uc.Code)} - if uc.Bold { - attr = append(attr, color.Bold) - } - - return color.New(attr...).SprintFunc()(message) -} diff --git a/vendor/github.com/mitchellh/cli/ui_concurrent.go b/vendor/github.com/mitchellh/cli/ui_concurrent.go deleted file mode 100644 index b4f4dbfaa8..0000000000 --- a/vendor/github.com/mitchellh/cli/ui_concurrent.go +++ /dev/null @@ -1,54 +0,0 @@ -package cli - -import ( - "sync" -) - -// ConcurrentUi is a wrapper around a Ui interface (and implements that -// interface) making the underlying Ui concurrency safe. -type ConcurrentUi struct { - Ui Ui - l sync.Mutex -} - -func (u *ConcurrentUi) Ask(query string) (string, error) { - u.l.Lock() - defer u.l.Unlock() - - return u.Ui.Ask(query) -} - -func (u *ConcurrentUi) AskSecret(query string) (string, error) { - u.l.Lock() - defer u.l.Unlock() - - return u.Ui.AskSecret(query) -} - -func (u *ConcurrentUi) Error(message string) { - u.l.Lock() - defer u.l.Unlock() - - u.Ui.Error(message) -} - -func (u *ConcurrentUi) Info(message string) { - u.l.Lock() - defer u.l.Unlock() - - u.Ui.Info(message) -} - -func (u *ConcurrentUi) Output(message string) { - u.l.Lock() - defer u.l.Unlock() - - u.Ui.Output(message) -} - -func (u *ConcurrentUi) Warn(message string) { - u.l.Lock() - defer u.l.Unlock() - - u.Ui.Warn(message) -} diff --git a/vendor/github.com/mitchellh/cli/ui_mock.go b/vendor/github.com/mitchellh/cli/ui_mock.go deleted file mode 100644 index 935f28a4a6..0000000000 --- a/vendor/github.com/mitchellh/cli/ui_mock.go +++ /dev/null @@ -1,116 +0,0 @@ -package cli - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - "sync" -) - -// NewMockUi returns a fully initialized MockUi instance -// which is safe for concurrent use. -func NewMockUi() *MockUi { - m := new(MockUi) - m.once.Do(m.init) - return m -} - -// MockUi is a mock UI that is used for tests and is exported publicly -// for use in external tests if needed as well. Do not instantite this -// directly since the buffers will be initialized on the first write. If -// there is no write then you will get a nil panic. Please use the -// NewMockUi() constructor function instead. 
You can fix your code with -// -// sed -i -e 's/new(cli.MockUi)/cli.NewMockUi()/g' *_test.go -type MockUi struct { - InputReader io.Reader - ErrorWriter *syncBuffer - OutputWriter *syncBuffer - - once sync.Once -} - -func (u *MockUi) Ask(query string) (string, error) { - u.once.Do(u.init) - - var result string - fmt.Fprint(u.OutputWriter, query) - r := bufio.NewReader(u.InputReader) - line, err := r.ReadString('\n') - if err != nil { - return "", err - } - result = strings.TrimRight(line, "\r\n") - - return result, nil -} - -func (u *MockUi) AskSecret(query string) (string, error) { - return u.Ask(query) -} - -func (u *MockUi) Error(message string) { - u.once.Do(u.init) - - fmt.Fprint(u.ErrorWriter, message) - fmt.Fprint(u.ErrorWriter, "\n") -} - -func (u *MockUi) Info(message string) { - u.Output(message) -} - -func (u *MockUi) Output(message string) { - u.once.Do(u.init) - - fmt.Fprint(u.OutputWriter, message) - fmt.Fprint(u.OutputWriter, "\n") -} - -func (u *MockUi) Warn(message string) { - u.once.Do(u.init) - - fmt.Fprint(u.ErrorWriter, message) - fmt.Fprint(u.ErrorWriter, "\n") -} - -func (u *MockUi) init() { - u.ErrorWriter = new(syncBuffer) - u.OutputWriter = new(syncBuffer) -} - -type syncBuffer struct { - sync.RWMutex - b bytes.Buffer -} - -func (b *syncBuffer) Write(data []byte) (int, error) { - b.Lock() - defer b.Unlock() - return b.b.Write(data) -} - -func (b *syncBuffer) Read(data []byte) (int, error) { - b.RLock() - defer b.RUnlock() - return b.b.Read(data) -} - -func (b *syncBuffer) Reset() { - b.Lock() - b.b.Reset() - b.Unlock() -} - -func (b *syncBuffer) String() string { - return string(b.Bytes()) -} - -func (b *syncBuffer) Bytes() []byte { - b.RLock() - data := b.b.Bytes() - b.RUnlock() - return data -} diff --git a/vendor/github.com/mitchellh/cli/ui_writer.go b/vendor/github.com/mitchellh/cli/ui_writer.go deleted file mode 100644 index 1e1db3cf63..0000000000 --- a/vendor/github.com/mitchellh/cli/ui_writer.go +++ /dev/null @@ -1,18 +0,0 @@ -package cli - -// UiWriter is an io.Writer implementation that can be used with -// loggers that writes every line of log output data to a Ui at the -// Info level. 
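Aside (not part of this patch): the adapter below pairs naturally with the standard-library logger, as its doc comment says; a minimal sketch:

```go
package main

import (
	"log"
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	ui := &cli.BasicUi{Writer: os.Stdout}

	// Every line the logger writes is forwarded to ui.Info with the
	// trailing newline stripped (see Write below).
	logger := log.New(&cli.UiWriter{Ui: ui}, "", log.LstdFlags)
	logger.Println("starting up")
}
```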
-type UiWriter struct { - Ui Ui -} - -func (w *UiWriter) Write(p []byte) (n int, err error) { - n = len(p) - if n > 0 && p[n-1] == '\n' { - p = p[:n-1] - } - - w.Ui.Info(string(p)) - return n, nil -} diff --git a/vendor/github.com/mitchellh/colorstring/.travis.yml b/vendor/github.com/mitchellh/colorstring/.travis.yml deleted file mode 100644 index 74e286ae12..0000000000 --- a/vendor/github.com/mitchellh/colorstring/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - 1.3 - - tip - -script: - - go test - -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/mitchellh/colorstring/LICENSE b/vendor/github.com/mitchellh/colorstring/LICENSE deleted file mode 100644 index 2298515904..0000000000 --- a/vendor/github.com/mitchellh/colorstring/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/colorstring/README.md b/vendor/github.com/mitchellh/colorstring/README.md deleted file mode 100644 index 0654d454de..0000000000 --- a/vendor/github.com/mitchellh/colorstring/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# colorstring [![Build Status](https://travis-ci.org/mitchellh/colorstring.svg)](https://travis-ci.org/mitchellh/colorstring) - -colorstring is a [Go](http://www.golang.org) library for outputting colored -strings to a console using a simple inline syntax in your string to specify -the color to print as. - -For example, the string `[blue]hello [red]world` would output the text -"hello world" in two colors. The API of colorstring allows for easily disabling -colors, adding aliases, etc. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/colorstring -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/colorstring). - -Usage is easy enough: - -```go -colorstring.Println("[blue]Hello [red]World!") -``` - -Additionally, the `Colorize` struct can be used to set options such as -custom colors, color disabling, etc. diff --git a/vendor/github.com/mitchellh/colorstring/colorstring.go b/vendor/github.com/mitchellh/colorstring/colorstring.go deleted file mode 100644 index 3de5b241d9..0000000000 --- a/vendor/github.com/mitchellh/colorstring/colorstring.go +++ /dev/null @@ -1,244 +0,0 @@ -// colorstring provides functions for colorizing strings for terminal -// output. 
-package colorstring - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strings" -) - -// Color colorizes your strings using the default settings. -// -// Strings given to Color should use the syntax `[color]` to specify the -// color for text following. For example: `[blue]Hello` will return "Hello" -// in blue. See DefaultColors for all the supported colors and attributes. -// -// If an unrecognized color is given, it is ignored and assumed to be part -// of the string. For example: `[hi]world` will result in "[hi]world". -// -// A color reset is appended to the end of every string. This will reset -// the color of following strings when you output this text to the same -// terminal session. -// -// If you want to customize any of this behavior, use the Colorize struct. -func Color(v string) string { - return def.Color(v) -} - -// ColorPrefix returns the color sequence that prefixes the given text. -// -// This is useful when wrapping text if you want to inherit the color -// of the wrapped text. For example, "[green]foo" will return "[green]". -// If there is no color sequence, then this will return "". -func ColorPrefix(v string) string { - return def.ColorPrefix(v) -} - -// Colorize colorizes your strings, giving you the ability to customize -// some of the colorization process. -// -// The options in Colorize can be set to customize colorization. If you're -// only interested in the defaults, just use the top Color function directly, -// which creates a default Colorize. -type Colorize struct { - // Colors maps a color string to the code for that color. The code - // is a string so that you can use more complex colors to set foreground, - // background, attributes, etc. For example, "boldblue" might be - // "1;34" - Colors map[string]string - - // If true, color attributes will be ignored. This is useful if you're - // outputting to a location that doesn't support colors and you just - // want the strings returned. - Disable bool - - // Reset, if true, will reset the color after each colorization by - // adding a reset code at the end. - Reset bool -} - -// Color colorizes a string according to the settings setup in the struct. -// -// For more details on the syntax, see the top-level Color function. -func (c *Colorize) Color(v string) string { - matches := parseRe.FindAllStringIndex(v, -1) - if len(matches) == 0 { - return v - } - - result := new(bytes.Buffer) - colored := false - m := []int{0, 0} - for _, nm := range matches { - // Write the text in between this match and the last - result.WriteString(v[m[1]:nm[0]]) - m = nm - - var replace string - if code, ok := c.Colors[v[m[0]+1:m[1]-1]]; ok { - colored = true - - if !c.Disable { - replace = fmt.Sprintf("\033[%sm", code) - } - } else { - replace = v[m[0]:m[1]] - } - - result.WriteString(replace) - } - result.WriteString(v[m[1]:]) - - if colored && c.Reset && !c.Disable { - // Write the clear byte at the end - result.WriteString("\033[0m") - } - - return result.String() -} - -// ColorPrefix returns the first color sequence that exists in this string. -// -// For example: "[green]foo" would return "[green]". If no color sequence -// exists, then "" is returned. This is especially useful when wrapping -// colored texts to inherit the color of the wrapped text. -func (c *Colorize) ColorPrefix(v string) string { - return prefixRe.FindString(strings.TrimSpace(v)) -} - -// DefaultColors are the default colors used when colorizing. 
-// -// If the color is surrounded in underscores, such as "_blue_", then that -// color will be used for the background color. -var DefaultColors map[string]string - -func init() { - DefaultColors = map[string]string{ - // Default foreground/background colors - "default": "39", - "_default_": "49", - - // Foreground colors - "black": "30", - "red": "31", - "green": "32", - "yellow": "33", - "blue": "34", - "magenta": "35", - "cyan": "36", - "light_gray": "37", - "dark_gray": "90", - "light_red": "91", - "light_green": "92", - "light_yellow": "93", - "light_blue": "94", - "light_magenta": "95", - "light_cyan": "96", - "white": "97", - - // Background colors - "_black_": "40", - "_red_": "41", - "_green_": "42", - "_yellow_": "43", - "_blue_": "44", - "_magenta_": "45", - "_cyan_": "46", - "_light_gray_": "47", - "_dark_gray_": "100", - "_light_red_": "101", - "_light_green_": "102", - "_light_yellow_": "103", - "_light_blue_": "104", - "_light_magenta_": "105", - "_light_cyan_": "106", - "_white_": "107", - - // Attributes - "bold": "1", - "dim": "2", - "underline": "4", - "blink_slow": "5", - "blink_fast": "6", - "invert": "7", - "hidden": "8", - - // Reset to reset everything to their defaults - "reset": "0", - "reset_bold": "21", - } - - def = Colorize{ - Colors: DefaultColors, - Reset: true, - } -} - -var def Colorize -var parseReRaw = `\[[a-z0-9_-]+\]` -var parseRe = regexp.MustCompile(`(?i)` + parseReRaw) -var prefixRe = regexp.MustCompile(`^(?i)(` + parseReRaw + `)+`) - -// Print is a convenience wrapper for fmt.Print with support for color codes. -// -// Print formats using the default formats for its operands and writes to -// standard output with support for color codes. Spaces are added between -// operands when neither is a string. It returns the number of bytes written -// and any write error encountered. -func Print(a string) (n int, err error) { - return fmt.Print(Color(a)) -} - -// Println is a convenience wrapper for fmt.Println with support for color -// codes. -// -// Println formats using the default formats for its operands and writes to -// standard output with support for color codes. Spaces are always added -// between operands and a newline is appended. It returns the number of bytes -// written and any write error encountered. -func Println(a string) (n int, err error) { - return fmt.Println(Color(a)) -} - -// Printf is a convenience wrapper for fmt.Printf with support for color codes. -// -// Printf formats according to a format specifier and writes to standard output -// with support for color codes. It returns the number of bytes written and any -// write error encountered. -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(Color(format), a...) -} - -// Fprint is a convenience wrapper for fmt.Fprint with support for color codes. -// -// Fprint formats using the default formats for its operands and writes to w -// with support for color codes. Spaces are added between operands when neither -// is a string. It returns the number of bytes written and any write error -// encountered. -func Fprint(w io.Writer, a string) (n int, err error) { - return fmt.Fprint(w, Color(a)) -} - -// Fprintln is a convenience wrapper for fmt.Fprintln with support for color -// codes. -// -// Fprintln formats using the default formats for its operands and writes to w -// with support for color codes. Spaces are always added between operands and a -// newline is appended. It returns the number of bytes written and any write -// error encountered. 
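Aside (not part of this patch): a sketch exercising the removed package's helpers, including the Fprintln wrapper declared just below; a Colorize with Disable set strips recognized color tags instead of emitting escape codes:

```go
package main

import (
	"fmt"
	"os"

	"github.com/mitchellh/colorstring"
)

func main() {
	// Inline [color] tags; a reset code is appended by default.
	colorstring.Fprintln(os.Stderr, "[red]error:[default] something failed")

	// Disable removes recognized tags, useful when output is not a
	// terminal; prints "ok done" with no escape sequences.
	plain := colorstring.Colorize{
		Colors:  colorstring.DefaultColors,
		Disable: true,
	}
	fmt.Println(plain.Color("[green]ok[default] done"))
}
```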
-func Fprintln(w io.Writer, a string) (n int, err error) { - return fmt.Fprintln(w, Color(a)) -} - -// Fprintf is a convenience wrapper for fmt.Fprintf with support for color -// codes. -// -// Fprintf formats according to a format specifier and writes to w with support -// for color codes. It returns the number of bytes written and any write error -// encountered. -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, Color(format), a...) -} diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml deleted file mode 100644 index d7b9589ab1..0000000000 --- a/vendor/github.com/mitchellh/copystructure/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.7 - - tip - -script: - - go test - -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md index bcb8c8d2cb..f0fbd2e5c9 100644 --- a/vendor/github.com/mitchellh/copystructure/README.md +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -1,21 +1,21 @@ -# copystructure - -copystructure is a Go library for deep copying values in Go. - -This allows you to copy Go values that may contain reference values -such as maps, slices, or pointers, and copy their data as well instead -of just their references. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/copystructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). - -The `Copy` function has examples associated with it there. +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go index 140435255e..8089e6670a 100644 --- a/vendor/github.com/mitchellh/copystructure/copystructure.go +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -8,7 +8,30 @@ import ( "github.com/mitchellh/reflectwalk" ) +const tagKey = "copy" + // Copy returns a deep copy of v. +// +// Copy is unable to copy unexported fields in a struct (lowercase field names). +// Unexported fields can't be reflected by the Go runtime and therefore +// copystructure can't perform any data copies. +// +// For structs, copy behavior can be controlled with struct tags. For example: +// +// struct { +// Name string +// Data *bytes.Buffer `copy:"shallow"` +// } +// +// The available tag values are: +// +// * "ignore" - The field will be ignored, effectively resulting in it being +// assigned the zero value in the copy. +// +// * "shallow" - The field will be be shallow copied. This means that references +// values such as pointers, maps, slices, etc. will be directly assigned +// versus deep copied. +// func Copy(v interface{}) (interface{}, error) { return Config{}.Copy(v) } @@ -28,6 +51,19 @@ type CopierFunc func(interface{}) (interface{}, error) // this map as well as to Copy in a mutex. 
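Aside (not part of this patch): the `copy` struct tags documented in the hunk above, and implemented in StructField further below, control per-field copy behavior; a minimal sketch of what they do:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/mitchellh/copystructure"
)

type Payload struct {
	Name string
	// Copied by reference: the duplicate shares the original buffer.
	Buf *bytes.Buffer `copy:"shallow"`
	// Skipped entirely: the duplicate gets the zero value (nil).
	Scratch []byte `copy:"ignore"`
}

func main() {
	orig := &Payload{
		Name:    "a",
		Buf:     bytes.NewBufferString("x"),
		Scratch: []byte{1, 2, 3},
	}

	dup, err := copystructure.Copy(orig)
	if err != nil {
		panic(err)
	}

	copied := dup.(*Payload)
	fmt.Println(copied.Name)            // "a"
	fmt.Println(copied.Buf == orig.Buf) // true (shallow)
	fmt.Println(copied.Scratch == nil)  // true (ignored)
}
```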
var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) +// ShallowCopiers is a map of pointer types that behave specially +// when they are copied. If a type is found in this map while deep +// copying, the pointer value will be shallow copied and not walked +// into. +// +// The key should be the type, obtained using: reflect.TypeOf(value +// with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{}) + // Must is a helper that wraps a call to a function returning // (interface{}, error) and panics if the error is non-nil. It is intended // for use in variable initializations and should only be used when a copy @@ -50,6 +86,11 @@ type Config struct { // Copiers is a map of types associated with a CopierFunc. Use the global // Copiers map if this is nil. Copiers map[reflect.Type]CopierFunc + + // ShallowCopiers is a map of pointer types that when they are + // shallow copied no matter where they are encountered. Use the + // global ShallowCopiers if this is nil. + ShallowCopiers map[reflect.Type]struct{} } func (c Config) Copy(v interface{}) (interface{}, error) { @@ -65,6 +106,12 @@ func (c Config) Copy(v interface{}) (interface{}, error) { if c.Copiers == nil { c.Copiers = Copiers } + w.copiers = c.Copiers + + if c.ShallowCopiers == nil { + c.ShallowCopiers = ShallowCopiers + } + w.shallowCopiers = c.ShallowCopiers err := reflectwalk.Walk(v, w) if err != nil { @@ -93,10 +140,12 @@ func ifaceKey(pointers, depth int) uint64 { type walker struct { Result interface{} - depth int - ignoreDepth int - vals []reflect.Value - cs []reflect.Value + copiers map[reflect.Type]CopierFunc + shallowCopiers map[reflect.Type]struct{} + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value // This stores the number of pointers we've walked over, indexed by depth. ps []int @@ -263,6 +312,20 @@ func (w *walker) PointerExit(v bool) error { return nil } +func (w *walker) Pointer(v reflect.Value) error { + if _, ok := w.shallowCopiers[v.Type()]; ok { + // Shallow copy this value. Use the same logic as primitive, then + // return skip. + if err := w.Primitive(v); err != nil { + return err + } + + return reflectwalk.SkipEntry + } + + return nil +} + func (w *walker) Interface(v reflect.Value) error { if !v.IsValid() { return nil @@ -356,7 +419,7 @@ func (w *walker) Struct(s reflect.Value) error { w.lock(s) var v reflect.Value - if c, ok := Copiers[s.Type()]; ok { + if c, ok := w.copiers[s.Type()]; ok { // We have a Copier for this struct, so we use that copier to // get the copy, and we ignore anything deeper than this. w.ignoreDepth = w.depth @@ -396,9 +459,29 @@ func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { return reflectwalk.SkipEntry } + switch f.Tag.Get(tagKey) { + case "shallow": + // If we're shallow copying then assign the value directly to the + // struct and skip the entry. + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + if sf.CanSet() { + sf.Set(v) + } + } + + return reflectwalk.SkipEntry + + case "ignore": + // Do nothing + return reflectwalk.SkipEntry + } + // Push the field onto the stack, we'll handle it when we exit // the struct field in Exit... 
w.valPush(reflect.ValueOf(f)) + return nil } diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go index 3a93a0b114..7fee7b050b 100644 --- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -69,6 +69,13 @@ type PointerWalker interface { PointerExit(bool) error } +// PointerValueWalker implementations are notified with the value of +// a particular pointer when a pointer is walked. Pointer is called +// right before PointerEnter. +type PointerValueWalker interface { + Pointer(reflect.Value) error +} + // SkipEntry can be returned from walk functions to skip walking // the value of this field. This is only valid in the following functions: // @@ -130,6 +137,17 @@ func walk(v reflect.Value, w interface{}) (err error) { } if pointerV.Kind() == reflect.Ptr { + if pw, ok := w.(PointerValueWalker); ok { + if err = pw.Pointer(pointerV); err != nil { + if err == SkipEntry { + // Skip the rest of this entry but clear the error + return nil + } + + return + } + } + pointer = true v = reflect.Indirect(pointerV) } diff --git a/vendor/github.com/moricho/tparallel/.goreleaser.yaml b/vendor/github.com/moricho/tparallel/.goreleaser.yaml new file mode 100644 index 0000000000..4a04fe25b5 --- /dev/null +++ b/vendor/github.com/moricho/tparallel/.goreleaser.yaml @@ -0,0 +1,52 @@ +project_name: tparallel +before: + hooks: + - go mod tidy +builds: + - main: ./cmd/tparallel + binary: tparallel + ldflags: + - -s -w + - -X main.Version={{.Version}} + - -X main.Revision={{.ShortCommit}} + env: + - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin + +archives: + - format: tar.gz + name_template: >- + {{ .ProjectName }}_ + {{- title .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} + {{- if .Arm }}v{{ .Arm }}{{ end }} + format_overrides: + - goos: windows + format: zip +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' +release: + prerelease: auto +brews: + - tap: + owner: moricho + name: homebrew-tparallel + homepage: https://github.com/moricho/tparallel + description: tparallel detects inappropriate usage of t.Parallel() method in your Go test codes + install: | + bin.install "tparallel" + test: | + system "#{bin}/goreleaser -v" diff --git a/vendor/github.com/moricho/tparallel/.goreleaser.yml b/vendor/github.com/moricho/tparallel/.goreleaser.yml deleted file mode 100644 index e9f6d727e7..0000000000 --- a/vendor/github.com/moricho/tparallel/.goreleaser.yml +++ /dev/null @@ -1,38 +0,0 @@ -project_name: tparallel -env: - - GO111MODULE=on -before: - hooks: - - go mod tidy -builds: - - main: ./cmd/tparallel - binary: tparallel - ldflags: - - -s -w - - -X main.Version={{.Version}} - - -X main.Revision={{.ShortCommit}} - env: - - CGO_ENABLED=0 -archives: - - name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' - replacements: - darwin: darwin - linux: linux - windows: windows - 386: i386 - amd64: x86_64 - format_overrides: - - goos: windows - format: zip -release: - prerelease: auto -brews: - - tap: - owner: moricho - name: homebrew-tparallel - homepage: https://github.com/moricho/tparallel - description: tparallel detects inappropriate usage of t.Parallel() method in your Go test codes - install: | - bin.install "tparallel" - test: | - system 
"#{bin}/goreleaser -v" diff --git a/vendor/github.com/moricho/tparallel/README.md b/vendor/github.com/moricho/tparallel/README.md index cd358d1554..65ed46c422 100644 --- a/vendor/github.com/moricho/tparallel/README.md +++ b/vendor/github.com/moricho/tparallel/README.md @@ -1,37 +1,49 @@ # tparallel + [![tparallel](https://github.com/moricho/tparallel/workflows/tparallel/badge.svg?branch=master)](https://github.com/moricho/tparallel/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/moricho/tparallel)](https://goreportcard.com/report/github.com/moricho/tparallel) [![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](LICENSE) `tparallel` finds inappropriate usage of `t.Parallel()` method in your Go test codes. -It detects the following: +It detects the following: + - `t.Parallel()` is called in either a top-level test function or a sub-test function only - Although `t.Parallel()` is called in the sub-test function, it is post-processed by `defer` instead of `t.Cleanup()` - -This tool was inspired by this blog: [Go言語でのテストの並列化 〜t.Parallel()メソッドを理解する〜](https://engineering.mercari.com/blog/entry/how_to_use_t_parallel/) + +This tool was inspired by this blog: [Go 言語でのテストの並列化 〜t.Parallel()メソッドを理解する〜](https://engineering.mercari.com/blog/entry/how_to_use_t_parallel/) ## Installation ### From GitHub Releases + Please see [GitHub Releases](https://github.com/moricho/tparallel/releases). Available binaries are: + - macOS - Linux - Windows ### macOS -``` sh + +```sh $ brew tap moricho/tparallel $ brew install tparallel ``` ### go get + ```sh $ go get -u github.com/moricho/tparallel/cmd/tparallel ``` ## Usage +### golangci-lint + +[golangci-lint](https://github.com/golangci/golangci-lint) now supports `tparallel`, so you can enable this linter and use in it. + +### shell + ```sh $ go vet -vettool=`which tparallel` ``` diff --git a/vendor/github.com/nbutton23/zxcvbn-go/.gitignore b/vendor/github.com/nbutton23/zxcvbn-go/.gitignore deleted file mode 100644 index 4bff1a28e4..0000000000 --- a/vendor/github.com/nbutton23/zxcvbn-go/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -zxcvbn -debug.test diff --git a/vendor/github.com/nbutton23/zxcvbn-go/Makefile b/vendor/github.com/nbutton23/zxcvbn-go/Makefile deleted file mode 100644 index 6aa13e0067..0000000000 --- a/vendor/github.com/nbutton23/zxcvbn-go/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -PKG_LIST = $$( go list ./... 
| grep -v /vendor/ | grep -v "zxcvbn-go/data" ) - -.DEFAULT_GOAL := help - -.PHONY: help -help: - @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' - -.PHONY: test -test: ## Run `go test {Package list}` on the packages - go test $(PKG_LIST) - -.PHONY: lint -lint: ## Run `golint {Package list}` - golint $(PKG_LIST) \ No newline at end of file diff --git a/vendor/github.com/nbutton23/zxcvbn-go/match/match.go b/vendor/github.com/nbutton23/zxcvbn-go/match/match.go deleted file mode 100644 index dd30bea042..0000000000 --- a/vendor/github.com/nbutton23/zxcvbn-go/match/match.go +++ /dev/null @@ -1,44 +0,0 @@ -package match - -//Matches is an alies for []Match used for sorting -type Matches []Match - -func (s Matches) Len() int { - return len(s) -} -func (s Matches) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s Matches) Less(i, j int) bool { - if s[i].I < s[j].I { - return true - } else if s[i].I == s[j].I { - return s[i].J < s[j].J - } else { - return false - } -} - -// Match represents different matches -type Match struct { - Pattern string - I, J int - Token string - DictionaryName string - Entropy float64 -} - -//DateMatch is specifilly a match for type date -type DateMatch struct { - Pattern string - I, J int - Token string - Separator string - Day, Month, Year int64 -} - -//Matcher are a func and ID that can be used to match different passwords -type Matcher struct { - MatchingFunc func(password string) []Match - ID string -} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/repeatMatch.go b/vendor/github.com/nbutton23/zxcvbn-go/matching/repeatMatch.go deleted file mode 100644 index a93e459356..0000000000 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/repeatMatch.go +++ /dev/null @@ -1,67 +0,0 @@ -package matching - -import ( - "strings" - - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" -) - -const repeatMatcherName = "REPEAT" - -//FilterRepeatMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher -func FilterRepeatMatcher(m match.Matcher) bool { - return m.ID == repeatMatcherName -} - -func repeatMatch(password string) []match.Match { - var matches []match.Match - - //Loop through password. 
if current == prev currentStreak++ else if currentStreak > 2 {buildMatch; currentStreak = 1} prev = current - var current, prev string - currentStreak := 1 - var i int - var char rune - for i, char = range password { - current = string(char) - if i == 0 { - prev = current - continue - } - - if strings.ToLower(current) == strings.ToLower(prev) { - currentStreak++ - - } else if currentStreak > 2 { - iPos := i - currentStreak - jPos := i - 1 - matchRepeat := match.Match{ - Pattern: "repeat", - I: iPos, - J: jPos, - Token: password[iPos : jPos+1], - DictionaryName: prev} - matchRepeat.Entropy = entropy.RepeatEntropy(matchRepeat) - matches = append(matches, matchRepeat) - currentStreak = 1 - } else { - currentStreak = 1 - } - - prev = current - } - - if currentStreak > 2 { - iPos := i - currentStreak + 1 - jPos := i - matchRepeat := match.Match{ - Pattern: "repeat", - I: iPos, - J: jPos, - Token: password[iPos : jPos+1], - DictionaryName: prev} - matchRepeat.Entropy = entropy.RepeatEntropy(matchRepeat) - matches = append(matches, matchRepeat) - } - return matches -} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/spatialMatch.go b/vendor/github.com/nbutton23/zxcvbn-go/matching/spatialMatch.go deleted file mode 100644 index fd858f5d17..0000000000 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/spatialMatch.go +++ /dev/null @@ -1,88 +0,0 @@ -package matching - -import ( - "strings" - - "github.com/nbutton23/zxcvbn-go/adjacency" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" -) - -const spatialMatcherName = "SPATIAL" - -//FilterSpatialMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher -func FilterSpatialMatcher(m match.Matcher) bool { - return m.ID == spatialMatcherName -} - -func spatialMatch(password string) (matches []match.Match) { - for _, graph := range adjacencyGraphs { - if graph.Graph != nil { - matches = append(matches, spatialMatchHelper(password, graph)...) - } - } - return matches -} - -func spatialMatchHelper(password string, graph adjacency.Graph) (matches []match.Match) { - - for i := 0; i < len(password)-1; { - j := i + 1 - lastDirection := -99 //an int that it should never be! - turns := 0 - shiftedCount := 0 - - for { - prevChar := password[j-1] - found := false - foundDirection := -1 - curDirection := -1 - //My graphs seem to be wrong. . . and where the hell is qwerty - adjacents := graph.Graph[string(prevChar)] - //Consider growing pattern by one character if j hasn't gone over the edge - if j < len(password) { - curChar := password[j] - for _, adj := range adjacents { - curDirection++ - - if strings.Index(adj, string(curChar)) != -1 { - found = true - foundDirection = curDirection - - if strings.Index(adj, string(curChar)) == 1 { - //index 1 in the adjacency means the key is shifted, 0 means unshifted: A vs a, % vs 5, etc. - //for example, 'q' is adjacent to the entry '2@'. @ is shifted w/ index 1, 2 is unshifted. - shiftedCount++ - } - - if lastDirection != foundDirection { - //adding a turn is correct even in the initial case when last_direction is null: - //every spatial pattern starts with a turn. - turns++ - lastDirection = foundDirection - } - break - } - } - } - - //if the current pattern continued, extend j and try to grow again - if found { - j++ - } else { - //otherwise push the pattern discovered so far, if any... - //don't consider length 1 or 2 chains. 
- if j-i > 2 { - matchSpc := match.Match{Pattern: "spatial", I: i, J: j - 1, Token: password[i:j], DictionaryName: graph.Name} - matchSpc.Entropy = entropy.SpatialEntropy(matchSpc, turns, shiftedCount) - matches = append(matches, matchSpc) - } - //. . . and then start a new search from the rest of the password - i = j - break - } - } - - } - return matches -} diff --git a/vendor/github.com/nishanths/exhaustive/.gitignore b/vendor/github.com/nishanths/exhaustive/.gitignore index 10acec6e16..94c13223bc 100644 --- a/vendor/github.com/nishanths/exhaustive/.gitignore +++ b/vendor/github.com/nishanths/exhaustive/.gitignore @@ -3,8 +3,10 @@ tags # binary -cmd/exhaustive/exhaustive -exhaustive +/cmd/exhaustive/exhaustive +/exhaustive # testing artifacts -coverage.out +/coverage.out + +/CHANGELOG.md diff --git a/vendor/github.com/nishanths/exhaustive/Makefile b/vendor/github.com/nishanths/exhaustive/Makefile index 981a7ebe99..868f7fce29 100644 --- a/vendor/github.com/nishanths/exhaustive/Makefile +++ b/vendor/github.com/nishanths/exhaustive/Makefile @@ -1,19 +1,33 @@ .PHONY: default default: build +.PHONY: all +all: build vet test + .PHONY: build build: go build ./... + go build ./cmd/exhaustive .PHONY: test test: - go test -cover ./... + go test -count=1 ./... + +.PHONY: testshort +testshort: + go test -short -count=1 ./... + +.PHONY: cover +cover: + go test -cover -coverprofile=coverage.out ./... + go tool cover -html=coverage.out .PHONY: install-vet install-vet: go install github.com/nishanths/exhaustive/cmd/exhaustive@latest go install github.com/gordonklaus/ineffassign@latest go install github.com/kisielk/errcheck@latest + go install honnef.co/go/tools/cmd/staticcheck@latest .PHONY: vet vet: @@ -21,6 +35,7 @@ vet: exhaustive ./... ineffassign ./... errcheck ./... + staticcheck -checks="inherit,-S1034" ./... .PHONY: upgrade-deps upgrade-deps: diff --git a/vendor/github.com/nishanths/exhaustive/README.md b/vendor/github.com/nishanths/exhaustive/README.md index a65d9de2f2..dbb41ab9d0 100644 --- a/vendor/github.com/nishanths/exhaustive/README.md +++ b/vendor/github.com/nishanths/exhaustive/README.md @@ -1,32 +1,45 @@ -## exhaustive [![Godoc][2]][1] +# exhaustive -Check exhaustiveness of enum switch statements and map literals in Go source code. +[![Godoc][godoc-svg]][godoc] + +`exhaustive` checks exhaustiveness of enum switch statements in Go source code. + +For the definition of enum and the definition of exhaustiveness used by this +program, see [godoc][godoc-doc]. For the changelog, see [CHANGELOG][changelog] +in the GitHub wiki. The program can be configured to additionally check +exhaustiveness of keys in map literals whose key type is an enum. + +## Usage + +Command: ``` go install github.com/nishanths/exhaustive/cmd/exhaustive@latest + +exhaustive [flags] [packages] ``` -For docs on the flags, the definition of enum, and the definition of -exhaustiveness, see [godocs.io][4]. +For available flags, refer to the [Flags][godoc-flags] section in godoc or run +`exhaustive -h`. -For the changelog, see [CHANGELOG][changelog] in the wiki. +Package: -The package provides an `Analyzer` that follows the guidelines in the -[`go/analysis`][3] package; this should make it possible to integrate -exhaustive with your own analysis driver program. +``` +go get github.com/nishanths/exhaustive -## Bugs +import "github.com/nishanths/exhaustive" +``` -`exhaustive` does not report missing cases if the switch statement -switches on a type parameterized type. 
See [this -issue](https://github.com/nishanths/exhaustive/issues/31) for details. +The `exhaustive.Analyzer` variable follows guidelines in the +[`golang.org/x/tools/go/analysis`][xanalysis] package. This should make it +possible to integrate `exhaustive` with your own analysis driver program. ## Example -Given the enum +Given an enum: -```go -package token +``` +package token // import "example.org/token" type Token int @@ -39,45 +52,53 @@ const ( ) ``` -and the code +and code that switches on the enum: -```go +``` package calc -import "token" +import "example.org/token" -func f(t token.Token) { +func x(t token.Token) { switch t { case token.Add: case token.Subtract: - case token.Multiply: + case token.Remainder: default: } } +``` -func g(t token.Token) string { - return map[token.Token]string{ - token.Add: "add", - token.Subtract: "subtract", - token.Multiply: "multiply", - }[t] -} +running `exhaustive` with default flags will produce: + +``` +calc.go:6:2: missing cases in switch of type token.Token: token.Multiply, token.Quotient ``` -running exhaustive will print +Specify flag `-check=switch,map` to additionally check exhaustiveness of keys +in map literals. For example: + +``` +var m = map[token.Token]rune{ + token.Add: '+', + token.Subtract: '-', + token.Multiply: '*', + token.Quotient: '/', +} +``` ``` -calc.go:6:2: missing cases in switch of type token.Token: Quotient, Remainder -calc.go:15:9: missing map keys of type token.Token: Quotient, Remainder +calc.go:14:9: missing keys in map of key type token.Token: token.Remainder ``` ## Contributing -Issues and pull requests are welcome. Before making a substantial -change, please discuss it in an issue. +Issues and changes are welcome. Please discuss substantial changes in an issue +first. -[1]: https://godocs.io/github.com/nishanths/exhaustive -[2]: https://godocs.io/github.com/nishanths/exhaustive?status.svg -[3]: https://pkg.go.dev/golang.org/x/tools/go/analysis -[4]: https://godocs.io/github.com/nishanths/exhaustive +[godoc]: https://pkg.go.dev/github.com/nishanths/exhaustive +[godoc-svg]: https://pkg.go.dev/badge/github.com/nishanths/exhaustive.svg +[godoc-doc]: https://pkg.go.dev/github.com/nishanths/exhaustive#section-documentation +[godoc-flags]: https://pkg.go.dev/github.com/nishanths/exhaustive#hdr-Flags +[xanalysis]: https://pkg.go.dev/golang.org/x/tools/go/analysis [changelog]: https://github.com/nishanths/exhaustive/wiki/CHANGELOG diff --git a/vendor/github.com/nishanths/exhaustive/comment.go b/vendor/github.com/nishanths/exhaustive/comment.go index 1232df1162..123e0181ba 100644 --- a/vendor/github.com/nishanths/exhaustive/comment.go +++ b/vendor/github.com/nishanths/exhaustive/comment.go @@ -3,80 +3,20 @@ package exhaustive import ( "go/ast" "go/token" - "regexp" "strings" ) -// Generated file definition -// http://golang.org/s/generatedcode -// -// To convey to humans and machine tools that code is generated, generated -// source should have a line that matches the following regular expression (in -// Go syntax): -// -// ^// Code generated .* DO NOT EDIT\.$ -// -// This line must appear before the first non-comment, non-blank -// text in the file. - -func isGeneratedFile(file *ast.File) bool { - // NOTE: file.Comments includes file.Doc as well, so no need - // to separately check file.Doc. - - for _, c := range file.Comments { - for _, cc := range c.List { - // This check is intended to handle "must appear before the - // first non-comment, non-blank text in the file". - // TODO: Is this check fully correct? 
Seems correct based - // on https://golang.org/ref/spec#Source_file_organization. - if c.Pos() >= file.Package { - return false - } - // According to the docs: - // '\r' has been removed. - // '\n' has been removed for //-style comments, which is what we care about. - // Also manually verified. - if isGeneratedFileComment(cc.Text) { - return true - } - } - } - - return false -} - -var generatedCodeRe = regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`) - -func isGeneratedFileComment(s string) bool { - return generatedCodeRe.MatchString(s) -} - -type generatedCache map[*ast.File]bool - -func (c generatedCache) IsGenerated(file *ast.File) bool { - if _, ok := c[file]; !ok { - c[file] = isGeneratedFile(file) - } - return c[file] -} - -// ignoreDirective is used to exclude checking of specific switch statements. -const ignoreDirective = "//exhaustive:ignore" -const enforceDirective = "//exhaustive:enforce" - -type commentsCache map[*ast.File]ast.CommentMap - -func (c commentsCache) GetComments(file *ast.File, set *token.FileSet) ast.CommentMap { - if _, ok := c[file]; !ok { - c[file] = ast.NewCommentMap(set, file, file.Comments) - } - return c[file] -} +const ( + ignoreComment = "//exhaustive:ignore" + enforceComment = "//exhaustive:enforce" + ignoreDefaultCaseRequiredComment = "//exhaustive:ignore-default-case-required" + enforceDefaultCaseRequiredComment = "//exhaustive:enforce-default-case-required" +) -func containsDirective(comments []*ast.CommentGroup, directive string) bool { +func hasCommentPrefix(comments []*ast.CommentGroup, comment string) bool { for _, c := range comments { for _, cc := range c.List { - if strings.HasPrefix(cc.Text, directive) { + if strings.HasPrefix(cc.Text, comment) { return true } } @@ -84,10 +24,6 @@ func containsDirective(comments []*ast.CommentGroup, directive string) bool { return false } -func containsEnforceDirective(comments []*ast.CommentGroup) bool { - return containsDirective(comments, enforceDirective) -} - -func containsIgnoreDirective(comments []*ast.CommentGroup) bool { - return containsDirective(comments, ignoreDirective) +func fileCommentMap(fset *token.FileSet, file *ast.File) ast.CommentMap { + return ast.NewCommentMap(fset, file, file.Comments) } diff --git a/vendor/github.com/nishanths/exhaustive/comment_go121.go b/vendor/github.com/nishanths/exhaustive/comment_go121.go new file mode 100644 index 0000000000..a7bbc8881c --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/comment_go121.go @@ -0,0 +1,11 @@ +//go:build go1.21 + +package exhaustive + +import ( + "go/ast" +) + +func isGeneratedFile(file *ast.File) bool { + return ast.IsGenerated(file) +} diff --git a/vendor/github.com/nishanths/exhaustive/comment_pre_go121.go b/vendor/github.com/nishanths/exhaustive/comment_pre_go121.go new file mode 100644 index 0000000000..28d2ed493e --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/comment_pre_go121.go @@ -0,0 +1,27 @@ +//go:build !go1.21 + +package exhaustive + +import ( + "go/ast" + "regexp" +) + +// For definition of generated file see: +// http://golang.org/s/generatedcode + +var generatedCodeRe = regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`) + +func isGeneratedFile(file *ast.File) bool { + for _, c := range file.Comments { + for _, cc := range c.List { + if cc.Pos() > file.Package { + break + } + if generatedCodeRe.MatchString(cc.Text) { + return true + } + } + } + return false +} diff --git a/vendor/github.com/nishanths/exhaustive/common.go b/vendor/github.com/nishanths/exhaustive/common.go new file 
mode 100644 index 0000000000..f22b0e1479 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/common.go @@ -0,0 +1,474 @@ +package exhaustive + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "regexp" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/astutil" +) + +// enumTypeAndMembers combines an enumType and its members set. +type enumTypeAndMembers struct { + typ enumType + members enumMembers +} + +func fromNamed(pass *analysis.Pass, t *types.Named, typeparam bool) (result []enumTypeAndMembers, ok bool) { + if tpkg := t.Obj().Pkg(); tpkg == nil { + // go/types documentation says: nil for labels and + // objects in the Universe scope. This happens for the built-in + // error type for example. + return nil, false // not a valid enum type, so ok == false + } + + et := enumType{t.Obj()} + if em, ok := importFact(pass, et); ok { + return []enumTypeAndMembers{{et, em}}, true + } + + if typeparam { + // is it a named interface? + if intf, ok := t.Underlying().(*types.Interface); ok { + return fromInterface(pass, intf, typeparam) + } + } + + return nil, false // not a valid enum type, so ok == false +} + +func fromInterface(pass *analysis.Pass, intf *types.Interface, typeparam bool) (result []enumTypeAndMembers, ok bool) { + allOk := true + for i := 0; i < intf.NumEmbeddeds(); i++ { + r, ok := fromType(pass, intf.EmbeddedType(i), typeparam) + result = append(result, r...) + allOk = allOk && ok + } + return result, allOk +} + +func fromUnion(pass *analysis.Pass, union *types.Union, typeparam bool) (result []enumTypeAndMembers, ok bool) { + allOk := true + // gather from each term in the union. + for i := 0; i < union.Len(); i++ { + r, ok := fromType(pass, union.Term(i).Type(), typeparam) + result = append(result, r...) + allOk = allOk && ok + } + return result, allOk +} + +func fromTypeParam(pass *analysis.Pass, tp *types.TypeParam, typeparam bool) (result []enumTypeAndMembers, ok bool) { + // Does not appear to be explicitly documented, but based on Go language + // spec (see section Type constraints) and Go standard library source code, + // we can expect constraints to have underlying type *types.Interface + // Regardless it will be handled in fromType. + return fromType(pass, tp.Constraint().Underlying(), typeparam) +} + +func fromType(pass *analysis.Pass, t types.Type, typeparam bool) (result []enumTypeAndMembers, ok bool) { + switch t := t.(type) { + case *types.Named: + return fromNamed(pass, t, typeparam) + + case *types.Union: + return fromUnion(pass, t, typeparam) + + case *types.TypeParam: + return fromTypeParam(pass, t, typeparam) + + case *types.Interface: + if !typeparam { + return nil, true + } + // anonymous interface. + // e.g. func foo[T interface { M } | interface { N }](v T) {} + return fromInterface(pass, t, typeparam) + + default: + // ignore these. + return nil, true + } +} + +func composingEnumTypes(pass *analysis.Pass, t types.Type) (result []enumTypeAndMembers, ok bool) { + _, typeparam := t.(*types.TypeParam) + result, ok = fromType(pass, t, typeparam) + + if typeparam { + var kind types.BasicKind + var kindSet bool + + // sameBasicKind reports whether each type t that the function is called + // with has the same underlying basic kind. 
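To make the rule concrete, here is an editorial sketch (not part of the vendored source; the package and identifier names are invented): a switch on a type parameter is checked only when every composing enum type shares one underlying basic kind.

```
package sketch

// Two enum types with the same underlying basic kind (int8).
type M int8

const A M = 1

type N int8

const (
	B N = 2
	C N = 3
)

// Checked for exhaustiveness: every type element in the constraint is
// an enum type and all share the basic kind int8, so A, B, and C must
// all be listed.
func f[T M | N](v T) {
	switch v {
	case T(A):
	case T(B):
	case T(C):
	}
}

// Had N instead been declared as "type N int16", the basic kinds would
// differ and the switch would not be checked.
```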
+ sameBasicKind := func(t types.Type) (ok bool) { + basic, ok := t.Underlying().(*types.Basic) + if !ok { + return false + } + if kindSet && kind != basic.Kind() { + return false + } + kind = basic.Kind() + kindSet = true + return true + } + + for _, rr := range result { + if !sameBasicKind(rr.typ.TypeName.Type()) { + ok = false + break + } + } + } + + return result, ok +} + +func denotesPackage(ident *ast.Ident, info *types.Info) bool { + obj := info.ObjectOf(ident) + if obj == nil { + return false + } + _, ok := obj.(*types.PkgName) + return ok +} + +// exprConstVal returns the constantValue for an expression if the +// expression is a constant value and if the expression is considered +// valid to satisfy exhaustiveness as defined by this program. +// Otherwise it returns (_, false). +func exprConstVal(e ast.Expr, info *types.Info) (constantValue, bool) { + handleIdent := func(ident *ast.Ident) (constantValue, bool) { + obj := info.Uses[ident] + if obj == nil { + return "", false + } + if _, ok := obj.(*types.Const); !ok { + return "", false + } + // There are two scenarios. + // See related test cases in typealias/quux/quux.go. + // + // # Scenario 1 + // + // Tag package and constant package are the same. This is + // simple; we just use fs.ModeDir's value. + // Example: + // + // var mode fs.FileMode + // switch mode { + // case fs.ModeDir: + // } + // + // # Scenario 2 + // + // Tag package and constant package are different. In this + // scenario, too, we accept the case clause expr constant value, + // as is. If the Go type checker is okay with the name being + // listed in the case clause, we don't care much further. + // + // Example: + // + // var mode fs.FileMode + // switch mode { + // case os.ModeDir: + // } + // + // Or equivalently: + // + // // The type of mode is effectively fs.FileMode, + // // due to type alias. + // var mode os.FileMode + // switch mode { + // case os.ModeDir: + // } + return determineConstVal(ident, info), true + } + + e = stripTypeConversions(astutil.Unparen(e), info) + + switch e := e.(type) { + case *ast.Ident: + return handleIdent(e) + + case *ast.SelectorExpr: + x := astutil.Unparen(e.X) + // Ensure we only see the form pkg.Const, and not e.g. + // structVal.f or structVal.inner.f. + // + // For this purpose, first we check that X, which is everything + // except the rightmost field selector *ast.Ident (the Sel + // field), is also an *ast.Ident. + xIdent, ok := x.(*ast.Ident) + if !ok { + return "", false + } + // Second, check that it's a package. It doesn't matter which + // package, just that it denotes some package. + if !denotesPackage(xIdent, info) { + return "", false + } + return handleIdent(e.Sel) + + default: + // e.g. literal + // these aren't considered towards satisfying exhaustiveness. + return "", false + } +} + +// stripTypeConversions removes type conversions from the expression. +func stripTypeConversions(e ast.Expr, info *types.Info) ast.Expr { + c, ok := e.(*ast.CallExpr) + if !ok { + return e + } + typ := info.TypeOf(c.Fun) + if typ == nil { + // can happen for built-ins. + return e + } + // do not allow function calls. + if _, ok := typ.Underlying().(*types.Signature); ok { + return e + } + // type conversions have exactly one arg. + if len(c.Args) != 1 { + return e + } + return stripTypeConversions(astutil.Unparen(c.Args[0]), info) +} + +// member is a single member of an enum type.
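An editorial sketch of which case expressions the two helpers above accept (the example.org/eco import path and the pick helper are invented; eco's members follow the package documentation's Biome example):

```
package sketch

import "example.org/eco" // hypothetical import path, per the docs' example

func pick() eco.Biome { return eco.Tundra }

func f(b eco.Biome) {
	switch b {
	case eco.Tundra: // counts: qualified constant identifier
	case eco.Biome(eco.Desert): // counts: the conversion is stripped, leaving a constant
	case eco.Biome(2): // does not count: a literal remains after stripping
	case pick(): // does not count: a function call, not a constant identifier
	}
}
```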
+type member struct { + pos token.Pos + typ enumType + name string + val constantValue +} + +type checklist struct { + info map[enumType]enumMembers + checkl map[member]struct{} + ignoreConstantRe *regexp.Regexp + ignoreTypeRe *regexp.Regexp +} + +func (c *checklist) ignoreConstant(pattern *regexp.Regexp) { + c.ignoreConstantRe = pattern +} + +func (c *checklist) ignoreType(pattern *regexp.Regexp) { + c.ignoreTypeRe = pattern +} + +func (*checklist) reMatch(re *regexp.Regexp, s string) bool { + if re == nil { + return false + } + return re.MatchString(s) +} + +func (c *checklist) add(et enumType, em enumMembers, includeUnexported bool) { + addOne := func(name string) { + if isBlankIdentifier(name) { + // Blank identifier is often used to skip entries in iota + // lists. Also, it can't be referenced anywhere (e.g. can't + // be referenced in switch statement cases) It doesn't make + // sense to include it as required member to satisfy + // exhaustiveness. + return + } + if !ast.IsExported(name) && !includeUnexported { + return + } + if c.reMatch(c.ignoreConstantRe, fmt.Sprintf("%s.%s", et.Pkg().Path(), name)) { + return + } + if c.reMatch(c.ignoreTypeRe, fmt.Sprintf("%s.%s", et.Pkg().Path(), et.TypeName.Name())) { + return + } + mem := member{ + em.NameToPos[name], + et, + name, + em.NameToValue[name], + } + if c.checkl == nil { + c.checkl = make(map[member]struct{}) + } + c.checkl[mem] = struct{}{} + } + + if c.info == nil { + c.info = make(map[enumType]enumMembers) + } + c.info[et] = em + + for _, name := range em.Names { + addOne(name) + } +} + +func (c *checklist) found(val constantValue) { + // delete all same-valued items. + for et, em := range c.info { + for _, name := range em.ValueToNames[val] { + delete(c.checkl, member{ + em.NameToPos[name], + et, + name, + em.NameToValue[name], + }) + } + } +} + +func (c *checklist) remaining() map[member]struct{} { + return c.checkl +} + +// group is a collection of same-valued members, possibly from +// different enum types. +type group []member + +func groupify(items map[member]struct{}, types []enumType) []group { + // indices maps each element in the input slice to its index. + indices := func(vs []enumType) map[enumType]int { + ret := make(map[enumType]int, len(vs)) + for i, v := range vs { + ret[v] = i + } + return ret + } + + typesOrder := indices(types) // for quick lookup + astBefore := func(x, y member) bool { + if typesOrder[x.typ] < typesOrder[y.typ] { + return true + } + if typesOrder[x.typ] > typesOrder[y.typ] { + return false + } + return x.pos < y.pos + } + + // byConstVal groups member names by constant value. + byConstVal := func(items map[member]struct{}) map[constantValue][]member { + ret := make(map[constantValue][]member) + for m := range items { + ret[m.val] = append(ret[m.val], m) + } + return ret + } + + var groups []group + for _, ms := range byConstVal(items) { + groups = append(groups, group(ms)) + } + + // sort members within each group in AST order. + for i := range groups { + g := groups[i] + sort.Slice(g, func(i, j int) bool { return astBefore(g[i], g[j]) }) + groups[i] = g + } + // sort groups themselves in AST order. + // the index [0] access is safe, because there will be at least one + // element per group. + sort.Slice(groups, func(i, j int) bool { return astBefore(groups[i][0], groups[j][0]) }) + + return groups +} + +func diagnosticEnumType(enumType *types.TypeName) string { + return enumType.Pkg().Name() + "." 
+ enumType.Name() +} + +func diagnosticEnumTypes(types []enumType) string { + var buf strings.Builder + for i := range types { + buf.WriteString(diagnosticEnumType(types[i].TypeName)) + if i != len(types)-1 { + buf.WriteByte('|') + } + } + return buf.String() +} + +func diagnosticMember(m member) string { + return m.typ.Pkg().Name() + "." + m.name +} + +func diagnosticGroups(gs []group) string { + out := make([]string, len(gs)) + for i := range gs { + var buf strings.Builder + for j := range gs[i] { + buf.WriteString(diagnosticMember(gs[i][j])) + if j != len(gs[i])-1 { + buf.WriteByte('|') + } + } + out[i] = buf.String() + } + return strings.Join(out, ", ") +} + +func toEnumTypes(es []enumTypeAndMembers) []enumType { + out := make([]enumType, len(es)) + for i := range es { + out[i] = es[i].typ + } + return out +} + +func dedupEnumTypes(types []enumType) []enumType { + m := make(map[enumType]struct{}) + var ret []enumType + for _, t := range types { + _, ok := m[t] + if ok { + continue + } + m[t] = struct{}{} + ret = append(ret, t) + } + return ret +} + +type boolCache struct { + m map[*ast.File]bool + compute func(*ast.File) bool +} + +func (c *boolCache) get(file *ast.File) bool { + if _, ok := c.m[file]; !ok { + if c.m == nil { + c.m = make(map[*ast.File]bool) + } + c.m[file] = c.compute(file) + } + return c.m[file] +} + +type commentCache struct { + m map[*ast.File]ast.CommentMap + compute func(*token.FileSet, *ast.File) ast.CommentMap +} + +func (c *commentCache) get(fset *token.FileSet, file *ast.File) ast.CommentMap { + if _, ok := c.m[file]; !ok { + if c.m == nil { + c.m = make(map[*ast.File]ast.CommentMap) + } + c.m[file] = c.compute(fset, file) + } + return c.m[file] +} diff --git a/vendor/github.com/nishanths/exhaustive/doc.go b/vendor/github.com/nishanths/exhaustive/doc.go new file mode 100644 index 0000000000..a745247db3 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/doc.go @@ -0,0 +1,216 @@ +/* +Package exhaustive defines an analyzer that checks exhaustiveness of switch +statements of enum-like constants in Go source code. The analyzer can +optionally also check exhaustiveness of keys in map literals whose key type +is enum-like. + +# Definition of enum + +The Go [language spec] does not have an explicit definition for enums. For +the purpose of this analyzer, and by convention, an enum type is any named +type that: + + - has an [underlying type] of float, string, or integer (includes byte + and rune); and + - has at least one constant of its type defined in the same [block]. + +In the example below, Biome is an enum type. The three constants are its +enum members. + + package eco + + type Biome int + + const ( + Tundra Biome = 1 + Savanna Biome = 2 + Desert Biome = 3 + ) + +Enum member constants for an enum type must be declared in the same block as +the type. The constant values may be specified using iota, literal values, or +any valid means for declaring a Go constant. It is allowed for multiple enum +member constants for an enum type to have the same constant value. + +# Definition of exhaustiveness + +A switch statement that switches on a value of an enum type is exhaustive if +all enum members are listed in the switch statement's cases. If multiple enum +members have the same constant value, it is sufficient for any one of these +same-valued members to be listed. + +For an enum type defined in the same package as the switch statement, both +exported and unexported enum members must be listed to satisfy exhaustiveness. 
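For instance (an editorial sketch; the unexported savanna member is invented for illustration):

```
package eco

type Biome int

const (
	Tundra  Biome = 1
	savanna Biome = 2 // unexported
)

// Inside package eco, the unexported member must be listed too;
// omitting savanna here would be reported.
func describe(b Biome) string {
	switch b {
	case Tundra:
		return "tundra"
	case savanna:
		return "savanna"
	}
	return ""
}
```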
+For an enum type defined in an external package, it is sufficient that only +exported enum members are listed. Only constant identifiers (e.g. Tundra, +eco.Desert) listed in a switch statement's case clause can contribute towards +satisfying exhaustiveness; other expressions, such as literal values and +function calls, listed in case clauses do not contribute towards satisfying +exhaustiveness. + +By default, the existence of a default case in a switch statement does not +unconditionally make a switch statement exhaustive. Use the +-default-signifies-exhaustive flag to adjust this behavior. + +For a map literal whose key type is an enum type, a similar definition of +exhaustiveness applies. The map literal is considered exhaustive if all enum +members are listed in its keys. Empty map literals are never checked for +exhaustiveness. + +# Type parameters + +A switch statement that switches on a value whose type is a type parameter is +checked for exhaustiveness if and only if each type element in the type +constraint is an enum type and the type elements share the same underlying +[BasicKind]. + +For example, the switch statement below will be checked because each type +element (i.e. M and N) in the type constraint is an enum type and the type +elements share the same underlying BasicKind, namely int8. To satisfy +exhaustiveness, the enum members collectively belonging to the enum types M +and N (i.e. A, B, and C) must be listed in the switch statement's cases. + + func bar[T M | I](v T) { + switch v { + case T(A): + case T(B): + case T(C): + } + } + + type I interface{ N } + + type M int8 + const A M = 1 + + type N int8 + const B N = 2 + const C N = 3 + +# Type aliases + +The analyzer handles type aliases as shown in the example below. newpkg.M is +an enum type. oldpkg.M is an alias for newpkg.M. Note that oldpkg.M isn't +itself an enum type; oldpkg.M is simply an alias for the actual enum type +newpkg.M. + + package oldpkg + type M = newpkg.M + const ( + A = newpkg.A + B = newpkg.B + ) + + package newpkg + type M int + const ( + A M = 1 + B M = 2 + ) + +A switch statement that switches either on a value of type newpkg.M or of type +oldpkg.M (which, being an alias, is just an alternative spelling for newpkg.M) +is exhaustive if all of newpkg.M's enum members are listed in the switch +statement's cases. The following switch statement is exhaustive. + + func f(v newpkg.M) { + switch v { + case newpkg.A: // or equivalently oldpkg.A + case newpkg.B: // or equivalently oldpkg.B + } + } + +The analyzer guarantees that introducing a type alias (such as type M = +newpkg.M) will not result in new diagnostics if the set of enum member +constant values of the RHS type is a subset of the set of enum member constant +values of the LHS type. + +# Flags + +Summary: + + flag type default value + ---- ---- ------------- + -check comma-separated strings switch + -explicit-exhaustive-switch bool false + -explicit-exhaustive-map bool false + -check-generated bool false + -default-signifies-exhaustive bool false + -ignore-enum-members regexp pattern (none) + -ignore-enum-types regexp pattern (none) + -package-scope-only bool false + +Descriptions: + + -check + Comma-separated list of program elements to check for + exhaustiveness. Supported program element values are + "switch" and "map". The default value is "switch", which + means that only switch statements are checked. + + -explicit-exhaustive-switch + Check a switch statement only if it is associated with a + "//exhaustive:enforce" comment.
By default the analyzer + checks every switch statement that isn't associated with a + "//exhaustive:ignore" comment. + + -explicit-exhaustive-map + Similar to -explicit-exhaustive-switch but for map literals. + + -check-generated + Check generated files. For the definition of a generated + file, see https://golang.org/s/generatedcode. + + -default-signifies-exhaustive + Consider a switch statement to be exhaustive + unconditionally if it has a default case. (In other words, + all enum members do not have to be listed in its cases if a + default case is present.) Setting this flag usually is + counter to the purpose of exhaustiveness checks, so it is + not recommended to set this flag. + + -ignore-enum-members + Constants that match the specified regular expression (in + package regexp syntax) are not considered enum members and + hence do not have to be listed to satisfy exhaustiveness. + The specified regular expression is matched against the + constant name inclusive of import path. For example, if the + import path for the constant is "example.org/eco" and the + constant name is "Tundra", then the specified regular + expression is matched against the string + "example.org/eco.Tundra". + + -ignore-enum-types + Similar to -ignore-enum-members but for types. + + -package-scope-only + Only discover enums declared in file-level blocks. By + default, the analyzer discovers enums defined in all + blocks. + +# Skip analysis + +To skip analysis of a switch statement or a map literal, associate it with a +comment that begins with "//exhaustive:ignore". For example: + + //exhaustive:ignore ... an optional explanation goes here ... + switch v { + case A: + case B: + } + +To ignore specific constants in exhaustiveness checks, specify the +-ignore-enum-members flag: + + exhaustive -ignore-enum-members '^example\.org/eco\.Tundra$' + +To ignore specific types, specify the -ignore-enum-types flag: + + exhaustive -ignore-enum-types '^time\.Duration$|^example\.org/measure\.Unit$' + +[language spec]: https://golang.org/ref/spec +[underlying type]: https://golang.org/ref/spec#Underlying_types +[block]: https://golang.org/ref/spec#Blocks +[BasicKind]: https://pkg.go.dev/go/types#BasicKind +*/ +package exhaustive diff --git a/vendor/github.com/nishanths/exhaustive/enum.go b/vendor/github.com/nishanths/exhaustive/enum.go index 2b287e39a0..cabf1d880d 100644 --- a/vendor/github.com/nishanths/exhaustive/enum.go +++ b/vendor/github.com/nishanths/exhaustive/enum.go @@ -10,41 +10,48 @@ import ( "golang.org/x/tools/go/ast/inspector" ) -// constantValue is a constant.Value.ExactString(). +// constantValue is a (constant.Value).ExactString value. type constantValue string -// Represents an enum type (or a potential enum type). -// It is a defined (named) type's name. +// enumType represents an enum type as defined by this program, which +// effectively is a defined (named) type. type enumType struct{ *types.TypeName } func (et enumType) String() string { return et.TypeName.String() } // for debugging func (et enumType) scope() *types.Scope { return et.TypeName.Parent() } // scope that the type is declared in func (et enumType) factObject() types.Object { return et.TypeName } // types.Object for fact export -// enumMembers is the members for a single enum type. +// enumMembers is the set of enum members for a single enum type. // The zero value is ready to use. 
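An editorial sketch of how same-valued members end up in this set (the Direction type and its members are invented): both names are recorded under the same constant value in ValueToNames below, so listing either one in a switch accounts for that value.

```
type Direction int

const (
	North Direction = 0
	South Direction = 1
)

// Norte has the same type and value as North, so it is recorded
// under ValueToNames["0"] alongside North.
const Norte = North

// Exhaustive: Norte accounts for North's value; South is listed directly.
func name(d Direction) string {
	switch d {
	case Norte:
		return "north"
	case South:
		return "south"
	}
	return ""
}
```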
type enumMembers struct { - Names []string // enum member names, AST order + Names []string // enum member names + NameToPos map[string]token.Pos // enum member name -> AST position NameToValue map[string]constantValue // enum member name -> constant value ValueToNames map[constantValue][]string // constant value -> enum member names } -func (em *enumMembers) add(name string, val constantValue) { +// add adds an enum member to the set. +func (em *enumMembers) add(name string, val constantValue, pos token.Pos) { + if em.NameToPos == nil { + em.NameToPos = make(map[string]token.Pos) + } if em.NameToValue == nil { em.NameToValue = make(map[string]constantValue) } if em.ValueToNames == nil { em.ValueToNames = make(map[constantValue][]string) } - em.Names = append(em.Names, name) + em.NameToPos[name] = pos em.NameToValue[name] = val em.ValueToNames[val] = append(em.ValueToNames[val], name) } -func (em enumMembers) String() string { return em.factString() } // for debugging +func (em *enumMembers) String() string { + return em.factString() +} -func (em enumMembers) factString() string { +func (em *enumMembers) factString() string { var buf strings.Builder for j, vv := range em.Names { buf.WriteString(vv) @@ -74,7 +81,7 @@ func findEnums(pkgScopeOnly bool, pkg *types.Package, inspect *inspector.Inspect continue } v := result[enumTyp] - v.add(memberName, val) + v.add(memberName, val, name.Pos()) result[enumTyp] = v } } @@ -84,6 +91,25 @@ func findEnums(pkgScopeOnly bool, pkg *types.Package, inspect *inspector.Inspect } func possibleEnumMember(constName *ast.Ident, info *types.Info) (et enumType, name string, val constantValue, ok bool) { + // Notes + // + // type T int + // const A T = iota // obj.Type() is T + // + // type R T + // const B R = iota // obj.Type() is R + // + // type T2 int + // type T1 = T2 + // const C T1 = iota // obj.Type() is T2 + // + // type T3 = T4 + // type T4 int + // type T5 = T3 + // const D T5 = iota // obj.Type() is T4 + // + // In all these cases, validNamedBasic(obj.Type()) == true. + obj := info.Defs[constName] if obj == nil { panic(fmt.Sprintf("info.Defs[%s] == nil", constName)) @@ -91,44 +117,22 @@ func possibleEnumMember(constName *ast.Ident, info *types.Info) (et enumType, na if _, ok = obj.(*types.Const); !ok { panic(fmt.Sprintf("obj must be *types.Const, got %T", obj)) } - if isBlankIdentifier(obj) { + if isBlankIdentifier(obj.Name()) { // These objects have a nil parent scope. // Also, we have no real purpose to record them. return enumType{}, "", "", false } - - /* - NOTE: - - type T int - const A T = iota // obj.Type() is T - - type R T - const B R = iota // obj.Type() is R - - type T2 int - type T1 = T2 - const C T1 = iota // obj.Type() is T2 - - type T3 = T4 - type T4 int - type T5 = T3 - const D T5 = iota // obj.Type() is T4 - - // And, in all these cases, validNamedBasic(obj.Type()) == true. - */ - if !validNamedBasic(obj.Type()) { return enumType{}, "", "", false } - named := obj.Type().(*types.Named) // guaranteed by validNamedBasic() + named := obj.Type().(*types.Named) // guaranteed by validNamedBasic tn := named.Obj() - // Enum type's scope and enum member's scope must be the same. If they're - // not, don't consider the const a member. Additionally, the enum type and - // the enum member must be in the same package (the scope check accounts for - // this, too). + // By definition, enum type's scope and enum member's scope must be the + // same. If they're not, don't consider the const a member. 
Additionally, + // the enum type and the enum member must be in the same package (the + // scope check accounts for this, too). if tn.Parent() != obj.Parent() { return enumType{}, "", "", false } @@ -141,8 +145,8 @@ func determineConstVal(name *ast.Ident, info *types.Info) constantValue { return constantValue(c.Val().ExactString()) } -func isBlankIdentifier(obj types.Object) bool { - return obj.Name() == "_" // NOTE: go/types/decl.go does a direct comparison like this +func isBlankIdentifier(name string) bool { + return name == "_" // NOTE: go/types/decl.go does a direct comparison like this } func validBasic(basic *types.Basic) bool { @@ -154,10 +158,12 @@ func validBasic(basic *types.Basic) bool { } // validNamedBasic returns whether the type t is a named type whose underlying -// type is a valid basic type to form an enum. -// A type that passes this check meets the definition of an enum type. -// Note that -// validNamedBasic(t) == true => t.(*types.Named) +// type is a valid basic type to form an enum. A type that passes this check +// meets the definition of an enum type. +// +// The following is guaranteed: +// +// validNamedBasic(t) == true => t.(*types.Named) func validNamedBasic(t types.Type) bool { named, ok := t.(*types.Named) if !ok { diff --git a/vendor/github.com/nishanths/exhaustive/exhaustive.go b/vendor/github.com/nishanths/exhaustive/exhaustive.go index 8ec80e066c..013ac47bb7 100644 --- a/vendor/github.com/nishanths/exhaustive/exhaustive.go +++ b/vendor/github.com/nishanths/exhaustive/exhaustive.go @@ -1,251 +1,35 @@ -/* -Package exhaustive provides an analyzer that checks exhaustiveness of enum -switch statements and map literals in Go source code. - -# Definition of enum - -The Go language spec does not provide an explicit definition for an enum. For -the purpose of this analyzer, an enum type is any named type (a.k.a. defined -type) whose underlying type is an integer (includes byte and rune), a float, -or a string type. An enum type has associated with it constants of this named -type; these constants constitute the enum members. - -In the example below, Biome is an enum type with 3 members. - - type Biome int - - const ( - Tundra Biome = 1 - Savanna Biome = 2 - Desert Biome = 3 - ) - -For a constant to be an enum member for an enum type, the constant must be -declared in the same scope as the enum type. Note that the scope requirement -implies that only constants declared in the same package as the enum type's -package can constitute the enum members for the enum type. - -Enum member constants for a given enum type don't necessarily have to all be -declared in the same const block. Constant values may be specified using iota, -using explicit values, or by any means of declaring a valid Go const. It is -allowed for multiple enum member constants for a given enum type to have the -same constant value. - -# Definition of exhaustiveness - -A switch statement that switches on a value of an enum type is exhaustive if -all of the enum type's members are listed in the switch statement's cases. If -multiple enum member constants have the same constant value, it is sufficient -for any one of these same-valued members to be listed. - -For an enum type defined in the same package as the switch statement, both -exported and unexported enum members must be listed to satisfy exhaustiveness. -For an enum type defined in an external package, it is sufficient that only -exported enum members are listed. - -Only identifiers denoting constants (e.g. 
Tundra) and qualified identifiers -denoting constants (e.g. somepkg.Grassland) listed in a switch statement's -cases can contribute towards satisfying exhaustiveness. Literal values, struct -fields, re-assignable variables, etc. will not. - -The analyzer will produce a diagnostic about unhandled enum members if the -required memebers are not listed in a switch statement's cases (this applies -even if the switch statement has a 'default' case). - -# Map literals - -All of the above also applies to map literals in which the key type is an enum -type. Empty map literals are never checked. The -check flag must include -"map" for map literals to be checked. - -# Type aliases - -The analyzer handles type aliases for an enum type in the following manner. -Consider the example below. T2 is a enum type, and T1 is an alias for T2. Note -that we don't term T1 itself an enum type; it is only an alias for an enum -type. - - package pkg - type T1 = newpkg.T2 - const ( - A = newpkg.A - B = newpkg.B - ) - - package newpkg - type T2 int - const ( - A T2 = 1 - B T2 = 2 - ) - -Then a switch statement that switches on a value of type T1 (which, in -reality, is just an alternate spelling for type T2) is exhaustive if all of -T2's enum members are listed in the switch statement's cases. The same -conditions described in the previous section for same-valued enum members and -for exported/unexported enum members apply here too. - -It is worth noting that, though T1 and T2 are identical types, only constants -declared in the same scope as type T2's scope can be T2's enum members. In the -example, newpkg.A and newpkg.B are T2's enum members. - -The analyzer guarantees that introducing a type alias (such as type T1 = -newpkg.T2) will never result in new diagnostics from the analyzer, as long as -the set of enum member constant values of the new RHS type (newpkg.T2) is a -subset of the set of enum member constant values of the old LHS type (T1). - -# Advanced notes - -Non-enum member constants in a switch statement's cases: Recall from an -earlier section that a constant must be declared in the same scope as the enum -type to be an enum member. It is valid, however, both to the Go type checker -and to this analyzer, for any constant of the right type to be listed in the -cases of an enum switch statement (it does not necessarily have to be an enum -member constant declared in the same scope/package as the enum type's -scope/package). This is particularly useful when a type alias is involved: A -forwarding constant declaration (such as pkg.A, in type T1's package) can take -the place of the actual enum member constant (newpkg.A, in type T2's package) -in the switch statement's cases to satisfy exhaustiveness. - - var v pkg.T1 = pkg.ReturnsT1() // v is effectively of type newpkg.T2 due to alias - switch v { - case pkg.A: // valid substitute for newpkg.A (same constant value) - case pkg.B: // valid substitute for newpkg.B (same constant value) - } - -# Flags - -Notable flags supported by the analyzer are described below. -All of these flags are optional. - - flag type default value - - -check string switch - -explicit-exhaustive-switch bool false - -explicit-exhaustive-map bool false - -check-generated bool false - -default-signifies-exhaustive bool false - -ignore-enum-members string (none) - -package-scope-only bool false - -The -check flag specifies the program elements that should be checked for -exhaustiveness. By default, only switch statements are checked. Specify --check=switch,map to also check map literals. 
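As an aside, because the package-level Analyzer follows go/analysis conventions, a standalone driver stays small. A minimal sketch using the x/tools singlechecker package:

```
package main

import (
	"github.com/nishanths/exhaustive"
	"golang.org/x/tools/go/analysis/singlechecker"
)

// singlechecker.Main builds a command-line driver around a single
// analyzer, including flag handling and package loading.
func main() { singlechecker.Main(exhaustive.Analyzer) }
```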
- -If the -explicit-exhaustive-switch flag is enabled, the analyzer only runs on -switch statements explicitly marked with the comment text -("exhaustive:enforce"). Otherwise, it runs on every enum switch statement not -marked with the comment text ("exhaustive:ignore"). - -If the -explicit-exhaustive-map flag is enabled, the analyzer only runs on -map literals explicitly marked with the comment text -("exhaustive:enforce"). Otherwise, it runs on every enum map literal not -marked with the comment text ("exhaustive:ignore"). - -If the -check-generated flag is enabled, switch statements in generated Go -source files are also checked. Otherwise, by default, switch statements in -generated files are not checked. See https://golang.org/s/generatedcode for the -definition of generated file. - -If the -default-signifies-exhaustive flag is enabled, the presence of a -'default' case in a switch statement always satisfies exhaustiveness, even if -all enum members are not listed. It is not recommended that you enable this -flag; enabling it generally defeats the purpose of exhaustiveness checking. - -The -ignore-enum-members flag specifies a regular expression in Go syntax. Enum -members matching the regular expression don't have to be listed in switch -statement cases to satisfy exhaustiveness. The specified regular expression is -matched against an enum member name inclusive of the enum package import path: -for example, if the enum package import path is "example.com/pkg" and the member -name is "Tundra", the specified regular expression will be matched against the -string "example.com/pkg.Tundra". - -If the -package-scope-only flag is enabled, the analyzer only finds enums -defined in package scopes, and consequently only switch statements that switch -on package-scoped enums will be checked for exhaustiveness. By default, the -analyzer finds enums defined in all scopes, and checks switch statements that -switch on all these enums. - -# Skip analysis - -In implicitly exhaustive switch mode (-explicit-exhaustive-switch=false), skip -checking of a specific switch statement by associating the comment shown in -the example below with the switch statement. Note the lack of whitespace -between the comment marker ("//") and the comment text ("exhaustive:ignore"). - - //exhaustive:ignore - switch v { ... } - -In explicitly exhaustive switch mode (-explicit-exhaustive-switch=true), run -exhaustiveness checks on a specific switch statement by associating the -comment shown in the example below with the switch statement. - - //exhaustive:enforce - switch v { ... } - -To ignore specific enum members, see the -ignore-enum-members flag. - -Switch statements in generated Go source files are not checked by default. -Use the -check-generated flag to change this behavior. -*/ package exhaustive import ( - "flag" + "fmt" "go/ast" - "regexp" - "strings" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" ) -var _ flag.Value = (*regexpFlag)(nil) - -// regexpFlag implements the flag.Value interface for parsing -// regular expression flag values. 
-type regexpFlag struct{ r *regexp.Regexp } - -func (v *regexpFlag) String() string { - if v == nil || v.r == nil { - return "" - } - return v.r.String() -} - -func (v *regexpFlag) Set(expr string) error { - if expr == "" { - v.r = nil - return nil - } - - r, err := regexp.Compile(expr) - if err != nil { - return err - } - - v.r = r - return nil +func init() { + registerFlags() } -func (v *regexpFlag) value() *regexp.Regexp { return v.r } - -func init() { - Analyzer.Flags.StringVar(&fCheck, CheckFlag, checkSwitch, "program elements to check for exhaustiveness") - Analyzer.Flags.BoolVar(&fExplicitExhaustiveSwitch, ExplicitExhaustiveSwitchFlag, false, "only run exhaustive check on switches with \"//exhaustive:enforce\" comment") - Analyzer.Flags.BoolVar(&fExplicitExhaustiveMap, ExplicitExhaustiveMapFlag, false, "only run exhaustive check on map literals with \"//exhaustive:enforce\" comment") - Analyzer.Flags.BoolVar(&fCheckGenerated, CheckGeneratedFlag, false, "check switch statements in generated files") - Analyzer.Flags.BoolVar(&fDefaultSignifiesExhaustive, DefaultSignifiesExhaustiveFlag, false, "presence of \"default\" case in switch statements satisfies exhaustiveness, even if all enum members are not listed") - Analyzer.Flags.Var(&fIgnoreEnumMembers, IgnoreEnumMembersFlag, "enum members matching `regex` do not have to be listed in switch statements to satisfy exhaustiveness") - Analyzer.Flags.BoolVar(&fPackageScopeOnly, PackageScopeOnlyFlag, false, "consider enums only in package scopes, not in inner scopes") +func registerFlags() { + Analyzer.Flags.Var(&fCheck, CheckFlag, "comma-separated list of program `elements` to check for exhaustiveness; supported element values: switch, map") + Analyzer.Flags.BoolVar(&fExplicitExhaustiveSwitch, ExplicitExhaustiveSwitchFlag, false, `check switch statement only if associated with "//exhaustive:enforce" comment`) + Analyzer.Flags.BoolVar(&fExplicitExhaustiveMap, ExplicitExhaustiveMapFlag, false, `check map literal only if associated with "//exhaustive:enforce" comment`) + Analyzer.Flags.BoolVar(&fCheckGenerated, CheckGeneratedFlag, false, "check generated files") + Analyzer.Flags.BoolVar(&fDefaultSignifiesExhaustive, DefaultSignifiesExhaustiveFlag, false, "switch statement is unconditionally exhaustive if it has a default case") + Analyzer.Flags.BoolVar(&fDefaultCaseRequired, DefaultCaseRequiredFlag, false, "switch statement requires default case even if exhaustive") + Analyzer.Flags.Var(&fIgnoreEnumMembers, IgnoreEnumMembersFlag, "ignore constants matching `regexp`") + Analyzer.Flags.Var(&fIgnoreEnumTypes, IgnoreEnumTypesFlag, "ignore types matching `regexp`") + Analyzer.Flags.BoolVar(&fPackageScopeOnly, PackageScopeOnlyFlag, false, "only discover enums declared in file-level blocks") var unused string - Analyzer.Flags.StringVar(&unused, IgnorePatternFlag, "", "no effect (deprecated); see -"+IgnoreEnumMembersFlag+" instead") + Analyzer.Flags.StringVar(&unused, IgnorePatternFlag, "", "no effect (deprecated); use -"+IgnoreEnumMembersFlag) Analyzer.Flags.StringVar(&unused, CheckingStrategyFlag, "", "no effect (deprecated)") } -// Flag names used by the analyzer. They are exported for use by analyzer +// Flag names used by the analyzer. These are exported for use by analyzer // driver programs. 
const ( CheckFlag = "check" @@ -253,115 +37,116 @@ const ( ExplicitExhaustiveMapFlag = "explicit-exhaustive-map" CheckGeneratedFlag = "check-generated" DefaultSignifiesExhaustiveFlag = "default-signifies-exhaustive" + DefaultCaseRequiredFlag = "default-case-required" IgnoreEnumMembersFlag = "ignore-enum-members" + IgnoreEnumTypesFlag = "ignore-enum-types" PackageScopeOnlyFlag = "package-scope-only" - IgnorePatternFlag = "ignore-pattern" // Deprecated: see IgnoreEnumMembersFlag instead. - CheckingStrategyFlag = "checking-strategy" // Deprecated. + // Deprecated flag names. + IgnorePatternFlag = "ignore-pattern" // Deprecated: use IgnoreEnumMembersFlag. + CheckingStrategyFlag = "checking-strategy" // Deprecated: no longer applicable. ) +// Flag values. var ( - fCheck string + fCheck = stringsFlag{elements: defaultCheckElements, filter: validCheckElement} fExplicitExhaustiveSwitch bool fExplicitExhaustiveMap bool fCheckGenerated bool fDefaultSignifiesExhaustive bool + fDefaultCaseRequired bool fIgnoreEnumMembers regexpFlag + fIgnoreEnumTypes regexpFlag fPackageScopeOnly bool ) -const ( - checkSwitch = "switch" - checkMap = "map" -) - -// resetFlags resets the flag variables to their default values. +// resetFlags resets the flag variables to default values. // Useful in tests. func resetFlags() { - fCheck = checkSwitch + fCheck = stringsFlag{elements: defaultCheckElements, filter: validCheckElement} fExplicitExhaustiveSwitch = false fExplicitExhaustiveMap = false fCheckGenerated = false fDefaultSignifiesExhaustive = false + fDefaultCaseRequired = false fIgnoreEnumMembers = regexpFlag{} + fIgnoreEnumTypes = regexpFlag{} fPackageScopeOnly = false } +// checkElement is a program element supported by the -check flag. +type checkElement string + +const ( + elementSwitch checkElement = "switch" + elementMap checkElement = "map" +) + +func validCheckElement(s string) error { + switch checkElement(s) { + case elementSwitch: + return nil + case elementMap: + return nil + default: + return fmt.Errorf("invalid program element %q", s) + } +} + +var defaultCheckElements = []string{ + string(elementSwitch), +} + var Analyzer = &analysis.Analyzer{ Name: "exhaustive", - Doc: "check exhaustiveness of enum switch statements and map literals", + Doc: "check exhaustiveness of enum switch statements", Run: run, Requires: []*analysis.Analyzer{inspect.Analyzer}, FactTypes: []analysis.Fact{&enumMembersFact{}}, } func run(pass *analysis.Pass) (interface{}, error) { - checks := make(map[string]bool) - for _, v := range strings.Split(fCheck, ",") { - v = strings.TrimSpace(v) - switch v { - case checkSwitch: - checks[checkSwitch] = true - case checkMap: - checks[checkMap] = true - } - } - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - for typ, members := range findEnums( - fPackageScopeOnly, - pass.Pkg, - inspect, - pass.TypesInfo, - ) { + for typ, members := range findEnums(fPackageScopeOnly, pass.Pkg, inspect, pass.TypesInfo) { exportFact(pass, typ, members) } - generated := make(generatedCache) - comments := make(commentsCache) - - swChecker := switchChecker( - pass, - switchConfig{ - explicitExhaustiveSwitch: fExplicitExhaustiveSwitch, - defaultSignifiesExhaustive: fDefaultSignifiesExhaustive, - checkGeneratedFiles: fCheckGenerated, - ignoreEnumMembers: fIgnoreEnumMembers.value(), - }, - generated, - comments, - ) - - mapChecker := mapChecker( - pass, - mapConfig{ - explicitExhaustiveMap: fExplicitExhaustiveMap, - checkGeneratedFiles: fCheckGenerated, - ignoreEnumMembers: 
fIgnoreEnumMembers.value(), - }, - generated, - comments, - ) - - var types []ast.Node - if checks[checkSwitch] { - types = append(types, &ast.SwitchStmt{}) - } - if checks[checkMap] { - types = append(types, &ast.CompositeLit{}) - } - - inspect.WithStack(types, func(n ast.Node, push bool, stack []ast.Node) bool { - var proceed bool - switch n.(type) { - case *ast.SwitchStmt: - proceed, _ = swChecker(n, push, stack) - case *ast.CompositeLit: - proceed, _ = mapChecker(n, push, stack) + generated := boolCache{compute: isGeneratedFile} + comments := commentCache{compute: fileCommentMap} + + // NOTE: should not share the same inspect.WithStack call for different + // program elements: the visitor function for a program element may + // exit traversal early, but this shouldn't affect traversal for + // other program elements. + for _, e := range fCheck.elements { + switch checkElement(e) { + case elementSwitch: + conf := switchConfig{ + explicit: fExplicitExhaustiveSwitch, + defaultSignifiesExhaustive: fDefaultSignifiesExhaustive, + defaultCaseRequired: fDefaultCaseRequired, + checkGenerated: fCheckGenerated, + ignoreConstant: fIgnoreEnumMembers.re, + ignoreType: fIgnoreEnumTypes.re, + } + checker := switchChecker(pass, conf, generated, comments) + inspect.WithStack([]ast.Node{&ast.SwitchStmt{}}, toVisitor(checker)) + + case elementMap: + conf := mapConfig{ + explicit: fExplicitExhaustiveMap, + checkGenerated: fCheckGenerated, + ignoreConstant: fIgnoreEnumMembers.re, + ignoreType: fIgnoreEnumTypes.re, + } + checker := mapChecker(pass, conf, generated, comments) + inspect.WithStack([]ast.Node{&ast.CompositeLit{}}, toVisitor(checker)) + + default: + panic(fmt.Sprintf("unknown checkElement %v", e)) } - return proceed - }) + } return nil, nil } diff --git a/vendor/github.com/nishanths/exhaustive/flag.go b/vendor/github.com/nishanths/exhaustive/flag.go new file mode 100644 index 0000000000..49d3d3c6c0 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/flag.go @@ -0,0 +1,75 @@ +package exhaustive + +import ( + "flag" + "regexp" + "strings" +) + +var _ flag.Value = (*regexpFlag)(nil) +var _ flag.Value = (*stringsFlag)(nil) + +// regexpFlag implements flag.Value for parsing +// regular expression flag inputs. +type regexpFlag struct{ re *regexp.Regexp } + +func (f *regexpFlag) String() string { + if f == nil || f.re == nil { + return "" + } + return f.re.String() +} + +func (f *regexpFlag) Set(expr string) error { + if expr == "" { + f.re = nil + return nil + } + + re, err := regexp.Compile(expr) + if err != nil { + return err + } + + f.re = re + return nil +} + +// stringsFlag implements flag.Value for parsing a comma-separated string +// list. Surrounding whitespace is stripped from the input and from each +// element. If filter is non-nil it is called for each element in the input. 
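The stringsFlag type below follows the standard flag.Value pattern. A self-contained editorial sketch of the same pattern (the csvFlag name and the demo flag set are invented, not the vendored type):

```
package main

import (
	"flag"
	"fmt"
	"strings"
)

// csvFlag collects a comma-separated list, trimming whitespace,
// in the manner of the vendored stringsFlag.
type csvFlag struct{ elements []string }

func (f *csvFlag) String() string { return strings.Join(f.elements, ",") }

func (f *csvFlag) Set(input string) error {
	for _, el := range strings.Split(input, ",") {
		el = strings.TrimSpace(el)
		if el == "" {
			return fmt.Errorf("empty element in flag value %q", input)
		}
		f.elements = append(f.elements, el)
	}
	return nil
}

func main() {
	var check csvFlag
	fs := flag.NewFlagSet("demo", flag.ExitOnError)
	fs.Var(&check, "check", "comma-separated `elements` to check")
	_ = fs.Parse([]string{"-check", "switch, map"})
	fmt.Println(check.elements) // [switch map]
}
```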
+type stringsFlag struct { + elements []string + filter func(string) error +} + +func (f *stringsFlag) String() string { + if f == nil { + return "" + } + return strings.Join(f.elements, ",") +} + +func (f *stringsFlag) filterFunc() func(string) error { + if f.filter != nil { + return f.filter + } + return func(_ string) error { return nil } +} + +func (f *stringsFlag) Set(input string) error { + input = strings.TrimSpace(input) + if input == "" { + f.elements = nil + return nil + } + + for _, el := range strings.Split(input, ",") { + el = strings.TrimSpace(el) + if err := f.filterFunc()(el); err != nil { + return err + } + f.elements = append(f.elements, el) + } + return nil +} diff --git a/vendor/github.com/nishanths/exhaustive/map.go b/vendor/github.com/nishanths/exhaustive/map.go index 1b86fe5bee..23b4bef1ce 100644 --- a/vendor/github.com/nishanths/exhaustive/map.go +++ b/vendor/github.com/nishanths/exhaustive/map.go @@ -5,35 +5,30 @@ import ( "go/ast" "go/types" "regexp" - "strings" "golang.org/x/tools/go/analysis" ) // mapConfig is configuration for mapChecker. type mapConfig struct { - explicitExhaustiveMap bool - checkGeneratedFiles bool - ignoreEnumMembers *regexp.Regexp // can be nil + explicit bool + checkGenerated bool + ignoreConstant *regexp.Regexp // can be nil + ignoreType *regexp.Regexp // can be nil } -// mapChecker returns a node visitor that checks exhaustiveness -// of enum keys in map literal for the supplied pass, and reports diagnostics if non-exhaustive. -// It expects to only see *ast.CompositeLit nodes. -func mapChecker(pass *analysis.Pass, cfg mapConfig, generated generatedCache, comments commentsCache) nodeVisitor { +// mapChecker returns a node visitor that checks for exhaustiveness of +// map literals for the supplied pass, and reports diagnostics. The +// node visitor expects only *ast.CompositeLit nodes. +func mapChecker(pass *analysis.Pass, cfg mapConfig, generated boolCache, comments commentCache) nodeVisitor { return func(n ast.Node, push bool, stack []ast.Node) (bool, string) { if !push { - // The proceed return value should not matter; it is ignored by - // inspector package for pop calls. - // Nevertheless, return true to be on the safe side for the future. return true, resultNotPush } file := stack[0].(*ast.File) - if !cfg.checkGeneratedFiles && generated.IsGenerated(file) { - // Don't check this file. - // Return false because the children nodes of node `n` don't have to be checked. + if !cfg.checkGenerated && generated.get(file) { return false, resultGeneratedFile } @@ -45,7 +40,6 @@ func mapChecker(pass *analysis.Pass, cfg mapConfig, generated generatedCache, co if !ok2 { return true, resultNotMapLiteral } - mapType, ok = namedType.Underlying().(*types.Map) if !ok { return true, resultNotMapLiteral @@ -53,23 +47,19 @@ func mapChecker(pass *analysis.Pass, cfg mapConfig, generated generatedCache, co } if len(lit.Elts) == 0 { - // because it may be used as an alternative for make(map[...]...) 
return false, resultEmptyMapLiteral } - keyType, ok := mapType.Key().(*types.Named) - if !ok { - return true, resultMapKeyIsNotNamedType - } - - fileComments := comments.GetComments(file, pass.Fset) + fileComments := comments.get(pass.Fset, file) var relatedComments []*ast.CommentGroup for i := range stack { - // iterate over stack in the reverse order (from bottom to top) + // iterate over stack in the reverse order (from inner + // node to outer node) node := stack[len(stack)-1-i] switch node.(type) { // need to check comments associated with following nodes, - // because logic of ast package doesn't allow to associate comment with *ast.CompositeLit + // because logic of ast package doesn't associate comment + // with *ast.CompositeLit as required. case *ast.CompositeLit, // stack[len(stack)-1] *ast.ReturnStmt, // return ... *ast.IndexExpr, // map[enum]...{...}[key] @@ -81,68 +71,64 @@ func mapChecker(pass *analysis.Pass, cfg mapConfig, generated generatedCache, co *ast.ValueSpec: // var declaration relatedComments = append(relatedComments, fileComments[node]...) continue + default: + // stop iteration on the first inappropriate node + break } - // stop iteration on the first inappropriate node - break } - if !cfg.explicitExhaustiveMap && containsIgnoreDirective(relatedComments) { - // Skip checking of this map literal due to ignore directive comment. - // Still return true because there may be nested map literals - // that are not to be ignored. - return true, resultMapIgnoreComment + if !cfg.explicit && hasCommentPrefix(relatedComments, ignoreComment) { + // Skip checking of this map literal due to ignore + // comment. Still return true because there may be nested + // map literals that are not to be ignored. + return true, resultIgnoreComment } - if cfg.explicitExhaustiveMap && !containsEnforceDirective(relatedComments) { - // Skip checking of this map literal due to missing enforce directive comment. - return true, resultMapNoEnforceComment + if cfg.explicit && !hasCommentPrefix(relatedComments, enforceComment) { + return true, resultNoEnforceComment } - keyPkg := keyType.Obj().Pkg() - if keyPkg == nil { - // The Go documentation says: nil for labels and objects in the Universe scope. - // This happens for the `error` type, for example. - return true, resultNilMapKeyTypePkg + es, ok := composingEnumTypes(pass, mapType.Key()) + if !ok || len(es) == 0 { + return true, resultEnumTypes } - enumTyp := enumType{keyType.Obj()} - members, ok := importFact(pass, enumTyp) - if !ok { - return true, resultMapKeyNotEnum - } + var checkl checklist + checkl.ignoreConstant(cfg.ignoreConstant) + checkl.ignoreType(cfg.ignoreType) - samePkg := keyPkg == pass.Pkg // do the map literal and the map key type (i.e. enum type) live in the same package? - checkUnexported := samePkg // we want to include unexported members in the exhaustiveness check only if we're in the same package - checklist := makeChecklist(members, keyPkg, checkUnexported, cfg.ignoreEnumMembers) - - for _, e := range lit.Elts { - expr, ok := e.(*ast.KeyValueExpr) - if !ok { - continue // is it possible for valid map literal? - } - analyzeCaseClauseExpr(expr.Key, pass.TypesInfo, checklist.found) + for _, e := range es { + checkl.add(e.typ, e.members, pass.Pkg == e.typ.Pkg()) } - if len(checklist.remaining()) == 0 { - // All enum members accounted for. - // Nothing to report. 
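The stack walk above exists because go/ast does not attach comments directly to an *ast.CompositeLit; an ignore comment on, say, the enclosing var declaration is found via the *ast.GenDecl and *ast.ValueSpec entries on the stack. A sketch (token is the example package from the README above):

```
//exhaustive:ignore -- found via the enclosing *ast.GenDecl and *ast.ValueSpec
var symbols = map[token.Token]rune{
	token.Add: '+',
}
```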
+ analyzeMapLiteral(lit, pass.TypesInfo, checkl.found) + if len(checkl.remaining()) == 0 { return true, resultEnumMembersAccounted } - - pass.Report(makeMapDiagnostic(lit, samePkg, enumTyp, members, checklist.remaining())) + pass.Report(makeMapDiagnostic(lit, dedupEnumTypes(toEnumTypes(es)), checkl.remaining())) return true, resultReportedDiagnostic } } -// Makes a "missing map keys" diagnostic. -// samePkg should be true if the enum type and the map literal are defined in the same package. -func makeMapDiagnostic(lit *ast.CompositeLit, samePkg bool, enumTyp enumType, allMembers enumMembers, missingMembers map[string]struct{}) analysis.Diagnostic { - message := fmt.Sprintf("missing map keys of type %s: %s", - diagnosticEnumTypeName(enumTyp.TypeName, samePkg), - strings.Join(diagnosticMissingMembers(missingMembers, allMembers), ", ")) +func analyzeMapLiteral(lit *ast.CompositeLit, info *types.Info, each func(constantValue)) { + for _, e := range lit.Elts { + expr, ok := e.(*ast.KeyValueExpr) + if !ok { + continue + } + if val, ok := exprConstVal(expr.Key, info); ok { + each(val) + } + } +} +func makeMapDiagnostic(lit *ast.CompositeLit, enumTypes []enumType, missing map[member]struct{}) analysis.Diagnostic { return analysis.Diagnostic{ - Pos: lit.Pos(), - End: lit.End(), - Message: message, + Pos: lit.Pos(), + End: lit.End(), + Message: fmt.Sprintf( + "missing keys in map of key type %s: %s", + diagnosticEnumTypes(enumTypes), + diagnosticGroups(groupify(missing, enumTypes)), + ), } } diff --git a/vendor/github.com/nishanths/exhaustive/switch.go b/vendor/github.com/nishanths/exhaustive/switch.go index 115c317e8e..d235fbec47 100644 --- a/vendor/github.com/nishanths/exhaustive/switch.go +++ b/vendor/github.com/nishanths/exhaustive/switch.go @@ -8,45 +8,97 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/ast/astutil" ) -// nodeVisitor is like the visitor function used by Inspector.WithStack, +// nodeVisitor is like the visitor function used by inspector.WithStack, // except that it returns an additional value: a short description of // the result of this node visit. // -// The result is typically useful in debugging or in unit tests to check +// The result value is typically useful in debugging or in unit tests to check // that the nodeVisitor function took the expected code path. type nodeVisitor func(n ast.Node, push bool, stack []ast.Node) (proceed bool, result string) -// Result values returned by a node visitor constructed via switchChecker. +// toVisitor converts a nodeVisitor to a function suitable for use +// with inspector.WithStack. +func toVisitor(v nodeVisitor) func(ast.Node, bool, []ast.Node) bool { + return func(node ast.Node, push bool, stack []ast.Node) bool { + proceed, _ := v(node, push, stack) + return proceed + } +} + +// Result values returned by node visitors. 
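For readers unfamiliar with the traversal API: toVisitor above adapts the richer nodeVisitor signature to the one inspector.WithStack expects. An editorial sketch of that underlying API (the parsed source snippet is invented):

```
package main

import (
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	src := "package x\nfunc f(i int) {\n\tswitch i {\n\t}\n}\n"
	f, err := parser.ParseFile(fset, "x.go", src, 0)
	if err != nil {
		panic(err)
	}
	insp := inspector.New([]*ast.File{f})

	insp.WithStack([]ast.Node{&ast.SwitchStmt{}}, func(n ast.Node, push bool, stack []ast.Node) bool {
		if !push {
			return true // pop event; the proceed value is ignored here
		}
		_ = stack[0].(*ast.File) // stack[0] is always the enclosing file
		_ = n.(*ast.SwitchStmt)  // n is one of the requested node types
		return true              // continue traversal into children
	})
}
```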
const ( - resultNotPush = "not push" - resultGeneratedFile = "generated file" - resultNoSwitchTag = "no switch tag" - resultEmptyMapLiteral = "empty map literal" - resultNotMapLiteral = "not map literal" - resultMapKeyIsNotNamedType = "map key is not named type" - resultNilMapKeyTypePkg = "nil map key type package" - resultMapKeyNotEnum = "map key not known enum type" - resultMapIgnoreComment = "map literal has ignore comment" - resultMapNoEnforceComment = "map literal has no enforce comment" - resultTagNotValue = "switch tag not value type" - resultTagNotNamed = "switch tag not named type" - resultTagNoPkg = "switch tag does not belong to regular package" - resultTagNotEnum = "switch tag not known enum type" - resultSwitchIgnoreComment = "switch statement has ignore comment" - resultSwitchNoEnforceComment = "switch statement has no enforce comment" - resultEnumMembersAccounted = "requisite enum members accounted for" - resultDefaultCaseSuffices = "default case presence satisfies exhaustiveness" - resultReportedDiagnostic = "reported diagnostic" + resultEmptyMapLiteral = "empty map literal" + resultNotMapLiteral = "not map literal" + resultKeyNilPkg = "nil map key package" + resultKeyNotEnum = "not all map key type terms are known enum types" + + resultNoSwitchTag = "no switch tag" + resultTagNotValue = "switch tag not value type" + resultTagNilPkg = "nil switch tag package" + resultTagNotEnum = "not all switch tag terms are known enum types" + + resultNotPush = "not push" + resultGeneratedFile = "generated file" + resultIgnoreComment = "has ignore comment" + resultNoEnforceComment = "has no enforce comment" + resultEnumMembersAccounted = "required enum members accounted for" + resultDefaultCaseSuffices = "default case satisfies exhaustiveness" + resultMissingDefaultCase = "missing required default case" + resultReportedDiagnostic = "reported diagnostic" + resultEnumTypes = "invalid or empty composing enum types" ) -// switchChecker returns a node visitor that checks exhaustiveness -// of enum switch statements for the supplied pass, and reports diagnostics for -// switch statements that are non-exhaustive. -// It expects to only see *ast.SwitchStmt nodes. -func switchChecker(pass *analysis.Pass, cfg switchConfig, generated generatedCache, comments commentsCache) nodeVisitor { +// switchConfig is configuration for switchChecker. +type switchConfig struct { + explicit bool + defaultSignifiesExhaustive bool + defaultCaseRequired bool + checkGenerated bool + ignoreConstant *regexp.Regexp // can be nil + ignoreType *regexp.Regexp // can be nil +} + +// There are few possibilities, and often none, so we use a possibly-nil slice +func userDirectives(comments []*ast.CommentGroup) []string { + var directives []string + for _, c := range comments { + for _, cc := range c.List { + // The order matters here: we always want to check the longest first. + for _, d := range []string{ + enforceDefaultCaseRequiredComment, + ignoreDefaultCaseRequiredComment, + enforceComment, + ignoreComment, + } { + if strings.HasPrefix(cc.Text, d) { + directives = append(directives, d) + // The break here is important: once we associate a comment + // with a particular (longest-possible) directive, we don't want + // to map to another! 
+ break + } + } + } + } + return directives +} + +// Can be replaced with slices.Contains with go1.21 +func directivesIncludes(directives []string, d string) bool { + for _, ud := range directives { + if ud == d { + return true + } + } + return false +} + +// switchChecker returns a node visitor that checks exhaustiveness of +// enum switch statements for the supplied pass, and reports +// diagnostics. The node visitor expects only *ast.SwitchStmt nodes. +func switchChecker(pass *analysis.Pass, cfg switchConfig, generated boolCache, comments commentCache) nodeVisitor { return func(n ast.Node, push bool, stack []ast.Node) (bool, string) { if !push { // The proceed return value should not matter; it is ignored by @@ -57,7 +109,7 @@ func switchChecker(pass *analysis.Pass, cfg switchConfig, generated generatedCac file := stack[0].(*ast.File) - if !cfg.checkGeneratedFiles && generated.IsGenerated(file) { + if !cfg.checkGenerated && generated.get(file) { // Don't check this file. // Return false because the children nodes of node `n` don't have to be checked. return false, resultGeneratedFile @@ -65,20 +117,31 @@ func switchChecker(pass *analysis.Pass, cfg switchConfig, generated generatedCac sw := n.(*ast.SwitchStmt) - switchComments := comments.GetComments(file, pass.Fset)[sw] - if !cfg.explicitExhaustiveSwitch && containsIgnoreDirective(switchComments) { - // Skip checking of this switch statement due to ignore directive comment. - // Still return true because there may be nested switch statements - // that are not to be ignored. - return true, resultSwitchIgnoreComment + switchComments := comments.get(pass.Fset, file)[sw] + uDirectives := userDirectives(switchComments) + if !cfg.explicit && directivesIncludes(uDirectives, ignoreComment) { + // Skip checking of this switch statement due to ignore + // comment. Still return true because there may be nested + // switch statements that are not to be ignored. + return true, resultIgnoreComment + } + if cfg.explicit && !directivesIncludes(uDirectives, enforceComment) { + // Skip checking of this switch statement due to missing + // enforce comment. + return true, resultNoEnforceComment } - if cfg.explicitExhaustiveSwitch && !containsEnforceDirective(switchComments) { - // Skip checking of this switch statement due to missing enforce directive comment. - return true, resultSwitchNoEnforceComment + requireDefaultCase := cfg.defaultCaseRequired + if directivesIncludes(uDirectives, ignoreDefaultCaseRequiredComment) { + requireDefaultCase = false + } + if directivesIncludes(uDirectives, enforceDefaultCaseRequiredComment) { + // We have "if" instead of "else if" here in case of conflicting ignore/enforce directives. + // In that case, because this is second, we will default to enforcing. + requireDefaultCase = true } if sw.Tag == nil { - return true, resultNoSwitchTag + return true, resultNoSwitchTag // never possible for valid Go program? } t := pass.TypesInfo.Types[sw.Tag] @@ -86,264 +149,88 @@ func switchChecker(pass *analysis.Pass, cfg switchConfig, generated generatedCac return true, resultTagNotValue } - tagType, ok := t.Type.(*types.Named) - if !ok { - return true, resultTagNotNamed + es, ok := composingEnumTypes(pass, t.Type) + if !ok || len(es) == 0 { + return true, resultEnumTypes } - tagPkg := tagType.Obj().Pkg() - if tagPkg == nil { - // The Go documentation says: nil for labels and objects in the Universe scope. - // This happens for the `error` type, for example. 
- return true, resultTagNoPkg - } + var checkl checklist + checkl.ignoreConstant(cfg.ignoreConstant) + checkl.ignoreType(cfg.ignoreType) - enumTyp := enumType{tagType.Obj()} - members, ok := importFact(pass, enumTyp) - if !ok { - // switch tag's type is not a known enum type. - return true, resultTagNotEnum + for _, e := range es { + checkl.add(e.typ, e.members, pass.Pkg == e.typ.Pkg()) } - samePkg := tagPkg == pass.Pkg // do the switch statement and the switch tag type (i.e. enum type) live in the same package? - checkUnexported := samePkg // we want to include unexported members in the exhaustiveness check only if we're in the same package - checklist := makeChecklist(members, tagPkg, checkUnexported, cfg.ignoreEnumMembers) - - hasDefaultCase := analyzeSwitchClauses(sw, pass.TypesInfo, checklist.found) - - if len(checklist.remaining()) == 0 { + defaultCaseExists := analyzeSwitchClauses(sw, pass.TypesInfo, checkl.found) + if !defaultCaseExists && requireDefaultCase { + // Even if the switch explicitly enumerates all the + // enum values, the user has still required all switches + // to have a default case. We check this first to avoid + // early-outs + pass.Report(makeMissingDefaultDiagnostic(sw, dedupEnumTypes(toEnumTypes(es)))) + return true, resultMissingDefaultCase + } + if len(checkl.remaining()) == 0 { // All enum members accounted for. // Nothing to report. return true, resultEnumMembersAccounted } - if hasDefaultCase && cfg.defaultSignifiesExhaustive { - // Though enum members are not accounted for, - // the existence of the default case signifies exhaustiveness. - // So don't report. + if defaultCaseExists && cfg.defaultSignifiesExhaustive { + // Though enum members are not accounted for, the + // existence of the default case signifies + // exhaustiveness. So don't report. return true, resultDefaultCaseSuffices } - pass.Report(makeSwitchDiagnostic(sw, samePkg, enumTyp, members, checklist.remaining())) + pass.Report(makeSwitchDiagnostic(sw, dedupEnumTypes(toEnumTypes(es)), checkl.remaining())) return true, resultReportedDiagnostic } } -// switchConfig is configuration for switchChecker. -type switchConfig struct { - explicitExhaustiveSwitch bool - defaultSignifiesExhaustive bool - checkGeneratedFiles bool - ignoreEnumMembers *regexp.Regexp // can be nil -} - func isDefaultCase(c *ast.CaseClause) bool { return c.List == nil // see doc comment on List field } -func denotesPackage(ident *ast.Ident, info *types.Info) (*types.Package, bool) { - obj := info.ObjectOf(ident) - if obj == nil { - return nil, false - } - n, ok := obj.(*types.PkgName) - if !ok { - return nil, false - } - return n.Imported(), true -} - -// analyzeSwitchClauses analyzes the clauses in the supplied switch statement. -// The info param should typically be pass.TypesInfo. The found function is -// called for each enum member name found in the switch statement. -// The hasDefaultCase return value indicates whether the switch statement has a -// default clause. -func analyzeSwitchClauses(sw *ast.SwitchStmt, info *types.Info, found func(val constantValue)) (hasDefaultCase bool) { +// analyzeSwitchClauses analyzes the clauses in the supplied switch +// statement. The info param typically is pass.TypesInfo. The each +// function is called for each enum member name found in the switch +// statement. The hasDefaultCase return value indicates whether the +// switch statement has a default clause. 
+func analyzeSwitchClauses(sw *ast.SwitchStmt, info *types.Info, each func(val constantValue)) (hasDefaultCase bool) { for _, stmt := range sw.Body.List { caseCl := stmt.(*ast.CaseClause) if isDefaultCase(caseCl) { hasDefaultCase = true - continue // nothing more to do if it's the default case + continue } for _, expr := range caseCl.List { - analyzeCaseClauseExpr(expr, info, found) + if val, ok := exprConstVal(expr, info); ok { + each(val) + } } } return hasDefaultCase } -func analyzeCaseClauseExpr(e ast.Expr, info *types.Info, found func(val constantValue)) { - handleIdent := func(ident *ast.Ident) { - obj := info.Uses[ident] - if obj == nil { - return - } - if _, ok := obj.(*types.Const); !ok { - return - } - - // There are two scenarios. - // See related test cases in typealias/quux/quux.go. - // - // ### Scenario 1 - // - // Tag package and constant package are the same. - // - // For example: - // var mode fs.FileMode - // switch mode { - // case fs.ModeDir: - // } - // - // This is simple: we just use fs.ModeDir's value. - // - // ### Scenario 2 - // - // Tag package and constant package are different. - // - // For example: - // var mode fs.FileMode - // switch mode { - // case os.ModeDir: - // } - // - // Or equivalently: - // var mode os.FileMode // in effect, fs.FileMode because of type alias in package os - // switch mode { - // case os.ModeDir: - // } - // - // In this scenario, too, we accept the case clause expr constant - // value, as is. If the Go type checker is okay with the - // name being listed in the case clause, we don't care much further. - // - found(determineConstVal(ident, info)) - } - - e = astutil.Unparen(e) - switch e := e.(type) { - case *ast.Ident: - handleIdent(e) - - case *ast.SelectorExpr: - x := astutil.Unparen(e.X) - // Ensure we only see the form `pkg.Const`, and not e.g. `structVal.f` - // or `structVal.inner.f`. - // Check that X, which is everything except the rightmost *ast.Ident (or - // Sel), is also an *ast.Ident. - xIdent, ok := x.(*ast.Ident) - if !ok { - return - } - // Doesn't matter which package, just that it denotes a package. - if _, ok := denotesPackage(xIdent, info); !ok { - return - } - handleIdent(e.Sel) - } -} - -// diagnosticMissingMembers constructs the list of missing enum members, -// suitable for use in a reported diagnostic message. -// Order is the same as in enumMembers.Names. -func diagnosticMissingMembers(missingMembers map[string]struct{}, em enumMembers) []string { - missingNamesGroupedByValue := make([][]string, len(em.Names)) // empty groups will be filtered out later - firstIndex := make(map[constantValue]int, len(em.ValueToNames)) - for i, name := range em.Names { - value := em.NameToValue[name] - j, ok := firstIndex[value] - if !ok { - firstIndex[value] = i - j = i - } - - if _, missing := missingMembers[name]; missing { - missingNamesGroupedByValue[j] = append(missingNamesGroupedByValue[j], name) - } - } - - out := make([]string, 0, len(missingMembers)) - for _, names := range missingNamesGroupedByValue { - if len(names) == 0 { - continue - } - out = append(out, strings.Join(names, "|")) - } - return out -} - -// diagnosticEnumTypeName returns a string representation of an enum type for -// use in reported diagnostics. -func diagnosticEnumTypeName(enumType *types.TypeName, samePkg bool) string { - if samePkg { - return enumType.Name() - } - return enumType.Pkg().Name() + "." + enumType.Name() -} - -// Makes a "missing cases in switch" diagnostic. 
-// samePkg should be true if the enum type and the switch statement are defined -// in the same package. -func makeSwitchDiagnostic(sw *ast.SwitchStmt, samePkg bool, enumTyp enumType, allMembers enumMembers, missingMembers map[string]struct{}) analysis.Diagnostic { - message := fmt.Sprintf("missing cases in switch of type %s: %s", - diagnosticEnumTypeName(enumTyp.TypeName, samePkg), - strings.Join(diagnosticMissingMembers(missingMembers, allMembers), ", ")) - +func makeSwitchDiagnostic(sw *ast.SwitchStmt, enumTypes []enumType, missing map[member]struct{}) analysis.Diagnostic { return analysis.Diagnostic{ - Pos: sw.Pos(), - End: sw.End(), - Message: message, + Pos: sw.Pos(), + End: sw.End(), + Message: fmt.Sprintf( + "missing cases in switch of type %s: %s", + diagnosticEnumTypes(enumTypes), + diagnosticGroups(groupify(missing, enumTypes)), + ), } } -// A checklist holds a set of enum member names that have to be -// accounted for to satisfy exhaustiveness in an enum switch statement. -// -// The found method checks off member names from the set, based on -// constant value, when a constant value is encoutered in the switch -// statement's cases. -// -// The remaining method returns the member names not accounted for. -type checklist struct { - em enumMembers - checkl map[string]struct{} -} - -func makeChecklist(em enumMembers, enumPkg *types.Package, includeUnexported bool, ignore *regexp.Regexp) *checklist { - checkl := make(map[string]struct{}) - - add := func(memberName string) { - if memberName == "_" { - // Blank identifier is often used to skip entries in iota lists. - // Also, it can't be referenced anywhere (including in a switch - // statement's cases), so it doesn't make sense to include it - // as required member to satisfy exhaustiveness. - return - } - if !ast.IsExported(memberName) && !includeUnexported { - return - } - if ignore != nil && ignore.MatchString(enumPkg.Path()+"."+memberName) { - return - } - checkl[memberName] = struct{}{} - } - - for _, name := range em.Names { - add(name) - } - - return &checklist{ - em: em, - checkl: checkl, - } -} - -func (c *checklist) found(val constantValue) { - // Delete all of the same-valued names. 
- for _, name := range c.em.ValueToNames[val] { - delete(c.checkl, name) +func makeMissingDefaultDiagnostic(sw *ast.SwitchStmt, enumTypes []enumType) analysis.Diagnostic { + return analysis.Diagnostic{ + Pos: sw.Pos(), + End: sw.End(), + Message: fmt.Sprintf( + "missing default case in switch of type %s", + diagnosticEnumTypes(enumTypes), + ), } } - -func (c *checklist) remaining() map[string]struct{} { - return c.checkl -} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/.gitignore b/vendor/github.com/nunnatsa/ginkgolinter/.gitignore new file mode 100644 index 0000000000..7d7f8b10ce --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/.gitignore @@ -0,0 +1,2 @@ +ginkgolinter +bin/ diff --git a/vendor/github.com/nunnatsa/ginkgolinter/.golangci.yml b/vendor/github.com/nunnatsa/ginkgolinter/.golangci.yml new file mode 100644 index 0000000000..71ff0d034c --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/.golangci.yml @@ -0,0 +1,3 @@ +linters: + enable: + - revive diff --git a/vendor/github.com/nunnatsa/ginkgolinter/LICENSE b/vendor/github.com/nunnatsa/ginkgolinter/LICENSE new file mode 100644 index 0000000000..11096c5c8a --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Nahshon Unna Tsameret + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/nunnatsa/ginkgolinter/Makefile b/vendor/github.com/nunnatsa/ginkgolinter/Makefile new file mode 100644 index 0000000000..586633006a --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/Makefile @@ -0,0 +1,27 @@ +VERSION ?= "unknown" +VERSION_FLAG := -X github.com/nunnatsa/ginkgolinter/version.version=$(VERSION) +COMMIT_HASH := $(shell git rev-parse HEAD) +HASH_FLAG := -X github.com/nunnatsa/ginkgolinter/version.gitHash=$(COMMIT_HASH) + +BUILD_ARGS := -ldflags "$(VERSION_FLAG) $(HASH_FLAG)" + +build: unit-test + go build $(BUILD_ARGS) -o ginkgolinter ./cmd/ginkgolinter + +unit-test: + go test ./... 
+ +build-for-windows: + GOOS=windows GOARCH=amd64 go build $(BUILD_ARGS) -o bin/ginkgolinter-amd64.exe ./cmd/ginkgolinter + +build-for-mac: + GOOS=darwin GOARCH=amd64 go build $(BUILD_ARGS) -o bin/ginkgolinter-amd64-darwin ./cmd/ginkgolinter + +build-for-linux: + GOOS=linux GOARCH=amd64 go build $(BUILD_ARGS) -o bin/ginkgolinter-amd64-linux ./cmd/ginkgolinter + GOOS=linux GOARCH=386 go build $(BUILD_ARGS) -o bin/ginkgolinter-386-linux ./cmd/ginkgolinter + +build-all: build build-for-linux build-for-mac build-for-windows + +test: build + ./tests/e2e.sh diff --git a/vendor/github.com/nunnatsa/ginkgolinter/README.md b/vendor/github.com/nunnatsa/ginkgolinter/README.md new file mode 100644 index 0000000000..977cec903e --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/README.md @@ -0,0 +1,553 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/nunnatsa/ginkgolinter)](https://goreportcard.com/report/github.com/nunnatsa/ginkgolinter) +[![Coverage Status](https://coveralls.io/repos/github/nunnatsa/ginkgolinter/badge.svg?branch=main)](https://coveralls.io/github/nunnatsa/ginkgolinter?branch=main) +![Build Status](https://github.com/nunnatsa/ginkgolinter/workflows/CI/badge.svg) +[![License](https://img.shields.io/github/license/nunnatsa/ginkgolinter)](/LICENSE) +[![Release](https://img.shields.io/github/release/nunnatsa/ginkgolinter.svg)](https://github.com/nunnatsa/ginkgolinter/releases/latest) +[![GitHub Releases Stats of ginkgolinter](https://img.shields.io/github/downloads/nunnatsa/ginkgolinter/total.svg?logo=github)](https://somsubhra.github.io/github-release-stats/?username=nunnatsa&repository=ginkgolinter) + +# ginkgo-linter +[ginkgo](https://onsi.github.io/ginkgo/) is a popular testing framework and [gomega](https://onsi.github.io/gomega/) is its assertion package. + +This is a golang linter to enforce some standards while using the ginkgo and gomega packages. + +## Install the CLI +Download the right executable from the latest release, according to your OS. + +Another option is to use go: +```shell +go install github.com/nunnatsa/ginkgolinter/cmd/ginkgolinter@latest +``` +Then add the new executable to your PATH. + +## usage +```shell +ginkgolinter [-fix] ./... +``` + +Use the `-fix` flag to apply the fix suggestions to the source code. + +### Use ginkgolinter with golangci-lint +The ginkgolinter is now part of the popular [golangci-lint](https://golangci-lint.run/), starting from version `v1.51.1`. + +It is not enabled by default, though. There are two ways to run ginkgolinter with golangci-lint: + +* From command line: + ```shell + golangci-lint run -E ginkgolinter ./... + ``` +* From configuration: + + Add ginkgolinter to the enabled linters list in .golangci.reference.yml file in your project. For more details, see + the [golangci-lint documentation](https://golangci-lint.run/usage/configuration/); e.g. + ```yaml + linters: + enable: + - ginkgolinter + ``` +## Linter Rules +The linter checks the ginkgo and gomega assertions in golang test code. 
Gomega may be used together with ginkgo tests; for example:
+```go
+It("should test something", func() { // It is a ginkgo test case function
+	Expect("abcd").To(HaveLen(4), "the string should have a length of 4") // Expect is the gomega assertion
+})
+```
+or within classic golang test code, like this:
+```go
+func TestWithGomega(t *testing.T) {
+	g := NewWithT(t)
+	g.Expect("abcd").To(HaveLen(4), "the string should have a length of 4")
+}
+```
+
+In some cases, gomega is passed as a variable to a function by ginkgo, for example:
+```go
+Eventually(func(g Gomega) error {
+	g.Expect("abcd").To(HaveLen(4), "the string should have a length of 4")
+	return nil
+}).Should(Succeed())
+```
+
+The linter checks the `Expect`, `ExpectWithOffset` and the `Ω` "actual" functions, with the `Should`, `ShouldNot`, `To`, `ToNot` and `NotTo` assertion functions.
+
+It also supports the embedded `Not()` matcher.
+
+Some checks find actual bugs, and some are more for style.
+
+### Using a function call in async assertion [BUG]
+This rule finds an actual bug in tests, where a function call is asserted in an async assertion such as `Eventually`. For
+example:
+```go
+func slowInt(val int) int {
+	time.Sleep(time.Second)
+	return val
+}
+
+...
+
+It("should test that slowInt returns 42, eventually", func() {
+	Eventually(slowInt(42)).WithPolling(time.Millisecond * 100).WithTimeout(time.Second * 2).Should(Equal(42))
+})
+```
+The problem with the above code is that it **should** poll - that is, call the function - until it returns 42, but what
+actually happens is that the function is called first, and its result `42` is passed to `Eventually` - not the function
+itself. This is not what we intended.
+
+The linter will suggest replacing this code with:
+```go
+It("should test that slowInt returns 42, eventually", func() {
+	Eventually(slowInt).WithArguments(42).WithPolling(time.Millisecond * 100).WithTimeout(time.Second * 2).Should(Equal(42))
+})
+```
+
+The linter suggests replacing the function call with the function name.
+
+If function arguments are used, the linter will add the `WithArguments()` method to pass them.
+
+Please notice that `WithArguments()` is only supported from gomega v1.22.0.
+
+When using an older version of gomega, change the code manually. For example:
+
+```go
+It("should test that slowInt returns 42, eventually", func() {
+	Eventually(func() int {
+		return slowInt(42)
+	}).WithPolling(time.Millisecond * 100).WithTimeout(time.Second * 2).Should(Equal(42))
+})
+```
+
+### Comparing a pointer with a value [BUG]
+The linter warns when comparing a pointer with a value.
+These comparisons are always wrong and will always fail.
+
+In case of a positive assertion (`To()` or `Should()`), the test will just fail.
+
+But the main concern is false positive tests, when using a negative assertion (`NotTo()`, `ToNot()`, `ShouldNot()`,
+`Should(Not())` etc.); e.g.
+```go
+num := 5
+...
+pNum := &num
+...
+Expect(pNum).ShouldNot(Equal(6))
+```
+This assertion will pass, but for the wrong reason: pNum does not equal 6 not because num == 5, but because pNum is
+a pointer, while `6` is an `int`.
+
+In the case above, the linter will suggest `Expect(pNum).ShouldNot(HaveValue(Equal(6)))`.
+
+The same applies to additional matchers: `BeTrue()` and `BeFalse()`, `BeIdenticalTo()`, `BeEquivalentTo()`
+and `BeNumerically`.
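For a runnable end-to-end illustration of this false positive, here is a minimal sketch in plain `testing` style (the test and variable names are illustrative, not from the linter's test suite, and `HaveValue` requires a reasonably recent gomega):

```go
package demo

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestPointerComparison(t *testing.T) {
	g := NewWithT(t)

	num := 5
	pNum := &num

	// Passes for the wrong reason: a *int can never equal the int 6.
	g.Expect(pNum).ShouldNot(Equal(6))

	// What the linter suggests: HaveValue dereferences the pointer,
	// so the assertion now checks the pointed-to value.
	g.Expect(pNum).ShouldNot(HaveValue(Equal(6)))
}
```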
+### Missing Assertion Method [BUG]
+The linter warns when calling an "actual" method (e.g. `Expect()`, `Eventually()` etc.) without an assertion method (e.g.
+`Should()`, `NotTo()` etc.).
+
+For example:
+```go
+// no assertion for the result
+Eventually(doSomething).WithTimeout(time.Second * 5).WithPolling(time.Millisecond * 100)
+```
+
+The linter will not suggest a fix for this warning.
+
+This rule cannot be suppressed.
+
+### Focus Container / Focus individual spec found [BUG]
+This rule finds ginkgo focus containers, or the `Focus` individual spec, in the code.
+
+ginkgo supports the `FDescribe`, `FContext`, `FWhen`, `FIt`, `FDescribeTable` and `FEntry`
+containers to allow the developer to focus
+on a specific test or set of tests during test development or debugging.
+
+For example:
+```go
+var _ = Describe("checking something", func() {
+	FIt("this test is the only one that will run", func(){
+		...
+	})
+})
+```
+Alternatively, the `Focus` individual spec may be used for the same purpose, e.g.
+```go
+var _ = Describe("checking something", Focus, func() {
+	It("this test is the only one that will run", func(){
+		...
+	})
+})
+```
+
+These containers, or the `Focus` spec, must not be part of the final source code, and should only be used locally by the
+developer.
+
+***This rule is disabled by default***. Use the `--forbid-focus-container=true` command line flag to enable it.
+
+### Comparing values from different types [BUG]
+
+The `Equal` and the `BeIdentical` matchers also check the type, not only the value.
+
+The following code will fail at runtime:
+```go
+x := 5 // x is int
+Expect(x).Should(Equal(uint(5))) // x and uint(5) are of different types
+```
+When using negative checks, it's even worse, because we get a false positive:
+```go
+x := 5
+Expect(x).ShouldNot(Equal(uint(5)))
+```
+
+The linter suggests two options to solve this warning: either compare with the same type, e.g.
+using casting, or use the `BeEquivalentTo` matcher.
+
+The linter can't guess which solution is best in each case, and so it won't auto-fix this warning.
+
+To suppress this warning entirely, use the `--suppress-type-compare-assertion=true` command line parameter.
+
+To suppress a specific file or line, use the `// ginkgo-linter:ignore-type-compare-warning` comment (see [below](#suppress-warning-from-the-code)).
+
+### Wrong Usage of the `MatchError` gomega Matcher [BUG]
+The `MatchError` gomega matcher asserts an error value (and that it's not nil).
+There are four valid formats for using this matcher:
+* an error value; e.g. `Expect(err).To(MatchError(anotherErr))`
+* a string, to be equal to the output of the `Error()` method; e.g. `Expect(err).To(MatchError("Not Found"))`
+* a gomega matcher that asserts strings; e.g. `Expect(err).To(MatchError(ContainSubstring("Found")))`
+* [from v0.29.0] a function that receives a single error parameter and returns a single boolean value.
+  In this format, an additional single string parameter, with the function description, is also required; e.g.
+  `Expect(err).To(MatchError(isNotFound, "is the error a not found error"))`
+
+These four formats are checked at runtime, but sometimes that's too late. ginkgolinter performs static analysis and so it
+will find these issues at build time.
+
+ginkgolinter checks the following:
+* That the first parameter is one of the four options above.
+* That there are no additional parameters passed to the matcher; e.g.
+  `MatchError(isNotFoundFunc, "a valid description" , "not used string")`. In this case, the matcher won't fail at run
+  time, but the additional parameters are unused and ignored.
+* If the first parameter is a function with the format of `func(error) bool`, ginkgolinter makes sure that the second
+  parameter exists and its type is string.
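For reference, the four accepted forms can sit side by side in one plain-`testing` sketch (the error value and the `isNotFound` helper are illustrative; the predicate form needs a gomega version that supports it):

```go
package demo

import (
	"errors"
	"testing"

	. "github.com/onsi/gomega"
)

var errNotFound = errors.New("not found")

// isNotFound is the predicate form accepted by MatchError in newer gomega versions.
func isNotFound(err error) bool { return errors.Is(err, errNotFound) }

func TestMatchErrorForms(t *testing.T) {
	g := NewWithT(t)
	err := errNotFound

	g.Expect(err).To(MatchError(errNotFound))               // 1: error value
	g.Expect(err).To(MatchError("not found"))               // 2: exact Error() string
	g.Expect(err).To(MatchError(ContainSubstring("found"))) // 3: string matcher
	g.Expect(err).To(MatchError(isNotFound, "isNotFound"))  // 4: predicate + description
}
```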
+### Async timing interval: timeout is shorter than polling interval [BUG]
+***Note***: Only applied when the `suppress-async-assertion` flag is **not set** *and* the `validate-async-intervals`
+flag **is** set.
+
+***Note***: This rule works on a best-effort basis. It can't find many cases, like consts defined in another
+package, or intervals held in variables.
+
+The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Consistently` functions, or
+using the `WithTimeout` or `Within` methods (timeout), and the `WithPolling` or `ProbeEvery` methods (polling).
+
+This rule checks that the async (`Eventually` or `Consistently`) timeout duration is not shorter than the polling interval.
+
+For example:
+ ```go
+ Eventually(aFunc).WithTimeout(500 * time.Millisecond).WithPolling(10 * time.Second).Should(Succeed())
+ ```
+
+This will probably happen when using the old format:
+ ```go
+ Eventually(aFunc, 500 * time.Millisecond /*timeout*/, 10 * time.Second /*polling*/).Should(Succeed())
+ ```
+
+### Avoid Spec Pollution: Don't Initialize Variables in Container Nodes [BUG/STYLE]
+***Note***: Only applied when the `--forbid-spec-pollution=true` flag is set (disabled by default).
+
+According to the [ginkgo documentation](https://onsi.github.io/ginkgo/#avoid-spec-pollution-dont-initialize-variables-in-container-nodes),
+no variable should be assigned within a container node (`Describe`, `Context`, `When` or their `F`, `P` or `X` forms).
+
+For example:
+```go
+var _ = Describe("description", func(){
+    var x = 10
+    ...
+})
+```
+
+Instead, use `BeforeEach()`; e.g.
+```go
+var _ = Describe("description", func (){
+    var x int
+
+    BeforeEach(func (){
+        x = 10
+    })
+    ...
+})
+```
+
+### Wrong Length Assertion [STYLE]
+The linter finds assertions of the golang built-in `len` function with all kinds of matchers, while there are already
+gomega matchers for these use cases; we want to assert the item, rather than its length.
+
+There are several wrong patterns:
+```go
+Expect(len(x)).To(Equal(0)) // should be: Expect(x).To(BeEmpty())
+Expect(len(x)).To(BeZero()) // should be: Expect(x).To(BeEmpty())
+Expect(len(x)).To(BeNumerically(">", 0)) // should be: Expect(x).ToNot(BeEmpty())
+Expect(len(x)).To(BeNumerically(">=", 1)) // should be: Expect(x).ToNot(BeEmpty())
+Expect(len(x)).To(BeNumerically("==", 0)) // should be: Expect(x).To(BeEmpty())
+Expect(len(x)).To(BeNumerically("!=", 0)) // should be: Expect(x).ToNot(BeEmpty())
+
+Expect(len(x)).To(Equal(1)) // should be: Expect(x).To(HaveLen(1))
+Expect(len(x)).To(BeNumerically("==", 2)) // should be: Expect(x).To(HaveLen(2))
+Expect(len(x)).To(BeNumerically("!=", 3)) // should be: Expect(x).ToNot(HaveLen(3))
+```
+
+It also supports the embedded `Not()` matcher; e.g.
+
+`Ω(len(x)).Should(Not(Equal(4)))` => `Ω(x).ShouldNot(HaveLen(4))`
+
+Or even (double negative):
+
+`Ω(len(x)).To(Not(BeNumerically(">", 0)))` => `Ω(x).To(BeEmpty())`
+
+The output of the linter, when finding issues, looks like this:
+```
+./testdata/src/a/a.go:14:5: ginkgo-linter: wrong length assertion; consider using `Expect("abcd").Should(HaveLen(4))` instead
+./testdata/src/a/a.go:18:5: ginkgo-linter: wrong length assertion; consider using `Expect("").Should(BeEmpty())` instead
+./testdata/src/a/a.go:22:5: ginkgo-linter: wrong length assertion; consider using `Expect("").Should(BeEmpty())` instead
+```
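As an aside, the core of a rule like this is easy to express over the Go AST. The following standalone sketch is not the linter's implementation - it is deliberately simplified (name matching only, no type information, no suggested fixes) - but it shows how an `Expect(len(...))` pattern can be flagged:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package demo

func demo() {
	Expect(len(x)).To(Equal(0))
}`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		call, ok := n.(*ast.CallExpr)
		if !ok {
			return true
		}
		// Looking for Expect(<arg>) where <arg> is itself a len(...) call.
		fun, ok := call.Fun.(*ast.Ident)
		if !ok || fun.Name != "Expect" || len(call.Args) != 1 {
			return true
		}
		if inner, ok := call.Args[0].(*ast.CallExpr); ok {
			if id, ok := inner.Fun.(*ast.Ident); ok && id.Name == "len" {
				fmt.Printf("%s: wrong length assertion\n", fset.Position(call.Pos()))
			}
		}
		return true
	})
}
```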
+### Wrong Cap Assertion [STYLE]
+The linter finds assertions of the golang built-in `cap` function with all kinds of matchers, while there are already
+gomega matchers for these use cases; we want to assert the item, rather than its cap.
+
+There are several wrong patterns:
+```go
+Expect(cap(x)).To(Equal(0)) // should be: Expect(x).To(HaveCap(0))
+Expect(cap(x)).To(BeZero()) // should be: Expect(x).To(HaveCap(0))
+Expect(cap(x)).To(BeNumerically(">", 0)) // should be: Expect(x).ToNot(HaveCap(0))
+Expect(cap(x)).To(BeNumerically("==", 2)) // should be: Expect(x).To(HaveCap(2))
+Expect(cap(x)).To(BeNumerically("!=", 3)) // should be: Expect(x).ToNot(HaveCap(3))
+```
+
+#### Using the `HaveLen(0)` matcher [STYLE]
+The linter will also warn about the `HaveLen(0)` matcher, and will suggest replacing it with `BeEmpty()`.
+
+### Wrong `nil` Assertion [STYLE]
+The linter finds assertions that compare to nil with all kinds of matchers, instead of using the existing `BeNil()`
+matcher; we want to assert the item, rather than a comparison result.
+
+There are several wrong patterns:
+
+```go
+Expect(x == nil).To(Equal(true)) // should be: Expect(x).To(BeNil())
+Expect(nil == x).To(Equal(true)) // should be: Expect(x).To(BeNil())
+Expect(x != nil).To(Equal(true)) // should be: Expect(x).ToNot(BeNil())
+Expect(nil != x).To(Equal(true)) // should be: Expect(x).ToNot(BeNil())
+
+Expect(x == nil).To(BeTrue()) // should be: Expect(x).To(BeNil())
+Expect(x == nil).To(BeFalse()) // should be: Expect(x).ToNot(BeNil())
+```
+It also supports the embedded `Not()` matcher; e.g.
+
+`Ω(x == nil).Should(Not(BeTrue()))` => `Ω(x).ShouldNot(BeNil())`
+
+Or even (double negative):
+
+`Ω(x != nil).Should(Not(BeTrue()))` => `Ω(x).Should(BeNil())`
+
+### Wrong boolean Assertion [STYLE]
+The linter finds assertions using the `Equal` matcher with the values `true` or `false`, instead
+of using the existing `BeTrue()` or `BeFalse()` matchers.
+
+There are several wrong patterns:
+
+```go
+Expect(x).To(Equal(true)) // should be: Expect(x).To(BeTrue())
+Expect(x).To(Equal(false)) // should be: Expect(x).To(BeFalse())
+```
+It also supports the embedded `Not()` matcher; e.g.
+
+`Ω(x).Should(Not(Equal(true)))` => `Ω(x).ShouldNot(BeTrue())`
+
+### Wrong Error Assertion [STYLE]
+The linter finds assertions of errors compared with nil, or asserted to equal nil, or to be nil. The linter suggests
+using `Succeed` for functions or `HaveOccurred` for error values.
+
+There are several wrong patterns:
+
+```go
+Expect(err).To(BeNil()) // should be: Expect(err).ToNot(HaveOccurred())
+Expect(err == nil).To(Equal(true)) // should be: Expect(err).ToNot(HaveOccurred())
+Expect(err == nil).To(BeFalse()) // should be: Expect(err).To(HaveOccurred())
+Expect(err != nil).To(BeTrue()) // should be: Expect(err).To(HaveOccurred())
+Expect(funcReturnsError()).To(BeNil()) // should be: Expect(funcReturnsError()).To(Succeed())
+
+// and so on
+```
+It also supports the embedded `Not()` matcher; e.g.
+
+`Ω(err == nil).Should(Not(BeTrue()))` => `Ω(err).Should(HaveOccurred())`
+
+### Wrong Comparison Assertion [STYLE]
+The linter finds assertions of boolean comparisons, which are already supported by existing gomega matchers.
+
+The linter assumes that when something is compared to a literal or a constant, that value should be used for the assertion,
+and it will do its best to suggest the right assertion expression accordingly.
+
+There are several wrong patterns:
+```go
+var x = 10
+var s = "abcd"
+
+...
+
+Expect(x == 10).Should(BeTrue()) // should be Expect(x).Should(Equal(10))
+Expect(10 == x).Should(BeTrue()) // should be Expect(x).Should(Equal(10))
+Expect(x != 5).Should(Equal(true)) // should be Expect(x).ShouldNot(Equal(5))
+Expect(x != 0).Should(Equal(true)) // should be Expect(x).ShouldNot(BeZero())
+
+Expect(s != "abcd").Should(BeFalse()) // should be Expect(s).Should(Equal("abcd"))
+Expect("abcd" != s).Should(BeFalse()) // should be Expect(s).Should(Equal("abcd"))
+```
+Or non-equal comparisons:
+```go
+Expect(x > 10).To(BeTrue()) // ==> Expect(x).To(BeNumerically(">", 10))
+Expect(x >= 15).To(BeTrue()) // ==> Expect(x).To(BeNumerically(">=", 15))
+Expect(3 > y).To(BeTrue()) // ==> Expect(y).To(BeNumerically("<", 3))
+// and so on ...
+```
+
+This check includes limited support for constant values. For example:
+```go
+const c1 = 5
+
+...
+
+Expect(x1 == c1).Should(BeTrue()) // ==> Expect(x1).Should(Equal(c1))
+Expect(c1 == x1).Should(BeTrue()) // ==> Expect(x1).Should(Equal(c1))
+```
+
+### Don't Allow Using `Expect` with `Should` or `ShouldNot` [STYLE]
+This optional rule forces the usage of the `Expect` method only with the `To`, `ToNot` or `NotTo`
+assertion methods; e.g.
+```go
+Expect("abc").Should(HaveLen(3)) // => Expect("abc").To(HaveLen(3))
+Expect("abc").ShouldNot(BeEmpty()) // => Expect("abc").ToNot(BeEmpty())
+```
+This rule supports auto-fixing.
+
+***This rule is disabled by default***. Use the `--force-expect-to=true` command line flag to enable it.
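When ginkgolinter runs inside golangci-lint rather than as a standalone CLI, these toggles are configured through the linter's settings block instead of flags. A sketch, assuming the setting keys exposed by your golangci-lint version (double-check them against its reference configuration):

```yaml
# .golangci.yml
linters:
  enable:
    - ginkgolinter

linters-settings:
  ginkgolinter:
    forbid-focus-container: true
    force-expect-to: true
```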
+### Async timing interval: multiple timeout or polling intervals [STYLE]
+***Note***: Only applied when the `suppress-async-assertion` flag is **not set** *and* the `validate-async-intervals`
+flag **is** set.
+
+The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Consistently` functions, or
+using the `WithTimeout` or `Within` methods (timeout), and the `WithPolling` or `ProbeEvery` methods (polling).
+
+The linter checks that there is up to one polling argument and up to one timeout argument.
+
+For example:
+
+```go
+// both WithTimeout() and Within()
+Eventually(aFunc).WithTimeout(time.Second * 10).Within(time.Second * 10).WithPolling(time.Millisecond * 500).Should(BeTrue())
+// both a polling argument and the WithPolling() method
+Eventually(aFunc, time.Second*10, time.Millisecond * 500).WithPolling(time.Millisecond * 500).Should(BeTrue())
+```
+
+### Async timing interval: non-time.Duration intervals [STYLE]
+***Note***: Only applied when the `suppress-async-assertion` flag is **not set** *and* the `validate-async-intervals`
+flag **is** set.
+
+gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Consistently):
+* a `time.Duration` value
+* any kind of numeric value (int(8/16/32/64), uint(8/16/32/64) or float(32/64)), as the number of seconds
+* a duration string like `"12s"`
+
+The linter triggers a warning for any duration value that is not of the `time.Duration` type, assuming that this is
+the desired type, given the type of the argument of the newer "WithTimeout", "WithPolling", "Within" and "ProbeEvery"
+methods.
+
+For example:
+ ```go
+ Eventually(func() bool { return true }, "1s").Should(BeTrue())
+ Eventually(context.Background(), func() bool { return true }, time.Second*60, float64(2)).Should(BeTrue())
+ ```
+
+This rule offers a limited auto fix: for integer values, or integer consts, the linter will suggest multiplying the
+value by `time.Second`; e.g.
+```go
+const polling = 1
+Eventually(aFunc, 5, polling)
+```
+will be changed to:
+```go
+Eventually(aFunc, time.Second*5, time.Second*polling)
+```
+## Suppress the linter
+### Suppress warning from command line
+* Use the `--suppress-len-assertion=true` flag to suppress the wrong length and cap assertions warning
+* Use the `--suppress-nil-assertion=true` flag to suppress the wrong nil assertion warning
+* Use the `--suppress-err-assertion=true` flag to suppress the wrong error assertion warning
+* Use the `--suppress-compare-assertion=true` flag to suppress the wrong comparison assertion warning
+* Use the `--suppress-async-assertion=true` flag to suppress the function call in async assertion warning
+* Use the `--forbid-focus-container=true` flag to activate the focus container check (deactivated by default)
+* Use the `--suppress-type-compare-assertion=true` flag to suppress the type compare assertion warning
+* Use the `--allow-havelen-0=true` flag to avoid warnings about `HaveLen(0)`; note: this parameter is only supported from
+  the command line, and not from a comment.
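Several of these flags can be combined in one invocation of the standalone CLI; for example (the combination shown is illustrative):

```shell
# enable the focus-container check, silence the length checks,
# and apply the fix suggestions in place
ginkgolinter --forbid-focus-container=true --suppress-len-assertion=true -fix ./...
```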
+### Suppress warning from the code
+To suppress the wrong length and cap assertions warning, add a comment with (only)
+
+`ginkgo-linter:ignore-len-assert-warning`.
+
+To suppress the wrong nil assertion warning, add a comment with (only)
+
+`ginkgo-linter:ignore-nil-assert-warning`.
+
+To suppress the wrong error assertion warning, add a comment with (only)
+
+`ginkgo-linter:ignore-err-assert-warning`.
+
+To suppress the wrong comparison assertion warning, add a comment with (only)
+
+`ginkgo-linter:ignore-compare-assert-warning`.
+
+To suppress the wrong async assertion warning, add a comment with (only)
+
+`ginkgo-linter:ignore-async-assert-warning`.
+
+To suppress the focus container warning, add a comment with (only)
+
+`ginkgo-linter:ignore-focus-container-warning`
+
+To suppress the different-type comparison warning, add a comment with (only)
+
+`ginkgo-linter:ignore-type-compare-warning`
+
+Notice that this comment will not work for an anonymous variable container like
+```go
+// ginkgo-linter:ignore-focus-container-warning (not working!!)
+var _ = FDescribe(...)
+```
+In this case, use the file comment (see below).
+
+There are two options to use these comments:
+1. If the comment is at the top of the file, the warning is suppressed for the whole file; e.g.:
+   ```go
+   package mypackage
+
+   // ginkgo-linter:ignore-len-assert-warning
+
+   import (
+   	. "github.com/onsi/ginkgo/v2"
+   	. "github.com/onsi/gomega"
+   )
+
+   var _ = Describe("my test", func() {
+   	It("should do something", func() {
+   		Expect(len("abc")).Should(Equal(3)) // nothing in this file will trigger the warning
+   	})
+   })
+   ```
+
+2. If the comment is right before an offending expression, the warning is suppressed for this expression only; for example:
+   ```golang
+   It("should test something", func() {
+   	// ginkgo-linter:ignore-nil-assert-warning
+   	Expect(x == nil).Should(BeTrue()) // this line will not trigger the warning
+   	Expect(x == nil).Should(BeTrue()) // this line will trigger the warning
+   })
+   ```
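The analyzer defined below is a standard `go/analysis` analyzer, so a minimal standalone driver can be built with `singlechecker`, which also supplies the flag parsing and the `-fix` plumbing. A sketch, not the project's own `cmd/ginkgolinter` implementation:

```go
package main

import (
	"github.com/nunnatsa/ginkgolinter"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	// singlechecker turns a single *analysis.Analyzer into a CLI,
	// including the -fix flag used by the suggestions above.
	singlechecker.Main(ginkgolinter.NewAnalyzer())
}
```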
diff --git a/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go b/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go
new file mode 100644
index 0000000000..edff57acd1
--- /dev/null
+++ b/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go
@@ -0,0 +1,58 @@
+package ginkgolinter
+
+import (
+	"flag"
+	"fmt"
+
+	"golang.org/x/tools/go/analysis"
+
+	"github.com/nunnatsa/ginkgolinter/linter"
+	"github.com/nunnatsa/ginkgolinter/types"
+	"github.com/nunnatsa/ginkgolinter/version"
+)
+
+// NewAnalyzerWithConfig returns an Analyzer.
+func NewAnalyzerWithConfig(config *types.Config) *analysis.Analyzer {
+	theLinter := linter.NewGinkgoLinter(config)
+
+	return &analysis.Analyzer{
+		Name: "ginkgolinter",
+		Doc:  fmt.Sprintf(doc, version.Version()),
+		Run:  theLinter.Run,
+	}
+}
+
+// NewAnalyzer returns an Analyzer - the package interface with nogo
+func NewAnalyzer() *analysis.Analyzer {
+	config := &types.Config{
+		SuppressLen:     false,
+		SuppressNil:     false,
+		SuppressErr:     false,
+		SuppressCompare: false,
+		ForbidFocus:     false,
+		AllowHaveLen0:   false,
+		ForceExpectTo:   false,
+	}
+
+	a := NewAnalyzerWithConfig(config)
+
+	var ignored bool
+	a.Flags.Init("ginkgolinter", flag.ExitOnError)
+	a.Flags.Var(&config.SuppressLen, "suppress-len-assertion", "Suppress warning for wrong length assertions")
+	a.Flags.Var(&config.SuppressNil, "suppress-nil-assertion", "Suppress warning for wrong nil assertions")
+	a.Flags.Var(&config.SuppressErr, "suppress-err-assertion", "Suppress warning for wrong error assertions")
+	a.Flags.Var(&config.SuppressCompare, "suppress-compare-assertion", "Suppress warning for wrong comparison assertions")
+	a.Flags.Var(&config.SuppressAsync, "suppress-async-assertion", "Suppress warning for function call in async assertion, like Eventually")
+	a.Flags.Var(&config.ValidateAsyncIntervals, "validate-async-intervals", "best effort validation of async intervals (timeout and polling); ignored if the suppress-async-assertion flag is true")
+	a.Flags.Var(&config.SuppressTypeCompare, "suppress-type-compare-assertion", "Suppress warning for comparing values from different types, like int32 and uint32")
+	a.Flags.Var(&config.AllowHaveLen0, "allow-havelen-0", "Do not warn for HaveLen(0); default = false")
+	a.Flags.Var(&config.ForceExpectTo, "force-expect-to", "force using `Expect` with `To`, `ToNot` or `NotTo`. reject using `Expect` with `Should` or `ShouldNot`; default = false (not forced)")
+	a.Flags.BoolVar(&ignored, "suppress-focus-container", true, "Suppress warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt. Deprecated and ignored: use --forbid-focus-container instead")
+	a.Flags.Var(&config.ForbidFocus, "forbid-focus-container", "trigger a warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt; default = false.")
+	a.Flags.Var(&config.ForbidSpecPollution, "forbid-spec-pollution", "trigger a warning for variable assignments in ginkgo containers like Describe, Context and When, instead of in BeforeEach(); default = false.")
+
+	return a
+}
+
+// Analyzer is the interface to go_vet
+var Analyzer = NewAnalyzer()
diff --git a/vendor/github.com/nunnatsa/ginkgolinter/doc.go b/vendor/github.com/nunnatsa/ginkgolinter/doc.go
new file mode 100644
index 0000000000..dd9ecf58a8
--- /dev/null
+++ b/vendor/github.com/nunnatsa/ginkgolinter/doc.go
@@ -0,0 +1,99 @@
+package ginkgolinter
+
+const doc = `enforces standards of using ginkgo and gomega
+
+or
+	ginkgolinter version
+
+version: %s
+
+currently, the linter searches for the following:
+* trigger a warning when using Eventually or Consistently with a function call. This is in order to prevent the case when
+  using a function call instead of a function. A function call returns a value only once, and so the original value
+  is tested again and again and is never changed. [Bug]
+
+* trigger a warning when comparing a pointer to a value. [Bug]
+
+* trigger a warning for a missing assertion method: [Bug]
+	Eventually(checkSomething)
+
+* trigger a warning when a ginkgo focus container (FDescribe, FContext, FWhen or FIt) is found. [Bug]
+
+* validate the MatchError gomega matcher [Bug]
+
+* trigger a warning when using the Equal or the BeIdentical matcher with two different types, as these matchers will
+  fail at runtime.
+
+* async timing interval: timeout is shorter than polling interval [Bug]
+For example:
+	Eventually(aFunc).WithTimeout(500 * time.Millisecond).WithPolling(10 * time.Second).Should(Succeed())
+This will probably happen when using the old format:
+	Eventually(aFunc, 500 * time.Millisecond, 10 * time.Second).Should(Succeed())
+
+* reject variable assignments in ginkgo containers [Bug/Style]:
+For example:
+	var _ = Describe("description", func(){
+		var x = 10
+	})
+
+Should use BeforeEach instead; e.g.
+	var _ = Describe("description", func(){
+		var x int
+		BeforeEach(func(){
+			x = 10
+		})
+	})
+
+* wrong length assertions. We want to assert the item rather than its length. [Style]
+For example:
+	Expect(len(x)).Should(Equal(1))
+This should be replaced with:
+	Expect(x).Should(HaveLen(1))
+
+* wrong cap assertions. We want to assert the item rather than its cap. [Style]
+For example:
+	Expect(cap(x)).Should(Equal(1))
+This should be replaced with:
+	Expect(x).Should(HaveCap(1))
+
+* wrong nil assertions. We want to assert the item rather than a comparison result. [Style]
+For example:
+	Expect(x == nil).Should(BeTrue())
+This should be replaced with:
+	Expect(x).Should(BeNil())
+
+* wrong error assertions. For example: [Style]
+	Expect(err == nil).Should(BeTrue())
+This should be replaced with:
+	Expect(err).ShouldNot(HaveOccurred())
+
+* wrong boolean comparison, for example: [Style]
+	Expect(x == 8).Should(BeTrue())
+This should be replaced with:
+	Expect(x).Should(Equal(8))
+
+* replaces Equal(true/false) with BeTrue()/BeFalse() [Style]
+
+* replaces HaveLen(0) with BeEmpty() [Style]
+
+* replaces Expect(...).Should(...) with Expect(...).To() [Style]
+
+* async timing interval: multiple timeout or polling intervals [Style]
+For example:
+	Eventually(context.Background(), func() bool { return true }, time.Second*10).WithTimeout(time.Second * 10).WithPolling(time.Millisecond * 500).Should(BeTrue())
+	Eventually(context.Background(), func() bool { return true }, time.Second*10).Within(time.Second * 10).WithPolling(time.Millisecond * 500).Should(BeTrue())
+	Eventually(func() bool { return true }, time.Second*10, 500*time.Millisecond).WithPolling(time.Millisecond * 500).Should(BeTrue())
+	Eventually(func() bool { return true }, time.Second*10, 500*time.Millisecond).ProbeEvery(time.Millisecond * 500).Should(BeTrue())
+
+* async timing interval: non-time.Duration intervals [Style]
+gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Consistently):
+	* time.Duration
+	* any kind of numeric value, as number of seconds
+	* duration string like "12s"
+The linter triggers a warning for any duration value that is not of the time.Duration type, assuming that this is
+the desired type, given the type of the argument of the newer "WithTimeout", "WithPolling", "Within" and "ProbeEvery"
+methods.
+For example:
+	Eventually(context.Background(), func() bool { return true }, "1s").Should(BeTrue())
+	Eventually(context.Background(), func() bool { return true }, time.Second*60, 15).Should(BeTrue())
+`
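For the second example above, the auto-fix described in the README would rewrite the bare `15` by multiplying it with `time.Second`. A before/after sketch (fragment only, assuming the usual ginkgo/gomega context around it):

```go
// before: the linter warns about the bare integer polling interval (15)
Eventually(context.Background(), func() bool { return true }, time.Second*60, 15).Should(BeTrue())

// after the suggested fix: the integer is multiplied by time.Second
Eventually(context.Background(), func() bool { return true }, time.Second*60, time.Second*15).Should(BeTrue())
```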
diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handler.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handler.go
new file mode 100644
index 0000000000..f10d831840
--- /dev/null
+++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handler.go
@@ -0,0 +1,129 @@
+package ginkgohandler
+
+import (
+	"go/ast"
+)
+
+const (
+	importPath   = `"github.com/onsi/ginkgo"`
+	importPathV2 = `"github.com/onsi/ginkgo/v2"`
+
+	focusSpec = "Focus"
+)
+
+// Handler provides different handling, depending on the way ginkgo was imported: whether
+// it was imported with the "." name, with a custom name, or without any name.
+type Handler interface {
+	GetFocusContainerName(*ast.CallExpr) (bool, *ast.Ident)
+	IsWrapContainer(*ast.CallExpr) bool
+	IsFocusSpec(ident ast.Expr) bool
+}
+
+// GetGinkgoHandler returns a ginkgo handler according to the way ginkgo was imported in the specific file
+func GetGinkgoHandler(file *ast.File) Handler {
+	for _, imp := range file.Imports {
+		if imp.Path.Value != importPath && imp.Path.Value != importPathV2 {
+			continue
+		}
+
+		switch name := imp.Name.String(); {
+		case name == ".":
+			return dotHandler{}
+		case name == "<nil>": // import with no local name; (*ast.Ident).String returns "<nil>" for a nil identifier
+			return nameHandler("ginkgo")
+		default:
+			return nameHandler(name)
+		}
+	}
+
+	return nil // no ginkgo import; this file does not use ginkgo
+}
+
+// dotHandler is used when importing ginkgo with dot; i.e.
+// import . "github.com/onsi/ginkgo"
+type dotHandler struct{}
+
+func (h dotHandler) GetFocusContainerName(exp *ast.CallExpr) (bool, *ast.Ident) {
+	if fun, ok := exp.Fun.(*ast.Ident); ok {
+		return isFocusContainer(fun.Name), fun
+	}
+	return false, nil
+}
+
+func (h dotHandler) IsWrapContainer(exp *ast.CallExpr) bool {
+	if fun, ok := exp.Fun.(*ast.Ident); ok {
+		return IsWrapContainer(fun.Name)
+	}
+	return false
+}
+
+func (h dotHandler) IsFocusSpec(exp ast.Expr) bool {
+	id, ok := exp.(*ast.Ident)
+	return ok && id.Name == focusSpec
+}
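To make the dispatch above concrete, here is a small standalone program that mirrors the same selection logic (it does not import this internal package; the source snippet is illustrative) and classifies how ginkgo is imported in a file:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

const src = `package demo

import g "github.com/onsi/ginkgo/v2"

var _ = g.Describe("...", func() {})
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	for _, imp := range f.Imports {
		if imp.Path.Value != `"github.com/onsi/ginkgo"` && imp.Path.Value != `"github.com/onsi/ginkgo/v2"` {
			continue
		}
		switch name := imp.Name.String(); name {
		case ".":
			fmt.Println("dot import: handler matches bare identifiers like Describe")
		case "<nil>": // no local name; the package is referenced as "ginkgo"
			fmt.Println("plain import: handler matches ginkgo.Describe")
		default:
			fmt.Printf("named import: handler matches %s.Describe\n", name)
		}
	}
}
```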
+// nameHandler is used when importing ginkgo without a name; i.e.
+// import "github.com/onsi/ginkgo"
+//
+// or with a custom name; e.g.
+// import customname "github.com/onsi/ginkgo"
+type nameHandler string
+
+func (h nameHandler) GetFocusContainerName(exp *ast.CallExpr) (bool, *ast.Ident) {
+	if sel, ok := exp.Fun.(*ast.SelectorExpr); ok {
+		if id, ok := sel.X.(*ast.Ident); ok && id.Name == string(h) {
+			return isFocusContainer(sel.Sel.Name), sel.Sel
+		}
+	}
+	return false, nil
+}
+
+func (h nameHandler) IsWrapContainer(exp *ast.CallExpr) bool {
+	if sel, ok := exp.Fun.(*ast.SelectorExpr); ok {
+		if id, ok := sel.X.(*ast.Ident); ok && id.Name == string(h) {
+			return IsWrapContainer(sel.Sel.Name)
+		}
+	}
+	return false
+}
+
+func (h nameHandler) IsFocusSpec(exp ast.Expr) bool {
+	if selExp, ok := exp.(*ast.SelectorExpr); ok {
+		if x, ok := selExp.X.(*ast.Ident); ok && x.Name == string(h) {
+			return selExp.Sel.Name == focusSpec
+		}
+	}
+
+	return false
+}
+
+func isFocusContainer(name string) bool {
+	switch name {
+	case "FDescribe", "FContext", "FWhen", "FIt", "FDescribeTable", "FEntry":
+		return true
+	}
+	return false
+}
+
+func IsContainer(name string) bool {
+	switch name {
+	case "It", "When", "Context", "Describe", "DescribeTable", "Entry",
+		"PIt", "PWhen", "PContext", "PDescribe", "PDescribeTable", "PEntry",
+		"XIt", "XWhen", "XContext", "XDescribe", "XDescribeTable", "XEntry":
+		return true
+	}
+	return isFocusContainer(name)
+}
+
+func IsWrapContainer(name string) bool {
+	switch name {
+	case "When", "Context", "Describe",
+		"FWhen", "FContext", "FDescribe",
+		"PWhen", "PContext", "PDescribe",
+		"XWhen", "XContext", "XDescribe":
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go
new file mode 100644
index 0000000000..4290e73736
--- /dev/null
+++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go
@@ -0,0 +1,241 @@
+package gomegahandler
+
+import (
+	"go/ast"
+	"go/token"
+)
+
+const (
+	importPath = `"github.com/onsi/gomega"`
+)
+
+// Handler provides different handling, depending on the way gomega was imported: whether
+// it was imported with the "." name, with a custom name, or without any name.
+type Handler interface {
+	// GetActualFuncName returns the name of the gomega function, e.g. `Expect`
+	GetActualFuncName(*ast.CallExpr) (string, bool)
+	// ReplaceFunction replaces the function with another one, for fix suggestions
+	ReplaceFunction(*ast.CallExpr, *ast.Ident)
+
+	getDefFuncName(expr *ast.CallExpr) string
+
+	getFieldType(field *ast.Field) string
+
+	GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr
+}
+
+// GetGomegaHandler returns a gomega handler according to the way gomega was imported in the specific file
+func GetGomegaHandler(file *ast.File) Handler {
+	for _, imp := range file.Imports {
+		if imp.Path.Value != importPath {
+			continue
+		}
+
+		switch name := imp.Name.String(); {
+		case name == ".":
+			return dotHandler{}
+		case name == "<nil>": // import with no local name; (*ast.Ident).String returns "<nil>" for a nil identifier
+			return nameHandler("gomega")
+		default:
+			return nameHandler(name)
+		}
+	}
+
+	return nil // no gomega import; this file does not use gomega
+}
+
+// dotHandler is used when importing gomega with dot; i.e.
+// import . "github.com/onsi/gomega"
+type dotHandler struct{}
+
+// GetActualFuncName returns the name of the gomega function, e.g.
`Expect` +func (h dotHandler) GetActualFuncName(expr *ast.CallExpr) (string, bool) { + switch actualFunc := expr.Fun.(type) { + case *ast.Ident: + return actualFunc.Name, true + case *ast.SelectorExpr: + if isGomegaVar(actualFunc.X, h) { + return actualFunc.Sel.Name, true + } + + if x, ok := actualFunc.X.(*ast.CallExpr); ok { + return h.GetActualFuncName(x) + } + + case *ast.CallExpr: + return h.GetActualFuncName(actualFunc) + } + return "", false +} + +// ReplaceFunction replaces the function with another one, for fix suggestions +func (dotHandler) ReplaceFunction(caller *ast.CallExpr, newExpr *ast.Ident) { + switch f := caller.Fun.(type) { + case *ast.Ident: + caller.Fun = newExpr + case *ast.SelectorExpr: + f.Sel = newExpr + } +} + +func (dotHandler) getDefFuncName(expr *ast.CallExpr) string { + if f, ok := expr.Fun.(*ast.Ident); ok { + return f.Name + } + return "" +} + +func (dotHandler) getFieldType(field *ast.Field) string { + switch t := field.Type.(type) { + case *ast.Ident: + return t.Name + case *ast.StarExpr: + if name, ok := t.X.(*ast.Ident); ok { + return name.Name + } + } + return "" +} + +// nameHandler is used when importing gomega without name; i.e. +// import "github.com/onsi/gomega" +// +// or with a custom name; e.g. +// import customname "github.com/onsi/gomega" +type nameHandler string + +// GetActualFuncName returns the name of the gomega function, e.g. `Expect` +func (g nameHandler) GetActualFuncName(expr *ast.CallExpr) (string, bool) { + selector, ok := expr.Fun.(*ast.SelectorExpr) + if !ok { + return "", false + } + + switch x := selector.X.(type) { + case *ast.Ident: + if x.Name != string(g) { + if !isGomegaVar(x, g) { + return "", false + } + } + + return selector.Sel.Name, true + + case *ast.CallExpr: + return g.GetActualFuncName(x) + } + + return "", false +} + +// ReplaceFunction replaces the function with another one, for fix suggestions +func (nameHandler) ReplaceFunction(caller *ast.CallExpr, newExpr *ast.Ident) { + caller.Fun.(*ast.SelectorExpr).Sel = newExpr +} + +func (g nameHandler) getDefFuncName(expr *ast.CallExpr) string { + if sel, ok := expr.Fun.(*ast.SelectorExpr); ok { + if f, ok := sel.X.(*ast.Ident); ok && f.Name == string(g) { + return sel.Sel.Name + } + } + return "" +} + +func (g nameHandler) getFieldType(field *ast.Field) string { + switch t := field.Type.(type) { + case *ast.SelectorExpr: + if id, ok := t.X.(*ast.Ident); ok { + if id.Name == string(g) { + return t.Sel.Name + } + } + case *ast.StarExpr: + if sel, ok := t.X.(*ast.SelectorExpr); ok { + if x, ok := sel.X.(*ast.Ident); ok && x.Name == string(g) { + return sel.Sel.Name + } + } + + } + return "" +} + +func isGomegaVar(x ast.Expr, handler Handler) bool { + if i, ok := x.(*ast.Ident); ok { + if i.Obj != nil && i.Obj.Kind == ast.Var { + switch decl := i.Obj.Decl.(type) { + case *ast.AssignStmt: + if decl.Tok == token.DEFINE { + if defFunc, ok := decl.Rhs[0].(*ast.CallExpr); ok { + fName := handler.getDefFuncName(defFunc) + switch fName { + case "NewGomega", "NewWithT", "NewGomegaWithT": + return true + } + } + } + case *ast.Field: + name := handler.getFieldType(decl) + switch name { + case "Gomega", "WithT", "GomegaWithT": + return true + } + } + } + } + return false +} + +func (h dotHandler) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr { + actualExpr, ok := assertionFunc.X.(*ast.CallExpr) + if !ok { + return nil + } + + switch fun := actualExpr.Fun.(type) { + case *ast.Ident: + return actualExpr + case *ast.SelectorExpr: + if isHelperMethods(fun.Sel.Name) { + 
return h.GetActualExpr(fun) + } + if isGomegaVar(fun.X, h) { + return actualExpr + } + } + return nil +} + +func (g nameHandler) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr { + actualExpr, ok := assertionFunc.X.(*ast.CallExpr) + if !ok { + return nil + } + + switch fun := actualExpr.Fun.(type) { + case *ast.Ident: + return actualExpr + case *ast.SelectorExpr: + if x, ok := fun.X.(*ast.Ident); ok && x.Name == string(g) { + return actualExpr + } + if isHelperMethods(fun.Sel.Name) { + return g.GetActualExpr(fun) + } + + if isGomegaVar(fun.X, g) { + return actualExpr + } + } + return nil +} + +func isHelperMethods(funcName string) bool { + switch funcName { + case "WithOffset", "WithTimeout", "WithPolling", "Within", "ProbeEvery", "WithContext", "WithArguments", "MustPassRepeatedly": + return true + } + + return false +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/interfaces/interfaces.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/interfaces/interfaces.go new file mode 100644 index 0000000000..dafeacd4ff --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/interfaces/interfaces.go @@ -0,0 +1,76 @@ +package interfaces + +import ( + "go/token" + gotypes "go/types" +) + +var ( + errorType *gotypes.Interface + gomegaMatcherType *gotypes.Interface +) + +func init() { + errorType = gotypes.Universe.Lookup("error").Type().Underlying().(*gotypes.Interface) + gomegaMatcherType = generateTheGomegaMatcherInfType() +} + +// generateTheGomegaMatcherInfType generates a types.Interface instance that represents the +// GomegaMatcher interface. +// The original code is (copied from https://github.com/nunnatsa/ginkgolinter/blob/8fdd05eee922578d4699f49d267001c01e0b9f1e/testdata/src/a/vendor/github.com/onsi/gomega/types/types.go) +// +// type GomegaMatcher interface { +// Match(actual interface{}) (success bool, err error) +// FailureMessage(actual interface{}) (message string) +// NegatedFailureMessage(actual interface{}) (message string) +// } +func generateTheGomegaMatcherInfType() *gotypes.Interface { + err := gotypes.Universe.Lookup("error").Type() + bl := gotypes.Typ[gotypes.Bool] + str := gotypes.Typ[gotypes.String] + anyType := gotypes.Universe.Lookup("any").Type() + + return gotypes.NewInterfaceType([]*gotypes.Func{ + // Match(actual interface{}) (success bool, err error) + gotypes.NewFunc(token.NoPos, nil, "Match", gotypes.NewSignatureType( + nil, nil, nil, + gotypes.NewTuple( + gotypes.NewVar(token.NoPos, nil, "actual", anyType), + ), + gotypes.NewTuple( + gotypes.NewVar(token.NoPos, nil, "", bl), + gotypes.NewVar(token.NoPos, nil, "", err), + ), false), + ), + // FailureMessage(actual interface{}) (message string) + gotypes.NewFunc(token.NoPos, nil, "FailureMessage", gotypes.NewSignatureType( + nil, nil, nil, + gotypes.NewTuple( + gotypes.NewVar(token.NoPos, nil, "", anyType), + ), + gotypes.NewTuple( + gotypes.NewVar(token.NoPos, nil, "", str), + ), + false), + ), + //NegatedFailureMessage(actual interface{}) (message string) + gotypes.NewFunc(token.NoPos, nil, "NegatedFailureMessage", gotypes.NewSignatureType( + nil, nil, nil, + gotypes.NewTuple( + gotypes.NewVar(token.NoPos, nil, "", anyType), + ), + gotypes.NewTuple( + gotypes.NewVar(token.NoPos, nil, "", str), + ), + false), + ), + }, nil) +} + +func ImplementsError(t gotypes.Type) bool { + return gotypes.Implements(t, errorType) +} + +func ImplementsGomegaMatcher(t gotypes.Type) bool { + return gotypes.Implements(t, gomegaMatcherType) +} diff --git 
a/vendor/github.com/nunnatsa/ginkgolinter/internal/intervals/intervals.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/intervals/intervals.go new file mode 100644 index 0000000000..b8166bdb21 --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/intervals/intervals.go @@ -0,0 +1,285 @@ +package intervals + +import ( + "errors" + "go/ast" + "go/constant" + "go/token" + gotypes "go/types" + "strconv" + "time" + + "golang.org/x/tools/go/analysis" + + "github.com/nunnatsa/ginkgolinter/internal/gomegahandler" + "github.com/nunnatsa/ginkgolinter/internal/reports" +) + +type noDurationIntervalErr struct { + value string +} + +func (err noDurationIntervalErr) Error() string { + return "only use time.Duration for timeout and polling in Eventually() or Consistently()" +} + +func CheckIntervals(pass *analysis.Pass, expr *ast.CallExpr, actualExpr *ast.CallExpr, reportBuilder *reports.Builder, handler gomegahandler.Handler, timePkg string, funcIndex int) { + var ( + timeout time.Duration + polling time.Duration + err error + ) + + timeoutOffset := funcIndex + 1 + if len(actualExpr.Args) > timeoutOffset { + timeout, err = getDuration(pass, actualExpr.Args[timeoutOffset], timePkg) + if err != nil { + suggestFix := false + if tryFixIntDuration(expr, err, handler, timePkg, timeoutOffset) { + suggestFix = true + } + reportBuilder.AddIssue(suggestFix, err.Error()) + } + pollingOffset := funcIndex + 2 + if len(actualExpr.Args) > pollingOffset { + polling, err = getDuration(pass, actualExpr.Args[pollingOffset], timePkg) + if err != nil { + suggestFix := false + if tryFixIntDuration(expr, err, handler, timePkg, pollingOffset) { + suggestFix = true + } + reportBuilder.AddIssue(suggestFix, err.Error()) + } + } + } + + selExp := expr.Fun.(*ast.SelectorExpr) + for { + call, ok := selExp.X.(*ast.CallExpr) + if !ok { + break + } + + fun, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + break + } + + switch fun.Sel.Name { + case "WithTimeout", "Within": + if timeout != 0 { + reportBuilder.AddIssue(false, "timeout defined more than once") + } else if len(call.Args) == 1 { + timeout, err = getDurationFromValue(pass, call.Args[0], timePkg) + if err != nil { + reportBuilder.AddIssue(false, err.Error()) + } + } + + case "WithPolling", "ProbeEvery": + if polling != 0 { + reportBuilder.AddIssue(false, "polling defined more than once") + } else if len(call.Args) == 1 { + polling, err = getDurationFromValue(pass, call.Args[0], timePkg) + if err != nil { + reportBuilder.AddIssue(false, err.Error()) + } + } + } + + selExp = fun + } + + if timeout != 0 && polling != 0 && timeout < polling { + reportBuilder.AddIssue(false, "timeout must not be shorter than the polling interval") + } +} + +func tryFixIntDuration(expr *ast.CallExpr, err error, handler gomegahandler.Handler, timePkg string, offset int) bool { + suggestFix := false + var durErr noDurationIntervalErr + if errors.As(err, &durErr) { + if len(durErr.value) > 0 { + actualExpr := handler.GetActualExpr(expr.Fun.(*ast.SelectorExpr)) + var newArg ast.Expr + second := &ast.SelectorExpr{ + Sel: ast.NewIdent("Second"), + X: ast.NewIdent(timePkg), + } + if durErr.value == "1" { + newArg = second + } else { + newArg = &ast.BinaryExpr{ + X: second, + Op: token.MUL, + Y: actualExpr.Args[offset], + } + } + actualExpr.Args[offset] = newArg + suggestFix = true + } + } + + return suggestFix +} + +func getDuration(pass *analysis.Pass, interval ast.Expr, timePkg string) (time.Duration, error) { + argType := pass.TypesInfo.TypeOf(interval) + if durType, ok := 
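To make the interval checks concrete: CheckIntervals walks both the positional intervals of Eventually/Consistently and the chained WithTimeout/WithPolling (or Within/ProbeEvery) calls. A sketch of the cases it reports, as hypothetical test code (poll is a placeholder):

    g := NewWithT(t)
    poll := func() string { return "ready" }

    // Flagged: bare ints where a time.Duration is expected. The suggested
    // fix built by tryFixIntDuration multiplies the literal by time.Second:
    //   g.Eventually(poll, 10, 2).Should(Equal("ready"))
    g.Eventually(poll, time.Second*10, time.Second*2).Should(Equal("ready"))

    // Also flagged, without a fix: a timeout shorter than the polling interval.
    //   g.Eventually(poll).WithTimeout(time.Second).WithPolling(time.Minute).Should(Equal("ready"))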
argType.(*gotypes.Named); ok { + if durType.Obj().Name() == "Duration" && durType.Obj().Pkg().Name() == "time" { + return getDurationFromValue(pass, interval, timePkg) + } + } + + value := "" + switch val := interval.(type) { + case *ast.BasicLit: + if val.Kind == token.INT { + value = val.Value + } + case *ast.Ident: + i, err := getConstDuration(pass, val, timePkg) + if err != nil || i == 0 { + return 0, nil + } + value = val.Name + } + + return 0, noDurationIntervalErr{value: value} +} + +func getDurationFromValue(pass *analysis.Pass, interval ast.Expr, timePkg string) (time.Duration, error) { + switch dur := interval.(type) { + case *ast.SelectorExpr: + ident, ok := dur.X.(*ast.Ident) + if ok { + if ident.Name == timePkg { + return getTimeDurationValue(dur) + } + return getDurationFromValue(pass, dur.Sel, timePkg) + } + case *ast.BinaryExpr: + return getBinaryExprDuration(pass, dur, timePkg) + + case *ast.Ident: + return getConstDuration(pass, dur, timePkg) + } + + return 0, nil +} + +func getConstDuration(pass *analysis.Pass, ident *ast.Ident, timePkg string) (time.Duration, error) { + o := pass.TypesInfo.ObjectOf(ident) + if o != nil { + if c, ok := o.(*gotypes.Const); ok { + if c.Val().Kind() == constant.Int { + i, err := strconv.Atoi(c.Val().String()) + if err != nil { + return 0, nil + } + return time.Duration(i), nil + } + } + } + + if ident.Obj != nil && ident.Obj.Kind == ast.Con && ident.Obj.Decl != nil { + if vals, ok := ident.Obj.Decl.(*ast.ValueSpec); ok { + if len(vals.Values) == 1 { + switch val := vals.Values[0].(type) { + case *ast.BasicLit: + if val.Kind == token.INT { + i, err := strconv.Atoi(val.Value) + if err != nil { + return 0, nil + } + return time.Duration(i), nil + } + return 0, nil + case *ast.BinaryExpr: + return getBinaryExprDuration(pass, val, timePkg) + } + } + } + } + + return 0, nil +} + +func getTimeDurationValue(dur *ast.SelectorExpr) (time.Duration, error) { + switch dur.Sel.Name { + case "Nanosecond": + return time.Nanosecond, nil + case "Microsecond": + return time.Microsecond, nil + case "Millisecond": + return time.Millisecond, nil + case "Second": + return time.Second, nil + case "Minute": + return time.Minute, nil + case "Hour": + return time.Hour, nil + default: + return 0, errors.New("unknown duration value") // should never happen + } +} + +func getBinaryExprDuration(pass *analysis.Pass, expr *ast.BinaryExpr, timePkg string) (time.Duration, error) { + x, err := getBinaryDurValue(pass, expr.X, timePkg) + if err != nil || x == 0 { + return 0, nil + } + y, err := getBinaryDurValue(pass, expr.Y, timePkg) + if err != nil || y == 0 { + return 0, nil + } + + switch expr.Op { + case token.ADD: + return x + y, nil + case token.SUB: + val := x - y + if val > 0 { + return val, nil + } + return 0, nil + case token.MUL: + return x * y, nil + case token.QUO: + if y == 0 { + return 0, nil + } + return x / y, nil + case token.REM: + if y == 0 { + return 0, nil + } + return x % y, nil + default: + return 0, nil + } +} + +func getBinaryDurValue(pass *analysis.Pass, expr ast.Expr, timePkg string) (time.Duration, error) { + switch x := expr.(type) { + case *ast.SelectorExpr: + return getDurationFromValue(pass, x, timePkg) + case *ast.BinaryExpr: + return getBinaryExprDuration(pass, x, timePkg) + case *ast.BasicLit: + if x.Kind == token.INT { + val, err := strconv.Atoi(x.Value) + if err != nil { + return 0, err + } + return time.Duration(val), nil + } + case *ast.ParenExpr: + return getBinaryDurValue(pass, x.X, timePkg) + + case *ast.Ident: + return 
getConstDuration(pass, x, timePkg) + } + + return 0, nil +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/reports/report-builder.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/reports/report-builder.go new file mode 100644 index 0000000000..c7f931ca75 --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/reports/report-builder.go @@ -0,0 +1,98 @@ +package reports + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "strings" + + "golang.org/x/tools/go/analysis" +) + +type Builder struct { + pos token.Pos + end token.Pos + oldExpr string + issues []string + fixOffer string + suggestFix bool +} + +func NewBuilder(fset *token.FileSet, oldExpr ast.Expr) *Builder { + b := &Builder{ + pos: oldExpr.Pos(), + end: oldExpr.End(), + oldExpr: goFmt(fset, oldExpr), + suggestFix: false, + } + + return b +} + +func (b *Builder) AddIssue(suggestFix bool, issue string, args ...any) { + if len(args) > 0 { + issue = fmt.Sprintf(issue, args...) + } + b.issues = append(b.issues, issue) + + if suggestFix { + b.suggestFix = true + } +} + +func (b *Builder) SetFixOffer(fset *token.FileSet, fixOffer ast.Expr) { + if offer := goFmt(fset, fixOffer); offer != b.oldExpr { + b.fixOffer = offer + } +} + +func (b *Builder) HasReport() bool { + return len(b.issues) > 0 +} + +func (b *Builder) Build() analysis.Diagnostic { + diagnostic := analysis.Diagnostic{ + Pos: b.pos, + Message: b.getMessage(), + } + + if b.suggestFix && len(b.fixOffer) > 0 { + diagnostic.SuggestedFixes = []analysis.SuggestedFix{ + { + Message: fmt.Sprintf("should replace %s with %s", b.oldExpr, b.fixOffer), + TextEdits: []analysis.TextEdit{ + { + Pos: b.pos, + End: b.end, + NewText: []byte(b.fixOffer), + }, + }, + }, + } + } + + return diagnostic +} + +func goFmt(fset *token.FileSet, x ast.Expr) string { + var b bytes.Buffer + _ = printer.Fprint(&b, fset, x) + return b.String() +} + +func (b *Builder) getMessage() string { + sb := strings.Builder{} + sb.WriteString("ginkgo-linter: ") + if len(b.issues) > 1 { + sb.WriteString("multiple issues: ") + } + sb.WriteString(strings.Join(b.issues, "; ")) + + if b.suggestFix && len(b.fixOffer) != 0 { + sb.WriteString(fmt.Sprintf(". 
Consider using `%s` instead", b.fixOffer)) + } + + return sb.String() +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/reverseassertion/reverse_assertion.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/reverseassertion/reverse_assertion.go new file mode 100644 index 0000000000..1dbd898106 --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/reverseassertion/reverse_assertion.go @@ -0,0 +1,42 @@ +package reverseassertion + +import "go/token" + +var reverseLogicAssertions = map[string]string{ + "To": "ToNot", + "ToNot": "To", + "NotTo": "To", + "Should": "ShouldNot", + "ShouldNot": "Should", +} + +// ChangeAssertionLogic gets a gomega assertion function name and returns the reverse-logic function name +func ChangeAssertionLogic(funcName string) string { + if revFunc, ok := reverseLogicAssertions[funcName]; ok { + return revFunc + } + return funcName +} + +func IsNegativeLogic(funcName string) bool { + switch funcName { + case "ToNot", "NotTo", "ShouldNot": + return true + } + return false +} + +var reverseCompareOperators = map[token.Token]token.Token{ + token.LSS: token.GTR, + token.GTR: token.LSS, + token.LEQ: token.GEQ, + token.GEQ: token.LEQ, +} + +// ChangeCompareOperator returns the reversed comparison operator +func ChangeCompareOperator(op token.Token) token.Token { + if revOp, ok := reverseCompareOperators[op]; ok { + return revOp + } + return op +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/linter/ginkgo_linter.go b/vendor/github.com/nunnatsa/ginkgolinter/linter/ginkgo_linter.go new file mode 100644 index 0000000000..b158c3a3ae --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/linter/ginkgo_linter.go @@ -0,0 +1,1669 @@ +package linter + +import ( + "bytes" + "fmt" + "go/ast" + "go/constant" + "go/printer" + "go/token" + gotypes "go/types" + "reflect" + + "github.com/go-toolsmith/astcopy" + "golang.org/x/tools/go/analysis" + + "github.com/nunnatsa/ginkgolinter/internal/ginkgohandler" + "github.com/nunnatsa/ginkgolinter/internal/gomegahandler" + "github.com/nunnatsa/ginkgolinter/internal/interfaces" + "github.com/nunnatsa/ginkgolinter/internal/intervals" + "github.com/nunnatsa/ginkgolinter/internal/reports" + "github.com/nunnatsa/ginkgolinter/internal/reverseassertion" + "github.com/nunnatsa/ginkgolinter/types" +) + +// The ginkgolinter enforces standards of using ginkgo and gomega. +// +// For more details, see the README.md file. + +const ( + linterName = "ginkgo-linter" + wrongLengthWarningTemplate = "wrong length assertion" + wrongCapWarningTemplate = "wrong cap assertion" + wrongNilWarningTemplate = "wrong nil assertion" + wrongBoolWarningTemplate = "wrong boolean assertion" + wrongErrWarningTemplate = "wrong error assertion" + wrongCompareWarningTemplate = "wrong comparison assertion" + doubleNegativeWarningTemplate = "avoid double negative assertion" + valueInEventually = "use a function call in %s. This actually checks nothing, because %s receives the function's returned value instead of the function itself, and that value never changes" + comparePointerToValue = "comparing a pointer to a value will always fail" + missingAssertionMessage = linterName + `: %q: missing assertion method. Expected %s` + focusContainerFound = linterName + ": Focus container found. This is used only for local debug and should not be part of the actual source code. Consider replacing it with %q" + focusSpecFound = linterName + ": Focus spec found. This is used only for local debug and should not be part of the actual source code. 
Consider removing it" + compareDifferentTypes = "use %[1]s with different types: Comparing %[2]s with %[3]s; either change the expected value type if possible, or use the BeEquivalentTo() matcher, instead of %[1]s()" + matchErrorArgWrongType = "the MatchError matcher is used to assert a non-error type (%s)" + matchErrorWrongTypeAssertion = "MatchError first parameter (%s) must be an error, a string, a GomegaMatcher or a func(error) bool" + matchErrorMissingDescription = "missing function description as second parameter of MatchError" + matchErrorRedundantArg = "redundant MatchError arguments; consider removing them" + matchErrorNoFuncDescription = "the second parameter of MatchError must be the function description (string)" + forceExpectToTemplate = "must not use Expect with %s" + useBeforeEachTemplate = "use BeforeEach() to assign variable %s" +) + +const ( // gomega matchers + beEmpty = "BeEmpty" + beEquivalentTo = "BeEquivalentTo" + beFalse = "BeFalse" + beIdenticalTo = "BeIdenticalTo" + beNil = "BeNil" + beNumerically = "BeNumerically" + beTrue = "BeTrue" + beZero = "BeZero" + equal = "Equal" + haveLen = "HaveLen" + haveCap = "HaveCap" + haveOccurred = "HaveOccurred" + haveValue = "HaveValue" + not = "Not" + omega = "Ω" + succeed = "Succeed" + and = "And" + or = "Or" + withTransform = "WithTransform" + matchError = "MatchError" +) + +const ( // gomega actuals + expect = "Expect" + expectWithOffset = "ExpectWithOffset" + eventually = "Eventually" + eventuallyWithOffset = "EventuallyWithOffset" + consistently = "Consistently" + consistentlyWithOffset = "ConsistentlyWithOffset" +) + +type GinkgoLinter struct { + config *types.Config +} + +// NewGinkgoLinter returns a new ginkgolinter object +func NewGinkgoLinter(config *types.Config) *GinkgoLinter { + return &GinkgoLinter{ + config: config, + } +} + +// Run is the main assertion function +func (l *GinkgoLinter) Run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + fileConfig := l.config.Clone() + + cm := ast.NewCommentMap(pass.Fset, file, file.Comments) + + fileConfig.UpdateFromFile(cm) + + gomegaHndlr := gomegahandler.GetGomegaHandler(file) + ginkgoHndlr := ginkgohandler.GetGinkgoHandler(file) + + if gomegaHndlr == nil && ginkgoHndlr == nil { // no gomega or ginkgo imports => no use of gomega in this file; nothing to do here + continue + } + + timePks := "" + for _, imp := range file.Imports { + if imp.Path.Value == `"time"` { + if imp.Name == nil { + timePks = "time" + } else { + timePks = imp.Name.Name + } + } + } + + ast.Inspect(file, func(n ast.Node) bool { + if ginkgoHndlr != nil { + goDeeper := false + spec, ok := n.(*ast.ValueSpec) + if ok { + for _, val := range spec.Values { + if exp, ok := val.(*ast.CallExpr); ok { + if bool(fileConfig.ForbidFocus) && checkFocusContainer(pass, ginkgoHndlr, exp) { + goDeeper = true + } + + if bool(fileConfig.ForbidSpecPollution) && checkAssignmentsInContainer(pass, ginkgoHndlr, exp) { + goDeeper = true + } + } + } + } + if goDeeper { + return true + } + } + + stmt, ok := n.(*ast.ExprStmt) + if !ok { + return true + } + + config := fileConfig.Clone() + + if comments, ok := cm[stmt]; ok { + config.UpdateFromComment(comments) + } + + // search for function calls + assertionExp, ok := stmt.X.(*ast.CallExpr) + if !ok { + return true + } + + if ginkgoHndlr != nil { + goDeeper := false + if bool(config.ForbidFocus) && checkFocusContainer(pass, ginkgoHndlr, assertionExp) { + goDeeper = true + } + if bool(config.ForbidSpecPollution) && checkAssignmentsInContainer(pass, 
ginkgoHndlr, assertionExp) { + goDeeper = true + } + if goDeeper { + return true + } + } + + // no more ginkgo checks. From here it's only gomega. So if there is no gomega handler, exit here. This is + // mostly to prevent nil pointer error. + if gomegaHndlr == nil { + return true + } + + assertionFunc, ok := assertionExp.Fun.(*ast.SelectorExpr) + if !ok { + checkNoAssertion(pass, assertionExp, gomegaHndlr) + return true + } + + if !isAssertionFunc(assertionFunc.Sel.Name) { + checkNoAssertion(pass, assertionExp, gomegaHndlr) + return true + } + + actualExpr := gomegaHndlr.GetActualExpr(assertionFunc) + if actualExpr == nil { + return true + } + + return checkExpression(pass, config, assertionExp, actualExpr, gomegaHndlr, timePks) + }) + } + return nil, nil +} + +func checkAssignmentsInContainer(pass *analysis.Pass, ginkgoHndlr ginkgohandler.Handler, exp *ast.CallExpr) bool { + foundSomething := false + if ginkgoHndlr.IsWrapContainer(exp) { + for _, arg := range exp.Args { + if fn, ok := arg.(*ast.FuncLit); ok { + if fn.Body != nil { + if checkAssignments(pass, fn.Body.List) { + foundSomething = true + } + break + } + } + } + } + + return foundSomething +} + +func checkAssignments(pass *analysis.Pass, list []ast.Stmt) bool { + foundSomething := false + for _, stmt := range list { + switch st := stmt.(type) { + case *ast.DeclStmt: + if gen, ok := st.Decl.(*ast.GenDecl); ok { + if gen.Tok != token.VAR { + continue + } + for _, spec := range gen.Specs { + if valSpec, ok := spec.(*ast.ValueSpec); ok { + if checkAssignmentsValues(pass, valSpec.Names, valSpec.Values) { + foundSomething = true + } + } + } + } + + case *ast.AssignStmt: + for i, val := range st.Rhs { + if _, isFunc := val.(*ast.FuncLit); !isFunc { + if id, isIdent := st.Lhs[i].(*ast.Ident); isIdent { + reportNoFix(pass, id.Pos(), useBeforeEachTemplate, id.Name) + foundSomething = true + } + } + } + + case *ast.IfStmt: + if st.Body != nil { + if checkAssignments(pass, st.Body.List) { + foundSomething = true + } + } + if st.Else != nil { + if block, isBlock := st.Else.(*ast.BlockStmt); isBlock { + if checkAssignments(pass, block.List) { + foundSomething = true + } + } + } + } + } + + return foundSomething +} + +func checkAssignmentsValues(pass *analysis.Pass, names []*ast.Ident, values []ast.Expr) bool { + foundSomething := false + for i, val := range values { + if _, isFunc := val.(*ast.FuncLit); !isFunc { + reportNoFix(pass, names[i].Pos(), useBeforeEachTemplate, names[i].Name) + foundSomething = true + } + } + + return foundSomething +} + +func checkFocusContainer(pass *analysis.Pass, ginkgoHndlr ginkgohandler.Handler, exp *ast.CallExpr) bool { + foundFocus := false + isFocus, id := ginkgoHndlr.GetFocusContainerName(exp) + if isFocus { + reportNewName(pass, id, id.Name[1:], focusContainerFound, id.Name) + foundFocus = true + } + + if id != nil && ginkgohandler.IsContainer(id.Name) { + for _, arg := range exp.Args { + if ginkgoHndlr.IsFocusSpec(arg) { + reportNoFix(pass, arg.Pos(), focusSpecFound) + foundFocus = true + } else if callExp, ok := arg.(*ast.CallExpr); ok { + if checkFocusContainer(pass, ginkgoHndlr, callExp) { // handle table entries + foundFocus = true + } + } + } + } + + return foundFocus +} + +func checkExpression(pass *analysis.Pass, config types.Config, assertionExp *ast.CallExpr, actualExpr *ast.CallExpr, handler gomegahandler.Handler, timePkg string) bool { + expr := astcopy.CallExpr(assertionExp) + + reportBuilder := reports.NewBuilder(pass.Fset, expr) + + goNested := false + if checkAsyncAssertion(pass, config, 
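The assignment checks above implement the useBeforeEachTemplate warning. The pattern they steer users away from, sketched as hypothetical ginkgo test code (Pod and newPod are placeholders):

    var _ = Describe("pods", func() {
        // Flagged (ForbidSpecPollution): evaluated once, at tree-construction
        // time, and then shared between specs.
        //   pod := newPod()

        // Suggested shape: assign inside BeforeEach so each spec starts fresh.
        var pod *Pod
        BeforeEach(func() {
            pod = newPod()
        })

        It("starts", func() {
            Expect(pod).NotTo(BeNil())
        })
    })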
expr, actualExpr, handler, reportBuilder, timePkg) { + goNested = true + } else { + + actualArg := getActualArg(actualExpr, handler) + if actualArg == nil { + return true + } + + if config.ForceExpectTo { + goNested = forceExpectTo(expr, handler, reportBuilder) || goNested + } + + goNested = doCheckExpression(pass, config, assertionExp, actualArg, expr, handler, reportBuilder) || goNested + } + + if reportBuilder.HasReport() { + reportBuilder.SetFixOffer(pass.Fset, expr) + pass.Report(reportBuilder.Build()) + } + + return goNested +} + +func forceExpectTo(expr *ast.CallExpr, handler gomegahandler.Handler, reportBuilder *reports.Builder) bool { + if asrtFun, ok := expr.Fun.(*ast.SelectorExpr); ok { + if actualFuncName, ok := handler.GetActualFuncName(expr); ok && actualFuncName == expect { + var ( + name string + newIdent *ast.Ident + ) + + switch name = asrtFun.Sel.Name; name { + case "Should": + newIdent = ast.NewIdent("To") + case "ShouldNot": + newIdent = ast.NewIdent("ToNot") + default: + return false + } + + handler.ReplaceFunction(expr, newIdent) + reportBuilder.AddIssue(true, fmt.Sprintf(forceExpectToTemplate, name)) + return true + } + } + + return false +} + +func doCheckExpression(pass *analysis.Pass, config types.Config, assertionExp *ast.CallExpr, actualArg ast.Expr, expr *ast.CallExpr, handler gomegahandler.Handler, reportBuilder *reports.Builder) bool { + if !bool(config.SuppressLen) && isActualIsLenFunc(actualArg) { + return checkLengthMatcher(expr, pass, handler, reportBuilder) + + } else if !bool(config.SuppressLen) && isActualIsCapFunc(actualArg) { + return checkCapMatcher(expr, handler, reportBuilder) + + } else if nilable, compOp := getNilableFromComparison(actualArg); nilable != nil { + if isExprError(pass, nilable) { + if config.SuppressErr { + return true + } + } else if config.SuppressNil { + return true + } + + return checkNilMatcher(expr, pass, nilable, handler, compOp == token.NEQ, reportBuilder) + + } else if first, second, op, ok := isComparison(pass, actualArg); ok { + matcher, shouldContinue := startCheckComparison(expr, handler) + if !shouldContinue { + return false + } + if !config.SuppressLen { + if isActualIsLenFunc(first) { + if handleLenComparison(pass, expr, matcher, first, second, op, handler, reportBuilder) { + return false + } + } + if isActualIsCapFunc(first) { + if handleCapComparison(expr, matcher, first, second, op, handler, reportBuilder) { + return false + } + } + } + return bool(config.SuppressCompare) || checkComparison(expr, pass, matcher, handler, first, second, op, reportBuilder) + + } else if checkMatchError(pass, assertionExp, actualArg, handler, reportBuilder) { + return false + } else if isExprError(pass, actualArg) { + return bool(config.SuppressErr) || checkNilError(pass, expr, handler, actualArg, reportBuilder) + + } else if checkPointerComparison(pass, config, assertionExp, expr, actualArg, handler, reportBuilder) { + return false + } else if !handleAssertionOnly(pass, config, expr, handler, actualArg, reportBuilder) { + return false + } else if !config.SuppressTypeCompare { + return !checkEqualWrongType(pass, assertionExp, actualArg, handler, reportBuilder) + } + + return true +} + +func checkMatchError(pass *analysis.Pass, origExp *ast.CallExpr, actualArg ast.Expr, handler gomegahandler.Handler, reportBuilder *reports.Builder) bool { + matcher, ok := origExp.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + return doCheckMatchError(pass, origExp, matcher, actualArg, handler, reportBuilder) +} + +func 
doCheckMatchError(pass *analysis.Pass, origExp *ast.CallExpr, matcher *ast.CallExpr, actualArg ast.Expr, handler gomegahandler.Handler, reportBuilder *reports.Builder) bool { + name, ok := handler.GetActualFuncName(matcher) + if !ok { + return false + } + switch name { + case matchError: + case not: + nested, ok := matcher.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + return doCheckMatchError(pass, origExp, nested, actualArg, handler, reportBuilder) + case and, or: + res := false + for _, arg := range matcher.Args { + if nested, ok := arg.(*ast.CallExpr); ok { + if valid := doCheckMatchError(pass, origExp, nested, actualArg, handler, reportBuilder); valid { + res = true + } + } + } + return res + default: + return false + } + + if !isExprError(pass, actualArg) { + reportBuilder.AddIssue(false, matchErrorArgWrongType, goFmt(pass.Fset, actualArg)) + } + + expr := astcopy.CallExpr(matcher) + + validAssertion, requiredParams := checkMatchErrorAssertion(pass, matcher) + if !validAssertion { + reportBuilder.AddIssue(false, matchErrorWrongTypeAssertion, goFmt(pass.Fset, matcher.Args[0])) + } + + numParams := len(matcher.Args) + if numParams == requiredParams { + if numParams == 2 { + t := pass.TypesInfo.TypeOf(matcher.Args[1]) + if !gotypes.Identical(t, gotypes.Typ[gotypes.String]) { + reportBuilder.AddIssue(false, matchErrorNoFuncDescription) + return true + } + } + return true + } + + if requiredParams == 2 && numParams == 1 { + reportBuilder.AddIssue(false, matchErrorMissingDescription) + return true + } + + var newArgsSuggestion = []ast.Expr{expr.Args[0]} + if requiredParams == 2 { + newArgsSuggestion = append(newArgsSuggestion, expr.Args[1]) + } + expr.Args = newArgsSuggestion + + reportBuilder.AddIssue(true, matchErrorRedundantArg) + return true +} + +func checkMatchErrorAssertion(pass *analysis.Pass, matcher *ast.CallExpr) (bool, int) { + if isErrorMatcherValidArg(pass, matcher.Args[0]) { + return true, 1 + } + + t1 := pass.TypesInfo.TypeOf(matcher.Args[0]) + if isFuncErrBool(t1) { + return true, 2 + } + + return false, 0 +} + +// isFuncErrBool checks if a function is with the signature `func(error) bool` +func isFuncErrBool(t gotypes.Type) bool { + sig, ok := t.(*gotypes.Signature) + if !ok { + return false + } + if sig.Params().Len() != 1 || sig.Results().Len() != 1 { + return false + } + + if !interfaces.ImplementsError(sig.Params().At(0).Type()) { + return false + } + + b, ok := sig.Results().At(0).Type().(*gotypes.Basic) + if ok && b.Name() == "bool" && b.Info() == gotypes.IsBoolean && b.Kind() == gotypes.Bool { + return true + } + + return false +} + +func isErrorMatcherValidArg(pass *analysis.Pass, arg ast.Expr) bool { + if isExprError(pass, arg) { + return true + } + + if t, ok := pass.TypesInfo.TypeOf(arg).(*gotypes.Basic); ok && t.Kind() == gotypes.String { + return true + } + + t := pass.TypesInfo.TypeOf(arg) + + return interfaces.ImplementsGomegaMatcher(t) +} + +func checkEqualWrongType(pass *analysis.Pass, origExp *ast.CallExpr, actualArg ast.Expr, handler gomegahandler.Handler, reportBuilder *reports.Builder) bool { + matcher, ok := origExp.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + return checkEqualDifferentTypes(pass, matcher, actualArg, handler, false, reportBuilder) +} + +func checkEqualDifferentTypes(pass *analysis.Pass, matcher *ast.CallExpr, actualArg ast.Expr, handler gomegahandler.Handler, parentPointer bool, reportBuilder *reports.Builder) bool { + matcherFuncName, ok := handler.GetActualFuncName(matcher) + if !ok { + return false + } + + 
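The MatchError validation above mirrors gomega's documented parameter forms. Roughly, with hypothetical err values:

    g.Expect(err).To(MatchError(os.ErrNotExist))              // an error value
    g.Expect(err).To(MatchError("not found"))                 // the exact message string
    g.Expect(err).To(MatchError(ContainSubstring("found")))   // a nested GomegaMatcher
    g.Expect(err).To(MatchError(os.IsNotExist, "IsNotExist")) // func(error) bool plus its description

    // Reported: a first argument of any other type, a missing description
    // for the func(error) bool form, or redundant extra arguments otherwise.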
actualType := pass.TypesInfo.TypeOf(actualArg) + + switch matcherFuncName { + case equal, beIdenticalTo: // continue + case and, or: + foundIssue := false + for _, nestedExp := range matcher.Args { + nested, ok := nestedExp.(*ast.CallExpr) + if !ok { + continue + } + if checkEqualDifferentTypes(pass, nested, actualArg, handler, parentPointer, reportBuilder) { + foundIssue = true + } + } + + return foundIssue + case withTransform: + nested, ok := matcher.Args[1].(*ast.CallExpr) + if !ok { + return false + } + + matcherFuncName, ok = handler.GetActualFuncName(nested) + switch matcherFuncName { + case equal, beIdenticalTo: + case not: + return checkEqualDifferentTypes(pass, nested, actualArg, handler, parentPointer, reportBuilder) + default: + return false + } + + if t := getFuncType(pass, matcher.Args[0]); t != nil { + actualType = t + matcher = nested + + if !ok { + return false + } + } else { + return checkEqualDifferentTypes(pass, nested, actualArg, handler, parentPointer, reportBuilder) + } + + case not: + nested, ok := matcher.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + return checkEqualDifferentTypes(pass, nested, actualArg, handler, parentPointer, reportBuilder) + + case haveValue: + nested, ok := matcher.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + return checkEqualDifferentTypes(pass, nested, actualArg, handler, true, reportBuilder) + default: + return false + } + + matcherValue := matcher.Args[0] + + switch act := actualType.(type) { + case *gotypes.Tuple: + actualType = act.At(0).Type() + case *gotypes.Pointer: + if parentPointer { + actualType = act.Elem() + } + } + + matcherType := pass.TypesInfo.TypeOf(matcherValue) + + if !reflect.DeepEqual(matcherType, actualType) { + // Equal can handle comparison of interface and a value that implements it + if isImplementing(matcherType, actualType) || isImplementing(actualType, matcherType) { + return false + } + + reportBuilder.AddIssue(false, compareDifferentTypes, matcherFuncName, actualType, matcherType) + return true + } + + return false +} + +func getFuncType(pass *analysis.Pass, expr ast.Expr) gotypes.Type { + switch f := expr.(type) { + case *ast.FuncLit: + if f.Type != nil && f.Type.Results != nil && len(f.Type.Results.List) > 0 { + return pass.TypesInfo.TypeOf(f.Type.Results.List[0].Type) + } + case *ast.Ident: + a := pass.TypesInfo.TypeOf(f) + if sig, ok := a.(*gotypes.Signature); ok && sig.Results().Len() > 0 { + return sig.Results().At(0).Type() + } + } + + return nil +} + +func isImplementing(ifs, impl gotypes.Type) bool { + if gotypes.IsInterface(ifs) { + + var ( + theIfs *gotypes.Interface + ok bool + ) + + for { + theIfs, ok = ifs.(*gotypes.Interface) + if ok { + break + } + ifs = ifs.Underlying() + } + + return gotypes.Implements(impl, theIfs) + } + return false +} + +// be careful - never change origExp!!! only modify its clone, expr!!! +func checkPointerComparison(pass *analysis.Pass, config types.Config, origExp *ast.CallExpr, expr *ast.CallExpr, actualArg ast.Expr, handler gomegahandler.Handler, reportBuilder *reports.Builder) bool { + if !isPointer(pass, actualArg) { + return false + } + matcher, ok := origExp.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + matcherFuncName, ok := handler.GetActualFuncName(matcher) + if !ok { + return false + } + + // not using recurse here, since we need the original expression, in order to get the TypeInfo, while we should not + // modify it. 
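An example of the mismatch checkEqualDifferentTypes reports, and the rewrite its BeEquivalentTo suggestion points to (sketch):

    var n int32 = 5
    // Flagged (compareDifferentTypes): int32 vs. untyped-int-as-int never Equal.
    //   g.Expect(n).To(Equal(5))
    g.Expect(n).To(BeEquivalentTo(5)) // converts before comparing

    // checkPointerComparison, continued just below, handles the pointer variant:
    //   g.Expect(&n).To(Equal(int32(5)))             // always fails
    // becomes
    //   g.Expect(&n).To(HaveValue(Equal(int32(5))))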
+ for matcherFuncName == not { + reverseAssertionFuncLogic(expr) + expr.Args[0] = expr.Args[0].(*ast.CallExpr).Args[0] + matcher, ok = matcher.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + matcherFuncName, ok = handler.GetActualFuncName(matcher) + if !ok { + return false + } + } + + switch matcherFuncName { + case equal, beIdenticalTo, beEquivalentTo: + arg := matcher.Args[0] + if isPointer(pass, arg) { + return false + } + if isNil(arg) { + return false + } + if isInterface(pass, arg) { + return false + } + case beFalse, beTrue, beNumerically: + default: + return false + } + + handleAssertionOnly(pass, config, expr, handler, actualArg, reportBuilder) + + args := []ast.Expr{astcopy.CallExpr(expr.Args[0].(*ast.CallExpr))} + handler.ReplaceFunction(expr.Args[0].(*ast.CallExpr), ast.NewIdent(haveValue)) + expr.Args[0].(*ast.CallExpr).Args = args + + reportBuilder.AddIssue(true, comparePointerToValue) + return true +} + +// check async assertion does not assert function call. This is a real bug in the test. In this case, the assertion is +// done on the returned value, instead of polling the result of a function, for instance. +func checkAsyncAssertion(pass *analysis.Pass, config types.Config, expr *ast.CallExpr, actualExpr *ast.CallExpr, handler gomegahandler.Handler, reportBuilder *reports.Builder, timePkg string) bool { + funcName, ok := handler.GetActualFuncName(actualExpr) + if !ok { + return false + } + + var funcIndex int + switch funcName { + case eventually, consistently: + funcIndex = 0 + case eventuallyWithOffset, consistentlyWithOffset: + funcIndex = 1 + default: + return false + } + + if !config.SuppressAsync && len(actualExpr.Args) > funcIndex { + t := pass.TypesInfo.TypeOf(actualExpr.Args[funcIndex]) + + // skip context variable, if used as first argument + if "context.Context" == t.String() { + funcIndex++ + } + + if len(actualExpr.Args) > funcIndex { + if fun, funcCall := actualExpr.Args[funcIndex].(*ast.CallExpr); funcCall { + t = pass.TypesInfo.TypeOf(fun) + if !isValidAsyncValueType(t) { + actualExpr = handler.GetActualExpr(expr.Fun.(*ast.SelectorExpr)) + + if len(fun.Args) > 0 { + origArgs := actualExpr.Args + origFunc := actualExpr.Fun + actualExpr.Args = fun.Args + + origArgs[funcIndex] = fun.Fun + call := &ast.SelectorExpr{ + Sel: ast.NewIdent("WithArguments"), + X: &ast.CallExpr{ + Fun: origFunc, + Args: origArgs, + }, + } + + actualExpr.Fun = call + actualExpr.Args = fun.Args + actualExpr = actualExpr.Fun.(*ast.SelectorExpr).X.(*ast.CallExpr) + } else { + actualExpr.Args[funcIndex] = fun.Fun + } + + reportBuilder.AddIssue(true, valueInEventually, funcName, funcName) + } + } + } + + if config.ValidateAsyncIntervals { + intervals.CheckIntervals(pass, expr, actualExpr, reportBuilder, handler, timePkg, funcIndex) + } + } + + handleAssertionOnly(pass, config, expr, handler, actualExpr, reportBuilder) + return true +} + +func isValidAsyncValueType(t gotypes.Type) bool { + switch t.(type) { + // allow functions that return function or channel. 
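checkAsyncAssertion targets the classic Eventually bug described by the valueInEventually message; a before/after sketch (client, fetchStatus and podName are placeholders):

    // Flagged: the function is called once, and Eventually polls a frozen value.
    //   g.Eventually(client.IsReady()).Should(BeTrue())

    // Suggested fixes: pass the function itself; any arguments move into
    // WithArguments(), matching the rewrite built above.
    g.Eventually(client.IsReady).Should(BeTrue())
    g.Eventually(fetchStatus).WithArguments(podName).Should(Equal("ready"))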
+ case *gotypes.Signature, *gotypes.Chan, *gotypes.Pointer: + return true + case *gotypes.Named: + return isValidAsyncValueType(t.Underlying()) + } + + return false +} + +func startCheckComparison(exp *ast.CallExpr, handler gomegahandler.Handler) (*ast.CallExpr, bool) { + matcher, ok := exp.Args[0].(*ast.CallExpr) + if !ok { + return nil, false + } + + matcherFuncName, ok := handler.GetActualFuncName(matcher) + if !ok { + return nil, false + } + + switch matcherFuncName { + case beTrue: + case beFalse: + reverseAssertionFuncLogic(exp) + case equal: + boolean, found := matcher.Args[0].(*ast.Ident) + if !found { + return nil, false + } + + if boolean.Name == "false" { + reverseAssertionFuncLogic(exp) + } else if boolean.Name != "true" { + return nil, false + } + + case not: + reverseAssertionFuncLogic(exp) + exp.Args[0] = exp.Args[0].(*ast.CallExpr).Args[0] + return startCheckComparison(exp, handler) + + default: + return nil, false + } + + return matcher, true +} + +func checkComparison(exp *ast.CallExpr, pass *analysis.Pass, matcher *ast.CallExpr, handler gomegahandler.Handler, first ast.Expr, second ast.Expr, op token.Token, reportBuilder *reports.Builder) bool { + fun, ok := exp.Fun.(*ast.SelectorExpr) + if !ok { + return true + } + + call := handler.GetActualExpr(fun) + if call == nil { + return true + } + + switch op { + case token.EQL: + handleEqualComparison(pass, matcher, first, second, handler) + + case token.NEQ: + reverseAssertionFuncLogic(exp) + handleEqualComparison(pass, matcher, first, second, handler) + case token.GTR, token.GEQ, token.LSS, token.LEQ: + if !isNumeric(pass, first) { + return true + } + handler.ReplaceFunction(matcher, ast.NewIdent(beNumerically)) + matcher.Args = []ast.Expr{ + &ast.BasicLit{Kind: token.STRING, Value: fmt.Sprintf(`"%s"`, op.String())}, + second, + } + default: + return true + } + + call.Args = []ast.Expr{first} + reportBuilder.AddIssue(true, wrongCompareWarningTemplate) + return false +} + +func handleEqualComparison(pass *analysis.Pass, matcher *ast.CallExpr, first ast.Expr, second ast.Expr, handler gomegahandler.Handler) { + if isZero(pass, second) { + handler.ReplaceFunction(matcher, ast.NewIdent(beZero)) + matcher.Args = nil + } else { + t := pass.TypesInfo.TypeOf(first) + if gotypes.IsInterface(t) { + handler.ReplaceFunction(matcher, ast.NewIdent(beIdenticalTo)) + } else if _, ok := t.(*gotypes.Pointer); ok { + handler.ReplaceFunction(matcher, ast.NewIdent(beIdenticalTo)) + } else { + handler.ReplaceFunction(matcher, ast.NewIdent(equal)) + } + + matcher.Args = []ast.Expr{second} + } +} + +func handleLenComparison(pass *analysis.Pass, exp *ast.CallExpr, matcher *ast.CallExpr, first ast.Expr, second ast.Expr, op token.Token, handler gomegahandler.Handler, reportBuilder *reports.Builder) bool { + switch op { + case token.EQL: + case token.NEQ: + reverseAssertionFuncLogic(exp) + default: + return false + } + + var eql *ast.Ident + if isZero(pass, second) { + eql = ast.NewIdent(beEmpty) + } else { + eql = ast.NewIdent(haveLen) + matcher.Args = []ast.Expr{second} + } + + handler.ReplaceFunction(matcher, eql) + firstLen, ok := first.(*ast.CallExpr) // assuming it's len() + if !ok { + return false // should never happen + } + + val := firstLen.Args[0] + fun := handler.GetActualExpr(exp.Fun.(*ast.SelectorExpr)) + fun.Args = []ast.Expr{val} + + reportBuilder.AddIssue(true, wrongLengthWarningTemplate) + return true +} + +func handleCapComparison(exp *ast.CallExpr, matcher *ast.CallExpr, first ast.Expr, second ast.Expr, op token.Token, handler 
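checkComparison and its helpers above turn boolean comparisons inside an assertion into real matchers. Representative rewrites (sketch):

    // g.Expect(x == y).To(BeTrue())       ->  g.Expect(x).To(Equal(y))
    // g.Expect(x != 0).To(BeTrue())       ->  g.Expect(x).ToNot(BeZero())
    // g.Expect(n > 3).To(BeTrue())        ->  g.Expect(n).To(BeNumerically(">", 3))
    // g.Expect(len(s) == 2).To(BeTrue())  ->  g.Expect(s).To(HaveLen(2))
    // Pointer and interface operands get BeIdenticalTo instead of Equal.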
gomegahandler.Handler, reportBuilder *reports.Builder) bool { + switch op { + case token.EQL: + case token.NEQ: + reverseAssertionFuncLogic(exp) + default: + return false + } + + eql := ast.NewIdent(haveCap) + matcher.Args = []ast.Expr{second} + + handler.ReplaceFunction(matcher, eql) + firstCap, ok := first.(*ast.CallExpr) // assuming it's cap() + if !ok { + return false // should never happen + } + + val := firstCap.Args[0] + fun := handler.GetActualExpr(exp.Fun.(*ast.SelectorExpr)) + fun.Args = []ast.Expr{val} + + reportBuilder.AddIssue(true, wrongCapWarningTemplate) + return true +} + +// Check if the "actual" argument is a call to the golang built-in len() function +func isActualIsLenFunc(actualArg ast.Expr) bool { + return checkActualFuncName(actualArg, "len") +} + +// Check if the "actual" argument is a call to the golang built-in cap() function +func isActualIsCapFunc(actualArg ast.Expr) bool { + return checkActualFuncName(actualArg, "cap") +} + +func checkActualFuncName(actualArg ast.Expr, name string) bool { + lenArgExp, ok := actualArg.(*ast.CallExpr) + if !ok { + return false + } + + lenFunc, ok := lenArgExp.Fun.(*ast.Ident) + return ok && lenFunc.Name == name +} + +// Check if matcher function is in one of the patterns we want to avoid +func checkLengthMatcher(exp *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Handler, reportBuilder *reports.Builder) bool { + matcher, ok := exp.Args[0].(*ast.CallExpr) + if !ok { + return true + } + + matcherFuncName, ok := handler.GetActualFuncName(matcher) + if !ok { + return true + } + + switch matcherFuncName { + case equal: + handleEqualLenMatcher(matcher, pass, exp, handler, reportBuilder) + return false + + case beZero: + handleBeZero(exp, handler, reportBuilder) + return false + + case beNumerically: + return handleBeNumerically(matcher, pass, exp, handler, reportBuilder) + + case not: + reverseAssertionFuncLogic(exp) + exp.Args[0] = exp.Args[0].(*ast.CallExpr).Args[0] + return checkLengthMatcher(exp, pass, handler, reportBuilder) + + default: + return true + } +} + +// Check if matcher function is in one of the patterns we want to avoid +func checkCapMatcher(exp *ast.CallExpr, handler gomegahandler.Handler, reportBuilder *reports.Builder) bool { + matcher, ok := exp.Args[0].(*ast.CallExpr) + if !ok { + return true + } + + matcherFuncName, ok := handler.GetActualFuncName(matcher) + if !ok { + return true + } + + switch matcherFuncName { + case equal: + handleEqualCapMatcher(matcher, exp, handler, reportBuilder) + return false + + case beZero: + handleCapBeZero(exp, handler, reportBuilder) + return false + + case beNumerically: + return handleCapBeNumerically(matcher, exp, handler, reportBuilder) + + case not: + reverseAssertionFuncLogic(exp) + exp.Args[0] = exp.Args[0].(*ast.CallExpr).Args[0] + return checkCapMatcher(exp, handler, reportBuilder) + + default: + return true + } +} + +// Check if matcher function is in one of the patterns we want to avoid +func checkNilMatcher(exp *ast.CallExpr, pass *analysis.Pass, nilable ast.Expr, handler gomegahandler.Handler, notEqual bool, reportBuilder *reports.Builder) bool { + matcher, ok := exp.Args[0].(*ast.CallExpr) + if !ok { + return true + } + + matcherFuncName, ok := handler.GetActualFuncName(matcher) + if !ok { + return true + } + + switch matcherFuncName { + case equal: + handleEqualNilMatcher(matcher, pass, exp, handler, nilable, notEqual, reportBuilder) + + case beTrue: + handleNilBeBoolMatcher(pass, exp, handler, nilable, notEqual, reportBuilder) + + case beFalse: + 
reverseAssertionFuncLogic(exp) + handleNilBeBoolMatcher(pass, exp, handler, nilable, notEqual, reportBuilder) + + case not: + reverseAssertionFuncLogic(exp) + exp.Args[0] = exp.Args[0].(*ast.CallExpr).Args[0] + return checkNilMatcher(exp, pass, nilable, handler, notEqual, reportBuilder) + + default: + return true + } + return false +} + +func checkNilError(pass *analysis.Pass, assertionExp *ast.CallExpr, handler gomegahandler.Handler, actualArg ast.Expr, reportBuilder *reports.Builder) bool { + if len(assertionExp.Args) == 0 { + return true + } + + equalFuncExpr, ok := assertionExp.Args[0].(*ast.CallExpr) + if !ok { + return true + } + + funcName, ok := handler.GetActualFuncName(equalFuncExpr) + if !ok { + return true + } + + switch funcName { + case beNil: // no additional processing needed. + case equal: + + if len(equalFuncExpr.Args) == 0 { + return true + } + + nilable, ok := equalFuncExpr.Args[0].(*ast.Ident) + if !ok || nilable.Name != "nil" { + return true + } + + case not: + reverseAssertionFuncLogic(assertionExp) + assertionExp.Args[0] = assertionExp.Args[0].(*ast.CallExpr).Args[0] + return checkNilError(pass, assertionExp, handler, actualArg, reportBuilder) + default: + return true + } + + var newFuncName string + if _, ok := actualArg.(*ast.CallExpr); ok { + newFuncName = succeed + } else { + reverseAssertionFuncLogic(assertionExp) + newFuncName = haveOccurred + } + + handler.ReplaceFunction(equalFuncExpr, ast.NewIdent(newFuncName)) + equalFuncExpr.Args = nil + + reportBuilder.AddIssue(true, wrongErrWarningTemplate) + return false +} + +// handleAssertionOnly checks use-cases when the actual value is valid, but only the assertion should be fixed +// it handles: +// +// Equal(nil) => BeNil() +// Equal(true) => BeTrue() +// Equal(false) => BeFalse() +// HaveLen(0) => BeEmpty() +func handleAssertionOnly(pass *analysis.Pass, config types.Config, expr *ast.CallExpr, handler gomegahandler.Handler, actualArg ast.Expr, reportBuilder *reports.Builder) bool { + if len(expr.Args) == 0 { + return true + } + + equalFuncExpr, ok := expr.Args[0].(*ast.CallExpr) + if !ok { + return true + } + + funcName, ok := handler.GetActualFuncName(equalFuncExpr) + if !ok { + return true + } + + switch funcName { + case equal: + if len(equalFuncExpr.Args) == 0 { + return true + } + + tkn, ok := equalFuncExpr.Args[0].(*ast.Ident) + if !ok { + return true + } + + var replacement string + var template string + switch tkn.Name { + case "nil": + if config.SuppressNil { + return true + } + replacement = beNil + template = wrongNilWarningTemplate + case "true": + replacement = beTrue + template = wrongBoolWarningTemplate + case "false": + if isNegativeAssertion(expr) { + reverseAssertionFuncLogic(expr) + replacement = beTrue + } else { + replacement = beFalse + } + template = wrongBoolWarningTemplate + default: + return true + } + + handler.ReplaceFunction(equalFuncExpr, ast.NewIdent(replacement)) + equalFuncExpr.Args = nil + + reportBuilder.AddIssue(true, template) + return false + + case beFalse: + if isNegativeAssertion(expr) { + reverseAssertionFuncLogic(expr) + handler.ReplaceFunction(equalFuncExpr, ast.NewIdent(beTrue)) + reportBuilder.AddIssue(true, doubleNegativeWarningTemplate) + return false + } + return false + + case haveLen: + if config.AllowHaveLen0 { + return true + } + + if len(equalFuncExpr.Args) > 0 { + if isZero(pass, equalFuncExpr.Args[0]) { + handler.ReplaceFunction(equalFuncExpr, ast.NewIdent(beEmpty)) + equalFuncExpr.Args = nil + reportBuilder.AddIssue(true, wrongLengthWarningTemplate) + 
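The nil and error paths above converge on a small set of rewrites. For example (sketch; doSomething is a placeholder returning error):

    // g.Expect(err == nil).To(BeTrue())    ->  g.Expect(err).ToNot(HaveOccurred())
    // g.Expect(err != nil).To(BeTrue())    ->  g.Expect(err).To(HaveOccurred())
    // g.Expect(doSomething()).To(BeNil())  ->  g.Expect(doSomething()).To(Succeed())
    // g.Expect(ptr == nil).To(BeTrue())    ->  g.Expect(ptr).To(BeNil())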
return false + } + } + + return true + + case not: + reverseAssertionFuncLogic(expr) + expr.Args[0] = expr.Args[0].(*ast.CallExpr).Args[0] + return handleAssertionOnly(pass, config, expr, handler, actualArg, reportBuilder) + default: + return true + } +} + +func isZero(pass *analysis.Pass, arg ast.Expr) bool { + if val, ok := arg.(*ast.BasicLit); ok && val.Kind == token.INT && val.Value == "0" { + return true + } + info, ok := pass.TypesInfo.Types[arg] + if ok { + if t, ok := info.Type.(*gotypes.Basic); ok && t.Kind() == gotypes.Int && info.Value != nil { + if i, ok := constant.Int64Val(info.Value); ok && i == 0 { + return true + } + } + } else if val, ok := arg.(*ast.Ident); ok && val.Obj != nil && val.Obj.Kind == ast.Con { + if spec, ok := val.Obj.Decl.(*ast.ValueSpec); ok { + if len(spec.Values) == 1 { + if value, ok := spec.Values[0].(*ast.BasicLit); ok && value.Kind == token.INT && value.Value == "0" { + return true + } + } + } + } + + return false +} + +// getActualArg checks that the function is an assertion's actual function and return the "actual" parameter. If the +// function is not assertion's actual function, return nil. +func getActualArg(actualExpr *ast.CallExpr, handler gomegahandler.Handler) ast.Expr { + funcName, ok := handler.GetActualFuncName(actualExpr) + if !ok { + return nil + } + + switch funcName { + case expect, omega: + return actualExpr.Args[0] + case expectWithOffset: + return actualExpr.Args[1] + default: + return nil + } +} + +// Replace the len function call by its parameter, to create a fix suggestion +func replaceLenActualArg(actualExpr *ast.CallExpr, handler gomegahandler.Handler) { + name, ok := handler.GetActualFuncName(actualExpr) + if !ok { + return + } + + switch name { + case expect, omega: + arg := actualExpr.Args[0] + if isActualIsLenFunc(arg) || isActualIsCapFunc(arg) { + // replace the len function call by its parameter, to create a fix suggestion + actualExpr.Args[0] = arg.(*ast.CallExpr).Args[0] + } + case expectWithOffset: + arg := actualExpr.Args[1] + if isActualIsLenFunc(arg) || isActualIsCapFunc(arg) { + // replace the len function call by its parameter, to create a fix suggestion + actualExpr.Args[1] = arg.(*ast.CallExpr).Args[0] + } + } +} + +// Replace the nil comparison with the compared object, to create a fix suggestion +func replaceNilActualArg(actualExpr *ast.CallExpr, handler gomegahandler.Handler, nilable ast.Expr) bool { + actualFuncName, ok := handler.GetActualFuncName(actualExpr) + if !ok { + return false + } + + switch actualFuncName { + case expect, omega: + actualExpr.Args[0] = nilable + return true + + case expectWithOffset: + actualExpr.Args[1] = nilable + return true + + default: + return false + } +} + +// For the BeNumerically matcher, we want to avoid the assertion of length to be > 0 or >= 1, or just == number +func handleBeNumerically(matcher *ast.CallExpr, pass *analysis.Pass, exp *ast.CallExpr, handler gomegahandler.Handler, reportBuilder *reports.Builder) bool { + opExp, ok1 := matcher.Args[0].(*ast.BasicLit) + valExp, ok2 := matcher.Args[1].(*ast.BasicLit) + + if ok1 && ok2 { + op := opExp.Value + val := valExp.Value + + if (op == `">"` && val == "0") || (op == `">="` && val == "1") { + reverseAssertionFuncLogic(exp) + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(beEmpty)) + exp.Args[0].(*ast.CallExpr).Args = nil + } else if op == `"=="` { + chooseNumericMatcher(pass, exp, handler, valExp) + } else if op == `"!="` { + reverseAssertionFuncLogic(exp) + chooseNumericMatcher(pass, exp, handler, 
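handleAssertionOnly, completed above, fixes only the matcher side of an assertion. Typical findings (sketch):

    // g.Expect(flag).To(Equal(true))    ->  g.Expect(flag).To(BeTrue())
    // g.Expect(obj).To(Equal(nil))      ->  g.Expect(obj).To(BeNil())
    // g.Expect(flag).ToNot(BeFalse())   ->  g.Expect(flag).To(BeTrue())   // double negative
    // g.Expect(items).To(HaveLen(0))    ->  g.Expect(items).To(BeEmpty()) // unless AllowHaveLen0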
valExp) + } else { + return true + } + + reportLengthAssertion(exp, handler, reportBuilder) + return false + } + return true +} + +// For the BeNumerically matcher, we want to avoid the assertion of length to be > 0 or >= 1, or just == number +func handleCapBeNumerically(matcher *ast.CallExpr, exp *ast.CallExpr, handler gomegahandler.Handler, reportBuilder *reports.Builder) bool { + opExp, ok1 := matcher.Args[0].(*ast.BasicLit) + valExp, ok2 := matcher.Args[1].(*ast.BasicLit) + + if ok1 && ok2 { + op := opExp.Value + val := valExp.Value + + if (op == `">"` && val == "0") || (op == `">="` && val == "1") { + reverseAssertionFuncLogic(exp) + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(haveCap)) + exp.Args[0].(*ast.CallExpr).Args = []ast.Expr{&ast.BasicLit{Kind: token.INT, Value: "0"}} + } else if op == `"=="` { + replaceNumericCapMatcher(exp, handler, valExp) + } else if op == `"!="` { + reverseAssertionFuncLogic(exp) + replaceNumericCapMatcher(exp, handler, valExp) + } else { + return true + } + + reportCapAssertion(exp, handler, reportBuilder) + return false + } + return true +} + +func chooseNumericMatcher(pass *analysis.Pass, exp *ast.CallExpr, handler gomegahandler.Handler, valExp ast.Expr) { + caller := exp.Args[0].(*ast.CallExpr) + if isZero(pass, valExp) { + handler.ReplaceFunction(caller, ast.NewIdent(beEmpty)) + exp.Args[0].(*ast.CallExpr).Args = nil + } else { + handler.ReplaceFunction(caller, ast.NewIdent(haveLen)) + exp.Args[0].(*ast.CallExpr).Args = []ast.Expr{valExp} + } +} + +func replaceNumericCapMatcher(exp *ast.CallExpr, handler gomegahandler.Handler, valExp ast.Expr) { + caller := exp.Args[0].(*ast.CallExpr) + handler.ReplaceFunction(caller, ast.NewIdent(haveCap)) + exp.Args[0].(*ast.CallExpr).Args = []ast.Expr{valExp} +} + +func reverseAssertionFuncLogic(exp *ast.CallExpr) { + assertionFunc := exp.Fun.(*ast.SelectorExpr).Sel + assertionFunc.Name = reverseassertion.ChangeAssertionLogic(assertionFunc.Name) +} + +func isNegativeAssertion(exp *ast.CallExpr) bool { + assertionFunc := exp.Fun.(*ast.SelectorExpr).Sel + return reverseassertion.IsNegativeLogic(assertionFunc.Name) +} + +func handleEqualLenMatcher(matcher *ast.CallExpr, pass *analysis.Pass, exp *ast.CallExpr, handler gomegahandler.Handler, reportBuilder *reports.Builder) { + equalTo, ok := matcher.Args[0].(*ast.BasicLit) + if ok { + chooseNumericMatcher(pass, exp, handler, equalTo) + } else { + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(haveLen)) + exp.Args[0].(*ast.CallExpr).Args = []ast.Expr{matcher.Args[0]} + } + reportLengthAssertion(exp, handler, reportBuilder) +} + +func handleEqualCapMatcher(matcher *ast.CallExpr, exp *ast.CallExpr, handler gomegahandler.Handler, reportBuilder *reports.Builder) { + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(haveCap)) + exp.Args[0].(*ast.CallExpr).Args = []ast.Expr{matcher.Args[0]} + reportCapAssertion(exp, handler, reportBuilder) +} + +func handleBeZero(exp *ast.CallExpr, handler gomegahandler.Handler, reportBuilder *reports.Builder) { + exp.Args[0].(*ast.CallExpr).Args = nil + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(beEmpty)) + reportLengthAssertion(exp, handler, reportBuilder) +} + +func handleCapBeZero(exp *ast.CallExpr, handler gomegahandler.Handler, reportBuilder *reports.Builder) { + exp.Args[0].(*ast.CallExpr).Args = nil + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(haveCap)) + exp.Args[0].(*ast.CallExpr).Args = []ast.Expr{&ast.BasicLit{Kind: token.INT, 
Value: "0"}} + reportCapAssertion(exp, handler, reportBuilder) +} + +func handleEqualNilMatcher(matcher *ast.CallExpr, pass *analysis.Pass, exp *ast.CallExpr, handler gomegahandler.Handler, nilable ast.Expr, notEqual bool, reportBuilder *reports.Builder) { + equalTo, ok := matcher.Args[0].(*ast.Ident) + if !ok { + return + } + + if equalTo.Name == "false" { + reverseAssertionFuncLogic(exp) + } else if equalTo.Name != "true" { + return + } + + newFuncName, isItError := handleNilComparisonErr(pass, exp, nilable) + + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(newFuncName)) + exp.Args[0].(*ast.CallExpr).Args = nil + + reportNilAssertion(exp, handler, nilable, notEqual, isItError, reportBuilder) +} + +func handleNilBeBoolMatcher(pass *analysis.Pass, exp *ast.CallExpr, handler gomegahandler.Handler, nilable ast.Expr, notEqual bool, reportBuilder *reports.Builder) { + newFuncName, isItError := handleNilComparisonErr(pass, exp, nilable) + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(newFuncName)) + exp.Args[0].(*ast.CallExpr).Args = nil + + reportNilAssertion(exp, handler, nilable, notEqual, isItError, reportBuilder) +} + +func handleNilComparisonErr(pass *analysis.Pass, exp *ast.CallExpr, nilable ast.Expr) (string, bool) { + newFuncName := beNil + isItError := isExprError(pass, nilable) + if isItError { + if _, ok := nilable.(*ast.CallExpr); ok { + newFuncName = succeed + } else { + reverseAssertionFuncLogic(exp) + newFuncName = haveOccurred + } + } + + return newFuncName, isItError +} + +func isAssertionFunc(name string) bool { + switch name { + case "To", "ToNot", "NotTo", "Should", "ShouldNot": + return true + } + return false +} + +func reportLengthAssertion(expr *ast.CallExpr, handler gomegahandler.Handler, reportBuilder *reports.Builder) { + actualExpr := handler.GetActualExpr(expr.Fun.(*ast.SelectorExpr)) + replaceLenActualArg(actualExpr, handler) + + reportBuilder.AddIssue(true, wrongLengthWarningTemplate) +} + +func reportCapAssertion(expr *ast.CallExpr, handler gomegahandler.Handler, reportBuilder *reports.Builder) { + actualExpr := handler.GetActualExpr(expr.Fun.(*ast.SelectorExpr)) + replaceLenActualArg(actualExpr, handler) + + reportBuilder.AddIssue(true, wrongCapWarningTemplate) +} + +func reportNilAssertion(expr *ast.CallExpr, handler gomegahandler.Handler, nilable ast.Expr, notEqual bool, isItError bool, reportBuilder *reports.Builder) { + actualExpr := handler.GetActualExpr(expr.Fun.(*ast.SelectorExpr)) + changed := replaceNilActualArg(actualExpr, handler, nilable) + if !changed { + return + } + + if notEqual { + reverseAssertionFuncLogic(expr) + } + template := wrongNilWarningTemplate + if isItError { + template = wrongErrWarningTemplate + } + + reportBuilder.AddIssue(true, template) +} + +func reportNewName(pass *analysis.Pass, id *ast.Ident, newName string, messageTemplate, oldExpr string) { + pass.Report(analysis.Diagnostic{ + Pos: id.Pos(), + Message: fmt.Sprintf(messageTemplate, newName), + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: fmt.Sprintf("should replace %s with %s", oldExpr, newName), + TextEdits: []analysis.TextEdit{ + { + Pos: id.Pos(), + End: id.End(), + NewText: []byte(newName), + }, + }, + }, + }, + }) +} + +func reportNoFix(pass *analysis.Pass, pos token.Pos, message string, args ...any) { + if len(args) > 0 { + message = fmt.Sprintf(message, args...) 
+ } + + pass.Report(analysis.Diagnostic{ + Pos: pos, + Message: message, + }) +} + +func getNilableFromComparison(actualArg ast.Expr) (ast.Expr, token.Token) { + bin, ok := actualArg.(*ast.BinaryExpr) + if !ok { + return nil, token.ILLEGAL + } + + if bin.Op == token.EQL || bin.Op == token.NEQ { + if isNil(bin.Y) { + return bin.X, bin.Op + } else if isNil(bin.X) { + return bin.Y, bin.Op + } + } + + return nil, token.ILLEGAL +} + +func isNil(expr ast.Expr) bool { + nilObject, ok := expr.(*ast.Ident) + return ok && nilObject.Name == "nil" && nilObject.Obj == nil +} + +func isComparison(pass *analysis.Pass, actualArg ast.Expr) (ast.Expr, ast.Expr, token.Token, bool) { + bin, ok := actualArg.(*ast.BinaryExpr) + if !ok { + return nil, nil, token.ILLEGAL, false + } + + first, second, op := bin.X, bin.Y, bin.Op + replace := false + switch realFirst := first.(type) { + case *ast.Ident: // check if const + info, ok := pass.TypesInfo.Types[realFirst] + if ok { + if _, ok := info.Type.(*gotypes.Basic); ok && info.Value != nil { + replace = true + } + } + + case *ast.BasicLit: + replace = true + } + + if replace { + first, second = second, first + } + + switch op { + case token.EQL: + case token.NEQ: + case token.GTR, token.GEQ, token.LSS, token.LEQ: + if replace { + op = reverseassertion.ChangeCompareOperator(op) + } + default: + return nil, nil, token.ILLEGAL, false + } + return first, second, op, true +} + +func goFmt(fset *token.FileSet, x ast.Expr) string { + var b bytes.Buffer + _ = printer.Fprint(&b, fset, x) + return b.String() +} + +func isExprError(pass *analysis.Pass, expr ast.Expr) bool { + actualArgType := pass.TypesInfo.TypeOf(expr) + switch t := actualArgType.(type) { + case *gotypes.Named: + if interfaces.ImplementsError(actualArgType) { + return true + } + case *gotypes.Tuple: + if t.Len() > 0 { + switch t0 := t.At(0).Type().(type) { + case *gotypes.Named, *gotypes.Pointer: + if interfaces.ImplementsError(t0) { + return true + } + } + } + } + return false +} + +func isPointer(pass *analysis.Pass, expr ast.Expr) bool { + t := pass.TypesInfo.TypeOf(expr) + _, ok := t.(*gotypes.Pointer) + return ok +} + +func isInterface(pass *analysis.Pass, expr ast.Expr) bool { + t := pass.TypesInfo.TypeOf(expr) + return gotypes.IsInterface(t) +} + +func isNumeric(pass *analysis.Pass, node ast.Expr) bool { + t := pass.TypesInfo.TypeOf(node) + + switch t.String() { + case "int", "uint", "int8", "uint8", "int16", "uint16", "int32", "uint32", "int64", "uint64", "float32", "float64": + return true + } + return false +} + +func checkNoAssertion(pass *analysis.Pass, expr *ast.CallExpr, handler gomegahandler.Handler) { + funcName, ok := handler.GetActualFuncName(expr) + if ok { + var allowedFunction string + switch funcName { + case expect, expectWithOffset: + allowedFunction = `"To()", "ToNot()" or "NotTo()"` + case eventually, eventuallyWithOffset, consistently, consistentlyWithOffset: + allowedFunction = `"Should()" or "ShouldNot()"` + case omega: + allowedFunction = `"Should()", "To()", "ShouldNot()", "ToNot()" or "NotTo()"` + default: + return + } + reportNoFix(pass, expr.Pos(), missingAssertionMessage, funcName, allowedFunction) + } +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go b/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go new file mode 100644 index 0000000000..be510c4e95 --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go @@ -0,0 +1,32 @@ +package types + +import ( + "errors" + "strings" +) + +// Boolean is a bool, implementing the 
flag.Value interface, to be used as a flag var. +type Boolean bool + +func (b *Boolean) Set(value string) error { + if b == nil { + return errors.New("trying to set nil parameter") + } + switch strings.ToLower(value) { + case "true": + *b = true + case "false": + *b = false + default: + return errors.New(value + " is not a Boolean value") + + } + return nil +} + +func (b Boolean) String() string { + if b { + return "true" + } + return "false" +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/types/config.go b/vendor/github.com/nunnatsa/ginkgolinter/types/config.go new file mode 100644 index 0000000000..b6838e5244 --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/types/config.go @@ -0,0 +1,99 @@ +package types + +import ( + "go/ast" + "strings" +) + +const ( + suppressPrefix = "ginkgo-linter:" + suppressLengthAssertionWarning = suppressPrefix + "ignore-len-assert-warning" + suppressNilAssertionWarning = suppressPrefix + "ignore-nil-assert-warning" + suppressErrAssertionWarning = suppressPrefix + "ignore-err-assert-warning" + suppressCompareAssertionWarning = suppressPrefix + "ignore-compare-assert-warning" + suppressAsyncAsertWarning = suppressPrefix + "ignore-async-assert-warning" + suppressFocusContainerWarning = suppressPrefix + "ignore-focus-container-warning" + suppressTypeCompareWarning = suppressPrefix + "ignore-type-compare-warning" +) + +type Config struct { + SuppressLen Boolean + SuppressNil Boolean + SuppressErr Boolean + SuppressCompare Boolean + SuppressAsync Boolean + ForbidFocus Boolean + SuppressTypeCompare Boolean + AllowHaveLen0 Boolean + ForceExpectTo Boolean + ValidateAsyncIntervals Boolean + ForbidSpecPollution Boolean +} + +func (s *Config) AllTrue() bool { + return bool(s.SuppressLen && s.SuppressNil && s.SuppressErr && s.SuppressCompare && s.SuppressAsync && !s.ForbidFocus) +} + +func (s *Config) Clone() Config { + return Config{ + SuppressLen: s.SuppressLen, + SuppressNil: s.SuppressNil, + SuppressErr: s.SuppressErr, + SuppressCompare: s.SuppressCompare, + SuppressAsync: s.SuppressAsync, + ForbidFocus: s.ForbidFocus, + SuppressTypeCompare: s.SuppressTypeCompare, + AllowHaveLen0: s.AllowHaveLen0, + ForceExpectTo: s.ForceExpectTo, + ValidateAsyncIntervals: s.ValidateAsyncIntervals, + ForbidSpecPollution: s.ForbidSpecPollution, + } +} + +func (s *Config) UpdateFromComment(commentGroup []*ast.CommentGroup) { + for _, cmntList := range commentGroup { + if s.AllTrue() { + break + } + + for _, cmnt := range cmntList.List { + commentLines := strings.Split(cmnt.Text, "\n") + for _, comment := range commentLines { + comment = strings.TrimPrefix(comment, "//") + comment = strings.TrimPrefix(comment, "/*") + comment = strings.TrimSuffix(comment, "*/") + comment = strings.TrimSpace(comment) + + switch comment { + case suppressLengthAssertionWarning: + s.SuppressLen = true + case suppressNilAssertionWarning: + s.SuppressNil = true + case suppressErrAssertionWarning: + s.SuppressErr = true + case suppressCompareAssertionWarning: + s.SuppressCompare = true + case suppressAsyncAsertWarning: + s.SuppressAsync = true + case suppressFocusContainerWarning: + s.ForbidFocus = false + case suppressTypeCompareWarning: + s.SuppressTypeCompare = true + } + } + } + } +} + +func (s *Config) UpdateFromFile(cm ast.CommentMap) { + + for key, commentGroup := range cm { + if s.AllTrue() { + break + } + + if _, ok := key.(*ast.GenDecl); ok { + s.UpdateFromComment(commentGroup) + } + } +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/version/version.go 
b/vendor/github.com/nunnatsa/ginkgolinter/version/version.go new file mode 100644 index 0000000000..7bf181a8e8 --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/version/version.go @@ -0,0 +1,14 @@ +package version + +var ( + version = "unknown" + gitHash = "unknown" +) + +func Version() string { + return version +} + +func GitHash() string { + return gitHash +} diff --git a/vendor/github.com/pelletier/go-toml/v2/.dockerignore b/vendor/github.com/pelletier/go-toml/v2/.dockerignore new file mode 100644 index 0000000000..7b5883475d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.dockerignore @@ -0,0 +1,2 @@ +cmd/tomll/tomll +cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitattributes b/vendor/github.com/pelletier/go-toml/v2/.gitattributes new file mode 100644 index 0000000000..34a0a21a36 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.gitattributes @@ -0,0 +1,4 @@ +* text=auto + +benchmark/benchmark.toml text eol=lf +testdata/** text eol=lf diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitignore b/vendor/github.com/pelletier/go-toml/v2/.gitignore new file mode 100644 index 0000000000..4b7c4eda3a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.gitignore @@ -0,0 +1,7 @@ +test_program/test_program_bin +fuzz/ +cmd/tomll/tomll +cmd/tomljson/tomljson +cmd/tomltestgen/tomltestgen +dist +tests/ diff --git a/vendor/github.com/pelletier/go-toml/v2/.golangci.toml b/vendor/github.com/pelletier/go-toml/v2/.golangci.toml new file mode 100644 index 0000000000..067db55174 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.golangci.toml @@ -0,0 +1,84 @@ +[service] +golangci-lint-version = "1.39.0" + +[linters-settings.wsl] +allow-assign-and-anything = true + +[linters-settings.exhaustive] +default-signifies-exhaustive = true + +[linters] +disable-all = true +enable = [ + "asciicheck", + "bodyclose", + "cyclop", + "deadcode", + "depguard", + "dogsled", + "dupl", + "durationcheck", + "errcheck", + "errorlint", + "exhaustive", + # "exhaustivestruct", + "exportloopref", + "forbidigo", + # "forcetypeassert", + "funlen", + "gci", + # "gochecknoglobals", + "gochecknoinits", + "gocognit", + "goconst", + "gocritic", + "gocyclo", + "godot", + "godox", + # "goerr113", + "gofmt", + "gofumpt", + "goheader", + "goimports", + "golint", + "gomnd", + # "gomoddirectives", + "gomodguard", + "goprintffuncname", + "gosec", + "gosimple", + "govet", + # "ifshort", + "importas", + "ineffassign", + "lll", + "makezero", + "misspell", + "nakedret", + "nestif", + "nilerr", + # "nlreturn", + "noctx", + "nolintlint", + #"paralleltest", + "prealloc", + "predeclared", + "revive", + "rowserrcheck", + "sqlclosecheck", + "staticcheck", + "structcheck", + "stylecheck", + # "testpackage", + "thelper", + "tparallel", + "typecheck", + "unconvert", + "unparam", + "unused", + "varcheck", + "wastedassign", + "whitespace", + # "wrapcheck", + # "wsl" +] diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml new file mode 100644 index 0000000000..1d8b69e65e --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -0,0 +1,126 @@ +before: + hooks: + - go mod tidy + - go fmt ./... + - go test ./... 
+builds:
+ - id: tomll
+ main: ./cmd/tomll
+ binary: tomll
+ env:
+ - CGO_ENABLED=0
+ flags:
+ - -trimpath
+ ldflags:
+ - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
+ mod_timestamp: '{{ .CommitTimestamp }}'
+ targets:
+ - linux_amd64
+ - linux_arm64
+ - linux_arm
+ - linux_riscv64
+ - windows_amd64
+ - windows_arm64
+ - windows_arm
+ - darwin_amd64
+ - darwin_arm64
+ - id: tomljson
+ main: ./cmd/tomljson
+ binary: tomljson
+ env:
+ - CGO_ENABLED=0
+ flags:
+ - -trimpath
+ ldflags:
+ - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
+ mod_timestamp: '{{ .CommitTimestamp }}'
+ targets:
+ - linux_amd64
+ - linux_arm64
+ - linux_arm
+ - linux_riscv64
+ - windows_amd64
+ - windows_arm64
+ - windows_arm
+ - darwin_amd64
+ - darwin_arm64
+ - id: jsontoml
+ main: ./cmd/jsontoml
+ binary: jsontoml
+ env:
+ - CGO_ENABLED=0
+ flags:
+ - -trimpath
+ ldflags:
+ - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
+ mod_timestamp: '{{ .CommitTimestamp }}'
+ targets:
+ - linux_amd64
+ - linux_arm64
+ - linux_riscv64
+ - linux_arm
+ - windows_amd64
+ - windows_arm64
+ - windows_arm
+ - darwin_amd64
+ - darwin_arm64
+universal_binaries:
+ - id: tomll
+ replace: true
+ name_template: tomll
+ - id: tomljson
+ replace: true
+ name_template: tomljson
+ - id: jsontoml
+ replace: true
+ name_template: jsontoml
+archives:
+- id: jsontoml
+ format: tar.xz
+ builds:
+ - jsontoml
+ files:
+ - none*
+ name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
+- id: tomljson
+ format: tar.xz
+ builds:
+ - tomljson
+ files:
+ - none*
+ name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
+- id: tomll
+ format: tar.xz
+ builds:
+ - tomll
+ files:
+ - none*
+ name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
+dockers:
+ - id: tools
+ goos: linux
+ goarch: amd64
+ ids:
+ - jsontoml
+ - tomljson
+ - tomll
+ image_templates:
+ - "ghcr.io/pelletier/go-toml:latest"
+ - "ghcr.io/pelletier/go-toml:{{ .Tag }}"
+ - "ghcr.io/pelletier/go-toml:v{{ .Major }}"
+ skip_push: false
+checksum:
+ name_template: 'sha256sums.txt'
+snapshot:
+ name_template: "{{ incpatch .Version }}-next"
+release:
+ github:
+ owner: pelletier
+ name: go-toml
+ draft: true
+ prerelease: auto
+ mode: replace
+changelog:
+ use: github-native
+announce:
+ skip: true
diff --git a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md new file mode 100644 index 0000000000..96ecf9e2b3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md @@ -0,0 +1,193 @@
+# Contributing
+
+Thank you for your interest in go-toml! We appreciate you considering
+contributing to go-toml!
+
+The main goal of the project is to provide an easy-to-use and efficient TOML
+implementation for Go that gets the job done and gets out of your way – dealing
+with TOML is probably not the central piece of your project.
+
+As the single maintainer of go-toml, time is scarce. All help, big or small, is
+more than welcomed!
+
+## Ask questions
+
+Any question you may have, somebody else might have it too. Always feel free to
+ask them on the [discussion board][discussions]. We will try to answer them as
+clearly and quickly as possible, time permitting.
+
+Asking questions also helps us identify areas where the documentation needs
+improvement, or new features that weren't envisioned before. Sometimes, a
+seemingly innocent question leads to the fix of a bug.
Don't hesitate and ask
+away!
+
+[discussions]: https://github.com/pelletier/go-toml/discussions
+
+## Improve the documentation
+
+The best way to share your knowledge and experience with go-toml is to improve
+the documentation. Fix a typo, clarify an interface, add an example, anything
+goes!
+
+The documentation is present in the [README][readme] and throughout the source
+code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change
+to the documentation, create a pull request with your proposed changes. For
+simple changes like that, the easiest way to go is probably the "Fork this
+project and edit the file" button on Github, displayed at the top right of the
+file. Unless it's a trivial change (for example a typo), provide a little bit of
+context in your pull request description or commit message.
+
+## Report a bug
+
+Found a bug! Sorry to hear that :(. Help us and others track it down and fix it
+by reporting it. [File a new bug report][bug-report] on the [issues
+tracker][issues-tracker]. The template should provide enough guidance on what to
+include. When in doubt: add more details! By reducing ambiguity and providing
+more information, it decreases back and forth and saves everyone time.
+
+## Code changes
+
+Want to contribute a patch? Very happy to hear that!
+
+First, some high-level rules:
+
+- A short proposal with some POC code is better than a lengthy piece of text
+ with no code. Code speaks louder than words. That being said, bigger changes
+ should probably start with a [discussion][discussions].
+- No backward-incompatible patch will be accepted unless discussed. Sometimes
+ it's hard, but we try not to break people's programs unless we absolutely have
+ to.
+- If you are writing a new feature or extending an existing one, make sure to
+ write some documentation.
+- Bug fixes need to be accompanied by regression tests.
+- New code needs to be tested.
+- Your commit messages need to explain why the change is needed, even if already
+ included in the PR description.
+
+It does sound like a lot, but those best practices are here to save time overall
+and continuously improve the quality of the project, which is something everyone
+benefits from.
+
+### Get started
+
+The fairly standard code contribution process looks like this:
+
+1. [Fork the project][fork].
+2. Make your changes, commit on any branch you like.
+3. [Open up a pull request][pull-request].
+4. Review, potentially ask for changes.
+5. Merge.
+
+Feel free to ask for help! You can create draft pull requests to gather
+some early feedback!
+
+### Run the tests
+
+You can run tests for go-toml using Go's test tool: `go test -race ./...`.
+
+During the pull request process, all tests will be run on Linux, Windows, and
+MacOS on the last two versions of Go.
+
+However, given GitHub's new policy to _not_ run Actions on pull requests until a
+maintainer clicks a button, it is highly recommended that you run them locally
+as you make changes.
+
+### Check coverage
+
+We use `go tool cover` to compute test coverage. Most code editors have a way to
+run and display code coverage, but at the end of the day, we do this:
+
+```
+go test -covermode=atomic -coverprofile=coverage.out
+go tool cover -func=coverage.out
+```
+
+and verify that the overall percentage of tested code does not go down. This is
+a requirement. As a rule of thumb, all lines of code touched by your changes
+should be covered. On Unix you can use `./ci.sh coverage -d v2` to check if your
+code lowers the coverage.
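For a visual, line-by-line view of the same profile, the standard Go toolchain can also render it as HTML in a browser (these are stock `go tool cover` flags, nothing specific to go-toml):

```
go test -covermode=atomic -coverprofile=coverage.out ./...
go tool cover -html=coverage.out
```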
+
+### Verify performance
+
+Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's
+built-in benchmark system. Because of their noisy nature, containers provided by
+Github Actions cannot be reliably used for benchmarking. As a result, you are
+responsible for checking that your changes do not incur a performance penalty.
+You can run the following to execute benchmarks:
+
+```
+go test ./... -bench=. -count=10
+```
+
+Benchmark results should be compared against each other with
+[benchstat][benchstat]. A typical flow looks like this:
+
+1. On the `v2` branch, run `go test ./... -bench=. -count 10` and save output to
+ a file (for example `old.txt`).
+2. Make some code changes.
+3. Run the same `go test` command again, and save the output to another file
+ (for example `new.txt`).
+4. Run `benchstat old.txt new.txt` to check that time/op does not go up in any
+ test.
+
+On Unix you can use `./ci.sh benchmark -d v2` to verify how your code impacts
+performance.
+
+It is highly encouraged to add the benchstat results to your pull request
+description. Pull requests that lower performance will receive more scrutiny.
+
+[benchstat]: https://pkg.go.dev/golang.org/x/perf/cmd/benchstat
+
+### Style
+
+Try to look around and follow the same format and structure as the rest of the
+code. We enforce using `go fmt` on the whole code base.
+
+---
+
+## Maintainers-only
+
+### Merge pull request
+
+Checklist:
+
+- Passing CI.
+- Does not introduce backward-incompatible changes (unless discussed).
+- Has relevant doc changes.
+- Benchstat does not show performance regression.
+- Pull request is [labeled appropriately][pr-labels].
+- Title will be understandable in the changelog.
+
+1. Merge using "squash and merge".
+2. Make sure to edit the commit message to keep all the useful information
+ nice and clean.
+3. Make sure the commit title is clear and contains the PR number (#123).
+
+### New release
+
+1. Decide on the next version number. Use semver. Review commits since last
+ version to assess.
+2. Tag release. For example:
+```
+git checkout v2
+git pull
+git tag v2.2.0
+git push --tags
+```
+3. CI automatically builds a draft Github release. Review it and edit as
+ necessary. Look for "Other changes". That would indicate a pull request not
+ labeled properly. Tweak labels and pull request titles until the changelog
+ looks good for users.
+4. Check the "create discussion" box, in the "Releases" category.
+5. If the new version is an alpha or beta only, check the pre-release box.
+
+
+[issues-tracker]: https://github.com/pelletier/go-toml/issues
+[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
+[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml
+[readme]: ./README.md
+[fork]: https://help.github.com/articles/fork-a-repo
+[pull-request]: https://help.github.com/en/articles/creating-a-pull-request
+[new-release]: https://github.com/pelletier/go-toml/releases/new
+[gh]: https://github.com/cli/cli
+[pr-labels]: https://github.com/pelletier/go-toml/blob/v2/.github/release.yml
diff --git a/vendor/github.com/pelletier/go-toml/v2/Dockerfile b/vendor/github.com/pelletier/go-toml/v2/Dockerfile new file mode 100644 index 0000000000..b9e9332379 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/Dockerfile @@ -0,0 +1,5 @@
+FROM scratch
+ENV PATH "$PATH:/bin"
+COPY tomll /bin/tomll
+COPY tomljson /bin/tomljson
+COPY jsontoml /bin/jsontoml
diff --git a/vendor/github.com/pelletier/go-toml/v2/LICENSE b/vendor/github.com/pelletier/go-toml/v2/LICENSE new file mode 100644 index 0000000000..991e2ae966 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/LICENSE @@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+go-toml v2
+Copyright (c) 2021 - 2023 Thomas Pelletier
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md new file mode 100644 index 0000000000..d964b25fe1 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -0,0 +1,576 @@
+# go-toml v2
+
+Go library for the [TOML](https://toml.io/en/) format.
+
+This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0).
+
+[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues)
+
+[💬 Anything else](https://github.com/pelletier/go-toml/discussions)
+
+## Documentation
+
+Full API, examples, and implementation notes are available in the Go
+documentation.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml/v2.svg)](https://pkg.go.dev/github.com/pelletier/go-toml/v2)
+
+## Import
+
+```go
+import "github.com/pelletier/go-toml/v2"
+```
+
+See [Modules](#Modules).
+
+## Features
+
+### Stdlib behavior
+
+As much as possible, this library is designed to behave similarly to the
+standard library's `encoding/json`.
+
+### Performance
+
+While go-toml favors usability, it is written with performance in mind. Most
+operations should not be shockingly slow. See [benchmarks](#benchmarks).
+
+### Strict mode
+
+`Decoder` can be set to "strict mode", which makes it error when some parts of
+the TOML document were not present in the target structure. This is a great way
+to check for typos. [See example in the documentation][strict].
+
+[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields
+
+### Contextualized errors
+
+When most decoding errors occur, go-toml returns [`DecodeError`][decode-err],
+which contains a human-readable, contextualized version of the error. For
+example:
+
+```
+1| [server]
+2| path = 100
+ | ~~~ cannot decode TOML integer into struct field toml_test.Server.Path of type string
+3| port = 50
+```
+
+[decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError
+
+### Local date and time support
+
+TOML supports native [local date/times][ldt]. It allows representing a given
+date, time, or date-time without relation to a timezone or offset. To support
+this use-case, go-toml provides [`LocalDate`][tld], [`LocalTime`][tlt], and
+[`LocalDateTime`][tldt]. Those types can be transformed to and from `time.Time`,
+making them convenient yet unambiguous structures for their respective TOML
+representation.
+
+[ldt]: https://toml.io/en/v1.0.0#local-date-time
+[tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate
+[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime
+[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime
+
+### Commented config
+
+Since TOML is often used for configuration files, go-toml can emit documents
+annotated with [comments and commented-out values][comments-example]. For
+example, it can generate the following file:
+
+```toml
+# Host IP to connect to.
+host = '127.0.0.1'
+# Port of the remote server.
+port = 4242
+
+# Encryption parameters (optional)
+# [TLS]
+# cipher = 'AEAD-AES128-GCM-SHA256'
+# version = 'TLS 1.3'
+```
+
+[comments-example]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Marshal-Commented
+
+## Getting started
+
+Given the following struct, let's see how to read it and write it as TOML:
+
+```go
+type MyConfig struct {
+    Version int
+    Name    string
+    Tags    []string
+}
+```
+
+### Unmarshaling
+
+[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its
+content. For example:
+
+```go
+doc := `
+version = 2
+name = "go-toml"
+tags = ["go", "toml"]
+`
+
+var cfg MyConfig
+err := toml.Unmarshal([]byte(doc), &cfg)
+if err != nil {
+    panic(err)
+}
+fmt.Println("version:", cfg.Version)
+fmt.Println("name:", cfg.Name)
+fmt.Println("tags:", cfg.Tags)
+
+// Output:
+// version: 2
+// name: go-toml
+// tags: [go toml]
+```
+
+[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal
+
+### Marshaling
+
+[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure
+as a TOML document:
+
+```go
+cfg := MyConfig{
+    Version: 2,
+    Name:    "go-toml",
+    Tags:    []string{"go", "toml"},
+}
+
+b, err := toml.Marshal(cfg)
+if err != nil {
+    panic(err)
+}
+fmt.Println(string(b))
+
+// Output:
+// Version = 2
+// Name = 'go-toml'
+// Tags = ['go', 'toml']
+```
+
+[marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal
+
+## Unstable API
+
+This API does not yet follow the backward compatibility guarantees of this
+library. It provides early access to features that may have rough edges or an
+API subject to change.
+
+### Parser
+
+Parser is the unstable API that allows iterative parsing of a TOML document at
+the AST level.
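As a rough sketch of what that iteration can look like (a minimal example based on the `unstable` package documentation; since the API is unstable, details may change):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2/unstable"
)

func main() {
	doc := []byte("a = 1\n[table]\nb = 'two'\n")

	// The parser walks the document one top-level expression at a time
	// (key/values, tables, array tables) without building a full tree.
	var p unstable.Parser
	p.Reset(doc)
	for p.NextExpression() {
		e := p.Expression()
		fmt.Printf("kind=%s data=%q\n", e.Kind, e.Data)
	}
	if err := p.Error(); err != nil {
		panic(err)
	}
}
```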
See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable.
+
+## Benchmarks
+
+Execution time speedup compared to other Go TOML libraries:
+
+<table>
+    <thead>
+        <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
+    </thead>
+    <tbody>
+        <tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>2.2x</td></tr>
+        <tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>2.1x</td></tr>
+        <tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>3.0x</td></tr>
+        <tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.7x</td></tr>
+        <tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.7x</td></tr>
+        <tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.6x</td><td>5.1x</td></tr>
+    </tbody>
+</table>
+<details><summary>See more</summary>
+<p>The table above has the results of the most common use-cases. The table below
+contains the results of all benchmarks, including unrealistic ones. It is
+provided for completeness.</p>
+<table>
+    <thead>
+        <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
+    </thead>
+    <tbody>
+        <tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.7x</td></tr>
+        <tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>3.8x</td></tr>
+        <tr><td>Unmarshal/SimpleDocument/map-2</td><td>3.8x</td><td>3.0x</td></tr>
+        <tr><td>Unmarshal/SimpleDocument/struct-2</td><td>5.6x</td><td>4.1x</td></tr>
+        <tr><td>UnmarshalDataset/example-2</td><td>3.0x</td><td>3.2x</td></tr>
+        <tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>2.9x</td></tr>
+        <tr><td>UnmarshalDataset/twitter-2</td><td>2.6x</td><td>2.7x</td></tr>
+        <tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.2x</td><td>2.3x</td></tr>
+        <tr><td>UnmarshalDataset/canada-2</td><td>1.8x</td><td>1.5x</td></tr>
+        <tr><td>UnmarshalDataset/config-2</td><td>4.1x</td><td>2.9x</td></tr>
+        <tr><td>geomean</td><td>2.7x</td><td>2.8x</td></tr>
+    </tbody>
+</table>
+<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>
+</details>
+
+## Modules
+
+go-toml uses Go's standard modules system.
+
+Installation instructions:
+
+- Go ≥ 1.16: Nothing to do. Use the import in your code. The `go` command deals
+ with it automatically.
+- Go ≥ 1.13: `GO111MODULE=on go get github.com/pelletier/go-toml/v2`.
+
+In case of trouble: [Go Modules FAQ][mod-faq].
+
+[mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module
+
+## Tools
+
+Go-toml provides three handy command line tools:
+
+ * `tomljson`: Reads a TOML file and outputs its JSON representation.
+
+ ```
+ $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest
+ $ tomljson --help
+ ```
+
+ * `jsontoml`: Reads a JSON file and outputs a TOML representation.
+
+ ```
+ $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest
+ $ jsontoml --help
+ ```
+
+ * `tomll`: Lints and reformats a TOML file.
+
+ ```
+ $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest
+ $ tomll --help
+ ```
+
+### Docker image
+
+Those tools are also available as a [Docker image][docker]. For example, to use
+`tomljson`:
+
+```
+docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml
+```
+
+Multiple versions are available on [ghcr.io][docker].
+
+[docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml
+
+## Migrating from v1
+
+This section describes the differences between v1 and v2, with some pointers on
+how to get the original behavior when possible.
+
+### Decoding / Unmarshal
+
+#### Automatic field name guessing
+
+When unmarshaling to a struct, if a key in the TOML document does not exactly
+match the name of a struct field or any of the `toml`-tagged fields, v1 tries
+multiple variations of the key ([code][v1-keys]).
+
+V2 instead does case-insensitive matching, like `encoding/json`.
+
+This could impact you if you are relying on casing to differentiate two fields,
+and one of them is not using the `toml` struct tag. The recommended solution
+is to be specific about tag names for those fields using the `toml` struct tag.
+
+[v1-keys]: https://github.com/pelletier/go-toml/blob/a2e52561804c6cd9392ebf0048ca64fe4af67a43/marshal.go#L775-L781
+
+#### Ignore preexisting value in interface
+
+When decoding into a non-nil `interface{}`, go-toml v1 uses the type of the
+element in the interface to decode the object. For example:
+
+```go
+type inner struct {
+    B interface{}
+}
+type doc struct {
+    A interface{}
+}
+
+d := doc{
+    A: inner{
+        B: "Before",
+    },
+}
+
+data := `
+[A]
+B = "After"
+`
+
+toml.Unmarshal([]byte(data), &d)
+fmt.Printf("toml v1: %#v\n", d)
+
+// toml v1: main.doc{A:main.inner{B:"After"}}
+```
+
+In this case, field `A` is of type `interface{}`, containing an `inner` struct.
+V1 sees that type and uses it when decoding the object.
+
+When decoding an object into an `interface{}`, V2 instead disregards whatever
+value the `interface{}` may contain and replaces it with a
+`map[string]interface{}`. With the same data structure as above, here is what
+the result looks like:
+
+```go
+toml.Unmarshal([]byte(data), &d)
+fmt.Printf("toml v2: %#v\n", d)
+
+// toml v2: main.doc{A:map[string]interface {}{"B":"After"}}
+```
+
+This is to match `encoding/json`'s behavior. There is no way to make the v2
+decoder behave like v1.
+
+#### Values out of array bounds ignored
+
+When decoding into an array, v1 returns an error when the number of elements
+contained in the doc exceeds the length of the array.
For example:
+
+```go
+type doc struct {
+    A [2]string
+}
+d := doc{}
+err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
+fmt.Println(err)
+
+// (1, 1): unmarshal: TOML array length (3) exceeds destination array length (2)
+```
+
+In the same situation, v2 ignores the extra value:
+
+```go
+err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
+fmt.Println("err:", err, "d:", d)
+// err: <nil> d: {[one two]}
+```
+
+This is to match `encoding/json`'s behavior. There is no way to make the v2
+decoder behave like v1.
+
+#### Support for `toml.Unmarshaler` has been dropped
+
+This method was not widely used, poorly defined, and added a lot of complexity.
+A similar effect can be achieved by implementing the `encoding.TextUnmarshaler`
+interface and using strings.
+
+#### Support for `default` struct tag has been dropped
+
+This feature added complexity and a poorly defined API for an effect that can be
+accomplished outside of the library.
+
+It does not seem like other format parsers in Go support that feature (the
+project referenced in the original ticket #202 has not been updated since 2017).
+Given that go-toml v2 should not touch values not in the document, the same
+effect can be achieved by pre-filling the struct with defaults (libraries like
+[go-defaults][go-defaults] can help). Also, string representation is not well
+defined for all types: it creates issues like #278.
+
+The recommended replacement is pre-filling the struct before unmarshaling.
+
+[go-defaults]: https://github.com/mcuadros/go-defaults
+
+#### `toml.Tree` replacement
+
+This structure was the initial attempt at providing a document model for
+go-toml. It allows manipulating the structure of any document, and encoding and
+decoding it to and from its TOML representation. While a more robust feature was
+initially planned in go-toml v2, this has ultimately been [removed from the
+scope][nodoc] of this library, with no plan to add it back at the moment. The
+closest equivalent at the moment would be to unmarshal into an `interface{}` and
+use type assertions and/or reflection to manipulate the arbitrary
+structure. However, this would fall short of providing all of the TOML features,
+such as adding comments and being specific about whitespace.
+
+
+#### `toml.Position` is not retrievable anymore
+
+The API for retrieving the position (line, column) of a specific TOML element
+does not exist anymore. This was done to minimize the number of concepts
+introduced by the library (query path), and to avoid the performance hit related
+to storing positions in the absence of a document model, for a feature that
+seemed to have little use. Errors, however, have gained more detailed position
+information. Position retrieval seems better suited to a document model, which
+has been [removed from the scope][nodoc] of go-toml v2 at the moment.
+
+### Encoding / Marshal
+
+#### Default struct fields order
+
+V1 emits struct fields in alphabetical order by default. V2 emits struct fields
+in the order they are defined. For example:
+
+```go
+type S struct {
+    B string
+    A string
+}
+
+data := S{
+    B: "B",
+    A: "A",
+}
+
+b, _ := tomlv1.Marshal(data)
+fmt.Println("v1:\n" + string(b))
+
+b, _ = tomlv2.Marshal(data)
+fmt.Println("v2:\n" + string(b))
+
+// Output:
+// v1:
+// A = "A"
+// B = "B"
+
+// v2:
+// B = 'B'
+// A = 'A'
+```
+
+There is no way to make the v2 encoder behave like v1. A workaround could be to
+manually sort the fields alphabetically in the struct definition, or generate
+struct types using `reflect.StructOf`.
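To illustrate that workaround, here is a minimal sketch; the `sortedValue` helper is hypothetical (not part of go-toml) and uses `reflect.StructOf` to build an alphabetically ordered copy of a struct before marshaling:

```go
package main

import (
	"fmt"
	"reflect"
	"sort"

	toml "github.com/pelletier/go-toml/v2"
)

// sortedValue copies struct value v into a new struct type whose fields are
// declared in alphabetical order, so that v2's definition-order output
// matches v1's alphabetical output.
func sortedValue(v interface{}) interface{} {
	rv := reflect.ValueOf(v)
	rt := rv.Type()

	fields := make([]reflect.StructField, rt.NumField())
	for i := range fields {
		fields[i] = rt.Field(i)
	}
	sort.Slice(fields, func(i, j int) bool { return fields[i].Name < fields[j].Name })

	out := reflect.New(reflect.StructOf(fields)).Elem()
	for _, f := range fields {
		out.FieldByName(f.Name).Set(rv.FieldByName(f.Name))
	}
	return out.Interface()
}

func main() {
	type S struct {
		B string
		A string
	}

	b, _ := toml.Marshal(sortedValue(S{B: "B", A: "A"}))
	fmt.Print(string(b))
	// Output:
	// A = 'A'
	// B = 'B'
}
```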
+
+#### No indentation by default
+
+V1 automatically indents the content of tables by default. V2 does not.
+However, the same behavior can be obtained using
+[`Encoder.SetIndentTables`][sit]. For example:
+
+```go
+data := map[string]interface{}{
+    "table": map[string]string{
+        "key": "value",
+    },
+}
+
+b, _ := tomlv1.Marshal(data)
+fmt.Println("v1:\n" + string(b))
+
+b, _ = tomlv2.Marshal(data)
+fmt.Println("v2:\n" + string(b))
+
+buf := bytes.Buffer{}
+enc := tomlv2.NewEncoder(&buf)
+enc.SetIndentTables(true)
+enc.Encode(data)
+fmt.Println("v2 Encoder:\n" + string(buf.Bytes()))
+
+// Output:
+// v1:
+//
+// [table]
+//   key = "value"
+//
+// v2:
+// [table]
+// key = 'value'
+//
+//
+// v2 Encoder:
+// [table]
+//   key = 'value'
+```
+
+[sit]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Encoder.SetIndentTables
+
+#### Keys and strings are single quoted
+
+V1 always uses double quotes (`"`) around strings and keys that cannot be
+represented bare (unquoted). V2 uses single quotes (`'`) instead by default,
+unless a character cannot be represented that way, in which case it falls back
+to double quotes. As a result of this change, `Encoder.QuoteMapKeys` has been
+removed, as it is not useful anymore.
+
+There is no way to make the v2 encoder behave like v1.
+
+#### `TextMarshaler` emits as a string, not TOML
+
+Types that implement [`encoding.TextMarshaler`][tm] can emit arbitrary TOML in
+v1. The encoder would append the result to the output directly. In v2 the result
+is wrapped in a string. As a result, this interface cannot be implemented by the
+root object.
+
+There is no way to make the v2 encoder behave like v1.
+
+[tm]: https://golang.org/pkg/encoding/#TextMarshaler
+
+#### `Encoder.CompactComments` has been removed
+
+Emitting compact comments is now the default behavior of go-toml. This option
+is not necessary anymore.
+
+#### Struct tags have been merged
+
+V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`,
+`toml`, and `omitempty`. To behave more like the standard library, v2 has merged
+`toml`, `multiline`, `commented`, and `omitempty`. For example:
+
+```go
+type doc struct {
+    // v1
+    F string `toml:"field" multiline:"true" omitempty:"true" commented:"true"`
+    // v2
+    F string `toml:"field,multiline,omitempty,commented"`
+}
+```
+
+As a result, the `Encoder.SetTag*` methods have been removed, as there is just
+one tag now.
+
+#### `Encoder.ArraysWithOneElementPerLine` has been renamed
+
+The new name is `Encoder.SetArraysMultiline`. The behavior should be the same.
+
+#### `Encoder.Indentation` has been renamed
+
+The new name is `Encoder.SetIndentSymbol`. The behavior should be the same.
+
+
+#### Embedded structs behave like stdlib
+
+V1 defaults to merging embedded struct fields into the embedding struct. This
+behavior was unexpected because it does not follow the standard library. To
+avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method was
+added to make the encoder behave correctly. Given that backward compatibility is
+not a problem anymore, v2 does the right thing by default: it follows the
+behavior of `encoding/json`. `Encoder.PromoteAnonymous` has been removed.
+
+[nodoc]: https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038
+
+### `query`
+
+go-toml v1 provided the [`go-toml/query`][query] package. It allowed running
+JSONPath-style queries on TOML files. This feature is not available in v2. For a
+replacement, check out [dasel][dasel].
+
+This package has been removed because it was essentially not supported anymore
+(last commit May 2020), increased the complexity of the code base, and more
+complete solutions exist out there.
+
+[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query
+[dasel]: https://github.com/TomWright/dasel
+
+## Versioning
+
+Except for parts explicitly marked otherwise, go-toml follows [Semantic
+Versioning](https://semver.org). The supported version of
+[TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this
+document. The last two major versions of Go are supported (see [Go Release
+Policy](https://golang.org/doc/devel/release.html#policy)).
+
+## License
+
+The MIT License (MIT). Read [LICENSE](LICENSE).
diff --git a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md new file mode 100644 index 0000000000..d4d554fda9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md @@ -0,0 +1,16 @@
+# Security Policy
+
+## Supported Versions
+
+| Version    | Supported          |
+| ---------- | ------------------ |
+| Latest 2.x | :white_check_mark: |
+| All 1.x    | :x:                |
+| All 0.x    | :x:                |
+
+## Reporting a Vulnerability
+
+Email a vulnerability report to `security@pelletier.codes`. Make sure to include
+as many details as possible to reproduce the vulnerability. This is a
+side-project: I will try to get back to you as quickly as possible, time
+permitting in my personal life. Providing a working patch helps very much!
diff --git a/vendor/github.com/pelletier/go-toml/v2/ci.sh b/vendor/github.com/pelletier/go-toml/v2/ci.sh new file mode 100644 index 0000000000..86217a9b09 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/ci.sh @@ -0,0 +1,284 @@
+#!/usr/bin/env bash
+
+
+stderr() {
+    echo "$@" 1>&2
+}
+
+usage() {
+    b=$(basename "$0")
+    echo $b: ERROR: "$@" 1>&2
+
+    cat 1>&2 < coverage.out
+    go tool cover -func=coverage.out
+    echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2
+    popd
+
+    if [ "${branch}" != "HEAD" ]; then
+        git worktree remove --force "$dir"
+    fi
+}
+
+coverage() {
+    case "$1" in
+    -d)
+        shift
+        target="${1?Need to provide a target branch argument}"
+
+        output_dir="$(mktemp -d)"
+        target_out="${output_dir}/target.txt"
+        head_out="${output_dir}/head.txt"
+
+        cover "${target}" > "${target_out}"
+        cover "HEAD" > "${head_out}"
+
+        cat "${target_out}"
+        cat "${head_out}"
+
+        echo ""
+
+        target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')"
+        head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%/\1/')"
+        echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%"
+
+        delta_pct=$(echo "$head_pct - $target_pct" | bc -l)
+        echo "Delta: ${delta_pct}"
+
+        if [[ $delta_pct = \-* ]]; then
+            echo "Regression!";
+
+            target_diff="${output_dir}/target.diff.txt"
+            head_diff="${output_dir}/head.diff.txt"
+            cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}"
+            cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}"
+
+            diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}"
+            return 1
+        fi
+        return 0
+        ;;
+    esac
+
+    cover "${1-HEAD}"
+}
+
+bench() {
+    branch="${1}"
+    out="${2}"
+    replace="${3}"
+    dir="$(mktemp -d)"
+
+    stderr "Executing benchmark for ${branch} at ${dir}"
+
+    if [ "${branch}" = "HEAD" ]; then
+        cp -r .
"${dir}/" + else + git worktree add "$dir" "$branch" + fi + + pushd "$dir" + + if [ "${replace}" != "" ]; then + find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \; + go get "${replace}" + fi + + export GOMAXPROCS=2 + go test '-bench=^Benchmark(Un)?[mM]arshal' -count=10 -run=Nothing ./... | tee "${out}" + popd + + if [ "${branch}" != "HEAD" ]; then + git worktree remove --force "$dir" + fi +} + +fmktemp() { + if mktemp --version &> /dev/null; then + # GNU + mktemp --suffix=-$1 + else + # BSD + mktemp -t $1 + fi +} + +benchstathtml() { +python3 - $1 <<'EOF' +import sys + +lines = [] +stop = False + +with open(sys.argv[1]) as f: + for line in f.readlines(): + line = line.strip() + if line == "": + stop = True + if not stop: + lines.append(line.split(',')) + +results = [] +for line in reversed(lines[2:]): + if len(line) < 8 or line[0] == "": + continue + v2 = float(line[1]) + results.append([ + line[0].replace("-32", ""), + "%.1fx" % (float(line[3])/v2), # v1 + "%.1fx" % (float(line[7])/v2), # bs + ]) +# move geomean to the end +results.append(results[0]) +del results[0] + + +def printtable(data): + print(""" + + + + + """) + + for r in data: + print(" ".format(*r)) + + print(""" +
Benchmarkgo-toml v1BurntSushi/toml
{}{}{}
""") + + +def match(x): + return "ReferenceFile" in x[0] or "HugoFrontMatter" in x[0] + +above = [x for x in results if match(x)] +below = [x for x in results if not match(x)] + +printtable(above) +print("
See more") +print("""

The table above has the results of the most common use-cases. The table below +contains the results of all benchmarks, including unrealistic ones. It is +provided for completeness.

""") +printtable(below) +print('

This table can be generated with ./ci.sh benchmark -a -html.

') +print("
") + +EOF +} + +benchmark() { + case "$1" in + -d) + shift + target="${1?Need to provide a target branch argument}" + + old=`fmktemp ${target}` + bench "${target}" "${old}" + + new=`fmktemp HEAD` + bench HEAD "${new}" + + benchstat "${old}" "${new}" + return 0 + ;; + -a) + shift + + v2stats=`fmktemp go-toml-v2` + bench HEAD "${v2stats}" "github.com/pelletier/go-toml/v2" + v1stats=`fmktemp go-toml-v1` + bench HEAD "${v1stats}" "github.com/pelletier/go-toml" + bsstats=`fmktemp bs-toml` + bench HEAD "${bsstats}" "github.com/BurntSushi/toml" + + cp "${v2stats}" go-toml-v2.txt + cp "${v1stats}" go-toml-v1.txt + cp "${bsstats}" bs-toml.txt + + if [ "$1" = "-html" ]; then + tmpcsv=`fmktemp csv` + benchstat -format csv go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv + benchstathtml $tmpcsv + else + benchstat go-toml-v2.txt go-toml-v1.txt bs-toml.txt + fi + + rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt + return $? + esac + + bench "${1-HEAD}" `mktemp` +} + +case "$1" in + coverage) shift; coverage $@;; + benchmark) shift; benchmark $@;; + *) usage "bad argument $1";; +esac diff --git a/vendor/github.com/pelletier/go-toml/v2/decode.go b/vendor/github.com/pelletier/go-toml/v2/decode.go new file mode 100644 index 0000000000..f0ec3b1705 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/decode.go @@ -0,0 +1,550 @@ +package toml + +import ( + "fmt" + "math" + "strconv" + "time" + + "github.com/pelletier/go-toml/v2/unstable" +) + +func parseInteger(b []byte) (int64, error) { + if len(b) > 2 && b[0] == '0' { + switch b[1] { + case 'x': + return parseIntHex(b) + case 'b': + return parseIntBin(b) + case 'o': + return parseIntOct(b) + default: + panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1])) + } + } + + return parseIntDec(b) +} + +func parseLocalDate(b []byte) (LocalDate, error) { + // full-date = date-fullyear "-" date-month "-" date-mday + // date-fullyear = 4DIGIT + // date-month = 2DIGIT ; 01-12 + // date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year + var date LocalDate + + if len(b) != 10 || b[4] != '-' || b[7] != '-' { + return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD") + } + + var err error + + date.Year, err = parseDecimalDigits(b[0:4]) + if err != nil { + return LocalDate{}, err + } + + date.Month, err = parseDecimalDigits(b[5:7]) + if err != nil { + return LocalDate{}, err + } + + date.Day, err = parseDecimalDigits(b[8:10]) + if err != nil { + return LocalDate{}, err + } + + if !isValidDate(date.Year, date.Month, date.Day) { + return LocalDate{}, unstable.NewParserError(b, "impossible date") + } + + return date, nil +} + +func parseDecimalDigits(b []byte) (int, error) { + v := 0 + + for i, c := range b { + if c < '0' || c > '9' { + return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)") + } + v *= 10 + v += int(c - '0') + } + + return v, nil +} + +func parseDateTime(b []byte) (time.Time, error) { + // offset-date-time = full-date time-delim full-time + // full-time = partial-time time-offset + // time-offset = "Z" / time-numoffset + // time-numoffset = ( "+" / "-" ) time-hour ":" time-minute + + dt, b, err := parseLocalDateTime(b) + if err != nil { + return time.Time{}, err + } + + var zone *time.Location + + if len(b) == 0 { + // parser should have checked that when assigning the date time node + panic("date time should have a timezone") + } + + if b[0] == 'Z' || b[0] == 'z' { + b = b[1:] + zone = time.UTC + } else { + const dateTimeByteLen = 6 + if len(b) != 
dateTimeByteLen {
+            return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone")
+        }
+        var direction int
+        switch b[0] {
+        case '-':
+            direction = -1
+        case '+':
+            direction = +1
+        default:
+            return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character")
+        }
+
+        if b[3] != ':' {
+            return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator")
+        }
+
+        hours, err := parseDecimalDigits(b[1:3])
+        if err != nil {
+            return time.Time{}, err
+        }
+        if hours > 23 {
+            return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours")
+        }
+
+        minutes, err := parseDecimalDigits(b[4:6])
+        if err != nil {
+            return time.Time{}, err
+        }
+        if minutes > 59 {
+            return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes")
+        }
+
+        seconds := direction * (hours*3600 + minutes*60)
+        if seconds == 0 {
+            zone = time.UTC
+        } else {
+            zone = time.FixedZone("", seconds)
+        }
+        b = b[dateTimeByteLen:]
+    }
+
+    if len(b) > 0 {
+        return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone")
+    }
+
+    t := time.Date(
+        dt.Year,
+        time.Month(dt.Month),
+        dt.Day,
+        dt.Hour,
+        dt.Minute,
+        dt.Second,
+        dt.Nanosecond,
+        zone)
+
+    return t, nil
+}
+
+func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) {
+    var dt LocalDateTime
+
+    const localDateTimeByteMinLen = 11
+    if len(b) < localDateTimeByteMinLen {
+        return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]")
+    }
+
+    date, err := parseLocalDate(b[:10])
+    if err != nil {
+        return dt, nil, err
+    }
+    dt.LocalDate = date
+
+    sep := b[10]
+    if sep != 'T' && sep != ' ' && sep != 't' {
+        return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space")
+    }
+
+    t, rest, err := parseLocalTime(b[11:])
+    if err != nil {
+        return dt, nil, err
+    }
+    dt.LocalTime = t
+
+    return dt, rest, nil
+}
+
+// parseLocalTime is a bit different because it also returns the remaining
+// []byte that it didn't need. This is to allow parseDateTime to parse those
+// remaining bytes as a timezone.
+func parseLocalTime(b []byte) (LocalTime, []byte, error) {
+    var (
+        nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0}
+        t     LocalTime
+    )
+
+    // check that b matches the expected format HH:MM:SS[.NNNNNN]
+    const localTimeByteLen = 8
+    if len(b) < localTimeByteLen {
+        return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]")
+    }
+
+    var err error
+
+    t.Hour, err = parseDecimalDigits(b[0:2])
+    if err != nil {
+        return t, nil, err
+    }
+
+    if t.Hour > 23 {
+        return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater than 23")
+    }
+    if b[2] != ':' {
+        return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes")
+    }
+
+    t.Minute, err = parseDecimalDigits(b[3:5])
+    if err != nil {
+        return t, nil, err
+    }
+    if t.Minute > 59 {
+        return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater than 59")
+    }
+    if b[5] != ':' {
+        return t, nil, unstable.NewParserError(b[5:6], "expecting colon between minutes and seconds")
+    }
+
+    t.Second, err = parseDecimalDigits(b[6:8])
+    if err != nil {
+        return t, nil, err
+    }
+
+    if t.Second > 60 {
+        return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater than 60")
+    }
+
+    b = b[8:]
+
+    if len(b) >= 1 && b[0] == '.'
{ + frac := 0 + precision := 0 + digits := 0 + + for i, c := range b[1:] { + if !isDigit(c) { + if i == 0 { + return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point") + } + break + } + digits++ + + const maxFracPrecision = 9 + if i >= maxFracPrecision { + // go-toml allows decoding fractional seconds + // beyond the supported precision of 9 + // digits. It truncates the fractional component + // to the supported precision and ignores the + // remaining digits. + // + // https://github.com/pelletier/go-toml/discussions/707 + continue + } + + frac *= 10 + frac += int(c - '0') + precision++ + } + + if precision == 0 { + return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit") + } + + t.Nanosecond = frac * nspow[precision] + t.Precision = precision + + return t, b[1+digits:], nil + } + return t, b, nil +} + +//nolint:cyclop +func parseFloat(b []byte) (float64, error) { + if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' { + return math.NaN(), nil + } + + cleaned, err := checkAndRemoveUnderscoresFloats(b) + if err != nil { + return 0, err + } + + if cleaned[0] == '.' { + return 0, unstable.NewParserError(b, "float cannot start with a dot") + } + + if cleaned[len(cleaned)-1] == '.' { + return 0, unstable.NewParserError(b, "float cannot end with a dot") + } + + dotAlreadySeen := false + for i, c := range cleaned { + if c == '.' { + if dotAlreadySeen { + return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point") + } + if !isDigit(cleaned[i-1]) { + return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit") + } + if !isDigit(cleaned[i+1]) { + return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit") + } + dotAlreadySeen = true + } + } + + start := 0 + if cleaned[0] == '+' || cleaned[0] == '-' { + start = 1 + } + if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) { + return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes") + } + + f, err := strconv.ParseFloat(string(cleaned), 64) + if err != nil { + return 0, unstable.NewParserError(b, "unable to parse float: %w", err) + } + + return f, nil +} + +func parseIntHex(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 16, 64) + if err != nil { + return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err) + } + + return i, nil +} + +func parseIntOct(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 8, 64) + if err != nil { + return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err) + } + + return i, nil +} + +func parseIntBin(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 2, 64) + if err != nil { + return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err) + } + + return i, nil +} + +func isSign(b byte) bool { + return b == '+' || b == '-' +} + +func parseIntDec(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b) + if err != nil { + return 0, err + } + + startIdx := 0 + + if isSign(cleaned[0]) { + startIdx++ + } + + if len(cleaned) > 
startIdx+1 && cleaned[startIdx] == '0' { + return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number") + } + + i, err := strconv.ParseInt(string(cleaned), 10, 64) + if err != nil { + return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err) + } + + return i, nil +} + +func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) { + start := 0 + if b[start] == '+' || b[start] == '-' { + start++ + } + + if len(b) == start { + return b, nil + } + + if b[start] == '_' { + return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore") + } + + if b[len(b)-1] == '_' { + return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") + } + + // fast path + i := 0 + for ; i < len(b); i++ { + if b[i] == '_' { + break + } + } + if i == len(b) { + return b, nil + } + + before := false + cleaned := make([]byte, i, len(b)) + copy(cleaned, b) + + for i++; i < len(b); i++ { + c := b[i] + if c == '_' { + if !before { + return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") + } + before = false + } else { + before = true + cleaned = append(cleaned, c) + } + } + + return cleaned, nil +} + +func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { + if b[0] == '_' { + return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore") + } + + if b[len(b)-1] == '_' { + return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") + } + + // fast path + i := 0 + for ; i < len(b); i++ { + if b[i] == '_' { + break + } + } + if i == len(b) { + return b, nil + } + + before := false + cleaned := make([]byte, 0, len(b)) + + for i := 0; i < len(b); i++ { + c := b[i] + + switch c { + case '_': + if !before { + return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") + } + if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') { + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent") + } + before = false + case '+', '-': + // signed exponents + cleaned = append(cleaned, c) + before = false + case 'e', 'E': + if i < len(b)-1 && b[i+1] == '_' { + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent") + } + cleaned = append(cleaned, c) + case '.': + if i < len(b)-1 && b[i+1] == '_' { + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point") + } + if i > 0 && b[i-1] == '_' { + return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point") + } + cleaned = append(cleaned, c) + default: + before = true + cleaned = append(cleaned, c) + } + } + + return cleaned, nil +} + +// isValidDate checks if a provided date is a date that exists. +func isValidDate(year int, month int, day int) bool { + return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year) +} + +// daysBefore[m] counts the number of days in a non-leap year +// before month m begins. There is an entry for m=12, counting +// the number of days before January of next year (365). 
+var daysBefore = [...]int32{
+    0,
+    31,
+    31 + 28,
+    31 + 28 + 31,
+    31 + 28 + 31 + 30,
+    31 + 28 + 31 + 30 + 31,
+    31 + 28 + 31 + 30 + 31 + 30,
+    31 + 28 + 31 + 30 + 31 + 30 + 31,
+    31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
+    31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
+    31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
+    31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
+    31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
+}
+
+func daysIn(m int, year int) int {
+    if m == 2 && isLeap(year) {
+        return 29
+    }
+    return int(daysBefore[m] - daysBefore[m-1])
+}
+
+func isLeap(year int) bool {
+    return year%4 == 0 && (year%100 != 0 || year%400 == 0)
+}
+
+func isDigit(r byte) bool {
+    return r >= '0' && r <= '9'
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/doc.go b/vendor/github.com/pelletier/go-toml/v2/doc.go new file mode 100644 index 0000000000..b7bc599bde --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/doc.go @@ -0,0 +1,2 @@
+// Package toml is a library to read and write TOML documents.
+package toml
diff --git a/vendor/github.com/pelletier/go-toml/v2/errors.go b/vendor/github.com/pelletier/go-toml/v2/errors.go new file mode 100644 index 0000000000..309733f1f9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/errors.go @@ -0,0 +1,252 @@
+package toml
+
+import (
+    "fmt"
+    "strconv"
+    "strings"
+
+    "github.com/pelletier/go-toml/v2/internal/danger"
+    "github.com/pelletier/go-toml/v2/unstable"
+)
+
+// DecodeError represents an error encountered during the parsing or decoding
+// of a TOML document.
+//
+// In addition to the error message, it contains the position in the document
+// where it happened, as well as a human-readable representation that shows
+// where the error occurred in the document.
+type DecodeError struct {
+    message string
+    line    int
+    column  int
+    key     Key
+
+    human string
+}
+
+// StrictMissingError occurs in a TOML document that does not have a
+// corresponding field in the target value. It contains all the missing fields
+// in Errors.
+//
+// Emitted by Decoder when DisallowUnknownFields() was called.
+type StrictMissingError struct {
+    // One error per field that could not be found.
+    Errors []DecodeError
+}
+
+// Error returns the canonical string for this error.
+func (s *StrictMissingError) Error() string {
+    return "strict mode: fields in the document are missing in the target struct"
+}
+
+// String returns a human-readable description of all errors.
+func (s *StrictMissingError) String() string {
+    var buf strings.Builder
+
+    for i, e := range s.Errors {
+        if i > 0 {
+            buf.WriteString("\n---\n")
+        }
+
+        buf.WriteString(e.String())
+    }
+
+    return buf.String()
+}
+
+type Key []string
+
+// Error returns the error message contained in the DecodeError.
+func (e *DecodeError) Error() string {
+    return "toml: " + e.message
+}
+
+// String returns the human-readable contextualized error. This string is multi-line.
+func (e *DecodeError) String() string {
+    return e.human
+}
+
+// Position returns the (line, column) pair indicating where the error
+// occurred in the document. Positions are 1-indexed.
+func (e *DecodeError) Position() (row int, column int) {
+    return e.line, e.column
+}
+
+// Key that was being processed when the error occurred. The key is present only
+// if this DecodeError is part of a StrictMissingError.
+func (e *DecodeError) Key() Key {
+    return e.key
+}
+
+// wrapDecodeError creates a DecodeError referencing a highlighted
+// range of bytes from document.
+// +// highlight needs to be a sub-slice of document, or this function panics. +// +// The function copies all bytes used in DecodeError, so that document and +// highlight can be freely deallocated. +// +//nolint:funlen +func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError { + offset := danger.SubsliceOffset(document, de.Highlight) + + errMessage := de.Error() + errLine, errColumn := positionAtEnd(document[:offset]) + before, after := linesOfContext(document, de.Highlight, offset, 3) + + var buf strings.Builder + + maxLine := errLine + len(after) - 1 + lineColumnWidth := len(strconv.Itoa(maxLine)) + + // Write the lines of context strictly before the error. + for i := len(before) - 1; i > 0; i-- { + line := errLine - i + buf.WriteString(formatLineNumber(line, lineColumnWidth)) + buf.WriteString("|") + + if len(before[i]) > 0 { + buf.WriteString(" ") + buf.Write(before[i]) + } + + buf.WriteRune('\n') + } + + // Write the document line that contains the error. + + buf.WriteString(formatLineNumber(errLine, lineColumnWidth)) + buf.WriteString("| ") + + if len(before) > 0 { + buf.Write(before[0]) + } + + buf.Write(de.Highlight) + + if len(after) > 0 { + buf.Write(after[0]) + } + + buf.WriteRune('\n') + + // Write the line with the error message itself (so it does not have a line + // number). + + buf.WriteString(strings.Repeat(" ", lineColumnWidth)) + buf.WriteString("| ") + + if len(before) > 0 { + buf.WriteString(strings.Repeat(" ", len(before[0]))) + } + + buf.WriteString(strings.Repeat("~", len(de.Highlight))) + + if len(errMessage) > 0 { + buf.WriteString(" ") + buf.WriteString(errMessage) + } + + // Write the lines of context strictly after the error. + + for i := 1; i < len(after); i++ { + buf.WriteRune('\n') + line := errLine + i + buf.WriteString(formatLineNumber(line, lineColumnWidth)) + buf.WriteString("|") + + if len(after[i]) > 0 { + buf.WriteString(" ") + buf.Write(after[i]) + } + } + + return &DecodeError{ + message: errMessage, + line: errLine, + column: errColumn, + key: de.Key, + human: buf.String(), + } +} + +func formatLineNumber(line int, width int) string { + format := "%" + strconv.Itoa(width) + "d" + + return fmt.Sprintf(format, line) +} + +func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) { + return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround) +} + +func beforeLines(document []byte, offset int, linesAround int) [][]byte { + var beforeLines [][]byte + + // Walk the document backward from the highlight to find previous lines + // of context. + rest := document[:offset] +backward: + for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; { + switch { + case rest[o] == '\n': + // handle individual lines + beforeLines = append(beforeLines, rest[o+1:]) + rest = rest[:o] + o = len(rest) - 1 + case o == 0: + // add the first line only if it's non-empty + beforeLines = append(beforeLines, rest) + + break backward + default: + o-- + } + } + + return beforeLines +} + +func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte { + var afterLines [][]byte + + // Walk the document forward from the highlight to find the following + // lines of context. 
+ rest := document[offset+len(highlight):] +forward: + for o := 0; o < len(rest) && len(afterLines) <= linesAround; { + switch { + case rest[o] == '\n': + // handle individual lines + afterLines = append(afterLines, rest[:o]) + rest = rest[o+1:] + o = 0 + + case o == len(rest)-1: + // add last line only if it's non-empty + afterLines = append(afterLines, rest) + + break forward + default: + o++ + } + } + + return afterLines +} + +func positionAtEnd(b []byte) (row int, column int) { + row = 1 + column = 1 + + for _, c := range b { + if c == '\n' { + row++ + column = 1 + } else { + column++ + } + } + + return +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go b/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go new file mode 100644 index 0000000000..80f698db4b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go @@ -0,0 +1,42 @@ +package characters + +var invalidAsciiTable = [256]bool{ + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + // 0x09 TAB + // 0x0A LF + 0x0B: true, + 0x0C: true, + // 0x0D CR + 0x0E: true, + 0x0F: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1A: true, + 0x1B: true, + 0x1C: true, + 0x1D: true, + 0x1E: true, + 0x1F: true, + // 0x20 - 0x7E Printable ASCII characters + 0x7F: true, +} + +func InvalidAscii(b byte) bool { + return invalidAsciiTable[b] +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go b/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go new file mode 100644 index 0000000000..db4f45acbf --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go @@ -0,0 +1,199 @@ +package characters + +import ( + "unicode/utf8" +) + +type utf8Err struct { + Index int + Size int +} + +func (u utf8Err) Zero() bool { + return u.Size == 0 +} + +// Verified that a given string is only made of valid UTF-8 characters allowed +// by the TOML spec: +// +// Any Unicode character may be used except those that must be escaped: +// quotation mark, backslash, and the control characters other than tab (U+0000 +// to U+0008, U+000A to U+001F, U+007F). +// +// It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early +// when a character is not allowed. +// +// The returned utf8Err is Zero() if the string is valid, or contains the byte +// index and size of the invalid character. +// +// quotation mark => already checked +// backslash => already checked +// 0-0x8 => invalid +// 0x9 => tab, ok +// 0xA - 0x1F => invalid +// 0x7F => invalid +func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { + // Fast path. Check for and skip 8 bytes of ASCII characters per iteration. + offset := 0 + for len(p) >= 8 { + // Combining two 32 bit loads allows the same code to be used + // for 32 and 64 bit platforms. + // The compiler can generate a 32bit load for first32 and second32 + // on many platforms. See test/codegen/memcombine.go. + first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 + second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24 + if (first32|second32)&0x80808080 != 0 { + // Found a non ASCII byte (>= RuneSelf). 
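The ASCII table and the UTF-8 walker above enforce the TOML character rules: any valid UTF-8 rune is allowed except the control characters other than tab (CR and LF are handled separately as line terminators). A standalone sketch of the same predicate using only the standard library:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// allowedTomlBytes reports whether p contains only runes a TOML document
// may carry verbatim: valid UTF-8, with no control characters except
// tab, LF, and CR.
func allowedTomlBytes(p []byte) bool {
	for len(p) > 0 {
		r, size := utf8.DecodeRune(p)
		if r == utf8.RuneError && size == 1 {
			return false // invalid UTF-8 sequence
		}
		switch {
		case r == '\t', r == '\n', r == '\r':
			// allowed control characters
		case r < 0x20 || r == 0x7F:
			return false // other control characters are forbidden
		}
		p = p[size:]
	}
	return true
}

func main() {
	fmt.Println(allowedTomlBytes([]byte("key = 'value'\n"))) // true
	fmt.Println(allowedTomlBytes([]byte{0x01}))              // false
	fmt.Println(allowedTomlBytes([]byte{0xFF}))              // false
}
```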
+ break + } + + for i, b := range p[:8] { + if InvalidAscii(b) { + err.Index = offset + i + err.Size = 1 + return + } + } + + p = p[8:] + offset += 8 + } + n := len(p) + for i := 0; i < n; { + pi := p[i] + if pi < utf8.RuneSelf { + if InvalidAscii(pi) { + err.Index = offset + i + err.Size = 1 + return + } + i++ + continue + } + x := first[pi] + if x == xx { + // Illegal starter byte. + err.Index = offset + i + err.Size = 1 + return + } + size := int(x & 7) + if i+size > n { + // Short or invalid. + err.Index = offset + i + err.Size = n - i + return + } + accept := acceptRanges[x>>4] + if c := p[i+1]; c < accept.lo || accept.hi < c { + err.Index = offset + i + err.Size = 2 + return + } else if size == 2 { + } else if c := p[i+2]; c < locb || hicb < c { + err.Index = offset + i + err.Size = 3 + return + } else if size == 3 { + } else if c := p[i+3]; c < locb || hicb < c { + err.Index = offset + i + err.Size = 4 + return + } + i += size + } + return +} + +// Return the size of the next rune if valid, 0 otherwise. +func Utf8ValidNext(p []byte) int { + c := p[0] + + if c < utf8.RuneSelf { + if InvalidAscii(c) { + return 0 + } + return 1 + } + + x := first[c] + if x == xx { + // Illegal starter byte. + return 0 + } + size := int(x & 7) + if size > len(p) { + // Short or invalid. + return 0 + } + accept := acceptRanges[x>>4] + if c := p[1]; c < accept.lo || accept.hi < c { + return 0 + } else if size == 2 { + } else if c := p[2]; c < locb || hicb < c { + return 0 + } else if size == 3 { + } else if c := p[3]; c < locb || hicb < c { + return 0 + } + + return size +} + +// acceptRange gives the range of valid values for the second byte in a UTF-8 +// sequence. +type acceptRange struct { + lo uint8 // lowest value for second byte. + hi uint8 // highest value for second byte. +} + +// acceptRanges has size 16 to avoid bounds checks in the code that uses it. +var acceptRanges = [16]acceptRange{ + 0: {locb, hicb}, + 1: {0xA0, hicb}, + 2: {locb, 0x9F}, + 3: {0x90, hicb}, + 4: {locb, 0x8F}, +} + +// first is information about the first byte in a UTF-8 sequence. +var first = [256]uint8{ + // 1 2 3 4 5 6 7 8 9 A B C D E F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F + // 1 2 3 4 5 6 7 8 9 A B C D E F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF + xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF + s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF + s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF + s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF +} + +const ( + // The default lowest and highest continuation byte. 
+ locb = 0b10000000 + hicb = 0b10111111 + + // These names of these constants are chosen to give nice alignment in the + // table below. The first nibble is an index into acceptRanges or F for + // special one-byte cases. The second nibble is the Rune length or the + // Status for the special one-byte case. + xx = 0xF1 // invalid: size 1 + as = 0xF0 // ASCII: size 1 + s1 = 0x02 // accept 0, size 2 + s2 = 0x13 // accept 1, size 3 + s3 = 0x03 // accept 0, size 3 + s4 = 0x23 // accept 2, size 3 + s5 = 0x34 // accept 3, size 4 + s6 = 0x04 // accept 0, size 4 + s7 = 0x44 // accept 4, size 4 +) diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go new file mode 100644 index 0000000000..e38e1131b8 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go @@ -0,0 +1,65 @@ +package danger + +import ( + "fmt" + "reflect" + "unsafe" +) + +const maxInt = uintptr(int(^uint(0) >> 1)) + +func SubsliceOffset(data []byte, subslice []byte) int { + datap := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice)) + + if hlp.Data < datap.Data { + panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data)) + } + offset := hlp.Data - datap.Data + + if offset > maxInt { + panic(fmt.Errorf("slice offset larger than int (%d)", offset)) + } + + intoffset := int(offset) + + if intoffset > datap.Len { + panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len)) + } + + if intoffset+hlp.Len > datap.Len { + panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len)) + } + + return intoffset +} + +func BytesRange(start []byte, end []byte) []byte { + if start == nil || end == nil { + panic("cannot call BytesRange with nil") + } + startp := (*reflect.SliceHeader)(unsafe.Pointer(&start)) + endp := (*reflect.SliceHeader)(unsafe.Pointer(&end)) + + if startp.Data > endp.Data { + panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data)) + } + + l := startp.Len + endLen := int(endp.Data-startp.Data) + endp.Len + if endLen > l { + l = endLen + } + + if l > startp.Cap { + panic(fmt.Errorf("range length is larger than capacity")) + } + + return start[:l] +} + +func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer { + // TODO: replace with unsafe.Add when Go 1.17 is released + // https://github.com/golang/go/issues/40481 + return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset)) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go new file mode 100644 index 0000000000..9d41c28a2f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go @@ -0,0 +1,23 @@ +package danger + +import ( + "reflect" + "unsafe" +) + +// typeID is used as key in encoder and decoder caches to enable using +// the optimize runtime.mapaccess2_fast64 function instead of the more +// expensive lookup if we were to use reflect.Type as map key. +// +// typeID holds the pointer to the reflect.Type value, which is unique +// in the program. 
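The `reflect.SliceHeader` arithmetic in `SubsliceOffset` predates `unsafe.SliceData`. For illustration only, here is how the same offset could be computed on Go 1.20+, keeping the function's assumption that `sub` really lies within `data` (the bounds checks of the original are omitted):

```go
package main

import (
	"fmt"
	"unsafe"
)

// offsetWithin returns the index of sub's first byte inside data.
// Like SubsliceOffset, it assumes sub is a sub-slice of data.
func offsetWithin(data, sub []byte) int {
	d := uintptr(unsafe.Pointer(unsafe.SliceData(data)))
	s := uintptr(unsafe.Pointer(unsafe.SliceData(sub)))
	return int(s - d)
}

func main() {
	doc := []byte("key = value")
	fmt.Println(offsetWithin(doc, doc[6:])) // 6
}
```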
+// +// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61 +type TypeID unsafe.Pointer + +func MakeTypeID(t reflect.Type) TypeID { + // reflect.Type has the fields: + // typ unsafe.Pointer + // ptr unsafe.Pointer + return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1]) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go new file mode 100644 index 0000000000..149b17f538 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go @@ -0,0 +1,48 @@ +package tracker + +import "github.com/pelletier/go-toml/v2/unstable" + +// KeyTracker is a tracker that keeps track of the current Key as the AST is +// walked. +type KeyTracker struct { + k []string +} + +// UpdateTable sets the state of the tracker with the AST table node. +func (t *KeyTracker) UpdateTable(node *unstable.Node) { + t.reset() + t.Push(node) +} + +// UpdateArrayTable sets the state of the tracker with the AST array table node. +func (t *KeyTracker) UpdateArrayTable(node *unstable.Node) { + t.reset() + t.Push(node) +} + +// Push the given key on the stack. +func (t *KeyTracker) Push(node *unstable.Node) { + it := node.Key() + for it.Next() { + t.k = append(t.k, string(it.Node().Data)) + } +} + +// Pop key from stack. +func (t *KeyTracker) Pop(node *unstable.Node) { + it := node.Key() + for it.Next() { + t.k = t.k[:len(t.k)-1] + } +} + +// Key returns the current key +func (t *KeyTracker) Key() []string { + k := make([]string, len(t.k)) + copy(k, t.k) + return k +} + +func (t *KeyTracker) reset() { + t.k = t.k[:0] +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go new file mode 100644 index 0000000000..ce7dd4af17 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go @@ -0,0 +1,360 @@ +package tracker + +import ( + "bytes" + "fmt" + "sync" + + "github.com/pelletier/go-toml/v2/unstable" +) + +type keyKind uint8 + +const ( + invalidKind keyKind = iota + valueKind + tableKind + arrayTableKind +) + +func (k keyKind) String() string { + switch k { + case invalidKind: + return "invalid" + case valueKind: + return "value" + case tableKind: + return "table" + case arrayTableKind: + return "array table" + } + panic("missing keyKind string mapping") +} + +// SeenTracker tracks which keys have been seen with which TOML type to flag +// duplicates and mismatches according to the spec. +// +// Each node in the visited tree is represented by an entry. Each entry has an +// identifier, which is provided by a counter. Entries are stored in the array +// entries. As new nodes are discovered (referenced for the first time in the +// TOML document), entries are created and appended to the array. An entry +// points to its parent using its id. +// +// To find whether a given key (sequence of []byte) has already been visited, +// the entries are linearly searched, looking for one with the right name and +// parent id. +// +// Given that all keys appear in the document after their parent, it is +// guaranteed that all descendants of a node are stored after the node, this +// speeds up the search process. +// +// When encountering [[array tables]], the descendants of that node are removed +// to allow that branch of the tree to be "rediscovered". To maintain the +// invariant above, the deletion process needs to keep the order of entries. +// This results in more copies in that case. 
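`TypeID` exists only to make per-type cache lookups cheap. For context, this is the pattern it accelerates, sketched with a plain `reflect.Type` key (correct, just slower than the pointer-keyed fast path; the `describe` helper is hypothetical):

```go
package main

import (
	"fmt"
	"reflect"
	"sync"
)

// describe computes a per-type summary once and caches it. go-toml keys
// an equivalent cache by TypeID instead of reflect.Type for speed.
var cache sync.Map // reflect.Type -> string

func describe(t reflect.Type) string {
	if v, ok := cache.Load(t); ok {
		return v.(string)
	}
	d := fmt.Sprintf("kind=%s fields=%d", t.Kind(), t.NumField())
	cache.Store(t, d)
	return d
}

func main() {
	type point struct{ X, Y int }
	t := reflect.TypeOf(point{})
	fmt.Println(describe(t)) // computed on first use
	fmt.Println(describe(t)) // served from the cache
}
```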
+type SeenTracker struct { + entries []entry + currentIdx int +} + +var pool sync.Pool + +func (s *SeenTracker) reset() { + // Always contains a root element at index 0. + s.currentIdx = 0 + if len(s.entries) == 0 { + s.entries = make([]entry, 1, 2) + } else { + s.entries = s.entries[:1] + } + s.entries[0].child = -1 + s.entries[0].next = -1 +} + +type entry struct { + // Use -1 to indicate no child or no sibling. + child int + next int + + name []byte + kind keyKind + explicit bool + kv bool +} + +// Find the index of the child of parentIdx with key k. Returns -1 if +// it does not exist. +func (s *SeenTracker) find(parentIdx int, k []byte) int { + for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { + if bytes.Equal(s.entries[i].name, k) { + return i + } + } + return -1 +} + +// Remove all descendants of node at position idx. +func (s *SeenTracker) clear(idx int) { + if idx >= len(s.entries) { + return + } + + for i := s.entries[idx].child; i >= 0; { + next := s.entries[i].next + n := s.entries[0].next + s.entries[0].next = i + s.entries[i].next = n + s.entries[i].name = nil + s.clear(i) + i = next + } + + s.entries[idx].child = -1 +} + +func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int { + e := entry{ + child: -1, + next: s.entries[parentIdx].child, + + name: name, + kind: kind, + explicit: explicit, + kv: kv, + } + var idx int + if s.entries[0].next >= 0 { + idx = s.entries[0].next + s.entries[0].next = s.entries[idx].next + s.entries[idx] = e + } else { + idx = len(s.entries) + s.entries = append(s.entries, e) + } + + s.entries[parentIdx].child = idx + + return idx +} + +func (s *SeenTracker) setExplicitFlag(parentIdx int) { + for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { + if s.entries[i].kv { + s.entries[i].explicit = true + s.entries[i].kv = false + } + s.setExplicitFlag(i) + } +} + +// CheckExpression takes a top-level node and checks that it does not contain +// keys that have been seen in previous calls, and validates that types are +// consistent. It returns true if it is the first time this node's key is seen. +// Useful to clear array tables on first use. +func (s *SeenTracker) CheckExpression(node *unstable.Node) (bool, error) { + if s.entries == nil { + s.reset() + } + switch node.Kind { + case unstable.KeyValue: + return s.checkKeyValue(node) + case unstable.Table: + return s.checkTable(node) + case unstable.ArrayTable: + return s.checkArrayTable(node) + default: + panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind)) + } +} + +func (s *SeenTracker) checkTable(node *unstable.Node) (bool, error) { + if s.currentIdx >= 0 { + s.setExplicitFlag(s.currentIdx) + } + + it := node.Key() + + parentIdx := 0 + + // This code is duplicated in checkArrayTable. This is because factoring + // it in a function requires to copy the iterator, or allocate it to the + // heap, which is not cheap. 
+ for it.Next() { + if it.IsLast() { + break + } + + k := it.Node().Data + + idx := s.find(parentIdx, k) + + if idx < 0 { + idx = s.create(parentIdx, k, tableKind, false, false) + } else { + entry := s.entries[idx] + if entry.kind == valueKind { + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + } + } + parentIdx = idx + } + + k := it.Node().Data + idx := s.find(parentIdx, k) + + first := false + if idx >= 0 { + kind := s.entries[idx].kind + if kind != tableKind { + return false, fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) + } + if s.entries[idx].explicit { + return false, fmt.Errorf("toml: table %s already exists", string(k)) + } + s.entries[idx].explicit = true + } else { + idx = s.create(parentIdx, k, tableKind, true, false) + first = true + } + + s.currentIdx = idx + + return first, nil +} + +func (s *SeenTracker) checkArrayTable(node *unstable.Node) (bool, error) { + if s.currentIdx >= 0 { + s.setExplicitFlag(s.currentIdx) + } + + it := node.Key() + + parentIdx := 0 + + for it.Next() { + if it.IsLast() { + break + } + + k := it.Node().Data + + idx := s.find(parentIdx, k) + + if idx < 0 { + idx = s.create(parentIdx, k, tableKind, false, false) + } else { + entry := s.entries[idx] + if entry.kind == valueKind { + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + } + } + + parentIdx = idx + } + + k := it.Node().Data + idx := s.find(parentIdx, k) + + firstTime := idx < 0 + if firstTime { + idx = s.create(parentIdx, k, arrayTableKind, true, false) + } else { + kind := s.entries[idx].kind + if kind != arrayTableKind { + return false, fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) + } + s.clear(idx) + } + + s.currentIdx = idx + + return firstTime, nil +} + +func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) { + parentIdx := s.currentIdx + it := node.Key() + + for it.Next() { + k := it.Node().Data + + idx := s.find(parentIdx, k) + + if idx < 0 { + idx = s.create(parentIdx, k, tableKind, false, true) + } else { + entry := s.entries[idx] + if it.IsLast() { + return false, fmt.Errorf("toml: key %s is already defined", string(k)) + } else if entry.kind != tableKind { + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + } else if entry.explicit { + return false, fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k)) + } + } + + parentIdx = idx + } + + s.entries[parentIdx].kind = valueKind + + value := node.Value() + + switch value.Kind { + case unstable.InlineTable: + return s.checkInlineTable(value) + case unstable.Array: + return s.checkArray(value) + } + + return false, nil +} + +func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) { + it := node.Children() + for it.Next() { + n := it.Node() + switch n.Kind { + case unstable.InlineTable: + first, err = s.checkInlineTable(n) + if err != nil { + return false, err + } + case unstable.Array: + first, err = s.checkArray(n) + if err != nil { + return false, err + } + } + } + return first, nil +} + +func (s *SeenTracker) checkInlineTable(node *unstable.Node) (first bool, err error) { + if pool.New == nil { + pool.New = func() interface{} { + return &SeenTracker{} + } + } + + s = pool.Get().(*SeenTracker) + s.reset() + + it := node.Children() + for it.Next() { + n := it.Node() + first, err = s.checkKeyValue(n) + if err != nil { + return false, err + } + } + 
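The `SeenTracker` logic above is what surfaces the spec's duplicate-key and table-redefinition errors at decode time. A small sketch of the inputs it accepts and rejects, using the public API:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	var v map[string]interface{}

	// Redefining a key in the same table is an error.
	fmt.Println(toml.Unmarshal([]byte("a = 1\na = 2\n"), &v) != nil) // true

	// Re-opening an explicitly defined table is also an error.
	fmt.Println(toml.Unmarshal([]byte("[t]\nx = 1\n[t]\ny = 2\n"), &v) != nil) // true

	// Array tables may repeat: each [[t]] starts a fresh element.
	fmt.Println(toml.Unmarshal([]byte("[[t]]\nx = 1\n[[t]]\nx = 2\n"), &v)) // <nil>
}
```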
+ // As inline tables are self-contained, the tracker does not + // need to retain the details of what they contain. The + // keyValue element that creates the inline table is kept to + // mark the presence of the inline table and prevent + // redefinition of its keys: check* functions cannot walk into + // a value. + pool.Put(s) + return first, nil +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go new file mode 100644 index 0000000000..bf0317392f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go @@ -0,0 +1 @@ +package tracker diff --git a/vendor/github.com/pelletier/go-toml/v2/localtime.go b/vendor/github.com/pelletier/go-toml/v2/localtime.go new file mode 100644 index 0000000000..a856bfdb0d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/localtime.go @@ -0,0 +1,122 @@ +package toml + +import ( + "fmt" + "strings" + "time" + + "github.com/pelletier/go-toml/v2/unstable" +) + +// LocalDate represents a calendar day in no specific timezone. +type LocalDate struct { + Year int + Month int + Day int +} + +// AsTime converts d into a specific time instance at midnight in zone. +func (d LocalDate) AsTime(zone *time.Location) time.Time { + return time.Date(d.Year, time.Month(d.Month), d.Day, 0, 0, 0, 0, zone) +} + +// String returns RFC 3339 representation of d. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalDate) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalDate) UnmarshalText(b []byte) error { + res, err := parseLocalDate(b) + if err != nil { + return err + } + *d = res + return nil +} + +// LocalTime represents a time of day of no specific day in no specific +// timezone. +type LocalTime struct { + Hour int // Hour of the day: [0; 24[ + Minute int // Minute of the hour: [0; 60[ + Second int // Second of the minute: [0; 60[ + Nanosecond int // Nanoseconds within the second: [0, 1000000000[ + Precision int // Number of digits to display for Nanosecond. +} + +// String returns RFC 3339 representation of d. +// If d.Nanosecond and d.Precision are zero, the time won't have a nanosecond +// component. If d.Nanosecond > 0 but d.Precision = 0, then the minimum number +// of digits for nanoseconds is provided. +func (d LocalTime) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", d.Hour, d.Minute, d.Second) + + if d.Precision > 0 { + s += fmt.Sprintf(".%09d", d.Nanosecond)[:d.Precision+1] + } else if d.Nanosecond > 0 { + // Nanoseconds are specified, but precision is not provided. Use the + // minimum. + s += strings.Trim(fmt.Sprintf(".%09d", d.Nanosecond), "0") + } + + return s +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalTime) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalTime) UnmarshalText(b []byte) error { + res, left, err := parseLocalTime(b) + if err == nil && len(left) != 0 { + err = unstable.NewParserError(left, "extra characters") + } + if err != nil { + return err + } + *d = res + return nil +} + +// LocalDateTime represents a time of a specific day in no specific timezone. +type LocalDateTime struct { + LocalDate + LocalTime +} + +// AsTime converts d into a specific time instance in zone. 
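The `Local*` types model TOML's zone-less dates and times. A short usage sketch of the API defined above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	d := toml.LocalDate{Year: 2024, Month: 2, Day: 29}
	fmt.Println(d.String()) // 2024-02-29

	// AsTime pins the calendar day to midnight in a chosen zone.
	fmt.Println(d.AsTime(time.UTC)) // 2024-02-29 00:00:00 +0000 UTC

	t := toml.LocalTime{Hour: 13, Minute: 37, Second: 5}
	fmt.Println(t.String()) // 13:37:05
}
```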
+func (d LocalDateTime) AsTime(zone *time.Location) time.Time { + return time.Date(d.Year, time.Month(d.Month), d.Day, d.Hour, d.Minute, d.Second, d.Nanosecond, zone) +} + +// String returns RFC 3339 representation of d. +func (d LocalDateTime) String() string { + return d.LocalDate.String() + "T" + d.LocalTime.String() +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalDateTime) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalDateTime) UnmarshalText(data []byte) error { + res, left, err := parseLocalDateTime(data) + if err == nil && len(left) != 0 { + err = unstable.NewParserError(left, "extra characters") + } + if err != nil { + return err + } + + *d = res + return nil +} diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go new file mode 100644 index 0000000000..ffc9927208 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -0,0 +1,1117 @@ +package toml + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + "unicode" + + "github.com/pelletier/go-toml/v2/internal/characters" +) + +// Marshal serializes a Go value as a TOML document. +// +// It is a shortcut for Encoder.Encode() with the default options. +func Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + + err := enc.Encode(v) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// Encoder writes a TOML document to an output stream. +type Encoder struct { + // output + w io.Writer + + // global settings + tablesInline bool + arraysMultiline bool + indentSymbol string + indentTables bool + marshalJsonNumbers bool +} + +// NewEncoder returns a new Encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + indentSymbol: " ", + } +} + +// SetTablesInline forces the encoder to emit all tables inline. +// +// This behavior can be controlled on an individual struct field basis with the +// inline tag: +// +// MyField `toml:",inline"` +func (enc *Encoder) SetTablesInline(inline bool) *Encoder { + enc.tablesInline = inline + return enc +} + +// SetArraysMultiline forces the encoder to emit all arrays with one element per +// line. +// +// This behavior can be controlled on an individual struct field basis with the multiline tag: +// +// MyField `multiline:"true"` +func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder { + enc.arraysMultiline = multiline + return enc +} + +// SetIndentSymbol defines the string that should be used for indentation. The +// provided string is repeated for each indentation level. Defaults to two +// spaces. +func (enc *Encoder) SetIndentSymbol(s string) *Encoder { + enc.indentSymbol = s + return enc +} + +// SetIndentTables forces the encoder to indent tables and array tables. +func (enc *Encoder) SetIndentTables(indent bool) *Encoder { + enc.indentTables = indent + return enc +} + +// SetMarshalJsonNumbers forces the encoder to serialize `json.Number` as a +// float or integer instead of relying on TextMarshaler to emit a string. +// +// *Unstable:* This method does not follow the compatibility guarantees of +// semver. It can be changed or removed without a new major version being +// issued.
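Each encoder option above returns its receiver, so configuration chains naturally. A minimal sketch:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	v := map[string]interface{}{
		"server": map[string]interface{}{
			"ports": []int{8080, 8081},
		},
	}

	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf).
		SetIndentTables(true).
		SetArraysMultiline(true)

	if err := enc.Encode(v); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}
```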
+func (enc *Encoder) SetMarshalJsonNumbers(indent bool) *Encoder { + enc.marshalJsonNumbers = indent + return enc +} + +// Encode writes a TOML representation of v to the stream. +// +// If v cannot be represented to TOML it returns an error. +// +// # Encoding rules +// +// A top level slice containing only maps or structs is encoded as [[table +// array]]. +// +// All slices not matching rule 1 are encoded as [array]. As a result, any map +// or struct they contain is encoded as an {inline table}. +// +// Nil interfaces and nil pointers are not supported. +// +// Keys in key-values always have one part. +// +// Intermediate tables are always printed. +// +// By default, strings are encoded as literal string, unless they contain either +// a newline character or a single quote. In that case they are emitted as +// quoted strings. +// +// Unsigned integers larger than math.MaxInt64 cannot be encoded. Doing so +// results in an error. This rule exists because the TOML specification only +// requires parsers to support at least the 64 bits integer range. Allowing +// larger numbers would create non-standard TOML documents, which may not be +// readable (at best) by other implementations. To encode such numbers, a +// solution is a custom type that implements encoding.TextMarshaler. +// +// When encoding structs, fields are encoded in order of definition, with their +// exact name. +// +// Tables and array tables are separated by empty lines. However, consecutive +// subtables definitions are not. For example: +// +// [top1] +// +// [top2] +// [top2.child1] +// +// [[array]] +// +// [[array]] +// [array.child2] +// +// # Struct tags +// +// The encoding of each public struct field can be customized by the format +// string in the "toml" key of the struct field's tag. This follows +// encoding/json's convention. The format string starts with the name of the +// field, optionally followed by a comma-separated list of options. The name may +// be empty in order to provide options without overriding the default name. +// +// The "multiline" option emits strings as quoted multi-line TOML strings. It +// has no effect on fields that would not be encoded as strings. +// +// The "inline" option turns fields that would be emitted as tables into inline +// tables instead. It has no effect on other fields. +// +// The "omitempty" option prevents empty values or groups from being emitted. +// +// The "commented" option prefixes the value and all its children with a comment +// symbol. +// +// In addition to the "toml" tag struct tag, a "comment" tag can be used to emit +// a TOML comment before the value being annotated. Comments are ignored inside +// inline tables. For array tables, the comment is only present before the first +// element of the array. +func (enc *Encoder) Encode(v interface{}) error { + var ( + b []byte + ctx encoderCtx + ) + + ctx.inline = enc.tablesInline + + if v == nil { + return fmt.Errorf("toml: cannot encode a nil interface") + } + + b, err := enc.encode(b, ctx, reflect.ValueOf(v)) + if err != nil { + return err + } + + _, err = enc.w.Write(b) + if err != nil { + return fmt.Errorf("toml: cannot write: %w", err) + } + + return nil +} + +type valueOptions struct { + multiline bool + omitempty bool + commented bool + comment string +} + +type encoderCtx struct { + // Current top-level key. + parentKey []string + + // Key that should be used for a KV. 
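The struct-tag rules documented above translate into definitions like the following; `Config` and its fields are illustrative, not part of the library:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

// Config is a hypothetical type showing the "toml" and "comment" tags
// described above.
type Config struct {
	Name  string            `toml:"name" comment:"service name"`
	Notes string            `toml:"notes,multiline"`  // quoted multi-line string
	Env   map[string]string `toml:"env,inline"`       // emitted as {inline table}
	Debug bool              `toml:"debug,omitempty"`  // dropped when false
}

func main() {
	b, err := toml.Marshal(Config{
		Name:  "demo",
		Notes: "line one\nline two",
		Env:   map[string]string{"LOG": "debug"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b))
}
```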
+ key string + // Extra flag to account for the empty string + hasKey bool + + // Set to true to indicate that the encoder is inside a KV, so that all + // tables need to be inlined. + insideKv bool + + // Set to true to skip the first table header in an array table. + skipTableHeader bool + + // Should the next table be encoded as inline + inline bool + + // Indentation level + indent int + + // Prefix the current value with a comment. + commented bool + + // Options coming from struct tags + options valueOptions +} + +func (ctx *encoderCtx) shiftKey() { + if ctx.hasKey { + ctx.parentKey = append(ctx.parentKey, ctx.key) + ctx.clearKey() + } +} + +func (ctx *encoderCtx) setKey(k string) { + ctx.key = k + ctx.hasKey = true +} + +func (ctx *encoderCtx) clearKey() { + ctx.key = "" + ctx.hasKey = false +} + +func (ctx *encoderCtx) isRoot() bool { + return len(ctx.parentKey) == 0 && !ctx.hasKey +} + +func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + i := v.Interface() + + switch x := i.(type) { + case time.Time: + if x.Nanosecond() > 0 { + return x.AppendFormat(b, time.RFC3339Nano), nil + } + return x.AppendFormat(b, time.RFC3339), nil + case LocalTime: + return append(b, x.String()...), nil + case LocalDate: + return append(b, x.String()...), nil + case LocalDateTime: + return append(b, x.String()...), nil + case json.Number: + if enc.marshalJsonNumbers { + if x == "" { /// Useful zero value. + return append(b, "0"...), nil + } else if v, err := x.Int64(); err == nil { + return enc.encode(b, ctx, reflect.ValueOf(v)) + } else if f, err := x.Float64(); err == nil { + return enc.encode(b, ctx, reflect.ValueOf(f)) + } else { + return nil, fmt.Errorf("toml: unable to convert %q to int64 or float64", x) + } + } + } + + hasTextMarshaler := v.Type().Implements(textMarshalerType) + if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if !hasTextMarshaler { + v = v.Addr() + } + + if ctx.isRoot() { + return nil, fmt.Errorf("toml: type %s implementing the TextMarshaler interface cannot be a root element", v.Type()) + } + + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return nil, err + } + + b = enc.encodeString(b, string(text), ctx.options) + + return b, nil + } + + switch v.Kind() { + // containers + case reflect.Map: + return enc.encodeMap(b, ctx, v) + case reflect.Struct: + return enc.encodeStruct(b, ctx, v) + case reflect.Slice, reflect.Array: + return enc.encodeSlice(b, ctx, v) + case reflect.Interface: + if v.IsNil() { + return nil, fmt.Errorf("toml: encoding a nil interface is not supported") + } + + return enc.encode(b, ctx, v.Elem()) + case reflect.Ptr: + if v.IsNil() { + return enc.encode(b, ctx, reflect.Zero(v.Type().Elem())) + } + + return enc.encode(b, ctx, v.Elem()) + + // values + case reflect.String: + b = enc.encodeString(b, v.String(), ctx.options) + case reflect.Float32: + f := v.Float() + + if math.IsNaN(f) { + b = append(b, "nan"...) + } else if f > math.MaxFloat32 { + b = append(b, "inf"...) + } else if f < -math.MaxFloat32 { + b = append(b, "-inf"...) + } else if math.Trunc(f) == f { + b = strconv.AppendFloat(b, f, 'f', 1, 32) + } else { + b = strconv.AppendFloat(b, f, 'f', -1, 32) + } + case reflect.Float64: + f := v.Float() + if math.IsNaN(f) { + b = append(b, "nan"...) + } else if f > math.MaxFloat64 { + b = append(b, "inf"...) + } else if f < -math.MaxFloat64 { + b = append(b, "-inf"...) 
+ } else if math.Trunc(f) == f { + b = strconv.AppendFloat(b, f, 'f', 1, 64) + } else { + b = strconv.AppendFloat(b, f, 'f', -1, 64) + } + case reflect.Bool: + if v.Bool() { + b = append(b, "true"...) + } else { + b = append(b, "false"...) + } + case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint: + x := v.Uint() + if x > uint64(math.MaxInt64) { + return nil, fmt.Errorf("toml: not encoding uint (%d) greater than max int64 (%d)", x, int64(math.MaxInt64)) + } + b = strconv.AppendUint(b, x, 10) + case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: + b = strconv.AppendInt(b, v.Int(), 10) + default: + return nil, fmt.Errorf("toml: cannot encode value of type %s", v.Kind()) + } + + return b, nil +} + +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Map: + return v.IsNil() + default: + return false + } +} + +func shouldOmitEmpty(options valueOptions, v reflect.Value) bool { + return options.omitempty && isEmptyValue(v) +} + +func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) { + var err error + + if !ctx.inline { + b = enc.encodeComment(ctx.indent, options.comment, b) + b = enc.commented(ctx.commented, b) + b = enc.indent(ctx.indent, b) + } + + b = enc.encodeKey(b, ctx.key) + b = append(b, " = "...) + + // create a copy of the context because the value of a KV shouldn't + // modify the global context. + subctx := ctx + subctx.insideKv = true + subctx.shiftKey() + subctx.options = options + + b, err = enc.encode(b, subctx, v) + if err != nil { + return nil, err + } + + return b, nil +} + +func (enc *Encoder) commented(commented bool, b []byte) []byte { + if commented { + return append(b, "# "...) + } + return b +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Struct: + return isEmptyStruct(v) + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func isEmptyStruct(v reflect.Value) bool { + // TODO: merge with walkStruct and cache. + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + fieldType := typ.Field(i) + + // only consider exported fields + if fieldType.PkgPath != "" { + continue + } + + tag := fieldType.Tag.Get("toml") + + // special field name to skip field + if tag == "-" { + continue + } + + f := v.Field(i) + + if !isEmptyValue(f) { + return false + } + } + + return true +} + +const literalQuote = '\'' + +func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte { + if needsQuoting(v) { + return enc.encodeQuotedString(options.multiline, b, v) + } + + return enc.encodeLiteralString(b, v) +} + +func needsQuoting(v string) bool { + // TODO: vectorize + for _, b := range []byte(v) { + if b == '\'' || b == '\r' || b == '\n' || characters.InvalidAscii(b) { + return true + } + } + return false +} + +// caller should have checked that the string does not contain new lines or ' . +func (enc *Encoder) encodeLiteralString(b []byte, v string) []byte { + b = append(b, literalQuote) + b = append(b, v...) 
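Two consequences of the numeric branches above are easy to miss: whole floats keep one fractional digit so they re-read as floats, and unsigned values beyond `math.MaxInt64` are refused rather than emitted as non-portable TOML. A sketch, with expected results in comments:

```go
package main

import (
	"fmt"
	"math"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	// Whole floats are emitted with a trailing digit: x = 3.0, not x = 3.
	b, _ := toml.Marshal(map[string]float64{"x": 3.0})
	fmt.Print(string(b)) // x = 3.0

	// Uints above MaxInt64 cannot be represented portably and error out.
	_, err := toml.Marshal(map[string]uint64{"y": math.MaxUint64})
	fmt.Println(err != nil) // true
}
```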
+ b = append(b, literalQuote) + + return b +} + +func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte { + stringQuote := `"` + + if multiline { + stringQuote = `"""` + } + + b = append(b, stringQuote...) + if multiline { + b = append(b, '\n') + } + + const ( + hextable = "0123456789ABCDEF" + // U+0000 to U+0008, U+000A to U+001F, U+007F + nul = 0x0 + bs = 0x8 + lf = 0xa + us = 0x1f + del = 0x7f + ) + + for _, r := range []byte(v) { + switch r { + case '\\': + b = append(b, `\\`...) + case '"': + b = append(b, `\"`...) + case '\b': + b = append(b, `\b`...) + case '\f': + b = append(b, `\f`...) + case '\n': + if multiline { + b = append(b, r) + } else { + b = append(b, `\n`...) + } + case '\r': + b = append(b, `\r`...) + case '\t': + b = append(b, `\t`...) + default: + switch { + case r >= nul && r <= bs, r >= lf && r <= us, r == del: + b = append(b, `\u00`...) + b = append(b, hextable[r>>4]) + b = append(b, hextable[r&0x0f]) + default: + b = append(b, r) + } + } + } + + b = append(b, stringQuote...) + + return b +} + +// caller should have checked that the string is in A-Z / a-z / 0-9 / - / _ . +func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte { + return append(b, v...) +} + +func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) { + if len(ctx.parentKey) == 0 { + return b, nil + } + + b = enc.encodeComment(ctx.indent, ctx.options.comment, b) + + b = enc.commented(ctx.commented, b) + + b = enc.indent(ctx.indent, b) + + b = append(b, '[') + + b = enc.encodeKey(b, ctx.parentKey[0]) + + for _, k := range ctx.parentKey[1:] { + b = append(b, '.') + b = enc.encodeKey(b, k) + } + + b = append(b, "]\n"...) + + return b, nil +} + +//nolint:cyclop +func (enc *Encoder) encodeKey(b []byte, k string) []byte { + needsQuotation := false + cannotUseLiteral := false + + if len(k) == 0 { + return append(b, "''"...) 
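`encodeString` prefers literal strings and falls back to escaped basic strings only when the value contains a single quote, a newline, or another character that `needsQuoting` flags. An illustrative sketch; the outputs in the comments follow from the code above:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	// No quote, newline, or control characters: literal string.
	b, _ := toml.Marshal(map[string]string{"a": "plain value"})
	fmt.Print(string(b)) // a = 'plain value'

	// A single quote or newline forces an escaped basic string.
	b, _ = toml.Marshal(map[string]string{"b": "it's two\nlines"})
	fmt.Print(string(b)) // b = "it's two\nlines"
}
```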
+ } + + for _, c := range k { + if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' { + continue + } + + if c == literalQuote { + cannotUseLiteral = true + } + + needsQuotation = true + } + + if needsQuotation && needsQuoting(k) { + cannotUseLiteral = true + } + + switch { + case cannotUseLiteral: + return enc.encodeQuotedString(false, b, k) + case needsQuotation: + return enc.encodeLiteralString(b, k) + default: + return enc.encodeUnquotedKey(b, k) + } +} + +func (enc *Encoder) keyToString(k reflect.Value) (string, error) { + keyType := k.Type() + switch { + case keyType.Kind() == reflect.String: + return k.String(), nil + + case keyType.Implements(textMarshalerType): + keyB, err := k.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) + } + return string(keyB), nil + } + return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) +} + +func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + var ( + t table + emptyValueOptions valueOptions + ) + + iter := v.MapRange() + for iter.Next() { + v := iter.Value() + + if isNil(v) { + continue + } + + k, err := enc.keyToString(iter.Key()) + if err != nil { + return nil, err + } + + if willConvertToTableOrArrayTable(ctx, v) { + t.pushTable(k, v, emptyValueOptions) + } else { + t.pushKV(k, v, emptyValueOptions) + } + } + + sortEntriesByKey(t.kvs) + sortEntriesByKey(t.tables) + + return enc.encodeTable(b, ctx, t) +} + +func sortEntriesByKey(e []entry) { + sort.Slice(e, func(i, j int) bool { + return e[i].Key < e[j].Key + }) +} + +type entry struct { + Key string + Value reflect.Value + Options valueOptions +} + +type table struct { + kvs []entry + tables []entry +} + +func (t *table) pushKV(k string, v reflect.Value, options valueOptions) { + for _, e := range t.kvs { + if e.Key == k { + return + } + } + + t.kvs = append(t.kvs, entry{Key: k, Value: v, Options: options}) +} + +func (t *table) pushTable(k string, v reflect.Value, options valueOptions) { + for _, e := range t.tables { + if e.Key == k { + return + } + } + t.tables = append(t.tables, entry{Key: k, Value: v, Options: options}) +} + +func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { + // TODO: cache this + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + fieldType := typ.Field(i) + + // only consider exported fields + if fieldType.PkgPath != "" { + continue + } + + tag := fieldType.Tag.Get("toml") + + // special field name to skip field + if tag == "-" { + continue + } + + k, opts := parseTag(tag) + if !isValidName(k) { + k = "" + } + + f := v.Field(i) + + if k == "" { + if fieldType.Anonymous { + if fieldType.Type.Kind() == reflect.Struct { + walkStruct(ctx, t, f) + } else if fieldType.Type.Kind() == reflect.Pointer && !f.IsNil() && f.Elem().Kind() == reflect.Struct { + walkStruct(ctx, t, f.Elem()) + } + continue + } else { + k = fieldType.Name + } + } + + if isNil(f) { + continue + } + + options := valueOptions{ + multiline: opts.multiline, + omitempty: opts.omitempty, + commented: opts.commented, + comment: fieldType.Tag.Get("comment"), + } + + if opts.inline || !willConvertToTableOrArrayTable(ctx, f) { + t.pushKV(k, f, options) + } else { + t.pushTable(k, f, options) + } + } +} + +func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + var t table + + walkStruct(ctx, &t, v) + + return enc.encodeTable(b, ctx, t) +} + +func 
(enc *Encoder) encodeComment(indent int, comment string, b []byte) []byte { + for len(comment) > 0 { + var line string + idx := strings.IndexByte(comment, '\n') + if idx >= 0 { + line = comment[:idx] + comment = comment[idx+1:] + } else { + line = comment + comment = "" + } + b = enc.indent(indent, b) + b = append(b, "# "...) + b = append(b, line...) + b = append(b, '\n') + } + return b +} + +func isValidName(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + case !unicode.IsLetter(c) && !unicode.IsDigit(c): + return false + } + } + return true +} + +type tagOptions struct { + multiline bool + inline bool + omitempty bool + commented bool +} + +func parseTag(tag string) (string, tagOptions) { + opts := tagOptions{} + + idx := strings.Index(tag, ",") + if idx == -1 { + return tag, opts + } + + raw := tag[idx+1:] + tag = string(tag[:idx]) + for raw != "" { + var o string + i := strings.Index(raw, ",") + if i >= 0 { + o, raw = raw[:i], raw[i+1:] + } else { + o, raw = raw, "" + } + switch o { + case "multiline": + opts.multiline = true + case "inline": + opts.inline = true + case "omitempty": + opts.omitempty = true + case "commented": + opts.commented = true + } + } + + return tag, opts +} + +func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, error) { + var err error + + ctx.shiftKey() + + if ctx.insideKv || (ctx.inline && !ctx.isRoot()) { + return enc.encodeTableInline(b, ctx, t) + } + + if !ctx.skipTableHeader { + b, err = enc.encodeTableHeader(ctx, b) + if err != nil { + return nil, err + } + + if enc.indentTables && len(ctx.parentKey) > 0 { + ctx.indent++ + } + } + ctx.skipTableHeader = false + + hasNonEmptyKV := false + for _, kv := range t.kvs { + if shouldOmitEmpty(kv.Options, kv.Value) { + continue + } + hasNonEmptyKV = true + + ctx.setKey(kv.Key) + ctx2 := ctx + ctx2.commented = kv.Options.commented || ctx2.commented + + b, err = enc.encodeKv(b, ctx2, kv.Options, kv.Value) + if err != nil { + return nil, err + } + + b = append(b, '\n') + } + + first := true + for _, table := range t.tables { + if shouldOmitEmpty(table.Options, table.Value) { + continue + } + if first { + first = false + if hasNonEmptyKV { + b = append(b, '\n') + } + } else { + b = append(b, "\n"...) + } + + ctx.setKey(table.Key) + + ctx.options = table.Options + ctx2 := ctx + ctx2.commented = ctx2.commented || ctx.options.commented + + b, err = enc.encode(b, ctx2, table.Value) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte, error) { + var err error + + b = append(b, '{') + + first := true + for _, kv := range t.kvs { + if shouldOmitEmpty(kv.Options, kv.Value) { + continue + } + + if first { + first = false + } else { + b = append(b, `, `...) + } + + ctx.setKey(kv.Key) + + b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) + if err != nil { + return nil, err + } + } + + if len(t.tables) > 0 { + panic("inline table cannot contain nested tables, only key-values") + } + + b = append(b, "}"...) 
+ + return b, nil +} + +func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { + if !v.IsValid() { + return false + } + if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + return false + } + + t := v.Type() + switch t.Kind() { + case reflect.Map, reflect.Struct: + return !ctx.inline + case reflect.Interface: + return willConvertToTable(ctx, v.Elem()) + case reflect.Ptr: + if v.IsNil() { + return false + } + + return willConvertToTable(ctx, v.Elem()) + default: + return false + } +} + +func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool { + if ctx.insideKv { + return false + } + t := v.Type() + + if t.Kind() == reflect.Interface { + return willConvertToTableOrArrayTable(ctx, v.Elem()) + } + + if t.Kind() == reflect.Slice || t.Kind() == reflect.Array { + if v.Len() == 0 { + // An empty slice should be a kv = []. + return false + } + + for i := 0; i < v.Len(); i++ { + t := willConvertToTable(ctx, v.Index(i)) + + if !t { + return false + } + } + + return true + } + + return willConvertToTable(ctx, v) +} + +func (enc *Encoder) encodeSlice(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + if v.Len() == 0 { + b = append(b, "[]"...) + + return b, nil + } + + if willConvertToTableOrArrayTable(ctx, v) { + return enc.encodeSliceAsArrayTable(b, ctx, v) + } + + return enc.encodeSliceAsArray(b, ctx, v) +} + +// caller should have checked that v is a slice that only contains values that +// encode into tables. +func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + ctx.shiftKey() + + scratch := make([]byte, 0, 64) + + scratch = enc.commented(ctx.commented, scratch) + + scratch = append(scratch, "[["...) + + for i, k := range ctx.parentKey { + if i > 0 { + scratch = append(scratch, '.') + } + + scratch = enc.encodeKey(scratch, k) + } + + scratch = append(scratch, "]]\n"...) + ctx.skipTableHeader = true + + b = enc.encodeComment(ctx.indent, ctx.options.comment, b) + + if enc.indentTables { + ctx.indent++ + } + + for i := 0; i < v.Len(); i++ { + if i != 0 { + b = append(b, "\n"...) + } + + b = append(b, scratch...) + + var err error + b, err = enc.encode(b, ctx, v.Index(i)) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (enc *Encoder) encodeSliceAsArray(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + multiline := ctx.options.multiline || enc.arraysMultiline + separator := ", " + + b = append(b, '[') + + subCtx := ctx + subCtx.options = valueOptions{} + + if multiline { + separator = ",\n" + + b = append(b, '\n') + + subCtx.indent++ + } + + var err error + first := true + + for i := 0; i < v.Len(); i++ { + if first { + first = false + } else { + b = append(b, separator...) + } + + if multiline { + b = enc.indent(subCtx.indent, b) + } + + b, err = enc.encode(b, subCtx, v.Index(i)) + if err != nil { + return nil, err + } + } + + if multiline { + b = append(b, '\n') + b = enc.indent(ctx.indent, b) + } + + b = append(b, ']') + + return b, nil +} + +func (enc *Encoder) indent(level int, b []byte) []byte { + for i := 0; i < level; i++ { + b = append(b, enc.indentSymbol...) 
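`willConvertToTableOrArrayTable` is the dispatch that decides between `[[array tables]]` and plain `[arrays]`: a slice qualifies for the former only when every element encodes as a table. A sketch of both outcomes, with the expected output hedged in comments:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

type item struct {
	Name string
}

func main() {
	// A slice of structs becomes an array of tables.
	b, _ := toml.Marshal(map[string][]item{
		"item": {{Name: "a"}, {Name: "b"}},
	})
	fmt.Print(string(b))
	// [[item]]
	// Name = 'a'
	//
	// [[item]]
	// Name = 'b'

	// A slice of scalars stays a plain array.
	b, _ = toml.Marshal(map[string][]int{"ports": {80, 443}})
	fmt.Print(string(b)) // ports = [80, 443]
}
```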
+ } + + return b +} diff --git a/vendor/github.com/pelletier/go-toml/v2/strict.go b/vendor/github.com/pelletier/go-toml/v2/strict.go new file mode 100644 index 0000000000..802e7e4d15 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/strict.go @@ -0,0 +1,107 @@ +package toml + +import ( + "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/internal/tracker" + "github.com/pelletier/go-toml/v2/unstable" +) + +type strict struct { + Enabled bool + + // Tracks the current key being processed. + key tracker.KeyTracker + + missing []unstable.ParserError +} + +func (s *strict) EnterTable(node *unstable.Node) { + if !s.Enabled { + return + } + + s.key.UpdateTable(node) +} + +func (s *strict) EnterArrayTable(node *unstable.Node) { + if !s.Enabled { + return + } + + s.key.UpdateArrayTable(node) +} + +func (s *strict) EnterKeyValue(node *unstable.Node) { + if !s.Enabled { + return + } + + s.key.Push(node) +} + +func (s *strict) ExitKeyValue(node *unstable.Node) { + if !s.Enabled { + return + } + + s.key.Pop(node) +} + +func (s *strict) MissingTable(node *unstable.Node) { + if !s.Enabled { + return + } + + s.missing = append(s.missing, unstable.ParserError{ + Highlight: keyLocation(node), + Message: "missing table", + Key: s.key.Key(), + }) +} + +func (s *strict) MissingField(node *unstable.Node) { + if !s.Enabled { + return + } + + s.missing = append(s.missing, unstable.ParserError{ + Highlight: keyLocation(node), + Message: "missing field", + Key: s.key.Key(), + }) +} + +func (s *strict) Error(doc []byte) error { + if !s.Enabled || len(s.missing) == 0 { + return nil + } + + err := &StrictMissingError{ + Errors: make([]DecodeError, 0, len(s.missing)), + } + + for _, derr := range s.missing { + derr := derr + err.Errors = append(err.Errors, *wrapDecodeError(doc, &derr)) + } + + return err +} + +func keyLocation(node *unstable.Node) []byte { + k := node.Key() + + hasOne := k.Next() + if !hasOne { + panic("should not be called with empty key") + } + + start := k.Node().Data + end := k.Node().Data + + for k.Next() { + end = k.Node().Data + } + + return danger.BytesRange(start, end) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/toml.abnf b/vendor/github.com/pelletier/go-toml/v2/toml.abnf new file mode 100644 index 0000000000..473f3749e8 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/toml.abnf @@ -0,0 +1,243 @@ +;; This document describes TOML's syntax, using the ABNF format (defined in +;; RFC 5234 -- https://www.ietf.org/rfc/rfc5234.txt). +;; +;; All valid TOML documents will match this description, however certain +;; invalid documents would need to be rejected as per the semantics described +;; in the supporting text description. + +;; It is possible to try this grammar interactively, using instaparse. +;; http://instaparse.mojombo.com/ +;; +;; To do so, in the lower right, click on Options and change `:input-format` to +;; ':abnf'. Then paste this entire ABNF document into the grammar entry box +;; (above the options). Then you can type or paste a sample TOML document into +;; the beige box on the left. Tada! 
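The `strict` collector above only records locations; the user-facing switch is `Decoder.DisallowUnknownFields` (defined later in this diff), which turns unknown keys into a `StrictMissingError` carrying one `DecodeError` per miss. A sketch:

```go
package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/pelletier/go-toml/v2"
)

type config struct {
	Host string
}

func main() {
	doc := "Host = 'localhost'\nTypo = true\n"

	dec := toml.NewDecoder(strings.NewReader(doc))
	dec.DisallowUnknownFields()

	var c config
	err := dec.Decode(&c)

	var missing *toml.StrictMissingError
	if errors.As(err, &missing) {
		fmt.Println(len(missing.Errors)) // 1: the unknown "Typo" key
		fmt.Println(missing.String())    // contextual rendering per field
	}
}
```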
+ +;; Overall Structure + +toml = expression *( newline expression ) + +expression = ws [ comment ] +expression =/ ws keyval ws [ comment ] +expression =/ ws table ws [ comment ] + +;; Whitespace + +ws = *wschar +wschar = %x20 ; Space +wschar =/ %x09 ; Horizontal tab + +;; Newline + +newline = %x0A ; LF +newline =/ %x0D.0A ; CRLF + +;; Comment + +comment-start-symbol = %x23 ; # +non-ascii = %x80-D7FF / %xE000-10FFFF +non-eol = %x09 / %x20-7F / non-ascii + +comment = comment-start-symbol *non-eol + +;; Key-Value pairs + +keyval = key keyval-sep val + +key = simple-key / dotted-key +simple-key = quoted-key / unquoted-key + +unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ +quoted-key = basic-string / literal-string +dotted-key = simple-key 1*( dot-sep simple-key ) + +dot-sep = ws %x2E ws ; . Period +keyval-sep = ws %x3D ws ; = + +val = string / boolean / array / inline-table / date-time / float / integer + +;; String + +string = ml-basic-string / basic-string / ml-literal-string / literal-string + +;; Basic String + +basic-string = quotation-mark *basic-char quotation-mark + +quotation-mark = %x22 ; " + +basic-char = basic-unescaped / escaped +basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii +escaped = escape escape-seq-char + +escape = %x5C ; \ +escape-seq-char = %x22 ; " quotation mark U+0022 +escape-seq-char =/ %x5C ; \ reverse solidus U+005C +escape-seq-char =/ %x62 ; b backspace U+0008 +escape-seq-char =/ %x66 ; f form feed U+000C +escape-seq-char =/ %x6E ; n line feed U+000A +escape-seq-char =/ %x72 ; r carriage return U+000D +escape-seq-char =/ %x74 ; t tab U+0009 +escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX +escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX + +;; Multiline Basic String + +ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + ml-basic-string-delim +ml-basic-string-delim = 3quotation-mark +ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + +mlb-content = mlb-char / newline / mlb-escaped-nl +mlb-char = mlb-unescaped / escaped +mlb-quotes = 1*2quotation-mark +mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii +mlb-escaped-nl = escape ws newline *( wschar / newline ) + +;; Literal String + +literal-string = apostrophe *literal-char apostrophe + +apostrophe = %x27 ; ' apostrophe + +literal-char = %x09 / %x20-26 / %x28-7E / non-ascii + +;; Multiline Literal String + +ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body + ml-literal-string-delim +ml-literal-string-delim = 3apostrophe +ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] + +mll-content = mll-char / newline +mll-char = %x09 / %x20-26 / %x28-7E / non-ascii +mll-quotes = 1*2apostrophe + +;; Integer + +integer = dec-int / hex-int / oct-int / bin-int + +minus = %x2D ; - +plus = %x2B ; + +underscore = %x5F ; _ +digit1-9 = %x31-39 ; 1-9 +digit0-7 = %x30-37 ; 0-7 +digit0-1 = %x30-31 ; 0-1 + +hex-prefix = %x30.78 ; 0x +oct-prefix = %x30.6F ; 0o +bin-prefix = %x30.62 ; 0b + +dec-int = [ minus / plus ] unsigned-dec-int +unsigned-dec-int = DIGIT / digit1-9 1*( DIGIT / underscore DIGIT ) + +hex-int = hex-prefix HEXDIG *( HEXDIG / underscore HEXDIG ) +oct-int = oct-prefix digit0-7 *( digit0-7 / underscore digit0-7 ) +bin-int = bin-prefix digit0-1 *( digit0-1 / underscore digit0-1 ) + +;; Float + +float = float-int-part ( exp / frac [ exp ] ) +float =/ special-float + +float-int-part = dec-int +frac = decimal-point zero-prefixable-int +decimal-point = %x2E ; . 
+zero-prefixable-int = DIGIT *( DIGIT / underscore DIGIT ) + +exp = "e" float-exp-part +float-exp-part = [ minus / plus ] zero-prefixable-int + +special-float = [ minus / plus ] ( inf / nan ) +inf = %x69.6e.66 ; inf +nan = %x6e.61.6e ; nan + +;; Boolean + +boolean = true / false + +true = %x74.72.75.65 ; true +false = %x66.61.6C.73.65 ; false + +;; Date and Time (as defined in RFC 3339) + +date-time = offset-date-time / local-date-time / local-date / local-time + +date-fullyear = 4DIGIT +date-month = 2DIGIT ; 01-12 +date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year +time-delim = "T" / %x20 ; T, t, or space +time-hour = 2DIGIT ; 00-23 +time-minute = 2DIGIT ; 00-59 +time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second rules +time-secfrac = "." 1*DIGIT +time-numoffset = ( "+" / "-" ) time-hour ":" time-minute +time-offset = "Z" / time-numoffset + +partial-time = time-hour ":" time-minute ":" time-second [ time-secfrac ] +full-date = date-fullyear "-" date-month "-" date-mday +full-time = partial-time time-offset + +;; Offset Date-Time + +offset-date-time = full-date time-delim full-time + +;; Local Date-Time + +local-date-time = full-date time-delim partial-time + +;; Local Date + +local-date = full-date + +;; Local Time + +local-time = partial-time + +;; Array + +array = array-open [ array-values ] ws-comment-newline array-close + +array-open = %x5B ; [ +array-close = %x5D ; ] + +array-values = ws-comment-newline val ws-comment-newline array-sep array-values +array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] + +array-sep = %x2C ; , Comma + +ws-comment-newline = *( wschar / [ comment ] newline ) + +;; Table + +table = std-table / array-table + +;; Standard Table + +std-table = std-table-open key std-table-close + +std-table-open = %x5B ws ; [ Left square bracket +std-table-close = ws %x5D ; ] Right square bracket + +;; Inline Table + +inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close + +inline-table-open = %x7B ws ; { +inline-table-close = ws %x7D ; } +inline-table-sep = ws %x2C ws ; , Comma + +inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] + +;; Array Table + +array-table = array-table-open key array-table-close + +array-table-open = %x5B.5B ws ; [[ Double left square bracket +array-table-close = ws %x5D.5D ; ]] Double right square bracket + +;; Built-in ABNF terms, reproduced here for clarity + +ALPHA = %x41-5A / %x61-7A ; A-Z / a-z +DIGIT = %x30-39 ; 0-9 +HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F" diff --git a/vendor/github.com/pelletier/go-toml/v2/types.go b/vendor/github.com/pelletier/go-toml/v2/types.go new file mode 100644 index 0000000000..3c6b8fe570 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/types.go @@ -0,0 +1,14 @@ +package toml + +import ( + "encoding" + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil)) +var sliceInterfaceType = reflect.TypeOf([]interface{}(nil)) +var stringType = reflect.TypeOf("") diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go new file mode 100644 index 0000000000..98231bae65 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -0,0 +1,1311 @@ +package toml + +import ( + 
"encoding" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "sync/atomic" + "time" + + "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/internal/tracker" + "github.com/pelletier/go-toml/v2/unstable" +) + +// Unmarshal deserializes a TOML document into a Go value. +// +// It is a shortcut for Decoder.Decode() with the default options. +func Unmarshal(data []byte, v interface{}) error { + p := unstable.Parser{} + p.Reset(data) + d := decoder{p: &p} + + return d.FromParser(v) +} + +// Decoder reads and decode a TOML document from an input stream. +type Decoder struct { + // input + r io.Reader + + // global settings + strict bool + + // toggles unmarshaler interface + unmarshalerInterface bool +} + +// NewDecoder creates a new Decoder that will read from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +// DisallowUnknownFields causes the Decoder to return an error when the +// destination is a struct and the input contains a key that does not match a +// non-ignored field. +// +// In that case, the Decoder returns a StrictMissingError that can be used to +// retrieve the individual errors as well as generate a human readable +// description of the missing fields. +func (d *Decoder) DisallowUnknownFields() *Decoder { + d.strict = true + return d +} + +// EnableUnmarshalerInterface allows to enable unmarshaler interface. +// +// With this feature enabled, types implementing the unstable/Unmarshaler +// interface can be decoded from any structure of the document. It allows types +// that don't have a straightfoward TOML representation to provide their own +// decoding logic. +// +// Currently, types can only decode from a single value. Tables and array tables +// are not supported. +// +// *Unstable:* This method does not follow the compatibility guarantees of +// semver. It can be changed or removed without a new major version being +// issued. +func (d *Decoder) EnableUnmarshalerInterface() *Decoder { + d.unmarshalerInterface = true + return d +} + +// Decode the whole content of r into v. +// +// By default, values in the document that don't exist in the target Go value +// are ignored. See Decoder.DisallowUnknownFields() to change this behavior. +// +// When a TOML local date, time, or date-time is decoded into a time.Time, its +// value is represented in time.Local timezone. Otherwise the appropriate Local* +// structure is used. For time values, precision up to the nanosecond is +// supported by truncating extra digits. +// +// Empty tables decoded in an interface{} create an empty initialized +// map[string]interface{}. +// +// Types implementing the encoding.TextUnmarshaler interface are decoded from a +// TOML string. +// +// When decoding a number, go-toml will return an error if the number is out of +// bounds for the target type (which includes negative numbers when decoding +// into an unsigned int). +// +// If an error occurs while decoding the content of the document, this function +// returns a toml.DecodeError, providing context about the issue. When using +// strict mode and a field is missing, a `toml.StrictMissingError` is +// returned. In any other case, this function returns a standard Go error. 
+//
+// # Type mapping
+//
+// List of supported TOML types and their associated accepted Go types:
+//
+//	String           -> string
+//	Integer          -> uint*, int*, depending on size
+//	Float            -> float*, depending on size
+//	Boolean          -> bool
+//	Offset Date-Time -> time.Time
+//	Local Date-time  -> LocalDateTime, time.Time
+//	Local Date       -> LocalDate, time.Time
+//	Local Time       -> LocalTime, time.Time
+//	Array            -> slice and array, depending on elements types
+//	Table            -> map and struct
+//	Inline Table     -> same as Table
+//	Array of Tables  -> same as Array and Table
+func (d *Decoder) Decode(v interface{}) error {
+	b, err := ioutil.ReadAll(d.r)
+	if err != nil {
+		return fmt.Errorf("toml: %w", err)
+	}
+
+	p := unstable.Parser{}
+	p.Reset(b)
+	dec := decoder{
+		p: &p,
+		strict: strict{
+			Enabled: d.strict,
+		},
+		unmarshalerInterface: d.unmarshalerInterface,
+	}
+
+	return dec.FromParser(v)
+}
+
+type decoder struct {
+	// Which parser instance is in use for this decoding session.
+	p *unstable.Parser
+
+	// Flag indicating that the current expression is stashed.
+	// If set to true, calling nextExpr will not actually pull a new expression
+	// but turn off the flag instead.
+	stashedExpr bool
+
+	// Skip expressions until a table is found. This is set to true when a
+	// table could not be created (missing field in map), so all KV expressions
+	// need to be skipped.
+	skipUntilTable bool
+
+	// Flag indicating that the current array/slice table should be cleared because
+	// it is the first encounter of an array table.
+	clearArrayTable bool
+
+	// Tracks position in Go arrays.
+	// This is used when decoding [[array tables]] into Go arrays. Given array
+	// tables are separate TOML expressions, we need to keep track of where we
+	// are at in the Go array, as we can't just introspect its size.
+	arrayIndexes map[reflect.Value]int
+
+	// Tracks keys that have been seen, with which type.
+	seen tracker.SeenTracker
+
+	// Strict mode
+	strict strict
+
+	// Flag that enables/disables unmarshaler interface.
+	unmarshalerInterface bool
+
+	// Current context for the error.
+ errorContext *errorContext +} + +type errorContext struct { + Struct reflect.Type + Field []int +} + +func (d *decoder) typeMismatchError(toml string, target reflect.Type) error { + return fmt.Errorf("toml: %s", d.typeMismatchString(toml, target)) +} + +func (d *decoder) typeMismatchString(toml string, target reflect.Type) string { + if d.errorContext != nil && d.errorContext.Struct != nil { + ctx := d.errorContext + f := ctx.Struct.FieldByIndex(ctx.Field) + return fmt.Sprintf("cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type) + } + return fmt.Sprintf("cannot decode TOML %s into a Go value of type %s", toml, target) +} + +func (d *decoder) expr() *unstable.Node { + return d.p.Expression() +} + +func (d *decoder) nextExpr() bool { + if d.stashedExpr { + d.stashedExpr = false + return true + } + return d.p.NextExpression() +} + +func (d *decoder) stashExpr() { + d.stashedExpr = true +} + +func (d *decoder) arrayIndex(shouldAppend bool, v reflect.Value) int { + if d.arrayIndexes == nil { + d.arrayIndexes = make(map[reflect.Value]int, 1) + } + + idx, ok := d.arrayIndexes[v] + + if !ok { + d.arrayIndexes[v] = 0 + } else if shouldAppend { + idx++ + d.arrayIndexes[v] = idx + } + + return idx +} + +func (d *decoder) FromParser(v interface{}) error { + r := reflect.ValueOf(v) + if r.Kind() != reflect.Ptr { + return fmt.Errorf("toml: decoding can only be performed into a pointer, not %s", r.Kind()) + } + + if r.IsNil() { + return fmt.Errorf("toml: decoding pointer target cannot be nil") + } + + r = r.Elem() + if r.Kind() == reflect.Interface && r.IsNil() { + newMap := map[string]interface{}{} + r.Set(reflect.ValueOf(newMap)) + } + + err := d.fromParser(r) + if err == nil { + return d.strict.Error(d.p.Data()) + } + + var e *unstable.ParserError + if errors.As(err, &e) { + return wrapDecodeError(d.p.Data(), e) + } + + return err +} + +func (d *decoder) fromParser(root reflect.Value) error { + for d.nextExpr() { + err := d.handleRootExpression(d.expr(), root) + if err != nil { + return err + } + } + + return d.p.Error() +} + +/* +Rules for the unmarshal code: + +- The stack is used to keep track of which values need to be set where. +- handle* functions <=> switch on a given unstable.Kind. +- unmarshalX* functions need to unmarshal a node of kind X. +- An "object" is either a struct or a map. 
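+- Example (illustrative): for the document "[a]" followed by "b = 1",
+  handleRootExpression dispatches the Table expression to handleTable, which
+  scopes the key "a" through handleTablePart; handleKeyValues then consumes
+  the following KeyValue expression.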
+*/
+func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error {
+	var x reflect.Value
+	var err error
+	var first bool // used to clear array tables on first use
+
+	if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) {
+		first, err = d.seen.CheckExpression(expr)
+		if err != nil {
+			return err
+		}
+	}
+
+	switch expr.Kind {
+	case unstable.KeyValue:
+		if d.skipUntilTable {
+			return nil
+		}
+		x, err = d.handleKeyValue(expr, v)
+	case unstable.Table:
+		d.skipUntilTable = false
+		d.strict.EnterTable(expr)
+		x, err = d.handleTable(expr.Key(), v)
+	case unstable.ArrayTable:
+		d.skipUntilTable = false
+		d.strict.EnterArrayTable(expr)
+		d.clearArrayTable = first
+		x, err = d.handleArrayTable(expr.Key(), v)
+	default:
+		panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind))
+	}
+
+	if d.skipUntilTable {
+		if expr.Kind == unstable.Table || expr.Kind == unstable.ArrayTable {
+			d.strict.MissingTable(expr)
+		}
+	} else if err == nil && x.IsValid() {
+		v.Set(x)
+	}
+
+	return err
+}
+
+func (d *decoder) handleArrayTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+	if key.Next() {
+		return d.handleArrayTablePart(key, v)
+	}
+	return d.handleKeyValues(v)
+}
+
+func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+	switch v.Kind() {
+	case reflect.Interface:
+		elem := v.Elem()
+		if !elem.IsValid() {
+			elem = reflect.New(sliceInterfaceType).Elem()
+			elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
+		} else if elem.Kind() == reflect.Slice {
+			if elem.Type() != sliceInterfaceType {
+				elem = reflect.New(sliceInterfaceType).Elem()
+				elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
+			} else if !elem.CanSet() {
+				nelem := reflect.New(sliceInterfaceType).Elem()
+				nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap()))
+				reflect.Copy(nelem, elem)
+				elem = nelem
+			}
+			if d.clearArrayTable && elem.Len() > 0 {
+				elem.SetLen(0)
+				d.clearArrayTable = false
+			}
+		}
+		return d.handleArrayTableCollectionLast(key, elem)
+	case reflect.Ptr:
+		elem := v.Elem()
+		if !elem.IsValid() {
+			ptr := reflect.New(v.Type().Elem())
+			v.Set(ptr)
+			elem = ptr.Elem()
+		}
+
+		elem, err := d.handleArrayTableCollectionLast(key, elem)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+		v.Elem().Set(elem)
+
+		return v, nil
+	case reflect.Slice:
+		if d.clearArrayTable && v.Len() > 0 {
+			v.SetLen(0)
+			d.clearArrayTable = false
+		}
+		elemType := v.Type().Elem()
+		var elem reflect.Value
+		if elemType.Kind() == reflect.Interface {
+			elem = makeMapStringInterface()
+		} else {
+			elem = reflect.New(elemType).Elem()
+		}
+		elem2, err := d.handleArrayTable(key, elem)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+		if elem2.IsValid() {
+			elem = elem2
+		}
+		return reflect.Append(v, elem), nil
+	case reflect.Array:
+		idx := d.arrayIndex(true, v)
+		if idx >= v.Len() {
+			return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx)
+		}
+		elem := v.Index(idx)
+		_, err := d.handleArrayTable(key, elem)
+		return v, err
+	default:
+		return reflect.Value{}, d.typeMismatchError("array table", v.Type())
+	}
+}
+
+// When parsing an array table expression, each part of the key needs to be
+// evaluated like a normal key, but if it returns a collection, it also needs to
+// point to the last element of the collection. Unless it is the last part of
+// the key, then it needs to create a new element at the end.
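+//
+// For example (an illustrative document), with:
+//
+//	[[fruits.varieties]]
+//
+// "fruits" is a prefix part, so the decoder walks to the last element of the
+// existing fruits collection, while "varieties" is the last part and appends
+// a new element to it.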
+func (d *decoder) handleArrayTableCollection(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { + if key.IsLast() { + return d.handleArrayTableCollectionLast(key, v) + } + + switch v.Kind() { + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + elem = ptr.Elem() + } + + elem, err := d.handleArrayTableCollection(key, elem) + if err != nil { + return reflect.Value{}, err + } + if elem.IsValid() { + v.Elem().Set(elem) + } + + return v, nil + case reflect.Slice: + elem := v.Index(v.Len() - 1) + x, err := d.handleArrayTable(key, elem) + if err != nil || d.skipUntilTable { + return reflect.Value{}, err + } + if x.IsValid() { + elem.Set(x) + } + + return v, err + case reflect.Array: + idx := d.arrayIndex(false, v) + if idx >= v.Len() { + return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) + } + elem := v.Index(idx) + _, err := d.handleArrayTable(key, elem) + return v, err + } + + return d.handleArrayTable(key, v) +} + +func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) { + var rv reflect.Value + + // First, dispatch over v to make sure it is a valid object. + // There is no guarantee over what it could be. + switch v.Kind() { + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + v.Set(reflect.New(v.Type().Elem())) + } + elem = v.Elem() + return d.handleKeyPart(key, elem, nextFn, makeFn) + case reflect.Map: + vt := v.Type() + + // Create the key for the map element. Convert to key type. + mk, err := d.keyFromData(vt.Key(), key.Node().Data) + if err != nil { + return reflect.Value{}, err + } + + // If the map does not exist, create it. + if v.IsNil() { + vt := v.Type() + v = reflect.MakeMap(vt) + rv = v + } + + mv := v.MapIndex(mk) + set := false + if !mv.IsValid() { + // If there is no value in the map, create a new one according to + // the map type. If the element type is interface, create either a + // map[string]interface{} or a []interface{} depending on whether + // this is the last part of the array table key. 
+
+			t := vt.Elem()
+			if t.Kind() == reflect.Interface {
+				mv = makeFn()
+			} else {
+				mv = reflect.New(t).Elem()
+			}
+			set = true
+		} else if mv.Kind() == reflect.Interface {
+			mv = mv.Elem()
+			if !mv.IsValid() {
+				mv = makeFn()
+			}
+			set = true
+		} else if !mv.CanAddr() {
+			vt := v.Type()
+			t := vt.Elem()
+			oldmv := mv
+			mv = reflect.New(t).Elem()
+			mv.Set(oldmv)
+			set = true
+		}
+
+		x, err := nextFn(key, mv)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+
+		if x.IsValid() {
+			mv = x
+			set = true
+		}
+
+		if set {
+			v.SetMapIndex(mk, mv)
+		}
+	case reflect.Struct:
+		path, found := structFieldPath(v, string(key.Node().Data))
+		if !found {
+			d.skipUntilTable = true
+			return reflect.Value{}, nil
+		}
+
+		if d.errorContext == nil {
+			d.errorContext = new(errorContext)
+		}
+		t := v.Type()
+		d.errorContext.Struct = t
+		d.errorContext.Field = path
+
+		f := fieldByIndex(v, path)
+		x, err := nextFn(key, f)
+		if err != nil || d.skipUntilTable {
+			return reflect.Value{}, err
+		}
+		if x.IsValid() {
+			f.Set(x)
+		}
+		d.errorContext.Field = nil
+		d.errorContext.Struct = nil
+	case reflect.Interface:
+		if v.Elem().IsValid() {
+			v = v.Elem()
+		} else {
+			v = makeMapStringInterface()
+		}
+
+		x, err := d.handleKeyPart(key, v, nextFn, makeFn)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+		if x.IsValid() {
+			v = x
+		}
+		rv = v
+	default:
+		panic(fmt.Errorf("unhandled part: %s", v.Kind()))
+	}
+
+	return rv, nil
+}
+
+// handleArrayTablePart navigates the Go structure v using the iterator key. It
+// is only used for the prefix (non-last) parts of an array-table. When
+// encountering a collection, it should go to the last element.
+func (d *decoder) handleArrayTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+	var makeFn valueMakerFn
+	if key.IsLast() {
+		makeFn = makeSliceInterface
+	} else {
+		makeFn = makeMapStringInterface
+	}
+	return d.handleKeyPart(key, v, d.handleArrayTableCollection, makeFn)
+}
+
+// handleTable returns a reference when it has checked the next expression but
+// cannot handle it.
+func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+	if v.Kind() == reflect.Slice {
+		if v.Len() == 0 {
+			return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice")
+		}
+		elem := v.Index(v.Len() - 1)
+		x, err := d.handleTable(key, elem)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+		if x.IsValid() {
+			elem.Set(x)
+		}
+		return reflect.Value{}, nil
+	}
+	if key.Next() {
+		// Still scoping the key
+		return d.handleTablePart(key, v)
+	}
+	// Done scoping the key.
+	// Now handle all the key-value expressions in this table.
+	return d.handleKeyValues(v)
+}
+
+// handleKeyValues handles root expressions until the end of the document or
+// the next non-key-value.
+func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) {
+	var rv reflect.Value
+	for d.nextExpr() {
+		expr := d.expr()
+		if expr.Kind != unstable.KeyValue {
+			// Stash the expression so that fromParser can just loop and use
+			// the right handler.
+			// We could just recurse ourselves here, but at least this gives a
+			// chance to pop the stack a bit.
+			d.stashExpr()
+			break
+		}
+
+		_, err := d.seen.CheckExpression(expr)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+
+		x, err := d.handleKeyValue(expr, v)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+		if x.IsValid() {
+			v = x
+			rv = x
+		}
+	}
+	return rv, nil
+}
+
+type (
+	handlerFn    func(key unstable.Iterator, v reflect.Value) (reflect.Value, error)
+	valueMakerFn func() reflect.Value
+)
+
+func makeMapStringInterface() reflect.Value {
+	return reflect.MakeMap(mapStringInterfaceType)
+}
+
+func makeSliceInterface() reflect.Value {
+	return reflect.MakeSlice(sliceInterfaceType, 0, 16)
+}
+
+func (d *decoder) handleTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+	return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface)
+}
+
+func (d *decoder) tryTextUnmarshaler(node *unstable.Node, v reflect.Value) (bool, error) {
+	// Special case for time, because we allow unmarshaling to it from
+	// different kinds of AST nodes.
+	if v.Type() == timeType {
+		return false, nil
+	}
+
+	if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) {
+		err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data)
+		if err != nil {
+			return false, unstable.NewParserError(d.p.Raw(node.Raw), "%w", err)
+		}
+
+		return true, nil
+	}
+
+	return false, nil
+}
+
+func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error {
+	for v.Kind() == reflect.Ptr {
+		v = initAndDereferencePointer(v)
+	}
+
+	if d.unmarshalerInterface {
+		if v.CanAddr() && v.Addr().CanInterface() {
+			if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok {
+				return outi.UnmarshalTOML(value)
+			}
+		}
+	}
+
+	ok, err := d.tryTextUnmarshaler(value, v)
+	if ok || err != nil {
+		return err
+	}
+
+	switch value.Kind {
+	case unstable.String:
+		return d.unmarshalString(value, v)
+	case unstable.Integer:
+		return d.unmarshalInteger(value, v)
+	case unstable.Float:
+		return d.unmarshalFloat(value, v)
+	case unstable.Bool:
+		return d.unmarshalBool(value, v)
+	case unstable.DateTime:
+		return d.unmarshalDateTime(value, v)
+	case unstable.LocalDate:
+		return d.unmarshalLocalDate(value, v)
+	case unstable.LocalTime:
+		return d.unmarshalLocalTime(value, v)
+	case unstable.LocalDateTime:
+		return d.unmarshalLocalDateTime(value, v)
+	case unstable.InlineTable:
+		return d.unmarshalInlineTable(value, v)
+	case unstable.Array:
+		return d.unmarshalArray(value, v)
+	default:
+		panic(fmt.Errorf("handleValue not implemented for %s", value.Kind))
+	}
+}
+
+func (d *decoder) unmarshalArray(array *unstable.Node, v reflect.Value) error {
+	switch v.Kind() {
+	case reflect.Slice:
+		if v.IsNil() {
+			v.Set(reflect.MakeSlice(v.Type(), 0, 16))
+		} else {
+			v.SetLen(0)
+		}
+	case reflect.Array:
+		// arrays are always initialized
+	case reflect.Interface:
+		elem := v.Elem()
+		if !elem.IsValid() {
+			elem = reflect.New(sliceInterfaceType).Elem()
+			elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
+		} else if elem.Kind() == reflect.Slice {
+			if elem.Type() != sliceInterfaceType {
+				elem = reflect.New(sliceInterfaceType).Elem()
+				elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
+			} else if !elem.CanSet() {
+				nelem := reflect.New(sliceInterfaceType).Elem()
+				nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap()))
+				reflect.Copy(nelem, elem)
+				elem = nelem
+			}
+		}
+		err := d.unmarshalArray(array, elem)
+		if err != nil {
+			return err
+		}
+		v.Set(elem)
+		return nil
+	default:
+		// TODO: use newDecodeError, but first the parser needs to fill
+		// array.Data.
+ return d.typeMismatchError("array", v.Type()) + } + + elemType := v.Type().Elem() + + it := array.Children() + idx := 0 + for it.Next() { + n := it.Node() + + // TODO: optimize + if v.Kind() == reflect.Slice { + elem := reflect.New(elemType).Elem() + + err := d.handleValue(n, elem) + if err != nil { + return err + } + + v.Set(reflect.Append(v, elem)) + } else { // array + if idx >= v.Len() { + return nil + } + elem := v.Index(idx) + err := d.handleValue(n, elem) + if err != nil { + return err + } + idx++ + } + } + + return nil +} + +func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) error { + // Make sure v is an initialized object. + switch v.Kind() { + case reflect.Map: + if v.IsNil() { + v.Set(reflect.MakeMap(v.Type())) + } + case reflect.Struct: + // structs are always initialized. + case reflect.Interface: + elem := v.Elem() + if !elem.IsValid() { + elem = makeMapStringInterface() + v.Set(elem) + } + return d.unmarshalInlineTable(itable, elem) + default: + return unstable.NewParserError(d.p.Raw(itable.Raw), "cannot store inline table in Go type %s", v.Kind()) + } + + it := itable.Children() + for it.Next() { + n := it.Node() + + x, err := d.handleKeyValue(n, v) + if err != nil { + return err + } + if x.IsValid() { + v = x + } + } + + return nil +} + +func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error { + dt, err := parseDateTime(value.Data) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(dt)) + return nil +} + +func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) error { + ld, err := parseLocalDate(value.Data) + if err != nil { + return err + } + + if v.Type() == timeType { + cast := ld.AsTime(time.Local) + v.Set(reflect.ValueOf(cast)) + return nil + } + + v.Set(reflect.ValueOf(ld)) + + return nil +} + +func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) error { + lt, rest, err := parseLocalTime(value.Data) + if err != nil { + return err + } + + if len(rest) > 0 { + return unstable.NewParserError(rest, "extra characters at the end of a local time") + } + + v.Set(reflect.ValueOf(lt)) + return nil +} + +func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) error { + ldt, rest, err := parseLocalDateTime(value.Data) + if err != nil { + return err + } + + if len(rest) > 0 { + return unstable.NewParserError(rest, "extra characters at the end of a local date time") + } + + if v.Type() == timeType { + cast := ldt.AsTime(time.Local) + + v.Set(reflect.ValueOf(cast)) + return nil + } + + v.Set(reflect.ValueOf(ldt)) + + return nil +} + +func (d *decoder) unmarshalBool(value *unstable.Node, v reflect.Value) error { + b := value.Data[0] == 't' + + switch v.Kind() { + case reflect.Bool: + v.SetBool(b) + case reflect.Interface: + v.Set(reflect.ValueOf(b)) + default: + return unstable.NewParserError(value.Data, "cannot assign boolean to a %t", b) + } + + return nil +} + +func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error { + f, err := parseFloat(value.Data) + if err != nil { + return err + } + + switch v.Kind() { + case reflect.Float64: + v.SetFloat(f) + case reflect.Float32: + if f > math.MaxFloat32 { + return unstable.NewParserError(value.Data, "number %f does not fit in a float32", f) + } + v.SetFloat(f) + case reflect.Interface: + v.Set(reflect.ValueOf(f)) + default: + return unstable.NewParserError(value.Data, "float cannot be assigned to %s", v.Kind()) + } + + return nil +} + +const ( + maxInt = int64(^uint(0) >> 1) + minInt 
= -maxInt - 1 +) + +// Maximum value of uint for decoding. Currently the decoder parses the integer +// into an int64. As a result, on architectures where uint is 64 bits, the +// effective maximum uint we can decode is the maximum of int64. On +// architectures where uint is 32 bits, the maximum value we can decode is +// lower: the maximum of uint32. I didn't find a way to figure out this value at +// compile time, so it is computed during initialization. +var maxUint int64 = math.MaxInt64 + +func init() { + m := uint64(^uint(0)) + if m < uint64(maxUint) { + maxUint = int64(m) + } +} + +func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error { + kind := v.Kind() + if kind == reflect.Float32 || kind == reflect.Float64 { + return d.unmarshalFloat(value, v) + } + + i, err := parseInteger(value.Data) + if err != nil { + return err + } + + var r reflect.Value + + switch kind { + case reflect.Int64: + v.SetInt(i) + return nil + case reflect.Int32: + if i < math.MinInt32 || i > math.MaxInt32 { + return fmt.Errorf("toml: number %d does not fit in an int32", i) + } + + r = reflect.ValueOf(int32(i)) + case reflect.Int16: + if i < math.MinInt16 || i > math.MaxInt16 { + return fmt.Errorf("toml: number %d does not fit in an int16", i) + } + + r = reflect.ValueOf(int16(i)) + case reflect.Int8: + if i < math.MinInt8 || i > math.MaxInt8 { + return fmt.Errorf("toml: number %d does not fit in an int8", i) + } + + r = reflect.ValueOf(int8(i)) + case reflect.Int: + if i < minInt || i > maxInt { + return fmt.Errorf("toml: number %d does not fit in an int", i) + } + + r = reflect.ValueOf(int(i)) + case reflect.Uint64: + if i < 0 { + return fmt.Errorf("toml: negative number %d does not fit in an uint64", i) + } + + r = reflect.ValueOf(uint64(i)) + case reflect.Uint32: + if i < 0 || i > math.MaxUint32 { + return fmt.Errorf("toml: negative number %d does not fit in an uint32", i) + } + + r = reflect.ValueOf(uint32(i)) + case reflect.Uint16: + if i < 0 || i > math.MaxUint16 { + return fmt.Errorf("toml: negative number %d does not fit in an uint16", i) + } + + r = reflect.ValueOf(uint16(i)) + case reflect.Uint8: + if i < 0 || i > math.MaxUint8 { + return fmt.Errorf("toml: negative number %d does not fit in an uint8", i) + } + + r = reflect.ValueOf(uint8(i)) + case reflect.Uint: + if i < 0 || i > maxUint { + return fmt.Errorf("toml: negative number %d does not fit in an uint", i) + } + + r = reflect.ValueOf(uint(i)) + case reflect.Interface: + r = reflect.ValueOf(i) + default: + return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("integer", v.Type())) + } + + if !r.Type().AssignableTo(v.Type()) { + r = r.Convert(v.Type()) + } + + v.Set(r) + + return nil +} + +func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error { + switch v.Kind() { + case reflect.String: + v.SetString(string(value.Data)) + case reflect.Interface: + v.Set(reflect.ValueOf(string(value.Data))) + default: + return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("string", v.Type())) + } + + return nil +} + +func (d *decoder) handleKeyValue(expr *unstable.Node, v reflect.Value) (reflect.Value, error) { + d.strict.EnterKeyValue(expr) + + v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v) + if d.skipUntilTable { + d.strict.MissingField(expr) + d.skipUntilTable = false + } + + d.strict.ExitKeyValue(expr) + + return v, err +} + +func (d *decoder) handleKeyValueInner(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { + if 
key.Next() { + // Still scoping the key + return d.handleKeyValuePart(key, value, v) + } + // Done scoping the key. + // v is whatever Go value we need to fill. + return reflect.Value{}, d.handleValue(value, v) +} + +func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, error) { + switch { + case stringType.AssignableTo(keyType): + return reflect.ValueOf(string(data)), nil + + case stringType.ConvertibleTo(keyType): + return reflect.ValueOf(string(data)).Convert(keyType), nil + + case keyType.Implements(textUnmarshalerType): + mk := reflect.New(keyType.Elem()) + if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) + } + return mk, nil + + case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + mk := reflect.New(keyType) + if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) + } + return mk.Elem(), nil + } + return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) +} + +func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { + // contains the replacement for v + var rv reflect.Value + + // First, dispatch over v to make sure it is a valid object. + // There is no guarantee over what it could be. + switch v.Kind() { + case reflect.Map: + vt := v.Type() + + mk, err := d.keyFromData(vt.Key(), key.Node().Data) + if err != nil { + return reflect.Value{}, err + } + + // If the map does not exist, create it. + if v.IsNil() { + v = reflect.MakeMap(vt) + rv = v + } + + mv := v.MapIndex(mk) + set := false + if !mv.IsValid() || key.IsLast() { + set = true + mv = reflect.New(v.Type().Elem()).Elem() + } + + nv, err := d.handleKeyValueInner(key, value, mv) + if err != nil { + return reflect.Value{}, err + } + if nv.IsValid() { + mv = nv + set = true + } + + if set { + v.SetMapIndex(mk, mv) + } + case reflect.Struct: + path, found := structFieldPath(v, string(key.Node().Data)) + if !found { + d.skipUntilTable = true + break + } + + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + t := v.Type() + d.errorContext.Struct = t + d.errorContext.Field = path + + f := fieldByIndex(v, path) + + if !f.CanAddr() { + // If the field is not addressable, need to take a slower path and + // make a copy of the struct itself to a new location. + nvp := reflect.New(v.Type()) + nvp.Elem().Set(v) + v = nvp.Elem() + _, err := d.handleKeyValuePart(key, value, v) + if err != nil { + return reflect.Value{}, err + } + return nvp.Elem(), nil + } + x, err := d.handleKeyValueInner(key, value, f) + if err != nil { + return reflect.Value{}, err + } + + if x.IsValid() { + f.Set(x) + } + d.errorContext.Struct = nil + d.errorContext.Field = nil + case reflect.Interface: + v = v.Elem() + + // Following encoding/json: decoding an object into an + // interface{}, it needs to always hold a + // map[string]interface{}. This is for the types to be + // consistent whether a previous value was set or not. 
+		if !v.IsValid() || v.Type() != mapStringInterfaceType {
+			v = makeMapStringInterface()
+		}
+
+		x, err := d.handleKeyValuePart(key, value, v)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+		if x.IsValid() {
+			v = x
+		}
+		rv = v
+	case reflect.Ptr:
+		elem := v.Elem()
+		if !elem.IsValid() {
+			ptr := reflect.New(v.Type().Elem())
+			v.Set(ptr)
+			rv = v
+			elem = ptr.Elem()
+		}
+
+		elem2, err := d.handleKeyValuePart(key, value, elem)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+		if elem2.IsValid() {
+			elem = elem2
+		}
+		v.Elem().Set(elem)
+	default:
+		return reflect.Value{}, fmt.Errorf("unhandled kv part: %s", v.Kind())
+	}
+
+	return rv, nil
+}
+
+func initAndDereferencePointer(v reflect.Value) reflect.Value {
+	var elem reflect.Value
+	if v.IsNil() {
+		ptr := reflect.New(v.Type().Elem())
+		v.Set(ptr)
+	}
+	elem = v.Elem()
+	return elem
+}
+
+// Same as reflect.Value.FieldByIndex, but creates pointers if needed.
+func fieldByIndex(v reflect.Value, path []int) reflect.Value {
+	for _, x := range path {
+		v = v.Field(x)
+
+		if v.Kind() == reflect.Ptr {
+			if v.IsNil() {
+				v.Set(reflect.New(v.Type().Elem()))
+			}
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+type fieldPathsMap = map[string][]int
+
+var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap
+
+func structFieldPath(v reflect.Value, name string) ([]int, bool) {
+	t := v.Type()
+
+	cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap)
+	fieldPaths, ok := cache[danger.MakeTypeID(t)]
+
+	if !ok {
+		fieldPaths = map[string][]int{}
+
+		forEachField(t, nil, func(name string, path []int) {
+			fieldPaths[name] = path
+			// extra copy for the case-insensitive match
+			fieldPaths[strings.ToLower(name)] = path
+		})
+
+		newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1)
+		newCache[danger.MakeTypeID(t)] = fieldPaths
+		for k, v := range cache {
+			newCache[k] = v
+		}
+		globalFieldPathsCache.Store(newCache)
+	}
+
+	path, ok := fieldPaths[name]
+	if !ok {
+		path, ok = fieldPaths[strings.ToLower(name)]
+	}
+	return path, ok
+}
+
+func forEachField(t reflect.Type, path []int, do func(name string, path []int)) {
+	n := t.NumField()
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+
+		if !f.Anonymous && f.PkgPath != "" {
+			// only consider exported fields.
+			continue
+		}
+
+		fieldPath := append(path, i)
+		fieldPath = fieldPath[:len(fieldPath):len(fieldPath)]
+
+		name := f.Tag.Get("toml")
+		if name == "-" {
+			continue
+		}
+
+		if i := strings.IndexByte(name, ','); i >= 0 {
+			name = name[:i]
+		}
+
+		if f.Anonymous && name == "" {
+			t2 := f.Type
+			if t2.Kind() == reflect.Ptr {
+				t2 = t2.Elem()
+			}
+
+			if t2.Kind() == reflect.Struct {
+				forEachField(t2, fieldPath, do)
+			}
+			continue
+		}
+
+		if name == "" {
+			name = f.Name
+		}
+
+		do(name, fieldPath)
+	}
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
new file mode 100644
index 0000000000..f526bf2c09
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
@@ -0,0 +1,136 @@
+package unstable
+
+import (
+	"fmt"
+	"unsafe"
+
+	"github.com/pelletier/go-toml/v2/internal/danger"
+)
+
+// Iterator over a sequence of nodes.
+//
+// Starts uninitialized; you need to call Next() first.
+//
+// For example:
+//
+//	it := n.Children()
+//	for it.Next() {
+//		n := it.Node()
+//		// do something with n
+//	}
+type Iterator struct {
+	started bool
+	node    *Node
+}
+
+// Next moves the iterator forward and returns true if it points to a
+// node, false otherwise.
+func (c *Iterator) Next() bool {
+	if !c.started {
+		c.started = true
+	} else if c.node.Valid() {
+		c.node = c.node.Next()
+	}
+	return c.node.Valid()
+}
+
+// IsLast returns true if the current node of the iterator is the last
+// one. Subsequent calls to Next() will return false.
+func (c *Iterator) IsLast() bool {
+	return c.node.next == 0
+}
+
+// Node returns a pointer to the node pointed at by the iterator.
+func (c *Iterator) Node() *Node {
+	return c.node
+}
+
+// Node in a TOML expression AST.
+//
+// Depending on Kind, its sequence of children should be interpreted
+// differently.
+//
+// - Array has one child per element in the array.
+// - InlineTable has one child per key-value in the table (each of kind
+//   KeyValue).
+// - KeyValue has at least two children. The first one is the value. The rest
+//   make a potentially dotted key.
+// - Table and ArrayTable's children represent a dotted key (same as
+//   KeyValue, but without the first node being the value).
+//
+// When relevant, Raw describes the range of bytes this node is referring to in
+// the input document. Use Parser.Raw() to retrieve the actual bytes.
type Node struct {
+	Kind Kind
+	Raw  Range  // Raw bytes from the input.
+	Data []byte // Node value (either allocated or referencing the input).
+
+	// References to other nodes, as offsets in the backing array
+	// from this node. References can go backward, so those can be
+	// negative.
+	next  int // 0 if last element
+	child int // 0 if no child
+}
+
+// Range of bytes in the document.
+type Range struct {
+	Offset uint32
+	Length uint32
+}
+
+// Next returns a pointer to the next node, or nil if there is no next node.
+func (n *Node) Next() *Node {
+	if n.next == 0 {
+		return nil
+	}
+	ptr := unsafe.Pointer(n)
+	size := unsafe.Sizeof(Node{})
+	return (*Node)(danger.Stride(ptr, size, n.next))
+}
+
+// Child returns a pointer to the first child node of this node. Other children
+// can be accessed calling Next on the first child. Returns nil if this Node
+// has no child.
+func (n *Node) Child() *Node {
+	if n.child == 0 {
+		return nil
+	}
+	ptr := unsafe.Pointer(n)
+	size := unsafe.Sizeof(Node{})
+	return (*Node)(danger.Stride(ptr, size, n.child))
+}
+
+// Valid returns true if the node is non-nil and therefore safe to use.
+func (n *Node) Valid() bool {
+	return n != nil
+}
+
+// Key returns the child nodes making up the Key on a supported node. Panics
+// otherwise. They are all guaranteed to be of the Kind Key. A simple key
+// would return just one element.
+func (n *Node) Key() Iterator {
+	switch n.Kind {
+	case KeyValue:
+		value := n.Child()
+		if !value.Valid() {
+			panic(fmt.Errorf("KeyValue should have at least two children"))
+		}
+		return Iterator{node: value.Next()}
+	case Table, ArrayTable:
+		return Iterator{node: n.Child()}
+	default:
+		panic(fmt.Errorf("Key() is not supported on a %s", n.Kind))
+	}
+}
+
+// Value returns a pointer to the value node of a KeyValue.
+// Guaranteed to be non-nil. Panics if not called on a KeyValue node,
+// or if the Children are malformed.
+func (n *Node) Value() *Node {
+	return n.Child()
+}
+
+// Children returns an iterator over a node's children.
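+// It is equivalent to starting from Child() and walking with Next() by hand.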
+func (n *Node) Children() Iterator {
+	return Iterator{node: n.Child()}
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go b/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
new file mode 100644
index 0000000000..9538e30df9
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
@@ -0,0 +1,71 @@
+package unstable
+
+// root contains a full AST.
+//
+// It is immutable once constructed with Builder.
+type root struct {
+	nodes []Node
+}
+
+// Iterator over the top level nodes.
+func (r *root) Iterator() Iterator {
+	it := Iterator{}
+	if len(r.nodes) > 0 {
+		it.node = &r.nodes[0]
+	}
+	return it
+}
+
+func (r *root) at(idx reference) *Node {
+	return &r.nodes[idx]
+}
+
+type reference int
+
+const invalidReference reference = -1
+
+func (r reference) Valid() bool {
+	return r != invalidReference
+}
+
+type builder struct {
+	tree    root
+	lastIdx int
+}
+
+func (b *builder) Tree() *root {
+	return &b.tree
+}
+
+func (b *builder) NodeAt(ref reference) *Node {
+	return b.tree.at(ref)
+}
+
+func (b *builder) Reset() {
+	b.tree.nodes = b.tree.nodes[:0]
+	b.lastIdx = 0
+}
+
+func (b *builder) Push(n Node) reference {
+	b.lastIdx = len(b.tree.nodes)
+	b.tree.nodes = append(b.tree.nodes, n)
+	return reference(b.lastIdx)
+}
+
+func (b *builder) PushAndChain(n Node) reference {
+	newIdx := len(b.tree.nodes)
+	b.tree.nodes = append(b.tree.nodes, n)
+	if b.lastIdx >= 0 {
+		b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx
+	}
+	b.lastIdx = newIdx
+	return reference(b.lastIdx)
+}
+
+func (b *builder) AttachChild(parent reference, child reference) {
+	b.tree.nodes[parent].child = int(child) - int(parent)
+}
+
+func (b *builder) Chain(from reference, to reference) {
+	b.tree.nodes[from].next = int(to) - int(from)
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go b/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go
new file mode 100644
index 0000000000..7ff26c53c7
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go
@@ -0,0 +1,3 @@
+// Package unstable provides APIs that do not meet the backward compatibility
+// guarantees yet.
+package unstable
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go b/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go
new file mode 100644
index 0000000000..ff9df1bef8
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go
@@ -0,0 +1,71 @@
+package unstable
+
+import "fmt"
+
+// Kind represents the type of TOML structure contained in a given Node.
+type Kind int
+
+const (
+	// Meta
+	Invalid Kind = iota
+	Comment
+	Key
+
+	// Top level structures
+	Table
+	ArrayTable
+	KeyValue
+
+	// Container values
+	Array
+	InlineTable
+
+	// Values
+	String
+	Bool
+	Float
+	Integer
+	LocalDate
+	LocalTime
+	LocalDateTime
+	DateTime
+)
+
+// String implements fmt.Stringer.
+func (k Kind) String() string {
+	switch k {
+	case Invalid:
+		return "Invalid"
+	case Comment:
+		return "Comment"
+	case Key:
+		return "Key"
+	case Table:
+		return "Table"
+	case ArrayTable:
+		return "ArrayTable"
+	case KeyValue:
+		return "KeyValue"
+	case Array:
+		return "Array"
+	case InlineTable:
+		return "InlineTable"
+	case String:
+		return "String"
+	case Bool:
+		return "Bool"
+	case Float:
+		return "Float"
+	case Integer:
+		return "Integer"
+	case LocalDate:
+		return "LocalDate"
+	case LocalTime:
+		return "LocalTime"
+	case LocalDateTime:
+		return "LocalDateTime"
+	case DateTime:
+		return "DateTime"
+	}
+	panic(fmt.Errorf("Kind.String() not implemented for '%d'", k))
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
new file mode 100644
index 0000000000..50358a44ff
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
@@ -0,0 +1,1245 @@
+package unstable
+
+import (
+	"bytes"
+	"fmt"
+	"unicode"
+
+	"github.com/pelletier/go-toml/v2/internal/characters"
+	"github.com/pelletier/go-toml/v2/internal/danger"
+)
+
+// ParserError describes an error relative to the content of the document.
+//
+// It cannot outlive the instance of Parser it refers to, and may cause panics
+// if the parser is reset.
+type ParserError struct {
+	Highlight []byte
+	Message   string
+	Key       []string // optional
+}
+
+// Error is the implementation of the error interface.
+func (e *ParserError) Error() string {
+	return e.Message
+}
+
+// NewParserError is a convenience function to create a ParserError
+//
+// Warning: Highlight needs to be a subslice of Parser.data, so only slices
+// returned by Parser.Raw are valid candidates.
+func NewParserError(highlight []byte, format string, args ...interface{}) error {
+	return &ParserError{
+		Highlight: highlight,
+		Message:   fmt.Errorf(format, args...).Error(),
+	}
+}
+
+// Parser scans over a TOML-encoded document and generates an iterative AST.
+//
+// To prime the Parser, first reset it with the contents of a TOML document.
+// Then, process all top-level expressions sequentially. See Example.
+//
+// Don't forget to check Error() after you're done parsing.
+//
+// Each top-level expression needs to be fully processed before calling
+// NextExpression() again. Otherwise, calls to various Node methods may panic if
+// the parser has moved on to the next expression.
+//
+// For performance reasons, go-toml doesn't make a copy of the input bytes to
+// the parser. Make sure to copy all the bytes you need to outlive the slice
+// given to the parser.
+type Parser struct {
+	data    []byte
+	builder builder
+	ref     reference
+	left    []byte
+	err     error
+	first   bool
+
+	KeepComments bool
+}
+
+// Data returns the slice provided to the last call to Reset.
+func (p *Parser) Data() []byte {
+	return p.data
+}
+
+// Range returns a range description that corresponds to a given slice of the
+// input. If the argument is not a subslice of the parser input, this function
+// panics.
+func (p *Parser) Range(b []byte) Range {
+	return Range{
+		Offset: uint32(danger.SubsliceOffset(p.data, b)),
+		Length: uint32(len(b)),
+	}
+}
+
+// Raw returns the slice corresponding to the bytes in the given range.
+func (p *Parser) Raw(raw Range) []byte {
+	return p.data[raw.Offset : raw.Offset+raw.Length]
+}
+
+// Reset brings the parser to its initial state for a given input. It wipes and
+// reuses internal storage to reduce allocation.
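+//
+// A minimal parsing loop, as a sketch (doc is the input []byte; error
+// handling is elided):
+//
+//	p := Parser{}
+//	p.Reset(doc)
+//	for p.NextExpression() {
+//		e := p.Expression()
+//		_ = e // fully process e before the next NextExpression call
+//	}
+//	err := p.Error()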
+func (p *Parser) Reset(b []byte) { + p.builder.Reset() + p.ref = invalidReference + p.data = b + p.left = b + p.err = nil + p.first = true +} + +// NextExpression parses the next top-level expression. If an expression was +// successfully parsed, it returns true. If the parser is at the end of the +// document or an error occurred, it returns false. +// +// Retrieve the parsed expression with Expression(). +func (p *Parser) NextExpression() bool { + if len(p.left) == 0 || p.err != nil { + return false + } + + p.builder.Reset() + p.ref = invalidReference + + for { + if len(p.left) == 0 || p.err != nil { + return false + } + + if !p.first { + p.left, p.err = p.parseNewline(p.left) + } + + if len(p.left) == 0 || p.err != nil { + return false + } + + p.ref, p.left, p.err = p.parseExpression(p.left) + + if p.err != nil { + return false + } + + p.first = false + + if p.ref.Valid() { + return true + } + } +} + +// Expression returns a pointer to the node representing the last successfully +// parsed expression. +func (p *Parser) Expression() *Node { + return p.builder.NodeAt(p.ref) +} + +// Error returns any error that has occurred during parsing. +func (p *Parser) Error() error { + return p.err +} + +// Position describes a position in the input. +type Position struct { + // Number of bytes from the beginning of the input. + Offset int + // Line number, starting at 1. + Line int + // Column number, starting at 1. + Column int +} + +// Shape describes the position of a range in the input. +type Shape struct { + Start Position + End Position +} + +func (p *Parser) position(b []byte) Position { + offset := danger.SubsliceOffset(p.data, b) + + lead := p.data[:offset] + + return Position{ + Offset: offset, + Line: bytes.Count(lead, []byte{'\n'}) + 1, + Column: len(lead) - bytes.LastIndex(lead, []byte{'\n'}), + } +} + +// Shape returns the shape of the given range in the input. Will +// panic if the range is not a subslice of the input. 
+func (p *Parser) Shape(r Range) Shape { + raw := p.Raw(r) + return Shape{ + Start: p.position(raw), + End: p.position(raw[r.Length:]), + } +} + +func (p *Parser) parseNewline(b []byte) ([]byte, error) { + if b[0] == '\n' { + return b[1:], nil + } + + if b[0] == '\r' { + _, rest, err := scanWindowsNewline(b) + return rest, err + } + + return nil, NewParserError(b[0:1], "expected newline but got %#U", b[0]) +} + +func (p *Parser) parseComment(b []byte) (reference, []byte, error) { + ref := invalidReference + data, rest, err := scanComment(b) + if p.KeepComments && err == nil { + ref = p.builder.Push(Node{ + Kind: Comment, + Raw: p.Range(data), + Data: data, + }) + } + return ref, rest, err +} + +func (p *Parser) parseExpression(b []byte) (reference, []byte, error) { + // expression = ws [ comment ] + // expression =/ ws keyval ws [ comment ] + // expression =/ ws table ws [ comment ] + ref := invalidReference + + b = p.parseWhitespace(b) + + if len(b) == 0 { + return ref, b, nil + } + + if b[0] == '#' { + ref, rest, err := p.parseComment(b) + return ref, rest, err + } + + if b[0] == '\n' || b[0] == '\r' { + return ref, b, nil + } + + var err error + if b[0] == '[' { + ref, b, err = p.parseTable(b) + } else { + ref, b, err = p.parseKeyval(b) + } + + if err != nil { + return ref, nil, err + } + + b = p.parseWhitespace(b) + + if len(b) > 0 && b[0] == '#' { + cref, rest, err := p.parseComment(b) + if cref != invalidReference { + p.builder.Chain(ref, cref) + } + return ref, rest, err + } + + return ref, b, nil +} + +func (p *Parser) parseTable(b []byte) (reference, []byte, error) { + // table = std-table / array-table + if len(b) > 1 && b[1] == '[' { + return p.parseArrayTable(b) + } + + return p.parseStdTable(b) +} + +func (p *Parser) parseArrayTable(b []byte) (reference, []byte, error) { + // array-table = array-table-open key array-table-close + // array-table-open = %x5B.5B ws ; [[ Double left square bracket + // array-table-close = ws %x5D.5D ; ]] Double right square bracket + ref := p.builder.Push(Node{ + Kind: ArrayTable, + }) + + b = b[2:] + b = p.parseWhitespace(b) + + k, b, err := p.parseKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.AttachChild(ref, k) + b = p.parseWhitespace(b) + + b, err = expect(']', b) + if err != nil { + return ref, nil, err + } + + b, err = expect(']', b) + + return ref, b, err +} + +func (p *Parser) parseStdTable(b []byte) (reference, []byte, error) { + // std-table = std-table-open key std-table-close + // std-table-open = %x5B ws ; [ Left square bracket + // std-table-close = ws %x5D ; ] Right square bracket + ref := p.builder.Push(Node{ + Kind: Table, + }) + + b = b[1:] + b = p.parseWhitespace(b) + + key, b, err := p.parseKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.AttachChild(ref, key) + + b = p.parseWhitespace(b) + + b, err = expect(']', b) + + return ref, b, err +} + +func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) { + // keyval = key keyval-sep val + ref := p.builder.Push(Node{ + Kind: KeyValue, + }) + + key, b, err := p.parseKey(b) + if err != nil { + return invalidReference, nil, err + } + + // keyval-sep = ws %x3D ws ; = + + b = p.parseWhitespace(b) + + if len(b) == 0 { + return invalidReference, nil, NewParserError(b, "expected = after a key, but the document ends there") + } + + b, err = expect('=', b) + if err != nil { + return invalidReference, nil, err + } + + b = p.parseWhitespace(b) + + valRef, b, err := p.parseVal(b) + if err != nil { + return ref, b, err + } + + 
p.builder.Chain(valRef, key) + p.builder.AttachChild(ref, valRef) + + return ref, b, err +} + +//nolint:cyclop,funlen +func (p *Parser) parseVal(b []byte) (reference, []byte, error) { + // val = string / boolean / array / inline-table / date-time / float / integer + ref := invalidReference + + if len(b) == 0 { + return ref, nil, NewParserError(b, "expected value, not eof") + } + + var err error + c := b[0] + + switch c { + case '"': + var raw []byte + var v []byte + if scanFollowsMultilineBasicStringDelimiter(b) { + raw, v, b, err = p.parseMultilineBasicString(b) + } else { + raw, v, b, err = p.parseBasicString(b) + } + + if err == nil { + ref = p.builder.Push(Node{ + Kind: String, + Raw: p.Range(raw), + Data: v, + }) + } + + return ref, b, err + case '\'': + var raw []byte + var v []byte + if scanFollowsMultilineLiteralStringDelimiter(b) { + raw, v, b, err = p.parseMultilineLiteralString(b) + } else { + raw, v, b, err = p.parseLiteralString(b) + } + + if err == nil { + ref = p.builder.Push(Node{ + Kind: String, + Raw: p.Range(raw), + Data: v, + }) + } + + return ref, b, err + case 't': + if !scanFollowsTrue(b) { + return ref, nil, NewParserError(atmost(b, 4), "expected 'true'") + } + + ref = p.builder.Push(Node{ + Kind: Bool, + Data: b[:4], + }) + + return ref, b[4:], nil + case 'f': + if !scanFollowsFalse(b) { + return ref, nil, NewParserError(atmost(b, 5), "expected 'false'") + } + + ref = p.builder.Push(Node{ + Kind: Bool, + Data: b[:5], + }) + + return ref, b[5:], nil + case '[': + return p.parseValArray(b) + case '{': + return p.parseInlineTable(b) + default: + return p.parseIntOrFloatOrDateTime(b) + } +} + +func atmost(b []byte, n int) []byte { + if n >= len(b) { + return b + } + + return b[:n] +} + +func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { + v, rest, err := scanLiteralString(b) + if err != nil { + return nil, nil, nil, err + } + + return v, v[1 : len(v)-1], rest, nil +} + +func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) { + // inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close + // inline-table-open = %x7B ws ; { + // inline-table-close = ws %x7D ; } + // inline-table-sep = ws %x2C ws ; , Comma + // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] + parent := p.builder.Push(Node{ + Kind: InlineTable, + Raw: p.Range(b[:1]), + }) + + first := true + + var child reference + + b = b[1:] + + var err error + + for len(b) > 0 { + previousB := b + b = p.parseWhitespace(b) + + if len(b) == 0 { + return parent, nil, NewParserError(previousB[:1], "inline table is incomplete") + } + + if b[0] == '}' { + break + } + + if !first { + b, err = expect(',', b) + if err != nil { + return parent, nil, err + } + b = p.parseWhitespace(b) + } + + var kv reference + + kv, b, err = p.parseKeyval(b) + if err != nil { + return parent, nil, err + } + + if first { + p.builder.AttachChild(parent, kv) + } else { + p.builder.Chain(child, kv) + } + child = kv + + first = false + } + + rest, err := expect('}', b) + + return parent, rest, err +} + +//nolint:funlen,cyclop +func (p *Parser) parseValArray(b []byte) (reference, []byte, error) { + // array = array-open [ array-values ] ws-comment-newline array-close + // array-open = %x5B ; [ + // array-close = %x5D ; ] + // array-values = ws-comment-newline val ws-comment-newline array-sep array-values + // array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] + // array-sep = %x2C ; , Comma + // ws-comment-newline = *( wschar / [ comment ] 
newline ) + arrayStart := b + b = b[1:] + + parent := p.builder.Push(Node{ + Kind: Array, + }) + + // First indicates whether the parser is looking for the first element + // (non-comment) of the array. + first := true + + lastChild := invalidReference + + addChild := func(valueRef reference) { + if lastChild == invalidReference { + p.builder.AttachChild(parent, valueRef) + } else { + p.builder.Chain(lastChild, valueRef) + } + lastChild = valueRef + } + + var err error + for len(b) > 0 { + cref := invalidReference + cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + + if cref != invalidReference { + addChild(cref) + } + + if len(b) == 0 { + return parent, nil, NewParserError(arrayStart[:1], "array is incomplete") + } + + if b[0] == ']' { + break + } + + if b[0] == ',' { + if first { + return parent, nil, NewParserError(b[0:1], "array cannot start with comma") + } + b = b[1:] + + cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + if cref != invalidReference { + addChild(cref) + } + } else if !first { + return parent, nil, NewParserError(b[0:1], "array elements must be separated by commas") + } + + // TOML allows trailing commas in arrays. + if len(b) > 0 && b[0] == ']' { + break + } + + var valueRef reference + valueRef, b, err = p.parseVal(b) + if err != nil { + return parent, nil, err + } + + addChild(valueRef) + + cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + if cref != invalidReference { + addChild(cref) + } + + first = false + } + + rest, err := expect(']', b) + + return parent, rest, err +} + +func (p *Parser) parseOptionalWhitespaceCommentNewline(b []byte) (reference, []byte, error) { + rootCommentRef := invalidReference + latestCommentRef := invalidReference + + addComment := func(ref reference) { + if rootCommentRef == invalidReference { + rootCommentRef = ref + } else if latestCommentRef == invalidReference { + p.builder.AttachChild(rootCommentRef, ref) + latestCommentRef = ref + } else { + p.builder.Chain(latestCommentRef, ref) + latestCommentRef = ref + } + } + + for len(b) > 0 { + var err error + b = p.parseWhitespace(b) + + if len(b) > 0 && b[0] == '#' { + var ref reference + ref, b, err = p.parseComment(b) + if err != nil { + return invalidReference, nil, err + } + if ref != invalidReference { + addComment(ref) + } + } + + if len(b) == 0 { + break + } + + if b[0] == '\n' || b[0] == '\r' { + b, err = p.parseNewline(b) + if err != nil { + return invalidReference, nil, err + } + } else { + break + } + } + + return rootCommentRef, b, nil +} + +func (p *Parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { + token, rest, err := scanMultilineLiteralString(b) + if err != nil { + return nil, nil, nil, err + } + + i := 3 + + // skip the immediate new line + if token[i] == '\n' { + i++ + } else if token[i] == '\r' && token[i+1] == '\n' { + i += 2 + } + + return token, token[i : len(token)-3], rest, err +} + +//nolint:funlen,gocognit,cyclop +func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) { + // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + // ml-basic-string-delim + // ml-basic-string-delim = 3quotation-mark + // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + // + // mlb-content = mlb-char / newline / mlb-escaped-nl + // mlb-char = mlb-unescaped / escaped + // mlb-quotes = 1*2quotation-mark + // 
mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // mlb-escaped-nl = escape ws newline *( wschar / newline ) + token, escaped, rest, err := scanMultilineBasicString(b) + if err != nil { + return nil, nil, nil, err + } + + i := 3 + + // skip the immediate new line + if token[i] == '\n' { + i++ + } else if token[i] == '\r' && token[i+1] == '\n' { + i += 2 + } + + // fast path + startIdx := i + endIdx := len(token) - len(`"""`) + + if !escaped { + str := token[startIdx:endIdx] + verr := characters.Utf8TomlValidAlreadyEscaped(str) + if verr.Zero() { + return token, str, rest, nil + } + return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + } + + var builder bytes.Buffer + + // The scanner ensures that the token starts and ends with quotes and that + // escapes are balanced. + for i < len(token)-3 { + c := token[i] + + //nolint:nestif + if c == '\\' { + // When the last non-whitespace character on a line is an unescaped \, + // it will be trimmed along with all whitespace (including newlines) up + // to the next non-whitespace character or closing delimiter. + + isLastNonWhitespaceOnLine := false + j := 1 + findEOLLoop: + for ; j < len(token)-3-i; j++ { + switch token[i+j] { + case ' ', '\t': + continue + case '\r': + if token[i+j+1] == '\n' { + continue + } + case '\n': + isLastNonWhitespaceOnLine = true + } + break findEOLLoop + } + if isLastNonWhitespaceOnLine { + i += j + for ; i < len(token)-3; i++ { + c := token[i] + if !(c == '\n' || c == '\r' || c == ' ' || c == '\t') { + i-- + break + } + } + i++ + continue + } + + // handle escaping + i++ + c = token[i] + + switch c { + case '"', '\\': + builder.WriteByte(c) + case 'b': + builder.WriteByte('\b') + case 'f': + builder.WriteByte('\f') + case 'n': + builder.WriteByte('\n') + case 'r': + builder.WriteByte('\r') + case 't': + builder.WriteByte('\t') + case 'e': + builder.WriteByte(0x1B) + case 'u': + x, err := hexToRune(atmost(token[i+1:], 4), 4) + if err != nil { + return nil, nil, nil, err + } + builder.WriteRune(x) + i += 4 + case 'U': + x, err := hexToRune(atmost(token[i+1:], 8), 8) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 8 + default: + return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) + } + i++ + } else { + size := characters.Utf8ValidNext(token[i:]) + if size == 0 { + return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) + } + builder.Write(token[i : i+size]) + i += size + } + } + + return token, builder.Bytes(), rest, nil +} + +func (p *Parser) parseKey(b []byte) (reference, []byte, error) { + // key = simple-key / dotted-key + // simple-key = quoted-key / unquoted-key + // + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + // quoted-key = basic-string / literal-string + // dotted-key = simple-key 1*( dot-sep simple-key ) + // + // dot-sep = ws %x2E ws ; . Period + raw, key, b, err := p.parseSimpleKey(b) + if err != nil { + return invalidReference, nil, err + } + + ref := p.builder.Push(Node{ + Kind: Key, + Raw: p.Range(raw), + Data: key, + }) + + for { + b = p.parseWhitespace(b) + if len(b) > 0 && b[0] == '.' 
{ + b = p.parseWhitespace(b[1:]) + + raw, key, b, err = p.parseSimpleKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.PushAndChain(Node{ + Kind: Key, + Raw: p.Range(raw), + Data: key, + }) + } else { + break + } + } + + return ref, b, nil +} + +func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { + if len(b) == 0 { + return nil, nil, nil, NewParserError(b, "expected key but found none") + } + + // simple-key = quoted-key / unquoted-key + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + // quoted-key = basic-string / literal-string + switch { + case b[0] == '\'': + return p.parseLiteralString(b) + case b[0] == '"': + return p.parseBasicString(b) + case isUnquotedKeyChar(b[0]): + key, rest = scanUnquotedKey(b) + return key, key, rest, nil + default: + return nil, nil, nil, NewParserError(b[0:1], "invalid character at start of key: %c", b[0]) + } +} + +//nolint:funlen,cyclop +func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { + // basic-string = quotation-mark *basic-char quotation-mark + // quotation-mark = %x22 ; " + // basic-char = basic-unescaped / escaped + // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // escaped = escape escape-seq-char + // escape-seq-char = %x22 ; " quotation mark U+0022 + // escape-seq-char =/ %x5C ; \ reverse solidus U+005C + // escape-seq-char =/ %x62 ; b backspace U+0008 + // escape-seq-char =/ %x66 ; f form feed U+000C + // escape-seq-char =/ %x6E ; n line feed U+000A + // escape-seq-char =/ %x72 ; r carriage return U+000D + // escape-seq-char =/ %x74 ; t tab U+0009 + // escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX + // escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX + token, escaped, rest, err := scanBasicString(b) + if err != nil { + return nil, nil, nil, err + } + + startIdx := len(`"`) + endIdx := len(token) - len(`"`) + + // Fast path. If there is no escape sequence, the string should just be + // an UTF-8 encoded string, which is the same as Go. In that case, + // validate the string and return a direct reference to the buffer. + if !escaped { + str := token[startIdx:endIdx] + verr := characters.Utf8TomlValidAlreadyEscaped(str) + if verr.Zero() { + return token, str, rest, nil + } + return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + } + + i := startIdx + + var builder bytes.Buffer + + // The scanner ensures that the token starts and ends with quotes and that + // escapes are balanced. 
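+ // For example, the escaped token "a\tb\u00E9" decodes to the four + // characters 'a', TAB, 'b', 'é': unescaped bytes are copied verbatim and + // each escape sequence is rewritten, so unlike the fast path above the + // result does not alias the input buffer.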
+ for i < len(token)-1 { + c := token[i] + if c == '\\' { + i++ + c = token[i] + + switch c { + case '"', '\\': + builder.WriteByte(c) + case 'b': + builder.WriteByte('\b') + case 'f': + builder.WriteByte('\f') + case 'n': + builder.WriteByte('\n') + case 'r': + builder.WriteByte('\r') + case 't': + builder.WriteByte('\t') + case 'e': + builder.WriteByte(0x1B) + case 'u': + x, err := hexToRune(token[i+1:len(token)-1], 4) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 4 + case 'U': + x, err := hexToRune(token[i+1:len(token)-1], 8) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 8 + default: + return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) + } + i++ + } else { + size := characters.Utf8ValidNext(token[i:]) + if size == 0 { + return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) + } + builder.Write(token[i : i+size]) + i += size + } + } + + return token, builder.Bytes(), rest, nil +} + +func hexToRune(b []byte, length int) (rune, error) { + if len(b) < length { + return -1, NewParserError(b, "unicode point needs %d character, not %d", length, len(b)) + } + b = b[:length] + + var r uint32 + for i, c := range b { + d := uint32(0) + switch { + case '0' <= c && c <= '9': + d = uint32(c - '0') + case 'a' <= c && c <= 'f': + d = uint32(c - 'a' + 10) + case 'A' <= c && c <= 'F': + d = uint32(c - 'A' + 10) + default: + return -1, NewParserError(b[i:i+1], "non-hex character") + } + r = r*16 + d + } + + if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 { + return -1, NewParserError(b, "escape sequence is invalid Unicode code point") + } + + return rune(r), nil +} + +func (p *Parser) parseWhitespace(b []byte) []byte { + // ws = *wschar + // wschar = %x20 ; Space + // wschar =/ %x09 ; Horizontal tab + _, rest := scanWhitespace(b) + + return rest +} + +//nolint:cyclop +func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) { + switch b[0] { + case 'i': + if !scanFollowsInf(b) { + return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'inf'") + } + + return p.builder.Push(Node{ + Kind: Float, + Data: b[:3], + Raw: p.Range(b[:3]), + }), b[3:], nil + case 'n': + if !scanFollowsNan(b) { + return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'nan'") + } + + return p.builder.Push(Node{ + Kind: Float, + Data: b[:3], + Raw: p.Range(b[:3]), + }), b[3:], nil + case '+', '-': + return p.scanIntOrFloat(b) + } + + if len(b) < 3 { + return p.scanIntOrFloat(b) + } + + s := 5 + if len(b) < s { + s = len(b) + } + + for idx, c := range b[:s] { + if isDigit(c) { + continue + } + + if idx == 2 && c == ':' || (idx == 4 && c == '-') { + return p.scanDateTime(b) + } + + break + } + + return p.scanIntOrFloat(b) +} + +func (p *Parser) scanDateTime(b []byte) (reference, []byte, error) { + // scans for contiguous characters in [0-9T:Z.+-], and up to one space if + // followed by a digit. 
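+ // For example: "1979-05-27T07:32:00Z" scans as DateTime, + // "1979-05-27T07:32:00" as LocalDateTime, "1979-05-27" as LocalDate, and + // "07:32:00" as LocalTime.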
+ hasDate := false + hasTime := false + hasTz := false + seenSpace := false + + i := 0 +byteLoop: + for ; i < len(b); i++ { + c := b[i] + + switch { + case isDigit(c): + case c == '-': + hasDate = true + const minOffsetOfTz = 8 + if i >= minOffsetOfTz { + hasTz = true + } + case c == 'T' || c == 't' || c == ':' || c == '.': + hasTime = true + case c == '+' || c == '-' || c == 'Z' || c == 'z': + hasTz = true + case c == ' ': + if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) { + i += 2 + // Avoid reaching past the end of the document in case the time + // is malformed. See TestIssue585. + if i >= len(b) { + i-- + } + seenSpace = true + hasTime = true + } else { + break byteLoop + } + default: + break byteLoop + } + } + + var kind Kind + + if hasTime { + if hasDate { + if hasTz { + kind = DateTime + } else { + kind = LocalDateTime + } + } else { + kind = LocalTime + } + } else { + kind = LocalDate + } + + return p.builder.Push(Node{ + Kind: kind, + Data: b[:i], + }), b[i:], nil +} + +//nolint:funlen,gocognit,cyclop +func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { + i := 0 + + if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' { + var isValidRune validRuneFn + + switch b[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + i++ + } + + if isValidRune != nil { + i += 2 + for ; i < len(b); i++ { + if !isValidRune(b[i]) { + break + } + } + } + + return p.builder.Push(Node{ + Kind: Integer, + Data: b[:i], + Raw: p.Range(b[:i]), + }), b[i:], nil + } + + isFloat := false + + for ; i < len(b); i++ { + c := b[i] + + if c >= '0' && c <= '9' || c == '+' || c == '-' || c == '_' { + continue + } + + if c == '.' || c == 'e' || c == 'E' { + isFloat = true + + continue + } + + if c == 'i' { + if scanFollowsInf(b[i:]) { + return p.builder.Push(Node{ + Kind: Float, + Data: b[:i+3], + Raw: p.Range(b[:i+3]), + }), b[i+3:], nil + } + + return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'i' while scanning for a number") + } + + if c == 'n' { + if scanFollowsNan(b[i:]) { + return p.builder.Push(Node{ + Kind: Float, + Data: b[:i+3], + Raw: p.Range(b[:i+3]), + }), b[i+3:], nil + } + + return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'n' while scanning for a number") + } + + break + } + + if i == 0 { + return invalidReference, b, NewParserError(b, "incomplete number") + } + + kind := Integer + + if isFloat { + kind = Float + } + + return p.builder.Push(Node{ + Kind: kind, + Data: b[:i], + Raw: p.Range(b[:i]), + }), b[i:], nil +} + +func isDigit(r byte) bool { + return r >= '0' && r <= '9' +} + +type validRuneFn func(r byte) bool + +func isValidHexRune(r byte) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r byte) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r byte) bool { + return r == '0' || r == '1' || r == '_' +} + +func expect(x byte, b []byte) ([]byte, error) { + if len(b) == 0 { + return nil, NewParserError(b, "expected character %c but the document ended here", x) + } + + if b[0] != x { + return nil, NewParserError(b[0:1], "expected character %c", x) + } + + return b[1:], nil +} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go b/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go new file mode 100644 index 0000000000..0512181d28 --- /dev/null +++ 
b/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go @@ -0,0 +1,270 @@ +package unstable + +import "github.com/pelletier/go-toml/v2/internal/characters" + +func scanFollows(b []byte, pattern string) bool { + n := len(pattern) + + return len(b) >= n && string(b[:n]) == pattern +} + +func scanFollowsMultilineBasicStringDelimiter(b []byte) bool { + return scanFollows(b, `"""`) +} + +func scanFollowsMultilineLiteralStringDelimiter(b []byte) bool { + return scanFollows(b, `'''`) +} + +func scanFollowsTrue(b []byte) bool { + return scanFollows(b, `true`) +} + +func scanFollowsFalse(b []byte) bool { + return scanFollows(b, `false`) +} + +func scanFollowsInf(b []byte) bool { + return scanFollows(b, `inf`) +} + +func scanFollowsNan(b []byte) bool { + return scanFollows(b, `nan`) +} + +func scanUnquotedKey(b []byte) ([]byte, []byte) { + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + for i := 0; i < len(b); i++ { + if !isUnquotedKeyChar(b[i]) { + return b[:i], b[i:] + } + } + + return b, b[len(b):] +} + +func isUnquotedKeyChar(r byte) bool { + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' || r == '_' +} + +func scanLiteralString(b []byte) ([]byte, []byte, error) { + // literal-string = apostrophe *literal-char apostrophe + // apostrophe = %x27 ; ' apostrophe + // literal-char = %x09 / %x20-26 / %x28-7E / non-ascii + for i := 1; i < len(b); { + switch b[i] { + case '\'': + return b[:i+1], b[i+1:], nil + case '\n', '\r': + return nil, nil, NewParserError(b[i:i+1], "literal strings cannot have new lines") + } + size := characters.Utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, NewParserError(b[i:i+1], "invalid character") + } + i += size + } + + return nil, nil, NewParserError(b[len(b):], "unterminated literal string") +} + +func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) { + // ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body + // ml-literal-string-delim + // ml-literal-string-delim = 3apostrophe + // ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] + // + // mll-content = mll-char / newline + // mll-char = %x09 / %x20-26 / %x28-7E / non-ascii + // mll-quotes = 1*2apostrophe + for i := 3; i < len(b); { + switch b[i] { + case '\'': + if scanFollowsMultilineLiteralStringDelimiter(b[i:]) { + i += 3 + + // At that point we found three apostrophes, and i is the + // index of the byte after the third one. The scanner + // needs to be eager, because up to two extra + // apostrophes can be accepted at the end of the + // string. 
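+ // For example, in `a = '''x'''''` the first two of the five closing + // apostrophes belong to the string body (`x''`) and the last three + // terminate it.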
+ + if i >= len(b) || b[i] != '\'' { + return b[:i], b[i:], nil + } + i++ + + if i >= len(b) || b[i] != '\'' { + return b[:i], b[i:], nil + } + i++ + + if i < len(b) && b[i] == '\'' { + return nil, nil, NewParserError(b[i-3:i+1], "''' not allowed in multiline literal string") + } + + return b[:i], b[i:], nil + } + case '\r': + if len(b) < i+2 { + return nil, nil, NewParserError(b[len(b):], `need a \n after \r`) + } + if b[i+1] != '\n' { + return nil, nil, NewParserError(b[i:i+2], `need a \n after \r`) + } + i += 2 // skip the \n + continue + } + size := characters.Utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, NewParserError(b[i:i+1], "invalid character") + } + i += size + } + + return nil, nil, NewParserError(b[len(b):], `multiline literal string not terminated by '''`) +} + +func scanWindowsNewline(b []byte) ([]byte, []byte, error) { + const lenCRLF = 2 + if len(b) < lenCRLF { + return nil, nil, NewParserError(b, "windows new line expected") + } + + if b[1] != '\n' { + return nil, nil, NewParserError(b, `windows new line should be \r\n`) + } + + return b[:lenCRLF], b[lenCRLF:], nil +} + +func scanWhitespace(b []byte) ([]byte, []byte) { + for i := 0; i < len(b); i++ { + switch b[i] { + case ' ', '\t': + continue + default: + return b[:i], b[i:] + } + } + + return b, b[len(b):] +} + +func scanComment(b []byte) ([]byte, []byte, error) { + // comment-start-symbol = %x23 ; # + // non-ascii = %x80-D7FF / %xE000-10FFFF + // non-eol = %x09 / %x20-7F / non-ascii + // + // comment = comment-start-symbol *non-eol + + for i := 1; i < len(b); { + if b[i] == '\n' { + return b[:i], b[i:], nil + } + if b[i] == '\r' { + if i+1 < len(b) && b[i+1] == '\n' { + return b[:i+1], b[i+1:], nil + } + return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") + } + size := characters.Utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") + } + + i += size + } + + return b, b[len(b):], nil +} + +func scanBasicString(b []byte) ([]byte, bool, []byte, error) { + // basic-string = quotation-mark *basic-char quotation-mark + // quotation-mark = %x22 ; " + // basic-char = basic-unescaped / escaped + // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // escaped = escape escape-seq-char + escaped := false + i := 1 + + for ; i < len(b); i++ { + switch b[i] { + case '"': + return b[:i+1], escaped, b[i+1:], nil + case '\n', '\r': + return nil, escaped, nil, NewParserError(b[i:i+1], "basic strings cannot have new lines") + case '\\': + if len(b) < i+2 { + return nil, escaped, nil, NewParserError(b[i:i+1], "need a character after \\") + } + escaped = true + i++ // skip the next character + } + } + + return nil, escaped, nil, NewParserError(b[len(b):], `basic string not terminated by "`) +} + +func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) { + // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + // ml-basic-string-delim + // ml-basic-string-delim = 3quotation-mark + // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + // + // mlb-content = mlb-char / newline / mlb-escaped-nl + // mlb-char = mlb-unescaped / escaped + // mlb-quotes = 1*2quotation-mark + // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // mlb-escaped-nl = escape ws newline *( wschar / newline ) + + escaped := false + i := 3 + + for ; i < len(b); i++ { + switch b[i] { + case '"': + if scanFollowsMultilineBasicStringDelimiter(b[i:]) { + i += 3 + + // At that point we found 3 
quotation marks, and i is the + // index of the byte after the third one. The scanner + // needs to be eager, because up to two extra + // quotation marks can be accepted at the end of the + // string. + + if i >= len(b) || b[i] != '"' { + return b[:i], escaped, b[i:], nil + } + i++ + + if i >= len(b) || b[i] != '"' { + return b[:i], escaped, b[i:], nil + } + i++ + + if i < len(b) && b[i] == '"' { + return nil, escaped, nil, NewParserError(b[i-3:i+1], `""" not allowed in multiline basic string`) + } + + return b[:i], escaped, b[i:], nil + } + case '\\': + if len(b) < i+2 { + return nil, escaped, nil, NewParserError(b[len(b):], "need a character after \\") + } + escaped = true + i++ // skip the next character + case '\r': + if len(b) < i+2 { + return nil, escaped, nil, NewParserError(b[len(b):], `need a \n after \r`) + } + if b[i+1] != '\n' { + return nil, escaped, nil, NewParserError(b[i:i+2], `need a \n after \r`) + } + i++ // skip the \n + } + } + + return nil, escaped, nil, NewParserError(b[len(b):], `multiline basic string not terminated by """`) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go new file mode 100644 index 0000000000..00cfd6de45 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go @@ -0,0 +1,7 @@ +package unstable + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a TOML document. +type Unmarshaler interface { + UnmarshalTOML(value *Node) error +} diff --git a/vendor/github.com/phayes/checkstyle/.scrutinizer.yml b/vendor/github.com/phayes/checkstyle/.scrutinizer.yml deleted file mode 100644 index d9284b6b4b..0000000000 --- a/vendor/github.com/phayes/checkstyle/.scrutinizer.yml +++ /dev/null @@ -1,15 +0,0 @@ -build: - dependencies: - before: - - 'source <(curl -fsSL https://raw.githubusercontent.com/phayes/go-scrutinize/master/install-golang)' - - tests: - override: - - - command: 'cd $PROJECTPATH && go-scrutinize' - coverage: - file: 'coverage.xml' - format: 'clover' - analysis: - file: 'checkstyle_report.xml' - format: 'general-checkstyle' \ No newline at end of file diff --git a/vendor/github.com/phayes/checkstyle/LICENSE b/vendor/github.com/phayes/checkstyle/LICENSE deleted file mode 100644 index 6dc912f39e..0000000000 --- a/vendor/github.com/phayes/checkstyle/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -BSD 3-Clause License - -Copyright (c) 2017, Patrick D Hayes -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/phayes/checkstyle/README.md b/vendor/github.com/phayes/checkstyle/README.md deleted file mode 100644 index 358cf6752c..0000000000 --- a/vendor/github.com/phayes/checkstyle/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# checkstyle -[![GoDoc](https://godoc.org/github.com/phayes/checkstyle?status.svg)](https://godoc.org/github.com/phayes/checkstyle) -[![Go Report Card](https://goreportcard.com/badge/github.com/phayes/checkstyle)](https://goreportcard.com/report/github.com/phayes/checkstyle) -[![Build Status](https://scrutinizer-ci.com/g/phayes/checkstyle/badges/build.png?b=master)](https://scrutinizer-ci.com/g/phayes/checkstyle/build-status/master) - -Read and write checksyle_report.xml files with golang - -Checkstyle XML files are a standard file format for reporting errors in source code, and is often generated by static analysis tools. - -Example usage: - -```go - -import "github.com/phayes/checkstyle" - -// Print XML into human readable format -checkSyle, err := checkstyle.ReadFile("checkstyle_report.xml") -if err != nil { - log.Fatal(err) -} -for _, file := range checkStyle.File { - fmt.Println(File.Name) - for _, codingError := range file.Error { - fmt.Println("\t", codingError.Line, codingError.Message) - } -} - -// Create a new XML file from scratch -check := checkstyle.New() - -// Ensure that a file has been added -file := check.EnsureFile("/path/to/file") - -// Create an error on line 10 -codingError := checkstyle.NewError(10, "format", "line must end with a full stop") - -// Add the error to the file -file.AddError(codingError) - -// Output XML -fmt.Print(check) -``` - -For more information on checkstyle XML see: http://checkstyle.sourceforge.net/checks.html diff --git a/vendor/github.com/phayes/checkstyle/checkstyle.go b/vendor/github.com/phayes/checkstyle/checkstyle.go deleted file mode 100644 index cabbd4b40e..0000000000 --- a/vendor/github.com/phayes/checkstyle/checkstyle.go +++ /dev/null @@ -1,112 +0,0 @@ -package checkstyle - -import "encoding/xml" -import "io/ioutil" - -// DefaultCheckStyleVersion defines the default "version" attribute on "" lememnt -var DefaultCheckStyleVersion = "1.0.0" - -// Severity defines a checkstyle severity code -type Severity string - -var ( - SeverityError Severity = "error" - SeverityInfo Severity = "info" - SeverityWarning Severity = "warning" - SeverityIgnore Severity = "ignore" - SeverityNone Severity -) - -// CheckStyle represents a xml element found in a checkstyle_report.xml file. -type CheckStyle struct { - XMLName xml.Name `xml:"checkstyle"` - Version string `xml:"version,attr"` - File []*File `xml:"file"` -} - -// AddFile adds a checkstyle.File with the given filename. -func (cs *CheckStyle) AddFile(csf *File) { - cs.File = append(cs.File, csf) -} - -// GetFile gets a CheckStyleFile with the given filename. 
-func (cs *CheckStyle) GetFile(filename string) (csf *File, ok bool) { - for _, file := range cs.File { - if file.Name == filename { - csf = file - ok = true - return - } - } - return -} - -// EnsureFile ensures that a CheckStyleFile with the given name exists -// Returns either an exiting CheckStyleFile (if a file with that name exists) -// or a new CheckStyleFile (if a file with that name does not exists) -func (cs *CheckStyle) EnsureFile(filename string) (csf *File) { - csf, ok := cs.GetFile(filename) - if !ok { - csf = NewFile(filename) - cs.AddFile(csf) - } - return csf -} - -// String implements Stringer. Returns as xml. -func (cs *CheckStyle) String() string { - checkStyleXML, err := xml.Marshal(cs) - if err != nil { - panic(err) - } - return string(checkStyleXML) -} - -// New returns a new CheckStyle -func New() *CheckStyle { - return &CheckStyle{Version: DefaultCheckStyleVersion, File: []*File{}} -} - -// File represents a xml element. -type File struct { - XMLName xml.Name `xml:"file"` - Name string `xml:"name,attr"` - Error []*Error `xml:"error"` -} - -// AddError adds a checkstyle.Error to the file. -func (csf *File) AddError(cse *Error) { - csf.Error = append(csf.Error, cse) -} - -// NewFile creates a new checkstyle.File -func NewFile(filename string) *File { - return &File{Name: filename, Error: []*Error{}} -} - -// Error represents a xml element -type Error struct { - XMLName xml.Name `xml:"error"` - Line int `xml:"line,attr"` - Column int `xml:"column,attr,omitempty"` - Severity Severity `xml:"severity,attr,omitempty"` - Message string `xml:"message,attr"` - Source string `xml:"source,attr"` -} - -// NewError creates a new checkstyle.Error -// Note that line starts at 0, and column starts at 1 -func NewError(line int, column int, severity Severity, message string, source string) *Error { - return &Error{Line: line, Column: column, Severity: severity, Message: message, Source: source} -} - -// ReadFile reads a checkfile.xml file and returns a CheckStyle object. -func ReadFile(filename string) (*CheckStyle, error) { - checkStyleXML, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - checkStyle := New() - err = xml.Unmarshal(checkStyleXML, checkStyle) - return checkStyle, err -} diff --git a/vendor/github.com/phayes/checkstyle/godoc.go b/vendor/github.com/phayes/checkstyle/godoc.go deleted file mode 100644 index c9662fe9ed..0000000000 --- a/vendor/github.com/phayes/checkstyle/godoc.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Package checkstyle allows the parsing of generation of checkstyle XML files. - -Checkstyle XML files are a standard file format for reporting errors in source code, and is often generated by static analysis tools. 
- -Example usage: - // Print XML into human readable format - checkSyle, err := checkstyle.ReadFile("checkstyle_report.xml") - if err != nil { - log.Fatal(err) - } - for _, file := range checkStyle.File { - fmt.Println(File.Name) - for _, codingError := range file.Error { - fmt.Println("\t", codingError.Line, codingError.Message) - } - } - - // Create a new XML file from scratch - check := checkstyle.New() - - // Ensure that a file has been added - file := check.EnsureFile("/path/to/file") - - // Create an error on line 10, column 5 - codingError := checkstyle.NewError(10, 5, checkstyle.SeverityWarning, "format", "line must end with a full stop") - - // Add the error to the file - file.AddError(codingError) - - // Output XML - fmt.Print(check) - -For more information on checkstyle XML see: http://checkstyle.sourceforge.net/checks.html -*/ -package checkstyle diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b1b3..0000000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 9159de03e0..0000000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -script: - - make check diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e755..0000000000 --- a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile deleted file mode 100644 index ce9d7cded6..0000000000 --- a/vendor/github.com/pkg/errors/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -PKGS := github.com/pkg/errors -SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) -GO := go - -check: test vet gofmt misspell unconvert staticcheck ineffassign unparam - -test: - $(GO) test $(PKGS) - -vet: | test - $(GO) vet $(PKGS) - -staticcheck: - $(GO) get honnef.co/go/tools/cmd/staticcheck - staticcheck -checks all $(PKGS) - -misspell: - $(GO) get github.com/client9/misspell/cmd/misspell - misspell \ - -locale GB \ - -error \ - *.md *.go - -unconvert: - $(GO) get github.com/mdempsky/unconvert - unconvert -v $(PKGS) - -ineffassign: - $(GO) get github.com/gordonklaus/ineffassign - find $(SRCDIRS) -name '*.go' | xargs ineffassign - -pedantic: check errcheck - -unparam: - $(GO) get mvdan.cc/unparam - unparam ./... - -errcheck: - $(GO) get github.com/kisielk/errcheck - errcheck $(PKGS) - -gofmt: - @echo Checking code is gofmted - @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 54dfdcb12e..0000000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). 
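With github.com/pkg/errors leaving the vendor tree here, the wrapping and cause-inspection idioms described above map onto the standard library, which the deleted go113.go shim further below already delegates to. A minimal sketch of the stdlib equivalents; the `read` helper and the file name are illustrative only:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// read annotates a failure the way errors.Wrap did, using the %w verb so
// the original error stays reachable through the Unwrap chain.
func read(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		return fmt.Errorf("read failed: %w", err)
	}
	return nil
}

func main() {
	err := read("missing.txt")
	// errors.Is walks the Unwrap chain, covering the common uses of
	// errors.Cause; errors.As replaces the type-switch-on-Cause pattern.
	if errors.Is(err, os.ErrNotExist) {
		fmt.Println("not found:", err)
	}
}
```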
- -## Roadmap - -With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: - -- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) -- 1.0. Final release. - -## Contributing - -Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. - -Before sending a PR, please discuss your change by raising an issue. - -## License - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932eade02..0000000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 161aea2582..0000000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,288 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. 
For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d\n", f, f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withStack) Unwrap() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. 
-func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. -func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withMessage) Unwrap() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go deleted file mode 100644 index be0d10d0c7..0000000000 --- a/vendor/github.com/pkg/errors/go113.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build go1.13 - -package errors - -import ( - stderrors "errors" -) - -// Is reports whether any error in err's chain matches target. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. -func Is(err, target error) bool { return stderrors.Is(err, target) } - -// As finds the first error in err's chain that matches target, and if so, sets -// target to that error value and returns true. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error matches target if the error's concrete value is assignable to the value -// pointed to by target, or if the error has a method As(interface{}) bool such that -// As(target) returns true. In the latter case, the As method is responsible for -// setting target. 
-// -// As will panic if target is not a non-nil pointer to either a type that implements -// error, or to any interface type. As returns false if err is nil. -func As(err error, target interface{}) bool { return stderrors.As(err, target) } - -// Unwrap returns the result of calling the Unwrap method on err, if err's -// type contains an Unwrap method returning error. -// Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - return stderrors.Unwrap(err) -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 779a8348fb..0000000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,177 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strconv" - "strings" -) - -// Frame represents a program counter inside a stack frame. -// For historical reasons if Frame is interpreted as a uintptr -// its value represents the program counter + 1. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// name returns the name of this function, if known. -func (f Frame) name() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - return fn.Name() -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - io.WriteString(s, f.name()) - io.WriteString(s, "\n\t") - io.WriteString(s, f.file()) - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - io.WriteString(s, strconv.Itoa(f.line())) - case 'n': - io.WriteString(s, funcname(f.name())) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// MarshalText formats a stacktrace Frame as a text string. The output is the -// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. -func (f Frame) MarshalText() ([]byte, error) { - name := f.name() - if name == "unknown" { - return []byte(name), nil - } - return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. 
-func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - io.WriteString(s, "\n") - f.Format(s, verb) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - st.formatSlice(s, verb) - } - case 's': - st.formatSlice(s, verb) - } -} - -// formatSlice will format this StackTrace into the given buffer as a slice of -// Frame, only valid when called with '%s' or '%v'. -func (st StackTrace) formatSlice(s fmt.State, verb rune) { - io.WriteString(s, "[") - for i, f := range st { - if i > 0 { - io.WriteString(s, " ") - } - f.Format(s, verb) - } - io.WriteString(s, "]") -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go b/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go index 7fe4c38cc2..8bfb4c9b2a 100644 --- a/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go +++ b/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go @@ -3,6 +3,8 @@ package errorlint import ( "fmt" "go/ast" + "go/types" + "strings" ) var allowedErrors = []struct { @@ -10,8 +12,8 @@ var allowedErrors = []struct { fun string }{ // pkg/archive/tar - {err: "io.EOF", fun: "(*tar.Reader).Next"}, - {err: "io.EOF", fun: "(*tar.Reader).Read"}, + {err: "io.EOF", fun: "(*archive/tar.Reader).Next"}, + {err: "io.EOF", fun: "(*archive/tar.Reader).Read"}, // pkg/bufio {err: "io.EOF", fun: "(*bufio.Reader).Discard"}, {err: "io.EOF", fun: "(*bufio.Reader).Peek"}, @@ -34,22 +36,31 @@ var allowedErrors = []struct { {err: "io.EOF", fun: "(*bytes.Reader).ReadRune"}, {err: "io.EOF", fun: "(*bytes.Reader).ReadString"}, // pkg/database/sql - {err: "sql.ErrNoRows", fun: "(*database/sql.Row).Scan"}, + {err: "database/sql.ErrNoRows", fun: "(*database/sql.Row).Scan"}, + // pkg/debug/elf + {err: "io.EOF", fun: "debug/elf.Open"}, + {err: "io.EOF", fun: "debug/elf.NewFile"}, // pkg/io + {err: "io.EOF", fun: "(io.ReadCloser).Read"}, {err: "io.EOF", fun: "(io.Reader).Read"}, + {err: "io.EOF", fun: "(io.ReaderAt).ReadAt"}, + {err: "io.EOF", fun: "(*io.LimitedReader).Read"}, + {err: "io.EOF", fun: "(*io.SectionReader).Read"}, + {err: "io.EOF", fun: "(*io.SectionReader).ReadAt"}, {err: "io.ErrClosedPipe", fun: "(*io.PipeWriter).Write"}, {err: "io.ErrShortBuffer", fun: "io.ReadAtLeast"}, {err: "io.ErrUnexpectedEOF", fun: "io.ReadAtLeast"}, + {err: "io.EOF", fun: "io.ReadFull"}, {err: "io.ErrUnexpectedEOF", fun: "io.ReadFull"}, // pkg/net/http - {err: "http.ErrServerClosed", fun: "(*net/http.Server).ListenAndServe"}, - {err: "http.ErrServerClosed", fun: "(*net/http.Server).ListenAndServeTLS"}, - {err: "http.ErrServerClosed", fun: "(*net/http.Server).Serve"}, - {err: "http.ErrServerClosed", fun: 
"(*net/http.Server).ServeTLS"}, - {err: "http.ErrServerClosed", fun: "http.ListenAndServe"}, - {err: "http.ErrServerClosed", fun: "http.ListenAndServeTLS"}, - {err: "http.ErrServerClosed", fun: "http.Serve"}, - {err: "http.ErrServerClosed", fun: "http.ServeTLS"}, + {err: "net/http.ErrServerClosed", fun: "(*net/http.Server).ListenAndServe"}, + {err: "net/http.ErrServerClosed", fun: "(*net/http.Server).ListenAndServeTLS"}, + {err: "net/http.ErrServerClosed", fun: "(*net/http.Server).Serve"}, + {err: "net/http.ErrServerClosed", fun: "(*net/http.Server).ServeTLS"}, + {err: "net/http.ErrServerClosed", fun: "net/http.ListenAndServe"}, + {err: "net/http.ErrServerClosed", fun: "net/http.ListenAndServeTLS"}, + {err: "net/http.ErrServerClosed", fun: "net/http.Serve"}, + {err: "net/http.ErrServerClosed", fun: "net/http.ServeTLS"}, // pkg/os {err: "io.EOF", fun: "(*os.File).Read"}, {err: "io.EOF", fun: "(*os.File).ReadAt"}, @@ -61,9 +72,26 @@ var allowedErrors = []struct { {err: "io.EOF", fun: "(*strings.Reader).ReadAt"}, {err: "io.EOF", fun: "(*strings.Reader).ReadByte"}, {err: "io.EOF", fun: "(*strings.Reader).ReadRune"}, + // pkg/context + {err: "context.DeadlineExceeded", fun: "(context.Context).Err"}, + {err: "context.Canceled", fun: "(context.Context).Err"}, +} + +var allowedErrorWildcards = []struct { + err string + fun string +}{ + // golang.org/x/sys/unix + {err: "golang.org/x/sys/unix.E", fun: "golang.org/x/sys/unix."}, } func isAllowedErrAndFunc(err, fun string) bool { + for _, allow := range allowedErrorWildcards { + if strings.HasPrefix(fun, allow.fun) && strings.HasPrefix(err, allow.err) { + return true + } + } + for _, allow := range allowedErrors { if allow.fun == fun && allow.err == err { return true @@ -72,7 +100,7 @@ func isAllowedErrAndFunc(err, fun string) bool { return false } -func isAllowedErrorComparison(info *TypesInfoExt, binExpr *ast.BinaryExpr) bool { +func isAllowedErrorComparison(pass *TypesInfoExt, binExpr *ast.BinaryExpr) bool { var errName string // `.`, e.g. `io.EOF` var callExprs []*ast.CallExpr @@ -83,11 +111,11 @@ func isAllowedErrorComparison(info *TypesInfoExt, binExpr *ast.BinaryExpr) bool case *ast.SelectorExpr: // A selector which we assume refers to a staticaly declared error // in a package. - errName = selectorToString(t) + errName = selectorToString(pass, t) case *ast.Ident: // Identifier, most likely to be the `err` variable or whatever // produces it. - callExprs = assigningCallExprs(info, t) + callExprs = assigningCallExprs(pass, t, map[types.Object]bool{}) case *ast.CallExpr: callExprs = append(callExprs, t) } @@ -107,11 +135,11 @@ func isAllowedErrorComparison(info *TypesInfoExt, binExpr *ast.BinaryExpr) bool // allowed. return false } - if sel, ok := info.Selections[functionSelector]; ok { + if sel, ok := pass.TypesInfo.Selections[functionSelector]; ok { functionNames[i] = fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name()) } else { // If there is no selection, assume it is a package. - functionNames[i] = selectorToString(callExpr.Fun.(*ast.SelectorExpr)) + functionNames[i] = selectorToString(pass, callExpr.Fun.(*ast.SelectorExpr)) } } @@ -126,17 +154,23 @@ func isAllowedErrorComparison(info *TypesInfoExt, binExpr *ast.BinaryExpr) bool // assigningCallExprs finds all *ast.CallExpr nodes that are part of an // *ast.AssignStmt that assign to the subject identifier. 
-func assigningCallExprs(info *TypesInfoExt, subject *ast.Ident) []*ast.CallExpr { +func assigningCallExprs(pass *TypesInfoExt, subject *ast.Ident, visitedObjects map[types.Object]bool) []*ast.CallExpr { if subject.Obj == nil { return nil } - // Find other identifiers that reference this same object. Make sure to - // exclude the subject identifier as it will cause an infinite recursion - // and is being used in a read operation anyway. - sobj := info.ObjectOf(subject) + // Find other identifiers that reference this same object. + sobj := pass.TypesInfo.ObjectOf(subject) + + if visitedObjects[sobj] { + return nil + } + visitedObjects[sobj] = true + + // Make sure to exclude the subject identifier as it will cause an infinite recursion and is + // being used in a read operation anyway. identifiers := []*ast.Ident{} - for _, ident := range info.IdentifiersForObject[sobj] { + for _, ident := range pass.IdentifiersForObject[sobj] { if subject.Pos() != ident.Pos() { identifiers = append(identifiers, ident) } @@ -145,7 +179,7 @@ func assigningCallExprs(info *TypesInfoExt, subject *ast.Ident) []*ast.CallExpr // Find out whether the identifiers are part of an assignment statement. var callExprs []*ast.CallExpr for _, ident := range identifiers { - parent := info.NodeParent[ident] + parent := pass.NodeParent[ident] switch declT := parent.(type) { case *ast.AssignStmt: // The identifier is LHS of an assignment. @@ -153,10 +187,10 @@ func assigningCallExprs(info *TypesInfoExt, subject *ast.Ident) []*ast.CallExpr assigningExpr := assignment.Rhs[0] // If the assignment is comprised of multiple expressions, find out - // which LHS expression we should use by finding its index in the LHS. - if len(assignment.Rhs) > 1 { + // which RHS expression we should use by finding its index in the LHS. + if len(assignment.Lhs) == len(assignment.Rhs) { for i, lhs := range assignment.Lhs { - if subject.Name == lhs.(*ast.Ident).Name { + if ident, ok := lhs.(*ast.Ident); ok && subject.Name == ident.Name { assigningExpr = assignment.Rhs[i] break } @@ -173,7 +207,7 @@ func assigningCallExprs(info *TypesInfoExt, subject *ast.Ident) []*ast.CallExpr continue } // The subject was the result of assigning from another identifier. - callExprs = append(callExprs, assigningCallExprs(info, assignT)...) + callExprs = append(callExprs, assigningCallExprs(pass, assignT, visitedObjects)...) default: // TODO: inconclusive? } @@ -182,9 +216,7 @@ func assigningCallExprs(info *TypesInfoExt, subject *ast.Ident) []*ast.CallExpr return callExprs } -func selectorToString(selExpr *ast.SelectorExpr) string { - if ident, ok := selExpr.X.(*ast.Ident); ok { - return ident.Name + "." 
+ selExpr.Sel.Name - } - return "" +func selectorToString(pass *TypesInfoExt, selExpr *ast.SelectorExpr) string { + o := pass.TypesInfo.Uses[selExpr.Sel] + return fmt.Sprintf("%s.%s", o.Pkg().Path(), o.Name()) } diff --git a/vendor/github.com/polyfloyd/go-errorlint/errorlint/analysis.go b/vendor/github.com/polyfloyd/go-errorlint/errorlint/analysis.go index ab02136f47..f034913ea3 100644 --- a/vendor/github.com/polyfloyd/go-errorlint/errorlint/analysis.go +++ b/vendor/github.com/polyfloyd/go-errorlint/errorlint/analysis.go @@ -19,43 +19,45 @@ func NewAnalyzer() *analysis.Analyzer { } var ( - flagSet flag.FlagSet - checkComparison bool - checkAsserts bool - checkErrorf bool + flagSet flag.FlagSet + checkComparison bool + checkAsserts bool + checkErrorf bool + checkErrorfMulti bool ) func init() { flagSet.BoolVar(&checkComparison, "comparison", true, "Check for plain error comparisons") flagSet.BoolVar(&checkAsserts, "asserts", true, "Check for plain type assertions and type switches") flagSet.BoolVar(&checkErrorf, "errorf", false, "Check whether fmt.Errorf uses the %w verb for formatting errors. See the readme for caveats") + flagSet.BoolVar(&checkErrorfMulti, "errorf-multi", true, "Permit more than 1 %w verb, valid per Go 1.20 (Requires -errorf=true)") } func run(pass *analysis.Pass) (interface{}, error) { - lints := []Lint{} - extInfo := newTypesInfoExt(pass.TypesInfo) + lints := []analysis.Diagnostic{} + extInfo := newTypesInfoExt(pass) if checkComparison { - l := LintErrorComparisons(pass.Fset, extInfo) + l := LintErrorComparisons(extInfo) lints = append(lints, l...) } if checkAsserts { - l := LintErrorTypeAssertions(pass.Fset, *pass.TypesInfo) + l := LintErrorTypeAssertions(pass.Fset, extInfo) lints = append(lints, l...) } if checkErrorf { - l := LintFmtErrorfCalls(pass.Fset, *pass.TypesInfo) + l := LintFmtErrorfCalls(pass.Fset, *pass.TypesInfo, checkErrorfMulti) lints = append(lints, l...) } sort.Sort(ByPosition(lints)) for _, l := range lints { - pass.Report(analysis.Diagnostic{Pos: l.Pos, Message: l.Message}) + pass.Report(l) } return nil, nil } type TypesInfoExt struct { - types.Info + *analysis.Pass // Maps AST nodes back to the node they are contained within. 
NodeParent map[ast.Node]ast.Node @@ -64,9 +66,9 @@ type TypesInfoExt struct { IdentifiersForObject map[types.Object][]*ast.Ident } -func newTypesInfoExt(info *types.Info) *TypesInfoExt { +func newTypesInfoExt(pass *analysis.Pass) *TypesInfoExt { nodeParent := map[ast.Node]ast.Node{} - for node := range info.Scopes { + for node := range pass.TypesInfo.Scopes { file, ok := node.(*ast.File) if !ok { continue @@ -84,15 +86,15 @@ func newTypesInfoExt(info *types.Info) *TypesInfoExt { } identifiersForObject := map[types.Object][]*ast.Ident{} - for node, obj := range info.Defs { + for node, obj := range pass.TypesInfo.Defs { identifiersForObject[obj] = append(identifiersForObject[obj], node) } - for node, obj := range info.Uses { + for node, obj := range pass.TypesInfo.Uses { identifiersForObject[obj] = append(identifiersForObject[obj], node) } return &TypesInfoExt{ - Info: *info, + Pass: pass, NodeParent: nodeParent, IdentifiersForObject: identifiersForObject, } diff --git a/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go b/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go index fb065ced13..572a3816d8 100644 --- a/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go +++ b/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go @@ -6,14 +6,11 @@ import ( "go/constant" "go/token" "go/types" -) -type Lint struct { - Message string - Pos token.Pos -} + "golang.org/x/tools/go/analysis" +) -type ByPosition []Lint +type ByPosition []analysis.Diagnostic func (l ByPosition) Len() int { return len(l) } func (l ByPosition) Swap(i, j int) { l[i], l[j] = l[j], l[i] } @@ -22,8 +19,8 @@ func (l ByPosition) Less(i, j int) bool { return l[i].Pos < l[j].Pos } -func LintFmtErrorfCalls(fset *token.FileSet, info types.Info) []Lint { - lints := []Lint{} +func LintFmtErrorfCalls(fset *token.FileSet, info types.Info, multipleWraps bool) []analysis.Diagnostic { + lints := []analysis.Diagnostic{} for expr, t := range info.Types { // Search for error expressions that are the result of fmt.Errorf // invocations. @@ -41,53 +38,88 @@ func LintFmtErrorfCalls(fset *token.FileSet, info types.Info) []Lint { continue } - // For any arguments that are errors, check whether the wrapping verb - // is used. Only one %w verb may be used in a single format string at a - // time, so we stop after finding a correct %w. - var lintArg ast.Expr + // For any arguments that are errors, check whether the wrapping verb is used. %w may occur + // for multiple errors in one Errorf invocation, unless multipleWraps is true. We raise an + // issue if at least one error does not have a corresponding wrapping verb. args := call.Args[1:] - for i := 0; i < len(args) && i < len(formatVerbs); i++ { - if !implementsError(info.Types[args[i]].Type) && !isErrorStringCall(info, args[i]) { - continue - } + if !multipleWraps { + wrapCount := 0 + for i := 0; i < len(args) && i < len(formatVerbs); i++ { + arg := args[i] + if !implementsError(info.Types[arg].Type) { + continue + } + verb := formatVerbs[i] + + if verb.format == "w" { + wrapCount++ + if wrapCount > 1 { + lints = append(lints, analysis.Diagnostic{ + Message: "only one %w verb is permitted per format string", + Pos: arg.Pos(), + }) + break + } + } - if formatVerbs[i] == "w" { - lintArg = nil - break + if wrapCount == 0 { + lints = append(lints, analysis.Diagnostic{ + Message: "non-wrapping format verb for fmt.Errorf. 
Use `%w` to format errors", + Pos: args[i].Pos(), + }) + break + } } - if lintArg == nil { - lintArg = args[i] - } - } - if lintArg != nil { - lints = append(lints, Lint{ - Message: "non-wrapping format verb for fmt.Errorf. Use `%w` to format errors", - Pos: lintArg.Pos(), - }) - } - } - return lints -} + } else { + var lint *analysis.Diagnostic + argIndex := 0 + for _, verb := range formatVerbs { + if verb.index != -1 { + argIndex = verb.index + } else { + argIndex++ + } -// isErrorStringCall tests whether the expression is a string expression that -// is the result of an `(error).Error()` method call. -func isErrorStringCall(info types.Info, expr ast.Expr) bool { - if info.Types[expr].Type.String() == "string" { - if call, ok := expr.(*ast.CallExpr); ok { - if callSel, ok := call.Fun.(*ast.SelectorExpr); ok { - fun := info.Uses[callSel.Sel].(*types.Func) - return fun.Type().String() == "func() string" && fun.Name() == "Error" + if verb.format == "w" { + continue + } + if argIndex-1 >= len(args) { + continue + } + arg := args[argIndex-1] + if !implementsError(info.Types[arg].Type) { + continue + } + + strStart := call.Args[0].Pos() + if lint == nil { + lint = &analysis.Diagnostic{ + Message: "non-wrapping format verb for fmt.Errorf. Use `%w` to format errors", + Pos: arg.Pos(), + } + } + lint.SuggestedFixes = append(lint.SuggestedFixes, analysis.SuggestedFix{ + Message: "Use `%w` to format errors", + TextEdits: []analysis.TextEdit{{ + Pos: strStart + token.Pos(verb.formatOffset) + 1, + End: strStart + token.Pos(verb.formatOffset) + 2, + NewText: []byte("w"), + }}, + }) + } + if lint != nil { + lints = append(lints, *lint) } } } - return false + return lints } // printfFormatStringVerbs returns a normalized list of all the verbs that are used per argument to -// the printf function. The index of each returned element corresponds to index of the respective -// argument. -func printfFormatStringVerbs(info types.Info, call *ast.CallExpr) ([]string, bool) { +// the printf function. The index of each returned element corresponds to the index of the +// respective argument. +func printfFormatStringVerbs(info types.Info, call *ast.CallExpr) ([]verb, bool) { if len(call.Args) <= 1 { return nil, false } @@ -103,18 +135,8 @@ func printfFormatStringVerbs(info types.Info, call *ast.CallExpr) ([]string, boo if err != nil { return nil, false } - orderedVerbs := verbOrder(verbs, len(call.Args)-1) - - resolvedVerbs := make([]string, len(orderedVerbs)) - for i, vv := range orderedVerbs { - for _, v := range vv { - resolvedVerbs[i] = v.format - if v.format == "w" { - break - } - } - } - return resolvedVerbs, true + + return verbs, true } func isFmtErrorfCallExpr(info types.Info, expr ast.Expr) (*ast.CallExpr, bool) { @@ -136,10 +158,10 @@ func isFmtErrorfCallExpr(info types.Info, expr ast.Expr) (*ast.CallExpr, bool) { return nil, false } -func LintErrorComparisons(fset *token.FileSet, info *TypesInfoExt) []Lint { - lints := []Lint{} +func LintErrorComparisons(info *TypesInfoExt) []analysis.Diagnostic { + lints := []analysis.Diagnostic{} - for expr := range info.Types { + for expr := range info.TypesInfo.Types { // Find == and != operations. binExpr, ok := expr.(*ast.BinaryExpr) if !ok { @@ -153,7 +175,7 @@ func LintErrorComparisons(fset *token.FileSet, info *TypesInfoExt) []Lint { continue } // Find comparisons of which one side is a of type error. 
- if !isErrorComparison(info.Info, binExpr) { + if !isErrorComparison(info.TypesInfo, binExpr) { continue } // Some errors that are returned from some functions are exempt. @@ -165,13 +187,13 @@ func LintErrorComparisons(fset *token.FileSet, info *TypesInfoExt) []Lint { continue } - lints = append(lints, Lint{ + lints = append(lints, analysis.Diagnostic{ Message: fmt.Sprintf("comparing with %s will fail on wrapped errors. Use errors.Is to check for a specific error", binExpr.Op), Pos: binExpr.Pos(), }) } - for scope := range info.Scopes { + for scope := range info.TypesInfo.Scopes { // Find value switch blocks. switchStmt, ok := scope.(*ast.SwitchStmt) if !ok { @@ -181,7 +203,7 @@ func LintErrorComparisons(fset *token.FileSet, info *TypesInfoExt) []Lint { if switchStmt.Tag == nil { continue } - tagType := info.Types[switchStmt.Tag] + tagType := info.TypesInfo.Types[switchStmt.Tag] if tagType.Type.String() != "error" { continue } @@ -190,7 +212,7 @@ func LintErrorComparisons(fset *token.FileSet, info *TypesInfoExt) []Lint { } if switchComparesNonNil(switchStmt) { - lints = append(lints, Lint{ + lints = append(lints, analysis.Diagnostic{ Message: "switch on an error will fail on wrapped errors. Use errors.Is to check for specific errors", Pos: switchStmt.Pos(), }) @@ -211,7 +233,7 @@ func isNilComparison(binExpr *ast.BinaryExpr) bool { return false } -func isErrorComparison(info types.Info, binExpr *ast.BinaryExpr) bool { +func isErrorComparison(info *types.Info, binExpr *ast.BinaryExpr) bool { tx := info.Types[binExpr.X] ty := info.Types[binExpr.Y] return tx.Type.String() == "error" || ty.Type.String() == "error" @@ -230,11 +252,11 @@ func isNodeInErrorIsFunc(info *TypesInfoExt, node ast.Node) bool { return false } // There should be 1 argument of type error. - if ii := funcDecl.Type.Params.List; len(ii) != 1 || info.Types[ii[0].Type].Type.String() != "error" { + if ii := funcDecl.Type.Params.List; len(ii) != 1 || info.TypesInfo.Types[ii[0].Type].Type.String() != "error" { return false } // The return type should be bool. - if ii := funcDecl.Type.Results.List; len(ii) != 1 || info.Types[ii[0].Type].Type.String() != "bool" { + if ii := funcDecl.Type.Results.List; len(ii) != 1 || info.TypesInfo.Types[ii[0].Type].Type.String() != "bool" { return false } @@ -266,10 +288,10 @@ func switchComparesNonNil(switchStmt *ast.SwitchStmt) bool { return false } -func LintErrorTypeAssertions(fset *token.FileSet, info types.Info) []Lint { - lints := []Lint{} +func LintErrorTypeAssertions(fset *token.FileSet, info *TypesInfoExt) []analysis.Diagnostic { + lints := []analysis.Diagnostic{} - for expr := range info.Types { + for expr := range info.TypesInfo.Types { // Find type assertions. typeAssert, ok := expr.(*ast.TypeAssertExpr) if !ok { @@ -277,17 +299,26 @@ func LintErrorTypeAssertions(fset *token.FileSet, info types.Info) []Lint { } // Find type assertions that operate on values of type error. - if !isErrorTypeAssertion(info, typeAssert) { + if !isErrorTypeAssertion(*info.TypesInfo, typeAssert) { + continue + } + + if isNodeInErrorIsFunc(info, typeAssert) { + continue + } + + // If the asserted type is not an error, allow the expression. + if !implementsError(info.TypesInfo.Types[typeAssert.Type].Type) { continue } - lints = append(lints, Lint{ + lints = append(lints, analysis.Diagnostic{ Message: "type assertion on error will fail on wrapped errors. 
Use errors.As to check for specific errors", Pos: typeAssert.Pos(), }) } - for scope := range info.Scopes { + for scope := range info.TypesInfo.Scopes { // Find type switches. typeSwitch, ok := scope.(*ast.TypeSwitchStmt) if !ok { @@ -304,11 +335,15 @@ func LintErrorTypeAssertions(fset *token.FileSet, info types.Info) []Lint { } // Check whether the type switch is on a value of type error. - if !isErrorTypeAssertion(info, typeAssert) { + if !isErrorTypeAssertion(*info.TypesInfo, typeAssert) { + continue + } + + if isNodeInErrorIsFunc(info, typeSwitch) { continue } - lints = append(lints, Lint{ + lints = append(lints, analysis.Diagnostic{ Message: "type switch on error will fail on wrapped errors. Use errors.As to check for specific errors", Pos: typeAssert.Pos(), }) diff --git a/vendor/github.com/polyfloyd/go-errorlint/errorlint/printf.go b/vendor/github.com/polyfloyd/go-errorlint/errorlint/printf.go index d9a935ff2b..4c0e12525d 100644 --- a/vendor/github.com/polyfloyd/go-errorlint/errorlint/printf.go +++ b/vendor/github.com/polyfloyd/go-errorlint/errorlint/printf.go @@ -7,30 +7,15 @@ import ( "strings" ) -func verbOrder(verbs []verb, numArgs int) [][]verb { - orderedVerbs := make([][]verb, numArgs) - i := 0 - for _, v := range verbs { - if v.index != -1 { - i = v.index - 1 - } - if i >= len(orderedVerbs) { - continue - } - orderedVerbs[i] = append(orderedVerbs[i], v) - verbs = verbs[1:] - i++ - } - return orderedVerbs -} - type verb struct { - format string - index int + format string + formatOffset int + index int } type printfParser struct { str string + at int } func (pp *printfParser) ParseAllVerbs() ([]verb, error) { @@ -80,7 +65,7 @@ func (pp *printfParser) parseVerb() (*verb, error) { format := pp.next() - return &verb{format: string(format), index: index}, nil + return &verb{format: string(format), formatOffset: pp.at - 1, index: index}, nil } func (pp *printfParser) parseIndex() (int, error) { @@ -96,6 +81,7 @@ func (pp *printfParser) parseIndex() (int, error) { return -1, err } pp.str = pp.str[end+1:] + pp.at += end + 1 return index, nil } @@ -114,6 +100,7 @@ func (pp *printfParser) skipToPercent() error { return io.EOF } pp.str = pp.str[i:] + pp.at += i return nil } @@ -130,5 +117,6 @@ func (pp *printfParser) next() rune { } r := rune(pp.str[0]) pp.str = pp.str[1:] + pp.at++ return r } diff --git a/vendor/github.com/posener/complete/.gitignore b/vendor/github.com/posener/complete/.gitignore deleted file mode 100644 index 293955f99a..0000000000 --- a/vendor/github.com/posener/complete/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.idea -coverage.txt -gocomplete/gocomplete -example/self/self diff --git a/vendor/github.com/posener/complete/.travis.yml b/vendor/github.com/posener/complete/.travis.yml deleted file mode 100644 index 6ba8d865b1..0000000000 --- a/vendor/github.com/posener/complete/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -go: - - tip - - 1.12.x - - 1.11.x - - 1.10.x - -script: - - go test -race -coverprofile=coverage.txt -covermode=atomic ./... 
- -after_success: - - bash <(curl -s https://codecov.io/bash) - -matrix: - allow_failures: - - go: tip \ No newline at end of file diff --git a/vendor/github.com/posener/complete/LICENSE.txt b/vendor/github.com/posener/complete/LICENSE.txt deleted file mode 100644 index 16249b4a1e..0000000000 --- a/vendor/github.com/posener/complete/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License - -Copyright (c) 2017 Eyal Posener - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/posener/complete/README.md b/vendor/github.com/posener/complete/README.md deleted file mode 100644 index dcc6c89324..0000000000 --- a/vendor/github.com/posener/complete/README.md +++ /dev/null @@ -1,131 +0,0 @@ -# complete - -[![Build Status](https://travis-ci.org/posener/complete.svg?branch=master)](https://travis-ci.org/posener/complete) -[![codecov](https://codecov.io/gh/posener/complete/branch/master/graph/badge.svg)](https://codecov.io/gh/posener/complete) -[![golangci](https://golangci.com/badges/github.com/posener/complete.svg)](https://golangci.com/r/github.com/posener/complete) -[![GoDoc](https://godoc.org/github.com/posener/complete?status.svg)](http://godoc.org/github.com/posener/complete) -[![goreadme](https://goreadme.herokuapp.com/badge/posener/complete.svg)](https://goreadme.herokuapp.com) - -Package complete provides a tool for bash writing bash completion in go, and bash completion for the go command line. - -Writing bash completion scripts is a hard work. This package provides an easy way -to create bash completion scripts for any command, and also an easy way to install/uninstall -the completion of the command. - -#### Go Command Bash Completion - -In [./cmd/gocomplete](./cmd/gocomplete) there is an example for bash completion for the `go` command line. - -This is an example that uses the `complete` package on the `go` command - the `complete` package -can also be used to implement any completions, see #usage. - -#### Install - -1. Type in your shell: - -```go -go get -u github.com/posener/complete/gocomplete -gocomplete -install -``` - -2. Restart your shell - -Uninstall by `gocomplete -uninstall` - -#### Features - -- Complete `go` command, including sub commands and all flags. -- Complete packages names or `.go` files when necessary. -- Complete test names after `-run` flag. 
- -#### Complete package - -Supported shells: - -- [x] bash -- [x] zsh -- [x] fish - -#### Usage - -Assuming you have program called `run` and you want to have bash completion -for it, meaning, if you type `run` then space, then press the `Tab` key, -the shell will suggest relevant complete options. - -In that case, we will create a program called `runcomplete`, a go program, -with a `func main()` and so, that will make the completion of the `run` -program. Once the `runcomplete` will be in a binary form, we could -`runcomplete -install` and that will add to our shell all the bash completion -options for `run`. - -So here it is: - -```go -import "github.com/posener/complete" - -func main() { - - // create a Command object, that represents the command we want - // to complete. - run := complete.Command{ - - // Sub defines a list of sub commands of the program, - // this is recursive, since every command is of type command also. - Sub: complete.Commands{ - - // add a build sub command - "build": complete.Command { - - // define flags of the build sub command - Flags: complete.Flags{ - // build sub command has a flag '-cpus', which - // expects number of cpus after it. in that case - // anything could complete this flag. - "-cpus": complete.PredictAnything, - }, - }, - }, - - // define flags of the 'run' main command - Flags: complete.Flags{ - // a flag -o, which expects a file ending with .out after - // it, the tab completion will auto complete for files matching - // the given pattern. - "-o": complete.PredictFiles("*.out"), - }, - - // define global flags of the 'run' main command - // those will show up also when a sub command was entered in the - // command line - GlobalFlags: complete.Flags{ - - // a flag '-h' which does not expects anything after it - "-h": complete.PredictNothing, - }, - } - - // run the command completion, as part of the main() function. - // this triggers the autocompletion when needed. - // name must be exactly as the binary that we want to complete. - complete.New("run", run).Run() -} -``` - -#### Self completing program - -In case that the program that we want to complete is written in go we -can make it self completing. -Here is an example: [./example/self/main.go](./example/self/main.go) . - -## Sub Packages - -* [cmd](./cmd): Package cmd used for command line options for the complete tool - -* [gocomplete](./gocomplete): Package main is complete tool for the go command line - -* [match](./match): Package match contains matchers that decide if to apply completion. - - ---- - -Created by [goreadme](https://github.com/apps/goreadme) diff --git a/vendor/github.com/posener/complete/args.go b/vendor/github.com/posener/complete/args.go deleted file mode 100644 index 3340285e1c..0000000000 --- a/vendor/github.com/posener/complete/args.go +++ /dev/null @@ -1,114 +0,0 @@ -package complete - -import ( - "os" - "path/filepath" - "strings" - "unicode" -) - -// Args describes command line arguments -type Args struct { - // All lists of all arguments in command line (not including the command itself) - All []string - // Completed lists of all completed arguments in command line, - // If the last one is still being typed - no space after it, - // it won't appear in this list of arguments. - Completed []string - // Last argument in command line, the one being typed, if the last - // character in the command line is a space, this argument will be empty, - // otherwise this would be the last word. - Last string - // LastCompleted is the last argument that was fully typed. 
- // If the last character in the command line is space, this would be the - // last word, otherwise, it would be the word before that. - LastCompleted string -} - -// Directory gives the directory of the current written -// last argument if it represents a file name being written. -// in case that it is not, we fall back to the current directory. -// -// Deprecated. -func (a Args) Directory() string { - if info, err := os.Stat(a.Last); err == nil && info.IsDir() { - return fixPathForm(a.Last, a.Last) - } - dir := filepath.Dir(a.Last) - if info, err := os.Stat(dir); err != nil || !info.IsDir() { - return "./" - } - return fixPathForm(a.Last, dir) -} - -func newArgs(line string) Args { - var ( - all []string - completed []string - ) - parts := splitFields(line) - if len(parts) > 0 { - all = parts[1:] - completed = removeLast(parts[1:]) - } - return Args{ - All: all, - Completed: completed, - Last: last(parts), - LastCompleted: last(completed), - } -} - -// splitFields returns a list of fields from the given command line. -// If the last character is space, it appends an empty field in the end -// indicating that the field before it was completed. -// If the last field is of the form "a=b", it splits it to two fields: "a", "b", -// So it can be completed. -func splitFields(line string) []string { - parts := strings.Fields(line) - - // Add empty field if the last field was completed. - if len(line) > 0 && unicode.IsSpace(rune(line[len(line)-1])) { - parts = append(parts, "") - } - - // Treat the last field if it is of the form "a=b" - parts = splitLastEqual(parts) - return parts -} - -func splitLastEqual(line []string) []string { - if len(line) == 0 { - return line - } - parts := strings.Split(line[len(line)-1], "=") - return append(line[:len(line)-1], parts...) -} - -// from returns a copy of Args of all arguments after the i'th argument. -func (a Args) from(i int) Args { - if i >= len(a.All) { - i = len(a.All) - 1 - } - a.All = a.All[i+1:] - - if i >= len(a.Completed) { - i = len(a.Completed) - 1 - } - a.Completed = a.Completed[i+1:] - return a -} - -func removeLast(a []string) []string { - if len(a) > 0 { - return a[:len(a)-1] - } - return a -} - -func last(args []string) string { - if len(args) == 0 { - return "" - } - return args[len(args)-1] -} diff --git a/vendor/github.com/posener/complete/cmd/cmd.go b/vendor/github.com/posener/complete/cmd/cmd.go deleted file mode 100644 index b99fe52901..0000000000 --- a/vendor/github.com/posener/complete/cmd/cmd.go +++ /dev/null @@ -1,128 +0,0 @@ -// Package cmd used for command line options for the complete tool -package cmd - -import ( - "errors" - "flag" - "fmt" - "os" - "strings" - - "github.com/posener/complete/cmd/install" -) - -// CLI for command line -type CLI struct { - Name string - InstallName string - UninstallName string - - install bool - uninstall bool - yes bool -} - -const ( - defaultInstallName = "install" - defaultUninstallName = "uninstall" -) - -// Run is used when running complete in command line mode. -// this is used when the complete is not completing words, but to -// install it or uninstall it. 
-func (f *CLI) Run() bool { - err := f.validate() - if err != nil { - os.Stderr.WriteString(err.Error() + "\n") - os.Exit(1) - } - - switch { - case f.install: - f.prompt() - err = install.Install(f.Name) - case f.uninstall: - f.prompt() - err = install.Uninstall(f.Name) - default: - // non of the action flags matched, - // returning false should make the real program execute - return false - } - - if err != nil { - fmt.Printf("%s failed! %s\n", f.action(), err) - os.Exit(3) - } - fmt.Println("Done!") - return true -} - -// prompt use for approval -// exit if approval was not given -func (f *CLI) prompt() { - defer fmt.Println(f.action() + "ing...") - if f.yes { - return - } - fmt.Printf("%s completion for %s? ", f.action(), f.Name) - var answer string - fmt.Scanln(&answer) - - switch strings.ToLower(answer) { - case "y", "yes": - return - default: - fmt.Println("Cancelling...") - os.Exit(1) - } -} - -// AddFlags adds the CLI flags to the flag set. -// If flags is nil, the default command line flags will be taken. -// Pass non-empty strings as installName and uninstallName to override the default -// flag names. -func (f *CLI) AddFlags(flags *flag.FlagSet) { - if flags == nil { - flags = flag.CommandLine - } - - if f.InstallName == "" { - f.InstallName = defaultInstallName - } - if f.UninstallName == "" { - f.UninstallName = defaultUninstallName - } - - if flags.Lookup(f.InstallName) == nil { - flags.BoolVar(&f.install, f.InstallName, false, - fmt.Sprintf("Install completion for %s command", f.Name)) - } - if flags.Lookup(f.UninstallName) == nil { - flags.BoolVar(&f.uninstall, f.UninstallName, false, - fmt.Sprintf("Uninstall completion for %s command", f.Name)) - } - if flags.Lookup("y") == nil { - flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes' when installing completion") - } -} - -// validate the CLI -func (f *CLI) validate() error { - if f.install && f.uninstall { - return errors.New("Install and uninstall are mutually exclusive") - } - return nil -} - -// action name according to the CLI values. 
-func (f *CLI) action() string { - switch { - case f.install: - return "Install" - case f.uninstall: - return "Uninstall" - default: - return "unknown" - } -} diff --git a/vendor/github.com/posener/complete/cmd/install/bash.go b/vendor/github.com/posener/complete/cmd/install/bash.go deleted file mode 100644 index 17c64de136..0000000000 --- a/vendor/github.com/posener/complete/cmd/install/bash.go +++ /dev/null @@ -1,37 +0,0 @@ -package install - -import "fmt" - -// (un)install in bash -// basically adds/remove from .bashrc: -// -// complete -C -type bash struct { - rc string -} - -func (b bash) IsInstalled(cmd, bin string) bool { - completeCmd := b.cmd(cmd, bin) - return lineInFile(b.rc, completeCmd) -} - -func (b bash) Install(cmd, bin string) error { - if b.IsInstalled(cmd, bin) { - return fmt.Errorf("already installed in %s", b.rc) - } - completeCmd := b.cmd(cmd, bin) - return appendToFile(b.rc, completeCmd) -} - -func (b bash) Uninstall(cmd, bin string) error { - if !b.IsInstalled(cmd, bin) { - return fmt.Errorf("does not installed in %s", b.rc) - } - - completeCmd := b.cmd(cmd, bin) - return removeFromFile(b.rc, completeCmd) -} - -func (bash) cmd(cmd, bin string) string { - return fmt.Sprintf("complete -C %s %s", bin, cmd) -} diff --git a/vendor/github.com/posener/complete/cmd/install/fish.go b/vendor/github.com/posener/complete/cmd/install/fish.go deleted file mode 100644 index 2b64bfc832..0000000000 --- a/vendor/github.com/posener/complete/cmd/install/fish.go +++ /dev/null @@ -1,69 +0,0 @@ -package install - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "text/template" -) - -// (un)install in fish - -type fish struct { - configDir string -} - -func (f fish) IsInstalled(cmd, bin string) bool { - completionFile := f.getCompletionFilePath(cmd) - if _, err := os.Stat(completionFile); err == nil { - return true - } - return false -} - -func (f fish) Install(cmd, bin string) error { - if f.IsInstalled(cmd, bin) { - return fmt.Errorf("already installed at %s", f.getCompletionFilePath(cmd)) - } - - completionFile := f.getCompletionFilePath(cmd) - completeCmd, err := f.cmd(cmd, bin) - if err != nil { - return err - } - - return createFile(completionFile, completeCmd) -} - -func (f fish) Uninstall(cmd, bin string) error { - if !f.IsInstalled(cmd, bin) { - return fmt.Errorf("does not installed in %s", f.configDir) - } - - completionFile := f.getCompletionFilePath(cmd) - return os.Remove(completionFile) -} - -func (f fish) getCompletionFilePath(cmd string) string { - return filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd)) -} - -func (f fish) cmd(cmd, bin string) (string, error) { - var buf bytes.Buffer - params := struct{ Cmd, Bin string }{cmd, bin} - tmpl := template.Must(template.New("cmd").Parse(` -function __complete_{{.Cmd}} - set -lx COMP_LINE (commandline -cp) - test -z (commandline -ct) - and set COMP_LINE "$COMP_LINE " - {{.Bin}} -end -complete -f -c {{.Cmd}} -a "(__complete_{{.Cmd}})" -`)) - err := tmpl.Execute(&buf, params) - if err != nil { - return "", err - } - return buf.String(), nil -} diff --git a/vendor/github.com/posener/complete/cmd/install/install.go b/vendor/github.com/posener/complete/cmd/install/install.go deleted file mode 100644 index 884c23f5b4..0000000000 --- a/vendor/github.com/posener/complete/cmd/install/install.go +++ /dev/null @@ -1,148 +0,0 @@ -package install - -import ( - "errors" - "os" - "os/user" - "path/filepath" - "runtime" - - "github.com/hashicorp/go-multierror" -) - -type installer interface { - IsInstalled(cmd, bin 
string) bool - Install(cmd, bin string) error - Uninstall(cmd, bin string) error -} - -// Install complete command given: -// cmd: is the command name -func Install(cmd string) error { - is := installers() - if len(is) == 0 { - return errors.New("Did not find any shells to install") - } - bin, err := getBinaryPath() - if err != nil { - return err - } - - for _, i := range is { - errI := i.Install(cmd, bin) - if errI != nil { - err = multierror.Append(err, errI) - } - } - - return err -} - -// IsInstalled returns true if the completion -// for the given cmd is installed. -func IsInstalled(cmd string) bool { - bin, err := getBinaryPath() - if err != nil { - return false - } - - for _, i := range installers() { - installed := i.IsInstalled(cmd, bin) - if installed { - return true - } - } - - return false -} - -// Uninstall complete command given: -// cmd: is the command name -func Uninstall(cmd string) error { - is := installers() - if len(is) == 0 { - return errors.New("Did not find any shells to uninstall") - } - bin, err := getBinaryPath() - if err != nil { - return err - } - - for _, i := range is { - errI := i.Uninstall(cmd, bin) - if errI != nil { - err = multierror.Append(err, errI) - } - } - - return err -} - -func installers() (i []installer) { - // The list of bash config files candidates where it is - // possible to install the completion command. - var bashConfFiles []string - switch runtime.GOOS { - case "darwin": - bashConfFiles = []string{".bash_profile"} - default: - bashConfFiles = []string{".bashrc", ".bash_profile", ".bash_login", ".profile"} - } - for _, rc := range bashConfFiles { - if f := rcFile(rc); f != "" { - i = append(i, bash{f}) - break - } - } - if f := rcFile(".zshrc"); f != "" { - i = append(i, zsh{f}) - } - if d := fishConfigDir(); d != "" { - i = append(i, fish{d}) - } - return -} - -func fishConfigDir() string { - configDir := filepath.Join(getConfigHomePath(), "fish") - if configDir == "" { - return "" - } - if info, err := os.Stat(configDir); err != nil || !info.IsDir() { - return "" - } - return configDir -} - -func getConfigHomePath() string { - u, err := user.Current() - if err != nil { - return "" - } - - configHome := os.Getenv("XDG_CONFIG_HOME") - if configHome == "" { - return filepath.Join(u.HomeDir, ".config") - } - return configHome -} - -func getBinaryPath() (string, error) { - bin, err := os.Executable() - if err != nil { - return "", err - } - return filepath.Abs(bin) -} - -func rcFile(name string) string { - u, err := user.Current() - if err != nil { - return "" - } - path := filepath.Join(u.HomeDir, name) - if _, err := os.Stat(path); err != nil { - return "" - } - return path -} diff --git a/vendor/github.com/posener/complete/cmd/install/utils.go b/vendor/github.com/posener/complete/cmd/install/utils.go deleted file mode 100644 index d34ac8cae8..0000000000 --- a/vendor/github.com/posener/complete/cmd/install/utils.go +++ /dev/null @@ -1,140 +0,0 @@ -package install - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" -) - -func lineInFile(name string, lookFor string) bool { - f, err := os.Open(name) - if err != nil { - return false - } - defer f.Close() - r := bufio.NewReader(f) - prefix := []byte{} - for { - line, isPrefix, err := r.ReadLine() - if err == io.EOF { - return false - } - if err != nil { - return false - } - if isPrefix { - prefix = append(prefix, line...) - continue - } - line = append(prefix, line...) 
- if string(line) == lookFor { - return true - } - prefix = prefix[:0] - } -} - -func createFile(name string, content string) error { - // make sure file directory exists - if err := os.MkdirAll(filepath.Dir(name), 0775); err != nil { - return err - } - - // create the file - f, err := os.Create(name) - if err != nil { - return err - } - defer f.Close() - - // write file content - _, err = f.WriteString(fmt.Sprintf("%s\n", content)) - return err -} - -func appendToFile(name string, content string) error { - f, err := os.OpenFile(name, os.O_RDWR|os.O_APPEND, 0) - if err != nil { - return err - } - defer f.Close() - _, err = f.WriteString(fmt.Sprintf("\n%s\n", content)) - return err -} - -func removeFromFile(name string, content string) error { - backup := name + ".bck" - err := copyFile(name, backup) - if err != nil { - return err - } - temp, err := removeContentToTempFile(name, content) - if err != nil { - return err - } - - err = copyFile(temp, name) - if err != nil { - return err - } - - return os.Remove(backup) -} - -func removeContentToTempFile(name, content string) (string, error) { - rf, err := os.Open(name) - if err != nil { - return "", err - } - defer rf.Close() - wf, err := ioutil.TempFile("/tmp", "complete-") - if err != nil { - return "", err - } - defer wf.Close() - - r := bufio.NewReader(rf) - prefix := []byte{} - for { - line, isPrefix, err := r.ReadLine() - if err == io.EOF { - break - } - if err != nil { - return "", err - } - if isPrefix { - prefix = append(prefix, line...) - continue - } - line = append(prefix, line...) - str := string(line) - if str == content { - continue - } - _, err = wf.WriteString(str + "\n") - if err != nil { - return "", err - } - prefix = prefix[:0] - } - return wf.Name(), nil -} - -func copyFile(src string, dst string) error { - in, err := os.Open(src) - if err != nil { - return err - } - defer in.Close() - out, err := os.Create(dst) - if err != nil { - return err - } - defer out.Close() - _, err = io.Copy(out, in) - return err -} diff --git a/vendor/github.com/posener/complete/cmd/install/zsh.go b/vendor/github.com/posener/complete/cmd/install/zsh.go deleted file mode 100644 index 29950ab171..0000000000 --- a/vendor/github.com/posener/complete/cmd/install/zsh.go +++ /dev/null @@ -1,44 +0,0 @@ -package install - -import "fmt" - -// (un)install in zsh -// basically adds/remove from .zshrc: -// -// autoload -U +X bashcompinit && bashcompinit" -// complete -C
-type zsh struct { - rc string -} - -func (z zsh) IsInstalled(cmd, bin string) bool { - completeCmd := z.cmd(cmd, bin) - return lineInFile(z.rc, completeCmd) -} - -func (z zsh) Install(cmd, bin string) error { - if z.IsInstalled(cmd, bin) { - return fmt.Errorf("already installed in %s", z.rc) - } - - completeCmd := z.cmd(cmd, bin) - bashCompInit := "autoload -U +X bashcompinit && bashcompinit" - if !lineInFile(z.rc, bashCompInit) { - completeCmd = bashCompInit + "\n" + completeCmd - } - - return appendToFile(z.rc, completeCmd) -} - -func (z zsh) Uninstall(cmd, bin string) error { - if !z.IsInstalled(cmd, bin) { - return fmt.Errorf("does not installed in %s", z.rc) - } - - completeCmd := z.cmd(cmd, bin) - return removeFromFile(z.rc, completeCmd) -} - -func (zsh) cmd(cmd, bin string) string { - return fmt.Sprintf("complete -o nospace -C %s %s", bin, cmd) -} diff --git a/vendor/github.com/posener/complete/command.go b/vendor/github.com/posener/complete/command.go deleted file mode 100644 index 82d37d529b..0000000000 --- a/vendor/github.com/posener/complete/command.go +++ /dev/null @@ -1,111 +0,0 @@ -package complete - -// Command represents a command line -// It holds the data that enables auto completion of command line -// Command can also be a sub command. -type Command struct { - // Sub is map of sub commands of the current command - // The key refer to the sub command name, and the value is it's - // Command descriptive struct. - Sub Commands - - // Flags is a map of flags that the command accepts. - // The key is the flag name, and the value is it's predictions. - Flags Flags - - // GlobalFlags is a map of flags that the command accepts. - // Global flags that can appear also after a sub command. - GlobalFlags Flags - - // Args are extra arguments that the command accepts, those who are - // given without any flag before. - Args Predictor -} - -// Predict returns all possible predictions for args according to the command struct -func (c *Command) Predict(a Args) []string { - options, _ := c.predict(a) - return options -} - -// Commands is the type of Sub member, it maps a command name to a command struct -type Commands map[string]Command - -// Predict completion of sub command names names according to command line arguments -func (c Commands) Predict(a Args) (prediction []string) { - for sub := range c { - prediction = append(prediction, sub) - } - return -} - -// Flags is the type Flags of the Flags member, it maps a flag name to the flag predictions. -type Flags map[string]Predictor - -// Predict completion of flags names according to command line arguments -func (f Flags) Predict(a Args) (prediction []string) { - for flag := range f { - // If the flag starts with a hyphen, we avoid emitting the prediction - // unless the last typed arg contains a hyphen as well. - flagHyphenStart := len(flag) != 0 && flag[0] == '-' - lastHyphenStart := len(a.Last) != 0 && a.Last[0] == '-' - if flagHyphenStart && !lastHyphenStart { - continue - } - prediction = append(prediction, flag) - } - return -} - -// predict options -// only is set to true if no more options are allowed to be returned -// those are in cases of special flag that has specific completion arguments, -// and other flags or sub commands can't come after it. 
-func (c *Command) predict(a Args) (options []string, only bool) { - - // search sub commands for predictions first - subCommandFound := false - for i, arg := range a.Completed { - if cmd, ok := c.Sub[arg]; ok { - subCommandFound = true - - // recursive call for sub command - options, only = cmd.predict(a.from(i)) - if only { - return - } - - // We matched so stop searching. Continuing to search can accidentally - // match a subcommand with current set of commands, see issue #46. - break - } - } - - // if last completed word is a global flag that we need to complete - if predictor, ok := c.GlobalFlags[a.LastCompleted]; ok && predictor != nil { - Log("Predicting according to global flag %s", a.LastCompleted) - return predictor.Predict(a), true - } - - options = append(options, c.GlobalFlags.Predict(a)...) - - // if a sub command was entered, we won't add the parent command - // completions and we return here. - if subCommandFound { - return - } - - // if last completed word is a command flag that we need to complete - if predictor, ok := c.Flags[a.LastCompleted]; ok && predictor != nil { - Log("Predicting according to flag %s", a.LastCompleted) - return predictor.Predict(a), true - } - - options = append(options, c.Sub.Predict(a)...) - options = append(options, c.Flags.Predict(a)...) - if c.Args != nil { - options = append(options, c.Args.Predict(a)...) - } - - return -} diff --git a/vendor/github.com/posener/complete/complete.go b/vendor/github.com/posener/complete/complete.go deleted file mode 100644 index 423cbec6c1..0000000000 --- a/vendor/github.com/posener/complete/complete.go +++ /dev/null @@ -1,104 +0,0 @@ -package complete - -import ( - "flag" - "fmt" - "io" - "os" - "strconv" - "strings" - - "github.com/posener/complete/cmd" -) - -const ( - envLine = "COMP_LINE" - envPoint = "COMP_POINT" - envDebug = "COMP_DEBUG" -) - -// Complete structs define completion for a command with CLI options -type Complete struct { - Command Command - cmd.CLI - Out io.Writer -} - -// New creates a new complete command. -// name is the name of command we want to auto complete. -// IMPORTANT: it must be the same name - if the auto complete -// completes the 'go' command, name must be equal to "go". -// command is the struct of the command completion. -func New(name string, command Command) *Complete { - return &Complete{ - Command: command, - CLI: cmd.CLI{Name: name}, - Out: os.Stdout, - } -} - -// Run runs the completion and add installation flags beforehand. -// The flags are added to the main flag CommandLine variable. -func (c *Complete) Run() bool { - c.AddFlags(nil) - flag.Parse() - return c.Complete() -} - -// Complete a command from completion line in environment variable, -// and print out the complete options. -// returns success if the completion ran or if the cli matched -// any of the given flags, false otherwise -// For installation: it assumes that flags were added and parsed before -// it was called. 
-func (c *Complete) Complete() bool { - line, point, ok := getEnv() - if !ok { - // make sure flags parsed, - // in case they were not added in the main program - return c.CLI.Run() - } - - if point >= 0 && point < len(line) { - line = line[:point] - } - - Log("Completing phrase: %s", line) - a := newArgs(line) - Log("Completing last field: %s", a.Last) - options := c.Command.Predict(a) - Log("Options: %s", options) - - // filter only options that match the last argument - matches := []string{} - for _, option := range options { - if strings.HasPrefix(option, a.Last) { - matches = append(matches, option) - } - } - Log("Matches: %s", matches) - c.output(matches) - return true -} - -func getEnv() (line string, point int, ok bool) { - line = os.Getenv(envLine) - if line == "" { - return - } - point, err := strconv.Atoi(os.Getenv(envPoint)) - if err != nil { - // If failed parsing point for some reason, set it to point - // on the end of the line. - Log("Failed parsing point %s: %v", os.Getenv(envPoint), err) - point = len(line) - } - return line, point, true -} - -func (c *Complete) output(options []string) { - // stdout of program defines the complete options - for _, option := range options { - fmt.Fprintln(c.Out, option) - } -} diff --git a/vendor/github.com/posener/complete/doc.go b/vendor/github.com/posener/complete/doc.go deleted file mode 100644 index 0ae09a1b74..0000000000 --- a/vendor/github.com/posener/complete/doc.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Package complete provides a tool for bash writing bash completion in go, and bash completion for the go command line. - -Writing bash completion scripts is a hard work. This package provides an easy way -to create bash completion scripts for any command, and also an easy way to install/uninstall -the completion of the command. - -Go Command Bash Completion - -In ./cmd/gocomplete there is an example for bash completion for the `go` command line. - -This is an example that uses the `complete` package on the `go` command - the `complete` package -can also be used to implement any completions, see #usage. - -Install - -1. Type in your shell: - - go get -u github.com/posener/complete/gocomplete - gocomplete -install - -2. Restart your shell - -Uninstall by `gocomplete -uninstall` - -Features - -- Complete `go` command, including sub commands and all flags. -- Complete packages names or `.go` files when necessary. -- Complete test names after `-run` flag. - -Complete package - -Supported shells: - -- [x] bash -- [x] zsh -- [x] fish - -Usage - -Assuming you have program called `run` and you want to have bash completion -for it, meaning, if you type `run` then space, then press the `Tab` key, -the shell will suggest relevant complete options. - -In that case, we will create a program called `runcomplete`, a go program, -with a `func main()` and so, that will make the completion of the `run` -program. Once the `runcomplete` will be in a binary form, we could -`runcomplete -install` and that will add to our shell all the bash completion -options for `run`. - -So here it is: - - import "github.com/posener/complete" - - func main() { - - // create a Command object, that represents the command we want - // to complete. - run := complete.Command{ - - // Sub defines a list of sub commands of the program, - // this is recursive, since every command is of type command also. 
- Sub: complete.Commands{ - - // add a build sub command - "build": complete.Command { - - // define flags of the build sub command - Flags: complete.Flags{ - // build sub command has a flag '-cpus', which - // expects number of cpus after it. in that case - // anything could complete this flag. - "-cpus": complete.PredictAnything, - }, - }, - }, - - // define flags of the 'run' main command - Flags: complete.Flags{ - // a flag -o, which expects a file ending with .out after - // it, the tab completion will auto complete for files matching - // the given pattern. - "-o": complete.PredictFiles("*.out"), - }, - - // define global flags of the 'run' main command - // those will show up also when a sub command was entered in the - // command line - GlobalFlags: complete.Flags{ - - // a flag '-h' which does not expects anything after it - "-h": complete.PredictNothing, - }, - } - - // run the command completion, as part of the main() function. - // this triggers the autocompletion when needed. - // name must be exactly as the binary that we want to complete. - complete.New("run", run).Run() - } - -Self completing program - -In case that the program that we want to complete is written in go we -can make it self completing. -Here is an example: ./example/self/main.go . - -*/ -package complete diff --git a/vendor/github.com/posener/complete/goreadme.json b/vendor/github.com/posener/complete/goreadme.json deleted file mode 100644 index 025ec76c98..0000000000 --- a/vendor/github.com/posener/complete/goreadme.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "badges": { - "travis_ci": true, - "code_cov": true, - "golang_ci": true, - "go_doc": true, - "goreadme": true - } -} \ No newline at end of file diff --git a/vendor/github.com/posener/complete/log.go b/vendor/github.com/posener/complete/log.go deleted file mode 100644 index c3029556e5..0000000000 --- a/vendor/github.com/posener/complete/log.go +++ /dev/null @@ -1,22 +0,0 @@ -package complete - -import ( - "io/ioutil" - "log" - "os" -) - -// Log is used for debugging purposes -// since complete is running on tab completion, it is nice to -// have logs to the stderr (when writing your own completer) -// to write logs, set the COMP_DEBUG environment variable and -// use complete.Log in the complete program -var Log = getLogger() - -func getLogger() func(format string, args ...interface{}) { - var logfile = ioutil.Discard - if os.Getenv(envDebug) != "" { - logfile = os.Stderr - } - return log.New(logfile, "complete ", log.Flags()).Printf -} diff --git a/vendor/github.com/posener/complete/predict.go b/vendor/github.com/posener/complete/predict.go deleted file mode 100644 index 820706325b..0000000000 --- a/vendor/github.com/posener/complete/predict.go +++ /dev/null @@ -1,41 +0,0 @@ -package complete - -// Predictor implements a predict method, in which given -// command line arguments returns a list of options it predicts. -type Predictor interface { - Predict(Args) []string -} - -// PredictOr unions two predicate functions, so that the result predicate -// returns the union of their predication -func PredictOr(predictors ...Predictor) Predictor { - return PredictFunc(func(a Args) (prediction []string) { - for _, p := range predictors { - if p == nil { - continue - } - prediction = append(prediction, p.Predict(a)...) - } - return - }) -} - -// PredictFunc determines what terms can follow a command or a flag -// It is used for auto completion, given last - the last word in the already -// in the command line, what words can complete it. 
-type PredictFunc func(Args) []string - -// Predict invokes the predict function and implements the Predictor interface -func (p PredictFunc) Predict(a Args) []string { - if p == nil { - return nil - } - return p(a) -} - -// PredictNothing does not expect anything after. -var PredictNothing Predictor - -// PredictAnything expects something, but nothing particular, such as a number -// or arbitrary name. -var PredictAnything = PredictFunc(func(Args) []string { return nil }) diff --git a/vendor/github.com/posener/complete/predict_files.go b/vendor/github.com/posener/complete/predict_files.go deleted file mode 100644 index 25ae2d5144..0000000000 --- a/vendor/github.com/posener/complete/predict_files.go +++ /dev/null @@ -1,174 +0,0 @@ -package complete - -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" -) - -// PredictDirs will search for directories in the given started to be typed -// path, if no path was started to be typed, it will complete to directories -// in the current working directory. -func PredictDirs(pattern string) Predictor { - return files(pattern, false) -} - -// PredictFiles will search for files matching the given pattern in the started to -// be typed path, if no path was started to be typed, it will complete to files that -// match the pattern in the current working directory. -// To match any file, use "*" as pattern. To match go files use "*.go", and so on. -func PredictFiles(pattern string) Predictor { - return files(pattern, true) -} - -func files(pattern string, allowFiles bool) PredictFunc { - - // search for files according to arguments, - // if only one directory has matched the result, search recursively into - // this directory to give more results. - return func(a Args) (prediction []string) { - prediction = predictFiles(a, pattern, allowFiles) - - // if the number of prediction is not 1, we either have many results or - // have no results, so we return it. - if len(prediction) != 1 { - return - } - - // only try deeper, if the one item is a directory - if stat, err := os.Stat(prediction[0]); err != nil || !stat.IsDir() { - return - } - - a.Last = prediction[0] - return predictFiles(a, pattern, allowFiles) - } -} - -func predictFiles(a Args, pattern string, allowFiles bool) []string { - if strings.HasSuffix(a.Last, "/..") { - return nil - } - - dir := directory(a.Last) - files := listFiles(dir, pattern, allowFiles) - - // add dir if match - files = append(files, dir) - - return PredictFilesSet(files).Predict(a) -} - -// directory gives the directory of the given partial path -// in case that it is not, we fall back to the current directory. 
-func directory(path string) string { - if info, err := os.Stat(path); err == nil && info.IsDir() { - return fixPathForm(path, path) - } - dir := filepath.Dir(path) - if info, err := os.Stat(dir); err == nil && info.IsDir() { - return fixPathForm(path, dir) - } - return "./" -} - -// PredictFilesSet predict according to file rules to a given set of file names -func PredictFilesSet(files []string) PredictFunc { - return func(a Args) (prediction []string) { - // add all matching files to prediction - for _, f := range files { - f = fixPathForm(a.Last, f) - - // test matching of file to the argument - if matchFile(f, a.Last) { - prediction = append(prediction, f) - } - } - return - } -} - -func listFiles(dir, pattern string, allowFiles bool) []string { - // set of all file names - m := map[string]bool{} - - // list files - if files, err := filepath.Glob(filepath.Join(dir, pattern)); err == nil { - for _, f := range files { - if stat, err := os.Stat(f); err != nil || stat.IsDir() || allowFiles { - m[f] = true - } - } - } - - // list directories - if dirs, err := ioutil.ReadDir(dir); err == nil { - for _, d := range dirs { - if d.IsDir() { - m[filepath.Join(dir, d.Name())] = true - } - } - } - - list := make([]string, 0, len(m)) - for k := range m { - list = append(list, k) - } - return list -} - -// MatchFile returns true if prefix can match the file -func matchFile(file, prefix string) bool { - // special case for current directory completion - if file == "./" && (prefix == "." || prefix == "") { - return true - } - if prefix == "." && strings.HasPrefix(file, ".") { - return true - } - - file = strings.TrimPrefix(file, "./") - prefix = strings.TrimPrefix(prefix, "./") - - return strings.HasPrefix(file, prefix) -} - -// fixPathForm changes a file name to a relative name -func fixPathForm(last string, file string) string { - // get wording directory for relative name - workDir, err := os.Getwd() - if err != nil { - return file - } - - abs, err := filepath.Abs(file) - if err != nil { - return file - } - - // if last is absolute, return path as absolute - if filepath.IsAbs(last) { - return fixDirPath(abs) - } - - rel, err := filepath.Rel(workDir, abs) - if err != nil { - return file - } - - // fix ./ prefix of path - if rel != "." && strings.HasPrefix(last, ".") { - rel = "./" + rel - } - - return fixDirPath(rel) -} - -func fixDirPath(path string) string { - info, err := os.Stat(path) - if err == nil && info.IsDir() && !strings.HasSuffix(path, "/") { - path += "/" - } - return path -} diff --git a/vendor/github.com/posener/complete/predict_set.go b/vendor/github.com/posener/complete/predict_set.go deleted file mode 100644 index fa4a34ae46..0000000000 --- a/vendor/github.com/posener/complete/predict_set.go +++ /dev/null @@ -1,12 +0,0 @@ -package complete - -// PredictSet expects specific set of terms, given in the options argument. 
-func PredictSet(options ...string) Predictor { - return predictSet(options) -} - -type predictSet []string - -func (p predictSet) Predict(a Args) []string { - return p -} diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/goenv/goenv.go b/vendor/github.com/quasilyte/go-ruleguard/internal/goenv/goenv.go index 2f207aa07a..52d0f5204f 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/internal/goenv/goenv.go +++ b/vendor/github.com/quasilyte/go-ruleguard/internal/goenv/goenv.go @@ -3,46 +3,26 @@ package goenv import ( "errors" "os/exec" - "runtime" - "strconv" "strings" ) func Read() (map[string]string, error) { - out, err := exec.Command("go", "env").CombinedOutput() + // pass in a fixed set of var names to avoid needing to unescape output + // pass in literals here instead of a variable list to avoid security linter warnings about command injection + out, err := exec.Command("go", "env", "GOROOT", "GOPATH", "GOARCH", "GOOS", "CGO_ENABLED").CombinedOutput() if err != nil { return nil, err } - return parseGoEnv(out, runtime.GOOS) + return parseGoEnv([]string{"GOROOT", "GOPATH", "GOARCH", "GOOS", "CGO_ENABLED"}, out) } -func parseGoEnv(data []byte, goos string) (map[string]string, error) { +func parseGoEnv(varNames []string, data []byte) (map[string]string, error) { vars := make(map[string]string) lines := strings.Split(strings.ReplaceAll(string(data), "\r\n", "\n"), "\n") - - if goos == "windows" { - // Line format is: `set $name=$value` - for _, l := range lines { - l = strings.TrimPrefix(l, "set ") - parts := strings.Split(l, "=") - if len(parts) != 2 { - continue - } - vars[parts[0]] = parts[1] - } - } else { - // Line format is: `$name="$value"` - for _, l := range lines { - parts := strings.Split(strings.TrimSpace(l), "=") - if len(parts) != 2 { - continue - } - val, err := strconv.Unquote(parts[1]) - if err != nil { - continue - } - vars[parts[0]] = val + for i, varName := range varNames { + if i < len(lines) && len(lines[i]) > 0 { + vars[varName] = lines[i] } } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/engine.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/engine.go index a5e6ca4d6b..e4cf954ffd 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/engine.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/engine.go @@ -8,7 +8,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "os" "sort" "strings" @@ -48,7 +47,7 @@ func (e *engine) LoadedGroups() []GoRuleGroup { } func (e *engine) Load(ctx *LoadContext, buildContext *build.Context, filename string, r io.Reader) error { - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return err } @@ -131,6 +130,7 @@ func (e *engine) Run(ctx *RunContext, buildContext *build.Context, f *ast.File) } // engineState is a shared state inside the engine. +// Its access is synchronized, unlike the RunnerState which should be thread-local. 
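The `goenv.Read` rewrite in the hunk above sidesteps platform-specific output scraping: rather than parsing the full `go env` dump (quoted `NAME="value"` lines on Unix, `set NAME=value` lines on Windows), it requests a fixed list of names, and `go env` then prints one bare value per line in the order asked. A minimal sketch of that contract; note the vendored code passes literal names rather than a variadic list to avoid command-injection linter warnings:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// readGoEnv asks `go env` for specific variables, which yields one
// unquoted value per line in request order on every platform.
func readGoEnv(names ...string) (map[string]string, error) {
	out, err := exec.Command("go", append([]string{"env"}, names...)...).Output()
	if err != nil {
		return nil, err
	}
	lines := strings.Split(strings.ReplaceAll(string(out), "\r\n", "\n"), "\n")
	vars := make(map[string]string, len(names))
	for i, name := range names {
		if i < len(lines) && lines[i] != "" {
			vars[name] = lines[i]
		}
	}
	return vars, nil
}

func main() {
	vars, err := readGoEnv("GOROOT", "GOOS", "GOARCH")
	if err != nil {
		panic(err)
	}
	fmt.Println(vars["GOOS"], vars["GOARCH"])
}
```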
type engineState struct { env *quasigo.Env diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/filters.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/filters.go index 604ae4a189..7320ab7fb7 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/filters.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/filters.go @@ -23,9 +23,16 @@ func filterFailure(reason string) matchFilterResult { return matchFilterResult(reason) } -func exprListFilterApply(src string, list gogrep.ExprSlice, fn func(ast.Expr) bool) matchFilterResult { - for i := 0; i < list.Len(); i++ { - if !fn(list.At(i).(ast.Expr)) { +func asExprSlice(x ast.Node) *gogrep.NodeSlice { + if x, ok := x.(*gogrep.NodeSlice); ok && x.Kind == gogrep.ExprNodeSlice { + return x + } + return nil +} + +func exprListFilterApply(src string, list []ast.Expr, fn func(ast.Expr) bool) matchFilterResult { + for _, e := range list { + if !fn(e) { return filterFailure(src) } } @@ -99,12 +106,11 @@ func makeFileNameMatchesFilter(src string, re textmatch.Pattern) filterFunc { func makePureFilter(src, varname string) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return isPure(params.ctx.Types, x) }) } - n := params.subExpr(varname) if isPure(params.ctx.Types, n) { return filterSuccess @@ -115,8 +121,8 @@ func makePureFilter(src, varname string) filterFunc { func makeConstFilter(src, varname string) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return isConstant(params.ctx.Types, x) }) } @@ -131,8 +137,8 @@ func makeConstFilter(src, varname string) filterFunc { func makeConstSliceFilter(src, varname string) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return isConstantSlice(params.ctx.Types, x) }) } @@ -147,8 +153,8 @@ func makeConstSliceFilter(src, varname string) filterFunc { func makeAddressableFilter(src, varname string) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return isAddressable(params.ctx.Types, x) }) } @@ -163,8 +169,8 @@ func makeAddressableFilter(src, varname string) filterFunc { func makeComparableFilter(src, varname string) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x 
ast.Expr) bool { return types.Comparable(params.typeofNode(x)) }) } @@ -212,8 +218,8 @@ func makeCustomVarFilter(src, varname string, fn *quasigo.Func) filterFunc { func makeTypeImplementsFilter(src, varname string, iface *types.Interface) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return xtypes.Implements(params.typeofNode(x), iface) }) } @@ -322,8 +328,8 @@ func makeRootSinkTypeIsFilter(src string, pat *typematch.Pattern) filterFunc { func makeTypeIsFilter(src, varname string, underlying bool, pat *typematch.Pattern) filterFunc { if underlying { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return pat.MatchIdentical(params.typematchState, params.typeofNode(x).Underlying()) }) } @@ -336,8 +342,8 @@ func makeTypeIsFilter(src, varname string, underlying bool, pat *typematch.Patte } return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return pat.MatchIdentical(params.typematchState, params.typeofNode(x)) }) } @@ -351,8 +357,8 @@ func makeTypeIsFilter(src, varname string, underlying bool, pat *typematch.Patte func makeTypeConvertibleToFilter(src, varname string, dstType types.Type) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return types.ConvertibleTo(params.typeofNode(x), dstType) }) } @@ -367,8 +373,8 @@ func makeTypeConvertibleToFilter(src, varname string, dstType types.Type) filter func makeTypeAssignableToFilter(src, varname string, dstType types.Type) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return types.AssignableTo(params.typeofNode(x), dstType) }) } @@ -395,6 +401,28 @@ func makeLineFilter(src, varname string, op token.Token, rhsVarname string) filt } } +func makeObjectIsVariadicParamFilter(src, varname string) filterFunc { + return func(params *filterParams) matchFilterResult { + if params.currentFunc == nil { + return filterFailure(src) + } + funcObj, ok := params.ctx.Types.ObjectOf(params.currentFunc.Name).(*types.Func) + if !ok { + return filterFailure(src) + } + funcSig := funcObj.Type().(*types.Signature) + if !funcSig.Variadic() { + return filterFailure(src) + } + paramObj := funcSig.Params().At(funcSig.Params().Len() - 1) + obj := 
params.ctx.Types.ObjectOf(identOf(params.subExpr(varname))) + if paramObj != obj { + return filterFailure(src) + } + return filterSuccess + } +} + func makeObjectIsGlobalFilter(src, varname string) filterFunc { return func(params *filterParams) matchFilterResult { obj := params.ctx.Types.ObjectOf(identOf(params.subExpr(varname))) @@ -433,15 +461,21 @@ func makeLineConstFilter(src, varname string, op token.Token, rhsValue constant. func makeTypeSizeConstFilter(src, varname string, op token.Token, rhsValue constant.Value) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { typ := params.typeofNode(x) + if isTypeParam(typ) { + return false + } lhsValue := constant.MakeInt64(params.ctx.Sizes.Sizeof(typ)) return constant.Compare(lhsValue, op, rhsValue) }) } typ := params.typeofNode(params.subExpr(varname)) + if isTypeParam(typ) { + return filterFailure(src) + } lhsValue := constant.MakeInt64(params.ctx.Sizes.Sizeof(typ)) if constant.Compare(lhsValue, op, rhsValue) { return filterSuccess @@ -453,8 +487,11 @@ func makeTypeSizeConstFilter(src, varname string, op token.Token, rhsValue const func makeTypeSizeFilter(src, varname string, op token.Token, rhsVarname string) filterFunc { return func(params *filterParams) matchFilterResult { lhsTyp := params.typeofNode(params.subExpr(varname)) - lhsValue := constant.MakeInt64(params.ctx.Sizes.Sizeof(lhsTyp)) rhsTyp := params.typeofNode(params.subExpr(rhsVarname)) + if isTypeParam(lhsTyp) || isTypeParam(rhsTyp) { + return filterFailure(src) + } + lhsValue := constant.MakeInt64(params.ctx.Sizes.Sizeof(lhsTyp)) rhsValue := constant.MakeInt64(params.ctx.Sizes.Sizeof(rhsTyp)) if constant.Compare(lhsValue, op, rhsValue) { return filterSuccess @@ -465,8 +502,8 @@ func makeTypeSizeFilter(src, varname string, op token.Token, rhsVarname string) func makeValueIntConstFilter(src, varname string, op token.Token, rhsValue constant.Value) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { lhsValue := intValueOf(params.ctx.Types, x) return lhsValue != nil && constant.Compare(lhsValue, op, rhsValue) }) @@ -606,8 +643,8 @@ func makeObjectIsFilter(src, varname, objectName string) filterFunc { } return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { ident := identOf(x) return ident != nil && predicate(params.ctx.Types.ObjectOf(ident)) }) diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/filter_op.gen.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/filter_op.gen.go index c9401c0208..bc2a5ee5b9 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/filter_op.gen.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/filter_op.gen.go @@ -88,189 +88,195 @@ const ( // $Value type: string 
FilterVarObjectIsGlobalOp FilterOp = 23 + // m[$Value].Object.IsVariadicParam() + // $Value type: string + FilterVarObjectIsVariadicParamOp FilterOp = 24 + // m[$Value].Type.Is($Args[0]) // $Value type: string - FilterVarTypeIsOp FilterOp = 24 + FilterVarTypeIsOp FilterOp = 25 // m[$Value].Type.IdenticalTo($Args[0]) // $Value type: string - FilterVarTypeIdenticalToOp FilterOp = 25 + FilterVarTypeIdenticalToOp FilterOp = 26 // m[$Value].Type.Underlying().Is($Args[0]) // $Value type: string - FilterVarTypeUnderlyingIsOp FilterOp = 26 + FilterVarTypeUnderlyingIsOp FilterOp = 27 // m[$Value].Type.OfKind($Args[0]) // $Value type: string - FilterVarTypeOfKindOp FilterOp = 27 + FilterVarTypeOfKindOp FilterOp = 28 // m[$Value].Type.Underlying().OfKind($Args[0]) // $Value type: string - FilterVarTypeUnderlyingOfKindOp FilterOp = 28 + FilterVarTypeUnderlyingOfKindOp FilterOp = 29 // m[$Value].Type.ConvertibleTo($Args[0]) // $Value type: string - FilterVarTypeConvertibleToOp FilterOp = 29 + FilterVarTypeConvertibleToOp FilterOp = 30 // m[$Value].Type.AssignableTo($Args[0]) // $Value type: string - FilterVarTypeAssignableToOp FilterOp = 30 + FilterVarTypeAssignableToOp FilterOp = 31 // m[$Value].Type.Implements($Args[0]) // $Value type: string - FilterVarTypeImplementsOp FilterOp = 31 + FilterVarTypeImplementsOp FilterOp = 32 // m[$Value].Type.HasMethod($Args[0]) // $Value type: string - FilterVarTypeHasMethodOp FilterOp = 32 + FilterVarTypeHasMethodOp FilterOp = 33 // m[$Value].Text.Matches($Args[0]) // $Value type: string - FilterVarTextMatchesOp FilterOp = 33 + FilterVarTextMatchesOp FilterOp = 34 // m[$Value].Contains($Args[0]) // $Value type: string - FilterVarContainsOp FilterOp = 34 + FilterVarContainsOp FilterOp = 35 // m.Deadcode() - FilterDeadcodeOp FilterOp = 35 + FilterDeadcodeOp FilterOp = 36 // m.GoVersion().Eq($Value) // $Value type: string - FilterGoVersionEqOp FilterOp = 36 + FilterGoVersionEqOp FilterOp = 37 // m.GoVersion().LessThan($Value) // $Value type: string - FilterGoVersionLessThanOp FilterOp = 37 + FilterGoVersionLessThanOp FilterOp = 38 // m.GoVersion().GreaterThan($Value) // $Value type: string - FilterGoVersionGreaterThanOp FilterOp = 38 + FilterGoVersionGreaterThanOp FilterOp = 39 // m.GoVersion().LessEqThan($Value) // $Value type: string - FilterGoVersionLessEqThanOp FilterOp = 39 + FilterGoVersionLessEqThanOp FilterOp = 40 // m.GoVersion().GreaterEqThan($Value) // $Value type: string - FilterGoVersionGreaterEqThanOp FilterOp = 40 + FilterGoVersionGreaterEqThanOp FilterOp = 41 // m.File.Imports($Value) // $Value type: string - FilterFileImportsOp FilterOp = 41 + FilterFileImportsOp FilterOp = 42 // m.File.PkgPath.Matches($Value) // $Value type: string - FilterFilePkgPathMatchesOp FilterOp = 42 + FilterFilePkgPathMatchesOp FilterOp = 43 // m.File.Name.Matches($Value) // $Value type: string - FilterFileNameMatchesOp FilterOp = 43 + FilterFileNameMatchesOp FilterOp = 44 // $Value holds a function name // $Value type: string - FilterFilterFuncRefOp FilterOp = 44 + FilterFilterFuncRefOp FilterOp = 45 // $Value holds a string constant // $Value type: string - FilterStringOp FilterOp = 45 + FilterStringOp FilterOp = 46 // $Value holds an int64 constant // $Value type: int64 - FilterIntOp FilterOp = 46 + FilterIntOp FilterOp = 47 // m[`$$`].Node.Parent().Is($Args[0]) - FilterRootNodeParentIsOp FilterOp = 47 + FilterRootNodeParentIsOp FilterOp = 48 // m[`$$`].SinkType.Is($Args[0]) - FilterRootSinkTypeIsOp FilterOp = 48 + FilterRootSinkTypeIsOp FilterOp = 49 ) var filterOpNames = 
map[FilterOp]string{ - FilterInvalidOp: `Invalid`, - FilterNotOp: `Not`, - FilterAndOp: `And`, - FilterOrOp: `Or`, - FilterEqOp: `Eq`, - FilterNeqOp: `Neq`, - FilterGtOp: `Gt`, - FilterLtOp: `Lt`, - FilterGtEqOp: `GtEq`, - FilterLtEqOp: `LtEq`, - FilterVarAddressableOp: `VarAddressable`, - FilterVarComparableOp: `VarComparable`, - FilterVarPureOp: `VarPure`, - FilterVarConstOp: `VarConst`, - FilterVarConstSliceOp: `VarConstSlice`, - FilterVarTextOp: `VarText`, - FilterVarLineOp: `VarLine`, - FilterVarValueIntOp: `VarValueInt`, - FilterVarTypeSizeOp: `VarTypeSize`, - FilterVarTypeHasPointersOp: `VarTypeHasPointers`, - FilterVarFilterOp: `VarFilter`, - FilterVarNodeIsOp: `VarNodeIs`, - FilterVarObjectIsOp: `VarObjectIs`, - FilterVarObjectIsGlobalOp: `VarObjectIsGlobal`, - FilterVarTypeIsOp: `VarTypeIs`, - FilterVarTypeIdenticalToOp: `VarTypeIdenticalTo`, - FilterVarTypeUnderlyingIsOp: `VarTypeUnderlyingIs`, - FilterVarTypeOfKindOp: `VarTypeOfKind`, - FilterVarTypeUnderlyingOfKindOp: `VarTypeUnderlyingOfKind`, - FilterVarTypeConvertibleToOp: `VarTypeConvertibleTo`, - FilterVarTypeAssignableToOp: `VarTypeAssignableTo`, - FilterVarTypeImplementsOp: `VarTypeImplements`, - FilterVarTypeHasMethodOp: `VarTypeHasMethod`, - FilterVarTextMatchesOp: `VarTextMatches`, - FilterVarContainsOp: `VarContains`, - FilterDeadcodeOp: `Deadcode`, - FilterGoVersionEqOp: `GoVersionEq`, - FilterGoVersionLessThanOp: `GoVersionLessThan`, - FilterGoVersionGreaterThanOp: `GoVersionGreaterThan`, - FilterGoVersionLessEqThanOp: `GoVersionLessEqThan`, - FilterGoVersionGreaterEqThanOp: `GoVersionGreaterEqThan`, - FilterFileImportsOp: `FileImports`, - FilterFilePkgPathMatchesOp: `FilePkgPathMatches`, - FilterFileNameMatchesOp: `FileNameMatches`, - FilterFilterFuncRefOp: `FilterFuncRef`, - FilterStringOp: `String`, - FilterIntOp: `Int`, - FilterRootNodeParentIsOp: `RootNodeParentIs`, - FilterRootSinkTypeIsOp: `RootSinkTypeIs`, + FilterInvalidOp: `Invalid`, + FilterNotOp: `Not`, + FilterAndOp: `And`, + FilterOrOp: `Or`, + FilterEqOp: `Eq`, + FilterNeqOp: `Neq`, + FilterGtOp: `Gt`, + FilterLtOp: `Lt`, + FilterGtEqOp: `GtEq`, + FilterLtEqOp: `LtEq`, + FilterVarAddressableOp: `VarAddressable`, + FilterVarComparableOp: `VarComparable`, + FilterVarPureOp: `VarPure`, + FilterVarConstOp: `VarConst`, + FilterVarConstSliceOp: `VarConstSlice`, + FilterVarTextOp: `VarText`, + FilterVarLineOp: `VarLine`, + FilterVarValueIntOp: `VarValueInt`, + FilterVarTypeSizeOp: `VarTypeSize`, + FilterVarTypeHasPointersOp: `VarTypeHasPointers`, + FilterVarFilterOp: `VarFilter`, + FilterVarNodeIsOp: `VarNodeIs`, + FilterVarObjectIsOp: `VarObjectIs`, + FilterVarObjectIsGlobalOp: `VarObjectIsGlobal`, + FilterVarObjectIsVariadicParamOp: `VarObjectIsVariadicParam`, + FilterVarTypeIsOp: `VarTypeIs`, + FilterVarTypeIdenticalToOp: `VarTypeIdenticalTo`, + FilterVarTypeUnderlyingIsOp: `VarTypeUnderlyingIs`, + FilterVarTypeOfKindOp: `VarTypeOfKind`, + FilterVarTypeUnderlyingOfKindOp: `VarTypeUnderlyingOfKind`, + FilterVarTypeConvertibleToOp: `VarTypeConvertibleTo`, + FilterVarTypeAssignableToOp: `VarTypeAssignableTo`, + FilterVarTypeImplementsOp: `VarTypeImplements`, + FilterVarTypeHasMethodOp: `VarTypeHasMethod`, + FilterVarTextMatchesOp: `VarTextMatches`, + FilterVarContainsOp: `VarContains`, + FilterDeadcodeOp: `Deadcode`, + FilterGoVersionEqOp: `GoVersionEq`, + FilterGoVersionLessThanOp: `GoVersionLessThan`, + FilterGoVersionGreaterThanOp: `GoVersionGreaterThan`, + FilterGoVersionLessEqThanOp: `GoVersionLessEqThan`, + FilterGoVersionGreaterEqThanOp: 
`GoVersionGreaterEqThan`, + FilterFileImportsOp: `FileImports`, + FilterFilePkgPathMatchesOp: `FilePkgPathMatches`, + FilterFileNameMatchesOp: `FileNameMatches`, + FilterFilterFuncRefOp: `FilterFuncRef`, + FilterStringOp: `String`, + FilterIntOp: `Int`, + FilterRootNodeParentIsOp: `RootNodeParentIs`, + FilterRootSinkTypeIsOp: `RootSinkTypeIs`, } var filterOpFlags = map[FilterOp]uint64{ - FilterAndOp: flagIsBinaryExpr, - FilterOrOp: flagIsBinaryExpr, - FilterEqOp: flagIsBinaryExpr, - FilterNeqOp: flagIsBinaryExpr, - FilterGtOp: flagIsBinaryExpr, - FilterLtOp: flagIsBinaryExpr, - FilterGtEqOp: flagIsBinaryExpr, - FilterLtEqOp: flagIsBinaryExpr, - FilterVarAddressableOp: flagHasVar, - FilterVarComparableOp: flagHasVar, - FilterVarPureOp: flagHasVar, - FilterVarConstOp: flagHasVar, - FilterVarConstSliceOp: flagHasVar, - FilterVarTextOp: flagHasVar, - FilterVarLineOp: flagHasVar, - FilterVarValueIntOp: flagHasVar, - FilterVarTypeSizeOp: flagHasVar, - FilterVarTypeHasPointersOp: flagHasVar, - FilterVarFilterOp: flagHasVar, - FilterVarNodeIsOp: flagHasVar, - FilterVarObjectIsOp: flagHasVar, - FilterVarObjectIsGlobalOp: flagHasVar, - FilterVarTypeIsOp: flagHasVar, - FilterVarTypeIdenticalToOp: flagHasVar, - FilterVarTypeUnderlyingIsOp: flagHasVar, - FilterVarTypeOfKindOp: flagHasVar, - FilterVarTypeUnderlyingOfKindOp: flagHasVar, - FilterVarTypeConvertibleToOp: flagHasVar, - FilterVarTypeAssignableToOp: flagHasVar, - FilterVarTypeImplementsOp: flagHasVar, - FilterVarTypeHasMethodOp: flagHasVar, - FilterVarTextMatchesOp: flagHasVar, - FilterVarContainsOp: flagHasVar, - FilterStringOp: flagIsBasicLit, - FilterIntOp: flagIsBasicLit, + FilterAndOp: flagIsBinaryExpr, + FilterOrOp: flagIsBinaryExpr, + FilterEqOp: flagIsBinaryExpr, + FilterNeqOp: flagIsBinaryExpr, + FilterGtOp: flagIsBinaryExpr, + FilterLtOp: flagIsBinaryExpr, + FilterGtEqOp: flagIsBinaryExpr, + FilterLtEqOp: flagIsBinaryExpr, + FilterVarAddressableOp: flagHasVar, + FilterVarComparableOp: flagHasVar, + FilterVarPureOp: flagHasVar, + FilterVarConstOp: flagHasVar, + FilterVarConstSliceOp: flagHasVar, + FilterVarTextOp: flagHasVar, + FilterVarLineOp: flagHasVar, + FilterVarValueIntOp: flagHasVar, + FilterVarTypeSizeOp: flagHasVar, + FilterVarTypeHasPointersOp: flagHasVar, + FilterVarFilterOp: flagHasVar, + FilterVarNodeIsOp: flagHasVar, + FilterVarObjectIsOp: flagHasVar, + FilterVarObjectIsGlobalOp: flagHasVar, + FilterVarObjectIsVariadicParamOp: flagHasVar, + FilterVarTypeIsOp: flagHasVar, + FilterVarTypeIdenticalToOp: flagHasVar, + FilterVarTypeUnderlyingIsOp: flagHasVar, + FilterVarTypeOfKindOp: flagHasVar, + FilterVarTypeUnderlyingOfKindOp: flagHasVar, + FilterVarTypeConvertibleToOp: flagHasVar, + FilterVarTypeAssignableToOp: flagHasVar, + FilterVarTypeImplementsOp: flagHasVar, + FilterVarTypeHasMethodOp: flagHasVar, + FilterVarTextMatchesOp: flagHasVar, + FilterVarContainsOp: flagHasVar, + FilterStringOp: flagIsBasicLit, + FilterIntOp: flagIsBasicLit, } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/gen_filter_op.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/gen_filter_op.go index d3b7409053..b1c8194926 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/gen_filter_op.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/gen_filter_op.go @@ -7,7 +7,7 @@ import ( "bytes" "fmt" "go/format" - "io/ioutil" + "os" "strings" ) @@ -55,6 +55,7 @@ func main() { {name: "VarNodeIs", comment: "m[$Value].Node.Is($Args[0])", valueType: "string", flags: flagHasVar}, {name: "VarObjectIs", 
comment: "m[$Value].Object.Is($Args[0])", valueType: "string", flags: flagHasVar}, {name: "VarObjectIsGlobal", comment: "m[$Value].Object.IsGlobal()", valueType: "string", flags: flagHasVar}, + {name: "VarObjectIsVariadicParam", comment: "m[$Value].Object.IsVariadicParam()", valueType: "string", flags: flagHasVar}, {name: "VarTypeIs", comment: "m[$Value].Type.Is($Args[0])", valueType: "string", flags: flagHasVar}, {name: "VarTypeIdenticalTo", comment: "m[$Value].Type.IdenticalTo($Args[0])", valueType: "string", flags: flagHasVar}, {name: "VarTypeUnderlyingIs", comment: "m[$Value].Type.Underlying().Is($Args[0])", valueType: "string", flags: flagHasVar}, @@ -141,7 +142,7 @@ func main() { panic(err) } - if err := ioutil.WriteFile("filter_op.gen.go", pretty, 0644); err != nil { + if err := os.WriteFile("filter_op.gen.go", pretty, 0644); err != nil { panic(err) } } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir_loader.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir_loader.go index c07a19f54f..90dea56acd 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir_loader.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir_loader.go @@ -8,7 +8,7 @@ import ( "go/parser" "go/token" "go/types" - "io/ioutil" + "os" "regexp" "github.com/quasilyte/gogrep" @@ -144,7 +144,7 @@ func (l *irLoader) loadBundle(bundle ir.BundleImport) error { } func (l *irLoader) loadExternFile(prefix, pkgPath, filename string) (*goRuleSet, error) { - src, err := ioutil.ReadFile(filename) + src, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -195,7 +195,7 @@ func (l *irLoader) compileFilterFuncs(filename string, irfile *ir.File) error { if err != nil { // If this ever happens, user will get unexpected error // lines for it; but we should trust that 99.9% errors - // should be catched at irconv phase so we get a valid Go + // should be caught at irconv phase so we get a valid Go // source here as well? return fmt.Errorf("parse custom decls: %w", err) } @@ -699,6 +699,8 @@ func (l *irLoader) newFilter(filter ir.FilterExpr, info *filterInfo) (matchFilte result.fn = makeConstFilter(result.src, filter.Value.(string)) case ir.FilterVarObjectIsGlobalOp: result.fn = makeObjectIsGlobalFilter(result.src, filter.Value.(string)) + case ir.FilterVarObjectIsVariadicParamOp: + result.fn = makeObjectIsVariadicParamFilter(result.src, filter.Value.(string)) case ir.FilterVarConstSliceOp: result.fn = makeConstSliceFilter(result.src, filter.Value.(string)) case ir.FilterVarAddressableOp: diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/irconv/irconv.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/irconv/irconv.go index 646091fedc..4eb90d51b2 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/irconv/irconv.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/irconv/irconv.go @@ -746,6 +746,8 @@ func (conv *converter) convertFilterExprImpl(e ast.Expr) ir.FilterExpr { return ir.FilterExpr{Op: ir.FilterVarObjectIsOp, Value: op.varName, Args: args} case "Object.IsGlobal": return ir.FilterExpr{Op: ir.FilterVarObjectIsGlobalOp, Value: op.varName} + case "Object.IsVariadicParam": + return ir.FilterExpr{Op: ir.FilterVarObjectIsVariadicParamOp, Value: op.varName} case "SinkType.Is": if op.varName != "$$" { // TODO: remove this restriction. 
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/match_data.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/match_data.go index 3bf3bf5a82..b0909f75fd 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/match_data.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/match_data.go @@ -6,41 +6,14 @@ import ( "github.com/quasilyte/gogrep" ) -// matchData is used to handle both regexp and AST match sets in the same way. -type matchData interface { - // TODO: don't use gogrep.CapturedNode type here. - - Node() ast.Node - CaptureList() []gogrep.CapturedNode - CapturedByName(name string) (ast.Node, bool) -} - -type commentMatchData struct { - node ast.Node - capture []gogrep.CapturedNode -} - -func (m commentMatchData) Node() ast.Node { return m.node } - -func (m commentMatchData) CaptureList() []gogrep.CapturedNode { return m.capture } - -func (m commentMatchData) CapturedByName(name string) (ast.Node, bool) { - for _, c := range m.capture { - if c.Name == name { - return c.Node, true - } - } - return nil, false -} - -type astMatchData struct { +type matchData struct { match gogrep.MatchData } -func (m astMatchData) Node() ast.Node { return m.match.Node } +func (m matchData) Node() ast.Node { return m.match.Node } -func (m astMatchData) CaptureList() []gogrep.CapturedNode { return m.match.Capture } +func (m matchData) CaptureList() []gogrep.CapturedNode { return m.match.Capture } -func (m astMatchData) CapturedByName(name string) (ast.Node, bool) { +func (m matchData) CapturedByName(name string) (ast.Node, bool) { return m.match.CapturedByName(name) } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/nodepath.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/nodepath.go index b0f02f0aa2..4ba741ee2f 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/nodepath.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/nodepath.go @@ -10,8 +10,8 @@ type nodePath struct { stack []ast.Node } -func newNodePath() nodePath { - return nodePath{stack: make([]ast.Node, 0, 32)} +func newNodePath() *nodePath { + return &nodePath{stack: make([]ast.Node, 0, 32)} } func (p nodePath) String() string { @@ -22,15 +22,15 @@ func (p nodePath) String() string { return strings.Join(parts, "/") } -func (p nodePath) Parent() ast.Node { +func (p *nodePath) Parent() ast.Node { return p.NthParent(1) } -func (p nodePath) Current() ast.Node { +func (p *nodePath) Current() ast.Node { return p.NthParent(0) } -func (p nodePath) NthParent(n int) ast.Node { +func (p *nodePath) NthParent(n int) ast.Node { index := uint(len(p.stack) - n - 1) if index < uint(len(p.stack)) { return p.stack[index] diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/gen_opcodes.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/gen_opcodes.go index c8d512038c..80386df28a 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/gen_opcodes.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/gen_opcodes.go @@ -7,8 +7,8 @@ import ( "bytes" "fmt" "go/format" - "io/ioutil" "log" + "os" "strings" "text/template" ) @@ -186,7 +186,7 @@ func writeFile(filename string, data []byte) { if err != nil { log.Panicf("gofmt: %v", err) } - if err := ioutil.WriteFile(filename, pretty, 0666); err != nil { + if err := os.WriteFile(filename, pretty, 0666); err != nil { log.Panicf("write %s: %v", filename, err) } } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go 
b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go index 1a2e2f05f9..41fbc8995d 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go @@ -8,6 +8,9 @@ import ( "io" "github.com/quasilyte/go-ruleguard/ruleguard/ir" + "github.com/quasilyte/go-ruleguard/ruleguard/quasigo" + "github.com/quasilyte/go-ruleguard/ruleguard/typematch" + "github.com/quasilyte/gogrep" ) // Engine is the main ruleguard package API object. @@ -88,6 +91,21 @@ type LoadContext struct { Fset *token.FileSet } +type RunnerState struct { + gogrepState gogrep.MatcherState + gogrepSubState gogrep.MatcherState + nodePath *nodePath + evalEnv *quasigo.EvalEnv + typematchState *typematch.MatcherState + + object *rulesRunner +} + +// NewRunnerState creates a state object that can be used with RunContext. +func NewRunnerState(e *Engine) *RunnerState { + return newRunnerState(e.impl.state) +} + type RunContext struct { Debug string DebugImports bool @@ -115,6 +133,20 @@ type RunContext struct { // Note that this value is ignored for Suggest templates. // Ruleguard doesn't truncate suggested replacement candidates. TruncateLen int + + // State is an object that contains reusable resources needed for the rules to be executed. + // + // If nil, a new state will be allocated. + // + // The State object access is not synchronized. + // State should not be shared between multiple goroutines. + // There are 3 patterns that are safe: + // 1. For single-threaded programs, you can use a single state. + // 2. For controlled concurrency with workers, you can use a per-worker state. + // 3. For uncontrolled concurrency you can use a sync.Pool of states. + // + // Reusing the state properly can increase the performance significantly. + State *RunnerState } type ReportData struct { diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go index 92f6cc34b7..fdc95ab5e7 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go @@ -8,7 +8,7 @@ import ( "go/build" "go/printer" "go/token" - "io/ioutil" + "os" "path/filepath" "reflect" "sort" @@ -56,47 +56,77 @@ type rulesRunner struct { // For named submatches we can't use it as the node can be located // deeper into the tree than the current node. // In those cases we need a more complicated algorithm. 
- nodePath nodePath + nodePath *nodePath filterParams filterParams } +func newRunnerState(es *engineState) *RunnerState { + gogrepState := gogrep.NewMatcherState() + gogrepSubState := gogrep.NewMatcherState() + state := &RunnerState{ + gogrepState: gogrepState, + gogrepSubState: gogrepSubState, + nodePath: newNodePath(), + evalEnv: es.env.GetEvalEnv(), + typematchState: typematch.NewMatcherState(), + object: &rulesRunner{}, + } + return state +} + +func (state *RunnerState) Reset() { + state.nodePath.stack = state.nodePath.stack[:0] + state.evalEnv.Stack.Reset() +} + func newRulesRunner(ctx *RunContext, buildContext *build.Context, state *engineState, rules *goRuleSet) *rulesRunner { + runnerState := ctx.State + if runnerState == nil { + runnerState = newRunnerState(state) + } else { + runnerState.Reset() + } + importer := newGoImporter(state, goImporterConfig{ fset: ctx.Fset, debugImports: ctx.DebugImports, debugPrint: ctx.DebugPrint, buildContext: buildContext, }) - gogrepState := gogrep.NewMatcherState() + gogrepState := runnerState.gogrepState gogrepState.Types = ctx.Types - gogrepSubState := gogrep.NewMatcherState() + gogrepSubState := runnerState.gogrepSubState gogrepSubState.Types = ctx.Types - evalEnv := state.env.GetEvalEnv() - rr := &rulesRunner{ + evalEnv := runnerState.evalEnv + + rr := runnerState.object + *rr = rulesRunner{ bgContext: context.Background(), ctx: ctx, importer: importer, rules: rules, gogrepState: gogrepState, gogrepSubState: gogrepSubState, - nodePath: newNodePath(), + nodePath: runnerState.nodePath, truncateLen: ctx.TruncateLen, filterParams: filterParams{ - typematchState: typematch.NewMatcherState(), + typematchState: runnerState.typematchState, env: evalEnv, importer: importer, ctx: ctx, }, } + evalEnv.Stack.Push(&rr.filterParams) if ctx.TruncateLen == 0 { rr.truncateLen = 60 } rr.filterParams.nodeText = rr.nodeText rr.filterParams.nodeString = rr.nodeString - rr.filterParams.nodePath = &rr.nodePath + rr.filterParams.nodePath = rr.nodePath rr.filterParams.gogrepSubState = &rr.gogrepSubState + return rr } @@ -136,7 +166,7 @@ func (rr *rulesRunner) fileBytes() []byte { } // TODO(quasilyte): re-use src slice? - src, err := ioutil.ReadFile(rr.filename) + src, err := os.ReadFile(rr.filename) if err != nil || src == nil { // Assign a zero-length slice so rr.src // is never nil during the second fileBytes call. @@ -160,7 +190,7 @@ func (rr *rulesRunner) run(f *ast.File) error { if rr.rules.universal.categorizedNum != 0 { var inspector astWalker - inspector.nodePath = &rr.nodePath + inspector.nodePath = rr.nodePath inspector.filterParams = &rr.filterParams inspector.Walk(f, func(n ast.Node, tag nodetag.Value) { rr.runRules(n, tag) @@ -183,7 +213,7 @@ func (rr *rulesRunner) runCommentRules(comment *ast.Comment) { file := rr.ctx.Fset.File(comment.Pos()) for _, rule := range rr.rules.universal.commentRules { - var m commentMatchData + var m matchData if rule.captureGroups { result := rule.pat.FindStringSubmatchIndex(comment.Text) if result == nil { @@ -200,13 +230,13 @@ func (rr *rulesRunner) runCommentRules(comment *ast.Comment) { // Consider this pattern: `(?Pfoo)|(bar)`. // If we have `bar` input string, will remain empty. 
if beginPos < 0 || endPos < 0 { - m.capture = append(m.capture, gogrep.CapturedNode{ + m.match.Capture = append(m.match.Capture, gogrep.CapturedNode{ Name: name, Node: &ast.Comment{Slash: comment.Pos()}, }) continue } - m.capture = append(m.capture, gogrep.CapturedNode{ + m.match.Capture = append(m.match.Capture, gogrep.CapturedNode{ Name: name, Node: &ast.Comment{ Slash: file.Pos(beginPos + file.Offset(comment.Pos())), @@ -214,7 +244,7 @@ func (rr *rulesRunner) runCommentRules(comment *ast.Comment) { }, }) } - m.node = &ast.Comment{ + m.match.Node = &ast.Comment{ Slash: file.Pos(result[0] + file.Offset(comment.Pos())), Text: comment.Text[result[0]:result[1]], } @@ -224,7 +254,7 @@ func (rr *rulesRunner) runCommentRules(comment *ast.Comment) { if result == nil { continue } - m.node = &ast.Comment{ + m.match.Node = &ast.Comment{ Slash: file.Pos(result[0] + file.Offset(comment.Pos())), Text: comment.Text[result[0]:result[1]], } @@ -307,7 +337,7 @@ func (rr *rulesRunner) reject(rule goRule, reason string, m matchData) { } } -func (rr *rulesRunner) handleCommentMatch(rule goCommentRule, m commentMatchData) bool { +func (rr *rulesRunner) handleCommentMatch(rule goCommentRule, m matchData) bool { if rule.base.filter.fn != nil { rr.filterParams.match = m filterResult := rule.base.filter.fn(&rr.filterParams) @@ -345,13 +375,13 @@ func (rr *rulesRunner) handleCommentMatch(rule goCommentRule, m commentMatchData func (rr *rulesRunner) handleMatch(rule goRule, m gogrep.MatchData) bool { if rule.filter.fn != nil || rule.do != nil { - rr.filterParams.match = astMatchData{match: m} + rr.filterParams.match = matchData{match: m} } if rule.filter.fn != nil { filterResult := rule.filter.fn(&rr.filterParams) if !filterResult.Matched() { - rr.reject(rule, filterResult.RejectReason(), astMatchData{match: m}) + rr.reject(rule, filterResult.RejectReason(), matchData{match: m}) return false } } @@ -379,9 +409,9 @@ func (rr *rulesRunner) handleMatch(rule goRule, m gogrep.MatchData) bool { suggestText = rr.filterParams.suggestString } } else { - messageText = rr.renderMessage(rule.msg, astMatchData{match: m}, true) + messageText = rr.renderMessage(rule.msg, matchData{match: m}, true) if rule.suggestion != "" { - suggestText = rr.renderMessage(rule.suggestion, astMatchData{match: m}, false) + suggestText = rr.renderMessage(rule.suggestion, matchData{match: m}, false) } } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/textmatch/textmatch.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/textmatch/textmatch.go index a3787e2c16..135f95740e 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/textmatch/textmatch.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/textmatch/textmatch.go @@ -9,7 +9,7 @@ type Pattern interface { } // Compile parses a regular expression and returns a compiled -// pattern that can match inputs descriped by the regexp. +// pattern that can match inputs described by the regexp. 
// // Semantically it's close to the regexp.Compile, but // it does recognize some common patterns and creates diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go index b74740378f..4b740b207c 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go @@ -507,9 +507,14 @@ func (p *Pattern) matchIdentical(state *MatcherState, sub *pattern, typ types.Ty } pkgPath := sub.value.([2]string)[0] typeName := sub.value.([2]string)[1] - // obj.Pkg().Path() may be in a vendor directory. - path := strings.SplitAfter(obj.Pkg().Path(), "/vendor/") - return path[len(path)-1] == pkgPath && typeName == obj.Name() + if typeName != obj.Name() { + return false + } + objPath := obj.Pkg().Path() + if vendorPos := strings.Index(objPath, "/vendor/"); vendorPos != -1 { + objPath = objPath[vendorPos+len("/vendor/"):] + } + return objPath == pkgPath case opFuncNoSeq: typ, ok := typ.(*types.Signature) diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go index 962e9da2a9..6403d91cdc 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go @@ -9,6 +9,8 @@ import ( "regexp/syntax" "strconv" "strings" + + "golang.org/x/exp/typeparams" ) var invalidType = types.Typ[types.Invalid] @@ -271,7 +273,7 @@ func isTypeExpr(info *types.Info, x ast.Expr) bool { case *ast.Ident: // Identifier may be a type expression if object - // it reffers to is a type name. + // it refers to is a type name. _, ok := info.ObjectOf(x).(*types.TypeName) return ok @@ -295,3 +297,8 @@ func identOf(e ast.Expr) *ast.Ident { return nil } } + +func isTypeParam(typ types.Type) bool { + _, ok := typ.(*typeparams.TypeParam) + return ok +} diff --git a/vendor/github.com/quasilyte/gogrep/README.md b/vendor/github.com/quasilyte/gogrep/README.md index b6c2c47c16..ecf0dc4c7e 100644 --- a/vendor/github.com/quasilyte/gogrep/README.md +++ b/vendor/github.com/quasilyte/gogrep/README.md @@ -24,7 +24,7 @@ $ go get github.com/quasilyte/gogrep To get a gogrep command-line tool, install the `cmd/gogrep` Go submodule. ```bash -$ go install github.com/quasilyte/cmd/gogrep +$ go install github.com/quasilyte/gogrep/cmd/gogrep@latest ``` See [docs/gogrep_cli.md](_docs/gogrep_cli.md) to learn how to use it. 
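Stepping back to the `RunnerState` plumbing in `ruleguard.go` and `runner.go` above: `newRunnerState` pre-allocates the gogrep matcher states, node path, eval env, and typematch state, and the new `RunContext.State` field lets callers recycle that scratch space between runs instead of reallocating it per file. A minimal sketch of the third documented pattern, a `sync.Pool` of states for uncontrolled concurrency; the `analyzeFile` helper and the rule loading are assumptions, and the `RunContext` field names follow the struct shown above:

```go
package analysis

import (
	"go/ast"
	"go/token"
	"go/types"
	"sync"

	"github.com/quasilyte/go-ruleguard/ruleguard"
)

// engine is assumed to have had its rules loaded via engine.Load elsewhere.
var engine = ruleguard.NewEngine()

// statePool hands out reusable RunnerStates so concurrent analyses do not
// reallocate matcher scratch space for every file they visit.
var statePool = sync.Pool{
	New: func() interface{} { return ruleguard.NewRunnerState(engine) },
}

func analyzeFile(fset *token.FileSet, f *ast.File, info *types.Info, pkg *types.Package) error {
	state := statePool.Get().(*ruleguard.RunnerState)
	defer statePool.Put(state)

	ctx := &ruleguard.RunContext{
		Fset:  fset,
		Types: info,
		Pkg:   pkg,
		State: state, // reused; newRulesRunner calls Reset() on it
		Report: func(data *ruleguard.ReportData) {
			// handle findings here
		},
	}
	return engine.Run(ctx, f)
}
```

Returning the state to the pool right after `Run` is safe because `newRulesRunner` resets a supplied state before use; the one hard rule, per the doc comment above, is that a single state must never be used by two goroutines at the same time.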
diff --git a/vendor/github.com/quasilyte/gogrep/compile.go b/vendor/github.com/quasilyte/gogrep/compile.go index a00a39cc83..31b60dfa5b 100644 --- a/vendor/github.com/quasilyte/gogrep/compile.go +++ b/vendor/github.com/quasilyte/gogrep/compile.go @@ -122,16 +122,19 @@ func (c *compiler) compileNode(n ast.Node) { c.compileStmt(n) case *ast.ValueSpec: c.compileValueSpec(n) - case stmtSlice: - c.compileStmtSlice(n) - case declSlice: - c.compileDeclSlice(n) - case ExprSlice: - c.compileExprSlice(n) case *rangeClause: c.compileRangeClause(n) case *rangeHeader: c.compileRangeHeader(n) + case *NodeSlice: + switch n.Kind { + case StmtNodeSlice: + c.compileStmtSlice(n.stmtSlice) + case DeclNodeSlice: + c.compileDeclSlice(n.declSlice) + case ExprNodeSlice: + c.compileExprSlice(n.exprSlice) + } default: panic(c.errorf(n, "compileNode: unexpected %T", n)) } @@ -1191,7 +1194,7 @@ func (c *compiler) compileSendStmt(n *ast.SendStmt) { c.compileExpr(n.Value) } -func (c *compiler) compileDeclSlice(decls declSlice) { +func (c *compiler) compileDeclSlice(decls []ast.Decl) { c.emitInstOp(opMultiDecl) for _, n := range decls { c.compileDecl(n) @@ -1199,7 +1202,7 @@ func (c *compiler) compileDeclSlice(decls declSlice) { c.emitInstOp(opEnd) } -func (c *compiler) compileStmtSlice(stmts stmtSlice) { +func (c *compiler) compileStmtSlice(stmts []ast.Stmt) { c.emitInstOp(opMultiStmt) insideStmtList := c.insideStmtList c.insideStmtList = true @@ -1210,7 +1213,7 @@ func (c *compiler) compileStmtSlice(stmts stmtSlice) { c.emitInstOp(opEnd) } -func (c *compiler) compileExprSlice(exprs ExprSlice) { +func (c *compiler) compileExprSlice(exprs []ast.Expr) { c.emitInstOp(opMultiExpr) for _, n := range exprs { c.compileExpr(n) diff --git a/vendor/github.com/quasilyte/gogrep/gogrep.go b/vendor/github.com/quasilyte/gogrep/gogrep.go index 313a9a2515..47a03f9b43 100644 --- a/vendor/github.com/quasilyte/gogrep/gogrep.go +++ b/vendor/github.com/quasilyte/gogrep/gogrep.go @@ -11,7 +11,7 @@ import ( ) func IsEmptyNodeSlice(n ast.Node) bool { - if list, ok := n.(NodeSlice); ok { + if list, ok := n.(*NodeSlice); ok { return list.Len() == 0 } return false @@ -62,6 +62,9 @@ type MatcherState struct { // actual matching phase) capture []CapturedNode + nodeSlices []NodeSlice + nodeSlicesUsed int + pc int partial PartialNode @@ -69,7 +72,8 @@ type MatcherState struct { func NewMatcherState() MatcherState { return MatcherState{ - capture: make([]CapturedNode, 0, 8), + capture: make([]CapturedNode, 0, 8), + nodeSlices: make([]NodeSlice, 16), } } @@ -143,34 +147,37 @@ func Compile(config CompileConfig) (*Pattern, PatternInfo, error) { } func Walk(root ast.Node, fn func(n ast.Node) bool) { - switch root := root.(type) { - case ExprSlice: - for _, e := range root { - ast.Inspect(e, fn) - } - case stmtSlice: - for _, e := range root { - ast.Inspect(e, fn) - } - case fieldSlice: - for _, e := range root { - ast.Inspect(e, fn) - } - case identSlice: - for _, e := range root { - ast.Inspect(e, fn) + if root, ok := root.(*NodeSlice); ok { + switch root.Kind { + case ExprNodeSlice: + for _, e := range root.exprSlice { + ast.Inspect(e, fn) + } + case StmtNodeSlice: + for _, e := range root.stmtSlice { + ast.Inspect(e, fn) + } + case FieldNodeSlice: + for _, e := range root.fieldSlice { + ast.Inspect(e, fn) + } + case IdentNodeSlice: + for _, e := range root.identSlice { + ast.Inspect(e, fn) + } + case SpecNodeSlice: + for _, e := range root.specSlice { + ast.Inspect(e, fn) + } + default: + for _, e := range root.declSlice { + ast.Inspect(e, fn) + } } - 
case specSlice: - for _, e := range root { - ast.Inspect(e, fn) - } - case declSlice: - for _, e := range root { - ast.Inspect(e, fn) - } - default: - ast.Inspect(root, fn) + return } + + ast.Inspect(root, fn) } func newPatternInfo() PatternInfo { diff --git a/vendor/github.com/quasilyte/gogrep/match.go b/vendor/github.com/quasilyte/gogrep/match.go index d4e3243aad..d4b317b96f 100644 --- a/vendor/github.com/quasilyte/gogrep/match.go +++ b/vendor/github.com/quasilyte/gogrep/match.go @@ -45,8 +45,36 @@ func (m *matcher) resetCapture(state *MatcherState) { } } +func (m *matcher) toStmtSlice(state *MatcherState, nodes ...ast.Node) *NodeSlice { + slice := m.allocNodeSlice(state) + var stmts []ast.Stmt + for _, node := range nodes { + switch x := node.(type) { + case nil: + case ast.Stmt: + stmts = append(stmts, x) + case ast.Expr: + stmts = append(stmts, &ast.ExprStmt{X: x}) + default: + panic(fmt.Sprintf("unexpected node type: %T", x)) + } + } + slice.assignStmtSlice(stmts) + return slice +} + +func (m *matcher) allocNodeSlice(state *MatcherState) *NodeSlice { + if state.nodeSlicesUsed < len(state.nodeSlices) { + i := state.nodeSlicesUsed + state.nodeSlicesUsed++ + return &state.nodeSlices[i] + } + return &NodeSlice{} +} + func (m *matcher) MatchNode(state *MatcherState, n ast.Node, accept func(MatchData)) { state.pc = 0 + state.nodeSlicesUsed = 0 inst := m.nextInst(state) switch inst.op { case opMultiStmt: @@ -91,24 +119,32 @@ func (m *matcher) MatchNode(state *MatcherState, n ast.Node, accept func(MatchDa } func (m *matcher) walkDeclSlice(state *MatcherState, decls []ast.Decl, accept func(MatchData)) { - m.walkNodeSlice(state, declSlice(decls), accept) + slice := m.allocNodeSlice(state) + slice.assignDeclSlice(decls) + m.walkNodeSlice(state, slice, accept) } func (m *matcher) walkExprSlice(state *MatcherState, exprs []ast.Expr, accept func(MatchData)) { - m.walkNodeSlice(state, ExprSlice(exprs), accept) + slice := m.allocNodeSlice(state) + slice.assignExprSlice(exprs) + m.walkNodeSlice(state, slice, accept) } func (m *matcher) walkStmtSlice(state *MatcherState, stmts []ast.Stmt, accept func(MatchData)) { - m.walkNodeSlice(state, stmtSlice(stmts), accept) + slice := m.allocNodeSlice(state) + slice.assignStmtSlice(stmts) + m.walkNodeSlice(state, slice, accept) } -func (m *matcher) walkNodeSlice(state *MatcherState, nodes NodeSlice, accept func(MatchData)) { +func (m *matcher) walkNodeSlice(state *MatcherState, nodes *NodeSlice, accept func(MatchData)) { sliceLen := nodes.Len() from := 0 + tmpSlice := m.allocNodeSlice(state) for { state.pc = 1 // FIXME: this is a kludge m.resetCapture(state) - matched, offset := m.matchNodeList(state, nodes.slice(from, sliceLen), true) + nodes.SliceInto(tmpSlice, from, sliceLen) + matched, offset := m.matchNodeList(state, tmpSlice, true) if matched == nil { break } @@ -422,11 +458,11 @@ func (m *matcher) matchNodeWithInst(state *MatcherState, inst instruction, n ast case opIfNamedOptStmt: n, ok := n.(*ast.IfStmt) return ok && n.Else == nil && m.matchNode(state, n.Body) && - m.matchNamed(state, m.stringValue(inst), toStmtSlice(n.Cond, n.Init)) + m.matchNamed(state, m.stringValue(inst), m.toStmtSlice(state, n.Cond, n.Init)) case opIfNamedOptElseStmt: n, ok := n.(*ast.IfStmt) return ok && n.Else != nil && m.matchNode(state, n.Body) && m.matchNode(state, n.Else) && - m.matchNamed(state, m.stringValue(inst), toStmtSlice(n.Cond, n.Init)) + m.matchNamed(state, m.stringValue(inst), m.toStmtSlice(state, n.Cond, n.Init)) case opCaseClause: n, ok := n.(*ast.CaseClause) @@ 
-641,33 +677,43 @@ func (m *matcher) matchArgList(state *MatcherState, exprs []ast.Expr) bool { } func (m *matcher) matchStmtSlice(state *MatcherState, stmts []ast.Stmt) bool { - matched, _ := m.matchNodeList(state, stmtSlice(stmts), false) + slice := m.allocNodeSlice(state) + slice.assignStmtSlice(stmts) + matched, _ := m.matchNodeList(state, slice, false) return matched != nil } func (m *matcher) matchExprSlice(state *MatcherState, exprs []ast.Expr) bool { - matched, _ := m.matchNodeList(state, ExprSlice(exprs), false) + slice := m.allocNodeSlice(state) + slice.assignExprSlice(exprs) + matched, _ := m.matchNodeList(state, slice, false) return matched != nil } func (m *matcher) matchFieldSlice(state *MatcherState, fields []*ast.Field) bool { - matched, _ := m.matchNodeList(state, fieldSlice(fields), false) + slice := m.allocNodeSlice(state) + slice.assignFieldSlice(fields) + matched, _ := m.matchNodeList(state, slice, false) return matched != nil } func (m *matcher) matchIdentSlice(state *MatcherState, idents []*ast.Ident) bool { - matched, _ := m.matchNodeList(state, identSlice(idents), false) + slice := m.allocNodeSlice(state) + slice.assignIdentSlice(idents) + matched, _ := m.matchNodeList(state, slice, false) return matched != nil } func (m *matcher) matchSpecSlice(state *MatcherState, specs []ast.Spec) bool { - matched, _ := m.matchNodeList(state, specSlice(specs), false) + slice := m.allocNodeSlice(state) + slice.assignSpecSlice(specs) + matched, _ := m.matchNodeList(state, slice, false) return matched != nil } // matchNodeList matches two lists of nodes. It uses a common algorithm to match // wildcard patterns with any number of nodes without recursion. -func (m *matcher) matchNodeList(state *MatcherState, nodes NodeSlice, partial bool) (matched ast.Node, offset int) { +func (m *matcher) matchNodeList(state *MatcherState, nodes *NodeSlice, partial bool) (matched ast.Node, offset int) { sliceLen := nodes.Len() inst := m.nextInst(state) if inst.op == opEnd { @@ -727,7 +773,9 @@ func (m *matcher) matchNodeList(state *MatcherState, nodes NodeSlice, partial bo case "", "_": return true } - return m.matchNamed(state, wildName, nodes.slice(wildStart, j)) + slice := m.allocNodeSlice(state) + nodes.SliceInto(slice, wildStart, j) + return m.matchNamed(state, wildName, slice) } for ; inst.op != opEnd || j < sliceLen; inst = m.nextInst(state) { if inst.op != opEnd { @@ -776,7 +824,9 @@ func (m *matcher) matchNodeList(state *MatcherState, nodes NodeSlice, partial bo if !wouldMatch() { return nil, -1 } - return nodes.slice(partialStart, partialEnd), partialEnd + 1 + slice := m.allocNodeSlice(state) + nodes.SliceInto(slice, partialStart, partialEnd) + return slice, partialEnd + 1 } func (m *matcher) matchRangeClause(state *MatcherState, n ast.Node, accept func(MatchData)) { @@ -919,58 +969,56 @@ func equalNodes(x, y ast.Node) bool { if x == nil || y == nil { return x == y } - switch x := x.(type) { - case stmtSlice: - y, ok := y.(stmtSlice) - if !ok || len(x) != len(y) { + if x, ok := x.(*NodeSlice); ok { + y, ok := y.(*NodeSlice) + if !ok || x.Kind != y.Kind || x.Len() != y.Len() { return false } - for i := range x { - if !astequal.Stmt(x[i], y[i]) { - return false + switch x.Kind { + case ExprNodeSlice: + for i, n1 := range x.exprSlice { + n2 := y.exprSlice[i] + if !astequal.Expr(n1, n2) { + return false + } } - } - return true - case ExprSlice: - y, ok := y.(ExprSlice) - if !ok || len(x) != len(y) { - return false - } - for i := range x { - if !astequal.Expr(x[i], y[i]) { - return false + case 
StmtNodeSlice: + for i, n1 := range x.stmtSlice { + n2 := y.stmtSlice[i] + if !astequal.Stmt(n1, n2) { + return false + } } - } - return true - case declSlice: - y, ok := y.(declSlice) - if !ok || len(x) != len(y) { - return false - } - for i := range x { - if !astequal.Decl(x[i], y[i]) { - return false + case FieldNodeSlice: + for i, n1 := range x.fieldSlice { + n2 := y.fieldSlice[i] + if !astequal.Node(n1, n2) { + return false + } + } + case IdentNodeSlice: + for i, n1 := range x.identSlice { + n2 := y.identSlice[i] + if n1.Name != n2.Name { + return false + } + } + case SpecNodeSlice: + for i, n1 := range x.specSlice { + n2 := y.specSlice[i] + if !astequal.Node(n1, n2) { + return false + } + } + case DeclNodeSlice: + for i, n1 := range x.declSlice { + n2 := y.declSlice[i] + if !astequal.Decl(n1, n2) { + return false + } } } return true - - default: - return astequal.Node(x, y) - } -} - -func toStmtSlice(nodes ...ast.Node) stmtSlice { - var stmts []ast.Stmt - for _, node := range nodes { - switch x := node.(type) { - case nil: - case ast.Stmt: - stmts = append(stmts, x) - case ast.Expr: - stmts = append(stmts, &ast.ExprStmt{X: x}) - default: - panic(fmt.Sprintf("unexpected node type: %T", x)) - } } - return stmtSlice(stmts) + return astequal.Node(x, y) } diff --git a/vendor/github.com/quasilyte/gogrep/parse.go b/vendor/github.com/quasilyte/gogrep/parse.go index aa5ffbf924..3c6854bda0 100644 --- a/vendor/github.com/quasilyte/gogrep/parse.go +++ b/vendor/github.com/quasilyte/gogrep/parse.go @@ -174,7 +174,9 @@ func parseDetectingNode(fset *token.FileSet, src string) (ast.Node, error) { if len(cl.Elts) == 1 { return cl.Elts[0], nil } - return ExprSlice(cl.Elts), nil + slice := &NodeSlice{} + slice.assignExprSlice(cl.Elts) + return slice, nil } // then try as statements @@ -185,7 +187,9 @@ func parseDetectingNode(fset *token.FileSet, src string) (ast.Node, error) { if len(bl.List) == 1 { return bl.List[0], nil } - return stmtSlice(bl.List), nil + slice := &NodeSlice{} + slice.assignStmtSlice(bl.List) + return slice, nil } // Statements is what covers most cases, so it will give // the best overall error message. 
@@ -199,7 +203,9 @@ func parseDetectingNode(fset *token.FileSet, src string) (ast.Node, error) { if len(f.Decls) == 1 { return f.Decls[0], nil } - return declSlice(f.Decls), nil + slice := &NodeSlice{} + slice.assignDeclSlice(f.Decls) + return slice, nil } // try as a whole file diff --git a/vendor/github.com/quasilyte/gogrep/slices.go b/vendor/github.com/quasilyte/gogrep/slices.go index 13775a818f..fb969b51bd 100644 --- a/vendor/github.com/quasilyte/gogrep/slices.go +++ b/vendor/github.com/quasilyte/gogrep/slices.go @@ -5,54 +5,146 @@ import ( "go/token" ) -type NodeSlice interface { - At(i int) ast.Node - Len() int - slice(from, to int) NodeSlice - ast.Node -} +type NodeSliceKind uint32 + +const ( + ExprNodeSlice NodeSliceKind = iota + StmtNodeSlice + FieldNodeSlice + IdentNodeSlice + SpecNodeSlice + DeclNodeSlice +) + +type NodeSlice struct { + Kind NodeSliceKind -type ( - ExprSlice []ast.Expr + exprSlice []ast.Expr stmtSlice []ast.Stmt fieldSlice []*ast.Field identSlice []*ast.Ident specSlice []ast.Spec declSlice []ast.Decl -) +} + +func (s *NodeSlice) GetExprSlice() []ast.Expr { return s.exprSlice } +func (s *NodeSlice) GetStmtSlice() []ast.Stmt { return s.stmtSlice } +func (s *NodeSlice) GetFieldSlice() []*ast.Field { return s.fieldSlice } +func (s *NodeSlice) GetIdentSlice() []*ast.Ident { return s.identSlice } +func (s *NodeSlice) GetSpecSlice() []ast.Spec { return s.specSlice } +func (s *NodeSlice) GetDeclSlice() []ast.Decl { return s.declSlice } + +func (s *NodeSlice) assignExprSlice(xs []ast.Expr) { + s.Kind = ExprNodeSlice + s.exprSlice = xs +} + +func (s *NodeSlice) assignStmtSlice(xs []ast.Stmt) { + s.Kind = StmtNodeSlice + s.stmtSlice = xs +} + +func (s *NodeSlice) assignFieldSlice(xs []*ast.Field) { + s.Kind = FieldNodeSlice + s.fieldSlice = xs +} + +func (s *NodeSlice) assignIdentSlice(xs []*ast.Ident) { + s.Kind = IdentNodeSlice + s.identSlice = xs +} + +func (s *NodeSlice) assignSpecSlice(xs []ast.Spec) { + s.Kind = SpecNodeSlice + s.specSlice = xs +} + +func (s *NodeSlice) assignDeclSlice(xs []ast.Decl) { + s.Kind = DeclNodeSlice + s.declSlice = xs +} + +func (s *NodeSlice) Len() int { + switch s.Kind { + case ExprNodeSlice: + return len(s.exprSlice) + case StmtNodeSlice: + return len(s.stmtSlice) + case FieldNodeSlice: + return len(s.fieldSlice) + case IdentNodeSlice: + return len(s.identSlice) + case SpecNodeSlice: + return len(s.specSlice) + default: + return len(s.declSlice) + } +} -func (l ExprSlice) Len() int { return len(l) } -func (l ExprSlice) At(i int) ast.Node { return l[i] } -func (l ExprSlice) slice(i, j int) NodeSlice { return l[i:j] } -func (l ExprSlice) Pos() token.Pos { return l[0].Pos() } -func (l ExprSlice) End() token.Pos { return l[len(l)-1].End() } - -func (l stmtSlice) Len() int { return len(l) } -func (l stmtSlice) At(i int) ast.Node { return l[i] } -func (l stmtSlice) slice(i, j int) NodeSlice { return l[i:j] } -func (l stmtSlice) Pos() token.Pos { return l[0].Pos() } -func (l stmtSlice) End() token.Pos { return l[len(l)-1].End() } - -func (l fieldSlice) Len() int { return len(l) } -func (l fieldSlice) At(i int) ast.Node { return l[i] } -func (l fieldSlice) slice(i, j int) NodeSlice { return l[i:j] } -func (l fieldSlice) Pos() token.Pos { return l[0].Pos() } -func (l fieldSlice) End() token.Pos { return l[len(l)-1].End() } - -func (l identSlice) Len() int { return len(l) } -func (l identSlice) At(i int) ast.Node { return l[i] } -func (l identSlice) slice(i, j int) NodeSlice { return l[i:j] } -func (l identSlice) Pos() token.Pos {
return l[0].Pos() } -func (l identSlice) End() token.Pos { return l[len(l)-1].End() } - -func (l specSlice) Len() int { return len(l) } -func (l specSlice) At(i int) ast.Node { return l[i] } -func (l specSlice) slice(i, j int) NodeSlice { return l[i:j] } -func (l specSlice) Pos() token.Pos { return l[0].Pos() } -func (l specSlice) End() token.Pos { return l[len(l)-1].End() } - -func (l declSlice) Len() int { return len(l) } -func (l declSlice) At(i int) ast.Node { return l[i] } -func (l declSlice) slice(i, j int) NodeSlice { return l[i:j] } -func (l declSlice) Pos() token.Pos { return l[0].Pos() } -func (l declSlice) End() token.Pos { return l[len(l)-1].End() } +func (s *NodeSlice) At(i int) ast.Node { + switch s.Kind { + case ExprNodeSlice: + return s.exprSlice[i] + case StmtNodeSlice: + return s.stmtSlice[i] + case FieldNodeSlice: + return s.fieldSlice[i] + case IdentNodeSlice: + return s.identSlice[i] + case SpecNodeSlice: + return s.specSlice[i] + default: + return s.declSlice[i] + } +} + +func (s *NodeSlice) SliceInto(dst *NodeSlice, i, j int) { + switch s.Kind { + case ExprNodeSlice: + dst.assignExprSlice(s.exprSlice[i:j]) + case StmtNodeSlice: + dst.assignStmtSlice(s.stmtSlice[i:j]) + case FieldNodeSlice: + dst.assignFieldSlice(s.fieldSlice[i:j]) + case IdentNodeSlice: + dst.assignIdentSlice(s.identSlice[i:j]) + case SpecNodeSlice: + dst.assignSpecSlice(s.specSlice[i:j]) + default: + dst.assignDeclSlice(s.declSlice[i:j]) + } +} + +func (s *NodeSlice) Pos() token.Pos { + switch s.Kind { + case ExprNodeSlice: + return s.exprSlice[0].Pos() + case StmtNodeSlice: + return s.stmtSlice[0].Pos() + case FieldNodeSlice: + return s.fieldSlice[0].Pos() + case IdentNodeSlice: + return s.identSlice[0].Pos() + case SpecNodeSlice: + return s.specSlice[0].Pos() + default: + return s.declSlice[0].Pos() + } +} + +func (s *NodeSlice) End() token.Pos { + switch s.Kind { + case ExprNodeSlice: + return s.exprSlice[len(s.exprSlice)-1].End() + case StmtNodeSlice: + return s.stmtSlice[len(s.stmtSlice)-1].End() + case FieldNodeSlice: + return s.fieldSlice[len(s.fieldSlice)-1].End() + case IdentNodeSlice: + return s.identSlice[len(s.identSlice)-1].End() + case SpecNodeSlice: + return s.specSlice[len(s.specSlice)-1].End() + default: + return s.declSlice[len(s.declSlice)-1].End() + } +} diff --git a/vendor/github.com/quasilyte/regex/syntax/README.md b/vendor/github.com/quasilyte/regex/syntax/README.md index 13064ec39a..b70e25ad96 100644 --- a/vendor/github.com/quasilyte/regex/syntax/README.md +++ b/vendor/github.com/quasilyte/regex/syntax/README.md @@ -4,19 +4,17 @@ Package `syntax` provides regular expressions parser as well as AST definitions. ## Rationale -There are several problems with the stdlib [regexp/syntax](https://golang.org/pkg/regexp/syntax/) package: +The advantages of this package over stdlib [regexp/syntax](https://golang.org/pkg/regexp/syntax/): -1. It does several transformations during the parsing that make it - hard to do any kind of syntax analysis afterward. +1. Does no transformations/optimizations during the parsing. + The produced parse tree is lossless. -2. The AST used there is optimized for the compilation and - execution inside the [regexp](https://golang.org/pkg/regexp) package. - It's somewhat complicated, especially in a way character ranges are encoded. +2. Simpler AST representation. -3. It only supports [re2](https://github.com/google/re2/wiki/Syntax) syntax. - This parser recognizes most PCRE operations. +3.
Can parse most PCRE operations in addition to [re2](https://github.com/google/re2/wiki/Syntax) syntax. + It can also handle PHP/Perl style patterns with delimiters. -4. It's easier to extend this package than something from the standard library. +4. This package is easier to extend than something from the standard library. This package does almost no assumptions about how generated AST is going to be used so it preserves as much syntax information as possible. @@ -24,3 +22,8 @@ so it preserves as much syntax information as possible. It's easy to write another intermediate representation on top of it. The main function of this package is to convert a textual regexp pattern into a more structured form that can be processed more easily. + +## Users + +* [go-critic](https://github.com/go-critic/go-critic) - Go static analyzer +* [NoVerify](https://github.com/VKCOM/noverify) - PHP static analyzer diff --git a/vendor/github.com/quasilyte/regex/syntax/ast.go b/vendor/github.com/quasilyte/regex/syntax/ast.go index 44b7b61bb3..4d21a9432b 100644 --- a/vendor/github.com/quasilyte/regex/syntax/ast.go +++ b/vendor/github.com/quasilyte/regex/syntax/ast.go @@ -1,7 +1,6 @@ package syntax import ( - "fmt" "strings" ) @@ -63,85 +62,3 @@ func (e Expr) LastArg() Expr { type Operation byte type Form byte - -func FormatSyntax(re *Regexp) string { - return formatExprSyntax(re, re.Expr) -} - -func formatExprSyntax(re *Regexp, e Expr) string { - switch e.Op { - case OpChar, OpLiteral: - switch e.Value { - case "{": - return "'{'" - case "}": - return "'}'" - default: - return e.Value - } - case OpString, OpEscapeChar, OpEscapeMeta, OpEscapeOctal, OpEscapeUni, OpEscapeHex, OpPosixClass: - return e.Value - case OpRepeat: - return fmt.Sprintf("(repeat %s %s)", formatExprSyntax(re, e.Args[0]), e.Args[1].Value) - case OpCaret: - return "^" - case OpDollar: - return "$" - case OpDot: - return "." - case OpQuote: - return fmt.Sprintf("(q %s)", e.Value) - case OpCharRange: - return fmt.Sprintf("%s-%s", formatExprSyntax(re, e.Args[0]), formatExprSyntax(re, e.Args[1])) - case OpCharClass: - return fmt.Sprintf("[%s]", formatArgsSyntax(re, e.Args)) - case OpNegCharClass: - return fmt.Sprintf("[^%s]", formatArgsSyntax(re, e.Args)) - case OpConcat: - return fmt.Sprintf("{%s}", formatArgsSyntax(re, e.Args)) - case OpAlt: - return fmt.Sprintf("(or %s)", formatArgsSyntax(re, e.Args)) - case OpCapture: - return fmt.Sprintf("(capture %s)", formatExprSyntax(re, e.Args[0])) - case OpNamedCapture: - return fmt.Sprintf("(capture %s %s)", formatExprSyntax(re, e.Args[0]), e.Args[1].Value) - case OpGroup: - return fmt.Sprintf("(group %s)", formatExprSyntax(re, e.Args[0])) - case OpAtomicGroup: - return fmt.Sprintf("(atomic %s)", formatExprSyntax(re, e.Args[0])) - case OpGroupWithFlags: - return fmt.Sprintf("(group %s ?%s)", formatExprSyntax(re, e.Args[0]), e.Args[1].Value) - case OpFlagOnlyGroup: - return fmt.Sprintf("(flags ?%s)", formatExprSyntax(re, e.Args[0])) - case OpPositiveLookahead: - return fmt.Sprintf("(?= %s)", formatExprSyntax(re, e.Args[0])) - case OpNegativeLookahead: - return fmt.Sprintf("(?! 
%s)", formatExprSyntax(re, e.Args[0])) - case OpPositiveLookbehind: - return fmt.Sprintf("(?<= %s)", formatExprSyntax(re, e.Args[0])) - case OpNegativeLookbehind: - return fmt.Sprintf("(?", e.Op) - } -} - -func formatArgsSyntax(re *Regexp, args []Expr) string { - parts := make([]string, len(args)) - for i, e := range args { - parts[i] = formatExprSyntax(re, e) - } - return strings.Join(parts, " ") -} diff --git a/vendor/github.com/quasilyte/regex/syntax/errors.go b/vendor/github.com/quasilyte/regex/syntax/errors.go index cfafc1d0e8..beefba5f9c 100644 --- a/vendor/github.com/quasilyte/regex/syntax/errors.go +++ b/vendor/github.com/quasilyte/regex/syntax/errors.go @@ -1,9 +1,5 @@ package syntax -import ( - "fmt" -) - type ParseError struct { Pos Position Message string @@ -11,17 +7,21 @@ type ParseError struct { func (e ParseError) Error() string { return e.Message } -func throwfPos(pos Position, format string, args ...interface{}) { - panic(ParseError{ - Pos: pos, - Message: fmt.Sprintf(format, args...), - }) +func throw(pos Position, message string) { + panic(ParseError{Pos: pos, Message: message}) +} + +func throwExpectedFound(pos Position, expected, found string) { + throw(pos, "expected '"+expected+"', found '"+found+"'") +} + +func throwUnexpectedToken(pos Position, token string) { + throw(pos, "unexpected token: "+token) } -func throwErrorf(posBegin, posEnd int, format string, args ...interface{}) { - pos := Position{ - Begin: uint16(posBegin), - End: uint16(posEnd), +func newPos(begin, end int) Position { + return Position{ + Begin: uint16(begin), + End: uint16(end), } - throwfPos(pos, format, args...) } diff --git a/vendor/github.com/quasilyte/regex/syntax/lexer.go b/vendor/github.com/quasilyte/regex/syntax/lexer.go index e92b038c20..aae146c2e6 100644 --- a/vendor/github.com/quasilyte/regex/syntax/lexer.go +++ b/vendor/github.com/quasilyte/regex/syntax/lexer.go @@ -2,7 +2,6 @@ package syntax import ( "strings" - "unicode" "unicode/utf8" ) @@ -111,7 +110,7 @@ func (l *lexer) Peek() token { func (l *lexer) scan() { for l.pos < len(l.input) { ch := l.input[l.pos] - if ch > unicode.MaxASCII { + if ch >= utf8.RuneSelf { _, size := utf8.DecodeRuneInString(l.input[l.pos:]) l.pushTok(tokChar, size) l.maybeInsertConcat() @@ -161,7 +160,7 @@ func (l *lexer) scan() { } else if l.tryScanGroupName(l.pos + 2) { } else if l.tryScanGroupFlags(l.pos + 2) { } else { - throwErrorf(l.pos, l.pos+1, "group token is incomplete") + throw(newPos(l.pos, l.pos+1), "group token is incomplete") } } } else { @@ -190,7 +189,7 @@ func (l *lexer) scanCharClass() { for l.pos < len(l.input) { ch := l.input[l.pos] - if ch > unicode.MaxASCII { + if ch >= utf8.RuneSelf { _, size := utf8.DecodeRuneInString(l.input[l.pos:]) l.pushTok(tokChar, size) continue @@ -224,17 +223,17 @@ func (l *lexer) scanCharClass() { func (l *lexer) scanEscape(insideCharClass bool) { s := l.input if l.pos+1 >= len(s) { - throwErrorf(l.pos, l.pos+1, `unexpected end of pattern: trailing '\'`) + throw(newPos(l.pos, l.pos+1), `unexpected end of pattern: trailing '\'`) } switch { case s[l.pos+1] == 'p' || s[l.pos+1] == 'P': if l.pos+2 >= len(s) { - throwErrorf(l.pos, l.pos+2, "unexpected end of pattern: expected uni-class-short or '{'") + throw(newPos(l.pos, l.pos+2), "unexpected end of pattern: expected uni-class-short or '{'") } if s[l.pos+2] == '{' { j := strings.IndexByte(s[l.pos+2:], '}') if j < 0 { - throwErrorf(l.pos, l.pos+2, "can't find closing '}'") + throw(newPos(l.pos, l.pos+2), "can't find closing '}'") } l.pushTok(tokEscapeUniFull, 
len(`\p{`)+j) } else { @@ -242,12 +241,12 @@ func (l *lexer) scanEscape(insideCharClass bool) { } case s[l.pos+1] == 'x': if l.pos+2 >= len(s) { - throwErrorf(l.pos, l.pos+2, "unexpected end of pattern: expected hex-digit or '{'") + throw(newPos(l.pos, l.pos+2), "unexpected end of pattern: expected hex-digit or '{'") } if s[l.pos+2] == '{' { j := strings.IndexByte(s[l.pos+2:], '}') if j < 0 { - throwErrorf(l.pos, l.pos+2, "can't find closing '}'") + throw(newPos(l.pos, l.pos+2), "can't find closing '}'") } l.pushTok(tokEscapeHexFull, len(`\x{`)+j) } else { @@ -277,7 +276,7 @@ func (l *lexer) scanEscape(insideCharClass bool) { default: ch := l.byteAt(l.pos + 1) - if ch > unicode.MaxASCII { + if ch >= utf8.RuneSelf { _, size := utf8.DecodeRuneInString(l.input[l.pos+1:]) l.pushTok(tokEscapeChar, len(`\`)+size) return diff --git a/vendor/github.com/quasilyte/regex/syntax/operation.go b/vendor/github.com/quasilyte/regex/syntax/operation.go index 284e5dc5b4..0fc8fc521a 100644 --- a/vendor/github.com/quasilyte/regex/syntax/operation.go +++ b/vendor/github.com/quasilyte/regex/syntax/operation.go @@ -65,46 +65,51 @@ const ( // OpQuote is a \Q...\E enclosed literal. // Examples: `\Q.?\E` `\Q?q[]=1` - // - // Note that closing \E is not mandatory. + // FormQuoteUnclosed: `\Qabc` + // Args[0] - literal value (OpString) OpQuote // OpEscapeChar is a single char escape. // Examples: `\d` `\a` `\n` + // Args[0] - escaped value (OpString) OpEscapeChar // OpEscapeMeta is an escaped meta char. // Examples: `\(` `\[` `\+` + // Args[0] - escaped value (OpString) OpEscapeMeta // OpEscapeOctal is an octal char code escape (up to 3 digits). // Examples: `\123` `\12` + // Args[0] - escaped value (OpString) OpEscapeOctal // OpEscapeHex is a hex char code escape. // Examples: `\x7F` `\xF7` // FormEscapeHexFull examples: `\x{10FFFF}` `\x{F}`. + // Args[0] - escaped value (OpString) OpEscapeHex // OpEscapeUni is a Unicode char class escape. // Examples: `\pS` `\pL` `\PL` // FormEscapeUniFull examples: `\p{Greek}` `\p{Symbol}` `\p{^L}` + // Args[0] - escaped value (OpString) OpEscapeUni // OpCharClass is a char class enclosed in []. // Examples: `[abc]` `[a-z0-9\]]` - // Args - char class elements (can include OpCharRange and OpPosixClass). + // Args - char class elements (can include OpCharRange and OpPosixClass) OpCharClass // OpNegCharClass is a negated char class enclosed in []. // Examples: `[^abc]` `[^a-z0-9\]]` - // Args - char class elements (can include OpCharRange and OpPosixClass). + // Args - char class elements (can include OpCharRange and OpPosixClass) OpNegCharClass // OpCharRange is an inclusive char range inside a char class. // Examples: `0-9` `A-Z` - // Args[0] - range lower bound (OpChar or OpEscape). - // Args[1] - range upper bound (OpChar or OpEscape). + // Args[0] - range lower bound + // Args[1] - range upper bound OpCharRange // OpPosixClass is a named ASCII char set inside a char class. 
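The lexer hunks above replace `ch > unicode.MaxASCII` with `ch >= utf8.RuneSelf`, which also lets the `unicode` import be dropped. A minimal standalone sketch, not part of the patch, of why the two byte checks agree:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	input := "a€"
	for i := 0; i < len(input); {
		ch := input[i]
		// utf8.RuneSelf (0x80) is the smallest byte value that cannot be a
		// complete single-byte rune, so for a byte this is equivalent to
		// ch > unicode.MaxASCII (0x7F) without importing unicode.
		if ch >= utf8.RuneSelf {
			r, size := utf8.DecodeRuneInString(input[i:])
			fmt.Printf("multi-byte rune %q (%d bytes)\n", r, size)
			i += size
			continue
		}
		fmt.Printf("ASCII byte %q\n", ch)
		i++
	}
}
```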
@@ -186,4 +191,5 @@ const ( FormEscapeUniFull FormNamedCaptureAngle FormNamedCaptureQuote + FormQuoteUnclosed ) diff --git a/vendor/github.com/quasilyte/regex/syntax/parser.go b/vendor/github.com/quasilyte/regex/syntax/parser.go index faf0f8b212..f1c154f315 100644 --- a/vendor/github.com/quasilyte/regex/syntax/parser.go +++ b/vendor/github.com/quasilyte/regex/syntax/parser.go @@ -2,7 +2,6 @@ package syntax import ( "errors" - "fmt" "strings" ) @@ -94,13 +93,39 @@ func newParser(opts *ParserOptions) *Parser { } } + p.prefixParselets[tokQ] = func(tok token) *Expr { + litPos := tok.pos + litPos.Begin += uint16(len(`\Q`)) + form := FormQuoteUnclosed + if strings.HasSuffix(p.tokenValue(tok), `\E`) { + litPos.End -= uint16(len(`\E`)) + form = FormDefault + } + lit := p.newExpr(OpString, litPos) + return p.newExprForm(OpQuote, form, tok.pos, lit) + } + p.prefixParselets[tokEscapeHexFull] = func(tok token) *Expr { - return p.newExprForm(OpEscapeHex, FormEscapeHexFull, tok.pos) + litPos := tok.pos + litPos.Begin += uint16(len(`\x{`)) + litPos.End -= uint16(len(`}`)) + lit := p.newExpr(OpString, litPos) + return p.newExprForm(OpEscapeHex, FormEscapeHexFull, tok.pos, lit) } p.prefixParselets[tokEscapeUniFull] = func(tok token) *Expr { - return p.newExprForm(OpEscapeUni, FormEscapeUniFull, tok.pos) + litPos := tok.pos + litPos.Begin += uint16(len(`\p{`)) + litPos.End -= uint16(len(`}`)) + lit := p.newExpr(OpString, litPos) + return p.newExprForm(OpEscapeUni, FormEscapeUniFull, tok.pos, lit) } + p.prefixParselets[tokEscapeHex] = func(tok token) *Expr { return p.parseEscape(OpEscapeHex, `\x`, tok) } + p.prefixParselets[tokEscapeOctal] = func(tok token) *Expr { return p.parseEscape(OpEscapeOctal, `\`, tok) } + p.prefixParselets[tokEscapeChar] = func(tok token) *Expr { return p.parseEscape(OpEscapeChar, `\`, tok) } + p.prefixParselets[tokEscapeMeta] = func(tok token) *Expr { return p.parseEscape(OpEscapeMeta, `\`, tok) } + p.prefixParselets[tokEscapeUni] = func(tok token) *Expr { return p.parseEscape(OpEscapeUni, `\p`, tok) } + p.prefixParselets[tokLparen] = func(tok token) *Expr { return p.parseGroup(OpCapture, tok) } p.prefixParselets[tokLparenAtomic] = func(tok token) *Expr { return p.parseGroup(OpAtomicGroup, tok) } p.prefixParselets[tokLparenPositiveLookahead] = func(tok token) *Expr { return p.parseGroup(OpPositiveLookahead, tok) } @@ -163,6 +188,10 @@ func (p *Parser) setValues(e *Expr) { e.Value = p.exprValue(e) } +func (p *Parser) tokenValue(tok token) string { + return p.out.Pattern[tok.pos.Begin:tok.pos.End] +} + func (p *Parser) exprValue(e *Expr) string { return p.out.Pattern[e.Begin():e.End()] } @@ -239,7 +268,7 @@ func (p *Parser) allocExpr() *Expr { func (p *Parser) expect(kind tokenKind) Position { tok := p.lexer.NextToken() if tok.kind != kind { - throwErrorf(int(tok.pos.Begin), int(tok.pos.End), "expected '%s', found '%s'", kind, tok.kind) + throwExpectedFound(tok.pos, kind.String(), tok.kind.String()) } return tok.pos } @@ -248,7 +277,7 @@ func (p *Parser) parseExpr(precedence int) *Expr { tok := p.lexer.NextToken() prefix := p.prefixParselets[tok.kind] if prefix == nil { - throwfPos(tok.pos, "unexpected token: %v", tok) + throwUnexpectedToken(tok.pos, tok.String()) } left := prefix(tok) @@ -277,7 +306,7 @@ func (p *Parser) parseCharClass(op Operation, tok token) *Expr { break } if next.kind == tokNone { - throwfPos(tok.pos, "unterminated '['") + throw(tok.pos, "unterminated '['") } } @@ -400,6 +429,13 @@ func (p *Parser) parseGroupWithFlags(tok token) *Expr { return result } +func (p 
*Parser) parseEscape(op Operation, prefix string, tok token) *Expr { + litPos := tok.pos + litPos.Begin += uint16(len(prefix)) + lit := p.newExpr(OpString, litPos) + return p.newExpr(op, tok.pos, lit) +} + func (p *Parser) precedenceOf(tok token) int { switch tok.kind { case tokPipe: @@ -436,36 +472,32 @@ func (p *Parser) newPCRE(source string) (*RegexpPCRE, error) { return nil, errors.New("whitespace is not a valid delimiter") } if isAlphanumeric(delim) { - return nil, fmt.Errorf("'%c' is not a valid delimiter", delim) + return nil, errors.New("'" + string(delim) + "' is not a valid delimiter") } } - j := strings.LastIndexByte(source, endDelim) + const delimLen = 1 + j := strings.LastIndexByte(source[delimLen:], endDelim) if j == -1 { - return nil, fmt.Errorf("can't find '%c' ending delimiter", endDelim) + return nil, errors.New("can't find '" + string(endDelim) + "' ending delimiter") } + j += delimLen pcre := &RegexpPCRE{ - Pattern: source[1:j], + Pattern: source[delimLen:j], Source: source, Delim: [2]byte{delim, endDelim}, - Modifiers: source[j+1:], + Modifiers: source[j+delimLen:], } return pcre, nil } var tok2op = [256]Operation{ - tokDollar: OpDollar, - tokCaret: OpCaret, - tokDot: OpDot, - tokChar: OpChar, - tokMinus: OpChar, - tokEscapeChar: OpEscapeChar, - tokEscapeMeta: OpEscapeMeta, - tokEscapeHex: OpEscapeHex, - tokEscapeOctal: OpEscapeOctal, - tokEscapeUni: OpEscapeUni, - tokPosixClass: OpPosixClass, - tokQ: OpQuote, - tokComment: OpComment, + tokDollar: OpDollar, + tokCaret: OpCaret, + tokDot: OpDot, + tokChar: OpChar, + tokMinus: OpChar, + tokPosixClass: OpPosixClass, + tokComment: OpComment, } diff --git a/vendor/github.com/quasilyte/regex/syntax/utils.go b/vendor/github.com/quasilyte/regex/syntax/utils.go index 934680c8ba..e5b6548254 100644 --- a/vendor/github.com/quasilyte/regex/syntax/utils.go +++ b/vendor/github.com/quasilyte/regex/syntax/utils.go @@ -2,7 +2,7 @@ package syntax func isSpace(ch byte) bool { switch ch { - case '\r', '\n', '\t', '\f', '\v': + case '\r', '\n', '\t', '\f', '\v', ' ': return true default: return false diff --git a/vendor/github.com/ryancurrah/gomodguard/.golangci.yml b/vendor/github.com/ryancurrah/gomodguard/.golangci.yml index 0fbf6c04ac..a0e6fd55ea 100644 --- a/vendor/github.com/ryancurrah/gomodguard/.golangci.yml +++ b/vendor/github.com/ryancurrah/gomodguard/.golangci.yml @@ -60,7 +60,6 @@ linters: enable: - asciicheck - bodyclose - - deadcode - dogsled - dupl - durationcheck @@ -100,7 +99,6 @@ linters: - rowserrcheck - sqlclosecheck - staticcheck - - structcheck - stylecheck - testpackage - thelper @@ -109,6 +107,5 @@ linters: - unconvert - unparam - unused - - varcheck - whitespace - wsl diff --git a/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml b/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml index 3daecfd798..f3675a9c27 100644 --- a/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml +++ b/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml @@ -3,12 +3,12 @@ builds: env: - CGO_ENABLED=0 archives: -- replacements: - darwin: Darwin - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 +- name_template: >- + {{ .ProjectName }}_ + {{- title .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} checksum: name_template: 'checksums.txt' dockers: @@ -21,7 +21,6 @@ dockers: dockerfile: Dockerfile.goreleaser build_flag_templates: - "--pull" - - "--build-arg=gomodguard_VERSION={{.Version}}" - "--label=org.opencontainers.image.created={{.Date}}" - 
"--label=org.opencontainers.image.name={{.ProjectName}}" - "--label=org.opencontainers.image.revision={{.FullCommit}}" diff --git a/vendor/github.com/ryancurrah/gomodguard/Dockerfile b/vendor/github.com/ryancurrah/gomodguard/Dockerfile index 719a0ebdb6..2f1d3340c1 100644 --- a/vendor/github.com/ryancurrah/gomodguard/Dockerfile +++ b/vendor/github.com/ryancurrah/gomodguard/Dockerfile @@ -1,16 +1,12 @@ -ARG GO_VERSION=1.14.2 -ARG ALPINE_VERSION=3.11 -ARG gomodguard_VERSION= - # ---- Build container -FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS builder +FROM golang:alpine AS builder WORKDIR /gomodguard COPY . . RUN apk add --no-cache git RUN go build -o gomodguard cmd/gomodguard/main.go # ---- App container -FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} +FROM golang:alpine WORKDIR / RUN apk --no-cache add ca-certificates COPY --from=builder gomodguard/gomodguard / diff --git a/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser b/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser index 57a042a67c..ccaaa8959f 100644 --- a/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser +++ b/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser @@ -1,9 +1,5 @@ -ARG GO_VERSION=1.14.2 -ARG ALPINE_VERSION=3.11 -ARG gomodguard_VERSION= - # ---- App container -FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} +FROM golang:alpine WORKDIR / RUN apk --no-cache add ca-certificates COPY gomodguard /gomodguard diff --git a/vendor/github.com/ryancurrah/gomodguard/Makefile b/vendor/github.com/ryancurrah/gomodguard/Makefile index 766675799d..5235d5aade 100644 --- a/vendor/github.com/ryancurrah/gomodguard/Makefile +++ b/vendor/github.com/ryancurrah/gomodguard/Makefile @@ -24,6 +24,10 @@ cover: dockerrun: dockerbuild docker run -v "${current_dir}/.gomodguard.yaml:/.gomodguard.yaml" ryancurrah/gomodguard:latest +.PHONY: snapshot +snapshot: + goreleaser --rm-dist --snapshot + .PHONY: release release: goreleaser --rm-dist @@ -39,4 +43,4 @@ install-tools-mac: .PHONY: install-go-tools install-go-tools: - go get github.com/t-yuki/gocover-cobertura + go install -v github.com/t-yuki/gocover-cobertura diff --git a/vendor/github.com/ryancurrah/gomodguard/README.md b/vendor/github.com/ryancurrah/gomodguard/README.md index 8e2e416888..4945f01012 100644 --- a/vendor/github.com/ryancurrah/gomodguard/README.md +++ b/vendor/github.com/ryancurrah/gomodguard/README.md @@ -115,7 +115,7 @@ Resulting checkstyle file ## Install ``` -go get -u github.com/ryancurrah/gomodguard/cmd/gomodguard +go install github.com/ryancurrah/gomodguard/cmd/gomodguard ``` ## Develop diff --git a/vendor/github.com/ryancurrah/gomodguard/allowed.go b/vendor/github.com/ryancurrah/gomodguard/allowed.go new file mode 100644 index 0000000000..5b0d26f835 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/allowed.go @@ -0,0 +1,39 @@ +package gomodguard + +import "strings" + +// Allowed is a list of modules and module +// domains that are allowed to be used. +type Allowed struct { + Modules []string `yaml:"modules"` + Domains []string `yaml:"domains"` +} + +// IsAllowedModule returns true if the given module +// name is in the allowed modules list. +func (a *Allowed) IsAllowedModule(moduleName string) bool { + allowedModules := a.Modules + + for i := range allowedModules { + if strings.TrimSpace(moduleName) == strings.TrimSpace(allowedModules[i]) { + return true + } + } + + return false +} + +// IsAllowedModuleDomain returns true if the given modules domain is +// in the allowed module domains list. 
+func (a *Allowed) IsAllowedModuleDomain(moduleName string) bool { + allowedDomains := a.Domains + + for i := range allowedDomains { + if strings.HasPrefix(strings.TrimSpace(strings.ToLower(moduleName)), + strings.TrimSpace(strings.ToLower(allowedDomains[i]))) { + return true + } + } + + return false +} diff --git a/vendor/github.com/ryancurrah/gomodguard/blocked.go b/vendor/github.com/ryancurrah/gomodguard/blocked.go new file mode 100644 index 0000000000..2a6e5c2159 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/blocked.go @@ -0,0 +1,189 @@ +package gomodguard + +import ( + "fmt" + "strings" + + "github.com/Masterminds/semver" +) + +// Blocked is a list of modules that are +// blocked and not to be used. +type Blocked struct { + Modules BlockedModules `yaml:"modules"` + Versions BlockedVersions `yaml:"versions"` + LocalReplaceDirectives bool `yaml:"local_replace_directives"` +} + +// BlockedVersion has a version constraint and a reason why the module version is blocked. +type BlockedVersion struct { + Version string `yaml:"version"` + Reason string `yaml:"reason"` +} + +// IsLintedModuleVersionBlocked returns true if a version constraint is specified and the +// linted module version matches the constraint. +func (r *BlockedVersion) IsLintedModuleVersionBlocked(lintedModuleVersion string) bool { + if r.Version == "" { + return false + } + + constraint, err := semver.NewConstraint(r.Version) + if err != nil { + return false + } + + version, err := semver.NewVersion(lintedModuleVersion) + if err != nil { + return false + } + + meet := constraint.Check(version) + + return meet +} + +// Message returns the reason why the module version is blocked. +func (r *BlockedVersion) Message(lintedModuleVersion string) string { + var sb strings.Builder + + // Add version constraint to message. + _, _ = fmt.Fprintf(&sb, "version `%s` is blocked because it does not meet the version constraint `%s`.", + lintedModuleVersion, r.Version) + + if r.Reason == "" { + return sb.String() + } + + // Add reason to message. + _, _ = fmt.Fprintf(&sb, " %s.", strings.TrimRight(r.Reason, ".")) + + return sb.String() +} + +// BlockedModule has alternative modules to use and a reason why the module is blocked. +type BlockedModule struct { + Recommendations []string `yaml:"recommendations"` + Reason string `yaml:"reason"` +} + +// IsCurrentModuleARecommendation returns true if the current module is in the Recommendations list. +// +// If the current go.mod file being linted is a recommended module of a +// blocked module and it imports that blocked module, do not set as blocked. +// This could mean that the linted module is a wrapper for that blocked module. +func (r *BlockedModule) IsCurrentModuleARecommendation(currentModuleName string) bool { + if r == nil { + return false + } + + for n := range r.Recommendations { + if strings.TrimSpace(currentModuleName) == strings.TrimSpace(r.Recommendations[n]) { + return true + } + } + + return false +} + +// Message returns the reason why the module is blocked and a list of recommended modules if provided. 
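A short usage sketch for the `Allowed` and `BlockedVersion` types extracted above; this is not part of the patch, and the module names, domain, and version constraint are arbitrary examples:

```go
package main

import (
	"fmt"

	"github.com/ryancurrah/gomodguard"
)

func main() {
	// Allow-list matching: an exact (whitespace-trimmed) module match,
	// or a case-insensitive domain prefix match.
	allowed := gomodguard.Allowed{
		Modules: []string{"golang.org/x/mod"},
		Domains: []string{"github.com"},
	}
	fmt.Println(allowed.IsAllowedModule("golang.org/x/mod"))                        // true
	fmt.Println(allowed.IsAllowedModuleDomain("github.com/ryancurrah/gomodguard")) // true

	// Version blocking: reports true only when a constraint is configured
	// and the linted version satisfies it (parsed with Masterminds/semver).
	blocked := gomodguard.BlockedVersion{Version: "< 1.1.0", Reason: "example reason"}
	fmt.Println(blocked.IsLintedModuleVersionBlocked("1.0.0")) // true
	fmt.Println(blocked.IsLintedModuleVersionBlocked("1.2.3")) // false
	fmt.Println(blocked.Message("1.0.0"))
}
```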
+func (r *BlockedModule) Message() string { + var sb strings.Builder + + // Add recommendations to message + for i := range r.Recommendations { + switch { + case len(r.Recommendations) == 1: + _, _ = fmt.Fprintf(&sb, "`%s` is a recommended module.", r.Recommendations[i]) + case (i+1) != len(r.Recommendations) && (i+1) == (len(r.Recommendations)-1): + _, _ = fmt.Fprintf(&sb, "`%s` ", r.Recommendations[i]) + case (i + 1) != len(r.Recommendations): + _, _ = fmt.Fprintf(&sb, "`%s`, ", r.Recommendations[i]) + default: + _, _ = fmt.Fprintf(&sb, "and `%s` are recommended modules.", r.Recommendations[i]) + } + } + + if r.Reason == "" { + return sb.String() + } + + // Add reason to message + if sb.Len() == 0 { + _, _ = fmt.Fprintf(&sb, "%s.", strings.TrimRight(r.Reason, ".")) + } else { + _, _ = fmt.Fprintf(&sb, " %s.", strings.TrimRight(r.Reason, ".")) + } + + return sb.String() +} + +// HasRecommendations returns true if the blocked package has +// recommended modules. +func (r *BlockedModule) HasRecommendations() bool { + if r == nil { + return false + } + + return len(r.Recommendations) > 0 +} + +// BlockedVersions a list of blocked modules by a version constraint. +type BlockedVersions []map[string]BlockedVersion + +// Get returns the module names that are blocked. +func (b BlockedVersions) Get() []string { + modules := make([]string, len(b)) + + for n := range b { + for module := range b[n] { + modules[n] = module + break + } + } + + return modules +} + +// GetBlockReason returns a block version if one is set for the provided linted module name. +func (b BlockedVersions) GetBlockReason(lintedModuleName string) *BlockedVersion { + for _, blockedModule := range b { + for blockedModuleName, blockedVersion := range blockedModule { + if strings.TrimSpace(lintedModuleName) == strings.TrimSpace(blockedModuleName) { + return &blockedVersion + } + } + } + + return nil +} + +// BlockedModules a list of blocked modules. +type BlockedModules []map[string]BlockedModule + +// Get returns the module names that are blocked. +func (b BlockedModules) Get() []string { + modules := make([]string, len(b)) + + for n := range b { + for module := range b[n] { + modules[n] = module + break + } + } + + return modules +} + +// GetBlockReason returns a block module if one is set for the provided linted module name. +func (b BlockedModules) GetBlockReason(lintedModuleName string) *BlockedModule { + for _, blockedModule := range b { + for blockedModuleName, blockedModule := range blockedModule { + if strings.TrimSpace(lintedModuleName) == strings.TrimSpace(blockedModuleName) { + return &blockedModule + } + } + } + + return nil +} diff --git a/vendor/github.com/ryancurrah/gomodguard/cmd.go b/vendor/github.com/ryancurrah/gomodguard/cmd.go deleted file mode 100644 index a26fac8900..0000000000 --- a/vendor/github.com/ryancurrah/gomodguard/cmd.go +++ /dev/null @@ -1,247 +0,0 @@ -package gomodguard - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" - - "github.com/go-xmlfmt/xmlfmt" - "github.com/mitchellh/go-homedir" - "github.com/phayes/checkstyle" - "gopkg.in/yaml.v2" -) - -const ( - errFindingHomedir = "unable to find home directory, %w" - errReadingConfigFile = "could not read config file: %w" - errParsingConfigFile = "could not parse config file: %w" -) - -var ( - configFile = ".gomodguard.yaml" - logger = log.New(os.Stderr, "", 0) - errFindingConfigFile = fmt.Errorf("could not find config file") -) - -// Run the gomodguard linter. Returns the exit code to use. 
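The CLI entrypoint deleted here loaded `.gomodguard.yaml` into `Configuration` (now defined in processor.go further down) with `gopkg.in/yaml.v2`. A hypothetical config decoded the same way, not part of the patch; the field names follow the `yaml` tags in allowed.go and blocked.go above, while every module name and reason is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/ryancurrah/gomodguard"
	"gopkg.in/yaml.v2"
)

// rawConfig mirrors the yaml tags on Configuration, Allowed, and Blocked;
// the module names and reasons are placeholders, not recommendations.
const rawConfig = `
allowed:
  domains:
    - golang.org
blocked:
  modules:
    - example.com/some/module:
        recommendations:
          - example.com/better/module
        reason: "example reason"
  versions:
    - example.com/other/module:
        version: "< 1.4.0"
        reason: "example reason"
  local_replace_directives: true
`

func main() {
	var cfg gomodguard.Configuration
	if err := yaml.Unmarshal([]byte(rawConfig), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Blocked.Modules.Get())  // [example.com/some/module]
	fmt.Println(cfg.Blocked.Versions.Get()) // [example.com/other/module]
}
```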
-//nolint:funlen -func Run() int { - var ( - args []string - help bool - noTest bool - report string - reportFile string - issuesExitCode int - cwd, _ = os.Getwd() - ) - - flag.BoolVar(&help, "h", false, "Show this help text") - flag.BoolVar(&help, "help", false, "") - flag.BoolVar(&noTest, "n", false, "Don't lint test files") - flag.BoolVar(&noTest, "no-test", false, "") - flag.StringVar(&report, "r", "", "Report results to one of the following formats: checkstyle. "+ - "A report file destination must also be specified") - flag.StringVar(&report, "report", "", "") - flag.StringVar(&reportFile, "f", "", "Report results to the specified file. A report type must also be specified") - flag.StringVar(&reportFile, "file", "", "") - flag.IntVar(&issuesExitCode, "i", 2, "Exit code when issues were found") - flag.IntVar(&issuesExitCode, "issues-exit-code", 2, "") - flag.Parse() - - report = strings.TrimSpace(strings.ToLower(report)) - - if help { - showHelp() - return 0 - } - - if report != "" && report != "checkstyle" { - logger.Fatalf("error: invalid report type '%s'", report) - } - - if report != "" && reportFile == "" { - logger.Fatalf("error: a report file must be specified when a report is enabled") - } - - if report == "" && reportFile != "" { - logger.Fatalf("error: a report type must be specified when a report file is enabled") - } - - args = flag.Args() - if len(args) == 0 { - args = []string{"./..."} - } - - config, err := GetConfig(configFile) - if err != nil { - logger.Fatalf("error: %s", err) - } - - filteredFiles := GetFilteredFiles(cwd, noTest, args) - - processor, err := NewProcessor(config) - if err != nil { - logger.Fatalf("error: %s", err) - } - - logger.Printf("info: allowed modules, %+v", config.Allowed.Modules) - logger.Printf("info: allowed module domains, %+v", config.Allowed.Domains) - logger.Printf("info: blocked modules, %+v", config.Blocked.Modules.Get()) - logger.Printf("info: blocked modules with version constraints, %+v", config.Blocked.Versions.Get()) - - results := processor.ProcessFiles(filteredFiles) - - if report == "checkstyle" { - err := WriteCheckstyle(reportFile, results) - if err != nil { - logger.Fatalf("error: %s", err) - } - } - - for _, r := range results { - fmt.Println(r.String()) - } - - if len(results) > 0 { - return issuesExitCode - } - - return 0 -} - -// GetConfig from YAML file. -func GetConfig(configFile string) (*Configuration, error) { - config := Configuration{} - - home, err := homedir.Dir() - if err != nil { - return nil, fmt.Errorf(errFindingHomedir, err) - } - - cfgFile := "" - homeDirCfgFile := filepath.Join(home, configFile) - - switch { - case fileExists(configFile): - cfgFile = configFile - case fileExists(homeDirCfgFile): - cfgFile = homeDirCfgFile - default: - return nil, fmt.Errorf("%w: %s %s", errFindingConfigFile, configFile, homeDirCfgFile) - } - - data, err := ioutil.ReadFile(cfgFile) - if err != nil { - return nil, fmt.Errorf(errReadingConfigFile, err) - } - - err = yaml.Unmarshal(data, &config) - if err != nil { - return nil, fmt.Errorf(errParsingConfigFile, err) - } - - return &config, nil -} - -// GetFilteredFiles returns files based on search string arguments and filters. -func GetFilteredFiles(cwd string, skipTests bool, args []string) []string { - var ( - foundFiles = []string{} - filteredFiles = []string{} - ) - - for _, f := range args { - if strings.HasSuffix(f, "/...") { - dir, _ := filepath.Split(f) - - foundFiles = append(foundFiles, expandGoWildcard(dir)...) 
- - continue - } - - if _, err := os.Stat(f); err == nil { - foundFiles = append(foundFiles, f) - } - } - - // Use relative path to print shorter names, sort out test foundFiles if chosen. - for _, f := range foundFiles { - if skipTests { - if strings.HasSuffix(f, "_test.go") { - continue - } - } - - if relativePath, err := filepath.Rel(cwd, f); err == nil { - filteredFiles = append(filteredFiles, relativePath) - - continue - } - - filteredFiles = append(filteredFiles, f) - } - - return filteredFiles -} - -// showHelp text for command line. -func showHelp() { - helpText := `Usage: gomodguard [files...] -Also supports package syntax but will use it in relative path, i.e. ./pkg/... -Flags:` - fmt.Println(helpText) - flag.PrintDefaults() -} - -// WriteCheckstyle takes the results and writes them to a checkstyle formated file. -func WriteCheckstyle(checkstyleFilePath string, results []Issue) error { - check := checkstyle.New() - - for i := range results { - file := check.EnsureFile(results[i].FileName) - file.AddError(checkstyle.NewError(results[i].LineNumber, 1, checkstyle.SeverityError, results[i].Reason, - "gomodguard")) - } - - checkstyleXML := fmt.Sprintf("\n%s", check.String()) - - err := ioutil.WriteFile(checkstyleFilePath, []byte(xmlfmt.FormatXML(checkstyleXML, "", " ")), 0644) // nolint:gosec - if err != nil { - return err - } - - return nil -} - -// fileExists returns true if the file path provided exists. -func fileExists(filename string) bool { - info, err := os.Stat(filename) - if os.IsNotExist(err) { - return false - } - - return !info.IsDir() -} - -// expandGoWildcard path provided. -func expandGoWildcard(root string) []string { - foundFiles := []string{} - - _ = filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - // Only append go foundFiles. - if !strings.HasSuffix(info.Name(), ".go") { - return nil - } - - foundFiles = append(foundFiles, path) - - return nil - }) - - return foundFiles -} diff --git a/vendor/github.com/ryancurrah/gomodguard/gomodguard.go b/vendor/github.com/ryancurrah/gomodguard/gomodguard.go deleted file mode 100644 index efd0d17ef1..0000000000 --- a/vendor/github.com/ryancurrah/gomodguard/gomodguard.go +++ /dev/null @@ -1,499 +0,0 @@ -package gomodguard - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "go/parser" - "go/token" - "io/ioutil" - "os" - "os/exec" - "regexp" - "strings" - - "github.com/Masterminds/semver" - - "golang.org/x/mod/modfile" -) - -const ( - goModFilename = "go.mod" - errReadingGoModFile = "unable to read module file %s: %w" - errParsingGoModFile = "unable to parse module file %s: %w" -) - -var ( - blockReasonNotInAllowedList = "import of package `%s` is blocked because the module is not in the " + - "allowed modules list." - blockReasonInBlockedList = "import of package `%s` is blocked because the module is in the " + - "blocked modules list." - blockReasonHasLocalReplaceDirective = "import of package `%s` is blocked because the module has a " + - "local replace directive." - - // startsWithVersion is used to test when a string begins with the version identifier of a module, - // after having stripped the prefix base module name. IE "github.com/foo/bar/v2/baz" => "/v2/baz" - // probably indicates that the module is actually github.com/foo/bar/v2, not github.com/foo/bar. - startsWithVersion = regexp.MustCompile(`^\/v[0-9]+`) -) - -// BlockedVersion has a version constraint a reason why the the module version is blocked. 
-type BlockedVersion struct { - Version string `yaml:"version"` - Reason string `yaml:"reason"` -} - -// IsLintedModuleVersionBlocked returns true if a version constraint is specified and the -// linted module version matches the constraint. -func (r *BlockedVersion) IsLintedModuleVersionBlocked(lintedModuleVersion string) bool { - if r.Version == "" { - return false - } - - constraint, err := semver.NewConstraint(r.Version) - if err != nil { - return false - } - - version, err := semver.NewVersion(lintedModuleVersion) - if err != nil { - return false - } - - meet := constraint.Check(version) - - return meet -} - -// Message returns the reason why the module version is blocked. -func (r *BlockedVersion) Message(lintedModuleVersion string) string { - var sb strings.Builder - - // Add version contraint to message. - _, _ = fmt.Fprintf(&sb, "version `%s` is blocked because it does not meet the version constraint `%s`.", - lintedModuleVersion, r.Version) - - if r.Reason == "" { - return sb.String() - } - - // Add reason to message. - _, _ = fmt.Fprintf(&sb, " %s.", strings.TrimRight(r.Reason, ".")) - - return sb.String() -} - -// BlockedModule has alternative modules to use and a reason why the module is blocked. -type BlockedModule struct { - Recommendations []string `yaml:"recommendations"` - Reason string `yaml:"reason"` -} - -// IsCurrentModuleARecommendation returns true if the current module is in the Recommendations list. -// -// If the current go.mod file being linted is a recommended module of a -// blocked module and it imports that blocked module, do not set as blocked. -// This could mean that the linted module is a wrapper for that blocked module. -func (r *BlockedModule) IsCurrentModuleARecommendation(currentModuleName string) bool { - if r == nil { - return false - } - - for n := range r.Recommendations { - if strings.TrimSpace(currentModuleName) == strings.TrimSpace(r.Recommendations[n]) { - return true - } - } - - return false -} - -// Message returns the reason why the module is blocked and a list of recommended modules if provided. -func (r *BlockedModule) Message() string { - var sb strings.Builder - - // Add recommendations to message - for i := range r.Recommendations { - switch { - case len(r.Recommendations) == 1: - _, _ = fmt.Fprintf(&sb, "`%s` is a recommended module.", r.Recommendations[i]) - case (i+1) != len(r.Recommendations) && (i+1) == (len(r.Recommendations)-1): - _, _ = fmt.Fprintf(&sb, "`%s` ", r.Recommendations[i]) - case (i + 1) != len(r.Recommendations): - _, _ = fmt.Fprintf(&sb, "`%s`, ", r.Recommendations[i]) - default: - _, _ = fmt.Fprintf(&sb, "and `%s` are recommended modules.", r.Recommendations[i]) - } - } - - if r.Reason == "" { - return sb.String() - } - - // Add reason to message - if sb.Len() == 0 { - _, _ = fmt.Fprintf(&sb, "%s.", strings.TrimRight(r.Reason, ".")) - } else { - _, _ = fmt.Fprintf(&sb, " %s.", strings.TrimRight(r.Reason, ".")) - } - - return sb.String() -} - -// HasRecommendations returns true if the blocked package has -// recommended modules. -func (r *BlockedModule) HasRecommendations() bool { - if r == nil { - return false - } - - return len(r.Recommendations) > 0 -} - -// BlockedVersions a list of blocked modules by a version constraint. -type BlockedVersions []map[string]BlockedVersion - -// Get returns the module names that are blocked. 
-func (b BlockedVersions) Get() []string { - modules := make([]string, len(b)) - - for n := range b { - for module := range b[n] { - modules[n] = module - break - } - } - - return modules -} - -// GetBlockReason returns a block version if one is set for the provided linted module name. -func (b BlockedVersions) GetBlockReason(lintedModuleName string) *BlockedVersion { - for _, blockedModule := range b { - for blockedModuleName, blockedVersion := range blockedModule { - if strings.TrimSpace(lintedModuleName) == strings.TrimSpace(blockedModuleName) { - return &blockedVersion - } - } - } - - return nil -} - -// BlockedModules a list of blocked modules. -type BlockedModules []map[string]BlockedModule - -// Get returns the module names that are blocked. -func (b BlockedModules) Get() []string { - modules := make([]string, len(b)) - - for n := range b { - for module := range b[n] { - modules[n] = module - break - } - } - - return modules -} - -// GetBlockReason returns a block module if one is set for the provided linted module name. -func (b BlockedModules) GetBlockReason(lintedModuleName string) *BlockedModule { - for _, blockedModule := range b { - for blockedModuleName, blockedModule := range blockedModule { - if strings.TrimSpace(lintedModuleName) == strings.TrimSpace(blockedModuleName) { - return &blockedModule - } - } - } - - return nil -} - -// Allowed is a list of modules and module -// domains that are allowed to be used. -type Allowed struct { - Modules []string `yaml:"modules"` - Domains []string `yaml:"domains"` -} - -// IsAllowedModule returns true if the given module -// name is in the allowed modules list. -func (a *Allowed) IsAllowedModule(moduleName string) bool { - allowedModules := a.Modules - - for i := range allowedModules { - if strings.TrimSpace(moduleName) == strings.TrimSpace(allowedModules[i]) { - return true - } - } - - return false -} - -// IsAllowedModuleDomain returns true if the given modules domain is -// in the allowed module domains list. -func (a *Allowed) IsAllowedModuleDomain(moduleName string) bool { - allowedDomains := a.Domains - - for i := range allowedDomains { - if strings.HasPrefix(strings.TrimSpace(strings.ToLower(moduleName)), - strings.TrimSpace(strings.ToLower(allowedDomains[i]))) { - return true - } - } - - return false -} - -// Blocked is a list of modules that are -// blocked and not to be used. -type Blocked struct { - Modules BlockedModules `yaml:"modules"` - Versions BlockedVersions `yaml:"versions"` - LocalReplaceDirectives bool `yaml:"local_replace_directives"` -} - -// Configuration of gomodguard allow and block lists. -type Configuration struct { - Allowed Allowed `yaml:"allowed"` - Blocked Blocked `yaml:"blocked"` -} - -// Issue represents the result of one error. -type Issue struct { - FileName string - LineNumber int - Position token.Position - Reason string -} - -// String returns the filename, line -// number and reason of a Issue. -func (r *Issue) String() string { - return fmt.Sprintf("%s:%d:1 %s", r.FileName, r.LineNumber, r.Reason) -} - -// Processor processes Go files. -type Processor struct { - Config *Configuration - Modfile *modfile.File - blockedModulesFromModFile map[string][]string -} - -// NewProcessor will create a Processor to lint blocked packages. 
-func NewProcessor(config *Configuration) (*Processor, error) { - goModFileBytes, err := loadGoModFile() - if err != nil { - return nil, fmt.Errorf(errReadingGoModFile, goModFilename, err) - } - - modFile, err := modfile.Parse(goModFilename, goModFileBytes, nil) - if err != nil { - return nil, fmt.Errorf(errParsingGoModFile, goModFilename, err) - } - - p := &Processor{ - Config: config, - Modfile: modFile, - } - - p.SetBlockedModules() - - return p, nil -} - -// ProcessFiles takes a string slice with file names (full paths) -// and lints them. -func (p *Processor) ProcessFiles(filenames []string) (issues []Issue) { - for _, filename := range filenames { - data, err := ioutil.ReadFile(filename) - if err != nil { - issues = append(issues, Issue{ - FileName: filename, - LineNumber: 0, - Reason: fmt.Sprintf("unable to read file, file cannot be linted (%s)", err.Error()), - }) - - continue - } - - issues = append(issues, p.process(filename, data)...) - } - - return issues -} - -// process file imports and add lint error if blocked package is imported. -func (p *Processor) process(filename string, data []byte) (issues []Issue) { - fileSet := token.NewFileSet() - - file, err := parser.ParseFile(fileSet, filename, data, parser.ParseComments) - if err != nil { - issues = append(issues, Issue{ - FileName: filename, - LineNumber: 0, - Reason: fmt.Sprintf("invalid syntax, file cannot be linted (%s)", err.Error()), - }) - - return - } - - imports := file.Imports - for n := range imports { - importedPkg := strings.TrimSpace(strings.Trim(imports[n].Path.Value, "\"")) - - blockReasons := p.isBlockedPackageFromModFile(importedPkg) - if blockReasons == nil { - continue - } - - for _, blockReason := range blockReasons { - issues = append(issues, p.addError(fileSet, imports[n].Pos(), blockReason)) - } - } - - return issues -} - -// addError adds an error for the file and line number for the current token.Pos -// with the given reason. -func (p *Processor) addError(fileset *token.FileSet, pos token.Pos, reason string) Issue { - position := fileset.Position(pos) - - return Issue{ - FileName: position.Filename, - LineNumber: position.Line, - Position: position, - Reason: reason, - } -} - -// SetBlockedModules determines and sets which modules are blocked by reading -// the go.mod file of the module that is being linted. -// -// It works by iterating over the dependant modules specified in the require -// directive, checking if the module domain or full name is in the allowed list. -func (p *Processor) SetBlockedModules() { //nolint:gocognit,funlen - blockedModules := make(map[string][]string, len(p.Modfile.Require)) - currentModuleName := p.Modfile.Module.Mod.Path - lintedModules := p.Modfile.Require - replacedModules := p.Modfile.Replace - - for i := range lintedModules { - if lintedModules[i].Indirect { - continue // Do not lint indirect modules. 
- } - - lintedModuleName := strings.TrimSpace(lintedModules[i].Mod.Path) - lintedModuleVersion := strings.TrimSpace(lintedModules[i].Mod.Version) - - var isAllowed bool - - switch { - case len(p.Config.Allowed.Modules) == 0 && len(p.Config.Allowed.Domains) == 0: - isAllowed = true - case p.Config.Allowed.IsAllowedModuleDomain(lintedModuleName): - isAllowed = true - case p.Config.Allowed.IsAllowedModule(lintedModuleName): - isAllowed = true - default: - isAllowed = false - } - - blockModuleReason := p.Config.Blocked.Modules.GetBlockReason(lintedModuleName) - blockVersionReason := p.Config.Blocked.Versions.GetBlockReason(lintedModuleName) - - if !isAllowed && blockModuleReason == nil && blockVersionReason == nil { - blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], blockReasonNotInAllowedList) - continue - } - - if blockModuleReason != nil && !blockModuleReason.IsCurrentModuleARecommendation(currentModuleName) { - blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], - fmt.Sprintf("%s %s", blockReasonInBlockedList, blockModuleReason.Message())) - } - - if blockVersionReason != nil && blockVersionReason.IsLintedModuleVersionBlocked(lintedModuleVersion) { - blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], - fmt.Sprintf("%s %s", blockReasonInBlockedList, blockVersionReason.Message(lintedModuleVersion))) - } - } - - // Replace directives with local paths are blocked. - // Filesystem paths found in "replace" directives are represented by a path with an empty version. - // https://github.com/golang/mod/blob/bc388b264a244501debfb9caea700c6dcaff10e2/module/module.go#L122-L124 - if p.Config.Blocked.LocalReplaceDirectives { - for i := range replacedModules { - replacedModuleOldName := strings.TrimSpace(replacedModules[i].Old.Path) - replacedModuleNewName := strings.TrimSpace(replacedModules[i].New.Path) - replacedModuleNewVersion := strings.TrimSpace(replacedModules[i].New.Version) - - if replacedModuleNewName != "" && replacedModuleNewVersion == "" { - blockedModules[replacedModuleOldName] = append(blockedModules[replacedModuleOldName], - blockReasonHasLocalReplaceDirective) - } - } - } - - p.blockedModulesFromModFile = blockedModules -} - -// isBlockedPackageFromModFile returns the block reason if the package is blocked. -func (p *Processor) isBlockedPackageFromModFile(packageName string) []string { - for blockedModuleName, blockReasons := range p.blockedModulesFromModFile { - if strings.HasPrefix(strings.TrimSpace(packageName), strings.TrimSpace(blockedModuleName)) { - // Test if a versioned module matched its base version - // ie github.com/foo/bar/v2 matched github.com/foo/bar, even though the former may be allowed. 
- suffix := strings.TrimPrefix(strings.TrimSpace(packageName), strings.TrimSpace(blockedModuleName)) - if startsWithVersion.MatchString(suffix) { - continue - } - - formattedReasons := make([]string, 0, len(blockReasons)) - - for _, blockReason := range blockReasons { - formattedReasons = append(formattedReasons, fmt.Sprintf(blockReason, packageName)) - } - - return formattedReasons - } - } - - return nil -} - -func loadGoModFile() ([]byte, error) { - cmd := exec.Command("go", "env", "-json") - stdout, _ := cmd.StdoutPipe() - _ = cmd.Start() - - if stdout == nil { - return ioutil.ReadFile(goModFilename) - } - - buf := new(bytes.Buffer) - _, _ = buf.ReadFrom(stdout) - - goEnv := make(map[string]string) - - err := json.Unmarshal(buf.Bytes(), &goEnv) - if err != nil { - return ioutil.ReadFile(goModFilename) - } - - if _, ok := goEnv["GOMOD"]; !ok { - return ioutil.ReadFile(goModFilename) - } - - if _, err = os.Stat(goEnv["GOMOD"]); os.IsNotExist(err) { - return ioutil.ReadFile(goModFilename) - } - - if goEnv["GOMOD"] == "/dev/null" { - return nil, errors.New("current working directory must have a go.mod file") - } - - return ioutil.ReadFile(goEnv["GOMOD"]) -} diff --git a/vendor/github.com/ryancurrah/gomodguard/issue.go b/vendor/github.com/ryancurrah/gomodguard/issue.go new file mode 100644 index 0000000000..d60fc3a868 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/issue.go @@ -0,0 +1,20 @@ +package gomodguard + +import ( + "fmt" + "go/token" +) + +// Issue represents the result of one error. +type Issue struct { + FileName string + LineNumber int + Position token.Position + Reason string +} + +// String returns the filename, line +// number and reason of an Issue. +func (r *Issue) String() string { + return fmt.Sprintf("%s:%d:1 %s", r.FileName, r.LineNumber, r.Reason) +} diff --git a/vendor/github.com/ryancurrah/gomodguard/processor.go b/vendor/github.com/ryancurrah/gomodguard/processor.go new file mode 100644 index 0000000000..51038f37f2 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/processor.go @@ -0,0 +1,291 @@ +package gomodguard + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "go/parser" + "go/token" + "os" + "os/exec" + "regexp" + "strings" + + "golang.org/x/mod/modfile" +) + +const ( + goModFilename = "go.mod" + errReadingGoModFile = "unable to read module file %s: %w" + errParsingGoModFile = "unable to parse module file %s: %w" +) + +var ( + blockReasonNotInAllowedList = "import of package `%s` is blocked because the module is not in the " + + "allowed modules list." + blockReasonInBlockedList = "import of package `%s` is blocked because the module is in the " + + "blocked modules list." + blockReasonHasLocalReplaceDirective = "import of package `%s` is blocked because the module has a " + + "local replace directive." + + // startsWithVersion is used to test when a string begins with the version identifier of a module, + // after having stripped the prefix base module name. IE "github.com/foo/bar/v2/baz" => "v2/baz" + // probably indicates that the module is actually github.com/foo/bar/v2, not github.com/foo/bar. + startsWithVersion = regexp.MustCompile(`^v[0-9]+`) +) + +// Configuration of gomodguard allow and block lists. +type Configuration struct { + Allowed Allowed `yaml:"allowed"` + Blocked Blocked `yaml:"blocked"` +} + +// Processor processes Go files. 
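A minimal wiring sketch, not part of the patch, for the `Processor` API reintroduced below; it assumes it is run from a directory containing a `go.mod`, since `NewProcessor` loads and parses that file:

```go
package main

import (
	"fmt"

	"github.com/ryancurrah/gomodguard"
)

func main() {
	config := &gomodguard.Configuration{
		Allowed: gomodguard.Allowed{Domains: []string{"golang.org"}},
	}

	// NewProcessor reads and parses the go.mod of the module being linted
	// and precomputes the blocked-modules map via SetBlockedModules.
	processor, err := gomodguard.NewProcessor(config)
	if err != nil {
		panic(err)
	}

	// ProcessFiles parses each file's imports and reports any import
	// that resolves to a blocked module.
	for _, issue := range processor.ProcessFiles([]string{"main.go"}) {
		fmt.Println(issue.String())
	}
}
```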
+type Processor struct { + Config *Configuration + Modfile *modfile.File + blockedModulesFromModFile map[string][]string +} + +// NewProcessor will create a Processor to lint blocked packages. +func NewProcessor(config *Configuration) (*Processor, error) { + goModFileBytes, err := loadGoModFile() + if err != nil { + return nil, fmt.Errorf(errReadingGoModFile, goModFilename, err) + } + + modFile, err := modfile.Parse(goModFilename, goModFileBytes, nil) + if err != nil { + return nil, fmt.Errorf(errParsingGoModFile, goModFilename, err) + } + + p := &Processor{ + Config: config, + Modfile: modFile, + } + + p.SetBlockedModules() + + return p, nil +} + +// ProcessFiles takes a string slice with file names (full paths) +// and lints them. +func (p *Processor) ProcessFiles(filenames []string) (issues []Issue) { + for _, filename := range filenames { + data, err := os.ReadFile(filename) + if err != nil { + issues = append(issues, Issue{ + FileName: filename, + LineNumber: 0, + Reason: fmt.Sprintf("unable to read file, file cannot be linted (%s)", err.Error()), + }) + + continue + } + + issues = append(issues, p.process(filename, data)...) + } + + return issues +} + +// process file imports and add lint error if blocked package is imported. +func (p *Processor) process(filename string, data []byte) (issues []Issue) { + fileSet := token.NewFileSet() + + file, err := parser.ParseFile(fileSet, filename, data, parser.ParseComments) + if err != nil { + issues = append(issues, Issue{ + FileName: filename, + LineNumber: 0, + Reason: fmt.Sprintf("invalid syntax, file cannot be linted (%s)", err.Error()), + }) + + return + } + + imports := file.Imports + for n := range imports { + importedPkg := strings.TrimSpace(strings.Trim(imports[n].Path.Value, "\"")) + + blockReasons := p.isBlockedPackageFromModFile(importedPkg) + if blockReasons == nil { + continue + } + + for _, blockReason := range blockReasons { + issues = append(issues, p.addError(fileSet, imports[n].Pos(), blockReason)) + } + } + + return issues +} + +// addError adds an error for the file and line number for the current token.Pos +// with the given reason. +func (p *Processor) addError(fileset *token.FileSet, pos token.Pos, reason string) Issue { + position := fileset.Position(pos) + + return Issue{ + FileName: position.Filename, + LineNumber: position.Line, + Position: position, + Reason: reason, + } +} + +// SetBlockedModules determines and sets which modules are blocked by reading +// the go.mod file of the module that is being linted. +// +// It works by iterating over the dependent modules specified in the require +// directive, checking if the module domain or full name is in the allowed list. +func (p *Processor) SetBlockedModules() { //nolint:funlen + blockedModules := make(map[string][]string, len(p.Modfile.Require)) + currentModuleName := p.Modfile.Module.Mod.Path + lintedModules := p.Modfile.Require + replacedModules := p.Modfile.Replace + + for i := range lintedModules { + if lintedModules[i].Indirect { + continue // Do not lint indirect modules. 
+ } + + lintedModuleName := strings.TrimSpace(lintedModules[i].Mod.Path) + lintedModuleVersion := strings.TrimSpace(lintedModules[i].Mod.Version) + + var isAllowed bool + + switch { + case len(p.Config.Allowed.Modules) == 0 && len(p.Config.Allowed.Domains) == 0: + isAllowed = true + case p.Config.Allowed.IsAllowedModuleDomain(lintedModuleName): + isAllowed = true + case p.Config.Allowed.IsAllowedModule(lintedModuleName): + isAllowed = true + default: + isAllowed = false + } + + blockModuleReason := p.Config.Blocked.Modules.GetBlockReason(lintedModuleName) + blockVersionReason := p.Config.Blocked.Versions.GetBlockReason(lintedModuleName) + + if !isAllowed && blockModuleReason == nil && blockVersionReason == nil { + blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], blockReasonNotInAllowedList) + continue + } + + if blockModuleReason != nil && !blockModuleReason.IsCurrentModuleARecommendation(currentModuleName) { + blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], + fmt.Sprintf("%s %s", blockReasonInBlockedList, blockModuleReason.Message())) + } + + if blockVersionReason != nil && blockVersionReason.IsLintedModuleVersionBlocked(lintedModuleVersion) { + blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], + fmt.Sprintf("%s %s", blockReasonInBlockedList, blockVersionReason.Message(lintedModuleVersion))) + } + } + + // Replace directives with local paths are blocked. + // Filesystem paths found in "replace" directives are represented by a path with an empty version. + // https://github.com/golang/mod/blob/bc388b264a244501debfb9caea700c6dcaff10e2/module/module.go#L122-L124 + if p.Config.Blocked.LocalReplaceDirectives { + for i := range replacedModules { + replacedModuleOldName := strings.TrimSpace(replacedModules[i].Old.Path) + replacedModuleNewName := strings.TrimSpace(replacedModules[i].New.Path) + replacedModuleNewVersion := strings.TrimSpace(replacedModules[i].New.Version) + + if replacedModuleNewName != "" && replacedModuleNewVersion == "" { + blockedModules[replacedModuleOldName] = append(blockedModules[replacedModuleOldName], + blockReasonHasLocalReplaceDirective) + } + } + } + + p.blockedModulesFromModFile = blockedModules +} + +// isBlockedPackageFromModFile returns the block reason if the package is blocked. 
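The `isPackageInModule` helper defined just below replaces the old `strings.HasPrefix` test. A self-contained sketch, not part of the patch, of the same segment-matching rule with example paths:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var startsWithVersion = regexp.MustCompile(`^v[0-9]+`)

// isPackageInModule mirrors the vendored helper: every path segment of the
// module must match the package segment-for-segment, and a trailing /vN
// segment signals a different major-version module.
func isPackageInModule(pkg, mod string) bool {
	pkgPart := strings.Split(pkg, "/")
	modPart := strings.Split(mod, "/")

	if len(pkgPart) < len(modPart) {
		return false
	}
	for i, m := range modPart {
		if pkgPart[i] != m {
			return false
		}
	}
	// github.com/foo/bar/v2/... belongs to github.com/foo/bar/v2,
	// not to github.com/foo/bar.
	if len(pkgPart) > len(modPart) && startsWithVersion.MatchString(pkgPart[len(modPart)]) {
		return false
	}

	return true
}

func main() {
	fmt.Println(isPackageInModule("github.com/foo/bar/baz", "github.com/foo/bar"))    // true
	fmt.Println(isPackageInModule("github.com/foo/bar/v2/baz", "github.com/foo/bar")) // false
	fmt.Println(isPackageInModule("github.com/foo/barbaz", "github.com/foo/bar"))     // false
}
```

The last case is what the old prefix check got wrong: `github.com/foo/barbaz` shares a string prefix with `github.com/foo/bar` but is not a package inside that module.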
+func (p *Processor) isBlockedPackageFromModFile(packageName string) []string { + for blockedModuleName, blockReasons := range p.blockedModulesFromModFile { + if isPackageInModule(packageName, blockedModuleName) { + formattedReasons := make([]string, 0, len(blockReasons)) + + for _, blockReason := range blockReasons { + formattedReasons = append(formattedReasons, fmt.Sprintf(blockReason, packageName)) + } + + return formattedReasons + } + } + + return nil +} + +func loadGoModFile() ([]byte, error) { + cmd := exec.Command("go", "env", "-json") + stdout, _ := cmd.StdoutPipe() + _ = cmd.Start() + + if stdout == nil { + return os.ReadFile(goModFilename) + } + + buf := new(bytes.Buffer) + _, _ = buf.ReadFrom(stdout) + + goEnv := make(map[string]string) + + err := json.Unmarshal(buf.Bytes(), &goEnv) + if err != nil { + return os.ReadFile(goModFilename) + } + + if _, ok := goEnv["GOMOD"]; !ok { + return os.ReadFile(goModFilename) + } + + if _, err = os.Stat(goEnv["GOMOD"]); os.IsNotExist(err) { + return os.ReadFile(goModFilename) + } + + if goEnv["GOMOD"] == "/dev/null" { + return nil, errors.New("current working directory must have a go.mod file") + } + + return os.ReadFile(goEnv["GOMOD"]) +} + +// isPackageInModule determines if a package is a part of the specified go module. +func isPackageInModule(pkg, mod string) bool { + // Split pkg and mod paths into parts + pkgPart := strings.Split(pkg, "/") + modPart := strings.Split(mod, "/") + + pkgPartMatches := 0 + + // Count number of times pkg path matches the mod path + for i, m := range modPart { + if len(pkgPart) > i && pkgPart[i] == m { + pkgPartMatches++ + } + } + + // If pkgPartMatches is not the same as the length of modPart + // then the package is not in this module + if pkgPartMatches != len(modPart) { + return false + } + + if len(pkgPart) > len(modPart) { + // If pkgPart path starts with a major version + // then the package is not in this module as + // major versions are completely different modules + if startsWithVersion.MatchString(pkgPart[len(modPart)]) { + return false + } + } + + return true +} diff --git a/vendor/github.com/ryancurrah/gomodguard/tools.go b/vendor/github.com/ryancurrah/gomodguard/tools.go new file mode 100644 index 0000000000..d56bcc7470 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/tools.go @@ -0,0 +1,5 @@ +//go:build tools + +package gomodguard + +import _ "github.com/t-yuki/gocover-cobertura" diff --git a/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go b/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go index bc42dfb3a0..55e931a898 100644 --- a/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go +++ b/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go @@ -9,22 +9,38 @@ import ( ) const ( - rowsName = "Rows" - stmtName = "Stmt" - closeMethod = "Close" + rowsName = "Rows" + stmtName = "Stmt" + namedStmtName = "NamedStmt" + closeMethod = "Close" +) + +type action uint8 + +const ( + actionUnhandled action = iota + actionHandled + actionReturned + actionPassed + actionClosed + actionUnvaluedCall + actionUnvaluedDefer + actionNoOp +) + var ( sqlPackages = []string{ "database/sql", "github.com/jmoiron/sqlx", + "github.com/jackc/pgx/v5", + "github.com/jackc/pgx/v5/pgxpool", } ) func NewAnalyzer() *analysis.Analyzer { return &analysis.Analyzer{ Name: "sqlclosecheck", - Doc: "Checks that sql.Rows and sql.Stmt are closed.", + Doc: "Checks that sql.Rows, sql.Stmt, sqlx.NamedStmt, pgx.Query are closed.", Run: run, Requires: []*analysis.Analyzer{ 
buildssa.Analyzer, @@ -33,7 +49,10 @@ func NewAnalyzer() *analysis.Analyzer { } func run(pass *analysis.Pass) (interface{}, error) { - pssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + pssa, ok := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + if !ok { + return nil, nil + } // Build list of types we are looking for targetTypes := getTargetTypes(pssa, sqlPackages) @@ -47,20 +66,18 @@ func run(pass *analysis.Pass) (interface{}, error) { for _, f := range funcs { for _, b := range f.Blocks { for i := range b.Instrs { - // Check if instruction is call that returns a target type + // Check if instruction is call that returns a target pointer type targetValues := getTargetTypesValues(b, i, targetTypes) if len(targetValues) == 0 { continue } - // log.Printf("%s", f.Name()) - // For each found target check if they are closed and deferred for _, targetValue := range targetValues { refs := (*targetValue.value).Referrers() isClosed := checkClosed(refs, targetTypes) if !isClosed { - pass.Reportf((targetValue.instr).Pos(), "Rows/Stmt was not closed") + pass.Reportf((targetValue.instr).Pos(), "Rows/Stmt/NamedStmt was not closed") } checkDeferred(pass, refs, targetTypes, false) @@ -72,17 +89,22 @@ func run(pass *analysis.Pass) (interface{}, error) { return nil, nil } -func getTargetTypes(pssa *buildssa.SSA, targetPackages []string) []*types.Pointer { - targets := []*types.Pointer{} +func getTargetTypes(pssa *buildssa.SSA, targetPackages []string) []any { + targets := []any{} for _, sqlPkg := range targetPackages { pkg := pssa.Pkg.Prog.ImportedPackage(sqlPkg) if pkg == nil { // the SQL package being checked isn't imported - return targets + continue } - rowsType := getTypePointerFromName(pkg, rowsName) + rowsPtrType := getTypePointerFromName(pkg, rowsName) + if rowsPtrType != nil { + targets = append(targets, rowsPtrType) + } + + rowsType := getTypeFromName(pkg, rowsName) if rowsType != nil { targets = append(targets, rowsType) } @@ -91,6 +113,11 @@ func getTargetTypes(pssa *buildssa.SSA, targetPackages []string) []*types.Pointe if stmtType != nil { targets = append(targets, stmtType) } + + namedStmtType := getTypePointerFromName(pkg, namedStmtName) + if namedStmtType != nil { + targets = append(targets, namedStmtType) + } } return targets @@ -99,7 +126,7 @@ func getTargetTypes(pssa *buildssa.SSA, targetPackages []string) []*types.Pointe func getTypePointerFromName(pkg *ssa.Package, name string) *types.Pointer { pkgType := pkg.Type(name) if pkgType == nil { - // this package does not use Rows/Stmt + // this package does not use Rows/Stmt/NamedStmt return nil } @@ -112,12 +139,28 @@ func getTypePointerFromName(pkg *ssa.Package, name string) *types.Pointer { return types.NewPointer(named) } +func getTypeFromName(pkg *ssa.Package, name string) *types.Named { + pkgType := pkg.Type(name) + if pkgType == nil { + // this package does not use Rows/Stmt + return nil + } + + obj := pkgType.Object() + named, ok := obj.Type().(*types.Named) + if !ok { + return nil + } + + return named +} + type targetValue struct { value *ssa.Value instr ssa.Instruction } -func getTargetTypesValues(b *ssa.BasicBlock, i int, targetTypes []*types.Pointer) []targetValue { +func getTargetTypesValues(b *ssa.BasicBlock, i int, targetTypes []any) []targetValue { targetValues := []targetValue{} instr := b.Instrs[i] @@ -133,21 +176,32 @@ func getTargetTypesValues(b *ssa.BasicBlock, i int, targetTypes []*types.Pointer varType := v.Type() for _, targetType := range targetTypes { - if !types.Identical(varType, targetType) { + var tt 
types.Type + + switch t := targetType.(type) { + case *types.Pointer: + tt = t + case *types.Named: + tt = t + default: + continue + } + + if !types.Identical(varType, tt) { continue } for _, cRef := range *call.Referrers() { switch instr := cRef.(type) { case *ssa.Call: - if len(instr.Call.Args) >= 1 && types.Identical(instr.Call.Args[0].Type(), targetType) { + if len(instr.Call.Args) >= 1 && types.Identical(instr.Call.Args[0].Type(), tt) { targetValues = append(targetValues, targetValue{ value: &instr.Call.Args[0], instr: call, }) } case ssa.Value: - if types.Identical(instr.Type(), targetType) { + if types.Identical(instr.Type(), tt) { targetValues = append(targetValues, targetValue{ value: &instr, instr: call, @@ -161,77 +215,86 @@ func getTargetTypesValues(b *ssa.BasicBlock, i int, targetTypes []*types.Pointer return targetValues } -func checkClosed(refs *[]ssa.Instruction, targetTypes []*types.Pointer) bool { +func checkClosed(refs *[]ssa.Instruction, targetTypes []any) bool { numInstrs := len(*refs) for idx, ref := range *refs { - // log.Printf("%T - %s", ref, ref) - action := getAction(ref, targetTypes) switch action { - case "closed": + case actionClosed, actionReturned, actionHandled: return true - case "passed": + case actionPassed: // Passed and not used after if numInstrs == idx+1 { return true } - case "returned": - return true - case "handled": - return true - default: - // log.Printf(action) } } return false } -func getAction(instr ssa.Instruction, targetTypes []*types.Pointer) string { +func getAction(instr ssa.Instruction, targetTypes []any) action { switch instr := instr.(type) { case *ssa.Defer: - if instr.Call.Value == nil { - return "unvalued defer" + if instr.Call.Value != nil { + name := instr.Call.Value.Name() + if name == closeMethod { + return actionClosed + } } - name := instr.Call.Value.Name() - if name == closeMethod { - return "closed" + if instr.Call.Method != nil { + name := instr.Call.Method.Name() + if name == closeMethod { + return actionClosed + } } + + return actionUnvaluedDefer case *ssa.Call: if instr.Call.Value == nil { - return "unvalued call" + return actionUnvaluedCall } isTarget := false - receiver := instr.Call.StaticCallee().Signature.Recv() - if receiver != nil { - isTarget = isTargetType(receiver.Type(), targetTypes) + staticCallee := instr.Call.StaticCallee() + if staticCallee != nil { + receiver := instr.Call.StaticCallee().Signature.Recv() + if receiver != nil { + isTarget = isTargetType(receiver.Type(), targetTypes) + } } name := instr.Call.Value.Name() if isTarget && name == closeMethod { - return "closed" + return actionClosed } if !isTarget { - return "passed" + return actionPassed } case *ssa.Phi: - return "passed" + return actionPassed case *ssa.MakeInterface: - return "passed" + return actionPassed case *ssa.Store: + // A Row/Stmt is stored in a struct, which may be closed later + // by a different flow. 
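+ // Treating this as returned leaves the close responsibility with the + // code that later reads the struct field.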
+ if _, ok := instr.Addr.(*ssa.FieldAddr); ok { + return actionReturned + } + if len(*instr.Addr.Referrers()) == 0 { - return "noop" + return actionNoOp } for _, aRef := range *instr.Addr.Referrers() { if c, ok := aRef.(*ssa.MakeClosure); ok { - f := c.Fn.(*ssa.Function) - for _, b := range f.Blocks { - if checkClosed(&b.Instrs, targetTypes) { - return "handled" + if f, ok := c.Fn.(*ssa.Function); ok { + for _, b := range f.Blocks { + if checkClosed(&b.Instrs, targetTypes) { + return actionHandled + } } } } @@ -239,32 +302,45 @@ func getAction(instr ssa.Instruction, targetTypes []*types.Pointer) string { case *ssa.UnOp: instrType := instr.Type() for _, targetType := range targetTypes { - if types.Identical(instrType, targetType) { + var tt types.Type + + switch t := targetType.(type) { + case *types.Pointer: + tt = t + case *types.Named: + tt = t + default: + continue + } + + if types.Identical(instrType, tt) { if checkClosed(instr.Referrers(), targetTypes) { - return "handled" + return actionHandled } } } case *ssa.FieldAddr: if checkClosed(instr.Referrers(), targetTypes) { - return "handled" + return actionHandled } case *ssa.Return: - return "returned" - default: - // log.Printf("%s", instr) + return actionReturned } - return "unhandled" + return actionUnhandled } -func checkDeferred(pass *analysis.Pass, instrs *[]ssa.Instruction, targetTypes []*types.Pointer, inDefer bool) { +func checkDeferred(pass *analysis.Pass, instrs *[]ssa.Instruction, targetTypes []any, inDefer bool) { for _, instr := range *instrs { switch instr := instr.(type) { case *ssa.Defer: if instr.Call.Value != nil && instr.Call.Value.Name() == closeMethod { return } + + if instr.Call.Method != nil && instr.Call.Method.Name() == closeMethod { + return + } case *ssa.Call: if instr.Call.Value != nil && instr.Call.Value.Name() == closeMethod { if !inDefer { @@ -280,17 +356,28 @@ func checkDeferred(pass *analysis.Pass, instrs *[]ssa.Instruction, targetTypes [ for _, aRef := range *instr.Addr.Referrers() { if c, ok := aRef.(*ssa.MakeClosure); ok { - f := c.Fn.(*ssa.Function) - - for _, b := range f.Blocks { - checkDeferred(pass, &b.Instrs, targetTypes, true) + if f, ok := c.Fn.(*ssa.Function); ok { + for _, b := range f.Blocks { + checkDeferred(pass, &b.Instrs, targetTypes, true) + } } } } case *ssa.UnOp: instrType := instr.Type() for _, targetType := range targetTypes { - if types.Identical(instrType, targetType) { + var tt types.Type + + switch t := targetType.(type) { + case *types.Pointer: + tt = t + case *types.Named: + tt = t + default: + continue + } + + if types.Identical(instrType, tt) { checkDeferred(pass, instr.Referrers(), targetTypes, inDefer) } } @@ -300,10 +387,17 @@ func checkDeferred(pass *analysis.Pass, instrs *[]ssa.Instruction, targetTypes [ } } -func isTargetType(t types.Type, targetTypes []*types.Pointer) bool { +func isTargetType(t types.Type, targetTypes []any) bool { for _, targetType := range targetTypes { - if types.Identical(t, targetType) { - return true + switch tt := targetType.(type) { + case *types.Pointer: + if types.Identical(t, tt) { + return true + } + case *types.Named: + if types.Identical(t, tt) { + return true + } } } diff --git a/vendor/github.com/sanposhiho/wastedassign/v2/README.md b/vendor/github.com/sanposhiho/wastedassign/v2/README.md index cd2deedad5..6b736f7f1d 100644 --- a/vendor/github.com/sanposhiho/wastedassign/v2/README.md +++ b/vendor/github.com/sanposhiho/wastedassign/v2/README.md @@ -39,10 +39,19 @@ $ go vet -vettool=`which wastedassign` sample.go ## Installation + 
+### Go version < 1.16 + ``` go get -u github.com/sanposhiho/wastedassign/v2/cmd/wastedassign ``` +### Go version 1.16+ + +``` +go install github.com/sanposhiho/wastedassign/v2/cmd/wastedassign@latest +``` + ## Usage ``` diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore new file mode 100644 index 0000000000..3c0af38259 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore @@ -0,0 +1,4 @@ +.vscode +.idea +*.swp +cmd/jv/jv diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules new file mode 100644 index 0000000000..314da31c5e --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules @@ -0,0 +1,3 @@ +[submodule "testdata/JSON-Schema-Test-Suite"] + path = testdata/JSON-Schema-Test-Suite + url = https://github.com/json-schema-org/JSON-Schema-Test-Suite.git diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE b/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE new file mode 100644 index 0000000000..19dc35b243 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. \ No newline at end of file diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md b/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md new file mode 100644 index 0000000000..b0d05054ca --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md @@ -0,0 +1,220 @@ +# jsonschema v5.3.1 + +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5) +[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema/v5)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema/v5) +[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=master)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml) +[![codecov](https://codecov.io/gh/santhosh-tekuri/jsonschema/branch/master/graph/badge.svg?token=JMVj1pFT2l)](https://codecov.io/gh/santhosh-tekuri/jsonschema) + +Package jsonschema provides json-schema compilation and validation. 
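+ +A minimal usage sketch (the schema path, `data` bytes, and error handling here are illustrative, not part of the library): + +```go +sch, err := jsonschema.Compile("schemas/config.schema.json") +if err != nil { + log.Fatal(err) +} + +var doc interface{} +if err := json.Unmarshal(data, &doc); err != nil { + log.Fatal(err) +} + +if err := sch.Validate(doc); err != nil { + // err is a *jsonschema.ValidationError describing what failed and where + log.Fatal(err) +} +```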
+ +[Benchmarks](https://dev.to/vearutop/benchmarking-correctness-and-performance-of-go-json-schema-validators-3247) + +### Features: + - implements + [draft 2020-12](https://json-schema.org/specification-links.html#2020-12), + [draft 2019-09](https://json-schema.org/specification-links.html#draft-2019-09-formerly-known-as-draft-8), + [draft-7](https://json-schema.org/specification-links.html#draft-7), + [draft-6](https://json-schema.org/specification-links.html#draft-6), + [draft-4](https://json-schema.org/specification-links.html#draft-4) + - fully compliant with [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite), (excluding some optional) + - list of optional tests that are excluded can be found in schema_test.go (variable [skipTests](https://github.com/santhosh-tekuri/jsonschema/blob/master/schema_test.go#L24)) + - validates schemas against meta-schema + - full support of remote references + - support of recursive references between schemas + - detects infinite loops in schemas + - thread safe validation + - rich, intuitive hierarchical error messages with json-pointers to exact location + - supports output formats flag, basic and detailed + - supports enabling format and content Assertions in draft2019-09 or above + - change `Compiler.AssertFormat`, `Compiler.AssertContent` to `true` + - compiled schema can be introspected. easier to develop tools like generating go structs given schema + - supports user-defined keywords via [extensions](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-Extension) + - implements following formats (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedFormat)) + - date-time, date, time, duration, period (supports leap-second) + - uuid, hostname, email + - ip-address, ipv4, ipv6 + - uri, uriref, uri-template(limited validation) + - json-pointer, relative-json-pointer + - regex, format + - implements following contentEncoding (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent)) + - base64 + - implements following contentMediaType (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent)) + - application/json + - can load from files/http/https/[string](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-FromString)/[]byte/io.Reader (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedLoader)) + + +see examples in [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5) + +The schema is compiled against the version specified in the `$schema` property. +If the "$schema" property is missing, it uses the latest draft currently implemented +by this library. + +You can force a specific version to be used when `$schema` is missing, as follows: + +```go +compiler := jsonschema.NewCompiler() +compiler.Draft = jsonschema.Draft4 +``` + +This package supports loading json-schema from filePath and fileURL. + +To load json-schema from HTTPURL, add the following import: + +```go +import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" +``` + +## Rich Errors + +The ValidationError returned by the Validate method contains detailed context to understand why and where the error occurred.
+ +schema.json: +```json +{ + "$ref": "t.json#/definitions/employee" +} +``` + +t.json: +```json +{ + "definitions": { + "employee": { + "type": "string" + } + } +} +``` + +doc.json: +```json +1 +``` + +assuming `err` is the ValidationError returned when `doc.json` is validated against `schema.json`, +```go +fmt.Printf("%#v\n", err) // using %#v prints errors hierarchy +``` +Prints: +``` +[I#] [S#] doesn't validate with file:///Users/santhosh/jsonschema/schema.json# + [I#] [S#/$ref] doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee' + [I#] [S#/definitions/employee/type] expected string, but got number +``` + +Here `I` stands for instance document and `S` stands for schema document. +The json-fragments that caused the error in the instance and schema documents are represented using json-pointer notation. +Nested causes are printed with indent. + +To output `err` in `flag` output format: +```go +b, _ := json.MarshalIndent(err.FlagOutput(), "", " ") +fmt.Println(string(b)) +``` +Prints: +```json +{ + "valid": false +} +``` +To output `err` in `basic` output format: +```go +b, _ := json.MarshalIndent(err.BasicOutput(), "", " ") +fmt.Println(string(b)) +``` +Prints: +```json +{ + "valid": false, + "errors": [ + { + "keywordLocation": "", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#", + "instanceLocation": "", + "error": "doesn't validate with file:///Users/santhosh/jsonschema/schema.json#" + }, + { + "keywordLocation": "/$ref", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref", + "instanceLocation": "", + "error": "doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee'" + }, + { + "keywordLocation": "/$ref/type", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type", + "instanceLocation": "", + "error": "expected string, but got number" + } + ] +} +``` +To output `err` in `detailed` output format: +```go +b, _ := json.MarshalIndent(err.DetailedOutput(), "", " ") +fmt.Println(string(b)) +``` +Prints: +```json +{ + "valid": false, + "keywordLocation": "", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#", + "instanceLocation": "", + "errors": [ + { + "valid": false, + "keywordLocation": "/$ref", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref", + "instanceLocation": "", + "errors": [ + { + "valid": false, + "keywordLocation": "/$ref/type", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type", + "instanceLocation": "", + "error": "expected string, but got number" + } + ] + } + ] +} +``` + +## CLI + +to install: `go install github.com/santhosh-tekuri/jsonschema/cmd/jv@latest` + +```bash +jv [-draft INT] [-output FORMAT] [-assertformat] [-assertcontent] <json-schema> [<json-or-yaml-doc>]... + -assertcontent + enable content assertions with draft >= 2019 + -assertformat + enable format assertions with draft >= 2019 + -draft int + draft used when '$schema' attribute is missing. valid values 4, 5, 7, 2019, 2020 (default 2020) + -output string + output format. valid values flag, basic, detailed +``` + +if no `<json-or-yaml-doc>` arguments are passed, it simply validates the `<json-schema>`. +if the `$schema` attribute is missing in the schema, it uses the latest version. this can be overridden by passing the `-draft` flag + +the exit-code is 1 if there are any validation errors + +`jv` can also validate yaml files. It also accepts schemas from yaml files.
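+ +For example, to check a yaml document against a schema with detailed output (file names here are illustrative): + +```bash +jv -output detailed schema.json doc.yaml +```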
+ +## Validating YAML Documents + +since yaml supports non-string keys, such yaml documents are rendered as invalid json documents. + +most yaml parsers use `map[interface{}]interface{}` for object, +whereas json parsers use `map[string]interface{}`. + +so we need to manually convert them to `map[string]interface{}`. +the code below shows such a conversion using a `toStringKeys` function. + +https://play.golang.org/p/Hhax3MrtD8r + +NOTE: if you are using `gopkg.in/yaml.v3`, then you do not need such a conversion, since this library +returns `map[string]interface{}` if all keys are strings. \ No newline at end of file diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go new file mode 100644 index 0000000000..fdb68e6480 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go @@ -0,0 +1,812 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "io" + "math/big" + "regexp" + "strconv" + "strings" +) + +// A Compiler represents a json-schema compiler. +type Compiler struct { + // Draft represents the draft used when '$schema' attribute is missing. + // + // This defaults to the latest supported draft (currently 2020-12). + Draft *Draft + resources map[string]*resource + + // Extensions is used to register extensions. + extensions map[string]extension + + // ExtractAnnotations tells whether schema annotations have to be extracted + // in compiled Schema or not. + ExtractAnnotations bool + + // LoadURL loads the document at given absolute URL. + // + // If nil, package global LoadURL is used. + LoadURL func(s string) (io.ReadCloser, error) + + // Formats can be registered by adding to this map. Key is format name, + // value is function that knows how to validate that format. + Formats map[string]func(interface{}) bool + + // AssertFormat for specifications >= draft2019-09. + AssertFormat bool + + // Decoders can be registered by adding to this map. Key is encoding name, + // value is function that knows how to decode string in that format. + Decoders map[string]func(string) ([]byte, error) + + // MediaTypes can be registered by adding to this map. Key is mediaType name, + // value is function that knows how to validate that mediaType. + MediaTypes map[string]func([]byte) error + + // AssertContent for specifications >= draft2019-09. + AssertContent bool +} + +// Compile parses the json-schema at the given url and returns, if successful, +// a Schema object that can be used to match against json. +// +// Returned error can be *SchemaError +func Compile(url string) (*Schema, error) { + return NewCompiler().Compile(url) +} + +// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. +// It simplifies safe initialization of global variables holding compiled Schemas. +func MustCompile(url string) *Schema { + return NewCompiler().MustCompile(url) +} + +// CompileString parses and compiles the given schema with the given base url. +func CompileString(url, schema string) (*Schema, error) { + c := NewCompiler() + if err := c.AddResource(url, strings.NewReader(schema)); err != nil { + return nil, err + } + return c.Compile(url) +} + +// MustCompileString is like CompileString but panics on error. +// It simplifies safe initialization of global variables holding compiled Schemas.
+func MustCompileString(url, schema string) *Schema { + c := NewCompiler() + if err := c.AddResource(url, strings.NewReader(schema)); err != nil { + panic(err) + } + return c.MustCompile(url) +} + +// NewCompiler returns a json-schema Compiler object. +// if the '$schema' attribute is missing, the latest supported draft is used. +// to change this behavior, set the Compiler.Draft value +func NewCompiler() *Compiler { + return &Compiler{ + Draft: latest, + resources: make(map[string]*resource), + Formats: make(map[string]func(interface{}) bool), + Decoders: make(map[string]func(string) ([]byte, error)), + MediaTypes: make(map[string]func([]byte) error), + extensions: make(map[string]extension), + } +} + +// AddResource adds an in-memory resource to the compiler. +// +// Note that url must not have a fragment +func (c *Compiler) AddResource(url string, r io.Reader) error { + res, err := newResource(url, r) + if err != nil { + return err + } + c.resources[res.url] = res + return nil +} + +// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. +// It simplifies safe initialization of global variables holding compiled Schemas. +func (c *Compiler) MustCompile(url string) *Schema { + s, err := c.Compile(url) + if err != nil { + panic(fmt.Sprintf("jsonschema: %#v", err)) + } + return s +} + +// Compile parses the json-schema at the given url and returns, if successful, +// a Schema object that can be used to match against json. +// +// the error returned will be of type *SchemaError +func (c *Compiler) Compile(url string) (*Schema, error) { + // make url absolute + u, err := toAbs(url) + if err != nil { + return nil, &SchemaError{url, err} + } + url = u + + sch, err := c.compileURL(url, nil, "#") + if err != nil { + err = &SchemaError{url, err} + } + return sch, err +} + +func (c *Compiler) findResource(url string) (*resource, error) { + if _, ok := c.resources[url]; !ok { + // load resource + var rdr io.Reader + if sch, ok := vocabSchemas[url]; ok { + rdr = strings.NewReader(sch) + } else { + loadURL := LoadURL + if c.LoadURL != nil { + loadURL = c.LoadURL + } + r, err := loadURL(url) + if err != nil { + return nil, err + } + defer r.Close() + rdr = r + } + if err := c.AddResource(url, rdr); err != nil { + return nil, err + } + } + + r := c.resources[url] + if r.draft != nil { + return r, nil + } + + // set draft + r.draft = c.Draft + if m, ok := r.doc.(map[string]interface{}); ok { + if sch, ok := m["$schema"]; ok { + sch, ok := sch.(string) + if !ok { + return nil, fmt.Errorf("jsonschema: invalid $schema in %s", url) + } + if !isURI(sch) { + return nil, fmt.Errorf("jsonschema: $schema must be uri in %s", url) + } + r.draft = findDraft(sch) + if r.draft == nil { + sch, _ := split(sch) + if sch == url { + return nil, fmt.Errorf("jsonschema: unsupported draft in %s", url) + } + mr, err := c.findResource(sch) + if err != nil { + return nil, err + } + r.draft = mr.draft + } + } + } + + id, err := r.draft.resolveID(r.url, r.doc) + if err != nil { + return nil, err + } + if id != "" { + r.url = id + } + + if err := r.fillSubschemas(c, r); err != nil { + return nil, err + } + + return r, nil +} + +func (c *Compiler) compileURL(url string, stack []schemaRef, ptr string) (*Schema, error) { + // if url points to a draft, return Draft.meta + if d := findDraft(url); d != nil && d.meta != nil { + return d.meta, nil + } + + b, f := split(url) + r, err := c.findResource(b) + if err != nil { + return nil, err + } + return c.compileRef(r, stack, ptr, r, f) +} + +func (c *Compiler) compileRef(r *resource, stack
[]schemaRef, refPtr string, res *resource, ref string) (*Schema, error) { + base := r.baseURL(res.floc) + ref, err := resolveURL(base, ref) + if err != nil { + return nil, err + } + + u, f := split(ref) + sr := r.findResource(u) + if sr == nil { + // external resource + return c.compileURL(ref, stack, refPtr) + } + + // ensure root resource is always compiled first. + // this is required to get schema.meta from root resource + if r.schema == nil { + r.schema = newSchema(r.url, r.floc, r.draft, r.doc) + if _, err := c.compile(r, nil, schemaRef{"#", r.schema, false}, r); err != nil { + return nil, err + } + } + + sr, err = r.resolveFragment(c, sr, f) + if err != nil { + return nil, err + } + if sr == nil { + return nil, fmt.Errorf("jsonschema: %s not found", ref) + } + + if sr.schema != nil { + if err := checkLoop(stack, schemaRef{refPtr, sr.schema, false}); err != nil { + return nil, err + } + return sr.schema, nil + } + + sr.schema = newSchema(r.url, sr.floc, r.draft, sr.doc) + return c.compile(r, stack, schemaRef{refPtr, sr.schema, false}, sr) +} + +func (c *Compiler) compileDynamicAnchors(r *resource, res *resource) error { + if r.draft.version < 2020 { + return nil + } + + rr := r.listResources(res) + rr = append(rr, res) + for _, sr := range rr { + if m, ok := sr.doc.(map[string]interface{}); ok { + if _, ok := m["$dynamicAnchor"]; ok { + sch, err := c.compileRef(r, nil, "IGNORED", r, sr.floc) + if err != nil { + return err + } + res.schema.dynamicAnchors = append(res.schema.dynamicAnchors, sch) + } + } + } + return nil +} + +func (c *Compiler) compile(r *resource, stack []schemaRef, sref schemaRef, res *resource) (*Schema, error) { + if err := c.compileDynamicAnchors(r, res); err != nil { + return nil, err + } + + switch v := res.doc.(type) { + case bool: + res.schema.Always = &v + return res.schema, nil + default: + return res.schema, c.compileMap(r, stack, sref, res) + } +} + +func (c *Compiler) compileMap(r *resource, stack []schemaRef, sref schemaRef, res *resource) error { + m := res.doc.(map[string]interface{}) + + if err := checkLoop(stack, sref); err != nil { + return err + } + stack = append(stack, sref) + + var s = res.schema + var err error + + if r == res { // root schema + if sch, ok := m["$schema"]; ok { + sch := sch.(string) + if d := findDraft(sch); d != nil { + s.meta = d.meta + } else { + if s.meta, err = c.compileRef(r, stack, "$schema", res, sch); err != nil { + return err + } + } + } + } + + if ref, ok := m["$ref"]; ok { + s.Ref, err = c.compileRef(r, stack, "$ref", res, ref.(string)) + if err != nil { + return err + } + if r.draft.version < 2019 { + // All other properties in a "$ref" object MUST be ignored + return nil + } + } + + if r.draft.version >= 2019 { + if r == res { // root schema + if vocab, ok := m["$vocabulary"]; ok { + for url, reqd := range vocab.(map[string]interface{}) { + if reqd, ok := reqd.(bool); ok && !reqd { + continue + } + if !r.draft.isVocab(url) { + return fmt.Errorf("jsonschema: unsupported vocab %q in %s", url, res) + } + s.vocab = append(s.vocab, url) + } + } else { + s.vocab = r.draft.defaultVocab + } + } + + if ref, ok := m["$recursiveRef"]; ok { + s.RecursiveRef, err = c.compileRef(r, stack, "$recursiveRef", res, ref.(string)) + if err != nil { + return err + } + } + } + if r.draft.version >= 2020 { + if dref, ok := m["$dynamicRef"]; ok { + s.DynamicRef, err = c.compileRef(r, stack, "$dynamicRef", res, dref.(string)) + if err != nil { + return err + } + if dref, ok := dref.(string); ok { + _, frag := split(dref) + if frag != "#" 
&& !strings.HasPrefix(frag, "#/") { + // frag is anchor + s.dynamicRefAnchor = frag[1:] + } + } + } + } + + loadInt := func(pname string) int { + if num, ok := m[pname]; ok { + i, _ := num.(json.Number).Float64() + return int(i) + } + return -1 + } + + loadRat := func(pname string) *big.Rat { + if num, ok := m[pname]; ok { + r, _ := new(big.Rat).SetString(string(num.(json.Number))) + return r + } + return nil + } + + if r.draft.version < 2019 || r.schema.meta.hasVocab("validation") { + if t, ok := m["type"]; ok { + switch t := t.(type) { + case string: + s.Types = []string{t} + case []interface{}: + s.Types = toStrings(t) + } + } + + if e, ok := m["enum"]; ok { + s.Enum = e.([]interface{}) + allPrimitives := true + for _, item := range s.Enum { + switch jsonType(item) { + case "object", "array": + allPrimitives = false + break + } + } + s.enumError = "enum failed" + if allPrimitives { + if len(s.Enum) == 1 { + s.enumError = fmt.Sprintf("value must be %#v", s.Enum[0]) + } else { + strEnum := make([]string, len(s.Enum)) + for i, item := range s.Enum { + strEnum[i] = fmt.Sprintf("%#v", item) + } + s.enumError = fmt.Sprintf("value must be one of %s", strings.Join(strEnum, ", ")) + } + } + } + + s.Minimum = loadRat("minimum") + if exclusive, ok := m["exclusiveMinimum"]; ok { + if exclusive, ok := exclusive.(bool); ok { + if exclusive { + s.Minimum, s.ExclusiveMinimum = nil, s.Minimum + } + } else { + s.ExclusiveMinimum = loadRat("exclusiveMinimum") + } + } + + s.Maximum = loadRat("maximum") + if exclusive, ok := m["exclusiveMaximum"]; ok { + if exclusive, ok := exclusive.(bool); ok { + if exclusive { + s.Maximum, s.ExclusiveMaximum = nil, s.Maximum + } + } else { + s.ExclusiveMaximum = loadRat("exclusiveMaximum") + } + } + + s.MultipleOf = loadRat("multipleOf") + + s.MinProperties, s.MaxProperties = loadInt("minProperties"), loadInt("maxProperties") + + if req, ok := m["required"]; ok { + s.Required = toStrings(req.([]interface{})) + } + + s.MinItems, s.MaxItems = loadInt("minItems"), loadInt("maxItems") + + if unique, ok := m["uniqueItems"]; ok { + s.UniqueItems = unique.(bool) + } + + s.MinLength, s.MaxLength = loadInt("minLength"), loadInt("maxLength") + + if pattern, ok := m["pattern"]; ok { + s.Pattern = regexp.MustCompile(pattern.(string)) + } + + if r.draft.version >= 2019 { + s.MinContains, s.MaxContains = loadInt("minContains"), loadInt("maxContains") + if s.MinContains == -1 { + s.MinContains = 1 + } + + if deps, ok := m["dependentRequired"]; ok { + deps := deps.(map[string]interface{}) + s.DependentRequired = make(map[string][]string, len(deps)) + for pname, pvalue := range deps { + s.DependentRequired[pname] = toStrings(pvalue.([]interface{})) + } + } + } + } + + compile := func(stack []schemaRef, ptr string) (*Schema, error) { + return c.compileRef(r, stack, ptr, res, r.url+res.floc+"/"+ptr) + } + + loadSchema := func(pname string, stack []schemaRef) (*Schema, error) { + if _, ok := m[pname]; ok { + return compile(stack, escape(pname)) + } + return nil, nil + } + + loadSchemas := func(pname string, stack []schemaRef) ([]*Schema, error) { + if pvalue, ok := m[pname]; ok { + pvalue := pvalue.([]interface{}) + schemas := make([]*Schema, len(pvalue)) + for i := range pvalue { + sch, err := compile(stack, escape(pname)+"/"+strconv.Itoa(i)) + if err != nil { + return nil, err + } + schemas[i] = sch + } + return schemas, nil + } + return nil, nil + } + + if r.draft.version < 2019 || r.schema.meta.hasVocab("applicator") { + if s.Not, err = loadSchema("not", stack); err != nil { + return 
err + } + if s.AllOf, err = loadSchemas("allOf", stack); err != nil { + return err + } + if s.AnyOf, err = loadSchemas("anyOf", stack); err != nil { + return err + } + if s.OneOf, err = loadSchemas("oneOf", stack); err != nil { + return err + } + + if props, ok := m["properties"]; ok { + props := props.(map[string]interface{}) + s.Properties = make(map[string]*Schema, len(props)) + for pname := range props { + s.Properties[pname], err = compile(nil, "properties/"+escape(pname)) + if err != nil { + return err + } + } + } + + if regexProps, ok := m["regexProperties"]; ok { + s.RegexProperties = regexProps.(bool) + } + + if patternProps, ok := m["patternProperties"]; ok { + patternProps := patternProps.(map[string]interface{}) + s.PatternProperties = make(map[*regexp.Regexp]*Schema, len(patternProps)) + for pattern := range patternProps { + s.PatternProperties[regexp.MustCompile(pattern)], err = compile(nil, "patternProperties/"+escape(pattern)) + if err != nil { + return err + } + } + } + + if additionalProps, ok := m["additionalProperties"]; ok { + switch additionalProps := additionalProps.(type) { + case bool: + s.AdditionalProperties = additionalProps + case map[string]interface{}: + s.AdditionalProperties, err = compile(nil, "additionalProperties") + if err != nil { + return err + } + } + } + + if deps, ok := m["dependencies"]; ok { + deps := deps.(map[string]interface{}) + s.Dependencies = make(map[string]interface{}, len(deps)) + for pname, pvalue := range deps { + switch pvalue := pvalue.(type) { + case []interface{}: + s.Dependencies[pname] = toStrings(pvalue) + default: + s.Dependencies[pname], err = compile(stack, "dependencies/"+escape(pname)) + if err != nil { + return err + } + } + } + } + + if r.draft.version >= 6 { + if s.PropertyNames, err = loadSchema("propertyNames", nil); err != nil { + return err + } + if s.Contains, err = loadSchema("contains", nil); err != nil { + return err + } + } + + if r.draft.version >= 7 { + if m["if"] != nil { + if s.If, err = loadSchema("if", stack); err != nil { + return err + } + if s.Then, err = loadSchema("then", stack); err != nil { + return err + } + if s.Else, err = loadSchema("else", stack); err != nil { + return err + } + } + } + if r.draft.version >= 2019 { + if deps, ok := m["dependentSchemas"]; ok { + deps := deps.(map[string]interface{}) + s.DependentSchemas = make(map[string]*Schema, len(deps)) + for pname := range deps { + s.DependentSchemas[pname], err = compile(stack, "dependentSchemas/"+escape(pname)) + if err != nil { + return err + } + } + } + } + + if r.draft.version >= 2020 { + if s.PrefixItems, err = loadSchemas("prefixItems", nil); err != nil { + return err + } + if s.Items2020, err = loadSchema("items", nil); err != nil { + return err + } + } else { + if items, ok := m["items"]; ok { + switch items.(type) { + case []interface{}: + s.Items, err = loadSchemas("items", nil) + if err != nil { + return err + } + if additionalItems, ok := m["additionalItems"]; ok { + switch additionalItems := additionalItems.(type) { + case bool: + s.AdditionalItems = additionalItems + case map[string]interface{}: + s.AdditionalItems, err = compile(nil, "additionalItems") + if err != nil { + return err + } + } + } + default: + s.Items, err = compile(nil, "items") + if err != nil { + return err + } + } + } + } + + } + + // unevaluatedXXX keywords were in "applicator" vocab in 2019, but moved to new vocab "unevaluated" in 2020 + if (r.draft.version == 2019 && r.schema.meta.hasVocab("applicator")) || (r.draft.version >= 2020 && 
r.schema.meta.hasVocab("unevaluated")) { + if s.UnevaluatedProperties, err = loadSchema("unevaluatedProperties", nil); err != nil { + return err + } + if s.UnevaluatedItems, err = loadSchema("unevaluatedItems", nil); err != nil { + return err + } + if r.draft.version >= 2020 { + // any item in an array that passes validation of the contains schema is considered "evaluated" + s.ContainsEval = true + } + } + + if format, ok := m["format"]; ok { + s.Format = format.(string) + if r.draft.version < 2019 || c.AssertFormat || r.schema.meta.hasVocab("format-assertion") { + if format, ok := c.Formats[s.Format]; ok { + s.format = format + } else { + s.format, _ = Formats[s.Format] + } + } + } + + if c.ExtractAnnotations { + if title, ok := m["title"]; ok { + s.Title = title.(string) + } + if description, ok := m["description"]; ok { + s.Description = description.(string) + } + s.Default = m["default"] + } + + if r.draft.version >= 6 { + if c, ok := m["const"]; ok { + s.Constant = []interface{}{c} + } + } + + if r.draft.version >= 7 { + if encoding, ok := m["contentEncoding"]; ok { + s.ContentEncoding = encoding.(string) + if decoder, ok := c.Decoders[s.ContentEncoding]; ok { + s.decoder = decoder + } else { + s.decoder, _ = Decoders[s.ContentEncoding] + } + } + if mediaType, ok := m["contentMediaType"]; ok { + s.ContentMediaType = mediaType.(string) + if mediaType, ok := c.MediaTypes[s.ContentMediaType]; ok { + s.mediaType = mediaType + } else { + s.mediaType, _ = MediaTypes[s.ContentMediaType] + } + if s.ContentSchema, err = loadSchema("contentSchema", stack); err != nil { + return err + } + } + if c.ExtractAnnotations { + if comment, ok := m["$comment"]; ok { + s.Comment = comment.(string) + } + if readOnly, ok := m["readOnly"]; ok { + s.ReadOnly = readOnly.(bool) + } + if writeOnly, ok := m["writeOnly"]; ok { + s.WriteOnly = writeOnly.(bool) + } + if examples, ok := m["examples"]; ok { + s.Examples = examples.([]interface{}) + } + } + } + + if r.draft.version >= 2019 { + if !c.AssertContent { + s.decoder = nil + s.mediaType = nil + s.ContentSchema = nil + } + if c.ExtractAnnotations { + if deprecated, ok := m["deprecated"]; ok { + s.Deprecated = deprecated.(bool) + } + } + } + + for name, ext := range c.extensions { + es, err := ext.compiler.Compile(CompilerContext{c, r, stack, res}, m) + if err != nil { + return err + } + if es != nil { + if s.Extensions == nil { + s.Extensions = make(map[string]ExtSchema) + } + s.Extensions[name] = es + } + } + + return nil +} + +func (c *Compiler) validateSchema(r *resource, v interface{}, vloc string) error { + validate := func(meta *Schema) error { + if meta == nil { + return nil + } + return meta.validateValue(v, vloc) + } + + if err := validate(r.draft.meta); err != nil { + return err + } + for _, ext := range c.extensions { + if err := validate(ext.meta); err != nil { + return err + } + } + return nil +} + +func toStrings(arr []interface{}) []string { + s := make([]string, len(arr)) + for i, v := range arr { + s[i] = v.(string) + } + return s +} + +// SchemaRef captures schema and the path referring to it. 
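+// A stack of schemaRefs is kept while compiling: checkLoop walks it to detect +// reference cycles, and keywordLocation joins its paths to build error locations.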
+type schemaRef struct { + path string // relative-json-pointer to schema + schema *Schema // target schema + discard bool // true when scope left +} + +func (sr schemaRef) String() string { + return fmt.Sprintf("(%s)%v", sr.path, sr.schema) +} + +func checkLoop(stack []schemaRef, sref schemaRef) error { + for _, ref := range stack { + if ref.schema == sref.schema { + return infiniteLoopError(stack, sref) + } + } + return nil +} + +func keywordLocation(stack []schemaRef, path string) string { + var loc string + for _, ref := range stack[1:] { + loc += "/" + ref.path + } + if path != "" { + loc = loc + "/" + path + } + return loc +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go new file mode 100644 index 0000000000..7570b8b5a9 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go @@ -0,0 +1,29 @@ +package jsonschema + +import ( + "encoding/base64" + "encoding/json" +) + +// Decoders is a registry of functions, which know how to decode +// strings encoded in a specific format. +// +// New Decoders can be registered by adding to this map. Key is encoding name, +// value is function that knows how to decode string in that format. +var Decoders = map[string]func(string) ([]byte, error){ + "base64": base64.StdEncoding.DecodeString, +} + +// MediaTypes is a registry of functions, which know how to validate +// whether the bytes represent data of that mediaType. +// +// New mediaTypes can be registered by adding to this map. Key is mediaType name, +// value is function that knows how to validate that mediaType. +var MediaTypes = map[string]func([]byte) error{ + "application/json": validateJSON, +} + +func validateJSON(b []byte) error { + var v interface{} + return json.Unmarshal(b, &v) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go new file mode 100644 index 0000000000..a124262a51 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go @@ -0,0 +1,49 @@ +/* +Package jsonschema provides json-schema compilation and validation. + +Features: + - implements draft 2020-12, 2019-09, draft-7, draft-6, draft-4 + - fully compliant with JSON-Schema-Test-Suite, (excluding some optional) + - list of optional tests that are excluded can be found in schema_test.go (variable skipTests) + - validates schemas against meta-schema + - full support of remote references + - support of recursive references between schemas + - detects infinite loops in schemas + - thread safe validation + - rich, intuitive hierarchical error messages with json-pointers to exact location + - supports output formats flag, basic and detailed + - supports enabling format and content Assertions in draft2019-09 or above + - change Compiler.AssertFormat, Compiler.AssertContent to true + - compiled schema can be introspected.
easier to develop tools like generating go structs given schema + - supports user-defined keywords via extensions + - implements following formats (supports user-defined) + - date-time, date, time, duration (supports leap-second) + - uuid, hostname, email + - ip-address, ipv4, ipv6 + - uri, uriref, uri-template(limited validation) + - json-pointer, relative-json-pointer + - regex, format + - implements following contentEncoding (supports user-defined) + - base64 + - implements following contentMediaType (supports user-defined) + - application/json + - can load from files/http/https/string/[]byte/io.Reader (supports user-defined) + +The schema is compiled against the version specified in the "$schema" property. +If the "$schema" property is missing, it uses the latest draft currently implemented +by this library. + +You can force a specific draft to be used when "$schema" is missing, as follows: + + compiler := jsonschema.NewCompiler() + compiler.Draft = jsonschema.Draft4 + +This package supports loading json-schema from filePath and fileURL. + +To load json-schema from HTTPURL, add the following import: + + import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" + +you can validate yaml documents. see https://play.golang.org/p/sJy1qY7dXgA +*/ +package jsonschema diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go new file mode 100644 index 0000000000..154fa5837d --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go @@ -0,0 +1,1454 @@ +package jsonschema + +import ( + "fmt" + "strconv" + "strings" +) + +// A Draft represents a json-schema draft +type Draft struct { + version int + meta *Schema + id string // property name used to represent schema id. + boolSchema bool // is boolean valid schema + vocab []string // built-in vocab + defaultVocab []string // vocabs when $vocabulary is not used + subschemas map[string]position +} + +func (d *Draft) URL() string { + switch d.version { + case 2020: + return "https://json-schema.org/draft/2020-12/schema" + case 2019: + return "https://json-schema.org/draft/2019-09/schema" + case 7: + return "https://json-schema.org/draft-07/schema" + case 6: + return "https://json-schema.org/draft-06/schema" + case 4: + return "https://json-schema.org/draft-04/schema" + } + return "" +} + +func (d *Draft) String() string { + return fmt.Sprintf("Draft%d", d.version) +} + +func (d *Draft) loadMeta(url, schema string) { + c := NewCompiler() + c.AssertFormat = true + if err := c.AddResource(url, strings.NewReader(schema)); err != nil { + panic(err) + } + d.meta = c.MustCompile(url) + d.meta.meta = d.meta +} + +func (d *Draft) getID(sch interface{}) string { + m, ok := sch.(map[string]interface{}) + if !ok { + return "" + } + if _, ok := m["$ref"]; ok && d.version <= 7 { + // $ref prevents a sibling id from changing the base uri + return "" + } + v, ok := m[d.id] + if !ok { + return "" + } + id, ok := v.(string) + if !ok { + return "" + } + return id +} + +func (d *Draft) resolveID(base string, sch interface{}) (string, error) { + id, _ := split(d.getID(sch)) // strip fragment + if id == "" { + return "", nil + } + url, err := resolveURL(base, id) + url, _ = split(url) // strip fragment + return url, err +} + +func (d *Draft) anchors(sch interface{}) []string { + m, ok := sch.(map[string]interface{}) + if !ok { + return nil + } + + var anchors []string + + // before draft2019, anchor is specified in id + _, f := split(d.getID(m)) + if f != "#" { + anchors = append(anchors, f[1:])
+ } + + if v, ok := m["$anchor"]; ok && d.version >= 2019 { + anchors = append(anchors, v.(string)) + } + if v, ok := m["$dynamicAnchor"]; ok && d.version >= 2020 { + anchors = append(anchors, v.(string)) + } + return anchors +} + +// listSubschemas collects subschemas in r into rr. +func (d *Draft) listSubschemas(r *resource, base string, rr map[string]*resource) error { + add := func(loc string, sch interface{}) error { + url, err := d.resolveID(base, sch) + if err != nil { + return err + } + floc := r.floc + "/" + loc + sr := &resource{url: url, floc: floc, doc: sch} + rr[floc] = sr + + base := base + if url != "" { + base = url + } + return d.listSubschemas(sr, base, rr) + } + + sch, ok := r.doc.(map[string]interface{}) + if !ok { + return nil + } + for kw, pos := range d.subschemas { + v, ok := sch[kw] + if !ok { + continue + } + if pos&self != 0 { + switch v := v.(type) { + case map[string]interface{}: + if err := add(kw, v); err != nil { + return err + } + case bool: + if d.boolSchema { + if err := add(kw, v); err != nil { + return err + } + } + } + } + if pos&item != 0 { + if v, ok := v.([]interface{}); ok { + for i, item := range v { + if err := add(kw+"/"+strconv.Itoa(i), item); err != nil { + return err + } + } + } + } + if pos&prop != 0 { + if v, ok := v.(map[string]interface{}); ok { + for pname, pval := range v { + if err := add(kw+"/"+escape(pname), pval); err != nil { + return err + } + } + } + } + } + return nil +} + +// isVocab tells whether url is built-in vocab. +func (d *Draft) isVocab(url string) bool { + for _, v := range d.vocab { + if url == v { + return true + } + } + return false +} + +type position uint + +const ( + self position = 1 << iota + prop + item +) + +// supported drafts +var ( + Draft4 = &Draft{version: 4, id: "id", boolSchema: false} + Draft6 = &Draft{version: 6, id: "$id", boolSchema: true} + Draft7 = &Draft{version: 7, id: "$id", boolSchema: true} + Draft2019 = &Draft{ + version: 2019, + id: "$id", + boolSchema: true, + vocab: []string{ + "https://json-schema.org/draft/2019-09/vocab/core", + "https://json-schema.org/draft/2019-09/vocab/applicator", + "https://json-schema.org/draft/2019-09/vocab/validation", + "https://json-schema.org/draft/2019-09/vocab/meta-data", + "https://json-schema.org/draft/2019-09/vocab/format", + "https://json-schema.org/draft/2019-09/vocab/content", + }, + defaultVocab: []string{ + "https://json-schema.org/draft/2019-09/vocab/core", + "https://json-schema.org/draft/2019-09/vocab/applicator", + "https://json-schema.org/draft/2019-09/vocab/validation", + }, + } + Draft2020 = &Draft{ + version: 2020, + id: "$id", + boolSchema: true, + vocab: []string{ + "https://json-schema.org/draft/2020-12/vocab/core", + "https://json-schema.org/draft/2020-12/vocab/applicator", + "https://json-schema.org/draft/2020-12/vocab/unevaluated", + "https://json-schema.org/draft/2020-12/vocab/validation", + "https://json-schema.org/draft/2020-12/vocab/meta-data", + "https://json-schema.org/draft/2020-12/vocab/format-annotation", + "https://json-schema.org/draft/2020-12/vocab/format-assertion", + "https://json-schema.org/draft/2020-12/vocab/content", + }, + defaultVocab: []string{ + "https://json-schema.org/draft/2020-12/vocab/core", + "https://json-schema.org/draft/2020-12/vocab/applicator", + "https://json-schema.org/draft/2020-12/vocab/unevaluated", + "https://json-schema.org/draft/2020-12/vocab/validation", + }, + } + + latest = Draft2020 +) + +func findDraft(url string) *Draft { + if strings.HasPrefix(url, "http://") { + url = "https://" + 
strings.TrimPrefix(url, "http://") + } + if strings.HasSuffix(url, "#") || strings.HasSuffix(url, "#/") { + url = url[:strings.IndexByte(url, '#')] + } + switch url { + case "https://json-schema.org/schema": + return latest + case "https://json-schema.org/draft/2020-12/schema": + return Draft2020 + case "https://json-schema.org/draft/2019-09/schema": + return Draft2019 + case "https://json-schema.org/draft-07/schema": + return Draft7 + case "https://json-schema.org/draft-06/schema": + return Draft6 + case "https://json-schema.org/draft-04/schema": + return Draft4 + } + return nil +} + +func init() { + subschemas := map[string]position{ + // type agnostic + "definitions": prop, + "not": self, + "allOf": item, + "anyOf": item, + "oneOf": item, + // object + "properties": prop, + "additionalProperties": self, + "patternProperties": prop, + // array + "items": self | item, + "additionalItems": self, + "dependencies": prop, + } + Draft4.subschemas = clone(subschemas) + + subschemas["propertyNames"] = self + subschemas["contains"] = self + Draft6.subschemas = clone(subschemas) + + subschemas["if"] = self + subschemas["then"] = self + subschemas["else"] = self + Draft7.subschemas = clone(subschemas) + + subschemas["$defs"] = prop + subschemas["dependentSchemas"] = prop + subschemas["unevaluatedProperties"] = self + subschemas["unevaluatedItems"] = self + subschemas["contentSchema"] = self + Draft2019.subschemas = clone(subschemas) + + subschemas["prefixItems"] = item + Draft2020.subschemas = clone(subschemas) + + Draft4.loadMeta("http://json-schema.org/draft-04/schema", `{ + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uriref" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + 
"additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "regexProperties": true, + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "regexProperties": { "type": "boolean" }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" }, + "format": { "type": "string" }, + "$ref": { "type": "string" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} + }`) + Draft6.loadMeta("http://json-schema.org/draft-06/schema", `{ + "$schema": "http://json-schema.org/draft-06/schema#", + "$id": "http://json-schema.org/draft-06/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + 
"type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "regexProperties": true, + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": {}, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": {} + }`) + Draft7.loadMeta("http://json-schema.org/draft-07/schema", `{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + 
"additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true, + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "if": { "$ref": "#" }, + "then": { "$ref": "#" }, + "else": { "$ref": "#" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true + }`) + Draft2019.loadMeta("https://json-schema.org/draft/2019-09/schema", `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/schema", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true, + "https://json-schema.org/draft/2019-09/vocab/applicator": true, + "https://json-schema.org/draft/2019-09/vocab/validation": true, + "https://json-schema.org/draft/2019-09/vocab/meta-data": true, + "https://json-schema.org/draft/2019-09/vocab/format": false, + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "properties": { + "definitions": { + "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + } + } + } + }`) + Draft2020.loadMeta("https://json-schema.org/draft/2020-12/schema", `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": 
"meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } + }`) +} + +var vocabSchemas = map[string]string{ + "https://json-schema.org/draft/2019-09/meta/core": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true + }, + "$recursiveAnchor": true, + + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$anchor": { + "type": "string", + "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveRef": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveAnchor": { + "type": "boolean", + "default": false + }, + "$vocabulary": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri" + }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + } + } + }`, + "https://json-schema.org/draft/2019-09/meta/applicator": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/applicator": true + }, + "$recursiveAnchor": true, + + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "additionalItems": { "$recursiveRef": "#" }, + "unevaluatedItems": { "$recursiveRef": "#" }, + "items": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "#/$defs/schemaArray" } + ] + }, + "contains": { "$recursiveRef": "#" }, + "additionalProperties": { "$recursiveRef": "#" }, + "unevaluatedProperties": { "$recursiveRef": "#" }, + "properties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { + "$recursiveRef": "#" + } + }, + 
"propertyNames": { "$recursiveRef": "#" }, + "if": { "$recursiveRef": "#" }, + "then": { "$recursiveRef": "#" }, + "else": { "$recursiveRef": "#" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$recursiveRef": "#" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$recursiveRef": "#" } + } + } + }`, + "https://json-schema.org/draft/2019-09/meta/validation": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/validation": true + }, + "$recursiveAnchor": true, + + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } + }`, + "https://json-schema.org/draft/2019-09/meta/meta-data": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/meta-data": true + }, + "$recursiveAnchor": true, + + "title": "Meta-data vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } + }`, + "https://json-schema.org/draft/2019-09/meta/format": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/format", + "$vocabulary": { + 
"https://json-schema.org/draft/2019-09/vocab/format": true + }, + "$recursiveAnchor": true, + + "title": "Format vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } + }`, + "https://json-schema.org/draft/2019-09/meta/content": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + + "title": "Content vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "contentSchema": { "$recursiveRef": "#" } + } + }`, + "https://json-schema.org/draft/2020-12/meta/core": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true + }, + "$dynamicAnchor": "meta", + + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "$ref": "#/$defs/uriReferenceString", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { "$ref": "#/$defs/uriString" }, + "$ref": { "$ref": "#/$defs/uriReferenceString" }, + "$anchor": { "$ref": "#/$defs/anchorString" }, + "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, + "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, + "$vocabulary": { + "type": "object", + "propertyNames": { "$ref": "#/$defs/uriString" }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" } + } + }, + "$defs": { + "anchorString": { + "type": "string", + "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" + }, + "uriString": { + "type": "string", + "format": "uri" + }, + "uriReferenceString": { + "type": "string", + "format": "uri-reference" + } + } + }`, + "https://json-schema.org/draft/2020-12/meta/applicator": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/applicator": true + }, + "$dynamicAnchor": "meta", + + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "prefixItems": { "$ref": "#/$defs/schemaArray" }, + "items": { "$dynamicRef": "#meta" }, + "contains": { "$dynamicRef": "#meta" }, + "additionalProperties": { "$dynamicRef": "#meta" }, + "properties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "propertyNames": { "$dynamicRef": "#meta" }, + "if": { "$dynamicRef": "#meta" }, + "then": { "$dynamicRef": "#meta" }, + "else": { "$dynamicRef": "#meta" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$dynamicRef": "#meta" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$dynamicRef": "#meta" } + } + } + }`, + 
"https://json-schema.org/draft/2020-12/meta/unevaluated": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true + }, + "$dynamicAnchor": "meta", + + "title": "Unevaluated applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "unevaluatedItems": { "$dynamicRef": "#meta" }, + "unevaluatedProperties": { "$dynamicRef": "#meta" } + } + }`, + "https://json-schema.org/draft/2020-12/meta/validation": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/validation": true + }, + "$dynamicAnchor": "meta", + + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } + }`, + "https://json-schema.org/draft/2020-12/meta/meta-data": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/meta-data": true + }, + "$dynamicAnchor": "meta", + + "title": "Meta-data vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } + }`, + "https://json-schema.org/draft/2020-12/meta/format-annotation": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": 
"https://json-schema.org/draft/2020-12/meta/format-annotation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true + }, + "$dynamicAnchor": "meta", + + "title": "Format vocabulary meta-schema for annotation results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } + }`, + "https://json-schema.org/draft/2020-12/meta/format-assertion": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-assertion": true + }, + "$dynamicAnchor": "meta", + + "title": "Format vocabulary meta-schema for assertion results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } + }`, + "https://json-schema.org/draft/2020-12/meta/content": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Content vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "contentEncoding": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentSchema": { "$dynamicRef": "#meta" } + } + }`, +} + +func clone(m map[string]position) map[string]position { + mm := make(map[string]position) + for k, v := range m { + mm[k] = v + } + return mm +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go new file mode 100644 index 0000000000..deaded89f7 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go @@ -0,0 +1,129 @@ +package jsonschema + +import ( + "fmt" + "strings" +) + +// InvalidJSONTypeError is the error type returned by ValidateInterface. +// this tells that specified go object is not valid jsonType. +type InvalidJSONTypeError string + +func (e InvalidJSONTypeError) Error() string { + return fmt.Sprintf("jsonschema: invalid jsonType: %s", string(e)) +} + +// InfiniteLoopError is returned by Compile/Validate. +// this gives url#keywordLocation that lead to infinity loop. +type InfiniteLoopError string + +func (e InfiniteLoopError) Error() string { + return "jsonschema: infinite loop " + string(e) +} + +func infiniteLoopError(stack []schemaRef, sref schemaRef) InfiniteLoopError { + var path string + for _, ref := range stack { + if path == "" { + path += ref.schema.Location + } else { + path += "/" + ref.path + } + } + return InfiniteLoopError(path + "/" + sref.path) +} + +// SchemaError is the error type returned by Compile. +type SchemaError struct { + // SchemaURL is the url to json-schema that filed to compile. + // This is helpful, if your schema refers to external schemas + SchemaURL string + + // Err is the error that occurred during compilation. 
+	// It could be a *ValidationError, because compilation validates
+	// the given schema against the json meta-schema.
+	Err error
+}
+
+func (se *SchemaError) Unwrap() error {
+	return se.Err
+}
+
+func (se *SchemaError) Error() string {
+	s := fmt.Sprintf("jsonschema %s compilation failed", se.SchemaURL)
+	if se.Err != nil {
+		return fmt.Sprintf("%s: %v", s, strings.TrimPrefix(se.Err.Error(), "jsonschema: "))
+	}
+	return s
+}
+
+func (se *SchemaError) GoString() string {
+	if _, ok := se.Err.(*ValidationError); ok {
+		return fmt.Sprintf("jsonschema %s compilation failed\n%#v", se.SchemaURL, se.Err)
+	}
+	return se.Error()
+}
+
+// ValidationError is the error type returned by Validate.
+type ValidationError struct {
+	KeywordLocation         string             // validation path of validating keyword or schema
+	AbsoluteKeywordLocation string             // absolute location of validating keyword or schema
+	InstanceLocation        string             // location of the json value within the instance being validated
+	Message                 string             // describes error
+	Causes                  []*ValidationError // nested validation errors
+}
+
+func (ve *ValidationError) add(causes ...error) error {
+	for _, cause := range causes {
+		ve.Causes = append(ve.Causes, cause.(*ValidationError))
+	}
+	return ve
+}
+
+func (ve *ValidationError) causes(err error) error {
+	if err := err.(*ValidationError); err.Message == "" {
+		ve.Causes = err.Causes
+	} else {
+		ve.add(err)
+	}
+	return ve
+}
+
+func (ve *ValidationError) Error() string {
+	leaf := ve
+	for len(leaf.Causes) > 0 {
+		leaf = leaf.Causes[0]
+	}
+	u, _ := split(ve.AbsoluteKeywordLocation)
+	return fmt.Sprintf("jsonschema: %s does not validate with %s: %s", quote(leaf.InstanceLocation), u+"#"+leaf.KeywordLocation, leaf.Message)
+}
+
+func (ve *ValidationError) GoString() string {
+	sloc := ve.AbsoluteKeywordLocation
+	sloc = sloc[strings.IndexByte(sloc, '#')+1:]
+	msg := fmt.Sprintf("[I#%s] [S#%s] %s", ve.InstanceLocation, sloc, ve.Message)
+	for _, c := range ve.Causes {
+		for _, line := range strings.Split(c.GoString(), "\n") {
+			msg += "\n  " + line
+		}
+	}
+	return msg
+}
+
+func joinPtr(ptr1, ptr2 string) string {
+	if len(ptr1) == 0 {
+		return ptr2
+	}
+	if len(ptr2) == 0 {
+		return ptr1
+	}
+	return ptr1 + "/" + ptr2
+}
+
+// quote returns the string single-quoted.
+func quote(s string) string {
+	s = fmt.Sprintf("%q", s)
+	s = strings.ReplaceAll(s, `\"`, `"`)
+	s = strings.ReplaceAll(s, `'`, `\'`)
+	return "'" + s[1:len(s)-1] + "'"
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go
new file mode 100644
index 0000000000..452ba118c5
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go
@@ -0,0 +1,116 @@
+package jsonschema
+
+// ExtCompiler compiles custom keyword(s) into ExtSchema.
+type ExtCompiler interface {
+	// Compile compiles the custom keywords in schema m and returns its compiled representation.
+	// If the schema m does not contain the keywords defined by this extension,
+	// a nil compiled representation should be returned.
+	Compile(ctx CompilerContext, m map[string]interface{}) (ExtSchema, error)
+}
+
+// ExtSchema is the schema representation of custom keyword(s).
+type ExtSchema interface {
+	// Validate validates the json value v with this ExtSchema.
+	// The returned error must be a *ValidationError.
+	Validate(ctx ValidationContext, v interface{}) error
+}
+
+type extension struct {
+	meta     *Schema
+	compiler ExtCompiler
+}
+
+// RegisterExtension registers custom keyword(s) into this compiler.
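+//
+// A minimal sketch of wiring up an extension (the "even" keyword, its
+// metaschema document and the evenCompiler type are hypothetical):
+//
+//	c := NewCompiler()
+//	if err := c.AddResource("evenMeta.json", strings.NewReader(
+//		`{"properties": {"even": {"type": "boolean"}}}`)); err != nil {
+//		panic(err)
+//	}
+//	c.RegisterExtension("even", c.MustCompile("evenMeta.json"), evenCompiler{})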
+//
+// name is the extension name, used only to avoid name collisions.
+// meta captures the metaschema for the new keywords.
+// This is used to validate the schema before calling ext.Compile.
+func (c *Compiler) RegisterExtension(name string, meta *Schema, ext ExtCompiler) {
+	c.extensions[name] = extension{meta, ext}
+}
+
+// CompilerContext ---
+
+// CompilerContext provides additional context required in compiling for extension.
+type CompilerContext struct {
+	c     *Compiler
+	r     *resource
+	stack []schemaRef
+	res   *resource
+}
+
+// Compile compiles the given value at ptr into a *Schema. This is useful in
+// implementing keywords like allOf/not/patternProperties.
+//
+// schPath is the relative-json-pointer to the schema to be compiled, from the parent schema.
+//
+// applicableOnSameInstance tells whether the current schema and the given schema
+// are applied to the same instance value. This is used to detect infinite loops in the schema.
+func (ctx CompilerContext) Compile(schPath string, applicableOnSameInstance bool) (*Schema, error) {
+	var stack []schemaRef
+	if applicableOnSameInstance {
+		stack = ctx.stack
+	}
+	return ctx.c.compileRef(ctx.r, stack, schPath, ctx.res, ctx.r.url+ctx.res.floc+"/"+schPath)
+}
+
+// CompileRef compiles the schema referenced by the ref uri.
+//
+// refPath is the relative-json-pointer to ref.
+//
+// applicableOnSameInstance tells whether the current schema and the given schema
+// are applied to the same instance value. This is used to detect infinite loops in the schema.
+func (ctx CompilerContext) CompileRef(ref string, refPath string, applicableOnSameInstance bool) (*Schema, error) {
+	var stack []schemaRef
+	if applicableOnSameInstance {
+		stack = ctx.stack
+	}
+	return ctx.c.compileRef(ctx.r, stack, refPath, ctx.res, ref)
+}
+
+// ValidationContext ---
+
+// ValidationContext provides additional context required in validating for extension.
+type ValidationContext struct {
+	result          validationResult
+	validate        func(sch *Schema, schPath string, v interface{}, vpath string) error
+	validateInplace func(sch *Schema, schPath string) error
+	validationError func(keywordPath string, format string, a ...interface{}) *ValidationError
+}
+
+// EvaluatedProp marks the given property of the object as evaluated.
+func (ctx ValidationContext) EvaluatedProp(prop string) {
+	delete(ctx.result.unevalProps, prop)
+}
+
+// EvaluatedItem marks the given index of the array as evaluated.
+func (ctx ValidationContext) EvaluatedItem(index int) {
+	delete(ctx.result.unevalItems, index)
+}
+
+// Validate validates schema s with value v. Extensions must use this method instead
+// of the *Schema.ValidateInterface method. This is useful in implementing keywords
+// like allOf/oneOf.
+//
+// spath is the relative-json-pointer to s.
+// vpath is the relative-json-pointer to v.
+func (ctx ValidationContext) Validate(s *Schema, spath string, v interface{}, vpath string) error {
+	if vpath == "" {
+		return ctx.validateInplace(s, spath)
+	}
+	return ctx.validate(s, spath, v, vpath)
+}
+
+// Error is used by extensions to construct a validation error.
+//
+// keywordPath is the relative-json-pointer to the keyword.
+func (ctx ValidationContext) Error(keywordPath string, format string, a ...interface{}) *ValidationError {
+	return ctx.validationError(keywordPath, format, a...)
+}
+
+// Group is used by extensions to group multiple errors as causes of a parent error.
+// This is useful in implementing keywords like allOf, where each schema specified
+// in allOf can result in a ValidationError.
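+//
+// A sketch of how a hypothetical extension's Validate might use it, assuming
+// causes holds one *ValidationError per failing item:
+//
+//	parent := ctx.Error("even", "some items are not even")
+//	return ValidationError{}.Group(parent, causes...)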
+func (ValidationError) Group(parent *ValidationError, causes ...error) error { + return parent.add(causes...) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go new file mode 100644 index 0000000000..05686073f0 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go @@ -0,0 +1,567 @@ +package jsonschema + +import ( + "errors" + "net" + "net/mail" + "net/url" + "regexp" + "strconv" + "strings" + "time" +) + +// Formats is a registry of functions, which know how to validate +// a specific format. +// +// New Formats can be registered by adding to this map. Key is format name, +// value is function that knows how to validate that format. +var Formats = map[string]func(interface{}) bool{ + "date-time": isDateTime, + "date": isDate, + "time": isTime, + "duration": isDuration, + "period": isPeriod, + "hostname": isHostname, + "email": isEmail, + "ip-address": isIPV4, + "ipv4": isIPV4, + "ipv6": isIPV6, + "uri": isURI, + "iri": isURI, + "uri-reference": isURIReference, + "uriref": isURIReference, + "iri-reference": isURIReference, + "uri-template": isURITemplate, + "regex": isRegex, + "json-pointer": isJSONPointer, + "relative-json-pointer": isRelativeJSONPointer, + "uuid": isUUID, +} + +// isDateTime tells whether given string is a valid date representation +// as defined by RFC 3339, section 5.6. +// +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details +func isDateTime(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + if len(s) < 20 { // yyyy-mm-ddThh:mm:ssZ + return false + } + if s[10] != 'T' && s[10] != 't' { + return false + } + return isDate(s[:10]) && isTime(s[11:]) +} + +// isDate tells whether given string is a valid full-date production +// as defined by RFC 3339, section 5.6. +// +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details +func isDate(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + _, err := time.Parse("2006-01-02", s) + return err == nil +} + +// isTime tells whether given string is a valid full-time production +// as defined by RFC 3339, section 5.6. +// +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details +func isTime(v interface{}) bool { + str, ok := v.(string) + if !ok { + return true + } + + // golang time package does not support leap seconds. + // so we are parsing it manually here. + + // hh:mm:ss + // 01234567 + if len(str) < 9 || str[2] != ':' || str[5] != ':' { + return false + } + isInRange := func(str string, min, max int) (int, bool) { + n, err := strconv.Atoi(str) + if err != nil { + return 0, false + } + if n < min || n > max { + return 0, false + } + return n, true + } + var h, m, s int + if h, ok = isInRange(str[0:2], 0, 23); !ok { + return false + } + if m, ok = isInRange(str[3:5], 0, 59); !ok { + return false + } + if s, ok = isInRange(str[6:8], 0, 60); !ok { + return false + } + str = str[8:] + + // parse secfrac if present + if str[0] == '.' 
{ + // dot following more than one digit + str = str[1:] + var numDigits int + for str != "" { + if str[0] < '0' || str[0] > '9' { + break + } + numDigits++ + str = str[1:] + } + if numDigits == 0 { + return false + } + } + + if len(str) == 0 { + return false + } + + if str[0] == 'z' || str[0] == 'Z' { + if len(str) != 1 { + return false + } + } else { + // time-numoffset + // +hh:mm + // 012345 + if len(str) != 6 || str[3] != ':' { + return false + } + + var sign int + if str[0] == '+' { + sign = -1 + } else if str[0] == '-' { + sign = +1 + } else { + return false + } + + var zh, zm int + if zh, ok = isInRange(str[1:3], 0, 23); !ok { + return false + } + if zm, ok = isInRange(str[4:6], 0, 59); !ok { + return false + } + + // apply timezone offset + hm := (h*60 + m) + sign*(zh*60+zm) + if hm < 0 { + hm += 24 * 60 + } + h, m = hm/60, hm%60 + } + + // check leapsecond + if s == 60 { // leap second + if h != 23 || m != 59 { + return false + } + } + + return true +} + +// isDuration tells whether given string is a valid duration format +// from the ISO 8601 ABNF as given in Appendix A of RFC 3339. +// +// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details +func isDuration(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + if len(s) == 0 || s[0] != 'P' { + return false + } + s = s[1:] + parseUnits := func() (units string, ok bool) { + for len(s) > 0 && s[0] != 'T' { + digits := false + for { + if len(s) == 0 { + break + } + if s[0] < '0' || s[0] > '9' { + break + } + digits = true + s = s[1:] + } + if !digits || len(s) == 0 { + return units, false + } + units += s[:1] + s = s[1:] + } + return units, true + } + units, ok := parseUnits() + if !ok { + return false + } + if units == "W" { + return len(s) == 0 // P_W + } + if len(units) > 0 { + if strings.Index("YMD", units) == -1 { + return false + } + if len(s) == 0 { + return true // "P" dur-date + } + } + if len(s) == 0 || s[0] != 'T' { + return false + } + s = s[1:] + units, ok = parseUnits() + return ok && len(s) == 0 && len(units) > 0 && strings.Index("HMS", units) != -1 +} + +// isPeriod tells whether given string is a valid period format +// from the ISO 8601 ABNF as given in Appendix A of RFC 3339. +// +// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details +func isPeriod(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + slash := strings.IndexByte(s, '/') + if slash == -1 { + return false + } + start, end := s[:slash], s[slash+1:] + if isDateTime(start) { + return isDateTime(end) || isDuration(end) + } + return isDuration(start) && isDateTime(end) +} + +// isHostname tells whether given string is a valid representation +// for an Internet host name, as defined by RFC 1034 section 3.1 and +// RFC 1123 section 2.1. +// +// See https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names, for details. 
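+//
+// For example, "example.com" and "a-1.b-2" are accepted, while an empty
+// label ("a..b"), a label longer than 63 characters, or a label ending in
+// a hyphen ("ab-.cd") is rejected.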
+func isHostname(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + // entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters + s = strings.TrimSuffix(s, ".") + if len(s) > 253 { + return false + } + + // Hostnames are composed of series of labels concatenated with dots, as are all domain names + for _, label := range strings.Split(s, ".") { + // Each label must be from 1 to 63 characters long + if labelLen := len(label); labelLen < 1 || labelLen > 63 { + return false + } + + // labels must not start with a hyphen + // RFC 1123 section 2.1: restriction on the first character + // is relaxed to allow either a letter or a digit + if first := s[0]; first == '-' { + return false + } + + // must not end with a hyphen + if label[len(label)-1] == '-' { + return false + } + + // labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner), + // the digits '0' through '9', and the hyphen ('-') + for _, c := range label { + if valid := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '-'); !valid { + return false + } + } + } + + return true +} + +// isEmail tells whether given string is a valid Internet email address +// as defined by RFC 5322, section 3.4.1. +// +// See https://en.wikipedia.org/wiki/Email_address, for details. +func isEmail(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + // entire email address to be no more than 254 characters long + if len(s) > 254 { + return false + } + + // email address is generally recognized as having two parts joined with an at-sign + at := strings.LastIndexByte(s, '@') + if at == -1 { + return false + } + local := s[0:at] + domain := s[at+1:] + + // local part may be up to 64 characters long + if len(local) > 64 { + return false + } + + // domain if enclosed in brackets, must match an IP address + if len(domain) >= 2 && domain[0] == '[' && domain[len(domain)-1] == ']' { + ip := domain[1 : len(domain)-1] + if strings.HasPrefix(ip, "IPv6:") { + return isIPV6(strings.TrimPrefix(ip, "IPv6:")) + } + return isIPV4(ip) + } + + // domain must match the requirements for a hostname + if !isHostname(domain) { + return false + } + + _, err := mail.ParseAddress(s) + return err == nil +} + +// isIPV4 tells whether given string is a valid representation of an IPv4 address +// according to the "dotted-quad" ABNF syntax as defined in RFC 2673, section 3.2. +func isIPV4(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + groups := strings.Split(s, ".") + if len(groups) != 4 { + return false + } + for _, group := range groups { + n, err := strconv.Atoi(group) + if err != nil { + return false + } + if n < 0 || n > 255 { + return false + } + if n != 0 && group[0] == '0' { + return false // leading zeroes should be rejected, as they are treated as octals + } + } + return true +} + +// isIPV6 tells whether given string is a valid representation of an IPv6 address +// as defined in RFC 2373, section 2.2. +func isIPV6(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + if !strings.Contains(s, ":") { + return false + } + return net.ParseIP(s) != nil +} + +// isURI tells whether given string is valid URI, according to RFC 3986. 
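+//
+// For example, "https://example.com/x" is accepted, while scheme-less
+// references such as "//example.com/x" or "/x" are not (they are URI
+// references, not URIs).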
+func isURI(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + u, err := urlParse(s) + return err == nil && u.IsAbs() +} + +func urlParse(s string) (*url.URL, error) { + u, err := url.Parse(s) + if err != nil { + return nil, err + } + + // if hostname is ipv6, validate it + hostname := u.Hostname() + if strings.IndexByte(hostname, ':') != -1 { + if strings.IndexByte(u.Host, '[') == -1 || strings.IndexByte(u.Host, ']') == -1 { + return nil, errors.New("ipv6 address is not enclosed in brackets") + } + if !isIPV6(hostname) { + return nil, errors.New("invalid ipv6 address") + } + } + return u, nil +} + +// isURIReference tells whether given string is a valid URI Reference +// (either a URI or a relative-reference), according to RFC 3986. +func isURIReference(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + _, err := urlParse(s) + return err == nil && !strings.Contains(s, `\`) +} + +// isURITemplate tells whether given string is a valid URI Template +// according to RFC6570. +// +// Current implementation does minimal validation. +func isURITemplate(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + u, err := urlParse(s) + if err != nil { + return false + } + for _, item := range strings.Split(u.RawPath, "/") { + depth := 0 + for _, ch := range item { + switch ch { + case '{': + depth++ + if depth != 1 { + return false + } + case '}': + depth-- + if depth != 0 { + return false + } + } + } + if depth != 0 { + return false + } + } + return true +} + +// isRegex tells whether given string is a valid regular expression, +// according to the ECMA 262 regular expression dialect. +// +// The implementation uses go-lang regexp package. +func isRegex(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + _, err := regexp.Compile(s) + return err == nil +} + +// isJSONPointer tells whether given string is a valid JSON Pointer. +// +// Note: It returns false for JSON Pointer URI fragments. +func isJSONPointer(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + if s != "" && !strings.HasPrefix(s, "/") { + return false + } + for _, item := range strings.Split(s, "/") { + for i := 0; i < len(item); i++ { + if item[i] == '~' { + if i == len(item)-1 { + return false + } + switch item[i+1] { + case '0', '1': + // valid + default: + return false + } + } + } + } + return true +} + +// isRelativeJSONPointer tells whether given string is a valid Relative JSON Pointer. +// +// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 +func isRelativeJSONPointer(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + if s == "" { + return false + } + if s[0] == '0' { + s = s[1:] + } else if s[0] >= '0' && s[0] <= '9' { + for s != "" && s[0] >= '0' && s[0] <= '9' { + s = s[1:] + } + } else { + return false + } + return s == "#" || isJSONPointer(s) +} + +// isUUID tells whether given string is a valid uuid format +// as specified in RFC4122. 
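+// For example, "f81d4fae-7dec-11d0-a765-00a0c91e6bf6" is accepted, while
+// braced, URN-prefixed, or non-hex forms are rejected.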
+// +// see https://datatracker.ietf.org/doc/html/rfc4122#page-4, for details +func isUUID(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + parseHex := func(n int) bool { + for n > 0 { + if len(s) == 0 { + return false + } + hex := (s[0] >= '0' && s[0] <= '9') || (s[0] >= 'a' && s[0] <= 'f') || (s[0] >= 'A' && s[0] <= 'F') + if !hex { + return false + } + s = s[1:] + n-- + } + return true + } + groups := []int{8, 4, 4, 4, 12} + for i, numDigits := range groups { + if !parseHex(numDigits) { + return false + } + if i == len(groups)-1 { + break + } + if len(s) == 0 || s[0] != '-' { + return false + } + s = s[1:] + } + return len(s) == 0 +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go new file mode 100644 index 0000000000..4198cfe37c --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go @@ -0,0 +1,38 @@ +// Package httploader implements loader.Loader for http/https url. +// +// The package is typically only imported for the side effect of +// registering its Loaders. +// +// To use httploader, link this package into your program: +// +// import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" +package httploader + +import ( + "fmt" + "io" + "net/http" + + "github.com/santhosh-tekuri/jsonschema/v5" +) + +// Client is the default HTTP Client used to Get the resource. +var Client = http.DefaultClient + +// Load loads resource from given http(s) url. +func Load(url string) (io.ReadCloser, error) { + resp, err := Client.Get(url) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + _ = resp.Body.Close() + return nil, fmt.Errorf("%s returned status code %d", url, resp.StatusCode) + } + return resp.Body, nil +} + +func init() { + jsonschema.Loaders["http"] = Load + jsonschema.Loaders["https"] = Load +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go new file mode 100644 index 0000000000..c94195c335 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go @@ -0,0 +1,60 @@ +package jsonschema + +import ( + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" +) + +func loadFileURL(s string) (io.ReadCloser, error) { + u, err := url.Parse(s) + if err != nil { + return nil, err + } + f := u.Path + if runtime.GOOS == "windows" { + f = strings.TrimPrefix(f, "/") + f = filepath.FromSlash(f) + } + return os.Open(f) +} + +// Loaders is a registry of functions, which know how to load +// absolute url of specific schema. +// +// New loaders can be registered by adding to this map. Key is schema, +// value is function that knows how to load url of that schema +var Loaders = map[string]func(url string) (io.ReadCloser, error){ + "file": loadFileURL, +} + +// LoaderNotFoundError is the error type returned by Load function. +// It tells that no Loader is registered for that URL Scheme. +type LoaderNotFoundError string + +func (e LoaderNotFoundError) Error() string { + return fmt.Sprintf("jsonschema: no Loader found for %s", string(e)) +} + +// LoadURL loads document at given absolute URL. The default implementation +// uses Loaders registry to lookup by schema and uses that loader. +// +// Users can change this variable, if they would like to take complete +// responsibility of loading given URL. Used by Compiler if its LoadURL +// field is nil. 
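+//
+// A minimal sketch of such an override (the inMemory map is hypothetical),
+// falling back to the default behavior:
+//
+//	defaultLoad := jsonschema.LoadURL
+//	jsonschema.LoadURL = func(s string) (io.ReadCloser, error) {
+//		if doc, ok := inMemory[s]; ok {
+//			return io.NopCloser(strings.NewReader(doc)), nil
+//		}
+//		return defaultLoad(s)
+//	}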
+var LoadURL = func(s string) (io.ReadCloser, error) { + u, err := url.Parse(s) + if err != nil { + return nil, err + } + loader, ok := Loaders[u.Scheme] + if !ok { + return nil, LoaderNotFoundError(s) + + } + return loader(s) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go new file mode 100644 index 0000000000..d65ae2a929 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go @@ -0,0 +1,77 @@ +package jsonschema + +// Flag is output format with simple boolean property valid. +type Flag struct { + Valid bool `json:"valid"` +} + +// FlagOutput returns output in flag format +func (ve *ValidationError) FlagOutput() Flag { + return Flag{} +} + +// Basic --- + +// Basic is output format with flat list of output units. +type Basic struct { + Valid bool `json:"valid"` + Errors []BasicError `json:"errors"` +} + +// BasicError is output unit in basic format. +type BasicError struct { + KeywordLocation string `json:"keywordLocation"` + AbsoluteKeywordLocation string `json:"absoluteKeywordLocation"` + InstanceLocation string `json:"instanceLocation"` + Error string `json:"error"` +} + +// BasicOutput returns output in basic format +func (ve *ValidationError) BasicOutput() Basic { + var errors []BasicError + var flatten func(*ValidationError) + flatten = func(ve *ValidationError) { + errors = append(errors, BasicError{ + KeywordLocation: ve.KeywordLocation, + AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation, + InstanceLocation: ve.InstanceLocation, + Error: ve.Message, + }) + for _, cause := range ve.Causes { + flatten(cause) + } + } + flatten(ve) + return Basic{Errors: errors} +} + +// Detailed --- + +// Detailed is output format based on structure of schema. +type Detailed struct { + Valid bool `json:"valid"` + KeywordLocation string `json:"keywordLocation"` + AbsoluteKeywordLocation string `json:"absoluteKeywordLocation"` + InstanceLocation string `json:"instanceLocation"` + Error string `json:"error,omitempty"` + Errors []Detailed `json:"errors,omitempty"` +} + +// DetailedOutput returns output in detailed format +func (ve *ValidationError) DetailedOutput() Detailed { + var errors []Detailed + for _, cause := range ve.Causes { + errors = append(errors, cause.DetailedOutput()) + } + var message = ve.Message + if len(ve.Causes) > 0 { + message = "" + } + return Detailed{ + KeywordLocation: ve.KeywordLocation, + AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation, + InstanceLocation: ve.InstanceLocation, + Error: message, + Errors: errors, + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go new file mode 100644 index 0000000000..18349daac7 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go @@ -0,0 +1,280 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "io" + "net/url" + "path/filepath" + "runtime" + "strconv" + "strings" +) + +type resource struct { + url string // base url of resource. can be empty + floc string // fragment with json-pointer from root resource + doc interface{} + draft *Draft + subresources map[string]*resource // key is floc. 
+	schema       *Schema
+}
+
+func (r *resource) String() string {
+	return r.url + r.floc
+}
+
+func newResource(url string, r io.Reader) (*resource, error) {
+	if strings.IndexByte(url, '#') != -1 {
+		panic(fmt.Sprintf("BUG: newResource(%q)", url))
+	}
+	doc, err := unmarshal(r)
+	if err != nil {
+		return nil, fmt.Errorf("jsonschema: invalid json %s: %v", url, err)
+	}
+	url, err = toAbs(url)
+	if err != nil {
+		return nil, err
+	}
+	return &resource{
+		url:  url,
+		floc: "#",
+		doc:  doc,
+	}, nil
+}
+
+// fillSubschemas fills subschemas in res into r.subresources
+func (r *resource) fillSubschemas(c *Compiler, res *resource) error {
+	if err := c.validateSchema(r, res.doc, res.floc[1:]); err != nil {
+		return err
+	}
+
+	if r.subresources == nil {
+		r.subresources = make(map[string]*resource)
+	}
+	if err := r.draft.listSubschemas(res, r.baseURL(res.floc), r.subresources); err != nil {
+		return err
+	}
+
+	// ensure subresource.url uniqueness
+	url2floc := make(map[string]string)
+	for _, sr := range r.subresources {
+		if sr.url != "" {
+			if floc, ok := url2floc[sr.url]; ok {
+				return fmt.Errorf("jsonschema: %q and %q in %s have same canonical-uri", floc[1:], sr.floc[1:], r.url)
+			}
+			url2floc[sr.url] = sr.floc
+		}
+	}
+
+	return nil
+}
+
+// listResources lists all subresources in res
+func (r *resource) listResources(res *resource) []*resource {
+	var result []*resource
+	prefix := res.floc + "/"
+	for _, sr := range r.subresources {
+		if strings.HasPrefix(sr.floc, prefix) {
+			result = append(result, sr)
+		}
+	}
+	return result
+}
+
+func (r *resource) findResource(url string) *resource {
+	if r.url == url {
+		return r
+	}
+	for _, res := range r.subresources {
+		if res.url == url {
+			return res
+		}
+	}
+	return nil
+}
+
+// resolveFragment resolves fragment f with sr as base
+func (r *resource) resolveFragment(c *Compiler, sr *resource, f string) (*resource, error) {
+	if f == "#" || f == "#/" {
+		return sr, nil
+	}
+
+	// resolve by anchor
+	if !strings.HasPrefix(f, "#/") {
+		// check in given resource
+		for _, anchor := range r.draft.anchors(sr.doc) {
+			if anchor == f[1:] {
+				return sr, nil
+			}
+		}
+
+		// check in subresources that have the same base url
+		prefix := sr.floc + "/"
+		for _, res := range r.subresources {
+			if strings.HasPrefix(res.floc, prefix) && r.baseURL(res.floc) == sr.url {
+				for _, anchor := range r.draft.anchors(res.doc) {
+					if anchor == f[1:] {
+						return res, nil
+					}
+				}
+			}
+		}
+		return nil, nil
+	}
+
+	// resolve by ptr
+	floc := sr.floc + f[1:]
+	if res, ok := r.subresources[floc]; ok {
+		return res, nil
+	}
+
+	// non-standard location
+	doc := r.doc
+	for _, item := range strings.Split(floc[2:], "/") {
+		item = strings.Replace(item, "~1", "/", -1)
+		item = strings.Replace(item, "~0", "~", -1)
+		item, err := url.PathUnescape(item)
+		if err != nil {
+			return nil, err
+		}
+		switch d := doc.(type) {
+		case map[string]interface{}:
+			if _, ok := d[item]; !ok {
+				return nil, nil
+			}
+			doc = d[item]
+		case []interface{}:
+			index, err := strconv.Atoi(item)
+			if err != nil {
+				return nil, err
+			}
+			if index < 0 || index >= len(d) {
+				return nil, nil
+			}
+			doc = d[index]
+		default:
+			return nil, nil
+		}
+	}
+
+	id, err := r.draft.resolveID(r.baseURL(floc), doc)
+	if err != nil {
+		return nil, err
+	}
+	res := &resource{url: id, floc: floc, doc: doc}
+	r.subresources[floc] = res
+	if err := r.fillSubschemas(c, res); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+func (r *resource) baseURL(floc string) string {
+	for {
+		if sr, ok := r.subresources[floc]; ok {
+			if sr.url != "" {
+				return sr.url
+			}
+		}
+		slash := strings.LastIndexByte(floc, '/')
+		if slash == -1 {
+			break
+		}
+		floc = floc[:slash]
+	}
+	return r.url
+}
+
+// url helpers ---
+
+func toAbs(s string) (string, error) {
+	// if windows absolute file path, convert to file url
+	// because: net/url parses the drive letter as a scheme
+	if runtime.GOOS == "windows" && len(s) >= 3 && s[1:3] == `:\` {
+		s = "file:///" + filepath.ToSlash(s)
+	}
+
+	u, err := url.Parse(s)
+	if err != nil {
+		return "", err
+	}
+	if u.IsAbs() {
+		return s, nil
+	}
+
+	// s is filepath
+	if s, err = filepath.Abs(s); err != nil {
+		return "", err
+	}
+	if runtime.GOOS == "windows" {
+		s = "file:///" + filepath.ToSlash(s)
+	} else {
+		s = "file://" + s
+	}
+	u, err = url.Parse(s) // to fix spaces in filepath
+	return u.String(), err
+}
+
+func resolveURL(base, ref string) (string, error) {
+	if ref == "" {
+		return base, nil
+	}
+	if strings.HasPrefix(ref, "urn:") {
+		return ref, nil
+	}
+
+	refURL, err := url.Parse(ref)
+	if err != nil {
+		return "", err
+	}
+	if refURL.IsAbs() {
+		return ref, nil
+	}
+
+	if strings.HasPrefix(base, "urn:") {
+		base, _ = split(base)
+		return base + ref, nil
+	}
+
+	baseURL, err := url.Parse(base)
+	if err != nil {
+		return "", err
+	}
+	return baseURL.ResolveReference(refURL).String(), nil
+}
+
+func split(uri string) (string, string) {
+	hash := strings.IndexByte(uri, '#')
+	if hash == -1 {
+		return uri, "#"
+	}
+	f := uri[hash:]
+	if f == "#/" {
+		f = "#"
+	}
+	return uri[0:hash], f
+}
+
+func (s *Schema) url() string {
+	u, _ := split(s.Location)
+	return u
+}
+
+func (s *Schema) loc() string {
+	_, f := split(s.Location)
+	return f[1:]
+}
+
+func unmarshal(r io.Reader) (interface{}, error) {
+	decoder := json.NewDecoder(r)
+	decoder.UseNumber()
+	var doc interface{}
+	if err := decoder.Decode(&doc); err != nil {
+		return nil, err
+	}
+	if t, _ := decoder.Token(); t != nil {
+		return nil, fmt.Errorf("invalid character %v after top-level value", t)
+	}
+	return doc, nil
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go
new file mode 100644
index 0000000000..688f0a6fee
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go
@@ -0,0 +1,900 @@
+package jsonschema
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"hash/maphash"
+	"math/big"
+	"net/url"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// A Schema represents a compiled version of a json-schema.
+type Schema struct {
+	Location string // absolute location
+
+	Draft          *Draft // draft used by schema.
+	meta           *Schema
+	vocab          []string
+	dynamicAnchors []*Schema
+
+	// type agnostic validations
+	Format           string
+	format           func(interface{}) bool
+	Always           *bool // always pass/fail. used when booleans are used as schemas in draft-07.
+	Ref              *Schema
+	RecursiveAnchor  bool
+	RecursiveRef     *Schema
+	DynamicAnchor    string
+	DynamicRef       *Schema
+	dynamicRefAnchor string
+	Types            []string      // allowed types.
+	Constant         []interface{} // first element in slice is constant value. note: slice is used to capture nil constant.
+	Enum             []interface{} // allowed values.
+	enumError        string        // error message for enum fail. captured here to avoid constructing error message every time.
+	Not              *Schema
+	AllOf            []*Schema
+	AnyOf            []*Schema
+	OneOf            []*Schema
+	If               *Schema
+	Then             *Schema // nil, when If is nil.
+	Else             *Schema // nil, when If is nil.
+
+	// object validations
+	MinProperties int // -1 if not specified.
+	MaxProperties         int      // -1 if not specified.
+	Required              []string // list of required properties.
+	Properties            map[string]*Schema
+	PropertyNames         *Schema
+	RegexProperties       bool // property names must be valid regex. used only in draft4 as workaround in metaschema.
+	PatternProperties     map[*regexp.Regexp]*Schema
+	AdditionalProperties  interface{}            // nil or bool or *Schema.
+	Dependencies          map[string]interface{} // map value is *Schema or []string.
+	DependentRequired     map[string][]string
+	DependentSchemas      map[string]*Schema
+	UnevaluatedProperties *Schema
+
+	// array validations
+	MinItems         int // -1 if not specified.
+	MaxItems         int // -1 if not specified.
+	UniqueItems      bool
+	Items            interface{} // nil or *Schema or []*Schema
+	AdditionalItems  interface{} // nil or bool or *Schema.
+	PrefixItems      []*Schema
+	Items2020        *Schema // items keyword reintroduced in draft 2020-12
+	Contains         *Schema
+	ContainsEval     bool // whether any item in an array that passes validation of the contains schema is considered "evaluated"
+	MinContains      int // 1 if not specified
+	MaxContains      int // -1 if not specified
+	UnevaluatedItems *Schema
+
+	// string validations
+	MinLength        int // -1 if not specified.
+	MaxLength        int // -1 if not specified.
+	Pattern          *regexp.Regexp
+	ContentEncoding  string
+	decoder          func(string) ([]byte, error)
+	ContentMediaType string
+	mediaType        func([]byte) error
+	ContentSchema    *Schema
+
+	// number validations
+	Minimum          *big.Rat
+	ExclusiveMinimum *big.Rat
+	Maximum          *big.Rat
+	ExclusiveMaximum *big.Rat
+	MultipleOf       *big.Rat
+
+	// annotations. captured only when Compiler.ExtractAnnotations is true.
+	Title       string
+	Description string
+	Default     interface{}
+	Comment     string
+	ReadOnly    bool
+	WriteOnly   bool
+	Examples    []interface{}
+	Deprecated  bool
+
+	// user defined extensions
+	Extensions map[string]ExtSchema
+}
+
+func (s *Schema) String() string {
+	return s.Location
+}
+
+func newSchema(url, floc string, draft *Draft, doc interface{}) *Schema {
+	// fill with default values
+	s := &Schema{
+		Location:      url + floc,
+		Draft:         draft,
+		MinProperties: -1,
+		MaxProperties: -1,
+		MinItems:      -1,
+		MaxItems:      -1,
+		MinContains:   1,
+		MaxContains:   -1,
+		MinLength:     -1,
+		MaxLength:     -1,
+	}
+
+	if doc, ok := doc.(map[string]interface{}); ok {
+		if ra, ok := doc["$recursiveAnchor"]; ok {
+			if ra, ok := ra.(bool); ok {
+				s.RecursiveAnchor = ra
+			}
+		}
+		if da, ok := doc["$dynamicAnchor"]; ok {
+			if da, ok := da.(string); ok {
+				s.DynamicAnchor = da
+			}
+		}
+	}
+	return s
+}
+
+func (s *Schema) hasVocab(name string) bool {
+	if s == nil { // during bootstrap
+		return true
+	}
+	if name == "core" {
+		return true
+	}
+	for _, url := range s.vocab {
+		if url == "https://json-schema.org/draft/2019-09/vocab/"+name {
+			return true
+		}
+		if url == "https://json-schema.org/draft/2020-12/vocab/"+name {
+			return true
+		}
+	}
+	return false
+}
+
+// Validate validates the given doc against the json-schema s.
+//
+// v must be the raw json value. For number precision,
+// unmarshal with json.UseNumber().
+//
+// returns *ValidationError if v does not conform to schema s.
+// returns InfiniteLoopError if it detects a loop during validation.
+// returns InvalidJSONTypeError if it detects any non-json value in v.
+func (s *Schema) Validate(v interface{}) (err error) { + return s.validateValue(v, "") +} + +func (s *Schema) validateValue(v interface{}, vloc string) (err error) { + defer func() { + if r := recover(); r != nil { + switch r := r.(type) { + case InfiniteLoopError, InvalidJSONTypeError: + err = r.(error) + default: + panic(r) + } + } + }() + if _, err := s.validate(nil, 0, "", v, vloc); err != nil { + ve := ValidationError{ + KeywordLocation: "", + AbsoluteKeywordLocation: s.Location, + InstanceLocation: vloc, + Message: fmt.Sprintf("doesn't validate with %s", s.Location), + } + return ve.causes(err) + } + return nil +} + +// validate validates given value v with this schema. +func (s *Schema) validate(scope []schemaRef, vscope int, spath string, v interface{}, vloc string) (result validationResult, err error) { + validationError := func(keywordPath string, format string, a ...interface{}) *ValidationError { + return &ValidationError{ + KeywordLocation: keywordLocation(scope, keywordPath), + AbsoluteKeywordLocation: joinPtr(s.Location, keywordPath), + InstanceLocation: vloc, + Message: fmt.Sprintf(format, a...), + } + } + + sref := schemaRef{spath, s, false} + if err := checkLoop(scope[len(scope)-vscope:], sref); err != nil { + panic(err) + } + scope = append(scope, sref) + vscope++ + + // populate result + switch v := v.(type) { + case map[string]interface{}: + result.unevalProps = make(map[string]struct{}) + for pname := range v { + result.unevalProps[pname] = struct{}{} + } + case []interface{}: + result.unevalItems = make(map[int]struct{}) + for i := range v { + result.unevalItems[i] = struct{}{} + } + } + + validate := func(sch *Schema, schPath string, v interface{}, vpath string) error { + vloc := vloc + if vpath != "" { + vloc += "/" + vpath + } + _, err := sch.validate(scope, 0, schPath, v, vloc) + return err + } + + validateInplace := func(sch *Schema, schPath string) error { + vr, err := sch.validate(scope, vscope, schPath, v, vloc) + if err == nil { + // update result + for pname := range result.unevalProps { + if _, ok := vr.unevalProps[pname]; !ok { + delete(result.unevalProps, pname) + } + } + for i := range result.unevalItems { + if _, ok := vr.unevalItems[i]; !ok { + delete(result.unevalItems, i) + } + } + } + return err + } + + if s.Always != nil { + if !*s.Always { + return result, validationError("", "not allowed") + } + return result, nil + } + + if len(s.Types) > 0 { + vType := jsonType(v) + matched := false + for _, t := range s.Types { + if vType == t { + matched = true + break + } else if t == "integer" && vType == "number" { + num, _ := new(big.Rat).SetString(fmt.Sprint(v)) + if num.IsInt() { + matched = true + break + } + } + } + if !matched { + return result, validationError("type", "expected %s, but got %s", strings.Join(s.Types, " or "), vType) + } + } + + var errors []error + + if len(s.Constant) > 0 { + if !equals(v, s.Constant[0]) { + switch jsonType(s.Constant[0]) { + case "object", "array": + errors = append(errors, validationError("const", "const failed")) + default: + errors = append(errors, validationError("const", "value must be %#v", s.Constant[0])) + } + } + } + + if len(s.Enum) > 0 { + matched := false + for _, item := range s.Enum { + if equals(v, item) { + matched = true + break + } + } + if !matched { + errors = append(errors, validationError("enum", s.enumError)) + } + } + + if s.format != nil && !s.format(v) { + var val = v + if v, ok := v.(string); ok { + val = quote(v) + } + errors = append(errors, validationError("format", "%v is not valid 
%s", val, quote(s.Format))) + } + + switch v := v.(type) { + case map[string]interface{}: + if s.MinProperties != -1 && len(v) < s.MinProperties { + errors = append(errors, validationError("minProperties", "minimum %d properties allowed, but found %d properties", s.MinProperties, len(v))) + } + if s.MaxProperties != -1 && len(v) > s.MaxProperties { + errors = append(errors, validationError("maxProperties", "maximum %d properties allowed, but found %d properties", s.MaxProperties, len(v))) + } + if len(s.Required) > 0 { + var missing []string + for _, pname := range s.Required { + if _, ok := v[pname]; !ok { + missing = append(missing, quote(pname)) + } + } + if len(missing) > 0 { + errors = append(errors, validationError("required", "missing properties: %s", strings.Join(missing, ", "))) + } + } + + for pname, sch := range s.Properties { + if pvalue, ok := v[pname]; ok { + delete(result.unevalProps, pname) + if err := validate(sch, "properties/"+escape(pname), pvalue, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + + if s.PropertyNames != nil { + for pname := range v { + if err := validate(s.PropertyNames, "propertyNames", pname, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + + if s.RegexProperties { + for pname := range v { + if !isRegex(pname) { + errors = append(errors, validationError("", "patternProperty %s is not valid regex", quote(pname))) + } + } + } + for pattern, sch := range s.PatternProperties { + for pname, pvalue := range v { + if pattern.MatchString(pname) { + delete(result.unevalProps, pname) + if err := validate(sch, "patternProperties/"+escape(pattern.String()), pvalue, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + } + if s.AdditionalProperties != nil { + if allowed, ok := s.AdditionalProperties.(bool); ok { + if !allowed && len(result.unevalProps) > 0 { + errors = append(errors, validationError("additionalProperties", "additionalProperties %s not allowed", result.unevalPnames())) + } + } else { + schema := s.AdditionalProperties.(*Schema) + for pname := range result.unevalProps { + if pvalue, ok := v[pname]; ok { + if err := validate(schema, "additionalProperties", pvalue, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + } + result.unevalProps = nil + } + for dname, dvalue := range s.Dependencies { + if _, ok := v[dname]; ok { + switch dvalue := dvalue.(type) { + case *Schema: + if err := validateInplace(dvalue, "dependencies/"+escape(dname)); err != nil { + errors = append(errors, err) + } + case []string: + for i, pname := range dvalue { + if _, ok := v[pname]; !ok { + errors = append(errors, validationError("dependencies/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname))) + } + } + } + } + } + for dname, dvalue := range s.DependentRequired { + if _, ok := v[dname]; ok { + for i, pname := range dvalue { + if _, ok := v[pname]; !ok { + errors = append(errors, validationError("dependentRequired/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname))) + } + } + } + } + for dname, sch := range s.DependentSchemas { + if _, ok := v[dname]; ok { + if err := validateInplace(sch, "dependentSchemas/"+escape(dname)); err != nil { + errors = append(errors, err) + } + } + } + + case []interface{}: + if s.MinItems != -1 && len(v) < s.MinItems { + errors = append(errors, validationError("minItems", "minimum %d items required, but found %d items", s.MinItems, 
len(v)))
+		}
+		if s.MaxItems != -1 && len(v) > s.MaxItems {
+			errors = append(errors, validationError("maxItems", "maximum %d items allowed, but found %d items", s.MaxItems, len(v)))
+		}
+		if s.UniqueItems {
+			if len(v) <= 20 {
+			outer1:
+				for i := 1; i < len(v); i++ {
+					for j := 0; j < i; j++ {
+						if equals(v[i], v[j]) {
+							errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i))
+							break outer1
+						}
+					}
+				}
+			} else {
+				m := make(map[uint64][]int)
+				var h maphash.Hash
+			outer2:
+				for i, item := range v {
+					h.Reset()
+					hash(item, &h)
+					k := h.Sum64()
+					arr, ok := m[k]
+					if ok {
+						for _, j := range arr {
+							if equals(v[j], item) {
+								errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i))
+								break outer2
+							}
+						}
+					}
+					arr = append(arr, i)
+					m[k] = arr
+				}
+			}
+		}
+
+		// items + additionalItems
+		switch items := s.Items.(type) {
+		case *Schema:
+			for i, item := range v {
+				if err := validate(items, "items", item, strconv.Itoa(i)); err != nil {
+					errors = append(errors, err)
+				}
+			}
+			result.unevalItems = nil
+		case []*Schema:
+			for i, item := range v {
+				if i < len(items) {
+					delete(result.unevalItems, i)
+					if err := validate(items[i], "items/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil {
+						errors = append(errors, err)
+					}
+				} else if sch, ok := s.AdditionalItems.(*Schema); ok {
+					delete(result.unevalItems, i)
+					if err := validate(sch, "additionalItems", item, strconv.Itoa(i)); err != nil {
+						errors = append(errors, err)
+					}
+				} else {
+					break
+				}
+			}
+			if additionalItems, ok := s.AdditionalItems.(bool); ok {
+				if additionalItems {
+					result.unevalItems = nil
+				} else if len(v) > len(items) {
+					errors = append(errors, validationError("additionalItems", "only %d items are allowed, but found %d items", len(items), len(v)))
+				}
+			}
+		}
+
+		// prefixItems + items
+		for i, item := range v {
+			if i < len(s.PrefixItems) {
+				delete(result.unevalItems, i)
+				if err := validate(s.PrefixItems[i], "prefixItems/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil {
+					errors = append(errors, err)
+				}
+			} else if s.Items2020 != nil {
+				delete(result.unevalItems, i)
+				if err := validate(s.Items2020, "items", item, strconv.Itoa(i)); err != nil {
+					errors = append(errors, err)
+				}
+			} else {
+				break
+			}
+		}
+
+		// contains + minContains + maxContains
+		if s.Contains != nil && (s.MinContains != -1 || s.MaxContains != -1) {
+			matched := 0
+			var causes []error
+			for i, item := range v {
+				if err := validate(s.Contains, "contains", item, strconv.Itoa(i)); err != nil {
+					causes = append(causes, err)
+				} else {
+					matched++
+					if s.ContainsEval {
+						delete(result.unevalItems, i)
+					}
+				}
+			}
+			if s.MinContains != -1 && matched < s.MinContains {
+				errors = append(errors, validationError("minContains", "valid items must be >= %d, but got %d", s.MinContains, matched).add(causes...))
+			}
+			if s.MaxContains != -1 && matched > s.MaxContains {
+				errors = append(errors, validationError("maxContains", "valid items must be <= %d, but got %d", s.MaxContains, matched))
+			}
+		}
+
+	case string:
+		// minLength + maxLength
+		if s.MinLength != -1 || s.MaxLength != -1 {
+			length := utf8.RuneCount([]byte(v))
+			if s.MinLength != -1 && length < s.MinLength {
+				errors = append(errors, validationError("minLength", "length must be >= %d, but got %d", s.MinLength, length))
+			}
+			if s.MaxLength != -1 && length > s.MaxLength {
+				errors = append(errors, validationError("maxLength", "length must be <= %d, but got %d", s.MaxLength,
length)) + } + } + + if s.Pattern != nil && !s.Pattern.MatchString(v) { + errors = append(errors, validationError("pattern", "does not match pattern %s", quote(s.Pattern.String()))) + } + + // contentEncoding + contentMediaType + if s.decoder != nil || s.mediaType != nil { + decoded := s.ContentEncoding == "" + var content []byte + if s.decoder != nil { + b, err := s.decoder(v) + if err != nil { + errors = append(errors, validationError("contentEncoding", "value is not %s encoded", s.ContentEncoding)) + } else { + content, decoded = b, true + } + } + if decoded && s.mediaType != nil { + if s.decoder == nil { + content = []byte(v) + } + if err := s.mediaType(content); err != nil { + errors = append(errors, validationError("contentMediaType", "value is not of mediatype %s", quote(s.ContentMediaType))) + } + } + if decoded && s.ContentSchema != nil { + contentJSON, err := unmarshal(bytes.NewReader(content)) + if err != nil { + errors = append(errors, validationError("contentSchema", "value is not valid json")) + } else { + err := validate(s.ContentSchema, "contentSchema", contentJSON, "") + if err != nil { + errors = append(errors, err) + } + } + } + } + + case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64: + // lazy convert to *big.Rat to avoid allocation + var numVal *big.Rat + num := func() *big.Rat { + if numVal == nil { + numVal, _ = new(big.Rat).SetString(fmt.Sprint(v)) + } + return numVal + } + f64 := func(r *big.Rat) float64 { + f, _ := r.Float64() + return f + } + if s.Minimum != nil && num().Cmp(s.Minimum) < 0 { + errors = append(errors, validationError("minimum", "must be >= %v but found %v", f64(s.Minimum), v)) + } + if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 { + errors = append(errors, validationError("exclusiveMinimum", "must be > %v but found %v", f64(s.ExclusiveMinimum), v)) + } + if s.Maximum != nil && num().Cmp(s.Maximum) > 0 { + errors = append(errors, validationError("maximum", "must be <= %v but found %v", f64(s.Maximum), v)) + } + if s.ExclusiveMaximum != nil && num().Cmp(s.ExclusiveMaximum) >= 0 { + errors = append(errors, validationError("exclusiveMaximum", "must be < %v but found %v", f64(s.ExclusiveMaximum), v)) + } + if s.MultipleOf != nil { + if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() { + errors = append(errors, validationError("multipleOf", "%v not multipleOf %v", v, f64(s.MultipleOf))) + } + } + } + + // $ref + $recursiveRef + $dynamicRef + validateRef := func(sch *Schema, refPath string) error { + if sch != nil { + if err := validateInplace(sch, refPath); err != nil { + var url = sch.Location + if s.url() == sch.url() { + url = sch.loc() + } + return validationError(refPath, "doesn't validate with %s", quote(url)).causes(err) + } + } + return nil + } + if err := validateRef(s.Ref, "$ref"); err != nil { + errors = append(errors, err) + } + if s.RecursiveRef != nil { + sch := s.RecursiveRef + if sch.RecursiveAnchor { + // recursiveRef based on scope + for _, e := range scope { + if e.schema.RecursiveAnchor { + sch = e.schema + break + } + } + } + if err := validateRef(sch, "$recursiveRef"); err != nil { + errors = append(errors, err) + } + } + if s.DynamicRef != nil { + sch := s.DynamicRef + if s.dynamicRefAnchor != "" && sch.DynamicAnchor == s.dynamicRefAnchor { + // dynamicRef based on scope + for i := len(scope) - 1; i >= 0; i-- { + sr := scope[i] + if sr.discard { + break + } + for _, da := range sr.schema.dynamicAnchors { + if da.DynamicAnchor == s.DynamicRef.DynamicAnchor && da != 
s.DynamicRef { + sch = da + break + } + } + } + } + if err := validateRef(sch, "$dynamicRef"); err != nil { + errors = append(errors, err) + } + } + + if s.Not != nil && validateInplace(s.Not, "not") == nil { + errors = append(errors, validationError("not", "not failed")) + } + + for i, sch := range s.AllOf { + schPath := "allOf/" + strconv.Itoa(i) + if err := validateInplace(sch, schPath); err != nil { + errors = append(errors, validationError(schPath, "allOf failed").add(err)) + } + } + + if len(s.AnyOf) > 0 { + matched := false + var causes []error + for i, sch := range s.AnyOf { + if err := validateInplace(sch, "anyOf/"+strconv.Itoa(i)); err == nil { + matched = true + } else { + causes = append(causes, err) + } + } + if !matched { + errors = append(errors, validationError("anyOf", "anyOf failed").add(causes...)) + } + } + + if len(s.OneOf) > 0 { + matched := -1 + var causes []error + for i, sch := range s.OneOf { + if err := validateInplace(sch, "oneOf/"+strconv.Itoa(i)); err == nil { + if matched == -1 { + matched = i + } else { + errors = append(errors, validationError("oneOf", "valid against schemas at indexes %d and %d", matched, i)) + break + } + } else { + causes = append(causes, err) + } + } + if matched == -1 { + errors = append(errors, validationError("oneOf", "oneOf failed").add(causes...)) + } + } + + // if + then + else + if s.If != nil { + err := validateInplace(s.If, "if") + // "if" leaves dynamic scope + scope[len(scope)-1].discard = true + if err == nil { + if s.Then != nil { + if err := validateInplace(s.Then, "then"); err != nil { + errors = append(errors, validationError("then", "if-then failed").add(err)) + } + } + } else { + if s.Else != nil { + if err := validateInplace(s.Else, "else"); err != nil { + errors = append(errors, validationError("else", "if-else failed").add(err)) + } + } + } + // restore dynamic scope + scope[len(scope)-1].discard = false + } + + for _, ext := range s.Extensions { + if err := ext.Validate(ValidationContext{result, validate, validateInplace, validationError}, v); err != nil { + errors = append(errors, err) + } + } + + // unevaluatedProperties + unevaluatedItems + switch v := v.(type) { + case map[string]interface{}: + if s.UnevaluatedProperties != nil { + for pname := range result.unevalProps { + if pvalue, ok := v[pname]; ok { + if err := validate(s.UnevaluatedProperties, "unevaluatedProperties", pvalue, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + result.unevalProps = nil + } + case []interface{}: + if s.UnevaluatedItems != nil { + for i := range result.unevalItems { + if err := validate(s.UnevaluatedItems, "unevaluatedItems", v[i], strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } + result.unevalItems = nil + } + } + + switch len(errors) { + case 0: + return result, nil + case 1: + return result, errors[0] + default: + return result, validationError("", "").add(errors...) // empty message, used just for wrapping + } +} + +type validationResult struct { + unevalProps map[string]struct{} + unevalItems map[int]struct{} +} + +func (vr validationResult) unevalPnames() string { + pnames := make([]string, 0, len(vr.unevalProps)) + for pname := range vr.unevalProps { + pnames = append(pnames, quote(pname)) + } + return strings.Join(pnames, ", ") +} + +// jsonType returns the json type of given value v. 
+//
+// It panics if the given value is not a valid json value
+func jsonType(v interface{}) string {
+	switch v.(type) {
+	case nil:
+		return "null"
+	case bool:
+		return "boolean"
+	case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64:
+		return "number"
+	case string:
+		return "string"
+	case []interface{}:
+		return "array"
+	case map[string]interface{}:
+		return "object"
+	}
+	panic(InvalidJSONTypeError(fmt.Sprintf("%T", v)))
+}
+
+// equals reports whether the given two json values are equal.
+func equals(v1, v2 interface{}) bool {
+	v1Type := jsonType(v1)
+	if v1Type != jsonType(v2) {
+		return false
+	}
+	switch v1Type {
+	case "array":
+		arr1, arr2 := v1.([]interface{}), v2.([]interface{})
+		if len(arr1) != len(arr2) {
+			return false
+		}
+		for i := range arr1 {
+			if !equals(arr1[i], arr2[i]) {
+				return false
+			}
+		}
+		return true
+	case "object":
+		obj1, obj2 := v1.(map[string]interface{}), v2.(map[string]interface{})
+		if len(obj1) != len(obj2) {
+			return false
+		}
+		for k, v1 := range obj1 {
+			if v2, ok := obj2[k]; ok {
+				if !equals(v1, v2) {
+					return false
+				}
+			} else {
+				return false
+			}
+		}
+		return true
+	case "number":
+		num1, _ := new(big.Rat).SetString(fmt.Sprint(v1))
+		num2, _ := new(big.Rat).SetString(fmt.Sprint(v2))
+		return num1.Cmp(num2) == 0
+	default:
+		return v1 == v2
+	}
+}
+
+func hash(v interface{}, h *maphash.Hash) {
+	switch v := v.(type) {
+	case nil:
+		h.WriteByte(0)
+	case bool:
+		h.WriteByte(1)
+		if v {
+			h.WriteByte(1)
+		} else {
+			h.WriteByte(0)
+		}
+	case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64:
+		h.WriteByte(2)
+		num, _ := new(big.Rat).SetString(fmt.Sprint(v))
+		h.Write(num.Num().Bytes())
+		h.Write(num.Denom().Bytes())
+	case string:
+		h.WriteByte(3)
+		h.WriteString(v)
+	case []interface{}:
+		h.WriteByte(4)
+		for _, item := range v {
+			hash(item, h)
+		}
+	case map[string]interface{}:
+		h.WriteByte(5)
+		props := make([]string, 0, len(v))
+		for prop := range v {
+			props = append(props, prop)
+		}
+		sort.Slice(props, func(i, j int) bool {
+			return props[i] < props[j]
+		})
+		for _, prop := range props {
+			hash(prop, h)
+			hash(v[prop], h)
+		}
+	default:
+		panic(InvalidJSONTypeError(fmt.Sprintf("%T", v)))
+	}
+}
+
+// escape converts the given token to a valid json-pointer token
+func escape(token string) string {
+	token = strings.ReplaceAll(token, "~", "~0")
+	token = strings.ReplaceAll(token, "/", "~1")
+	return url.PathEscape(token)
+}
diff --git a/vendor/github.com/sashamelentyev/interfacebloat/LICENSE b/vendor/github.com/sashamelentyev/interfacebloat/LICENSE
new file mode 100644
index 0000000000..a52602b3da
--- /dev/null
+++ b/vendor/github.com/sashamelentyev/interfacebloat/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Sasha Melentyev
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sashamelentyev/interfacebloat/pkg/analyzer/analyzer.go b/vendor/github.com/sashamelentyev/interfacebloat/pkg/analyzer/analyzer.go new file mode 100644 index 0000000000..4a6afdf8cb --- /dev/null +++ b/vendor/github.com/sashamelentyev/interfacebloat/pkg/analyzer/analyzer.go @@ -0,0 +1,57 @@ +package analyzer + +import ( + "flag" + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const InterfaceMaxMethodsFlag = "max" + +const defaultMaxMethods = 10 + +// New returns new interfacebloat analyzer. +func New() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "interfacebloat", + Doc: "A linter that checks the number of methods inside an interface.", + Run: run, + Flags: flags(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } +} + +func flags() flag.FlagSet { + flags := flag.NewFlagSet("", flag.ExitOnError) + flags.Int(InterfaceMaxMethodsFlag, 10, "maximum number of methods") + return *flags +} + +func run(pass *analysis.Pass) (interface{}, error) { + maxMethods, ok := pass.Analyzer.Flags.Lookup(InterfaceMaxMethodsFlag).Value.(flag.Getter).Get().(int) + if !ok { + maxMethods = defaultMaxMethods + } + + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + filter := []ast.Node{ + (*ast.InterfaceType)(nil), + } + + insp.Preorder(filter, func(node ast.Node) { + i, ok := node.(*ast.InterfaceType) + if !ok { + return + } + + if len(i.Methods.List) > maxMethods { + pass.Reportf(node.Pos(), `the interface has more than %d methods: %d`, maxMethods, len(i.Methods.List)) + } + }) + + return nil, nil +} diff --git a/vendor/github.com/sashamelentyev/usestdlibvars/LICENSE b/vendor/github.com/sashamelentyev/usestdlibvars/LICENSE new file mode 100644 index 0000000000..a52602b3da --- /dev/null +++ b/vendor/github.com/sashamelentyev/usestdlibvars/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Sasha Melentyev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
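(Editorial note, not part of the diff: for orientation, a minimal sketch of the kind of declaration the interfacebloat analyzer vendored above reports, assuming its default limit of 10 methods; all names here are hypothetical.)

package example

// Storage has 11 methods, one over interfacebloat's default max of 10,
// so the analyzer would report it at the interface's position.
type Storage interface {
	Open(name string) error
	Close() error
	Read(p []byte) (int, error)
	Write(p []byte) (int, error)
	Flush() error
	Seek(offset int64, whence int) (int64, error)
	Name() string
	Size() int64
	Truncate(size int64) error
	Sync() error
	Remove(name string) error // 11th method pushes it over the limit
}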
diff --git a/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go
new file mode 100644
index 0000000000..4d6ab3ccab
--- /dev/null
+++ b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go
@@ -0,0 +1,569 @@
+package analyzer
+
+import (
+	"flag"
+	"go/ast"
+	"go/token"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+
+	"github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping"
+)
+
+const (
+	TimeWeekdayFlag        = "time-weekday"
+	TimeMonthFlag          = "time-month"
+	TimeLayoutFlag         = "time-layout"
+	CryptoHashFlag         = "crypto-hash"
+	HTTPMethodFlag         = "http-method"
+	HTTPStatusCodeFlag     = "http-status-code"
+	RPCDefaultPathFlag     = "rpc-default-path"
+	OSDevNullFlag          = "os-dev-null"
+	SQLIsolationLevelFlag  = "sql-isolation-level"
+	TLSSignatureSchemeFlag = "tls-signature-scheme"
+	ConstantKindFlag       = "constant-kind"
+	SyslogPriorityFlag     = "syslog-priority"
+)
+
+// New returns a new usestdlibvars analyzer.
+func New() *analysis.Analyzer {
+	return &analysis.Analyzer{
+		Name:     "usestdlibvars",
+		Doc:      "A linter that detects the possibility to use variables/constants from the Go standard library.",
+		Run:      run,
+		Flags:    flags(),
+		Requires: []*analysis.Analyzer{inspect.Analyzer},
+	}
+}
+
+func flags() flag.FlagSet {
+	flags := flag.NewFlagSet("", flag.ExitOnError)
+	flags.Bool(HTTPMethodFlag, true, "suggest the use of http.MethodXX")
+	flags.Bool(HTTPStatusCodeFlag, true, "suggest the use of http.StatusXX")
+	flags.Bool(TimeWeekdayFlag, false, "suggest the use of time.Weekday.String()")
+	flags.Bool(TimeMonthFlag, false, "suggest the use of time.Month.String()")
+	flags.Bool(TimeLayoutFlag, false, "suggest the use of time.Layout")
+	flags.Bool(CryptoHashFlag, false, "suggest the use of crypto.Hash.String()")
+	flags.Bool(RPCDefaultPathFlag, false, "suggest the use of rpc.DefaultXXPath")
+	flags.Bool(OSDevNullFlag, false, "[DEPRECATED] suggest the use of os.DevNull")
+	flags.Bool(SQLIsolationLevelFlag, false, "suggest the use of sql.LevelXX.String()")
+	flags.Bool(TLSSignatureSchemeFlag, false, "suggest the use of tls.SignatureScheme.String()")
+	flags.Bool(ConstantKindFlag, false, "suggest the use of constant.Kind.String()")
+	flags.Bool(SyslogPriorityFlag, false, "[DEPRECATED] suggest the use of syslog.Priority")
+	return *flags
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+
+	types := []ast.Node{
+		(*ast.CallExpr)(nil),
+		(*ast.BasicLit)(nil),
+		(*ast.CompositeLit)(nil),
+		(*ast.IfStmt)(nil),
+		(*ast.SwitchStmt)(nil),
+		(*ast.ForStmt)(nil),
+	}
+
+	insp.Preorder(types, func(node ast.Node) {
+		switch n := node.(type) {
+		case *ast.CallExpr:
+			fun, ok := n.Fun.(*ast.SelectorExpr)
+			if !ok {
+				return
+			}
+
+			x, ok := fun.X.(*ast.Ident)
+			if !ok {
+				return
+			}
+
+			funArgs(pass, x, fun, n.Args)
+
+		case *ast.BasicLit:
+			for _, c := range []struct {
+				flag      string
+				checkFunc func(pass *analysis.Pass, basicLit *ast.BasicLit)
+			}{
+				{flag: TimeWeekdayFlag, checkFunc: checkTimeWeekday},
+				{flag: TimeMonthFlag, checkFunc: checkTimeMonth},
+				{flag: TimeLayoutFlag, checkFunc: checkTimeLayout},
+				{flag: CryptoHashFlag, checkFunc: checkCryptoHash},
+				{flag: RPCDefaultPathFlag, checkFunc: checkRPCDefaultPath},
+				{flag: OSDevNullFlag, checkFunc: checkOSDevNull},
+				{flag: SQLIsolationLevelFlag, checkFunc: checkSQLIsolationLevel},
+				{flag:
TLSSignatureSchemeFlag, checkFunc: checkTLSSignatureScheme}, + {flag: ConstantKindFlag, checkFunc: checkConstantKind}, + } { + if lookupFlag(pass, c.flag) { + c.checkFunc(pass, n) + } + } + + case *ast.CompositeLit: + typ, ok := n.Type.(*ast.SelectorExpr) + if !ok { + return + } + + x, ok := typ.X.(*ast.Ident) + if !ok { + return + } + + typeElts(pass, x, typ, n.Elts) + + case *ast.IfStmt: + cond, ok := n.Cond.(*ast.BinaryExpr) + if !ok { + return + } + + switch cond.Op { + case token.LSS, token.GTR, token.LEQ, token.GEQ: + return + } + + x, ok := cond.X.(*ast.SelectorExpr) + if !ok { + return + } + + y, ok := cond.Y.(*ast.BasicLit) + if !ok { + return + } + + ifElseStmt(pass, x, y) + + case *ast.SwitchStmt: + x, ok := n.Tag.(*ast.SelectorExpr) + if ok { + switchStmt(pass, x, n.Body.List) + } else { + switchStmtAsIfElseStmt(pass, n.Body.List) + } + + case *ast.ForStmt: + cond, ok := n.Cond.(*ast.BinaryExpr) + if !ok { + return + } + + x, ok := cond.X.(*ast.SelectorExpr) + if !ok { + return + } + + y, ok := cond.Y.(*ast.BasicLit) + if !ok { + return + } + + ifElseStmt(pass, x, y) + } + }) + + return nil, nil +} + +// funArgs checks arguments of function or method. +func funArgs(pass *analysis.Pass, x *ast.Ident, fun *ast.SelectorExpr, args []ast.Expr) { + switch x.Name { + case "http": + switch fun.Sel.Name { + // http.NewRequest(http.MethodGet, "localhost", http.NoBody) + case "NewRequest": + if !lookupFlag(pass, HTTPMethodFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 3, 0, token.STRING); basicLit != nil { + checkHTTPMethod(pass, basicLit) + } + + // http.NewRequestWithContext(context.Background(), http.MethodGet, "localhost", http.NoBody) + case "NewRequestWithContext": + if !lookupFlag(pass, HTTPMethodFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 4, 1, token.STRING); basicLit != nil { + checkHTTPMethod(pass, basicLit) + } + + // http.Error(w, err, http.StatusInternalServerError) + case "Error": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 3, 2, token.INT); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + + // http.StatusText(http.StatusOK) + case "StatusText": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 1, 0, token.INT); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + + // http.Redirect(w, r, "localhost", http.StatusMovedPermanently) + case "Redirect": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 4, 3, token.INT); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + + // http.RedirectHandler("localhost", http.StatusMovedPermanently) + case "RedirectHandler": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 2, 1, token.INT); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + } + case "httptest": + if fun.Sel.Name == "NewRequest" { + if !lookupFlag(pass, HTTPMethodFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 3, 0, token.STRING); basicLit != nil { + checkHTTPMethod(pass, basicLit) + } + } + case "syslog": + if !lookupFlag(pass, SyslogPriorityFlag) { + return + } + + switch fun.Sel.Name { + case "New": + if basicLit := getBasicLitFromArgs(args, 2, 0, token.INT); basicLit != nil { + checkSyslogPriority(pass, basicLit) + } + + case "Dial": + if basicLit := getBasicLitFromArgs(args, 4, 2, token.INT); basicLit != nil { + checkSyslogPriority(pass, basicLit) + } + + 
case "NewLogger": + if basicLit := getBasicLitFromArgs(args, 2, 0, token.INT); basicLit != nil { + checkSyslogPriority(pass, basicLit) + } + } + default: + // w.WriteHeader(http.StatusOk) + if fun.Sel.Name == "WriteHeader" { + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 1, 0, token.INT); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + } + } +} + +// typeElts checks elements of type. +func typeElts(pass *analysis.Pass, x *ast.Ident, typ *ast.SelectorExpr, elts []ast.Expr) { + switch x.Name { + case "http": + switch typ.Sel.Name { + // http.Request{Method: http.MethodGet} + case "Request": + if !lookupFlag(pass, HTTPMethodFlag) { + return + } + + if basicLit := getBasicLitFromElts(elts, "Method"); basicLit != nil { + checkHTTPMethod(pass, basicLit) + } + + // http.Response{StatusCode: http.StatusOK} + case "Response": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromElts(elts, "StatusCode"); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + } + case "httptest": + if typ.Sel.Name == "ResponseRecorder" { + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromElts(elts, "Code"); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + } + } +} + +// ifElseStmt checks X and Y in if-else-statement. +func ifElseStmt(pass *analysis.Pass, x *ast.SelectorExpr, y *ast.BasicLit) { + switch x.Sel.Name { + case "StatusCode": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + checkHTTPStatusCode(pass, y) + + case "Method": + if !lookupFlag(pass, HTTPMethodFlag) { + return + } + + checkHTTPMethod(pass, y) + } +} + +func switchStmt(pass *analysis.Pass, x *ast.SelectorExpr, cases []ast.Stmt) { + var checkFunc func(pass *analysis.Pass, basicLit *ast.BasicLit) + + switch x.Sel.Name { + case "StatusCode": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + checkFunc = checkHTTPStatusCode + + case "Method": + if !lookupFlag(pass, HTTPMethodFlag) { + return + } + + checkFunc = checkHTTPMethod + + default: + return + } + + for _, c := range cases { + caseClause, ok := c.(*ast.CaseClause) + if !ok { + continue + } + + for _, expr := range caseClause.List { + basicLit, ok := expr.(*ast.BasicLit) + if !ok { + continue + } + + checkFunc(pass, basicLit) + } + } +} + +func switchStmtAsIfElseStmt(pass *analysis.Pass, cases []ast.Stmt) { + for _, c := range cases { + caseClause, ok := c.(*ast.CaseClause) + if !ok { + continue + } + + for _, expr := range caseClause.List { + binaryExpr, ok := expr.(*ast.BinaryExpr) + if !ok { + continue + } + + x, ok := binaryExpr.X.(*ast.SelectorExpr) + if !ok { + continue + } + + y, ok := binaryExpr.Y.(*ast.BasicLit) + if !ok { + continue + } + + ifElseStmt(pass, x, y) + } + } +} + +func lookupFlag(pass *analysis.Pass, name string) bool { + return pass.Analyzer.Flags.Lookup(name).Value.(flag.Getter).Get().(bool) +} + +func checkHTTPMethod(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := getBasicLitValue(basicLit) + + key := strings.ToUpper(currentVal) + + if newVal, ok := mapping.HTTPMethod[key]; ok { + report(pass, basicLit.Pos(), currentVal, newVal) + } +} + +func checkHTTPStatusCode(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := getBasicLitValue(basicLit) + + if newVal, ok := mapping.HTTPStatusCode[currentVal]; ok { + report(pass, basicLit.Pos(), currentVal, newVal) + } +} + +func checkTimeWeekday(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := 
getBasicLitValue(basicLit)
+
+	if newVal, ok := mapping.TimeWeekday[currentVal]; ok {
+		report(pass, basicLit.Pos(), currentVal, newVal)
+	}
+}
+
+func checkTimeMonth(pass *analysis.Pass, basicLit *ast.BasicLit) {
+	currentVal := getBasicLitValue(basicLit)
+
+	if newVal, ok := mapping.TimeMonth[currentVal]; ok {
+		report(pass, basicLit.Pos(), currentVal, newVal)
+	}
+}
+
+func checkTimeLayout(pass *analysis.Pass, basicLit *ast.BasicLit) {
+	currentVal := getBasicLitValue(basicLit)
+
+	if newVal, ok := mapping.TimeLayout[currentVal]; ok {
+		report(pass, basicLit.Pos(), currentVal, newVal)
+	}
+}
+
+func checkCryptoHash(pass *analysis.Pass, basicLit *ast.BasicLit) {
+	currentVal := getBasicLitValue(basicLit)
+
+	if newVal, ok := mapping.CryptoHash[currentVal]; ok {
+		report(pass, basicLit.Pos(), currentVal, newVal)
+	}
+}
+
+func checkRPCDefaultPath(pass *analysis.Pass, basicLit *ast.BasicLit) {
+	currentVal := getBasicLitValue(basicLit)
+
+	if newVal, ok := mapping.RPCDefaultPath[currentVal]; ok {
+		report(pass, basicLit.Pos(), currentVal, newVal)
+	}
+}
+
+func checkOSDevNull(pass *analysis.Pass, basicLit *ast.BasicLit) {}
+
+func checkSQLIsolationLevel(pass *analysis.Pass, basicLit *ast.BasicLit) {
+	currentVal := getBasicLitValue(basicLit)
+
+	if newVal, ok := mapping.SQLIsolationLevel[currentVal]; ok {
+		report(pass, basicLit.Pos(), currentVal, newVal)
+	}
+}
+
+func checkTLSSignatureScheme(pass *analysis.Pass, basicLit *ast.BasicLit) {
+	currentVal := getBasicLitValue(basicLit)
+
+	if newVal, ok := mapping.TLSSignatureScheme[currentVal]; ok {
+		report(pass, basicLit.Pos(), currentVal, newVal)
+	}
+}
+
+func checkConstantKind(pass *analysis.Pass, basicLit *ast.BasicLit) {
+	currentVal := getBasicLitValue(basicLit)
+
+	if newVal, ok := mapping.ConstantKind[currentVal]; ok {
+		report(pass, basicLit.Pos(), currentVal, newVal)
+	}
+}
+
+func checkSyslogPriority(pass *analysis.Pass, basicLit *ast.BasicLit) {}
+
+// getBasicLitFromArgs gets the *ast.BasicLit of a function argument.
+//
+// Arguments:
+// - args - slice of function arguments
+// - count - expected number of arguments in the function
+// - idx - index of the argument to get the *ast.BasicLit
+// - typ - argument type
+func getBasicLitFromArgs(args []ast.Expr, count, idx int, typ token.Token) *ast.BasicLit {
+	if len(args) != count {
+		return nil
+	}
+
+	if idx > count-1 {
+		return nil
+	}
+
+	basicLit, ok := args[idx].(*ast.BasicLit)
+	if !ok {
+		return nil
+	}
+
+	if basicLit.Kind != typ {
+		return nil
+	}
+
+	return basicLit
+}
+
+// getBasicLitFromElts gets the *ast.BasicLit of a struct element.
+//
+// Arguments:
+// - elts - slice of struct elements
+// - key - name of the key in the struct
+func getBasicLitFromElts(elts []ast.Expr, key string) *ast.BasicLit {
+	for _, e := range elts {
+		keyValueExpr, ok := e.(*ast.KeyValueExpr)
+		if !ok {
+			continue
+		}
+
+		ident, ok := keyValueExpr.Key.(*ast.Ident)
+		if !ok {
+			continue
+		}
+
+		if ident.Name != key {
+			continue
+		}
+
+		if basicLit, ok := keyValueExpr.Value.(*ast.BasicLit); ok {
+			return basicLit
+		}
+	}
+
+	return nil
+}
+
+// getBasicLitValue returns the BasicLit value as a string without quotes.
+func getBasicLitValue(basicLit *ast.BasicLit) string { + var val strings.Builder + for _, r := range basicLit.Value { + if r == '"' { + continue + } else { + val.WriteRune(r) + } + } + return val.String() +} + +func report(pass *analysis.Pass, pos token.Pos, currentVal, newVal string) { + pass.Reportf(pos, "%q can be replaced by %s", currentVal, newVal) +} diff --git a/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping/mapping.go b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping/mapping.go new file mode 100644 index 0000000000..5bad23d288 --- /dev/null +++ b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping/mapping.go @@ -0,0 +1,202 @@ +package mapping + +import ( + "crypto" + "crypto/tls" + "database/sql" + "go/constant" + "net/http" + "net/rpc" + "strconv" + "time" +) + +var CryptoHash = map[string]string{ + crypto.MD4.String(): "crypto.MD4.String()", + crypto.MD5.String(): "crypto.MD5.String()", + crypto.SHA1.String(): "crypto.SHA1.String()", + crypto.SHA224.String(): "crypto.SHA224.String()", + crypto.SHA256.String(): "crypto.SHA256.String()", + crypto.SHA384.String(): "crypto.SHA384.String()", + crypto.SHA512.String(): "crypto.SHA512.String()", + crypto.MD5SHA1.String(): "crypto.MD5SHA1.String()", + crypto.RIPEMD160.String(): "crypto.RIPEMD160.String()", + crypto.SHA3_224.String(): "crypto.SHA3_224.String()", + crypto.SHA3_256.String(): "crypto.SHA3_256.String()", + crypto.SHA3_384.String(): "crypto.SHA3_384.String()", + crypto.SHA3_512.String(): "crypto.SHA3_512.String()", + crypto.SHA512_224.String(): "crypto.SHA512_224.String()", + crypto.SHA512_256.String(): "crypto.SHA512_256.String()", + crypto.BLAKE2s_256.String(): "crypto.BLAKE2s_256.String()", + crypto.BLAKE2b_256.String(): "crypto.BLAKE2b_256.String()", + crypto.BLAKE2b_384.String(): "crypto.BLAKE2b_384.String()", + crypto.BLAKE2b_512.String(): "crypto.BLAKE2b_512.String()", +} + +var HTTPMethod = map[string]string{ + http.MethodGet: "http.MethodGet", + http.MethodHead: "http.MethodHead", + http.MethodPost: "http.MethodPost", + http.MethodPut: "http.MethodPut", + http.MethodPatch: "http.MethodPatch", + http.MethodDelete: "http.MethodDelete", + http.MethodConnect: "http.MethodConnect", + http.MethodOptions: "http.MethodOptions", + http.MethodTrace: "http.MethodTrace", +} + +var HTTPStatusCode = map[string]string{ + strconv.Itoa(http.StatusContinue): "http.StatusContinue", + strconv.Itoa(http.StatusSwitchingProtocols): "http.StatusSwitchingProtocols", + strconv.Itoa(http.StatusProcessing): "http.StatusProcessing", + strconv.Itoa(http.StatusEarlyHints): "http.StatusEarlyHints", + + strconv.Itoa(http.StatusOK): "http.StatusOK", + strconv.Itoa(http.StatusCreated): "http.StatusCreated", + strconv.Itoa(http.StatusAccepted): "http.StatusAccepted", + strconv.Itoa(http.StatusNonAuthoritativeInfo): "http.StatusNonAuthoritativeInfo", + strconv.Itoa(http.StatusNoContent): "http.StatusNoContent", + strconv.Itoa(http.StatusResetContent): "http.StatusResetContent", + strconv.Itoa(http.StatusPartialContent): "http.StatusPartialContent", + strconv.Itoa(http.StatusMultiStatus): "http.StatusMultiStatus", + strconv.Itoa(http.StatusAlreadyReported): "http.StatusAlreadyReported", + strconv.Itoa(http.StatusIMUsed): "http.StatusIMUsed", + + strconv.Itoa(http.StatusMultipleChoices): "http.StatusMultipleChoices", + strconv.Itoa(http.StatusMovedPermanently): "http.StatusMovedPermanently", + strconv.Itoa(http.StatusFound): "http.StatusFound", + 
strconv.Itoa(http.StatusSeeOther): "http.StatusSeeOther", + strconv.Itoa(http.StatusNotModified): "http.StatusNotModified", + strconv.Itoa(http.StatusUseProxy): "http.StatusUseProxy", + strconv.Itoa(http.StatusTemporaryRedirect): "http.StatusTemporaryRedirect", + strconv.Itoa(http.StatusPermanentRedirect): "http.StatusPermanentRedirect", + + strconv.Itoa(http.StatusBadRequest): "http.StatusBadRequest", + strconv.Itoa(http.StatusUnauthorized): "http.StatusUnauthorized", + strconv.Itoa(http.StatusPaymentRequired): "http.StatusPaymentRequired", + strconv.Itoa(http.StatusForbidden): "http.StatusForbidden", + strconv.Itoa(http.StatusNotFound): "http.StatusNotFound", + strconv.Itoa(http.StatusMethodNotAllowed): "http.StatusMethodNotAllowed", + strconv.Itoa(http.StatusNotAcceptable): "http.StatusNotAcceptable", + strconv.Itoa(http.StatusProxyAuthRequired): "http.StatusProxyAuthRequired", + strconv.Itoa(http.StatusRequestTimeout): "http.StatusRequestTimeout", + strconv.Itoa(http.StatusConflict): "http.StatusConflict", + strconv.Itoa(http.StatusGone): "http.StatusGone", + strconv.Itoa(http.StatusLengthRequired): "http.StatusLengthRequired", + strconv.Itoa(http.StatusPreconditionFailed): "http.StatusPreconditionFailed", + strconv.Itoa(http.StatusRequestEntityTooLarge): "http.StatusRequestEntityTooLarge", + strconv.Itoa(http.StatusRequestURITooLong): "http.StatusRequestURITooLong", + strconv.Itoa(http.StatusUnsupportedMediaType): "http.StatusUnsupportedMediaType", + strconv.Itoa(http.StatusRequestedRangeNotSatisfiable): "http.StatusRequestedRangeNotSatisfiable", + strconv.Itoa(http.StatusExpectationFailed): "http.StatusExpectationFailed", + strconv.Itoa(http.StatusTeapot): "http.StatusTeapot", + strconv.Itoa(http.StatusMisdirectedRequest): "http.StatusMisdirectedRequest", + strconv.Itoa(http.StatusUnprocessableEntity): "http.StatusUnprocessableEntity", + strconv.Itoa(http.StatusLocked): "http.StatusLocked", + strconv.Itoa(http.StatusFailedDependency): "http.StatusFailedDependency", + strconv.Itoa(http.StatusTooEarly): "http.StatusTooEarly", + strconv.Itoa(http.StatusUpgradeRequired): "http.StatusUpgradeRequired", + strconv.Itoa(http.StatusPreconditionRequired): "http.StatusPreconditionRequired", + strconv.Itoa(http.StatusTooManyRequests): "http.StatusTooManyRequests", + strconv.Itoa(http.StatusRequestHeaderFieldsTooLarge): "http.StatusRequestHeaderFieldsTooLarge", + strconv.Itoa(http.StatusUnavailableForLegalReasons): "http.StatusUnavailableForLegalReasons", + + strconv.Itoa(http.StatusInternalServerError): "http.StatusInternalServerError", + strconv.Itoa(http.StatusNotImplemented): "http.StatusNotImplemented", + strconv.Itoa(http.StatusBadGateway): "http.StatusBadGateway", + strconv.Itoa(http.StatusServiceUnavailable): "http.StatusServiceUnavailable", + strconv.Itoa(http.StatusGatewayTimeout): "http.StatusGatewayTimeout", + strconv.Itoa(http.StatusHTTPVersionNotSupported): "http.StatusHTTPVersionNotSupported", + strconv.Itoa(http.StatusVariantAlsoNegotiates): "http.StatusVariantAlsoNegotiates", + strconv.Itoa(http.StatusInsufficientStorage): "http.StatusInsufficientStorage", + strconv.Itoa(http.StatusLoopDetected): "http.StatusLoopDetected", + strconv.Itoa(http.StatusNotExtended): "http.StatusNotExtended", + strconv.Itoa(http.StatusNetworkAuthenticationRequired): "http.StatusNetworkAuthenticationRequired", +} + +var RPCDefaultPath = map[string]string{ + rpc.DefaultRPCPath: "rpc.DefaultRPCPath", + rpc.DefaultDebugPath: "rpc.DefaultDebugPath", +} + +var TimeWeekday = map[string]string{ + 
time.Sunday.String(): "time.Sunday.String()", + time.Monday.String(): "time.Monday.String()", + time.Tuesday.String(): "time.Tuesday.String()", + time.Wednesday.String(): "time.Wednesday.String()", + time.Thursday.String(): "time.Thursday.String()", + time.Friday.String(): "time.Friday.String()", + time.Saturday.String(): "time.Saturday.String()", +} + +var TimeMonth = map[string]string{ + time.January.String(): "time.January.String()", + time.February.String(): "time.February.String()", + time.March.String(): "time.March.String()", + time.April.String(): "time.April.String()", + time.May.String(): "time.May.String()", + time.June.String(): "time.June.String()", + time.July.String(): "time.July.String()", + time.August.String(): "time.August.String()", + time.September.String(): "time.September.String()", + time.October.String(): "time.October.String()", + time.November.String(): "time.November.String()", + time.December.String(): "time.December.String()", +} + +var TimeLayout = map[string]string{ + time.Layout: "time.Layout", + time.ANSIC: "time.ANSIC", + time.UnixDate: "time.UnixDate", + time.RubyDate: "time.RubyDate", + time.RFC822: "time.RFC822", + time.RFC822Z: "time.RFC822Z", + time.RFC850: "time.RFC850", + time.RFC1123: "time.RFC1123", + time.RFC1123Z: "time.RFC1123Z", + time.RFC3339: "time.RFC3339", + time.RFC3339Nano: "time.RFC3339Nano", + time.Kitchen: "time.Kitchen", + time.Stamp: "time.Stamp", + time.StampMilli: "time.StampMilli", + time.StampMicro: "time.StampMicro", + time.StampNano: "time.StampNano", + time.DateTime: "time.DateTime", + time.DateOnly: "time.DateOnly", + time.TimeOnly: "time.TimeOnly", +} + +var SQLIsolationLevel = map[string]string{ + // sql.LevelDefault.String(): "sql.LevelDefault.String()", + sql.LevelReadUncommitted.String(): "sql.LevelReadUncommitted.String()", + sql.LevelReadCommitted.String(): "sql.LevelReadCommitted.String()", + sql.LevelWriteCommitted.String(): "sql.LevelWriteCommitted.String()", + sql.LevelRepeatableRead.String(): "sql.LevelRepeatableRead.String()", + // sql.LevelSnapshot.String(): "sql.LevelSnapshot.String()", + // sql.LevelSerializable.String(): "sql.LevelSerializable.String()", + // sql.LevelLinearizable.String(): "sql.LevelLinearizable.String()", +} + +var TLSSignatureScheme = map[string]string{ + tls.PSSWithSHA256.String(): "tls.PSSWithSHA256.String()", + tls.ECDSAWithP256AndSHA256.String(): "tls.ECDSAWithP256AndSHA256.String()", + tls.Ed25519.String(): "tls.Ed25519.String()", + tls.PSSWithSHA384.String(): "tls.PSSWithSHA384.String()", + tls.PSSWithSHA512.String(): "tls.PSSWithSHA512.String()", + tls.PKCS1WithSHA256.String(): "tls.PKCS1WithSHA256.String()", + tls.PKCS1WithSHA384.String(): "tls.PKCS1WithSHA384.String()", + tls.PKCS1WithSHA512.String(): "tls.PKCS1WithSHA512.String()", + tls.ECDSAWithP384AndSHA384.String(): "tls.ECDSAWithP384AndSHA384.String()", + tls.ECDSAWithP521AndSHA512.String(): "tls.ECDSAWithP521AndSHA512.String()", + tls.PKCS1WithSHA1.String(): "tls.PKCS1WithSHA1.String()", + tls.ECDSAWithSHA1.String(): "tls.ECDSAWithSHA1.String()", +} + +var ConstantKind = map[string]string{ + // constant.Unknown.String(): "constant.Unknown.String()", + constant.Bool.String(): "constant.Bool.String()", + constant.String.String(): "constant.String.String()", + constant.Int.String(): "constant.Int.String()", + constant.Float.String(): "constant.Float.String()", + constant.Complex.String(): "constant.Complex.String()", +} diff --git a/vendor/github.com/securego/gosec/v2/.golangci.yml 
b/vendor/github.com/securego/gosec/v2/.golangci.yml index 64e4e4515c..d591dc2493 100644 --- a/vendor/github.com/securego/gosec/v2/.golangci.yml +++ b/vendor/github.com/securego/gosec/v2/.golangci.yml @@ -1,33 +1,45 @@ linters: enable: - - asciicheck - - bodyclose - - deadcode - - depguard - - dogsled - - durationcheck - - errcheck - - errorlint - - exportloopref - - gci - - gofmt - - gofumpt - - goimports - - gosec - - gosimple - - govet - - importas - - ineffassign - - megacheck - - misspell - - nakedret - - nolintlint - - revive - - staticcheck - - structcheck - - typecheck - - unconvert - - unparam - - unused - - varcheck - - wastedassign + - asciicheck + - bodyclose + - dogsled + - durationcheck + - errcheck + - errorlint + - exportloopref + - gci + - ginkgolinter + - gochecknoinits + - gofmt + - gofumpt + - goimports + - gosec + - gosimple + - govet + - importas + - ineffassign + - megacheck + - misspell + - nakedret + - nolintlint + - revive + - staticcheck + - typecheck + - unconvert + - unparam + - unused + - wastedassign + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/securego) + revive: + rules: + - name: dot-imports + disabled: true + +run: + timeout: 5m diff --git a/vendor/github.com/securego/gosec/v2/.goreleaser.yml b/vendor/github.com/securego/gosec/v2/.goreleaser.yml index 539be5659e..bd85bab3ac 100644 --- a/vendor/github.com/securego/gosec/v2/.goreleaser.yml +++ b/vendor/github.com/securego/gosec/v2/.goreleaser.yml @@ -18,6 +18,8 @@ builds: goarch: - amd64 - arm64 + - s390x + - ppc64le ldflags: -X main.Version={{.Version}} -X main.GitTag={{.Tag}} -X main.BuildDate={{.Date}} env: - CGO_ENABLED=0 @@ -25,6 +27,11 @@ builds: signs: - cmd: cosign stdin: '{{ .Env.COSIGN_PASSWORD}}' - args: ["sign-blob", "--key=/tmp/cosign.key", "--output=${signature}", "${artifact}"] + args: + - "sign-blob" + - "--key=/tmp/cosign.key" + - "--output=${signature}" + - "${artifact}" + - "--yes" artifacts: all diff --git a/vendor/github.com/securego/gosec/v2/Dockerfile b/vendor/github.com/securego/gosec/v2/Dockerfile index b57c981fb3..1bf94da7d7 100644 --- a/vendor/github.com/securego/gosec/v2/Dockerfile +++ b/vendor/github.com/securego/gosec/v2/Dockerfile @@ -1,11 +1,11 @@ ARG GO_VERSION FROM golang:${GO_VERSION}-alpine AS builder -RUN apk add --no-cache ca-certificates make git curl gcc libc-dev -RUN mkdir -p /build +RUN apk add --no-cache ca-certificates make git curl gcc libc-dev \ + && mkdir -p /build WORKDIR /build COPY . /build/ -RUN go mod download -RUN make build-linux +RUN go mod download \ + && make build-linux FROM golang:${GO_VERSION}-alpine RUN apk add --no-cache ca-certificates bash git gcc libc-dev openssh diff --git a/vendor/github.com/securego/gosec/v2/Makefile b/vendor/github.com/securego/gosec/v2/Makefile index 5dbfd7764b..4f6cce7651 100644 --- a/vendor/github.com/securego/gosec/v2/Makefile +++ b/vendor/github.com/securego/gosec/v2/Makefile @@ -2,27 +2,37 @@ GIT_TAG?= $(shell git describe --always --tags) BIN = gosec FMT_CMD = $(gofmt -s -l -w $(find . 
-type f -name '*.go' -not -path './vendor/*') | tee /dev/stderr) IMAGE_REPO = securego -BUILD_DATE ?= $(shell date +%Y-%m-%d) +DATE_FMT=+%Y-%m-%d +ifdef SOURCE_DATE_EPOCH + BUILD_DATE ?= $(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "$(DATE_FMT)" 2>/dev/null || date -u -r "$(SOURCE_DATE_EPOCH)" "$(DATE_FMT)" 2>/dev/null || date -u "$(DATE_FMT)") +else + BUILD_DATE ?= $(shell date "$(DATE_FMT)") +endif BUILDFLAGS := "-w -s -X 'main.Version=$(GIT_TAG)' -X 'main.GitTag=$(GIT_TAG)' -X 'main.BuildDate=$(BUILD_DATE)'" CGO_ENABLED = 0 GO := GO111MODULE=on go -GO_NOMOD :=GO111MODULE=off go GOPATH ?= $(shell $(GO) env GOPATH) GOBIN ?= $(GOPATH)/bin -GOLINT ?= $(GOBIN)/golint GOSEC ?= $(GOBIN)/gosec GINKGO ?= $(GOBIN)/ginkgo -GO_VERSION = 1.18 +GO_MINOR_VERSION = $(shell $(GO) version | cut -c 14- | cut -d' ' -f1 | cut -d'.' -f2) +GOVULN_MIN_VERSION = 17 +GO_VERSION = 1.22 default: $(MAKE) build install-test-deps: go install github.com/onsi/ginkgo/v2/ginkgo@latest - $(GO_NOMOD) get -u golang.org/x/crypto/ssh - $(GO_NOMOD) get -u github.com/lib/pq + go install golang.org/x/crypto/...@latest + go install github.com/lib/pq/...@latest -test: install-test-deps build fmt lint sec +install-govulncheck: + @if [ $(GO_MINOR_VERSION) -gt $(GOVULN_MIN_VERSION) ]; then \ + go install golang.org/x/vuln/cmd/govulncheck@latest; \ + fi + +test: install-test-deps build-race fmt vet sec govulncheck $(GINKGO) -v --fail-fast fmt: @@ -30,10 +40,7 @@ fmt: @FORMATTED=`$(GO) fmt ./...` @([ ! -z "$(FORMATTED)" ] && printf "Fixed unformatted files:\n$(FORMATTED)") || true -lint: - @echo "LINTING: golint" - $(GO_NOMOD) get -u golang.org/x/lint/golint - $(GOLINT) -set_exit_status ./... +vet: @echo "VETTING" $(GO) vet ./... @@ -45,12 +52,21 @@ sec: @echo "SECURITY SCANNING" ./$(BIN) ./... +govulncheck: install-govulncheck + @echo "CHECKING VULNERABILITIES" + @if [ $(GO_MINOR_VERSION) -gt $(GOVULN_MIN_VERSION) ]; then \ + govulncheck ./...; \ + fi + test-coverage: install-test-deps go test -race -v -count=1 -coverprofile=coverage.out ./... build: go build -o $(BIN) ./cmd/gosec/ +build-race: + go build -race -o $(BIN) ./cmd/gosec/ + clean: rm -rf build vendor dist coverage.txt rm -f release image $(BIN) @@ -60,7 +76,7 @@ release: goreleaser release build-linux: - CGO_ENABLED=$(CGO_ENABLED) GOOS=linux GOARCH=amd64 go build -ldflags=$(BUILDFLAGS) -o $(BIN) ./cmd/gosec/ + CGO_ENABLED=$(CGO_ENABLED) GOOS=linux go build -ldflags=$(BUILDFLAGS) -o $(BIN) ./cmd/gosec/ image: @echo "Building the Docker image..." @@ -73,4 +89,7 @@ image-push: image docker push $(IMAGE_REPO)/$(BIN):$(GIT_TAG) docker push $(IMAGE_REPO)/$(BIN):latest -.PHONY: test build clean release image image-push +tlsconfig: + go generate ./... + +.PHONY: test build clean release image image-push tlsconfig diff --git a/vendor/github.com/securego/gosec/v2/README.md b/vendor/github.com/securego/gosec/v2/README.md index cc824393b8..f7b41df2ed 100644 --- a/vendor/github.com/securego/gosec/v2/README.md +++ b/vendor/github.com/securego/gosec/v2/README.md @@ -1,7 +1,7 @@ -# gosec - Golang Security Checker +# gosec - Go Security Checker -Inspects source code for security problems by scanning the Go AST. +Inspects source code for security problems by scanning the Go AST and SSA code representation. 
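The SSA-based analysis mentioned here is what powers the new G602 slice-bounds rule that shows up in the rule list below. As a rough sketch (illustrative only, not code from this patch), G602 targets constant out-of-bounds slice accesses of this kind:

```go
package main

func main() {
	buf := make([]byte, 4) // len 4, cap 4

	// The high bound exceeds the slice's capacity: this compiles but
	// panics at runtime, which is the pattern a static, SSA-based
	// bounds check like G602 is designed to report.
	_ = buf[:8]
}
```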
@@ -21,7 +21,7 @@ You may obtain a copy of the License [here](http://www.apache.org/licenses/LICEN [![Docs](https://readthedocs.org/projects/docs/badge/?version=latest)](https://securego.io/) [![Downloads](https://img.shields.io/github/downloads/securego/gosec/total.svg)](https://github.com/securego/gosec/releases) [![Docker Pulls](https://img.shields.io/docker/pulls/securego/gosec.svg)](https://hub.docker.com/r/securego/gosec/tags) -[![Slack](http://securego.herokuapp.com/badge.svg)](http://securego.herokuapp.com) +[![Slack](https://img.shields.io/badge/Slack-4A154B?style=for-the-badge&logo=slack&logoColor=white)](http://securego.slack.com) ## Install @@ -68,7 +68,7 @@ jobs: GO111MODULE: on steps: - name: Checkout Source - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Run Gosec Security Scanner uses: securego/gosec@master with: @@ -98,14 +98,14 @@ jobs: GO111MODULE: on steps: - name: Checkout Source - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Run Gosec Security Scanner uses: securego/gosec@master with: # we let the report content trigger a failure using the GitHub Security features. args: '-no-fail -fmt sarif -out results.sarif ./...' - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@v1 + uses: github/codeql-action/upload-sarif@v2 with: # Path to SARIF file relative to the root of the repository sarif_file: results.sarif @@ -113,18 +113,10 @@ jobs: ### Local Installation -#### Go 1.16+ - ```bash go install github.com/securego/gosec/v2/cmd/gosec@latest ``` -#### Go version < 1.16 - -```bash -go get -u github.com/securego/gosec/v2/cmd/gosec -``` - ## Usage Gosec can be configured to only run a subset of rules, to exclude certain file @@ -157,7 +149,7 @@ directory you can supply `./...` as the input argument. - G304: File path provided as taint input - G305: File traversal when extracting zip/tar archive - G306: Poor file permissions used when writing to a new file -- G307: Deferring a method which returns an error +- G307: Poor file permissions used when creating a file with os.Create - G401: Detect the usage of DES, RC4, MD5 or SHA1 - G402: Look for bad TLS connection settings - G403: Ensure minimum RSA key length of 2048 bits @@ -168,10 +160,12 @@ directory you can supply `./...` as the input argument. - G504: Import blocklist: net/http/cgi - G505: Import blocklist: crypto/sha1 - G601: Implicit memory aliasing of items from a range statement +- G602: Slice access out of bounds ### Retired rules - G105: Audit the use of math/big.Int.Exp - [CVE is fixed](https://github.com/golang/go/issues/15184) +- G307: Deferring a method which returns an error - causing more inconvenience than fixing a security issue, despite the details from this [blog post](https://www.joeshaw.org/dont-defer-close-on-writable-files/) ### Selecting rules @@ -188,7 +182,7 @@ $ gosec -exclude=G303 ./... ### CWE Mapping -Every issue detected by `gosec` is mapped to a [CWE (Common Weakness Enumeration)](http://cwe.mitre.org/data/index.html) which describes in more generic terms the vulnerability. The exact mapping can be found [here](https://github.com/securego/gosec/blob/master/issue.go#L50). +Every issue detected by `gosec` is mapped to a [CWE (Common Weakness Enumeration)](http://cwe.mitre.org/data/index.html) which describes in more generic terms the vulnerability. The exact mapping can be found [here](https://github.com/securego/gosec/blob/master/issue/issue.go#L50). ### Configuration @@ -272,31 +266,33 @@ gosec -exclude-generated ./...
### Annotating code -As with all automated detection tools, there will be cases of false positives. In cases where gosec reports a failure that has been manually verified as being safe, +As with all automated detection tools, there will be cases of false positives. +In cases where gosec reports a failure that has been manually verified as being safe, it is possible to annotate the code with a comment that starts with `#nosec`. + The `#nosec` comment should have the format `#nosec [RuleList] [-- Justification]`. -The annotation causes gosec to stop processing any further nodes within the -AST so can apply to a whole block or more granularly to a single expression. +The `#nosec` comment needs to be placed on the line where the warning is reported. ```go - -import "md5" //#nosec - - -func main(){ - - /* #nosec */ - if x > y { - h := md5.New() // this will also be ignored - } - +func main() { + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, // #nosec G402 + }, + } + + client := &http.Client{Transport: tr} + _, err := client.Get("https://golang.org/") + if err != nil { + fmt.Println(err) + } } - ``` -When a specific false positive has been identified and verified as safe, you may wish to suppress only that single rule (or a specific set of rules) -within a section of code, while continuing to scan for other problems. To do this, you can list the rule(s) to be suppressed within +When a specific false positive has been identified and verified as safe, you may +wish to suppress only that single rule (or a specific set of rules) within a section of code, +while continuing to scan for other problems. To do this, you can list the rule(s) to be suppressed within the `#nosec` annotation, e.g: `/* #nosec G401 */` or `//#nosec G201 G202 G203` You can put the description or justification text for the annotation. The @@ -389,7 +385,7 @@ schema-generate -i sarif-schema-2.1.0.json -o mypath/types.go ``` Most of the MarshallJSON/UnmarshalJSON are removed except the one for PropertyBag which is handy to inline the additional properties. The rest can be removed. -The URI,ID, UUID, GUID were renamed so it fits the Golang convention defined [here](https://github.com/golang/lint/blob/master/lint.go#L700) +The URI, ID, UUID, GUID were renamed so they fit the Go convention defined [here](https://github.com/golang/lint/blob/master/lint.go#L700) ### Tests diff --git a/vendor/github.com/securego/gosec/v2/USERS.md b/vendor/github.com/securego/gosec/v2/USERS.md index ffc0560814..9b6e4eeee4 100644 --- a/vendor/github.com/securego/gosec/v2/USERS.md +++ b/vendor/github.com/securego/gosec/v2/USERS.md @@ -15,6 +15,7 @@ This is a list of gosec's users. Please send a pull request with your organisati 9. [PingCAP/tidb](https://github.com/pingcap/tidb) 10. [Checkmarx](https://www.checkmarx.com/) 11. [SeatGeek](https://www.seatgeek.com/) +12.
[reMarkable](https://remarkable.com) ## Projects diff --git a/vendor/github.com/securego/gosec/v2/action.yml b/vendor/github.com/securego/gosec/v2/action.yml index aab6c8039d..3097075ce8 100644 --- a/vendor/github.com/securego/gosec/v2/action.yml +++ b/vendor/github.com/securego/gosec/v2/action.yml @@ -10,7 +10,7 @@ inputs: runs: using: 'docker' - image: 'docker://securego/gosec' + image: 'docker://securego/gosec:2.18.2' args: - ${{ inputs.args }} diff --git a/vendor/github.com/securego/gosec/v2/analyzer.go b/vendor/github.com/securego/gosec/v2/analyzer.go index 0f9fef2d12..0b1225b9b3 100644 --- a/vendor/github.com/securego/gosec/v2/analyzer.go +++ b/vendor/github.com/securego/gosec/v2/analyzer.go @@ -31,6 +31,10 @@ import ( "strings" "sync" + "github.com/securego/gosec/v2/analyzers" + "github.com/securego/gosec/v2/issue" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" "golang.org/x/tools/go/packages" ) @@ -42,7 +46,10 @@ const LoadMode = packages.NeedName | packages.NeedTypes | packages.NeedTypesSizes | packages.NeedTypesInfo | - packages.NeedSyntax + packages.NeedSyntax | + packages.NeedModule | + packages.NeedEmbedFiles | + packages.NeedEmbedPatterns const externalSuppressionJustification = "Globally suppressed." @@ -50,9 +57,83 @@ const aliasOfAllRules = "*" var generatedCodePattern = regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`) +type ignore struct { + start int + end int + suppressions map[string][]issue.SuppressionInfo +} + +type ignores map[string][]ignore + +func newIgnores() ignores { + return make(map[string][]ignore) +} + +func (i ignores) parseLine(line string) (int, int) { + parts := strings.Split(line, "-") + start, err := strconv.Atoi(parts[0]) + if err != nil { + start = 0 + } + end := start + if len(parts) > 1 { + if e, err := strconv.Atoi(parts[1]); err == nil { + end = e + } + } + return start, end +} + +func (i ignores) add(file string, line string, suppressions map[string]issue.SuppressionInfo) { + is := []ignore{} + if _, ok := i[file]; ok { + is = i[file] + } + found := false + start, end := i.parseLine(line) + for _, ig := range is { + if ig.start <= start && ig.end >= end { + found = true + for r, s := range suppressions { + ss, ok := ig.suppressions[r] + if !ok { + ss = []issue.SuppressionInfo{} + } + ss = append(ss, s) + ig.suppressions[r] = ss + } + break + } + } + if !found { + ig := ignore{ + start: start, + end: end, + suppressions: map[string][]issue.SuppressionInfo{}, + } + for r, s := range suppressions { + ig.suppressions[r] = []issue.SuppressionInfo{s} + } + is = append(is, ig) + } + i[file] = is +} + +func (i ignores) get(file string, line string) map[string][]issue.SuppressionInfo { + start, end := i.parseLine(line) + if is, ok := i[file]; ok { + for _, i := range is { + if start <= i.start && end >= i.end { + return i.suppressions + } + } + } + return map[string][]issue.SuppressionInfo{} +} + // The Context is populated with data parsed from the source code as it is scanned. // It is passed through to all rule functions as they are called. Rules may use -// this data in conjunction withe the encountered AST node. +// this data in conjunction with the encountered AST node. 
type Context struct { FileSet *token.FileSet Comments ast.CommentMap @@ -60,12 +141,24 @@ type Context struct { Pkg *types.Package PkgFiles []*ast.File Root *ast.File - Config Config Imports *ImportTracker - Ignores []map[string][]SuppressionInfo + Config Config + Ignores ignores PassedValues map[string]interface{} } +// GetFileAtNodePos returns the file at the node position in the file set available in the context. +func (ctx *Context) GetFileAtNodePos(node ast.Node) *token.File { + return ctx.FileSet.File(node.Pos()) +} + +// NewIssue creates a new issue +func (ctx *Context) NewIssue(node ast.Node, ruleID, desc string, + severity, confidence issue.Score, +) *issue.Issue { + return issue.New(ctx.GetFileAtNodePos(node), node, ruleID, desc, severity, confidence) +} + // Metrics used when reporting information about a scanning run. type Metrics struct { NumFiles int `json:"files"` @@ -82,7 +175,7 @@ type Analyzer struct { context *Context config Config logger *log.Logger - issues []*Issue + issues []*issue.Issue stats *Metrics errors map[string][]Error // keys are file paths; values are the golang errors in those files tests bool @@ -90,13 +183,8 @@ type Analyzer struct { showIgnored bool trackSuppressions bool concurrency int -} - -// SuppressionInfo object is to record the kind and the justification that used -// to suppress violations. -type SuppressionInfo struct { - Kind string `json:"kind"` - Justification string `json:"justification"` + analyzerList []*analysis.Analyzer + mu sync.Mutex } // NewAnalyzer builds a new analyzer. @@ -119,13 +207,14 @@ func NewAnalyzer(conf Config, tests bool, excludeGenerated bool, trackSuppressio context: &Context{}, config: conf, logger: logger, - issues: make([]*Issue, 0, 16), + issues: make([]*issue.Issue, 0, 16), stats: &Metrics{}, errors: make(map[string][]Error), tests: tests, concurrency: concurrency, excludeGenerated: excludeGenerated, trackSuppressions: trackSuppressions, + analyzerList: analyzers.BuildDefaultAnalyzers(), } } @@ -172,9 +261,9 @@ func (gosec *Analyzer) Process(buildTags []string, packagePaths ...string) error for { select { case s := <-j: - packages, err := gosec.load(s, config) + pkgs, err := gosec.load(s, config) select { - case r <- result{pkgPath: s, pkgs: packages, err: err}: + case r <- result{pkgPath: s, pkgs: pkgs, err: err}: case <-quit: // we've been told to stop, probably an error while // processing a previous result. @@ -216,7 +305,8 @@ func (gosec *Analyzer) Process(buildTags []string, packagePaths ...string) error wg.Wait() // wait for the goroutines to stop return fmt.Errorf("parsing errors in pkg %q: %w", pkg.Name, err) } - gosec.Check(pkg) + gosec.CheckRules(pkg) + gosec.CheckAnalyzers(pkg) } } } @@ -235,7 +325,9 @@ func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages. // step 1/3 create build context. buildD := build.Default // step 2/3: add build tags to get env dependent files into basePackage. + gosec.mu.Lock() buildD.BuildTags = conf.BuildFlags + gosec.mu.Unlock() basePackage, err := buildD.ImportDir(pkgPath, build.ImportComment) if err != nil { return []*packages.Package{}, fmt.Errorf("importing dir %q: %w", pkgPath, err) @@ -259,7 +351,9 @@ func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages. } // step 3/3 remove build tags from conf to proceed build correctly. + gosec.mu.Lock() conf.BuildFlags = nil + defer gosec.mu.Unlock() pkgs, err := packages.Load(conf, packageFiles...) 
if err != nil { return []*packages.Package{}, fmt.Errorf("loading files from package %q: %w", pkgPath, err) @@ -267,8 +361,8 @@ func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages. return pkgs, nil } -// Check runs analysis on the given package -func (gosec *Analyzer) Check(pkg *packages.Package) { +// CheckRules runs analysis on the given package. +func (gosec *Analyzer) CheckRules(pkg *packages.Package) { gosec.logger.Println("Checking package:", pkg.Name) for _, file := range pkg.Syntax { fp := pkg.Fset.File(file.Pos()) @@ -296,14 +390,116 @@ func (gosec *Analyzer) Check(pkg *packages.Package) { gosec.context.Pkg = pkg.Types gosec.context.PkgFiles = pkg.Syntax gosec.context.Imports = NewImportTracker() - gosec.context.Imports.TrackFile(file) gosec.context.PassedValues = make(map[string]interface{}) + gosec.context.Ignores = newIgnores() + gosec.updateIgnores() ast.Walk(gosec, file) gosec.stats.NumFiles++ gosec.stats.NumLines += pkg.Fset.File(file.Pos()).LineCount() } } +// CheckAnalyzers runs analyzers on a given package. +func (gosec *Analyzer) CheckAnalyzers(pkg *packages.Package) { + ssaResult, err := gosec.buildSSA(pkg) + if err != nil || ssaResult == nil { + gosec.logger.Printf("Error building the SSA representation of the package %q: %s", pkg.Name, err) + return + } + + resultMap := map[*analysis.Analyzer]interface{}{ + buildssa.Analyzer: &analyzers.SSAAnalyzerResult{ + Config: gosec.Config(), + Logger: gosec.logger, + SSA: ssaResult.(*buildssa.SSA), + }, + } + + generatedFiles := gosec.generatedFiles(pkg) + + for _, analyzer := range gosec.analyzerList { + pass := &analysis.Pass{ + Analyzer: analyzer, + Fset: pkg.Fset, + Files: pkg.Syntax, + OtherFiles: pkg.OtherFiles, + IgnoredFiles: pkg.IgnoredFiles, + Pkg: pkg.Types, + TypesInfo: pkg.TypesInfo, + TypesSizes: pkg.TypesSizes, + ResultOf: resultMap, + Report: func(d analysis.Diagnostic) {}, + ImportObjectFact: nil, + ExportObjectFact: nil, + ImportPackageFact: nil, + ExportPackageFact: nil, + AllObjectFacts: nil, + AllPackageFacts: nil, + } + result, err := pass.Analyzer.Run(pass) + if err != nil { + gosec.logger.Printf("Error running analyzer %s: %s\n", analyzer.Name, err) + continue + } + if result != nil { + if passIssues, ok := result.([]*issue.Issue); ok { + for _, iss := range passIssues { + if gosec.excludeGenerated { + if _, ok := generatedFiles[iss.File]; ok { + continue + } + } + gosec.updateIssues(iss) + } + } + } + } +} + +func (gosec *Analyzer) generatedFiles(pkg *packages.Package) map[string]bool { + generatedFiles := map[string]bool{} + for _, file := range pkg.Syntax { + if isGeneratedFile(file) { + fp := pkg.Fset.File(file.Pos()) + if fp == nil { + // skip files which cannot be located + continue + } + generatedFiles[fp.Name()] = true + } + } + return generatedFiles +} + +// buildSSA runs the SSA pass which builds the SSA representation of the package. It handles gracefully any panic. 
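+// A panic recovered here leaves the returned result nil, which CheckAnalyzers treats as a signal to skip the analyzers for that package.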
+func (gosec *Analyzer) buildSSA(pkg *packages.Package) (interface{}, error) { + defer func() { + if r := recover(); r != nil { + gosec.logger.Printf("Panic when running SSA analyser on package: %s", pkg.Name) + } + }() + ssaPass := &analysis.Pass{ + Analyzer: buildssa.Analyzer, + Fset: pkg.Fset, + Files: pkg.Syntax, + OtherFiles: pkg.OtherFiles, + IgnoredFiles: pkg.IgnoredFiles, + Pkg: pkg.Types, + TypesInfo: pkg.TypesInfo, + TypesSizes: pkg.TypesSizes, + ResultOf: nil, + Report: nil, + ImportObjectFact: nil, + ExportObjectFact: nil, + ImportPackageFact: nil, + ExportPackageFact: nil, + AllObjectFacts: nil, + AllPackageFacts: nil, + } + + return ssaPass.Analyzer.Run(ssaPass) +} + func isGeneratedFile(file *ast.File) bool { for _, comment := range file.Comments { for _, row := range comment.List { @@ -365,20 +561,27 @@ func (gosec *Analyzer) AppendError(file string, err error) { } // ignore a node (and sub-tree) if it is tagged with a nosec tag comment -func (gosec *Analyzer) ignore(n ast.Node) map[string]SuppressionInfo { +func (gosec *Analyzer) ignore(n ast.Node) map[string]issue.SuppressionInfo { if groups, ok := gosec.context.Comments[n]; ok && !gosec.ignoreNosec { // Checks if an alternative for #nosec is set and, if not, uses the default. - noSecDefaultTag := "#nosec" + noSecDefaultTag, err := gosec.config.GetGlobal(Nosec) + if err != nil { + noSecDefaultTag = NoSecTag(string(Nosec)) + } else { + noSecDefaultTag = NoSecTag(noSecDefaultTag) + } noSecAlternativeTag, err := gosec.config.GetGlobal(NoSecAlternative) if err != nil { noSecAlternativeTag = noSecDefaultTag + } else { + noSecAlternativeTag = NoSecTag(noSecAlternativeTag) } for _, group := range groups { comment := strings.TrimSpace(group.Text()) - foundDefaultTag := strings.HasPrefix(comment, noSecDefaultTag) || regexp.MustCompile("\n *"+noSecDefaultTag).Match([]byte(comment)) - foundAlternativeTag := strings.HasPrefix(comment, noSecAlternativeTag) || regexp.MustCompile("\n *"+noSecAlternativeTag).Match([]byte(comment)) + foundDefaultTag := strings.HasPrefix(comment, noSecDefaultTag) || regexp.MustCompile("\n *"+noSecDefaultTag).MatchString(comment) + foundAlternativeTag := strings.HasPrefix(comment, noSecAlternativeTag) || regexp.MustCompile("\n *"+noSecAlternativeTag).MatchString(comment) if foundDefaultTag || foundAlternativeTag { gosec.stats.NumNosec++ @@ -402,13 +605,13 @@ func (gosec *Analyzer) ignore(n ast.Node) map[string]SuppressionInfo { re := regexp.MustCompile(`(G\d{3})`) matches := re.FindAllStringSubmatch(directive, -1) - suppression := SuppressionInfo{ + suppression := issue.SuppressionInfo{ Kind: "inSource", Justification: justification, } // Find the rule IDs to ignore. - ignores := make(map[string]SuppressionInfo) + ignores := make(map[string]issue.SuppressionInfo) for _, v := range matches { ignores[v[1]] = suppression } @@ -427,86 +630,92 @@ func (gosec *Analyzer) ignore(n ast.Node) map[string]SuppressionInfo { // Visit runs the gosec visitor logic over an AST created by parsing go code. // Rule methods added with AddRule will be invoked as necessary. func (gosec *Analyzer) Visit(n ast.Node) ast.Visitor { - // If we've reached the end of this branch, pop off the ignores stack. - if n == nil { - if len(gosec.context.Ignores) > 0 { - gosec.context.Ignores = gosec.context.Ignores[1:] - } - return gosec + // Using ast.File instead of ast.ImportSpec, so that we can track all imports at once. + switch i := n.(type) { + case *ast.File: + gosec.context.Imports.TrackFile(i) } - // Get any new rule exclusions. 
- ignoredRules := gosec.ignore(n) - - // Now create the union of exclusions. - ignores := map[string][]SuppressionInfo{} - if len(gosec.context.Ignores) > 0 { - for k, v := range gosec.context.Ignores[0] { - ignores[k] = v + for _, rule := range gosec.ruleset.RegisteredFor(n) { + issue, err := rule.Match(n, gosec.context) + if err != nil { + file, line := GetLocation(n, gosec.context) + file = path.Base(file) + gosec.logger.Printf("Rule error: %v => %s (%s:%d)\n", reflect.TypeOf(rule), err, file, line) } + gosec.updateIssues(issue) } + return gosec +} - for ruleID, suppression := range ignoredRules { - ignores[ruleID] = append(ignores[ruleID], suppression) +func (gosec *Analyzer) updateIgnores() { + for n := range gosec.context.Comments { + gosec.updateIgnoredRulesForNode(n) } +} - // Push the new set onto the stack. - gosec.context.Ignores = append([]map[string][]SuppressionInfo{ignores}, gosec.context.Ignores...) +func (gosec *Analyzer) updateIgnoredRulesForNode(n ast.Node) { + ignoredRules := gosec.ignore(n) + if len(ignoredRules) > 0 { + if gosec.context.Ignores == nil { + gosec.context.Ignores = newIgnores() + } + line := issue.GetLine(gosec.context.FileSet.File(n.Pos()), n) + gosec.context.Ignores.add( + gosec.context.FileSet.File(n.Pos()).Name(), + line, + ignoredRules, + ) + } +} - // Track aliased and initialization imports - gosec.context.Imports.TrackImport(n) +func (gosec *Analyzer) getSuppressionsAtLineInFile(file string, line string, id string) ([]issue.SuppressionInfo, bool) { + ignoredRules := gosec.context.Ignores.get(file, line) + + // Check if the rule was specifically suppressed at this location. + generalSuppressions, generalIgnored := ignoredRules[aliasOfAllRules] + ruleSuppressions, ruleIgnored := ignoredRules[id] + ignored := generalIgnored || ruleIgnored + suppressions := append(generalSuppressions, ruleSuppressions...) + + // Track external suppressions of this rule. + if gosec.ruleset.IsRuleSuppressed(id) { + ignored = true + suppressions = append(suppressions, issue.SuppressionInfo{ + Kind: "external", + Justification: externalSuppressionJustification, + }) + } + return suppressions, ignored +} - for _, rule := range gosec.ruleset.RegisteredFor(n) { - // Check if all rules are ignored. - generalSuppressions, generalIgnored := ignores[aliasOfAllRules] - // Check if the specific rule is ignored - ruleSuppressions, ruleIgnored := ignores[rule.ID()] - - ignored := generalIgnored || ruleIgnored - suppressions := append(generalSuppressions, ruleSuppressions...) - - // Track external suppressions. 
- if gosec.ruleset.IsRuleSuppressed(rule.ID()) { - ignored = true - suppressions = append(suppressions, SuppressionInfo{ - Kind: "external", - Justification: externalSuppressionJustification, - }) +func (gosec *Analyzer) updateIssues(issue *issue.Issue) { + if issue != nil { + suppressions, ignored := gosec.getSuppressionsAtLineInFile(issue.File, issue.Line, issue.RuleID) + if gosec.showIgnored { + issue.NoSec = ignored } - - issue, err := rule.Match(n, gosec.context) - if err != nil { - file, line := GetLocation(n, gosec.context) - file = path.Base(file) - gosec.logger.Printf("Rule error: %v => %s (%s:%d)\n", reflect.TypeOf(rule), err, file, line) + if !ignored || !gosec.showIgnored { + gosec.stats.NumFound++ } - if issue != nil { - if gosec.showIgnored { - issue.NoSec = ignored - } - if !ignored || !gosec.showIgnored { - gosec.stats.NumFound++ - } - if ignored && gosec.trackSuppressions { - issue.WithSuppressions(suppressions) - gosec.issues = append(gosec.issues, issue) - } else if !ignored || gosec.showIgnored || gosec.ignoreNosec { - gosec.issues = append(gosec.issues, issue) - } + if ignored && gosec.trackSuppressions { + issue.WithSuppressions(suppressions) + gosec.issues = append(gosec.issues, issue) + } else if !ignored || gosec.showIgnored || gosec.ignoreNosec { + gosec.issues = append(gosec.issues, issue) } } - return gosec } // Report returns the current issues discovered and the metrics about the scan -func (gosec *Analyzer) Report() ([]*Issue, *Metrics, map[string][]Error) { +func (gosec *Analyzer) Report() ([]*issue.Issue, *Metrics, map[string][]Error) { return gosec.issues, gosec.stats, gosec.errors } // Reset clears state such as context, issues and metrics from the configured analyzer func (gosec *Analyzer) Reset() { gosec.context = &Context{} - gosec.issues = make([]*Issue, 0, 16) + gosec.issues = make([]*issue.Issue, 0, 16) gosec.stats = &Metrics{} gosec.ruleset = NewRuleSet() } diff --git a/vendor/github.com/securego/gosec/v2/analyzers/slice_bounds.go b/vendor/github.com/securego/gosec/v2/analyzers/slice_bounds.go new file mode 100644 index 0000000000..08a55eb429 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/analyzers/slice_bounds.go @@ -0,0 +1,386 @@ +// (c) Copyright gosec's authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package analyzers + +import ( + "errors" + "fmt" + "go/token" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/ssa" + + "github.com/securego/gosec/v2/issue" +) + +type bound int + +const ( + lowerUnbounded bound = iota + upperUnbounded + unbounded + upperBounded +) + +const maxDepth = 20 + +func newSliceBoundsAnalyzer(id string, description string) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: id, + Doc: description, + Run: runSliceBounds, + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + } +} + +func runSliceBounds(pass *analysis.Pass) (interface{}, error) { + ssaResult, err := getSSAResult(pass) + if err != nil { + return nil, err + } + + issues := map[ssa.Instruction]*issue.Issue{} + ifs := map[ssa.If]*ssa.BinOp{} + for _, mcall := range ssaResult.SSA.SrcFuncs { + for _, block := range mcall.DomPreorder() { + for _, instr := range block.Instrs { + switch instr := instr.(type) { + case *ssa.Alloc: + sliceCap, err := extractSliceCapFromAlloc(instr.String()) + if err != nil { + break + } + allocRefs := instr.Referrers() + if allocRefs == nil { + break + } + for _, instr := range *allocRefs { + if slice, ok := instr.(*ssa.Slice); ok { + if _, ok := slice.X.(*ssa.Alloc); ok { + if slice.Parent() != nil { + l, h := extractSliceBounds(slice) + newCap := computeSliceNewCap(l, h, sliceCap) + violations := []ssa.Instruction{} + trackSliceBounds(0, newCap, slice, &violations, ifs) + for _, s := range violations { + switch s := s.(type) { + case *ssa.Slice: + issue := newIssue( + pass.Analyzer.Name, + "slice bounds out of range", + pass.Fset, + s.Pos(), + issue.Low, + issue.High) + issues[s] = issue + case *ssa.IndexAddr: + issue := newIssue( + pass.Analyzer.Name, + "slice index out of range", + pass.Fset, + s.Pos(), + issue.Low, + issue.High) + issues[s] = issue + } + } + } + } + } + } + } + } + } + } + + for ifref, binop := range ifs { + bound, value, err := extractBinOpBound(binop) + if err != nil { + continue + } + for i, block := range ifref.Block().Succs { + if i == 1 { + bound = invBound(bound) + } + for _, instr := range block.Instrs { + if _, ok := issues[instr]; ok { + switch bound { + case lowerUnbounded: + break + case upperUnbounded, unbounded: + delete(issues, instr) + case upperBounded: + switch tinstr := instr.(type) { + case *ssa.Slice: + lower, upper := extractSliceBounds(tinstr) + if isSliceInsideBounds(0, value, lower, upper) { + delete(issues, instr) + } + case *ssa.IndexAddr: + indexValue, err := extractIntValue(tinstr.Index.String()) + if err != nil { + break + } + if isSliceIndexInsideBounds(0, value, indexValue) { + delete(issues, instr) + } + } + } + } + } + } + } + + foundIssues := []*issue.Issue{} + for _, issue := range issues { + foundIssues = append(foundIssues, issue) + } + if len(foundIssues) > 0 { + return foundIssues, nil + } + return nil, nil +} + +func trackSliceBounds(depth int, sliceCap int, slice ssa.Node, violations *[]ssa.Instruction, ifs map[ssa.If]*ssa.BinOp) { + if depth == maxDepth { + return + } + depth++ + if violations == nil { + violations = &[]ssa.Instruction{} + } + referrers := slice.Referrers() + if referrers != nil { + for _, refinstr := range *referrers { + switch refinstr := refinstr.(type) { + case *ssa.Slice: + checkAllSlicesBounds(depth, sliceCap, refinstr, violations, ifs) + switch refinstr.X.(type) { + case *ssa.Alloc, *ssa.Parameter: + l, h := extractSliceBounds(refinstr) + newCap := computeSliceNewCap(l, h, sliceCap) + 
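// Recurse into the re-sliced value with its recomputed capacity so deeper re-slices and index accesses are checked against the tightened bounds. +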
trackSliceBounds(depth, newCap, refinstr, violations, ifs) + } + case *ssa.IndexAddr: + indexValue, err := extractIntValue(refinstr.Index.String()) + if err == nil && !isSliceIndexInsideBounds(0, sliceCap, indexValue) { + *violations = append(*violations, refinstr) + } + case *ssa.Call: + if ifref, cond := extractSliceIfLenCondition(refinstr); ifref != nil && cond != nil { + ifs[*ifref] = cond + } else { + parPos := -1 + for pos, arg := range refinstr.Call.Args { + if a, ok := arg.(*ssa.Slice); ok && a == slice { + parPos = pos + } + } + if fn, ok := refinstr.Call.Value.(*ssa.Function); ok { + if len(fn.Params) > parPos && parPos > -1 { + param := fn.Params[parPos] + trackSliceBounds(depth, sliceCap, param, violations, ifs) + } + } + } + } + } + } +} + +func checkAllSlicesBounds(depth int, sliceCap int, slice *ssa.Slice, violations *[]ssa.Instruction, ifs map[ssa.If]*ssa.BinOp) { + if depth == maxDepth { + return + } + depth++ + if violations == nil { + violations = &[]ssa.Instruction{} + } + sliceLow, sliceHigh := extractSliceBounds(slice) + if !isSliceInsideBounds(0, sliceCap, sliceLow, sliceHigh) { + *violations = append(*violations, slice) + } + switch slice.X.(type) { + case *ssa.Alloc, *ssa.Parameter, *ssa.Slice: + l, h := extractSliceBounds(slice) + newCap := computeSliceNewCap(l, h, sliceCap) + trackSliceBounds(depth, newCap, slice, violations, ifs) + } + + references := slice.Referrers() + if references == nil { + return + } + for _, ref := range *references { + switch s := ref.(type) { + case *ssa.Slice: + checkAllSlicesBounds(depth, sliceCap, s, violations, ifs) + switch s.X.(type) { + case *ssa.Alloc, *ssa.Parameter: + l, h := extractSliceBounds(s) + newCap := computeSliceNewCap(l, h, sliceCap) + trackSliceBounds(depth, newCap, s, violations, ifs) + } + } + } +} + +func extractSliceIfLenCondition(call *ssa.Call) (*ssa.If, *ssa.BinOp) { + if builtInLen, ok := call.Call.Value.(*ssa.Builtin); ok { + if builtInLen.Name() == "len" { + refs := call.Referrers() + if refs != nil { + for _, ref := range *refs { + if binop, ok := ref.(*ssa.BinOp); ok { + binoprefs := binop.Referrers() + for _, ref := range *binoprefs { + if ifref, ok := ref.(*ssa.If); ok { + return ifref, binop + } + } + } + } + } + } + } + return nil, nil +} + +func computeSliceNewCap(l, h, oldCap int) int { + if l == 0 && h == 0 { + return oldCap + } + if l > 0 && h == 0 { + return oldCap - l + } + if l == 0 && h > 0 { + return h + } + return h - l +} + +func invBound(bound bound) bound { + switch bound { + case lowerUnbounded: + return upperUnbounded + case upperUnbounded: + return lowerUnbounded + case upperBounded: + return unbounded + case unbounded: + return upperBounded + default: + return unbounded + } +} + +func extractBinOpBound(binop *ssa.BinOp) (bound, int, error) { + if binop.X != nil { + if x, ok := binop.X.(*ssa.Const); ok { + value, err := strconv.Atoi(x.Value.String()) + if err != nil { + return lowerUnbounded, value, err + } + switch binop.Op { + case token.LSS, token.LEQ: + return upperUnbounded, value, nil + case token.GTR, token.GEQ: + return lowerUnbounded, value, nil + case token.EQL: + return upperBounded, value, nil + case token.NEQ: + return unbounded, value, nil + } + } + } + if binop.Y != nil { + if y, ok := binop.Y.(*ssa.Const); ok { + value, err := strconv.Atoi(y.Value.String()) + if err != nil { + return lowerUnbounded, value, err + } + switch binop.Op { + case token.LSS, token.LEQ: + return lowerUnbounded, value, nil + case token.GTR, token.GEQ: + return upperUnbounded, value, nil + case 
token.EQL: + return upperBounded, value, nil + case token.NEQ: + return unbounded, value, nil + } + } + } + return lowerUnbounded, 0, fmt.Errorf("unable to extract constant from binop") +} + +func isSliceIndexInsideBounds(l, h int, index int) bool { + return (l <= index && index < h) +} + +func isSliceInsideBounds(l, h int, cl, ch int) bool { + return (l <= cl && h >= ch) && (l <= ch && h >= cl) +} + +func extractSliceBounds(slice *ssa.Slice) (int, int) { + var low int + if slice.Low != nil { + l, err := extractIntValue(slice.Low.String()) + if err == nil { + low = l + } + } + var high int + if slice.High != nil { + h, err := extractIntValue(slice.High.String()) + if err == nil { + high = h + } + } + return low, high +} + +func extractIntValue(value string) (int, error) { + parts := strings.Split(value, ":") + if len(parts) != 2 { + return 0, fmt.Errorf("invalid value: %s", value) + } + if parts[1] != "int" { + return 0, fmt.Errorf("invalid value: %s", value) + } + return strconv.Atoi(parts[0]) +} + +func extractSliceCapFromAlloc(instr string) (int, error) { + re := regexp.MustCompile(`new \[(\d+)\]*`) + var sliceCap int + matches := re.FindAllStringSubmatch(instr, -1) + if matches == nil { + return sliceCap, errors.New("no slice cap found") + } + + if len(matches) > 0 { + m := matches[0] + if len(m) > 1 { + return strconv.Atoi(m[1]) + } + } + + return 0, errors.New("no slice cap found") +} diff --git a/vendor/github.com/securego/gosec/v2/analyzers/util.go b/vendor/github.com/securego/gosec/v2/analyzers/util.go new file mode 100644 index 0000000000..5941184aa2 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/analyzers/util.go @@ -0,0 +1,98 @@ +// (c) Copyright gosec's authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package analyzers + +import ( + "fmt" + "go/token" + "log" + "os" + "strconv" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + + "github.com/securego/gosec/v2/issue" +) + +// SSAAnalyzerResult contains various information returned by the +// SSA analysis along with some configuration +type SSAAnalyzerResult struct { + Config map[string]interface{} + Logger *log.Logger + SSA *buildssa.SSA +} + +// BuildDefaultAnalyzers returns the default list of analyzers +func BuildDefaultAnalyzers() []*analysis.Analyzer { + return []*analysis.Analyzer{ + newSliceBoundsAnalyzer("G602", "Possible slice bounds out of range"), + } +} + +// getSSAResult retrieves the SSA result from analysis pass +func getSSAResult(pass *analysis.Pass) (*SSAAnalyzerResult, error) { + result, ok := pass.ResultOf[buildssa.Analyzer] + if !ok { + return nil, fmt.Errorf("no SSA result found in the analysis pass") + } + ssaResult, ok := result.(*SSAAnalyzerResult) + if !ok { + return nil, fmt.Errorf("the analysis pass result is not of type SSA") + } + return ssaResult, nil +} + +// newIssue creates a new gosec issue +func newIssue(analyzerID string, desc string, fileSet *token.FileSet, + pos token.Pos, severity, confidence issue.Score, +) *issue.Issue { + file := fileSet.File(pos) + line := file.Line(pos) + col := file.Position(pos).Column + + return &issue.Issue{ + RuleID: analyzerID, + File: file.Name(), + Line: strconv.Itoa(line), + Col: strconv.Itoa(col), + Severity: severity, + Confidence: confidence, + What: desc, + Cwe: issue.GetCweByRule(analyzerID), + Code: issueCodeSnippet(fileSet, pos), + } +} + +func issueCodeSnippet(fileSet *token.FileSet, pos token.Pos) string { + file := fileSet.File(pos) + + start := (int64)(file.Line(pos)) + if start-issue.SnippetOffset > 0 { + start = start - issue.SnippetOffset + } + end := (int64)(file.Line(pos)) + end = end + issue.SnippetOffset + + var code string + if file, err := os.Open(file.Name()); err == nil { + defer file.Close() // #nosec + code, err = issue.CodeSnippet(file, start, end) + if err != nil { + return err.Error() + } + } + return code +} diff --git a/vendor/github.com/securego/gosec/v2/config.go b/vendor/github.com/securego/gosec/v2/config.go index 443d45f78b..9cbb7a7134 100644 --- a/vendor/github.com/securego/gosec/v2/config.go +++ b/vendor/github.com/securego/gosec/v2/config.go @@ -29,8 +29,15 @@ const ( ExcludeRules GlobalOption = "exclude" // IncludeRules global option for should be load IncludeRules GlobalOption = "include" + // SSA global option to enable go analysis framework with SSA support + SSA GlobalOption = "ssa" ) +// NoSecTag returns the tag used to disable gosec for a line of code. +func NoSecTag(tag string) string { + return fmt.Sprintf("%s%s", "#", tag) +} + // Config is used to provide configuration and customization to each of the rules. 
type Config map[string]interface{} diff --git a/vendor/github.com/securego/gosec/v2/cwe/data.go b/vendor/github.com/securego/gosec/v2/cwe/data.go index 0e377b96a6..79a6b9d231 100644 --- a/vendor/github.com/securego/gosec/v2/cwe/data.go +++ b/vendor/github.com/securego/gosec/v2/cwe/data.go @@ -1,7 +1,5 @@ package cwe -import "fmt" - const ( // Acronym is the acronym of CWE Acronym = "CWE" @@ -13,134 +11,128 @@ const ( Organization = "MITRE" // Description the description of CWE Description = "The MITRE Common Weakness Enumeration" -) - -var ( // InformationURI link to the published CWE PDF - InformationURI = fmt.Sprintf("https://cwe.mitre.org/data/published/cwe_v%s.pdf/", Version) + InformationURI = "https://cwe.mitre.org/data/published/cwe_v" + Version + ".pdf/" // DownloadURI link to the zipped XML of the CWE list - DownloadURI = fmt.Sprintf("https://cwe.mitre.org/data/xml/cwec_v%s.xml.zip", Version) - - data = map[string]*Weakness{} - - weaknesses = []*Weakness{ - { - ID: "118", - Description: "The software does not restrict or incorrectly restricts operations within the boundaries of a resource that is accessed using an index or pointer, such as memory or files.", - Name: "Incorrect Access of Indexable Resource ('Range Error')", - }, - { - ID: "190", - Description: "The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control.", - Name: "Integer Overflow or Wraparound", - }, - { - ID: "200", - Description: "The product exposes sensitive information to an actor that is not explicitly authorized to have access to that information.", - Name: "Exposure of Sensitive Information to an Unauthorized Actor", - }, - { - ID: "22", - Description: "The software uses external input to construct a pathname that is intended to identify a file or directory that is located underneath a restricted parent directory, but the software does not properly neutralize special elements within the pathname that can cause the pathname to resolve to a location that is outside of the restricted directory.", - Name: "Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal')", - }, - { - ID: "242", - Description: "The program calls a function that can never be guaranteed to work safely.", - Name: "Use of Inherently Dangerous Function", - }, - { - ID: "276", - Description: "During installation, installed file permissions are set to allow anyone to modify those files.", - Name: "Incorrect Default Permissions", - }, - { - ID: "295", - Description: "The software does not validate, or incorrectly validates, a certificate.", - Name: "Improper Certificate Validation", - }, - { - ID: "310", - Description: "Weaknesses in this category are related to the design and implementation of data confidentiality and integrity. Frequently these deal with the use of encoding techniques, encryption libraries, and hashing algorithms. 
The weaknesses in this category could lead to a degradation of the quality data if they are not addressed.", - Name: "Cryptographic Issues", - }, - { - ID: "322", - Description: "The software performs a key exchange with an actor without verifying the identity of that actor.", - Name: "Key Exchange without Entity Authentication", - }, - { - ID: "326", - Description: "The software stores or transmits sensitive data using an encryption scheme that is theoretically sound, but is not strong enough for the level of protection required.", - Name: "Inadequate Encryption Strength", - }, - { - ID: "327", - Description: "The use of a broken or risky cryptographic algorithm is an unnecessary risk that may result in the exposure of sensitive information.", - Name: "Use of a Broken or Risky Cryptographic Algorithm", - }, - { - ID: "338", - Description: "The product uses a Pseudo-Random Number Generator (PRNG) in a security context, but the PRNG's algorithm is not cryptographically strong.", - Name: "Use of Cryptographically Weak Pseudo-Random Number Generator (PRNG)", - }, - { - ID: "377", - Description: "Creating and using insecure temporary files can leave application and system data vulnerable to attack.", - Name: "Insecure Temporary File", - }, - { - ID: "400", - Description: "The software does not properly control the allocation and maintenance of a limited resource, thereby enabling an actor to influence the amount of resources consumed, eventually leading to the exhaustion of available resources.", - Name: "Uncontrolled Resource Consumption", - }, - { - ID: "409", - Description: "The software does not handle or incorrectly handles a compressed input with a very high compression ratio that produces a large output.", - Name: "Improper Handling of Highly Compressed Data (Data Amplification)", - }, - { - ID: "703", - Description: "The software does not properly anticipate or handle exceptional conditions that rarely occur during normal operation of the software.", - Name: "Improper Check or Handling of Exceptional Conditions", - }, - { - ID: "78", - Description: "The software constructs all or part of an OS command using externally-influenced input from an upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended OS command when it is sent to a downstream component.", - Name: "Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')", - }, - { - ID: "79", - Description: "The software does not neutralize or incorrectly neutralizes user-controllable input before it is placed in output that is used as a web page that is served to other users.", - Name: "Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')", - }, - { - ID: "798", - Description: "The software contains hard-coded credentials, such as a password or cryptographic key, which it uses for its own inbound authentication, outbound communication to external components, or encryption of internal data.", - Name: "Use of Hard-coded Credentials", - }, - { - ID: "88", - Description: "The software constructs a string for a command to executed by a separate component\nin another control sphere, but it does not properly delimit the\nintended arguments, options, or switches within that command string.", - Name: "Improper Neutralization of Argument Delimiters in a Command ('Argument Injection')", - }, - { - ID: "89", - Description: "The software constructs all or part of an SQL command using externally-influenced input from an 
upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended SQL command when it is sent to a downstream component.", - Name: "Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", - }, - } + DownloadURI = "https://cwe.mitre.org/data/xml/cwec_v" + Version + ".xml.zip" ) -func init() { - for _, weakness := range weaknesses { - data[weakness.ID] = weakness - } +var idWeaknesses = map[string]*Weakness{ + "118": { + ID: "118", + Description: "The software does not restrict or incorrectly restricts operations within the boundaries of a resource that is accessed using an index or pointer, such as memory or files.", + Name: "Incorrect Access of Indexable Resource ('Range Error')", + }, + "190": { + ID: "190", + Description: "The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control.", + Name: "Integer Overflow or Wraparound", + }, + "200": { + ID: "200", + Description: "The product exposes sensitive information to an actor that is not explicitly authorized to have access to that information.", + Name: "Exposure of Sensitive Information to an Unauthorized Actor", + }, + "22": { + ID: "22", + Description: "The software uses external input to construct a pathname that is intended to identify a file or directory that is located underneath a restricted parent directory, but the software does not properly neutralize special elements within the pathname that can cause the pathname to resolve to a location that is outside of the restricted directory.", + Name: "Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal')", + }, + "242": { + ID: "242", + Description: "The program calls a function that can never be guaranteed to work safely.", + Name: "Use of Inherently Dangerous Function", + }, + "276": { + ID: "276", + Description: "During installation, installed file permissions are set to allow anyone to modify those files.", + Name: "Incorrect Default Permissions", + }, + "295": { + ID: "295", + Description: "The software does not validate, or incorrectly validates, a certificate.", + Name: "Improper Certificate Validation", + }, + "310": { + ID: "310", + Description: "Weaknesses in this category are related to the design and implementation of data confidentiality and integrity. Frequently these deal with the use of encoding techniques, encryption libraries, and hashing algorithms. 
The weaknesses in this category could lead to a degradation of the quality data if they are not addressed.", + Name: "Cryptographic Issues", + }, + "322": { + ID: "322", + Description: "The software performs a key exchange with an actor without verifying the identity of that actor.", + Name: "Key Exchange without Entity Authentication", + }, + "326": { + ID: "326", + Description: "The software stores or transmits sensitive data using an encryption scheme that is theoretically sound, but is not strong enough for the level of protection required.", + Name: "Inadequate Encryption Strength", + }, + "327": { + ID: "327", + Description: "The use of a broken or risky cryptographic algorithm is an unnecessary risk that may result in the exposure of sensitive information.", + Name: "Use of a Broken or Risky Cryptographic Algorithm", + }, + "338": { + ID: "338", + Description: "The product uses a Pseudo-Random Number Generator (PRNG) in a security context, but the PRNG's algorithm is not cryptographically strong.", + Name: "Use of Cryptographically Weak Pseudo-Random Number Generator (PRNG)", + }, + "377": { + ID: "377", + Description: "Creating and using insecure temporary files can leave application and system data vulnerable to attack.", + Name: "Insecure Temporary File", + }, + "400": { + ID: "400", + Description: "The software does not properly control the allocation and maintenance of a limited resource, thereby enabling an actor to influence the amount of resources consumed, eventually leading to the exhaustion of available resources.", + Name: "Uncontrolled Resource Consumption", + }, + "409": { + ID: "409", + Description: "The software does not handle or incorrectly handles a compressed input with a very high compression ratio that produces a large output.", + Name: "Improper Handling of Highly Compressed Data (Data Amplification)", + }, + "703": { + ID: "703", + Description: "The software does not properly anticipate or handle exceptional conditions that rarely occur during normal operation of the software.", + Name: "Improper Check or Handling of Exceptional Conditions", + }, + "78": { + ID: "78", + Description: "The software constructs all or part of an OS command using externally-influenced input from an upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended OS command when it is sent to a downstream component.", + Name: "Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')", + }, + "79": { + ID: "79", + Description: "The software does not neutralize or incorrectly neutralizes user-controllable input before it is placed in output that is used as a web page that is served to other users.", + Name: "Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')", + }, + "798": { + ID: "798", + Description: "The software contains hard-coded credentials, such as a password or cryptographic key, which it uses for its own inbound authentication, outbound communication to external components, or encryption of internal data.", + Name: "Use of Hard-coded Credentials", + }, + "88": { + ID: "88", + Description: "The software constructs a string for a command to be executed by a separate component\nin another control sphere, but it does not properly delimit the\nintended arguments, options, or switches within that command string.", + Name: "Improper Neutralization of Argument Delimiters in a Command ('Argument Injection')", + }, + "89": { + ID: "89", + Description: "The
software constructs all or part of an SQL command using externally-influenced input from an upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended SQL command when it is sent to a downstream component.", + Name: "Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", + }, + "676": { + ID: "676", + Description: "The program invokes a potentially dangerous function that could introduce a vulnerability if it is used incorrectly, but the function can also be used safely.", + Name: "Use of Potentially Dangerous Function", + }, } // Get retrieves a CWE weakness by its ID func Get(id string) *Weakness { - weakness, ok := data[id] + weakness, ok := idWeaknesses[id] if ok && weakness != nil { return weakness } diff --git a/vendor/github.com/securego/gosec/v2/entrypoint.sh b/vendor/github.com/securego/gosec/v2/entrypoint.sh index af2acd4b9c..bc6ad6a241 100644 --- a/vendor/github.com/securego/gosec/v2/entrypoint.sh +++ b/vendor/github.com/securego/gosec/v2/entrypoint.sh @@ -4,4 +4,8 @@ # provides all arguments concatenated as a single string. ARGS=("$@") +if [[ ! -z "${GITHUB_AUTHENTICATION_TOKEN}" ]]; then + git config --global --add url."https://x-access-token:${GITHUB_AUTHENTICATION_TOKEN}@github.com/".insteadOf "https://github.com/" +fi + /bin/gosec ${ARGS[*]} diff --git a/vendor/github.com/securego/gosec/v2/helpers.go b/vendor/github.com/securego/gosec/v2/helpers.go index 437d0324b5..15b2b5f3a3 100644 --- a/vendor/github.com/securego/gosec/v2/helpers.go +++ b/vendor/github.com/securego/gosec/v2/helpers.go @@ -37,12 +37,9 @@ import ( // // node, matched := MatchCallByPackage(n, ctx, "math/rand", "Read") func MatchCallByPackage(n ast.Node, c *Context, pkg string, names ...string) (*ast.CallExpr, bool) { - importedName, found := GetImportedName(pkg, c) + importedNames, found := GetImportedNames(pkg, c) if !found { - importedName, found = GetAliasedName(pkg, c) - if !found { - return nil, false - } + return nil, false } if callExpr, ok := n.(*ast.CallExpr); ok { @@ -50,7 +47,10 @@ func MatchCallByPackage(n ast.Node, c *Context, pkg string, names ...string) (*a if err != nil { return nil, false } - if packageName == importedName { + for _, in := range importedNames { + if packageName != in { + continue + } for _, name := range names { if callName == name { return callExpr, true @@ -96,11 +96,46 @@ func GetChar(n ast.Node) (byte, error) { return 0, fmt.Errorf("Unexpected AST node type: %T", n) } +// GetStringRecursive will recursively walk down a tree of *ast.BinaryExpr. It will then concat the results, and return. +// Unlike the other getters, it does _not_ raise an error for unknown ast.Node types. At the base, the recursion will hit a non-BinaryExpr type, +// either BasicLit or other, so it's not an error case. It will only error if `strconv.Unquote` errors. This matters, because there's +// currently functionality that relies on error values being returned by GetString if and when it hits a non-basiclit string node type, +// hence for cases where recursion is needed, we use this separate function, so that we can still be backwards compatible. +// +// This was added to handle a SQL injection concatenation case where the injected value is infixed between two strings, not at the start or end. See example below +// +// Do note that this will omit non-string values.
So for example, if you were to use this node:
+// ```go
+// q := "SELECT * FROM foo WHERE name = '" + os.Args[0] + "' AND 1=1" // will result in "SELECT * FROM foo WHERE '' AND 1=1"
+// ```
+func GetStringRecursive(n ast.Node) (string, error) {
+    if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.STRING {
+        return strconv.Unquote(node.Value)
+    }
+
+    if expr, ok := n.(*ast.BinaryExpr); ok {
+        x, err := GetStringRecursive(expr.X)
+        if err != nil {
+            return "", err
+        }
+
+        y, err := GetStringRecursive(expr.Y)
+        if err != nil {
+            return "", err
+        }
+
+        return x + y, nil
+    }
+
+    return "", nil
+}
+
 // GetString will read and return a string value from an ast.BasicLit
 func GetString(n ast.Node) (string, error) {
     if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.STRING {
         return strconv.Unquote(node.Value)
     }
+
     return "", fmt.Errorf("Unexpected AST node type: %T", n)
 }
@@ -148,7 +183,7 @@ func GetCallInfo(n ast.Node, ctx *Context) (string, string, error) {
     case *ast.CallExpr:
         switch call := expr.Fun.(type) {
         case *ast.Ident:
-            if call.Name == "new" {
+            if call.Name == "new" && len(expr.Args) > 0 {
                 t := ctx.Info.TypeOf(expr.Args[0])
                 if t != nil {
                     return t.String(), fn.Sel.Name, nil
@@ -182,7 +217,7 @@ func GetCallInfo(n ast.Node, ctx *Context) (string, string, error) {
 }
 // GetCallStringArgsValues returns the values of strings arguments if they can be resolved
-func GetCallStringArgsValues(n ast.Node, ctx *Context) []string {
+func GetCallStringArgsValues(n ast.Node, _ *Context) []string {
     values := []string{}
     switch node := n.(type) {
     case *ast.CallExpr:
@@ -201,22 +236,21 @@ func GetCallStringArgsValues(n ast.Node, ctx *Context) []string {
     return values
 }
-// GetIdentStringValues return the string values of an Ident if they can be resolved
-func GetIdentStringValues(ident *ast.Ident) []string {
+func getIdentStringValues(ident *ast.Ident, stringFinder func(ast.Node) (string, error)) []string {
     values := []string{}
     obj := ident.Obj
     if obj != nil {
         switch decl := obj.Decl.(type) {
         case *ast.ValueSpec:
             for _, v := range decl.Values {
-                value, err := GetString(v)
+                value, err := stringFinder(v)
                 if err == nil {
                     values = append(values, value)
                 }
             }
         case *ast.AssignStmt:
             for _, v := range decl.Rhs {
-                value, err := GetString(v)
+                value, err := stringFinder(v)
                 if err == nil {
                     values = append(values, value)
                 }
@@ -226,6 +260,18 @@ func GetIdentStringValues(ident *ast.Ident) []string {
     return values
 }
+// GetIdentStringValuesRecursive returns the string values of an Ident if they can be resolved.
+// The difference between this and GetIdentStringValues is that it will attempt to resolve the strings recursively,
+// if it is passed a *ast.BinaryExpr. See GetStringRecursive for details.
+func GetIdentStringValuesRecursive(ident *ast.Ident) []string {
+    return getIdentStringValues(ident, GetStringRecursive)
+}
+
+// GetIdentStringValues returns the string values of an Ident if they can be resolved
+func GetIdentStringValues(ident *ast.Ident) []string {
+    return getIdentStringValues(ident, GetString)
+}
+
 // GetBinaryExprOperands returns all operands of a binary expression by traversing
 // the expression tree
 func GetBinaryExprOperands(be *ast.BinaryExpr) []ast.Node {
@@ -247,48 +293,23 @@ func GetBinaryExprOperands(be *ast.BinaryExpr) []ast.Node {
     return result
 }
-// GetImportedName returns the name used for the package within the
-// code. It will ignore initialization only imports.
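To illustrate the recursion described in the comment above, here is a minimal, self-contained sketch (the name `resolveString` and the query text are illustrative, not part of gosec) that resolves a concatenated SQL string the same way `GetStringRecursive` does:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"strconv"
)

// resolveString mirrors the recursive walk: concatenate every string
// literal found in a binary-expression tree; non-literal operands (such
// as an injected variable) contribute the empty string, not an error.
func resolveString(n ast.Node) (string, error) {
	if lit, ok := n.(*ast.BasicLit); ok && lit.Kind == token.STRING {
		return strconv.Unquote(lit.Value)
	}
	if expr, ok := n.(*ast.BinaryExpr); ok {
		x, err := resolveString(expr.X)
		if err != nil {
			return "", err
		}
		y, err := resolveString(expr.Y)
		if err != nil {
			return "", err
		}
		return x + y, nil
	}
	return "", nil
}

func main() {
	// The tainted value (here the identifier `name`) is infixed between two literals.
	expr, err := parser.ParseExpr(`"SELECT * FROM foo WHERE name = '" + name + "' AND 1=1"`)
	if err != nil {
		panic(err)
	}
	s, _ := resolveString(expr)
	fmt.Printf("%q\n", s) // "SELECT * FROM foo WHERE name = '' AND 1=1"
}
```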
-func GetImportedName(path string, ctx *Context) (string, bool) { - importName, imported := ctx.Imports.Imported[path] - if !imported { - return "", false - } - - if _, initonly := ctx.Imports.InitOnly[path]; initonly { - return "", false - } - - return importName, true -} - -// GetAliasedName returns the aliased name used for the package within the -// code. It will ignore initialization only imports. -func GetAliasedName(path string, ctx *Context) (string, bool) { - importName, imported := ctx.Imports.Aliased[path] - if !imported { - return "", false - } - - if _, initonly := ctx.Imports.InitOnly[path]; initonly { - return "", false - } - - return importName, true +// GetImportedNames returns the name(s)/alias(es) used for the package within +// the code. It ignores initialization-only imports. +func GetImportedNames(path string, ctx *Context) (names []string, found bool) { + importNames, imported := ctx.Imports.Imported[path] + return importNames, imported } // GetImportPath resolves the full import path of an identifier based on // the imports in the current context(including aliases). func GetImportPath(name string, ctx *Context) (string, bool) { for path := range ctx.Imports.Imported { - if imported, ok := GetImportedName(path, ctx); ok && imported == name { - return path, true - } - } - - for path := range ctx.Imports.Aliased { - if imported, ok := GetAliasedName(path, ctx); ok && imported == name { - return path, true + if imported, ok := GetImportedNames(path, ctx); ok { + for _, n := range imported { + if n == name { + return path, true + } + } } } @@ -326,7 +347,7 @@ func Getenv(key, userDefault string) string { return userDefault } -// GetPkgRelativePath returns the Go relative relative path derived +// GetPkgRelativePath returns the Go relative path derived // form the given path func GetPkgRelativePath(path string) (string, error) { abspath, err := filepath.Abs(path) diff --git a/vendor/github.com/securego/gosec/v2/import_tracker.go b/vendor/github.com/securego/gosec/v2/import_tracker.go index cbb8c5518c..7984e99f42 100644 --- a/vendor/github.com/securego/gosec/v2/import_tracker.go +++ b/vendor/github.com/securego/gosec/v2/import_tracker.go @@ -22,54 +22,49 @@ import ( // by a source file. It is able to differentiate between plain imports, aliased // imports and init only imports. type ImportTracker struct { - Imported map[string]string - Aliased map[string]string - InitOnly map[string]bool + // Imported is a map of Imported with their associated names/aliases. 
+ Imported map[string][]string } // NewImportTracker creates an empty Import tracker instance func NewImportTracker() *ImportTracker { return &ImportTracker{ - make(map[string]string), - make(map[string]string), - make(map[string]bool), + Imported: make(map[string][]string), } } // TrackFile track all the imports used by the supplied file func (t *ImportTracker) TrackFile(file *ast.File) { for _, imp := range file.Imports { - path := strings.Trim(imp.Path.Value, `"`) - parts := strings.Split(path, "/") - if len(parts) > 0 { - name := parts[len(parts)-1] - t.Imported[path] = name - } + t.TrackImport(imp) } } // TrackPackages tracks all the imports used by the supplied packages func (t *ImportTracker) TrackPackages(pkgs ...*types.Package) { for _, pkg := range pkgs { - t.Imported[pkg.Path()] = pkg.Name() + t.Imported[pkg.Path()] = []string{pkg.Name()} } } -// TrackImport tracks imports and handles the 'unsafe' import -func (t *ImportTracker) TrackImport(n ast.Node) { - if imported, ok := n.(*ast.ImportSpec); ok { - path := strings.Trim(imported.Path.Value, `"`) - if imported.Name != nil { - if imported.Name.Name == "_" { - // Initialization only import - t.InitOnly[path] = true - } else { - // Aliased import - t.Aliased[path] = imported.Name.Name - } - } - if path == "unsafe" { - t.Imported[path] = path +// TrackImport tracks imports. +func (t *ImportTracker) TrackImport(imported *ast.ImportSpec) { + importPath := strings.Trim(imported.Path.Value, `"`) + if imported.Name != nil { + if imported.Name.Name != "_" { + // Aliased import + t.Imported[importPath] = append(t.Imported[importPath], imported.Name.String()) } + } else { + t.Imported[importPath] = append(t.Imported[importPath], importName(importPath)) + } +} + +func importName(importPath string) string { + parts := strings.Split(importPath, "/") + name := importPath + if len(parts) > 0 { + name = parts[len(parts)-1] } + return name } diff --git a/vendor/github.com/securego/gosec/v2/install.sh b/vendor/github.com/securego/gosec/v2/install.sh index 0da55d3791..2b6403cb25 100644 --- a/vendor/github.com/securego/gosec/v2/install.sh +++ b/vendor/github.com/securego/gosec/v2/install.sh @@ -280,11 +280,13 @@ http_copy() { github_release() { owner_repo=$1 version=$2 - test -z "$version" && version="latest" - giturl="https://github.com/${owner_repo}/releases/${version}" + giturl="https://api.github.com/repos/${owner_repo}/releases/tags/${version}" + if [ -z "${version}" ]; then + giturl="https://api.github.com/repos/${owner_repo}/releases/latest" + fi json=$(http_copy "$giturl" "Accept:application/json") test -z "$json" && return 1 - version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//') + version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name": *"//' | sed 's/".*//') test -z "$version" && return 1 echo "$version" } diff --git a/vendor/github.com/securego/gosec/v2/issue.go b/vendor/github.com/securego/gosec/v2/issue.go deleted file mode 100644 index 32b9bc0cc8..0000000000 --- a/vendor/github.com/securego/gosec/v2/issue.go +++ /dev/null @@ -1,212 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
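Returning to the import-tracker rework above: a minimal sketch, on a hypothetical input file, of the same single-map tracking, where plain and aliased imports now share one `path -> names` map and `_` (init-only) imports are skipped:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"strings"
)

func main() {
	// Hypothetical source used only to exercise the tracking logic.
	src := `package demo

import (
	"fmt"
	crand "crypto/rand"
	_ "net/http/pprof"
)`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ImportsOnly)
	if err != nil {
		panic(err)
	}
	// One map of import path -> names/aliases, as in the new ImportTracker.
	imported := make(map[string][]string)
	for _, imp := range f.Imports {
		path := strings.Trim(imp.Path.Value, `"`)
		switch {
		case imp.Name != nil && imp.Name.Name == "_":
			// init-only import: intentionally not tracked
		case imp.Name != nil:
			// aliased import: record the alias
			imported[path] = append(imported[path], imp.Name.Name)
		default:
			// plain import: default name is the last path segment
			parts := strings.Split(path, "/")
			imported[path] = append(imported[path], parts[len(parts)-1])
		}
	}
	fmt.Println(imported["crypto/rand"], imported["fmt"]) // [crand] [fmt]
}
```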
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gosec - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "go/ast" - "go/token" - "os" - "strconv" - - "github.com/securego/gosec/v2/cwe" -) - -// Score type used by severity and confidence values -type Score int - -const ( - // Low severity or confidence - Low Score = iota - // Medium severity or confidence - Medium - // High severity or confidence - High -) - -// SnippetOffset defines the number of lines captured before -// the beginning and after the end of a code snippet -const SnippetOffset = 1 - -// GetCweByRule retrieves a cwe weakness for a given RuleID -func GetCweByRule(id string) *cwe.Weakness { - cweID, ok := ruleToCWE[id] - if ok && cweID != "" { - return cwe.Get(cweID) - } - return nil -} - -// ruleToCWE maps gosec rules to CWEs -var ruleToCWE = map[string]string{ - "G101": "798", - "G102": "200", - "G103": "242", - "G104": "703", - "G106": "322", - "G107": "88", - "G108": "200", - "G109": "190", - "G110": "409", - "G111": "22", - "G112": "400", - "G113": "190", - "G201": "89", - "G202": "89", - "G203": "79", - "G204": "78", - "G301": "276", - "G302": "276", - "G303": "377", - "G304": "22", - "G305": "22", - "G306": "276", - "G307": "703", - "G401": "326", - "G402": "295", - "G403": "310", - "G404": "338", - "G501": "327", - "G502": "327", - "G503": "327", - "G504": "327", - "G505": "327", - "G601": "118", -} - -// Issue is returned by a gosec rule if it discovers an issue with the scanned code. -type Issue struct { - Severity Score `json:"severity"` // issue severity (how problematic it is) - Confidence Score `json:"confidence"` // issue confidence (how sure we are we found it) - Cwe *cwe.Weakness `json:"cwe"` // Cwe associated with RuleID - RuleID string `json:"rule_id"` // Human readable explanation - What string `json:"details"` // Human readable explanation - File string `json:"file"` // File name we found it in - Code string `json:"code"` // Impacted code line - Line string `json:"line"` // Line number in file - Col string `json:"column"` // Column number in line - NoSec bool `json:"nosec"` // true if the issue is nosec - Suppressions []SuppressionInfo `json:"suppressions"` // Suppression info of the issue -} - -// FileLocation point out the file path and line number in file -func (i Issue) FileLocation() string { - return fmt.Sprintf("%s:%s", i.File, i.Line) -} - -// MetaData is embedded in all gosec rules. The Severity, Confidence and What message -// will be passed through to reported issues. 
-type MetaData struct { - ID string - Severity Score - Confidence Score - What string -} - -// MarshalJSON is used convert a Score object into a JSON representation -func (c Score) MarshalJSON() ([]byte, error) { - return json.Marshal(c.String()) -} - -// String converts a Score into a string -func (c Score) String() string { - switch c { - case High: - return "HIGH" - case Medium: - return "MEDIUM" - case Low: - return "LOW" - } - return "UNDEFINED" -} - -// codeSnippet extracts a code snippet based on the ast reference -func codeSnippet(file *os.File, start int64, end int64, n ast.Node) (string, error) { - if n == nil { - return "", fmt.Errorf("invalid AST node provided") - } - var pos int64 - var buf bytes.Buffer - scanner := bufio.NewScanner(file) - scanner.Split(bufio.ScanLines) - for scanner.Scan() { - pos++ - if pos > end { - break - } else if pos >= start && pos <= end { - code := fmt.Sprintf("%d: %s\n", pos, scanner.Text()) - buf.WriteString(code) - } - } - return buf.String(), nil -} - -func codeSnippetStartLine(node ast.Node, fobj *token.File) int64 { - s := (int64)(fobj.Line(node.Pos())) - if s-SnippetOffset > 0 { - return s - SnippetOffset - } - return s -} - -func codeSnippetEndLine(node ast.Node, fobj *token.File) int64 { - e := (int64)(fobj.Line(node.End())) - return e + SnippetOffset -} - -// NewIssue creates a new Issue -func NewIssue(ctx *Context, node ast.Node, ruleID, desc string, severity Score, confidence Score) *Issue { - fobj := ctx.FileSet.File(node.Pos()) - name := fobj.Name() - start, end := fobj.Line(node.Pos()), fobj.Line(node.End()) - line := strconv.Itoa(start) - if start != end { - line = fmt.Sprintf("%d-%d", start, end) - } - col := strconv.Itoa(fobj.Position(node.Pos()).Column) - - var code string - if file, err := os.Open(fobj.Name()); err == nil { - defer file.Close() // #nosec - s := codeSnippetStartLine(node, fobj) - e := codeSnippetEndLine(node, fobj) - code, err = codeSnippet(file, s, e, node) - if err != nil { - code = err.Error() - } - } - - return &Issue{ - File: name, - Line: line, - Col: col, - RuleID: ruleID, - What: desc, - Confidence: confidence, - Severity: severity, - Code: code, - Cwe: GetCweByRule(ruleID), - } -} - -// WithSuppressions set the suppressions of the issue -func (i *Issue) WithSuppressions(suppressions []SuppressionInfo) *Issue { - i.Suppressions = suppressions - return i -} diff --git a/vendor/github.com/securego/gosec/v2/issue/issue.go b/vendor/github.com/securego/gosec/v2/issue/issue.go new file mode 100644 index 0000000000..1000b20423 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/issue/issue.go @@ -0,0 +1,225 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package issue + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "go/ast" + "go/token" + "os" + "strconv" + + "github.com/securego/gosec/v2/cwe" +) + +// Score type used by severity and confidence values +type Score int + +const ( + // Low severity or confidence + Low Score = iota + // Medium severity or confidence + Medium + // High severity or confidence + High +) + +// SnippetOffset defines the number of lines captured before +// the beginning and after the end of a code snippet +const SnippetOffset = 1 + +// GetCweByRule retrieves a cwe weakness for a given RuleID +func GetCweByRule(id string) *cwe.Weakness { + cweID, ok := ruleToCWE[id] + if ok && cweID != "" { + return cwe.Get(cweID) + } + return nil +} + +// ruleToCWE maps gosec rules to CWEs +var ruleToCWE = map[string]string{ + "G101": "798", + "G102": "200", + "G103": "242", + "G104": "703", + "G106": "322", + "G107": "88", + "G108": "200", + "G109": "190", + "G110": "409", + "G111": "22", + "G112": "400", + "G113": "190", + "G114": "676", + "G201": "89", + "G202": "89", + "G203": "79", + "G204": "78", + "G301": "276", + "G302": "276", + "G303": "377", + "G304": "22", + "G305": "22", + "G306": "276", + "G401": "326", + "G402": "295", + "G403": "310", + "G404": "338", + "G501": "327", + "G502": "327", + "G503": "327", + "G504": "327", + "G505": "327", + "G601": "118", + "G602": "118", +} + +// Issue is returned by a gosec rule if it discovers an issue with the scanned code. +type Issue struct { + Severity Score `json:"severity"` // issue severity (how problematic it is) + Confidence Score `json:"confidence"` // issue confidence (how sure we are we found it) + Cwe *cwe.Weakness `json:"cwe"` // Cwe associated with RuleID + RuleID string `json:"rule_id"` // Human readable explanation + What string `json:"details"` // Human readable explanation + File string `json:"file"` // File name we found it in + Code string `json:"code"` // Impacted code line + Line string `json:"line"` // Line number in file + Col string `json:"column"` // Column number in line + NoSec bool `json:"nosec"` // true if the issue is nosec + Suppressions []SuppressionInfo `json:"suppressions"` // Suppression info of the issue +} + +// SuppressionInfo object is to record the kind and the justification that used +// to suppress violations. +type SuppressionInfo struct { + Kind string `json:"kind"` + Justification string `json:"justification"` +} + +// FileLocation point out the file path and line number in file +func (i *Issue) FileLocation() string { + return fmt.Sprintf("%s:%s", i.File, i.Line) +} + +// MetaData is embedded in all gosec rules. The Severity, Confidence and What message +// will be passed through to reported issues. 
+type MetaData struct { + ID string + Severity Score + Confidence Score + What string +} + +// MarshalJSON is used convert a Score object into a JSON representation +func (c Score) MarshalJSON() ([]byte, error) { + return json.Marshal(c.String()) +} + +// String converts a Score into a string +func (c Score) String() string { + switch c { + case High: + return "HIGH" + case Medium: + return "MEDIUM" + case Low: + return "LOW" + } + return "UNDEFINED" +} + +// CodeSnippet extracts a code snippet based on the ast reference +func CodeSnippet(file *os.File, start int64, end int64) (string, error) { + var pos int64 + var buf bytes.Buffer + scanner := bufio.NewScanner(file) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + pos++ + if pos > end { + break + } else if pos >= start && pos <= end { + code := fmt.Sprintf("%d: %s\n", pos, scanner.Text()) + buf.WriteString(code) + } + } + return buf.String(), nil +} + +func codeSnippetStartLine(node ast.Node, fobj *token.File) int64 { + s := (int64)(fobj.Line(node.Pos())) + if s-SnippetOffset > 0 { + return s - SnippetOffset + } + return s +} + +func codeSnippetEndLine(node ast.Node, fobj *token.File) int64 { + e := (int64)(fobj.Line(node.End())) + return e + SnippetOffset +} + +// New creates a new Issue +func New(fobj *token.File, node ast.Node, ruleID, desc string, severity, confidence Score) *Issue { + name := fobj.Name() + line := GetLine(fobj, node) + col := strconv.Itoa(fobj.Position(node.Pos()).Column) + + var code string + if node == nil { + code = "invalid AST node provided" + } + if file, err := os.Open(fobj.Name()); err == nil && node != nil { + defer file.Close() // #nosec + s := codeSnippetStartLine(node, fobj) + e := codeSnippetEndLine(node, fobj) + code, err = CodeSnippet(file, s, e) + if err != nil { + code = err.Error() + } + } + + return &Issue{ + File: name, + Line: line, + Col: col, + RuleID: ruleID, + What: desc, + Confidence: confidence, + Severity: severity, + Code: code, + Cwe: GetCweByRule(ruleID), + } +} + +// WithSuppressions set the suppressions of the issue +func (i *Issue) WithSuppressions(suppressions []SuppressionInfo) *Issue { + i.Suppressions = suppressions + return i +} + +// GetLine returns the line number of a given ast.Node +func GetLine(fobj *token.File, node ast.Node) string { + start, end := fobj.Line(node.Pos()), fobj.Line(node.End()) + line := strconv.Itoa(start) + if start != end { + line = fmt.Sprintf("%d-%d", start, end) + } + return line +} diff --git a/vendor/github.com/securego/gosec/v2/report.go b/vendor/github.com/securego/gosec/v2/report.go index 96b1466d58..4fdeea5206 100644 --- a/vendor/github.com/securego/gosec/v2/report.go +++ b/vendor/github.com/securego/gosec/v2/report.go @@ -1,15 +1,19 @@ package gosec +import ( + "github.com/securego/gosec/v2/issue" +) + // ReportInfo this is report information type ReportInfo struct { Errors map[string][]Error `json:"Golang errors"` - Issues []*Issue + Issues []*issue.Issue Stats *Metrics GosecVersion string } // NewReportInfo instantiate a ReportInfo -func NewReportInfo(issues []*Issue, metrics *Metrics, errors map[string][]Error) *ReportInfo { +func NewReportInfo(issues []*issue.Issue, metrics *Metrics, errors map[string][]Error) *ReportInfo { return &ReportInfo{ Errors: errors, Issues: issues, diff --git a/vendor/github.com/securego/gosec/v2/resolve.go b/vendor/github.com/securego/gosec/v2/resolve.go index cdc287e8e5..a201b8d32b 100644 --- a/vendor/github.com/securego/gosec/v2/resolve.go +++ b/vendor/github.com/securego/gosec/v2/resolve.go @@ -66,7 
+66,7 @@ func resolveBinExpr(n *ast.BinaryExpr, c *Context) bool { return (TryResolve(n.X, c) && TryResolve(n.Y, c)) } -func resolveCallExpr(n *ast.CallExpr, c *Context) bool { +func resolveCallExpr(_ *ast.CallExpr, _ *Context) bool { // TODO(tkelsey): next step, full function resolution return false } diff --git a/vendor/github.com/securego/gosec/v2/rule.go b/vendor/github.com/securego/gosec/v2/rule.go index c0429c4c23..490a25da02 100644 --- a/vendor/github.com/securego/gosec/v2/rule.go +++ b/vendor/github.com/securego/gosec/v2/rule.go @@ -15,12 +15,14 @@ package gosec import ( "go/ast" "reflect" + + "github.com/securego/gosec/v2/issue" ) // The Rule interface used by all rules supported by gosec. type Rule interface { ID() string - Match(ast.Node, *Context) (*Issue, error) + Match(ast.Node, *Context) (*issue.Issue, error) } // RuleBuilder is used to register a rule definition with the analyzer @@ -41,7 +43,7 @@ func NewRuleSet() RuleSet { return RuleSet{make(map[reflect.Type][]Rule), make(map[string]bool)} } -// Register adds a trigger for the supplied rule for the the +// Register adds a trigger for the supplied rule for the // specified ast nodes. func (r RuleSet) Register(rule Rule, isSuppressed bool, nodes ...ast.Node) { for _, n := range nodes { diff --git a/vendor/github.com/securego/gosec/v2/rules/archive.go b/vendor/github.com/securego/gosec/v2/rules/archive.go index 92c7e4481c..987047435b 100644 --- a/vendor/github.com/securego/gosec/v2/rules/archive.go +++ b/vendor/github.com/securego/gosec/v2/rules/archive.go @@ -5,10 +5,11 @@ import ( "go/types" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type archive struct { - gosec.MetaData + issue.MetaData calls gosec.CallList argTypes []string } @@ -18,7 +19,7 @@ func (a *archive) ID() string { } // Match inspects AST nodes to determine if the filepath.Joins uses any argument derived from type zip.File or tar.Header -func (a *archive) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (a *archive) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if node := a.calls.ContainsPkgCallExpr(n, c, false); node != nil { for _, arg := range node.Args { var argType types.Type @@ -38,7 +39,7 @@ func (a *archive) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { if argType != nil { for _, t := range a.argTypes { if argType.String() == t { - return gosec.NewIssue(c, n, a.ID(), a.What, a.Severity, a.Confidence), nil + return c.NewIssue(n, a.ID(), a.What, a.Severity, a.Confidence), nil } } } @@ -48,17 +49,17 @@ func (a *archive) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { } // NewArchive creates a new rule which detects the file traversal when extracting zip/tar archives -func NewArchive(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewArchive(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := gosec.NewCallList() calls.Add("path/filepath", "Join") calls.Add("path", "Join") return &archive{ calls: calls, argTypes: []string{"*archive/zip.File", "*archive/tar.Header"}, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: "File traversal when extracting zip/tar archive", }, }, []ast.Node{(*ast.CallExpr)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/bad_defer.go b/vendor/github.com/securego/gosec/v2/rules/bad_defer.go deleted file mode 100644 index 13b42070da..0000000000 --- 
a/vendor/github.com/securego/gosec/v2/rules/bad_defer.go +++ /dev/null @@ -1,68 +0,0 @@ -package rules - -import ( - "fmt" - "go/ast" - "strings" - - "github.com/securego/gosec/v2" -) - -type deferType struct { - typ string - methods []string -} - -type badDefer struct { - gosec.MetaData - types []deferType -} - -func (r *badDefer) ID() string { - return r.MetaData.ID -} - -func normalize(typ string) string { - return strings.TrimPrefix(typ, "*") -} - -func contains(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - return false -} - -func (r *badDefer) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { - if deferStmt, ok := n.(*ast.DeferStmt); ok { - for _, deferTyp := range r.types { - if typ, method, err := gosec.GetCallInfo(deferStmt.Call, c); err == nil { - if normalize(typ) == deferTyp.typ && contains(deferTyp.methods, method) { - return gosec.NewIssue(c, n, r.ID(), fmt.Sprintf(r.What, method, typ), r.Severity, r.Confidence), nil - } - } - } - } - - return nil, nil -} - -// NewDeferredClosing detects unsafe defer of error returning methods -func NewDeferredClosing(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { - return &badDefer{ - types: []deferType{ - { - typ: "os.File", - methods: []string{"Close"}, - }, - }, - MetaData: gosec.MetaData{ - ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, - What: "Deferring unsafe method %q on type %q", - }, - }, []ast.Node{(*ast.DeferStmt)(nil)} -} diff --git a/vendor/github.com/securego/gosec/v2/rules/bind.go b/vendor/github.com/securego/gosec/v2/rules/bind.go index 8f6af067ad..fef760c808 100644 --- a/vendor/github.com/securego/gosec/v2/rules/bind.go +++ b/vendor/github.com/securego/gosec/v2/rules/bind.go @@ -19,11 +19,12 @@ import ( "regexp" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) // Looks for net.Listen("0.0.0.0") or net.Listen(":8080") type bindsToAllNetworkInterfaces struct { - gosec.MetaData + issue.MetaData calls gosec.CallList pattern *regexp.Regexp } @@ -32,7 +33,7 @@ func (r *bindsToAllNetworkInterfaces) ID() string { return r.MetaData.ID } -func (r *bindsToAllNetworkInterfaces) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (r *bindsToAllNetworkInterfaces) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { callExpr := r.calls.ContainsPkgCallExpr(n, c, false) if callExpr == nil { return nil, nil @@ -42,14 +43,14 @@ func (r *bindsToAllNetworkInterfaces) Match(n ast.Node, c *gosec.Context) (*gose if bl, ok := arg.(*ast.BasicLit); ok { if arg, err := gosec.GetString(bl); err == nil { if r.pattern.MatchString(arg) { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } else if ident, ok := arg.(*ast.Ident); ok { values := gosec.GetIdentStringValues(ident) for _, value := range values { if r.pattern.MatchString(value) { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -57,7 +58,7 @@ func (r *bindsToAllNetworkInterfaces) Match(n ast.Node, c *gosec.Context) (*gose values := gosec.GetCallStringArgsValues(callExpr.Args[0], c) for _, value := range values { if r.pattern.MatchString(value) { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -66,17 +67,17 @@ func (r *bindsToAllNetworkInterfaces) 
Match(n ast.Node, c *gosec.Context) (*gose // NewBindsToAllNetworkInterfaces detects socket connections that are setup to // listen on all network interfaces. -func NewBindsToAllNetworkInterfaces(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewBindsToAllNetworkInterfaces(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := gosec.NewCallList() calls.Add("net", "Listen") calls.Add("crypto/tls", "Listen") return &bindsToAllNetworkInterfaces{ calls: calls, pattern: regexp.MustCompile(`^(0.0.0.0|:).*$`), - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: "Binds to all network interfaces", }, }, []ast.Node{(*ast.CallExpr)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/blocklist.go b/vendor/github.com/securego/gosec/v2/rules/blocklist.go index afd4ee56b7..5e03cf7a0c 100644 --- a/vendor/github.com/securego/gosec/v2/rules/blocklist.go +++ b/vendor/github.com/securego/gosec/v2/rules/blocklist.go @@ -19,27 +19,28 @@ import ( "strings" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type blocklistedImport struct { - gosec.MetaData + issue.MetaData Blocklisted map[string]string } func unquote(original string) string { - copy := strings.TrimSpace(original) - copy = strings.TrimLeft(copy, `"`) - return strings.TrimRight(copy, `"`) + cleaned := strings.TrimSpace(original) + cleaned = strings.TrimLeft(cleaned, `"`) + return strings.TrimRight(cleaned, `"`) } func (r *blocklistedImport) ID() string { return r.MetaData.ID } -func (r *blocklistedImport) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (r *blocklistedImport) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if node, ok := n.(*ast.ImportSpec); ok { if description, ok := r.Blocklisted[unquote(node.Path.Value)]; ok { - return gosec.NewIssue(c, node, r.ID(), description, r.Severity, r.Confidence), nil + return c.NewIssue(node, r.ID(), description, r.Severity, r.Confidence), nil } } return nil, nil @@ -47,12 +48,12 @@ func (r *blocklistedImport) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, e // NewBlocklistedImports reports when a blocklisted import is being used. // Typically when a deprecated technology is being used. 
-func NewBlocklistedImports(id string, conf gosec.Config, blocklist map[string]string) (gosec.Rule, []ast.Node) { +func NewBlocklistedImports(id string, _ gosec.Config, blocklist map[string]string) (gosec.Rule, []ast.Node) { return &blocklistedImport{ - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, }, Blocklisted: blocklist, }, []ast.Node{(*ast.ImportSpec)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go b/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go index 02256faa98..7e57f1a5b1 100644 --- a/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go +++ b/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go @@ -19,10 +19,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type decompressionBombCheck struct { - gosec.MetaData + issue.MetaData readerCalls gosec.CallList copyCalls gosec.CallList } @@ -40,7 +41,7 @@ func containsReaderCall(node ast.Node, ctx *gosec.Context, list gosec.CallList) return list.Contains(s, idt) } -func (d *decompressionBombCheck) Match(node ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (d *decompressionBombCheck) Match(node ast.Node, ctx *gosec.Context) (*issue.Issue, error) { var readerVarObj map[*ast.Object]struct{} // To check multiple lines, ctx.PassedValues is used to store temporary data. @@ -72,7 +73,7 @@ func (d *decompressionBombCheck) Match(node ast.Node, ctx *gosec.Context) (*gose if idt, ok := n.Args[1].(*ast.Ident); ok { if _, ok := readerVarObj[idt.Obj]; ok { // Detect io.Copy(x, r) - return gosec.NewIssue(ctx, n, d.ID(), d.What, d.Severity, d.Confidence), nil + return ctx.NewIssue(n, d.ID(), d.What, d.Severity, d.Confidence), nil } } } @@ -82,7 +83,7 @@ func (d *decompressionBombCheck) Match(node ast.Node, ctx *gosec.Context) (*gose } // NewDecompressionBombCheck detects if there is potential DoS vulnerability via decompression bomb -func NewDecompressionBombCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewDecompressionBombCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { readerCalls := gosec.NewCallList() readerCalls.Add("compress/gzip", "NewReader") readerCalls.AddAll("compress/zlib", "NewReader", "NewReaderDict") @@ -98,10 +99,10 @@ func NewDecompressionBombCheck(id string, conf gosec.Config) (gosec.Rule, []ast. 
copyCalls.Add("io", "CopyBuffer") return &decompressionBombCheck{ - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.Medium, + Severity: issue.Medium, + Confidence: issue.Medium, What: "Potential DoS vulnerability via decompression bomb", }, readerCalls: readerCalls, diff --git a/vendor/github.com/securego/gosec/v2/rules/directory-traversal.go b/vendor/github.com/securego/gosec/v2/rules/directory-traversal.go index c373427b8f..47bcb2dc4a 100644 --- a/vendor/github.com/securego/gosec/v2/rules/directory-traversal.go +++ b/vendor/github.com/securego/gosec/v2/rules/directory-traversal.go @@ -5,18 +5,19 @@ import ( "regexp" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type traversal struct { pattern *regexp.Regexp - gosec.MetaData + issue.MetaData } func (r *traversal) ID() string { return r.MetaData.ID } -func (r *traversal) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *traversal) Match(n ast.Node, ctx *gosec.Context) (*issue.Issue, error) { switch node := n.(type) { case *ast.CallExpr: return r.matchCallExpr(node, ctx) @@ -24,14 +25,14 @@ func (r *traversal) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) return nil, nil } -func (r *traversal) matchCallExpr(assign *ast.CallExpr, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *traversal) matchCallExpr(assign *ast.CallExpr, ctx *gosec.Context) (*issue.Issue, error) { for _, i := range assign.Args { if basiclit, ok1 := i.(*ast.BasicLit); ok1 { if fun, ok2 := assign.Fun.(*ast.SelectorExpr); ok2 { if x, ok3 := fun.X.(*ast.Ident); ok3 { - string := x.Name + "." + fun.Sel.Name + "(" + basiclit.Value + ")" - if r.pattern.MatchString(string) { - return gosec.NewIssue(ctx, assign, r.ID(), r.What, r.Severity, r.Confidence), nil + str := x.Name + "." 
+ fun.Sel.Name + "(" + basiclit.Value + ")" + if r.pattern.MatchString(str) { + return ctx.NewIssue(assign, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -54,11 +55,11 @@ func NewDirectoryTraversal(id string, conf gosec.Config) (gosec.Rule, []ast.Node return &traversal{ pattern: regexp.MustCompile(pattern), - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Potential directory traversal", - Confidence: gosec.Medium, - Severity: gosec.Medium, + Confidence: issue.Medium, + Severity: issue.Medium, }, }, []ast.Node{(*ast.CallExpr)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/errors.go b/vendor/github.com/securego/gosec/v2/rules/errors.go index 0838382b32..d31248ccb4 100644 --- a/vendor/github.com/securego/gosec/v2/rules/errors.go +++ b/vendor/github.com/securego/gosec/v2/rules/errors.go @@ -19,10 +19,11 @@ import ( "go/types" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type noErrorCheck struct { - gosec.MetaData + issue.MetaData whitelist gosec.CallList } @@ -49,7 +50,7 @@ func returnsError(callExpr *ast.CallExpr, ctx *gosec.Context) int { return -1 } -func (r *noErrorCheck) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *noErrorCheck) Match(n ast.Node, ctx *gosec.Context) (*issue.Issue, error) { switch stmt := n.(type) { case *ast.AssignStmt: cfg := ctx.Config @@ -61,7 +62,7 @@ func (r *noErrorCheck) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, erro return nil, nil } if id, ok := stmt.Lhs[pos].(*ast.Ident); ok && id.Name == "_" { - return gosec.NewIssue(ctx, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return ctx.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -70,7 +71,7 @@ func (r *noErrorCheck) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, erro if callExpr, ok := stmt.X.(*ast.CallExpr); ok && r.whitelist.ContainsCallExpr(stmt.X, ctx) == nil { pos := returnsError(callExpr, ctx) if pos >= 0 { - return gosec.NewIssue(ctx, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return ctx.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -100,10 +101,10 @@ func NewNoErrorCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { } return &noErrorCheck{ - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Low, - Confidence: gosec.High, + Severity: issue.Low, + Confidence: issue.High, What: "Errors unhandled.", }, whitelist: whitelist, diff --git a/vendor/github.com/securego/gosec/v2/rules/fileperms.go b/vendor/github.com/securego/gosec/v2/rules/fileperms.go index a379a8c0b6..5311f74c6c 100644 --- a/vendor/github.com/securego/gosec/v2/rules/fileperms.go +++ b/vendor/github.com/securego/gosec/v2/rules/fileperms.go @@ -20,15 +20,17 @@ import ( "strconv" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type filePermissions struct { - gosec.MetaData + issue.MetaData mode int64 pkgs []string calls []string } +// ID returns the ID of the rule. func (r *filePermissions) ID() string { return r.MetaData.ID } @@ -50,12 +52,17 @@ func getConfiguredMode(conf map[string]interface{}, configKey string, defaultMod return mode } -func (r *filePermissions) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func modeIsSubset(subset int64, superset int64) bool { + return (subset | superset) == superset +} + +// Match checks if the rule is matched. 
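As context for the updated `Match` below, a small sketch contrasting the new bitmask test with the old numeric `mode > r.mode` comparison (the configured mode and file modes are chosen purely for illustration):

```go
package main

import "fmt"

// modeIsSubset reports whether every permission bit in subset is also
// present in superset, matching the helper introduced above.
func modeIsSubset(subset, superset int64) bool {
	return (subset | superset) == superset
}

func main() {
	configured := int64(0o600)

	// Old check: flag only when mode > configured. 0o060 (48) is numerically
	// smaller than 0o600 (384), so overly broad group bits slipped through.
	fmt.Println(0o060 > configured) // false: not flagged by the old comparison

	// New check: 0o060 sets bits outside 0o600, so it is now flagged.
	fmt.Println(!modeIsSubset(0o060, configured)) // true: flagged

	// A strictly tighter mode such as 0o400 is still accepted.
	fmt.Println(!modeIsSubset(0o400, configured)) // false: not flagged
}
```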
+func (r *filePermissions) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) {
+    for _, pkg := range r.pkgs {
+        if callexpr, matched := gosec.MatchCallByPackage(n, c, pkg, r.calls...); matched {
+            modeArg := callexpr.Args[len(callexpr.Args)-1]
-    if mode, err := gosec.GetInt(modeArg); err == nil && mode > r.mode {
-        return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
+            if mode, err := gosec.GetInt(modeArg); err == nil && !modeIsSubset(mode, r.mode) {
+                return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil
             }
         }
     }
@@ -69,10 +76,10 @@ func NewWritePerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
         mode:  mode,
         pkgs:  []string{"io/ioutil", "os"},
         calls: []string{"WriteFile"},
-        MetaData: gosec.MetaData{
+        MetaData: issue.MetaData{
             ID:         id,
-            Severity:   gosec.Medium,
-            Confidence: gosec.High,
+            Severity:   issue.Medium,
+            Confidence: issue.High,
             What:       fmt.Sprintf("Expect WriteFile permissions to be %#o or less", mode),
         },
     }, []ast.Node{(*ast.CallExpr)(nil)}
@@ -86,10 +93,10 @@ func NewFilePerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
         mode:  mode,
         pkgs:  []string{"os"},
         calls: []string{"OpenFile", "Chmod"},
-        MetaData: gosec.MetaData{
+        MetaData: issue.MetaData{
             ID:         id,
-            Severity:   gosec.Medium,
-            Confidence: gosec.High,
+            Severity:   issue.Medium,
+            Confidence: issue.High,
             What:       fmt.Sprintf("Expect file permissions to be %#o or less", mode),
         },
     }, []ast.Node{(*ast.CallExpr)(nil)}
@@ -103,11 +110,55 @@ func NewMkdirPerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
         mode:  mode,
         pkgs:  []string{"os"},
         calls: []string{"Mkdir", "MkdirAll"},
-        MetaData: gosec.MetaData{
+        MetaData: issue.MetaData{
             ID:         id,
-            Severity:   gosec.Medium,
-            Confidence: gosec.High,
+            Severity:   issue.Medium,
+            Confidence: issue.High,
             What:       fmt.Sprintf("Expect directory permissions to be %#o or less", mode),
         },
     }, []ast.Node{(*ast.CallExpr)(nil)}
 }
+
+type osCreatePermissions struct {
+    issue.MetaData
+    mode  int64
+    pkgs  []string
+    calls []string
+}
+
+const defaultOsCreateMode = 0o666
+
+// ID returns the ID of the rule.
+func (r *osCreatePermissions) ID() string {
+    return r.MetaData.ID
+}
+
+// Match checks if the rule is matched.
+func (r *osCreatePermissions) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) {
+    for _, pkg := range r.pkgs {
+        if _, matched := gosec.MatchCallByPackage(n, c, pkg, r.calls...); matched {
+            if !modeIsSubset(defaultOsCreateMode, r.mode) {
+                return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil
+            }
+        }
+    }
+    return nil, nil
+}
+
+// NewOsCreatePerms creates a rule to detect file creation with a more permissive than configured
+// permission mask.
+func NewOsCreatePerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + mode := getConfiguredMode(conf, id, 0o666) + return &osCreatePermissions{ + mode: mode, + pkgs: []string{"os"}, + calls: []string{"Create"}, + MetaData: issue.MetaData{ + ID: id, + Severity: issue.Medium, + Confidence: issue.High, + What: fmt.Sprintf("Expect file permissions to be %#o or less but os.Create used with default permissions %#o", + mode, defaultOsCreateMode), + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go b/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go index cf2e6638d9..ed1fb947d1 100644 --- a/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go +++ b/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go @@ -15,17 +15,180 @@ package rules import ( + "fmt" "go/ast" "go/token" "regexp" "strconv" - zxcvbn "github.com/nbutton23/zxcvbn-go" + zxcvbn "github.com/ccojocar/zxcvbn-go" + "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) +type secretPattern struct { + name string + regexp *regexp.Regexp +} + +var secretsPatterns = [...]secretPattern{ + { + name: "RSA private key", + regexp: regexp.MustCompile(`-----BEGIN RSA PRIVATE KEY-----`), + }, + { + name: "SSH (DSA) private key", + regexp: regexp.MustCompile(`-----BEGIN DSA PRIVATE KEY-----`), + }, + { + name: "SSH (EC) private key", + regexp: regexp.MustCompile(`-----BEGIN EC PRIVATE KEY-----`), + }, + { + name: "PGP private key block", + regexp: regexp.MustCompile(`-----BEGIN PGP PRIVATE KEY BLOCK-----`), + }, + { + name: "Slack Token", + regexp: regexp.MustCompile(`xox[pborsa]-[0-9]{12}-[0-9]{12}-[0-9]{12}-[a-z0-9]{32}`), + }, + { + name: "AWS API Key", + regexp: regexp.MustCompile(`AKIA[0-9A-Z]{16}`), + }, + { + name: "Amazon MWS Auth Token", + regexp: regexp.MustCompile(`amzn\.mws\.[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}`), + }, + { + name: "AWS AppSync GraphQL Key", + regexp: regexp.MustCompile(`da2-[a-z0-9]{26}`), + }, + { + name: "GitHub personal access token", + regexp: regexp.MustCompile(`ghp_[a-zA-Z0-9]{36}`), + }, + { + name: "GitHub fine-grained access token", + regexp: regexp.MustCompile(`github_pat_[a-zA-Z0-9]{22}_[a-zA-Z0-9]{59}`), + }, + { + name: "GitHub action temporary token", + regexp: regexp.MustCompile(`ghs_[a-zA-Z0-9]{36}`), + }, + { + name: "Google API Key", + regexp: regexp.MustCompile(`AIza[0-9A-Za-z\-_]{35}`), + }, + { + name: "Google Cloud Platform API Key", + regexp: regexp.MustCompile(`AIza[0-9A-Za-z\-_]{35}`), + }, + { + name: "Google Cloud Platform OAuth", + regexp: regexp.MustCompile(`[0-9]+-[0-9A-Za-z_]{32}\.apps\.googleusercontent\.com`), + }, + { + name: "Google Drive API Key", + regexp: regexp.MustCompile(`AIza[0-9A-Za-z\-_]{35}`), + }, + { + name: "Google Drive OAuth", + regexp: regexp.MustCompile(`[0-9]+-[0-9A-Za-z_]{32}\.apps\.googleusercontent\.com`), + }, + { + name: "Google (GCP) Service-account", + regexp: regexp.MustCompile(`"type": "service_account"`), + }, + { + name: "Google Gmail API Key", + regexp: regexp.MustCompile(`AIza[0-9A-Za-z\-_]{35}`), + }, + { + name: "Google Gmail OAuth", + regexp: regexp.MustCompile(`[0-9]+-[0-9A-Za-z_]{32}\.apps\.googleusercontent\.com`), + }, + { + name: "Google OAuth Access Token", + regexp: regexp.MustCompile(`ya29\.[0-9A-Za-z\-_]+`), + }, + { + name: "Google YouTube API Key", + regexp: regexp.MustCompile(`AIza[0-9A-Za-z\-_]{35}`), + }, + { + name: "Google YouTube OAuth", + regexp: 
regexp.MustCompile(`[0-9]+-[0-9A-Za-z_]{32}\.apps\.googleusercontent\.com`), + }, + { + name: "Generic API Key", + regexp: regexp.MustCompile(`[aA][pP][iI]_?[kK][eE][yY].*[''|"][0-9a-zA-Z]{32,45}[''|"]`), + }, + { + name: "Generic Secret", + regexp: regexp.MustCompile(`[sS][eE][cC][rR][eE][tT].*[''|"][0-9a-zA-Z]{32,45}[''|"]`), + }, + { + name: "Heroku API Key", + regexp: regexp.MustCompile(`[hH][eE][rR][oO][kK][uU].*[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}`), + }, + { + name: "MailChimp API Key", + regexp: regexp.MustCompile(`[0-9a-f]{32}-us[0-9]{1,2}`), + }, + { + name: "Mailgun API Key", + regexp: regexp.MustCompile(`key-[0-9a-zA-Z]{32}`), + }, + { + name: "Password in URL", + regexp: regexp.MustCompile(`[a-zA-Z]{3,10}://[^/\\s:@]{3,20}:[^/\\s:@]{3,20}@.{1,100}["'\\s]`), + }, + { + name: "Slack Webhook", + regexp: regexp.MustCompile(`https://hooks\.slack\.com/services/T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8}/[a-zA-Z0-9_]{24}`), + }, + { + name: "Stripe API Key", + regexp: regexp.MustCompile(`sk_live_[0-9a-zA-Z]{24}`), + }, + { + name: "Stripe API Key", + regexp: regexp.MustCompile(`sk_live_[0-9a-zA-Z]{24}`), + }, + { + name: "Stripe Restricted API Key", + regexp: regexp.MustCompile(`rk_live_[0-9a-zA-Z]{24}`), + }, + { + name: "Square Access Token", + regexp: regexp.MustCompile(`sq0atp-[0-9A-Za-z\-_]{22}`), + }, + { + name: "Square OAuth Secret", + regexp: regexp.MustCompile(`sq0csp-[0-9A-Za-z\-_]{43}`), + }, + { + name: "Telegram Bot API Key", + regexp: regexp.MustCompile(`[0-9]+:AA[0-9A-Za-z\-_]{33}`), + }, + { + name: "Twilio API Key", + regexp: regexp.MustCompile(`SK[0-9a-fA-F]{32}`), + }, + { + name: "Twitter Access Token", + regexp: regexp.MustCompile(`[tT][wW][iI][tT][tT][eE][rR].*[1-9][0-9]+-[0-9a-zA-Z]{40}`), + }, + { + name: "Twitter OAuth", + regexp: regexp.MustCompile(`[tT][wW][iI][tT][tT][eE][rR].*[''|"][0-9a-zA-Z]{35,44}[''|"]`), + }, +} + type credentials struct { - gosec.MetaData + issue.MetaData pattern *regexp.Regexp entropyThreshold float64 perCharThreshold float64 @@ -53,7 +216,16 @@ func (r *credentials) isHighEntropyString(str string) bool { entropyPerChar >= r.perCharThreshold)) } -func (r *credentials) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *credentials) isSecretPattern(str string) (bool, string) { + for _, pattern := range secretsPatterns { + if pattern.regexp.MatchString(str) { + return true, pattern.name + } + } + return false, "" +} + +func (r *credentials) Match(n ast.Node, ctx *gosec.Context) (*issue.Issue, error) { switch node := n.(type) { case *ast.AssignStmt: return r.matchAssign(node, ctx) @@ -65,24 +237,41 @@ func (r *credentials) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error return nil, nil } -func (r *credentials) matchAssign(assign *ast.AssignStmt, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *credentials) matchAssign(assign *ast.AssignStmt, ctx *gosec.Context) (*issue.Issue, error) { for _, i := range assign.Lhs { if ident, ok := i.(*ast.Ident); ok { + // First check LHS to find anything being assigned to variables whose name appears to be a cred if r.pattern.MatchString(ident.Name) { for _, e := range assign.Rhs { if val, err := gosec.GetString(e); err == nil { if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) { - return gosec.NewIssue(ctx, assign, r.ID(), r.What, r.Severity, r.Confidence), nil + return ctx.NewIssue(assign, r.ID(), r.What, r.Severity, r.Confidence), nil } } } } + + // Now that no names were matched, match the RHS to see if the actual values 
being assigned are creds + for _, e := range assign.Rhs { + val, err := gosec.GetString(e) + if err != nil { + continue + } + + if r.ignoreEntropy || r.isHighEntropyString(val) { + if ok, patternName := r.isSecretPattern(val); ok { + return ctx.NewIssue(assign, r.ID(), fmt.Sprintf("%s: %s", r.What, patternName), r.Severity, r.Confidence), nil + } + } + } } } return nil, nil } -func (r *credentials) matchValueSpec(valueSpec *ast.ValueSpec, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *credentials) matchValueSpec(valueSpec *ast.ValueSpec, ctx *gosec.Context) (*issue.Issue, error) { + // Running match against the variable name(s) first. Will catch any creds whose var name matches the pattern, + // then will go back over to check the values themselves. for index, ident := range valueSpec.Names { if r.pattern.MatchString(ident.Name) && valueSpec.Values != nil { // const foo, bar = "same value" @@ -91,22 +280,57 @@ func (r *credentials) matchValueSpec(valueSpec *ast.ValueSpec, ctx *gosec.Contex } if val, err := gosec.GetString(valueSpec.Values[index]); err == nil { if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) { - return gosec.NewIssue(ctx, valueSpec, r.ID(), r.What, r.Severity, r.Confidence), nil + return ctx.NewIssue(valueSpec, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + } + + // Now that no variable names have been matched, match the actual values to find any creds + for _, ident := range valueSpec.Values { + if val, err := gosec.GetString(ident); err == nil { + if r.ignoreEntropy || r.isHighEntropyString(val) { + if ok, patternName := r.isSecretPattern(val); ok { + return ctx.NewIssue(valueSpec, r.ID(), fmt.Sprintf("%s: %s", r.What, patternName), r.Severity, r.Confidence), nil } } } } + return nil, nil } -func (r *credentials) matchEqualityCheck(binaryExpr *ast.BinaryExpr, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *credentials) matchEqualityCheck(binaryExpr *ast.BinaryExpr, ctx *gosec.Context) (*issue.Issue, error) { if binaryExpr.Op == token.EQL || binaryExpr.Op == token.NEQ { - if ident, ok := binaryExpr.X.(*ast.Ident); ok { - if r.pattern.MatchString(ident.Name) { - if val, err := gosec.GetString(binaryExpr.Y); err == nil { - if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) { - return gosec.NewIssue(ctx, binaryExpr, r.ID(), r.What, r.Severity, r.Confidence), nil - } + ident, ok := binaryExpr.X.(*ast.Ident) + if !ok { + ident, _ = binaryExpr.Y.(*ast.Ident) + } + + if ident != nil && r.pattern.MatchString(ident.Name) { + valueNode := binaryExpr.Y + if !ok { + valueNode = binaryExpr.X + } + if val, err := gosec.GetString(valueNode); err == nil { + if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) { + return ctx.NewIssue(binaryExpr, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + + // Now that the variable names have been checked, and no matches were found, make sure that + // either the left or right operands is a string literal so we can match the value. 
+ identStrConst, ok := binaryExpr.X.(*ast.BasicLit) + if !ok { + identStrConst, ok = binaryExpr.Y.(*ast.BasicLit) + } + + if ok && identStrConst.Kind == token.STRING { + s, _ := gosec.GetString(identStrConst) + if r.ignoreEntropy || r.isHighEntropyString(s) { + if ok, patternName := r.isSecretPattern(s); ok { + return ctx.NewIssue(binaryExpr, r.ID(), fmt.Sprintf("%s: %s", r.What, patternName), r.Severity, r.Confidence), nil } } } @@ -129,6 +353,7 @@ func NewHardcodedCredentials(id string, conf gosec.Config) (gosec.Rule, []ast.No pattern = cfgPattern } } + if configIgnoreEntropy, ok := conf["ignore_entropy"]; ok { if cfgIgnoreEntropy, ok := configIgnoreEntropy.(bool); ok { ignoreEntropy = cfgIgnoreEntropy @@ -163,11 +388,11 @@ func NewHardcodedCredentials(id string, conf gosec.Config) (gosec.Rule, []ast.No perCharThreshold: perCharThreshold, ignoreEntropy: ignoreEntropy, truncate: truncateString, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Potential hardcoded credentials", - Confidence: gosec.Low, - Severity: gosec.High, + Confidence: issue.Low, + Severity: issue.High, }, }, []ast.Node{(*ast.AssignStmt)(nil), (*ast.ValueSpec)(nil), (*ast.BinaryExpr)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/http_serve.go b/vendor/github.com/securego/gosec/v2/rules/http_serve.go index e460b3a680..525ed4ebc7 100644 --- a/vendor/github.com/securego/gosec/v2/rules/http_serve.go +++ b/vendor/github.com/securego/gosec/v2/rules/http_serve.go @@ -4,10 +4,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type httpServeWithoutTimeouts struct { - gosec.MetaData + issue.MetaData pkg string calls []string } @@ -16,23 +17,23 @@ func (r *httpServeWithoutTimeouts) ID() string { return r.MetaData.ID } -func (r *httpServeWithoutTimeouts) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) { +func (r *httpServeWithoutTimeouts) Match(n ast.Node, c *gosec.Context) (gi *issue.Issue, err error) { if _, matches := gosec.MatchCallByPackage(n, c, r.pkg, r.calls...); matches { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } return nil, nil } // NewHTTPServeWithoutTimeouts detects use of net/http serve functions that have no support for setting timeouts. 
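Stepping back to the hardcoded-credentials changes above: a sketch of the new value-based matching with an abridged, two-entry pattern table (the matched key is AWS's documented example key, not a live credential; names like `isSecretPattern` mirror the rule's helper but the program is illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

// A two-entry subset of the secretsPatterns table added above.
var patterns = []struct {
	name string
	re   *regexp.Regexp
}{
	{"AWS API Key", regexp.MustCompile(`AKIA[0-9A-Z]{16}`)},
	{"Stripe API Key", regexp.MustCompile(`sk_live_[0-9a-zA-Z]{24}`)},
}

// isSecretPattern mirrors the new RHS check: inspect the assigned value
// itself rather than only the variable name on the left-hand side.
func isSecretPattern(val string) (bool, string) {
	for _, p := range patterns {
		if p.re.MatchString(val) {
			return true, p.name
		}
	}
	return false, ""
}

func main() {
	// AWS's well-known documentation example key: AKIA plus 16 chars of [0-9A-Z].
	if ok, name := isSecretPattern("AKIAIOSFODNN7EXAMPLE"); ok {
		fmt.Println("flagged as:", name) // flagged as: AWS API Key
	}
}
```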
-func NewHTTPServeWithoutTimeouts(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewHTTPServeWithoutTimeouts(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &httpServeWithoutTimeouts{ pkg: "net/http", calls: []string{"ListenAndServe", "ListenAndServeTLS", "Serve", "ServeTLS"}, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Use of net/http serve function that has no support for setting timeouts", - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, }, }, []ast.Node{(*ast.CallExpr)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go b/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go index b2668dec88..a7eabb20b4 100644 --- a/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go +++ b/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go @@ -3,12 +3,14 @@ package rules import ( "go/ast" "go/token" + "go/types" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type implicitAliasing struct { - gosec.MetaData + issue.MetaData aliases map[*ast.Object]struct{} rightBrace token.Pos acceptableAlias []*ast.UnaryExpr @@ -27,7 +29,24 @@ func containsUnary(exprs []*ast.UnaryExpr, expr *ast.UnaryExpr) bool { return false } -func (r *implicitAliasing) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func getIdentExpr(expr ast.Expr) (*ast.Ident, bool) { + return doGetIdentExpr(expr, false) +} + +func doGetIdentExpr(expr ast.Expr, hasSelector bool) (*ast.Ident, bool) { + switch node := expr.(type) { + case *ast.Ident: + return node, hasSelector + case *ast.SelectorExpr: + return doGetIdentExpr(node.X, true) + case *ast.UnaryExpr: + return doGetIdentExpr(node.X, hasSelector) + default: + return nil, false + } +} + +func (r *implicitAliasing) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { switch node := n.(type) { case *ast.RangeStmt: // When presented with a range statement, get the underlying Object bound to @@ -71,9 +90,13 @@ func (r *implicitAliasing) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, er } // If we find a unary op of & (reference) of an object within r.aliases, complain. - if ident, ok := node.X.(*ast.Ident); ok && node.Op.String() == "&" { - if _, contains := r.aliases[ident.Obj]; contains { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + if identExpr, hasSelector := getIdentExpr(node); identExpr != nil && node.Op.String() == "&" { + if _, contains := r.aliases[identExpr.Obj]; contains { + _, isPointer := c.Info.TypeOf(identExpr).(*types.Pointer) + + if !hasSelector || !isPointer { + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil + } } } case *ast.ReturnStmt: @@ -89,15 +112,15 @@ func (r *implicitAliasing) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, er } // NewImplicitAliasing detects implicit memory aliasing of type: for blah := SomeCall() {... 
SomeOtherCall(&blah) ...} -func NewImplicitAliasing(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewImplicitAliasing(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &implicitAliasing{ aliases: make(map[*ast.Object]struct{}), rightBrace: token.NoPos, acceptableAlias: make([]*ast.UnaryExpr, 0), - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.Medium, + Severity: issue.Medium, + Confidence: issue.Medium, What: "Implicit memory aliasing in for loop.", }, }, []ast.Node{(*ast.RangeStmt)(nil), (*ast.UnaryExpr)(nil), (*ast.ReturnStmt)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go b/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go index f55211a923..1d57906642 100644 --- a/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go +++ b/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go @@ -19,10 +19,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type integerOverflowCheck struct { - gosec.MetaData + issue.MetaData calls gosec.CallList } @@ -30,7 +31,7 @@ func (i *integerOverflowCheck) ID() string { return i.MetaData.ID } -func (i *integerOverflowCheck) Match(node ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (i *integerOverflowCheck) Match(node ast.Node, ctx *gosec.Context) (*issue.Issue, error) { var atoiVarObj map[*ast.Object]ast.Node // To check multiple lines, ctx.PassedValues is used to store temporary data. @@ -63,7 +64,7 @@ func (i *integerOverflowCheck) Match(node ast.Node, ctx *gosec.Context) (*gosec. if idt, ok := n.Args[0].(*ast.Ident); ok { if _, ok := atoiVarObj[idt.Obj]; ok { // Detect int32(v) and int16(v) - return gosec.NewIssue(ctx, n, i.ID(), i.What, i.Severity, i.Confidence), nil + return ctx.NewIssue(n, i.ID(), i.What, i.Severity, i.Confidence), nil } } } @@ -74,14 +75,14 @@ func (i *integerOverflowCheck) Match(node ast.Node, ctx *gosec.Context) (*gosec. 
} // NewIntegerOverflowCheck detects if there is potential Integer OverFlow -func NewIntegerOverflowCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewIntegerOverflowCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := gosec.NewCallList() calls.Add("strconv", "Atoi") return &integerOverflowCheck{ - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.High, - Confidence: gosec.Medium, + Severity: issue.High, + Confidence: issue.Medium, What: "Potential Integer overflow made by strconv.Atoi result conversion to int16/32", }, calls: calls, diff --git a/vendor/github.com/securego/gosec/v2/rules/math_big_rat.go b/vendor/github.com/securego/gosec/v2/rules/math_big_rat.go index 69037e18f9..1aac1fa201 100644 --- a/vendor/github.com/securego/gosec/v2/rules/math_big_rat.go +++ b/vendor/github.com/securego/gosec/v2/rules/math_big_rat.go @@ -4,10 +4,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type usingOldMathBig struct { - gosec.MetaData + issue.MetaData calls gosec.CallList } @@ -15,18 +16,18 @@ func (r *usingOldMathBig) ID() string { return r.MetaData.ID } -func (r *usingOldMathBig) Match(node ast.Node, ctx *gosec.Context) (gi *gosec.Issue, err error) { +func (r *usingOldMathBig) Match(node ast.Node, ctx *gosec.Context) (gi *issue.Issue, err error) { if callExpr := r.calls.ContainsPkgCallExpr(node, ctx, false); callExpr == nil { return nil, nil } - confidence := gosec.Low + confidence := issue.Low major, minor, build := gosec.GoVersion() if major == 1 && (minor == 16 && build < 14 || minor == 17 && build < 7) { - confidence = gosec.Medium + confidence = issue.Medium } - return gosec.NewIssue(ctx, node, r.ID(), r.What, r.Severity, confidence), nil + return ctx.NewIssue(node, r.ID(), r.What, r.Severity, confidence), nil } // NewUsingOldMathBig rule detects the use of Rat.SetString from math/big. 
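The usingOldMathBig rule above targets CVE-2022-23772: on unpatched Go releases (the version probe raises confidence for 1.16 before 1.16.14 and 1.17 before 1.17.7), math/big's Rat.SetString could consume excessive memory on crafted input. A minimal sketch of the call pattern it reports, with a length guard as one possible mitigation; the 1024-byte limit is an assumption, not something gosec prescribes.

```go
package main

import (
	"fmt"
	"math/big"
)

// parseRatio parses an untrusted ratio literal such as "22/7" or "4e100".
func parseRatio(input string) (*big.Rat, error) {
	// Hypothetical guard: bound the input before handing it to SetString,
	// since crafted exponents could exhaust memory on affected Go versions.
	if len(input) > 1024 {
		return nil, fmt.Errorf("ratio literal too long: %d bytes", len(input))
	}
	r, ok := new(big.Rat).SetString(input) // the call the rule reports
	if !ok {
		return nil, fmt.Errorf("invalid ratio: %q", input)
	}
	return r, nil
}

func main() {
	r, err := parseRatio("22/7")
	if err != nil {
		panic(err)
	}
	fmt.Println(r.FloatString(5)) // prints 3.14286
}
```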
@@ -35,10 +36,10 @@ func NewUsingOldMathBig(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls.Add("math/big.Rat", "SetString") return &usingOldMathBig{ calls: calls, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Potential uncontrolled memory consumption in Rat.SetString (CVE-2022-23772)", - Severity: gosec.High, + Severity: issue.High, }, }, []ast.Node{(*ast.CallExpr)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/pprof.go b/vendor/github.com/securego/gosec/v2/rules/pprof.go index 4c99af7523..68498dd5e0 100644 --- a/vendor/github.com/securego/gosec/v2/rules/pprof.go +++ b/vendor/github.com/securego/gosec/v2/rules/pprof.go @@ -4,10 +4,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type pprofCheck struct { - gosec.MetaData + issue.MetaData importPath string importName string } @@ -18,22 +19,22 @@ func (p *pprofCheck) ID() string { } // Match checks for pprof imports -func (p *pprofCheck) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (p *pprofCheck) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if node, ok := n.(*ast.ImportSpec); ok { if p.importPath == unquote(node.Path.Value) && node.Name != nil && p.importName == node.Name.Name { - return gosec.NewIssue(c, node, p.ID(), p.What, p.Severity, p.Confidence), nil + return c.NewIssue(node, p.ID(), p.What, p.Severity, p.Confidence), nil } } return nil, nil } // NewPprofCheck detects when the profiling endpoint is automatically exposed -func NewPprofCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewPprofCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &pprofCheck{ - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.High, - Confidence: gosec.High, + Severity: issue.High, + Confidence: issue.High, What: "Profiling endpoint is automatically exposed on /debug/pprof", }, importPath: "net/http/pprof", diff --git a/vendor/github.com/securego/gosec/v2/rules/rand.go b/vendor/github.com/securego/gosec/v2/rules/rand.go index 055adce4d4..4491fd9284 100644 --- a/vendor/github.com/securego/gosec/v2/rules/rand.go +++ b/vendor/github.com/securego/gosec/v2/rules/rand.go @@ -18,10 +18,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type weakRand struct { - gosec.MetaData + issue.MetaData funcNames []string packagePath string } @@ -30,10 +31,10 @@ func (w *weakRand) ID() string { return w.MetaData.ID } -func (w *weakRand) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (w *weakRand) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { for _, funcName := range w.funcNames { if _, matched := gosec.MatchCallByPackage(n, c, w.packagePath, funcName); matched { - return gosec.NewIssue(c, n, w.ID(), w.What, w.Severity, w.Confidence), nil + return c.NewIssue(n, w.ID(), w.What, w.Severity, w.Confidence), nil } } @@ -41,17 +42,17 @@ func (w *weakRand) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { } // NewWeakRandCheck detects the use of random number generator that isn't cryptographically secure -func NewWeakRandCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewWeakRandCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &weakRand{ funcNames: []string{ "New", "Read", "Float32", "Float64", "Int", "Int31", "Int31n", "Int63", "Int63n", "Intn", "NormalFloat64", "Uint32", "Uint64", }, packagePath: "math/rand", - MetaData: gosec.MetaData{ + MetaData: 
issue.MetaData{ ID: id, - Severity: gosec.High, - Confidence: gosec.Medium, + Severity: issue.High, + Confidence: issue.Medium, What: "Use of weak random number generator (math/rand instead of crypto/rand)", }, }, []ast.Node{(*ast.CallExpr)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/readfile.go b/vendor/github.com/securego/gosec/v2/rules/readfile.go index 579f2fa447..7ef4bbad13 100644 --- a/vendor/github.com/securego/gosec/v2/rules/readfile.go +++ b/vendor/github.com/securego/gosec/v2/rules/readfile.go @@ -19,13 +19,15 @@ import ( "go/types" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type readfile struct { - gosec.MetaData + issue.MetaData gosec.CallList - pathJoin gosec.CallList - clean gosec.CallList + pathJoin gosec.CallList + clean gosec.CallList + cleanedVar map[any]ast.Node } // ID returns the identifier for this rule @@ -57,8 +59,11 @@ func (r *readfile) isJoinFunc(n ast.Node, c *gosec.Context) bool { return false } -// isFilepathClean checks if there is a filepath.Clean before assigning to a variable +// isFilepathClean checks if there is a filepath.Clean for given variable func (r *readfile) isFilepathClean(n *ast.Ident, c *gosec.Context) bool { + if _, ok := r.cleanedVar[n.Obj.Decl]; ok { + return true + } if n.Obj.Kind != ast.Var { return false } @@ -72,22 +77,38 @@ func (r *readfile) isFilepathClean(n *ast.Ident, c *gosec.Context) bool { return false } +// trackFilepathClean tracks back the declaration of variable from filepath.Clean argument +func (r *readfile) trackFilepathClean(n ast.Node) { + if clean, ok := n.(*ast.CallExpr); ok && len(clean.Args) > 0 { + if ident, ok := clean.Args[0].(*ast.Ident); ok { + // ident.Obj may be nil if the referenced declaration is in another file. It also may be incorrect. + // if it is nil, do not follow it. + if ident.Obj != nil { + r.cleanedVar[ident.Obj.Decl] = n + } + } + } +} + // Match inspects AST nodes to determine if the match the methods `os.Open` or `ioutil.ReadFile` -func (r *readfile) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { - if node := r.ContainsPkgCallExpr(n, c, false); node != nil { +func (r *readfile) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { + if node := r.clean.ContainsPkgCallExpr(n, c, false); node != nil { + r.trackFilepathClean(n) + return nil, nil + } else if node := r.ContainsPkgCallExpr(n, c, false); node != nil { for _, arg := range node.Args { // handles path joining functions in Arg // eg. os.Open(filepath.Join("/tmp/", file)) if callExpr, ok := arg.(*ast.CallExpr); ok { if r.isJoinFunc(callExpr, c) { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } // handles binary string concatenation eg. 
ioutil.Readfile("/tmp/" + file + "/blob") if binExp, ok := arg.(*ast.BinaryExpr); ok { // resolve all found identities from the BinaryExpr if _, ok := gosec.FindVarIdentities(binExp, c); ok { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } @@ -96,7 +117,7 @@ func (r *readfile) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { if _, ok := obj.(*types.Var); ok && !gosec.TryResolve(ident, c) && !r.isFilepathClean(ident, c) { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -105,17 +126,18 @@ func (r *readfile) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { } // NewReadFile detects cases where we read files -func NewReadFile(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewReadFile(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { rule := &readfile{ pathJoin: gosec.NewCallList(), clean: gosec.NewCallList(), CallList: gosec.NewCallList(), - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Potential file inclusion via variable", - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, }, + cleanedVar: map[any]ast.Node{}, } rule.pathJoin.Add("path/filepath", "Join") rule.pathJoin.Add("path", "Join") diff --git a/vendor/github.com/securego/gosec/v2/rules/rsa.go b/vendor/github.com/securego/gosec/v2/rules/rsa.go index f2ed5db53d..331e7fc80a 100644 --- a/vendor/github.com/securego/gosec/v2/rules/rsa.go +++ b/vendor/github.com/securego/gosec/v2/rules/rsa.go @@ -19,10 +19,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type weakKeyStrength struct { - gosec.MetaData + issue.MetaData calls gosec.CallList bits int } @@ -31,27 +32,27 @@ func (w *weakKeyStrength) ID() string { return w.MetaData.ID } -func (w *weakKeyStrength) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (w *weakKeyStrength) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if callExpr := w.calls.ContainsPkgCallExpr(n, c, false); callExpr != nil { if bits, err := gosec.GetInt(callExpr.Args[1]); err == nil && bits < (int64)(w.bits) { - return gosec.NewIssue(c, n, w.ID(), w.What, w.Severity, w.Confidence), nil + return c.NewIssue(n, w.ID(), w.What, w.Severity, w.Confidence), nil } } return nil, nil } // NewWeakKeyStrength builds a rule that detects RSA keys < 2048 bits -func NewWeakKeyStrength(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewWeakKeyStrength(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := gosec.NewCallList() calls.Add("crypto/rsa", "GenerateKey") bits := 2048 return &weakKeyStrength{ calls: calls, bits: bits, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: fmt.Sprintf("RSA keys should be at least %d bits", bits), }, }, []ast.Node{(*ast.CallExpr)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/rulelist.go b/vendor/github.com/securego/gosec/v2/rules/rulelist.go index b97813ed02..f9ca4f52c4 100644 --- a/vendor/github.com/securego/gosec/v2/rules/rulelist.go +++ b/vendor/github.com/securego/gosec/v2/rules/rulelist.go @@ -91,7 +91,7 @@ func Generate(trackSuppressions bool, filters ...RuleFilter) RuleList { {"G304", "File path provided as taint input", NewReadFile}, 
{"G305", "File path traversal when extracting zip archive", NewArchive}, {"G306", "Poor file permissions used when writing to a file", NewWritePerms}, - {"G307", "Unsafe defer call of a method returning an error", NewDeferredClosing}, + {"G307", "Poor file permissions used when creating a file with os.Create", NewOsCreatePerms}, // crypto {"G401", "Detect the usage of DES, RC4, MD5 or SHA1", NewUsesWeakCryptography}, diff --git a/vendor/github.com/securego/gosec/v2/rules/slowloris.go b/vendor/github.com/securego/gosec/v2/rules/slowloris.go index 60b5e95211..70db73f5f3 100644 --- a/vendor/github.com/securego/gosec/v2/rules/slowloris.go +++ b/vendor/github.com/securego/gosec/v2/rules/slowloris.go @@ -18,10 +18,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type slowloris struct { - gosec.MetaData + issue.MetaData } func (r *slowloris) ID() string { @@ -44,13 +45,13 @@ func containsReadHeaderTimeout(node *ast.CompositeLit) bool { return false } -func (r *slowloris) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *slowloris) Match(n ast.Node, ctx *gosec.Context) (*issue.Issue, error) { switch node := n.(type) { case *ast.CompositeLit: actualType := ctx.Info.TypeOf(node.Type) if actualType != nil && actualType.String() == "net/http.Server" { if !containsReadHeaderTimeout(node) { - return gosec.NewIssue(ctx, node, r.ID(), r.What, r.Severity, r.Confidence), nil + return ctx.NewIssue(node, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -58,13 +59,13 @@ func (r *slowloris) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) } // NewSlowloris attempts to find the http.Server struct and check if the ReadHeaderTimeout is configured. -func NewSlowloris(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewSlowloris(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &slowloris{ - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server", - Confidence: gosec.Low, - Severity: gosec.Medium, + Confidence: issue.Low, + Severity: issue.Medium, }, }, []ast.Node{(*ast.CompositeLit)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/sql.go b/vendor/github.com/securego/gosec/v2/rules/sql.go index ee99737d64..61222bfdb3 100644 --- a/vendor/github.com/securego/gosec/v2/rules/sql.go +++ b/vendor/github.com/securego/gosec/v2/rules/sql.go @@ -20,10 +20,11 @@ import ( "regexp" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type sqlStatement struct { - gosec.MetaData + issue.MetaData gosec.CallList // Contains a list of patterns which must all match for the rule to match. 
@@ -97,6 +98,32 @@ func (s *sqlStrConcat) ID() string { return s.MetaData.ID } +// findInjectionInBranch walks down a set of expressions, and will create new issues if it finds SQL injections +// This method assumes you've already verified that the branch contains SQL syntax +func (s *sqlStrConcat) findInjectionInBranch(ctx *gosec.Context, branch []ast.Expr) *ast.BinaryExpr { + for _, node := range branch { + be, ok := node.(*ast.BinaryExpr) + if !ok { + continue + } + + operands := gosec.GetBinaryExprOperands(be) + + for _, op := range operands { + if _, ok := op.(*ast.BasicLit); ok { + continue + } + + if ident, ok := op.(*ast.Ident); ok && s.checkObject(ident, ctx) { + continue + } + + return be + } + } + return nil +} + // see if we can figure out what it is func (s *sqlStrConcat) checkObject(n *ast.Ident, c *gosec.Context) bool { if n.Obj != nil { @@ -113,7 +140,7 @@ func (s *sqlStrConcat) checkObject(n *ast.Ident, c *gosec.Context) bool { } // checkQuery verifies if the query parameters is a string concatenation -func (s *sqlStrConcat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*gosec.Issue, error) { +func (s *sqlStrConcat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*issue.Issue, error) { query, err := findQueryArg(call, ctx) if err != nil { return nil, err @@ -134,7 +161,29 @@ func (s *sqlStrConcat) checkQuery(call *ast.CallExpr, ctx *gose if op, ok := op.(*ast.Ident); ok && s.checkObject(op, ctx) { continue } - return gosec.NewIssue(ctx, be, s.ID(), s.What, s.Severity, s.Confidence), nil + return ctx.NewIssue(be, s.ID(), s.What, s.Severity, s.Confidence), nil + } + } + } + + // Handle the case where an injection occurs as an infixed string concatenation, ie "SELECT * FROM foo WHERE name = '" + os.Args[0] + "' AND 1=1" + if id, ok := query.(*ast.Ident); ok { + var match bool + for _, str := range gosec.GetIdentStringValuesRecursive(id) { + if s.MatchPatterns(str) { + match = true + break + } + } + + if !match { + return nil, nil + } + + switch decl := id.Obj.Decl.(type) { + case *ast.AssignStmt: + if injection := s.findInjectionInBranch(ctx, decl.Rhs); injection != nil { + return ctx.NewIssue(injection, s.ID(), s.What, s.Severity, s.Confidence), nil } } } @@ -143,7 +192,7 @@ func (s *sqlStrConcat) checkQuery(call *ast.CallExpr, ctx *gose } // Checks SQL query concatenation issues such as "SELECT * FROM table WHERE " + " ' OR 1=1" -func (s *sqlStrConcat) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (s *sqlStrConcat) Match(n ast.Node, ctx *gosec.Context) (*issue.Issue, error) { switch stmt := n.(type) { case *ast.AssignStmt: for _, expr := range stmt.Rhs { @@ -156,20 +205,21 @@ func (s *sqlStrConcat) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, erro return s.checkQuery(sqlQueryCall, ctx) } } + return nil, nil } // NewSQLStrConcat looks for cases where we are building SQL strings via concatenation -func NewSQLStrConcat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewSQLStrConcat(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { rule := &sqlStrConcat{ sqlStatement: sqlStatement{ patterns: []*regexp.Regexp{ - regexp.MustCompile(`(?i)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) `), + regexp.MustCompile("(?i)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE)( |\n|\r|\t)"), }, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: "SQL string concatenation", }, CallList: 
gosec.NewCallList(), @@ -212,7 +262,7 @@ func (s *sqlStrFormat) constObject(e ast.Expr, c *gosec.Context) bool { return false } -func (s *sqlStrFormat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*gosec.Issue, error) { +func (s *sqlStrFormat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*issue.Issue, error) { query, err := findQueryArg(call, ctx) if err != nil { return nil, err @@ -233,7 +283,7 @@ func (s *sqlStrFormat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*gose return nil, nil } -func (s *sqlStrFormat) checkFormatting(n ast.Node, ctx *gosec.Context) *gosec.Issue { +func (s *sqlStrFormat) checkFormatting(n ast.Node, ctx *gosec.Context) *issue.Issue { // argIndex changes the function argument which gets matched to the regex argIndex := 0 if node := s.fmtCalls.ContainsPkgCallExpr(n, ctx, false); node != nil { @@ -286,14 +336,14 @@ func (s *sqlStrFormat) checkFormatting(n ast.Node, ctx *gosec.Context) *gosec.Is } } if s.MatchPatterns(formatter) { - return gosec.NewIssue(ctx, n, s.ID(), s.What, s.Severity, s.Confidence) + return ctx.NewIssue(n, s.ID(), s.What, s.Severity, s.Confidence) } } return nil } // Check SQL query formatting issues such as "fmt.Sprintf("SELECT * FROM foo where '%s', userInput)" -func (s *sqlStrFormat) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (s *sqlStrFormat) Match(n ast.Node, ctx *gosec.Context) (*issue.Issue, error) { switch stmt := n.(type) { case *ast.AssignStmt: for _, expr := range stmt.Rhs { @@ -323,7 +373,7 @@ func (s *sqlStrFormat) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, erro } // NewSQLStrFormat looks for cases where we're building SQL query strings using format strings -func NewSQLStrFormat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewSQLStrFormat(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { rule := &sqlStrFormat{ CallList: gosec.NewCallList(), fmtCalls: gosec.NewCallList(), @@ -334,10 +384,10 @@ func NewSQLStrFormat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { regexp.MustCompile("(?i)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE)( |\n|\r|\t)"), regexp.MustCompile("%[^bdoxXfFp]"), }, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: "SQL string formatting", }, }, diff --git a/vendor/github.com/securego/gosec/v2/rules/ssh.go b/vendor/github.com/securego/gosec/v2/rules/ssh.go index 01f37da510..e2ba5a3f4e 100644 --- a/vendor/github.com/securego/gosec/v2/rules/ssh.go +++ b/vendor/github.com/securego/gosec/v2/rules/ssh.go @@ -4,10 +4,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type sshHostKey struct { - gosec.MetaData + issue.MetaData pkg string calls []string } @@ -16,23 +17,23 @@ func (r *sshHostKey) ID() string { return r.MetaData.ID } -func (r *sshHostKey) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) { +func (r *sshHostKey) Match(n ast.Node, c *gosec.Context) (gi *issue.Issue, err error) { if _, matches := gosec.MatchCallByPackage(n, c, r.pkg, r.calls...); matches { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } return nil, nil } // NewSSHHostKey rule detects the use of insecure ssh HostKeyCallback. 
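The G106 rule documented above only audits uses of ssh.InsecureIgnoreHostKey, which disables host-key verification outright. For reference, a sketch of the usual alternative from the same x/crypto module: verify against a known_hosts file via the knownhosts package. The path and user below are illustrative assumptions.

```go
package sshclient

import (
	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/knownhosts"
)

func dial(addr string, auth ssh.AuthMethod) (*ssh.Client, error) {
	// ssh.InsecureIgnoreHostKey() here would be reported: it accepts any
	// host key and allows man-in-the-middle attacks.
	hostKeyCallback, err := knownhosts.New("/home/deploy/.ssh/known_hosts") // hypothetical path
	if err != nil {
		return nil, err
	}
	cfg := &ssh.ClientConfig{
		User:            "deploy", // hypothetical user
		Auth:            []ssh.AuthMethod{auth},
		HostKeyCallback: hostKeyCallback, // verified against known_hosts
	}
	return ssh.Dial("tcp", addr, cfg)
}
```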
-func NewSSHHostKey(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewSSHHostKey(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &sshHostKey{ pkg: "golang.org/x/crypto/ssh", calls: []string{"InsecureIgnoreHostKey"}, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Use of ssh InsecureIgnoreHostKey should be audited", - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, }, }, []ast.Node{(*ast.CallExpr)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/ssrf.go b/vendor/github.com/securego/gosec/v2/rules/ssrf.go index 86bb8278d3..dbf01081b2 100644 --- a/vendor/github.com/securego/gosec/v2/rules/ssrf.go +++ b/vendor/github.com/securego/gosec/v2/rules/ssrf.go @@ -5,10 +5,11 @@ import ( "go/types" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type ssrf struct { - gosec.MetaData + issue.MetaData gosec.CallList } @@ -40,25 +41,25 @@ func (r *ssrf) ResolveVar(n *ast.CallExpr, c *gosec.Context) bool { } // Match inspects AST nodes to determine if certain net/http methods are called with variable input -func (r *ssrf) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (r *ssrf) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { // Call expression is using http package directly if node := r.ContainsPkgCallExpr(n, c, false); node != nil { if r.ResolveVar(node, c) { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } return nil, nil } // NewSSRFCheck detects cases where HTTP requests are sent -func NewSSRFCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewSSRFCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { rule := &ssrf{ CallList: gosec.NewCallList(), - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Potential HTTP request made with variable url", - Severity: gosec.Medium, - Confidence: gosec.Medium, + Severity: issue.Medium, + Confidence: issue.Medium, }, } rule.AddAll("net/http", "Do", "Get", "Head", "Post", "PostForm", "RoundTrip") diff --git a/vendor/github.com/securego/gosec/v2/rules/subproc.go b/vendor/github.com/securego/gosec/v2/rules/subproc.go index 2b6cb186cd..1e2cedaa58 100644 --- a/vendor/github.com/securego/gosec/v2/rules/subproc.go +++ b/vendor/github.com/securego/gosec/v2/rules/subproc.go @@ -19,10 +19,11 @@ import ( "go/types" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type subprocess struct { - gosec.MetaData + issue.MetaData gosec.CallList } @@ -39,7 +40,7 @@ func (r *subprocess) ID() string { // is unsafe. 
For example: // // syscall.Exec("echo", "foobar" + tainted) -func (r *subprocess) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (r *subprocess) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if node := r.ContainsPkgCallExpr(n, c, false); node != nil { args := node.Args if r.isContext(n, c) { @@ -64,7 +65,7 @@ func (r *subprocess) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { _, assignment := ident.Obj.Decl.(*ast.AssignStmt) if variable && assignment { if !gosec.TryResolve(ident, c) { - return gosec.NewIssue(c, n, r.ID(), "Subprocess launched with variable", gosec.Medium, gosec.High), nil + return c.NewIssue(n, r.ID(), "Subprocess launched with variable", issue.Medium, issue.High), nil } } case *ast.Field: @@ -74,21 +75,21 @@ func (r *subprocess) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { vv, vvok := obj.(*types.Var) if vvok && vv.Parent().Lookup(ident.Name) == nil { - return gosec.NewIssue(c, n, r.ID(), "Subprocess launched with variable", gosec.Medium, gosec.High), nil + return c.NewIssue(n, r.ID(), "Subprocess launched with variable", issue.Medium, issue.High), nil } } case *ast.ValueSpec: _, valueSpec := ident.Obj.Decl.(*ast.ValueSpec) if variable && valueSpec { if !gosec.TryResolve(ident, c) { - return gosec.NewIssue(c, n, r.ID(), "Subprocess launched with variable", gosec.Medium, gosec.High), nil + return c.NewIssue(n, r.ID(), "Subprocess launched with variable", issue.Medium, issue.High), nil } } } } } else if !gosec.TryResolve(arg, c) { // the arg is not a constant or a variable but instead a function call or os.Args[i] - return gosec.NewIssue(c, n, r.ID(), "Subprocess launched with a potential tainted input or cmd arguments", gosec.Medium, gosec.High), nil + return c.NewIssue(n, r.ID(), "Subprocess launched with a potential tainted input or cmd arguments", issue.Medium, issue.High), nil } } } @@ -96,7 +97,7 @@ func (r *subprocess) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { } // isContext checks whether or not the node is a CommandContext call or not -// Thi is required in order to skip the first argument from the check. +// This is required in order to skip the first argument from the check. 
func (r *subprocess) isContext(n ast.Node, ctx *gosec.Context) bool { selector, indent, err := gosec.GetCallInfo(n, ctx) if err != nil { @@ -109,8 +110,8 @@ func (r *subprocess) isContext(n ast.Node, ctx *gosec.Context) bool { } // NewSubproc detects cases where we are forking out to an external process -func NewSubproc(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { - rule := &subprocess{gosec.MetaData{ID: id}, gosec.NewCallList()} +func NewSubproc(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { + rule := &subprocess{issue.MetaData{ID: id}, gosec.NewCallList()} rule.Add("os/exec", "Command") rule.Add("os/exec", "CommandContext") rule.Add("syscall", "Exec") diff --git a/vendor/github.com/securego/gosec/v2/rules/tempfiles.go b/vendor/github.com/securego/gosec/v2/rules/tempfiles.go index 63822c093c..6fef52a2cb 100644 --- a/vendor/github.com/securego/gosec/v2/rules/tempfiles.go +++ b/vendor/github.com/securego/gosec/v2/rules/tempfiles.go @@ -19,10 +19,11 @@ import ( "regexp" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type badTempFile struct { - gosec.MetaData + issue.MetaData calls gosec.CallList args *regexp.Regexp argCalls gosec.CallList @@ -33,15 +34,15 @@ func (t *badTempFile) ID() string { return t.MetaData.ID } -func (t *badTempFile) findTempDirArgs(n ast.Node, c *gosec.Context, suspect ast.Node) *gosec.Issue { +func (t *badTempFile) findTempDirArgs(n ast.Node, c *gosec.Context, suspect ast.Node) *issue.Issue { if s, e := gosec.GetString(suspect); e == nil { if t.args.MatchString(s) { - return gosec.NewIssue(c, n, t.ID(), t.What, t.Severity, t.Confidence) + return c.NewIssue(n, t.ID(), t.What, t.Severity, t.Confidence) } return nil } if ce := t.argCalls.ContainsPkgCallExpr(suspect, c, false); ce != nil { - return gosec.NewIssue(c, n, t.ID(), t.What, t.Severity, t.Confidence) + return c.NewIssue(n, t.ID(), t.What, t.Severity, t.Confidence) } if be, ok := suspect.(*ast.BinaryExpr); ok { if ops := gosec.GetBinaryExprOperands(be); len(ops) != 0 { @@ -55,7 +56,7 @@ func (t *badTempFile) findTempDirArgs(n ast.Node, c *gosec.Context, suspect ast. 
return nil } -func (t *badTempFile) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) { +func (t *badTempFile) Match(n ast.Node, c *gosec.Context) (gi *issue.Issue, err error) { if node := t.calls.ContainsPkgCallExpr(n, c, false); node != nil { return t.findTempDirArgs(n, c, node.Args[0]), nil } @@ -63,7 +64,7 @@ func (t *badTempFile) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err } // NewBadTempFile detects direct writes to predictable path in temporary directory -func NewBadTempFile(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewBadTempFile(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := gosec.NewCallList() calls.Add("io/ioutil", "WriteFile") calls.AddAll("os", "Create", "WriteFile") @@ -77,10 +78,10 @@ func NewBadTempFile(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { args: regexp.MustCompile(`^(/(usr|var))?/tmp(/.*)?$`), argCalls: argCalls, nestedCalls: nestedCalls, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: "File creation in shared tmp directory without using ioutil.Tempfile", }, }, []ast.Node{(*ast.CallExpr)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/templates.go b/vendor/github.com/securego/gosec/v2/rules/templates.go index 1eec7fba10..728766f457 100644 --- a/vendor/github.com/securego/gosec/v2/rules/templates.go +++ b/vendor/github.com/securego/gosec/v2/rules/templates.go @@ -18,10 +18,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type templateCheck struct { - gosec.MetaData + issue.MetaData calls gosec.CallList } @@ -29,11 +30,11 @@ func (t *templateCheck) ID() string { return t.MetaData.ID } -func (t *templateCheck) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (t *templateCheck) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if node := t.calls.ContainsPkgCallExpr(n, c, false); node != nil { for _, arg := range node.Args { if _, ok := arg.(*ast.BasicLit); !ok { // basic lits are safe - return gosec.NewIssue(c, n, t.ID(), t.What, t.Severity, t.Confidence), nil + return c.NewIssue(n, t.ID(), t.What, t.Severity, t.Confidence), nil } } } @@ -42,7 +43,7 @@ func (t *templateCheck) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error // NewTemplateCheck constructs the template check rule. This rule is used to // find use of templates where HTML/JS escaping is not being used -func NewTemplateCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewTemplateCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := gosec.NewCallList() calls.Add("html/template", "HTML") calls.Add("html/template", "HTMLAttr") @@ -50,10 +51,10 @@ func NewTemplateCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { calls.Add("html/template", "URL") return &templateCheck{ calls: calls, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.Low, + Severity: issue.Medium, + Confidence: issue.Low, What: "The used method does not auto-escape HTML. 
This can potentially lead to 'Cross-site Scripting' vulnerabilities, in case the attacker controls the input.", }, }, []ast.Node{(*ast.CallExpr)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/tls.go b/vendor/github.com/securego/gosec/v2/rules/tls.go index 76dfd84ff6..65a0b5a33a 100644 --- a/vendor/github.com/securego/gosec/v2/rules/tls.go +++ b/vendor/github.com/securego/gosec/v2/rules/tls.go @@ -24,10 +24,11 @@ import ( "strconv" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type insecureConfigTLS struct { - gosec.MetaData + issue.MetaData MinVersion int64 MaxVersion int64 requiredType string @@ -49,13 +50,13 @@ func stringInSlice(a string, list []string) bool { return false } -func (t *insecureConfigTLS) processTLSCipherSuites(n ast.Node, c *gosec.Context) *gosec.Issue { +func (t *insecureConfigTLS) processTLSCipherSuites(n ast.Node, c *gosec.Context) *issue.Issue { if ciphers, ok := n.(*ast.CompositeLit); ok { for _, cipher := range ciphers.Elts { if ident, ok := cipher.(*ast.SelectorExpr); ok { if !stringInSlice(ident.Sel.Name, t.goodCiphers) { err := fmt.Sprintf("TLS Bad Cipher Suite: %s", ident.Sel.Name) - return gosec.NewIssue(c, ident, t.ID(), err, gosec.High, gosec.High) + return c.NewIssue(ident, t.ID(), err, issue.High, issue.High) } } } @@ -63,31 +64,51 @@ func (t *insecureConfigTLS) processTLSCipherSuites(n ast.Node, c *gosec.Context) return nil } -func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Context) *gosec.Issue { - if ident, ok := n.Key.(*ast.Ident); ok { +func (t *insecureConfigTLS) processTLSConf(n ast.Node, c *gosec.Context) *issue.Issue { + if kve, ok := n.(*ast.KeyValueExpr); ok { + issue := t.processTLSConfVal(kve.Key, kve.Value, c) + if issue != nil { + return issue + } + } else if assign, ok := n.(*ast.AssignStmt); ok { + if len(assign.Lhs) < 1 || len(assign.Rhs) < 1 { + return nil + } + if selector, ok := assign.Lhs[0].(*ast.SelectorExpr); ok { + issue := t.processTLSConfVal(selector.Sel, assign.Rhs[0], c) + if issue != nil { + return issue + } + } + } + return nil +} + +func (t *insecureConfigTLS) processTLSConfVal(key ast.Expr, value ast.Expr, c *gosec.Context) *issue.Issue { + if ident, ok := key.(*ast.Ident); ok { switch ident.Name { case "InsecureSkipVerify": - if node, ok := n.Value.(*ast.Ident); ok { + if node, ok := value.(*ast.Ident); ok { if node.Name != "false" { - return gosec.NewIssue(c, n, t.ID(), "TLS InsecureSkipVerify set true.", gosec.High, gosec.High) + return c.NewIssue(value, t.ID(), "TLS InsecureSkipVerify set true.", issue.High, issue.High) } } else { // TODO(tk): symbol tab look up to get the actual value - return gosec.NewIssue(c, n, t.ID(), "TLS InsecureSkipVerify may be true.", gosec.High, gosec.Low) + return c.NewIssue(value, t.ID(), "TLS InsecureSkipVerify may be true.", issue.High, issue.Low) } case "PreferServerCipherSuites": - if node, ok := n.Value.(*ast.Ident); ok { + if node, ok := value.(*ast.Ident); ok { if node.Name == "false" { - return gosec.NewIssue(c, n, t.ID(), "TLS PreferServerCipherSuites set false.", gosec.Medium, gosec.High) + return c.NewIssue(value, t.ID(), "TLS PreferServerCipherSuites set false.", issue.Medium, issue.High) } } else { // TODO(tk): symbol tab look up to get the actual value - return gosec.NewIssue(c, n, t.ID(), "TLS PreferServerCipherSuites may be false.", gosec.Medium, gosec.Low) + return c.NewIssue(value, t.ID(), "TLS PreferServerCipherSuites may be false.", issue.Medium, issue.Low) } case "MinVersion": - if d, ok := 
n.Value.(*ast.Ident); ok { + if d, ok := value.(*ast.Ident); ok { obj := d.Obj if obj == nil { for _, f := range c.PkgFiles { @@ -107,7 +128,7 @@ func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Cont tObj := imp.Scope().Lookup(sel) if cst, ok := tObj.(*types.Const); ok { // ..got the value check if this can be translated - if minVersion, err := strconv.ParseInt(cst.Val().String(), 10, 64); err == nil { + if minVersion, err := strconv.ParseInt(cst.Val().String(), 0, 64); err == nil { t.actualMinVersion = minVersion } } @@ -118,10 +139,10 @@ func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Cont t.actualMinVersion = ival } } - } else if ival, ierr := gosec.GetInt(n.Value); ierr == nil { + } else if ival, ierr := gosec.GetInt(value); ierr == nil { t.actualMinVersion = ival } else { - if se, ok := n.Value.(*ast.SelectorExpr); ok { + if se, ok := value.(*ast.SelectorExpr); ok { if pkg, ok := se.X.(*ast.Ident); ok { if ip, ok := gosec.GetImportPath(pkg.Name, c); ok && ip == "crypto/tls" { t.actualMinVersion = t.mapVersion(se.Sel.Name) @@ -131,10 +152,10 @@ func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Cont } case "MaxVersion": - if ival, ierr := gosec.GetInt(n.Value); ierr == nil { + if ival, ierr := gosec.GetInt(value); ierr == nil { t.actualMaxVersion = ival } else { - if se, ok := n.Value.(*ast.SelectorExpr); ok { + if se, ok := value.(*ast.SelectorExpr); ok { if pkg, ok := se.X.(*ast.Ident); ok { if ip, ok := gosec.GetImportPath(pkg.Name, c); ok && ip == "crypto/tls" { t.actualMaxVersion = t.mapVersion(se.Sel.Name) @@ -144,7 +165,7 @@ func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Cont } case "CipherSuites": - if ret := t.processTLSCipherSuites(n.Value, c); ret != nil { + if ret := t.processTLSCipherSuites(value, c); ret != nil { return ret } @@ -168,16 +189,16 @@ func (t *insecureConfigTLS) mapVersion(version string) int64 { return v } -func (t *insecureConfigTLS) checkVersion(n ast.Node, c *gosec.Context) *gosec.Issue { +func (t *insecureConfigTLS) checkVersion(n ast.Node, c *gosec.Context) *issue.Issue { if t.actualMaxVersion == 0 && t.actualMinVersion >= t.MinVersion { // no warning is generated since the min version is greater than the secure min version return nil } if t.actualMinVersion < t.MinVersion { - return gosec.NewIssue(c, n, t.ID(), "TLS MinVersion too low.", gosec.High, gosec.High) + return c.NewIssue(n, t.ID(), "TLS MinVersion too low.", issue.High, issue.High) } if t.actualMaxVersion < t.MaxVersion { - return gosec.NewIssue(c, n, t.ID(), "TLS MaxVersion too low.", gosec.High, gosec.High) + return c.NewIssue(n, t.ID(), "TLS MaxVersion too low.", issue.High, issue.High) } return nil } @@ -187,22 +208,32 @@ func (t *insecureConfigTLS) resetVersion() { t.actualMinVersion = 0 } -func (t *insecureConfigTLS) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (t *insecureConfigTLS) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if complit, ok := n.(*ast.CompositeLit); ok && complit.Type != nil { actualType := c.Info.TypeOf(complit.Type) if actualType != nil && actualType.String() == t.requiredType { for _, elt := range complit.Elts { - if kve, ok := elt.(*ast.KeyValueExpr); ok { - issue := t.processTLSConfVal(kve, c) - if issue != nil { - return issue, nil - } + issue := t.processTLSConf(elt, c) + if issue != nil { + return issue, nil } } issue := t.checkVersion(complit, c) t.resetVersion() return issue, nil } + } else { + if assign, ok := 
n.(*ast.AssignStmt); ok && len(assign.Lhs) > 0 { + if selector, ok := assign.Lhs[0].(*ast.SelectorExpr); ok { + actualType := c.Info.TypeOf(selector.X) + if actualType != nil && actualType.String() == t.requiredType { + issue := t.processTLSConf(assign, c) + if issue != nil { + return issue, nil + } + } + } + } } return nil, nil } diff --git a/vendor/github.com/securego/gosec/v2/rules/tls_config.go b/vendor/github.com/securego/gosec/v2/rules/tls_config.go index 5d68593d82..cbbdf7983a 100644 --- a/vendor/github.com/securego/gosec/v2/rules/tls_config.go +++ b/vendor/github.com/securego/gosec/v2/rules/tls_config.go @@ -4,13 +4,14 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) // NewModernTLSCheck creates a check for Modern TLS ciphers // DO NOT EDIT - generated by tlsconfig tool -func NewModernTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewModernTLSCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &insecureConfigTLS{ - MetaData: gosec.MetaData{ID: id}, + MetaData: issue.MetaData{ID: id}, requiredType: "crypto/tls.Config", MinVersion: 0x0304, MaxVersion: 0x0304, @@ -19,14 +20,14 @@ func NewModernTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256", }, - }, []ast.Node{(*ast.CompositeLit)(nil)} + }, []ast.Node{(*ast.CompositeLit)(nil), (*ast.AssignStmt)(nil)} } // NewIntermediateTLSCheck creates a check for Intermediate TLS ciphers // DO NOT EDIT - generated by tlsconfig tool -func NewIntermediateTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewIntermediateTLSCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &insecureConfigTLS{ - MetaData: gosec.MetaData{ID: id}, + MetaData: issue.MetaData{ID: id}, requiredType: "crypto/tls.Config", MinVersion: 0x0303, MaxVersion: 0x0304, @@ -45,14 +46,14 @@ func NewIntermediateTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.No "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", }, - }, []ast.Node{(*ast.CompositeLit)(nil)} + }, []ast.Node{(*ast.CompositeLit)(nil), (*ast.AssignStmt)(nil)} } // NewOldTLSCheck creates a check for Old TLS ciphers // DO NOT EDIT - generated by tlsconfig tool -func NewOldTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewOldTLSCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &insecureConfigTLS{ - MetaData: gosec.MetaData{ID: id}, + MetaData: issue.MetaData{ID: id}, requiredType: "crypto/tls.Config", MinVersion: 0x0301, MaxVersion: 0x0304, @@ -88,5 +89,5 @@ func NewOldTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_3DES_EDE_CBC_SHA", }, - }, []ast.Node{(*ast.CompositeLit)(nil)} + }, []ast.Node{(*ast.CompositeLit)(nil), (*ast.AssignStmt)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/unsafe.go b/vendor/github.com/securego/gosec/v2/rules/unsafe.go index 88a298fb52..2e2adca7c7 100644 --- a/vendor/github.com/securego/gosec/v2/rules/unsafe.go +++ b/vendor/github.com/securego/gosec/v2/rules/unsafe.go @@ -18,10 +18,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type usingUnsafe struct { - gosec.MetaData + issue.MetaData pkg string calls []string } @@ -30,24 +31,24 @@ func (r *usingUnsafe) ID() string { return r.MetaData.ID } -func (r *usingUnsafe) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) { +func (r *usingUnsafe) 
Match(n ast.Node, c *gosec.Context) (gi *issue.Issue, err error) { if _, matches := gosec.MatchCallByPackage(n, c, r.pkg, r.calls...); matches { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } return nil, nil } // NewUsingUnsafe rule detects the use of the unsafe package. This is only // really useful for auditing purposes. -func NewUsingUnsafe(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewUsingUnsafe(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &usingUnsafe{ pkg: "unsafe", - calls: []string{"Alignof", "Offsetof", "Sizeof", "Pointer"}, - MetaData: gosec.MetaData{ + calls: []string{"Pointer", "String", "StringData", "Slice", "SliceData"}, + MetaData: issue.MetaData{ ID: id, What: "Use of unsafe calls should be audited", - Severity: gosec.Low, - Confidence: gosec.High, + Severity: issue.Low, + Confidence: issue.High, }, }, []ast.Node{(*ast.CallExpr)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go b/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go index eecb88f046..4f2ab11d15 100644 --- a/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go +++ b/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go @@ -18,10 +18,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type usesWeakCryptography struct { - gosec.MetaData + issue.MetaData blocklist map[string][]string } @@ -29,17 +30,17 @@ func (r *usesWeakCryptography) ID() string { return r.MetaData.ID } -func (r *usesWeakCryptography) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (r *usesWeakCryptography) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { for pkg, funcs := range r.blocklist { if _, matched := gosec.MatchCallByPackage(n, c, pkg, funcs...); matched { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } return nil, nil } // NewUsesWeakCryptography detects uses of des.* md5.* or rc4.* -func NewUsesWeakCryptography(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewUsesWeakCryptography(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := make(map[string][]string) calls["crypto/des"] = []string{"NewCipher", "NewTripleDESCipher"} calls["crypto/md5"] = []string{"New", "Sum"} @@ -47,10 +48,10 @@ func NewUsesWeakCryptography(id string, conf gosec.Config) (gosec.Rule, []ast.No calls["crypto/rc4"] = []string{"NewCipher"} rule := &usesWeakCryptography{ blocklist: calls, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: "Use of weak cryptographic primitive", }, } diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index b042c896f2..d1d4a85fd7 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -9,7 +9,7 @@ the last thing you want from your Logging library (again...). This does not mean Logrus is dead. Logrus will continue to be maintained for security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). +limited by the interface). I believe Logrus' biggest contribution is to have played a part in today's widespread use of structured logging in Golang. 
There doesn't seem to be a @@ -43,7 +43,7 @@ plain text): With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash or Splunk: -```json +```text {"animal":"walrus","level":"info","msg":"A group of walrus emerges from the ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} @@ -99,7 +99,7 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr ``` Note that this does add measurable overhead - the cost will depend on the version of Go, but is between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: +environment via benchmarks: ``` go test -bench=.*CallerTracing ``` @@ -317,6 +317,8 @@ log.SetLevel(log.InfoLevel) It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. +Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). + #### Entries Besides the fields added with `WithField` or `WithFields` some fields are diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go index 72e8e3a1b6..074fd4b8bd 100644 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -4,6 +4,7 @@ import ( "bufio" "io" "runtime" + "strings" ) // Writer at INFO level. See WriterLevel for details. @@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { return NewEntry(logger).WriterLevel(level) } +// Writer returns an io.Writer that writes to the logger at the info log level func (entry *Entry) Writer() *io.PipeWriter { return entry.WriterLevel(InfoLevel) } +// WriterLevel returns an io.Writer that writes to the logger at the given log level func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) + // Determine which log function to use based on the specified log level switch level { case TraceLevel: printFunc = entry.Trace @@ -48,23 +52,51 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { printFunc = entry.Print } + // Start a new goroutine to scan the input and write it to the logger using the specified print function. + // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
go entry.writerScanner(reader, printFunc) + + // Set a finalizer function to close the writer when it is garbage collected runtime.SetFinalizer(writer, writerFinalizer) return writer } +// writerScanner scans the input from the reader and writes it to the logger func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { scanner := bufio.NewScanner(reader) + + // Set the buffer size to the maximum token size to avoid buffer overflows + scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) + + // Define a split function to split the input into chunks of up to 64KB + chunkSize := bufio.MaxScanTokenSize // 64KB + splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { + if len(data) >= chunkSize { + return chunkSize, data[:chunkSize], nil + } + + return bufio.ScanLines(data, atEOF) + } + + // Use the custom split function to split the input + scanner.Split(splitFunc) + + // Scan the input and write it to the logger using the specified print function for scanner.Scan() { - printFunc(scanner.Text()) + printFunc(strings.TrimRight(scanner.Text(), "\r\n")) } + + // If there was an error while scanning the input, log an error if err := scanner.Err(); err != nil { entry.Errorf("Error while reading from Writer: %s", err) } + + // Close the reader when we are done reader.Close() } +// writerFinalizer is a finalizer function that closes the given writer when it is garbage collected func writerFinalizer(writer *io.PipeWriter) { writer.Close() } diff --git a/vendor/github.com/sivchari/containedctx/.golangci.yml b/vendor/github.com/sivchari/containedctx/.golangci.yml new file mode 100644 index 0000000000..f687df8362 --- /dev/null +++ b/vendor/github.com/sivchari/containedctx/.golangci.yml @@ -0,0 +1,38 @@ +run: + timeout: 5m + skip-files: [] + +linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + gocyclo: + min-complexity: 12 + misspell: + locale: US + godox: + keywords: + - FIXME + gofumpt: + extra-rules: true + +linters: + disable-all: true + enable: + - govet + - revive + - goimports + - staticcheck + - gosimple + - unused + - godox + - gofumpt + - misspell + - gocyclo + +issues: + exclude-use-default: true + max-per-linter: 0 + max-same-issues: 0 + exclude: [] diff --git a/vendor/github.com/sivchari/containedctx/LICENCE b/vendor/github.com/sivchari/containedctx/LICENCE new file mode 100644 index 0000000000..5185ec09a5 --- /dev/null +++ b/vendor/github.com/sivchari/containedctx/LICENCE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 sivchari + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sivchari/containedctx/README.md b/vendor/github.com/sivchari/containedctx/README.md new file mode 100644 index 0000000000..0c2dd208d4 --- /dev/null +++ b/vendor/github.com/sivchari/containedctx/README.md @@ -0,0 +1,64 @@ +# containedctx + +[![test_and_lint](https://github.com/sivchari/containedctx/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/sivchari/containedctx/actions/workflows/ci.yml) + +containedctx is a linter that detects structs that contain a context.Context field. +This technique is discouraged in favour of passing the context as the first argument of a method or function. +For the rationale, please read the Go blog post [Contexts and structs](https://go.dev/blog/context-and-structs). + +## Installation + +```sh +go install github.com/sivchari/containedctx/cmd/containedctx +``` + +## Usage + +```go +package main + +import "context" + +type ok struct { + i int + s string +} + +type ng struct { + ctx context.Context +} + +type empty struct{} +``` + +```console +go vet -vettool=(which containedctx) ./... + +# a +./main.go:11:2: found a struct that contains a context.Context field +``` + + +## CI + +### CircleCI + +```yaml +- run: + name: install containedctx + command: go install github.com/sivchari/containedctx/cmd/containedctx + +- run: + name: run containedctx + command: go vet -vettool=`which containedctx` ./... 
+``` diff --git a/vendor/github.com/sivchari/containedctx/containedctx.go b/vendor/github.com/sivchari/containedctx/containedctx.go new file mode 100644 index 0000000000..0260d6a6eb --- /dev/null +++ b/vendor/github.com/sivchari/containedctx/containedctx.go @@ -0,0 +1,45 @@ +package containedctx + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const doc = "containedctx is a linter that detects struct contained context.Context field" + +// Analyzer is the contanedctx analyzer +var Analyzer = &analysis.Analyzer{ + Name: "containedctx", + Doc: doc, + Run: run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.StructType)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch structTyp := n.(type) { + case *ast.StructType: + if structTyp.Fields.List == nil { + return + } + for _, field := range structTyp.Fields.List { + if pass.TypesInfo.TypeOf(field.Type).String() == "context.Context" { + pass.Reportf(field.Pos(), "found a struct that contains a context.Context field") + } + } + } + }) + + return nil, nil +} diff --git a/vendor/github.com/sivchari/tenv/.gitignore b/vendor/github.com/sivchari/tenv/.gitignore new file mode 100644 index 0000000000..83470100fc --- /dev/null +++ b/vendor/github.com/sivchari/tenv/.gitignore @@ -0,0 +1,17 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +.idea + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/sivchari/tenv/.golangci.yml b/vendor/github.com/sivchari/tenv/.golangci.yml new file mode 100644 index 0000000000..f687df8362 --- /dev/null +++ b/vendor/github.com/sivchari/tenv/.golangci.yml @@ -0,0 +1,38 @@ +run: + timeout: 5m + skip-files: [] + +linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + gocyclo: + min-complexity: 12 + misspell: + locale: US + godox: + keywords: + - FIXME + gofumpt: + extra-rules: true + +linters: + disable-all: true + enable: + - govet + - revive + - goimports + - staticcheck + - gosimple + - unused + - godox + - gofumpt + - misspell + - gocyclo + +issues: + exclude-use-default: true + max-per-linter: 0 + max-same-issues: 0 + exclude: [] diff --git a/vendor/github.com/sivchari/tenv/LICENSE b/vendor/github.com/sivchari/tenv/LICENSE new file mode 100644 index 0000000000..5185ec09a5 --- /dev/null +++ b/vendor/github.com/sivchari/tenv/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 sivchari + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/sivchari/tenv/README.md b/vendor/github.com/sivchari/tenv/README.md
new file mode 100644
index 0000000000..c5d0047734
--- /dev/null
+++ b/vendor/github.com/sivchari/tenv/README.md
@@ -0,0 +1,107 @@
+# tenv
+
+![tenv Gopher](./tenv.png "Gopher")
+
+
+[![test_and_lint](https://github.com/sivchari/tenv/actions/workflows/workflows.yml/badge.svg?branch=main)](https://github.com/sivchari/tenv/actions/workflows/workflows.yml)
+
+tenv is an analyzer that detects uses of os.Setenv instead of t.Setenv, which is available since Go 1.17
+
+## Installation
+
+```sh
+go install github.com/sivchari/tenv/cmd/tenv
+```
+
+## Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+	"testing"
+)
+
+func TestMain(t *testing.T) {
+	fmt.Println(os.Getenv("GO"))
+	os.Setenv("GO", "HACKING GOPHER")
+}
+
+func TestMain2(t *testing.T) {
+	fmt.Println(os.Getenv("GO"))
+}
+
+func helper() {
+	os.Setenv("GO", "HACKING GOPHER")
+}
+```
+
+```console
+go vet -vettool=(which tenv) ./...
+
+# a
+./main_test.go:11:2: os.Setenv() can be replaced by `t.Setenv()` in TestMain
+```
+
+### Option
+
+The `all` option runs the check against whole test files (`_test.go`), regardless of method/function signatures.
+
+By default, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked.
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+	"testing"
+)
+
+func TestMain(t *testing.T) {
+	fmt.Println(os.Getenv("GO"))
+	os.Setenv("GO", "HACKING GOPHER")
+}
+
+func TestMain2(t *testing.T) {
+	fmt.Println(os.Getenv("GO"))
+}
+
+func helper() {
+	os.Setenv("GO", "HACKING GOPHER")
+}
+```
+
+```console
+go vet -vettool=(which tenv) -tenv.all ./...
+
+# a
+./main_test.go:11:2: os.Setenv() can be replaced by `t.Setenv()` in TestMain
+./main_test.go:19:2: os.Setenv() can be replaced by `testing.Setenv()` in helper
+```
+
+## CI
+
+### CircleCI
+
+```yaml
+- run:
+    name: install tenv
+    command: go install github.com/sivchari/tenv/cmd/tenv
+
+- run:
+    name: run tenv
+    command: go vet -vettool=`which tenv` ./...
+```
+
+### GitHub Actions
+
+```yaml
+- name: install tenv
+  run: go install github.com/sivchari/tenv/cmd/tenv
+
+- name: run tenv
+  run: go vet -vettool=`which tenv` ./...
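+# Optional sketch: to also check plain helper functions in _test.go files,
+# enable the `all` option documented above (flag name as in this README):
+# run: go vet -vettool=`which tenv` -tenv.all ./...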
+``` diff --git a/vendor/github.com/sivchari/tenv/tenv.go b/vendor/github.com/sivchari/tenv/tenv.go new file mode 100644 index 0000000000..fcff98d058 --- /dev/null +++ b/vendor/github.com/sivchari/tenv/tenv.go @@ -0,0 +1,213 @@ +package tenv + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const doc = "tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17" + +// Analyzer is tenv analyzer +var Analyzer = &analysis.Analyzer{ + Name: "tenv", + Doc: doc, + Run: run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +var ( + A = "all" + aflag bool +) + +func init() { + Analyzer.Flags.BoolVar(&aflag, A, false, "the all option will run against all method in test file") +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.FuncDecl: + checkFuncDecl(pass, n, pass.Fset.File(n.Pos()).Name()) + case *ast.FuncLit: + checkFuncLit(pass, n, pass.Fset.File(n.Pos()).Name()) + } + }) + + return nil, nil +} + +func checkFuncDecl(pass *analysis.Pass, f *ast.FuncDecl, fileName string) { + argName, ok := targetRunner(f.Type.Params.List, fileName) + if !ok { + return + } + checkStmts(pass, f.Body.List, f.Name.Name, argName) +} + +func checkFuncLit(pass *analysis.Pass, f *ast.FuncLit, fileName string) { + argName, ok := targetRunner(f.Type.Params.List, fileName) + if !ok { + return + } + checkStmts(pass, f.Body.List, "anonymous function", argName) +} + +func checkStmts(pass *analysis.Pass, stmts []ast.Stmt, funcName, argName string) { + for _, stmt := range stmts { + switch stmt := stmt.(type) { + case *ast.ExprStmt: + if !checkExprStmt(pass, stmt, funcName, argName) { + continue + } + case *ast.IfStmt: + if !checkIfStmt(pass, stmt, funcName, argName) { + continue + } + case *ast.AssignStmt: + if !checkAssignStmt(pass, stmt, funcName, argName) { + continue + } + } + } +} + +func checkExprStmt(pass *analysis.Pass, stmt *ast.ExprStmt, funcName, argName string) bool { + callExpr, ok := stmt.X.(*ast.CallExpr) + if !ok { + return false + } + fun, ok := callExpr.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + x, ok := fun.X.(*ast.Ident) + if !ok { + return false + } + targetName := x.Name + "." + fun.Sel.Name + if targetName == "os.Setenv" { + if argName == "" { + argName = "testing" + } + pass.Reportf(stmt.Pos(), "os.Setenv() can be replaced by `%s.Setenv()` in %s", argName, funcName) + } + return true +} + +func checkIfStmt(pass *analysis.Pass, stmt *ast.IfStmt, funcName, argName string) bool { + assignStmt, ok := stmt.Init.(*ast.AssignStmt) + if !ok { + return false + } + rhs, ok := assignStmt.Rhs[0].(*ast.CallExpr) + if !ok { + return false + } + fun, ok := rhs.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + x, ok := fun.X.(*ast.Ident) + if !ok { + return false + } + targetName := x.Name + "." 
+ fun.Sel.Name + if targetName == "os.Setenv" { + if argName == "" { + argName = "testing" + } + pass.Reportf(stmt.Pos(), "os.Setenv() can be replaced by `%s.Setenv()` in %s", argName, funcName) + } + return true +} + +func checkAssignStmt(pass *analysis.Pass, stmt *ast.AssignStmt, funcName, argName string) bool { + rhs, ok := stmt.Rhs[0].(*ast.CallExpr) + if !ok { + return false + } + fun, ok := rhs.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + x, ok := fun.X.(*ast.Ident) + if !ok { + return false + } + targetName := x.Name + "." + fun.Sel.Name + if targetName == "os.Setenv" { + if argName == "" { + argName = "testing" + } + pass.Reportf(stmt.Pos(), "os.Setenv() can be replaced by `%s.Setenv()` in %s", argName, funcName) + } + return true +} + +func targetRunner(params []*ast.Field, fileName string) (string, bool) { + for _, p := range params { + switch typ := p.Type.(type) { + case *ast.StarExpr: + if checkStarExprTarget(typ) { + if len(p.Names) == 0 { + return "", false + } + argName := p.Names[0].Name + return argName, true + } + case *ast.SelectorExpr: + if checkSelectorExprTarget(typ) { + if len(p.Names) == 0 { + return "", false + } + argName := p.Names[0].Name + return argName, true + } + } + } + if aflag && strings.HasSuffix(fileName, "_test.go") { + return "", true + } + return "", false +} + +func checkStarExprTarget(typ *ast.StarExpr) bool { + selector, ok := typ.X.(*ast.SelectorExpr) + if !ok { + return false + } + x, ok := selector.X.(*ast.Ident) + if !ok { + return false + } + targetName := x.Name + "." + selector.Sel.Name + switch targetName { + case "testing.T", "testing.B", "testing.F": + return true + default: + return false + } +} + +func checkSelectorExprTarget(typ *ast.SelectorExpr) bool { + x, ok := typ.X.(*ast.Ident) + if !ok { + return false + } + targetName := x.Name + "." + typ.Sel.Name + return targetName == "testing.TB" +} diff --git a/vendor/github.com/sivchari/tenv/tenv.png b/vendor/github.com/sivchari/tenv/tenv.png new file mode 100644 index 0000000000..96dc967e38 Binary files /dev/null and b/vendor/github.com/sivchari/tenv/tenv.png differ diff --git a/vendor/github.com/sonatard/noctx/.golangci.yml b/vendor/github.com/sonatard/noctx/.golangci.yml index 1580acde27..55ebeebdb0 100644 --- a/vendor/github.com/sonatard/noctx/.golangci.yml +++ b/vendor/github.com/sonatard/noctx/.golangci.yml @@ -1,20 +1,14 @@ run: - -linters-settings: - govet: + linters-settings: + govet: + enable-all: true + linters: enable-all: true - -linters: - enable-all: true - disable: - - gochecknoglobals - - gomnd - - gocognit - - nestif - -issues: - exclude-rules: - - path: reqwithoutctx/ssa.go - text: "Consider preallocating `exts`" - linters: - - prealloc + disable: + - gochecknoglobals + - gomnd + - gocognit + - nestif + - nilnil + - paralleltest + - varnamelen \ No newline at end of file diff --git a/vendor/github.com/sonatard/noctx/README.md b/vendor/github.com/sonatard/noctx/README.md index bfe9782c6d..b3793fc968 100644 --- a/vendor/github.com/sonatard/noctx/README.md +++ b/vendor/github.com/sonatard/noctx/README.md @@ -1,25 +1,60 @@ # noctx -![](https://github.com/sonatard/noctx/workflows/.github/workflows/ci.yml/badge.svg) +![](https://github.com/sonatard/noctx/workflows/CI/badge.svg) `noctx` finds sending http request without context.Context. -You should use `noctx` if sending http request in your library. +You should use `noctx` if sending http request in your library. 
 Passing `context.Context` enables library user to cancel http request, getting trace information and so on.
 
-## Install
+## Usage
+
+
+### noctx with go vet
+
+go vet is a standard Go tool for analyzing source code.
+1. Install noctx.
 ```sh
-$ go get -u github.com/sonatard/noctx/cmd/noctx
+$ go install github.com/sonatard/noctx/cmd/noctx@latest
 ```
 
-## Usage
-
+2. Run noctx
 ```sh
 $ go vet -vettool=`which noctx` main.go
 ./main.go:6:11: net/http.Get must not be called
 ```
 
+### noctx with golangci-lint
+
+golangci-lint is a fast runner for Go linters.
+
+1. Install golangci-lint.
+[golangci-lint - Install](https://golangci-lint.run/usage/install/)
+
+2. Set up .golangci.yml
+```yaml
+# Add noctx to the enabled linters.
+linters:
+  enable:
+    - noctx
+
+# Or, when enable-all is true:
+linters:
+  enable-all: true
+  disable:
+    - xxx # Add the linters you want to disable.
+```
+
+3. Run noctx
+```sh
+# Use .golangci.yml
+$ golangci-lint run
+
+# Run only noctx
+$ golangci-lint run --disable-all -E noctx
+```
+
 ## Detection rules
 - Executing following functions
   - `net/http.Get`
@@ -39,7 +74,51 @@ $ go vet -vettool=`which noctx` main.go
 
 `(http.Request).WithContext(ctx)` has a disadvantage of performance because it returns a copy of `http.Request`. Use `http.NewRequestWithContext` function if you only support Go1.13 or later.
 
-## Sample Code
+
+If your library already provides functions that don't accept a context, define a new function that accepts a context and make the existing function a wrapper around the new one.
+
+
+```go
+// Before the fix:
+// sending an HTTP request without accepting a context.
+func Send(body io.Reader) error {
+	req, err := http.NewRequest(http.MethodPost, "http://example.com", body)
+	if err != nil {
+		return err
+	}
+	_, err = http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+```
+
+```go
+// After the fix:
+func Send(body io.Reader) error {
+	// Pass context.Background() to SendWithContext.
+	return SendWithContext(context.Background(), body)
+}
+
+// Sending an HTTP request and accepting a context.
+func SendWithContext(ctx context.Context, body io.Reader) error {
+	// Change NewRequest to NewRequestWithContext and pass the context to it.
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://example.com", body)
+	if err != nil {
+		return err
+	}
+	_, err = http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+```
+
+## Detection sample
 
 ```go
 package main
diff --git a/vendor/github.com/sonatard/noctx/ngfunc/main.go b/vendor/github.com/sonatard/noctx/ngfunc/main.go
index cfeb0f0010..46306218d2 100644
--- a/vendor/github.com/sonatard/noctx/ngfunc/main.go
+++ b/vendor/github.com/sonatard/noctx/ngfunc/main.go
@@ -1,6 +1,7 @@
 package ngfunc
 
 import (
+	"fmt"
 	"go/types"
 
 	"github.com/gostaticanalysis/analysisutil"
@@ -34,8 +35,11 @@ func Run(pass *analysis.Pass) (interface{}, error) {
 
 func ngCalledFuncs(pass *analysis.Pass, ngFuncs []*types.Func) []*Report {
 	var reports []*Report
-	srcFuncs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs
-	for _, sf := range srcFuncs {
+	ssa, ok := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
+	if !ok {
+		panic(fmt.Sprintf("%T is not *buildssa.SSA", pass.ResultOf[buildssa.Analyzer]))
+	}
+	for _, sf := range ssa.SrcFuncs {
 		for _, b := range sf.Blocks {
 			for _, instr := range b.Instrs {
 				for _, ngFunc := range ngFuncs {
diff --git a/vendor/github.com/sonatard/noctx/noctx.go b/vendor/github.com/sonatard/noctx/noctx.go
index 478ad8855d..89e0446ecd 100644
---
a/vendor/github.com/sonatard/noctx/noctx.go +++ b/vendor/github.com/sonatard/noctx/noctx.go @@ -1,6 +1,7 @@ package noctx import ( + "fmt" "github.com/sonatard/noctx/ngfunc" "github.com/sonatard/noctx/reqwithoutctx" "golang.org/x/tools/go/analysis" @@ -8,23 +9,26 @@ import ( ) var Analyzer = &analysis.Analyzer{ - Name: "noctx", - Doc: Doc, - Run: run, + Name: "noctx", + Doc: Doc, + Run: run, + RunDespiteErrors: false, Requires: []*analysis.Analyzer{ buildssa.Analyzer, }, + ResultType: nil, + FactTypes: nil, } const Doc = "noctx finds sending http request without context.Context" func run(pass *analysis.Pass) (interface{}, error) { if _, err := ngfunc.Run(pass); err != nil { - return nil, err + return nil, fmt.Errorf("run: %w", err) } if _, err := reqwithoutctx.Run(pass); err != nil { - return nil, err + return nil, fmt.Errorf("run: %w", err) } return nil, nil diff --git a/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go b/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go index 35751269ee..d7e0f5084d 100644 --- a/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go +++ b/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go @@ -1,6 +1,7 @@ package reqwithoutctx import ( + "fmt" "go/types" "github.com/gostaticanalysis/analysisutil" @@ -10,6 +11,7 @@ import ( "golang.org/x/tools/go/ssa" ) +//nolint:govet type Analyzer struct { Funcs []*ssa.Function newRequestType types.Type @@ -20,10 +22,13 @@ func NewAnalyzer(pass *analysis.Pass) *Analyzer { newRequestType := analysisutil.TypeOf(pass, "net/http", "NewRequest") requestType := analysisutil.TypeOf(pass, "net/http", "*Request") - srcFuncs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs + ssa, ok := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + if !ok { + panic(fmt.Sprintf("%T is not *buildssa.SSA", pass.ResultOf[buildssa.Analyzer])) + } return &Analyzer{ - Funcs: srcFuncs, + Funcs: ssa.SrcFuncs, newRequestType: newRequestType, requestType: requestType, } @@ -88,14 +93,14 @@ func (a *Analyzer) usedReqs() map[string]*ssa.Extract { } func (a *Analyzer) usedReqByCall(call *ssa.Call) []*ssa.Extract { - var exts []*ssa.Extract + args := call.Common().Args + exts := make([]*ssa.Extract, 0, len(args)) // skip net/http.Request method call if call.Common().Signature().Recv() != nil && types.Identical(call.Value().Type(), a.requestType) { return exts } - args := call.Common().Args if len(args) == 0 { return exts } diff --git a/vendor/github.com/sourcegraph/go-diff/LICENSE b/vendor/github.com/sourcegraph/go-diff/LICENSE index 0733b6e5f2..5ba1c44360 100644 --- a/vendor/github.com/sourcegraph/go-diff/LICENSE +++ b/vendor/github.com/sourcegraph/go-diff/LICENSE @@ -33,3 +33,14 @@ in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/sourcegraph/go-diff/diff/diff.go b/vendor/github.com/sourcegraph/go-diff/diff/diff.go index 0f465b9e27..81aa655707 100644 --- a/vendor/github.com/sourcegraph/go-diff/diff/diff.go +++ b/vendor/github.com/sourcegraph/go-diff/diff/diff.go @@ -120,6 +120,10 @@ const onlyInMessage = "Only in %s: %s\n" // See https://www.gnu.org/software/diffutils/manual/html_node/Detailed-Unified.html. const diffTimeParseLayout = "2006-01-02 15:04:05 -0700" +// Apple's diff is based on freebsd diff, which uses a timestamp format that does +// not include the timezone offset. +const diffTimeParseWithoutTZLayout = "2006-01-02 15:04:05" + // diffTimeFormatLayout is the layout used to format (i.e., print) the time in unified diff file // header timestamps. // See https://www.gnu.org/software/diffutils/manual/html_node/Detailed-Unified.html. diff --git a/vendor/github.com/sourcegraph/go-diff/diff/parse.go b/vendor/github.com/sourcegraph/go-diff/diff/parse.go index 8d5cfc238e..48eeb96702 100644 --- a/vendor/github.com/sourcegraph/go-diff/diff/parse.go +++ b/vendor/github.com/sourcegraph/go-diff/diff/parse.go @@ -23,14 +23,14 @@ func ParseMultiFileDiff(diff []byte) ([]*FileDiff, error) { // NewMultiFileDiffReader returns a new MultiFileDiffReader that reads // a multi-file unified diff from r. func NewMultiFileDiffReader(r io.Reader) *MultiFileDiffReader { - return &MultiFileDiffReader{reader: bufio.NewReader(r)} + return &MultiFileDiffReader{reader: newLineReader(r)} } // MultiFileDiffReader reads a multi-file unified diff. type MultiFileDiffReader struct { line int offset int64 - reader *bufio.Reader + reader *lineReader // TODO(sqs): line and offset tracking in multi-file diffs is broken; add tests and fix @@ -46,6 +46,14 @@ type MultiFileDiffReader struct { // all hunks) from r. If there are no more files in the diff, it // returns error io.EOF. func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) { + fd, _, err := r.ReadFileWithTrailingContent() + return fd, err +} + +// ReadFileWithTrailingContent reads the next file unified diff (including +// headers and all hunks) from r, also returning any trailing content. If there +// are no more files in the diff, it returns error io.EOF. +func (r *MultiFileDiffReader) ReadFileWithTrailingContent() (*FileDiff, string, error) { fr := &FileDiffReader{ line: r.line, offset: r.offset, @@ -59,23 +67,33 @@ func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) { switch e := err.(type) { case *ParseError: if e.Err == ErrNoFileHeader || e.Err == ErrExtendedHeadersEOF { - return nil, io.EOF + // Any non-diff content preceding a valid diff is included in the + // extended headers of the following diff. In this way, mixed diff / + // non-diff content can be parsed. Trailing non-diff content is + // different: it doesn't make sense to return a FileDiff with only + // extended headers populated. Instead, we return any trailing content + // in case the caller needs it. 
+ trailing := "" + if fd != nil { + trailing = strings.Join(fd.Extended, "\n") + } + return nil, trailing, io.EOF } - return nil, err + return nil, "", err case OverflowError: r.nextFileFirstLine = []byte(e) - return fd, nil + return fd, "", nil default: - return nil, err + return nil, "", err } } // FileDiff is added/deleted file // No further collection of hunks needed if fd.NewName == "" { - return fd, nil + return fd, "", nil } // Before reading hunks, check to see if there are any. If there @@ -85,9 +103,9 @@ func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) { // caused by the lack of any hunks, or a malformatted hunk, so we // need to perform the check here. hr := fr.HunksReader() - line, err := readLine(r.reader) + line, err := r.reader.readLine() if err != nil && err != io.EOF { - return fd, err + return fd, "", err } line = bytes.TrimSuffix(line, []byte{'\n'}) if bytes.HasPrefix(line, hunkPrefix) { @@ -101,10 +119,10 @@ func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) { // This just means we finished reading the hunks for the // current file. See the ErrBadHunkLine doc for more info. r.nextFileFirstLine = e.Line - return fd, nil + return fd, "", nil } } - return nil, err + return nil, "", err } } else { // There weren't any hunks, so that line we peeked ahead at @@ -112,7 +130,7 @@ func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) { r.nextFileFirstLine = line } - return fd, nil + return fd, "", nil } // ReadAllFiles reads all file unified diffs (including headers and all @@ -141,14 +159,14 @@ func ParseFileDiff(diff []byte) (*FileDiff, error) { // NewFileDiffReader returns a new FileDiffReader that reads a file // unified diff. func NewFileDiffReader(r io.Reader) *FileDiffReader { - return &FileDiffReader{reader: bufio.NewReader(r)} + return &FileDiffReader{reader: &lineReader{reader: bufio.NewReader(r)}} } // FileDiffReader reads a unified file diff. type FileDiffReader struct { line int offset int64 - reader *bufio.Reader + reader *lineReader // fileHeaderLine is the first file header line, set by: // @@ -236,7 +254,6 @@ func (r *FileDiffReader) ReadFileHeaders() (origName, newName string, origTimest "", nil, nil, nil } } - origName, origTimestamp, err = r.readOneFileHeader([]byte("--- ")) if err != nil { return "", "", nil, nil, err @@ -266,7 +283,7 @@ func (r *FileDiffReader) readOneFileHeader(prefix []byte) (filename string, time if r.fileHeaderLine == nil { var err error - line, err = readLine(r.reader) + line, err = r.reader.readLine() if err == io.EOF { return "", nil, &ParseError{r.line, r.offset, ErrNoFileHeader} } else if err != nil { @@ -289,10 +306,16 @@ func (r *FileDiffReader) readOneFileHeader(prefix []byte) (filename string, time parts := strings.SplitN(trimmedLine, "\t", 2) filename = parts[0] if len(parts) == 2 { + var ts time.Time // Timestamp is optional, but this header has it. 
-		ts, err := time.Parse(diffTimeParseLayout, parts[1])
+		ts, err = time.Parse(diffTimeParseLayout, parts[1])
 		if err != nil {
-			return "", nil, err
+			var err1 error
+			ts, err1 = time.Parse(diffTimeParseWithoutTZLayout, parts[1])
+			if err1 != nil {
+				return "", nil, err
+			}
+			err = nil
 		}
 		timestamp = &ts
 	}
@@ -318,7 +341,7 @@ func (r *FileDiffReader) ReadExtendedHeaders() ([]string, error) {
 		var line []byte
 		if r.fileHeaderLine == nil {
 			var err error
-			line, err = readLine(r.reader)
+			line, err = r.reader.readLine()
 			if err == io.EOF {
 				return xheaders, &ParseError{r.line, r.offset, ErrExtendedHeadersEOF}
 			} else if err != nil {
@@ -354,65 +377,192 @@ func (r *FileDiffReader) ReadExtendedHeaders() ([]string, error) {
 	}
 }
 
+// readQuotedFilename extracts a quoted filename from the beginning of a string,
+// returning the unquoted filename and any remaining text after the filename.
+func readQuotedFilename(text string) (value string, remainder string, err error) {
+	if text == "" || text[0] != '"' {
+		return "", "", fmt.Errorf(`string must start with a '"': %s`, text)
+	}
+
+	// The end quote is the first quote NOT preceded by an odd number of backslashes.
+	numberOfBackslashes := 0
+	for i, c := range text {
+		if c == '"' && i > 0 && numberOfBackslashes%2 == 0 {
+			value, err = strconv.Unquote(text[:i+1])
+			remainder = text[i+1:]
+			return
+		} else if c == '\\' {
+			numberOfBackslashes++
+		} else {
+			numberOfBackslashes = 0
+		}
+	}
+	return "", "", fmt.Errorf(`end of string found while searching for '"': %s`, text)
+}
+
+// parseDiffGitArgs extracts the two filenames from a 'diff --git' line.
+// Returns false on syntax error, true if syntax is valid. Even with a
+// valid syntax, it may be impossible to extract filenames; if so, the
+// function returns ("", "", true).
+func parseDiffGitArgs(diffArgs string) (string, string, bool) {
+	length := len(diffArgs)
+	if length < 3 {
+		return "", "", false
+	}
+
+	if diffArgs[0] != '"' && diffArgs[length-1] != '"' {
+		// Both filenames are unquoted.
+		firstSpace := strings.IndexByte(diffArgs, ' ')
+		if firstSpace <= 0 || firstSpace == length-1 {
+			return "", "", false
+		}
+
+		secondSpace := strings.IndexByte(diffArgs[firstSpace+1:], ' ')
+		if secondSpace == -1 {
+			if diffArgs[firstSpace+1] == '"' {
+				// The second filename begins with '"', but doesn't end with one.
+				return "", "", false
+			}
+			return diffArgs[:firstSpace], diffArgs[firstSpace+1:], true
+		}
+
+		// One or both filenames contain a space, but the names are
+		// unquoted. Here, the 'diff --git' syntax is ambiguous, and
+		// we have to obtain the filenames elsewhere (e.g. from the
+		// hunk headers or extended headers). HOWEVER, if the file
+		// is newly created and empty, there IS no other place to
+		// find the filename. In this case, the two filenames are
+		// identical (except for the leading 'a/' prefix), and we have
+		// to handle that case here.
+		first := diffArgs[:length/2]
+		second := diffArgs[length/2+1:]
+
+		// If the two strings could be equal, based on length, proceed.
+		if length%2 == 1 {
+			// If the name minus the a/ b/ prefixes is equal, proceed.
+			if len(first) >= 3 && first[1] == '/' && first[1:] == second[1:] {
+				return first, second, true
+			}
+			// If the names don't have the a/ and b/ prefixes and they're equal, proceed.
+			if !(first[:2] == "a/" && second[:2] == "b/") && first == second {
+				return first, second, true
+			}
+		}
+
+		// The syntax is (unfortunately) valid, but we could not extract
+		// the filenames.
+ return "", "", true + } + + if diffArgs[0] == '"' { + first, remainder, err := readQuotedFilename(diffArgs) + if err != nil || len(remainder) < 2 || remainder[0] != ' ' { + return "", "", false + } + if remainder[1] == '"' { + second, remainder, err := readQuotedFilename(remainder[1:]) + if remainder != "" || err != nil { + return "", "", false + } + return first, second, true + } + return first, remainder[1:], true + } + + // In this case, second argument MUST be quoted (or it's a syntax error) + i := strings.IndexByte(diffArgs, '"') + if i == -1 || i+2 >= length || diffArgs[i-1] != ' ' { + return "", "", false + } + + second, remainder, err := readQuotedFilename(diffArgs[i:]) + if remainder != "" || err != nil { + return "", "", false + } + return diffArgs[:i-1], second, true +} + // handleEmpty detects when FileDiff was an empty diff and will not have any hunks // that follow. It updates fd fields from the parsed extended headers. func handleEmpty(fd *FileDiff) (wasEmpty bool) { - var err error lineCount := len(fd.Extended) if lineCount > 0 && !strings.HasPrefix(fd.Extended[0], "diff --git ") { return false } - switch { - case (lineCount == 3 || lineCount == 4 && strings.HasPrefix(fd.Extended[3], "Binary files ") || lineCount > 4 && strings.HasPrefix(fd.Extended[3], "GIT binary patch")) && - strings.HasPrefix(fd.Extended[1], "new file mode "): - names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) + lineHasPrefix := func(idx int, prefix string) bool { + return strings.HasPrefix(fd.Extended[idx], prefix) + } + + linesHavePrefixes := func(idx1 int, prefix1 string, idx2 int, prefix2 string) bool { + return lineHasPrefix(idx1, prefix1) && lineHasPrefix(idx2, prefix2) + } + + isCopy := (lineCount == 4 && linesHavePrefixes(2, "copy from ", 3, "copy to ")) || + (lineCount == 6 && linesHavePrefixes(2, "copy from ", 3, "copy to ") && lineHasPrefix(5, "Binary files ")) || + (lineCount == 6 && linesHavePrefixes(1, "old mode ", 2, "new mode ") && linesHavePrefixes(4, "copy from ", 5, "copy to ")) + + isRename := (lineCount == 4 && linesHavePrefixes(2, "rename from ", 3, "rename to ")) || + (lineCount == 5 && linesHavePrefixes(2, "rename from ", 3, "rename to ") && lineHasPrefix(4, "Binary files ")) || + (lineCount == 6 && linesHavePrefixes(2, "rename from ", 3, "rename to ") && lineHasPrefix(5, "Binary files ")) || + (lineCount == 6 && linesHavePrefixes(1, "old mode ", 2, "new mode ") && linesHavePrefixes(4, "rename from ", 5, "rename to ")) + + isDeletedFile := (lineCount == 3 || lineCount == 4 && lineHasPrefix(3, "Binary files ") || lineCount > 4 && lineHasPrefix(3, "GIT binary patch")) && + lineHasPrefix(1, "deleted file mode ") + + isNewFile := (lineCount == 3 || lineCount == 4 && lineHasPrefix(3, "Binary files ") || lineCount > 4 && lineHasPrefix(3, "GIT binary patch")) && + lineHasPrefix(1, "new file mode ") + + isModeChange := lineCount == 3 && linesHavePrefixes(1, "old mode ", 2, "new mode ") + + isBinaryPatch := lineCount == 3 && lineHasPrefix(2, "Binary files ") || lineCount > 3 && lineHasPrefix(2, "GIT binary patch") + + if !isModeChange && !isCopy && !isRename && !isBinaryPatch && !isNewFile && !isDeletedFile { + return false + } + + var success bool + fd.OrigName, fd.NewName, success = parseDiffGitArgs(fd.Extended[0][len("diff --git "):]) + if isNewFile { fd.OrigName = "/dev/null" - fd.NewName, err = strconv.Unquote(names[1]) - if err != nil { - fd.NewName = names[1] - } - return true - case (lineCount == 3 || lineCount == 4 && strings.HasPrefix(fd.Extended[3], 
"Binary files ") || lineCount > 4 && strings.HasPrefix(fd.Extended[3], "GIT binary patch")) && - strings.HasPrefix(fd.Extended[1], "deleted file mode "): + } - names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) - fd.OrigName, err = strconv.Unquote(names[0]) - if err != nil { - fd.OrigName = names[0] - } + if isDeletedFile { fd.NewName = "/dev/null" - return true - case lineCount == 4 && strings.HasPrefix(fd.Extended[2], "rename from ") && strings.HasPrefix(fd.Extended[3], "rename to "): - names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) - fd.OrigName, err = strconv.Unquote(names[0]) - if err != nil { - fd.OrigName = names[0] - } - fd.NewName, err = strconv.Unquote(names[1]) - if err != nil { - fd.NewName = names[1] - } - return true - case lineCount == 6 && strings.HasPrefix(fd.Extended[5], "Binary files ") && strings.HasPrefix(fd.Extended[2], "rename from ") && strings.HasPrefix(fd.Extended[3], "rename to "): - names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) - fd.OrigName = names[0] - fd.NewName = names[1] - return true - case lineCount == 3 && strings.HasPrefix(fd.Extended[2], "Binary files ") || lineCount > 3 && strings.HasPrefix(fd.Extended[2], "GIT binary patch"): - names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) - fd.OrigName, err = strconv.Unquote(names[0]) - if err != nil { - fd.OrigName = names[0] + } + + // For ambiguous 'diff --git' lines, try to reconstruct filenames using extended headers. + if success && (isCopy || isRename) && fd.OrigName == "" && fd.NewName == "" { + diffArgs := fd.Extended[0][len("diff --git "):] + + tryReconstruct := func(header string, prefix string, whichFile int, result *string) { + if !strings.HasPrefix(header, prefix) { + return + } + rawFilename := header[len(prefix):] + + // extract the filename prefix (e.g. "a/") from the 'diff --git' line. + var prefixLetterIndex int + if whichFile == 1 { + prefixLetterIndex = 0 + } else if whichFile == 2 { + prefixLetterIndex = len(diffArgs) - len(rawFilename) - 2 + } + if prefixLetterIndex < 0 || diffArgs[prefixLetterIndex+1] != '/' { + return + } + + *result = diffArgs[prefixLetterIndex:prefixLetterIndex+2] + rawFilename } - fd.NewName, err = strconv.Unquote(names[1]) - if err != nil { - fd.NewName = names[1] + + for _, header := range fd.Extended { + tryReconstruct(header, "copy from ", 1, &fd.OrigName) + tryReconstruct(header, "copy to ", 2, &fd.NewName) + tryReconstruct(header, "rename from ", 1, &fd.OrigName) + tryReconstruct(header, "rename to ", 2, &fd.NewName) } - return true - default: - return false } + return success } var ( @@ -447,7 +597,7 @@ func ParseHunks(diff []byte) ([]*Hunk, error) { // NewHunksReader returns a new HunksReader that reads unified diff hunks // from r. func NewHunksReader(r io.Reader) *HunksReader { - return &HunksReader{reader: bufio.NewReader(r)} + return &HunksReader{reader: &lineReader{reader: bufio.NewReader(r)}} } // A HunksReader reads hunks from a unified diff. 
@@ -455,7 +605,7 @@ type HunksReader struct { line int offset int64 hunk *Hunk - reader *bufio.Reader + reader *lineReader nextHunkHeaderLine []byte } @@ -474,7 +624,7 @@ func (r *HunksReader) ReadHunk() (*Hunk, error) { line = r.nextHunkHeaderLine r.nextHunkHeaderLine = nil } else { - line, err = readLine(r.reader) + line, err = r.reader.readLine() if err != nil { if err == io.EOF && r.hunk != nil { return r.hunk, nil @@ -518,12 +668,15 @@ func (r *HunksReader) ReadHunk() (*Hunk, error) { // If the line starts with `---` and the next one with `+++` we're // looking at a non-extended file header and need to abort. if bytes.HasPrefix(line, []byte("---")) { - ok, err := peekPrefix(r.reader, "+++") + ok, err := r.reader.nextLineStartsWith("+++") if err != nil { return r.hunk, err } if ok { - return r.hunk, &ParseError{r.line, r.offset, &ErrBadHunkLine{Line: line}} + ok2, _ := r.reader.nextNextLineStartsWith(string(hunkPrefix)) + if ok2 { + return r.hunk, &ParseError{r.line, r.offset, &ErrBadHunkLine{Line: line}} + } } } @@ -593,19 +746,6 @@ func linePrefix(c byte) bool { return false } -// peekPrefix peeks into the given reader to check whether the next -// bytes match the given prefix. -func peekPrefix(reader *bufio.Reader, prefix string) (bool, error) { - next, err := reader.Peek(len(prefix)) - if err != nil { - if err == io.EOF { - return false, nil - } - return false, err - } - return bytes.HasPrefix(next, []byte(prefix)), nil -} - // normalizeHeader takes a header of the form: // "@@ -linestart[,chunksize] +linestart[,chunksize] @@ section" // and returns two strings, with the first in the form: diff --git a/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go b/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go index 395fb7baf9..45300252b7 100644 --- a/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go +++ b/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go @@ -2,9 +2,92 @@ package diff import ( "bufio" + "bytes" + "errors" "io" ) +var ErrLineReaderUninitialized = errors.New("line reader not initialized") + +func newLineReader(r io.Reader) *lineReader { + return &lineReader{reader: bufio.NewReader(r)} +} + +// lineReader is a wrapper around a bufio.Reader that caches the next line to +// provide lookahead functionality for the next two lines. +type lineReader struct { + reader *bufio.Reader + + cachedNextLine []byte + cachedNextLineErr error +} + +// readLine returns the next unconsumed line and advances the internal cache of +// the lineReader. +func (l *lineReader) readLine() ([]byte, error) { + if l.cachedNextLine == nil && l.cachedNextLineErr == nil { + l.cachedNextLine, l.cachedNextLineErr = readLine(l.reader) + } + + if l.cachedNextLineErr != nil { + return nil, l.cachedNextLineErr + } + + next := l.cachedNextLine + + l.cachedNextLine, l.cachedNextLineErr = readLine(l.reader) + + return next, nil +} + +// nextLineStartsWith looks at the line that would be returned by the next call +// to readLine to check whether it has the given prefix. +// +// io.EOF and bufio.ErrBufferFull errors are ignored so that the function can +// be used when at the end of the file. +func (l *lineReader) nextLineStartsWith(prefix string) (bool, error) { + if l.cachedNextLine == nil && l.cachedNextLineErr == nil { + l.cachedNextLine, l.cachedNextLineErr = readLine(l.reader) + } + + return l.lineHasPrefix(l.cachedNextLine, prefix, l.cachedNextLineErr) +} + +// nextNextLineStartsWith checks the prefix of the line *after* the line that +// would be returned by the next readLine. 
+// +// io.EOF and bufio.ErrBufferFull errors are ignored so that the function can +// be used when at the end of the file. +// +// The lineReader MUST be initialized by calling readLine at least once before +// calling nextLineStartsWith. Otherwise ErrLineReaderUninitialized will be +// returned. +func (l *lineReader) nextNextLineStartsWith(prefix string) (bool, error) { + if l.cachedNextLine == nil && l.cachedNextLineErr == nil { + l.cachedNextLine, l.cachedNextLineErr = readLine(l.reader) + } + + next, err := l.reader.Peek(len(prefix)) + return l.lineHasPrefix(next, prefix, err) +} + +// lineHasPrefix checks whether the given line has the given prefix with +// bytes.HasPrefix. +// +// The readErr should be the error that was returned when the line was read. +// lineHasPrefix checks the error to adjust its return value to, e.g., return +// false and ignore the error when readErr is io.EOF. +func (l *lineReader) lineHasPrefix(line []byte, prefix string, readErr error) (bool, error) { + if readErr != nil { + if readErr == io.EOF || readErr == bufio.ErrBufferFull { + return false, nil + } + return false, readErr + } + + return bytes.HasPrefix(line, []byte(prefix)), nil +} + // readLine is a helper that mimics the functionality of calling bufio.Scanner.Scan() and // bufio.Scanner.Bytes(), but without the token size limitation. It will read and return // the next line in the Reader with the trailing newline stripped. It will return an diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml deleted file mode 100644 index 1459644981..0000000000 --- a/vendor/github.com/spf13/afero/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -sudo: false -language: go - -go: - - "1.13" - - "1.14" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build -v ./... - - go test -count=1 -cover -race -v ./... - - go vet ./... - - FILES=$(gofmt -s -l . zipfs sftpfs mem tarfs); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index c3e807aef8..3bafbfdfca 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -2,7 +2,7 @@ A FileSystem Abstraction System for Go -[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) # Overview @@ -33,7 +33,7 @@ filesystem for full interoperability. 
 * Support for compositional (union) file systems by combining multiple file systems acting as one
 * Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
 * A set of utility functions ported from io, ioutil & hugo to be afero aware
-
+* A wrapper for the Go 1.16 filesystem abstraction, `io/fs.FS`
 
 # Using Afero
 
@@ -79,11 +79,11 @@ would.
 
 So if my application before had:
 
 ```go
-os.Open('/tmp/foo')
+os.Open("/tmp/foo")
 ```
 We would replace it with:
 ```go
-AppFs.Open('/tmp/foo')
+AppFs.Open("/tmp/foo")
 ```
 
 `AppFs` being the variable we defined above.
@@ -94,6 +94,7 @@ AppFs.Open('/tmp/foo')
 File System Methods Available:
 ```go
 Chmod(name string, mode os.FileMode) : error
+Chown(name string, uid, gid int) : error
 Chtimes(name string, atime time.Time, mtime time.Time) : error
 Create(name string) : File, error
 Mkdir(name string, perm os.FileMode) : error
@@ -258,6 +259,18 @@ system using InMemoryFile.
 Afero has experimental support for secure file transfer protocol (sftp). Which can be used to perform file operations over a encrypted channel.
 
+### GCSFs
+
+Afero has experimental support for Google Cloud Storage (GCS). You can either set the
+`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in
+`NewGcsFS` to configure access to your GCS bucket.
+
+Some known limitations of the existing implementation:
+* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version.
+* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that is left for another version.
+* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent.
+
+
 ## Filtering Backends
 
 ### BasePathFs
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go
index f5b5e127cd..39f6585209 100644
--- a/vendor/github.com/spf13/afero/afero.go
+++ b/vendor/github.com/spf13/afero/afero.go
@@ -91,17 +91,20 @@ type Fs interface {
 	// The name of this FileSystem
 	Name() string
 
-	//Chmod changes the mode of the named file to mode.
+	// Chmod changes the mode of the named file to mode.
 	Chmod(name string, mode os.FileMode) error
 
-	//Chtimes changes the access and modification times of the named file
+	// Chown changes the uid and gid of the named file.
+	Chown(name string, uid, gid int) error
+
+	// Chtimes changes the access and modification times of the named file
 	Chtimes(name string, atime time.Time, mtime time.Time) error
 }
 
 var (
 	ErrFileClosed        = errors.New("File is closed")
-	ErrOutOfRange        = errors.New("Out of range")
-	ErrTooLarge          = errors.New("Too large")
+	ErrOutOfRange        = errors.New("out of range")
+	ErrTooLarge          = errors.New("too large")
 	ErrFileNotFound      = os.ErrNotExist
 	ErrFileExists        = os.ErrExist
 	ErrDestinationExists = os.ErrExist
diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml
index 5d2f34bf16..65e20e8ca3 100644
--- a/vendor/github.com/spf13/afero/appveyor.yml
+++ b/vendor/github.com/spf13/afero/appveyor.yml
@@ -1,3 +1,5 @@
+# This currently does nothing. We have moved to GitHub Actions, but this is kept
+# until spf13 has disabled this project in AppVeyor.
version: '{build}' clone_folder: C:\gopath\src\github.com\spf13\afero environment: @@ -6,10 +8,3 @@ build_script: - cmd: >- go version - go env - - go get -v github.com/spf13/afero/... - - go build -v github.com/spf13/afero/... -test_script: -- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go index 3a14b833e4..2e72793a3e 100644 --- a/vendor/github.com/spf13/afero/basepath.go +++ b/vendor/github.com/spf13/afero/basepath.go @@ -1,6 +1,7 @@ package afero import ( + "io/fs" "os" "path/filepath" "runtime" @@ -8,7 +9,10 @@ import ( "time" ) -var _ Lstater = (*BasePathFs)(nil) +var ( + _ Lstater = (*BasePathFs)(nil) + _ fs.ReadDirFile = (*BasePathFile)(nil) +) // The BasePathFs restricts all operations to a given path within an Fs. // The given file name to the operations on this Fs will be prepended with @@ -33,6 +37,13 @@ func (f *BasePathFile) Name() string { return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) } +func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) { + if rdf, ok := f.File.(fs.ReadDirFile); ok { + return rdf.ReadDir(n) + } + return readDirFile{f.File}.ReadDir(n) +} + func NewBasePathFs(source Fs, path string) Fs { return &BasePathFs{source: source, path: path} } @@ -83,6 +94,13 @@ func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { return b.source.Chmod(name, mode) } +func (b *BasePathFs) Chown(name string, uid, gid int) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chown", Path: name, Err: err} + } + return b.source.Chown(name, uid, gid) +} + func (b *BasePathFs) Name() string { return "BasePathFs" } @@ -202,5 +220,3 @@ func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) { } return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} } - -// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go index 29a26c67dd..017d344fd5 100644 --- a/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -75,6 +75,10 @@ func (u *CacheOnReadFs) copyToLayer(name string) error { return copyToLayer(u.base, u.layer, name) } +func (u *CacheOnReadFs) copyFileToLayer(name string, flag int, perm os.FileMode) error { + return copyFileToLayer(u.base, u.layer, name, flag, perm) +} + func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { st, _, err := u.cacheStatus(name) if err != nil { @@ -117,6 +121,27 @@ func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { return u.layer.Chmod(name, mode) } +func (u *CacheOnReadFs) Chown(name string, uid, gid int) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chown(name, uid, gid) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chown(name, uid, gid) + } + if err != nil { + return err + } + return u.layer.Chown(name, uid, gid) +} + func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { st, fi, err := u.cacheStatus(name) if err != nil { @@ -191,7 +216,7 @@ func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, switch st { case cacheLocal, cacheHit: default: - if err := u.copyToLayer(name); err != nil { + if err := u.copyFileToLayer(name, flag, perm); err != nil { return nil, 
err } } diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go index 18b45824be..30855de572 100644 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -11,7 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aix darwin openbsd freebsd netbsd dragonfly +//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly || zos +// +build aix darwin openbsd freebsd netbsd dragonfly zos package afero diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go index 2b850e4ddb..12792d21e2 100644 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -10,12 +10,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// +build !darwin -// +build !openbsd -// +build !freebsd -// +build !dragonfly -// +build !netbsd -// +build !aix +//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix && !zos +// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix,!zos package afero diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go index 96b7701261..184d6dd702 100644 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -14,7 +14,7 @@ var _ Lstater = (*CopyOnWriteFs)(nil) // a possibly writeable layer on top. Changes to the file system will only // be made in the overlay: Changing an existing file in the base layer which // is not present in the overlay will copy the file to the overlay ("changing" -// includes also calls to e.g. Chtimes() and Chmod()). +// includes also calls to e.g. Chtimes(), Chmod() and Chown()). // // Reading directories is currently only supported via Open(), not OpenFile(). type CopyOnWriteFs struct { @@ -75,6 +75,19 @@ func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { return u.layer.Chmod(name, mode) } +func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chown(name, uid, gid) +} + func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { fi, err := u.layer.Stat(name) if err != nil { @@ -210,7 +223,7 @@ func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, return nil, err } if isaDir { - if err = u.layer.MkdirAll(dir, 0777); err != nil { + if err = u.layer.MkdirAll(dir, 0o777); err != nil { return nil, err } return u.layer.OpenFile(name, flag, perm) @@ -234,8 +247,9 @@ func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, // This function handles the 9 different possibilities caused // by the union which are the intersection of the following... 
-// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory +// +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory func (u *CopyOnWriteFs) Open(name string) (File, error) { // Since the overlay overrides the base we check that first b, err := u.isBaseFile(name) @@ -309,5 +323,5 @@ func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { } func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o666) } diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go index c42193688c..ac0de6d51f 100644 --- a/vendor/github.com/spf13/afero/httpFs.go +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -29,7 +29,7 @@ type httpDir struct { } func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) || strings.Contains(name, "\x00") { return nil, errors.New("http: invalid character in file path") } @@ -67,6 +67,10 @@ func (h HttpFs) Chmod(name string, mode os.FileMode) error { return h.source.Chmod(name, mode) } +func (h HttpFs) Chown(name string, uid, gid int) error { + return h.source.Chown(name, uid, gid) +} + func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { return h.source.Chtimes(name, atime, mtime) } diff --git a/vendor/github.com/spf13/afero/internal/common/adapters.go b/vendor/github.com/spf13/afero/internal/common/adapters.go new file mode 100644 index 0000000000..60685caa54 --- /dev/null +++ b/vendor/github.com/spf13/afero/internal/common/adapters.go @@ -0,0 +1,27 @@ +// Copyright © 2022 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package common + +import "io/fs" + +// FileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry +type FileInfoDirEntry struct { + fs.FileInfo +} + +var _ fs.DirEntry = FileInfoDirEntry{} + +func (d FileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } + +func (d FileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go new file mode 100644 index 0000000000..938b9316e6 --- /dev/null +++ b/vendor/github.com/spf13/afero/iofs.go @@ -0,0 +1,298 @@ +//go:build go1.16 +// +build go1.16 + +package afero + +import ( + "io" + "io/fs" + "os" + "path" + "sort" + "time" + + "github.com/spf13/afero/internal/common" +) + +// IOFS adopts afero.Fs to stdlib io/fs.FS +type IOFS struct { + Fs +} + +func NewIOFS(fs Fs) IOFS { + return IOFS{Fs: fs} +} + +var ( + _ fs.FS = IOFS{} + _ fs.GlobFS = IOFS{} + _ fs.ReadDirFS = IOFS{} + _ fs.ReadFileFS = IOFS{} + _ fs.StatFS = IOFS{} + _ fs.SubFS = IOFS{} +) + +func (iofs IOFS) Open(name string) (fs.File, error) { + const op = "open" + + // by convention for fs.FS implementations we should perform this check + if !fs.ValidPath(name) { + return nil, iofs.wrapError(op, name, fs.ErrInvalid) + } + + file, err := iofs.Fs.Open(name) + if err != nil { + return nil, iofs.wrapError(op, name, err) + } + + // file should implement fs.ReadDirFile + if _, ok := file.(fs.ReadDirFile); !ok { + file = readDirFile{file} + } + + return file, nil +} + +func (iofs IOFS) Glob(pattern string) ([]string, error) { + const op = "glob" + + // afero.Glob does not perform this check but it's required for implementations + if _, err := path.Match(pattern, ""); err != nil { + return nil, iofs.wrapError(op, pattern, err) + } + + items, err := Glob(iofs.Fs, pattern) + if err != nil { + return nil, iofs.wrapError(op, pattern, err) + } + + return items, nil +} + +func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) { + f, err := iofs.Fs.Open(name) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + + defer f.Close() + + if rdf, ok := f.(fs.ReadDirFile); ok { + items, err := rdf.ReadDir(-1) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() }) + return items, nil + } + + items, err := f.Readdir(-1) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + sort.Sort(byName(items)) + + ret := make([]fs.DirEntry, len(items)) + for i := range items { + ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} + } + + return ret, nil +} + +func (iofs IOFS) ReadFile(name string) ([]byte, error) { + const op = "readfile" + + if !fs.ValidPath(name) { + return nil, iofs.wrapError(op, name, fs.ErrInvalid) + } + + bytes, err := ReadFile(iofs.Fs, name) + if err != nil { + return nil, iofs.wrapError(op, name, err) + } + + return bytes, nil +} + +func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil } + +func (IOFS) wrapError(op, path string, err error) error { + if _, ok := err.(*fs.PathError); ok { + return err // don't need to wrap again + } + + return &fs.PathError{ + Op: op, + Path: path, + Err: err, + } +} + +// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open +type readDirFile struct { + File +} + +var _ fs.ReadDirFile = readDirFile{} + +func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { + items, err := r.File.Readdir(n) + if err != nil { 
+ return nil, err + } + + ret := make([]fs.DirEntry, len(items)) + for i := range items { + ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} + } + + return ret, nil +} + +// FromIOFS adopts io/fs.FS to use it as afero.Fs +// Note that io/fs.FS is read-only so all mutating methods will return fs.PathError with fs.ErrPermission +// To store modifications you may use afero.CopyOnWriteFs +type FromIOFS struct { + fs.FS +} + +var _ Fs = FromIOFS{} + +func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } + +func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } + +func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { + return notImplemented("mkdirall", path) +} + +func (f FromIOFS) Open(name string) (File, error) { + file, err := f.FS.Open(name) + if err != nil { + return nil, err + } + + return fromIOFSFile{File: file, name: name}, nil +} + +func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return f.Open(name) +} + +func (f FromIOFS) Remove(name string) error { + return notImplemented("remove", name) +} + +func (f FromIOFS) RemoveAll(path string) error { + return notImplemented("removeall", path) +} + +func (f FromIOFS) Rename(oldname, newname string) error { + return notImplemented("rename", oldname) +} + +func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) } + +func (f FromIOFS) Name() string { return "fromiofs" } + +func (f FromIOFS) Chmod(name string, mode os.FileMode) error { + return notImplemented("chmod", name) +} + +func (f FromIOFS) Chown(name string, uid, gid int) error { + return notImplemented("chown", name) +} + +func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error { + return notImplemented("chtimes", name) +} + +type fromIOFSFile struct { + fs.File + name string +} + +func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) { + readerAt, ok := f.File.(io.ReaderAt) + if !ok { + return -1, notImplemented("readat", f.name) + } + + return readerAt.ReadAt(p, off) +} + +func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) { + seeker, ok := f.File.(io.Seeker) + if !ok { + return -1, notImplemented("seek", f.name) + } + + return seeker.Seek(offset, whence) +} + +func (f fromIOFSFile) Write(p []byte) (n int, err error) { + return -1, notImplemented("write", f.name) +} + +func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) { + return -1, notImplemented("writeat", f.name) +} + +func (f fromIOFSFile) Name() string { return f.name } + +func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { + rdfile, ok := f.File.(fs.ReadDirFile) + if !ok { + return nil, notImplemented("readdir", f.name) + } + + entries, err := rdfile.ReadDir(count) + if err != nil { + return nil, err + } + + ret := make([]os.FileInfo, len(entries)) + for i := range entries { + ret[i], err = entries[i].Info() + + if err != nil { + return nil, err + } + } + + return ret, nil +} + +func (f fromIOFSFile) Readdirnames(n int) ([]string, error) { + rdfile, ok := f.File.(fs.ReadDirFile) + if !ok { + return nil, notImplemented("readdir", f.name) + } + + entries, err := rdfile.ReadDir(n) + if err != nil { + return nil, err + } + + ret := make([]string, len(entries)) + for i := range entries { + ret[i] = entries[i].Name() + } + + return ret, nil +} + +func (f fromIOFSFile) Sync() error { return nil } + +func (f fromIOFSFile) Truncate(size int64) error { + return 
notImplemented("truncate", f.name) +} + +func (f fromIOFSFile) WriteString(s string) (ret int, err error) { + return -1, notImplemented("writestring", f.name) +} + +func notImplemented(op, path string) error { + return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission} +} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go index a403133e27..fa6abe1eee 100644 --- a/vendor/github.com/spf13/afero/ioutil.go +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -141,8 +141,10 @@ func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { // We generate random temporary file names so that there's a good // chance the file doesn't exist yet - keeps the number of tries in // TempFile to a minimum. -var rand uint32 -var randmu sync.Mutex +var ( + randNum uint32 + randmu sync.Mutex +) func reseed() uint32 { return uint32(time.Now().UnixNano() + int64(os.Getpid())) @@ -150,12 +152,12 @@ func reseed() uint32 { func nextRandom() string { randmu.Lock() - r := rand + r := randNum if r == 0 { r = reseed() } r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r + randNum = r randmu.Unlock() return strconv.Itoa(int(1e9 + r%1e9))[1:] } @@ -190,11 +192,11 @@ func TempFile(fs Fs, dir, pattern string) (f File, err error) { nconflict := 0 for i := 0; i < 10000; i++ { name := filepath.Join(dir, prefix+nextRandom()+suffix) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() - rand = reseed() + randNum = reseed() randmu.Unlock() } continue @@ -214,6 +216,7 @@ func TempFile(fs Fs, dir, pattern string) (f File, err error) { func (a Afero) TempDir(dir, prefix string) (name string, err error) { return TempDir(a.Fs, dir, prefix) } + func TempDir(fs Fs, dir, prefix string) (name string, err error) { if dir == "" { dir = os.TempDir() @@ -222,11 +225,11 @@ func TempDir(fs Fs, dir, prefix string) (name string, err error) { nconflict := 0 for i := 0; i < 10000; i++ { try := filepath.Join(dir, prefix+nextRandom()) - err = fs.Mkdir(try, 0700) + err = fs.Mkdir(try, 0o700) if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() - rand = reseed() + randNum = reseed() randmu.Unlock() } continue diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go index 07b2e12ae5..62fe4498e1 100644 --- a/vendor/github.com/spf13/afero/mem/file.go +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -18,16 +18,20 @@ import ( "bytes" "errors" "io" + "io/fs" "os" "path/filepath" "sync" "sync/atomic" -) + "time" -import "time" + "github.com/spf13/afero/internal/common" +) const FilePathSeparator = string(filepath.Separator) +var _ fs.ReadDirFile = &File{} + type File struct { // atomic requires 64-bit alignment for struct field access at int64 @@ -57,6 +61,8 @@ type FileData struct { dir bool mode os.FileMode modtime time.Time + uid int + gid int } func (d *FileData) Name() string { @@ -70,7 +76,7 @@ func CreateFile(name string) *FileData { } func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true} + return &FileData{name: name, memDir: &DirMap{}, dir: true, modtime: time.Now()} } func ChangeFileName(f *FileData, newname string) { @@ -95,6 +101,18 @@ func setModTime(f *FileData, mtime time.Time) { f.modtime = mtime } +func SetUID(f *FileData, uid int) { + f.Lock() + f.uid = uid + f.Unlock() +} + +func SetGID(f *FileData, 
gid int) { + f.Lock() + f.gid = gid + f.Unlock() +} + func GetFileInfo(f *FileData) *FileInfo { return &FileInfo{f} } @@ -170,10 +188,23 @@ func (f *File) Readdirnames(n int) (names []string, err error) { return names, err } +// Implements fs.ReadDirFile +func (f *File) ReadDir(n int) ([]fs.DirEntry, error) { + fi, err := f.Readdir(n) + if err != nil { + return nil, err + } + di := make([]fs.DirEntry, len(fi)) + for i, f := range fi { + di[i] = common.FileInfoDirEntry{FileInfo: f} + } + return di, nil +} + func (f *File) Read(b []byte) (n int, err error) { f.fileData.Lock() defer f.fileData.Unlock() - if f.closed == true { + if f.closed { return 0, ErrFileClosed } if len(b) > 0 && int(f.at) == len(f.fileData.data) { @@ -201,7 +232,7 @@ func (f *File) ReadAt(b []byte, off int64) (n int, err error) { } func (f *File) Truncate(size int64) error { - if f.closed == true { + if f.closed { return ErrFileClosed } if f.readOnly { @@ -210,9 +241,11 @@ func (f *File) Truncate(size int64) error { if size < 0 { return ErrOutOfRange } + f.fileData.Lock() + defer f.fileData.Unlock() if size > int64(len(f.fileData.data)) { diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{0o0}, int(diff))...) } else { f.fileData.data = f.fileData.data[0:size] } @@ -221,7 +254,7 @@ func (f *File) Truncate(size int64) error { } func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed == true { + if f.closed { return 0, ErrFileClosed } switch whence { @@ -236,7 +269,7 @@ func (f *File) Seek(offset int64, whence int) (int64, error) { } func (f *File) Write(b []byte) (n int, err error) { - if f.closed == true { + if f.closed { return 0, ErrFileClosed } if f.readOnly { @@ -252,7 +285,7 @@ func (f *File) Write(b []byte) (n int, err error) { tail = f.fileData.data[n+int(cur):] } if diff > 0 { - f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) + f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) f.fileData.data = append(f.fileData.data, tail...) } else { f.fileData.data = append(f.fileData.data[:cur], b...) @@ -288,16 +321,19 @@ func (s *FileInfo) Name() string { s.Unlock() return name } + func (s *FileInfo) Mode() os.FileMode { s.Lock() defer s.Unlock() return s.mode } + func (s *FileInfo) ModTime() time.Time { s.Lock() defer s.Unlock() return s.modtime } + func (s *FileInfo) IsDir() bool { s.Lock() defer s.Unlock() @@ -315,8 +351,8 @@ func (s *FileInfo) Size() int64 { var ( ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") + ErrOutOfRange = errors.New("out of range") + ErrTooLarge = errors.New("too large") ErrFileNotFound = os.ErrNotExist ErrFileExists = os.ErrExist ErrDestinationExists = os.ErrExist diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go index 0fa9592499..d6c744e8d5 100644 --- a/vendor/github.com/spf13/afero/memmap.go +++ b/vendor/github.com/spf13/afero/memmap.go @@ -15,9 +15,13 @@ package afero import ( "fmt" + "io" + "log" "os" "path/filepath" + + "sort" "strings" "sync" "time" @@ -43,7 +47,7 @@ func (m *MemMapFs) getData() map[string]*mem.FileData { // Root should always exist, right? // TODO: what about windows? 
root := mem.CreateDir(FilePathSeparator) - mem.SetMode(root, os.ModeDir|0755) + mem.SetMode(root, os.ModeDir|0o755) m.data[FilePathSeparator] = root }) return m.data @@ -87,6 +91,24 @@ func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { return pfile } +func (m *MemMapFs) findDescendants(name string) []*mem.FileData { + fData := m.getData() + descendants := make([]*mem.FileData, 0, len(fData)) + for p, dFile := range fData { + if strings.HasPrefix(p, name+FilePathSeparator) { + descendants = append(descendants, dFile) + } + } + + sort.Slice(descendants, func(i, j int) bool { + cur := len(strings.Split(descendants[i].Name(), FilePathSeparator)) + next := len(strings.Split(descendants[j].Name(), FilePathSeparator)) + return cur < next + }) + + return descendants +} + func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { if f == nil { return @@ -96,12 +118,12 @@ func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { pdir := filepath.Dir(filepath.Clean(f.Name())) err := m.lockfreeMkdir(pdir, perm) if err != nil { - //log.Println("Mkdir error:", err) + // log.Println("Mkdir error:", err) return } parent, err = m.lockfreeOpen(pdir) if err != nil { - //log.Println("Open after Mkdir error:", err) + // log.Println("Open after Mkdir error:", err) return } } @@ -142,6 +164,11 @@ func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { } m.mu.Lock() + // Double-check that it doesn't exist. + if _, ok := m.getData()[name]; ok { + m.mu.Unlock() + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } item := mem.CreateDir(name) mem.SetMode(item, os.ModeDir|perm) m.getData()[name] = item @@ -232,7 +259,7 @@ func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, erro file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) } if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, os.SEEK_END) + _, err = file.Seek(0, io.SeekEnd) if err != nil { file.Close() return nil, err @@ -279,7 +306,7 @@ func (m *MemMapFs) RemoveAll(path string) error { defer m.mu.RUnlock() for p := range m.getData() { - if strings.HasPrefix(p, path) { + if p == path || strings.HasPrefix(p, path+FilePathSeparator) { m.mu.RUnlock() m.mu.Lock() delete(m.getData(), p) @@ -303,11 +330,22 @@ func (m *MemMapFs) Rename(oldname, newname string) error { if _, ok := m.getData()[oldname]; ok { m.mu.RUnlock() m.mu.Lock() - m.unRegisterWithParent(oldname) + err := m.unRegisterWithParent(oldname) + if err != nil { + return err + } + fileData := m.getData()[oldname] - delete(m.getData(), oldname) mem.ChangeFileName(fileData, newname) m.getData()[newname] = fileData + + err = m.renameDescendants(oldname, newname) + if err != nil { + return err + } + + delete(m.getData(), oldname) + m.registerWithParent(fileData, 0) m.mu.Unlock() m.mu.RLock() @@ -317,6 +355,29 @@ func (m *MemMapFs) Rename(oldname, newname string) error { return nil } +func (m *MemMapFs) renameDescendants(oldname, newname string) error { + descendants := m.findDescendants(oldname) + removes := make([]string, 0, len(descendants)) + for _, desc := range descendants { + descNewName := strings.Replace(desc.Name(), oldname, newname, 1) + err := m.unRegisterWithParent(desc.Name()) + if err != nil { + return err + } + + removes = append(removes, desc.Name()) + mem.ChangeFileName(desc, descNewName) + m.getData()[descNewName] = desc + + m.registerWithParent(desc, 0) + } + for _, r := range removes { + delete(m.getData(), r) + } + + return nil +} + func (m *MemMapFs) LstatIfPossible(name string)
(os.FileInfo, bool, error) { fileInfo, err := m.Stat(name) return fileInfo, false, err @@ -363,6 +424,22 @@ func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error { return nil } +func (m *MemMapFs) Chown(name string, uid, gid int) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound} + } + + mem.SetUID(f, uid) + mem.SetGID(f, gid) + + return nil +} + func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { name = normalizePath(name) @@ -386,9 +463,3 @@ func (m *MemMapFs) List() { fmt.Println(x.Name(), y.Size()) } } - -// func debugMemMapList(fs Fs) { -// if x, ok := fs.(*MemMapFs); ok { -// x.List() -// } -// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go index 4761db5d72..f1366321ec 100644 --- a/vendor/github.com/spf13/afero/os.go +++ b/vendor/github.com/spf13/afero/os.go @@ -91,6 +91,10 @@ func (OsFs) Chmod(name string, mode os.FileMode) error { return os.Chmod(name, mode) } +func (OsFs) Chown(name string, uid, gid int) error { + return os.Chown(name, uid, gid) +} + func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { return os.Chtimes(name, atime, mtime) } diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go index f94b181b6c..bd8f9264dd 100644 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ b/vendor/github.com/spf13/afero/readonlyfs.go @@ -28,6 +28,10 @@ func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { return syscall.EPERM } +func (r *ReadOnlyFs) Chown(n string, uid, gid int) error { + return syscall.EPERM +} + func (r *ReadOnlyFs) Name() string { return "ReadOnlyFilter" } diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go index c8fc008670..218f3b235b 100644 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -10,7 +10,6 @@ import ( // The RegexpFs filters files (not directories) by regular expression. Only // files matching the given regexp will be allowed, all others get a ENOENT error ( // "No such file or directory"). -// type RegexpFs struct { re *regexp.Regexp source Fs @@ -60,6 +59,13 @@ func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { return r.source.Chmod(name, mode) } +func (r *RegexpFs) Chown(name string, uid, gid int) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chown(name, uid, gid) +} + func (r *RegexpFs) Name() string { return "RegexpFs" } diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go index d1c6ea53d9..aa6ae125b6 100644 --- a/vendor/github.com/spf13/afero/symlink.go +++ b/vendor/github.com/spf13/afero/symlink.go @@ -21,9 +21,9 @@ import ( // filesystems saying so. 
// It indicates support for 3 symlink related interfaces that implement the // behaviors of the os methods: -// - Lstat -// - Symlink, and -// - Readlink +// - Lstat +// - Symlink, and +// - Readlink type Symlinker interface { Lstater Linker diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go index 985363eea7..62dd6c93c8 100644 --- a/vendor/github.com/spf13/afero/unionFile.go +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -47,7 +47,7 @@ func (f *UnionFile) Read(s []byte) (int, error) { if (err == nil || err == io.EOF) && f.Base != nil { // advance the file position also in the base file, the next // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + if _, seekErr := f.Base.Seek(int64(n), io.SeekCurrent); seekErr != nil { // only overwrite err in case the seek fails: we need to // report an eventual io.EOF to the caller err = seekErr @@ -65,7 +65,7 @@ func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { if f.Layer != nil { n, err := f.Layer.ReadAt(s, o) if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) + _, err = f.Base.Seek(o+int64(n), io.SeekStart) } return n, err } @@ -130,7 +130,7 @@ func (f *UnionFile) Name() string { type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - var files = make(map[string]os.FileInfo) + files := make(map[string]os.FileInfo) for _, fi := range lofi { files[fi.Name()] = fi @@ -151,7 +151,6 @@ var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, err } return rfi, nil - } // Readdir will weave the two directories together and @@ -268,20 +267,14 @@ func (f *UnionFile) WriteString(s string) (n int, err error) { return 0, BADFD } -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - +func copyFile(base Fs, layer Fs, name string, bfh File) error { // First make sure the directory exists exists, err := Exists(layer, filepath.Dir(name)) if err != nil { return err } if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? + err = layer.MkdirAll(filepath.Dir(name), 0o777) // FIXME? 
if err != nil { return err } @@ -315,3 +308,23 @@ func copyToLayer(base Fs, layer Fs, name string) error { } return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) } + +func copyToLayer(base Fs, layer Fs, name string) error { + bfh, err := base.Open(name) + if err != nil { + return err + } + defer bfh.Close() + + return copyFile(base, layer, name, bfh) +} + +func copyFileToLayer(base Fs, layer Fs, name string, flag int, perm os.FileMode) error { + bfh, err := base.OpenFile(name, flag, perm) + if err != nil { + return err + } + defer bfh.Close() + + return copyFile(base, layer, name, bfh) +} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go index 4f253f481e..9e4cba2746 100644 --- a/vendor/github.com/spf13/afero/util.go +++ b/vendor/github.com/spf13/afero/util.go @@ -25,6 +25,7 @@ import ( "strings" "unicode" + "golang.org/x/text/runes" "golang.org/x/text/transform" "golang.org/x/text/unicode/norm" ) @@ -42,7 +43,7 @@ func WriteReader(fs Fs, path string, r io.Reader) (err error) { ospath := filepath.FromSlash(dir) if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r if err != nil { if err != os.ErrExist { return err @@ -70,7 +71,7 @@ func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { ospath := filepath.FromSlash(dir) if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r if err != nil { return } @@ -123,7 +124,7 @@ func GetTempDir(fs Fs, subPath string) string { return addSlash(dir) } - err := fs.MkdirAll(dir, 0777) + err := fs.MkdirAll(dir, 0o777) if err != nil { panic(err) } @@ -158,16 +159,12 @@ func UnicodeSanitize(s string) string { // Transform characters with accents into plain forms. func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) + t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC) result, _, _ := transform.String(t, string(s)) return result } -func isMn(r rune) bool { - return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks -} - func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { return FileContainsBytes(a.Fs, filename, subslice) } @@ -200,7 +197,6 @@ func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, err // readerContains reports whether any of the subslices is within r. func readerContainsAny(r io.Reader, subslices ...[]byte) bool { - if r == nil || len(subslices) == 0 { return false } @@ -299,6 +295,9 @@ func IsEmpty(fs Fs, path string) (bool, error) { } defer f.Close() list, err := f.Readdir(-1) + if err != nil { + return false, err + } return len(list) == 0, nil } return fi.Size() == 0, nil diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 439d3e1de4..2578d94b5e 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -1,4 +1,4 @@ -# Copyright 2013-2022 The Cobra Authors +# Copyright 2013-2023 The Cobra Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
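The IOFS and FromIOFS adapters introduced in iofs.go above bridge afero and the standard library in both directions. Below is a minimal sketch of the IOFS direction, assuming this vendored afero version; the file names and package layout are illustrative, not part of the diff:

```go
package main

import (
	"fmt"
	"io/fs"

	"github.com/spf13/afero"
)

func main() {
	// Populate an in-memory afero filesystem.
	mm := afero.NewMemMapFs()
	_ = afero.WriteFile(mm, "docs/a.txt", []byte("hello"), 0o644)
	_ = afero.WriteFile(mm, "docs/b.txt", []byte("world"), 0o644)

	// NewIOFS wraps it as a stdlib io/fs.FS, so standard helpers apply.
	// Per the ReadDir implementation above, entries come back sorted by name.
	entries, err := fs.ReadDir(afero.NewIOFS(mm), "docs")
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name()) // a.txt, then b.txt
	}
}
```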
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile index c433a01bce..0da8d7aa08 100644 --- a/vendor/github.com/spf13/cobra/Makefile +++ b/vendor/github.com/spf13/cobra/Makefile @@ -5,10 +5,6 @@ ifeq (, $(shell which golangci-lint)) $(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") endif -ifeq (, $(shell which richgo)) -$(warning "could not find richgo in $(PATH), run: go install github.com/kyoh86/richgo@latest") -endif - .PHONY: fmt lint test install_deps clean default: all @@ -25,6 +21,10 @@ lint: test: install_deps $(info ******************** running tests ********************) + go test -v ./... + +richtest: install_deps + $(info ******************** running tests with kyoh86/richgo ********************) richgo test -v ./... install_deps: diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 7cc726beb4..592c0b8ab0 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,4 +1,4 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) +![cobra logo](assets/CobraMain.png) Cobra is a library for creating powerful modern CLI applications. @@ -6,7 +6,7 @@ Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. -[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) +[![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 95e03aecb6..2d0239437a 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index 2c1f99e787..e79ec33a81 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -21,7 +21,7 @@ import ( type PositionalArgs func(cmd *Command, args []string) error -// Legacy arg validation has the following behaviour: +// legacyArgs validation has the following behaviour: // - root commands with no subcommands can take arbitrary arguments // - root commands with subcommands will do subcommand validity checking // - subcommands will always accept arbitrary arguments diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 3acdb27974..10c78847de 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -532,7 +532,7 @@ func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { } } -// Setup annotations for go completions for registered flags +// prepareCustomAnnotationsForFlags setup annotations for go completions for registered flags func prepareCustomAnnotationsForFlags(cmd *Command) { flagCompletionMutex.RLock() defer flagCompletionMutex.RUnlock() diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index bb4b71892c..19b09560c1 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -38,7 +38,7 @@ func genBashComp(buf io.StringWriter, name string, includeDesc bool) { __%[1]s_debug() { - if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then + if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then echo "$*" >> "${BASH_COMP_DEBUG_FILE}" fi } @@ -65,7 +65,7 @@ __%[1]s_get_completion_results() { lastChar=${lastParam:$((${#lastParam}-1)):1} __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" - if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then + if [[ -z ${cur} && ${lastChar} != = ]]; then # If the last parameter is complete (there is a space following it) # We add an extra empty parameter so we can indicate this to the go method. __%[1]s_debug "Adding extra empty parameter" @@ -75,7 +75,7 @@ __%[1]s_get_completion_results() { # When completing a flag with an = (e.g., %[1]s -n=) # bash focuses on the part after the =, so we need to remove # the flag part from $cur - if [[ "${cur}" == -*=* ]]; then + if [[ ${cur} == -*=* ]]; then cur="${cur#*=}" fi @@ -87,7 +87,7 @@ __%[1]s_get_completion_results() { directive=${out##*:} # Remove the directive out=${out%%:*} - if [ "${directive}" = "${out}" ]; then + if [[ ${directive} == "${out}" ]]; then # There is not directive specified directive=0 fi @@ -101,22 +101,36 @@ __%[1]s_process_completion_results() { local shellCompDirectiveNoFileComp=%[5]d local shellCompDirectiveFilterFileExt=%[6]d local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d - if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + if (((directive & shellCompDirectiveError) != 0)); then # Error code. No completion. 
__%[1]s_debug "Received error from custom completion go code" return else - if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then + if (((directive & shellCompDirectiveNoSpace) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then __%[1]s_debug "Activating no space" compopt -o nospace else __%[1]s_debug "No space directive not supported in this version of bash" fi fi - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then + if (((directive & shellCompDirectiveKeepOrder) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then + # no sort isn't supported for bash less than < 4.4 + if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then + __%[1]s_debug "No sort directive not supported in this version of bash" + else + __%[1]s_debug "Activating keep order" + compopt -o nosort + fi + else + __%[1]s_debug "No sort directive not supported in this version of bash" + fi + fi + if (((directive & shellCompDirectiveNoFileComp) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then __%[1]s_debug "Activating no file completion" compopt +o default else @@ -130,7 +144,7 @@ __%[1]s_process_completion_results() { local activeHelp=() __%[1]s_extract_activeHelp - if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + if (((directive & shellCompDirectiveFilterFileExt) != 0)); then # File extension filtering local fullFilter filter filteringCmd @@ -143,13 +157,12 @@ __%[1]s_process_completion_results() { filteringCmd="_filedir $fullFilter" __%[1]s_debug "File filtering command: $filteringCmd" $filteringCmd - elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + elif (((directive & shellCompDirectiveFilterDirs) != 0)); then # File completion for directories only - # Use printf to strip any trailing newline local subdir - subdir=$(printf "%%s" "${completions[0]}") - if [ -n "$subdir" ]; then + subdir=${completions[0]} + if [[ -n $subdir ]]; then __%[1]s_debug "Listing directories in $subdir" pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return else @@ -164,7 +177,7 @@ __%[1]s_process_completion_results() { __%[1]s_handle_special_char "$cur" = # Print the activeHelp statements before we finish - if [ ${#activeHelp[*]} -ne 0 ]; then + if ((${#activeHelp[*]} != 0)); then printf "\n"; printf "%%s\n" "${activeHelp[@]}" printf "\n" @@ -184,21 +197,21 @@ __%[1]s_process_completion_results() { # Separate activeHelp lines from real completions. # Fills the $activeHelp and $completions arrays. 
__%[1]s_extract_activeHelp() { - local activeHelpMarker="%[8]s" + local activeHelpMarker="%[9]s" local endIndex=${#activeHelpMarker} while IFS='' read -r comp; do - if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then + if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then comp=${comp:endIndex} __%[1]s_debug "ActiveHelp found: $comp" - if [ -n "$comp" ]; then + if [[ -n $comp ]]; then activeHelp+=("$comp") fi else # Not an activeHelp line but a normal completion completions+=("$comp") fi - done < <(printf "%%s\n" "${out}") + done <<<"${out}" } __%[1]s_handle_completion_types() { @@ -254,7 +267,7 @@ __%[1]s_handle_standard_completion_case() { done < <(printf "%%s\n" "${completions[@]}") # If there is a single completion left, remove the description text - if [ ${#COMPREPLY[*]} -eq 1 ]; then + if ((${#COMPREPLY[*]} == 1)); then __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" comp="${COMPREPLY[0]%%%%$tab*}" __%[1]s_debug "Removed description from single completion, which is now: ${comp}" @@ -271,8 +284,8 @@ __%[1]s_handle_special_char() if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then local word=${comp%%"${comp##*${char}}"} local idx=${#COMPREPLY[*]} - while [[ $((--idx)) -ge 0 ]]; do - COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"} + while ((--idx >= 0)); do + COMPREPLY[idx]=${COMPREPLY[idx]#"$word"} done fi } @@ -298,7 +311,7 @@ __%[1]s_format_comp_descriptions() # Make sure we can fit a description of at least 8 characters # if we are to align the descriptions. - if [[ $maxdesclength -gt 8 ]]; then + if ((maxdesclength > 8)); then # Add the proper number of spaces to align the descriptions for ((i = ${#comp} ; i < longest ; i++)); do comp+=" " @@ -310,8 +323,8 @@ __%[1]s_format_comp_descriptions() # If there is enough space for any description text, # truncate the descriptions that are too long for the shell width - if [ $maxdesclength -gt 0 ]; then - if [ ${#desc} -gt $maxdesclength ]; then + if ((maxdesclength > 0)); then + if ((${#desc} > maxdesclength)); then desc=${desc:0:$(( maxdesclength - 1 ))} desc+="…" fi @@ -332,9 +345,9 @@ __start_%[1]s() # Call _init_completion from the bash-completion package # to prepare the arguments properly if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -n "=:" || return + _init_completion -n =: || return else - __%[1]s_init_completion -n "=:" || return + __%[1]s_init_completion -n =: || return fi __%[1]s_debug @@ -361,7 +374,7 @@ fi # ex: ts=4 sw=4 et filetype=sh `, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpMarker)) } diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index fe44bc8a07..b07b44a0ce 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -167,8 +167,8 @@ func appendIfNotPresent(s, stringToAppend string) string { // rpad adds padding to the right of a string. 
func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) + formattedString := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(formattedString, s) } // tmpl executes the given template text on data, writing the result to w. diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 9d5e9cf5eb..01f7c6f1c5 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -35,7 +35,7 @@ const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" // FParseErrWhitelist configures Flag parse errors to be ignored type FParseErrWhitelist flag.ParseErrorsWhitelist -// Structure to manage groups for commands +// Group Structure to manage groups for commands type Group struct { ID string Title string @@ -47,7 +47,7 @@ type Group struct { // definition to ensure usability. type Command struct { // Use is the one-line usage message. - // Recommended syntax is as follow: + // Recommended syntax is as follows: // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. // ... indicates that you can specify multiple values for the previous argument. // | indicates mutually exclusive information. You can use the argument to the left of the separator or the @@ -321,7 +321,7 @@ func (c *Command) SetHelpCommand(cmd *Command) { c.helpCommand = cmd } -// SetHelpCommandGroup sets the group id of the help command. +// SetHelpCommandGroupID sets the group id of the help command. func (c *Command) SetHelpCommandGroupID(groupID string) { if c.helpCommand != nil { c.helpCommand.GroupID = groupID @@ -330,7 +330,7 @@ func (c *Command) SetHelpCommandGroupID(groupID string) { c.helpCommandGroupID = groupID } -// SetCompletionCommandGroup sets the group id of the completion command. +// SetCompletionCommandGroupID sets the group id of the completion command. func (c *Command) SetCompletionCommandGroupID(groupID string) { // completionCommandGroupID is used if no completion command is defined by the user c.Root().completionCommandGroupID = groupID @@ -655,20 +655,44 @@ Loop: // argsMinusFirstX removes only the first x from args. Otherwise, commands that look like // openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). -func argsMinusFirstX(args []string, x string) []string { - for i, y := range args { - if x == y { - ret := []string{} - ret = append(ret, args[:i]...) - ret = append(ret, args[i+1:]...) - return ret +// Special care needs to be taken not to remove a flag value. +func (c *Command) argsMinusFirstX(args []string, x string) []string { + if len(args) == 0 { + return args + } + c.mergePersistentFlags() + flags := c.Flags() + +Loop: + for pos := 0; pos < len(args); pos++ { + s := args[pos] + switch { + case s == "--": + // -- means we have reached the end of the parseable args. Break out of the loop now. + break Loop + case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): + fallthrough + case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): + // This is a flag without a default value, and an equal sign is not used. 
Increment pos in order to skip + // over the next arg, because that is the value of this flag. + pos++ + continue + case !strings.HasPrefix(s, "-"): + // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, + // return the args, excluding the one at this position. + if s == x { + ret := []string{} + ret = append(ret, args[:pos]...) + ret = append(ret, args[pos+1:]...) + return ret + } } } return args } func isFlagArg(arg string) bool { - return ((len(arg) >= 3 && arg[1] == '-') || + return ((len(arg) >= 3 && arg[0:2] == "--") || (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) } @@ -686,7 +710,7 @@ func (c *Command) Find(args []string) (*Command, []string, error) { cmd := c.findNext(nextSubCmd) if cmd != nil { - return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) + return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd)) } return c, innerArgs } @@ -998,6 +1022,10 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // initialize completion at the last point to allow for user overriding c.InitDefaultCompletionCmd() + // Now that all commands have been created, let's make sure all groups + // are properly created also + c.checkCommandGroups() + args := c.args // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 @@ -1092,6 +1120,19 @@ func (c *Command) ValidateRequiredFlags() error { return nil } +// checkCommandGroups checks if a command has been added to a group that does not exists. +// If so, we panic because it indicates a coding error that should be corrected. +func (c *Command) checkCommandGroups() { + for _, sub := range c.commands { + // if Group is not defined let the developer know right away + if sub.GroupID != "" && !c.ContainsGroup(sub.GroupID) { + panic(fmt.Sprintf("group id '%s' is not defined for subcommand '%s'", sub.GroupID, sub.CommandPath())) + } + + sub.checkCommandGroups() + } +} + // InitDefaultHelpFlag adds default help flag to c. // It is called automatically by executing the c or by calling help and usage. // If c already has help flag, it will do nothing. @@ -1218,10 +1259,6 @@ func (c *Command) AddCommand(cmds ...*Command) { panic("Command can't be a child of itself") } cmds[i].parent = c - // if Group is not defined let the developer know right away - if x.GroupID != "" && !c.ContainsGroup(x.GroupID) { - panic(fmt.Sprintf("Group id '%s' is not defined for subcommand '%s'", x.GroupID, cmds[i].CommandPath())) - } // update max lengths usageLen := len(x.Use) if usageLen > c.commandsMaxUseLen { @@ -1259,7 +1296,7 @@ func (c *Command) AllChildCommandsHaveGroup() bool { return true } -// ContainGroups return if groupID exists in the list of command groups. +// ContainsGroup return if groupID exists in the list of command groups. func (c *Command) ContainsGroup(groupID string) bool { for _, x := range c.commandgroups { if x.ID == groupID { diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go index 2b77f8f019..307f0c127f 100644 --- a/vendor/github.com/spf13/cobra/command_notwin.go +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
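A side effect of moving the group-ID validation out of AddCommand and into checkCommandGroups (invoked from ExecuteC above) is that grouped subcommands and their groups can now be registered in any order; the panic fires only if a group is still undefined when Execute runs. A hedged sketch of this, with illustrative command and group names:

```go
package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{Use: "app"}

	// No longer panics inside AddCommand even though "mgmt" is not
	// registered yet; the check is deferred to ExecuteC.
	rootCmd.AddCommand(&cobra.Command{
		Use:     "start",
		GroupID: "mgmt",
		Run:     func(cmd *cobra.Command, args []string) {},
	})

	// Registering the group before Execute satisfies checkCommandGroups.
	rootCmd.AddGroup(&cobra.Group{ID: "mgmt", Title: "Management Commands:"})

	_ = rootCmd.Execute()
}
```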
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go index 520f23abf0..adbef395c2 100644 --- a/vendor/github.com/spf13/cobra/command_win.go +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index e8a0206db1..ee38c4d0b8 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -77,6 +77,10 @@ const ( // obtain the same behavior but only for flags. ShellCompDirectiveFilterDirs + // ShellCompDirectiveKeepOrder indicates that the shell should preserve the order + // in which the completions are provided + ShellCompDirectiveKeepOrder + // =========================================================================== // All directives using iota should be above this one. @@ -159,6 +163,9 @@ func (d ShellCompDirective) string() string { if d&ShellCompDirectiveFilterDirs != 0 { directives = append(directives, "ShellCompDirectiveFilterDirs") } + if d&ShellCompDirectiveKeepOrder != 0 { + directives = append(directives, "ShellCompDirectiveKeepOrder") + } if len(directives) == 0 { directives = append(directives, "ShellCompDirectiveDefault") } @@ -169,7 +176,7 @@ func (d ShellCompDirective) string() string { return strings.Join(directives, ", ") } -// Adds a special hidden command that can be used to request custom completions. +// initCompleteCmd adds a special hidden command that can be used to request custom completions. func (c *Command) initCompleteCmd(args []string) { completeCmd := &Command{ Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), @@ -727,7 +734,7 @@ to enable it. You can execute the following once: To load completions in your current shell session: - source <(%[1]s completion zsh); compdef _%[1]s %[1]s + source <(%[1]s completion zsh) To load completions for every new session, execute once: diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index 97112a17b2..12ca0d2b11 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -53,7 +53,7 @@ function __%[1]s_perform_completion __%[1]s_debug "last arg: $lastArg" # Disable ActiveHelp which is not supported for fish shell - set -l requestComp "%[9]s=0 $args[1] %[3]s $args[2..-1] $lastArg" + set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg" __%[1]s_debug "Calling $requestComp" set -l results (eval $requestComp 2> /dev/null) @@ -89,6 +89,60 @@ function __%[1]s_perform_completion printf "%%s\n" "$directiveLine" end +# this function limits calls to __%[1]s_perform_completion by caching the result behind $__%[1]s_perform_completion_once_result +function __%[1]s_perform_completion_once + __%[1]s_debug "Starting __%[1]s_perform_completion_once" + + if test -n "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion" + return 0 + end + + set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion) + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "No completions, probably due to a failure" + return 1 + end + + __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result" + return 0 +end + +# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run +function __%[1]s_clear_perform_completion_once_result + __%[1]s_debug "" + __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable ==========" + set --erase __%[1]s_perform_completion_once_result + __%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result" +end + +function __%[1]s_requires_order_preservation + __%[1]s_debug "" + __%[1]s_debug "========= checking if order preservation is required ==========" + + __%[1]s_perform_completion_once + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "Error determining if order preservation is required" + return 1 + end + + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + __%[1]s_debug "Directive is: $directive" + + set -l shellCompDirectiveKeepOrder %[9]d + set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2) + __%[1]s_debug "Keeporder is: $keeporder" + + if test $keeporder -ne 0 + __%[1]s_debug "This does require order preservation" + return 0 + end + + __%[1]s_debug "This doesn't require order preservation" + return 1 +end + + # This function does two things: # - Obtain the completions and store them in the global __%[1]s_comp_results # - Return false if file completion should be performed @@ -99,17 +153,17 @@ function __%[1]s_prepare_completions # Start fresh set --erase __%[1]s_comp_results - set -l results (__%[1]s_perform_completion) - __%[1]s_debug "Completion results: $results" + __%[1]s_perform_completion_once + __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result" - if test -z "$results" + if test -z "$__%[1]s_perform_completion_once_result" __%[1]s_debug "No completion, probably due to a failure" # Might as well do file completion, in case it helps return 1 end - set -l directive (string sub --start 2 $results[-1]) - set --global __%[1]s_comp_results $results[1..-2] + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2] __%[1]s_debug "Completions are: $__%[1]s_comp_results" __%[1]s_debug "Directive is: $directive" @@ -205,13 +259,17 @@ end # Remove any pre-existing
completions for the program since we will be handling all of them. complete -c %[2]s -e +# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global +complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result' # The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results # which provides the program's completion choices. -complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' - +# If this doesn't require order preservation, we don't use the -k flag +complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' +# otherwise we use the -k flag +complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' `, nameForVar, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) } // GenFishCompletion generates fish completion file and writes to the passed writer. diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index 9c377aaf9c..b35fde1554 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 004de42e41..177d2755f2 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -77,6 +77,7 @@ filter __%[1]s_escapeStringWithSpecialChars { $ShellCompDirectiveNoFileComp=%[6]d $ShellCompDirectiveFilterFileExt=%[7]d $ShellCompDirectiveFilterDirs=%[8]d + $ShellCompDirectiveKeepOrder=%[9]d # Prepare the command to request completions for the program. # Split the command at the first space to separate the program and arguments. @@ -106,13 +107,22 @@ filter __%[1]s_escapeStringWithSpecialChars { # If the last parameter is complete (there is a space following it) # We add an extra empty parameter so we can indicate this to the go method. 
__%[1]s_debug "Adding extra empty parameter" -`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` -`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + # PowerShell 7.2+ changed the way how the arguments are passed to executables, + # so for pre-7.2 or when Legacy argument passing is enabled we need to use +`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+` + if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or + ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or + (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and + $PSNativeCommandArgumentPassing -eq 'Legacy')) { +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + } else { + $RequestComp="$RequestComp" + ' ""' + } } __%[1]s_debug "Calling $RequestComp" # First disable ActiveHelp which is not supported for Powershell - $env:%[9]s=0 + $env:%[10]s=0 #call the command store the output in $out and redirect stderr and stdout to null # $Out is an array contains each line per element @@ -137,7 +147,7 @@ filter __%[1]s_escapeStringWithSpecialChars { } $Longest = 0 - $Values = $Out | ForEach-Object { + [Array]$Values = $Out | ForEach-Object { #Split the output in name and description `+" $Name, $Description = $_.Split(\"`t\",2)"+` __%[1]s_debug "Name: $Name Description: $Description" @@ -182,6 +192,11 @@ filter __%[1]s_escapeStringWithSpecialChars { } } + # we sort the values in ascending order by name if keep order isn't passed + if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) { + $Values = $Values | Sort-Object -Property Name + } + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { __%[1]s_debug "ShellCompDirectiveNoFileComp is called" @@ -267,7 +282,7 @@ filter __%[1]s_escapeStringWithSpecialChars { Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock `, name, nameForVar, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) } func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md index 6865f88e79..8a291eb20e 100644 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md @@ -1,11 +1,13 @@ ## Projects using Cobra - [Allero](https://github.com/allero-io/allero) +- [Arewefastyet](https://benchmark.vitess.io) - [Arduino CLI](https://github.com/arduino/arduino-cli) - [Bleve](https://blevesearch.com/) - [Cilium](https://cilium.io/) - [CloudQuery](https://github.com/cloudquery/cloudquery) - [CockroachDB](https://www.cockroachlabs.com/) +- [Constellation](https://github.com/edgelesssys/constellation) - [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) - [Datree](https://github.com/datreeio/datree) - [Delve](https://github.com/derekparker/delve) @@ -25,7 +27,7 @@ - [Istio](https://istio.io) - [Kool](https://github.com/kool-dev/kool) - [Kubernetes](https://kubernetes.io/) -- [Kubescape](https://github.com/armosec/kubescape) +- [Kubescape](https://github.com/kubescape/kubescape) - [KubeVirt](https://github.com/kubevirt/kubevirt) - 
[Linkerd](https://linkerd.io/) - [Mattermost-server](https://github.com/mattermost/mattermost-server) @@ -51,10 +53,12 @@ - [Random](https://github.com/erdaltsksn/random) - [Rclone](https://rclone.org/) - [Scaleway CLI](https://github.com/scaleway/scaleway-cli) +- [Sia](https://github.com/SiaFoundation/siad) - [Skaffold](https://skaffold.dev/) - [Tendermint](https://github.com/tendermint/tendermint) - [Twitch CLI](https://github.com/twitchdev/twitch-cli) - [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli) +- [Vitess](https://vitess.io) - VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework) - [Werf](https://werf.io/) - [ZITADEL](https://github.com/zitadel/zitadel) diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go index 126e83c307..b035742d39 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.go +++ b/vendor/github.com/spf13/cobra/shell_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md index 553ee5df8a..065c0621d4 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ b/vendor/github.com/spf13/cobra/shell_completions.md @@ -71,7 +71,7 @@ PowerShell: `,cmd.Root().Name()), DisableFlagsInUseLine: true, ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.ExactValidArgs(1), + Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), Run: func(cmd *cobra.Command, args []string) { switch args[0] { case "bash": @@ -162,16 +162,7 @@ cmd := &cobra.Command{ } ``` -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -$ kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of -replication controllers following `rc`. +The aliases are shown to the user on tab completion only if no completions were found within sub-commands or `ValidArgs`. ### Dynamic completion of nouns @@ -237,6 +228,10 @@ ShellCompDirectiveFilterFileExt // return []string{"themes"}, ShellCompDirectiveFilterDirs // ShellCompDirectiveFilterDirs + +// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order +// in which the completions are provided +ShellCompDirectiveKeepOrder ``` ***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. 
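The new ShellCompDirectiveKeepOrder threaded through the bash, fish, powershell, and zsh generators in this diff is returned from a ValidArgsFunction like any other directive. A minimal sketch, with illustrative command and completion values:

```go
package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{Use: "app"}
	rootCmd.AddCommand(&cobra.Command{
		Use: "step [name]",
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			// Deliberately non-alphabetical; KeepOrder tells the shell
			// not to re-sort the completions it displays.
			steps := []string{
				"init\tPrepare the workspace",
				"build\tCompile the project",
				"deploy\tShip the artifacts",
			}
			return steps, cobra.ShellCompDirectiveKeepOrder | cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(cmd *cobra.Command, args []string) {},
	})
	_ = rootCmd.Execute()
}
```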
@@ -385,6 +380,19 @@ or ```go ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} ``` + +If you don't want to show descriptions in the completions, you can add `--no-descriptions` to the default `completion` command to disable them, like: + +```bash +$ source <(helm completion bash) +$ helm completion [tab][tab] +bash (generate autocompletion script for bash) powershell (generate autocompletion script for powershell) +fish (generate autocompletion script for fish) zsh (generate autocompletion script for zsh) + +$ source <(helm completion bash --no-descriptions) +$ helm completion [tab][tab] +bash fish powershell zsh +``` ## Bash completions ### Dependencies diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md index 977306aa8c..85201d840c 100644 --- a/vendor/github.com/spf13/cobra/user_guide.md +++ b/vendor/github.com/spf13/cobra/user_guide.md @@ -188,6 +188,37 @@ var versionCmd = &cobra.Command{ } ``` +### Organizing subcommands + +A command may have subcommands which in turn may have other subcommands. This is achieved by using +`AddCommand`. In some cases, especially in larger applications, each subcommand may be defined in +its own go package. + +The suggested approach is for the parent command to use `AddCommand` to add its most immediate +subcommands. For example, consider the following directory structure: + +```text +├── cmd +│   ├── root.go +│   └── sub1 +│   ├── sub1.go +│   └── sub2 +│   ├── leafA.go +│   ├── leafB.go +│   └── sub2.go +└── main.go +``` + +In this case: + +* The `init` function of `root.go` adds the command defined in `sub1.go` to the root command. +* The `init` function of `sub1.go` adds the command defined in `sub2.go` to the sub1 command. +* The `init` function of `sub2.go` adds the commands defined in `leafA.go` and `leafB.go` to the + sub2 command. + +This approach ensures the subcommands are always included at compile time while avoiding cyclic +references. + ### Returning and handling errors If you wish to return an error to the caller of a command, `RunE` can be used. @@ -313,8 +344,8 @@ rootCmd.MarkFlagsRequiredTogether("username", "password") You can also prevent different flags from being provided together if they represent mutually exclusive options such as specifying an output format as either `--json` or `--yaml` but never both: ```go -rootCmd.Flags().BoolVar(&u, "json", false, "Output in JSON") -rootCmd.Flags().BoolVar(&pw, "yaml", false, "Output in YAML") +rootCmd.Flags().BoolVar(&ofJson, "json", false, "Output in JSON") +rootCmd.Flags().BoolVar(&ofYaml, "yaml", false, "Output in YAML") rootCmd.MarkFlagsMutuallyExclusive("json", "yaml") ``` @@ -349,7 +380,7 @@ shown below: ```go var cmd = &cobra.Command{ Short: "hello", - Args: MatchAll(ExactArgs(2), OnlyValidArgs), + Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs), Run: func(cmd *cobra.Command, args []string) { fmt.Println("Hello, World!") }, @@ -492,10 +523,11 @@ around it. In fact, you can provide your own if you want. ### Grouping commands in help -Cobra supports grouping of available commands. Groups must be explicitly defined by `AddGroup` and set by -the `GroupId` element of a subcommand. The groups will appear in the same order as they are defined. -If you use the generated `help` or `completion` commands, you can set the group ids by `SetHelpCommandGroupId` -and `SetCompletionCommandGroupId`, respectively. +Cobra supports grouping of available commands in the help output. 
To group commands, each group must be explicitly +defined using `AddGroup()` on the parent command. Then a subcommand can be added to a group using the `GroupID` element +of that subcommand. The groups will appear in the help output in the same order as they are defined using different +calls to `AddGroup()`. If you use the generated `help` or `completion` commands, you can set their group ids using +`SetHelpCommandGroupId()` and `SetCompletionCommandGroupId()` on the root command, respectively. ### Defining your own help diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 84cec76fde..1856e4c7f6 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -90,6 +90,7 @@ func genZshComp(buf io.StringWriter, name string, includeDesc bool) { compCmd = ShellCompNoDescRequestCmd } WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s +compdef _%[1]s %[1]s # zsh completion for %-36[1]s -*- shell-script -*- @@ -108,8 +109,9 @@ _%[1]s() local shellCompDirectiveNoFileComp=%[5]d local shellCompDirectiveFilterFileExt=%[6]d local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d - local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace + local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder local -a completions __%[1]s_debug "\n========= starting completion logic ==========" @@ -177,7 +179,7 @@ _%[1]s() return fi - local activeHelpMarker="%[8]s" + local activeHelpMarker="%[9]s" local endIndex=${#activeHelpMarker} local startIndex=$((${#activeHelpMarker}+1)) local hasActiveHelp=0 @@ -227,6 +229,11 @@ _%[1]s() noSpace="-S ''" fi + if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then + __%[1]s_debug "Activating keep order." 
+ keepOrder="-V" + fi + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then # File extension filtering local filteringCmd @@ -262,7 +269,7 @@ _%[1]s() return $result else __%[1]s_debug "Calling _describe" - if eval _describe "completions" completions $flagPrefix $noSpace; then + if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then __%[1]s_debug "_describe found some completions" # Return the success of having called _describe @@ -296,6 +303,6 @@ if [ "$funcstack[1]" = "_%[1]s" ]; then fi `, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpMarker)) } diff --git a/vendor/github.com/spf13/viper/.editorconfig b/vendor/github.com/spf13/viper/.editorconfig index 63afcbcdd4..6d0b6d356b 100644 --- a/vendor/github.com/spf13/viper/.editorconfig +++ b/vendor/github.com/spf13/viper/.editorconfig @@ -11,5 +11,5 @@ trim_trailing_whitespace = true [*.go] indent_style = tab -[{Makefile, *.mk}] +[{Makefile,*.mk}] indent_style = tab diff --git a/vendor/github.com/spf13/viper/.golangci.yaml b/vendor/github.com/spf13/viper/.golangci.yaml new file mode 100644 index 0000000000..16e039652e --- /dev/null +++ b/vendor/github.com/spf13/viper/.golangci.yaml @@ -0,0 +1,96 @@ +run: + timeout: 5m + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/spf13/viper) + golint: + min-confidence: 0 + goimports: + local-prefixes: github.com/spf13/viper + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - dogsled + - dupl + - durationcheck + - exhaustive + - exportloopref + - gci + - gofmt + - gofumpt + - goimports + - gomoddirectives + - goprintffuncname + - govet + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nilerr + - noctx + - nolintlint + - prealloc + - predeclared + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - structcheck + - stylecheck + - tparallel + - typecheck + - unconvert + - unparam + - unused + - varcheck + - wastedassign + - whitespace + + # fixme + # - cyclop + # - errcheck + # - errorlint + # - exhaustivestruct + # - forbidigo + # - forcetypeassert + # - gochecknoglobals + # - gochecknoinits + # - gocognit + # - goconst + # - gocritic + # - gocyclo + # - godot + # - gosec + # - gosimple + # - ifshort + # - lll + # - nlreturn + # - paralleltest + # - scopelint + # - thelper + # - wrapcheck + + # unused + # - depguard + # - goheader + # - gomodguard + + # don't enable: + # - asciicheck + # - funlen + # - godox + # - goerr113 + # - gomnd + # - interfacer + # - maligned + # - nestif + # - testpackage + # - wsl diff --git a/vendor/github.com/spf13/viper/.golangci.yml b/vendor/github.com/spf13/viper/.golangci.yml deleted file mode 100644 index a0755ce7e1..0000000000 --- a/vendor/github.com/spf13/viper/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters-settings: - golint: - min-confidence: 0.1 - goimports: - local-prefixes: github.com/spf13/viper - -linters: - enable-all: true - disable: - - funlen - - maligned - - # TODO: fix me - - wsl - - gochecknoinits - - gosimple - - gochecknoglobals - - errcheck - - lll - - godox - - scopelint - - gocyclo - - gocognit - - gocritic - -service: - golangci-lint-version: 1.21.x diff --git a/vendor/github.com/spf13/viper/Makefile b/vendor/github.com/spf13/viper/Makefile index 1c2cab03f4..02d3e3715a 100644 --- 
a/vendor/github.com/spf13/viper/Makefile +++ b/vendor/github.com/spf13/viper/Makefile @@ -15,8 +15,8 @@ TEST_FORMAT = short-verbose endif # Dependency versions -GOTESTSUM_VERSION = 0.4.0 -GOLANGCI_VERSION = 1.21.0 +GOTESTSUM_VERSION = 1.8.0 +GOLANGCI_VERSION = 1.45.2 # Add the ability to override some variables # Use with care @@ -49,7 +49,7 @@ bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint-${GOLANGCI_VERSION}: @mkdir -p bin curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION} - @mv bin/golangci-lint $@ + @mv bin/golangci-lint "$@" .PHONY: lint lint: bin/golangci-lint ## Run linter diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index dfd8034fd5..c14e8927a1 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -1,11 +1,18 @@ +> ## Viper v2 feedback +> Viper is heading towards v2 and we would love to hear what _**you**_ would like to see in it. Share your thoughts here: https://forms.gle/R6faU74qPRPAzchZ9 +> +> **Thank you!** + ![Viper](.github/logo.png?raw=true) [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) +[![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go) [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/spf13/viper/CI?style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/spf13/viper) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.15-61CFDD.svg?style=flat-square) +[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) **Go configuration with fangs!** @@ -24,10 +31,12 @@ Many Go projects are built using Viper including: ## Install -```console +```shell go get github.com/spf13/viper ``` +**Note:** Viper uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies. + ## What is Viper? 
@@ -110,7 +119,7 @@ viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search viper.AddConfigPath(".") // optionally look for config in the working directory err := viper.ReadInConfig() // Find and read the config file if err != nil { // Handle errors reading the config file - panic(fmt.Errorf("Fatal error config file: %s \n", err)) + panic(fmt.Errorf("Fatal error config file: %w \n", err)) } ``` @@ -118,11 +127,11 @@ You can handle the specific case where no config file is found like this: ```go if err := viper.ReadInConfig(); err != nil { - if _, ok := err.(viper.ConfigFileNotFoundError); ok { - // Config file not found; ignore error if desired - } else { - // Config file was found but another error was produced - } + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + // Config file not found; ignore error if desired + } else { + // Config file was found but another error was produced + } } // Config file found and successfully parsed @@ -166,10 +175,10 @@ Optionally you can provide a function for Viper to run each time a change occurs **Make sure you add all of the configPaths prior to calling `WatchConfig()`** ```go -viper.WatchConfig() viper.OnConfigChange(func(e fsnotify.Event) { fmt.Println("Config file changed:", e.Name) }) +viper.WatchConfig() ``` ### Reading Config from io.Reader @@ -245,9 +254,10 @@ using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from the environment variables. Both `BindEnv` and `AutomaticEnv` will use this prefix. -`BindEnv` takes one or two parameters. The first parameter is the key name, the -second is the name of the environment variable. The name of the environment -variable is case sensitive. If the ENV variable name is not provided, then +`BindEnv` takes one or more parameters. The first parameter is the key name, the +rest are the names of the environment variables to bind to this key. If more than +one is provided, they will take precedence in the specified order. The name of +the environment variable is case sensitive. If the ENV variable name is not provided, then Viper will automatically assume that the ENV variable matches the following format: prefix + "_" + the key name in ALL CAPS. When you explicitly provide the ENV variable name (the second parameter), it **does not** automatically add the prefix. For example if the second parameter is "id", Viper will look for the ENV variable "ID". @@ -259,7 +269,7 @@ the `BindEnv` is called. `AutomaticEnv` is a powerful helper especially when combined with `SetEnvPrefix`. When called, Viper will check for an environment variable any time a `viper.Get` request is made. It will apply the following rules. It will -check for a environment variable with a name matching the key uppercased and +check for an environment variable with a name matching the key uppercased and prefixed with the `EnvPrefix` if set. `SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env @@ -344,7 +354,7 @@ func main() { i := viper.GetInt("flagname") // retrieve value from viper - ... + // ... } ``` @@ -493,18 +503,18 @@ runtime_viper.Unmarshal(&runtime_conf) // open a goroutine to watch remote changes forever go func(){ for { - time.Sleep(time.Second * 5) // delay after each request - - // currently, only tested with etcd support - err := runtime_viper.WatchRemoteConfig() - if err != nil { - log.Errorf("unable to read remote config: %v", err) - continue - } - - // unmarshal new config into our runtime config struct.
you can also use channel - // to implement a signal to notify the system of the changes - runtime_viper.Unmarshal(&runtime_conf) + time.Sleep(time.Second * 5) // delay after each request + + // currently, only tested with etcd support + err := runtime_viper.WatchRemoteConfig() + if err != nil { + log.Errorf("unable to read remote config: %v", err) + continue + } + + // unmarshal new config into our runtime config struct. you can also use channel + // to implement a signal to notify the system of the changes + runtime_viper.Unmarshal(&runtime_conf) } }() ``` @@ -536,7 +546,7 @@ Example: ```go viper.GetString("logfile") // case-insensitive Setting & Getting if viper.GetBool("verbose") { - fmt.Println("verbose enabled") + fmt.Println("verbose enabled") } ``` ### Accessing nested keys @@ -582,6 +592,33 @@ the `Set()` method, …) with an immediate value, then all sub-keys of `datastore.metric` become undefined, they are “shadowed” by the higher-priority configuration level. +Viper can access array indices by using numbers in the path. For example: + +```json +{ + "host": { + "address": "localhost", + "ports": [ + 5799, + 6029 + ] + }, + "datastore": { + "metric": { + "host": "127.0.0.1", + "port": 3099 + }, + "warehouse": { + "host": "198.0.0.1", + "port": 2112 + } + } +} + +GetInt("host.ports.1") // returns 6029 + +``` + Lastly, if there exists a key that matches the delimited key path, its value will be returned instead. E.g. @@ -607,14 +644,15 @@ will be returned instead. E.g. GetString("datastore.metric.host") // returns "0.0.0.0" ``` -### Extract sub-tree +### Extracting a sub-tree -Extract sub-tree from Viper. +When developing reusable modules, it's often useful to extract a subset of the configuration +and pass it to a module. This way the module can be instantiated more than once, with different configurations. -For example, `viper` represents: +For example, an application might use multiple different cache stores for different purposes: -```json -app: +```yaml +cache: cache1: max-items: 100 item-size: 64 @@ -623,35 +661,36 @@ app: item-size: 80 ``` -After executing: +We could pass the cache name to a module (eg. `NewCache("cache1")`), +but it would require weird concatenation for accessing config keys and would be less separated from the global config. -```go -subv := viper.Sub("app.cache1") -``` +So instead of doing that let's pass a Viper instance to the constructor that represents a subset of the configuration: -`subv` represents: +```go +cache1Config := viper.Sub("cache.cache1") +if cache1Config == nil { // Sub returns nil if the key cannot be found + panic("cache configuration not found") +} -```json -max-items: 100 -item-size: 64 +cache1 := NewCache(cache1Config) ``` -Suppose we have: +**Note:** Always check the return value of `Sub`. It returns `nil` if a key cannot be found. + +Internally, the `NewCache` function can address `max-items` and `item-size` keys directly: ```go -func NewCache(cfg *Viper) *Cache {...} +func NewCache(v *Viper) *Cache { + return &Cache{ + MaxItems: v.GetInt("max-items"), + ItemSize: v.GetInt("item-size"), + } +} ``` -which creates a cache based on config information formatted as `subv`. -Now it’s easy to create these 2 caches separately as: +The resulting code is easy to test, since it's decoupled from the main config structure, +and easier to reuse (for the same reason). 
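To round out the section above, a brief usage sketch of this pattern, assuming the `cache` YAML shown earlier and the `NewCache` constructor just defined:

```go
// Instantiate two independent caches from sub-trees of the same config.
cache1Config := viper.Sub("cache.cache1")
cache2Config := viper.Sub("cache.cache2")
if cache1Config == nil || cache2Config == nil { // Sub returns nil for missing keys
	panic("cache configuration not found")
}

cache1 := NewCache(cache1Config) // max-items: 100, item-size: 64
cache2 := NewCache(cache2Config) // max-items: 200, item-size: 80
```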
-```go -cfg1 := viper.Sub("app.cache1") -cache1 := NewCache(cfg1) - -cfg2 := viper.Sub("app.cache2") -cache2 := NewCache(cfg2) -``` ### Unmarshaling @@ -687,18 +726,18 @@ you have to change the delimiter: v := viper.NewWithOptions(viper.KeyDelimiter("::")) v.SetDefault("chart::values", map[string]interface{}{ - "ingress": map[string]interface{}{ - "annotations": map[string]interface{}{ - "traefik.frontend.rule.type": "PathPrefix", - "traefik.ingress.kubernetes.io/ssl-redirect": "true", - }, - }, + "ingress": map[string]interface{}{ + "annotations": map[string]interface{}{ + "traefik.frontend.rule.type": "PathPrefix", + "traefik.ingress.kubernetes.io/ssl-redirect": "true", + }, + }, }) type config struct { Chart struct{ - Values map[string]interface{} - } + Values map[string]interface{} + } } var C config @@ -739,6 +778,15 @@ if err != nil { Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. +### Decoding custom formats + +A frequently requested feature for Viper is adding more value formats and decoders. +For example, parsing character (dot, comma, semicolon, etc) separated strings into slices. + +This is already available in Viper using mapstructure decode hooks. + +Read more about the details in [this blog post](https://sagikazarmark.hu/blog/decoding-custom-formats-with-viper/). + ### Marshalling to string You may need to marshal all the settings held in viper into a string rather than write them to a file. You can use your favorite format's marshaller with the config returned by `AllSettings()`. ```go import ( - yaml "gopkg.in/yaml.v2" - // ... + yaml "gopkg.in/yaml.v2" + // ... ) func yamlStringSettings() string { - c := viper.AllSettings() - bs, err := yaml.Marshal(c) - if err != nil { - log.Fatalf("unable to marshal config to YAML: %v", err) - } - return string(bs) + c := viper.AllSettings() + bs, err := yaml.Marshal(c) + if err != nil { + log.Fatalf("unable to marshal config to YAML: %v", err) + } + return string(bs) } ``` @@ -792,15 +840,35 @@ y.SetDefault("ContentDir", "foobar") When working with multiple vipers, it is up to the user to keep track of the different vipers. + ## Q & A -Q: Why is it called “Viper”? +### Why is it called “Viper”? A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe)) to [Cobra](https://github.com/spf13/cobra). While both can operate completely independently, together they make a powerful pair to handle much of your application foundation needs. -Q: Why is it called “Cobra”? +### Why is it called “Cobra”? + +Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)? + +### Does Viper support case sensitive keys? + +**tl;dr:** No. + +Viper merges configuration from various sources, many of which are either case insensitive or use different casing than the rest of the sources (eg. env vars). +In order to provide the best experience when using multiple sources, the decision has been made to make all keys case insensitive. + +There have been several attempts to implement case sensitivity, but unfortunately it's not that trivial. We might take a stab at implementing it in [Viper v2](https://github.com/spf13/viper/issues/772), but despite the initial noise, it does not seem to be requested that much. + +You can vote for case sensitivity by filling out this feedback form: https://forms.gle/R6faU74qPRPAzchZ9 + +### Is it safe to concurrently read and write to a viper?
+ +No, you will need to synchronize access to the viper yourself (for example by using the `sync` package). Concurrent reads and writes can cause a panic. + +## Troubleshooting -A: Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)? +See [TROUBLESHOOTING.md](TROUBLESHOOTING.md). diff --git a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md new file mode 100644 index 0000000000..c4e36c6860 --- /dev/null +++ b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md @@ -0,0 +1,32 @@ +# Troubleshooting + +## Unmarshaling doesn't work + +The most common reason for this issue is improper use of struct tags (eg. `yaml` or `json`). Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. Please refer to the library's documentation for using other struct tags. + +## Cannot find package + +Viper installation seems to fail a lot lately with the following (or a similar) error: + +``` +cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of: +/usr/local/Cellar/go/1.15.7_1/libexec/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOROOT) +/Users/user/go/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOPATH) +``` + +As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`. +Viper opted to use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). + +The solution is easy: switch to using Go Modules. +Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that. + +**tl;dr:** `export GO111MODULE=on` + +## Unquoted 'y' and 'n' characters get replaced with _true_ and _false_ when reading a YAML file + +This is a YAML 1.1 feature according to [go-yaml/yaml#740](https://github.com/go-yaml/yaml/issues/740). + +Potential solutions are: + +1. Quoting values resolved as boolean +1. Upgrading to YAML v3 (the default in this version; YAML v2 can still be selected by passing the `viper_yaml2` tag to your build) diff --git a/vendor/github.com/spf13/viper/experimental_logger.go b/vendor/github.com/spf13/viper/experimental_logger.go new file mode 100644 index 0000000000..206dad6a0c --- /dev/null +++ b/vendor/github.com/spf13/viper/experimental_logger.go @@ -0,0 +1,11 @@ +//go:build viper_logger +// +build viper_logger + +package viper + +// WithLogger sets a custom logger.
+func WithLogger(l Logger) Option { + return optionFunc(func(v *Viper) { + v.logger = l + }) +} diff --git a/vendor/github.com/spf13/viper/fs.go b/vendor/github.com/spf13/viper/fs.go new file mode 100644 index 0000000000..ecb1769e52 --- /dev/null +++ b/vendor/github.com/spf13/viper/fs.go @@ -0,0 +1,65 @@ +//go:build go1.16 && finder +// +build go1.16,finder + +package viper + +import ( + "errors" + "io/fs" + "path" +) + +type finder struct { + paths []string + fileNames []string + extensions []string + + withoutExtension bool +} + +func (f finder) Find(fsys fs.FS) (string, error) { + for _, searchPath := range f.paths { + for _, fileName := range f.fileNames { + for _, extension := range f.extensions { + filePath := path.Join(searchPath, fileName+"."+extension) + + ok, err := fileExists(fsys, filePath) + if err != nil { + return "", err + } + + if ok { + return filePath, nil + } + } + + if f.withoutExtension { + filePath := path.Join(searchPath, fileName) + + ok, err := fileExists(fsys, filePath) + if err != nil { + return "", err + } + + if ok { + return filePath, nil + } + } + } + } + + return "", nil +} + +func fileExists(fsys fs.FS, filePath string) (bool, error) { + fileInfo, err := fs.Stat(fsys, filePath) + if err == nil { + return !fileInfo.IsDir(), nil + } + + if errors.Is(err, fs.ErrNotExist) { + return false, nil + } + + return false, err +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/vendor/github.com/spf13/viper/internal/encoding/decoder.go new file mode 100644 index 0000000000..f472e9ff1a --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/decoder.go @@ -0,0 +1,61 @@ +package encoding + +import ( + "sync" +) + +// Decoder decodes the contents of b into v. +// It's primarily used for decoding contents of a file into a map[string]interface{}. +type Decoder interface { + Decode(b []byte, v map[string]interface{}) error +} + +const ( + // ErrDecoderNotFound is returned when there is no decoder registered for a format. + ErrDecoderNotFound = encodingError("decoder not found for this format") + + // ErrDecoderFormatAlreadyRegistered is returned when an decoder is already registered for a format. + ErrDecoderFormatAlreadyRegistered = encodingError("decoder already registered for this format") +) + +// DecoderRegistry can choose an appropriate Decoder based on the provided format. +type DecoderRegistry struct { + decoders map[string]Decoder + + mu sync.RWMutex +} + +// NewDecoderRegistry returns a new, initialized DecoderRegistry. +func NewDecoderRegistry() *DecoderRegistry { + return &DecoderRegistry{ + decoders: make(map[string]Decoder), + } +} + +// RegisterDecoder registers a Decoder for a format. +// Registering a Decoder for an already existing format is not supported. +func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error { + e.mu.Lock() + defer e.mu.Unlock() + + if _, ok := e.decoders[format]; ok { + return ErrDecoderFormatAlreadyRegistered + } + + e.decoders[format] = enc + + return nil +} + +// Decode calls the underlying Decoder based on the format. 
+func (e *DecoderRegistry) Decode(format string, b []byte, v map[string]interface{}) error { + e.mu.RLock() + decoder, ok := e.decoders[format] + e.mu.RUnlock() + + if !ok { + return ErrDecoderNotFound + } + + return decoder.Decode(b, v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go b/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go new file mode 100644 index 0000000000..4485063b61 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go @@ -0,0 +1,61 @@ +package dotenv + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/subosito/gotenv" +) + +const keyDelimiter = "_" + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for encoding data containing environment variables +// (commonly called as dotenv format). +type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + flattened := map[string]interface{}{} + + flattened = flattenAndMergeMap(flattened, v, "", keyDelimiter) + + keys := make([]string, 0, len(flattened)) + + for key := range flattened { + keys = append(keys, key) + } + + sort.Strings(keys) + + var buf bytes.Buffer + + for _, key := range keys { + _, err := buf.WriteString(fmt.Sprintf("%v=%v\n", strings.ToUpper(key), flattened[key])) + if err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + var buf bytes.Buffer + + _, err := buf.Write(b) + if err != nil { + return err + } + + env, err := gotenv.StrictParse(&buf) + if err != nil { + return err + } + + for key, value := range env { + v[key] = value + } + + return nil +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go new file mode 100644 index 0000000000..ce6e6efa3e --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go @@ -0,0 +1,41 @@ +package dotenv + +import ( + "strings" + + "github.com/spf13/cast" +) + +// flattenAndMergeMap recursively flattens the given map into a new map +// Code is based on the function with the same name in tha main package. +// TODO: move it to a common place +func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} { + if shadow != nil && prefix != "" && shadow[prefix] != nil { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]interface{}) + } + + var m2 map[string]interface{} + if prefix != "" { + prefix += delimiter + } + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = val + continue + } + // recursively merge to shadow map + shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) + } + return shadow +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/vendor/github.com/spf13/viper/internal/encoding/encoder.go new file mode 100644 index 0000000000..2341bf2350 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/encoder.go @@ -0,0 +1,60 @@ +package encoding + +import ( + "sync" +) + +// Encoder encodes the contents of v into a byte representation. +// It's primarily used for encoding a map[string]interface{} into a file format. 
+type Encoder interface { + Encode(v map[string]interface{}) ([]byte, error) +} + +const ( + // ErrEncoderNotFound is returned when there is no encoder registered for a format. + ErrEncoderNotFound = encodingError("encoder not found for this format") + + // ErrEncoderFormatAlreadyRegistered is returned when an encoder is already registered for a format. + ErrEncoderFormatAlreadyRegistered = encodingError("encoder already registered for this format") +) + +// EncoderRegistry can choose an appropriate Encoder based on the provided format. +type EncoderRegistry struct { + encoders map[string]Encoder + + mu sync.RWMutex +} + +// NewEncoderRegistry returns a new, initialized EncoderRegistry. +func NewEncoderRegistry() *EncoderRegistry { + return &EncoderRegistry{ + encoders: make(map[string]Encoder), + } +} + +// RegisterEncoder registers an Encoder for a format. +// Registering a Encoder for an already existing format is not supported. +func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error { + e.mu.Lock() + defer e.mu.Unlock() + + if _, ok := e.encoders[format]; ok { + return ErrEncoderFormatAlreadyRegistered + } + + e.encoders[format] = enc + + return nil +} + +func (e *EncoderRegistry) Encode(format string, v map[string]interface{}) ([]byte, error) { + e.mu.RLock() + encoder, ok := e.encoders[format] + e.mu.RUnlock() + + if !ok { + return nil, ErrEncoderNotFound + } + + return encoder.Encode(v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/error.go b/vendor/github.com/spf13/viper/internal/encoding/error.go new file mode 100644 index 0000000000..e4cde02d7b --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/error.go @@ -0,0 +1,7 @@ +package encoding + +type encodingError string + +func (e encodingError) Error() string { + return string(e) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go new file mode 100644 index 0000000000..7fde8e4bc6 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go @@ -0,0 +1,40 @@ +package hcl + +import ( + "bytes" + "encoding/json" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/printer" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for HCL encoding. +// TODO: add printer config to the codec? +type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + + // TODO: use printer.Format? Is the trailing newline an issue? + + ast, err := hcl.Parse(string(b)) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + + err = printer.Fprint(&buf, ast.Node) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + return hcl.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go b/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go new file mode 100644 index 0000000000..9acd87fc3c --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go @@ -0,0 +1,99 @@ +package ini + +import ( + "bytes" + "sort" + "strings" + + "github.com/spf13/cast" + "gopkg.in/ini.v1" +) + +// LoadOptions contains all customized options used for load data source(s). +// This type is added here for convenience: this way consumers can import a single package called "ini". 
+type LoadOptions = ini.LoadOptions + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for INI encoding. +type Codec struct { + KeyDelimiter string + LoadOptions LoadOptions +} + +func (c Codec) Encode(v map[string]interface{}) ([]byte, error) { + cfg := ini.Empty() + ini.PrettyFormat = false + + flattened := map[string]interface{}{} + + flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) + + keys := make([]string, 0, len(flattened)) + + for key := range flattened { + keys = append(keys, key) + } + + sort.Strings(keys) + + for _, key := range keys { + sectionName, keyName := "", key + + lastSep := strings.LastIndex(key, ".") + if lastSep != -1 { + sectionName = key[:(lastSep)] + keyName = key[(lastSep + 1):] + } + + // TODO: is this a good idea? + if sectionName == "default" { + sectionName = "" + } + + cfg.Section(sectionName).Key(keyName).SetValue(cast.ToString(flattened[key])) + } + + var buf bytes.Buffer + + _, err := cfg.WriteTo(&buf) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (c Codec) Decode(b []byte, v map[string]interface{}) error { + cfg := ini.Empty(c.LoadOptions) + + err := cfg.Append(b) + if err != nil { + return err + } + + sections := cfg.Sections() + + for i := 0; i < len(sections); i++ { + section := sections[i] + keys := section.Keys() + + for j := 0; j < len(keys); j++ { + key := keys[j] + value := cfg.Section(section.Name()).Key(key.Name()).String() + + deepestMap := deepSearch(v, strings.Split(section.Name(), c.keyDelimiter())) + + // set innermost value + deepestMap[key.Name()] = value + } + } + + return nil +} + +func (c Codec) keyDelimiter() string { + if c.KeyDelimiter == "" { + return "." + } + + return c.KeyDelimiter +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go new file mode 100644 index 0000000000..8329856b5b --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go @@ -0,0 +1,74 @@ +package ini + +import ( + "strings" + + "github.com/spf13/cast" +) + +// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED +// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE +// deepSearch scans deep maps, following the key indexes listed in the +// sequence "path". +// The last value is expected to be another map, and is returned. +// +// In case intermediate keys do not exist, or map to a non-map value, +// a new map is created and inserted, and the search continues from there: +// the initial map "m" may be modified! +func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { + for _, k := range path { + m2, ok := m[k] + if !ok { + // intermediate key does not exist + // => create it and continue from there + m3 := make(map[string]interface{}) + m[k] = m3 + m = m3 + continue + } + m3, ok := m2.(map[string]interface{}) + if !ok { + // intermediate key is a value + // => replace with a new map + m3 = make(map[string]interface{}) + m[k] = m3 + } + // continue search from here + m = m3 + } + return m +} + +// flattenAndMergeMap recursively flattens the given map into a new map +// Code is based on the function with the same name in tha main package. 
+// TODO: move it to a common place +func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} { + if shadow != nil && prefix != "" && shadow[prefix] != nil { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]interface{}) + } + + var m2 map[string]interface{} + if prefix != "" { + prefix += delimiter + } + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = val + continue + } + // recursively merge to shadow map + shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) + } + return shadow +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go new file mode 100644 index 0000000000..b8a2251c11 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go @@ -0,0 +1,86 @@ +package javaproperties + +import ( + "bytes" + "sort" + "strings" + + "github.com/magiconair/properties" + "github.com/spf13/cast" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for Java properties encoding. +type Codec struct { + KeyDelimiter string + + // Store read properties on the object so that we can write back in order with comments. + // This will only be used if the configuration read is a properties file. + // TODO: drop this feature in v2 + // TODO: make use of the global properties object optional + Properties *properties.Properties +} + +func (c *Codec) Encode(v map[string]interface{}) ([]byte, error) { + if c.Properties == nil { + c.Properties = properties.NewProperties() + } + + flattened := map[string]interface{}{} + + flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) + + keys := make([]string, 0, len(flattened)) + + for key := range flattened { + keys = append(keys, key) + } + + sort.Strings(keys) + + for _, key := range keys { + _, _, err := c.Properties.Set(key, cast.ToString(flattened[key])) + if err != nil { + return nil, err + } + } + + var buf bytes.Buffer + + _, err := c.Properties.WriteComment(&buf, "#", properties.UTF8) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (c *Codec) Decode(b []byte, v map[string]interface{}) error { + var err error + c.Properties, err = properties.Load(b, properties.UTF8) + if err != nil { + return err + } + + for _, key := range c.Properties.Keys() { + // ignore existence check: we know it's there + value, _ := c.Properties.Get(key) + + // recursively build nested maps + path := strings.Split(key, c.keyDelimiter()) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(v, path[0:len(path)-1]) + + // set innermost value + deepestMap[lastKey] = value + } + + return nil +} + +func (c Codec) keyDelimiter() string { + if c.KeyDelimiter == "" { + return "." 
+ } + + return c.KeyDelimiter +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go new file mode 100644 index 0000000000..93755cac1a --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go @@ -0,0 +1,74 @@ +package javaproperties + +import ( + "strings" + + "github.com/spf13/cast" +) + +// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED +// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE +// deepSearch scans deep maps, following the key indexes listed in the +// sequence "path". +// The last value is expected to be another map, and is returned. +// +// In case intermediate keys do not exist, or map to a non-map value, +// a new map is created and inserted, and the search continues from there: +// the initial map "m" may be modified! +func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { + for _, k := range path { + m2, ok := m[k] + if !ok { + // intermediate key does not exist + // => create it and continue from there + m3 := make(map[string]interface{}) + m[k] = m3 + m = m3 + continue + } + m3, ok := m2.(map[string]interface{}) + if !ok { + // intermediate key is a value + // => replace with a new map + m3 = make(map[string]interface{}) + m[k] = m3 + } + // continue search from here + m = m3 + } + return m +} + +// flattenAndMergeMap recursively flattens the given map into a new map +// Code is based on the function with the same name in tha main package. +// TODO: move it to a common place +func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} { + if shadow != nil && prefix != "" && shadow[prefix] != nil { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]interface{}) + } + + var m2 map[string]interface{} + if prefix != "" { + prefix += delimiter + } + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = val + continue + } + // recursively merge to shadow map + shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) + } + return shadow +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/json/codec.go b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go new file mode 100644 index 0000000000..1b7caaceb5 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go @@ -0,0 +1,17 @@ +package json + +import ( + "encoding/json" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding. +type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + // TODO: expose prefix and indent in the Codec as setting? 
+ return json.MarshalIndent(v, "", " ") +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + return json.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go new file mode 100644 index 0000000000..45fddc8b59 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go @@ -0,0 +1,39 @@ +//go:build viper_toml1 +// +build viper_toml1 + +package toml + +import ( + "github.com/pelletier/go-toml" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. +type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + t, err := toml.TreeFromMap(v) + if err != nil { + return nil, err + } + + s, err := t.ToTomlString() + if err != nil { + return nil, err + } + + return []byte(s), nil +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + tree, err := toml.LoadBytes(b) + if err != nil { + return err + } + + tmap := tree.ToMap() + for key, value := range tmap { + v[key] = value + } + + return nil +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go new file mode 100644 index 0000000000..112c6d3725 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go @@ -0,0 +1,19 @@ +//go:build !viper_toml1 +// +build !viper_toml1 + +package toml + +import ( + "github.com/pelletier/go-toml/v2" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. +type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + return toml.Marshal(v) +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + return toml.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go new file mode 100644 index 0000000000..24cc19dfca --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go @@ -0,0 +1,14 @@ +package yaml + +// import "gopkg.in/yaml.v2" + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. 
+type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + return yaml.Marshal(v) +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + return yaml.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go new file mode 100644 index 0000000000..4c398c2f42 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go @@ -0,0 +1,14 @@ +//go:build viper_yaml2 +// +build viper_yaml2 + +package yaml + +import yamlv2 "gopkg.in/yaml.v2" + +var yaml = struct { + Marshal func(in interface{}) (out []byte, err error) + Unmarshal func(in []byte, out interface{}) (err error) +}{ + Marshal: yamlv2.Marshal, + Unmarshal: yamlv2.Unmarshal, +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go new file mode 100644 index 0000000000..3a4775ced9 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go @@ -0,0 +1,14 @@ +//go:build !viper_yaml2 +// +build !viper_yaml2 + +package yaml + +import yamlv3 "gopkg.in/yaml.v3" + +var yaml = struct { + Marshal func(in interface{}) (out []byte, err error) + Unmarshal func(in []byte, out interface{}) (err error) +}{ + Marshal: yamlv3.Marshal, + Unmarshal: yamlv3.Unmarshal, +} diff --git a/vendor/github.com/spf13/viper/logger.go b/vendor/github.com/spf13/viper/logger.go new file mode 100644 index 0000000000..0115067ae6 --- /dev/null +++ b/vendor/github.com/spf13/viper/logger.go @@ -0,0 +1,77 @@ +package viper + +import ( + "fmt" + + jww "github.com/spf13/jwalterweatherman" +) + +// Logger is a unified interface for various logging use cases and practices, including: +// - leveled logging +// - structured logging +type Logger interface { + // Trace logs a Trace event. + // + // Even more fine-grained information than Debug events. + // Loggers not supporting this level should fall back to Debug. + Trace(msg string, keyvals ...interface{}) + + // Debug logs a Debug event. + // + // A verbose series of information events. + // They are useful when debugging the system. + Debug(msg string, keyvals ...interface{}) + + // Info logs an Info event. + // + // General information about what's happening inside the system. + Info(msg string, keyvals ...interface{}) + + // Warn logs a Warn(ing) event. + // + // Non-critical events that should be looked at. + Warn(msg string, keyvals ...interface{}) + + // Error logs an Error event. + // + // Critical events that require immediate attention. + // Loggers commonly provide Fatal and Panic levels above Error level, + // but exiting and panicking is out of scope for a logging library.
+ Error(msg string, keyvals ...interface{}) +} + +type jwwLogger struct{} + +func (jwwLogger) Trace(msg string, keyvals ...interface{}) { + jww.TRACE.Printf(jwwLogMessage(msg, keyvals...)) +} + +func (jwwLogger) Debug(msg string, keyvals ...interface{}) { + jww.DEBUG.Printf(jwwLogMessage(msg, keyvals...)) +} + +func (jwwLogger) Info(msg string, keyvals ...interface{}) { + jww.INFO.Printf(jwwLogMessage(msg, keyvals...)) +} + +func (jwwLogger) Warn(msg string, keyvals ...interface{}) { + jww.WARN.Printf(jwwLogMessage(msg, keyvals...)) +} + +func (jwwLogger) Error(msg string, keyvals ...interface{}) { + jww.ERROR.Printf(jwwLogMessage(msg, keyvals...)) +} + +func jwwLogMessage(msg string, keyvals ...interface{}) string { + out := msg + + if len(keyvals) > 0 && len(keyvals)%2 == 1 { + keyvals = append(keyvals, nil) + } + + for i := 0; i <= len(keyvals)-2; i += 2 { + out = fmt.Sprintf("%s %v=%v", out, keyvals[i], keyvals[i+1]) + } + + return out +} diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index cee6b24296..ee7a86d9df 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -18,9 +18,7 @@ import ( "strings" "unicode" - "github.com/spf13/afero" "github.com/spf13/cast" - jww "github.com/spf13/jwalterweatherman" ) // ConfigParseError denotes failing to parse configuration file. @@ -88,26 +86,14 @@ func insensitiviseMap(m map[string]interface{}) { } } -func absPathify(inPath string) string { - jww.INFO.Println("Trying to resolve absolute path to", inPath) +func absPathify(logger Logger, inPath string) string { + logger.Info("trying to resolve absolute path", "path", inPath) if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) { inPath = userHomeDir() + inPath[5:] } - if strings.HasPrefix(inPath, "$") { - end := strings.Index(inPath, string(os.PathSeparator)) - - var value, suffix string - if end == -1 { - value = os.Getenv(inPath[1:]) - } else { - value = os.Getenv(inPath[1:end]) - suffix = inPath[end:] - } - - inPath = value + suffix - } + inPath = os.ExpandEnv(inPath) if filepath.IsAbs(inPath) { return filepath.Clean(inPath) @@ -118,21 +104,9 @@ func absPathify(inPath string) string { return filepath.Clean(p) } - jww.ERROR.Println("Couldn't discover absolute path") - jww.ERROR.Println(err) - return "" -} + logger.Error(fmt.Errorf("could not discover absolute path: %w", err).Error()) -// Check if file Exists -func exists(fs afero.Fs, path string) (bool, error) { - stat, err := fs.Stat(path) - if err == nil { - return !stat.IsDir(), nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err + return "" } func stringInSlice(a string, list []string) bool { diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 405dc20fe3..a3812e92f3 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -22,7 +22,6 @@ package viper import ( "bytes" "encoding/csv" - "encoding/json" "errors" "fmt" "io" @@ -30,23 +29,25 @@ import ( "os" "path/filepath" "reflect" + "strconv" "strings" "sync" "time" "github.com/fsnotify/fsnotify" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/printer" - "github.com/magiconair/properties" "github.com/mitchellh/mapstructure" - "github.com/pelletier/go-toml" "github.com/spf13/afero" "github.com/spf13/cast" - jww "github.com/spf13/jwalterweatherman" "github.com/spf13/pflag" - "github.com/subosito/gotenv" - "gopkg.in/ini.v1" - "gopkg.in/yaml.v2" + + 
"github.com/spf13/viper/internal/encoding" + "github.com/spf13/viper/internal/encoding/dotenv" + "github.com/spf13/viper/internal/encoding/hcl" + "github.com/spf13/viper/internal/encoding/ini" + "github.com/spf13/viper/internal/encoding/javaproperties" + "github.com/spf13/viper/internal/encoding/json" + "github.com/spf13/viper/internal/encoding/toml" + "github.com/spf13/viper/internal/encoding/yaml" ) // ConfigMarshalError happens when failing to marshal the configuration. @@ -175,6 +176,8 @@ func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { // "user": "root", // "endpoint": "https://localhost" // } +// +// Note: Vipers are not safe for concurrent Get() and Set() operations. type Viper struct { // Delimiter that separates a list of keys // used to access a nested value in one go @@ -196,6 +199,9 @@ type Viper struct { configPermissions os.FileMode envPrefix string + // Specific commands for ini parsing + iniLoadOptions ini.LoadOptions + automaticEnvApplied bool envKeyReplacer StringReplacer allowEmptyEnv bool @@ -205,15 +211,17 @@ type Viper struct { defaults map[string]interface{} kvstore map[string]interface{} pflags map[string]FlagValue - env map[string]string + env map[string][]string aliases map[string]string typeByDefValue bool - // Store read properties on the object so that we can write back in order with comments. - // This will only be used if the configuration read is a properties file. - properties *properties.Properties - onConfigChange func(fsnotify.Event) + + logger Logger + + // TODO: should probably be protected with a mutex + encoderRegistry *encoding.EncoderRegistry + decoderRegistry *encoding.DecoderRegistry } // New returns an initialized Viper instance. @@ -221,16 +229,19 @@ func New() *Viper { v := new(Viper) v.keyDelim = "." v.configName = "config" - v.configPermissions = os.FileMode(0644) + v.configPermissions = os.FileMode(0o644) v.fs = afero.NewOsFs() v.config = make(map[string]interface{}) v.override = make(map[string]interface{}) v.defaults = make(map[string]interface{}) v.kvstore = make(map[string]interface{}) v.pflags = make(map[string]FlagValue) - v.env = make(map[string]string) + v.env = make(map[string][]string) v.aliases = make(map[string]string) v.typeByDefValue = false + v.logger = jwwLogger{} + + v.resetEncoding() return v } @@ -278,6 +289,8 @@ func NewWithOptions(opts ...Option) *Viper { opt.apply(v) } + v.resetEncoding() + return v } @@ -286,10 +299,88 @@ func NewWithOptions(opts ...Option) *Viper { // can use it in their testing as well. 
func Reset() { v = New() - SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} + SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} } +// TODO: make this lazy initialization instead +func (v *Viper) resetEncoding() { + encoderRegistry := encoding.NewEncoderRegistry() + decoderRegistry := encoding.NewDecoderRegistry() + + { + codec := yaml.Codec{} + + encoderRegistry.RegisterEncoder("yaml", codec) + decoderRegistry.RegisterDecoder("yaml", codec) + + encoderRegistry.RegisterEncoder("yml", codec) + decoderRegistry.RegisterDecoder("yml", codec) + } + + { + codec := json.Codec{} + + encoderRegistry.RegisterEncoder("json", codec) + decoderRegistry.RegisterDecoder("json", codec) + } + + { + codec := toml.Codec{} + + encoderRegistry.RegisterEncoder("toml", codec) + decoderRegistry.RegisterDecoder("toml", codec) + } + + { + codec := hcl.Codec{} + + encoderRegistry.RegisterEncoder("hcl", codec) + decoderRegistry.RegisterDecoder("hcl", codec) + + encoderRegistry.RegisterEncoder("tfvars", codec) + decoderRegistry.RegisterDecoder("tfvars", codec) + } + + { + codec := ini.Codec{ + KeyDelimiter: v.keyDelim, + LoadOptions: v.iniLoadOptions, + } + + encoderRegistry.RegisterEncoder("ini", codec) + decoderRegistry.RegisterDecoder("ini", codec) + } + + { + codec := &javaproperties.Codec{ + KeyDelimiter: v.keyDelim, + } + + encoderRegistry.RegisterEncoder("properties", codec) + decoderRegistry.RegisterDecoder("properties", codec) + + encoderRegistry.RegisterEncoder("props", codec) + decoderRegistry.RegisterDecoder("props", codec) + + encoderRegistry.RegisterEncoder("prop", codec) + decoderRegistry.RegisterDecoder("prop", codec) + } + + { + codec := &dotenv.Codec{} + + encoderRegistry.RegisterEncoder("dotenv", codec) + decoderRegistry.RegisterDecoder("dotenv", codec) + + encoderRegistry.RegisterEncoder("env", codec) + decoderRegistry.RegisterDecoder("env", codec) + } + + v.encoderRegistry = encoderRegistry + v.decoderRegistry = decoderRegistry +} + type defaultRemoteProvider struct { provider string endpoint string @@ -325,7 +416,7 @@ type RemoteProvider interface { } // SupportedExts are universally supported extensions. -var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} +var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} // SupportedRemoteProviders are universally supported remote providers. var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} @@ -341,7 +432,7 @@ func (v *Viper) WatchConfig() { initWG := sync.WaitGroup{} initWG.Add(1) go func() { - watcher, err := fsnotify.NewWatcher() + watcher, err := newWatcher() if err != nil { log.Fatal(err) } @@ -385,7 +476,7 @@ func (v *Viper) WatchConfig() { v.onConfigChange(event) } } else if filepath.Clean(event.Name) == configFile && - event.Op&fsnotify.Remove&fsnotify.Remove != 0 { + event.Op&fsnotify.Remove != 0 { eventsWG.Done() return } @@ -409,6 +500,7 @@ func (v *Viper) WatchConfig() { // SetConfigFile explicitly defines the path, name and extension of the config file. // Viper will use this and not check any of the config paths. 
func SetConfigFile(in string) { v.SetConfigFile(in) } + func (v *Viper) SetConfigFile(in string) { if in != "" { v.configFile = in @@ -419,6 +511,7 @@ func (v *Viper) SetConfigFile(in string) { // E.g. if your prefix is "spf", the env registry will look for env // variables that start with "SPF_". func SetEnvPrefix(in string) { v.SetEnvPrefix(in) } + func (v *Viper) SetEnvPrefix(in string) { if in != "" { v.envPrefix = in @@ -437,6 +530,7 @@ func (v *Viper) mergeWithEnvPrefix(in string) string { // but empty environment variables as valid values instead of falling back. // For backward compatibility reasons this is false by default. func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) } + func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) { v.allowEmptyEnv = allowEmptyEnv } @@ -465,10 +559,12 @@ func (v *Viper) ConfigFileUsed() string { return v.configFile } // AddConfigPath adds a path for Viper to search for the config file in. // Can be called multiple times to define multiple search paths. func AddConfigPath(in string) { v.AddConfigPath(in) } + func (v *Viper) AddConfigPath(in string) { if in != "" { - absin := absPathify(in) - jww.INFO.Println("adding", absin, "to paths to search") + absin := absPathify(v.logger, in) + + v.logger.Info("adding path to search paths", "path", absin) if !stringInSlice(absin, v.configPaths) { v.configPaths = append(v.configPaths, absin) } @@ -486,12 +582,14 @@ func (v *Viper) AddConfigPath(in string) { func AddRemoteProvider(provider, endpoint, path string) error { return v.AddRemoteProvider(provider, endpoint, path) } + func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { if !stringInSlice(provider, SupportedRemoteProviders) { return UnsupportedRemoteProviderError(provider) } if provider != "" && endpoint != "" { - jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + rp := &defaultRemoteProvider{ endpoint: endpoint, provider: provider, @@ -523,7 +621,8 @@ func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring return UnsupportedRemoteProviderError(provider) } if provider != "" && endpoint != "" { - jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + rp := &defaultRemoteProvider{ endpoint: endpoint, provider: provider, @@ -577,9 +676,9 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac return nil } -// searchMapWithPathPrefixes recursively searches for a value for path in source map. +// searchIndexableWithPathPrefixes recursively searches for a value for path in source map/slice. // -// While searchMap() considers each path element as a single map key, this +// While searchMap() considers each path element as a single map key or slice index, this // function searches for, and prioritizes, merged path elements. // e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar" // is also defined, this latter value is returned for path ["foo", "bar"]. @@ -588,7 +687,7 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac // in their keys). // // Note: This assumes that the path entries and map keys are lower cased. 
-func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} { +func (v *Viper) searchIndexableWithPathPrefixes(source interface{}, path []string) interface{} { if len(path) == 0 { return source } @@ -597,28 +696,15 @@ func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path [] for i := len(path); i > 0; i-- { prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) - next, ok := source[prefixKey] - if ok { - // Fast path - if i == len(path) { - return next - } - - // Nested case - var val interface{} - switch next.(type) { - case map[interface{}]interface{}: - val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:]) - case map[string]interface{}: - // Type assertion is safe here since it is only reached - // if the type of `next` is the same as the type being asserted - val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:]) - default: - // got a value but nested key expected, do nothing and look for next prefix - } - if val != nil { - return val - } + var val interface{} + switch sourceIndexable := source.(type) { + case []interface{}: + val = v.searchSliceWithPathPrefixes(sourceIndexable, prefixKey, i, path) + case map[string]interface{}: + val = v.searchMapWithPathPrefixes(sourceIndexable, prefixKey, i, path) + } + if val != nil { + return val } } @@ -626,6 +712,76 @@ func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path [] return nil } +// searchSliceWithPathPrefixes searches for a value for path in sourceSlice +// +// This function is part of the searchIndexableWithPathPrefixes recurring search and +// should not be called directly from functions other than searchIndexableWithPathPrefixes. +func (v *Viper) searchSliceWithPathPrefixes( + sourceSlice []interface{}, + prefixKey string, + pathIndex int, + path []string, +) interface{} { + // if the prefixKey is not a number or it is out of bounds of the slice + index, err := strconv.Atoi(prefixKey) + if err != nil || len(sourceSlice) <= index { + return nil + } + + next := sourceSlice[index] + + // Fast path + if pathIndex == len(path) { + return next + } + + switch n := next.(type) { + case map[interface{}]interface{}: + return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) + case map[string]interface{}, []interface{}: + return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) + default: + // got a value but nested key expected, do nothing and look for next prefix + } + + // not found + return nil +} + +// searchMapWithPathPrefixes searches for a value for path in sourceMap +// +// This function is part of the searchIndexableWithPathPrefixes recurring search and +// should not be called directly from functions other than searchIndexableWithPathPrefixes. 
+func (v *Viper) searchMapWithPathPrefixes( + sourceMap map[string]interface{}, + prefixKey string, + pathIndex int, + path []string, +) interface{} { + next, ok := sourceMap[prefixKey] + if !ok { + return nil + } + + // Fast path + if pathIndex == len(path) { + return next + } + + // Nested case + switch n := next.(type) { + case map[interface{}]interface{}: + return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) + case map[string]interface{}, []interface{}: + return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) + default: + // got a value but nested key expected, do nothing and look for next prefix + } + + // not found + return nil +} + // isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere // on its path in the map. // e.g., if "foo.bar" has a value in the given map, it “shadows” @@ -706,6 +862,7 @@ func (v *Viper) isPathShadowedInAutoEnv(path []string) string { // // "a b c" func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) } + func (v *Viper) SetTypeByDefaultValue(enable bool) { v.typeByDefValue = enable } @@ -723,6 +880,7 @@ func GetViper() *Viper { // // Get returns an interface. For a specific value use one of the Get____ methods. func Get(key string) interface{} { return v.Get(key) } + func (v *Viper) Get(key string) interface{} { lcaseKey := strings.ToLower(key) val := v.find(lcaseKey, true) @@ -773,6 +931,7 @@ func (v *Viper) Get(key string) interface{} { // Sub returns new Viper instance representing a sub tree of this instance. // Sub is case-insensitive for a key. func Sub(key string) *Viper { return v.Sub(key) } + func (v *Viper) Sub(key string) *Viper { subv := New() data := v.Get(key) @@ -789,96 +948,112 @@ func (v *Viper) Sub(key string) *Viper { // GetString returns the value associated with the key as a string. func GetString(key string) string { return v.GetString(key) } + func (v *Viper) GetString(key string) string { return cast.ToString(v.Get(key)) } // GetBool returns the value associated with the key as a boolean. func GetBool(key string) bool { return v.GetBool(key) } + func (v *Viper) GetBool(key string) bool { return cast.ToBool(v.Get(key)) } // GetInt returns the value associated with the key as an integer. func GetInt(key string) int { return v.GetInt(key) } + func (v *Viper) GetInt(key string) int { return cast.ToInt(v.Get(key)) } // GetInt32 returns the value associated with the key as an integer. func GetInt32(key string) int32 { return v.GetInt32(key) } + func (v *Viper) GetInt32(key string) int32 { return cast.ToInt32(v.Get(key)) } // GetInt64 returns the value associated with the key as an integer. func GetInt64(key string) int64 { return v.GetInt64(key) } + func (v *Viper) GetInt64(key string) int64 { return cast.ToInt64(v.Get(key)) } // GetUint returns the value associated with the key as an unsigned integer. func GetUint(key string) uint { return v.GetUint(key) } + func (v *Viper) GetUint(key string) uint { return cast.ToUint(v.Get(key)) } // GetUint32 returns the value associated with the key as an unsigned integer. func GetUint32(key string) uint32 { return v.GetUint32(key) } + func (v *Viper) GetUint32(key string) uint32 { return cast.ToUint32(v.Get(key)) } // GetUint64 returns the value associated with the key as an unsigned integer. func GetUint64(key string) uint64 { return v.GetUint64(key) } + func (v *Viper) GetUint64(key string) uint64 { return cast.ToUint64(v.Get(key)) } // GetFloat64 returns the value associated with the key as a float64. 
func GetFloat64(key string) float64 { return v.GetFloat64(key) } + func (v *Viper) GetFloat64(key string) float64 { return cast.ToFloat64(v.Get(key)) } // GetTime returns the value associated with the key as time. func GetTime(key string) time.Time { return v.GetTime(key) } + func (v *Viper) GetTime(key string) time.Time { return cast.ToTime(v.Get(key)) } // GetDuration returns the value associated with the key as a duration. func GetDuration(key string) time.Duration { return v.GetDuration(key) } + func (v *Viper) GetDuration(key string) time.Duration { return cast.ToDuration(v.Get(key)) } // GetIntSlice returns the value associated with the key as a slice of int values. func GetIntSlice(key string) []int { return v.GetIntSlice(key) } + func (v *Viper) GetIntSlice(key string) []int { return cast.ToIntSlice(v.Get(key)) } // GetStringSlice returns the value associated with the key as a slice of strings. func GetStringSlice(key string) []string { return v.GetStringSlice(key) } + func (v *Viper) GetStringSlice(key string) []string { return cast.ToStringSlice(v.Get(key)) } // GetStringMap returns the value associated with the key as a map of interfaces. func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) } + func (v *Viper) GetStringMap(key string) map[string]interface{} { return cast.ToStringMap(v.Get(key)) } // GetStringMapString returns the value associated with the key as a map of strings. func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) } + func (v *Viper) GetStringMapString(key string) map[string]string { return cast.ToStringMapString(v.Get(key)) } // GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) } + func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { return cast.ToStringMapStringSlice(v.Get(key)) } @@ -886,6 +1061,7 @@ func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { // GetSizeInBytes returns the size of the value associated with the given key // in bytes. func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) } + func (v *Viper) GetSizeInBytes(key string) uint { sizeStr := cast.ToString(v.Get(key)) return parseSizeInBytes(sizeStr) @@ -895,6 +1071,7 @@ func (v *Viper) GetSizeInBytes(key string) uint { func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { return v.UnmarshalKey(key, rawVal, opts...) } + func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...)) } @@ -904,6 +1081,7 @@ func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConf func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { return v.Unmarshal(rawVal, opts...) } + func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { return decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...)) } @@ -940,6 +1118,7 @@ func decode(input interface{}, config *mapstructure.DecoderConfig) error { func UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error { return v.UnmarshalExact(rawVal, opts...) } + func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error { config := defaultDecoderConfig(rawVal, opts...) 
config.ErrorUnused = true @@ -950,6 +1129,7 @@ func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) // BindPFlags binds a full flag set to the configuration, using each flag's long // name as the config key. func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) } + func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { return v.BindFlagValues(pflagValueSet{flags}) } @@ -961,13 +1141,18 @@ func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { // Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) // func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } + func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { + if flag == nil { + return fmt.Errorf("flag for %q is nil", key) + } return v.BindFlagValue(key, pflagValue{flag}) } // BindFlagValues binds a full FlagValue set to the configuration, using each flag's long // name as the config key. func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) } + func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { flags.VisitAll(func(flag FlagValue) { if err = v.BindFlagValue(flag.Name(), flag); err != nil { @@ -979,6 +1164,7 @@ func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { // BindFlagValue binds a specific key to a FlagValue. func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) } + func (v *Viper) BindFlagValue(key string, flag FlagValue) error { if flag == nil { return fmt.Errorf("flag for %q is nil", key) @@ -990,27 +1176,38 @@ func (v *Viper) BindFlagValue(key string, flag FlagValue) error { // BindEnv binds a Viper key to a ENV variable. // ENV variables are case sensitive. // If only a key is provided, it will use the env key matching the key, uppercased. +// If more arguments are provided, they will represent the env variable names that +// should bind to this key and will be taken in the specified order. // EnvPrefix will be used when set when env name is not provided. func BindEnv(input ...string) error { return v.BindEnv(input...) } + func (v *Viper) BindEnv(input ...string) error { - var key, envkey string if len(input) == 0 { return fmt.Errorf("missing key to bind to") } - key = strings.ToLower(input[0]) + key := strings.ToLower(input[0]) if len(input) == 1 { - envkey = v.mergeWithEnvPrefix(key) + v.env[key] = append(v.env[key], v.mergeWithEnvPrefix(key)) } else { - envkey = input[1] + v.env[key] = append(v.env[key], input[1:]...) } - v.env[key] = envkey - return nil } +// MustBindEnv wraps BindEnv in a panic. +// If there is an error binding an environment variable, MustBindEnv will +// panic. +func MustBindEnv(input ...string) { v.MustBindEnv(input...) } + +func (v *Viper) MustBindEnv(input ...string) { + if err := v.BindEnv(input...); err != nil { + panic(fmt.Sprintf("error while binding environment variable: %v", err)) + } +} + // Given a key, find the value. // // Viper will check to see if an alias exists first. 
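The BindEnv hunk above changes v.env from map[string]string to map[string][]string, so one key may now bind to several environment variables, consulted in the order they were given; MustBindEnv is the panicking variant. A minimal usage sketch of the new behavior (illustrative only, not part of the patch; APP_PORT and PORT are made-up variable names):

    package main

    import (
        "fmt"
        "os"

        "github.com/spf13/viper"
    )

    func main() {
        v := viper.New()
        // Bind "port" to two candidate variables; the first one that is set wins.
        v.MustBindEnv("port", "APP_PORT", "PORT")
        os.Setenv("PORT", "8080") // APP_PORT is unset, so the lookup falls through to PORT
        fmt.Println(v.GetInt("port")) // 8080
    }
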
@@ -1055,7 +1252,7 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { return cast.ToInt(flag.ValueString()) case "bool": return cast.ToBool(flag.ValueString()) - case "stringSlice": + case "stringSlice", "stringArray": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) @@ -1086,10 +1283,12 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { return nil } } - envkey, exists := v.env[lcaseKey] + envkeys, exists := v.env[lcaseKey] if exists { - if val, ok := v.getEnv(envkey); ok { - return val + for _, envkey := range envkeys { + if val, ok := v.getEnv(envkey); ok { + return val + } } } if nested && v.isPathShadowedInFlatMap(path, v.env) != "" { @@ -1097,7 +1296,7 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { } // Config file next - val = v.searchMapWithPathPrefixes(v.config, path) + val = v.searchIndexableWithPathPrefixes(v.config, path) if val != nil { return val } @@ -1132,7 +1331,7 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { return cast.ToInt(flag.ValueString()) case "bool": return cast.ToBool(flag.ValueString()) - case "stringSlice": + case "stringSlice", "stringArray": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) @@ -1190,15 +1389,17 @@ func stringToStringConv(val string) interface{} { // IsSet checks to see if the key has been set in any of the data locations. // IsSet is case-insensitive for a key. func IsSet(key string) bool { return v.IsSet(key) } + func (v *Viper) IsSet(key string) bool { lcaseKey := strings.ToLower(key) val := v.find(lcaseKey, false) return val != nil } -// AutomaticEnv has Viper check ENV variables for all. -// keys set in config, default & flags +// AutomaticEnv makes Viper check if environment variables match any of the existing keys +// (config, default or flags). If matching env vars are found, they are loaded into Viper. func AutomaticEnv() { v.AutomaticEnv() } + func (v *Viper) AutomaticEnv() { v.automaticEnvApplied = true } @@ -1207,6 +1408,7 @@ func (v *Viper) AutomaticEnv() { // Useful for mapping an environmental variable to a key that does // not match it. func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) } + func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) { v.envKeyReplacer = r } @@ -1214,6 +1416,7 @@ func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) { // RegisterAlias creates an alias that provides another accessor for the same key. // This enables one to change a name without breaking the application. func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) } + func (v *Viper) RegisterAlias(alias string, key string) { v.registerAlias(alias, strings.ToLower(key)) } @@ -1246,14 +1449,15 @@ func (v *Viper) registerAlias(alias string, key string) { v.aliases[alias] = key } } else { - jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key)) + v.logger.Warn("creating circular reference alias", "alias", alias, "key", key, "real_key", v.realKey(key)) } } func (v *Viper) realKey(key string) string { newkey, exists := v.aliases[key] if exists { - jww.DEBUG.Println("Alias", key, "to", newkey) + v.logger.Debug("key is an alias", "alias", key, "to", newkey) + return v.realKey(newkey) } return key @@ -1261,18 +1465,22 @@ func (v *Viper) realKey(key string) string { // InConfig checks to see if the given key (or an alias) is in the config file. 
func InConfig(key string) bool { return v.InConfig(key) } + func (v *Viper) InConfig(key string) bool { + lcaseKey := strings.ToLower(key) + // if the requested key is an alias, then return the proper key - key = v.realKey(key) + lcaseKey = v.realKey(lcaseKey) + path := strings.Split(lcaseKey, v.keyDelim) - _, exists := v.config[key] - return exists + return v.searchIndexableWithPathPrefixes(v.config, path) != nil } // SetDefault sets the default value for this key. // SetDefault is case-insensitive for a key. // Default only used when no value is provided by the user via flag, config or ENV. func SetDefault(key string, value interface{}) { v.SetDefault(key, value) } + func (v *Viper) SetDefault(key string, value interface{}) { // If alias passed in, then set the proper default key = v.realKey(strings.ToLower(key)) @@ -1291,6 +1499,7 @@ func (v *Viper) SetDefault(key string, value interface{}) { // Will be used instead of values obtained via // flags, config file, ENV, default, or key/value store. func Set(key string, value interface{}) { v.Set(key, value) } + func (v *Viper) Set(key string, value interface{}) { // If alias passed in, then set the proper override key = v.realKey(strings.ToLower(key)) @@ -1307,8 +1516,9 @@ func (v *Viper) Set(key string, value interface{}) { // ReadInConfig will discover and load the configuration file from disk // and key/value stores, searching in one of the defined paths. func ReadInConfig() error { return v.ReadInConfig() } + func (v *Viper) ReadInConfig() error { - jww.INFO.Println("Attempting to read in config file") + v.logger.Info("attempting to read in config file") filename, err := v.getConfigFile() if err != nil { return err @@ -1318,7 +1528,7 @@ func (v *Viper) ReadInConfig() error { return UnsupportedConfigError(v.getConfigType()) } - jww.DEBUG.Println("Reading file: ", filename) + v.logger.Debug("reading file", "file", filename) file, err := afero.ReadFile(v.fs, filename) if err != nil { return err @@ -1337,8 +1547,9 @@ func (v *Viper) ReadInConfig() error { // MergeInConfig merges a new configuration with an existing config. func MergeInConfig() error { return v.MergeInConfig() } + func (v *Viper) MergeInConfig() error { - jww.INFO.Println("Attempting to merge in config file") + v.logger.Info("attempting to merge in config file") filename, err := v.getConfigFile() if err != nil { return err @@ -1359,6 +1570,7 @@ func (v *Viper) MergeInConfig() error { // ReadConfig will read a configuration file, setting existing keys to nil if the // key does not exist in the file. func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } + func (v *Viper) ReadConfig(in io.Reader) error { v.config = make(map[string]interface{}) return v.unmarshalReader(in, v.config) @@ -1366,6 +1578,7 @@ func (v *Viper) ReadConfig(in io.Reader) error { // MergeConfig merges a new configuration with an existing config. func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } + func (v *Viper) MergeConfig(in io.Reader) error { cfg := make(map[string]interface{}) if err := v.unmarshalReader(in, cfg); err != nil { @@ -1377,6 +1590,7 @@ func (v *Viper) MergeConfig(in io.Reader) error { // MergeConfigMap merges the configuration from the map given with an existing config. // Note that the map given may be modified. 
func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) } + func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error { if v.config == nil { v.config = make(map[string]interface{}) @@ -1388,6 +1602,7 @@ func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error { // WriteConfig writes the current configuration to a file. func WriteConfig() error { return v.WriteConfig() } + func (v *Viper) WriteConfig() error { filename, err := v.getConfigFile() if err != nil { @@ -1398,6 +1613,7 @@ func (v *Viper) WriteConfig() error { // SafeWriteConfig writes current configuration to file only if the file does not exist. func SafeWriteConfig() error { return v.SafeWriteConfig() } + func (v *Viper) SafeWriteConfig() error { if len(v.configPaths) < 1 { return errors.New("missing configuration for 'configPath'") @@ -1407,12 +1623,14 @@ func (v *Viper) SafeWriteConfig() error { // WriteConfigAs writes current configuration to a given filename. func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) } + func (v *Viper) WriteConfigAs(filename string) error { return v.writeConfig(filename, true) } // SafeWriteConfigAs writes current configuration to a given filename if it does not exist. func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } + func (v *Viper) SafeWriteConfigAs(filename string) error { alreadyExists, err := afero.Exists(v.fs, filename) if alreadyExists && err == nil { @@ -1422,11 +1640,12 @@ func (v *Viper) SafeWriteConfigAs(filename string) error { } func (v *Viper) writeConfig(filename string, force bool) error { - jww.INFO.Println("Attempting to write configuration to file.") + v.logger.Info("attempting to write configuration to file") + var configType string ext := filepath.Ext(filename) - if ext != "" { + if ext != "" && ext != filepath.Base(filename) { configType = ext[1:] } else { configType = v.configType @@ -1463,81 +1682,17 @@ func (v *Viper) writeConfig(filename string, force bool) error { func unmarshalReader(in io.Reader, c map[string]interface{}) error { return v.unmarshalReader(in, c) } + func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { buf := new(bytes.Buffer) buf.ReadFrom(in) - switch strings.ToLower(v.getConfigType()) { - case "yaml", "yml": - if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "json": - if err := json.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "hcl": - obj, err := hcl.Parse(buf.String()) + switch format := strings.ToLower(v.getConfigType()); format { + case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "properties", "props", "prop", "dotenv", "env": + err := v.decoderRegistry.Decode(format, buf.Bytes(), c) if err != nil { return ConfigParseError{err} } - if err = hcl.DecodeObject(&c, obj); err != nil { - return ConfigParseError{err} - } - - case "toml": - tree, err := toml.LoadReader(buf) - if err != nil { - return ConfigParseError{err} - } - tmap := tree.ToMap() - for k, v := range tmap { - c[k] = v - } - - case "dotenv", "env": - env, err := gotenv.StrictParse(buf) - if err != nil { - return ConfigParseError{err} - } - for k, v := range env { - c[k] = v - } - - case "properties", "props", "prop": - v.properties = properties.NewProperties() - var err error - if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil { - return ConfigParseError{err} - } - for _, key := range v.properties.Keys() { - value, _ := 
v.properties.Get(key) - // recursively build nested maps - path := strings.Split(key, ".") - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(c, path[0:len(path)-1]) - // set innermost value - deepestMap[lastKey] = value - } - - case "ini": - cfg := ini.Empty() - err := cfg.Append(buf.Bytes()) - if err != nil { - return ConfigParseError{err} - } - sections := cfg.Sections() - for i := 0; i < len(sections); i++ { - section := sections[i] - keys := section.Keys() - for j := 0; j < len(keys); j++ { - key := keys[j] - value := cfg.Section(section.Name()).Key(key.Name()).String() - c[section.Name()+"."+key.Name()] = value - } - } } insensitiviseMap(c) @@ -1548,92 +1703,16 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { func (v *Viper) marshalWriter(f afero.File, configType string) error { c := v.AllSettings() switch configType { - case "json": - b, err := json.MarshalIndent(c, "", " ") - if err != nil { - return ConfigMarshalError{err} - } - _, err = f.WriteString(string(b)) + case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "prop", "props", "properties", "dotenv", "env": + b, err := v.encoderRegistry.Encode(configType, c) if err != nil { return ConfigMarshalError{err} } - case "hcl": - b, err := json.Marshal(c) - if err != nil { - return ConfigMarshalError{err} - } - ast, err := hcl.Parse(string(b)) - if err != nil { - return ConfigMarshalError{err} - } - err = printer.Fprint(f, ast.Node) - if err != nil { - return ConfigMarshalError{err} - } - - case "prop", "props", "properties": - if v.properties == nil { - v.properties = properties.NewProperties() - } - p := v.properties - for _, key := range v.AllKeys() { - _, _, err := p.Set(key, v.GetString(key)) - if err != nil { - return ConfigMarshalError{err} - } - } - _, err := p.WriteComment(f, "#", properties.UTF8) - if err != nil { - return ConfigMarshalError{err} - } - - case "dotenv", "env": - lines := []string{} - for _, key := range v.AllKeys() { - envName := strings.ToUpper(strings.Replace(key, ".", "_", -1)) - val := v.Get(key) - lines = append(lines, fmt.Sprintf("%v=%v", envName, val)) - } - s := strings.Join(lines, "\n") - if _, err := f.WriteString(s); err != nil { - return ConfigMarshalError{err} - } - - case "toml": - t, err := toml.TreeFromMap(c) - if err != nil { - return ConfigMarshalError{err} - } - s := t.String() - if _, err := f.WriteString(s); err != nil { - return ConfigMarshalError{err} - } - - case "yaml", "yml": - b, err := yaml.Marshal(c) + _, err = f.WriteString(string(b)) if err != nil { return ConfigMarshalError{err} } - if _, err = f.WriteString(string(b)); err != nil { - return ConfigMarshalError{err} - } - - case "ini": - keys := v.AllKeys() - cfg := ini.Empty() - ini.PrettyFormat = false - for i := 0; i < len(keys); i++ { - key := keys[i] - lastSep := strings.LastIndex(key, ".") - sectionName := key[:(lastSep)] - keyName := key[(lastSep + 1):] - if sectionName == "default" { - sectionName = "" - } - cfg.Section(sectionName).Key(keyName).SetValue(v.Get(key).(string)) - } - cfg.WriteTo(f) } return nil } @@ -1650,7 +1729,8 @@ func keyExists(k string, m map[string]interface{}) string { } func castToMapStringInterface( - src map[interface{}]interface{}) map[string]interface{} { + src map[interface{}]interface{}, +) map[string]interface{} { tgt := map[string]interface{}{} for k, v := range src { tgt[fmt.Sprintf("%v", k)] = v @@ -1658,6 +1738,14 @@ func castToMapStringInterface( return tgt } +func castMapStringSliceToMapInterface(src 
map[string][]string) map[string]interface{} { + tgt := map[string]interface{}{} + for k, v := range src { + tgt[k] = v + } + return tgt +} + func castMapStringToMapInterface(src map[string]string) map[string]interface{} { tgt := map[string]interface{}{} for k, v := range src { @@ -1680,11 +1768,12 @@ func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} // deep. Both map types are supported as there is a go-yaml fork that uses // `map[string]interface{}` instead. func mergeMaps( - src, tgt map[string]interface{}, itgt map[interface{}]interface{}) { + src, tgt map[string]interface{}, itgt map[interface{}]interface{}, +) { for sk, sv := range src { tk := keyExists(sk, tgt) if tk == "" { - jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv) + v.logger.Trace("", "tk", "\"\"", fmt.Sprintf("tgt[%s]", sk), sv) tgt[sk] = sv if itgt != nil { itgt[sk] = sv @@ -1694,7 +1783,7 @@ func mergeMaps( tv, ok := tgt[tk] if !ok { - jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv) + v.logger.Trace("", fmt.Sprintf("ok[%s]", tk), false, fmt.Sprintf("tgt[%s]", sk), sv) tgt[sk] = sv if itgt != nil { itgt[sk] = sv @@ -1704,28 +1793,52 @@ func mergeMaps( svType := reflect.TypeOf(sv) tvType := reflect.TypeOf(tv) - if svType != tvType { - jww.ERROR.Printf( - "svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v", - sk, svType, tvType, sv, tv) - continue - } - jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v", - sk, svType, tvType, sv, tv) + v.logger.Trace( + "processing", + "key", sk, + "st", svType, + "tt", tvType, + "sv", sv, + "tv", tv, + ) switch ttv := tv.(type) { case map[interface{}]interface{}: - jww.TRACE.Printf("merging maps (must convert)") - tsv := sv.(map[interface{}]interface{}) + v.logger.Trace("merging maps (must convert)") + tsv, ok := sv.(map[interface{}]interface{}) + if !ok { + v.logger.Error( + "Could not cast sv to map[interface{}]interface{}", + "key", sk, + "st", svType, + "tt", tvType, + "sv", sv, + "tv", tv, + ) + continue + } + ssv := castToMapStringInterface(tsv) stv := castToMapStringInterface(ttv) mergeMaps(ssv, stv, ttv) case map[string]interface{}: - jww.TRACE.Printf("merging maps") - mergeMaps(sv.(map[string]interface{}), ttv, nil) + v.logger.Trace("merging maps") + tsv, ok := sv.(map[string]interface{}) + if !ok { + v.logger.Error( + "Could not cast sv to map[string]interface{}", + "key", sk, + "st", svType, + "tt", tvType, + "sv", sv, + "tv", tv, + ) + continue + } + mergeMaps(tsv, ttv, nil) default: - jww.TRACE.Printf("setting value") + v.logger.Trace("setting value") tgt[tk] = sv if itgt != nil { itgt[tk] = sv @@ -1737,6 +1850,7 @@ func mergeMaps( // ReadRemoteConfig attempts to get configuration from a remote source // and read it in the remote configuration registry. func ReadRemoteConfig() error { return v.ReadRemoteConfig() } + func (v *Viper) ReadRemoteConfig() error { return v.getKeyValueConfig() } @@ -1759,9 +1873,13 @@ func (v *Viper) getKeyValueConfig() error { for _, rp := range v.remoteProviders { val, err := v.getRemoteConfig(rp) if err != nil { + v.logger.Error(fmt.Errorf("get remote config: %w", err).Error()) + continue } + v.kvstore = val + return nil } return RemoteConfigError("No Files Found") @@ -1818,13 +1936,14 @@ func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface // AllKeys returns all keys holding a value, regardless of where they are set. 
// Nested keys are returned with a v.keyDelim separator func AllKeys() []string { return v.AllKeys() } + func (v *Viper) AllKeys() []string { m := map[string]bool{} // add all paths, by order of descending priority to ensure correct shadowing m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "") m = v.flattenAndMergeMap(m, v.override, "") m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags)) - m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env)) + m = v.mergeFlatMap(m, castMapStringSliceToMapInterface(v.env)) m = v.flattenAndMergeMap(m, v.config, "") m = v.flattenAndMergeMap(m, v.kvstore, "") m = v.flattenAndMergeMap(m, v.defaults, "") @@ -1898,6 +2017,7 @@ outer: // AllSettings merges all settings and returns them as a map[string]interface{}. func AllSettings() map[string]interface{} { return v.AllSettings() } + func (v *Viper) AllSettings() map[string]interface{} { m := map[string]interface{}{} // start from the list of keys, and construct the map one value at a time @@ -1919,6 +2039,7 @@ func (v *Viper) AllSettings() map[string]interface{} { // SetFs sets the filesystem to use to read configuration. func SetFs(fs afero.Fs) { v.SetFs(fs) } + func (v *Viper) SetFs(fs afero.Fs) { v.fs = fs } @@ -1926,6 +2047,7 @@ func (v *Viper) SetFs(fs afero.Fs) { // SetConfigName sets name for the config file. // Does not include extension. func SetConfigName(in string) { v.SetConfigName(in) } + func (v *Viper) SetConfigName(in string) { if in != "" { v.configName = in @@ -1936,6 +2058,7 @@ func (v *Viper) SetConfigName(in string) { // SetConfigType sets the type of the configuration returned by the // remote source, e.g. "json". func SetConfigType(in string) { v.SetConfigType(in) } + func (v *Viper) SetConfigType(in string) { if in != "" { v.configType = in @@ -1944,10 +2067,18 @@ func (v *Viper) SetConfigType(in string) { // SetConfigPermissions sets the permissions for the config file. func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) } + func (v *Viper) SetConfigPermissions(perm os.FileMode) { v.configPermissions = perm.Perm() } +// IniLoadOptions sets the load options for ini parsing. +func IniLoadOptions(in ini.LoadOptions) Option { + return optionFunc(func(v *Viper) { + v.iniLoadOptions = in + }) +} + func (v *Viper) getConfigType() string { if v.configType != "" { return v.configType @@ -1978,42 +2109,10 @@ func (v *Viper) getConfigFile() (string, error) { return v.configFile, nil } -func (v *Viper) searchInPath(in string) (filename string) { - jww.DEBUG.Println("Searching for config in ", in) - for _, ext := range SupportedExts { - jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext)) - if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { - jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext)) - return filepath.Join(in, v.configName+"."+ext) - } - } - - if v.configType != "" { - if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b { - return filepath.Join(in, v.configName) - } - } - - return "" -} - -// Search all configPaths for any config file. -// Returns the first path that exists (and is a config file). 
-func (v *Viper) findConfigFile() (string, error) { - jww.INFO.Println("Searching for config in ", v.configPaths) - - for _, cp := range v.configPaths { - file := v.searchInPath(cp) - if file != "" { - return file, nil - } - } - return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} -} - // Debug prints all configuration registries for debugging // purposes. func Debug() { v.Debug() } + func (v *Viper) Debug() { fmt.Printf("Aliases:\n%#v\n", v.aliases) fmt.Printf("Override:\n%#v\n", v.override) diff --git a/vendor/github.com/spf13/viper/viper_go1_15.go b/vendor/github.com/spf13/viper/viper_go1_15.go new file mode 100644 index 0000000000..19a771cbda --- /dev/null +++ b/vendor/github.com/spf13/viper/viper_go1_15.go @@ -0,0 +1,57 @@ +//go:build !go1.16 || !finder +// +build !go1.16 !finder + +package viper + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/afero" +) + +// Search all configPaths for any config file. +// Returns the first path that exists (and is a config file). +func (v *Viper) findConfigFile() (string, error) { + v.logger.Info("searching for config in paths", "paths", v.configPaths) + + for _, cp := range v.configPaths { + file := v.searchInPath(cp) + if file != "" { + return file, nil + } + } + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} +} + +func (v *Viper) searchInPath(in string) (filename string) { + v.logger.Debug("searching for config in path", "path", in) + for _, ext := range SupportedExts { + v.logger.Debug("checking if file exists", "file", filepath.Join(in, v.configName+"."+ext)) + if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { + v.logger.Debug("found file", "file", filepath.Join(in, v.configName+"."+ext)) + return filepath.Join(in, v.configName+"."+ext) + } + } + + if v.configType != "" { + if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b { + return filepath.Join(in, v.configName) + } + } + + return "" +} + +// Check if file Exists +func exists(fs afero.Fs, path string) (bool, error) { + stat, err := fs.Stat(path) + if err == nil { + return !stat.IsDir(), nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} diff --git a/vendor/github.com/spf13/viper/viper_go1_16.go b/vendor/github.com/spf13/viper/viper_go1_16.go new file mode 100644 index 0000000000..e10172fa3f --- /dev/null +++ b/vendor/github.com/spf13/viper/viper_go1_16.go @@ -0,0 +1,32 @@ +//go:build go1.16 && finder +// +build go1.16,finder + +package viper + +import ( + "fmt" + + "github.com/spf13/afero" +) + +// Search all configPaths for any config file. +// Returns the first path that exists (and is a config file). 
+func (v *Viper) findConfigFile() (string, error) { + finder := finder{ + paths: v.configPaths, + fileNames: []string{v.configName}, + extensions: SupportedExts, + withoutExtension: v.configType != "", + } + + file, err := finder.Find(afero.NewIOFS(v.fs)) + if err != nil { + return "", err + } + + if file == "" { + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} + } + + return file, nil +} diff --git a/vendor/github.com/spf13/viper/watch.go b/vendor/github.com/spf13/viper/watch.go new file mode 100644 index 0000000000..b5523b8f9d --- /dev/null +++ b/vendor/github.com/spf13/viper/watch.go @@ -0,0 +1,12 @@ +//go:build !js +// +build !js + +package viper + +import "github.com/fsnotify/fsnotify" + +type watcher = fsnotify.Watcher + +func newWatcher() (*watcher, error) { + return fsnotify.NewWatcher() +} diff --git a/vendor/github.com/spf13/viper/watch_wasm.go b/vendor/github.com/spf13/viper/watch_wasm.go new file mode 100644 index 0000000000..8e47e6a910 --- /dev/null +++ b/vendor/github.com/spf13/viper/watch_wasm.go @@ -0,0 +1,30 @@ +// +build js,wasm + +package viper + +import ( + "errors" + + "github.com/fsnotify/fsnotify" +) + +type watcher struct { + Events chan fsnotify.Event + Errors chan error +} + +func (*watcher) Close() error { + return nil +} + +func (*watcher) Add(name string) error { + return nil +} + +func (*watcher) Remove(name string) error { + return nil +} + +func newWatcher() (*watcher, error) { + return &watcher{}, errors.New("fsnotify is not supported on WASM") +} diff --git a/vendor/github.com/stbenjam/no-sprintf-host-port/LICENSE b/vendor/github.com/stbenjam/no-sprintf-host-port/LICENSE new file mode 100644 index 0000000000..586dfd8cce --- /dev/null +++ b/vendor/github.com/stbenjam/no-sprintf-host-port/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Stephen Benjamin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go b/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go new file mode 100644 index 0000000000..374bb0d242 --- /dev/null +++ b/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go @@ -0,0 +1,96 @@ +package analyzer + +import ( + "fmt" + "go/ast" + "go/token" + "regexp" + + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + + "golang.org/x/tools/go/analysis" +) + +var Analyzer = &analysis.Analyzer{ + Name: "nosprintfhostport", + Doc: "Checks for misuse of Sprintf to construct a host with port in a URL.", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + callExpr := node.(*ast.CallExpr) + if p, f, ok := getCallExprFunction(callExpr); ok && p == "fmt" && f == "Sprintf" { + if err := checkForHostPortConstruction(callExpr); err != nil { + pass.Reportf(node.Pos(), err.Error()) + } + } + }) + + return nil, nil +} + +// getCallExprFunction returns the package and function name from a callExpr, if any. +func getCallExprFunction(callExpr *ast.CallExpr) (pkg string, fn string, result bool) { + selector, ok := callExpr.Fun.(*ast.SelectorExpr) + if !ok { + return "", "", false + } + gopkg, ok := selector.X.(*ast.Ident) + if !ok { + return "", "", false + } + return gopkg.Name, selector.Sel.Name, true +} + +// getStringLiteral returns the value at a position if it's a string literal. +func getStringLiteral(args []ast.Expr, pos int) (string, bool) { + if len(args) < pos + 1 { + return "", false + } + + // Let's see if our format string is a string literal. + fsRaw, ok := args[pos].(*ast.BasicLit) + if !ok { + return "", false + } + if fsRaw.Kind == token.STRING && len(fsRaw.Value) >= 2 { + return fsRaw.Value[1 : len(fsRaw.Value)-1], true + } else { + return "", false + } +} + +// checkForHostPortConstruction checks to see if a sprintf call looks like a URI with a port, +// essentially scheme://%s:, or scheme://user:pass@%s:. +// +// Matching requirements: +// - Scheme as per RFC3986 is ALPHA *( ALPHA / DIGIT / "+" / "-" / "." 
) +// - A format string substitution in the host portion, preceded by an optional username/password@ +// - A colon indicating a port will be specified +func checkForHostPortConstruction(sprintf *ast.CallExpr) error { + fs, ok := getStringLiteral(sprintf.Args, 0) + if !ok { + return nil + } + + regexes := []*regexp.Regexp{ + regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9+-.]*://%s:[^@]*$`), // URL without basic auth user + regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9+-.]*://[^/]*@%s:.*$`), // URL with basic auth + } + + for _, re := range regexes { + if re.MatchString(fs) { + return fmt.Errorf("host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf") + } + } + + return nil +} \ No newline at end of file diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md index 246660b21a..78dc1f8b03 100644 --- a/vendor/github.com/stretchr/objx/README.md +++ b/vendor/github.com/stretchr/objx/README.md @@ -4,20 +4,20 @@ [![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) [![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) [![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) -[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx) +[![GoDoc](https://pkg.go.dev/badge/github.com/stretchr/objx?utm_source=godoc)](https://pkg.go.dev/github.com/stretchr/objx) Objx - Go package for dealing with maps, slices, JSON and other data. Get started: - Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date) -- Check out the API Documentation http://godoc.org/github.com/stretchr/objx +- Check out the API Documentation http://pkg.go.dev/github.com/stretchr/objx ## Overview Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. ### Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: +Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: m, err := objx.FromJSON(json) @@ -74,7 +74,7 @@ To update Objx to the latest version, run: go get -u github.com/stretchr/objx ### Supported go versions -We support the lastest three major Go versions, which are 1.10, 1.11 and 1.12 at the moment. +We currently support the three recent major Go versions. ## Contributing Please feel free to submit issues, fork the repository and send pull requests! 
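The nosprintfhostport analyzer vendored above reports fmt.Sprintf calls that splice a host and port directly into a URL, because an IPv6 host must be bracketed; its message points users at net.JoinHostPort instead. A short sketch of the flagged pattern and the suggested fix (illustrative only, not part of the patch):

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        host, port := "2001:db8::1", "443"

        // Flagged by the analyzer: with an IPv6 host this yields
        // https://2001:db8::1:443/healthz, which is ambiguous.
        bad := fmt.Sprintf("https://%s:%s/healthz", host, port)

        // Preferred: net.JoinHostPort brackets IPv6 hosts, giving
        // https://[2001:db8::1]:443/healthz.
        good := fmt.Sprintf("https://%s/healthz", net.JoinHostPort(host, port))

        fmt.Println(bad, good)
    }
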
diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml index 7746f516da..8a79e8d674 100644 --- a/vendor/github.com/stretchr/objx/Taskfile.yml +++ b/vendor/github.com/stretchr/objx/Taskfile.yml @@ -1,7 +1,4 @@ -version: '2' - -env: - GOFLAGS: -mod=vendor +version: '3' tasks: default: diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go index 4c60455886..72f1d1c1ce 100644 --- a/vendor/github.com/stretchr/objx/accessors.go +++ b/vendor/github.com/stretchr/objx/accessors.go @@ -14,17 +14,17 @@ const ( // For example, `location.address.city` PathSeparator string = "." - // arrayAccesRegexString is the regex used to extract the array number + // arrayAccessRegexString is the regex used to extract the array number // from the access path - arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + arrayAccessRegexString = `^(.+)\[([0-9]+)\]$` // mapAccessRegexString is the regex used to extract the map key // from the access path mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$` ) -// arrayAccesRegex is the compiled arrayAccesRegexString -var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) +// arrayAccessRegex is the compiled arrayAccessRegexString +var arrayAccessRegex = regexp.MustCompile(arrayAccessRegexString) // mapAccessRegex is the compiled mapAccessRegexString var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) @@ -37,11 +37,11 @@ var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) // // Get can only operate directly on map[string]interface{} and []interface. // -// Example +// # Example // // To access the title of the third chapter of the second book, do: // -// o.Get("books[1].chapters[2].title") +// o.Get("books[1].chapters[2].title") func (m Map) Get(selector string) *Value { rawObj := access(m, selector, nil, false) return &Value{data: rawObj} } @@ -52,26 +52,26 @@ func (m Map) Get(selector string) *Value { // // Set can only operate directly on map[string]interface{} and []interface // -// Example +// # Example // // To set the title of the third chapter of the second book, do: // -// o.Set("books[1].chapters[2].title","Time to Go") +// o.Set("books[1].chapters[2].title","Time to Go") func (m Map) Set(selector string, value interface{}) Map { access(m, selector, value, true) return m } -// getIndex returns the index, which is hold in s by two braches. -// It also returns s withour the index part, e.g. name[1] will return (1, name). +// getIndex returns the index, which is held in s by two branches. +// It also returns s without the index part, e.g. name[1] will return (1, name). 
// If no index is found, -1 is returned func getIndex(s string) (int, string) { - arrayMatches := arrayAccesRegex.FindStringSubmatch(s) + arrayMatches := arrayAccessRegex.FindStringSubmatch(s) if len(arrayMatches) > 0 { // Get the key into the map selector := arrayMatches[1] // Get the index into the array at the key - // We know this cannt fail because arrayMatches[2] is an int for sure + // We know this can't fail because arrayMatches[2] is an int for sure index, _ := strconv.Atoi(arrayMatches[2]) return index, selector } diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go index 080aa46e47..01c63d7d3b 100644 --- a/vendor/github.com/stretchr/objx/conversions.go +++ b/vendor/github.com/stretchr/objx/conversions.go @@ -15,7 +15,7 @@ import ( const SignatureSeparator = "_" // URLValuesSliceKeySuffix is the character that is used to -// specify a suffic for slices parsed by URLValues. +// specify a suffix for slices parsed by URLValues. // If the suffix is set to "[i]", then the index of the slice // is used in place of i // Ex: Suffix "[]" would have the form a[]=b&a[]=c @@ -30,7 +30,7 @@ const ( ) // SetURLValuesSliceKeySuffix sets the character that is used to -// specify a suffic for slices parsed by URLValues. +// specify a suffix for slices parsed by URLValues. // If the suffix is set to "[i]", then the index of the slice // is used in place of i // Ex: Suffix "[]" would have the form a[]=b&a[]=c diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go index 6d6af1a83a..b170af74b3 100644 --- a/vendor/github.com/stretchr/objx/doc.go +++ b/vendor/github.com/stretchr/objx/doc.go @@ -1,19 +1,19 @@ /* -Objx - Go package for dealing with maps, slices, JSON and other data. +Package objx provides utilities for dealing with maps, slices, JSON and other data. -Overview +# Overview Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. -Pattern +# Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. +Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: - m, err := objx.FromJSON(json) + m, err := objx.FromJSON(json) NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. @@ -21,46 +21,46 @@ the rest will be optimistic and try to figure things out without panicking. Use `Get` to access the value you're interested in. You can use dot and array notation too: - m.Get("places[0].latlng") + m.Get("places[0].latlng") Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. - if m.Get("code").IsStr() { // Your code... } + if m.Get("code").IsStr() { // Your code... } Or you can just assume the type, and use one of the strong type methods to extract the real value: - m.Get("code").Int() + m.Get("code").Int() If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. 
- Get("code").Int(-1) + Get("code").Int(-1) If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. -Reading data +# Reading data A simple example of how to use Objx: - // Use MustFromJSON to make an objx.Map from some JSON - m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) - // Get the details - name := m.Get("name").Str() - age := m.Get("age").Int() + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() - // Get their nickname (or use their name if they don't have one) - nickname := m.Get("nickname").Str(name) + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) -Ranging +# Ranging Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: - m := objx.MustFromJSON(json) - for key, value := range m { - // Your code... - } + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... + } */ package objx diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go index a64712a08b..ab9f9ae67c 100644 --- a/vendor/github.com/stretchr/objx/map.go +++ b/vendor/github.com/stretchr/objx/map.go @@ -47,17 +47,16 @@ func New(data interface{}) Map { // // The arguments follow a key, value pattern. // -// // Returns nil if any key argument is non-string or if there are an odd number of arguments. // -// Example +// # Example // // To easily create Maps: // -// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) // -// // creates an Map equivalent to -// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} +// // creates an Map equivalent to +// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} func MSI(keyAndValuePairs ...interface{}) Map { newMap := Map{} keyAndValuePairsLen := len(keyAndValuePairs) diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index b774da88d8..4d4b4aad6f 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -28,6 +28,8 @@ var ( uint32Type = reflect.TypeOf(uint32(1)) uint64Type = reflect.TypeOf(uint64(1)) + uintptrType = reflect.TypeOf(uintptr(1)) + float32Type = reflect.TypeOf(float32(1)) float64Type = reflect.TypeOf(float64(1)) @@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Struct: { // All structs enter here. We're not interested in most types. - if !canConvert(obj1Value, timeType) { + if !obj1Value.CanConvert(timeType) { break } - // time.Time can compared! + // time.Time can be compared! timeObj1, ok := obj1.(time.Time) if !ok { timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) @@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Slice: { // We only care about the []byte type. 
- if !canConvert(obj1Value, bytesType) { + if !obj1Value.CanConvert(bytesType) { break } @@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true } + case reflect.Uintptr: + { + uintptrObj1, ok := obj1.(uintptr) + if !ok { + uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr) + } + uintptrObj2, ok := obj2.(uintptr) + if !ok { + uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr) + } + if uintptrObj1 > uintptrObj2 { + return compareGreater, true + } + if uintptrObj1 == uintptrObj2 { + return compareEqual, true + } + if uintptrObj1 < uintptrObj2 { + return compareLess, true + } + } } return compareEqual, false diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go deleted file mode 100644 index da867903e2..0000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_legacy.go - -package assert - -import "reflect" - -// Wrapper around reflect.Value.CanConvert, for compatibility -// reasons. -func canConvert(value reflect.Value, to reflect.Type) bool { - return value.CanConvert(to) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go deleted file mode 100644 index 1701af2a3c..0000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_can_convert.go - -package assert - -import "reflect" - -// Older versions of Go does not have the reflect.Value.CanConvert -// method. -func canConvert(value reflect.Value, to reflect.Type) bool { - return false -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 84dbd6c790..3ddab109ad 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) } +// NotImplementsf asserts that an object does not implement the specified interface. 
+// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index b1d94aec53..a84e09bd40 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) 
} -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in return NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
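NotImplements and NotImplementsf are new in this release; like Implements, they take a nil pointer to the interface type so the interface can be named as a value. A minimal test sketch (the strings.Reader choice is purely illustrative):

    package example_test

    import (
        "io"
        "strings"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestReaderInterfaces(t *testing.T) {
        r := strings.NewReader("data")

        // *strings.Reader has a Read method, so it implements io.Reader...
        assert.Implements(t, (*io.Reader)(nil), r)

        // ...but no Close method, so it does not implement io.Closer.
        assert.NotImplements(t, (*io.Closer)(nil), r)
    }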
// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index a55d1bba92..0b7570f21c 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} { return result.Interface() case reflect.Array, reflect.Slice: - result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + var result reflect.Value + if expectedKind == reflect.Array { + result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem() + } else { + result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + } for i := 0; i < expectedValue.Len(); i++ { index := expectedValue.Index(i) if isNil(index) { @@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} { // structures. // // This function does no assertion of any kind. +// +// Deprecated: Use [EqualExportedValues] instead. 
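The reworded Subset/NotSubset doc comments above record that maps are accepted on both sides: a map is a subset when every one of its key/value pairs is present in the other map. A small sketch of both assertions:

    package example_test

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestMapSubset(t *testing.T) {
        got := map[string]int{"x": 1, "y": 2}

        // {"x": 1} is contained in got, so this passes.
        assert.Subset(t, got, map[string]int{"x": 1})

        // "z" is missing from got, so this passes too.
        assert.NotSubset(t, got, map[string]int{"z": 3})
    }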
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { expectedCleaned := copyExportedFields(expected) actualCleaned := copyExportedFields(actual) @@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { return true } - actualType := reflect.TypeOf(actual) - if actualType == nil { + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + if !expectedValue.IsValid() || !actualValue.IsValid() { return false } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + + expectedType := expectedValue.Type() + actualType := actualValue.Type() + if !expectedType.ConvertibleTo(actualType) { + return false + } + + if !isNumericType(expectedType) || !isNumericType(actualType) { // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + return reflect.DeepEqual( + expectedValue.Convert(actualType).Interface(), actual, + ) } - return false + // If BOTH values are numeric, there are chances of false positives due + // to overflow or underflow. So, we need to make sure to always convert + // the smaller type to a larger type before comparing. + if expectedType.Size() >= actualType.Size() { + return actualValue.Convert(expectedType).Interface() == expected + } + + return expectedValue.Convert(actualType).Interface() == actual +} + +// isNumericType returns true if the type is one of: +// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, +// float32, float64, complex64, complex128 +func isNumericType(t reflect.Type) bool { + return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128 } /* CallerInfo is necessary because the assert functions use the testing object @@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { // Aligns the provided message so that all lines after the first line start at the same location as the first line. // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the // basis on which the alignment occurs). func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) @@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg return true } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...) + } + if reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...) + } + + return true +} + // IsType asserts that the specified objects are of the same type. 
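The new numeric branch in ObjectsAreEqualValues exists because converting toward the smaller of two numeric types can overflow and wrap, making distinct values compare equal. A standalone sketch of the failure mode the widening now avoids (the 270/14 pair is chosen purely for illustration):

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        expected, actual := 270, int8(14)

        // Old behaviour: expected was converted into actual's type.
        // int(270) wraps to int8(14), producing a spurious match.
        wrapped := reflect.ValueOf(expected).Convert(reflect.TypeOf(actual)).Interface()
        fmt.Println(reflect.DeepEqual(wrapped, actual)) // true (false positive)

        // New behaviour: the smaller type is widened before comparing.
        widened := reflect.ValueOf(actual).Convert(reflect.TypeOf(expected)).Interface()
        fmt.Println(widened.(int) == expected) // false (correct)
    }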
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool { // representations appropriate to be presented to the user. // // If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar +// with the type name, and the value will be enclosed in parentheses similar // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { @@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } + if aType.Kind() == reflect.Ptr { + aType = aType.Elem() + } + if bType.Kind() == reflect.Ptr { + bType = bType.Elem() + } + if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) } if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) } expected = copyExportedFields(expected) @@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, "Expected value not to be nil.", msgAndArgs...) } -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - // isNil checks if a specified object is nil or not, without Failing. func isNil(object interface{}) bool { if object == nil { @@ -638,16 +683,13 @@ func isNil(object interface{}) bool { } value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, - kind) - - if isNilableKind && value.IsNil() { - return true + switch value.Kind() { + case + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + + return value.IsNil() } return false @@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { +// getLen tries to get the length of an object. +// It returns (0, false) if impossible. 
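With the new reflect.Ptr dereference in EqualExportedValues above, pointers to structs are accepted as well as struct values; unexported fields are still excluded from the comparison. A sketch, assuming a hypothetical user type:

    package example_test

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    type user struct {
        Name string // exported: compared
        age  int    // unexported: ignored
    }

    func TestEqualExported(t *testing.T) {
        // Pointer arguments are now accepted; only Name is compared,
        // so the differing age fields do not fail the assertion.
        assert.EqualExportedValues(t, &user{Name: "Mat", age: 29}, &user{Name: "Mat", age: 30})
    }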
+func getLen(x interface{}) (length int, ok bool) { v := reflect.ValueOf(x) defer func() { - if e := recover(); e != nil { - ok = false - } + ok = recover() == nil }() - return true, v.Len() + return v.Len(), true } // Len asserts that the specified object has specific length. @@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - ok, l := getLen(object) + l, ok := getLen(object) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...) } if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) } return true } @@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd h.Helper() } if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") + return Fail(t, "epsilon must not be NaN", msgAndArgs...) } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { @@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if h, ok := t.(tHelper); ok { h.Helper() } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { + + if expected == nil || actual == nil { return Fail(t, "Parameters must be slice", msgAndArgs...) } - actualSlice := reflect.ValueOf(actual) expectedSlice := reflect.ValueOf(expected) + actualSlice := reflect.ValueOf(actual) - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result + if expectedSlice.Type().Kind() != reflect.Slice { + return Fail(t, "Expected value must be slice", msgAndArgs...) 
+ } + + expectedLen := expectedSlice.Len() + if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) { + return false + } + + for i := 0; i < expectedLen; i++ { + if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) { + return false } } @@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { } // FailNow panics. -func (c *CollectT) FailNow() { +func (*CollectT) FailNow() { panic("Assertion failed") } -// Reset clears the collected errors. -func (c *CollectT) Reset() { - c.errors = nil +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Reset() { + panic("Reset() is deprecated") } -// Copy copies the collected errors to the supplied t. -func (c *CollectT) Copy(t TestingT) { - if tt, ok := t.(tHelper); ok { - tt.Helper() - } - for _, err := range c.errors { - t.Errorf("%v", err) - } +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Copy(TestingT) { + panic("Copy() is deprecated") } // EventuallyWithT asserts that given condition will be met in waitFor time, @@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time h.Helper() } - collect := new(CollectT) - ch := make(chan bool, 1) + var lastFinishedTickErrs []error + ch := make(chan []error, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time for tick := ticker.C; ; { select { case <-timer.C: - collect.Copy(t) + for _, err := range lastFinishedTickErrs { + t.Errorf("%v", err) + } return Fail(t, "Condition never satisfied", msgAndArgs...) case <-tick: tick = nil - collect.Reset() go func() { + collect := new(CollectT) + defer func() { + ch <- collect.errors + }() condition(collect) - ch <- len(collect.errors) == 0 }() - case v := <-ch: - if v { + case errs := <-ch: + if len(errs) == 0 { return true } + // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. + lastFinishedTickErrs = errs tick = ticker.C } } diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index d8038c28a7..861ed4b7ce 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -12,7 +12,7 @@ import ( // an error if building a new request fails. func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return -1, err } @@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) 
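The EventuallyWithT rewrite above gives each tick a fresh *CollectT and, on timeout, reports only the errors from the last tick that finished, instead of resetting and copying one shared collector. The calling convention is unchanged; a minimal sketch:

    package example_test

    import (
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestEventuallyWithT(t *testing.T) {
        attempts := 0
        assert.EventuallyWithT(t, func(c *assert.CollectT) {
            attempts++
            // Failures from early ticks are discarded once a later tick
            // succeeds; only a timeout reports the last tick's errors.
            assert.GreaterOrEqual(c, attempts, 3)
        }, time.Second, 10*time.Millisecond)
    }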
} return isSuccessCode @@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isRedirectCode @@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isErrorCode := code >= http.StatusBadRequest if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isErrorCode @@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } successful := code == statuscode if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...) } return successful @@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va // empty string if building a new request fails. func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if len(values) > 0 { + url += "?" + values.Encode() + } + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return "" } @@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) 
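The only behavioural change in these HTTP helpers is that msgAndArgs is now forwarded to every Fail call, so user-supplied context finally appears in failure output. Sketch (healthHandler is a stand-in handler):

    package example_test

    import (
        "net/http"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func healthHandler(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
    }

    func TestHealthEndpoint(t *testing.T) {
        // The trailing message is now printed if the assertion fails.
        assert.HTTPSuccess(t, healthHandler, "GET", "/health", nil,
            "expected the health endpoint to return a 2xx status")
    }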
} return !contains diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index f4b42e44ff..213bde2ea6 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -18,6 +18,9 @@ import ( "github.com/stretchr/testify/assert" ) +// regex for GCCGO functions +var gccgoRE = regexp.MustCompile(`\.pN\d+_`) + // TestingT is an interface wrapper around *testing.T type TestingT interface { Logf(format string, args ...interface{}) @@ -111,7 +114,7 @@ func (c *Call) Return(returnArguments ...interface{}) *Call { return c } -// Panic specifies if the functon call should fail and the panic message +// Panic specifies if the function call should fail and the panic message // // Mock.On("DoSomething").Panic("test panic") func (c *Call) Panic(msg string) *Call { @@ -123,21 +126,21 @@ func (c *Call) Panic(msg string) *Call { return c } -// Once indicates that that the mock should only return the value once. +// Once indicates that the mock should only return the value once. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() func (c *Call) Once() *Call { return c.Times(1) } -// Twice indicates that that the mock should only return the value twice. +// Twice indicates that the mock should only return the value twice. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() func (c *Call) Twice() *Call { return c.Times(2) } -// Times indicates that that the mock should only return the indicated number +// Times indicates that the mock should only return the indicated number // of times. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) @@ -455,9 +458,8 @@ func (m *Mock) Called(arguments ...interface{}) Arguments { // For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock // uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree // With GCCGO we need to remove interface information starting from pN
<digit>. - re := regexp.MustCompile("\\.pN\\d+_") - if re.MatchString(functionPath) { - functionPath = re.Split(functionPath, -1)[0] + if gccgoRE.MatchString(functionPath) { + functionPath = gccgoRE.Split(functionPath, -1)[0] } parts := strings.Split(functionPath, ".") functionName := parts[len(parts)-1] @@ -474,7 +476,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen found, call := m.findExpectedCall(methodName, arguments...) if found < 0 { - // expected call found but it has already been called with repeatable times + // expected call found, but it has already been called with repeatable times if call != nil { m.mutex.Unlock() m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) @@ -563,7 +565,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen Assertions */ -type assertExpectationser interface { +type assertExpectationiser interface { AssertExpectations(TestingT) bool } @@ -580,7 +582,7 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") obj = m } - m := obj.(assertExpectationser) + m := obj.(assertExpectationiser) if !m.AssertExpectations(t) { t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) return false @@ -592,6 +594,9 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { // AssertExpectations asserts that everything specified with On and Return was // in fact called as expected. Calls may have occurred in any order. func (m *Mock) AssertExpectations(t TestingT) bool { + if s, ok := t.(interface{ Skipped() bool }); ok && s.Skipped() { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } @@ -606,8 +611,8 @@ func (m *Mock) AssertExpectations(t TestingT) bool { satisfied, reason := m.checkExpectation(expectedCall) if !satisfied { failedExpectations++ + t.Logf(reason) } - t.Logf(reason) } if failedExpectations != 0 { @@ -758,25 +763,33 @@ const ( Anything = "mock.Anything" ) -// AnythingOfTypeArgument is a string that contains the type of an argument +// AnythingOfTypeArgument contains the type of an argument +// for use when type checking. Used in Diff and Assert. +// +// Deprecated: this is an implementation detail that must not be used. Use [AnythingOfType] instead. +type AnythingOfTypeArgument = anythingOfTypeArgument + +// anythingOfTypeArgument is a string that contains the type of an argument // for use when type checking. -type AnythingOfTypeArgument string +type anythingOfTypeArgument string -// AnythingOfType returns an AnythingOfTypeArgument object containing the -// name of the type to check for. Used in Diff and Assert. +// AnythingOfType returns a special value containing the +// name of the type to check for. The type name will be matched against the type name returned by [reflect.Type.String]. +// +// Used in Diff and Assert. // // For example: // // Assert(t, AnythingOfType("string"), AnythingOfType("int")) func AnythingOfType(t string) AnythingOfTypeArgument { - return AnythingOfTypeArgument(t) + return anythingOfTypeArgument(t) } // IsTypeArgument is a struct that contains the type of an argument // for use when type checking.
This is an alternative to AnythingOfType. // Used in Diff and Assert. type IsTypeArgument struct { - t interface{} + t reflect.Type } // IsType returns an IsTypeArgument object containing the type to check for. @@ -786,7 +799,7 @@ type IsTypeArgument struct { // For example: // Assert(t, IsType(""), IsType(0)) func IsType(t interface{}) *IsTypeArgument { - return &IsTypeArgument{t: t} + return &IsTypeArgument{t: reflect.TypeOf(t)} } // FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument @@ -950,53 +963,55 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { differences++ output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - // type checking - if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) { - t := expected.(*IsTypeArgument).t - if reflect.TypeOf(t) != reflect.TypeOf(actual) { - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*FunctionalOptionsArgument)(nil)) { - t := expected.(*FunctionalOptionsArgument).value + } else { + switch expected := expected.(type) { + case anythingOfTypeArgument: + // type checking + if reflect.TypeOf(actual).Name() != string(expected) && reflect.TypeOf(actual).String() != string(expected) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) + } + case *IsTypeArgument: + actualT := reflect.TypeOf(actual) + if actualT != expected.t { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected.t.Name(), actualT.Name(), actualFmt) + } + case *FunctionalOptionsArgument: + t := expected.value - var name string - tValue := reflect.ValueOf(t) - if tValue.Len() > 0 { - name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() - } + var name string + tValue := reflect.ValueOf(t) + if tValue.Len() > 0 { + name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + } - tName := reflect.TypeOf(t).Name() - if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) - } else { - if ef, af := assertOpts(t, actual); ef == "" && af == "" { + tName := reflect.TypeOf(t).Name() + if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) + } else { + if ef, af := assertOpts(t, actual); ef == "" && af == "" { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) + } + } + + default: + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, 
Anything) || assert.ObjectsAreEqual(actual, expected) { // match - output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) } else { // not match differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) } } - } else { - // normal checking - - if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { - // match - output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) - } else { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) - } } } diff --git a/vendor/github.com/t-yuki/gocover-cobertura/.travis.yml b/vendor/github.com/t-yuki/gocover-cobertura/.travis.yml new file mode 100644 index 0000000000..f17bb61693 --- /dev/null +++ b/vendor/github.com/t-yuki/gocover-cobertura/.travis.yml @@ -0,0 +1,12 @@ +language: go +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +sudo: false +before_install: + - go get github.com/mattn/goveralls +script: + - $GOPATH/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/t-yuki/gocover-cobertura/LICENSE b/vendor/github.com/t-yuki/gocover-cobertura/LICENSE new file mode 100644 index 0000000000..7ec1b3d853 --- /dev/null +++ b/vendor/github.com/t-yuki/gocover-cobertura/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2013 Yukinari Toyota + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/t-yuki/gocover-cobertura/README.md b/vendor/github.com/t-yuki/gocover-cobertura/README.md new file mode 100644 index 0000000000..60ab1dbe72 --- /dev/null +++ b/vendor/github.com/t-yuki/gocover-cobertura/README.md @@ -0,0 +1,35 @@ +[![Build Status](https://travis-ci.org/t-yuki/gocover-cobertura.svg?branch=master)](https://travis-ci.org/t-yuki/gocover-cobertura) +[![Coverage Status](https://coveralls.io/repos/github/t-yuki/gocover-cobertura/badge.svg?branch=master)](https://coveralls.io/github/t-yuki/gocover-cobertura?branch=master) + +go tool cover XML (Cobertura) export +==================================== + +This is a simple helper tool for generating XML output in [Cobertura](http://cobertura.sourceforge.net/) format +for CIs like [Jenkins](https://wiki.jenkins-ci.org/display/JENKINS/Cobertura+Plugin) and others +from [go tool cover](https://code.google.com/p/go.tools/) output. 
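The Arguments.Diff refactor in the mock.go hunk above replaces chained reflect.TypeOf comparisons with a type switch over the matcher kinds (anythingOfTypeArgument, *IsTypeArgument, *FunctionalOptionsArgument, and plain values). From the caller's side the matchers look like this; Store is a hypothetical mocked dependency:

    package example_test

    import (
        "testing"

        "github.com/stretchr/testify/mock"
    )

    // Store is a hypothetical mocked dependency.
    type Store struct{ mock.Mock }

    func (s *Store) Save(key string, value int) error {
        return s.Called(key, value).Error(0)
    }

    func TestArgumentMatchers(t *testing.T) {
        s := new(Store)

        // AnythingOfType matches on the type name; Anything matches any
        // value. Each expected argument is routed through a branch of the
        // rewritten switch when the call is diffed.
        s.On("Save", mock.AnythingOfType("string"), mock.Anything).Return(nil)

        if err := s.Save("key", 42); err != nil {
            t.Fatal(err)
        }
        s.AssertExpectations(t)
    }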
+ +Installation +------------ + +Just type the following to install the program and its dependencies: + + $ go get code.google.com/p/go.tools/cmd/cover + $ go get github.com/t-yuki/gocover-cobertura + +Usage +----- + +`gocover-cobertura` reads from the standard input: + + $ go test -coverprofile=coverage.txt -covermode count github.com/gorilla/mux + $ gocover-cobertura < coverage.txt > coverage.xml + +Authors +------- + +* [Yukinari Toyota (t-yuki)](https://github.com/t-yuki) + +Thanks +------ + +This tool is originated from [gocov-xml](https://github.com/AlekSi/gocov-xml) by [Alexey Palazhchenko (AlekSi)](https://github.com/AlekSi) diff --git a/vendor/github.com/t-yuki/gocover-cobertura/cobertura.go b/vendor/github.com/t-yuki/gocover-cobertura/cobertura.go new file mode 100644 index 0000000000..8556dc5636 --- /dev/null +++ b/vendor/github.com/t-yuki/gocover-cobertura/cobertura.go @@ -0,0 +1,178 @@ +package main + +import ( + "encoding/xml" +) + +type Coverage struct { + XMLName xml.Name `xml:"coverage"` + LineRate float32 `xml:"line-rate,attr"` + BranchRate float32 `xml:"branch-rate,attr"` + Version string `xml:"version,attr"` + Timestamp int64 `xml:"timestamp,attr"` + LinesCovered int64 `xml:"lines-covered,attr"` + LinesValid int64 `xml:"lines-valid,attr"` + BranchesCovered int64 `xml:"branches-covered,attr"` + BranchesValid int64 `xml:"branches-valid,attr"` + Complexity float32 `xml:"complexity,attr"` + Sources []*Source `xml:"sources>source"` + Packages []*Package `xml:"packages>package"` +} + +type Source struct { + Path string `xml:",chardata"` +} + +type Package struct { + Name string `xml:"name,attr"` + LineRate float32 `xml:"line-rate,attr"` + BranchRate float32 `xml:"branch-rate,attr"` + Complexity float32 `xml:"complexity,attr"` + Classes []*Class `xml:"classes>class"` +} + +type Class struct { + Name string `xml:"name,attr"` + Filename string `xml:"filename,attr"` + LineRate float32 `xml:"line-rate,attr"` + BranchRate float32 `xml:"branch-rate,attr"` + Complexity float32 `xml:"complexity,attr"` + Methods []*Method `xml:"methods>method"` + Lines Lines `xml:"lines>line"` +} + +type Method struct { + Name string `xml:"name,attr"` + Signature string `xml:"signature,attr"` + LineRate float32 `xml:"line-rate,attr"` + BranchRate float32 `xml:"branch-rate,attr"` + Complexity float32 `xml:"complexity,attr"` + Lines Lines `xml:"lines>line"` +} + +type Line struct { + Number int `xml:"number,attr"` + Hits int64 `xml:"hits,attr"` +} + +// Lines is a slice of Line pointers, with some convenience methods +type Lines []*Line + +// HitRate returns a float32 from 0.0 to 1.0 representing what fraction of lines +// have hits +func (lines Lines) HitRate() (hitRate float32) { + return float32(lines.NumLinesWithHits()) / float32(len(lines)) +} + +// NumLines returns the number of lines +func (lines Lines) NumLines() int64 { + return int64(len(lines)) +} + +// NumLinesWithHits returns the number of lines with a hit count > 0 +func (lines Lines) NumLinesWithHits() (numLinesWithHits int64) { + for _, line := range lines { + if line.Hits > 0 { + numLinesWithHits++ + } + } + return numLinesWithHits +} + +// AddOrUpdateLine adds a line if it is a different line than the last line recorded. 
+// If it's the same line as the last line recorded then we update the hits down +// if the new hits is less; otherwise just leave it as-is +func (lines *Lines) AddOrUpdateLine(lineNumber int, hits int64) { + if len(*lines) > 0 { + lastLine := (*lines)[len(*lines)-1] + if lineNumber == lastLine.Number { + if hits < lastLine.Hits { + lastLine.Hits = hits + } + return + } + } + *lines = append(*lines, &Line{Number: lineNumber, Hits: hits}) +} + +// HitRate returns a float32 from 0.0 to 1.0 representing what fraction of lines +// have hits +func (method Method) HitRate() float32 { + return method.Lines.HitRate() +} + +// NumLines returns the number of lines +func (method Method) NumLines() int64 { + return method.Lines.NumLines() +} + +// NumLinesWithHits returns the number of lines with a hit count > 0 +func (method Method) NumLinesWithHits() int64 { + return method.Lines.NumLinesWithHits() +} + +// HitRate returns a float32 from 0.0 to 1.0 representing what fraction of lines +// have hits +func (class Class) HitRate() float32 { + return float32(class.NumLinesWithHits()) / float32(class.NumLines()) +} + +// NumLines returns the number of lines +func (class Class) NumLines() (numLines int64) { + for _, method := range class.Methods { + numLines += method.NumLines() + } + return numLines +} + +// NumLinesWithHits returns the number of lines with a hit count > 0 +func (class Class) NumLinesWithHits() (numLinesWithHits int64) { + for _, method := range class.Methods { + numLinesWithHits += method.NumLinesWithHits() + } + return numLinesWithHits +} + +// HitRate returns a float32 from 0.0 to 1.0 representing what fraction of lines +// have hits +func (pkg Package) HitRate() float32 { + return float32(pkg.NumLinesWithHits()) / float32(pkg.NumLines()) +} + +// NumLines returns the number of lines +func (pkg Package) NumLines() (numLines int64) { + for _, class := range pkg.Classes { + numLines += class.NumLines() + } + return numLines +} + +// NumLinesWithHits returns the number of lines with a hit count > 0 +func (pkg Package) NumLinesWithHits() (numLinesWithHits int64) { + for _, class := range pkg.Classes { + numLinesWithHits += class.NumLinesWithHits() + } + return numLinesWithHits +} + +// HitRate returns a float32 from 0.0 to 1.0 representing what fraction of lines +// have hits +func (cov Coverage) HitRate() float32 { + return float32(cov.NumLinesWithHits()) / float32(cov.NumLines()) +} + +// NumLines returns the number of lines +func (cov Coverage) NumLines() (numLines int64) { + for _, pkg := range cov.Packages { + numLines += pkg.NumLines() + } + return numLines +} + +// NumLinesWithHits returns the number of lines with a hit count > 0 +func (cov Coverage) NumLinesWithHits() (numLinesWithHits int64) { + for _, pkg := range cov.Packages { + numLinesWithHits += pkg.NumLinesWithHits() + } + return numLinesWithHits +} diff --git a/vendor/github.com/t-yuki/gocover-cobertura/gocover-cobertura.go b/vendor/github.com/t-yuki/gocover-cobertura/gocover-cobertura.go new file mode 100644 index 0000000000..e64b5de029 --- /dev/null +++ b/vendor/github.com/t-yuki/gocover-cobertura/gocover-cobertura.go @@ -0,0 +1,176 @@ +package main + +import ( + "encoding/xml" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +const coberturaDTDDecl = "<!DOCTYPE coverage SYSTEM \"http://cobertura.sourceforge.net/xml/coverage-04.dtd\">\n" + +func main() { + convert(os.Stdin, os.Stdout) +} + +func convert(in io.Reader, out io.Writer) { + profiles, err := ParseProfiles(in) + if err != nil { + panic("Can't parse profiles") +
} + + srcDirs := build.Default.SrcDirs() + sources := make([]*Source, len(srcDirs)) + for i, dir := range srcDirs { + sources[i] = &Source{dir} + } + + coverage := Coverage{Sources: sources, Packages: nil, Timestamp: time.Now().UnixNano() / int64(time.Millisecond)} + coverage.parseProfiles(profiles) + + fmt.Fprintf(out, xml.Header) + fmt.Fprintf(out, coberturaDTDDecl) + + encoder := xml.NewEncoder(out) + encoder.Indent("", "\t") + err = encoder.Encode(coverage) + if err != nil { + panic(err) + } + + fmt.Fprintln(out) +} + +func (cov *Coverage) parseProfiles(profiles []*Profile) error { + cov.Packages = []*Package{} + for _, profile := range profiles { + cov.parseProfile(profile) + } + cov.LinesValid = cov.NumLines() + cov.LinesCovered = cov.NumLinesWithHits() + cov.LineRate = cov.HitRate() + return nil +} + +func (cov *Coverage) parseProfile(profile *Profile) error { + fileName := profile.FileName + absFilePath, err := findFile(fileName) + if err != nil { + return err + } + fset := token.NewFileSet() + parsed, err := parser.ParseFile(fset, absFilePath, nil, 0) + if err != nil { + return err + } + data, err := ioutil.ReadFile(absFilePath) + if err != nil { + return err + } + + pkgPath, _ := filepath.Split(fileName) + pkgPath = strings.TrimRight(pkgPath, string(os.PathSeparator)) + + var pkg *Package + for _, p := range cov.Packages { + if p.Name == pkgPath { + pkg = p + } + } + if pkg == nil { + pkg = &Package{Name: pkgPath, Classes: []*Class{}} + cov.Packages = append(cov.Packages, pkg) + } + visitor := &fileVisitor{ + fset: fset, + fileName: fileName, + fileData: data, + classes: make(map[string]*Class), + pkg: pkg, + profile: profile, + } + ast.Walk(visitor, parsed) + pkg.LineRate = pkg.HitRate() + return nil +} + +type fileVisitor struct { + fset *token.FileSet + fileName string + fileData []byte + pkg *Package + classes map[string]*Class + profile *Profile +} + +func (v *fileVisitor) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.FuncDecl: + class := v.class(n) + method := v.method(n) + method.LineRate = method.Lines.HitRate() + class.Methods = append(class.Methods, method) + for _, line := range method.Lines { + class.Lines = append(class.Lines, line) + } + class.LineRate = class.Lines.HitRate() + } + return v +} + +func (v *fileVisitor) method(n *ast.FuncDecl) *Method { + method := &Method{Name: n.Name.Name} + method.Lines = []*Line{} + + start := v.fset.Position(n.Pos()) + end := v.fset.Position(n.End()) + startLine := start.Line + startCol := start.Column + endLine := end.Line + endCol := end.Column + // The blocks are sorted, so we can stop counting as soon as we reach the end of the relevant block. + for _, b := range v.profile.Blocks { + if b.StartLine > endLine || (b.StartLine == endLine && b.StartCol >= endCol) { + // Past the end of the function. 
+ break + } + if b.EndLine < startLine || (b.EndLine == startLine && b.EndCol <= startCol) { + // Before the beginning of the function + continue + } + for i := b.StartLine; i <= b.EndLine; i++ { + method.Lines.AddOrUpdateLine(i, int64(b.Count)) + } + } + return method +} + +func (v *fileVisitor) class(n *ast.FuncDecl) *Class { + className := v.recvName(n) + var class *Class = v.classes[className] + if class == nil { + class = &Class{Name: className, Filename: v.fileName, Methods: []*Method{}, Lines: []*Line{}} + v.classes[className] = class + v.pkg.Classes = append(v.pkg.Classes, class) + } + return class +} + +func (v *fileVisitor) recvName(n *ast.FuncDecl) string { + if n.Recv == nil { + return "-" + } + recv := n.Recv.List[0].Type + start := v.fset.Position(recv.Pos()) + end := v.fset.Position(recv.End()) + name := string(v.fileData[start.Offset:end.Offset]) + return strings.TrimSpace(strings.TrimLeft(name, "*")) +} diff --git a/vendor/github.com/t-yuki/gocover-cobertura/profile.go b/vendor/github.com/t-yuki/gocover-cobertura/profile.go new file mode 100644 index 0000000000..99cbac234c --- /dev/null +++ b/vendor/github.com/t-yuki/gocover-cobertura/profile.go @@ -0,0 +1,202 @@ +// Imported from https://code.google.com/p/go/source/browse/cmd/cover/profile.go?repo=tools&r=c10a9dd5e0b0a859a8385b6f004584cb083a3934 + +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "fmt" + "go/build" + "io" + "math" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" +) + +// Profile represents the profiling data for a specific file. +type Profile struct { + FileName string + Mode string + Blocks []ProfileBlock +} + +// ProfileBlock represents a single block of profiling data. +type ProfileBlock struct { + StartLine, StartCol int + EndLine, EndCol int + NumStmt, Count int +} + +type byFileName []*Profile + +func (p byFileName) Len() int { return len(p) } +func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } +func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ParseProfiles parses profile data from the given Reader and returns a +// Profile for each file. +func ParseProfiles(in io.Reader) ([]*Profile, error) { + files := make(map[string]*Profile) + // First line is "mode: foo", where foo is "set", "count", or "atomic". + // Rest of file is in the format + // encoding/base64/base64.go:34.44,37.40 3 1 + // where the fields are: name.go:line.column,line.column numberOfStatements count + s := bufio.NewScanner(in) + mode := "" + for s.Scan() { + line := s.Text() + if mode == "" { + const p = "mode: " + if !strings.HasPrefix(line, p) || line == p { + return nil, fmt.Errorf("bad mode line: %v", line) + } + mode = line[len(p):] + continue + } + m := lineRe.FindStringSubmatch(line) + if m == nil { + return nil, fmt.Errorf("line %q doesn't match expected format: %v", m, lineRe) + } + fn := m[1] + p := files[fn] + if p == nil { + p = &Profile{ + FileName: fn, + Mode: mode, + } + files[fn] = p + } + p.Blocks = append(p.Blocks, ProfileBlock{ + StartLine: toInt(m[2]), + StartCol: toInt(m[3]), + EndLine: toInt(m[4]), + EndCol: toInt(m[5]), + NumStmt: toInt(m[6]), + Count: toInt(m[7]), + }) + } + if err := s.Err(); err != nil { + return nil, err + } + for _, p := range files { + sort.Sort(blocksByStart(p.Blocks)) + } + // Generate a sorted slice. 
+ profiles := make([]*Profile, 0, len(files)) + for _, profile := range files { + profiles = append(profiles, profile) + } + sort.Sort(byFileName(profiles)) + return profiles, nil +} + +type blocksByStart []ProfileBlock + +func (b blocksByStart) Len() int { return len(b) } +func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b blocksByStart) Less(i, j int) bool { + bi, bj := b[i], b[j] + return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol +} + +var lineRe = regexp.MustCompile(`^(.+):([0-9]+).([0-9]+),([0-9]+).([0-9]+) ([0-9]+) ([0-9]+)$`) + +func toInt(s string) int { + i, err := strconv.Atoi(s) + if err != nil { + panic(err) + } + return i +} + +// Boundary represents the position in a source file of the beginning or end of a +// block as reported by the coverage profile. In HTML mode, it will correspond to +// the opening or closing of a <span> tag and will be used to colorize the source +type Boundary struct { + Offset int // Location as a byte offset in the source file. + Start bool // Is this the start of a block? + Count int // Event count from the cover profile. + Norm float64 // Count normalized to [0..1]. +} + +// Boundaries returns a Profile as a set of Boundary objects within the provided src. +func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) { + // Find maximum count. + max := 0 + for _, b := range p.Blocks { + if b.Count > max { + max = b.Count + } + } + // Divisor for normalization. + divisor := math.Log(float64(max)) + + // boundary returns a Boundary, populating the Norm field with a normalized Count. + boundary := func(offset int, start bool, count int) Boundary { + b := Boundary{Offset: offset, Start: start, Count: count} + if !start || count == 0 { + return b + } + if max <= 1 { + b.Norm = 0.8 // Profile is in "set" mode; we want a heat map. Use cov8 in the CSS. + } else if count > 0 { + b.Norm = math.Log(float64(count)) / divisor + } + return b + } + + line, col := 1, 2 // TODO: Why is this 2? + for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); { + b := p.Blocks[bi] + if b.StartLine == line && b.StartCol == col { + boundaries = append(boundaries, boundary(si, true, b.Count)) + } + if b.EndLine == line && b.EndCol == col { + boundaries = append(boundaries, boundary(si, false, 0)) + bi++ + continue // Don't advance through src; maybe the next block starts here. + } + if src[si] == '\n' { + line++ + col = 0 + } + col++ + si++ + } + sort.Sort(boundariesByPos(boundaries)) + return +} + +type boundariesByPos []Boundary + +func (b boundariesByPos) Len() int { return len(b) } +func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b boundariesByPos) Less(i, j int) bool { + if b[i].Offset == b[j].Offset { + return !b[i].Start && b[j].Start + } + return b[i].Offset < b[j].Offset +} + +// findFile finds the location of the named file in GOROOT, GOPATH etc.
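ParseProfiles above consumes `go test -coverprofile` output: a "mode:" header followed by one block per line in the shape matched by lineRe. A standalone sketch of what the capture groups hold, reusing the same pattern:

    package main

    import (
        "fmt"
        "regexp"
    )

    // Same pattern as lineRe above:
    // file:startLine.startCol,endLine.endCol numberOfStatements count
    var lineRe = regexp.MustCompile(`^(.+):([0-9]+).([0-9]+),([0-9]+).([0-9]+) ([0-9]+) ([0-9]+)$`)

    func main() {
        m := lineRe.FindStringSubmatch("encoding/base64/base64.go:34.44,37.40 3 1")
        fmt.Println("file: ", m[1])                   // encoding/base64/base64.go
        fmt.Println("block:", m[2], m[3], m[4], m[5]) // 34 44 37 40
        fmt.Println("stmts:", m[6], "hits:", m[7])    // 3 1
    }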
+func findFile(file string) (string, error) { + if strings.HasPrefix(file, "_") { + file = file[1:] + } + if _, err := os.Stat(file); err == nil { + return file, nil + } + dir, file := filepath.Split(file) + pkg, err := build.Import(dir, ".", build.FindOnly) + if err != nil { + return "", fmt.Errorf("can't find %q: %v", file, err) + } + return filepath.Join(pkg.Dir, file), nil +} diff --git a/vendor/github.com/tetafro/godot/.golangci.yml b/vendor/github.com/tetafro/godot/.golangci.yml index 2b799b2653..ea380eb83d 100644 --- a/vendor/github.com/tetafro/godot/.golangci.yml +++ b/vendor/github.com/tetafro/godot/.golangci.yml @@ -8,60 +8,73 @@ skip-dirs: linters: disable-all: true enable: - - deadcode - - errcheck - - gosimple - - govet - - ineffassign - - staticcheck - - structcheck - - typecheck - - unused - - varcheck + - asciicheck - bodyclose - - depguard + - cyclop - dogsled - - dupl - - funlen + - durationcheck + - errcheck + - errname + - errorlint + - exhaustive + - exportloopref + - exportloopref - gochecknoinits + - gocognit - goconst - gocritic - gocyclo - godot + - goerr113 - gofmt - gofumpt - goimports - - golint - - gomnd - - gomodguard - goprintffuncname - gosec + - gosimple + - govet + - importas + - ineffassign - lll - - maligned - misspell - nakedret - nestif + - noctx + - nolintlint - prealloc + - revive - rowserrcheck - - scopelint + - sqlclosecheck + - sqlclosecheck + - staticcheck - stylecheck + - typecheck - unconvert - unparam + - unused + - wastedassign - whitespace + - wrapcheck linters-settings: godot: - check-all: true + scope: toplevel issues: exclude-use-default: false + exclude: + - "do not define dynamic errors, use wrapped static errors instead" exclude-rules: - path: _test\.go linters: - dupl - errcheck - funlen + - gocognit + - cyclop - gosec - - path: cmd/godot/main\.go + - noctx + - path: main\.go linters: + - cyclop - gomnd diff --git a/vendor/github.com/tetafro/godot/README.md b/vendor/github.com/tetafro/godot/README.md index 3f97b0e395..6b2e530b93 100644 --- a/vendor/github.com/tetafro/godot/README.md +++ b/vendor/github.com/tetafro/godot/README.md @@ -1,7 +1,7 @@ # godot [![License](http://img.shields.io/badge/license-MIT-green.svg?style=flat)](https://raw.githubusercontent.com/tetafro/godot/master/LICENSE) -[![Github CI](https://img.shields.io/github/workflow/status/tetafro/godot/Test)](https://github.com/tetafro/godot/actions?query=workflow%3ATest) +[![Github CI](https://img.shields.io/github/actions/workflow/status/tetafro/godot/push.yml)](https://github.com/tetafro/godot/actions) [![Go Report](https://goreportcard.com/badge/github.com/tetafro/godot)](https://goreportcard.com/report/github.com/tetafro/godot) [![Codecov](https://codecov.io/gh/tetafro/godot/branch/master/graph/badge.svg)](https://codecov.io/gh/tetafro/godot) @@ -21,7 +21,7 @@ end of the last sentence if needed. Build from source ```sh -go get -u github.com/tetafro/godot/cmd/godot +go install github.com/tetafro/godot/cmd/godot@latest ``` or download binary from [releases page](https://github.com/tetafro/godot/releases). diff --git a/vendor/github.com/tetafro/godot/checks.go b/vendor/github.com/tetafro/godot/checks.go index cba54f310c..0e53c220a2 100644 --- a/vendor/github.com/tetafro/godot/checks.go +++ b/vendor/github.com/tetafro/godot/checks.go @@ -21,7 +21,7 @@ var ( // Abbreviations to exclude from capital letters check. abbreviations = []string{"i.e.", "i. e.", "e.g.", "e. g.", "etc."} - // Special tags in comments like "// nolint:", or "// +k8s:". 
+ // Special tags in comments like "//nolint:", or "//+k8s:". tags = regexp.MustCompile(`^\+?[a-z0-9]+:`) // Special hashtags in comments like "// #nosec". @@ -31,18 +31,24 @@ var ( endURL = regexp.MustCompile(`[a-z]+://[^\s]+$`) ) +// position is a position inside a comment (might be multiline comment). +type position struct { + line int // starts at 1 + column int // starts at 1, byte count +} + // checkComments checks every comment accordings to the rules from // `settings` argument. func checkComments(comments []comment, settings Settings) []Issue { - var issues []Issue // nolint: prealloc + var issues []Issue for _, c := range comments { if settings.Period { - if iss := checkCommentForPeriod(c); iss != nil { + if iss := checkPeriod(c); iss != nil { issues = append(issues, *iss) } } if settings.Capital { - if iss := checkCommentForCapital(c); len(iss) > 0 { + if iss := checkCapital(c); len(iss) > 0 { issues = append(issues, iss...) } } @@ -50,14 +56,34 @@ func checkComments(comments []comment, settings Settings) []Issue { return issues } -// checkCommentForPeriod checks that the last sentense of the comment ends +// checkPeriod checks that the last sentense of the comment ends // in a period. -func checkCommentForPeriod(c comment) *Issue { - pos, ok := checkPeriod(c.text) - if ok { +func checkPeriod(c comment) *Issue { + // Check last non-empty line + var found bool + var line string + var pos position + lines := strings.Split(c.text, "\n") + for i := len(lines) - 1; i >= 0; i-- { + line = strings.TrimRightFunc(lines[i], unicode.IsSpace) + if line == "" { + continue + } + found = true + pos.line = i + 1 + break + } + // All lines are empty + if !found { + return nil + } + // Correct line + if hasSuffix(line, lastChars) { return nil } + pos.column = len(line) + 1 + // Shift position to its real value. `c.text` doesn't contain comment's // special symbols: /* or //, and line indentations inside. It also // contains */ in the end in case of block comment. @@ -94,95 +120,15 @@ func checkCommentForPeriod(c comment) *Issue { return &iss } -// checkCommentForCapital checks that each sentense of the comment starts with -// a capital letter. -// nolint: unparam -func checkCommentForCapital(c comment) []Issue { - pp := checkCapital(c.text, c.decl) - if len(pp) == 0 { - return nil - } - - issues := make([]Issue, len(pp)) - for i, pos := range pp { - // Shift position by the length of comment's special symbols: /* or // - isBlock := strings.HasPrefix(c.lines[0], "/*") - if (isBlock && pos.line == 1) || !isBlock { - pos.column += 2 - } - - iss := Issue{ - Pos: token.Position{ - Filename: c.start.Filename, - Offset: c.start.Offset, - Line: pos.line + c.start.Line - 1, - Column: pos.column + c.start.Column - 1, - }, - Message: noCapitalMessage, - } - - // Make a replacement. Use `pos.original` to get an original original from - // attached lines. Use `iss.Pos.Column` because it's a position in - // the original original. - original := c.lines[pos.line-1] - col := byteToRuneColumn(original, iss.Pos.Column) - 1 - rep := string(unicode.ToTitle([]rune(original)[col])) // capital letter - if len(original) < iss.Pos.Column-1+len(rep) { - // This should never happen. Avoid panics, skip this check. 
- continue - } - iss.Replacement = original[:iss.Pos.Column-1] + rep + - original[iss.Pos.Column-1+len(rep):] - - // Save replacement to raw lines to be able to combine it with - // further replacements - c.lines[pos.line-1] = iss.Replacement - - issues[i] = iss - } - - return issues -} - -// checkPeriod checks that the last sentense of the text ends in a period. -// NOTE: Returned position is a position inside given text, not in the -// original file. -func checkPeriod(comment string) (pos position, ok bool) { - // Check last non-empty line - var found bool - var line string - lines := strings.Split(comment, "\n") - for i := len(lines) - 1; i >= 0; i-- { - line = strings.TrimRightFunc(lines[i], unicode.IsSpace) - if line == "" { - continue - } - found = true - pos.line = i + 1 - break - } - // All lines are empty - if !found { - return position{}, true - } - // Correct line - if hasSuffix(line, lastChars) { - return position{}, true - } - - pos.column = len(line) + 1 - return pos, false -} - -// checkCapital checks that each sentense of the text starts with +// checkCapital checks that each sentense of the comment starts with // a capital letter. -// NOTE: First letter is not checked in declaration comments, because they -// can describe unexported functions, which start with small letter. -func checkCapital(comment string, skipFirst bool) (pp []position) { +// +//nolint:cyclop,funlen +func checkCapital(c comment) []Issue { // Remove common abbreviations from the comment for _, abbr := range abbreviations { repl := strings.ReplaceAll(abbr, ".", "_") - comment = strings.ReplaceAll(comment, abbr, repl) + c.text = strings.ReplaceAll(c.text, abbr, repl) } // List of states during the scan: `empty` - nothing special, @@ -190,12 +136,14 @@ func checkCapital(comment string, skipFirst bool) (pp []position) { // `endOfSentence` - found `endChar`, and then space or newline. const empty, endChar, endOfSentence = 1, 2, 3 + var pp []position pos := position{line: 1} state := endOfSentence - if skipFirst { + if c.decl { + // Skip first state = empty } - for _, r := range comment { + for _, r := range c.text { s := string(r) pos.column++ @@ -223,12 +171,54 @@ func checkCapital(comment string, skipFirst bool) (pp []position) { if state == endOfSentence && unicode.IsLower(r) { pp = append(pp, position{ line: pos.line, - column: runeToByteColumn(comment, pos.column), + column: runeToByteColumn(c.text, pos.column), }) } state = empty } - return pp + if len(pp) == 0 { + return nil + } + + issues := make([]Issue, len(pp)) + for i, pos := range pp { + // Shift position by the length of comment's special symbols: /* or // + isBlock := strings.HasPrefix(c.lines[0], "/*") + if (isBlock && pos.line == 1) || !isBlock { + pos.column += 2 + } + + iss := Issue{ + Pos: token.Position{ + Filename: c.start.Filename, + Offset: c.start.Offset, + Line: pos.line + c.start.Line - 1, + Column: pos.column + c.start.Column - 1, + }, + Message: noCapitalMessage, + } + + // Make a replacement. Use `pos.original` to get an original original from + // attached lines. Use `iss.Pos.Column` because it's a position in + // the original original. + original := c.lines[pos.line-1] + col := byteToRuneColumn(original, iss.Pos.Column) - 1 + rep := string(unicode.ToTitle([]rune(original)[col])) // capital letter + if len(original) < iss.Pos.Column-1+len(rep) { + // This should never happen. Avoid panics, skip this check. 
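+			// The fix is built by slicing `original` at the issue column; if
+			// the column (plus the replacement rune) would run past the end
+			// of the line, the parsed positions and the source are out of
+			// sync, so skip rather than risk an out-of-range slice.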
+ continue + } + iss.Replacement = original[:iss.Pos.Column-1] + rep + + original[iss.Pos.Column-1+len(rep):] + + // Save replacement to raw lines to be able to combine it with + // further replacements + c.lines[pos.line-1] = iss.Replacement + + issues[i] = iss + } + + return issues } // isSpecialBlock checks that given block of comment lines is special and @@ -240,10 +230,13 @@ func isSpecialBlock(comment string) bool { strings.Contains(comment, "#define")) { return true } + if strings.HasPrefix(comment, "// Output: ") { + return true + } return false } -// isSpecialBlock checks that given comment line is special and +// isSpecialLine checks that given comment line is special and // shouldn't be checked as a regular sentence. func isSpecialLine(comment string) bool { // Skip cgo export tags: https://golang.org/cmd/cgo/#hdr-C_references_to_Go diff --git a/vendor/github.com/tetafro/godot/getters.go b/vendor/github.com/tetafro/godot/getters.go index 6153772bdf..7d3d22fb13 100644 --- a/vendor/github.com/tetafro/godot/getters.go +++ b/vendor/github.com/tetafro/godot/getters.go @@ -5,7 +5,7 @@ import ( "fmt" "go/ast" "go/token" - "io/ioutil" + "os" "regexp" "strings" ) @@ -44,7 +44,7 @@ func newParsedFile(file *ast.File, fset *token.FileSet) (*parsedFile, error) { // from "go/format" won't help here if the original file is not gofmt-ed. pf.lines, err = readFile(file, fset) if err != nil { - return nil, fmt.Errorf("read file: %v", err) + return nil, fmt.Errorf("read file: %w", err) } // Dirty hack. For some cases Go generates temporary files during @@ -58,9 +58,13 @@ func newParsedFile(file *ast.File, fset *token.FileSet) (*parsedFile, error) { return nil, errUnsuitableInput } - // Check consistency to avoid checking slice indexes in each function + // Check consistency to avoid checking slice indexes in each function. + // Note that `PositionFor` is used with `adjusted=false` to skip `//line` + // directives that can set references to other files (e.g. templates) + // instead of the real ones, and break consistency here. + // Issue: https://github.com/tetafro/godot/issues/32 lastComment := pf.file.Comments[len(pf.file.Comments)-1] - if p := pf.fset.Position(lastComment.End()); len(pf.lines) < p.Line { + if p := pf.fset.PositionFor(lastComment.End(), false); len(pf.lines) < p.Line { return nil, fmt.Errorf("inconsistency between file and AST: %s", p.Filename) } @@ -82,7 +86,7 @@ func (pf *parsedFile) getComments(scope Scope, exclude []*regexp.Regexp) []comme pf.getBlockComments(exclude), pf.getTopLevelComments(exclude)..., ) - default: + case DeclScope: // Top level declaration comments and comments from the inside // of top level blocks comments = append(pf.getBlockComments(exclude), decl...) @@ -118,7 +122,7 @@ func (pf *parsedFile) getBlockComments(exclude []*regexp.Regexp) []comment { // Skip comments that are not top-level for this block // (the block itself is top level, so comments inside this block // would be on column 2) - // nolint: gomnd + //nolint:gomnd if pf.fset.Position(c.Pos()).Column != 2 { continue } @@ -136,7 +140,7 @@ func (pf *parsedFile) getBlockComments(exclude []*regexp.Regexp) []comment { // getTopLevelComments gets all top level comments. 
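 //
 // A hedged overview of how the getters in this file map onto Scope values
 // (the wiring mirrors getComments above; names are this package's Scope
 // constants):
 //
 //	DeclScope:     block comments + declaration comments
 //	TopLevelScope: block comments + top level comments
 //	AllScope:      every comment in the file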
func (pf *parsedFile) getTopLevelComments(exclude []*regexp.Regexp) []comment { - var comments []comment // nolint: prealloc + var comments []comment //nolint:prealloc for _, c := range pf.file.Comments { if c == nil || len(c.List) == 0 { continue @@ -157,7 +161,7 @@ func (pf *parsedFile) getTopLevelComments(exclude []*regexp.Regexp) []comment { // getDeclarationComments gets top level declaration comments. func (pf *parsedFile) getDeclarationComments(exclude []*regexp.Regexp) []comment { - var comments []comment // nolint: prealloc + var comments []comment //nolint:prealloc for _, decl := range pf.file.Decls { var cg *ast.CommentGroup switch d := decl.(type) { @@ -184,7 +188,7 @@ func (pf *parsedFile) getDeclarationComments(exclude []*regexp.Regexp) []comment // getAllComments gets every single comment from the file. func (pf *parsedFile) getAllComments(exclude []*regexp.Regexp) []comment { - var comments []comment //nolint: prealloc + var comments []comment //nolint:prealloc for _, c := range pf.file.Comments { if c == nil || len(c.List) == 0 { continue @@ -200,11 +204,13 @@ func (pf *parsedFile) getAllComments(exclude []*regexp.Regexp) []comment { return comments } -// getText extracts text from comment. If comment is a special block +// getText extracts text from comment. If the comment is a special block // (e.g., CGO code), a block of empty lines is returned. If comment contains // special lines (e.g., tags or indented code examples), they are replaced -// with `specialReplacer` to skip checks for it. +// with `specialReplacer` to skip checks for them. // The result can be multiline. +// +//nolint:cyclop func getText(comment *ast.CommentGroup, exclude []*regexp.Regexp) (s string) { if len(comment.List) == 1 && strings.HasPrefix(comment.List[0].Text, "/*") && @@ -241,12 +247,12 @@ func getText(comment *ast.CommentGroup, exclude []*regexp.Regexp) (s string) { return s[:len(s)-1] // trim last "\n" } -// readFile reads file and returns it's lines as strings. +// readFile reads file and returns its lines as strings. func readFile(file *ast.File, fset *token.FileSet) ([]string, error) { fname := fset.File(file.Package) - f, err := ioutil.ReadFile(fname.Name()) + f, err := os.ReadFile(fname.Name()) if err != nil { - return nil, err + return nil, err //nolint:wrapcheck } return strings.Split(string(f), "\n"), nil } diff --git a/vendor/github.com/tetafro/godot/godot.go b/vendor/github.com/tetafro/godot/godot.go index 3a360a214b..e825e9a6de 100644 --- a/vendor/github.com/tetafro/godot/godot.go +++ b/vendor/github.com/tetafro/godot/godot.go @@ -3,10 +3,10 @@ package godot import ( + "errors" "fmt" "go/ast" "go/token" - "io/ioutil" "os" "regexp" "sort" @@ -25,12 +25,6 @@ type Issue struct { Replacement string } -// position is a position inside a comment (might be multiline comment). -type position struct { - line int // starts at 1 - column int // starts at 1, byte count -} - // comment is an internal representation of AST comment entity with additional // data attached. The latter is used for creating a full replacement for // the line with issues. @@ -44,18 +38,18 @@ type comment struct { // Run runs this linter on the provided code. 
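 //
 // A minimal, hedged usage sketch (assumes a file parsed with comments
 // retained; error handling elided):
 //
 //	fset := token.NewFileSet()
 //	f, _ := parser.ParseFile(fset, "main.go", src, parser.ParseComments)
 //	issues, _ := Run(f, fset, Settings{Scope: DeclScope, Period: true})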
func Run(file *ast.File, fset *token.FileSet, settings Settings) ([]Issue, error) { pf, err := newParsedFile(file, fset) - if err == errEmptyInput || err == errUnsuitableInput { + if errors.Is(err, errEmptyInput) || errors.Is(err, errUnsuitableInput) { return nil, nil } if err != nil { - return nil, fmt.Errorf("parse input file: %v", err) + return nil, fmt.Errorf("parse input file: %w", err) } exclude := make([]*regexp.Regexp, len(settings.Exclude)) for i := 0; i < len(settings.Exclude); i++ { exclude[i], err = regexp.Compile(settings.Exclude[i]) if err != nil { - return nil, fmt.Errorf("invalid regexp: %v", err) + return nil, fmt.Errorf("invalid regexp: %w", err) } } @@ -69,9 +63,9 @@ func Run(file *ast.File, fset *token.FileSet, settings Settings) ([]Issue, error // Fix fixes all issues and returns new version of file content. func Fix(path string, file *ast.File, fset *token.FileSet, settings Settings) ([]byte, error) { // Read file - content, err := ioutil.ReadFile(path) // nolint: gosec + content, err := os.ReadFile(path) //nolint:gosec if err != nil { - return nil, fmt.Errorf("read file: %v", err) + return nil, fmt.Errorf("read file: %w", err) } if len(content) == 0 { return nil, nil @@ -79,7 +73,7 @@ func Fix(path string, file *ast.File, fset *token.FileSet, settings Settings) ([ issues, err := Run(file, fset, settings) if err != nil { - return nil, fmt.Errorf("run linter: %v", err) + return nil, fmt.Errorf("run linter: %w", err) } // slice -> map @@ -102,21 +96,21 @@ func Fix(path string, file *ast.File, fset *token.FileSet, settings Settings) ([ return fixed, nil } -// Replace rewrites original file with it's fixed version. +// Replace rewrites original file with its fixed version. func Replace(path string, file *ast.File, fset *token.FileSet, settings Settings) error { info, err := os.Stat(path) if err != nil { - return fmt.Errorf("check file: %v", err) + return fmt.Errorf("check file: %w", err) } mode := info.Mode() fixed, err := Fix(path, file, fset, settings) if err != nil { - return fmt.Errorf("fix issues: %v", err) + return fmt.Errorf("fix issues: %w", err) } - if err := ioutil.WriteFile(path, fixed, mode); err != nil { - return fmt.Errorf("write file: %v", err) + if err := os.WriteFile(path, fixed, mode); err != nil { + return fmt.Errorf("write file: %w", err) } return nil } diff --git a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go index a7ff30b499..21bb485b4e 100644 --- a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go +++ b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go @@ -64,6 +64,8 @@ func (r runner) run(pass *analysis.Pass) (interface{}, error) { field := resStruct.Field(i) if field.Id() == "Body" { r.bodyObj = field + + break } } if r.bodyObj == nil { @@ -75,21 +77,20 @@ func (r runner) run(pass *analysis.Pass) (interface{}, error) { bmthd := bodyItrf.Method(i) if bmthd.Id() == closeMethod { r.closeMthd = bmthd + + break } } r.skipFile = map[*ast.File]bool{} +FuncLoop: for _, f := range funcs { // skip if the function is just referenced - var isreffunc bool for i := 0; i < f.Signature.Results().Len(); i++ { if f.Signature.Results().At(i).Type().String() == r.resTyp.String() { - isreffunc = true + continue FuncLoop } } - if isreffunc { - continue - } for _, b := range f.Blocks { for i := range b.Instrs { @@ -126,7 +127,12 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { resRefs := *val.Referrers() for _, resRef := range resRefs { 
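 			// Each referrer is an SSA instruction that uses the response
 			// value; the switch below classifies how the value escapes
 			// (stored, passed to a call or defer, and so on) to decide
 			// whether the Body may still be left open on some path.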
switch resRef := resRef.(type) { - case *ssa.Store: // Call in Closure function + case *ssa.Store: // Call in Closure function / Response is global variable + if _, ok := resRef.Addr.(*ssa.Global); ok { + // Referrers for globals are always nil, so skip. + return false + } + if len(*resRef.Addr.Referrers()) == 0 { return true } @@ -144,11 +150,26 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { } } - case *ssa.Call: // Indirect function call - if f, ok := resRef.Call.Value.(*ssa.Function); ok { + case *ssa.Call, *ssa.Defer: // Indirect function call + // Hacky way to extract CommonCall + var call ssa.CallCommon + switch rr := resRef.(type) { + case *ssa.Call: + call = rr.Call + case *ssa.Defer: + call = rr.Call + } + + if f, ok := call.Value.(*ssa.Function); ok { for _, b := range f.Blocks { - for i := range b.Instrs { - return r.isopen(b, i) + for i, bi := range b.Instrs { + if r.isCloseCall(bi) { + return false + } + + if r.isopen(b, i) { + return true + } } } } @@ -202,6 +223,10 @@ func (r *runner) getResVal(instr ssa.Instruction) (ssa.Value, bool) { if instr.Type().String() == r.resTyp.String() { return instr, true } + case *ssa.Store: + if instr.Val.Type().String() == r.resTyp.String() { + return instr.Val, true + } } return nil, false } diff --git a/vendor/github.com/timonwong/loggercheck/.codecov.yml b/vendor/github.com/timonwong/loggercheck/.codecov.yml new file mode 100644 index 0000000000..ef90457cac --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/.codecov.yml @@ -0,0 +1,16 @@ +coverage: + range: 70..90 # green if 90+, red if 70- + status: + patch: + # coverage status for pull request diff + default: + threshold: 1% # allow a little drop + project: + # coverage status for whole project + default: + target: auto # use coverage of base commit as target + threshold: 1% # allow a little drop + +ignore: + - "plugin/**" + - "cmd/**" diff --git a/vendor/github.com/timonwong/loggercheck/.gitignore b/vendor/github.com/timonwong/loggercheck/.gitignore new file mode 100644 index 0000000000..33df0df91c --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/.gitignore @@ -0,0 +1,9 @@ +.vscode/ +.idea/ + +bin/ +vendor/ + +dist/ + +cover.out diff --git a/vendor/github.com/timonwong/loggercheck/.golangci.yml b/vendor/github.com/timonwong/loggercheck/.golangci.yml new file mode 100644 index 0000000000..2873278937 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/.golangci.yml @@ -0,0 +1,94 @@ +linters-settings: + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - whyNoLint + gocyclo: + min-complexity: 15 + goimports: + local-prefixes: github.com/timonwong/loggercheck + gomnd: + # don't include the "operation" and "assign" + checks: + - argument + - case + - condition + - return + ignored-numbers: + - '0' + - '1' + - '2' + - '3' + ignored-functions: + - strings.SplitN + - strconv.ParseInt + govet: + check-shadowing: true + lll: + line-length: 140 + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-explanation: false # don't require an explanation for nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped +linters: + disable-all: true + enable: + - bodyclose + - dogsled + - dupl + - errcheck + - exportloopref + - funlen + - gochecknoinits + - goconst + - 
gocritic + - gocyclo + - gofumpt + - goimports + - gomnd + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - lll + - misspell + - nakedret + - noctx + - nolintlint + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - whitespace + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + - path: _test\.go + linters: + - gomnd + +run: + timeout: 5m + go: '1.17' + skip-dirs: + - testdata \ No newline at end of file diff --git a/vendor/github.com/timonwong/loggercheck/.goreleaser.yml b/vendor/github.com/timonwong/loggercheck/.goreleaser.yml new file mode 100644 index 0000000000..55ffe7ea03 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/.goreleaser.yml @@ -0,0 +1,59 @@ +--- +project_name: loggercheck + +release: + github: + owner: timonwong + name: loggercheck + +builds: + - binary: loggercheck + goos: + - darwin + - windows + - linux + goarch: + - '386' + - amd64 + - arm + - arm64 + goarm: + - '7' + env: + - CGO_ENABLED=0 + ignore: + - goos: darwin + goarch: '386' + main: ./cmd/loggercheck/ + flags: + - -trimpath + ldflags: -s -w + +archives: + - name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' + format_overrides: + - goos: windows + format: zip + files: + - LICENSE + - README.md + +snapshot: + name_template: '{{ incpatch .Version }}-next' + +checksum: + name_template: 'checksums.txt' + +changelog: + sort: asc + filters: + exclude: + - '(?i)^docs?:' + - '(?i)^docs\([^:]+\):' + - '(?i)^docs\[[^:]+\]:' + - '^tests?:' + - '(?i)^dev:' + - '^build\(deps\): bump .* in /docs \(#\d+\)' + - '^build\(deps\): bump .* in /\.github/peril \(#\d+\)' + - Merge pull request + - Merge branch diff --git a/vendor/github.com/timonwong/loggercheck/LICENSE b/vendor/github.com/timonwong/loggercheck/LICENSE new file mode 100644 index 0000000000..fb65310900 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Timon Wong + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/timonwong/loggercheck/Makefile b/vendor/github.com/timonwong/loggercheck/Makefile new file mode 100644 index 0000000000..37bf872024 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/Makefile @@ -0,0 +1,22 @@ +.PHONY: lint +lint: + golangci-lint run ./... 
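+
+# A hedged convenience target (illustrative; not part of the upstream
+# Makefile): run the freshly built linter over this module's own packages.
+.PHONY: selfcheck
+selfcheck: build
+	./bin/loggercheck ./...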
+
+.PHONY: test-deps
+test-deps:
+	cd testdata/src/a && go mod vendor
+
+.PHONY: test
+test: test-deps
+	go test -v -covermode=atomic -coverprofile=cover.out -coverpkg ./... ./...
+
+.PHONY: build
+build:
+	go build -o bin/loggercheck ./cmd/loggercheck
+
+.PHONY: build-plugin
+build-plugin:
+	CGO_ENABLED=1 go build -o bin/loggercheck.so -buildmode=plugin ./plugin
+
+.PHONY: build-all
+build-all: build build-plugin
diff --git a/vendor/github.com/timonwong/loggercheck/README.md b/vendor/github.com/timonwong/loggercheck/README.md
new file mode 100644
index 0000000000..14aeca3717
--- /dev/null
+++ b/vendor/github.com/timonwong/loggercheck/README.md
@@ -0,0 +1,103 @@
+# loggercheck
+
+## Description
+
+A linter that checks for an odd number of key-value pair arguments in calls to common logger libraries:
+- [kitlog](https://github.com/go-kit/log)
+- [klog](https://github.com/kubernetes/klog)
+- [logr](https://github.com/go-logr/logr)
+- [zap](https://github.com/uber-go/zap)
+
+## Badges
+
+![Build Status](https://github.com/timonwong/loggercheck/workflows/CI/badge.svg)
+[![Coverage](https://img.shields.io/codecov/c/github/timonwong/loggercheck?token=Nutf41gwoG)](https://app.codecov.io/gh/timonwong/loggercheck)
+[![License](https://img.shields.io/github/license/timonwong/loggercheck.svg)](/LICENSE)
+[![Release](https://img.shields.io/github/release/timonwong/loggercheck.svg)](https://github.com/timonwong/loggercheck/releases/latest)
+
+## Install
+
+```shell
+go install github.com/timonwong/loggercheck/cmd/loggercheck
+```
+
+## Usage
+
+```
+loggercheck: Checks key value pairs for common logger libraries (kitlog,logr,klog,zap).
+
+Usage: loggercheck [-flag] [package]
+
+
+Flags:
+  -V	print version and exit
+  -all
+    	no effect (deprecated)
+  -c int
+    	display offending line with this many lines of context (default -1)
+  -cpuprofile string
+    	write CPU profile to this file
+  -debug string
+    	debug flags, any subset of "fpstv"
+  -disable value
+    	comma-separated list of disabled logger checker (kitlog,klog,logr,zap) (default kitlog)
+  -fix
+    	apply all suggested fixes
+  -flags
+    	print analyzer flags in JSON
+  -json
+    	emit JSON output
+  -memprofile string
+    	write memory profile to this file
+  -noprintflike
+    	require printf-like format specifier not present in args
+  -requirestringkey
+    	require all logging keys to be inlined constant strings
+  -rulefile string
+    	path to a file contains a list of rules
+  -source
+    	no effect (deprecated)
+  -tags string
+    	no effect (deprecated)
+  -test
+    	indicates whether test files should be analyzed, too (default true)
+  -trace string
+    	write trace log to this file
+  -v	no effect (deprecated)
+```
+
+## Example
+
+```go
+package a
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/go-logr/logr"
+)
+
+func Example() {
+	log := logr.Discard()
+	log = log.WithValues("key")
+	log.Info("message", "key1", "value1", "key2", "value2", "key3")
+	log.Error(fmt.Errorf("error"), "message", "key1", "value1", "key2")
+	log.Error(fmt.Errorf("error"), "message", "key1", "value1", "key2", "value2")
+
+	var log2 logr.Logger
+	log2 = log
+	log2.Info("message", "key1")
+
+	log3 := logr.FromContextOrDiscard(context.TODO())
+	log3.Error(fmt.Errorf("error"), "message", "key1")
+}
+```
+
+```
+a.go:12:23: odd number of arguments passed as key-value pairs for logging
+a.go:13:22: odd number of arguments passed as key-value pairs for logging
+a.go:14:44: odd number of arguments passed as key-value pairs for logging
+a.go:19:23: odd number of arguments passed as key-value pairs for logging
+a.go:22:45: odd number of
arguments passed as key-value pairs for logging +``` \ No newline at end of file diff --git a/vendor/github.com/timonwong/loggercheck/internal/bytebufferpool/pool.go b/vendor/github.com/timonwong/loggercheck/internal/bytebufferpool/pool.go new file mode 100644 index 0000000000..9d88d21c49 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/bytebufferpool/pool.go @@ -0,0 +1,22 @@ +package bytebufferpool + +import ( + "bytes" + "sync" +) + +var pool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +func Get() *bytes.Buffer { + buf := pool.Get().(*bytes.Buffer) + buf.Reset() + return buf +} + +func Put(buf *bytes.Buffer) { + pool.Put(buf) +} diff --git a/vendor/github.com/timonwong/loggercheck/internal/checkers/checker.go b/vendor/github.com/timonwong/loggercheck/internal/checkers/checker.go new file mode 100644 index 0000000000..5615636efb --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/checkers/checker.go @@ -0,0 +1,59 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" +) + +type Config struct { + RequireStringKey bool + NoPrintfLike bool +} + +type CallContext struct { + Expr *ast.CallExpr + Func *types.Func + Signature *types.Signature +} + +type Checker interface { + FilterKeyAndValues(pass *analysis.Pass, keyAndValues []ast.Expr) []ast.Expr + CheckLoggingKey(pass *analysis.Pass, keyAndValues []ast.Expr) + CheckPrintfLikeSpecifier(pass *analysis.Pass, args []ast.Expr) +} + +func ExecuteChecker(c Checker, pass *analysis.Pass, call CallContext, cfg Config) { + params := call.Signature.Params() + nparams := params.Len() // variadic => nonzero + startIndex := nparams - 1 + + lastArg := params.At(nparams - 1) + iface, ok := lastArg.Type().(*types.Slice).Elem().(*types.Interface) + if !ok || !iface.Empty() { + return // final (args) param is not ...interface{} + } + + keyValuesArgs := c.FilterKeyAndValues(pass, call.Expr.Args[startIndex:]) + + if len(keyValuesArgs)%2 != 0 { + firstArg := keyValuesArgs[0] + lastArg := keyValuesArgs[len(keyValuesArgs)-1] + pass.Report(analysis.Diagnostic{ + Pos: firstArg.Pos(), + End: lastArg.End(), + Category: DiagnosticCategory, + Message: "odd number of arguments passed as key-value pairs for logging", + }) + } + + if cfg.RequireStringKey { + c.CheckLoggingKey(pass, keyValuesArgs) + } + + if cfg.NoPrintfLike { + // Check all args + c.CheckPrintfLikeSpecifier(pass, call.Expr.Args) + } +} diff --git a/vendor/github.com/timonwong/loggercheck/internal/checkers/common.go b/vendor/github.com/timonwong/loggercheck/internal/checkers/common.go new file mode 100644 index 0000000000..42cbd01937 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/checkers/common.go @@ -0,0 +1,49 @@ +package checkers + +import ( + "go/ast" + "go/constant" + "go/printer" + "go/token" + "go/types" + "unicode/utf8" + + "golang.org/x/tools/go/analysis" + + "github.com/timonwong/loggercheck/internal/bytebufferpool" +) + +const ( + DiagnosticCategory = "logging" +) + +// extractValueFromStringArg returns true if the argument is a string type (literal or constant). 
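+// For example (hedged; the names are illustrative): in a call like
+// log.Info("msg", "key", userID), the "key" argument folds to a constant
+// string and yields ("key", true), while userID yields ("", false) unless it
+// is itself a string constant.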
+func extractValueFromStringArg(pass *analysis.Pass, arg ast.Expr) (value string, ok bool) { + if typeAndValue, ok := pass.TypesInfo.Types[arg]; ok { + if typ, ok := typeAndValue.Type.(*types.Basic); ok && typ.Kind() == types.String && typeAndValue.Value != nil { + return constant.StringVal(typeAndValue.Value), true + } + } + + return "", false +} + +func renderNodeEllipsis(fset *token.FileSet, v interface{}) string { + const maxLen = 20 + + buf := bytebufferpool.Get() + defer bytebufferpool.Put(buf) + + _ = printer.Fprint(buf, fset, v) + s := buf.String() + if utf8.RuneCountInString(s) > maxLen { + // Copied from go/constant/value.go + i := 0 + for n := 0; n < maxLen-3; n++ { + _, size := utf8.DecodeRuneInString(s[i:]) + i += size + } + s = s[:i] + "..." + } + return s +} diff --git a/vendor/github.com/timonwong/loggercheck/internal/checkers/general.go b/vendor/github.com/timonwong/loggercheck/internal/checkers/general.go new file mode 100644 index 0000000000..6512cce30d --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/checkers/general.go @@ -0,0 +1,68 @@ +package checkers + +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/timonwong/loggercheck/internal/checkers/printf" + "github.com/timonwong/loggercheck/internal/stringutil" +) + +type General struct{} + +func (g General) FilterKeyAndValues(_ *analysis.Pass, keyAndValues []ast.Expr) []ast.Expr { + return keyAndValues +} + +func (g General) CheckLoggingKey(pass *analysis.Pass, keyAndValues []ast.Expr) { + for i := 0; i < len(keyAndValues); i += 2 { + arg := keyAndValues[i] + if value, ok := extractValueFromStringArg(pass, arg); ok { + if stringutil.IsASCII(value) { + continue + } + + pass.Report(analysis.Diagnostic{ + Pos: arg.Pos(), + End: arg.End(), + Category: DiagnosticCategory, + Message: fmt.Sprintf( + "logging keys are expected to be alphanumeric strings, please remove any non-latin characters from %q", + value), + }) + } else { + pass.Report(analysis.Diagnostic{ + Pos: arg.Pos(), + End: arg.End(), + Category: DiagnosticCategory, + Message: fmt.Sprintf( + "logging keys are expected to be inlined constant strings, please replace %q provided with string", + renderNodeEllipsis(pass.Fset, arg)), + }) + } + } +} + +func (g General) CheckPrintfLikeSpecifier(pass *analysis.Pass, args []ast.Expr) { + for _, arg := range args { + format, ok := extractValueFromStringArg(pass, arg) + if !ok { + continue + } + + if specifier, ok := printf.IsPrintfLike(format); ok { + pass.Report(analysis.Diagnostic{ + Pos: arg.Pos(), + End: arg.End(), + Category: DiagnosticCategory, + Message: fmt.Sprintf("logging message should not use format specifier %q", specifier), + }) + + return // One error diagnostic is enough + } + } +} + +var _ Checker = (*General)(nil) diff --git a/vendor/github.com/timonwong/loggercheck/internal/checkers/printf/printf.go b/vendor/github.com/timonwong/loggercheck/internal/checkers/printf/printf.go new file mode 100644 index 0000000000..b38f46f201 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/checkers/printf/printf.go @@ -0,0 +1,252 @@ +package printf + +import ( + "strconv" + "strings" + "unicode/utf8" +) + +// Copied from golang.org/x/tools +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +type printVerb struct { + verb rune // User may provide verb through Formatter; could be a rune. 
+ flags string // known flags are all ASCII +} + +// Common flag sets for printf verbs. +const ( + noFlag = "" + numFlag = " -+.0" + sharpNumFlag = " -+.0#" + allFlags = " -+.0#" +) + +// printVerbs identifies which flags are known to printf for each verb. +var printVerbs = []printVerb{ + // '-' is a width modifier, always valid. + // '.' is a precision for float, max width for strings. + // '+' is required sign for numbers, Go format for %v. + // '#' is alternate format for several verbs. + // ' ' is spacer for numbers + {'%', noFlag}, + {'b', sharpNumFlag}, + {'c', "-"}, + {'d', numFlag}, + {'e', sharpNumFlag}, + {'E', sharpNumFlag}, + {'f', sharpNumFlag}, + {'F', sharpNumFlag}, + {'g', sharpNumFlag}, + {'G', sharpNumFlag}, + {'o', sharpNumFlag}, + {'O', sharpNumFlag}, + {'p', "-#"}, + {'q', " -+.0#"}, + {'s', " -+.0"}, + {'t', "-"}, + {'T', "-"}, + {'U', "-#"}, + {'v', allFlags}, + {'w', allFlags}, + {'x', sharpNumFlag}, + {'X', sharpNumFlag}, +} + +// formatState holds the parsed representation of a printf directive such as "%3.*[4]d". +// It is constructed by parsePrintfVerb. +type formatState struct { + verb rune // the format verb: 'd' for "%d" + format string // the full format directive from % through verb, "%.3d". + flags []byte // the list of # + etc. + // Used only during parse. + hasIndex bool // Whether the argument is indexed. + indexPending bool // Whether we have an indexed argument that has not resolved. + nbytes int // number of bytes of the format string consumed. +} + +// parseFlags accepts any printf flags. +func (s *formatState) parseFlags() { + for s.nbytes < len(s.format) { + switch c := s.format[s.nbytes]; c { + case '#', '0', '+', '-', ' ': + s.flags = append(s.flags, c) + s.nbytes++ + default: + return + } + } +} + +// scanNum advances through a decimal number if present. +func (s *formatState) scanNum() { + for ; s.nbytes < len(s.format); s.nbytes++ { + c := s.format[s.nbytes] + if c < '0' || '9' < c { + return + } + } +} + +func stringIndexAt(s, substr string, start int) int { + idx := strings.Index(s[start:], substr) + if idx < 0 { + return idx + } + return idx + start +} + +// parseIndex scans an index expression. It returns false if there is a syntax error. +func (s *formatState) parseIndex() bool { + if s.nbytes == len(s.format) || s.format[s.nbytes] != '[' { + return true + } + // Argument index present. + s.nbytes++ // skip '[' + start := s.nbytes + s.scanNum() + ok := true + if s.nbytes == len(s.format) || s.nbytes == start || s.format[s.nbytes] != ']' { + ok = false + s.nbytes = stringIndexAt(s.format, "]", start) + if s.nbytes < 0 { + return false + } + } + arg32, err := strconv.ParseInt(s.format[start:s.nbytes], 10, 32) + if err != nil || !ok || arg32 <= 0 { + return false + } + s.nbytes++ // skip ']' + s.hasIndex = true + s.indexPending = true + return true +} + +// parseNum scans a width or precision (or *). +func (s *formatState) parseNum() { + if s.nbytes < len(s.format) && s.format[s.nbytes] == '*' { + if s.indexPending { // Absorb it. + s.indexPending = false + } + s.nbytes++ + } else { + s.scanNum() + } +} + +// parsePrecision scans for a precision. It returns false if there's a bad index expression. +func (s *formatState) parsePrecision() bool { + // If there's a period, there may be a precision. + if s.nbytes < len(s.format) && s.format[s.nbytes] == '.' { + s.flags = append(s.flags, '.') // Treat precision as a flag. 
+ s.nbytes++ + if !s.parseIndex() { + return false + } + s.parseNum() + } + return true +} + +// parsePrintfVerb looks the formatting directive that begins the format string +// and returns a formatState that encodes what the directive wants, without looking +// at the actual arguments present in the call. The result is nil if there is an error. +func parsePrintfVerb(format string) *formatState { + state := &formatState{ + format: format, + flags: make([]byte, 0, 5), //nolint:gomnd + nbytes: 1, // There's guaranteed to be a percent sign. + } + + // There may be flags. + state.parseFlags() + // There may be an index. + if !state.parseIndex() { + return nil + } + // There may be a width. + state.parseNum() + // There may be a precision. + if !state.parsePrecision() { + return nil + } + // Now a verb, possibly prefixed by an index (which we may already have). + if !state.indexPending && !state.parseIndex() { + return nil + } + if state.nbytes == len(state.format) { + // missing verb at end of string + return nil + } + verb, w := utf8.DecodeRuneInString(state.format[state.nbytes:]) + state.verb = verb + state.nbytes += w + state.format = state.format[:state.nbytes] + return state +} + +func containsAll(s string, pattern []byte) bool { + for _, c := range pattern { + if !strings.ContainsRune(s, rune(c)) { + return false + } + } + return true +} + +func isPrintfArg(state *formatState) bool { + var v printVerb + found := false + // Linear scan is fast enough for a small list. + for _, v = range printVerbs { + if v.verb == state.verb { + found = true + break + } + } + + if !found { + // unknown verb, just skip + return false + } + + if !containsAll(v.flags, state.flags) { + // unrecognized format flag, just skip + return false + } + + return true +} + +func IsPrintfLike(format string) (firstSpecifier string, ok bool) { + if !strings.Contains(format, "%") { + return "", false + } + + for i, w := 0, 0; i < len(format); i += w { + w = 1 + if format[i] != '%' { + continue + } + + state := parsePrintfVerb(format[i:]) + if state == nil { + return "", false + } + + w = len(state.format) + if !isPrintfArg(state) { + return "", false + } + + if !ok { + firstSpecifier = state.format + ok = true + } + } + + return +} diff --git a/vendor/github.com/timonwong/loggercheck/internal/checkers/zap.go b/vendor/github.com/timonwong/loggercheck/internal/checkers/zap.go new file mode 100644 index 0000000000..2356f83482 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/checkers/zap.go @@ -0,0 +1,42 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" +) + +type Zap struct { + General +} + +func (z Zap) FilterKeyAndValues(pass *analysis.Pass, keyAndValues []ast.Expr) []ast.Expr { + // Check the argument count + filtered := make([]ast.Expr, 0, len(keyAndValues)) + for _, arg := range keyAndValues { + // Skip any zapcore.Field we found + switch arg := arg.(type) { + case *ast.CallExpr, *ast.Ident: + typ := pass.TypesInfo.TypeOf(arg) + switch typ := typ.(type) { + case *types.Named: + obj := typ.Obj() + // This is a strongly-typed field. Consume it and move on. 
+ // Actually it's go.uber.org/zap/zapcore.Field, however for simplicity + // we don't check the import path + if obj != nil && obj.Name() == "Field" { + continue + } + default: + // pass + } + } + + filtered = append(filtered, arg) + } + + return filtered +} + +var _ Checker = (*Zap)(nil) diff --git a/vendor/github.com/timonwong/loggercheck/internal/rules/rules.go b/vendor/github.com/timonwong/loggercheck/internal/rules/rules.go new file mode 100644 index 0000000000..27d6ebb274 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/rules/rules.go @@ -0,0 +1,201 @@ +package rules + +import ( + "bufio" + "errors" + "fmt" + "go/types" + "io" + "strings" + + "github.com/timonwong/loggercheck/internal/bytebufferpool" +) + +var ErrInvalidRule = errors.New("invalid rule format") + +const CustomRulesetName = "custom" + +type Ruleset struct { + Name string + PackageImport string + Rules []FuncRule + + ruleIndicesByFuncName map[string][]int +} + +func (rs *Ruleset) Match(fn *types.Func) bool { + // PackageImport is already checked (by indices), skip checking it here + sig := fn.Type().(*types.Signature) // it's safe since we already checked + + // Fail fast if the function name is not in the rule list. + indices, ok := rs.ruleIndicesByFuncName[fn.Name()] + if !ok { + return false + } + + for _, idx := range indices { + rule := &rs.Rules[idx] + if matchRule(rule, sig) { + return true + } + } + + return false +} + +func receiverTypeOf(recvType types.Type) string { + buf := bytebufferpool.Get() + defer bytebufferpool.Put(buf) + + var recvNamed *types.Named + switch recvType := recvType.(type) { + case *types.Pointer: + buf.WriteByte('*') + if elem, ok := recvType.Elem().(*types.Named); ok { + recvNamed = elem + } + case *types.Named: + recvNamed = recvType + } + + if recvNamed == nil { + // not supported type + return "" + } + + buf.WriteString(recvNamed.Obj().Name()) + typeParams := recvNamed.TypeParams() + if typeParamsLen := typeParams.Len(); typeParamsLen > 0 { + buf.WriteByte('[') + for i := 0; i < typeParamsLen; i++ { + if i > 0 { + // comma as separator + buf.WriteByte(',') + } + p := typeParams.At(i) + buf.WriteString(p.Obj().Name()) + } + buf.WriteByte(']') + } + + return buf.String() +} + +func matchRule(p *FuncRule, sig *types.Signature) bool { + // we do not check package import here since it's already checked in Match() + recv := sig.Recv() + isReceiver := recv != nil + if isReceiver != p.IsReceiver { + return false + } + + if isReceiver { + recvType := recv.Type() + receiverType := receiverTypeOf(recvType) + if receiverType != p.ReceiverType { + return false + } + } + + return true +} + +type FuncRule struct { // package import should be accessed from Rulset + ReceiverType string + FuncName string + IsReceiver bool +} + +func ParseFuncRule(rule string) (packageImport string, pat FuncRule, err error) { + lastDot := strings.LastIndexFunc(rule, func(r rune) bool { + return r == '.' 
|| r == '/' + }) + if lastDot == -1 || rule[lastDot] == '/' { + return "", pat, ErrInvalidRule + } + + importOrReceiver := rule[:lastDot] + pat.FuncName = rule[lastDot+1:] + + if strings.HasPrefix(rule, "(") { // package + if !strings.HasSuffix(importOrReceiver, ")") { + return "", FuncRule{}, ErrInvalidRule + } + + var isPointerReceiver bool + pat.IsReceiver = true + receiver := importOrReceiver[1 : len(importOrReceiver)-1] + if strings.HasPrefix(receiver, "*") { + isPointerReceiver = true + receiver = receiver[1:] + } + + typeDotIdx := strings.LastIndexFunc(receiver, func(r rune) bool { + return r == '.' || r == '/' + }) + if typeDotIdx == -1 || receiver[typeDotIdx] == '/' { + return "", FuncRule{}, ErrInvalidRule + } + receiverType := receiver[typeDotIdx+1:] + if isPointerReceiver { + receiverType = "*" + receiverType + } + pat.ReceiverType = receiverType + packageImport = receiver[:typeDotIdx] + } else { + packageImport = importOrReceiver + } + + return packageImport, pat, nil +} + +func ParseRules(lines []string) (result []Ruleset, err error) { + rulesByImport := make(map[string][]FuncRule) + for i, line := range lines { + if line == "" { + continue + } + + if strings.HasPrefix(line, "#") { // comments + continue + } + + packageImport, pat, err := ParseFuncRule(line) + if err != nil { + return nil, fmt.Errorf("error parse rule at line %d: %w", i+1, err) + } + rulesByImport[packageImport] = append(rulesByImport[packageImport], pat) + } + + for packageImport, rules := range rulesByImport { + ruleIndicesByFuncName := make(map[string][]int, len(rules)) + for idx, rule := range rules { + fnName := rule.FuncName + ruleIndicesByFuncName[fnName] = append(ruleIndicesByFuncName[fnName], idx) + } + + result = append(result, Ruleset{ + Name: CustomRulesetName, // NOTE(timonwong) Always "custom" for custom rule + PackageImport: packageImport, + Rules: rules, + ruleIndicesByFuncName: ruleIndicesByFuncName, + }) + } + return result, nil +} + +func ParseRuleFile(r io.Reader) (result []Ruleset, err error) { + // Rule files are relatively small, so read it into string slice first. + var lines []string + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + lines = append(lines, line) + } + if err := scanner.Err(); err != nil { + return nil, err + } + + return ParseRules(lines) +} diff --git a/vendor/github.com/timonwong/loggercheck/internal/sets/string.go b/vendor/github.com/timonwong/loggercheck/internal/sets/string.go new file mode 100644 index 0000000000..daf8d57fce --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/sets/string.go @@ -0,0 +1,59 @@ +package sets + +import ( + "sort" + "strings" +) + +type Empty struct{} + +type StringSet map[string]Empty + +func NewString(items ...string) StringSet { + s := make(StringSet) + s.Insert(items...) + return s +} + +func (s StringSet) Insert(items ...string) { + for _, item := range items { + s[item] = Empty{} + } +} + +func (s StringSet) Has(item string) bool { + _, contained := s[item] + return contained +} + +func (s StringSet) List() []string { + if len(s) == 0 { + return nil + } + + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Strings(res) + return res +} + +// Set implements flag.Value interface. +func (s *StringSet) Set(v string) error { + v = strings.TrimSpace(v) + if v == "" { + *s = nil + return nil + } + + parts := strings.Split(v, ",") + set := NewString(parts...) 
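+	// Replace the receiver wholesale, so a repeated flag occurrence resets
+	// the set instead of accumulating values across occurrences.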
+ *s = set + return nil +} + +// String implements flag.Value interface +func (s StringSet) String() string { + return strings.Join(s.List(), ",") +} diff --git a/vendor/github.com/timonwong/loggercheck/internal/stringutil/is.go b/vendor/github.com/timonwong/loggercheck/internal/stringutil/is.go new file mode 100644 index 0000000000..a36b742fc4 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/stringutil/is.go @@ -0,0 +1,15 @@ +package stringutil + +import "unicode/utf8" + +// IsASCII returns true if string are ASCII. +func IsASCII(s string) bool { + for _, r := range s { + if r >= utf8.RuneSelf { + // Not ASCII. + return false + } + } + + return true +} diff --git a/vendor/github.com/timonwong/loggercheck/loggercheck.go b/vendor/github.com/timonwong/loggercheck/loggercheck.go new file mode 100644 index 0000000000..8bd10aee80 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/loggercheck.go @@ -0,0 +1,196 @@ +package loggercheck + +import ( + "flag" + "fmt" + "go/ast" + "go/types" + "os" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + + "github.com/timonwong/loggercheck/internal/checkers" + "github.com/timonwong/loggercheck/internal/rules" + "github.com/timonwong/loggercheck/internal/sets" +) + +const Doc = `Checks key value pairs for common logger libraries (kitlog,klog,logr,zap).` + +func NewAnalyzer(opts ...Option) *analysis.Analyzer { + l := newLoggerCheck(opts...) + a := &analysis.Analyzer{ + Name: "loggercheck", + Doc: Doc, + Flags: *l.fs, + Run: l.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } + return a +} + +type loggercheck struct { + fs *flag.FlagSet + + disable sets.StringSet // flag -disable + ruleFile string // flag -rulefile + requireStringKey bool // flag -requirestringkey + noPrintfLike bool // flag -noprintflike + + rules []string // used for external integration, for example golangci-lint + rulesetList []rules.Ruleset // populate at runtime + rulesetIndicesByImport map[string][]int // ruleset index, populate at runtime +} + +func newLoggerCheck(opts ...Option) *loggercheck { + fs := flag.NewFlagSet("loggercheck", flag.ExitOnError) + l := &loggercheck{ + fs: fs, + disable: sets.NewString("kitlog"), + rulesetList: append([]rules.Ruleset{}, staticRuleList...), // ensure we make a clone of static rules first + } + + fs.StringVar(&l.ruleFile, "rulefile", "", "path to a file contains a list of rules") + fs.Var(&l.disable, "disable", "comma-separated list of disabled logger checker (kitlog,klog,logr,zap)") + fs.BoolVar(&l.requireStringKey, "requirestringkey", false, "require all logging keys to be inlined constant strings") + fs.BoolVar(&l.noPrintfLike, "noprintflike", false, "require printf-like format specifier not present in args") + + for _, opt := range opts { + opt(l) + } + + return l +} + +func (l *loggercheck) isCheckerDisabled(name string) bool { + return l.disable.Has(name) +} + +// vendorLessPath returns the devendorized version of the import path ipath. +// For example: "a/vendor/github.com/go-logr/logr" will become "github.com/go-logr/logr". 
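+//
+// A hedged test-style sketch of the behavior described above:
+//
+//	vendorLessPath("a/vendor/github.com/go-logr/logr") // "github.com/go-logr/logr"
+//	vendorLessPath("github.com/go-logr/logr")          // unchanged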
+func vendorLessPath(ipath string) string { + if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { + return ipath[i+len("/vendor/"):] + } + return ipath +} + +func (l *loggercheck) getCheckerForFunc(fn *types.Func) checkers.Checker { + pkg := fn.Pkg() + if pkg == nil { + return nil + } + + pkgPath := vendorLessPath(pkg.Path()) + indices := l.rulesetIndicesByImport[pkgPath] + + for _, idx := range indices { + rs := &l.rulesetList[idx] + if l.isCheckerDisabled(rs.Name) { + // Skip ignored logger checker. + continue + } + + if !rs.Match(fn) { + continue + } + + checker := checkerByRulesetName[rs.Name] + if checker == nil { + return checkers.General{} + } + return checker + } + + return nil +} + +func (l *loggercheck) checkLoggerArguments(pass *analysis.Pass, call *ast.CallExpr) { + fn, _ := typeutil.Callee(pass.TypesInfo, call).(*types.Func) + if fn == nil { + return // function pointer is not supported + } + + sig, ok := fn.Type().(*types.Signature) + if !ok || !sig.Variadic() { + return // not variadic + } + + // ellipsis args is hard, just skip + if call.Ellipsis.IsValid() { + return + } + + checker := l.getCheckerForFunc(fn) + if checker == nil { + return + } + + checkers.ExecuteChecker(checker, pass, checkers.CallContext{ + Expr: call, + Func: fn, + Signature: sig, + }, checkers.Config{ + RequireStringKey: l.requireStringKey, + NoPrintfLike: l.noPrintfLike, + }) +} + +func (l *loggercheck) processConfig() error { + if l.ruleFile != "" { // flags takes precedence over configs + f, err := os.Open(l.ruleFile) + if err != nil { + return fmt.Errorf("failed to open rule file: %w", err) + } + defer f.Close() + + custom, err := rules.ParseRuleFile(f) + if err != nil { + return fmt.Errorf("failed to parse rule file: %w", err) + } + l.rulesetList = append(l.rulesetList, custom...) + } else if len(l.rules) > 0 { + custom, err := rules.ParseRules(l.rules) + if err != nil { + return fmt.Errorf("failed to parse rules: %w", err) + } + l.rulesetList = append(l.rulesetList, custom...) + } + + // Build index + indices := make(map[string][]int) + for i, rs := range l.rulesetList { + indices[rs.PackageImport] = append(indices[rs.PackageImport], i) + } + l.rulesetIndicesByImport = indices + + return nil +} + +func (l *loggercheck) run(pass *analysis.Pass) (interface{}, error) { + err := l.processConfig() + if err != nil { + return nil, err + } + + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + insp.Preorder(nodeFilter, func(node ast.Node) { + call := node.(*ast.CallExpr) + + typ := pass.TypesInfo.Types[call.Fun].Type + if typ == nil { + // Skip checking functions with unknown type. + return + } + + l.checkLoggerArguments(pass, call) + }) + + return nil, nil +} diff --git a/vendor/github.com/timonwong/loggercheck/options.go b/vendor/github.com/timonwong/loggercheck/options.go new file mode 100644 index 0000000000..6b5f00af1e --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/options.go @@ -0,0 +1,31 @@ +package loggercheck + +import ( + "github.com/timonwong/loggercheck/internal/sets" +) + +type Option func(*loggercheck) + +func WithDisable(disable []string) Option { + return func(l *loggercheck) { + l.disable = sets.NewString(disable...) 
+ } +} + +func WithRules(customRules []string) Option { + return func(l *loggercheck) { + l.rules = customRules + } +} + +func WithRequireStringKey(requireStringKey bool) Option { + return func(l *loggercheck) { + l.requireStringKey = requireStringKey + } +} + +func WithNoPrintfLike(noPrintfLike bool) Option { + return func(l *loggercheck) { + l.noPrintfLike = noPrintfLike + } +} diff --git a/vendor/github.com/timonwong/loggercheck/staticrules.go b/vendor/github.com/timonwong/loggercheck/staticrules.go new file mode 100644 index 0000000000..f955b34341 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/staticrules.go @@ -0,0 +1,68 @@ +package loggercheck + +import ( + "errors" + "fmt" + + "github.com/timonwong/loggercheck/internal/checkers" + "github.com/timonwong/loggercheck/internal/rules" +) + +var ( + staticRuleList = []rules.Ruleset{ + mustNewStaticRuleSet("logr", []string{ + "(github.com/go-logr/logr.Logger).Error", + "(github.com/go-logr/logr.Logger).Info", + "(github.com/go-logr/logr.Logger).WithValues", + }), + mustNewStaticRuleSet("klog", []string{ + "k8s.io/klog/v2.InfoS", + "k8s.io/klog/v2.InfoSDepth", + "k8s.io/klog/v2.ErrorS", + "(k8s.io/klog/v2.Verbose).InfoS", + "(k8s.io/klog/v2.Verbose).InfoSDepth", + "(k8s.io/klog/v2.Verbose).ErrorS", + }), + mustNewStaticRuleSet("zap", []string{ + "(*go.uber.org/zap.SugaredLogger).With", + "(*go.uber.org/zap.SugaredLogger).Debugw", + "(*go.uber.org/zap.SugaredLogger).Infow", + "(*go.uber.org/zap.SugaredLogger).Warnw", + "(*go.uber.org/zap.SugaredLogger).Errorw", + "(*go.uber.org/zap.SugaredLogger).DPanicw", + "(*go.uber.org/zap.SugaredLogger).Panicw", + "(*go.uber.org/zap.SugaredLogger).Fatalw", + }), + mustNewStaticRuleSet("kitlog", []string{ + "github.com/go-kit/log.With", + "github.com/go-kit/log.WithPrefix", + "github.com/go-kit/log.WithSuffix", + "(github.com/go-kit/log.Logger).Log", + }), + } + checkerByRulesetName = map[string]checkers.Checker{ + // by default, checkers.General will be used. + "zap": checkers.Zap{}, + } +) + +// mustNewStaticRuleSet only called at init, catch errors during development. +// In production it will not panic. +func mustNewStaticRuleSet(name string, lines []string) rules.Ruleset { + if len(lines) == 0 { + panic(errors.New("no rules provided")) + } + + rulesetList, err := rules.ParseRules(lines) + if err != nil { + panic(err) + } + + if len(rulesetList) != 1 { + panic(fmt.Errorf("expected 1 ruleset, got %d", len(rulesetList))) + } + + ruleset := rulesetList[0] + ruleset.Name = name + return ruleset +} diff --git a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go index 3b445e2950..79e7bba863 100644 --- a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go +++ b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go @@ -1,29 +1,28 @@ package wrapcheck import ( + "fmt" "go/ast" "go/token" "go/types" - "log" - "os" + "regexp" "strings" "github.com/gobwas/glob" "golang.org/x/tools/go/analysis" ) -var ( - DefaultIgnoreSigs = []string{ - ".Errorf(", - "errors.New(", - "errors.Unwrap(", - ".Wrap(", - ".Wrapf(", - ".WithMessage(", - ".WithMessagef(", - ".WithStack(", - } -) +var DefaultIgnoreSigs = []string{ + ".Errorf(", + "errors.New(", + "errors.Unwrap(", + "errors.Join(", + ".Wrap(", + ".Wrapf(", + ".WithMessage(", + ".WithMessagef(", + ".WithStack(", +} // WrapcheckConfig is the set of configuration values which configure the // behaviour of the linter. 
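 //
 // A hedged configuration sketch (values are illustrative and drawn from the
 // field documentation below):
 //
 //	cfg := NewDefaultConfig()
 //	cfg.IgnoreSigRegexps = []string{`\.New.*Err\(`}
 //	cfg.IgnorePackageGlobs = []string{"encoding/*"}
 //	analyzer := NewAnalyzer(cfg)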
@@ -33,7 +32,7 @@ type WrapcheckConfig struct { // allows you to specify functions that wrapcheck will not report as // unwrapped. // - // For example, an ingoredSig of `[]string{"errors.New("}` will ignore errors + // For example, an ignoreSig of `[]string{"errors.New("}` will ignore errors // returned from the stdlib package error's function: // // `func errors.New(message string) error` @@ -45,6 +44,20 @@ type WrapcheckConfig struct { // list to your config. IgnoreSigs []string `mapstructure:"ignoreSigs" yaml:"ignoreSigs"` + // IgnoreSigRegexps defines a list of regular expressions which if matched + // to the signature of the function call returning the error, will be ignored. This + // allows you to specify functions that wrapcheck will not report as + // unwrapped. + // + // For example, an ignoreSigRegexp of `[]string{"\.New.*Err\("}`` will ignore errors + // returned from any signature whose method name starts with "New" and ends with "Err" + // due to the signature matching the regular expression `\.New.*Err\(`. + // + // Note that this is similar to the ignoreSigs configuration, but provides + // slightly more flexibility in defining rules by which signatures will be + // ignored. + IgnoreSigRegexps []string `mapstructure:"ignoreSigRegexps" yaml:"ignoreSigRegexps"` + // IgnorePackageGlobs defines a list of globs which, if matching the package // of the function returning the error, will ignore the error when doing // wrapcheck analysis. @@ -57,12 +70,23 @@ type WrapcheckConfig struct { // ignorePackageGlobs: // - encoding/* IgnorePackageGlobs []string `mapstructure:"ignorePackageGlobs" yaml:"ignorePackageGlobs"` + + // IgnoreInterfaceRegexps defines a list of regular expressions which, if matched + // to a underlying interface name, will ignore unwrapped errors returned from a + // function whose call is defined on the given interface. + // + // For example, an ignoreInterfaceRegexps of `[]string{"Transac(tor|tion)"}` will ignore errors + // returned from any function whose call is defined on a interface named 'Transactor' + // or 'Transaction' due to the name matching the regular expression `Transac(tor|tion)`. + IgnoreInterfaceRegexps []string `mapstructure:"ignoreInterfaceRegexps" yaml:"ignoreInterfaceRegexps"` } func NewDefaultConfig() WrapcheckConfig { return WrapcheckConfig{ - IgnoreSigs: DefaultIgnoreSigs, - IgnorePackageGlobs: []string{}, + IgnoreSigs: DefaultIgnoreSigs, + IgnoreSigRegexps: []string{}, + IgnorePackageGlobs: []string{}, + IgnoreInterfaceRegexps: []string{}, } } @@ -75,11 +99,40 @@ func NewAnalyzer(cfg WrapcheckConfig) *analysis.Analyzer { } func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { + // Precompile the regexps, report the error + var ( + ignoreSigRegexp []*regexp.Regexp + ignoreInterfaceRegexps []*regexp.Regexp + ignorePackageGlobs []glob.Glob + err error + ) + + ignoreSigRegexp, err = compileRegexps(cfg.IgnoreSigRegexps) + if err == nil { + ignoreInterfaceRegexps, err = compileRegexps(cfg.IgnoreInterfaceRegexps) + } + if err == nil { + ignorePackageGlobs, err = compileGlobs(cfg.IgnorePackageGlobs) + } + return func(pass *analysis.Pass) (interface{}, error) { + if err != nil { + return nil, err + } + for _, file := range pass.Files { + // Keep track of parents so that can can traverse upwards to check for + // FuncDecls and FuncLits. 
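+			// (ast.Inspect calls this function with n == nil after a node's
+			// children have been visited; that nil visit drives the pop
+			// below.)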
+ var parents []ast.Node + ast.Inspect(file, func(n ast.Node) bool { - if _, ok := n.(*ast.AssignStmt); ok { - return true + if n == nil { + // Pop, since we're done with this node and its children. + parents = parents[:len(parents)-1] + } else { + // Push this node on the stack, since its children will be visited + // next. + parents = append(parents, n) } ret, ok := n.(*ast.ReturnStmt) @@ -97,11 +150,23 @@ func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { // to handle it by checking the return params of the function. retFn, ok := expr.(*ast.CallExpr) if ok { + // If you go up, and the parent is a FuncLit, then don't report an + // error as you are in an anonymous function. If you are inside a + // FuncDecl, then continue as normal. + for i := len(parents) - 1; i > 0; i-- { + if _, ok := parents[i].(*ast.FuncLit); ok { + return true + } else if _, ok := parents[i].(*ast.FuncDecl); ok { + break + } + } + // If the return type of the function is a single error. This will not // match an error within multiple return values, for that, the below // tuple check is required. + if isError(pass.TypesInfo.TypeOf(expr)) { - reportUnwrapped(pass, retFn, retFn.Pos(), cfg) + reportUnwrapped(pass, retFn, retFn.Pos(), cfg, ignoreSigRegexp, ignoreInterfaceRegexps, ignorePackageGlobs) return true } @@ -119,7 +184,7 @@ func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { return true } if isError(v.Type()) { - reportUnwrapped(pass, retFn, expr.Pos(), cfg) + reportUnwrapped(pass, retFn, expr.Pos(), cfg, ignoreSigRegexp, ignoreInterfaceRegexps, ignorePackageGlobs) return true } } @@ -134,9 +199,7 @@ func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { return true } - var ( - call *ast.CallExpr - ) + var call *ast.CallExpr // Attempt to find the most recent short assign if shortAss := prevErrAssign(pass, file, ident); shortAss != nil { @@ -183,7 +246,7 @@ func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { return true } - reportUnwrapped(pass, call, ident.NamePos, cfg) + reportUnwrapped(pass, call, ident.NamePos, cfg, ignoreSigRegexp, ignoreInterfaceRegexps, ignorePackageGlobs) } return true @@ -196,7 +259,16 @@ func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { // Report unwrapped takes a call expression and an identifier and reports // if the call is unwrapped. -func reportUnwrapped(pass *analysis.Pass, call *ast.CallExpr, tokenPos token.Pos, cfg WrapcheckConfig) { +func reportUnwrapped( + pass *analysis.Pass, + call *ast.CallExpr, + tokenPos token.Pos, + cfg WrapcheckConfig, + regexpsSig []*regexp.Regexp, + regexpsInter []*regexp.Regexp, + pkgGlobs []glob.Glob, +) { + sel, ok := call.Fun.(*ast.SelectorExpr) if !ok { return @@ -206,19 +278,26 @@ func reportUnwrapped(pass *analysis.Pass, call *ast.CallExpr, tokenPos token.Pos fnSig := pass.TypesInfo.ObjectOf(sel.Sel).String() if contains(cfg.IgnoreSigs, fnSig) { return + } else if containsMatch(regexpsSig, fnSig) { + return } // Check if the underlying type of the "x" in x.y.z is an interface, as - // errors returned from interface types should be wrapped. 
+	// errors returned from interface types should be wrapped, unless ignored
+	// as per `ignoreInterfaceRegexps`
 	if isInterface(pass, sel) {
-		pass.Reportf(tokenPos, "error returned from interface method should be wrapped: sig: %s", fnSig)
-		return
+		pkgPath := pass.TypesInfo.ObjectOf(sel.Sel).Pkg().Path()
+		name := types.TypeString(pass.TypesInfo.TypeOf(sel.X), func(p *types.Package) string { return p.Name() })
+		if !containsMatch(regexpsInter, name) && !containsMatchGlob(pkgGlobs, pkgPath) {
+			pass.Reportf(tokenPos, "error returned from interface method should be wrapped: sig: %s", fnSig)
+			return
+		}
 	}
 
 	// Check whether the function being called comes from another package,
 	// as functions called across package boundaries which returns errors
 	// should be wrapped
-	if isFromOtherPkg(pass, sel, cfg) {
+	if isFromOtherPkg(pass, sel, pkgGlobs) {
 		pass.Reportf(tokenPos, "error returned from external package is unwrapped: sig: %s", fnSig)
 		return
 	}
@@ -231,20 +310,14 @@ func isInterface(pass *analysis.Pass, sel *ast.SelectorExpr) bool {
 	return ok
 }
 
-func isFromOtherPkg(pass *analysis.Pass, sel *ast.SelectorExpr, config WrapcheckConfig) bool {
+// isFromOtherPkg reports whether the function is defined outside the package
+// currently under analysis. It will ignore packages matched by the globs
+// compiled from config.IgnorePackageGlobs.
+func isFromOtherPkg(pass *analysis.Pass, sel *ast.SelectorExpr, pkgGlobs []glob.Glob) bool {
 	// The package of the function that we are calling which returns the error
 	fn := pass.TypesInfo.ObjectOf(sel.Sel)
-
-	for _, globString := range config.IgnorePackageGlobs {
-		g, err := glob.Compile(globString)
-		if err != nil {
-			log.Printf("unable to parse glob: %s\n", globString)
-			os.Exit(1)
-		}
-
-		if g.Match(fn.Pkg().Path()) {
-			return false
-		}
+	if containsMatchGlob(pkgGlobs, fn.Pkg().Path()) {
+		return false
 	}
 
 	// If it's not a package name, then we should check the selector to make sure
@@ -264,7 +337,7 @@ func isFromOtherPkg(pass *analysis.Pass, sel *ast.SelectorExpr, config Wrapcheck
 // `=`. This does not include `var` statements. This function will return nil if
 // the only declaration is a `var` (aka ValueSpec) declaration.
 func prevErrAssign(pass *analysis.Pass, file *ast.File, returnIdent *ast.Ident) *ast.AssignStmt {
-	// A slice containing all the assignments which contain an identifer
+	// A slice containing all the assignments which contain an identifier
 	// referring to the source declaration of the error. This is to catch
 	// cases where err is defined once, and then reassigned multiple times
 	// within the same block. In these cases, we should check the method of
@@ -278,6 +351,7 @@ func prevErrAssign(pass *analysis.Pass, file *ast.File, returnIdent *ast.Ident)
 		if !isError(pass.TypesInfo.TypeOf(expr)) {
 			continue
 		}
+
 		if assIdent, ok := expr.(*ast.Ident); ok {
 			if assIdent.Obj == nil || returnIdent.Obj == nil {
 				// If we can't find the Obj for one of the identifiers, just skip
@@ -301,6 +375,7 @@ func prevErrAssign(pass *analysis.Pass, file *ast.File, returnIdent *ast.Ident)
 		if ass.Pos() > returnIdent.Pos() {
 			break
 		}
+
 		mostRecentAssign = ass
 	}
 
@@ -317,6 +392,26 @@ func contains(slice []string, el string) bool {
 	return false
 }
 
+func containsMatch(regexps []*regexp.Regexp, el string) bool {
+	for _, re := range regexps {
+		if re.MatchString(el) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func containsMatchGlob(globs []glob.Glob, el string) bool {
+	for _, g := range globs {
+		if g.Match(el) {
+			return true
+		}
+	}
+
+	return false
+}
+
 // isError returns whether or not the provided type interface is an error
 func isError(typ types.Type) bool {
 	if typ == nil {
@@ -335,3 +430,34 @@ func isUnresolved(file *ast.File, ident *ast.Ident) bool {
 
 	return false
 }
+
+// compileRegexps compiles a set of regular expressions, returning them for use,
+// or the first encountered error due to an invalid expression.
+func compileRegexps(regexps []string) ([]*regexp.Regexp, error) {
+	compiledRegexps := make([]*regexp.Regexp, len(regexps))
+	for idx, reg := range regexps {
+		re, err := regexp.Compile(reg)
+		if err != nil {
+			return nil, fmt.Errorf("unable to compile regexp %s: %v", reg, err)
+		}
+
+		compiledRegexps[idx] = re
+	}
+
+	return compiledRegexps, nil
+}
+
+// compileGlobs compiles a set of globs, returning them for use,
+// or the first encountered error due to an invalid expression.
+func compileGlobs(globs []string) ([]glob.Glob, error) {
+	compiledGlobs := make([]glob.Glob, len(globs))
+	for idx, globString := range globs {
+		g, err := glob.Compile(globString)
+		if err != nil {
+			return nil, fmt.Errorf("unable to compile glob %s: %v", globString, err)
+		}
+
+		compiledGlobs[idx] = g
+	}
+	return compiledGlobs, nil
+}
diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore
deleted file mode 100644
index e3c2fc2f1d..0000000000
--- a/vendor/github.com/ulikunitz/xz/.gitignore
+++ /dev/null
@@ -1,25 +0,0 @@
-# .gitignore
-
-TODO.html
-README.html
-
-lzma/writer.txt
-lzma/reader.txt
-
-cmd/gxz/gxz
-cmd/xb/xb
-
-# test executables
-*.test
-
-# profile files
-*.out
-
-# vim swap file
-.*.swp
-
-# executables on windows
-*.exe
-
-# default compression test file
-enwik8*
diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE
deleted file mode 100644
index 009b848706..0000000000
--- a/vendor/github.com/ulikunitz/xz/LICENSE
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2014-2021 Ulrich Kunitz
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-* My name, Ulrich Kunitz, may not be used to endorse or promote products
-  derived from this software without specific prior written permission.
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md deleted file mode 100644 index 0a2dc8284f..0000000000 --- a/vendor/github.com/ulikunitz/xz/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Package xz - -This Go language package supports the reading and writing of xz -compressed streams. It includes also a gxz command for compressing and -decompressing data. The package is completely written in Go and doesn't -have any dependency on any C code. - -The package is currently under development. There might be bugs and APIs -are not considered stable. At this time the package cannot compete with -the xz tool regarding compression speed and size. The algorithms there -have been developed over a long time and are highly optimized. However -there are a number of improvements planned and I'm very optimistic about -parallel compression and decompression. Stay tuned! - -## Using the API - -The following example program shows how to use the API. - -```go -package main - -import ( - "bytes" - "io" - "log" - "os" - - "github.com/ulikunitz/xz" -) - -func main() { - const text = "The quick brown fox jumps over the lazy dog.\n" - var buf bytes.Buffer - // compress text - w, err := xz.NewWriter(&buf) - if err != nil { - log.Fatalf("xz.NewWriter error %s", err) - } - if _, err := io.WriteString(w, text); err != nil { - log.Fatalf("WriteString error %s", err) - } - if err := w.Close(); err != nil { - log.Fatalf("w.Close error %s", err) - } - // decompress buffer and write output to stdout - r, err := xz.NewReader(&buf) - if err != nil { - log.Fatalf("NewReader error %s", err) - } - if _, err = io.Copy(os.Stdout, r); err != nil { - log.Fatalf("io.Copy error %s", err) - } -} -``` - -## Using the gxz compression tool - -The package includes a gxz command line utility for compression and -decompression. - -Use following command for installation: - - $ go get github.com/ulikunitz/xz/cmd/gxz - -To test it call the following command. - - $ gxz bigfile - -After some time a much smaller file bigfile.xz will replace bigfile. -To decompress it use the following command. - - $ gxz -d bigfile.xz - diff --git a/vendor/github.com/ulikunitz/xz/SECURITY.md b/vendor/github.com/ulikunitz/xz/SECURITY.md deleted file mode 100644 index 5f7ec01b3b..0000000000 --- a/vendor/github.com/ulikunitz/xz/SECURITY.md +++ /dev/null @@ -1,10 +0,0 @@ -# Security Policy - -## Supported Versions - -Currently the last minor version v0.5.x is supported. - -## Reporting a Vulnerability - -Report a vulnerability by creating a Github issue at -. Expect a response in a week. 
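Returning to the wrapcheck change above: the traversal now keeps an explicit parent stack because `ast.Inspect` calls its callback with `nil` once a node's children have been visited, which allows push/pop bookkeeping and lets the linter skip `return` statements that sit inside anonymous functions. Below is a runnable illustration of that technique; it is a sketch of the idea, not wrapcheck's actual code:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package p

func f() func() error {
	return func() error { return nil }
}`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	var parents []ast.Node
	ast.Inspect(file, func(n ast.Node) bool {
		if n == nil {
			// Pop: ast.Inspect signals with nil that a node's children are done.
			parents = parents[:len(parents)-1]
			return true
		}
		// Push: this node's children are visited next.
		parents = append(parents, n)

		if _, ok := n.(*ast.ReturnStmt); ok {
			// Walk the ancestors; the nearest FuncLit or FuncDecl decides the
			// context, mirroring the loop added to wrapcheck's run().
			for i := len(parents) - 1; i >= 0; i-- {
				if _, ok := parents[i].(*ast.FuncLit); ok {
					fmt.Println(fset.Position(n.Pos()), "return inside a function literal")
					break
				}
				if _, ok := parents[i].(*ast.FuncDecl); ok {
					fmt.Println(fset.Position(n.Pos()), "return inside a declared function")
					break
				}
			}
		}
		return true
	})
}
```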
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md deleted file mode 100644 index 594e0c7fed..0000000000 --- a/vendor/github.com/ulikunitz/xz/TODO.md +++ /dev/null @@ -1,363 +0,0 @@ -# TODO list - -## Release v0.5.x - -1. Support check flag in gxz command. - -## Release v0.6 - -1. Review encoder and check for lzma improvements under xz. -2. Fix binary tree matcher. -3. Compare compression ratio with xz tool using comparable parameters and optimize parameters -4. rename operation action and make it a simple type of size 8 -5. make maxMatches, wordSize parameters -6. stop searching after a certain length is found (parameter sweetLen) - -## Release v0.7 - -1. Optimize code -2. Do statistical analysis to get linear presets. -3. Test sync.Pool compatability for xz and lzma Writer and Reader -4. Fuzz optimized code. - -## Release v0.8 - -1. Support parallel go routines for writing and reading xz files. -2. Support a ReaderAt interface for xz files with small block sizes. -3. Improve compatibility between gxz and xz -4. Provide manual page for gxz - -## Release v0.9 - -1. Improve documentation -2. Fuzz again - -## Release v1.0 - -1. Full functioning gxz -2. Add godoc URL to README.md (godoc.org) -3. Resolve all issues. -4. Define release candidates. -5. Public announcement. - -## Package lzma - -### v0.6 - -* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including - * simple scan at the dictionary head for the same byte - * use the killer byte (requiring matches to get longer, the first test should be the byte that would make the match longer) - -## Optimizations - -* There may be a lot of false sharing in lzma. State; check whether this can be improved by reorganizing the internal structure of it. - -* Check whether batching encoding and decoding improves speed. - -### DAG optimizations - -* Use full buffer to create minimal bit-length above range encoder. -* Might be too slow (see v0.4) - -### Different match finders - -* hashes with 2, 3 characters additional to 4 characters -* binary trees with 2-7 characters (uint64 as key, use uint32 as - - pointers into a an array) - -* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers - - into an array with bit-steeling for the colors) - -## Release Procedure - -* execute goch -l for all packages; probably with lower param like 0.5. -* check orthography with gospell -* Write release notes in doc/relnotes. -* Update README.md -* xb copyright . in xz directory to ensure all new files have Copyright header -* `VERSION= go generate github.com/ulikunitz/xz/...` to update version files -* Execute test for Linux/amd64, Linux/x86 and Windows/amd64. -* Update TODO.md - write short log entry -* `git checkout master && git merge dev` -* `git tag -a ` -* `git push` - -## Log - -### 2021-02-02 - -Mituo Heijo has fuzzed xz and found a bug in the function readIndexBody. The -function allocated a slice of records immediately after reading the value -without further checks. Since the number has been too large the make function -did panic. The fix is to check the number against the expected number of records -before allocating the records. - -### 2020-12-17 - -Release v0.5.9 fixes warnings, a typo and adds SECURITY.md. - -One fix is interesting. - -```go -const ( - a byte = 0x1 - b = 0x2 -) -``` - -The constants a and b don't have the same type. 
Correct is - -```go -const ( - a byte = 0x1 - b byte = 0x2 -) -``` - -### 2020-08-19 - -Release v0.5.8 fixes issue -[issue #35](https://github.com/ulikunitz/xz/issues/35). - -### 2020-02-24 - -Release v0.5.7 supports the check-ID None and fixes -[issue #27](https://github.com/ulikunitz/xz/issues/27). - -### 2019-02-20 - -Release v0.5.6 supports the go.mod file. - -### 2018-10-28 - -Release v0.5.5 fixes issues #19 observing ErrLimit outputs. - -### 2017-06-05 - -Release v0.5.4 fixes issues #15 of another problem with the padding size -check for the xz block header. I removed the check completely. - -### 2017-02-15 - -Release v0.5.3 fixes issue #12 regarding the decompression of an empty -XZ stream. Many thanks to Tomasz Kłak, who reported the issue. - -### 2016-12-02 - -Release v0.5.2 became necessary to allow the decoding of xz files with -4-byte padding in the block header. Many thanks to Greg, who reported -the issue. - -### 2016-07-23 - -Release v0.5.1 became necessary to fix problems with 32-bit platforms. -Many thanks to Bruno Brigas, who reported the issue. - -### 2016-07-04 - -Release v0.5 provides improvements to the compressor and provides support for -the decompression of xz files with multiple xz streams. - -### 2016-01-31 - -Another compression rate increase by checking the byte at length of the -best match first, before checking the whole prefix. This makes the -compressor even faster. We have now a large time budget to beat the -compression ratio of the xz tool. For enwik8 we have now over 40 seconds -to reduce the compressed file size for another 7 MiB. - -### 2016-01-30 - -I simplified the encoder. Speed and compression rate increased -dramatically. A high compression rate affects also the decompression -speed. The approach with the buffer and optimizing for operation -compression rate has not been successful. Going for the maximum length -appears to be the best approach. - -### 2016-01-28 - -The release v0.4 is ready. It provides a working xz implementation, -which is rather slow, but works and is interoperable with the xz tool. -It is an important milestone. - -### 2016-01-10 - -I have the first working implementation of an xz reader and writer. I'm -happy about reaching this milestone. - -### 2015-12-02 - -I'm now ready to implement xz because, I have a working LZMA2 -implementation. I decided today that v0.4 will use the slow encoder -using the operations buffer to be able to go back, if I intend to do so. - -### 2015-10-21 - -I have restarted the work on the library. While trying to implement -LZMA2, I discovered that I need to resimplify the encoder and decoder -functions. The option approach is too complicated. Using a limited byte -writer and not caring for written bytes at all and not to try to handle -uncompressed data simplifies the LZMA encoder and decoder much. -Processing uncompressed data and handling limits is a feature of the -LZMA2 format not of LZMA. - -I learned an interesting method from the LZO format. If the last copy is -too far away they are moving the head one 2 bytes and not 1 byte to -reduce processing times. - -### 2015-08-26 - -I have now reimplemented the lzma package. The code is reasonably fast, -but can still be optimized. The next step is to implement LZMA2 and then -xz. - -### 2015-07-05 - -Created release v0.3. The version is the foundation for a full xz -implementation that is the target of v0.4. 
- -### 2015-06-11 - -The gflag package has been developed because I couldn't use flag and -pflag for a fully compatible support of gzip's and lzma's options. It -seems to work now quite nicely. - -### 2015-06-05 - -The overflow issue was interesting to research, however Henry S. Warren -Jr. Hacker's Delight book was very helpful as usual and had the issue -explained perfectly. Fefe's information on his website was based on the -C FAQ and quite bad, because it didn't address the issue of -MININT == -MININT. - -### 2015-06-04 - -It has been a productive day. I improved the interface of lzma. Reader -and lzma. Writer and fixed the error handling. - -### 2015-06-01 - -By computing the bit length of the LZMA operations I was able to -improve the greedy algorithm implementation. By using an 8 MByte buffer -the compression rate was not as good as for xz but already better then -gzip default. - -Compression is currently slow, but this is something we will be able to -improve over time. - -### 2015-05-26 - -Checked the license of ogier/pflag. The binary lzmago binary should -include the license terms for the pflag library. - -I added the endorsement clause as used by Google for the Go sources the -LICENSE file. - -### 2015-05-22 - -The package lzb contains now the basic implementation for creating or -reading LZMA byte streams. It allows the support for the implementation -of the DAG-shortest-path algorithm for the compression function. - -### 2015-04-23 - -Completed yesterday the lzbase classes. I'm a little bit concerned that -using the components may require too much code, but on the other hand -there is a lot of flexibility. - -### 2015-04-22 - -Implemented Reader and Writer during the Bayern game against Porto. The -second half gave me enough time. - -### 2015-04-21 - -While showering today morning I discovered that the design for OpEncoder -and OpDecoder doesn't work, because encoding/decoding might depend on -the current status of the dictionary. This is not exactly the right way -to start the day. - -Therefore we need to keep the Reader and Writer design. This time around -we simplify it by ignoring size limits. These can be added by wrappers -around the Reader and Writer interfaces. The Parameters type isn't -needed anymore. - -However I will implement a ReaderState and WriterState type to use -static typing to ensure the right State object is combined with the -right lzbase. Reader and lzbase. Writer. - -As a start I have implemented ReaderState and WriterState to ensure -that the state for reading is only used by readers and WriterState only -used by Writers. - -### 2015-04-20 - -Today I implemented the OpDecoder and tested OpEncoder and OpDecoder. - -### 2015-04-08 - -Came up with a new simplified design for lzbase. I implemented already -the type State that replaces OpCodec. - -### 2015-04-06 - -The new lzma package is now fully usable and lzmago is using it now. The -old lzma package has been completely removed. - -### 2015-04-05 - -Implemented lzma. Reader and tested it. - -### 2015-04-04 - -Implemented baseReader by adapting code form lzma. Reader. - -### 2015-04-03 - -The opCodec has been copied yesterday to lzma2. opCodec has a high -number of dependencies on other files in lzma2. Therefore I had to copy -almost all files from lzma. - -### 2015-03-31 - -Removed only a TODO item. - -However in Francesco Campoy's presentation "Go for Javaneros -(Javaïstes?)" is the the idea that using an embedded field E, all the -methods of E will be defined on T. 
If E is an interface T satisfies E. - - - -I have never used this, but it seems to be a cool idea. - -### 2015-03-30 - -Finished the type writerDict and wrote a simple test. - -### 2015-03-25 - -I started to implement the writerDict. - -### 2015-03-24 - -After thinking long about the LZMA2 code and several false starts, I -have now a plan to create a self-sufficient lzma2 package that supports -the classic LZMA format as well as LZMA2. The core idea is to support a -baseReader and baseWriter type that support the basic LZMA stream -without any headers. Both types must support the reuse of dictionaries -and the opCodec. - -### 2015-01-10 - -1. Implemented simple lzmago tool -2. Tested tool against large 4.4G file - * compression worked correctly; tested decompression with lzma - * decompression hits a full buffer condition -3. Fixed a bug in the compressor and wrote a test for it -4. Executed full cycle for 4.4 GB file; performance can be improved ;-) - -### 2015-01-11 - -* Release v0.2 because of the working LZMA encoder and decoder diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go deleted file mode 100644 index e48450c2ca..0000000000 --- a/vendor/github.com/ulikunitz/xz/bits.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xz - -import ( - "errors" - "io" -) - -// putUint32LE puts the little-endian representation of x into the first -// four bytes of p. -func putUint32LE(p []byte, x uint32) { - p[0] = byte(x) - p[1] = byte(x >> 8) - p[2] = byte(x >> 16) - p[3] = byte(x >> 24) -} - -// putUint64LE puts the little-endian representation of x into the first -// eight bytes of p. -func putUint64LE(p []byte, x uint64) { - p[0] = byte(x) - p[1] = byte(x >> 8) - p[2] = byte(x >> 16) - p[3] = byte(x >> 24) - p[4] = byte(x >> 32) - p[5] = byte(x >> 40) - p[6] = byte(x >> 48) - p[7] = byte(x >> 56) -} - -// uint32LE converts a little endian representation to an uint32 value. -func uint32LE(p []byte) uint32 { - return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | - uint32(p[3])<<24 -} - -// putUvarint puts a uvarint representation of x into the byte slice. -func putUvarint(p []byte, x uint64) int { - i := 0 - for x >= 0x80 { - p[i] = byte(x) | 0x80 - x >>= 7 - i++ - } - p[i] = byte(x) - return i + 1 -} - -// errOverflow indicates an overflow of the 64-bit unsigned integer. -var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer") - -// readUvarint reads a uvarint from the given byte reader. -func readUvarint(r io.ByteReader) (x uint64, n int, err error) { - const maxUvarintLen = 10 - - var s uint - i := 0 - for { - b, err := r.ReadByte() - if err != nil { - return x, i, err - } - i++ - if i > maxUvarintLen { - return x, i, errOverflowU64 - } - if b < 0x80 { - if i == maxUvarintLen && b > 1 { - return x, i, errOverflowU64 - } - return x | uint64(b)< 0 { - k = 4 - k - } - return k -} - -/*** Header ***/ - -// headerMagic stores the magic bytes for the header -var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00} - -// HeaderLen provides the length of the xz file header. -const HeaderLen = 12 - -// Constants for the checksum methods supported by xz. -const ( - None byte = 0x0 - CRC32 byte = 0x1 - CRC64 byte = 0x4 - SHA256 byte = 0xa -) - -// errInvalidFlags indicates that flags are invalid. 
-var errInvalidFlags = errors.New("xz: invalid flags") - -// verifyFlags returns the error errInvalidFlags if the value is -// invalid. -func verifyFlags(flags byte) error { - switch flags { - case None, CRC32, CRC64, SHA256: - return nil - default: - return errInvalidFlags - } -} - -// flagstrings maps flag values to strings. -var flagstrings = map[byte]string{ - None: "None", - CRC32: "CRC-32", - CRC64: "CRC-64", - SHA256: "SHA-256", -} - -// flagString returns the string representation for the given flags. -func flagString(flags byte) string { - s, ok := flagstrings[flags] - if !ok { - return "invalid" - } - return s -} - -// newHashFunc returns a function that creates hash instances for the -// hash method encoded in flags. -func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { - switch flags { - case None: - newHash = newNoneHash - case CRC32: - newHash = newCRC32 - case CRC64: - newHash = newCRC64 - case SHA256: - newHash = sha256.New - default: - err = errInvalidFlags - } - return -} - -// header provides the actual content of the xz file header: the flags. -type header struct { - flags byte -} - -// Errors returned by readHeader. -var errHeaderMagic = errors.New("xz: invalid header magic bytes") - -// ValidHeader checks whether data is a correct xz file header. The -// length of data must be HeaderLen. -func ValidHeader(data []byte) bool { - var h header - err := h.UnmarshalBinary(data) - return err == nil -} - -// String returns a string representation of the flags. -func (h header) String() string { - return flagString(h.flags) -} - -// UnmarshalBinary reads header from the provided data slice. -func (h *header) UnmarshalBinary(data []byte) error { - // header length - if len(data) != HeaderLen { - return errors.New("xz: wrong file header length") - } - - // magic header - if !bytes.Equal(headerMagic, data[:6]) { - return errHeaderMagic - } - - // checksum - crc := crc32.NewIEEE() - crc.Write(data[6:8]) - if uint32LE(data[8:]) != crc.Sum32() { - return errors.New("xz: invalid checksum for file header") - } - - // stream flags - if data[6] != 0 { - return errInvalidFlags - } - flags := data[7] - if err := verifyFlags(flags); err != nil { - return err - } - - h.flags = flags - return nil -} - -// MarshalBinary generates the xz file header. -func (h *header) MarshalBinary() (data []byte, err error) { - if err = verifyFlags(h.flags); err != nil { - return nil, err - } - - data = make([]byte, 12) - copy(data, headerMagic) - data[7] = h.flags - - crc := crc32.NewIEEE() - crc.Write(data[6:8]) - putUint32LE(data[8:], crc.Sum32()) - - return data, nil -} - -/*** Footer ***/ - -// footerLen defines the length of the footer. -const footerLen = 12 - -// footerMagic contains the footer magic bytes. -var footerMagic = []byte{'Y', 'Z'} - -// footer represents the content of the xz file footer. -type footer struct { - indexSize int64 - flags byte -} - -// String prints a string representation of the footer structure. -func (f footer) String() string { - return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) -} - -// Minimum and maximum for the size of the index (backward size). -const ( - minIndexSize = 4 - maxIndexSize = (1 << 32) * 4 -) - -// MarshalBinary converts footer values into an xz file footer. Note -// that the footer value is checked for correctness. 
-func (f *footer) MarshalBinary() (data []byte, err error) { - if err = verifyFlags(f.flags); err != nil { - return nil, err - } - if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { - return nil, errors.New("xz: index size out of range") - } - if f.indexSize%4 != 0 { - return nil, errors.New( - "xz: index size not aligned to four bytes") - } - - data = make([]byte, footerLen) - - // backward size (index size) - s := (f.indexSize / 4) - 1 - putUint32LE(data[4:], uint32(s)) - // flags - data[9] = f.flags - // footer magic - copy(data[10:], footerMagic) - - // CRC-32 - crc := crc32.NewIEEE() - crc.Write(data[4:10]) - putUint32LE(data, crc.Sum32()) - - return data, nil -} - -// UnmarshalBinary sets the footer value by unmarshalling an xz file -// footer. -func (f *footer) UnmarshalBinary(data []byte) error { - if len(data) != footerLen { - return errors.New("xz: wrong footer length") - } - - // magic bytes - if !bytes.Equal(data[10:], footerMagic) { - return errors.New("xz: footer magic invalid") - } - - // CRC-32 - crc := crc32.NewIEEE() - crc.Write(data[4:10]) - if uint32LE(data) != crc.Sum32() { - return errors.New("xz: footer checksum error") - } - - var g footer - // backward size (index size) - g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 - - // flags - if data[8] != 0 { - return errInvalidFlags - } - g.flags = data[9] - if err := verifyFlags(g.flags); err != nil { - return err - } - - *f = g - return nil -} - -/*** Block Header ***/ - -// blockHeader represents the content of an xz block header. -type blockHeader struct { - compressedSize int64 - uncompressedSize int64 - filters []filter -} - -// String converts the block header into a string. -func (h blockHeader) String() string { - var buf bytes.Buffer - first := true - if h.compressedSize >= 0 { - fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) - first = false - } - if h.uncompressedSize >= 0 { - if !first { - buf.WriteString(" ") - } - fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) - first = false - } - for _, f := range h.filters { - if !first { - buf.WriteString(" ") - } - fmt.Fprintf(&buf, "filter %s", f) - first = false - } - return buf.String() -} - -// Masks for the block flags. -const ( - filterCountMask = 0x03 - compressedSizePresent = 0x40 - uncompressedSizePresent = 0x80 - reservedBlockFlags = 0x3C -) - -// errIndexIndicator signals that an index indicator (0x00) has been found -// instead of an expected block header indicator. -var errIndexIndicator = errors.New("xz: found index indicator") - -// readBlockHeader reads the block header. -func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { - var buf bytes.Buffer - buf.Grow(20) - - // block header size - z, err := io.CopyN(&buf, r, 1) - n = int(z) - if err != nil { - return nil, n, err - } - s := buf.Bytes()[0] - if s == 0 { - return nil, n, errIndexIndicator - } - - // read complete header - headerLen := (int(s) + 1) * 4 - buf.Grow(headerLen - 1) - z, err = io.CopyN(&buf, r, int64(headerLen-1)) - n += int(z) - if err != nil { - return nil, n, err - } - - // unmarshal block header - h = new(blockHeader) - if err = h.UnmarshalBinary(buf.Bytes()); err != nil { - return nil, n, err - } - - return h, n, nil -} - -// readSizeInBlockHeader reads the uncompressed or compressed size -// fields in the block header. The present value informs the function -// whether the respective field is actually present in the header. 
-func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { - if !present { - return -1, nil - } - x, _, err := readUvarint(r) - if err != nil { - return 0, err - } - if x >= 1<<63 { - return 0, errors.New("xz: size overflow in block header") - } - return int64(x), nil -} - -// UnmarshalBinary unmarshals the block header. -func (h *blockHeader) UnmarshalBinary(data []byte) error { - // Check header length - s := data[0] - if data[0] == 0 { - return errIndexIndicator - } - headerLen := (int(s) + 1) * 4 - if len(data) != headerLen { - return fmt.Errorf("xz: data length %d; want %d", len(data), - headerLen) - } - n := headerLen - 4 - - // Check CRC-32 - crc := crc32.NewIEEE() - crc.Write(data[:n]) - if crc.Sum32() != uint32LE(data[n:]) { - return errors.New("xz: checksum error for block header") - } - - // Block header flags - flags := data[1] - if flags&reservedBlockFlags != 0 { - return errors.New("xz: reserved block header flags set") - } - - r := bytes.NewReader(data[2:n]) - - // Compressed size - var err error - h.compressedSize, err = readSizeInBlockHeader( - r, flags&compressedSizePresent != 0) - if err != nil { - return err - } - - // Uncompressed size - h.uncompressedSize, err = readSizeInBlockHeader( - r, flags&uncompressedSizePresent != 0) - if err != nil { - return err - } - - h.filters, err = readFilters(r, int(flags&filterCountMask)+1) - if err != nil { - return err - } - - // Check padding - // Since headerLen is a multiple of 4 we don't need to check - // alignment. - k := r.Len() - // The standard spec says that the padding should have not more - // than 3 bytes. However we found paddings of 4 or 5 in the - // wild. See https://github.com/ulikunitz/xz/pull/11 and - // https://github.com/ulikunitz/xz/issues/15 - // - // The only reasonable approach seems to be to ignore the - // padding size. We still check that all padding bytes are zero. - if !allZeros(data[n-k : n]) { - return errPadding - } - return nil -} - -// MarshalBinary marshals the binary header. 
-func (h *blockHeader) MarshalBinary() (data []byte, err error) { - if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { - return nil, errors.New("xz: filter count wrong") - } - for i, f := range h.filters { - if i < len(h.filters)-1 { - if f.id() == lzmaFilterID { - return nil, errors.New( - "xz: LZMA2 filter is not the last") - } - } else { - // last filter - if f.id() != lzmaFilterID { - return nil, errors.New("xz: " + - "last filter must be the LZMA2 filter") - } - } - } - - var buf bytes.Buffer - // header size must set at the end - buf.WriteByte(0) - - // flags - flags := byte(len(h.filters) - 1) - if h.compressedSize >= 0 { - flags |= compressedSizePresent - } - if h.uncompressedSize >= 0 { - flags |= uncompressedSizePresent - } - buf.WriteByte(flags) - - p := make([]byte, 10) - if h.compressedSize >= 0 { - k := putUvarint(p, uint64(h.compressedSize)) - buf.Write(p[:k]) - } - if h.uncompressedSize >= 0 { - k := putUvarint(p, uint64(h.uncompressedSize)) - buf.Write(p[:k]) - } - - for _, f := range h.filters { - fp, err := f.MarshalBinary() - if err != nil { - return nil, err - } - buf.Write(fp) - } - - // padding - for i := padLen(int64(buf.Len())); i > 0; i-- { - buf.WriteByte(0) - } - - // crc place holder - buf.Write(p[:4]) - - data = buf.Bytes() - if len(data)%4 != 0 { - panic("data length not aligned") - } - s := len(data)/4 - 1 - if !(1 < s && s <= 255) { - panic("wrong block header size") - } - data[0] = byte(s) - - crc := crc32.NewIEEE() - crc.Write(data[:len(data)-4]) - putUint32LE(data[len(data)-4:], crc.Sum32()) - - return data, nil -} - -// Constants used for marshalling and unmarshalling filters in the xz -// block header. -const ( - minFilters = 1 - maxFilters = 4 - minReservedID = 1 << 62 -) - -// filter represents a filter in the block header. -type filter interface { - id() uint64 - UnmarshalBinary(data []byte) error - MarshalBinary() (data []byte, err error) - reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) - writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) - // filter must be last filter - last() bool -} - -// readFilter reads a block filter from the block header. At this point -// in time only the LZMA2 filter is supported. -func readFilter(r io.Reader) (f filter, err error) { - br := lzma.ByteReader(r) - - // index - id, _, err := readUvarint(br) - if err != nil { - return nil, err - } - - var data []byte - switch id { - case lzmaFilterID: - data = make([]byte, lzmaFilterLen) - data[0] = lzmaFilterID - if _, err = io.ReadFull(r, data[1:]); err != nil { - return nil, err - } - f = new(lzmaFilter) - default: - if id >= minReservedID { - return nil, errors.New( - "xz: reserved filter id in block stream header") - } - return nil, errors.New("xz: invalid filter id") - } - if err = f.UnmarshalBinary(data); err != nil { - return nil, err - } - return f, err -} - -// readFilters reads count filters. At this point in time only the count -// 1 is supported. -func readFilters(r io.Reader, count int) (filters []filter, err error) { - if count != 1 { - return nil, errors.New("xz: unsupported filter count") - } - f, err := readFilter(r) - if err != nil { - return nil, err - } - return []filter{f}, err -} - -/*** Index ***/ - -// record describes a block in the xz file index. -type record struct { - unpaddedSize int64 - uncompressedSize int64 -} - -// readRecord reads an index record. 
-func readRecord(r io.ByteReader) (rec record, n int, err error) { - u, k, err := readUvarint(r) - n += k - if err != nil { - return rec, n, err - } - rec.unpaddedSize = int64(u) - if rec.unpaddedSize < 0 { - return rec, n, errors.New("xz: unpadded size negative") - } - - u, k, err = readUvarint(r) - n += k - if err != nil { - return rec, n, err - } - rec.uncompressedSize = int64(u) - if rec.uncompressedSize < 0 { - return rec, n, errors.New("xz: uncompressed size negative") - } - - return rec, n, nil -} - -// MarshalBinary converts an index record in its binary encoding. -func (rec *record) MarshalBinary() (data []byte, err error) { - // maximum length of a uvarint is 10 - p := make([]byte, 20) - n := putUvarint(p, uint64(rec.unpaddedSize)) - n += putUvarint(p[n:], uint64(rec.uncompressedSize)) - return p[:n], nil -} - -// writeIndex writes the index, a sequence of records. -func writeIndex(w io.Writer, index []record) (n int64, err error) { - crc := crc32.NewIEEE() - mw := io.MultiWriter(w, crc) - - // index indicator - k, err := mw.Write([]byte{0}) - n += int64(k) - if err != nil { - return n, err - } - - // number of records - p := make([]byte, 10) - k = putUvarint(p, uint64(len(index))) - k, err = mw.Write(p[:k]) - n += int64(k) - if err != nil { - return n, err - } - - // list of records - for _, rec := range index { - p, err := rec.MarshalBinary() - if err != nil { - return n, err - } - k, err = mw.Write(p) - n += int64(k) - if err != nil { - return n, err - } - } - - // index padding - k, err = mw.Write(make([]byte, padLen(int64(n)))) - n += int64(k) - if err != nil { - return n, err - } - - // crc32 checksum - putUint32LE(p, crc.Sum32()) - k, err = w.Write(p[:4]) - n += int64(k) - - return n, err -} - -// readIndexBody reads the index from the reader. It assumes that the -// index indicator has already been read. 
-func readIndexBody(r io.Reader, expectedRecordLen int) (records []record, n int64, err error) { - crc := crc32.NewIEEE() - // index indicator - crc.Write([]byte{0}) - - br := lzma.ByteReader(io.TeeReader(r, crc)) - - // number of records - u, k, err := readUvarint(br) - n += int64(k) - if err != nil { - return nil, n, err - } - recLen := int(u) - if recLen < 0 || uint64(recLen) != u { - return nil, n, errors.New("xz: record number overflow") - } - if recLen != expectedRecordLen { - return nil, n, fmt.Errorf( - "xz: index length is %d; want %d", - recLen, expectedRecordLen) - } - - // list of records - records = make([]record, recLen) - for i := range records { - records[i], k, err = readRecord(br) - n += int64(k) - if err != nil { - return nil, n, err - } - } - - p := make([]byte, padLen(int64(n+1)), 4) - k, err = io.ReadFull(br.(io.Reader), p) - n += int64(k) - if err != nil { - return nil, n, err - } - if !allZeros(p) { - return nil, n, errors.New("xz: non-zero byte in index padding") - } - - // crc32 - s := crc.Sum32() - p = p[:4] - k, err = io.ReadFull(br.(io.Reader), p) - n += int64(k) - if err != nil { - return records, n, err - } - if uint32LE(p) != s { - return nil, n, errors.New("xz: wrong checksum for index") - } - - return records, n, nil -} diff --git a/vendor/github.com/ulikunitz/xz/fox-check-none.xz b/vendor/github.com/ulikunitz/xz/fox-check-none.xz deleted file mode 100644 index 46043f7dc8..0000000000 Binary files a/vendor/github.com/ulikunitz/xz/fox-check-none.xz and /dev/null differ diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz deleted file mode 100644 index 4b820bd5a1..0000000000 Binary files a/vendor/github.com/ulikunitz/xz/fox.xz and /dev/null differ diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go deleted file mode 100644 index f723cf252d..0000000000 --- a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hash - -// CyclicPoly provides a cyclic polynomial rolling hash. -type CyclicPoly struct { - h uint64 - p []uint64 - i int -} - -// ror rotates the unsigned 64-bit integer to right. The argument s must be -// less than 64. -func ror(x uint64, s uint) uint64 { - return (x >> s) | (x << (64 - s)) -} - -// NewCyclicPoly creates a new instance of the CyclicPoly structure. The -// argument n gives the number of bytes for which a hash will be executed. -// This number must be positive; the method panics if this isn't the case. -func NewCyclicPoly(n int) *CyclicPoly { - if n < 1 { - panic("argument n must be positive") - } - return &CyclicPoly{p: make([]uint64, 0, n)} -} - -// Len returns the length of the byte sequence for which a hash is generated. -func (r *CyclicPoly) Len() int { - return cap(r.p) -} - -// RollByte hashes the next byte and returns a hash value. The complete becomes -// available after at least Len() bytes have been hashed. -func (r *CyclicPoly) RollByte(x byte) uint64 { - y := hash[x] - if len(r.p) < cap(r.p) { - r.h = ror(r.h, 1) ^ y - r.p = append(r.p, y) - } else { - r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) - r.h = ror(r.h, 1) ^ y - r.p[r.i] = y - r.i = (r.i + 1) % cap(r.p) - } - return r.h -} - -// Stores the hash for the individual bytes. 
-var hash = [256]uint64{ - 0x2e4fc3f904065142, 0xc790984cfbc99527, - 0x879f95eb8c62f187, 0x3b61be86b5021ef2, - 0x65a896a04196f0a5, 0xc5b307b80470b59e, - 0xd3bff376a70df14b, 0xc332f04f0b3f1701, - 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, - 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, - 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, - 0x71aadeded184f21e, 0xd73426fccda23b2d, - 0x29773fb5fb9600b5, 0xce410261cd32981a, - 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, - 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, - 0xa5f10b3910482cea, 0x2945d59be02dfaad, - 0x06ee334ff70571b5, 0xbabf9d8070f44380, - 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, - 0x26183cb9f7b1664c, 0xea71dac7da068f21, - 0xea92eca5bd1d0bb7, 0x415595862defcd75, - 0x248a386023c60648, 0x9cf021ab284b3c8a, - 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, - 0x73e799d139dc6975, 0x7b15ae312486363c, - 0xb70e5454a2239c80, 0x208e3fb31d3b2263, - 0x01f563cabb930f44, 0x2ac4533d2a3240d8, - 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, - 0x213c227271c20122, 0x09fe8a9a0a03d07a, - 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, - 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, - 0x70adb010543bea12, 0xcdae938f7ea6f579, - 0x3f3d870208672f4d, 0x8e6ccbce9d349536, - 0xe4c0871a389095ae, 0xf5f2a49152bca080, - 0x9a43f9b97269934e, 0xc17b3753cb6f475c, - 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, - 0xa06d5a011912a550, 0x5537ed19537ad1df, - 0xa32fe713d611449d, 0x2a1d05b47c3b579f, - 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, - 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, - 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, - 0x0b63d5d801708420, 0x8f227ca8f37ffaec, - 0x0256278670887c24, 0x107e14877dbf540b, - 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, - 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, - 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, - 0xd99264421147eb03, 0x535a2d6d38aefcfe, - 0x6ba8b4454a916237, 0xfa39366eaae4719c, - 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, - 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, - 0xd61c2503fe639144, 0x30ce625441eb92d3, - 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, - 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, - 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, - 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, - 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, - 0x7808e902b3857d0b, 0x171c9c4ea4607972, - 0x58d66274850146df, 0x42b311c10d3981d1, - 0x647fa8c621c41a4c, 0xf472771c66ddfedc, - 0x338d27e3f847b46b, 0x6402ce3da97545ce, - 0x5162db616fc38638, 0x9c83be97bc22a50e, - 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, - 0x9454614eb0f81c45, 0x395fb6e742ed39b6, - 0x77dd9179d06037bf, 0xc478d0fee4d2656d, - 0x35d9d6cb772007af, 0x83a56e92c883f0f6, - 0x27937453250c00a1, 0x27bd6ebc3a46a97d, - 0x9f543bf784342d51, 0xd158f38c48b0ed52, - 0x8dd8537c045f66b4, 0x846a57230226f6d5, - 0x6b13939e0c4e7cdf, 0xfca25425d8176758, - 0x92e5fc6cd52788e6, 0x9992e13d7a739170, - 0x518246f7a199e8ea, 0xf104c2a71b9979c7, - 0x86b3ffaabea4768f, 0x6388061cf3e351ad, - 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, - 0x1d759846499e148d, 0x4c0ff015e5f96ef4, - 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, - 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, - 0x337523aabbe6cf8d, 0x646bb14001d42b12, - 0xc178729d138adc74, 0xf900ef4491f24086, - 0xee1a90d334bb5ac4, 0x9755c92247301a50, - 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, - 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, - 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, - 0x80118d4ae46bd210, 0x58ab61a522843733, - 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, - 0x765669e0e5e8157b, 0xa5347830737132f0, - 0x3ba485a69f01510c, 0x0b247d7b957a01c3, - 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, - 0x8b535ed3829b2b14, 0xee41d0cad65d232c, - 0xe6a99ed97a6a982f, 0x65ac6194c202003d, - 0x692accf3a70573eb, 0xcc3c02c3e200d5af, - 
0x0d419e8b325914a3, 0x320f160f42c25e40, - 0x00710d647a51fe7a, 0x3c947692330aed60, - 0x9288aa280d355a7a, 0xa1806a9b791d1696, - 0x5d60e38496763da1, 0x6c69e22e613fd0f4, - 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, - 0x460c17992cbaece1, 0xf7822c5444d3297f, - 0x344a9790c69b74aa, 0xb80a42e6cae09dce, - 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, - 0x88e0b7be347627cc, 0x45246009b7a99490, - 0x8011c6dd3fe50472, 0xc341d682bffb99d7, - 0x2511be93808e2d15, 0xd5bc13d7fd739840, - 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, - 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, - 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, - 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, - 0xa559cce0d9199aac, 0xde39d47ef3723380, - 0xe5b69d848ce42e35, 0xefa24296f8e79f52, - 0x70190b59db9a5afc, 0x26f166cdb211e7bf, - 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, - 0xb9059b05e9420d90, 0x2f0da855c9388754, - 0x611d5e9ab77949cc, 0x2912038ac01163f4, - 0x0231df50402b2fba, 0x45660fc4f3245f58, - 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, - 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, - 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, - 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, - 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, - 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, - 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, - 0x6d1b3c1149dda943, 0x372c943a518c1093, - 0xad27af45e77c09c4, 0x3b6f92b646044604, - 0xac2917909f5fcf4f, 0x2069a60e977e5557, - 0x353a469e71014de5, 0x24be356281f55c15, - 0x2b6d710ba8e9adea, 0x404ad1751c749c29, - 0xed7311bf23d7f185, 0xba4f6976b4acc43e, - 0x32d7198d2bc39000, 0xee667019014d6e01, - 0x494ef3e128d14c83, 0x1f95a152baecd6be, - 0x201648dff1f483a5, 0x68c28550c8384af6, - 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, - 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, - 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, - 0xf8f6b97f5585080a, 0x74236084be57b95b, - 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, - 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, -} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go deleted file mode 100644 index cc60a6b5ce..0000000000 --- a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package hash provides rolling hashes. - -Rolling hashes have to be used for maintaining the positions of n-byte -sequences in the dictionary buffer. - -The package provides currently the Rabin-Karp rolling hash and a Cyclic -Polynomial hash. Both support the Hashes method to be used with an interface. -*/ -package hash diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go deleted file mode 100644 index c6432913fd..0000000000 --- a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hash - -// A is the default constant for Robin-Karp rolling hash. This is a random -// prime. -const A = 0x97b548add41d5da1 - -// RabinKarp supports the computation of a rolling hash. -type RabinKarp struct { - A uint64 - // a^n - aOldest uint64 - h uint64 - p []byte - i int -} - -// NewRabinKarp creates a new RabinKarp value. The argument n defines the -// length of the byte sequence to be hashed. The default constant will will be -// used. 
-func NewRabinKarp(n int) *RabinKarp { - return NewRabinKarpConst(n, A) -} - -// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the -// length of the byte sequence to be hashed. The argument a provides the -// constant used to compute the hash. -func NewRabinKarpConst(n int, a uint64) *RabinKarp { - if n <= 0 { - panic("number of bytes n must be positive") - } - aOldest := uint64(1) - // There are faster methods. For the small n required by the LZMA - // compressor O(n) is sufficient. - for i := 0; i < n; i++ { - aOldest *= a - } - return &RabinKarp{ - A: a, aOldest: aOldest, - p: make([]byte, 0, n), - } -} - -// Len returns the length of the byte sequence. -func (r *RabinKarp) Len() int { - return cap(r.p) -} - -// RollByte computes the hash after x has been added. -func (r *RabinKarp) RollByte(x byte) uint64 { - if len(r.p) < cap(r.p) { - r.h += uint64(x) - r.h *= r.A - r.p = append(r.p, x) - } else { - r.h -= uint64(r.p[r.i]) * r.aOldest - r.h += uint64(x) - r.h *= r.A - r.p[r.i] = x - r.i = (r.i + 1) % cap(r.p) - } - return r.h -} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go deleted file mode 100644 index f1de88b445..0000000000 --- a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hash - -// Roller provides an interface for rolling hashes. The hash value will become -// valid after hash has been called Len times. -type Roller interface { - Len() int - RollByte(x byte) uint64 -} - -// Hashes computes all hash values for the array p. Note that the state of the -// roller is changed. -func Hashes(r Roller, p []byte) []uint64 { - n := r.Len() - if len(p) < n { - return nil - } - h := make([]uint64, len(p)-n+1) - for i := 0; i < n-1; i++ { - r.RollByte(p[i]) - } - for i := range h { - h[i] = r.RollByte(p[i+n-1]) - } - return h -} diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go deleted file mode 100644 index 6c20c77ba6..0000000000 --- a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package xlog provides a simple logging package that allows to disable -// certain message categories. It defines a type, Logger, with multiple -// methods for formatting output. The package has also a predefined -// 'standard' Logger accessible through helper function Print[f|ln], -// Fatal[f|ln], Panic[f|ln], Warn[f|ln], Print[f|ln] and Debug[f|ln] -// that are easier to use then creating a Logger manually. That logger -// writes to standard error and prints the date and time of each logged -// message, which can be configured using the function SetFlags. -// -// The Fatal functions call os.Exit(1) after the message is output -// unless not suppressed by the flags. The Panic functions call panic -// after the writing the log message unless suppressed. -package xlog - -import ( - "fmt" - "io" - "os" - "runtime" - "sync" - "time" -) - -// The flags define what information is prefixed to each log entry -// generated by the Logger. The Lno* versions allow the suppression of -// specific output. 
The bits are or'ed together to control what will be -// printed. There is no control over the order of the items printed and -// the format. The full format is: -// -// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message -// -const ( - Ldate = 1 << iota // the date: 2009-01-23 - Ltime // the time: 01:23:23 - Lmicroseconds // microsecond resolution: 01:23:23.123123 - Llongfile // full file name and line number: /a/b/c/d.go:23 - Lshortfile // final file name element and line number: d.go:23 - Lnopanic // suppresses output from Panic[f|ln] but not the panic call - Lnofatal // suppresses output from Fatal[f|ln] but not the exit - Lnowarn // suppresses output from Warn[f|ln] - Lnoprint // suppresses output from Print[f|ln] - Lnodebug // suppresses output from Debug[f|ln] - // initial values for the standard logger - Lstdflags = Ldate | Ltime | Lnodebug -) - -// A Logger represents an active logging object that generates lines of -// output to an io.Writer. Each logging operation if not suppressed -// makes a single call to the Writer's Write method. A Logger can be -// used simultaneously from multiple goroutines; it guarantees to -// serialize access to the Writer. -type Logger struct { - mu sync.Mutex // ensures atomic writes; and protects the following - // fields - prefix string // prefix to write at beginning of each line - flag int // properties - out io.Writer // destination for output - buf []byte // for accumulating text to write -} - -// New creates a new Logger. The out argument sets the destination to -// which the log output will be written. The prefix appears at the -// beginning of each log line. The flag argument defines the logging -// properties. -func New(out io.Writer, prefix string, flag int) *Logger { - return &Logger{out: out, prefix: prefix, flag: flag} -} - -// std is the standard logger used by the package scope functions. -var std = New(os.Stderr, "", Lstdflags) - -// itoa converts the integer to ASCII. A negative widths will avoid -// zero-padding. The function supports only non-negative integers. -func itoa(buf *[]byte, i int, wid int) { - var u = uint(i) - if u == 0 && wid <= 1 { - *buf = append(*buf, '0') - return - } - var b [32]byte - bp := len(b) - for ; u > 0 || wid > 0; u /= 10 { - bp-- - wid-- - b[bp] = byte(u%10) + '0' - } - *buf = append(*buf, b[bp:]...) -} - -// formatHeader puts the header into the buf field of the buffer. -func (l *Logger) formatHeader(t time.Time, file string, line int) { - l.buf = append(l.buf, l.prefix...) - if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { - if l.flag&Ldate != 0 { - year, month, day := t.Date() - itoa(&l.buf, year, 4) - l.buf = append(l.buf, '-') - itoa(&l.buf, int(month), 2) - l.buf = append(l.buf, '-') - itoa(&l.buf, day, 2) - l.buf = append(l.buf, ' ') - } - if l.flag&(Ltime|Lmicroseconds) != 0 { - hour, min, sec := t.Clock() - itoa(&l.buf, hour, 2) - l.buf = append(l.buf, ':') - itoa(&l.buf, min, 2) - l.buf = append(l.buf, ':') - itoa(&l.buf, sec, 2) - if l.flag&Lmicroseconds != 0 { - l.buf = append(l.buf, '.') - itoa(&l.buf, t.Nanosecond()/1e3, 6) - } - l.buf = append(l.buf, ' ') - } - } - if l.flag&(Lshortfile|Llongfile) != 0 { - if l.flag&Lshortfile != 0 { - short := file - for i := len(file) - 1; i > 0; i-- { - if file[i] == '/' { - short = file[i+1:] - break - } - } - file = short - } - l.buf = append(l.buf, file...) - l.buf = append(l.buf, ':') - itoa(&l.buf, line, -1) - l.buf = append(l.buf, ": "...) 
- } -} - -func (l *Logger) output(calldepth int, now time.Time, s string) error { - var file string - var line int - if l.flag&(Lshortfile|Llongfile) != 0 { - l.mu.Unlock() - var ok bool - _, file, line, ok = runtime.Caller(calldepth) - if !ok { - file = "???" - line = 0 - } - l.mu.Lock() - } - l.buf = l.buf[:0] - l.formatHeader(now, file, line) - l.buf = append(l.buf, s...) - if len(s) == 0 || s[len(s)-1] != '\n' { - l.buf = append(l.buf, '\n') - } - _, err := l.out.Write(l.buf) - return err -} - -// Output writes the string s with the header controlled by the flags to -// the l.out writer. A newline will be appended if s doesn't end in a -// newline. Calldepth is used to recover the PC, although all current -// calls of Output use the call depth 2. Access to the function is serialized. -func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { - now := time.Now() - l.mu.Lock() - defer l.mu.Unlock() - if l.flag&noflag != 0 { - return nil - } - s := fmt.Sprint(v...) - return l.output(calldepth+1, now, s) -} - -// Outputf works like output but formats the output like Printf. -func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { - now := time.Now() - l.mu.Lock() - defer l.mu.Unlock() - if l.flag&noflag != 0 { - return nil - } - s := fmt.Sprintf(format, v...) - return l.output(calldepth+1, now, s) -} - -// Outputln works like output but formats the output like Println. -func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { - now := time.Now() - l.mu.Lock() - defer l.mu.Unlock() - if l.flag&noflag != 0 { - return nil - } - s := fmt.Sprintln(v...) - return l.output(calldepth+1, now, s) -} - -// Panic prints the message like Print and calls panic. The printing -// might be suppressed by the flag Lnopanic. -func (l *Logger) Panic(v ...interface{}) { - l.Output(2, Lnopanic, v...) - s := fmt.Sprint(v...) - panic(s) -} - -// Panic prints the message like Print and calls panic. The printing -// might be suppressed by the flag Lnopanic. -func Panic(v ...interface{}) { - std.Output(2, Lnopanic, v...) - s := fmt.Sprint(v...) - panic(s) -} - -// Panicf prints the message like Printf and calls panic. The printing -// might be suppressed by the flag Lnopanic. -func (l *Logger) Panicf(format string, v ...interface{}) { - l.Outputf(2, Lnopanic, format, v...) - s := fmt.Sprintf(format, v...) - panic(s) -} - -// Panicf prints the message like Printf and calls panic. The printing -// might be suppressed by the flag Lnopanic. -func Panicf(format string, v ...interface{}) { - std.Outputf(2, Lnopanic, format, v...) - s := fmt.Sprintf(format, v...) - panic(s) -} - -// Panicln prints the message like Println and calls panic. The printing -// might be suppressed by the flag Lnopanic. -func (l *Logger) Panicln(v ...interface{}) { - l.Outputln(2, Lnopanic, v...) - s := fmt.Sprintln(v...) - panic(s) -} - -// Panicln prints the message like Println and calls panic. The printing -// might be suppressed by the flag Lnopanic. -func Panicln(v ...interface{}) { - std.Outputln(2, Lnopanic, v...) - s := fmt.Sprintln(v...) - panic(s) -} - -// Fatal prints the message like Print and calls os.Exit(1). The -// printing might be suppressed by the flag Lnofatal. -func (l *Logger) Fatal(v ...interface{}) { - l.Output(2, Lnofatal, v...) - os.Exit(1) -} - -// Fatal prints the message like Print and calls os.Exit(1). The -// printing might be suppressed by the flag Lnofatal. -func Fatal(v ...interface{}) { - std.Output(2, Lnofatal, v...) 
- os.Exit(1) -} - -// Fatalf prints the message like Printf and calls os.Exit(1). The -// printing might be suppressed by the flag Lnofatal. -func (l *Logger) Fatalf(format string, v ...interface{}) { - l.Outputf(2, Lnofatal, format, v...) - os.Exit(1) -} - -// Fatalf prints the message like Printf and calls os.Exit(1). The -// printing might be suppressed by the flag Lnofatal. -func Fatalf(format string, v ...interface{}) { - std.Outputf(2, Lnofatal, format, v...) - os.Exit(1) -} - -// Fatalln prints the message like Println and calls os.Exit(1). The -// printing might be suppressed by the flag Lnofatal. -func (l *Logger) Fatalln(format string, v ...interface{}) { - l.Outputln(2, Lnofatal, v...) - os.Exit(1) -} - -// Fatalln prints the message like Println and calls os.Exit(1). The -// printing might be suppressed by the flag Lnofatal. -func Fatalln(format string, v ...interface{}) { - std.Outputln(2, Lnofatal, v...) - os.Exit(1) -} - -// Warn prints the message like Print. The printing might be suppressed -// by the flag Lnowarn. -func (l *Logger) Warn(v ...interface{}) { - l.Output(2, Lnowarn, v...) -} - -// Warn prints the message like Print. The printing might be suppressed -// by the flag Lnowarn. -func Warn(v ...interface{}) { - std.Output(2, Lnowarn, v...) -} - -// Warnf prints the message like Printf. The printing might be suppressed -// by the flag Lnowarn. -func (l *Logger) Warnf(format string, v ...interface{}) { - l.Outputf(2, Lnowarn, format, v...) -} - -// Warnf prints the message like Printf. The printing might be suppressed -// by the flag Lnowarn. -func Warnf(format string, v ...interface{}) { - std.Outputf(2, Lnowarn, format, v...) -} - -// Warnln prints the message like Println. The printing might be suppressed -// by the flag Lnowarn. -func (l *Logger) Warnln(v ...interface{}) { - l.Outputln(2, Lnowarn, v...) -} - -// Warnln prints the message like Println. The printing might be suppressed -// by the flag Lnowarn. -func Warnln(v ...interface{}) { - std.Outputln(2, Lnowarn, v...) -} - -// Print prints the message like fmt.Print. The printing might be suppressed -// by the flag Lnoprint. -func (l *Logger) Print(v ...interface{}) { - l.Output(2, Lnoprint, v...) -} - -// Print prints the message like fmt.Print. The printing might be suppressed -// by the flag Lnoprint. -func Print(v ...interface{}) { - std.Output(2, Lnoprint, v...) -} - -// Printf prints the message like fmt.Printf. The printing might be suppressed -// by the flag Lnoprint. -func (l *Logger) Printf(format string, v ...interface{}) { - l.Outputf(2, Lnoprint, format, v...) -} - -// Printf prints the message like fmt.Printf. The printing might be suppressed -// by the flag Lnoprint. -func Printf(format string, v ...interface{}) { - std.Outputf(2, Lnoprint, format, v...) -} - -// Println prints the message like fmt.Println. The printing might be -// suppressed by the flag Lnoprint. -func (l *Logger) Println(v ...interface{}) { - l.Outputln(2, Lnoprint, v...) -} - -// Println prints the message like fmt.Println. The printing might be -// suppressed by the flag Lnoprint. -func Println(v ...interface{}) { - std.Outputln(2, Lnoprint, v...) -} - -// Debug prints the message like Print. The printing might be suppressed -// by the flag Lnodebug. -func (l *Logger) Debug(v ...interface{}) { - l.Output(2, Lnodebug, v...) -} - -// Debug prints the message like Print. The printing might be suppressed -// by the flag Lnodebug. -func Debug(v ...interface{}) { - std.Output(2, Lnodebug, v...) 
-} - -// Debugf prints the message like Printf. The printing might be suppressed -// by the flag Lnodebug. -func (l *Logger) Debugf(format string, v ...interface{}) { - l.Outputf(2, Lnodebug, format, v...) -} - -// Debugf prints the message like Printf. The printing might be suppressed -// by the flag Lnodebug. -func Debugf(format string, v ...interface{}) { - std.Outputf(2, Lnodebug, format, v...) -} - -// Debugln prints the message like Println. The printing might be suppressed -// by the flag Lnodebug. -func (l *Logger) Debugln(v ...interface{}) { - l.Outputln(2, Lnodebug, v...) -} - -// Debugln prints the message like Println. The printing might be suppressed -// by the flag Lnodebug. -func Debugln(v ...interface{}) { - std.Outputln(2, Lnodebug, v...) -} - -// Flags returns the current flags used by the logger. -func (l *Logger) Flags() int { - l.mu.Lock() - defer l.mu.Unlock() - return l.flag -} - -// Flags returns the current flags used by the standard logger. -func Flags() int { - return std.Flags() -} - -// SetFlags sets the flags of the logger. -func (l *Logger) SetFlags(flag int) { - l.mu.Lock() - defer l.mu.Unlock() - l.flag = flag -} - -// SetFlags sets the flags for the standard logger. -func SetFlags(flag int) { - std.SetFlags(flag) -} - -// Prefix returns the prefix used by the logger. -func (l *Logger) Prefix() string { - l.mu.Lock() - defer l.mu.Unlock() - return l.prefix -} - -// Prefix returns the prefix used by the standard logger of the package. -func Prefix() string { - return std.Prefix() -} - -// SetPrefix sets the prefix for the logger. -func (l *Logger) SetPrefix(prefix string) { - l.mu.Lock() - defer l.mu.Unlock() - l.prefix = prefix -} - -// SetPrefix sets the prefix of the standard logger of the package. -func SetPrefix(prefix string) { - std.SetPrefix(prefix) -} - -// SetOutput sets the output of the logger. -func (l *Logger) SetOutput(w io.Writer) { - l.mu.Lock() - defer l.mu.Unlock() - l.out = w -} - -// SetOutput sets the output for the standard logger of the package. -func SetOutput(w io.Writer) { - std.SetOutput(w) -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go deleted file mode 100644 index 2a7bd19ec1..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/bintree.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "unicode" -) - -// node represents a node in the binary tree. -type node struct { - // x is the search value - x uint32 - // p parent node - p uint32 - // l left child - l uint32 - // r right child - r uint32 -} - -// wordLen is the number of bytes represented by the v field of a node. -const wordLen = 4 - -// binTree supports the identification of the next operation based on a -// binary tree. -// -// Nodes will be identified by their index into the ring buffer. -type binTree struct { - dict *encoderDict - // ring buffer of nodes - node []node - // absolute offset of the entry for the next node. Position 4 - // byte larger. - hoff int64 - // front position in the node ring buffer - front uint32 - // index of the root node - root uint32 - // current x value - x uint32 - // preallocated array - data []byte -} - -// null represents the nonexistent index. We can't use zero because it -// would always exist or we would need to decrease the index for each -// reference. 
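// Aside: the suppression scheme of the deleted xlog package above is
// worth a sketch. Every print routes through an output method that
// carries a "noflag" bit; if that bit is set in the logger's flag word
// the text is dropped, while side effects (panic, os.Exit) still run in
// the callers. Names here are illustrative, not the vendored API.
package main

import (
	"fmt"
	"os"
)

const (
	lnowarn  = 1 << iota // suppresses Warn output
	lnodebug             // suppresses Debug output
)

type logger struct{ flag int }

func (l *logger) output(noflag int, s string) {
	if l.flag&noflag != 0 {
		return // this category is switched off
	}
	fmt.Fprintln(os.Stderr, s)
}

func (l *logger) Warn(v ...interface{})  { l.output(lnowarn, fmt.Sprint(v...)) }
func (l *logger) Debug(v ...interface{}) { l.output(lnodebug, fmt.Sprint(v...)) }

func main() {
	l := &logger{flag: lnodebug} // debug off, warnings on
	l.Warn("shown")
	l.Debug("silently dropped")
}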
-const null uint32 = 1<<32 - 1 - -// newBinTree initializes the binTree structure. The capacity defines -// the size of the buffer and defines the maximum distance for which -// matches will be found. -func newBinTree(capacity int) (t *binTree, err error) { - if capacity < 1 { - return nil, errors.New( - "newBinTree: capacity must be larger than zero") - } - if int64(capacity) >= int64(null) { - return nil, errors.New( - "newBinTree: capacity must less 2^{32}-1") - } - t = &binTree{ - node: make([]node, capacity), - hoff: -int64(wordLen), - root: null, - data: make([]byte, maxMatchLen), - } - return t, nil -} - -func (t *binTree) SetDict(d *encoderDict) { t.dict = d } - -// WriteByte writes a single byte into the binary tree. -func (t *binTree) WriteByte(c byte) error { - t.x = (t.x << 8) | uint32(c) - t.hoff++ - if t.hoff < 0 { - return nil - } - v := t.front - if int64(v) < t.hoff { - // We are overwriting old nodes stored in the tree. - t.remove(v) - } - t.node[v].x = t.x - t.add(v) - t.front++ - if int64(t.front) >= int64(len(t.node)) { - t.front = 0 - } - return nil -} - -// Writes writes a sequence of bytes into the binTree structure. -func (t *binTree) Write(p []byte) (n int, err error) { - for _, c := range p { - t.WriteByte(c) - } - return len(p), nil -} - -// add puts the node v into the tree. The node must not be part of the -// tree before. -func (t *binTree) add(v uint32) { - vn := &t.node[v] - // Set left and right to null indices. - vn.l, vn.r = null, null - // If the binary tree is empty make v the root. - if t.root == null { - t.root = v - vn.p = null - return - } - x := vn.x - p := t.root - // Search for the right leave link and add the new node. - for { - pn := &t.node[p] - if x <= pn.x { - if pn.l == null { - pn.l = v - vn.p = p - return - } - p = pn.l - } else { - if pn.r == null { - pn.r = v - vn.p = p - return - } - p = pn.r - } - } -} - -// parent returns the parent node index of v and the pointer to v value -// in the parent. -func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) { - if t.root == v { - return null, &t.root - } - p = t.node[v].p - if t.node[p].l == v { - ptr = &t.node[p].l - } else { - ptr = &t.node[p].r - } - return -} - -// Remove node v. -func (t *binTree) remove(v uint32) { - vn := &t.node[v] - p, ptr := t.parent(v) - l, r := vn.l, vn.r - if l == null { - // Move the right child up. - *ptr = r - if r != null { - t.node[r].p = p - } - return - } - if r == null { - // Move the left child up. - *ptr = l - t.node[l].p = p - return - } - - // Search the in-order predecessor u. - un := &t.node[l] - ur := un.r - if ur == null { - // In order predecessor is l. Move it up. - un.r = r - t.node[r].p = l - un.p = p - *ptr = l - return - } - var u uint32 - for { - // Look for the max value in the tree where l is root. - u = ur - ur = t.node[u].r - if ur == null { - break - } - } - // replace u with ul - un = &t.node[u] - ul := un.l - up := un.p - t.node[up].r = ul - if ul != null { - t.node[ul].p = up - } - - // replace v by u - un.l, un.r = l, r - t.node[l].p = u - t.node[r].p = u - *ptr = u - un.p = p -} - -// search looks for the node that have the value x or for the nodes that -// brace it. The node highest in the tree with the value x will be -// returned. All other nodes with the same value live in left subtree of -// the returned node. 
-func (t *binTree) search(v uint32, x uint32) (a, b uint32) { - a, b = null, null - if v == null { - return - } - for { - vn := &t.node[v] - if x <= vn.x { - if x == vn.x { - return v, v - } - b = v - if vn.l == null { - return - } - v = vn.l - } else { - a = v - if vn.r == null { - return - } - v = vn.r - } - } -} - -// max returns the node with maximum value in the subtree with v as -// root. -func (t *binTree) max(v uint32) uint32 { - if v == null { - return null - } - for { - r := t.node[v].r - if r == null { - return v - } - v = r - } -} - -// min returns the node with the minimum value in the subtree with v as -// root. -func (t *binTree) min(v uint32) uint32 { - if v == null { - return null - } - for { - l := t.node[v].l - if l == null { - return v - } - v = l - } -} - -// pred returns the in-order predecessor of node v. -func (t *binTree) pred(v uint32) uint32 { - if v == null { - return null - } - u := t.max(t.node[v].l) - if u != null { - return u - } - for { - p := t.node[v].p - if p == null { - return null - } - if t.node[p].r == v { - return p - } - v = p - } -} - -// succ returns the in-order successor of node v. -func (t *binTree) succ(v uint32) uint32 { - if v == null { - return null - } - u := t.min(t.node[v].r) - if u != null { - return u - } - for { - p := t.node[v].p - if p == null { - return null - } - if t.node[p].l == v { - return p - } - v = p - } -} - -// xval converts the first four bytes of a into an 32-bit unsigned -// integer in big-endian order. -func xval(a []byte) uint32 { - var x uint32 - switch len(a) { - default: - x |= uint32(a[3]) - fallthrough - case 3: - x |= uint32(a[2]) << 8 - fallthrough - case 2: - x |= uint32(a[1]) << 16 - fallthrough - case 1: - x |= uint32(a[0]) << 24 - case 0: - } - return x -} - -// dumpX converts value x into a four-letter string. -func dumpX(x uint32) string { - a := make([]byte, 4) - for i := 0; i < 4; i++ { - c := byte(x >> uint((3-i)*8)) - if unicode.IsGraphic(rune(c)) { - a[i] = c - } else { - a[i] = '.' - } - } - return string(a) -} - -/* -// dumpNode writes a representation of the node v into the io.Writer. -func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { - if v == null { - return - } - - vn := &t.node[v] - - t.dumpNode(w, vn.r, indent+2) - - for i := 0; i < indent; i++ { - fmt.Fprint(w, " ") - } - if vn.p == null { - fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) - } else { - fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) - } - - t.dumpNode(w, vn.l, indent+2) -} - -// dump prints a representation of the binary tree into the writer. 
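// Aside: a runnable reduction of the binTree layout above. Nodes live in
// a slice, links are uint32 indices rather than pointers, and the
// sentinel null = 1<<32-1 marks a missing link because slot 0 is a valid
// ring-buffer position. Only insert and lookup are sketched here; the
// deleted code additionally keeps parent links so pred/succ can walk the
// tree in order.
package main

import "fmt"

const null = ^uint32(0) // 1<<32 - 1

type node struct{ x, p, l, r uint32 }

type tree struct {
	node []node
	root uint32
}

func newTree(capacity int) *tree {
	return &tree{node: make([]node, 0, capacity), root: null}
}

func (t *tree) add(x uint32) {
	v := uint32(len(t.node))
	t.node = append(t.node, node{x: x, p: null, l: null, r: null})
	if t.root == null {
		t.root = v
		return
	}
	for p := t.root; ; {
		pn := &t.node[p]
		if x <= pn.x { // duplicates go left, as in binTree.add
			if pn.l == null {
				pn.l, t.node[v].p = v, p
				return
			}
			p = pn.l
		} else {
			if pn.r == null {
				pn.r, t.node[v].p = v, p
				return
			}
			p = pn.r
		}
	}
}

func (t *tree) contains(x uint32) bool {
	for v := t.root; v != null; {
		n := t.node[v]
		switch {
		case x == n.x:
			return true
		case x < n.x:
			v = n.l
		default:
			v = n.r
		}
	}
	return false
}

func main() {
	t := newTree(8)
	for _, x := range []uint32{5, 2, 8, 2} {
		t.add(x)
	}
	fmt.Println(t.contains(8), t.contains(3)) // true false
}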
-func (t *binTree) dump(w io.Writer) error { - bw := bufio.NewWriter(w) - t.dumpNode(bw, t.root, 0) - return bw.Flush() -} -*/ - -func (t *binTree) distance(v uint32) int { - dist := int(t.front) - int(v) - if dist <= 0 { - dist += len(t.node) - } - return dist -} - -type matchParams struct { - rep [4]uint32 - // length when match will be accepted - nAccept int - // nodes to check - check int - // finish if length get shorter - stopShorter bool -} - -func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, -) (r match, checked int, accepted bool) { - buf := &t.dict.buf - for { - if checked >= p.check { - return m, checked, true - } - dist, ok := distIter() - if !ok { - return m, checked, false - } - checked++ - if m.n > 0 { - i := buf.rear - dist + m.n - 1 - if i < 0 { - i += len(buf.data) - } else if i >= len(buf.data) { - i -= len(buf.data) - } - if buf.data[i] != t.data[m.n-1] { - if p.stopShorter { - return m, checked, false - } - continue - } - } - n := buf.matchLen(dist, t.data) - switch n { - case 0: - if p.stopShorter { - return m, checked, false - } - continue - case 1: - if uint32(dist-minDistance) != p.rep[0] { - continue - } - } - if n < m.n || (n == m.n && int64(dist) >= m.distance) { - continue - } - m = match{int64(dist), n} - if n >= p.nAccept { - return m, checked, true - } - } -} - -func (t *binTree) NextOp(rep [4]uint32) operation { - // retrieve maxMatchLen data - n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) - if n == 0 { - panic("no data in buffer") - } - t.data = t.data[:n] - - var ( - m match - x, u, v uint32 - iterPred, iterSucc func() (int, bool) - ) - p := matchParams{ - rep: rep, - nAccept: maxMatchLen, - check: 32, - } - i := 4 - iterSmall := func() (dist int, ok bool) { - i-- - if i <= 0 { - return 0, false - } - return i, true - } - m, checked, accepted := t.match(m, iterSmall, p) - if accepted { - goto end - } - p.check -= checked - x = xval(t.data) - u, v = t.search(t.root, x) - if u == v && len(t.data) == 4 { - iter := func() (dist int, ok bool) { - if u == null { - return 0, false - } - dist = t.distance(u) - u, v = t.search(t.node[u].l, x) - if u != v { - u = null - } - return dist, true - } - m, _, _ = t.match(m, iter, p) - goto end - } - p.stopShorter = true - iterSucc = func() (dist int, ok bool) { - if v == null { - return 0, false - } - dist = t.distance(v) - v = t.succ(v) - return dist, true - } - m, checked, accepted = t.match(m, iterSucc, p) - if accepted { - goto end - } - p.check -= checked - iterPred = func() (dist int, ok bool) { - if u == null { - return 0, false - } - dist = t.distance(u) - u = t.pred(u) - return dist, true - } - m, _, _ = t.match(m, iterPred, p) -end: - if m.n == 0 { - return lit{t.data[0]} - } - return m -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go deleted file mode 100644 index d2c07e8c91..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/bitops.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ - -// ntz32Const is used by the functions NTZ and NLZ. -const ntz32Const = 0x04d7651f - -// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. -// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. 
-var ntz32Table = [32]int8{ - 0, 1, 2, 24, 3, 19, 6, 25, - 22, 4, 20, 10, 16, 7, 12, 26, - 31, 23, 18, 5, 21, 9, 15, 11, - 30, 17, 8, 14, 29, 13, 28, 27, -} - -/* -// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. -func ntz32(x uint32) int { - if x == 0 { - return 32 - } - x = (x & -x) * ntz32Const - return int(ntz32Table[x>>27]) -} -*/ - -// nlz32 computes the number of leading zeros for an unsigned 32-bit integer. -func nlz32(x uint32) int { - // Smear left most bit to the right - x |= x >> 1 - x |= x >> 2 - x |= x >> 4 - x |= x >> 8 - x |= x >> 16 - // Use ntz mechanism to calculate nlz. - x++ - if x == 0 { - return 0 - } - x *= ntz32Const - return 32 - int(ntz32Table[x>>27]) -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go deleted file mode 100644 index 939be8845a..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/breader.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "io" -) - -// breader provides the ReadByte function for a Reader. It doesn't read -// more data from the reader than absolutely necessary. -type breader struct { - io.Reader - // helper slice to save allocations - p []byte -} - -// ByteReader converts an io.Reader into an io.ByteReader. -func ByteReader(r io.Reader) io.ByteReader { - br, ok := r.(io.ByteReader) - if !ok { - return &breader{r, make([]byte, 1)} - } - return br -} - -// ReadByte read byte function. -func (r *breader) ReadByte() (c byte, err error) { - n, err := r.Reader.Read(r.p) - if n < 1 { - if err == nil { - err = errors.New("breader.ReadByte: no data") - } - return 0, err - } - return r.p[0], nil -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go deleted file mode 100644 index 2761de5f0b..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/buffer.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" -) - -// buffer provides a circular buffer of bytes. If the front index equals -// the rear index the buffer is empty. As a consequence front cannot be -// equal rear for a full buffer. So a full buffer has a length that is -// one byte less the the length of the data slice. -type buffer struct { - data []byte - front int - rear int -} - -// newBuffer creates a buffer with the given size. -func newBuffer(size int) *buffer { - return &buffer{data: make([]byte, size+1)} -} - -// Cap returns the capacity of the buffer. -func (b *buffer) Cap() int { - return len(b.data) - 1 -} - -// Resets the buffer. The front and rear index are set to zero. -func (b *buffer) Reset() { - b.front = 0 - b.rear = 0 -} - -// Buffered returns the number of bytes buffered. -func (b *buffer) Buffered() int { - delta := b.front - b.rear - if delta < 0 { - delta += len(b.data) - } - return delta -} - -// Available returns the number of bytes available for writing. -func (b *buffer) Available() int { - delta := b.rear - 1 - b.front - if delta < 0 { - delta += len(b.data) - } - return delta -} - -// addIndex adds a non-negative integer to the index i and returns the -// resulting index. 
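// Aside: nlz32 above smears the highest set bit rightward, increments to
// isolate it, then uses the de Bruijn multiply-and-table trick to read
// off the answer. Today the standard library gives the same result; this
// standalone check confirms the equivalence. nlz32, ntz32Const, and the
// table are reproduced verbatim from the deleted file.
package main

import (
	"fmt"
	"math/bits"
)

const ntz32Const = 0x04d7651f

var ntz32Table = [32]int8{
	0, 1, 2, 24, 3, 19, 6, 25,
	22, 4, 20, 10, 16, 7, 12, 26,
	31, 23, 18, 5, 21, 9, 15, 11,
	30, 17, 8, 14, 29, 13, 28, 27,
}

// nlz32 is the deleted implementation, reproduced for comparison.
func nlz32(x uint32) int {
	// Smear the leftmost bit to the right.
	x |= x >> 1
	x |= x >> 2
	x |= x >> 4
	x |= x >> 8
	x |= x >> 16
	x++
	if x == 0 { // x was 0xffffffff: no leading zeros
		return 0
	}
	x *= ntz32Const
	return 32 - int(ntz32Table[x>>27])
}

func main() {
	for _, x := range []uint32{0, 1, 2, 12345, 0x80000000, 0xffffffff} {
		fmt.Printf("nlz32(%#x)=%d bits.LeadingZeros32=%d\n",
			x, nlz32(x), bits.LeadingZeros32(x))
	}
}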
The function takes care of wrapping the index as -// well as potential overflow situations. -func (b *buffer) addIndex(i int, n int) int { - // subtraction of len(b.data) prevents overflow - i += n - len(b.data) - if i < 0 { - i += len(b.data) - } - return i -} - -// Read reads bytes from the buffer into p and returns the number of -// bytes read. The function never returns an error but might return less -// data than requested. -func (b *buffer) Read(p []byte) (n int, err error) { - n, err = b.Peek(p) - b.rear = b.addIndex(b.rear, n) - return n, err -} - -// Peek reads bytes from the buffer into p without changing the buffer. -// Peek will never return an error but might return less data than -// requested. -func (b *buffer) Peek(p []byte) (n int, err error) { - m := b.Buffered() - n = len(p) - if m < n { - n = m - p = p[:n] - } - k := copy(p, b.data[b.rear:]) - if k < n { - copy(p[k:], b.data) - } - return n, nil -} - -// Discard skips the n next bytes to read from the buffer, returning the -// bytes discarded. -// -// If Discards skips fewer than n bytes, it returns an error. -func (b *buffer) Discard(n int) (discarded int, err error) { - if n < 0 { - return 0, errors.New("buffer.Discard: negative argument") - } - m := b.Buffered() - if m < n { - n = m - err = errors.New( - "buffer.Discard: discarded less bytes then requested") - } - b.rear = b.addIndex(b.rear, n) - return n, err -} - -// ErrNoSpace indicates that there is insufficient space for the Write -// operation. -var ErrNoSpace = errors.New("insufficient space") - -// Write puts data into the buffer. If less bytes are written than -// requested ErrNoSpace is returned. -func (b *buffer) Write(p []byte) (n int, err error) { - m := b.Available() - n = len(p) - if m < n { - n = m - p = p[:m] - err = ErrNoSpace - } - k := copy(b.data[b.front:], p) - if k < n { - copy(b.data, p[k:]) - } - b.front = b.addIndex(b.front, n) - return n, err -} - -// WriteByte writes a single byte into the buffer. The error ErrNoSpace -// is returned if no single byte is available in the buffer for writing. -func (b *buffer) WriteByte(c byte) error { - if b.Available() < 1 { - return ErrNoSpace - } - b.data[b.front] = c - b.front = b.addIndex(b.front, 1) - return nil -} - -// prefixLen returns the length of the common prefix of a and b. -func prefixLen(a, b []byte) int { - if len(a) > len(b) { - a, b = b, a - } - for i, c := range a { - if b[i] != c { - return i - } - } - return len(a) -} - -// matchLen returns the length of the common prefix for the given -// distance from the rear and the byte slice p. -func (b *buffer) matchLen(distance int, p []byte) int { - var n int - i := b.rear - distance - if i < 0 { - if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i { - return n - } - p = p[n:] - i = 0 - } - n += prefixLen(p, b.data[i:]) - return n -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go deleted file mode 100644 index 040874c1a4..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "io" -) - -// ErrLimit indicates that the limit of the LimitedByteWriter has been -// reached. -var ErrLimit = errors.New("limit reached") - -// LimitedByteWriter provides a byte writer that can be written until a -// limit is reached. 
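// Aside: a runnable sketch of the circular-buffer invariants documented
// above: front == rear means empty, so a buffer of usable size n
// allocates n+1 bytes, and every index update uses the overflow-safe
// addIndex trick (add, subtract the length, re-add it if negative).
// Error handling and capacity checks from the deleted code are omitted.
package main

import "fmt"

type buffer struct {
	data        []byte
	front, rear int
}

func (b *buffer) addIndex(i, n int) int {
	i += n - len(b.data) // subtracting the length first prevents overflow
	if i < 0 {
		i += len(b.data)
	}
	return i
}

func (b *buffer) buffered() int {
	d := b.front - b.rear
	if d < 0 {
		d += len(b.data)
	}
	return d
}

// write assumes the caller checked Available, as the deleted Write does.
func (b *buffer) write(p []byte) {
	k := copy(b.data[b.front:], p)
	if k < len(p) {
		copy(b.data, p[k:]) // wrap around to the start
	}
	b.front = b.addIndex(b.front, len(p))
}

func (b *buffer) read(p []byte) {
	k := copy(p, b.data[b.rear:])
	if k < len(p) {
		copy(p[k:], b.data)
	}
	b.rear = b.addIndex(b.rear, len(p))
}

func main() {
	b := &buffer{data: make([]byte, 5)} // usable capacity 4
	b.write([]byte("abc"))
	out := make([]byte, 3)
	b.read(out)
	b.write([]byte("defg")) // wraps past the end of data
	out = make([]byte, 4)
	b.read(out)
	fmt.Printf("%s buffered=%d\n", out, b.buffered()) // defg buffered=0
}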
The field N provides the number of remaining -// bytes. -type LimitedByteWriter struct { - BW io.ByteWriter - N int64 -} - -// WriteByte writes a single byte to the limited byte writer. It returns -// ErrLimit if the limit has been reached. If the byte is successfully -// written the field N of the LimitedByteWriter will be decremented by -// one. -func (l *LimitedByteWriter) WriteByte(c byte) error { - if l.N <= 0 { - return ErrLimit - } - if err := l.BW.WriteByte(c); err != nil { - return err - } - l.N-- - return nil -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go deleted file mode 100644 index cbb943a062..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/decoder.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "fmt" - "io" -) - -// decoder decodes a raw LZMA stream without any header. -type decoder struct { - // dictionary; the rear pointer of the buffer will be used for - // reading the data. - Dict *decoderDict - // decoder state - State *state - // range decoder - rd *rangeDecoder - // start stores the head value of the dictionary for the LZMA - // stream - start int64 - // size of uncompressed data - size int64 - // end-of-stream encountered - eos bool - // EOS marker found - eosMarker bool -} - -// newDecoder creates a new decoder instance. The parameter size provides -// the expected byte size of the decompressed data. If the size is -// unknown use a negative value. In that case the decoder will look for -// a terminating end-of-stream marker. -func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { - rd, err := newRangeDecoder(br) - if err != nil { - return nil, err - } - d = &decoder{ - State: state, - Dict: dict, - rd: rd, - size: size, - start: dict.pos(), - } - return d, nil -} - -// Reopen restarts the decoder with a new byte reader and a new size. Reopen -// resets the Decompressed counter to zero. -func (d *decoder) Reopen(br io.ByteReader, size int64) error { - var err error - if d.rd, err = newRangeDecoder(br); err != nil { - return err - } - d.start = d.Dict.pos() - d.size = size - d.eos = false - return nil -} - -// decodeLiteral decodes a single literal from the LZMA stream. -func (d *decoder) decodeLiteral() (op operation, err error) { - litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) - match := d.Dict.byteAt(int(d.State.rep[0]) + 1) - s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) - if err != nil { - return nil, err - } - return lit{s}, nil -} - -// errEOS indicates that an EOS marker has been found. -var errEOS = errors.New("EOS marker found") - -// readOp decodes the next operation from the compressed stream. It -// returns the operation. If an explicit end of stream marker is -// identified the eos error is returned. 
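// Aside: LimitedByteWriter above is self-contained, so a usage sketch is
// enough: write until the budget N is exhausted and observe the limit
// error. The type is mirrored here (renamed, unexported) so the example
// runs standalone; bytes.Buffer supplies the io.ByteWriter.
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

var errLimit = errors.New("limit reached")

type limitedByteWriter struct {
	bw io.ByteWriter
	n  int64 // remaining bytes
}

func (l *limitedByteWriter) WriteByte(c byte) error {
	if l.n <= 0 {
		return errLimit
	}
	if err := l.bw.WriteByte(c); err != nil {
		return err
	}
	l.n--
	return nil
}

func main() {
	var buf bytes.Buffer
	w := &limitedByteWriter{bw: &buf, n: 4}
	for _, c := range []byte("abcdef") {
		if err := w.WriteByte(c); err != nil {
			fmt.Println("stopped:", err)
			break
		}
	}
	fmt.Printf("wrote %q\n", buf.Bytes()) // wrote "abcd"
}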
-func (d *decoder) readOp() (op operation, err error) { - // Value of the end of stream (EOS) marker - const eosDist = 1<<32 - 1 - - state, state2, posState := d.State.states(d.Dict.head) - - b, err := d.State.isMatch[state2].Decode(d.rd) - if err != nil { - return nil, err - } - if b == 0 { - // literal - op, err := d.decodeLiteral() - if err != nil { - return nil, err - } - d.State.updateStateLiteral() - return op, nil - } - b, err = d.State.isRep[state].Decode(d.rd) - if err != nil { - return nil, err - } - if b == 0 { - // simple match - d.State.rep[3], d.State.rep[2], d.State.rep[1] = - d.State.rep[2], d.State.rep[1], d.State.rep[0] - - d.State.updateStateMatch() - // The length decoder returns the length offset. - n, err := d.State.lenCodec.Decode(d.rd, posState) - if err != nil { - return nil, err - } - // The dist decoder returns the distance offset. The actual - // distance is 1 higher. - d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) - if err != nil { - return nil, err - } - if d.State.rep[0] == eosDist { - d.eosMarker = true - return nil, errEOS - } - op = match{n: int(n) + minMatchLen, - distance: int64(d.State.rep[0]) + minDistance} - return op, nil - } - b, err = d.State.isRepG0[state].Decode(d.rd) - if err != nil { - return nil, err - } - dist := d.State.rep[0] - if b == 0 { - // rep match 0 - b, err = d.State.isRepG0Long[state2].Decode(d.rd) - if err != nil { - return nil, err - } - if b == 0 { - d.State.updateStateShortRep() - op = match{n: 1, distance: int64(dist) + minDistance} - return op, nil - } - } else { - b, err = d.State.isRepG1[state].Decode(d.rd) - if err != nil { - return nil, err - } - if b == 0 { - dist = d.State.rep[1] - } else { - b, err = d.State.isRepG2[state].Decode(d.rd) - if err != nil { - return nil, err - } - if b == 0 { - dist = d.State.rep[2] - } else { - dist = d.State.rep[3] - d.State.rep[3] = d.State.rep[2] - } - d.State.rep[2] = d.State.rep[1] - } - d.State.rep[1] = d.State.rep[0] - d.State.rep[0] = dist - } - n, err := d.State.repLenCodec.Decode(d.rd, posState) - if err != nil { - return nil, err - } - d.State.updateStateRep() - op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} - return op, nil -} - -// apply takes the operation and transforms the decoder dictionary accordingly. -func (d *decoder) apply(op operation) error { - var err error - switch x := op.(type) { - case match: - err = d.Dict.writeMatch(x.distance, x.n) - case lit: - err = d.Dict.WriteByte(x.b) - default: - panic("op is neither a match nor a literal") - } - return err -} - -// decompress fills the dictionary unless no space for new data is -// available. If the end of the LZMA stream has been reached io.EOF will -// be returned. 
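// Aside: the branch order of readOp above defines the whole LZMA
// operation alphabet. This sketch replaces the range decoder with a
// canned bit sequence so the decision tree is visible at a glance:
// isMatch=0 selects a literal, isRep=0 a match with a new distance, and
// the isRepG0/isRepG0Long/isRepG1/isRepG2 bits pick one of the four
// recently used distances.
package main

import "fmt"

func classify(bits []int) string {
	next := func() int { b := bits[0]; bits = bits[1:]; return b }
	if next() == 0 { // isMatch
		return "literal"
	}
	if next() == 0 { // isRep
		return "simple match (new distance)"
	}
	if next() == 0 { // isRepG0
		if next() == 0 { // isRepG0Long
			return "short rep (distance rep[0], length 1)"
		}
		return "rep match, distance rep[0]"
	}
	if next() == 0 { // isRepG1
		return "rep match, distance rep[1]"
	}
	if next() == 0 { // isRepG2
		return "rep match, distance rep[2]"
	}
	return "rep match, distance rep[3]"
}

func main() {
	for _, seq := range [][]int{
		{0}, {1, 0}, {1, 1, 0, 0}, {1, 1, 0, 1},
		{1, 1, 1, 0}, {1, 1, 1, 1, 0}, {1, 1, 1, 1, 1},
	} {
		fmt.Println(seq, "->", classify(seq))
	}
}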
-func (d *decoder) decompress() error { - if d.eos { - return io.EOF - } - for d.Dict.Available() >= maxMatchLen { - op, err := d.readOp() - switch err { - case nil: - // break - case errEOS: - d.eos = true - if !d.rd.possiblyAtEnd() { - return errDataAfterEOS - } - if d.size >= 0 && d.size != d.Decompressed() { - return errSize - } - return io.EOF - case io.EOF: - d.eos = true - return io.ErrUnexpectedEOF - default: - return err - } - if err = d.apply(op); err != nil { - return err - } - if d.size >= 0 && d.Decompressed() >= d.size { - d.eos = true - if d.Decompressed() > d.size { - return errSize - } - if !d.rd.possiblyAtEnd() { - switch _, err = d.readOp(); err { - case nil: - return errSize - case io.EOF: - return io.ErrUnexpectedEOF - case errEOS: - break - default: - return err - } - } - return io.EOF - } - } - return nil -} - -// Errors that may be returned while decoding data. -var ( - errDataAfterEOS = errors.New("lzma: data after end of stream marker") - errSize = errors.New("lzma: wrong uncompressed data size") -) - -// Read reads data from the buffer. If no more data is available io.EOF is -// returned. -func (d *decoder) Read(p []byte) (n int, err error) { - var k int - for { - // Read of decoder dict never returns an error. - k, err = d.Dict.Read(p[n:]) - if err != nil { - panic(fmt.Errorf("dictionary read error %s", err)) - } - if k == 0 && d.eos { - return n, io.EOF - } - n += k - if n >= len(p) { - return n, nil - } - if err = d.decompress(); err != nil && err != io.EOF { - return n, err - } - } -} - -// Decompressed returns the number of bytes decompressed by the decoder. -func (d *decoder) Decompressed() int64 { - return d.Dict.pos() - d.start -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go deleted file mode 100644 index 8cd616ef9b..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "fmt" -) - -// decoderDict provides the dictionary for the decoder. The whole -// dictionary is used as reader buffer. -type decoderDict struct { - buf buffer - head int64 -} - -// newDecoderDict creates a new decoder dictionary. The whole dictionary -// will be used as reader buffer. -func newDecoderDict(dictCap int) (d *decoderDict, err error) { - // lower limit supports easy test cases - if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { - return nil, errors.New("lzma: dictCap out of range") - } - d = &decoderDict{buf: *newBuffer(dictCap)} - return d, nil -} - -// Reset clears the dictionary. The read buffer is not changed, so the -// buffered data can still be read. -func (d *decoderDict) Reset() { - d.head = 0 -} - -// WriteByte writes a single byte into the dictionary. It is used to -// write literals into the dictionary. -func (d *decoderDict) WriteByte(c byte) error { - if err := d.buf.WriteByte(c); err != nil { - return err - } - d.head++ - return nil -} - -// pos returns the position of the dictionary head. -func (d *decoderDict) pos() int64 { return d.head } - -// dictLen returns the actual length of the dictionary. -func (d *decoderDict) dictLen() int { - capacity := d.buf.Cap() - if d.head >= int64(capacity) { - return capacity - } - return int(d.head) -} - -// byteAt returns a byte stored in the dictionary. 
If the distance is -// non-positive or exceeds the current length of the dictionary the zero -// byte is returned. -func (d *decoderDict) byteAt(dist int) byte { - if !(0 < dist && dist <= d.dictLen()) { - return 0 - } - i := d.buf.front - dist - if i < 0 { - i += len(d.buf.data) - } - return d.buf.data[i] -} - -// writeMatch writes the match at the top of the dictionary. The given -// distance must point in the current dictionary and the length must not -// exceed the maximum length 273 supported in LZMA. -// -// The error value ErrNoSpace indicates that no space is available in -// the dictionary for writing. You need to read from the dictionary -// first. -func (d *decoderDict) writeMatch(dist int64, length int) error { - if !(0 < dist && dist <= int64(d.dictLen())) { - return errors.New("writeMatch: distance out of range") - } - if !(0 < length && length <= maxMatchLen) { - return errors.New("writeMatch: length out of range") - } - if length > d.buf.Available() { - return ErrNoSpace - } - d.head += int64(length) - - i := d.buf.front - int(dist) - if i < 0 { - i += len(d.buf.data) - } - for length > 0 { - var p []byte - if i >= d.buf.front { - p = d.buf.data[i:] - i = 0 - } else { - p = d.buf.data[i:d.buf.front] - i = d.buf.front - } - if len(p) > length { - p = p[:length] - } - if _, err := d.buf.Write(p); err != nil { - panic(fmt.Errorf("d.buf.Write returned error %s", err)) - } - length -= len(p) - } - return nil -} - -// Write writes the given bytes into the dictionary and advances the -// head. -func (d *decoderDict) Write(p []byte) (n int, err error) { - n, err = d.buf.Write(p) - d.head += int64(n) - return n, err -} - -// Available returns the number of available bytes for writing into the -// decoder dictionary. -func (d *decoderDict) Available() int { return d.buf.Available() } - -// Read reads data from the buffer contained in the decoder dictionary. -func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go deleted file mode 100644 index 20b256a9d6..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -// directCodec allows the encoding and decoding of values with a fixed number -// of bits. The number of bits must be in the range [1,32]. -type directCodec byte - -// Bits returns the number of bits supported by this codec. -func (dc directCodec) Bits() int { - return int(dc) -} - -// Encode uses the range encoder to encode a value with the fixed number of -// bits. The most-significant bit is encoded first. -func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { - for i := int(dc) - 1; i >= 0; i-- { - if err := e.DirectEncodeBit(v >> uint(i)); err != nil { - return err - } - } - return nil -} - -// Decode uses the range decoder to decode a value with the given number of -// given bits. The most-significant bit is decoded first. 
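// Aside: the essential property of writeMatch above is that a match may
// overlap the bytes it is producing: with distance < length the copy
// reads bytes it has just written, repeating the pattern (the classic
// LZ77 run trick). A linear-buffer reduction of that semantics:
package main

import "fmt"

func writeMatch(out []byte, dist, length int) []byte {
	for i := 0; i < length; i++ {
		out = append(out, out[len(out)-dist]) // may read freshly written bytes
	}
	return out
}

func main() {
	out := []byte("ab")
	out = writeMatch(out, 2, 6) // dist 2 < length 6: repeats "ab"
	fmt.Println(string(out))    // abababab
}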
-func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { - for i := int(dc) - 1; i >= 0; i-- { - x, err := d.DirectDecodeBit() - if err != nil { - return 0, err - } - v = (v << 1) | x - } - return v, nil -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go deleted file mode 100644 index 60ed9aef13..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -// Constants used by the distance codec. -const ( - // minimum supported distance - minDistance = 1 - // maximum supported distance, value is used for the eos marker. - maxDistance = 1 << 32 - // number of the supported len states - lenStates = 4 - // start for the position models - startPosModel = 4 - // first index with align bits support - endPosModel = 14 - // bits for the position slots - posSlotBits = 6 - // number of align bits - alignBits = 4 -) - -// distCodec provides encoding and decoding of distance values. -type distCodec struct { - posSlotCodecs [lenStates]treeCodec - posModel [endPosModel - startPosModel]treeReverseCodec - alignCodec treeReverseCodec -} - -// deepcopy initializes dc as deep copy of the source. -func (dc *distCodec) deepcopy(src *distCodec) { - if dc == src { - return - } - for i := range dc.posSlotCodecs { - dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) - } - for i := range dc.posModel { - dc.posModel[i].deepcopy(&src.posModel[i]) - } - dc.alignCodec.deepcopy(&src.alignCodec) -} - -// newDistCodec creates a new distance codec. -func (dc *distCodec) init() { - for i := range dc.posSlotCodecs { - dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) - } - for i := range dc.posModel { - posSlot := startPosModel + i - bits := (posSlot >> 1) - 1 - dc.posModel[i] = makeTreeReverseCodec(bits) - } - dc.alignCodec = makeTreeReverseCodec(alignBits) -} - -// lenState converts the value l to a supported lenState value. -func lenState(l uint32) uint32 { - if l >= lenStates { - l = lenStates - 1 - } - return l -} - -// Encode encodes the distance using the parameter l. Dist can have values from -// the full range of uint32 values. To get the distance offset the actual match -// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) -// indicates the end of the stream. -func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { - // Compute the posSlot using nlz32 - var posSlot uint32 - var bits uint32 - if dist < startPosModel { - posSlot = dist - } else { - bits = uint32(30 - nlz32(dist)) - posSlot = startPosModel - 2 + (bits << 1) - posSlot += (dist >> uint(bits)) & 1 - } - - if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { - return - } - - switch { - case posSlot < startPosModel: - return nil - case posSlot < endPosModel: - tc := &dc.posModel[posSlot-startPosModel] - return tc.Encode(dist, e) - } - dic := directCodec(bits - alignBits) - if err = dic.Encode(e, dist>>alignBits); err != nil { - return - } - return dc.alignCodec.Encode(dist, e) -} - -// Decode decodes the distance offset using the parameter l. The dist value -// 0xffffffff (eos) indicates the end of the stream. Add one to the distance -// offset to get the actual match distance. 
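// Aside: directCodec above is a fixed-width integer codec layered on the
// range coder, and the bit order is the whole contract: most-significant
// bit first, both ways. With the range coder swapped for a plain bit
// slice the round trip is easy to inspect:
package main

import "fmt"

func encodeDirect(width int, v uint32) []uint32 {
	out := make([]uint32, 0, width)
	for i := width - 1; i >= 0; i-- { // MSB first, as in directCodec.Encode
		out = append(out, (v>>uint(i))&1)
	}
	return out
}

func decodeDirect(in []uint32) (v uint32) {
	for _, x := range in { // MSB arrives first, as in directCodec.Decode
		v = (v << 1) | x
	}
	return v
}

func main() {
	const width = 6
	for _, v := range []uint32{0, 1, 42, 63} {
		bits := encodeDirect(width, v)
		fmt.Println(v, bits, decodeDirect(bits) == v)
	}
}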
-func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { - posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) - if err != nil { - return - } - - // posSlot equals distance - if posSlot < startPosModel { - return posSlot, nil - } - - // posSlot uses the individual models - bits := (posSlot >> 1) - 1 - dist = (2 | (posSlot & 1)) << bits - var u uint32 - if posSlot < endPosModel { - tc := &dc.posModel[posSlot-startPosModel] - if u, err = tc.Decode(d); err != nil { - return 0, err - } - dist += u - return dist, nil - } - - // posSlots use direct encoding and a single model for the four align - // bits. - dic := directCodec(bits - alignBits) - if u, err = dic.Decode(d); err != nil { - return 0, err - } - dist += u << alignBits - if u, err = dc.alignCodec.Decode(d); err != nil { - return 0, err - } - dist += u - return dist, nil -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go deleted file mode 100644 index 5ed057a718..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/encoder.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "fmt" - "io" -) - -// opLenMargin provides the upper limit of the number of bytes required -// to encode a single operation. -const opLenMargin = 16 - -// compressFlags control the compression process. -type compressFlags uint32 - -// Values for compressFlags. -const ( - // all data should be compressed, even if compression is not - // optimal. - all compressFlags = 1 << iota -) - -// encoderFlags provide the flags for an encoder. -type encoderFlags uint32 - -// Flags for the encoder. -const ( - // eosMarker requests an EOS marker to be written. - eosMarker encoderFlags = 1 << iota -) - -// Encoder compresses data buffered in the encoder dictionary and writes -// it into a byte writer. -type encoder struct { - dict *encoderDict - state *state - re *rangeEncoder - start int64 - // generate eos marker - marker bool - limit bool - margin int -} - -// newEncoder creates a new encoder. If the byte writer must be -// limited use LimitedByteWriter provided by this package. The flags -// argument supports the eosMarker flag, controlling whether a -// terminating end-of-stream marker must be written. -func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, - flags encoderFlags) (e *encoder, err error) { - - re, err := newRangeEncoder(bw) - if err != nil { - return nil, err - } - e = &encoder{ - dict: dict, - state: state, - re: re, - marker: flags&eosMarker != 0, - start: dict.Pos(), - margin: opLenMargin, - } - if e.marker { - e.margin += 5 - } - return e, nil -} - -// Write writes the bytes from p into the dictionary. If not enough -// space is available the data in the dictionary buffer will be -// compressed to make additional space available. If the limit of the -// underlying writer has been reached ErrLimit will be returned. -func (e *encoder) Write(p []byte) (n int, err error) { - for { - k, err := e.dict.Write(p[n:]) - n += k - if err == ErrNoSpace { - if err = e.compress(0); err != nil { - return n, err - } - continue - } - return n, err - } -} - -// Reopen reopens the encoder with a new byte writer. 
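// Aside: the slot arithmetic in distCodec above packs every distance as
// "slot + extra bits". For dist >= startPosModel the deleted code sets
// bits = 30 - nlz32(dist) and derives the slot from the top two bits of
// the distance; the decoder rebuilds the base as (2|(slot&1))<<bits.
// math/bits.LeadingZeros32 stands in for nlz32 in this round trip:
package main

import (
	"fmt"
	"math/bits"
)

func posSlot(dist uint32) uint32 {
	if dist < 4 { // startPosModel: tiny distances map to themselves
		return dist
	}
	b := uint32(30 - bits.LeadingZeros32(dist)) // == 30 - nlz32(dist)
	return 4 - 2 + (b << 1) + ((dist >> b) & 1)
}

// slotBase returns the smallest distance that maps to the given slot.
func slotBase(slot uint32) uint32 {
	if slot < 4 {
		return slot
	}
	b := (slot >> 1) - 1
	return (2 | (slot & 1)) << b
}

func main() {
	for _, d := range []uint32{0, 3, 4, 7, 8, 12, 100, 1 << 20} {
		s := posSlot(d)
		fmt.Printf("dist=%-8d slot=%-2d base=%d\n", d, s, slotBase(s))
	}
}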
-func (e *encoder) Reopen(bw io.ByteWriter) error { - var err error - if e.re, err = newRangeEncoder(bw); err != nil { - return err - } - e.start = e.dict.Pos() - e.limit = false - return nil -} - -// writeLiteral writes a literal into the LZMA stream -func (e *encoder) writeLiteral(l lit) error { - var err error - state, state2, _ := e.state.states(e.dict.Pos()) - if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { - return err - } - litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) - match := e.dict.ByteAt(int(e.state.rep[0]) + 1) - err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) - if err != nil { - return err - } - e.state.updateStateLiteral() - return nil -} - -// iverson implements the Iverson operator as proposed by Donald Knuth in his -// book Concrete Mathematics. -func iverson(ok bool) uint32 { - if ok { - return 1 - } - return 0 -} - -// writeMatch writes a repetition operation into the operation stream -func (e *encoder) writeMatch(m match) error { - var err error - if !(minDistance <= m.distance && m.distance <= maxDistance) { - panic(fmt.Errorf("match distance %d out of range", m.distance)) - } - dist := uint32(m.distance - minDistance) - if !(minMatchLen <= m.n && m.n <= maxMatchLen) && - !(dist == e.state.rep[0] && m.n == 1) { - panic(fmt.Errorf( - "match length %d out of range; dist %d rep[0] %d", - m.n, dist, e.state.rep[0])) - } - state, state2, posState := e.state.states(e.dict.Pos()) - if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { - return err - } - g := 0 - for ; g < 4; g++ { - if e.state.rep[g] == dist { - break - } - } - b := iverson(g < 4) - if err = e.state.isRep[state].Encode(e.re, b); err != nil { - return err - } - n := uint32(m.n - minMatchLen) - if b == 0 { - // simple match - e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = - e.state.rep[2], e.state.rep[1], e.state.rep[0], dist - e.state.updateStateMatch() - if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { - return err - } - return e.state.distCodec.Encode(e.re, dist, n) - } - b = iverson(g != 0) - if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { - return err - } - if b == 0 { - // g == 0 - b = iverson(m.n != 1) - if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { - return err - } - if b == 0 { - e.state.updateStateShortRep() - return nil - } - } else { - // g in {1,2,3} - b = iverson(g != 1) - if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { - return err - } - if b == 1 { - // g in {2,3} - b = iverson(g != 2) - err = e.state.isRepG2[state].Encode(e.re, b) - if err != nil { - return err - } - if b == 1 { - e.state.rep[3] = e.state.rep[2] - } - e.state.rep[2] = e.state.rep[1] - } - e.state.rep[1] = e.state.rep[0] - e.state.rep[0] = dist - } - e.state.updateStateRep() - return e.state.repLenCodec.Encode(e.re, n, posState) -} - -// writeOp writes a single operation to the range encoder. The function -// checks whether there is enough space available to close the LZMA -// stream. -func (e *encoder) writeOp(op operation) error { - if e.re.Available() < int64(e.margin) { - return ErrLimit - } - switch x := op.(type) { - case lit: - return e.writeLiteral(x) - case match: - return e.writeMatch(x) - default: - panic("unexpected operation") - } -} - -// compress compressed data from the dictionary buffer. If the flag all -// is set, all data in the dictionary buffer will be compressed. The -// function returns ErrLimit if the underlying writer has reached its -// limit. 
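// Aside: writeMatch above scans rep[0..3] for the match distance; index
// g < 4 means a rep match and the array is reordered so the used
// distance moves to the front, while g == 4 means a new distance that
// shifts everything down. In effect rep is a move-to-front cache of
// depth four, which this standalone sketch condenses:
package main

import "fmt"

func useDistance(rep *[4]uint32, dist uint32) (g int) {
	for g = 0; g < 4; g++ {
		if rep[g] == dist {
			break
		}
	}
	switch g {
	case 0: // already at the front
	case 1:
		rep[1], rep[0] = rep[0], dist
	case 2:
		rep[2], rep[1], rep[0] = rep[1], rep[0], dist
	default: // g == 3, or g == 4 for a brand-new distance
		rep[3], rep[2], rep[1], rep[0] = rep[2], rep[1], rep[0], dist
	}
	return g
}

func main() {
	rep := [4]uint32{10, 20, 30, 40}
	for _, d := range []uint32{30, 99, 10} {
		g := useDistance(&rep, d)
		fmt.Printf("dist=%d g=%d rep=%v\n", d, g, rep)
	}
}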
-func (e *encoder) compress(flags compressFlags) error { - n := 0 - if flags&all == 0 { - n = maxMatchLen - 1 - } - d := e.dict - m := d.m - for d.Buffered() > n { - op := m.NextOp(e.state.rep) - if err := e.writeOp(op); err != nil { - return err - } - d.Discard(op.Len()) - } - return nil -} - -// eosMatch is a pseudo operation that indicates the end of the stream. -var eosMatch = match{distance: maxDistance, n: minMatchLen} - -// Close terminates the LZMA stream. If requested the end-of-stream -// marker will be written. If the byte writer limit has been or will be -// reached during compression of the remaining data in the buffer the -// LZMA stream will be closed and data will remain in the buffer. -func (e *encoder) Close() error { - err := e.compress(all) - if err != nil && err != ErrLimit { - return err - } - if e.marker { - if err := e.writeMatch(eosMatch); err != nil { - return err - } - } - err = e.re.Close() - return err -} - -// Compressed returns the number bytes of the input data that been -// compressed. -func (e *encoder) Compressed() int64 { - return e.dict.Pos() - e.start -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go deleted file mode 100644 index 056f89757c..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "fmt" - "io" -) - -// matcher is an interface that supports the identification of the next -// operation. -type matcher interface { - io.Writer - SetDict(d *encoderDict) - NextOp(rep [4]uint32) operation -} - -// encoderDict provides the dictionary of the encoder. It includes an -// additional buffer atop of the actual dictionary. -type encoderDict struct { - buf buffer - m matcher - head int64 - capacity int - // preallocated array - data [maxMatchLen]byte -} - -// newEncoderDict creates the encoder dictionary. The argument bufSize -// defines the size of the additional buffer. -func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) { - if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { - return nil, errors.New( - "lzma: dictionary capacity out of range") - } - if bufSize < 1 { - return nil, errors.New( - "lzma: buffer size must be larger than zero") - } - d = &encoderDict{ - buf: *newBuffer(dictCap + bufSize), - capacity: dictCap, - m: m, - } - m.SetDict(d) - return d, nil -} - -// Discard discards n bytes. Note that n must not be larger than -// MaxMatchLen. -func (d *encoderDict) Discard(n int) { - p := d.data[:n] - k, _ := d.buf.Read(p) - if k < n { - panic(fmt.Errorf("lzma: can't discard %d bytes", n)) - } - d.head += int64(n) - d.m.Write(p) -} - -// Len returns the data available in the encoder dictionary. -func (d *encoderDict) Len() int { - n := d.buf.Available() - if int64(n) > d.head { - return int(d.head) - } - return n -} - -// DictLen returns the actual length of data in the dictionary. -func (d *encoderDict) DictLen() int { - if d.head < int64(d.capacity) { - return int(d.head) - } - return d.capacity -} - -// Available returns the number of bytes that can be written by a -// following Write call. -func (d *encoderDict) Available() int { - return d.buf.Available() - d.DictLen() -} - -// Write writes data into the dictionary buffer. Note that the position -// of the dictionary head will not be moved. 
If there is not enough -// space in the buffer ErrNoSpace will be returned. -func (d *encoderDict) Write(p []byte) (n int, err error) { - m := d.Available() - if len(p) > m { - p = p[:m] - err = ErrNoSpace - } - var e error - if n, e = d.buf.Write(p); e != nil { - err = e - } - return n, err -} - -// Pos returns the position of the head. -func (d *encoderDict) Pos() int64 { return d.head } - -// ByteAt returns the byte at the given distance. -func (d *encoderDict) ByteAt(distance int) byte { - if !(0 < distance && distance <= d.Len()) { - return 0 - } - i := d.buf.rear - distance - if i < 0 { - i += len(d.buf.data) - } - return d.buf.data[i] -} - -// CopyN copies the last n bytes from the dictionary into the provided -// writer. This is used for copying uncompressed data into an -// uncompressed segment. -func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { - if n <= 0 { - return 0, nil - } - m := d.Len() - if n > m { - n = m - err = ErrNoSpace - } - i := d.buf.rear - n - var e error - if i < 0 { - i += len(d.buf.data) - if written, e = w.Write(d.buf.data[i:]); e != nil { - return written, e - } - i = 0 - } - var k int - k, e = w.Write(d.buf.data[i:d.buf.rear]) - written += k - if e != nil { - err = e - } - return written, err -} - -// Buffered returns the number of bytes in the buffer. -func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma deleted file mode 100644 index 5edad63326..0000000000 Binary files a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma and /dev/null differ diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go deleted file mode 100644 index 0fb7910bc0..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "fmt" - - "github.com/ulikunitz/xz/internal/hash" -) - -/* For compression we need to find byte sequences that match the byte - * sequence at the dictionary head. A hash table is a simple method to - * provide this capability. - */ - -// maxMatches limits the number of matches requested from the Matches -// function. This controls the speed of the overall encoding. -const maxMatches = 16 - -// shortDists defines the number of short distances supported by the -// implementation. -const shortDists = 8 - -// The minimum is somehow arbitrary but the maximum is limited by the -// memory requirements of the hash table. -const ( - minTableExponent = 9 - maxTableExponent = 20 -) - -// newRoller contains the function used to create an instance of the -// hash.Roller. -var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } - -// hashTable stores the hash table including the rolling hash method. -// -// We implement chained hashing into a circular buffer. Each entry in -// the circular buffer stores the delta distance to the next position with a -// word that has the same hash value. 
-type hashTable struct { - dict *encoderDict - // actual hash table - t []int64 - // circular list data with the offset to the next word - data []uint32 - front int - // mask for computing the index for the hash table - mask uint64 - // hash offset; initial value is -int64(wordLen) - hoff int64 - // length of the hashed word - wordLen int - // hash roller for computing the hash values for the Write - // method - wr hash.Roller - // hash roller for computing arbitrary hashes - hr hash.Roller - // preallocated slices - p [maxMatches]int64 - distances [maxMatches + shortDists]int -} - -// hashTableExponent derives the hash table exponent from the dictionary -// capacity. -func hashTableExponent(n uint32) int { - e := 30 - nlz32(n) - switch { - case e < minTableExponent: - e = minTableExponent - case e > maxTableExponent: - e = maxTableExponent - } - return e -} - -// newHashTable creates a new hash table for words of length wordLen -func newHashTable(capacity int, wordLen int) (t *hashTable, err error) { - if !(0 < capacity) { - return nil, errors.New( - "newHashTable: capacity must not be negative") - } - exp := hashTableExponent(uint32(capacity)) - if !(1 <= wordLen && wordLen <= 4) { - return nil, errors.New("newHashTable: " + - "argument wordLen out of range") - } - n := 1 << uint(exp) - if n <= 0 { - panic("newHashTable: exponent is too large") - } - t = &hashTable{ - t: make([]int64, n), - data: make([]uint32, capacity), - mask: (uint64(1) << uint(exp)) - 1, - hoff: -int64(wordLen), - wordLen: wordLen, - wr: newRoller(wordLen), - hr: newRoller(wordLen), - } - return t, nil -} - -func (t *hashTable) SetDict(d *encoderDict) { t.dict = d } - -// buffered returns the number of bytes that are currently hashed. -func (t *hashTable) buffered() int { - n := t.hoff + 1 - switch { - case n <= 0: - return 0 - case n >= int64(len(t.data)): - return len(t.data) - } - return int(n) -} - -// addIndex adds n to an index ensuring that is stays inside the -// circular buffer for the hash chain. -func (t *hashTable) addIndex(i, n int) int { - i += n - len(t.data) - if i < 0 { - i += len(t.data) - } - return i -} - -// putDelta puts the delta instance at the current front of the circular -// chain buffer. -func (t *hashTable) putDelta(delta uint32) { - t.data[t.front] = delta - t.front = t.addIndex(t.front, 1) -} - -// putEntry puts a new entry into the hash table. If there is already a -// value stored it is moved into the circular chain buffer. -func (t *hashTable) putEntry(h uint64, pos int64) { - if pos < 0 { - return - } - i := h & t.mask - old := t.t[i] - 1 - t.t[i] = pos + 1 - var delta int64 - if old >= 0 { - delta = pos - old - if delta > 1<<32-1 || delta > int64(t.buffered()) { - delta = 0 - } - } - t.putDelta(uint32(delta)) -} - -// WriteByte converts a single byte into a hash and puts them into the hash -// table. -func (t *hashTable) WriteByte(b byte) error { - h := t.wr.RollByte(b) - t.hoff++ - t.putEntry(h, t.hoff) - return nil -} - -// Write converts the bytes provided into hash tables and stores the -// abbreviated offsets into the hash table. The method will never return an -// error. -func (t *hashTable) Write(p []byte) (n int, err error) { - for _, b := range p { - // WriteByte doesn't generate an error. - t.WriteByte(b) - } - return len(p), nil -} - -// getMatches the matches for a specific hash. The functions returns the -// number of positions found. -// -// TODO: Make a getDistances because that we are actually interested in. 
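// Aside: a reduction of the chained hashing described above. The table
// keeps only the most recent position per bucket, and a side array keeps
// for each position the delta back to the previous position with the
// same hash, so a bucket is a linked list threaded through deltas. The
// deleted code stores pos+1 in a fixed-size table and the deltas in a
// circular uint32 buffer; a map and plain slices keep the sketch short.
package main

import "fmt"

type chain struct {
	head  map[uint64]int // hash -> most recent position
	delta []uint32       // delta[pos] -> gap to the previous match, 0 = end
}

func (c *chain) put(h uint64, pos int) {
	d := uint32(0)
	if old, ok := c.head[h]; ok {
		d = uint32(pos - old)
	}
	c.delta = append(c.delta, d) // entry for pos
	c.head[h] = pos
}

func (c *chain) matches(h uint64) (positions []int) {
	pos, ok := c.head[h]
	for ok {
		positions = append(positions, pos)
		d := c.delta[pos]
		if d == 0 {
			break
		}
		pos -= int(d)
	}
	return positions
}

func main() {
	c := &chain{head: map[uint64]int{}}
	for pos, h := range []uint64{7, 3, 7, 7, 3} { // canned hashes per position
		c.put(h, pos)
	}
	fmt.Println(c.matches(7)) // [3 2 0], newest first
	fmt.Println(c.matches(3)) // [4 1]
}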
-func (t *hashTable) getMatches(h uint64, positions []int64) (n int) { - if t.hoff < 0 || len(positions) == 0 { - return 0 - } - buffered := t.buffered() - tailPos := t.hoff + 1 - int64(buffered) - rear := t.front - buffered - if rear >= 0 { - rear -= len(t.data) - } - // get the slot for the hash - pos := t.t[h&t.mask] - 1 - delta := pos - tailPos - for { - if delta < 0 { - return n - } - positions[n] = tailPos + delta - n++ - if n >= len(positions) { - return n - } - i := rear + int(delta) - if i < 0 { - i += len(t.data) - } - u := t.data[i] - if u == 0 { - return n - } - delta -= int64(u) - } -} - -// hash computes the rolling hash for the word stored in p. For correct -// results its length must be equal to t.wordLen. -func (t *hashTable) hash(p []byte) uint64 { - var h uint64 - for _, b := range p { - h = t.hr.RollByte(b) - } - return h -} - -// Matches fills the positions slice with potential matches. The -// functions returns the number of positions filled into positions. The -// byte slice p must have word length of the hash table. -func (t *hashTable) Matches(p []byte, positions []int64) int { - if len(p) != t.wordLen { - panic(fmt.Errorf( - "byte slice must have length %d", t.wordLen)) - } - h := t.hash(p) - return t.getMatches(h, positions) -} - -// NextOp identifies the next operation using the hash table. -// -// TODO: Use all repetitions to find matches. -func (t *hashTable) NextOp(rep [4]uint32) operation { - // get positions - data := t.dict.data[:maxMatchLen] - n, _ := t.dict.buf.Peek(data) - data = data[:n] - var p []int64 - if n < t.wordLen { - p = t.p[:0] - } else { - p = t.p[:maxMatches] - n = t.Matches(data[:t.wordLen], p) - p = p[:n] - } - - // convert positions in potential distances - head := t.dict.head - dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8) - for _, pos := range p { - dis := int(head - pos) - if dis > shortDists { - dists = append(dists, dis) - } - } - - // check distances - var m match - dictLen := t.dict.DictLen() - for _, dist := range dists { - if dist > dictLen { - continue - } - - // Here comes a trick. We are only interested in matches - // that are longer than the matches we have been found - // before. So before we test the whole byte sequence at - // the given distance, we test the first byte that would - // make the match longer. If it doesn't match the byte - // to match, we don't to care any longer. - i := t.dict.buf.rear - dist + m.n - if i < 0 { - i += len(t.dict.buf.data) - } - if t.dict.buf.data[i] != data[m.n] { - // We can't get a longer match. Jump to the next - // distance. - continue - } - - n := t.dict.buf.matchLen(dist, data) - switch n { - case 0: - continue - case 1: - if uint32(dist-minDistance) != rep[0] { - continue - } - } - if n > m.n { - m = match{int64(dist), n} - if n == len(data) { - // No better match will be found. - break - } - } - } - - if m.n == 0 { - return lit{data[0]} - } - return m -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go deleted file mode 100644 index 04276c8163..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/header.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package lzma - -import ( - "errors" - "fmt" -) - -// uint32LE reads an uint32 integer from a byte slice -func uint32LE(b []byte) uint32 { - x := uint32(b[3]) << 24 - x |= uint32(b[2]) << 16 - x |= uint32(b[1]) << 8 - x |= uint32(b[0]) - return x -} - -// uint64LE converts the uint64 value stored as little endian to an uint64 -// value. -func uint64LE(b []byte) uint64 { - x := uint64(b[7]) << 56 - x |= uint64(b[6]) << 48 - x |= uint64(b[5]) << 40 - x |= uint64(b[4]) << 32 - x |= uint64(b[3]) << 24 - x |= uint64(b[2]) << 16 - x |= uint64(b[1]) << 8 - x |= uint64(b[0]) - return x -} - -// putUint32LE puts an uint32 integer into a byte slice that must have at least -// a length of 4 bytes. -func putUint32LE(b []byte, x uint32) { - b[0] = byte(x) - b[1] = byte(x >> 8) - b[2] = byte(x >> 16) - b[3] = byte(x >> 24) -} - -// putUint64LE puts the uint64 value into the byte slice as little endian -// value. The byte slice b must have at least place for 8 bytes. -func putUint64LE(b []byte, x uint64) { - b[0] = byte(x) - b[1] = byte(x >> 8) - b[2] = byte(x >> 16) - b[3] = byte(x >> 24) - b[4] = byte(x >> 32) - b[5] = byte(x >> 40) - b[6] = byte(x >> 48) - b[7] = byte(x >> 56) -} - -// noHeaderSize defines the value of the length field in the LZMA header. -const noHeaderSize uint64 = 1<<64 - 1 - -// HeaderLen provides the length of the LZMA file header. -const HeaderLen = 13 - -// header represents the header of an LZMA file. -type header struct { - properties Properties - dictCap int - // uncompressed size; negative value if no size is given - size int64 -} - -// marshalBinary marshals the header. -func (h *header) marshalBinary() (data []byte, err error) { - if err = h.properties.verify(); err != nil { - return nil, err - } - if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) { - return nil, fmt.Errorf("lzma: DictCap %d out of range", - h.dictCap) - } - - data = make([]byte, 13) - - // property byte - data[0] = h.properties.Code() - - // dictionary capacity - putUint32LE(data[1:5], uint32(h.dictCap)) - - // uncompressed size - var s uint64 - if h.size > 0 { - s = uint64(h.size) - } else { - s = noHeaderSize - } - putUint64LE(data[5:], s) - - return data, nil -} - -// unmarshalBinary unmarshals the header. -func (h *header) unmarshalBinary(data []byte) error { - if len(data) != HeaderLen { - return errors.New("lzma.unmarshalBinary: data has wrong length") - } - - // properties - var err error - if h.properties, err = PropertiesForCode(data[0]); err != nil { - return err - } - - // dictionary capacity - h.dictCap = int(uint32LE(data[1:])) - if h.dictCap < 0 { - return errors.New( - "LZMA header: dictionary capacity exceeds maximum " + - "integer") - } - - // uncompressed size - s := uint64LE(data[5:]) - if s == noHeaderSize { - h.size = -1 - } else { - h.size = int64(s) - if h.size < 0 { - return errors.New( - "LZMA header: uncompressed size " + - "out of int64 range") - } - } - - return nil -} - -// validDictCap checks whether the dictionary capacity is correct. This -// is used to weed out wrong file headers. -func validDictCap(dictcap int) bool { - if int64(dictcap) == MaxDictCap { - return true - } - for n := uint(10); n < 32; n++ { - if dictcap == 1<<n { - return true - } - if dictcap == 1<<n+1<<(n-1) { - return true - } - } - return false -} - -// ValidHeader checks for a valid LZMA file header. It allows only -// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If -// there is an explicit size it must not exceed 256 GiB. The length of -// the data argument must be HeaderLen.
-func ValidHeader(data []byte) bool { - var h header - if err := h.unmarshalBinary(data); err != nil { - return false - } - if !validDictCap(h.dictCap) { - return false - } - return h.size < 0 || h.size <= 1<<38 -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go deleted file mode 100644 index be54dd85fd..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/header2.go +++ /dev/null @@ -1,398 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "fmt" - "io" -) - -const ( - // maximum size of compressed data in a chunk - maxCompressed = 1 << 16 - // maximum size of uncompressed data in a chunk - maxUncompressed = 1 << 21 -) - -// chunkType represents the type of an LZMA2 chunk. Note that this -// value is an internal representation and no actual encoding of a LZMA2 -// chunk header. -type chunkType byte - -// Possible values for the chunk type. -const ( - // end of stream - cEOS chunkType = iota - // uncompressed; reset dictionary - cUD - // uncompressed; no reset of dictionary - cU - // LZMA compressed; no reset - cL - // LZMA compressed; reset state - cLR - // LZMA compressed; reset state; new property value - cLRN - // LZMA compressed; reset state; new property value; reset dictionary - cLRND -) - -// chunkTypeStrings provide a string representation for the chunk types. -var chunkTypeStrings = [...]string{ - cEOS: "EOS", - cU: "U", - cUD: "UD", - cL: "L", - cLR: "LR", - cLRN: "LRN", - cLRND: "LRND", -} - -// String returns a string representation of the chunk type. -func (c chunkType) String() string { - if !(cEOS <= c && c <= cLRND) { - return "unknown" - } - return chunkTypeStrings[c] -} - -// Actual encodings for the chunk types in the value. Note that the high -// uncompressed size bits are stored in the header byte additionally. -const ( - hEOS = 0 - hUD = 1 - hU = 2 - hL = 1 << 7 - hLR = 1<<7 | 1<<5 - hLRN = 1<<7 | 1<<6 - hLRND = 1<<7 | 1<<6 | 1<<5 -) - -// errHeaderByte indicates an unsupported value for the chunk header -// byte. These bytes starts the variable-length chunk header. -var errHeaderByte = errors.New("lzma: unsupported chunk header byte") - -// headerChunkType converts the header byte into a chunk type. It -// ignores the uncompressed size bits in the chunk header byte. -func headerChunkType(h byte) (c chunkType, err error) { - if h&hL == 0 { - // no compression - switch h { - case hEOS: - c = cEOS - case hUD: - c = cUD - case hU: - c = cU - default: - return 0, errHeaderByte - } - return - } - switch h & hLRND { - case hL: - c = cL - case hLR: - c = cLR - case hLRN: - c = cLRN - case hLRND: - c = cLRND - default: - return 0, errHeaderByte - } - return -} - -// uncompressedHeaderLen provides the length of an uncompressed header -const uncompressedHeaderLen = 3 - -// headerLen returns the length of the LZMA2 header for a given chunk -// type. -func headerLen(c chunkType) int { - switch c { - case cEOS: - return 1 - case cU, cUD: - return uncompressedHeaderLen - case cL, cLR: - return 5 - case cLRN, cLRND: - return 6 - } - panic(fmt.Errorf("unsupported chunk type %d", c)) -} - -// chunkHeader represents the contents of a chunk header. -type chunkHeader struct { - ctype chunkType - uncompressed uint32 - compressed uint16 - props Properties -} - -// String returns a string representation of the chunk header. 
-func (h *chunkHeader) String() string { - return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, - h.compressed, &h.props) -} - -// UnmarshalBinary reads the content of the chunk header from the data -// slice. The slice must have the correct length. -func (h *chunkHeader) UnmarshalBinary(data []byte) error { - if len(data) == 0 { - return errors.New("no data") - } - c, err := headerChunkType(data[0]) - if err != nil { - return err - } - - n := headerLen(c) - if len(data) < n { - return errors.New("incomplete data") - } - if len(data) > n { - return errors.New("invalid data length") - } - - *h = chunkHeader{ctype: c} - if c == cEOS { - return nil - } - - h.uncompressed = uint32(uint16BE(data[1:3])) - if c <= cU { - return nil - } - h.uncompressed |= uint32(data[0]&^hLRND) << 16 - - h.compressed = uint16BE(data[3:5]) - if c <= cLR { - return nil - } - - h.props, err = PropertiesForCode(data[5]) - return err -} - -// MarshalBinary encodes the chunk header value. The function checks -// whether the content of the chunk header is correct. -func (h *chunkHeader) MarshalBinary() (data []byte, err error) { - if h.ctype > cLRND { - return nil, errors.New("invalid chunk type") - } - if err = h.props.verify(); err != nil { - return nil, err - } - - data = make([]byte, headerLen(h.ctype)) - - switch h.ctype { - case cEOS: - return data, nil - case cUD: - data[0] = hUD - case cU: - data[0] = hU - case cL: - data[0] = hL - case cLR: - data[0] = hLR - case cLRN: - data[0] = hLRN - case cLRND: - data[0] = hLRND - } - - putUint16BE(data[1:3], uint16(h.uncompressed)) - if h.ctype <= cU { - return data, nil - } - data[0] |= byte(h.uncompressed>>16) &^ hLRND - - putUint16BE(data[3:5], h.compressed) - if h.ctype <= cLR { - return data, nil - } - - data[5] = h.props.Code() - return data, nil -} - -// readChunkHeader reads the chunk header from the IO reader. -func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { - p := make([]byte, 1, 6) - if _, err = io.ReadFull(r, p); err != nil { - return - } - c, err := headerChunkType(p[0]) - if err != nil { - return - } - p = p[:headerLen(c)] - if _, err = io.ReadFull(r, p[1:]); err != nil { - return - } - h = new(chunkHeader) - if err = h.UnmarshalBinary(p); err != nil { - return nil, err - } - return h, nil -} - -// uint16BE converts a big-endian uint16 representation to an uint16 -// value. -func uint16BE(p []byte) uint16 { - return uint16(p[0])<<8 | uint16(p[1]) -} - -// putUint16BE puts the big-endian uint16 presentation into the given -// slice. 
-func putUint16BE(p []byte, x uint16) { - p[0] = byte(x >> 8) - p[1] = byte(x) -} - -// chunkState is used to manage the state of the chunks -type chunkState byte - -// start and stop define the initial and terminating state of the chunk -// state -const ( - start chunkState = 'S' - stop chunkState = 'T' -) - -// errors for the chunk state handling -var ( - errChunkType = errors.New("lzma: unexpected chunk type") - errState = errors.New("lzma: wrong chunk state") -) - -// next transitions state based on chunk type input -func (c *chunkState) next(ctype chunkType) error { - switch *c { - // start state - case 'S': - switch ctype { - case cEOS: - *c = 'T' - case cUD: - *c = 'R' - case cLRND: - *c = 'L' - default: - return errChunkType - } - // normal LZMA mode - case 'L': - switch ctype { - case cEOS: - *c = 'T' - case cUD: - *c = 'R' - case cU: - *c = 'U' - case cL, cLR, cLRN, cLRND: - break - default: - return errChunkType - } - // reset required - case 'R': - switch ctype { - case cEOS: - *c = 'T' - case cUD, cU: - break - case cLRN, cLRND: - *c = 'L' - default: - return errChunkType - } - // uncompressed - case 'U': - switch ctype { - case cEOS: - *c = 'T' - case cUD: - *c = 'R' - case cU: - break - case cL, cLR, cLRN, cLRND: - *c = 'L' - default: - return errChunkType - } - // terminal state - case 'T': - return errChunkType - default: - return errState - } - return nil -} - -// defaultChunkType returns the default chunk type for each chunk state. -func (c chunkState) defaultChunkType() chunkType { - switch c { - case 'S': - return cLRND - case 'L', 'U': - return cL - case 'R': - return cLRN - default: - // no error - return cEOS - } -} - -// maxDictCap defines the maximum dictionary capacity supported by the -// LZMA2 dictionary capacity encoding. -const maxDictCap = 1<<32 - 1 - -// maxDictCapCode defines the maximum dictionary capacity code. -const maxDictCapCode = 40 - -// The function decodes the dictionary capacity byte, but doesn't change -// for the correct range of the given byte. -func decodeDictCap(c byte) int64 { - return (2 | int64(c)&1) << (11 + (c>>1)&0x1f) -} - -// DecodeDictCap decodes the encoded dictionary capacity. The function -// returns an error if the code is out of range. -func DecodeDictCap(c byte) (n int64, err error) { - if c >= maxDictCapCode { - if c == maxDictCapCode { - return maxDictCap, nil - } - return 0, errors.New("lzma: invalid dictionary size code") - } - return decodeDictCap(c), nil -} - -// EncodeDictCap encodes a dictionary capacity. The function returns the -// code for the capacity that is greater or equal n. If n exceeds the -// maximum support dictionary capacity, the maximum value is returned. -func EncodeDictCap(n int64) byte { - a, b := byte(0), byte(40) - for a < b { - c := a + (b-a)>>1 - m := decodeDictCap(c) - if n <= m { - if n == m { - return c - } - b = c - } else { - a = c + 1 - } - } - return a -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go deleted file mode 100644 index 6e0edfc8c0..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import "errors" - -// maxPosBits defines the number of bits of the position value that are used to -// to compute the posState value. 
The value is used to select the tree codec -// for length encoding and decoding. -const maxPosBits = 4 - -// minMatchLen and maxMatchLen give the minimum and maximum values for -// encoding and decoding length values. minMatchLen is also used as base -// for the encoded length values. -const ( - minMatchLen = 2 - maxMatchLen = minMatchLen + 16 + 256 - 1 -) - -// lengthCodec support the encoding of the length value. -type lengthCodec struct { - choice [2]prob - low [1 << maxPosBits]treeCodec - mid [1 << maxPosBits]treeCodec - high treeCodec -} - -// deepcopy initializes the lc value as deep copy of the source value. -func (lc *lengthCodec) deepcopy(src *lengthCodec) { - if lc == src { - return - } - lc.choice = src.choice - for i := range lc.low { - lc.low[i].deepcopy(&src.low[i]) - } - for i := range lc.mid { - lc.mid[i].deepcopy(&src.mid[i]) - } - lc.high.deepcopy(&src.high) -} - -// init initializes a new length codec. -func (lc *lengthCodec) init() { - for i := range lc.choice { - lc.choice[i] = probInit - } - for i := range lc.low { - lc.low[i] = makeTreeCodec(3) - } - for i := range lc.mid { - lc.mid[i] = makeTreeCodec(3) - } - lc.high = makeTreeCodec(8) -} - -// Encode encodes the length offset. The length offset l can be compute by -// subtracting minMatchLen (2) from the actual length. -// -// l = length - minMatchLen -// -func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32, -) (err error) { - if l > maxMatchLen-minMatchLen { - return errors.New("lengthCodec.Encode: l out of range") - } - if l < 8 { - if err = lc.choice[0].Encode(e, 0); err != nil { - return - } - return lc.low[posState].Encode(e, l) - } - if err = lc.choice[0].Encode(e, 1); err != nil { - return - } - if l < 16 { - if err = lc.choice[1].Encode(e, 0); err != nil { - return - } - return lc.mid[posState].Encode(e, l-8) - } - if err = lc.choice[1].Encode(e, 1); err != nil { - return - } - if err = lc.high.Encode(e, l-16); err != nil { - return - } - return nil -} - -// Decode reads the length offset. Add minMatchLen to compute the actual length -// to the length offset l. -func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32, -) (l uint32, err error) { - var b uint32 - if b, err = lc.choice[0].Decode(d); err != nil { - return - } - if b == 0 { - l, err = lc.low[posState].Decode(d) - return - } - if b, err = lc.choice[1].Decode(d); err != nil { - return - } - if b == 0 { - l, err = lc.mid[posState].Decode(d) - l += 8 - return - } - l, err = lc.high.Decode(d) - l += 16 - return -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go deleted file mode 100644 index 0bfc763cee..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -// literalCodec supports the encoding of literal. It provides 768 probability -// values per literal state. The upper 512 probabilities are used with the -// context of a match bit. -type literalCodec struct { - probs []prob -} - -// deepcopy initializes literal codec c as a deep copy of the source. -func (c *literalCodec) deepcopy(src *literalCodec) { - if c == src { - return - } - c.probs = make([]prob, len(src.probs)) - copy(c.probs, src.probs) -} - -// init initializes the literal codec. 
-func (c *literalCodec) init(lc, lp int) { - switch { - case !(minLC <= lc && lc <= maxLC): - panic("lc out of range") - case !(minLP <= lp && lp <= maxLP): - panic("lp out of range") - } - c.probs = make([]prob, 0x300<<uint(lc+lp)) - for i := range c.probs { - c.probs[i] = probInit - } -} - -// Encode encodes the byte s using a range encoder as well as the current -// LZMA encoder state, a match byte and the literal state. -func (c *literalCodec) Encode(e *rangeEncoder, s byte, - state uint32, match byte, litState uint32, -) (err error) { - k := litState * 0x300 - probs := c.probs[k : k+0x300] - symbol := uint32(1) - r := uint32(s) - if state >= 7 { - m := uint32(match) - for { - matchBit := (m >> 7) & 1 - m <<= 1 - bit := (r >> 7) & 1 - r <<= 1 - i := ((1 + matchBit) << 8) | symbol - if err = probs[i].Encode(e, bit); err != nil { - return - } - symbol = (symbol << 1) | bit - if matchBit != bit { - break - } - if symbol >= 0x100 { - break - } - } - } - for symbol < 0x100 { - bit := (r >> 7) & 1 - r <<= 1 - if err = probs[symbol].Encode(e, bit); err != nil { - return - } - symbol = (symbol << 1) | bit - } - return nil -} - -// Decode decodes a literal byte using the range decoder as well as the LZMA -// state, a match byte, and the literal state. -func (c *literalCodec) Decode(d *rangeDecoder, - state uint32, match byte, litState uint32, -) (s byte, err error) { - k := litState * 0x300 - probs := c.probs[k : k+0x300] - symbol := uint32(1) - if state >= 7 { - m := uint32(match) - for { - matchBit := (m >> 7) & 1 - m <<= 1 - i := ((1 + matchBit) << 8) | symbol - bit, err := d.DecodeBit(&probs[i]) - if err != nil { - return 0, err - } - symbol = (symbol << 1) | bit - if matchBit != bit { - break - } - if symbol >= 0x100 { - break - } - } - } - for symbol < 0x100 { - bit, err := d.DecodeBit(&probs[symbol]) - if err != nil { - return 0, err - } - symbol = (symbol << 1) | bit - } - s = byte(symbol - 0x100) - return s, nil -} - -// minLC and maxLC define the range for LC values. -const ( - minLC = 0 - maxLC = 8 -) - -// minLP and maxLP define the range for LP values. -const ( - minLP = 0 - maxLP = 4 -) diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go deleted file mode 100644 index 96ebda0fd3..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import "errors" - -// MatchAlgorithm identifies an algorithm to find matches in the -// dictionary. -type MatchAlgorithm byte - -// Supported matcher algorithms. -const ( - HashTable4 MatchAlgorithm = iota - BinaryTree -) - -// maStrings are used by the String method. -var maStrings = map[MatchAlgorithm]string{ - HashTable4: "HashTable4", - BinaryTree: "BinaryTree", -} - -// String returns a string representation of the Matcher. -func (a MatchAlgorithm) String() string { - if s, ok := maStrings[a]; ok { - return s - } - return "unknown" -} - -var errUnsupportedMatchAlgorithm = errors.New( - "lzma: unsupported match algorithm value") - -// verify checks whether the matcher value is supported. -func (a MatchAlgorithm) verify() error { - if _, ok := maStrings[a]; !ok { - return errUnsupportedMatchAlgorithm - } - return nil -} - -func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) { - switch a { - case HashTable4: - return newHashTable(dictCap, 4) - case BinaryTree: - return newBinTree(dictCap) - } - return nil, errUnsupportedMatchAlgorithm -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go deleted file mode 100644 index 026ce48af2..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/operation.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved.
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "fmt" - "unicode" -) - -// operation represents an operation on the dictionary during encoding or -// decoding. -type operation interface { - Len() int -} - -// rep represents a repetition at the given distance and the given length -type match struct { - // supports all possible distance values, including the eos marker - distance int64 - // length - n int -} - -// Len returns the number of bytes matched. -func (m match) Len() int { - return m.n -} - -// String returns a string representation for the repetition. -func (m match) String() string { - return fmt.Sprintf("M{%d,%d}", m.distance, m.n) -} - -// lit represents a single byte literal. -type lit struct { - b byte -} - -// Len returns 1 for the single byte literal. -func (l lit) Len() int { - return 1 -} - -// String returns a string representation for the literal. -func (l lit) String() string { - var c byte - if unicode.IsPrint(rune(l.b)) { - c = l.b - } else { - c = '.' - } - return fmt.Sprintf("L{%c/%02x}", c, l.b) -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go deleted file mode 100644 index 9a2648e0f7..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/prob.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -// movebits defines the number of bits used for the updates of probability -// values. -const movebits = 5 - -// probbits defines the number of bits of a probability value. -const probbits = 11 - -// probInit defines 0.5 as initial value for prob values. -const probInit prob = 1 << (probbits - 1) - -// Type prob represents probabilities. The type can also be used to encode and -// decode single bits. -type prob uint16 - -// Dec decreases the probability. The decrease is proportional to the -// probability value. -func (p *prob) dec() { - *p -= *p >> movebits -} - -// Inc increases the probability. The Increase is proportional to the -// difference of 1 and the probability value. -func (p *prob) inc() { - *p += ((1 << probbits) - *p) >> movebits -} - -// Computes the new bound for a given range using the probability value. -func (p prob) bound(r uint32) uint32 { - return (r >> probbits) * uint32(p) -} - -// Bits returns 1. One is the number of bits that can be encoded or decoded -// with a single prob value. -func (p prob) Bits() int { - return 1 -} - -// Encode encodes the least-significant bit of v. Note that the p value will be -// changed. -func (p *prob) Encode(e *rangeEncoder, v uint32) error { - return e.EncodeBit(v, p) -} - -// Decode decodes a single bit. Note that the p value will change. -func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { - return d.DecodeBit(p) -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go deleted file mode 100644 index f229fc9fe8..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/properties.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "fmt" -) - -// maximum and minimum values for the LZMA properties. 
-const ( - minPB = 0 - maxPB = 4 -) - -// maxPropertyCode is the possible maximum of a properties code byte. -const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 - -// Properties contains the parameters LC, LP and PB. The parameter LC -// defines the number of literal context bits; parameter LP the number -// of literal position bits and PB the number of position bits. -type Properties struct { - LC int - LP int - PB int -} - -// String returns the properties in a string representation. -func (p *Properties) String() string { - return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) -} - -// PropertiesForCode converts a properties code byte into a Properties value. -func PropertiesForCode(code byte) (p Properties, err error) { - if code > maxPropertyCode { - return p, errors.New("lzma: invalid properties code") - } - p.LC = int(code % 9) - code /= 9 - p.LP = int(code % 5) - code /= 5 - p.PB = int(code % 5) - return p, err -} - -// verify checks the properties for correctness. -func (p *Properties) verify() error { - if p == nil { - return errors.New("lzma: properties are nil") - } - if !(minLC <= p.LC && p.LC <= maxLC) { - return errors.New("lzma: lc out of range") - } - if !(minLP <= p.LP && p.LP <= maxLP) { - return errors.New("lzma: lp out of range") - } - if !(minPB <= p.PB && p.PB <= maxPB) { - return errors.New("lzma: pb out of range") - } - return nil -} - -// Code converts the properties to a byte. The function assumes that -// the properties components are all in range. -func (p Properties) Code() byte { - return byte((p.PB*5+p.LP)*9 + p.LC) -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go deleted file mode 100644 index 57f1ab904a..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "io" -) - -// rangeEncoder implements range encoding of single bits. The low value can -// overflow therefore we need uint64. The cache value is used to handle -// overflows. -type rangeEncoder struct { - lbw *LimitedByteWriter - nrange uint32 - low uint64 - cacheLen int64 - cache byte -} - -// maxInt64 provides the maximal value of the int64 type -const maxInt64 = 1<<63 - 1 - -// newRangeEncoder creates a new range encoder. -func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { - lbw, ok := bw.(*LimitedByteWriter) - if !ok { - lbw = &LimitedByteWriter{BW: bw, N: maxInt64} - } - return &rangeEncoder{ - lbw: lbw, - nrange: 0xffffffff, - cacheLen: 1}, nil -} - -// Available returns the number of bytes that still can be written. The -// method takes the bytes that will be currently written by Close into -// account. -func (e *rangeEncoder) Available() int64 { - return e.lbw.N - (e.cacheLen + 4) -} - -// writeByte writes a single byte to the underlying writer. An error is -// returned if the limit is reached. The written byte will be counted if -// the underlying writer doesn't return an error. -func (e *rangeEncoder) writeByte(c byte) error { - if e.Available() < 1 { - return ErrLimit - } - return e.lbw.WriteByte(c) -} - -// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. 
-func (e *rangeEncoder) DirectEncodeBit(b uint32) error { - e.nrange >>= 1 - e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) - - // normalize - const top = 1 << 24 - if e.nrange >= top { - return nil - } - e.nrange <<= 8 - return e.shiftLow() -} - -// EncodeBit encodes the least significant bit of b. The p value will be -// updated by the function depending on the bit encoded. -func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { - bound := p.bound(e.nrange) - if b&1 == 0 { - e.nrange = bound - p.inc() - } else { - e.low += uint64(bound) - e.nrange -= bound - p.dec() - } - - // normalize - const top = 1 << 24 - if e.nrange >= top { - return nil - } - e.nrange <<= 8 - return e.shiftLow() -} - -// Close writes a complete copy of the low value. -func (e *rangeEncoder) Close() error { - for i := 0; i < 5; i++ { - if err := e.shiftLow(); err != nil { - return err - } - } - return nil -} - -// shiftLow shifts the low value for 8 bit. The shifted byte is written into -// the byte writer. The cache value is used to handle overflows. -func (e *rangeEncoder) shiftLow() error { - if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { - tmp := e.cache - for { - err := e.writeByte(tmp + byte(e.low>>32)) - if err != nil { - return err - } - tmp = 0xff - e.cacheLen-- - if e.cacheLen <= 0 { - if e.cacheLen < 0 { - panic("negative cacheLen") - } - break - } - } - e.cache = byte(uint32(e.low) >> 24) - } - e.cacheLen++ - e.low = uint64(uint32(e.low) << 8) - return nil -} - -// rangeDecoder decodes single bits of the range encoding stream. -type rangeDecoder struct { - br io.ByteReader - nrange uint32 - code uint32 -} - -// newRangeDecoder initializes a range decoder. It reads five bytes from the -// reader and therefore may return an error. -func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { - d = &rangeDecoder{br: br, nrange: 0xffffffff} - - b, err := d.br.ReadByte() - if err != nil { - return nil, err - } - if b != 0 { - return nil, errors.New("newRangeDecoder: first byte not zero") - } - - for i := 0; i < 4; i++ { - if err = d.updateCode(); err != nil { - return nil, err - } - } - - if d.code >= d.nrange { - return nil, errors.New("newRangeDecoder: d.code >= d.nrange") - } - - return d, nil -} - -// possiblyAtEnd checks whether the decoder may be at the end of the stream. -func (d *rangeDecoder) possiblyAtEnd() bool { - return d.code == 0 -} - -// DirectDecodeBit decodes a bit with probability 1/2. The return value b will -// contain the bit at the least-significant position. All other bits will be -// zero. -func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { - d.nrange >>= 1 - d.code -= d.nrange - t := 0 - (d.code >> 31) - d.code += d.nrange & t - b = (t + 1) & 1 - - // d.code will stay less then d.nrange - - // normalize - // assume d.code < d.nrange - const top = 1 << 24 - if d.nrange >= top { - return b, nil - } - d.nrange <<= 8 - // d.code < d.nrange will be maintained - return b, d.updateCode() -} - -// decodeBit decodes a single bit. The bit will be returned at the -// least-significant position. All other bits will be zero. The probability -// value will be updated. 
-func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { - bound := p.bound(d.nrange) - if d.code < bound { - d.nrange = bound - p.inc() - b = 0 - } else { - d.code -= bound - d.nrange -= bound - p.dec() - b = 1 - } - // normalize - // assume d.code < d.nrange - const top = 1 << 24 - if d.nrange >= top { - return b, nil - } - d.nrange <<= 8 - // d.code < d.nrange will be maintained - return b, d.updateCode() -} - -// updateCode reads a new byte into the code. -func (d *rangeDecoder) updateCode() error { - b, err := d.br.ReadByte() - if err != nil { - return err - } - d.code = (d.code << 8) | uint32(b) - return nil -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go deleted file mode 100644 index 2ed13c886e..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/reader.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package lzma supports the decoding and encoding of LZMA streams. -// Reader and Writer support the classic LZMA format. Reader2 and -// Writer2 support the decoding and encoding of LZMA2 streams. -// -// The package is written completely in Go and doesn't rely on any external -// library. -package lzma - -import ( - "errors" - "io" -) - -// ReaderConfig stores the parameters for the reader of the classic LZMA -// format. -type ReaderConfig struct { - DictCap int -} - -// fill converts the zero values of the configuration to the default values. -func (c *ReaderConfig) fill() { - if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 - } -} - -// Verify checks the reader configuration for errors. Zero values will -// be replaced by default values. -func (c *ReaderConfig) Verify() error { - c.fill() - if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { - return errors.New("lzma: dictionary capacity is out of range") - } - return nil -} - -// Reader provides a reader for LZMA files or streams. -type Reader struct { - lzma io.Reader - h header - d *decoder -} - -// NewReader creates a new reader for an LZMA stream using the classic -// format. NewReader reads and checks the header of the LZMA stream. -func NewReader(lzma io.Reader) (r *Reader, err error) { - return ReaderConfig{}.NewReader(lzma) -} - -// NewReader creates a new reader for an LZMA stream in the classic -// format. The function reads and verifies the the header of the LZMA -// stream. -func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - data := make([]byte, HeaderLen) - if _, err := io.ReadFull(lzma, data); err != nil { - if err == io.EOF { - return nil, errors.New("lzma: unexpected EOF") - } - return nil, err - } - r = &Reader{lzma: lzma} - if err = r.h.unmarshalBinary(data); err != nil { - return nil, err - } - if r.h.dictCap < MinDictCap { - return nil, errors.New("lzma: dictionary capacity too small") - } - dictCap := r.h.dictCap - if c.DictCap > dictCap { - dictCap = c.DictCap - } - - state := newState(r.h.properties) - dict, err := newDecoderDict(dictCap) - if err != nil { - return nil, err - } - r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) - if err != nil { - return nil, err - } - return r, nil -} - -// EOSMarker indicates that an EOS marker has been encountered. -func (r *Reader) EOSMarker() bool { - return r.d.eosMarker -} - -// Read returns uncompressed data. 
-func (r *Reader) Read(p []byte) (n int, err error) { - return r.d.Read(p) -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go deleted file mode 100644 index de3da37ee6..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/reader2.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "io" - - "github.com/ulikunitz/xz/internal/xlog" -) - -// Reader2Config stores the parameters for the LZMA2 reader. -// format. -type Reader2Config struct { - DictCap int -} - -// fill converts the zero values of the configuration to the default values. -func (c *Reader2Config) fill() { - if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 - } -} - -// Verify checks the reader configuration for errors. Zero configuration values -// will be replaced by default values. -func (c *Reader2Config) Verify() error { - c.fill() - if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { - return errors.New("lzma: dictionary capacity is out of range") - } - return nil -} - -// Reader2 supports the reading of LZMA2 chunk sequences. Note that the -// first chunk should have a dictionary reset and the first compressed -// chunk a properties reset. The chunk sequence may not be terminated by -// an end-of-stream chunk. -type Reader2 struct { - r io.Reader - err error - - dict *decoderDict - ur *uncompressedReader - decoder *decoder - chunkReader io.Reader - - cstate chunkState -} - -// NewReader2 creates a reader for an LZMA2 chunk sequence. -func NewReader2(lzma2 io.Reader) (r *Reader2, err error) { - return Reader2Config{}.NewReader2(lzma2) -} - -// NewReader2 creates an LZMA2 reader using the given configuration. -func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - r = &Reader2{r: lzma2, cstate: start} - r.dict, err = newDecoderDict(c.DictCap) - if err != nil { - return nil, err - } - if err = r.startChunk(); err != nil { - r.err = err - } - return r, nil -} - -// uncompressed tests whether the chunk type specifies an uncompressed -// chunk. -func uncompressed(ctype chunkType) bool { - return ctype == cU || ctype == cUD -} - -// startChunk parses a new chunk. 
-func (r *Reader2) startChunk() error { - r.chunkReader = nil - header, err := readChunkHeader(r.r) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err - } - xlog.Debugf("chunk header %v", header) - if err = r.cstate.next(header.ctype); err != nil { - return err - } - if r.cstate == stop { - return io.EOF - } - if header.ctype == cUD || header.ctype == cLRND { - r.dict.Reset() - } - size := int64(header.uncompressed) + 1 - if uncompressed(header.ctype) { - if r.ur != nil { - r.ur.Reopen(r.r, size) - } else { - r.ur = newUncompressedReader(r.r, r.dict, size) - } - r.chunkReader = r.ur - return nil - } - br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) - if r.decoder == nil { - state := newState(header.props) - r.decoder, err = newDecoder(br, state, r.dict, size) - if err != nil { - return err - } - r.chunkReader = r.decoder - return nil - } - switch header.ctype { - case cLR: - r.decoder.State.Reset() - case cLRN, cLRND: - r.decoder.State = newState(header.props) - } - err = r.decoder.Reopen(br, size) - if err != nil { - return err - } - r.chunkReader = r.decoder - return nil -} - -// Read reads data from the LZMA2 chunk sequence. -func (r *Reader2) Read(p []byte) (n int, err error) { - if r.err != nil { - return 0, r.err - } - for n < len(p) { - var k int - k, err = r.chunkReader.Read(p[n:]) - n += k - if err != nil { - if err == io.EOF { - err = r.startChunk() - if err == nil { - continue - } - } - r.err = err - return n, err - } - if k == 0 { - r.err = errors.New("lzma: Reader2 doesn't get data") - return n, r.err - } - } - return n, nil -} - -// EOS returns whether the LZMA2 stream has been terminated by an -// end-of-stream chunk. -func (r *Reader2) EOS() bool { - return r.cstate == stop -} - -// uncompressedReader is used to read uncompressed chunks. -type uncompressedReader struct { - lr io.LimitedReader - Dict *decoderDict - eof bool - err error -} - -// newUncompressedReader initializes a new uncompressedReader. -func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { - ur := &uncompressedReader{ - lr: io.LimitedReader{R: r, N: size}, - Dict: dict, - } - return ur -} - -// Reopen reinitializes an uncompressed reader. -func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { - ur.err = nil - ur.eof = false - ur.lr = io.LimitedReader{R: r, N: size} -} - -// fill reads uncompressed data into the dictionary. -func (ur *uncompressedReader) fill() error { - if !ur.eof { - n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) - if err != io.EOF { - return err - } - ur.eof = true - if n > 0 { - return nil - } - } - if ur.lr.N != 0 { - return io.ErrUnexpectedEOF - } - return io.EOF -} - -// Read reads uncompressed data from the limited reader. -func (ur *uncompressedReader) Read(p []byte) (n int, err error) { - if ur.err != nil { - return 0, ur.err - } - for { - var k int - k, err = ur.Dict.Read(p[n:]) - n += k - if n >= len(p) { - return n, nil - } - if err != nil { - break - } - err = ur.fill() - if err != nil { - break - } - } - ur.err = err - return n, err -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go deleted file mode 100644 index 09d62f7d99..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/state.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package lzma - -// states defines the overall state count -const states = 12 - -// State maintains the full state of the operation encoding or decoding -// process. -type state struct { - rep [4]uint32 - isMatch [states << maxPosBits]prob - isRepG0Long [states << maxPosBits]prob - isRep [states]prob - isRepG0 [states]prob - isRepG1 [states]prob - isRepG2 [states]prob - litCodec literalCodec - lenCodec lengthCodec - repLenCodec lengthCodec - distCodec distCodec - state uint32 - posBitMask uint32 - Properties Properties -} - -// initProbSlice initializes a slice of probabilities. -func initProbSlice(p []prob) { - for i := range p { - p[i] = probInit - } -} - -// Reset sets all state information to the original values. -func (s *state) Reset() { - p := s.Properties - *s = state{ - Properties: p, - // dict: s.dict, - posBitMask: (uint32(1) << uint(p.PB)) - 1, - } - initProbSlice(s.isMatch[:]) - initProbSlice(s.isRep[:]) - initProbSlice(s.isRepG0[:]) - initProbSlice(s.isRepG1[:]) - initProbSlice(s.isRepG2[:]) - initProbSlice(s.isRepG0Long[:]) - s.litCodec.init(p.LC, p.LP) - s.lenCodec.init() - s.repLenCodec.init() - s.distCodec.init() -} - -// newState creates a new state from the give Properties. -func newState(p Properties) *state { - s := &state{Properties: p} - s.Reset() - return s -} - -// deepcopy initializes s as a deep copy of the source. -func (s *state) deepcopy(src *state) { - if s == src { - return - } - s.rep = src.rep - s.isMatch = src.isMatch - s.isRepG0Long = src.isRepG0Long - s.isRep = src.isRep - s.isRepG0 = src.isRepG0 - s.isRepG1 = src.isRepG1 - s.isRepG2 = src.isRepG2 - s.litCodec.deepcopy(&src.litCodec) - s.lenCodec.deepcopy(&src.lenCodec) - s.repLenCodec.deepcopy(&src.repLenCodec) - s.distCodec.deepcopy(&src.distCodec) - s.state = src.state - s.posBitMask = src.posBitMask - s.Properties = src.Properties -} - -// cloneState creates a new clone of the give state. -func cloneState(src *state) *state { - s := new(state) - s.deepcopy(src) - return s -} - -// updateStateLiteral updates the state for a literal. -func (s *state) updateStateLiteral() { - switch { - case s.state < 4: - s.state = 0 - return - case s.state < 10: - s.state -= 3 - return - } - s.state -= 6 -} - -// updateStateMatch updates the state for a match. -func (s *state) updateStateMatch() { - if s.state < 7 { - s.state = 7 - } else { - s.state = 10 - } -} - -// updateStateRep updates the state for a repetition. -func (s *state) updateStateRep() { - if s.state < 7 { - s.state = 8 - } else { - s.state = 11 - } -} - -// updateStateShortRep updates the state for a short repetition. -func (s *state) updateStateShortRep() { - if s.state < 7 { - s.state = 9 - } else { - s.state = 11 - } -} - -// states computes the states of the operation codec. -func (s *state) states(dictHead int64) (state1, state2, posState uint32) { - state1 = s.state - posState = uint32(dictHead) & s.posBitMask - state2 = (s.state << maxPosBits) | posState - return -} - -// litState computes the literal state. -func (s *state) litState(prev byte, dictHead int64) uint32 { - lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) - litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | - (uint32(prev) >> (8 - lc)) - return litState -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go deleted file mode 100644 index 6e927e9359..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -// treeCodec encodes or decodes values with a fixed bit size. It is using a -// tree of probability value. The root of the tree is the most-significant bit. -type treeCodec struct { - probTree -} - -// makeTreeCodec makes a tree codec. The bits value must be inside the range -// [1,32]. -func makeTreeCodec(bits int) treeCodec { - return treeCodec{makeProbTree(bits)} -} - -// deepcopy initializes tc as a deep copy of the source. -func (tc *treeCodec) deepcopy(src *treeCodec) { - tc.probTree.deepcopy(&src.probTree) -} - -// Encode uses the range encoder to encode a fixed-bit-size value. -func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) { - m := uint32(1) - for i := int(tc.bits) - 1; i >= 0; i-- { - b := (v >> uint(i)) & 1 - if err := e.EncodeBit(b, &tc.probs[m]); err != nil { - return err - } - m = (m << 1) | b - } - return nil -} - -// Decodes uses the range decoder to decode a fixed-bit-size value. Errors may -// be caused by the range decoder. -func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) { - m := uint32(1) - for j := 0; j < int(tc.bits); j++ { - b, err := d.DecodeBit(&tc.probs[m]) - if err != nil { - return 0, err - } - m = (m << 1) | b - } - return m - (1 << uint(tc.bits)), nil -} - -// treeReverseCodec is another tree codec, where the least-significant bit is -// the start of the probability tree. -type treeReverseCodec struct { - probTree -} - -// deepcopy initializes the treeReverseCodec as a deep copy of the -// source. -func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) { - tc.probTree.deepcopy(&src.probTree) -} - -// makeTreeReverseCodec creates treeReverseCodec value. The bits argument must -// be in the range [1,32]. -func makeTreeReverseCodec(bits int) treeReverseCodec { - return treeReverseCodec{makeProbTree(bits)} -} - -// Encode uses range encoder to encode a fixed-bit-size value. The range -// encoder may cause errors. -func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) { - m := uint32(1) - for i := uint(0); i < uint(tc.bits); i++ { - b := (v >> i) & 1 - if err := e.EncodeBit(b, &tc.probs[m]); err != nil { - return err - } - m = (m << 1) | b - } - return nil -} - -// Decodes uses the range decoder to decode a fixed-bit-size value. Errors -// returned by the range decoder will be returned. -func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) { - m := uint32(1) - for j := uint(0); j < uint(tc.bits); j++ { - b, err := d.DecodeBit(&tc.probs[m]) - if err != nil { - return 0, err - } - m = (m << 1) | b - v |= b << j - } - return v, nil -} - -// probTree stores enough probability values to be used by the treeEncode and -// treeDecode methods of the range coder types. -type probTree struct { - probs []prob - bits byte -} - -// deepcopy initializes the probTree value as a deep copy of the source. -func (t *probTree) deepcopy(src *probTree) { - if t == src { - return - } - t.probs = make([]prob, len(src.probs)) - copy(t.probs, src.probs) - t.bits = src.bits -} - -// makeProbTree initializes a probTree structure. -func makeProbTree(bits int) probTree { - if !(1 <= bits && bits <= 32) { - panic("bits outside of range [1,32]") - } - t := probTree{ - bits: byte(bits), - probs: make([]prob, 1<<uint(bits)), - } - for i := range t.probs { - t.probs[i] = probInit - } - return t -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go deleted file mode 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/writer.go +++ /dev/null -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "bufio" - "errors" - "io" -) - -// MinDictCap and MaxDictCap provide the range of supported dictionary -// capacities. -const ( - MinDictCap = 1 << 12 - MaxDictCap = 1<<32 - 1 -) - -// WriterConfig defines the configuration parameter for a writer. -type WriterConfig struct { - // Properties for the encoding. If the it is nil the value - // {LC: 3, LP: 0, PB: 2} will be chosen. - Properties *Properties - // The capacity of the dictionary. If DictCap is zero, the value - // 8 MiB will be chosen. - DictCap int - // Size of the lookahead buffer; value 0 indicates default size - // 4096 - BufSize int - // Match algorithm - Matcher MatchAlgorithm - // SizeInHeader indicates that the header will contain the size. - SizeInHeader bool - // Size of the data to be encoded. A positive value will imply - // SizeInHeader. - Size int64 - // EOSMarker requests whether the EOSMarker needs to be written. - // If no explicit size is given the EOSMarker will be set - // automatically. - EOSMarker bool -} - -// fill converts the zero values of the configuration to the default values. -func (c *WriterConfig) fill() { - if c.Properties == nil { - c.Properties = &Properties{LC: 3, LP: 0, PB: 2} - } - if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 - } - if c.BufSize == 0 { - c.BufSize = 4096 - } - if c.Size > 0 { - c.SizeInHeader = true - } - if !c.SizeInHeader { - c.EOSMarker = true - } -} - -// Verify checks WriterConfig for errors.
Verify will replace zero -// values with default values. -func (c *WriterConfig) Verify() error { - c.fill() - var err error - if c == nil { - return errors.New("lzma: WriterConfig is nil") - } - if c.Properties == nil { - return errors.New("lzma: WriterConfig has no Properties set") - } - if err = c.Properties.verify(); err != nil { - return err - } - if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { - return errors.New("lzma: dictionary capacity is out of range") - } - if !(maxMatchLen <= c.BufSize) { - return errors.New("lzma: lookahead buffer size too small") - } - if c.SizeInHeader { - if c.Size < 0 { - return errors.New("lzma: negative size not supported") - } - } else if !c.EOSMarker { - return errors.New("lzma: EOS marker is required") - } - if err = c.Matcher.verify(); err != nil { - return err - } - - return nil -} - -// header returns the header structure for this configuration. -func (c *WriterConfig) header() header { - h := header{ - properties: *c.Properties, - dictCap: c.DictCap, - size: -1, - } - if c.SizeInHeader { - h.size = c.Size - } - return h -} - -// Writer writes an LZMA stream in the classic format. -type Writer struct { - h header - bw io.ByteWriter - buf *bufio.Writer - e *encoder -} - -// NewWriter creates a new LZMA writer for the classic format. The -// method will write the header to the underlying stream. -func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - w = &Writer{h: c.header()} - - var ok bool - w.bw, ok = lzma.(io.ByteWriter) - if !ok { - w.buf = bufio.NewWriter(lzma) - w.bw = w.buf - } - state := newState(w.h.properties) - m, err := c.Matcher.new(w.h.dictCap) - if err != nil { - return nil, err - } - dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m) - if err != nil { - return nil, err - } - var flags encoderFlags - if c.EOSMarker { - flags = eosMarker - } - if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil { - return nil, err - } - - if err = w.writeHeader(); err != nil { - return nil, err - } - return w, nil -} - -// NewWriter creates a new LZMA writer using the classic format. The -// function writes the header to the underlying stream. -func NewWriter(lzma io.Writer) (w *Writer, err error) { - return WriterConfig{}.NewWriter(lzma) -} - -// writeHeader writes the LZMA header into the stream. -func (w *Writer) writeHeader() error { - data, err := w.h.marshalBinary() - if err != nil { - return err - } - _, err = w.bw.(io.Writer).Write(data) - return err -} - -// Write puts data into the Writer. -func (w *Writer) Write(p []byte) (n int, err error) { - if w.h.size >= 0 { - m := w.h.size - m -= w.e.Compressed() + int64(w.e.dict.Buffered()) - if m < 0 { - m = 0 - } - if m < int64(len(p)) { - p = p[:m] - err = ErrNoSpace - } - } - var werr error - if n, werr = w.e.Write(p); werr != nil { - err = werr - } - return n, err -} - -// Close closes the writer stream. It ensures that all data from the -// buffer will be compressed and the LZMA stream will be finished. 
-func (w *Writer) Close() error { - if w.h.size >= 0 { - n := w.e.Compressed() + int64(w.e.dict.Buffered()) - if n != w.h.size { - return errSize - } - } - err := w.e.Close() - if w.buf != nil { - ferr := w.buf.Flush() - if err == nil { - err = ferr - } - } - return err -} diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go deleted file mode 100644 index dfaaec95b6..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzma/writer2.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "bytes" - "errors" - "io" -) - -// Writer2Config is used to create a Writer2 using parameters. -type Writer2Config struct { - // The properties for the encoding. If the it is nil the value - // {LC: 3, LP: 0, PB: 2} will be chosen. - Properties *Properties - // The capacity of the dictionary. If DictCap is zero, the value - // 8 MiB will be chosen. - DictCap int - // Size of the lookahead buffer; value 0 indicates default size - // 4096 - BufSize int - // Match algorithm - Matcher MatchAlgorithm -} - -// fill replaces zero values with default values. -func (c *Writer2Config) fill() { - if c.Properties == nil { - c.Properties = &Properties{LC: 3, LP: 0, PB: 2} - } - if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 - } - if c.BufSize == 0 { - c.BufSize = 4096 - } -} - -// Verify checks the Writer2Config for correctness. Zero values will be -// replaced by default values. -func (c *Writer2Config) Verify() error { - c.fill() - var err error - if c == nil { - return errors.New("lzma: WriterConfig is nil") - } - if c.Properties == nil { - return errors.New("lzma: WriterConfig has no Properties set") - } - if err = c.Properties.verify(); err != nil { - return err - } - if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { - return errors.New("lzma: dictionary capacity is out of range") - } - if !(maxMatchLen <= c.BufSize) { - return errors.New("lzma: lookahead buffer size too small") - } - if c.Properties.LC+c.Properties.LP > 4 { - return errors.New("lzma: sum of lc and lp exceeds 4") - } - if err = c.Matcher.verify(); err != nil { - return err - } - return nil -} - -// Writer2 supports the creation of an LZMA2 stream. But note that -// written data is buffered, so call Flush or Close to write data to the -// underlying writer. The Close method writes the end-of-stream marker -// to the stream. So you may be able to concatenate the output of two -// writers as long the output of the first writer has only been flushed -// but not closed. -// -// Any change to the fields Properties, DictCap must be done before the -// first call to Write, Flush or Close. -type Writer2 struct { - w io.Writer - - start *state - encoder *encoder - - cstate chunkState - ctype chunkType - - buf bytes.Buffer - lbw LimitedByteWriter -} - -// NewWriter2 creates an LZMA2 chunk sequence writer with the default -// parameters and options. -func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { - return Writer2Config{}.NewWriter2(lzma2) -} - -// NewWriter2 creates a new LZMA2 writer using the given configuration. 
-func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - w = &Writer2{ - w: lzma2, - start: newState(*c.Properties), - cstate: start, - ctype: start.defaultChunkType(), - } - w.buf.Grow(maxCompressed) - w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} - m, err := c.Matcher.new(c.DictCap) - if err != nil { - return nil, err - } - d, err := newEncoderDict(c.DictCap, c.BufSize, m) - if err != nil { - return nil, err - } - w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) - if err != nil { - return nil, err - } - return w, nil -} - -// written returns the number of bytes written to the current chunk -func (w *Writer2) written() int { - if w.encoder == nil { - return 0 - } - return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() -} - -// errClosed indicates that the writer is closed. -var errClosed = errors.New("lzma: writer closed") - -// Writes data to LZMA2 stream. Note that written data will be buffered. -// Use Flush or Close to ensure that data is written to the underlying -// writer. -func (w *Writer2) Write(p []byte) (n int, err error) { - if w.cstate == stop { - return 0, errClosed - } - for n < len(p) { - m := maxUncompressed - w.written() - if m <= 0 { - panic("lzma: maxUncompressed reached") - } - var q []byte - if n+m < len(p) { - q = p[n : n+m] - } else { - q = p[n:] - } - k, err := w.encoder.Write(q) - n += k - if err != nil && err != ErrLimit { - return n, err - } - if err == ErrLimit || k == m { - if err = w.flushChunk(); err != nil { - return n, err - } - } - } - return n, nil -} - -// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 -// stream. -func (w *Writer2) writeUncompressedChunk() error { - u := w.encoder.Compressed() - if u <= 0 { - return errors.New("lzma: can't write empty uncompressed chunk") - } - if u > maxUncompressed { - panic("overrun of uncompressed data limit") - } - switch w.ctype { - case cLRND: - w.ctype = cUD - default: - w.ctype = cU - } - w.encoder.state = w.start - - header := chunkHeader{ - ctype: w.ctype, - uncompressed: uint32(u - 1), - } - hdata, err := header.MarshalBinary() - if err != nil { - return err - } - if _, err = w.w.Write(hdata); err != nil { - return err - } - _, err = w.encoder.dict.CopyN(w.w, int(u)) - return err -} - -// writeCompressedChunk writes a compressed chunk to the underlying -// writer. -func (w *Writer2) writeCompressedChunk() error { - if w.ctype == cU || w.ctype == cUD { - panic("chunk type uncompressed") - } - - u := w.encoder.Compressed() - if u <= 0 { - return errors.New("writeCompressedChunk: empty chunk") - } - if u > maxUncompressed { - panic("overrun of uncompressed data limit") - } - c := w.buf.Len() - if c <= 0 { - panic("no compressed data") - } - if c > maxCompressed { - panic("overrun of compressed data limit") - } - header := chunkHeader{ - ctype: w.ctype, - uncompressed: uint32(u - 1), - compressed: uint16(c - 1), - props: w.encoder.state.Properties, - } - hdata, err := header.MarshalBinary() - if err != nil { - return err - } - if _, err = w.w.Write(hdata); err != nil { - return err - } - _, err = io.Copy(w.w, &w.buf) - return err -} - -// writes a single chunk to the underlying writer. -func (w *Writer2) writeChunk() error { - u := int(uncompressedHeaderLen + w.encoder.Compressed()) - c := headerLen(w.ctype) + w.buf.Len() - if u < c { - return w.writeUncompressedChunk() - } - return w.writeCompressedChunk() -} - -// flushChunk terminates the current chunk. 
The encoder will be reset -// to support the next chunk. -func (w *Writer2) flushChunk() error { - if w.written() == 0 { - return nil - } - var err error - if err = w.encoder.Close(); err != nil { - return err - } - if err = w.writeChunk(); err != nil { - return err - } - w.buf.Reset() - w.lbw.N = maxCompressed - if err = w.encoder.Reopen(&w.lbw); err != nil { - return err - } - if err = w.cstate.next(w.ctype); err != nil { - return err - } - w.ctype = w.cstate.defaultChunkType() - w.start = cloneState(w.encoder.state) - return nil -} - -// Flush writes all buffered data out to the underlying stream. This -// could result in multiple chunks to be created. -func (w *Writer2) Flush() error { - if w.cstate == stop { - return errClosed - } - for w.written() > 0 { - if err := w.flushChunk(); err != nil { - return err - } - } - return nil -} - -// Close terminates the LZMA2 stream with an EOS chunk. -func (w *Writer2) Close() error { - if w.cstate == stop { - return errClosed - } - if err := w.Flush(); err != nil { - return nil - } - // write zero byte EOS chunk - _, err := w.w.Write([]byte{0}) - if err != nil { - return err - } - w.cstate = stop - return nil -} diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go deleted file mode 100644 index 4f1bb33935..0000000000 --- a/vendor/github.com/ulikunitz/xz/lzmafilter.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xz - -import ( - "errors" - "fmt" - "io" - - "github.com/ulikunitz/xz/lzma" -) - -// LZMA filter constants. -const ( - lzmaFilterID = 0x21 - lzmaFilterLen = 3 -) - -// lzmaFilter declares the LZMA2 filter information stored in an xz -// block header. -type lzmaFilter struct { - dictCap int64 -} - -// String returns a representation of the LZMA filter. -func (f lzmaFilter) String() string { - return fmt.Sprintf("LZMA dict cap %#x", f.dictCap) -} - -// id returns the ID for the LZMA2 filter. -func (f lzmaFilter) id() uint64 { return lzmaFilterID } - -// MarshalBinary converts the lzmaFilter in its encoded representation. -func (f lzmaFilter) MarshalBinary() (data []byte, err error) { - c := lzma.EncodeDictCap(f.dictCap) - return []byte{lzmaFilterID, 1, c}, nil -} - -// UnmarshalBinary unmarshals the given data representation of the LZMA2 -// filter. -func (f *lzmaFilter) UnmarshalBinary(data []byte) error { - if len(data) != lzmaFilterLen { - return errors.New("xz: data for LZMA2 filter has wrong length") - } - if data[0] != lzmaFilterID { - return errors.New("xz: wrong LZMA2 filter id") - } - if data[1] != 1 { - return errors.New("xz: wrong LZMA2 filter size") - } - dc, err := lzma.DecodeDictCap(data[2]) - if err != nil { - return errors.New("xz: wrong LZMA2 dictionary size property") - } - - f.dictCap = dc - return nil -} - -// reader creates a new reader for the LZMA2 filter. -func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader, - err error) { - - config := new(lzma.Reader2Config) - if c != nil { - config.DictCap = c.DictCap - } - dc := int(f.dictCap) - if dc < 1 { - return nil, errors.New("xz: LZMA2 filter parameter " + - "dictionary capacity overflow") - } - if dc > config.DictCap { - config.DictCap = dc - } - - fr, err = config.NewReader2(r) - if err != nil { - return nil, err - } - return fr, nil -} - -// writeCloser creates a io.WriteCloser for the LZMA2 filter. 
-func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, -) (fw io.WriteCloser, err error) { - config := new(lzma.Writer2Config) - if c != nil { - *config = lzma.Writer2Config{ - Properties: c.Properties, - DictCap: c.DictCap, - BufSize: c.BufSize, - Matcher: c.Matcher, - } - } - - dc := int(f.dictCap) - if dc < 1 { - return nil, errors.New("xz: LZMA2 filter parameter " + - "dictionary capacity overflow") - } - if dc > config.DictCap { - config.DictCap = dc - } - - fw, err = config.NewWriter2(w) - if err != nil { - return nil, err - } - return fw, nil -} - -// last returns true, because an LZMA2 filter must be the last filter in -// the filter list. -func (f lzmaFilter) last() bool { return true } diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs deleted file mode 100644 index a8c612ce17..0000000000 --- a/vendor/github.com/ulikunitz/xz/make-docs +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -set -x -pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md -pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md diff --git a/vendor/github.com/ulikunitz/xz/none-check.go b/vendor/github.com/ulikunitz/xz/none-check.go deleted file mode 100644 index 95240135d5..0000000000 --- a/vendor/github.com/ulikunitz/xz/none-check.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xz - -import "hash" - -type noneHash struct{} - -func (h noneHash) Write(p []byte) (n int, err error) { return len(p), nil } - -func (h noneHash) Sum(b []byte) []byte { return b } - -func (h noneHash) Reset() {} - -func (h noneHash) Size() int { return 0 } - -func (h noneHash) BlockSize() int { return 0 } - -func newNoneHash() hash.Hash { - return &noneHash{} -} diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go deleted file mode 100644 index 7f974ffc56..0000000000 --- a/vendor/github.com/ulikunitz/xz/reader.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package xz supports the compression and decompression of xz files. It -// supports version 1.0.4 of the specification without the non-LZMA2 -// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt -package xz - -import ( - "bytes" - "errors" - "fmt" - "hash" - "io" - - "github.com/ulikunitz/xz/internal/xlog" - "github.com/ulikunitz/xz/lzma" -) - -// ReaderConfig defines the parameters for the xz reader. The -// SingleStream parameter requests the reader to assume that the -// underlying stream contains only a single stream. -type ReaderConfig struct { - DictCap int - SingleStream bool -} - -// Verify checks the reader parameters for Validity. Zero values will be -// replaced by default values. -func (c *ReaderConfig) Verify() error { - if c == nil { - return errors.New("xz: reader parameters are nil") - } - lc := lzma.Reader2Config{DictCap: c.DictCap} - if err := lc.Verify(); err != nil { - return err - } - return nil -} - -// Reader supports the reading of one or multiple xz streams. 
-type Reader struct { - ReaderConfig - - xz io.Reader - sr *streamReader -} - -// streamReader decodes a single xz stream -type streamReader struct { - ReaderConfig - - xz io.Reader - br *blockReader - newHash func() hash.Hash - h header - index []record -} - -// NewReader creates a new xz reader using the default parameters. -// The function reads and checks the header of the first XZ stream. The -// reader will process multiple streams including padding. -func NewReader(xz io.Reader) (r *Reader, err error) { - return ReaderConfig{}.NewReader(xz) -} - -// NewReader creates an xz stream reader. The created reader will be -// able to process multiple streams and padding unless a SingleStream -// has been set in the reader configuration c. -func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - r = &Reader{ - ReaderConfig: c, - xz: xz, - } - if r.sr, err = c.newStreamReader(xz); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return nil, err - } - return r, nil -} - -var errUnexpectedData = errors.New("xz: unexpected data after stream") - -// Read reads uncompressed data from the stream. -func (r *Reader) Read(p []byte) (n int, err error) { - for n < len(p) { - if r.sr == nil { - if r.SingleStream { - data := make([]byte, 1) - _, err = io.ReadFull(r.xz, data) - if err != io.EOF { - return n, errUnexpectedData - } - return n, io.EOF - } - for { - r.sr, err = r.ReaderConfig.newStreamReader(r.xz) - if err != errPadding { - break - } - } - if err != nil { - return n, err - } - } - k, err := r.sr.Read(p[n:]) - n += k - if err != nil { - if err == io.EOF { - r.sr = nil - continue - } - return n, err - } - } - return n, nil -} - -var errPadding = errors.New("xz: padding (4 zero bytes) encountered") - -// newStreamReader creates a new xz stream reader using the given configuration -// parameters. NewReader reads and checks the header of the xz stream. -func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - data := make([]byte, HeaderLen) - if _, err := io.ReadFull(xz, data[:4]); err != nil { - return nil, err - } - if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { - return nil, errPadding - } - if _, err = io.ReadFull(xz, data[4:]); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return nil, err - } - r = &streamReader{ - ReaderConfig: c, - xz: xz, - index: make([]record, 0, 4), - } - if err = r.h.UnmarshalBinary(data); err != nil { - return nil, err - } - xlog.Debugf("xz header %s", r.h) - if r.newHash, err = newHashFunc(r.h.flags); err != nil { - return nil, err - } - return r, nil -} - -// readTail reads the index body and the xz footer. 
-func (r *streamReader) readTail() error { - index, n, err := readIndexBody(r.xz, len(r.index)) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err - } - - for i, rec := range r.index { - if rec != index[i] { - return fmt.Errorf("xz: record %d is %v; want %v", - i, rec, index[i]) - } - } - - p := make([]byte, footerLen) - if _, err = io.ReadFull(r.xz, p); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err - } - var f footer - if err = f.UnmarshalBinary(p); err != nil { - return err - } - xlog.Debugf("xz footer %s", f) - if f.flags != r.h.flags { - return errors.New("xz: footer flags incorrect") - } - if f.indexSize != int64(n)+1 { - return errors.New("xz: index size in footer wrong") - } - return nil -} - -// Read reads actual data from the xz stream. -func (r *streamReader) Read(p []byte) (n int, err error) { - for n < len(p) { - if r.br == nil { - bh, hlen, err := readBlockHeader(r.xz) - if err != nil { - if err == errIndexIndicator { - if err = r.readTail(); err != nil { - return n, err - } - return n, io.EOF - } - return n, err - } - xlog.Debugf("block %v", *bh) - r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, - hlen, r.newHash()) - if err != nil { - return n, err - } - } - k, err := r.br.Read(p[n:]) - n += k - if err != nil { - if err == io.EOF { - r.index = append(r.index, r.br.record()) - r.br = nil - } else { - return n, err - } - } - } - return n, nil -} - -// countingReader is a reader that counts the bytes read. -type countingReader struct { - r io.Reader - n int64 -} - -// Read reads data from the wrapped reader and adds it to the n field. -func (lr *countingReader) Read(p []byte) (n int, err error) { - n, err = lr.r.Read(p) - lr.n += int64(n) - return n, err -} - -// blockReader supports the reading of a block. -type blockReader struct { - lxz countingReader - header *blockHeader - headerLen int - n int64 - hash hash.Hash - r io.Reader -} - -// newBlockReader creates a new block reader. -func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, - hlen int, hash hash.Hash) (br *blockReader, err error) { - - br = &blockReader{ - lxz: countingReader{r: xz}, - header: h, - headerLen: hlen, - hash: hash, - } - - fr, err := c.newFilterReader(&br.lxz, h.filters) - if err != nil { - return nil, err - } - if br.hash.Size() != 0 { - br.r = io.TeeReader(fr, br.hash) - } else { - br.r = fr - } - - return br, nil -} - -// uncompressedSize returns the uncompressed size of the block. -func (br *blockReader) uncompressedSize() int64 { - return br.n -} - -// compressedSize returns the compressed size of the block. -func (br *blockReader) compressedSize() int64 { - return br.lxz.n -} - -// unpaddedSize computes the unpadded size for the block. -func (br *blockReader) unpaddedSize() int64 { - n := int64(br.headerLen) - n += br.compressedSize() - n += int64(br.hash.Size()) - return n -} - -// record returns the index record for the current block. -func (br *blockReader) record() record { - return record{br.unpaddedSize(), br.uncompressedSize()} -} - -// Read reads data from the block. 
-func (br *blockReader) Read(p []byte) (n int, err error) { - n, err = br.r.Read(p) - br.n += int64(n) - - u := br.header.uncompressedSize - if u >= 0 && br.uncompressedSize() > u { - return n, errors.New("xz: wrong uncompressed size for block") - } - c := br.header.compressedSize - if c >= 0 && br.compressedSize() > c { - return n, errors.New("xz: wrong compressed size for block") - } - if err != io.EOF { - return n, err - } - if br.uncompressedSize() < u || br.compressedSize() < c { - return n, io.ErrUnexpectedEOF - } - - s := br.hash.Size() - k := padLen(br.lxz.n) - q := make([]byte, k+s, k+2*s) - if _, err = io.ReadFull(br.lxz.r, q); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return n, err - } - if !allZeros(q[:k]) { - return n, errors.New("xz: non-zero block padding") - } - checkSum := q[k:] - computedSum := br.hash.Sum(checkSum[s:]) - if !bytes.Equal(checkSum, computedSum) { - return n, errors.New("xz: checksum error for block") - } - return n, io.EOF -} - -func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader, - err error) { - - if err = verifyFilters(f); err != nil { - return nil, err - } - - fr = r - for i := len(f) - 1; i >= 0; i-- { - fr, err = f[i].reader(fr, c) - if err != nil { - return nil, err - } - } - return fr, nil -} diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go deleted file mode 100644 index 6b3a66620f..0000000000 --- a/vendor/github.com/ulikunitz/xz/writer.go +++ /dev/null @@ -1,399 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xz - -import ( - "errors" - "fmt" - "hash" - "io" - - "github.com/ulikunitz/xz/lzma" -) - -// WriterConfig describe the parameters for an xz writer. -type WriterConfig struct { - Properties *lzma.Properties - DictCap int - BufSize int - BlockSize int64 - // checksum method: CRC32, CRC64 or SHA256 (default: CRC64) - CheckSum byte - // Forces NoChecksum (default: false) - NoCheckSum bool - // match algorithm - Matcher lzma.MatchAlgorithm -} - -// fill replaces zero values with default values. -func (c *WriterConfig) fill() { - if c.Properties == nil { - c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2} - } - if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 - } - if c.BufSize == 0 { - c.BufSize = 4096 - } - if c.BlockSize == 0 { - c.BlockSize = maxInt64 - } - if c.CheckSum == 0 { - c.CheckSum = CRC64 - } - if c.NoCheckSum { - c.CheckSum = None - } -} - -// Verify checks the configuration for errors. Zero values will be -// replaced by default values. -func (c *WriterConfig) Verify() error { - if c == nil { - return errors.New("xz: writer configuration is nil") - } - c.fill() - lc := lzma.Writer2Config{ - Properties: c.Properties, - DictCap: c.DictCap, - BufSize: c.BufSize, - Matcher: c.Matcher, - } - if err := lc.Verify(); err != nil { - return err - } - if c.BlockSize <= 0 { - return errors.New("xz: block size out of range") - } - if err := verifyFlags(c.CheckSum); err != nil { - return err - } - return nil -} - -// filters creates the filter list for the given parameters. -func (c *WriterConfig) filters() []filter { - return []filter{&lzmaFilter{int64(c.DictCap)}} -} - -// maxInt64 defines the maximum 64-bit signed integer. -const maxInt64 = 1<<63 - 1 - -// verifyFilters checks the filter list for the length and the right -// sequence of filters. 
-func verifyFilters(f []filter) error { - if len(f) == 0 { - return errors.New("xz: no filters") - } - if len(f) > 4 { - return errors.New("xz: more than four filters") - } - for _, g := range f[:len(f)-1] { - if g.last() { - return errors.New("xz: last filter is not last") - } - } - if !f[len(f)-1].last() { - return errors.New("xz: wrong last filter") - } - return nil -} - -// newFilterWriteCloser converts a filter list into a WriteCloser that -// can be used by a blockWriter. -func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) { - if err = verifyFilters(f); err != nil { - return nil, err - } - fw = nopWriteCloser(w) - for i := len(f) - 1; i >= 0; i-- { - fw, err = f[i].writeCloser(fw, c) - if err != nil { - return nil, err - } - } - return fw, nil -} - -// nopWCloser implements a WriteCloser with a Close method not doing -// anything. -type nopWCloser struct { - io.Writer -} - -// Close returns nil and doesn't do anything else. -func (c nopWCloser) Close() error { - return nil -} - -// nopWriteCloser converts the Writer into a WriteCloser with a Close -// function that does nothing beside returning nil. -func nopWriteCloser(w io.Writer) io.WriteCloser { - return nopWCloser{w} -} - -// Writer compresses data written to it. It is an io.WriteCloser. -type Writer struct { - WriterConfig - - xz io.Writer - bw *blockWriter - newHash func() hash.Hash - h header - index []record - closed bool -} - -// newBlockWriter creates a new block writer writes the header out. -func (w *Writer) newBlockWriter() error { - var err error - w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash()) - if err != nil { - return err - } - if err = w.bw.writeHeader(w.xz); err != nil { - return err - } - return nil -} - -// closeBlockWriter closes a block writer and records the sizes in the -// index. -func (w *Writer) closeBlockWriter() error { - var err error - if err = w.bw.Close(); err != nil { - return err - } - w.index = append(w.index, w.bw.record()) - return nil -} - -// NewWriter creates a new xz writer using default parameters. -func NewWriter(xz io.Writer) (w *Writer, err error) { - return WriterConfig{}.NewWriter(xz) -} - -// NewWriter creates a new Writer using the given configuration parameters. -func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - w = &Writer{ - WriterConfig: c, - xz: xz, - h: header{c.CheckSum}, - index: make([]record, 0, 4), - } - if w.newHash, err = newHashFunc(c.CheckSum); err != nil { - return nil, err - } - data, err := w.h.MarshalBinary() - if err != nil { - return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err) - } - if _, err = xz.Write(data); err != nil { - return nil, err - } - if err = w.newBlockWriter(); err != nil { - return nil, err - } - return w, nil - -} - -// Write compresses the uncompressed data provided. -func (w *Writer) Write(p []byte) (n int, err error) { - if w.closed { - return 0, errClosed - } - for { - k, err := w.bw.Write(p[n:]) - n += k - if err != errNoSpace { - return n, err - } - if err = w.closeBlockWriter(); err != nil { - return n, err - } - if err = w.newBlockWriter(); err != nil { - return n, err - } - } -} - -// Close closes the writer and adds the footer to the Writer. Close -// doesn't close the underlying writer. 
-func (w *Writer) Close() error { - if w.closed { - return errClosed - } - w.closed = true - var err error - if err = w.closeBlockWriter(); err != nil { - return err - } - - f := footer{flags: w.h.flags} - if f.indexSize, err = writeIndex(w.xz, w.index); err != nil { - return err - } - data, err := f.MarshalBinary() - if err != nil { - return err - } - if _, err = w.xz.Write(data); err != nil { - return err - } - return nil -} - -// countingWriter is a writer that counts all data written to it. -type countingWriter struct { - w io.Writer - n int64 -} - -// Write writes data to the countingWriter. -func (cw *countingWriter) Write(p []byte) (n int, err error) { - n, err = cw.w.Write(p) - cw.n += int64(n) - if err == nil && cw.n < 0 { - return n, errors.New("xz: counter overflow") - } - return -} - -// blockWriter is writes a single block. -type blockWriter struct { - cxz countingWriter - // mw combines io.WriteCloser w and the hash. - mw io.Writer - w io.WriteCloser - n int64 - blockSize int64 - closed bool - headerLen int - - filters []filter - hash hash.Hash -} - -// newBlockWriter creates a new block writer. -func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) { - bw = &blockWriter{ - cxz: countingWriter{w: xz}, - blockSize: c.BlockSize, - filters: c.filters(), - hash: hash, - } - bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters) - if err != nil { - return nil, err - } - if bw.hash.Size() != 0 { - bw.mw = io.MultiWriter(bw.w, bw.hash) - } else { - bw.mw = bw.w - } - return bw, nil -} - -// writeHeader writes the header. If the function is called after Close -// the commpressedSize and uncompressedSize fields will be filled. -func (bw *blockWriter) writeHeader(w io.Writer) error { - h := blockHeader{ - compressedSize: -1, - uncompressedSize: -1, - filters: bw.filters, - } - if bw.closed { - h.compressedSize = bw.compressedSize() - h.uncompressedSize = bw.uncompressedSize() - } - data, err := h.MarshalBinary() - if err != nil { - return err - } - if _, err = w.Write(data); err != nil { - return err - } - bw.headerLen = len(data) - return nil -} - -// compressed size returns the amount of data written to the underlying -// stream. -func (bw *blockWriter) compressedSize() int64 { - return bw.cxz.n -} - -// uncompressedSize returns the number of data written to the -// blockWriter -func (bw *blockWriter) uncompressedSize() int64 { - return bw.n -} - -// unpaddedSize returns the sum of the header length, the uncompressed -// size of the block and the hash size. -func (bw *blockWriter) unpaddedSize() int64 { - if bw.headerLen <= 0 { - panic("xz: block header not written") - } - n := int64(bw.headerLen) - n += bw.compressedSize() - n += int64(bw.hash.Size()) - return n -} - -// record returns the record for the current stream. Call Close before -// calling this method. -func (bw *blockWriter) record() record { - return record{bw.unpaddedSize(), bw.uncompressedSize()} -} - -var errClosed = errors.New("xz: writer already closed") - -var errNoSpace = errors.New("xz: no space") - -// Write writes uncompressed data to the block writer. -func (bw *blockWriter) Write(p []byte) (n int, err error) { - if bw.closed { - return 0, errClosed - } - - t := bw.blockSize - bw.n - if int64(len(p)) > t { - err = errNoSpace - p = p[:t] - } - - var werr error - n, werr = bw.mw.Write(p) - bw.n += int64(n) - if werr != nil { - return n, werr - } - return n, err -} - -// Close closes the writer. 
-func (bw *blockWriter) Close() error { - if bw.closed { - return errClosed - } - bw.closed = true - if err := bw.w.Close(); err != nil { - return err - } - s := bw.hash.Size() - k := padLen(bw.cxz.n) - p := make([]byte, k+s) - bw.hash.Sum(p[k:k]) - if _, err := bw.cxz.w.Write(p); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/ultraware/funlen/README.md b/vendor/github.com/ultraware/funlen/README.md index aaf348521c..af2187694e 100644 --- a/vendor/github.com/ultraware/funlen/README.md +++ b/vendor/github.com/ultraware/funlen/README.md @@ -1,9 +1,47 @@ # Funlen linter -Funlen is a linter that checks for long functions. It can checks both on the number of lines and the number of statements. +Funlen is a linter that checks for long functions. It can check both the number of lines and the number of statements. The default limits are 60 lines and 40 statements. You can configure these. -## Installation guide +## Description + +The intent for the funlen linter is to fit a function within one screen. If you need to scroll through a long function, tracing variables back to their definition or even just finding matching brackets can become difficult. + +Besides checking lines, there's also a separate check for the number of statements, which gives a clearer idea of how much is actually being done in a function. + +The default values are used internally, but might need to be adjusted for your specific environment. + +## Installation Funlen is included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable funlen. + +# Exclude for tests + +golangci-lint offers a way to exclude linters in certain cases. More info can be found here: https://golangci-lint.run/usage/configuration/#issues-configuration. + +## Disable funlen for \_test.go files + +You can utilize the issues configuration in `.golangci.yml` to exclude the funlen linter for all test files: + +```yaml +issues: + exclude-rules: + # disable funlen for all _test.go files + - path: _test.go + linters: + - funlen +``` + +## Disable funlen only for Test funcs + +If you want to keep funlen enabled in helper functions in test files, for example, but disable it specifically for Test funcs, you can use the following configuration: + +```yaml +issues: + exclude-rules: + # disable funlen for test funcs + - source: "^func Test" + linters: + - funlen +``` diff --git a/vendor/github.com/ultraware/funlen/main.go b/vendor/github.com/ultraware/funlen/main.go index 2ba3530027..b68ddb926f 100644 --- a/vendor/github.com/ultraware/funlen/main.go +++ b/vendor/github.com/ultraware/funlen/main.go @@ -13,7 +13,7 @@ const ( ) // Run runs this linter on the provided code -func Run(file *ast.File, fset *token.FileSet, lineLimit, stmtLimit int) []Message { +func Run(file *ast.File, fset *token.FileSet, lineLimit int, stmtLimit int, ignoreComments bool) []Message { if lineLimit == 0 { lineLimit = defaultLineLimit } @@ -21,6 +21,8 @@ func Run(file *ast.File, fset *token.FileSet, lineLimit, stmtLimit int) []Messag stmtLimit = defaultStmtLimit } + cmap := ast.NewCommentMap(fset, file, file.Comments) + var msgs []Message for _, f := range file.Decls { decl, ok := f.(*ast.FuncDecl) @@ -36,7 +38,7 @@ func Run(file *ast.File, fset *token.FileSet, lineLimit, stmtLimit int) []Messag } if lineLimit > 0 { - if lines := getLines(fset, decl); lines > lineLimit { + if lines := getLines(fset, decl, cmap.Filter(decl), ignoreComments); lines > lineLimit { msgs = append(msgs, makeLineMessage(fset, decl.Name, lines, lineLimit)) } } @@
-65,8 +67,26 @@ func makeStmtMessage(fset *token.FileSet, funcInfo *ast.Ident, stmts, stmtLimit } } -func getLines(fset *token.FileSet, f *ast.FuncDecl) int { // nolint: interfacer - return fset.Position(f.End()).Line - fset.Position(f.Pos()).Line - 1 +func getLines(fset *token.FileSet, f *ast.FuncDecl, cmap ast.CommentMap, ignoreComments bool) int { // nolint: interfacer + var lineCount int + var commentCount int + + lineCount = fset.Position(f.End()).Line - fset.Position(f.Pos()).Line - 1 + + if !ignoreComments { + return lineCount + } + + for _, c := range cmap.Comments() { + // If the CommentGroup's lines are inside the function, + // count how many comments are in the CommentGroup + if (fset.Position(c.Pos()).Line > fset.Position(f.Pos()).Line) && + (fset.Position(c.End()).Line < fset.Position(f.End()).Line) { + commentCount += len(c.List) + } + } + + return lineCount - commentCount } func parseStmts(s []ast.Stmt) (total int) { diff --git a/vendor/github.com/ultraware/whitespace/README.md b/vendor/github.com/ultraware/whitespace/README.md index 2a88f13388..f99ecce36c 100644 --- a/vendor/github.com/ultraware/whitespace/README.md +++ b/vendor/github.com/ultraware/whitespace/README.md @@ -4,4 +4,6 @@ Whitespace is a linter that checks for unnecessary newlines at the start and end ## Installation guide -Whitespace is included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable whitespace. +To install as a standalone linter, run `go install github.com/ultraware/whitespace`. + +Whitespace is also included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable whitespace. diff --git a/vendor/github.com/ultraware/whitespace/main.go b/vendor/github.com/ultraware/whitespace/main.go deleted file mode 100644 index d178ea2939..0000000000 --- a/vendor/github.com/ultraware/whitespace/main.go +++ /dev/null @@ -1,162 +0,0 @@ -package whitespace - -import ( - "go/ast" - "go/token" -) - -// Message contains a message -type Message struct { - Pos token.Position - Type MessageType - Message string } - -// MessageType describes what should happen to fix the warning -type MessageType uint8 - -// List of MessageTypes -const ( - MessageTypeLeading MessageType = iota + 1 - MessageTypeTrailing - MessageTypeAddAfter -) - -// Settings contains settings for edge-cases -type Settings struct { - MultiIf bool - MultiFunc bool -} - -// Run runs this linter on the provided code -func Run(file *ast.File, fset *token.FileSet, settings Settings) []Message { - var messages []Message - - for _, f := range file.Decls { - decl, ok := f.(*ast.FuncDecl) - if !ok || decl.Body == nil { // decl.Body can be nil for e.g. cgo - continue - } - - vis := visitor{file.Comments, fset, nil, make(map[*ast.BlockStmt]bool), settings} - ast.Walk(&vis, decl) - - messages = append(messages, vis.messages...)
- } - - return messages -} - -type visitor struct { - comments []*ast.CommentGroup - fset *token.FileSet - messages []Message - wantNewline map[*ast.BlockStmt]bool - settings Settings -} - -func (v *visitor) Visit(node ast.Node) ast.Visitor { - if node == nil { - return v - } - - if stmt, ok := node.(*ast.IfStmt); ok && v.settings.MultiIf { - checkMultiLine(v, stmt.Body, stmt.Cond) - } - - if stmt, ok := node.(*ast.FuncLit); ok && v.settings.MultiFunc { - checkMultiLine(v, stmt.Body, stmt.Type) - } - - if stmt, ok := node.(*ast.FuncDecl); ok && v.settings.MultiFunc { - checkMultiLine(v, stmt.Body, stmt.Type) - } - - if stmt, ok := node.(*ast.BlockStmt); ok { - wantNewline := v.wantNewline[stmt] - - comments := v.comments - if wantNewline { - comments = nil // Comments also count as a newline if we want a newline - } - first, last := firstAndLast(comments, v.fset, stmt.Pos(), stmt.End(), stmt.List) - - startMsg := checkStart(v.fset, stmt.Lbrace, first) - - if wantNewline && startMsg == nil { - v.messages = append(v.messages, Message{v.fset.Position(stmt.Pos()), MessageTypeAddAfter, `multi-line statement should be followed by a newline`}) - } else if !wantNewline && startMsg != nil { - v.messages = append(v.messages, *startMsg) - } - - if msg := checkEnd(v.fset, stmt.Rbrace, last); msg != nil { - v.messages = append(v.messages, *msg) - } - } - - return v -} - -func checkMultiLine(v *visitor, body *ast.BlockStmt, stmtStart ast.Node) { - start, end := posLine(v.fset, stmtStart.Pos()), posLine(v.fset, stmtStart.End()) - - if end > start { // Check only multi line conditions - v.wantNewline[body] = true - } -} - -func posLine(fset *token.FileSet, pos token.Pos) int { - return fset.Position(pos).Line -} - -func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, start, end token.Pos, stmts []ast.Stmt) (ast.Node, ast.Node) { - if len(stmts) == 0 { - return nil, nil - } - - first, last := ast.Node(stmts[0]), ast.Node(stmts[len(stmts)-1]) - - for _, c := range comments { - if posLine(fset, c.Pos()) == posLine(fset, start) || posLine(fset, c.End()) == posLine(fset, end) { - continue - } - - if c.Pos() < start || c.End() > end { - continue - } - if c.Pos() < first.Pos() { - first = c - } - if c.End() > last.End() { - last = c - } - } - - return first, last -} - -func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *Message { - if first == nil { - return nil - } - - if posLine(fset, start)+1 < posLine(fset, first.Pos()) { - pos := fset.Position(start) - return &Message{pos, MessageTypeLeading, `unnecessary leading newline`} - } - - return nil -} - -func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *Message { - if last == nil { - return nil - } - - if posLine(fset, end)-1 > posLine(fset, last.End()) { - pos := fset.Position(end) - return &Message{pos, MessageTypeTrailing, `unnecessary trailing newline`} - } - - return nil -} diff --git a/vendor/github.com/ultraware/whitespace/whitespace.go b/vendor/github.com/ultraware/whitespace/whitespace.go new file mode 100644 index 0000000000..350e9b7e4e --- /dev/null +++ b/vendor/github.com/ultraware/whitespace/whitespace.go @@ -0,0 +1,307 @@ +package whitespace + +import ( + "flag" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// MessageType describes what should happen to fix the warning. +type MessageType uint8 + +// List of MessageTypes. +const ( + MessageTypeRemove MessageType = iota + 1 + MessageTypeAdd +) + +// RunningMode describes the mode the linter is run in. 
This can be either +// native or golangci-lint. +type RunningMode uint8 + +const ( + RunningModeNative RunningMode = iota + RunningModeGolangCI +) + +// Message contains a message and diagnostic information. +type Message struct { + // Diagnostic is what position the diagnostic should be put at. This isn't + // always the same as the fix start, f.ex. when we fix trailing newlines we + // put the diagnostic at the right bracket but we fix between the end of the + // last statement and the bracket. + Diagnostic token.Pos + + // FixStart is the span start of the fix. + FixStart token.Pos + + // FixEnd is the span end of the fix. + FixEnd token.Pos + + // LineNumbers represent the actual line numbers in the file. This is set + // when finding the diagnostic to make it easier to suggest fixes in + // golangci-lint. + LineNumbers []int + + // MessageType represents the type of message it is. + MessageType MessageType + + // Message is the diagnostic to show. + Message string +} + +// Settings contains settings for edge-cases. +type Settings struct { + Mode RunningMode + MultiIf bool + MultiFunc bool +} + +// NewAnalyzer creates a new whitespace analyzer. +func NewAnalyzer(settings *Settings) *analysis.Analyzer { + if settings == nil { + settings = &Settings{} + } + + return &analysis.Analyzer{ + Name: "whitespace", + Doc: "Whitespace is a linter that checks for unnecessary newlines at the start and end of functions, if, for, etc.", + Flags: flags(settings), + Run: func(p *analysis.Pass) (any, error) { + Run(p, settings) + return nil, nil + }, + RunDespiteErrors: true, + } +} + +func flags(settings *Settings) flag.FlagSet { + flags := flag.NewFlagSet("", flag.ExitOnError) + flags.BoolVar(&settings.MultiIf, "multi-if", settings.MultiIf, "Check that multi line if-statements have a leading newline") + flags.BoolVar(&settings.MultiFunc, "multi-func", settings.MultiFunc, "Check that multi line functions have a leading newline") + + return *flags +} + +func Run(pass *analysis.Pass, settings *Settings) []Message { + messages := []Message{} + + for _, file := range pass.Files { + filename := pass.Fset.Position(file.Pos()).Filename + if !strings.HasSuffix(filename, ".go") { + continue + } + + fileMessages := runFile(file, pass.Fset, *settings) + + if settings.Mode == RunningModeGolangCI { + messages = append(messages, fileMessages...) + continue + } + + for _, message := range fileMessages { + pass.Report(analysis.Diagnostic{ + Pos: message.Diagnostic, + Category: "whitespace", + Message: message.Message, + SuggestedFixes: []analysis.SuggestedFix{ + { + TextEdits: []analysis.TextEdit{ + { + Pos: message.FixStart, + End: message.FixEnd, + NewText: []byte("\n"), + }, + }, + }, + }, + }) + } + } + + return messages +} + +func runFile(file *ast.File, fset *token.FileSet, settings Settings) []Message { + var messages []Message + + for _, f := range file.Decls { + decl, ok := f.(*ast.FuncDecl) + if !ok || decl.Body == nil { // decl.Body can be nil for e.g. cgo + continue + } + + vis := visitor{file.Comments, fset, nil, make(map[*ast.BlockStmt]bool), settings} + ast.Walk(&vis, decl) + + messages = append(messages, vis.messages...) 
+ } + + return messages +} + +type visitor struct { + comments []*ast.CommentGroup + fset *token.FileSet + messages []Message + wantNewline map[*ast.BlockStmt]bool + settings Settings +} + +func (v *visitor) Visit(node ast.Node) ast.Visitor { + if node == nil { + return v + } + + if stmt, ok := node.(*ast.IfStmt); ok && v.settings.MultiIf { + checkMultiLine(v, stmt.Body, stmt.Cond) + } + + if stmt, ok := node.(*ast.FuncLit); ok && v.settings.MultiFunc { + checkMultiLine(v, stmt.Body, stmt.Type) + } + + if stmt, ok := node.(*ast.FuncDecl); ok && v.settings.MultiFunc { + checkMultiLine(v, stmt.Body, stmt.Type) + } + + if stmt, ok := node.(*ast.BlockStmt); ok { + wantNewline := v.wantNewline[stmt] + + comments := v.comments + if wantNewline { + comments = nil // Comments also count as a newline if we want a newline + } + + opening, first, last := firstAndLast(comments, v.fset, stmt) + startMsg := checkStart(v.fset, opening, first) + + if wantNewline && startMsg == nil && len(stmt.List) >= 1 { + v.messages = append(v.messages, Message{ + Diagnostic: opening, + FixStart: stmt.List[0].Pos(), + FixEnd: stmt.List[0].Pos(), + LineNumbers: []int{v.fset.PositionFor(stmt.List[0].Pos(), false).Line}, + MessageType: MessageTypeAdd, + Message: "multi-line statement should be followed by a newline", + }) + } else if !wantNewline && startMsg != nil { + v.messages = append(v.messages, *startMsg) + } + + if msg := checkEnd(v.fset, stmt.Rbrace, last); msg != nil { + v.messages = append(v.messages, *msg) + } + } + + return v +} + +func checkMultiLine(v *visitor, body *ast.BlockStmt, stmtStart ast.Node) { + start, end := posLine(v.fset, stmtStart.Pos()), posLine(v.fset, stmtStart.End()) + + if end > start { // Check only multi line conditions + v.wantNewline[body] = true + } +} + +func posLine(fset *token.FileSet, pos token.Pos) int { + return fset.PositionFor(pos, false).Line +} + +func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, stmt *ast.BlockStmt) (token.Pos, ast.Node, ast.Node) { + openingPos := stmt.Lbrace + 1 + + if len(stmt.List) == 0 { + return openingPos, nil, nil + } + + first, last := ast.Node(stmt.List[0]), ast.Node(stmt.List[len(stmt.List)-1]) + + for _, c := range comments { + // If the comment is on the same line as the opening pos (initially the + // left bracket) but it starts after the pos the comment must be after + // the bracket and where that comment ends should be considered where + // the fix should start. + if posLine(fset, c.Pos()) == posLine(fset, openingPos) && c.Pos() > openingPos { + if posLine(fset, c.End()) != posLine(fset, openingPos) { + // This is a multiline comment that spans from the `LBrace` line + // to a line further down. This should always be seen as ok! 
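+ // Treating it as the first node keeps checkStart from reporting an + // unnecessary leading newline for this block.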
+ first = c + } else { + openingPos = c.End() + } + } + + if posLine(fset, c.Pos()) == posLine(fset, stmt.Pos()) || posLine(fset, c.End()) == posLine(fset, stmt.End()) { + continue + } + + if c.Pos() < stmt.Pos() || c.End() > stmt.End() { + continue + } + + if c.Pos() < first.Pos() { + first = c + } + + if c.End() > last.End() { + last = c + } + } + + return openingPos, first, last +} + +func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *Message { + if first == nil { + return nil + } + + if posLine(fset, start)+1 < posLine(fset, first.Pos()) { + return &Message{ + Diagnostic: start, + FixStart: start, + FixEnd: first.Pos(), + LineNumbers: linesBetween(fset, start, first.Pos()), + MessageType: MessageTypeRemove, + Message: "unnecessary leading newline", + } + } + + return nil +} + +func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *Message { + if last == nil { + return nil + } + + if posLine(fset, end)-1 > posLine(fset, last.End()) { + return &Message{ + Diagnostic: end, + FixStart: last.End(), + FixEnd: end, + LineNumbers: linesBetween(fset, last.End(), end), + MessageType: MessageTypeRemove, + Message: "unnecessary trailing newline", + } + } + + return nil +} + +func linesBetween(fset *token.FileSet, a, b token.Pos) []int { + lines := []int{} + aPosition := fset.PositionFor(a, false) + bPosition := fset.PositionFor(b, false) + + for i := aPosition.Line + 1; i < bPosition.Line; i++ { + lines = append(lines, i) + } + + return lines +} diff --git a/vendor/github.com/uudashr/gocognit/README.md b/vendor/github.com/uudashr/gocognit/README.md index 1e028c7897..57f31cf740 100644 --- a/vendor/github.com/uudashr/gocognit/README.md +++ b/vendor/github.com/uudashr/gocognit/README.md @@ -1,6 +1,6 @@ [![GoDoc](https://godoc.org/github.com/uudashr/gocognit?status.svg)](https://godoc.org/github.com/uudashr/gocognit) # Gocognit -Gocognit calculates cognitive complexities of functions in Go source code. A measurement of how hard does the code is intuitively to understand. +Gocognit calculates cognitive complexities of functions (and methods) in Go source code: a measurement of how hard the code is to understand intuitively. ## Understanding the complexity @@ -37,10 +37,10 @@ func GetWords(number int) string { As you can see, the code above is the same, but the second version is easier to understand; that is why its cognitive complexity score is lower compared to the first one. -## Comparison with cyclometic complexity +## Comparison with cyclomatic complexity ### Example 1 -#### Cyclometic complexity +#### Cyclomatic complexity ```go func GetWords(number int) string { // +1 switch number { @@ -160,16 +160,40 @@ $ go get github.com/uudashr/gocognit/cmd/gocognit ``` $ gocognit Calculate cognitive complexities of Go functions. + Usage: - gocognit [flags] ... + + gocognit [<flag> ...] <Go file or directory> ...
+ Flags: - -over N show functions with complexity > N only and - return exit code 1 if the set is non-empty - -top N show the top N most complex functions only - -avg show the average complexity over all functions, - not depending on whether -over or -top are set -The output fields for each line are: - + + -over N show functions with complexity > N only + and return exit code 1 if the output is non-empty + -top N show the top N most complex functions only + -avg show the average complexity over all functions, + not depending on whether -over or -top are set + -json encode the output as JSON + -f format string the format to use + (default "{{.PkgName}}.{{.FuncName}}:{{.Complexity}}:{{.Pos}}") + +The (default) output fields for each line are: + + <complexity> <package> <function> <file:row:column> + +or equal to + + {{.Complexity}} {{.PkgName}} {{.FuncName}} {{.Pos}} + +The struct being passed to the template is: + + type Stat struct { + PkgName string + FuncName string + Complexity int + Pos token.Position + } ``` Examples: @@ -180,6 +204,7 @@ $ gocognit main.go $ gocognit -top 10 src/ $ gocognit -over 25 docker $ gocognit -avg . +$ gocognit -ignore "_test|testdata" . ``` The output fields for each line are: @@ -187,6 +212,15 @@ <complexity> <package> <function> <file:row:column> ``` +## Ignore individual functions +Ignore individual functions by specifying the `gocognit:ignore` directive. +```go +//gocognit:ignore +func IgnoreMe() { + // ... +} +``` + ## Related project - [Gocyclo](https://github.com/fzipp/gocyclo) which the code is based on. - [Cognitive Complexity: A new way of measuring understandability](https://www.sonarsource.com/docs/CognitiveComplexity.pdf) white paper by G. Ann Campbell. \ No newline at end of file diff --git a/vendor/github.com/uudashr/gocognit/gocognit.go b/vendor/github.com/uudashr/gocognit/gocognit.go index 1d539ee780..2bba2eb4f0 100644 --- a/vendor/github.com/uudashr/gocognit/gocognit.go +++ b/vendor/github.com/uudashr/gocognit/gocognit.go @@ -26,6 +26,11 @@ func ComplexityStats(f *ast.File, fset *token.FileSet, stats []Stat) []Stat { for _, decl := range f.Decls { if fn, ok := decl.(*ast.FuncDecl); ok { + d := parseDirective(fn.Doc) + if d.Ignore { + continue + } + stats = append(stats, Stat{ PkgName: f.Name.Name, FuncName: funcName(fn), @@ -37,6 +42,24 @@ func ComplexityStats(f *ast.File, fset *token.FileSet, stats []Stat) []Stat { return stats } +type directive struct { + Ignore bool +} + +func parseDirective(doc *ast.CommentGroup) directive { + if doc == nil { + return directive{} + } + + for _, c := range doc.List { + if c.Text == "//gocognit:ignore" { + return directive{Ignore: true} + } + } + + return directive{} +} + // funcName returns the name representation of a function or method: // "(Type).Name" for methods or simply "Name" for functions.
func funcName(fn *ast.FuncDecl) string { @@ -272,7 +295,7 @@ func (v *complexityVisitor) visitBranchStmt(n *ast.BranchStmt) ast.Visitor { } func (v *complexityVisitor) visitBinaryExpr(n *ast.BinaryExpr) ast.Visitor { - if (n.Op == token.LAND || n.Op == token.LOR) && !v.isCalculated(n) { + if isBinaryLogicalOp(n.Op) && !v.isCalculated(n) { ops := v.collectBinaryOps(n) var lastOp token.Token @@ -299,15 +322,10 @@ func (v *complexityVisitor) visitCallExpr(n *ast.CallExpr) ast.Visitor { func (v *complexityVisitor) collectBinaryOps(exp ast.Expr) []token.Token { v.markCalculated(exp) - switch exp := exp.(type) { - case *ast.BinaryExpr: + if exp, ok := exp.(*ast.BinaryExpr); ok { return mergeBinaryOps(v.collectBinaryOps(exp.X), exp.Op, v.collectBinaryOps(exp.Y)) - case *ast.ParenExpr: - // interest only on what inside paranthese - return v.collectBinaryOps(exp.X) - default: - return []token.Token{} } + return nil } func (v *complexityVisitor) incIfComplexity(n *ast.IfStmt) { @@ -320,16 +338,18 @@ func mergeBinaryOps(x []token.Token, op token.Token, y []token.Token) []token.Token { var out []token.Token - if len(x) != 0 { - out = append(out, x...) - } - out = append(out, op) - if len(y) != 0 { - out = append(out, y...) + out = append(out, x...) + if isBinaryLogicalOp(op) { + out = append(out, op) } + out = append(out, y...) return out } +func isBinaryLogicalOp(op token.Token) bool { + return op == token.LAND || op == token.LOR +} + const Doc = `Find complex function using cognitive complexity calculation. The gocognit analysis reports functions or methods which the complexity is over @@ -359,13 +379,19 @@ func run(pass *analysis.Pass) (interface{}, error) { (*ast.FuncDecl)(nil), } inspect.Preorder(nodeFilter, func(n ast.Node) { - fnDecl := n.(*ast.FuncDecl) + funcDecl := n.(*ast.FuncDecl) + + d := parseDirective(funcDecl.Doc) + if d.Ignore { + return + } + + fnName := funcName(funcDecl) - fnName := funcName(fnDecl) - fnComplexity := Complexity(fnDecl) + fnComplexity := Complexity(funcDecl) if fnComplexity > over { - pass.Reportf(fnDecl.Pos(), "cognitive complexity %d of func %s is high (> %d)", fnComplexity, fnName, over) + pass.Reportf(funcDecl.Pos(), "cognitive complexity %d of func %s is high (> %d)", fnComplexity, fnName, over) } }) diff --git a/vendor/github.com/vmihailenco/msgpack/.travis.yml b/vendor/github.com/vmihailenco/msgpack/.travis.yml new file mode 100644 index 0000000000..6aa1fc312c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/.travis.yml @@ -0,0 +1,18 @@ +sudo: false +language: go + +go: + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get gopkg.in/check.v1 diff --git a/vendor/github.com/vmihailenco/msgpack/CHANGELOG.md b/vendor/github.com/vmihailenco/msgpack/CHANGELOG.md new file mode 100644 index 0000000000..9a4f38a93c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/CHANGELOG.md @@ -0,0 +1,24 @@ +## 3.4 + +- Encode, Decode, Marshal, and Unmarshal are changed to accept a single argument. EncodeMulti and DecodeMulti are added as replacements. +- Added EncodeInt8/16/32/64 and EncodeUint8/16/32/64. +- Encoder changed to preserve the type of numbers instead of choosing the most compact encoding. The old behavior can be achieved with Encoder.UseCompactEncoding. + +## v3.3 + +- `msgpack:",inline"` tag is restored to force inlining structs. + +## v3.2 + +- Decoding extension types returns a pointer to the value instead of the value.
Fixes #153 + +## v3 + +- gopkg.in is not supported any more. Update import path to github.com/vmihailenco/msgpack. +- Msgpack maps are decoded into map[string]interface{} by default. +- EncodeSliceLen is removed in favor of EncodeArrayLen. DecodeSliceLen is removed in favor of DecodeArrayLen. +- Embedded structs are automatically inlined where possible. +- Time is encoded using extension as described in https://github.com/msgpack/msgpack/pull/209. Old format is supported as well. +- EncodeInt8/16/32/64 is replaced with EncodeInt. EncodeUint8/16/32/64 is replaced with EncodeUint. There should be no performance differences. +- DecodeInterface can now return int8/16/32 and uint8/16/32. +- PeekCode returns codes.Code instead of byte. diff --git a/vendor/github.com/vmihailenco/msgpack/v4/LICENSE b/vendor/github.com/vmihailenco/msgpack/LICENSE similarity index 100% rename from vendor/github.com/vmihailenco/msgpack/v4/LICENSE rename to vendor/github.com/vmihailenco/msgpack/LICENSE diff --git a/vendor/github.com/vmihailenco/msgpack/Makefile b/vendor/github.com/vmihailenco/msgpack/Makefile new file mode 100644 index 0000000000..29660d2bc0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/Makefile @@ -0,0 +1,6 @@ +all: + go test ./... + env GOOS=linux GOARCH=386 go test ./... + go test ./... -short -race + go test ./... -run=nothing -bench=. -benchmem + go vet diff --git a/vendor/github.com/vmihailenco/msgpack/README.md b/vendor/github.com/vmihailenco/msgpack/README.md new file mode 100644 index 0000000000..0c75ae16e2 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/README.md @@ -0,0 +1,69 @@ +# MessagePack encoding for Golang + +[![Build Status](https://travis-ci.org/vmihailenco/msgpack.svg?branch=v2)](https://travis-ci.org/vmihailenco/msgpack) +[![GoDoc](https://godoc.org/github.com/vmihailenco/msgpack?status.svg)](https://godoc.org/github.com/vmihailenco/msgpack) + +Supports: +- Primitives, arrays, maps, structs, time.Time and interface{}. +- Appengine *datastore.Key and datastore.Cursor. +- [CustomEncoder](https://godoc.org/github.com/vmihailenco/msgpack#example-CustomEncoder)/CustomDecoder interfaces for custom encoding. +- [Extensions](https://godoc.org/github.com/vmihailenco/msgpack#example-RegisterExt) to encode type information. +- Renaming fields via `msgpack:"my_field_name"`. +- Omitting individual empty fields via `msgpack:",omitempty"` tag or all [empty fields in a struct](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--OmitEmpty). +- [Map keys sorting](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.SortMapKeys). +- Encoding/decoding all [structs as arrays](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.StructAsArray) or [individual structs](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--AsArray). +- [Encoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.UseJSONTag) with [Decoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Decoder.UseJSONTag) can turn msgpack into drop-in replacement for JSON. +- Simple but very fast and efficient [queries](https://godoc.org/github.com/vmihailenco/msgpack#example-Decoder-Query). + +API docs: https://godoc.org/github.com/vmihailenco/msgpack. +Examples: https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples. 
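To illustrate the EncodeMulti/DecodeMulti pair mentioned in the changelog above, here is a minimal round-trip sketch; the buffer and sample values are illustrative, not part of the vendored sources:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack"
)

func main() {
	var buf bytes.Buffer

	// EncodeMulti writes one MessagePack value per argument, back to back.
	if err := msgpack.NewEncoder(&buf).EncodeMulti("hello", 42, true); err != nil {
		panic(err)
	}

	var (
		s string
		n int
		b bool
	)
	// DecodeMulti reads the values back in the same order.
	if err := msgpack.NewDecoder(&buf).DecodeMulti(&s, &n, &b); err != nil {
		panic(err)
	}

	fmt.Println(s, n, b) // hello 42 true
}
```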
+ +## Installation + +Install: + +```shell +go get -u github.com/vmihailenco/msgpack +``` + +## Quickstart + +```go +func ExampleMarshal() { + type Item struct { + Foo string + } + + b, err := msgpack.Marshal(&Item{Foo: "bar"}) + if err != nil { + panic(err) + } + + var item Item + err = msgpack.Unmarshal(b, &item) + if err != nil { + panic(err) + } + fmt.Println(item.Foo) + // Output: bar +} +``` + +## Benchmark + +``` +BenchmarkStructVmihailencoMsgpack-4 200000 12814 ns/op 2128 B/op 26 allocs/op +BenchmarkStructUgorjiGoMsgpack-4 100000 17678 ns/op 3616 B/op 70 allocs/op +BenchmarkStructUgorjiGoCodec-4 100000 19053 ns/op 7346 B/op 23 allocs/op +BenchmarkStructJSON-4 20000 69438 ns/op 7864 B/op 26 allocs/op +BenchmarkStructGOB-4 10000 104331 ns/op 14664 B/op 278 allocs/op +``` + +## Howto + +Please go through [examples](https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples) to get an idea how to use this package. + +## See also + +- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) +- [Golang message task queue](https://github.com/go-msgqueue/msgqueue) diff --git a/vendor/github.com/vmihailenco/msgpack/v4/appengine.go b/vendor/github.com/vmihailenco/msgpack/appengine.go similarity index 100% rename from vendor/github.com/vmihailenco/msgpack/v4/appengine.go rename to vendor/github.com/vmihailenco/msgpack/appengine.go diff --git a/vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go b/vendor/github.com/vmihailenco/msgpack/codes/codes.go similarity index 100% rename from vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go rename to vendor/github.com/vmihailenco/msgpack/codes/codes.go diff --git a/vendor/github.com/vmihailenco/msgpack/decode.go b/vendor/github.com/vmihailenco/msgpack/decode.go new file mode 100644 index 0000000000..2edfa9c372 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode.go @@ -0,0 +1,547 @@ +package msgpack + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +const bytesAllocLimit = 1024 * 1024 // 1mb + +type bufReader interface { + io.Reader + io.ByteScanner +} + +func newBufReader(r io.Reader) bufReader { + if br, ok := r.(bufReader); ok { + return br + } + return bufio.NewReader(r) +} + +func makeBuffer() []byte { + return make([]byte, 0, 64) +} + +// Unmarshal decodes the MessagePack-encoded data and stores the result +// in the value pointed to by v. +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +type Decoder struct { + r io.Reader + s io.ByteScanner + buf []byte + + extLen int + rec []byte // accumulates read data if not nil + + useLoose bool + useJSONTag bool + + decodeMapFunc func(*Decoder) (interface{}, error) +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r +// beyond the MessagePack values requested. Buffering can be disabled +// by passing a reader that implements io.ByteScanner interface. +func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + buf: makeBuffer(), + } + d.resetReader(r) + return d +} + +func (d *Decoder) SetDecodeMapFunc(fn func(*Decoder) (interface{}, error)) { + d.decodeMapFunc = fn +} + +// UseDecodeInterfaceLoose causes decoder to use DecodeInterfaceLoose +// to decode msgpack value into Go interface{}. 
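+// Loose decoding widens small integers to int64/uint64 and float32 to float64, +// so numbers stored in interface{} values share a single width.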
+func (d *Decoder) UseDecodeInterfaceLoose(flag bool) *Decoder { + d.useLoose = flag + return d +} + +// UseJSONTag causes the Decoder to use json struct tag as fallback option +// if there is no msgpack tag. +func (d *Decoder) UseJSONTag(v bool) *Decoder { + d.useJSONTag = v + return d +} + +func (d *Decoder) Reset(r io.Reader) error { + d.resetReader(r) + return nil +} + +func (d *Decoder) resetReader(r io.Reader) { + reader := newBufReader(r) + d.r = reader + d.s = reader +} + +func (d *Decoder) Decode(v interface{}) error { + var err error + switch v := v.(type) { + case *string: + if v != nil { + *v, err = d.DecodeString() + return err + } + case *[]byte: + if v != nil { + return d.decodeBytesPtr(v) + } + case *int: + if v != nil { + *v, err = d.DecodeInt() + return err + } + case *int8: + if v != nil { + *v, err = d.DecodeInt8() + return err + } + case *int16: + if v != nil { + *v, err = d.DecodeInt16() + return err + } + case *int32: + if v != nil { + *v, err = d.DecodeInt32() + return err + } + case *int64: + if v != nil { + *v, err = d.DecodeInt64() + return err + } + case *uint: + if v != nil { + *v, err = d.DecodeUint() + return err + } + case *uint8: + if v != nil { + *v, err = d.DecodeUint8() + return err + } + case *uint16: + if v != nil { + *v, err = d.DecodeUint16() + return err + } + case *uint32: + if v != nil { + *v, err = d.DecodeUint32() + return err + } + case *uint64: + if v != nil { + *v, err = d.DecodeUint64() + return err + } + case *bool: + if v != nil { + *v, err = d.DecodeBool() + return err + } + case *float32: + if v != nil { + *v, err = d.DecodeFloat32() + return err + } + case *float64: + if v != nil { + *v, err = d.DecodeFloat64() + return err + } + case *[]string: + return d.decodeStringSlicePtr(v) + case *map[string]string: + return d.decodeMapStringStringPtr(v) + case *map[string]interface{}: + return d.decodeMapStringInterfacePtr(v) + case *time.Duration: + if v != nil { + vv, err := d.DecodeInt64() + *v = time.Duration(vv) + return err + } + case *time.Time: + if v != nil { + *v, err = d.DecodeTime() + return err + } + } + + vv := reflect.ValueOf(v) + if !vv.IsValid() { + return errors.New("msgpack: Decode(nil)") + } + if vv.Kind() != reflect.Ptr { + return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) + } + vv = vv.Elem() + if !vv.IsValid() { + return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) + } + return d.DecodeValue(vv) +} + +func (d *Decoder) DecodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := d.Decode(vv); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeInterfaceCond() (interface{}, error) { + if d.useLoose { + return d.DecodeInterfaceLoose() + } + return d.DecodeInterface() +} + +func (d *Decoder) DecodeValue(v reflect.Value) error { + decode := getDecoder(v.Type()) + return decode(d, v) +} + +func (d *Decoder) DecodeNil() error { + c, err := d.readCode() + if err != nil { + return err + } + if c != codes.Nil { + return fmt.Errorf("msgpack: invalid code=%x decoding nil", c) + } + return nil +} + +func (d *Decoder) decodeNilValue(v reflect.Value) error { + err := d.DecodeNil() + if v.IsNil() { + return err + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + v.Set(reflect.Zero(v.Type())) + return err +} + +func (d *Decoder) DecodeBool() (bool, error) { + c, err := d.readCode() + if err != nil { + return false, err + } + return d.bool(c) +} + +func (d *Decoder) bool(c codes.Code) (bool, error) { + if c == codes.False { + return false, nil + } + if c == codes.True { + return true, 
nil + } + return false, fmt.Errorf("msgpack: invalid code=%x decoding bool", c) +} + +// DecodeInterface decodes value into interface. It returns following types: +// - nil, +// - bool, +// - int8, int16, int32, int64, +// - uint8, uint16, uint32, uint64, +// - float32 and float64, +// - string, +// - []byte, +// - slices of any of the above, +// - maps of any of the above. +// +// DecodeInterface should be used only when you don't know the type of value +// you are decoding. For example, if you are decoding number it is better to use +// DecodeInt64 for negative numbers and DecodeUint64 for positive numbers. +func (d *Decoder) DecodeInterface() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int8(c), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float: + return d.float32(c) + case codes.Double: + return d.float64(c) + case codes.Uint8: + return d.uint8() + case codes.Uint16: + return d.uint16() + case codes.Uint32: + return d.uint32() + case codes.Uint64: + return d.uint64() + case codes.Int8: + return d.int8() + case codes.Int16: + return d.int16() + case codes.Int32: + return d.int32() + case codes.Int64: + return d.int64() + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// DecodeInterfaceLoose is like DecodeInterface except that: +// - int8, int16, and int32 are converted to int64, +// - uint8, uint16, and uint32 are converted to uint64, +// - float32 is converted to float64. 
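+//
+// Sketch: decoding a single value without caring about its msgpack width
+// (illustrative; b is assumed to hold one encoded value):
+//
+//	dec := msgpack.NewDecoder(bytes.NewReader(b))
+//	v, err := dec.DecodeInterfaceLoose() // e.g. int64(42) rather than int8(42)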
+func (d *Decoder) DecodeInterfaceLoose() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int64(c), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float, codes.Double: + return d.float64(c) + case codes.Uint8, codes.Uint16, codes.Uint32, codes.Uint64: + return d.uint(c) + case codes.Int8, codes.Int16, codes.Int32, codes.Int64: + return d.int(c) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// Skip skips next value. +func (d *Decoder) Skip() error { + c, err := d.readCode() + if err != nil { + return err + } + + if codes.IsFixedNum(c) { + return nil + } else if codes.IsFixedMap(c) { + return d.skipMap(c) + } else if codes.IsFixedArray(c) { + return d.skipSlice(c) + } else if codes.IsFixedString(c) { + return d.skipBytes(c) + } + + switch c { + case codes.Nil, codes.False, codes.True: + return nil + case codes.Uint8, codes.Int8: + return d.skipN(1) + case codes.Uint16, codes.Int16: + return d.skipN(2) + case codes.Uint32, codes.Int32, codes.Float: + return d.skipN(4) + case codes.Uint64, codes.Int64, codes.Double: + return d.skipN(8) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.skipBytes(c) + case codes.Str8, codes.Str16, codes.Str32: + return d.skipBytes(c) + case codes.Array16, codes.Array32: + return d.skipSlice(c) + case codes.Map16, codes.Map32: + return d.skipMap(c) + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.skipExt(c) + } + + return fmt.Errorf("msgpack: unknown code %x", c) +} + +// PeekCode returns the next MessagePack code without advancing the reader. +// Subpackage msgpack/codes contains list of available codes. +func (d *Decoder) PeekCode() (codes.Code, error) { + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + return codes.Code(c), d.s.UnreadByte() +} + +func (d *Decoder) hasNilCode() bool { + code, err := d.PeekCode() + return err == nil && code == codes.Nil +} + +func (d *Decoder) readCode() (codes.Code, error) { + d.extLen = 0 + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + if d.rec != nil { + d.rec = append(d.rec, c) + } + return codes.Code(c), nil +} + +func (d *Decoder) readFull(b []byte) error { + _, err := io.ReadFull(d.r, b) + if err != nil { + return err + } + if d.rec != nil { + d.rec = append(d.rec, b...) + } + return nil +} + +func (d *Decoder) readN(n int) ([]byte, error) { + buf, err := readN(d.r, d.buf, n) + if err != nil { + return nil, err + } + d.buf = buf + if d.rec != nil { + d.rec = append(d.rec, buf...) 
+ } + return buf, nil +} + +func readN(r io.Reader, b []byte, n int) ([]byte, error) { + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + if n <= bytesAllocLimit { + b = make([]byte, n) + } else { + b = make([]byte, bytesAllocLimit) + } + } + + if n <= cap(b) { + b = b[:n] + _, err := io.ReadFull(r, b) + return b, err + } + b = b[:cap(b)] + + var pos int + for { + alloc := n - len(b) + if alloc > bytesAllocLimit { + alloc = bytesAllocLimit + } + b = append(b, make([]byte, alloc)...) + + _, err := io.ReadFull(r, b[pos:]) + if err != nil { + return nil, err + } + + if len(b) == n { + break + } + pos = len(b) + } + + return b, nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_map.go b/vendor/github.com/vmihailenco/msgpack/decode_map.go new file mode 100644 index 0000000000..2a3d3ecb28 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_map.go @@ -0,0 +1,339 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +const mapElemsAllocLimit = 1e4 + +var mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) +var mapStringStringType = mapStringStringPtrType.Elem() + +var mapStringInterfacePtrType = reflect.TypeOf((*map[string]interface{})(nil)) +var mapStringInterfaceType = mapStringInterfacePtrType.Elem() + +var errInvalidCode = errors.New("invalid code") + +func decodeMapValue(d *Decoder, v reflect.Value) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + + typ := v.Type() + if size == -1 { + v.Set(reflect.Zero(typ)) + return nil + } + + if v.IsNil() { + v.Set(reflect.MakeMap(typ)) + } + if size == 0 { + return nil + } + + return decodeMapValueSize(d, v, size) +} + +func decodeMapValueSize(d *Decoder, v reflect.Value, size int) error { + typ := v.Type() + keyType := typ.Key() + valueType := typ.Elem() + + for i := 0; i < size; i++ { + mk := reflect.New(keyType).Elem() + if err := d.DecodeValue(mk); err != nil { + return err + } + + mv := reflect.New(valueType).Elem() + if err := d.DecodeValue(mv); err != nil { + return err + } + + v.SetMapIndex(mk, mv) + } + + return nil +} + +// DecodeMapLen decodes map length. Length is -1 when map is nil. 
+func (d *Decoder) DecodeMapLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + + if codes.IsExt(c) { + if err = d.skipExtHeader(c); err != nil { + return 0, err + } + + c, err = d.readCode() + if err != nil { + return 0, err + } + } + return d.mapLen(c) +} + +func (d *Decoder) mapLen(c codes.Code) (int, error) { + size, err := d._mapLen(c) + err = expandInvalidCodeMapLenError(c, err) + return size, err +} + +func (d *Decoder) _mapLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } + if c >= codes.FixedMapLow && c <= codes.FixedMapHigh { + return int(c & codes.FixedMapMask), nil + } + if c == codes.Map16 { + size, err := d.uint16() + return int(size), err + } + if c == codes.Map32 { + size, err := d.uint32() + return int(size), err + } + return 0, errInvalidCode +} + +func expandInvalidCodeMapLenError(c codes.Code, err error) error { + if err == errInvalidCode { + return fmt.Errorf("msgpack: invalid code=%x decoding map length", c) + } + return err +} + +func decodeMapStringStringValue(d *Decoder, v reflect.Value) error { + mptr := v.Addr().Convert(mapStringStringPtrType).Interface().(*map[string]string) + return d.decodeMapStringStringPtr(mptr) +} + +func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + if size == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]string, min(size, mapElemsAllocLimit)) + m = *ptr + } + + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.DecodeString() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func decodeMapStringInterfaceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(mapStringInterfacePtrType).Interface().(*map[string]interface{}) + return d.decodeMapStringInterfacePtr(ptr) +} + +func (d *Decoder) decodeMapStringInterfacePtr(ptr *map[string]interface{}) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]interface{}, min(n, mapElemsAllocLimit)) + m = *ptr + } + + for i := 0; i < n; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func (d *Decoder) DecodeMap() (interface{}, error) { + if d.decodeMapFunc != nil { + return d.decodeMapFunc(d) + } + + size, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + if size == -1 { + return nil, nil + } + if size == 0 { + return make(map[string]interface{}), nil + } + + code, err := d.PeekCode() + if err != nil { + return nil, err + } + + if codes.IsString(code) || codes.IsBin(code) { + return d.decodeMapStringInterfaceSize(size) + } + + key, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + value, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + keyType := reflect.TypeOf(key) + valueType := reflect.TypeOf(value) + + mapType := reflect.MapOf(keyType, valueType) + mapValue := reflect.MakeMap(mapType) + + mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) + size-- + + err = decodeMapValueSize(d, mapValue, size) + if err != nil { + return nil, err + } + + return mapValue.Interface(), nil +} + +func (d *Decoder) decodeMapStringInterfaceSize(size int) (map[string]interface{}, error) { + m := 
make(map[string]interface{}, min(size, mapElemsAllocLimit)) + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return nil, err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + m[mk] = mv + } + return m, nil +} + +func (d *Decoder) skipMap(c codes.Code) error { + n, err := d.mapLen(c) + if err != nil { + return err + } + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + if err := d.Skip(); err != nil { + return err + } + } + return nil +} + +func decodeStructValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + var isArray bool + + n, err := d._mapLen(c) + if err != nil { + var err2 error + n, err2 = d.arrayLen(c) + if err2 != nil { + return expandInvalidCodeMapLenError(c, err) + } + isArray = true + } + if n == -1 { + if err = mustSet(v); err != nil { + return err + } + v.Set(reflect.Zero(v.Type())) + return nil + } + + var fields *fields + if d.useJSONTag { + fields = jsonStructs.Fields(v.Type()) + } else { + fields = structs.Fields(v.Type()) + } + + if isArray { + for i, f := range fields.List { + if i >= n { + break + } + if err := f.DecodeValue(d, v); err != nil { + return err + } + } + // Skip extra values. + for i := len(fields.List); i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil + } + + for i := 0; i < n; i++ { + name, err := d.DecodeString() + if err != nil { + return err + } + if f := fields.Table[name]; f != nil { + if err := f.DecodeValue(d, v); err != nil { + return err + } + } else { + if err := d.Skip(); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_number.go b/vendor/github.com/vmihailenco/msgpack/decode_number.go new file mode 100644 index 0000000000..15019cc97a --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_number.go @@ -0,0 +1,307 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func (d *Decoder) skipN(n int) error { + _, err := d.readN(n) + return err +} + +func (d *Decoder) uint8() (uint8, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return uint8(c), nil +} + +func (d *Decoder) int8() (int8, error) { + n, err := d.uint8() + return int8(n), err +} + +func (d *Decoder) uint16() (uint16, error) { + b, err := d.readN(2) + if err != nil { + return 0, err + } + return (uint16(b[0]) << 8) | uint16(b[1]), nil +} + +func (d *Decoder) int16() (int16, error) { + n, err := d.uint16() + return int16(n), err +} + +func (d *Decoder) uint32() (uint32, error) { + b, err := d.readN(4) + if err != nil { + return 0, err + } + n := (uint32(b[0]) << 24) | + (uint32(b[1]) << 16) | + (uint32(b[2]) << 8) | + uint32(b[3]) + return n, nil +} + +func (d *Decoder) int32() (int32, error) { + n, err := d.uint32() + return int32(n), err +} + +func (d *Decoder) uint64() (uint64, error) { + b, err := d.readN(8) + if err != nil { + return 0, err + } + n := (uint64(b[0]) << 56) | + (uint64(b[1]) << 48) | + (uint64(b[2]) << 40) | + (uint64(b[3]) << 32) | + (uint64(b[4]) << 24) | + (uint64(b[5]) << 16) | + (uint64(b[6]) << 8) | + uint64(b[7]) + return n, nil +} + +func (d *Decoder) int64() (int64, error) { + n, err := d.uint64() + return int64(n), err +} + +// DecodeUint64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go uint64. 
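+//
+// Note: negative msgpack ints are sign-extended before conversion, so
+// decoding e.g. int8(-1) with DecodeUint64 yields math.MaxUint64; prefer
+// DecodeInt64 when the value may be negative.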
+func (d *Decoder) DecodeUint64() (uint64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.uint(c) +} + +func (d *Decoder) uint(c codes.Code) (uint64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return uint64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return uint64(n), err + case codes.Int8: + n, err := d.int8() + return uint64(n), err + case codes.Uint16: + n, err := d.uint16() + return uint64(n), err + case codes.Int16: + n, err := d.int16() + return uint64(n), err + case codes.Uint32: + n, err := d.uint32() + return uint64(n), err + case codes.Int32: + n, err := d.int32() + return uint64(n), err + case codes.Uint64, codes.Int64: + return d.uint64() + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding uint64", c) +} + +// DecodeInt64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go int64. +func (d *Decoder) DecodeInt64() (int64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.int(c) +} + +func (d *Decoder) int(c codes.Code) (int64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return int64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return int64(n), err + case codes.Int8: + n, err := d.uint8() + return int64(int8(n)), err + case codes.Uint16: + n, err := d.uint16() + return int64(n), err + case codes.Int16: + n, err := d.uint16() + return int64(int16(n)), err + case codes.Uint32: + n, err := d.uint32() + return int64(n), err + case codes.Int32: + n, err := d.uint32() + return int64(int32(n)), err + case codes.Uint64, codes.Int64: + n, err := d.uint64() + return int64(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding int64", c) +} + +func (d *Decoder) DecodeFloat32() (float32, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float32(c) +} + +func (d *Decoder) float32(c codes.Code) (float32, error) { + if c == codes.Float { + n, err := d.uint32() + if err != nil { + return 0, err + } + return math.Float32frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float32(n), nil +} + +// DecodeFloat64 decodes msgpack float32/64 into Go float64. 
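+// As a fallback it also accepts msgpack ints, converting them to float64,
+// so a value written with EncodeInt can still be read into a float field.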
+func (d *Decoder) DecodeFloat64() (float64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float64(c) +} + +func (d *Decoder) float64(c codes.Code) (float64, error) { + switch c { + case codes.Float: + n, err := d.float32(c) + if err != nil { + return 0, err + } + return float64(n), nil + case codes.Double: + n, err := d.uint64() + if err != nil { + return 0, err + } + return math.Float64frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float64(n), nil +} + +func (d *Decoder) DecodeUint() (uint, error) { + n, err := d.DecodeUint64() + return uint(n), err +} + +func (d *Decoder) DecodeUint8() (uint8, error) { + n, err := d.DecodeUint64() + return uint8(n), err +} + +func (d *Decoder) DecodeUint16() (uint16, error) { + n, err := d.DecodeUint64() + return uint16(n), err +} + +func (d *Decoder) DecodeUint32() (uint32, error) { + n, err := d.DecodeUint64() + return uint32(n), err +} + +func (d *Decoder) DecodeInt() (int, error) { + n, err := d.DecodeInt64() + return int(n), err +} + +func (d *Decoder) DecodeInt8() (int8, error) { + n, err := d.DecodeInt64() + return int8(n), err +} + +func (d *Decoder) DecodeInt16() (int16, error) { + n, err := d.DecodeInt64() + return int16(n), err +} + +func (d *Decoder) DecodeInt32() (int32, error) { + n, err := d.DecodeInt64() + return int32(n), err +} + +func decodeFloat32Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat32() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetFloat(float64(f)) + return nil +} + +func decodeFloat64Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetFloat(f) + return nil +} + +func decodeInt64Value(d *Decoder, v reflect.Value) error { + n, err := d.DecodeInt64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetInt(n) + return nil +} + +func decodeUint64Value(d *Decoder, v reflect.Value) error { + n, err := d.DecodeUint64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetUint(n) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_query.go b/vendor/github.com/vmihailenco/msgpack/decode_query.go new file mode 100644 index 0000000000..d680be80c0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_query.go @@ -0,0 +1,158 @@ +package msgpack + +import ( + "fmt" + "strconv" + "strings" + + "github.com/vmihailenco/msgpack/codes" +) + +type queryResult struct { + query string + key string + hasAsterisk bool + + values []interface{} +} + +func (q *queryResult) nextKey() { + ind := strings.IndexByte(q.query, '.') + if ind == -1 { + q.key = q.query + q.query = "" + return + } + q.key = q.query[:ind] + q.query = q.query[ind+1:] +} + +// Query extracts data specified by the query from the msgpack stream skipping +// any other data. Query consists of map keys and array indexes separated with dot, +// e.g. key1.0.key2. 
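+//
+// Sketch (illustrative; data is assumed to encode
+// map[string]interface{}{"phones": []interface{}{"123", "456"}}):
+//
+//	dec := msgpack.NewDecoder(bytes.NewReader(data))
+//	values, err := dec.Query("phones.1") // -> []interface{}{"456"}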
+func (d *Decoder) Query(query string) ([]interface{}, error) { + res := queryResult{ + query: query, + } + if err := d.query(&res); err != nil { + return nil, err + } + return res.values, nil +} + +func (d *Decoder) query(q *queryResult) error { + q.nextKey() + if q.key == "" { + v, err := d.decodeInterfaceCond() + if err != nil { + return err + } + q.values = append(q.values, v) + return nil + } + + code, err := d.PeekCode() + if err != nil { + return err + } + + switch { + case code == codes.Map16 || code == codes.Map32 || codes.IsFixedMap(code): + err = d.queryMapKey(q) + case code == codes.Array16 || code == codes.Array32 || codes.IsFixedArray(code): + err = d.queryArrayIndex(q) + default: + err = fmt.Errorf("msgpack: unsupported code=%x decoding key=%q", code, q.key) + } + return err +} + +func (d *Decoder) queryMapKey(q *queryResult) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + for i := 0; i < n; i++ { + k, err := d.bytesNoCopy() + if err != nil { + return err + } + + if string(k) == q.key { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext((n - i - 1) * 2) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) queryArrayIndex(q *queryResult) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + if q.key == "*" { + q.hasAsterisk = true + + query := q.query + for i := 0; i < n; i++ { + q.query = query + if err := d.query(q); err != nil { + return err + } + } + + q.hasAsterisk = false + return nil + } + + ind, err := strconv.Atoi(q.key) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if i == ind { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext(n - i - 1) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) skipNext(n int) error { + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_slice.go b/vendor/github.com/vmihailenco/msgpack/decode_slice.go new file mode 100644 index 0000000000..7d43ec610d --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_slice.go @@ -0,0 +1,193 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +const sliceElemsAllocLimit = 1e4 + +var sliceStringPtrType = reflect.TypeOf((*[]string)(nil)) + +// DecodeArrayLen decodes array length. Length is -1 when array is nil. 
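+//
+// Sketch of element-wise decoding (illustrative; Item is a hypothetical
+// element type):
+//
+//	n, err := dec.DecodeArrayLen()
+//	if err != nil || n == -1 {
+//		return err // -1 signals a nil array
+//	}
+//	for i := 0; i < n; i++ {
+//		var it Item
+//		if err := dec.Decode(&it); err != nil {
+//			return err
+//		}
+//	}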
+func (d *Decoder) DecodeArrayLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.arrayLen(c) +} + +func (d *Decoder) arrayLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } else if c >= codes.FixedArrayLow && c <= codes.FixedArrayHigh { + return int(c & codes.FixedArrayMask), nil + } + switch c { + case codes.Array16: + n, err := d.uint16() + return int(n), err + case codes.Array32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding array length", c) +} + +func decodeStringSliceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(sliceStringPtrType).Interface().(*[]string) + return d.decodeStringSlicePtr(ptr) +} + +func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + ss := setStringsCap(*ptr, n) + for i := 0; i < n; i++ { + s, err := d.DecodeString() + if err != nil { + return err + } + ss = append(ss, s) + } + *ptr = ss + + return nil +} + +func setStringsCap(s []string, n int) []string { + if n > sliceElemsAllocLimit { + n = sliceElemsAllocLimit + } + + if s == nil { + return make([]string, 0, n) + } + + if cap(s) >= n { + return s[:0] + } + + s = s[:cap(s)] + s = append(s, make([]string, n-len(s))...) + return s[:0] +} + +func decodeSliceValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + v.Set(reflect.Zero(v.Type())) + return nil + } + if n == 0 && v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + return nil + } + + if v.Cap() >= n { + v.Set(v.Slice(0, n)) + } else if v.Len() < v.Cap() { + v.Set(v.Slice(0, v.Cap())) + } + + for i := 0; i < n; i++ { + if i >= v.Len() { + v.Set(growSliceValue(v, n)) + } + elem := v.Index(i) + if err := d.DecodeValue(elem); err != nil { + return err + } + } + + return nil +} + +func growSliceValue(v reflect.Value, n int) reflect.Value { + diff := n - v.Len() + if diff > sliceElemsAllocLimit { + diff = sliceElemsAllocLimit + } + v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff)) + return v +} + +func decodeArrayValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + return nil + } + + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + for i := 0; i < n; i++ { + sv := v.Index(i) + if err := d.DecodeValue(sv); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) DecodeSlice() ([]interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.decodeSlice(c) +} + +func (d *Decoder) decodeSlice(c codes.Code) ([]interface{}, error) { + n, err := d.arrayLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + + s := make([]interface{}, 0, min(n, sliceElemsAllocLimit)) + for i := 0; i < n; i++ { + v, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + s = append(s, v) + } + + return s, nil +} + +func (d *Decoder) skipSlice(c codes.Code) error { + n, err := d.arrayLen(c) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_string.go b/vendor/github.com/vmihailenco/msgpack/decode_string.go new file mode 100644 index 0000000000..5402022ee9 --- 
/dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_string.go @@ -0,0 +1,175 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func (d *Decoder) bytesLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } else if codes.IsFixedString(c) { + return int(c & codes.FixedStrMask), nil + } + switch c { + case codes.Str8, codes.Bin8: + n, err := d.uint8() + return int(n), err + case codes.Str16, codes.Bin16: + n, err := d.uint16() + return int(n), err + case codes.Str32, codes.Bin32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding bytes length", c) +} + +func (d *Decoder) DecodeString() (string, error) { + c, err := d.readCode() + if err != nil { + return "", err + } + return d.string(c) +} + +func (d *Decoder) string(c codes.Code) (string, error) { + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + if n == -1 { + return "", nil + } + b, err := d.readN(n) + return string(b), err +} + +func decodeStringValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetString(s) + return nil +} + +func (d *Decoder) DecodeBytesLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.bytesLen(c) +} + +func (d *Decoder) DecodeBytes() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.bytes(c, nil) +} + +func (d *Decoder) bytes(c codes.Code, b []byte) ([]byte, error) { + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return readN(d.r, b, n) +} + +func (d *Decoder) bytesNoCopy() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return d.readN(n) +} + +func (d *Decoder) decodeBytesPtr(ptr *[]byte) error { + c, err := d.readCode() + if err != nil { + return err + } + return d.bytesPtr(c, ptr) +} + +func (d *Decoder) bytesPtr(c codes.Code, ptr *[]byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + *ptr, err = readN(d.r, *ptr, n) + return err +} + +func (d *Decoder) skipBytes(c codes.Code) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + return d.skipN(n) +} + +func decodeBytesValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + b, err := d.bytes(c, v.Bytes()) + if err != nil { + return err + } + + if err = mustSet(v); err != nil { + return err + } + v.SetBytes(b) + + return nil +} + +func decodeByteArrayValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + b := v.Slice(0, n).Bytes() + return d.readFull(b) +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_value.go b/vendor/github.com/vmihailenco/msgpack/decode_value.go new file mode 100644 index 0000000000..d458de8600 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_value.go @@ -0,0 +1,236 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" +) + +var interfaceType = 
reflect.TypeOf((*interface{})(nil)).Elem() +var stringType = reflect.TypeOf((*string)(nil)).Elem() + +var valueDecoders []decoderFunc + +func init() { + valueDecoders = []decoderFunc{ + reflect.Bool: decodeBoolValue, + reflect.Int: decodeInt64Value, + reflect.Int8: decodeInt64Value, + reflect.Int16: decodeInt64Value, + reflect.Int32: decodeInt64Value, + reflect.Int64: decodeInt64Value, + reflect.Uint: decodeUint64Value, + reflect.Uint8: decodeUint64Value, + reflect.Uint16: decodeUint64Value, + reflect.Uint32: decodeUint64Value, + reflect.Uint64: decodeUint64Value, + reflect.Float32: decodeFloat32Value, + reflect.Float64: decodeFloat64Value, + reflect.Complex64: decodeUnsupportedValue, + reflect.Complex128: decodeUnsupportedValue, + reflect.Array: decodeArrayValue, + reflect.Chan: decodeUnsupportedValue, + reflect.Func: decodeUnsupportedValue, + reflect.Interface: decodeInterfaceValue, + reflect.Map: decodeMapValue, + reflect.Ptr: decodeUnsupportedValue, + reflect.Slice: decodeSliceValue, + reflect.String: decodeStringValue, + reflect.Struct: decodeStructValue, + reflect.UnsafePointer: decodeUnsupportedValue, + } +} + +func mustSet(v reflect.Value) error { + if !v.CanSet() { + return fmt.Errorf("msgpack: Decode(nonsettable %s)", v.Type()) + } + return nil +} + +func getDecoder(typ reflect.Type) decoderFunc { + kind := typ.Kind() + + decoder, ok := typDecMap[typ] + if ok { + return decoder + } + + if typ.Implements(customDecoderType) { + return decodeCustomValue + } + if typ.Implements(unmarshalerType) { + return unmarshalValue + } + + // Addressable struct field value. + if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customDecoderType) { + return decodeCustomValueAddr + } + if ptr.Implements(unmarshalerType) { + return unmarshalValueAddr + } + } + + switch kind { + case reflect.Ptr: + return ptrDecoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + switch elem.Kind() { + case reflect.Uint8: + return decodeBytesValue + } + switch elem { + case stringType: + return decodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return decodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return decodeMapStringStringValue + case interfaceType: + return decodeMapStringInterfaceValue + } + } + } + return valueDecoders[kind] +} + +func ptrDecoderFunc(typ reflect.Type) decoderFunc { + decoder := getDecoder(typ.Elem()) + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + if err := mustSet(v); err != nil { + return err + } + if !v.IsNil() { + v.Set(reflect.Zero(v.Type())) + } + return d.DecodeNil() + } + if v.IsNil() { + if err := mustSet(v); err != nil { + return err + } + v.Set(reflect.New(v.Type().Elem())) + } + return decoder(d, v.Elem()) + } +} + +func decodeCustomValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return decodeCustomValue(d, v.Addr()) +} + +func decodeCustomValue(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + decoder := v.Interface().(CustomDecoder) + return decoder.DecodeMsgpack(d) +} + +func unmarshalValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return unmarshalValue(d, v.Addr()) +} + +func unmarshalValue(d *Decoder, v 
reflect.Value) error { + if d.extLen == 0 || d.extLen == 1 { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + if d.extLen != 0 { + b, err := d.readN(d.extLen) + if err != nil { + return err + } + d.rec = b + } else { + d.rec = makeBuffer() + if err := d.Skip(); err != nil { + return err + } + } + + unmarshaler := v.Interface().(Unmarshaler) + err := unmarshaler.UnmarshalMsgpack(d.rec) + d.rec = nil + return err +} + +func decodeBoolValue(d *Decoder, v reflect.Value) error { + flag, err := d.DecodeBool() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetBool(flag) + return nil +} + +func decodeInterfaceValue(d *Decoder, v reflect.Value) error { + if v.IsNil() { + return d.interfaceValue(v) + } + + elem := v.Elem() + if !elem.CanAddr() { + if d.hasNilCode() { + v.Set(reflect.Zero(v.Type())) + return d.DecodeNil() + } + } + + return d.DecodeValue(elem) +} + +func (d *Decoder) interfaceValue(v reflect.Value) error { + vv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + + if vv != nil { + if v.Type() == errorType { + if vv, ok := vv.(string); ok { + v.Set(reflect.ValueOf(errors.New(vv))) + return nil + } + } + + v.Set(reflect.ValueOf(vv)) + } + + return nil +} + +func decodeUnsupportedValue(d *Decoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Decode(unsupported %s)", v.Type()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode.go b/vendor/github.com/vmihailenco/msgpack/encode.go new file mode 100644 index 0000000000..08ca7dec6d --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode.go @@ -0,0 +1,177 @@ +package msgpack + +import ( + "bytes" + "io" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +type writer interface { + io.Writer + WriteByte(byte) error + WriteString(string) (int, error) +} + +type byteWriter struct { + io.Writer + + buf []byte + bootstrap [64]byte +} + +func newByteWriter(w io.Writer) *byteWriter { + bw := &byteWriter{ + Writer: w, + } + bw.buf = bw.bootstrap[:] + return bw +} + +func (w *byteWriter) WriteByte(c byte) error { + w.buf = w.buf[:1] + w.buf[0] = c + _, err := w.Write(w.buf) + return err +} + +func (w *byteWriter) WriteString(s string) (int, error) { + w.buf = append(w.buf[:0], s...) + return w.Write(w.buf) +} + +// Marshal returns the MessagePack encoding of v. +func Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + err := NewEncoder(&buf).Encode(v) + return buf.Bytes(), err +} + +type Encoder struct { + w writer + + buf []byte + // timeBuf is lazily allocated in encodeTime() to + // avoid allocations when time.Time value are encoded + // + // buf can't be reused for time encoding, as buf is used + // to encode msgpack extLen + timeBuf []byte + + sortMapKeys bool + structAsArray bool + useJSONTag bool + useCompact bool +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + bw, ok := w.(writer) + if !ok { + bw = newByteWriter(w) + } + return &Encoder{ + w: bw, + buf: make([]byte, 9), + } +} + +// SortMapKeys causes the Encoder to encode map keys in increasing order. +// Supported map types are: +// - map[string]string +// - map[string]interface{} +func (e *Encoder) SortMapKeys(flag bool) *Encoder { + e.sortMapKeys = flag + return e +} + +// StructAsArray causes the Encoder to encode Go structs as MessagePack arrays. 
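+//
+// Sketch, reusing the Item struct from the quickstart (illustrative):
+//
+//	var buf bytes.Buffer
+//	err := msgpack.NewEncoder(&buf).StructAsArray(true).Encode(Item{Foo: "bar"})
+//	// buf now holds a 1-element array instead of a {"Foo": "bar"} map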
+func (e *Encoder) StructAsArray(flag bool) *Encoder {
+	e.structAsArray = flag
+	return e
+}
+
+// UseJSONTag causes the Encoder to use json struct tag as fallback option
+// if there is no msgpack tag.
+func (e *Encoder) UseJSONTag(flag bool) *Encoder {
+	e.useJSONTag = flag
+	return e
+}
+
+// UseCompactEncoding causes the Encoder to choose the most compact encoding.
+// For example, it allows encoding a Go int64 as msgpack int8, saving 7 bytes.
+func (e *Encoder) UseCompactEncoding(flag bool) *Encoder {
+	e.useCompact = flag
+	return e
+}
+
+func (e *Encoder) Encode(v interface{}) error {
+	switch v := v.(type) {
+	case nil:
+		return e.EncodeNil()
+	case string:
+		return e.EncodeString(v)
+	case []byte:
+		return e.EncodeBytes(v)
+	case int:
+		return e.encodeInt64Cond(int64(v))
+	case int64:
+		return e.encodeInt64Cond(v)
+	case uint:
+		return e.encodeUint64Cond(uint64(v))
+	case uint64:
+		return e.encodeUint64Cond(v)
+	case bool:
+		return e.EncodeBool(v)
+	case float32:
+		return e.EncodeFloat32(v)
+	case float64:
+		return e.EncodeFloat64(v)
+	case time.Duration:
+		return e.encodeInt64Cond(int64(v))
+	case time.Time:
+		return e.EncodeTime(v)
+	}
+	return e.EncodeValue(reflect.ValueOf(v))
+}
+
+func (e *Encoder) EncodeMulti(v ...interface{}) error {
+	for _, vv := range v {
+		if err := e.Encode(vv); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *Encoder) EncodeValue(v reflect.Value) error {
+	fn := getEncoder(v.Type())
+	return fn(e, v)
+}
+
+func (e *Encoder) EncodeNil() error {
+	return e.writeCode(codes.Nil)
+}
+
+func (e *Encoder) EncodeBool(value bool) error {
+	if value {
+		return e.writeCode(codes.True)
+	}
+	return e.writeCode(codes.False)
+}
+
+func (e *Encoder) writeCode(c codes.Code) error {
+	return e.w.WriteByte(byte(c))
+}
+
+func (e *Encoder) write(b []byte) error {
+	_, err := e.w.Write(b)
+	return err
+}
+
+func (e *Encoder) writeString(s string) error {
+	_, err := e.w.WriteString(s)
+	return err
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/encode_map.go b/vendor/github.com/vmihailenco/msgpack/encode_map.go
new file mode 100644
index 0000000000..a87c4075fe
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/encode_map.go
@@ -0,0 +1,172 @@
+package msgpack
+
+import (
+	"reflect"
+	"sort"
+
+	"github.com/vmihailenco/msgpack/codes"
+)
+
+func encodeMapValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err := e.EncodeMapLen(v.Len()); err != nil {
+		return err
+	}
+
+	for _, key := range v.MapKeys() {
+		if err := e.EncodeValue(key); err != nil {
+			return err
+		}
+		if err := e.EncodeValue(v.MapIndex(key)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeMapStringStringValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err := e.EncodeMapLen(v.Len()); err != nil {
+		return err
+	}
+
+	m := v.Convert(mapStringStringType).Interface().(map[string]string)
+	if e.sortMapKeys {
+		return e.encodeSortedMapStringString(m)
+	}
+
+	for mk, mv := range m {
+		if err := e.EncodeString(mk); err != nil {
+			return err
+		}
+		if err := e.EncodeString(mv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err := e.EncodeMapLen(v.Len()); err != nil {
+		return err
+	}
+
+	m := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{})
+	if e.sortMapKeys {
+		return e.encodeSortedMapStringInterface(m)
+	}
+
+	for mk, mv := range m {
+		if err := e.EncodeString(mk); err != nil {
+			return err
+		}
+		if err := e.Encode(mv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) encodeSortedMapStringString(m map[string]string) error {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		err := e.EncodeString(k)
+		if err != nil {
+			return err
+		}
+		if err = e.EncodeString(m[k]); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) encodeSortedMapStringInterface(m map[string]interface{}) error {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		err := e.EncodeString(k)
+		if err != nil {
+			return err
+		}
+		if err = e.Encode(m[k]); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) EncodeMapLen(l int) error {
+	if l < 16 {
+		return e.writeCode(codes.FixedMapLow | codes.Code(l))
+	}
+	if l < 65536 {
+		return e.write2(codes.Map16, uint16(l))
+	}
+	return e.write4(codes.Map32, uint32(l))
+}
+
+func encodeStructValue(e *Encoder, strct reflect.Value) error {
+	var structFields *fields
+	if e.useJSONTag {
+		structFields = jsonStructs.Fields(strct.Type())
+	} else {
+		structFields = structs.Fields(strct.Type())
+	}
+
+	if e.structAsArray || structFields.AsArray {
+		return encodeStructValueAsArray(e, strct, structFields.List)
+	}
+	fields := structFields.OmitEmpty(strct)
+
+	if err := e.EncodeMapLen(len(fields)); err != nil {
+		return err
+	}
+
+	for _, f := range fields {
+		if err := e.EncodeString(f.name); err != nil {
+			return err
+		}
+		if err := f.EncodeValue(e, strct); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeStructValueAsArray(e *Encoder, strct reflect.Value, fields []*field) error {
+	if err := e.EncodeArrayLen(len(fields)); err != nil {
+		return err
+	}
+	for _, f := range fields {
+		if err := f.EncodeValue(e, strct); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/encode_number.go b/vendor/github.com/vmihailenco/msgpack/encode_number.go
new file mode 100644
index 0000000000..dd7db6fdd8
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/encode_number.go
@@ -0,0 +1,230 @@
+package msgpack
+
+import (
+	"math"
+	"reflect"
+
+	"github.com/vmihailenco/msgpack/codes"
+)
+
+// EncodeUint8 encodes a uint8 in 2 bytes preserving type of the number.
+func (e *Encoder) EncodeUint8(n uint8) error {
+	return e.write1(codes.Uint8, n)
+}
+
+func (e *Encoder) encodeUint8Cond(n uint8) error {
+	if e.useCompact {
+		return e.EncodeUint(uint64(n))
+	}
+	return e.EncodeUint8(n)
+}
+
+// EncodeUint16 encodes a uint16 in 3 bytes preserving type of the number.
+func (e *Encoder) EncodeUint16(n uint16) error {
+	return e.write2(codes.Uint16, n)
+}
+
+func (e *Encoder) encodeUint16Cond(n uint16) error {
+	if e.useCompact {
+		return e.EncodeUint(uint64(n))
+	}
+	return e.EncodeUint16(n)
+}
+
+// EncodeUint32 encodes a uint32 in 5 bytes preserving type of the number.
+func (e *Encoder) EncodeUint32(n uint32) error {
+	return e.write4(codes.Uint32, n)
+}
+
+func (e *Encoder) encodeUint32Cond(n uint32) error {
+	if e.useCompact {
+		return e.EncodeUint(uint64(n))
+	}
+	return e.EncodeUint32(n)
+}
+
+// EncodeUint64 encodes a uint64 in 9 bytes preserving type of the number.
+func (e *Encoder) EncodeUint64(n uint64) error {
+	return e.write8(codes.Uint64, n)
+}
+
+func (e *Encoder) encodeUint64Cond(n uint64) error {
+	if e.useCompact {
+		return e.EncodeUint(n)
+	}
+	return e.EncodeUint64(n)
+}
+
+// EncodeInt8 encodes an int8 in 2 bytes preserving type of the number.
+func (e *Encoder) EncodeInt8(n int8) error {
+	return e.write1(codes.Int8, uint8(n))
+}
+
+func (e *Encoder) encodeInt8Cond(n int8) error {
+	if e.useCompact {
+		return e.EncodeInt(int64(n))
+	}
+	return e.EncodeInt8(n)
+}
+
+// EncodeInt16 encodes an int16 in 3 bytes preserving type of the number.
+func (e *Encoder) EncodeInt16(n int16) error {
+	return e.write2(codes.Int16, uint16(n))
+}
+
+func (e *Encoder) encodeInt16Cond(n int16) error {
+	if e.useCompact {
+		return e.EncodeInt(int64(n))
+	}
+	return e.EncodeInt16(n)
+}
+
+// EncodeInt32 encodes an int32 in 5 bytes preserving type of the number.
+func (e *Encoder) EncodeInt32(n int32) error {
+	return e.write4(codes.Int32, uint32(n))
+}
+
+func (e *Encoder) encodeInt32Cond(n int32) error {
+	if e.useCompact {
+		return e.EncodeInt(int64(n))
+	}
+	return e.EncodeInt32(n)
+}
+
+// EncodeInt64 encodes an int64 in 9 bytes preserving type of the number.
+func (e *Encoder) EncodeInt64(n int64) error {
+	return e.write8(codes.Int64, uint64(n))
+}
+
+func (e *Encoder) encodeInt64Cond(n int64) error {
+	if e.useCompact {
+		return e.EncodeInt(n)
+	}
+	return e.EncodeInt64(n)
+}
+
+// EncodeUint encodes a uint64 in 1, 2, 3, 5, or 9 bytes.
+// The type of the number is lost during encoding.
+func (e *Encoder) EncodeUint(n uint64) error {
+	if n <= math.MaxInt8 {
+		return e.w.WriteByte(byte(n))
+	}
+	if n <= math.MaxUint8 {
+		return e.EncodeUint8(uint8(n))
+	}
+	if n <= math.MaxUint16 {
+		return e.EncodeUint16(uint16(n))
+	}
+	if n <= math.MaxUint32 {
+		return e.EncodeUint32(uint32(n))
+	}
+	return e.EncodeUint64(uint64(n))
+}
+
+// EncodeInt encodes an int64 in 1, 2, 3, 5, or 9 bytes.
+// The type of the number is lost during encoding.
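+//
+// This is the path taken by UseCompactEncoding; a sketch (illustrative):
+//
+//	var buf bytes.Buffer
+//	err := msgpack.NewEncoder(&buf).UseCompactEncoding(true).Encode(int64(1))
+//	// buf.Len() == 1: the value fits in a single positive fixnum byte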
+func (e *Encoder) EncodeInt(n int64) error { + if n >= 0 { + return e.EncodeUint(uint64(n)) + } + if n >= int64(int8(codes.NegFixedNumLow)) { + return e.w.WriteByte(byte(n)) + } + if n >= math.MinInt8 { + return e.EncodeInt8(int8(n)) + } + if n >= math.MinInt16 { + return e.EncodeInt16(int16(n)) + } + if n >= math.MinInt32 { + return e.EncodeInt32(int32(n)) + } + return e.EncodeInt64(int64(n)) +} + +func (e *Encoder) EncodeFloat32(n float32) error { + return e.write4(codes.Float, math.Float32bits(n)) +} + +func (e *Encoder) EncodeFloat64(n float64) error { + return e.write8(codes.Double, math.Float64bits(n)) +} + +func (e *Encoder) write1(code codes.Code, n uint8) error { + e.buf = e.buf[:2] + e.buf[0] = byte(code) + e.buf[1] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write2(code codes.Code, n uint16) error { + e.buf = e.buf[:3] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 8) + e.buf[2] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write4(code codes.Code, n uint32) error { + e.buf = e.buf[:5] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 24) + e.buf[2] = byte(n >> 16) + e.buf[3] = byte(n >> 8) + e.buf[4] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write8(code codes.Code, n uint64) error { + e.buf = e.buf[:9] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 56) + e.buf[2] = byte(n >> 48) + e.buf[3] = byte(n >> 40) + e.buf[4] = byte(n >> 32) + e.buf[5] = byte(n >> 24) + e.buf[6] = byte(n >> 16) + e.buf[7] = byte(n >> 8) + e.buf[8] = byte(n) + return e.write(e.buf) +} + +func encodeUint8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint8Cond(uint8(v.Uint())) +} + +func encodeUint16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint16Cond(uint16(v.Uint())) +} + +func encodeUint32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint32Cond(uint32(v.Uint())) +} + +func encodeUint64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint64Cond(v.Uint()) +} + +func encodeInt8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt8Cond(int8(v.Int())) +} + +func encodeInt16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt16Cond(int16(v.Int())) +} + +func encodeInt32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt32Cond(int32(v.Int())) +} + +func encodeInt64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt64Cond(v.Int()) +} + +func encodeFloat32Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat32(float32(v.Float())) +} + +func encodeFloat64Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat64(v.Float()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode_slice.go b/vendor/github.com/vmihailenco/msgpack/encode_slice.go new file mode 100644 index 0000000000..5ddbd63117 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode_slice.go @@ -0,0 +1,124 @@ +package msgpack + +import ( + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func encodeStringValue(e *Encoder, v reflect.Value) error { + return e.EncodeString(v.String()) +} + +func encodeByteSliceValue(e *Encoder, v reflect.Value) error { + return e.EncodeBytes(v.Bytes()) +} + +func encodeByteArrayValue(e *Encoder, v reflect.Value) error { + if err := e.EncodeBytesLen(v.Len()); err != nil { + return err + } + + if v.CanAddr() { + b := v.Slice(0, v.Len()).Bytes() + return e.write(b) + } + + e.buf = grow(e.buf, v.Len()) + reflect.Copy(reflect.ValueOf(e.buf), v) + return e.write(e.buf) +} + +func grow(b []byte, n int) []byte { + 
if cap(b) >= n { + return b[:n] + } + b = b[:cap(b)] + b = append(b, make([]byte, n-len(b))...) + return b +} + +func (e *Encoder) EncodeBytesLen(l int) error { + if l < 256 { + return e.write1(codes.Bin8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Bin16, uint16(l)) + } + return e.write4(codes.Bin32, uint32(l)) +} + +func (e *Encoder) encodeStrLen(l int) error { + if l < 32 { + return e.writeCode(codes.FixedStrLow | codes.Code(l)) + } + if l < 256 { + return e.write1(codes.Str8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Str16, uint16(l)) + } + return e.write4(codes.Str32, uint32(l)) +} + +func (e *Encoder) EncodeString(v string) error { + if err := e.encodeStrLen(len(v)); err != nil { + return err + } + return e.writeString(v) +} + +func (e *Encoder) EncodeBytes(v []byte) error { + if v == nil { + return e.EncodeNil() + } + if err := e.EncodeBytesLen(len(v)); err != nil { + return err + } + return e.write(v) +} + +func (e *Encoder) EncodeArrayLen(l int) error { + if l < 16 { + return e.writeCode(codes.FixedArrayLow | codes.Code(l)) + } + if l < 65536 { + return e.write2(codes.Array16, uint16(l)) + } + return e.write4(codes.Array32, uint32(l)) +} + +func (e *Encoder) encodeStringSlice(s []string) error { + if s == nil { + return e.EncodeNil() + } + if err := e.EncodeArrayLen(len(s)); err != nil { + return err + } + for _, v := range s { + if err := e.EncodeString(v); err != nil { + return err + } + } + return nil +} + +func encodeSliceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encodeArrayValue(e, v) +} + +func encodeArrayValue(e *Encoder, v reflect.Value) error { + l := v.Len() + if err := e.EncodeArrayLen(l); err != nil { + return err + } + for i := 0; i < l; i++ { + if err := e.EncodeValue(v.Index(i)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode_value.go b/vendor/github.com/vmihailenco/msgpack/encode_value.go new file mode 100644 index 0000000000..b46ab02a18 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode_value.go @@ -0,0 +1,167 @@ +package msgpack + +import ( + "fmt" + "reflect" +) + +var valueEncoders []encoderFunc + +func init() { + valueEncoders = []encoderFunc{ + reflect.Bool: encodeBoolValue, + reflect.Int: encodeInt64CondValue, + reflect.Int8: encodeInt8CondValue, + reflect.Int16: encodeInt16CondValue, + reflect.Int32: encodeInt32CondValue, + reflect.Int64: encodeInt64CondValue, + reflect.Uint: encodeUint64CondValue, + reflect.Uint8: encodeUint8CondValue, + reflect.Uint16: encodeUint16CondValue, + reflect.Uint32: encodeUint32CondValue, + reflect.Uint64: encodeUint64CondValue, + reflect.Float32: encodeFloat32Value, + reflect.Float64: encodeFloat64Value, + reflect.Complex64: encodeUnsupportedValue, + reflect.Complex128: encodeUnsupportedValue, + reflect.Array: encodeArrayValue, + reflect.Chan: encodeUnsupportedValue, + reflect.Func: encodeUnsupportedValue, + reflect.Interface: encodeInterfaceValue, + reflect.Map: encodeMapValue, + reflect.Ptr: encodeUnsupportedValue, + reflect.Slice: encodeSliceValue, + reflect.String: encodeStringValue, + reflect.Struct: encodeStructValue, + reflect.UnsafePointer: encodeUnsupportedValue, + } +} + +func getEncoder(typ reflect.Type) encoderFunc { + if encoder, ok := typEncMap[typ]; ok { + return encoder + } + + if typ.Implements(customEncoderType) { + return encodeCustomValue + } + if typ.Implements(marshalerType) { + return marshalValue + } + + kind := typ.Kind() + + // Addressable struct field 
value. + if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customEncoderType) { + return encodeCustomValuePtr + } + if ptr.Implements(marshalerType) { + return marshalValuePtr + } + } + + if typ == errorType { + return encodeErrorValue + } + + switch kind { + case reflect.Ptr: + return ptrEncoderFunc(typ) + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return encodeMapStringStringValue + case interfaceType: + return encodeMapStringInterfaceValue + } + } + } + return valueEncoders[kind] +} + +func ptrEncoderFunc(typ reflect.Type) encoderFunc { + encoder := getEncoder(typ.Elem()) + return func(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encoder(e, v.Elem()) + } +} + +func encodeCustomValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + encoder := v.Addr().Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func encodeCustomValue(e *Encoder, v reflect.Value) error { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return e.EncodeNil() + } + } + + encoder := v.Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func marshalValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalValue(e, v.Addr()) +} + +func marshalValue(e *Encoder, v reflect.Value) error { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return e.EncodeNil() + } + } + + marshaler := v.Interface().(Marshaler) + b, err := marshaler.MarshalMsgpack() + if err != nil { + return err + } + _, err = e.w.Write(b) + return err +} + +func encodeBoolValue(e *Encoder, v reflect.Value) error { + return e.EncodeBool(v.Bool()) +} + +func encodeInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeValue(v.Elem()) +} + +func encodeErrorValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeString(v.Interface().(error).Error()) +} + +func encodeUnsupportedValue(e *Encoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Encode(unsupported %s)", v.Type()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/ext.go b/vendor/github.com/vmihailenco/msgpack/ext.go new file mode 100644 index 0000000000..2f9dfea3a2 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/ext.go @@ -0,0 +1,244 @@ +package msgpack + +import ( + "bytes" + "fmt" + "reflect" + "sync" + + "github.com/vmihailenco/msgpack/codes" +) + +type extInfo struct { + Type reflect.Type + Decoder decoderFunc +} + +var extTypes = make(map[int8]extInfo) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +// RegisterExt records a type, identified by a value for that type, +// under the provided id. That id will identify the concrete type of a value +// sent or received as an interface variable. Only types that will be +// transferred as implementations of interface values need to be registered. 
+// Expecting to be used only during initialization, it panics if the mapping +// between types and ids is not a bijection. +func RegisterExt(id int8, value interface{}) { + typ := reflect.TypeOf(value) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + ptr := reflect.PtrTo(typ) + + if _, ok := extTypes[id]; ok { + panic(fmt.Errorf("msgpack: ext with id=%d is already registered", id)) + } + + registerExt(id, ptr, getEncoder(ptr), getDecoder(ptr)) + registerExt(id, typ, getEncoder(typ), getDecoder(typ)) +} + +func registerExt(id int8, typ reflect.Type, enc encoderFunc, dec decoderFunc) { + if enc != nil { + typEncMap[typ] = makeExtEncoder(id, enc) + } + if dec != nil { + extTypes[id] = extInfo{ + Type: typ, + Decoder: dec, + } + typDecMap[typ] = makeExtDecoder(id, dec) + } +} + +func (e *Encoder) EncodeExtHeader(typeId int8, length int) error { + if err := e.encodeExtLen(length); err != nil { + return err + } + if err := e.w.WriteByte(byte(typeId)); err != nil { + return err + } + return nil +} + +func makeExtEncoder(typeId int8, enc encoderFunc) encoderFunc { + return func(e *Encoder, v reflect.Value) error { + buf := bufferPool.Get().(*bytes.Buffer) + defer bufferPool.Put(buf) + buf.Reset() + + oldw := e.w + e.w = buf + err := enc(e, v) + e.w = oldw + + if err != nil { + return err + } + + err = e.EncodeExtHeader(typeId, buf.Len()) + if err != nil { + return err + } + return e.write(buf.Bytes()) + } +} + +func makeExtDecoder(typeId int8, dec decoderFunc) decoderFunc { + return func(d *Decoder, v reflect.Value) error { + c, err := d.PeekCode() + if err != nil { + return err + } + + if !codes.IsExt(c) { + return dec(d, v) + } + + id, extLen, err := d.DecodeExtHeader() + if err != nil { + return err + } + + if int8(id) != typeId { + return fmt.Errorf("msgpack: got ext type=%d, wanted %d", int8(id), typeId) + } + + d.extLen = extLen + return dec(d, v) + } +} + +func (e *Encoder) encodeExtLen(l int) error { + switch l { + case 1: + return e.writeCode(codes.FixExt1) + case 2: + return e.writeCode(codes.FixExt2) + case 4: + return e.writeCode(codes.FixExt4) + case 8: + return e.writeCode(codes.FixExt8) + case 16: + return e.writeCode(codes.FixExt16) + } + if l < 256 { + return e.write1(codes.Ext8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Ext16, uint16(l)) + } + return e.write4(codes.Ext32, uint32(l)) +} + +func (d *Decoder) parseExtLen(c codes.Code) (int, error) { + switch c { + case codes.FixExt1: + return 1, nil + case codes.FixExt2: + return 2, nil + case codes.FixExt4: + return 4, nil + case codes.FixExt8: + return 8, nil + case codes.FixExt16: + return 16, nil + case codes.Ext8: + n, err := d.uint8() + return int(n), err + case codes.Ext16: + n, err := d.uint16() + return int(n), err + case codes.Ext32: + n, err := d.uint32() + return int(n), err + default: + return 0, fmt.Errorf("msgpack: invalid code=%x decoding ext length", c) + } +} + +func (d *Decoder) decodeExtHeader(c codes.Code) (int8, int, error) { + length, err := d.parseExtLen(c) + if err != nil { + return 0, 0, err + } + + typeId, err := d.readCode() + if err != nil { + return 0, 0, err + } + + return int8(typeId), length, nil +} + +func (d *Decoder) DecodeExtHeader() (typeId int8, length int, err error) { + c, err := d.readCode() + if err != nil { + return + } + return d.decodeExtHeader(c) +} + +func (d *Decoder) extInterface(c codes.Code) (interface{}, error) { + extId, extLen, err := d.decodeExtHeader(c) + if err != nil { + return nil, err + } + + info, ok := extTypes[extId] + if !ok { + return nil, 
fmt.Errorf("msgpack: unknown ext id=%d", extId) + } + + v := reflect.New(info.Type) + + d.extLen = extLen + err = info.Decoder(d, v.Elem()) + d.extLen = 0 + if err != nil { + return nil, err + } + + return v.Interface(), nil +} + +func (d *Decoder) skipExt(c codes.Code) error { + n, err := d.parseExtLen(c) + if err != nil { + return err + } + return d.skipN(n + 1) +} + +func (d *Decoder) skipExtHeader(c codes.Code) error { + // Read ext type. + _, err := d.readCode() + if err != nil { + return err + } + // Read ext body len. + for i := 0; i < extHeaderLen(c); i++ { + _, err := d.readCode() + if err != nil { + return err + } + } + return nil +} + +func extHeaderLen(c codes.Code) int { + switch c { + case codes.Ext8: + return 1 + case codes.Ext16: + return 2 + case codes.Ext32: + return 4 + } + return 0 +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/msgpack.go b/vendor/github.com/vmihailenco/msgpack/msgpack.go similarity index 100% rename from vendor/github.com/vmihailenco/msgpack/v4/msgpack.go rename to vendor/github.com/vmihailenco/msgpack/msgpack.go diff --git a/vendor/github.com/vmihailenco/msgpack/tag.go b/vendor/github.com/vmihailenco/msgpack/tag.go new file mode 100644 index 0000000000..48e6f942c0 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/tag.go @@ -0,0 +1,42 @@ +package msgpack + +import ( + "strings" +) + +type tagOptions string + +func (o tagOptions) Get(name string) (string, bool) { + s := string(o) + for len(s) > 0 { + var next string + idx := strings.IndexByte(s, ',') + if idx >= 0 { + s, next = s[:idx], s[idx+1:] + } + if strings.HasPrefix(s, name) { + return s[len(name):], true + } + s = next + } + return "", false +} + +func (o tagOptions) Contains(name string) bool { + _, ok := o.Get(name) + return ok +} + +func parseTag(tag string) (string, tagOptions) { + if idx := strings.IndexByte(tag, ','); idx != -1 { + name := tag[:idx] + if strings.IndexByte(name, ':') == -1 { + return name, tagOptions(tag[idx+1:]) + } + } + + if strings.IndexByte(tag, ':') == -1 { + return tag, "" + } + return "", tagOptions(tag) +} diff --git a/vendor/github.com/vmihailenco/msgpack/time.go b/vendor/github.com/vmihailenco/msgpack/time.go new file mode 100644 index 0000000000..3cf01e4c82 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/time.go @@ -0,0 +1,149 @@ +package msgpack + +import ( + "encoding/binary" + "fmt" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +var timeExtId int8 = -1 + +func init() { + timeType := reflect.TypeOf((*time.Time)(nil)).Elem() + registerExt(timeExtId, timeType, encodeTimeValue, decodeTimeValue) +} + +func (e *Encoder) EncodeTime(tm time.Time) error { + b := e.encodeTime(tm) + if err := e.encodeExtLen(len(b)); err != nil { + return err + } + if err := e.w.WriteByte(byte(timeExtId)); err != nil { + return err + } + return e.write(b) +} + +func (e *Encoder) encodeTime(tm time.Time) []byte { + if e.timeBuf == nil { + e.timeBuf = make([]byte, 12) + } + + secs := uint64(tm.Unix()) + if secs>>34 == 0 { + data := uint64(tm.Nanosecond())<<34 | secs + if data&0xffffffff00000000 == 0 { + b := e.timeBuf[:4] + binary.BigEndian.PutUint32(b, uint32(data)) + return b + } else { + b := e.timeBuf[:8] + binary.BigEndian.PutUint64(b, data) + return b + } + } + + b := e.timeBuf[:12] + binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond())) + binary.BigEndian.PutUint64(b[4:], uint64(secs)) + return b +} + +func (d *Decoder) DecodeTime() (time.Time, error) { + tm, err := d.decodeTime() + if err != nil { + return tm, err + } + + if 
tm.IsZero() { + // Assume that zero time does not have timezone information. + return tm.UTC(), nil + } + return tm, nil +} + +func (d *Decoder) decodeTime() (time.Time, error) { + extLen := d.extLen + d.extLen = 0 + if extLen == 0 { + c, err := d.readCode() + if err != nil { + return time.Time{}, err + } + + // Legacy format. + if c == codes.FixedArrayLow|2 { + sec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + nsec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + return time.Unix(sec, nsec), nil + } + + if codes.IsString(c) { + s, err := d.string(c) + if err != nil { + return time.Time{}, err + } + return time.Parse(time.RFC3339Nano, s) + } + + extLen, err = d.parseExtLen(c) + if err != nil { + return time.Time{}, err + } + + // Skip ext id. Propagate the read error instead of swallowing it. + _, err = d.s.ReadByte() + if err != nil { + return time.Time{}, err + } + } + + b, err := d.readN(extLen) + if err != nil { + return time.Time{}, err + } + + switch len(b) { + case 4: + sec := binary.BigEndian.Uint32(b) + return time.Unix(int64(sec), 0), nil + case 8: + sec := binary.BigEndian.Uint64(b) + nsec := int64(sec >> 34) + sec &= 0x00000003ffffffff + return time.Unix(int64(sec), nsec), nil + case 12: + nsec := binary.BigEndian.Uint32(b) + sec := binary.BigEndian.Uint64(b[4:]) + return time.Unix(int64(sec), int64(nsec)), nil + default: + err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen) + return time.Time{}, err + } +} + +func encodeTimeValue(e *Encoder, v reflect.Value) error { + tm := v.Interface().(time.Time) + b := e.encodeTime(tm) + return e.write(b) +} + +func decodeTimeValue(d *Decoder, v reflect.Value) error { + tm, err := d.DecodeTime() + if err != nil { + return err + } + v.Set(reflect.ValueOf(tm)) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/types.go b/vendor/github.com/vmihailenco/msgpack/types.go new file mode 100644 index 0000000000..6a1bf7f913 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/types.go @@ -0,0 +1,310 @@ +package msgpack + +import ( + "reflect" + "sync" +) + +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +var customEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem() +var customDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem() + +var marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +var unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + +type encoderFunc func(*Encoder, reflect.Value) error +type decoderFunc func(*Decoder, reflect.Value) error + +var typEncMap = make(map[reflect.Type]encoderFunc) +var typDecMap = make(map[reflect.Type]decoderFunc) + +// Register registers encoder and decoder functions for a value. +// This is a low-level API; in most cases you should prefer implementing +// the Marshaler/CustomEncoder and Unmarshaler/CustomDecoder interfaces.
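+//
+// A minimal sketch for a hypothetical Point type; the encoder and decoder
+// bodies below are assumptions for illustration, not upstream code:
+//
+//	msgpack.Register(Point{},
+//		func(e *msgpack.Encoder, v reflect.Value) error {
+//			p := v.Interface().(Point)
+//			return e.EncodeString(fmt.Sprintf("%d,%d", p.X, p.Y))
+//		},
+//		func(d *msgpack.Decoder, v reflect.Value) error {
+//			s, err := d.DecodeString()
+//			if err != nil {
+//				return err
+//			}
+//			var p Point
+//			if _, err := fmt.Sscanf(s, "%d,%d", &p.X, &p.Y); err != nil {
+//				return err
+//			}
+//			v.Set(reflect.ValueOf(p))
+//			return nil
+//		},
+//	)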
+func Register(value interface{}, enc encoderFunc, dec decoderFunc) { + typ := reflect.TypeOf(value) + if enc != nil { + typEncMap[typ] = enc + } + if dec != nil { + typDecMap[typ] = dec + } +} + +//------------------------------------------------------------------------------ + +var structs = newStructCache(false) +var jsonStructs = newStructCache(true) + +type structCache struct { + mu sync.RWMutex + m map[reflect.Type]*fields + + useJSONTag bool +} + +func newStructCache(useJSONTag bool) *structCache { + return &structCache{ + m: make(map[reflect.Type]*fields), + + useJSONTag: useJSONTag, + } +} + +func (m *structCache) Fields(typ reflect.Type) *fields { + m.mu.RLock() + fs, ok := m.m[typ] + m.mu.RUnlock() + if ok { + return fs + } + + m.mu.Lock() + fs, ok = m.m[typ] + if !ok { + fs = getFields(typ, m.useJSONTag) + m.m[typ] = fs + } + m.mu.Unlock() + + return fs +} + +//------------------------------------------------------------------------------ + +type field struct { + name string + index []int + omitEmpty bool + encoder encoderFunc + decoder decoderFunc +} + +func (f *field) value(v reflect.Value) reflect.Value { + return fieldByIndex(v, f.index) +} + +func (f *field) Omit(strct reflect.Value) bool { + return f.omitEmpty && isEmptyValue(f.value(strct)) +} + +func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { + return f.encoder(e, f.value(strct)) +} + +func (f *field) DecodeValue(d *Decoder, strct reflect.Value) error { + return f.decoder(d, f.value(strct)) +} + +//------------------------------------------------------------------------------ + +type fields struct { + Table map[string]*field + List []*field + AsArray bool + + hasOmitEmpty bool +} + +func newFields(numField int) *fields { + return &fields{ + Table: make(map[string]*field, numField), + List: make([]*field, 0, numField), + } +} + +func (fs *fields) Add(field *field) { + fs.Table[field.name] = field + fs.List = append(fs.List, field) + if field.omitEmpty { + fs.hasOmitEmpty = true + } +} + +func (fs *fields) OmitEmpty(strct reflect.Value) []*field { + if !fs.hasOmitEmpty { + return fs.List + } + + fields := make([]*field, 0, len(fs.List)) + for _, f := range fs.List { + if !f.Omit(strct) { + fields = append(fields, f) + } + } + return fields +} + +func getFields(typ reflect.Type, useJSONTag bool) *fields { + numField := typ.NumField() + fs := newFields(numField) + + var omitEmpty bool + for i := 0; i < numField; i++ { + f := typ.Field(i) + + tag := f.Tag.Get("msgpack") + if useJSONTag && tag == "" { + tag = f.Tag.Get("json") + } + + name, opt := parseTag(tag) + if name == "-" { + continue + } + + if f.Name == "_msgpack" { + if opt.Contains("asArray") { + fs.AsArray = true + } + if opt.Contains("omitempty") { + omitEmpty = true + } + } + + if f.PkgPath != "" && !f.Anonymous { + continue + } + + field := &field{ + name: name, + index: f.Index, + omitEmpty: omitEmpty || opt.Contains("omitempty"), + encoder: getEncoder(f.Type), + decoder: getDecoder(f.Type), + } + + if field.name == "" { + field.name = f.Name + } + + if f.Anonymous && !opt.Contains("noinline") { + inline := opt.Contains("inline") + if inline { + inlineFields(fs, f.Type, field, useJSONTag) + } else { + inline = autoinlineFields(fs, f.Type, field, useJSONTag) + } + if inline { + fs.Table[field.name] = field + continue + } + } + + fs.Add(field) + } + return fs +} + +var encodeStructValuePtr uintptr +var decodeStructValuePtr uintptr + +func init() { + encodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer() + decodeStructValuePtr = 
reflect.ValueOf(decodeStructValue).Pointer() +} + +func inlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) { + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Table[field.name]; ok { + // Don't inline shadowed fields. + continue + } + field.index = append(f.index, field.index...) + fs.Add(field) + } +} + +func autoinlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) bool { + var encoder encoderFunc + var decoder decoderFunc + + if typ.Kind() == reflect.Struct { + encoder = f.encoder + decoder = f.decoder + } else { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + encoder = getEncoder(typ) + decoder = getDecoder(typ) + } + if typ.Kind() != reflect.Struct { + return false + } + } + + if reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr { + return false + } + if reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr { + return false + } + + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Table[field.name]; ok { + // Don't auto inline if there are shadowed fields. + return false + } + } + + for _, field := range inlinedFields { + field.index = append(f.index, field.index...) + fs.Add(field) + } + return true +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func fieldByIndex(v reflect.Value, index []int) reflect.Value { + if len(index) == 1 { + return v.Field(index[0]) + } + for i, x := range index { + if i > 0 { + var ok bool + v, ok = indirectNew(v) + if !ok { + return v + } + } + v = v.Field(x) + } + return v +} + +func indirectNew(v reflect.Value) (reflect.Value, bool) { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + return v, false + } + elemType := v.Type().Elem() + if elemType.Kind() != reflect.Struct { + return v, false + } + v.Set(reflect.New(elemType)) + } + v = v.Elem() + } + return v, true +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml b/vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml deleted file mode 100644 index 98d6cb7797..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml +++ /dev/null @@ -1,12 +0,0 @@ -run: - concurrency: 8 - deadline: 5m - tests: false -linters: - enable-all: true - disable: - - gochecknoglobals - - gocognit - - godox - - wsl - - funlen diff --git a/vendor/github.com/vmihailenco/msgpack/v4/.travis.yml b/vendor/github.com/vmihailenco/msgpack/v4/.travis.yml deleted file mode 100644 index b35bf5484e..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go - -go: - - 1.11.x - - 1.12.x - - 1.13.x - - 1.14.x - - tip - -matrix: - allow_failures: - - go: tip - -env: - - GO111MODULE=on - -go_import_path: github.com/vmihailenco/msgpack - -before_install: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.21.0 diff --git a/vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md 
b/vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md deleted file mode 100644 index fac97090e4..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md +++ /dev/null @@ -1,24 +0,0 @@ -## v4 - -- Encode, Decode, Marshal, and Unmarshal are changed to accept single argument. EncodeMulti and DecodeMulti are added as replacement. -- Added EncodeInt8/16/32/64 and EncodeUint8/16/32/64. -- Encoder changed to preserve type of numbers instead of chosing most compact encoding. The old behavior can be achieved with Encoder.UseCompactEncoding. - -## v3.3 - -- `msgpack:",inline"` tag is restored to force inlining structs. - -## v3.2 - -- Decoding extension types returns pointer to the value instead of the value. Fixes #153 - -## v3 - -- gopkg.in is not supported any more. Update import path to github.com/vmihailenco/msgpack. -- Msgpack maps are decoded into map[string]interface{} by default. -- EncodeSliceLen is removed in favor of EncodeArrayLen. DecodeSliceLen is removed in favor of DecodeArrayLen. -- Embedded structs are automatically inlined where possible. -- Time is encoded using extension as described in https://github.com/msgpack/msgpack/pull/209. Old format is supported as well. -- EncodeInt8/16/32/64 is replaced with EncodeInt. EncodeUint8/16/32/64 is replaced with EncodeUint. There should be no performance differences. -- DecodeInterface can now return int8/16/32 and uint8/16/32. -- PeekCode returns codes.Code instead of byte. diff --git a/vendor/github.com/vmihailenco/msgpack/v4/Makefile b/vendor/github.com/vmihailenco/msgpack/v4/Makefile deleted file mode 100644 index 57914e333a..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -all: - go test ./... - go test ./... -short -race - go test ./... -run=NONE -bench=. -benchmem - env GOOS=linux GOARCH=386 go test ./... - golangci-lint run diff --git a/vendor/github.com/vmihailenco/msgpack/v4/README.md b/vendor/github.com/vmihailenco/msgpack/v4/README.md deleted file mode 100644 index a5b1004e08..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# MessagePack encoding for Golang - -[![Build Status](https://travis-ci.org/vmihailenco/msgpack.svg?branch=v2)](https://travis-ci.org/vmihailenco/msgpack) -[![GoDoc](https://godoc.org/github.com/vmihailenco/msgpack?status.svg)](https://godoc.org/github.com/vmihailenco/msgpack) - -Supports: -- Primitives, arrays, maps, structs, time.Time and interface{}. -- Appengine *datastore.Key and datastore.Cursor. -- [CustomEncoder](https://godoc.org/github.com/vmihailenco/msgpack#example-CustomEncoder)/CustomDecoder interfaces for custom encoding. -- [Extensions](https://godoc.org/github.com/vmihailenco/msgpack#example-RegisterExt) to encode type information. -- Renaming fields via `msgpack:"my_field_name"` and alias via `msgpack:"alias:another_name"`. -- Omitting individual empty fields via `msgpack:",omitempty"` tag or all [empty fields in a struct](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--OmitEmpty). -- [Map keys sorting](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.SortMapKeys). -- Encoding/decoding all [structs as arrays](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.UseArrayForStructs) or [individual structs](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--AsArray). 
-- [Encoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.UseJSONTag) with [Decoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Decoder.UseJSONTag) can turn msgpack into drop-in replacement for JSON. -- Simple but very fast and efficient [queries](https://godoc.org/github.com/vmihailenco/msgpack#example-Decoder-Query). - -API docs: https://godoc.org/github.com/vmihailenco/msgpack. -Examples: https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples. - -## Installation - -This project uses [Go Modules](https://github.com/golang/go/wiki/Modules) and semantic import versioning since v4: - -``` shell -go mod init github.com/my/repo -go get github.com/vmihailenco/msgpack/v4 -``` - -## Quickstart - -``` go -import "github.com/vmihailenco/msgpack/v4" - -func ExampleMarshal() { - type Item struct { - Foo string - } - - b, err := msgpack.Marshal(&Item{Foo: "bar"}) - if err != nil { - panic(err) - } - - var item Item - err = msgpack.Unmarshal(b, &item) - if err != nil { - panic(err) - } - fmt.Println(item.Foo) - // Output: bar -} -``` - -## Benchmark - -``` -BenchmarkStructVmihailencoMsgpack-4 200000 12814 ns/op 2128 B/op 26 allocs/op -BenchmarkStructUgorjiGoMsgpack-4 100000 17678 ns/op 3616 B/op 70 allocs/op -BenchmarkStructUgorjiGoCodec-4 100000 19053 ns/op 7346 B/op 23 allocs/op -BenchmarkStructJSON-4 20000 69438 ns/op 7864 B/op 26 allocs/op -BenchmarkStructGOB-4 10000 104331 ns/op 14664 B/op 278 allocs/op -``` - -## Howto - -Please go through [examples](https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples) to get an idea how to use this package. - -## See also - -- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) -- [Golang message task queue](https://github.com/vmihailenco/taskq) diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode.go b/vendor/github.com/vmihailenco/msgpack/v4/decode.go deleted file mode 100644 index 1711675e9a..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode.go +++ /dev/null @@ -1,617 +0,0 @@ -package msgpack - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sync" - "time" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -const ( - looseIfaceFlag uint32 = 1 << iota - decodeUsingJSONFlag - disallowUnknownFieldsFlag -) - -const ( - bytesAllocLimit = 1e6 // 1mb - sliceAllocLimit = 1e4 - maxMapSize = 1e6 -) - -type bufReader interface { - io.Reader - io.ByteScanner -} - -//------------------------------------------------------------------------------ - -var decPool = sync.Pool{ - New: func() interface{} { - return NewDecoder(nil) - }, -} - -// Unmarshal decodes the MessagePack-encoded data and stores the result -// in the value pointed to by v. -func Unmarshal(data []byte, v interface{}) error { - dec := decPool.Get().(*Decoder) - - if r, ok := dec.r.(*bytes.Reader); ok { - r.Reset(data) - } else { - dec.Reset(bytes.NewReader(data)) - } - err := dec.Decode(v) - - decPool.Put(dec) - - return err -} - -// A Decoder reads and decodes MessagePack values from an input stream. -type Decoder struct { - r io.Reader - s io.ByteScanner - buf []byte - - extLen int - rec []byte // accumulates read data if not nil - - intern []string - flags uint32 - decodeMapFunc func(*Decoder) (interface{}, error) -} - -// NewDecoder returns a new decoder that reads from r. -// -// The decoder introduces its own buffering and may read data from r -// beyond the MessagePack values requested. Buffering can be disabled -// by passing a reader that implements io.ByteScanner interface. 
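As context for the v4 streaming API removed here, a minimal sketch of driving NewDecoder/Decode over a stream of values; the buffer contents and example strings are assumptions for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/vmihailenco/msgpack/v4"
)

func main() {
	// Encode two values back-to-back into one buffer.
	var buf bytes.Buffer
	enc := msgpack.NewEncoder(&buf)
	for _, s := range []string{"hello", "world"} {
		if err := enc.Encode(s); err != nil {
			panic(err)
		}
	}

	// Decode values one by one until the stream is exhausted.
	dec := msgpack.NewDecoder(&buf)
	for {
		var s string
		err := dec.Decode(&s)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(s)
	}
}
```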
-func NewDecoder(r io.Reader) *Decoder { - d := new(Decoder) - d.Reset(r) - return d -} - -// Reset discards any buffered data, resets all state, and switches the buffered -// reader to read from r. -func (d *Decoder) Reset(r io.Reader) { - if br, ok := r.(bufReader); ok { - d.r = br - d.s = br - } else if br, ok := d.r.(*bufio.Reader); ok { - br.Reset(r) - } else { - br := bufio.NewReader(r) - d.r = br - d.s = br - } - - if d.intern != nil { - d.intern = d.intern[:0] - } - - //TODO: - //d.useLoose = false - //d.useJSONTag = false - //d.disallowUnknownFields = false - //d.decodeMapFunc = nil -} - -func (d *Decoder) SetDecodeMapFunc(fn func(*Decoder) (interface{}, error)) { - d.decodeMapFunc = fn -} - -// UseDecodeInterfaceLoose causes decoder to use DecodeInterfaceLoose -// to decode msgpack value into Go interface{}. -func (d *Decoder) UseDecodeInterfaceLoose(on bool) *Decoder { - if on { - d.flags |= looseIfaceFlag - } else { - d.flags &= ^looseIfaceFlag - } - return d -} - -// UseJSONTag causes the Decoder to use json struct tag as fallback option -// if there is no msgpack tag. -func (d *Decoder) UseJSONTag(on bool) *Decoder { - if on { - d.flags |= decodeUsingJSONFlag - } else { - d.flags &= ^decodeUsingJSONFlag - } - return d -} - -// DisallowUnknownFields causes the Decoder to return an error when the destination -// is a struct and the input contains object keys which do not match any -// non-ignored, exported fields in the destination. -func (d *Decoder) DisallowUnknownFields() { - if true { - d.flags |= disallowUnknownFieldsFlag - } else { - d.flags &= ^disallowUnknownFieldsFlag - } -} - -// Buffered returns a reader of the data remaining in the Decoder's buffer. -// The reader is valid until the next call to Decode. -func (d *Decoder) Buffered() io.Reader { - return d.r -} - -//nolint:gocyclo -func (d *Decoder) Decode(v interface{}) error { - var err error - switch v := v.(type) { - case *string: - if v != nil { - *v, err = d.DecodeString() - return err - } - case *[]byte: - if v != nil { - return d.decodeBytesPtr(v) - } - case *int: - if v != nil { - *v, err = d.DecodeInt() - return err - } - case *int8: - if v != nil { - *v, err = d.DecodeInt8() - return err - } - case *int16: - if v != nil { - *v, err = d.DecodeInt16() - return err - } - case *int32: - if v != nil { - *v, err = d.DecodeInt32() - return err - } - case *int64: - if v != nil { - *v, err = d.DecodeInt64() - return err - } - case *uint: - if v != nil { - *v, err = d.DecodeUint() - return err - } - case *uint8: - if v != nil { - *v, err = d.DecodeUint8() - return err - } - case *uint16: - if v != nil { - *v, err = d.DecodeUint16() - return err - } - case *uint32: - if v != nil { - *v, err = d.DecodeUint32() - return err - } - case *uint64: - if v != nil { - *v, err = d.DecodeUint64() - return err - } - case *bool: - if v != nil { - *v, err = d.DecodeBool() - return err - } - case *float32: - if v != nil { - *v, err = d.DecodeFloat32() - return err - } - case *float64: - if v != nil { - *v, err = d.DecodeFloat64() - return err - } - case *[]string: - return d.decodeStringSlicePtr(v) - case *map[string]string: - return d.decodeMapStringStringPtr(v) - case *map[string]interface{}: - return d.decodeMapStringInterfacePtr(v) - case *time.Duration: - if v != nil { - vv, err := d.DecodeInt64() - *v = time.Duration(vv) - return err - } - case *time.Time: - if v != nil { - *v, err = d.DecodeTime() - return err - } - } - - vv := reflect.ValueOf(v) - if !vv.IsValid() { - return errors.New("msgpack: Decode(nil)") - } - if 
vv.Kind() != reflect.Ptr { - return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) - } - vv = vv.Elem() - if !vv.IsValid() { - return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) - } - return d.DecodeValue(vv) -} - -func (d *Decoder) DecodeMulti(v ...interface{}) error { - for _, vv := range v { - if err := d.Decode(vv); err != nil { - return err - } - } - return nil -} - -func (d *Decoder) decodeInterfaceCond() (interface{}, error) { - if d.flags&looseIfaceFlag != 0 { - return d.DecodeInterfaceLoose() - } - return d.DecodeInterface() -} - -func (d *Decoder) DecodeValue(v reflect.Value) error { - decode := getDecoder(v.Type()) - return decode(d, v) -} - -func (d *Decoder) DecodeNil() error { - c, err := d.readCode() - if err != nil { - return err - } - if c != codes.Nil { - return fmt.Errorf("msgpack: invalid code=%x decoding nil", c) - } - return nil -} - -func (d *Decoder) decodeNilValue(v reflect.Value) error { - err := d.DecodeNil() - if v.IsNil() { - return err - } - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - v.Set(reflect.Zero(v.Type())) - return err -} - -func (d *Decoder) DecodeBool() (bool, error) { - c, err := d.readCode() - if err != nil { - return false, err - } - return d.bool(c) -} - -func (d *Decoder) bool(c codes.Code) (bool, error) { - if c == codes.False { - return false, nil - } - if c == codes.True { - return true, nil - } - return false, fmt.Errorf("msgpack: invalid code=%x decoding bool", c) -} - -func (d *Decoder) DecodeDuration() (time.Duration, error) { - n, err := d.DecodeInt64() - if err != nil { - return 0, err - } - return time.Duration(n), nil -} - -// DecodeInterface decodes value into interface. It returns following types: -// - nil, -// - bool, -// - int8, int16, int32, int64, -// - uint8, uint16, uint32, uint64, -// - float32 and float64, -// - string, -// - []byte, -// - slices of any of the above, -// - maps of any of the above. -// -// DecodeInterface should be used only when you don't know the type of value -// you are decoding. For example, if you are decoding number it is better to use -// DecodeInt64 for negative numbers and DecodeUint64 for positive numbers. 
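A short sketch of consuming DecodeInterface from the v4 API deleted here; the encoded value is an assumption, and the concrete numeric type follows the wire width as the doc comment above describes:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack/v4"
)

func main() {
	// uint8(200) is an arbitrary example value; the type returned by
	// DecodeInterface depends on how the number was encoded.
	b, err := msgpack.Marshal(uint8(200))
	if err != nil {
		panic(err)
	}

	v, err := msgpack.NewDecoder(bytes.NewReader(b)).DecodeInterface()
	if err != nil {
		panic(err)
	}

	switch v := v.(type) {
	case int8, int16, int32, int64:
		fmt.Println("signed integer:", v)
	case uint8, uint16, uint32, uint64:
		fmt.Println("unsigned integer:", v)
	case string:
		fmt.Println("string:", v)
	default:
		fmt.Printf("%T: %v\n", v, v)
	}
}
```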
-func (d *Decoder) DecodeInterface() (interface{}, error) { - c, err := d.readCode() - if err != nil { - return nil, err - } - - if codes.IsFixedNum(c) { - return int8(c), nil - } - if codes.IsFixedMap(c) { - err = d.s.UnreadByte() - if err != nil { - return nil, err - } - return d.DecodeMap() - } - if codes.IsFixedArray(c) { - return d.decodeSlice(c) - } - if codes.IsFixedString(c) { - return d.string(c) - } - - switch c { - case codes.Nil: - return nil, nil - case codes.False, codes.True: - return d.bool(c) - case codes.Float: - return d.float32(c) - case codes.Double: - return d.float64(c) - case codes.Uint8: - return d.uint8() - case codes.Uint16: - return d.uint16() - case codes.Uint32: - return d.uint32() - case codes.Uint64: - return d.uint64() - case codes.Int8: - return d.int8() - case codes.Int16: - return d.int16() - case codes.Int32: - return d.int32() - case codes.Int64: - return d.int64() - case codes.Bin8, codes.Bin16, codes.Bin32: - return d.bytes(c, nil) - case codes.Str8, codes.Str16, codes.Str32: - return d.string(c) - case codes.Array16, codes.Array32: - return d.decodeSlice(c) - case codes.Map16, codes.Map32: - err = d.s.UnreadByte() - if err != nil { - return nil, err - } - return d.DecodeMap() - case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, - codes.Ext8, codes.Ext16, codes.Ext32: - return d.extInterface(c) - } - - return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) -} - -// DecodeInterfaceLoose is like DecodeInterface except that: -// - int8, int16, and int32 are converted to int64, -// - uint8, uint16, and uint32 are converted to uint64, -// - float32 is converted to float64. -func (d *Decoder) DecodeInterfaceLoose() (interface{}, error) { - c, err := d.readCode() - if err != nil { - return nil, err - } - - if codes.IsFixedNum(c) { - return int64(int8(c)), nil - } - if codes.IsFixedMap(c) { - err = d.s.UnreadByte() - if err != nil { - return nil, err - } - return d.DecodeMap() - } - if codes.IsFixedArray(c) { - return d.decodeSlice(c) - } - if codes.IsFixedString(c) { - return d.string(c) - } - - switch c { - case codes.Nil: - return nil, nil - case codes.False, codes.True: - return d.bool(c) - case codes.Float, codes.Double: - return d.float64(c) - case codes.Uint8, codes.Uint16, codes.Uint32, codes.Uint64: - return d.uint(c) - case codes.Int8, codes.Int16, codes.Int32, codes.Int64: - return d.int(c) - case codes.Bin8, codes.Bin16, codes.Bin32: - return d.bytes(c, nil) - case codes.Str8, codes.Str16, codes.Str32: - return d.string(c) - case codes.Array16, codes.Array32: - return d.decodeSlice(c) - case codes.Map16, codes.Map32: - err = d.s.UnreadByte() - if err != nil { - return nil, err - } - return d.DecodeMap() - case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, - codes.Ext8, codes.Ext16, codes.Ext32: - return d.extInterface(c) - } - - return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) -} - -// Skip skips next value. 
-func (d *Decoder) Skip() error { - c, err := d.readCode() - if err != nil { - return err - } - - if codes.IsFixedNum(c) { - return nil - } - if codes.IsFixedMap(c) { - return d.skipMap(c) - } - if codes.IsFixedArray(c) { - return d.skipSlice(c) - } - if codes.IsFixedString(c) { - return d.skipBytes(c) - } - - switch c { - case codes.Nil, codes.False, codes.True: - return nil - case codes.Uint8, codes.Int8: - return d.skipN(1) - case codes.Uint16, codes.Int16: - return d.skipN(2) - case codes.Uint32, codes.Int32, codes.Float: - return d.skipN(4) - case codes.Uint64, codes.Int64, codes.Double: - return d.skipN(8) - case codes.Bin8, codes.Bin16, codes.Bin32: - return d.skipBytes(c) - case codes.Str8, codes.Str16, codes.Str32: - return d.skipBytes(c) - case codes.Array16, codes.Array32: - return d.skipSlice(c) - case codes.Map16, codes.Map32: - return d.skipMap(c) - case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, - codes.Ext8, codes.Ext16, codes.Ext32: - return d.skipExt(c) - } - - return fmt.Errorf("msgpack: unknown code %x", c) -} - -// PeekCode returns the next MessagePack code without advancing the reader. -// Subpackage msgpack/codes contains list of available codes. -func (d *Decoder) PeekCode() (codes.Code, error) { - c, err := d.s.ReadByte() - if err != nil { - return 0, err - } - return codes.Code(c), d.s.UnreadByte() -} - -func (d *Decoder) hasNilCode() bool { - code, err := d.PeekCode() - return err == nil && code == codes.Nil -} - -func (d *Decoder) readCode() (codes.Code, error) { - d.extLen = 0 - c, err := d.s.ReadByte() - if err != nil { - return 0, err - } - if d.rec != nil { - d.rec = append(d.rec, c) - } - return codes.Code(c), nil -} - -func (d *Decoder) readFull(b []byte) error { - _, err := io.ReadFull(d.r, b) - if err != nil { - return err - } - if d.rec != nil { - //TODO: read directly into d.rec? - d.rec = append(d.rec, b...) - } - return nil -} - -func (d *Decoder) readN(n int) ([]byte, error) { - var err error - d.buf, err = readN(d.r, d.buf, n) - if err != nil { - return nil, err - } - if d.rec != nil { - //TODO: read directly into d.rec? - d.rec = append(d.rec, d.buf...) - } - return d.buf, nil -} - -func readN(r io.Reader, b []byte, n int) ([]byte, error) { - if b == nil { - if n == 0 { - return make([]byte, 0), nil - } - switch { - case n < 64: - b = make([]byte, 0, 64) - case n <= bytesAllocLimit: - b = make([]byte, 0, n) - default: - b = make([]byte, 0, bytesAllocLimit) - } - } - - if n <= cap(b) { - b = b[:n] - _, err := io.ReadFull(r, b) - return b, err - } - b = b[:cap(b)] - - var pos int - for { - alloc := min(n-len(b), bytesAllocLimit) - b = append(b, make([]byte, alloc)...) 
- - _, err := io.ReadFull(r, b[pos:]) - if err != nil { - return b, err - } - - if len(b) == n { - break - } - pos = len(b) - } - - return b, nil -} - -func min(a, b int) int { //nolint:unparam - if a <= b { - return a - } - return b -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_map.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_map.go deleted file mode 100644 index 16c40fe77e..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode_map.go +++ /dev/null @@ -1,350 +0,0 @@ -package msgpack - -import ( - "errors" - "fmt" - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -var ( - mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) - mapStringStringType = mapStringStringPtrType.Elem() -) - -var ( - mapStringInterfacePtrType = reflect.TypeOf((*map[string]interface{})(nil)) - mapStringInterfaceType = mapStringInterfacePtrType.Elem() -) - -func decodeMapValue(d *Decoder, v reflect.Value) error { - size, err := d.DecodeMapLen() - if err != nil { - return err - } - - typ := v.Type() - if size == -1 { - v.Set(reflect.Zero(typ)) - return nil - } - - if v.IsNil() { - v.Set(reflect.MakeMap(typ)) - } - if size == 0 { - return nil - } - - return decodeMapValueSize(d, v, size) -} - -func decodeMapValueSize(d *Decoder, v reflect.Value, size int) error { - typ := v.Type() - keyType := typ.Key() - valueType := typ.Elem() - - for i := 0; i < size; i++ { - mk := reflect.New(keyType).Elem() - if err := d.DecodeValue(mk); err != nil { - return err - } - - mv := reflect.New(valueType).Elem() - if err := d.DecodeValue(mv); err != nil { - return err - } - - v.SetMapIndex(mk, mv) - } - - return nil -} - -// DecodeMapLen decodes map length. Length is -1 when map is nil. -func (d *Decoder) DecodeMapLen() (int, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - - if codes.IsExt(c) { - if err = d.skipExtHeader(c); err != nil { - return 0, err - } - - c, err = d.readCode() - if err != nil { - return 0, err - } - } - return d.mapLen(c) -} - -func (d *Decoder) mapLen(c codes.Code) (int, error) { - size, err := d._mapLen(c) - err = expandInvalidCodeMapLenError(c, err) - return size, err -} - -func (d *Decoder) _mapLen(c codes.Code) (int, error) { - if c == codes.Nil { - return -1, nil - } - if c >= codes.FixedMapLow && c <= codes.FixedMapHigh { - return int(c & codes.FixedMapMask), nil - } - if c == codes.Map16 { - size, err := d.uint16() - return int(size), err - } - if c == codes.Map32 { - size, err := d.uint32() - return int(size), err - } - return 0, errInvalidCode -} - -var errInvalidCode = errors.New("invalid code") - -func expandInvalidCodeMapLenError(c codes.Code, err error) error { - if err == errInvalidCode { - return fmt.Errorf("msgpack: invalid code=%x decoding map length", c) - } - return err -} - -func decodeMapStringStringValue(d *Decoder, v reflect.Value) error { - mptr := v.Addr().Convert(mapStringStringPtrType).Interface().(*map[string]string) - return d.decodeMapStringStringPtr(mptr) -} - -func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { - size, err := d.DecodeMapLen() - if err != nil { - return err - } - if size == -1 { - *ptr = nil - return nil - } - - m := *ptr - if m == nil { - *ptr = make(map[string]string, min(size, maxMapSize)) - m = *ptr - } - - for i := 0; i < size; i++ { - mk, err := d.DecodeString() - if err != nil { - return err - } - mv, err := d.DecodeString() - if err != nil { - return err - } - m[mk] = mv - } - - return nil -} - -func decodeMapStringInterfaceValue(d *Decoder, v 
reflect.Value) error { - ptr := v.Addr().Convert(mapStringInterfacePtrType).Interface().(*map[string]interface{}) - return d.decodeMapStringInterfacePtr(ptr) -} - -func (d *Decoder) decodeMapStringInterfacePtr(ptr *map[string]interface{}) error { - n, err := d.DecodeMapLen() - if err != nil { - return err - } - if n == -1 { - *ptr = nil - return nil - } - - m := *ptr - if m == nil { - *ptr = make(map[string]interface{}, min(n, maxMapSize)) - m = *ptr - } - - for i := 0; i < n; i++ { - mk, err := d.DecodeString() - if err != nil { - return err - } - mv, err := d.decodeInterfaceCond() - if err != nil { - return err - } - m[mk] = mv - } - - return nil -} - -var errUnsupportedMapKey = errors.New("msgpack: unsupported map key") - -func (d *Decoder) DecodeMap() (interface{}, error) { - if d.decodeMapFunc != nil { - return d.decodeMapFunc(d) - } - - size, err := d.DecodeMapLen() - if err != nil { - return nil, err - } - if size == -1 { - return nil, nil - } - if size == 0 { - return make(map[string]interface{}), nil - } - - code, err := d.PeekCode() - if err != nil { - return nil, err - } - - if codes.IsString(code) || codes.IsBin(code) { - return d.decodeMapStringInterfaceSize(size) - } - - key, err := d.decodeInterfaceCond() - if err != nil { - return nil, err - } - - value, err := d.decodeInterfaceCond() - if err != nil { - return nil, err - } - - keyType := reflect.TypeOf(key) - valueType := reflect.TypeOf(value) - - if !keyType.Comparable() { - return nil, errUnsupportedMapKey - } - - mapType := reflect.MapOf(keyType, valueType) - mapValue := reflect.MakeMap(mapType) - - mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) - size-- - - err = decodeMapValueSize(d, mapValue, size) - if err != nil { - return nil, err - } - - return mapValue.Interface(), nil -} - -func (d *Decoder) decodeMapStringInterfaceSize(size int) (map[string]interface{}, error) { - m := make(map[string]interface{}, min(size, maxMapSize)) - for i := 0; i < size; i++ { - mk, err := d.DecodeString() - if err != nil { - return nil, err - } - mv, err := d.decodeInterfaceCond() - if err != nil { - return nil, err - } - m[mk] = mv - } - return m, nil -} - -func (d *Decoder) skipMap(c codes.Code) error { - n, err := d.mapLen(c) - if err != nil { - return err - } - for i := 0; i < n; i++ { - if err := d.Skip(); err != nil { - return err - } - if err := d.Skip(); err != nil { - return err - } - } - return nil -} - -func decodeStructValue(d *Decoder, v reflect.Value) error { - c, err := d.readCode() - if err != nil { - return err - } - - var isArray bool - - n, err := d._mapLen(c) - if err != nil { - var err2 error - n, err2 = d.arrayLen(c) - if err2 != nil { - return expandInvalidCodeMapLenError(c, err) - } - isArray = true - } - if n == -1 { - if err = mustSet(v); err != nil { - return err - } - v.Set(reflect.Zero(v.Type())) - return nil - } - - var fields *fields - if d.flags&decodeUsingJSONFlag != 0 { - fields = jsonStructs.Fields(v.Type()) - } else { - fields = structs.Fields(v.Type()) - } - - if isArray { - for i, f := range fields.List { - if i >= n { - break - } - if err := f.DecodeValue(d, v); err != nil { - return err - } - } - - // Skip extra values. 
- for i := len(fields.List); i < n; i++ { - if err := d.Skip(); err != nil { - return err - } - } - - return nil - } - - for i := 0; i < n; i++ { - name, err := d.DecodeString() - if err != nil { - return err - } - - if f := fields.Map[name]; f != nil { - if err := f.DecodeValue(d, v); err != nil { - return err - } - } else if d.flags&disallowUnknownFieldsFlag != 0 { - return fmt.Errorf("msgpack: unknown field %q", name) - } else if err := d.Skip(); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_number.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_number.go deleted file mode 100644 index f6b9151f01..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode_number.go +++ /dev/null @@ -1,307 +0,0 @@ -package msgpack - -import ( - "fmt" - "math" - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -func (d *Decoder) skipN(n int) error { - _, err := d.readN(n) - return err -} - -func (d *Decoder) uint8() (uint8, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return uint8(c), nil -} - -func (d *Decoder) int8() (int8, error) { - n, err := d.uint8() - return int8(n), err -} - -func (d *Decoder) uint16() (uint16, error) { - b, err := d.readN(2) - if err != nil { - return 0, err - } - return (uint16(b[0]) << 8) | uint16(b[1]), nil -} - -func (d *Decoder) int16() (int16, error) { - n, err := d.uint16() - return int16(n), err -} - -func (d *Decoder) uint32() (uint32, error) { - b, err := d.readN(4) - if err != nil { - return 0, err - } - n := (uint32(b[0]) << 24) | - (uint32(b[1]) << 16) | - (uint32(b[2]) << 8) | - uint32(b[3]) - return n, nil -} - -func (d *Decoder) int32() (int32, error) { - n, err := d.uint32() - return int32(n), err -} - -func (d *Decoder) uint64() (uint64, error) { - b, err := d.readN(8) - if err != nil { - return 0, err - } - n := (uint64(b[0]) << 56) | - (uint64(b[1]) << 48) | - (uint64(b[2]) << 40) | - (uint64(b[3]) << 32) | - (uint64(b[4]) << 24) | - (uint64(b[5]) << 16) | - (uint64(b[6]) << 8) | - uint64(b[7]) - return n, nil -} - -func (d *Decoder) int64() (int64, error) { - n, err := d.uint64() - return int64(n), err -} - -// DecodeUint64 decodes msgpack int8/16/32/64 and uint8/16/32/64 -// into Go uint64. -func (d *Decoder) DecodeUint64() (uint64, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return d.uint(c) -} - -func (d *Decoder) uint(c codes.Code) (uint64, error) { - if c == codes.Nil { - return 0, nil - } - if codes.IsFixedNum(c) { - return uint64(int8(c)), nil - } - switch c { - case codes.Uint8: - n, err := d.uint8() - return uint64(n), err - case codes.Int8: - n, err := d.int8() - return uint64(n), err - case codes.Uint16: - n, err := d.uint16() - return uint64(n), err - case codes.Int16: - n, err := d.int16() - return uint64(n), err - case codes.Uint32: - n, err := d.uint32() - return uint64(n), err - case codes.Int32: - n, err := d.int32() - return uint64(n), err - case codes.Uint64, codes.Int64: - return d.uint64() - } - return 0, fmt.Errorf("msgpack: invalid code=%x decoding uint64", c) -} - -// DecodeInt64 decodes msgpack int8/16/32/64 and uint8/16/32/64 -// into Go int64. 
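The removed DecodeInt64 accepted any msgpack integer width, signed or unsigned, and widened it to int64. A minimal sketch of that coercion (the encoded value is an arbitrary assumption):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack/v4"
)

func main() {
	// An int32 on the wire is still readable through DecodeInt64.
	b, err := msgpack.Marshal(int32(-5))
	if err != nil {
		panic(err)
	}

	n, err := msgpack.NewDecoder(bytes.NewReader(b)).DecodeInt64()
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // -5
}
```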
-func (d *Decoder) DecodeInt64() (int64, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return d.int(c) -} - -func (d *Decoder) int(c codes.Code) (int64, error) { - if c == codes.Nil { - return 0, nil - } - if codes.IsFixedNum(c) { - return int64(int8(c)), nil - } - switch c { - case codes.Uint8: - n, err := d.uint8() - return int64(n), err - case codes.Int8: - n, err := d.uint8() - return int64(int8(n)), err - case codes.Uint16: - n, err := d.uint16() - return int64(n), err - case codes.Int16: - n, err := d.uint16() - return int64(int16(n)), err - case codes.Uint32: - n, err := d.uint32() - return int64(n), err - case codes.Int32: - n, err := d.uint32() - return int64(int32(n)), err - case codes.Uint64, codes.Int64: - n, err := d.uint64() - return int64(n), err - } - return 0, fmt.Errorf("msgpack: invalid code=%x decoding int64", c) -} - -func (d *Decoder) DecodeFloat32() (float32, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return d.float32(c) -} - -func (d *Decoder) float32(c codes.Code) (float32, error) { - if c == codes.Float { - n, err := d.uint32() - if err != nil { - return 0, err - } - return math.Float32frombits(n), nil - } - - n, err := d.int(c) - if err != nil { - return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) - } - return float32(n), nil -} - -// DecodeFloat64 decodes msgpack float32/64 into Go float64. -func (d *Decoder) DecodeFloat64() (float64, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return d.float64(c) -} - -func (d *Decoder) float64(c codes.Code) (float64, error) { - switch c { - case codes.Float: - n, err := d.float32(c) - if err != nil { - return 0, err - } - return float64(n), nil - case codes.Double: - n, err := d.uint64() - if err != nil { - return 0, err - } - return math.Float64frombits(n), nil - } - - n, err := d.int(c) - if err != nil { - return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) - } - return float64(n), nil -} - -func (d *Decoder) DecodeUint() (uint, error) { - n, err := d.DecodeUint64() - return uint(n), err -} - -func (d *Decoder) DecodeUint8() (uint8, error) { - n, err := d.DecodeUint64() - return uint8(n), err -} - -func (d *Decoder) DecodeUint16() (uint16, error) { - n, err := d.DecodeUint64() - return uint16(n), err -} - -func (d *Decoder) DecodeUint32() (uint32, error) { - n, err := d.DecodeUint64() - return uint32(n), err -} - -func (d *Decoder) DecodeInt() (int, error) { - n, err := d.DecodeInt64() - return int(n), err -} - -func (d *Decoder) DecodeInt8() (int8, error) { - n, err := d.DecodeInt64() - return int8(n), err -} - -func (d *Decoder) DecodeInt16() (int16, error) { - n, err := d.DecodeInt64() - return int16(n), err -} - -func (d *Decoder) DecodeInt32() (int32, error) { - n, err := d.DecodeInt64() - return int32(n), err -} - -func decodeFloat32Value(d *Decoder, v reflect.Value) error { - f, err := d.DecodeFloat32() - if err != nil { - return err - } - if err = mustSet(v); err != nil { - return err - } - v.SetFloat(float64(f)) - return nil -} - -func decodeFloat64Value(d *Decoder, v reflect.Value) error { - f, err := d.DecodeFloat64() - if err != nil { - return err - } - if err = mustSet(v); err != nil { - return err - } - v.SetFloat(f) - return nil -} - -func decodeInt64Value(d *Decoder, v reflect.Value) error { - n, err := d.DecodeInt64() - if err != nil { - return err - } - if err = mustSet(v); err != nil { - return err - } - v.SetInt(n) - return nil -} - -func decodeUint64Value(d *Decoder, v 
reflect.Value) error { - n, err := d.DecodeUint64() - if err != nil { - return err - } - if err = mustSet(v); err != nil { - return err - } - v.SetUint(n) - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_query.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_query.go deleted file mode 100644 index 80cd80e785..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode_query.go +++ /dev/null @@ -1,158 +0,0 @@ -package msgpack - -import ( - "fmt" - "strconv" - "strings" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -type queryResult struct { - query string - key string - hasAsterisk bool - - values []interface{} -} - -func (q *queryResult) nextKey() { - ind := strings.IndexByte(q.query, '.') - if ind == -1 { - q.key = q.query - q.query = "" - return - } - q.key = q.query[:ind] - q.query = q.query[ind+1:] -} - -// Query extracts data specified by the query from the msgpack stream skipping -// any other data. Query consists of map keys and array indexes separated with dot, -// e.g. key1.0.key2. -func (d *Decoder) Query(query string) ([]interface{}, error) { - res := queryResult{ - query: query, - } - if err := d.query(&res); err != nil { - return nil, err - } - return res.values, nil -} - -func (d *Decoder) query(q *queryResult) error { - q.nextKey() - if q.key == "" { - v, err := d.decodeInterfaceCond() - if err != nil { - return err - } - q.values = append(q.values, v) - return nil - } - - code, err := d.PeekCode() - if err != nil { - return err - } - - switch { - case code == codes.Map16 || code == codes.Map32 || codes.IsFixedMap(code): - err = d.queryMapKey(q) - case code == codes.Array16 || code == codes.Array32 || codes.IsFixedArray(code): - err = d.queryArrayIndex(q) - default: - err = fmt.Errorf("msgpack: unsupported code=%x decoding key=%q", code, q.key) - } - return err -} - -func (d *Decoder) queryMapKey(q *queryResult) error { - n, err := d.DecodeMapLen() - if err != nil { - return err - } - if n == -1 { - return nil - } - - for i := 0; i < n; i++ { - k, err := d.bytesNoCopy() - if err != nil { - return err - } - - if string(k) == q.key { - if err := d.query(q); err != nil { - return err - } - if q.hasAsterisk { - return d.skipNext((n - i - 1) * 2) - } - return nil - } - - if err := d.Skip(); err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) queryArrayIndex(q *queryResult) error { - n, err := d.DecodeArrayLen() - if err != nil { - return err - } - if n == -1 { - return nil - } - - if q.key == "*" { - q.hasAsterisk = true - - query := q.query - for i := 0; i < n; i++ { - q.query = query - if err := d.query(q); err != nil { - return err - } - } - - q.hasAsterisk = false - return nil - } - - ind, err := strconv.Atoi(q.key) - if err != nil { - return err - } - - for i := 0; i < n; i++ { - if i == ind { - if err := d.query(q); err != nil { - return err - } - if q.hasAsterisk { - return d.skipNext(n - i - 1) - } - return nil - } - - if err := d.Skip(); err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) skipNext(n int) error { - for i := 0; i < n; i++ { - if err := d.Skip(); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go deleted file mode 100644 index adf17ae5cf..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go +++ /dev/null @@ -1,191 +0,0 @@ -package msgpack - -import ( - "fmt" - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -var 
sliceStringPtrType = reflect.TypeOf((*[]string)(nil)) - -// DecodeArrayLen decodes array length. Length is -1 when array is nil. -func (d *Decoder) DecodeArrayLen() (int, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return d.arrayLen(c) -} - -func (d *Decoder) arrayLen(c codes.Code) (int, error) { - if c == codes.Nil { - return -1, nil - } else if c >= codes.FixedArrayLow && c <= codes.FixedArrayHigh { - return int(c & codes.FixedArrayMask), nil - } - switch c { - case codes.Array16: - n, err := d.uint16() - return int(n), err - case codes.Array32: - n, err := d.uint32() - return int(n), err - } - return 0, fmt.Errorf("msgpack: invalid code=%x decoding array length", c) -} - -func decodeStringSliceValue(d *Decoder, v reflect.Value) error { - ptr := v.Addr().Convert(sliceStringPtrType).Interface().(*[]string) - return d.decodeStringSlicePtr(ptr) -} - -func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error { - n, err := d.DecodeArrayLen() - if err != nil { - return err - } - if n == -1 { - return nil - } - - ss := makeStrings(*ptr, n) - for i := 0; i < n; i++ { - s, err := d.DecodeString() - if err != nil { - return err - } - ss = append(ss, s) - } - *ptr = ss - - return nil -} - -func makeStrings(s []string, n int) []string { - if n > sliceAllocLimit { - n = sliceAllocLimit - } - - if s == nil { - return make([]string, 0, n) - } - - if cap(s) >= n { - return s[:0] - } - - s = s[:cap(s)] - s = append(s, make([]string, n-len(s))...) - return s[:0] -} - -func decodeSliceValue(d *Decoder, v reflect.Value) error { - n, err := d.DecodeArrayLen() - if err != nil { - return err - } - - if n == -1 { - v.Set(reflect.Zero(v.Type())) - return nil - } - if n == 0 && v.IsNil() { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - return nil - } - - if v.Cap() >= n { - v.Set(v.Slice(0, n)) - } else if v.Len() < v.Cap() { - v.Set(v.Slice(0, v.Cap())) - } - - for i := 0; i < n; i++ { - if i >= v.Len() { - v.Set(growSliceValue(v, n)) - } - elem := v.Index(i) - if err := d.DecodeValue(elem); err != nil { - return err - } - } - - return nil -} - -func growSliceValue(v reflect.Value, n int) reflect.Value { - diff := n - v.Len() - if diff > sliceAllocLimit { - diff = sliceAllocLimit - } - v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff)) - return v -} - -func decodeArrayValue(d *Decoder, v reflect.Value) error { - n, err := d.DecodeArrayLen() - if err != nil { - return err - } - - if n == -1 { - return nil - } - if n > v.Len() { - return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) - } - - for i := 0; i < n; i++ { - sv := v.Index(i) - if err := d.DecodeValue(sv); err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) DecodeSlice() ([]interface{}, error) { - c, err := d.readCode() - if err != nil { - return nil, err - } - return d.decodeSlice(c) -} - -func (d *Decoder) decodeSlice(c codes.Code) ([]interface{}, error) { - n, err := d.arrayLen(c) - if err != nil { - return nil, err - } - if n == -1 { - return nil, nil - } - - s := make([]interface{}, 0, min(n, sliceAllocLimit)) - for i := 0; i < n; i++ { - v, err := d.decodeInterfaceCond() - if err != nil { - return nil, err - } - s = append(s, v) - } - - return s, nil -} - -func (d *Decoder) skipSlice(c codes.Code) error { - n, err := d.arrayLen(c) - if err != nil { - return err - } - - for i := 0; i < n; i++ { - if err := d.Skip(); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_string.go 
b/vendor/github.com/vmihailenco/msgpack/v4/decode_string.go deleted file mode 100644 index c5adf3865c..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode_string.go +++ /dev/null @@ -1,186 +0,0 @@ -package msgpack - -import ( - "fmt" - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -func (d *Decoder) bytesLen(c codes.Code) (int, error) { - if c == codes.Nil { - return -1, nil - } - - if codes.IsFixedString(c) { - return int(c & codes.FixedStrMask), nil - } - - switch c { - case codes.Str8, codes.Bin8: - n, err := d.uint8() - return int(n), err - case codes.Str16, codes.Bin16: - n, err := d.uint16() - return int(n), err - case codes.Str32, codes.Bin32: - n, err := d.uint32() - return int(n), err - } - - return 0, fmt.Errorf("msgpack: invalid code=%x decoding bytes length", c) -} - -func (d *Decoder) DecodeString() (string, error) { - c, err := d.readCode() - if err != nil { - return "", err - } - return d.string(c) -} - -func (d *Decoder) string(c codes.Code) (string, error) { - n, err := d.bytesLen(c) - if err != nil { - return "", err - } - return d.stringWithLen(n) -} - -func (d *Decoder) stringWithLen(n int) (string, error) { - if n <= 0 { - return "", nil - } - b, err := d.readN(n) - return string(b), err -} - -func decodeStringValue(d *Decoder, v reflect.Value) error { - if err := mustSet(v); err != nil { - return err - } - - s, err := d.DecodeString() - if err != nil { - return err - } - - v.SetString(s) - return nil -} - -func (d *Decoder) DecodeBytesLen() (int, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return d.bytesLen(c) -} - -func (d *Decoder) DecodeBytes() ([]byte, error) { - c, err := d.readCode() - if err != nil { - return nil, err - } - return d.bytes(c, nil) -} - -func (d *Decoder) bytes(c codes.Code, b []byte) ([]byte, error) { - n, err := d.bytesLen(c) - if err != nil { - return nil, err - } - if n == -1 { - return nil, nil - } - return readN(d.r, b, n) -} - -func (d *Decoder) bytesNoCopy() ([]byte, error) { - c, err := d.readCode() - if err != nil { - return nil, err - } - n, err := d.bytesLen(c) - if err != nil { - return nil, err - } - if n == -1 { - return nil, nil - } - return d.readN(n) -} - -func (d *Decoder) decodeBytesPtr(ptr *[]byte) error { - c, err := d.readCode() - if err != nil { - return err - } - return d.bytesPtr(c, ptr) -} - -func (d *Decoder) bytesPtr(c codes.Code, ptr *[]byte) error { - n, err := d.bytesLen(c) - if err != nil { - return err - } - if n == -1 { - *ptr = nil - return nil - } - - *ptr, err = readN(d.r, *ptr, n) - return err -} - -func (d *Decoder) skipBytes(c codes.Code) error { - n, err := d.bytesLen(c) - if err != nil { - return err - } - if n <= 0 { - return nil - } - return d.skipN(n) -} - -func decodeBytesValue(d *Decoder, v reflect.Value) error { - if err := mustSet(v); err != nil { - return err - } - - c, err := d.readCode() - if err != nil { - return err - } - - b, err := d.bytes(c, v.Bytes()) - if err != nil { - return err - } - - v.SetBytes(b) - - return nil -} - -func decodeByteArrayValue(d *Decoder, v reflect.Value) error { - c, err := d.readCode() - if err != nil { - return err - } - - n, err := d.bytesLen(c) - if err != nil { - return err - } - if n == -1 { - return nil - } - if n > v.Len() { - return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) - } - - b := v.Slice(0, n).Bytes() - return d.readFull(b) -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_value.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_value.go 
deleted file mode 100644 index 810d3be8f9..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode_value.go +++ /dev/null @@ -1,276 +0,0 @@ -package msgpack - -import ( - "encoding" - "errors" - "fmt" - "reflect" -) - -var interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() -var stringType = reflect.TypeOf((*string)(nil)).Elem() - -var valueDecoders []decoderFunc - -//nolint:gochecknoinits -func init() { - valueDecoders = []decoderFunc{ - reflect.Bool: decodeBoolValue, - reflect.Int: decodeInt64Value, - reflect.Int8: decodeInt64Value, - reflect.Int16: decodeInt64Value, - reflect.Int32: decodeInt64Value, - reflect.Int64: decodeInt64Value, - reflect.Uint: decodeUint64Value, - reflect.Uint8: decodeUint64Value, - reflect.Uint16: decodeUint64Value, - reflect.Uint32: decodeUint64Value, - reflect.Uint64: decodeUint64Value, - reflect.Float32: decodeFloat32Value, - reflect.Float64: decodeFloat64Value, - reflect.Complex64: decodeUnsupportedValue, - reflect.Complex128: decodeUnsupportedValue, - reflect.Array: decodeArrayValue, - reflect.Chan: decodeUnsupportedValue, - reflect.Func: decodeUnsupportedValue, - reflect.Interface: decodeInterfaceValue, - reflect.Map: decodeMapValue, - reflect.Ptr: decodeUnsupportedValue, - reflect.Slice: decodeSliceValue, - reflect.String: decodeStringValue, - reflect.Struct: decodeStructValue, - reflect.UnsafePointer: decodeUnsupportedValue, - } -} - -func mustSet(v reflect.Value) error { - if !v.CanSet() { - return fmt.Errorf("msgpack: Decode(nonsettable %s)", v.Type()) - } - return nil -} - -func getDecoder(typ reflect.Type) decoderFunc { - if v, ok := typeDecMap.Load(typ); ok { - return v.(decoderFunc) - } - fn := _getDecoder(typ) - typeDecMap.Store(typ, fn) - return fn -} - -func _getDecoder(typ reflect.Type) decoderFunc { - kind := typ.Kind() - - if typ.Implements(customDecoderType) { - return decodeCustomValue - } - if typ.Implements(unmarshalerType) { - return unmarshalValue - } - if typ.Implements(binaryUnmarshalerType) { - return unmarshalBinaryValue - } - - // Addressable struct field value. 
- if kind != reflect.Ptr { - ptr := reflect.PtrTo(typ) - if ptr.Implements(customDecoderType) { - return decodeCustomValueAddr - } - if ptr.Implements(unmarshalerType) { - return unmarshalValueAddr - } - if ptr.Implements(binaryUnmarshalerType) { - return unmarshalBinaryValueAddr - } - } - - switch kind { - case reflect.Ptr: - return ptrDecoderFunc(typ) - case reflect.Slice: - elem := typ.Elem() - if elem.Kind() == reflect.Uint8 { - return decodeBytesValue - } - if elem == stringType { - return decodeStringSliceValue - } - case reflect.Array: - if typ.Elem().Kind() == reflect.Uint8 { - return decodeByteArrayValue - } - case reflect.Map: - if typ.Key() == stringType { - switch typ.Elem() { - case stringType: - return decodeMapStringStringValue - case interfaceType: - return decodeMapStringInterfaceValue - } - } - } - - return valueDecoders[kind] -} - -func ptrDecoderFunc(typ reflect.Type) decoderFunc { - decoder := getDecoder(typ.Elem()) - return func(d *Decoder, v reflect.Value) error { - if d.hasNilCode() { - if err := mustSet(v); err != nil { - return err - } - if !v.IsNil() { - v.Set(reflect.Zero(v.Type())) - } - return d.DecodeNil() - } - if v.IsNil() { - if err := mustSet(v); err != nil { - return err - } - v.Set(reflect.New(v.Type().Elem())) - } - return decoder(d, v.Elem()) - } -} - -func decodeCustomValueAddr(d *Decoder, v reflect.Value) error { - if !v.CanAddr() { - return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) - } - return decodeCustomValue(d, v.Addr()) -} - -func decodeCustomValue(d *Decoder, v reflect.Value) error { - if d.hasNilCode() { - return d.decodeNilValue(v) - } - - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - decoder := v.Interface().(CustomDecoder) - return decoder.DecodeMsgpack(d) -} - -func unmarshalValueAddr(d *Decoder, v reflect.Value) error { - if !v.CanAddr() { - return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) - } - return unmarshalValue(d, v.Addr()) -} - -func unmarshalValue(d *Decoder, v reflect.Value) error { - if d.extLen == 0 || d.extLen == 1 { - if d.hasNilCode() { - return d.decodeNilValue(v) - } - } - - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - var b []byte - - if d.extLen != 0 { - var err error - b, err = d.readN(d.extLen) - if err != nil { - return err - } - } else { - d.rec = make([]byte, 0, 64) - if err := d.Skip(); err != nil { - return err - } - b = d.rec - d.rec = nil - } - - unmarshaler := v.Interface().(Unmarshaler) - return unmarshaler.UnmarshalMsgpack(b) -} - -func decodeBoolValue(d *Decoder, v reflect.Value) error { - flag, err := d.DecodeBool() - if err != nil { - return err - } - if err = mustSet(v); err != nil { - return err - } - v.SetBool(flag) - return nil -} - -func decodeInterfaceValue(d *Decoder, v reflect.Value) error { - if v.IsNil() { - return d.interfaceValue(v) - } - - elem := v.Elem() - if !elem.CanAddr() { - if d.hasNilCode() { - v.Set(reflect.Zero(v.Type())) - return d.DecodeNil() - } - } - - return d.DecodeValue(elem) -} - -func (d *Decoder) interfaceValue(v reflect.Value) error { - vv, err := d.decodeInterfaceCond() - if err != nil { - return err - } - - if vv != nil { - if v.Type() == errorType { - if vv, ok := vv.(string); ok { - v.Set(reflect.ValueOf(errors.New(vv))) - return nil - } - } - - v.Set(reflect.ValueOf(vv)) - } - - return nil -} - -func decodeUnsupportedValue(d *Decoder, v reflect.Value) error { - return fmt.Errorf("msgpack: Decode(unsupported %s)", v.Type()) -} - 
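Before falling back to the per-kind valueDecoders table, the _getDecoder dispatch above gives priority to types implementing the package's CustomDecoder interface (and its encode-side counterpart, CustomEncoder). A short usage sketch of that hook against the v4 API as it appears in this diff; the Point type and its fields are invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/vmihailenco/msgpack/v4"
)

// Point implements the CustomEncoder/CustomDecoder interfaces that the
// v4 dispatcher checks before resorting to per-kind reflection.
type Point struct{ X, Y int }

var (
	_ msgpack.CustomEncoder = (*Point)(nil)
	_ msgpack.CustomDecoder = (*Point)(nil)
)

// EncodeMsgpack writes the two coordinates as consecutive msgpack values.
func (p *Point) EncodeMsgpack(enc *msgpack.Encoder) error {
	return enc.EncodeMulti(p.X, p.Y)
}

// DecodeMsgpack reads the coordinates back in the same order.
func (p *Point) DecodeMsgpack(dec *msgpack.Decoder) error {
	return dec.DecodeMulti(&p.X, &p.Y)
}

func main() {
	b, err := msgpack.Marshal(&Point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}
	var p Point
	if err := msgpack.Unmarshal(b, &p); err != nil {
		panic(err)
	}
	fmt.Println(p) // {1 2}
}
```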
-//------------------------------------------------------------------------------ - -func unmarshalBinaryValueAddr(d *Decoder, v reflect.Value) error { - if !v.CanAddr() { - return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) - } - return unmarshalBinaryValue(d, v.Addr()) -} - -func unmarshalBinaryValue(d *Decoder, v reflect.Value) error { - if d.hasNilCode() { - return d.decodeNilValue(v) - } - - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - data, err := d.DecodeBytes() - if err != nil { - return err - } - - unmarshaler := v.Interface().(encoding.BinaryUnmarshaler) - return unmarshaler.UnmarshalBinary(data) -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode.go b/vendor/github.com/vmihailenco/msgpack/v4/encode.go deleted file mode 100644 index 37f098701e..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/encode.go +++ /dev/null @@ -1,241 +0,0 @@ -package msgpack - -import ( - "bytes" - "io" - "reflect" - "sync" - "time" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -const ( - sortMapKeysFlag uint32 = 1 << iota - structAsArrayFlag - encodeUsingJSONFlag - useCompactIntsFlag - useCompactFloatsFlag -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -type byteWriter struct { - io.Writer - - buf [1]byte -} - -func newByteWriter(w io.Writer) *byteWriter { - bw := new(byteWriter) - bw.Reset(w) - return bw -} - -func (bw *byteWriter) Reset(w io.Writer) { - bw.Writer = w -} - -func (bw *byteWriter) WriteByte(c byte) error { - bw.buf[0] = c - _, err := bw.Write(bw.buf[:]) - return err -} - -//------------------------------------------------------------------------------ - -var encPool = sync.Pool{ - New: func() interface{} { - return NewEncoder(nil) - }, -} - -// Marshal returns the MessagePack encoding of v. -func Marshal(v interface{}) ([]byte, error) { - enc := encPool.Get().(*Encoder) - - var buf bytes.Buffer - enc.Reset(&buf) - - err := enc.Encode(v) - b := buf.Bytes() - - encPool.Put(enc) - - if err != nil { - return nil, err - } - return b, err -} - -type Encoder struct { - w writer - - buf []byte - timeBuf []byte - bootstrap [9 + 12]byte - - intern map[string]int - - flags uint32 -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - e := new(Encoder) - e.buf = e.bootstrap[:9] - e.timeBuf = e.bootstrap[9 : 9+12] - e.Reset(w) - return e -} - -func (e *Encoder) Reset(w io.Writer) { - if bw, ok := w.(writer); ok { - e.w = bw - } else if bw, ok := e.w.(*byteWriter); ok { - bw.Reset(w) - } else { - e.w = newByteWriter(w) - } - - for k := range e.intern { - delete(e.intern, k) - } - - //TODO: - //e.sortMapKeys = false - //e.structAsArray = false - //e.useJSONTag = false - //e.useCompact = false -} - -// SortMapKeys causes the Encoder to encode map keys in increasing order. -// Supported map types are: -// - map[string]string -// - map[string]interface{} -func (e *Encoder) SortMapKeys(on bool) *Encoder { - if on { - e.flags |= sortMapKeysFlag - } else { - e.flags &= ^sortMapKeysFlag - } - return e -} - -// StructAsArray causes the Encoder to encode Go structs as msgpack arrays. -func (e *Encoder) StructAsArray(on bool) *Encoder { - if on { - e.flags |= structAsArrayFlag - } else { - e.flags &= ^structAsArrayFlag - } - return e -} - -// UseJSONTag causes the Encoder to use json struct tag as fallback option -// if there is no msgpack tag. 
-func (e *Encoder) UseJSONTag(on bool) *Encoder { - if on { - e.flags |= encodeUsingJSONFlag - } else { - e.flags &= ^encodeUsingJSONFlag - } - return e -} - -// UseCompactEncoding causes the Encoder to chose the most compact encoding. -// For example, it allows to encode small Go int64 as msgpack int8 saving 7 bytes. -func (e *Encoder) UseCompactEncoding(on bool) *Encoder { - if on { - e.flags |= useCompactIntsFlag - } else { - e.flags &= ^useCompactIntsFlag - } - return e -} - -// UseCompactFloats causes the Encoder to chose a compact integer encoding -// for floats that can be represented as integers. -func (e *Encoder) UseCompactFloats(on bool) { - if on { - e.flags |= useCompactFloatsFlag - } else { - e.flags &= ^useCompactFloatsFlag - } -} - -func (e *Encoder) Encode(v interface{}) error { - switch v := v.(type) { - case nil: - return e.EncodeNil() - case string: - return e.EncodeString(v) - case []byte: - return e.EncodeBytes(v) - case int: - return e.encodeInt64Cond(int64(v)) - case int64: - return e.encodeInt64Cond(v) - case uint: - return e.encodeUint64Cond(uint64(v)) - case uint64: - return e.encodeUint64Cond(v) - case bool: - return e.EncodeBool(v) - case float32: - return e.EncodeFloat32(v) - case float64: - return e.EncodeFloat64(v) - case time.Duration: - return e.encodeInt64Cond(int64(v)) - case time.Time: - return e.EncodeTime(v) - } - return e.EncodeValue(reflect.ValueOf(v)) -} - -func (e *Encoder) EncodeMulti(v ...interface{}) error { - for _, vv := range v { - if err := e.Encode(vv); err != nil { - return err - } - } - return nil -} - -func (e *Encoder) EncodeValue(v reflect.Value) error { - fn := getEncoder(v.Type()) - return fn(e, v) -} - -func (e *Encoder) EncodeNil() error { - return e.writeCode(codes.Nil) -} - -func (e *Encoder) EncodeBool(value bool) error { - if value { - return e.writeCode(codes.True) - } - return e.writeCode(codes.False) -} - -func (e *Encoder) EncodeDuration(d time.Duration) error { - return e.EncodeInt(int64(d)) -} - -func (e *Encoder) writeCode(c codes.Code) error { - return e.w.WriteByte(byte(c)) -} - -func (e *Encoder) write(b []byte) error { - _, err := e.w.Write(b) - return err -} - -func (e *Encoder) writeString(s string) error { - _, err := e.w.Write(stringToBytes(s)) - return err -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_map.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_map.go deleted file mode 100644 index d9b954d866..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/encode_map.go +++ /dev/null @@ -1,172 +0,0 @@ -package msgpack - -import ( - "reflect" - "sort" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -func encodeMapValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - - if err := e.EncodeMapLen(v.Len()); err != nil { - return err - } - - for _, key := range v.MapKeys() { - if err := e.EncodeValue(key); err != nil { - return err - } - if err := e.EncodeValue(v.MapIndex(key)); err != nil { - return err - } - } - - return nil -} - -func encodeMapStringStringValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - - if err := e.EncodeMapLen(v.Len()); err != nil { - return err - } - - m := v.Convert(mapStringStringType).Interface().(map[string]string) - if e.flags&sortMapKeysFlag != 0 { - return e.encodeSortedMapStringString(m) - } - - for mk, mv := range m { - if err := e.EncodeString(mk); err != nil { - return err - } - if err := e.EncodeString(mv); err != nil { - return err - } - } - - return nil -} - -func 
encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - - if err := e.EncodeMapLen(v.Len()); err != nil { - return err - } - - m := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{}) - if e.flags&sortMapKeysFlag != 0 { - return e.encodeSortedMapStringInterface(m) - } - - for mk, mv := range m { - if err := e.EncodeString(mk); err != nil { - return err - } - if err := e.Encode(mv); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeSortedMapStringString(m map[string]string) error { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - err := e.EncodeString(k) - if err != nil { - return err - } - if err = e.EncodeString(m[k]); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeSortedMapStringInterface(m map[string]interface{}) error { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - err := e.EncodeString(k) - if err != nil { - return err - } - if err = e.Encode(m[k]); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) EncodeMapLen(l int) error { - if l < 16 { - return e.writeCode(codes.FixedMapLow | codes.Code(l)) - } - if l < 65536 { - return e.write2(codes.Map16, uint16(l)) - } - return e.write4(codes.Map32, uint32(l)) -} - -func encodeStructValue(e *Encoder, strct reflect.Value) error { - var structFields *fields - if e.flags&encodeUsingJSONFlag != 0 { - structFields = jsonStructs.Fields(strct.Type()) - } else { - structFields = structs.Fields(strct.Type()) - } - - if e.flags&structAsArrayFlag != 0 || structFields.AsArray { - return encodeStructValueAsArray(e, strct, structFields.List) - } - fields := structFields.OmitEmpty(strct) - - if err := e.EncodeMapLen(len(fields)); err != nil { - return err - } - - for _, f := range fields { - if err := e.EncodeString(f.name); err != nil { - return err - } - if err := f.EncodeValue(e, strct); err != nil { - return err - } - } - - return nil -} - -func encodeStructValueAsArray(e *Encoder, strct reflect.Value, fields []*field) error { - if err := e.EncodeArrayLen(len(fields)); err != nil { - return err - } - for _, f := range fields { - if err := f.EncodeValue(e, strct); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_number.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_number.go deleted file mode 100644 index bf3c2f851a..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/encode_number.go +++ /dev/null @@ -1,244 +0,0 @@ -package msgpack - -import ( - "math" - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -// EncodeUint8 encodes an uint8 in 2 bytes preserving type of the number. -func (e *Encoder) EncodeUint8(n uint8) error { - return e.write1(codes.Uint8, n) -} - -func (e *Encoder) encodeUint8Cond(n uint8) error { - if e.flags&useCompactIntsFlag != 0 { - return e.EncodeUint(uint64(n)) - } - return e.EncodeUint8(n) -} - -// EncodeUint16 encodes an uint16 in 3 bytes preserving type of the number. -func (e *Encoder) EncodeUint16(n uint16) error { - return e.write2(codes.Uint16, n) -} - -func (e *Encoder) encodeUint16Cond(n uint16) error { - if e.flags&useCompactIntsFlag != 0 { - return e.EncodeUint(uint64(n)) - } - return e.EncodeUint16(n) -} - -// EncodeUint32 encodes an uint16 in 5 bytes preserving type of the number. 
-func (e *Encoder) EncodeUint32(n uint32) error { - return e.write4(codes.Uint32, n) -} - -func (e *Encoder) encodeUint32Cond(n uint32) error { - if e.flags&useCompactIntsFlag != 0 { - return e.EncodeUint(uint64(n)) - } - return e.EncodeUint32(n) -} - -// EncodeUint64 encodes an uint16 in 9 bytes preserving type of the number. -func (e *Encoder) EncodeUint64(n uint64) error { - return e.write8(codes.Uint64, n) -} - -func (e *Encoder) encodeUint64Cond(n uint64) error { - if e.flags&useCompactIntsFlag != 0 { - return e.EncodeUint(n) - } - return e.EncodeUint64(n) -} - -// EncodeInt8 encodes an int8 in 2 bytes preserving type of the number. -func (e *Encoder) EncodeInt8(n int8) error { - return e.write1(codes.Int8, uint8(n)) -} - -func (e *Encoder) encodeInt8Cond(n int8) error { - if e.flags&useCompactIntsFlag != 0 { - return e.EncodeInt(int64(n)) - } - return e.EncodeInt8(n) -} - -// EncodeInt16 encodes an int16 in 3 bytes preserving type of the number. -func (e *Encoder) EncodeInt16(n int16) error { - return e.write2(codes.Int16, uint16(n)) -} - -func (e *Encoder) encodeInt16Cond(n int16) error { - if e.flags&useCompactIntsFlag != 0 { - return e.EncodeInt(int64(n)) - } - return e.EncodeInt16(n) -} - -// EncodeInt32 encodes an int32 in 5 bytes preserving type of the number. -func (e *Encoder) EncodeInt32(n int32) error { - return e.write4(codes.Int32, uint32(n)) -} - -func (e *Encoder) encodeInt32Cond(n int32) error { - if e.flags&useCompactIntsFlag != 0 { - return e.EncodeInt(int64(n)) - } - return e.EncodeInt32(n) -} - -// EncodeInt64 encodes an int64 in 9 bytes preserving type of the number. -func (e *Encoder) EncodeInt64(n int64) error { - return e.write8(codes.Int64, uint64(n)) -} - -func (e *Encoder) encodeInt64Cond(n int64) error { - if e.flags&useCompactIntsFlag != 0 { - return e.EncodeInt(n) - } - return e.EncodeInt64(n) -} - -// EncodeUnsignedNumber encodes an uint64 in 1, 2, 3, 5, or 9 bytes. -// Type of the number is lost during encoding. -func (e *Encoder) EncodeUint(n uint64) error { - if n <= math.MaxInt8 { - return e.w.WriteByte(byte(n)) - } - if n <= math.MaxUint8 { - return e.EncodeUint8(uint8(n)) - } - if n <= math.MaxUint16 { - return e.EncodeUint16(uint16(n)) - } - if n <= math.MaxUint32 { - return e.EncodeUint32(uint32(n)) - } - return e.EncodeUint64(n) -} - -// EncodeNumber encodes an int64 in 1, 2, 3, 5, or 9 bytes. -// Type of the number is lost during encoding. -func (e *Encoder) EncodeInt(n int64) error { - if n >= 0 { - return e.EncodeUint(uint64(n)) - } - if n >= int64(int8(codes.NegFixedNumLow)) { - return e.w.WriteByte(byte(n)) - } - if n >= math.MinInt8 { - return e.EncodeInt8(int8(n)) - } - if n >= math.MinInt16 { - return e.EncodeInt16(int16(n)) - } - if n >= math.MinInt32 { - return e.EncodeInt32(int32(n)) - } - return e.EncodeInt64(n) -} - -func (e *Encoder) EncodeFloat32(n float32) error { - if e.flags&useCompactFloatsFlag != 0 { - if float32(int64(n)) == n { - return e.EncodeInt(int64(n)) - } - } - return e.write4(codes.Float, math.Float32bits(n)) -} - -func (e *Encoder) EncodeFloat64(n float64) error { - if e.flags&useCompactFloatsFlag != 0 { - // Both NaN and Inf convert to int64(-0x8000000000000000) - // If n is NaN then it never compares true with any other value - // If n is Inf then it doesn't convert from int64 back to +/-Inf - // In both cases the comparison works. 
- if float64(int64(n)) == n { - return e.EncodeInt(int64(n)) - } - } - return e.write8(codes.Double, math.Float64bits(n)) -} - -func (e *Encoder) write1(code codes.Code, n uint8) error { - e.buf = e.buf[:2] - e.buf[0] = byte(code) - e.buf[1] = n - return e.write(e.buf) -} - -func (e *Encoder) write2(code codes.Code, n uint16) error { - e.buf = e.buf[:3] - e.buf[0] = byte(code) - e.buf[1] = byte(n >> 8) - e.buf[2] = byte(n) - return e.write(e.buf) -} - -func (e *Encoder) write4(code codes.Code, n uint32) error { - e.buf = e.buf[:5] - e.buf[0] = byte(code) - e.buf[1] = byte(n >> 24) - e.buf[2] = byte(n >> 16) - e.buf[3] = byte(n >> 8) - e.buf[4] = byte(n) - return e.write(e.buf) -} - -func (e *Encoder) write8(code codes.Code, n uint64) error { - e.buf = e.buf[:9] - e.buf[0] = byte(code) - e.buf[1] = byte(n >> 56) - e.buf[2] = byte(n >> 48) - e.buf[3] = byte(n >> 40) - e.buf[4] = byte(n >> 32) - e.buf[5] = byte(n >> 24) - e.buf[6] = byte(n >> 16) - e.buf[7] = byte(n >> 8) - e.buf[8] = byte(n) - return e.write(e.buf) -} - -func encodeUint8CondValue(e *Encoder, v reflect.Value) error { - return e.encodeUint8Cond(uint8(v.Uint())) -} - -func encodeUint16CondValue(e *Encoder, v reflect.Value) error { - return e.encodeUint16Cond(uint16(v.Uint())) -} - -func encodeUint32CondValue(e *Encoder, v reflect.Value) error { - return e.encodeUint32Cond(uint32(v.Uint())) -} - -func encodeUint64CondValue(e *Encoder, v reflect.Value) error { - return e.encodeUint64Cond(v.Uint()) -} - -func encodeInt8CondValue(e *Encoder, v reflect.Value) error { - return e.encodeInt8Cond(int8(v.Int())) -} - -func encodeInt16CondValue(e *Encoder, v reflect.Value) error { - return e.encodeInt16Cond(int16(v.Int())) -} - -func encodeInt32CondValue(e *Encoder, v reflect.Value) error { - return e.encodeInt32Cond(int32(v.Int())) -} - -func encodeInt64CondValue(e *Encoder, v reflect.Value) error { - return e.encodeInt64Cond(v.Int()) -} - -func encodeFloat32Value(e *Encoder, v reflect.Value) error { - return e.EncodeFloat32(float32(v.Float())) -} - -func encodeFloat64Value(e *Encoder, v reflect.Value) error { - return e.EncodeFloat64(v.Float()) -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go deleted file mode 100644 index 69a9618e06..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go +++ /dev/null @@ -1,131 +0,0 @@ -package msgpack - -import ( - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -var sliceStringType = reflect.TypeOf(([]string)(nil)) - -func encodeStringValue(e *Encoder, v reflect.Value) error { - return e.EncodeString(v.String()) -} - -func encodeByteSliceValue(e *Encoder, v reflect.Value) error { - return e.EncodeBytes(v.Bytes()) -} - -func encodeByteArrayValue(e *Encoder, v reflect.Value) error { - if err := e.EncodeBytesLen(v.Len()); err != nil { - return err - } - - if v.CanAddr() { - b := v.Slice(0, v.Len()).Bytes() - return e.write(b) - } - - e.buf = grow(e.buf, v.Len()) - reflect.Copy(reflect.ValueOf(e.buf), v) - return e.write(e.buf) -} - -func grow(b []byte, n int) []byte { - if cap(b) >= n { - return b[:n] - } - b = b[:cap(b)] - b = append(b, make([]byte, n-len(b))...) 
- return b -} - -func (e *Encoder) EncodeBytesLen(l int) error { - if l < 256 { - return e.write1(codes.Bin8, uint8(l)) - } - if l < 65536 { - return e.write2(codes.Bin16, uint16(l)) - } - return e.write4(codes.Bin32, uint32(l)) -} - -func (e *Encoder) encodeStrLen(l int) error { - if l < 32 { - return e.writeCode(codes.FixedStrLow | codes.Code(l)) - } - if l < 256 { - return e.write1(codes.Str8, uint8(l)) - } - if l < 65536 { - return e.write2(codes.Str16, uint16(l)) - } - return e.write4(codes.Str32, uint32(l)) -} - -func (e *Encoder) EncodeString(v string) error { - if err := e.encodeStrLen(len(v)); err != nil { - return err - } - return e.writeString(v) -} - -func (e *Encoder) EncodeBytes(v []byte) error { - if v == nil { - return e.EncodeNil() - } - if err := e.EncodeBytesLen(len(v)); err != nil { - return err - } - return e.write(v) -} - -func (e *Encoder) EncodeArrayLen(l int) error { - if l < 16 { - return e.writeCode(codes.FixedArrayLow | codes.Code(l)) - } - if l < 65536 { - return e.write2(codes.Array16, uint16(l)) - } - return e.write4(codes.Array32, uint32(l)) -} - -func encodeStringSliceValue(e *Encoder, v reflect.Value) error { - ss := v.Convert(sliceStringType).Interface().([]string) - return e.encodeStringSlice(ss) -} - -func (e *Encoder) encodeStringSlice(s []string) error { - if s == nil { - return e.EncodeNil() - } - if err := e.EncodeArrayLen(len(s)); err != nil { - return err - } - for _, v := range s { - if err := e.EncodeString(v); err != nil { - return err - } - } - return nil -} - -func encodeSliceValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - return encodeArrayValue(e, v) -} - -func encodeArrayValue(e *Encoder, v reflect.Value) error { - l := v.Len() - if err := e.EncodeArrayLen(l); err != nil { - return err - } - for i := 0; i < l; i++ { - if err := e.EncodeValue(v.Index(i)); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_value.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_value.go deleted file mode 100644 index 335fcdb7ed..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/encode_value.go +++ /dev/null @@ -1,216 +0,0 @@ -package msgpack - -import ( - "encoding" - "fmt" - "reflect" -) - -var valueEncoders []encoderFunc - -//nolint:gochecknoinits -func init() { - valueEncoders = []encoderFunc{ - reflect.Bool: encodeBoolValue, - reflect.Int: encodeInt64CondValue, - reflect.Int8: encodeInt8CondValue, - reflect.Int16: encodeInt16CondValue, - reflect.Int32: encodeInt32CondValue, - reflect.Int64: encodeInt64CondValue, - reflect.Uint: encodeUint64CondValue, - reflect.Uint8: encodeUint8CondValue, - reflect.Uint16: encodeUint16CondValue, - reflect.Uint32: encodeUint32CondValue, - reflect.Uint64: encodeUint64CondValue, - reflect.Float32: encodeFloat32Value, - reflect.Float64: encodeFloat64Value, - reflect.Complex64: encodeUnsupportedValue, - reflect.Complex128: encodeUnsupportedValue, - reflect.Array: encodeArrayValue, - reflect.Chan: encodeUnsupportedValue, - reflect.Func: encodeUnsupportedValue, - reflect.Interface: encodeInterfaceValue, - reflect.Map: encodeMapValue, - reflect.Ptr: encodeUnsupportedValue, - reflect.Slice: encodeSliceValue, - reflect.String: encodeStringValue, - reflect.Struct: encodeStructValue, - reflect.UnsafePointer: encodeUnsupportedValue, - } -} - -func getEncoder(typ reflect.Type) encoderFunc { - if v, ok := typeEncMap.Load(typ); ok { - return v.(encoderFunc) - } - fn := _getEncoder(typ) - typeEncMap.Store(typ, fn) - return fn 
-} - -func _getEncoder(typ reflect.Type) encoderFunc { - kind := typ.Kind() - - if kind == reflect.Ptr { - if _, ok := typeEncMap.Load(typ.Elem()); ok { - return ptrEncoderFunc(typ) - } - } - - if typ.Implements(customEncoderType) { - return encodeCustomValue - } - if typ.Implements(marshalerType) { - return marshalValue - } - if typ.Implements(binaryMarshalerType) { - return marshalBinaryValue - } - - // Addressable struct field value. - if kind != reflect.Ptr { - ptr := reflect.PtrTo(typ) - if ptr.Implements(customEncoderType) { - return encodeCustomValuePtr - } - if ptr.Implements(marshalerType) { - return marshalValuePtr - } - if ptr.Implements(binaryMarshalerType) { - return marshalBinaryValuePtr - } - } - - if typ == errorType { - return encodeErrorValue - } - - switch kind { - case reflect.Ptr: - return ptrEncoderFunc(typ) - case reflect.Slice: - elem := typ.Elem() - if elem.Kind() == reflect.Uint8 { - return encodeByteSliceValue - } - if elem == stringType { - return encodeStringSliceValue - } - case reflect.Array: - if typ.Elem().Kind() == reflect.Uint8 { - return encodeByteArrayValue - } - case reflect.Map: - if typ.Key() == stringType { - switch typ.Elem() { - case stringType: - return encodeMapStringStringValue - case interfaceType: - return encodeMapStringInterfaceValue - } - } - } - - return valueEncoders[kind] -} - -func ptrEncoderFunc(typ reflect.Type) encoderFunc { - encoder := getEncoder(typ.Elem()) - return func(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - return encoder(e, v.Elem()) - } -} - -func encodeCustomValuePtr(e *Encoder, v reflect.Value) error { - if !v.CanAddr() { - return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) - } - encoder := v.Addr().Interface().(CustomEncoder) - return encoder.EncodeMsgpack(e) -} - -func encodeCustomValue(e *Encoder, v reflect.Value) error { - if nilable(v) && v.IsNil() { - return e.EncodeNil() - } - - encoder := v.Interface().(CustomEncoder) - return encoder.EncodeMsgpack(e) -} - -func marshalValuePtr(e *Encoder, v reflect.Value) error { - if !v.CanAddr() { - return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) - } - return marshalValue(e, v.Addr()) -} - -func marshalValue(e *Encoder, v reflect.Value) error { - if nilable(v) && v.IsNil() { - return e.EncodeNil() - } - - marshaler := v.Interface().(Marshaler) - b, err := marshaler.MarshalMsgpack() - if err != nil { - return err - } - _, err = e.w.Write(b) - return err -} - -func encodeBoolValue(e *Encoder, v reflect.Value) error { - return e.EncodeBool(v.Bool()) -} - -func encodeInterfaceValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - return e.EncodeValue(v.Elem()) -} - -func encodeErrorValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - return e.EncodeString(v.Interface().(error).Error()) -} - -func encodeUnsupportedValue(e *Encoder, v reflect.Value) error { - return fmt.Errorf("msgpack: Encode(unsupported %s)", v.Type()) -} - -func nilable(v reflect.Value) bool { - switch v.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return true - } - return false -} - -//------------------------------------------------------------------------------ - -func marshalBinaryValuePtr(e *Encoder, v reflect.Value) error { - if !v.CanAddr() { - return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) - } - return marshalBinaryValue(e, v.Addr()) -} - -func marshalBinaryValue(e 
*Encoder, v reflect.Value) error { - if nilable(v) && v.IsNil() { - return e.EncodeNil() - } - - marshaler := v.Interface().(encoding.BinaryMarshaler) - data, err := marshaler.MarshalBinary() - if err != nil { - return err - } - - return e.EncodeBytes(data) -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/ext.go b/vendor/github.com/vmihailenco/msgpack/v4/ext.go deleted file mode 100644 index 17e709bc8f..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/ext.go +++ /dev/null @@ -1,244 +0,0 @@ -package msgpack - -import ( - "bytes" - "fmt" - "reflect" - "sync" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -type extInfo struct { - Type reflect.Type - Decoder decoderFunc -} - -var extTypes = make(map[int8]*extInfo) - -var bufferPool = &sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -// RegisterExt records a type, identified by a value for that type, -// under the provided id. That id will identify the concrete type of a value -// sent or received as an interface variable. Only types that will be -// transferred as implementations of interface values need to be registered. -// Expecting to be used only during initialization, it panics if the mapping -// between types and ids is not a bijection. -func RegisterExt(id int8, value interface{}) { - typ := reflect.TypeOf(value) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - } - ptr := reflect.PtrTo(typ) - - if _, ok := extTypes[id]; ok { - panic(fmt.Errorf("msgpack: ext with id=%d is already registered", id)) - } - - registerExt(id, ptr, getEncoder(ptr), getDecoder(ptr)) - registerExt(id, typ, getEncoder(typ), getDecoder(typ)) -} - -func registerExt(id int8, typ reflect.Type, enc encoderFunc, dec decoderFunc) { - if enc != nil { - typeEncMap.Store(typ, makeExtEncoder(id, enc)) - } - if dec != nil { - extTypes[id] = &extInfo{ - Type: typ, - Decoder: dec, - } - typeDecMap.Store(typ, makeExtDecoder(id, dec)) - } -} - -func (e *Encoder) EncodeExtHeader(typeID int8, length int) error { - if err := e.encodeExtLen(length); err != nil { - return err - } - if err := e.w.WriteByte(byte(typeID)); err != nil { - return err - } - return nil -} - -func makeExtEncoder(typeID int8, enc encoderFunc) encoderFunc { - return func(e *Encoder, v reflect.Value) error { - buf := bufferPool.Get().(*bytes.Buffer) - defer bufferPool.Put(buf) - buf.Reset() - - oldw := e.w - e.w = buf - err := enc(e, v) - e.w = oldw - - if err != nil { - return err - } - - err = e.EncodeExtHeader(typeID, buf.Len()) - if err != nil { - return err - } - return e.write(buf.Bytes()) - } -} - -func makeExtDecoder(typeID int8, dec decoderFunc) decoderFunc { - return func(d *Decoder, v reflect.Value) error { - c, err := d.PeekCode() - if err != nil { - return err - } - - if !codes.IsExt(c) { - return dec(d, v) - } - - id, extLen, err := d.DecodeExtHeader() - if err != nil { - return err - } - - if id != typeID { - return fmt.Errorf("msgpack: got ext type=%d, wanted %d", id, typeID) - } - - d.extLen = extLen - return dec(d, v) - } -} - -func (e *Encoder) encodeExtLen(l int) error { - switch l { - case 1: - return e.writeCode(codes.FixExt1) - case 2: - return e.writeCode(codes.FixExt2) - case 4: - return e.writeCode(codes.FixExt4) - case 8: - return e.writeCode(codes.FixExt8) - case 16: - return e.writeCode(codes.FixExt16) - } - if l < 256 { - return e.write1(codes.Ext8, uint8(l)) - } - if l < 65536 { - return e.write2(codes.Ext16, uint16(l)) - } - return e.write4(codes.Ext32, uint32(l)) -} - -func (d *Decoder) parseExtLen(c codes.Code) (int, error) 
{ - switch c { - case codes.FixExt1: - return 1, nil - case codes.FixExt2: - return 2, nil - case codes.FixExt4: - return 4, nil - case codes.FixExt8: - return 8, nil - case codes.FixExt16: - return 16, nil - case codes.Ext8: - n, err := d.uint8() - return int(n), err - case codes.Ext16: - n, err := d.uint16() - return int(n), err - case codes.Ext32: - n, err := d.uint32() - return int(n), err - default: - return 0, fmt.Errorf("msgpack: invalid code=%x decoding ext length", c) - } -} - -func (d *Decoder) extHeader(c codes.Code) (int8, int, error) { - length, err := d.parseExtLen(c) - if err != nil { - return 0, 0, err - } - - typeID, err := d.readCode() - if err != nil { - return 0, 0, err - } - - return int8(typeID), length, nil -} - -func (d *Decoder) DecodeExtHeader() (typeID int8, length int, err error) { - c, err := d.readCode() - if err != nil { - return - } - return d.extHeader(c) -} - -func (d *Decoder) extInterface(c codes.Code) (interface{}, error) { - extID, extLen, err := d.extHeader(c) - if err != nil { - return nil, err - } - - info, ok := extTypes[extID] - if !ok { - return nil, fmt.Errorf("msgpack: unknown ext id=%d", extID) - } - - v := reflect.New(info.Type) - - d.extLen = extLen - err = info.Decoder(d, v.Elem()) - d.extLen = 0 - if err != nil { - return nil, err - } - - return v.Interface(), nil -} - -func (d *Decoder) skipExt(c codes.Code) error { - n, err := d.parseExtLen(c) - if err != nil { - return err - } - return d.skipN(n + 1) -} - -func (d *Decoder) skipExtHeader(c codes.Code) error { - // Read ext type. - _, err := d.readCode() - if err != nil { - return err - } - // Read ext body len. - for i := 0; i < extHeaderLen(c); i++ { - _, err := d.readCode() - if err != nil { - return err - } - } - return nil -} - -func extHeaderLen(c codes.Code) int { - switch c { - case codes.Ext8: - return 1 - case codes.Ext16: - return 2 - case codes.Ext32: - return 4 - } - return 0 -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/intern.go b/vendor/github.com/vmihailenco/msgpack/v4/intern.go deleted file mode 100644 index 6ca5692739..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/intern.go +++ /dev/null @@ -1,236 +0,0 @@ -package msgpack - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -var internStringExtID int8 = -128 - -var errUnexpectedCode = errors.New("msgpack: unexpected code") - -func encodeInternInterfaceValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - - v = v.Elem() - if v.Kind() == reflect.String { - return encodeInternStringValue(e, v) - } - return e.EncodeValue(v) -} - -func encodeInternStringValue(e *Encoder, v reflect.Value) error { - s := v.String() - - if s != "" { - if idx, ok := e.intern[s]; ok { - return e.internStringIndex(idx) - } - - if e.intern == nil { - e.intern = make(map[string]int) - } - - idx := len(e.intern) - e.intern[s] = idx - } - - return e.EncodeString(s) -} - -func (e *Encoder) internStringIndex(idx int) error { - if idx < math.MaxUint8 { - if err := e.writeCode(codes.FixExt1); err != nil { - return err - } - if err := e.w.WriteByte(byte(internStringExtID)); err != nil { - return err - } - return e.w.WriteByte(byte(idx)) - } - - if idx < math.MaxUint16 { - if err := e.writeCode(codes.FixExt2); err != nil { - return err - } - if err := e.w.WriteByte(byte(internStringExtID)); err != nil { - return err - } - if err := e.w.WriteByte(byte(idx >> 8)); err != nil { - return err - } - return e.w.WriteByte(byte(idx)) - } - 
- if int64(idx) < math.MaxUint32 { - if err := e.writeCode(codes.FixExt4); err != nil { - return err - } - if err := e.w.WriteByte(byte(internStringExtID)); err != nil { - return err - } - if err := e.w.WriteByte(byte(idx >> 24)); err != nil { - return err - } - if err := e.w.WriteByte(byte(idx >> 16)); err != nil { - return err - } - if err := e.w.WriteByte(byte(idx >> 8)); err != nil { - return err - } - return e.w.WriteByte(byte(idx)) - } - - return fmt.Errorf("msgpack: intern string index=%d is too large", idx) -} - -//------------------------------------------------------------------------------ - -func decodeInternInterfaceValue(d *Decoder, v reflect.Value) error { - c, err := d.readCode() - if err != nil { - return err - } - - s, err := d.internString(c) - if err == nil { - v.Set(reflect.ValueOf(s)) - return nil - } - if err != nil && err != errUnexpectedCode { - return err - } - - if err := d.s.UnreadByte(); err != nil { - return err - } - - return decodeInterfaceValue(d, v) -} - -func decodeInternStringValue(d *Decoder, v reflect.Value) error { - if err := mustSet(v); err != nil { - return err - } - - c, err := d.readCode() - if err != nil { - return err - } - - s, err := d.internString(c) - if err != nil { - if err == errUnexpectedCode { - return fmt.Errorf("msgpack: invalid code=%x decoding intern string", c) - } - return err - } - - v.SetString(s) - return nil -} - -func (d *Decoder) internString(c codes.Code) (string, error) { - if codes.IsFixedString(c) { - n := int(c & codes.FixedStrMask) - return d.internStringWithLen(n) - } - - switch c { - case codes.FixExt1, codes.FixExt2, codes.FixExt4: - typeID, length, err := d.extHeader(c) - if err != nil { - return "", err - } - if typeID != internStringExtID { - err := fmt.Errorf("msgpack: got ext type=%d, wanted %d", - typeID, internStringExtID) - return "", err - } - - idx, err := d.internStringIndex(length) - if err != nil { - return "", err - } - - return d.internStringAtIndex(idx) - case codes.Str8, codes.Bin8: - n, err := d.uint8() - if err != nil { - return "", err - } - return d.internStringWithLen(int(n)) - case codes.Str16, codes.Bin16: - n, err := d.uint16() - if err != nil { - return "", err - } - return d.internStringWithLen(int(n)) - case codes.Str32, codes.Bin32: - n, err := d.uint32() - if err != nil { - return "", err - } - return d.internStringWithLen(int(n)) - } - - return "", errUnexpectedCode -} - -func (d *Decoder) internStringIndex(length int) (int, error) { - switch length { - case 1: - c, err := d.s.ReadByte() - if err != nil { - return 0, err - } - return int(c), nil - case 2: - b, err := d.readN(2) - if err != nil { - return 0, err - } - n := binary.BigEndian.Uint16(b) - return int(n), nil - case 4: - b, err := d.readN(4) - if err != nil { - return 0, err - } - n := binary.BigEndian.Uint32(b) - return int(n), nil - } - - err := fmt.Errorf("msgpack: unsupported intern string index length=%d", length) - return 0, err -} - -func (d *Decoder) internStringAtIndex(idx int) (string, error) { - if idx >= len(d.intern) { - err := fmt.Errorf("msgpack: intern string with index=%d does not exist", idx) - return "", err - } - return d.intern[idx], nil -} - -func (d *Decoder) internStringWithLen(n int) (string, error) { - if n <= 0 { - return "", nil - } - - s, err := d.stringWithLen(n) - if err != nil { - return "", err - } - - d.intern = append(d.intern, s) - - return s, nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/time.go b/vendor/github.com/vmihailenco/msgpack/v4/time.go deleted file mode 100644 
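The interning helpers deleted above encode a repeated string as a back-reference: a fixext value whose ext id is internStringExtID (-128) and whose payload is the index of the previously written string. A byte-level sketch of the smallest form, assuming the fixext1 code 0xd4 from the MessagePack spec:

```go
package main

import "fmt"

// internRef1 builds the one-byte-index form written by the deleted
// internStringIndex: fixext1 (0xd4), ext id -128 (0x80 as an unsigned
// byte), then the index itself. Larger indexes use fixext2/fixext4
// with the index in big-endian order, as in the code above.
func internRef1(idx uint8) []byte {
	return []byte{0xd4, 0x80, idx}
}

func main() {
	fmt.Printf("% x\n", internRef1(2)) // d4 80 02
}
```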
index bf53eb2a36..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/time.go +++ /dev/null @@ -1,149 +0,0 @@ -package msgpack - -import ( - "encoding/binary" - "fmt" - "reflect" - "time" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -var timeExtID int8 = -1 - -var timePtrType = reflect.TypeOf((*time.Time)(nil)) - -//nolint:gochecknoinits -func init() { - registerExt(timeExtID, timePtrType.Elem(), encodeTimeValue, decodeTimeValue) -} - -func (e *Encoder) EncodeTime(tm time.Time) error { - b := e.encodeTime(tm) - if err := e.encodeExtLen(len(b)); err != nil { - return err - } - if err := e.w.WriteByte(byte(timeExtID)); err != nil { - return err - } - return e.write(b) -} - -func (e *Encoder) encodeTime(tm time.Time) []byte { - secs := uint64(tm.Unix()) - if secs>>34 == 0 { - data := uint64(tm.Nanosecond())<<34 | secs - if data&0xffffffff00000000 == 0 { - b := e.timeBuf[:4] - binary.BigEndian.PutUint32(b, uint32(data)) - return b - } - b := e.timeBuf[:8] - binary.BigEndian.PutUint64(b, data) - return b - } - - b := e.timeBuf[:12] - binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond())) - binary.BigEndian.PutUint64(b[4:], secs) - return b -} - -func (d *Decoder) DecodeTime() (time.Time, error) { - tm, err := d.decodeTime() - if err != nil { - return tm, err - } - - if tm.IsZero() { - // Assume that zero time does not have timezone information. - return tm.UTC(), nil - } - return tm, nil -} - -func (d *Decoder) decodeTime() (time.Time, error) { - extLen := d.extLen - d.extLen = 0 - if extLen == 0 { - c, err := d.readCode() - if err != nil { - return time.Time{}, err - } - - // Legacy format. - if c == codes.FixedArrayLow|2 { - sec, err := d.DecodeInt64() - if err != nil { - return time.Time{}, err - } - - nsec, err := d.DecodeInt64() - if err != nil { - return time.Time{}, err - } - - return time.Unix(sec, nsec), nil - } - - if codes.IsString(c) { - s, err := d.string(c) - if err != nil { - return time.Time{}, err - } - return time.Parse(time.RFC3339Nano, s) - } - - extLen, err = d.parseExtLen(c) - if err != nil { - return time.Time{}, err - } - - // Skip ext id. 
- _, err = d.s.ReadByte() - if err != nil { - return time.Time{}, nil - } - } - - b, err := d.readN(extLen) - if err != nil { - return time.Time{}, err - } - - switch len(b) { - case 4: - sec := binary.BigEndian.Uint32(b) - return time.Unix(int64(sec), 0), nil - case 8: - sec := binary.BigEndian.Uint64(b) - nsec := int64(sec >> 34) - sec &= 0x00000003ffffffff - return time.Unix(int64(sec), nsec), nil - case 12: - nsec := binary.BigEndian.Uint32(b) - sec := binary.BigEndian.Uint64(b[4:]) - return time.Unix(int64(sec), int64(nsec)), nil - default: - err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen) - return time.Time{}, err - } -} - -func encodeTimeValue(e *Encoder, v reflect.Value) error { - tm := v.Interface().(time.Time) - b := e.encodeTime(tm) - return e.write(b) -} - -func decodeTimeValue(d *Decoder, v reflect.Value) error { - tm, err := d.DecodeTime() - if err != nil { - return err - } - - ptr := v.Addr().Interface().(*time.Time) - *ptr = tm - - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/types.go b/vendor/github.com/vmihailenco/msgpack/v4/types.go deleted file mode 100644 index 08cf099dd2..0000000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/types.go +++ /dev/null @@ -1,382 +0,0 @@ -package msgpack - -import ( - "encoding" - "fmt" - "log" - "reflect" - "sync" - - "github.com/vmihailenco/tagparser" -) - -var errorType = reflect.TypeOf((*error)(nil)).Elem() - -var ( - customEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem() - customDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem() -) - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -var ( - binaryMarshalerType = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() - binaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() -) - -type ( - encoderFunc func(*Encoder, reflect.Value) error - decoderFunc func(*Decoder, reflect.Value) error -) - -var ( - typeEncMap sync.Map - typeDecMap sync.Map -) - -// Register registers encoder and decoder functions for a value. -// This is low level API and in most cases you should prefer implementing -// Marshaler/CustomEncoder and Unmarshaler/CustomDecoder interfaces. 
-func Register(value interface{}, enc encoderFunc, dec decoderFunc) { - typ := reflect.TypeOf(value) - if enc != nil { - typeEncMap.Store(typ, enc) - } - if dec != nil { - typeDecMap.Store(typ, dec) - } -} - -//------------------------------------------------------------------------------ - -var ( - structs = newStructCache(false) - jsonStructs = newStructCache(true) -) - -type structCache struct { - m sync.Map - - useJSONTag bool -} - -func newStructCache(useJSONTag bool) *structCache { - return &structCache{ - useJSONTag: useJSONTag, - } -} - -func (m *structCache) Fields(typ reflect.Type) *fields { - if v, ok := m.m.Load(typ); ok { - return v.(*fields) - } - - fs := getFields(typ, m.useJSONTag) - m.m.Store(typ, fs) - return fs -} - -//------------------------------------------------------------------------------ - -type field struct { - name string - index []int - omitEmpty bool - encoder encoderFunc - decoder decoderFunc -} - -func (f *field) Omit(strct reflect.Value) bool { - v, isNil := fieldByIndex(strct, f.index) - if isNil { - return true - } - return f.omitEmpty && isEmptyValue(v) -} - -func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { - v, isNil := fieldByIndex(strct, f.index) - if isNil { - return e.EncodeNil() - } - return f.encoder(e, v) -} - -func (f *field) DecodeValue(d *Decoder, strct reflect.Value) error { - v := fieldByIndexAlloc(strct, f.index) - return f.decoder(d, v) -} - -//------------------------------------------------------------------------------ - -type fields struct { - Type reflect.Type - Map map[string]*field - List []*field - AsArray bool - - hasOmitEmpty bool -} - -func newFields(typ reflect.Type) *fields { - return &fields{ - Type: typ, - Map: make(map[string]*field, typ.NumField()), - List: make([]*field, 0, typ.NumField()), - } -} - -func (fs *fields) Add(field *field) { - fs.warnIfFieldExists(field.name) - fs.Map[field.name] = field - fs.List = append(fs.List, field) - if field.omitEmpty { - fs.hasOmitEmpty = true - } -} - -func (fs *fields) warnIfFieldExists(name string) { - if _, ok := fs.Map[name]; ok { - log.Printf("msgpack: %s already has field=%s", fs.Type, name) - } -} - -func (fs *fields) OmitEmpty(strct reflect.Value) []*field { - if !fs.hasOmitEmpty { - return fs.List - } - - fields := make([]*field, 0, len(fs.List)) - - for _, f := range fs.List { - if !f.Omit(strct) { - fields = append(fields, f) - } - } - - return fields -} - -func getFields(typ reflect.Type, useJSONTag bool) *fields { - fs := newFields(typ) - - var omitEmpty bool - for i := 0; i < typ.NumField(); i++ { - f := typ.Field(i) - - tagStr := f.Tag.Get("msgpack") - if useJSONTag && tagStr == "" { - tagStr = f.Tag.Get("json") - } - - tag := tagparser.Parse(tagStr) - if tag.Name == "-" { - continue - } - - if f.Name == "_msgpack" { - if tag.HasOption("asArray") { - fs.AsArray = true - } - if tag.HasOption("omitempty") { - omitEmpty = true - } - } - - if f.PkgPath != "" && !f.Anonymous { - continue - } - - field := &field{ - name: tag.Name, - index: f.Index, - omitEmpty: omitEmpty || tag.HasOption("omitempty"), - } - - if tag.HasOption("intern") { - switch f.Type.Kind() { - case reflect.Interface: - field.encoder = encodeInternInterfaceValue - field.decoder = decodeInternInterfaceValue - case reflect.String: - field.encoder = encodeInternStringValue - field.decoder = decodeInternStringValue - default: - err := fmt.Errorf("msgpack: intern strings are not supported on %s", f.Type) - panic(err) - } - } else { - field.encoder = getEncoder(f.Type) - field.decoder = 
getDecoder(f.Type) - } - - if field.name == "" { - field.name = f.Name - } - - if f.Anonymous && !tag.HasOption("noinline") { - inline := tag.HasOption("inline") - if inline { - inlineFields(fs, f.Type, field, useJSONTag) - } else { - inline = shouldInline(fs, f.Type, field, useJSONTag) - } - - if inline { - if _, ok := fs.Map[field.name]; ok { - log.Printf("msgpack: %s already has field=%s", fs.Type, field.name) - } - fs.Map[field.name] = field - continue - } - } - - fs.Add(field) - - if alias, ok := tag.Options["alias"]; ok { - fs.warnIfFieldExists(alias) - fs.Map[alias] = field - } - } - return fs -} - -var ( - encodeStructValuePtr uintptr - decodeStructValuePtr uintptr -) - -//nolint:gochecknoinits -func init() { - encodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer() - decodeStructValuePtr = reflect.ValueOf(decodeStructValue).Pointer() -} - -func inlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) { - inlinedFields := getFields(typ, useJSONTag).List - for _, field := range inlinedFields { - if _, ok := fs.Map[field.name]; ok { - // Don't inline shadowed fields. - continue - } - field.index = append(f.index, field.index...) - fs.Add(field) - } -} - -func shouldInline(fs *fields, typ reflect.Type, f *field, useJSONTag bool) bool { - var encoder encoderFunc - var decoder decoderFunc - - if typ.Kind() == reflect.Struct { - encoder = f.encoder - decoder = f.decoder - } else { - for typ.Kind() == reflect.Ptr { - typ = typ.Elem() - encoder = getEncoder(typ) - decoder = getDecoder(typ) - } - if typ.Kind() != reflect.Struct { - return false - } - } - - if reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr { - return false - } - if reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr { - return false - } - - inlinedFields := getFields(typ, useJSONTag).List - for _, field := range inlinedFields { - if _, ok := fs.Map[field.name]; ok { - // Don't auto inline if there are shadowed fields. - return false - } - } - - for _, field := range inlinedFields { - field.index = append(f.index, field.index...) 
- fs.Add(field) - } - return true -} - -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func fieldByIndex(v reflect.Value, index []int) (_ reflect.Value, isNil bool) { - if len(index) == 1 { - return v.Field(index[0]), false - } - - for i, idx := range index { - if i > 0 { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return v, true - } - v = v.Elem() - } - } - v = v.Field(idx) - } - - return v, false -} - -func fieldByIndexAlloc(v reflect.Value, index []int) reflect.Value { - if len(index) == 1 { - return v.Field(index[0]) - } - - for i, idx := range index { - if i > 0 { - var ok bool - v, ok = indirectNew(v) - if !ok { - return v - } - } - v = v.Field(idx) - } - - return v -} - -func indirectNew(v reflect.Value) (reflect.Value, bool) { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - if !v.CanSet() { - return v, false - } - elemType := v.Type().Elem() - if elemType.Kind() != reflect.Struct { - return v, false - } - v.Set(reflect.New(elemType)) - } - v = v.Elem() - } - return v, true -} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/.prettierrc b/vendor/github.com/vmihailenco/msgpack/v5/.prettierrc new file mode 100644 index 0000000000..8b7f044ad1 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/.prettierrc @@ -0,0 +1,4 @@ +semi: false +singleQuote: true +proseWrap: always +printWidth: 100 diff --git a/vendor/github.com/vmihailenco/msgpack/v5/.travis.yml b/vendor/github.com/vmihailenco/msgpack/v5/.travis.yml new file mode 100644 index 0000000000..e2ce06c49f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/.travis.yml @@ -0,0 +1,20 @@ +sudo: false +language: go + +go: + - 1.15.x + - 1.16.x + - tip + +matrix: + allow_failures: + - go: tip + +env: + - GO111MODULE=on + +go_import_path: github.com/vmihailenco/msgpack + +before_install: + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go + env GOPATH)/bin v1.31.0 diff --git a/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md b/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md new file mode 100644 index 0000000000..d45441e6d7 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md @@ -0,0 +1,75 @@ +## [5.4.1](https://github.com/vmihailenco/msgpack/compare/v5.4.0...v5.4.1) (2023-10-26) + + +### Bug Fixes + +* **reflect:** not assignable to type ([edeaedd](https://github.com/vmihailenco/msgpack/commit/edeaeddb2d51868df8c6ff2d8a218b527aeaf5fd)) + + + +# [5.4.0](https://github.com/vmihailenco/msgpack/compare/v5.3.6...v5.4.0) (2023-10-01) + + + +## [5.3.6](https://github.com/vmihailenco/msgpack/compare/v5.3.5...v5.3.6) (2023-10-01) + + +### Features + +* allow overwriting time.Time parsing from extID 13 (for NodeJS Date) ([9a6b73b](https://github.com/vmihailenco/msgpack/commit/9a6b73b3588fd962d568715f4375e24b089f7066)) +* apply omitEmptyFlag to empty structs ([e5f8d03](https://github.com/vmihailenco/msgpack/commit/e5f8d03c0a1dd9cc571d648cd610305139078de5)) +* support sorted keys for map[string]bool 
([690c1fa](https://github.com/vmihailenco/msgpack/commit/690c1fab9814fab4842295ea986111f49850d9a4)) + + + +## [5.3.5](https://github.com/vmihailenco/msgpack/compare/v5.3.4...v5.3.5) (2021-10-22) + +- Allow decoding `nil` code as boolean false. + +## v5 + +### Added + +- `DecodeMap` is split into `DecodeMap`, `DecodeTypedMap`, and `DecodeUntypedMap`. +- New msgpack extensions API. + +### Changed + +- `Reset*` functions also reset flags. +- `SetMapDecodeFunc` is renamed to `SetMapDecoder`. +- `StructAsArray` is renamed to `UseArrayEncodedStructs`. +- `SortMapKeys` is renamed to `SetSortMapKeys`. + +### Removed + +- `UseJSONTag` is removed. Use `SetCustomStructTag("json")` instead. + +## v4 + +- Encode, Decode, Marshal, and Unmarshal are changed to accept single argument. EncodeMulti and + DecodeMulti are added as replacement. +- Added EncodeInt8/16/32/64 and EncodeUint8/16/32/64. +- Encoder changed to preserve type of numbers instead of chosing most compact encoding. The old + behavior can be achieved with Encoder.UseCompactEncoding. + +## v3.3 + +- `msgpack:",inline"` tag is restored to force inlining structs. + +## v3.2 + +- Decoding extension types returns pointer to the value instead of the value. Fixes #153 + +## v3 + +- gopkg.in is not supported any more. Update import path to github.com/vmihailenco/msgpack. +- Msgpack maps are decoded into map[string]interface{} by default. +- EncodeSliceLen is removed in favor of EncodeArrayLen. DecodeSliceLen is removed in favor of + DecodeArrayLen. +- Embedded structs are automatically inlined where possible. +- Time is encoded using extension as described in https://github.com/msgpack/msgpack/pull/209. Old + format is supported as well. +- EncodeInt8/16/32/64 is replaced with EncodeInt. EncodeUint8/16/32/64 is replaced with EncodeUint. + There should be no performance differences. +- DecodeInterface can now return int8/16/32 and uint8/16/32. +- PeekCode returns codes.Code instead of byte. diff --git a/vendor/github.com/vmihailenco/msgpack/v5/LICENSE b/vendor/github.com/vmihailenco/msgpack/v5/LICENSE new file mode 100644 index 0000000000..b749d07079 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/vmihailenco/msgpack Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/msgpack/v5/Makefile b/vendor/github.com/vmihailenco/msgpack/v5/Makefile new file mode 100644 index 0000000000..e9aade7829 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/Makefile @@ -0,0 +1,6 @@ +test: + go test ./... + go test ./... -short -race + go test ./... -run=NONE -bench=. -benchmem + env GOOS=linux GOARCH=386 go test ./... + go vet diff --git a/vendor/github.com/vmihailenco/msgpack/v5/README.md b/vendor/github.com/vmihailenco/msgpack/v5/README.md new file mode 100644 index 0000000000..038464f182 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/README.md @@ -0,0 +1,100 @@ +# MessagePack encoding for Golang + +[![Build Status](https://travis-ci.org/vmihailenco/msgpack.svg)](https://travis-ci.org/vmihailenco/msgpack) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/vmihailenco/msgpack/v5)](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5) +[![Documentation](https://img.shields.io/badge/msgpack-documentation-informational)](https://msgpack.uptrace.dev/) +[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj) + +> msgpack is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). +> Uptrace is an [open source APM](https://uptrace.dev/get/open-source-apm.html) and blazingly fast +> [distributed tracing tool](https://get.uptrace.dev/compare/distributed-tracing-tools.html) powered +> by OpenTelemetry and ClickHouse. Give it a star as well! + +## Resources + +- [Documentation](https://msgpack.uptrace.dev) +- [Chat](https://discord.gg/rWtp5Aj) +- [Reference](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5) +- [Examples](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#pkg-examples) + +## Features + +- Primitives, arrays, maps, structs, time.Time and interface{}. +- Appengine \*datastore.Key and datastore.Cursor. +- [CustomEncoder]/[CustomDecoder] interfaces for custom encoding. +- [Extensions](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#example-RegisterExt) to encode + type information. +- Renaming fields via `msgpack:"my_field_name"` and alias via `msgpack:"alias:another_name"`. +- Omitting individual empty fields via `msgpack:",omitempty"` tag or all + [empty fields in a struct](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#example-Marshal-OmitEmpty). +- [Map keys sorting](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#Encoder.SetSortMapKeys). +- Encoding/decoding all + [structs as arrays](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#Encoder.UseArrayEncodedStructs) + or + [individual structs](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#example-Marshal-AsArray). +- [Encoder.SetCustomStructTag] with [Decoder.SetCustomStructTag] can turn msgpack into drop-in + replacement for any tag. +- Simple but very fast and efficient + [queries](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#example-Decoder.Query). 
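Two of the options named in the feature list above, shown together as a sketch against the v5 API this README references (SetSortMapKeys for deterministic map output, SetCustomStructTag to reuse `json` tags); the User type is invented for illustration.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack/v5"
)

func main() {
	var buf bytes.Buffer
	enc := msgpack.NewEncoder(&buf)

	// Emit map keys in sorted order so equal maps encode identically.
	enc.SetSortMapKeys(true)
	// Fall back to `json` struct tags when no `msgpack` tag is present.
	enc.SetCustomStructTag("json")

	type User struct {
		Name string `json:"name"`
	}

	if err := enc.Encode(map[string]interface{}{"b": 2, "a": 1}); err != nil {
		panic(err)
	}
	if err := enc.Encode(User{Name: "gopher"}); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes())
}
```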
+
+[customencoder]: https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#CustomEncoder
+[customdecoder]: https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#CustomDecoder
+[encoder.setcustomstructtag]:
+  https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#Encoder.SetCustomStructTag
+[decoder.setcustomstructtag]:
+  https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#Decoder.SetCustomStructTag
+
+## Installation
+
+msgpack supports the last 2 Go versions and requires support for
+[Go modules](https://github.com/golang/go/wiki/Modules). So make sure to initialize a Go module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+And then install msgpack/v5 (note _v5_ in the import; omitting it is a popular mistake):
+
+```shell
+go get github.com/vmihailenco/msgpack/v5
+```
+
+## Quickstart
+
+```go
+import (
+	"fmt"
+
+	"github.com/vmihailenco/msgpack/v5"
+)
+
+func ExampleMarshal() {
+	type Item struct {
+		Foo string
+	}
+
+	b, err := msgpack.Marshal(&Item{Foo: "bar"})
+	if err != nil {
+		panic(err)
+	}
+
+	var item Item
+	err = msgpack.Unmarshal(b, &item)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(item.Foo)
+	// Output: bar
+}
+```
+
+## See also
+
+- [Golang ORM](https://github.com/uptrace/bun) for PostgreSQL, MySQL, MSSQL, and SQLite
+- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/)
+- [Golang HTTP router](https://github.com/uptrace/bunrouter)
+- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
+
+## Contributors
+
+Thanks to all the people who already contributed!
+
+
+
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/commitlint.config.js b/vendor/github.com/vmihailenco/msgpack/v5/commitlint.config.js
new file mode 100644
index 0000000000..4fedde6daf
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/commitlint.config.js
@@ -0,0 +1 @@
+module.exports = { extends: ['@commitlint/config-conventional'] }
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode.go b/vendor/github.com/vmihailenco/msgpack/v5/decode.go
new file mode 100644
index 0000000000..ea645aadb3
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/decode.go
@@ -0,0 +1,708 @@
+package msgpack
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/vmihailenco/msgpack/v5/msgpcode"
+)
+
+const (
+	bytesAllocLimit = 1 << 20 // 1mb
+	sliceAllocLimit = 1e6     // 1m elements
+	maxMapSize      = 1e6     // 1m elements
+)
+
+const (
+	looseInterfaceDecodingFlag uint32 = 1 << iota
+	disallowUnknownFieldsFlag
+	usePreallocateValues
+	disableAllocLimitFlag
+)
+
+type bufReader interface {
+	io.Reader
+	io.ByteScanner
+}
+
+//------------------------------------------------------------------------------
+
+var decPool = sync.Pool{
+	New: func() interface{} {
+		return NewDecoder(nil)
+	},
+}
+
+func GetDecoder() *Decoder {
+	return decPool.Get().(*Decoder)
+}
+
+func PutDecoder(dec *Decoder) {
+	dec.r = nil
+	dec.s = nil
+	decPool.Put(dec)
+}
+
+//------------------------------------------------------------------------------
+
+// Unmarshal decodes the MessagePack-encoded data and stores the result
+// in the value pointed to by v.
+func Unmarshal(data []byte, v interface{}) error {
+	dec := GetDecoder()
+	dec.UsePreallocateValues(true)
+	dec.Reset(bytes.NewReader(data))
+	err := dec.Decode(v)
+
+	PutDecoder(dec)
+
+	return err
+}
+
+// A Decoder reads and decodes MessagePack values from an input stream.
+type Decoder struct { + r io.Reader + s io.ByteScanner + mapDecoder func(*Decoder) (interface{}, error) + structTag string + buf []byte + rec []byte + dict []string + flags uint32 +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r +// beyond the requested msgpack values. Buffering can be disabled +// by passing a reader that implements io.ByteScanner interface. +func NewDecoder(r io.Reader) *Decoder { + d := new(Decoder) + d.Reset(r) + return d +} + +// Reset discards any buffered data, resets all state, and switches the buffered +// reader to read from r. +func (d *Decoder) Reset(r io.Reader) { + d.ResetDict(r, nil) +} + +// ResetDict is like Reset, but also resets the dict. +func (d *Decoder) ResetDict(r io.Reader, dict []string) { + d.ResetReader(r) + d.flags = 0 + d.structTag = "" + d.dict = dict +} + +func (d *Decoder) WithDict(dict []string, fn func(*Decoder) error) error { + oldDict := d.dict + d.dict = dict + err := fn(d) + d.dict = oldDict + return err +} + +func (d *Decoder) ResetReader(r io.Reader) { + d.mapDecoder = nil + d.dict = nil + + if br, ok := r.(bufReader); ok { + d.r = br + d.s = br + } else if r == nil { + d.r = nil + d.s = nil + } else { + br := bufio.NewReader(r) + d.r = br + d.s = br + } +} + +func (d *Decoder) SetMapDecoder(fn func(*Decoder) (interface{}, error)) { + d.mapDecoder = fn +} + +// UseLooseInterfaceDecoding causes decoder to use DecodeInterfaceLoose +// to decode msgpack value into Go interface{}. +func (d *Decoder) UseLooseInterfaceDecoding(on bool) { + if on { + d.flags |= looseInterfaceDecodingFlag + } else { + d.flags &= ^looseInterfaceDecodingFlag + } +} + +// SetCustomStructTag causes the decoder to use the supplied tag as a fallback option +// if there is no msgpack tag. +func (d *Decoder) SetCustomStructTag(tag string) { + d.structTag = tag +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (d *Decoder) DisallowUnknownFields(on bool) { + if on { + d.flags |= disallowUnknownFieldsFlag + } else { + d.flags &= ^disallowUnknownFieldsFlag + } +} + +// UseInternedStrings enables support for decoding interned strings. +func (d *Decoder) UseInternedStrings(on bool) { + if on { + d.flags |= useInternedStringsFlag + } else { + d.flags &= ^useInternedStringsFlag + } +} + +// UsePreallocateValues enables preallocating values in chunks +func (d *Decoder) UsePreallocateValues(on bool) { + if on { + d.flags |= usePreallocateValues + } else { + d.flags &= ^usePreallocateValues + } +} + +// DisableAllocLimit enables fully allocating slices/maps when the size is known +func (d *Decoder) DisableAllocLimit(on bool) { + if on { + d.flags |= disableAllocLimitFlag + } else { + d.flags &= ^disableAllocLimitFlag + } +} + +// Buffered returns a reader of the data remaining in the Decoder's buffer. +// The reader is valid until the next call to Decode. 
+func (d *Decoder) Buffered() io.Reader { + return d.r +} + +//nolint:gocyclo +func (d *Decoder) Decode(v interface{}) error { + var err error + switch v := v.(type) { + case *string: + if v != nil { + *v, err = d.DecodeString() + return err + } + case *[]byte: + if v != nil { + return d.decodeBytesPtr(v) + } + case *int: + if v != nil { + *v, err = d.DecodeInt() + return err + } + case *int8: + if v != nil { + *v, err = d.DecodeInt8() + return err + } + case *int16: + if v != nil { + *v, err = d.DecodeInt16() + return err + } + case *int32: + if v != nil { + *v, err = d.DecodeInt32() + return err + } + case *int64: + if v != nil { + *v, err = d.DecodeInt64() + return err + } + case *uint: + if v != nil { + *v, err = d.DecodeUint() + return err + } + case *uint8: + if v != nil { + *v, err = d.DecodeUint8() + return err + } + case *uint16: + if v != nil { + *v, err = d.DecodeUint16() + return err + } + case *uint32: + if v != nil { + *v, err = d.DecodeUint32() + return err + } + case *uint64: + if v != nil { + *v, err = d.DecodeUint64() + return err + } + case *bool: + if v != nil { + *v, err = d.DecodeBool() + return err + } + case *float32: + if v != nil { + *v, err = d.DecodeFloat32() + return err + } + case *float64: + if v != nil { + *v, err = d.DecodeFloat64() + return err + } + case *[]string: + return d.decodeStringSlicePtr(v) + case *map[string]string: + return d.decodeMapStringStringPtr(v) + case *map[string]interface{}: + return d.decodeMapStringInterfacePtr(v) + case *time.Duration: + if v != nil { + vv, err := d.DecodeInt64() + *v = time.Duration(vv) + return err + } + case *time.Time: + if v != nil { + *v, err = d.DecodeTime() + return err + } + } + + vv := reflect.ValueOf(v) + if !vv.IsValid() { + return errors.New("msgpack: Decode(nil)") + } + if vv.Kind() != reflect.Ptr { + return fmt.Errorf("msgpack: Decode(non-pointer %T)", v) + } + if vv.IsNil() { + return fmt.Errorf("msgpack: Decode(non-settable %T)", v) + } + + vv = vv.Elem() + if vv.Kind() == reflect.Interface { + if !vv.IsNil() { + vv = vv.Elem() + if vv.Kind() != reflect.Ptr { + return fmt.Errorf("msgpack: Decode(non-pointer %s)", vv.Type().String()) + } + } + } + + return d.DecodeValue(vv) +} + +func (d *Decoder) DecodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := d.Decode(vv); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeInterfaceCond() (interface{}, error) { + if d.flags&looseInterfaceDecodingFlag != 0 { + return d.DecodeInterfaceLoose() + } + return d.DecodeInterface() +} + +func (d *Decoder) DecodeValue(v reflect.Value) error { + decode := getDecoder(v.Type()) + return decode(d, v) +} + +func (d *Decoder) DecodeNil() error { + c, err := d.readCode() + if err != nil { + return err + } + if c != msgpcode.Nil { + return fmt.Errorf("msgpack: invalid code=%x decoding nil", c) + } + return nil +} + +func (d *Decoder) decodeNilValue(v reflect.Value) error { + err := d.DecodeNil() + if v.IsNil() { + return err + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + v.Set(reflect.Zero(v.Type())) + return err +} + +func (d *Decoder) DecodeBool() (bool, error) { + c, err := d.readCode() + if err != nil { + return false, err + } + return d.bool(c) +} + +func (d *Decoder) bool(c byte) (bool, error) { + if c == msgpcode.Nil { + return false, nil + } + if c == msgpcode.False { + return false, nil + } + if c == msgpcode.True { + return true, nil + } + return false, fmt.Errorf("msgpack: invalid code=%x decoding bool", c) +} + +func (d *Decoder) DecodeDuration() 
(time.Duration, error) {
+	n, err := d.DecodeInt64()
+	if err != nil {
+		return 0, err
+	}
+	return time.Duration(n), nil
+}
+
+// DecodeInterface decodes a value into an interface. It returns the following types:
+// - nil,
+// - bool,
+// - int8, int16, int32, int64,
+// - uint8, uint16, uint32, uint64,
+// - float32 and float64,
+// - string,
+// - []byte,
+// - slices of any of the above,
+// - maps of any of the above.
+//
+// DecodeInterface should be used only when you don't know the type of the value
+// you are decoding. For example, if you are decoding a number, it is better to use
+// DecodeInt64 for negative numbers and DecodeUint64 for positive numbers.
+func (d *Decoder) DecodeInterface() (interface{}, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return nil, err
+	}
+
+	if msgpcode.IsFixedNum(c) {
+		return int8(c), nil
+	}
+	if msgpcode.IsFixedMap(c) {
+		err = d.s.UnreadByte()
+		if err != nil {
+			return nil, err
+		}
+		return d.decodeMapDefault()
+	}
+	if msgpcode.IsFixedArray(c) {
+		return d.decodeSlice(c)
+	}
+	if msgpcode.IsFixedString(c) {
+		return d.string(c)
+	}
+
+	switch c {
+	case msgpcode.Nil:
+		return nil, nil
+	case msgpcode.False, msgpcode.True:
+		return d.bool(c)
+	case msgpcode.Float:
+		return d.float32(c)
+	case msgpcode.Double:
+		return d.float64(c)
+	case msgpcode.Uint8:
+		return d.uint8()
+	case msgpcode.Uint16:
+		return d.uint16()
+	case msgpcode.Uint32:
+		return d.uint32()
+	case msgpcode.Uint64:
+		return d.uint64()
+	case msgpcode.Int8:
+		return d.int8()
+	case msgpcode.Int16:
+		return d.int16()
+	case msgpcode.Int32:
+		return d.int32()
+	case msgpcode.Int64:
+		return d.int64()
+	case msgpcode.Bin8, msgpcode.Bin16, msgpcode.Bin32:
+		return d.bytes(c, nil)
+	case msgpcode.Str8, msgpcode.Str16, msgpcode.Str32:
+		return d.string(c)
+	case msgpcode.Array16, msgpcode.Array32:
+		return d.decodeSlice(c)
+	case msgpcode.Map16, msgpcode.Map32:
+		err = d.s.UnreadByte()
+		if err != nil {
+			return nil, err
+		}
+		return d.decodeMapDefault()
+	case msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4, msgpcode.FixExt8, msgpcode.FixExt16,
+		msgpcode.Ext8, msgpcode.Ext16, msgpcode.Ext32:
+		return d.decodeInterfaceExt(c)
+	}
+
+	return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c)
+}
+
+// DecodeInterfaceLoose is like DecodeInterface except that:
+// - int8, int16, and int32 are converted to int64,
+// - uint8, uint16, and uint32 are converted to uint64,
+// - float32 is converted to float64,
+// - []byte is converted to string.
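+//
+// For example (illustrative): a value written with Encoder.EncodeUint8 is
+// returned here as uint64, while DecodeInterface would return it as uint8.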
+func (d *Decoder) DecodeInterfaceLoose() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if msgpcode.IsFixedNum(c) { + return int64(int8(c)), nil + } + if msgpcode.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.decodeMapDefault() + } + if msgpcode.IsFixedArray(c) { + return d.decodeSlice(c) + } + if msgpcode.IsFixedString(c) { + return d.string(c) + } + + switch c { + case msgpcode.Nil: + return nil, nil + case msgpcode.False, msgpcode.True: + return d.bool(c) + case msgpcode.Float, msgpcode.Double: + return d.float64(c) + case msgpcode.Uint8, msgpcode.Uint16, msgpcode.Uint32, msgpcode.Uint64: + return d.uint(c) + case msgpcode.Int8, msgpcode.Int16, msgpcode.Int32, msgpcode.Int64: + return d.int(c) + case msgpcode.Str8, msgpcode.Str16, msgpcode.Str32, + msgpcode.Bin8, msgpcode.Bin16, msgpcode.Bin32: + return d.string(c) + case msgpcode.Array16, msgpcode.Array32: + return d.decodeSlice(c) + case msgpcode.Map16, msgpcode.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.decodeMapDefault() + case msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4, msgpcode.FixExt8, msgpcode.FixExt16, + msgpcode.Ext8, msgpcode.Ext16, msgpcode.Ext32: + return d.decodeInterfaceExt(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// Skip skips next value. +func (d *Decoder) Skip() error { + c, err := d.readCode() + if err != nil { + return err + } + + if msgpcode.IsFixedNum(c) { + return nil + } + if msgpcode.IsFixedMap(c) { + return d.skipMap(c) + } + if msgpcode.IsFixedArray(c) { + return d.skipSlice(c) + } + if msgpcode.IsFixedString(c) { + return d.skipBytes(c) + } + + switch c { + case msgpcode.Nil, msgpcode.False, msgpcode.True: + return nil + case msgpcode.Uint8, msgpcode.Int8: + return d.skipN(1) + case msgpcode.Uint16, msgpcode.Int16: + return d.skipN(2) + case msgpcode.Uint32, msgpcode.Int32, msgpcode.Float: + return d.skipN(4) + case msgpcode.Uint64, msgpcode.Int64, msgpcode.Double: + return d.skipN(8) + case msgpcode.Bin8, msgpcode.Bin16, msgpcode.Bin32: + return d.skipBytes(c) + case msgpcode.Str8, msgpcode.Str16, msgpcode.Str32: + return d.skipBytes(c) + case msgpcode.Array16, msgpcode.Array32: + return d.skipSlice(c) + case msgpcode.Map16, msgpcode.Map32: + return d.skipMap(c) + case msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4, msgpcode.FixExt8, msgpcode.FixExt16, + msgpcode.Ext8, msgpcode.Ext16, msgpcode.Ext32: + return d.skipExt(c) + } + + return fmt.Errorf("msgpack: unknown code %x", c) +} + +func (d *Decoder) DecodeRaw() (RawMessage, error) { + d.rec = make([]byte, 0) + if err := d.Skip(); err != nil { + return nil, err + } + msg := RawMessage(d.rec) + d.rec = nil + return msg, nil +} + +// PeekCode returns the next MessagePack code without advancing the reader. +// Subpackage msgpack/codes defines the list of available msgpcode. +func (d *Decoder) PeekCode() (byte, error) { + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + return c, d.s.UnreadByte() +} + +// ReadFull reads exactly len(buf) bytes into the buf. 
+func (d *Decoder) ReadFull(buf []byte) error { + _, err := readN(d.r, buf, len(buf)) + return err +} + +func (d *Decoder) hasNilCode() bool { + code, err := d.PeekCode() + return err == nil && code == msgpcode.Nil +} + +func (d *Decoder) readCode() (byte, error) { + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + if d.rec != nil { + d.rec = append(d.rec, c) + } + return c, nil +} + +func (d *Decoder) readFull(b []byte) error { + _, err := io.ReadFull(d.r, b) + if err != nil { + return err + } + if d.rec != nil { + d.rec = append(d.rec, b...) + } + return nil +} + +func (d *Decoder) readN(n int) ([]byte, error) { + var err error + if d.flags&disableAllocLimitFlag != 0 { + d.buf, err = readN(d.r, d.buf, n) + } else { + d.buf, err = readNGrow(d.r, d.buf, n) + } + if err != nil { + return nil, err + } + if d.rec != nil { + // TODO: read directly into d.rec? + d.rec = append(d.rec, d.buf...) + } + return d.buf, nil +} + +func readN(r io.Reader, b []byte, n int) ([]byte, error) { + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + b = make([]byte, 0, n) + } + + if n > cap(b) { + b = append(b, make([]byte, n-len(b))...) + } else if n <= cap(b) { + b = b[:n] + } + + _, err := io.ReadFull(r, b) + return b, err +} + +func readNGrow(r io.Reader, b []byte, n int) ([]byte, error) { + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + switch { + case n < 64: + b = make([]byte, 0, 64) + case n <= bytesAllocLimit: + b = make([]byte, 0, n) + default: + b = make([]byte, 0, bytesAllocLimit) + } + } + + if n <= cap(b) { + b = b[:n] + _, err := io.ReadFull(r, b) + return b, err + } + b = b[:cap(b)] + + var pos int + for { + alloc := min(n-len(b), bytesAllocLimit) + b = append(b, make([]byte, alloc)...) + + _, err := io.ReadFull(r, b[pos:]) + if err != nil { + return b, err + } + + if len(b) == n { + break + } + pos = len(b) + } + + return b, nil +} + +func min(a, b int) int { //nolint:unparam + if a <= b { + return a + } + return b +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go new file mode 100644 index 0000000000..c54dae374f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go @@ -0,0 +1,356 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +var errArrayStruct = errors.New("msgpack: number of fields in array-encoded struct has changed") + +var ( + mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) + mapStringStringType = mapStringStringPtrType.Elem() + mapStringBoolPtrType = reflect.TypeOf((*map[string]bool)(nil)) + mapStringBoolType = mapStringBoolPtrType.Elem() +) + +var ( + mapStringInterfacePtrType = reflect.TypeOf((*map[string]interface{})(nil)) + mapStringInterfaceType = mapStringInterfacePtrType.Elem() +) + +func decodeMapValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + + typ := v.Type() + if n == -1 { + v.Set(reflect.Zero(typ)) + return nil + } + + if v.IsNil() { + ln := n + if d.flags&disableAllocLimitFlag == 0 { + ln = min(ln, maxMapSize) + } + v.Set(reflect.MakeMapWithSize(typ, ln)) + } + if n == 0 { + return nil + } + + return d.decodeTypedMapValue(v, n) +} + +func (d *Decoder) decodeMapDefault() (interface{}, error) { + if d.mapDecoder != nil { + return d.mapDecoder(d) + } + return d.DecodeMap() +} + +// DecodeMapLen decodes map length. Length is -1 when map is nil. 
+func (d *Decoder) DecodeMapLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + + if msgpcode.IsExt(c) { + if err = d.skipExtHeader(c); err != nil { + return 0, err + } + + c, err = d.readCode() + if err != nil { + return 0, err + } + } + return d.mapLen(c) +} + +func (d *Decoder) mapLen(c byte) (int, error) { + if c == msgpcode.Nil { + return -1, nil + } + if c >= msgpcode.FixedMapLow && c <= msgpcode.FixedMapHigh { + return int(c & msgpcode.FixedMapMask), nil + } + if c == msgpcode.Map16 { + size, err := d.uint16() + return int(size), err + } + if c == msgpcode.Map32 { + size, err := d.uint32() + return int(size), err + } + return 0, unexpectedCodeError{code: c, hint: "map length"} +} + +func decodeMapStringStringValue(d *Decoder, v reflect.Value) error { + mptr := v.Addr().Convert(mapStringStringPtrType).Interface().(*map[string]string) + return d.decodeMapStringStringPtr(mptr) +} + +func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + if size == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + ln := size + if d.flags&disableAllocLimitFlag == 0 { + ln = min(size, maxMapSize) + } + *ptr = make(map[string]string, ln) + m = *ptr + } + + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.DecodeString() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func decodeMapStringInterfaceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(mapStringInterfacePtrType).Interface().(*map[string]interface{}) + return d.decodeMapStringInterfacePtr(ptr) +} + +func (d *Decoder) decodeMapStringInterfacePtr(ptr *map[string]interface{}) error { + m, err := d.DecodeMap() + if err != nil { + return err + } + *ptr = m + return nil +} + +func (d *Decoder) DecodeMap() (map[string]interface{}, error) { + n, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + + if n == -1 { + return nil, nil + } + + m := make(map[string]interface{}, n) + + for i := 0; i < n; i++ { + mk, err := d.DecodeString() + if err != nil { + return nil, err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + m[mk] = mv + } + + return m, nil +} + +func (d *Decoder) DecodeUntypedMap() (map[interface{}]interface{}, error) { + n, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + + if n == -1 { + return nil, nil + } + + m := make(map[interface{}]interface{}, n) + + for i := 0; i < n; i++ { + mk, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + m[mk] = mv + } + + return m, nil +} + +// DecodeTypedMap decodes a typed map. Typed map is a map that has a fixed type for keys and values. +// Key and value types may be different. 
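+//
+// The concrete map type is inferred from the first key/value pair. For
+// example (illustrative, assuming default encoder flags): data encoded from
+// a Go map[string]int64 decodes back into a map[string]int64.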
+func (d *Decoder) DecodeTypedMap() (interface{}, error) { + n, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + if n <= 0 { + return nil, nil + } + + key, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + value, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + keyType := reflect.TypeOf(key) + valueType := reflect.TypeOf(value) + + if !keyType.Comparable() { + return nil, fmt.Errorf("msgpack: unsupported map key: %s", keyType.String()) + } + + mapType := reflect.MapOf(keyType, valueType) + + ln := n + if d.flags&disableAllocLimitFlag == 0 { + ln = min(ln, maxMapSize) + } + + mapValue := reflect.MakeMapWithSize(mapType, ln) + mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) + + n-- + if err := d.decodeTypedMapValue(mapValue, n); err != nil { + return nil, err + } + + return mapValue.Interface(), nil +} + +func (d *Decoder) decodeTypedMapValue(v reflect.Value, n int) error { + var ( + typ = v.Type() + keyType = typ.Key() + valueType = typ.Elem() + ) + for i := 0; i < n; i++ { + mk := d.newValue(keyType).Elem() + if err := d.DecodeValue(mk); err != nil { + return err + } + + mv := d.newValue(valueType).Elem() + if err := d.DecodeValue(mv); err != nil { + return err + } + + v.SetMapIndex(mk, mv) + } + + return nil +} + +func (d *Decoder) skipMap(c byte) error { + n, err := d.mapLen(c) + if err != nil { + return err + } + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + if err := d.Skip(); err != nil { + return err + } + } + return nil +} + +func decodeStructValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.mapLen(c) + if err == nil { + return d.decodeStruct(v, n) + } + + var err2 error + n, err2 = d.arrayLen(c) + if err2 != nil { + return err + } + + if n <= 0 { + v.Set(reflect.Zero(v.Type())) + return nil + } + + fields := structs.Fields(v.Type(), d.structTag) + if n != len(fields.List) { + return errArrayStruct + } + + for _, f := range fields.List { + if err := f.DecodeValue(d, v); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeStruct(v reflect.Value, n int) error { + if n == -1 { + v.Set(reflect.Zero(v.Type())) + return nil + } + + fields := structs.Fields(v.Type(), d.structTag) + for i := 0; i < n; i++ { + name, err := d.decodeStringTemp() + if err != nil { + return err + } + + if f := fields.Map[name]; f != nil { + if err := f.DecodeValue(d, v); err != nil { + return err + } + continue + } + + if d.flags&disallowUnknownFieldsFlag != 0 { + return fmt.Errorf("msgpack: unknown field %q", name) + } + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_number.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_number.go new file mode 100644 index 0000000000..45d6a74186 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_number.go @@ -0,0 +1,295 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +func (d *Decoder) skipN(n int) error { + _, err := d.readN(n) + return err +} + +func (d *Decoder) uint8() (uint8, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return c, nil +} + +func (d *Decoder) int8() (int8, error) { + n, err := d.uint8() + return int8(n), err +} + +func (d *Decoder) uint16() (uint16, error) { + b, err := d.readN(2) + if err != nil { + return 0, err + } + return (uint16(b[0]) << 
8) | uint16(b[1]), nil +} + +func (d *Decoder) int16() (int16, error) { + n, err := d.uint16() + return int16(n), err +} + +func (d *Decoder) uint32() (uint32, error) { + b, err := d.readN(4) + if err != nil { + return 0, err + } + n := (uint32(b[0]) << 24) | + (uint32(b[1]) << 16) | + (uint32(b[2]) << 8) | + uint32(b[3]) + return n, nil +} + +func (d *Decoder) int32() (int32, error) { + n, err := d.uint32() + return int32(n), err +} + +func (d *Decoder) uint64() (uint64, error) { + b, err := d.readN(8) + if err != nil { + return 0, err + } + n := (uint64(b[0]) << 56) | + (uint64(b[1]) << 48) | + (uint64(b[2]) << 40) | + (uint64(b[3]) << 32) | + (uint64(b[4]) << 24) | + (uint64(b[5]) << 16) | + (uint64(b[6]) << 8) | + uint64(b[7]) + return n, nil +} + +func (d *Decoder) int64() (int64, error) { + n, err := d.uint64() + return int64(n), err +} + +// DecodeUint64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go uint64. +func (d *Decoder) DecodeUint64() (uint64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.uint(c) +} + +func (d *Decoder) uint(c byte) (uint64, error) { + if c == msgpcode.Nil { + return 0, nil + } + if msgpcode.IsFixedNum(c) { + return uint64(int8(c)), nil + } + switch c { + case msgpcode.Uint8: + n, err := d.uint8() + return uint64(n), err + case msgpcode.Int8: + n, err := d.int8() + return uint64(n), err + case msgpcode.Uint16: + n, err := d.uint16() + return uint64(n), err + case msgpcode.Int16: + n, err := d.int16() + return uint64(n), err + case msgpcode.Uint32: + n, err := d.uint32() + return uint64(n), err + case msgpcode.Int32: + n, err := d.int32() + return uint64(n), err + case msgpcode.Uint64, msgpcode.Int64: + return d.uint64() + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding uint64", c) +} + +// DecodeInt64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go int64. +func (d *Decoder) DecodeInt64() (int64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.int(c) +} + +func (d *Decoder) int(c byte) (int64, error) { + if c == msgpcode.Nil { + return 0, nil + } + if msgpcode.IsFixedNum(c) { + return int64(int8(c)), nil + } + switch c { + case msgpcode.Uint8: + n, err := d.uint8() + return int64(n), err + case msgpcode.Int8: + n, err := d.uint8() + return int64(int8(n)), err + case msgpcode.Uint16: + n, err := d.uint16() + return int64(n), err + case msgpcode.Int16: + n, err := d.uint16() + return int64(int16(n)), err + case msgpcode.Uint32: + n, err := d.uint32() + return int64(n), err + case msgpcode.Int32: + n, err := d.uint32() + return int64(int32(n)), err + case msgpcode.Uint64, msgpcode.Int64: + n, err := d.uint64() + return int64(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding int64", c) +} + +func (d *Decoder) DecodeFloat32() (float32, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float32(c) +} + +func (d *Decoder) float32(c byte) (float32, error) { + if c == msgpcode.Float { + n, err := d.uint32() + if err != nil { + return 0, err + } + return math.Float32frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float32(n), nil +} + +// DecodeFloat64 decodes msgpack float32/64 into Go float64. 
+func (d *Decoder) DecodeFloat64() (float64, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return 0, err
+	}
+	return d.float64(c)
+}
+
+func (d *Decoder) float64(c byte) (float64, error) {
+	switch c {
+	case msgpcode.Float:
+		n, err := d.float32(c)
+		if err != nil {
+			return 0, err
+		}
+		return float64(n), nil
+	case msgpcode.Double:
+		n, err := d.uint64()
+		if err != nil {
+			return 0, err
+		}
+		return math.Float64frombits(n), nil
+	}
+
+	n, err := d.int(c)
+	if err != nil {
+		return 0, fmt.Errorf("msgpack: invalid code=%x decoding float64", c)
+	}
+	return float64(n), nil
+}
+
+func (d *Decoder) DecodeUint() (uint, error) {
+	n, err := d.DecodeUint64()
+	return uint(n), err
+}
+
+func (d *Decoder) DecodeUint8() (uint8, error) {
+	n, err := d.DecodeUint64()
+	return uint8(n), err
+}
+
+func (d *Decoder) DecodeUint16() (uint16, error) {
+	n, err := d.DecodeUint64()
+	return uint16(n), err
+}
+
+func (d *Decoder) DecodeUint32() (uint32, error) {
+	n, err := d.DecodeUint64()
+	return uint32(n), err
+}
+
+func (d *Decoder) DecodeInt() (int, error) {
+	n, err := d.DecodeInt64()
+	return int(n), err
+}
+
+func (d *Decoder) DecodeInt8() (int8, error) {
+	n, err := d.DecodeInt64()
+	return int8(n), err
+}
+
+func (d *Decoder) DecodeInt16() (int16, error) {
+	n, err := d.DecodeInt64()
+	return int16(n), err
+}
+
+func (d *Decoder) DecodeInt32() (int32, error) {
+	n, err := d.DecodeInt64()
+	return int32(n), err
+}
+
+func decodeFloat32Value(d *Decoder, v reflect.Value) error {
+	f, err := d.DecodeFloat32()
+	if err != nil {
+		return err
+	}
+	v.SetFloat(float64(f))
+	return nil
+}
+
+func decodeFloat64Value(d *Decoder, v reflect.Value) error {
+	f, err := d.DecodeFloat64()
+	if err != nil {
+		return err
+	}
+	v.SetFloat(f)
+	return nil
+}
+
+func decodeInt64Value(d *Decoder, v reflect.Value) error {
+	n, err := d.DecodeInt64()
+	if err != nil {
+		return err
+	}
+	v.SetInt(n)
+	return nil
+}
+
+func decodeUint64Value(d *Decoder, v reflect.Value) error {
+	n, err := d.DecodeUint64()
+	if err != nil {
+		return err
+	}
+	v.SetUint(n)
+	return nil
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go
new file mode 100644
index 0000000000..4dce0fe5b9
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go
@@ -0,0 +1,157 @@
+package msgpack
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/vmihailenco/msgpack/v5/msgpcode"
+)
+
+type queryResult struct {
+	query       string
+	key         string
+	values      []interface{}
+	hasAsterisk bool
+}
+
+func (q *queryResult) nextKey() {
+	ind := strings.IndexByte(q.query, '.')
+	if ind == -1 {
+		q.key = q.query
+		q.query = ""
+		return
+	}
+	q.key = q.query[:ind]
+	q.query = q.query[ind+1:]
+}
+
+// Query extracts the data specified by the query from the msgpack stream, skipping
+// any other data. A query consists of map keys and array indexes separated with a dot,
+// e.g. key1.0.key2.
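+//
+// For example (illustrative): for a stream encoded from
+// map[string]interface{}{"posts": []interface{}{...}}, Query("posts.0.id")
+// extracts the id of the first post, and Query("posts.*.id") extracts the
+// id of every post.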
+func (d *Decoder) Query(query string) ([]interface{}, error) { + res := queryResult{ + query: query, + } + if err := d.query(&res); err != nil { + return nil, err + } + return res.values, nil +} + +func (d *Decoder) query(q *queryResult) error { + q.nextKey() + if q.key == "" { + v, err := d.decodeInterfaceCond() + if err != nil { + return err + } + q.values = append(q.values, v) + return nil + } + + code, err := d.PeekCode() + if err != nil { + return err + } + + switch { + case code == msgpcode.Map16 || code == msgpcode.Map32 || msgpcode.IsFixedMap(code): + err = d.queryMapKey(q) + case code == msgpcode.Array16 || code == msgpcode.Array32 || msgpcode.IsFixedArray(code): + err = d.queryArrayIndex(q) + default: + err = fmt.Errorf("msgpack: unsupported code=%x decoding key=%q", code, q.key) + } + return err +} + +func (d *Decoder) queryMapKey(q *queryResult) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + for i := 0; i < n; i++ { + key, err := d.decodeStringTemp() + if err != nil { + return err + } + + if key == q.key { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext((n - i - 1) * 2) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) queryArrayIndex(q *queryResult) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + if q.key == "*" { + q.hasAsterisk = true + + query := q.query + for i := 0; i < n; i++ { + q.query = query + if err := d.query(q); err != nil { + return err + } + } + + q.hasAsterisk = false + return nil + } + + ind, err := strconv.Atoi(q.key) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if i == ind { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext(n - i - 1) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) skipNext(n int) error { + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go new file mode 100644 index 0000000000..9c155f2ba6 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go @@ -0,0 +1,198 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +var sliceStringPtrType = reflect.TypeOf((*[]string)(nil)) + +// DecodeArrayLen decodes array length. Length is -1 when array is nil. 
+func (d *Decoder) DecodeArrayLen() (int, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return 0, err
+	}
+	return d.arrayLen(c)
+}
+
+func (d *Decoder) arrayLen(c byte) (int, error) {
+	if c == msgpcode.Nil {
+		return -1, nil
+	} else if c >= msgpcode.FixedArrayLow && c <= msgpcode.FixedArrayHigh {
+		return int(c & msgpcode.FixedArrayMask), nil
+	}
+	switch c {
+	case msgpcode.Array16:
+		n, err := d.uint16()
+		return int(n), err
+	case msgpcode.Array32:
+		n, err := d.uint32()
+		return int(n), err
+	}
+	return 0, fmt.Errorf("msgpack: invalid code=%x decoding array length", c)
+}
+
+func decodeStringSliceValue(d *Decoder, v reflect.Value) error {
+	ptr := v.Addr().Convert(sliceStringPtrType).Interface().(*[]string)
+	return d.decodeStringSlicePtr(ptr)
+}
+
+func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error {
+	n, err := d.DecodeArrayLen()
+	if err != nil {
+		return err
+	}
+	if n == -1 {
+		return nil
+	}
+
+	ss := makeStrings(*ptr, n, d.flags&disableAllocLimitFlag != 0)
+	for i := 0; i < n; i++ {
+		s, err := d.DecodeString()
+		if err != nil {
+			return err
+		}
+		ss = append(ss, s)
+	}
+	*ptr = ss
+
+	return nil
+}
+
+func makeStrings(s []string, n int, noLimit bool) []string {
+	if !noLimit && n > sliceAllocLimit {
+		n = sliceAllocLimit
+	}
+
+	if s == nil {
+		return make([]string, 0, n)
+	}
+
+	if cap(s) >= n {
+		return s[:0]
+	}
+
+	s = s[:cap(s)]
+	s = append(s, make([]string, n-len(s))...)
+	return s[:0]
+}
+
+func decodeSliceValue(d *Decoder, v reflect.Value) error {
+	n, err := d.DecodeArrayLen()
+	if err != nil {
+		return err
+	}
+
+	if n == -1 {
+		v.Set(reflect.Zero(v.Type()))
+		return nil
+	}
+	if n == 0 && v.IsNil() {
+		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+		return nil
+	}
+
+	if v.Cap() >= n {
+		v.Set(v.Slice(0, n))
+	} else if v.Len() < v.Cap() {
+		v.Set(v.Slice(0, v.Cap()))
+	}
+
+	noLimit := d.flags&disableAllocLimitFlag != 0
+
+	if noLimit && n > v.Len() {
+		v.Set(growSliceValue(v, n, noLimit))
+	}
+
+	for i := 0; i < n; i++ {
+		if !noLimit && i >= v.Len() {
+			v.Set(growSliceValue(v, n, noLimit))
+		}
+
+		elem := v.Index(i)
+		if err := d.DecodeValue(elem); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func growSliceValue(v reflect.Value, n int, noLimit bool) reflect.Value {
+	diff := n - v.Len()
+	if !noLimit && diff > sliceAllocLimit {
+		diff = sliceAllocLimit
+	}
+	v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff))
+	return v
+}
+
+func decodeArrayValue(d *Decoder, v reflect.Value) error {
+	n, err := d.DecodeArrayLen()
+	if err != nil {
+		return err
+	}
+
+	if n == -1 {
+		return nil
+	}
+	if n > v.Len() {
+		return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n)
+	}
+
+	for i := 0; i < n; i++ {
+		sv := v.Index(i)
+		if err := d.DecodeValue(sv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *Decoder) DecodeSlice() ([]interface{}, error) {
+	c, err := d.readCode()
+	if err != nil {
+		return nil, err
+	}
+	return d.decodeSlice(c)
+}
+
+func (d *Decoder) decodeSlice(c byte) ([]interface{}, error) {
+	n, err := d.arrayLen(c)
+	if err != nil {
+		return nil, err
+	}
+	if n == -1 {
+		return nil, nil
+	}
+
+	s := make([]interface{}, 0, n)
+	for i := 0; i < n; i++ {
+		v, err := d.decodeInterfaceCond()
+		if err != nil {
+			return nil, err
+		}
+		s = append(s, v)
+	}
+
+	return s, nil
+}
+
+func (d *Decoder) skipSlice(c byte) error {
+	n, err := d.arrayLen(c)
+	if err != nil {
+		return err
+	}
+
+	for i := 0; i < n; i++ {
+		if err := d.Skip(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
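+
+// Editorial usage sketch (illustrative only, using just the API defined in
+// this package): decoding an arbitrary msgpack array with DecodeSlice.
+//
+//	b, _ := msgpack.Marshal([]interface{}{"a", int64(1), true})
+//	dec := msgpack.NewDecoder(bytes.NewReader(b))
+//	vals, err := dec.DecodeSlice()
+//	// vals is []interface{}{"a", int64(1), true}, err is nil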
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_string.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_string.go new file mode 100644 index 0000000000..e837e08bf1 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_string.go @@ -0,0 +1,192 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +func (d *Decoder) bytesLen(c byte) (int, error) { + if c == msgpcode.Nil { + return -1, nil + } + + if msgpcode.IsFixedString(c) { + return int(c & msgpcode.FixedStrMask), nil + } + + switch c { + case msgpcode.Str8, msgpcode.Bin8: + n, err := d.uint8() + return int(n), err + case msgpcode.Str16, msgpcode.Bin16: + n, err := d.uint16() + return int(n), err + case msgpcode.Str32, msgpcode.Bin32: + n, err := d.uint32() + return int(n), err + } + + return 0, fmt.Errorf("msgpack: invalid code=%x decoding string/bytes length", c) +} + +func (d *Decoder) DecodeString() (string, error) { + if intern := d.flags&useInternedStringsFlag != 0; intern || len(d.dict) > 0 { + return d.decodeInternedString(intern) + } + + c, err := d.readCode() + if err != nil { + return "", err + } + return d.string(c) +} + +func (d *Decoder) string(c byte) (string, error) { + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + return d.stringWithLen(n) +} + +func (d *Decoder) stringWithLen(n int) (string, error) { + if n <= 0 { + return "", nil + } + b, err := d.readN(n) + return string(b), err +} + +func decodeStringValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + v.SetString(s) + return nil +} + +func (d *Decoder) DecodeBytesLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.bytesLen(c) +} + +func (d *Decoder) DecodeBytes() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.bytes(c, nil) +} + +func (d *Decoder) bytes(c byte, b []byte) ([]byte, error) { + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return readN(d.r, b, n) +} + +func (d *Decoder) decodeStringTemp() (string, error) { + if intern := d.flags&useInternedStringsFlag != 0; intern || len(d.dict) > 0 { + return d.decodeInternedString(intern) + } + + c, err := d.readCode() + if err != nil { + return "", err + } + + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + if n == -1 { + return "", nil + } + + b, err := d.readN(n) + if err != nil { + return "", err + } + + return bytesToString(b), nil +} + +func (d *Decoder) decodeBytesPtr(ptr *[]byte) error { + c, err := d.readCode() + if err != nil { + return err + } + return d.bytesPtr(c, ptr) +} + +func (d *Decoder) bytesPtr(c byte, ptr *[]byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + *ptr, err = readN(d.r, *ptr, n) + return err +} + +func (d *Decoder) skipBytes(c byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n <= 0 { + return nil + } + return d.skipN(n) +} + +func decodeBytesValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + b, err := d.bytes(c, v.Bytes()) + if err != nil { + return err + } + + v.SetBytes(b) + + return nil +} + +func decodeByteArrayValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + if n 
> v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + b := v.Slice(0, n).Bytes() + return d.readFull(b) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_typgen.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_typgen.go new file mode 100644 index 0000000000..0b4c1d04ae --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_typgen.go @@ -0,0 +1,46 @@ +package msgpack + +import ( + "reflect" + "sync" +) + +var cachedValues struct { + m map[reflect.Type]chan reflect.Value + sync.RWMutex +} + +func cachedValue(t reflect.Type) reflect.Value { + cachedValues.RLock() + ch := cachedValues.m[t] + cachedValues.RUnlock() + if ch != nil { + return <-ch + } + + cachedValues.Lock() + defer cachedValues.Unlock() + if ch = cachedValues.m[t]; ch != nil { + return <-ch + } + + ch = make(chan reflect.Value, 256) + go func() { + for { + ch <- reflect.New(t) + } + }() + if cachedValues.m == nil { + cachedValues.m = make(map[reflect.Type]chan reflect.Value, 8) + } + cachedValues.m[t] = ch + return <-ch +} + +func (d *Decoder) newValue(t reflect.Type) reflect.Value { + if d.flags&usePreallocateValues == 0 { + return reflect.New(t) + } + + return cachedValue(t) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go b/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go new file mode 100644 index 0000000000..c44a674e54 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go @@ -0,0 +1,251 @@ +package msgpack + +import ( + "encoding" + "errors" + "fmt" + "reflect" +) + +var ( + interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + boolType = reflect.TypeOf((*bool)(nil)).Elem() +) + +var valueDecoders []decoderFunc + +//nolint:gochecknoinits +func init() { + valueDecoders = []decoderFunc{ + reflect.Bool: decodeBoolValue, + reflect.Int: decodeInt64Value, + reflect.Int8: decodeInt64Value, + reflect.Int16: decodeInt64Value, + reflect.Int32: decodeInt64Value, + reflect.Int64: decodeInt64Value, + reflect.Uint: decodeUint64Value, + reflect.Uint8: decodeUint64Value, + reflect.Uint16: decodeUint64Value, + reflect.Uint32: decodeUint64Value, + reflect.Uint64: decodeUint64Value, + reflect.Float32: decodeFloat32Value, + reflect.Float64: decodeFloat64Value, + reflect.Complex64: decodeUnsupportedValue, + reflect.Complex128: decodeUnsupportedValue, + reflect.Array: decodeArrayValue, + reflect.Chan: decodeUnsupportedValue, + reflect.Func: decodeUnsupportedValue, + reflect.Interface: decodeInterfaceValue, + reflect.Map: decodeMapValue, + reflect.Ptr: decodeUnsupportedValue, + reflect.Slice: decodeSliceValue, + reflect.String: decodeStringValue, + reflect.Struct: decodeStructValue, + reflect.UnsafePointer: decodeUnsupportedValue, + } +} + +func getDecoder(typ reflect.Type) decoderFunc { + if v, ok := typeDecMap.Load(typ); ok { + return v.(decoderFunc) + } + fn := _getDecoder(typ) + typeDecMap.Store(typ, fn) + return fn +} + +func _getDecoder(typ reflect.Type) decoderFunc { + kind := typ.Kind() + + if kind == reflect.Ptr { + if _, ok := typeDecMap.Load(typ.Elem()); ok { + return ptrValueDecoder(typ) + } + } + + if typ.Implements(customDecoderType) { + return nilAwareDecoder(typ, decodeCustomValue) + } + if typ.Implements(unmarshalerType) { + return nilAwareDecoder(typ, unmarshalValue) + } + if typ.Implements(binaryUnmarshalerType) { + return nilAwareDecoder(typ, unmarshalBinaryValue) + } + if typ.Implements(textUnmarshalerType) { + return 
nilAwareDecoder(typ, unmarshalTextValue) + } + + // Addressable struct field value. + if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customDecoderType) { + return addrDecoder(nilAwareDecoder(typ, decodeCustomValue)) + } + if ptr.Implements(unmarshalerType) { + return addrDecoder(nilAwareDecoder(typ, unmarshalValue)) + } + if ptr.Implements(binaryUnmarshalerType) { + return addrDecoder(nilAwareDecoder(typ, unmarshalBinaryValue)) + } + if ptr.Implements(textUnmarshalerType) { + return addrDecoder(nilAwareDecoder(typ, unmarshalTextValue)) + } + } + + switch kind { + case reflect.Ptr: + return ptrValueDecoder(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return decodeBytesValue + } + if elem == stringType { + return decodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return decodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return decodeMapStringStringValue + case interfaceType: + return decodeMapStringInterfaceValue + } + } + } + + return valueDecoders[kind] +} + +func ptrValueDecoder(typ reflect.Type) decoderFunc { + decoder := getDecoder(typ.Elem()) + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + if !v.IsNil() { + v.Set(d.newValue(typ).Elem()) + } + return d.DecodeNil() + } + if v.IsNil() { + v.Set(d.newValue(typ.Elem())) + } + return decoder(d, v.Elem()) + } +} + +func addrDecoder(fn decoderFunc) decoderFunc { + return func(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return fn(d, v.Addr()) + } +} + +func nilAwareDecoder(typ reflect.Type, fn decoderFunc) decoderFunc { + if nilable(typ.Kind()) { + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + if v.IsNil() { + v.Set(d.newValue(typ.Elem())) + } + return fn(d, v) + } + } + + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + return fn(d, v) + } +} + +func decodeBoolValue(d *Decoder, v reflect.Value) error { + flag, err := d.DecodeBool() + if err != nil { + return err + } + v.SetBool(flag) + return nil +} + +func decodeInterfaceValue(d *Decoder, v reflect.Value) error { + if v.IsNil() { + return d.interfaceValue(v) + } + return d.DecodeValue(v.Elem()) +} + +func (d *Decoder) interfaceValue(v reflect.Value) error { + vv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + + if vv != nil { + if v.Type() == errorType { + if vv, ok := vv.(string); ok { + v.Set(reflect.ValueOf(errors.New(vv))) + return nil + } + } + + v.Set(reflect.ValueOf(vv)) + } + + return nil +} + +func decodeUnsupportedValue(d *Decoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Decode(unsupported %s)", v.Type()) +} + +//------------------------------------------------------------------------------ + +func decodeCustomValue(d *Decoder, v reflect.Value) error { + decoder := v.Interface().(CustomDecoder) + return decoder.DecodeMsgpack(d) +} + +func unmarshalValue(d *Decoder, v reflect.Value) error { + var b []byte + + d.rec = make([]byte, 0, 64) + if err := d.Skip(); err != nil { + return err + } + b = d.rec + d.rec = nil + + unmarshaler := v.Interface().(Unmarshaler) + return unmarshaler.UnmarshalMsgpack(b) +} + +func unmarshalBinaryValue(d *Decoder, v reflect.Value) error { + data, err := d.DecodeBytes() + if err != nil { + return err + } + + 
unmarshaler := v.Interface().(encoding.BinaryUnmarshaler) + return unmarshaler.UnmarshalBinary(data) +} + +func unmarshalTextValue(d *Decoder, v reflect.Value) error { + data, err := d.DecodeBytes() + if err != nil { + return err + } + + unmarshaler := v.Interface().(encoding.TextUnmarshaler) + return unmarshaler.UnmarshalText(data) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode.go b/vendor/github.com/vmihailenco/msgpack/v5/encode.go new file mode 100644 index 0000000000..135adc8f37 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/encode.go @@ -0,0 +1,270 @@ +package msgpack + +import ( + "bytes" + "io" + "reflect" + "sync" + "time" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +const ( + sortMapKeysFlag uint32 = 1 << iota + arrayEncodedStructsFlag + useCompactIntsFlag + useCompactFloatsFlag + useInternedStringsFlag + omitEmptyFlag +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +type byteWriter struct { + io.Writer +} + +func newByteWriter(w io.Writer) byteWriter { + return byteWriter{ + Writer: w, + } +} + +func (bw byteWriter) WriteByte(c byte) error { + _, err := bw.Write([]byte{c}) + return err +} + +//------------------------------------------------------------------------------ + +var encPool = sync.Pool{ + New: func() interface{} { + return NewEncoder(nil) + }, +} + +func GetEncoder() *Encoder { + return encPool.Get().(*Encoder) +} + +func PutEncoder(enc *Encoder) { + enc.w = nil + encPool.Put(enc) +} + +// Marshal returns the MessagePack encoding of v. +func Marshal(v interface{}) ([]byte, error) { + enc := GetEncoder() + + var buf bytes.Buffer + enc.Reset(&buf) + + err := enc.Encode(v) + b := buf.Bytes() + + PutEncoder(enc) + + if err != nil { + return nil, err + } + return b, err +} + +type Encoder struct { + w writer + dict map[string]int + structTag string + buf []byte + timeBuf []byte + flags uint32 +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + buf: make([]byte, 9), + } + e.Reset(w) + return e +} + +// Writer returns the Encoder's writer. +func (e *Encoder) Writer() io.Writer { + return e.w +} + +// Reset discards any buffered data, resets all state, and switches the writer to write to w. +func (e *Encoder) Reset(w io.Writer) { + e.ResetDict(w, nil) +} + +// ResetDict is like Reset, but also resets the dict. +func (e *Encoder) ResetDict(w io.Writer, dict map[string]int) { + e.ResetWriter(w) + e.flags = 0 + e.structTag = "" + e.dict = dict +} + +func (e *Encoder) WithDict(dict map[string]int, fn func(*Encoder) error) error { + oldDict := e.dict + e.dict = dict + err := fn(e) + e.dict = oldDict + return err +} + +func (e *Encoder) ResetWriter(w io.Writer) { + e.dict = nil + if bw, ok := w.(writer); ok { + e.w = bw + } else if w == nil { + e.w = nil + } else { + e.w = newByteWriter(w) + } +} + +// SetSortMapKeys causes the Encoder to encode map keys in increasing order. +// Supported map types are: +// - map[string]string +// - map[string]bool +// - map[string]interface{} +func (e *Encoder) SetSortMapKeys(on bool) *Encoder { + if on { + e.flags |= sortMapKeysFlag + } else { + e.flags &= ^sortMapKeysFlag + } + return e +} + +// SetCustomStructTag causes the Encoder to use a custom struct tag as +// fallback option if there is no msgpack tag. +func (e *Encoder) SetCustomStructTag(tag string) { + e.structTag = tag +} + +// SetOmitEmpty causes the Encoder to omit empty values by default. 
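+//
+// For example (illustrative): with SetOmitEmpty(true), a zero-valued struct
+// field is skipped during encoding even if its tag does not contain
+// ",omitempty".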
+func (e *Encoder) SetOmitEmpty(on bool) {
+	if on {
+		e.flags |= omitEmptyFlag
+	} else {
+		e.flags &= ^omitEmptyFlag
+	}
+}
+
+// UseArrayEncodedStructs causes the Encoder to encode Go structs as msgpack arrays.
+func (e *Encoder) UseArrayEncodedStructs(on bool) {
+	if on {
+		e.flags |= arrayEncodedStructsFlag
+	} else {
+		e.flags &= ^arrayEncodedStructsFlag
+	}
+}
+
+// UseCompactInts causes the Encoder to choose the most compact encoding.
+// For example, it allows encoding a small Go int64 as a msgpack int8, saving 7 bytes.
+func (e *Encoder) UseCompactInts(on bool) {
+	if on {
+		e.flags |= useCompactIntsFlag
+	} else {
+		e.flags &= ^useCompactIntsFlag
+	}
+}
+
+// UseCompactFloats causes the Encoder to choose a compact integer encoding
+// for floats that can be represented as integers.
+func (e *Encoder) UseCompactFloats(on bool) {
+	if on {
+		e.flags |= useCompactFloatsFlag
+	} else {
+		e.flags &= ^useCompactFloatsFlag
+	}
+}
+
+// UseInternedStrings causes the Encoder to intern strings.
+func (e *Encoder) UseInternedStrings(on bool) {
+	if on {
+		e.flags |= useInternedStringsFlag
+	} else {
+		e.flags &= ^useInternedStringsFlag
+	}
+}
+
+func (e *Encoder) Encode(v interface{}) error {
+	switch v := v.(type) {
+	case nil:
+		return e.EncodeNil()
+	case string:
+		return e.EncodeString(v)
+	case []byte:
+		return e.EncodeBytes(v)
+	case int:
+		return e.EncodeInt(int64(v))
+	case int64:
+		return e.encodeInt64Cond(v)
+	case uint:
+		return e.EncodeUint(uint64(v))
+	case uint64:
+		return e.encodeUint64Cond(v)
+	case bool:
+		return e.EncodeBool(v)
+	case float32:
+		return e.EncodeFloat32(v)
+	case float64:
+		return e.EncodeFloat64(v)
+	case time.Duration:
+		return e.encodeInt64Cond(int64(v))
+	case time.Time:
+		return e.EncodeTime(v)
+	}
+	return e.EncodeValue(reflect.ValueOf(v))
+}
+
+func (e *Encoder) EncodeMulti(v ...interface{}) error {
+	for _, vv := range v {
+		if err := e.Encode(vv); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *Encoder) EncodeValue(v reflect.Value) error {
+	fn := getEncoder(v.Type())
+	return fn(e, v)
+}
+
+func (e *Encoder) EncodeNil() error {
+	return e.writeCode(msgpcode.Nil)
+}
+
+func (e *Encoder) EncodeBool(value bool) error {
+	if value {
+		return e.writeCode(msgpcode.True)
+	}
+	return e.writeCode(msgpcode.False)
+}
+
+func (e *Encoder) EncodeDuration(d time.Duration) error {
+	return e.EncodeInt(int64(d))
+}
+
+func (e *Encoder) writeCode(c byte) error {
+	return e.w.WriteByte(c)
+}
+
+func (e *Encoder) write(b []byte) error {
+	_, err := e.w.Write(b)
+	return err
+}
+
+func (e *Encoder) writeString(s string) error {
+	_, err := e.w.Write(stringToBytes(s))
+	return err
+}
diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go b/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go
new file mode 100644
index 0000000000..a5aa31bb3c
--- /dev/null
+++ b/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go
@@ -0,0 +1,225 @@
+package msgpack
+
+import (
+	"math"
+	"reflect"
+	"sort"
+
+	"github.com/vmihailenco/msgpack/v5/msgpcode"
+)
+
+func encodeMapValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err := e.EncodeMapLen(v.Len()); err != nil {
+		return err
+	}
+
+	iter := v.MapRange()
+	for iter.Next() {
+		if err := e.EncodeValue(iter.Key()); err != nil {
+			return err
+		}
+		if err := e.EncodeValue(iter.Value()); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeMapStringBoolValue(e *Encoder, v reflect.Value) error {
+	if v.IsNil() {
+		return e.EncodeNil()
+	}
+
+	if err :=
e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringBoolType).Interface().(map[string]bool) + if e.flags&sortMapKeysFlag != 0 { + return e.encodeSortedMapStringBool(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.EncodeBool(mv); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringStringValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringStringType).Interface().(map[string]string) + if e.flags&sortMapKeysFlag != 0 { + return e.encodeSortedMapStringString(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.EncodeString(mv); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + m := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{}) + if e.flags&sortMapKeysFlag != 0 { + return e.EncodeMapSorted(m) + } + return e.EncodeMap(m) +} + +func (e *Encoder) EncodeMap(m map[string]interface{}) error { + if m == nil { + return e.EncodeNil() + } + if err := e.EncodeMapLen(len(m)); err != nil { + return err + } + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.Encode(mv); err != nil { + return err + } + } + return nil +} + +func (e *Encoder) EncodeMapSorted(m map[string]interface{}) error { + if m == nil { + return e.EncodeNil() + } + if err := e.EncodeMapLen(len(m)); err != nil { + return err + } + + keys := make([]string, 0, len(m)) + + for k := range m { + keys = append(keys, k) + } + + sort.Strings(keys) + + for _, k := range keys { + if err := e.EncodeString(k); err != nil { + return err + } + if err := e.Encode(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringBool(m map[string]bool) error { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.EncodeBool(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringString(m map[string]string) error { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.EncodeString(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) EncodeMapLen(l int) error { + if l < 16 { + return e.writeCode(msgpcode.FixedMapLow | byte(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Map16, uint16(l)) + } + return e.write4(msgpcode.Map32, uint32(l)) +} + +func encodeStructValue(e *Encoder, strct reflect.Value) error { + structFields := structs.Fields(strct.Type(), e.structTag) + if e.flags&arrayEncodedStructsFlag != 0 || structFields.AsArray { + return encodeStructValueAsArray(e, strct, structFields.List) + } + fields := structFields.OmitEmpty(e, strct) + + if err := e.EncodeMapLen(len(fields)); err != nil { + return err + } + + for _, f := range fields { + if err := e.EncodeString(f.name); err != nil { + return err + } + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + + return nil +} + +func encodeStructValueAsArray(e *Encoder, 
strct reflect.Value, fields []*field) error { + if err := e.EncodeArrayLen(len(fields)); err != nil { + return err + } + for _, f := range fields { + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode_number.go b/vendor/github.com/vmihailenco/msgpack/v5/encode_number.go new file mode 100644 index 0000000000..63c311bfae --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/encode_number.go @@ -0,0 +1,252 @@ +package msgpack + +import ( + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +// EncodeUint8 encodes a uint8 in 2 bytes, preserving the type of the number. +func (e *Encoder) EncodeUint8(n uint8) error { + return e.write1(msgpcode.Uint8, n) +} + +func (e *Encoder) encodeUint8Cond(n uint8) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint8(n) +} + +// EncodeUint16 encodes a uint16 in 3 bytes, preserving the type of the number. +func (e *Encoder) EncodeUint16(n uint16) error { + return e.write2(msgpcode.Uint16, n) +} + +func (e *Encoder) encodeUint16Cond(n uint16) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint16(n) +} + +// EncodeUint32 encodes a uint32 in 5 bytes, preserving the type of the number. +func (e *Encoder) EncodeUint32(n uint32) error { + return e.write4(msgpcode.Uint32, n) +} + +func (e *Encoder) encodeUint32Cond(n uint32) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint32(n) +} + +// EncodeUint64 encodes a uint64 in 9 bytes, preserving the type of the number. +func (e *Encoder) EncodeUint64(n uint64) error { + return e.write8(msgpcode.Uint64, n) +} + +func (e *Encoder) encodeUint64Cond(n uint64) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeUint(n) + } + return e.EncodeUint64(n) +} + +// EncodeInt8 encodes an int8 in 2 bytes, preserving the type of the number. +func (e *Encoder) EncodeInt8(n int8) error { + return e.write1(msgpcode.Int8, uint8(n)) +} + +func (e *Encoder) encodeInt8Cond(n int8) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt8(n) +} + +// EncodeInt16 encodes an int16 in 3 bytes, preserving the type of the number. +func (e *Encoder) EncodeInt16(n int16) error { + return e.write2(msgpcode.Int16, uint16(n)) +} + +func (e *Encoder) encodeInt16Cond(n int16) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt16(n) +} + +// EncodeInt32 encodes an int32 in 5 bytes, preserving the type of the number. +func (e *Encoder) EncodeInt32(n int32) error { + return e.write4(msgpcode.Int32, uint32(n)) +} + +func (e *Encoder) encodeInt32Cond(n int32) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt32(n) +} + +// EncodeInt64 encodes an int64 in 9 bytes, preserving the type of the number. +func (e *Encoder) EncodeInt64(n int64) error { + return e.write8(msgpcode.Int64, uint64(n)) +} + +func (e *Encoder) encodeInt64Cond(n int64) error { + if e.flags&useCompactIntsFlag != 0 { + return e.EncodeInt(n) + } + return e.EncodeInt64(n) +} + +// EncodeUint encodes a uint64 in 1, 2, 3, 5, or 9 bytes. +// The type of the number is lost during encoding.
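+// Illustrative sketch (editorial addition, not upstream doc text; assumes +// the standard bytes and msgpack/v5 imports): +// +// var buf bytes.Buffer +// enc := msgpack.NewEncoder(&buf) +// _ = enc.EncodeUint(7) // 1 byte: positive fixint 0x07 +// _ = enc.EncodeUint(300) // 3 bytes: 0xcd (Uint16) + big-endian value +// _ = enc.EncodeUint(1 << 40) // 9 bytes: 0xcf (Uint64) + big-endian value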
+func (e *Encoder) EncodeUint(n uint64) error { + if n <= math.MaxInt8 { + return e.w.WriteByte(byte(n)) + } + if n <= math.MaxUint8 { + return e.EncodeUint8(uint8(n)) + } + if n <= math.MaxUint16 { + return e.EncodeUint16(uint16(n)) + } + if n <= math.MaxUint32 { + return e.EncodeUint32(uint32(n)) + } + return e.EncodeUint64(n) +} + +// EncodeInt encodes an int64 in 1, 2, 3, 5, or 9 bytes. +// The type of the number is lost during encoding. +func (e *Encoder) EncodeInt(n int64) error { + if n >= 0 { + return e.EncodeUint(uint64(n)) + } + if n >= int64(int8(msgpcode.NegFixedNumLow)) { + return e.w.WriteByte(byte(n)) + } + if n >= math.MinInt8 { + return e.EncodeInt8(int8(n)) + } + if n >= math.MinInt16 { + return e.EncodeInt16(int16(n)) + } + if n >= math.MinInt32 { + return e.EncodeInt32(int32(n)) + } + return e.EncodeInt64(n) +} + +func (e *Encoder) EncodeFloat32(n float32) error { + if e.flags&useCompactFloatsFlag != 0 { + if float32(int64(n)) == n { + return e.EncodeInt(int64(n)) + } + } + return e.write4(msgpcode.Float, math.Float32bits(n)) +} + +func (e *Encoder) EncodeFloat64(n float64) error { + if e.flags&useCompactFloatsFlag != 0 { + // Both NaN and Inf convert to int64(-0x8000000000000000). + // If n is NaN, it never compares equal to any other value; + // if n is Inf, the int64 does not convert back to +/-Inf. + // In both cases the comparison below is false, so the value is + // written as a full float64 rather than a compact int. + if float64(int64(n)) == n { + return e.EncodeInt(int64(n)) + } + } + return e.write8(msgpcode.Double, math.Float64bits(n)) +} + +func (e *Encoder) write1(code byte, n uint8) error { + e.buf = e.buf[:2] + e.buf[0] = code + e.buf[1] = n + return e.write(e.buf) +} + +func (e *Encoder) write2(code byte, n uint16) error { + e.buf = e.buf[:3] + e.buf[0] = code + e.buf[1] = byte(n >> 8) + e.buf[2] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write4(code byte, n uint32) error { + e.buf = e.buf[:5] + e.buf[0] = code + e.buf[1] = byte(n >> 24) + e.buf[2] = byte(n >> 16) + e.buf[3] = byte(n >> 8) + e.buf[4] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write8(code byte, n uint64) error { + e.buf = e.buf[:9] + e.buf[0] = code + e.buf[1] = byte(n >> 56) + e.buf[2] = byte(n >> 48) + e.buf[3] = byte(n >> 40) + e.buf[4] = byte(n >> 32) + e.buf[5] = byte(n >> 24) + e.buf[6] = byte(n >> 16) + e.buf[7] = byte(n >> 8) + e.buf[8] = byte(n) + return e.write(e.buf) +} + +func encodeUintValue(e *Encoder, v reflect.Value) error { + return e.EncodeUint(v.Uint()) +} + +func encodeIntValue(e *Encoder, v reflect.Value) error { + return e.EncodeInt(v.Int()) +} + +func encodeUint8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint8Cond(uint8(v.Uint())) +} + +func encodeUint16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint16Cond(uint16(v.Uint())) +} + +func encodeUint32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint32Cond(uint32(v.Uint())) +} + +func encodeUint64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint64Cond(v.Uint()) +} + +func encodeInt8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt8Cond(int8(v.Int())) +} + +func encodeInt16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt16Cond(int16(v.Int())) +} + +func encodeInt32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt32Cond(int32(v.Int())) +} + +func encodeInt64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt64Cond(v.Int()) +} + +func encodeFloat32Value(e *Encoder, v reflect.Value) error { + return
e.EncodeFloat32(float32(v.Float())) +} + +func encodeFloat64Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat64(v.Float()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode_slice.go b/vendor/github.com/vmihailenco/msgpack/v5/encode_slice.go new file mode 100644 index 0000000000..ca46eadae5 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/encode_slice.go @@ -0,0 +1,139 @@ +package msgpack + +import ( + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +var stringSliceType = reflect.TypeOf(([]string)(nil)) + +func encodeStringValue(e *Encoder, v reflect.Value) error { + return e.EncodeString(v.String()) +} + +func encodeByteSliceValue(e *Encoder, v reflect.Value) error { + return e.EncodeBytes(v.Bytes()) +} + +func encodeByteArrayValue(e *Encoder, v reflect.Value) error { + if err := e.EncodeBytesLen(v.Len()); err != nil { + return err + } + + if v.CanAddr() { + b := v.Slice(0, v.Len()).Bytes() + return e.write(b) + } + + e.buf = grow(e.buf, v.Len()) + reflect.Copy(reflect.ValueOf(e.buf), v) + return e.write(e.buf) +} + +func grow(b []byte, n int) []byte { + if cap(b) >= n { + return b[:n] + } + b = b[:cap(b)] + b = append(b, make([]byte, n-len(b))...) + return b +} + +func (e *Encoder) EncodeBytesLen(l int) error { + if l < 256 { + return e.write1(msgpcode.Bin8, uint8(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Bin16, uint16(l)) + } + return e.write4(msgpcode.Bin32, uint32(l)) +} + +func (e *Encoder) encodeStringLen(l int) error { + if l < 32 { + return e.writeCode(msgpcode.FixedStrLow | byte(l)) + } + if l < 256 { + return e.write1(msgpcode.Str8, uint8(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Str16, uint16(l)) + } + return e.write4(msgpcode.Str32, uint32(l)) +} + +func (e *Encoder) EncodeString(v string) error { + if intern := e.flags&useInternedStringsFlag != 0; intern || len(e.dict) > 0 { + return e.encodeInternedString(v, intern) + } + return e.encodeNormalString(v) +} + +func (e *Encoder) encodeNormalString(v string) error { + if err := e.encodeStringLen(len(v)); err != nil { + return err + } + return e.writeString(v) +} + +func (e *Encoder) EncodeBytes(v []byte) error { + if v == nil { + return e.EncodeNil() + } + if err := e.EncodeBytesLen(len(v)); err != nil { + return err + } + return e.write(v) +} + +func (e *Encoder) EncodeArrayLen(l int) error { + if l < 16 { + return e.writeCode(msgpcode.FixedArrayLow | byte(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Array16, uint16(l)) + } + return e.write4(msgpcode.Array32, uint32(l)) +} + +func encodeStringSliceValue(e *Encoder, v reflect.Value) error { + ss := v.Convert(stringSliceType).Interface().([]string) + return e.encodeStringSlice(ss) +} + +func (e *Encoder) encodeStringSlice(s []string) error { + if s == nil { + return e.EncodeNil() + } + if err := e.EncodeArrayLen(len(s)); err != nil { + return err + } + for _, v := range s { + if err := e.EncodeString(v); err != nil { + return err + } + } + return nil +} + +func encodeSliceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encodeArrayValue(e, v) +} + +func encodeArrayValue(e *Encoder, v reflect.Value) error { + l := v.Len() + if err := e.EncodeArrayLen(l); err != nil { + return err + } + for i := 0; i < l; i++ { + if err := e.EncodeValue(v.Index(i)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go 
b/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go new file mode 100644 index 0000000000..1d6303a25c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go @@ -0,0 +1,254 @@ +package msgpack + +import ( + "encoding" + "fmt" + "reflect" +) + +var valueEncoders []encoderFunc + +//nolint:gochecknoinits +func init() { + valueEncoders = []encoderFunc{ + reflect.Bool: encodeBoolValue, + reflect.Int: encodeIntValue, + reflect.Int8: encodeInt8CondValue, + reflect.Int16: encodeInt16CondValue, + reflect.Int32: encodeInt32CondValue, + reflect.Int64: encodeInt64CondValue, + reflect.Uint: encodeUintValue, + reflect.Uint8: encodeUint8CondValue, + reflect.Uint16: encodeUint16CondValue, + reflect.Uint32: encodeUint32CondValue, + reflect.Uint64: encodeUint64CondValue, + reflect.Float32: encodeFloat32Value, + reflect.Float64: encodeFloat64Value, + reflect.Complex64: encodeUnsupportedValue, + reflect.Complex128: encodeUnsupportedValue, + reflect.Array: encodeArrayValue, + reflect.Chan: encodeUnsupportedValue, + reflect.Func: encodeUnsupportedValue, + reflect.Interface: encodeInterfaceValue, + reflect.Map: encodeMapValue, + reflect.Ptr: encodeUnsupportedValue, + reflect.Slice: encodeSliceValue, + reflect.String: encodeStringValue, + reflect.Struct: encodeStructValue, + reflect.UnsafePointer: encodeUnsupportedValue, + } +} + +func getEncoder(typ reflect.Type) encoderFunc { + if v, ok := typeEncMap.Load(typ); ok { + return v.(encoderFunc) + } + fn := _getEncoder(typ) + typeEncMap.Store(typ, fn) + return fn +} + +func _getEncoder(typ reflect.Type) encoderFunc { + kind := typ.Kind() + + if kind == reflect.Ptr { + if _, ok := typeEncMap.Load(typ.Elem()); ok { + return ptrEncoderFunc(typ) + } + } + + if typ.Implements(customEncoderType) { + return encodeCustomValue + } + if typ.Implements(marshalerType) { + return marshalValue + } + if typ.Implements(binaryMarshalerType) { + return marshalBinaryValue + } + if typ.Implements(textMarshalerType) { + return marshalTextValue + } + + // Addressable struct field value. 
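+ // Editorial sketch of the case this handles (the type ID below is + // assumed, not part of the package): given + // + // type ID int + // func (id *ID) EncodeMsgpack(e *msgpack.Encoder) error { return e.EncodeInt(int64(*id)) } + // + // only *ID implements CustomEncoder, so an addressable struct field of + // type ID is encoded through encodeCustomValuePtr via the pointer + // checks below.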
+ if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customEncoderType) { + return encodeCustomValuePtr + } + if ptr.Implements(marshalerType) { + return marshalValuePtr + } + if ptr.Implements(binaryMarshalerType) { + return marshalBinaryValueAddr + } + if ptr.Implements(textMarshalerType) { + return marshalTextValueAddr + } + } + + if typ == errorType { + return encodeErrorValue + } + + switch kind { + case reflect.Ptr: + return ptrEncoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return encodeByteSliceValue + } + if elem == stringType { + return encodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return encodeMapStringStringValue + case boolType: + return encodeMapStringBoolValue + case interfaceType: + return encodeMapStringInterfaceValue + } + } + } + + return valueEncoders[kind] +} + +func ptrEncoderFunc(typ reflect.Type) encoderFunc { + encoder := getEncoder(typ.Elem()) + return func(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encoder(e, v.Elem()) + } +} + +func encodeCustomValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + encoder := v.Addr().Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func encodeCustomValue(e *Encoder, v reflect.Value) error { + if nilable(v.Kind()) && v.IsNil() { + return e.EncodeNil() + } + + encoder := v.Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func marshalValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalValue(e, v.Addr()) +} + +func marshalValue(e *Encoder, v reflect.Value) error { + if nilable(v.Kind()) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := v.Interface().(Marshaler) + b, err := marshaler.MarshalMsgpack() + if err != nil { + return err + } + _, err = e.w.Write(b) + return err +} + +func encodeBoolValue(e *Encoder, v reflect.Value) error { + return e.EncodeBool(v.Bool()) +} + +func encodeInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeValue(v.Elem()) +} + +func encodeErrorValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeString(v.Interface().(error).Error()) +} + +func encodeUnsupportedValue(e *Encoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Encode(unsupported %s)", v.Type()) +} + +func nilable(kind reflect.Kind) bool { + switch kind { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +func nilableType(t reflect.Type) bool { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return nilable(t.Kind()) +} + +//------------------------------------------------------------------------------ + +func marshalBinaryValueAddr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalBinaryValue(e, v.Addr()) +} + +func marshalBinaryValue(e *Encoder, v reflect.Value) error { + if nilable(v.Kind()) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := v.Interface().(encoding.BinaryMarshaler) + data, err := 
marshaler.MarshalBinary() + if err != nil { + return err + } + + return e.EncodeBytes(data) +} + +//------------------------------------------------------------------------------ + +func marshalTextValueAddr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalTextValue(e, v.Addr()) +} + +func marshalTextValue(e *Encoder, v reflect.Value) error { + if nilable(v.Kind()) && v.IsNil() { + return e.EncodeNil() + } + + marshaler := v.Interface().(encoding.TextMarshaler) + data, err := marshaler.MarshalText() + if err != nil { + return err + } + + return e.EncodeBytes(data) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/ext.go b/vendor/github.com/vmihailenco/msgpack/v5/ext.go new file mode 100644 index 0000000000..354b9d92d7 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/ext.go @@ -0,0 +1,303 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +type extInfo struct { + Type reflect.Type + Decoder func(d *Decoder, v reflect.Value, extLen int) error +} + +var extTypes = make(map[int8]*extInfo) + +type MarshalerUnmarshaler interface { + Marshaler + Unmarshaler +} + +func RegisterExt(extID int8, value MarshalerUnmarshaler) { + RegisterExtEncoder(extID, value, func(e *Encoder, v reflect.Value) ([]byte, error) { + marshaler := v.Interface().(Marshaler) + return marshaler.MarshalMsgpack() + }) + RegisterExtDecoder(extID, value, func(d *Decoder, v reflect.Value, extLen int) error { + b, err := d.readN(extLen) + if err != nil { + return err + } + return v.Interface().(Unmarshaler).UnmarshalMsgpack(b) + }) +} + +func UnregisterExt(extID int8) { + unregisterExtEncoder(extID) + unregisterExtDecoder(extID) +} + +func RegisterExtEncoder( + extID int8, + value interface{}, + encoder func(enc *Encoder, v reflect.Value) ([]byte, error), +) { + unregisterExtEncoder(extID) + + typ := reflect.TypeOf(value) + extEncoder := makeExtEncoder(extID, typ, encoder) + typeEncMap.Store(extID, typ) + typeEncMap.Store(typ, extEncoder) + if typ.Kind() == reflect.Ptr { + typeEncMap.Store(typ.Elem(), makeExtEncoderAddr(extEncoder)) + } +} + +func unregisterExtEncoder(extID int8) { + t, ok := typeEncMap.Load(extID) + if !ok { + return + } + typeEncMap.Delete(extID) + typ := t.(reflect.Type) + typeEncMap.Delete(typ) + if typ.Kind() == reflect.Ptr { + typeEncMap.Delete(typ.Elem()) + } +} + +func makeExtEncoder( + extID int8, + typ reflect.Type, + encoder func(enc *Encoder, v reflect.Value) ([]byte, error), +) encoderFunc { + nilable := typ.Kind() == reflect.Ptr + + return func(e *Encoder, v reflect.Value) error { + if nilable && v.IsNil() { + return e.EncodeNil() + } + + b, err := encoder(e, v) + if err != nil { + return err + } + + if err := e.EncodeExtHeader(extID, len(b)); err != nil { + return err + } + + return e.write(b) + } +} + +func makeExtEncoderAddr(extEncoder encoderFunc) encoderFunc { + return func(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: EncodeExt(nonaddressable %T)", v.Interface()) + } + return extEncoder(e, v.Addr()) + } +} + +func RegisterExtDecoder( + extID int8, + value interface{}, + decoder func(dec *Decoder, v reflect.Value, extLen int) error, +) { + unregisterExtDecoder(extID) + + typ := reflect.TypeOf(value) + extDecoder := makeExtDecoder(extID, typ, decoder) + extTypes[extID] = &extInfo{ + Type: typ, + Decoder: decoder, + } + + typeDecMap.Store(extID, typ) + typeDecMap.Store(typ, 
extDecoder) + if typ.Kind() == reflect.Ptr { + typeDecMap.Store(typ.Elem(), makeExtDecoderAddr(extDecoder)) + } +} + +func unregisterExtDecoder(extID int8) { + t, ok := typeDecMap.Load(extID) + if !ok { + return + } + typeDecMap.Delete(extID) + delete(extTypes, extID) + typ := t.(reflect.Type) + typeDecMap.Delete(typ) + if typ.Kind() == reflect.Ptr { + typeDecMap.Delete(typ.Elem()) + } +} + +func makeExtDecoder( + wantedExtID int8, + typ reflect.Type, + decoder func(d *Decoder, v reflect.Value, extLen int) error, +) decoderFunc { + return nilAwareDecoder(typ, func(d *Decoder, v reflect.Value) error { + extID, extLen, err := d.DecodeExtHeader() + if err != nil { + return err + } + if extID != wantedExtID { + return fmt.Errorf("msgpack: got ext type=%d, wanted %d", extID, wantedExtID) + } + return decoder(d, v, extLen) + }) +} + +func makeExtDecoderAddr(extDecoder decoderFunc) decoderFunc { + return func(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: DecodeExt(nonaddressable %T)", v.Interface()) + } + return extDecoder(d, v.Addr()) + } +} + +func (e *Encoder) EncodeExtHeader(extID int8, extLen int) error { + if err := e.encodeExtLen(extLen); err != nil { + return err + } + if err := e.w.WriteByte(byte(extID)); err != nil { + return err + } + return nil +} + +func (e *Encoder) encodeExtLen(l int) error { + switch l { + case 1: + return e.writeCode(msgpcode.FixExt1) + case 2: + return e.writeCode(msgpcode.FixExt2) + case 4: + return e.writeCode(msgpcode.FixExt4) + case 8: + return e.writeCode(msgpcode.FixExt8) + case 16: + return e.writeCode(msgpcode.FixExt16) + } + if l <= math.MaxUint8 { + return e.write1(msgpcode.Ext8, uint8(l)) + } + if l <= math.MaxUint16 { + return e.write2(msgpcode.Ext16, uint16(l)) + } + return e.write4(msgpcode.Ext32, uint32(l)) +} + +func (d *Decoder) DecodeExtHeader() (extID int8, extLen int, err error) { + c, err := d.readCode() + if err != nil { + return + } + return d.extHeader(c) +} + +func (d *Decoder) extHeader(c byte) (int8, int, error) { + extLen, err := d.parseExtLen(c) + if err != nil { + return 0, 0, err + } + + extID, err := d.readCode() + if err != nil { + return 0, 0, err + } + + return int8(extID), extLen, nil +} + +func (d *Decoder) parseExtLen(c byte) (int, error) { + switch c { + case msgpcode.FixExt1: + return 1, nil + case msgpcode.FixExt2: + return 2, nil + case msgpcode.FixExt4: + return 4, nil + case msgpcode.FixExt8: + return 8, nil + case msgpcode.FixExt16: + return 16, nil + case msgpcode.Ext8: + n, err := d.uint8() + return int(n), err + case msgpcode.Ext16: + n, err := d.uint16() + return int(n), err + case msgpcode.Ext32: + n, err := d.uint32() + return int(n), err + default: + return 0, fmt.Errorf("msgpack: invalid code=%x decoding ext len", c) + } +} + +func (d *Decoder) decodeInterfaceExt(c byte) (interface{}, error) { + extID, extLen, err := d.extHeader(c) + if err != nil { + return nil, err + } + + info, ok := extTypes[extID] + if !ok { + return nil, fmt.Errorf("msgpack: unknown ext id=%d", extID) + } + + v := d.newValue(info.Type).Elem() + if nilable(v.Kind()) && v.IsNil() { + v.Set(d.newValue(info.Type.Elem())) + } + + if err := info.Decoder(d, v, extLen); err != nil { + return nil, err + } + + return v.Interface(), nil +} + +func (d *Decoder) skipExt(c byte) error { + n, err := d.parseExtLen(c) + if err != nil { + return err + } + return d.skipN(n + 1) +} + +func (d *Decoder) skipExtHeader(c byte) error { + // Read ext type. 
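+ // (Editorial note: together with the length bytes read in the loop below, + // this consumes 1 + extHeaderLen(c) bytes, i.e. the remainder of the ext + // header that follows the already-consumed code byte c.)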
+ _, err := d.readCode() + if err != nil { + return err + } + // Read ext body len. + for i := 0; i < extHeaderLen(c); i++ { + _, err := d.readCode() + if err != nil { + return err + } + } + return nil +} + +func extHeaderLen(c byte) int { + switch c { + case msgpcode.Ext8: + return 1 + case msgpcode.Ext16: + return 2 + case msgpcode.Ext32: + return 4 + } + return 0 +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/intern.go b/vendor/github.com/vmihailenco/msgpack/v5/intern.go new file mode 100644 index 0000000000..7f019aaacc --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/intern.go @@ -0,0 +1,236 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +const ( + minInternedStringLen = 3 + maxDictLen = math.MaxUint16 +) + +var internedStringExtID = int8(math.MinInt8) + +func init() { + extTypes[internedStringExtID] = &extInfo{ + Type: stringType, + Decoder: decodeInternedStringExt, + } +} + +func decodeInternedStringExt(d *Decoder, v reflect.Value, extLen int) error { + idx, err := d.decodeInternedStringIndex(extLen) + if err != nil { + return err + } + + s, err := d.internedStringAtIndex(idx) + if err != nil { + return err + } + + v.SetString(s) + return nil +} + +//------------------------------------------------------------------------------ + +func encodeInternedInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + v = v.Elem() + if v.Kind() == reflect.String { + return e.encodeInternedString(v.String(), true) + } + return e.EncodeValue(v) +} + +func encodeInternedStringValue(e *Encoder, v reflect.Value) error { + return e.encodeInternedString(v.String(), true) +} + +func (e *Encoder) encodeInternedString(s string, intern bool) error { + // Interned string takes at least 3 bytes. Plain string 1 byte + string len. 
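+ // Editorial sketch: with interning enabled, encoding + // []string{"hello", "hello"} writes "hello" once as a plain string + // (registering it in e.dict) and the repeat as a 3-byte FixExt1 + // reference to dict index 0.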
+ if idx, ok := e.dict[s]; ok { + return e.encodeInternedStringIndex(idx) + } + + if intern && len(s) >= minInternedStringLen && len(e.dict) < maxDictLen { + if e.dict == nil { + e.dict = make(map[string]int) + } + idx := len(e.dict) + e.dict[s] = idx + } + + return e.encodeNormalString(s) +} + +func (e *Encoder) encodeInternedStringIndex(idx int) error { + if idx <= math.MaxUint8 { + if err := e.writeCode(msgpcode.FixExt1); err != nil { + return err + } + return e.write1(byte(internedStringExtID), uint8(idx)) + } + + if idx <= math.MaxUint16 { + if err := e.writeCode(msgpcode.FixExt2); err != nil { + return err + } + return e.write2(byte(internedStringExtID), uint16(idx)) + } + + if uint64(idx) <= math.MaxUint32 { + if err := e.writeCode(msgpcode.FixExt4); err != nil { + return err + } + return e.write4(byte(internedStringExtID), uint32(idx)) + } + + return fmt.Errorf("msgpack: interned string index=%d is too large", idx) +} + +//------------------------------------------------------------------------------ + +func decodeInternedInterfaceValue(d *Decoder, v reflect.Value) error { + s, err := d.decodeInternedString(true) + if err == nil { + v.Set(reflect.ValueOf(s)) + return nil + } + if err != nil { + if _, ok := err.(unexpectedCodeError); !ok { + return err + } + } + + if err := d.s.UnreadByte(); err != nil { + return err + } + return decodeInterfaceValue(d, v) +} + +func decodeInternedStringValue(d *Decoder, v reflect.Value) error { + s, err := d.decodeInternedString(true) + if err != nil { + return err + } + + v.SetString(s) + return nil +} + +func (d *Decoder) decodeInternedString(intern bool) (string, error) { + c, err := d.readCode() + if err != nil { + return "", err + } + + if msgpcode.IsFixedString(c) { + n := int(c & msgpcode.FixedStrMask) + return d.decodeInternedStringWithLen(n, intern) + } + + switch c { + case msgpcode.Nil: + return "", nil + case msgpcode.FixExt1, msgpcode.FixExt2, msgpcode.FixExt4: + typeID, extLen, err := d.extHeader(c) + if err != nil { + return "", err + } + if typeID != internedStringExtID { + err := fmt.Errorf("msgpack: got ext type=%d, wanted %d", + typeID, internedStringExtID) + return "", err + } + + idx, err := d.decodeInternedStringIndex(extLen) + if err != nil { + return "", err + } + + return d.internedStringAtIndex(idx) + case msgpcode.Str8, msgpcode.Bin8: + n, err := d.uint8() + if err != nil { + return "", err + } + return d.decodeInternedStringWithLen(int(n), intern) + case msgpcode.Str16, msgpcode.Bin16: + n, err := d.uint16() + if err != nil { + return "", err + } + return d.decodeInternedStringWithLen(int(n), intern) + case msgpcode.Str32, msgpcode.Bin32: + n, err := d.uint32() + if err != nil { + return "", err + } + return d.decodeInternedStringWithLen(int(n), intern) + } + + return "", unexpectedCodeError{ + code: c, + hint: "interned string", + } +} + +func (d *Decoder) decodeInternedStringIndex(extLen int) (int, error) { + switch extLen { + case 1: + n, err := d.uint8() + if err != nil { + return 0, err + } + return int(n), nil + case 2: + n, err := d.uint16() + if err != nil { + return 0, err + } + return int(n), nil + case 4: + n, err := d.uint32() + if err != nil { + return 0, err + } + return int(n), nil + } + + err := fmt.Errorf("msgpack: unsupported ext len=%d decoding interned string", extLen) + return 0, err +} + +func (d *Decoder) internedStringAtIndex(idx int) (string, error) { + if idx >= len(d.dict) { + err := fmt.Errorf("msgpack: interned string at index=%d does not exist", idx) + return "", err + } + return 
d.dict[idx], nil +} + +func (d *Decoder) decodeInternedStringWithLen(n int, intern bool) (string, error) { + if n <= 0 { + return "", nil + } + + s, err := d.stringWithLen(n) + if err != nil { + return "", err + } + + if intern && len(s) >= minInternedStringLen && len(d.dict) < maxDictLen { + d.dict = append(d.dict, s) + } + + return s, nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go b/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go new file mode 100644 index 0000000000..4fa000b826 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go @@ -0,0 +1,52 @@ +package msgpack + +import "fmt" + +type Marshaler interface { + MarshalMsgpack() ([]byte, error) +} + +type Unmarshaler interface { + UnmarshalMsgpack([]byte) error +} + +type CustomEncoder interface { + EncodeMsgpack(*Encoder) error +} + +type CustomDecoder interface { + DecodeMsgpack(*Decoder) error +} + +//------------------------------------------------------------------------------ + +type RawMessage []byte + +var ( + _ CustomEncoder = (RawMessage)(nil) + _ CustomDecoder = (*RawMessage)(nil) +) + +func (m RawMessage) EncodeMsgpack(enc *Encoder) error { + return enc.write(m) +} + +func (m *RawMessage) DecodeMsgpack(dec *Decoder) error { + msg, err := dec.DecodeRaw() + if err != nil { + return err + } + *m = msg + return nil +} + +//------------------------------------------------------------------------------ + +type unexpectedCodeError struct { + hint string + code byte +} + +func (err unexpectedCodeError) Error() string { + return fmt.Sprintf("msgpack: unexpected code=%x decoding %s", err.code, err.hint) +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/msgpcode/msgpcode.go b/vendor/github.com/vmihailenco/msgpack/v5/msgpcode/msgpcode.go new file mode 100644 index 0000000000..e35389cccf --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/msgpcode/msgpcode.go @@ -0,0 +1,88 @@ +package msgpcode + +var ( + PosFixedNumHigh byte = 0x7f + NegFixedNumLow byte = 0xe0 + + Nil byte = 0xc0 + + False byte = 0xc2 + True byte = 0xc3 + + Float byte = 0xca + Double byte = 0xcb + + Uint8 byte = 0xcc + Uint16 byte = 0xcd + Uint32 byte = 0xce + Uint64 byte = 0xcf + + Int8 byte = 0xd0 + Int16 byte = 0xd1 + Int32 byte = 0xd2 + Int64 byte = 0xd3 + + FixedStrLow byte = 0xa0 + FixedStrHigh byte = 0xbf + FixedStrMask byte = 0x1f + Str8 byte = 0xd9 + Str16 byte = 0xda + Str32 byte = 0xdb + + Bin8 byte = 0xc4 + Bin16 byte = 0xc5 + Bin32 byte = 0xc6 + + FixedArrayLow byte = 0x90 + FixedArrayHigh byte = 0x9f + FixedArrayMask byte = 0xf + Array16 byte = 0xdc + Array32 byte = 0xdd + + FixedMapLow byte = 0x80 + FixedMapHigh byte = 0x8f + FixedMapMask byte = 0xf + Map16 byte = 0xde + Map32 byte = 0xdf + + FixExt1 byte = 0xd4 + FixExt2 byte = 0xd5 + FixExt4 byte = 0xd6 + FixExt8 byte = 0xd7 + FixExt16 byte = 0xd8 + Ext8 byte = 0xc7 + Ext16 byte = 0xc8 + Ext32 byte = 0xc9 +) + +func IsFixedNum(c byte) bool { + return c <= PosFixedNumHigh || c >= NegFixedNumLow +} + +func IsFixedMap(c byte) bool { + return c >= FixedMapLow && c <= FixedMapHigh +} + +func IsFixedArray(c byte) bool { + return c >= FixedArrayLow && c <= FixedArrayHigh +} + +func IsFixedString(c byte) bool { + return c >= FixedStrLow && c <= FixedStrHigh +} + +func IsString(c byte) bool { + return IsFixedString(c) || c == Str8 || c == Str16 || c == Str32 +} + +func IsBin(c byte) bool { + return c == Bin8 || c == Bin16 || c == Bin32 +} + +func IsFixedExt(c byte) bool { + return c >= FixExt1 && c <= FixExt16 +} + +func IsExt(c byte) bool { + return 
IsFixedExt(c) || c == Ext8 || c == Ext16 || c == Ext32 +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/package.json b/vendor/github.com/vmihailenco/msgpack/v5/package.json new file mode 100644 index 0000000000..921f8eab22 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/package.json @@ -0,0 +1,4 @@ +{ + "name": "msgpack", + "version": "5.4.1" +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/safe.go b/vendor/github.com/vmihailenco/msgpack/v5/safe.go similarity index 100% rename from vendor/github.com/vmihailenco/msgpack/v4/safe.go rename to vendor/github.com/vmihailenco/msgpack/v5/safe.go diff --git a/vendor/github.com/vmihailenco/msgpack/v5/time.go b/vendor/github.com/vmihailenco/msgpack/v5/time.go new file mode 100644 index 0000000000..1a4ba12652 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/time.go @@ -0,0 +1,151 @@ +package msgpack + +import ( + "encoding/binary" + "fmt" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/v5/msgpcode" +) + +var timeExtID int8 = -1 + +func init() { + RegisterExtEncoder(timeExtID, time.Time{}, timeEncoder) + RegisterExtDecoder(timeExtID, time.Time{}, timeDecoder) +} + +func timeEncoder(e *Encoder, v reflect.Value) ([]byte, error) { + return e.encodeTime(v.Interface().(time.Time)), nil +} + +func timeDecoder(d *Decoder, v reflect.Value, extLen int) error { + tm, err := d.decodeTime(extLen) + if err != nil { + return err + } + + if tm.IsZero() { + // Zero time does not have timezone information. + tm = tm.UTC() + } + + ptr := v.Addr().Interface().(*time.Time) + *ptr = tm + + return nil +} + +func (e *Encoder) EncodeTime(tm time.Time) error { + b := e.encodeTime(tm) + if err := e.encodeExtLen(len(b)); err != nil { + return err + } + if err := e.w.WriteByte(byte(timeExtID)); err != nil { + return err + } + return e.write(b) +} + +func (e *Encoder) encodeTime(tm time.Time) []byte { + if e.timeBuf == nil { + e.timeBuf = make([]byte, 12) + } + + secs := uint64(tm.Unix()) + if secs>>34 == 0 { + data := uint64(tm.Nanosecond())<<34 | secs + + if data&0xffffffff00000000 == 0 { + b := e.timeBuf[:4] + binary.BigEndian.PutUint32(b, uint32(data)) + return b + } + + b := e.timeBuf[:8] + binary.BigEndian.PutUint64(b, data) + return b + } + + b := e.timeBuf[:12] + binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond())) + binary.BigEndian.PutUint64(b[4:], secs) + return b +} + +func (d *Decoder) DecodeTime() (time.Time, error) { + c, err := d.readCode() + if err != nil { + return time.Time{}, err + } + + // Legacy format. + if c == msgpcode.FixedArrayLow|2 { + sec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + nsec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + return time.Unix(sec, nsec), nil + } + + if msgpcode.IsString(c) { + s, err := d.string(c) + if err != nil { + return time.Time{}, err + } + return time.Parse(time.RFC3339Nano, s) + } + + extID, extLen, err := d.extHeader(c) + if err != nil { + return time.Time{}, err + } + + // NodeJS seems to use extID 13. + if extID != timeExtID && extID != 13 { + return time.Time{}, fmt.Errorf("msgpack: invalid time ext id=%d", extID) + } + + tm, err := d.decodeTime(extLen) + if err != nil { + return tm, err + } + + if tm.IsZero() { + // Zero time does not have timezone information. 
+ return tm.UTC(), nil + } + return tm, nil +} + +func (d *Decoder) decodeTime(extLen int) (time.Time, error) { + b, err := d.readN(extLen) + if err != nil { + return time.Time{}, err + } + + switch len(b) { + case 4: + sec := binary.BigEndian.Uint32(b) + return time.Unix(int64(sec), 0), nil + case 8: + sec := binary.BigEndian.Uint64(b) + nsec := int64(sec >> 34) + sec &= 0x00000003ffffffff + return time.Unix(int64(sec), nsec), nil + case 12: + nsec := binary.BigEndian.Uint32(b) + sec := binary.BigEndian.Uint64(b[4:]) + return time.Unix(int64(sec), int64(nsec)), nil + default: + err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen) + return time.Time{}, err + } +} diff --git a/vendor/github.com/vmihailenco/msgpack/v5/types.go b/vendor/github.com/vmihailenco/msgpack/v5/types.go new file mode 100644 index 0000000000..d212e098e7 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/types.go @@ -0,0 +1,413 @@ +package msgpack + +import ( + "encoding" + "fmt" + "log" + "reflect" + "sync" + + "github.com/vmihailenco/tagparser/v2" +) + +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +var ( + customEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem() + customDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem() +) + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +var ( + binaryMarshalerType = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + binaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() +) + +var ( + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +type ( + encoderFunc func(*Encoder, reflect.Value) error + decoderFunc func(*Decoder, reflect.Value) error +) + +var ( + typeEncMap sync.Map + typeDecMap sync.Map +) + +// Register registers encoder and decoder functions for a value. +// This is a low-level API; in most cases you should prefer implementing +// the CustomEncoder/CustomDecoder or Marshaler/Unmarshaler interfaces.
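+// A hedged usage sketch (editorial; MyType and its Name field are assumed, +// not part of the package): +// +// msgpack.Register(MyType{}, +// func(e *msgpack.Encoder, v reflect.Value) error { +// return e.EncodeString(v.Interface().(MyType).Name) +// }, +// func(d *msgpack.Decoder, v reflect.Value) error { +// s, err := d.DecodeString() +// if err != nil { +// return err +// } +// v.Set(reflect.ValueOf(MyType{Name: s})) +// return nil +// })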
+func Register(value interface{}, enc encoderFunc, dec decoderFunc) { + typ := reflect.TypeOf(value) + if enc != nil { + typeEncMap.Store(typ, enc) + } + if dec != nil { + typeDecMap.Store(typ, dec) + } +} + +//------------------------------------------------------------------------------ + +const defaultStructTag = "msgpack" + +var structs = newStructCache() + +type structCache struct { + m sync.Map +} + +type structCacheKey struct { + typ reflect.Type + tag string +} + +func newStructCache() *structCache { + return new(structCache) +} + +func (m *structCache) Fields(typ reflect.Type, tag string) *fields { + key := structCacheKey{tag: tag, typ: typ} + + if v, ok := m.m.Load(key); ok { + return v.(*fields) + } + + fs := getFields(typ, tag) + m.m.Store(key, fs) + + return fs +} + +//------------------------------------------------------------------------------ + +type field struct { + encoder encoderFunc + decoder decoderFunc + name string + index []int + omitEmpty bool +} + +func (f *field) Omit(e *Encoder, strct reflect.Value) bool { + v, ok := fieldByIndex(strct, f.index) + if !ok { + return true + } + forced := e.flags&omitEmptyFlag != 0 + return (f.omitEmpty || forced) && e.isEmptyValue(v) +} + +func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { + v, ok := fieldByIndex(strct, f.index) + if !ok { + return e.EncodeNil() + } + return f.encoder(e, v) +} + +func (f *field) DecodeValue(d *Decoder, strct reflect.Value) error { + v := fieldByIndexAlloc(strct, f.index) + return f.decoder(d, v) +} + +//------------------------------------------------------------------------------ + +type fields struct { + Type reflect.Type + Map map[string]*field + List []*field + AsArray bool + + hasOmitEmpty bool +} + +func newFields(typ reflect.Type) *fields { + return &fields{ + Type: typ, + Map: make(map[string]*field, typ.NumField()), + List: make([]*field, 0, typ.NumField()), + } +} + +func (fs *fields) Add(field *field) { + fs.warnIfFieldExists(field.name) + fs.Map[field.name] = field + fs.List = append(fs.List, field) + if field.omitEmpty { + fs.hasOmitEmpty = true + } +} + +func (fs *fields) warnIfFieldExists(name string) { + if _, ok := fs.Map[name]; ok { + log.Printf("msgpack: %s already has field=%s", fs.Type, name) + } +} + +func (fs *fields) OmitEmpty(e *Encoder, strct reflect.Value) []*field { + forced := e.flags&omitEmptyFlag != 0 + if !fs.hasOmitEmpty && !forced { + return fs.List + } + + fields := make([]*field, 0, len(fs.List)) + + for _, f := range fs.List { + if !f.Omit(e, strct) { + fields = append(fields, f) + } + } + + return fields +} + +func getFields(typ reflect.Type, fallbackTag string) *fields { + fs := newFields(typ) + + var omitEmpty bool + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + + tagStr := f.Tag.Get(defaultStructTag) + if tagStr == "" && fallbackTag != "" { + tagStr = f.Tag.Get(fallbackTag) + } + + tag := tagparser.Parse(tagStr) + if tag.Name == "-" { + continue + } + + if f.Name == "_msgpack" { + fs.AsArray = tag.HasOption("as_array") || tag.HasOption("asArray") + if tag.HasOption("omitempty") { + omitEmpty = true + } + } + + if f.PkgPath != "" && !f.Anonymous { + continue + } + + field := &field{ + name: tag.Name, + index: f.Index, + omitEmpty: omitEmpty || tag.HasOption("omitempty"), + } + + if tag.HasOption("intern") { + switch f.Type.Kind() { + case reflect.Interface: + field.encoder = encodeInternedInterfaceValue + field.decoder = decodeInternedInterfaceValue + case reflect.String: + field.encoder = encodeInternedStringValue + 
field.decoder = decodeInternedStringValue + default: + err := fmt.Errorf("msgpack: intern strings are not supported on %s", f.Type) + panic(err) + } + } else { + field.encoder = getEncoder(f.Type) + field.decoder = getDecoder(f.Type) + } + + if field.name == "" { + field.name = f.Name + } + + if f.Anonymous && !tag.HasOption("noinline") { + inline := tag.HasOption("inline") + if inline { + inlineFields(fs, f.Type, field, fallbackTag) + } else { + inline = shouldInline(fs, f.Type, field, fallbackTag) + } + + if inline { + if _, ok := fs.Map[field.name]; ok { + log.Printf("msgpack: %s already has field=%s", fs.Type, field.name) + } + fs.Map[field.name] = field + continue + } + } + + fs.Add(field) + + if alias, ok := tag.Options["alias"]; ok { + fs.warnIfFieldExists(alias) + fs.Map[alias] = field + } + } + return fs +} + +var ( + encodeStructValuePtr uintptr + decodeStructValuePtr uintptr +) + +//nolint:gochecknoinits +func init() { + encodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer() + decodeStructValuePtr = reflect.ValueOf(decodeStructValue).Pointer() +} + +func inlineFields(fs *fields, typ reflect.Type, f *field, tag string) { + inlinedFields := getFields(typ, tag).List + for _, field := range inlinedFields { + if _, ok := fs.Map[field.name]; ok { + // Don't inline shadowed fields. + continue + } + field.index = append(f.index, field.index...) + fs.Add(field) + } +} + +func shouldInline(fs *fields, typ reflect.Type, f *field, tag string) bool { + var encoder encoderFunc + var decoder decoderFunc + + if typ.Kind() == reflect.Struct { + encoder = f.encoder + decoder = f.decoder + } else { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + encoder = getEncoder(typ) + decoder = getDecoder(typ) + } + if typ.Kind() != reflect.Struct { + return false + } + } + + if reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr { + return false + } + if reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr { + return false + } + + inlinedFields := getFields(typ, tag).List + for _, field := range inlinedFields { + if _, ok := fs.Map[field.name]; ok { + // Don't auto inline if there are shadowed fields. + return false + } + } + + for _, field := range inlinedFields { + field.index = append(f.index, field.index...) 
+ fs.Add(field) + } + return true +} + +type isZeroer interface { + IsZero() bool +} + +func (e *Encoder) isEmptyValue(v reflect.Value) bool { + kind := v.Kind() + + for kind == reflect.Interface { + if v.IsNil() { + return true + } + v = v.Elem() + kind = v.Kind() + } + + if z, ok := v.Interface().(isZeroer); ok { + return nilable(kind) && v.IsNil() || z.IsZero() + } + + switch kind { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Struct: + structFields := structs.Fields(v.Type(), e.structTag) + fields := structFields.OmitEmpty(e, v) + return len(fields) == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Ptr: + return v.IsNil() + default: + return false + } +} + +func fieldByIndex(v reflect.Value, index []int) (_ reflect.Value, ok bool) { + if len(index) == 1 { + return v.Field(index[0]), true + } + + for i, idx := range index { + if i > 0 { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return v, false + } + v = v.Elem() + } + } + v = v.Field(idx) + } + + return v, true +} + +func fieldByIndexAlloc(v reflect.Value, index []int) reflect.Value { + if len(index) == 1 { + return v.Field(index[0]) + } + + for i, idx := range index { + if i > 0 { + var ok bool + v, ok = indirectNil(v) + if !ok { + return v + } + } + v = v.Field(idx) + } + + return v +} + +func indirectNil(v reflect.Value) (reflect.Value, bool) { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + return v, false + } + elemType := v.Type().Elem() + if elemType.Kind() != reflect.Struct { + return v, false + } + v.Set(cachedValue(elemType)) + } + v = v.Elem() + } + return v, true +} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/unsafe.go b/vendor/github.com/vmihailenco/msgpack/v5/unsafe.go similarity index 83% rename from vendor/github.com/vmihailenco/msgpack/v4/unsafe.go rename to vendor/github.com/vmihailenco/msgpack/v5/unsafe.go index 50c0da8b5b..192ac47920 100644 --- a/vendor/github.com/vmihailenco/msgpack/v4/unsafe.go +++ b/vendor/github.com/vmihailenco/msgpack/v5/unsafe.go @@ -7,7 +7,7 @@ import ( ) // bytesToString converts byte slice to string. -func bytesToString(b []byte) string { //nolint:deadcode,unused +func bytesToString(b []byte) string { return *(*string)(unsafe.Pointer(&b)) } diff --git a/vendor/github.com/vmihailenco/msgpack/v5/version.go b/vendor/github.com/vmihailenco/msgpack/v5/version.go new file mode 100644 index 0000000000..ca10205f29 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/v5/version.go @@ -0,0 +1,6 @@ +package msgpack + +// Version is the current release version. 
+func Version() string { + return "5.4.1" +} diff --git a/vendor/github.com/vmihailenco/tagparser/.travis.yml b/vendor/github.com/vmihailenco/tagparser/.travis.yml deleted file mode 100644 index ec53845232..0000000000 --- a/vendor/github.com/vmihailenco/tagparser/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -dist: xenial -sudo: false -language: go - -go: - - 1.11.x - - 1.12.x - - tip - -matrix: - allow_failures: - - go: tip - -env: - - GO111MODULE=on - -go_import_path: github.com/vmihailenco/tagparser - -before_install: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1 - -script: - - make - - golangci-lint run diff --git a/vendor/github.com/vmihailenco/tagparser/Makefile b/vendor/github.com/vmihailenco/tagparser/Makefile deleted file mode 100644 index fe9dc5bdba..0000000000 --- a/vendor/github.com/vmihailenco/tagparser/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -all: - go test ./... - go test ./... -short -race - go test ./... -run=NONE -bench=. -benchmem - env GOOS=linux GOARCH=386 go test ./... - go vet ./... - go get github.com/gordonklaus/ineffassign - ineffassign . diff --git a/vendor/github.com/vmihailenco/tagparser/README.md b/vendor/github.com/vmihailenco/tagparser/README.md deleted file mode 100644 index 411aa5444d..0000000000 --- a/vendor/github.com/vmihailenco/tagparser/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# Opinionated Golang tag parser - -[![Build Status](https://travis-ci.org/vmihailenco/tagparser.png?branch=master)](https://travis-ci.org/vmihailenco/tagparser) -[![GoDoc](https://godoc.org/github.com/vmihailenco/tagparser?status.svg)](https://godoc.org/github.com/vmihailenco/tagparser) - -## Installation - -Install: - -```shell -go get -u github.com/vmihailenco/tagparser -``` - -## Quickstart - -```go -func ExampleParse() { - tag := tagparser.Parse("some_name,key:value,key2:'complex value'") - fmt.Println(tag.Name) - fmt.Println(tag.Options) - // Output: some_name - // map[key:value key2:'complex value'] -} -``` diff --git a/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go b/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go deleted file mode 100644 index 2de1c6f7bd..0000000000 --- a/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go +++ /dev/null @@ -1,82 +0,0 @@ -package parser - -import ( - "bytes" - - "github.com/vmihailenco/tagparser/internal" -) - -type Parser struct { - b []byte - i int -} - -func New(b []byte) *Parser { - return &Parser{ - b: b, - } -} - -func NewString(s string) *Parser { - return New(internal.StringToBytes(s)) -} - -func (p *Parser) Bytes() []byte { - return p.b[p.i:] -} - -func (p *Parser) Valid() bool { - return p.i < len(p.b) -} - -func (p *Parser) Read() byte { - if p.Valid() { - c := p.b[p.i] - p.Advance() - return c - } - return 0 -} - -func (p *Parser) Peek() byte { - if p.Valid() { - return p.b[p.i] - } - return 0 -} - -func (p *Parser) Advance() { - p.i++ -} - -func (p *Parser) Skip(skip byte) bool { - if p.Peek() == skip { - p.Advance() - return true - } - return false -} - -func (p *Parser) SkipBytes(skip []byte) bool { - if len(skip) > len(p.b[p.i:]) { - return false - } - if !bytes.Equal(p.b[p.i:p.i+len(skip)], skip) { - return false - } - p.i += len(skip) - return true -} - -func (p *Parser) ReadSep(sep byte) ([]byte, bool) { - ind := bytes.IndexByte(p.b[p.i:], sep) - if ind == -1 { - b := p.b[p.i:] - p.i = len(p.b) - return b, false - } - - b := p.b[p.i : p.i+ind] - p.i += ind + 1 - return b, true -} diff --git 
a/vendor/github.com/vmihailenco/tagparser/v2/.travis.yml b/vendor/github.com/vmihailenco/tagparser/v2/.travis.yml new file mode 100644 index 0000000000..7194cd0010 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/.travis.yml @@ -0,0 +1,19 @@ +dist: xenial +language: go + +go: + - 1.14.x + - 1.15.x + - tip + +matrix: + allow_failures: + - go: tip + +env: + - GO111MODULE=on + +go_import_path: github.com/vmihailenco/tagparser + +before_install: + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1 diff --git a/vendor/github.com/vmihailenco/tagparser/LICENSE b/vendor/github.com/vmihailenco/tagparser/v2/LICENSE similarity index 100% rename from vendor/github.com/vmihailenco/tagparser/LICENSE rename to vendor/github.com/vmihailenco/tagparser/v2/LICENSE diff --git a/vendor/github.com/vmihailenco/tagparser/v2/Makefile b/vendor/github.com/vmihailenco/tagparser/v2/Makefile new file mode 100644 index 0000000000..0b1b59595a --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/Makefile @@ -0,0 +1,9 @@ +all: + go test ./... + go test ./... -short -race + go test ./... -run=NONE -bench=. -benchmem + env GOOS=linux GOARCH=386 go test ./... + go vet ./... + go get github.com/gordonklaus/ineffassign + ineffassign . + golangci-lint run diff --git a/vendor/github.com/vmihailenco/tagparser/v2/README.md b/vendor/github.com/vmihailenco/tagparser/v2/README.md new file mode 100644 index 0000000000..c0259de565 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/README.md @@ -0,0 +1,24 @@ +# Opinionated Golang tag parser + +[![Build Status](https://travis-ci.org/vmihailenco/tagparser.png?branch=master)](https://travis-ci.org/vmihailenco/tagparser) +[![GoDoc](https://godoc.org/github.com/vmihailenco/tagparser?status.svg)](https://godoc.org/github.com/vmihailenco/tagparser) + +## Installation + +Install: + +```shell +go get github.com/vmihailenco/tagparser/v2 +``` + +## Quickstart + +```go +func ExampleParse() { + tag := tagparser.Parse("some_name,key:value,key2:'complex value'") + fmt.Println(tag.Name) + fmt.Println(tag.Options) + // Output: some_name + // map[key:value key2:'complex value'] +} +``` diff --git a/vendor/github.com/vmihailenco/tagparser/v2/internal/parser/parser.go b/vendor/github.com/vmihailenco/tagparser/v2/internal/parser/parser.go new file mode 100644 index 0000000000..21a9bc7f74 --- /dev/null +++ b/vendor/github.com/vmihailenco/tagparser/v2/internal/parser/parser.go @@ -0,0 +1,82 @@ +package parser + +import ( + "bytes" + + "github.com/vmihailenco/tagparser/v2/internal" +) + +type Parser struct { + b []byte + i int +} + +func New(b []byte) *Parser { + return &Parser{ + b: b, + } +} + +func NewString(s string) *Parser { + return New(internal.StringToBytes(s)) +} + +func (p *Parser) Bytes() []byte { + return p.b[p.i:] +} + +func (p *Parser) Valid() bool { + return p.i < len(p.b) +} + +func (p *Parser) Read() byte { + if p.Valid() { + c := p.b[p.i] + p.Advance() + return c + } + return 0 +} + +func (p *Parser) Peek() byte { + if p.Valid() { + return p.b[p.i] + } + return 0 +} + +func (p *Parser) Advance() { + p.i++ +} + +func (p *Parser) Skip(skip byte) bool { + if p.Peek() == skip { + p.Advance() + return true + } + return false +} + +func (p *Parser) SkipBytes(skip []byte) bool { + if len(skip) > len(p.b[p.i:]) { + return false + } + if !bytes.Equal(p.b[p.i:p.i+len(skip)], skip) { + return false + } + p.i += len(skip) + return true +} + +func (p *Parser) ReadSep(sep byte) ([]byte, bool) { + ind 
:= bytes.IndexByte(p.b[p.i:], sep) + if ind == -1 { + b := p.b[p.i:] + p.i = len(p.b) + return b, false + } + + b := p.b[p.i : p.i+ind] + p.i += ind + 1 + return b, true +} diff --git a/vendor/github.com/vmihailenco/tagparser/internal/safe.go b/vendor/github.com/vmihailenco/tagparser/v2/internal/safe.go similarity index 100% rename from vendor/github.com/vmihailenco/tagparser/internal/safe.go rename to vendor/github.com/vmihailenco/tagparser/v2/internal/safe.go diff --git a/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go b/vendor/github.com/vmihailenco/tagparser/v2/internal/unsafe.go similarity index 100% rename from vendor/github.com/vmihailenco/tagparser/internal/unsafe.go rename to vendor/github.com/vmihailenco/tagparser/v2/internal/unsafe.go diff --git a/vendor/github.com/vmihailenco/tagparser/tagparser.go b/vendor/github.com/vmihailenco/tagparser/v2/tagparser.go similarity index 88% rename from vendor/github.com/vmihailenco/tagparser/tagparser.go rename to vendor/github.com/vmihailenco/tagparser/v2/tagparser.go index 56b918011b..5002e6453e 100644 --- a/vendor/github.com/vmihailenco/tagparser/tagparser.go +++ b/vendor/github.com/vmihailenco/tagparser/v2/tagparser.go @@ -1,7 +1,9 @@ package tagparser import ( - "github.com/vmihailenco/tagparser/internal/parser" + "strings" + + "github.com/vmihailenco/tagparser/v2/internal/parser" ) type Tag struct { @@ -31,6 +33,9 @@ type tagParser struct { } func (p *tagParser) setTagOption(key, value string) { + key = strings.TrimSpace(key) + value = strings.TrimSpace(value) + if !p.hasName { p.hasName = true if key == "" { @@ -79,7 +84,6 @@ func (p *tagParser) parseKey() { func (p *tagParser) parseValue() { const quote = '\'' - c := p.Peek() if c == quote { p.Skip(quote) @@ -134,10 +138,7 @@ loop: func (p *tagParser) parseQuotedValue() { const quote = '\'' - var b []byte - b = append(b, quote) - for p.Valid() { bb, ok := p.ReadSep(quote) if !ok { @@ -145,6 +146,8 @@ func (p *tagParser) parseQuotedValue() { break } + // keep the escaped single-quote, and continue until we've found the + // one that isn't. if len(bb) > 0 && bb[len(bb)-1] == '\\' { b = append(b, bb[:len(bb)-1]...) b = append(b, quote) @@ -152,7 +155,6 @@ func (p *tagParser) parseQuotedValue() { } b = append(b, bb...) 
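+ // Editorial note: the net effect is that a raw quoted value like + // 'don\'t' accumulates as don't; the backslash is dropped and the + // escaped quote kept.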
- b = append(b, quote) break } @@ -162,15 +164,3 @@ func (p *tagParser) parseQuotedValue() { } p.parseKey() } - -func Unquote(s string) (string, bool) { - const quote = '\'' - - if len(s) < 2 { - return s, false - } - if s[0] == quote && s[len(s)-1] == quote { - return s[1 : len(s)-1], true - } - return s, false -} diff --git a/vendor/github.com/xen0n/gosmopolitan/.editorconfig b/vendor/github.com/xen0n/gosmopolitan/.editorconfig new file mode 100644 index 0000000000..0c0f7e7e28 --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig is awesome: http://EditorConfig.org + +root = true + +[*] +indent_style = space +trim_trailing_whitespace = true +end_of_line = lf +insert_final_newline = true +charset = utf-8 + +[{*.sh,*.md}] +indent_size = 4 + +[{*.yaml,*.yml}] +indent_size = 2 + +# hard tabs for Go and Makefile per best practice of file format +[*.go] +indent_style = tab + +[Makefile] +indent_style = tab diff --git a/vendor/github.com/xen0n/gosmopolitan/.gitignore b/vendor/github.com/xen0n/gosmopolitan/.gitignore new file mode 100644 index 0000000000..1a1abaa202 --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/.gitignore @@ -0,0 +1,188 @@ +# ignore the local build artifact +/gosmopolitan + +# and test artifacts +/coverage.txt + +# the following are auto-generated + +# Created by https://www.toptal.com/developers/gitignore/api/go,visualstudiocode,vim,goland +# Edit at https://www.toptal.com/developers/gitignore?templates=go,visualstudiocode,vim,goland + +### Go ### +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +### GoLand ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# AWS User-specific +.idea/**/aws.xml + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# SonarLint plugin +.idea/sonarlint/ + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### GoLand Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +# https://plugins.jetbrains.com/plugin/7973-sonarlint +.idea/**/sonarlint/ + +# SonarQube Plugin +# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator-enh.xml +.idea/**/markdown-navigator/ + +# Cache file creation bug +# See https://youtrack.jetbrains.com/issue/JBR-2257 +.idea/$CACHE_FILE$ + +# CodeStream plugin +# https://plugins.jetbrains.com/plugin/12206-codestream +.idea/codestream.xml + +# Azure Toolkit for IntelliJ plugin +# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij +.idea/**/azureSettings.xml + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### VisualStudioCode ### +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +!.vscode/*.code-snippets + +# Local History for Visual Studio Code +.history/ + +# Built Visual Studio Code Extensions +*.vsix + +### VisualStudioCode Patch ### +# Ignore all local history of files +.history +.ionide + +# End of https://www.toptal.com/developers/gitignore/api/go,visualstudiocode,vim,goland diff --git a/vendor/github.com/xen0n/gosmopolitan/.golangci.yml b/vendor/github.com/xen0n/gosmopolitan/.golangci.yml new file mode 100644 index 0000000000..0bce12501a --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/.golangci.yml @@ -0,0 +1,31 @@ +run: + go: '1.19' + modules-download-mode: readonly + +linters: + enable: + - goheader + - goimports + - gosec + - gosimple + - lll + - nakedret + - revive + - stylecheck + - unused + +linters-settings: + goheader: + template: |- + SPDX-License-Identifier: GPL-3.0-or-later + goimports: + local-prefixes: github.com/xen0n/gosmopolitan + gosimple: + go: '1.19' + lll: + line-length: 120 + tab-width: 4 + nakedret: + max-func-lines: 1 + stylecheck: + go: '1.19' diff --git a/vendor/github.com/xen0n/gosmopolitan/LICENSE b/vendor/github.com/xen0n/gosmopolitan/LICENSE new file mode 100644 index 0000000000..94a9ed024d --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. 
+ + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. 
+ + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box".
+ + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. diff --git a/vendor/github.com/xen0n/gosmopolitan/README.md b/vendor/github.com/xen0n/gosmopolitan/README.md new file mode 100644 index 0000000000..86a5e64e00 --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/README.md @@ -0,0 +1,84 @@ +# gosmopolitan + +![GitHub Workflow Status (main branch)](https://img.shields.io/github/actions/workflow/status/xen0n/gosmopolitan/go.yml?branch=main) +![Codecov](https://img.shields.io/codecov/c/gh/xen0n/gosmopolitan) +![GitHub license info](https://img.shields.io/github/license/xen0n/gosmopolitan) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/xen0n/gosmopolitan) +[![Go Report Card](https://goreportcard.com/badge/github.com/xen0n/gosmopolitan)](https://goreportcard.com/report/github.com/xen0n/gosmopolitan) +[![Go Reference](https://pkg.go.dev/badge/github.com/xen0n/gosmopolitan.svg)](https://pkg.go.dev/github.com/xen0n/gosmopolitan) + +[简体中文](./README.zh-Hans.md) + +`gosmopolitan` checks your Go codebase for code smells that may prove to be a +hindrance to internationalization ("i18n") and/or localization ("l10n"). + +The name is a wordplay on "cosmopolitan". + +## Checks + +Currently `gosmopolitan` checks for the following anti-patterns (a short +illustrative sketch follows the caveats list below): + +* Occurrences of string literals containing characters from certain writing + systems. + + Existence of such strings often means the relevant logic is hard to + internationalize, or at least requires special care when doing i18n/l10n. + +* Usages of `time.Local`. + + An internationalized app or library should almost never process time and + date values in the timezone in which it is running; instead one should use + the respective user preference, or the timezone as dictated by the domain + logic. + +Note that local times are produced in a lot more ways than via direct casts to +`time.Local` alone, such as: + +* `time.LoadLocation("Local")` +* received from a `time.Ticker` +* functions explicitly documented to return local times + * `time.Now()` + * `time.Unix()` + * `time.UnixMilli()` + * `time.UnixMicro()` + +Proper identification of these use cases requires a fairly complete dataflow +analysis pass, which is not currently implemented. In addition, right now you +have to pay close attention to externally-provided time values (such as those +coming from a framework like Gin or gRPC) as they are not properly tracked either. + +## Caveats + +Note that the checks implemented here are only suitable for codebases with the +following characteristics, and may not suit your particular project's needs: + +* Originally developed for an audience using non-Latin writing system(s), +* Returns bare strings that are intended for humans and contain such non-Latin + characters, and +* May occasionally (or frequently) refer to the system timezone, but is + architecturally forbidden/discouraged from just treating the system timezone + as the reference timezone.
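To make the two checks concrete, here is a minimal, hypothetical snippet (all names invented for illustration) that the analyzer, with its default `watchforscripts=Han` setting, would flag twice: once for the Han-script string literal, and once for the `time.Local` reference:

```go
package greet

import (
	"fmt"
	"time"
)

// Greeting is flagged because the string literal contains runes in the
// Han script ("string literal contains rune in Han script").
func Greeting() string {
	return "你好,世界"
}

// Timestamp is flagged for its reference to time.Local ("usage of
// time.Local"): it renders the time in whatever timezone the process
// happens to run in, rather than a user- or domain-chosen one.
func Timestamp(t time.Time) string {
	return fmt.Sprint(t.In(time.Local))
}
```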
+ +For example, the lints may prove valuable if you're revamping a web service +originally targeting the Chinese market (hence producing strings with Chinese +characters all over the place) to be more i18n-aware. Conversely, if you want +to identify some of the i18n-naïve places in an English-only app, the linter +will output nothing. + +## golangci-lint integration + +`gosmopolitan` support [has been merged][gcl-pr] into [`golangci-lint`][gcl-home], +and will be usable out-of-the-box in golangci-lint v1.53.0 or later. + +Due to the opinionated coding style this linter advocates and checks for, if +you have `enable-all: true` in your `golangci.yml` and your project deals a +lot with Chinese text and/or `time.Local`, then you'll get flooded with lints +when you upgrade to golangci-lint v1.53.0. Just disable this linter (and +better yet, move away from `enable-all: true`) if the style does not suit your +specific use case. + +[gcl-pr]: https://github.com/golangci/golangci-lint/pull/3458 +[gcl-home]: https://golangci-lint.run + +## License + +`gosmopolitan` is licensed under the GPL license, version 3 or later. diff --git a/vendor/github.com/xen0n/gosmopolitan/README.zh-Hans.md b/vendor/github.com/xen0n/gosmopolitan/README.zh-Hans.md new file mode 100644 index 0000000000..682d10880e --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/README.zh-Hans.md @@ -0,0 +1,69 @@ +# gosmopolitan + +![GitHub Workflow Status (main branch)](https://img.shields.io/github/actions/workflow/status/xen0n/gosmopolitan/go.yml?branch=main) +![Codecov](https://img.shields.io/codecov/c/gh/xen0n/gosmopolitan) +![GitHub license info](https://img.shields.io/github/license/xen0n/gosmopolitan) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/xen0n/gosmopolitan) +[![Go Report Card](https://goreportcard.com/badge/github.com/xen0n/gosmopolitan)](https://goreportcard.com/report/github.com/xen0n/gosmopolitan) +[![Go Reference](https://pkg.go.dev/badge/github.com/xen0n/gosmopolitan.svg)](https://pkg.go.dev/github.com/xen0n/gosmopolitan) + +[English](./README.md) + +用 `gosmopolitan` 检查你的 Go 代码库里有没有国际化(“i18n”)或者本地化(“l10n”)的阻碍。 + +项目名字来自“cosmopolitan”的文字游戏。 + +## 检查 + +`gosmopolitan` 目前会检查以下的反模式(anti-patterns): + +* 含有来自特定书写系统字符的字符串字面量(string literals)。 + + 项目中存在这种字符串,通常意味着相关的逻辑不便于国际化,或者至少在国际化/本地化适配过程中会涉及特殊对待。 + +* `time.Local` 的使用。 + + 支持国际化的应用或程序库,几乎永远不应以程序当前运行环境的时区来处理时间、日期数据。 + 相反,在这种场景下,开发者应该使用相应的用户偏好,或者按照领域逻辑确定应该使用的时区。 + +注意:除了直接向 `time.Local` 转换之外,还有很多其他写法会产生本地时区的时刻,例如: + +* `time.LoadLocation("Local")` +* 从 `time.Ticker` 收到的值 +* 文档中明确了会返回本地时刻的函数 + * `time.Now()` + * `time.Unix()` + * `time.UnixMilli()` + * `time.UnixMicro()` + +为了正确识别这些使用场景,需要有一个相当完善的数据流分析 pass,目前还没实现。 +此外,当前您还需要自行密切注意从外部传入的时刻值(例如从您使用的 Gin 或 gRPC +之类框架传来的那些),因为这些值当前也没有被正确跟踪。 + +## 注意事项 + +请注意,本库中实现的检查仅适用于具有以下性质的代码库,因此可能不适用于您的具体场景: + +* 项目原先是为使用非拉丁字母书写系统的受众群体开发的, +* 项目会返回包含这些非拉丁字母字符的裸的字符串(即,未经处理或变换的), +* 项目可能偶尔(或者经常)引用程序当前运行环境的系统时区,但项目架构上禁止或不建议把系统时区直接作为业务参考时区使用。 + +举个例子:如果您在翻新一个本来面向中国用户群体(因此到处都在产生含有汉字的字符串)的 +web 服务,以使其更加国际化,这里的 lints 可能会很有价值。 +反之,如果您想在一个仅支持英语的应用里,寻找其中不利于国际化的那部分写法,本 +linter 则什么都不会输出。 + +## 与 golangci-lint 集成 + +`gosmopolitan` 支持[已经被合并][gcl-pr]入 [`golangci-lint`][gcl-home] 上游,在 golangci-lint v1.53.0 及以后的版本可以开箱即用。 + +[gcl-pr]: https://github.com/golangci/golangci-lint/pull/3458 +[gcl-home]: https://golangci-lint.run + +由于本 linter 倡导和检查的代码风格带有鲜明立场,如果您在 `golangci.yml` 开了 +`enable-all: true` 并且您的项目处理很多中文文本或者 `time.Local`,那么您一旦升级到 +golangci-lint v1.53.0 就将被 lints 淹没。如果这种代码风格不适合您的具体使用场景,直接禁用本 linter(或者更彻底一些,不要
`enable-all: true` 了)就好。 + +## 许可证 + +`gosmopolitan` 以 GPL v3 或更新的版本许可使用。 diff --git a/vendor/github.com/xen0n/gosmopolitan/lib.go b/vendor/github.com/xen0n/gosmopolitan/lib.go new file mode 100644 index 0000000000..67b1151c71 --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/lib.go @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package gosmopolitan + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "regexp" + "strings" + "unicode" + + "golang.org/x/text/runes" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const analyzerName = "gosmopolitan" +const analyzerDoc = "Report certain i18n/l10n anti-patterns in your Go codebase" + +type AnalyzerConfig struct { + // LookAtTests is a flag controlling whether the lints are going to look at + // test files, despite other config knobs of the Go analysis tooling + // framework telling us otherwise. + // + // By default gosmopolitan does not look at test files, because i18n-aware + // apps most probably have many unmarked strings in test cases, and names + // and descriptions *of* test cases are probably in the program's original + // natural language too. + LookAtTests bool + // EscapeHatches is optionally a list of fully qualified names, in the + // `(full/pkg/path).name` form, to act as "i18n escape hatches". Inside + // call-like expressions to those names, the string literal script check + // is ignored. + // + // With this functionality in place, you can use type aliases like + // `type R = string` as markers, or have explicitly i18n-aware functions + // exempt from the checks. + EscapeHatches []string + // WatchForScripts is optionally a list of Unicode script names to watch + // for any usage in string literals. The range of supported scripts is + // determined by the [unicode.Scripts] map and values are case-sensitive. + WatchForScripts []string + // AllowTimeLocal is a flag controlling whether usages of [time.Local] are + // allowed (i.e. not reported).
+ AllowTimeLocal bool +} + +func NewAnalyzer() *analysis.Analyzer { + var lookAtTests bool + var escapeHatchesStr string + var watchForScriptsStr string + var allowTimeLocal bool + + a := &analysis.Analyzer{ + Name: analyzerName, + Doc: analyzerDoc, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + Run: func(p *analysis.Pass) (any, error) { + cfg := AnalyzerConfig{ + LookAtTests: lookAtTests, + EscapeHatches: strings.Split(escapeHatchesStr, ","), + WatchForScripts: strings.Split(watchForScriptsStr, ","), + AllowTimeLocal: allowTimeLocal, + } + pctx := processCtx{cfg: &cfg, p: p} + return pctx.run() + }, + RunDespiteErrors: false, + } + + a.Flags.BoolVar(&lookAtTests, + "lookattests", + false, + "also check the test files", + ) + a.Flags.StringVar( + &escapeHatchesStr, + "escapehatches", + "", + "comma-separated list of fully qualified names to act as i18n escape hatches", + ) + a.Flags.StringVar( + &watchForScriptsStr, + "watchforscripts", + "Han", + "comma-separated list of Unicode scripts to watch out for occurrence in string literals", + ) + a.Flags.BoolVar(&allowTimeLocal, + "allowtimelocal", + false, + "allow time.Local usages", + ) + + return a +} + +func NewAnalyzerWithConfig(cfg *AnalyzerConfig) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: analyzerName, + Doc: analyzerDoc, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + Run: func(p *analysis.Pass) (any, error) { + pctx := processCtx{cfg: cfg, p: p} + return pctx.run() + }, + RunDespiteErrors: false, + } +} + +var DefaultAnalyzer = NewAnalyzer() + +func validateUnicodeScriptName(name string) error { + if _, ok := unicode.Scripts[name]; !ok { + return fmt.Errorf("invalid Unicode script name: %s", name) + } + return nil +} + +// example input: ["Han", "Arabic"] +// example output: `\p{Han}|\p{Arabic}` +// assumes len(scriptNames) > 0 +func makeUnicodeScriptMatcherRegexpString(scriptNames []string) string { + var sb strings.Builder + for i, s := range scriptNames { + if i > 0 { + sb.WriteRune('|') + } + sb.WriteString(`\p{`) + sb.WriteString(s) + sb.WriteRune('}') + } + return sb.String() +} + +func makeUnicodeScriptMatcherRegexp(scriptNames []string) (*regexp.Regexp, error) { + return regexp.Compile(makeUnicodeScriptMatcherRegexpString(scriptNames)) +} + +type processCtx struct { + cfg *AnalyzerConfig + p *analysis.Pass +} + +func mapSlice[T any, U any](x []T, fn func(T) U) []U { + if x == nil { + return nil + } + y := make([]U, len(x)) + for i, v := range x { + y[i] = fn(v) + } + return y +} + +func sliceToSet[T comparable](x []T) map[T]struct{} { + // lo.SliceToMap(x, func(k T) (T, struct{}) { return k, struct{}{} }) + y := make(map[T]struct{}, len(x)) + for _, k := range x { + y[k] = struct{}{} + } + return y +} + +func getFullyQualifiedName(x types.Object) string { + pkg := x.Pkg() + if pkg == nil { + return x.Name() + } + return fmt.Sprintf("%s.%s", pkg.Path(), x.Name()) +} + +// if input is in the "(%s).%s" form, remove the parens, else return the +// unchanged input +// +// this is for maintaining compatibility with the previous FQN notation that +// was born out of my confusion (the previous notation, while commonly seen, +// seems to be only for methods or pointer receiver types; the parens-less +// form is in fact unambiguous, because Go identifiers can't contain periods.) 
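+// For illustration only (hypothetical names): once normalized by this +// function, `(example.com/mypkg).MyStr` and `example.com/mypkg.MyStr` both +// denote the same escape hatch.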
+func unquoteInputFQN(x string) string { + if len(x) == 0 || x[0] != '(' { + return x + } + + before, after, found := strings.Cut(x[1:], ")") + if !found { + // malformed input: string in "(xxxxx" form with unclosed parens! + // in this case, only removing the opening parens might be better than + // doing nothing after all + return x[1:] + } + + // at this point, + // input: "(foo).bar" + // before: "foo" + // after: ".bar" + return before + after +} + +func (c *processCtx) run() (any, error) { + escapeHatchesSet := sliceToSet(mapSlice(c.cfg.EscapeHatches, unquoteInputFQN)) + + if len(c.cfg.WatchForScripts) == 0 { + c.cfg.WatchForScripts = []string{"Han"} + } + + for _, s := range c.cfg.WatchForScripts { + if err := validateUnicodeScriptName(s); err != nil { + return nil, err + } + } + + charRE, err := makeUnicodeScriptMatcherRegexp(c.cfg.WatchForScripts) + if err != nil { + return nil, err + } + + usq := newUnicodeScriptQuerier(c.cfg.WatchForScripts) + + insp := c.p.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // support ignoring the test files, because test files could be full of + // i18n and l10n fixtures, and we want to focus on the actual run-time + // logic + // + // TODO: is there a way to both ignore test files earlier, and make use of + // inspect.Analyzer's cached results? currently Inspector doesn't provide + // a way to selectively traverse some files' AST but not others. + isBelongingToTestFiles := func(n ast.Node) bool { + return strings.HasSuffix(c.p.Fset.File(n.Pos()).Name(), "_test.go") + } + + shouldSkipTheContainingFile := func(n ast.Node) bool { + if c.cfg.LookAtTests { + return false + } + return isBelongingToTestFiles(n) + } + + insp.Nodes(nil, func(n ast.Node, push bool) bool { + // we only need to look at each node once + if !push { + return false + } + + if shouldSkipTheContainingFile(n) { + return false + } + + // skip blocks that can contain string literals but are not otherwise + // interesting for us + switch n.(type) { + case *ast.ImportSpec, *ast.TypeSpec: + // import blocks, type declarations + return false + } + + // and don't look inside escape hatches + referentFQN := c.getFullyQualifiedNameOfReferent(n) + if referentFQN != "" { + _, isEscapeHatch := escapeHatchesSet[referentFQN] + // if isEscapeHatch: don't recurse (false) + return !isEscapeHatch + } + + // check only string literals + lit, ok := n.(*ast.BasicLit) + if !ok { + return true + } + if lit.Kind != token.STRING { + return true + } + + // report string literals containing characters of given script (in + // the sense of "writing system") + if charRE.MatchString(lit.Value) { + match := charRE.FindIndex([]byte(lit.Value)) + matchCh := []byte(lit.Value)[match[0]:match[1]] + scriptName := usq.queryScriptForRuneBytes(matchCh) + + c.p.Report(analysis.Diagnostic{ + Pos: lit.Pos() + token.Pos(match[0]), + End: lit.Pos() + token.Pos(match[1]), + Message: fmt.Sprintf("string literal contains rune in %s script", scriptName), + }) + } + + return true + }) + + if !c.cfg.AllowTimeLocal { + // check time.Local usages + insp.Nodes([]ast.Node{(*ast.Ident)(nil)}, func(n ast.Node, push bool) bool { + // we only need to look at each node once + if !push { + return false + } + + if shouldSkipTheContainingFile(n) { + return false + } + + ident := n.(*ast.Ident) + + d := c.p.TypesInfo.ObjectOf(ident) + if d == nil || d.Pkg() == nil { + return true + } + + if d.Pkg().Path() == "time" && d.Name() == "Local" { + c.p.Report(analysis.Diagnostic{ + Pos: n.Pos(), + End: n.End(), + Message: "usage of time.Local", +
}) + } + + return true + }) + } + + return nil, nil +} + +func (c *processCtx) getFullyQualifiedNameOfReferent(n ast.Node) string { + var ident *ast.Ident + switch e := n.(type) { + case *ast.CallExpr: + ident = getIdentOfTypeOfExpr(e.Fun) + + case *ast.CompositeLit: + ident = getIdentOfTypeOfExpr(e.Type) + + default: + return "" + } + + referent := c.p.TypesInfo.Uses[ident] + if referent == nil { + return "" + } + + return getFullyQualifiedName(referent) +} + +func getIdentOfTypeOfExpr(e ast.Expr) *ast.Ident { + switch x := e.(type) { + case *ast.Ident: + return x + case *ast.SelectorExpr: + return x.Sel + } + return nil +} + +type unicodeScriptQuerier struct { + sets map[string]runes.Set +} + +func newUnicodeScriptQuerier(scriptNames []string) *unicodeScriptQuerier { + sets := make(map[string]runes.Set, len(scriptNames)) + for _, s := range scriptNames { + sets[s] = runes.In(unicode.Scripts[s]) + } + return &unicodeScriptQuerier{ + sets: sets, + } +} + +func (x *unicodeScriptQuerier) queryScriptForRuneBytes(b []byte) string { + r := []rune(string(b))[0] + for s, set := range x.sets { + if set.Contains(r) { + return s + } + } + return "" +} diff --git a/vendor/github.com/yagipy/maintidx/.gitignore b/vendor/github.com/yagipy/maintidx/.gitignore new file mode 100644 index 0000000000..a676215fa9 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/.gitignore @@ -0,0 +1,2 @@ +.idea +bin diff --git a/vendor/github.com/yagipy/maintidx/LICENSE b/vendor/github.com/yagipy/maintidx/LICENSE new file mode 100644 index 0000000000..b94c2ede81 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Hiroyuki Yagihashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/yagipy/maintidx/Makefile b/vendor/github.com/yagipy/maintidx/Makefile new file mode 100644 index 0000000000..14b8fc9797 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/Makefile @@ -0,0 +1,2 @@ +build: + go build -o bin/maintidx ./cmd/maintidx diff --git a/vendor/github.com/yagipy/maintidx/README.md b/vendor/github.com/yagipy/maintidx/README.md new file mode 100644 index 0000000000..8d5e26df08 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/README.md @@ -0,0 +1,45 @@ +# maintidx +`maintidx` measures the maintainability index of each function. 
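As a hedged aside (not part of the vendored file): the index this linter reports appears to follow the normalized maintainability index described in the Microsoft docs linked just below, `MAX(0, (171 - 5.2*ln(HalsteadVolume) - 0.23*CyclomaticComplexity - 16.2*ln(LinesOfCode)) * 100/171)`. A minimal Go sketch of that arithmetic, with invented example numbers, would be:

```go
package main

import (
	"fmt"
	"math"
)

// maintIdx computes the normalized maintainability index per the Microsoft
// docs formula; this is an illustration, not this package's exact code.
func maintIdx(halsteadVolume float64, cyclomatic, linesOfCode int) float64 {
	raw := 171 -
		5.2*math.Log(halsteadVolume) - // ln of the Halstead volume
		0.23*float64(cyclomatic) - // cyclomatic complexity term
		16.2*math.Log(float64(linesOfCode)) // ln of the line count
	return math.Max(0, raw*100/171) // clamp at 0, rescale to 0..100
}

func main() {
	// e.g. a smallish function: volume 100, complexity 3, 15 lines
	fmt.Printf("MI = %.1f\n", maintIdx(100, 3, 15))
}
```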
+https://docs.microsoft.com/en-us/visualstudio/code-quality/code-metrics-maintainability-index-range-and-meaning + +## Installation +### Go version < 1.16 +```shell +go get -u github.com/yagipy/maintidx/cmd/maintidx +``` + +### Go version 1.16+ +```shell +go install github.com/yagipy/maintidx/cmd/maintidx +``` + +## Usage +### standalone +```shell +maintidx ./... +``` + +### with go run +No installation required +```shell +go run github.com/yagipy/maintidx/cmd/maintidx ./... +``` + +### with go vet +```shell +go vet -vettool=`which maintidx` ./... +``` + +## Flag +```shell +Flags: + -under int + show functions with maintainability index < N only. (default 20) +``` + +## TODO +- [ ] Setup execute env on container +- [ ] Impl cyc.Cyc.Calc() +- [ ] Move maintidx.Visitor.PrintHalstVol to halstval package +- [ ] Consider the necessity of halstvol.incrIfAllTrue +- [ ] Test under pkg file diff --git a/vendor/github.com/yagipy/maintidx/maintidx.go b/vendor/github.com/yagipy/maintidx/maintidx.go new file mode 100644 index 0000000000..31ad9ca0c2 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/maintidx.go @@ -0,0 +1,63 @@ +package maintidx + +import ( + "go/ast" + "go/token" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const doc = "maintidx measures the maintainability index of each function." + +var Analyzer = &analysis.Analyzer{ + Name: "maintidx", + Doc: doc, + Run: run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +var under int + +func init() { + Analyzer.Flags.IntVar(&under, "under", 20, "show functions with maintainability index < N only.") +} + +func run(pass *analysis.Pass) (interface{}, error) { + i := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + i.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.FuncDecl: + v := analyze(n) + + v.Coef.Cyc.Calc() + v.Coef.HalstVol.Calc() + v.calc(loc(pass.Fset, n)) + if v.MaintIdx < under { + pass.Reportf(n.Pos(), "Function name: %v, Cyclomatic Complexity: %v, Halstead Volume: %0.2f, Maintainability Index: %v", n.Name, v.Coef.Cyc.Val, v.Coef.HalstVol.Val, v.MaintIdx) + } + } + }) + + return nil, nil +} + +func analyze(n ast.Node) Visitor { + v := NewVisitor() + ast.Walk(v, n) + return *v +} + +func loc(fs *token.FileSet, n *ast.FuncDecl) int { + f := fs.File(n.Pos()) + startLine := f.Line(n.Pos()) + endLine := f.Line(n.End()) + return endLine - startLine + 1 +} diff --git a/vendor/github.com/yagipy/maintidx/pkg/cyc/cyc.go b/vendor/github.com/yagipy/maintidx/pkg/cyc/cyc.go new file mode 100644 index 0000000000..9ea009106b --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/pkg/cyc/cyc.go @@ -0,0 +1,36 @@ +package cyc + +import ( + "go/ast" + "go/token" +) + +type Cyc struct { + Val int + Coef Coef +} + +type Coef struct{} + +func (c *Cyc) Analyze(n ast.Node) { + switch n := n.(type) { + case *ast.IfStmt, *ast.ForStmt, *ast.RangeStmt: + c.Val++ + case *ast.CaseClause: + if n.List != nil { + c.Val++ + } + case *ast.CommClause: + if n.Comm != nil { + c.Val++ + } + case *ast.BinaryExpr: + if n.Op == token.LAND || n.Op == token.LOR { + c.Val++ + } + } +} + +// TODO: Implement +func (c *Cyc) Calc() { +} diff --git a/vendor/github.com/yagipy/maintidx/pkg/halstvol/halstvol.go b/vendor/github.com/yagipy/maintidx/pkg/halstvol/halstvol.go new file mode 100644 index 0000000000..f0212759b1 --- /dev/null +++ 
b/vendor/github.com/yagipy/maintidx/pkg/halstvol/halstvol.go @@ -0,0 +1,71 @@ +package halstvol + +import ( + "go/ast" + "math" +) + +type HalstVol struct { + Val float64 + Coef Coef +} + +type Coef struct { + Opt map[string]int + Opd map[string]int +} + +func (v *HalstVol) Analyze(n ast.Node) { + switch n := n.(type) { + case *ast.FuncDecl, *ast.GenDecl: + v.handleDecl(n) + case *ast.ParenExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr, *ast.CallExpr, *ast.StarExpr, *ast.UnaryExpr, *ast.BinaryExpr, *ast.KeyValueExpr: + v.handleExpr(n) + case *ast.BasicLit, *ast.CompositeLit: + v.handleLit(n) + case *ast.Ident: + v.handleIdent(n) + case *ast.Ellipsis: + incrIfAllTrue(v.Coef.Opt, "...", []bool{n.Ellipsis.IsValid()}) + case *ast.FuncType: + incrIfAllTrue(v.Coef.Opt, "func", []bool{n.Func.IsValid()}) + v.Coef.Opt["()"]++ + case *ast.ChanType: + incrIfAllTrue(v.Coef.Opt, "chan", []bool{n.Begin.IsValid()}) + incrIfAllTrue(v.Coef.Opt, "<-", []bool{n.Arrow.IsValid()}) + case *ast.SendStmt, *ast.IncDecStmt, *ast.AssignStmt, *ast.GoStmt, *ast.DeferStmt, *ast.ReturnStmt, *ast.BranchStmt, *ast.BlockStmt, *ast.IfStmt, *ast.SwitchStmt, *ast.SelectStmt, *ast.ForStmt, *ast.RangeStmt: + v.handleStmt(n) + case *ast.CaseClause: + v.handleCaseClause(n) + } +} + +func (v *HalstVol) Calc() { + distOpt := len(v.Coef.Opt) + distOpd := len(v.Coef.Opd) + + var sumOpt, sumOpd int + + for _, val := range v.Coef.Opt { + sumOpt += val + } + + for _, val := range v.Coef.Opd { + sumOpd += val + } + + vocab := distOpt + distOpd + length := sumOpt + sumOpd + + v.Val = float64(length) * math.Log2(float64(vocab)) +} + +// TODO: Consider the necessity +func incrIfAllTrue(coef map[string]int, sym string, cond []bool) { + for _, ok := range cond { + if !ok { + return + } + } + coef[sym]++ +} diff --git a/vendor/github.com/yagipy/maintidx/pkg/halstvol/handle.go b/vendor/github.com/yagipy/maintidx/pkg/halstvol/handle.go new file mode 100644 index 0000000000..9f5e33500d --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/pkg/halstvol/handle.go @@ -0,0 +1,151 @@ +package halstvol + +import "go/ast" + +func (v *HalstVol) handleDecl(decl ast.Node) { + switch n := decl.(type) { + case *ast.FuncDecl: + if n.Recv == nil { + // In the case of receiver functions, the function name is incremented in *ast.Ident + v.Coef.Opt[n.Name.Name]++ + } else { + v.Coef.Opt["()"]++ + } + case *ast.GenDecl: + if n.Lparen.IsValid() && n.Rparen.IsValid() { + v.Coef.Opt["()"]++ + } + + if n.Tok.IsOperator() { + v.Coef.Opt[n.Tok.String()]++ + } else { + v.Coef.Opd[n.Tok.String()]++ + } + } +} + +func (v *HalstVol) handleIdent(ident *ast.Ident) { + if ident.Obj == nil { + v.Coef.Opt[ident.Name]++ + } else { + if ident.Obj.Kind.String() != "func" { + v.Coef.Opd[ident.Name]++ + } + } +} + +func (v *HalstVol) handleLit(lit ast.Node) { + switch n := lit.(type) { + case *ast.BasicLit: + if n.Kind.IsLiteral() { + v.Coef.Opd[n.Value]++ + } else { + v.Coef.Opt[n.Value]++ + } + case *ast.CompositeLit: + incrIfAllTrue(v.Coef.Opt, "{}", []bool{n.Lbrace.IsValid(), n.Rbrace.IsValid()}) + } +} + +func (v *HalstVol) handleExpr(expr ast.Node) { + switch n := expr.(type) { + case *ast.ParenExpr: + incrIfAllTrue(v.Coef.Opt, "()", []bool{n.Lparen.IsValid(), n.Rparen.IsValid()}) + case *ast.IndexExpr: + incrIfAllTrue(v.Coef.Opt, "{}", []bool{n.Lbrack.IsValid(), n.Rbrack.IsValid()}) + case *ast.SliceExpr: + incrIfAllTrue(v.Coef.Opt, "[]", []bool{n.Lbrack.IsValid(), n.Rbrack.IsValid()}) + case *ast.TypeAssertExpr: + incrIfAllTrue(v.Coef.Opt, "()", 
[]bool{n.Lparen.IsValid(), n.Rparen.IsValid()}) + case *ast.CallExpr: + incrIfAllTrue(v.Coef.Opt, "()", []bool{n.Lparen.IsValid(), n.Rparen.IsValid()}) + incrIfAllTrue(v.Coef.Opt, "...", []bool{n.Ellipsis != 0}) + case *ast.StarExpr: + incrIfAllTrue(v.Coef.Opt, "*", []bool{n.Star.IsValid()}) + case *ast.UnaryExpr: + if n.Op.IsOperator() { + v.Coef.Opt[n.Op.String()]++ + } else { + v.Coef.Opd[n.Op.String()]++ + } + case *ast.BinaryExpr: + v.Coef.Opt[n.Op.String()]++ + case *ast.KeyValueExpr: + incrIfAllTrue(v.Coef.Opt, ":", []bool{n.Colon.IsValid()}) + } +} + +func (v *HalstVol) handleStmt(stmt ast.Node) { + switch n := stmt.(type) { + case *ast.SendStmt: + incrIfAllTrue(v.Coef.Opt, "<-", []bool{n.Arrow.IsValid()}) + case *ast.IncDecStmt: + incrIfAllTrue(v.Coef.Opt, n.Tok.String(), []bool{n.Tok.IsOperator()}) + case *ast.AssignStmt: + if n.Tok.IsOperator() { + v.Coef.Opt[n.Tok.String()]++ + } + case *ast.GoStmt: + if n.Go.IsValid() { + v.Coef.Opt["go"]++ + } + case *ast.DeferStmt: + if n.Defer.IsValid() { + v.Coef.Opt["defer"]++ + } + case *ast.ReturnStmt: + if n.Return.IsValid() { + v.Coef.Opt["return"]++ + } + case *ast.BranchStmt: + if n.Tok.IsOperator() { + v.Coef.Opt[n.Tok.String()]++ + } else { + v.Coef.Opd[n.Tok.String()]++ + } + case *ast.BlockStmt: + if n.Lbrace.IsValid() && n.Rbrace.IsValid() { + v.Coef.Opt["{}"]++ + } + case *ast.IfStmt: + if n.If.IsValid() { + v.Coef.Opt["if"]++ + } + if n.Else != nil { + v.Coef.Opt["else"]++ + } + case *ast.SwitchStmt: + if n.Switch.IsValid() { + v.Coef.Opt["switch"]++ + } + case *ast.SelectStmt: + if n.Select.IsValid() { + v.Coef.Opt["select"]++ + } + case *ast.ForStmt: + if n.For.IsValid() { + v.Coef.Opt["for"]++ + } + case *ast.RangeStmt: + if n.For.IsValid() { + v.Coef.Opt["for"]++ + } + if n.Key != nil { + if n.Tok.IsOperator() { + v.Coef.Opt[n.Tok.String()]++ + } else { + v.Coef.Opd[n.Tok.String()]++ + } + } + v.Coef.Opt["range"]++ + } +} + +func (v *HalstVol) handleCaseClause(cc *ast.CaseClause) { + if cc.List == nil { + v.Coef.Opt["default"]++ + } + if cc.Colon.IsValid() { + v.Coef.Opt[":"]++ + } +} diff --git a/vendor/github.com/yagipy/maintidx/visitor.go b/vendor/github.com/yagipy/maintidx/visitor.go new file mode 100644 index 0000000000..e6f74c50d7 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/visitor.go @@ -0,0 +1,77 @@ +package maintidx + +import ( + "github.com/yagipy/maintidx/pkg/cyc" + "github.com/yagipy/maintidx/pkg/halstvol" + "go/ast" + "math" + "sort" +) + +type Visitor struct { + MaintIdx int + Coef Coef +} + +var _ ast.Visitor = &Visitor{} + +type Coef struct { + Cyc cyc.Cyc + HalstVol halstvol.HalstVol +} + +func NewVisitor() *Visitor { + return &Visitor{ + MaintIdx: 0, + Coef: Coef{ + Cyc: cyc.Cyc{ + Val: 1, + Coef: cyc.Coef{}, + }, + HalstVol: halstvol.HalstVol{ + Val: 0.0, + Coef: halstvol.Coef{ + Opt: map[string]int{}, + Opd: map[string]int{}, + }, + }, + }, + } +} + +func (v *Visitor) Visit(n ast.Node) ast.Visitor { + v.Coef.Cyc.Analyze(n) + v.Coef.HalstVol.Analyze(n) + return v +} + +// Calc https://docs.microsoft.com/ja-jp/archive/blogs/codeanalysis/maintainability-index-range-and-meaning +func (v *Visitor) calc(loc int) { + origVal := 171.0 - 5.2*math.Log(v.Coef.HalstVol.Val) - 0.23*float64(v.Coef.Cyc.Val) - 16.2*math.Log(float64(loc)) + normVal := int(math.Max(0.0, origVal*100.0/171.0)) + v.MaintIdx = normVal +} + +// TODO: Move halstvol package +func (v *Visitor) printHalstVol() { + sortedOpt := make([]string, len(v.Coef.HalstVol.Coef.Opt)) + sortedOpd := make([]string, len(v.Coef.HalstVol.Coef.Opd)) + 
optIndex := 0 + opdIndex := 0 + for key := range v.Coef.HalstVol.Coef.Opt { + sortedOpt[optIndex] = key + optIndex++ + } + for key := range v.Coef.HalstVol.Coef.Opd { + sortedOpd[opdIndex] = key + opdIndex++ + } + sort.Strings(sortedOpt) + sort.Strings(sortedOpd) + for _, val := range sortedOpt { + println("operators", val, v.Coef.HalstVol.Coef.Opt[val]) + } + for _, val := range sortedOpd { + println("operands", val, v.Coef.HalstVol.Coef.Opd[val]) + } +} diff --git a/vendor/github.com/ykadowak/zerologlint/.goreleaser.yaml b/vendor/github.com/ykadowak/zerologlint/.goreleaser.yaml new file mode 100644 index 0000000000..f3af3f2121 --- /dev/null +++ b/vendor/github.com/ykadowak/zerologlint/.goreleaser.yaml @@ -0,0 +1,24 @@ +before: + hooks: + - go mod tidy +builds: + - id: zerologlint + main: ./cmd/zerologlint + binary: zerologlint + env: + - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' + - '^ci:' diff --git a/vendor/github.com/ykadowak/zerologlint/LICENSE b/vendor/github.com/ykadowak/zerologlint/LICENSE new file mode 100644 index 0000000000..92a1e3b318 --- /dev/null +++ b/vendor/github.com/ykadowak/zerologlint/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Yusuke Kadowaki + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ykadowak/zerologlint/README.md b/vendor/github.com/ykadowak/zerologlint/README.md new file mode 100644 index 0000000000..b0a5fc0f9f --- /dev/null +++ b/vendor/github.com/ykadowak/zerologlint/README.md @@ -0,0 +1,63 @@ +# zerologlint +![build](https://github.com/ykadowak/zerologlint/actions/workflows/testing.yaml/badge.svg) + +`zerologlint` is a linter for [zerolog](https://github.com/rs/zerolog) that can be run with `go vet` or through [golangci-lint](https://golangci-lint.run/) since `v1.53.0`. +It detects the wrong usage of `zerolog` that a user forgets to dispatch `zerolog.Event` with `Send` or `Msg` like functions, in which case nothing will be logged. For more detailed explanations of the cases it detects, see [Examples](#Example). + +## Install + +```bash +go install github.com/ykadowak/zerologlint/cmd/zerologlint@latest +``` + +## Usage +```bash +go vet -vettool=`which zerologlint` ./... +``` + +or you can also use it with [golangci-lint](https://golangci-lint.run/) since `v1.53.0`. 
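Because the package exports its `analysis.Analyzer` (see `zerologlint.go` below), it can also be embedded in a custom vet tool. A minimal sketch using the `singlechecker` helper from `golang.org/x/tools`, assuming only the exported `Analyzer` value:

```go
package main

import (
    "github.com/ykadowak/zerologlint"
    "golang.org/x/tools/go/analysis/singlechecker"
)

// Build this into a binary and point `go vet -vettool=` at it, or run the
// binary directly with a package pattern such as ./... .
func main() {
    singlechecker.Main(zerologlint.Analyzer)
}
```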
+
+## Examples
+```go
+package main
+
+import (
+    "os"
+
+    "github.com/rs/zerolog"
+    "github.com/rs/zerolog/log"
+)
+
+func main() {
+    var err error
+
+    // 1. Basic case
+    log.Info() // "must be dispatched by Msg or Send method"
+
+    // 2. Nested case
+    log.Info(). // "must be dispatched by Msg or Send method"
+        Str("foo", "bar").
+        Dict("dict", zerolog.Dict().
+            Str("bar", "baz").
+            Int("n", 1),
+        )
+
+    // 3. Reassignment case
+    logger := log.Info() // "must be dispatched by Msg or Send method"
+    if err != nil {
+        logger = log.Error() // "must be dispatched by Msg or Send method"
+    }
+    logger.Str("foo", "bar")
+
+    // 4. Deferred case
+    defer log.Info() // "must be dispatched by Msg or Send method"
+
+    // 5. zerolog.Logger case
+    logger2 := zerolog.New(os.Stdout)
+    logger2.Info().Send()
+
+    // 6. Dispatch in other function case
+    event := log.Info()
+    dispatcher(event)
+}
+
+func dispatcher(e *zerolog.Event) {
+    e.Send()
+}
+```
diff --git a/vendor/github.com/ykadowak/zerologlint/zerologlint.go b/vendor/github.com/ykadowak/zerologlint/zerologlint.go
new file mode 100644
index 0000000000..8c8fb74fc3
--- /dev/null
+++ b/vendor/github.com/ykadowak/zerologlint/zerologlint.go
@@ -0,0 +1,261 @@
+package zerologlint
+
+import (
+    "go/token"
+    "go/types"
+    "strings"
+
+    "golang.org/x/tools/go/analysis"
+    "golang.org/x/tools/go/analysis/passes/buildssa"
+    "golang.org/x/tools/go/ssa"
+
+    "github.com/gostaticanalysis/comment/passes/commentmap"
+)
+
+var Analyzer = &analysis.Analyzer{
+    Name: "zerologlint",
+    Doc:  "Detects incorrect usage of `zerolog` where the user forgets to dispatch with `Send` or `Msg`",
+    Run:  run,
+    Requires: []*analysis.Analyzer{
+        buildssa.Analyzer,
+        commentmap.Analyzer,
+    },
+}
+
+type posser interface {
+    Pos() token.Pos
+}
+
+// callDefer is an interface just to hold both ssa.Call and ssa.Defer in our set
+type callDefer interface {
+    Common() *ssa.CallCommon
+    Pos() token.Pos
+}
+
+type linter struct {
+    // eventSet holds every ssa block that is a zerolog.Event instance
+    // that should be dispatched.
+    // Every time the zerolog.Event is dispatched with Msg() or Send(),
+    // that block is deleted from this set.
+    // At the end, any block still left in the set is reported as not
+    // dispatched.
+    eventSet map[posser]struct{}
+    // deleteLater holds the ssa blocks that should be deleted from eventSet
+    // after all the inspection is done.
+    // This is required because the `else` ssa block comes after the dispatch
+    // of the `if` block.
+    // e.g., if err != nil { log.Error() } else { log.Info() } log.Send()
+    // deleteLater takes care of the log.Info() block.
+    deleteLater map[posser]struct{}
+    recLimit    uint
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+    srcFuncs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs
+
+    l := &linter{
+        eventSet:    make(map[posser]struct{}),
+        deleteLater: make(map[posser]struct{}),
+        recLimit:    100,
+    }
+
+    for _, sf := range srcFuncs {
+        for _, b := range sf.Blocks {
+            for _, instr := range b.Instrs {
+                if c, ok := instr.(*ssa.Call); ok {
+                    l.inspect(c)
+                } else if c, ok := instr.(*ssa.Defer); ok {
+                    l.inspect(c)
+                }
+            }
+        }
+    }
+
+    // apply deleteLater to eventSet for else branches of if-else cases
+
+    for k := range l.deleteLater {
+        delete(l.eventSet, k)
+    }
+
+    // At the end, if the set is empty -> ok.
+    // Otherwise, any zerolog.Event vars left in it were never dispatched, so report them.
+    for k := range l.eventSet {
+        pass.Reportf(k.Pos(), "must be dispatched by Msg or Send method")
+    }
+
+    return nil, nil
+}
+
+func (l *linter) inspect(cd callDefer) {
+    c := cd.Common()
+
+    // check if it's in github.com/rs/zerolog/log, since there are some
+    // functions in github.com/rs/zerolog that return zerolog.Event
+    // which should not be included. However, a zerolog.Logger receiver is an exception.
+    if isInLogPkg(*c) || isLoggerRecv(*c) {
+        if isZerologEvent(c.Value) {
+            // this ssa block should be dispatched afterwards at some point
+            l.eventSet[cd] = struct{}{}
+            return
+        }
+    }
+
+    // if the call does not return zerolog.Event,
+    // check if the base is zerolog.Event.
+    // if so, check if the StaticCallee is Send() or Msg().
+    // if so, remove the arg[0] from the set.
+    f := c.StaticCallee()
+    if f == nil {
+        return
+    }
+    if !isDispatchMethod(f) {
+        shouldReturn := true
+        for _, p := range f.Params {
+            if isZerologEvent(p) {
+                // check if this zerolog.Event passed as a parameter is dispatched in the function
+                // TODO: technically, it can be dispatched in another function that is called in this function, and
+                // this algorithm cannot track that. But I'm tired of thinking about that for now.
+                for _, b := range f.Blocks {
+                    for _, instr := range b.Instrs {
+                        switch v := instr.(type) {
+                        case *ssa.Call:
+                            if inspectDispatchInFunction(v.Common()) {
+                                shouldReturn = false
+                                break
+                            }
+                        case *ssa.Defer:
+                            if inspectDispatchInFunction(v.Common()) {
+                                shouldReturn = false
+                                break
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        if shouldReturn {
+            return
+        }
+    }
+    for _, arg := range c.Args {
+        if isZerologEvent(arg) {
+            // if there's a branch, track both ways
+            // this is for a case like:
+            // logger := log.Info()
+            // if err != nil {
+            //     logger = log.Error()
+            // }
+            // logger.Send()
+            //
+            // A similar case like the one below reaches the same root, but that doesn't
+            // have any side effect.
+ // logger := log.Info() + // if err != nil { + // logger = logger.Str("a", "b") + // } + // logger.Send() + if phi, ok := arg.(*ssa.Phi); ok { + for _, edge := range phi.Edges { + l.dfsEdge(edge, make(map[ssa.Value]struct{}), 0) + } + } else { + val := getRootSsaValue(arg) + delete(l.eventSet, val) + } + } + } +} + +func (l *linter) dfsEdge(v ssa.Value, visit map[ssa.Value]struct{}, cnt uint) { + // only for safety + if cnt > l.recLimit { + return + } + cnt++ + + if _, ok := visit[v]; ok { + return + } + visit[v] = struct{}{} + + val := getRootSsaValue(v) + phi, ok := val.(*ssa.Phi) + if !ok { + l.deleteLater[val] = struct{}{} + return + } + for _, edge := range phi.Edges { + l.dfsEdge(edge, visit, cnt) + } +} + +func inspectDispatchInFunction(cc *ssa.CallCommon) bool { + if isDispatchMethod(cc.StaticCallee()) { + for _, arg := range cc.Args { + if isZerologEvent(arg) { + return true + } + } + } + return false +} + +func isInLogPkg(c ssa.CallCommon) bool { + switch v := c.Value.(type) { + case ssa.Member: + p := v.Package() + if p == nil { + return false + } + return strings.HasSuffix(p.Pkg.Path(), "github.com/rs/zerolog/log") + } + return false +} + +func isLoggerRecv(c ssa.CallCommon) bool { + switch f := c.Value.(type) { + case *ssa.Function: + if recv := f.Signature.Recv(); recv != nil { + return strings.HasSuffix(types.TypeString(recv.Type(), nil), "zerolog.Logger") + } + } + return false +} + +func isZerologEvent(v ssa.Value) bool { + ts := v.Type().String() + return strings.HasSuffix(ts, "github.com/rs/zerolog.Event") +} + +func isDispatchMethod(f *ssa.Function) bool { + if f == nil { + return false + } + m := f.Name() + if m == "Send" || m == "Msg" || m == "Msgf" || m == "MsgFunc" { + return true + } + return false +} + +func getRootSsaValue(v ssa.Value) ssa.Value { + if c, ok := v.(*ssa.Call); ok { + v := c.Value() + + // When there is no receiver, that's the block of zerolog.Event + // eg. Error() method in log.Error().Str("foo", "bar").Send() + if len(v.Call.Args) == 0 { + return v + } + + // Even when there is a receiver, if it's a zerolog.Logger instance, return this block + // eg. Info() method in zerolog.New(os.Stdout).Info() + root := v.Call.Args[0] + if !isZerologEvent(root) { + return v + } + + // Ok to just return the receiver because all the method in this + // chain is zerolog.Event at this point. + return getRootSsaValue(root) + } + return v +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/.travis.yml b/vendor/github.com/zclconf/go-cty-yaml/.travis.yml deleted file mode 100644 index 13ff998668..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go - -go: - - 1.12 - diff --git a/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md b/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md deleted file mode 100644 index b329bd05d9..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md +++ /dev/null @@ -1,16 +0,0 @@ -# 1.0.2 (June 17, 2020) - -* The YAML decoder now follows the YAML specification more closely when parsing - numeric values. - ([#6](https://github.com/zclconf/go-cty-yaml/pull/6)) - -# 1.0.1 (July 30, 2019) - -* The YAML decoder is now correctly treating quoted scalars as verbatim literal - strings rather than using the fuzzy type selection rules for them. Fuzzy - type selection rules still apply to unquoted scalars. - ([#4](https://github.com/zclconf/go-cty-yaml/pull/4)) - -# 1.0.0 (May 26, 2019) - -Initial release. 
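The 1.0.1 change described in this (now removed) changelog is easiest to see through the package's own API. A sketch against the deleted converter; the exact inferred types are my reading of the changelog text, not verified output:

```go
package main

import (
    "fmt"

    yaml "github.com/zclconf/go-cty-yaml"
)

func main() {
    // Unquoted scalars go through fuzzy type selection...
    unquoted, _ := yaml.Standard.ImpliedType([]byte(`true`)) // presumably cty.Bool
    // ...while quoted scalars are treated as verbatim literal strings.
    quoted, _ := yaml.Standard.ImpliedType([]byte(`"true"`)) // presumably cty.String
    fmt.Println(unquoted.FriendlyName(), quoted.FriendlyName())
}
```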
diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE b/vendor/github.com/zclconf/go-cty-yaml/LICENSE deleted file mode 100644 index 8dada3edaf..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml b/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml deleted file mode 100644 index 8da58fbf6f..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/zclconf/go-cty-yaml/NOTICE b/vendor/github.com/zclconf/go-cty-yaml/NOTICE deleted file mode 100644 index 4e6c00ab31..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/NOTICE +++ /dev/null @@ -1,20 +0,0 @@ -This package is derived from gopkg.in/yaml.v2, which is copyright -2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -Includes mechanical ports of code from libyaml, distributed under its original -license. See LICENSE.libyaml for more information. - -Modifications for cty interfacing copyright 2019 Martin Atkins, and -distributed under the same license terms. 
diff --git a/vendor/github.com/zclconf/go-cty-yaml/apic.go b/vendor/github.com/zclconf/go-cty-yaml/apic.go deleted file mode 100644 index 1f7e87e672..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/apic.go +++ /dev/null @@ -1,739 +0,0 @@ -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. 
-func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. 
-// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. 
-// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. 
-// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/github.com/zclconf/go-cty-yaml/converter.go b/vendor/github.com/zclconf/go-cty-yaml/converter.go deleted file mode 100644 index a73b34a8b2..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/converter.go +++ /dev/null @@ -1,69 +0,0 @@ -package yaml - -import ( - "github.com/zclconf/go-cty/cty" -) - -// ConverterConfig is used to configure a new converter, using NewConverter. -type ConverterConfig struct { - // EncodeAsFlow, when set to true, causes Marshal to produce flow-style - // mapping and sequence serializations. - EncodeAsFlow bool -} - -// A Converter can marshal and unmarshal between cty values and YAML bytes. -// -// Because there are many different ways to map cty to YAML and vice-versa, -// a converter is configurable using the settings in ConverterConfig, which -// allow for a few different permutations of mapping to YAML. -// -// If you are just trying to work with generic, standard YAML, the predefined -// converter in Standard should be good enough. -type Converter struct { - encodeAsFlow bool -} - -// NewConverter creates a new Converter with the given configuration. -func NewConverter(config *ConverterConfig) *Converter { - return &Converter{ - encodeAsFlow: config.EncodeAsFlow, - } -} - -// Standard is a predefined Converter that produces and consumes generic YAML -// using only built-in constructs that any other YAML implementation ought to -// understand. -var Standard *Converter = NewConverter(&ConverterConfig{}) - -// ImpliedType analyzes the given source code and returns a suitable type that -// it could be decoded into. -// -// For a converter that is using standard YAML rather than cty-specific custom -// tags, only a subset of cty types can be produced: strings, numbers, bools, -// tuple types, and object types. -func (c *Converter) ImpliedType(src []byte) (cty.Type, error) { - return c.impliedType(src) -} - -// Marshal serializes the given value into a YAML document, using a fixed -// mapping from cty types to YAML constructs. -// -// Note that unlike the function of the same name in the cty JSON package, -// this does not take a type constraint and therefore the YAML serialization -// cannot preserve late-bound type information in the serialization to be -// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type -// constraint given to Unmarshal will be decoded as if the corresponding portion -// of the input were processed with ImpliedType to find a target type. -func (c *Converter) Marshal(v cty.Value) ([]byte, error) { - return c.marshal(v) -} - -// Unmarshal reads the document found within the given source buffer -// and attempts to convert it into a value conforming to the given type -// constraint. -// -// An error is returned if the given source contains any YAML document -// delimiters. 
-func (c *Converter) Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { - return c.unmarshal(src, ty) -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go b/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go deleted file mode 100644 index b91141ccaa..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go +++ /dev/null @@ -1,57 +0,0 @@ -package yaml - -import ( - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// YAMLDecodeFunc is a cty function for decoding arbitrary YAML source code -// into a cty Value, using the ImpliedType and Unmarshal methods of the -// Standard pre-defined converter. -var YAMLDecodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "src", - Type: cty.String, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - if !args[0].IsKnown() { - return cty.DynamicPseudoType, nil - } - if args[0].IsNull() { - return cty.NilType, function.NewArgErrorf(0, "YAML source code cannot be null") - } - return Standard.ImpliedType([]byte(args[0].AsString())) - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - if retType == cty.DynamicPseudoType { - return cty.DynamicVal, nil - } - return Standard.Unmarshal([]byte(args[0].AsString()), retType) - }, -}) - -// YAMLEncodeFunc is a cty function for encoding an arbitrary cty value -// into YAML. -var YAMLEncodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowNull: true, - AllowDynamicType: true, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - if !args[0].IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - raw, err := Standard.Marshal(args[0]) - if err != nil { - return cty.NilVal, err - } - return cty.StringVal(string(raw)), nil - }, -}) diff --git a/vendor/github.com/zclconf/go-cty-yaml/decode.go b/vendor/github.com/zclconf/go-cty-yaml/decode.go deleted file mode 100644 index e369ff27c8..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/decode.go +++ /dev/null @@ -1,261 +0,0 @@ -package yaml - -import ( - "errors" - "fmt" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -func (c *Converter) unmarshal(src []byte, ty cty.Type) (cty.Value, error) { - p := &yaml_parser_t{} - if !yaml_parser_initialize(p) { - return cty.NilVal, errors.New("failed to initialize YAML parser") - } - if len(src) == 0 { - src = []byte{'\n'} - } - - an := &valueAnalysis{ - anchorsPending: map[string]int{}, - anchorVals: map[string]cty.Value{}, - } - - yaml_parser_set_input_string(p, src) - - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ != yaml_STREAM_START_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "missing stream start token") - } - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ != yaml_DOCUMENT_START_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "missing start of document") - } - - v, err := c.unmarshalParse(an, p) - if err != nil { - return cty.NilVal, err - } - - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ == yaml_DOCUMENT_START_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "only a single document is allowed") - } - if evt.typ != yaml_DOCUMENT_END_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) - } - if 
!yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ != yaml_STREAM_END_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content after value") - } - - return convert.Convert(v, ty) -} - -func (c *Converter) unmarshalParse(an *valueAnalysis, p *yaml_parser_t) (cty.Value, error) { - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - return c.unmarshalParseRemainder(an, &evt, p) -} - -func (c *Converter) unmarshalParseRemainder(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - switch evt.typ { - case yaml_SCALAR_EVENT: - return c.unmarshalScalar(an, evt, p) - case yaml_ALIAS_EVENT: - return c.unmarshalAlias(an, evt, p) - case yaml_MAPPING_START_EVENT: - return c.unmarshalMapping(an, evt, p) - case yaml_SEQUENCE_START_EVENT: - return c.unmarshalSequence(an, evt, p) - case yaml_DOCUMENT_START_EVENT: - return cty.NilVal, parseEventErrorf(evt, "only a single document is allowed") - case yaml_STREAM_END_EVENT: - // Decoding an empty buffer, probably - return cty.NilVal, parseEventErrorf(evt, "expecting value but found end of stream") - default: - // Should never happen; the above should be comprehensive - return cty.NilVal, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) - } -} - -func (c *Converter) unmarshalScalar(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - src := evt.value - tag := string(evt.tag) - anchor := string(evt.anchor) - - if len(anchor) > 0 { - an.beginAnchor(anchor) - } - - val, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) - if err != nil { - return cty.NilVal, parseEventErrorWrap(evt, err) - } - - if val.RawEquals(mergeMappingVal) { - // In any context other than a mapping key, this is just a plain string - val = cty.StringVal("<<") - } - - if len(anchor) > 0 { - an.completeAnchor(anchor, val) - } - return val, nil -} - -func (c *Converter) unmarshalMapping(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_MAP_TAG { - return cty.NilVal, parseEventErrorf(evt, "can't interpret mapping as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - vals := make(map[string]cty.Value) - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilVal, parserError(p) - } - if nextEvt.typ == yaml_MAPPING_END_EVENT { - v := cty.ObjectVal(vals) - if anchor != "" { - an.completeAnchor(anchor, v) - } - return v, nil - } - - if nextEvt.typ != yaml_SCALAR_EVENT { - return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) - if err != nil { - return cty.NilVal, err - } - if keyVal.RawEquals(mergeMappingVal) { - // Merging the value (which must be a mapping) into our mapping, - // then. 
- val, err := c.unmarshalParse(an, p) - if err != nil { - return cty.NilVal, err - } - ty := val.Type() - if !(ty.IsObjectType() || ty.IsMapType()) { - return cty.NilVal, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) - } - for it := val.ElementIterator(); it.Next(); { - k, v := it.Element() - vals[k.AsString()] = v - } - continue - } - if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { - keyVal = keyValStr - } else { - return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - if keyVal.IsNull() { - return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key cannot be null") - } - if !keyVal.IsKnown() { - return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key must be known") - } - val, err := c.unmarshalParse(an, p) - if err != nil { - return cty.NilVal, err - } - - vals[keyVal.AsString()] = val - } -} - -func (c *Converter) unmarshalSequence(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_SEQ_TAG { - return cty.NilVal, parseEventErrorf(evt, "can't interpret sequence as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - var vals []cty.Value - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilVal, parserError(p) - } - if nextEvt.typ == yaml_SEQUENCE_END_EVENT { - ty := cty.TupleVal(vals) - if anchor != "" { - an.completeAnchor(anchor, ty) - } - return ty, nil - } - - val, err := c.unmarshalParseRemainder(an, &nextEvt, p) - if err != nil { - return cty.NilVal, err - } - - vals = append(vals, val) - } -} - -func (c *Converter) unmarshalAlias(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - v, err := an.anchorVal(string(evt.anchor)) - if err != nil { - err = parseEventErrorWrap(evt, err) - } - return v, err -} - -type valueAnalysis struct { - anchorsPending map[string]int - anchorVals map[string]cty.Value -} - -func (an *valueAnalysis) beginAnchor(name string) { - an.anchorsPending[name]++ -} - -func (an *valueAnalysis) completeAnchor(name string, v cty.Value) { - an.anchorsPending[name]-- - if an.anchorsPending[name] == 0 { - delete(an.anchorsPending, name) - } - an.anchorVals[name] = v -} - -func (an *valueAnalysis) anchorVal(name string) (cty.Value, error) { - if _, pending := an.anchorsPending[name]; pending { - // YAML normally allows self-referencing structures, but cty cannot - // represent them (it requires all structures to be finite) so we - // must fail here. - return cty.NilVal, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) - } - ty, ok := an.anchorVals[name] - if !ok { - return cty.NilVal, fmt.Errorf("reference to undefined anchor %q", name) - } - return ty, nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/emitterc.go b/vendor/github.com/zclconf/go-cty-yaml/emitterc.go deleted file mode 100644 index a1c2cc5262..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. 
-func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. 
-// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. 
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. 
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. 
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. 
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } 
else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, 
rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/encode.go b/vendor/github.com/zclconf/go-cty-yaml/encode.go deleted file mode 100644 index daa1478a93..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/encode.go +++ /dev/null @@ -1,189 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" - "strings" - - "github.com/zclconf/go-cty/cty" -) - -func (c *Converter) marshal(v cty.Value) ([]byte, error) { - var buf bytes.Buffer - - e := &yaml_emitter_t{} - yaml_emitter_initialize(e) - yaml_emitter_set_output_writer(e, &buf) - yaml_emitter_set_unicode(e, true) - - var evt yaml_event_t - yaml_stream_start_event_initialize(&evt, yaml_UTF8_ENCODING) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - yaml_document_start_event_initialize(&evt, nil, nil, true) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - - if err := c.marshalEmit(v, e); err != nil { - return nil, err - } - - yaml_document_end_event_initialize(&evt, true) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - yaml_stream_end_event_initialize(&evt) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - - return buf.Bytes(), nil -} - -func (c *Converter) marshalEmit(v cty.Value, e *yaml_emitter_t) error { - ty := v.Type() - switch { - case v.IsNull(): - return c.marshalPrimitive(v, e) - case !v.IsKnown(): - return fmt.Errorf("cannot serialize unknown value as YAML") - case ty.IsPrimitiveType(): - return c.marshalPrimitive(v, e) - case ty.IsTupleType(), ty.IsListType(), ty.IsSetType(): - return c.marshalSequence(v, e) - case ty.IsObjectType(), ty.IsMapType(): - return c.marshalMapping(v, e) - default: - return fmt.Errorf("can't marshal %s as YAML", ty.FriendlyName()) - } -} - -func (c *Converter) marshalPrimitive(v cty.Value, e *yaml_emitter_t) error { - var evt yaml_event_t - - if v.IsNull() { - yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte("null"), - true, - true, - yaml_PLAIN_SCALAR_STYLE, - ) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil - } - - switch v.Type() { - case cty.String: - str := v.AsString() - style := yaml_DOUBLE_QUOTED_SCALAR_STYLE - if strings.Contains(str, "\n") { - style = yaml_LITERAL_SCALAR_STYLE - } - yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte(str), - true, - true, - style, - ) - case cty.Number: - str := v.AsBigFloat().Text('f', -1) - 
yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte(str), - true, - true, - yaml_PLAIN_SCALAR_STYLE, - ) - case cty.Bool: - var str string - switch v { - case cty.True: - str = "true" - case cty.False: - str = "false" - } - yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte(str), - true, - true, - yaml_PLAIN_SCALAR_STYLE, - ) - } - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil -} - -func (c *Converter) marshalSequence(v cty.Value, e *yaml_emitter_t) error { - style := yaml_BLOCK_SEQUENCE_STYLE - if c.encodeAsFlow { - style = yaml_FLOW_SEQUENCE_STYLE - } - - var evt yaml_event_t - yaml_sequence_start_event_initialize(&evt, nil, nil, true, style) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - - for it := v.ElementIterator(); it.Next(); { - _, v := it.Element() - err := c.marshalEmit(v, e) - if err != nil { - return err - } - } - - yaml_sequence_end_event_initialize(&evt) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil -} - -func (c *Converter) marshalMapping(v cty.Value, e *yaml_emitter_t) error { - style := yaml_BLOCK_MAPPING_STYLE - if c.encodeAsFlow { - style = yaml_FLOW_MAPPING_STYLE - } - - var evt yaml_event_t - yaml_mapping_start_event_initialize(&evt, nil, nil, true, style) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - - for it := v.ElementIterator(); it.Next(); { - k, v := it.Element() - err := c.marshalEmit(k, e) - if err != nil { - return err - } - err = c.marshalEmit(v, e) - if err != nil { - return err - } - } - - yaml_mapping_end_event_initialize(&evt) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/error.go b/vendor/github.com/zclconf/go-cty-yaml/error.go deleted file mode 100644 index ae41c488f8..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/error.go +++ /dev/null @@ -1,97 +0,0 @@ -package yaml - -import ( - "errors" - "fmt" -) - -// Error is an error implementation used to report errors that correspond to -// a particular position in an input buffer. -type Error struct { - cause error - Line, Column int -} - -func (e Error) Error() string { - return fmt.Sprintf("on line %d, column %d: %s", e.Line, e.Column, e.cause.Error()) -} - -// Cause is an implementation of the interface used by -// github.com/pkg/errors.Cause, returning the underlying error without the -// position information. -func (e Error) Cause() error { - return e.cause -} - -// WrappedErrors is an implementation of github.com/hashicorp/errwrap.Wrapper -// returning the underlying error without the position information. 
-func (e Error) WrappedErrors() []error { - return []error{e.cause} -} - -func parserError(p *yaml_parser_t) error { - var cause error - if len(p.problem) > 0 { - cause = errors.New(p.problem) - } else { - cause = errors.New("invalid YAML syntax") // useless generic error, then - } - - return parserErrorWrap(p, cause) -} - -func parserErrorWrap(p *yaml_parser_t, cause error) error { - switch { - case p.problem_mark.line != 0: - line := p.problem_mark.line - column := p.problem_mark.column - // Scanner errors don't iterate line before returning error - if p.error == yaml_SCANNER_ERROR { - line++ - column = 0 - } - return Error{ - cause: cause, - Line: line, - Column: column + 1, - } - case p.context_mark.line != 0: - return Error{ - cause: cause, - Line: p.context_mark.line, - Column: p.context_mark.column + 1, - } - default: - return cause - } -} - -func parserErrorf(p *yaml_parser_t, f string, vals ...interface{}) error { - return parserErrorWrap(p, fmt.Errorf(f, vals...)) -} - -func parseEventErrorWrap(evt *yaml_event_t, cause error) error { - if evt.start_mark.line == 0 { - // Event does not have a start mark, so we won't wrap the error at all - return cause - } - return Error{ - cause: cause, - Line: evt.start_mark.line, - Column: evt.start_mark.column + 1, - } -} - -func parseEventErrorf(evt *yaml_event_t, f string, vals ...interface{}) error { - return parseEventErrorWrap(evt, fmt.Errorf(f, vals...)) -} - -func emitterError(e *yaml_emitter_t) error { - var cause error - if len(e.problem) > 0 { - cause = errors.New(e.problem) - } else { - cause = errors.New("failed to write YAML token") // useless generic error, then - } - return cause -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/implied_type.go b/vendor/github.com/zclconf/go-cty-yaml/implied_type.go deleted file mode 100644 index 5b7b0686fa..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/implied_type.go +++ /dev/null @@ -1,268 +0,0 @@ -package yaml - -import ( - "errors" - "fmt" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -func (c *Converter) impliedType(src []byte) (cty.Type, error) { - p := &yaml_parser_t{} - if !yaml_parser_initialize(p) { - return cty.NilType, errors.New("failed to initialize YAML parser") - } - if len(src) == 0 { - src = []byte{'\n'} - } - - an := &typeAnalysis{ - anchorsPending: map[string]int{}, - anchorTypes: map[string]cty.Type{}, - } - - yaml_parser_set_input_string(p, src) - - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ != yaml_STREAM_START_EVENT { - return cty.NilType, parseEventErrorf(&evt, "missing stream start token") - } - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ != yaml_DOCUMENT_START_EVENT { - return cty.NilType, parseEventErrorf(&evt, "missing start of document") - } - - ty, err := c.impliedTypeParse(an, p) - if err != nil { - return cty.NilType, err - } - - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ == yaml_DOCUMENT_START_EVENT { - return cty.NilType, parseEventErrorf(&evt, "only a single document is allowed") - } - if evt.typ != yaml_DOCUMENT_END_EVENT { - return cty.NilType, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) - } - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ != yaml_STREAM_END_EVENT { - return cty.NilType, parseEventErrorf(&evt, "unexpected extra content after value") - } - - return ty, err -} - 
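The deletions here remove both halves of the vendored converter: encode.go (above) walks a cty.Value and feeds events to the emitter, while impliedType just above makes a first pass over a single YAML document to infer a cty.Type before any decoding happens; the helpers that follow do the per-event work. As a reader's aid, a minimal sketch of how a consumer drives that two-pass design through the package's exported API. It assumes the package-level Standard converter and its ImpliedType/Unmarshal/Marshal methods, which is how Terraform's yamldecode/yamlencode functions consume this library:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyyaml "github.com/zclconf/go-cty-yaml"
)

func main() {
	src := []byte("name: example\ncount: 3\nenabled: true\n")

	// Pass 1: infer a type for the document (an object type here).
	ty, err := ctyyaml.Standard.ImpliedType(src)
	if err != nil {
		panic(err)
	}

	// Pass 2: decode the same document into a value of that type.
	val, err := ctyyaml.Standard.Unmarshal(src, ty)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %s: %#v\n", ty.FriendlyName(), val)

	// The reverse direction goes through the emitter via encode.go;
	// per that code, multi-line strings are emitted as literal block scalars.
	out, err := ctyyaml.Standard.Marshal(cty.ObjectVal(map[string]cty.Value{
		"note": cty.StringVal("line one\nline two\n"),
	}))
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}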
-func (c *Converter) impliedTypeParse(an *typeAnalysis, p *yaml_parser_t) (cty.Type, error) { - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - return c.impliedTypeParseRemainder(an, &evt, p) -} - -func (c *Converter) impliedTypeParseRemainder(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - switch evt.typ { - case yaml_SCALAR_EVENT: - return c.impliedTypeScalar(an, evt, p) - case yaml_ALIAS_EVENT: - return c.impliedTypeAlias(an, evt, p) - case yaml_MAPPING_START_EVENT: - return c.impliedTypeMapping(an, evt, p) - case yaml_SEQUENCE_START_EVENT: - return c.impliedTypeSequence(an, evt, p) - case yaml_DOCUMENT_START_EVENT: - return cty.NilType, parseEventErrorf(evt, "only a single document is allowed") - case yaml_STREAM_END_EVENT: - // Decoding an empty buffer, probably - return cty.NilType, parseEventErrorf(evt, "expecting value but found end of stream") - default: - // Should never happen; the above should be comprehensive - return cty.NilType, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) - } -} - -func (c *Converter) impliedTypeScalar(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - src := evt.value - tag := string(evt.tag) - anchor := string(evt.anchor) - implicit := evt.implicit - - if len(anchor) > 0 { - an.beginAnchor(anchor) - } - - var ty cty.Type - switch { - case tag == "" && !implicit: - // Untagged explicit string - ty = cty.String - default: - v, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) - if err != nil { - return cty.NilType, parseEventErrorWrap(evt, err) - } - if v.RawEquals(mergeMappingVal) { - // In any context other than a mapping key, this is just a plain string - ty = cty.String - } else { - ty = v.Type() - } - } - - if len(anchor) > 0 { - an.completeAnchor(anchor, ty) - } - return ty, nil -} - -func (c *Converter) impliedTypeMapping(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_MAP_TAG { - return cty.NilType, parseEventErrorf(evt, "can't interpret mapping as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - atys := make(map[string]cty.Type) - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilType, parserError(p) - } - if nextEvt.typ == yaml_MAPPING_END_EVENT { - ty := cty.Object(atys) - if anchor != "" { - an.completeAnchor(anchor, ty) - } - return ty, nil - } - - if nextEvt.typ != yaml_SCALAR_EVENT { - return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) - if err != nil { - return cty.NilType, err - } - if keyVal.RawEquals(mergeMappingVal) { - // Merging the value (which must be a mapping) into our mapping, - // then. 
- ty, err := c.impliedTypeParse(an, p) - if err != nil { - return cty.NilType, err - } - if !ty.IsObjectType() { - return cty.NilType, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) - } - for name, aty := range ty.AttributeTypes() { - atys[name] = aty - } - continue - } - if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { - keyVal = keyValStr - } else { - return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - if keyVal.IsNull() { - return cty.NilType, parseEventErrorf(&nextEvt, "mapping key cannot be null") - } - if !keyVal.IsKnown() { - return cty.NilType, parseEventErrorf(&nextEvt, "mapping key must be known") - } - valTy, err := c.impliedTypeParse(an, p) - if err != nil { - return cty.NilType, err - } - - atys[keyVal.AsString()] = valTy - } -} - -func (c *Converter) impliedTypeSequence(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_SEQ_TAG { - return cty.NilType, parseEventErrorf(evt, "can't interpret sequence as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - var atys []cty.Type - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilType, parserError(p) - } - if nextEvt.typ == yaml_SEQUENCE_END_EVENT { - ty := cty.Tuple(atys) - if anchor != "" { - an.completeAnchor(anchor, ty) - } - return ty, nil - } - - valTy, err := c.impliedTypeParseRemainder(an, &nextEvt, p) - if err != nil { - return cty.NilType, err - } - - atys = append(atys, valTy) - } -} - -func (c *Converter) impliedTypeAlias(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - ty, err := an.anchorType(string(evt.anchor)) - if err != nil { - err = parseEventErrorWrap(evt, err) - } - return ty, err -} - -type typeAnalysis struct { - anchorsPending map[string]int - anchorTypes map[string]cty.Type -} - -func (an *typeAnalysis) beginAnchor(name string) { - an.anchorsPending[name]++ -} - -func (an *typeAnalysis) completeAnchor(name string, ty cty.Type) { - an.anchorsPending[name]-- - if an.anchorsPending[name] == 0 { - delete(an.anchorsPending, name) - } - an.anchorTypes[name] = ty -} - -func (an *typeAnalysis) anchorType(name string) (cty.Type, error) { - if _, pending := an.anchorsPending[name]; pending { - // YAML normally allows self-referencing structures, but cty cannot - // represent them (it requires all structures to be finite) so we - // must fail here. - return cty.NilType, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) - } - ty, ok := an.anchorTypes[name] - if !ok { - return cty.NilType, fmt.Errorf("reference to undefined anchor %q", name) - } - return ty, nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/parserc.go b/vendor/github.com/zclconf/go-cty-yaml/parserc.go deleted file mode 100644 index 81d05dfe57..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/parserc.go +++ /dev/null @@ -1,1095 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? 
-// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
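yaml_parser_parse above is the entry point of a classic push-down state machine: it clears the event, returns early once the stream has ended or an error is set, and otherwise hands off to the state dispatcher that follows, one handler per state, with parser.states serving as a stack of return states. A stripped-down sketch of that pattern (illustrative names only, not the library's own):

package main

import "fmt"

type state int

const (
	stateStreamStart state = iota
	stateNode
	stateDone
)

type machine struct {
	state  state
	states []state // stack of states to resume after a nested construct
}

func (m *machine) step() (string, bool) {
	switch m.state {
	case stateStreamStart:
		// Push where to resume, then descend into the nested construct.
		m.states = append(m.states, stateDone)
		m.state = stateNode
		return "stream-start", true
	case stateNode:
		// Pop the return state, as the real parser does after each node.
		m.state = m.states[len(m.states)-1]
		m.states = m.states[:len(m.states)-1]
		return "node", true
	default:
		return "", false
	}
}

func main() {
	m := &machine{}
	for ev, ok := m.step(); ok; ev, ok = m.step() {
		fmt.Println("event:", ev)
	}
}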
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected <document start>", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. 
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/readerc.go b/vendor/github.com/zclconf/go-cty-yaml/readerc.go deleted file mode 100644 index 7c1f5fac3d..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/readerc.go +++ /dev/null @@ -1,412 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. 
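The reader code that follows begins by sniffing a byte order mark to pick the stream encoding, falling back to UTF-8 when none is present. The check itself reduces to a short prefix match; a standalone sketch (not the library's code):

package main

import "fmt"

// detectEncoding mirrors the BOM check in the deleted reader: match a
// UTF-16 or UTF-8 byte order mark, otherwise assume plain UTF-8.
func detectEncoding(b []byte) string {
	switch {
	case len(b) >= 2 && b[0] == 0xFF && b[1] == 0xFE:
		return "UTF-16LE"
	case len(b) >= 2 && b[0] == 0xFE && b[1] == 0xFF:
		return "UTF-16BE"
	case len(b) >= 3 && b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF:
		return "UTF-8 (explicit BOM)"
	default:
		return "UTF-8 (assumed)"
	}
}

func main() {
	fmt.Println(detectEncoding([]byte{0xFF, 0xFE, 'a', 0x00}))
	fmt.Println(detectEncoding([]byte("key: value\n")))
}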
-const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. 
- if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. 
- switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. 
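The UTF-16 branch above combines surrogate pairs with the standard RFC 2781 formula, U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF). Go's standard library implements the same decoding, so the arithmetic is easy to cross-check; the snippet below is illustrative only:

package main

import (
	"fmt"
	"unicode/utf16"
)

// combine applies the same formula as the deleted reader:
// U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF)
func combine(w1, w2 uint16) rune {
	return 0x10000 + (rune(w1&0x3FF) << 10) + rune(w2&0x3FF)
}

func main() {
	// U+1F600 encodes as the surrogate pair D83D DE00.
	fmt.Printf("%U\n", combine(0xD83D, 0xDE00))          // U+1F600
	fmt.Printf("%U\n", utf16.DecodeRune(0xD83D, 0xDE00)) // U+1F600, same result
}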
- parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. - for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/resolve.go b/vendor/github.com/zclconf/go-cty-yaml/resolve.go deleted file mode 100644 index 138c7aaa98..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/resolve.go +++ /dev/null @@ -1,293 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/zclconf/go-cty/cty" -) - -type resolveMapItem struct { - value cty.Value - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -// Numeric literal regular expressions from the YAML 1.2 spec: -// -// https://yaml.org/spec/1.2/spec.html#id2805071 -var integerLiteralRegexp = regexp.MustCompile(`` + - // start of string, optional sign, and one of: - `\A[-+]?(` + - // octal literal with 0o prefix and optional _ spaces - `|0o[0-7_]+` + - // decimal literal and optional _ spaces - `|[0-9_]+` + - // hexadecimal literal with 0x prefix and optional _ spaces - `|0x[0-9a-fA-F_]+` + - // end of group, and end of string - `)\z`, -) -var floatLiteralRegexp = regexp.MustCompile( - `\A[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?\z`, -) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' 
// Float (potentially in map) - - var resolveMapList = []struct { - v cty.Value - tag string - l []string - }{ - {cty.True, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {cty.True, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {cty.True, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {cty.False, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {cty.False, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {cty.False, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {cty.NullVal(cty.DynamicPseudoType), yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {cty.NegativeInfinity, yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG, yaml_BINARY_TAG: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func (c *Converter) resolveScalar(tag string, src string, style yaml_scalar_style_t) (cty.Value, error) { - if !resolvableTag(tag) { - return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) - } - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if src != "" { - hint = resolveTable[src[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE || style == yaml_DOUBLE_QUOTED_SCALAR_STYLE { - return cty.StringVal(src), nil - } - - // Handle things we can lookup in a map. - if item, ok := resolveMap[src]; ok { - return item.value, nil - } - - if tag == "" { - for _, nan := range []string{".nan", ".NaN", ".NAN"} { - if src == nan { - // cty cannot represent NaN, so this is an error - return cty.NilVal, fmt.Errorf("floating point NaN is not supported") - } - } - } - - // Base 60 floats are intentionally not supported. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - if numberVal, err := cty.ParseNumberVal(src); err == nil { - return numberVal, nil - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == yaml_TIMESTAMP_TAG { - t, ok := parseTimestamp(src) - if ok { - // cty has no timestamp type, but its functions stdlib - // conventionally uses strings in an RFC3339 encoding - // to represent time, so we'll follow that convention here. 
- return cty.StringVal(t.Format(time.RFC3339)), nil - } - } - - if integerLiteralRegexp.MatchString(src) { - tag = yaml_INT_TAG // will handle parsing below in our tag switch - break - } - if floatLiteralRegexp.MatchString(src) { - tag = yaml_FLOAT_TAG // will handle parsing below in our tag switch - break - } - default: - panic(fmt.Sprintf("cannot resolve tag %q with source %q", tag, src)) - } - } - - if tag == "" && src == "<<" { - return mergeMappingVal, nil - } - - switch tag { - case yaml_STR_TAG, yaml_BINARY_TAG: - // If it's binary then we want to keep the base64 representation, because - // cty has no binary type, but we will check that it's actually base64. - if tag == yaml_BINARY_TAG { - _, err := base64.StdEncoding.DecodeString(src) - if err != nil { - return cty.NilVal, fmt.Errorf("cannot parse %q as %s: not valid base64", src, tag) - } - } - return cty.StringVal(src), nil - case yaml_BOOL_TAG: - item, ok := resolveMap[src] - if !ok || item.tag != yaml_BOOL_TAG { - return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) - } - return item.value, nil - case yaml_FLOAT_TAG, yaml_INT_TAG: - // Note: We don't actually check that a value tagged INT is a whole - // number here. We could, but cty generally doesn't care about the - // int/float distinction, so we'll just be generous and accept it. - plain := strings.Replace(src, "_", "", -1) - if numberVal, err := cty.ParseNumberVal(plain); err == nil { // handles decimal integers and floats - return numberVal, nil - } - if intv, err := strconv.ParseInt(plain, 0, 64); err == nil { // handles 0x and 00 prefixes - return cty.NumberIntVal(intv), nil - } - if uintv, err := strconv.ParseUint(plain, 0, 64); err == nil { // handles 0x and 00 prefixes - return cty.NumberUIntVal(uintv), nil - } - return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) - case yaml_TIMESTAMP_TAG: - t, ok := parseTimestamp(src) - if ok { - // cty has no timestamp type, but its functions stdlib - // conventionally uses strings in an RFC3339 encoding - // to represent time, so we'll follow that convention here. - return cty.StringVal(t.Format(time.RFC3339)), nil - } - return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) - case yaml_NULL_TAG: - return cty.NullVal(cty.DynamicPseudoType), nil - case "": - return cty.StringVal(src), nil - default: - return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) - } -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. -func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. 
-}
-
-// parseTimestamp parses s as a timestamp string and
-// returns the timestamp and reports whether it succeeded.
-// Timestamp formats are defined at http://yaml.org/type/timestamp.html
-func parseTimestamp(s string) (time.Time, bool) {
-	// TODO write code to check all the formats supported by
-	// http://yaml.org/type/timestamp.html instead of using time.Parse.
-
-	// Quick check: all date formats start with YYYY-.
-	i := 0
-	for ; i < len(s); i++ {
-		if c := s[i]; c < '0' || c > '9' {
-			break
-		}
-	}
-	if i != 4 || i == len(s) || s[i] != '-' {
-		return time.Time{}, false
-	}
-	for _, format := range allowedTimestampFormats {
-		if t, err := time.Parse(format, s); err == nil {
-			return t, true
-		}
-	}
-	return time.Time{}, false
-}
-
-type mergeMapping struct{}
-
-var mergeMappingTy = cty.Capsule("merge mapping", reflect.TypeOf(mergeMapping{}))
-var mergeMappingVal = cty.CapsuleVal(mergeMappingTy, &mergeMapping{})
diff --git a/vendor/github.com/zclconf/go-cty-yaml/scannerc.go b/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
deleted file mode 100644
index 077fd1dd2d..0000000000
--- a/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
+++ /dev/null
@@ -1,2696 +0,0 @@
-package yaml
-
-import (
-	"bytes"
-	"fmt"
-)
-
-// Introduction
-// ************
-//
-// The following notes assume that you are familiar with the YAML specification
-// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
-// some cases we are less restrictive than it requires.
-//
-// The process of transforming a YAML stream into a sequence of events is
-// divided into two steps: Scanning and Parsing.
-//
-// The Scanner transforms the input stream into a sequence of tokens, while the
-// Parser transforms the sequence of tokens produced by the Scanner into a
-// sequence of parsing events.
-//
-// The Scanner is rather clever and complicated. The Parser, on the contrary,
-// is a straightforward implementation of a recursive-descent parser (or an
-// LL(1) parser, as it is usually called).
-//
-// Actually, there are two issues in Scanning that might be called "clever"; the
-// rest is quite straightforward. The issues are "block collection start" and
-// "simple keys". Both issues are explained below in detail.
-//
-// Here the Scanning step is explained and implemented. We start with the list
-// of all the tokens produced by the Scanner together with short descriptions.
-//
-// Now, tokens:
-//
-//      STREAM-START(encoding)          # The stream start.
-//      STREAM-END                      # The stream end.
-//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
-//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
-//      DOCUMENT-START                  # '---'
-//      DOCUMENT-END                    # '...'
-//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
-//      BLOCK-MAPPING-START             # sequence or a block mapping.
-//      BLOCK-END                       # Indentation decrease.
-//      FLOW-SEQUENCE-START             # '['
-//      FLOW-SEQUENCE-END               # ']'
-//      FLOW-MAPPING-START              # '{'
-//      FLOW-MAPPING-END                # '}'
-//      BLOCK-ENTRY                     # '-'
-//      FLOW-ENTRY                      # ','
-//      KEY                             # '?' or nothing (simple keys).
-//      VALUE                           # ':'
-//      ALIAS(anchor)                   # '*anchor'
-//      ANCHOR(anchor)                  # '&anchor'
-//      TAG(handle,suffix)              # '!handle!suffix'
-//      SCALAR(value,style)             # A scalar.
-//
-// The following two tokens are "virtual" tokens denoting the beginning and the
-// end of the stream:
-//
-//      STREAM-START(encoding)
-//      STREAM-END
-//
-// We pass the information about the input stream encoding with the
-// STREAM-START token.
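That closes out resolve.go. Its resolution rules are the classic YAML 1.1 tables (y/on/true resolve to bool, .inf to a number, ~ to null), with two cty-specific departures visible in the deleted code above: timestamps are returned as RFC3339-formatted strings because cty has no time type, and .nan is rejected because cty cannot represent NaN. The shared YAML 1.1 behaviour can be illustrated with gopkg.in/yaml.v2 (an assumption made purely for demonstration; the deleted package is a vendored fork that resolves into cty values instead):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	src := []byte("a: on\nb: .inf\nc: ~\nd: 2001-12-15T02:59:43.1Z\n")

	var doc map[string]interface{}
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}
	for _, k := range []string{"a", "b", "c", "d"} {
		fmt.Printf("%s: %v (%T)\n", k, doc[k], doc[k])
	}
	// a: true (bool), b: +Inf (float64), c: <nil>,
	// d: a time.Time in recent yaml.v2 releases, whereas the deleted
	// resolver would have produced the string "2001-12-15T02:59:43Z".
}
```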
-//
-// The next two tokens are responsible for tags:
-//
-//      VERSION-DIRECTIVE(major,minor)
-//      TAG-DIRECTIVE(handle,prefix)
-//
-// Example:
-//
-//      %YAML   1.1
-//      %TAG    !   !foo
-//      %TAG    !yaml!  tag:yaml.org,2002:
-//      ---
-//
-// The corresponding sequence of tokens:
-//
-//      STREAM-START(utf-8)
-//      VERSION-DIRECTIVE(1,1)
-//      TAG-DIRECTIVE("!","!foo")
-//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
-//      DOCUMENT-START
-//      STREAM-END
-//
-// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
-// line.
-//
-// The document start and end indicators are represented by:
-//
-//      DOCUMENT-START
-//      DOCUMENT-END
-//
-// Note that if a YAML stream contains an implicit document (without '---'
-// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
-// produced.
-//
-// In the following examples, we present whole documents together with the
-// produced tokens.
-//
-// 1. An implicit document:
-//
-//      'a scalar'
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      SCALAR("a scalar",single-quoted)
-//      STREAM-END
-//
-// 2. An explicit document:
-//
-//      ---
-//      'a scalar'
-//      ...
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      DOCUMENT-START
-//      SCALAR("a scalar",single-quoted)
-//      DOCUMENT-END
-//      STREAM-END
-//
-// 3. Several documents in a stream:
-//
-//      'a scalar'
-//      ---
-//      'another scalar'
-//      ---
-//      'yet another scalar'
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      SCALAR("a scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("another scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("yet another scalar",single-quoted)
-//      STREAM-END
-//
-// We have already introduced the SCALAR token above. The following tokens are
-// used to describe aliases, anchors, tags, and scalars:
-//
-//      ALIAS(anchor)
-//      ANCHOR(anchor)
-//      TAG(handle,suffix)
-//      SCALAR(value,style)
-//
-// The following series of examples illustrates the usage of these tokens:
-//
-// 1. A recursive sequence:
-//
-//      &A [ *A ]
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      ANCHOR("A")
-//      FLOW-SEQUENCE-START
-//      ALIAS("A")
-//      FLOW-SEQUENCE-END
-//      STREAM-END
-//
-// 2. A tagged scalar:
-//
-//      !!float "3.14"  # A good approximation.
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      TAG("!!","float")
-//      SCALAR("3.14",double-quoted)
-//      STREAM-END
-//
-// 3. Various scalar styles:
-//
-//      --- # Implicit empty plain scalars do not produce tokens.
-//      --- a plain scalar
-//      --- 'a single-quoted scalar'
-//      --- "a double-quoted scalar"
-//      --- |-
-//      a literal scalar
-//      --- >-
-//      a folded
-//      scalar
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      DOCUMENT-START
-//      DOCUMENT-START
-//      SCALAR("a plain scalar",plain)
-//      DOCUMENT-START
-//      SCALAR("a single-quoted scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("a double-quoted scalar",double-quoted)
-//      DOCUMENT-START
-//      SCALAR("a literal scalar",literal)
-//      DOCUMENT-START
-//      SCALAR("a folded scalar",folded)
-//      STREAM-END
-//
-// Now it's time to review collection-related tokens. We will start with
-// flow collections:
-//
-//      FLOW-SEQUENCE-START
-//      FLOW-SEQUENCE-END
-//      FLOW-MAPPING-START
-//      FLOW-MAPPING-END
-//      FLOW-ENTRY
-//      KEY
-//      VALUE
-//
-// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
-// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
-// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
-// indicators '?' and ':', which are used for denoting mapping keys and values,
-// are represented by the KEY and VALUE tokens.
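Example 3 above (several documents in one stream, each '---' emitting DOCUMENT-START) is the behaviour that streaming decoders build on. A short sketch of what that token sequence means in practice, again borrowing gopkg.in/yaml.v2 for illustration since this scanner is internal to the vendored package:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// The exact stream from example 3 above.
	stream := []byte("'a scalar'\n---\n'another scalar'\n---\n'yet another scalar'\n")

	dec := yaml.NewDecoder(bytes.NewReader(stream))
	for {
		var doc interface{}
		if err := dec.Decode(&doc); err == io.EOF {
			break // STREAM-END
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("document: %q\n", doc) // one document per DOCUMENT-START
	}
}
```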
-//
-// The following examples show flow collections:
-//
-// 1. A flow sequence:
-//
-//      [item 1, item 2, item 3]
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      FLOW-SEQUENCE-START
-//      SCALAR("item 1",plain)
-//      FLOW-ENTRY
-//      SCALAR("item 2",plain)
-//      FLOW-ENTRY
-//      SCALAR("item 3",plain)
-//      FLOW-SEQUENCE-END
-//      STREAM-END
-//
-// 2. A flow mapping:
-//
-//      {
-//          a simple key: a value,  # Note that the KEY token is produced.
-//          ? a complex key: another value,
-//      }
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      FLOW-MAPPING-START
-//      KEY
-//      SCALAR("a simple key",plain)
-//      VALUE
-//      SCALAR("a value",plain)
-//      FLOW-ENTRY
-//      KEY
-//      SCALAR("a complex key",plain)
-//      VALUE
-//      SCALAR("another value",plain)
-//      FLOW-ENTRY
-//      FLOW-MAPPING-END
-//      STREAM-END
-//
-// A simple key is a key which is not denoted by the '?' indicator. Note that
-// the Scanner still produces the KEY token whenever it encounters a simple key.
-//
-// For scanning block collections, the following tokens are used (note that we
-// repeat KEY and VALUE here):
-//
-//      BLOCK-SEQUENCE-START
-//      BLOCK-MAPPING-START
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      KEY
-//      VALUE
-//
-// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
-// increase that precedes a block collection (cf. the INDENT token in Python).
-// The token BLOCK-END denotes indentation decrease that ends a block collection
-// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
-// that make detection of these tokens more complex.
-//
-// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
-// '-', '?', and ':' correspondingly.
-//
-// The following examples show how the tokens BLOCK-SEQUENCE-START,
-// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
-//
-// 1. Block sequences:
-//
-//      - item 1
-//      - item 2
-//      -
-//        - item 3.1
-//        - item 3.2
-//      -
-//        key 1: value 1
-//        key 2: value 2
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-ENTRY
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 3.1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 3.2",plain)
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key 1",plain)
-//      VALUE
-//      SCALAR("value 1",plain)
-//      KEY
-//      SCALAR("key 2",plain)
-//      VALUE
-//      SCALAR("value 2",plain)
-//      BLOCK-END
-//      BLOCK-END
-//      STREAM-END
-//
-// 2. Block mappings:
-//
-//      a simple key: a value   # The KEY token is produced here.
-//      ? a complex key
-//      : another value
-//      a mapping:
-//        key 1: value 1
-//        key 2: value 2
-//      a sequence:
-//        - item 1
-//        - item 2
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("a simple key",plain)
-//      VALUE
-//      SCALAR("a value",plain)
-//      KEY
-//      SCALAR("a complex key",plain)
-//      VALUE
-//      SCALAR("another value",plain)
-//      KEY
-//      SCALAR("a mapping",plain)
-//      VALUE
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key 1",plain)
-//      VALUE
-//      SCALAR("value 1",plain)
-//      KEY
-//      SCALAR("key 2",plain)
-//      VALUE
-//      SCALAR("value 2",plain)
-//      BLOCK-END
-//      KEY
-//      SCALAR("a sequence",plain)
-//      VALUE
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-END
-//      BLOCK-END
-//      STREAM-END
-//
-// YAML does not always require starting a new block collection on a new
-// line.
If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) 
- parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break - } - // Fetch the next token. 
- if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? 
- if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. 
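Those two constraints (a simple key fits on one line and stays under 1024 characters) are what the staleness check just below enforces, and the error string it carries is the one users ultimately see. A hedged sketch of one way to trigger it from the outside, assuming gopkg.in/yaml.v2 for illustration (the exact error wording may vary by version):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// A double-quoted scalar may span lines, but a *simple* (implicit) key
	// may not, so the pending key goes stale and scanning fails.
	src := []byte("\"a key broken\nacross two lines\": value\n")

	var v interface{}
	err := yaml.Unmarshal(src, &v)
	fmt.Println(err) // expected: yaml: ... could not find expected ':'
}
```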
-		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
-
-			// Check if the potential simple key to be removed is required.
-			if simple_key.required {
-				return yaml_parser_set_scanner_error(parser,
-					"while scanning a simple key", simple_key.mark,
-					"could not find expected ':'")
-			}
-			simple_key.possible = false
-		}
-	}
-	return true
-}
-
-// Check if a simple key may start at the current position and add it if
-// needed.
-func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
-	// A simple key is required at the current position if the scanner is in
-	// the block context and the current column coincides with the indentation
-	// level.
-
-	required := parser.flow_level == 0 && parser.indent == parser.mark.column
-
-	//
-	// If the current position may start a simple key, save it.
-	//
-	if parser.simple_key_allowed {
-		simple_key := yaml_simple_key_t{
-			possible:     true,
-			required:     required,
-			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
-		}
-		simple_key.mark = parser.mark
-
-		if !yaml_parser_remove_simple_key(parser) {
-			return false
-		}
-		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
-	}
-	return true
-}
-
-// Remove a potential simple key at the current flow level.
-func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
-	i := len(parser.simple_keys) - 1
-	if parser.simple_keys[i].possible {
-		// If the key is required, it is an error.
-		if parser.simple_keys[i].required {
-			return yaml_parser_set_scanner_error(parser,
-				"while scanning a simple key", parser.simple_keys[i].mark,
-				"could not find expected ':'")
-		}
-	}
-	// Remove the key from the stack.
-	parser.simple_keys[i].possible = false
-	return true
-}
-
-// Increase the flow level and resize the simple key list if needed.
-func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
-	// Reset the simple key on the next level.
-	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
-
-	// Increase the flow level.
-	parser.flow_level++
-	return true
-}
-
-// Decrease the flow level.
-func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
-	if parser.flow_level > 0 {
-		parser.flow_level--
-		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
-	}
-	return true
-}
-
-// Push the current indentation level to the stack and set the new level
-// if the current column is greater than the indentation level. In this case,
-// append or insert the specified token into the token queue.
-func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
-	// In the flow context, do nothing.
-	if parser.flow_level > 0 {
-		return true
-	}
-
-	if parser.indent < column {
-		// Push the current indentation level to the stack and set the new
-		// indentation level.
-		parser.indents = append(parser.indents, parser.indent)
-		parser.indent = column
-
-		// Create a token and insert it into the queue.
-		token := yaml_token_t{
-			typ:        typ,
-			start_mark: mark,
-			end_mark:   mark,
-		}
-		if number > -1 {
-			number -= parser.tokens_parsed
-		}
-		yaml_insert_token(parser, number, &token)
-	}
-	return true
-}
-
-// Pop indentation levels from the indents stack until the current level
-// becomes less than or equal to the column. For each indentation level, append
-// the BLOCK-END token.
-func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
-	// In the flow context, do nothing.
- if parser.flow_level > 0 { - return true - } - - // Loop through the indentation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. 
- if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. 
-	if parser.flow_level == 0 {
-		// Check if we are allowed to start a new key (not necessarily simple).
-		if !parser.simple_key_allowed {
-			return yaml_parser_set_scanner_error(parser, "", parser.mark,
-				"mapping keys are not allowed in this context")
-		}
-		// Add the BLOCK-MAPPING-START token if needed.
-		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
-			return false
-		}
-	}
-
-	// Reset any potential simple keys on the current flow level.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	// Simple keys are allowed after '?' in the block context.
-	parser.simple_key_allowed = parser.flow_level == 0
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the KEY token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_KEY_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the VALUE token.
-func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
-
-	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
-
-	// Have we found a simple key?
-	if simple_key.possible {
-		// Create the KEY token and insert it into the queue.
-		token := yaml_token_t{
-			typ:        yaml_KEY_TOKEN,
-			start_mark: simple_key.mark,
-			end_mark:   simple_key.mark,
-		}
-		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
-
-		// In the block context, we may need to add the BLOCK-MAPPING-START token.
-		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
-			simple_key.token_number,
-			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
-			return false
-		}
-
-		// Remove the simple key.
-		simple_key.possible = false
-
-		// A simple key cannot follow another simple key.
-		parser.simple_key_allowed = false
-
-	} else {
-		// The ':' indicator follows a complex key.
-
-		// In the block context, extra checks are required.
-		if parser.flow_level == 0 {
-
-			// Check if we are allowed to start a complex value.
-			if !parser.simple_key_allowed {
-				return yaml_parser_set_scanner_error(parser, "", parser.mark,
-					"mapping values are not allowed in this context")
-			}
-
-			// Add the BLOCK-MAPPING-START token if needed.
-			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
-				return false
-			}
-		}
-
-		// Simple keys after ':' are allowed in the block context.
-		parser.simple_key_allowed = parser.flow_level == 0
-	}
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the VALUE token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_VALUE_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the ALIAS or ANCHOR token.
-func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
-	// An anchor or an alias could be a simple key.
-	if !yaml_parser_save_simple_key(parser) {
-		return false
-	}
-
-	// A simple key cannot follow an anchor or an alias.
-	parser.simple_key_allowed = false
-
-	// Create the ALIAS or ANCHOR token and append it to the queue.
-	var token yaml_token_t
-	if !yaml_parser_scan_anchor(parser, &token, typ) {
-		return false
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the TAG token.
-func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
-	// A tag could be a simple key.
- if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. 
- } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. 
- if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. 
- if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. 
- if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. 
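`yaml_parser_scan_uri_escapes` above consumes `%XX` triples and insists that the decoded octets assemble into well-formed UTF-8, using the leading byte to decide how many continuation octets must follow. A self-contained sketch of the same validation over a plain string, with illustrative names (the original decodes one character per call; this one accepts a whole run of escapes):

```go
package main

import "fmt"

// width mirrors the scanner's helper: the byte length of a UTF-8 sequence
// implied by its leading byte, or 0 for an invalid leading byte.
func width(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

// decodeURIEscapes validates a run of '%XX' escapes: every leading octet
// must announce a sequence length, every continuation octet must match
// the 10xxxxxx pattern, and no sequence may be left truncated.
func decodeURIEscapes(s string) ([]byte, error) {
	hexVal := func(c byte) (int, bool) {
		switch {
		case c >= '0' && c <= '9':
			return int(c - '0'), true
		case c >= 'A' && c <= 'F':
			return int(c-'A') + 10, true
		case c >= 'a' && c <= 'f':
			return int(c-'a') + 10, true
		}
		return 0, false
	}

	var out []byte
	w := 0 // remaining continuation octets in the current sequence
	for len(s) > 0 {
		if len(s) < 3 || s[0] != '%' {
			return nil, fmt.Errorf("did not find URI escaped octet")
		}
		hi, ok1 := hexVal(s[1])
		lo, ok2 := hexVal(s[2])
		if !ok1 || !ok2 {
			return nil, fmt.Errorf("did not find URI escaped octet")
		}
		octet := byte(hi<<4 + lo)
		if w == 0 {
			// Leading octet: determine the sequence length.
			if w = width(octet); w == 0 {
				return nil, fmt.Errorf("found an incorrect leading UTF-8 octet")
			}
		} else if octet&0xC0 != 0x80 {
			return nil, fmt.Errorf("found an incorrect trailing UTF-8 octet")
		}
		out = append(out, octet)
		s = s[3:]
		w--
	}
	if w != 0 {
		return nil, fmt.Errorf("truncated UTF-8 sequence")
	}
	return out, nil
}

func main() {
	b, err := decodeURIEscapes("%C3%A9")
	fmt.Printf("%s %v\n", b, err) // é <nil>
}
```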
- increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. 
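The block scalar header handling above accepts a chomping indicator (`+` keep, `-` strip) and a non-zero indentation indicator in either order after `|` or `>`. A toy sketch of just that header parsing, assuming the caller has already consumed the `|`/`>` indicator; the name and return convention are illustrative only:

```go
package main

import "fmt"

// parseBlockScalarHeader returns the chomping mode (+1 keep, -1 strip,
// 0 clip) and the explicit indentation increment (0 when auto-detected).
func parseBlockScalarHeader(s string) (chomping, increment int, err error) {
	isDigit := func(c byte) bool { return c >= '0' && c <= '9' }
	chompOf := func(c byte) int {
		if c == '+' {
			return +1
		}
		return -1
	}

	if len(s) > 0 && (s[0] == '+' || s[0] == '-') {
		// Chomping indicator first, optionally followed by a digit.
		chomping = chompOf(s[0])
		s = s[1:]
		if len(s) > 0 && isDigit(s[0]) {
			if s[0] == '0' {
				return 0, 0, fmt.Errorf("found an indentation indicator equal to 0")
			}
			increment = int(s[0] - '0')
		}
	} else if len(s) > 0 && isDigit(s[0]) {
		// Same thing, but in the opposite order.
		if s[0] == '0' {
			return 0, 0, fmt.Errorf("found an indentation indicator equal to 0")
		}
		increment = int(s[0] - '0')
		s = s[1:]
		if len(s) > 0 && (s[0] == '+' || s[0] == '-') {
			chomping = chompOf(s[0])
		}
	}
	return chomping, increment, nil
}

func main() {
	// For a header like "|+2": keep trailing line breaks, and indent the
	// content two columns past the parent node's indentation.
	chomping, increment, err := parseBlockScalarHeader("+2")
	fmt.Println(chomping, increment, err) // 1 2 <nil>
}
```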
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. 
- if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. 
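The `\xXX`, `\uXXXX`, and `\UXXXXXXXX` escapes scanned above accumulate an integer code point that the code that follows validates and writes out as UTF-8 byte by byte. Here is a standalone version of that check-and-encode step; the standard library's `utf8.AppendRune` produces the same bytes, this just spells out the arithmetic:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// encodeEscapedRune rejects surrogate halves and out-of-range values,
// then splits the code point across one to four UTF-8 bytes.
func encodeEscapedRune(value int) ([]byte, error) {
	if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
		return nil, fmt.Errorf("found invalid Unicode character escape code %#x", value)
	}
	var s []byte
	switch {
	case value <= 0x7F:
		s = append(s, byte(value))
	case value <= 0x7FF:
		s = append(s, byte(0xC0+(value>>6)), byte(0x80+(value&0x3F)))
	case value <= 0xFFFF:
		s = append(s, byte(0xE0+(value>>12)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	default:
		s = append(s,
			byte(0xF0+(value>>18)), byte(0x80+((value>>12)&0x3F)),
			byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	}
	return s, nil
}

func main() {
	b, err := encodeEscapedRune(0x2603) // as produced by the escape \u2603
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x => %s\n", b, b)                             // e2 98 83 => ☃
	fmt.Println(string(b) == string(utf8.AppendRune(nil, '☃'))) // true
}
```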
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' 
&& - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. - if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/writerc.go b/vendor/github.com/zclconf/go-cty-yaml/writerc.go deleted file mode 100644 index a2dde608cb..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. 
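Back in the plain-scalar loop above, a scalar ends at `:` only when the colon is followed by a blank, while `,`, `?`, `[`, `]`, `{`, and `}` terminate it only inside flow collections. A compact restatement of that indicator check; the blank test here is simplified to ASCII blanks, line breaks, and NUL, omitting the multi-byte NEL/LS/PS breaks that the real `is_blankz` also matches:

```go
package main

import "fmt"

// endsPlainScalar reports whether the byte at buf[i] terminates a plain
// scalar, given the current number of open flow collections.
func endsPlainScalar(buf []byte, i, flowLevel int) bool {
	isBlankz := func(j int) bool {
		if j >= len(buf) {
			return true
		}
		switch buf[j] {
		case ' ', '\t', '\n', '\r', 0:
			return true
		}
		return false
	}

	if buf[i] == ':' && isBlankz(i+1) {
		return true
	}
	if flowLevel > 0 {
		switch buf[i] {
		case ',', '?', '[', ']', '{', '}':
			return true
		}
	}
	return false
}

func main() {
	buf := []byte("key: [a,b]")
	fmt.Println(endsPlainScalar(buf, 3, 0)) // true: ':' before a space
	fmt.Println(endsPlainScalar(buf, 7, 1)) // true: ',' inside a flow sequence
	fmt.Println(endsPlainScalar(buf, 7, 0)) // false: ',' is plain text at block level
}
```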
-func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yaml.go b/vendor/github.com/zclconf/go-cty-yaml/yaml.go deleted file mode 100644 index 2c314cc164..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/yaml.go +++ /dev/null @@ -1,215 +0,0 @@ -// Package yaml can marshal and unmarshal cty values in YAML format. -package yaml - -import ( - "errors" - "fmt" - "reflect" - "strings" - "sync" - - "github.com/zclconf/go-cty/cty" -) - -// Unmarshal reads the document found within the given source buffer -// and attempts to convert it into a value conforming to the given type -// constraint. -// -// This is an alias for Unmarshal on the predefined Converter in "Standard". -// -// An error is returned if the given source contains any YAML document -// delimiters. -func Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { - return Standard.Unmarshal(src, ty) -} - -// Marshal serializes the given value into a YAML document, using a fixed -// mapping from cty types to YAML constructs. -// -// This is an alias for Marshal on the predefined Converter in "Standard". -// -// Note that unlike the function of the same name in the cty JSON package, -// this does not take a type constraint and therefore the YAML serialization -// cannot preserve late-bound type information in the serialization to be -// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type -// constraint given to Unmarshal will be decoded as if the corresponding portion -// of the input were processed with ImpliedType to find a target type. -func Marshal(v cty.Value) ([]byte, error) { - return Standard.Marshal(v) -} - -// ImpliedType analyzes the given source code and returns a suitable type that -// it could be decoded into. -// -// For a converter that is using standard YAML rather than cty-specific custom -// tags, only a subset of cty types can be produced: strings, numbers, bools, -// tuple types, and object types. -// -// This is an alias for ImpliedType on the predefined Converter in "Standard". -func ImpliedType(src []byte) (cty.Type, error) { - return Standard.ImpliedType(src) -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. 
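The deleted `yaml.go` above is the package's public surface: `Unmarshal`, `Marshal`, and `ImpliedType` convert between YAML documents and `cty` values via the predefined `Standard` converter. A minimal usage sketch, assuming the module is fetched at its usual import path:

```go
package main

import (
	"fmt"

	yaml "github.com/zclconf/go-cty-yaml"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte("name: example\nreplicas: 3\n")

	// Infer a cty type from the document itself (an object type here,
	// since the top-level node is a mapping)...
	ty, err := yaml.ImpliedType(src)
	if err != nil {
		panic(err)
	}

	// ...then decode the document into a value of that type.
	v, err := yaml.Unmarshal(src, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.GetAttr("replicas")) // cty.NumberIntVal(3)

	// Marshal round-trips the value back into a YAML document.
	out, err := yaml.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)
}
```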
-type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. - Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlh.go deleted file mode 100644 index e25cee563b..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/yamlh.go +++ /dev/null @@ -1,738 +0,0 @@ -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. 
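`getStructInfo` above parses `yaml:"..."` struct tags in which the first comma-separated element is the key and the remaining elements are flags (`omitempty`, `flow`, `inline`). A small sketch of just that tag-splitting step; the function name is illustrative and not part of the package's API:

```go
package main

import (
	"fmt"
	"strings"
)

// splitYAMLTag separates a yaml struct tag into its key (empty means
// "derive from the field name") and its recognized flags.
func splitYAMLTag(tag string) (key string, omitempty, flow, inline bool, err error) {
	fields := strings.Split(tag, ",")
	for _, flag := range fields[1:] {
		switch flag {
		case "omitempty":
			omitempty = true
		case "flow":
			flow = true
		case "inline":
			inline = true
		default:
			return "", false, false, false, fmt.Errorf("unsupported flag %q in tag %q", flag, tag)
		}
	}
	return fields[0], omitempty, flow, inline, nil
}

func main() {
	// e.g. a field declared as:  Meta map[string]string `yaml:",inline"`
	key, omitempty, flow, inline, _ := splitYAMLTag(",inline")
	fmt.Printf("key=%q omitempty=%v flow=%v inline=%v\n", key, omitempty, flow, inline)
}
```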
-type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. 
- yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. -) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. 
-) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. 
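The `tag:yaml.org,2002:*` constants above are the core schema tags that a resolver assigns to untagged scalars. A rough sketch of such a resolver for the obvious cases only; the package's real resolver also handles YAML 1.1 boolean spellings, timestamps, and other forms:

```go
package main

import (
	"fmt"
	"strconv"
)

// resolveTag picks a core tag for a plain, untagged scalar.
func resolveTag(s string) string {
	switch s {
	case "", "~", "null", "Null", "NULL":
		return "tag:yaml.org,2002:null"
	case "true", "True", "TRUE", "false", "False", "FALSE":
		return "tag:yaml.org,2002:bool"
	}
	if _, err := strconv.ParseInt(s, 0, 64); err == nil {
		return "tag:yaml.org,2002:int"
	}
	if _, err := strconv.ParseFloat(s, 64); err == nil {
		return "tag:yaml.org,2002:float"
	}
	return "tag:yaml.org,2002:str" // the default scalar tag
}

func main() {
	for _, s := range []string{"~", "true", "42", "3.5", "hello"} {
		fmt.Printf("%-5s => %s\n", s, resolveTag(s))
	}
}
```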
-type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
- yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. -) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return 
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. 
- yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? 
- flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go deleted file mode 100644 index 8110ce3c37..0000000000 --- a/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. 
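The helpers above work directly on raw UTF-8 bytes with positional lookahead rather than decoding runes first. Standalone copies of two of them, exercised on a small buffer; like the originals, they assume the buffer is padded so lookahead never runs off the end:

```go
package main

import "fmt"

// asHex converts one hex digit to its value, exactly as as_hex does.
func asHex(b []byte, i int) int {
	switch c := b[i]; {
	case c >= 'A' && c <= 'F':
		return int(c-'A') + 10
	case c >= 'a' && c <= 'f':
		return int(c-'a') + 10
	default:
		return int(c - '0')
	}
}

// isPrintable implements the YAML "printable character" ranges directly
// on UTF-8 bytes, which is why it peeks at up to two following bytes.
func isPrintable(b []byte, i int) bool {
	return b[i] == 0x0A ||
		(b[i] >= 0x20 && b[i] <= 0x7E) ||
		(b[i] == 0xC2 && b[i+1] >= 0xA0) ||
		(b[i] > 0xC2 && b[i] < 0xED) ||
		(b[i] == 0xED && b[i+1] < 0xA0) ||
		b[i] == 0xEE ||
		(b[i] == 0xEF &&
			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // exclude the BOM (#xFEFF)
			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))
}

func main() {
	buf := append([]byte("A\x07é"), 0, 0, 0) // padded like the parser's buffer
	fmt.Println(asHex([]byte("2a"), 1))      // 10
	fmt.Println(isPrintable(buf, 0))         // true: 'A'
	fmt.Println(isPrintable(buf, 1))         // false: BEL control character
	fmt.Println(isPrintable(buf, 2))         // true: 'é' (0xC3 0xA9)
}
```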
-func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/github.com/zclconf/go-cty/cty/capsule_ops.go b/vendor/github.com/zclconf/go-cty/cty/capsule_ops.go index 3ff6855ecd..102d26fa95 100644 --- a/vendor/github.com/zclconf/go-cty/cty/capsule_ops.go +++ b/vendor/github.com/zclconf/go-cty/cty/capsule_ops.go @@ -49,6 +49,18 @@ type CapsuleOps struct { // pointer identity of the encapsulated value. RawEquals func(a, b interface{}) bool + // HashKey provides a hashing function for values of the corresponding + // capsule type. If defined, cty will use the resulting hashes as part + // of the implementation of sets whose element type is or contains the + // corresponding capsule type. 
+ // + // If a capsule type defines HashKey then the function _must_ return + // an equal hash value for any two values that would cause Equals or + // RawEquals to return true when given those values. If a given type + // does not uphold that assumption then sets including this type will + // not behave correctly. + HashKey func(v interface{}) string + // ConversionFrom can provide conversions from the corresponding type to // some other type when values of the corresponding type are used with // the "convert" package. (The main cty package does not use this operation.) diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go index ededc5f376..bc79df8cf3 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go @@ -43,14 +43,14 @@ func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { out = out.WithoutOptionalAttributesDeep() if !isKnown { - return cty.UnknownVal(out), nil + return prepareUnknownResult(in.Range(), dynamicReplace(in.Type(), out)), nil } if isNull { // We'll pass through nulls, albeit type converted, and let // the caller deal with whatever handling they want to do in // case null values are considered valid in some applications. - return cty.NullVal(out), nil + return cty.NullVal(dynamicReplace(in.Type(), out)), nil } } @@ -199,3 +199,64 @@ func retConversion(conv conversion) Conversion { return conv(in, cty.Path(nil)) } } + +// prepareUnknownResult can apply value refinements to a returned unknown value +// in certain cases where characteristics of the source value or type can +// transfer into range constraints on the result value. +func prepareUnknownResult(sourceRange cty.ValueRange, targetTy cty.Type) cty.Value { + sourceTy := sourceRange.TypeConstraint() + + ret := cty.UnknownVal(targetTy) + if sourceRange.DefinitelyNotNull() { + ret = ret.RefineNotNull() + } + + switch { + case sourceTy.IsObjectType() && targetTy.IsMapType(): + // A map built from an object type always has the same number of + // elements as the source type has attributes. + return ret.Refine().CollectionLength(len(sourceTy.AttributeTypes())).NewValue() + case sourceTy.IsTupleType() && targetTy.IsListType(): + // A list built from a tuple type always has the same number of + // elements as the source type has elements. + return ret.Refine().CollectionLength(sourceTy.Length()).NewValue() + case sourceTy.IsTupleType() && targetTy.IsSetType(): + // When building a set from a tuple type we can't exactly constrain + // the length because some elements might coalesce, but we can + // guarantee an upper limit. We can also guarantee at least one + // element if the tuple isn't empty. + switch l := sourceTy.Length(); l { + case 0, 1: + return ret.Refine().CollectionLength(l).NewValue() + default: + return ret.Refine(). + CollectionLengthLowerBound(1). + CollectionLengthUpperBound(sourceTy.Length()). + NewValue() + } + case sourceTy.IsCollectionType() && targetTy.IsCollectionType(): + // NOTE: We only reach this function if there is an available + // conversion between the source and target type, so we don't + // need to repeat element type compatibility checks and such here. + // + // If the source value already has a refined length then we'll + // transfer those refinements to the result, because conversion + // does not change length (aside from set element coalescing).
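An illustrative sketch of the effect of prepareUnknownResult, exercised through the public convert API (this is a hedged sketch, not part of the patch; the expected outputs in the comments are inferred from the object-to-map case above):

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/convert"
    )

    func main() {
        objTy := cty.Object(map[string]cty.Type{
            "a": cty.String,
            "b": cty.String,
        })

        // Converting an unknown object to a map used to produce a wholly
        // unrefined unknown value; the result now carries a length
        // refinement, because a map built from a two-attribute object
        // always has exactly two elements.
        got, err := convert.Convert(cty.UnknownVal(objTy), cty.Map(cty.String))
        if err != nil {
            panic(err)
        }
        fmt.Println(got.IsKnown())                  // false
        fmt.Println(got.Range().LengthLowerBound()) // 2
        fmt.Println(got.Range().LengthUpperBound()) // 2
    }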
+ b := ret.Refine() + if targetTy.IsSetType() { + if sourceRange.LengthLowerBound() > 0 { + // If the source has at least one element then the result + // must always have at least one too, because value coalescing + // cannot totally empty the set. + b = b.CollectionLengthLowerBound(1) + } + } else { + b = b.CollectionLengthLowerBound(sourceRange.LengthLowerBound()) + } + b = b.CollectionLengthUpperBound(sourceRange.LengthUpperBound()) + return b.NewValue() + default: + return ret + } + +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go index e70b0184c4..05399c9a67 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go @@ -39,6 +39,11 @@ func conversionCollectionToList(ety cty.Type, conv conversion) conversion { return cty.NilVal, err } } + + if val.IsNull() { + val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep()) + } + elems = append(elems, val) i++ @@ -50,7 +55,7 @@ func conversionCollectionToList(ety cty.Type, conv conversion) conversion { if ety == cty.DynamicPseudoType { return cty.ListValEmpty(val.Type().ElementType()), nil } - return cty.ListValEmpty(ety), nil + return cty.ListValEmpty(ety.WithoutOptionalAttributesDeep()), nil } if !cty.CanListVal(elems) { @@ -88,6 +93,11 @@ func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { return cty.NilVal, err } } + + if val.IsNull() { + val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep()) + } + elems = append(elems, val) i++ @@ -99,7 +109,7 @@ func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { if ety == cty.DynamicPseudoType { return cty.SetValEmpty(val.Type().ElementType()), nil } - return cty.SetValEmpty(ety), nil + return cty.SetValEmpty(ety.WithoutOptionalAttributesDeep()), nil } if !cty.CanSetVal(elems) { @@ -180,7 +190,7 @@ func conversionTupleToSet(tupleType cty.Type, setEty cty.Type, unsafe bool) conv if len(tupleEtys) == 0 { // Empty tuple short-circuit return func(val cty.Value, path cty.Path) (cty.Value, error) { - return cty.SetValEmpty(setEty), nil + return cty.SetValEmpty(setEty.WithoutOptionalAttributesDeep()), nil } } @@ -242,6 +252,11 @@ func conversionTupleToSet(tupleType cty.Type, setEty cty.Type, unsafe bool) conv return cty.NilVal, err } } + + if val.IsNull() { + val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep()) + } + elems = append(elems, val) i++ @@ -265,7 +280,7 @@ func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) co if len(tupleEtys) == 0 { // Empty tuple short-circuit return func(val cty.Value, path cty.Path) (cty.Value, error) { - return cty.ListValEmpty(listEty), nil + return cty.ListValEmpty(listEty.WithoutOptionalAttributesDeep()), nil } } @@ -357,7 +372,7 @@ func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) co if len(objectAtys) == 0 { // Empty object short-circuit return func(val cty.Value, path cty.Path) (cty.Value, error) { - return cty.MapValEmpty(mapEty), nil + return cty.MapValEmpty(mapEty.WithoutOptionalAttributesDeep()), nil } } @@ -448,13 +463,28 @@ func conversionMapToObject(mapType cty.Type, objType cty.Type, unsafe bool) conv elemConvs[name] = getConversion(mapEty, objectAty, unsafe) if elemConvs[name] == nil { - // If any of our element conversions are impossible, then the our - // whole conversion is impossible. 
+ // This means that this conversion is impossible. Typically, we + // would give up at this point and declare the whole conversion + // impossible. But, if this attribute is optional then maybe we will + // be able to do this conversion anyway provided the actual concrete + // map doesn't have this value set. + // + // We only do this in "unsafe" mode, because we cannot guarantee + // that the returned conversion will actually succeed once applied. + if objType.AttributeOptional(name) && unsafe { + // This attribute is optional, so let's leave this conversion in + // as a nil, and we can error later if we actually have to + // convert this. + continue + } + + // Otherwise, give up. This conversion is impossible as we have a + // required attribute that doesn't match the map's inner type. return nil } } - // If we fall out here then a conversion is possible, using the + // If we fall out here then a conversion may be possible, using the // element conversions in elemConvs return func(val cty.Value, path cty.Path) (cty.Value, error) { elems := make(map[string]cty.Value, len(elemConvs)) @@ -474,12 +504,43 @@ func conversionMapToObject(mapType cty.Type, objType cty.Type, unsafe bool) conv Key: name, } - conv := elemConvs[name.AsString()] - if conv != nil { + // There are 3 cases here: + // 1. This attribute is not in elemConvs + // 2. This attribute is in elemConvs and is not nil + // 3. This attribute is in elemConvs and is nil. + + // In case 1, we do not enter any of the branches below. This case + // means the attribute type is the same between the map and the + // object, and we don't need to do any conversion. + + if conv, ok := elemConvs[name.AsString()]; conv != nil { + // This is case 2. The attribute type is different between the + // map and the object, and we know how to convert between them. + // So, we reset val to be the converted value and carry on. val, err = conv(val, elemPath) if err != nil { return cty.NilVal, err } + } else if ok { + // This is case 3 and it is an error. The attribute types are + // different between the map and the object, but we cannot + // convert between them. + // + // Now typically, this would be picked earlier on when we were + // building elemConvs. However, in the case of optional + // attributes there was a chance we could still convert the + // overall object even if this particular attribute was not + // convertible. This is because it could have not been set in + // the map, and we could skip over it here and set a null value. + // + // Since we reached this branch, we know that the map did actually + // contain a non-convertible optional attribute. This means we + // error.
+ return cty.NilVal, path.NewErrorf("map element type is incompatible with attribute %q: %s", name.AsString(), MismatchMessage(val.Type(), objType.AttributeType(name.AsString()))) + } + + if val.IsNull() { + val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep()) } elems[name.AsString()] = val diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go index 4d19cf6c5c..95f3925b59 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go @@ -31,3 +31,106 @@ func dynamicFixup(wantType cty.Type) conversion { func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) { return in, nil } + +// dynamicReplace aims to return the out type unchanged, but if it finds a +// dynamic type either directly or in any descendent elements it replaces them +// with the equivalent type from in. +// +// This function assumes that in and out are compatible from a Convert +// perspective, and will panic if it finds that they are not. For example if +// in is an object and out is a map, this function will still attempt to iterate +// through both as if they were the same. +// While the outermost in and out types may be compatible from a Convert +// perspective, inner types may not match when converting between maps and +// objects with optional attributes when the optional attributes don't match +// the map element type. Therefore, in the case of a non-primitive type mismatch, +// we have to assume conversion was possible and pass the out type through. +func dynamicReplace(in, out cty.Type) cty.Type { + if in == cty.DynamicPseudoType || in == cty.NilType { + // Short circuit this case, there's no point worrying about this if in + // is a dynamic type or a nil type. Out is the best we can do. + return out + } + + switch { + case out == cty.DynamicPseudoType: + // So replace out with in. + return in + case out.IsPrimitiveType(), out.IsCapsuleType(): + // out is not dynamic and it doesn't contain descendent elements so just + // return it unchanged. + return out + case out.IsMapType(): + // Maps are compatible with other maps or objects. + if in.IsMapType() { + return cty.Map(dynamicReplace(in.ElementType(), out.ElementType())) + } + + if in.IsObjectType() { + var types []cty.Type + for _, t := range in.AttributeTypes() { + types = append(types, t) + } + unifiedType, _ := unify(types, true) + return cty.Map(dynamicReplace(unifiedType, out.ElementType())) + } + + return out + case out.IsObjectType(): + // Objects are compatible with other objects and maps. + outTypes := map[string]cty.Type{} + if in.IsMapType() { + for attr, attrType := range out.AttributeTypes() { + outTypes[attr] = dynamicReplace(in.ElementType(), attrType) + } + } + + if in.IsObjectType() { + for attr, attrType := range out.AttributeTypes() { + if !in.HasAttribute(attr) { + // If in does not have this attribute, then it is an + // optional attribute and there is nothing we can do except + // to return the type from out even if it is dynamic. + outTypes[attr] = attrType + continue + } + outTypes[attr] = dynamicReplace(in.AttributeType(attr), attrType) + } + } + + return cty.Object(outTypes) + case out.IsSetType(): + // Sets are compatible with other sets, lists, tuples.
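To make the optional-attribute handling in conversionMapToObject above concrete, here is a hedged sketch using the public API (not part of the diff; it relies on convert.Convert falling back to the unsafe conversion, which is where this new path applies):

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/convert"
    )

    func main() {
        // "tags" cannot be converted from the map's string element type, but
        // it is declared optional, so the conversion is no longer rejected
        // outright.
        objTy := cty.ObjectWithOptionalAttrs(map[string]cty.Type{
            "name": cty.String,
            "tags": cty.List(cty.String),
        }, []string{"tags"})

        in := cty.MapVal(map[string]cty.Value{
            "name": cty.StringVal("web"),
        })

        got, err := convert.Convert(in, objTy)
        fmt.Println(got, err) // name="web", tags=null, no error

        // A map that actually set a "tags" element would instead hit the
        // "map element type is incompatible with attribute" error above.
    }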
+ if in.IsSetType() || in.IsListType() { + return cty.Set(dynamicReplace(in.ElementType(), out.ElementType())) + } + + if in.IsTupleType() { + unifiedType, _ := unify(in.TupleElementTypes(), true) + return cty.Set(dynamicReplace(unifiedType, out.ElementType())) + } + + return out + case out.IsListType(): + // Lists are compatible with other lists, sets, and tuples. + if in.IsSetType() || in.IsListType() { + return cty.List(dynamicReplace(in.ElementType(), out.ElementType())) + } + + if in.IsTupleType() { + unifiedType, _ := unify(in.TupleElementTypes(), true) + return cty.List(dynamicReplace(unifiedType, out.ElementType())) + } + + return out + case out.IsTupleType(): + // Tuples are only compatible with other tuples + var types []cty.Type + for ix := 0; ix < len(out.TupleElementTypes()); ix++ { + types = append(types, dynamicReplace(in.TupleElementType(ix), out.TupleElementType(ix))) + } + return cty.Tuple(types) + default: + panic("unrecognized type " + out.FriendlyName()) + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go index 098c109bdb..51958ef4b4 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go @@ -80,13 +80,19 @@ func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion { } } + if val.IsNull() { + // Strip optional attributes out of the embedded type for null + // values. + val = cty.NullVal(val.Type().WithoutOptionalAttributesDeep()) + } + attrVals[name] = val } for name := range outOptionals { if _, exists := attrVals[name]; !exists { wantTy := outAtys[name] - attrVals[name] = cty.NullVal(wantTy) + attrVals[name] = cty.NullVal(wantTy.WithoutOptionalAttributesDeep()) } } diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/public.go b/vendor/github.com/zclconf/go-cty/cty/convert/public.go index af19bdc501..aab0d0ec9a 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/public.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/public.go @@ -40,7 +40,7 @@ func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion { // This is a convenience wrapper around calling GetConversionUnsafe and then // immediately passing the given value to the resulting function. 
func Convert(in cty.Value, want cty.Type) (cty.Value, error) { - if in.Type().Equals(want) { + if in.Type().Equals(want.WithoutOptionalAttributesDeep()) { return in, nil } diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/unify.go b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go index 144acd8516..ac6b64db5a 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/unify.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go @@ -447,7 +447,6 @@ func unifyTupleTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, conversions[i] = GetConversion(ty, retTy) } if conversions[i] == nil { - // Shouldn't be reachable, since we were able to unify return unifyTupleTypesToList(types, unsafe) } } @@ -483,8 +482,8 @@ func unifyTupleTypesToList(types []cty.Type, unsafe bool) (cty.Type, []Conversio conversions[i] = GetConversion(ty, retTy) } if conversions[i] == nil { - // Shouldn't be reachable, since we were able to unify - return unifyObjectTypesToMap(types, unsafe) + // no conversion was found + return cty.NilType, nil } } return retTy, conversions diff --git a/vendor/github.com/zclconf/go-cty/cty/ctystrings/doc.go b/vendor/github.com/zclconf/go-cty/cty/ctystrings/doc.go new file mode 100644 index 0000000000..0ea7f984e4 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/ctystrings/doc.go @@ -0,0 +1,26 @@ +// Package ctystrings is a collection of string manipulation utilities which +// intend to help application developers implement string-manipulation +// functionality in a way that respects the cty model of strings, even when +// they are working in the realm of Go strings. +// +// cty strings are, internally, NFC-normalized as defined in Unicode Standard +// Annex #15 and encoded as UTF-8. +// +// When working with [cty.Value] of string type cty manages this +// automatically as an implementation detail, but when applications call +// [Value.AsString] they will receive a value that has been subjected to that +// normalization, and so may need to take that normalization into account when +// manipulating the resulting string or comparing it with other Go strings +// that did not originate in a [cty.Value]. +// +// Although the core representation of [cty.String] only considers whole +// strings, it's also conventional in other locations such as the standard +// library functions to consider strings as being sequences of grapheme +// clusters as defined by Unicode Standard Annex #29, which adds further +// rules about combining multiple consecutive codepoints together into a +// single user-perceived character. Functions that work with substrings should +// always use grapheme clusters as their smallest unit of splitting strings, +// and never break strings in the middle of a grapheme cluster. The functions +// in this package respect that convention unless otherwise stated in their +// documentation. +package ctystrings diff --git a/vendor/github.com/zclconf/go-cty/cty/ctystrings/normalize.go b/vendor/github.com/zclconf/go-cty/cty/ctystrings/normalize.go new file mode 100644 index 0000000000..9b3bce9031 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/ctystrings/normalize.go @@ -0,0 +1,14 @@ +package ctystrings + +import ( + "golang.org/x/text/unicode/norm" +) + +// Normalize applies NFC normalization to the given string, returning the +// transformed string. +// +// This function achieves the same effect as wrapping a string in a value +// using [cty.StringVal] and then unwrapping it again using [Value.AsString].
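A minimal usage sketch for Normalize (not part of the diff):

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/ctystrings"
    )

    func main() {
        // "é" spelled as 'e' followed by U+0301 COMBINING ACUTE ACCENT
        // NFC-normalizes to the single precomposed codepoint U+00E9.
        decomposed := "e\u0301"
        fmt.Println(ctystrings.Normalize(decomposed) == "\u00e9") // true

        // Round-tripping through a cty string value has the same effect.
        fmt.Println(cty.StringVal(decomposed).AsString() == "\u00e9") // true
    }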
+func Normalize(str string) string { + return norm.NFC.String(str) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/ctystrings/prefix.go b/vendor/github.com/zclconf/go-cty/cty/ctystrings/prefix.go new file mode 100644 index 0000000000..bbf045233b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/ctystrings/prefix.go @@ -0,0 +1,135 @@ +package ctystrings + +import ( + "fmt" + "unicode/utf8" + + "github.com/apparentlymart/go-textseg/v15/textseg" + "golang.org/x/text/unicode/norm" +) + +// SafeKnownPrefix takes a string intended to represent a known prefix of +// another string and modifies it so that it would be safe to use with +// byte-based prefix matching against another NFC-normalized string. It +// also takes into account grapheme cluster boundaries and trims off any +// suffix that could potentially be an incomplete grapheme cluster. +// +// Specifically, SafeKnownPrefix first applies NFC normalization to the prefix +// and then trims off one or more characters from the end of the string which +// could potentially be transformed into a different character if another +// string were appended to it. For example, a trailing latin letter will +// typically be trimmed because appending a combining diacritic mark would +// transform it into a different character. +// +// This transformation is important whenever the remainder of the string is +// arbitrary user input not directly controlled by the application. If an +// application can guarantee that the remainder of the string will not begin +// with combining marks then it is safe to instead just normalize the prefix +// string with [Normalize]. +func SafeKnownPrefix(prefix string) string { + prefix = Normalize(prefix) + + // Our starting approach here is essentially what a streaming parser would + // do when consuming a Unicode string in chunks and needing to determine + // what prefix of the current buffer is safe to process without waiting for + // more information, which is described in TR15 section 13.1 + // "Buffering with Unicode Normalization": + // https://unicode.org/reports/tr15/#Buffering_with_Unicode_Normalization + // + // The general idea here is to find the last character in the string that + // could potentially start a sequence of codepoints that would combine + // together, and then truncate the string to exclude that character and + // everything after it. + + form := norm.NFC + lastBoundary := form.LastBoundary([]byte(prefix)) + if lastBoundary != -1 && lastBoundary != len(prefix) { + prefix = prefix[:lastBoundary] + // If we get here then we've already shortened the prefix and so + // further analysis below is unnecessary because it would be relying + // on an incomplete prefix anyway. + return prefix + } + + // Now we'll use the textseg package's grapheme cluster scanner to scan + // as far through the string as we can without the scanner telling us + // that it would need more bytes to decide. + // + // This step is conservative because the grapheme cluster rules are not + // designed with prefix-matching in mind. In the base case we'll just + // always discard the last grapheme cluster, although we do have some + // special cases for trailing codepoints that can't possibly combine with + // subsequent codepoints to form a single grapheme cluster and which seem + // likely to arise often in practical use. 
+ remain := []byte(prefix) + prevBoundary := 0 + thisBoundary := 0 + for len(remain) > 0 { + advance, _, err := textseg.ScanGraphemeClusters(remain, false) + if err != nil { + // ScanGraphemeClusters should never return an error because + // any sequence of valid UTF-8 encodings is valid input. + panic(fmt.Sprintf("textseg.ScanGraphemeClusters returned error: %s", err)) + } + if advance == 0 { + // If we have at least one byte remaining but the scanner cannot + // advance then that means the remainder might be an incomplete + // grapheme cluster and so we need to stop here, discarding the + // rest of the input. However, we do now know that we can safely + // include what we found on the previous iteration of this loop. + prevBoundary = thisBoundary + break + } + prevBoundary = thisBoundary + thisBoundary += advance + remain = remain[advance:] + } + + // This is our heuristic for detecting cases where we can be sure that + // the above algorithm was too conservative because the last segment + // we found is definitely not subject to the grapheme cluster "do not split" + // rules. + suspect := prefix[prevBoundary:thisBoundary] + if sequenceMustEndGraphemeCluster(suspect) { + prevBoundary = thisBoundary + } + + return prefix[:prevBoundary] +} + +// sequenceMustEndGraphemeCluster is a heuristic we use to avoid discarding +// the final grapheme cluster of a prefix in SafeKnownPrefix by recognizing +// that a particular sequence is one known to not be subject to any of +// the UAX29 "do not break" rules. +// +// If this function returns true then it is safe to include the given byte +// sequence at the end of a safe prefix. Otherwise we don't know whether or +// not it is safe. +func sequenceMustEndGraphemeCluster(s string) bool { + // For now we're only considering sequences that represent a single + // codepoint. We'll assume that any sequence of two or more codepoints + // that could be a grapheme cluster might be extendable. + if utf8.RuneCountInString(s) != 1 { + return false + } + + r, _ := utf8.DecodeRuneInString(s) + + // Our initial ruleset is focused on characters that are commonly used + // as delimiters in text intended for both human and machine use, such + // as JSON documents. + // + // We don't include any letters or digits of any script here intentionally + // because those are the ones most likely to be subject to combining rules + // in either current or future Unicode specifications. + // + // We can safely grow this set over time, but we should be very careful + // about shrinking it because it could cause value refinements to loosen + // and thus cause results that were once known to become unknown. 
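A hedged sketch of SafeKnownPrefix in use (not part of the diff). The expected outputs follow from the algorithm above: a trailing letter is trimmed because a following combining mark could change it, while a trailing delimiter from the safe set below survives intact. cty's RefinementBuilder.StringPrefix is documented to apply this same trimming automatically.

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty/ctystrings"
    )

    func main() {
        // The final 'o' could combine with a subsequent diacritic, so it is
        // not a safe byte-prefix boundary and gets trimmed.
        fmt.Println(ctystrings.SafeKnownPrefix("hello")) // "hell"

        // '/' cannot begin or extend a combining sequence, so the prefix is
        // kept whole.
        fmt.Println(ctystrings.SafeKnownPrefix("https://")) // "https://"
    }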
+ switch r { + case '-', '_', ':', ';', '/', '\\', ',', '.', '(', ')', '{', '}', '[', ']', '|', '?', '!', '~', ' ', '\t', '@', '#', '$', '%', '^', '&', '*', '+', '"', '\'': + return true + default: + return false + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/element_iterator.go b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go index 9e4fff66f5..62c9ea57cd 100644 --- a/vendor/github.com/zclconf/go-cty/cty/element_iterator.go +++ b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go @@ -66,7 +66,7 @@ func elementIterator(val Value) ElementIterator { idx: -1, } case val.ty.IsSetType(): - rawSet := val.v.(set.Set) + rawSet := val.v.(set.Set[interface{}]) return &setElementIterator{ ety: val.ty.ElementType(), setIt: rawSet.Iterator(), @@ -139,7 +139,7 @@ func (it *mapElementIterator) Next() bool { type setElementIterator struct { ety Type - setIt *set.Iterator + setIt *set.Iterator[interface{}] } func (it *setElementIterator) Element() (Value, Value) { diff --git a/vendor/github.com/zclconf/go-cty/cty/function/argument.go b/vendor/github.com/zclconf/go-cty/cty/function/argument.go index 5a26c275f4..61a1cf97df 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/argument.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/argument.go @@ -10,6 +10,9 @@ type Parameter struct { // value, but callers may use it for documentation, etc. Name string + // Description is an optional description for the argument. + Description string + // A type that any argument for this parameter must conform to. // cty.DynamicPseudoType can be used, either at top-level or nested // in a parameterized type, to indicate that any type should be diff --git a/vendor/github.com/zclconf/go-cty/cty/function/function.go b/vendor/github.com/zclconf/go-cty/cty/function/function.go index c00a0e7f85..6fc968282e 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/function.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/function.go @@ -14,6 +14,9 @@ type Function struct { // Spec is the specification of a function, used to instantiate // a new Function. type Spec struct { + // Description is an optional description for the function specification. + Description string + // Params is a description of the positional parameters for the function. // The standard checking logic rejects any calls that do not provide // arguments conforming to this definition, freeing the function @@ -36,6 +39,19 @@ type Spec struct { // depending on its arguments. Type TypeFunc + // RefineResult is an optional callback for describing additional + // refinements for the result value beyond what can be described using + // a type constraint. + // + // A refinement callback should always return the same builder it was + // given, typically after modifying it using the methods of + // [cty.RefinementBuilder]. + // + // Any refinements described by this callback must hold for the entire + // range of results from the function. For refinements that only apply + // to certain results, use direct refinement within [Impl] instead. + RefineResult func(*cty.RefinementBuilder) *cty.RefinementBuilder + // Impl is the ImplFunc that implements the function's behavior. 
// // Functions are expected to behave as pure functions, and not create @@ -106,20 +122,13 @@ func (f Function) ReturnType(argTypes []cty.Type) (cty.Type, error) { return f.ReturnTypeForValues(vals) } -// ReturnTypeForValues is similar to ReturnType but can be used if the caller -// already knows the values of some or all of the arguments, in which case -// the function may be able to determine a more definite result if its -// return type depends on the argument *values*. -// -// For any arguments whose values are not known, pass an Unknown value of -// the appropriate type. -func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) { +func (f Function) returnTypeForValues(args []cty.Value) (ty cty.Type, dynTypedArgs bool, err error) { var posArgs []cty.Value var varArgs []cty.Value if f.spec.VarParam == nil { if len(args) != len(f.spec.Params) { - return cty.Type{}, fmt.Errorf( + return cty.Type{}, false, fmt.Errorf( "wrong number of arguments (%d required; %d given)", len(f.spec.Params), len(args), ) @@ -129,7 +138,7 @@ func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) varArgs = nil } else { if len(args) < len(f.spec.Params) { - return cty.Type{}, fmt.Errorf( + return cty.Type{}, false, fmt.Errorf( "wrong number of arguments (at least %d required; %d given)", len(f.spec.Params), len(args), ) @@ -158,7 +167,7 @@ func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) } if val.IsNull() && !spec.AllowNull { - return cty.Type{}, NewArgErrorf(i, "argument must not be null") + return cty.Type{}, false, NewArgErrorf(i, "argument must not be null") } // AllowUnknown is ignored for type-checking, since we expect to be @@ -168,13 +177,13 @@ func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) if val.Type() == cty.DynamicPseudoType { if !spec.AllowDynamicType { - return cty.DynamicPseudoType, nil + return cty.DynamicPseudoType, true, nil } } else if errs := val.Type().TestConformance(spec.Type); errs != nil { // For now we'll just return the first error in the set, since // we don't have a good way to return the whole list here. // Would be good to do something better at some point... - return cty.Type{}, NewArgError(i, errs[0]) + return cty.Type{}, false, NewArgError(i, errs[0]) } } @@ -193,18 +202,18 @@ func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) } if val.IsNull() && !spec.AllowNull { - return cty.Type{}, NewArgErrorf(realI, "argument must not be null") + return cty.Type{}, false, NewArgErrorf(realI, "argument must not be null") } if val.Type() == cty.DynamicPseudoType { if !spec.AllowDynamicType { - return cty.DynamicPseudoType, nil + return cty.DynamicPseudoType, true, nil } } else if errs := val.Type().TestConformance(spec.Type); errs != nil { // For now we'll just return the first error in the set, since // we don't have a good way to return the whole list here. // Would be good to do something better at some point... 
- return cty.Type{}, NewArgError(i, errs[0]) + return cty.Type{}, false, NewArgError(i, errs[0]) } } } @@ -218,17 +227,53 @@ func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) } }() - return f.spec.Type(args) + ty, err = f.spec.Type(args) + return ty, false, err +} + +// ReturnTypeForValues is similar to ReturnType but can be used if the caller +// already knows the values of some or all of the arguments, in which case +// the function may be able to determine a more definite result if its +// return type depends on the argument *values*. +// +// For any arguments whose values are not known, pass an Unknown value of +// the appropriate type. +func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) { + ty, _, err = f.returnTypeForValues(args) + return ty, err } // Call actually calls the function with the given arguments, which must // conform to the function's parameter specification or an error will be // returned. func (f Function) Call(args []cty.Value) (val cty.Value, err error) { - expectedType, err := f.ReturnTypeForValues(args) + expectedType, dynTypeArgs, err := f.returnTypeForValues(args) if err != nil { return cty.NilVal, err } + if dynTypeArgs { + // returnTypeForValues sets this if any argument was inexactly typed + // and the corresponding parameter did not indicate it could deal with + // that. In that case we also avoid calling the implementation function + // because it will also typically not be ready to deal with that case. + return cty.UnknownVal(expectedType), nil + } + + if refineResult := f.spec.RefineResult; refineResult != nil { + // If this function has a refinement callback then we'll refine + // our result value in the same way regardless of how we return. + // It's the function author's responsibility to ensure that the + // refinements they specify are valid for the full range of possible + // return values from the function. If not, this will panic when + // detecting an inconsistency. + defer func() { + if val != cty.NilVal { + if val.IsKnown() || val.Type() != cty.DynamicPseudoType { + val = val.RefineWith(refineResult) + } + } + }() + } // Type checking already dealt with most situations relating to our // parameter specification, but we still need to deal with unknown @@ -344,3 +389,62 @@ func (f Function) VarParam() *Parameter { ret := *f.spec.VarParam return &ret } + +// Description returns a human-readable description of the function. +func (f Function) Description() string { + return f.spec.Description +} + +// WithNewDescriptions returns a new function that has the same signature +// and implementation as the receiver but has the function description and +// the parameter descriptions replaced with those given in the arguments. +// +// All descriptions may be given as an empty string to specify that there +// should be no description at all. +// +// The paramDescs argument must match the number of parameters +// the receiver expects, or this function will panic. If the function has a +// VarParam then that counts as one parameter for the sake of this rule. The +// given descriptions will be assigned in order starting with the positional +// arguments in their declared order, followed by the variadic parameter if +// any.
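The new description plumbing can be exercised as in this hedged sketch (not part of the diff; it assumes stdlib.UpperFunc, which takes a single string parameter):

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty/function/stdlib"
    )

    func main() {
        // Replace both the function-level and parameter-level descriptions.
        upper := stdlib.UpperFunc.WithNewDescriptions(
            "Converts all cased letters in the given string to uppercase.",
            []string{"The string to convert."},
        )

        fmt.Println(upper.Description())
        fmt.Println(upper.Params()[0].Description)
    }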
+// + // As a special case, WithNewDescriptions will accept a paramDescs which + // does not cover the receiver's variadic parameter (if any), so that it's + // possible to add a variadic parameter to a function which didn't previously + // have one without that being a breaking change for an existing caller using + // WithNewDescriptions against that function. In this case the base description + // of the variadic parameter will be preserved. +func (f Function) WithNewDescriptions(funcDesc string, paramDescs []string) Function { + retSpec := *f.spec // shallow copy of the receiver + retSpec.Description = funcDesc + + retSpec.Params = make([]Parameter, len(f.spec.Params)) + copy(retSpec.Params, f.spec.Params) // shallow copy of positional parameters + if f.spec.VarParam != nil { + retVarParam := *f.spec.VarParam // shallow copy of variadic parameter + retSpec.VarParam = &retVarParam + } + + if retSpec.VarParam != nil { + if with, without := len(retSpec.Params)+1, len(retSpec.Params); len(paramDescs) != with && len(paramDescs) != without { + panic(fmt.Sprintf("paramDescs must have length of either %d or %d", with, without)) + } + } else { + if want := len(retSpec.Params); len(paramDescs) != want { + panic(fmt.Sprintf("paramDescs must have length %d", want)) + } + } + + posParamDescs := paramDescs[:len(retSpec.Params)] + varParamDescs := paramDescs[len(retSpec.Params):] // guaranteed to be zero or one elements because of the rules above + + for i, desc := range posParamDescs { + retSpec.Params[i].Description = desc + } + for _, desc := range varParamDescs { + retSpec.VarParam.Description = desc + } + + return New(&retSpec) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go index 4f1ecc8d9b..2826bf6eb0 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go @@ -6,6 +6,7 @@ import ( ) var NotFunc = function.New(&function.Spec{ + Description: `Applies the logical NOT operation to the given boolean value.`, Params: []function.Parameter{ { Name: "val", @@ -14,13 +15,15 @@ var NotFunc = function.New(&function.Spec{ AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { return args[0].Not(), nil }, }) var AndFunc = function.New(&function.Spec{ + Description: `Applies the logical AND operation to the given boolean values.`, Params: []function.Parameter{ { Name: "a", @@ -35,13 +38,15 @@ var AndFunc = function.New(&function.Spec{ AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { return args[0].And(args[1]), nil }, }) var OrFunc = function.New(&function.Spec{ + Description: `Applies the logical OR operation to the given boolean values.`, Params: []function.Parameter{ { Name: "a", @@ -56,7 +61,8 @@ var OrFunc = function.New(&function.Spec{ AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { return args[0].Or(args[1]), nil }, diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go
index a132e0cde5..fe67e6f3fe 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go @@ -30,6 +30,7 @@ func BytesVal(buf []byte) cty.Value { // BytesLen is a Function that returns the length of the buffer encapsulated // in a Bytes value. var BytesLenFunc = function.New(&function.Spec{ + Description: `Returns the total number of bytes in the given buffer.`, Params: []function.Parameter{ { Name: "buf", @@ -37,7 +38,8 @@ var BytesLenFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { bufPtr := args[0].EncapsulatedValue().(*[]byte) return cty.NumberIntVal(int64(len(*bufPtr))), nil @@ -46,6 +48,7 @@ var BytesLenFunc = function.New(&function.Spec{ // BytesSlice is a Function that returns a slice of the given Bytes value. var BytesSliceFunc = function.New(&function.Spec{ + Description: `Extracts a subslice from the given buffer.`, Params: []function.Parameter{ { Name: "buf", @@ -63,7 +66,8 @@ var BytesSliceFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(Bytes), + Type: function.StaticReturnType(Bytes), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { bufPtr := args[0].EncapsulatedValue().(*[]byte) diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go index f05132e1c5..1816bb9c96 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go @@ -12,6 +12,7 @@ import ( ) var HasIndexFunc = function.New(&function.Spec{ + Description: `Returns true if the given collection can be indexed with the given key without producing an error, or false otherwise.`, Params: []function.Parameter{ { Name: "collection", @@ -31,12 +32,14 @@ } return cty.Bool, nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].HasIndex(args[1]), nil }, }) var IndexFunc = function.New(&function.Spec{ + Description: `Returns the element with the given key from the given collection, or raises an error if there is no such element.`, Params: []function.Parameter{ { Name: "collection", @@ -106,11 +109,13 @@ }) var LengthFunc = function.New(&function.Spec{ + Description: `Returns the number of elements in the given collection.`, Params: []function.Parameter{ { Name: "collection", Type: cty.DynamicPseudoType, AllowDynamicType: true, + AllowUnknown: true, AllowMarked: true, }, }, @@ -121,16 +126,19 @@ } return cty.Number, nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].Length(), nil }, }) var ElementFunc = function.New(&function.Spec{ + Description: `Returns the element with the given index from the given list or tuple, applying the modulo operation to the given index if it's greater than the number of elements.`, Params: []function.Parameter{ { - Name: "list", - Type: cty.DynamicPseudoType, + Name: "list", + Type: cty.DynamicPseudoType, + AllowMarked: true, }, { Name:
"index", @@ -185,11 +193,12 @@ var ElementFunc = function.New(&function.Spec{ return cty.DynamicVal, fmt.Errorf("cannot use element function with a negative index") } - if !args[0].IsKnown() { + input, marks := args[0].Unmark() + if !input.IsKnown() { return cty.UnknownVal(retType), nil } - l := args[0].LengthInt() + l := input.LengthInt() if l == 0 { return cty.DynamicVal, errors.New("cannot use element function with an empty list") } @@ -197,16 +206,18 @@ var ElementFunc = function.New(&function.Spec{ // We did all the necessary type checks in the type function above, // so this is guaranteed not to fail. - return args[0].Index(cty.NumberIntVal(int64(index))), nil + return input.Index(cty.NumberIntVal(int64(index))).WithMarks(marks), nil }, }) // CoalesceListFunc is a function that takes any number of list arguments // and returns the first one that isn't empty. var CoalesceListFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Returns the first of the given sequences that has a length greater than zero.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "vals", + Description: `List or tuple values to test in the given order.`, Type: cty.DynamicPseudoType, AllowUnknown: true, AllowDynamicType: true, @@ -243,6 +254,7 @@ var CoalesceListFunc = function.New(&function.Spec{ return last, nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { for _, arg := range args { if !arg.IsKnown() { @@ -268,13 +280,15 @@ var CoalesceListFunc = function.New(&function.Spec{ // CompactFunc is a function that takes a list of strings and returns a new list // with any empty string elements removed. var CompactFunc = function.New(&function.Spec{ + Description: `Removes all empty string elements from the given list of strings.`, Params: []function.Parameter{ { Name: "list", Type: cty.List(cty.String), }, }, - Type: function.StaticReturnType(cty.List(cty.String)), + Type: function.StaticReturnType(cty.List(cty.String)), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { listVal := args[0] if !listVal.IsWhollyKnown() { @@ -304,6 +318,7 @@ var CompactFunc = function.New(&function.Spec{ // ContainsFunc is a function that determines whether a given list or // set contains a given single value as one of its elements. var ContainsFunc = function.New(&function.Spec{ + Description: `Returns true if the given value is a value in the given list, tuple, or set, or false otherwise.`, Params: []function.Parameter{ { Name: "list", @@ -314,7 +329,8 @@ var ContainsFunc = function.New(&function.Spec{ Type: cty.DynamicPseudoType, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { arg := args[0] ty := arg.Type() @@ -362,6 +378,7 @@ var ContainsFunc = function.New(&function.Spec{ // DistinctFunc is a function that takes a list and returns a new list // with any duplicate elements removed. 
var DistinctFunc = function.New(&function.Spec{ + Description: `Removes any duplicate values from the given list, preserving the order of remaining elements.`, Params: []function.Parameter{ { Name: "list", @@ -371,6 +388,7 @@ var DistinctFunc = function.New(&function.Spec{ Type: func(args []cty.Value) (cty.Type, error) { return args[0].Type(), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { listVal := args[0] @@ -397,14 +415,17 @@ var DistinctFunc = function.New(&function.Spec{ // ChunklistFunc is a function that splits a single list into fixed-size chunks, // returning a list of lists. var ChunklistFunc = function.New(&function.Spec{ + Description: `Splits a single list into multiple lists where each has at most the given number of elements.`, Params: []function.Parameter{ { Name: "list", + Description: `The list to split into chunks.`, Type: cty.List(cty.DynamicPseudoType), AllowMarked: true, }, { Name: "size", + Description: `The maximum length of each chunk. All but the last element of the result is guaranteed to be of exactly this size.`, Type: cty.Number, AllowMarked: true, }, @@ -412,6 +433,7 @@ var ChunklistFunc = function.New(&function.Spec{ Type: func(args []cty.Value) (cty.Type, error) { return cty.List(args[0].Type()), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { listVal := args[0] sizeVal := args[1] @@ -469,6 +491,7 @@ var ChunklistFunc = function.New(&function.Spec{ // FlattenFunc is a function that takes a list and replaces any elements // that are lists with a flattened sequence of the list contents. var FlattenFunc = function.New(&function.Spec{ + Description: `Transforms a list, set, or tuple value into a tuple by replacing any given elements that are themselves sequences with a flattened tuple of all of the nested elements concatenated together.`, Params: []function.Parameter{ { Name: "list", @@ -498,6 +521,7 @@ var FlattenFunc = function.New(&function.Spec{ } return cty.Tuple(tys), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { inputList := args[0] @@ -523,16 +547,25 @@ func flattener(flattenList cty.Value) ([]cty.Value, []cty.ValueMarks, bool) { if len(flattenListMarks) > 0 { markses = append(markses, flattenListMarks) } + if !flattenList.Length().IsKnown() { // If we don't know the length of what we're flattening then we can't // predict the length of our result yet either. return nil, markses, false } + out := make([]cty.Value, 0) isKnown := true for it := flattenList.ElementIterator(); it.Next(); { _, val := it.Element() - if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() { + + // Any dynamic types could result in more collections that need to be + // flattened, so the type cannot be known. + if val == cty.DynamicVal { + isKnown = false + } + + if !val.IsNull() && (val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType()) { if !val.IsKnown() { isKnown = false _, unknownMarks := val.Unmark() @@ -556,9 +589,11 @@ func flattener(flattenList cty.Value) ([]cty.Value, []cty.ValueMarks, bool) { // KeysFunc is a function that takes a map and returns a sorted list of the map keys. var KeysFunc = function.New(&function.Spec{ + Description: `Returns a list of the keys of the given map in lexicographical order.`, Params: []function.Parameter{ { Name: "inputMap", + Description: `The map to extract keys from. 
May instead be an object-typed value, in which case the result is a tuple of the object attributes.`, Type: cty.DynamicPseudoType, AllowUnknown: true, AllowMarked: true, @@ -585,6 +620,7 @@ var KeysFunc = function.New(&function.Spec{ return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type") } }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { // We must unmark the value before we can use ElementIterator on it, and // then re-apply the same marks (possibly none) when we return. Since we @@ -631,6 +667,7 @@ var KeysFunc = function.New(&function.Spec{ // LookupFunc is a function that performs dynamic lookups of map types. var LookupFunc = function.New(&function.Spec{ + Description: `Returns the value of the element with the given key from the given map, or returns the default value if there is no such element.`, Params: []function.Parameter{ { Name: "inputMap", @@ -723,7 +760,8 @@ var LookupFunc = function.New(&function.Spec{ // If more than one given map or object defines the same key then the one that // is later in the argument sequence takes precedence. var MergeFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Merges all of the elements from the given maps into a single map, or the attributes from given objects into a single object.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "maps", Type: cty.DynamicPseudoType, @@ -804,6 +842,7 @@ var MergeFunc = function.New(&function.Spec{ return cty.Object(attrs), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { outputMap := make(map[string]cty.Value) var markses []cty.ValueMarks // remember any marked maps/objects we find @@ -839,10 +878,12 @@ var MergeFunc = function.New(&function.Spec{ // ReverseListFunc takes a sequence and produces a new sequence of the same length // with all of the same elements as the given sequence but in reverse order. var ReverseListFunc = function.New(&function.Spec{ + Description: `Returns the given list with its elements in reverse order.`, Params: []function.Parameter{ { - Name: "list", - Type: cty.DynamicPseudoType, + Name: "list", + Type: cty.DynamicPseudoType, + AllowMarked: true, }, }, Type: func(args []cty.Value) (cty.Type, error) { @@ -861,20 +902,23 @@ var ReverseListFunc = function.New(&function.Spec{ return cty.NilType, function.NewArgErrorf(0, "can only reverse list or tuple values, not %s", argTy.FriendlyName()) } }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - in := args[0].AsValueSlice() - outVals := make([]cty.Value, len(in)) - for i, v := range in { + in, marks := args[0].Unmark() + inVals := in.AsValueSlice() + outVals := make([]cty.Value, len(inVals)) + + for i, v := range inVals { outVals[len(outVals)-i-1] = v } switch { case retType.IsTupleType(): - return cty.TupleVal(outVals), nil + return cty.TupleVal(outVals).WithMarks(marks), nil default: if len(outVals) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil + return cty.ListValEmpty(retType.ElementType()).WithMarks(marks), nil } - return cty.ListVal(outVals), nil + return cty.ListVal(outVals).WithMarks(marks), nil } }, }) @@ -884,11 +928,14 @@ var ReverseListFunc = function.New(&function.Spec{ // preserving the ordering of all of the input lists. Otherwise the result is a // set of tuples. 
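A hedged sketch of SetProduct (defined just below; not part of the diff), including the new behavior for arguments of unknown length: instead of collapsing to a wholly-unrefined unknown, the result now carries length bounds inferred from the arguments, per the refinement logic in the Impl.

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/function/stdlib"
    )

    func main() {
        colors := cty.SetVal([]cty.Value{cty.StringVal("red"), cty.StringVal("blue")})
        sizes := cty.SetVal([]cty.Value{cty.StringVal("S"), cty.StringVal("M")})

        prod, err := stdlib.SetProduct(colors, sizes)
        if err != nil {
            panic(err)
        }
        fmt.Println(prod.LengthInt()) // 4: the full cartesian product

        // With an argument of unknown length (bounded at 3 elements), the
        // result is unknown but refined to between 1 and 2*3 = 6 elements.
        unknown := cty.UnknownVal(cty.Set(cty.String)).Refine().
            CollectionLengthUpperBound(3).
            NewValue()
        prod2, _ := stdlib.SetProduct(colors, unknown)
        fmt.Println(prod2.IsKnown())                  // false
        fmt.Println(prod2.Range().LengthUpperBound()) // 6
    }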
var SetProductFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Calculates the cartesian product of two or more sets.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ - Name: "sets", - Type: cty.DynamicPseudoType, - AllowMarked: true, + Name: "sets", + Description: "The sets to consider. Also accepts lists and tuples, and if all arguments are of list or tuple type then the result will preserve the input ordering", + Type: cty.DynamicPseudoType, + AllowMarked: true, + AllowUnknown: true, }, Type: func(args []cty.Value) (retType cty.Type, err error) { if len(args) < 2 { @@ -930,17 +977,22 @@ var SetProductFunc = function.New(&function.Spec{ } return cty.Set(cty.Tuple(elemTys)), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { ety := retType.ElementType() var retMarks cty.ValueMarks total := 1 + var hasUnknownLength bool for _, arg := range args { arg, marks := arg.Unmark() retMarks = cty.NewValueMarks(retMarks, marks) - if !arg.Length().IsKnown() { - return cty.UnknownVal(retType).Mark(marks), nil + // Continue processing after we find an argument with unknown + // length to ensure that we cover all the marks + if !(arg.IsKnown() && arg.Length().IsKnown()) { + hasUnknownLength = true + continue } // Because of our type checking function, we are guaranteed that @@ -949,13 +1001,72 @@ var SetProductFunc = function.New(&function.Spec{ total *= arg.LengthInt() } + if hasUnknownLength { + defer func() { + // We're definitely going to return from somewhere in this + // branch and however we do it we must reapply the marks + // on the way out. + ret = ret.WithMarks(retMarks) + }() + ret := cty.UnknownVal(retType) + + // Even if we don't know the exact length we may be able to + // constrain the upper and lower bounds of the resulting length. + maxLength := 1 + for _, arg := range args { + arg, _ := arg.Unmark() // safe to discard marks because "retMarks" already contains them all + argRng := arg.Range() + ty := argRng.TypeConstraint() + var argMaxLen int + if ty.IsCollectionType() { + argMaxLen = argRng.LengthUpperBound() + } else if ty.IsTupleType() { + argMaxLen = ty.Length() + } else { + // Should not get here but if we do then we'll just + // bail out with an unrefined unknown value. + return ret, nil + } + // The upper bound of a totally-unrefined collection is + // math.MaxInt, which will quickly get us to integer overflow + // here, and so out of pragmatism we'll just impose a reasonable + // upper limit on what is a useful bound to track and return + // unrefined for unusually-large input. + if argMaxLen > 1024 { // arbitrarily-decided threshold + return ret, nil + } + maxLength *= argMaxLen + if maxLength > 2048 { // arbitrarily-decided threshold + return ret, nil + } + if maxLength < 0 { // Seems like we already overflowed, then. + return ret, nil + } + } + + if maxLength == 0 { + // This refinement will typically allow the unknown value to + // collapse into a known empty collection. + ret = ret.Refine().CollectionLength(0).NewValue() + } else { + // If we know there's a nonzero maximum number of elements then + // set element coalescing cannot reduce to fewer than one + // element. + ret = ret.Refine(). + CollectionLengthLowerBound(1). + CollectionLengthUpperBound(maxLength). + NewValue() + } + return ret, nil + } + if total == 0 { // If any of the arguments was an empty collection then our result // is also an empty collection, which we'll short-circuit here. 
if retType.IsListType() { - return cty.ListValEmpty(ety).Mark(retMarks), nil + return cty.ListValEmpty(ety).WithMarks(retMarks), nil } - return cty.SetValEmpty(ety).Mark(retMarks), nil + return cty.SetValEmpty(ety).WithMarks(retMarks), nil } subEtys := ety.TupleElementTypes() @@ -1016,10 +1127,12 @@ var SetProductFunc = function.New(&function.Spec{ // SliceFunc is a function that extracts some consecutive elements // from within a list. var SliceFunc = function.New(&function.Spec{ + Description: `Extracts a subslice of the given list or tuple value.`, Params: []function.Parameter{ { - Name: "list", - Type: cty.DynamicPseudoType, + Name: "list", + Type: cty.DynamicPseudoType, + AllowMarked: true, }, { Name: "start_index", @@ -1057,11 +1170,12 @@ var SliceFunc = function.New(&function.Spec{ } return cty.Tuple(argTy.TupleElementTypes()[startIndex:endIndex]), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - inputList := args[0] + inputList, marks := args[0].Unmark() if retType == cty.DynamicPseudoType { - return cty.DynamicVal, nil + return cty.DynamicVal.WithMarks(marks), nil } // we ignore idxsKnown return value here because the indices are always @@ -1073,18 +1187,18 @@ var SliceFunc = function.New(&function.Spec{ if endIndex-startIndex == 0 { if retType.IsTupleType() { - return cty.EmptyTupleVal, nil + return cty.EmptyTupleVal.WithMarks(marks), nil } - return cty.ListValEmpty(retType.ElementType()), nil + return cty.ListValEmpty(retType.ElementType()).WithMarks(marks), nil } outputList := inputList.AsValueSlice()[startIndex:endIndex] if retType.IsTupleType() { - return cty.TupleVal(outputList), nil + return cty.TupleVal(outputList).WithMarks(marks), nil } - return cty.ListVal(outputList), nil + return cty.ListVal(outputList).WithMarks(marks), nil }, }) @@ -1092,9 +1206,12 @@ func sliceIndexes(args []cty.Value) (int, int, bool, error) { var startIndex, endIndex, length int var startKnown, endKnown, lengthKnown bool + // remove marks from args[0] + list, _ := args[0].Unmark() + // If it's a tuple then we always know the length by the type, but collections might be unknown or have unknown length - if args[0].Type().IsTupleType() || args[0].Length().IsKnown() { - length = args[0].LengthInt() + if list.Type().IsTupleType() || list.Length().IsKnown() { + length = list.LengthInt() lengthKnown = true } @@ -1133,9 +1250,10 @@ func sliceIndexes(args []cty.Value) (int, int, bool, error) { // ValuesFunc is a function that returns a list of the map values, // in the order of the sorted keys. var ValuesFunc = function.New(&function.Spec{ + Description: `Returns the values of elements of a given map, or the values of attributes of a given object, in lexicographic order by key or attribute name.`, Params: []function.Parameter{ { - Name: "values", + Name: "mapping", Type: cty.DynamicPseudoType, AllowMarked: true, }, @@ -1167,6 +1285,7 @@ var ValuesFunc = function.New(&function.Spec{ } return cty.NilType, errors.New("values() requires a map as the first argument") }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { mapVar := args[0] @@ -1200,6 +1319,7 @@ var ValuesFunc = function.New(&function.Spec{ // ZipmapFunc is a function that constructs a map from a list of keys // and a corresponding list of values. 
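SliceFunc above likewise gained AllowMarked handling, unmarking the input and reapplying the marks to every result shape. A usage sketch against the vendored API (the "private" mark name is illustrative):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	nums := cty.ListVal([]cty.Value{
		cty.NumberIntVal(1), cty.NumberIntVal(2), cty.NumberIntVal(3),
	}).Mark("private")
	v, _ := stdlib.Slice(nums, cty.NumberIntVal(0), cty.NumberIntVal(2))
	fmt.Println(v.HasMark("private")) // true: a two-element slice, still marked
}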
var ZipmapFunc = function.New(&function.Spec{ + Description: `Constructs a map from a list of keys and a corresponding list of values, which must both be of the same length.`, Params: []function.Parameter{ { Name: "keys", @@ -1254,6 +1374,7 @@ var ZipmapFunc = function.New(&function.Spec{ return cty.NilType, errors.New("values argument must be a list or tuple value") } }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { keys := args[0] values := args[1] diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go index 66eb97e251..5d06a4519e 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go @@ -1,6 +1,7 @@ package stdlib import ( + "fmt" "strconv" "github.com/zclconf/go-cty/cty" @@ -18,6 +19,7 @@ import ( // a tuple. func MakeToFunc(wantTy cty.Type) function.Function { return function.New(&function.Spec{ + Description: fmt.Sprintf("Converts the given value to %s, or raises an error if that conversion is impossible.", wantTy.FriendlyName()), Params: []function.Parameter{ { Name: "v", @@ -85,3 +87,36 @@ func MakeToFunc(wantTy cty.Type) function.Function { }, }) } + +// AssertNotNullFunc is a function which does nothing except return an error +// if the argument given to it is null. +// +// This could be useful in some cases where the automatic refinement of +// nullability isn't precise enough, because the result is guaranteed to not +// be null and can therefore allow downstream comparisons to null to return +// a known value even if the value is otherwise unknown. +var AssertNotNullFunc = function.New(&function.Spec{ + Description: "Returns the given value verbatim if it is non-null, or raises an error if it's null.", + Params: []function.Parameter{ + { + Name: "v", + Type: cty.DynamicPseudoType, + // NOTE: We intentionally don't set AllowNull here, and so + // the function system will automatically reject a null argument + // for us before calling Impl. + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + return args[0].Type(), nil + }, + RefineResult: refineNonNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + // Our argument doesn't set AllowNull: true, so we're guaranteed to + // have a non-null value in args[0].
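A usage sketch for the new AssertNotNull entry point (vendored API): null arguments fail before Impl runs, and unknown results come back refined as definitely-not-null, which is the point of the function:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	v, err := stdlib.AssertNotNull(cty.StringVal("ok"))
	fmt.Printf("%#v %v\n", v, err) // cty.StringVal("ok") <nil>

	_, err = stdlib.AssertNotNull(cty.NullVal(cty.String))
	fmt.Println(err != nil) // true: nulls are rejected by the function system

	u, _ := stdlib.AssertNotNull(cty.UnknownVal(cty.String))
	fmt.Println(u.Range().DefinitelyNotNull()) // true: refined via refineNonNull
}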
+ return args[0], nil + }, +}) + +func AssertNotNull(v cty.Value) (cty.Value, error) { + return AssertNotNullFunc.Call([]cty.Value{v}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go index 5070a5adf5..e854e817b2 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go @@ -11,6 +11,7 @@ import ( ) var CSVDecodeFunc = function.New(&function.Spec{ + Description: `Parses the given string as Comma Separated Values (as defined by RFC 4180) and returns a list of objects representing the table of data, using the first row as a header row to define the object attributes.`, Params: []function.Parameter{ { Name: "str", @@ -30,7 +31,7 @@ var CSVDecodeFunc = function.New(&function.Spec{ return cty.DynamicPseudoType, fmt.Errorf("missing header line") } if err != nil { - return cty.DynamicPseudoType, err + return cty.DynamicPseudoType, csvError(err) } atys := make(map[string]cty.Type, len(headers)) @@ -42,6 +43,7 @@ var CSVDecodeFunc = function.New(&function.Spec{ } return cty.List(cty.Object(atys)), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { ety := retType.ElementType() atys := ety.AttributeTypes() @@ -64,7 +66,7 @@ var CSVDecodeFunc = function.New(&function.Spec{ break } if err != nil { - return cty.DynamicVal, err + return cty.DynamicVal, csvError(err) } vals := make(map[string]cty.Value, len(cols)) @@ -91,3 +93,12 @@ var CSVDecodeFunc = function.New(&function.Spec{ func CSVDecode(str cty.Value) (cty.Value, error) { return CSVDecodeFunc.Call([]cty.Value{str}) } + +func csvError(err error) error { + switch err := err.(type) { + case *csv.ParseError: + return fmt.Errorf("CSV parse error on line %d: %w", err.Line, err.Err) + default: + return err + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go index 1ceffcf63c..85f58d4cc7 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go @@ -12,6 +12,7 @@ import ( ) var FormatDateFunc = function.New(&function.Spec{ + Description: `Formats a timestamp given in RFC 3339 syntax into another timestamp in some other machine-oriented time syntax, as described in the format string.`, Params: []function.Parameter{ { Name: "format", @@ -22,7 +23,8 @@ var FormatDateFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { formatStr := args[0].AsString() timeStr := args[1].AsString() @@ -205,6 +207,7 @@ var FormatDateFunc = function.New(&function.Spec{ // TimeAddFunc is a function that adds a duration to a timestamp, returning a new timestamp.
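The csvError helper above turns encoding/csv's *csv.ParseError into a message that names the offending line. A hypothetical test in the same stdlib package illustrating the translation (TestCSVErrorMessage is not part of the upstream sources):

package stdlib

import (
	"encoding/csv"
	"fmt"
	"strings"
	"testing"
)

func TestCSVErrorMessage(t *testing.T) {
	// The short second row produces a *csv.ParseError for line 2.
	r := csv.NewReader(strings.NewReader("a,b\n1\n"))
	_, err := r.ReadAll()
	got := fmt.Sprint(csvError(err))
	want := "CSV parse error on line 2: wrong number of fields"
	if got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}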
var TimeAddFunc = function.New(&function.Spec{ + Description: `Adds the duration represented by the given duration string to the given RFC 3339 timestamp string, returning another RFC 3339 timestamp.`, Params: []function.Parameter{ { Name: "timestamp", @@ -279,67 +282,6 @@ func FormatDate(format cty.Value, timestamp cty.Value) (cty.Value, error) { return FormatDateFunc.Call([]cty.Value{format, timestamp}) } -func parseTimestamp(ts string) (time.Time, error) { - t, err := time.Parse(time.RFC3339, ts) - if err != nil { - switch err := err.(type) { - case *time.ParseError: - // If err is s time.ParseError then its string representation is not - // appropriate since it relies on details of Go's strange date format - // representation, which a caller of our functions is not expected - // to be familiar with. - // - // Therefore we do some light transformation to get a more suitable - // error that should make more sense to our callers. These are - // still not awesome error messages, but at least they refer to - // the timestamp portions by name rather than by Go's example - // values. - if err.LayoutElem == "" && err.ValueElem == "" && err.Message != "" { - // For some reason err.Message is populated with a ": " prefix - // by the time package. - return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp%s", err.Message) - } - var what string - switch err.LayoutElem { - case "2006": - what = "year" - case "01": - what = "month" - case "02": - what = "day of month" - case "15": - what = "hour" - case "04": - what = "minute" - case "05": - what = "second" - case "Z07:00": - what = "UTC offset" - case "T": - return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: missing required time introducer 'T'") - case ":", "-": - if err.ValueElem == "" { - return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string where %q is expected", err.LayoutElem) - } else { - return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: found %q where %q is expected", err.ValueElem, err.LayoutElem) - } - default: - // Should never get here, because time.RFC3339 includes only the - // above portions, but since that might change in future we'll - // be robust here. - what = "timestamp segment" - } - if err.ValueElem == "" { - return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string before %s", what) - } else { - return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: cannot use %q as %s", err.ValueElem, what) - } - } - return time.Time{}, err - } - return t, nil -} - // splitDataFormat is a bufio.SplitFunc used to tokenize a date format. func splitDateFormat(data []byte, atEOF bool) (advance int, token []byte, err error) { if len(data) == 0 { @@ -416,6 +358,75 @@ func startsDateFormatVerb(b byte) bool { return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') } +func parseTimestamp(ts string) (time.Time, error) { + t, err := parseStrictRFC3339(ts) + if err != nil { + switch err := err.(type) { + case *time.ParseError: + // If err is a time.ParseError then its string representation is not + // appropriate since it relies on details of Go's strange date format + // representation, which a caller of our functions is not expected + // to be familiar with. + // + // Therefore we do some light transformation to get a more suitable + // error that should make more sense to our callers. These are + // still not awesome error messages, but at least they refer to + // the timestamp portions by name rather than by Go's example + // values.
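The net effect of the parseTimestamp rewrite below, sketched through the exported FormatDate entry point (which parses its timestamp argument with parseTimestamp); a minimal example against the vendored API:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	v, _ := stdlib.FormatDate(cty.StringVal("YYYY"), cty.StringVal("2023-05-09T08:00:00Z"))
	fmt.Println(v.AsString()) // 2023

	// A space instead of the required 'T' is rejected with a message that
	// names the problem rather than echoing Go's layout string.
	_, err := stdlib.FormatDate(cty.StringVal("YYYY"), cty.StringVal("2023-05-09 08:00:00Z"))
	fmt.Println(err) // not a valid RFC3339 timestamp: missing required time introducer 'T'
}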
+ if err.LayoutElem == "" && err.ValueElem == "" && err.Message != "" { + // For some reason err.Message is populated with a ": " prefix + // by the time package. + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp%s", err.Message) + } + var what string + switch err.LayoutElem { + case "2006": + what = "year" + case "01": + what = "month" + case "02": + what = "day of month" + case "15": + what = "hour" + case "04": + what = "minute" + case "05": + what = "second" + case "Z07:00": + what = "UTC offset" + case "T": + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: missing required time introducer 'T'") + case ":", "-": + if err.ValueElem == "" { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string where %q is expected", err.LayoutElem) + } else { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: found %q where %q is expected", err.ValueElem, err.LayoutElem) + } + default: + // Should never get here, because RFC3339 includes only the + // above portions. + what = "timestamp segment" + } + if err.ValueElem == "" { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string before %s", what) + } else { + switch { + case what == "hour" && strings.Contains(err.ValueElem, ":"): + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: hour must be between 0 and 23 inclusive") + case what == "hour" && len(err.ValueElem) != 2: + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: hour must have exactly two digits") + case what == "minute" && len(err.ValueElem) != 2: + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: minute must have exactly two digits") + default: + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: cannot use %q as %s", err.ValueElem, what) + } + } + } + return time.Time{}, err + } + return t, nil +} + // TimeAdd adds a duration to a timestamp, returning a new timestamp. // // In the HCL language, timestamps are conventionally represented as diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime_rfc3339.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime_rfc3339.go new file mode 100644 index 0000000000..687854f378 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime_rfc3339.go @@ -0,0 +1,219 @@ +package stdlib + +import ( + "errors" + "strconv" + "time" +) + +// This file inlines some RFC3339 parsing code that was added to the Go standard +// library's "time" package during the Go 1.20 development period but then +// reverted prior to release to follow the Go proposals process first. +// +// Our goal is to support only valid RFC3339 strings regardless of what version +// of Go is being used, because the Go stdlib is just an implementation detail +// of the cty stdlib and so these functions should not vary their behavior +// significantly due to being compiled against a different Go version. +// +// These inline copies of the code from upstream should likely stay here +// indefinitely even if functionality like this _is_ accepted in a later version +// of Go, because this now defines cty's definition of RFC3339 parsing as +// intentionally independent of Go's. + +func parseStrictRFC3339(str string) (time.Time, error) { + t, ok := parseRFC3339(str) + if !ok { + // If parsing failed then we'll try to use time.Parse to gather up a + // helpful error object.
+ _, err := time.Parse(time.RFC3339, str) + if err != nil { + return time.Time{}, err + } + + // The parse template syntax cannot correctly validate RFC 3339. + // Explicitly check for cases that Parse is unable to validate for. + // See https://go.dev/issue/54580. + num2 := func(str string) byte { return 10*(str[0]-'0') + (str[1] - '0') } + switch { + case str[len("2006-01-02T")+1] == ':': // hour must be two digits + return time.Time{}, &time.ParseError{ + Layout: time.RFC3339, + Value: str, + LayoutElem: "15", + ValueElem: str[len("2006-01-02T"):][:1], + Message: ": hour must have two digits", + } + case str[len("2006-01-02T15:04:05")] == ',': // sub-second separator must be a period + return time.Time{}, &time.ParseError{ + Layout: time.RFC3339, + Value: str, + LayoutElem: ".", + ValueElem: ",", + Message: ": sub-second separator must be a period", + } + case str[len(str)-1] != 'Z': + switch { + case num2(str[len(str)-len("07:00"):]) >= 24: // timezone hour must be in range + return time.Time{}, &time.ParseError{ + Layout: time.RFC3339, + Value: str, + LayoutElem: "Z07:00", + ValueElem: str[len(str)-len("Z07:00"):], + Message: ": timezone hour out of range", + } + case num2(str[len(str)-len("00"):]) >= 60: // timezone minute must be in range + return time.Time{}, &time.ParseError{ + Layout: time.RFC3339, + Value: str, + LayoutElem: "Z07:00", + ValueElem: str[len(str)-len("Z07:00"):], + Message: ": timezone minute out of range", + } + } + default: // unknown error; should not occur + return time.Time{}, &time.ParseError{ + Layout: time.RFC3339, + Value: str, + LayoutElem: time.RFC3339, + ValueElem: str, + Message: "", + } + } + } + return t, nil +} + +func parseRFC3339(s string) (time.Time, bool) { + // parseUint parses s as an unsigned decimal integer and + // verifies that it is within some range. + // If it is invalid or out-of-range, + // it sets ok to false and returns the min value. + ok := true + parseUint := func(s string, min, max int) (x int) { + for _, c := range []byte(s) { + if c < '0' || '9' < c { + ok = false + return min + } + x = x*10 + int(c) - '0' + } + if x < min || max < x { + ok = false + return min + } + return x + } + + // Parse the date and time. + if len(s) < len("2006-01-02T15:04:05") { + return time.Time{}, false + } + year := parseUint(s[0:4], 0, 9999) // e.g., 2006 + month := parseUint(s[5:7], 1, 12) // e.g., 01 + day := parseUint(s[8:10], 1, daysIn(time.Month(month), year)) // e.g., 02 + hour := parseUint(s[11:13], 0, 23) // e.g., 15 + min := parseUint(s[14:16], 0, 59) // e.g., 04 + sec := parseUint(s[17:19], 0, 59) // e.g., 05 + if !ok || !(s[4] == '-' && s[7] == '-' && s[10] == 'T' && s[13] == ':' && s[16] == ':') { + return time.Time{}, false + } + s = s[19:] + + // Parse the fractional second. + var nsec int + if len(s) >= 2 && s[0] == '.' && isDigit(s, 1) { + n := 2 + for ; n < len(s) && isDigit(s, n); n++ { + } + nsec, _, _ = parseNanoseconds(s, n) + s = s[n:] + } + + // Parse the time zone. 
+ loc := time.UTC + if len(s) != 1 || s[0] != 'Z' { + if len(s) != len("-07:00") { + return time.Time{}, false + } + hr := parseUint(s[1:3], 0, 23) // e.g., 07 + mm := parseUint(s[4:6], 0, 59) // e.g., 00 + if !ok || !((s[0] == '-' || s[0] == '+') && s[3] == ':') { + return time.Time{}, false + } + zoneOffsetSecs := (hr*60 + mm) * 60 + if s[0] == '-' { + zoneOffsetSecs = -zoneOffsetSecs + } + loc = time.FixedZone("", zoneOffsetSecs) + } + t := time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc) + + return t, true +} + +func isDigit(s string, i int) bool { + if len(s) <= i { + return false + } + c := s[i] + return '0' <= c && c <= '9' +} + +func parseNanoseconds(value string, nbytes int) (ns int, rangeErrString string, err error) { + if value[0] != '.' && value[0] != ',' { + err = errBadTimestamp + return + } + if nbytes > 10 { + value = value[:10] + nbytes = 10 + } + if ns, err = strconv.Atoi(value[1:nbytes]); err != nil { + return + } + if ns < 0 { + rangeErrString = "fractional second" + return + } + // We need nanoseconds, which means scaling by the number + // of missing digits in the format, maximum length 10. + scaleDigits := 10 - nbytes + for i := 0; i < scaleDigits; i++ { + ns *= 10 + } + return +} + +// These are internal errors used by the date parsing code and are not ever +// returned by public functions. +var errBadTimestamp = errors.New("bad value for field") + +// daysBefore[m] counts the number of days in a non-leap year +// before month m begins. There is an entry for m=12, counting +// the number of days before January of next year (365). +var daysBefore = [...]int32{ + 0, + 31, + 31 + 28, + 31 + 28 + 31, + 31 + 28 + 31 + 30, + 31 + 28 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31, +} + +func daysIn(m time.Month, year int) int { + if m == time.February && isLeap(year) { + return 29 + } + return int(daysBefore[m] - daysBefore[m-1]) +} + +func isLeap(year int) bool { + return year%4 == 0 && (year%100 != 0 || year%400 == 0) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go index 63881f5853..2339cc33a7 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go @@ -6,7 +6,7 @@ import ( "math/big" "strings" - "github.com/apparentlymart/go-textseg/v13/textseg" + "github.com/apparentlymart/go-textseg/v15/textseg" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" @@ -18,6 +18,7 @@ import ( //go:generate gofmt -w format_fsm.go var FormatFunc = function.New(&function.Spec{ + Description: `Constructs a string by applying formatting verbs to a series of arguments, using a similar syntax to the C function \"printf\".`, Params: []function.Parameter{ { Name: "format", @@ -25,17 +26,27 @@ var FormatFunc = function.New(&function.Spec{ }, }, VarParam: &function.Parameter{ - Name: "args", - Type: cty.DynamicPseudoType, - AllowNull: true, + Name: "args", + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowUnknown: true, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType 
cty.Type) (cty.Value, error) { for _, arg := range args[1:] { if !arg.IsWhollyKnown() { // We require all nested values to be known because the only // thing we can do for a collection/structural type is print // it as JSON and that requires it to be wholly known. + // However, we might be able to refine the result with a + // known prefix, if there are literal characters before the + // first formatting verb. + f := args[0].AsString() + if idx := strings.IndexByte(f, '%'); idx > 0 { + prefix := f[:idx] + return cty.UnknownVal(cty.String).Refine().StringPrefix(prefix).NewValue(), nil + } return cty.UnknownVal(cty.String), nil } } @@ -45,6 +56,7 @@ var FormatFunc = function.New(&function.Spec{ }) var FormatListFunc = function.New(&function.Spec{ + Description: `Constructs a list of strings by applying formatting verbs to a series of arguments, using a similar syntax to the C function \"printf\".`, Params: []function.Parameter{ { Name: "format", @@ -57,7 +69,8 @@ var FormatListFunc = function.New(&function.Spec{ AllowNull: true, AllowUnknown: true, }, - Type: function.StaticReturnType(cty.List(cty.String)), + Type: function.StaticReturnType(cty.List(cty.String)), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { fmtVal := args[0] args = args[1:] @@ -114,6 +127,8 @@ var FormatListFunc = function.New(&function.Spec{ continue } iterators[i] = arg.ElementIterator() + case arg == cty.DynamicVal: + unknowns[i] = true default: singleVals[i] = arg } @@ -160,7 +175,7 @@ var FormatListFunc = function.New(&function.Spec{ // We require all nested values to be known because the only // thing we can do for a collection/structural type is print // it as JSON and that requires it to be wholly known. - ret = append(ret, cty.UnknownVal(cty.String)) + ret = append(ret, cty.UnknownVal(cty.String).RefineNotNull()) continue Results } } diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go index 6b31f26614..627b55a5cc 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go @@ -9,6 +9,7 @@ import ( ) var EqualFunc = function.New(&function.Spec{ + Description: `Returns true if the two given values are equal, or false otherwise.`, Params: []function.Parameter{ { Name: "a", @@ -25,13 +26,15 @@ var EqualFunc = function.New(&function.Spec{ AllowNull: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].Equals(args[1]), nil }, }) var NotEqualFunc = function.New(&function.Spec{ + Description: `Returns false if the two given values are equal, or true otherwise.`, Params: []function.Parameter{ { Name: "a", @@ -48,14 +51,16 @@ var NotEqualFunc = function.New(&function.Spec{ AllowNull: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].Equals(args[1]).Not(), nil }, }) var CoalesceFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Returns the first of the given arguments that isn't null, or raises an error if there are no non-null arguments.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "vals", Type: 
cty.DynamicPseudoType, @@ -74,6 +79,7 @@ var CoalesceFunc = function.New(&function.Spec{ } return retType, nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { for _, argVal := range args { if !argVal.IsKnown() { @@ -89,6 +95,10 @@ var CoalesceFunc = function.New(&function.Spec{ }, }) +func refineNonNull(b *cty.RefinementBuilder) *cty.RefinementBuilder { + return b.NotNull() +} + // Equal determines whether the two given values are equal, returning a // bool value. func Equal(a cty.Value, b cty.Value) (cty.Value, error) { diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go index 02770a6528..655977656f 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go @@ -1,28 +1,55 @@ package stdlib import ( + "bytes" + "strings" + "unicode/utf8" + "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/function" "github.com/zclconf/go-cty/cty/json" ) var JSONEncodeFunc = function.New(&function.Spec{ + Description: `Returns a string containing a JSON representation of the given value.`, Params: []function.Parameter{ { Name: "val", Type: cty.DynamicPseudoType, + AllowUnknown: true, AllowDynamicType: true, AllowNull: true, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { val := args[0] if !val.IsWhollyKnown() { // We can't serialize unknowns, so if the value is unknown or // contains any _nested_ unknowns then our result must be - // unknown. - return cty.UnknownVal(retType), nil + // unknown. However, we might still be able to at least constrain + // the prefix of our string so that downstreams can sniff for + // whether it's valid JSON and what result types it could have. + + valRng := val.Range() + if valRng.CouldBeNull() { + // If null is possible then we can't constrain the result + // beyond the type constraint, because the very first character + // of the string is what distinguishes a null. + return cty.UnknownVal(retType), nil + } + b := cty.UnknownVal(retType).Refine() + ty := valRng.TypeConstraint() + switch { + case ty == cty.String: + b = b.StringPrefixFull(`"`) + case ty.IsObjectType() || ty.IsMapType(): + b = b.StringPrefixFull("{") + case ty.IsTupleType() || ty.IsListType() || ty.IsSetType(): + b = b.StringPrefixFull("[") + } + return b.NewValue(), nil } if val.IsNull() { @@ -34,11 +61,17 @@ var JSONEncodeFunc = function.New(&function.Spec{ return cty.NilVal, err } + // json.Marshal should already produce a trimmed string, but we'll + // make sure it always is because our unknown value refinements above + // assume there will be no leading whitespace before the value. 
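A sketch of the jsonencode prefix refinement above (vendored API): the promise is only made when the argument cannot be null, since the first character of the encoding is what distinguishes a null:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// A non-null unknown string must encode to something starting with '"'.
	v, _ := stdlib.JSONEncode(cty.UnknownVal(cty.String).RefineNotNull())
	fmt.Printf("%q\n", v.Range().StringPrefix()) // "\""

	// If the argument could be null, no prefix can be promised.
	u, _ := stdlib.JSONEncode(cty.UnknownVal(cty.String))
	fmt.Printf("%q\n", u.Range().StringPrefix()) // ""
}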
+ buf = bytes.TrimSpace(buf) + return cty.StringVal(string(buf)), nil }, }) var JSONDecodeFunc = function.New(&function.Spec{ + Description: `Parses the given string as JSON and returns a value corresponding to what the JSON document describes.`, Params: []function.Parameter{ { Name: "str", @@ -48,6 +81,42 @@ var JSONDecodeFunc = function.New(&function.Spec{ Type: func(args []cty.Value) (cty.Type, error) { str := args[0] if !str.IsKnown() { + // If the string isn't known then we can't fully parse it, but + // if the value has been refined with a prefix then we may at + // least be able to reject obviously-invalid syntax and maybe + // even predict the result type. It's safe to return a specific + // result type only if parsing a full document with this prefix + // would return exactly that type or fail with a syntax error. + rng := str.Range() + if prefix := strings.TrimSpace(rng.StringPrefix()); prefix != "" { + // If we know at least one character then it should be one + // of the few characters that can introduce a JSON value. + switch r, _ := utf8.DecodeRuneInString(prefix); r { + case '{', '[': + // These can start object values and array values + // respectively, but we can't actually form a full + // object type constraint or tuple type constraint + // without knowing all of the attributes, so we + // will still return DynamicPseudoType in this case. + case '"': + // This means that the result will either be a string + // or parsing will fail. + return cty.String, nil + case 't', 'f': + // Must either be a boolean value or a syntax error. + return cty.Bool, nil + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.': + // These characters would all start the "number" production. + return cty.Number, nil + case 'n': + // n is valid to begin the keyword "null" but that doesn't + // give us any extra type information. + default: + // No other characters are valid as the beginning of a + // JSON value, so we can safely return an early error. 
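A sketch of the prefix sniffing above (vendored API): a refined string prefix can pin down the result type before the full document is known:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// "tr" can only begin the keyword "true", so the result must be a bool.
	arg := cty.UnknownVal(cty.String).Refine().
		NotNull().
		StringPrefixFull("tr").
		NewValue()
	v, err := stdlib.JSONDecode(arg)
	fmt.Println(v.Type() == cty.Bool, err) // true <nil>
}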
+ return cty.NilType, function.NewArgErrorf(0, "a JSON document cannot begin with the character %q", r) + } + } return cty.DynamicPseudoType, nil } diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go index 7bbe584b0a..73ef32f14d 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go @@ -11,6 +11,7 @@ import ( ) var AbsoluteFunc = function.New(&function.Spec{ + Description: `If the given number is negative then returns its positive equivalent, or otherwise returns the given number unchanged.`, Params: []function.Parameter{ { Name: "num", @@ -19,13 +20,15 @@ var AbsoluteFunc = function.New(&function.Spec{ AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { return args[0].Absolute(), nil }, }) var AddFunc = function.New(&function.Spec{ + Description: `Returns the sum of the two given numbers.`, Params: []function.Parameter{ { Name: "a", @@ -38,7 +41,8 @@ var AddFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { // big.Float.Add can panic if the input values are opposing infinities, // so we must catch that here in order to remain within @@ -59,6 +63,7 @@ var AddFunc = function.New(&function.Spec{ }) var SubtractFunc = function.New(&function.Spec{ + Description: `Returns the difference between the two given numbers.`, Params: []function.Parameter{ { Name: "a", @@ -71,7 +76,8 @@ var SubtractFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { // big.Float.Sub can panic if the input values are infinities, // so we must catch that here in order to remain within @@ -92,6 +98,7 @@ var SubtractFunc = function.New(&function.Spec{ }) var MultiplyFunc = function.New(&function.Spec{ + Description: `Returns the product of the two given numbers.`, Params: []function.Parameter{ { Name: "a", @@ -104,7 +111,8 @@ var MultiplyFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { // big.Float.Mul can panic if the input values are both zero or both // infinity, so we must catch that here in order to remain within @@ -126,6 +134,7 @@ var MultiplyFunc = function.New(&function.Spec{ }) var DivideFunc = function.New(&function.Spec{ + Description: `Divides the first given number by the second.`, Params: []function.Parameter{ { Name: "a", @@ -138,7 +147,8 @@ var DivideFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { // big.Float.Quo can panic if the input values are both zero or both // infinity, so we must catch that here in order to 
remain within @@ -160,6 +170,7 @@ var DivideFunc = function.New(&function.Spec{ }) var ModuloFunc = function.New(&function.Spec{ + Description: `Divides the first given number by the second and then returns the remainder.`, Params: []function.Parameter{ { Name: "a", @@ -172,7 +183,8 @@ var ModuloFunc = function.New(&function.Spec{ Type: cty.Number, AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { // big.Float.Mul can panic if the input values are both zero or both // infinity, so we must catch that here in order to remain within @@ -194,90 +206,107 @@ var ModuloFunc = function.New(&function.Spec{ }) var GreaterThanFunc = function.New(&function.Spec{ + Description: `Returns true if and only if the second number is less than the first.`, Params: []function.Parameter{ { Name: "a", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, { Name: "b", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].GreaterThan(args[1]), nil }, }) var GreaterThanOrEqualToFunc = function.New(&function.Spec{ + Description: `Returns true if and only if the second number is less than or equal to the first.`, Params: []function.Parameter{ { Name: "a", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, { Name: "b", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].GreaterThanOrEqualTo(args[1]), nil }, }) var LessThanFunc = function.New(&function.Spec{ + Description: `Returns true if and only if the second number is greater than the first.`, Params: []function.Parameter{ { Name: "a", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, { Name: "b", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].LessThan(args[1]), nil }, }) var LessThanOrEqualToFunc = function.New(&function.Spec{ + Description: `Returns true if and only if the second number is greater than or equal to the first.`, Params: []function.Parameter{ { Name: "a", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, { Name: "b", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].LessThanOrEqualTo(args[1]), nil }, }) var NegateFunc = function.New(&function.Spec{ + Description: `Multiplies the given number by -1.`, Params: []function.Parameter{ { Name: "num", @@ -286,20 +315,23 @@ var NegateFunc = function.New(&function.Spec{ AllowMarked: true, }, }, - Type:
function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { return args[0].Negate(), nil }, }) var MinFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Returns the numerically smallest of all of the given numbers.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "numbers", Type: cty.Number, AllowDynamicType: true, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { if len(args) == 0 { return cty.NilVal, fmt.Errorf("must pass at least one number") @@ -317,13 +349,15 @@ var MinFunc = function.New(&function.Spec{ }) var MaxFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Returns the numerically greatest of all of the given numbers.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "numbers", Type: cty.Number, AllowDynamicType: true, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { if len(args) == 0 { return cty.NilVal, fmt.Errorf("must pass at least one number") @@ -341,6 +375,7 @@ var MaxFunc = function.New(&function.Spec{ }) var IntFunc = function.New(&function.Spec{ + Description: `Discards any fractional portion of the given number.`, Params: []function.Parameter{ { Name: "num", @@ -348,7 +383,8 @@ var IntFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { bf := args[0].AsBigFloat() if bf.IsInt() { @@ -363,49 +399,68 @@ var IntFunc = function.New(&function.Spec{ // CeilFunc is a function that returns the closest whole number greater // than or equal to the given value. var CeilFunc = function.New(&function.Spec{ + Description: `Returns the smallest whole number that is greater than or equal to the given value.`, Params: []function.Parameter{ { Name: "num", Type: cty.Number, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var val float64 - if err := gocty.FromCtyValue(args[0], &val); err != nil { - return cty.UnknownVal(cty.String), err + f := args[0].AsBigFloat() + + if f.IsInf() { + return cty.NumberVal(f), nil } - if math.IsInf(val, 0) { - return cty.NumberFloatVal(val), nil + + i, acc := f.Int(nil) + switch acc { + case big.Exact, big.Above: + // Done. + case big.Below: + i.Add(i, big.NewInt(1)) } - return cty.NumberIntVal(int64(math.Ceil(val))), nil + + return cty.NumberVal(f.SetInt(i)), nil }, }) // FloorFunc is a function that returns the closest whole number lesser // than or equal to the given value. 
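Switching ceil (and floor, below) from float64 to big.Float preserves integers beyond 2^53; a sketch against the vendored API:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// 9007199254740993 = 2^53 + 1 is not representable as a float64, but
	// big.Float arithmetic rounds it correctly.
	n := cty.MustParseNumberVal("9007199254740993.5")
	v, _ := stdlib.Ceil(n)
	fmt.Println(v.AsBigFloat().Text('f', 0)) // 9007199254740994
}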
var FloorFunc = function.New(&function.Spec{ + Description: `Returns the greatest whole number that is less than or equal to the given value.`, Params: []function.Parameter{ { Name: "num", Type: cty.Number, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var val float64 - if err := gocty.FromCtyValue(args[0], &val); err != nil { - return cty.UnknownVal(cty.String), err + f := args[0].AsBigFloat() + + if f.IsInf() { + return cty.NumberVal(f), nil } - if math.IsInf(val, 0) { - return cty.NumberFloatVal(val), nil + + i, acc := f.Int(nil) + switch acc { + case big.Exact, big.Below: + // Done. + case big.Above: + i.Sub(i, big.NewInt(1)) } - return cty.NumberIntVal(int64(math.Floor(val))), nil + + return cty.NumberVal(f.SetInt(i)), nil }, }) // LogFunc is a function that returns the logarithm of a given number in a given base. var LogFunc = function.New(&function.Spec{ + Description: `Returns the logarithm of the given number in the given base.`, Params: []function.Parameter{ { Name: "num", @@ -416,7 +471,8 @@ var LogFunc = function.New(&function.Spec{ Type: cty.Number, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { var num float64 if err := gocty.FromCtyValue(args[0], &num); err != nil { @@ -434,6 +490,7 @@ var LogFunc = function.New(&function.Spec{ // PowFunc is a function that returns the given number raised to the given power. var PowFunc = function.New(&function.Spec{ + Description: `Returns the given number raised to the given power (exponentiation).`, Params: []function.Parameter{ { Name: "num", @@ -444,7 +501,8 @@ var PowFunc = function.New(&function.Spec{ Type: cty.Number, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { var num float64 if err := gocty.FromCtyValue(args[0], &num); err != nil { @@ -463,13 +521,15 @@ var PowFunc = function.New(&function.Spec{ // SignumFunc is a function that determines the sign of a number, returning a // number between -1 and 1 to represent the sign. var SignumFunc = function.New(&function.Spec{ + Description: `Returns 0 if the given number is zero, 1 if the given number is positive, or -1 if the given number is negative.`, Params: []function.Parameter{ { Name: "num", Type: cty.Number, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { var num int if err := gocty.FromCtyValue(args[0], &num); err != nil { @@ -488,6 +548,7 @@ var SignumFunc = function.New(&function.Spec{ // ParseIntFunc is a function that parses a string argument and returns an integer of the specified base.
var ParseIntFunc = function.New(&function.Spec{ + Description: `Parses the given string as a number of the given base, or raises an error if the string contains invalid characters.`, Params: []function.Parameter{ { Name: "number", @@ -505,6 +566,7 @@ var ParseIntFunc = function.New(&function.Spec{ } return cty.Number, nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { var numstr string diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go index 2dd6348a2c..246544421c 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go @@ -10,6 +10,7 @@ import ( ) var RegexFunc = function.New(&function.Spec{ + Description: `Applies the given regular expression pattern to the given string and returns information about a single match, or raises an error if there is no match.`, Params: []function.Parameter{ { Name: "pattern", @@ -32,6 +33,7 @@ var RegexFunc = function.New(&function.Spec{ } return retTy, err }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { if retType == cty.DynamicPseudoType { return cty.DynamicVal, nil @@ -54,6 +56,7 @@ var RegexFunc = function.New(&function.Spec{ }) var RegexAllFunc = function.New(&function.Spec{ + Description: `Applies the given regular expression pattern to the given string and returns a list of information about all non-overlapping matches, or an empty list if there are no matches.`, Params: []function.Parameter{ { Name: "pattern", @@ -77,6 +80,7 @@ var RegexAllFunc = function.New(&function.Spec{ } return cty.List(retTy), err }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { ety := retType.ElementType() if ety == cty.DynamicPseudoType { diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go index 8cfed57f53..009949d472 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go @@ -9,7 +9,8 @@ import ( ) var ConcatFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, + Description: `Concatenates together all of the given lists or tuples into a single sequence, preserving the input order.`, + Params: []function.Parameter{}, VarParam: &function.Parameter{ Name: "seqs", Type: cty.DynamicPseudoType, @@ -43,6 +44,10 @@ var ConcatFunc = function.New(&function.Spec{ etys := make([]cty.Type, 0, len(args)) for i, val := range args { + // Discard marks for nested values, as we only need to handle types + // and lengths. 
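The deep-unmark below matters because the type-check callback can see marks nested inside elements; the function machinery still restores marks on the result. A sketch of concatenating lists whose elements carry marks (vendored API; the "secret" mark name is illustrative):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	a := cty.ListVal([]cty.Value{cty.StringVal("x").Mark("secret")})
	b := cty.ListVal([]cty.Value{cty.StringVal("y")})
	v, err := stdlib.Concat(a, b)
	fmt.Println(err)                                            // <nil>
	fmt.Println(v.Index(cty.NumberIntVal(0)).HasMark("secret")) // true
}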
+ val, _ := val.UnmarkDeep() + ety := val.Type() switch { case ety.IsTupleType(): @@ -69,6 +74,7 @@ var ConcatFunc = function.New(&function.Spec{ } return cty.Tuple(etys), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { switch { case retType.IsListType(): @@ -133,11 +139,13 @@ var ConcatFunc = function.New(&function.Spec{ }) var RangeFunc = function.New(&function.Spec{ + Description: `Returns a list of numbers spread evenly over a particular range.`, VarParam: &function.Parameter{ Name: "params", Type: cty.Number, }, - Type: function.StaticReturnType(cty.List(cty.Number)), + Type: function.StaticReturnType(cty.List(cty.Number)), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { var start, end, step cty.Value switch len(args) { diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go index 29c425eaf2..6da2291916 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go @@ -10,6 +10,7 @@ import ( ) var SetHasElementFunc = function.New(&function.Spec{ + Description: `Returns true if the given set contains the given element, or false otherwise.`, Params: []function.Parameter{ { Name: "set", @@ -22,13 +23,15 @@ var SetHasElementFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].HasElement(args[1]), nil }, }) var SetUnionFunc = function.New(&function.Spec{ + Description: `Returns the union of all given sets.`, Params: []function.Parameter{ { Name: "first_set", @@ -41,13 +44,15 @@ var SetUnionFunc = function.New(&function.Spec{ Type: cty.Set(cty.DynamicPseudoType), AllowDynamicType: true, }, - Type: setOperationReturnType, + Type: setOperationReturnType, + RefineResult: refineNonNull, Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet { return s1.Union(s2) }, true), }) var SetIntersectionFunc = function.New(&function.Spec{ + Description: `Returns the intersection of all given sets.`, Params: []function.Parameter{ { Name: "first_set", @@ -60,13 +65,15 @@ var SetIntersectionFunc = function.New(&function.Spec{ Type: cty.Set(cty.DynamicPseudoType), AllowDynamicType: true, }, - Type: setOperationReturnType, + Type: setOperationReturnType, + RefineResult: refineNonNull, Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet { return s1.Intersection(s2) }, false), }) var SetSubtractFunc = function.New(&function.Spec{ + Description: `Returns the relative complement of the two given sets.`, Params: []function.Parameter{ { Name: "a", @@ -79,13 +86,15 @@ var SetSubtractFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: setOperationReturnType, + Type: setOperationReturnType, + RefineResult: refineNonNull, Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet { return s1.Subtract(s2) }, false), }) var SetSymmetricDifferenceFunc = function.New(&function.Spec{ + Description: `Returns the symmetric difference of the two given sets.`, Params: []function.Parameter{ { Name: "first_set", @@ -98,7 +107,8 @@ var SetSymmetricDifferenceFunc = function.New(&function.Spec{ Type: cty.Set(cty.DynamicPseudoType), AllowDynamicType: true, }, - Type: setOperationReturnType, + 
Type: setOperationReturnType, + RefineResult: refineNonNull, Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet { return s1.SymmetricDifference(s2) }, false), diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go index 43182dd5ad..f79bf98df2 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go @@ -6,7 +6,7 @@ import ( "sort" "strings" - "github.com/apparentlymart/go-textseg/v13/textseg" + "github.com/apparentlymart/go-textseg/v15/textseg" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/function" @@ -14,6 +14,7 @@ import ( ) var UpperFunc = function.New(&function.Spec{ + Description: "Returns the given string with all Unicode letters translated to their uppercase equivalents.", Params: []function.Parameter{ { Name: "str", @@ -21,7 +22,8 @@ var UpperFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { in := args[0].AsString() out := strings.ToUpper(in) @@ -30,6 +32,7 @@ var UpperFunc = function.New(&function.Spec{ }) var LowerFunc = function.New(&function.Spec{ + Description: "Returns the given string with all Unicode letters translated to their lowercase equivalents.", Params: []function.Parameter{ { Name: "str", @@ -37,7 +40,8 @@ var LowerFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { in := args[0].AsString() out := strings.ToLower(in) @@ -46,6 +50,7 @@ var LowerFunc = function.New(&function.Spec{ }) var ReverseFunc = function.New(&function.Spec{ + Description: "Returns the given string with all of its Unicode characters in reverse order.", Params: []function.Parameter{ { Name: "str", @@ -53,7 +58,8 @@ var ReverseFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { in := []byte(args[0].AsString()) out := make([]byte, len(in)) @@ -73,48 +79,75 @@ var ReverseFunc = function.New(&function.Spec{ }) var StrlenFunc = function.New(&function.Spec{ + Description: "Returns the number of Unicode characters (technically: grapheme clusters) in the given string.", Params: []function.Parameter{ { Name: "str", Type: cty.String, + AllowUnknown: true, AllowDynamicType: true, }, }, Type: function.StaticReturnType(cty.Number), + RefineResult: func(b *cty.RefinementBuilder) *cty.RefinementBuilder { + // String length is never null and never negative. + // (We might refine the lower bound even more inside Impl.) + return b.NotNull().NumberRangeLowerBound(cty.NumberIntVal(0), true) + }, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - in := args[0].AsString() - l := 0 - - inB := []byte(in) - for i := 0; i < len(in); { - d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true) - l++ - i += d + if !args[0].IsKnown() { + ret := cty.UnknownVal(cty.Number) + // We may be able to still return a constrained result based on the + // refined range of the unknown value. 
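A sketch of the strlen refinement below (vendored API): a refined known prefix yields a lower bound on the result:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	s := cty.UnknownVal(cty.String).Refine().
		NotNull().
		StringPrefixFull("abc").
		NewValue()
	n, _ := stdlib.Strlen(s)
	lo, inclusive := n.Range().NumberLowerBound()
	fmt.Printf("%#v %v\n", lo, inclusive) // cty.NumberIntVal(3) true
}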
+ inRng := args[0].Range() + if inRng.TypeConstraint() == cty.String { + prefixLen := int64(graphemeClusterCount(inRng.StringPrefix())) + ret = ret.Refine().NumberRangeLowerBound(cty.NumberIntVal(prefixLen), true).NewValue() + } + return ret, nil } + in := args[0].AsString() + l := graphemeClusterCount(in) return cty.NumberIntVal(int64(l)), nil }, }) +func graphemeClusterCount(in string) int { + l := 0 + inB := []byte(in) + for i := 0; i < len(in); { + d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true) + l++ + i += d + } + return l +} + var SubstrFunc = function.New(&function.Spec{ + Description: "Extracts a substring from the given string.", Params: []function.Parameter{ { Name: "str", + Description: "The input string.", Type: cty.String, AllowDynamicType: true, }, { Name: "offset", + Description: "The starting offset in Unicode characters.", Type: cty.Number, AllowDynamicType: true, }, { Name: "length", + Description: "The maximum length of the result in Unicode characters.", Type: cty.Number, AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { in := []byte(args[0].AsString()) var offset, length int @@ -197,17 +230,21 @@ var SubstrFunc = function.New(&function.Spec{ }) var JoinFunc = function.New(&function.Spec{ + Description: "Concatenates together the elements of all given lists with a delimiter, producing a single string.", Params: []function.Parameter{ { - Name: "separator", - Type: cty.String, + Name: "separator", + Description: "Delimiter to insert between the given strings.", + Type: cty.String, }, }, VarParam: &function.Parameter{ - Name: "lists", - Type: cty.List(cty.String), + Name: "lists", + Description: "One or more lists of strings to join.", + Type: cty.List(cty.String), }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { sep := args[0].AsString() listVals := args[1:] @@ -244,20 +281,32 @@ var JoinFunc = function.New(&function.Spec{ }) var SortFunc = function.New(&function.Spec{ + Description: "Applies a lexicographic sort to the elements of the given list.", Params: []function.Parameter{ { - Name: "list", - Type: cty.List(cty.String), + Name: "list", + Type: cty.List(cty.String), + AllowUnknown: true, }, }, - Type: function.StaticReturnType(cty.List(cty.String)), + Type: function.StaticReturnType(cty.List(cty.String)), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { listVal := args[0] if !listVal.IsWhollyKnown() { // If some of the element values aren't known yet then we - // can't yet predict the order of the result. - return cty.UnknownVal(retType), nil + // can't yet predict the order of the result, but we can be + // sure that the length won't change. + ret := cty.UnknownVal(retType) + if listVal.Type().IsListType() { + rng := listVal.Range() + ret = ret.Refine(). + CollectionLengthLowerBound(rng.LengthLowerBound()). + CollectionLengthUpperBound(rng.LengthUpperBound()). 
+ NewValue() + } + return ret, nil } if listVal.LengthInt() == 0 { // Easy path return listVal, nil @@ -282,17 +331,21 @@ var SortFunc = function.New(&function.Spec{ }) var SplitFunc = function.New(&function.Spec{ + Description: "Produces a list of one or more strings by splitting the given string at all instances of a given separator substring.", Params: []function.Parameter{ { - Name: "separator", - Type: cty.String, + Name: "separator", + Description: "The substring that delimits the result strings.", + Type: cty.String, }, { - Name: "str", - Type: cty.String, + Name: "str", + Description: "The string to split.", + Type: cty.String, }, }, - Type: function.StaticReturnType(cty.List(cty.String)), + Type: function.StaticReturnType(cty.List(cty.String)), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { sep := args[0].AsString() str := args[1].AsString() @@ -311,13 +364,15 @@ var SplitFunc = function.New(&function.Spec{ // ChompFunc is a function that removes newline characters at the end of a // string. var ChompFunc = function.New(&function.Spec{ + Description: "Removes one or more newline characters from the end of the given string.", Params: []function.Parameter{ { Name: "str", Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`) return cty.StringVal(newlines.ReplaceAllString(args[0].AsString(), "")), nil @@ -327,17 +382,21 @@ var ChompFunc = function.New(&function.Spec{ // IndentFunc is a function that adds a given number of spaces to the // beginnings of all but the first line in a given multi-line string. var IndentFunc = function.New(&function.Spec{ + Description: "Adds a given number of spaces after each newline character in the given string.", Params: []function.Parameter{ { - Name: "spaces", - Type: cty.Number, + Name: "spaces", + Description: "Number of spaces to add after each newline character.", + Type: cty.Number, }, { - Name: "str", - Type: cty.String, + Name: "str", + Description: "The string to transform.", + Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { var spaces int if err := gocty.FromCtyValue(args[0], &spaces); err != nil { @@ -352,13 +411,15 @@ var IndentFunc = function.New(&function.Spec{ // TitleFunc is a function that converts the first letter of each word in the // given string to uppercase. var TitleFunc = function.New(&function.Spec{ + Description: "Replaces one letter after each non-letter and non-digit character with its uppercase equivalent.", Params: []function.Parameter{ { Name: "str", Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return cty.StringVal(strings.Title(args[0].AsString())), nil }, @@ -367,13 +428,15 @@ var TitleFunc = function.New(&function.Spec{ // TrimSpaceFunc is a function that removes any space characters from the start // and end of the given string. 
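The refinement added to SortFunc just above has a concrete payoff: even when some element values are not yet known and the sorted order is therefore undecidable, the unknown result still reports the input's length bounds. A small sketch using the stdlib.Sort helper:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// One element is unknown, so the final order cannot be decided yet.
	input := cty.ListVal([]cty.Value{
		cty.UnknownVal(cty.String),
		cty.StringVal("b"),
	})

	sorted, err := stdlib.Sort(input)
	if err != nil {
		panic(err)
	}

	fmt.Println(sorted.IsKnown()) // false
	// The refinement still records that the result has exactly two elements.
	fmt.Println(sorted.Range().LengthLowerBound()) // 2
	fmt.Println(sorted.Range().LengthUpperBound()) // 2
}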
var TrimSpaceFunc = function.New(&function.Spec{ + Description: "Removes any consecutive space characters (as defined by Unicode) from the start and end of the given string.", Params: []function.Parameter{ { Name: "str", Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return cty.StringVal(strings.TrimSpace(args[0].AsString())), nil }, @@ -382,20 +445,27 @@ var TrimSpaceFunc = function.New(&function.Spec{ // TrimFunc is a function that removes the specified characters from the start // and end of the given string. var TrimFunc = function.New(&function.Spec{ + Description: "Removes consecutive sequences of characters in \"cutset\" from the start and end of the given string.", Params: []function.Parameter{ { - Name: "str", - Type: cty.String, + Name: "str", + Description: "The string to trim.", + Type: cty.String, }, { - Name: "cutset", - Type: cty.String, + Name: "cutset", + Description: "A string containing all of the characters to trim. Each character is taken separately, so the order of characters is insignificant.", + Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { str := args[0].AsString() cutset := args[1].AsString() + // NOTE: This doesn't properly handle any character that is encoded + // with multiple sequential code units, such as letters with + // combining diacritics and emoji modifier sequences. return cty.StringVal(strings.Trim(str, cutset)), nil }, }) @@ -403,17 +473,21 @@ var TrimFunc = function.New(&function.Spec{ // TrimPrefixFunc is a function that removes the specified characters from the // start the given string. var TrimPrefixFunc = function.New(&function.Spec{ + Description: "Removes the given prefix from the start of the given string, if present.", Params: []function.Parameter{ { - Name: "str", - Type: cty.String, + Name: "str", + Description: "The string to trim.", + Type: cty.String, }, { - Name: "prefix", - Type: cty.String, + Name: "prefix", + Description: "The prefix to remove, if present.", + Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { str := args[0].AsString() prefix := args[1].AsString() @@ -424,17 +498,21 @@ var TrimPrefixFunc = function.New(&function.Spec{ // TrimSuffixFunc is a function that removes the specified characters from the // end of the given string. 
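The RefineResult: refineNonNull annotation that recurs throughout these specs is visible to callers even when a function short-circuits on an unknown argument, because function.Call in this go-cty version applies the refinement to the placeholder result as well. A sketch with stdlib.TrimSpace:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	v, err := stdlib.TrimSpace(cty.UnknownVal(cty.String))
	if err != nil {
		panic(err)
	}
	fmt.Println(v.IsKnown())                   // false: input was unknown
	fmt.Println(v.Range().DefinitelyNotNull()) // true: refineNonNull applied
}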
var TrimSuffixFunc = function.New(&function.Spec{ + Description: "Removes the given suffix from the end of the given string, if present.", Params: []function.Parameter{ { - Name: "str", - Type: cty.String, + Name: "str", + Description: "The string to trim.", + Type: cty.String, }, { - Name: "suffix", - Type: cty.String, + Name: "suffix", + Description: "The suffix to remove, if present.", + Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { str := args[0].AsString() cutset := args[1].AsString() diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go index f777ce5c3e..25a821bbf0 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go @@ -12,21 +12,26 @@ import ( // substring, and replaces each occurence with a given replacement string. // The substr argument is a simple string. var ReplaceFunc = function.New(&function.Spec{ + Description: `Replaces all instances of the given substring in the given string with the given replacement string.`, Params: []function.Parameter{ { - Name: "str", - Type: cty.String, + Name: "str", + Description: `The string to search within.`, + Type: cty.String, }, { - Name: "substr", - Type: cty.String, + Name: "substr", + Description: `The substring to search for.`, + Type: cty.String, }, { - Name: "replace", - Type: cty.String, + Name: "replace", + Description: `The new substring to replace substr with.`, + Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { str := args[0].AsString() substr := args[1].AsString() @@ -40,13 +45,14 @@ var ReplaceFunc = function.New(&function.Spec{ // given substring, and replaces each occurence with a given replacement // string. The substr argument must be a valid regular expression. var RegexReplaceFunc = function.New(&function.Spec{ + Description: `Applies the given regular expression pattern to the given string and replaces all matches with the given replacement string.`, Params: []function.Parameter{ { Name: "str", Type: cty.String, }, { - Name: "substr", + Name: "pattern", Type: cty.String, }, { @@ -54,7 +60,8 @@ var RegexReplaceFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { str := args[0].AsString() substr := args[1].AsString() diff --git a/vendor/github.com/zclconf/go-cty/cty/gob.go b/vendor/github.com/zclconf/go-cty/cty/gob.go deleted file mode 100644 index a0961b8a0c..0000000000 --- a/vendor/github.com/zclconf/go-cty/cty/gob.go +++ /dev/null @@ -1,204 +0,0 @@ -package cty - -import ( - "bytes" - "encoding/gob" - "errors" - "fmt" - "math/big" - - "github.com/zclconf/go-cty/cty/set" -) - -// GobEncode is an implementation of the gob.GobEncoder interface, which -// allows Values to be included in structures encoded with encoding/gob. -// -// Currently it is not possible to represent values of capsule types in gob, -// because the types themselves cannot be represented.
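In the string_replace.go hunk above, renaming the second parameter from "substr" to "pattern" documents that RegexReplaceFunc treats it as a regular expression rather than a literal substring. A usage sketch with the stdlib.RegexReplace helper:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	out, err := stdlib.RegexReplace(
		cty.StringVal("id-1234"),
		cty.StringVal(`\d+`), // the renamed "pattern" parameter
		cty.StringVal("N"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", out) // cty.StringVal("id-N")
}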
-func (val Value) GobEncode() ([]byte, error) { - if val.IsMarked() { - return nil, errors.New("value is marked") - } - - buf := &bytes.Buffer{} - enc := gob.NewEncoder(buf) - - gv := gobValue{ - Version: 0, - Ty: val.ty, - V: val.v, - } - - err := enc.Encode(gv) - if err != nil { - return nil, fmt.Errorf("error encoding cty.Value: %s", err) - } - - return buf.Bytes(), nil -} - -// GobDecode is an implementation of the gob.GobDecoder interface, which -// inverts the operation performed by GobEncode. See the documentation of -// GobEncode for considerations when using cty.Value instances with gob. -func (val *Value) GobDecode(buf []byte) error { - r := bytes.NewReader(buf) - dec := gob.NewDecoder(r) - - var gv gobValue - err := dec.Decode(&gv) - if err != nil { - return fmt.Errorf("error decoding cty.Value: %s", err) - } - if gv.Version != 0 { - return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version) - } - - // Because big.Float.GobEncode is implemented with a pointer reciever, - // gob encoding of an interface{} containing a *big.Float value does not - // round-trip correctly, emerging instead as a non-pointer big.Float. - // The rest of cty expects all number values to be represented by - // *big.Float, so we'll fix that up here. - gv.V = gobDecodeFixNumberPtr(gv.V, gv.Ty) - - val.ty = gv.Ty - val.v = gv.V - - return nil -} - -// GobEncode is an implementation of the gob.GobEncoder interface, which -// allows Types to be included in structures encoded with encoding/gob. -// -// Currently it is not possible to represent capsule types in gob. -func (t Type) GobEncode() ([]byte, error) { - buf := &bytes.Buffer{} - enc := gob.NewEncoder(buf) - - gt := gobType{ - Version: 0, - Impl: t.typeImpl, - } - - err := enc.Encode(gt) - if err != nil { - return nil, fmt.Errorf("error encoding cty.Type: %s", err) - } - - return buf.Bytes(), nil -} - -// GobDecode is an implementatino of the gob.GobDecoder interface, which -// reverses the encoding performed by GobEncode to allow types to be recovered -// from gob buffers. -func (t *Type) GobDecode(buf []byte) error { - r := bytes.NewReader(buf) - dec := gob.NewDecoder(r) - - var gt gobType - err := dec.Decode(>) - if err != nil { - return fmt.Errorf("error decoding cty.Type: %s", err) - } - if gt.Version != 0 { - return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version) - } - - t.typeImpl = gt.Impl - - return nil -} - -// Capsule types cannot currently be gob-encoded, because they rely on pointer -// equality and we have no way to recover the original pointer on decode. -func (t *capsuleType) GobEncode() ([]byte, error) { - return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName(friendlyTypeName)) -} - -func (t *capsuleType) GobDecode() ([]byte, error) { - return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName(friendlyTypeName)) -} - -type gobValue struct { - Version int - Ty Type - V interface{} -} - -type gobType struct { - Version int - Impl typeImpl -} - -type gobCapsuleTypeImpl struct { -} - -// goDecodeFixNumberPtr fixes an unfortunate quirk of round-tripping cty.Number -// values through gob: the big.Float.GobEncode method is implemented on a -// pointer receiver, and so it loses the "pointer-ness" of the value on -// encode, causing the values to emerge the other end as big.Float rather than -// *big.Float as we expect elsewhere in cty. 
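gob.go disappears from the vendored tree here; vendoring typically prunes files the importing module never references, though the diff itself does not state the reason. Code in this tree that needs to serialize cty values generally goes through the cty/json package instead. A round-trip sketch, assuming cty/json remains vendored:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	ty := cty.Object(map[string]cty.Type{"count": cty.Number})
	v := cty.ObjectVal(map[string]cty.Value{"count": cty.NumberIntVal(42)})

	buf, err := ctyjson.Marshal(v, ty)
	if err != nil {
		panic(err)
	}

	got, err := ctyjson.Unmarshal(buf, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.RawEquals(v)) // true
}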
-// -// The implementation of gobDecodeFixNumberPtr mutates the given raw value -// during its work, and may either return the same value mutated or a new -// value. Callers must no longer use whatever value they pass as "raw" after -// this function is called. -func gobDecodeFixNumberPtr(raw interface{}, ty Type) interface{} { - // Unfortunately we need to work recursively here because number values - // might be embedded in structural or collection type values. - - switch { - case ty.Equals(Number): - if bf, ok := raw.(big.Float); ok { - return &bf // wrap in pointer - } - case ty.IsMapType() && ty.ElementType().Equals(Number): - if m, ok := raw.(map[string]interface{}); ok { - for k, v := range m { - m[k] = gobDecodeFixNumberPtr(v, ty.ElementType()) - } - } - case ty.IsListType() && ty.ElementType().Equals(Number): - if s, ok := raw.([]interface{}); ok { - for i, v := range s { - s[i] = gobDecodeFixNumberPtr(v, ty.ElementType()) - } - } - case ty.IsSetType() && ty.ElementType().Equals(Number): - if s, ok := raw.(set.Set); ok { - newS := set.NewSet(s.Rules()) - for it := s.Iterator(); it.Next(); { - newV := gobDecodeFixNumberPtr(it.Value(), ty.ElementType()) - newS.Add(newV) - } - return newS - } - case ty.IsObjectType(): - if m, ok := raw.(map[string]interface{}); ok { - for k, v := range m { - aty := ty.AttributeType(k) - m[k] = gobDecodeFixNumberPtr(v, aty) - } - } - case ty.IsTupleType(): - if s, ok := raw.([]interface{}); ok { - for i, v := range s { - ety := ty.TupleElementType(i) - s[i] = gobDecodeFixNumberPtr(v, ety) - } - } - } - - return raw -} - -// gobDecodeFixNumberPtrVal is a helper wrapper around gobDecodeFixNumberPtr -// that works with already-constructed values. This is primarily for testing, -// to fix up intentionally-invalid number values for the parts of the test -// code that need them to be valid, such as calling GoString on them. -func gobDecodeFixNumberPtrVal(v Value) Value { - raw := gobDecodeFixNumberPtr(v.v, v.ty) - return Value{ - v: raw, - ty: v.ty, - } -} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go b/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go index 94ffd2fb74..98e5ba1a2b 100644 --- a/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go @@ -11,7 +11,7 @@ import ( var valueType = reflect.TypeOf(cty.Value{}) var typeType = reflect.TypeOf(cty.Type{}) -var setType = reflect.TypeOf(set.Set{}) +var setType = reflect.TypeOf(set.Set[interface{}]{}) var bigFloatType = reflect.TypeOf(big.Float{}) var bigIntType = reflect.TypeOf(big.Int{}) diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/in.go b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go index ca9de21d2e..6cb308b532 100644 --- a/vendor/github.com/zclconf/go-cty/cty/gocty/in.go +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go @@ -268,7 +268,7 @@ func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety)) } - rawSet := val.Interface().(set.Set) + rawSet := val.Interface().(set.Set[interface{}]) inVals := rawSet.Values() if len(inVals) == 0 { diff --git a/vendor/github.com/zclconf/go-cty/cty/helper.go b/vendor/github.com/zclconf/go-cty/cty/helper.go index 1b88e9fa08..c342f13cba 100644 --- a/vendor/github.com/zclconf/go-cty/cty/helper.go +++ b/vendor/github.com/zclconf/go-cty/cty/helper.go @@ -8,7 +8,7 @@ import ( // unknowns, for operations that short-circuit to return unknown in that case. 
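The gocty changes above swap the internal set representation to the generic set.Set[interface{}] without changing the reflection-based public API. Converting between Go slices and cty sets still looks like this sketch, assuming gocty's usual ToCtyValue/FromCtyValue behavior:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/gocty"
)

func main() {
	// Go slice -> cty set
	v, err := gocty.ToCtyValue([]string{"a", "b"}, cty.Set(cty.String))
	if err != nil {
		panic(err)
	}

	// cty set -> Go slice
	var out []string
	if err := gocty.FromCtyValue(v, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // element order within a set is not significant
}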
func anyUnknown(values ...Value) bool { for _, val := range values { - if val.v == unknown { + if _, unknown := val.v.(*unknownType); unknown { return true } } @@ -39,7 +39,7 @@ func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, e ) } - if val.v == unknown { + if _, unknown := val.v.(*unknownType); unknown { hasUnknown = true } } diff --git a/vendor/github.com/zclconf/go-cty/cty/map_type.go b/vendor/github.com/zclconf/go-cty/cty/map_type.go index 82d36c6282..732c78a80d 100644 --- a/vendor/github.com/zclconf/go-cty/cty/map_type.go +++ b/vendor/github.com/zclconf/go-cty/cty/map_type.go @@ -51,7 +51,7 @@ func (t typeMap) GoString() string { return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT) } -// IsMapType returns true if the given type is a list type, regardless of its +// IsMapType returns true if the given type is a map type, regardless of its // element type. func (t Type) IsMapType() bool { _, ok := t.typeImpl.(typeMap) diff --git a/vendor/github.com/zclconf/go-cty/cty/marks.go b/vendor/github.com/zclconf/go-cty/cty/marks.go index b889e73fa6..e747503ea9 100644 --- a/vendor/github.com/zclconf/go-cty/cty/marks.go +++ b/vendor/github.com/zclconf/go-cty/cty/marks.go @@ -190,6 +190,9 @@ func (val Value) HasSameMarks(other Value) bool { // An application that never calls this method does not need to worry about // handling marked values. func (val Value) Mark(mark interface{}) Value { + if _, ok := mark.(ValueMarks); ok { + panic("cannot call Value.Mark with a ValueMarks value (use WithMarks instead)") + } var newMarker marker newMarker.realV = val.v if mr, ok := val.v.(marker); ok { diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go deleted file mode 100644 index 2c4da8b506..0000000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go +++ /dev/null @@ -1,212 +0,0 @@ -package msgpack - -import ( - "bytes" - "math/big" - "sort" - - "github.com/vmihailenco/msgpack/v4" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// Marshal produces a msgpack serialization of the given value that -// can be decoded into the given type later using Unmarshal. -// -// The given value must conform to the given type, or an error will -// be returned. -func Marshal(val cty.Value, ty cty.Type) ([]byte, error) { - errs := val.Type().TestConformance(ty) - if errs != nil { - // Attempt a conversion - var err error - val, err = convert.Convert(val, ty) - if err != nil { - return nil, err - } - } - - // From this point onward, val can be assumed to be conforming to t. - - var path cty.Path - var buf bytes.Buffer - enc := msgpack.NewEncoder(&buf) - enc.UseCompactEncoding(true) - - err := marshal(val, ty, path, enc) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func marshal(val cty.Value, ty cty.Type, path cty.Path, enc *msgpack.Encoder) error { - if val.IsMarked() { - return path.NewErrorf("value has marks, so it cannot be serialized") - } - - // If we're going to decode as DynamicPseudoType then we need to save - // dynamic type information to recover the real type. 
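Besides switching unknown detection from pointer identity to a type assertion (needed now that refined unknowns are no longer a single sigil value), marks.go gains a guard that turns a subtle misuse into an immediate panic: a ValueMarks collection must be applied with WithMarks, while Mark takes a single mark value. Sketch:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A single mark goes through Mark.
	v := cty.StringVal("hunter2").Mark("sensitive")
	fmt.Println(v.HasMark("sensitive")) // true

	// A whole ValueMarks set must go through WithMarks; passing it to
	// Mark now panics instead of silently nesting the set as one mark.
	w := cty.StringVal("x").WithMarks(cty.NewValueMarks("a", "b"))
	fmt.Println(w.IsMarked()) // true
}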
- if ty == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { - return marshalDynamic(val, path, enc) - } - - if !val.IsKnown() { - err := enc.Encode(unknownVal) - if err != nil { - return path.NewError(err) - } - return nil - } - if val.IsNull() { - err := enc.EncodeNil() - if err != nil { - return path.NewError(err) - } - return nil - } - - // The caller should've guaranteed that the given val is conformant with - // the given type ty, so we'll proceed under that assumption here. - switch { - case ty.IsPrimitiveType(): - switch ty { - case cty.String: - err := enc.EncodeString(val.AsString()) - if err != nil { - return path.NewError(err) - } - return nil - case cty.Number: - var err error - switch { - case val.RawEquals(cty.PositiveInfinity): - err = enc.EncodeFloat64(positiveInfinity) - case val.RawEquals(cty.NegativeInfinity): - err = enc.EncodeFloat64(negativeInfinity) - default: - bf := val.AsBigFloat() - if iv, acc := bf.Int64(); acc == big.Exact { - err = enc.EncodeInt(iv) - } else if fv, acc := bf.Float64(); acc == big.Exact { - err = enc.EncodeFloat64(fv) - } else { - err = enc.EncodeString(bf.Text('f', -1)) - } - } - if err != nil { - return path.NewError(err) - } - return nil - case cty.Bool: - err := enc.EncodeBool(val.True()) - if err != nil { - return path.NewError(err) - } - return nil - default: - panic("unsupported primitive type") - } - case ty.IsListType(), ty.IsSetType(): - enc.EncodeArrayLen(val.LengthInt()) - ety := ty.ElementType() - it := val.ElementIterator() - path := append(path, nil) // local override of 'path' with extra element - for it.Next() { - ek, ev := it.Element() - path[len(path)-1] = cty.IndexStep{ - Key: ek, - } - err := marshal(ev, ety, path, enc) - if err != nil { - return err - } - } - return nil - case ty.IsMapType(): - enc.EncodeMapLen(val.LengthInt()) - ety := ty.ElementType() - it := val.ElementIterator() - path := append(path, nil) // local override of 'path' with extra element - for it.Next() { - ek, ev := it.Element() - path[len(path)-1] = cty.IndexStep{ - Key: ek, - } - var err error - err = marshal(ek, ek.Type(), path, enc) - if err != nil { - return err - } - err = marshal(ev, ety, path, enc) - if err != nil { - return err - } - } - return nil - case ty.IsTupleType(): - etys := ty.TupleElementTypes() - it := val.ElementIterator() - path := append(path, nil) // local override of 'path' with extra element - i := 0 - enc.EncodeArrayLen(len(etys)) - for it.Next() { - ety := etys[i] - ek, ev := it.Element() - path[len(path)-1] = cty.IndexStep{ - Key: ek, - } - err := marshal(ev, ety, path, enc) - if err != nil { - return err - } - i++ - } - return nil - case ty.IsObjectType(): - atys := ty.AttributeTypes() - path := append(path, nil) // local override of 'path' with extra element - - names := make([]string, 0, len(atys)) - for k := range atys { - names = append(names, k) - } - sort.Strings(names) - - enc.EncodeMapLen(len(names)) - - for _, k := range names { - aty := atys[k] - av := val.GetAttr(k) - path[len(path)-1] = cty.GetAttrStep{ - Name: k, - } - var err error - err = marshal(cty.StringVal(k), cty.String, path, enc) - if err != nil { - return err - } - err = marshal(av, aty, path, enc) - if err != nil { - return err - } - } - return nil - case ty.IsCapsuleType(): - return path.NewErrorf("capsule types not supported for msgpack encoding") - default: - // should never happen - return path.NewErrorf("cannot msgpack-serialize %s", ty.FriendlyName()) - } -} - -// marshalDynamic adds an extra wrapping object containing dynamic 
type -// information for the given value. -func marshalDynamic(val cty.Value, path cty.Path, enc *msgpack.Encoder) error { - dv := dynamicVal{ - Value: val, - Path: path, - } - return enc.Encode(&dv) -} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go deleted file mode 100644 index a169f28f80..0000000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go +++ /dev/null @@ -1,167 +0,0 @@ -package msgpack - -import ( - "bytes" - "fmt" - "io" - - "github.com/vmihailenco/msgpack/v4" - msgpackcodes "github.com/vmihailenco/msgpack/v4/codes" - "github.com/zclconf/go-cty/cty" -) - -// ImpliedType returns the cty Type implied by the structure of the given -// msgpack-compliant buffer. This function implements the default type mapping -// behavior used when decoding arbitrary msgpack without explicit cty Type -// information. -// -// The rules are as follows: -// -// msgpack strings, numbers and bools map to their equivalent primitive type in -// cty. -// -// msgpack maps become cty object types, with the attributes defined by the -// map keys and the types of their values. -// -// msgpack arrays become cty tuple types, with the elements defined by the -// types of the array members. -// -// Any nulls are typed as DynamicPseudoType, so callers of this function -// must be prepared to deal with this. Callers that do not wish to deal with -// dynamic typing should not use this function and should instead describe -// their required types explicitly with a cty.Type instance when decoding. -// -// Any unknown values are similarly typed as DynamicPseudoType, because these -// do not carry type information on the wire. -// -// Any parse errors will be returned as an error, and the type will be the -// invalid value cty.NilType. -func ImpliedType(buf []byte) (cty.Type, error) { - r := bytes.NewReader(buf) - dec := msgpack.NewDecoder(r) - - ty, err := impliedType(dec) - if err != nil { - return cty.NilType, err - } - - // We must now be at the end of the buffer - err = dec.Skip() - if err != io.EOF { - return ty, fmt.Errorf("extra bytes after msgpack value") - } - - return ty, nil -} - -func impliedType(dec *msgpack.Decoder) (cty.Type, error) { - // If this function returns with a nil error then it must have already - // consumed the next value from the decoder, since when called recursively - // the caller will be expecting to find a following value here. 
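The implied-type mapping described in the deleted comment (strings, numbers, and bools to primitives, maps to object types, arrays to tuple types, nulls and unknowns to DynamicPseudoType) matches the JSON counterpart, which is not removed here. A sketch using ctyjson.ImpliedType, assuming cty/json stays in the vendor tree:

package main

import (
	"fmt"

	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	ty, err := ctyjson.ImpliedType([]byte(`{"name":"a","tags":["x",true],"count":null}`))
	if err != nil {
		panic(err)
	}
	// "name" becomes cty.String, "tags" becomes a tuple of (String, Bool),
	// and "count" becomes cty.DynamicPseudoType because a null carries no
	// type information.
	fmt.Printf("%#v\n", ty)
}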
- - code, err := dec.PeekCode() - if err != nil { - return cty.NilType, err - } - - switch { - - case code == msgpackcodes.Nil || msgpackcodes.IsExt(code): - err := dec.Skip() - return cty.DynamicPseudoType, err - - case code == msgpackcodes.True || code == msgpackcodes.False: - _, err := dec.DecodeBool() - return cty.Bool, err - - case msgpackcodes.IsFixedNum(code): - _, err := dec.DecodeInt64() - return cty.Number, err - - case code == msgpackcodes.Int8 || code == msgpackcodes.Int16 || code == msgpackcodes.Int32 || code == msgpackcodes.Int64: - _, err := dec.DecodeInt64() - return cty.Number, err - - case code == msgpackcodes.Uint8 || code == msgpackcodes.Uint16 || code == msgpackcodes.Uint32 || code == msgpackcodes.Uint64: - _, err := dec.DecodeUint64() - return cty.Number, err - - case code == msgpackcodes.Float || code == msgpackcodes.Double: - _, err := dec.DecodeFloat64() - return cty.Number, err - - case msgpackcodes.IsString(code): - _, err := dec.DecodeString() - return cty.String, err - - case msgpackcodes.IsFixedMap(code) || code == msgpackcodes.Map16 || code == msgpackcodes.Map32: - return impliedObjectType(dec) - - case msgpackcodes.IsFixedArray(code) || code == msgpackcodes.Array16 || code == msgpackcodes.Array32: - return impliedTupleType(dec) - - default: - return cty.NilType, fmt.Errorf("unsupported msgpack code %#v", code) - } -} - -func impliedObjectType(dec *msgpack.Decoder) (cty.Type, error) { - // If we get in here then we've already peeked the next code and know - // it's some sort of map. - l, err := dec.DecodeMapLen() - if err != nil { - return cty.DynamicPseudoType, nil - } - - var atys map[string]cty.Type - - for i := 0; i < l; i++ { - // Read the map key first. We require maps to be strings, but msgpack - // doesn't so we're prepared to error here if not. - k, err := dec.DecodeString() - if err != nil { - return cty.DynamicPseudoType, err - } - - aty, err := impliedType(dec) - if err != nil { - return cty.DynamicPseudoType, err - } - - if atys == nil { - atys = make(map[string]cty.Type) - } - atys[k] = aty - } - - if len(atys) == 0 { - return cty.EmptyObject, nil - } - - return cty.Object(atys), nil -} - -func impliedTupleType(dec *msgpack.Decoder) (cty.Type, error) { - // If we get in here then we've already peeked the next code and know - // it's some sort of array. - l, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicPseudoType, nil - } - - if l == 0 { - return cty.EmptyTuple, nil - } - - etys := make([]cty.Type, l) - - for i := 0; i < l; i++ { - ety, err := impliedType(dec) - if err != nil { - return cty.DynamicPseudoType, err - } - etys[i] = ety - } - - return cty.Tuple(etys), nil -} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go deleted file mode 100644 index 1ea0b0a2e3..0000000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go +++ /dev/null @@ -1,334 +0,0 @@ -package msgpack - -import ( - "bytes" - - "github.com/vmihailenco/msgpack/v4" - msgpackCodes "github.com/vmihailenco/msgpack/v4/codes" - "github.com/zclconf/go-cty/cty" -) - -// Unmarshal interprets the given bytes as a msgpack-encoded cty Value of -// the given type, returning the result. -// -// If an error is returned, the error is written with a hypothetical -// end-user that wrote the msgpack file as its audience, using cty type -// system concepts rather than Go type system concepts. 
-func Unmarshal(b []byte, ty cty.Type) (cty.Value, error) { - r := bytes.NewReader(b) - dec := msgpack.NewDecoder(r) - - var path cty.Path - return unmarshal(dec, ty, path) -} - -func unmarshal(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { - peek, err := dec.PeekCode() - if err != nil { - return cty.DynamicVal, path.NewError(err) - } - if msgpackCodes.IsExt(peek) { - // We just assume _all_ extensions are unknown values, - // since we don't have any other extensions. - dec.Skip() // skip what we've peeked - return cty.UnknownVal(ty), nil - } - if ty == cty.DynamicPseudoType { - return unmarshalDynamic(dec, path) - } - if peek == msgpackCodes.Nil { - dec.Skip() // skip what we've peeked - return cty.NullVal(ty), nil - } - - switch { - case ty.IsPrimitiveType(): - val, err := unmarshalPrimitive(dec, ty, path) - if err != nil { - return cty.NilVal, err - } - return val, nil - case ty.IsListType(): - return unmarshalList(dec, ty.ElementType(), path) - case ty.IsSetType(): - return unmarshalSet(dec, ty.ElementType(), path) - case ty.IsMapType(): - return unmarshalMap(dec, ty.ElementType(), path) - case ty.IsTupleType(): - return unmarshalTuple(dec, ty.TupleElementTypes(), path) - case ty.IsObjectType(): - return unmarshalObject(dec, ty.AttributeTypes(), path) - default: - return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName()) - } -} - -func unmarshalPrimitive(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { - switch ty { - case cty.Bool: - rv, err := dec.DecodeBool() - if err != nil { - return cty.DynamicVal, path.NewErrorf("bool is required") - } - return cty.BoolVal(rv), nil - case cty.Number: - // Marshal will try int and float first, if the value can be - // losslessly represented in these encodings, and then fall - // back on a string if the number is too large or too precise. 
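The decode switch below mirrors the three-tier strategy from the deleted Marshal code: integers when lossless, then binary floats, then a decimal string for anything too large or too precise. The tiering decision can be reproduced with big.Float accuracy results; encodingFor is a hypothetical helper for illustration only:

package main

import (
	"fmt"
	"math/big"
)

// encodingFor mirrors the deleted marshal logic: try int64, then float64,
// and fall back to a decimal string when neither is exact.
func encodingFor(bf *big.Float) string {
	if _, acc := bf.Int64(); acc == big.Exact {
		return "int"
	}
	if _, acc := bf.Float64(); acc == big.Exact {
		return "float"
	}
	return "string (decimal)"
}

func main() {
	fmt.Println(encodingFor(big.NewFloat(42)))  // int
	fmt.Println(encodingFor(big.NewFloat(0.5))) // float
	huge, _, _ := big.ParseFloat("123456789123456789123456789", 10, 512, big.ToNearestEven)
	fmt.Println(encodingFor(huge)) // string (decimal)
}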
- peek, err := dec.PeekCode() - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - - if msgpackCodes.IsFixedNum(peek) { - rv, err := dec.DecodeInt64() - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - return cty.NumberIntVal(rv), nil - } - - switch peek { - case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64: - rv, err := dec.DecodeInt64() - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - return cty.NumberIntVal(rv), nil - case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64: - rv, err := dec.DecodeUint64() - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - return cty.NumberUIntVal(rv), nil - case msgpackCodes.Float, msgpackCodes.Double: - rv, err := dec.DecodeFloat64() - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - return cty.NumberFloatVal(rv), nil - default: - rv, err := dec.DecodeString() - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - v, err := cty.ParseNumberVal(rv) - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - return v, nil - } - case cty.String: - rv, err := dec.DecodeString() - if err != nil { - return cty.DynamicVal, path.NewErrorf("string is required") - } - return cty.StringVal(rv), nil - default: - // should never happen - panic("unsupported primitive type") - } -} - -func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicVal, path.NewErrorf("a list is required") - } - - switch { - case length < 0: - return cty.NullVal(cty.List(ety)), nil - case length == 0: - return cty.ListValEmpty(ety), nil - } - - vals := make([]cty.Value, 0, length) - path = append(path, nil) - for i := 0; i < length; i++ { - path[len(path)-1] = cty.IndexStep{ - Key: cty.NumberIntVal(int64(i)), - } - - val, err := unmarshal(dec, ety, path) - if err != nil { - return cty.DynamicVal, err - } - - vals = append(vals, val) - } - - return cty.ListVal(vals), nil -} - -func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicVal, path.NewErrorf("a set is required") - } - - switch { - case length < 0: - return cty.NullVal(cty.Set(ety)), nil - case length == 0: - return cty.SetValEmpty(ety), nil - } - - vals := make([]cty.Value, 0, length) - path = append(path, nil) - for i := 0; i < length; i++ { - path[len(path)-1] = cty.IndexStep{ - Key: cty.NumberIntVal(int64(i)), - } - - val, err := unmarshal(dec, ety, path) - if err != nil { - return cty.DynamicVal, err - } - - vals = append(vals, val) - } - - return cty.SetVal(vals), nil -} - -func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeMapLen() - if err != nil { - return cty.DynamicVal, path.NewErrorf("a map is required") - } - - switch { - case length < 0: - return cty.NullVal(cty.Map(ety)), nil - case length == 0: - return cty.MapValEmpty(ety), nil - } - - vals := make(map[string]cty.Value, length) - path = append(path, nil) - for i := 0; i < length; i++ { - key, err := dec.DecodeString() - if err != nil { - path[:len(path)-1].NewErrorf("non-string key in map") - } - - path[len(path)-1] = cty.IndexStep{ - Key: cty.StringVal(key), - } - - val, err := unmarshal(dec, 
ety, path) - if err != nil { - return cty.DynamicVal, err - } - - vals[key] = val - } - - return cty.MapVal(vals), nil -} - -func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicVal, path.NewErrorf("a tuple is required") - } - - switch { - case length < 0: - return cty.NullVal(cty.Tuple(etys)), nil - case length == 0: - return cty.TupleVal(nil), nil - case length != len(etys): - return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys)) - } - - vals := make([]cty.Value, 0, length) - path = append(path, nil) - for i := 0; i < length; i++ { - path[len(path)-1] = cty.IndexStep{ - Key: cty.NumberIntVal(int64(i)), - } - ety := etys[i] - - val, err := unmarshal(dec, ety, path) - if err != nil { - return cty.DynamicVal, err - } - - vals = append(vals, val) - } - - return cty.TupleVal(vals), nil -} - -func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeMapLen() - if err != nil { - return cty.DynamicVal, path.NewErrorf("an object is required") - } - - switch { - case length < 0: - return cty.NullVal(cty.Object(atys)), nil - case length == 0: - return cty.ObjectVal(nil), nil - case length != len(atys): - return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required (%d given)", - len(atys), length) - } - - vals := make(map[string]cty.Value, length) - path = append(path, nil) - for i := 0; i < length; i++ { - key, err := dec.DecodeString() - if err != nil { - return cty.DynamicVal, path[:len(path)-1].NewErrorf("all keys must be strings") - } - - path[len(path)-1] = cty.IndexStep{ - Key: cty.StringVal(key), - } - aty, exists := atys[key] - if !exists { - return cty.DynamicVal, path.NewErrorf("unsupported attribute") - } - - val, err := unmarshal(dec, aty, path) - if err != nil { - return cty.DynamicVal, err - } - - vals[key] = val - } - - return cty.ObjectVal(vals), nil -} - -func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicVal, path.NewError(err) - } - - switch { - case length == -1: - return cty.NullVal(cty.DynamicPseudoType), nil - case length != 2: - return cty.DynamicVal, path.NewErrorf( - "dynamic value array must have exactly two elements", - ) - } - - typeJSON, err := dec.DecodeBytes() - if err != nil { - return cty.DynamicVal, path.NewError(err) - } - var ty cty.Type - err = (&ty).UnmarshalJSON(typeJSON) - if err != nil { - return cty.DynamicVal, path.NewError(err) - } - - return unmarshal(dec, ty, path) -} diff --git a/vendor/github.com/zclconf/go-cty/cty/path.go b/vendor/github.com/zclconf/go-cty/cty/path.go index 636e68c63d..4995a8c7bf 100644 --- a/vendor/github.com/zclconf/go-cty/cty/path.go +++ b/vendor/github.com/zclconf/go-cty/cty/path.go @@ -225,7 +225,9 @@ func (s IndexStep) Apply(val Value) (Value, error) { return NilVal, errors.New("key value not number or string") } - has := val.HasIndex(s.Key) + // This value needs to be stripped of marks to check True(), but Index will + // apply the correct marks for the result. 
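The unmarking fix described in this comment matters whenever paths are applied to marked containers, since calling True() on a marked value panics. A sketch of the end-to-end behavior, assuming the usual mark propagation in Value.Index:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A marked list: HasIndex on it yields a marked bool, which is why
	// IndexStep.Apply must unmark before consulting the result.
	list := cty.ListVal([]cty.Value{cty.StringVal("a")}).Mark("sensitive")

	path := cty.Path{}.Index(cty.NumberIntVal(0))
	got, err := path.Apply(list)
	if err != nil {
		panic(err)
	}
	// Index itself propagates the container's marks onto the element.
	fmt.Println(got.HasMark("sensitive")) // true
}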
+ has, _ := val.HasIndex(s.Key).Unmark() if !has.IsKnown() { return UnknownVal(val.Type().ElementType()), nil } diff --git a/vendor/github.com/zclconf/go-cty/cty/path_set.go b/vendor/github.com/zclconf/go-cty/cty/path_set.go index 1960c01e97..3ebfdc3859 100644 --- a/vendor/github.com/zclconf/go-cty/cty/path_set.go +++ b/vendor/github.com/zclconf/go-cty/cty/path_set.go @@ -11,14 +11,14 @@ import ( // to talk about a subset of paths within a value that meet some criteria, // without directly modifying the values at those paths. type PathSet struct { - set set.Set + set set.Set[Path] } // NewPathSet creates and returns a PathSet, with initial contents optionally // set by the given arguments. func NewPathSet(paths ...Path) PathSet { ret := PathSet{ - set: set.NewSet(pathSetRules{}), + set: set.NewSet(set.Rules[Path](pathSetRules{})), } for _, path := range paths { @@ -61,7 +61,7 @@ func (s PathSet) List() []Path { } ret := make([]Path, 0, s.set.Length()) for it := s.set.Iterator(); it.Next(); { - ret = append(ret, it.Value().(Path)) + ret = append(ret, it.Value()) } return ret } @@ -134,8 +134,7 @@ var indexStepPlaceholder = []byte("#") type pathSetRules struct { } -func (r pathSetRules) Hash(v interface{}) int { - path := v.(Path) +func (r pathSetRules) Hash(path Path) int { hash := crc64.New(crc64Table) for _, rawStep := range path { @@ -159,10 +158,7 @@ func (r pathSetRules) Hash(v interface{}) int { return int(hash.Sum64()) } -func (r pathSetRules) Equivalent(a, b interface{}) bool { - aPath := a.(Path) - bPath := b.(Path) - +func (r pathSetRules) Equivalent(aPath, bPath Path) bool { if len(aPath) != len(bPath) { return false } @@ -198,7 +194,7 @@ func (r pathSetRules) Equivalent(a, b interface{}) bool { } // SameRules is true if both Rules instances are pathSetRules structs. -func (r pathSetRules) SameRules(other set.Rules) bool { +func (r pathSetRules) SameRules(other set.Rules[Path]) bool { _, ok := other.(pathSetRules) return ok } diff --git a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go index 7b3d1196cd..3ce2540bb6 100644 --- a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go +++ b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go @@ -52,6 +52,53 @@ func (t primitiveType) GoString() string { } } +// rawNumberEqual is our cty-specific definition of whether two big floats +// underlying cty.Number are "equal" for the purposes of the Value.Equals and +// Value.RawEquals methods. +// +// The built-in equality for big.Float is a direct comparison of the mantissa +// bits and the exponent, but that's too precise a check for cty because we +// routinely send numbers through decimal approximations and back and so +// we only promise to accurately represent the subset of binary floating point +// numbers that can be derived from a decimal string representation. +// +// In respect of the fact that cty only tries to preserve numbers that can +// reasonably be written in JSON documents, we use the string representation of +// a decimal approximation of the number as our comparison, relying on the +// big.Float type's heuristic for discarding extraneous mantissa bits that seem +// likely to only be there as a result of an earlier decimal-to-binary +// approximation during parsing, e.g. in ParseNumberVal. 
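One directly checkable consequence of the stringification-based comparison documented above is the negative-zero rule: -0 and +0 normalize to the same string before comparing, so they are equal. Sketch:

package main

import (
	"fmt"
	"math"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	posZero := cty.NumberFloatVal(0)
	negZero := cty.NumberFloatVal(math.Copysign(0, -1))

	// Both render as "0" after normalization, so they compare equal.
	fmt.Println(posZero.RawEquals(negZero))      // true
	fmt.Println(posZero.Equals(negZero).True()) // true
}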
+func rawNumberEqual(a, b *big.Float) bool { + switch { + case (a == nil) != (b == nil): + return false + case a == nil: // b == nil too then, due to previous case + return true + case a.Sign() != b.Sign(): + return false + default: + // This format and precision matches that used by cty/json.Marshal, + // and thus achieves our definition of "two numbers are equal if + // we'd use the same JSON serialization for both of them". + const format = 'f' + const prec = -1 + aStr := a.Text(format, prec) + bStr := b.Text(format, prec) + + // The one exception to our rule about equality-by-stringification is + // negative zero, because we want -0 to always be equal to +0. + const posZero = "0" + const negZero = "-0" + if aStr == negZero { + aStr = posZero + } + if bStr == negZero { + bStr = posZero + } + return aStr == bStr + } +} + // Number is the numeric type. Number values are arbitrary-precision // decimal numbers, which can then be converted into Go's various numeric // types only if they are in the appropriate range. diff --git a/vendor/github.com/zclconf/go-cty/cty/set/iterator.go b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go index 4a60494f9d..60825b0c2d 100644 --- a/vendor/github.com/zclconf/go-cty/cty/set/iterator.go +++ b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go @@ -1,15 +1,15 @@ package set -type Iterator struct { - vals []interface{} +type Iterator[T any] struct { + vals []T idx int } -func (it *Iterator) Value() interface{} { +func (it *Iterator[T]) Value() T { return it.vals[it.idx] } -func (it *Iterator) Next() bool { +func (it *Iterator[T]) Next() bool { it.idx++ return it.idx < len(it.vals) } diff --git a/vendor/github.com/zclconf/go-cty/cty/set/ops.go b/vendor/github.com/zclconf/go-cty/cty/set/ops.go index fd1555f218..ffd950ac6b 100644 --- a/vendor/github.com/zclconf/go-cty/cty/set/ops.go +++ b/vendor/github.com/zclconf/go-cty/cty/set/ops.go @@ -7,10 +7,10 @@ import ( // Add inserts the given value into the receiving Set. // // This mutates the set in-place. This operation is not thread-safe. -func (s Set) Add(val interface{}) { +func (s Set[T]) Add(val T) { hv := s.rules.Hash(val) if _, ok := s.vals[hv]; !ok { - s.vals[hv] = make([]interface{}, 0, 1) + s.vals[hv] = make([]T, 0, 1) } bucket := s.vals[hv] @@ -26,7 +26,7 @@ func (s Set) Add(val interface{}) { // Remove deletes the given value from the receiving set, if indeed it was // there in the first place. If the value is not present, this is a no-op. -func (s Set) Remove(val interface{}) { +func (s Set[T]) Remove(val T) { hv := s.rules.Hash(val) bucket, ok := s.vals[hv] if !ok { @@ -35,7 +35,7 @@ func (s Set) Remove(val interface{}) { for i, ev := range bucket { if s.rules.Equivalent(val, ev) { - newBucket := make([]interface{}, 0, len(bucket)-1) + newBucket := make([]T, 0, len(bucket)-1) newBucket = append(newBucket, bucket[:i]...) newBucket = append(newBucket, bucket[i+1:]...) if len(newBucket) > 0 { @@ -50,7 +50,7 @@ func (s Set) Remove(val interface{}) { // Has returns true if the given value is in the receiving set, or false if // it is not. -func (s Set) Has(val interface{}) bool { +func (s Set[T]) Has(val T) bool { hv := s.rules.Hash(val) bucket, ok := s.vals[hv] if !ok { @@ -67,7 +67,7 @@ func (s Set) Has(val interface{}) bool { // Copy performs a shallow copy of the receiving set, returning a new set // with the same rules and elements. 
-func (s Set) Copy() Set { +func (s Set[T]) Copy() Set[T] { ret := NewSet(s.rules) for k, v := range s.vals { ret.vals[k] = v @@ -92,10 +92,10 @@ func (s Set) Copy() Set { // // Once an iterator has been created for a set, the set *must not* be mutated // until the iterator is no longer in use. -func (s Set) Iterator() *Iterator { +func (s Set[T]) Iterator() *Iterator[T] { vals := s.Values() - return &Iterator{ + return &Iterator[T]{ vals: vals, idx: -1, } @@ -103,7 +103,7 @@ func (s Set) Iterator() *Iterator { // EachValue calls the given callback once for each value in the set, in an // undefined order that callers should not depend on. -func (s Set) EachValue(cb func(interface{})) { +func (s Set[T]) EachValue(cb func(T)) { it := s.Iterator() for it.Next() { cb(it.Value()) @@ -114,8 +114,8 @@ func (s Set) EachValue(cb func(interface{})) { // an order then the result is in that order. If no order is provided or if // it is not a total order then the result order is undefined, but consistent // for a particular set value within a specific release of cty. -func (s Set) Values() []interface{} { - var ret []interface{} +func (s Set[T]) Values() []T { + var ret []T // Sort the bucketIds to ensure that we always traverse in a // consistent order. bucketIDs := make([]int, 0, len(s.vals)) @@ -128,7 +128,7 @@ func (s Set) Values() []interface{} { ret = append(ret, s.vals[bucketID]...) } - if orderRules, ok := s.rules.(OrderedRules); ok { + if orderRules, ok := s.rules.(OrderedRules[T]); ok { sort.SliceStable(ret, func(i, j int) bool { return orderRules.Less(ret[i], ret[j]) }) @@ -138,7 +138,7 @@ func (s Set) Values() []interface{} { } // Length returns the number of values in the set. -func (s Set) Length() int { +func (s Set[T]) Length() int { var count int for _, bucket := range s.vals { count = count + len(bucket) @@ -149,13 +149,13 @@ func (s Set) Length() int { // Union returns a new set that contains all of the members of both the // receiving set and the given set. Both sets must have the same rules, or // else this function will panic. -func (s1 Set) Union(s2 Set) Set { +func (s1 Set[T]) Union(s2 Set[T]) Set[T] { mustHaveSameRules(s1, s2) rs := NewSet(s1.rules) - s1.EachValue(func(v interface{}) { + s1.EachValue(func(v T) { rs.Add(v) }) - s2.EachValue(func(v interface{}) { + s2.EachValue(func(v T) { rs.Add(v) }) return rs @@ -164,10 +164,10 @@ func (s1 Set) Union(s2 Set) Set { // Intersection returns a new set that contains the values that both the // receiver and given sets have in common. Both sets must have the same rules, // or else this function will panic. -func (s1 Set) Intersection(s2 Set) Set { +func (s1 Set[T]) Intersection(s2 Set[T]) Set[T] { mustHaveSameRules(s1, s2) rs := NewSet(s1.rules) - s1.EachValue(func(v interface{}) { + s1.EachValue(func(v T) { if s2.Has(v) { rs.Add(v) } @@ -178,10 +178,10 @@ func (s1 Set) Intersection(s2 Set) Set { // Subtract returns a new set that contains all of the values from the receiver // that are not also in the given set. Both sets must have the same rules, // or else this function will panic. -func (s1 Set) Subtract(s2 Set) Set { +func (s1 Set[T]) Subtract(s2 Set[T]) Set[T] { mustHaveSameRules(s1, s2) rs := NewSet(s1.rules) - s1.EachValue(func(v interface{}) { + s1.EachValue(func(v T) { if !s2.Has(v) { rs.Add(v) } @@ -193,15 +193,15 @@ func (s1 Set) Subtract(s2 Set) Set { // both the receiver and given sets, except those that both sets have in // common. Both sets must have the same rules, or else this function will // panic. 
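The generic rewrite of the set operations above means callers now supply a Rules[T] implementation for their element type instead of type-asserting from interface{} inside every callback. intRules below is a hypothetical Rules[int] used only for illustration:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty/set"
)

// intRules implements set.Rules[int] for plain ints.
type intRules struct{}

func (intRules) Hash(v int) int                      { return v }
func (intRules) Equivalent(a, b int) bool            { return a == b }
func (intRules) SameRules(other set.Rules[int]) bool { _, ok := other.(intRules); return ok }

func main() {
	s1 := set.NewSetFromSlice[int](intRules{}, []int{1, 2, 3})
	s2 := set.NewSetFromSlice[int](intRules{}, []int{3, 4})

	union := s1.Union(s2)
	fmt.Println(union.Length()) // 4

	// EachValue now receives a typed element, no casting required.
	union.EachValue(func(v int) { fmt.Println(v) })
}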
-func (s1 Set) SymmetricDifference(s2 Set) Set { +func (s1 Set[T]) SymmetricDifference(s2 Set[T]) Set[T] { mustHaveSameRules(s1, s2) rs := NewSet(s1.rules) - s1.EachValue(func(v interface{}) { + s1.EachValue(func(v T) { if !s2.Has(v) { rs.Add(v) } }) - s2.EachValue(func(v interface{}) { + s2.EachValue(func(v T) { if !s1.Has(v) { rs.Add(v) } diff --git a/vendor/github.com/zclconf/go-cty/cty/set/rules.go b/vendor/github.com/zclconf/go-cty/cty/set/rules.go index 03ecd25b97..da4c768473 100644 --- a/vendor/github.com/zclconf/go-cty/cty/set/rules.go +++ b/vendor/github.com/zclconf/go-cty/cty/set/rules.go @@ -4,13 +4,13 @@ package set // // Each Set has a Rules instance, whose methods must satisfy the interface // contracts given below for any value that will be added to the set. -type Rules interface { +type Rules[T any] interface { // Hash returns an int that somewhat-uniquely identifies the given value. // // A good hash function will minimize collisions for values that will be // added to the set, though collisions *are* permitted. Collisions will // simply reduce the efficiency of operations on the set. - Hash(interface{}) int + Hash(T) int // Equivalent returns true if and only if the two values are considered // equivalent for the sake of set membership. Two values that are @@ -21,11 +21,11 @@ type Rules interface { // Two values that are equivalent *must* result in the same hash value, // though it is *not* required that two values with the same hash value // be equivalent. - Equivalent(interface{}, interface{}) bool + Equivalent(T, T) bool // SameRules returns true if the instance is equivalent to another Rules - // instance. - SameRules(Rules) bool + // instance over the same element type. + SameRules(Rules[T]) bool } // OrderedRules is an extension of Rules that can apply a partial order to @@ -37,8 +37,8 @@ type Rules interface { // is undefined but consistent for a particular version of cty. The exact // order in that case is not part of the contract and is subject to change // between versions. -type OrderedRules interface { - Rules +type OrderedRules[T any] interface { + Rules[T] // Less returns true if and only if the first argument should sort before // the second argument. If the second argument should sort before the first diff --git a/vendor/github.com/zclconf/go-cty/cty/set/set.go b/vendor/github.com/zclconf/go-cty/cty/set/set.go index 15a76638f5..761b0ffe9f 100644 --- a/vendor/github.com/zclconf/go-cty/cty/set/set.go +++ b/vendor/github.com/zclconf/go-cty/cty/set/set.go @@ -19,20 +19,20 @@ import ( // Set operations are not optimized to minimize memory pressure. Mutating // a set will generally create garbage and so should perhaps be avoided in // tight loops where memory pressure is a concern. -type Set struct { - vals map[int][]interface{} - rules Rules +type Set[T any] struct { + vals map[int][]T + rules Rules[T] } // NewSet returns an empty set with the membership rules given. 
-func NewSet(rules Rules) Set { - return Set{ - vals: map[int][]interface{}{}, +func NewSet[T any](rules Rules[T]) Set[T] { + return Set[T]{ + vals: map[int][]T{}, rules: rules, } } -func NewSetFromSlice(rules Rules, vals []interface{}) Set { +func NewSetFromSlice[T any](rules Rules[T], vals []T) Set[T] { s := NewSet(rules) for _, v := range vals { s.Add(v) @@ -40,11 +40,11 @@ func NewSetFromSlice(rules Rules, vals []interface{}) Set { return s } -func sameRules(s1 Set, s2 Set) bool { +func sameRules[T any](s1 Set[T], s2 Set[T]) bool { return s1.rules.SameRules(s2.rules) } -func mustHaveSameRules(s1 Set, s2 Set) { +func mustHaveSameRules[T any](s1 Set[T], s2 Set[T]) { if !sameRules(s1, s2) { panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules)) } @@ -52,11 +52,11 @@ func mustHaveSameRules(s1 Set, s2 Set) { // HasRules returns true if and only if the receiving set has the given rules // instance as its rules. -func (s Set) HasRules(rules Rules) bool { +func (s Set[T]) HasRules(rules Rules[T]) bool { return s.rules.SameRules(rules) } // Rules returns the receiving set's rules instance. -func (s Set) Rules() Rules { +func (s Set[T]) Rules() Rules[T] { return s.rules } diff --git a/vendor/github.com/zclconf/go-cty/cty/set_helper.go b/vendor/github.com/zclconf/go-cty/cty/set_helper.go index 962bb52951..5d39805ba4 100644 --- a/vendor/github.com/zclconf/go-cty/cty/set_helper.go +++ b/vendor/github.com/zclconf/go-cty/cty/set_helper.go @@ -21,15 +21,15 @@ type ValueSet struct { // ValueSet is just a thin wrapper around a set.Set with our value-oriented // "rules" applied. We do this so that the caller can work in terms of // cty.Value objects even though the set internals use the raw values. - s set.Set + s set.Set[interface{}] } // NewValueSet creates and returns a new ValueSet with the given element type. func NewValueSet(ety Type) ValueSet { - return newValueSet(set.NewSet(setRules{Type: ety})) + return newValueSet(set.NewSet(newSetRules(ety))) } -func newValueSet(s set.Set) ValueSet { +func newValueSet(s set.Set[interface{}]) ValueSet { return ValueSet{ s: s, } diff --git a/vendor/github.com/zclconf/go-cty/cty/set_internals.go b/vendor/github.com/zclconf/go-cty/cty/set_internals.go index 2b8af1e217..7b3d425034 100644 --- a/vendor/github.com/zclconf/go-cty/cty/set_internals.go +++ b/vendor/github.com/zclconf/go-cty/cty/set_internals.go @@ -21,7 +21,11 @@ type setRules struct { Type Type } -var _ set.OrderedRules = setRules{} +var _ set.OrderedRules[interface{}] = setRules{} + +func newSetRules(ety Type) set.Rules[interface{}] { + return setRules{ety} +} // Hash returns a hash value for the receiver that can be used for equality // checks where some inaccuracy is tolerable. @@ -67,7 +71,7 @@ func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool { // SameRules is only true if the other Rules instance is also a setRules struct, // and the types are considered equal. 
-func (r setRules) SameRules(other set.Rules) bool { +func (r setRules) SameRules(other set.Rules[interface{}]) bool { rules, ok := other.(setRules) if !ok { return false @@ -250,6 +254,25 @@ func appendSetHashBytes(val Value, buf *bytes.Buffer, marks ValueMarks) { return } + if val.ty.IsCapsuleType() { + buf.WriteRune('«') + ops := val.ty.CapsuleOps() + if ops != nil && ops.HashKey != nil { + key := ops.HashKey(val.EncapsulatedValue()) + buf.WriteString(fmt.Sprintf("%q", key)) + } else { + // If there isn't an explicit hash implementation then we'll + // just generate the same hash value for every value of this + // type, which is logically fine but less efficient for + // larger sets because we'll have to bucket all values + // together and scan over them with Equals to determine + // set membership. + buf.WriteRune('?') + } + buf.WriteRune('»') + return + } + // should never get down here - panic("unsupported type in set hash") + panic(fmt.Sprintf("unsupported type %#v in set hash", val.ty)) } diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown.go b/vendor/github.com/zclconf/go-cty/cty/unknown.go index 83893c0237..b3aefa4503 100644 --- a/vendor/github.com/zclconf/go-cty/cty/unknown.go +++ b/vendor/github.com/zclconf/go-cty/cty/unknown.go @@ -3,11 +3,19 @@ package cty // unknownType is the placeholder type used for the sigil value representing // "Unknown", to make it unambigiously distinct from any other possible value. type unknownType struct { + // refinement is an optional object which, if present, describes some + // additional constraints we know about the range of real values this + // unknown value could be a placeholder for. + refinement unknownValRefinement } -// unknown is a special value that can be used as the internal value of a -// Value to create a placeholder for a value that isn't yet known. -var unknown interface{} = &unknownType{} +// totallyUnknown is the representation a a value we know nothing about at +// all. Subsequent refinements of an unknown value will cause creation of +// other values of unknownType that can represent additional constraints +// on the unknown value, but all unknown values start as totally unknown +// and we will also typically lose all unknown value refinements when +// round-tripping through serialization formats. +var totallyUnknown interface{} = &unknownType{} // UnknownVal returns an Value that represents an unknown value of the given // type. Unknown values can be used to represent a value that is @@ -19,7 +27,7 @@ var unknown interface{} = &unknownType{} func UnknownVal(t Type) Value { return Value{ ty: t, - v: unknown, + v: totallyUnknown, } } @@ -80,6 +88,6 @@ func init() { } DynamicVal = Value{ ty: DynamicPseudoType, - v: unknown, + v: totallyUnknown, } } diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown_refinement.go b/vendor/github.com/zclconf/go-cty/cty/unknown_refinement.go new file mode 100644 index 0000000000..85fb28d632 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/unknown_refinement.go @@ -0,0 +1,788 @@ +package cty + +import ( + "fmt" + "math" + "strings" + + "github.com/zclconf/go-cty/cty/ctystrings" +) + +// Refine creates a [RefinementBuilder] with which to annotate the reciever +// with zero or more additional refinements that constrain the range of +// the value. +// +// Calling methods on a RefinementBuilder for a known value essentially just +// serves as assertions about the range of that value, leading to panics if +// those assertions don't hold in practice. 
This is mainly supported just to +// make programs that rely on refinements automatically self-check by using +// the refinement codepath unconditionally on both placeholders and final +// values for those placeholders. It's always a bug to refine the range of +// an unknown value and then later substitute an exact value outside of the +// refined range. +// +// Calling methods on a RefinementBuilder for an unknown value is perhaps +// more useful because the newly-refined value will then be a placeholder for +// a smaller range of values and so it may be possible for other operations +// on the unknown value to return a known result despite the exact value not +// yet being known. +// +// It is never valid to refine [DynamicVal], because that value is a +// placeholder for a value about which we know absolutely nothing. A value +// must at least have a known root type before it can support further +// refinement. +func (v Value) Refine() *RefinementBuilder { + v, marks := v.Unmark() + if unk, isUnk := v.v.(*unknownType); isUnk && unk.refinement != nil { + // We're refining a value that's already been refined before, so + // we'll start from a copy of its existing refinements. + wip := unk.refinement.copy() + return &RefinementBuilder{v, marks, wip} + } + + ty := v.Type() + var wip unknownValRefinement + switch { + case ty == DynamicPseudoType && !v.IsKnown(): + // This case specifically matches DynamicVal, which is constrained + // by backward compatibility to be a singleton and so we cannot allow + // any refinements to it. + // To preserve the typical assumption that DynamicVal is a safe + // placeholder to use when no value is known at all, we silently + // ignore all attempts to refine this particular value and just + // always echo back a totally-unrefined DynamicVal. + return &RefinementBuilder{ + orig: DynamicVal, + marks: marks, + } + case ty == String: + wip = &refinementString{} + case ty == Number: + wip = &refinementNumber{} + case ty.IsCollectionType(): + wip = &refinementCollection{ + // A collection can never have a negative length, so we'll + // start with that already constrained. + minLen: 0, + maxLen: math.MaxInt, + } + case ty == Bool || ty.IsObjectType() || ty.IsTupleType() || ty.IsCapsuleType(): + // For other known types we'll just track nullability + wip = &refinementNullable{} + case ty == DynamicPseudoType && v.IsNull(): + // It's okay in principle to refine a null value of unknown type, + // although all we can refine about it is that it's definitely null and + // so this is pretty pointless and only supported to avoid callers + // always needing to treat this situation as a special case to avoid + // panic. + wip = &refinementNullable{ + isNull: tristateTrue, + } + default: + // we leave "wip" as nil for all other types, representing that + // they don't support refinements at all and so any call on the + // RefinementBuilder should fail. + + // NOTE: We intentionally don't allow any refinements for + // cty.DynamicVal here, even though it could be nice in principle + // to at least track non-nullness for those, because it's historically + // been valid to directly compare values with cty.DynamicVal using + // the Go "==" operator and recording a refinement for an untyped + // unknown value would break existing code relying on that. + } + + return &RefinementBuilder{v, marks, wip} +} + +// RefineWith is a variant of Refine which uses callback functions instead of +// the builder pattern.
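+//
+// For example (an illustrative sketch; "v" stands for any refinable value):
+//
+//	v.RefineWith(func(b *RefinementBuilder) *RefinementBuilder {
+//		return b.NotNull()
+//	})
+//
+// behaves the same as v.Refine().NotNull().NewValue().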
+// +// The result is equivalent to passing the return value of [Value.Refine] to the +// first callback, then passing the builder through any other +// callbacks in turn, and then calling [RefinementBuilder.NewValue] on the +// final result. +// +// The builder pattern approach of [Value.Refine] is more convenient for inline +// annotation of refinements when constructing a value, but this alternative +// approach may be more convenient when applying pre-defined collections of +// refinements, or when refinements are defined separately from the values +// they will apply to. +// +// Each refiner callback should return the same pointer that it was given, +// typically after having mutated it using the [RefinementBuilder] methods. +// It's invalid to return a different builder. +func (v Value) RefineWith(refiners ...func(*RefinementBuilder) *RefinementBuilder) Value { + if len(refiners) == 0 { + return v + } + origBuilder := v.Refine() + builder := origBuilder + for _, refiner := range refiners { + builder = refiner(builder) + if builder != origBuilder { + panic("refiner callback returned a different builder") + } + } + return builder.NewValue() +} + +// RefineNotNull is a shorthand for Value.Refine().NotNull().NewValue(), because +// declaring that an unknown value isn't null is by far the most common use of +// refinements. +func (v Value) RefineNotNull() Value { + return v.Refine().NotNull().NewValue() +} + +// RefinementBuilder is a supporting type for the [Value.Refine] method, +// using the builder pattern to apply zero or more constraints before +// constructing a new value with all of those constraints applied. +// +// Most of the methods of this type return the same receiver to allow +// for method call chaining. End call chains with a call to +// [RefinementBuilder.NewValue] to obtain the newly-refined value. +type RefinementBuilder struct { + orig Value + marks ValueMarks + wip unknownValRefinement +} + +// refineable is an internal detail to help with three special situations +// related to refinements: +// - If the refinement is to a value of a type that doesn't support any +// refinements at all, this function will immediately panic with a +// message reporting that, because it's a caller bug to try to refine +// a value in a way that's inappropriate for its known type. +// - If the refinement is to an unknown value of an unknown type +// (i.e. cty.DynamicVal) then it returns false, indicating that the +// caller should just silently ignore whatever refinement was requested. +// - In all other cases this function returns true, which means the direct +// caller should attempt to apply the requested refinement, and then +// panic itself if the requested refinement doesn't make sense for the +// specific value being refined. +func (b *RefinementBuilder) refineable() bool { + if b.orig == DynamicVal { + return false + } + if b.wip == nil { + panic(fmt.Sprintf("cannot refine a %#v value", b.orig.Type())) + } + return true +} + +// NotNull constrains the value as definitely not being null. +// +// NotNull is valid when refining values of the following types: +// - number, boolean, and string values +// - list, set, or map types of any element type +// - values of object types +// - values of collection types +// - values of capsule types +// +// When refining any other type this function will panic. +// +// In particular note that it is not valid to constrain an untyped value +// -- a value whose type is `cty.DynamicPseudoType` -- as being non-null.
+// An unknown value of an unknown type is always completely unconstrained. +func (b *RefinementBuilder) NotNull() *RefinementBuilder { + if !b.refineable() { + return b + } + + if b.orig.IsKnown() && b.orig.IsNull() { + panic("refining null value as non-null") + } + if b.wip.null() == tristateTrue { + panic("refining null value as non-null") + } + + b.wip.setNull(tristateFalse) + + return b +} + +// Null constrains the value as definitely null. +// +// Null is valid for the same types as [RefinementBuilder.NotNull]. +// When refining any other type this function will panic. +// +// Explicitly constraining a value to be null is strange because that suggests +// that the caller does actually know the value -- there is only one null +// value for each type constraint -- but this is here for symmetry with the +// fact that a [ValueRange] can also represent that a value is definitely null. +func (b *RefinementBuilder) Null() *RefinementBuilder { + if !b.refineable() { + return b + } + + if b.orig.IsKnown() && !b.orig.IsNull() { + panic("refining non-null value as null") + } + if b.wip.null() == tristateFalse { + panic("refining non-null value as null") + } + + b.wip.setNull(tristateTrue) + + return b +} + +// NumberRangeInclusive constrains the upper and/or lower bounds of a number value, +// or panics if this builder is not refining a number value. +// +// The two given values are interpreted as inclusive bounds and either one +// may be an unknown number if only one of the two bounds is currently known. +// If either of the given values is not a non-null number value then this +// function will panic. +func (b *RefinementBuilder) NumberRangeInclusive(min, max Value) *RefinementBuilder { + return b.NumberRangeLowerBound(min, true).NumberRangeUpperBound(max, true) +} + +// NumberRangeLowerBound constrains the lower bound of a number value, or +// panics if this builder is not refining a number value. +func (b *RefinementBuilder) NumberRangeLowerBound(min Value, inclusive bool) *RefinementBuilder { + if !b.refineable() { + return b + } + + wip, ok := b.wip.(*refinementNumber) + if !ok { + panic(fmt.Sprintf("cannot refine numeric bounds for a %#v value", b.orig.Type())) + } + + if !min.IsKnown() { + // Nothing to do if the lower bound is unknown. + return b + } + if min.IsNull() { + panic("number range lower bound must not be null") + } + + if inclusive { + if gt := min.GreaterThan(b.orig); gt.IsKnown() && gt.True() { + panic(fmt.Sprintf("refining %#v to be >= %#v", b.orig, min)) + } + } else { + if gt := min.GreaterThanOrEqualTo(b.orig); gt.IsKnown() && gt.True() { + panic(fmt.Sprintf("refining %#v to be > %#v", b.orig, min)) + } + } + + if wip.min != NilVal { + var ok Value + if inclusive && !wip.minInc { + ok = min.GreaterThan(wip.min) + } else { + ok = min.GreaterThanOrEqualTo(wip.min) + } + if ok.IsKnown() && ok.False() { + return b // Our existing refinement is more constrained + } + } + + if min != NegativeInfinity { + wip.min = min + wip.minInc = inclusive + } + + wip.assertConsistentBounds() + return b +} + +// NumberRangeUpperBound constrains the upper bound of a number value, or +// panics if this builder is not refining a number value. +func (b *RefinementBuilder) NumberRangeUpperBound(max Value, inclusive bool) *RefinementBuilder { + if !b.refineable() { + return b + } + + wip, ok := b.wip.(*refinementNumber) + if !ok { + panic(fmt.Sprintf("cannot refine numeric bounds for a %#v value", b.orig.Type())) + } + + if !max.IsKnown() { + // Nothing to do if the upper bound is unknown.
+ return b + } + if max.IsNull() { + panic("number range upper bound must not be null") + } + + if inclusive { + if lt := max.LessThan(b.orig); lt.IsKnown() && lt.True() { + panic(fmt.Sprintf("refining %#v to be <= %#v", b.orig, max)) + } + } else { + if lt := max.LessThanOrEqualTo(b.orig); lt.IsKnown() && lt.True() { + panic(fmt.Sprintf("refining %#v to be < %#v", b.orig, max)) + } + } + + if wip.max != NilVal { + var ok Value + if inclusive && !wip.maxInc { + ok = max.LessThan(wip.max) + } else { + ok = max.LessThanOrEqualTo(wip.max) + } + if ok.IsKnown() && ok.False() { + return b // Our existing refinement is more constrained + } + } + + if max != PositiveInfinity { + wip.max = max + wip.maxInc = inclusive + } + + wip.assertConsistentBounds() + return b +} + +// CollectionLengthLowerBound constrains the lower bound of the length of a +// collection value, or panics if this builder is not refining a collection +// value. +func (b *RefinementBuilder) CollectionLengthLowerBound(min int) *RefinementBuilder { + if !b.refineable() { + return b + } + + wip, ok := b.wip.(*refinementCollection) + if !ok { + panic(fmt.Sprintf("cannot refine collection length bounds for a %#v value", b.orig.Type())) + } + + minVal := NumberIntVal(int64(min)) + if b.orig.IsKnown() { + realLen := b.orig.Length() + if gt := minVal.GreaterThan(realLen); gt.IsKnown() && gt.True() { + panic(fmt.Sprintf("refining collection of length %#v with lower bound %#v", realLen, min)) + } + } + + if wip.minLen > min { + return b // Our existing refinement is more constrained + } + + wip.minLen = min + wip.assertConsistentLengthBounds() + + return b +} + +// CollectionLengthUpperBound constrains the upper bound of the length of a +// collection value, or panics if this builder is not refining a collection +// value. +func (b *RefinementBuilder) CollectionLengthUpperBound(max int) *RefinementBuilder { + if !b.refineable() { + return b + } + + wip, ok := b.wip.(*refinementCollection) + if !ok { + panic(fmt.Sprintf("cannot refine collection length bounds for a %#v value", b.orig.Type())) + } + + if b.orig.IsKnown() { + maxVal := NumberIntVal(int64(max)) + realLen := b.orig.Length() + if lt := maxVal.LessThan(realLen); lt.IsKnown() && lt.True() { + panic(fmt.Sprintf("refining collection of length %#v with upper bound %#v", realLen, max)) + } + } + + if wip.maxLen < max { + return b // Our existing refinement is more constrained + } + + wip.maxLen = max + wip.assertConsistentLengthBounds() + + return b +} + +// CollectionLength is a shorthand for passing the same length to both +// [CollectionLengthLowerBound] and [CollectionLengthUpperBound]. +// +// A collection with a refined length with equal bounds can sometimes collapse +// to a known value. Refining to length zero always produces a known value. +// The behavior for other lengths varies by collection type kind. +// +// If the unknown value is of a set type, it's only valid to use this method +// if the caller knows that there will be the given number of _unique_ values +// in the set. If any values might potentially coalesce together once known, +// use [CollectionLengthUpperBound] instead. +func (b *RefinementBuilder) CollectionLength(length int) *RefinementBuilder { + return b.CollectionLengthLowerBound(length).CollectionLengthUpperBound(length) +} + +// StringPrefix constrains the prefix of a string value, or panics if this +// builder is not refining a string value.
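+//
+// For example (an illustrative sketch): a caller that knows an unknown
+// string will always start with "https://" can record that fact:
+//
+//	v := UnknownVal(String).Refine().
+//		NotNull().
+//		StringPrefix("https://").
+//		NewValue()
+//
+// v.Range().StringPrefix() will then report the retained prefix.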
+// +// The given prefix will be Unicode normalized in the same way that a +// cty.StringVal would be. +// +// Due to Unicode normalization and grapheme cluster rules, appending new +// characters to a string can change the meaning of earlier characters. +// StringPrefix may discard one or more characters from the end of the given +// prefix to avoid that problem. +// +// Although cty cannot check this automatically, applications should avoid +// relying on the discarding of the suffix for correctness. For example, if the +// prefix ends with an emoji base character then StringPrefix will discard it +// in case subsequent characters include emoji modifiers, but it's still +// incorrect for the final string to use an entirely different base character. +// +// Applications which fully control the final result and can guarantee the +// subsequent characters will not combine with the prefix may be able to use +// [RefinementBuilder.StringPrefixFull] instead, after carefully reviewing +// the constraints described in its documentation. +func (b *RefinementBuilder) StringPrefix(prefix string) *RefinementBuilder { + return b.StringPrefixFull(ctystrings.SafeKnownPrefix(prefix)) +} + +// StringPrefixFull is a variant of StringPrefix that will never shorten the +// given prefix to take into account the possibility of the next character +// combining with the end of the prefix. +// +// Applications which fully control the subsequent characters can use this +// as long as they guarantee that the characters added later cannot possibly +// combine with characters at the end of the prefix to form a single grapheme +// cluster. For example, it would be unsafe to use the full prefix "hello" if +// there is any chance that the final string will add a combining diacritic +// character after the "o", because that would then change the final character. +// +// Use [RefinementBuilder.StringPrefix] instead if an application cannot fully +// control the final result to avoid violating this rule. +func (b *RefinementBuilder) StringPrefixFull(prefix string) *RefinementBuilder { + if !b.refineable() { + return b + } + + wip, ok := b.wip.(*refinementString) + if !ok { + panic(fmt.Sprintf("cannot refine string prefix for a %#v value", b.orig.Type())) + } + + // We must apply the same Unicode processing we'd normally use for a + // cty string so that the prefix will be comparable. + prefix = NormalizeString(prefix) + + // If we have a known string value then the given prefix must actually + // match it. + if b.orig.IsKnown() && !b.orig.IsNull() { + have := b.orig.AsString() + matchLen := len(have) + if l := len(prefix); l < matchLen { + matchLen = l + } + have = have[:matchLen] + new := prefix[:matchLen] + if have != new { + panic("refined prefix is inconsistent with known value") + } + } + + // If we already have a refined prefix then the overlapping parts of that + // and the new prefix must match. + { + matchLen := len(wip.prefix) + if l := len(prefix); l < matchLen { + matchLen = l + } + + have := wip.prefix[:matchLen] + new := prefix[:matchLen] + if have != new { + panic("refined prefix is inconsistent with previous refined prefix") + } + } + + // We'll only save the new prefix if it's longer than the one we already + // had. + if len(prefix) > len(wip.prefix) { + wip.prefix = prefix + } + + return b +} + +// NewValue completes the refinement process by constructing a new value +// that is guaranteed to meet all of the previously-specified refinements. 
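+//
+// For example (an illustrative sketch): refining an unknown list to be
+// non-null with a length of exactly zero collapses it to a known value:
+//
+//	v := UnknownVal(List(String)).Refine().
+//		NotNull().
+//		CollectionLength(0).
+//		NewValue()
+//	// v is now ListValEmpty(String) rather than an unknown value.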
+// +// If the original value being refined was known then the result is exactly +// that value, because otherwise the previous refinement calls would have +// panicked reporting the refinements as invalid for the value. +// +// If the original value was unknown then the result is typically also unknown +// but may have additional refinements compared to the original. If the applied +// refinements have reduced the range to a single exact value then the result +// might be that known value. +func (b *RefinementBuilder) NewValue() (ret Value) { + defer func() { + // Regardless of how we return, the new value should have the same + // marks as our original value. + ret = ret.WithMarks(b.marks) + }() + + if b.orig.IsKnown() || b.orig == DynamicVal { + return b.orig + } + + // We have a few cases where the value has been refined enough that we now + // know exactly what the value is, or at least we can produce a more + // detailed approximation of it. + switch b.wip.null() { + case tristateTrue: + // There is only one null value of each type so this is now known. + return NullVal(b.orig.Type()) + case tristateFalse: + // If we know it's definitely not null then we might have enough + // information to construct a known, non-null value. + if rfn, ok := b.wip.(*refinementNumber); ok { + // If both bounds are inclusive and equal then our value can + // only be the same number as the bounds. + if rfn.maxInc && rfn.minInc { + if rfn.min != NilVal && rfn.max != NilVal { + eq := rfn.min.Equals(rfn.max) + if eq.IsKnown() && eq.True() { + return rfn.min + } + } + } + } else if rfn, ok := b.wip.(*refinementCollection); ok { + // If both of the bounds are equal then we know the length is + // the same number as the bounds. + if rfn.minLen == rfn.maxLen { + knownLen := rfn.minLen + ty := b.orig.Type() + if knownLen == 0 { + // If we know the length is zero then we can construct + // a known value of any collection kind. + switch { + case ty.IsListType(): + return ListValEmpty(ty.ElementType()) + case ty.IsSetType(): + return SetValEmpty(ty.ElementType()) + case ty.IsMapType(): + return MapValEmpty(ty.ElementType()) + } + } else if ty.IsListType() { + // If we know the length of the list then we can + // create a known list with unknown elements instead + // of a wholly-unknown list. + elems := make([]Value, knownLen) + unk := UnknownVal(ty.ElementType()) + for i := range elems { + elems[i] = unk + } + return ListVal(elems) + } else if ty.IsSetType() && knownLen == 1 { + // If we know we have a one-element set then we + // know the one element can't possibly coalesce with + // anything else and so we can create a known set with + // an unknown element. + return SetVal([]Value{UnknownVal(ty.ElementType())}) + } + } + } + } + + return Value{ + ty: b.orig.ty, + v: &unknownType{refinement: b.wip}, + } +} + +// unknownValRefinement is an interface pretending to be a sum type representing +// the different kinds of unknown value refinements we support for different +// types of value. +type unknownValRefinement interface { + unknownValRefinementSigil() + copy() unknownValRefinement + null() tristateBool + setNull(tristateBool) + rawEqual(other unknownValRefinement) bool + GoString() string +} + +type refinementString struct { + refinementNullable + prefix string +} + +func (r *refinementString) unknownValRefinementSigil() {} + +func (r *refinementString) copy() unknownValRefinement { + ret := *r + // Everything in refinementString is immutable, so a shallow copy is sufficient.
+ return &ret +} + +func (r *refinementString) rawEqual(other unknownValRefinement) bool { + { + other, ok := other.(*refinementString) + if !ok { + return false + } + return (r.refinementNullable.rawEqual(&other.refinementNullable) && + r.prefix == other.prefix) + } +} + +func (r *refinementString) GoString() string { + var b strings.Builder + b.WriteString(r.refinementNullable.GoString()) + if r.prefix != "" { + fmt.Fprintf(&b, ".StringPrefixFull(%q)", r.prefix) + } + return b.String() +} + +type refinementNumber struct { + refinementNullable + min, max Value + minInc, maxInc bool +} + +func (r *refinementNumber) unknownValRefinementSigil() {} + +func (r *refinementNumber) copy() unknownValRefinement { + ret := *r + // Everything in refinementNumber is immutable, so a shallow copy is sufficient. + return &ret +} + +func (r *refinementNumber) rawEqual(other unknownValRefinement) bool { + { + other, ok := other.(*refinementNumber) + if !ok { + return false + } + return (r.refinementNullable.rawEqual(&other.refinementNullable) && + r.min.RawEquals(other.min) && + r.max.RawEquals(other.max) && + r.minInc == other.minInc && + r.maxInc == other.maxInc) + } +} + +func (r *refinementNumber) GoString() string { + var b strings.Builder + b.WriteString(r.refinementNullable.GoString()) + if r.min != NilVal && r.min != NegativeInfinity { + fmt.Fprintf(&b, ".NumberLowerBound(%#v, %t)", r.min, r.minInc) + } + if r.max != NilVal && r.max != PositiveInfinity { + fmt.Fprintf(&b, ".NumberUpperBound(%#v, %t)", r.max, r.maxInc) + } + return b.String() +} + +func (r *refinementNumber) assertConsistentBounds() { + if r.min == NilVal || r.max == NilVal { + return // If only one bound is constrained then there's nothing to be inconsistent with + } + var ok Value + if r.minInc != r.maxInc { + ok = r.min.LessThan(r.max) + } else { + ok = r.min.LessThanOrEqualTo(r.max) + } + if ok.IsKnown() && ok.False() { + panic(fmt.Sprintf("number lower bound %#v is greater than upper bound %#v", r.min, r.max)) + } +} + +type refinementCollection struct { + refinementNullable + minLen, maxLen int +} + +func (r *refinementCollection) unknownValRefinementSigil() {} + +func (r *refinementCollection) copy() unknownValRefinement { + ret := *r + // Everything in refinementCollection is immutable, so a shallow copy is sufficient. + return &ret +} + +func (r *refinementCollection) rawEqual(other unknownValRefinement) bool { + { + other, ok := other.(*refinementCollection) + if !ok { + return false + } + return (r.refinementNullable.rawEqual(&other.refinementNullable) && + r.minLen == other.minLen && + r.maxLen == other.maxLen) + } +} + +func (r *refinementCollection) GoString() string { + var b strings.Builder + b.WriteString(r.refinementNullable.GoString()) + if r.minLen != 0 { + fmt.Fprintf(&b, ".CollectionLengthLowerBound(%d)", r.minLen) + } + if r.maxLen != math.MaxInt { + fmt.Fprintf(&b, ".CollectionLengthUpperBound(%d)", r.maxLen) + } + return b.String() +} + +func (r *refinementCollection) assertConsistentLengthBounds() { + if r.maxLen < r.minLen { + panic(fmt.Sprintf("collection length upper bound %d is less than lower bound %d", r.maxLen, r.minLen)) + } +} + +type refinementNullable struct { + isNull tristateBool +} + +func (r *refinementNullable) unknownValRefinementSigil() {} + +func (r *refinementNullable) copy() unknownValRefinement { + ret := *r + // Everything in refinementNullable is immutable, so a shallow copy is sufficient.
+ return &ret +} + +func (r *refinementNullable) null() tristateBool { + return r.isNull +} + +func (r *refinementNullable) setNull(v tristateBool) { + r.isNull = v +} + +func (r *refinementNullable) rawEqual(other unknownValRefinement) bool { + { + other, ok := other.(*refinementNullable) + if !ok { + return false + } + return r.isNull == other.isNull + } +} + +func (r *refinementNullable) GoString() string { + switch r.isNull { + case tristateFalse: + return ".NotNull()" + case tristateTrue: + return ".Null()" + default: + return "" + } +} + +type tristateBool rune + +const tristateTrue tristateBool = 'T' +const tristateFalse tristateBool = 'F' +const tristateUnknown tristateBool = 0 diff --git a/vendor/github.com/zclconf/go-cty/cty/value.go b/vendor/github.com/zclconf/go-cty/cty/value.go index f6a25ddef9..e5b29b603a 100644 --- a/vendor/github.com/zclconf/go-cty/cty/value.go +++ b/vendor/github.com/zclconf/go-cty/cty/value.go @@ -48,7 +48,8 @@ func (val Value) IsKnown() bool { if val.IsMarked() { return val.unmarkForce().IsKnown() } - return val.v != unknown + _, unknown := val.v.(*unknownType) + return !unknown } // IsNull returns true if the value is null. Values of any type can be diff --git a/vendor/github.com/zclconf/go-cty/cty/value_init.go b/vendor/github.com/zclconf/go-cty/cty/value_init.go index 25ee0b678c..a1743a09ef 100644 --- a/vendor/github.com/zclconf/go-cty/cty/value_init.go +++ b/vendor/github.com/zclconf/go-cty/cty/value_init.go @@ -5,8 +5,7 @@ import ( "math/big" "reflect" - "golang.org/x/text/unicode/norm" - + "github.com/zclconf/go-cty/cty/ctystrings" "github.com/zclconf/go-cty/cty/set" ) @@ -107,7 +106,7 @@ func StringVal(v string) Value { // A return value from this function can be meaningfully compared byte-for-byte // with a Value.AsString result. 
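+//
+// For example (illustrative): a decomposed "e" followed by a combining
+// acute accent normalizes to the single precomposed character:
+//
+//	NormalizeString("e\u0301") == "\u00e9" // true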
func NormalizeString(s string) string { - return norm.NFC.String(s) + return ctystrings.Normalize(s) } // ObjectVal returns a Value of an object type whose structure is defined @@ -287,7 +286,7 @@ func SetVal(vals []Value) Value { rawList[i] = val.v } - rawVal := set.NewSetFromSlice(setRules{elementType}, rawList) + rawVal := set.NewSetFromSlice(set.Rules[interface{}](setRules{elementType}), rawList) return Value{ ty: Set(elementType), @@ -334,7 +333,7 @@ func SetValFromValueSet(s ValueSet) Value { func SetValEmpty(element Type) Value { return Value{ ty: Set(element), - v: set.NewSet(setRules{element}), + v: set.NewSet(set.Rules[interface{}](setRules{element})), } } diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go index 8c37535c88..c4584bd936 100644 --- a/vendor/github.com/zclconf/go-cty/cty/value_ops.go +++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go @@ -33,7 +33,17 @@ func (val Value) GoString() string { return "cty.DynamicVal" } if !val.IsKnown() { - return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty) + rfn := val.v.(*unknownType).refinement + var suffix string + if rfn != nil { + calls := rfn.GoString() + if calls == ".NotNull()" { + suffix = ".RefineNotNull()" + } else { + suffix = ".Refine()" + rfn.GoString() + ".NewValue()" + } + } + return fmt.Sprintf("cty.UnknownVal(%#v)%s", val.ty, suffix) } // By the time we reach here we've dealt with all of the exceptions around @@ -47,6 +57,9 @@ func (val Value) GoString() string { } return "cty.False" case Number: + if f, ok := val.v.(big.Float); ok { + panic(fmt.Sprintf("number value contains big.Float value %s, rather than pointer to big.Float", f.Text('g', -1))) + } fv := val.v.(*big.Float) // We'll try to use NumberIntVal or NumberFloatVal if we can, since // the fully-general initializer call is pretty ugly-looking. @@ -116,19 +129,44 @@ func (val Value) GoString() string { // Use RawEquals to compare if two values are equal *ignoring* the // short-circuit rules and the exception for null values. func (val Value) Equals(other Value) Value { - if val.IsMarked() || other.IsMarked() { - val, valMarks := val.Unmark() - other, otherMarks := other.Unmark() + if val.ContainsMarked() || other.ContainsMarked() { + val, valMarks := val.UnmarkDeep() + other, otherMarks := other.UnmarkDeep() return val.Equals(other).WithMarks(valMarks, otherMarks) } - // Start by handling Unknown values before considering types. - // This needs to be done since Null values are always equal regardless of - // type. + // Some easy cases with comparisons to null. + switch { + case val.IsNull() && definitelyNotNull(other): + return False + case other.IsNull() && definitelyNotNull(val): + return False + } + // If we have one known value and one unknown value then we may be + // able to quickly disqualify equality based on the range of the unknown + // value. + if val.IsKnown() && !other.IsKnown() { + otherRng := other.Range() + if ok := otherRng.Includes(val); ok.IsKnown() && ok.False() { + return False + } + } else if other.IsKnown() && !val.IsKnown() { + valRng := val.Range() + if ok := valRng.Includes(other); ok.IsKnown() && ok.False() { + return False + } + } + + // We need to deal with unknown values before anything else with nulls + // because any unknown value that hasn't yet been refined as non-null + // could become null, and nulls of any types are equal to one another. 
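+	// An equality test can never produce a null result, so even when the
+	// answer is unknown we can still refine it as definitely not null;
+	// that is what this helper captures.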
+ unknownResult := func() Value { + return UnknownVal(Bool).Refine().NotNull().NewValue() + } switch { case !val.IsKnown() && !other.IsKnown(): // both unknown - return UnknownVal(Bool) + return unknownResult() case val.IsKnown() && !other.IsKnown(): switch { case val.IsNull(), other.ty.HasDynamicTypes(): @@ -136,13 +174,13 @@ func (val Value) Equals(other Value) Value { // nulls of any type are equal. // An unknown with a dynamic type compares as unknown, which we need // to check before the type comparison below. - return UnknownVal(Bool) + return unknownResult() case !val.ty.Equals(other.ty): // There is no null comparison or dynamic types, so unequal types // will never be equal. return False default: - return UnknownVal(Bool) + return unknownResult() } case other.IsKnown() && !val.IsKnown(): switch { @@ -151,13 +189,13 @@ func (val Value) Equals(other Value) Value { // nulls of any type are equal. // An unknown with a dynamic type compares as unknown, which we need // to check before the type comparison below. - return UnknownVal(Bool) + return unknownResult() case !other.ty.Equals(val.ty): // There's no null comparison or dynamic types, so unequal types // will never be equal. return False default: - return UnknownVal(Bool) + return unknownResult() } } @@ -179,7 +217,7 @@ func (val Value) Equals(other Value) Value { return BoolVal(false) } - return UnknownVal(Bool) + return unknownResult() } if !val.ty.Equals(other.ty) { @@ -191,7 +229,7 @@ func (val Value) Equals(other Value) Value { switch { case ty == Number: - result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0 + result = rawNumberEqual(val.v.(*big.Float), other.v.(*big.Float)) case ty == Bool: result = val.v.(bool) == other.v.(bool) case ty == String: @@ -213,7 +251,7 @@ func (val Value) Equals(other Value) Value { } eq := lhs.Equals(rhs) if !eq.IsKnown() { - return UnknownVal(Bool) + return unknownResult() } if eq.False() { result = false @@ -234,7 +272,7 @@ func (val Value) Equals(other Value) Value { } eq := lhs.Equals(rhs) if !eq.IsKnown() { - return UnknownVal(Bool) + return unknownResult() } if eq.False() { result = false @@ -256,7 +294,7 @@ func (val Value) Equals(other Value) Value { } eq := lhs.Equals(rhs) if !eq.IsKnown() { - return UnknownVal(Bool) + return unknownResult() } if eq.False() { result = false @@ -265,16 +303,16 @@ func (val Value) Equals(other Value) Value { } } case ty.IsSetType(): - s1 := val.v.(set.Set) - s2 := other.v.(set.Set) + s1 := val.v.(set.Set[interface{}]) + s2 := other.v.(set.Set[interface{}]) equal := true // Two sets are equal if all of their values are known and all values // in one are also in the other. 
for it := s1.Iterator(); it.Next(); { rv := it.Value() - if rv == unknown { // "unknown" is the internal representation of unknown-ness - return UnknownVal(Bool) + if _, unknown := rv.(*unknownType); unknown { // "*unknownType" is the internal representation of unknown-ness + return unknownResult() } if !s2.Has(rv) { equal = false @@ -282,8 +320,8 @@ func (val Value) Equals(other Value) Value { } for it := s2.Iterator(); it.Next(); { rv := it.Value() - if rv == unknown { // "unknown" is the internal representation of unknown-ness - return UnknownVal(Bool) + if _, unknown := rv.(*unknownType); unknown { // "*unknownType" is the internal representation of unknown-ness + return unknownResult() } if !s1.Has(rv) { equal = false @@ -310,7 +348,7 @@ func (val Value) Equals(other Value) Value { } eq := lhs.Equals(rhs) if !eq.IsKnown() { - return UnknownVal(Bool) + return unknownResult() } if eq.False() { result = false @@ -390,7 +428,17 @@ func (val Value) RawEquals(other Value) bool { other = other.unmarkForce() if (!val.IsKnown()) && (!other.IsKnown()) { - return true + // If either unknown value has refinements then they must match. + valRfn := val.v.(*unknownType).refinement + otherRfn := other.v.(*unknownType).refinement + switch { + case (valRfn == nil) != (otherRfn == nil): + return false + case valRfn != nil: + return valRfn.rawEqual(otherRfn) + default: + return true + } } if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) { return false @@ -545,7 +593,8 @@ func (val Value) Add(other Value) Value { if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + ret := shortCircuit.RefineWith(numericRangeArithmetic(Value.Add, val.Range(), other.Range())) + return ret.RefineNotNull() } ret := new(big.Float) @@ -564,7 +613,8 @@ func (val Value) Subtract(other Value) Value { if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + ret := shortCircuit.RefineWith(numericRangeArithmetic(Value.Subtract, val.Range(), other.Range())) + return ret.RefineNotNull() } return val.Add(other.Negate()) @@ -580,7 +630,7 @@ func (val Value) Negate() Value { if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } ret := new(big.Float).Neg(val.v.(*big.Float)) @@ -597,8 +647,14 @@ func (val Value) Multiply(other Value) Value { } if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + // If either value is exactly zero then the result must either be + // zero or an error. + if val == Zero || other == Zero { + return Zero + } shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + ret := shortCircuit.RefineWith(numericRangeArithmetic(Value.Multiply, val.Range(), other.Range())) + return ret.RefineNotNull() } // find the larger precision of the arguments @@ -643,7 +699,10 @@ func (val Value) Divide(other Value) Value { if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + // TODO: We could potentially refine the range of the result here, but + // we don't right now because our division operation is not monotone + // if the denominator could potentially be zero. 
+ return (*shortCircuit).RefineNotNull() } ret := new(big.Float) @@ -675,7 +734,7 @@ func (val Value) Modulo(other Value) Value { if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } // We cheat a bit here with infinities, just abusing the Multiply operation @@ -713,7 +772,7 @@ func (val Value) Absolute() Value { if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + return (*shortCircuit).Refine().NotNull().NumberRangeInclusive(Zero, UnknownVal(Number)).NewValue() } ret := (&big.Float{}).Abs(val.v.(*big.Float)) @@ -886,23 +945,23 @@ func (val Value) HasIndex(key Value) Value { } if val.ty == DynamicPseudoType { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } switch { case val.Type().IsListType(): if key.Type() == DynamicPseudoType { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } if key.Type() != Number { return False } if !key.IsKnown() { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } if !val.IsKnown() { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } index, accuracy := key.v.(*big.Float).Int64() @@ -913,17 +972,17 @@ func (val Value) HasIndex(key Value) Value { return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0) case val.Type().IsMapType(): if key.Type() == DynamicPseudoType { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } if key.Type() != String { return False } if !key.IsKnown() { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } if !val.IsKnown() { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } keyStr := key.v.(string) @@ -932,14 +991,14 @@ func (val Value) HasIndex(key Value) Value { return BoolVal(exists) case val.Type().IsTupleType(): if key.Type() == DynamicPseudoType { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } if key.Type() != Number { return False } if !key.IsKnown() { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } index, accuracy := key.v.(*big.Float).Int64() @@ -974,16 +1033,16 @@ func (val Value) HasElement(elem Value) Value { panic("not a set type") } if !val.IsKnown() || !elem.IsKnown() { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } if val.IsNull() { - panic("can't call HasElement on a nil value") + panic("can't call HasElement on a null value") } if !ty.ElementType().Equals(elem.Type()) { return False } - s := val.v.(set.Set) + s := val.v.(set.Set[interface{}]) return BoolVal(s.Has(elem.v)) } @@ -1009,7 +1068,10 @@ func (val Value) Length() Value { } if !val.IsKnown() { - return UnknownVal(Number) + // If the whole collection isn't known then the length isn't known + // either, but we can still put some bounds on the range of the result. + rng := val.Range() + return UnknownVal(Number).RefineWith(valueRefineLengthResult(rng)) } if val.Type().IsSetType() { // The Length rules are a little different for sets because if any @@ -1017,7 +1079,7 @@ func (val Value) Length() Value { // may or may not be equal to other elements in the set, and thus they // may or may not coalesce with other elements and produce fewer // items in the resulting set. 
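+		// For example (sketch): a set whose backing store holds two
+		// unknown strings has a final length somewhere between 1 and 2,
+		// because those two values might turn out to be equal and
+		// coalesce into a single element once they become known.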
- storeLength := int64(val.v.(set.Set).Length()) + storeLength := int64(val.v.(set.Set[interface{}]).Length()) if storeLength == 1 || val.IsWhollyKnown() { // If our set is wholly known then we know its length. // @@ -1027,13 +1089,26 @@ func (val Value) Length() Value { // unknown value cannot represent more than one known value. return NumberIntVal(storeLength) } - // Otherwise, we cannot predict the length. - return UnknownVal(Number) + // Otherwise, we cannot predict the length exactly but we can at + // least constrain both bounds of its range, because value coalescing + // can only ever reduce the number of elements in the set. + return UnknownVal(Number).Refine().NotNull().NumberRangeInclusive(NumberIntVal(1), NumberIntVal(storeLength)).NewValue() } return NumberIntVal(int64(val.LengthInt())) } +func valueRefineLengthResult(collRng ValueRange) func(*RefinementBuilder) *RefinementBuilder { + return func(b *RefinementBuilder) *RefinementBuilder { + return b. + NotNull(). + NumberRangeInclusive( + NumberIntVal(int64(collRng.LengthLowerBound())), + NumberIntVal(int64(collRng.LengthUpperBound())), + ) + } +} + // LengthInt is like Length except it returns an int. It has the same behavior // as Length except that it will panic if the receiver is unknown. // @@ -1078,7 +1153,7 @@ func (val Value) LengthInt() int { // compatibility with callers that were relying on LengthInt rather // than calling Length. Instead of panicking when a set contains an // unknown value, LengthInt returns the largest possible length. - return val.v.(set.Set).Length() + return val.v.(set.Set[interface{}]).Length() case val.ty.IsMapType(): return len(val.v.(map[string]interface{})) @@ -1164,7 +1239,7 @@ func (val Value) Not() Value { if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Bool) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } return BoolVal(!val.v.(bool)) @@ -1180,8 +1255,14 @@ func (val Value) And(other Value) Value { } if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + // If either value is known to be exactly False then it doesn't + // matter what the other value is, because the final result must + // either be False or an error. + if val == False || other == False { + return False + } shortCircuit = forceShortCircuitType(shortCircuit, Bool) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } return BoolVal(val.v.(bool) && other.v.(bool)) @@ -1197,8 +1278,14 @@ func (val Value) Or(other Value) Value { } if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + // If either value is known to be exactly True then it doesn't + // matter what the other value is, because the final result must + // either be True or an error. + if val == True || other == True { + return True + } shortCircuit = forceShortCircuitType(shortCircuit, Bool) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } return BoolVal(val.v.(bool) || other.v.(bool)) @@ -1214,8 +1301,30 @@ func (val Value) LessThan(other Value) Value { } if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + // We might be able to return a known answer even with unknown inputs. + // FIXME: This is more conservative than it needs to be, because it + // treats all bounds as exclusive bounds. 
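+		// For example (sketch): if val is refined to be at most 3 and
+		// other is refined to be at least 5 then we can return True here
+		// even though neither operand is known exactly.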
+ valRng := val.Range() + otherRng := other.Range() + if valRng.TypeConstraint() == Number && other.Range().TypeConstraint() == Number { + valMax, _ := valRng.NumberUpperBound() + otherMin, _ := otherRng.NumberLowerBound() + if valMax.IsKnown() && otherMin.IsKnown() { + if r := valMax.LessThan(otherMin); r.True() { + return True + } + } + valMin, _ := valRng.NumberLowerBound() + otherMax, _ := otherRng.NumberUpperBound() + if valMin.IsKnown() && otherMax.IsKnown() { + if r := valMin.GreaterThan(otherMax); r.True() { + return False + } + } + } + shortCircuit = forceShortCircuitType(shortCircuit, Bool) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0) @@ -1231,8 +1340,30 @@ func (val Value) GreaterThan(other Value) Value { } if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + // We might be able to return a known answer even with unknown inputs. + // FIXME: This is more conservative than it needs to be, because it + // treats all bounds as exclusive bounds. + valRng := val.Range() + otherRng := other.Range() + if valRng.TypeConstraint() == Number && other.Range().TypeConstraint() == Number { + valMin, _ := valRng.NumberLowerBound() + otherMax, _ := otherRng.NumberUpperBound() + if valMin.IsKnown() && otherMax.IsKnown() { + if r := valMin.GreaterThan(otherMax); r.True() { + return True + } + } + valMax, _ := valRng.NumberUpperBound() + otherMin, _ := otherRng.NumberLowerBound() + if valMax.IsKnown() && otherMin.IsKnown() { + if r := valMax.LessThan(otherMin); r.True() { + return False + } + } + } + shortCircuit = forceShortCircuitType(shortCircuit, Bool) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0) @@ -1283,9 +1414,7 @@ func (val Value) AsBigFloat() *big.Float { } // Copy the float so that callers can't mutate our internal state - ret := *(val.v.(*big.Float)) - - return &ret + return new(big.Float).Copy(val.v.(*big.Float)) } // AsValueSlice returns a []cty.Value representation of a non-null, non-unknown diff --git a/vendor/github.com/zclconf/go-cty/cty/value_range.go b/vendor/github.com/zclconf/go-cty/cty/value_range.go new file mode 100644 index 0000000000..d7d1535393 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value_range.go @@ -0,0 +1,412 @@ +package cty + +import ( + "fmt" + "math" + "strings" +) + +// Range returns an object that offers partial information about the range +// of the receiver. +// +// This is most relevant for unknown values, because it gives access to any +// optional additional constraints on the final value (specified by the source +// of the value using "refinements") beyond what we can assume from the value's +// type. +// +// Calling Range for a known value is a little strange, but it's supported by +// returning a [ValueRange] object that describes the exact value as closely +// as possible. Typically a caller should work directly with the exact value +// in that case, but some purposes might only need the level of detail +// offered by ranges and so can share code between both known and unknown +// values. +func (v Value) Range() ValueRange { + if v.IsMarked() { + panic("Value.Range on marked value; must be unmarked first") + } + + // For an unknown value we just use its own refinements. 
+ if unk, isUnk := v.v.(*unknownType); isUnk { + refinement := unk.refinement + if refinement == nil { + // We'll generate an unconstrained refinement, just to + // simplify the code in ValueRange methods which can + // therefore assume that there's always a refinement. + refinement = &refinementNullable{isNull: tristateUnknown} + } + return ValueRange{v.Type(), refinement} + } + + if v.IsNull() { + // If we know a value is null then we'll just report that, + // since no other refinements make sense for a definitely-null value. + return ValueRange{ + v.Type(), + &refinementNullable{isNull: tristateTrue}, + } + } + + // For a known value we construct synthetic refinements that match + // the value, just as a convenience for callers that want to share + // codepaths between both known and unknown values. + ty := v.Type() + var synth unknownValRefinement + switch { + case ty == String: + synth = &refinementString{ + prefix: v.AsString(), + } + case ty == Number: + synth = &refinementNumber{ + min: v, + max: v, + minInc: true, + maxInc: true, + } + case ty.IsCollectionType(): + if lenVal := v.Length(); lenVal.IsKnown() { + l, _ := lenVal.AsBigFloat().Int64() + synth = &refinementCollection{ + minLen: int(l), + maxLen: int(l), + } + } else { + synth = &refinementCollection{ + minLen: 0, + maxLen: math.MaxInt, + } + } + + default: + // If we don't have anything else to say then we can at least + // guarantee that the value isn't null. + synth = &refinementNullable{} + } + + // If we get down here then the value is definitely not null + synth.setNull(tristateFalse) + + return ValueRange{ty, synth} +} + +// ValueRange offers partial information about the range of a value. +// +// This is primarily interesting for unknown values, because it provides access +// to any additional known constraints (specified using "refinements") on the +// range of the value beyond what is represented by the value's type. +type ValueRange struct { + ty Type + raw unknownValRefinement +} + +// TypeConstraint returns a type constraint describing the value's type as +// precisely as possible with the available information. +func (r ValueRange) TypeConstraint() Type { + return r.ty +} + +// CouldBeNull returns true unless the value being described is definitely +// known to represent a non-null value. +func (r ValueRange) CouldBeNull() bool { + if r.raw == nil { + // A totally-unconstrained unknown value could be null + return true + } + return r.raw.null() != tristateFalse +} + +// DefinitelyNotNull returns true if there are no null values in the range. +func (r ValueRange) DefinitelyNotNull() bool { + if r.raw == nil { + // A totally-unconstrained unknown value could be null + return false + } + return r.raw.null() == tristateFalse +} + +// NumberLowerBound returns information about the lower bound of the range of +// a number value, or panics if the value is definitely not a number. +// +// If the value is nullable then the result represents the range of the number +// only if it turns out not to be null. +// +// The resulting value might itself be an unknown number if there is no +// known lower bound. In that case the "inclusive" flag is meaningless. +func (r ValueRange) NumberLowerBound() (min Value, inclusive bool) { + if r.ty == DynamicPseudoType { + // We don't even know if this is a number yet. 
+ return UnknownVal(Number), false + } + if r.ty != Number { + panic(fmt.Sprintf("NumberLowerBound for %#v", r.ty)) + } + if rfn, ok := r.raw.(*refinementNumber); ok && rfn.min != NilVal { + if !rfn.min.IsKnown() { + return NegativeInfinity, true + } + return rfn.min, rfn.minInc + } + return NegativeInfinity, false +} + +// NumberUpperBound returns information about the upper bound of the range of +// a number value, or panics if the value is definitely not a number. +// +// If the value is nullable then the result represents the range of the number +// only if it turns out not to be null. +// +// The resulting value might itself be an unknown number if there is no +// known upper bound. In that case the "inclusive" flag is meaningless. +func (r ValueRange) NumberUpperBound() (max Value, inclusive bool) { + if r.ty == DynamicPseudoType { + // We don't even know if this is a number yet. + return UnknownVal(Number), false + } + if r.ty != Number { + panic(fmt.Sprintf("NumberUpperBound for %#v", r.ty)) + } + if rfn, ok := r.raw.(*refinementNumber); ok && rfn.max != NilVal { + if !rfn.max.IsKnown() { + return PositiveInfinity, true + } + return rfn.max, rfn.maxInc + } + return PositiveInfinity, false +} + +// StringPrefix returns a string that is guaranteed to be the prefix of +// the string value being described, or panics if the value is definitely not +// a string. +// +// If the value is nullable then the result represents the prefix of the string +// only if it turns out to not be null. +// +// If the resulting value is zero-length then the value could potentially be +// a string but it has no known prefix. +// +// cty.String values always contain normalized UTF-8 sequences; the result is +// also guaranteed to be a normalized UTF-8 sequence so the result also +// represents the exact bytes of the string value's prefix. +func (r ValueRange) StringPrefix() string { + if r.ty == DynamicPseudoType { + // We don't even know if this is a string yet. + return "" + } + if r.ty != String { + panic(fmt.Sprintf("StringPrefix for %#v", r.ty)) + } + if rfn, ok := r.raw.(*refinementString); ok { + return rfn.prefix + } + return "" +} + +// LengthLowerBound returns information about the lower bound of the length of +// a collection-typed value, or panics if the value is definitely not a +// collection. +// +// If the value is nullable then the result represents the range of the length +// only if the value turns out not to be null. +func (r ValueRange) LengthLowerBound() int { + if r.ty == DynamicPseudoType { + // We don't even know if this is a collection yet. + return 0 + } + if !r.ty.IsCollectionType() { + panic(fmt.Sprintf("LengthLowerBound for %#v", r.ty)) + } + if rfn, ok := r.raw.(*refinementCollection); ok { + return rfn.minLen + } + return 0 +} + +// LengthUpperBound returns information about the upper bound of the length of +// a collection-typed value, or panics if the value is definitely not a +// collection. +// +// If the value is nullable then the result represents the range of the length +// only if the value turns out not to be null. +// +// If there is no known upper bound then the result is math.MaxInt. +func (r ValueRange) LengthUpperBound() int { + if r.ty == DynamicPseudoType { + // We don't even know if this is a collection yet.
+ return math.MaxInt + } + if !r.ty.IsCollectionType() { + panic(fmt.Sprintf("LengthUpperBound for %#v", r.ty)) + } + if rfn, ok := r.raw.(*refinementCollection); ok { + return rfn.maxLen + } + return math.MaxInt +} + +// Includes determines whether the given value is in the receiving range. +// +// It can return only three possible values: +// - [cty.True] if the range definitely includes the value +// - [cty.False] if the range definitely does not include the value +// - An unknown value of [cty.Bool] if there isn't enough information to decide. +// +// This function is not fully comprehensive: it may return an unknown value +// in some cases where a definitive value could be computed in principle, and +// those same situations may begin returning known values in later releases as +// the rules are refined to be more complete. Currently the rules focus mainly +// on answering [cty.False], because disproving membership tends to be more +// useful than proving membership. +func (r ValueRange) Includes(v Value) Value { + unknownResult := UnknownVal(Bool).RefineNotNull() + + if r.raw.null() == tristateTrue { + if v.IsNull() { + return True + } else { + return False + } + } + if r.raw.null() == tristateFalse { + if v.IsNull() { + return False + } + // A definitely-not-null value could potentially match + // but we won't know until we do some more checks below. + } + // If our range includes both null and non-null values and the value is + // null then it's definitely in range. + if v.IsNull() { + return True + } + if len(v.Type().TestConformance(r.TypeConstraint())) != 0 { + // If the value doesn't conform to the type constraint then it's + // definitely not in the range. + return False + } + if v.Type() == DynamicPseudoType { + // If it's an unknown value of an unknown type then there's no + // further tests we can make. + return unknownResult + } + + switch r.raw.(type) { + case *refinementString: + if v.IsKnown() { + prefix := r.StringPrefix() + got := v.AsString() + + if !strings.HasPrefix(got, prefix) { + return False + } + } + case *refinementCollection: + lenVal := v.Length() + minLen := NumberIntVal(int64(r.LengthLowerBound())) + maxLen := NumberIntVal(int64(r.LengthUpperBound())) + if minOk := lenVal.GreaterThanOrEqualTo(minLen); minOk.IsKnown() && minOk.False() { + return False + } + if maxOk := lenVal.LessThanOrEqualTo(maxLen); maxOk.IsKnown() && maxOk.False() { + return False + } + case *refinementNumber: + minVal, minInc := r.NumberLowerBound() + maxVal, maxInc := r.NumberUpperBound() + var minOk, maxOk Value + if minInc { + minOk = v.GreaterThanOrEqualTo(minVal) + } else { + minOk = v.GreaterThan(minVal) + } + if maxInc { + maxOk = v.LessThanOrEqualTo(maxVal) + } else { + maxOk = v.LessThan(maxVal) + } + if minOk.IsKnown() && minOk.False() { + return False + } + if maxOk.IsKnown() && maxOk.False() { + return False + } + } + + // If we fall out here then we don't have enough information to decide. + return unknownResult +} + +// numericRangeArithmetic is a helper we use to calculate derived numeric ranges +// for arithmetic on refined numeric values. +// +// op must be a monotone operation. numericRangeArithmetic adapts that operation +// into the equivalent interval arithmetic operation. +// +// The result is a superset of the range of the given operation against the +// given input ranges, if it's possible to calculate that without encountering +// an invalid operation. 
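+// For example (an illustrative sketch): adding a value refined to the
+// interval [1, 3] to a value refined to [10, 20] produces an unknown
+// number refined to [11, 23]:
+//
+//	a := UnknownVal(Number).Refine().NotNull().
+//		NumberRangeInclusive(NumberIntVal(1), NumberIntVal(3)).NewValue()
+//	b := UnknownVal(Number).Refine().NotNull().
+//		NumberRangeInclusive(NumberIntVal(10), NumberIntVal(20)).NewValue()
+//	sum := a.Add(b) // unknown, but refined to be between 11 and 23
+//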
Currently the result is inexact due to ignoring +// the inclusiveness of the input bounds and just always returning inclusive +// bounds. +func numericRangeArithmetic(op func(a, b Value) Value, a, b ValueRange) func(*RefinementBuilder) *RefinementBuilder { + wrapOp := func(a, b Value) (ret Value) { + // Our functions have various panicking edge cases involving incompatible + // uses of infinities. To keep things simple here we'll catch those + // and just return an unconstrained number. + defer func() { + if v := recover(); v != nil { + ret = UnknownVal(Number) + } + }() + return op(a, b) + } + + return func(builder *RefinementBuilder) *RefinementBuilder { + aMin, _ := a.NumberLowerBound() + aMax, _ := a.NumberUpperBound() + bMin, _ := b.NumberLowerBound() + bMax, _ := b.NumberUpperBound() + + v1 := wrapOp(aMin, bMin) + v2 := wrapOp(aMin, bMax) + v3 := wrapOp(aMax, bMin) + v4 := wrapOp(aMax, bMax) + + newMin := mostNumberValue(Value.LessThan, v1, v2, v3, v4) + newMax := mostNumberValue(Value.GreaterThan, v1, v2, v3, v4) + + if isInf := newMin.Equals(NegativeInfinity); isInf.IsKnown() && isInf.False() { + builder = builder.NumberRangeLowerBound(newMin, true) + } + if isInf := newMax.Equals(PositiveInfinity); isInf.IsKnown() && isInf.False() { + builder = builder.NumberRangeUpperBound(newMax, true) + } + return builder + } +} + +func mostNumberValue(op func(i, j Value) Value, v1 Value, vN ...Value) Value { + r := v1 + for _, v := range vN { + more := op(v, r) + if !more.IsKnown() { + return UnknownVal(Number) + } + if more.True() { + r = v + } + } + return r +} + +// definitelyNotNull is a convenient helper for the common situation of checking +// whether a value could possibly be null. +// +// Returns true if the given value is either a known value that isn't null +// or an unknown value that has been refined to exclude null values from its +// range. +func definitelyNotNull(v Value) bool { + if v.IsKnown() { + return !v.IsNull() + } + return v.Range().DefinitelyNotNull() +} diff --git a/vendor/github.com/zclconf/go-cty/cty/walk.go b/vendor/github.com/zclconf/go-cty/cty/walk.go index d17f48ccd1..87ba32e796 100644 --- a/vendor/github.com/zclconf/go-cty/cty/walk.go +++ b/vendor/github.com/zclconf/go-cty/cty/walk.go @@ -33,10 +33,15 @@ func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error { return nil } + // The callback already got a chance to see the mark in our + // call above, so can safely strip it off here in order to + // visit the child elements, which might still have their own marks. + rawVal, _ := val.Unmark() + ty := val.Type() switch { case ty.IsObjectType(): - for it := val.ElementIterator(); it.Next(); { + for it := rawVal.ElementIterator(); it.Next(); { nameVal, av := it.Element() path := append(path, GetAttrStep{ Name: nameVal.AsString(), @@ -46,8 +51,8 @@ func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error { return err } } - case val.CanIterateElements(): - for it := val.ElementIterator(); it.Next(); { + case rawVal.CanIterateElements(): + for it := rawVal.ElementIterator(); it.Next(); { kv, ev := it.Element() path := append(path, IndexStep{ Key: kv, @@ -134,6 +139,12 @@ func transform(path Path, val Value, t Transformer) (Value, error) { ty := val.Type() var newVal Value + // We need to peel off any marks here so that we can dig around + // inside any collection values. We'll reapply these to any + // new collections we construct, but the transformer's Exit + // method gets the final say on what to do with those. 
+ rawVal, marks := val.Unmark() + switch { case val.IsNull() || !val.IsKnown(): @@ -141,14 +152,14 @@ func transform(path Path, val Value, t Transformer) (Value, error) { newVal = val case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): - l := val.LengthInt() + l := rawVal.LengthInt() switch l { case 0: // No deep transform for an empty sequence newVal = val default: elems := make([]Value, 0, l) - for it := val.ElementIterator(); it.Next(); { + for it := rawVal.ElementIterator(); it.Next(); { kv, ev := it.Element() path := append(path, IndexStep{ Key: kv, @@ -161,25 +172,25 @@ func transform(path Path, val Value, t Transformer) (Value, error) { } switch { case ty.IsListType(): - newVal = ListVal(elems) + newVal = ListVal(elems).WithMarks(marks) case ty.IsSetType(): - newVal = SetVal(elems) + newVal = SetVal(elems).WithMarks(marks) case ty.IsTupleType(): - newVal = TupleVal(elems) + newVal = TupleVal(elems).WithMarks(marks) default: panic("unknown sequence type") // should never happen because of the case we are in } } case ty.IsMapType(): - l := val.LengthInt() + l := rawVal.LengthInt() switch l { case 0: // No deep transform for an empty map newVal = val default: elems := make(map[string]Value) - for it := val.ElementIterator(); it.Next(); { + for it := rawVal.ElementIterator(); it.Next(); { kv, ev := it.Element() path := append(path, IndexStep{ Key: kv, @@ -190,7 +201,7 @@ func transform(path Path, val Value, t Transformer) (Value, error) { } elems[kv.AsString()] = newEv } - newVal = MapVal(elems) + newVal = MapVal(elems).WithMarks(marks) } case ty.IsObjectType(): @@ -212,7 +223,7 @@ func transform(path Path, val Value, t Transformer) (Value, error) { } newAVs[name] = newAV } - newVal = ObjectVal(newAVs) + newVal = ObjectVal(newAVs).WithMarks(marks) } default: diff --git a/vendor/gitlab.com/bosi/decorder/.gitignore b/vendor/gitlab.com/bosi/decorder/.gitignore new file mode 100644 index 0000000000..48baa654b0 --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/.gitignore @@ -0,0 +1,7 @@ +/.idea +/.env +/.env.example +/decorder +/LICENSES-3RD-PARTY +/ytt +/yq \ No newline at end of file diff --git a/vendor/gitlab.com/bosi/decorder/.gitlab-ci.params.yml b/vendor/gitlab.com/bosi/decorder/.gitlab-ci.params.yml new file mode 100644 index 0000000000..fe6b852887 --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/.gitlab-ci.params.yml @@ -0,0 +1,15 @@ +#@data/values +--- + +app: + name: decorder + +code_quality: + enable_tests: true + enable_static_code_analyses: true + enable_license_check: true + +deployment: + enable_rc_handling: false + use_gitlab_container_registry: false + enable_image_build_and_deploy: false diff --git a/vendor/gitlab.com/bosi/decorder/.gitlab-ci.yml b/vendor/gitlab.com/bosi/decorder/.gitlab-ci.yml new file mode 100644 index 0000000000..73e1273b34 --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/.gitlab-ci.yml @@ -0,0 +1,63 @@ +############################### +# This file is auto-generated # +############################### + +variables: + APP_NAME: decorder + +stages: + - test + - build + - release + +test: + stage: test + image: golang:1.21.0@sha256:b490ae1f0ece153648dd3c5d25be59a63f966b5f9e1311245c947de4506981aa + before_script: + - set -eu + - if [[ -f .env.pipeline ]];then cp .env.pipeline .env;fi + - mkdir -p ~/.ssh + - touch ~/.ssh/known_hosts + - ssh-keyscan gitlab.com > ~/.ssh/known_hosts + retry: 2 + script: + - '### run tests ###' + - make test + - make test-cover + +lint:source-code: + stage: test + image: 
golangci/golangci-lint:v1.54.2-alpine@sha256:e950721f6ae622dcc041f57cc0b61c3a78d4bbfc588facfc8b0166901a9f4848 + script: + - apk add make bash + - make settings + - '### run linter ###' + - golangci-lint run ./... + +license-check: + stage: test + image: golang:1.21.0@sha256:b490ae1f0ece153648dd3c5d25be59a63f966b5f9e1311245c947de4506981aa + before_script: + - set -eu + - if [[ -f .env.pipeline ]];then cp .env.pipeline .env;fi + - mkdir -p ~/.ssh + - touch ~/.ssh/known_hosts + - ssh-keyscan gitlab.com > ~/.ssh/known_hosts + script: + - '### run license-check ###' + - make check-licenses + artifacts: + paths: + - LICENSES-3RD-PARTY + expire_in: 7 days + +pages: + stage: release + image: golang:1.21.0@sha256:b490ae1f0ece153648dd3c5d25be59a63f966b5f9e1311245c947de4506981aa + only: + - tags + script: + - make gitlab-pages + artifacts: + paths: + - public/ diff --git a/vendor/gitlab.com/bosi/decorder/LICENSE.md b/vendor/gitlab.com/bosi/decorder/LICENSE.md new file mode 100644 index 0000000000..d46c30e18e --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/LICENSE.md @@ -0,0 +1,16 @@ +MIT License + +Copyright (c) 2021 Florian Bosdorff + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/gitlab.com/bosi/decorder/Makefile b/vendor/gitlab.com/bosi/decorder/Makefile new file mode 100644 index 0000000000..8d4c05690f --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/Makefile @@ -0,0 +1,7 @@ +include project-templates/base.mk + +project-templates/base.mk: + @cp -ar ~/.dotfiles/projects/golang ./project-templates + +.env: + touch .env \ No newline at end of file diff --git a/vendor/gitlab.com/bosi/decorder/README.md b/vendor/gitlab.com/bosi/decorder/README.md new file mode 100644 index 0000000000..5947e5ca2c --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/README.md @@ -0,0 +1,50 @@ +# Decorder + +A declaration order linter for Go. In case of this tool declarations are `type`, `const`, `var` and `func`. + +## Rules + +This linter applies multiple rules where each can be disabled via cli parameter. 
+
+| rule | description | cli-options |
+|------|-------------|-------------|
+| declaration order | Enforces the order of global declarations (e.g. all global constants are always defined before variables).
You can also define a subset of declarations if you don't want to enforce the order of all of them. | * disable all checks: `-disable-dec-order-check`
* disable type checks: `-disable-type-dec-order-check`
* disable const checks: `-disable-const-dec-order-check`
* disable var checks: `-disable-var-dec-order-check`
* custom order: `-dec-order var,const,func,type` | +| declaration number | Enforces that the statements `const`, `var` and `type` are only used once per file. You have to use parentheses
to declare e.g multiple global types inside a file. | disable check: `-disable-dec-num-check` | +| init func first | Enforces the init func to be the first function in file. | disable check: `-disable-init-func-first-check` | + +You may find the implementation of the rules inside `analyzer.go`. + +Underscore var declarations can be ignored via `-ignore-underscore-vars`. + +## Installation + +```shell +go install gitlab.com/bosi/decorder/cmd/decorder +``` + +You can use the linter via golangci-lint as well: https://golangci-lint.run/usage/linters/#decorder. + +## Usage + +```shell +# with default options +decorder ./... + +# custom declaration order +decorder -dec-order var,const,func,type ./... + +# disable declaration order check +decorder -disable-dec-order-check ./... + +# disable check for multiple declarations statements +decorder -disable-dec-num-check ./... + +# disable check for multiple declarations (var only) statements +decorder -disable-var-dec-num-check ./... + +# disable check that init func is always first function +decorder -disable-init-func-first-check ./... + +# ignore underscore variables for all checks +decorder -ignore-underscore-vars ./... +``` \ No newline at end of file diff --git a/vendor/gitlab.com/bosi/decorder/analyzer.go b/vendor/gitlab.com/bosi/decorder/analyzer.go new file mode 100644 index 0000000000..08f82ccc11 --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/analyzer.go @@ -0,0 +1,255 @@ +package decorder + +import ( + "fmt" + "go/ast" + "go/token" + "strings" + "sync" + + "golang.org/x/tools/go/analysis" +) + +type ( + decNumChecker struct { + tokenMap map[string]token.Token + tokenCounts map[token.Token]int + decOrder []string + funcPoss []funcPos + } + + options struct { + decOrder string + ignoreUnderscoreVars bool + disableDecNumCheck bool + disableTypeDecNumCheck bool + disableConstDecNumCheck bool + disableVarDecNumCheck bool + disableDecOrderCheck bool + disableInitFuncFirstCheck bool + } + + funcPos struct { + start token.Pos + end token.Pos + } +) + +const ( + Name = "decorder" + + FlagDo = "dec-order" + FlagIuv = "ignore-underscore-vars" + FlagDdnc = "disable-dec-num-check" + FlagDtdnc = "disable-type-dec-num-check" + FlagDcdnc = "disable-const-dec-num-check" + FlagDvdnc = "disable-var-dec-num-check" + FlagDdoc = "disable-dec-order-check" + FlagDiffc = "disable-init-func-first-check" + + defaultDecOrder = "type,const,var,func" +) + +var ( + Analyzer = &analysis.Analyzer{ + Name: Name, + Doc: "check declaration order and count of types, constants, variables and functions", + Run: run, + } + + opts = options{} + + tokens = []token.Token{token.TYPE, token.CONST, token.VAR, token.FUNC} + + decNumConf = map[token.Token]bool{ + token.TYPE: false, + token.CONST: false, + token.VAR: false, + } + decLock sync.Mutex +) + +//nolint:lll +func init() { + Analyzer.Flags.StringVar(&opts.decOrder, FlagDo, defaultDecOrder, "define the required order of types, constants, variables and functions declarations inside a file") + Analyzer.Flags.BoolVar(&opts.ignoreUnderscoreVars, FlagIuv, false, "option to ignore underscore vars for dec order and dec num check") + Analyzer.Flags.BoolVar(&opts.disableDecNumCheck, FlagDdnc, false, "option to disable (all) checks for number of declarations inside file") + Analyzer.Flags.BoolVar(&opts.disableTypeDecNumCheck, FlagDtdnc, false, "option to disable check for number of type declarations inside file") + Analyzer.Flags.BoolVar(&opts.disableConstDecNumCheck, FlagDcdnc, false, "option to disable check for number of const 
declarations inside file") + Analyzer.Flags.BoolVar(&opts.disableVarDecNumCheck, FlagDvdnc, false, "option to disable check for number of var declarations inside file") + Analyzer.Flags.BoolVar(&opts.disableDecOrderCheck, FlagDdoc, false, "option to disable check for order of declarations inside file") + Analyzer.Flags.BoolVar(&opts.disableInitFuncFirstCheck, FlagDiffc, false, "option to disable check that init function is always first function in file") +} + +func initDec() { + decLock.Lock() + decNumConf[token.TYPE] = opts.disableTypeDecNumCheck + decNumConf[token.CONST] = opts.disableConstDecNumCheck + decNumConf[token.VAR] = opts.disableVarDecNumCheck + decLock.Unlock() +} + +func run(pass *analysis.Pass) (interface{}, error) { + initDec() + + for _, f := range pass.Files { + ast.Inspect(f, runDeclNumAndDecOrderCheck(pass)) + + if !opts.disableInitFuncFirstCheck { + ast.Inspect(f, runInitFuncFirstCheck(pass)) + } + } + + return nil, nil +} + +func runInitFuncFirstCheck(pass *analysis.Pass) func(ast.Node) bool { + nonInitFound := false + + return func(n ast.Node) bool { + dec, ok := n.(*ast.FuncDecl) + if !ok { + return true + } + + if dec.Name.Name == "init" && dec.Recv == nil { + if nonInitFound { + pass.Reportf(dec.Pos(), "init func must be the first function in file") + } + } else { + nonInitFound = true + } + + return true + } +} + +func runDeclNumAndDecOrderCheck(pass *analysis.Pass) func(ast.Node) bool { + dnc := newDecNumChecker() + + if opts.disableDecNumCheck && opts.disableDecOrderCheck { + return func(n ast.Node) bool { + return true + } + } + + return func(n ast.Node) bool { + fd, ok := n.(*ast.FuncDecl) + if ok { + return dnc.handleFuncDec(fd, pass) + } + + gd, ok := n.(*ast.GenDecl) + if !ok { + return true + } + + if dnc.isInsideFunction(gd) { + return true + } + + dnc.handleGenDecl(gd, pass) + + if !opts.disableDecOrderCheck { + dnc.handleDecOrderCheck(gd, pass) + } + + return true + } +} + +func newDecNumChecker() decNumChecker { + dnc := decNumChecker{ + tokenMap: map[string]token.Token{}, + tokenCounts: map[token.Token]int{}, + decOrder: []string{}, + funcPoss: []funcPos{}, + } + + for _, t := range tokens { + dnc.tokenCounts[t] = 0 + dnc.tokenMap[t.String()] = t + } + + for _, do := range strings.Split(opts.decOrder, ",") { + dnc.decOrder = append(dnc.decOrder, strings.TrimSpace(do)) + } + + return dnc +} + +func (dnc decNumChecker) isToLate(t token.Token) (string, bool) { + for i, do := range dnc.decOrder { + if do == t.String() { + for j := i + 1; j < len(dnc.decOrder); j++ { + if dnc.tokenCounts[dnc.tokenMap[dnc.decOrder[j]]] > 0 { + return dnc.decOrder[j], false + } + } + return "", true + } + } + + return "", true +} + +func (dnc *decNumChecker) handleGenDecl(gd *ast.GenDecl, pass *analysis.Pass) { + for _, t := range tokens { + if gd.Tok == t { + if opts.ignoreUnderscoreVars && declName(gd) == "_" { + continue + } + + dnc.tokenCounts[t]++ + + if !opts.disableDecNumCheck && !decNumConf[t] && dnc.tokenCounts[t] > 1 { + pass.Reportf(gd.Pos(), "multiple \"%s\" declarations are not allowed; use parentheses instead", t.String()) + } + } + } +} + +func declName(gd *ast.GenDecl) string { + for _, spec := range gd.Specs { + s, ok := spec.(*ast.ValueSpec) + if ok && len(s.Names) > 0 && s.Names[0] != nil { + return s.Names[0].Name + } + } + return "" +} + +func (dnc decNumChecker) handleDecOrderCheck(gd *ast.GenDecl, pass *analysis.Pass) { + l, c := dnc.isToLate(gd.Tok) + if !c { + pass.Reportf(gd.Pos(), fmtWrongOrderMsg(gd.Tok.String(), l)) + } +} + +func (dnc 
decNumChecker) isInsideFunction(dn *ast.GenDecl) bool { + for _, poss := range dnc.funcPoss { + if poss.start < dn.Pos() && poss.end > dn.Pos() { + return true + } + } + return false +} + +func (dnc *decNumChecker) handleFuncDec(fd *ast.FuncDecl, pass *analysis.Pass) bool { + dnc.funcPoss = append(dnc.funcPoss, funcPos{start: fd.Pos(), end: fd.End()}) + + dnc.tokenCounts[token.FUNC]++ + + if !opts.disableDecOrderCheck { + l, c := dnc.isToLate(token.FUNC) + if !c { + pass.Reportf(fd.Pos(), fmtWrongOrderMsg(token.FUNC.String(), l)) + } + } + + return true +} + +func fmtWrongOrderMsg(target string, notAfter string) string { + return fmt.Sprintf("%s must not be placed after %s (desired order: %s)", target, notAfter, opts.decOrder) +} diff --git a/vendor/gitlab.com/bosi/decorder/renovate.json b/vendor/gitlab.com/bosi/decorder/renovate.json new file mode 100644 index 0000000000..60041578ce --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/renovate.json @@ -0,0 +1,7 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "gitlab>bosi/renovate-configs//configs/golang", + "gitlab>bosi/renovate-configs//configs/automerge" + ] +} diff --git a/vendor/go-simpler.org/musttag/.golangci.yml b/vendor/go-simpler.org/musttag/.golangci.yml new file mode 100644 index 0000000000..641471cc44 --- /dev/null +++ b/vendor/go-simpler.org/musttag/.golangci.yml @@ -0,0 +1,23 @@ +linters: + disable-all: true + enable: + # enabled by default: + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - typecheck + - unused + # disabled by default: + - gocritic + - gofumpt + +linters-settings: + gocritic: + enabled-tags: + - diagnostic + - style + - performance + - experimental + - opinionated diff --git a/vendor/go-simpler.org/musttag/.goreleaser.yml b/vendor/go-simpler.org/musttag/.goreleaser.yml new file mode 100644 index 0000000000..dd75ac945b --- /dev/null +++ b/vendor/go-simpler.org/musttag/.goreleaser.yml @@ -0,0 +1,18 @@ +builds: + - main: ./cmd/musttag + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -s -w -X main.version={{ .Version }} + targets: + - darwin_amd64 + - darwin_arm64 + - linux_amd64 + - windows_amd64 + +archives: + - format_overrides: + - goos: windows + format: zip diff --git a/vendor/go-simpler.org/musttag/LICENSE b/vendor/go-simpler.org/musttag/LICENSE new file mode 100644 index 0000000000..a612ad9813 --- /dev/null +++ b/vendor/go-simpler.org/musttag/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. 
+ +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/go-simpler.org/musttag/README.md b/vendor/go-simpler.org/musttag/README.md new file mode 100644 index 0000000000..3f3a253302 --- /dev/null +++ b/vendor/go-simpler.org/musttag/README.md @@ -0,0 +1,102 @@ +# musttag + +[![checks](https://github.com/go-simpler/musttag/actions/workflows/checks.yml/badge.svg)](https://github.com/go-simpler/musttag/actions/workflows/checks.yml) +[![pkg.go.dev](https://pkg.go.dev/badge/go-simpler.org/musttag.svg)](https://pkg.go.dev/go-simpler.org/musttag) +[![goreportcard](https://goreportcard.com/badge/go-simpler.org/musttag)](https://goreportcard.com/report/go-simpler.org/musttag) +[![codecov](https://codecov.io/gh/go-simpler/musttag/branch/main/graph/badge.svg)](https://codecov.io/gh/go-simpler/musttag) + +A Go linter that enforces field tags in (un)marshaled structs. + +## 📌 About + +`musttag` checks that exported fields of a struct passed to a `Marshal`-like function are annotated with the relevant tag: + +```go +// BAD: +var user struct { + Name string +} +data, err := json.Marshal(user) + +// GOOD: +var user struct { + Name string `json:"name"` +} +data, err := json.Marshal(user) +``` + +The rational from [Uber Style Guide][1]: + +> The serialized form of the structure is a contract between different systems. +> Changes to the structure of the serialized form, including field names, break this contract. +> Specifying field names inside tags makes the contract explicit, +> and it guards against accidentally breaking the contract by refactoring or renaming fields. + +## 🚀 Features + +The following packages are supported out of the box: + +* [encoding/json][2] +* [encoding/xml][3] +* [gopkg.in/yaml.v3][4] +* [github.com/BurntSushi/toml][5] +* [github.com/mitchellh/mapstructure][6] +* [github.com/jmoiron/sqlx][7] + +In addition, any [custom package](#custom-packages) can be added to the list. + +## 📦 Install + +`musttag` is integrated into [`golangci-lint`][8], and this is the recommended way to use it. 
+ +To enable the linter, add the following lines to `.golangci.yml`: + +```yaml +linters: + enable: + - musttag +``` + +Alternatively, you can download a prebuilt binary from the [Releases][9] page to use `musttag` standalone. + +## 📋 Usage + +Run `golangci-lint` with `musttag` enabled. +See the list of [available options][10] to configure the linter. + +When using `musttag` standalone, pass the options as flags. + +### Custom packages + +To report a custom function, you need to add its description to `.golangci.yml`. +The following is an example of adding support for [`hclsimple.Decode`][11]: + +```yaml +linters-settings: + musttag: + functions: + # The full name of the function, including the package. + - name: github.com/hashicorp/hcl/v2/hclsimple.Decode + # The struct tag whose presence should be ensured. + tag: hcl + # The position of the argument to check. + arg-pos: 2 +``` + +The same can be done via the `-fn=` flag when using `musttag` standalone: + +```shell +musttag -fn="github.com/hashicorp/hcl/v2/hclsimple.DecodeFile:hcl:2" ./... +``` + +[1]: https://github.com/uber-go/guide/blob/master/style.md#use-field-tags-in-marshaled-structs +[2]: https://pkg.go.dev/encoding/json +[3]: https://pkg.go.dev/encoding/xml +[4]: https://pkg.go.dev/gopkg.in/yaml.v3 +[5]: https://pkg.go.dev/github.com/BurntSushi/toml +[6]: https://pkg.go.dev/github.com/mitchellh/mapstructure +[7]: https://pkg.go.dev/github.com/jmoiron/sqlx +[8]: https://golangci-lint.run +[9]: https://github.com/go-simpler/musttag/releases +[10]: https://golangci-lint.run/usage/linters/#musttag +[11]: https://pkg.go.dev/github.com/hashicorp/hcl/v2/hclsimple#Decode diff --git a/vendor/go-simpler.org/musttag/builtins.go b/vendor/go-simpler.org/musttag/builtins.go new file mode 100644 index 0000000000..3305513f8f --- /dev/null +++ b/vendor/go-simpler.org/musttag/builtins.go @@ -0,0 +1,133 @@ +package musttag + +// builtins is a set of functions supported out of the box. 
+var builtins = []Func{ + // https://pkg.go.dev/encoding/json + { + Name: "encoding/json.Marshal", Tag: "json", ArgPos: 0, + ifaceWhitelist: []string{"encoding/json.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "encoding/json.MarshalIndent", Tag: "json", ArgPos: 0, + ifaceWhitelist: []string{"encoding/json.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "encoding/json.Unmarshal", Tag: "json", ArgPos: 1, + ifaceWhitelist: []string{"encoding/json.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "(*encoding/json.Encoder).Encode", Tag: "json", ArgPos: 0, + ifaceWhitelist: []string{"encoding/json.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "(*encoding/json.Decoder).Decode", Tag: "json", ArgPos: 0, + ifaceWhitelist: []string{"encoding/json.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + + // https://pkg.go.dev/encoding/xml + { + Name: "encoding/xml.Marshal", Tag: "xml", ArgPos: 0, + ifaceWhitelist: []string{"encoding/xml.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "encoding/xml.MarshalIndent", Tag: "xml", ArgPos: 0, + ifaceWhitelist: []string{"encoding/xml.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "encoding/xml.Unmarshal", Tag: "xml", ArgPos: 1, + ifaceWhitelist: []string{"encoding/xml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "(*encoding/xml.Encoder).Encode", Tag: "xml", ArgPos: 0, + ifaceWhitelist: []string{"encoding/xml.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "(*encoding/xml.Decoder).Decode", Tag: "xml", ArgPos: 0, + ifaceWhitelist: []string{"encoding/xml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "(*encoding/xml.Encoder).EncodeElement", Tag: "xml", ArgPos: 0, + ifaceWhitelist: []string{"encoding/xml.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "(*encoding/xml.Decoder).DecodeElement", Tag: "xml", ArgPos: 0, + ifaceWhitelist: []string{"encoding/xml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + + // https://pkg.go.dev/gopkg.in/yaml.v3 + { + Name: "gopkg.in/yaml.v3.Marshal", Tag: "yaml", ArgPos: 0, + ifaceWhitelist: []string{"gopkg.in/yaml.v3.Marshaler"}, + }, + { + Name: "gopkg.in/yaml.v3.Unmarshal", Tag: "yaml", ArgPos: 1, + ifaceWhitelist: []string{"gopkg.in/yaml.v3.Unmarshaler"}, + }, + { + Name: "(*gopkg.in/yaml.v3.Encoder).Encode", Tag: "yaml", ArgPos: 0, + ifaceWhitelist: []string{"gopkg.in/yaml.v3.Marshaler"}, + }, + { + Name: "(*gopkg.in/yaml.v3.Decoder).Decode", Tag: "yaml", ArgPos: 0, + ifaceWhitelist: []string{"gopkg.in/yaml.v3.Unmarshaler"}, + }, + + // https://pkg.go.dev/github.com/BurntSushi/toml + { + Name: "github.com/BurntSushi/toml.Unmarshal", Tag: "toml", ArgPos: 1, + ifaceWhitelist: []string{"github.com/BurntSushi/toml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "github.com/BurntSushi/toml.Decode", Tag: "toml", ArgPos: 1, + ifaceWhitelist: []string{"github.com/BurntSushi/toml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "github.com/BurntSushi/toml.DecodeFS", Tag: "toml", ArgPos: 2, + ifaceWhitelist: []string{"github.com/BurntSushi/toml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "github.com/BurntSushi/toml.DecodeFile", Tag: "toml", ArgPos: 1, + ifaceWhitelist: []string{"github.com/BurntSushi/toml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "(*github.com/BurntSushi/toml.Encoder).Encode", Tag: "toml", ArgPos: 0, + ifaceWhitelist: []string{"encoding.TextMarshaler"}, + }, + { + Name: "(*github.com/BurntSushi/toml.Decoder).Decode", Tag: "toml", ArgPos: 0, + ifaceWhitelist: 
[]string{"github.com/BurntSushi/toml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + + // https://pkg.go.dev/github.com/mitchellh/mapstructure + {Name: "github.com/mitchellh/mapstructure.Decode", Tag: "mapstructure", ArgPos: 1}, + {Name: "github.com/mitchellh/mapstructure.DecodeMetadata", Tag: "mapstructure", ArgPos: 1}, + {Name: "github.com/mitchellh/mapstructure.WeakDecode", Tag: "mapstructure", ArgPos: 1}, + {Name: "github.com/mitchellh/mapstructure.WeakDecodeMetadata", Tag: "mapstructure", ArgPos: 1}, + + // https://pkg.go.dev/github.com/jmoiron/sqlx + {Name: "github.com/jmoiron/sqlx.Get", Tag: "db", ArgPos: 1}, + {Name: "github.com/jmoiron/sqlx.GetContext", Tag: "db", ArgPos: 2}, + {Name: "github.com/jmoiron/sqlx.Select", Tag: "db", ArgPos: 1}, + {Name: "github.com/jmoiron/sqlx.SelectContext", Tag: "db", ArgPos: 2}, + {Name: "github.com/jmoiron/sqlx.StructScan", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.Conn).GetContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.Conn).SelectContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.DB).Get", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.DB).GetContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.DB).Select", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.DB).SelectContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.NamedStmt).Get", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.NamedStmt).GetContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.NamedStmt).Select", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.NamedStmt).SelectContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.Row).StructScan", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.Rows).StructScan", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.Stmt).Get", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.Stmt).GetContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.Stmt).Select", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.Stmt).SelectContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.Tx).Get", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.Tx).GetContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.Tx).Select", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.Tx).SelectContext", Tag: "db", ArgPos: 1}, +} diff --git a/vendor/go-simpler.org/musttag/musttag.go b/vendor/go-simpler.org/musttag/musttag.go new file mode 100644 index 0000000000..d7911770fb --- /dev/null +++ b/vendor/go-simpler.org/musttag/musttag.go @@ -0,0 +1,265 @@ +// Package musttag implements the musttag analyzer. +package musttag + +import ( + "flag" + "fmt" + "go/ast" + "go/types" + "reflect" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +// Func describes a function call to look for, e.g. [json.Marshal]. +type Func struct { + Name string // The full name of the function, including the package. + Tag string // The struct tag whose presence should be ensured. + ArgPos int // The position of the argument to check. + + // a list of interface names (including the package); + // if at least one is implemented by the argument, no check is performed. + ifaceWhitelist []string +} + +// New creates a new musttag analyzer. +// To report a custom function, provide its description as [Func]. 
+func New(funcs ...Func) *analysis.Analyzer { + var flagFuncs []Func + return &analysis.Analyzer{ + Name: "musttag", + Doc: "enforce field tags in (un)marshaled structs", + Flags: flags(&flagFuncs), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: func(pass *analysis.Pass) (any, error) { + l := len(builtins) + len(funcs) + len(flagFuncs) + allFuncs := make(map[string]Func, l) + + merge := func(slice []Func) { + for _, fn := range slice { + allFuncs[fn.Name] = fn + } + } + merge(builtins) + merge(funcs) + merge(flagFuncs) + + mainModule, err := getMainModule() + if err != nil { + return nil, err + } + + return run(pass, mainModule, allFuncs) + }, + } +} + +func flags(funcs *[]Func) flag.FlagSet { + fs := flag.NewFlagSet("musttag", flag.ContinueOnError) + fs.Func("fn", "report a custom function (name:tag:arg-pos)", func(s string) error { + parts := strings.Split(s, ":") + if len(parts) != 3 || parts[0] == "" || parts[1] == "" { + return strconv.ErrSyntax + } + pos, err := strconv.Atoi(parts[2]) + if err != nil { + return err + } + *funcs = append(*funcs, Func{ + Name: parts[0], + Tag: parts[1], + ArgPos: pos, + }) + return nil + }) + return *fs +} + +func run(pass *analysis.Pass, mainModule string, funcs map[string]Func) (_ any, err error) { + visit := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + filter := []ast.Node{(*ast.CallExpr)(nil)} + + visit.Preorder(filter, func(node ast.Node) { + if err != nil { + return // there is already an error. + } + + call, ok := node.(*ast.CallExpr) + if !ok { + return // not a function call. + } + + callee := typeutil.StaticCallee(pass.TypesInfo, call) + if callee == nil { + return // not a static call. + } + + fn, ok := funcs[cutVendor(callee.FullName())] + if !ok { + return // unsupported function. + } + + if len(call.Args) <= fn.ArgPos { + err = fmt.Errorf("musttag: Func.ArgPos cannot be %d: %s accepts only %d argument(s)", fn.ArgPos, fn.Name, len(call.Args)) + return + } + + arg := call.Args[fn.ArgPos] + if ident, ok := arg.(*ast.Ident); ok && ident.Obj == nil { + return // e.g. json.Marshal(nil) + } + + typ := pass.TypesInfo.TypeOf(arg) + if typ == nil { + return // no type info found. + } + + // TODO: check nested structs too. + if implementsInterface(typ, fn.ifaceWhitelist, pass.Pkg.Imports()) { + return // the type implements a Marshaler interface; see issue #64. + } + + checker := checker{ + mainModule: mainModule, + seenTypes: make(map[string]struct{}), + } + + styp, ok := checker.parseStruct(typ) + if !ok { + return // not a struct. + } + + if valid := checker.checkStruct(styp, fn.Tag); valid { + return // nothing to report. + } + + pass.Reportf(arg.Pos(), "the given struct should be annotated with the `%s` tag", fn.Tag) + }) + + return nil, err +} + +type checker struct { + mainModule string + seenTypes map[string]struct{} +} + +func (c *checker) parseStruct(typ types.Type) (*types.Struct, bool) { + switch typ := typ.(type) { + case *types.Pointer: + return c.parseStruct(typ.Elem()) + + case *types.Array: + return c.parseStruct(typ.Elem()) + + case *types.Slice: + return c.parseStruct(typ.Elem()) + + case *types.Map: + return c.parseStruct(typ.Elem()) + + case *types.Named: // a struct of the named type. + pkg := typ.Obj().Pkg() + if pkg == nil { + return nil, false + } + if !strings.HasPrefix(pkg.Path(), c.mainModule) { + return nil, false + } + styp, ok := typ.Underlying().(*types.Struct) + if !ok { + return nil, false + } + return styp, true + + case *types.Struct: // an anonymous struct. 
+ return typ, true + + default: + return nil, false + } +} + +func (c *checker) checkStruct(styp *types.Struct, tag string) (valid bool) { + c.seenTypes[styp.String()] = struct{}{} + + for i := 0; i < styp.NumFields(); i++ { + field := styp.Field(i) + if !field.Exported() { + continue + } + + if _, ok := reflect.StructTag(styp.Tag(i)).Lookup(tag); !ok { + // tag is not required for embedded types; see issue #12. + if !field.Embedded() { + return false + } + } + + nested, ok := c.parseStruct(field.Type()) + if !ok { + continue + } + if _, ok := c.seenTypes[nested.String()]; ok { + continue + } + if valid := c.checkStruct(nested, tag); !valid { + return false + } + } + + return true +} + +func implementsInterface(typ types.Type, ifaces []string, imports []*types.Package) bool { + findScope := func(pkgName string) (*types.Scope, bool) { + // fast path: check direct imports (e.g. looking for "encoding/json.Marshaler"). + for _, direct := range imports { + if pkgName == cutVendor(direct.Path()) { + return direct.Scope(), true + } + } + // slow path: check indirect imports (e.g. looking for "encoding.TextMarshaler"). + // TODO: only check indirect imports from the package (e.g. "encoding/json") of the analyzed function (e.g. "encoding/json.Marshal"). + for _, direct := range imports { + for _, indirect := range direct.Imports() { + if pkgName == cutVendor(indirect.Path()) { + return indirect.Scope(), true + } + } + } + return nil, false + } + + for _, ifacePath := range ifaces { + // "encoding/json.Marshaler" -> "encoding/json" + "Marshaler" + idx := strings.LastIndex(ifacePath, ".") + if idx == -1 { + continue + } + pkgName, ifaceName := ifacePath[:idx], ifacePath[idx+1:] + + scope, ok := findScope(pkgName) + if !ok { + continue + } + obj := scope.Lookup(ifaceName) + if obj == nil { + continue + } + iface, ok := obj.Type().Underlying().(*types.Interface) + if !ok { + continue + } + if types.Implements(typ, iface) { + return true + } + } + + return false +} diff --git a/vendor/go-simpler.org/musttag/utils.go b/vendor/go-simpler.org/musttag/utils.go new file mode 100644 index 0000000000..1a13f96c28 --- /dev/null +++ b/vendor/go-simpler.org/musttag/utils.go @@ -0,0 +1,49 @@ +package musttag + +import ( + "encoding/json" + "fmt" + "os/exec" + "strings" +) + +func getMainModule() (string, error) { + args := [...]string{"go", "mod", "edit", "-json"} + + out, err := exec.Command(args[0], args[1:]...).Output() + if err != nil { + return "", fmt.Errorf("running %q: %w", strings.Join(args[:], " "), err) + } + + var info struct { + Module struct { + Path string `json:"Path"` + } `json:"Module"` + } + if err := json.Unmarshal(out, &info); err != nil { + return "", fmt.Errorf("decoding module info: %w\n%s", err, out) + } + + return info.Module.Path, nil +} + +// based on golang.org/x/tools/imports.VendorlessPath +func cutVendor(path string) string { + var prefix string + + switch { + case strings.HasPrefix(path, "(*"): + prefix, path = "(*", path[len("(*"):] + case strings.HasPrefix(path, "("): + prefix, path = "(", path[len("("):] + } + + if i := strings.LastIndex(path, "/vendor/"); i >= 0 { + return prefix + path[i+len("/vendor/"):] + } + if strings.HasPrefix(path, "vendor/") { + return prefix + path[len("vendor/"):] + } + + return prefix + path +} diff --git a/vendor/go-simpler.org/sloglint/.golangci.yml b/vendor/go-simpler.org/sloglint/.golangci.yml new file mode 100644 index 0000000000..ef926a0562 --- /dev/null +++ b/vendor/go-simpler.org/sloglint/.golangci.yml @@ -0,0 +1,22 @@ +linters: + disable-all: 
true + enable: + # enabled by default: + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - unused + # disabled by default: + - gocritic + - gofumpt + +linters-settings: + gocritic: + enabled-tags: + - diagnostic + - style + - performance + - experimental + - opinionated diff --git a/vendor/go-simpler.org/sloglint/.goreleaser.yml b/vendor/go-simpler.org/sloglint/.goreleaser.yml new file mode 100644 index 0000000000..d31ea11d39 --- /dev/null +++ b/vendor/go-simpler.org/sloglint/.goreleaser.yml @@ -0,0 +1,18 @@ +builds: + - main: ./cmd/sloglint + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -s -w -X main.version={{.Version}} + targets: + - darwin_amd64 + - darwin_arm64 + - linux_amd64 + - windows_amd64 + +archives: + - format_overrides: + - goos: windows + format: zip diff --git a/vendor/go-simpler.org/sloglint/LICENSE b/vendor/go-simpler.org/sloglint/LICENSE new file mode 100644 index 0000000000..a612ad9813 --- /dev/null +++ b/vendor/go-simpler.org/sloglint/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. 
"Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. 
You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. 
The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. 
Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/go-simpler.org/sloglint/Makefile b/vendor/go-simpler.org/sloglint/Makefile new file mode 100644 index 0000000000..6165b16f47 --- /dev/null +++ b/vendor/go-simpler.org/sloglint/Makefile @@ -0,0 +1,28 @@ +.POSIX: +.SUFFIXES: + +all: test lint + +test: + go test -race -shuffle=on -cover ./... 
+ +test/cover: + go test -race -shuffle=on -coverprofile=coverage.out ./... + go tool cover -html=coverage.out + +lint: + golangci-lint run + +tidy: + go mod tidy + +generate: + go generate ./... + +# run `make pre-commit` once to install the hook. +pre-commit: .git/hooks/pre-commit test lint tidy generate + git diff --exit-code + +.git/hooks/pre-commit: + echo "make pre-commit" > .git/hooks/pre-commit + chmod +x .git/hooks/pre-commit diff --git a/vendor/go-simpler.org/sloglint/README.md b/vendor/go-simpler.org/sloglint/README.md new file mode 100644 index 0000000000..7f6455c1c7 --- /dev/null +++ b/vendor/go-simpler.org/sloglint/README.md @@ -0,0 +1,177 @@ +# sloglint + +[![checks](https://github.com/go-simpler/sloglint/actions/workflows/checks.yml/badge.svg)](https://github.com/go-simpler/sloglint/actions/workflows/checks.yml) +[![pkg.go.dev](https://pkg.go.dev/badge/go-simpler.org/sloglint.svg)](https://pkg.go.dev/go-simpler.org/sloglint) +[![goreportcard](https://goreportcard.com/badge/go-simpler.org/sloglint)](https://goreportcard.com/report/go-simpler.org/sloglint) +[![codecov](https://codecov.io/gh/go-simpler/sloglint/branch/main/graph/badge.svg)](https://codecov.io/gh/go-simpler/sloglint) + +A Go linter that ensures consistent code style when using `log/slog`. + +## 📌 About + +The `log/slog` API allows two different types of arguments: key-value pairs and attributes. +While people may have different opinions about which one is better, most seem to agree on one thing: it should be consistent. +With `sloglint` you can enforce various rules for `log/slog` based on your preferred code style. + +## 🚀 Features + +* Enforce not mixing key-value pairs and attributes (default) +* Enforce using either key-value pairs only or attributes only (optional) +* Enforce not using global loggers (optional) +* Enforce using methods that accept a context (optional) +* Enforce using static log messages (optional) +* Enforce using constants instead of raw keys (optional) +* Enforce a single key naming convention (optional) +* Enforce putting arguments on separate lines (optional) + +## 📦 Install + +`sloglint` is integrated into [`golangci-lint`][1], and this is the recommended way to use it. + +To enable the linter, add the following lines to `.golangci.yml`: + +```yaml +linters: + enable: + - sloglint +``` + +Alternatively, you can download a prebuilt binary from the [Releases][2] page to use `sloglint` standalone. + +## 📋 Usage + +Run `golangci-lint` with `sloglint` enabled. +See the list of [available options][3] to configure the linter. + +When using `sloglint` standalone, pass the options as flags of the same name. + +### No mixed arguments + +The `no-mixed-args` option causes `sloglint` to report mixing key-value pairs and attributes within a single function call: + +```go +slog.Info("a user has logged in", "user_id", 42, slog.String("ip_address", "192.0.2.0")) // sloglint: key-value pairs and attributes should not be mixed +``` + +It is enabled by default. + +### Key-value pairs only + +The `kv-only` option causes `sloglint` to report any use of attributes: + +```go +slog.Info("a user has logged in", slog.Int("user_id", 42)) // sloglint: attributes should not be used +``` + +### Attributes only + +In contrast, the `attr-only` option causes `sloglint` to report any use of key-value pairs: + +```go +slog.Info("a user has logged in", "user_id", 42) // sloglint: key-value pairs should not be used +``` + +### No global + +Some projects prefer to pass loggers as explicit dependencies.
+The `no-global` option causes `sloglint` to report the usage of global loggers. + +```go +slog.Info("a user has logged in", "user_id", 42) // sloglint: global logger should not be used +``` + +Possible values are `all` (report all global loggers) and `default` (report only the default `slog` logger). + +### Context only + +Some `slog.Handler` implementations make use of the given `context.Context` (e.g. to access context values). +For them to work properly, you need to pass a context to all logger calls. +The `context-only` option causes `sloglint` to report the use of methods without a context: + +```go +slog.Info("a user has logged in") // sloglint: methods without a context should not be used +``` + +This report can be fixed by using the equivalent method with the `Context` suffix: + +```go +slog.InfoContext(ctx, "a user has logged in") +``` + +### Static messages + +To get the most out of structured logging, you may want to require log messages to be static. +The `static-msg` option causes `sloglint` to report non-static messages: + +```go +slog.Info(fmt.Sprintf("a user with id %d has logged in", 42)) // sloglint: message should be a string literal or a constant +``` + +The report can be fixed by moving dynamic values to arguments: + +```go +slog.Info("a user has logged in", "user_id", 42) +``` + +### No raw keys + +To prevent typos, you may want to forbid the use of raw keys altogether. +The `no-raw-keys` option causes `sloglint` to report the use of strings as keys +(including `slog.Attr` calls, e.g. `slog.Int("user_id", 42)`): + +```go +slog.Info("a user has logged in", "user_id", 42) // sloglint: raw keys should not be used +``` + +This report can be fixed by using either constants... + +```go +const UserId = "user_id" + +slog.Info("a user has logged in", UserId, 42) +``` + +...or custom `slog.Attr` constructors: + +```go +func UserId(value int) slog.Attr { return slog.Int("user_id", value) } + +slog.Info("a user has logged in", UserId(42)) +``` + +> [!TIP] +> Such helpers can be automatically generated for you by the [`sloggen`][4] tool. Give it a try too! + +### Key naming convention + +To ensure consistency in logs, you may want to enforce a single key naming convention. +The `key-naming-case` option causes `sloglint` to report keys written in a case other than the given one: + +```go +slog.Info("a user has logged in", "user-id", 42) // sloglint: keys should be written in snake_case +``` + +Possible values are `snake`, `kebab`, `camel`, or `pascal`. + +### Arguments on separate lines + +To improve code readability, you may want to put arguments on separate lines, especially when using key-value pairs. +The `args-on-sep-lines` option causes `sloglint` to report 2+ arguments on the same line: + +```go +slog.Info("a user has logged in", "user_id", 42, "ip_address", "192.0.2.0") // sloglint: arguments should be put on separate lines +``` + +This report can be fixed by reformatting the code: + +```go +slog.Info("a user has logged in", + "user_id", 42, + "ip_address", "192.0.2.0", +) +``` + +[1]: https://golangci-lint.run +[2]: https://github.com/go-simpler/sloglint/releases +[3]: https://golangci-lint.run/usage/linters/#sloglint +[4]: https://github.com/go-simpler/sloggen diff --git a/vendor/go-simpler.org/sloglint/sloglint.go b/vendor/go-simpler.org/sloglint/sloglint.go new file mode 100644 index 0000000000..35cac14d13 --- /dev/null +++ b/vendor/go-simpler.org/sloglint/sloglint.go @@ -0,0 +1,388 @@ +// Package sloglint implements the sloglint analyzer. 
+package sloglint + +import ( + "errors" + "flag" + "fmt" + "go/ast" + "go/token" + "go/types" + "strconv" + "strings" + + "github.com/ettle/strcase" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +// Options are options for the sloglint analyzer. +type Options struct { + NoMixedArgs bool // Enforce not mixing key-value pairs and attributes (default true). + KVOnly bool // Enforce using key-value pairs only (overrides NoMixedArgs, incompatible with AttrOnly). + AttrOnly bool // Enforce using attributes only (overrides NoMixedArgs, incompatible with KVOnly). + NoGlobal string // Enforce not using global loggers ("all" or "default"). + ContextOnly bool // Enforce using methods that accept a context. + StaticMsg bool // Enforce using static log messages. + NoRawKeys bool // Enforce using constants instead of raw keys. + KeyNamingCase string // Enforce a single key naming convention ("snake", "kebab", "camel", or "pascal"). + ArgsOnSepLines bool // Enforce putting arguments on separate lines. +} + +// New creates a new sloglint analyzer. +func New(opts *Options) *analysis.Analyzer { + if opts == nil { + opts = &Options{NoMixedArgs: true} + } + return &analysis.Analyzer{ + Name: "sloglint", + Doc: "ensure consistent code style when using log/slog", + Flags: flags(opts), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: func(pass *analysis.Pass) (any, error) { + if opts.KVOnly && opts.AttrOnly { + return nil, fmt.Errorf("sloglint: Options.KVOnly and Options.AttrOnly: %w", errIncompatible) + } + + switch opts.NoGlobal { + case "", "all", "default": + default: + return nil, fmt.Errorf("sloglint: Options.NoGlobal=%s: %w", opts.NoGlobal, errInvalidValue) + } + + switch opts.KeyNamingCase { + case "", snakeCase, kebabCase, camelCase, pascalCase: + default: + return nil, fmt.Errorf("sloglint: Options.KeyNamingCase=%s: %w", opts.KeyNamingCase, errInvalidValue) + } + + run(pass, opts) + return nil, nil + }, + } +} + +var ( + errIncompatible = errors.New("incompatible options") + errInvalidValue = errors.New("invalid value") +) + +func flags(opts *Options) flag.FlagSet { + fset := flag.NewFlagSet("sloglint", flag.ContinueOnError) + + boolVar := func(value *bool, name, usage string) { + fset.Func(name, usage, func(s string) error { + v, err := strconv.ParseBool(s) + *value = v + return err + }) + } + + strVar := func(value *string, name, usage string) { + fset.Func(name, usage, func(s string) error { + *value = s + return nil + }) + } + + boolVar(&opts.NoMixedArgs, "no-mixed-args", "enforce not mixing key-value pairs and attributes (default true)") + boolVar(&opts.KVOnly, "kv-only", "enforce using key-value pairs only (overrides -no-mixed-args, incompatible with -attr-only)") + boolVar(&opts.AttrOnly, "attr-only", "enforce using attributes only (overrides -no-mixed-args, incompatible with -kv-only)") + strVar(&opts.NoGlobal, "no-global", "enforce not using global loggers (all|default)") + boolVar(&opts.ContextOnly, "context-only", "enforce using methods that accept a context") + boolVar(&opts.StaticMsg, "static-msg", "enforce using static log messages") + boolVar(&opts.NoRawKeys, "no-raw-keys", "enforce using constants instead of raw keys") + strVar(&opts.KeyNamingCase, "key-naming-case", "enforce a single key naming convention (snake|kebab|camel|pascal)") + boolVar(&opts.ArgsOnSepLines, "args-on-sep-lines", "enforce putting arguments on separate lines") + + return *fset +} 
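(Editor's note: a minimal sketch of how the `Options`/`New` API above can be wired into a standalone driver, assuming the stock `singlechecker` helper from golang.org/x/tools. This is an illustration only, not part of the vendored file; `golangci-lint` instead populates the same `Options` struct from its `sloglint` settings.)

```go
package main

import (
	"golang.org/x/tools/go/analysis/singlechecker"

	"go-simpler.org/sloglint"
)

func main() {
	// Sketch: keep the default rule and enable two optional checks.
	opts := &sloglint.Options{
		NoMixedArgs:   true,
		ContextOnly:   true,
		KeyNamingCase: "snake",
	}
	singlechecker.Main(sloglint.New(opts))
}
```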
+ +var slogFuncs = map[string]int{ // funcName:argsPos + "log/slog.Log": 3, + "log/slog.Debug": 1, + "log/slog.Info": 1, + "log/slog.Warn": 1, + "log/slog.Error": 1, + "log/slog.DebugContext": 2, + "log/slog.InfoContext": 2, + "log/slog.WarnContext": 2, + "log/slog.ErrorContext": 2, + "(*log/slog.Logger).Log": 3, + "(*log/slog.Logger).Debug": 1, + "(*log/slog.Logger).Info": 1, + "(*log/slog.Logger).Warn": 1, + "(*log/slog.Logger).Error": 1, + "(*log/slog.Logger).DebugContext": 2, + "(*log/slog.Logger).InfoContext": 2, + "(*log/slog.Logger).WarnContext": 2, + "(*log/slog.Logger).ErrorContext": 2, +} + +var attrFuncs = map[string]struct{}{ + "log/slog.String": {}, + "log/slog.Int64": {}, + "log/slog.Int": {}, + "log/slog.Uint64": {}, + "log/slog.Float64": {}, + "log/slog.Bool": {}, + "log/slog.Time": {}, + "log/slog.Duration": {}, + "log/slog.Group": {}, + "log/slog.Any": {}, +} + +const ( + snakeCase = "snake" + kebabCase = "kebab" + camelCase = "camel" + pascalCase = "pascal" +) + +func run(pass *analysis.Pass, opts *Options) { + visit := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + filter := []ast.Node{(*ast.CallExpr)(nil)} + + visit.Preorder(filter, func(node ast.Node) { + call := node.(*ast.CallExpr) + + fn := typeutil.StaticCallee(pass.TypesInfo, call) + if fn == nil { + return + } + + name := fn.FullName() + argsPos, ok := slogFuncs[name] + if !ok { + return + } + + switch opts.NoGlobal { + case "all": + if strings.HasPrefix(name, "log/slog.") || globalLoggerUsed(pass.TypesInfo, call.Fun) { + pass.Reportf(call.Pos(), "global logger should not be used") + } + case "default": + if strings.HasPrefix(name, "log/slog.") { + pass.Reportf(call.Pos(), "default logger should not be used") + } + } + + if opts.ContextOnly { + typ := pass.TypesInfo.TypeOf(call.Args[0]) + if typ != nil && typ.String() != "context.Context" { + pass.Reportf(call.Pos(), "methods without a context should not be used") + } + } + + if opts.StaticMsg && !staticMsg(call.Args[argsPos-1]) { + pass.Reportf(call.Pos(), "message should be a string literal or a constant") + } + + // NOTE: we assume that the arguments have already been validated by govet. + args := call.Args[argsPos:] + if len(args) == 0 { + return + } + + var keys []ast.Expr + var attrs []ast.Expr + + for i := 0; i < len(args); i++ { + typ := pass.TypesInfo.TypeOf(args[i]) + if typ == nil { + continue + } + switch typ.String() { + case "string": + keys = append(keys, args[i]) + i++ // skip the value. 
+ case "log/slog.Attr": + attrs = append(attrs, args[i]) + } + } + + switch { + case opts.KVOnly && len(attrs) > 0: + pass.Reportf(call.Pos(), "attributes should not be used") + case opts.AttrOnly && len(attrs) < len(args): + pass.Reportf(call.Pos(), "key-value pairs should not be used") + case opts.NoMixedArgs && 0 < len(attrs) && len(attrs) < len(args): + pass.Reportf(call.Pos(), "key-value pairs and attributes should not be mixed") + } + + if opts.NoRawKeys && rawKeysUsed(pass.TypesInfo, keys, attrs) { + pass.Reportf(call.Pos(), "raw keys should not be used") + } + + if opts.ArgsOnSepLines && argsOnSameLine(pass.Fset, call, keys, attrs) { + pass.Reportf(call.Pos(), "arguments should be put on separate lines") + } + + switch { + case opts.KeyNamingCase == snakeCase && badKeyNames(pass.TypesInfo, strcase.ToSnake, keys, attrs): + pass.Reportf(call.Pos(), "keys should be written in snake_case") + case opts.KeyNamingCase == kebabCase && badKeyNames(pass.TypesInfo, strcase.ToKebab, keys, attrs): + pass.Reportf(call.Pos(), "keys should be written in kebab-case") + case opts.KeyNamingCase == camelCase && badKeyNames(pass.TypesInfo, strcase.ToCamel, keys, attrs): + pass.Reportf(call.Pos(), "keys should be written in camelCase") + case opts.KeyNamingCase == pascalCase && badKeyNames(pass.TypesInfo, strcase.ToPascal, keys, attrs): + pass.Reportf(call.Pos(), "keys should be written in PascalCase") + } + }) +} + +func globalLoggerUsed(info *types.Info, expr ast.Expr) bool { + selector, ok := expr.(*ast.SelectorExpr) + if !ok { + return false + } + ident, ok := selector.X.(*ast.Ident) + if !ok { + return false + } + obj := info.ObjectOf(ident) + return obj.Parent() == obj.Pkg().Scope() +} + +func staticMsg(expr ast.Expr) bool { + switch msg := expr.(type) { + case *ast.BasicLit: // e.g. slog.Info("msg") + return msg.Kind == token.STRING + case *ast.Ident: // e.g. const msg = "msg"; slog.Info(msg) + return msg.Obj != nil && msg.Obj.Kind == ast.Con + default: + return false + } +} + +func rawKeysUsed(info *types.Info, keys, attrs []ast.Expr) bool { + isConst := func(expr ast.Expr) bool { + ident, ok := expr.(*ast.Ident) + return ok && ident.Obj != nil && ident.Obj.Kind == ast.Con + } + + for _, key := range keys { + if !isConst(key) { + return true + } + } + + for _, attr := range attrs { + switch attr := attr.(type) { + case *ast.CallExpr: // e.g. slog.Int() + fn := typeutil.StaticCallee(info, attr) + if _, ok := attrFuncs[fn.FullName()]; ok && !isConst(attr.Args[0]) { + return true + } + + case *ast.CompositeLit: // slog.Attr{} + isRawKey := func(kv *ast.KeyValueExpr) bool { + return kv.Key.(*ast.Ident).Name == "Key" && !isConst(kv.Value) + } + + switch len(attr.Elts) { + case 1: // slog.Attr{Key: ...} | slog.Attr{Value: ...} + kv := attr.Elts[0].(*ast.KeyValueExpr) + if isRawKey(kv) { + return true + } + case 2: // slog.Attr{..., ...} | slog.Attr{Key: ..., Value: ...} + kv1, ok := attr.Elts[0].(*ast.KeyValueExpr) + if ok { + kv2 := attr.Elts[1].(*ast.KeyValueExpr) + if isRawKey(kv1) || isRawKey(kv2) { + return true + } + } else if !isConst(attr.Elts[0]) { + return true + } + } + } + } + + return false +} + +func badKeyNames(info *types.Info, caseFn func(string) string, keys, attrs []ast.Expr) bool { + for _, key := range keys { + if name, ok := getKeyName(key); ok && name != caseFn(name) { + return true + } + } + + for _, attr := range attrs { + var expr ast.Expr + switch attr := attr.(type) { + case *ast.CallExpr: // e.g. 
slog.Int() + fn := typeutil.StaticCallee(info, attr) + if _, ok := attrFuncs[fn.FullName()]; ok { + expr = attr.Args[0] + } + case *ast.CompositeLit: // slog.Attr{} + switch len(attr.Elts) { + case 1: // slog.Attr{Key: ...} | slog.Attr{Value: ...} + if kv := attr.Elts[0].(*ast.KeyValueExpr); kv.Key.(*ast.Ident).Name == "Key" { + expr = kv.Value + } + case 2: // slog.Attr{..., ...} | slog.Attr{Key: ..., Value: ...} + expr = attr.Elts[0] + if kv1, ok := attr.Elts[0].(*ast.KeyValueExpr); ok && kv1.Key.(*ast.Ident).Name == "Key" { + expr = kv1.Value + } + if kv2, ok := attr.Elts[1].(*ast.KeyValueExpr); ok && kv2.Key.(*ast.Ident).Name == "Key" { + expr = kv2.Value + } + } + } + if name, ok := getKeyName(expr); ok && name != caseFn(name) { + return true + } + } + + return false +} + +func getKeyName(expr ast.Expr) (string, bool) { + if expr == nil { + return "", false + } + if ident, ok := expr.(*ast.Ident); ok { + if ident.Obj == nil || ident.Obj.Decl == nil || ident.Obj.Kind != ast.Con { + return "", false + } + if spec, ok := ident.Obj.Decl.(*ast.ValueSpec); ok && len(spec.Values) > 0 { + // TODO: support len(spec.Values) > 1; e.g. "const foo, bar = 1, 2" + expr = spec.Values[0] + } + } + if lit, ok := expr.(*ast.BasicLit); ok && lit.Kind == token.STRING { + return lit.Value, true + } + return "", false +} + +func argsOnSameLine(fset *token.FileSet, call ast.Expr, keys, attrs []ast.Expr) bool { + if len(keys)+len(attrs) <= 1 { + return false // special case: slog.Info("msg", "key", "value") is ok. + } + + l := len(keys) + len(attrs) + 1 + args := make([]ast.Expr, 0, l) + args = append(args, call) + args = append(args, keys...) + args = append(args, attrs...) + + lines := make(map[int]struct{}, l) + for _, arg := range args { + line := fset.Position(arg.Pos()).Line + if _, ok := lines[line]; ok { + return true + } + lines[line] = struct{}{} + } + + return false +} diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore deleted file mode 100644 index 74a6db472e..0000000000 --- a/vendor/go.opencensus.io/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -/.idea/ - -# go.opencensus.io/exporter/aws -/exporter/aws/ - -# Exclude vendor, use dep ensure after checkout: -/vendor/github.com/ -/vendor/golang.org/ -/vendor/google.golang.org/ diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS deleted file mode 100644 index e491a9e7f7..0000000000 --- a/vendor/go.opencensus.io/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Google Inc. diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md deleted file mode 100644 index 1ba3962c8b..0000000000 --- a/vendor/go.opencensus.io/CONTRIBUTING.md +++ /dev/null @@ -1,63 +0,0 @@ -# How to contribute - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution, -this simply gives us permission to use and redistribute your contributions as -part of the project. Head over to to see -your current agreements on file or to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - -## Code reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. 
Consult [GitHub Help] for more -information on using pull requests. - -[GitHub Help]: https://help.github.com/articles/about-pull-requests/ - -## Instructions - -Fork the repo, checkout the upstream repo to your GOPATH by: - -``` -$ go get -d go.opencensus.io -``` - -Add your fork as an origin: - -``` -cd $(go env GOPATH)/src/go.opencensus.io -git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git -``` - -Run tests: - -``` -$ make install-tools # Only first time. -$ make -``` - -Checkout a new branch, make modifications and push the branch to your fork: - -``` -$ git checkout -b feature -# edit files -$ git commit -$ git push fork feature -``` - -Open a pull request against the main opencensus-go repo. - -## General Notes -This project uses Appveyor and Travis for CI. - -The dependencies are managed with `go mod` if you work with the sources under your -`$GOPATH` you need to set the environment variable `GO111MODULE=on`. \ No newline at end of file diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE deleted file mode 100644 index 7a4a3ea242..0000000000 --- a/vendor/go.opencensus.io/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile deleted file mode 100644 index d896edc996..0000000000 --- a/vendor/go.opencensus.io/Makefile +++ /dev/null @@ -1,97 +0,0 @@ -# TODO: Fix this on windows. -ALL_SRC := $(shell find . -name '*.go' \ - -not -path './vendor/*' \ - -not -path '*/gen-go/*' \ - -type f | sort) -ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) - -GOTEST_OPT?=-v -race -timeout 30s -GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic -GOTEST=go test -GOIMPORTS=goimports -GOLINT=golint -GOVET=go vet -EMBEDMD=embedmd -# TODO decide if we need to change these names. -TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages" -TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" -README_FILES := $(shell find . 
-name '*README.md' | sort | tr '\n' ' ') - -.DEFAULT_GOAL := imports-lint-vet-embedmd-test - -.PHONY: imports-lint-vet-embedmd-test -imports-lint-vet-embedmd-test: imports lint vet embedmd test - -# TODO enable test-with-coverage in tavis -.PHONY: travis-ci -travis-ci: imports lint vet embedmd test test-386 - -all-pkgs: - @echo $(ALL_PKGS) | tr ' ' '\n' | sort - -all-srcs: - @echo $(ALL_SRC) | tr ' ' '\n' | sort - -.PHONY: test -test: - $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) - -.PHONY: test-386 -test-386: - GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) - -.PHONY: test-with-coverage -test-with-coverage: - $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) - -.PHONY: imports -imports: - @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \ - if [ "$$IMPORTSOUT" ]; then \ - echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \ - echo "$$IMPORTSOUT\n"; \ - exit 1; \ - else \ - echo "Imports finished successfully"; \ - fi - -.PHONY: lint -lint: - @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \ - if [ "$$LINTOUT" ]; then \ - echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ - echo "$$LINTOUT\n"; \ - exit 1; \ - else \ - echo "Lint finished successfully"; \ - fi - -.PHONY: vet -vet: - # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" - @VETOUT=`$(GOVET) ./... | grep -v "go: downloading" 2>&1`; \ - if [ "$$VETOUT" ]; then \ - echo "$(GOVET) FAILED => go vet the following files:\n"; \ - echo "$$VETOUT\n"; \ - exit 1; \ - else \ - echo "Vet finished successfully"; \ - fi - -.PHONY: embedmd -embedmd: - @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ - if [ "$$EMBEDMDOUT" ]; then \ - echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ - echo "$$EMBEDMDOUT\n"; \ - exit 1; \ - else \ - echo "Embedmd finished successfully"; \ - fi - -.PHONY: install-tools -install-tools: - go install golang.org/x/lint/golint@latest - go install golang.org/x/tools/cmd/cover@latest - go install golang.org/x/tools/cmd/goimports@latest - go install github.com/rakyll/embedmd@latest diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md deleted file mode 100644 index 1d7e837116..0000000000 --- a/vendor/go.opencensus.io/README.md +++ /dev/null @@ -1,267 +0,0 @@ -# OpenCensus Libraries for Go - -[![Build Status][travis-image]][travis-url] -[![Windows Build Status][appveyor-image]][appveyor-url] -[![GoDoc][godoc-image]][godoc-url] -[![Gitter chat][gitter-image]][gitter-url] - -OpenCensus Go is a Go implementation of OpenCensus, a toolkit for -collecting application performance and behavior monitoring data. -Currently it consists of three major components: tags, stats and tracing. - -#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289). - -## Installation - -``` -$ go get -u go.opencensus.io -``` - -The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy). -The use of vendoring or a dependency management tool is recommended. - -## Prerequisites - -OpenCensus Go libraries require Go 1.8 or later. 
- -## Getting Started - -The easiest way to get started using OpenCensus in your application is to use an existing -integration with your RPC framework: - -* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) -* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) -* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql) -* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) -* [Groupcache](https://godoc.org/github.com/orijtech/groupcache) -* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) -* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver) -* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo) -* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) -* [Memcache](https://godoc.org/github.com/orijtech/gomemcache) - -If you're using a framework not listed here, you could either implement your own middleware for your -framework or use [custom stats](#stats) and [spans](#spans) directly in your application. - -## Exporters - -OpenCensus can export instrumentation data to various backends. -OpenCensus has exporter implementations for the following, users -can implement their own exporters by implementing the exporter interfaces -([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), -[trace](https://godoc.org/go.opencensus.io/trace#Exporter)): - -* [Prometheus][exporter-prom] for stats -* [OpenZipkin][exporter-zipkin] for traces -* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces -* [Jaeger][exporter-jaeger] for traces -* [AWS X-Ray][exporter-xray] for traces -* [Datadog][exporter-datadog] for stats and traces -* [Graphite][exporter-graphite] for stats -* [Honeycomb][exporter-honeycomb] for traces -* [New Relic][exporter-newrelic] for stats and traces - -## Overview - -![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) - -In a microservices environment, a user request may go through -multiple services until there is a response. OpenCensus allows -you to instrument your services and collect diagnostics data all -through your services end-to-end. - -## Tags - -Tags represent propagated key-value pairs. They are propagated using `context.Context` -in the same process or can be encoded to be transmitted on the wire. Usually, this will -be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` -for gRPC. - -Package `tag` allows adding or modifying tags in the current context. - -[embedmd]:# (internal/readme/tags.go new) -```go -ctx, err := tag.New(ctx, - tag.Insert(osKey, "macOS-10.12.5"), - tag.Upsert(userIDKey, "cde36753ed"), -) -if err != nil { - log.Fatal(err) -} -``` - -## Stats - -OpenCensus is a low-overhead framework even if instrumentation is always enabled. -In order to be so, it is optimized to make recording of data points fast -and separate from the data aggregation. - -OpenCensus stats collection happens in two stages: - -* Definition of measures and recording of data points -* Definition of views and aggregation of the recorded data - -### Recording - -Measurements are data points associated with a measure. -Recording implicitly tags the set of Measurements with the tags from the -provided context: - -[embedmd]:# (internal/readme/stats.go record) -```go -stats.Record(ctx, videoSize.M(102478)) -``` - -### Views - -Views are how Measures are aggregated. You can think of them as queries over the -set of recorded data points (measurements). 
- -Views have two parts: the tags to group by and the aggregation type used. - -Currently three types of aggregations are supported: -* CountAggregation is used to count the number of times a sample was recorded. -* DistributionAggregation is used to provide a histogram of the values of the samples. -* SumAggregation is used to sum up all sample values. - -[embedmd]:# (internal/readme/stats.go aggs) -```go -distAgg := view.Distribution(1<<32, 2<<32, 3<<32) -countAgg := view.Count() -sumAgg := view.Sum() -``` - -Here we create a view with the DistributionAggregation over our measure. - -[embedmd]:# (internal/readme/stats.go view) -```go -if err := view.Register(&view.View{ - Name: "example.com/video_size_distribution", - Description: "distribution of processed video size over time", - Measure: videoSize, - Aggregation: view.Distribution(1<<32, 2<<32, 3<<32), -}); err != nil { - log.Fatalf("Failed to register view: %v", err) -} -``` - -Register begins collecting data for the view. Registered views' data will be -exported via the registered exporters. - -## Traces - -A distributed trace tracks the progression of a single user request as -it is handled by the services and processes that make up an application. -Each step is called a span in the trace. Spans include metadata about the step, -including especially the time spent in the step, called the span’s latency. - -Below you see a trace and several spans underneath it. - -![Traces and spans](https://i.imgur.com/7hZwRVj.png) - -### Spans - -Span is the unit step in a trace. Each span has a name, latency, status and -additional metadata. - -Below we are starting a span for a cache read and ending it -when we are done: - -[embedmd]:# (internal/readme/trace.go startend) -```go -ctx, span := trace.StartSpan(ctx, "cache.Get") -defer span.End() - -// Do work to get from cache. -``` - -### Propagation - -Spans can have parents or can be root spans if they don't have any parents. -The current span is propagated in-process and across the network to allow associating -new child spans with the parent. - -In the same process, `context.Context` is used to propagate spans. -`trace.StartSpan` creates a new span as a root if the current context -doesn't contain a span. Or, it creates a child of the span that is -already in current context. The returned context can be used to keep -propagating the newly created span in the current context. - -[embedmd]:# (internal/readme/trace.go startend) -```go -ctx, span := trace.StartSpan(ctx, "cache.Get") -defer span.End() - -// Do work to get from cache. -``` - -Across the network, OpenCensus provides different propagation -methods for different protocols. - -* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). -* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation) - by default but can be configured to use a custom propagation method by setting another - [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat). - -## Execution Tracer - -With Go 1.11, OpenCensus Go will support integration with the Go execution tracer. -See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68) -for an example of their mutual use. - -## Profiles - -OpenCensus tags can be applied as profiler labels -for users who are on Go 1.9 and above. 
- -[embedmd]:# (internal/readme/tags.go profiler) -```go -ctx, err = tag.New(ctx, - tag.Insert(osKey, "macOS-10.12.5"), - tag.Insert(userIDKey, "fff0989878"), -) -if err != nil { - log.Fatal(err) -} -tag.Do(ctx, func(ctx context.Context) { - // Do work. - // When profiling is on, samples will be - // recorded with the key/values from the tag map. -}) -``` - -A screenshot of the CPU profile from the program above: - -![CPU profile](https://i.imgur.com/jBKjlkw.png) - -## Deprecation Policy - -Before version 1.0.0, the following deprecation policy will be observed: - -No backwards-incompatible changes will be made except for the removal of symbols that have -been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release -removing the *Deprecated* functionality will be made no sooner than 28 days after the first -release in which the functionality was marked *Deprecated*. - -[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master -[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go -[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true -[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master -[godoc-image]: https://godoc.org/go.opencensus.io?status.svg -[godoc-url]: https://godoc.org/go.opencensus.io -[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg -[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge - - -[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap -[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace - -[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus -[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver -[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin -[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger -[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws -[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog -[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite -[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter -[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml deleted file mode 100644 index d08f0edaff..0000000000 --- a/vendor/go.opencensus.io/appveyor.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: "{build}" - -platform: x64 - -clone_folder: c:\gopath\src\go.opencensus.io - -environment: - GOPATH: 'c:\gopath' - GO111MODULE: 'on' - CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 - -stack: go 1.11 - -before_test: - - go version - - go env - -build: false -deploy: false - -test_script: - - cd %APPVEYOR_BUILD_FOLDER% - - go build -v .\... - - go test -v .\... # No -race because cgo is disabled diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go deleted file mode 100644 index 81dc7183ec..0000000000 --- a/vendor/go.opencensus.io/internal/internal.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opencensus.io/internal" - -import ( - "fmt" - "time" - - opencensus "go.opencensus.io" -) - -// UserAgent is the user agent to be added to the outgoing -// requests from the exporters. -var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) - -// MonotonicEndTime returns the end time at present -// but offset from start, monotonically. -// -// The monotonic clock is used in subtractions hence -// the duration since start added back to start gives -// end as a monotonic time. -// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks -func MonotonicEndTime(start time.Time) time.Time { - return start.Add(time.Since(start)) -} diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go deleted file mode 100644 index de8ccf236c..0000000000 --- a/vendor/go.opencensus.io/internal/sanitize.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "strings" - "unicode" -) - -const labelKeySizeLimit = 100 - -// Sanitize returns a string that is truncated to 100 characters if it's too -// long, and replaces non-alphanumeric characters with underscores. -func Sanitize(s string) string { - if len(s) == 0 { - return s - } - if len(s) > labelKeySizeLimit { - s = s[:labelKeySizeLimit] - } - s = strings.Map(sanitizeRune, s) - if unicode.IsDigit(rune(s[0])) { - s = "key_" + s - } - if s[0] == '_' { - s = "key" + s - } - return s -} - -// converts anything that is not a letter or digit to an underscore -func sanitizeRune(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) { - return r - } - // Everything else turns into an underscore - return '_' -} diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go deleted file mode 100644 index 41b2c3fc03..0000000000 --- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
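To illustrate the Sanitize function deleted above, here is a self-contained sketch that mirrors its rules as a local copy (go.opencensus.io/internal is an internal package and not importable from outside that module, so the function body is restated for the example):

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// sanitize mirrors the deleted internal.Sanitize: truncate to 100 bytes,
// map non-alphanumeric runes to '_', and prefix keys that would otherwise
// start with a digit or an underscore.
func sanitize(s string) string {
	if len(s) == 0 {
		return s
	}
	if len(s) > 100 {
		s = s[:100]
	}
	s = strings.Map(func(r rune) rune {
		if unicode.IsLetter(r) || unicode.IsDigit(r) {
			return r
		}
		return '_'
	}, s)
	if unicode.IsDigit(rune(s[0])) {
		s = "key_" + s
	}
	if s[0] == '_' {
		s = "key" + s
	}
	return s
}

func main() {
	fmt.Println(sanitize("9views/latency-ms")) // key_9views_latency_ms
	fmt.Println(sanitize("_hidden"))           // key_hidden
}
```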
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package tagencoding contains the tag encoding -// used internally by the stats collector. -package tagencoding // import "go.opencensus.io/internal/tagencoding" - -// Values represents the encoded buffer for the values. -type Values struct { - Buffer []byte - WriteIndex int - ReadIndex int -} - -func (vb *Values) growIfRequired(expected int) { - if len(vb.Buffer)-vb.WriteIndex < expected { - tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) - copy(tmp, vb.Buffer) - vb.Buffer = tmp - } -} - -// WriteValue is the helper method to encode Values from map[Key][]byte. -func (vb *Values) WriteValue(v []byte) { - length := len(v) & 0xff - vb.growIfRequired(1 + length) - - // writing length of v - vb.Buffer[vb.WriteIndex] = byte(length) - vb.WriteIndex++ - - if length == 0 { - // No value was encoded for this key - return - } - - // writing v - copy(vb.Buffer[vb.WriteIndex:], v[:length]) - vb.WriteIndex += length -} - -// ReadValue is the helper method to decode Values to a map[Key][]byte. -func (vb *Values) ReadValue() []byte { - // read length of v - length := int(vb.Buffer[vb.ReadIndex]) - vb.ReadIndex++ - if length == 0 { - // No value was encoded for this key - return nil - } - - // read value of v - v := make([]byte, length) - endIdx := vb.ReadIndex + length - copy(v, vb.Buffer[vb.ReadIndex:endIdx]) - vb.ReadIndex = endIdx - return v -} - -// Bytes returns a reference to already written bytes in the Buffer. -func (vb *Values) Bytes() []byte { - return vb.Buffer[:vb.WriteIndex] -} diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go deleted file mode 100644 index 073af7b473..0000000000 --- a/vendor/go.opencensus.io/internal/traceinternals.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "time" -) - -// Trace allows internal access to some trace functionality. -// TODO(#412): remove this -var Trace interface{} - -// LocalSpanStoreEnabled is true if the local span store is enabled. -var LocalSpanStoreEnabled bool - -// BucketConfiguration stores the number of samples to store for span buckets -// for successful and failed spans for a particular span name. -type BucketConfiguration struct { - Name string - MaxRequestsSucceeded int - MaxRequestsErrors int -} - -// PerMethodSummary is a summary of the spans stored for a single span name. -type PerMethodSummary struct { - Active int - LatencyBuckets []LatencyBucketSummary - ErrorBuckets []ErrorBucketSummary -} - -// LatencyBucketSummary is a summary of a latency bucket. -type LatencyBucketSummary struct { - MinLatency, MaxLatency time.Duration - Size int -} - -// ErrorBucketSummary is a summary of an error bucket.
-type ErrorBucketSummary struct { - ErrorCode int32 - Size int -} diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/metric/metricdata/doc.go deleted file mode 100644 index 52a7b3bf85..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metricdata contains the metrics data model. -// -// This is an EXPERIMENTAL package, and may change in arbitrary ways without -// notice. -package metricdata // import "go.opencensus.io/metric/metricdata" diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go deleted file mode 100644 index 12695ce2dc..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/exemplar.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -import ( - "time" -) - -// Exemplars keys. -const ( - AttachmentKeySpanContext = "SpanContext" -) - -// Exemplar is an example data point associated with each bucket of a -// distribution type aggregation. -// -// Their purpose is to provide an example of the kind of thing -// (request, RPC, trace span, etc.) that resulted in that measurement. -type Exemplar struct { - Value float64 // the value that was recorded - Timestamp time.Time // the time the value was recorded - Attachments Attachments // attachments (if any) -} - -// Attachments is a map of extra values associated with a recorded data point. -type Attachments map[string]interface{} diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go deleted file mode 100644 index aadae41e6a..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/label.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
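Circling back to the tagencoding format removed above: each value is encoded as one length byte followed by the raw bytes, and a zero length byte marks a key with no value. A hand-rolled sketch of that wire layout (the values are chosen arbitrarily):

```go
package main

import "fmt"

// A minimal re-statement of the deleted tagencoding layout: length byte,
// then raw bytes; a zero length byte means "key present, no value".
func main() {
	var buf []byte
	write := func(v []byte) {
		buf = append(buf, byte(len(v)&0xff))
		buf = append(buf, v...)
	}
	write([]byte("gRPC"))
	write(nil) // key present, no value
	write([]byte("us-east1"))
	fmt.Printf("% x\n", buf) // 04 67 52 50 43 00 08 75 73 2d 65 61 73 74 31

	// Decoding walks the buffer the same way ReadValue does.
	for i := 0; i < len(buf); {
		n := int(buf[i])
		i++
		fmt.Printf("value: %q\n", buf[i:i+n])
		i += n
	}
}
```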
- -package metricdata - -// LabelKey represents the key of a label. It has an optional -// description attribute. -type LabelKey struct { - Key string - Description string -} - -// LabelValue represents the value of a label. -// The zero value represents a missing label value, which may be treated -// differently to an empty string value by some back ends. -type LabelValue struct { - Value string // string value of the label - Present bool // flag that indicates whether a value is present or not -} - -// NewLabelValue creates a new non-nil LabelValue that represents the given string. -func NewLabelValue(val string) LabelValue { - return LabelValue{Value: val, Present: true} -} diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go deleted file mode 100644 index 8293712c77..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/metric.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -import ( - "time" - - "go.opencensus.io/resource" -) - -// Descriptor holds metadata about a metric. -type Descriptor struct { - Name string // full name of the metric - Description string // human-readable description - Unit Unit // units for the measure - Type Type // type of measure - LabelKeys []LabelKey // label keys -} - -// Metric represents a quantity measured against a resource with different -// label value combinations. -type Metric struct { - Descriptor Descriptor // metric descriptor - Resource *resource.Resource // resource against which this was measured - TimeSeries []*TimeSeries // one time series for each combination of label values -} - -// TimeSeries is a sequence of points associated with a combination of label -// values. -type TimeSeries struct { - LabelValues []LabelValue // label values, same order as keys in the metric descriptor - Points []Point // points sequence - StartTime time.Time // time we started recording this time series -} diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go deleted file mode 100644 index 7fe057b19c..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/point.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -import ( - "time" -) - -// Point is a single data point of a time series.
-type Point struct { - // Time is the point in time that this point represents in a time series. - Time time.Time - // Value is the value of this point. Prefer using ReadValue to switching on - // the value type, since new value types might be added. - Value interface{} -} - -//go:generate stringer -type ValueType - -// NewFloat64Point creates a new Point holding a float64 value. -func NewFloat64Point(t time.Time, val float64) Point { - return Point{ - Value: val, - Time: t, - } -} - -// NewInt64Point creates a new Point holding an int64 value. -func NewInt64Point(t time.Time, val int64) Point { - return Point{ - Value: val, - Time: t, - } -} - -// NewDistributionPoint creates a new Point holding a Distribution value. -func NewDistributionPoint(t time.Time, val *Distribution) Point { - return Point{ - Value: val, - Time: t, - } -} - -// NewSummaryPoint creates a new Point holding a Summary value. -func NewSummaryPoint(t time.Time, val *Summary) Point { - return Point{ - Value: val, - Time: t, - } -} - -// ValueVisitor allows reading the value of a point. -type ValueVisitor interface { - VisitFloat64Value(float64) - VisitInt64Value(int64) - VisitDistributionValue(*Distribution) - VisitSummaryValue(*Summary) -} - -// ReadValue accepts a ValueVisitor and calls the appropriate method with the -// value of this point. -// Consumers of Point should use this in preference to switching on the type -// of the value directly, since new value types may be added. -func (p Point) ReadValue(vv ValueVisitor) { - switch v := p.Value.(type) { - case int64: - vv.VisitInt64Value(v) - case float64: - vv.VisitFloat64Value(v) - case *Distribution: - vv.VisitDistributionValue(v) - case *Summary: - vv.VisitSummaryValue(v) - default: - panic("unexpected value type") - } -} - -// Distribution contains summary statistics for a population of values. It -// optionally contains a histogram representing the distribution of those -// values across a set of buckets. -type Distribution struct { - // Count is the number of values in the population. Must be non-negative. This value - // must equal the sum of the values in bucket_counts if a histogram is - // provided. - Count int64 - // Sum is the sum of the values in the population. If count is zero then this field - // must be zero. - Sum float64 - // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the - // population. For values x_i this is: - // - // Sum[i=1..n]((x_i - mean)^2) - // - // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition - // describes Welford's method for accumulating this sum in one pass. - // - // If count is zero then this field must be zero. - SumOfSquaredDeviation float64 - // BucketOptions describes the bounds of the histogram buckets in this - // distribution. - // - // A Distribution may optionally contain a histogram of the values in the - // population. - // - // If nil, there is no associated histogram. - BucketOptions *BucketOptions - // Bucket If the distribution does not have a histogram, then omit this field. - // If there is a histogram, then the sum of the values in the Bucket counts - // must equal the value in the count field of the distribution. - Buckets []Bucket -} - -// BucketOptions describes the bounds of the histogram buckets in this -// distribution. -type BucketOptions struct { - // Bounds specifies a set of bucket upper bounds. - // This defines len(bounds) + 1 (= N) buckets. 
The boundaries for bucket - // index i are: - // - // [0, Bounds[i]) for i == 0 - // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 - // [Bounds[i-1], +infinity) for i == N-1 - Bounds []float64 -} - -// Bucket represents a single bucket (value range) in a distribution. -type Bucket struct { - // Count is the number of values in each bucket of the histogram, as described in - // bucket_bounds. - Count int64 - // Exemplar associated with this bucket (if any). - Exemplar *Exemplar -} - -// Summary is a representation of percentiles. -type Summary struct { - // Count is the cumulative count (if available). - Count int64 - // Sum is the cumulative sum of values (if available). - Sum float64 - // HasCountAndSum is true if Count and Sum are available. - HasCountAndSum bool - // Snapshot represents percentiles calculated over an arbitrary time window. - // The values in this struct can be reset at arbitrary unknown times, with - // the requirement that all of them are reset at the same time. - Snapshot Snapshot -} - -// Snapshot represents percentiles over an arbitrary time. -// The values in this struct can be reset at arbitrary unknown times, with -// the requirement that all of them are reset at the same time. -type Snapshot struct { - // Count is the number of values in the snapshot. Optional since some systems don't - // expose this. Set to 0 if not available. - Count int64 - // Sum is the sum of values in the snapshot. Optional since some systems don't - // expose this. If count is 0 then this field must be zero. - Sum float64 - // Percentiles is a map from percentile (range (0-100.0]) to the value of - // the percentile. - Percentiles map[float64]float64 -} - -//go:generate stringer -type Type - -// Type is the overall type of metric, including its value type and whether it -// represents a cumulative total (since the start time) or if it represents a -// gauge value. -type Type int - -// Metric types. -const ( - TypeGaugeInt64 Type = iota - TypeGaugeFloat64 - TypeGaugeDistribution - TypeCumulativeInt64 - TypeCumulativeFloat64 - TypeCumulativeDistribution - TypeSummary -) diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go deleted file mode 100644 index c3f8ec27b5..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/type_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type Type"; DO NOT EDIT. - -package metricdata - -import "strconv" - -const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" - -var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} - -func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { - return "Type(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Type_name[_Type_index[i]:_Type_index[i+1]] -} diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go deleted file mode 100644 index b483a1371b..0000000000 --- a/vendor/go.opencensus.io/metric/metricdata/unit.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
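To make the metricdata model above concrete, a small sketch that assembles a gauge metric and reads its point back through the ValueVisitor interface (the metric name and label are invented for illustration):

```go
package main

import (
	"fmt"
	"time"

	"go.opencensus.io/metric/metricdata"
)

// printer implements metricdata.ValueVisitor so ReadValue can dispatch on
// the point's dynamic type without the caller switching on it directly.
type printer struct{}

func (printer) VisitFloat64Value(v float64)                       { fmt.Println("float64:", v) }
func (printer) VisitInt64Value(v int64)                           { fmt.Println("int64:", v) }
func (printer) VisitDistributionValue(d *metricdata.Distribution) { fmt.Println("distribution:", d.Count) }
func (printer) VisitSummaryValue(s *metricdata.Summary)           { fmt.Println("summary:", s.Count) }

func main() {
	now := time.Now()
	m := metricdata.Metric{
		Descriptor: metricdata.Descriptor{
			Name:      "queue_length",
			Unit:      metricdata.UnitDimensionless,
			Type:      metricdata.TypeGaugeInt64,
			LabelKeys: []metricdata.LabelKey{{Key: "shard"}},
		},
		TimeSeries: []*metricdata.TimeSeries{{
			LabelValues: []metricdata.LabelValue{metricdata.NewLabelValue("0")},
			Points:      []metricdata.Point{metricdata.NewInt64Point(now, 42)},
			StartTime:   now,
		}},
	}
	m.TimeSeries[0].Points[0].ReadValue(printer{}) // prints "int64: 42"
}
```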
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -// Unit is a string encoded according to the case-sensitive abbreviations from the -// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html -type Unit string - -// Predefined units. To record against a unit not represented here, create your -// own Unit type constant from a string. -const ( - UnitDimensionless Unit = "1" - UnitBytes Unit = "By" - UnitMilliseconds Unit = "ms" -) diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go deleted file mode 100644 index ca1f390493..0000000000 --- a/vendor/go.opencensus.io/metric/metricproducer/manager.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricproducer - -import ( - "sync" -) - -// Manager maintains a list of active producers. Producers can register -// with the manager to allow readers to read all metrics provided by them. -// Readers can retrieve all producers registered with the manager, -// read metrics from the producers and export them. -type Manager struct { - mu sync.RWMutex - producers map[Producer]struct{} -} - -var prodMgr *Manager -var once sync.Once - -// GlobalManager is a single instance of producer manager -// that is used by all producers and all readers. -func GlobalManager() *Manager { - once.Do(func() { - prodMgr = &Manager{} - prodMgr.producers = make(map[Producer]struct{}) - }) - return prodMgr -} - -// AddProducer adds the producer to the Manager if it is not already present. -func (pm *Manager) AddProducer(producer Producer) { - if producer == nil { - return - } - pm.mu.Lock() - defer pm.mu.Unlock() - pm.producers[producer] = struct{}{} -} - -// DeleteProducer deletes the producer from the Manager if it is present. -func (pm *Manager) DeleteProducer(producer Producer) { - if producer == nil { - return - } - pm.mu.Lock() - defer pm.mu.Unlock() - delete(pm.producers, producer) -} - -// GetAll returns a slice of all producer currently registered with -// the Manager. For each call it generates a new slice. The slice -// should not be cached as registration may change at any time. It is -// typically called periodically by exporter to read metrics from -// the producers. 
-func (pm *Manager) GetAll() []Producer { - pm.mu.Lock() - defer pm.mu.Unlock() - producers := make([]Producer, len(pm.producers)) - i := 0 - for producer := range pm.producers { - producers[i] = producer - i++ - } - return producers -} diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go deleted file mode 100644 index 6cee9ed178..0000000000 --- a/vendor/go.opencensus.io/metric/metricproducer/producer.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricproducer - -import ( - "go.opencensus.io/metric/metricdata" -) - -// Producer is a source of metrics. -type Producer interface { - // Read should return the current values of all metrics supported by this - // metric provider. - // The returned metrics should be unique for each combination of name and - // resource. - Read() []*metricdata.Metric -} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go deleted file mode 100644 index 11e31f421c..0000000000 --- a/vendor/go.opencensus.io/opencensus.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package opencensus contains Go support for OpenCensus. -package opencensus // import "go.opencensus.io" - -// Version is the current release version of OpenCensus in use. -func Version() string { - return "0.24.0" -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go deleted file mode 100644 index 2063b6f76a..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
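A sketch of the producer/manager contract just shown: a toy Producer registers with the global manager, and a reader (normally an exporter) pulls from every registered producer. The metric returned here is a placeholder:

```go
package main

import (
	"fmt"

	"go.opencensus.io/metric/metricdata"
	"go.opencensus.io/metric/metricproducer"
)

// staticProducer is a toy metric source; a real Producer would snapshot
// live instrument state on every Read call.
type staticProducer struct{}

func (staticProducer) Read() []*metricdata.Metric {
	return []*metricdata.Metric{{
		Descriptor: metricdata.Descriptor{Name: "example_up", Type: metricdata.TypeGaugeInt64},
	}}
}

func main() {
	p := staticProducer{}
	metricproducer.GlobalManager().AddProducer(p)
	defer metricproducer.GlobalManager().DeleteProducer(p)

	// An exporter would typically do this periodically.
	for _, prod := range metricproducer.GlobalManager().GetAll() {
		for _, m := range prod.Read() {
			fmt.Println(m.Descriptor.Name)
		}
	}
}
```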
- -package ocgrpc - -import ( - "context" - - "go.opencensus.io/trace" - "google.golang.org/grpc/stats" -) - -// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and -// traces. Use with gRPC clients only. -type ClientHandler struct { - // StartOptions allows configuring the StartOptions used to create new spans. - // - // StartOptions.SpanKind will always be set to trace.SpanKindClient - // for spans started by this handler. - StartOptions trace.StartOptions -} - -// HandleConn exists to satisfy gRPC stats.Handler. -func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { - // no-op -} - -// TagConn exists to satisfy gRPC stats.Handler. -func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { - // no-op - return ctx -} - -// HandleRPC implements per-RPC tracing and stats instrumentation. -func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - traceHandleRPC(ctx, rs) - statsHandleRPC(ctx, rs) -} - -// TagRPC implements per-RPC context management. -func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - ctx = c.traceTagRPC(ctx, rti) - ctx = c.statsTagRPC(ctx, rti) - return ctx -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go deleted file mode 100644 index fb3c19d6b6..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
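For context, using ClientHandler amounts to one option on grpc.Dial, plus registering whichever client views you want (none are registered by default). A minimal sketch, with a placeholder target address and insecure credentials for brevity:

```go
package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Opt in to the package's default client views (declared below).
	if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
		log.Fatalf("failed to register client views: %v", err)
	}

	// Every RPC on this connection is traced and measured by the handler.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(&ocgrpc.ClientHandler{}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```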
-// - -package ocgrpc - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// The following variables are measures recorded by ClientHandler: -var ( - ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) - ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) - ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) - ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) - ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) - ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless) - ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) -) - -// Predefined views may be registered to collect data for the above measures. -// As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are registered by -// default. -var ( - ClientSentBytesPerRPCView = &view.View{ - Measure: ClientSentBytesPerRPC, - Name: "grpc.io/client/sent_bytes_per_rpc", - Description: "Distribution of bytes sent per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultBytesDistribution, - } - - ClientReceivedBytesPerRPCView = &view.View{ - Measure: ClientReceivedBytesPerRPC, - Name: "grpc.io/client/received_bytes_per_rpc", - Description: "Distribution of bytes received per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultBytesDistribution, - } - - ClientRoundtripLatencyView = &view.View{ - Measure: ClientRoundtripLatency, - Name: "grpc.io/client/roundtrip_latency", - Description: "Distribution of round-trip latency, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMillisecondsDistribution, - } - - // Purposely reuses the count from `ClientRoundtripLatency`, tagging - // with method and status to result in ClientCompletedRpcs.
- ClientCompletedRPCsView = &view.View{ - Measure: ClientRoundtripLatency, - Name: "grpc.io/client/completed_rpcs", - Description: "Count of RPCs by method and status.", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - Aggregation: view.Count(), - } - - ClientStartedRPCsView = &view.View{ - Measure: ClientStartedRPCs, - Name: "grpc.io/client/started_rpcs", - Description: "Number of started client RPCs.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: view.Count(), - } - - ClientSentMessagesPerRPCView = &view.View{ - Measure: ClientSentMessagesPerRPC, - Name: "grpc.io/client/sent_messages_per_rpc", - Description: "Distribution of sent messages count per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMessageCountDistribution, - } - - ClientReceivedMessagesPerRPCView = &view.View{ - Measure: ClientReceivedMessagesPerRPC, - Name: "grpc.io/client/received_messages_per_rpc", - Description: "Distribution of received messages count per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMessageCountDistribution, - } - - ClientServerLatencyView = &view.View{ - Measure: ClientServerLatency, - Name: "grpc.io/client/server_latency", - Description: "Distribution of server latency as viewed by client, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMillisecondsDistribution, - } -) - -// DefaultClientViews are the default client views provided by this package. -var DefaultClientViews = []*view.View{ - ClientSentBytesPerRPCView, - ClientReceivedBytesPerRPCView, - ClientRoundtripLatencyView, - ClientCompletedRPCsView, -} - -// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. -// TODO(acetechnologist): This is temporary and will need to be replaced by a -// mechanism to load these defaults from a common repository/config shared by -// all supported languages. Likely a serialized protobuf of these defaults. diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go deleted file mode 100644 index b36349820d..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "context" - "time" - - "go.opencensus.io/tag" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" -) - -// statsTagRPC gets the tag.Map populated by the application code, serializes -// its tags into the GRPC metadata in order to be sent to the server. 
-func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - startTime := time.Now() - if info == nil { - if grpclog.V(2) { - grpclog.Info("clientHandler.TagRPC called with nil info.") - } - return ctx - } - - d := &rpcData{ - startTime: startTime, - method: info.FullMethodName, - } - ts := tag.FromContext(ctx) - if ts != nil { - encoded := tag.Encode(ts) - ctx = stats.SetTags(ctx, encoded) - } - - return context.WithValue(ctx, rpcDataKey, d) -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go deleted file mode 100644 index 1370323fb7..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ocgrpc contains OpenCensus stats and trace -// integrations for gRPC. -// -// Use ServerHandler for servers and ClientHandler for clients. -package ocgrpc // import "go.opencensus.io/plugin/ocgrpc" diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go deleted file mode 100644 index 8a53e09727..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "context" - - "google.golang.org/grpc/stats" - - "go.opencensus.io/trace" -) - -// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and -// traces. Use with gRPC servers. -// -// When installed (see Example), tracing metadata is read from inbound RPCs -// by default. If no tracing metadata is present, or if the tracing metadata is -// present but the SpanContext isn't sampled, then a new trace may be started -// (as determined by Sampler). -type ServerHandler struct { - // IsPublicEndpoint may be set to true to always start a new trace around - // each RPC. Any SpanContext in the RPC metadata will be added as a linked - // span instead of making it the parent of the span created around the - // server RPC. - // - // Be aware that if you leave this false (the default) on a public-facing - // server, callers will be able to send tracing metadata in gRPC headers - // and trigger traces in your backend. - IsPublicEndpoint bool - - // StartOptions to use for spans started around RPCs handled by this server.
- // -// These will apply even if there is tracing metadata already - // present on the inbound RPC but the SpanContext is not sampled. This - // ensures that each service has some opportunity to be traced. If you would - // like to not add any additional traces for this gRPC service, set: - // - // StartOptions.Sampler = trace.ProbabilitySampler(0.0) - // - // StartOptions.SpanKind will always be set to trace.SpanKindServer - // for spans started by this handler. - StartOptions trace.StartOptions -} - -var _ stats.Handler = (*ServerHandler)(nil) - -// HandleConn exists to satisfy gRPC stats.Handler. -func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { - // no-op -} - -// TagConn exists to satisfy gRPC stats.Handler. -func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { - // no-op - return ctx -} - -// HandleRPC implements per-RPC tracing and stats instrumentation. -func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - traceHandleRPC(ctx, rs) - statsHandleRPC(ctx, rs) -} - -// TagRPC implements per-RPC context management. -func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - ctx = s.traceTagRPC(ctx, rti) - ctx = s.statsTagRPC(ctx, rti) - return ctx -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go deleted file mode 100644 index fe0e971086..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// The following variables are measures recorded by ServerHandler: -var ( - ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) - ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) - ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) - ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent across all response messages per RPC.", stats.UnitBytes) - ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless) - ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) -) - -// TODO(acetechnologist): This is temporary and will need to be replaced by a -// mechanism to load these defaults from a common repository/config shared by -// all supported languages. Likely a serialized protobuf of these defaults. - -// Predefined views may be registered to collect data for the above measures. -// As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are registered by -// default. -var ( - ServerReceivedBytesPerRPCView = &view.View{ - Name: "grpc.io/server/received_bytes_per_rpc", - Description: "Distribution of received bytes per RPC, by method.", - Measure: ServerReceivedBytesPerRPC, - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: DefaultBytesDistribution, - } - - ServerSentBytesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_bytes_per_rpc", - Description: "Distribution of total sent bytes per RPC, by method.", - Measure: ServerSentBytesPerRPC, - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: DefaultBytesDistribution, - } - - ServerLatencyView = &view.View{ - Name: "grpc.io/server/server_latency", - Description: "Distribution of server latency in milliseconds, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerLatency, - Aggregation: DefaultMillisecondsDistribution, - } - - // Purposely reuses the count from `ServerLatency`, tagging - // with method and status to result in ServerCompletedRpcs. - ServerCompletedRPCsView = &view.View{ - Name: "grpc.io/server/completed_rpcs", - Description: "Count of RPCs by method and status.", - TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus}, - Measure: ServerLatency, - Aggregation: view.Count(), - } - - ServerStartedRPCsView = &view.View{ - Measure: ServerStartedRPCs, - Name: "grpc.io/server/started_rpcs", - Description: "Number of started server RPCs.", - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: view.Count(), - } - - ServerReceivedMessagesPerRPCView = &view.View{ - Name: "grpc.io/server/received_messages_per_rpc", - Description: "Distribution of messages received count per RPC, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerReceivedMessagesPerRPC, - Aggregation: DefaultMessageCountDistribution, - } - - ServerSentMessagesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_messages_per_rpc", - Description: "Distribution of messages sent count per RPC, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerSentMessagesPerRPC, - Aggregation: DefaultMessageCountDistribution, - } -) - -// DefaultServerViews are the default server views provided by this package.
-var DefaultServerViews = []*view.View{ - ServerReceivedBytesPerRPCView, - ServerSentBytesPerRPCView, - ServerLatencyView, - ServerCompletedRPCsView, -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go deleted file mode 100644 index afcef023af..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "time" - - "context" - - "go.opencensus.io/tag" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" -) - -// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from -// it and creates a new tag.Map and puts them into the returned context. -func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - startTime := time.Now() - if info == nil { - if grpclog.V(2) { - grpclog.Infof("opencensus: TagRPC called with nil info.") - } - return ctx - } - d := &rpcData{ - startTime: startTime, - method: info.FullMethodName, - } - propagated := h.extractPropagatedTags(ctx) - ctx = tag.NewContext(ctx, propagated) - ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName))) - return context.WithValue(ctx, rpcDataKey, d) -} - -// extractPropagatedTags creates a new tag map containing the tags extracted from the -// gRPC metadata. -func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map { - buf := stats.Tags(ctx) - if buf == nil { - return nil - } - propagated, err := tag.Decode(buf) - if err != nil { - if grpclog.V(2) { - grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata: %v", err) - } - return nil - } - return propagated -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go deleted file mode 100644 index 9cb27320ca..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
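The server side mirrors the client: register DefaultServerViews (shown above) and install ServerHandler as the server's stats handler. A minimal sketch; the port and the omitted service registration are placeholders:

```go
package main

import (
	"log"
	"net"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
)

func main() {
	// Opt in to the package's default server views.
	if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
		log.Fatalf("failed to register server views: %v", err)
	}

	lis, err := net.Listen("tcp", ":50051") // placeholder port
	if err != nil {
		log.Fatal(err)
	}
	// Every inbound RPC is traced and measured by the handler.
	srv := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
	// Register services here before serving.
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
```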
-// - -package ocgrpc - -import ( - "context" - "strconv" - "strings" - "sync/atomic" - "time" - - "go.opencensus.io/metric/metricdata" - ocstats "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" -) - -type grpcInstrumentationKey string - -// rpcData holds the instrumentation RPC data that is needed between the start -// and end of a call. It holds the info that this package needs to keep track -// of between the various GRPC events. -type rpcData struct { - // reqCount and respCount have to be the first words - // in order to be 64-aligned on 32-bit architectures. - sentCount, sentBytes, recvCount, recvBytes int64 // access atomically - - // startTime represents the time at which TagRPC was invoked at the - // beginning of an RPC. It is an approximation of the time when the - // application code invoked GRPC code. - startTime time.Time - method string -} - -// The following variables define the default hard-coded auxiliary data used by -// both the default GRPC client and GRPC server metrics. -var ( - DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) - DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) -) - -// Server tags are applied to the context used to process each RPC, as well as -// the measures at the end of each RPC. -var ( - KeyServerMethod = tag.MustNewKey("grpc_server_method") - KeyServerStatus = tag.MustNewKey("grpc_server_status") -) - -// Client tags are applied to measures at the end of each RPC. -var ( - KeyClientMethod = tag.MustNewKey("grpc_client_method") - KeyClientStatus = tag.MustNewKey("grpc_client_status") -) - -var ( - rpcDataKey = grpcInstrumentationKey("opencensus-rpcData") -) - -func methodName(fullname string) string { - return strings.TrimLeft(fullname, "/") -} - -// statsHandleRPC processes the RPC events.
-func statsHandleRPC(ctx context.Context, s stats.RPCStats) { - switch st := s.(type) { - case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: - // do nothing for client - case *stats.Begin: - handleRPCBegin(ctx, st) - case *stats.OutPayload: - handleRPCOutPayload(ctx, st) - case *stats.InPayload: - handleRPCInPayload(ctx, st) - case *stats.End: - handleRPCEnd(ctx, st) - default: - grpclog.Infof("unexpected stats: %T", st) - } -} - -func handleRPCBegin(ctx context.Context, s *stats.Begin) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - } - - if s.IsClient() { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), - ocstats.WithMeasurements(ClientStartedRPCs.M(1))) - } else { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), - ocstats.WithMeasurements(ServerStartedRPCs.M(1))) - } -} - -func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - atomic.AddInt64(&d.sentBytes, int64(s.Length)) - atomic.AddInt64(&d.sentCount, 1) -} - -func handleRPCInPayload(ctx context.Context, s *stats.InPayload) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - atomic.AddInt64(&d.recvBytes, int64(s.Length)) - atomic.AddInt64(&d.recvCount, 1) -} - -func handleRPCEnd(ctx context.Context, s *stats.End) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - elapsedTime := time.Since(d.startTime) - - var st string - if s.Error != nil { - s, ok := status.FromError(s.Error) - if ok { - st = statusCodeToString(s) - } - } else { - st = "OK" - } - - latencyMillis := float64(elapsedTime) / float64(time.Millisecond) - attachments := getSpanCtxAttachment(ctx) - if s.Client { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags( - tag.Upsert(KeyClientMethod, methodName(d.method)), - tag.Upsert(KeyClientStatus, st)), - ocstats.WithAttachments(attachments), - ocstats.WithMeasurements( - ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ClientRoundtripLatency.M(latencyMillis))) - } else { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags( - tag.Upsert(KeyServerStatus, st), - ), - ocstats.WithAttachments(attachments), - ocstats.WithMeasurements( - ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ServerLatency.M(latencyMillis))) - } -} - -func statusCodeToString(s *status.Status) string { - // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md - switch c := s.Code(); c { - case codes.OK: - return "OK" - case codes.Canceled: - return "CANCELLED" - case codes.Unknown: - return "UNKNOWN" - case codes.InvalidArgument: - return "INVALID_ARGUMENT" - case codes.DeadlineExceeded: - return "DEADLINE_EXCEEDED" - case codes.NotFound: - 
return "NOT_FOUND" - case codes.AlreadyExists: - return "ALREADY_EXISTS" - case codes.PermissionDenied: - return "PERMISSION_DENIED" - case codes.ResourceExhausted: - return "RESOURCE_EXHAUSTED" - case codes.FailedPrecondition: - return "FAILED_PRECONDITION" - case codes.Aborted: - return "ABORTED" - case codes.OutOfRange: - return "OUT_OF_RANGE" - case codes.Unimplemented: - return "UNIMPLEMENTED" - case codes.Internal: - return "INTERNAL" - case codes.Unavailable: - return "UNAVAILABLE" - case codes.DataLoss: - return "DATA_LOSS" - case codes.Unauthenticated: - return "UNAUTHENTICATED" - default: - return "CODE_" + strconv.FormatInt(int64(c), 10) - } -} - -func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments { - attachments := map[string]interface{}{} - span := trace.FromContext(ctx) - if span == nil { - return attachments - } - spanCtx := span.SpanContext() - if spanCtx.IsSampled() { - attachments[metricdata.AttachmentKeySpanContext] = spanCtx - } - return attachments -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go deleted file mode 100644 index 61bc543d0a..0000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "context" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -const traceContextKey = "grpc-trace-bin" - -// TagRPC creates a new trace span for the client side of the RPC. -// -// It returns ctx with the new trace span added and a serialization of the -// SpanContext added to the outgoing gRPC metadata. -func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - name := strings.TrimPrefix(rti.FullMethodName, "/") - name = strings.Replace(name, "/", ".", -1) - ctx, span := trace.StartSpan(ctx, name, - trace.WithSampler(c.StartOptions.Sampler), - trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC - traceContextBinary := propagation.Binary(span.SpanContext()) - return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary)) -} - -// TagRPC creates a new trace span for the server side of the RPC. -// -// It checks the incoming gRPC metadata in ctx for a SpanContext, and if -// it finds one, uses that SpanContext as the parent context of the new span. -// -// It returns ctx, with the new trace span added. 
-func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - md, _ := metadata.FromIncomingContext(ctx) - name := strings.TrimPrefix(rti.FullMethodName, "/") - name = strings.Replace(name, "/", ".", -1) - traceContext := md[traceContextKey] - var ( - parent trace.SpanContext - haveParent bool - ) - if len(traceContext) > 0 { - // Metadata with keys ending in -bin are actually binary. They are base64 - // encoded before being put on the wire, see: - // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata - traceContextBinary := []byte(traceContext[0]) - parent, haveParent = propagation.FromBinary(traceContextBinary) - if haveParent && !s.IsPublicEndpoint { - ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithSampler(s.StartOptions.Sampler), - ) - return ctx - } - } - ctx, span := trace.StartSpan(ctx, name, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithSampler(s.StartOptions.Sampler)) - if haveParent { - span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild}) - } - return ctx -} - -func traceHandleRPC(ctx context.Context, rs stats.RPCStats) { - span := trace.FromContext(ctx) - // TODO: compressed and uncompressed sizes are not populated in every message. - switch rs := rs.(type) { - case *stats.Begin: - span.AddAttributes( - trace.BoolAttribute("Client", rs.Client), - trace.BoolAttribute("FailFast", rs.FailFast)) - case *stats.InPayload: - span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength)) - case *stats.OutPayload: - span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength)) - case *stats.End: - if rs.Error != nil { - s, ok := status.FromError(rs.Error) - if ok { - span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) - } else { - span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()}) - } - } - span.End() - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go deleted file mode 100644 index da815b2a73..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/client.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "net/http" - "net/http/httptrace" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// Transport is an http.RoundTripper that instruments all outgoing requests with -// OpenCensus stats and tracing. -// -// The zero value is intended to be a useful default, but for -// now it's recommended that you explicitly set Propagation, since the default -// for this may change. -type Transport struct { - // Base may be set to wrap another http.RoundTripper that does the actual - // requests. By default http.DefaultTransport is used. 
- // - // If base HTTP roundtripper implements CancelRequest, - // the returned round tripper will be cancelable. - Base http.RoundTripper - - // Propagation defines how traces are propagated. If unspecified, a default - // (currently B3 format) will be used. - Propagation propagation.HTTPFormat - - // StartOptions are applied to the span started by this Transport around each - // request. - // - // StartOptions.SpanKind will always be set to trace.SpanKindClient - // for spans started by this transport. - StartOptions trace.StartOptions - - // GetStartOptions allows setting start options per request. If set, - // StartOptions is ignored. - GetStartOptions func(*http.Request) trace.StartOptions - - // FormatSpanName holds the function to use for generating the span name - // from the information found in the outgoing HTTP Request. By default the - // name equals the URL Path. - FormatSpanName func(*http.Request) string - - // NewClientTrace may be set to a function allowing the current *trace.Span - // to be annotated with HTTP request event information emitted by the - // httptrace package. - NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace - - // TODO: Implement tag propagation for HTTP. -} - -// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - rt := t.base() - if isHealthEndpoint(req.URL.Path) { - return rt.RoundTrip(req) - } - // TODO: remove excessive nesting of http.RoundTrippers here. - format := t.Propagation - if format == nil { - format = defaultFormat - } - spanNameFormatter := t.FormatSpanName - if spanNameFormatter == nil { - spanNameFormatter = spanNameFromURL - } - - startOpts := t.StartOptions - if t.GetStartOptions != nil { - startOpts = t.GetStartOptions(req) - } - - rt = &traceTransport{ - base: rt, - format: format, - startOptions: trace.StartOptions{ - Sampler: startOpts.Sampler, - SpanKind: trace.SpanKindClient, - }, - formatSpanName: spanNameFormatter, - newClientTrace: t.NewClientTrace, - } - rt = statsTransport{base: rt} - return rt.RoundTrip(req) -} - -func (t *Transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - cr.CancelRequest(req) - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go deleted file mode 100644 index 17142aabe0..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
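// A client-side usage sketch for the Transport deleted above (the propagation
// format and span-name function are illustrative choices):

package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

func main() {
	client := &http.Client{
		Transport: &ochttp.Transport{
			// Set Propagation explicitly, as the struct docs recommend;
			// B3 happens to be the package default.
			Propagation: &b3.HTTPFormat{},
			// FormatSpanName overrides the default URL-path span name.
			FormatSpanName: func(r *http.Request) string {
				return "HTTP " + r.Method + " " + r.URL.Path
			},
		},
	}
	resp, err := client.Get("https://example.com/users")
	if err == nil {
		// The client span ends when the body hits io.EOF or is closed.
		resp.Body.Close()
	}
}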
- -package ochttp - -import ( - "context" - "io" - "net/http" - "strconv" - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" -) - -// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. -type statsTransport struct { - base http.RoundTripper -} - -// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. -func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { - ctx, _ := tag.New(req.Context(), - tag.Upsert(KeyClientHost, req.Host), - tag.Upsert(Host, req.Host), - tag.Upsert(KeyClientPath, req.URL.Path), - tag.Upsert(Path, req.URL.Path), - tag.Upsert(KeyClientMethod, req.Method), - tag.Upsert(Method, req.Method)) - req = req.WithContext(ctx) - track := &tracker{ - start: time.Now(), - ctx: ctx, - } - if req.Body == nil { - // TODO: Handle cases where ContentLength is not set. - track.reqSize = -1 - } else if req.ContentLength > 0 { - track.reqSize = req.ContentLength - } - stats.Record(ctx, ClientRequestCount.M(1)) - - // Perform request. - resp, err := t.base.RoundTrip(req) - - if err != nil { - track.statusCode = http.StatusInternalServerError - track.end() - } else { - track.statusCode = resp.StatusCode - if req.Method != "HEAD" { - track.respContentLength = resp.ContentLength - } - if resp.Body == nil { - track.end() - } else { - track.body = resp.Body - resp.Body = wrappedBody(track, resp.Body) - } - } - return resp, err -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t statsTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base.(canceler); ok { - cr.CancelRequest(req) - } -} - -type tracker struct { - ctx context.Context - respSize int64 - respContentLength int64 - reqSize int64 - start time.Time - body io.ReadCloser - statusCode int - endOnce sync.Once -} - -var _ io.ReadCloser = (*tracker)(nil) - -func (t *tracker) end() { - t.endOnce.Do(func() { - latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) - respSize := t.respSize - if t.respSize == 0 && t.respContentLength > 0 { - respSize = t.respContentLength - } - m := []stats.Measurement{ - ClientSentBytes.M(t.reqSize), - ClientReceivedBytes.M(respSize), - ClientRoundtripLatency.M(latencyMs), - ClientLatency.M(latencyMs), - ClientResponseBytes.M(t.respSize), - } - if t.reqSize >= 0 { - m = append(m, ClientRequestBytes.M(t.reqSize)) - } - - stats.RecordWithTags(t.ctx, []tag.Mutator{ - tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), - tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), - }, m...) - }) -} - -func (t *tracker) Read(b []byte) (int, error) { - n, err := t.body.Read(b) - t.respSize += int64(n) - switch err { - case nil: - return n, nil - case io.EOF: - t.end() - } - return n, err -} - -func (t *tracker) Close() error { - // Invoking endSpan on Close will help catch the cases - // in which a read returned a non-nil error, we set the - // span status but didn't end the span. - t.end() - return t.body.Close() -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go deleted file mode 100644 index 10e626b16e..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ochttp provides OpenCensus instrumentation for net/http package. -// -// For server instrumentation, see Handler. For client-side instrumentation, -// see Transport. -package ochttp // import "go.opencensus.io/plugin/ochttp" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go deleted file mode 100644 index 9ad8852198..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package b3 contains a propagation.HTTPFormat implementation -// for B3 propagation. See https://github.com/openzipkin/b3-propagation -// for more details. -package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3" - -import ( - "encoding/hex" - "net/http" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// B3 headers that OpenCensus understands. -const ( - TraceIDHeader = "X-B3-TraceId" - SpanIDHeader = "X-B3-SpanId" - SampledHeader = "X-B3-Sampled" -) - -// HTTPFormat implements propagation.HTTPFormat to propagate -// traces in HTTP headers in B3 propagation format. -// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers -// because there are additional fields not represented in the -// OpenCensus span context. Spans created from the incoming -// header will be the direct children of the client-side span. -// Similarly, receiver of the outgoing spans should use client-side -// span created by OpenCensus as the parent. -type HTTPFormat struct{} - -var _ propagation.HTTPFormat = (*HTTPFormat)(nil) - -// SpanContextFromRequest extracts a B3 span context from incoming requests. -func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader)) - if !ok { - return trace.SpanContext{}, false - } - sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader)) - if !ok { - return trace.SpanContext{}, false - } - sampled, _ := ParseSampled(req.Header.Get(SampledHeader)) - return trace.SpanContext{ - TraceID: tid, - SpanID: sid, - TraceOptions: sampled, - }, true -} - -// ParseTraceID parses the value of the X-B3-TraceId header. -func ParseTraceID(tid string) (trace.TraceID, bool) { - if tid == "" { - return trace.TraceID{}, false - } - b, err := hex.DecodeString(tid) - if err != nil || len(b) > 16 { - return trace.TraceID{}, false - } - var traceID trace.TraceID - if len(b) <= 8 { - // The lower 64-bits. 
- start := 8 + (8 - len(b)) - copy(traceID[start:], b) - } else { - start := 16 - len(b) - copy(traceID[start:], b) - } - - return traceID, true -} - -// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers. -func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { - if sid == "" { - return trace.SpanID{}, false - } - b, err := hex.DecodeString(sid) - if err != nil || len(b) > 8 { - return trace.SpanID{}, false - } - start := 8 - len(b) - copy(spanID[start:], b) - return spanID, true -} - -// ParseSampled parses the value of the X-B3-Sampled header. -func ParseSampled(sampled string) (trace.TraceOptions, bool) { - switch sampled { - case "true", "1": - return trace.TraceOptions(1), true - default: - return trace.TraceOptions(0), false - } -} - -// SpanContextToRequest modifies the given request to include B3 headers. -func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:])) - req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:])) - - var sampled string - if sc.IsSampled() { - sampled = "1" - } else { - sampled = "0" - } - req.Header.Set(SampledHeader, sampled) -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go deleted file mode 100644 index 5e6a343076..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/route.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "context" - "net/http" - - "go.opencensus.io/tag" -) - -// SetRoute sets the http_server_route tag to the given value. -// It's useful when an HTTP framework does not support the http.Handler interface -// and using WithRouteTag is not an option, but provides a way to hook into the request flow. -func SetRoute(ctx context.Context, route string) { - if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { - a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) - } -} - -// WithRouteTag returns an http.Handler that records stats with the -// http_server_route tag set to the given value. -func WithRouteTag(handler http.Handler, route string) http.Handler { - return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { - addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} - ctx, _ := tag.New(r.Context(), addRoute...) - r = r.WithContext(ctx) - handler.ServeHTTP(w, r) - return addRoute - }) -} - -// taggedHandlerFunc is a http.Handler that returns tags describing the -// processing of the request. These tags will be recorded along with the -// measures in this package at the end of the request. -type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator - -func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { - tags := h(w, r) - if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { - a.t = append(a.t, tags...) 
- } -} - -type addedTagsKey struct{} - -type addedTags struct { - t []tag.Mutator -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go deleted file mode 100644 index f7c8434be0..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "context" - "io" - "net/http" - "strconv" - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// Handler is an http.Handler wrapper to instrument your HTTP server with -// OpenCensus. It supports both stats and tracing. -// -// # Tracing -// -// This handler is aware of the incoming request's span, reading it from request -// headers as configured using the Propagation field. -// The extracted span can be accessed from the incoming request's -// context. -// -// span := trace.FromContext(r.Context()) -// -// The server span will be automatically ended at the end of ServeHTTP. -type Handler struct { - // Propagation defines how traces are propagated. If unspecified, - // B3 propagation will be used. - Propagation propagation.HTTPFormat - - // Handler is the handler used to handle the incoming request. - Handler http.Handler - - // StartOptions are applied to the span started by this Handler around each - // request. - // - // StartOptions.SpanKind will always be set to trace.SpanKindServer - // for spans started by this handler. - StartOptions trace.StartOptions - - // GetStartOptions allows setting start options per request. If set, - // StartOptions is ignored. - GetStartOptions func(*http.Request) trace.StartOptions - - // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) - // servers. If true, any trace metadata set on the incoming request will - // be added as a linked trace instead of being added as a parent of the - // current trace. - IsPublicEndpoint bool - - // FormatSpanName holds the function to use for generating the span name - // from the information found in the incoming HTTP Request. By default the - // name equals the URL Path. - FormatSpanName func(*http.Request) string - - // IsHealthEndpoint holds the function to use for determining if the - // incoming HTTP request should be considered a health check. This is in - // addition to the private isHealthEndpoint func which may also indicate - // tracing should be skipped.
- IsHealthEndpoint func(*http.Request) bool -} - -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var tags addedTags - r, traceEnd := h.startTrace(w, r) - defer traceEnd() - w, statsEnd := h.startStats(w, r) - defer statsEnd(&tags) - handler := h.Handler - if handler == nil { - handler = http.DefaultServeMux - } - r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) - handler.ServeHTTP(w, r) -} - -func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) { - return r, func() {} - } - var name string - if h.FormatSpanName == nil { - name = spanNameFromURL(r) - } else { - name = h.FormatSpanName(r) - } - ctx := r.Context() - - startOpts := h.StartOptions - if h.GetStartOptions != nil { - startOpts = h.GetStartOptions(r) - } - - var span *trace.Span - sc, ok := h.extractSpanContext(r) - if ok && !h.IsPublicEndpoint { - ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, - trace.WithSampler(startOpts.Sampler), - trace.WithSpanKind(trace.SpanKindServer)) - } else { - ctx, span = trace.StartSpan(ctx, name, - trace.WithSampler(startOpts.Sampler), - trace.WithSpanKind(trace.SpanKindServer), - ) - if ok { - span.AddLink(trace.Link{ - TraceID: sc.TraceID, - SpanID: sc.SpanID, - Type: trace.LinkTypeParent, - Attributes: nil, - }) - } - } - span.AddAttributes(requestAttrs(r)...) - if r.Body == nil { - // TODO: Handle cases where ContentLength is not set. - } else if r.ContentLength > 0 { - span.AddMessageReceiveEvent(0, /* TODO: messageID */ - r.ContentLength, -1) - } - return r.WithContext(ctx), span.End -} - -func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { - if h.Propagation == nil { - return defaultFormat.SpanContextFromRequest(r) - } - return h.Propagation.SpanContextFromRequest(r) -} - -func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { - ctx, _ := tag.New(r.Context(), - tag.Upsert(Host, r.Host), - tag.Upsert(Path, r.URL.Path), - tag.Upsert(Method, r.Method)) - track := &trackingResponseWriter{ - start: time.Now(), - ctx: ctx, - writer: w, - } - if r.Body == nil { - // TODO: Handle cases where ContentLength is not set. 
- track.reqSize = -1 - } else if r.ContentLength > 0 { - track.reqSize = r.ContentLength - } - stats.Record(ctx, ServerRequestCount.M(1)) - return track.wrappedResponseWriter(), track.end -} - -type trackingResponseWriter struct { - ctx context.Context - reqSize int64 - respSize int64 - start time.Time - statusCode int - statusLine string - endOnce sync.Once - writer http.ResponseWriter -} - -// Compile time assertion for ResponseWriter interface -var _ http.ResponseWriter = (*trackingResponseWriter)(nil) - -func (t *trackingResponseWriter) end(tags *addedTags) { - t.endOnce.Do(func() { - if t.statusCode == 0 { - t.statusCode = 200 - } - - span := trace.FromContext(t.ctx) - span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) - span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) - - m := []stats.Measurement{ - ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), - ServerResponseBytes.M(t.respSize), - } - if t.reqSize >= 0 { - m = append(m, ServerRequestBytes.M(t.reqSize)) - } - allTags := make([]tag.Mutator, len(tags.t)+1) - allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) - copy(allTags[1:], tags.t) - stats.RecordWithTags(t.ctx, allTags, m...) - }) -} - -func (t *trackingResponseWriter) Header() http.Header { - return t.writer.Header() -} - -func (t *trackingResponseWriter) Write(data []byte) (int, error) { - n, err := t.writer.Write(data) - t.respSize += int64(n) - // Add message event for request bytes sent. - span := trace.FromContext(t.ctx) - span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1) - return n, err -} - -func (t *trackingResponseWriter) WriteHeader(statusCode int) { - t.writer.WriteHeader(statusCode) - t.statusCode = statusCode - t.statusLine = http.StatusText(t.statusCode) -} - -// wrappedResponseWriter returns a wrapped version of the original -// -// ResponseWriter and only implements the same combination of additional -// -// interfaces as the original. -// This implementation is based on https://github.com/felixge/httpsnoop. 
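// Why the combinatorial switch below exists: a plain wrapper struct hides the
// optional interfaces of the underlying writer from type assertions. A
// hypothetical demonstration:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// naiveWrapper embeds only http.ResponseWriter, so even if the wrapped
// writer implements http.Flusher, the promoted method set does not include
// Flush.
type naiveWrapper struct{ http.ResponseWriter }

func main() {
	rec := httptest.NewRecorder() // implements http.Flusher

	_, ok := interface{}(rec).(http.Flusher)
	fmt.Println(ok) // true

	_, ok = interface{}(naiveWrapper{rec}).(http.Flusher)
	fmt.Println(ok) // false: Flush was lost by the wrapper
}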
-func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { - var ( - hj, i0 = t.writer.(http.Hijacker) - cn, i1 = t.writer.(http.CloseNotifier) - pu, i2 = t.writer.(http.Pusher) - fl, i3 = t.writer.(http.Flusher) - rf, i4 = t.writer.(io.ReaderFrom) - ) - - switch { - case !i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - }{t} - case !i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - io.ReaderFrom - }{t, rf} - case !i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - }{t, fl} - case !i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{t, fl, rf} - case !i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Pusher - }{t, pu} - case !i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - io.ReaderFrom - }{t, pu, rf} - case !i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Pusher - http.Flusher - }{t, pu, fl} - case !i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - http.Flusher - io.ReaderFrom - }{t, pu, fl, rf} - case !i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - }{t, cn} - case !i0 && i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{t, cn, rf} - case !i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - }{t, cn, fl} - case !i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{t, cn, fl, rf} - case !i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - }{t, cn, pu} - case !i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - io.ReaderFrom - }{t, cn, pu, rf} - case !i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - http.Flusher - }{t, cn, pu, fl} - case !i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - http.Flusher - io.ReaderFrom - }{t, cn, pu, fl, rf} - case i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - }{t, hj} - case i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{t, hj, rf} - case i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - }{t, hj, fl} - case i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - io.ReaderFrom - }{t, hj, fl, rf} - case i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - }{t, hj, pu} - case i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - io.ReaderFrom - }{t, hj, pu, rf} - case i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - http.Flusher - }{t, hj, pu, fl} - case i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - http.Flusher - io.ReaderFrom - }{t, hj, pu, fl, rf} - case i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - }{t, hj, cn} - case i0 && i1 && !i2 && !i3 && i4: - return struct { - 
http.ResponseWriter - http.Hijacker - http.CloseNotifier - io.ReaderFrom - }{t, hj, cn, rf} - case i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - }{t, hj, cn, fl} - case i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{t, hj, cn, fl, rf} - case i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - }{t, hj, cn, pu} - case i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - io.ReaderFrom - }{t, hj, cn, pu, rf} - case i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - http.Flusher - }{t, hj, cn, pu, fl} - case i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - http.Flusher - io.ReaderFrom - }{t, hj, cn, pu, fl, rf} - default: - return struct { - http.ResponseWriter - }{t} - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go deleted file mode 100644 index 05c6c56cc7..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "crypto/tls" - "net/http" - "net/http/httptrace" - "strings" - - "go.opencensus.io/trace" -) - -type spanAnnotator struct { - sp *trace.Span -} - -// TODO: Remove NewSpanAnnotator at the next release. - -// NewSpanAnnotator returns a httptrace.ClientTrace which annotates -// all emitted httptrace events on the provided Span. -// Deprecated: Use NewSpanAnnotatingClientTrace instead -func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { - return NewSpanAnnotatingClientTrace(r, s) -} - -// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates -// all emitted httptrace events on the provided Span. 
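// A usage sketch: the constructor documented above plugs directly into the
// Transport's NewClientTrace hook, so httptrace's DNS, connect, and TLS
// phases become annotations on each client span:

package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	client := &http.Client{
		Transport: &ochttp.Transport{
			// The function value matches the hook's signature:
			// func(*http.Request, *trace.Span) *httptrace.ClientTrace.
			NewClientTrace: ochttp.NewSpanAnnotatingClientTrace,
		},
	}
	_ = client
}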
-func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { - sa := spanAnnotator{sp: s} - - return &httptrace.ClientTrace{ - GetConn: sa.getConn, - GotConn: sa.gotConn, - PutIdleConn: sa.putIdleConn, - GotFirstResponseByte: sa.gotFirstResponseByte, - Got100Continue: sa.got100Continue, - DNSStart: sa.dnsStart, - DNSDone: sa.dnsDone, - ConnectStart: sa.connectStart, - ConnectDone: sa.connectDone, - TLSHandshakeStart: sa.tlsHandshakeStart, - TLSHandshakeDone: sa.tlsHandshakeDone, - WroteHeaders: sa.wroteHeaders, - Wait100Continue: sa.wait100Continue, - WroteRequest: sa.wroteRequest, - } -} - -func (s spanAnnotator) getConn(hostPort string) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.get_connection.host_port", hostPort), - } - s.sp.Annotate(attrs, "GetConn") -} - -func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { - attrs := []trace.Attribute{ - trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), - trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), - } - if info.WasIdle { - attrs = append(attrs, - trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) - } - s.sp.Annotate(attrs, "GotConn") -} - -// PutIdleConn implements a httptrace.ClientTrace hook -func (s spanAnnotator) putIdleConn(err error) { - var attrs []trace.Attribute - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) - } - s.sp.Annotate(attrs, "PutIdleConn") -} - -func (s spanAnnotator) gotFirstResponseByte() { - s.sp.Annotate(nil, "GotFirstResponseByte") -} - -func (s spanAnnotator) got100Continue() { - s.sp.Annotate(nil, "Got100Continue") -} - -func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.dns_start.host", info.Host), - } - s.sp.Annotate(attrs, "DNSStart") -} - -func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { - var addrs []string - for _, addr := range info.Addrs { - addrs = append(addrs, addr.String()) - } - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), - } - if info.Err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) - } - s.sp.Annotate(attrs, "DNSDone") -} - -func (s spanAnnotator) connectStart(network, addr string) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.connect_start.network", network), - trace.StringAttribute("httptrace.connect_start.addr", addr), - } - s.sp.Annotate(attrs, "ConnectStart") -} - -func (s spanAnnotator) connectDone(network, addr string, err error) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.connect_done.network", network), - trace.StringAttribute("httptrace.connect_done.addr", addr), - } - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.connect_done.error", err.Error())) - } - s.sp.Annotate(attrs, "ConnectDone") -} - -func (s spanAnnotator) tlsHandshakeStart() { - s.sp.Annotate(nil, "TLSHandshakeStart") -} - -func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { - var attrs []trace.Attribute - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) - } - s.sp.Annotate(attrs, "TLSHandshakeDone") -} - -func (s spanAnnotator) wroteHeaders() { - s.sp.Annotate(nil, "WroteHeaders") -} - -func (s spanAnnotator) wait100Continue() { - s.sp.Annotate(nil, 
"Wait100Continue") -} - -func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { - var attrs []trace.Attribute - if info.Err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) - } - s.sp.Annotate(attrs, "WroteRequest") -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go deleted file mode 100644 index ee3729040d..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// Deprecated: client HTTP measures. -var ( - // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. - ClientRequestCount = stats.Int64( - "opencensus.io/http/client/request_count", - "Number of HTTP requests started", - stats.UnitDimensionless) - // Deprecated: Use ClientSentBytes. - ClientRequestBytes = stats.Int64( - "opencensus.io/http/client/request_bytes", - "HTTP request body size if set as ContentLength (uncompressed)", - stats.UnitBytes) - // Deprecated: Use ClientReceivedBytes. - ClientResponseBytes = stats.Int64( - "opencensus.io/http/client/response_bytes", - "HTTP response body size (uncompressed)", - stats.UnitBytes) - // Deprecated: Use ClientRoundtripLatency. - ClientLatency = stats.Float64( - "opencensus.io/http/client/latency", - "End-to-end latency", - stats.UnitMilliseconds) -) - -// The following client HTTP measures are supported for use in custom views. 
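// A sketch of a custom view over the client measures declared just below
// (the view name is illustrative). Nothing is exported until a view like
// this is registered:

package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

func main() {
	latencyByMethod := &view.View{
		Name:        "example.com/http/client/roundtrip_latency_by_method",
		Description: "Client round-trip latency, by method and status",
		Measure:     ochttp.ClientRoundtripLatency,
		Aggregation: ochttp.DefaultLatencyDistribution,
		TagKeys:     []tag.Key{ochttp.KeyClientMethod, ochttp.KeyClientStatus},
	}
	if err := view.Register(latencyByMethod); err != nil {
		log.Fatal(err)
	}
}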
-var ( - ClientSentBytes = stats.Int64( - "opencensus.io/http/client/sent_bytes", - "Total bytes sent in request body (not including headers)", - stats.UnitBytes, - ) - ClientReceivedBytes = stats.Int64( - "opencensus.io/http/client/received_bytes", - "Total bytes received in response bodies (not including headers but including error responses with bodies)", - stats.UnitBytes, - ) - ClientRoundtripLatency = stats.Float64( - "opencensus.io/http/client/roundtrip_latency", - "Time between first byte of request headers sent to last byte of response received, or terminal error", - stats.UnitMilliseconds, - ) -) - -// The following server HTTP measures are supported for use in custom views: -var ( - ServerRequestCount = stats.Int64( - "opencensus.io/http/server/request_count", - "Number of HTTP requests started", - stats.UnitDimensionless) - ServerRequestBytes = stats.Int64( - "opencensus.io/http/server/request_bytes", - "HTTP request body size if set as ContentLength (uncompressed)", - stats.UnitBytes) - ServerResponseBytes = stats.Int64( - "opencensus.io/http/server/response_bytes", - "HTTP response body size (uncompressed)", - stats.UnitBytes) - ServerLatency = stats.Float64( - "opencensus.io/http/server/latency", - "End-to-end latency", - stats.UnitMilliseconds) -) - -// The following tags are applied to stats recorded by this package. Host, Path -// and Method are applied to all measures. StatusCode is not applied to -// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. -var ( - // Host is the value of the HTTP Host header. - // - // The value of this tag can be controlled by the HTTP client, so you need - // to watch out for potentially generating high-cardinality labels in your - // metrics backend if you use this tag in views. - Host = tag.MustNewKey("http.host") - - // StatusCode is the numeric HTTP response status code, - // or "error" if a transport error occurred and no status code was read. - StatusCode = tag.MustNewKey("http.status") - - // Path is the URL path (not including query string) in the request. - // - // The value of this tag can be controlled by the HTTP client, so you need - // to watch out for potentially generating high-cardinality labels in your - // metrics backend if you use this tag in views. - Path = tag.MustNewKey("http.path") - - // Method is the HTTP method of the request, capitalized (GET, POST, etc.). - Method = tag.MustNewKey("http.method") - - // KeyServerRoute is a low cardinality string representing the logical - // handler of the request. This is usually the pattern registered on the - // ServeMux (or similar string). - KeyServerRoute = tag.MustNewKey("http_server_route") -) - -// Client tag keys. -var ( - // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). - KeyClientMethod = tag.MustNewKey("http_client_method") - // KeyClientPath is the URL path (not including query string). - KeyClientPath = tag.MustNewKey("http_client_path") - // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500), or "error" if no response status line was received. - KeyClientStatus = tag.MustNewKey("http_client_status") - // KeyClientHost is the value of the request Host header. - KeyClientHost = tag.MustNewKey("http_client_host") -) - -// Default distributions used by views in this package.
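// Tying together the server pieces removed in this diff: Handler (server.go),
// WithRouteTag (route.go), and the KeyServerRoute tag declared just above.
// The route pattern and port are illustrative:

package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	mux := http.NewServeMux()
	// WithRouteTag keeps http_server_route low-cardinality even when
	// request paths embed IDs.
	mux.Handle("/users/", ochttp.WithRouteTag(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNoContent)
		}),
		"/users/:id"))

	// Handler starts the server span and records the server measures.
	_ = http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux})
}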
-var ( - DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) -) - -// Package ochttp provides some convenience views for client measures. -// You still need to register these views for data to actually be collected. -var ( - ClientSentBytesDistribution = &view.View{ - Name: "opencensus.io/http/client/sent_bytes", - Measure: ClientSentBytes, - Aggregation: DefaultSizeDistribution, - Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientReceivedBytesDistribution = &view.View{ - Name: "opencensus.io/http/client/received_bytes", - Measure: ClientReceivedBytes, - Aggregation: DefaultSizeDistribution, - Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientRoundtripLatencyDistribution = &view.View{ - Name: "opencensus.io/http/client/roundtrip_latency", - Measure: ClientRoundtripLatency, - Aggregation: DefaultLatencyDistribution, - Description: "End-to-end latency, by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientCompletedCount = &view.View{ - Name: "opencensus.io/http/client/completed_count", - Measure: ClientRoundtripLatency, - Aggregation: view.Count(), - Description: "Count of completed requests, by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } -) - -// Deprecated: Old client Views. -var ( - // Deprecated: No direct replacement, but see ClientCompletedCount. - ClientRequestCountView = &view.View{ - Name: "opencensus.io/http/client/request_count", - Description: "Count of HTTP requests started", - Measure: ClientRequestCount, - Aggregation: view.Count(), - } - - // Deprecated: Use ClientSentBytesDistribution. - ClientRequestBytesView = &view.View{ - Name: "opencensus.io/http/client/request_bytes", - Description: "Size distribution of HTTP request body", - Measure: ClientSentBytes, - Aggregation: DefaultSizeDistribution, - } - - // Deprecated: Use ClientReceivedBytesDistribution instead. - ClientResponseBytesView = &view.View{ - Name: "opencensus.io/http/client/response_bytes", - Description: "Size distribution of HTTP response body", - Measure: ClientReceivedBytes, - Aggregation: DefaultSizeDistribution, - } - - // Deprecated: Use ClientRoundtripLatencyDistribution instead. - ClientLatencyView = &view.View{ - Name: "opencensus.io/http/client/latency", - Description: "Latency distribution of HTTP requests", - Measure: ClientRoundtripLatency, - Aggregation: DefaultLatencyDistribution, - } - - // Deprecated: Use ClientCompletedCount instead. - ClientRequestCountByMethod = &view.View{ - Name: "opencensus.io/http/client/request_count_by_method", - Description: "Client request count by HTTP method", - TagKeys: []tag.Key{Method}, - Measure: ClientSentBytes, - Aggregation: view.Count(), - } - - // Deprecated: Use ClientCompletedCount instead. 
- ClientResponseCountByStatusCode = &view.View{ - Name: "opencensus.io/http/client/response_count_by_status_code", - Description: "Client response count by status code", - TagKeys: []tag.Key{StatusCode}, - Measure: ClientRoundtripLatency, - Aggregation: view.Count(), - } -) - -// Package ochttp provides some convenience views for server measures. -// You still need to register these views for data to actually be collected. -var ( - ServerRequestCountView = &view.View{ - Name: "opencensus.io/http/server/request_count", - Description: "Count of HTTP requests started", - Measure: ServerRequestCount, - Aggregation: view.Count(), - } - - ServerRequestBytesView = &view.View{ - Name: "opencensus.io/http/server/request_bytes", - Description: "Size distribution of HTTP request body", - Measure: ServerRequestBytes, - Aggregation: DefaultSizeDistribution, - } - - ServerResponseBytesView = &view.View{ - Name: "opencensus.io/http/server/response_bytes", - Description: "Size distribution of HTTP response body", - Measure: ServerResponseBytes, - Aggregation: DefaultSizeDistribution, - } - - ServerLatencyView = &view.View{ - Name: "opencensus.io/http/server/latency", - Description: "Latency distribution of HTTP requests", - Measure: ServerLatency, - Aggregation: DefaultLatencyDistribution, - } - - ServerRequestCountByMethod = &view.View{ - Name: "opencensus.io/http/server/request_count_by_method", - Description: "Server request count by HTTP method", - TagKeys: []tag.Key{Method}, - Measure: ServerRequestCount, - Aggregation: view.Count(), - } - - ServerResponseCountByStatusCode = &view.View{ - Name: "opencensus.io/http/server/response_count_by_status_code", - Description: "Server response count by status code", - TagKeys: []tag.Key{StatusCode}, - Measure: ServerLatency, - Aggregation: view.Count(), - } -) - -// DefaultClientViews are the default client views provided by this package. -// Deprecated: No replacement. Register the views you would like individually. -var DefaultClientViews = []*view.View{ - ClientRequestCountView, - ClientRequestBytesView, - ClientResponseBytesView, - ClientLatencyView, - ClientRequestCountByMethod, - ClientResponseCountByStatusCode, -} - -// DefaultServerViews are the default server views provided by this package. -// Deprecated: No replacement. Register the views you would like individually. -var DefaultServerViews = []*view.View{ - ServerRequestCountView, - ServerRequestBytesView, - ServerResponseBytesView, - ServerLatencyView, - ServerRequestCountByMethod, - ServerResponseCountByStatusCode, -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go deleted file mode 100644 index ed3a5db561..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ochttp - -import ( - "io" - "net/http" - "net/http/httptrace" - - "go.opencensus.io/plugin/ochttp/propagation/b3" - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// TODO(jbd): Add godoc examples. - -var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} - -// Attributes recorded on the span for the requests. -// Only trace exporters will need them. -const ( - HostAttribute = "http.host" - MethodAttribute = "http.method" - PathAttribute = "http.path" - URLAttribute = "http.url" - UserAgentAttribute = "http.user_agent" - StatusCodeAttribute = "http.status_code" -) - -type traceTransport struct { - base http.RoundTripper - startOptions trace.StartOptions - format propagation.HTTPFormat - formatSpanName func(*http.Request) string - newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace -} - -// TODO(jbd): Add message events for request and response size. - -// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. -// The created span can follow a parent span, if a parent is presented in -// the request's context. -func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { - name := t.formatSpanName(req) - // TODO(jbd): Discuss whether we want to prefix - // outgoing requests with Sent. - ctx, span := trace.StartSpan(req.Context(), name, - trace.WithSampler(t.startOptions.Sampler), - trace.WithSpanKind(trace.SpanKindClient)) - - if t.newClientTrace != nil { - req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) - } else { - req = req.WithContext(ctx) - } - - if t.format != nil { - // SpanContextToRequest will modify its Request argument, which is - // contrary to the contract for http.RoundTripper, so we need to - // pass it a copy of the Request. - // However, the Request struct itself was already copied by - // the WithContext calls above and so we just need to copy the header. - header := make(http.Header) - for k, v := range req.Header { - header[k] = v - } - req.Header = header - t.format.SpanContextToRequest(span.SpanContext(), req) - } - - span.AddAttributes(requestAttrs(req)...) - resp, err := t.base.RoundTrip(req) - if err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - span.End() - return resp, err - } - - span.AddAttributes(responseAttrs(resp)...) - span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) - - // span.End() will be invoked after - // a read from resp.Body returns io.EOF or when - // resp.Body.Close() is invoked. - bt := &bodyTracker{rc: resp.Body, span: span} - resp.Body = wrappedBody(bt, resp.Body) - return resp, err -} - -// bodyTracker wraps a response.Body and invokes -// trace.EndSpan on encountering io.EOF on reading -// the body of the original response. -type bodyTracker struct { - rc io.ReadCloser - span *trace.Span -} - -var _ io.ReadCloser = (*bodyTracker)(nil) - -func (bt *bodyTracker) Read(b []byte) (int, error) { - n, err := bt.rc.Read(b) - - switch err { - case nil: - return n, nil - case io.EOF: - bt.span.End() - default: - // For all other errors, set the span status - bt.span.SetStatus(trace.Status{ - // Code 2 is the error code for Internal server error. - Code: 2, - Message: err.Error(), - }) - } - return n, err -} - -func (bt *bodyTracker) Close() error { - // Invoking endSpan on Close will help catch the cases - // in which a read returned a non-nil error, we set the - // span status but didn't end the span. 
- bt.span.End() - return bt.rc.Close() -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *traceTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base.(canceler); ok { - cr.CancelRequest(req) - } -} - -func spanNameFromURL(req *http.Request) string { - return req.URL.Path -} - -func requestAttrs(r *http.Request) []trace.Attribute { - userAgent := r.UserAgent() - - attrs := make([]trace.Attribute, 0, 5) - attrs = append(attrs, - trace.StringAttribute(PathAttribute, r.URL.Path), - trace.StringAttribute(URLAttribute, r.URL.String()), - trace.StringAttribute(HostAttribute, r.Host), - trace.StringAttribute(MethodAttribute, r.Method), - ) - - if userAgent != "" { - attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) - } - - return attrs -} - -func responseAttrs(resp *http.Response) []trace.Attribute { - return []trace.Attribute{ - trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), - } -} - -// TraceStatus is a utility to convert the HTTP status code to a trace.Status that -// represents the outcome as closely as possible. -func TraceStatus(httpStatusCode int, statusLine string) trace.Status { - var code int32 - if httpStatusCode < 200 || httpStatusCode >= 400 { - code = trace.StatusCodeUnknown - } - switch httpStatusCode { - case 499: - code = trace.StatusCodeCancelled - case http.StatusBadRequest: - code = trace.StatusCodeInvalidArgument - case http.StatusUnprocessableEntity: - code = trace.StatusCodeInvalidArgument - case http.StatusGatewayTimeout: - code = trace.StatusCodeDeadlineExceeded - case http.StatusNotFound: - code = trace.StatusCodeNotFound - case http.StatusForbidden: - code = trace.StatusCodePermissionDenied - case http.StatusUnauthorized: // 401 is actually unauthenticated. - code = trace.StatusCodeUnauthenticated - case http.StatusTooManyRequests: - code = trace.StatusCodeResourceExhausted - case http.StatusNotImplemented: - code = trace.StatusCodeUnimplemented - case http.StatusServiceUnavailable: - code = trace.StatusCodeUnavailable - case http.StatusOK: - code = trace.StatusCodeOK - case http.StatusConflict: - code = trace.StatusCodeAlreadyExists - } - - return trace.Status{Code: code, Message: codeToStr[code]} -} - -var codeToStr = map[int32]string{ - trace.StatusCodeOK: `OK`, - trace.StatusCodeCancelled: `CANCELLED`, - trace.StatusCodeUnknown: `UNKNOWN`, - trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, - trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, - trace.StatusCodeNotFound: `NOT_FOUND`, - trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, - trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, - trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, - trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, - trace.StatusCodeAborted: `ABORTED`, - trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, - trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, - trace.StatusCodeInternal: `INTERNAL`, - trace.StatusCodeUnavailable: `UNAVAILABLE`, - trace.StatusCodeDataLoss: `DATA_LOSS`, - trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, -} - -func isHealthEndpoint(path string) bool { - // Health checking is pretty frequent and - // traces collected for health endpoints - // can be extremely noisy and expensive. - // Disable canonical health checking endpoints - // like /healthz and /_ah/health for now. 
- if path == "/healthz" || path == "/_ah/health" { - return true - } - return false -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go deleted file mode 100644 index 7d75cae2b1..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "io" -) - -// wrappedBody returns a wrapped version of the original -// Body and only implements the same combination of additional -// interfaces as the original. -func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { - var ( - wr, i0 = body.(io.Writer) - ) - switch { - case !i0: - return struct { - io.ReadCloser - }{wrapper} - - case i0: - return struct { - io.ReadCloser - io.Writer - }{wrapper, wr} - default: - return struct { - io.ReadCloser - }{wrapper} - } -} diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go deleted file mode 100644 index b1764e1d3b..0000000000 --- a/vendor/go.opencensus.io/resource/resource.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package resource provides functionality for resource, which capture -// identifying information about the entities for which signals are exported. -package resource - -import ( - "context" - "fmt" - "os" - "regexp" - "sort" - "strconv" - "strings" -) - -// Environment variables used by FromEnv to decode a resource. -const ( - EnvVarType = "OC_RESOURCE_TYPE" - EnvVarLabels = "OC_RESOURCE_LABELS" -) - -// Resource describes an entity about which identifying information and metadata is exposed. -// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. -type Resource struct { - Type string - Labels map[string]string -} - -// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable. 
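// A round-trip sketch for the encoder documented above and its DecodeLabels
// counterpart, using the OC_RESOURCE_LABELS wire format (label values are
// illustrative):

package main

import (
	"fmt"

	"go.opencensus.io/resource"
)

func main() {
	s := resource.EncodeLabels(map[string]string{
		"k8s.io/pod":       "api-7d4f9",
		"k8s.io/namespace": "default",
	})
	// Keys come out sorted, values quoted:
	// k8s.io/namespace="default",k8s.io/pod="api-7d4f9"
	fmt.Println(s)

	labels, err := resource.DecodeLabels(s)
	fmt.Println(labels, err) // map[k8s.io/namespace:default k8s.io/pod:api-7d4f9] <nil>
}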
-func EncodeLabels(labels map[string]string) string { - sortedKeys := make([]string, 0, len(labels)) - for k := range labels { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) - - s := "" - for i, k := range sortedKeys { - if i > 0 { - s += "," - } - s += k + "=" + strconv.Quote(labels[k]) - } - return s -} - -var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`) - -// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable. -// A list of labels of the form `<key1>="<value1>",<key2>="<value2>",...` is accepted. -// Domain names and paths are accepted as label keys. -// Most users will want to use FromEnv instead. -func DecodeLabels(s string) (map[string]string, error) { - m := map[string]string{} - // Ensure a trailing comma, which allows us to keep the regex simpler - s = strings.TrimRight(strings.TrimSpace(s), ",") + "," - - for len(s) > 0 { - match := labelRegex.FindStringSubmatch(s) - if len(match) == 0 { - return nil, fmt.Errorf("invalid label formatting, remainder: %s", s) - } - v := match[2] - if v == "" { - v = match[3] - } else { - var err error - if v, err = strconv.Unquote(v); err != nil { - return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err) - } - } - m[match[1]] = v - - s = s[len(match[0]):] - } - return m, nil -} - -// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE -// and OC_RESOURCE_LABELS environment variables. -func FromEnv(context.Context) (*Resource, error) { - res := &Resource{ - Type: strings.TrimSpace(os.Getenv(EnvVarType)), - } - labels := strings.TrimSpace(os.Getenv(EnvVarLabels)) - if labels == "" { - return res, nil - } - var err error - if res.Labels, err = DecodeLabels(labels); err != nil { - return nil, err - } - return res, nil -} - -var _ Detector = FromEnv - -// merge resource information from b into a. In case of a collision, a takes precedence. -func merge(a, b *Resource) *Resource { - if a == nil { - return b - } - if b == nil { - return a - } - res := &Resource{ - Type: a.Type, - Labels: map[string]string{}, - } - if res.Type == "" { - res.Type = b.Type - } - for k, v := range b.Labels { - res.Labels[k] = v - } - // Labels from resource a overwrite labels from resource b. - for k, v := range a.Labels { - res.Labels[k] = v - } - return res -} - -// Detector attempts to detect resource information. -// If the detector cannot find resource information, the returned resource is nil but no -// error is returned. -// An error is only returned on unexpected failures. -type Detector func(context.Context) (*Resource, error) - -// MultiDetector returns a Detector that calls all input detectors in order and -// merges each result with the previous one. In case a type or label key is already set, -// the first set value takes precedence. -// It returns on the first error that a sub-detector encounters. -func MultiDetector(detectors ...Detector) Detector { - return func(ctx context.Context) (*Resource, error) { - return detectAll(ctx, detectors...) - } -} - -// detectAll calls all input detectors sequentially and merges each result with the previous one. -// It returns on the first error that a sub-detector encounters.
-func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { - var res *Resource - for _, d := range detectors { - r, err := d(ctx) - if err != nil { - return nil, err - } - res = merge(res, r) - } - return res, nil -} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go deleted file mode 100644 index 31477a464f..0000000000 --- a/vendor/go.opencensus.io/stats/doc.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -/* -Package stats contains support for OpenCensus stats recording. - -OpenCensus allows users to create typed measures, record measurements, -aggregate the collected data, and export the aggregated data. - -# Measures - -A measure represents a type of data point to be tracked and recorded. -For example, latency, request Mb/s, and response Mb/s are measures -to collect from a server. - -Measure constructors such as Int64 and Float64 automatically -register the measure by the given name. Each registered measure needs -to be unique by name. Measures also have a description and a unit. - -Libraries can define and export measures. Application authors can then -create views and collect and break down measures by the tags they are -interested in. - -# Recording measurements - -Measurement is a data point to be collected for a measure. For example, -for a latency (ms) measure, 100 is a measurement that represents a 100ms -latency event. Measurements are created from measures with -the current context. Tags from the current context are recorded with the -measurements if there are any. - -Recorded measurements are dropped immediately if no views are registered for them. -There is usually no need to conditionally enable and disable -recording to reduce cost. Recording of measurements is cheap. - -Libraries can always record measurements, and applications can later decide -on which measurements they want to collect by registering views. This allows -libraries to turn on the instrumentation by default. - -# Exemplars - -For a given recorded measurement, the associated exemplar is a diagnostic map -that gives more information about the measurement. - -When aggregated using a Distribution aggregation, an exemplar is kept for each -bucket in the Distribution. This allows you to easily find an example of a -measurement that fell into each bucket. - -For example, if you also use the OpenCensus trace package and you -record a measurement with a context that contains a sampled trace span, -then the trace span will be added to the exemplar associated with the measurement. - -When exported to a supporting back end, you should be able to easily navigate -to example traces that fell into each bucket in the Distribution.
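The measure/record workflow this package documentation describes is easiest to see as a short, self-contained sketch against the vendored API being removed here; the measure name, unit, and handler below are illustrative assumptions, not code from this provider:

    package main

    import (
        "context"

        "go.opencensus.io/stats"
    )

    // videoSize is a hypothetical measure: the name carries a domain prefix and
    // the unit is a UCUM abbreviation ("By" = bytes), per the guidance above.
    var videoSize = stats.Int64("example.com/measure/video_size", "size of processed videos", stats.UnitBytes)

    func process(ctx context.Context, payload []byte) {
        // Recording is cheap: the measurement is dropped immediately unless a
        // view over videoSize is registered; tags in ctx are recorded with it.
        stats.Record(ctx, videoSize.M(int64(len(payload))))
    }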
-*/ -package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go deleted file mode 100644 index 436dc791f8..0000000000 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "go.opencensus.io/tag" -) - -// DefaultRecorder will be called for each Record call. -var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) - -// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but -// avoids interface{} conversion. -// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, -// but is interface{} here to avoid import loops -var MeasurementRecorder interface{} - -// SubscriptionReporter reports when a view subscribed with a measure. -var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go deleted file mode 100644 index 1ffd3cefc7..0000000000 --- a/vendor/go.opencensus.io/stats/measure.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -import ( - "sync" - "sync/atomic" -) - -// Measure represents a single numeric value to be tracked and recorded. -// For example, latency, request bytes, and response bytes could be measures -// to collect from a server. -// -// Measures by themselves have no outside effects. In order to be exported, -// the measure needs to be used in a View. If no Views are defined over a -// measure, there is very little cost in recording it. -type Measure interface { - // Name returns the name of this measure. - // - // Measure names are globally unique (among all libraries linked into your program). - // We recommend prefixing the measure name with a domain name relevant to your - // project or application. - // - // Measure names are never sent over the wire or exported to backends. - // They are only used to create Views. - Name() string - - // Description returns the human-readable description of this measure. - Description() string - - // Unit returns the units for the values this measure takes on. 
- // - // Units are encoded according to the case-sensitive abbreviations from the - // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html - Unit() string -} - -// measureDescriptor is the untyped descriptor associated with each measure. -// Int64Measure and Float64Measure wrap measureDescriptor to provide typed -// recording APIs. -// Two Measures with the same name will have the same measureDescriptor. -type measureDescriptor struct { - subs int32 // access atomically - - name string - description string - unit string -} - -func (m *measureDescriptor) subscribe() { - atomic.StoreInt32(&m.subs, 1) -} - -func (m *measureDescriptor) subscribed() bool { - return atomic.LoadInt32(&m.subs) == 1 -} - -var ( - mu sync.RWMutex - measures = make(map[string]*measureDescriptor) -) - -func registerMeasureHandle(name, desc, unit string) *measureDescriptor { - mu.Lock() - defer mu.Unlock() - - if stored, ok := measures[name]; ok { - return stored - } - m := &measureDescriptor{ - name: name, - description: desc, - unit: unit, - } - measures[name] = m - return m -} - -// Measurement is the numeric value measured when recording stats. Each measure -// provides methods to create measurements of their kind. For example, Int64Measure -// provides M to convert an int64 into a measurement. -type Measurement struct { - v float64 - m Measure - desc *measureDescriptor -} - -// Value returns the value of the Measurement as a float64. -func (m Measurement) Value() float64 { - return m.v -} - -// Measure returns the Measure from which this Measurement was created. -func (m Measurement) Measure() Measure { - return m.m -} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go deleted file mode 100644 index f02c1eda84..0000000000 --- a/vendor/go.opencensus.io/stats/measure_float64.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Float64Measure is a measure for float64 values. -type Float64Measure struct { - desc *measureDescriptor -} - -// M creates a new float64 measurement. -// Use Record to record measurements. -func (m *Float64Measure) M(v float64) Measurement { - return Measurement{ - m: m, - desc: m.desc, - v: v, - } -} - -// Float64 creates a new measure for float64 values. -// -// See the documentation for interface Measure for more guidance on the -// parameters of this function. -func Float64(name, description, unit string) *Float64Measure { - mi := registerMeasureHandle(name, description, unit) - return &Float64Measure{mi} -} - -// Name returns the name of the measure. -func (m *Float64Measure) Name() string { - return m.desc.name -} - -// Description returns the description of the measure. -func (m *Float64Measure) Description() string { - return m.desc.description -} - -// Unit returns the unit of the measure. 
-func (m *Float64Measure) Unit() string { - return m.desc.unit -} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go deleted file mode 100644 index d101d79735..0000000000 --- a/vendor/go.opencensus.io/stats/measure_int64.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Int64Measure is a measure for int64 values. -type Int64Measure struct { - desc *measureDescriptor -} - -// M creates a new int64 measurement. -// Use Record to record measurements. -func (m *Int64Measure) M(v int64) Measurement { - return Measurement{ - m: m, - desc: m.desc, - v: float64(v), - } -} - -// Int64 creates a new measure for int64 values. -// -// See the documentation for interface Measure for more guidance on the -// parameters of this function. -func Int64(name, description, unit string) *Int64Measure { - mi := registerMeasureHandle(name, description, unit) - return &Int64Measure{mi} -} - -// Name returns the name of the measure. -func (m *Int64Measure) Name() string { - return m.desc.name -} - -// Description returns the description of the measure. -func (m *Int64Measure) Description() string { - return m.desc.description -} - -// Unit returns the unit of the measure. -func (m *Int64Measure) Unit() string { - return m.desc.unit -} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go deleted file mode 100644 index 8b5b99803c..0000000000 --- a/vendor/go.opencensus.io/stats/record.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -import ( - "context" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -func init() { - internal.SubscriptionReporter = func(measure string) { - mu.Lock() - measures[measure].subscribe() - mu.Unlock() - } -} - -// Recorder provides an interface for exporting measurement information from -// the static Record method by using the WithRecorder option. -type Recorder interface { - // Record records a set of measurements associated with the given tags and attachments. - // The second argument is a `[]Measurement`. 
- Record(*tag.Map, interface{}, map[string]interface{}) -} - -type recordOptions struct { - attachments metricdata.Attachments - mutators []tag.Mutator - measurements []Measurement - recorder Recorder -} - -// WithAttachments applies provided exemplar attachments. -func WithAttachments(attachments metricdata.Attachments) Options { - return func(ro *recordOptions) { - ro.attachments = attachments - } -} - -// WithTags applies provided tag mutators. -func WithTags(mutators ...tag.Mutator) Options { - return func(ro *recordOptions) { - ro.mutators = mutators - } -} - -// WithMeasurements applies provided measurements. -func WithMeasurements(measurements ...Measurement) Options { - return func(ro *recordOptions) { - ro.measurements = measurements - } -} - -// WithRecorder records the measurements to the specified `Recorder`, rather -// than to the global metrics recorder. -func WithRecorder(meter Recorder) Options { - return func(ro *recordOptions) { - ro.recorder = meter - } -} - -// Options apply changes to recordOptions. -type Options func(*recordOptions) - -func createRecordOption(ros ...Options) *recordOptions { - o := &recordOptions{} - for _, ro := range ros { - ro(o) - } - return o -} - -type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) - -// Record records one or multiple measurements with the same context at once. -// If there are any tags in the context, measurements will be tagged with them. -func Record(ctx context.Context, ms ...Measurement) { - // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality - // (RecordOptions) we can reduce some allocations to speed up this hot path - if len(ms) == 0 { - return - } - recorder := internal.MeasurementRecorder.(measurementRecorder) - record := false - for _, m := range ms { - if m.desc.subscribed() { - record = true - break - } - } - if !record { - return - } - recorder(tag.FromContext(ctx), ms, nil) - return -} - -// RecordWithTags records one or multiple measurements at once. -// -// Measurements will be tagged with the tags in the context mutated by the mutators. -// RecordWithTags is useful if you want to record with tag mutations but don't want -// to propagate the mutations in the context. -func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { - return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) -} - -// RecordWithOptions records measurements from the given options (if any) against context -// and tags and attachments in the options (if any). -// If there are any tags in the context, measurements will be tagged with them. -func RecordWithOptions(ctx context.Context, ros ...Options) error { - o := createRecordOption(ros...) 
- if len(o.measurements) == 0 { - return nil - } - recorder := internal.DefaultRecorder - if o.recorder != nil { - recorder = o.recorder.Record - } - if recorder == nil { - return nil - } - record := false - for _, m := range o.measurements { - if m.desc.subscribed() { - record = true - break - } - } - if !record { - return nil - } - if len(o.mutators) > 0 { - var err error - if ctx, err = tag.New(ctx, o.mutators...); err != nil { - return err - } - } - recorder(tag.FromContext(ctx), o.measurements, o.attachments) - return nil -} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go deleted file mode 100644 index 736399652c..0000000000 --- a/vendor/go.opencensus.io/stats/units.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Units are encoded according to the case-sensitive abbreviations from the -// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html -const ( - UnitNone = "1" // Deprecated: Use UnitDimensionless. - UnitDimensionless = "1" - UnitBytes = "By" - UnitMilliseconds = "ms" - UnitSeconds = "s" -) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go deleted file mode 100644 index 61f72d20da..0000000000 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import "time" - -// AggType represents the type of aggregation function used on a View. -type AggType int - -// All available aggregation types. -const ( - AggTypeNone AggType = iota // no aggregation; reserved for future use. - AggTypeCount // the count aggregation, see Count. - AggTypeSum // the sum aggregation, see Sum. - AggTypeDistribution // the distribution aggregation, see Distribution. - AggTypeLastValue // the last value aggregation, see LastValue. -) - -func (t AggType) String() string { - return aggTypeName[t] -} - -var aggTypeName = map[AggType]string{ - AggTypeNone: "None", - AggTypeCount: "Count", - AggTypeSum: "Sum", - AggTypeDistribution: "Distribution", - AggTypeLastValue: "LastValue", -} - -// Aggregation represents a data aggregation method. Use one of the functions: -// Count, Sum, or Distribution to construct an Aggregation. -type Aggregation struct { - Type AggType // Type is the AggType of this Aggregation. 
- Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. - - newData func(time.Time) AggregationData -} - -var ( - aggCount = &Aggregation{ - Type: AggTypeCount, - newData: func(t time.Time) AggregationData { - return &CountData{Start: t} - }, - } - aggSum = &Aggregation{ - Type: AggTypeSum, - newData: func(t time.Time) AggregationData { - return &SumData{Start: t} - }, - } -) - -// Count indicates that data collected and aggregated -// with this method will be turned into a count value. -// For example, total number of accepted requests can be -// aggregated by using Count. -func Count() *Aggregation { - return aggCount -} - -// Sum indicates that data collected and aggregated -// with this method will be summed up. -// For example, accumulated request bytes can be aggregated by using -// Sum. -func Sum() *Aggregation { - return aggSum -} - -// Distribution indicates that the desired aggregation is -// a histogram distribution. -// -// A distribution aggregation may contain a histogram of the values in the -// population. The bucket boundaries for that histogram are described -// by the bounds. This defines len(bounds)+1 buckets. -// -// If len(bounds) >= 2 then the boundaries for bucket index i are: -// -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length -// -// If len(bounds) is 0 then there is no histogram associated with the -// distribution. There will be a single bucket with boundaries -// (-infinity, +infinity). -// -// If len(bounds) is 1 then there are no finite buckets, and that single -// element is the common boundary of the overflow and underflow buckets. -func Distribution(bounds ...float64) *Aggregation { - agg := &Aggregation{ - Type: AggTypeDistribution, - Buckets: bounds, - } - agg.newData = func(t time.Time) AggregationData { - return newDistributionData(agg, t) - } - return agg -} - -// LastValue only reports the last value recorded using this -// aggregation. All other measurements will be dropped. -func LastValue() *Aggregation { - return &Aggregation{ - Type: AggTypeLastValue, - newData: func(_ time.Time) AggregationData { - return &LastValueData{} - }, - } -} diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go deleted file mode 100644 index d93b520662..0000000000 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "math" - "time" - - "go.opencensus.io/metric/metricdata" -) - -// AggregationData represents an aggregated value from a collection. -// They are reported on the view data during exporting. -// Most users won't directly access aggregation data.
-type AggregationData interface { - isAggregationData() bool - addSample(v float64, attachments map[string]interface{}, t time.Time) - clone() AggregationData - equal(other AggregationData) bool - toPoint(t metricdata.Type, time time.Time) metricdata.Point - StartTime() time.Time -} - -const epsilon = 1e-9 - -// CountData is the aggregated data for the Count aggregation. -// A count aggregation processes data and counts the recordings. -// -// Most users won't directly access count data. -type CountData struct { - Start time.Time - Value int64 -} - -func (a *CountData) isAggregationData() bool { return true } - -func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { - a.Value = a.Value + 1 -} - -func (a *CountData) clone() AggregationData { - return &CountData{Value: a.Value, Start: a.Start} -} - -func (a *CountData) equal(other AggregationData) bool { - a2, ok := other.(*CountData) - if !ok { - return false - } - - return a.Start.Equal(a2.Start) && a.Value == a2.Value -} - -func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeInt64: - return metricdata.NewInt64Point(t, a.Value) - default: - panic("unsupported metricdata.Type") - } -} - -// StartTime returns the start time of the data being aggregated by CountData. -func (a *CountData) StartTime() time.Time { - return a.Start -} - -// SumData is the aggregated data for the Sum aggregation. -// A sum aggregation processes data and sums up the recordings. -// -// Most users won't directly access sum data. -type SumData struct { - Start time.Time - Value float64 -} - -func (a *SumData) isAggregationData() bool { return true } - -func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { - a.Value += v -} - -func (a *SumData) clone() AggregationData { - return &SumData{Value: a.Value, Start: a.Start} -} - -func (a *SumData) equal(other AggregationData) bool { - a2, ok := other.(*SumData) - if !ok { - return false - } - return a.Start.Equal(a2.Start) && math.Pow(a.Value-a2.Value, 2) < epsilon -} - -func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeInt64: - return metricdata.NewInt64Point(t, int64(a.Value)) - case metricdata.TypeCumulativeFloat64: - return metricdata.NewFloat64Point(t, a.Value) - default: - panic("unsupported metricdata.Type") - } -} - -// StartTime returns the start time of the data being aggregated by SumData. -func (a *SumData) StartTime() time.Time { - return a.Start -} - -// DistributionData is the aggregated data for the -// Distribution aggregation. -// -// Most users won't directly access distribution data. -// -// For a distribution with N bounds, the associated DistributionData will have -// N+1 buckets. -type DistributionData struct { - Count int64 // number of data points aggregated - Min float64 // minimum value in the distribution - Max float64 // max value in the distribution - Mean float64 // mean of the distribution - SumOfSquaredDev float64 // sum of the squared deviation from the mean - CountPerBucket []int64 // number of occurrences per bucket - // ExemplarsPerBucket is a slice of the same length as CountPerBucket containing - // an exemplar for the associated bucket, or nil.
- ExemplarsPerBucket []*metricdata.Exemplar - bounds []float64 // histogram distribution of the values - Start time.Time -} - -func newDistributionData(agg *Aggregation, t time.Time) *DistributionData { - bucketCount := len(agg.Buckets) + 1 - return &DistributionData{ - CountPerBucket: make([]int64, bucketCount), - ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), - bounds: agg.Buckets, - Min: math.MaxFloat64, - Max: math.SmallestNonzeroFloat64, - Start: t, - } -} - -// Sum returns the sum of all samples collected. -func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } - -func (a *DistributionData) variance() float64 { - if a.Count <= 1 { - return 0 - } - return a.SumOfSquaredDev / float64(a.Count-1) -} - -func (a *DistributionData) isAggregationData() bool { return true } - -// TODO(songy23): support exemplar attachments. -func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { - if v < a.Min { - a.Min = v - } - if v > a.Max { - a.Max = v - } - a.Count++ - a.addToBucket(v, attachments, t) - - if a.Count == 1 { - a.Mean = v - return - } - - oldMean := a.Mean - a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) - a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) -} - -func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { - var count *int64 - var i int - var b float64 - for i, b = range a.bounds { - if v < b { - count = &a.CountPerBucket[i] - break - } - } - if count == nil { // Last bucket. - i = len(a.bounds) - count = &a.CountPerBucket[i] - } - *count++ - if exemplar := getExemplar(v, attachments, t); exemplar != nil { - a.ExemplarsPerBucket[i] = exemplar - } -} - -func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { - if len(attachments) == 0 { - return nil - } - return &metricdata.Exemplar{ - Value: v, - Timestamp: t, - Attachments: attachments, - } -} - -func (a *DistributionData) clone() AggregationData { - c := *a - c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) - c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) - return &c -} - -func (a *DistributionData) equal(other AggregationData) bool { - a2, ok := other.(*DistributionData) - if !ok { - return false - } - if a2 == nil { - return false - } - if len(a.CountPerBucket) != len(a2.CountPerBucket) { - return false - } - for i := range a.CountPerBucket { - if a.CountPerBucket[i] != a2.CountPerBucket[i] { - return false - } - } - return a.Start.Equal(a2.Start) && - a.Count == a2.Count && - a.Min == a2.Min && - a.Max == a2.Max && - math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon -} - -func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeDistribution: - buckets := []metricdata.Bucket{} - for i := 0; i < len(a.CountPerBucket); i++ { - buckets = append(buckets, metricdata.Bucket{ - Count: a.CountPerBucket[i], - Exemplar: a.ExemplarsPerBucket[i], - }) - } - bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} - - val := &metricdata.Distribution{ - Count: a.Count, - Sum: a.Sum(), - SumOfSquaredDeviation: a.SumOfSquaredDev, - BucketOptions: bucketOptions, - Buckets: buckets, - } - return metricdata.NewDistributionPoint(t, val) - - default: - // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. 
- panic("unsupported metricdata.Type") - } -} - -// StartTime returns the start time of the data being aggregated by DistributionData. -func (a *DistributionData) StartTime() time.Time { - return a.Start -} - -// LastValueData returns the last value recorded for LastValue aggregation. -type LastValueData struct { - Value float64 -} - -func (l *LastValueData) isAggregationData() bool { - return true -} - -func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { - l.Value = v -} - -func (l *LastValueData) clone() AggregationData { - return &LastValueData{l.Value} -} - -func (l *LastValueData) equal(other AggregationData) bool { - a2, ok := other.(*LastValueData) - if !ok { - return false - } - return l.Value == a2.Value -} - -func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeGaugeInt64: - return metricdata.NewInt64Point(t, int64(l.Value)) - case metricdata.TypeGaugeFloat64: - return metricdata.NewFloat64Point(t, l.Value) - default: - panic("unsupported metricdata.Type") - } -} - -// StartTime returns an empty time value as start time is not recorded when using last value -// aggregation. -func (l *LastValueData) StartTime() time.Time { - return time.Time{} -} - -// ClearStart clears the Start field from data if present. Useful for testing in cases where the -// start time will be nondeterministic. -func ClearStart(data AggregationData) { - switch data := data.(type) { - case *CountData: - data.Start = time.Time{} - case *SumData: - data.Start = time.Time{} - case *DistributionData: - data.Start = time.Time{} - } -} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go deleted file mode 100644 index bcd6e08c74..0000000000 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "sort" - "time" - - "go.opencensus.io/internal/tagencoding" - "go.opencensus.io/tag" -) - -type collector struct { - // signatures holds the aggregations values for each unique tag signature - // (values for all keys) to its aggregator. - signatures map[string]AggregationData - // Aggregation is the description of the aggregation to perform for this - // view. - a *Aggregation -} - -func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { - aggregator, ok := c.signatures[s] - if !ok { - aggregator = c.a.newData(t) - c.signatures[s] = aggregator - } - aggregator.addSample(v, attachments, t) -} - -// collectRows returns a snapshot of the collected Row values. 
-func (c *collector) collectedRows(keys []tag.Key) []*Row { - rows := make([]*Row, 0, len(c.signatures)) - for sig, aggregator := range c.signatures { - tags := decodeTags([]byte(sig), keys) - row := &Row{Tags: tags, Data: aggregator.clone()} - rows = append(rows, row) - } - return rows -} - -func (c *collector) clearRows() { - c.signatures = make(map[string]AggregationData) -} - -// encodeWithKeys encodes the map by using values -// only associated with the keys provided. -func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { - // Compute the buffer length we will need ahead of time to avoid resizing later - reqLen := 0 - for _, k := range keys { - s, _ := m.Value(k) - // We will store each value + its length - reqLen += len(s) + 1 - } - vb := &tagencoding.Values{ - Buffer: make([]byte, reqLen), - } - for _, k := range keys { - v, _ := m.Value(k) - vb.WriteValue([]byte(v)) - } - return vb.Bytes() -} - -// decodeTags decodes tags from the buffer and -// orders them by the keys. -func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { - vb := &tagencoding.Values{Buffer: buf} - var tags []tag.Tag - for _, k := range keys { - v := vb.ReadValue() - if v != nil { - tags = append(tags, tag.Tag{Key: k, Value: string(v)}) - } - } - vb.ReadIndex = 0 - sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) - return tags -} diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go deleted file mode 100644 index 60bf0e3925..0000000000 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package view contains support for collecting and exposing aggregates over stats. -// -// In order to collect measurements, views need to be defined and registered. -// A view allows recorded measurements to be filtered and aggregated. -// -// All recorded measurements can be grouped by a list of tags. -// -// OpenCensus provides several aggregation methods: Count, Distribution and Sum. -// -// Count only counts the number of measurement points recorded. -// Distribution provides a statistical summary of the aggregated data by counting -// how many recorded measurements fall into each bucket. -// Sum adds up the measurement values. -// LastValue just keeps track of the most recently recorded measurement value. -// All aggregations are cumulative. -// -// Views can be registered and unregistered at any time during program execution. -// -// Libraries can define views but it is recommended that in most cases registering -// views be left up to applications. -// -// # Exporting -// -// Collected and aggregated data can be exported to a metric collection -// backend by registering its exporter. -// -// Multiple exporters can be registered to upload the data to various -// different back ends.
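Registering a view over a measure, as the package documentation above describes, looked roughly like the following sketch against this vendored API (the measure, view name, and bucket bounds are illustrative assumptions):

    import (
        "log"

        "go.opencensus.io/stats"
        "go.opencensus.io/stats/view"
    )

    var latencyMs = stats.Float64("example.com/measure/latency", "request latency", stats.UnitMilliseconds)

    func registerLatencyView() {
        // Four finite bounds define five buckets:
        // [-inf, 25) [25, 100) [100, 500) [500, 2000) [2000, +inf)
        v := &view.View{
            Name:        "example.com/views/latency_distribution",
            Description: "distribution of request latencies",
            Measure:     latencyMs,
            Aggregation: view.Distribution(25, 100, 500, 2000),
        }
        if err := view.Register(v); err != nil {
            log.Fatalf("failed to register view: %v", err)
        }
    }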
-package view // import "go.opencensus.io/stats/view" - -// TODO(acetechnologist): Add a link to the language independent OpenCensus -// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go deleted file mode 100644 index 73ba11f5b6..0000000000 --- a/vendor/go.opencensus.io/stats/view/export.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package view - -// Exporter exports the collected records as view data. -// -// The ExportView method should return quickly; if an -// Exporter takes a significant amount of time to -// process a Data, that work should be done on another goroutine. -// -// It is safe to assume that ExportView will not be called concurrently from -// multiple goroutines. -// -// The Data should not be modified. -type Exporter interface { - ExportView(viewData *Data) -} - -// RegisterExporter registers an exporter. -// Collected data will be reported via all the -// registered exporters. Once you no longer -// want data to be exported, invoke UnregisterExporter -// with the previously registered exporter. -// -// Binaries can register exporters, libraries shouldn't register exporters. -func RegisterExporter(e Exporter) { - defaultWorker.RegisterExporter(e) -} - -// UnregisterExporter unregisters an exporter. -func UnregisterExporter(e Exporter) { - defaultWorker.UnregisterExporter(e) -} diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go deleted file mode 100644 index 293b54ecbe..0000000000 --- a/vendor/go.opencensus.io/stats/view/view.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "sort" - "sync/atomic" - "time" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats" - "go.opencensus.io/tag" -) - -// View allows users to aggregate the recorded stats.Measurements. -// Views need to be passed to the Register function before data will be -// collected and sent to Exporters. -type View struct { - Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. - Description string // Description is a human-readable description for this view. - - // TagKeys are the tag keys describing the grouping of this view. 
- // A single Row will be produced for each combination of associated tag values. - TagKeys []tag.Key - - // Measure is a stats.Measure to aggregate in this view. - Measure stats.Measure - - // Aggregation is the aggregation function to apply to the set of Measurements. - Aggregation *Aggregation -} - -// WithName returns a copy of the View with a new name. This is useful for -// renaming views to cope with limitations placed on metric names by various -// backends. -func (v *View) WithName(name string) *View { - vNew := *v - vNew.Name = name - return &vNew -} - -// same compares two views and returns true if they represent the same aggregation. -func (v *View) same(other *View) bool { - if v == other { - return true - } - if v == nil { - return false - } - return reflect.DeepEqual(v.Aggregation, other.Aggregation) && - v.Measure.Name() == other.Measure.Name() -} - -// ErrNegativeBucketBounds error returned if histogram contains negative bounds. -// -// Deprecated: this should not be public. -var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") - -// canonicalize canonicalizes v by setting explicit -// defaults for Name and Description and sorting the TagKeys -func (v *View) canonicalize() error { - if v.Measure == nil { - return fmt.Errorf("cannot register view %q: measure not set", v.Name) - } - if v.Aggregation == nil { - return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) - } - if v.Name == "" { - v.Name = v.Measure.Name() - } - if v.Description == "" { - v.Description = v.Measure.Description() - } - if err := checkViewName(v.Name); err != nil { - return err - } - sort.Slice(v.TagKeys, func(i, j int) bool { - return v.TagKeys[i].Name() < v.TagKeys[j].Name() - }) - sort.Float64s(v.Aggregation.Buckets) - for _, b := range v.Aggregation.Buckets { - if b < 0 { - return ErrNegativeBucketBounds - } - } - // drop 0 bucket silently. - v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) - - return nil -} - -func dropZeroBounds(bounds ...float64) []float64 { - for i, bound := range bounds { - if bound > 0 { - return bounds[i:] - } - } - return []float64{} -} - -// viewInternal is the internal representation of a View. -type viewInternal struct { - view *View // view is the canonicalized View definition associated with this view. - subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access - collector *collector - metricDescriptor *metricdata.Descriptor -} - -func newViewInternal(v *View) (*viewInternal, error) { - return &viewInternal{ - view: v, - collector: &collector{make(map[string]AggregationData), v.Aggregation}, - metricDescriptor: viewToMetricDescriptor(v), - }, nil -} - -func (v *viewInternal) subscribe() { - atomic.StoreUint32(&v.subscribed, 1) -} - -func (v *viewInternal) unsubscribe() { - atomic.StoreUint32(&v.subscribed, 0) -} - -// isSubscribed returns true if the view is exporting -// data by subscription. 
-func (v *viewInternal) isSubscribed() bool { - return atomic.LoadUint32(&v.subscribed) == 1 -} - -func (v *viewInternal) clearRows() { - v.collector.clearRows() -} - -func (v *viewInternal) collectedRows() []*Row { - return v.collector.collectedRows(v.view.TagKeys) -} - -func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { - if !v.isSubscribed() { - return - } - sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, val, attachments, t) -} - -// A Data is a set of rows about usage of the single measure associated -// with the given view. Each row is specific to a unique set of tags. -type Data struct { - View *View - Start, End time.Time - Rows []*Row -} - -// Row is the collected value for a specific set of key value pairs a.k.a tags. -type Row struct { - Tags []tag.Tag - Data AggregationData -} - -func (r *Row) String() string { - var buffer bytes.Buffer - buffer.WriteString("{ ") - buffer.WriteString("{ ") - for _, t := range r.Tags { - buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) - } - buffer.WriteString(" }") - buffer.WriteString(fmt.Sprintf("%v", r.Data)) - buffer.WriteString(" }") - return buffer.String() -} - -// Equal returns true if both rows are equal. Tags are expected to be ordered -// by the key name. Even if both rows have the same tags but the tags appear in -// different orders it will return false. -func (r *Row) Equal(other *Row) bool { - if r == other { - return true - } - return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) -} - -const maxNameLength = 255 - -// Returns true if the given string contains only printable characters. -func isPrintable(str string) bool { - for _, r := range str { - if !(r >= ' ' && r <= '~') { - return false - } - } - return true -} - -func checkViewName(name string) error { - if len(name) > maxNameLength { - return fmt.Errorf("view name cannot be larger than %v", maxNameLength) - } - if !isPrintable(name) { - return fmt.Errorf("view name needs to be an ASCII string") - } - return nil -} diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go deleted file mode 100644 index 57d615ec7e..0000000000 --- a/vendor/go.opencensus.io/stats/view/view_to_metric.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package view - -import ( - "time" - - "go.opencensus.io/resource" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats" -) - -func getUnit(unit string) metricdata.Unit { - switch unit { - case "1": - return metricdata.UnitDimensionless - case "ms": - return metricdata.UnitMilliseconds - case "By": - return metricdata.UnitBytes - } - return metricdata.UnitDimensionless -} - -func getType(v *View) metricdata.Type { - m := v.Measure - agg := v.Aggregation - - switch agg.Type { - case AggTypeSum: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeCumulativeInt64 - case *stats.Float64Measure: - return metricdata.TypeCumulativeFloat64 - default: - panic("unexpected measure type") - } - case AggTypeDistribution: - return metricdata.TypeCumulativeDistribution - case AggTypeLastValue: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeGaugeInt64 - case *stats.Float64Measure: - return metricdata.TypeGaugeFloat64 - default: - panic("unexpected measure type") - } - case AggTypeCount: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeCumulativeInt64 - case *stats.Float64Measure: - return metricdata.TypeCumulativeInt64 - default: - panic("unexpected measure type") - } - default: - panic("unexpected aggregation type") - } -} - -func getLabelKeys(v *View) []metricdata.LabelKey { - labelKeys := []metricdata.LabelKey{} - for _, k := range v.TagKeys { - labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) - } - return labelKeys -} - -func viewToMetricDescriptor(v *View) *metricdata.Descriptor { - return &metricdata.Descriptor{ - Name: v.Name, - Description: v.Description, - Unit: convertUnit(v), - Type: getType(v), - LabelKeys: getLabelKeys(v), - } -} - -func convertUnit(v *View) metricdata.Unit { - switch v.Aggregation.Type { - case AggTypeCount: - return metricdata.UnitDimensionless - default: - return getUnit(v.Measure.Unit()) - } -} - -func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { - labelValues := []metricdata.LabelValue{} - tagMap := make(map[string]string) - for _, tag := range row.Tags { - tagMap[tag.Key.Name()] = tag.Value - } - - for _, key := range expectedKeys { - if val, ok := tagMap[key.Key]; ok { - labelValues = append(labelValues, metricdata.NewLabelValue(val)) - } else { - labelValues = append(labelValues, metricdata.LabelValue{}) - } - } - return labelValues -} - -func rowToTimeseries(v *viewInternal, row *Row, now time.Time) *metricdata.TimeSeries { - return &metricdata.TimeSeries{ - Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, - LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), - StartTime: row.Data.StartTime(), - } -} - -func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time) *metricdata.Metric { - rows := v.collectedRows() - if len(rows) == 0 { - return nil - } - - ts := []*metricdata.TimeSeries{} - for _, row := range rows { - ts = append(ts, rowToTimeseries(v, row, now)) - } - - m := &metricdata.Metric{ - Descriptor: *v.metricDescriptor, - TimeSeries: ts, - Resource: r, - } - return m -} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go deleted file mode 100644 index 6a79cd8a34..0000000000 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance 
with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "fmt" - "sync" - "time" - - "go.opencensus.io/resource" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/metric/metricproducer" - "go.opencensus.io/stats" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -func init() { - defaultWorker = NewMeter().(*worker) - go defaultWorker.start() - internal.DefaultRecorder = record - internal.MeasurementRecorder = recordMeasurement -} - -type measureRef struct { - measure string - views map[*viewInternal]struct{} -} - -type worker struct { - measures map[string]*measureRef - views map[string]*viewInternal - viewStartTimes map[*viewInternal]time.Time - - timer *time.Ticker - c chan command - quit, done chan bool - mu sync.RWMutex - r *resource.Resource - - exportersMu sync.RWMutex - exporters map[Exporter]struct{} -} - -// Meter defines an interface which allows a single process to maintain -// multiple sets of metrics exports (intended for the advanced case where a -// single process wants to report metrics about multiple objects, such as -// multiple databases or HTTP services). -// -// Note that this is an advanced use case, and the static functions in this -// module should cover the common use cases. -type Meter interface { - stats.Recorder - // Find returns a registered view associated with this name. - // If no registered view is found, nil is returned. - Find(name string) *View - // Register begins collecting data for the given views. - // Once a view is registered, it reports data to the registered exporters. - Register(views ...*View) error - // Unregister the given views. Data will no longer be exported for these views - // after Unregister returns. - // It is not necessary to unregister from views you expect to collect for the - // duration of your program execution. - Unregister(views ...*View) - // SetReportingPeriod sets the interval between reporting aggregated views in - // the program. If duration is less than or equal to zero, it enables the - // default behavior. - // - // Note: each exporter makes different promises about what the lowest supported - // duration is. For example, the Stackdriver exporter recommends a value no - // lower than 1 minute. Consult each exporter per your needs. - SetReportingPeriod(time.Duration) - - // RegisterExporter registers an exporter. - // Collected data will be reported via all the - // registered exporters. Once you no longer - // want data to be exported, invoke UnregisterExporter - // with the previously registered exporter. - // - // Binaries can register exporters, libraries shouldn't register exporters. - RegisterExporter(Exporter) - // UnregisterExporter unregisters an exporter. - UnregisterExporter(Exporter) - // SetResource may be used to set the Resource associated with this registry. - // This is intended to be used in cases where a single process exports metrics - // for multiple Resources, typically in a multi-tenant situation. - SetResource(*resource.Resource) - - // Start causes the Meter to start processing Record calls and aggregating - // statistics as well as exporting data.
- Start() - // Stop causes the Meter to stop processing calls and terminate data export. - Stop() - - // RetrieveData gets a snapshot of the data collected for the view registered - // with the given name. It is intended for testing only. - RetrieveData(viewName string) ([]*Row, error) -} - -var _ Meter = (*worker)(nil) - -var defaultWorker *worker - -var defaultReportingDuration = 10 * time.Second - -// Find returns a registered view associated with this name. -// If no registered view is found, nil is returned. -func Find(name string) (v *View) { - return defaultWorker.Find(name) -} - -// Find returns a registered view associated with this name. -// If no registered view is found, nil is returned. -func (w *worker) Find(name string) (v *View) { - req := &getViewByNameReq{ - name: name, - c: make(chan *getViewByNameResp), - } - w.c <- req - resp := <-req.c - return resp.v -} - -// Register begins collecting data for the given views. -// Once a view is registered, it reports data to the registered exporters. -func Register(views ...*View) error { - return defaultWorker.Register(views...) -} - -// Register begins collecting data for the given views. -// Once a view is registered, it reports data to the registered exporters. -func (w *worker) Register(views ...*View) error { - req := &registerViewReq{ - views: views, - err: make(chan error), - } - w.c <- req - return <-req.err -} - -// Unregister the given views. Data will no longer be exported for these views -// after Unregister returns. -// It is not necessary to unregister from views you expect to collect for the -// duration of your program execution. -func Unregister(views ...*View) { - defaultWorker.Unregister(views...) -} - -// Unregister the given views. Data will no longer be exported for these views -// after Unregister returns. -// It is not necessary to unregister from views you expect to collect for the -// duration of your program execution. -func (w *worker) Unregister(views ...*View) { - names := make([]string, len(views)) - for i := range views { - names[i] = views[i].Name - } - req := &unregisterFromViewReq{ - views: names, - done: make(chan struct{}), - } - w.c <- req - <-req.done -} - -// RetrieveData gets a snapshot of the data collected for the view registered -// with the given name. It is intended for testing only. -func RetrieveData(viewName string) ([]*Row, error) { - return defaultWorker.RetrieveData(viewName) -} - -// RetrieveData gets a snapshot of the data collected for the view registered -// with the given name. It is intended for testing only. -func (w *worker) RetrieveData(viewName string) ([]*Row, error) { - req := &retrieveDataReq{ - now: time.Now(), - v: viewName, - c: make(chan *retrieveDataResp), - } - w.c <- req - resp := <-req.c - return resp.rows, resp.err -} - -func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { - defaultWorker.Record(tags, ms, attachments) -} - -func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { - defaultWorker.recordMeasurement(tags, ms, attachments) -} - -// Record records a set of measurements ms associated with the given tags and attachments. -func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { - w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) -} - -// recordMeasurement records a set of measurements ms associated with the given tags and attachments.
-// This is the same as Record but without an interface{} type to avoid allocations -func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { - req := &recordReq{ - tm: tags, - ms: ms, - attachments: attachments, - t: time.Now(), - } - w.c <- req -} - -// SetReportingPeriod sets the interval between reporting aggregated views in -// the program. If duration is less than or equal to zero, it enables the -// default behavior. -// -// Note: each exporter makes different promises about what the lowest supported -// duration is. For example, the Stackdriver exporter recommends a value no -// lower than 1 minute. Consult each exporter per your needs. -func SetReportingPeriod(d time.Duration) { - defaultWorker.SetReportingPeriod(d) -} - -// Stop stops the default worker. -func Stop() { - defaultWorker.Stop() -} - -// SetReportingPeriod sets the interval between reporting aggregated views in -// the program. If duration is less than or equal to zero, it enables the -// default behavior. -// -// Note: each exporter makes different promises about what the lowest supported -// duration is. For example, the Stackdriver exporter recommends a value no -// lower than 1 minute. Consult each exporter per your needs. -func (w *worker) SetReportingPeriod(d time.Duration) { - // TODO(acetechnologist): ensure that the duration d is more than a certain - // value. e.g. 1s - req := &setReportingPeriodReq{ - d: d, - c: make(chan bool), - } - w.c <- req - <-req.c // don't return until the timer is set to the new duration. -} - -// NewMeter constructs a Meter instance. You should only need to use this if -// you need to separate out Measurement recordings and View aggregations within -// a single process. -func NewMeter() Meter { - return &worker{ - measures: make(map[string]*measureRef), - views: make(map[string]*viewInternal), - viewStartTimes: make(map[*viewInternal]time.Time), - timer: time.NewTicker(defaultReportingDuration), - c: make(chan command, 1024), - quit: make(chan bool), - done: make(chan bool), - - exporters: make(map[Exporter]struct{}), - } -} - -// SetResource associates all data collected by this Meter with the specified -// resource. This resource is reported when using metricexport.ReadAndExport; -// it is not provided when used with ExportView/RegisterExporter, because that -// interface does not provide a means for reporting the Resource. 
-func (w *worker) SetResource(r *resource.Resource) { - w.r = r -} - -func (w *worker) Start() { - go w.start() -} - -func (w *worker) start() { - prodMgr := metricproducer.GlobalManager() - prodMgr.AddProducer(w) - - for { - select { - case cmd := <-w.c: - cmd.handleCommand(w) - case <-w.timer.C: - w.reportUsage() - case <-w.quit: - w.timer.Stop() - close(w.c) - close(w.done) - return - } - } -} - -func (w *worker) Stop() { - prodMgr := metricproducer.GlobalManager() - prodMgr.DeleteProducer(w) - select { - case <-w.quit: - default: - close(w.quit) - } - <-w.done -} - -func (w *worker) getMeasureRef(name string) *measureRef { - if mr, ok := w.measures[name]; ok { - return mr - } - mr := &measureRef{ - measure: name, - views: make(map[*viewInternal]struct{}), - } - w.measures[name] = mr - return mr -} - -func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { - w.mu.Lock() - defer w.mu.Unlock() - vi, err := newViewInternal(v) - if err != nil { - return nil, err - } - if x, ok := w.views[vi.view.Name]; ok { - if !x.view.same(vi.view) { - return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) - } - - // the view is already registered so there is nothing to do and the - // command is considered successful. - return x, nil - } - w.views[vi.view.Name] = vi - w.viewStartTimes[vi] = time.Now() - ref := w.getMeasureRef(vi.view.Measure.Name()) - ref.views[vi] = struct{}{} - return vi, nil -} - -func (w *worker) unregisterView(v *viewInternal) { - w.mu.Lock() - defer w.mu.Unlock() - delete(w.views, v.view.Name) - delete(w.viewStartTimes, v) - if measure := w.measures[v.view.Measure.Name()]; measure != nil { - delete(measure.views, v) - } -} - -func (w *worker) reportView(v *viewInternal) { - if !v.isSubscribed() { - return - } - rows := v.collectedRows() - viewData := &Data{ - View: v.view, - Start: w.viewStartTimes[v], - End: time.Now(), - Rows: rows, - } - w.exportersMu.Lock() - defer w.exportersMu.Unlock() - for e := range w.exporters { - e.ExportView(viewData) - } -} - -func (w *worker) reportUsage() { - w.mu.Lock() - defer w.mu.Unlock() - for _, v := range w.views { - w.reportView(v) - } -} - -func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { - if !v.isSubscribed() { - return nil - } - - return viewToMetric(v, w.r, now) -} - -// Read reads all view data and returns them as metrics. -// It is typically invoked by metric reader to export stats in metric format. -func (w *worker) Read() []*metricdata.Metric { - w.mu.Lock() - defer w.mu.Unlock() - now := time.Now() - metrics := make([]*metricdata.Metric, 0, len(w.views)) - for _, v := range w.views { - metric := w.toMetric(v, now) - if metric != nil { - metrics = append(metrics, metric) - } - } - return metrics -} - -func (w *worker) RegisterExporter(e Exporter) { - w.exportersMu.Lock() - defer w.exportersMu.Unlock() - - w.exporters[e] = struct{}{} -} - -func (w *worker) UnregisterExporter(e Exporter) { - w.exportersMu.Lock() - defer w.exportersMu.Unlock() - - delete(w.exporters, e) -} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go deleted file mode 100644 index 9ac4cc0599..0000000000 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-	"time"
-
-	"go.opencensus.io/stats"
-	"go.opencensus.io/stats/internal"
-	"go.opencensus.io/tag"
-)
-
-type command interface {
-	handleCommand(w *worker)
-}
-
-// getViewByNameReq is the command to get a view given its name.
-type getViewByNameReq struct {
-	name string
-	c    chan *getViewByNameResp
-}
-
-type getViewByNameResp struct {
-	v *View
-}
-
-func (cmd *getViewByNameReq) handleCommand(w *worker) {
-	v := w.views[cmd.name]
-	if v == nil {
-		cmd.c <- &getViewByNameResp{nil}
-		return
-	}
-	cmd.c <- &getViewByNameResp{v.view}
-}
-
-// registerViewReq is the command to register a view.
-type registerViewReq struct {
-	views []*View
-	err   chan error
-}
-
-func (cmd *registerViewReq) handleCommand(w *worker) {
-	for _, v := range cmd.views {
-		if err := v.canonicalize(); err != nil {
-			cmd.err <- err
-			return
-		}
-	}
-	var errstr []string
-	for _, view := range cmd.views {
-		vi, err := w.tryRegisterView(view)
-		if err != nil {
-			errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err))
-			continue
-		}
-		internal.SubscriptionReporter(view.Measure.Name())
-		vi.subscribe()
-	}
-	if len(errstr) > 0 {
-		cmd.err <- errors.New(strings.Join(errstr, "\n"))
-	} else {
-		cmd.err <- nil
-	}
-}
-
-// unregisterFromViewReq is the command to unregister from a view. It has no
-// impact on the data collection for clients that are pulling data from the
-// library.
-type unregisterFromViewReq struct {
-	views []string
-	done  chan struct{}
-}
-
-func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
-	for _, name := range cmd.views {
-		vi, ok := w.views[name]
-		if !ok {
-			continue
-		}
-
-		// Report pending data for this view before removing it.
-		w.reportView(vi)
-
-		vi.unsubscribe()
-		if !vi.isSubscribed() {
-			// this was the last subscription and view is not collecting anymore.
-			// The collected data can be cleared.
-			vi.clearRows()
-		}
-		w.unregisterView(vi)
-	}
-	cmd.done <- struct{}{}
-}
-
-// retrieveDataReq is the command to retrieve data for a view.
-type retrieveDataReq struct {
-	now time.Time
-	v   string
-	c   chan *retrieveDataResp
-}
-
-type retrieveDataResp struct {
-	rows []*Row
-	err  error
-}
-
-func (cmd *retrieveDataReq) handleCommand(w *worker) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	vi, ok := w.views[cmd.v]
-	if !ok {
-		cmd.c <- &retrieveDataResp{
-			nil,
-			fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v),
-		}
-		return
-	}
-
-	if !vi.isSubscribed() {
-		cmd.c <- &retrieveDataResp{
-			nil,
-			fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v),
-		}
-		return
-	}
-	cmd.c <- &retrieveDataResp{
-		vi.collectedRows(),
-		nil,
-	}
-}
-
-// recordReq is the command to record data related to multiple measures
-// at once.
-type recordReq struct { - tm *tag.Map - ms []stats.Measurement - attachments map[string]interface{} - t time.Time -} - -func (cmd *recordReq) handleCommand(w *worker) { - w.mu.Lock() - defer w.mu.Unlock() - for _, m := range cmd.ms { - if (m == stats.Measurement{}) { // not registered - continue - } - ref := w.getMeasureRef(m.Measure().Name()) - for v := range ref.views { - v.addSample(cmd.tm, m.Value(), cmd.attachments, cmd.t) - } - } -} - -// setReportingPeriodReq is the command to modify the duration between -// reporting the collected data to the registered clients. -type setReportingPeriodReq struct { - d time.Duration - c chan bool -} - -func (cmd *setReportingPeriodReq) handleCommand(w *worker) { - w.timer.Stop() - if cmd.d <= 0 { - w.timer = time.NewTicker(defaultReportingDuration) - } else { - w.timer = time.NewTicker(cmd.d) - } - cmd.c <- true -} diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go deleted file mode 100644 index b27d1b26b1..0000000000 --- a/vendor/go.opencensus.io/tag/context.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "context" -) - -// FromContext returns the tag map stored in the context. -func FromContext(ctx context.Context) *Map { - // The returned tag map shouldn't be mutated. - ts := ctx.Value(mapCtxKey) - if ts == nil { - return nil - } - return ts.(*Map) -} - -// NewContext creates a new context with the given tag map. -// To propagate a tag map to downstream methods and downstream RPCs, add a tag map -// to the current context. NewContext will return a copy of the current context, -// and put the tag map into the returned one. -// If there is already a tag map in the current context, it will be replaced with m. -func NewContext(ctx context.Context, m *Map) context.Context { - return context.WithValue(ctx, mapCtxKey, m) -} - -type ctxKey struct{} - -var mapCtxKey = ctxKey{} diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go deleted file mode 100644 index da16b74e4d..0000000000 --- a/vendor/go.opencensus.io/tag/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -/* -Package tag contains OpenCensus tags. - -Tags are key-value pairs. Tags provide additional cardinality to -the OpenCensus instrumentation data. 
- -Tags can be propagated on the wire and in the same -process via context.Context. Encode and Decode should be -used to represent tags into their binary propagation form. -*/ -package tag // import "go.opencensus.io/tag" diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go deleted file mode 100644 index 71ec913657..0000000000 --- a/vendor/go.opencensus.io/tag/key.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -// Key represents a tag key. -type Key struct { - name string -} - -// NewKey creates or retrieves a string key identified by name. -// Calling NewKey more than once with the same name returns the same key. -func NewKey(name string) (Key, error) { - if !checkKeyName(name) { - return Key{}, errInvalidKeyName - } - return Key{name: name}, nil -} - -// MustNewKey returns a key with the given name, and panics if name is an invalid key name. -func MustNewKey(name string) Key { - k, err := NewKey(name) - if err != nil { - panic(err) - } - return k -} - -// Name returns the name of the key. -func (k Key) Name() string { - return k.name -} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go deleted file mode 100644 index 0272ef85a4..0000000000 --- a/vendor/go.opencensus.io/tag/map.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "bytes" - "context" - "fmt" - "sort" -) - -// Tag is a key value pair that can be propagated on wire. -type Tag struct { - Key Key - Value string -} - -type tagContent struct { - value string - m metadatas -} - -// Map is a map of tags. Use New to create a context containing -// a new Map. -type Map struct { - m map[Key]tagContent -} - -// Value returns the value for the key if a value for the key exists. 
-func (m *Map) Value(k Key) (string, bool) {
-	if m == nil {
-		return "", false
-	}
-	v, ok := m.m[k]
-	return v.value, ok
-}
-
-func (m *Map) String() string {
-	if m == nil {
-		return "nil"
-	}
-	keys := make([]Key, 0, len(m.m))
-	for k := range m.m {
-		keys = append(keys, k)
-	}
-	sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() })
-
-	var buffer bytes.Buffer
-	buffer.WriteString("{ ")
-	for _, k := range keys {
-		buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k]))
-	}
-	buffer.WriteString(" }")
-	return buffer.String()
-}
-
-func (m *Map) insert(k Key, v string, md metadatas) {
-	if _, ok := m.m[k]; ok {
-		return
-	}
-	m.m[k] = tagContent{value: v, m: md}
-}
-
-func (m *Map) update(k Key, v string, md metadatas) {
-	if _, ok := m.m[k]; ok {
-		m.m[k] = tagContent{value: v, m: md}
-	}
-}
-
-func (m *Map) upsert(k Key, v string, md metadatas) {
-	m.m[k] = tagContent{value: v, m: md}
-}
-
-func (m *Map) delete(k Key) {
-	delete(m.m, k)
-}
-
-func newMap() *Map {
-	return &Map{m: make(map[Key]tagContent)}
-}
-
-// Mutator modifies a tag map.
-type Mutator interface {
-	Mutate(t *Map) (*Map, error)
-}
-
-// Insert returns a mutator that inserts a
-// value associated with k. If k already exists in the tag map,
-// the mutator doesn't update the value.
-// Metadata applies metadata to the tag. It is optional.
-// Metadatas are applied in the order in which they are provided.
-// If more than one metadata updates the same attribute then
-// the update from the last metadata prevails.
-func Insert(k Key, v string, mds ...Metadata) Mutator {
-	return &mutator{
-		fn: func(m *Map) (*Map, error) {
-			if !checkValue(v) {
-				return nil, errInvalidValue
-			}
-			m.insert(k, v, createMetadatas(mds...))
-			return m, nil
-		},
-	}
-}
-
-// Update returns a mutator that updates the
-// value of the tag associated with k with v. If k doesn't
-// exist in the tag map, the mutator doesn't insert the value.
-// Metadata applies metadata to the tag. It is optional.
-// Metadatas are applied in the order in which they are provided.
-// If more than one metadata updates the same attribute then
-// the update from the last metadata prevails.
-func Update(k Key, v string, mds ...Metadata) Mutator {
-	return &mutator{
-		fn: func(m *Map) (*Map, error) {
-			if !checkValue(v) {
-				return nil, errInvalidValue
-			}
-			m.update(k, v, createMetadatas(mds...))
-			return m, nil
-		},
-	}
-}
-
-// Upsert returns a mutator that upserts the
-// value of the tag associated with k with v. It inserts the
-// value if k doesn't exist already. It mutates the value
-// if k already exists.
-// Metadata applies metadata to the tag. It is optional.
-// Metadatas are applied in the order in which they are provided.
-// If more than one metadata updates the same attribute then
-// the update from the last metadata prevails.
-func Upsert(k Key, v string, mds ...Metadata) Mutator {
-	return &mutator{
-		fn: func(m *Map) (*Map, error) {
-			if !checkValue(v) {
-				return nil, errInvalidValue
-			}
-			m.upsert(k, v, createMetadatas(mds...))
-			return m, nil
-		},
-	}
-}
-
-func createMetadatas(mds ...Metadata) metadatas {
-	var metas metadatas
-	if len(mds) > 0 {
-		for _, md := range mds {
-			if md != nil {
-				md(&metas)
-			}
-		}
-	} else {
-		WithTTL(TTLUnlimitedPropagation)(&metas)
-	}
-	return metas
-
-}
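How the three mutators compose in practice: Insert is a no-op once the key exists, Update only touches existing keys, and Upsert always wins. A hedged usage sketch, assuming go.opencensus.io is importable:

```go
package main

import (
	"context"
	"fmt"

	"go.opencensus.io/tag"
)

func main() {
	method := tag.MustNewKey("method")

	// Mutators run in order: Insert sets "GET", then Upsert replaces it.
	ctx, err := tag.New(context.Background(),
		tag.Insert(method, "GET"),
		tag.Upsert(method, "POST"),
	)
	if err != nil {
		panic(err) // e.g. a non-ASCII or over-long value
	}

	if v, ok := tag.FromContext(ctx).Value(method); ok {
		fmt.Println(v) // "POST"
	}
}
```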
-
-// Delete returns a mutator that deletes
-// the value associated with k.
-func Delete(k Key) Mutator {
-	return &mutator{
-		fn: func(m *Map) (*Map, error) {
-			m.delete(k)
-			return m, nil
-		},
-	}
-}
-
-// New returns a new context that contains a tag map
-// originated from the incoming context and modified
-// with the provided mutators.
-func New(ctx context.Context, mutator ...Mutator) (context.Context, error) {
-	m := newMap()
-	orig := FromContext(ctx)
-	if orig != nil {
-		for k, v := range orig.m {
-			if !checkKeyName(k.Name()) {
-				return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName)
-			}
-			if !checkValue(v.value) {
-				return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue)
-			}
-			m.insert(k, v.value, v.m)
-		}
-	}
-	var err error
-	for _, mod := range mutator {
-		m, err = mod.Mutate(m)
-		if err != nil {
-			return ctx, err
-		}
-	}
-	return NewContext(ctx, m), nil
-}
-
-// Do is similar to pprof.Do: a convenience for installing the tags
-// from the context as Go profiler labels. This allows you to
-// correlate runtime profiling with stats.
-//
-// It converts the key/values from the given map to Go profiler labels
-// and calls pprof.Do.
-//
-// Do does nothing if your Go version is below 1.9.
-func Do(ctx context.Context, f func(ctx context.Context)) {
-	do(ctx, f)
-}
-
-type mutator struct {
-	fn func(t *Map) (*Map, error)
-}
-
-func (m *mutator) Mutate(t *Map) (*Map, error) {
-	return m.fn(t)
-}
diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go
deleted file mode 100644
index c242e695c8..0000000000
--- a/vendor/go.opencensus.io/tag/map_codec.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-import (
-	"encoding/binary"
-	"fmt"
-)
-
-// keyType defines the types of keys allowed. Currently only keyTypeString is
-// supported.
-type keyType byte
-
-const (
-	keyTypeString keyType = iota
-	keyTypeInt64
-	keyTypeTrue
-	keyTypeFalse
-
-	tagsVersionID = byte(0)
-)
-
-type encoderGRPC struct {
-	buf               []byte
-	writeIdx, readIdx int
-}
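The encoder below uses plain length-prefixed framing: each string is written as a uvarint length followed by the raw bytes. A standalone sketch of the same framing using only encoding/binary (the function names here are illustrative):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// appendStringWithVarintLen writes uvarint(len(s)) followed by the bytes of s.
func appendStringWithVarintLen(buf []byte, s string) []byte {
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(s)))
	buf = append(buf, lenBuf[:n]...)
	return append(buf, s...)
}

// readStringWithVarintLen reverses the framing, returning the string and
// the remaining unread bytes.
func readStringWithVarintLen(buf []byte) (string, []byte, error) {
	length, n := binary.Uvarint(buf)
	if n <= 0 {
		return "", nil, fmt.Errorf("malformed varint length")
	}
	end := n + int(length)
	if end > len(buf) {
		return "", nil, fmt.Errorf("truncated value")
	}
	return string(buf[n:end]), buf[end:], nil
}

func main() {
	b := appendStringWithVarintLen(nil, "method")
	s, rest, err := readStringWithVarintLen(b)
	fmt.Println(s, len(rest), err) // method 0 <nil>
}
```

-
-// writeTagString writes the key-type byte for a string tag (keyTypeString),
-// followed by the varint-length-prefixed key string and value string.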
-func (eg *encoderGRPC) writeTagString(k, v string) { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k) - eg.writeStringWithVarintLen(v) -} - -func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { - eg.writeByte(byte(keyTypeInt64)) - eg.writeStringWithVarintLen(k) - eg.writeUint64(i) -} - -func (eg *encoderGRPC) writeTagTrue(k string) { - eg.writeByte(byte(keyTypeTrue)) - eg.writeStringWithVarintLen(k) -} - -func (eg *encoderGRPC) writeTagFalse(k string) { - eg.writeByte(byte(keyTypeFalse)) - eg.writeStringWithVarintLen(k) -} - -func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { - length := len(bytes) - - eg.growIfRequired(binary.MaxVarintLen64 + length) - eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) - copy(eg.buf[eg.writeIdx:], bytes) - eg.writeIdx += length -} - -func (eg *encoderGRPC) writeStringWithVarintLen(s string) { - length := len(s) - - eg.growIfRequired(binary.MaxVarintLen64 + length) - eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) - copy(eg.buf[eg.writeIdx:], s) - eg.writeIdx += length -} - -func (eg *encoderGRPC) writeByte(v byte) { - eg.growIfRequired(1) - eg.buf[eg.writeIdx] = v - eg.writeIdx++ -} - -func (eg *encoderGRPC) writeUint32(i uint32) { - eg.growIfRequired(4) - binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) - eg.writeIdx += 4 -} - -func (eg *encoderGRPC) writeUint64(i uint64) { - eg.growIfRequired(8) - binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) - eg.writeIdx += 8 -} - -func (eg *encoderGRPC) readByte() byte { - b := eg.buf[eg.readIdx] - eg.readIdx++ - return b -} - -func (eg *encoderGRPC) readUint32() uint32 { - i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) - eg.readIdx += 4 - return i -} - -func (eg *encoderGRPC) readUint64() uint64 { - i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) - eg.readIdx += 8 - return i -} - -func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { - if eg.readEnded() { - return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) - } - length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) - if valueStart <= 0 { - return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) - } - - valueStart += eg.readIdx - valueEnd := valueStart + int(length) - if valueEnd > len(eg.buf) { - return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) - } - - eg.readIdx = valueEnd - return eg.buf[valueStart:valueEnd], nil -} - -func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { - bytes, err := eg.readBytesWithVarintLen() - if err != nil { - return "", err - } - return string(bytes), nil -} - -func (eg *encoderGRPC) growIfRequired(expected int) { - if len(eg.buf)-eg.writeIdx < expected { - tmp := make([]byte, 2*(len(eg.buf)+1)+expected) - copy(tmp, eg.buf) - eg.buf = tmp - } -} - -func (eg *encoderGRPC) readEnded() bool { - return eg.readIdx >= len(eg.buf) -} - -func (eg *encoderGRPC) bytes() []byte { - return eg.buf[:eg.writeIdx] -} - -// Encode encodes the tag map into a []byte. It is useful to propagate -// the tag maps on wire in binary format. 
-func Encode(m *Map) []byte { - if m == nil { - return nil - } - eg := &encoderGRPC{ - buf: make([]byte, len(m.m)), - } - eg.writeByte(tagsVersionID) - for k, v := range m.m { - if v.m.ttl.ttl == valueTTLUnlimitedPropagation { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k.name) - eg.writeBytesWithVarintLen([]byte(v.value)) - } - } - return eg.bytes() -} - -// Decode decodes the given []byte into a tag map. -func Decode(bytes []byte) (*Map, error) { - ts := newMap() - err := DecodeEach(bytes, ts.upsert) - if err != nil { - // no partial failures - return nil, err - } - return ts, nil -} - -// DecodeEach decodes the given serialized tag map, calling handler for each -// tag key and value decoded. -func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { - eg := &encoderGRPC{ - buf: bytes, - } - if len(eg.buf) == 0 { - return nil - } - - version := eg.readByte() - if version > tagsVersionID { - return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) - } - - for !eg.readEnded() { - typ := keyType(eg.readByte()) - - if typ != keyTypeString { - return fmt.Errorf("cannot decode: invalid key type: %q", typ) - } - - k, err := eg.readBytesWithVarintLen() - if err != nil { - return err - } - - v, err := eg.readBytesWithVarintLen() - if err != nil { - return err - } - - key, err := NewKey(string(k)) - if err != nil { - return err - } - val := string(v) - if !checkValue(val) { - return errInvalidValue - } - fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go deleted file mode 100644 index 6571a583ea..0000000000 --- a/vendor/go.opencensus.io/tag/metadata.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -const ( - // valueTTLNoPropagation prevents tag from propagating. - valueTTLNoPropagation = 0 - - // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. - valueTTLUnlimitedPropagation = -1 -) - -// TTL is metadata that specifies number of hops a tag can propagate. -// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata -type TTL struct { - ttl int -} - -var ( - // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. - TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} - - // TTLNoPropagation is TTL metadata that prevents tag from propagating. - TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} -) - -type metadatas struct { - ttl TTL -} - -// Metadata applies metadatas specified by the function. -type Metadata func(*metadatas) - -// WithTTL applies metadata with provided ttl. 
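Tying the TTL metadata above back to the codec: Encode only emits tags whose TTL is unlimited propagation, so a tag created with TTLNoPropagation never reaches the wire. A hedged round-trip sketch, assuming the go.opencensus.io module is importable:

```go
package main

import (
	"context"
	"fmt"

	"go.opencensus.io/tag"
)

func main() {
	keep := tag.MustNewKey("keep")
	drop := tag.MustNewKey("drop")

	ctx, err := tag.New(context.Background(),
		tag.Upsert(keep, "yes"), // no metadata => defaults to TTLUnlimitedPropagation
		tag.Upsert(drop, "no", tag.WithTTL(tag.TTLNoPropagation)),
	)
	if err != nil {
		panic(err)
	}

	// Encode skips "drop" because its TTL forbids propagation.
	wire := tag.Encode(tag.FromContext(ctx))
	decoded, err := tag.Decode(wire)
	if err != nil {
		panic(err)
	}

	_, kept := decoded.Value(keep)
	_, dropped := decoded.Value(drop)
	fmt.Println(kept, dropped) // true false
}
```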
-func WithTTL(ttl TTL) Metadata { - return func(m *metadatas) { - m.ttl = ttl - } -} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go deleted file mode 100644 index 8fb17226fe..0000000000 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.9 -// +build go1.9 - -package tag - -import ( - "context" - "runtime/pprof" -) - -func do(ctx context.Context, f func(ctx context.Context)) { - m := FromContext(ctx) - keyvals := make([]string, 0, 2*len(m.m)) - for k, v := range m.m { - keyvals = append(keyvals, k.Name(), v.value) - } - pprof.Do(ctx, pprof.Labels(keyvals...), f) -} diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go deleted file mode 100644 index e28cf13cde..0000000000 --- a/vendor/go.opencensus.io/tag/profile_not19.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.9 -// +build !go1.9 - -package tag - -import "context" - -func do(ctx context.Context, f func(ctx context.Context)) { - f(ctx) -} diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go deleted file mode 100644 index 0939fc6748..0000000000 --- a/vendor/go.opencensus.io/tag/validate.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tag - -import "errors" - -const ( - maxKeyLength = 255 - - // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). 
- validKeyValueMin = 32 - validKeyValueMax = 126 -) - -var ( - errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") - errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") -) - -func checkKeyName(name string) bool { - if len(name) == 0 { - return false - } - if len(name) > maxKeyLength { - return false - } - return isASCII(name) -} - -func isASCII(s string) bool { - for _, c := range s { - if (c < validKeyValueMin) || (c > validKeyValueMax) { - return false - } - } - return true -} - -func checkValue(v string) bool { - if len(v) > maxKeyLength { - return false - } - return isASCII(v) -} diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go deleted file mode 100644 index c8e26ed635..0000000000 --- a/vendor/go.opencensus.io/trace/basetypes.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "fmt" - "time" -) - -type ( - // TraceID is a 16-byte identifier for a set of spans. - TraceID [16]byte - - // SpanID is an 8-byte identifier for a single span. - SpanID [8]byte -) - -func (t TraceID) String() string { - return fmt.Sprintf("%02x", t[:]) -} - -func (s SpanID) String() string { - return fmt.Sprintf("%02x", s[:]) -} - -// Annotation represents a text annotation with a set of attributes and a timestamp. -type Annotation struct { - Time time.Time - Message string - Attributes map[string]interface{} -} - -// Attribute represents a key-value pair on a span, link or annotation. -// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. -type Attribute struct { - key string - value interface{} -} - -// Key returns the attribute's key -func (a *Attribute) Key() string { - return a.key -} - -// Value returns the attribute's value -func (a *Attribute) Value() interface{} { - return a.value -} - -// BoolAttribute returns a bool-valued attribute. -func BoolAttribute(key string, value bool) Attribute { - return Attribute{key: key, value: value} -} - -// Int64Attribute returns an int64-valued attribute. -func Int64Attribute(key string, value int64) Attribute { - return Attribute{key: key, value: value} -} - -// Float64Attribute returns a float64-valued attribute. -func Float64Attribute(key string, value float64) Attribute { - return Attribute{key: key, value: value} -} - -// StringAttribute returns a string-valued attribute. -func StringAttribute(key string, value string) Attribute { - return Attribute{key: key, value: value} -} - -// LinkType specifies the relationship between the span that had the link -// added, and the linked span. -type LinkType int32 - -// LinkType values. -const ( - LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. - LinkTypeChild // The linked span is a child of the current span. - LinkTypeParent // The linked span is the parent of the current span. 
-)
-
-// Link represents a reference from one span to another span.
-type Link struct {
-	TraceID TraceID
-	SpanID  SpanID
-	Type    LinkType
-	// Attributes is a set of attributes on the link.
-	Attributes map[string]interface{}
-}
-
-// MessageEventType specifies the type of message event.
-type MessageEventType int32
-
-// MessageEventType values.
-const (
-	MessageEventTypeUnspecified MessageEventType = iota // Unknown event type.
-	MessageEventTypeSent                                // Indicates a sent RPC message.
-	MessageEventTypeRecv                                // Indicates a received RPC message.
-)
-
-// MessageEvent represents an event describing a message sent or received on the network.
-type MessageEvent struct {
-	Time                 time.Time
-	EventType            MessageEventType
-	MessageID            int64
-	UncompressedByteSize int64
-	CompressedByteSize   int64
-}
-
-// Status is the status of a Span.
-type Status struct {
-	// Code is a status code. Zero indicates success.
-	//
-	// If Code will be propagated to Google APIs, it ideally should be a value from
-	// https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto .
-	Code    int32
-	Message string
-}
diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go
deleted file mode 100644
index 775f8274fa..0000000000
--- a/vendor/go.opencensus.io/trace/config.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
-	"sync"
-
-	"go.opencensus.io/trace/internal"
-)
-
-// Config represents the global tracing configuration.
-type Config struct {
-	// DefaultSampler is the default sampler used when creating new spans.
-	DefaultSampler Sampler
-
-	// IDGenerator is for internal use only.
-	IDGenerator internal.IDGenerator
-
-	// MaxAnnotationEventsPerSpan is max number of annotation events per span
-	MaxAnnotationEventsPerSpan int
-
-	// MaxMessageEventsPerSpan is max number of message events per span
-	MaxMessageEventsPerSpan int
-
-	// MaxAttributesPerSpan is max number of attributes per span
-	MaxAttributesPerSpan int
-
-	// MaxLinksPerSpan is max number of links per span
-	MaxLinksPerSpan int
-}
-
-var configWriteMu sync.Mutex
-
-const (
-	// DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span
-	DefaultMaxAnnotationEventsPerSpan = 32
-
-	// DefaultMaxMessageEventsPerSpan is default max number of message events per span
-	DefaultMaxMessageEventsPerSpan = 128
-
-	// DefaultMaxAttributesPerSpan is default max number of attributes per span
-	DefaultMaxAttributesPerSpan = 32
-
-	// DefaultMaxLinksPerSpan is default max number of links per span
-	DefaultMaxLinksPerSpan = 32
-)
-
-// ApplyConfig applies changes to the global tracing configuration.
-//
-// Fields not provided in the given config are going to be preserved.
-func ApplyConfig(cfg Config) { - configWriteMu.Lock() - defer configWriteMu.Unlock() - c := *config.Load().(*Config) - if cfg.DefaultSampler != nil { - c.DefaultSampler = cfg.DefaultSampler - } - if cfg.IDGenerator != nil { - c.IDGenerator = cfg.IDGenerator - } - if cfg.MaxAnnotationEventsPerSpan > 0 { - c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan - } - if cfg.MaxMessageEventsPerSpan > 0 { - c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan - } - if cfg.MaxAttributesPerSpan > 0 { - c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan - } - if cfg.MaxLinksPerSpan > 0 { - c.MaxLinksPerSpan = cfg.MaxLinksPerSpan - } - config.Store(&c) -} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go deleted file mode 100644 index 7a1616a55c..0000000000 --- a/vendor/go.opencensus.io/trace/doc.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package trace contains support for OpenCensus distributed tracing. - -The following assumes a basic familiarity with OpenCensus concepts. -See http://opencensus.io - -# Exporting Traces - -To export collected tracing data, register at least one exporter. You can use -one of the provided exporters or write your own. - - trace.RegisterExporter(exporter) - -By default, traces will be sampled relatively rarely. To change the sampling -frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler -to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - -Be careful about using trace.AlwaysSample in a production application with -significant traffic: a new trace will be started and exported for every request. - -# Adding Spans to a Trace - -A trace consists of a tree of spans. In Go, the current span is carried in a -context.Context. - -It is common to want to capture all the activity of a function call in a span. For -this to work, the function must take a context.Context as a parameter. Add these two -lines to the top of the function: - - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() - -StartSpan will create a new top-level span if the context -doesn't contain another span, otherwise it will create a child span. -*/ -package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go deleted file mode 100644 index ffc264f23d..0000000000 --- a/vendor/go.opencensus.io/trace/evictedqueue.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-type evictedQueue struct {
-	queue        []interface{}
-	capacity     int
-	droppedCount int
-}
-
-func newEvictedQueue(capacity int) *evictedQueue {
-	eq := &evictedQueue{
-		capacity: capacity,
-		queue:    make([]interface{}, 0),
-	}
-
-	return eq
-}
-
-func (eq *evictedQueue) add(value interface{}) {
-	if len(eq.queue) == eq.capacity {
-		eq.queue = eq.queue[1:]
-		eq.droppedCount++
-	}
-	eq.queue = append(eq.queue, value)
-}
diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go
deleted file mode 100644
index e0d9a4b99e..0000000000
--- a/vendor/go.opencensus.io/trace/export.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-// Exporter is a type for functions that receive sampled trace spans.
-//
-// The ExportSpan method should be safe for concurrent use and should return
-// quickly; if an Exporter takes a significant amount of time to process a
-// SpanData, that work should be done on another goroutine.
-//
-// The SpanData should not be modified, but a pointer to it can be kept.
-type Exporter interface {
-	ExportSpan(s *SpanData)
-}
-
-type exportersMap map[Exporter]struct{}
-
-var (
-	exporterMu sync.Mutex
-	exporters  atomic.Value
-)
-
-// RegisterExporter adds to the list of Exporters that will receive sampled
-// trace spans.
-//
-// Binaries can register exporters, libraries shouldn't register exporters.
-func RegisterExporter(e Exporter) {
-	exporterMu.Lock()
-	new := make(exportersMap)
-	if old, ok := exporters.Load().(exportersMap); ok {
-		for k, v := range old {
-			new[k] = v
-		}
-	}
-	new[e] = struct{}{}
-	exporters.Store(new)
-	exporterMu.Unlock()
-}
-
-// UnregisterExporter removes the given Exporter from the list of Exporters
-// that receive sampled trace spans.
-func UnregisterExporter(e Exporter) {
-	exporterMu.Lock()
-	new := make(exportersMap)
-	if old, ok := exporters.Load().(exportersMap); ok {
-		for k, v := range old {
-			new[k] = v
-		}
-	}
-	delete(new, e)
-	exporters.Store(new)
-	exporterMu.Unlock()
-}
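RegisterExporter and UnregisterExporter above keep a copy-on-write map behind an atomic.Value: writers serialize on a mutex and swap in a fresh copy, while readers load the current map without taking any lock. A self-contained restatement of that pattern (names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type registry struct {
	mu  sync.Mutex   // serializes writers only
	set atomic.Value // holds a map[string]struct{}; readers never lock
}

// add copies the current map, mutates the copy, and atomically swaps it in.
func (r *registry) add(name string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	next := make(map[string]struct{})
	if prev, ok := r.set.Load().(map[string]struct{}); ok {
		for k := range prev {
			next[k] = struct{}{}
		}
	}
	next[name] = struct{}{}
	r.set.Store(next)
}

// has reads the current snapshot lock-free.
func (r *registry) has(name string) bool {
	m, _ := r.set.Load().(map[string]struct{})
	_, ok := m[name]
	return ok
}

func main() {
	var r registry
	r.add("stackdriver")
	fmt.Println(r.has("stackdriver")) // true
}
```

-
-// SpanData contains all the information collected by a Span.
-type SpanData struct {
-	SpanContext
-	ParentSpanID SpanID
-	SpanKind     int
-	Name         string
-	StartTime    time.Time
-	// The wall clock time of EndTime will be adjusted to always be offset
-	// from StartTime by the duration of the span.
-	EndTime time.Time
-	// The values of Attributes each have type string, bool, or int64.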
- Attributes map[string]interface{} - Annotations []Annotation - MessageEvents []MessageEvent - Status - Links []Link - HasRemoteParent bool - DroppedAttributeCount int - DroppedAnnotationCount int - DroppedMessageEventCount int - DroppedLinkCount int - - // ChildSpanCount holds the number of child span created for this span. - ChildSpanCount int -} diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go deleted file mode 100644 index 7e808d8f30..0000000000 --- a/vendor/go.opencensus.io/trace/internal/internal.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package internal provides trace internals. -package internal - -// IDGenerator allows custom generators for TraceId and SpanId. -type IDGenerator interface { - NewTraceID() [16]byte - NewSpanID() [8]byte -} diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go deleted file mode 100644 index 80095a5f6c..0000000000 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "github.com/golang/groupcache/lru" -) - -// A simple lru.Cache wrapper that tracks the keys of the current contents and -// the cumulative number of evicted items. 
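The lruMap type that follows builds on github.com/golang/groupcache/lru, using the OnEvicted hook to keep its own key set and dropped counter in sync with the cache. A small sketch of the underlying library behavior it relies on:

```go
package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	c := lru.New(2) // capacity 2; least-recently-used entry is evicted
	dropped := 0
	c.OnEvicted = func(key lru.Key, value interface{}) { dropped++ }

	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // evicts "a", firing OnEvicted

	_, ok := c.Get("a")
	fmt.Println(ok, dropped) // false 1
}
```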
-type lruMap struct { - cacheKeys map[lru.Key]bool - cache *lru.Cache - droppedCount int -} - -func newLruMap(size int) *lruMap { - lm := &lruMap{ - cacheKeys: make(map[lru.Key]bool), - cache: lru.New(size), - droppedCount: 0, - } - lm.cache.OnEvicted = func(key lru.Key, value interface{}) { - delete(lm.cacheKeys, key) - lm.droppedCount++ - } - return lm -} - -func (lm lruMap) len() int { - return lm.cache.Len() -} - -func (lm lruMap) keys() []interface{} { - keys := make([]interface{}, 0, len(lm.cacheKeys)) - for k := range lm.cacheKeys { - keys = append(keys, k) - } - return keys -} - -func (lm *lruMap) add(key, value interface{}) { - lm.cacheKeys[lru.Key(key)] = true - lm.cache.Add(lru.Key(key), value) -} - -func (lm *lruMap) get(key interface{}) (interface{}, bool) { - return lm.cache.Get(key) -} diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go deleted file mode 100644 index 1eb190a96a..0000000000 --- a/vendor/go.opencensus.io/trace/propagation/propagation.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package propagation implements the binary trace context format. -package propagation // import "go.opencensus.io/trace/propagation" - -// TODO: link to external spec document. - -// BinaryFormat format: -// -// Binary value: -// version_id: 1 byte representing the version id. -// -// For version_id = 0: -// -// version_format: -// field_format: -// -// Fields: -// -// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. -// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. -// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. -// -// Fields MUST be encoded using the field id order (smaller to higher). -// -// Valid value example: -// -// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, -// 98, 99, 100, 101, 102, 103, 104, 2, 1} -// -// version_id = 0; -// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} -// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; -// trace_options = {1}; - -import ( - "net/http" - - "go.opencensus.io/trace" -) - -// Binary returns the binary format representation of a SpanContext. -// -// If sc is the zero value, Binary returns nil. -func Binary(sc trace.SpanContext) []byte { - if sc == (trace.SpanContext{}) { - return nil - } - var b [29]byte - copy(b[2:18], sc.TraceID[:]) - b[18] = 1 - copy(b[19:27], sc.SpanID[:]) - b[27] = 2 - b[28] = uint8(sc.TraceOptions) - return b[:] -} - -// FromBinary returns the SpanContext represented by b. -// -// If b has an unsupported version ID or contains no TraceID, FromBinary -// returns with ok==false. 
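Before the decoder below, a worked example of the 29-byte layout Binary produces, reproducing the sample bytes listed in the format comment above (version 0; field 0 plus 16-byte trace ID; field 1 plus 8-byte span ID; field 2 plus 1-byte options). This is a sketch of the format, not the removed function itself:

```go
package main

import "fmt"

// encodeSpanContext lays out the bytes exactly as the removed Binary does.
func encodeSpanContext(traceID [16]byte, spanID [8]byte, opts byte) []byte {
	var b [29]byte
	// b[0] is the version (0); b[1] is the trace ID field number (0).
	copy(b[2:18], traceID[:])
	b[18] = 1 // span ID field number
	copy(b[19:27], spanID[:])
	b[27] = 2 // trace options field number
	b[28] = opts
	return b[:]
}

func main() {
	var traceID [16]byte
	var spanID [8]byte
	for i := range traceID {
		traceID[i] = byte(64 + i) // 64..79, as in the comment's example
	}
	for i := range spanID {
		spanID[i] = byte(97 + i) // 97..104
	}
	fmt.Println(encodeSpanContext(traceID, spanID, 1))
}
```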
-func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { - if len(b) == 0 || b[0] != 0 { - return trace.SpanContext{}, false - } - b = b[1:] - if len(b) >= 17 && b[0] == 0 { - copy(sc.TraceID[:], b[1:17]) - b = b[17:] - } else { - return trace.SpanContext{}, false - } - if len(b) >= 9 && b[0] == 1 { - copy(sc.SpanID[:], b[1:9]) - b = b[9:] - } - if len(b) >= 2 && b[0] == 2 { - sc.TraceOptions = trace.TraceOptions(b[1]) - } - return sc, true -} - -// HTTPFormat implementations propagate span contexts -// in HTTP requests. -// -// SpanContextFromRequest extracts a span context from incoming -// requests. -// -// SpanContextToRequest modifies the given request to include the given -// span context. -type HTTPFormat interface { - SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) - SpanContextToRequest(sc trace.SpanContext, req *http.Request) -} - -// TODO(jbd): Find a more representative but short name for HTTPFormat. diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go deleted file mode 100644 index 71c10f9e3b..0000000000 --- a/vendor/go.opencensus.io/trace/sampling.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "encoding/binary" -) - -const defaultSamplingProbability = 1e-4 - -// Sampler decides whether a trace should be sampled and exported. -type Sampler func(SamplingParameters) SamplingDecision - -// SamplingParameters contains the values passed to a Sampler. -type SamplingParameters struct { - ParentContext SpanContext - TraceID TraceID - SpanID SpanID - Name string - HasRemoteParent bool -} - -// SamplingDecision is the value returned by a Sampler. -type SamplingDecision struct { - Sample bool -} - -// ProbabilitySampler returns a Sampler that samples a given fraction of traces. -// -// It also samples spans whose parents are sampled. -func ProbabilitySampler(fraction float64) Sampler { - if !(fraction >= 0) { - fraction = 0 - } else if fraction >= 1 { - return AlwaysSample() - } - - traceIDUpperBound := uint64(fraction * (1 << 63)) - return Sampler(func(p SamplingParameters) SamplingDecision { - if p.ParentContext.IsSampled() { - return SamplingDecision{Sample: true} - } - x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 - return SamplingDecision{Sample: x < traceIDUpperBound} - }) -} - -// AlwaysSample returns a Sampler that samples every trace. -// Be careful about using this sampler in a production application with -// significant traffic: a new trace will be started and exported for every -// request. -func AlwaysSample() Sampler { - return func(p SamplingParameters) SamplingDecision { - return SamplingDecision{Sample: true} - } -} - -// NeverSample returns a Sampler that samples no traces. 
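The ProbabilitySampler above reduces the sampling decision to integer math: the fraction is scaled to a 63-bit upper bound and compared against the top bits of the trace ID, so a given trace ID always yields the same decision across processes. A self-contained restatement of that check:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// sampled mirrors the removed sampler: take the first 8 bytes of the
// trace ID, drop the sign bit, and compare against fraction * 2^63.
func sampled(traceID [16]byte, fraction float64) bool {
	upperBound := uint64(fraction * (1 << 63))
	x := binary.BigEndian.Uint64(traceID[0:8]) >> 1
	return x < upperBound
}

func main() {
	var low, high [16]byte
	low[0] = 0x01  // tiny leading byte => below the bound
	high[0] = 0xff // large leading byte => above the bound
	fmt.Println(sampled(low, 0.5))  // true
	fmt.Println(sampled(high, 0.5)) // false
}
```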
-func NeverSample() Sampler { - return func(p SamplingParameters) SamplingDecision { - return SamplingDecision{Sample: false} - } -} diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go deleted file mode 100644 index fbabad34c0..0000000000 --- a/vendor/go.opencensus.io/trace/spanbucket.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "time" -) - -// samplePeriod is the minimum time between accepting spans in a single bucket. -const samplePeriod = time.Second - -// defaultLatencies contains the default latency bucket bounds. -// TODO: consider defaults, make configurable -var defaultLatencies = [...]time.Duration{ - 10 * time.Microsecond, - 100 * time.Microsecond, - time.Millisecond, - 10 * time.Millisecond, - 100 * time.Millisecond, - time.Second, - 10 * time.Second, - time.Minute, -} - -// bucket is a container for a set of spans for a particular error code or latency range. -type bucket struct { - nextTime time.Time // next time we can accept a span - buffer []*SpanData // circular buffer of spans - nextIndex int // location next SpanData should be placed in buffer - overflow bool // whether the circular buffer has wrapped around -} - -func makeBucket(bufferSize int) bucket { - return bucket{ - buffer: make([]*SpanData, bufferSize), - } -} - -// add adds a span to the bucket, if nextTime has been reached. -func (b *bucket) add(s *SpanData) { - if s.EndTime.Before(b.nextTime) { - return - } - if len(b.buffer) == 0 { - return - } - b.nextTime = s.EndTime.Add(samplePeriod) - b.buffer[b.nextIndex] = s - b.nextIndex++ - if b.nextIndex == len(b.buffer) { - b.nextIndex = 0 - b.overflow = true - } -} - -// size returns the number of spans in the bucket. -func (b *bucket) size() int { - if b.overflow { - return len(b.buffer) - } - return b.nextIndex -} - -// span returns the ith span in the bucket. -func (b *bucket) span(i int) *SpanData { - if !b.overflow { - return b.buffer[i] - } - if i < len(b.buffer)-b.nextIndex { - return b.buffer[b.nextIndex+i] - } - return b.buffer[b.nextIndex+i-len(b.buffer)] -} - -// resize changes the size of the bucket to n, keeping up to n existing spans. -func (b *bucket) resize(n int) { - cur := b.size() - newBuffer := make([]*SpanData, n) - if cur < n { - for i := 0; i < cur; i++ { - newBuffer[i] = b.span(i) - } - b.buffer = newBuffer - b.nextIndex = cur - b.overflow = false - return - } - for i := 0; i < n; i++ { - newBuffer[i] = b.span(i + cur - n) - } - b.buffer = newBuffer - b.nextIndex = 0 - b.overflow = true -} - -// latencyBucket returns the appropriate bucket number for a given latency. -func latencyBucket(latency time.Duration) int { - i := 0 - for i < len(defaultLatencies) && latency >= defaultLatencies[i] { - i++ - } - return i -} - -// latencyBucketBounds returns the lower and upper bounds for a latency bucket -// number. 
-// -// The lower bound is inclusive, the upper bound is exclusive (except for the -// last bucket.) -func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { - if index == 0 { - return 0, defaultLatencies[index] - } - if index == len(defaultLatencies) { - return defaultLatencies[index-1], 1<<63 - 1 - } - return defaultLatencies[index-1], defaultLatencies[index] -} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go deleted file mode 100644 index e601f76f2c..0000000000 --- a/vendor/go.opencensus.io/trace/spanstore.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "sync" - "time" - - "go.opencensus.io/internal" -) - -const ( - maxBucketSize = 100000 - defaultBucketSize = 10 -) - -var ( - ssmu sync.RWMutex // protects spanStores - spanStores = make(map[string]*spanStore) -) - -// This exists purely to avoid exposing internal methods used by z-Pages externally. -type internalOnly struct{} - -func init() { - //TODO(#412): remove - internal.Trace = &internalOnly{} -} - -// ReportActiveSpans returns the active spans for the given name. -func (i internalOnly) ReportActiveSpans(name string) []*SpanData { - s := spanStoreForName(name) - if s == nil { - return nil - } - var out []*SpanData - s.mu.Lock() - defer s.mu.Unlock() - for activeSpan := range s.active { - if s, ok := activeSpan.(*span); ok { - out = append(out, s.makeSpanData()) - } - } - return out -} - -// ReportSpansByError returns a sample of error spans. -// -// If code is nonzero, only spans with that status code are returned. -func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { - s := spanStoreForName(name) - if s == nil { - return nil - } - var out []*SpanData - s.mu.Lock() - defer s.mu.Unlock() - if code != 0 { - if b, ok := s.errors[code]; ok { - for _, sd := range b.buffer { - if sd == nil { - break - } - out = append(out, sd) - } - } - } else { - for _, b := range s.errors { - for _, sd := range b.buffer { - if sd == nil { - break - } - out = append(out, sd) - } - } - } - return out -} - -// ConfigureBucketSizes sets the number of spans to keep per latency and error -// bucket for different span names. -func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { - for _, bc := range bcs { - latencyBucketSize := bc.MaxRequestsSucceeded - if latencyBucketSize < 0 { - latencyBucketSize = 0 - } - if latencyBucketSize > maxBucketSize { - latencyBucketSize = maxBucketSize - } - errorBucketSize := bc.MaxRequestsErrors - if errorBucketSize < 0 { - errorBucketSize = 0 - } - if errorBucketSize > maxBucketSize { - errorBucketSize = maxBucketSize - } - spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) - } -} - -// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. 
-func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { - out := make(map[string]internal.PerMethodSummary) - ssmu.RLock() - defer ssmu.RUnlock() - for name, s := range spanStores { - s.mu.Lock() - p := internal.PerMethodSummary{ - Active: len(s.active), - } - for code, b := range s.errors { - p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ - ErrorCode: code, - Size: b.size(), - }) - } - for i, b := range s.latency { - min, max := latencyBucketBounds(i) - p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ - MinLatency: min, - MaxLatency: max, - Size: b.size(), - }) - } - s.mu.Unlock() - out[name] = p - } - return out -} - -// ReportSpansByLatency returns a sample of successful spans. -// -// minLatency is the minimum latency of spans to be returned. -// maxLatency, if nonzero, is the maximum latency of spans to be returned. -func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { - s := spanStoreForName(name) - if s == nil { - return nil - } - var out []*SpanData - s.mu.Lock() - defer s.mu.Unlock() - for i, b := range s.latency { - min, max := latencyBucketBounds(i) - if i+1 != len(s.latency) && max <= minLatency { - continue - } - if maxLatency != 0 && maxLatency < min { - continue - } - for _, sd := range b.buffer { - if sd == nil { - break - } - if minLatency != 0 || maxLatency != 0 { - d := sd.EndTime.Sub(sd.StartTime) - if d < minLatency { - continue - } - if maxLatency != 0 && d > maxLatency { - continue - } - } - out = append(out, sd) - } - } - return out -} - -// spanStore keeps track of spans stored for a particular span name. -// -// It contains all active spans; a sample of spans for failed requests, -// categorized by error code; and a sample of spans for successful requests, -// bucketed by latency. -type spanStore struct { - mu sync.Mutex // protects everything below. - active map[SpanInterface]struct{} - errors map[int32]*bucket - latency []bucket - maxSpansPerErrorBucket int -} - -// newSpanStore creates a span store. -func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { - s := &spanStore{ - active: make(map[SpanInterface]struct{}), - latency: make([]bucket, len(defaultLatencies)+1), - maxSpansPerErrorBucket: errorBucketSize, - } - for i := range s.latency { - s.latency[i] = makeBucket(latencyBucketSize) - } - return s -} - -// spanStoreForName returns the spanStore for the given name. -// -// It returns nil if it doesn't exist. -func spanStoreForName(name string) *spanStore { - var s *spanStore - ssmu.RLock() - s, _ = spanStores[name] - ssmu.RUnlock() - return s -} - -// spanStoreForNameCreateIfNew returns the spanStore for the given name. -// -// It creates it if it didn't exist. -func spanStoreForNameCreateIfNew(name string) *spanStore { - ssmu.RLock() - s, ok := spanStores[name] - ssmu.RUnlock() - if ok { - return s - } - ssmu.Lock() - defer ssmu.Unlock() - s, ok = spanStores[name] - if ok { - return s - } - s = newSpanStore(name, defaultBucketSize, defaultBucketSize) - spanStores[name] = s - return s -} - -// spanStoreSetSize resizes the spanStore for the given name. -// -// It creates it if it didn't exist. 
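Both `spanStoreForNameCreateIfNew` above and `spanStoreSetSize` just below use the same fast-read-then-recheck locking idiom. A generic, runnable sketch of it; `getOrCreate` and `stores` are illustrative names:

```go
// Generic sketch of the double-checked locking idiom used above.
package main

import (
	"fmt"
	"sync"
)

var (
	mu     sync.RWMutex
	stores = make(map[string]*int)
)

func getOrCreate(name string) *int {
	mu.RLock() // fast path: read lock only
	s, ok := stores[name]
	mu.RUnlock()
	if ok {
		return s
	}
	mu.Lock()
	defer mu.Unlock()
	// Re-check under the write lock: another goroutine may have created
	// the entry between our RUnlock and Lock.
	if s, ok = stores[name]; ok {
		return s
	}
	s = new(int)
	stores[name] = s
	return s
}

func main() {
	fmt.Println(getOrCreate("a") == getOrCreate("a")) // true: one instance per name
}
```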
-func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { - ssmu.RLock() - s, ok := spanStores[name] - ssmu.RUnlock() - if ok { - s.resize(latencyBucketSize, errorBucketSize) - return - } - ssmu.Lock() - defer ssmu.Unlock() - s, ok = spanStores[name] - if ok { - s.resize(latencyBucketSize, errorBucketSize) - return - } - s = newSpanStore(name, latencyBucketSize, errorBucketSize) - spanStores[name] = s -} - -func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { - s.mu.Lock() - for i := range s.latency { - s.latency[i].resize(latencyBucketSize) - } - for _, b := range s.errors { - b.resize(errorBucketSize) - } - s.maxSpansPerErrorBucket = errorBucketSize - s.mu.Unlock() -} - -// add adds a span to the active bucket of the spanStore. -func (s *spanStore) add(span SpanInterface) { - s.mu.Lock() - s.active[span] = struct{}{} - s.mu.Unlock() -} - -// finished removes a span from the active set, and adds a corresponding -// SpanData to a latency or error bucket. -func (s *spanStore) finished(span SpanInterface, sd *SpanData) { - latency := sd.EndTime.Sub(sd.StartTime) - if latency < 0 { - latency = 0 - } - code := sd.Status.Code - - s.mu.Lock() - delete(s.active, span) - if code == 0 { - s.latency[latencyBucket(latency)].add(sd) - } else { - if s.errors == nil { - s.errors = make(map[int32]*bucket) - } - if b := s.errors[code]; b != nil { - b.add(sd) - } else { - b := makeBucket(s.maxSpansPerErrorBucket) - s.errors[code] = &b - b.add(sd) - } - } - s.mu.Unlock() -} diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go deleted file mode 100644 index ec60effd10..0000000000 --- a/vendor/go.opencensus.io/trace/status_codes.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -// Status codes for use with Span.SetStatus. These correspond to the status -// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto -const ( - StatusCodeOK = 0 - StatusCodeCancelled = 1 - StatusCodeUnknown = 2 - StatusCodeInvalidArgument = 3 - StatusCodeDeadlineExceeded = 4 - StatusCodeNotFound = 5 - StatusCodeAlreadyExists = 6 - StatusCodePermissionDenied = 7 - StatusCodeResourceExhausted = 8 - StatusCodeFailedPrecondition = 9 - StatusCodeAborted = 10 - StatusCodeOutOfRange = 11 - StatusCodeUnimplemented = 12 - StatusCodeInternal = 13 - StatusCodeUnavailable = 14 - StatusCodeDataLoss = 15 - StatusCodeUnauthenticated = 16 -) diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go deleted file mode 100644 index 861df9d391..0000000000 --- a/vendor/go.opencensus.io/trace/trace.go +++ /dev/null @@ -1,595 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "context" - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/rand" - "sync" - "sync/atomic" - "time" - - "go.opencensus.io/internal" - "go.opencensus.io/trace/tracestate" -) - -type tracer struct{} - -var _ Tracer = &tracer{} - -// Span represents a span of a trace. It has an associated SpanContext, and -// stores data accumulated while the span is active. -// -// Ideally users should interact with Spans by calling the functions in this -// package that take a Context parameter. -type span struct { - // data contains information recorded about the span. - // - // It will be non-nil if we are exporting the span or recording events for it. - // Otherwise, data is nil, and the Span is simply a carrier for the - // SpanContext, so that the trace ID is propagated. - data *SpanData - mu sync.Mutex // protects the contents of *data (but not the pointer value.) - spanContext SpanContext - - // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry - // is removed to create room for a new entry. - lruAttributes *lruMap - - // annotations are stored in FIFO queue capped by configured limit. - annotations *evictedQueue - - // messageEvents are stored in FIFO queue capped by configured limit. - messageEvents *evictedQueue - - // links are stored in FIFO queue capped by configured limit. - links *evictedQueue - - // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. - *spanStore - endOnce sync.Once - - executionTracerTaskEnd func() // ends the execution tracer span -} - -// IsRecordingEvents returns true if events are being recorded for this span. -// Use this check to avoid computing expensive annotations when they will never -// be used. -func (s *span) IsRecordingEvents() bool { - if s == nil { - return false - } - return s.data != nil -} - -// TraceOptions contains options associated with a trace span. -type TraceOptions uint32 - -// IsSampled returns true if the span will be exported. -func (sc SpanContext) IsSampled() bool { - return sc.TraceOptions.IsSampled() -} - -// setIsSampled sets the TraceOptions bit that determines whether the span will be exported. -func (sc *SpanContext) setIsSampled(sampled bool) { - if sampled { - sc.TraceOptions |= 1 - } else { - sc.TraceOptions &= ^TraceOptions(1) - } -} - -// IsSampled returns true if the span will be exported. -func (t TraceOptions) IsSampled() bool { - return t&1 == 1 -} - -// SpanContext contains the state that must propagate across process boundaries. -// -// SpanContext is not an implementation of context.Context. -// TODO: add reference to external Census docs for SpanContext. -type SpanContext struct { - TraceID TraceID - SpanID SpanID - TraceOptions TraceOptions - Tracestate *tracestate.Tracestate -} - -type contextKey struct{} - -// FromContext returns the Span stored in a context, or nil if there isn't one. -func (t *tracer) FromContext(ctx context.Context) *Span { - s, _ := ctx.Value(contextKey{}).(*Span) - return s -} - -// NewContext returns a new context with the given Span attached. 
-func (t *tracer) NewContext(parent context.Context, s *Span) context.Context { - return context.WithValue(parent, contextKey{}, s) -} - -// All available span kinds. Span kind must be either one of these values. -const ( - SpanKindUnspecified = iota - SpanKindServer - SpanKindClient -) - -// StartOptions contains options concerning how a span is started. -type StartOptions struct { - // Sampler to consult for this Span. If provided, it is always consulted. - // - // If not provided, then the behavior differs based on whether - // the parent of this Span is remote, local, or there is no parent. - // In the case of a remote parent or no parent, the - // default sampler (see Config) will be consulted. Otherwise, - // when there is a non-remote parent, no new sampling decision will be made: - // we will preserve the sampling of the parent. - Sampler Sampler - - // SpanKind represents the kind of a span. If none is set, - // SpanKindUnspecified is used. - SpanKind int -} - -// StartOption apply changes to StartOptions. -type StartOption func(*StartOptions) - -// WithSpanKind makes new spans to be created with the given kind. -func WithSpanKind(spanKind int) StartOption { - return func(o *StartOptions) { - o.SpanKind = spanKind - } -} - -// WithSampler makes new spans to be be created with a custom sampler. -// Otherwise, the global sampler is used. -func WithSampler(sampler Sampler) StartOption { - return func(o *StartOptions) { - o.Sampler = sampler - } -} - -// StartSpan starts a new child span of the current span in the context. If -// there is no span in the context, creates a new trace and span. -// -// Returned context contains the newly created span. You can use it to -// propagate the returned span in process. -func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { - var opts StartOptions - var parent SpanContext - if p := t.FromContext(ctx); p != nil { - if ps, ok := p.internal.(*span); ok { - ps.addChild() - } - parent = p.SpanContext() - } - for _, op := range o { - op(&opts) - } - span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) - - ctx, end := startExecutionTracerTask(ctx, name) - span.executionTracerTaskEnd = end - extSpan := NewSpan(span) - return t.NewContext(ctx, extSpan), extSpan -} - -// StartSpanWithRemoteParent starts a new child span of the span from the given parent. -// -// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is -// preferred for cases where the parent is propagated via an incoming request. -// -// Returned context contains the newly created span. You can use it to -// propagate the returned span in process. 
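The `Sampler` field documented on `StartOptions` above encodes a three-way rule that `startSpanInternal` (below) applies. A tiny standalone sketch of just that rule; `shouldConsultSampler` is an illustrative name, not a package API:

```go
package main

import "fmt"

// shouldConsultSampler is illustrative only. It encodes the rule from the
// StartOptions docs: make a fresh sampling decision for root spans, remote
// parents, or an explicitly supplied Sampler; otherwise inherit the local
// parent's decision unchanged.
func shouldConsultSampler(hasParent, remoteParent, explicitSampler bool) bool {
	return !hasParent || remoteParent || explicitSampler
}

func main() {
	fmt.Println(shouldConsultSampler(true, false, false))  // false: inherit from local parent
	fmt.Println(shouldConsultSampler(false, false, false)) // true: root span
	fmt.Println(shouldConsultSampler(true, true, false))   // true: remote parent
}
```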
-func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { - var opts StartOptions - for _, op := range o { - op(&opts) - } - span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) - ctx, end := startExecutionTracerTask(ctx, name) - span.executionTracerTaskEnd = end - extSpan := NewSpan(span) - return t.NewContext(ctx, extSpan), extSpan -} - -func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span { - s := &span{} - s.spanContext = parent - - cfg := config.Load().(*Config) - if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok { - // lazy initialization - gen.init() - } - - if !hasParent { - s.spanContext.TraceID = cfg.IDGenerator.NewTraceID() - } - s.spanContext.SpanID = cfg.IDGenerator.NewSpanID() - sampler := cfg.DefaultSampler - - if !hasParent || remoteParent || o.Sampler != nil { - // If this span is the child of a local span and no Sampler is set in the - // options, keep the parent's TraceOptions. - // - // Otherwise, consult the Sampler in the options if it is non-nil, otherwise - // the default sampler. - if o.Sampler != nil { - sampler = o.Sampler - } - s.spanContext.setIsSampled(sampler(SamplingParameters{ - ParentContext: parent, - TraceID: s.spanContext.TraceID, - SpanID: s.spanContext.SpanID, - Name: name, - HasRemoteParent: remoteParent}).Sample) - } - - if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() { - return s - } - - s.data = &SpanData{ - SpanContext: s.spanContext, - StartTime: time.Now(), - SpanKind: o.SpanKind, - Name: name, - HasRemoteParent: remoteParent, - } - s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) - s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) - s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) - s.links = newEvictedQueue(cfg.MaxLinksPerSpan) - - if hasParent { - s.data.ParentSpanID = parent.SpanID - } - if internal.LocalSpanStoreEnabled { - var ss *spanStore - ss = spanStoreForNameCreateIfNew(name) - if ss != nil { - s.spanStore = ss - ss.add(s) - } - } - - return s -} - -// End ends the span. -func (s *span) End() { - if s == nil { - return - } - if s.executionTracerTaskEnd != nil { - s.executionTracerTaskEnd() - } - if !s.IsRecordingEvents() { - return - } - s.endOnce.Do(func() { - exp, _ := exporters.Load().(exportersMap) - mustExport := s.spanContext.IsSampled() && len(exp) > 0 - if s.spanStore != nil || mustExport { - sd := s.makeSpanData() - sd.EndTime = internal.MonotonicEndTime(sd.StartTime) - if s.spanStore != nil { - s.spanStore.finished(s, sd) - } - if mustExport { - for e := range exp { - e.ExportSpan(sd) - } - } - } - }) -} - -// makeSpanData produces a SpanData representing the current state of the Span. -// It requires that s.data is non-nil. 
-func (s *span) makeSpanData() *SpanData { - var sd SpanData - s.mu.Lock() - sd = *s.data - if s.lruAttributes.len() > 0 { - sd.Attributes = s.lruAttributesToAttributeMap() - sd.DroppedAttributeCount = s.lruAttributes.droppedCount - } - if len(s.annotations.queue) > 0 { - sd.Annotations = s.interfaceArrayToAnnotationArray() - sd.DroppedAnnotationCount = s.annotations.droppedCount - } - if len(s.messageEvents.queue) > 0 { - sd.MessageEvents = s.interfaceArrayToMessageEventArray() - sd.DroppedMessageEventCount = s.messageEvents.droppedCount - } - if len(s.links.queue) > 0 { - sd.Links = s.interfaceArrayToLinksArray() - sd.DroppedLinkCount = s.links.droppedCount - } - s.mu.Unlock() - return &sd -} - -// SpanContext returns the SpanContext of the span. -func (s *span) SpanContext() SpanContext { - if s == nil { - return SpanContext{} - } - return s.spanContext -} - -// SetName sets the name of the span, if it is recording events. -func (s *span) SetName(name string) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.Name = name - s.mu.Unlock() -} - -// SetStatus sets the status of the span, if it is recording events. -func (s *span) SetStatus(status Status) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.Status = status - s.mu.Unlock() -} - -func (s *span) interfaceArrayToLinksArray() []Link { - linksArr := make([]Link, 0, len(s.links.queue)) - for _, value := range s.links.queue { - linksArr = append(linksArr, value.(Link)) - } - return linksArr -} - -func (s *span) interfaceArrayToMessageEventArray() []MessageEvent { - messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) - for _, value := range s.messageEvents.queue { - messageEventArr = append(messageEventArr, value.(MessageEvent)) - } - return messageEventArr -} - -func (s *span) interfaceArrayToAnnotationArray() []Annotation { - annotationArr := make([]Annotation, 0, len(s.annotations.queue)) - for _, value := range s.annotations.queue { - annotationArr = append(annotationArr, value.(Annotation)) - } - return annotationArr -} - -func (s *span) lruAttributesToAttributeMap() map[string]interface{} { - attributes := make(map[string]interface{}, s.lruAttributes.len()) - for _, key := range s.lruAttributes.keys() { - value, ok := s.lruAttributes.get(key) - if ok { - keyStr := key.(string) - attributes[keyStr] = value - } - } - return attributes -} - -func (s *span) copyToCappedAttributes(attributes []Attribute) { - for _, a := range attributes { - s.lruAttributes.add(a.key, a.value) - } -} - -func (s *span) addChild() { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.ChildSpanCount++ - s.mu.Unlock() -} - -// AddAttributes sets attributes in the span. -// -// Existing attributes whose keys appear in the attributes parameter are overwritten. -func (s *span) AddAttributes(attributes ...Attribute) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.copyToCappedAttributes(attributes) - s.mu.Unlock() -} - -func (s *span) printStringInternal(attributes []Attribute, str string) { - now := time.Now() - var am map[string]interface{} - if len(attributes) != 0 { - am = make(map[string]interface{}, len(attributes)) - for _, attr := range attributes { - am[attr.key] = attr.value - } - } - s.mu.Lock() - s.annotations.add(Annotation{ - Time: now, - Message: str, - Attributes: am, - }) - s.mu.Unlock() -} - -// Annotate adds an annotation with attributes. -// Attributes can be nil. 
-func (s *span) Annotate(attributes []Attribute, str string) { - if !s.IsRecordingEvents() { - return - } - s.printStringInternal(attributes, str) -} - -// Annotatef adds an annotation with attributes. -func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) { - if !s.IsRecordingEvents() { - return - } - s.printStringInternal(attributes, fmt.Sprintf(format, a...)) -} - -// AddMessageSendEvent adds a message send event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - now := time.Now() - s.mu.Lock() - s.messageEvents.add(MessageEvent{ - Time: now, - EventType: MessageEventTypeSent, - MessageID: messageID, - UncompressedByteSize: uncompressedByteSize, - CompressedByteSize: compressedByteSize, - }) - s.mu.Unlock() -} - -// AddMessageReceiveEvent adds a message receive event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - now := time.Now() - s.mu.Lock() - s.messageEvents.add(MessageEvent{ - Time: now, - EventType: MessageEventTypeRecv, - MessageID: messageID, - UncompressedByteSize: uncompressedByteSize, - CompressedByteSize: compressedByteSize, - }) - s.mu.Unlock() -} - -// AddLink adds a link to the span. -func (s *span) AddLink(l Link) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.links.add(l) - s.mu.Unlock() -} - -func (s *span) String() string { - if s == nil { - return "" - } - if s.data == nil { - return fmt.Sprintf("span %s", s.spanContext.SpanID) - } - s.mu.Lock() - str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name) - s.mu.Unlock() - return str -} - -var config atomic.Value // access atomically - -func init() { - config.Store(&Config{ - DefaultSampler: ProbabilitySampler(defaultSamplingProbability), - IDGenerator: &defaultIDGenerator{}, - MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, - MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, - MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, - MaxLinksPerSpan: DefaultMaxLinksPerSpan, - }) -} - -type defaultIDGenerator struct { - sync.Mutex - - // Please keep these as the first fields - // so that these 8 byte fields will be aligned on addresses - // divisible by 8, on both 32-bit and 64-bit machines when - // performing atomic increments and accesses. - // See: - // * https://github.com/census-instrumentation/opencensus-go/issues/587 - // * https://github.com/census-instrumentation/opencensus-go/issues/865 - // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG - nextSpanID uint64 - spanIDInc uint64 - - traceIDAdd [2]uint64 - traceIDRand *rand.Rand - - initOnce sync.Once -} - -// init initializes the generator on the first call to avoid consuming entropy -// unnecessarily. -func (gen *defaultIDGenerator) init() { - gen.initOnce.Do(func() { - // initialize traceID and spanID generators. 
- var rngSeed int64 - for _, p := range []interface{}{ - &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, - } { - binary.Read(crand.Reader, binary.LittleEndian, p) - } - gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) - gen.spanIDInc |= 1 - }) -} - -// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -func (gen *defaultIDGenerator) NewSpanID() [8]byte { - var id uint64 - for id == 0 { - id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) - } - var sid [8]byte - binary.LittleEndian.PutUint64(sid[:], id) - return sid -} - -// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence. -// mu should be held while this function is called. -func (gen *defaultIDGenerator) NewTraceID() [16]byte { - var tid [16]byte - // Construct the trace ID from two outputs of traceIDRand, with a constant - // added to each half for additional entropy. - gen.Lock() - binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0]) - binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1]) - gen.Unlock() - return tid -} diff --git a/vendor/go.opencensus.io/trace/trace_api.go b/vendor/go.opencensus.io/trace/trace_api.go deleted file mode 100644 index 9e2c3a9992..0000000000 --- a/vendor/go.opencensus.io/trace/trace_api.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2020, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "context" -) - -// DefaultTracer is the tracer used when package-level exported functions are invoked. -var DefaultTracer Tracer = &tracer{} - -// Tracer can start spans and access context functions. -type Tracer interface { - - // StartSpan starts a new child span of the current span in the context. If - // there is no span in the context, creates a new trace and span. - // - // Returned context contains the newly created span. You can use it to - // propagate the returned span in process. - StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) - - // StartSpanWithRemoteParent starts a new child span of the span from the given parent. - // - // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is - // preferred for cases where the parent is propagated via an incoming request. - // - // Returned context contains the newly created span. You can use it to - // propagate the returned span in process. - StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) - - // FromContext returns the Span stored in a context, or nil if there isn't one. - FromContext(ctx context.Context) *Span - - // NewContext returns a new context with the given Span attached. - NewContext(parent context.Context, s *Span) context.Context -} - -// StartSpan starts a new child span of the current span in the context. If -// there is no span in the context, creates a new trace and span. -// -// Returned context contains the newly created span. 
You can use it to -// propagate the returned span in process. -func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { - return DefaultTracer.StartSpan(ctx, name, o...) -} - -// StartSpanWithRemoteParent starts a new child span of the span from the given parent. -// -// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is -// preferred for cases where the parent is propagated via an incoming request. -// -// Returned context contains the newly created span. You can use it to -// propagate the returned span in process. -func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { - return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...) -} - -// FromContext returns the Span stored in a context, or a Span that is not -// recording events if there isn't one. -func FromContext(ctx context.Context) *Span { - return DefaultTracer.FromContext(ctx) -} - -// NewContext returns a new context with the given Span attached. -func NewContext(parent context.Context, s *Span) context.Context { - return DefaultTracer.NewContext(parent, s) -} - -// SpanInterface represents a span of a trace. It has an associated SpanContext, and -// stores data accumulated while the span is active. -// -// Ideally users should interact with Spans by calling the functions in this -// package that take a Context parameter. -type SpanInterface interface { - - // IsRecordingEvents returns true if events are being recorded for this span. - // Use this check to avoid computing expensive annotations when they will never - // be used. - IsRecordingEvents() bool - - // End ends the span. - End() - - // SpanContext returns the SpanContext of the span. - SpanContext() SpanContext - - // SetName sets the name of the span, if it is recording events. - SetName(name string) - - // SetStatus sets the status of the span, if it is recording events. - SetStatus(status Status) - - // AddAttributes sets attributes in the span. - // - // Existing attributes whose keys appear in the attributes parameter are overwritten. - AddAttributes(attributes ...Attribute) - - // Annotate adds an annotation with attributes. - // Attributes can be nil. - Annotate(attributes []Attribute, str string) - - // Annotatef adds an annotation with attributes. - Annotatef(attributes []Attribute, format string, a ...interface{}) - - // AddMessageSendEvent adds a message send event to the span. - // - // messageID is an identifier for the message, which is recommended to be - // unique in this span and the same between the send event and the receive - // event (this allows to identify a message between the sender and receiver). - // For example, this could be a sequence id. - AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) - - // AddMessageReceiveEvent adds a message receive event to the span. - // - // messageID is an identifier for the message, which is recommended to be - // unique in this span and the same between the send event and the receive - // event (this allows to identify a message between the sender and receiver). - // For example, this could be a sequence id. - AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) - - // AddLink adds a link to the span. - AddLink(l Link) - - // String prints a string representation of a span. 
- String() string -} - -// NewSpan is a convenience function for creating a *Span out of a *span -func NewSpan(s SpanInterface) *Span { - return &Span{internal: s} -} - -// Span is a struct wrapper around the SpanInt interface, which allows correctly handling -// nil spans, while also allowing the SpanInterface implementation to be swapped out. -type Span struct { - internal SpanInterface -} - -// Internal returns the underlying implementation of the Span -func (s *Span) Internal() SpanInterface { - return s.internal -} - -// IsRecordingEvents returns true if events are being recorded for this span. -// Use this check to avoid computing expensive annotations when they will never -// be used. -func (s *Span) IsRecordingEvents() bool { - if s == nil { - return false - } - return s.internal.IsRecordingEvents() -} - -// End ends the span. -func (s *Span) End() { - if s == nil { - return - } - s.internal.End() -} - -// SpanContext returns the SpanContext of the span. -func (s *Span) SpanContext() SpanContext { - if s == nil { - return SpanContext{} - } - return s.internal.SpanContext() -} - -// SetName sets the name of the span, if it is recording events. -func (s *Span) SetName(name string) { - if !s.IsRecordingEvents() { - return - } - s.internal.SetName(name) -} - -// SetStatus sets the status of the span, if it is recording events. -func (s *Span) SetStatus(status Status) { - if !s.IsRecordingEvents() { - return - } - s.internal.SetStatus(status) -} - -// AddAttributes sets attributes in the span. -// -// Existing attributes whose keys appear in the attributes parameter are overwritten. -func (s *Span) AddAttributes(attributes ...Attribute) { - if !s.IsRecordingEvents() { - return - } - s.internal.AddAttributes(attributes...) -} - -// Annotate adds an annotation with attributes. -// Attributes can be nil. -func (s *Span) Annotate(attributes []Attribute, str string) { - if !s.IsRecordingEvents() { - return - } - s.internal.Annotate(attributes, str) -} - -// Annotatef adds an annotation with attributes. -func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { - if !s.IsRecordingEvents() { - return - } - s.internal.Annotatef(attributes, format, a...) -} - -// AddMessageSendEvent adds a message send event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize) -} - -// AddMessageReceiveEvent adds a message receive event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize) -} - -// AddLink adds a link to the span. 
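Worth flagging before the remaining wrapper methods: `Span` guards each call with a nil or `IsRecordingEvents` check, so a nil `*Span` is safe to use everywhere. A minimal, runnable sketch of that nil-safe wrapper idiom, with made-up names:

```go
package main

import "fmt"

type impl interface{ Name() string }

// Wrapper mirrors the Span-over-SpanInterface shape above (names are
// illustrative): pointer methods check the receiver so a nil *Wrapper
// degrades to a no-op instead of panicking.
type Wrapper struct{ internal impl }

func (w *Wrapper) Name() string {
	if w == nil {
		return "" // safe default on a nil wrapper
	}
	return w.internal.Name()
}

type named string

func (n named) Name() string { return string(n) }

func main() {
	var w *Wrapper
	fmt.Println(w.Name() == "") // true: no panic on nil
	w = &Wrapper{internal: named("span")}
	fmt.Println(w.Name()) // span
}
```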
-func (s *Span) AddLink(l Link) { - if !s.IsRecordingEvents() { - return - } - s.internal.AddLink(l) -} - -// String prints a string representation of a span. -func (s *Span) String() string { - if s == nil { - return "" - } - return s.internal.String() -} diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go deleted file mode 100644 index b8fc1e495a..0000000000 --- a/vendor/go.opencensus.io/trace/trace_go11.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.11 -// +build go1.11 - -package trace - -import ( - "context" - t "runtime/trace" -) - -func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { - if !t.IsEnabled() { - // Avoid additional overhead if - // runtime/trace is not enabled. - return ctx, func() {} - } - nctx, task := t.NewTask(ctx, name) - return nctx, task.End -} diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go deleted file mode 100644 index da488fc874..0000000000 --- a/vendor/go.opencensus.io/trace/trace_nongo11.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.11 -// +build !go1.11 - -package trace - -import ( - "context" -) - -func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { - return ctx, func() {} -} diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go deleted file mode 100644 index 2d6c713eb3..0000000000 --- a/vendor/go.opencensus.io/trace/tracestate/tracestate.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tracestate implements support for the Tracestate header of the -// W3C TraceContext propagation format. 
-package tracestate - -import ( - "fmt" - "regexp" -) - -const ( - keyMaxSize = 256 - valueMaxSize = 256 - maxKeyValuePairs = 32 -) - -const ( - keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` -) - -var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) -var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) - -// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different -// vendors propagate additional information and inter-operate with their legacy Id formats. -type Tracestate struct { - entries []Entry -} - -// Entry represents one key-value pair in a list of key-value pair of Tracestate. -type Entry struct { - // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter, - // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and - // forward slashes /. - Key string - - // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the - // range 0x20 to 0x7E) except comma , and =. - Value string -} - -// Entries returns a slice of Entry. -func (ts *Tracestate) Entries() []Entry { - if ts == nil { - return nil - } - return ts.entries -} - -func (ts *Tracestate) remove(key string) *Entry { - for index, entry := range ts.entries { - if entry.Key == key { - ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) - return &entry - } - } - return nil -} - -func (ts *Tracestate) add(entries []Entry) error { - for _, entry := range entries { - ts.remove(entry.Key) - } - if len(ts.entries)+len(entries) > maxKeyValuePairs { - return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", - len(entries), len(ts.entries), maxKeyValuePairs) - } - ts.entries = append(entries, ts.entries...) - return nil -} - -func isValid(entry Entry) bool { - return keyValidationRegExp.MatchString(entry.Key) && - valueValidationRegExp.MatchString(entry.Value) -} - -func containsDuplicateKey(entries ...Entry) (string, bool) { - keyMap := make(map[string]int) - for _, entry := range entries { - if _, ok := keyMap[entry.Key]; ok { - return entry.Key, true - } - keyMap[entry.Key] = 1 - } - return "", false -} - -func areEntriesValid(entries ...Entry) (*Entry, bool) { - for _, entry := range entries { - if !isValid(entry) { - return &entry, false - } - } - return nil, true -} - -// New creates a Tracestate object from a parent and/or entries (key-value pair). -// Entries from the parent are copied if present. The entries passed to this function -// are inserted in front of those copied from the parent. If an entry copied from the -// parent contains the same key as one of the entry in entries then the entry copied -// from the parent is removed. See add func. -// -// An error is returned with nil Tracestate if -// 1. one or more entry in entries is invalid. -// 2. two or more entries in the input entries have the same key. -// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. -// (duplicate entry is counted only once). 
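A usage sketch of the `New` semantics described above and implemented just below. Since this diff deletes the package, it compiles only against the pre-removal vendor tree; the keys and values are made-up examples:

```go
// Requires the pre-removal vendored go.opencensus.io/trace/tracestate package.
package main

import (
	"fmt"

	"go.opencensus.io/trace/tracestate"
)

func main() {
	parent, _ := tracestate.New(nil, tracestate.Entry{Key: "congo", Value: "t61rcWkgMzE"})
	ts, _ := tracestate.New(parent, tracestate.Entry{Key: "rojo", Value: "00f067aa0ba902b7"})
	// Entries passed to New are inserted ahead of those copied from the parent.
	fmt.Println(ts.Entries()) // [{rojo 00f067aa0ba902b7} {congo t61rcWkgMzE}]
}
```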
-func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { - if parent == nil && len(entries) == 0 { - return nil, nil - } - if entry, ok := areEntriesValid(entries...); !ok { - return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) - } - - if key, duplicate := containsDuplicateKey(entries...); duplicate { - return nil, fmt.Errorf("contains duplicate keys (%s)", key) - } - - tracestate := Tracestate{} - - if parent != nil && len(parent.entries) > 0 { - tracestate.entries = append([]Entry{}, parent.entries...) - } - - err := tracestate.add(entries) - if err != nil { - return nil, err - } - return &tracestate, nil -} diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml new file mode 100644 index 0000000000..571116cc39 --- /dev/null +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -0,0 +1,19 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + +# Also update COVER_IGNORE_PKGS in the Makefile. +ignore: + - /internal/gen-atomicint/ + - /internal/gen-valuewrapper/ diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore new file mode 100644 index 0000000000..c3fa253893 --- /dev/null +++ b/vendor/go.uber.org/atomic/.gitignore @@ -0,0 +1,12 @@ +/bin +.DS_Store +/vendor +cover.html +cover.out +lint.log + +# Binaries +*.test + +# Profiling output +*.prof diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml new file mode 100644 index 0000000000..13d0a4f254 --- /dev/null +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -0,0 +1,27 @@ +sudo: false +language: go +go_import_path: go.uber.org/atomic + +env: + global: + - GO111MODULE=on + +matrix: + include: + - go: oldstable + - go: stable + env: LINT=1 + +cache: + directories: + - vendor + +before_install: + - go version + +script: + - test -z "$LINT" || make lint + - make cover + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md new file mode 100644 index 0000000000..24c0274dc3 --- /dev/null +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -0,0 +1,76 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.7.0] - 2020-09-14 +### Added +- Support JSON serialization and deserialization of primitive atomic types. +- Support Text marshalling and unmarshalling for string atomics. + +### Changed +- Disallow incorrect comparison of atomic values in a non-atomic way. + +### Removed +- Remove dependency on `golang.org/x/{lint, tools}`. + +## [1.6.0] - 2020-02-24 +### Changed +- Drop library dependency on `golang.org/x/{lint, tools}`. 
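Reviewer aside on the 1.7.0 entry above: the JSON support it lists is visible in `bool.go` later in this diff. A minimal round-trip sketch, assuming `go.uber.org/atomic` v1.7.0 on the module path:

```go
// Assumes go.uber.org/atomic v1.7.0.
package main

import (
	"encoding/json"
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	b := atomic.NewBool(true)
	out, err := json.Marshal(b) // delegates to Bool.MarshalJSON
	if err != nil {
		panic(err)
	}

	var in atomic.Bool
	if err := json.Unmarshal([]byte("false"), &in); err != nil {
		panic(err)
	}
	fmt.Println(string(out), in.Load()) // true false
}
```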
+
+## [1.5.1] - 2019-11-19
+- Fix bug where `Bool.CAS` and `Bool.Toggle` do not work correctly together,
+  causing `CAS` to fail even though the old value matches.
+
+## [1.5.0] - 2019-10-29
+### Changed
+- With Go modules, only the `go.uber.org/atomic` import path is supported now.
+  If you need to use the old import path, please add a `replace` directive to
+  your `go.mod`.
+
+## [1.4.0] - 2019-05-01
+### Added
+ - Add `atomic.Error` type for atomic operations on `error` values.
+
+## [1.3.2] - 2018-05-02
+### Added
+- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
+
+## [1.3.1] - 2017-11-14
+### Fixed
+- Revert optimization for `atomic.String.Store("")` which caused data races.
+
+## [1.3.0] - 2017-11-13
+### Added
+- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
+
+### Changed
+- Optimize `atomic.String.Store("")` by avoiding an allocation.
+
+## [1.2.0] - 2017-04-12
+### Added
+- Shadow `atomic.Value` from `sync/atomic`.
+
+## [1.1.0] - 2017-03-10
+### Added
+- Add atomic `Float64` type.
+
+### Changed
+- Support new `go.uber.org/atomic` import path.
+
+## [1.0.0] - 2016-07-18
+
+- Initial release.
+
+[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
+[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
+[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
+[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
+[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
+[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
+[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
+[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
+[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
+[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
+[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
new file mode 100644
index 0000000000..8765c9fbc6
--- /dev/null
+++ b/vendor/go.uber.org/atomic/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
new file mode 100644
index 0000000000..1b1376d425
--- /dev/null
+++ b/vendor/go.uber.org/atomic/Makefile
@@ -0,0 +1,78 @@
+# Directory to place `go install`ed binaries into.
+export GOBIN ?= $(shell pwd)/bin
+
+GOLINT = $(GOBIN)/golint
+GEN_ATOMICINT = $(GOBIN)/gen-atomicint
+GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper
+STATICCHECK = $(GOBIN)/staticcheck
+
+GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print)
+
+# Also update ignore section in .codecov.yml.
+COVER_IGNORE_PKGS = \
+	go.uber.org/atomic/internal/gen-atomicint \
+	go.uber.org/atomic/internal/gen-atomicwrapper
+
+.PHONY: build
+build:
+	go build ./...
+
+.PHONY: test
+test:
+	go test -race ./...
+
+.PHONY: gofmt
+gofmt:
+	$(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+	gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+	@[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
+
+$(GOLINT):
+	cd tools && go install golang.org/x/lint/golint
+
+$(STATICCHECK):
+	cd tools && go install honnef.co/go/tools/cmd/staticcheck
+
+$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*)
+	go build -o $@ ./internal/gen-atomicwrapper
+
+$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*)
+	go build -o $@ ./internal/gen-atomicint
+
+.PHONY: golint
+golint: $(GOLINT)
+	$(GOLINT) ./...
+
+.PHONY: staticcheck
+staticcheck: $(STATICCHECK)
+	$(STATICCHECK) ./...
+
+.PHONY: lint
+lint: gofmt golint staticcheck generatenodirty
+
+# comma separated list of packages to consider for code coverage.
+COVER_PKG = $(shell \
+	go list -find ./... | \
+	grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \
+	paste -sd, -)
+
+.PHONY: cover
+cover:
+	go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./...
+	go tool cover -html=cover.out -o cover.html
+
+.PHONY: generate
+generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
+	go generate ./...
+
+.PHONY: generatenodirty
+generatenodirty:
+	@[ -z "$$(git status --porcelain)" ] || ( \
+		echo "Working tree is dirty. Commit your changes first."; \
+		exit 1 )
+	@make generate
+	@status=$$(git status --porcelain); \
+	[ -z "$$status" ] || ( \
+		echo "Working tree is dirty after `make generate`:"; \
+		echo "$$status"; \
+		echo "Please ensure that the generated code is up-to-date." )
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
new file mode 100644
index 0000000000..ade0c20f16
--- /dev/null
+++ b/vendor/go.uber.org/atomic/README.md
@@ -0,0 +1,63 @@
+# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
+
+Simple wrappers for primitive types to enforce atomic access.
+
+## Installation
+
+```shell
+$ go get -u go.uber.org/atomic@v1
+```
+
+### Legacy Import Path
+
+As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way
+of using this package. If you are using Go modules, this package will fail to
+compile with the legacy import path `github.com/uber-go/atomic`.
+
+We recommend migrating your code to the new import path but if you're unable
+to do so, or if your dependencies are still using the old import path, you
+will have to add a `replace` directive to your `go.mod` file downgrading the
+legacy import path to an older version.
+
+```
+replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
+```
+
+You can do so automatically by running the following command.
+
+```shell
+$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0
+```
+
+## Usage
+
+The standard library's `sync/atomic` is powerful, but it's easy to forget which
+variables must be accessed atomically.
`go.uber.org/atomic` preserves all the +functionality of the standard library, but wraps the primitive types to +provide a safer, more convenient API. + +```go +var atom atomic.Uint32 +atom.Store(42) +atom.Sub(2) +atom.CAS(40, 11) +``` + +See the [documentation][doc] for a complete API specification. + +## Development Status + +Stable. + +--- + +Released under the [MIT License](LICENSE.txt). + +[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg +[doc]: https://godoc.org/go.uber.org/atomic +[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master +[ci]: https://travis-ci.com/uber-go/atomic +[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/atomic +[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic +[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go new file mode 100644 index 0000000000..9cf1914b1f --- /dev/null +++ b/vendor/go.uber.org/atomic/bool.go @@ -0,0 +1,81 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" +) + +// Bool is an atomic type-safe wrapper for bool values. +type Bool struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroBool bool + +// NewBool creates a new Bool. +func NewBool(v bool) *Bool { + x := &Bool{} + if v != _zeroBool { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped bool. +func (x *Bool) Load() bool { + return truthy(x.v.Load()) +} + +// Store atomically stores the passed bool. +func (x *Bool) Store(v bool) { + x.v.Store(boolToInt(v)) +} + +// CAS is an atomic compare-and-swap for bool values. +func (x *Bool) CAS(o, n bool) bool { + return x.v.CAS(boolToInt(o), boolToInt(n)) +} + +// Swap atomically stores the given bool and returns the old +// value. +func (x *Bool) Swap(o bool) bool { + return truthy(x.v.Swap(boolToInt(o))) +} + +// MarshalJSON encodes the wrapped bool into JSON. +func (x *Bool) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a bool from JSON. 
+func (x *Bool) UnmarshalJSON(b []byte) error { + var v bool + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go new file mode 100644 index 0000000000..c7bf7a827a --- /dev/null +++ b/vendor/go.uber.org/atomic/bool_ext.go @@ -0,0 +1,53 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go + +func truthy(n uint32) bool { + return n == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + for { + old := b.Load() + if b.CAS(old, !old) { + return old + } + } +} + +// String encodes the wrapped value as a string. +func (b *Bool) String() string { + return strconv.FormatBool(b.Load()) +} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go new file mode 100644 index 0000000000..ae7390ee68 --- /dev/null +++ b/vendor/go.uber.org/atomic/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go new file mode 100644 index 0000000000..027cfcb20b --- /dev/null +++ b/vendor/go.uber.org/atomic/duration.go @@ -0,0 +1,82 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "time" +) + +// Duration is an atomic type-safe wrapper for time.Duration values. +type Duration struct { + _ nocmp // disallow non-atomic comparison + + v Int64 +} + +var _zeroDuration time.Duration + +// NewDuration creates a new Duration. +func NewDuration(v time.Duration) *Duration { + x := &Duration{} + if v != _zeroDuration { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped time.Duration. +func (x *Duration) Load() time.Duration { + return time.Duration(x.v.Load()) +} + +// Store atomically stores the passed time.Duration. +func (x *Duration) Store(v time.Duration) { + x.v.Store(int64(v)) +} + +// CAS is an atomic compare-and-swap for time.Duration values. +func (x *Duration) CAS(o, n time.Duration) bool { + return x.v.CAS(int64(o), int64(n)) +} + +// Swap atomically stores the given time.Duration and returns the old +// value. +func (x *Duration) Swap(o time.Duration) time.Duration { + return time.Duration(x.v.Swap(int64(o))) +} + +// MarshalJSON encodes the wrapped time.Duration into JSON. +func (x *Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a time.Duration from JSON. +func (x *Duration) UnmarshalJSON(b []byte) error { + var v time.Duration + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go new file mode 100644 index 0000000000..6273b66bd6 --- /dev/null +++ b/vendor/go.uber.org/atomic/duration_ext.go @@ -0,0 +1,40 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// String encodes the wrapped value as a string. +func (d *Duration) String() string { + return d.Load().String() +} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go new file mode 100644 index 0000000000..a6166fbea0 --- /dev/null +++ b/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,51 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper for error values. +type Error struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroError error + +// NewError creates a new Error. 
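+//
+// The error is kept inside an atomic.Value via the packError/unpackError
+// helpers in error_ext.go, so nil errors are stored and loaded safely.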
+func NewError(v error) *Error { + x := &Error{} + if v != _zeroError { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped error. +func (x *Error) Load() error { + return unpackError(x.v.Load()) +} + +// Store atomically stores the passed error. +func (x *Error) Store(v error) { + x.v.Store(packError(v)) +} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go new file mode 100644 index 0000000000..ffe0be21cb --- /dev/null +++ b/vendor/go.uber.org/atomic/error_ext.go @@ -0,0 +1,39 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// atomic.Value panics on nil inputs, or if the underlying type changes. +// Stabilize by always storing a custom struct that we control. + +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go + +type packedError struct{ Value error } + +func packError(v error) interface{} { + return packedError{v} +} + +func unpackError(v interface{}) error { + if err, ok := v.(packedError); ok { + return err.Value + } + return nil +} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go new file mode 100644 index 0000000000..0719060207 --- /dev/null +++ b/vendor/go.uber.org/atomic/float64.go @@ -0,0 +1,76 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float64 is an atomic type-safe wrapper for float64 values. +type Float64 struct { + _ nocmp // disallow non-atomic comparison + + v Uint64 +} + +var _zeroFloat64 float64 + +// NewFloat64 creates a new Float64. +func NewFloat64(v float64) *Float64 { + x := &Float64{} + if v != _zeroFloat64 { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped float64. +func (x *Float64) Load() float64 { + return math.Float64frombits(x.v.Load()) +} + +// Store atomically stores the passed float64. +func (x *Float64) Store(v float64) { + x.v.Store(math.Float64bits(v)) +} + +// CAS is an atomic compare-and-swap for float64 values. +func (x *Float64) CAS(o, n float64) bool { + return x.v.CAS(math.Float64bits(o), math.Float64bits(n)) +} + +// MarshalJSON encodes the wrapped float64 into JSON. +func (x *Float64) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float64 from JSON. +func (x *Float64) UnmarshalJSON(b []byte) error { + var v float64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go new file mode 100644 index 0000000000..927b1add74 --- /dev/null +++ b/vendor/go.uber.org/atomic/float64_ext.go @@ -0,0 +1,47 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "strconv" + +//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// String encodes the wrapped value as a string. +func (f *Float64) String() string { + // 'g' is the behavior for floats with %v. 
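+	// Precision -1 uses the smallest number of digits necessary to
+	// represent the value uniquely.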
+ return strconv.FormatFloat(f.Load(), 'g', -1, 64) +} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go new file mode 100644 index 0000000000..50d6b24858 --- /dev/null +++ b/vendor/go.uber.org/atomic/gen.go @@ -0,0 +1,26 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go +//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go +//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go +//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go new file mode 100644 index 0000000000..18ae56493e --- /dev/null +++ b/vendor/go.uber.org/atomic/int32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int32 is an atomic wrapper around int32. +type Int32 struct { + _ nocmp // disallow non-atomic comparison + + v int32 +} + +// NewInt32 creates a new Int32. +func NewInt32(i int32) *Int32 { + return &Int32{v: i} +} + +// Load atomically loads the wrapped value. 
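+//
+// Load and the other methods on Int32 are thin wrappers around the
+// corresponding sync/atomic functions.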
+func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// MarshalJSON encodes the wrapped int32 into JSON. +func (i *Int32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int32. +func (i *Int32) UnmarshalJSON(b []byte) error { + var v int32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int32) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go new file mode 100644 index 0000000000..2bcbbfaa95 --- /dev/null +++ b/vendor/go.uber.org/atomic/int64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int64 is an atomic wrapper around int64. +type Int64 struct { + _ nocmp // disallow non-atomic comparison + + v int64 +} + +// NewInt64 creates a new Int64. +func NewInt64(i int64) *Int64 { + return &Int64{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. 
+func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(n int64) { + atomic.StoreInt64(&i.v, n) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(n int64) int64 { + return atomic.SwapInt64(&i.v, n) +} + +// MarshalJSON encodes the wrapped int64 into JSON. +func (i *Int64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int64. +func (i *Int64) UnmarshalJSON(b []byte) error { + var v int64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int64) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go new file mode 100644 index 0000000000..a8201cb4a1 --- /dev/null +++ b/vendor/go.uber.org/atomic/nocmp.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// nocmp is an uncomparable struct. Embed this inside another struct to make +// it uncomparable. +// +// type Foo struct { +// nocmp +// // ... +// } +// +// This DOES NOT: +// +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs +type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 0000000000..225b7a2be0 --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,54 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper for string values. +type String struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroString string + +// NewString creates a new String. +func NewString(v string) *String { + x := &String{} + if v != _zeroString { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped string. +func (x *String) Load() string { + if v := x.v.Load(); v != nil { + return v.(string) + } + return _zeroString +} + +// Store atomically stores the passed string. +func (x *String) Store(v string) { + x.v.Store(v) +} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go new file mode 100644 index 0000000000..3a9558213d --- /dev/null +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -0,0 +1,43 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go + +// String returns the wrapped value. +func (s *String) String() string { + return s.Load() +} + +// MarshalText encodes the wrapped string into a textual form. +// +// This makes it encodable as JSON, YAML, XML, and more. +func (s *String) MarshalText() ([]byte, error) { + return []byte(s.Load()), nil +} + +// UnmarshalText decodes text and replaces the wrapped string with it. 
+// +// This makes it decodable from JSON, YAML, XML, and more. +func (s *String) UnmarshalText(b []byte) error { + s.Store(string(b)) + return nil +} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go new file mode 100644 index 0000000000..a973aba1a6 --- /dev/null +++ b/vendor/go.uber.org/atomic/uint32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint32 is an atomic wrapper around uint32. +type Uint32 struct { + _ nocmp // disallow non-atomic comparison + + v uint32 +} + +// NewUint32 creates a new Uint32. +func NewUint32(i uint32) *Uint32 { + return &Uint32{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(n uint32) uint32 { + return atomic.AddUint32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(n uint32) uint32 { + return atomic.AddUint32(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) bool { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(n uint32) { + atomic.StoreUint32(&i.v, n) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(n uint32) uint32 { + return atomic.SwapUint32(&i.v, n) +} + +// MarshalJSON encodes the wrapped uint32 into JSON. +func (i *Uint32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint32. +func (i *Uint32) UnmarshalJSON(b []byte) error { + var v uint32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. 
+func (i *Uint32) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go new file mode 100644 index 0000000000..3b6c71fd5a --- /dev/null +++ b/vendor/go.uber.org/atomic/uint64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint64 is an atomic wrapper around uint64. +type Uint64 struct { + _ nocmp // disallow non-atomic comparison + + v uint64 +} + +// NewUint64 creates a new Uint64. +func NewUint64(i uint64) *Uint64 { + return &Uint64{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(n uint64) uint64 { + return atomic.AddUint64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(n uint64) uint64 { + return atomic.AddUint64(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) bool { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(n uint64) { + atomic.StoreUint64(&i.v, n) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// MarshalJSON encodes the wrapped uint64 into JSON. +func (i *Uint64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint64. +func (i *Uint64) UnmarshalJSON(b []byte) error { + var v uint64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. 
+func (i *Uint64) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go new file mode 100644 index 0000000000..671f3a3824 --- /dev/null +++ b/vendor/go.uber.org/atomic/value.go @@ -0,0 +1,31 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "sync/atomic" + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct { + atomic.Value + + _ nocmp // disallow non-atomic comparison +} diff --git a/vendor/go.uber.org/automaxprocs/LICENSE b/vendor/go.uber.org/automaxprocs/LICENSE new file mode 100644 index 0000000000..20dcf51d96 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go new file mode 100644 index 0000000000..fe4ecf561e --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go @@ -0,0 +1,79 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "io" + "os" + "path/filepath" + "strconv" +) + +// CGroup represents the data structure for a Linux control group. +type CGroup struct { + path string +} + +// NewCGroup returns a new *CGroup from a given path. +func NewCGroup(path string) *CGroup { + return &CGroup{path: path} +} + +// Path returns the path of the CGroup*. +func (cg *CGroup) Path() string { + return cg.path +} + +// ParamPath returns the path of the given cgroup param under itself. +func (cg *CGroup) ParamPath(param string) string { + return filepath.Join(cg.path, param) +} + +// readFirstLine reads the first line from a cgroup param file. +func (cg *CGroup) readFirstLine(param string) (string, error) { + paramFile, err := os.Open(cg.ParamPath(param)) + if err != nil { + return "", err + } + defer paramFile.Close() + + scanner := bufio.NewScanner(paramFile) + if scanner.Scan() { + return scanner.Text(), nil + } + if err := scanner.Err(); err != nil { + return "", err + } + return "", io.ErrUnexpectedEOF +} + +// readInt parses the first line from a cgroup param file as int. +func (cg *CGroup) readInt(param string) (int, error) { + text, err := cg.readFirstLine(param) + if err != nil { + return 0, err + } + return strconv.Atoi(text) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go new file mode 100644 index 0000000000..e89f543602 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go @@ -0,0 +1,118 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+const (
+	// _cgroupFSType is the Linux CGroup file system type used in
+	// `/proc/$PID/mountinfo`.
+	_cgroupFSType = "cgroup"
+	// _cgroupSubsysCPU is the CPU CGroup subsystem.
+	_cgroupSubsysCPU = "cpu"
+	// _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem.
+	_cgroupSubsysCPUAcct = "cpuacct"
+	// _cgroupSubsysCPUSet is the CPUSet CGroup subsystem.
+	_cgroupSubsysCPUSet = "cpuset"
+	// _cgroupSubsysMemory is the Memory CGroup subsystem.
+	_cgroupSubsysMemory = "memory"
+
+	// _cgroupCPUCFSQuotaUsParam is the file name for the CGroup CFS quota
+	// parameter.
+	_cgroupCPUCFSQuotaUsParam = "cpu.cfs_quota_us"
+	// _cgroupCPUCFSPeriodUsParam is the file name for the CGroup CFS period
+	// parameter.
+	_cgroupCPUCFSPeriodUsParam = "cpu.cfs_period_us"
+)
+
+const (
+	_procPathCGroup    = "/proc/self/cgroup"
+	_procPathMountInfo = "/proc/self/mountinfo"
+)
+
+// CGroups maps each CGroup subsystem name to its CGroup.
+type CGroups map[string]*CGroup
+
+// NewCGroups returns a new *CGroups from the given `mountinfo` and `cgroup`
+// files of some process under the `/proc` file system (see also proc(5) for
+// more information).
+func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) {
+	cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup)
+	if err != nil {
+		return nil, err
+	}
+
+	cgroups := make(CGroups)
+	newMountPoint := func(mp *MountPoint) error {
+		if mp.FSType != _cgroupFSType {
+			return nil
+		}
+
+		for _, opt := range mp.SuperOptions {
+			subsys, exists := cgroupSubsystems[opt]
+			if !exists {
+				continue
+			}
+
+			cgroupPath, err := mp.Translate(subsys.Name)
+			if err != nil {
+				return err
+			}
+			cgroups[opt] = NewCGroup(cgroupPath)
+		}
+
+		return nil
+	}
+
+	if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
+		return nil, err
+	}
+	return cgroups, nil
+}
+
+// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current
+// process.
+func NewCGroupsForCurrentProcess() (CGroups, error) {
+	return NewCGroups(_procPathMountInfo, _procPathCGroup)
+}
+
+// CPUQuota returns the CPU quota applied with the CPU cgroup controller.
+// It is a result of `cpu.cfs_quota_us / cpu.cfs_period_us`. If the value of
+// `cpu.cfs_quota_us` was not set (-1), the method returns `(-1, false, nil)`.
+func (cg CGroups) CPUQuota() (float64, bool, error) {
+	cpuCGroup, exists := cg[_cgroupSubsysCPU]
+	if !exists {
+		return -1, false, nil
+	}
+
+	cfsQuotaUs, err := cpuCGroup.readInt(_cgroupCPUCFSQuotaUsParam)
+	if defined := cfsQuotaUs > 0; err != nil || !defined {
+		return -1, defined, err
+	}
+
+	cfsPeriodUs, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam)
+	if defined := cfsPeriodUs > 0; err != nil || !defined {
+		return -1, defined, err
+	}
+
+	return float64(cfsQuotaUs) / float64(cfsPeriodUs), true, nil
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go
new file mode 100644
index 0000000000..78556062fe
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go
@@ -0,0 +1,176 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "path" + "strconv" + "strings" +) + +const ( + // _cgroupv2CPUMax is the file name for the CGroup-V2 CPU max and period + // parameter. + _cgroupv2CPUMax = "cpu.max" + // _cgroupFSType is the Linux CGroup-V2 file system type used in + // `/proc/$PID/mountinfo`. + _cgroupv2FSType = "cgroup2" + + _cgroupv2MountPoint = "/sys/fs/cgroup" + + _cgroupV2CPUMaxDefaultPeriod = 100000 + _cgroupV2CPUMaxQuotaMax = "max" +) + +const ( + _cgroupv2CPUMaxQuotaIndex = iota + _cgroupv2CPUMaxPeriodIndex +) + +// ErrNotV2 indicates that the system is not using cgroups2. +var ErrNotV2 = errors.New("not using cgroups2") + +// CGroups2 provides access to cgroups data for systems using cgroups2. +type CGroups2 struct { + mountPoint string + groupPath string + cpuMaxFile string +} + +// NewCGroups2ForCurrentProcess builds a CGroups2 for the current process. +// +// This returns ErrNotV2 if the system is not using cgroups2. +func NewCGroups2ForCurrentProcess() (*CGroups2, error) { + return newCGroups2From(_procPathMountInfo, _procPathCGroup) +} + +func newCGroups2From(mountInfoPath, procPathCGroup string) (*CGroups2, error) { + isV2, err := isCGroupV2(mountInfoPath) + if err != nil { + return nil, err + } + + if !isV2 { + return nil, ErrNotV2 + } + + subsystems, err := parseCGroupSubsystems(procPathCGroup) + if err != nil { + return nil, err + } + + // Find v2 subsystem by looking for the `0` id + var v2subsys *CGroupSubsys + for _, subsys := range subsystems { + if subsys.ID == 0 { + v2subsys = subsys + break + } + } + + if v2subsys == nil { + return nil, ErrNotV2 + } + + return &CGroups2{ + mountPoint: _cgroupv2MountPoint, + groupPath: v2subsys.Name, + cpuMaxFile: _cgroupv2CPUMax, + }, nil +} + +func isCGroupV2(procPathMountInfo string) (bool, error) { + var ( + isV2 bool + newMountPoint = func(mp *MountPoint) error { + isV2 = isV2 || (mp.FSType == _cgroupv2FSType && mp.MountPoint == _cgroupv2MountPoint) + return nil + } + ) + + if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil { + return false, err + } + + return isV2, nil +} + +// CPUQuota returns the CPU quota applied with the CPU cgroup2 controller. +// It is a result of reading cpu quota and period from cpu.max file. +// It will return `cpu.max / cpu.period`. 
If cpu.max is set to max, it returns +// (-1, false, nil) +func (cg *CGroups2) CPUQuota() (float64, bool, error) { + cpuMaxParams, err := os.Open(path.Join(cg.mountPoint, cg.groupPath, cg.cpuMaxFile)) + if err != nil { + if os.IsNotExist(err) { + return -1, false, nil + } + return -1, false, err + } + defer cpuMaxParams.Close() + + scanner := bufio.NewScanner(cpuMaxParams) + if scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 || len(fields) > 2 { + return -1, false, fmt.Errorf("invalid format") + } + + if fields[_cgroupv2CPUMaxQuotaIndex] == _cgroupV2CPUMaxQuotaMax { + return -1, false, nil + } + + max, err := strconv.Atoi(fields[_cgroupv2CPUMaxQuotaIndex]) + if err != nil { + return -1, false, err + } + + var period int + if len(fields) == 1 { + period = _cgroupV2CPUMaxDefaultPeriod + } else { + period, err = strconv.Atoi(fields[_cgroupv2CPUMaxPeriodIndex]) + if err != nil { + return -1, false, err + } + + if period == 0 { + return -1, false, errors.New("zero value for period is not allowed") + } + } + + return float64(max) / float64(period), true, nil + } + + if err := scanner.Err(); err != nil { + return -1, false, err + } + + return 0, false, io.ErrUnexpectedEOF +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go new file mode 100644 index 0000000000..113555f63d --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package cgroups provides utilities to access Linux control group (CGroups) +// parameters (CPU quota, for example) for a given process. +package cgroups diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go new file mode 100644 index 0000000000..94ac75a46e --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go @@ -0,0 +1,52 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import "fmt" + +type cgroupSubsysFormatInvalidError struct { + line string +} + +type mountPointFormatInvalidError struct { + line string +} + +type pathNotExposedFromMountPointError struct { + mountPoint string + root string + path string +} + +func (err cgroupSubsysFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line) +} + +func (err mountPointFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for MountPoint: %q", err.line) +} + +func (err pathNotExposedFromMountPointError) Error() string { + return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go new file mode 100644 index 0000000000..f3877f78aa --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go @@ -0,0 +1,171 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" +) + +const ( + _mountInfoSep = " " + _mountInfoOptsSep = "," + _mountInfoOptionalFieldsSep = "-" +) + +const ( + _miFieldIDMountID = iota + _miFieldIDParentID + _miFieldIDDeviceID + _miFieldIDRoot + _miFieldIDMountPoint + _miFieldIDOptions + _miFieldIDOptionalFields + + _miFieldCountFirstHalf +) + +const ( + _miFieldOffsetFSType = iota + _miFieldOffsetMountSource + _miFieldOffsetSuperOptions + + _miFieldCountSecondHalf +) + +const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf + +// MountPoint is the data structure for the mount points in +// `/proc/$PID/mountinfo`. See also proc(5) for more information. +type MountPoint struct { + MountID int + ParentID int + DeviceID string + Root string + MountPoint string + Options []string + OptionalFields []string + FSType string + MountSource string + SuperOptions []string +} + +// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and +// returns a new *MountPoint. +func NewMountPointFromLine(line string) (*MountPoint, error) { + fields := strings.Split(line, _mountInfoSep) + + if len(fields) < _miFieldCountMin { + return nil, mountPointFormatInvalidError{line} + } + + mountID, err := strconv.Atoi(fields[_miFieldIDMountID]) + if err != nil { + return nil, err + } + + parentID, err := strconv.Atoi(fields[_miFieldIDParentID]) + if err != nil { + return nil, err + } + + for i, field := range fields[_miFieldIDOptionalFields:] { + if field == _mountInfoOptionalFieldsSep { + // End of optional fields. + fsTypeStart := _miFieldIDOptionalFields + i + 1 + + // Now we know where the optional fields end, split the line again with a + // limit to avoid issues with spaces in super options as present on WSL. + fields = strings.SplitN(line, _mountInfoSep, fsTypeStart+_miFieldCountSecondHalf) + if len(fields) != fsTypeStart+_miFieldCountSecondHalf { + return nil, mountPointFormatInvalidError{line} + } + + miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart + miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart + miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart + + return &MountPoint{ + MountID: mountID, + ParentID: parentID, + DeviceID: fields[_miFieldIDDeviceID], + Root: fields[_miFieldIDRoot], + MountPoint: fields[_miFieldIDMountPoint], + Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep), + OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)], + FSType: fields[miFieldIDFSType], + MountSource: fields[miFieldIDMountSource], + SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep), + }, nil + } + } + + return nil, mountPointFormatInvalidError{line} +} + +// Translate converts an absolute path inside the *MountPoint's file system to +// the host file system path in the mount namespace the *MountPoint belongs to. +func (mp *MountPoint) Translate(absPath string) (string, error) { + relPath, err := filepath.Rel(mp.Root, absPath) + + if err != nil { + return "", err + } + if relPath == ".." || strings.HasPrefix(relPath, "../") { + return "", pathNotExposedFromMountPointError{ + mountPoint: mp.MountPoint, + root: mp.Root, + path: absPath, + } + } + + return filepath.Join(mp.MountPoint, relPath), nil +} + +// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`) +// and yields parsed *MountPoint into newMountPoint. 
+func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error { + mountInfoFile, err := os.Open(procPathMountInfo) + if err != nil { + return err + } + defer mountInfoFile.Close() + + scanner := bufio.NewScanner(mountInfoFile) + + for scanner.Scan() { + mountPoint, err := NewMountPointFromLine(scanner.Text()) + if err != nil { + return err + } + if err := newMountPoint(mountPoint); err != nil { + return err + } + } + + return scanner.Err() +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go new file mode 100644 index 0000000000..cddc3eaec3 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go @@ -0,0 +1,103 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +const ( + _cgroupSep = ":" + _cgroupSubsysSep = "," +) + +const ( + _csFieldIDID = iota + _csFieldIDSubsystems + _csFieldIDName + _csFieldCount +) + +// CGroupSubsys represents the data structure for entities in +// `/proc/$PID/cgroup`. See also proc(5) for more information. +type CGroupSubsys struct { + ID int + Subsystems []string + Name string +} + +// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in +// the format of `/proc/$PID/cgroup` +func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) { + fields := strings.SplitN(line, _cgroupSep, _csFieldCount) + + if len(fields) != _csFieldCount { + return nil, cgroupSubsysFormatInvalidError{line} + } + + id, err := strconv.Atoi(fields[_csFieldIDID]) + if err != nil { + return nil, err + } + + cgroup := &CGroupSubsys{ + ID: id, + Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep), + Name: fields[_csFieldIDName], + } + + return cgroup, nil +} + +// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`) +// and returns a new map[string]*CGroupSubsys. 
+func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) { + cgroupFile, err := os.Open(procPathCGroup) + if err != nil { + return nil, err + } + defer cgroupFile.Close() + + scanner := bufio.NewScanner(cgroupFile) + subsystems := make(map[string]*CGroupSubsys) + + for scanner.Scan() { + cgroup, err := NewCGroupSubsysFromLine(scanner.Text()) + if err != nil { + return nil, err + } + for _, subsys := range cgroup.Subsystems { + subsystems[subsys] = cgroup + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return subsystems, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go new file mode 100644 index 0000000000..3b974754c3 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go @@ -0,0 +1,71 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package runtime + +import ( + "errors" + "math" + + cg "go.uber.org/automaxprocs/internal/cgroups" +) + +// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process +// to a valid GOMAXPROCS value. +func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) { + cgroups, err := newQueryer() + if err != nil { + return -1, CPUQuotaUndefined, err + } + + quota, defined, err := cgroups.CPUQuota() + if !defined || err != nil { + return -1, CPUQuotaUndefined, err + } + + maxProcs := int(math.Floor(quota)) + if minValue > 0 && maxProcs < minValue { + return minValue, CPUQuotaMinUsed, nil + } + return maxProcs, CPUQuotaUsed, nil +} + +type queryer interface { + CPUQuota() (float64, bool, error) +} + +var ( + _newCgroups2 = cg.NewCGroups2ForCurrentProcess + _newCgroups = cg.NewCGroupsForCurrentProcess +) + +func newQueryer() (queryer, error) { + cgroups, err := _newCgroups2() + if err == nil { + return cgroups, nil + } + if errors.Is(err, cg.ErrNotV2) { + return _newCgroups() + } + return nil, err +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go new file mode 100644 index 0000000000..6922554484 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go @@ -0,0 +1,31 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !linux +// +build !linux + +package runtime + +// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process +// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the +// current OS. +func CPUQuotaToGOMAXPROCS(_ int) (int, CPUQuotaStatus, error) { + return -1, CPUQuotaUndefined, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go new file mode 100644 index 0000000000..df6eacf053 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go @@ -0,0 +1,33 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
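The internal `runtime` package that follows is what the public `maxprocs` package (also added in this diff) delegates to. Typical application wiring looks roughly like this; a short sketch against the `Set`, `Logger`, and `Min` APIs shown in this hunk:

```go
package main

import (
	"log"

	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// Match GOMAXPROCS to the container's CPU quota, logging the decision
	// and never dropping below two procs. Set returns an undo function so
	// callers (tests, short-lived tools) can restore the previous value.
	undo, err := maxprocs.Set(maxprocs.Logger(log.Printf), maxprocs.Min(2))
	defer undo()
	if err != nil {
		log.Printf("failed to adjust GOMAXPROCS: %v", err)
	}

	// ... start servers, workers, etc. ...
}
```

Because `Set` honors an explicit `GOMAXPROCS` environment variable and is a no-op on non-Linux systems and when no quota is configured, it is safe to call unconditionally at startup.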
+ +package runtime + +// CPUQuotaStatus presents the status of how CPU quota is used +type CPUQuotaStatus int + +const ( + // CPUQuotaUndefined is returned when CPU quota is undefined + CPUQuotaUndefined CPUQuotaStatus = iota + // CPUQuotaUsed is returned when a valid CPU quota can be used + CPUQuotaUsed + // CPUQuotaMinUsed is returned when CPU quota is smaller than the min value + CPUQuotaMinUsed +) diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go new file mode 100644 index 0000000000..98176d6457 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go @@ -0,0 +1,130 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to +// match the configured Linux CPU quota. Unlike the top-level automaxprocs +// package, it lets the caller configure logging and handle errors. +package maxprocs // import "go.uber.org/automaxprocs/maxprocs" + +import ( + "os" + "runtime" + + iruntime "go.uber.org/automaxprocs/internal/runtime" +) + +const _maxProcsKey = "GOMAXPROCS" + +func currentMaxProcs() int { + return runtime.GOMAXPROCS(0) +} + +type config struct { + printf func(string, ...interface{}) + procs func(int) (int, iruntime.CPUQuotaStatus, error) + minGOMAXPROCS int +} + +func (c *config) log(fmt string, args ...interface{}) { + if c.printf != nil { + c.printf(fmt, args...) + } +} + +// An Option alters the behavior of Set. +type Option interface { + apply(*config) +} + +// Logger uses the supplied printf implementation for log output. By default, +// Set doesn't log anything. +func Logger(printf func(string, ...interface{})) Option { + return optionFunc(func(cfg *config) { + cfg.printf = printf + }) +} + +// Min sets the minimum GOMAXPROCS value that will be used. +// Any value below 1 is ignored. +func Min(n int) Option { + return optionFunc(func(cfg *config) { + if n >= 1 { + cfg.minGOMAXPROCS = n + } + }) +} + +type optionFunc func(*config) + +func (of optionFunc) apply(cfg *config) { of(cfg) } + +// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning +// any error encountered and an undo function. +// +// Set is a no-op on non-Linux systems and in Linux environments without a +// configured CPU quota. 
+func Set(opts ...Option) (func(), error) { + cfg := &config{ + procs: iruntime.CPUQuotaToGOMAXPROCS, + minGOMAXPROCS: 1, + } + for _, o := range opts { + o.apply(cfg) + } + + undoNoop := func() { + cfg.log("maxprocs: No GOMAXPROCS change to reset") + } + + // Honor the GOMAXPROCS environment variable if present. Otherwise, amend + // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is + // Linux, and guarantee a minimum value of 1. The minimum guaranteed value + // can be overridden using `maxprocs.Min()`. + if max, exists := os.LookupEnv(_maxProcsKey); exists { + cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max) + return undoNoop, nil + } + + maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS) + if err != nil { + return undoNoop, err + } + + if status == iruntime.CPUQuotaUndefined { + cfg.log("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", currentMaxProcs()) + return undoNoop, nil + } + + prev := currentMaxProcs() + undo := func() { + cfg.log("maxprocs: Resetting GOMAXPROCS to %v", prev) + runtime.GOMAXPROCS(prev) + } + + switch status { + case iruntime.CPUQuotaMinUsed: + cfg.log("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs) + case iruntime.CPUQuotaUsed: + cfg.log("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs) + } + + runtime.GOMAXPROCS(maxProcs) + return undo, nil +} diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go new file mode 100644 index 0000000000..108a95535e --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/maxprocs/version.go @@ -0,0 +1,24 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package maxprocs + +// Version is the current package version. 
+const Version = "1.5.2"
diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml
new file mode 100644
index 0000000000..6d4d1be7b5
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.codecov.yml
@@ -0,0 +1,15 @@
+coverage:
+  range: 80..100
+  round: down
+  precision: 2
+
+  status:
+    project: # measuring the overall project coverage
+      default: # context, you can create multiple ones with custom titles
+        enabled: yes # must be yes|true to enable this status
+        target: 100 # specify the target coverage for each commit status
+        # option: "auto" (must increase from parent commit or pull request base)
+        # option: "X%" a static target percentage to hit
+        if_not_found: success # if parent is not found report status as success, error, or failure
+        if_ci_failed: error # if ci fails report status as success, error, or failure
+
diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore
new file mode 100644
index 0000000000..b9a05e3da0
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.gitignore
@@ -0,0 +1,4 @@
+/vendor
+cover.html
+cover.out
+/bin
diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml
new file mode 100644
index 0000000000..8636ab42ad
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.travis.yml
@@ -0,0 +1,23 @@
+sudo: false
+language: go
+go_import_path: go.uber.org/multierr
+
+env:
+  global:
+    - GO111MODULE=on
+
+go:
+  - oldstable
+  - stable
+
+before_install:
+- go version
+
+script:
+- |
+  set -e
+  make lint
+  make cover
+
+after_success:
+- bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
new file mode 100644
index 0000000000..6f1db9ef4a
--- /dev/null
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -0,0 +1,60 @@
+Releases
+========
+
+v1.6.0 (2020-09-14)
+===================
+
+- Actually drop library dependency on development-time tooling.
+
+
+v1.5.0 (2020-02-24)
+===================
+
+- Drop library dependency on development-time tooling.
+
+
+v1.4.0 (2019-11-04)
+===================
+
+- Add `AppendInto` function to more ergonomically build errors inside a
+  loop.
+
+
+v1.3.0 (2019-10-29)
+===================
+
+- Switch to Go modules.
+
+
+v1.2.0 (2019-09-26)
+===================
+
+- Support extracting and matching against wrapped errors with `errors.As`
+  and `errors.Is`.
+
+
+v1.1.0 (2017-06-30)
+===================
+
+- Added an `Errors(error) []error` function to extract the underlying list of
+  errors for a multierr error.
+
+
+v1.0.0 (2017-05-31)
+===================
+
+No changes since v0.2.0. This release is committing to making no breaking
+changes to the current API in the 1.X series.
+
+
+v0.2.0 (2017-04-11)
+===================
+
+- Repeatedly appending to the same error is now faster due to fewer
+  allocations.
+
+
+v0.1.0 (2017-03-31)
+===================
+
+- Initial release
diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt
new file mode 100644
index 0000000000..858e02475f
--- /dev/null
+++ b/vendor/go.uber.org/multierr/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2017 Uber Technologies, Inc.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile new file mode 100644 index 0000000000..316004400b --- /dev/null +++ b/vendor/go.uber.org/multierr/Makefile @@ -0,0 +1,42 @@ +# Directory to put `go install`ed binaries in. +export GOBIN ?= $(shell pwd)/bin + +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) + +.PHONY: golint +golint: + @cd tools && go install golang.org/x/lint/golint + @$(GOBIN)/golint ./... + +.PHONY: staticcheck +staticcheck: + @cd tools && go install honnef.co/go/tools/cmd/staticcheck + @$(GOBIN)/staticcheck ./... + +.PHONY: lint +lint: gofmt golint staticcheck + +.PHONY: cover +cover: + go test -coverprofile=cover.out -coverpkg=./... -v ./... + go tool cover -html=cover.out -o cover.html + +update-license: + @cd tools && go install go.uber.org/tools/update-license + @$(GOBIN)/update-license $(GO_FILES) diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md new file mode 100644 index 0000000000..751bd65e58 --- /dev/null +++ b/vendor/go.uber.org/multierr/README.md @@ -0,0 +1,23 @@ +# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +`multierr` allows combining one or more Go `error`s together. + +## Installation + + go get -u go.uber.org/multierr + +## Status + +Stable: No breaking changes will be made before 2.0. + +------------------------------------------------------------------------------- + +Released under the [MIT License]. + +[MIT License]: LICENSE.txt +[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg +[doc]: https://godoc.org/go.uber.org/multierr +[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master +[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg +[ci]: https://travis-ci.com/uber-go/multierr +[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go new file mode 100644 index 0000000000..5c9b67d537 --- /dev/null +++ b/vendor/go.uber.org/multierr/error.go @@ -0,0 +1,449 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package multierr allows combining one or more errors together. +// +// Overview +// +// Errors can be combined with the use of the Combine function. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) +// +// If only two errors are being combined, the Append function may be used +// instead. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// This makes it possible to record resource cleanup failures from deferred +// blocks with the help of named return values. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } +// +// The underlying list of errors for a returned error object may be retrieved +// with the Errors function. +// +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:", errors) +// } +// +// Advanced Usage +// +// Errors returned by Combine and Append MAY implement the following +// interface. +// +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } +// +// Note that if you need access to list of errors behind a multierr error, you +// should prefer using the Errors function. That said, if you need cheap +// read-only access to the underlying errors slice, you can attempt to cast +// the error to this interface. You MUST handle the failure case gracefully +// because errors returned by Combine and Append are not guaranteed to +// implement this interface. +// +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } +package multierr // import "go.uber.org/multierr" + +import ( + "bytes" + "fmt" + "io" + "strings" + "sync" + + "go.uber.org/atomic" +) + +var ( + // Separator for single-line error messages. + _singlelineSeparator = []byte("; ") + + // Prefix for multi-line messages + _multilinePrefix = []byte("the following errors occurred:") + + // Prefix for the first and following lines of an item in a list of + // multi-line error messages. 
+ // + // For example, if a single item is: + // + // foo + // bar + // + // It will become, + // + // - foo + // bar + _multilineSeparator = []byte("\n - ") + _multilineIndent = []byte(" ") +) + +// _bufferPool is a pool of bytes.Buffers. +var _bufferPool = sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, +} + +type errorGroup interface { + Errors() []error +} + +// Errors returns a slice containing zero or more errors that the supplied +// error is composed of. If the error is nil, a nil slice is returned. +// +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) +// +// If the error is not composed of other errors, the returned slice contains +// just the error that was passed in. +// +// Callers of this function are free to modify the returned slice. +func Errors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. + // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + errors := eg.Errors() + result := make([]error, len(errors)) + copy(result, errors) + return result +} + +// multiError is an error that holds one or more errors. +// +// An instance of this is guaranteed to be non-empty and flattened. That is, +// none of the errors inside multiError are other multiErrors. +// +// multiError formats to a semi-colon delimited list of error messages with +// %v and with a more readable multi-line format with %+v. +type multiError struct { + copyNeeded atomic.Bool + errors []error +} + +var _ errorGroup = (*multiError)(nil) + +// Errors returns the list of underlying errors. +// +// This slice MUST NOT be modified. +func (merr *multiError) Errors() []error { + if merr == nil { + return nil + } + return merr.errors +} + +func (merr *multiError) Error() string { + if merr == nil { + return "" + } + + buff := _bufferPool.Get().(*bytes.Buffer) + buff.Reset() + + merr.writeSingleline(buff) + + result := buff.String() + _bufferPool.Put(buff) + return result +} + +func (merr *multiError) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('+') { + merr.writeMultiline(f) + } else { + merr.writeSingleline(f) + } +} + +func (merr *multiError) writeSingleline(w io.Writer) { + first := true + for _, item := range merr.errors { + if first { + first = false + } else { + w.Write(_singlelineSeparator) + } + io.WriteString(w, item.Error()) + } +} + +func (merr *multiError) writeMultiline(w io.Writer) { + w.Write(_multilinePrefix) + for _, item := range merr.errors { + w.Write(_multilineSeparator) + writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) + } +} + +// Writes s to the writer with the given prefix added before each line after +// the first. +func writePrefixLine(w io.Writer, prefix []byte, s string) { + first := true + for len(s) > 0 { + if first { + first = false + } else { + w.Write(prefix) + } + + idx := strings.IndexByte(s, '\n') + if idx < 0 { + idx = len(s) - 1 + } + + io.WriteString(w, s[:idx+1]) + s = s[idx+1:] + } +} + +type inspectResult struct { + // Number of top-level non-nil errors + Count int + + // Total number of errors including multiErrors + Capacity int + + // Index of the first non-nil error in the list. 
Value is meaningless if + // Count is zero. + FirstErrorIdx int + + // Whether the list contains at least one multiError + ContainsMultiError bool +} + +// Inspects the given slice of errors so that we can efficiently allocate +// space for it. +func inspect(errors []error) (res inspectResult) { + first := true + for i, err := range errors { + if err == nil { + continue + } + + res.Count++ + if first { + first = false + res.FirstErrorIdx = i + } + + if merr, ok := err.(*multiError); ok { + res.Capacity += len(merr.errors) + res.ContainsMultiError = true + } else { + res.Capacity++ + } + } + return +} + +// fromSlice converts the given list of errors into a single error. +func fromSlice(errors []error) error { + res := inspect(errors) + switch res.Count { + case 0: + return nil + case 1: + // only one non-nil entry + return errors[res.FirstErrorIdx] + case len(errors): + if !res.ContainsMultiError { + // already flat + return &multiError{errors: errors} + } + } + + nonNilErrs := make([]error, 0, res.Capacity) + for _, err := range errors[res.FirstErrorIdx:] { + if err == nil { + continue + } + + if nested, ok := err.(*multiError); ok { + nonNilErrs = append(nonNilErrs, nested.errors...) + } else { + nonNilErrs = append(nonNilErrs, err) + } + } + + return &multiError{errors: nonNilErrs} +} + +// Combine combines the passed errors into a single error. +// +// If zero arguments were passed or if all items are nil, a nil error is +// returned. +// +// Combine(nil, nil) // == nil +// +// If only a single error was passed, it is returned as-is. +// +// Combine(err) // == err +// +// Combine skips over nil arguments so this function may be used to combine +// together errors from operations that fail independently of each other. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) +// +// If any of the passed errors is a multierr error, it will be flattened along +// with the other errors. +// +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) +// +// The returned error formats into a readable multi-line error message if +// formatted with %+v. +// +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +func Combine(errors ...error) error { + return fromSlice(errors) +} + +// Append appends the given errors together. Either value may be nil. +// +// This function is a specialization of Combine for the common case where +// there are only two errors. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The following pattern may also be used to record failure of deferred +// operations without losing information about the original error. +// +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +func Append(left error, right error) error { + switch { + case left == nil: + return right + case right == nil: + return left + } + + if _, ok := right.(*multiError); !ok { + if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { + // Common case where the error on the left is constantly being + // appended to. + errs := append(l.errors, right) + return &multiError{errors: errs} + } else if !ok { + // Both errors are single errors. + return &multiError{errors: []error{left, right}} + } + } + + // Either right or both, left and right, are multiErrors. Rely on usual + // expensive logic. 
+	errors := [2]error{left, right}
+	return fromSlice(errors[0:])
+}
+
+// AppendInto appends an error into the destination of an error pointer and
+// returns whether the error being appended was non-nil.
+//
+//	var err error
+//	multierr.AppendInto(&err, r.Close())
+//	multierr.AppendInto(&err, w.Close())
+//
+// The above is equivalent to,
+//
+//	err := multierr.Append(r.Close(), w.Close())
+//
+// As AppendInto reports whether the provided error was non-nil, it may be
+// used to build a multierr error in a loop more ergonomically. For example:
+//
+//	var err error
+//	for line := range lines {
+//		var item Item
+//		if multierr.AppendInto(&err, parse(line, &item)) {
+//			continue
+//		}
+//		items = append(items, item)
+//	}
+//
+// Compare this with a version that relies solely on Append:
+//
+//	var err error
+//	for line := range lines {
+//		var item Item
+//		if parseErr := parse(line, &item); parseErr != nil {
+//			err = multierr.Append(err, parseErr)
+//			continue
+//		}
+//		items = append(items, item)
+//	}
+func AppendInto(into *error, err error) (errored bool) {
+	if into == nil {
+		// We panic if 'into' is nil. This is not documented above
+		// because suggesting that the pointer must be non-nil may
+		// confuse users into thinking that the error that it points
+		// to must be non-nil.
+		panic("misuse of multierr.AppendInto: into pointer must not be nil")
+	}
+
+	if err == nil {
+		return false
+	}
+	*into = Append(*into, err)
+	return true
+}
diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml
new file mode 100644
index 0000000000..6ef084ec24
--- /dev/null
+++ b/vendor/go.uber.org/multierr/glide.yaml
@@ -0,0 +1,8 @@
+package: go.uber.org/multierr
+import:
+- package: go.uber.org/atomic
+  version: ^1
+testImport:
+- package: github.com/stretchr/testify
+  subpackages:
+  - assert
diff --git a/vendor/go.uber.org/multierr/go113.go b/vendor/go.uber.org/multierr/go113.go
new file mode 100644
index 0000000000..264b0eac0d
--- /dev/null
+++ b/vendor/go.uber.org/multierr/go113.go
@@ -0,0 +1,52 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// +build go1.13
+
+package multierr
+
+import "errors"
+
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. +func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml new file mode 100644 index 0000000000..8e5ca7d3e2 --- /dev/null +++ b/vendor/go.uber.org/zap/.codecov.yml @@ -0,0 +1,17 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 95% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure +ignore: + - internal/readme/readme.go + diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore new file mode 100644 index 0000000000..da9d9d00b4 --- /dev/null +++ b/vendor/go.uber.org/zap/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log + +/bin +cover.out +cover.html diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl new file mode 100644 index 0000000000..92aa65d660 --- /dev/null +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -0,0 +1,109 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. 
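Circling back before the zap sources continue: the `As` and `Is` methods added in `go113.go` above are what let the standard `errors` helpers see through a combined multierr error. A small usage sketch (the `errTimeout` sentinel is illustrative, not part of the library):

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

var errTimeout = errors.New("timeout")

func main() {
	// Combine skips nil entries and flattens nested multierr values.
	err := multierr.Combine(
		fmt.Errorf("read config: %w", errTimeout),
		nil,
		errors.New("close connection"),
	)

	// errors.Is walks the underlying list via the Is method above.
	fmt.Println(errors.Is(err, errTimeout)) // true

	// Errors exposes a copy of the flattened list.
	for _, e := range multierr.Errors(err) {
		fmt.Println(" -", e)
	}
}
```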
+
+## Performance
+
+For applications that log in the hot path, reflection-based serialization and
+string formatting are prohibitively expensive — they're CPU-intensive
+and make many small allocations. Put differently, using `encoding/json` and
+`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
+
+Zap takes a different approach. It includes a reflection-free, zero-allocation
+JSON encoder, and the base `Logger` strives to avoid serialization overhead
+and allocations wherever possible. By building the high-level `SugaredLogger`
+on that foundation, zap lets users *choose* when they need to count every
+allocation and when they'd prefer a more familiar, loosely typed API.
+
+As measured by its own [benchmarking suite][], not only is zap more performant
+than comparable structured logging packages — it's also faster than the
+standard library. Like all benchmarks, take these with a grain of salt.<sup id="anchor-versions">[1](#footnote-versions)</sup>
+
+Log a message and 10 fields:
+
+{{.BenchmarkAddingFields}}
+
+Log a message with a logger that already has 10 fields of context:
+
+{{.BenchmarkAccumulatedContext}}
+
+Log a static string, without any context or `printf`-style templating:
+
+{{.BenchmarkWithoutFields}}
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+<hr>
+
+Released under the [MIT License](LICENSE.txt).
+
+<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
+benchmarking against slightly older versions of other packages. Versions are
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
+
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
+[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/zap
+[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
+
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
new file mode 100644
index 0000000000..0db1f9f15f
--- /dev/null
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -0,0 +1,617 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## 1.24.0 (30 Nov 2022)
+
+Enhancements:
+* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
+  current minimum enabled log level.
+* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
+
+Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
+contributions to this release.
+
+[#1148]: https://github.com/uber-go/zap/pull/1148
+[#1185]: https://github.com/uber-go/zap/pull/1185
+
+## 1.23.0 (24 Aug 2022)
+
+Enhancements:
+* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
+  `LevelEnabler` or `Core`.
+* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
+  that implement `String() string`.
+
+[#1147]: https://github.com/uber-go/zap/pull/1147
+[#1155]: https://github.com/uber-go/zap/pull/1155
+
+
+## 1.22.0 (8 Aug 2022)
+
+Enhancements:
+* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
+  arrays of objects. With these two constructors, you don't need to implement
+  `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
+  `zapcore.ObjectMarshaler`.
+* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
+  `SugaredLogger` with the provided options applied.
+* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
+  These functions provide a string joining behavior similar to `fmt.Println`.
+* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
+  logger for `Fatal`-level log entries. This defaults to exiting the program.
+* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
+  `NewDevelopment` to panic if the system was unable to build the logger.
+* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
+  a statement dynamically.
+
+Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
+for their contributions to this release.
+
+[#1071]: https://github.com/uber-go/zap/pull/1071
+[#1079]: https://github.com/uber-go/zap/pull/1079
+[#1080]: https://github.com/uber-go/zap/pull/1080
+[#1088]: https://github.com/uber-go/zap/pull/1088
+[#1108]: https://github.com/uber-go/zap/pull/1108
+[#1118]: https://github.com/uber-go/zap/pull/1118
+
+## 1.21.0 (7 Feb 2022)
+
+Enhancements:
+* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
+* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a + string. + +Bugfixes: +* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset. + +Other changes: +* [#1052][]: Improve encoding performance when the `AddCaller` and + `AddStacktrace` options are used together. + +[#1047]: https://github.com/uber-go/zap/pull/1047 +[#1048]: https://github.com/uber-go/zap/pull/1048 +[#1052]: https://github.com/uber-go/zap/pull/1052 +[#1058]: https://github.com/uber-go/zap/pull/1058 + +Thanks to @aerosol and @Techassi for their contributions to this release. + +## 1.20.0 (4 Jan 2022) + +Enhancements: +* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline + characters between log statements. +* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON + encoding of reflected log fields. + +Bugfixes: +* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON. +* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject` + methods when the methods return. +* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero. + +Other changes: +* [#1028][]: Drop support for Go < 1.15. + +[#554]: https://github.com/uber-go/zap/pull/554 +[#989]: https://github.com/uber-go/zap/pull/989 +[#1011]: https://github.com/uber-go/zap/pull/1011 +[#1017]: https://github.com/uber-go/zap/pull/1017 +[#1028]: https://github.com/uber-go/zap/pull/1028 +[#1033]: https://github.com/uber-go/zap/pull/1033 +[#1039]: https://github.com/uber-go/zap/pull/1039 + +Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release. + +## 1.19.1 (8 Sep 2021) + +Bugfixes: +* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon. +* [#1003][]: JSON: Fix inaccurate precision when encoding float32. + +[#1001]: https://github.com/uber-go/zap/pull/1001 +[#1003]: https://github.com/uber-go/zap/pull/1003 + +## 1.19.0 (9 Aug 2021) + +Enhancements: +* [#975][]: Avoid panicking in Sampler core if the level is out of bounds. +* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields + better. + +[#975]: https://github.com/uber-go/zap/pull/975 +[#984]: https://github.com/uber-go/zap/pull/984 + +Thanks to @lancoLiu and @thockin for their contributions to this release. + +## 1.18.1 (28 Jun 2021) + +Bugfixes: +* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`. + +[#974]: https://github.com/uber-go/zap/pull/974 + +## 1.18.0 (28 Jun 2021) + +Enhancements: +* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers + messages in-memory and flushes them periodically. +* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`. +* [#897][]: Add `zap.WithClock` option to control the source of time via the + new `zapcore.Clock` interface. +* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w` + methods don't match expectations. +* [#943][]: Add support for filtering by level or arbitrary matcher function to + `zaptest/observer`. +* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's + `buffer.Buffer`. + +Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee +for their contributions to this release. 
+
+[#691]: https://github.com/uber-go/zap/pull/691
+[#897]: https://github.com/uber-go/zap/pull/897
+[#943]: https://github.com/uber-go/zap/pull/943
+[#949]: https://github.com/uber-go/zap/pull/949
+[#961]: https://github.com/uber-go/zap/pull/961
+[#971]: https://github.com/uber-go/zap/pull/971
+
+## 1.17.0 (25 May 2021)
+
+Bugfixes:
+* [#867][]: Encode `<nil>` for nil `error` instead of a panic.
+* [#931][], [#936][]: Update minimum version constraints to address
+  vulnerabilities in dependencies.
+
+Enhancements:
+* [#865][]: Improve alignment of fields of the Logger struct, reducing its
+  size from 96 to 80 bytes.
+* [#881][]: Support `grpclog.LoggerV2` in zapgrpc.
+* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler
+  with the `application/x-www-form-urlencoded` content type.
+* [#912][]: Support multi-field encoding with `zap.Inline`.
+* [#913][]: Speed up SugaredLogger for calls with a single string.
+* [#928][]: Add support for filtering by field name to `zaptest/observer`.
+
+Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
+
+## 1.16.0 (1 Sep 2020)
+
+Bugfixes:
+* [#828][]: Fix missing newline in IncreaseLevel error messages.
+* [#835][]: Fix panic in JSON encoder when encoding times or durations
+  without specifying a time or duration encoder.
+* [#843][]: Honor CallerSkip when taking stack traces.
+* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead.
+* [#854][]: Encode `<nil>` for nil `Stringer` instead of a panic error log.
+
+Enhancements:
+* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders
+  for custom layouts.
+* [#697][]: Added support for a configurable delimiter in the console encoder.
+* [#852][]: Optimize console encoder by pooling the underlying JSON encoder.
+* [#844][]: Add ability to include the calling function as part of logs.
+* [#843][]: Add `StackSkip` for including truncated stacks as a field.
+* [#861][]: Add options to customize Fatal behaviour for better testability.
+
+Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
+
+## 1.15.0 (23 Apr 2020)
+
+Bugfixes:
+* [#804][]: Fix handling of `Time` values out of `UnixNano` range.
+* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`.
+
+Enhancements:
+* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This
+  allows disabling annotation of log entries with caller information if
+  previously enabled with `AddCaller`.
+* [#813][]: Deprecate `NewSampler` constructor in favor of
+  `NewSamplerWithOptions` which supports a `SamplerHook` option. This option
+  adds support for monitoring sampling decisions through a hook.
+
+Thanks to @danielbprice for their contributions to this release.
+
+## 1.14.1 (14 Mar 2020)
+
+Bugfixes:
+* [#791][]: Fix panic on attempting to build a logger with an invalid Config.
+* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's
+  development-time dependencies.
+* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to
+  be generated for arrays of `time.Time` objects when using string-based time
+  formats.
+
+Thanks to @YashishDua for their contributions to this release.
+
+## 1.14.0 (20 Feb 2020)
+
+Enhancements:
+* [#771][]: Optimize calls for disabled log levels.
+* [#773][]: Add millisecond duration encoder.
+* [#775][]: Add option to increase the level of a logger. +* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible. + +Thanks to @caibirdme for their contributions to this release. + +## 1.13.0 (13 Nov 2019) + +Enhancements: +* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors + to log pointers to primitives with support for `nil` values. + +Thanks to @jbizzle for their contributions to this release. + +## 1.12.0 (29 Oct 2019) + +Enhancements: +* [#751][]: Migrate to Go modules. + +## 1.11.0 (21 Oct 2019) + +Enhancements: +* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`. +* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders. + +Thanks to @juicemia, @uhthomas for their contributions to this release. + +## 1.10.0 (29 Apr 2019) + +Bugfixes: +* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a + string. +* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. + +Enhancements: +* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test + loggers. +* [#675][]: Don't panic when encoding a String field. +* [#704][]: Disable HTML escaping for JSON objects encoded using the + reflect-based encoder. + +Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions +to this release. + +## v1.9.1 (06 Aug 2018) + +Bugfixes: + +* [#614][]: MapObjectEncoder should not ignore empty slices. + +## v1.9.0 (19 Jul 2018) + +Enhancements: +* [#602][]: Reduce number of allocations when logging with reflection. +* [#572][], [#606][]: Expose a registry for third-party logging sinks. + +Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and +@dimroc for their contributions to this release. + +## v1.8.0 (13 Apr 2018) + +Enhancements: +* [#508][]: Make log level configurable when redirecting the standard + library's logger. +* [#518][]: Add a logger that writes to a `*testing.TB`. +* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. + +Bugfixes: +* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. + +Thanks to @DiSiqueira and @djui for their contributions to this release. + +## v1.7.1 (25 Sep 2017) + +Bugfixes: +* [#504][]: Store strings when using AddByteString with the map encoder. + +## v1.7.0 (21 Sep 2017) + +Enhancements: + +* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user + to specify the level of the logged messages. + +## v1.6.0 (30 Aug 2017) + +Enhancements: + +* [#491][]: Omit zap stack frames from stacktraces. +* [#490][]: Add a `ContextMap` method to observer logs for simpler + field validation in tests. + +## v1.5.0 (22 Jul 2017) + +Enhancements: + +* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. +* [#465][]: Support user-supplied encoders for logger names. + +Bugfixes: + +* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. + +Thanks to @richard-tunein and @pavius for their contributions to this release. + +## v1.4.1 (08 Jun 2017) + +This release fixes two bugs. + +Bugfixes: + +* [#435][]: Support a variety of case conventions when unmarshaling levels. +* [#444][]: Fix a panic in the observer. + +## v1.4.0 (12 May 2017) + +This release adds a few small features and is fully backward-compatible. + +Enhancements: + +* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to + override the Unix-style default. +* [#425][]: Preserve time zones when logging times. 
+* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a + variety of operations a bit simpler. + +## v1.3.0 (25 Apr 2017) + +This release adds an enhancement to zap's testing helpers as well as the +ability to marshal an AtomicLevel. It is fully backward-compatible. + +Enhancements: + +* [#415][]: Add a substring-filtering helper to zap's observer. This is + particularly useful when testing the `SugaredLogger`. +* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. + +## v1.2.0 (13 Apr 2017) + +This release adds a gRPC compatibility wrapper. It is fully backward-compatible. + +Enhancements: + +* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements + `grpclog.Logger`. + +## v1.1.0 (31 Mar 2017) + +This release fixes two bugs and adds some enhancements to zap's testing helpers. +It is fully backward-compatible. + +Bugfixes: + +* [#385][]: Fix caller path trimming on Windows. +* [#396][]: Fix a panic when attempting to use non-existent directories with + zap's configuration struct. + +Enhancements: + +* [#386][]: Add filtering helpers to zaptest's observing logger. + +Thanks to @moitias for contributing to this release. + +## v1.0.0 (14 Mar 2017) + +This is zap's first stable release. All exported APIs are now final, and no +further breaking changes will be made in the 1.x release series. Anyone using a +semver-aware dependency manager should now pin to `^1`. + +Breaking changes: + +* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without + casting from `[]byte` to `string`. +* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, + `zap.Logger`, and `zap.SugaredLogger`. +* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to + clash with other testing helpers. + +Bugfixes: + +* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier + for tab-separated console output. +* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to + work with concurrency-safe `WriteSyncer` implementations. +* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux + systems. +* [#373][]: Report the correct caller from zap's standard library + interoperability wrappers. + +Enhancements: + +* [#348][]: Add a registry allowing third-party encodings to work with zap's + built-in `Config`. +* [#327][]: Make the representation of logger callers configurable (like times, + levels, and durations). +* [#376][]: Allow third-party encoders to use their own buffer pools, which + removes the last performance advantage that zap's encoders have over plugins. +* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple + `WriteSyncer`s and lock the result. +* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in + Go 1.9). +* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it + easier for particularly punctilious users to unit test their application's + logging. + +Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their +contributions to this release. + +## v1.0.0-rc.3 (7 Mar 2017) + +This is the third release candidate for zap's stable release. There are no +breaking changes. + +Bugfixes: + +* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs + rather than `[]uint8`. + +Enhancements: + +* [#307][]: Users can opt into colored output for log levels. 
+* [#353][]: In addition to hijacking the output of the standard library's + package-global logging functions, users can now construct a zap-backed + `log.Logger` instance. +* [#311][]: Frames from common runtime functions and some of zap's internal + machinery are now omitted from stacktraces. + +Thanks to @ansel1 and @suyash for their contributions to this release. + +## v1.0.0-rc.2 (21 Feb 2017) + +This is the second release candidate for zap's stable release. It includes two +breaking changes. + +Breaking changes: + +* [#316][]: Zap's global loggers are now fully concurrency-safe + (previously, users had to ensure that `ReplaceGlobals` was called before the + loggers were in use). However, they must now be accessed via the `L()` and + `S()` functions. Users can update their projects with + + ``` + gofmt -r "zap.L -> zap.L()" -w . + gofmt -r "zap.S -> zap.S()" -w . + ``` +* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid + JSON and YAML struct tags on all config structs. This release fixes the tags + and adds static analysis to prevent similar bugs in the future. + +Bugfixes: + +* [#321][]: Redirecting the standard library's `log` output now + correctly reports the logger's caller. + +Enhancements: + +* [#325][] and [#333][]: Zap now transparently supports non-standard, rich + errors like those produced by `github.com/pkg/errors`. +* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is + now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> + zap.NewNop()' -w .`. +* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a + more informative error. + +Thanks to @skipor and @chapsuk for their contributions to this release. + +## v1.0.0-rc.1 (14 Feb 2017) + +This is the first release candidate for zap's stable release. There are multiple +breaking changes and improvements from the pre-release version. Most notably: + +* **Zap's import path is now "go.uber.org/zap"** — all users will + need to update their code. +* User-facing types and functions remain in the `zap` package. Code relevant + largely to extension authors is now in the `zapcore` package. +* The `zapcore.Core` type makes it easy for third-party packages to use zap's + internals but provide a different user-facing API. +* `Logger` is now a concrete type instead of an interface. +* A less verbose (though slower) logging API is included by default. +* Package-global loggers `L` and `S` are included. +* A human-friendly console encoder is included. +* A declarative config struct allows common logger configurations to be managed + as configuration instead of code. +* Sampling is more accurate, and doesn't depend on the standard library's shared + timer heap. + +## v0.1.0-beta.1 (6 Feb 2017) + +This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and +upgrade at their leisure. Since this is the first tagged release, there are no +backward compatibility concerns and all functionality is new. + +Early zap adopters should pin to the 0.1.x minor version until they're ready to +upgrade to the upcoming stable release. 
+ +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 +[#402]: https://github.com/uber-go/zap/pull/402 +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 +[#487]: https://github.com/uber-go/zap/pull/487 +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 +[#504]: https://github.com/uber-go/zap/pull/504 +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 +[#614]: https://github.com/uber-go/zap/pull/614 +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 +[#751]: https://github.com/uber-go/zap/pull/751 +[#758]: https://github.com/uber-go/zap/pull/758 +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 +[#791]: https://github.com/uber-go/zap/pull/791 +[#795]: https://github.com/uber-go/zap/pull/795 +[#799]: https://github.com/uber-go/zap/pull/799 +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 +[#629]: 
https://github.com/uber-go/zap/pull/629 +[#697]: https://github.com/uber-go/zap/pull/697 +[#828]: https://github.com/uber-go/zap/pull/828 +[#835]: https://github.com/uber-go/zap/pull/835 +[#843]: https://github.com/uber-go/zap/pull/843 +[#844]: https://github.com/uber-go/zap/pull/844 +[#852]: https://github.com/uber-go/zap/pull/852 +[#854]: https://github.com/uber-go/zap/pull/854 +[#861]: https://github.com/uber-go/zap/pull/861 +[#862]: https://github.com/uber-go/zap/pull/862 +[#865]: https://github.com/uber-go/zap/pull/865 +[#867]: https://github.com/uber-go/zap/pull/867 +[#881]: https://github.com/uber-go/zap/pull/881 +[#903]: https://github.com/uber-go/zap/pull/903 +[#912]: https://github.com/uber-go/zap/pull/912 +[#913]: https://github.com/uber-go/zap/pull/913 +[#928]: https://github.com/uber-go/zap/pull/928 +[#931]: https://github.com/uber-go/zap/pull/931 +[#936]: https://github.com/uber-go/zap/pull/936 diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..e327d9aa5c --- /dev/null +++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md new file mode 100644 index 0000000000..ea02f3cae2 --- /dev/null +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -0,0 +1,70 @@ +# Contributing + +We'd love your help making zap the very best structured logging library in Go! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +```bash +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/zap.git +cd zap +git remote add upstream https://github.com/uber-go/zap.git +git fetch upstream +``` + +Make sure that the tests and the linters pass: + +```bash +make test +make lint +``` + +## Making Changes + +Start by creating a new branch for your changes: + +```bash +cd $GOPATH/src/go.uber.org/zap +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +```bash +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We _try_ to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +- Add tests for new functionality. +- Write a [good commit message][commit-message]. +- Maintain backward compatibility. 
+
+[fork]: https://github.com/uber-go/zap/fork
+[open-issue]: https://github.com/uber-go/zap/issues/new
+[cla]: https://cla-assistant.io/uber-go/zap
+[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md
new file mode 100644
index 0000000000..b183b20bc1
--- /dev/null
+++ b/vendor/go.uber.org/zap/FAQ.md
@@ -0,0 +1,164 @@
+# Frequently Asked Questions
+
+## Design
+
+### Why spend so much effort on logger performance?
+
+Of course, most applications won't notice the impact of a slow logger: they
+already take tens or hundreds of milliseconds for each operation, so an extra
+millisecond doesn't matter.
+
+On the other hand, why *not* make structured logging fast? The `SugaredLogger`
+isn't any harder to use than other logging packages, and the `Logger` makes
+structured logging possible in performance-sensitive contexts. Across a fleet
+of Go microservices, making each application even slightly more efficient adds
+up quickly.
+
+### Why aren't `Logger` and `SugaredLogger` interfaces?
+
+Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and
+`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points
+out][go-proverbs], "The bigger the interface, the weaker the abstraction."
+Interfaces are also rigid — *any* change requires releasing a new major
+version, since it breaks all third-party implementations.
+
+Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much
+abstraction, and it lets us add methods without introducing breaking changes.
+Your applications should define and depend upon an interface that includes
+just the methods you use.
+
+### Why are some of my logs missing?
+
+Logs are dropped intentionally by zap when sampling is enabled. The production
+configuration (as returned by `NewProductionConfig()`) enables sampling, which will
+cause repeated logs within a second to be sampled. See more details on why sampling
+is enabled in [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs).
+
+### Why sample application logs?
+
+Applications often experience runs of errors, either because of a bug or
+because of a misbehaving user. Logging errors is usually a good idea, but it
+can easily make this bad situation worse: not only is your application coping
+with a flood of errors, it's also spending extra CPU cycles and I/O logging
+those errors. Since writes are typically serialized, logging limits throughput
+when you need it most.
+
+Sampling fixes this problem by dropping repetitive log entries. Under normal
+conditions, your application writes out every entry. When similar entries are
+logged hundreds or thousands of times each second, though, zap begins dropping
+duplicates to preserve throughput.
+
+### Why do the structured logging APIs take a message in addition to fields?
+
+Subjectively, we find it helpful to accompany structured context with a brief
+description. This isn't critical during development, but it makes debugging
+and operating unfamiliar systems much easier.
+
+More concretely, zap's sampling algorithm uses the message to identify
+duplicate entries. In our experience, this is a practical middle ground
+between random sampling (which often drops the exact entry that you need while
+debugging) and hashing the complete entry (which is prohibitively expensive).
+
+### Why include package-global loggers?
+ +Since so many other logging packages include a global logger, many +applications aren't designed to accept loggers as explicit parameters. +Changing function signatures is often a breaking change, so zap includes +global loggers to simplify migration. + +Avoid them where possible. + +### Why include dedicated Panic and Fatal log levels? + +In general, application code should handle errors gracefully instead of using +`panic` or `os.Exit`. However, every rule has exceptions, and it's common to +crash when an error is truly unrecoverable. To avoid losing any information +— especially the reason for the crash — the logger must flush any +buffered entries before the process exits. + +Zap makes this easy by offering `Panic` and `Fatal` logging methods that +automatically flush before exiting. Of course, this doesn't guarantee that +logs will never be lost, but it eliminates a common error. + +See the discussion in uber-go/zap#207 for more details. + +### What's `DPanic`? + +`DPanic` stands for "panic in development." In development, it logs at +`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to +catch errors that are theoretically possible, but shouldn't actually happen, +*without* crashing in production. + +If you've ever written code like this, you need `DPanic`: + +```go +if err != nil { + panic(fmt.Sprintf("shouldn't ever get here: %v", err)) +} +``` + +## Installation + +### What does the error `expects import "go.uber.org/zap"` mean? + +Either zap was installed incorrectly or you're referencing the wrong package +name in your code. + +Zap's source code happens to be hosted on GitHub, but the [import +path][import-path] is `go.uber.org/zap`. This gives us, the project +maintainers, the freedom to move the source code if necessary. However, it +means that you need to take a little care when installing and using the +package. + +If you follow two simple rules, everything should work: install zap with `go +get -u go.uber.org/zap`, and always import it in your code with `import +"go.uber.org/zap"`. Your code shouldn't contain *any* references to +`github.com/uber-go/zap`. + +## Usage + +### Does zap support log rotation? + +Zap doesn't natively support rotating log files, since we prefer to leave this +to an external program like `logrotate`. + +However, it's easy to integrate a log rotation package like +[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. + +```go +// lumberjack.Logger is already safe for concurrent use, so we don't need to +// lock it. +w := zapcore.AddSync(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, // days +}) +core := zapcore.NewCore( + zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + w, + zap.InfoLevel, +) +logger := zap.New(core) +``` + +## Extensions + +We'd love to support every logging need within zap itself, but we're only +familiar with a handful of log ingestion systems, flag-parsing packages, and +the like. Rather than merging code that we can't effectively debug and +support, we'd rather grow an ecosystem of zap extensions. 
+ +We're aware of the following extensions, but haven't used them ourselves: + +| Package | Integration | +| --- | --- | +| `github.com/tchap/zapext` | Sentry, syslog | +| `github.com/fgrosse/zaptest` | Ginkgo | +| `github.com/blendle/zapdriver` | Stackdriver | +| `github.com/moul/zapgorm` | Gorm | +| `github.com/moul/zapfilter` | Advanced filtering rules | + +[go-proverbs]: https://go-proverbs.github.io/ +[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths +[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt new file mode 100644 index 0000000000..6652bed45f --- /dev/null +++ b/vendor/go.uber.org/zap/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile new file mode 100644 index 0000000000..9b1bc3b0e1 --- /dev/null +++ b/vendor/go.uber.org/zap/Makefile @@ -0,0 +1,73 @@ +export GOBIN ?= $(shell pwd)/bin + +GOLINT = $(GOBIN)/golint +STATICCHECK = $(GOBIN)/staticcheck +BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem + +# Directories containing independent Go modules. +# +# We track coverage only for the main module. +MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test + +# Many Go tools take file globs or directories as arguments instead of packages. +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: all +all: lint test + +.PHONY: lint +lint: $(GOLINT) $(STATICCHECK) + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log + @echo "Checking vet..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking lint..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking staticcheck..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e Makefile | tee -a lint.log + @echo "Checking for license headers..." + @./checklicense.sh | tee -a lint.log + @[ ! -s lint.log ] + @echo "Checking 'go mod tidy'..." + @make tidy + @if ! 
git diff --quiet; then \ + echo "'go mod tidy' resulted in changes or working tree is dirty:"; \ + git --no-pager diff; \ + fi + +$(GOLINT): + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): + cd tools && go install honnef.co/go/tools/cmd/staticcheck + +.PHONY: test +test: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true + +.PHONY: cover +cover: + go test -race -coverprofile=cover.out -coverpkg=./... ./... + go tool cover -html=cover.out -o cover.html + +.PHONY: bench +BENCH ?= . +bench: + @$(foreach dir,$(MODULE_DIRS), ( \ + cd $(dir) && \ + go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \ + ) &&) true + +.PHONY: updatereadme +updatereadme: + rm -f README.md + cat .readme.tmpl | go run internal/readme/readme.go > README.md + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md new file mode 100644 index 0000000000..a553a428c8 --- /dev/null +++ b/vendor/go.uber.org/zap/README.md @@ -0,0 +1,133 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users _choose_ when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. 
Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +| Package | Time | Time % to zap | Objects Allocated | +| :------------------ | :---------: | :-----------: | :---------------: | +| :zap: zap | 2900 ns/op | +0% | 5 allocs/op | +| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op | +| zerolog | 10639 ns/op | +267% | 32 allocs/op | +| go-kit | 14434 ns/op | +398% | 59 allocs/op | +| logrus | 17104 ns/op | +490% | 81 allocs/op | +| apex/log | 32424 ns/op | +1018% | 66 allocs/op | +| log15 | 33579 ns/op | +1058% | 76 allocs/op | + +Log a message with a logger that already has 10 fields of context: + +| Package | Time | Time % to zap | Objects Allocated | +| :------------------ | :---------: | :-----------: | :---------------: | +| :zap: zap | 373 ns/op | +0% | 0 allocs/op | +| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op | +| zerolog | 288 ns/op | -23% | 0 allocs/op | +| go-kit | 11785 ns/op | +3060% | 58 allocs/op | +| logrus | 19629 ns/op | +5162% | 70 allocs/op | +| log15 | 21866 ns/op | +5762% | 72 allocs/op | +| apex/log | 30890 ns/op | +8182% | 55 allocs/op | + +Log a static string, without any context or `printf`-style templating: + +| Package | Time | Time % to zap | Objects Allocated | +| :------------------ | :--------: | :-----------: | :---------------: | +| :zap: zap | 381 ns/op | +0% | 0 allocs/op | +| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op | +| zerolog | 369 ns/op | -3% | 0 allocs/op | +| standard library | 385 ns/op | +1% | 2 allocs/op | +| go-kit | 606 ns/op | +59% | 11 allocs/op | +| logrus | 1730 ns/op | +354% | 25 allocs/op | +| apex/log | 1998 ns/op | +424% | 7 allocs/op | +| log15 | 4546 ns/op | +1093% | 22 allocs/op | + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) + +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go new file mode 100644 index 0000000000..5be3704a3e --- /dev/null +++ b/vendor/go.uber.org/zap/array.go @@ -0,0 +1,320 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "time" + + "go.uber.org/zap/zapcore" +) + +// Array constructs a field with the given key and ArrayMarshaler. It provides +// a flexible, but still type-safe and efficient, way to add array-like types +// to the logging context. The struct's MarshalLogArray method is called lazily. +func Array(key string, val zapcore.ArrayMarshaler) Field { + return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} +} + +// Bools constructs a field that carries a slice of bools. +func Bools(key string, bs []bool) Field { + return Array(key, bools(bs)) +} + +// ByteStrings constructs a field that carries a slice of []byte, each of which +// must be UTF-8 encoded text. +func ByteStrings(key string, bss [][]byte) Field { + return Array(key, byteStringsArray(bss)) +} + +// Complex128s constructs a field that carries a slice of complex numbers. +func Complex128s(key string, nums []complex128) Field { + return Array(key, complex128s(nums)) +} + +// Complex64s constructs a field that carries a slice of complex numbers. +func Complex64s(key string, nums []complex64) Field { + return Array(key, complex64s(nums)) +} + +// Durations constructs a field that carries a slice of time.Durations. +func Durations(key string, ds []time.Duration) Field { + return Array(key, durations(ds)) +} + +// Float64s constructs a field that carries a slice of floats. 
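+//
+// A hypothetical usage sketch (the field name and values are illustrative):
+//
+//	logger.Info("request latencies",
+//		zap.Float64s("p99_seconds", []float64{0.92, 1.07, 0.88}),
+//	)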
+func Float64s(key string, nums []float64) Field { + return Array(key, float64s(nums)) +} + +// Float32s constructs a field that carries a slice of floats. +func Float32s(key string, nums []float32) Field { + return Array(key, float32s(nums)) +} + +// Ints constructs a field that carries a slice of integers. +func Ints(key string, nums []int) Field { + return Array(key, ints(nums)) +} + +// Int64s constructs a field that carries a slice of integers. +func Int64s(key string, nums []int64) Field { + return Array(key, int64s(nums)) +} + +// Int32s constructs a field that carries a slice of integers. +func Int32s(key string, nums []int32) Field { + return Array(key, int32s(nums)) +} + +// Int16s constructs a field that carries a slice of integers. +func Int16s(key string, nums []int16) Field { + return Array(key, int16s(nums)) +} + +// Int8s constructs a field that carries a slice of integers. +func Int8s(key string, nums []int8) Field { + return Array(key, int8s(nums)) +} + +// Strings constructs a field that carries a slice of strings. +func Strings(key string, ss []string) Field { + return Array(key, stringArray(ss)) +} + +// Times constructs a field that carries a slice of time.Times. +func Times(key string, ts []time.Time) Field { + return Array(key, times(ts)) +} + +// Uints constructs a field that carries a slice of unsigned integers. +func Uints(key string, nums []uint) Field { + return Array(key, uints(nums)) +} + +// Uint64s constructs a field that carries a slice of unsigned integers. +func Uint64s(key string, nums []uint64) Field { + return Array(key, uint64s(nums)) +} + +// Uint32s constructs a field that carries a slice of unsigned integers. +func Uint32s(key string, nums []uint32) Field { + return Array(key, uint32s(nums)) +} + +// Uint16s constructs a field that carries a slice of unsigned integers. +func Uint16s(key string, nums []uint16) Field { + return Array(key, uint16s(nums)) +} + +// Uint8s constructs a field that carries a slice of unsigned integers. +func Uint8s(key string, nums []uint8) Field { + return Array(key, uint8s(nums)) +} + +// Uintptrs constructs a field that carries a slice of pointer addresses. +func Uintptrs(key string, us []uintptr) Field { + return Array(key, uintptrs(us)) +} + +// Errors constructs a field that carries a slice of errors. 
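+//
+// A hypothetical usage sketch (io.EOF and context.Canceled stand in for real
+// failures):
+//
+//	errs := []error{io.EOF, context.Canceled}
+//	logger.Warn("batch partially failed", zap.Errors("failures", errs))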
+func Errors(key string, errs []error) Field { + return Array(key, errArray(errs)) +} + +type bools []bool + +func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bs { + arr.AppendBool(bs[i]) + } + return nil +} + +type byteStringsArray [][]byte + +func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bss { + arr.AppendByteString(bss[i]) + } + return nil +} + +type complex128s []complex128 + +func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex128(nums[i]) + } + return nil +} + +type complex64s []complex64 + +func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex64(nums[i]) + } + return nil +} + +type durations []time.Duration + +func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ds { + arr.AppendDuration(ds[i]) + } + return nil +} + +type float64s []float64 + +func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat64(nums[i]) + } + return nil +} + +type float32s []float32 + +func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat32(nums[i]) + } + return nil +} + +type ints []int + +func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt(nums[i]) + } + return nil +} + +type int64s []int64 + +func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt64(nums[i]) + } + return nil +} + +type int32s []int32 + +func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt32(nums[i]) + } + return nil +} + +type int16s []int16 + +func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt16(nums[i]) + } + return nil +} + +type int8s []int8 + +func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt8(nums[i]) + } + return nil +} + +type stringArray []string + +func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ss { + arr.AppendString(ss[i]) + } + return nil +} + +type times []time.Time + +func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ts { + arr.AppendTime(ts[i]) + } + return nil +} + +type uints []uint + +func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint(nums[i]) + } + return nil +} + +type uint64s []uint64 + +func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint64(nums[i]) + } + return nil +} + +type uint32s []uint32 + +func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint32(nums[i]) + } + return nil +} + +type uint16s []uint16 + +func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint16(nums[i]) + } + return nil +} + +type uint8s []uint8 + +func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint8(nums[i]) + } + return nil +} + +type uintptrs []uintptr + +func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUintptr(nums[i]) + } + return nil +} diff --git a/vendor/go.uber.org/zap/array_go118.go 
b/vendor/go.uber.org/zap/array_go118.go new file mode 100644 index 0000000000..d0d2c49d69 --- /dev/null +++ b/vendor/go.uber.org/zap/array_go118.go @@ -0,0 +1,156 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.18 +// +build go1.18 + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" +) + +// Objects constructs a field with the given key, holding a list of the +// provided objects that can be marshaled by Zap. +// +// Note that these objects must implement zapcore.ObjectMarshaler directly. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the Request type, not its pointer (*Request). +// If it's on the pointer, use ObjectValues. +// +// Given an object that implements MarshalLogObject on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Author struct{ ... } +// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var authors []Author = ... +// logger.Info("loading article", zap.Objects("authors", authors)) +// +// Similarly, given a type that implements MarshalLogObject on its pointer +// receiver, you can log a slice of pointers to that object with Objects like +// so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +// +// If instead, you have a slice of values of such an object, use the +// ObjectValues constructor. +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field { + return Array(key, objects[T](values)) +} + +type objects[T zapcore.ObjectMarshaler] []T + +func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + if err := arr.AppendObject(o); err != nil { + return err + } + } + return nil +} + +// ObjectMarshalerPtr is a constraint that specifies that the given type +// implements zapcore.ObjectMarshaler on a pointer receiver. +type ObjectMarshalerPtr[T any] interface { + *T + zapcore.ObjectMarshaler +} + +// ObjectValues constructs a field with the given key, holding a list of the +// provided objects, where pointers to these objects can be marshaled by Zap. 
+// +// Note that pointers to these objects must implement zapcore.ObjectMarshaler. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the *Request type, not the value (Request). +// If it's on the value, use Objects. +// +// Given an object that implements MarshalLogObject on the pointer receiver, +// you can log a slice of those objects with ObjectValues like so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +// +// If instead, you have a slice of pointers of such an object, use the Objects +// field constructor. +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field { + return Array(key, objectValues[T, P](values)) +} + +type objectValues[T any, P ObjectMarshalerPtr[T]] []T + +func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range os { + // It is necessary for us to explicitly reference the "P" type. + // We cannot simply pass "&os[i]" to AppendObject because its type + // is "*T", which the type system does not consider as + // implementing ObjectMarshaler. + // Only the type "P" satisfies ObjectMarshaler, which we have + // to convert "*T" to explicitly. + var p P = &os[i] + if err := arr.AppendObject(p); err != nil { + return err + } + } + return nil +} + +// Stringers constructs a field with the given key, holding a list of the +// output provided by the value's String method +// +// Given an object that implements String on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Request struct{ ... } +// func (a Request) String() string +// +// var requests []Request = ... +// logger.Info("sending requests", zap.Stringers("requests", requests)) +// +// Note that these objects must implement fmt.Stringer directly. +// That is, if you're trying to marshal a []Request, the String method +// must be declared on the Request type, not its pointer (*Request). +func Stringers[T fmt.Stringer](key string, values []T) Field { + return Array(key, stringers[T](values)) +} + +type stringers[T fmt.Stringer] []T + +func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + arr.AppendString(o.String()) + } + return nil +} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go new file mode 100644 index 0000000000..9e929cd98e --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -0,0 +1,141 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package buffer provides a thin wrapper around a byte slice. Unlike the +// standard library's bytes.Buffer, it supports a portion of the strconv +// package's zero-allocation formatters. +package buffer // import "go.uber.org/zap/buffer" + +import ( + "strconv" + "time" +) + +const _size = 1024 // by default, create 1 KiB buffers + +// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so +// the only way to construct one is via a Pool. +type Buffer struct { + bs []byte + pool Pool +} + +// AppendByte writes a single byte to the Buffer. +func (b *Buffer) AppendByte(v byte) { + b.bs = append(b.bs, v) +} + +// AppendString writes a string to the Buffer. +func (b *Buffer) AppendString(s string) { + b.bs = append(b.bs, s...) +} + +// AppendInt appends an integer to the underlying buffer (assuming base 10). +func (b *Buffer) AppendInt(i int64) { + b.bs = strconv.AppendInt(b.bs, i, 10) +} + +// AppendTime appends the time formatted using the specified layout. +func (b *Buffer) AppendTime(t time.Time, layout string) { + b.bs = t.AppendFormat(b.bs, layout) +} + +// AppendUint appends an unsigned integer to the underlying buffer (assuming +// base 10). +func (b *Buffer) AppendUint(i uint64) { + b.bs = strconv.AppendUint(b.bs, i, 10) +} + +// AppendBool appends a bool to the underlying buffer. +func (b *Buffer) AppendBool(v bool) { + b.bs = strconv.AppendBool(b.bs, v) +} + +// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN +// or +/- Inf. +func (b *Buffer) AppendFloat(f float64, bitSize int) { + b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) +} + +// Len returns the length of the underlying byte slice. +func (b *Buffer) Len() int { + return len(b.bs) +} + +// Cap returns the capacity of the underlying byte slice. +func (b *Buffer) Cap() int { + return cap(b.bs) +} + +// Bytes returns a mutable reference to the underlying byte slice. +func (b *Buffer) Bytes() []byte { + return b.bs +} + +// String returns a string copy of the underlying byte slice. +func (b *Buffer) String() string { + return string(b.bs) +} + +// Reset resets the underlying byte slice. Subsequent writes re-use the slice's +// backing array. +func (b *Buffer) Reset() { + b.bs = b.bs[:0] +} + +// Write implements io.Writer. +func (b *Buffer) Write(bs []byte) (int, error) { + b.bs = append(b.bs, bs...) + return len(bs), nil +} + +// WriteByte writes a single byte to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteByte(v byte) error { + b.AppendByte(v) + return nil +} + +// WriteString writes a string to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteString(s string) (int, error) { + b.AppendString(s) + return len(s), nil +} + +// TrimNewline trims any final "\n" byte from the end of the buffer. 
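+// For example, a buffer holding "hello\n" becomes "hello"; only a single
+// trailing newline is removed, and all other bytes are left untouched.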
+func (b *Buffer) TrimNewline() { + if i := len(b.bs) - 1; i >= 0 { + if b.bs[i] == '\n' { + b.bs = b.bs[:i] + } + } +} + +// Free returns the Buffer to its Pool. +// +// Callers must not retain references to the Buffer after calling Free. +func (b *Buffer) Free() { + b.pool.put(b) +} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go new file mode 100644 index 0000000000..8fb3e202cf --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package buffer + +import "sync" + +// A Pool is a type-safe wrapper around a sync.Pool. +type Pool struct { + p *sync.Pool +} + +// NewPool constructs a new Pool. +func NewPool() Pool { + return Pool{p: &sync.Pool{ + New: func() interface{} { + return &Buffer{bs: make([]byte, 0, _size)} + }, + }} +} + +// Get retrieves a Buffer from the pool, creating one if necessary. +func (p Pool) Get() *Buffer { + buf := p.p.Get().(*Buffer) + buf.Reset() + buf.pool = p + return buf +} + +func (p Pool) put(buf *Buffer) { + p.p.Put(buf) +} diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh new file mode 100644 index 0000000000..345ac8b89a --- /dev/null +++ b/vendor/go.uber.org/zap/checklicense.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +ERROR_COUNT=0 +while read -r file +do + case "$(head -1 "${file}")" in + *"Copyright (c) "*" Uber Technologies, Inc.") + # everything's cool + ;; + *) + echo "$file is missing license header." + (( ERROR_COUNT++ )) + ;; + esac +done < <(git ls-files "*\.go") + +exit $ERROR_COUNT diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go new file mode 100644 index 0000000000..ee6096766a --- /dev/null +++ b/vendor/go.uber.org/zap/config.go @@ -0,0 +1,264 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "sort" + "time" + + "go.uber.org/zap/zapcore" +) + +// SamplingConfig sets a sampling strategy for the logger. Sampling caps the +// global CPU and I/O load that logging puts on your process while attempting +// to preserve a representative subset of your logs. +// +// If specified, the Sampler will invoke the Hook after each decision. +// +// Values configured here are per-second. See zapcore.NewSamplerWithOptions for +// details. +type SamplingConfig struct { + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` + Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` +} + +// Config offers a declarative way to construct a logger. It doesn't do +// anything that can't be done with New, Options, and the various +// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to +// toggle common options. +// +// Note that Config intentionally supports only the most common options. More +// unusual logging setups (logging to network connections or message queues, +// splitting output between multiple files, etc.) are possible, but require +// direct use of the zapcore package. For sample code, see the package-level +// BasicConfiguration and AdvancedConfiguration examples. +// +// For an example showing runtime log level changes, see the documentation for +// AtomicLevel. +type Config struct { + // Level is the minimum enabled logging level. Note that this is a dynamic + // level, so calling Config.Level.SetLevel will atomically change the log + // level of all loggers descended from this config. + Level AtomicLevel `json:"level" yaml:"level"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. + Development bool `json:"development" yaml:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. + DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` + // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. + Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` + // Encoding sets the logger's encoding. Valid values are "json" and + // "console", as well as any third-party encodings registered via + // RegisterEncoder. + Encoding string `json:"encoding" yaml:"encoding"` + // EncoderConfig sets options for the chosen encoder. See + // zapcore.EncoderConfig for details. + EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` + // OutputPaths is a list of URLs or file paths to write logging output to. + // See Open for details. 
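+	// An illustrative value: []string{"stdout", "/var/log/app.json"}.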
+ OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` + // ErrorOutputPaths is a list of URLs to write internal logger errors to. + // The default is standard error. + // + // Note that this setting only affects internal errors; for sample code that + // sends error-level logs to a different location from info- and debug-level + // logs, see the package-level AdvancedConfiguration example. + ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` + // InitialFields is a collection of fields to add to the root logger. + InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` +} + +// NewProductionEncoderConfig returns an opinionated EncoderConfig for +// production environments. +func NewProductionEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + FunctionKey: zapcore.OmitKey, + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewProductionConfig is a reasonable production logging configuration. +// Logging is enabled at InfoLevel and above. +// +// It uses a JSON encoder, writes to standard error, and enables sampling. +// Stacktraces are automatically included on logs of ErrorLevel and above. +func NewProductionConfig() Config { + return Config{ + Level: NewAtomicLevelAt(InfoLevel), + Development: false, + Sampling: &SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for +// development environments. +func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + // Keys can be anything except the empty string. + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + FunctionKey: zapcore.OmitKey, + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewDevelopmentConfig is a reasonable development logging configuration. +// Logging is enabled at DebugLevel and above. +// +// It enables development mode (which makes DPanicLevel logs panic), uses a +// console encoder, writes to standard error, and disables sampling. +// Stacktraces are automatically included on logs of WarnLevel and above. +func NewDevelopmentConfig() Config { + return Config{ + Level: NewAtomicLevelAt(DebugLevel), + Development: true, + Encoding: "console", + EncoderConfig: NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// Build constructs a logger from the Config and Options. 
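The presets above are usually taken as a starting point and tweaked before calling Build (implemented next in the diff). A minimal sketch of that pattern; the field values here are illustrative:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionConfig()

	// Adjust the preset: enable debug logging and stamp every entry with a
	// static field. Both knobs correspond to Config fields defined above.
	cfg.Level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
	cfg.InitialFields = map[string]interface{}{"service": "example"} // illustrative

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Debug("logger built from a tweaked production config")
}
```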
+func (cfg Config) Build(opts ...Option) (*Logger, error) { + enc, err := cfg.buildEncoder() + if err != nil { + return nil, err + } + + sink, errSink, err := cfg.openSinks() + if err != nil { + return nil, err + } + + if cfg.Level == (AtomicLevel{}) { + return nil, errors.New("missing Level") + } + + log := New( + zapcore.NewCore(enc, sink, cfg.Level), + cfg.buildOptions(errSink)..., + ) + if len(opts) > 0 { + log = log.WithOptions(opts...) + } + return log, nil +} + +func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { + opts := []Option{ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, AddCaller()) + } + + stackLevel := ErrorLevel + if cfg.Development { + stackLevel = WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, AddStacktrace(stackLevel)) + } + + if scfg := cfg.Sampling; scfg != nil { + opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { + var samplerOpts []zapcore.SamplerOption + if scfg.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) + } + return zapcore.NewSamplerWithOptions( + core, + time.Second, + cfg.Sampling.Initial, + cfg.Sampling.Thereafter, + samplerOpts..., + ) + })) + } + + if len(cfg.InitialFields) > 0 { + fs := make([]Field, 0, len(cfg.InitialFields)) + keys := make([]string, 0, len(cfg.InitialFields)) + for k := range cfg.InitialFields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fs = append(fs, Any(k, cfg.InitialFields[k])) + } + opts = append(opts, Fields(fs...)) + } + + return opts +} + +func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { + sink, closeOut, err := Open(cfg.OutputPaths...) + if err != nil { + return nil, nil, err + } + errSink, _, err := Open(cfg.ErrorOutputPaths...) + if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +func (cfg Config) buildEncoder() (zapcore.Encoder, error) { + return newEncoder(cfg.Encoding, cfg.EncoderConfig) +} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go new file mode 100644 index 0000000000..3c50d7b4d3 --- /dev/null +++ b/vendor/go.uber.org/zap/doc.go @@ -0,0 +1,117 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zap provides fast, structured, leveled logging. 
+// +// For applications that log in the hot path, reflection-based serialization +// and string formatting are prohibitively expensive - they're CPU-intensive +// and make many small allocations. Put differently, using json.Marshal and +// fmt.Fprintf to log tons of interface{} makes your application slow. +// +// Zap takes a different approach. It includes a reflection-free, +// zero-allocation JSON encoder, and the base Logger strives to avoid +// serialization overhead and allocations wherever possible. By building the +// high-level SugaredLogger on that foundation, zap lets users choose when +// they need to count every allocation and when they'd prefer a more familiar, +// loosely typed API. +// +// # Choosing a Logger +// +// In contexts where performance is nice, but not critical, use the +// SugaredLogger. It's 4-10x faster than other structured logging packages and +// supports both structured and printf-style logging. Like log15 and go-kit, +// the SugaredLogger's structured logging APIs are loosely typed and accept a +// variadic number of key-value pairs. (For more advanced use cases, they also +// accept strongly typed fields - see the SugaredLogger.With documentation for +// details.) +// +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// By default, loggers are unbuffered. However, since zap's low-level APIs +// allow buffering, calling Sync before letting your process exit is a good +// habit. +// +// In the rare contexts where every microsecond and every allocation matter, +// use the Logger. It's even faster than the SugaredLogger and allocates far +// less, but it only supports strongly-typed, structured logging. +// +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) +// +// Choosing between the Logger and SugaredLogger doesn't need to be an +// application-wide decision: converting between the two is simple and +// inexpensive. +// +// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// # Configuring Zap +// +// The simplest way to build a Logger is to use zap's opinionated presets: +// NewExample, NewProduction, and NewDevelopment. These presets build a logger +// with a single function call: +// +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() +// +// Presets are fine for small projects, but larger projects and organizations +// naturally require a bit more customization. For most users, zap's Config +// struct strikes the right balance between flexibility and convenience. See +// the package-level BasicConfiguration example for sample code. +// +// More unusual configurations (splitting output between files, sending logs +// to a message queue, etc.) are possible, but require direct use of +// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration +// example for sample code. +// +// # Extending Zap +// +// The zap package itself is a relatively thin wrapper around the interfaces +// in go.uber.org/zap/zapcore. 
Extending zap to support a new encoding (e.g., +// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an +// exception aggregation service, like Sentry or Rollbar) typically requires +// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core +// interfaces. See the zapcore documentation for details. +// +// Similarly, package authors can use the high-performance Encoder and Core +// implementations in the zapcore package to build their own loggers. +// +// # Frequently Asked Questions +// +// An FAQ covering everything from installation errors to design decisions is +// available at https://github.com/uber-go/zap/blob/master/FAQ.md. +package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go new file mode 100644 index 0000000000..caa04ceefd --- /dev/null +++ b/vendor/go.uber.org/zap/encoder.go @@ -0,0 +1,79 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/zap/zapcore" +) + +var ( + errNoEncoderNameSpecified = errors.New("no encoder name specified") + + _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ + "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewConsoleEncoder(encoderConfig), nil + }, + "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewJSONEncoder(encoderConfig), nil + }, + } + _encoderMutex sync.RWMutex +) + +// RegisterEncoder registers an encoder constructor, which the Config struct +// can then reference. By default, the "json" and "console" encoders are +// registered. +// +// Attempting to register an encoder whose name is already taken returns an +// error. 
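RegisterEncoder (implemented just below) is the hook that makes Config.Encoding extensible. A sketch of registering a custom name: "json-compact" is a hypothetical name, and the constructor simply reuses zap's stock JSON encoder, so only the registry lookup is being demonstrated:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Hypothetical encoder name; the constructor delegates to the stock
	// JSON encoder rather than implementing a new zapcore.Encoder.
	err := zap.RegisterEncoder("json-compact", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
		return zapcore.NewJSONEncoder(cfg), nil
	})
	if err != nil {
		panic(err) // empty or duplicate names are rejected, as documented above
	}

	cfg := zap.NewProductionConfig()
	cfg.Encoding = "json-compact" // resolved through the registry at Build time
	logger := zap.Must(cfg.Build())
	defer logger.Sync()
	logger.Info("logging through a custom-registered encoder")
}
```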
+func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { + _encoderMutex.Lock() + defer _encoderMutex.Unlock() + if name == "" { + return errNoEncoderNameSpecified + } + if _, ok := _encoderNameToConstructor[name]; ok { + return fmt.Errorf("encoder already registered for name %q", name) + } + _encoderNameToConstructor[name] = constructor + return nil +} + +func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { + return nil, errors.New("missing EncodeTime in EncoderConfig") + } + + _encoderMutex.RLock() + defer _encoderMutex.RUnlock() + if name == "" { + return nil, errNoEncoderNameSpecified + } + constructor, ok := _encoderNameToConstructor[name] + if !ok { + return nil, fmt.Errorf("no encoder registered for name %q", name) + } + return constructor(encoderConfig) +} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go new file mode 100644 index 0000000000..65982a51e5 --- /dev/null +++ b/vendor/go.uber.org/zap/error.go @@ -0,0 +1,80 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sync" + + "go.uber.org/zap/zapcore" +) + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Error is shorthand for the common idiom NamedError("error", err). +func Error(err error) Field { + return NamedError("error", err) +} + +// NamedError constructs a field that lazily stores err.Error() under the +// provided key. Errors which also implement fmt.Formatter (like those produced +// by github.com/pkg/errors) will also have their verbose representation stored +// under key+"Verbose". If passed a nil error, the field is a no-op. +// +// For the common case in which the key is simply "error", the Error function +// is shorter and less repetitive. +func NamedError(key string, err error) Field { + if err == nil { + return Skip() + } + return Field{Key: key, Type: zapcore.ErrorType, Interface: err} +} + +type errArray []error + +func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + // To represent each error as an object with an "error" attribute and + // potentially an "errorVerbose" attribute, we need to wrap it in a + // type that implements LogObjectMarshaler. 
To prevent this from + // allocating, pool the wrapper type. + elem := _errArrayElemPool.Get().(*errArrayElem) + elem.error = errs[i] + arr.AppendObject(elem) + elem.error = nil + _errArrayElemPool.Put(elem) + } + return nil +} + +type errArrayElem struct { + error +} + +func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // Re-use the error field's logic, which supports non-standard error types. + Error(e.error).AddTo(enc) + return nil +} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go new file mode 100644 index 0000000000..bbb745db5b --- /dev/null +++ b/vendor/go.uber.org/zap/field.go @@ -0,0 +1,549 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "math" + "time" + + "go.uber.org/zap/zapcore" +) + +// Field is an alias for zapcore.Field. Aliasing this type dramatically +// improves the navigability of this package's API documentation. +type Field = zapcore.Field + +var ( + _minTimeInt64 = time.Unix(0, math.MinInt64) + _maxTimeInt64 = time.Unix(0, math.MaxInt64) +) + +// Skip constructs a no-op field, which is often useful when handling invalid +// inputs in other Field constructors. +func Skip() Field { + return Field{Type: zapcore.SkipType} +} + +// nilField returns a field which will marshal explicitly as nil. See motivation +// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking +// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the +// implementation here should be changed to reflect that. +func nilField(key string) Field { return Reflect(key, nil) } + +// Binary constructs a field that carries an opaque binary blob. +// +// Binary data is serialized in an encoding-appropriate format. For example, +// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, +// use ByteString. +func Binary(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.BinaryType, Interface: val} +} + +// Bool constructs a field that carries a bool. +func Bool(key string, val bool) Field { + var ival int64 + if val { + ival = 1 + } + return Field{Key: key, Type: zapcore.BoolType, Integer: ival} +} + +// Boolp constructs a field that carries a *bool. The returned Field will safely +// and explicitly represent `nil` when appropriate.
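The error-wrapping machinery above surfaces through the Error, NamedError, and Errors constructors. A short sketch of how they render; the error values are illustrative:

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	err := errors.New("connection refused") // illustrative
	logger.Warn("dial failed",
		zap.Error(err),                 // stored under the conventional key "error"
		zap.NamedError("lastErr", err), // same behavior under a caller-chosen key
		zap.Errors("attempts", []error{err, nil, err}), // nil elements are skipped, per errArray above
	)
}
```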
+func Boolp(key string, val *bool) Field { + if val == nil { + return nilField(key) + } + return Bool(key, *val) +} + +// ByteString constructs a field that carries UTF-8 encoded text as a []byte. +// To log opaque binary blobs (which aren't necessarily valid UTF-8), use +// Binary. +func ByteString(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} +} + +// Complex128 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex128 to +// interface{}). +func Complex128(key string, val complex128) Field { + return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} +} + +// Complex128p constructs a field that carries a *complex128. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex128p(key string, val *complex128) Field { + if val == nil { + return nilField(key) + } + return Complex128(key, *val) +} + +// Complex64 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex64 to +// interface{}). +func Complex64(key string, val complex64) Field { + return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} +} + +// Complex64p constructs a field that carries a *complex64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex64p(key string, val *complex64) Field { + if val == nil { + return nilField(key) + } + return Complex64(key, *val) +} + +// Float64 constructs a field that carries a float64. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float64(key string, val float64) Field { + return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} +} + +// Float64p constructs a field that carries a *float64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float64p(key string, val *float64) Field { + if val == nil { + return nilField(key) + } + return Float64(key, *val) +} + +// Float32 constructs a field that carries a float32. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float32(key string, val float32) Field { + return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} +} + +// Float32p constructs a field that carries a *float32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float32p(key string, val *float32) Field { + if val == nil { + return nilField(key) + } + return Float32(key, *val) +} + +// Int constructs a field with the given key and value. +func Int(key string, val int) Field { + return Int64(key, int64(val)) +} + +// Intp constructs a field that carries a *int. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Intp(key string, val *int) Field { + if val == nil { + return nilField(key) + } + return Int(key, *val) +} + +// Int64 constructs a field with the given key and value. +func Int64(key string, val int64) Field { + return Field{Key: key, Type: zapcore.Int64Type, Integer: val} +} + +// Int64p constructs a field that carries a *int64. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Int64p(key string, val *int64) Field { + if val == nil { + return nilField(key) + } + return Int64(key, *val) +} + +// Int32 constructs a field with the given key and value. +func Int32(key string, val int32) Field { + return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} +} + +// Int32p constructs a field that carries a *int32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int32p(key string, val *int32) Field { + if val == nil { + return nilField(key) + } + return Int32(key, *val) +} + +// Int16 constructs a field with the given key and value. +func Int16(key string, val int16) Field { + return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} +} + +// Int16p constructs a field that carries a *int16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int16p(key string, val *int16) Field { + if val == nil { + return nilField(key) + } + return Int16(key, *val) +} + +// Int8 constructs a field with the given key and value. +func Int8(key string, val int8) Field { + return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} +} + +// Int8p constructs a field that carries a *int8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int8p(key string, val *int8) Field { + if val == nil { + return nilField(key) + } + return Int8(key, *val) +} + +// String constructs a field with the given key and value. +func String(key string, val string) Field { + return Field{Key: key, Type: zapcore.StringType, String: val} +} + +// Stringp constructs a field that carries a *string. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Stringp(key string, val *string) Field { + if val == nil { + return nilField(key) + } + return String(key, *val) +} + +// Uint constructs a field with the given key and value. +func Uint(key string, val uint) Field { + return Uint64(key, uint64(val)) +} + +// Uintp constructs a field that carries a *uint. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintp(key string, val *uint) Field { + if val == nil { + return nilField(key) + } + return Uint(key, *val) +} + +// Uint64 constructs a field with the given key and value. +func Uint64(key string, val uint64) Field { + return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} +} + +// Uint64p constructs a field that carries a *uint64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint64p(key string, val *uint64) Field { + if val == nil { + return nilField(key) + } + return Uint64(key, *val) +} + +// Uint32 constructs a field with the given key and value. +func Uint32(key string, val uint32) Field { + return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} +} + +// Uint32p constructs a field that carries a *uint32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint32p(key string, val *uint32) Field { + if val == nil { + return nilField(key) + } + return Uint32(key, *val) +} + +// Uint16 constructs a field with the given key and value. +func Uint16(key string, val uint16) Field { + return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} +} + +// Uint16p constructs a field that carries a *uint16. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
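All of the pointer variants in this file share one behavior: a non-nil pointer is dereferenced, and a nil pointer is rendered explicitly (via nilField) instead of panicking. A minimal illustration:

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	n := 42
	var missing *int // deliberately nil

	logger.Info("pointer fields",
		zap.Intp("present", &n),     // logged as 42
		zap.Intp("absent", missing), // logged as null, not a panic
	)
}
```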
+func Uint16p(key string, val *uint16) Field { + if val == nil { + return nilField(key) + } + return Uint16(key, *val) +} + +// Uint8 constructs a field with the given key and value. +func Uint8(key string, val uint8) Field { + return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} +} + +// Uint8p constructs a field that carries a *uint8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint8p(key string, val *uint8) Field { + if val == nil { + return nilField(key) + } + return Uint8(key, *val) +} + +// Uintptr constructs a field with the given key and value. +func Uintptr(key string, val uintptr) Field { + return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} +} + +// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintptrp(key string, val *uintptr) Field { + if val == nil { + return nilField(key) + } + return Uintptr(key, *val) +} + +// Reflect constructs a field with the given key and an arbitrary object. It uses +// an encoding-appropriate, reflection-based function to lazily serialize nearly +// any object into the logging context, but it's relatively slow and +// allocation-heavy. Outside tests, Any is always a better choice. +// +// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect +// includes the error message in the final log output. +func Reflect(key string, val interface{}) Field { + return Field{Key: key, Type: zapcore.ReflectType, Interface: val} +} + +// Namespace creates a named, isolated scope within the logger's context. All +// subsequent fields will be added to the new namespace. +// +// This helps prevent key collisions when injecting loggers into sub-components +// or third-party libraries. +func Namespace(key string) Field { + return Field{Key: key, Type: zapcore.NamespaceType} +} + +// Stringer constructs a field with the given key and the output of the value's +// String method. The Stringer's String method is called lazily. +func Stringer(key string, val fmt.Stringer) Field { + return Field{Key: key, Type: zapcore.StringerType, Interface: val} +} + +// Time constructs a Field with the given key and value. The encoder +// controls how the time is serialized. +func Time(key string, val time.Time) Field { + if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { + return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} + } + return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} +} + +// Timep constructs a field that carries a *time.Time. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Timep(key string, val *time.Time) Field { + if val == nil { + return nilField(key) + } + return Time(key, *val) +} + +// Stack constructs a field that stores a stacktrace of the current goroutine +// under provided key. Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) Field { + return StackSkip(key, 1) // skip Stack +} + +// StackSkip constructs a field similarly to Stack, but also skips the given +// number of frames from the top of the stacktrace. +func StackSkip(key string, skip int) Field { + // Returning the stacktrace as a string costs an allocation, but saves us + // from expanding the zapcore.Field union struct to include a byte slice. 
Since + // taking a stacktrace is already so expensive (~10us), the extra allocation + // is okay. + return String(key, takeStacktrace(skip+1)) // skip StackSkip +} + +// Duration constructs a field with the given key and value. The encoder +// controls how the duration is serialized. +func Duration(key string, val time.Duration) Field { + return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} +} + +// Durationp constructs a field that carries a *time.Duration. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Durationp(key string, val *time.Duration) Field { + if val == nil { + return nilField(key) + } + return Duration(key, *val) +} + +// Object constructs a field with the given key and ObjectMarshaler. It +// provides a flexible, but still type-safe and efficient, way to add map- or +// struct-like user-defined types to the logging context. The struct's +// MarshalLogObject method is called lazily. +func Object(key string, val zapcore.ObjectMarshaler) Field { + return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} +} + +// Inline constructs a Field that is similar to Object, but it +// will add the elements of the provided ObjectMarshaler to the +// current namespace. +func Inline(val zapcore.ObjectMarshaler) Field { + return zapcore.Field{ + Type: zapcore.InlineMarshalerType, + Interface: val, + } +} + +// Any takes a key and an arbitrary value and chooses the best way to represent +// them as a field, falling back to a reflection-based approach only if +// necessary. +// +// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between +// them. To minimize surprises, []byte values are treated as binary blobs, byte +// values are treated as uint8, and runes are always treated as integers. 
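The caveats above matter in practice: Any dispatches to a strongly-typed constructor whenever the concrete type is known (see the switch that follows), and only falls back to reflection. A sketch, with illustrative keys:

```go
package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	logger.Info("mixed bag",
		zap.Any("count", 7),                      // dispatches to Int
		zap.Any("ratio", 0.5),                    // dispatches to Float64
		zap.Any("payload", []byte("raw")),        // dispatches to Binary, per the []byte caveat
		zap.Any("elapsed", 250*time.Millisecond), // dispatches to Duration
		zap.Any("opaque", struct{ X int }{1}),    // falls back to Reflect
	)
}
```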
+func Any(key string, value interface{}) Field { + switch val := value.(type) { + case zapcore.ObjectMarshaler: + return Object(key, val) + case zapcore.ArrayMarshaler: + return Array(key, val) + case bool: + return Bool(key, val) + case *bool: + return Boolp(key, val) + case []bool: + return Bools(key, val) + case complex128: + return Complex128(key, val) + case *complex128: + return Complex128p(key, val) + case []complex128: + return Complex128s(key, val) + case complex64: + return Complex64(key, val) + case *complex64: + return Complex64p(key, val) + case []complex64: + return Complex64s(key, val) + case float64: + return Float64(key, val) + case *float64: + return Float64p(key, val) + case []float64: + return Float64s(key, val) + case float32: + return Float32(key, val) + case *float32: + return Float32p(key, val) + case []float32: + return Float32s(key, val) + case int: + return Int(key, val) + case *int: + return Intp(key, val) + case []int: + return Ints(key, val) + case int64: + return Int64(key, val) + case *int64: + return Int64p(key, val) + case []int64: + return Int64s(key, val) + case int32: + return Int32(key, val) + case *int32: + return Int32p(key, val) + case []int32: + return Int32s(key, val) + case int16: + return Int16(key, val) + case *int16: + return Int16p(key, val) + case []int16: + return Int16s(key, val) + case int8: + return Int8(key, val) + case *int8: + return Int8p(key, val) + case []int8: + return Int8s(key, val) + case string: + return String(key, val) + case *string: + return Stringp(key, val) + case []string: + return Strings(key, val) + case uint: + return Uint(key, val) + case *uint: + return Uintp(key, val) + case []uint: + return Uints(key, val) + case uint64: + return Uint64(key, val) + case *uint64: + return Uint64p(key, val) + case []uint64: + return Uint64s(key, val) + case uint32: + return Uint32(key, val) + case *uint32: + return Uint32p(key, val) + case []uint32: + return Uint32s(key, val) + case uint16: + return Uint16(key, val) + case *uint16: + return Uint16p(key, val) + case []uint16: + return Uint16s(key, val) + case uint8: + return Uint8(key, val) + case *uint8: + return Uint8p(key, val) + case []byte: + return Binary(key, val) + case uintptr: + return Uintptr(key, val) + case *uintptr: + return Uintptrp(key, val) + case []uintptr: + return Uintptrs(key, val) + case time.Time: + return Time(key, val) + case *time.Time: + return Timep(key, val) + case []time.Time: + return Times(key, val) + case time.Duration: + return Duration(key, val) + case *time.Duration: + return Durationp(key, val) + case []time.Duration: + return Durations(key, val) + case error: + return NamedError(key, val) + case []error: + return Errors(key, val) + case fmt.Stringer: + return Stringer(key, val) + default: + return Reflect(key, val) + } +} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go new file mode 100644 index 0000000000..1312875072 --- /dev/null +++ b/vendor/go.uber.org/zap/flag.go @@ -0,0 +1,39 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "flag" + + "go.uber.org/zap/zapcore" +) + +// LevelFlag uses the standard library's flag.Var to declare a global flag +// with the specified name, default, and usage guidance. The returned value is +// a pointer to the value of the flag. +// +// If you don't want to use the flag package's global state, you can use any +// non-nil *Level as a flag.Value with your own *flag.FlagSet. +func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { + lvl := defaultLevel + flag.Var(&lvl, name, usage) + return &lvl +} diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml new file mode 100644 index 0000000000..8e1d05e9ab --- /dev/null +++ b/vendor/go.uber.org/zap/glide.yaml @@ -0,0 +1,34 @@ +package: go.uber.org/zap +license: MIT +import: +- package: go.uber.org/atomic + version: ^1 +- package: go.uber.org/multierr + version: ^1 +testImport: +- package: github.com/satori/go.uuid +- package: github.com/sirupsen/logrus +- package: github.com/apex/log + subpackages: + - handlers/json +- package: github.com/go-kit/kit + subpackages: + - log +- package: github.com/stretchr/testify + subpackages: + - assert + - require +- package: gopkg.in/inconshreveable/log15.v2 +- package: github.com/mattn/goveralls +- package: github.com/pborman/uuid +- package: github.com/pkg/errors +- package: github.com/rs/zerolog +- package: golang.org/x/tools + subpackages: + - cover +- package: golang.org/x/lint + subpackages: + - golint +- package: github.com/axw/gocov + subpackages: + - gocov diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go new file mode 100644 index 0000000000..3cb46c9e0a --- /dev/null +++ b/vendor/go.uber.org/zap/global.go @@ -0,0 +1,169 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
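LevelFlag above wires zapcore.Level into the standard flag package. A sketch of driving a Config's level from the command line; the flag name is illustrative:

```go
package main

import (
	"flag"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Declares -log-level on the global flag set, e.g. -log-level=debug.
	level := zap.LevelFlag("log-level", zapcore.InfoLevel, "minimum enabled log level")
	flag.Parse()

	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(*level)
	logger := zap.Must(cfg.Build())
	defer logger.Sync()
	logger.Info("level taken from the command line")
}
```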
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "bytes" + "fmt" + "log" + "os" + "sync" + + "go.uber.org/zap/zapcore" +) + +const ( + _stdLogDefaultDepth = 1 + _loggerWriterDepth = 2 + _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + + "https://github.com/uber-go/zap/issues/new and reference this error: %v" +) + +var ( + _globalMu sync.RWMutex + _globalL = NewNop() + _globalS = _globalL.Sugar() +) + +// L returns the global Logger, which can be reconfigured with ReplaceGlobals. +// It's safe for concurrent use. +func L() *Logger { + _globalMu.RLock() + l := _globalL + _globalMu.RUnlock() + return l +} + +// S returns the global SugaredLogger, which can be reconfigured with +// ReplaceGlobals. It's safe for concurrent use. +func S() *SugaredLogger { + _globalMu.RLock() + s := _globalS + _globalMu.RUnlock() + return s +} + +// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a +// function to restore the original values. It's safe for concurrent use. +func ReplaceGlobals(logger *Logger) func() { + _globalMu.Lock() + prev := _globalL + _globalL = logger + _globalS = logger.Sugar() + _globalMu.Unlock() + return func() { ReplaceGlobals(prev) } +} + +// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at +// InfoLevel. To redirect the standard library's package-global logging +// functions, use RedirectStdLog instead. +func NewStdLog(l *Logger) *log.Logger { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + f := logger.Info + return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) +} + +// NewStdLogAt returns a *log.Logger which writes to the supplied zap logger +// at the required level. +func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil + } + +// RedirectStdLog redirects output from the standard library's package-global +// logger to the supplied logger at InfoLevel. Since zap already handles caller +// annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLog(l *Logger) func() { + f, err := redirectStdLogAt(l, InfoLevel) + if err != nil { + // Can't get here, since passing InfoLevel to redirectStdLogAt always + // works. + panic(fmt.Sprintf(_programmerErrorTemplate, err)) + } + return f +} + +// RedirectStdLogAt redirects output from the standard library's package-global +// logger to the supplied logger at the specified level. Since zap already +// handles caller annotations, timestamps, etc., it automatically disables the +// standard library's annotations and prefixing.
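The globals above (L, S, ReplaceGlobals) and the standard-library bridges (NewStdLog and friends) are typically combined as in this minimal sketch:

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// Swap in a real logger for the package-level no-op; the returned
	// closure restores the previous globals, which is handy in tests.
	undo := zap.ReplaceGlobals(logger)
	defer undo()

	zap.L().Info("via the global Logger")
	zap.S().Infow("via the global SugaredLogger", "k", "v")

	// Route the standard library's log package through the same logger.
	std := zap.NewStdLog(logger)
	std.Println("printed through zap at InfoLevel")
}
```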
+// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + return redirectStdLogAt(l, level) +} + +func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + flags := log.Flags() + prefix := log.Prefix() + log.SetFlags(0) + log.SetPrefix("") + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + log.SetOutput(&loggerWriter{logFunc}) + return func() { + log.SetFlags(flags) + log.SetPrefix(prefix) + log.SetOutput(os.Stderr) + }, nil +} + +func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { + switch lvl { + case DebugLevel: + return logger.Debug, nil + case InfoLevel: + return logger.Info, nil + case WarnLevel: + return logger.Warn, nil + case ErrorLevel: + return logger.Error, nil + case DPanicLevel: + return logger.DPanic, nil + case PanicLevel: + return logger.Panic, nil + case FatalLevel: + return logger.Fatal, nil + } + return nil, fmt.Errorf("unrecognized level: %q", lvl) +} + +type loggerWriter struct { + logFunc func(msg string, fields ...Field) +} + +func (l *loggerWriter) Write(p []byte) (int, error) { + p = bytes.TrimSpace(p) + l.logFunc(string(p)) + return len(p), nil +} diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go new file mode 100644 index 0000000000..632b6831a8 --- /dev/null +++ b/vendor/go.uber.org/zap/http_handler.go @@ -0,0 +1,133 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "go.uber.org/zap/zapcore" +) + +// ServeHTTP is a simple JSON endpoint that can report on or change the current +// logging level. +// +// # GET +// +// The GET request returns a JSON description of the current logging level like: +// +// {"level":"info"} +// +// # PUT +// +// The PUT request changes the logging level. It is perfectly safe to change the +// logging level while a program is running. Two content types are supported: +// +// Content-Type: application/x-www-form-urlencoded +// +// With this content type, the level can be provided through the request body or +// a query parameter. 
The log level is URL encoded like: +// +// level=debug +// +// The request body takes precedence over the query parameter, if both are +// specified. +// +// This content type is the default for a curl PUT request. Following are two +// example curl requests that both set the logging level to debug. +// +// curl -X PUT localhost:8080/log/level?level=debug +// curl -X PUT localhost:8080/log/level -d level=debug +// +// For any other content type, the payload is expected to be JSON encoded and +// look like: +// +// {"level":"info"} +// +// An example curl request could look like this: +// +// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' +func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { + type errorResponse struct { + Error string `json:"error"` + } + type payload struct { + Level zapcore.Level `json:"level"` + } + + enc := json.NewEncoder(w) + + switch r.Method { + case http.MethodGet: + enc.Encode(payload{Level: lvl.Level()}) + case http.MethodPut: + requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + enc.Encode(errorResponse{Error: err.Error()}) + return + } + lvl.SetLevel(requestedLvl) + enc.Encode(payload{Level: lvl.Level()}) + default: + w.WriteHeader(http.StatusMethodNotAllowed) + enc.Encode(errorResponse{ + Error: "Only GET and PUT are supported.", + }) + } +} + +// Decodes incoming PUT requests and returns the requested logging level. +func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) { + if contentType == "application/x-www-form-urlencoded" { + return decodePutURL(r) + } + return decodePutJSON(r.Body) +} + +func decodePutURL(r *http.Request) (zapcore.Level, error) { + lvl := r.FormValue("level") + if lvl == "" { + return 0, errors.New("must specify logging level") + } + var l zapcore.Level + if err := l.UnmarshalText([]byte(lvl)); err != nil { + return 0, err + } + return l, nil +} + +func decodePutJSON(body io.Reader) (zapcore.Level, error) { + var pld struct { + Level *zapcore.Level `json:"level"` + } + if err := json.NewDecoder(body).Decode(&pld); err != nil { + return 0, fmt.Errorf("malformed request body: %v", err) + } + if pld.Level == nil { + return 0, errors.New("must specify logging level") + } + return *pld.Level, nil + +} diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go new file mode 100644 index 0000000000..dad583aaa5 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go @@ -0,0 +1,31 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
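Because ServeHTTP is defined on AtomicLevel itself, the level can be mounted directly on a mux. A sketch; the path and address are illustrative:

```go
package main

import (
	"net/http"

	"go.uber.org/zap"
)

func main() {
	atom := zap.NewAtomicLevel()

	// AtomicLevel satisfies http.Handler via the ServeHTTP method above:
	// GET reports the current level, PUT (form or JSON body) changes it live.
	http.Handle("/log/level", atom)

	// e.g. curl -X PUT localhost:8080/log/level -d level=debug
	_ = http.ListenAndServe("localhost:8080", nil)
}
```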
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package bufferpool houses zap's shared internal buffer pool. Third-party +// packages can recreate the same functionality with buffers.NewPool. +package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go new file mode 100644 index 0000000000..c4d5d02abc --- /dev/null +++ b/vendor/go.uber.org/zap/internal/color/color.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package color adds coloring functionality for TTY output. +package color + +import "fmt" + +// Foreground colors. +const ( + Black Color = iota + 30 + Red + Green + Yellow + Blue + Magenta + Cyan + White +) + +// Color represents a text color. +type Color uint8 + +// Add adds the coloring to the given string. +func (c Color) Add(s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) +} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go new file mode 100644 index 0000000000..f673f9947b --- /dev/null +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -0,0 +1,66 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
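Since internal/color cannot be imported from outside the zap module, here is a standalone sketch of the same ANSI technique the package uses; the constants mirror the ones above:

```go
package main

import "fmt"

// color mirrors the internal package: a foreground color code that wraps a
// string in an ANSI escape sequence and then resets the terminal.
type color uint8

const (
	red   color = 31
	green color = 32
)

func (c color) add(s string) string {
	return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s)
}

func main() {
	fmt.Println(red.add("ERROR"), green.add("INFO"))
}
```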
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package exit provides stubs so that unit tests can exercise code that calls +// os.Exit(1). +package exit + +import "os" + +var _exit = os.Exit + +// With terminates the process by calling os.Exit(code). If the package is +// stubbed, it instead records a call in the testing spy. +func With(code int) { + _exit(code) +} + +// A StubbedExit is a testing fake for os.Exit. +type StubbedExit struct { + Exited bool + Code int + prev func(code int) +} + +// Stub substitutes a fake for the call to os.Exit(1). +func Stub() *StubbedExit { + s := &StubbedExit{prev: _exit} + _exit = s.exit + return s +} + +// WithStub runs the supplied function with Exit stubbed. It returns the stub +// used, so that users can test whether the process would have crashed. +func WithStub(f func()) *StubbedExit { + s := Stub() + defer s.Unstub() + f() + return s +} + +// Unstub restores the previous exit function. +func (se *StubbedExit) Unstub() { + _exit = se.prev +} + +func (se *StubbedExit) exit(code int) { + se.Exited = true + se.Code = code +} diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go new file mode 100644 index 0000000000..5f3e3f1b92 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/level_enabler.go @@ -0,0 +1,35 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package internal + +import "go.uber.org/zap/zapcore" + +// LeveledEnabler is an interface satisfied by LevelEnablers that are able to +// report their own level. +// +// This interface is defined here so that it can be used more conveniently in +// tests and in non-zapcore packages. +// It cannot be defined in zapcore itself, because doing so would create a +// cyclic dependency. +type LeveledEnabler interface { + zapcore.LevelEnabler + + Level() zapcore.Level +} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go new file mode 100644 index 0000000000..db951e19a5 --- /dev/null +++ b/vendor/go.uber.org/zap/level.go @@ -0,0 +1,152 @@ +// Copyright (c) 2016 Uber Technologies, Inc.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "go.uber.org/atomic" + "go.uber.org/zap/internal" + "go.uber.org/zap/zapcore" +) + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel = zapcore.DebugLevel + // InfoLevel is the default logging priority. + InfoLevel = zapcore.InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel = zapcore.WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel = zapcore.ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel = zapcore.DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel = zapcore.PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel = zapcore.FatalLevel +) + +// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with +// an anonymous function. +// +// It's particularly useful when splitting log output between different +// outputs (e.g., standard error and standard out). For sample code, see the +// package-level AdvancedConfiguration example. +type LevelEnablerFunc func(zapcore.Level) bool + +// Enabled calls the wrapped function. +func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } + +// An AtomicLevel is an atomically changeable, dynamic logging level. It lets +// you safely change the log level of a tree of loggers (the root logger and +// any children created by adding context) at runtime. +// +// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to +// alter its level. +// +// AtomicLevels must be created with the NewAtomicLevel constructor to allocate +// their internal atomic pointer. +type AtomicLevel struct { + l *atomic.Int32 +} + +var _ internal.LeveledEnabler = AtomicLevel{} + +// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging +// enabled. +func NewAtomicLevel() AtomicLevel { + return AtomicLevel{ + l: atomic.NewInt32(int32(InfoLevel)), + } +} + +// NewAtomicLevelAt is a convenience function that creates an AtomicLevel +// and then calls SetLevel with the given level. 
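An AtomicLevel is most useful when it gates a core directly, since SetLevel then retunes every descendant logger at runtime. A minimal sketch using the constructors above:

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	atom := zap.NewAtomicLevelAt(zap.InfoLevel)

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stderr),
		atom, // the dynamic level gates the whole core
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Debug("suppressed while the level is Info")
	atom.SetLevel(zap.DebugLevel) // safe to call while the logger is in use
	logger.Debug("now visible")
}
```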
+func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { + a := NewAtomicLevel() + a.SetLevel(l) + return a +} + +// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseAtomicLevel(text string) (AtomicLevel, error) { + a := NewAtomicLevel() + l, err := zapcore.ParseLevel(text) + if err != nil { + return a, err + } + + a.SetLevel(l) + return a, nil +} + +// Enabled implements the zapcore.LevelEnabler interface, which allows the +// AtomicLevel to be used in place of traditional static levels. +func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { + return lvl.Level().Enabled(l) +} + +// Level returns the minimum enabled log level. +func (lvl AtomicLevel) Level() zapcore.Level { + return zapcore.Level(int8(lvl.l.Load())) +} + +// SetLevel alters the logging level. +func (lvl AtomicLevel) SetLevel(l zapcore.Level) { + lvl.l.Store(int32(l)) +} + +// String returns the string representation of the underlying Level. +func (lvl AtomicLevel) String() string { + return lvl.Level().String() +} + +// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text +// representations as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl *AtomicLevel) UnmarshalText(text []byte) error { + if lvl.l == nil { + lvl.l = &atomic.Int32{} + } + + var l zapcore.Level + if err := l.UnmarshalText(text); err != nil { + return err + } + + lvl.SetLevel(l) + return nil +} + +// MarshalText marshals the AtomicLevel to a byte slice. It uses the same +// text representation as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl AtomicLevel) MarshalText() (text []byte, err error) { + return lvl.Level().MarshalText() +} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go new file mode 100644 index 0000000000..cd44030d13 --- /dev/null +++ b/vendor/go.uber.org/zap/logger.go @@ -0,0 +1,400 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io" + "os" + "strings" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/zapcore" +) + +// A Logger provides fast, leveled, structured logging. 
All methods are safe +// for concurrent use. +// +// The Logger is designed for contexts in which every microsecond and every +// allocation matters, so its API intentionally favors performance and type +// safety over brevity. For most applications, the SugaredLogger strikes a +// better balance between performance and ergonomics. +type Logger struct { + core zapcore.Core + + development bool + addCaller bool + onFatal zapcore.CheckWriteHook // default is WriteThenFatal + + name string + errorOutput zapcore.WriteSyncer + + addStack zapcore.LevelEnabler + + callerSkip int + + clock zapcore.Clock +} + +// New constructs a new Logger from the provided zapcore.Core and Options. If +// the passed zapcore.Core is nil, it falls back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. For typical use cases, the highly-opinionated presets +// (NewProduction, NewDevelopment, and NewExample) or the Config struct are +// more convenient. +// +// For sample code, see the package-level AdvancedConfiguration example. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(io.Discard), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } +} + +// NewProduction builds a sensible production Logger that writes InfoLevel and +// above logs to standard error as JSON. +// +// It's a shortcut for NewProductionConfig().Build(...Option). +func NewProduction(options ...Option) (*Logger, error) { + return NewProductionConfig().Build(options...) +} + +// NewDevelopment builds a development Logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. +// +// It's a shortcut for NewDevelopmentConfig().Build(...Option). +func NewDevelopment(options ...Option) (*Logger, error) { + return NewDevelopmentConfig().Build(options...) +} + +// Must is a helper that wraps a call to a function returning (*Logger, error) +// and panics if the error is non-nil. It is intended for use in variable +// initialization such as: +// +// var logger = zap.Must(zap.NewProduction()) +func Must(logger *Logger, err error) *Logger { + if err != nil { + panic(err) + } + + return logger +} + +// NewExample builds a Logger that's designed for use in zap's testable +// examples. It writes DebugLevel and above logs to standard out as JSON, but +// omits the timestamp and calling function to keep example output +// short and deterministic. +func NewExample(options ...Option) *Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) + return New(core).WithOptions(options...) +} + +// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. 
Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. +func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. By default, Loggers are unnamed. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. +func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. Fields added +// to the child don't affect the parent, and vice versa. +func (log *Logger) With(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + l := log.clone() + l.core = l.core.With(fields) + return l +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (log *Logger) Level() zapcore.Level { + return zapcore.LevelOf(log.core) +} + +// Check returns a CheckedEntry if logging a message at the specified level +// is enabled. It's a completely optional optimization; in high-performance +// applications, Check can help avoid allocating a slice to hold fields. +func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + return log.check(lvl, msg) +} + +// Log logs a message at the specified level. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) { + if ce := log.check(lvl, msg); ce != nil { + ce.Write(fields...) + } +} + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Debug(msg string, fields ...Field) { + if ce := log.check(DebugLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Info(msg string, fields ...Field) { + if ce := log.check(InfoLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Warn logs a message at WarnLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Warn(msg string, fields ...Field) { + if ce := log.check(WarnLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Error(msg string, fields ...Field) { + if ce := log.check(ErrorLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// DPanic logs a message at DPanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). 
This is useful for catching errors that are
+// recoverable, but shouldn't ever happen.
+func (log *Logger) DPanic(msg string, fields ...Field) {
+	if ce := log.check(DPanicLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Panic logs a message at PanicLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then panics, even if logging at PanicLevel is disabled.
+func (log *Logger) Panic(msg string, fields ...Field) {
+	if ce := log.check(PanicLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Fatal logs a message at FatalLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then calls os.Exit(1), even if logging at FatalLevel is
+// disabled.
+func (log *Logger) Fatal(msg string, fields ...Field) {
+	if ce := log.check(FatalLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Sync calls the underlying Core's Sync method, flushing any buffered log
+// entries. Applications should take care to call Sync before exiting.
+func (log *Logger) Sync() error {
+	return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+	return log.core
+}
+
+func (log *Logger) clone() *Logger {
+	copy := *log
+	return &copy
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+	// Logger.check must always be called directly by a method in the
+	// Logger interface (e.g., Check, Info, Fatal).
+	// This skips Logger.check and the Info/Fatal/Check/etc. method that
+	// called it.
+	const callerSkipOffset = 2
+
+	// Check the level first to reduce the cost of disabled log calls.
+	// Since Panic and higher may exit, we skip the optimization for those levels.
+	if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) {
+		return nil
+	}
+
+	// Create basic checked entry thru the core; this will be non-nil if the
+	// log message will actually be written somewhere.
+	ent := zapcore.Entry{
+		LoggerName: log.name,
+		Time:       log.clock.Now(),
+		Level:      lvl,
+		Message:    msg,
+	}
+	ce := log.core.Check(ent, nil)
+	willWrite := ce != nil
+
+	// Set up any required terminal behavior.
+	switch ent.Level {
+	case zapcore.PanicLevel:
+		ce = ce.After(ent, zapcore.WriteThenPanic)
+	case zapcore.FatalLevel:
+		onFatal := log.onFatal
+		// nil or WriteThenNoop will lead to continued execution after
+		// a Fatal log entry, which is unexpected. For example,
+		//
+		//   f, err := os.Open(..)
+		//   if err != nil {
+		//     log.Fatal("cannot open", zap.Error(err))
+		//   }
+		//   fmt.Println(f.Name())
+		//
+		// The f.Name() will panic if we continue execution after the
+		// log.Fatal.
+		if onFatal == nil || onFatal == zapcore.WriteThenNoop {
+			onFatal = zapcore.WriteThenFatal
+		}
+		ce = ce.After(ent, onFatal)
+	case zapcore.DPanicLevel:
+		if log.development {
+			ce = ce.After(ent, zapcore.WriteThenPanic)
+		}
+	}
+
+	// Only do further annotation if we're going to write this message; checked
+	// entries that exist only for terminal behavior don't benefit from
+	// annotation.
+	if !willWrite {
+		return ce
+	}
+
+	// Thread the error output through to the CheckedEntry.
+	ce.ErrorOutput = log.errorOutput
+
+	addStack := log.addStack.Enabled(ce.Level)
+	if !log.addCaller && !addStack {
+		return ce
+	}
+
+	// Adding the caller or stack trace requires capturing the callers of
+	// this function. We'll share information between these two.
+ stackDepth := stacktraceFirst + if addStack { + stackDepth = stacktraceFull + } + stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth) + defer stack.Free() + + if stack.Count() == 0 { + if log.addCaller { + fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) + log.errorOutput.Sync() + } + return ce + } + + frame, more := stack.Next() + + if log.addCaller { + ce.Caller = zapcore.EntryCaller{ + Defined: frame.PC != 0, + PC: frame.PC, + File: frame.File, + Line: frame.Line, + Function: frame.Function, + } + } + + if addStack { + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := newStackFormatter(buffer) + + // We've already extracted the first frame, so format that + // separately and defer to stackfmt for the rest. + stackfmt.FormatFrame(frame) + if more { + stackfmt.FormatStack(stack) + } + ce.Stack = buffer.String() + } + + return ce +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go new file mode 100644 index 0000000000..c4f3bca3d2 --- /dev/null +++ b/vendor/go.uber.org/zap/options.go @@ -0,0 +1,167 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" +) + +// An Option configures a Logger. +type Option interface { + apply(*Logger) +} + +// optionFunc wraps a func so it satisfies the Option interface. +type optionFunc func(*Logger) + +func (f optionFunc) apply(log *Logger) { + f(log) +} + +// WrapCore wraps or replaces the Logger's underlying zapcore.Core. +func WrapCore(f func(zapcore.Core) zapcore.Core) Option { + return optionFunc(func(log *Logger) { + log.core = f(log.core) + }) +} + +// Hooks registers functions which will be called each time the Logger writes +// out an Entry. Repeated use of Hooks is additive. +// +// Hooks are useful for simple side effects, like capturing metrics for the +// number of emitted logs. More complex side effects, including anything that +// requires access to the Entry's structured fields, should be implemented as +// a zapcore.Core instead. See zapcore.RegisterHooks for details. +func Hooks(hooks ...func(zapcore.Entry) error) Option { + return optionFunc(func(log *Logger) { + log.core = zapcore.RegisterHooks(log.core, hooks...) + }) +} + +// Fields adds fields to the Logger. 
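
As an illustrative aside (not part of the patch): the Hooks option above is the lightest way to hang a side effect off every written entry, e.g. counting error-level logs. errorCount is a made-up metric:

package main

import (
	"sync/atomic"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

var errorCount atomic.Int64 // hypothetical metric

func main() {
	logger := zap.Must(zap.NewProduction(zap.Hooks(func(e zapcore.Entry) error {
		if e.Level >= zapcore.ErrorLevel {
			errorCount.Add(1)
		}
		return nil
	})))
	defer logger.Sync()

	logger.Error("oops")
	_ = errorCount.Load() // 1
}
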
+func Fields(fs ...Field) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = log.core.With(fs)
+	})
+}
+
+// ErrorOutput sets the destination for errors generated by the Logger. Note
+// that this option only affects internal errors; for sample code that sends
+// error-level logs to a different location from info- and debug-level logs,
+// see the package-level AdvancedConfiguration example.
+//
+// The supplied WriteSyncer must be safe for concurrent use. The Open and
+// zapcore.Lock functions are the simplest ways to protect files with a mutex.
+func ErrorOutput(w zapcore.WriteSyncer) Option {
+	return optionFunc(func(log *Logger) {
+		log.errorOutput = w
+	})
+}
+
+// Development puts the logger in development mode, which makes DPanic-level
+// logs panic instead of simply logging an error.
+func Development() Option {
+	return optionFunc(func(log *Logger) {
+		log.development = true
+	})
+}
+
+// AddCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller. See also WithCaller.
+func AddCaller() Option {
+	return WithCaller(true)
+}
+
+// WithCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller, or not, depending on the
+// value of enabled. This is a generalized form of AddCaller.
+func WithCaller(enabled bool) Option {
+	return optionFunc(func(log *Logger) {
+		log.addCaller = enabled
+	})
+}
+
+// AddCallerSkip increases the number of callers skipped by caller annotation
+// (as enabled by the AddCaller option). When building wrappers around the
+// Logger and SugaredLogger, supplying this Option prevents zap from always
+// reporting the wrapper code as the caller.
+func AddCallerSkip(skip int) Option {
+	return optionFunc(func(log *Logger) {
+		log.callerSkip += skip
+	})
+}
+
+// AddStacktrace configures the Logger to record a stack trace for all messages at
+// or above a given level.
+func AddStacktrace(lvl zapcore.LevelEnabler) Option {
+	return optionFunc(func(log *Logger) {
+		log.addStack = lvl
+	})
+}
+
+// IncreaseLevel increases the level of the logger. It has no effect if
+// the passed in level tries to decrease the level of the logger.
+func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
+	return optionFunc(func(log *Logger) {
+		core, err := zapcore.NewIncreaseLevelCore(log.core, lvl)
+		if err != nil {
+			fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err)
+		} else {
+			log.core = core
+		}
+	})
+}
+
+// OnFatal sets the action to take on fatal logs.
+//
+// Deprecated: Use [WithFatalHook] instead.
+func OnFatal(action zapcore.CheckWriteAction) Option {
+	return WithFatalHook(action)
+}
+
+// WithFatalHook sets a CheckWriteHook to run on fatal logs.
+// Zap will call this hook after writing a log statement with a Fatal level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a fatal log message, but it will not exit the
+// program.
+//
+//	zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
+//
+// It is important that the provided CheckWriteHook stops the control flow at
+// the current statement to meet expectations of callers of the logger.
+// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
+// minimum.
+func WithFatalHook(hook zapcore.CheckWriteHook) Option {
+	return optionFunc(func(log *Logger) {
+		log.onFatal = hook
+	})
+}
+
+// WithClock specifies the clock used by the logger to determine the current
+// time for logged entries. Defaults to the system clock with time.Now.
+func WithClock(clock zapcore.Clock) Option {
+	return optionFunc(func(log *Logger) {
+		log.clock = clock
+	})
+}
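
To make the WithFatalHook contract above concrete, a minimal sketch of a custom hook that flushes before terminating, per the os.Exit recommendation. flushThenExit is an invented name and the wiring is illustrative, not code from this patch:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// flushThenExit satisfies zapcore.CheckWriteHook: flush first, then stop
// control flow as the option's doc comment requires.
type flushThenExit struct{ ws zapcore.WriteSyncer }

func (h flushThenExit) OnWrite(*zapcore.CheckedEntry, []zapcore.Field) {
	_ = h.ws.Sync() // best effort; the process is about to end
	os.Exit(1)
}

func main() {
	ws := zapcore.Lock(os.Stderr)
	core := zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), ws, zapcore.InfoLevel)
	logger := zap.New(core, zap.WithFatalHook(flushThenExit{ws: ws}))
	logger.Fatal("unrecoverable") // written, synced, then os.Exit(1)
}
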
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
new file mode 100644
index 0000000000..478c9a10ff
--- /dev/null
+++ b/vendor/go.uber.org/zap/sink.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"go.uber.org/zap/zapcore"
+)
+
+const schemeFile = "file"
+
+var _sinkRegistry = newSinkRegistry()
+
+// Sink defines the interface to write to and close logger destinations.
+type Sink interface {
+	zapcore.WriteSyncer
+	io.Closer
+}
+
+type errSinkNotFound struct {
+	scheme string
+}
+
+func (e *errSinkNotFound) Error() string {
+	return fmt.Sprintf("no sink found for scheme %q", e.scheme)
+}
+
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+	mu        sync.Mutex
+	factories map[string]func(*url.URL) (Sink, error)          // keyed by scheme
+	openFile  func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+	sr := &sinkRegistry{
+		factories: make(map[string]func(*url.URL) (Sink, error)),
+		openFile:  os.OpenFile,
+	}
+	sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+	return sr
+}
+
+// RegisterSink registers the given factory for the specific scheme.
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+	sr.mu.Lock()
+	defer sr.mu.Unlock()
+
+	if scheme == "" {
+		return errors.New("can't register a sink factory for empty string")
+	}
+	normalized, err := normalizeScheme(scheme)
+	if err != nil {
+		return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
+	}
+	if _, ok := sr.factories[normalized]; ok {
+		return fmt.Errorf("sink factory already registered for scheme %q", normalized)
+	}
+	sr.factories[normalized] = factory
+	return nil
+}
+
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+	// URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
+	// the drive, and path is unset unless `c:/log.txt` is used.
+	// To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+	// filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+	if filepath.IsAbs(rawURL) {
+		return sr.newFileSinkFromPath(rawURL)
+	}
+
+	u, err := url.Parse(rawURL)
+	if err != nil {
+		return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
+	}
+	if u.Scheme == "" {
+		u.Scheme = schemeFile
+	}
+
+	sr.mu.Lock()
+	factory, ok := sr.factories[u.Scheme]
+	sr.mu.Unlock()
+	if !ok {
+		return nil, &errSinkNotFound{u.Scheme}
+	}
+	return factory(u)
+}
+
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+	return _sinkRegistry.RegisterSink(scheme, factory)
+}
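
A sketch of this registry in use (memSink and the "mem" scheme are invented for illustration, not part of the patch): register a factory once, and any path handed to zap.Open, vendored later in this diff, can address the sink by URL:

package main

import (
	"bytes"
	"net/url"

	"go.uber.org/zap"
)

// memSink is a hypothetical in-memory Sink, e.g. for tests.
type memSink struct{ bytes.Buffer }

func (*memSink) Close() error { return nil }
func (*memSink) Sync() error  { return nil }

func main() {
	sink := &memSink{}
	if err := zap.RegisterSink("mem", func(*url.URL) (zap.Sink, error) {
		return sink, nil
	}); err != nil {
		panic(err)
	}

	ws, cleanup, err := zap.Open("mem://") // resolved via the registry
	if err != nil {
		panic(err)
	}
	defer cleanup()
	_, _ = ws.Write([]byte("captured in memory\n"))
}
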
+
+func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
+	if u.User != nil {
+		return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
+	}
+	if u.Fragment != "" {
+		return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u)
+	}
+	if u.RawQuery != "" {
+		return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u)
+	}
+	// Error messages are better if we check hostname and port separately.
+	if u.Port() != "" {
+		return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u)
+	}
+	if hn := u.Hostname(); hn != "" && hn != "localhost" {
+		return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
+	}
+
+	return sr.newFileSinkFromPath(u.Path)
+}
+
+func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
+	switch path {
+	case "stdout":
+		return nopCloserSink{os.Stdout}, nil
+	case "stderr":
+		return nopCloserSink{os.Stderr}, nil
+	}
+	return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+}
+
+func normalizeScheme(s string) (string, error) {
+	// https://tools.ietf.org/html/rfc3986#section-3.1
+	s = strings.ToLower(s)
+	if first := s[0]; 'a' > first || 'z' < first {
+		return "", errors.New("must start with a letter")
+	}
+	for i := 1; i < len(s); i++ { // iterate over bytes, not runes
+		c := s[i]
+		switch {
+		case 'a' <= c && c <= 'z':
+			continue
+		case '0' <= c && c <= '9':
+			continue
+		case c == '.' || c == '+' || c == '-':
+			continue
+		}
+		return "", fmt.Errorf("may not contain %q", c)
+	}
+	return s, nil
+}
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
new file mode 100644
index 0000000000..817a3bde8b
--- /dev/null
+++ b/vendor/go.uber.org/zap/stacktrace.go
@@ -0,0 +1,176 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"runtime"
+	"sync"
+
+	"go.uber.org/zap/buffer"
+	"go.uber.org/zap/internal/bufferpool"
+)
+
+var _stacktracePool = sync.Pool{
+	New: func() interface{} {
+		return &stacktrace{
+			storage: make([]uintptr, 64),
+		}
+	},
+}
+
+type stacktrace struct {
+	pcs    []uintptr // program counters; always a subslice of storage
+	frames *runtime.Frames
+
+	// The size of pcs varies depending on requirements:
+	// it will be one if only the first frame was requested,
+	// and otherwise it will reflect the depth of the call stack.
+	//
+	// storage decouples the slice we need (pcs) from the slice we pool.
+	// We will always allocate a reasonably large storage, but we'll use
+	// only as much of it as we need.
+	storage []uintptr
+}
+
+// stacktraceDepth specifies how deep of a stack trace should be captured.
+type stacktraceDepth int
+
+const (
+	// stacktraceFirst captures only the first frame.
+	stacktraceFirst stacktraceDepth = iota
+
+	// stacktraceFull captures the entire call stack, allocating more
+	// storage for it if needed.
+	stacktraceFull
+)
+
+// captureStacktrace captures a stack trace of the specified depth, skipping
+// the provided number of frames. skip=0 identifies the caller of
+// captureStacktrace.
+//
+// The caller must call Free on the returned stacktrace after using it.
+func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
+	stack := _stacktracePool.Get().(*stacktrace)
+
+	switch depth {
+	case stacktraceFirst:
+		stack.pcs = stack.storage[:1]
+	case stacktraceFull:
+		stack.pcs = stack.storage
+	}
+
+	// Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
+	// itself. +2 to skip captureStacktrace and runtime.Callers.
+	numFrames := runtime.Callers(
+		skip+2,
+		stack.pcs,
+	)
+
+	// runtime.Callers truncates the recorded stacktrace if there is no
+	// room in the provided slice. For the full stack trace, keep expanding
+	// storage until there are fewer frames than there is room.
+	if depth == stacktraceFull {
+		pcs := stack.pcs
+		for numFrames == len(pcs) {
+			pcs = make([]uintptr, len(pcs)*2)
+			numFrames = runtime.Callers(skip+2, pcs)
+		}
+
+		// Discard old storage instead of returning it to the pool.
+		// This will adjust the pool size over time if stack traces are
+		// consistently very deep.
+		stack.storage = pcs
+		stack.pcs = pcs[:numFrames]
+	} else {
+		stack.pcs = stack.pcs[:numFrames]
+	}
+
+	stack.frames = runtime.CallersFrames(stack.pcs)
+	return stack
+}
+
+// Free releases resources associated with this stacktrace
+// and returns it back to the pool.
+func (st *stacktrace) Free() {
+	st.frames = nil
+	st.pcs = nil
+	_stacktracePool.Put(st)
+}
+
+// Count reports the total number of frames in this stacktrace.
+// Count DOES NOT change as Next is called.
+func (st *stacktrace) Count() int {
+	return len(st.pcs)
+}
+
+// Next returns the next frame in the stack trace,
+// and a boolean indicating whether there are more after it.
+func (st *stacktrace) Next() (_ runtime.Frame, more bool) {
+	return st.frames.Next()
+}
+
+func takeStacktrace(skip int) string {
+	stack := captureStacktrace(skip+1, stacktraceFull)
+	defer stack.Free()
+
+	buffer := bufferpool.Get()
+	defer buffer.Free()
+
+	stackfmt := newStackFormatter(buffer)
+	stackfmt.FormatStack(stack)
+	return buffer.String()
+}
+
+// stackFormatter formats a stack trace into a readable string representation.
+type stackFormatter struct {
+	b        *buffer.Buffer
+	nonEmpty bool // whether we've written at least one frame already
+}
+
+// newStackFormatter builds a new stackFormatter.
+func newStackFormatter(b *buffer.Buffer) stackFormatter {
+	return stackFormatter{b: b}
+}
+
+// FormatStack formats all remaining frames in the provided stacktrace -- minus
+// the final runtime.main/runtime.goexit frame.
+func (sf *stackFormatter) FormatStack(stack *stacktrace) {
+	// Note: On the last iteration, frames.Next() returns false, with a valid
+	// frame, but we ignore this frame. The last frame is a runtime frame which
+	// adds noise, since it's only either runtime.main or runtime.goexit.
+	for frame, more := stack.Next(); more; frame, more = stack.Next() {
+		sf.FormatFrame(frame)
+	}
+}
+
+// FormatFrame formats the given frame.
+func (sf *stackFormatter) FormatFrame(frame runtime.Frame) {
+	if sf.nonEmpty {
+		sf.b.AppendByte('\n')
+	}
+	sf.nonEmpty = true
+	sf.b.AppendString(frame.Function)
+	sf.b.AppendByte('\n')
+	sf.b.AppendByte('\t')
+	sf.b.AppendString(frame.File)
+	sf.b.AppendByte(':')
+	sf.b.AppendInt(int64(frame.Line))
+}
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
new file mode 100644
index 0000000000..ac387b3e47
--- /dev/null
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -0,0 +1,416 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +const ( + _oddNumberErrMsg = "Ignored key without a value." + _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." + _multipleErrMsg = "Multiple errors without a key." +) + +// A SugaredLogger wraps the base Logger functionality in a slower, but less +// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar +// method. +// +// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. +// For each log level, it exposes four methods: +// +// - methods named after the log level for log.Print-style logging +// - methods ending in "w" for loosely-typed structured logging +// - methods ending in "f" for log.Printf-style logging +// - methods ending in "ln" for log.Println-style logging +// +// For example, the methods for InfoLevel are: +// +// Info(...any) Print-style logging +// Infow(...any) Structured logging (read as "info with") +// Infof(string, ...any) Printf-style logging +// Infoln(...any) Println-style logging +type SugaredLogger struct { + base *Logger +} + +// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring +// is quite inexpensive, so it's reasonable for a single application to use +// both Loggers and SugaredLoggers, converting between them on the boundaries +// of performance-sensitive code. +func (s *SugaredLogger) Desugar() *Logger { + base := s.base.clone() + base.callerSkip -= 2 + return base +} + +// Named adds a sub-scope to the logger's name. See Logger.Named for details. +func (s *SugaredLogger) Named(name string) *SugaredLogger { + return &SugaredLogger{base: s.base.Named(name)} +} + +// WithOptions clones the current SugaredLogger, applies the supplied Options, +// and returns the result. It's safe to use concurrently. +func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger { + base := s.base.clone() + for _, opt := range opts { + opt.apply(base) + } + return &SugaredLogger{base: base} +} + +// With adds a variadic number of fields to the logging context. It accepts a +// mix of strongly-typed Field objects and loosely-typed key-value pairs. When +// processing pairs, the first element of the pair is used as the field key +// and the second as the field value. +// +// For example, +// +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// +// is the equivalent of +// +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped +// and execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. 
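
As a quick illustrative aside before the implementation (log contents are made up, not from this patch), the four method flavors enumerated above, side by side:

package main

import "go.uber.org/zap"

func main() {
	logger := zap.Must(zap.NewDevelopment())
	defer logger.Sync()
	sugar := logger.Sugar()

	sugar.Info("user logged in: ", "alice")        // fmt.Sprint style
	sugar.Infof("user logged in: %s", "alice")     // fmt.Sprintf style
	sugar.Infoln("user logged in:", "alice")       // fmt.Sprintln style
	sugar.Infow("user logged in", "user", "alice") // loosely-typed key-value pairs
}
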
+func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (s *SugaredLogger) Level() zapcore.Level { + return zapcore.LevelOf(s.base.core) +} + +// Debug uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Debug(args ...interface{}) { + s.log(DebugLevel, "", args, nil) +} + +// Info uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Info(args ...interface{}) { + s.log(InfoLevel, "", args, nil) +} + +// Warn uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Warn(args ...interface{}) { + s.log(WarnLevel, "", args, nil) +} + +// Error uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Error(args ...interface{}) { + s.log(ErrorLevel, "", args, nil) +} + +// DPanic uses fmt.Sprint to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanic(args ...interface{}) { + s.log(DPanicLevel, "", args, nil) +} + +// Panic uses fmt.Sprint to construct and log a message, then panics. +func (s *SugaredLogger) Panic(args ...interface{}) { + s.log(PanicLevel, "", args, nil) +} + +// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +func (s *SugaredLogger) Fatal(args ...interface{}) { + s.log(FatalLevel, "", args, nil) +} + +// Debugf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Debugf(template string, args ...interface{}) { + s.log(DebugLevel, template, args, nil) +} + +// Infof uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Infof(template string, args ...interface{}) { + s.log(InfoLevel, template, args, nil) +} + +// Warnf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Warnf(template string, args ...interface{}) { + s.log(WarnLevel, template, args, nil) +} + +// Errorf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Errorf(template string, args ...interface{}) { + s.log(ErrorLevel, template, args, nil) +} + +// DPanicf uses fmt.Sprintf to log a templated message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { + s.log(DPanicLevel, template, args, nil) +} + +// Panicf uses fmt.Sprintf to log a templated message, then panics. +func (s *SugaredLogger) Panicf(template string, args ...interface{}) { + s.log(PanicLevel, template, args, nil) +} + +// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { + s.log(FatalLevel, template, args, nil) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.log(DebugLevel, msg, nil, keysAndValues) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { + s.log(InfoLevel, msg, nil, keysAndValues) +} + +// Warnw logs a message with some additional context. 
The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.log(WarnLevel, msg, nil, keysAndValues) +} + +// Errorw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.log(ErrorLevel, msg, nil, keysAndValues) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { + s.log(DPanicLevel, msg, nil, keysAndValues) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.log(PanicLevel, msg, nil, keysAndValues) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.log(FatalLevel, msg, nil, keysAndValues) +} + +// Debugln uses fmt.Sprintln to construct and log a message. +func (s *SugaredLogger) Debugln(args ...interface{}) { + s.logln(DebugLevel, args, nil) +} + +// Infoln uses fmt.Sprintln to construct and log a message. +func (s *SugaredLogger) Infoln(args ...interface{}) { + s.logln(InfoLevel, args, nil) +} + +// Warnln uses fmt.Sprintln to construct and log a message. +func (s *SugaredLogger) Warnln(args ...interface{}) { + s.logln(WarnLevel, args, nil) +} + +// Errorln uses fmt.Sprintln to construct and log a message. +func (s *SugaredLogger) Errorln(args ...interface{}) { + s.logln(ErrorLevel, args, nil) +} + +// DPanicln uses fmt.Sprintln to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanicln(args ...interface{}) { + s.logln(DPanicLevel, args, nil) +} + +// Panicln uses fmt.Sprintln to construct and log a message, then panics. +func (s *SugaredLogger) Panicln(args ...interface{}) { + s.logln(PanicLevel, args, nil) +} + +// Fatalln uses fmt.Sprintln to construct and log a message, then calls os.Exit. +func (s *SugaredLogger) Fatalln(args ...interface{}) { + s.logln(FatalLevel, args, nil) +} + +// Sync flushes any buffered log entries. +func (s *SugaredLogger) Sync() error { + return s.base.Sync() +} + +// log message with Sprint, Sprintf, or neither. +func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { + // If logging at this level is completely disabled, skip the overhead of + // string formatting. + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + msg := getMessage(template, fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +// logln message with Sprintln +func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) { + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + msg := getMessageln(fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +// getMessage format with Sprint, Sprintf, or neither. 
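
Ahead of the helpers below, a small sketch (illustrative only) of what the pair-sweetening means for callers:

package main

import "go.uber.org/zap"

func main() {
	sugar := zap.Must(zap.NewDevelopment()).Sugar()
	defer sugar.Sync()

	// Mixing strongly-typed fields with loose pairs is fine: sweetenFields
	// (below) consumes zap.Field values as-is and pairs up the rest.
	sugar.Infow("login", "user", "alice", zap.Int("attempt", 3))

	// A dangling key is not fatal: the argument is skipped and the logger
	// emits its own error entry ("Ignored key without a value.").
	sugar.Infow("oops", "dangling")
}
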
+func getMessage(template string, fmtArgs []interface{}) string { + if len(fmtArgs) == 0 { + return template + } + + if template != "" { + return fmt.Sprintf(template, fmtArgs...) + } + + if len(fmtArgs) == 1 { + if str, ok := fmtArgs[0].(string); ok { + return str + } + } + return fmt.Sprint(fmtArgs...) +} + +// getMessageln format with Sprintln. +func getMessageln(fmtArgs []interface{}) string { + msg := fmt.Sprintln(fmtArgs...) + return msg[:len(msg)-1] +} + +func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { + if len(args) == 0 { + return nil + } + + var ( + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields = make([]Field, 0, len(args)) + invalid invalidPairs + seenError bool + ) + + for i := 0; i < len(args); { + // This is a strongly-typed field. Consume it and move on. + if f, ok := args[i].(Field); ok { + fields = append(fields, f) + i++ + continue + } + + // If it is an error, consume it and move on. + if err, ok := args[i].(error); ok { + if !seenError { + seenError = true + fields = append(fields, Error(err)) + } else { + s.base.Error(_multipleErrMsg, Error(err)) + } + i++ + continue + } + + // Make sure this element isn't a dangling key. + if i == len(args)-1 { + s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. + key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. + if len(invalid) > 0 { + s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + Any("key", p.key).AddTo(enc) + Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var err error + for i := range ps { + err = multierr.Append(err, enc.AppendObject(ps[i])) + } + return err +} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go new file mode 100644 index 0000000000..c5a1f16225 --- /dev/null +++ b/vendor/go.uber.org/zap/time.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "time" + +func timeToMillis(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go new file mode 100644 index 0000000000..f08728e1ec --- /dev/null +++ b/vendor/go.uber.org/zap/writer.go @@ -0,0 +1,98 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +// Open is a high-level wrapper that takes a variadic number of URLs, opens or +// creates each of the specified resources, and combines them into a locked +// WriteSyncer. It also returns any error encountered and a function to close +// any opened files. +// +// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a +// scheme and URLs with the "file" scheme. Third-party code may register +// factories for other schemes using RegisterSink. +// +// URLs with the "file" scheme must use absolute paths on the local +// filesystem. No user, password, port, fragments, or query parameters are +// allowed, and the hostname must be empty or "localhost". +// +// Since it's common to write logs to the local filesystem, URLs without a +// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without +// a scheme, the special paths "stdout" and "stderr" are interpreted as +// os.Stdout and os.Stderr. When specified without a scheme, relative file +// paths also work. +func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { + writers, close, err := open(paths) + if err != nil { + return nil, nil, err + } + + writer := CombineWriteSyncers(writers...) 
+ return writer, close, nil +} + +func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { + writers := make([]zapcore.WriteSyncer, 0, len(paths)) + closers := make([]io.Closer, 0, len(paths)) + close := func() { + for _, c := range closers { + c.Close() + } + } + + var openErr error + for _, path := range paths { + sink, err := _sinkRegistry.newSink(path) + if err != nil { + openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err)) + continue + } + writers = append(writers, sink) + closers = append(closers, sink) + } + if openErr != nil { + close() + return nil, nil, openErr + } + + return writers, close, nil +} + +// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a +// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op +// WriteSyncer. +// +// It's provided purely as a convenience; the result is no different from +// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. +func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { + if len(writers) == 0 { + return zapcore.AddSync(io.Discard) + } + return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) +} diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go new file mode 100644 index 0000000000..a40e93b3ec --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -0,0 +1,219 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bufio" + "sync" + "time" + + "go.uber.org/multierr" +) + +const ( + // _defaultBufferSize specifies the default size used by Buffer. + _defaultBufferSize = 256 * 1024 // 256 kB + + // _defaultFlushInterval specifies the default flush interval for + // Buffer. + _defaultFlushInterval = 30 * time.Second +) + +// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before +// flushing them to a wrapped WriteSyncer after reaching some limit, or at some +// fixed interval--whichever comes first. +// +// BufferedWriteSyncer is safe for concurrent use. You don't need to use +// zapcore.Lock for WriteSyncers with BufferedWriteSyncer. +// +// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log +// destination (*os.File is a valid WriteSyncer), wrap it with +// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the +// object. +// +// func main() { +// ws := ... 
// your log destination
+//	bws := &zapcore.BufferedWriteSyncer{WS: ws}
+//	defer bws.Stop()
+//
+//	// ...
+//	core := zapcore.NewCore(enc, bws, lvl)
+//	logger := zap.New(core)
+//
+//	// ...
+//	}
+//
+// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
+// waiting at most 30 seconds between flushes.
+// You can customize these parameters by setting the Size or FlushInterval
+// fields.
+// For example, the following buffers up to 512 kB of logs before flushing them
+// to Stderr, with a maximum of one minute between each flush.
+//
+//	ws := &BufferedWriteSyncer{
+//		WS:            os.Stderr,
+//		Size:          512 * 1024, // 512 kB
+//		FlushInterval: time.Minute,
+//	}
+//	defer ws.Stop()
+type BufferedWriteSyncer struct {
+	// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
+	// writes.
+	//
+	// This field is required.
+	WS WriteSyncer
+
+	// Size specifies the maximum amount of data the writer will buffer
+	// before flushing.
+	//
+	// Defaults to 256 kB if unspecified.
+	Size int
+
+	// FlushInterval specifies how often the writer should flush data if
+	// there have been no writes.
+	//
+	// Defaults to 30 seconds if unspecified.
+	FlushInterval time.Duration
+
+	// Clock, if specified, provides control of the source of time for the
+	// writer.
+	//
+	// Defaults to the system clock.
+	Clock Clock
+
+	// unexported fields for state
+	mu          sync.Mutex
+	initialized bool // whether initialize() has run
+	stopped     bool // whether Stop() has run
+	writer      *bufio.Writer
+	ticker      *time.Ticker
+	stop        chan struct{} // closed when flushLoop should stop
+	done        chan struct{} // closed when flushLoop has stopped
+}
+
+func (s *BufferedWriteSyncer) initialize() {
+	size := s.Size
+	if size == 0 {
+		size = _defaultBufferSize
+	}
+
+	flushInterval := s.FlushInterval
+	if flushInterval == 0 {
+		flushInterval = _defaultFlushInterval
+	}
+
+	if s.Clock == nil {
+		s.Clock = DefaultClock
+	}
+
+	s.ticker = s.Clock.NewTicker(flushInterval)
+	s.writer = bufio.NewWriterSize(s.WS, size)
+	s.stop = make(chan struct{})
+	s.done = make(chan struct{})
+	s.initialized = true
+	go s.flushLoop()
+}
+
+// Write writes log data into the buffered syncer directly; multiple Write
+// calls will be batched, and log data will be flushed to disk when the buffer
+// is full or periodically.
+func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if !s.initialized {
+		s.initialize()
+	}
+
+	// To avoid partial writes from being flushed, we manually flush the existing buffer if:
+	// * The current write doesn't fit into the buffer fully, and
+	// * The buffer is not empty (since bufio will not split large writes when the buffer is empty)
+	if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 {
+		if err := s.writer.Flush(); err != nil {
+			return 0, err
+		}
+	}
+
+	return s.writer.Write(bs)
+}
+
+// Sync flushes buffered log data into disk directly.
+func (s *BufferedWriteSyncer) Sync() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	var err error
+	if s.initialized {
+		err = s.writer.Flush()
+	}
+
+	return multierr.Append(err, s.WS.Sync())
+}
+
+// flushLoop flushes the buffer at the configured interval until Stop is
+// called.
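
Putting the pieces together (paths, interval, and names are illustrative assumptions, not part of the patch): zap.Open from writer.go above resolves destinations, and BufferedWriteSyncer batches writes to them:

package main

import (
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	ws, cleanup, err := zap.Open("stderr", "/tmp/app.log") // illustrative paths
	if err != nil {
		panic(err)
	}
	defer cleanup()

	bws := &zapcore.BufferedWriteSyncer{WS: ws, FlushInterval: 5 * time.Second}
	defer bws.Stop() // flushes whatever is still buffered

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		bws,
		zapcore.InfoLevel,
	)
	zap.New(core).Info("buffered hello")
}
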
+func (s *BufferedWriteSyncer) flushLoop() {
+	defer close(s.done)
+
+	for {
+		select {
+		case <-s.ticker.C:
+			// Ignore the error here: the underlying bufio.Writer stores any
+			// write error, and it is reported by a later Sync (including the
+			// final Sync performed during Stop).
+			_ = s.Sync()
+		case <-s.stop:
+			return
+		}
+	}
+}
+
+// Stop closes the buffer, cleans up background goroutines, and flushes
+// remaining unwritten data.
+func (s *BufferedWriteSyncer) Stop() (err error) {
+	var stopped bool
+
+	// Critical section.
+	func() {
+		s.mu.Lock()
+		defer s.mu.Unlock()
+
+		if !s.initialized {
+			return
+		}
+
+		stopped = s.stopped
+		if stopped {
+			return
+		}
+		s.stopped = true
+
+		s.ticker.Stop()
+		close(s.stop) // tell flushLoop to stop
+		<-s.done      // and wait until it has
+	}()
+
+	// Don't call Sync on consecutive Stops.
+	if !stopped {
+		err = s.Sync()
+	}
+
+	return err
+}
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
new file mode 100644
index 0000000000..422fd82a6b
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/clock.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "time"
+
+// DefaultClock is the default clock used by Zap in operations that require
+// time. This clock uses the system clock for all operations.
+var DefaultClock = systemClock{}
+
+// Clock is a source of time for logged entries.
+type Clock interface {
+	// Now returns the current local time.
+	Now() time.Time
+
+	// NewTicker returns a *time.Ticker that holds a channel
+	// that delivers "ticks" of a clock.
+	NewTicker(time.Duration) *time.Ticker
+}
+
+// systemClock implements the default Clock, using system time.
+type systemClock struct{}
+
+func (systemClock) Now() time.Time {
+	return time.Now()
+}
+
+func (systemClock) NewTicker(duration time.Duration) *time.Ticker {
+	return time.NewTicker(duration)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
new file mode 100644
index 0000000000..1aa5dc3646
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -0,0 +1,157 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "sync" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +var _sliceEncoderPool = sync.Pool{ + New: func() interface{} { + return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} + }, +} + +func getSliceEncoder() *sliceArrayEncoder { + return _sliceEncoderPool.Get().(*sliceArrayEncoder) +} + +func putSliceEncoder(e *sliceArrayEncoder) { + e.elems = e.elems[:0] + _sliceEncoderPool.Put(e) +} + +type consoleEncoder struct { + *jsonEncoder +} + +// NewConsoleEncoder creates an encoder whose output is designed for human - +// rather than machine - consumption. It serializes the core log entry data +// (message, level, timestamp, etc.) in a plain-text format and leaves the +// structured context as JSON. +// +// Note that although the console encoder doesn't use the keys specified in the +// encoder configuration, it will omit any element whose key is set to the empty +// string. +func NewConsoleEncoder(cfg EncoderConfig) Encoder { + if cfg.ConsoleSeparator == "" { + // Use a default delimiter of '\t' for backwards compatibility + cfg.ConsoleSeparator = "\t" + } + return consoleEncoder{newJSONEncoder(cfg, true)} +} + +func (c consoleEncoder) Clone() Encoder { + return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} +} + +func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + line := bufferpool.Get() + + // We don't want the entry's metadata to be quoted and escaped (if it's + // encoded as strings), which means that we can't use the JSON encoder. The + // simplest option is to use the memory encoder and fmt.Fprint. + // + // If this ever becomes a performance bottleneck, we can implement + // ArrayEncoder for our plain-text format. + arr := getSliceEncoder() + if c.TimeKey != "" && c.EncodeTime != nil { + c.EncodeTime(ent.Time, arr) + } + if c.LevelKey != "" && c.EncodeLevel != nil { + c.EncodeLevel(ent.Level, arr) + } + if ent.LoggerName != "" && c.NameKey != "" { + nameEncoder := c.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. 
+ nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, arr) + } + if ent.Caller.Defined { + if c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + if c.FunctionKey != "" { + arr.AppendString(ent.Caller.Function) + } + } + for i := range arr.elems { + if i > 0 { + line.AppendString(c.ConsoleSeparator) + } + fmt.Fprint(line, arr.elems[i]) + } + putSliceEncoder(arr) + + // Add the message itself. + if c.MessageKey != "" { + c.addSeparatorIfNecessary(line) + line.AppendString(ent.Message) + } + + // Add any structured context. + c.writeContext(line, fields) + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. + if ent.Stack != "" && c.StacktraceKey != "" { + line.AppendByte('\n') + line.AppendString(ent.Stack) + } + + line.AppendString(c.LineEnding) + return line, nil +} + +func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { + context := c.jsonEncoder.Clone().(*jsonEncoder) + defer func() { + // putJSONEncoder assumes the buffer is still used, but we write out the buffer so + // we can free it. + context.buf.Free() + putJSONEncoder(context) + }() + + addFields(context, extra) + context.closeOpenNamespaces() + if context.buf.Len() == 0 { + return + } + + c.addSeparatorIfNecessary(line) + line.AppendByte('{') + line.Write(context.buf.Bytes()) + line.AppendByte('}') +} + +func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) { + if line.Len() > 0 { + line.AppendString(c.ConsoleSeparator) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go new file mode 100644 index 0000000000..9dfd64051f --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -0,0 +1,122 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// Core is a minimal, fast logger interface. It's designed for library authors +// to wrap in a more user-friendly API. +type Core interface { + LevelEnabler + + // With adds structured context to the Core. + With([]Field) Core + // Check determines whether the supplied Entry should be logged (using the + // embedded LevelEnabler and possibly some extra logic). If the entry + // should be logged, the Core adds itself to the CheckedEntry and returns + // the result. + // + // Callers must use Check before calling Write. 
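+	//
+	// A typical call site looks like this (a sketch; zap's Logger uses the
+	// same pattern):
+	//
+	//	if ce := core.Check(ent, nil); ce != nil {
+	//		ce.Write(fields...)
+	//	}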
+ Check(Entry, *CheckedEntry) *CheckedEntry + // Write serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Write(Entry, []Field) error + // Sync flushes buffered logs (if any). + Sync() error +} + +type nopCore struct{} + +// NewNopCore returns a no-op Core. +func NewNopCore() Core { return nopCore{} } +func (nopCore) Enabled(Level) bool { return false } +func (n nopCore) With([]Field) Core { return n } +func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } +func (nopCore) Write(Entry, []Field) error { return nil } +func (nopCore) Sync() error { return nil } + +// NewCore creates a Core that writes logs to a WriteSyncer. +func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { + return &ioCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +type ioCore struct { + LevelEnabler + enc Encoder + out WriteSyncer +} + +var ( + _ Core = (*ioCore)(nil) + _ leveledEnabler = (*ioCore)(nil) +) + +func (c *ioCore) Level() Level { + return LevelOf(c.LevelEnabler) +} + +func (c *ioCore) With(fields []Field) Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *ioCore) Write(ent Entry, fields []Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > ErrorLevel { + // Since we may be crashing the program, sync the output. Ignore Sync + // errors, pending a clean solution to issue #370. + c.Sync() + } + return nil +} + +func (c *ioCore) Sync() error { + return c.out.Sync() +} + +func (c *ioCore) clone() *ioCore { + return &ioCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + out: c.out, + } +} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go new file mode 100644 index 0000000000..31000e91f7 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapcore defines and implements the low-level interfaces upon which +// zap is built. 
By providing alternate implementations of these interfaces,
+// external packages can extend zap's capabilities.
+package zapcore // import "go.uber.org/zap/zapcore"
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
new file mode 100644
index 0000000000..5769ff3e4e
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -0,0 +1,451 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"encoding/json"
+	"io"
+	"time"
+
+	"go.uber.org/zap/buffer"
+)
+
+// DefaultLineEnding defines the default line ending when writing logs.
+// Alternate line endings specified in EncoderConfig can override this
+// behavior.
+const DefaultLineEnding = "\n"
+
+// OmitKey defines the key to use when callers want to remove a key from log output.
+const OmitKey = ""
+
+// A LevelEncoder serializes a Level to a primitive type.
+type LevelEncoder func(Level, PrimitiveArrayEncoder)
+
+// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
+// InfoLevel is serialized to "info".
+func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	enc.AppendString(l.String())
+}
+
+// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring.
+// For example, InfoLevel is serialized to "info" and colored blue.
+func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	s, ok := _levelToLowercaseColorString[l]
+	if !ok {
+		s = _unknownLevelColor.Add(l.String())
+	}
+	enc.AppendString(s)
+}
+
+// CapitalLevelEncoder serializes a Level to an all-caps string. For example,
+// InfoLevel is serialized to "INFO".
+func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	enc.AppendString(l.CapitalString())
+}
+
+// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color.
+// For example, InfoLevel is serialized to "INFO" and colored blue.
+func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	s, ok := _levelToCapitalColorString[l]
+	if !ok {
+		s = _unknownLevelColor.Add(l.CapitalString())
+	}
+	enc.AppendString(s)
+}
+
+// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to
+// CapitalLevelEncoder, "capitalColor" is unmarshaled to CapitalColorLevelEncoder,
+// "color" is unmarshaled to LowercaseColorLevelEncoder, and anything else
+// is unmarshaled to LowercaseLevelEncoder.
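+//
+// For example, in a YAML-driven zap.Config (a sketch; the levelEncoder key
+// follows the EncoderConfig struct tags defined later in this file):
+//
+//	encoderConfig:
+//	  levelEncoder: capitalColor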
+func (e *LevelEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "capital":
+		*e = CapitalLevelEncoder
+	case "capitalColor":
+		*e = CapitalColorLevelEncoder
+	case "color":
+		*e = LowercaseColorLevelEncoder
+	default:
+		*e = LowercaseLevelEncoder
+	}
+	return nil
+}
+
+// A TimeEncoder serializes a time.Time to a primitive type.
+type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
+
+// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
+// since the Unix epoch.
+func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	nanos := t.UnixNano()
+	sec := float64(nanos) / float64(time.Second)
+	enc.AppendFloat64(sec)
+}
+
+// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of
+// milliseconds since the Unix epoch.
+func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	nanos := t.UnixNano()
+	millis := float64(nanos) / float64(time.Millisecond)
+	enc.AppendFloat64(millis)
+}
+
+// EpochNanosTimeEncoder serializes a time.Time to an integer number of
+// nanoseconds since the Unix epoch.
+func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(t.UnixNano())
+}
+
+func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) {
+	type appendTimeEncoder interface {
+		AppendTimeLayout(time.Time, string)
+	}
+
+	if enc, ok := enc.(appendTimeEncoder); ok {
+		enc.AppendTimeLayout(t, layout)
+		return
+	}
+
+	enc.AppendString(t.Format(layout))
+}
+
+// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string
+// with millisecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc)
+}
+
+// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	encodeTimeLayout(t, time.RFC3339, enc)
+}
+
+// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string
+// with nanosecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	encodeTimeLayout(t, time.RFC3339Nano, enc)
+}
+
+// TimeEncoderOfLayout returns a TimeEncoder which serializes a time.Time using
+// the given layout.
+func TimeEncoderOfLayout(layout string) TimeEncoder {
+	return func(t time.Time, enc PrimitiveArrayEncoder) {
+		encodeTimeLayout(t, layout, enc)
+	}
+}
+
+// UnmarshalText unmarshals text to a TimeEncoder.
+// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder.
+// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder.
+// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder.
+// "millis" is unmarshaled to EpochMillisTimeEncoder.
+// "nanos" is unmarshaled to EpochNanosTimeEncoder.
+// Anything else is unmarshaled to EpochTimeEncoder.
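+//
+// The encoder can also be selected programmatically (a sketch):
+//
+//	var te TimeEncoder
+//	_ = te.UnmarshalText([]byte("iso8601")) // te now points at ISO8601TimeEncoder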
+func (e *TimeEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "rfc3339nano", "RFC3339Nano":
+		*e = RFC3339NanoTimeEncoder
+	case "rfc3339", "RFC3339":
+		*e = RFC3339TimeEncoder
+	case "iso8601", "ISO8601":
+		*e = ISO8601TimeEncoder
+	case "millis":
+		*e = EpochMillisTimeEncoder
+	case "nanos":
+		*e = EpochNanosTimeEncoder
+	default:
+		*e = EpochTimeEncoder
+	}
+	return nil
+}
+
+// UnmarshalYAML unmarshals YAML to a TimeEncoder.
+// If the value is an object with a "layout" field, it is unmarshaled to a
+// TimeEncoder with the given layout.
+//
+//	timeEncoder:
+//	  layout: 06/01/02 03:04pm
+//
+// If the value is a string, it uses UnmarshalText.
+//
+//	timeEncoder: iso8601
+func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var o struct {
+		Layout string `json:"layout" yaml:"layout"`
+	}
+	if err := unmarshal(&o); err == nil {
+		*e = TimeEncoderOfLayout(o.Layout)
+		return nil
+	}
+
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	return e.UnmarshalText([]byte(s))
+}
+
+// UnmarshalJSON unmarshals JSON to a TimeEncoder in the same way UnmarshalYAML does.
+func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
+	return e.UnmarshalYAML(func(v interface{}) error {
+		return json.Unmarshal(data, v)
+	})
+}
+
+// A DurationEncoder serializes a time.Duration to a primitive type.
+type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
+
+// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
+func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendFloat64(float64(d) / float64(time.Second))
+}
+
+// NanosDurationEncoder serializes a time.Duration to an integer number of
+// nanoseconds elapsed.
+func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(int64(d))
+}
+
+// MillisDurationEncoder serializes a time.Duration to an integer number of
+// milliseconds elapsed.
+func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(d.Nanoseconds() / 1e6)
+}
+
+// StringDurationEncoder serializes a time.Duration using its built-in String
+// method.
+func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendString(d.String())
+}
+
+// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled
+// to StringDurationEncoder, "nanos" to NanosDurationEncoder, "ms" to
+// MillisDurationEncoder, and anything else to SecondsDurationEncoder.
+func (e *DurationEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "string":
+		*e = StringDurationEncoder
+	case "nanos":
+		*e = NanosDurationEncoder
+	case "ms":
+		*e = MillisDurationEncoder
+	default:
+		*e = SecondsDurationEncoder
+	}
+	return nil
+}
+
+// A CallerEncoder serializes an EntryCaller to a primitive type.
+type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
+
+// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
+// format.
+func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+	// TODO: consider using a byte-oriented API to save an allocation.
+	enc.AppendString(caller.String())
+}
+
+// ShortCallerEncoder serializes a caller in package/file:line format, trimming
+// all but the final directory from the full path.
+func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+	// TODO: consider using a byte-oriented API to save an allocation.
+	enc.AppendString(caller.TrimmedPath())
+}
+
+// UnmarshalText unmarshals text to a CallerEncoder.
"full" is unmarshaled to +// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. +func (e *CallerEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullCallerEncoder + default: + *e = ShortCallerEncoder + } + return nil +} + +// A NameEncoder serializes a period-separated logger name to a primitive +// type. +type NameEncoder func(string, PrimitiveArrayEncoder) + +// FullNameEncoder serializes the logger name as-is. +func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { + enc.AppendString(loggerName) +} + +// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is +// unmarshaled to FullNameEncoder. +func (e *NameEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullNameEncoder + default: + *e = FullNameEncoder + } + return nil +} + +// An EncoderConfig allows users to configure the concrete encoders supplied by +// zapcore. +type EncoderConfig struct { + // Set the keys used for each log entry. If any key is empty, that portion + // of the entry is omitted. + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + FunctionKey string `json:"functionKey" yaml:"functionKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` + // Configure the primitive representations of common complex types. For + // example, some users may want all time.Times serialized as floating-point + // seconds since epoch, while others may prefer ISO8601 strings. + EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` + EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` + EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` + EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` + // Unlike the other primitive type encoders, EncodeName is optional. The + // zero value falls back to FullNameEncoder. + EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` + // Configure the encoder for interface{} type objects. + // If not provided, objects are encoded using json.Encoder + NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"` + // Configures the field separator used by the console encoder. Defaults + // to tab. + ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` +} + +// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a +// map- or struct-like object to the logging context. Like maps, ObjectEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ObjectEncoder interface { + // Logging-specific marshalers. + AddArray(key string, marshaler ArrayMarshaler) error + AddObject(key string, marshaler ObjectMarshaler) error + + // Built-in types. 
+ AddBinary(key string, value []byte) // for arbitrary bytes + AddByteString(key string, value []byte) // for UTF-8 encoded bytes + AddBool(key string, value bool) + AddComplex128(key string, value complex128) + AddComplex64(key string, value complex64) + AddDuration(key string, value time.Duration) + AddFloat64(key string, value float64) + AddFloat32(key string, value float32) + AddInt(key string, value int) + AddInt64(key string, value int64) + AddInt32(key string, value int32) + AddInt16(key string, value int16) + AddInt8(key string, value int8) + AddString(key, value string) + AddTime(key string, value time.Time) + AddUint(key string, value uint) + AddUint64(key string, value uint64) + AddUint32(key string, value uint32) + AddUint16(key string, value uint16) + AddUint8(key string, value uint8) + AddUintptr(key string, value uintptr) + + // AddReflected uses reflection to serialize arbitrary objects, so it can be + // slow and allocation-heavy. + AddReflected(key string, value interface{}) error + // OpenNamespace opens an isolated namespace where all subsequent fields will + // be added. Applications can use namespaces to prevent key collisions when + // injecting loggers into sub-components or third-party libraries. + OpenNamespace(key string) +} + +// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding +// array-like objects to the logging context. Of note, it supports mixed-type +// arrays even though they aren't typical in Go. Like slices, ArrayEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ArrayEncoder interface { + // Built-in types. + PrimitiveArrayEncoder + + // Time-related types. + AppendDuration(time.Duration) + AppendTime(time.Time) + + // Logging-specific marshalers. + AppendArray(ArrayMarshaler) error + AppendObject(ObjectMarshaler) error + + // AppendReflected uses reflection to serialize arbitrary objects, so it's + // slow and allocation-heavy. + AppendReflected(value interface{}) error +} + +// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals +// only in Go's built-in types. It's included only so that Duration- and +// TimeEncoders cannot trigger infinite recursion. +type PrimitiveArrayEncoder interface { + // Built-in types. + AppendBool(bool) + AppendByteString([]byte) // for UTF-8 encoded bytes + AppendComplex128(complex128) + AppendComplex64(complex64) + AppendFloat64(float64) + AppendFloat32(float32) + AppendInt(int) + AppendInt64(int64) + AppendInt32(int32) + AppendInt16(int16) + AppendInt8(int8) + AppendString(string) + AppendUint(uint) + AppendUint64(uint64) + AppendUint32(uint32) + AppendUint16(uint16) + AppendUint8(uint8) + AppendUintptr(uintptr) +} + +// Encoder is a format-agnostic interface for all log entry marshalers. Since +// log encoders don't need to support the same wide range of use cases as +// general-purpose marshalers, it's possible to make them faster and +// lower-allocation. +// +// Implementations of the ObjectEncoder interface's methods can, of course, +// freely modify the receiver. However, the Clone and EncodeEntry methods will +// be called concurrently and shouldn't modify the receiver. +type Encoder interface { + ObjectEncoder + + // Clone copies the encoder, ensuring that adding fields to the copy doesn't + // affect the original. + Clone() Encoder + + // EncodeEntry encodes an entry and fields, along with any accumulated + // context, into a byte buffer and returns it. 
Any fields that are empty, + // including fields on the `Entry` type, should be omitted. + EncodeEntry(Entry, []Field) (*buffer.Buffer, error) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go new file mode 100644 index 0000000000..9d326e95ea --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -0,0 +1,300 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "runtime" + "strings" + "sync" + "time" + + "go.uber.org/multierr" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/exit" +) + +var ( + _cePool = sync.Pool{New: func() interface{} { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } + }} +) + +func getCheckedEntry() *CheckedEntry { + ce := _cePool.Get().(*CheckedEntry) + ce.reset() + return ce +} + +func putCheckedEntry(ce *CheckedEntry) { + if ce == nil { + return + } + _cePool.Put(ce) +} + +// NewEntryCaller makes an EntryCaller from the return signature of +// runtime.Caller. +func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { + if !ok { + return EntryCaller{} + } + return EntryCaller{ + PC: pc, + File: file, + Line: line, + Defined: true, + } +} + +// EntryCaller represents the caller of a logging function. +type EntryCaller struct { + Defined bool + PC uintptr + File string + Line int + Function string +} + +// String returns the full path and line number of the caller. +func (ec EntryCaller) String() string { + return ec.FullPath() +} + +// FullPath returns a /full/path/to/package/file:line description of the +// caller. +func (ec EntryCaller) FullPath() string { + if !ec.Defined { + return "undefined" + } + buf := bufferpool.Get() + buf.AppendString(ec.File) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// TrimmedPath returns a package/file:line description of the caller, +// preserving only the leaf directory name and file name. +func (ec EntryCaller) TrimmedPath() string { + if !ec.Defined { + return "undefined" + } + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. 
+ // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + // Find the last separator. + // + idx := strings.LastIndexByte(ec.File, '/') + if idx == -1 { + return ec.FullPath() + } + // Find the penultimate separator. + idx = strings.LastIndexByte(ec.File[:idx], '/') + if idx == -1 { + return ec.FullPath() + } + buf := bufferpool.Get() + // Keep everything after the penultimate separator. + buf.AppendString(ec.File[idx+1:]) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// An Entry represents a complete log message. The entry's structured context +// is already serialized, but the log level, time, message, and call site +// information are available for inspection and modification. Any fields left +// empty will be omitted when encoding. +// +// Entries are pooled, so any functions that accept them MUST be careful not to +// retain references to them. +type Entry struct { + Level Level + Time time.Time + LoggerName string + Message string + Caller EntryCaller + Stack string +} + +// CheckWriteHook is a custom action that may be executed after an entry is +// written. +// +// Register one on a CheckedEntry with the After method. +// +// if ce := logger.Check(...); ce != nil { +// ce = ce.After(hook) +// ce.Write(...) +// } +// +// You can configure the hook for Fatal log statements at the logger level with +// the zap.WithFatalHook option. +type CheckWriteHook interface { + // OnWrite is invoked with the CheckedEntry that was written and a list + // of fields added with that entry. + // + // The list of fields DOES NOT include fields that were already added + // to the logger with the With method. + OnWrite(*CheckedEntry, []Field) +} + +// CheckWriteAction indicates what action to take after a log entry is +// processed. Actions are ordered in increasing severity. +type CheckWriteAction uint8 + +const ( + // WriteThenNoop indicates that nothing special needs to be done. It's the + // default behavior. + WriteThenNoop CheckWriteAction = iota + // WriteThenGoexit runs runtime.Goexit after Write. + WriteThenGoexit + // WriteThenPanic causes a panic after Write. + WriteThenPanic + // WriteThenFatal causes an os.Exit(1) after Write. + WriteThenFatal +) + +// OnWrite implements the OnWrite method to keep CheckWriteAction compatible +// with the new CheckWriteHook interface which deprecates CheckWriteAction. +func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) { + switch a { + case WriteThenGoexit: + runtime.Goexit() + case WriteThenPanic: + panic(ce.Message) + case WriteThenFatal: + exit.With(1) + } +} + +var _ CheckWriteHook = CheckWriteAction(0) + +// CheckedEntry is an Entry together with a collection of Cores that have +// already agreed to log it. +// +// CheckedEntry references should be created by calling AddCore or After on a +// nil *CheckedEntry. References are returned to a pool after Write, and MUST +// NOT be retained after calling their Write method. 
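+//
+// A Core.Check implementation typically obtains one as follows (a sketch
+// mirroring the ioCore shown in core.go):
+//
+//	func (c *myCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+//		if c.Enabled(ent.Level) {
+//			return ce.AddCore(ent, c) // safe on a nil *CheckedEntry
+//		}
+//		return ce
+//	}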
+type CheckedEntry struct { + Entry + ErrorOutput WriteSyncer + dirty bool // best-effort detection of pool misuse + after CheckWriteHook + cores []Core +} + +func (ce *CheckedEntry) reset() { + ce.Entry = Entry{} + ce.ErrorOutput = nil + ce.dirty = false + ce.after = nil + for i := range ce.cores { + // don't keep references to cores + ce.cores[i] = nil + } + ce.cores = ce.cores[:0] +} + +// Write writes the entry to the stored Cores, returns any errors, and returns +// the CheckedEntry reference to a pool for immediate re-use. Finally, it +// executes any required CheckWriteAction. +func (ce *CheckedEntry) Write(fields ...Field) { + if ce == nil { + return + } + + if ce.dirty { + if ce.ErrorOutput != nil { + // Make a best effort to detect unsafe re-use of this CheckedEntry. + // If the entry is dirty, log an internal error; because the + // CheckedEntry is being used after it was returned to the pool, + // the message may be an amalgamation from multiple call sites. + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) + ce.ErrorOutput.Sync() + } + return + } + ce.dirty = true + + var err error + for i := range ce.cores { + err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) + } + if err != nil && ce.ErrorOutput != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + ce.ErrorOutput.Sync() + } + + hook := ce.after + if hook != nil { + hook.OnWrite(ce, fields) + } + putCheckedEntry(ce) +} + +// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be +// used by Core.Check implementations, and is safe to call on nil CheckedEntry +// references. +func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.cores = append(ce.cores, core) + return ce +} + +// Should sets this CheckedEntry's CheckWriteAction, which controls whether a +// Core will panic or fatal after writing this log entry. Like AddCore, it's +// safe to call on nil CheckedEntry references. +// +// Deprecated: Use [CheckedEntry.After] instead. +func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + return ce.After(ent, should) +} + +// After sets this CheckEntry's CheckWriteHook, which will be called after this +// log entry has been written. It's safe to call this on nil CheckedEntry +// references. +func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.after = hook + return ce +} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go new file mode 100644 index 0000000000..06359907af --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -0,0 +1,132 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+)
+
+// Encodes the given error into fields of an object. A field with the given
+// name is added for the error message.
+//
+// If the error implements fmt.Formatter, a field with the name ${key}Verbose
+// is also added with the full verbose error message.
+//
+// Finally, if the error implements errorGroup (from go.uber.org/multierr) or
+// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
+// array of objects containing the errors this error was comprised of.
+//
+//	{
+//		"error": err.Error(),
+//		"errorVerbose": fmt.Sprintf("%+v", err),
+//		"errorCauses": [
+//			...
+//		],
+//	}
+func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
+	// Try to capture panics (from nil references or otherwise) when calling
+	// the Error() method.
+	defer func() {
+		if rerr := recover(); rerr != nil {
+			// If it's a nil pointer, just say "<nil>". The likeliest causes are an
+			// error that fails to guard against nil or a nil pointer for a
+			// value receiver, and in either case, "<nil>" is a nice result.
+			if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
+				enc.AddString(key, "<nil>")
+				return
+			}
+
+			retErr = fmt.Errorf("PANIC=%v", rerr)
+		}
+	}()
+
+	basic := err.Error()
+	enc.AddString(key, basic)
+
+	switch e := err.(type) {
+	case errorGroup:
+		return enc.AddArray(key+"Causes", errArray(e.Errors()))
+	case fmt.Formatter:
+		verbose := fmt.Sprintf("%+v", e)
+		if verbose != basic {
+			// This is a rich error type, like those produced by
+			// github.com/pkg/errors.
+			enc.AddString(key+"Verbose", verbose)
+		}
+	}
+	return nil
+}
+
+type errorGroup interface {
+	// Provides read-only access to the underlying list of errors, preferably
+	// without causing any allocs.
+	Errors() []error
+}
+
+// Note that errArray and errArrayElem are very similar to the version
+// implemented in the top-level error.go file. We can't re-use this because
+// that would require exporting errArray as part of the zapcore API.
+
+// Encodes a list of errors using the standard error encoding logic.
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
+	for i := range errs {
+		if errs[i] == nil {
+			continue
+		}
+
+		el := newErrArrayElem(errs[i])
+		arr.AppendObject(el)
+		el.Free()
+	}
+	return nil
+}
+
+var _errArrayElemPool = sync.Pool{New: func() interface{} {
+	return &errArrayElem{}
+}}
+
+// Encodes any error into a {"error": ...} object, re-using the same error
+// encoding logic.
+//
+// May be passed in place of an array to build a single-element array.
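+//
+// Intended use is the pattern in MarshalLogArray above (a sketch):
+//
+//	el := newErrArrayElem(err)
+//	_ = arr.AppendObject(el)
+//	el.Free()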
+type errArrayElem struct{ err error }
+
+func newErrArrayElem(err error) *errArrayElem {
+	e := _errArrayElemPool.Get().(*errArrayElem)
+	e.err = err
+	return e
+}
+
+func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error {
+	return arr.AppendObject(e)
+}
+
+func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error {
+	return encodeError("error", e.err, enc)
+}
+
+func (e *errArrayElem) Free() {
+	e.err = nil
+	_errArrayElemPool.Put(e)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
new file mode 100644
index 0000000000..95bdb0a126
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -0,0 +1,233 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"reflect"
+	"time"
+)
+
+// A FieldType indicates which member of the Field union struct should be used
+// and how it should be serialized.
+type FieldType uint8
+
+const (
+	// UnknownType is the default field type. Attempting to add it to an encoder will panic.
+	UnknownType FieldType = iota
+	// ArrayMarshalerType indicates that the field carries an ArrayMarshaler.
+	ArrayMarshalerType
+	// ObjectMarshalerType indicates that the field carries an ObjectMarshaler.
+	ObjectMarshalerType
+	// BinaryType indicates that the field carries an opaque binary blob.
+	BinaryType
+	// BoolType indicates that the field carries a bool.
+	BoolType
+	// ByteStringType indicates that the field carries UTF-8 encoded bytes.
+	ByteStringType
+	// Complex128Type indicates that the field carries a complex128.
+	Complex128Type
+	// Complex64Type indicates that the field carries a complex64.
+	Complex64Type
+	// DurationType indicates that the field carries a time.Duration.
+	DurationType
+	// Float64Type indicates that the field carries a float64.
+	Float64Type
+	// Float32Type indicates that the field carries a float32.
+	Float32Type
+	// Int64Type indicates that the field carries an int64.
+	Int64Type
+	// Int32Type indicates that the field carries an int32.
+	Int32Type
+	// Int16Type indicates that the field carries an int16.
+	Int16Type
+	// Int8Type indicates that the field carries an int8.
+	Int8Type
+	// StringType indicates that the field carries a string.
+	StringType
+	// TimeType indicates that the field carries a time.Time that is
+	// representable by a UnixNano() stored as an int64.
+ TimeType + // TimeFullType indicates that the field carries a time.Time stored as-is. + TimeFullType + // Uint64Type indicates that the field carries a uint64. + Uint64Type + // Uint32Type indicates that the field carries a uint32. + Uint32Type + // Uint16Type indicates that the field carries a uint16. + Uint16Type + // Uint8Type indicates that the field carries a uint8. + Uint8Type + // UintptrType indicates that the field carries a uintptr. + UintptrType + // ReflectType indicates that the field carries an interface{}, which should + // be serialized using reflection. + ReflectType + // NamespaceType signals the beginning of an isolated namespace. All + // subsequent fields should be added to the new namespace. + NamespaceType + // StringerType indicates that the field carries a fmt.Stringer. + StringerType + // ErrorType indicates that the field carries an error. + ErrorType + // SkipType indicates that the field is a no-op. + SkipType + + // InlineMarshalerType indicates that the field carries an ObjectMarshaler + // that should be inlined. + InlineMarshalerType +) + +// A Field is a marshaling operation used to add a key-value pair to a logger's +// context. Most fields are lazily marshaled, so it's inexpensive to add fields +// to disabled debug-level log statements. +type Field struct { + Key string + Type FieldType + Integer int64 + String string + Interface interface{} +} + +// AddTo exports a field through the ObjectEncoder interface. It's primarily +// useful to library authors, and shouldn't be necessary in most applications. +func (f Field) AddTo(enc ObjectEncoder) { + var err error + + switch f.Type { + case ArrayMarshalerType: + err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) + case ObjectMarshalerType: + err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case InlineMarshalerType: + err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc) + case BinaryType: + enc.AddBinary(f.Key, f.Interface.([]byte)) + case BoolType: + enc.AddBool(f.Key, f.Integer == 1) + case ByteStringType: + enc.AddByteString(f.Key, f.Interface.([]byte)) + case Complex128Type: + enc.AddComplex128(f.Key, f.Interface.(complex128)) + case Complex64Type: + enc.AddComplex64(f.Key, f.Interface.(complex64)) + case DurationType: + enc.AddDuration(f.Key, time.Duration(f.Integer)) + case Float64Type: + enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) + case Float32Type: + enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) + case Int64Type: + enc.AddInt64(f.Key, f.Integer) + case Int32Type: + enc.AddInt32(f.Key, int32(f.Integer)) + case Int16Type: + enc.AddInt16(f.Key, int16(f.Integer)) + case Int8Type: + enc.AddInt8(f.Key, int8(f.Integer)) + case StringType: + enc.AddString(f.Key, f.String) + case TimeType: + if f.Interface != nil { + enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) + } else { + // Fall back to UTC if location is nil. 
+			enc.AddTime(f.Key, time.Unix(0, f.Integer))
+		}
+	case TimeFullType:
+		enc.AddTime(f.Key, f.Interface.(time.Time))
+	case Uint64Type:
+		enc.AddUint64(f.Key, uint64(f.Integer))
+	case Uint32Type:
+		enc.AddUint32(f.Key, uint32(f.Integer))
+	case Uint16Type:
+		enc.AddUint16(f.Key, uint16(f.Integer))
+	case Uint8Type:
+		enc.AddUint8(f.Key, uint8(f.Integer))
+	case UintptrType:
+		enc.AddUintptr(f.Key, uintptr(f.Integer))
+	case ReflectType:
+		err = enc.AddReflected(f.Key, f.Interface)
+	case NamespaceType:
+		enc.OpenNamespace(f.Key)
+	case StringerType:
+		err = encodeStringer(f.Key, f.Interface, enc)
+	case ErrorType:
+		err = encodeError(f.Key, f.Interface.(error), enc)
+	case SkipType:
+		break
+	default:
+		panic(fmt.Sprintf("unknown field type: %v", f))
+	}
+
+	if err != nil {
+		enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error())
+	}
+}
+
+// Equals returns whether two fields are equal. For non-primitive types such as
+// errors, marshalers, or reflect types, it uses reflect.DeepEqual.
+func (f Field) Equals(other Field) bool {
+	if f.Type != other.Type {
+		return false
+	}
+	if f.Key != other.Key {
+		return false
+	}
+
+	switch f.Type {
+	case BinaryType, ByteStringType:
+		return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte))
+	case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType:
+		return reflect.DeepEqual(f.Interface, other.Interface)
+	default:
+		return f == other
+	}
+}
+
+func addFields(enc ObjectEncoder, fields []Field) {
+	for i := range fields {
+		fields[i].AddTo(enc)
+	}
+}
+
+func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) {
+	// Try to capture panics (from nil references or otherwise) when calling
+	// the String() method, similar to https://golang.org/src/fmt/print.go#L540
+	defer func() {
+		if err := recover(); err != nil {
+			// If it's a nil pointer, just say "<nil>". The likeliest causes are a
+			// Stringer that fails to guard against nil or a nil pointer for a
+			// value receiver, and in either case, "<nil>" is a nice result.
+			if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() {
+				enc.AddString(key, "<nil>")
+				return
+			}
+
+			retErr = fmt.Errorf("PANIC=%v", err)
+		}
+	}()
+
+	enc.AddString(key, stringer.(fmt.Stringer).String())
+	return nil
+}
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
new file mode 100644
index 0000000000..198def9917
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type hooked struct {
+	Core
+	funcs []func(Entry) error
+}
+
+var (
+	_ Core           = (*hooked)(nil)
+	_ leveledEnabler = (*hooked)(nil)
+)
+
+// RegisterHooks wraps a Core and runs a collection of user-defined callback
+// hooks each time a message is logged. Execution of the callbacks is blocking.
+//
+// This offers users an easy way to register simple callbacks (e.g., metrics
+// collection) without implementing the full Core interface.
+func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
+	funcs := append([]func(Entry) error{}, hooks...)
+	return &hooked{
+		Core:  core,
+		funcs: funcs,
+	}
+}
+
+func (h *hooked) Level() Level {
+	return LevelOf(h.Core)
+}
+
+func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+	// Let the wrapped Core decide whether to log this message or not. This
+	// also gives the downstream a chance to register itself directly with the
+	// CheckedEntry.
+	if downstream := h.Core.Check(ent, ce); downstream != nil {
+		return downstream.AddCore(ent, h)
+	}
+	return ce
+}
+
+func (h *hooked) With(fields []Field) Core {
+	return &hooked{
+		Core:  h.Core.With(fields),
+		funcs: h.funcs,
+	}
+}
+
+func (h *hooked) Write(ent Entry, _ []Field) error {
+	// Since our downstream had a chance to register itself directly with the
+	// CheckedEntry, we don't need to call it here.
+	var err error
+	for i := range h.funcs {
+		err = multierr.Append(err, h.funcs[i](ent))
+	}
+	return err
+}
diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go
new file mode 100644
index 0000000000..7a11237ae9
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/increase_level.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "fmt"
+
+type levelFilterCore struct {
+	core  Core
+	level LevelEnabler
+}
+
+var (
+	_ Core           = (*levelFilterCore)(nil)
+	_ leveledEnabler = (*levelFilterCore)(nil)
+)
+
+// NewIncreaseLevelCore creates a core that can be used to increase the level of
+// an existing Core.
It cannot be used to decrease the logging level, as it acts +// as a filter before calling the underlying core. If level decreases the log level, +// an error is returned. +func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) { + for l := _maxLevel; l >= _minLevel; l-- { + if !core.Enabled(l) && level.Enabled(l) { + return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l) + } + } + + return &levelFilterCore{core, level}, nil +} + +func (c *levelFilterCore) Enabled(lvl Level) bool { + return c.level.Enabled(lvl) +} + +func (c *levelFilterCore) Level() Level { + return LevelOf(c.level) +} + +func (c *levelFilterCore) With(fields []Field) Core { + return &levelFilterCore{c.core.With(fields), c.level} +} + +func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !c.Enabled(ent.Level) { + return ce + } + + return c.core.Check(ent, ce) +} + +func (c *levelFilterCore) Write(ent Entry, fields []Field) error { + return c.core.Write(ent, fields) +} + +func (c *levelFilterCore) Sync() error { + return c.core.Sync() +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go new file mode 100644 index 0000000000..3921c5cd33 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -0,0 +1,562 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/base64" + "math" + "sync" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +// For JSON-escaping; see jsonEncoder.safeAddString below. +const _hex = "0123456789abcdef" + +var _jsonPool = sync.Pool{New: func() interface{} { + return &jsonEncoder{} +}} + +func getJSONEncoder() *jsonEncoder { + return _jsonPool.Get().(*jsonEncoder) +} + +func putJSONEncoder(enc *jsonEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil + _jsonPool.Put(enc) +} + +type jsonEncoder struct { + *EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc ReflectedEncoder +} + +// NewJSONEncoder creates a fast, low-allocation JSON encoder. 
The encoder +// appropriately escapes all field keys and values. +// +// Note that the encoder doesn't deduplicate keys, so it's possible to produce +// a message like +// +// {"foo":"bar","foo":"baz"} +// +// This is permitted by the JSON specification, but not encouraged. Many +// libraries will ignore duplicate key-value pairs (typically keeping the last +// pair) when unmarshaling, but users should attempt to avoid adding duplicate +// keys. +func NewJSONEncoder(cfg EncoderConfig) Encoder { + return newJSONEncoder(cfg, false) +} + +func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + if cfg.SkipLineEnding { + cfg.LineEnding = "" + } else if cfg.LineEnding == "" { + cfg.LineEnding = DefaultLineEnding + } + + // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default + if cfg.NewReflectedEncoder == nil { + cfg.NewReflectedEncoder = defaultReflectedEncoder + } + + return &jsonEncoder{ + EncoderConfig: &cfg, + buf: bufferpool.Get(), + spaced: spaced, + } +} + +func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *jsonEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *jsonEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *jsonEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *jsonEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *jsonEncoder) AddComplex64(key string, val complex64) { + enc.addKey(key) + enc.AppendComplex64(val) +} + +func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *jsonEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *jsonEncoder) AddFloat32(key string, val float32) { + enc.addKey(key) + enc.AppendFloat32(val) +} + +func (enc *jsonEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *jsonEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = bufferpool.Get() + enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf) + } else { + enc.reflectBuf.Reset() + } +} + +var nullLiteralBytes = []byte("null") + +// Only invoke the standard JSON encoder if there is actually something to +// encode; otherwise write JSON null literal directly. 
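+//
+// As a hedged illustration, AddReflected("user", nil) appends "user":null
+// and never touches the reflected encoder.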
+func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) { + if obj == nil { + return nullLiteralBytes, nil + } + enc.resetReflectBuf() + if err := enc.reflectEnc.Encode(obj); err != nil { + return nil, err + } + enc.reflectBuf.TrimNewline() + return enc.reflectBuf.Bytes(), nil +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + valueBytes, err := enc.encodeReflected(obj) + if err != nil { + return err + } + enc.addKey(key) + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *jsonEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *jsonEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *jsonEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + // Close ONLY new openNamespaces that are created during + // AppendObject(). + old := enc.openNamespaces + enc.openNamespaces = 0 + enc.addElementSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + enc.closeOpenNamespaces() + enc.openNamespaces = old + return err +} + +func (enc *jsonEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *jsonEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +// appendComplex appends the encoded form of the provided complex128 value. +// precision specifies the encoding precision for the real and imaginary +// components of the complex number. +func (enc *jsonEncoder) appendComplex(val complex128, precision int) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. + r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. + enc.buf.AppendFloat(r, precision) + // If imaginary part is less than 0, minus (-) sign is added by default + // by AppendFloat. + if i >= 0 { + enc.buf.AppendByte('+') + } + enc.buf.AppendFloat(i, precision) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + if e := enc.EncodeDuration; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. 
+ enc.AppendInt64(int64(val)) + } +} + +func (enc *jsonEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + valueBytes, err := enc.encodeReflected(val) + if err != nil { + return err + } + enc.addElementSeparator() + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.buf.AppendTime(time, layout) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + if e := enc.EncodeTime; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. + enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *jsonEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) } +func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *jsonEncoder) Clone() Encoder { + clone := enc.clone() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *jsonEncoder) clone() *jsonEncoder { + clone := getJSONEncoder() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + clone.openNamespaces = enc.openNamespaces + clone.buf = bufferpool.Get() + return clone +} + +func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + final := enc.clone() + final.buf.AppendByte('{') + + if final.LevelKey != "" && final.EncodeLevel != nil { + final.addKey(final.LevelKey) + 
cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. Fall back to strings to keep + // output JSON valid. + final.AppendString(ent.Level.String()) + } + } + if final.TimeKey != "" { + final.AddTime(final.TimeKey, ent.Time) + } + if ent.LoggerName != "" && final.NameKey != "" { + final.addKey(final.NameKey) + cur := final.buf.Len() + nameEncoder := final.EncodeName + + // if no name encoder provided, fall back to FullNameEncoder for backwards + // compatibility + if nameEncoder == nil { + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, final) + if cur == final.buf.Len() { + // User-supplied EncodeName was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.LoggerName) + } + } + if ent.Caller.Defined { + if final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.FunctionKey != "" { + final.addKey(final.FunctionKey) + final.AppendString(ent.Caller.Function) + } + } + if final.MessageKey != "" { + final.addKey(enc.MessageKey) + final.AppendString(ent.Message) + } + if enc.buf.Len() > 0 { + final.addElementSeparator() + final.buf.Write(enc.buf.Bytes()) + } + addFields(final, fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.AddString(final.StacktraceKey, ent.Stack) + } + final.buf.AppendByte('}') + final.buf.AppendString(final.LineEnding) + + ret := final.buf + putJSONEncoder(final) + return ret, nil +} + +func (enc *jsonEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *jsonEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } + enc.openNamespaces = 0 +} + +func (enc *jsonEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(key) + enc.buf.AppendByte('"') + enc.buf.AppendByte(':') + if enc.spaced { + enc.buf.AppendByte(' ') + } +} + +func (enc *jsonEncoder) addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ': + return + default: + enc.buf.AppendByte(',') + if enc.spaced { + enc.buf.AppendByte(' ') + } + } +} + +func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. +func (enc *jsonEncoder) safeAddString(s string) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRuneInString(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.AppendString(s[i : i+size]) + i += size + } +} + +// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. 
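+//
+// Like safeAddString, it deliberately leaves characters such as '<' and '&'
+// unescaped, unlike encoding/json's default HTML escaping (compare
+// defaultReflectedEncoder, which disables SetEscapeHTML for consistency).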
+func (enc *jsonEncoder) safeAddByteString(s []byte) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRune(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.Write(s[i : i+size]) + i += size + } +} + +// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. +func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { + if b >= utf8.RuneSelf { + return false + } + if 0x20 <= b && b != '\\' && b != '"' { + enc.buf.AppendByte(b) + return true + } + switch b { + case '\\', '"': + enc.buf.AppendByte('\\') + enc.buf.AppendByte(b) + case '\n': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('n') + case '\r': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('r') + case '\t': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + enc.buf.AppendString(`\u00`) + enc.buf.AppendByte(_hex[b>>4]) + enc.buf.AppendByte(_hex[b&0xF]) + } + return true +} + +func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { + if r == utf8.RuneError && size == 1 { + enc.buf.AppendString(`\ufffd`) + return true + } + return false +} diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go new file mode 100644 index 0000000000..e01a241316 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -0,0 +1,229 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "errors" + "fmt" +) + +var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") + +// A Level is a logging priority. Higher levels are more important. +type Level int8 + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel Level = iota - 1 + // InfoLevel is the default logging priority. + InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). 
+ FatalLevel + + _minLevel = DebugLevel + _maxLevel = FatalLevel + + // InvalidLevel is an invalid value for Level. + // + // Core implementations may panic if they see messages of this level. + InvalidLevel = _maxLevel + 1 +) + +// ParseLevel parses a level based on the lower-case or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseLevel(text string) (Level, error) { + var level Level + err := level.UnmarshalText([]byte(text)) + return level, err +} + +type leveledEnabler interface { + LevelEnabler + + Level() Level +} + +// LevelOf reports the minimum enabled log level for the given LevelEnabler +// from Zap's supported log levels, or [InvalidLevel] if none of them are +// enabled. +// +// A LevelEnabler may implement a 'Level() Level' method to override the +// behavior of this function. +// +// func (c *core) Level() Level { +// return c.currentLevel +// } +// +// It is recommended that [Core] implementations that wrap other cores use +// LevelOf to retrieve the level of the wrapped core. For example, +// +// func (c *coreWrapper) Level() Level { +// return zapcore.LevelOf(c.wrappedCore) +// } +func LevelOf(enab LevelEnabler) Level { + if lvler, ok := enab.(leveledEnabler); ok { + return lvler.Level() + } + + for lvl := _minLevel; lvl <= _maxLevel; lvl++ { + if enab.Enabled(lvl) { + return lvl + } + } + + return InvalidLevel +} + +// String returns a lower-case ASCII representation of the log level. +func (l Level) String() string { + switch l { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case DPanicLevel: + return "dpanic" + case PanicLevel: + return "panic" + case FatalLevel: + return "fatal" + default: + return fmt.Sprintf("Level(%d)", l) + } +} + +// CapitalString returns an all-caps ASCII representation of the log level. +func (l Level) CapitalString() string { + // Printing levels in all-caps is common enough that we should export this + // functionality. + switch l { + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case DPanicLevel: + return "DPANIC" + case PanicLevel: + return "PANIC" + case FatalLevel: + return "FATAL" + default: + return fmt.Sprintf("LEVEL(%d)", l) + } +} + +// MarshalText marshals the Level to text. Note that the text representation +// drops the -Level suffix (see example). +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText +// expects the text representation of a Level to drop the -Level suffix (see +// example). +// +// In particular, this makes it easy to configure logging levels using YAML, +// TOML, or JSON files. 
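+//
+// A hedged round-trip sketch:
+//
+//	var lvl Level
+//	_ = lvl.UnmarshalText([]byte("WARN")) // lvl is now WarnLevel
+//	text, _ := lvl.MarshalText()          // text is []byte("warn")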
+func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errUnmarshalNilLevel + } + if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { + return fmt.Errorf("unrecognized level: %q", text) + } + return nil +} + +func (l *Level) unmarshalText(text []byte) bool { + switch string(text) { + case "debug", "DEBUG": + *l = DebugLevel + case "info", "INFO", "": // make the zero value useful + *l = InfoLevel + case "warn", "WARN": + *l = WarnLevel + case "error", "ERROR": + *l = ErrorLevel + case "dpanic", "DPANIC": + *l = DPanicLevel + case "panic", "PANIC": + *l = PanicLevel + case "fatal", "FATAL": + *l = FatalLevel + default: + return false + } + return true +} + +// Set sets the level for the flag.Value interface. +func (l *Level) Set(s string) error { + return l.UnmarshalText([]byte(s)) +} + +// Get gets the level for the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Enabled returns true if the given level is at or above this level. +func (l Level) Enabled(lvl Level) bool { + return lvl >= l +} + +// LevelEnabler decides whether a given logging level is enabled when logging a +// message. +// +// Enablers are intended to be used to implement deterministic filters; +// concerns like sampling are better implemented as a Core. +// +// Each concrete Level value implements a static LevelEnabler which returns +// true for itself and all higher logging levels. For example WarnLevel.Enabled() +// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and +// FatalLevel, but return false for InfoLevel and DebugLevel. +type LevelEnabler interface { + Enabled(Level) bool +} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go new file mode 100644 index 0000000000..7af8dadcb3 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level_strings.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import "go.uber.org/zap/internal/color" + +var ( + _levelToColor = map[Level]color.Color{ + DebugLevel: color.Magenta, + InfoLevel: color.Blue, + WarnLevel: color.Yellow, + ErrorLevel: color.Red, + DPanicLevel: color.Red, + PanicLevel: color.Red, + FatalLevel: color.Red, + } + _unknownLevelColor = color.Red + + _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) + _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) +) + +func init() { + for level, color := range _levelToColor { + _levelToLowercaseColorString[level] = color.Add(level.String()) + _levelToCapitalColorString[level] = color.Add(level.CapitalString()) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go new file mode 100644 index 0000000000..c3c55ba0d9 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -0,0 +1,61 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// ObjectMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ObjectMarshaler is only used when zap.Object is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ObjectMarshaler interface { + MarshalLogObject(ObjectEncoder) error +} + +// ObjectMarshalerFunc is a type adapter that turns a function into an +// ObjectMarshaler. +type ObjectMarshalerFunc func(ObjectEncoder) error + +// MarshalLogObject calls the underlying function. +func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { + return f(enc) +} + +// ArrayMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ArrayMarshaler is only used when zap.Array is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ArrayMarshaler interface { + MarshalLogArray(ArrayEncoder) error +} + +// ArrayMarshalerFunc is a type adapter that turns a function into an +// ArrayMarshaler. +type ArrayMarshalerFunc func(ArrayEncoder) error + +// MarshalLogArray calls the underlying function. 
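+//
+// A hedged sketch of the adapter feeding a test-friendly encoder (the "ids"
+// key is illustrative):
+//
+//	ids := ArrayMarshalerFunc(func(enc ArrayEncoder) error {
+//		enc.AppendInt(1)
+//		enc.AppendInt(2)
+//		return nil
+//	})
+//	_ = NewMapObjectEncoder().AddArray("ids", ids)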
+func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { + return f(enc) +} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go new file mode 100644 index 0000000000..dfead0829d --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// MapObjectEncoder is an ObjectEncoder backed by a simple +// map[string]interface{}. It's not fast enough for production use, but it's +// helpful in tests. +type MapObjectEncoder struct { + // Fields contains the entire encoded log context. + Fields map[string]interface{} + // cur is a pointer to the namespace we're currently writing to. + cur map[string]interface{} +} + +// NewMapObjectEncoder creates a new map-backed ObjectEncoder. +func NewMapObjectEncoder() *MapObjectEncoder { + m := make(map[string]interface{}) + return &MapObjectEncoder{ + Fields: m, + cur: m, + } +} + +// AddArray implements ObjectEncoder. +func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { + arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} + err := v.MarshalLogArray(arr) + m.cur[key] = arr.elems + return err +} + +// AddObject implements ObjectEncoder. +func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { + newMap := NewMapObjectEncoder() + m.cur[k] = newMap.Fields + return v.MarshalLogObject(newMap) +} + +// AddBinary implements ObjectEncoder. +func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } + +// AddByteString implements ObjectEncoder. +func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } + +// AddBool implements ObjectEncoder. +func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } + +// AddDuration implements ObjectEncoder. +func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } + +// AddComplex128 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } + +// AddComplex64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } + +// AddFloat64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } + +// AddFloat32 implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } + +// AddInt implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } + +// AddInt64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } + +// AddInt32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } + +// AddInt16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } + +// AddInt8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } + +// AddString implements ObjectEncoder. +func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } + +// AddTime implements ObjectEncoder. +func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } + +// AddUint implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } + +// AddUint64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } + +// AddUint32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } + +// AddUint16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } + +// AddUint8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } + +// AddUintptr implements ObjectEncoder. +func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } + +// AddReflected implements ObjectEncoder. +func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { + m.cur[k] = v + return nil +} + +// OpenNamespace implements ObjectEncoder. +func (m *MapObjectEncoder) OpenNamespace(k string) { + ns := make(map[string]interface{}) + m.cur[k] = ns + m.cur = ns +} + +// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like +// the MapObjectEncoder, it's not designed for production use. 
+type sliceArrayEncoder struct { + elems []interface{} +} + +func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { + enc := &sliceArrayEncoder{} + err := v.MarshalLogArray(enc) + s.elems = append(s.elems, enc.elems) + return err +} + +func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { + m := NewMapObjectEncoder() + err := v.MarshalLogObject(m) + s.elems = append(s.elems, m.Fields) + return err +} + +func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { + s.elems = append(s.elems, v) + return nil +} + +func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } +func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go new file mode 100644 index 0000000000..8746360eca --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go @@ -0,0 +1,41 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/json" + "io" +) + +// ReflectedEncoder serializes log fields that can't be serialized with Zap's +// JSON encoder. These have the ReflectType field type. +// Use EncoderConfig.NewReflectedEncoder to set this. +type ReflectedEncoder interface { + // Encode encodes and writes to the underlying data stream. + Encode(interface{}) error +} + +func defaultReflectedEncoder(w io.Writer) ReflectedEncoder { + enc := json.NewEncoder(w) + // For consistency with our custom JSON encoder. + enc.SetEscapeHTML(false) + return enc +} diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go new file mode 100644 index 0000000000..dc518055a4 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -0,0 +1,230 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" + + "go.uber.org/atomic" +) + +const ( + _numLevels = _maxLevel - _minLevel + 1 + _countersPerLevel = 4096 +) + +type counter struct { + resetAt atomic.Int64 + counter atomic.Uint64 +} + +type counters [_numLevels][_countersPerLevel]counter + +func newCounters() *counters { + return &counters{} +} + +func (cs *counters) get(lvl Level, key string) *counter { + i := lvl - _minLevel + j := fnv32a(key) % _countersPerLevel + return &cs[i][j] +} + +// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc +func fnv32a(s string) uint32 { + const ( + offset32 = 2166136261 + prime32 = 16777619 + ) + hash := uint32(offset32) + for i := 0; i < len(s); i++ { + hash ^= uint32(s[i]) + hash *= prime32 + } + return hash +} + +func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { + tn := t.UnixNano() + resetAfter := c.resetAt.Load() + if resetAfter > tn { + return c.counter.Inc() + } + + c.counter.Store(1) + + newResetAfter := tn + tick.Nanoseconds() + if !c.resetAt.CAS(resetAfter, newResetAfter) { + // We raced with another goroutine trying to reset, and it also reset + // the counter to 1, so we need to reincrement the counter. + return c.counter.Inc() + } + + return 1 +} + +// SamplingDecision is a decision represented as a bit field made by sampler. 
+// More decisions may be added in the future. +type SamplingDecision uint32 + +const ( + // LogDropped indicates that the Sampler dropped a log entry. + LogDropped SamplingDecision = 1 << iota + // LogSampled indicates that the Sampler sampled a log entry. + LogSampled +) + +// optionFunc wraps a func so it satisfies the SamplerOption interface. +type optionFunc func(*sampler) + +func (f optionFunc) apply(s *sampler) { + f(s) +} + +// SamplerOption configures a Sampler. +type SamplerOption interface { + apply(*sampler) +} + +// nopSamplingHook is the default hook used by sampler. +func nopSamplingHook(Entry, SamplingDecision) {} + +// SamplerHook registers a function which will be called when Sampler makes a +// decision. +// +// This hook may be used to get visibility into the performance of the sampler. +// For example, use it to track metrics of dropped versus sampled logs. +// +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) +func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { + return optionFunc(func(s *sampler) { + s.hook = hook + }) +} + +// NewSamplerWithOptions creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// For example, +// +// core = NewSamplerWithOptions(core, time.Second, 10, 5) +// +// This will log the first 10 log entries with the same level and message +// in a one second interval as-is. Following that, it will allow through +// every 5th log entry with the same level and message in that interval. +// +// If thereafter is zero, the Core will drop all log entries after the first N +// in that interval. +// +// Sampler can be configured to report sampling decisions with the SamplerHook +// option. +// +// Keep in mind that Zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { + s := &sampler{ + Core: core, + tick: tick, + counts: newCounters(), + first: uint64(first), + thereafter: uint64(thereafter), + hook: nopSamplingHook, + } + for _, opt := range opts { + opt.apply(s) + } + + return s +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 + hook func(Entry, SamplingDecision) +} + +var ( + _ Core = (*sampler)(nil) + _ leveledEnabler = (*sampler)(nil) +) + +// NewSampler creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +// +// Deprecated: use NewSamplerWithOptions. 
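+//
+// A hedged like-for-like migration sketch (the tick and counts are
+// illustrative):
+//
+//	core = NewSamplerWithOptions(core, time.Second, 100, 10)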
+func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return NewSamplerWithOptions(core, tick, first, thereafter) +} + +func (s *sampler) Level() Level { + return LevelOf(s.Core) +} + +func (s *sampler) With(fields []Field) Core { + return &sampler{ + Core: s.Core.With(fields), + tick: s.tick, + counts: s.counts, + first: s.first, + thereafter: s.thereafter, + hook: s.hook, + } +} + +func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !s.Enabled(ent.Level) { + return ce + } + + if ent.Level >= _minLevel && ent.Level <= _maxLevel { + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) { + s.hook(ent, LogDropped) + return ce + } + s.hook(ent, LogSampled) + } + return s.Core.Check(ent, ce) +} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go new file mode 100644 index 0000000000..9bb32f0557 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -0,0 +1,96 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type multiCore []Core + +var ( + _ leveledEnabler = multiCore(nil) + _ Core = multiCore(nil) +) + +// NewTee creates a Core that duplicates log entries into two or more +// underlying Cores. +// +// Calling it with a single Core returns the input unchanged, and calling +// it with no input returns a no-op Core. 
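+//
+// A minimal hedged sketch, where both argument cores are illustrative
+// placeholders:
+//
+//	core := NewTee(consoleCore, jsonCore)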
+func NewTee(cores ...Core) Core { + switch len(cores) { + case 0: + return NewNopCore() + case 1: + return cores[0] + default: + return multiCore(cores) + } +} + +func (mc multiCore) With(fields []Field) Core { + clone := make(multiCore, len(mc)) + for i := range mc { + clone[i] = mc[i].With(fields) + } + return clone +} + +func (mc multiCore) Level() Level { + minLvl := _maxLevel // mc is never empty + for i := range mc { + if lvl := LevelOf(mc[i]); lvl < minLvl { + minLvl = lvl + } + } + return minLvl +} + +func (mc multiCore) Enabled(lvl Level) bool { + for i := range mc { + if mc[i].Enabled(lvl) { + return true + } + } + return false +} + +func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + for i := range mc { + ce = mc[i].Check(ent, ce) + } + return ce +} + +func (mc multiCore) Write(ent Entry, fields []Field) error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Write(ent, fields)) + } + return err +} + +func (mc multiCore) Sync() error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Sync()) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go new file mode 100644 index 0000000000..d4a1af3d07 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -0,0 +1,122 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "io" + "sync" + + "go.uber.org/multierr" +) + +// A WriteSyncer is an io.Writer that can also flush any buffered data. Note +// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. +type WriteSyncer interface { + io.Writer + Sync() error +} + +// AddSync converts an io.Writer to a WriteSyncer. It attempts to be +// intelligent: if the concrete type of the io.Writer implements WriteSyncer, +// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. +func AddSync(w io.Writer) WriteSyncer { + switch w := w.(type) { + case WriteSyncer: + return w + default: + return writerWrapper{w} + } +} + +type lockedWriteSyncer struct { + sync.Mutex + ws WriteSyncer +} + +// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In +// particular, *os.Files must be locked before use. 
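+//
+// For example, a hedged sketch guarding standard error:
+//
+//	ws := Lock(AddSync(os.Stderr))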
+func Lock(ws WriteSyncer) WriteSyncer { + if _, ok := ws.(*lockedWriteSyncer); ok { + // no need to layer on another lock + return ws + } + return &lockedWriteSyncer{ws: ws} +} + +func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { + s.Lock() + n, err := s.ws.Write(bs) + s.Unlock() + return n, err +} + +func (s *lockedWriteSyncer) Sync() error { + s.Lock() + err := s.ws.Sync() + s.Unlock() + return err +} + +type writerWrapper struct { + io.Writer +} + +func (w writerWrapper) Sync() error { + return nil +} + +type multiWriteSyncer []WriteSyncer + +// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes +// and sync calls, much like io.MultiWriter. +func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { + if len(ws) == 1 { + return ws[0] + } + return multiWriteSyncer(ws) +} + +// See https://golang.org/src/io/multi.go +// When not all underlying syncers write the same number of bytes, +// the smallest number is returned even though Write() is called on +// all of them. +func (ws multiWriteSyncer) Write(p []byte) (int, error) { + var writeErr error + nWritten := 0 + for _, w := range ws { + n, err := w.Write(p) + writeErr = multierr.Append(writeErr, err) + if nWritten == 0 && n != 0 { + nWritten = n + } else if n < nWritten { + nWritten = n + } + } + return nWritten, writeErr +} + +func (ws multiWriteSyncer) Sync() error { + var err error + for _, w := range ws { + err = multierr.Append(err, w.Sync()) + } + return err +} diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 0000000000..29f0a2de45 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,283 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. +// +// # Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. +// +// # Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. Argon2id is side-channel resistant and provides better brute- +// force cost savings due to time-memory tradeoffs than Argon2i. The recommended +// parameters for non-interactive operations (taken from [2]) are time=1 and to +// use the maximum available memory. 
+// +// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 +package argon2 + +import ( + "encoding/binary" + "sync" + + "golang.org/x/crypto/blake2b" +) + +// The Argon2 version implemented by this package. +const Version = 0x13 + +const ( + argon2d = iota + argon2i + argon2id +) + +// Key derives a key from the password, salt, and cost parameters using Argon2i +// returning a byte slice of length keyLen that can be used as cryptographic +// key. The CPU cost and parallelism degree must be greater than zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) +// +// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. +// If using that amount of memory (32 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be +// adjusted to the number of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) +} + +// IDKey derives a key from the password, salt, and cost parameters using +// Argon2id returning a byte slice of length keyLen that can be used as +// cryptographic key. The CPU cost and parallelism degree must be greater than +// zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) +// +// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. +// If using that amount of memory (64 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be +// adjusted to the numbers of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. 
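+//
+// A hedged sketch of generating such a salt with crypto/rand (16 bytes is a
+// common choice, not something this package mandates):
+//
+//	salt := make([]byte, 16)
+//	if _, err := rand.Read(salt); err != nil {
+//		panic(err)
+//	}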
+func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == 
argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + if n == 0 && slice == 0 { + refLane = lane + } + m, s := 3*segments, ((slice+1)%syncPoints)*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 0000000000..10f46948dc --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrary long hash value of in +// and writes the hash to out. +func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 0000000000..063e7784f8 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,60 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
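Editor's note on the argon2 core above: the exported helpers (such as IDKey) all funnel into deriveKey, which rounds the memory parameter down to a multiple of 4*threads blocks (with a floor of 8*threads), fills the block array lane by lane, and synchronizes the lanes at four sync points per pass with a sync.WaitGroup. A minimal, hypothetical sketch of calling the exported Argon2id helper; the password literal and parameter choices are illustrative (they follow the values suggested in the package documentation), not anything this provider does:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/argon2"
)

func main() {
	password := []byte("correct horse battery staple")

	// A fresh random 16-byte salt per password.
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// time=1 pass, memory=64 MiB, threads=4, 32-byte key: the Argon2id
	// parameters recommended in the package docs.
	key := argon2.IDKey(password, salt, 1, 64*1024, 4, 32)
	fmt.Printf("%x\n", key)
}
```

The blake2bHash helper shown above implements the spec's variable-length H' primitive: for outputs longer than 64 bytes it chains full BLAKE2b digests, emitting 32 bytes per link and sizing the final digest to whatever remains.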
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego + +package argon2 + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 0000000000..6713accac0 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,243 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego + +#include "textflag.h" + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFD $0xB1, v6, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + PSHUFB c40, v2; \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFB c48, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + MOVO v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v7, v7; \ + MOVO v5, t0; \ + 
PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + PSHUFB c40, v3; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFB c48, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + MOVO v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG_0(block, off) \ + MOVOU 8*(off+0)(block), X0; \ + MOVOU 8*(off+2)(block), X1; \ + MOVOU 8*(off+4)(block), X2; \ + MOVOU 8*(off+6)(block), X3; \ + MOVOU 8*(off+8)(block), X4; \ + MOVOU 8*(off+10)(block), X5; \ + MOVOU 8*(off+12)(block), X6; \ + MOVOU 8*(off+14)(block), X7 + +#define STORE_MSG_0(block, off) \ + MOVOU X0, 8*(off+0)(block); \ + MOVOU X1, 8*(off+2)(block); \ + MOVOU X2, 8*(off+4)(block); \ + MOVOU X3, 8*(off+6)(block); \ + MOVOU X4, 8*(off+8)(block); \ + MOVOU X5, 8*(off+10)(block); \ + MOVOU X6, 8*(off+12)(block); \ + MOVOU X7, 8*(off+14)(block) + +#define LOAD_MSG_1(block, off) \ + MOVOU 8*off+0*8(block), X0; \ + MOVOU 8*off+16*8(block), X1; \ + MOVOU 8*off+32*8(block), X2; \ + MOVOU 8*off+48*8(block), X3; \ + MOVOU 8*off+64*8(block), X4; \ + MOVOU 8*off+80*8(block), X5; \ + MOVOU 8*off+96*8(block), X6; \ + MOVOU 8*off+112*8(block), X7 + +#define STORE_MSG_1(block, off) \ + MOVOU X0, 8*off+0*8(block); \ + MOVOU X1, 8*off+16*8(block); \ + MOVOU X2, 8*off+32*8(block); \ + MOVOU X3, 8*off+48*8(block); \ + MOVOU X4, 8*off+64*8(block); \ + MOVOU X5, 8*off+80*8(block); \ + MOVOU X6, 8*off+96*8(block); \ + MOVOU X7, 8*off+112*8(block) + +#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ + LOAD_MSG_0(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_0(block, off) + +#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ + LOAD_MSG_1(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_1(block, off) + +// func blamkaSSE4(b *block) +TEXT ·blamkaSSE4(SB), 4, $0-8 + MOVQ b+0(FP), AX + + MOVOU ·c40<>(SB), X10 + MOVOU ·c48<>(SB), X11 + + BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) + + BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) + RET + +// func mixBlocksSSE2(out, a, b, c *block) +TEXT ·mixBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ c+24(FP), CX + MOVQ $128, DI + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, DI + JA loop + RET + +// func xorBlocksSSE2(out, a, b, c *block) +TEXT ·xorBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), 
DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ c+24(FP), CX + MOVQ $128, DI + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + MOVOU 0(DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, DI + JA loop + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 0000000000..a481b2243f --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 
+ + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 0000000000..16d58c650e --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go deleted file mode 100644 index fc31160908..0000000000 --- a/vendor/golang.org/x/crypto/bcrypt/base64.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bcrypt - -import "encoding/base64" - -const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - -var bcEncoding = base64.NewEncoding(alphabet) - -func base64Encode(src []byte) []byte { - n := bcEncoding.EncodedLen(len(src)) - dst := make([]byte, n) - bcEncoding.Encode(dst, src) - for dst[n-1] == '=' { - n-- - } - return dst[:n] -} - -func base64Decode(src []byte) ([]byte, error) { - numOfEquals := 4 - (len(src) % 4) - for i := 0; i < numOfEquals; i++ { - src = append(src, '=') - } - - dst := make([]byte, bcEncoding.DecodedLen(len(src))) - n, err := bcEncoding.Decode(dst, src) - if err != nil { - return nil, err - } - return dst[:n], nil -} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go deleted file mode 100644 index 5577c0f939..0000000000 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2011 The Go Authors. 
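Pausing the unrolled permutation for a note: every scalar line in blamkaGeneric repeats one pattern, a + b + 2·lo32(a)·lo32(b), the multiplication-hardened addition that distinguishes Argon2's BlaMka permutation from plain BLAKE2b. A sketch of that pattern factored into helpers; fBlaMka and blamkaQR are names invented here for illustration:

```go
package main

import (
	"fmt"
	"math/bits"
)

// fBlaMka is the BlaMka mixing primitive: a + b + 2*lo32(a)*lo32(b) mod 2^64.
func fBlaMka(a, b uint64) uint64 {
	return a + b + 2*uint64(uint32(a))*uint64(uint32(b))
}

// blamkaQR is one quarter round, matching the unrolled statements in
// blamkaGeneric; the rotation amounts (32, 24, 16, 63) are BLAKE2b's.
func blamkaQR(a, b, c, d uint64) (uint64, uint64, uint64, uint64) {
	a = fBlaMka(a, b)
	d = bits.RotateLeft64(d^a, -32)
	c = fBlaMka(c, d)
	b = bits.RotateLeft64(b^c, -24)
	a = fBlaMka(a, b)
	d = bits.RotateLeft64(d^a, -16)
	c = fBlaMka(c, d)
	b = bits.RotateLeft64(b^c, -63)
	return a, b, c, d
}

func main() {
	fmt.Println(blamkaQR(1, 2, 3, 4))
}
```

The amd64 path computes the same thing with PMULULQ/PADDQ and gets the 24- and 16-bit rotations essentially for free via PSHUFB byte shuffles (the c40 and c48 constants in the assembly above).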
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing -// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt // import "golang.org/x/crypto/bcrypt" - -// The code is a port of Provos and Mazières's C implementation. -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "io" - "strconv" - - "golang.org/x/crypto/blowfish" -) - -const ( - MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword - MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword - DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword -) - -// The error returned from CompareHashAndPassword when a password and hash do -// not match. -var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") - -// The error returned from CompareHashAndPassword when a hash is too short to -// be a bcrypt hash. -var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") - -// The error returned from CompareHashAndPassword when a hash was created with -// a bcrypt algorithm newer than this implementation. -type HashVersionTooNewError byte - -func (hv HashVersionTooNewError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) -} - -// The error returned from CompareHashAndPassword when a hash starts with something other than '$' -type InvalidHashPrefixError byte - -func (ih InvalidHashPrefixError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) -} - -type InvalidCostError int - -func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost) -} - -const ( - majorVersion = '2' - minorVersion = 'a' - maxSaltSize = 16 - maxCryptedHashSize = 23 - encodedSaltSize = 22 - encodedHashSize = 31 - minHashSize = 59 -) - -// magicCipherData is an IV for the 64 Blowfish encryption calls in -// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. -var magicCipherData = []byte{ - 0x4f, 0x72, 0x70, 0x68, - 0x65, 0x61, 0x6e, 0x42, - 0x65, 0x68, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x53, - 0x63, 0x72, 0x79, 0x44, - 0x6f, 0x75, 0x62, 0x74, -} - -type hashed struct { - hash []byte - salt []byte - cost int // allowed range is MinCost to MaxCost - major byte - minor byte -} - -// ErrPasswordTooLong is returned when the password passed to -// GenerateFromPassword is too long (i.e. > 72 bytes). -var ErrPasswordTooLong = errors.New("bcrypt: password length exceeds 72 bytes") - -// GenerateFromPassword returns the bcrypt hash of the password at the given -// cost. If the cost given is less than MinCost, the cost will be set to -// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, -// to compare the returned hashed password with its cleartext version. -// GenerateFromPassword does not accept passwords longer than 72 bytes, which -// is the longest password bcrypt will operate on. 
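The rest of the diff removes vendored x/crypto packages the provider no longer pulls in. For reference, the bcrypt package being deleted here exposed a two-call API; a minimal sketch (the password literal is illustrative). Note the cost is logarithmic: expensiveBlowfishSetup below runs 1 << cost key-expansion rounds, so each increment doubles the work.

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// DefaultCost is 10; costs below MinCost (4) are bumped to the default.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(hash)) // a "$2a$10$..." string

	// Returns nil on a match, bcrypt.ErrMismatchedHashAndPassword otherwise.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
		panic(err)
	}
}
```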
-func GenerateFromPassword(password []byte, cost int) ([]byte, error) { - if len(password) > 72 { - return nil, ErrPasswordTooLong - } - p, err := newFromPassword(password, cost) - if err != nil { - return nil, err - } - return p.Hash(), nil -} - -// CompareHashAndPassword compares a bcrypt hashed password with its possible -// plaintext equivalent. Returns nil on success, or an error on failure. -func CompareHashAndPassword(hashedPassword, password []byte) error { - p, err := newFromHash(hashedPassword) - if err != nil { - return err - } - - otherHash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return err - } - - otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} - if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { - return nil - } - - return ErrMismatchedHashAndPassword -} - -// Cost returns the hashing cost used to create the given hashed -// password. When, in the future, the hashing cost of a password system needs -// to be increased in order to adjust for greater computational power, this -// function allows one to establish which passwords need to be updated. -func Cost(hashedPassword []byte) (int, error) { - p, err := newFromHash(hashedPassword) - if err != nil { - return 0, err - } - return p.cost, nil -} - -func newFromPassword(password []byte, cost int) (*hashed, error) { - if cost < MinCost { - cost = DefaultCost - } - p := new(hashed) - p.major = majorVersion - p.minor = minorVersion - - err := checkCost(cost) - if err != nil { - return nil, err - } - p.cost = cost - - unencodedSalt := make([]byte, maxSaltSize) - _, err = io.ReadFull(rand.Reader, unencodedSalt) - if err != nil { - return nil, err - } - - p.salt = base64Encode(unencodedSalt) - hash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return nil, err - } - p.hash = hash - return p, err -} - -func newFromHash(hashedSecret []byte) (*hashed, error) { - if len(hashedSecret) < minHashSize { - return nil, ErrHashTooShort - } - p := new(hashed) - n, err := p.decodeVersion(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - n, err = p.decodeCost(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - - // The "+2" is here because we'll have to append at most 2 '=' to the salt - // when base64 decoding it in expensiveBlowfishSetup(). - p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) - copy(p.salt, hashedSecret[:encodedSaltSize]) - - hashedSecret = hashedSecret[encodedSaltSize:] - p.hash = make([]byte, len(hashedSecret)) - copy(p.hash, hashedSecret) - - return p, nil -} - -func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { - cipherData := make([]byte, len(magicCipherData)) - copy(cipherData, magicCipherData) - - c, err := expensiveBlowfishSetup(password, uint32(cost), salt) - if err != nil { - return nil, err - } - - for i := 0; i < 24; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) - } - } - - // Bug compatibility with C bcrypt implementations. We only encode 23 of - // the 24 bytes encrypted. - hsh := base64Encode(cipherData[:maxCryptedHashSize]) - return hsh, nil -} - -func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { - csalt, err := base64Decode(salt) - if err != nil { - return nil, err - } - - // Bug compatibility with C bcrypt implementations. They use the trailing - // NULL in the key string during expansion. - // We copy the key to prevent changing the underlying array. 
- ckey := append(key[:len(key):len(key)], 0) - - c, err := blowfish.NewSaltedCipher(ckey, csalt) - if err != nil { - return nil, err - } - - var i, rounds uint64 - rounds = 1 << cost - for i = 0; i < rounds; i++ { - blowfish.ExpandKey(ckey, c) - blowfish.ExpandKey(csalt, c) - } - - return c, nil -} - -func (p *hashed) Hash() []byte { - arr := make([]byte, 60) - arr[0] = '$' - arr[1] = p.major - n := 2 - if p.minor != 0 { - arr[2] = p.minor - n = 3 - } - arr[n] = '$' - n++ - copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) - n += 2 - arr[n] = '$' - n++ - copy(arr[n:], p.salt) - n += encodedSaltSize - copy(arr[n:], p.hash) - n += encodedHashSize - return arr[:n] -} - -func (p *hashed) decodeVersion(sbytes []byte) (int, error) { - if sbytes[0] != '$' { - return -1, InvalidHashPrefixError(sbytes[0]) - } - if sbytes[1] > majorVersion { - return -1, HashVersionTooNewError(sbytes[1]) - } - p.major = sbytes[1] - n := 3 - if sbytes[2] != '$' { - p.minor = sbytes[2] - n++ - } - return n, nil -} - -// sbytes should begin where decodeVersion left off. -func (p *hashed) decodeCost(sbytes []byte) (int, error) { - cost, err := strconv.Atoi(string(sbytes[0:2])) - if err != nil { - return -1, err - } - err = checkCost(cost) - if err != nil { - return -1, err - } - p.cost = cost - return 3, nil -} - -func (p *hashed) String() string { - return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) -} - -func checkCost(cost int) error { - if cost < MinCost || cost > MaxCost { - return InvalidCostError(cost) - } - return nil -} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go deleted file mode 100644 index 93da7322bc..0000000000 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its -// extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and -// draft-irtf-cfrg-xchacha-01. -package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" - -import ( - "crypto/cipher" - "errors" -) - -const ( - // KeySize is the size of the key used by this AEAD, in bytes. - KeySize = 32 - - // NonceSize is the size of the nonce used with the standard variant of this - // AEAD, in bytes. - // - // Note that this is too short to be safely generated at random if the same - // key is reused more than 2³² times. - NonceSize = 12 - - // NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305 - // variant of this AEAD, in bytes. - NonceSizeX = 24 - - // Overhead is the size of the Poly1305 authentication tag, and the - // difference between a ciphertext length and its plaintext. - Overhead = 16 -) - -type chacha20poly1305 struct { - key [KeySize]byte -} - -// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key. 
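The chacha20poly1305 package deleted below implemented the standard cipher.AEAD interface. A sketch of the usage it supported; the plaintext and error handling are illustrative:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

func main() {
	key := make([]byte, chacha20poly1305.KeySize) // 32 bytes
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	aead, err := chacha20poly1305.New(key)
	if err != nil {
		panic(err)
	}

	// NonceSize is 12 bytes; a nonce must never be reused with the same key.
	nonce := make([]byte, aead.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	ct := aead.Seal(nil, nonce, []byte("hello"), nil)
	pt, err := aead.Open(nil, nonce, ct, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pt), len(ct)-len(pt)) // "hello 16": Overhead is the 16-byte tag
}
```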
-func New(key []byte) (cipher.AEAD, error) { - if len(key) != KeySize { - return nil, errors.New("chacha20poly1305: bad key length") - } - ret := new(chacha20poly1305) - copy(ret.key[:], key) - return ret, nil -} - -func (c *chacha20poly1305) NonceSize() int { - return NonceSize -} - -func (c *chacha20poly1305) Overhead() int { - return Overhead -} - -func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { - if len(nonce) != NonceSize { - panic("chacha20poly1305: bad nonce length passed to Seal") - } - - if uint64(len(plaintext)) > (1<<38)-64 { - panic("chacha20poly1305: plaintext too large") - } - - return c.seal(dst, nonce, plaintext, additionalData) -} - -var errOpen = errors.New("chacha20poly1305: message authentication failed") - -func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - if len(nonce) != NonceSize { - panic("chacha20poly1305: bad nonce length passed to Open") - } - if len(ciphertext) < 16 { - return nil, errOpen - } - if uint64(len(ciphertext)) > (1<<38)-48 { - panic("chacha20poly1305: ciphertext too large") - } - - return c.open(dst, nonce, ciphertext, additionalData) -} - -// sliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. If the -// original slice has sufficient capacity then no allocation is performed. -func sliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return -} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go deleted file mode 100644 index 50695a14f6..0000000000 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego - -package chacha20poly1305 - -import ( - "encoding/binary" - - "golang.org/x/crypto/internal/alias" - "golang.org/x/sys/cpu" -) - -//go:noescape -func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool - -//go:noescape -func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte) - -var ( - useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2 -) - -// setupState writes a ChaCha20 input matrix to state. See -// https://tools.ietf.org/html/rfc7539#section-2.3. 
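setupState, just below, fills the 16-word ChaCha20 matrix: four fixed constants, eight key words, the 32-bit block counter, and three nonce words. The "magic" constants are nothing more than the ASCII string "expand 32-byte k" read as little-endian words; a small sketch that verifies this:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	c := []byte("expand 32-byte k")
	for i := 0; i < 4; i++ {
		fmt.Printf("0x%08x\n", binary.LittleEndian.Uint32(c[i*4:]))
	}
	// Output:
	// 0x61707865
	// 0x3320646e
	// 0x79622d32
	// 0x6b206574
}
```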
-func setupState(state *[16]uint32, key *[32]byte, nonce []byte) { - state[0] = 0x61707865 - state[1] = 0x3320646e - state[2] = 0x79622d32 - state[3] = 0x6b206574 - - state[4] = binary.LittleEndian.Uint32(key[0:4]) - state[5] = binary.LittleEndian.Uint32(key[4:8]) - state[6] = binary.LittleEndian.Uint32(key[8:12]) - state[7] = binary.LittleEndian.Uint32(key[12:16]) - state[8] = binary.LittleEndian.Uint32(key[16:20]) - state[9] = binary.LittleEndian.Uint32(key[20:24]) - state[10] = binary.LittleEndian.Uint32(key[24:28]) - state[11] = binary.LittleEndian.Uint32(key[28:32]) - - state[12] = 0 - state[13] = binary.LittleEndian.Uint32(nonce[0:4]) - state[14] = binary.LittleEndian.Uint32(nonce[4:8]) - state[15] = binary.LittleEndian.Uint32(nonce[8:12]) -} - -func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { - if !cpu.X86.HasSSSE3 { - return c.sealGeneric(dst, nonce, plaintext, additionalData) - } - - var state [16]uint32 - setupState(&state, &c.key, nonce) - - ret, out := sliceForAppend(dst, len(plaintext)+16) - if alias.InexactOverlap(out, plaintext) { - panic("chacha20poly1305: invalid buffer overlap") - } - chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData) - return ret -} - -func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - if !cpu.X86.HasSSSE3 { - return c.openGeneric(dst, nonce, ciphertext, additionalData) - } - - var state [16]uint32 - setupState(&state, &c.key, nonce) - - ciphertext = ciphertext[:len(ciphertext)-16] - ret, out := sliceForAppend(dst, len(ciphertext)) - if alias.InexactOverlap(out, ciphertext) { - panic("chacha20poly1305: invalid buffer overlap") - } - if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) { - for i := range out { - out[i] = 0 - } - return nil, errOpen - } - - return ret, nil -} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s deleted file mode 100644 index 731d2ac6db..0000000000 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +++ /dev/null @@ -1,2715 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. 
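The assembly being deleted below keeps the Poly1305 accumulator in the acc0-acc2 registers and the clamped half of the one-time key, r, on the stack. Clamping (the polyClampMask constant defined just below) clears 22 specific bits of r so the staged multiply-reduce cannot overflow; the equivalent masking in Go, as a sketch with a hypothetical fixed key:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A hypothetical 32-byte one-time key; the first half is r, the
	// second half is s. Only r is clamped.
	var key [32]byte
	for i := range key {
		key[i] = byte(i)
	}
	r0 := binary.LittleEndian.Uint64(key[0:8]) & 0x0FFFFFFC0FFFFFFF
	r1 := binary.LittleEndian.Uint64(key[8:16]) & 0x0FFFFFFC0FFFFFFC
	fmt.Printf("r = %016x%016x\n", r1, r0) // high limb, then low limb
}
```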
- -//go:build gc && !purego - -#include "textflag.h" -// General register allocation -#define oup DI -#define inp SI -#define inl BX -#define adp CX // free to reuse, after we hash the additional data -#define keyp R8 // free to reuse, when we copy the key to stack -#define itr2 R9 // general iterator -#define itr1 CX // general iterator -#define acc0 R10 -#define acc1 R11 -#define acc2 R12 -#define t0 R13 -#define t1 R14 -#define t2 R15 -#define t3 R8 -// Register and stack allocation for the SSE code -#define rStore (0*16)(BP) -#define sStore (1*16)(BP) -#define state1Store (2*16)(BP) -#define state2Store (3*16)(BP) -#define tmpStore (4*16)(BP) -#define ctr0Store (5*16)(BP) -#define ctr1Store (6*16)(BP) -#define ctr2Store (7*16)(BP) -#define ctr3Store (8*16)(BP) -#define A0 X0 -#define A1 X1 -#define A2 X2 -#define B0 X3 -#define B1 X4 -#define B2 X5 -#define C0 X6 -#define C1 X7 -#define C2 X8 -#define D0 X9 -#define D1 X10 -#define D2 X11 -#define T0 X12 -#define T1 X13 -#define T2 X14 -#define T3 X15 -#define A3 T0 -#define B3 T1 -#define C3 T2 -#define D3 T3 -// Register and stack allocation for the AVX2 code -#define rsStoreAVX2 (0*32)(BP) -#define state1StoreAVX2 (1*32)(BP) -#define state2StoreAVX2 (2*32)(BP) -#define ctr0StoreAVX2 (3*32)(BP) -#define ctr1StoreAVX2 (4*32)(BP) -#define ctr2StoreAVX2 (5*32)(BP) -#define ctr3StoreAVX2 (6*32)(BP) -#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack -#define AA0 Y0 -#define AA1 Y5 -#define AA2 Y6 -#define AA3 Y7 -#define BB0 Y14 -#define BB1 Y9 -#define BB2 Y10 -#define BB3 Y11 -#define CC0 Y12 -#define CC1 Y13 -#define CC2 Y8 -#define CC3 Y15 -#define DD0 Y4 -#define DD1 Y1 -#define DD2 Y2 -#define DD3 Y3 -#define TT0 DD3 -#define TT1 AA3 -#define TT2 BB3 -#define TT3 CC3 -// ChaCha20 constants -DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 -DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e -DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 -DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 -DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 -DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e -DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 -DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 -// <<< 16 with PSHUFB -DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A -DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A -// <<< 8 with PSHUFB -DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B -DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B - -DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 -DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 -DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 -DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 - -DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 -DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 -DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 -DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 -// Poly1305 key clamp -DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF - -DATA ·sseIncMask<>+0x00(SB)/8, $0x1 -DATA ·sseIncMask<>+0x08(SB)/8, $0x0 -// To load/store the last < 16 bytes in a buffer -DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff -DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff -DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff -DATA 
·andMask<>+0x28(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff -DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff -DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff -DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff -DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff -DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff -DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff -DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff -DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff -DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff -DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff - -GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 -GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 -GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 -GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 -GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 -// No PALIGNR in Go ASM yet (but VPALIGNR is present). -#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 -#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 -#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 -#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 -#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 -#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 -#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 -#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 -#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 -#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 -#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 -#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 -#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 -#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 -#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 -#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; 
BYTE $0x0c // PALIGNR $12, X13, X13 -#define shiftC0Right shiftC0Left -#define shiftC1Right shiftC1Left -#define shiftC2Right shiftC2Left -#define shiftC3Right shiftC3Left -#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 -#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 -#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 -#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 - -// Some macros - -// ROL rotates the uint32s in register R left by N bits, using temporary T. -#define ROL(N, R, T) \ - MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R - -// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. -#ifdef GOAMD64_v2 -#define ROL16(R, T) PSHUFB ·rol16<>(SB), R -#else -#define ROL16(R, T) ROL(16, R, T) -#endif - -// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. -#ifdef GOAMD64_v2 -#define ROL8(R, T) PSHUFB ·rol8<>(SB), R -#else -#define ROL8(R, T) ROL(8, R, T) -#endif - -#define chachaQR(A, B, C, D, T) \ - PADDD B, A; PXOR A, D; ROL16(D, T) \ - PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ - PADDD B, A; PXOR A, D; ROL8(D, T) \ - PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B - -#define chachaQR_AVX2(A, B, C, D, T) \ - VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ - VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ - VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ - VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B - -#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 -#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 -#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX -#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 -#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 - -#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 -#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 -#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 - -#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage -#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage -// ---------------------------------------------------------------------------- -TEXT polyHashADInternal<>(SB), NOSPLIT, $0 - // adp points to beginning of additional data - // itr2 holds ad length - XORQ acc0, acc0 - XORQ acc1, acc1 - XORQ acc2, acc2 - CMPQ itr2, $13 - JNE hashADLoop - -openFastTLSAD: - // Special treatment for the TLS case of 13 bytes - MOVQ (adp), acc0 - MOVQ 5(adp), acc1 - SHRQ $24, acc1 - MOVQ $1, acc2 - polyMul - RET - -hashADLoop: - 
// Hash in 16 byte chunks - CMPQ itr2, $16 - JB hashADTail - polyAdd(0(adp)) - LEAQ (1*16)(adp), adp - SUBQ $16, itr2 - polyMul - JMP hashADLoop - -hashADTail: - CMPQ itr2, $0 - JE hashADDone - - // Hash last < 16 byte tail - XORQ t0, t0 - XORQ t1, t1 - XORQ t2, t2 - ADDQ itr2, adp - -hashADTailLoop: - SHLQ $8, t0, t1 - SHLQ $8, t0 - MOVB -1(adp), t2 - XORQ t2, t0 - DECQ adp - DECQ itr2 - JNE hashADTailLoop - -hashADTailFinish: - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - - // Finished AD -hashADDone: - RET - -// ---------------------------------------------------------------------------- -// func chacha20Poly1305Open(dst, key, src, ad []byte) bool -TEXT ·chacha20Poly1305Open(SB), 0, $288-97 - // For aligned stack access - MOVQ SP, BP - ADDQ $32, BP - ANDQ $-32, BP - MOVQ dst+0(FP), oup - MOVQ key+24(FP), keyp - MOVQ src+48(FP), inp - MOVQ src_len+56(FP), inl - MOVQ ad+72(FP), adp - - // Check for AVX2 support - CMPB ·useAVX2(SB), $1 - JE chacha20Poly1305Open_AVX2 - - // Special optimization, for very short buffers - CMPQ inl, $128 - JBE openSSE128 // About 16% faster - - // For long buffers, prepare the poly key first - MOVOU ·chacha20Constants<>(SB), A0 - MOVOU (1*16)(keyp), B0 - MOVOU (2*16)(keyp), C0 - MOVOU (3*16)(keyp), D0 - MOVO D0, T1 - - // Store state on stack for future use - MOVO B0, state1Store - MOVO C0, state2Store - MOVO D0, ctr3Store - MOVQ $10, itr2 - -openSSEPreparePolyKey: - chachaQR(A0, B0, C0, D0, T0) - shiftB0Left; shiftC0Left; shiftD0Left - chachaQR(A0, B0, C0, D0, T0) - shiftB0Right; shiftC0Right; shiftD0Right - DECQ itr2 - JNE openSSEPreparePolyKey - - // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 - - // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVO A0, rStore; MOVO B0, sStore - - // Hash AAD - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - -openSSEMainLoop: - CMPQ inl, $256 - JB openSSEMainLoopDone - - // Load state, increment counter blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 - - // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - - // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 - MOVQ $4, itr1 - MOVQ inp, itr2 - -openSSEInternalLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyAdd(0(itr2)) - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - LEAQ (2*8)(itr2), itr2 - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - polyMulStage3 - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - 
DECQ itr1 - JGE openSSEInternalLoop - - polyAdd(0(itr2)) - polyMul - LEAQ (2*8)(itr2), itr2 - - CMPQ itr1, $-6 - JG openSSEInternalLoop - - // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - - // Load - xor - store - MOVO D3, tmpStore - MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) - MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) - MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) - MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) - MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup) - MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) - MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) - MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) - MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) - MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) - MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) - MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) - MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) - MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) - LEAQ 256(inp), inp - LEAQ 256(oup), oup - SUBQ $256, inl - JMP openSSEMainLoop - -openSSEMainLoopDone: - // Handle the various tail sizes efficiently - TESTQ inl, inl - JE openSSEFinalize - CMPQ inl, $64 - JBE openSSETail64 - CMPQ inl, $128 - JBE openSSETail128 - CMPQ inl, $192 - JBE openSSETail192 - JMP openSSETail256 - -openSSEFinalize: - // Hash in the PT, AAD lengths - ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 - polyMul - - // Final reduce - MOVQ acc0, t0 - MOVQ acc1, t1 - MOVQ acc2, t2 - SUBQ $-5, acc0 - SBBQ $-1, acc1 - SBBQ $3, acc2 - CMOVQCS t0, acc0 - CMOVQCS t1, acc1 - CMOVQCS t2, acc2 - - // Add in the "s" part of the key - ADDQ 0+sStore, acc0 - ADCQ 8+sStore, acc1 - - // Finally, constant time compare to the tag at the end of the message - XORQ AX, AX - MOVQ $1, DX - XORQ (0*8)(inp), acc0 - XORQ (1*8)(inp), acc1 - ORQ acc1, acc0 - CMOVQEQ DX, AX - - // Return true iff tags are equal - MOVB AX, ret+96(FP) - RET - -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 129 bytes -openSSE128: - // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks - MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 - MOVQ $10, itr2 - -openSSE128InnerCipherLoop: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftB1Left; shiftB2Left - shiftC0Left; shiftC1Left; shiftC2Left - shiftD0Left; shiftD1Left; shiftD2Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftB1Right; shiftB2Right - shiftC0Right; 
shiftC1Right; shiftC2Right - shiftD0Right; shiftD1Right; shiftD2Right - DECQ itr2 - JNE openSSE128InnerCipherLoop - - // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 - PADDL T2, C1; PADDL T2, C2 - PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 - - // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVOU A0, rStore; MOVOU B0, sStore - - // Hash - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - -openSSE128Open: - CMPQ inl, $16 - JB openSSETail16 - SUBQ $16, inl - - // Load for hashing - polyAdd(0(inp)) - - // Load for decryption - MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - polyMul - - // Shift the stream "left" - MOVO B1, A1 - MOVO C1, B1 - MOVO D1, C1 - MOVO A2, D1 - MOVO B2, A2 - MOVO C2, B2 - MOVO D2, C2 - JMP openSSE128Open - -openSSETail16: - TESTQ inl, inl - JE openSSEFinalize - - // We can safely load the CT from the end, because it is padded with the MAC - MOVQ inl, itr2 - SHLQ $4, itr2 - LEAQ ·andMask<>(SB), t0 - MOVOU (inp), T0 - ADDQ inl, inp - PAND -16(t0)(itr2*1), T0 - MOVO T0, 0+tmpStore - MOVQ T0, t0 - MOVQ 8+tmpStore, t1 - PXOR A1, T0 - - // We can only store one byte at a time, since plaintext can be shorter than 16 bytes -openSSETail16Store: - MOVQ T0, t3 - MOVB t3, (oup) - PSRLDQ $1, T0 - INCQ oup - DECQ inl - JNE openSSETail16Store - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - JMP openSSEFinalize - -// ---------------------------------------------------------------------------- -// Special optimization for the last 64 bytes of ciphertext -openSSETail64: - // Need to decrypt up to 64 bytes - prepare single block - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store - XORQ itr2, itr2 - MOVQ inl, itr1 - CMPQ itr1, $16 - JB openSSETail64LoopB - -openSSETail64LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul - SUBQ $16, itr1 - -openSSETail64LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0) - shiftB0Left; shiftC0Left; shiftD0Left - chachaQR(A0, B0, C0, D0, T0) - shiftB0Right; shiftC0Right; shiftD0Right - - CMPQ itr1, $16 - JAE openSSETail64LoopA - - CMPQ itr2, $160 - JNE openSSETail64LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 - -openSSETail64DecLoop: - CMPQ inl, $16 - JB openSSETail64DecLoopDone - SUBQ $16, inl - MOVOU (inp), T0 - PXOR T0, A0 - MOVOU A0, (oup) - LEAQ 16(inp), inp - LEAQ 16(oup), oup - MOVO B0, A0 - MOVO C0, B0 - MOVO D0, C0 - JMP openSSETail64DecLoop - -openSSETail64DecLoopDone: - MOVO A0, A1 - JMP openSSETail16 - -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext -openSSETail128: - // Need to decrypt up to 128 bytes - prepare two blocks - MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr0Store - MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store - XORQ itr2, itr2 - MOVQ inl, itr1 - ANDQ $-16, itr1 - -openSSETail128LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul - -openSSETail128LoopB: - ADDQ $16, 
itr2 - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - - CMPQ itr2, itr1 - JB openSSETail128LoopA - - CMPQ itr2, $160 - JNE openSSETail128LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B0; PADDL state1Store, B1 - PADDL state2Store, C0; PADDL state2Store, C1 - PADDL ctr1Store, D0; PADDL ctr0Store, D1 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) - - SUBQ $64, inl - LEAQ 64(inp), inp - LEAQ 64(oup), oup - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 192 bytes of ciphertext -openSSETail192: - // Need to decrypt up to 192 bytes - prepare three blocks - MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store - MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store - MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store - - MOVQ inl, itr1 - MOVQ $160, itr2 - CMPQ itr1, $160 - CMOVQGT itr2, itr1 - ANDQ $-16, itr1 - XORQ itr2, itr2 - -openSSLTail192LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul - -openSSLTail192LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - shiftB2Left; shiftC2Left; shiftD2Left - - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - shiftB2Right; shiftC2Right; shiftD2Right - - CMPQ itr2, itr1 - JB openSSLTail192LoopA - - CMPQ itr2, $160 - JNE openSSLTail192LoopB - - CMPQ inl, $176 - JB openSSLTail192Store - - polyAdd(160(inp)) - polyMul - - CMPQ inl, $192 - JB openSSLTail192Store - - polyAdd(176(inp)) - polyMul - -openSSLTail192Store: - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 - PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 - PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 - MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) - - MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - - SUBQ $128, inl - LEAQ 128(inp), inp - LEAQ 128(oup), oup - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext -openSSETail256: - // Need to decrypt up to 256 bytes 
-// ----------------------------------------------------------------------------
-// Special optimization for the last 256 bytes of ciphertext
-openSSETail256:
-	// Need to decrypt up to 256 bytes - prepare four blocks
-	MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0
-	MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
-	MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
-	MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3
-
-	// Store counters
-	MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store
-	XORQ itr2, itr2
-
-openSSETail256Loop:
-	// This loop inteleaves 8 ChaCha quarter rounds with 1 poly multiplication
-	polyAdd(0(inp)(itr2*1))
-	MOVO C3, tmpStore
-	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
-	MOVO tmpStore, C3
-	MOVO C1, tmpStore
-	chachaQR(A3, B3, C3, D3, C1)
-	MOVO tmpStore, C1
-	shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left
-	shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left
-	shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left
-	polyMulStage1
-	polyMulStage2
-	MOVO C3, tmpStore
-	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
-	MOVO tmpStore, C3
-	MOVO C1, tmpStore
-	chachaQR(A3, B3, C3, D3, C1)
-	MOVO tmpStore, C1
-	polyMulStage3
-	polyMulReduceStage
-	shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right
-	shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right
-	shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right
-	ADDQ $2*8, itr2
-	CMPQ itr2, $160
-	JB openSSETail256Loop
-	MOVQ inl, itr1
-	ANDQ $-16, itr1
-
-openSSETail256HashLoop:
-	polyAdd(0(inp)(itr2*1))
-	polyMul
-	ADDQ $2*8, itr2
-	CMPQ itr2, itr1
-	JB openSSETail256HashLoop
-
-	// Add in the state
-	PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3
-	PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3
-	PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3
-	PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3
-	MOVO D3, tmpStore
-
-	// Load - xor - store
-	MOVOU (0*16)(inp), D3; PXOR D3, A0
-	MOVOU (1*16)(inp), D3; PXOR D3, B0
-	MOVOU (2*16)(inp), D3; PXOR D3, C0
-	MOVOU (3*16)(inp), D3; PXOR D3, D0
-	MOVOU A0, (0*16)(oup)
-	MOVOU B0, (1*16)(oup)
-	MOVOU C0, (2*16)(oup)
-	MOVOU D0, (3*16)(oup)
-	MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0
-	PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1
-	MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup)
-	MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0
-	PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2
-	MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup)
-	LEAQ 192(inp), inp
-	LEAQ 192(oup), oup
-	SUBQ $192, inl
-	MOVO A3, A0
-	MOVO B3, B0
-	MOVO C3, C0
-	MOVO tmpStore, D0
-
-	JMP openSSETail64DecLoop
-
-// ----------------------------------------------------------------------------
-// ------------------------- AVX2 Code ----------------------------------------
-chacha20Poly1305Open_AVX2:
-	VZEROUPPER
-	VMOVDQU ·chacha20Constants<>(SB), AA0
-	BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14
-	BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12
-	BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4
-	VPADDD ·avx2InitMask<>(SB), DD0, DD0
-
-	// Special optimization, for very short buffers
-	CMPQ inl, $192
-	JBE openAVX2192
-	CMPQ inl, $320
-	JBE openAVX2320
-
-	// For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream
-	VMOVDQA BB0, state1StoreAVX2
-	VMOVDQA CC0, state2StoreAVX2
-	VMOVDQA DD0, ctr3StoreAVX2
-	MOVQ $10, itr2
-
-openAVX2PreparePolyKey:
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0)
-	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0)
-	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0
-	DECQ itr2
-	JNE openAVX2PreparePolyKey
-
-	VPADDD ·chacha20Constants<>(SB), AA0, AA0
-	VPADDD state1StoreAVX2, BB0, BB0
-	VPADDD state2StoreAVX2, CC0, CC0
-	VPADDD ctr3StoreAVX2, DD0, DD0
-
-	VPERM2I128 $0x02, AA0, BB0, TT0
-
-	// Clamp and store poly key
-	VPAND ·polyClampMask<>(SB), TT0, TT0
-	VMOVDQA TT0, rsStoreAVX2
-
-	// Stream for the first 64 bytes
-	VPERM2I128 $0x13, AA0, BB0, AA0
-	VPERM2I128 $0x13, CC0, DD0, BB0
-
-	// Hash AD + first 64 bytes
-	MOVQ ad_len+80(FP), itr2
-	CALL polyHashADInternal<>(SB)
-	XORQ itr1, itr1
-
-openAVX2InitialHash64:
-	polyAdd(0(inp)(itr1*1))
-	polyMulAVX2
-	ADDQ $16, itr1
-	CMPQ itr1, $64
-	JNE openAVX2InitialHash64
-
-	// Decrypt the first 64 bytes
-	VPXOR (0*32)(inp), AA0, AA0
-	VPXOR (1*32)(inp), BB0, BB0
-	VMOVDQU AA0, (0*32)(oup)
-	VMOVDQU BB0, (1*32)(oup)
-	LEAQ (2*32)(inp), inp
-	LEAQ (2*32)(oup), oup
-	SUBQ $64, inl
-
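The polyAdd/polyMul macro pairs woven through the main loop that follows perform one Poly1305 step per 16-byte block: add the block (plus a high bit) into the accumulator, then multiply by r modulo 2^130 - 5. A big.Int sketch of a single step (the assembly keeps the accumulator in three 64-bit limbs acc0..acc2 instead):

```go
package main

import "math/big"

// polyStep mirrors one polyAdd+polyMul pair:
// acc = ((acc + block + 2^128) * r) mod 2^130 - 5.
// The implicit 2^128 bit is the "ADCQ $1, acc2" visible in the hash loops.
func polyStep(acc, r *big.Int, block [16]byte) {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))
	// big.Int wants big-endian bytes; the block is little-endian on the wire.
	le := make([]byte, 16)
	for i := range block {
		le[15-i] = block[i]
	}
	n := new(big.Int).SetBytes(le)
	n.Add(n, new(big.Int).Lsh(big.NewInt(1), 128))
	acc.Add(acc, n)
	acc.Mul(acc, r)
	acc.Mod(acc, p)
}
```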
-openAVX2MainLoop:
-	CMPQ inl, $512
-	JB openAVX2MainLoopDone
-
-	// Load state, increment counter blocks, store the incremented counters
-	VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
-	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
-	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
-	VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
-	VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
-	XORQ itr1, itr1
-
-openAVX2InternalLoop:
-	// Lets just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications
-	// Effectively per 512 bytes of stream we hash 480 bytes of ciphertext
-	polyAdd(0*8(inp)(itr1*1))
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	polyMulStage1_AVX2
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
-	polyMulStage2_AVX2
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	polyMulStage3_AVX2
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-	polyMulReduceStage
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
-	polyAdd(2*8(inp)(itr1*1))
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	polyMulStage1_AVX2
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-	polyMulStage2_AVX2
-	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
-	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	polyMulStage3_AVX2
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
-	polyMulReduceStage
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	polyAdd(4*8(inp)(itr1*1))
-	LEAQ (6*8)(itr1), itr1
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-	polyMulStage1_AVX2
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	polyMulStage2_AVX2
-	VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	polyMulStage3_AVX2
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-	polyMulReduceStage
-	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
-	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3
-	CMPQ itr1, $480
-	JNE openAVX2InternalLoop
-
-	VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
-	VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
-	VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
-	VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
-	VMOVDQA CC3, tmpStoreAVX2
-
-	// We only hashed 480 of the 512 bytes available - hash the remaining 32 here
-	polyAdd(480(inp))
-	polyMulAVX2
-	VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0
-	VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0
-	VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup)
-	VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
-	VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0
-	VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup)
-
-	// and here
-	polyAdd(496(inp))
-	polyMulAVX2
-	VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
-	VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0
-	VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup)
-	VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0
-	VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0
-	VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup)
-	LEAQ (32*16)(inp), inp
-	LEAQ (32*16)(oup), oup
-	SUBQ $(32*16), inl
-	JMP openAVX2MainLoop
-
-openAVX2MainLoopDone:
-	// Handle the various tail sizes efficiently
-	TESTQ inl, inl
-	JE openSSEFinalize
-	CMPQ inl, $128
-	JBE openAVX2Tail128
-	CMPQ inl, $256
-	JBE openAVX2Tail256
-	CMPQ inl, $384
-	JBE openAVX2Tail384
-	JMP openAVX2Tail512
-
-// ----------------------------------------------------------------------------
-// Special optimization for buffers smaller than 193 bytes
-openAVX2192:
-	// For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks
-	VMOVDQA AA0, AA1
-	VMOVDQA BB0, BB1
-	VMOVDQA CC0, CC1
-	VPADDD ·avx2IncMask<>(SB), DD0, DD1
-	VMOVDQA AA0, AA2
-	VMOVDQA BB0, BB2
-	VMOVDQA CC0, CC2
-	VMOVDQA DD0, DD2
-	VMOVDQA DD1, TT3
-	MOVQ $10, itr2
-
-openAVX2192InnerCipherLoop:
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
-	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
-	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
-	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
-	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1
-	DECQ itr2
-	JNE openAVX2192InnerCipherLoop
-	VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1
-	VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1
-	VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1
-	VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1
-	VPERM2I128 $0x02, AA0, BB0, TT0
-
-	// Clamp and store poly key
-	VPAND ·polyClampMask<>(SB), TT0, TT0
-	VMOVDQA TT0, rsStoreAVX2
-
-	// Stream for up to 192 bytes
-	VPERM2I128 $0x13, AA0, BB0, AA0
-	VPERM2I128 $0x13, CC0, DD0, BB0
-	VPERM2I128 $0x02, AA1, BB1, CC0
-	VPERM2I128 $0x02, CC1, DD1, DD0
-	VPERM2I128 $0x13, AA1, BB1, AA1
-	VPERM2I128 $0x13, CC1, DD1, BB1
-
-openAVX2ShortOpen:
-	// Hash
-	MOVQ ad_len+80(FP), itr2
-	CALL polyHashADInternal<>(SB)
-
-openAVX2ShortOpenLoop:
-	CMPQ inl, $32
-	JB openAVX2ShortTail32
-	SUBQ $32, inl
-
-	// Load for hashing
-	polyAdd(0*8(inp))
-	polyMulAVX2
-	polyAdd(2*8(inp))
-	polyMulAVX2
-
-	// Load for decryption
-	VPXOR (inp), AA0, AA0
-	VMOVDQU AA0, (oup)
-	LEAQ (1*32)(inp), inp
-	LEAQ (1*32)(oup), oup
-
-	// Shift stream left
-	VMOVDQA BB0, AA0
-	VMOVDQA CC0, BB0
-	VMOVDQA DD0, CC0
-	VMOVDQA AA1, DD0
-	VMOVDQA BB1, AA1
-	VMOVDQA CC1, BB1
-	VMOVDQA DD1, CC1
-	VMOVDQA AA2, DD1
-	VMOVDQA BB2, AA2
-	JMP openAVX2ShortOpenLoop
-
-openAVX2ShortTail32:
-	CMPQ inl, $16
-	VMOVDQA A0, A1
-	JB openAVX2ShortDone
-
-	SUBQ $16, inl
-
-	// Load for hashing
-	polyAdd(0*8(inp))
-	polyMulAVX2
-
-	// Load for decryption
-	VPXOR (inp), A0, T0
-	VMOVDQU T0, (oup)
-	LEAQ (1*16)(inp), inp
-	LEAQ (1*16)(oup), oup
-	VPERM2I128 $0x11, AA0, AA0, AA0
-	VMOVDQA A0, A1
-
-openAVX2ShortDone:
-	VZEROUPPER
-	JMP openSSETail16
-
-// ----------------------------------------------------------------------------
-// Special optimization for buffers smaller than 321 bytes
-openAVX2320:
-	// For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks
-	VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1
-	VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2
-	VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3
-	MOVQ $10, itr2
-
-openAVX2320InnerCipherLoop:
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
-	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
-	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
-	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
-	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2
-	DECQ itr2
-	JNE openAVX2320InnerCipherLoop
-
-	VMOVDQA ·chacha20Constants<>(SB), TT0
-	VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2
-	VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2
-	VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2
-	VMOVDQA ·avx2IncMask<>(SB), TT0
-	VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3
-	VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3
-	VPADDD TT3, DD2, DD2
-
-	// Clamp and store poly key
-	VPERM2I128 $0x02, AA0, BB0, TT0
-	VPAND ·polyClampMask<>(SB), TT0, TT0
-	VMOVDQA TT0, rsStoreAVX2
-
-	// Stream for up to 320 bytes
-	VPERM2I128 $0x13, AA0, BB0, AA0
-	VPERM2I128 $0x13, CC0, DD0, BB0
-	VPERM2I128 $0x02, AA1, BB1, CC0
-	VPERM2I128 $0x02, CC1, DD1, DD0
-	VPERM2I128 $0x13, AA1, BB1, AA1
-	VPERM2I128 $0x13, CC1, DD1, BB1
-	VPERM2I128 $0x02, AA2, BB2, CC1
-	VPERM2I128 $0x02, CC2, DD2, DD1
-	VPERM2I128 $0x13, AA2, BB2, AA2
-	VPERM2I128 $0x13, CC2, DD2, BB2
-	JMP openAVX2ShortOpen
-
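Both short-buffer paths above end by clamping the first 16 bytes of keystream with polyClampMask before storing the one-time Poly1305 key. The clamp is the standard RFC 8439 masking of r; in Go terms, roughly:

```go
package main

import "encoding/binary"

// clampR is the scalar view of "VPAND ·polyClampMask<>(SB)": specific bits
// of r are forced to zero so products in the MAC stay within comfortable
// limb bounds during the unreduced multiply stages.
func clampR(polyKey [32]byte) (r0, r1 uint64) {
	r0 = binary.LittleEndian.Uint64(polyKey[0:8]) & 0x0FFFFFFC0FFFFFFF
	r1 = binary.LittleEndian.Uint64(polyKey[8:16]) & 0x0FFFFFFC0FFFFFFC
	return
}
```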
-// ----------------------------------------------------------------------------
-// Special optimization for the last 128 bytes of ciphertext
-openAVX2Tail128:
-	// Need to decrypt up to 128 bytes - prepare two blocks
-	VMOVDQA ·chacha20Constants<>(SB), AA1
-	VMOVDQA state1StoreAVX2, BB1
-	VMOVDQA state2StoreAVX2, CC1
-	VMOVDQA ctr3StoreAVX2, DD1
-	VPADDD ·avx2IncMask<>(SB), DD1, DD1
-	VMOVDQA DD1, DD0
-
-	XORQ itr2, itr2
-	MOVQ inl, itr1
-	ANDQ $-16, itr1
-	TESTQ itr1, itr1
-	JE openAVX2Tail128LoopB
-
-openAVX2Tail128LoopA:
-	// Perform ChaCha rounds, while hashing the remaining input
-	polyAdd(0(inp)(itr2*1))
-	polyMulAVX2
-
-openAVX2Tail128LoopB:
-	ADDQ $16, itr2
-	chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
-	VPALIGNR $4, BB1, BB1, BB1
-	VPALIGNR $8, CC1, CC1, CC1
-	VPALIGNR $12, DD1, DD1, DD1
-	chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
-	VPALIGNR $12, BB1, BB1, BB1
-	VPALIGNR $8, CC1, CC1, CC1
-	VPALIGNR $4, DD1, DD1, DD1
-	CMPQ itr2, itr1
-	JB openAVX2Tail128LoopA
-	CMPQ itr2, $160
-	JNE openAVX2Tail128LoopB
-
-	VPADDD ·chacha20Constants<>(SB), AA1, AA1
-	VPADDD state1StoreAVX2, BB1, BB1
-	VPADDD state2StoreAVX2, CC1, CC1
-	VPADDD DD0, DD1, DD1
-	VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
-
-openAVX2TailLoop:
-	CMPQ inl, $32
-	JB openAVX2Tail
-	SUBQ $32, inl
-
-	// Load for decryption
-	VPXOR (inp), AA0, AA0
-	VMOVDQU AA0, (oup)
-	LEAQ (1*32)(inp), inp
-	LEAQ (1*32)(oup), oup
-	VMOVDQA BB0, AA0
-	VMOVDQA CC0, BB0
-	VMOVDQA DD0, CC0
-	JMP openAVX2TailLoop
-
-openAVX2Tail:
-	CMPQ inl, $16
-	VMOVDQA A0, A1
-	JB openAVX2TailDone
-	SUBQ $16, inl
-
-	// Load for decryption
-	VPXOR (inp), A0, T0
-	VMOVDQU T0, (oup)
-	LEAQ (1*16)(inp), inp
-	LEAQ (1*16)(oup), oup
-	VPERM2I128 $0x11, AA0, AA0, AA0
-	VMOVDQA A0, A1
-
-openAVX2TailDone:
-	VZEROUPPER
-	JMP openSSETail16
-
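The 256- and 384-byte tails that follow size their hash workload up front: the blocks still to be hashed are counted and capped at the 10 ChaCha double-rounds, so hashing and cipher rounds finish together. A sketch of the SUBQ/SHRQ/CMPQ/CMOVQGT sequence (hashIters is a hypothetical name):

```go
// hashIters mirrors the iteration count computed at the top of
// openAVX2Tail256: the final 128 bytes are hashed after the rounds finish,
// so only (inl-128)/16 blocks are interleaved, and never more than the 10
// double-rounds available (the CMOVQGT keeps the smaller count).
func hashIters(inl int) int {
	iters := (inl - 128) / 16
	if iters > 10 {
		iters = 10
	}
	return iters
}
```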
-// ----------------------------------------------------------------------------
-// Special optimization for the last 256 bytes of ciphertext
-openAVX2Tail256:
-	// Need to decrypt up to 256 bytes - prepare four blocks
-	VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1
-	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1
-	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1
-	VMOVDQA ctr3StoreAVX2, DD0
-	VPADDD ·avx2IncMask<>(SB), DD0, DD0
-	VPADDD ·avx2IncMask<>(SB), DD0, DD1
-	VMOVDQA DD0, TT1
-	VMOVDQA DD1, TT2
-
-	// Compute the number of iterations that will hash data
-	MOVQ inl, tmpStoreAVX2
-	MOVQ inl, itr1
-	SUBQ $128, itr1
-	SHRQ $4, itr1
-	MOVQ $10, itr2
-	CMPQ itr1, $10
-	CMOVQGT itr2, itr1
-	MOVQ inp, inl
-	XORQ itr2, itr2
-
-openAVX2Tail256LoopA:
-	polyAdd(0(inl))
-	polyMulAVX2
-	LEAQ 16(inl), inl
-
-	// Perform ChaCha rounds, while hashing the remaining input
-openAVX2Tail256LoopB:
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
-	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
-	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1
-	INCQ itr2
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
-	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
-	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1
-	CMPQ itr2, itr1
-	JB openAVX2Tail256LoopA
-
-	CMPQ itr2, $10
-	JNE openAVX2Tail256LoopB
-
-	MOVQ inl, itr2
-	SUBQ inp, inl
-	MOVQ inl, itr1
-	MOVQ tmpStoreAVX2, inl
-
-	// Hash the remainder of data (if any)
-openAVX2Tail256Hash:
-	ADDQ $16, itr1
-	CMPQ itr1, inl
-	JGT openAVX2Tail256HashEnd
-	polyAdd (0(itr2))
-	polyMulAVX2
-	LEAQ 16(itr2), itr2
-	JMP openAVX2Tail256Hash
-
-// Store 128 bytes safely, then go to store loop
-openAVX2Tail256HashEnd:
-	VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1
-	VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1
-	VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1
-	VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1
-	VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2
-	VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
-
-	VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2
-	VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup)
-	LEAQ (4*32)(inp), inp
-	LEAQ (4*32)(oup), oup
-	SUBQ $4*32, inl
-
-	JMP openAVX2TailLoop
-
-// ----------------------------------------------------------------------------
-// Special optimization for the last 384 bytes of ciphertext
-openAVX2Tail384:
-	// Need to decrypt up to 384 bytes - prepare six blocks
-	VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2
-	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2
-	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2
-	VMOVDQA ctr3StoreAVX2, DD0
-	VPADDD ·avx2IncMask<>(SB), DD0, DD0
-	VPADDD ·avx2IncMask<>(SB), DD0, DD1
-	VPADDD ·avx2IncMask<>(SB), DD1, DD2
-	VMOVDQA DD0, ctr0StoreAVX2
-	VMOVDQA DD1, ctr1StoreAVX2
-	VMOVDQA DD2, ctr2StoreAVX2
-
-	// Compute the number of iterations that will hash two blocks of data
-	MOVQ inl, tmpStoreAVX2
-	MOVQ inl, itr1
-	SUBQ $256, itr1
-	SHRQ $4, itr1
-	ADDQ $6, itr1
-	MOVQ $10, itr2
-	CMPQ itr1, $10
-	CMOVQGT itr2, itr1
-	MOVQ inp, inl
-	XORQ itr2, itr2
-
-	// Perform ChaCha rounds, while hashing the remaining input
-openAVX2Tail384LoopB:
-	polyAdd(0(inl))
-	polyMulAVX2
-	LEAQ 16(inl), inl
-
-openAVX2Tail384LoopA:
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
-	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
-	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2
-	polyAdd(0(inl))
-	polyMulAVX2
-	LEAQ 16(inl), inl
-	INCQ itr2
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
-	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
-	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2
-
-	CMPQ itr2, itr1
-	JB openAVX2Tail384LoopB
-
-	CMPQ itr2, $10
-	JNE openAVX2Tail384LoopA
-
-	MOVQ inl, itr2
-	SUBQ inp, inl
-	MOVQ inl, itr1
-	MOVQ tmpStoreAVX2, inl
-
-openAVX2Tail384Hash:
-	ADDQ $16, itr1
-	CMPQ itr1, inl
-	JGT openAVX2Tail384HashEnd
-	polyAdd(0(itr2))
-	polyMulAVX2
-	LEAQ 16(itr2), itr2
-	JMP openAVX2Tail384Hash
-
-// Store 256 bytes safely, then go to store loop
-openAVX2Tail384HashEnd:
-	VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2
-	VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2
-	VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2
-	VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2
-	VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3
-	VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3
-	VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup)
-	VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3
-	VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3
-	VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup)
-	VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
-	LEAQ (8*32)(inp), inp
-	LEAQ (8*32)(oup), oup
-	SUBQ $8*32, inl
-	JMP openAVX2TailLoop
-
-// ----------------------------------------------------------------------------
-// Special optimization for the last 512 bytes of ciphertext
-openAVX2Tail512:
-	VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
-	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
-	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
-	VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
-	VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
-	XORQ itr1, itr1
-	MOVQ inp, itr2
-
-openAVX2Tail512LoopB:
-	polyAdd(0(itr2))
-	polyMulAVX2
-	LEAQ (2*8)(itr2), itr2
-
-openAVX2Tail512LoopA:
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-	polyAdd(0*8(itr2))
-	polyMulAVX2
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
-	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	polyAdd(2*8(itr2))
-	polyMulAVX2
-	LEAQ (4*8)(itr2), itr2
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
-	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3
-	INCQ itr1
-	CMPQ itr1, $4
-	JLT openAVX2Tail512LoopB
-
-	CMPQ itr1, $10
-	JNE openAVX2Tail512LoopA
-
-	MOVQ inl, itr1
-	SUBQ $384, itr1
-	ANDQ $-16, itr1
-
-openAVX2Tail512HashLoop:
-	TESTQ itr1, itr1
-	JE openAVX2Tail512HashEnd
-	polyAdd(0(itr2))
-	polyMulAVX2
-	LEAQ 16(itr2), itr2
-	SUBQ $16, itr1
-	JMP openAVX2Tail512HashLoop
-
-openAVX2Tail512HashEnd:
-	VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
-	VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
-	VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
-	VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
-	VMOVDQA CC3, tmpStoreAVX2
-	VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0
-	VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0
-	VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup)
-	VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
-	VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0
-	VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup)
-	VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
-	VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0
-	VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup)
-	VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0
-
-	LEAQ (12*32)(inp), inp
-	LEAQ (12*32)(oup), oup
-	SUBQ $12*32, inl
-
-	JMP openAVX2TailLoop
-
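The open routine above and the seal routine below are the two halves of the package's AEAD on amd64. A minimal round-trip through the public golang.org/x/crypto/chacha20poly1305 API shows the contract they implement (Seal appends the 16-byte tag, Open verifies it before returning plaintext):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

func main() {
	key := make([]byte, chacha20poly1305.KeySize)     // 32 bytes; zero key for demo only
	nonce := make([]byte, chacha20poly1305.NonceSize) // 12 bytes
	aead, err := chacha20poly1305.New(key)
	if err != nil {
		panic(err)
	}
	ct := aead.Seal(nil, nonce, []byte("hello"), []byte("additional data"))
	pt, err := aead.Open(nil, nonce, ct, []byte("additional data"))
	if err != nil {
		panic(err) // tag mismatch
	}
	fmt.Printf("%s\n", pt)
}
```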
-// ----------------------------------------------------------------------------
-// ----------------------------------------------------------------------------
-// func chacha20Poly1305Seal(dst, key, src, ad []byte)
-TEXT ·chacha20Poly1305Seal(SB), 0, $288-96
-	// For aligned stack access
-	MOVQ SP, BP
-	ADDQ $32, BP
-	ANDQ $-32, BP
-	MOVQ dst+0(FP), oup
-	MOVQ key+24(FP), keyp
-	MOVQ src+48(FP), inp
-	MOVQ src_len+56(FP), inl
-	MOVQ ad+72(FP), adp
-
-	CMPB ·useAVX2(SB), $1
-	JE chacha20Poly1305Seal_AVX2
-
-	// Special optimization, for very short buffers
-	CMPQ inl, $128
-	JBE sealSSE128 // About 15% faster
-
-	// In the seal case - prepare the poly key + 3 blocks of stream in the first iteration
-	MOVOU ·chacha20Constants<>(SB), A0
-	MOVOU (1*16)(keyp), B0
-	MOVOU (2*16)(keyp), C0
-	MOVOU (3*16)(keyp), D0
-
-	// Store state on stack for future use
-	MOVO B0, state1Store
-	MOVO C0, state2Store
-
-	// Load state, increment counter blocks
-	MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
-	MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
-	MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3
-
-	// Store counters
-	MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store
-	MOVQ $10, itr2
-
-sealSSEIntroLoop:
-	MOVO C3, tmpStore
-	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
-	MOVO tmpStore, C3
-	MOVO C1, tmpStore
-	chachaQR(A3, B3, C3, D3, C1)
-	MOVO tmpStore, C1
-	shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left
-	shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left
-	shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left
-
-	MOVO C3, tmpStore
-	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
-	MOVO tmpStore, C3
-	MOVO C1, tmpStore
-	chachaQR(A3, B3, C3, D3, C1)
-	MOVO tmpStore, C1
-	shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right
-	shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right
-	shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right
-	DECQ itr2
-	JNE sealSSEIntroLoop
-
-	// Add in the state
-	PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3
-	PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3
-	PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3
-	PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3
-
-	// Clamp and store the key
-	PAND ·polyClampMask<>(SB), A0
-	MOVO A0, rStore
-	MOVO B0, sStore
-
-	// Hash AAD
-	MOVQ ad_len+80(FP), itr2
-	CALL polyHashADInternal<>(SB)
-
-	MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0
-	PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1
-	MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup)
-	MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0
-	PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2
-	MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup)
-
-	MOVQ $128, itr1
-	SUBQ $128, inl
-	LEAQ 128(inp), inp
-
-	MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1
-
-	CMPQ inl, $64
-	JBE sealSSE128SealHash
-
-	MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0
-	PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3
-	MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup)
-
-	ADDQ $64, itr1
-	SUBQ $64, inl
-	LEAQ 64(inp), inp
-
-	MOVQ $2, itr1
-	MOVQ $8, itr2
-
-	CMPQ inl, $64
-	JBE sealSSETail64
-	CMPQ inl, $128
-	JBE sealSSETail128
-	CMPQ inl, $192
-	JBE sealSSETail192
-
-sealSSEMainLoop:
-	// Load state, increment counter blocks
-	MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0
-	MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
-	MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
-	MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3
-
-	// Store counters
-	MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store
-
-sealSSEInnerLoop:
-	MOVO C3, tmpStore
-	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
-	MOVO tmpStore, C3
-	MOVO C1, tmpStore
-	chachaQR(A3, B3, C3, D3, C1)
-	MOVO tmpStore, C1
-	polyAdd(0(oup))
-	shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left
-	shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left
-	shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left
-	polyMulStage1
-	polyMulStage2
-	LEAQ (2*8)(oup), oup
-	MOVO C3, tmpStore
-	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
-	MOVO tmpStore, C3
-	MOVO C1, tmpStore
-	polyMulStage3
-	chachaQR(A3, B3, C3, D3, C1)
-	MOVO tmpStore, C1
-	polyMulReduceStage
-	shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right
-	shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right
-	shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right
-	DECQ itr2
-	JGE sealSSEInnerLoop
-	polyAdd(0(oup))
-	polyMul
-	LEAQ (2*8)(oup), oup
-	DECQ itr1
-	JG sealSSEInnerLoop
-
-	// Add in the state
-	PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3
-	PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3
-	PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3
-	PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3
-	MOVO D3, tmpStore
-
-	// Load - xor - store
-	MOVOU (0*16)(inp), D3; PXOR D3, A0
-	MOVOU (1*16)(inp), D3; PXOR D3, B0
-	MOVOU (2*16)(inp), D3; PXOR D3, C0
-	MOVOU (3*16)(inp), D3; PXOR D3, D0
-	MOVOU A0, (0*16)(oup)
-	MOVOU B0, (1*16)(oup)
-	MOVOU C0, (2*16)(oup)
-	MOVOU D0, (3*16)(oup)
-	MOVO tmpStore, D3
-
-	MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0
-	PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1
-	MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup)
-	MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0
-	PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2
-	MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup)
-	ADDQ $192, inp
-	MOVQ $192, itr1
-	SUBQ $192, inl
-	MOVO A3, A1
-	MOVO B3, B1
-	MOVO C3, C1
-	MOVO D3, D1
-	CMPQ inl, $64
-	JBE sealSSE128SealHash
-	MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0
-	PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3
-	MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup)
-	LEAQ 64(inp), inp
-	SUBQ $64, inl
-	MOVQ $6, itr1
-	MOVQ $4, itr2
-	CMPQ inl, $192
-	JG sealSSEMainLoop
-
-	MOVQ inl, itr1
-	TESTQ inl, inl
-	JE sealSSE128SealHash
-	MOVQ $6, itr1
-	CMPQ inl, $64
-	JBE sealSSETail64
-	CMPQ inl, $128
-	JBE sealSSETail128
-	JMP sealSSETail192
-
-// ----------------------------------------------------------------------------
-// Special optimization for the last 64 bytes of plaintext
-sealSSETail64:
-	// Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes
-	MOVO ·chacha20Constants<>(SB), A1
-	MOVO state1Store, B1
-	MOVO state2Store, C1
-	MOVO ctr3Store, D1
-	PADDL ·sseIncMask<>(SB), D1
-	MOVO D1, ctr0Store
-
-sealSSETail64LoopA:
-	// Perform ChaCha rounds, while hashing the previously encrypted ciphertext
-	polyAdd(0(oup))
-	polyMul
-	LEAQ 16(oup), oup
-
-sealSSETail64LoopB:
-	chachaQR(A1, B1, C1, D1, T1)
-	shiftB1Left; shiftC1Left; shiftD1Left
-	chachaQR(A1, B1, C1, D1, T1)
-	shiftB1Right; shiftC1Right; shiftD1Right
-	polyAdd(0(oup))
-	polyMul
-	LEAQ 16(oup), oup
-
-	DECQ itr1
-	JG sealSSETail64LoopA
-
-	DECQ itr2
-	JGE sealSSETail64LoopB
-	PADDL ·chacha20Constants<>(SB), A1
-	PADDL state1Store, B1
-	PADDL state2Store, C1
-	PADDL ctr0Store, D1
-
-	JMP sealSSE128Seal
-
-// ----------------------------------------------------------------------------
-// Special optimization for the last 128 bytes of plaintext
-sealSSETail128:
-	// Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes
-	MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store
-	MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store
-
-sealSSETail128LoopA:
-	// Perform ChaCha rounds, while hashing the previously encrypted ciphertext
-	polyAdd(0(oup))
-	polyMul
-	LEAQ 16(oup), oup
-
-sealSSETail128LoopB:
-	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0)
-	shiftB0Left; shiftC0Left; shiftD0Left
-	shiftB1Left; shiftC1Left; shiftD1Left
-	polyAdd(0(oup))
-	polyMul
-	LEAQ 16(oup), oup
-	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0)
-	shiftB0Right; shiftC0Right; shiftD0Right
-	shiftB1Right; shiftC1Right; shiftD1Right
-
-	DECQ itr1
-	JG sealSSETail128LoopA
-
-	DECQ itr2
-	JGE sealSSETail128LoopB
-
-	PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1
-	PADDL state1Store, B0; PADDL state1Store, B1
-	PADDL state2Store, C0; PADDL state2Store, C1
-	PADDL ctr0Store, D0; PADDL ctr1Store, D1
-
-	MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3
-	PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0
-	MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup)
-
-	MOVQ $64, itr1
-	LEAQ 64(inp), inp
-	SUBQ $64, inl
-
-	JMP sealSSE128SealHash
-
-// ----------------------------------------------------------------------------
-// Special optimization for the last 192 bytes of plaintext
-sealSSETail192:
-	// Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes
-	MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store
-	MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store
-	MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store
-
-sealSSETail192LoopA:
-	// Perform ChaCha rounds, while hashing the previously encrypted ciphertext
-	polyAdd(0(oup))
-	polyMul
-	LEAQ 16(oup), oup
-
-sealSSETail192LoopB:
-	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
-	shiftB0Left; shiftC0Left; shiftD0Left
-	shiftB1Left; shiftC1Left; shiftD1Left
-	shiftB2Left; shiftC2Left; shiftD2Left
-
-	polyAdd(0(oup))
-	polyMul
-	LEAQ 16(oup), oup
-
-	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
-	shiftB0Right; shiftC0Right; shiftD0Right
-	shiftB1Right; shiftC1Right; shiftD1Right
-	shiftB2Right; shiftC2Right; shiftD2Right
-
-	DECQ itr1
-	JG sealSSETail192LoopA
-
-	DECQ itr2
-	JGE sealSSETail192LoopB
-
-	PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2
-	PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2
-	PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2
-	PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2
-
-	MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3
-	PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0
-	MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup)
-	MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3
-	PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1
-	MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup)
-
-	MOVO A2, A1
-	MOVO B2, B1
-	MOVO C2, C1
-	MOVO D2, D1
-	MOVQ $128, itr1
-	LEAQ 128(inp), inp
-	SUBQ $128, inl
-
-	JMP sealSSE128SealHash
-
-// ----------------------------------------------------------------------------
-// Special seal optimization for buffers smaller than 129 bytes
-sealSSE128:
-	// For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks
-	MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0
-	MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
-	MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
-	MOVO B0, T1; MOVO C0, T2; MOVO D1, T3
-	MOVQ $10, itr2
-
-sealSSE128InnerCipherLoop:
-	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
-	shiftB0Left; shiftB1Left; shiftB2Left
-	shiftC0Left; shiftC1Left; shiftC2Left
-	shiftD0Left; shiftD1Left; shiftD2Left
-	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
-	shiftB0Right; shiftB1Right; shiftB2Right
-	shiftC0Right; shiftC1Right; shiftC2Right
-	shiftD0Right; shiftD1Right; shiftD2Right
-	DECQ itr2
-	JNE sealSSE128InnerCipherLoop
-
-	// A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded
-	PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2
-	PADDL T1, B0; PADDL T1, B1; PADDL T1, B2
-	PADDL T2, C1; PADDL T2, C2
-	PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2
-	PAND ·polyClampMask<>(SB), A0
-	MOVOU A0, rStore
-	MOVOU B0, sStore
-
-	// Hash
-	MOVQ ad_len+80(FP), itr2
-	CALL polyHashADInternal<>(SB)
-	XORQ itr1, itr1
-
-sealSSE128SealHash:
-	// itr1 holds the number of bytes encrypted but not yet hashed
-	CMPQ itr1, $16
-	JB sealSSE128Seal
-	polyAdd(0(oup))
-	polyMul
-
-	SUBQ $16, itr1
-	ADDQ $16, oup
-
-	JMP sealSSE128SealHash
-
-sealSSE128Seal:
-	CMPQ inl, $16
-	JB sealSSETail
-	SUBQ $16, inl
-
-	// Load for decryption
-	MOVOU (inp), T0
-	PXOR T0, A1
-	MOVOU A1, (oup)
-	LEAQ (1*16)(inp), inp
-	LEAQ (1*16)(oup), oup
-
-	// Extract for hashing
-	MOVQ A1, t0
-	PSRLDQ $8, A1
-	MOVQ A1, t1
-	ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2
-	polyMul
-
-	// Shift the stream "left"
-	MOVO B1, A1
-	MOVO C1, B1
-	MOVO D1, C1
-	MOVO A2, D1
-	MOVO B2, A2
-	MOVO C2, B2
-	MOVO D2, C2
-	JMP sealSSE128Seal
-
-sealSSETail:
-	TESTQ inl, inl
-	JE sealSSEFinalize
-
-	// We can only load the PT one byte at a time to avoid read after end of buffer
-	MOVQ inl, itr2
-	SHLQ $4, itr2
-	LEAQ ·andMask<>(SB), t0
-	MOVQ inl, itr1
-	LEAQ -1(inp)(inl*1), inp
-	XORQ t2, t2
-	XORQ t3, t3
-	XORQ AX, AX
-
-sealSSETailLoadLoop:
-	SHLQ $8, t2, t3
-	SHLQ $8, t2
-	MOVB (inp), AX
-	XORQ AX, t2
-	LEAQ -1(inp), inp
-	DECQ itr1
-	JNE sealSSETailLoadLoop
-	MOVQ t2, 0+tmpStore
-	MOVQ t3, 8+tmpStore
-	PXOR 0+tmpStore, A1
-	MOVOU A1, (oup)
-	MOVOU -16(t0)(itr2*1), T0
-	PAND T0, A1
-	MOVQ A1, t0
-	PSRLDQ $8, A1
-	MOVQ A1, t1
-	ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2
-	polyMul
-
-	ADDQ inl, oup
-
-sealSSEFinalize:
-	// Hash in the buffer lengths
-	ADDQ ad_len+80(FP), acc0
-	ADCQ src_len+56(FP), acc1
-	ADCQ $1, acc2
-	polyMul
-
-	// Final reduce
-	MOVQ acc0, t0
-	MOVQ acc1, t1
-	MOVQ acc2, t2
-	SUBQ $-5, acc0
-	SBBQ $-1, acc1
-	SBBQ $3, acc2
-	CMOVQCS t0, acc0
-	CMOVQCS t1, acc1
-	CMOVQCS t2, acc2
-
-	// Add in the "s" part of the key
-	ADDQ 0+sStore, acc0
-	ADCQ 8+sStore, acc1
-
-	// Finally store the tag at the end of the message
-	MOVQ acc0, (0*8)(oup)
-	MOVQ acc1, (1*8)(oup)
-	RET
-
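The "Final reduce" block just before RET uses the usual Poly1305 ending: speculatively subtract the prime 2^130 - 5 (SUBQ $-5 / SBBQ $-1 / SBBQ $3 subtract its three limbs) and let CMOVQCS restore the original accumulator when the subtraction borrows; the "s" half of the key is then added mod 2^128 to form the tag. A Go sketch over the same 64-bit limbs:

```go
package main

import "math/bits"

// finalTag mirrors the end of sealSSEFinalize: conditionally reduce the
// accumulator (h0,h1,h2) modulo 2^130-5, then add s mod 2^128.
func finalTag(h0, h1, h2, s0, s1 uint64) (uint64, uint64) {
	// acc - (2^130 - 5): the prime's limbs are 0xFFFFFFFFFFFFFFFB,
	// 0xFFFFFFFFFFFFFFFF and 3, matching SUBQ $-5 / SBBQ $-1 / SBBQ $3.
	t0, b := bits.Sub64(h0, 0xFFFFFFFFFFFFFFFB, 0)
	t1, b := bits.Sub64(h1, 0xFFFFFFFFFFFFFFFF, b)
	_, b = bits.Sub64(h2, 3, b)
	if b == 0 { // no borrow: acc >= p, keep the reduced value (the CMOVQCS pair)
		h0, h1 = t0, t1
	}
	// Add the "s" part of the key; the tag is the low 128 bits.
	h0, c := bits.Add64(h0, s0, 0)
	h1, _ = bits.Add64(h1, s1, c)
	return h0, h1
}
```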
-// ----------------------------------------------------------------------------
-// ------------------------- AVX2 Code ----------------------------------------
-chacha20Poly1305Seal_AVX2:
-	VZEROUPPER
-	VMOVDQU ·chacha20Constants<>(SB), AA0
-	BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14
-	BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12
-	BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4
-	VPADDD ·avx2InitMask<>(SB), DD0, DD0
-
-	// Special optimizations, for very short buffers
-	CMPQ inl, $192
-	JBE seal192AVX2 // 33% faster
-	CMPQ inl, $320
-	JBE seal320AVX2 // 17% faster
-
-	// For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream
-	VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
-	VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2
-	VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2
-	VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2
-	VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2
-	VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2
-	VMOVDQA DD3, ctr3StoreAVX2
-	MOVQ $10, itr2
-
-sealAVX2IntroLoop:
-	VMOVDQA CC3, tmpStoreAVX2
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3)
-	VMOVDQA tmpStoreAVX2, CC3
-	VMOVDQA CC1, tmpStoreAVX2
-	chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1)
-	VMOVDQA tmpStoreAVX2, CC1
-
-	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0
-	VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1
-	VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2
-	VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3
-
-	VMOVDQA CC3, tmpStoreAVX2
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3)
-	VMOVDQA tmpStoreAVX2, CC3
-	VMOVDQA CC1, tmpStoreAVX2
-	chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1)
-	VMOVDQA tmpStoreAVX2, CC1
-
-	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0
-	VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1
-	VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2
-	VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3
-	DECQ itr2
-	JNE sealAVX2IntroLoop
-
-	VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
-	VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
-	VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
-	VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
-
-	VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127
-	VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key
-	VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95
-
-	// Clamp and store poly key
-	VPAND ·polyClampMask<>(SB), DD0, DD0
-	VMOVDQA DD0, rsStoreAVX2
-
-	// Hash AD
-	MOVQ ad_len+80(FP), itr2
-	CALL polyHashADInternal<>(SB)
-
-	// Can store at least 320 bytes
-	VPXOR (0*32)(inp), AA0, AA0
-	VPXOR (1*32)(inp), CC0, CC0
-	VMOVDQU AA0, (0*32)(oup)
-	VMOVDQU CC0, (1*32)(oup)
-
-	VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
-	VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0
-	VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup)
-	VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
-	VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0
-	VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup)
-
-	MOVQ $320, itr1
-	SUBQ $320, inl
-	LEAQ 320(inp), inp
-
-	VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0
-	CMPQ inl, $128
-	JBE sealAVX2SealHash
-
-	VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0
-	VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup)
-	SUBQ $128, inl
-	LEAQ 128(inp), inp
-
-	MOVQ $8, itr1
-	MOVQ $2, itr2
-
-	CMPQ inl, $128
-	JBE sealAVX2Tail128
-	CMPQ inl, $256
-	JBE sealAVX2Tail256
-	CMPQ inl, $384
-	JBE sealAVX2Tail384
-	CMPQ inl, $512
-	JBE sealAVX2Tail512
-
-	// We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop
-	VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
-	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
-	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
-	VMOVDQA ctr3StoreAVX2, DD0
-	VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
-	VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
-
-	VMOVDQA CC3, tmpStoreAVX2
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3)
-	VMOVDQA tmpStoreAVX2, CC3
-	VMOVDQA CC1, tmpStoreAVX2
-	chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1)
-	VMOVDQA tmpStoreAVX2, CC1
-
-	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0
-	VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1
-	VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2
-	VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3
-
-	VMOVDQA CC3, tmpStoreAVX2
-	chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3)
-	VMOVDQA tmpStoreAVX2, CC3
-	VMOVDQA CC1, tmpStoreAVX2
-	chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1)
-	VMOVDQA tmpStoreAVX2, CC1
-
-	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0
-	VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1
-	VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2
-	VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-
-	SUBQ $16, oup // Adjust the pointer
-	MOVQ $9, itr1
-	JMP sealAVX2InternalLoopStart
-
-sealAVX2MainLoop:
-	// Load state, increment counter blocks, store the incremented counters
-	VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
-	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
-	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
-	VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
-	VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
-	MOVQ $10, itr1
-
-sealAVX2InternalLoop:
-	polyAdd(0*8(oup))
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	polyMulStage1_AVX2
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
-	polyMulStage2_AVX2
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	polyMulStage3_AVX2
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-	polyMulReduceStage
-
-sealAVX2InternalLoopStart:
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
-	polyAdd(2*8(oup))
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	polyMulStage1_AVX2
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-	polyMulStage2_AVX2
-	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
-	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	polyMulStage3_AVX2
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
-	polyMulReduceStage
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	polyAdd(4*8(oup))
-	LEAQ (6*8)(oup), oup
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-	polyMulStage1_AVX2
-	VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
-	VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
-	polyMulStage2_AVX2
-	VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
-	VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
-	polyMulStage3_AVX2
-	VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
-	VMOVDQA CC3, tmpStoreAVX2
-	VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
-	VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
-	VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
-	VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
-	VMOVDQA tmpStoreAVX2, CC3
-	polyMulReduceStage
-	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3
-	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
-	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3
-	DECQ itr1
-	JNE sealAVX2InternalLoop
-
-	VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
-	VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
-	VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
-	VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
-	VMOVDQA CC3, tmpStoreAVX2
-
-	// We only hashed 480 of the 512 bytes available - hash the remaining 32 here
-	polyAdd(0*8(oup))
-	polyMulAVX2
-	LEAQ (4*8)(oup), oup
-	VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0
-	VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0
-	VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup)
-	VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
-	VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0
-	VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup)
-
-	// and here
-	polyAdd(-2*8(oup))
-	polyMulAVX2
-	VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
-	VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0
-	VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup)
-	VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0
-	VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0
-	VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup)
-	LEAQ (32*16)(inp), inp
-	SUBQ $(32*16), inl
-	CMPQ inl, $512
-	JG sealAVX2MainLoop
-
-	// Tail can only hash 480 bytes
-	polyAdd(0*8(oup))
-	polyMulAVX2
-	polyAdd(2*8(oup))
-	polyMulAVX2
-	LEAQ 32(oup), oup
-
-	MOVQ $10, itr1
-	MOVQ $0, 
itr2 - CMPQ inl, $128 - JBE sealAVX2Tail128 - CMPQ inl, $256 - JBE sealAVX2Tail256 - CMPQ inl, $384 - JBE sealAVX2Tail384 - JMP sealAVX2Tail512 - -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 193 bytes -seal192AVX2: - // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks - VMOVDQA AA0, AA1 - VMOVDQA BB0, BB1 - VMOVDQA CC0, CC1 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2 - VMOVDQA BB0, BB2 - VMOVDQA CC0, CC2 - VMOVDQA DD0, DD2 - VMOVDQA DD1, TT3 - MOVQ $10, itr2 - -sealAVX2192InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr2 - JNE sealAVX2192InnerCipherLoop - VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 - VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 - VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 - VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 - - // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 - - // Stream for up to 192 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - -sealAVX2ShortSeal: - // Hash aad - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 - -sealAVX2SealHash: - // itr1 holds the number of bytes encrypted but not yet hashed - CMPQ itr1, $16 - JB sealAVX2ShortSealLoop - polyAdd(0(oup)) - polyMul - SUBQ $16, itr1 - ADDQ $16, oup - JMP sealAVX2SealHash - -sealAVX2ShortSealLoop: - CMPQ inl, $32 - JB sealAVX2ShortTail32 - SUBQ $32, inl - - // Load for encryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp - - // Now can hash - polyAdd(0*8(oup)) - polyMulAVX2 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ (1*32)(oup), oup - - // Shift stream left - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 - VMOVDQA AA1, DD0 - VMOVDQA BB1, AA1 - VMOVDQA CC1, BB1 - VMOVDQA DD1, CC1 - VMOVDQA AA2, DD1 - VMOVDQA BB2, AA2 - JMP sealAVX2ShortSealLoop - -sealAVX2ShortTail32: - CMPQ inl, $16 - VMOVDQA A0, A1 - JB sealAVX2ShortDone - - SUBQ $16, inl - - // Load for encryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp - - // Hash - polyAdd(0*8(oup)) - polyMulAVX2 - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 - -sealAVX2ShortDone: - VZEROUPPER - JMP sealSSETail - -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 321 bytes -seal320AVX2: - // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks - VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 - MOVQ $10, itr2 - -sealAVX2320InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, 
TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr2 - JNE sealAVX2320InnerCipherLoop - - VMOVDQA ·chacha20Constants<>(SB), TT0 - VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 - VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 - VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 - VMOVDQA ·avx2IncMask<>(SB), TT0 - VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD2, DD2 - - // Clamp and store poly key - VPERM2I128 $0x02, AA0, BB0, TT0 - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 - - // Stream for up to 320 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - VPERM2I128 $0x02, AA2, BB2, CC1 - VPERM2I128 $0x02, CC2, DD2, DD1 - VPERM2I128 $0x13, AA2, BB2, AA2 - VPERM2I128 $0x13, CC2, DD2, BB2 - JMP sealAVX2ShortSeal - -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext -sealAVX2Tail128: - // Need to decrypt up to 128 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0 - VMOVDQA state1StoreAVX2, BB0 - VMOVDQA state2StoreAVX2, CC0 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VMOVDQA DD0, DD1 - -sealAVX2Tail128LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - -sealAVX2Tail128LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0 - VPALIGNR $8, CC0, CC0, CC0 - VPALIGNR $12, DD0, DD0, DD0 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0 - VPALIGNR $8, CC0, CC0, CC0 - VPALIGNR $4, DD0, DD0, DD0 - DECQ itr1 - JG sealAVX2Tail128LoopA - DECQ itr2 - JGE sealAVX2Tail128LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA1 - VPADDD state1StoreAVX2, BB0, BB1 - VPADDD state2StoreAVX2, CC0, CC1 - VPADDD DD1, DD0, DD1 - - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - JMP sealAVX2ShortSealLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext -sealAVX2Tail256: - // Need to decrypt up to 256 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 - VMOVDQA 
state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA DD0, TT1 - VMOVDQA DD1, TT2 - -sealAVX2Tail256LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - -sealAVX2Tail256LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr1 - JG sealAVX2Tail256LoopA - DECQ itr2 - JGE sealAVX2Tail256LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 - VPERM2I128 $0x02, CC0, DD0, TT1 - VPERM2I128 $0x13, AA0, BB0, TT2 - VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - MOVQ $128, itr1 - LEAQ 128(inp), inp - SUBQ $128, inl - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - - JMP sealAVX2SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 384 bytes of ciphertext -sealAVX2Tail384: - // Need to decrypt up to 384 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 - -sealAVX2Tail384LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - -sealAVX2Tail384LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; 
VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr1 - JG sealAVX2Tail384LoopA - DECQ itr2 - JGE sealAVX2Tail384LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 - VPERM2I128 $0x02, AA0, BB0, TT0 - VPERM2I128 $0x02, CC0, DD0, TT1 - VPERM2I128 $0x13, AA0, BB0, TT2 - VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, TT0 - VPERM2I128 $0x02, CC1, DD1, TT1 - VPERM2I128 $0x13, AA1, BB1, TT2 - VPERM2I128 $0x13, CC1, DD1, TT3 - VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 - VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) - MOVQ $256, itr1 - LEAQ 256(inp), inp - SUBQ $256, inl - VPERM2I128 $0x02, AA2, BB2, AA0 - VPERM2I128 $0x02, CC2, DD2, BB0 - VPERM2I128 $0x13, AA2, BB2, CC0 - VPERM2I128 $0x13, CC2, DD2, DD0 - - JMP sealAVX2SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 512 bytes of ciphertext -sealAVX2Tail512: - // Need to decrypt up to 512 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - -sealAVX2Tail512LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - -sealAVX2Tail512LoopB: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyAdd(0*8(oup)) - polyMulAVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB 
·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ (4*8)(oup), oup - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - - DECQ itr1 - JG sealAVX2Tail512LoopA - DECQ itr2 - JGE sealAVX2Tail512LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, 
DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3 - VPXOR (0*32)(inp), CC3, CC3 - VMOVDQU CC3, (0*32)(oup) - VPERM2I128 $0x02, CC0, DD0, CC3 - VPXOR (1*32)(inp), CC3, CC3 - VMOVDQU CC3, (1*32)(oup) - VPERM2I128 $0x13, AA0, BB0, CC3 - VPXOR (2*32)(inp), CC3, CC3 - VMOVDQU CC3, (2*32)(oup) - VPERM2I128 $0x13, CC0, DD0, CC3 - VPXOR (3*32)(inp), CC3, CC3 - VMOVDQU CC3, (3*32)(oup) - - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - - VPERM2I128 $0x02, AA2, BB2, AA0 - VPERM2I128 $0x02, CC2, DD2, BB0 - VPERM2I128 $0x13, AA2, BB2, CC0 - VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - - MOVQ $384, itr1 - LEAQ 384(inp), inp - SUBQ $384, inl - VPERM2I128 $0x02, AA3, BB3, AA0 - VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 - VPERM2I128 $0x13, AA3, BB3, CC0 - VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - - JMP sealAVX2SealHash diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go deleted file mode 100644 index 6313898f0a..0000000000 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
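The AVX2 seal path above never issues a rotate instruction: each VPSLLD/VPSRLD/VPXOR triple synthesizes a 32-bit rotation from two shifts, the 16- and 8-bit rotations go through VPSHUFB byte shuffles (the rol16/rol8 masks), and Poly1305 limb work (polyAdd/polyMul) is interleaved between the vector ops to hide latency. A minimal scalar sketch of the quarter round being vectorized, standard library only and not part of this diff:

package main

import (
	"fmt"
	"math/bits"
)

// quarterRound is the scalar ChaCha20 quarter round; the assembly above
// runs four copies per YMM register across four blocks at once.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d = bits.RotateLeft32(d^a, 16) // VPSHUFB with the rol16 mask
	c += d
	b = bits.RotateLeft32(b^c, 12) // VPSLLD $12 / VPSRLD $20 / VPXOR
	a += b
	d = bits.RotateLeft32(d^a, 8) // VPSHUFB with the rol8 mask
	c += d
	b = bits.RotateLeft32(b^c, 7) // VPSLLD $7 / VPSRLD $25 / VPXOR
	return a, b, c, d
}

func main() {
	// Test vector from RFC 8439, section 2.1.1.
	a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d) // ea2a92f4 cb1cf8ce 4581472e 5881c4bb
}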
- -package chacha20poly1305 - -import ( - "encoding/binary" - - "golang.org/x/crypto/chacha20" - "golang.org/x/crypto/internal/alias" - "golang.org/x/crypto/internal/poly1305" -) - -func writeWithPadding(p *poly1305.MAC, b []byte) { - p.Write(b) - if rem := len(b) % 16; rem != 0 { - var buf [16]byte - padLen := 16 - rem - p.Write(buf[:padLen]) - } -} - -func writeUint64(p *poly1305.MAC, n int) { - var buf [8]byte - binary.LittleEndian.PutUint64(buf[:], uint64(n)) - p.Write(buf[:]) -} - -func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte { - ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) - ciphertext, tag := out[:len(plaintext)], out[len(plaintext):] - if alias.InexactOverlap(out, plaintext) { - panic("chacha20poly1305: invalid buffer overlap") - } - - var polyKey [32]byte - s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) - s.XORKeyStream(polyKey[:], polyKey[:]) - s.SetCounter(1) // set the counter to 1, skipping 32 bytes - s.XORKeyStream(ciphertext, plaintext) - - p := poly1305.New(&polyKey) - writeWithPadding(p, additionalData) - writeWithPadding(p, ciphertext) - writeUint64(p, len(additionalData)) - writeUint64(p, len(plaintext)) - p.Sum(tag[:0]) - - return ret -} - -func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - tag := ciphertext[len(ciphertext)-16:] - ciphertext = ciphertext[:len(ciphertext)-16] - - var polyKey [32]byte - s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) - s.XORKeyStream(polyKey[:], polyKey[:]) - s.SetCounter(1) // set the counter to 1, skipping 32 bytes - - p := poly1305.New(&polyKey) - writeWithPadding(p, additionalData) - writeWithPadding(p, ciphertext) - writeUint64(p, len(additionalData)) - writeUint64(p, len(ciphertext)) - - ret, out := sliceForAppend(dst, len(ciphertext)) - if alias.InexactOverlap(out, ciphertext) { - panic("chacha20poly1305: invalid buffer overlap") - } - if !p.Verify(tag) { - for i := range out { - out[i] = 0 - } - return nil, errOpen - } - - s.XORKeyStream(out, ciphertext) - return ret, nil -} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go deleted file mode 100644 index 34e6ab1df8..0000000000 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || !gc || purego - -package chacha20poly1305 - -func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { - return c.sealGeneric(dst, nonce, plaintext, additionalData) -} - -func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - return c.openGeneric(dst, nonce, ciphertext, additionalData) -} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go deleted file mode 100644 index 1cebfe946f..0000000000 --- a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
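sealGeneric and openGeneric above feed Poly1305 with a fixed layout: the AAD and the ciphertext are each zero-padded to a 16-byte boundary via writeWithPadding, then their byte lengths follow as little-endian uint64s via writeUint64 (RFC 8439, section 2.8). A stand-alone sketch of that layout; poly1305Input is an illustrative helper, not from the diff:

package main

import "encoding/binary"

// poly1305Input assembles the byte stream the deleted generic code writes
// into the MAC: pad16(aad) || pad16(ciphertext) || le64(len(aad)) || le64(len(ct)).
func poly1305Input(aad, ciphertext []byte) []byte {
	pad16 := func(dst, b []byte) []byte {
		dst = append(dst, b...)
		if rem := len(b) % 16; rem != 0 {
			dst = append(dst, make([]byte, 16-rem)...) // zero padding
		}
		return dst
	}
	var msg []byte
	msg = pad16(msg, aad)
	msg = pad16(msg, ciphertext)
	var lens [16]byte
	binary.LittleEndian.PutUint64(lens[0:8], uint64(len(aad)))
	binary.LittleEndian.PutUint64(lens[8:16], uint64(len(ciphertext)))
	return append(msg, lens[:]...)
}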
- -package chacha20poly1305 - -import ( - "crypto/cipher" - "errors" - - "golang.org/x/crypto/chacha20" -) - -type xchacha20poly1305 struct { - key [KeySize]byte -} - -// NewX returns a XChaCha20-Poly1305 AEAD that uses the given 256-bit key. -// -// XChaCha20-Poly1305 is a ChaCha20-Poly1305 variant that takes a longer nonce, -// suitable to be generated randomly without risk of collisions. It should be -// preferred when nonce uniqueness cannot be trivially ensured, or whenever -// nonces are randomly generated. -func NewX(key []byte) (cipher.AEAD, error) { - if len(key) != KeySize { - return nil, errors.New("chacha20poly1305: bad key length") - } - ret := new(xchacha20poly1305) - copy(ret.key[:], key) - return ret, nil -} - -func (*xchacha20poly1305) NonceSize() int { - return NonceSizeX -} - -func (*xchacha20poly1305) Overhead() int { - return Overhead -} - -func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { - if len(nonce) != NonceSizeX { - panic("chacha20poly1305: bad nonce length passed to Seal") - } - - // XChaCha20-Poly1305 technically supports a 64-bit counter, so there is no - // size limit. However, since we reuse the ChaCha20-Poly1305 implementation, - // the second half of the counter is not available. This is unlikely to be - // an issue because the cipher.AEAD API requires the entire message to be in - // memory, and the counter overflows at 256 GB. - if uint64(len(plaintext)) > (1<<38)-64 { - panic("chacha20poly1305: plaintext too large") - } - - c := new(chacha20poly1305) - hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) - copy(c.key[:], hKey) - - // The first 4 bytes of the final nonce are unused counter space. - cNonce := make([]byte, NonceSize) - copy(cNonce[4:12], nonce[16:24]) - - return c.seal(dst, cNonce[:], plaintext, additionalData) -} - -func (x *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - if len(nonce) != NonceSizeX { - panic("chacha20poly1305: bad nonce length passed to Open") - } - if len(ciphertext) < 16 { - return nil, errOpen - } - if uint64(len(ciphertext)) > (1<<38)-48 { - panic("chacha20poly1305: ciphertext too large") - } - - c := new(chacha20poly1305) - hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) - copy(c.key[:], hKey) - - // The first 4 bytes of the final nonce are unused counter space. - cNonce := make([]byte, NonceSize) - copy(cNonce[4:12], nonce[16:24]) - - return c.open(dst, cNonce[:], ciphertext, additionalData) -} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go deleted file mode 100644 index 2492f796af..0000000000 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ /dev/null @@ -1,825 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cryptobyte - -import ( - encoding_asn1 "encoding/asn1" - "fmt" - "math/big" - "reflect" - "time" - - "golang.org/x/crypto/cryptobyte/asn1" -) - -// This file contains ASN.1-related methods for String and Builder. - -// Builder - -// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. -func (b *Builder) AddASN1Int64(v int64) { - b.addASN1Signed(asn1.INTEGER, v) -} - -// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the -// given tag. -func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { - b.addASN1Signed(tag, v) -} - -// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. 
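The XChaCha20-Poly1305 wrapper above reduces to key and nonce preprocessing in front of the ordinary 12-byte-nonce AEAD: HChaCha20 mixes the key with the first 16 nonce bytes into a one-off subkey, and the last 8 nonce bytes become positions 4..11 of a fresh standard nonce whose first 4 bytes stay zero (the unused counter space the comments mention). A sketch of just that derivation, assuming golang.org/x/crypto/chacha20; deriveXChaCha is an illustrative name:

package main

import "golang.org/x/crypto/chacha20"

// deriveXChaCha maps a 24-byte XChaCha20 nonce onto a (subkey, 12-byte
// nonce) pair for the plain ChaCha20-Poly1305 core, as Seal/Open do above.
func deriveXChaCha(key, nonce []byte) (subKey, cNonce []byte, err error) {
	if len(nonce) != 24 {
		panic("deriveXChaCha: need a 24-byte nonce")
	}
	subKey, err = chacha20.HChaCha20(key, nonce[0:16])
	if err != nil {
		return nil, nil, err
	}
	cNonce = make([]byte, 12)
	copy(cNonce[4:12], nonce[16:24]) // the first 4 bytes remain zero
	return subKey, cNonce, nil
}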
-func (b *Builder) AddASN1Enum(v int64) { - b.addASN1Signed(asn1.ENUM, v) -} - -func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { - b.AddASN1(tag, func(c *Builder) { - length := 1 - for i := v; i >= 0x80 || i < -0x80; i >>= 8 { - length++ - } - - for ; length > 0; length-- { - i := v >> uint((length-1)*8) & 0xff - c.AddUint8(uint8(i)) - } - }) -} - -// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. -func (b *Builder) AddASN1Uint64(v uint64) { - b.AddASN1(asn1.INTEGER, func(c *Builder) { - length := 1 - for i := v; i >= 0x80; i >>= 8 { - length++ - } - - for ; length > 0; length-- { - i := v >> uint((length-1)*8) & 0xff - c.AddUint8(uint8(i)) - } - }) -} - -// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. -func (b *Builder) AddASN1BigInt(n *big.Int) { - if b.err != nil { - return - } - - b.AddASN1(asn1.INTEGER, func(c *Builder) { - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement form. So we - // invert and subtract 1. If the most-significant-bit isn't set then - // we'll need to pad the beginning with 0xff in order to keep the number - // negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - c.add(0xff) - } - c.add(bytes...) - } else if n.Sign() == 0 { - c.add(0) - } else { - bytes := n.Bytes() - if bytes[0]&0x80 != 0 { - c.add(0) - } - c.add(bytes...) - } - }) -} - -// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. -func (b *Builder) AddASN1OctetString(bytes []byte) { - b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { - c.AddBytes(bytes) - }) -} - -const generalizedTimeFormatStr = "20060102150405Z0700" - -// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. -func (b *Builder) AddASN1GeneralizedTime(t time.Time) { - if t.Year() < 0 || t.Year() > 9999 { - b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) - return - } - b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { - c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) - }) -} - -// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. -func (b *Builder) AddASN1UTCTime(t time.Time) { - b.AddASN1(asn1.UTCTime, func(c *Builder) { - // As utilized by the X.509 profile, UTCTime can only - // represent the years 1950 through 2049. - if t.Year() < 1950 || t.Year() >= 2050 { - b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) - return - } - c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) - }) -} - -// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not -// support BIT STRINGs that are not a whole number of bytes. 
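The negative branch of AddASN1BigInt above produces two's-complement content octets by inverting the bytes of -n-1 and prepending 0xff whenever the leading bit would otherwise read as positive. A stand-alone sketch of that transform; derNegIntBytes is an illustrative name, not from the diff:

package main

import "math/big"

// derNegIntBytes returns the DER INTEGER content octets for a negative n,
// mirroring the logic in the deleted AddASN1BigInt.
func derNegIntBytes(n *big.Int) []byte {
	if n.Sign() >= 0 {
		panic("derNegIntBytes: negative inputs only")
	}
	m := new(big.Int).Neg(n)
	m.Sub(m, big.NewInt(1)) // m = -n - 1
	bs := m.Bytes()
	for i := range bs {
		bs[i] ^= 0xff // bitwise NOT of m's magnitude
	}
	if len(bs) == 0 || bs[0]&0x80 == 0 {
		bs = append([]byte{0xff}, bs...) // keep the value reading as negative
	}
	return bs // e.g. n = -129 encodes as ff 7f, n = -1 as ff
}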
-func (b *Builder) AddASN1BitString(data []byte) { - b.AddASN1(asn1.BIT_STRING, func(b *Builder) { - b.AddUint8(0) - b.AddBytes(data) - }) -} - -func (b *Builder) addBase128Int(n int64) { - var length int - if n == 0 { - length = 1 - } else { - for i := n; i > 0; i >>= 7 { - length++ - } - } - - for i := length - 1; i >= 0; i-- { - o := byte(n >> uint(i*7)) - o &= 0x7f - if i != 0 { - o |= 0x80 - } - - b.add(o) - } -} - -func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { - if len(oid) < 2 { - return false - } - - if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { - return false - } - - for _, v := range oid { - if v < 0 { - return false - } - } - - return true -} - -func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { - b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { - if !isValidOID(oid) { - b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) - return - } - - b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) - for _, v := range oid[2:] { - b.addBase128Int(int64(v)) - } - }) -} - -func (b *Builder) AddASN1Boolean(v bool) { - b.AddASN1(asn1.BOOLEAN, func(b *Builder) { - if v { - b.AddUint8(0xff) - } else { - b.AddUint8(0) - } - }) -} - -func (b *Builder) AddASN1NULL() { - b.add(uint8(asn1.NULL), 0) -} - -// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if -// successful or records an error if one occurred. -func (b *Builder) MarshalASN1(v interface{}) { - // NOTE(martinkr): This is somewhat of a hack to allow propagation of - // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a - // value embedded into a struct, its tag information is lost. - if b.err != nil { - return - } - bytes, err := encoding_asn1.Marshal(v) - if err != nil { - b.err = err - return - } - b.AddBytes(bytes) -} - -// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. -// Tags greater than 30 are not supported and result in an error (i.e. -// low-tag-number form only). The child builder passed to the -// BuilderContinuation can be used to build the content of the ASN.1 object. -func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { - if b.err != nil { - return - } - // Identifiers with the low five bits set indicate high-tag-number format - // (two or more octets), which we don't support. - if tag&0x1f == 0x1f { - b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) - return - } - b.AddUint8(uint8(tag)) - b.addLengthPrefixed(1, true, f) -} - -// String - -// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean -// representation into out and advances. It reports whether the read -// was successful. -func (s *String) ReadASN1Boolean(out *bool) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { - return false - } - - switch bytes[0] { - case 0: - *out = false - case 0xff: - *out = true - default: - return false - } - - return true -} - -// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does -// not point to an integer, to a big.Int, or to a []byte it panics. Only -// positive and zero values can be decoded into []byte, and they are returned as -// big-endian binary values that share memory with s. Positive values will have -// no leading zeroes, and zero will be returned as a single zero byte. -// ReadASN1Integer reports whether the read was successful. 
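The OID writer above packs the first two arcs into the single value 40*arc1 + arc2 and emits every subidentifier in base 128, seven bits per octet with the high bit flagging continuation. An illustrative sketch of that encoding; base128 is a hypothetical helper, not from the diff:

package main

import "fmt"

// base128 encodes n in big-endian groups of 7 bits; the continuation bit
// is set on every octet except the last, as in the deleted addBase128Int.
func base128(n int64) []byte {
	length := 1
	for i := n; i > 0x7f; i >>= 7 {
		length++
	}
	out := make([]byte, length)
	for i := 0; i < length; i++ {
		out[i] = byte(n>>(uint(length-1-i)*7)) & 0x7f
		if i != length-1 {
			out[i] |= 0x80 // more octets follow
		}
	}
	return out
}

func main() {
	// Encode the prefix of 1.2.840.113549 (RSA): the first byte is 40*1+2.
	oid := append(base128(40*1+2), base128(840)...)
	oid = append(oid, base128(113549)...)
	fmt.Printf("% x\n", oid) // 2a 86 48 86 f7 0d
}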
-func (s *String) ReadASN1Integer(out interface{}) bool { - switch out := out.(type) { - case *int, *int8, *int16, *int32, *int64: - var i int64 - if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { - return false - } - reflect.ValueOf(out).Elem().SetInt(i) - return true - case *uint, *uint8, *uint16, *uint32, *uint64: - var u uint64 - if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { - return false - } - reflect.ValueOf(out).Elem().SetUint(u) - return true - case *big.Int: - return s.readASN1BigInt(out) - case *[]byte: - return s.readASN1Bytes(out) - default: - panic("out does not point to an integer type") - } -} - -func checkASN1Integer(bytes []byte) bool { - if len(bytes) == 0 { - // An INTEGER is encoded with at least one octet. - return false - } - if len(bytes) == 1 { - return true - } - if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { - // Value is not minimally encoded. - return false - } - return true -} - -var bigOne = big.NewInt(1) - -func (s *String) readASN1BigInt(out *big.Int) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { - return false - } - if bytes[0]&0x80 == 0x80 { - // Negative number. - neg := make([]byte, len(bytes)) - for i, b := range bytes { - neg[i] = ^b - } - out.SetBytes(neg) - out.Add(out, bigOne) - out.Neg(out) - } else { - out.SetBytes(bytes) - } - return true -} - -func (s *String) readASN1Bytes(out *[]byte) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { - return false - } - if bytes[0]&0x80 == 0x80 { - return false - } - for len(bytes) > 1 && bytes[0] == 0 { - bytes = bytes[1:] - } - *out = bytes - return true -} - -func (s *String) readASN1Int64(out *int64) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { - return false - } - return true -} - -func asn1Signed(out *int64, n []byte) bool { - length := len(n) - if length > 8 { - return false - } - for i := 0; i < length; i++ { - *out <<= 8 - *out |= int64(n[i]) - } - // Shift up and down in order to sign extend the result. - *out <<= 64 - uint8(length)*8 - *out >>= 64 - uint8(length)*8 - return true -} - -func (s *String) readASN1Uint64(out *uint64) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { - return false - } - return true -} - -func asn1Unsigned(out *uint64, n []byte) bool { - length := len(n) - if length > 9 || length == 9 && n[0] != 0 { - // Too large for uint64. - return false - } - if n[0]&0x80 != 0 { - // Negative number. - return false - } - for i := 0; i < length; i++ { - *out <<= 8 - *out |= uint64(n[i]) - } - return true -} - -// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out -// and advances. It reports whether the read was successful and resulted in a -// value that can be represented in an int64. -func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { - var bytes String - return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) -} - -// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports -// whether the read was successful. 
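asn1Signed above widens a big-endian two's-complement value of up to eight octets into an int64 with a shift pair: pushing the accumulated bits to the top of the word and arithmetically shifting back replicates the sign bit. A stand-alone sketch; signExtend is an illustrative name, and callers must bound len(n) at 8 as asn1Signed does:

package main

import "fmt"

func signExtend(n []byte) int64 {
	var out int64
	for _, b := range n {
		out = out<<8 | int64(b)
	}
	shift := 64 - uint(len(n))*8
	return out << shift >> shift // arithmetic right shift restores the sign
}

func main() {
	fmt.Println(signExtend([]byte{0xff}))       // -1
	fmt.Println(signExtend([]byte{0x00, 0xff})) // 255
	fmt.Println(signExtend([]byte{0x80, 0x00})) // -32768
}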
-func (s *String) ReadASN1Enum(out *int) bool { - var bytes String - var i int64 - if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { - return false - } - if int64(int(i)) != i { - return false - } - *out = int(i) - return true -} - -func (s *String) readBase128Int(out *int) bool { - ret := 0 - for i := 0; len(*s) > 0; i++ { - if i == 5 { - return false - } - // Avoid overflowing int on a 32-bit platform. - // We don't want different behavior based on the architecture. - if ret >= 1<<(31-7) { - return false - } - ret <<= 7 - b := s.read(1)[0] - - // ITU-T X.690, section 8.19.2: - // The subidentifier shall be encoded in the fewest possible octets, - // that is, the leading octet of the subidentifier shall not have the value 0x80. - if i == 0 && b == 0x80 { - return false - } - - ret |= int(b & 0x7f) - if b&0x80 == 0 { - *out = ret - return true - } - } - return false // truncated -} - -// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and -// advances. It reports whether the read was successful. -func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { - return false - } - - // In the worst case, we get two elements from the first byte (which is - // encoded differently) and then every varint is a single byte long. - components := make([]int, len(bytes)+1) - - // The first varint is 40*value1 + value2: - // According to this packing, value1 can take the values 0, 1 and 2 only. - // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, - // then there are no restrictions on value2. - var v int - if !bytes.readBase128Int(&v) { - return false - } - if v < 80 { - components[0] = v / 40 - components[1] = v % 40 - } else { - components[0] = 2 - components[1] = v - 80 - } - - i := 2 - for ; len(bytes) > 0; i++ { - if !bytes.readBase128Int(&v) { - return false - } - components[i] = v - } - *out = components[:i] - return true -} - -// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and -// advances. It reports whether the read was successful. -func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { - return false - } - t := string(bytes) - res, err := time.Parse(generalizedTimeFormatStr, t) - if err != nil { - return false - } - if serialized := res.Format(generalizedTimeFormatStr); serialized != t { - return false - } - *out = res - return true -} - -const defaultUTCTimeFormatStr = "060102150405Z0700" - -// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. -// It reports whether the read was successful. -func (s *String) ReadASN1UTCTime(out *time.Time) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.UTCTime) { - return false - } - t := string(bytes) - - formatStr := defaultUTCTimeFormatStr - var err error - res, err := time.Parse(formatStr, t) - if err != nil { - // Fallback to minute precision if we can't parse second - // precision. If we are following X.509 or X.690 we shouldn't - // support this, but we do. - formatStr = "0601021504Z0700" - res, err = time.Parse(formatStr, t) - } - if err != nil { - return false - } - - if serialized := res.Format(formatStr); serialized != t { - return false - } - - if res.Year() >= 2050 { - // UTCTime interprets the low order digits 50-99 as 1950-99. - // This only applies to its use in the X.509 profile. 
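ReadASN1ObjectIdentifier above inverts the first-byte packing when decoding: a first subidentifier below 80 splits as (v/40, v%40), and anything else is forced into arc 2, since arcs 0 and 1 only permit a second arc of at most 39. A minimal sketch; splitFirstArc is an illustrative name:

package main

import "fmt"

func splitFirstArc(v int) (int, int) {
	if v < 80 {
		return v / 40, v % 40
	}
	return 2, v - 80
}

func main() {
	fmt.Println(splitFirstArc(42))  // 1 2
	fmt.Println(splitFirstArc(999)) // 2 919
}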
- // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 - res = res.AddDate(-100, 0, 0) - } - *out = res - return true -} - -// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. -// It reports whether the read was successful. -func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 || - len(bytes)*8/8 != len(bytes) { - return false - } - - paddingBits := bytes[0] - bytes = bytes[1:] - if paddingBits > 7 || - len(bytes) == 0 && paddingBits != 0 || - len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) { - return false - } - - lenBytes := String((*s)[2 : 2+lenLen]) - if !lenBytes.readUnsigned(&len32, int(lenLen)) { - return false - } - - // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length - // with the minimum number of octets. - if len32 < 128 { - // Length should have used short-form encoding. - return false - } - if len32>>((lenLen-1)*8) == 0 { - // Leading octet is 0. Length should have been at least one byte shorter. - return false - } - - headerLen = 2 + uint32(lenLen) - if headerLen+len32 < len32 { - // Overflow. - return false - } - length = headerLen + len32 - } - - if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { - return false - } - if skipHeader && !out.Skip(int(headerLen)) { - panic("cryptobyte: internal error") - } - - return true -} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go deleted file mode 100644 index cda8e3edfd..0000000000 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package asn1 contains supporting types for parsing and building ASN.1 -// messages with the cryptobyte package. -package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" - -// Tag represents an ASN.1 identifier octet, consisting of a tag number -// (indicating a type) and class (such as context-specific or constructed). -// -// Methods in the cryptobyte package only support the low-tag-number form, i.e. -// a single identifier octet with bits 7-8 encoding the class and bits 1-6 -// encoding the tag number. -type Tag uint8 - -const ( - classConstructed = 0x20 - classContextSpecific = 0x80 -) - -// Constructed returns t with the constructed class bit set. -func (t Tag) Constructed() Tag { return t | classConstructed } - -// ContextSpecific returns t with the context-specific class bit set. -func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } - -// The following is a list of standard tag and class combinations. -const ( - BOOLEAN = Tag(1) - INTEGER = Tag(2) - BIT_STRING = Tag(3) - OCTET_STRING = Tag(4) - NULL = Tag(5) - OBJECT_IDENTIFIER = Tag(6) - ENUM = Tag(10) - UTF8String = Tag(12) - SEQUENCE = Tag(16 | classConstructed) - SET = Tag(17 | classConstructed) - PrintableString = Tag(19) - T61String = Tag(20) - IA5String = Tag(22) - UTCTime = Tag(23) - GeneralizedTime = Tag(24) - GeneralString = Tag(27) -) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go deleted file mode 100644 index cf254f5f1e..0000000000 --- a/vendor/golang.org/x/crypto/cryptobyte/builder.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cryptobyte - -import ( - "errors" - "fmt" -) - -// A Builder builds byte strings from fixed-length and length-prefixed values. -// Builders either allocate space as needed, or are ‘fixed’, which means that -// they write into a given buffer and produce an error if it's exhausted. -// -// The zero value is a usable Builder that allocates space as needed. -// -// Simple values are marshaled and appended to a Builder using methods on the -// Builder. Length-prefixed values are marshaled by providing a -// BuilderContinuation, which is a function that writes the inner contents of -// the value to a given Builder. See the documentation for BuilderContinuation -// for details. -type Builder struct { - err error - result []byte - fixedSize bool - child *Builder - offset int - pendingLenLen int - pendingIsASN1 bool - inContinuation *bool -} - -// NewBuilder creates a Builder that appends its output to the given buffer. -// Like append(), the slice will be reallocated if its capacity is exceeded. -// Use Bytes to get the final buffer. -func NewBuilder(buffer []byte) *Builder { - return &Builder{ - result: buffer, - } -} - -// NewFixedBuilder creates a Builder that appends its output into the given -// buffer. This builder does not reallocate the output buffer. Writes that -// would exceed the buffer's capacity are treated as an error. -func NewFixedBuilder(buffer []byte) *Builder { - return &Builder{ - result: buffer, - fixedSize: true, - } -} - -// SetError sets the value to be returned as the error from Bytes. Writes -// performed after calling SetError are ignored. -func (b *Builder) SetError(err error) { - b.err = err -} - -// Bytes returns the bytes written by the builder or an error if one has -// occurred during building. -func (b *Builder) Bytes() ([]byte, error) { - if b.err != nil { - return nil, b.err - } - return b.result[b.offset:], nil -} - -// BytesOrPanic returns the bytes written by the builder or panics if an error -// has occurred during building. -func (b *Builder) BytesOrPanic() []byte { - if b.err != nil { - panic(b.err) - } - return b.result[b.offset:] -} - -// AddUint8 appends an 8-bit value to the byte string. -func (b *Builder) AddUint8(v uint8) { - b.add(byte(v)) -} - -// AddUint16 appends a big-endian, 16-bit value to the byte string. -func (b *Builder) AddUint16(v uint16) { - b.add(byte(v>>8), byte(v)) -} - -// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest -// byte of the 32-bit input value is silently truncated. -func (b *Builder) AddUint24(v uint32) { - b.add(byte(v>>16), byte(v>>8), byte(v)) -} - -// AddUint32 appends a big-endian, 32-bit value to the byte string. -func (b *Builder) AddUint32(v uint32) { - b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -// AddUint48 appends a big-endian, 48-bit value to the byte string. -func (b *Builder) AddUint48(v uint64) { - b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -// AddUint64 appends a big-endian, 64-bit value to the byte string. -func (b *Builder) AddUint64(v uint64) { - b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -// AddBytes appends a sequence of bytes to the byte string. -func (b *Builder) AddBytes(v []byte) { - b.add(v...) -} - -// BuilderContinuation is a continuation-passing interface for building -// length-prefixed byte sequences. 
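A short usage sketch contrasting the two constructors above: the zero value (or NewBuilder) grows its buffer like append, while NewFixedBuilder treats the buffer's capacity as a hard limit and surfaces overflow as an error from Bytes. Assumes golang.org/x/crypto/cryptobyte as vendored here:

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	b := cryptobyte.NewFixedBuilder(make([]byte, 0, 4))
	b.AddUint32(0xdeadbeef) // exactly fills the 4-byte capacity
	b.AddUint8(0x01)        // would exceed it: recorded as an error
	if _, err := b.Bytes(); err != nil {
		fmt.Println("overflow:", err) // Builder is exceeding its fixed-size buffer
	}
}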
Builder methods for length-prefixed -// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation -// supplied to them. The child builder passed to the continuation can be used -// to build the content of the length-prefixed sequence. For example: -// -// parent := cryptobyte.NewBuilder() -// parent.AddUint8LengthPrefixed(func (child *Builder) { -// child.AddUint8(42) -// child.AddUint8LengthPrefixed(func (grandchild *Builder) { -// grandchild.AddUint8(5) -// }) -// }) -// -// It is an error to write more bytes to the child than allowed by the reserved -// length prefix. After the continuation returns, the child must be considered -// invalid, i.e. users must not store any copies or references of the child -// that outlive the continuation. -// -// If the continuation panics with a value of type BuildError then the inner -// error will be returned as the error from Bytes. If the child panics -// otherwise then Bytes will repanic with the same value. -type BuilderContinuation func(child *Builder) - -// BuildError wraps an error. If a BuilderContinuation panics with this value, -// the panic will be recovered and the inner error will be returned from -// Builder.Bytes. -type BuildError struct { - Err error -} - -// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. -func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(1, false, f) -} - -// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. -func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(2, false, f) -} - -// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. -func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(3, false, f) -} - -// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. -func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(4, false, f) -} - -func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { - if !*b.inContinuation { - *b.inContinuation = true - - defer func() { - *b.inContinuation = false - - r := recover() - if r == nil { - return - } - - if buildError, ok := r.(BuildError); ok { - b.err = buildError.Err - } else { - panic(r) - } - }() - } - - f(arg) -} - -func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { - // Subsequent writes can be ignored if the builder has encountered an error. - if b.err != nil { - return - } - - offset := len(b.result) - b.add(make([]byte, lenLen)...) - - if b.inContinuation == nil { - b.inContinuation = new(bool) - } - - b.child = &Builder{ - result: b.result, - fixedSize: b.fixedSize, - offset: offset, - pendingLenLen: lenLen, - pendingIsASN1: isASN1, - inContinuation: b.inContinuation, - } - - b.callContinuation(f, b.child) - b.flushChild() - if b.child != nil { - panic("cryptobyte: internal error") - } -} - -func (b *Builder) flushChild() { - if b.child == nil { - return - } - b.child.flushChild() - child := b.child - b.child = nil - - if child.err != nil { - b.err = child.err - return - } - - length := len(child.result) - child.pendingLenLen - child.offset - - if length < 0 { - panic("cryptobyte: internal error") // result unexpectedly shrunk - } - - if child.pendingIsASN1 { - // For ASN.1, we reserved a single byte for the length. If that turned out - // to be incorrect, we have to move the contents along in order to make - // space. 
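A sketch of the BuildError escape hatch described above: a continuation that panics with BuildError has the wrapped error recovered and returned from Bytes, while any other panic propagates unchanged. buildWithAbort is an illustrative name:

package main

import (
	"errors"

	"golang.org/x/crypto/cryptobyte"
)

func buildWithAbort() ([]byte, error) {
	var b cryptobyte.Builder
	b.AddUint16LengthPrefixed(func(child *cryptobyte.Builder) {
		child.AddUint8(42)
		panic(cryptobyte.BuildError{Err: errors.New("value out of range")})
	})
	return b.Bytes() // returns the "value out of range" error
}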
- if child.pendingLenLen != 1 { - panic("cryptobyte: internal error") - } - var lenLen, lenByte uint8 - if int64(length) > 0xfffffffe { - b.err = errors.New("pending ASN.1 child too long") - return - } else if length > 0xffffff { - lenLen = 5 - lenByte = 0x80 | 4 - } else if length > 0xffff { - lenLen = 4 - lenByte = 0x80 | 3 - } else if length > 0xff { - lenLen = 3 - lenByte = 0x80 | 2 - } else if length > 0x7f { - lenLen = 2 - lenByte = 0x80 | 1 - } else { - lenLen = 1 - lenByte = uint8(length) - length = 0 - } - - // Insert the initial length byte, make space for successive length bytes, - // and adjust the offset. - child.result[child.offset] = lenByte - extraBytes := int(lenLen - 1) - if extraBytes != 0 { - child.add(make([]byte, extraBytes)...) - childStart := child.offset + child.pendingLenLen - copy(child.result[childStart+extraBytes:], child.result[childStart:]) - } - child.offset++ - child.pendingLenLen = extraBytes - } - - l := length - for i := child.pendingLenLen - 1; i >= 0; i-- { - child.result[child.offset+i] = uint8(l) - l >>= 8 - } - if l != 0 { - b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) - return - } - - if b.fixedSize && &b.result[0] != &child.result[0] { - panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") - } - - b.result = child.result -} - -func (b *Builder) add(bytes ...byte) { - if b.err != nil { - return - } - if b.child != nil { - panic("cryptobyte: attempted write while child is pending") - } - if len(b.result)+len(bytes) < len(bytes) { - b.err = errors.New("cryptobyte: length overflow") - } - if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { - b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") - return - } - b.result = append(b.result, bytes...) -} - -// Unwrite rolls back non-negative n bytes written directly to the Builder. -// An attempt by a child builder passed to a continuation to unwrite bytes -// from its parent will panic. -func (b *Builder) Unwrite(n int) { - if b.err != nil { - return - } - if b.child != nil { - panic("cryptobyte: attempted unwrite while child is pending") - } - length := len(b.result) - b.pendingLenLen - b.offset - if length < 0 { - panic("cryptobyte: internal error") - } - if n < 0 { - panic("cryptobyte: attempted to unwrite negative number of bytes") - } - if n > length { - panic("cryptobyte: attempted to unwrite more than was written") - } - b.result = b.result[:len(b.result)-n] -} - -// A MarshalingValue marshals itself into a Builder. -type MarshalingValue interface { - // Marshal is called by Builder.AddValue. It receives a pointer to a builder - // to marshal itself into. It may return an error that occurred during - // marshaling, such as unset or invalid values. - Marshal(b *Builder) error -} - -// AddValue calls Marshal on v, passing a pointer to the builder to append to. -// If Marshal returns an error, it is set on the Builder so that subsequent -// appends don't have an effect. -func (b *Builder) AddValue(v MarshalingValue) { - err := v.Marshal(b) - if err != nil { - b.err = err - } -} diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go deleted file mode 100644 index 10692a8a31..0000000000 --- a/vendor/golang.org/x/crypto/cryptobyte/string.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cryptobyte contains types that help with parsing and constructing -// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage -// contains useful ASN.1 constants.) -// -// The String type is for parsing. It wraps a []byte slice and provides helper -// functions for consuming structures, value by value. -// -// The Builder type is for constructing messages. It provides helper functions -// for appending values and also for appending length-prefixed submessages – -// without having to worry about calculating the length prefix ahead of time. -// -// See the documentation and examples for the Builder and String types to get -// started. -package cryptobyte // import "golang.org/x/crypto/cryptobyte" - -// String represents a string of bytes. It provides methods for parsing -// fixed-length and length-prefixed values from it. -type String []byte - -// read advances a String by n bytes and returns them. If less than n bytes -// remain, it returns nil. -func (s *String) read(n int) []byte { - if len(*s) < n || n < 0 { - return nil - } - v := (*s)[:n] - *s = (*s)[n:] - return v -} - -// Skip advances the String by n bytes and reports whether it was successful. -func (s *String) Skip(n int) bool { - return s.read(n) != nil -} - -// ReadUint8 decodes an 8-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint8(out *uint8) bool { - v := s.read(1) - if v == nil { - return false - } - *out = uint8(v[0]) - return true -} - -// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint16(out *uint16) bool { - v := s.read(2) - if v == nil { - return false - } - *out = uint16(v[0])<<8 | uint16(v[1]) - return true -} - -// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint24(out *uint32) bool { - v := s.read(3) - if v == nil { - return false - } - *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) - return true -} - -// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint32(out *uint32) bool { - v := s.read(4) - if v == nil { - return false - } - *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) - return true -} - -// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint48(out *uint64) bool { - v := s.read(6) - if v == nil { - return false - } - *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5]) - return true -} - -// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it. -// It reports whether the read was successful.
-func (s *String) ReadUint64(out *uint64) bool { - v := s.read(8) - if v == nil { - return false - } - *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) - return true -} - -func (s *String) readUnsigned(out *uint32, length int) bool { - v := s.read(length) - if v == nil { - return false - } - var result uint32 - for i := 0; i < length; i++ { - result <<= 8 - result |= uint32(v[i]) - } - *out = result - return true -} - -func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { - lenBytes := s.read(lenLen) - if lenBytes == nil { - return false - } - var length uint32 - for _, b := range lenBytes { - length = length << 8 - length = length | uint32(b) - } - v := s.read(int(length)) - if v == nil { - return false - } - *outChild = v - return true -} - -// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value -// into out and advances over it. It reports whether the read was successful. -func (s *String) ReadUint8LengthPrefixed(out *String) bool { - return s.readLengthPrefixed(1, out) -} - -// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit -// length-prefixed value into out and advances over it. It reports whether the -// read was successful. -func (s *String) ReadUint16LengthPrefixed(out *String) bool { - return s.readLengthPrefixed(2, out) -} - -// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit -// length-prefixed value into out and advances over it. It reports whether -// the read was successful. -func (s *String) ReadUint24LengthPrefixed(out *String) bool { - return s.readLengthPrefixed(3, out) -} - -// ReadBytes reads n bytes into out and advances over them. It reports -// whether the read was successful. -func (s *String) ReadBytes(out *[]byte, n int) bool { - v := s.read(n) - if v == nil { - return false - } - *out = v - return true -} - -// CopyBytes copies len(out) bytes into out and advances over them. It reports -// whether the copy operation was successful -func (s *String) CopyBytes(out []byte) bool { - n := len(out) - v := s.read(n) - if v == nil { - return false - } - return copy(out, v) == n -} - -// Empty reports whether the string does not contain any bytes. 
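The String helpers above compose mechanically: each read either consumes its bytes or reports failure, so a parser is a chain of boolean checks. A minimal, illustrative sketch (not part of this change) that parses a 16-bit length-prefixed field followed by a trailing byte:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	// 0x0002 length prefix, two body bytes, then a trailing uint8.
	input := cryptobyte.String([]byte{0x00, 0x02, 0xaa, 0xbb, 0x2a})

	var body cryptobyte.String
	var tail uint8
	if !input.ReadUint16LengthPrefixed(&body) ||
		!input.ReadUint8(&tail) ||
		!input.Empty() {
		fmt.Println("malformed input")
		return
	}
	fmt.Printf("body=%x tail=%d\n", body, tail) // body=aabb tail=42
}
```

The final Empty check is the idiomatic way to reject trailing junk.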
-func (s String) Empty() bool { - return len(s) == 0 -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index d2ca5deeb9..b3c1699bff 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -19,15 +19,14 @@ #define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ MULHDU r0, h1, t5; \ ADDC t4, t1, t1; \ MULLD r0, h2, t2; \ - ADDZE t5; \ MULHDU r1, h0, t4; \ MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ + ADDE t5, t2, t2; \ ADDC h0, t1, t1; \ MULLD h2, r1, t3; \ ADDZE t4, h0; \ @@ -37,13 +36,11 @@ ADDE t5, t3, t3; \ ADDC h0, t2, t2; \ MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ + ADDE t3, t1, h1; \ SLD $62, t3, t4; \ SRD $2, t2; \ ADDZE h2; \ @@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32 loop: POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + PCALIGN $16 multiply: POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) ADD $-16, R5 diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go deleted file mode 100644 index 8907183ec0..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is -// very similar to PEM except that it has an additional CRC checksum. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package armor // import "golang.org/x/crypto/openpgp/armor" - -import ( - "bufio" - "bytes" - "encoding/base64" - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// A Block represents an OpenPGP armored structure. -// -// The encoded form is: -// -// -----BEGIN Type----- -// Headers -// -// base64-encoded Bytes -// '=' base64 encoded checksum -// -----END Type----- -// -// where Headers is a possibly empty sequence of Key: Value lines. -// -// Since the armored data can be very large, this package presents a streaming -// interface. -type Block struct { - Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). - Header map[string]string // Optional headers. 
- Body io.Reader // A Reader from which the contents can be read - lReader lineReader - oReader openpgpReader -} - -var ArmorCorrupt error = errors.StructuralError("armor invalid") - -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - -var armorStart = []byte("-----BEGIN ") -var armorEnd = []byte("-----END ") -var armorEndOfLine = []byte("-----") - -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. -type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 - crcSet bool -} - -func (l *lineReader) Read(p []byte) (n int, err error) { - if l.eof { - return 0, io.EOF - } - - if len(l.buf) > 0 { - n = copy(p, l.buf) - l.buf = l.buf[n:] - return - } - - line, isPrefix, err := l.in.ReadLine() - if err != nil { - return - } - if isPrefix { - return 0, ArmorCorrupt - } - - if bytes.HasPrefix(line, armorEnd) { - l.eof = true - return 0, io.EOF - } - - if len(line) == 5 && line[0] == '=' { - // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } - - l.eof = true - l.crcSet = true - return 0, io.EOF - } - - if len(line) > 96 { - return 0, ArmorCorrupt - } - - n = copy(p, line) - bytesToSave := len(line) - n - if bytesToSave > 0 { - if cap(l.buf) < bytesToSave { - l.buf = make([]byte, 0, bytesToSave) - } - l.buf = l.buf[0:bytesToSave] - copy(l.buf, line[n:]) - } - - return -} - -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. -type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 -} - -func (r *openpgpReader) Read(p []byte) (n int, err error) { - n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF && r.lReader.crcSet && r.lReader.crc != r.currentCRC&crc24Mask { - return 0, ArmorCorrupt - } - - return -} - -// Decode reads a PGP armored block from the given Reader. It will ignore -// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The -// given Reader is not usable after calling this function: an arbitrary amount -// of data may have been read past the end of the block. 
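Decode pairs with Encode from the package's encode.go, which is removed further down in this diff. A round-trip sketch, illustrative only, that armors a payload and recovers both the block type and the body:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Comment": "example"})
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close writes the CRC24 line and the END marker
		panic(err)
	}

	block, err := armor.Decode(&buf)
	if err != nil {
		panic(err)
	}
	body, err := io.ReadAll(block.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(block.Type, string(body)) // PGP MESSAGE hello
}
```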
-func Decode(in io.Reader) (p *Block, err error) { - r := bufio.NewReaderSize(in, 100) - var line []byte - ignoreNext := false - -TryNextBlock: - p = nil - - // Skip leading garbage - for { - ignoreThis := ignoreNext - line, ignoreNext, err = r.ReadLine() - if err != nil { - return - } - if ignoreNext || ignoreThis { - continue - } - line = bytes.TrimSpace(line) - if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { - break - } - } - - p = new(Block) - p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) - p.Header = make(map[string]string) - nextIsContinuation := false - var lastKey string - - // Read headers - for { - isContinuation := nextIsContinuation - line, nextIsContinuation, err = r.ReadLine() - if err != nil { - p = nil - return - } - if isContinuation { - p.Header[lastKey] += string(line) - continue - } - line = bytes.TrimSpace(line) - if len(line) == 0 { - break - } - - i := bytes.Index(line, []byte(": ")) - if i == -1 { - goto TryNextBlock - } - lastKey = string(line[:i]) - p.Header[lastKey] = string(line[i+2:]) - } - - p.lReader.in = r - p.oReader.currentCRC = crc24Init - p.oReader.lReader = &p.lReader - p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) - p.Body = &p.oReader - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go deleted file mode 100644 index 5b6e16c19d..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/armor/encode.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package armor - -import ( - "encoding/base64" - "io" -) - -var armorHeaderSep = []byte(": ") -var blockEnd = []byte("\n=") -var newline = []byte("\n") -var armorEndOfLineOut = []byte("-----\n") - -// writeSlices writes its arguments to the given Writer. -func writeSlices(out io.Writer, slices ...[]byte) (err error) { - for _, s := range slices { - _, err = out.Write(s) - if err != nil { - return err - } - } - return -} - -// lineBreaker breaks data across several lines, all of the same byte length -// (except possibly the last). Lines are broken with a single '\n'. -type lineBreaker struct { - lineLength int - line []byte - used int - out io.Writer - haveWritten bool -} - -func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { - return &lineBreaker{ - lineLength: lineLength, - line: make([]byte, lineLength), - used: 0, - out: out, - } -} - -func (l *lineBreaker) Write(b []byte) (n int, err error) { - n = len(b) - - if n == 0 { - return - } - - if l.used == 0 && l.haveWritten { - _, err = l.out.Write([]byte{'\n'}) - if err != nil { - return - } - } - - if l.used+len(b) < l.lineLength { - l.used += copy(l.line[l.used:], b) - return - } - - l.haveWritten = true - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - excess := l.lineLength - l.used - l.used = 0 - - _, err = l.out.Write(b[0:excess]) - if err != nil { - return - } - - _, err = l.Write(b[excess:]) - return -} - -func (l *lineBreaker) Close() (err error) { - if l.used > 0 { - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - } - - return -} - -// encoding keeps track of a running CRC24 over the data which has been written -// to it and outputs a OpenPGP checksum when closed, followed by an armor -// trailer. 
-// -// It's built into a stack of io.Writers: -// -// encoding -> base64 encoder -> lineBreaker -> out -type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte -} - -func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) - return e.b64.Write(data) -} - -func (e *encoding) Close() (err error) { - err = e.b64.Close() - if err != nil { - return - } - e.breaker.Close() - - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) - - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) -} - -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. -func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { - bType := []byte(blockType) - err = writeSlices(out, armorStart, bType, armorEndOfLineOut) - if err != nil { - return - } - - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) - if err != nil { - return - } - } - - _, err = out.Write(newline) - if err != nil { - return - } - - e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, - } - e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) - return e, nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/vendor/golang.org/x/crypto/openpgp/canonical_text.go deleted file mode 100644 index e601e389f1..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/canonical_text.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import "hash" - -// NewCanonicalTextHash reformats text written to it into the canonical -// form and then applies the hash h. See RFC 4880, section 5.2.1. -func NewCanonicalTextHash(h hash.Hash) hash.Hash { - return &canonicalTextHash{h, 0} -} - -type canonicalTextHash struct { - h hash.Hash - s int -} - -var newline = []byte{'\r', '\n'} - -func (cth *canonicalTextHash) Write(buf []byte) (int, error) { - start := 0 - - for i, c := range buf { - switch cth.s { - case 0: - if c == '\r' { - cth.s = 1 - } else if c == '\n' { - cth.h.Write(buf[start:i]) - cth.h.Write(newline) - start = i + 1 - } - case 1: - cth.s = 0 - } - } - - cth.h.Write(buf[start:]) - return len(buf), nil -} - -func (cth *canonicalTextHash) Sum(in []byte) []byte { - return cth.h.Sum(in) -} - -func (cth *canonicalTextHash) Reset() { - cth.h.Reset() - cth.s = 0 -} - -func (cth *canonicalTextHash) Size() int { - return cth.h.Size() -} - -func (cth *canonicalTextHash) BlockSize() int { - return cth.h.BlockSize() -} diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go deleted file mode 100644 index 1d7a0ea05a..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errors contains common error types for the OpenPGP packages. -// -// Deprecated: this package is unmaintained except for security fixes. 
New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package errors // import "golang.org/x/crypto/openpgp/errors" - -import ( - "strconv" -) - -// A StructuralError is returned when OpenPGP data is found to be syntactically -// invalid. -type StructuralError string - -func (s StructuralError) Error() string { - return "openpgp: invalid data: " + string(s) -} - -// UnsupportedError indicates that, although the OpenPGP data is valid, it -// makes use of currently unimplemented features. -type UnsupportedError string - -func (s UnsupportedError) Error() string { - return "openpgp: unsupported feature: " + string(s) -} - -// InvalidArgumentError indicates that the caller is in error and passed an -// incorrect value. -type InvalidArgumentError string - -func (i InvalidArgumentError) Error() string { - return "openpgp: invalid argument: " + string(i) -} - -// SignatureError indicates that a syntactically valid signature failed to -// validate. -type SignatureError string - -func (b SignatureError) Error() string { - return "openpgp: invalid signature: " + string(b) -} - -type keyIncorrectError int - -func (ki keyIncorrectError) Error() string { - return "openpgp: incorrect key" -} - -var ErrKeyIncorrect error = keyIncorrectError(0) - -type unknownIssuerError int - -func (unknownIssuerError) Error() string { - return "openpgp: signature made by unknown entity" -} - -var ErrUnknownIssuer error = unknownIssuerError(0) - -type keyRevokedError int - -func (keyRevokedError) Error() string { - return "openpgp: signature made by revoked key" -} - -var ErrKeyRevoked error = keyRevokedError(0) - -type UnknownPacketTypeError uint8 - -func (upte UnknownPacketTypeError) Error() string { - return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) -} diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go deleted file mode 100644 index d62f787e9d..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/keys.go +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto/rsa" - "io" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// PublicKeyType is the armor type for a PGP public key. -var PublicKeyType = "PGP PUBLIC KEY BLOCK" - -// PrivateKeyType is the armor type for a PGP private key. -var PrivateKeyType = "PGP PRIVATE KEY BLOCK" - -// An Entity represents the components of an OpenPGP key: a primary public key -// (which must be a signing key), one or more identities claimed by that key, -// and zero or more subkeys, which may be encryption keys. -type Entity struct { - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey -} - -// An Identity represents an identity claimed by an Entity and zero or more -// assertions by other entities about that claim. 
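A short sketch of the Entity model in use, assuming the package's NewEntity constructor (removed further down in this file) and placeholder identity parts; the Identities map is keyed by the conventional "Full Name (comment) <email>" string:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/openpgp"
)

func main() {
	// nil config means all defaults (2048-bit RSA, SHA-256).
	e, err := openpgp.NewEntity("Alice Example", "test", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}
	for name := range e.Identities {
		fmt.Println(name) // Alice Example (test) <alice@example.com>
	}
	fmt.Println("subkeys:", len(e.Subkeys)) // subkeys: 1
}
```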
-type Identity struct { - Name string // by convention, has the form "Full Name (comment) <email@example.com>" - UserId *packet.UserId - SelfSignature *packet.Signature - Signatures []*packet.Signature -} - -// A Subkey is an additional public key in an Entity. Subkeys can be used for -// encryption. -type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature -} - -// A Key identifies a specific public key in an Entity. This is either the -// Entity's primary key or a subkey. -type Key struct { - Entity *Entity - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - SelfSignature *packet.Signature -} - -// A KeyRing provides access to public and private keys. -type KeyRing interface { - // KeysById returns the set of keys that have the given key id. - KeysById(id uint64) []Key - // KeysByIdUsage returns the set of keys with the given id - // that also meet the key usage given by requiredUsage. - // The requiredUsage is expressed as the bitwise-OR of - // packet.KeyFlag* values. - KeysByIdUsage(id uint64, requiredUsage byte) []Key - // DecryptionKeys returns all private keys that are valid for - // decryption. - DecryptionKeys() []Key -} - -// primaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. -func (e *Entity) primaryIdentity() *Identity { - var firstIdentity *Identity - for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident - } - } - return firstIdentity -} - -// encryptionKey returns the best candidate Key for encrypting a message to the -// given Entity. -func (e *Entity) encryptionKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - // Iterate the keys to find the newest key - var maxTime time.Time - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagEncryptCommunications && - subkey.PublicKey.PubKeyAlgo.CanEncrypt() && - !subkey.Sig.KeyExpired(now) && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { - candidateSubkey = i - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we don't have any candidate subkeys for encryption and - // the primary key doesn't have any usage metadata then we - // assume that the primary key is ok. Or, if the primary key is - // marked as ok to encrypt to, then we can obviously use it. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - // This Entity appears to be signing only. - return Key{}, false -} - -// signingKey returns the best candidate Key for signing a message with this -// Entity. -func (e *Entity) signingKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagSign && - subkey.PublicKey.PubKeyAlgo.CanSign() && - !subkey.Sig.KeyExpired(now) { - candidateSubkey = i - break - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we have no candidate subkey then we assume that it's ok to sign - // with the primary key.
- i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - return Key{}, false -} - -// An EntityList contains one or more Entities. -type EntityList []*Entity - -// KeysById returns the set of keys that have the given key id. -func (el EntityList) KeysById(id uint64) (keys []Key) { - for _, e := range el { - if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) - } - - for _, subKey := range e.Subkeys { - if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// KeysByIdUsage returns the set of keys with the given id that also meet -// the key usage given by requiredUsage. The requiredUsage is expressed as -// the bitwise-OR of packet.KeyFlag* values. -func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { - for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if key.SelfSignature.FlagsValid && requiredUsage != 0 { - var usage byte - if key.SelfSignature.FlagCertify { - usage |= packet.KeyFlagCertify - } - if key.SelfSignature.FlagSign { - usage |= packet.KeyFlagSign - } - if key.SelfSignature.FlagEncryptCommunications { - usage |= packet.KeyFlagEncryptCommunications - } - if key.SelfSignature.FlagEncryptStorage { - usage |= packet.KeyFlagEncryptStorage - } - if usage&requiredUsage != requiredUsage { - continue - } - } - - keys = append(keys, key) - } - return -} - -// DecryptionKeys returns all private keys that are valid for decryption. -func (el EntityList) DecryptionKeys() (keys []Key) { - for _, e := range el { - for _, subKey := range e.Subkeys { - if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. -func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { - block, err := armor.Decode(r) - if err == io.EOF { - return nil, errors.InvalidArgumentError("no armored data found") - } - if err != nil { - return nil, err - } - if block.Type != PublicKeyType && block.Type != PrivateKeyType { - return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) - } - - return ReadKeyRing(block.Body) -} - -// ReadKeyRing reads one or more public/private keys. Unsupported keys are -// ignored as long as at least a single valid key is found. 
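ReadKeyRing and Serialize round-trip cleanly, which makes for a self-contained sketch (illustrative, using a throwaway key):

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/openpgp"
)

func main() {
	e, err := openpgp.NewEntity("Test", "", "test@example.com", nil)
	if err != nil {
		panic(err)
	}

	// Serialize the public half, then read it back as a keyring.
	var buf bytes.Buffer
	if err := e.Serialize(&buf); err != nil {
		panic(err)
	}
	el, err := openpgp.ReadKeyRing(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("entities: %d, primary key id: %016x\n", len(el), el[0].PrimaryKey.KeyId)
}
```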
-func ReadKeyRing(r io.Reader) (el EntityList, err error) { - packets := packet.NewReader(r) - var lastUnsupportedError error - - for { - var e *Entity - e, err = ReadEntity(packets) - if err != nil { - // TODO: warn about skipped unsupported/unreadable keys - if _, ok := err.(errors.UnsupportedError); ok { - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } else if _, ok := err.(errors.StructuralError); ok { - // Skip unreadable, badly-formatted keys - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } - if err == io.EOF { - err = nil - break - } - if err != nil { - el = nil - break - } - } else { - el = append(el, e) - } - } - - if len(el) == 0 && err == nil { - err = lastUnsupportedError - } - return -} - -// readToNextPublicKey reads packets until the start of the entity and leaves -// the first packet of the new entity in the Reader. -func readToNextPublicKey(packets *packet.Reader) (err error) { - var p packet.Packet - for { - p, err = packets.Next() - if err == io.EOF { - return - } else if err != nil { - if _, ok := err.(errors.UnsupportedError); ok { - err = nil - continue - } - return - } - - if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { - packets.Unread(p) - return - } - } -} - -// ReadEntity reads an entity (public key, identities, subkeys etc) from the -// given Reader. -func ReadEntity(packets *packet.Reader) (*Entity, error) { - e := new(Entity) - e.Identities = make(map[string]*Identity) - - p, err := packets.Next() - if err != nil { - return nil, err - } - - var ok bool - if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { - if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { - packets.Unread(p) - return nil, errors.StructuralError("first packet was not a public/private key") - } - e.PrimaryKey = &e.PrivateKey.PublicKey - } - - if !e.PrimaryKey.PubKeyAlgo.CanSign() { - return nil, errors.StructuralError("primary key cannot be used for signatures") - } - - var revocations []*packet.Signature -EachPacket: - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - switch pkt := p.(type) { - case *packet.UserId: - if err := addUserID(e, packets, pkt); err != nil { - return nil, err - } - case *packet.Signature: - if pkt.SigType == packet.SigTypeKeyRevocation { - revocations = append(revocations, pkt) - } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). - } - // Else, ignoring the signature as it does not follow anything - // we would know to attach it to. - case *packet.PrivateKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, &pkt.PublicKey, pkt) - if err != nil { - return nil, err - } - case *packet.PublicKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, pkt, nil) - if err != nil { - return nil, err - } - default: - // we ignore unknown packets - } - } - - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") - } - - for _, revocation := range revocations { - err = e.PrimaryKey.VerifyRevocationSignature(revocation) - if err == nil { - e.Revocations = append(e.Revocations, revocation) - } else { - // TODO: RFC 4880 5.2.3.15 defines revocation keys. 
- return nil, errors.StructuralError("revocation signature signed by alternate key") - } - } - - return e, nil -} - -func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { - // Make a new Identity object, that we might wind up throwing away. - // We'll only add it if we get a valid self-signature over this - // userID. - identity := new(Identity) - identity.Name = pkt.Id - identity.UserId = pkt - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return err - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { - if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { - return errors.StructuralError("user ID self-signature invalid: " + err.Error()) - } - identity.SelfSignature = sig - e.Identities[pkt.Id] = identity - } else { - identity.Signatures = append(identity.Signatures, sig) - } - } - - return nil -} - -func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { - var subKey Subkey - subKey.PublicKey = pub - subKey.PrivateKey = priv - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { - return errors.StructuralError("subkey signature with wrong type") - } - - if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - switch sig.SigType { - case packet.SigTypeSubkeyRevocation: - subKey.Sig = sig - case packet.SigTypeSubkeyBinding: - - if shouldReplaceSubkeySig(subKey.Sig, sig) { - subKey.Sig = sig - } - } - } - - if subKey.Sig == nil { - return errors.StructuralError("subkey packet not followed by signature") - } - - e.Subkeys = append(e.Subkeys, subKey) - - return nil -} - -func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { - if potentialNewSig == nil { - return false - } - - if existingSig == nil { - return true - } - - if existingSig.SigType == packet.SigTypeSubkeyRevocation { - return false // never override a revocation signature - } - - return potentialNewSig.CreationTime.After(existingSig.CreationTime) -} - -const defaultRSAKeyBits = 2048 - -// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a -// single identity composed of the given full name, comment and email, any of -// which may be empty but must not contain any of "()<>\x00". -// If config is nil, sensible defaults will be used. 
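NewEntity's output plugs straight into the package's signing helpers. A sketch assuming DetachSign and CheckDetachedSignature from the same removed package (they live in its write.go and read.go, outside this hunk):

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"golang.org/x/crypto/openpgp"
)

func main() {
	e, err := openpgp.NewEntity("Signer", "", "signer@example.com", nil)
	if err != nil {
		panic(err)
	}

	// Detach-sign a message, then verify it against a keyring that
	// contains only the signing entity.
	msg := "attack at dawn"
	var sig bytes.Buffer
	if err := openpgp.DetachSign(&sig, e, strings.NewReader(msg), nil); err != nil {
		panic(err)
	}
	signer, err := openpgp.CheckDetachedSignature(openpgp.EntityList{e}, strings.NewReader(msg), &sig)
	if err != nil {
		panic(err)
	}
	fmt.Printf("verified; signed by key %016x\n", signer.PrimaryKey.KeyId)
}
```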
-func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { - creationTime := config.Now() - - bits := defaultRSAKeyBits - if config != nil && config.RSABits != 0 { - bits = config.RSABits - } - - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return nil, errors.InvalidArgumentError("user id field contained invalid characters") - } - signingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - - e := &Entity{ - PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv), - Identities: make(map[string]*Identity), - } - isPrimaryId := true - e.Identities[uid.Id] = &Identity{ - Name: uid.Id, - UserId: uid, - SelfSignature: &packet.Signature{ - CreationTime: creationTime, - SigType: packet.SigTypePositiveCert, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - IsPrimaryId: &isPrimaryId, - FlagsValid: true, - FlagSign: true, - FlagCertify: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return nil, err - } - - // If the user passes in a DefaultHash via packet.Config, - // set the PreferredHash for the SelfSignature. - if config != nil && config.DefaultHash != 0 { - e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} - } - - // Likewise for DefaultCipher. - if config != nil && config.DefaultCipher != 0 { - e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} - } - - e.Subkeys = make([]Subkey, 1) - e.Subkeys[0] = Subkey{ - PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv), - Sig: &packet.Signature{ - CreationTime: creationTime, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - e.Subkeys[0].PublicKey.IsSubkey = true - e.Subkeys[0].PrivateKey.IsSubkey = true - err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config) - if err != nil { - return nil, err - } - return e, nil -} - -// SerializePrivate serializes an Entity, including private key material, but -// excluding signatures from other entities, to the given Writer. -// Identities and subkeys are re-signed in case they changed since NewEntity. -// If config is nil, sensible defaults will be used.
-func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { - err = e.PrivateKey.Serialize(w) - if err != nil { - return - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return - } - err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return - } - } - for _, subkey := range e.Subkeys { - err = subkey.PrivateKey.Serialize(w) - if err != nil { - return - } - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return - } - err = subkey.Sig.Serialize(w) - if err != nil { - return - } - } - return nil -} - -// Serialize writes the public part of the given Entity to w, including -// signatures from other entities. No private key material will be output. -func (e *Entity) Serialize(w io.Writer) error { - err := e.PrimaryKey.Serialize(w) - if err != nil { - return err - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return err - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return err - } - for _, sig := range ident.Signatures { - err = sig.Serialize(w) - if err != nil { - return err - } - } - } - for _, subkey := range e.Subkeys { - err = subkey.PublicKey.Serialize(w) - if err != nil { - return err - } - err = subkey.Sig.Serialize(w) - if err != nil { - return err - } - } - return nil -} - -// SignIdentity adds a signature to e, from signer, attesting that identity is -// associated with e. The provided identity must already be an element of -// e.Identities and the private key of signer must have been decrypted if -// necessary. -// If config is nil, sensible defaults will be used. -func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing Entity must have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing Entity's private key must be decrypted") - } - ident, ok := e.Identities[identity] - if !ok { - return errors.InvalidArgumentError("given identity string not found in Entity") - } - - sig := &packet.Signature{ - SigType: packet.SigTypeGenericCert, - PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: config.Now(), - IssuerKeyId: &signer.PrivateKey.KeyId, - } - if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { - return err - } - ident.Signatures = append(ident.Signatures, sig) - return nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go deleted file mode 100644 index 353f945247..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "compress/bzip2" - "compress/flate" - "compress/zlib" - "golang.org/x/crypto/openpgp/errors" - "io" - "strconv" -) - -// Compressed represents a compressed OpenPGP packet. The decompressed contents -// will contain more OpenPGP packets. See RFC 4880, section 5.6. 
-type Compressed struct { - Body io.Reader -} - -const ( - NoCompression = flate.NoCompression - BestSpeed = flate.BestSpeed - BestCompression = flate.BestCompression - DefaultCompression = flate.DefaultCompression -) - -// CompressionConfig contains compressor configuration settings. -type CompressionConfig struct { - // Level is the compression level to use. It must be set to - // between -1 and 9, with -1 causing the compressor to use the - // default compression level, 0 causing the compressor to use - // no compression and 1 to 9 representing increasing (better, - // slower) compression levels. If Level is less than -1 or - // more than 9, a non-nil error will be returned during - // encryption. See the constants above for convenient common - // settings for Level. - Level int -} - -func (c *Compressed) parse(r io.Reader) error { - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - - switch buf[0] { - case 1: - c.Body = flate.NewReader(r) - case 2: - c.Body, err = zlib.NewReader(r) - case 3: - c.Body = bzip2.NewReader(r) - default: - err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) - } - - return err -} - -// compressedWriteCloser represents the serialized compression stream -// header and the compressor. Its Close() method ensures that both the -// compressor and serialized stream header are closed. Its Write() -// method writes to the compressor. -type compressedWriteCloser struct { - sh io.Closer // Stream Header - c io.WriteCloser // Compressor -} - -func (cwc compressedWriteCloser) Write(p []byte) (int, error) { - return cwc.c.Write(p) -} - -func (cwc compressedWriteCloser) Close() (err error) { - err = cwc.c.Close() - if err != nil { - return err - } - - return cwc.sh.Close() -} - -// SerializeCompressed serializes a compressed data packet to w and -// returns a WriteCloser to which the literal data packets themselves -// can be written and which MUST be closed on completion. If cc is -// nil, sensible defaults will be used to configure the compression -// algorithm. -func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { - compressed, err := serializeStreamHeader(w, packetTypeCompressed) - if err != nil { - return - } - - _, err = compressed.Write([]byte{uint8(algo)}) - if err != nil { - return - } - - level := DefaultCompression - if cc != nil { - level = cc.Level - } - - var compressor io.WriteCloser - switch algo { - case CompressionZIP: - compressor, err = flate.NewWriter(compressed, level) - case CompressionZLIB: - compressor, err = zlib.NewWriterLevel(compressed, level) - default: - s := strconv.Itoa(int(algo)) - err = errors.UnsupportedError("Unsupported compression algorithm: " + s) - } - if err != nil { - return - } - - literaldata = compressedWriteCloser{compressed, compressor} - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/config.go b/vendor/golang.org/x/crypto/openpgp/packet/config.go deleted file mode 100644 index c76eecc963..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/config.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rand" - "io" - "time" -) - -// Config collects a number of parameters along with sensible defaults. -// A nil *Config is valid and results in all default values.
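Every accessor on Config checks for a nil receiver before touching fields, so a nil *Config can be passed anywhere one is expected. A minimal check of the documented defaults:

```go
package main

import (
	"crypto"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	var cfg *packet.Config // nil on purpose
	fmt.Println(cfg.Hash() == crypto.SHA256)                 // true
	fmt.Println(cfg.Cipher() == packet.CipherAES128)         // true
	fmt.Println(cfg.Compression() == packet.CompressionNone) // true
}
```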
-type Config struct { - // Rand provides the source of entropy. - // If nil, the crypto/rand Reader is used. - Rand io.Reader - // DefaultHash is the default hash function to be used. - // If zero, SHA-256 is used. - DefaultHash crypto.Hash - // DefaultCipher is the cipher to be used. - // If zero, AES-128 is used. - DefaultCipher CipherFunction - // Time returns the current time as the number of seconds since the - // epoch. If Time is nil, time.Now is used. - Time func() time.Time - // DefaultCompressionAlgo is the compression algorithm to be - // applied to the plaintext before encryption. If zero, no - // compression is done. - DefaultCompressionAlgo CompressionAlgo - // CompressionConfig configures the compression settings. - CompressionConfig *CompressionConfig - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 is used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int - // RSABits is the number of bits in new RSA keys made with NewEntity. - // If zero, then 2048 bit keys are created. - RSABits int -} - -func (c *Config) Random() io.Reader { - if c == nil || c.Rand == nil { - return rand.Reader - } - return c.Rand -} - -func (c *Config) Hash() crypto.Hash { - if c == nil || uint(c.DefaultHash) == 0 { - return crypto.SHA256 - } - return c.DefaultHash -} - -func (c *Config) Cipher() CipherFunction { - if c == nil || uint8(c.DefaultCipher) == 0 { - return CipherAES128 - } - return c.DefaultCipher -} - -func (c *Config) Now() time.Time { - if c == nil || c.Time == nil { - return time.Now() - } - return c.Time() -} - -func (c *Config) Compression() CompressionAlgo { - if c == nil { - return CompressionNone - } - return c.DefaultCompressionAlgo -} - -func (c *Config) PasswordHashIterations() int { - if c == nil || c.S2KCount == 0 { - return 0 - } - return c.S2KCount -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go deleted file mode 100644 index 6d7639722c..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rsa" - "encoding/binary" - "io" - "math/big" - "strconv" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -const encryptedKeyVersion = 3 - -// EncryptedKey represents a public-key encrypted session key. See RFC 4880, -// section 5.1.
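The integrity check on a decrypted session key (checksumKeyMaterial below) is nothing more than a 16-bit sum of the key octets, wrapping modulo 65536. A worked example:

```go
package main

import "fmt"

func main() {
	key := []byte{0x01, 0x02, 0xff}
	var checksum uint16
	for _, v := range key {
		checksum += uint16(v) // natural uint16 overflow gives the mod-65536 wrap
	}
	fmt.Printf("checksum = %#x\n", checksum) // checksum = 0x102
}
```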
-type EncryptedKey struct { - KeyId uint64 - Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt - Key []byte // only valid after a successful Decrypt - - encryptedMPI1, encryptedMPI2 parsedMPI -} - -func (e *EncryptedKey) parse(r io.Reader) (err error) { - var buf [10]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != encryptedKeyVersion { - return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) - } - e.KeyId = binary.BigEndian.Uint64(buf[1:9]) - e.Algo = PublicKeyAlgorithm(buf[9]) - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - case PubKeyAlgoElGamal: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) - if err != nil { - return - } - } - _, err = consumeAll(r) - return -} - -func checksumKeyMaterial(key []byte) uint16 { - var checksum uint16 - for _, v := range key { - checksum += uint16(v) - } - return checksum -} - -// Decrypt decrypts an encrypted session key with the given private key. The -// private key must have been decrypted first. -// If config is nil, sensible defaults will be used. -func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { - var err error - var b []byte - - // TODO(agl): use session key decryption routines here to avoid - // padding oracle attacks. - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - // Supports both *rsa.PrivateKey and crypto.Decrypter - k := priv.PrivateKey.(crypto.Decrypter) - b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.bytes), nil) - case PubKeyAlgoElGamal: - c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) - c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) - b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) - default: - err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) - } - - if err != nil { - return err - } - - e.CipherFunc = CipherFunction(b[0]) - e.Key = b[1 : len(b)-2] - expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) - checksum := checksumKeyMaterial(e.Key) - if checksum != expectedChecksum { - return errors.StructuralError("EncryptedKey checksum incorrect") - } - - return nil -} - -// Serialize writes the encrypted key packet, e, to w.
-func (e *EncryptedKey) Serialize(w io.Writer) error { - var mpiLen int - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - mpiLen = 2 + len(e.encryptedMPI1.bytes) - case PubKeyAlgoElGamal: - mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) - default: - return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) - } - - serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) - - w.Write([]byte{encryptedKeyVersion}) - binary.Write(w, binary.BigEndian, e.KeyId) - w.Write([]byte{byte(e.Algo)}) - - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - writeMPIs(w, e.encryptedMPI1) - case PubKeyAlgoElGamal: - writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) - default: - panic("internal error") - } - - return nil -} - -// SerializeEncryptedKey serializes an encrypted key packet to w that contains -// key, encrypted to pub. -// If config is nil, sensible defaults will be used. -func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) - - switch pub.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) - case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) - case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: - return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) - } - - return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) -} - -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { - cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - return writeMPI(w, 8*uint16(len(cipherText)), cipherText) -} - -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { - c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ - packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 - packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - err = writeBig(w, c1) - if err != nil { - return err - } - return writeBig(w, c2) -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go 
deleted file mode 100644 index 1713503395..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" - "io" - "strconv" -) - -// OnePassSignature represents a one-pass signature packet. See RFC 4880, -// section 5.4. -type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool -} - -const onePassSignatureVersion = 3 - -func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) - } - - var ok bool - ops.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) - } - - ops.SigType = SignatureType(buf[1]) - ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 - return -} - -// Serialize marshals the given OnePassSignature to w. -func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion - buf[1] = uint8(ops.SigType) - var ok bool - buf[2], ok = s2k.HashToHashId(ops.Hash) - if !ok { - return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) - } - buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { - return err - } - _, err := w.Write(buf[:]) - return err -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go deleted file mode 100644 index 0a19794a8e..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packet implements parsing and serialization of OpenPGP packets, as -// specified in RFC 4880. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package packet // import "golang.org/x/crypto/openpgp/packet" - -import ( - "bufio" - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rsa" - "io" - "math/big" - "math/bits" - - "golang.org/x/crypto/cast5" - "golang.org/x/crypto/openpgp/errors" -) - -// readFull is the same as io.ReadFull except that reading zero bytes returns -// ErrUnexpectedEOF rather than EOF. -func readFull(r io.Reader, buf []byte) (n int, err error) { - n, err = io.ReadFull(r, buf) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
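A worked sketch of the new-format length rules that readLength implements. Since readLength is unexported, the helper below is an illustrative re-implementation covering the one-, two-, and five-octet cases (partial lengths, first octet 224 through 254, are omitted):

```go
package main

import "fmt"

// decodeNewLen mirrors readLength's arithmetic for definite lengths.
func decodeNewLen(b []byte) int64 {
	switch {
	case b[0] < 192: // one octet
		return int64(b[0])
	case b[0] < 224: // two octets
		return int64(b[0]-192)<<8 + int64(b[1]) + 192
	default: // 0xff: a four-octet big-endian length follows
		return int64(b[1])<<24 | int64(b[2])<<16 | int64(b[3])<<8 | int64(b[4])
	}
}

func main() {
	fmt.Println(decodeNewLen([]byte{0xbf}))                         // 191
	fmt.Println(decodeNewLen([]byte{0xc5, 0xfb}))                   // 1723
	fmt.Println(decodeNewLen([]byte{0xff, 0x00, 0x01, 0x00, 0x00})) // 65536
}
```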
-func readLength(r io.Reader) (length int64, isPartial bool, err error) { - var buf [4]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - switch { - case buf[0] < 192: - length = int64(buf[0]) - case buf[0] < 224: - length = int64(buf[0]-192) << 8 - _, err = readFull(r, buf[0:1]) - if err != nil { - return - } - length += int64(buf[0]) + 192 - case buf[0] < 255: - length = int64(1) << (buf[0] & 0x1f) - isPartial = true - default: - _, err = readFull(r, buf[0:4]) - if err != nil { - return - } - length = int64(buf[0])<<24 | - int64(buf[1])<<16 | - int64(buf[2])<<8 | - int64(buf[3]) - } - return -} - -// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. -// The continuation lengths are parsed and removed from the stream and EOF is -// returned at the end of the packet. See RFC 4880, section 4.2.2.4. -type partialLengthReader struct { - r io.Reader - remaining int64 - isPartial bool -} - -func (r *partialLengthReader) Read(p []byte) (n int, err error) { - for r.remaining == 0 { - if !r.isPartial { - return 0, io.EOF - } - r.remaining, r.isPartial, err = readLength(r.r) - if err != nil { - return 0, err - } - } - - toRead := int64(len(p)) - if toRead > r.remaining { - toRead = r.remaining - } - - n, err = r.r.Read(p[:int(toRead)]) - r.remaining -= int64(n) - if n < int(toRead) && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// partialLengthWriter writes a stream of data using OpenPGP partial lengths. -// See RFC 4880, section 4.2.2.4. -type partialLengthWriter struct { - w io.WriteCloser - lengthByte [1]byte - sentFirst bool - buf []byte -} - -// RFC 4880 4.2.2.4: the first partial length MUST be at least 512 octets long. -const minFirstPartialWrite = 512 - -func (w *partialLengthWriter) Write(p []byte) (n int, err error) { - off := 0 - if !w.sentFirst { - if len(w.buf) > 0 || len(p) < minFirstPartialWrite { - off = len(w.buf) - w.buf = append(w.buf, p...) - if len(w.buf) < minFirstPartialWrite { - return len(p), nil - } - p = w.buf - w.buf = nil - } - w.sentFirst = true - } - - power := uint8(30) - for len(p) > 0 { - l := 1 << power - if len(p) < l { - power = uint8(bits.Len32(uint32(len(p)))) - 1 - l = 1 << power - } - w.lengthByte[0] = 224 + power - _, err = w.w.Write(w.lengthByte[:]) - if err == nil { - var m int - m, err = w.w.Write(p[:l]) - n += m - } - if err != nil { - if n < off { - return 0, err - } - return n - off, err - } - p = p[l:] - } - return n - off, nil -} - -func (w *partialLengthWriter) Close() error { - if len(w.buf) > 0 { - // In this case we can't send a 512 byte packet. - // Just send what we have. - p := w.buf - w.sentFirst = true - w.buf = nil - if _, err := w.Write(p); err != nil { - return err - } - } - - w.lengthByte[0] = 0 - _, err := w.w.Write(w.lengthByte[:]) - if err != nil { - return err - } - return w.w.Close() -} - -// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the -// underlying Reader returns EOF before the limit has been reached. -type spanReader struct { - r io.Reader - n int64 -} - -func (l *spanReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > l.n { - p = p[0:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - if l.n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readHeader parses a packet header and returns an io.Reader which will return -// the contents of the packet. See RFC 4880, section 4.2. 
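The header's first octet is easy to check by hand: readHeader requires bit 7 set, and bit 6 selects the new packet format, with the tag in the low bits. A numeric sketch matching serializeHeader further down:

```go
package main

import "fmt"

func main() {
	// New-format header octet for a signature packet (tag 2).
	b0 := byte(0x80 | 0x40 | 2)
	fmt.Printf("octet %#x, new format: %v, tag: %d\n", b0, b0&0x40 != 0, b0&0x3f)
	// octet 0xc2, new format: true, tag: 2
}
```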
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { - var buf [4]byte - _, err = io.ReadFull(r, buf[:1]) - if err != nil { - return - } - if buf[0]&0x80 == 0 { - err = errors.StructuralError("tag byte does not have MSB set") - return - } - if buf[0]&0x40 == 0 { - // Old format packet - tag = packetType((buf[0] & 0x3f) >> 2) - lengthType := buf[0] & 3 - if lengthType == 3 { - length = -1 - contents = r - return - } - lengthBytes := 1 << lengthType - _, err = readFull(r, buf[0:lengthBytes]) - if err != nil { - return - } - for i := 0; i < lengthBytes; i++ { - length <<= 8 - length |= int64(buf[i]) - } - contents = &spanReader{r, length} - return - } - - // New format packet - tag = packetType(buf[0] & 0x3f) - length, isPartial, err := readLength(r) - if err != nil { - return - } - if isPartial { - contents = &partialLengthReader{ - remaining: length, - isPartial: true, - r: r, - } - length = -1 - } else { - contents = &spanReader{r, length} - } - return -} - -// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section -// 4.2. -func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { - var buf [6]byte - var n int - - buf[0] = 0x80 | 0x40 | byte(ptype) - if length < 192 { - buf[1] = byte(length) - n = 2 - } else if length < 8384 { - length -= 192 - buf[1] = 192 + byte(length>>8) - buf[2] = byte(length) - n = 3 - } else { - buf[1] = 255 - buf[2] = byte(length >> 24) - buf[3] = byte(length >> 16) - buf[4] = byte(length >> 8) - buf[5] = byte(length) - n = 6 - } - - _, err = w.Write(buf[:n]) - return -} - -// serializeStreamHeader writes an OpenPGP packet header to w where the -// length of the packet is unknown. It returns a io.WriteCloser which can be -// used to write the contents of the packet. See RFC 4880, section 4.2. -func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { - var buf [1]byte - buf[0] = 0x80 | 0x40 | byte(ptype) - _, err = w.Write(buf[:]) - if err != nil { - return - } - out = &partialLengthWriter{w: w} - return -} - -// Packet represents an OpenPGP packet. Users are expected to try casting -// instances of this interface to specific packet types. -type Packet interface { - parse(io.Reader) error -} - -// consumeAll reads from the given Reader until error, returning the number of -// bytes read. -func consumeAll(r io.Reader) (n int64, err error) { - var m int - var buf [1024]byte - - for { - m, err = r.Read(buf[:]) - n += int64(m) - if err == io.EOF { - err = nil - return - } - if err != nil { - return - } - } -} - -// packetType represents the numeric ids of the different OpenPGP packet types. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 -type packetType uint8 - -const ( - packetTypeEncryptedKey packetType = 1 - packetTypeSignature packetType = 2 - packetTypeSymmetricKeyEncrypted packetType = 3 - packetTypeOnePassSignature packetType = 4 - packetTypePrivateKey packetType = 5 - packetTypePublicKey packetType = 6 - packetTypePrivateSubkey packetType = 7 - packetTypeCompressed packetType = 8 - packetTypeSymmetricallyEncrypted packetType = 9 - packetTypeLiteralData packetType = 11 - packetTypeUserId packetType = 13 - packetTypePublicSubkey packetType = 14 - packetTypeUserAttribute packetType = 17 - packetTypeSymmetricallyEncryptedMDC packetType = 18 -) - -// peekVersion detects the version of a public key packet about to -// be read. A bufio.Reader at the original position of the io.Reader -// is returned. 
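Reviewer note (illustrative sketch, not part of the vendored file): peekVersion below relies on bufio.Reader.Peek returning bytes without consuming them, so the subsequent parse still sees the version octet:

package main

import (
	"bufio"
	"bytes"
	"fmt"
)

func main() {
	r := bufio.NewReader(bytes.NewReader([]byte{4, 0xde, 0xad}))
	ver, _ := r.Peek(1) // inspect without advancing
	b, _ := r.ReadByte()
	fmt.Println(ver[0], b) // 4 4: the peeked byte is still in the stream
}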
-func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { - bufr = bufio.NewReader(r) - var verBuf []byte - if verBuf, err = bufr.Peek(1); err != nil { - return - } - ver = verBuf[0] - return -} - -// Read reads a single OpenPGP packet from the given io.Reader. If there is an -// error parsing a packet, the whole packet is consumed from the input. -func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) - if err != nil { - return - } - - switch tag { - case packetTypeEncryptedKey: - p = new(EncryptedKey) - case packetTypeSignature: - var version byte - // Detect signature version - if contents, version, err = peekVersion(contents); err != nil { - return - } - if version < 4 { - p = new(SignatureV3) - } else { - p = new(Signature) - } - case packetTypeSymmetricKeyEncrypted: - p = new(SymmetricKeyEncrypted) - case packetTypeOnePassSignature: - p = new(OnePassSignature) - case packetTypePrivateKey, packetTypePrivateSubkey: - pk := new(PrivateKey) - if tag == packetTypePrivateSubkey { - pk.IsSubkey = true - } - p = pk - case packetTypePublicKey, packetTypePublicSubkey: - var version byte - if contents, version, err = peekVersion(contents); err != nil { - return - } - isSubkey := tag == packetTypePublicSubkey - if version < 4 { - p = &PublicKeyV3{IsSubkey: isSubkey} - } else { - p = &PublicKey{IsSubkey: isSubkey} - } - case packetTypeCompressed: - p = new(Compressed) - case packetTypeSymmetricallyEncrypted: - p = new(SymmetricallyEncrypted) - case packetTypeLiteralData: - p = new(LiteralData) - case packetTypeUserId: - p = new(UserId) - case packetTypeUserAttribute: - p = new(UserAttribute) - case packetTypeSymmetricallyEncryptedMDC: - se := new(SymmetricallyEncrypted) - se.MDC = true - p = se - default: - err = errors.UnknownPacketTypeError(tag) - } - if p != nil { - err = p.parse(contents) - } - if err != nil { - consumeAll(contents) - } - return -} - -// SignatureType represents the different semantic meanings of an OpenPGP -// signature. See RFC 4880, section 5.2.1. -type SignatureType uint8 - -const ( - SigTypeBinary SignatureType = 0 - SigTypeText = 1 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 -) - -// PublicKeyAlgorithm represents the different public key system specified for -// OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 -type PublicKeyAlgorithm uint8 - -const ( - PubKeyAlgoRSA PublicKeyAlgorithm = 1 - PubKeyAlgoElGamal PublicKeyAlgorithm = 16 - PubKeyAlgoDSA PublicKeyAlgorithm = 17 - // RFC 6637, Section 5. - PubKeyAlgoECDH PublicKeyAlgorithm = 18 - PubKeyAlgoECDSA PublicKeyAlgorithm = 19 - - // Deprecated in RFC 4880, Section 13.5. Use key flags instead. - PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 - PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 -) - -// CanEncrypt returns true if it's possible to encrypt a message to a public -// key of the given type. -func (pka PublicKeyAlgorithm) CanEncrypt() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: - return true - } - return false -} - -// CanSign returns true if it's possible for a public key of the given type to -// sign a message. 
-func (pka PublicKeyAlgorithm) CanSign() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - return true - } - return false -} - -// CipherFunction represents the different block ciphers specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -type CipherFunction uint8 - -const ( - Cipher3DES CipherFunction = 2 - CipherCAST5 CipherFunction = 3 - CipherAES128 CipherFunction = 7 - CipherAES192 CipherFunction = 8 - CipherAES256 CipherFunction = 9 -) - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - switch cipher { - case Cipher3DES: - return 24 - case CipherCAST5: - return cast5.KeySize - case CipherAES128: - return 16 - case CipherAES192: - return 24 - case CipherAES256: - return 32 - } - return 0 -} - -// blockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) blockSize() int { - switch cipher { - case Cipher3DES: - return des.BlockSize - case CipherCAST5: - return 8 - case CipherAES128, CipherAES192, CipherAES256: - return 16 - } - return 0 -} - -// new returns a fresh instance of the given cipher. -func (cipher CipherFunction) new(key []byte) (block cipher.Block) { - switch cipher { - case Cipher3DES: - block, _ = des.NewTripleDESCipher(key) - case CipherCAST5: - block, _ = cast5.NewCipher(key) - case CipherAES128, CipherAES192, CipherAES256: - block, _ = aes.NewCipher(key) - } - return -} - -// readMPI reads a big integer from r. The bit length returned is the bit -// length that was specified in r. This is preserved so that the integer can be -// reserialized exactly. -func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { - var buf [2]byte - _, err = readFull(r, buf[0:]) - if err != nil { - return - } - bitLength = uint16(buf[0])<<8 | uint16(buf[1]) - numBytes := (int(bitLength) + 7) / 8 - mpi = make([]byte, numBytes) - _, err = readFull(r, mpi) - // According to RFC 4880 3.2. we should check that the MPI has no leading - // zeroes (at least when not an encrypted MPI?), but this implementation - // does generate leading zeroes, so we keep accepting them. - return -} - -// writeMPI serializes a big integer to w. -func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { - // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. - // Implementations seem to be tolerant of them, and stripping them would - // make it complex to guarantee matching re-serialization. - _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) - if err == nil { - _, err = w.Write(mpiBytes) - } - return -} - -// writeBig serializes a *big.Int to w. -func writeBig(w io.Writer, i *big.Int) error { - return writeMPI(w, uint16(i.BitLen()), i.Bytes()) -} - -// padToKeySize left-pads a MPI with zeroes to match the length of the -// specified RSA public. -func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { - k := (pub.N.BitLen() + 7) / 8 - if len(b) >= k { - return b - } - bb := make([]byte, k) - copy(bb[len(bb)-len(b):], b) - return bb -} - -// CompressionAlgo Represents the different compression algorithms -// supported by OpenPGP (except for BZIP2, which is not currently -// supported). See Section 9.3 of RFC 4880. 
-type CompressionAlgo uint8 - -const ( - CompressionNone CompressionAlgo = 0 - CompressionZIP CompressionAlgo = 1 - CompressionZLIB CompressionAlgo = 2 -) diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go deleted file mode 100644 index 192aac376d..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "crypto/sha1" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// PrivateKey represents a possibly encrypted private key. See RFC 4880, -// section 5.5.3. -type PrivateKey struct { - PublicKey - Encrypted bool // if true then the private key is unavailable until Decrypt has been called. - encryptedData []byte - cipher CipherFunction - s2k func(out, in []byte) - PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or crypto.Signer/crypto.Decrypter (Decryptor RSA only). - sha1Checksum bool - iv []byte -} - -func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that -// implements RSA or ECDSA. -func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey { - pk := new(PrivateKey) - // In general, the public Keys should be used as pointers. We still - // type-switch on the values, for backwards-compatibility. 
- switch pubkey := signer.Public().(type) { - case *rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) - case rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) - case *ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) - case ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) - default: - panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") - } - pk.PrivateKey = signer - return pk -} - -func (pk *PrivateKey) parse(r io.Reader) (err error) { - err = (&pk.PublicKey).parse(r) - if err != nil { - return - } - var buf [1]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - - s2kType := buf[0] - - switch s2kType { - case 0: - pk.s2k = nil - pk.Encrypted = false - case 254, 255: - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.cipher = CipherFunction(buf[0]) - pk.Encrypted = true - pk.s2k, err = s2k.Parse(r) - if err != nil { - return - } - if s2kType == 254 { - pk.sha1Checksum = true - } - default: - return errors.UnsupportedError("deprecated s2k function in private key") - } - - if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { - return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) - } - pk.iv = make([]byte, blockSize) - _, err = readFull(r, pk.iv) - if err != nil { - return - } - } - - pk.encryptedData, err = io.ReadAll(r) - if err != nil { - return - } - - if !pk.Encrypted { - return pk.parsePrivateKey(pk.encryptedData) - } - - return -} - -func mod64kHash(d []byte) uint16 { - var h uint16 - for _, b := range d { - h += uint16(b) - } - return h -} - -func (pk *PrivateKey) Serialize(w io.Writer) (err error) { - // TODO(agl): support encrypted private keys - buf := bytes.NewBuffer(nil) - err = pk.PublicKey.serializeWithoutHeaders(buf) - if err != nil { - return - } - buf.WriteByte(0 /* no encryption */) - - privateKeyBuf := bytes.NewBuffer(nil) - - switch priv := pk.PrivateKey.(type) { - case *rsa.PrivateKey: - err = serializeRSAPrivateKey(privateKeyBuf, priv) - case *dsa.PrivateKey: - err = serializeDSAPrivateKey(privateKeyBuf, priv) - case *elgamal.PrivateKey: - err = serializeElGamalPrivateKey(privateKeyBuf, priv) - case *ecdsa.PrivateKey: - err = serializeECDSAPrivateKey(privateKeyBuf, priv) - default: - err = errors.InvalidArgumentError("unknown private key type") - } - if err != nil { - return - } - - ptype := packetTypePrivateKey - contents := buf.Bytes() - privateKeyBytes := privateKeyBuf.Bytes() - if pk.IsSubkey { - ptype = packetTypePrivateSubkey - } - err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) - if err != nil { - return - } - _, err = w.Write(contents) - if err != nil { - return - } - _, err = w.Write(privateKeyBytes) - if err != nil { - return - } - - checksum := mod64kHash(privateKeyBytes) - var checksumBytes [2]byte - checksumBytes[0] = byte(checksum >> 8) - checksumBytes[1] = byte(checksum) - _, err = w.Write(checksumBytes[:]) - - return -} - -func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { - err := writeBig(w, priv.D) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[1]) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[0]) - if err != nil { - return err - } - return writeBig(w, priv.Precomputed.Qinv) -} - -func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { - return writeBig(w, priv.X) -} - -func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) 
error { - return writeBig(w, priv.X) -} - -func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { - return writeBig(w, priv.D) -} - -// Decrypt decrypts an encrypted private key using a passphrase. -func (pk *PrivateKey) Decrypt(passphrase []byte) error { - if !pk.Encrypted { - return nil - } - - key := make([]byte, pk.cipher.KeySize()) - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") - } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") - } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) - } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-2] - } - - return pk.parsePrivateKey(data) -} - -func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { - switch pk.PublicKey.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: - return pk.parseRSAPrivateKey(data) - case PubKeyAlgoDSA: - return pk.parseDSAPrivateKey(data) - case PubKeyAlgoElGamal: - return pk.parseElGamalPrivateKey(data) - case PubKeyAlgoECDSA: - return pk.parseECDSAPrivateKey(data) - } - panic("impossible") -} - -func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { - rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) - rsaPriv := new(rsa.PrivateKey) - rsaPriv.PublicKey = *rsaPub - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - p, _, err := readMPI(buf) - if err != nil { - return - } - q, _, err := readMPI(buf) - if err != nil { - return - } - - rsaPriv.D = new(big.Int).SetBytes(d) - rsaPriv.Primes = make([]*big.Int, 2) - rsaPriv.Primes[0] = new(big.Int).SetBytes(p) - rsaPriv.Primes[1] = new(big.Int).SetBytes(q) - if err := rsaPriv.Validate(); err != nil { - return err - } - rsaPriv.Precompute() - pk.PrivateKey = rsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { - dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) - dsaPriv := new(dsa.PrivateKey) - dsaPriv.PublicKey = *dsaPub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - dsaPriv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = dsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { - pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) - priv := new(elgamal.PrivateKey) - priv.PublicKey = *pub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - priv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = priv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { - ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - - pk.PrivateKey = 
&ecdsa.PrivateKey{ - PublicKey: *ecdsaPub, - D: new(big.Int).SetBytes(d), - } - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go deleted file mode 100644 index fcd5f52519..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go +++ /dev/null @@ -1,753 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -var ( - // NIST curve P-256 - oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} - // NIST curve P-384 - oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} - // NIST curve P-521 - oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} -) - -const maxOIDLength = 8 - -// ecdsaKey stores the algorithm-specific fields for ECDSA keys. -// as defined in RFC 6637, Section 9. -type ecdsaKey struct { - // oid contains the OID byte sequence identifying the elliptic curve used - oid []byte - // p contains the elliptic curve point that represents the public key - p parsedMPI -} - -// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. -func parseOID(r io.Reader) (oid []byte, err error) { - buf := make([]byte, maxOIDLength) - if _, err = readFull(r, buf[:1]); err != nil { - return - } - oidLen := buf[0] - if int(oidLen) > len(buf) { - err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) - return - } - oid = buf[:oidLen] - _, err = readFull(r, oid) - return -} - -func (f *ecdsaKey) parse(r io.Reader) (err error) { - if f.oid, err = parseOID(r); err != nil { - return err - } - f.p.bytes, f.p.bitLength, err = readMPI(r) - return -} - -func (f *ecdsaKey) serialize(w io.Writer) (err error) { - buf := make([]byte, maxOIDLength+1) - buf[0] = byte(len(f.oid)) - copy(buf[1:], f.oid) - if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { - return - } - return writeMPIs(w, f.p) -} - -func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { - var c elliptic.Curve - if bytes.Equal(f.oid, oidCurveP256) { - c = elliptic.P256() - } else if bytes.Equal(f.oid, oidCurveP384) { - c = elliptic.P384() - } else if bytes.Equal(f.oid, oidCurveP521) { - c = elliptic.P521() - } else { - return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) - } - x, y := elliptic.Unmarshal(c, f.p.bytes) - if x == nil { - return nil, errors.UnsupportedError("failed to parse EC point") - } - return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil -} - -func (f *ecdsaKey) byteLen() int { - return 1 + len(f.oid) + 2 + len(f.p.bytes) -} - -type kdfHashFunction byte -type kdfAlgorithm byte - -// ecdhKdf stores key derivation function parameters -// used for ECDH encryption. See RFC 6637, Section 9. 
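Reviewer note (illustrative sketch, not part of the vendored file): the KDF field parsed below is a length octet, a reserved octet fixed at 0x01, then the hash and key-wrap algorithm IDs (RFC 6637, section 9). A standalone decoder under the same rules, with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

func parseKDF(b []byte) (kdfHash, kdfAlgo byte, err error) {
	if len(b) < 4 || int(b[0]) < 3 {
		return 0, 0, errors.New("KDF field too short")
	}
	if b[1] != 0x01 {
		return 0, 0, errors.New("unsupported reserved octet")
	}
	return b[2], b[3], nil
}

func main() {
	// 0x08 and 0x07 are the OpenPGP registry IDs for SHA-256 and AES-128.
	h, a, err := parseKDF([]byte{0x03, 0x01, 0x08, 0x07})
	fmt.Println(h, a, err) // 8 7 <nil>
}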
-type ecdhKdf struct { - KdfHash kdfHashFunction - KdfAlgo kdfAlgorithm -} - -func (f *ecdhKdf) parse(r io.Reader) (err error) { - buf := make([]byte, 1) - if _, err = readFull(r, buf); err != nil { - return - } - kdfLen := int(buf[0]) - if kdfLen < 3 { - return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) - } - buf = make([]byte, kdfLen) - if _, err = readFull(r, buf); err != nil { - return - } - reserved := int(buf[0]) - f.KdfHash = kdfHashFunction(buf[1]) - f.KdfAlgo = kdfAlgorithm(buf[2]) - if reserved != 0x01 { - return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) - } - return -} - -func (f *ecdhKdf) serialize(w io.Writer) (err error) { - buf := make([]byte, 4) - // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. - buf[0] = byte(0x03) // Length of the following fields - buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now - buf[2] = byte(f.KdfHash) - buf[3] = byte(f.KdfAlgo) - _, err = w.Write(buf[:]) - return -} - -func (f *ecdhKdf) byteLen() int { - return 4 -} - -// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. -type PublicKey struct { - CreationTime time.Time - PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey - Fingerprint [20]byte - KeyId uint64 - IsSubkey bool - - n, e, p, q, g, y parsedMPI - - // RFC 6637 fields - ec *ecdsaKey - ecdh *ecdhKdf -} - -// signingKey provides a convenient abstraction over signature verification -// for v3 and v4 public keys. -type signingKey interface { - SerializeSignaturePrefix(io.Writer) - serializeWithoutHeaders(io.Writer) error -} - -func fromBig(n *big.Int) parsedMPI { - return parsedMPI{ - bytes: n.Bytes(), - bitLength: uint16(n.BitLen()), - } -} - -// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. -func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoRSA, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. -func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoDSA, - PublicKey: pub, - p: fromBig(pub.P), - q: fromBig(pub.Q), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
-func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoElGamal, - PublicKey: pub, - p: fromBig(pub.P), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDSA, - PublicKey: pub, - ec: new(ecdsaKey), - } - - switch pub.Curve { - case elliptic.P256(): - pk.ec.oid = oidCurveP256 - case elliptic.P384(): - pk.ec.oid = oidCurveP384 - case elliptic.P521(): - pk.ec.oid = oidCurveP521 - default: - panic("unknown elliptic curve") - } - - pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - - // The bit length is 3 (for the 0x04 specifying an uncompressed key) - // plus two field elements (for x and y), which are rounded up to the - // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6 - fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 - pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKey) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [6]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != 4 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - case PubKeyAlgoDSA: - err = pk.parseDSA(r) - case PubKeyAlgoElGamal: - err = pk.parseElGamal(r) - case PubKeyAlgoECDSA: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return err - } - pk.PublicKey, err = pk.ec.newECDSA() - case PubKeyAlgoECDH: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return - } - pk.ecdh = new(ecdhKdf) - if err = pk.ecdh.parse(r); err != nil { - return - } - // The ECDH key is stored in an ecdsa.PublicKey for convenience. - pk.PublicKey, err = pk.ec.newECDSA() - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKey) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := sha1.New() - pk.SerializeSignaturePrefix(fingerPrint) - pk.serializeWithoutHeaders(fingerPrint) - copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseRSA(r io.Reader) (err error) { - pk.n.bytes, pk.n.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.e.bytes, pk.e.bitLength, err = readMPI(r) - if err != nil { - return - } - - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{ - N: new(big.Int).SetBytes(pk.n.bytes), - E: 0, - } - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// parseDSA parses DSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. 
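Reviewer note (illustrative sketch, not part of the vendored file): parseDSA below consumes four MPIs via readMPI. The wire format is a two-octet big-endian bit count followed by ceil(bits/8) value octets (RFC 4880, section 3.2); a standalone decoder with hypothetical names:

package main

import (
	"fmt"
	"math/big"
)

func decodeMPI(b []byte) (*big.Int, []byte) {
	bitLen := int(b[0])<<8 | int(b[1])
	n := (bitLen + 7) / 8 // round the bit count up to whole octets
	return new(big.Int).SetBytes(b[2 : 2+n]), b[2+n:]
}

func main() {
	// A 9-bit MPI holding 0x01FF, followed by one unrelated byte.
	v, rest := decodeMPI([]byte{0x00, 0x09, 0x01, 0xFF, 0x42})
	fmt.Println(v, len(rest)) // 511 1
}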
-func (pk *PublicKey) parseDSA(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.q.bytes, pk.q.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - dsa := new(dsa.PublicKey) - dsa.P = new(big.Int).SetBytes(pk.p.bytes) - dsa.Q = new(big.Int).SetBytes(pk.q.bytes) - dsa.G = new(big.Int).SetBytes(pk.g.bytes) - dsa.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = dsa - return -} - -// parseElGamal parses ElGamal public key material from the given Reader. See -// RFC 4880, section 5.5.2. -func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - elgamal := new(elgamal.PublicKey) - elgamal.P = new(big.Int).SetBytes(pk.p.bytes) - elgamal.G = new(big.Int).SetBytes(pk.g.bytes) - elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = elgamal - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. -func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - case PubKeyAlgoDSA: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.q.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoElGamal: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoECDSA: - pLength += uint16(pk.ec.byteLen()) - case PubKeyAlgoECDH: - pLength += uint16(pk.ec.byteLen()) - pLength += uint16(pk.ecdh.byteLen()) - default: - panic("unknown public key algorithm") - } - pLength += 6 - h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - case PubKeyAlgoDSA: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.q.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoElGamal: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoECDSA: - length += pk.ec.byteLen() - case PubKeyAlgoECDH: - length += pk.ec.byteLen() - length += pk.ecdh.byteLen() - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - err = serializeHeader(w, packetType, length) - if err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. 
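Reviewer note (illustrative sketch, not part of the vendored file): serializeWithoutHeaders below always starts with six fixed octets — version 4, a 32-bit big-endian creation time, and the algorithm ID — before the algorithm-specific MPIs. Built standalone, with a hypothetical helper:

package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

func v4KeyPreamble(created time.Time, algo byte) [6]byte {
	var buf [6]byte
	buf[0] = 4
	binary.BigEndian.PutUint32(buf[1:5], uint32(created.Unix()))
	buf[5] = algo
	return buf
}

func main() {
	p := v4KeyPreamble(time.Unix(0x5f000000, 0), 1) // 1 = PubKeyAlgoRSA
	fmt.Printf("% x\n", p)                          // 04 5f 00 00 00 01
}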
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [6]byte - buf[0] = 4 - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - buf[5] = byte(pk.PubKeyAlgo) - - _, err = w.Write(buf[:]) - if err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - case PubKeyAlgoDSA: - return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) - case PubKeyAlgoElGamal: - return writeMPIs(w, pk.p, pk.g, pk.y) - case PubKeyAlgoECDSA: - return pk.ec.serialize(w) - case PubKeyAlgoECDH: - if err = pk.ec.serialize(w); err != nil { - return - } - return pk.ecdh.serialize(w) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKey) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal -} - -// VerifySignature returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - signed.Write(sig.HashSuffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) - if err != nil { - return errors.SignatureError("RSA verification failure") - } - return nil - case PubKeyAlgoDSA: - dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - case PubKeyAlgoECDSA: - ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) - if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { - return errors.SignatureError("ECDSA verification failure") - } - return nil - default: - return errors.SignatureError("Unsupported public key algorithm used in signature") - } -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. 
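Reviewer note (illustrative sketch, not part of the vendored file): VerifySignatureV3 below finalizes the hash with a five-octet trailer — the signature type plus the 32-bit creation time — and fast-rejects on the two-octet hash tag. The trailer construction in isolation:

package main

import (
	"crypto/sha1"
	"encoding/binary"
	"fmt"
)

func main() {
	suffix := make([]byte, 5)
	suffix[0] = 0                                      // SigTypeBinary
	binary.BigEndian.PutUint32(suffix[1:], 1700000000) // creation time

	h := sha1.New() // the hash choice here is illustrative only
	h.Write([]byte("signed data"))
	h.Write(suffix)
	sum := h.Sum(nil)
	fmt.Printf("hash tag: % x\n", sum[:2]) // must equal sig.HashTag
}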
-func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) - if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - case PubKeyAlgoDSA: - dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - default: - panic("shouldn't happen") - } -} - -// keySignatureHash returns a Hash of the message that needs to be signed for -// pk to assert a subkey relationship to signed. -func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - signed.SerializeSignaturePrefix(h) - signed.serializeWithoutHeaders(h) - return -} - -// VerifyKeySignature returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - if err = pk.VerifySignature(h, sig); err != nil { - return err - } - - if sig.FlagSign { - // Signing subkeys must be cross-signed. See - // https://www.gnupg.org/faq/subkey-cross-certify.html. - if sig.EmbeddedSignature == nil { - return errors.StructuralError("signing subkey is missing cross-signature") - } - // Verify the cross-signature. This is calculated over the same - // data as the main signature, so we cannot just recursively - // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { - return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) - } - if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { - return errors.StructuralError("error while verifying cross-signature: " + err.Error()) - } - } - - return nil -} - -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - return -} - -// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this -// public key. 
-func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// userIdSignatureHash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - var buf [5]byte - buf[0] = 0xb4 - buf[1] = byte(len(id) >> 24) - buf[2] = byte(len(id) >> 16) - buf[3] = byte(len(id) >> 8) - buf[4] = byte(len(id)) - h.Write(buf[:]) - h.Write([]byte(id)) - - return -} - -// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKey) KeyIdString() string { - return fmt.Sprintf("%X", pk.Fingerprint[12:20]) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKey) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.Fingerprint[16:20]) -} - -// A parsedMPI is used to store the contents of a big integer, along with the -// bit length that was specified in the original input. This allows the MPI to -// be reserialized exactly. -type parsedMPI struct { - bytes []byte - bitLength uint16 -} - -// writeMPIs is a utility function for serializing several big integers to the -// given Writer. -func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { - for _, mpi := range mpis { - err = writeMPI(w, mpi.bitLength, mpi.bytes) - if err != nil { - return - } - } - return -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKey) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - case PubKeyAlgoDSA: - bitLength = pk.p.bitLength - case PubKeyAlgoElGamal: - bitLength = pk.p.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go deleted file mode 100644 index 5daf7b6cfd..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "crypto" - "crypto/md5" - "crypto/rsa" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" -) - -// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and -// should not be used for signing or encrypting. They are supported here only for -// parsing version 3 key material and validating signatures. -// See RFC 4880, section 5.5.2. -type PublicKeyV3 struct { - CreationTime time.Time - DaysToExpire uint16 - PubKeyAlgo PublicKeyAlgorithm - PublicKey *rsa.PublicKey - Fingerprint [16]byte - KeyId uint64 - IsSubkey bool - - n, e parsedMPI -} - -// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. -// Included here for testing purposes only. RFC 4880, section 5.5.2: -// "an implementation MUST NOT generate a V3 key, but MAY accept it." -func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { - pk := &PublicKeyV3{ - CreationTime: creationTime, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKeyV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [8]byte - if _, err = readFull(r, buf[:]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKeyV3) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := md5.New() - fingerPrint.Write(pk.n.bytes) - fingerPrint.Write(pk.e.bytes) - fingerPrint.Sum(pk.Fingerprint[:0]) - pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { - if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { - return - } - if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { - return - } - - // RFC 4880 Section 12.2 requires the low 8 bytes of the - // modulus to form the key id. - if len(pk.n.bytes) < 8 { - return errors.StructuralError("v3 public key modulus is too short") - } - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. 
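Reviewer note (illustrative sketch, not part of the vendored file): SerializeSignaturePrefix below emits a 0x99 octet and a two-octet length covering six header octets plus each length-prefixed MPI; for the v4 keys earlier in this diff, the SHA-1 over this prefix and the key body is the fingerprint. For an RSA key, with a hypothetical helper:

package main

import "fmt"

func rsaSignaturePrefix(nLen, eLen int) [3]byte {
	pLength := uint16(6 + 2 + nLen + 2 + eLen)
	return [3]byte{0x99, byte(pLength >> 8), byte(pLength)}
}

func main() {
	// A 2048-bit modulus (256 octets) and a 3-octet exponent.
	fmt.Printf("% x\n", rsaSignaturePrefix(256, 3)) // 99 01 0d
}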
-func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - default: - panic("unknown public key algorithm") - } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { - length := 8 // 8 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - if err = serializeHeader(w, packetType, length); err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. -func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [8]byte - // Version 3 - buf[0] = 3 - // Creation time - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - // Days to expire - buf[5] = byte(pk.DaysToExpire >> 8) - buf[6] = byte(pk.DaysToExpire) - // Public key algorithm - buf[7] = byte(pk.PubKeyAlgo) - - if _, err = w.Write(buf[:]); err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKeyV3) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - default: - // V3 public keys only support RSA. - panic("shouldn't happen") - } -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of signed. 
-func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// userIdSignatureV3Hash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { - if !hfn.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hfn.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - h.Write([]byte(id)) - - return -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKeyV3) KeyIdString() string { - return fmt.Sprintf("%X", pk.KeyId) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKeyV3) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/vendor/golang.org/x/crypto/openpgp/packet/reader.go deleted file mode 100644 index 34bc7c613e..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/reader.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// Reader reads packets from an io.Reader and allows packets to be 'unread' so -// that they result from the next call to Next. -type Reader struct { - q []Packet - readers []io.Reader -} - -// New io.Readers are pushed when a compressed or encrypted packet is processed -// and recursively treated as a new source of packets. However, a carefully -// crafted packet can trigger an infinite recursive sequence of packets. See -// http://mumble.net/~campbell/misc/pgp-quine -// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 -// This constant limits the number of recursive packets that may be pushed. -const maxReaders = 32 - -// Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. -func (r *Reader) Next() (p Packet, err error) { - if len(r.q) > 0 { - p = r.q[len(r.q)-1] - r.q = r.q[:len(r.q)-1] - return - } - - for len(r.readers) > 0 { - p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } - if err == io.EOF { - r.readers = r.readers[:len(r.readers)-1] - continue - } - if _, ok := err.(errors.UnknownPacketTypeError); !ok { - return nil, err - } - } - - return nil, io.EOF -} - -// Push causes the Reader to start reading from a new io.Reader. When an EOF -// error is seen from the new io.Reader, it is popped and the Reader continues -// to read from the next most recent io.Reader. Push returns a StructuralError -// if pushing the reader would exceed the maximum recursion level, otherwise it -// returns nil. 
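Reviewer note (illustrative sketch, not part of the vendored file): Push below caps the reader stack at maxReaders, which is what defuses quine-style packet recursion (CVE-2013-4402), while Next pops a layer whenever it hits EOF. The same pop-on-EOF loop reduced to bytes, with hypothetical names:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

const maxDepth = 32

type readerStack struct{ readers []io.Reader }

func (s *readerStack) push(r io.Reader) error {
	if len(s.readers) >= maxDepth {
		return errors.New("too many layers of packets")
	}
	s.readers = append(s.readers, r)
	return nil
}

func (s *readerStack) nextByte() (byte, error) {
	for len(s.readers) > 0 {
		var b [1]byte
		_, err := s.readers[len(s.readers)-1].Read(b[:])
		if err == io.EOF {
			s.readers = s.readers[:len(s.readers)-1] // pop the exhausted layer
			continue
		}
		return b[0], err
	}
	return 0, io.EOF
}

func main() {
	s := &readerStack{}
	_ = s.push(bytes.NewReader([]byte{1}))
	_ = s.push(bytes.NewReader([]byte{2}))
	b, _ := s.nextByte()
	fmt.Println(b) // 2: the most recently pushed source is read first
}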
-func (r *Reader) Push(reader io.Reader) (err error) { - if len(r.readers) >= maxReaders { - return errors.StructuralError("too many layers of packets") - } - r.readers = append(r.readers, reader) - return nil -} - -// Unread causes the given Packet to be returned from the next call to Next. -func (r *Reader) Unread(p Packet) { - r.q = append(r.q, p) -} - -func NewReader(r io.Reader) *Reader { - return &Reader{ - q: nil, - readers: []io.Reader{r}, - } -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go deleted file mode 100644 index b2a24a5323..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/signature.go +++ /dev/null @@ -1,731 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "encoding/asn1" - "encoding/binary" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -const ( - // See RFC 4880, section 5.2.3.21 for details. - KeyFlagCertify = 1 << iota - KeyFlagSign - KeyFlagEncryptCommunications - KeyFlagEncryptStorage -) - -// Signature represents a signature. See RFC 4880, section 5.2. -type Signature struct { - SigType SignatureType - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - - // HashSuffix is extra data that is hashed in after the signed data. - HashSuffix []byte - // HashTag contains the first two bytes of the hash for fast rejection - // of bad signed data. - HashTag [2]byte - CreationTime time.Time - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI - ECDSASigR, ECDSASigS parsedMPI - - // rawSubpackets contains the unparsed subpackets, in order. - rawSubpackets []outputSubpacket - - // The following are optional so are nil when not included in the - // signature. - - SigLifetimeSecs, KeyLifetimeSecs *uint32 - PreferredSymmetric, PreferredHash, PreferredCompression []uint8 - IssuerKeyId *uint64 - IsPrimaryId *bool - - // FlagsValid is set if any flags were given. See RFC 4880, section - // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool - - // RevocationReason is set if this signature has been revoked. - // See RFC 4880, section 5.2.3.23 for details. - RevocationReason *uint8 - RevocationReasonText string - - // MDC is set if this signature has a feature packet that indicates - // support for MDC subpackets. - MDC bool - - // EmbeddedSignature, if non-nil, is a signature of the parent key, by - // this key. This prevents an attacker from claiming another's signing - // subkey as their own. 
- EmbeddedSignature *Signature - - outSubpackets []outputSubpacket -} - -func (sig *Signature) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.3 - var buf [5]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - if buf[0] != 4 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - - _, err = readFull(r, buf[:5]) - if err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - - var ok bool - sig.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) - l := 6 + hashedSubpacketsLength - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - copy(sig.HashSuffix[1:], buf[:5]) - hashedSubpackets := sig.HashSuffix[6:l] - _, err = readFull(r, hashedSubpackets) - if err != nil { - return - } - // See RFC 4880, section 5.2.4 - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = uint8(l >> 24) - trailer[3] = uint8(l >> 16) - trailer[4] = uint8(l >> 8) - trailer[5] = uint8(l) - - err = parseSignatureSubpackets(sig, hashedSubpackets, true) - if err != nil { - return - } - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) - unhashedSubpackets := make([]byte, unhashedSubpacketsLength) - _, err = readFull(r, unhashedSubpackets) - if err != nil { - return - } - err = parseSignatureSubpackets(sig, unhashedSubpackets, false) - if err != nil { - return - } - - _, err = readFull(r, sig.HashTag[:2]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - } - case PubKeyAlgoECDSA: - sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) - } - default: - panic("unreachable") - } - return -} - -// parseSignatureSubpackets parses subpackets of the main signature packet. See -// RFC 4880, section 5.2.3.1. 
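Reviewer note (illustrative sketch, not part of the vendored file): each subpacket parsed below starts with a 1-, 2-, or 5-octet length (RFC 4880, section 5.2.3.1); serializeSubpacketLength at the end of this file writes the inverse. Both directions standalone, with hypothetical names:

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeSubpacketLength mirrors the switch in parseSignatureSubpacket.
func decodeSubpacketLength(b []byte) (length uint32, n int) {
	switch {
	case b[0] < 192:
		return uint32(b[0]), 1
	case b[0] < 255:
		return uint32(b[0]-192)<<8 + uint32(b[1]) + 192, 2
	default:
		return binary.BigEndian.Uint32(b[1:5]), 5
	}
}

// encodeSubpacketLength is the inverse; the 2-octet form covers 192..16319.
func encodeSubpacketLength(to []byte, length int) int {
	switch {
	case length < 192:
		to[0] = byte(length)
		return 1
	case length < 16320:
		length -= 192
		to[0] = 192 + byte(length>>8)
		to[1] = byte(length)
		return 2
	default:
		to[0] = 255
		binary.BigEndian.PutUint32(to[1:5], uint32(length))
		return 5
	}
}

func main() {
	var buf [5]byte
	n := encodeSubpacketLength(buf[:], 16319)
	l, m := decodeSubpacketLength(buf[:])
	fmt.Println(n, l, m) // 2 16319 2: the encoding round-trips
}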
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { - for len(subpackets) > 0 { - subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) - if err != nil { - return - } - } - - if sig.CreationTime.IsZero() { - err = errors.StructuralError("no creation time in signature") - } - - return -} - -type signatureSubpacketType uint8 - -const ( - creationTimeSubpacket signatureSubpacketType = 2 - signatureExpirationSubpacket signatureSubpacketType = 3 - keyExpirationSubpacket signatureSubpacketType = 9 - prefSymmetricAlgosSubpacket signatureSubpacketType = 11 - issuerSubpacket signatureSubpacketType = 16 - prefHashAlgosSubpacket signatureSubpacketType = 21 - prefCompressionSubpacket signatureSubpacketType = 22 - primaryUserIdSubpacket signatureSubpacketType = 25 - keyFlagsSubpacket signatureSubpacketType = 27 - reasonForRevocationSubpacket signatureSubpacketType = 29 - featuresSubpacket signatureSubpacketType = 30 - embeddedSignatureSubpacket signatureSubpacketType = 32 -) - -// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. -func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { - // RFC 4880, section 5.2.3.1 - var ( - length uint32 - packetType signatureSubpacketType - isCritical bool - ) - switch { - case subpacket[0] < 192: - length = uint32(subpacket[0]) - subpacket = subpacket[1:] - case subpacket[0] < 255: - if len(subpacket) < 2 { - goto Truncated - } - length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 - subpacket = subpacket[2:] - default: - if len(subpacket) < 5 { - goto Truncated - } - length = uint32(subpacket[1])<<24 | - uint32(subpacket[2])<<16 | - uint32(subpacket[3])<<8 | - uint32(subpacket[4]) - subpacket = subpacket[5:] - } - if length > uint32(len(subpacket)) { - goto Truncated - } - rest = subpacket[length:] - subpacket = subpacket[:length] - if len(subpacket) == 0 { - err = errors.StructuralError("zero length signature subpacket") - return - } - packetType = signatureSubpacketType(subpacket[0] & 0x7f) - isCritical = subpacket[0]&0x80 == 0x80 - subpacket = subpacket[1:] - sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) - switch packetType { - case creationTimeSubpacket: - if !isHashed { - err = errors.StructuralError("signature creation time in non-hashed area") - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("signature creation time not four bytes") - return - } - t := binary.BigEndian.Uint32(subpacket) - sig.CreationTime = time.Unix(int64(t), 0) - case signatureExpirationSubpacket: - // Signature expiration time, section 5.2.3.10 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("expiration subpacket with bad length") - return - } - sig.SigLifetimeSecs = new(uint32) - *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case keyExpirationSubpacket: - // Key expiration time, section 5.2.3.6 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("key expiration subpacket with bad length") - return - } - sig.KeyLifetimeSecs = new(uint32) - *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case prefSymmetricAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.7 - if !isHashed { - return - } - sig.PreferredSymmetric = make([]byte, len(subpacket)) - copy(sig.PreferredSymmetric, subpacket) - case issuerSubpacket: - // Issuer, section 5.2.3.5 - if 
len(subpacket) != 8 { - err = errors.StructuralError("issuer subpacket with bad length") - return - } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) - case prefHashAlgosSubpacket: - // Preferred hash algorithms, section 5.2.3.8 - if !isHashed { - return - } - sig.PreferredHash = make([]byte, len(subpacket)) - copy(sig.PreferredHash, subpacket) - case prefCompressionSubpacket: - // Preferred compression algorithms, section 5.2.3.9 - if !isHashed { - return - } - sig.PreferredCompression = make([]byte, len(subpacket)) - copy(sig.PreferredCompression, subpacket) - case primaryUserIdSubpacket: - // Primary User ID, section 5.2.3.19 - if !isHashed { - return - } - if len(subpacket) != 1 { - err = errors.StructuralError("primary user id subpacket with bad length") - return - } - sig.IsPrimaryId = new(bool) - if subpacket[0] > 0 { - *sig.IsPrimaryId = true - } - case keyFlagsSubpacket: - // Key flags, section 5.2.3.21 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty key flags subpacket") - return - } - sig.FlagsValid = true - if subpacket[0]&KeyFlagCertify != 0 { - sig.FlagCertify = true - } - if subpacket[0]&KeyFlagSign != 0 { - sig.FlagSign = true - } - if subpacket[0]&KeyFlagEncryptCommunications != 0 { - sig.FlagEncryptCommunications = true - } - if subpacket[0]&KeyFlagEncryptStorage != 0 { - sig.FlagEncryptStorage = true - } - case reasonForRevocationSubpacket: - // Reason For Revocation, section 5.2.3.23 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty revocation reason subpacket") - return - } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] - sig.RevocationReasonText = string(subpacket[1:]) - case featuresSubpacket: - // Features subpacket, section 5.2.3.24 specifies a very general - // mechanism for OpenPGP implementations to signal support for new - // features. In practice, the subpacket is used exclusively to - // indicate support for MDC-protected encryption. - sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 - case embeddedSignatureSubpacket: - // Only usage is in signatures that cross-certify - // signing subkeys. section 5.2.3.26 describes the - // format, with its usage described in section 11.1 - if sig.EmbeddedSignature != nil { - err = errors.StructuralError("Cannot have multiple embedded signatures") - return - } - sig.EmbeddedSignature = new(Signature) - // Embedded signatures are required to be v4 signatures see - // section 12.1. However, we only parse v4 signatures in this - // file anyway. - if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { - return nil, err - } - if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { - return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) - } - default: - if isCritical { - err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) - return - } - } - return - -Truncated: - err = errors.StructuralError("signature subpacket truncated") - return -} - -// subpacketLengthLength returns the length, in bytes, of an encoded length value. -func subpacketLengthLength(length int) int { - if length < 192 { - return 1 - } - if length < 16320 { - return 2 - } - return 5 -} - -// serializeSubpacketLength marshals the given length into to. -func serializeSubpacketLength(to []byte, length int) int { - // RFC 4880, Section 4.2.2. 
- if length < 192 { - to[0] = byte(length) - return 1 - } - if length < 16320 { - length -= 192 - to[0] = byte((length >> 8) + 192) - to[1] = byte(length) - return 2 - } - to[0] = 255 - to[1] = byte(length >> 24) - to[2] = byte(length >> 16) - to[3] = byte(length >> 8) - to[4] = byte(length) - return 5 -} - -// subpacketsLength returns the serialized length, in bytes, of the given -// subpackets. -func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - length += subpacketLengthLength(len(subpacket.contents) + 1) - length += 1 // type byte - length += len(subpacket.contents) - } - } - return -} - -// serializeSubpackets marshals the given subpackets into to. -func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - n := serializeSubpacketLength(to, len(subpacket.contents)+1) - to[n] = byte(subpacket.subpacketType) - to = to[1+n:] - n = copy(to, subpacket.contents) - to = to[n:] - } - } - return -} - -// KeyExpired returns whether sig is a self-signature of a key that has -// expired. -func (sig *Signature) KeyExpired(currentTime time.Time) bool { - if sig.KeyLifetimeSecs == nil { - return false - } - expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} - -// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. -func (sig *Signature) buildHashSuffix() (err error) { - hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) - - var ok bool - l := 6 + hashedSubpacketsLen - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - sig.HashSuffix[1] = uint8(sig.SigType) - sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) - sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) - if !ok { - sig.HashSuffix = nil - return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) - } - sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) - sig.HashSuffix[5] = byte(hashedSubpacketsLen) - serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = byte(l >> 24) - trailer[3] = byte(l >> 16) - trailer[4] = byte(l >> 8) - trailer[5] = byte(l) - return -} - -func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { - err = sig.buildHashSuffix() - if err != nil { - return - } - - h.Write(sig.HashSuffix) - digest = h.Sum(nil) - copy(sig.HashTag[:], digest) - return -} - -// Sign signs a message with a private key. The hash, h, must contain -// the hash of the message to be signed and will be mutated by this function. -// On success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. 
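
Both the parser earlier in the file and buildHashSuffix above append the same six-byte v4 trailer from RFC 4880, section 5.2.4, after the hashed area: the version octet, 0xff, and the big-endian byte count of the hashed portion of the packet. A small sketch of just that framing (the helper name is made up):

```go
package main

import "fmt"

// v4Trailer builds the six trailer octets that are hashed after the signed
// data (RFC 4880, section 5.2.4): version, 0xff, and the big-endian length
// of the hashed portion of the signature packet.
func v4Trailer(hashedLen int) [6]byte {
	return [6]byte{
		4, 0xff,
		byte(hashedLen >> 24), byte(hashedLen >> 16),
		byte(hashedLen >> 8), byte(hashedLen),
	}
}

func main() {
	// For a signature whose hashed area is 6 header bytes plus 10 bytes of
	// hashed subpackets, the trailer encodes the length 16.
	fmt.Printf("% x\n", v4Trailer(6+10)) // 04 ff 00 00 00 10
}
```
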
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { - sig.outSubpackets = sig.buildSubpackets() - digest, err := sig.signPrepareHash(h) - if err != nil { - return - } - - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - // supports both *rsa.PrivateKey and crypto.Signer - sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) - case PubKeyAlgoDSA: - dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) - - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 - if len(digest) > subgroupSize { - digest = digest[:subgroupSize] - } - r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) - if err == nil { - sig.DSASigR.bytes = r.Bytes() - sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) - sig.DSASigS.bytes = s.Bytes() - sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) - } - case PubKeyAlgoECDSA: - var r, s *big.Int - if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { - // direct support, avoid asn1 wrapping/unwrapping - r, s, err = ecdsa.Sign(config.Random(), pk, digest) - } else { - var b []byte - b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - if err == nil { - r, s, err = unwrapECDSASig(b) - } - } - if err == nil { - sig.ECDSASigR = fromBig(r) - sig.ECDSASigS = fromBig(s) - } - default: - err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) - } - - return -} - -// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA -// signature. -func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { - var ecdsaSig struct { - R, S *big.Int - } - _, err = asn1.Unmarshal(b, &ecdsaSig) - if err != nil { - return - } - return ecdsaSig.R, ecdsaSig.S, nil -} - -// SignUserId computes a signature from priv, asserting that pub is a valid -// key for the identity id. On success, the signature is stored in sig. Call -// Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) - } - -// SignKey computes a signature from priv, asserting that pub is a subkey. On -// success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first.
-func (sig *Signature) Serialize(w io.Writer) (err error) { - if len(sig.outSubpackets) == 0 { - sig.outSubpackets = sig.rawSubpackets - } - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - sigLength := 0 - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sigLength = 2 + len(sig.RSASignature.bytes) - case PubKeyAlgoDSA: - sigLength = 2 + len(sig.DSASigR.bytes) - sigLength += 2 + len(sig.DSASigS.bytes) - case PubKeyAlgoECDSA: - sigLength = 2 + len(sig.ECDSASigR.bytes) - sigLength += 2 + len(sig.ECDSASigS.bytes) - default: - panic("impossible") - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + - 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + - 2 /* hash tag */ + sigLength - err = serializeHeader(w, packetTypeSignature, length) - if err != nil { - return - } - - _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) - if err != nil { - return - } - - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) - - _, err = w.Write(unhashedSubpackets) - if err != nil { - return - } - _, err = w.Write(sig.HashTag[:]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - case PubKeyAlgoECDSA: - err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) - default: - panic("impossible") - } - return -} - -// outputSubpacket represents a subpacket to be marshaled. -type outputSubpacket struct { - hashed bool // true if this subpacket is in the hashed area. - subpacketType signatureSubpacketType - isCritical bool - contents []byte -} - -func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { - creationTime := make([]byte, 4) - binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) - subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) - - if sig.IssuerKeyId != nil { - keyId := make([]byte, 8) - binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) - } - - if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { - sigLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) - } - - // Key flags may only appear in self-signatures or certification signatures. 
- - if sig.FlagsValid { - var flags byte - if sig.FlagCertify { - flags |= KeyFlagCertify - } - if sig.FlagSign { - flags |= KeyFlagSign - } - if sig.FlagEncryptCommunications { - flags |= KeyFlagEncryptCommunications - } - if sig.FlagEncryptStorage { - flags |= KeyFlagEncryptStorage - } - subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) - } - - // The following subpackets may only appear in self-signatures - - if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { - keyLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) - } - - if sig.IsPrimaryId != nil && *sig.IsPrimaryId { - subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) - } - - if len(sig.PreferredSymmetric) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) - } - - if len(sig.PreferredHash) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) - } - - if len(sig.PreferredCompression) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) - } - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go deleted file mode 100644 index 6edff88934..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "fmt" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// SignatureV3 represents older version 3 signatures. These signatures are less secure -// than version 4 and should not be used to create new signatures. They are included -// here for backwards compatibility to read and validate with older key material. -// See RFC 4880, section 5.2.2. -type SignatureV3 struct { - SigType SignatureType - CreationTime time.Time - IssuerKeyId uint64 - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - HashTag [2]byte - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI -} - -func (sig *SignatureV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.2 - var buf [8]byte - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] != 5 { - err = errors.UnsupportedError( - "invalid hashed material length " + strconv.Itoa(int(buf[0]))) - return - } - - // Read hashed material: signature type + creation time - if _, err = readFull(r, buf[:5]); err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - t := binary.BigEndian.Uint32(buf[1:5]) - sig.CreationTime = time.Unix(int64(t), 0) - - // Eight-octet Key ID of signer. 
- if _, err = readFull(r, buf[:8]); err != nil { - return - } - sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) - - // Public-key and hash algorithm - if _, err = readFull(r, buf[:2]); err != nil { - return - } - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - var ok bool - if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - // Two-octet field holding left 16 bits of signed hash value. - if _, err = readFull(r, sig.HashTag[:2]); err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { - return - } - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - default: - panic("unreachable") - } - return -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. -func (sig *SignatureV3) Serialize(w io.Writer) (err error) { - buf := make([]byte, 8) - - // Write the sig type and creation time - buf[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) - if _, err = w.Write(buf[:5]); err != nil { - return - } - - // Write the issuer long key ID - binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) - if _, err = w.Write(buf[:8]); err != nil { - return - } - - // Write public key algorithm, hash ID, and hash value - buf[0] = byte(sig.PubKeyAlgo) - hashId, ok := s2k.HashToHashId(sig.Hash) - if !ok { - return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) - } - buf[1] = hashId - copy(buf[2:4], sig.HashTag[:]) - if _, err = w.Write(buf[:4]); err != nil { - return - } - - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - default: - panic("impossible") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go deleted file mode 100644 index 744c2d2c42..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto/cipher" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// This is the largest session key that we'll support. Since no 512-bit cipher -// has even been seriously used, this is comfortably large. -const maxSessionKeySizeInBytes = 64 - -// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC -// 4880, section 5.3. 
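
In the Decrypt method below, the S2K function first turns the passphrase into a key-encrypting key; any encrypted session key that follows is then CFB-decrypted with an all-zero IV, and the first recovered octet names the session cipher. A sketch of that CFB step, assuming AES-128 for the KEK (all names and buffers here are placeholders):

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
	"log"
)

// decryptSessionKey mimics the CFB step of SymmetricKeyEncrypted.Decrypt:
// the encrypted blob is a one-octet cipher id followed by the session key,
// encrypted under the passphrase-derived KEK with an all-zero IV.
func decryptSessionKey(kek, encrypted []byte) (cipherAlgo byte, sessionKey []byte, err error) {
	block, err := aes.NewCipher(kek) // assumption: the KEK selects AES-128
	if err != nil {
		return 0, nil, err
	}
	iv := make([]byte, block.BlockSize()) // RFC 4880: the IV is all zeros
	plain := make([]byte, len(encrypted))
	cipher.NewCFBDecrypter(block, iv).XORKeyStream(plain, encrypted)
	return plain[0], plain[1:], nil // first octet selects the session cipher
}

func main() {
	kek := make([]byte, 16)         // placeholder KEK from the S2K function
	encrypted := make([]byte, 1+16) // placeholder algo byte plus 16-byte key
	algo, key, err := decryptSessionKey(kek, encrypted)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cipher id %d, %d-byte session key\n", algo, len(key))
}
```
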
-type SymmetricKeyEncrypted struct { - CipherFunc CipherFunction - s2k func(out, in []byte) - encryptedKey []byte -} - -const symmetricKeyEncryptedVersion = 4 - -func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { - // RFC 4880, section 5.3. - var buf [2]byte - if _, err := readFull(r, buf[:]); err != nil { - return err - } - if buf[0] != symmetricKeyEncryptedVersion { - return errors.UnsupportedError("SymmetricKeyEncrypted version") - } - ske.CipherFunc = CipherFunction(buf[1]) - - if ske.CipherFunc.KeySize() == 0 { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) - } - - var err error - ske.s2k, err = s2k.Parse(r) - if err != nil { - return err - } - - encryptedKey := make([]byte, maxSessionKeySizeInBytes) - // The session key may follow. We just have to try and read to find - // out. If it exists then we limit it to maxSessionKeySizeInBytes. - n, err := readFull(r, encryptedKey) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if n != 0 { - if n == maxSessionKeySizeInBytes { - return errors.UnsupportedError("oversized encrypted session key") - } - ske.encryptedKey = encryptedKey[:n] - } - - return nil -} - -// Decrypt attempts to decrypt an encrypted session key and returns the key and -// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data -// packet. -func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { - key := make([]byte, ske.CipherFunc.KeySize()) - ske.s2k(key, passphrase) - - if len(ske.encryptedKey) == 0 { - return key, ske.CipherFunc, nil - } - - // the IV is all zeros - iv := make([]byte, ske.CipherFunc.blockSize()) - c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) - plaintextKey := make([]byte, len(ske.encryptedKey)) - c.XORKeyStream(plaintextKey, ske.encryptedKey) - cipherFunc := CipherFunction(plaintextKey[0]) - if cipherFunc.blockSize() == 0 { - return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - plaintextKey = plaintextKey[1:] - if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { - return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + - "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") - } - return plaintextKey, cipherFunc, nil -} - -// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The -// packet contains a random session key, encrypted by a key derived from the -// given passphrase. The session key is returned and must be passed to -// SerializeSymmetricallyEncrypted. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { - cipherFunc := config.Cipher() - keySize := cipherFunc.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - - s2kBuf := new(bytes.Buffer) - keyEncryptingKey := make([]byte, keySize) - // s2k.Serialize salts and stretches the passphrase, and writes the - // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) - if err != nil { - return - } - s2kBytes := s2kBuf.Bytes() - - packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) - if err != nil { - return - } - - var buf [2]byte - buf[0] = symmetricKeyEncryptedVersion - buf[1] = byte(cipherFunc) - _, err = w.Write(buf[:]) - if err != nil { - return - } - _, err = w.Write(s2kBytes) - if err != nil { - return - } - - sessionKey := make([]byte, keySize) - _, err = io.ReadFull(config.Random(), sessionKey) - if err != nil { - return - } - iv := make([]byte, cipherFunc.blockSize()) - c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) - encryptedCipherAndKey := make([]byte, keySize+1) - c.XORKeyStream(encryptedCipherAndKey, buf[1:]) - c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) - _, err = w.Write(encryptedCipherAndKey) - if err != nil { - return - } - - key = sessionKey - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go deleted file mode 100644 index 1a1a62964f..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto/cipher" - "crypto/sha1" - "crypto/subtle" - "golang.org/x/crypto/openpgp/errors" - "hash" - "io" - "strconv" -) - -// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The -// encrypted contents will consist of more OpenPGP packets. See RFC 4880, -// sections 5.7 and 5.13. -type SymmetricallyEncrypted struct { - MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. - contents io.Reader - prefix []byte -} - -const symmetricallyEncryptedVersion = 1 - -func (se *SymmetricallyEncrypted) parse(r io.Reader) error { - if se.MDC { - // See RFC 4880, section 5.13. - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - if buf[0] != symmetricallyEncryptedVersion { - return errors.UnsupportedError("unknown SymmetricallyEncrypted version") - } - } - se.contents = r - return nil -} - -// Decrypt returns a ReadCloser, from which the decrypted contents of the -// packet can be read. An incorrect key can, with high probability, be detected -// immediately and this will result in a KeyIncorrect error being returned. -func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { - keySize := c.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) - } - if len(key) != keySize { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") - } - - if se.prefix == nil { - se.prefix = make([]byte, c.blockSize()+2) - _, err := readFull(se.contents, se.prefix) - if err != nil { - return nil, err - } - } else if len(se.prefix) != c.blockSize()+2 { - return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") - } - - ocfbResync := OCFBResync - if se.MDC { - // MDC packets use a different form of OCFB mode. 
- ocfbResync = OCFBNoResync - } - - s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) - if s == nil { - return nil, errors.ErrKeyIncorrect - } - - plaintext := cipher.StreamReader{S: s, R: se.contents} - - if se.MDC { - // MDC packets have an embedded hash that we need to check. - h := sha1.New() - h.Write(se.prefix) - return &seMDCReader{in: plaintext, h: h}, nil - } - - // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. - return seReader{plaintext}, nil -} - -// seReader wraps an io.Reader with a no-op Close method. -type seReader struct { - in io.Reader -} - -func (ser seReader) Read(buf []byte) (int, error) { - return ser.in.Read(buf) -} - -func (ser seReader) Close() error { - return nil -} - -const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size - -// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold -// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an -// MDC packet containing a hash of the previous contents which is checked -// against the running hash. See RFC 4880, section 5.13. -type seMDCReader struct { - in io.Reader - h hash.Hash - trailer [mdcTrailerSize]byte - scratch [mdcTrailerSize]byte - trailerUsed int - error bool - eof bool -} - -func (ser *seMDCReader) Read(buf []byte) (n int, err error) { - if ser.error { - err = io.ErrUnexpectedEOF - return - } - if ser.eof { - err = io.EOF - return - } - - // If we haven't yet filled the trailer buffer then we must do that - // first. - for ser.trailerUsed < mdcTrailerSize { - n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) - ser.trailerUsed += n - if err == io.EOF { - if ser.trailerUsed != mdcTrailerSize { - n = 0 - err = io.ErrUnexpectedEOF - ser.error = true - return - } - ser.eof = true - n = 0 - return - } - - if err != nil { - n = 0 - return - } - } - - // If it's a short read then we read into a temporary buffer and shift - // the data into the caller's buffer. - if len(buf) <= mdcTrailerSize { - n, err = readFull(ser.in, ser.scratch[:len(buf)]) - copy(buf, ser.trailer[:n]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], ser.trailer[n:]) - copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) - if n < len(buf) { - ser.eof = true - err = io.EOF - } - return - } - - n, err = ser.in.Read(buf[mdcTrailerSize:]) - copy(buf, ser.trailer[:]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], buf[n:]) - - if err == io.EOF { - ser.eof = true - } - return -} - -// This is a new-format packet tag byte for a type 19 (MDC) packet. -const mdcPacketTagByte = byte(0x80) | 0x40 | 19 - -func (ser *seMDCReader) Close() error { - if ser.error { - return errors.SignatureError("error during reading") - } - - for !ser.eof { - // We haven't seen EOF so we need to read to the end - var buf [1024]byte - _, err := ser.Read(buf[:]) - if err == io.EOF { - break - } - if err != nil { - return errors.SignatureError("error during reading") - } - } - - if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { - return errors.SignatureError("MDC packet not found") - } - ser.h.Write(ser.trailer[:2]) - - final := ser.h.Sum(nil) - if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { - return errors.SignatureError("hash mismatch") - } - return nil -} - -// An seMDCWriter writes through to an io.WriteCloser while maintaining a running -// hash of the data written. On close, it emits an MDC packet containing the -// running hash.
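
The 22-byte trailer that the reader above and the writer below exchange is fixed by the format: a new-format packet tag for type 19, a one-octet length that is always sha1.Size, then the 20-byte digest. A quick check of those constants:

```go
package main

import (
	"crypto/sha1"
	"fmt"
)

func main() {
	// New-format packet tag for a type 19 (MDC) packet, as in the code above.
	const mdcPacketTagByte = byte(0x80) | 0x40 | 19
	const mdcTrailerSize = 1 /* tag */ + 1 /* length */ + sha1.Size
	fmt.Printf("tag byte 0x%x, trailer %d bytes\n", mdcPacketTagByte, mdcTrailerSize)
	// Output: tag byte 0xd3, trailer 22 bytes
}
```
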
-type seMDCWriter struct { - w io.WriteCloser - h hash.Hash -} - -func (w *seMDCWriter) Write(buf []byte) (n int, err error) { - w.h.Write(buf) - return w.w.Write(buf) -} - -func (w *seMDCWriter) Close() (err error) { - var buf [mdcTrailerSize]byte - - buf[0] = mdcPacketTagByte - buf[1] = sha1.Size - w.h.Write(buf[:2]) - digest := w.h.Sum(nil) - copy(buf[2:], digest) - - _, err = w.w.Write(buf[:]) - if err != nil { - return - } - return w.w.Close() -} - -// noOpCloser is like an io.NopCloser, but for an io.Writer. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet -// to w and returns a WriteCloser to which the to-be-encrypted packets can be -// written. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { - if c.KeySize() != len(key) { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") - } - writeCloser := noOpCloser{w} - ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) - if err != nil { - return - } - - _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) - if err != nil { - return - } - - block := c.new(key) - blockSize := block.BlockSize() - iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) - if err != nil { - return - } - s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) - _, err = ciphertext.Write(prefix) - if err != nil { - return - } - plaintext := cipher.StreamWriter{S: s, W: ciphertext} - - h := sha1.New() - h.Write(iv) - h.Write(iv[blockSize-2:]) - contents = &seMDCWriter{w: plaintext, h: h} - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go deleted file mode 100644 index 48a8931468..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/read.go +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package openpgp implements high level operations on OpenPGP messages. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package openpgp // import "golang.org/x/crypto/openpgp" - -import ( - "crypto" - _ "crypto/sha256" - "hash" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// SignatureType is the armor type for a PGP signature. -var SignatureType = "PGP SIGNATURE" - -// readArmored reads an armored block with the given type. -func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { - block, err := armor.Decode(r) - if err != nil { - return - } - - if block.Type != expectedType { - return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) - } - - return block.Body, nil -} - -// MessageDetails contains the result of parsing an OpenPGP encrypted and/or -// signed message. 
-type MessageDetails struct { - IsEncrypted bool // true if the message was encrypted. - EncryptedToKeyIds []uint64 // the list of recipient key ids. - IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. - DecryptedWith Key // the private key used to decrypt the message, if any. - IsSigned bool // true if the message is signed. - SignedByKeyId uint64 // the key id of the signer, if any. - SignedBy *Key // the key of the signer, if available. - LiteralData *packet.LiteralData // the metadata of the contents - UnverifiedBody io.Reader // the contents of the message. - - // If IsSigned is true and SignedBy is non-zero then the signature will - // be verified as UnverifiedBody is read. The signature cannot be - // checked until the whole of UnverifiedBody is read so UnverifiedBody - // must be consumed until EOF before the data can be trusted. Even if a - // message isn't signed (or the signer is unknown) the data may contain - // an authentication code that is only checked once UnverifiedBody has - // been consumed. Once EOF has been seen, the following fields are - // valid. (An authentication code failure is reported as a - // SignatureError error when reading from UnverifiedBody.) - SignatureError error // nil if the signature is good. - Signature *packet.Signature // the signature packet itself, if v4 (default) - SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature - - decrypted io.ReadCloser -} - -// A PromptFunction is used as a callback by functions that may need to decrypt -// a private key, or prompt for a passphrase. It is called with a list of -// acceptable, encrypted private keys and a boolean that indicates whether a -// passphrase is usable. It should either decrypt a private key or return a -// passphrase to try. If the decrypted private key or given passphrase isn't -// correct, the function will be called again, forever. Any error returned will -// be passed up. -type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) - -// A keyEnvelopePair is used to store a private key with the envelope that -// contains a symmetric key, encrypted with that key. -type keyEnvelopePair struct { - key Key - encryptedKey *packet.EncryptedKey -} - -// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. -// The given KeyRing should contain both public keys (for signature -// verification) and, possibly encrypted, private keys for decrypting. -// If config is nil, sensible defaults will be used. -func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { - var p packet.Packet - - var symKeys []*packet.SymmetricKeyEncrypted - var pubKeys []keyEnvelopePair - var se *packet.SymmetricallyEncrypted - - packets := packet.NewReader(r) - md = new(MessageDetails) - md.IsEncrypted = true - - // The message, if encrypted, starts with a number of packets - // containing an encrypted decryption key. The decryption key is either - // encrypted to a public key, or with a passphrase. This loop - // collects these packets. -ParsePackets: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.SymmetricKeyEncrypted: - // This packet contains the decryption key encrypted with a passphrase. - md.IsSymmetricallyEncrypted = true - symKeys = append(symKeys, p) - case *packet.EncryptedKey: - // This packet contains the decryption key encrypted to a public key. 
- md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) - switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: - break - default: - continue - } - var keys []Key - if p.KeyId == 0 { - keys = keyring.DecryptionKeys() - } else { - keys = keyring.KeysById(p.KeyId) - } - for _, k := range keys { - pubKeys = append(pubKeys, keyEnvelopePair{k, p}) - } - case *packet.SymmetricallyEncrypted: - se = p - break ParsePackets - case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: - // This message isn't encrypted. - if len(symKeys) != 0 || len(pubKeys) != 0 { - return nil, errors.StructuralError("key material not followed by encrypted message") - } - packets.Unread(p) - return readSignedMessage(packets, nil, keyring) - } - } - - var candidates []Key - var decrypted io.ReadCloser - - // Now that we have the list of encrypted keys we need to decrypt at - // least one of them or, if we cannot, we need to call the prompt - // function so that it can decrypt a key or give us a passphrase. -FindKey: - for { - // See if any of the keys already have a private key available - candidates = candidates[:0] - candidateFingerprints := make(map[string]bool) - - for _, pk := range pubKeys { - if pk.key.PrivateKey == nil { - continue - } - if !pk.key.PrivateKey.Encrypted { - if len(pk.encryptedKey.Key) == 0 { - pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) - } - if len(pk.encryptedKey.Key) == 0 { - continue - } - decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - md.DecryptedWith = pk.key - break FindKey - } - } else { - fpr := string(pk.key.PublicKey.Fingerprint[:]) - if v := candidateFingerprints[fpr]; v { - continue - } - candidates = append(candidates, pk.key) - candidateFingerprints[fpr] = true - } - } - - if len(candidates) == 0 && len(symKeys) == 0 { - return nil, errors.ErrKeyIncorrect - } - - if prompt == nil { - return nil, errors.ErrKeyIncorrect - } - - passphrase, err := prompt(candidates, len(symKeys) != 0) - if err != nil { - return nil, err - } - - // Try the symmetric passphrase first - if len(symKeys) != 0 && passphrase != nil { - for _, s := range symKeys { - key, cipherFunc, err := s.Decrypt(passphrase) - if err == nil { - decrypted, err = se.Decrypt(cipherFunc, key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - break FindKey - } - } - - } - } - } - - md.decrypted = decrypted - if err := packets.Push(decrypted); err != nil { - return nil, err - } - return readSignedMessage(packets, md, keyring) -} - -// readSignedMessage reads a possibly signed message if mdin is non-zero then -// that structure is updated and returned. Otherwise a fresh MessageDetails is -// used. 
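
End to end, ReadMessage is typically driven as below. This is a hedged sketch: the file names and passphrase are hypothetical, and ReadArmoredKeyRing comes from this package's keys.go, which is outside this hunk. Note the guard in the prompt callback, since a wrong passphrase is retried forever:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	kf, err := os.Open("keyring.asc") // hypothetical armored keyring
	if err != nil {
		log.Fatal(err)
	}
	defer kf.Close()
	keyring, err := openpgp.ReadArmoredKeyRing(kf)
	if err != nil {
		log.Fatal(err)
	}

	mf, err := os.Open("message.gpg") // hypothetical encrypted message
	if err != nil {
		log.Fatal(err)
	}
	defer mf.Close()

	// Bail out on the second call rather than loop on a wrong passphrase.
	prompted := false
	prompt := func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
		if prompted {
			return nil, fmt.Errorf("wrong passphrase")
		}
		prompted = true
		return []byte("hunter2"), nil // hypothetical passphrase
	}

	md, err := openpgp.ReadMessage(mf, keyring, prompt, nil)
	if err != nil {
		log.Fatal(err)
	}

	// UnverifiedBody must be drained before signature and MDC results are valid.
	body, err := io.ReadAll(md.UnverifiedBody)
	if err != nil {
		log.Fatal(err)
	}
	if md.IsSigned && md.SignatureError != nil {
		log.Fatal("bad signature: ", md.SignatureError)
	}
	fmt.Printf("%d plaintext bytes (encrypted=%v, signed=%v)\n",
		len(body), md.IsEncrypted, md.IsSigned)
}
```
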
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { - if mdin == nil { - mdin = new(MessageDetails) - } - md = mdin - - var p packet.Packet - var h hash.Hash - var wrappedHash hash.Hash -FindLiteralData: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.Compressed: - if err := packets.Push(p.Body); err != nil { - return nil, err - } - case *packet.OnePassSignature: - if !p.IsLast { - return nil, errors.UnsupportedError("nested signatures") - } - - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) - if err != nil { - md = nil - return - } - - md.IsSigned = true - md.SignedByKeyId = p.KeyId - keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) - if len(keys) > 0 { - md.SignedBy = &keys[0] - } - case *packet.LiteralData: - md.LiteralData = p - break FindLiteralData - } - } - - if md.SignedBy != nil { - md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} - } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} - } else { - md.UnverifiedBody = md.LiteralData.Body - } - - return md, nil -} - -// hashForSignature returns a pair of hashes that can be used to verify a -// signature. The signature may specify that the contents of the signed message -// should be preprocessed (i.e. to normalize line endings). Thus this function -// returns two hashes. The second should be used to hash the message itself and -// performs any needed preprocessing. -func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { - if !hashId.Available() { - return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) - } - h := hashId.New() - - switch sigType { - case packet.SigTypeBinary: - return h, h, nil - case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil - } - - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) -} - -// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF -// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger -// MDC checks. -type checkReader struct { - md *MessageDetails -} - -func (cr checkReader) Read(buf []byte) (n int, err error) { - n, err = cr.md.LiteralData.Body.Read(buf) - if err == io.EOF { - mdcErr := cr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - return -} - -// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes -// the data as it is read. When it sees an EOF from the underlying io.Reader -// it parses and checks a trailing Signature packet and triggers any MDC checks. 
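
Farther down in this file, CheckDetachedSignature and its armored wrapper package this hashing machinery into a single verification call. A hedged usage sketch (the paths are hypothetical; ReadArmoredKeyRing is defined in keys.go, outside this hunk):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	kf, err := os.Open("keyring.asc") // hypothetical armored public keyring
	if err != nil {
		log.Fatal(err)
	}
	defer kf.Close()
	keyring, err := openpgp.ReadArmoredKeyRing(kf)
	if err != nil {
		log.Fatal(err)
	}

	signed, err := os.Open("file.txt") // the signed data
	if err != nil {
		log.Fatal(err)
	}
	defer signed.Close()

	sig, err := os.Open("file.txt.asc") // the armored detached signature
	if err != nil {
		log.Fatal(err)
	}
	defer sig.Close()

	signer, err := openpgp.CheckArmoredDetachedSignature(keyring, signed, sig)
	if err != nil {
		log.Fatal("verification failed: ", err)
	}
	fmt.Printf("good signature from key %X\n", signer.PrimaryKey.KeyId)
}
```
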
-type signatureCheckReader struct { - packets *packet.Reader - h, wrappedHash hash.Hash - md *MessageDetails -} - -func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { - n, err = scr.md.LiteralData.Body.Read(buf) - scr.wrappedHash.Write(buf[:n]) - if err == io.EOF { - var p packet.Packet - p, scr.md.SignatureError = scr.packets.Next() - if scr.md.SignatureError != nil { - return - } - - var ok bool - if scr.md.Signature, ok = p.(*packet.Signature); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) - } else { - scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") - return - } - - // The SymmetricallyEncrypted packet, if any, might have an - // unsigned hash of its own. In order to check this we need to - // close that Reader. - if scr.md.decrypted != nil { - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - } - return -} - -// CheckDetachedSignature takes a signed file and a detached signature and -// returns the signer if the signature is valid. If the signer isn't known, -// ErrUnknownIssuer is returned. -func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - var issuerKeyId uint64 - var hashFunc crypto.Hash - var sigType packet.SignatureType - var keys []Key - var p packet.Packet - - packets := packet.NewReader(signature) - for { - p, err = packets.Next() - if err == io.EOF { - return nil, errors.ErrUnknownIssuer - } - if err != nil { - return nil, err - } - - switch sig := p.(type) { - case *packet.Signature: - if sig.IssuerKeyId == nil { - return nil, errors.StructuralError("signature doesn't have an issuer") - } - issuerKeyId = *sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - case *packet.SignatureV3: - issuerKeyId = sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - default: - return nil, errors.StructuralError("non signature packet found") - } - - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) - if len(keys) > 0 { - break - } - } - - if len(keys) == 0 { - panic("unreachable") - } - - h, wrappedHash, err := hashForSignature(hashFunc, sigType) - if err != nil { - return nil, err - } - - if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { - return nil, err - } - - for _, key := range keys { - switch sig := p.(type) { - case *packet.Signature: - err = key.PublicKey.VerifySignature(h, sig) - case *packet.SignatureV3: - err = key.PublicKey.VerifySignatureV3(h, sig) - default: - panic("unreachable") - } - - if err == nil { - return key.Entity, nil - } - } - - return nil, err -} - -// CheckArmoredDetachedSignature performs the same actions as -// CheckDetachedSignature but expects the signature to be armored. -func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - body, err := readArmored(signature, SignatureType) - if err != nil { - return - } - - return CheckDetachedSignature(keyring, signed, body) -} diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go deleted file mode 100644 index f53244a1c7..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package s2k implements the various OpenPGP string-to-key transforms as -// specified in RFC 4880 section 3.7.1. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package s2k // import "golang.org/x/crypto/openpgp/s2k" - -import ( - "crypto" - "hash" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" -) - -// Config collects configuration parameters for s2k key-stretching -// transformations. A nil *Config is valid and results in all default -// values. Currently, Config is used only by the Serialize function in -// this package. -type Config struct { - // Hash is the default hash function to be used. If - // nil, SHA1 is used. - Hash crypto.Hash - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 is used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int -} - -func (c *Config) hash() crypto.Hash { - if c == nil || uint(c.Hash) == 0 { - // SHA1 is the historical default in this package. - return crypto.SHA1 - } - - return c.Hash -} - -func (c *Config) encodedCount() uint8 { - if c == nil || c.S2KCount == 0 { - return 96 // The common case. Corresponding to 65536 - } - - i := c.S2KCount - switch { - // Behave like GPG. Should we make 65536 the lowest value used? - case i < 1024: - i = 1024 - case i > 65011712: - i = 65011712 - } - - return encodeCount(i) -} - -// encodeCount converts an iterative "count" in the range 1024 to -// 65011712, inclusive, to an encoded count. The return value is the -// octet that is actually stored in the GPG file. encodeCount panics -// if i is not in the above range (encodedCount above takes care to -// pass i in the correct range). See RFC 4880 Section 3.7.1.3. -func encodeCount(i int) uint8 { - if i < 1024 || i > 65011712 { - panic("count arg i outside the required range") - } - - for encoded := 0; encoded < 256; encoded++ { - count := decodeCount(uint8(encoded)) - if count >= i { - return uint8(encoded) - } - } - - return 255 -} - -// decodeCount returns the s2k mode 3 iterative "count" corresponding to -// the encoded octet c. -func decodeCount(c uint8) int { - return (16 + int(c&15)) << (uint32(c>>4) + 6) -} - -// Simple writes to out the result of computing the Simple S2K function (RFC -// 4880, section 3.7.1.1) using the given hash and input passphrase. -func Simple(out []byte, h hash.Hash, in []byte) { - Salted(out, h, in, nil) -} - -var zero [1]byte - -// Salted writes to out the result of computing the Salted S2K function (RFC -// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
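
The count encoding above squeezes the iteration count into one octet with the formula (16 + low nibble) << (high nibble + 6) from RFC 4880, section 3.7.1.3, so the default octet 96 expands to 65536. A two-line check:

```go
package main

import "fmt"

// decodeCount expands an encoded s2k mode-3 count octet using the formula
// from RFC 4880, section 3.7.1.3: (16 + low 4 bits) << (high 4 bits + 6).
func decodeCount(c uint8) int {
	return (16 + int(c&15)) << (uint32(c>>4) + 6)
}

func main() {
	fmt.Println(decodeCount(96))  // 65536, the package default
	fmt.Println(decodeCount(255)) // 65011712, the maximum
}
```
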
-func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { - done := 0 - var digest []byte - - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - h.Write(salt) - h.Write(in) - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Iterated writes to out the result of computing the Iterated and Salted S2K -// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, -// salt and iteration count. -func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { - combined := make([]byte, len(in)+len(salt)) - copy(combined, salt) - copy(combined[len(salt):], in) - - if count < len(combined) { - count = len(combined) - } - - done := 0 - var digest []byte - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - written := 0 - for written < count { - if written+len(combined) > count { - todo := count - written - h.Write(combined[:todo]) - written = count - } else { - h.Write(combined) - written += len(combined) - } - } - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Parse reads a binary specification for a string-to-key transformation from r -// and returns a function which performs that transform. -func Parse(r io.Reader) (f func(out, in []byte), err error) { - var buf [9]byte - - _, err = io.ReadFull(r, buf[:2]) - if err != nil { - return - } - - hash, ok := HashIdToHash(buf[1]) - if !ok { - return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) - } - if !hash.Available() { - return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) - } - h := hash.New() - - switch buf[0] { - case 0: - f := func(out, in []byte) { - Simple(out, h, in) - } - return f, nil - case 1: - _, err = io.ReadFull(r, buf[:8]) - if err != nil { - return - } - f := func(out, in []byte) { - Salted(out, h, in, buf[:8]) - } - return f, nil - case 3: - _, err = io.ReadFull(r, buf[:9]) - if err != nil { - return - } - count := decodeCount(buf[8]) - f := func(out, in []byte) { - Iterated(out, h, in, buf[:8], count) - } - return f, nil - } - - return nil, errors.UnsupportedError("S2K function") -} - -// Serialize salts and stretches the given passphrase and writes the -// resulting key into key. It also serializes an S2K descriptor to -// w. The key stretching can be configured with c, which may be -// nil. In that case, sensible defaults will be used. -func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { - var buf [11]byte - buf[0] = 3 /* iterated and salted */ - buf[1], _ = HashToHashId(c.hash()) - salt := buf[2:10] - if _, err := io.ReadFull(rand, salt); err != nil { - return err - } - encodedCount := c.encodedCount() - count := decodeCount(encodedCount) - buf[10] = encodedCount - if _, err := w.Write(buf[:]); err != nil { - return err - } - - Iterated(key, c.hash().New(), passphrase, salt, count) - return nil -} - -// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with -// Go's crypto.Hash type. See RFC 4880, section 9.4. 
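
Serialize and Parse above are built to round-trip: the descriptor Serialize writes, fed back through Parse, re-derives the same key from the same passphrase. A sketch of that property; note the blank crypto/sha1 import, since this package names hashes via the crypto package without linking their implementations:

```go
package main

import (
	"bytes"
	"crypto/rand"
	_ "crypto/sha1" // the default S2K hash must be linked in by the caller
	"fmt"
	"log"

	"golang.org/x/crypto/openpgp/s2k"
)

func main() {
	var desc bytes.Buffer
	key := make([]byte, 16)
	passphrase := []byte("hunter2") // hypothetical passphrase

	// A nil *Config selects SHA-1 and an encoded count of 96 (65536 iterations).
	if err := s2k.Serialize(&desc, key, rand.Reader, passphrase, nil); err != nil {
		log.Fatal(err)
	}

	// Re-parse the 11-byte descriptor and re-derive the key.
	f, err := s2k.Parse(&desc)
	if err != nil {
		log.Fatal(err)
	}
	rederived := make([]byte, 16)
	f(rederived, passphrase)

	fmt.Println("round trip matches:", bytes.Equal(key, rederived)) // true
}
```
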
-var hashToHashIdMapping = []struct { - id byte - hash crypto.Hash - name string -}{ - {1, crypto.MD5, "MD5"}, - {2, crypto.SHA1, "SHA1"}, - {3, crypto.RIPEMD160, "RIPEMD160"}, - {8, crypto.SHA256, "SHA256"}, - {9, crypto.SHA384, "SHA384"}, - {10, crypto.SHA512, "SHA512"}, - {11, crypto.SHA224, "SHA224"}, -} - -// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP -// hash id. -func HashIdToHash(id byte) (h crypto.Hash, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.hash, true - } - } - return 0, false -} - -// HashIdToString returns the name of the hash function corresponding to the -// given OpenPGP hash id. -func HashIdToString(id byte) (name string, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.name, true - } - } - - return "", false -} - -// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. -func HashToHashId(h crypto.Hash) (id byte, ok bool) { - for _, m := range hashToHashIdMapping { - if m.hash == h { - return m.id, true - } - } - return 0, false -} diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go deleted file mode 100644 index b89d48b81d..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/write.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto" - "hash" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" - "golang.org/x/crypto/openpgp/s2k" -) - -// DetachSign signs message with the private key from signer (which must -// already have been decrypted) and writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// ArmoredDetachSign signs message with the private key from signer (which -// must already have been decrypted) and writes an armored signature to w. -// If config is nil, sensible defaults will be used. -func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { - return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// DetachSignText signs message (after canonicalising the line endings) with -// the private key from signer (which must already have been decrypted) and -// writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeText, config) -} - -// ArmoredDetachSignText signs message (after canonicalising the line endings) -// with the private key from signer (which must already have been decrypted) -// and writes an armored signature to w. -// If config is nil, sensible defaults will be used. 
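
A hedged sketch of driving the detached-signing entry points above (the paths are hypothetical, ReadArmoredKeyRing lives in keys.go outside this hunk, and the signing key must already be decrypted):

```go
package main

import (
	"log"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	kf, err := os.Open("private-keyring.asc") // hypothetical armored private keyring
	if err != nil {
		log.Fatal(err)
	}
	defer kf.Close()
	keyring, err := openpgp.ReadArmoredKeyRing(kf)
	if err != nil {
		log.Fatal(err)
	}
	signer := keyring[0] // hypothetical: first entity, private key already decrypted

	msg, err := os.Open("file.txt") // hypothetical file to sign
	if err != nil {
		log.Fatal(err)
	}
	defer msg.Close()

	out, err := os.Create("file.txt.asc")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// A nil config falls back to the package defaults for hash and time.
	if err := openpgp.ArmoredDetachSign(out, signer, msg, nil); err != nil {
		log.Fatal(err)
	}
}
```
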
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return armoredDetachSign(w, signer, message, packet.SigTypeText, config) -} - -func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - out, err := armor.Encode(w, SignatureType, nil) - if err != nil { - return - } - err = detachSign(out, signer, message, sigType, config) - if err != nil { - return - } - return out.Close() -} - -func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing key doesn't have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing key is encrypted") - } - - sig := new(packet.Signature) - sig.SigType = sigType - sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo - sig.Hash = config.Hash() - sig.CreationTime = config.Now() - sig.IssuerKeyId = &signer.PrivateKey.KeyId - - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) - if err != nil { - return - } - io.Copy(wrappedHash, message) - - err = sig.Sign(h, signer.PrivateKey, config) - if err != nil { - return - } - - return sig.Serialize(w) -} - -// FileHints contains metadata about encrypted files. This metadata is, itself, -// encrypted. -type FileHints struct { - // IsBinary can be set to hint that the contents are binary data. - IsBinary bool - // FileName hints at the name of the file that should be written. It's - // truncated to 255 bytes if longer. It may be empty to suggest that the - // file should not be written to disk. It may be equal to "_CONSOLE" to - // suggest the data should not be written to disk. - FileName string - // ModTime contains the modification time of the file, or the zero time if not applicable. - ModTime time.Time -} - -// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. -// The resulting WriteCloser must be closed after the contents of the file have -// been written. -// If config is nil, sensible defaults will be used. -func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if hints == nil { - hints = &FileHints{} - } - - key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) - if err != nil { - return - } - w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) - if err != nil { - return - } - - literaldata := w - if algo := config.Compression(); algo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - literaldata, err = packet.SerializeCompressed(w, algo, compConfig) - if err != nil { - return - } - } - - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) -} - -// intersectPreferences mutates and returns a prefix of a that contains only -// the values in the intersection of a and b. The order of a is preserved. 
-func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { - var j int - for _, v := range a { - for _, v2 := range b { - if v == v2 { - a[j] = v - j++ - break - } - } - } - - return a[:j] -} - -func hashToHashId(h crypto.Hash) uint8 { - v, ok := s2k.HashToHashId(h) - if !ok { - panic("tried to convert unknown hash") - } - return v -} - -// writeAndSign writes the data as a payload package and, optionally, signs -// it. hints contains optional information, that is also encrypted, -// that aids the recipients in processing the message. The resulting -// WriteCloser must be closed after the contents of the file have been -// written. If config is nil, sensible defaults will be used. -func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - var signer *packet.PrivateKey - if signed != nil { - signKey, ok := signed.signingKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("no valid signing keys") - } - signer = signKey.PrivateKey - if signer == nil { - return nil, errors.InvalidArgumentError("no private key in signing key") - } - if signer.Encrypted { - return nil, errors.InvalidArgumentError("signing key must be decrypted") - } - } - - var hash crypto.Hash - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { - hash = h - break - } - } - - // If the hash specified by config is a candidate, we'll use that. - if configuredHash := config.Hash(); configuredHash.Available() { - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { - hash = h - break - } - } - } - - if hash == 0 { - hashId := candidateHashes[0] - name, ok := s2k.HashIdToString(hashId) - if !ok { - name = "#" + strconv.Itoa(int(hashId)) - } - return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") - } - - if signer != nil { - ops := &packet.OnePassSignature{ - SigType: packet.SigTypeBinary, - Hash: hash, - PubKeyAlgo: signer.PubKeyAlgo, - KeyId: signer.KeyId, - IsLast: true, - } - if err := ops.Serialize(payload); err != nil { - return nil, err - } - } - - if hints == nil { - hints = &FileHints{} - } - - w := payload - if signer != nil { - // If we need to write a signature packet after the literal - // data then we need to stop literalData from closing - // encryptedData. - w = noOpCloser{w} - - } - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) - if err != nil { - return nil, err - } - - if signer != nil { - return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil - } - return literalData, nil -} - -// Encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. 
-func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if len(to) == 0 { - return nil, errors.InvalidArgumentError("no encryption recipient provided") - } - - // These are the possible ciphers that we'll use for the message. - candidateCiphers := []uint8{ - uint8(packet.CipherAES128), - uint8(packet.CipherAES256), - uint8(packet.CipherCAST5), - } - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - // In the event that a recipient doesn't specify any supported ciphers - // or hash functions, these are the ones that we assume that every - // implementation supports. - defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] - defaultHashes := candidateHashes[len(candidateHashes)-1:] - - encryptKeys := make([]Key, len(to)) - for i := range to { - var ok bool - encryptKeys[i], ok = to[i].encryptionKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") - } - - sig := to[i].primaryIdentity().SelfSignature - - preferredSymmetric := sig.PreferredSymmetric - if len(preferredSymmetric) == 0 { - preferredSymmetric = defaultCiphers - } - preferredHashes := sig.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - } - - if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") - } - - cipher := packet.CipherFunction(candidateCiphers[0]) - // If the cipher specified by config is a candidate, we'll use that. - configuredCipher := config.Cipher() - for _, c := range candidateCiphers { - cipherFunc := packet.CipherFunction(c) - if cipherFunc == configuredCipher { - cipher = cipherFunc - break - } - } - - symKey := make([]byte, cipher.KeySize()) - if _, err := io.ReadFull(config.Random(), symKey); err != nil { - return nil, err - } - - for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { - return nil, err - } - } - - payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) - if err != nil { - return - } - - return writeAndSign(payload, candidateHashes, signed, hints, config) -} - -// Sign signs a message. The resulting WriteCloser must be closed after the -// contents of the file have been written. hints contains optional information -// that aids the recipients in processing the message. -// If config is nil, sensible defaults will be used. -func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { - if signed == nil { - return nil, errors.InvalidArgumentError("no signer provided") - } - - // These are the possible hash functions that we'll use for the signature. 
- candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - defaultHashes := candidateHashes[len(candidateHashes)-1:] - preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config) -} - -// signatureWriter hashes the contents of a message while passing it along to -// literalData. When closed, it closes literalData, writes a signature packet -// to encryptedData and then also closes encryptedData. -type signatureWriter struct { - encryptedData io.WriteCloser - literalData io.WriteCloser - hashType crypto.Hash - h hash.Hash - signer *packet.PrivateKey - config *packet.Config -} - -func (s signatureWriter) Write(data []byte) (int, error) { - s.h.Write(data) - return s.literalData.Write(data) -} - -func (s signatureWriter) Close() error { - sig := &packet.Signature{ - SigType: packet.SigTypeBinary, - PubKeyAlgo: s.signer.PubKeyAlgo, - Hash: s.hashType, - CreationTime: s.config.Now(), - IssuerKeyId: &s.signer.KeyId, - } - - if err := sig.Sign(s.h, s.signer, s.config); err != nil { - return err - } - if err := s.literalData.Close(); err != nil { - return err - } - if err := sig.Serialize(s.encryptedData); err != nil { - return err - } - return s.encryptedData.Close() -} - -// noOpCloser is like an io.NopCloser, but for an io.Writer. -// TODO: we have two of these in OpenPGP packages alone. This probably needs -// to be promoted somewhere more common. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go deleted file mode 100644 index c971a99fa6..0000000000 --- a/vendor/golang.org/x/crypto/scrypt/scrypt.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package scrypt implements the scrypt key derivation function as defined in -// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard -// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). -package scrypt // import "golang.org/x/crypto/scrypt" - -import ( - "crypto/sha256" - "encoding/binary" - "errors" - "math/bits" - - "golang.org/x/crypto/pbkdf2" -) - -const maxInt = int(^uint(0) >> 1) - -// blockCopy copies n numbers from src into dst. -func blockCopy(dst, src []uint32, n int) { - copy(dst, src[:n]) -} - -// blockXOR XORs numbers from dst with n numbers from src. -func blockXOR(dst, src []uint32, n int) { - for i, v := range src[:n] { - dst[i] ^= v - } -} - -// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, -// and puts the result into both tmp and out. 
-func salsaXOR(tmp *[16]uint32, in, out []uint32) { - w0 := tmp[0] ^ in[0] - w1 := tmp[1] ^ in[1] - w2 := tmp[2] ^ in[2] - w3 := tmp[3] ^ in[3] - w4 := tmp[4] ^ in[4] - w5 := tmp[5] ^ in[5] - w6 := tmp[6] ^ in[6] - w7 := tmp[7] ^ in[7] - w8 := tmp[8] ^ in[8] - w9 := tmp[9] ^ in[9] - w10 := tmp[10] ^ in[10] - w11 := tmp[11] ^ in[11] - w12 := tmp[12] ^ in[12] - w13 := tmp[13] ^ in[13] - w14 := tmp[14] ^ in[14] - w15 := tmp[15] ^ in[15] - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 - x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 - - for i := 0; i < 8; i += 2 { - x4 ^= bits.RotateLeft32(x0+x12, 7) - x8 ^= bits.RotateLeft32(x4+x0, 9) - x12 ^= bits.RotateLeft32(x8+x4, 13) - x0 ^= bits.RotateLeft32(x12+x8, 18) - - x9 ^= bits.RotateLeft32(x5+x1, 7) - x13 ^= bits.RotateLeft32(x9+x5, 9) - x1 ^= bits.RotateLeft32(x13+x9, 13) - x5 ^= bits.RotateLeft32(x1+x13, 18) - - x14 ^= bits.RotateLeft32(x10+x6, 7) - x2 ^= bits.RotateLeft32(x14+x10, 9) - x6 ^= bits.RotateLeft32(x2+x14, 13) - x10 ^= bits.RotateLeft32(x6+x2, 18) - - x3 ^= bits.RotateLeft32(x15+x11, 7) - x7 ^= bits.RotateLeft32(x3+x15, 9) - x11 ^= bits.RotateLeft32(x7+x3, 13) - x15 ^= bits.RotateLeft32(x11+x7, 18) - - x1 ^= bits.RotateLeft32(x0+x3, 7) - x2 ^= bits.RotateLeft32(x1+x0, 9) - x3 ^= bits.RotateLeft32(x2+x1, 13) - x0 ^= bits.RotateLeft32(x3+x2, 18) - - x6 ^= bits.RotateLeft32(x5+x4, 7) - x7 ^= bits.RotateLeft32(x6+x5, 9) - x4 ^= bits.RotateLeft32(x7+x6, 13) - x5 ^= bits.RotateLeft32(x4+x7, 18) - - x11 ^= bits.RotateLeft32(x10+x9, 7) - x8 ^= bits.RotateLeft32(x11+x10, 9) - x9 ^= bits.RotateLeft32(x8+x11, 13) - x10 ^= bits.RotateLeft32(x9+x8, 18) - - x12 ^= bits.RotateLeft32(x15+x14, 7) - x13 ^= bits.RotateLeft32(x12+x15, 9) - x14 ^= bits.RotateLeft32(x13+x12, 13) - x15 ^= bits.RotateLeft32(x14+x13, 18) - } - x0 += w0 - x1 += w1 - x2 += w2 - x3 += w3 - x4 += w4 - x5 += w5 - x6 += w6 - x7 += w7 - x8 += w8 - x9 += w9 - x10 += w10 - x11 += w11 - x12 += w12 - x13 += w13 - x14 += w14 - x15 += w15 - - out[0], tmp[0] = x0, x0 - out[1], tmp[1] = x1, x1 - out[2], tmp[2] = x2, x2 - out[3], tmp[3] = x3, x3 - out[4], tmp[4] = x4, x4 - out[5], tmp[5] = x5, x5 - out[6], tmp[6] = x6, x6 - out[7], tmp[7] = x7, x7 - out[8], tmp[8] = x8, x8 - out[9], tmp[9] = x9, x9 - out[10], tmp[10] = x10, x10 - out[11], tmp[11] = x11, x11 - out[12], tmp[12] = x12, x12 - out[13], tmp[13] = x13, x13 - out[14], tmp[14] = x14, x14 - out[15], tmp[15] = x15, x15 -} - -func blockMix(tmp *[16]uint32, in, out []uint32, r int) { - blockCopy(tmp[:], in[(2*r-1)*16:], 16) - for i := 0; i < 2*r; i += 2 { - salsaXOR(tmp, in[i*16:], out[i*8:]) - salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) - } -} - -func integer(b []uint32, r int) uint64 { - j := (2*r - 1) * 16 - return uint64(b[j]) | uint64(b[j+1])<<32 -} - -func smix(b []byte, r, N int, v, xy []uint32) { - var tmp [16]uint32 - R := 32 * r - x := xy - y := xy[R:] - - j := 0 - for i := 0; i < R; i++ { - x[i] = binary.LittleEndian.Uint32(b[j:]) - j += 4 - } - for i := 0; i < N; i += 2 { - blockCopy(v[i*R:], x, R) - blockMix(&tmp, x, y, r) - - blockCopy(v[(i+1)*R:], y, R) - blockMix(&tmp, y, x, r) - } - for i := 0; i < N; i += 2 { - j := int(integer(x, r) & uint64(N-1)) - blockXOR(x, v[j*R:], R) - blockMix(&tmp, x, y, r) - - j = int(integer(y, r) & uint64(N-1)) - blockXOR(y, v[j*R:], R) - blockMix(&tmp, y, x, r) - } - j = 0 - for _, v := range x[:R] { - binary.LittleEndian.PutUint32(b[j:], v) - j += 4 - } -} - -// Key derives a key from the password, salt, and cost parameters, returning 
-// a byte slice of length keyLen that can be used as cryptographic key. -// -// N is a CPU/memory cost parameter, which must be a power of two greater than 1. -// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the -// limits, the function returns a nil byte slice and an error. -// -// For example, you can get a derived key for e.g. AES-256 (which needs a -// 32-byte key) by doing: -// -// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) -// -// The recommended parameters for interactive logins as of 2017 are N=32768, r=8 -// and p=1. The parameters N, r, and p should be increased as memory latency and -// CPU parallelism increases; consider setting N to the highest power of 2 you -// can derive within 100 milliseconds. Remember to get a good random salt. -func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { - if N <= 1 || N&(N-1) != 0 { - return nil, errors.New("scrypt: N must be > 1 and a power of 2") - } - if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { - return nil, errors.New("scrypt: parameters are too large") - } - - xy := make([]uint32, 64*r) - v := make([]uint32, 32*N*r) - b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) - - for i := 0; i < p; i++ { - smix(b[i*128*r:], r, N, v, xy) - } - - return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil -} diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go new file mode 100644 index 0000000000..decd8cf9bf --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -0,0 +1,62 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha3 implements the SHA-3 fixed-output-length hash functions and +// the SHAKE variable-output-length hash functions defined by FIPS-202. +// +// Both types of hash function use the "sponge" construction and the Keccak +// permutation. For a detailed specification see http://keccak.noekeon.org/ +// +// # Guidance +// +// If you aren't sure what function you need, use SHAKE256 with at least 64 +// bytes of output. The SHAKE instances are faster than the SHA3 instances; +// the latter have to allocate memory to conform to the hash.Hash interface. +// +// If you need a secret-key MAC (message authentication code), prepend the +// secret key to the input, hash with SHAKE256 and read at least 32 bytes of +// output. +// +// # Security strengths +// +// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security +// strength against preimage attacks of x bits. Since they only produce "x" +// bits of output, their collision-resistance is only "x/2" bits. +// +// The SHAKE-256 and -128 functions have a generic security strength of 256 and +// 128 bits against all attacks, provided that at least 2x bits of their output +// is used. Requesting more than 64 or 32 bytes of output, respectively, does +// not increase the collision-resistance of the SHAKE functions. +// +// # The sponge construction +// +// A sponge builds a pseudo-random function from a public pseudo-random +// permutation, by applying the permutation to a state of "rate + capacity" +// bytes, but hiding "capacity" of the bytes. +// +// A sponge starts out with a zero state. To hash an input using a sponge, up +// to "rate" bytes of the input are XORed into the sponge's state. The sponge +// is then "full" and the permutation is applied to "empty" it. 
This process is +// repeated until all the input has been "absorbed". The input is then padded. +// The digest is "squeezed" from the sponge in the same way, except that output +// is copied out instead of input being XORed in. +// +// A sponge is parameterized by its generic security strength, which is equal +// to half its capacity; capacity + rate is equal to the permutation's width. +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. +// +// # Recommendations +// +// The SHAKE functions are recommended for most new uses. They can produce +// output of arbitrary length. SHAKE256, with an output length of at least +// 64 bytes, provides 256-bit security against all attacks. The Keccak team +// recommends it for most applications upgrading from SHA2-512. (NIST chose a +// much stronger, but much slower, sponge instance for SHA3-512.) +// +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. +// They produce output of the same length, with the same security strengths +// against all attacks. This means, in particular, that SHA3-256 only has +// 128-bit collision resistance, because its output length is 32 bytes. +package sha3 // import "golang.org/x/crypto/sha3" diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go new file mode 100644 index 0000000000..0d8043fd2a --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -0,0 +1,97 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file provides functions for creating instances of the SHA-3 +// and SHAKE hash functions, as well as utility functions for hashing +// bytes. + +import ( + "hash" +) + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. +func New224() hash.Hash { + if h := new224Asm(); h != nil { + return h + } + return &state{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +func New256() hash.Hash { + if h := new256Asm(); h != nil { + return h + } + return &state{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +// New384 creates a new SHA3-384 hash. +// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. +func New384() hash.Hash { + if h := new384Asm(); h != nil { + return h + } + return &state{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. +func New512() hash.Hash { + if h := new512Asm(); h != nil { + return h + } + return &state{rate: 72, outputLen: 64, dsbyte: 0x06} +} + +// NewLegacyKeccak256 creates a new Keccak-256 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New256 instead. +func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} } + +// NewLegacyKeccak512 creates a new Keccak-512 hash. 
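
The guidance in doc.go translates directly into code. A short sketch (NewShake256 comes from the package's shake.go, outside this hunk; Sum256 is the one-shot helper defined just below):

    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    func main() {
        // Default recommendation: SHAKE256 with at least 64 bytes of output.
        h := sha3.NewShake256()
        h.Write([]byte("some input"))
        out := make([]byte, 64)
        h.Read(out)
        fmt.Printf("shake256: %x\n", out)

        // Secret-key MAC, as doc.go suggests: prepend the key, hash with
        // SHAKE256, and read at least 32 bytes of output.
        mac := sha3.NewShake256()
        mac.Write([]byte("secret key"))
        mac.Write([]byte("message"))
        tag := make([]byte, 32)
        mac.Read(tag)
        fmt.Printf("mac: %x\n", tag)

        // Fixed-length drop-in replacement for SHA-2.
        digest := sha3.Sum256([]byte("some input"))
        fmt.Printf("sha3-256: %x\n", digest)
    }
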
+// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New512 instead. +func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} } + +// Sum224 returns the SHA3-224 digest of the data. +func Sum224(data []byte) (digest [28]byte) { + h := New224() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum256 returns the SHA3-256 digest of the data. +func Sum256(data []byte) (digest [32]byte) { + h := New256() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum384 returns the SHA3-384 digest of the data. +func Sum384(data []byte) (digest [48]byte) { + h := New384() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum512 returns the SHA3-512 digest of the data. +func Sum512(data []byte) (digest [64]byte) { + h := New512() + h.Write(data) + h.Sum(digest[:0]) + return +} diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go new file mode 100644 index 0000000000..fe8c84793c --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +import ( + "hash" +) + +// new224Asm returns an assembly implementation of SHA3-224 if available, +// otherwise it returns nil. +func new224Asm() hash.Hash { return nil } + +// new256Asm returns an assembly implementation of SHA3-256 if available, +// otherwise it returns nil. +func new256Asm() hash.Hash { return nil } + +// new384Asm returns an assembly implementation of SHA3-384 if available, +// otherwise it returns nil. +func new384Asm() hash.Hash { return nil } + +// new512Asm returns an assembly implementation of SHA3-512 if available, +// otherwise it returns nil. +func new512Asm() hash.Hash { return nil } diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go new file mode 100644 index 0000000000..ce48b1dd3e --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -0,0 +1,414 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package sha3 + +import "math/bits" + +// rc stores the round constants for use in the ι step. +var rc = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} + +// keccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +func keccakF1600(a *[25]uint64) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + for i := 0; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. 
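
The unrolled rounds that follow are hard to audit by eye; for reference, the bc/d computations at the top of each round are the θ (column-parity) step, which in plain, non-unrolled Go reads as below (a readability sketch, not code the package compiles):

    package keccak

    import "math/bits"

    // theta applies the column-parity (θ) step of Keccak-f[1600] to a
    // state laid out as a[x+5y]. The unrolled code computes exactly these
    // bc (column parities) and d (per-column corrections), then fuses the
    // ρ, π, χ, and ι steps into the same pass for speed.
    func theta(a *[25]uint64) {
        var bc [5]uint64
        for x := 0; x < 5; x++ {
            bc[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20]
        }
        for x := 0; x < 5; x++ {
            d := bc[(x+4)%5] ^ bits.RotateLeft64(bc[(x+1)%5], 1)
            for y := 0; y < 25; y += 5 {
                a[x+y] ^= d
            }
        }
    }
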
+ + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[12] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[18] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[24] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[16] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[22] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[3] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[1] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[7] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[19] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[11] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[23] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[4] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[2] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[8] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[14] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // Round 2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[7] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[23] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[14] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[11] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[2] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[18] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[6] 
^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[22] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[4] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[1] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[8] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[24] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[12] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[3] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[19] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[22] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[8] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[19] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[1] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[12] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[23] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[16] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[2] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[24] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[5] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[6] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[3] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[14] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[7] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[18] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[4] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 
= a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[2] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[3] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[4] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[6] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[7] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[8] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[11] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[12] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[14] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[16] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[18] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[19] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[22] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[23] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[24] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go new file mode 100644 index 0000000000..b908696be5 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc + +package sha3 + +// This function is implemented in keccakf_amd64.s. + +//go:noescape + +func keccakF1600(a *[25]uint64) diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s new file mode 100644 index 0000000000..1f53938861 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -0,0 +1,390 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
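
Note how the build constraints pair up: keccakf.go carries "!amd64 || purego || !gc" while the two amd64 files carry the exact complement, so precisely one keccakF1600 is linked in. The general pattern, sketched here with hypothetical file and function names, is:

    // permute_generic.go — portable fallback, built everywhere else.
    //go:build !amd64 || purego || !gc

    package permute

    func permute(a *[25]uint64) { /* pure-Go implementation */ }

    // permute_amd64.go — forward declaration for the body in
    // permute_amd64.s; //go:noescape tells the compiler the assembly
    // does not let the pointer escape. Not compilable without the .s file.
    //go:build amd64 && !purego && gc

    package permute

    //go:noescape
    func permute(a *[25]uint64)
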
+ +//go:build amd64 && !purego && gc + +// This code was translated into a form compatible with 6a from the public +// domain sources at https://github.com/gvanas/KeccakCodePackage + +// Offsets in state +#define _ba (0*8) +#define _be (1*8) +#define _bi (2*8) +#define _bo (3*8) +#define _bu (4*8) +#define _ga (5*8) +#define _ge (6*8) +#define _gi (7*8) +#define _go (8*8) +#define _gu (9*8) +#define _ka (10*8) +#define _ke (11*8) +#define _ki (12*8) +#define _ko (13*8) +#define _ku (14*8) +#define _ma (15*8) +#define _me (16*8) +#define _mi (17*8) +#define _mo (18*8) +#define _mu (19*8) +#define _sa (20*8) +#define _se (21*8) +#define _si (22*8) +#define _so (23*8) +#define _su (24*8) + +// Temporary registers +#define rT1 AX + +// Round vars +#define rpState DI +#define rpStack SP + +#define rDa BX +#define rDe CX +#define rDi DX +#define rDo R8 +#define rDu R9 + +#define rBa R10 +#define rBe R11 +#define rBi R12 +#define rBo R13 +#define rBu R14 + +#define rCa SI +#define rCe BP +#define rCi rBi +#define rCo rBo +#define rCu R15 + +#define MOVQ_RBI_RCE MOVQ rBi, rCe +#define XORQ_RT1_RCA XORQ rT1, rCa +#define XORQ_RT1_RCE XORQ rT1, rCe +#define XORQ_RBA_RCU XORQ rBa, rCu +#define XORQ_RBE_RCU XORQ rBe, rCu +#define XORQ_RDU_RCU XORQ rDu, rCu +#define XORQ_RDA_RCA XORQ rDa, rCa +#define XORQ_RDE_RCE XORQ rDe, rCe + +#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \ + /* Prepare round */ \ + MOVQ rCe, rDa; \ + ROLQ $1, rDa; \ + \ + MOVQ _bi(iState), rCi; \ + XORQ _gi(iState), rDi; \ + XORQ rCu, rDa; \ + XORQ _ki(iState), rCi; \ + XORQ _mi(iState), rDi; \ + XORQ rDi, rCi; \ + \ + MOVQ rCi, rDe; \ + ROLQ $1, rDe; \ + \ + MOVQ _bo(iState), rCo; \ + XORQ _go(iState), rDo; \ + XORQ rCa, rDe; \ + XORQ _ko(iState), rCo; \ + XORQ _mo(iState), rDo; \ + XORQ rDo, rCo; \ + \ + MOVQ rCo, rDi; \ + ROLQ $1, rDi; \ + \ + MOVQ rCu, rDo; \ + XORQ rCe, rDi; \ + ROLQ $1, rDo; \ + \ + MOVQ rCa, rDu; \ + XORQ rCi, rDo; \ + ROLQ $1, rDu; \ + \ + /* Result b */ \ + MOVQ _ba(iState), rBa; \ + MOVQ _ge(iState), rBe; \ + XORQ rCo, rDu; \ + MOVQ _ki(iState), rBi; \ + MOVQ _mo(iState), rBo; \ + MOVQ _su(iState), rBu; \ + XORQ rDe, rBe; \ + ROLQ $44, rBe; \ + XORQ rDi, rBi; \ + XORQ rDa, rBa; \ + ROLQ $43, rBi; \ + \ + MOVQ rBe, rCa; \ + MOVQ rc, rT1; \ + ORQ rBi, rCa; \ + XORQ rBa, rT1; \ + XORQ rT1, rCa; \ + MOVQ rCa, _ba(oState); \ + \ + XORQ rDu, rBu; \ + ROLQ $14, rBu; \ + MOVQ rBa, rCu; \ + ANDQ rBe, rCu; \ + XORQ rBu, rCu; \ + MOVQ rCu, _bu(oState); \ + \ + XORQ rDo, rBo; \ + ROLQ $21, rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _bi(oState); \ + \ + NOTQ rBi; \ + ORQ rBa, rBu; \ + ORQ rBo, rBi; \ + XORQ rBo, rBu; \ + XORQ rBe, rBi; \ + MOVQ rBu, _bo(oState); \ + MOVQ rBi, _be(oState); \ + B_RBI_RCE; \ + \ + /* Result g */ \ + MOVQ _gu(iState), rBe; \ + XORQ rDu, rBe; \ + MOVQ _ka(iState), rBi; \ + ROLQ $20, rBe; \ + XORQ rDa, rBi; \ + ROLQ $3, rBi; \ + MOVQ _bo(iState), rBa; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDo, rBa; \ + MOVQ _me(iState), rBo; \ + MOVQ _si(iState), rBu; \ + ROLQ $28, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ga(oState); \ + G_RT1_RCA; \ + \ + XORQ rDe, rBo; \ + ROLQ $45, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ge(oState); \ + G_RT1_RCE; \ + \ + XORQ rDi, rBu; \ + ROLQ $61, rBu; \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _go(oState); \ + \ + ANDQ rBe, rBa; 
\ + XORQ rBu, rBa; \ + MOVQ rBa, _gu(oState); \ + NOTQ rBu; \ + G_RBA_RCU; \ + \ + ORQ rBu, rBo; \ + XORQ rBi, rBo; \ + MOVQ rBo, _gi(oState); \ + \ + /* Result k */ \ + MOVQ _be(iState), rBa; \ + MOVQ _gi(iState), rBe; \ + MOVQ _ko(iState), rBi; \ + MOVQ _mu(iState), rBo; \ + MOVQ _sa(iState), rBu; \ + XORQ rDi, rBe; \ + ROLQ $6, rBe; \ + XORQ rDo, rBi; \ + ROLQ $25, rBi; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDe, rBa; \ + ROLQ $1, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ka(oState); \ + K_RT1_RCA; \ + \ + XORQ rDu, rBo; \ + ROLQ $8, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ke(oState); \ + K_RT1_RCE; \ + \ + XORQ rDa, rBu; \ + ROLQ $18, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _ki(oState); \ + \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _ko(oState); \ + \ + ANDQ rBe, rBa; \ + XORQ rBu, rBa; \ + MOVQ rBa, _ku(oState); \ + K_RBA_RCU; \ + \ + /* Result m */ \ + MOVQ _ga(iState), rBe; \ + XORQ rDa, rBe; \ + MOVQ _ke(iState), rBi; \ + ROLQ $36, rBe; \ + XORQ rDe, rBi; \ + MOVQ _bu(iState), rBa; \ + ROLQ $10, rBi; \ + MOVQ rBe, rT1; \ + MOVQ _mi(iState), rBo; \ + ANDQ rBi, rT1; \ + XORQ rDu, rBa; \ + MOVQ _so(iState), rBu; \ + ROLQ $27, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ma(oState); \ + M_RT1_RCA; \ + \ + XORQ rDi, rBo; \ + ROLQ $15, rBo; \ + MOVQ rBi, rT1; \ + ORQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _me(oState); \ + M_RT1_RCE; \ + \ + XORQ rDo, rBu; \ + ROLQ $56, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ORQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _mi(oState); \ + \ + ORQ rBa, rBe; \ + XORQ rBu, rBe; \ + MOVQ rBe, _mu(oState); \ + \ + ANDQ rBa, rBu; \ + XORQ rBo, rBu; \ + MOVQ rBu, _mo(oState); \ + M_RBE_RCU; \ + \ + /* Result s */ \ + MOVQ _bi(iState), rBa; \ + MOVQ _go(iState), rBe; \ + MOVQ _ku(iState), rBi; \ + XORQ rDi, rBa; \ + MOVQ _ma(iState), rBo; \ + ROLQ $62, rBa; \ + XORQ rDo, rBe; \ + MOVQ _se(iState), rBu; \ + ROLQ $55, rBe; \ + \ + XORQ rDu, rBi; \ + MOVQ rBa, rDu; \ + XORQ rDe, rBu; \ + ROLQ $2, rBu; \ + ANDQ rBe, rDu; \ + XORQ rBu, rDu; \ + MOVQ rDu, _su(oState); \ + \ + ROLQ $39, rBi; \ + S_RDU_RCU; \ + NOTQ rBe; \ + XORQ rDa, rBo; \ + MOVQ rBe, rDa; \ + ANDQ rBi, rDa; \ + XORQ rBa, rDa; \ + MOVQ rDa, _sa(oState); \ + S_RDA_RCA; \ + \ + ROLQ $41, rBo; \ + MOVQ rBi, rDe; \ + ORQ rBo, rDe; \ + XORQ rBe, rDe; \ + MOVQ rDe, _se(oState); \ + S_RDE_RCE; \ + \ + MOVQ rBo, rDi; \ + MOVQ rBu, rDo; \ + ANDQ rBu, rDi; \ + ORQ rBa, rDo; \ + XORQ rBi, rDi; \ + XORQ rBo, rDo; \ + MOVQ rDi, _si(oState); \ + MOVQ rDo, _so(oState) \ + +// func keccakF1600(a *[25]uint64) +TEXT ·keccakF1600(SB), 0, $200-8 + MOVQ a+0(FP), rpState + + // Convert the user state into an internal state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + // Execute the KeccakF permutation + MOVQ _ba(rpState), rCa + MOVQ _be(rpState), rCe + MOVQ _bu(rpState), rCu + + XORQ _ga(rpState), rCa + XORQ _ge(rpState), rCe + XORQ _gu(rpState), rCu + + XORQ _ka(rpState), rCa + XORQ _ke(rpState), rCe + XORQ _ku(rpState), rCu + + XORQ _ma(rpState), rCa + XORQ _me(rpState), rCe + XORQ _mu(rpState), rCu + + XORQ _sa(rpState), rCa + XORQ _se(rpState), rCe + MOVQ _si(rpState), rDi + MOVQ _so(rpState), rDo + XORQ _su(rpState), rCu + + mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, 
XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, 
XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP) + + // Revert the internal state to the user state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + RET diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go new file mode 100644 index 0000000000..addfd5049b --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/register.go @@ -0,0 +1,18 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.4 + +package sha3 + +import ( + "crypto" +) + +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go new file mode 100644 index 0000000000..4884d172a4 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -0,0 +1,197 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +const ( + // maxRate is the maximum size of the internal buffer. SHAKE-256 + // currently needs the largest buffer. + maxRate = 168 +) + +type state struct { + // Generic sponge components. 
+ a [25]uint64 // main state of the hash + buf []byte // points into storage + rate int // the number of bytes of state to use + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + storage storageBuf + + // Specific to SHA-3 and SHAKE. + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *state) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *state) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the byte buffer, and setting Sponge.state to absorbing. +func (d *state) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.buf = d.storage.asBytes()[:0] +} + +func (d *state) clone() *state { + ret := *d + if ret.state == spongeAbsorbing { + ret.buf = ret.storage.asBytes()[:len(ret.buf)] + } else { + ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate] + } + + return &ret +} + +// permute applies the KeccakF-1600 permutation. It handles +// any input-output buffering. +func (d *state) permute() { + switch d.state { + case spongeAbsorbing: + // If we're absorbing, we need to xor the input into the state + // before applying the permutation. + xorIn(d, d.buf) + d.buf = d.storage.asBytes()[:0] + keccakF1600(&d.a) + case spongeSqueezing: + // If we're squeezing, we need to apply the permutation before + // copying more output. + keccakF1600(&d.a) + d.buf = d.storage.asBytes()[:d.rate] + copyOut(d, d.buf) + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *state) padAndPermute(dsbyte byte) { + if d.buf == nil { + d.buf = d.storage.asBytes()[:0] + } + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in d.buf because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. + d.buf = append(d.buf, dsbyte) + zerosStart := len(d.buf) + d.buf = d.storage.asBytes()[:d.rate] + for i := zerosStart; i < d.rate; i++ { + d.buf[i] = 0 + } + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. + d.buf[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing + d.buf = d.storage.asBytes()[:d.rate] + copyOut(d, d.buf) +} + +// Write absorbs more data into the hash's state. 
It panics if any +// output has already been read. +func (d *state) Write(p []byte) (written int, err error) { + if d.state != spongeAbsorbing { + panic("sha3: Write after Read") + } + if d.buf == nil { + d.buf = d.storage.asBytes()[:0] + } + written = len(p) + + for len(p) > 0 { + if len(d.buf) == 0 && len(p) >= d.rate { + // The fast path; absorb a full "rate" bytes of input and apply the permutation. + xorIn(d, p[:d.rate]) + p = p[d.rate:] + keccakF1600(&d.a) + } else { + // The slow path; buffer the input until we can fill the sponge, and then xor it in. + todo := d.rate - len(d.buf) + if todo > len(p) { + todo = len(p) + } + d.buf = append(d.buf, p[:todo]...) + p = p[todo:] + + // If the sponge is full, apply the permutation. + if len(d.buf) == d.rate { + d.permute() + } + } + } + + return +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (d *state) Read(out []byte) (n int, err error) { + // If we're still absorbing, pad and apply the permutation. + if d.state == spongeAbsorbing { + d.padAndPermute(d.dsbyte) + } + + n = len(out) + + // Now, do the squeezing. + for len(out) > 0 { + n := copy(out, d.buf) + d.buf = d.buf[n:] + out = out[n:] + + // Apply the permutation if we've squeezed the sponge dry. + if len(d.buf) == 0 { + d.permute() + } + } + + return +} + +// Sum applies padding to the hash state and then squeezes out the desired +// number of output bytes. It panics if any output has already been read. +func (d *state) Sum(in []byte) []byte { + if d.state != spongeAbsorbing { + panic("sha3: Sum after Read") + } + + // Make a copy of the original hash so that caller can keep writing + // and summing. + dup := d.clone() + hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation + dup.Read(hash) + return append(in, hash...) +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go new file mode 100644 index 0000000000..d861bca528 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -0,0 +1,288 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package sha3 + +// This file contains code for using the 'compute intermediate +// message digest' (KIMD) and 'compute last message digest' (KLMD) +// instructions to compute SHA-3 and SHAKE hashes on IBM Z. + +import ( + "hash" + + "golang.org/x/sys/cpu" +) + +// codes represent 7-bit KIMD/KLMD function codes as defined in +// the Principles of Operation. +type code uint64 + +const ( + // function codes for KIMD/KLMD + sha3_224 code = 32 + sha3_256 = 33 + sha3_384 = 34 + sha3_512 = 35 + shake_128 = 36 + shake_256 = 37 + nopad = 0x100 +) + +// kimd is a wrapper for the 'compute intermediate message digest' instruction. +// src must be a multiple of the rate for the given function code. +// +//go:noescape +func kimd(function code, chain *[200]byte, src []byte) + +// klmd is a wrapper for the 'compute last message digest' instruction. +// src padding is handled by the instruction. 
+// +//go:noescape +func klmd(function code, chain *[200]byte, dst, src []byte) + +type asmState struct { + a [200]byte // 1600 bit state + buf []byte // care must be taken to ensure cap(buf) is a multiple of rate + rate int // equivalent to block size + storage [3072]byte // underlying storage for buf + outputLen int // output length for full security + function code // KIMD/KLMD function code + state spongeDirection // whether the sponge is absorbing or squeezing +} + +func newAsmState(function code) *asmState { + var s asmState + s.function = function + switch function { + case sha3_224: + s.rate = 144 + s.outputLen = 28 + case sha3_256: + s.rate = 136 + s.outputLen = 32 + case sha3_384: + s.rate = 104 + s.outputLen = 48 + case sha3_512: + s.rate = 72 + s.outputLen = 64 + case shake_128: + s.rate = 168 + s.outputLen = 32 + case shake_256: + s.rate = 136 + s.outputLen = 64 + default: + panic("sha3: unrecognized function code") + } + + // limit s.buf size to a multiple of s.rate + s.resetBuf() + return &s +} + +func (s *asmState) clone() *asmState { + c := *s + c.buf = c.storage[:len(s.buf):cap(s.buf)] + return &c +} + +// copyIntoBuf copies b into buf. It will panic if there is not enough space to +// store all of b. +func (s *asmState) copyIntoBuf(b []byte) { + bufLen := len(s.buf) + s.buf = s.buf[:len(s.buf)+len(b)] + copy(s.buf[bufLen:], b) +} + +// resetBuf points buf at storage, sets the length to 0 and sets cap to be a +// multiple of the rate. +func (s *asmState) resetBuf() { + max := (cap(s.storage) / s.rate) * s.rate + s.buf = s.storage[:0:max] +} + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (s *asmState) Write(b []byte) (int, error) { + if s.state != spongeAbsorbing { + panic("sha3: Write after Read") + } + length := len(b) + for len(b) > 0 { + if len(s.buf) == 0 && len(b) >= cap(s.buf) { + // Hash the data directly and push any remaining bytes + // into the buffer. + remainder := len(b) % s.rate + kimd(s.function, &s.a, b[:len(b)-remainder]) + if remainder != 0 { + s.copyIntoBuf(b[len(b)-remainder:]) + } + return length, nil + } + + if len(s.buf) == cap(s.buf) { + // flush the buffer + kimd(s.function, &s.a, s.buf) + s.buf = s.buf[:0] + } + + // copy as much as we can into the buffer + n := len(b) + if len(b) > cap(s.buf)-len(s.buf) { + n = cap(s.buf) - len(s.buf) + } + s.copyIntoBuf(b[:n]) + b = b[n:] + } + return length, nil +} + +// Read squeezes an arbitrary number of bytes from the sponge. 
+func (s *asmState) Read(out []byte) (n int, err error) { + n = len(out) + + // need to pad if we were absorbing + if s.state == spongeAbsorbing { + s.state = spongeSqueezing + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function, &s.a, out, s.buf) // len(out) may be 0 + s.buf = s.buf[:0] + return + } + + // write hash into buffer + max := cap(s.buf) + if max > len(out) { + max = (len(out)/s.rate)*s.rate + s.rate + } + klmd(s.function, &s.a, s.buf[:max], s.buf) + s.buf = s.buf[:max] + } + + for len(out) > 0 { + // flush the buffer + if len(s.buf) != 0 { + c := copy(out, s.buf) + out = out[c:] + s.buf = s.buf[c:] + continue + } + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function|nopad, &s.a, out, nil) + return + } + + // write hash into buffer + s.resetBuf() + if cap(s.buf) > len(out) { + s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate] + } + klmd(s.function|nopad, &s.a, s.buf, nil) + } + return +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (s *asmState) Sum(b []byte) []byte { + if s.state != spongeAbsorbing { + panic("sha3: Sum after Read") + } + + // Copy the state to preserve the original. + a := s.a + + // Hash the buffer. Note that we don't clear it because we + // aren't updating the state. + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) +} + +// Reset resets the Hash to its initial state. +func (s *asmState) Reset() { + for i := range s.a { + s.a[i] = 0 + } + s.resetBuf() + s.state = spongeAbsorbing +} + +// Size returns the number of bytes Sum will return. +func (s *asmState) Size() int { + return s.outputLen +} + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (s *asmState) BlockSize() int { + return s.rate +} + +// Clone returns a copy of the ShakeHash in its current state. +func (s *asmState) Clone() ShakeHash { + return s.clone() +} + +// new224Asm returns an assembly implementation of SHA3-224 if available, +// otherwise it returns nil. +func new224Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_224) + } + return nil +} + +// new256Asm returns an assembly implementation of SHA3-256 if available, +// otherwise it returns nil. +func new256Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_256) + } + return nil +} + +// new384Asm returns an assembly implementation of SHA3-384 if available, +// otherwise it returns nil. +func new384Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_384) + } + return nil +} + +// new512Asm returns an assembly implementation of SHA3-512 if available, +// otherwise it returns nil. +func new512Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_512) + } + return nil +} + +// newShake128Asm returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns nil. +func newShake128Asm() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_128) + } + return nil +} + +// newShake256Asm returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns nil. 
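The new…Asm constructors above return nil unless the SHA3 CPU facility is present. The exported constructors live in hashes.go, which is not part of this hunk, but the dispatch presumably looks like the following sketch; the rate of 136 and dsbyte 0x06 are the SHA3-256 parameters that appear elsewhere in this diff:

```go
package sha3

import "hash"

// New256 creates a new SHA3-256 hash (sketch of the presumed hashes.go dispatch).
func New256() hash.Hash {
	if h := new256Asm(); h != nil {
		return h // KIMD/KLMD-backed implementation on IBM Z
	}
	return &state{rate: 136, outputLen: 32, dsbyte: 0x06}
}
```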
+func newShake256Asm() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_256) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s new file mode 100644 index 0000000000..826b862c77 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +#include "textflag.h" + +// func kimd(function code, chain *[200]byte, src []byte) +TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG src+16(FP), R2, R3 // R2=base, R3=len + +continue: + WORD $0xB93E0002 // KIMD --, R2 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET + +// func klmd(function code, chain *[200]byte, dst, src []byte) +TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 + // TODO: SHAKE support + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG dst+16(FP), R2, R3 // R2=base, R3=len + LMG src+40(FP), R4, R5 // R4=base, R5=len + +continue: + WORD $0xB93F0024 // KLMD R2, R4 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go new file mode 100644 index 0000000000..bb69984027 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -0,0 +1,172 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file defines the ShakeHash interface, and provides +// functions for creating SHAKE and cSHAKE instances, as well as utility +// functions for hashing bytes to arbitrary-length output. +// +// +// SHAKE implementation is based on FIPS PUB 202 [1] +// cSHAKE implementations is based on NIST SP 800-185 [2] +// +// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// [2] https://doi.org/10.6028/NIST.SP.800-185 + +import ( + "encoding/binary" + "hash" + "io" +) + +// ShakeHash defines the interface to hash functions that support +// arbitrary-length output. When used as a plain [hash.Hash], it +// produces minimum-length outputs that provide full-strength generic +// security. +type ShakeHash interface { + hash.Hash + + // Read reads more output from the hash; reading affects the hash's + // state. (ShakeHash.Read is thus very different from Hash.Sum) + // It never returns an error, but subsequent calls to Write or Sum + // will panic. + io.Reader + + // Clone returns a copy of the ShakeHash in its current state. + Clone() ShakeHash +} + +// cSHAKE specific context +type cshakeState struct { + *state // SHA-3 state context and Read/Write operations + + // initBlock is the cSHAKE specific initialization set of bytes. It is initialized + // by newCShake function and stores concatenation of N followed by S, encoded + // by the method specified in 3.3 of [1]. + // It is stored here in order for Reset() to be able to put context into + // initial state. + initBlock []byte +} + +// Consts for configuring initial SHA-3 state +const ( + dsbyteShake = 0x1f + dsbyteCShake = 0x04 + rate128 = 168 + rate256 = 136 +) + +func bytepad(input []byte, w int) []byte { + // leftEncode always returns max 9 bytes + buf := make([]byte, 0, 9+len(input)+w) + buf = append(buf, leftEncode(uint64(w))...) 
+ buf = append(buf, input...) + padlen := w - (len(buf) % w) + return append(buf, make([]byte, padlen)...) +} + +func leftEncode(value uint64) []byte { + var b [9]byte + binary.BigEndian.PutUint64(b[1:], value) + // Trim all but last leading zero bytes + i := byte(1) + for i < 8 && b[i] == 0 { + i++ + } + // Prepend number of encoded bytes + b[i-1] = 9 - i + return b[i-1:] +} + +func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { + c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}} + + // leftEncode returns max 9 bytes + c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) + c.initBlock = append(c.initBlock, N...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) + c.initBlock = append(c.initBlock, S...) + c.Write(bytepad(c.initBlock, c.rate)) + return &c +} + +// Reset resets the hash to initial state. +func (c *cshakeState) Reset() { + c.state.Reset() + c.Write(bytepad(c.initBlock, c.rate)) +} + +// Clone returns copy of a cSHAKE context within its current state. +func (c *cshakeState) Clone() ShakeHash { + b := make([]byte, len(c.initBlock)) + copy(b, c.initBlock) + return &cshakeState{state: c.clone(), initBlock: b} +} + +// Clone returns copy of SHAKE context within its current state. +func (c *state) Clone() ShakeHash { + return c.clone() +} + +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +func NewShake128() ShakeHash { + if h := newShake128Asm(); h != nil { + return h + } + return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} +} + +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +func NewShake256() ShakeHash { + if h := newShake256Asm(); h != nil { + return h + } + return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake} +} + +// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, +// a customizable variant of SHAKE128. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake128. +func NewCShake128(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake128() + } + return newCShake(N, S, rate128, 32, dsbyteCShake) +} + +// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, +// a customizable variant of SHAKE256. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake256. +func NewCShake256(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake256() + } + return newCShake(N, S, rate256, 64, dsbyteCShake) +} + +// ShakeSum128 writes an arbitrary-length digest of data into hash. +func ShakeSum128(hash, data []byte) { + h := NewShake128() + h.Write(data) + h.Read(hash) +} + +// ShakeSum256 writes an arbitrary-length digest of data into hash. 
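The leftEncode helper above implements the left_encode primitive from NIST SP 800-185: the value in big-endian form with leading zero bytes trimmed, prefixed by the count of value bytes. bytepad then prepends leftEncode(w) before padding to a multiple of w. A small worked example, using a local copy of the function purely for illustration:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Local copy of the unexported leftEncode helper above, for illustration only.
func leftEncode(value uint64) []byte {
	var b [9]byte
	binary.BigEndian.PutUint64(b[1:], value)
	// Trim all but the last leading zero byte.
	i := byte(1)
	for i < 8 && b[i] == 0 {
		i++
	}
	// Prepend the number of encoded bytes.
	b[i-1] = 9 - i
	return b[i-1:]
}

func main() {
	fmt.Printf("%x\n", leftEncode(0))   // 0100: one value byte (0x00)
	fmt.Printf("%x\n", leftEncode(136)) // 0188: one value byte (0x88), the SHA3-256 rate
}
```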
+func ShakeSum256(hash, data []byte) { + h := NewShake256() + h.Write(data) + h.Read(hash) +} diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go new file mode 100644 index 0000000000..8d31cf5be2 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +// newShake128Asm returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns nil. +func newShake128Asm() ShakeHash { + return nil +} + +// newShake256Asm returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns nil. +func newShake256Asm() ShakeHash { + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go new file mode 100644 index 0000000000..7337cca88e --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !386 && !ppc64le) || purego + +package sha3 + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate]byte + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(b) +} + +var ( + xorIn = xorInGeneric + copyOut = copyOutGeneric + xorInUnaligned = xorInGeneric + copyOutUnaligned = copyOutGeneric +) + +const xorImplementationUnaligned = "generic" diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go new file mode 100644 index 0000000000..8d94771127 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor_generic.go @@ -0,0 +1,28 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +import "encoding/binary" + +// xorInGeneric xors the bytes in buf into the state; it +// makes no non-portable assumptions about memory layout +// or alignment. +func xorInGeneric(d *state, buf []byte) { + n := len(buf) / 8 + + for i := 0; i < n; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } +} + +// copyOutGeneric copies uint64s to a byte buffer. +func copyOutGeneric(d *state, b []byte) { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } +} diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go new file mode 100644 index 0000000000..870e2d16e0 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go @@ -0,0 +1,66 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || 386 || ppc64le) && !purego + +package sha3 + +import "unsafe" + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate / 8]uint64 + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(unsafe.Pointer(b)) +} + +// xorInUnaligned uses unaligned reads and writes to update d.a to contain d.a +// XOR buf. 
+func xorInUnaligned(d *state, buf []byte) { + n := len(buf) + bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] + if n >= 72 { + d.a[0] ^= bw[0] + d.a[1] ^= bw[1] + d.a[2] ^= bw[2] + d.a[3] ^= bw[3] + d.a[4] ^= bw[4] + d.a[5] ^= bw[5] + d.a[6] ^= bw[6] + d.a[7] ^= bw[7] + d.a[8] ^= bw[8] + } + if n >= 104 { + d.a[9] ^= bw[9] + d.a[10] ^= bw[10] + d.a[11] ^= bw[11] + d.a[12] ^= bw[12] + } + if n >= 136 { + d.a[13] ^= bw[13] + d.a[14] ^= bw[14] + d.a[15] ^= bw[15] + d.a[16] ^= bw[16] + } + if n >= 144 { + d.a[17] ^= bw[17] + } + if n >= 168 { + d.a[18] ^= bw[18] + d.a[19] ^= bw[19] + d.a[20] ^= bw[20] + } +} + +func copyOutUnaligned(d *state, buf []byte) { + ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) + copy(buf, ab[:]) +} + +var ( + xorIn = xorInUnaligned + copyOut = copyOutUnaligned +) + +const xorImplementationUnaligned = "unaligned" diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. 
This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 0000000000..2c033dff47 --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go new file mode 100644 index 0000000000..ecc0dabb74 --- /dev/null +++ b/vendor/golang.org/x/exp/maps/maps.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package maps defines various functions useful with maps of any type. +package maps + +// Keys returns the keys of the map m. +// The keys will be in an indeterminate order. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// Values returns the values of the map m. +// The values will be in an indeterminate order. 
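The note that Keys and Values come back in an indeterminate order pairs naturally with the vendored slices package: sorting the result is the usual way to get deterministic iteration. A short usage sketch, not part of the diff:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/maps"
	"golang.org/x/exp/slices"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	// Keys returns the keys in an indeterminate order...
	ks := maps.Keys(m)
	slices.Sort(ks) // ...so sort them when a stable order is needed.
	fmt.Println(ks) // [a b c]
}
```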
+func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} + +// Equal reports whether two maps contain the same key/value pairs. +// Values are compared using ==. +func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || v1 != v2 { + return false + } + } + return true +} + +// EqualFunc is like Equal, but compares values using eq. +// Keys are still compared with ==. +func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || !eq(v1, v2) { + return false + } + } + return true +} + +// Clear removes all entries from m, leaving it empty. +func Clear[M ~map[K]V, K comparable, V any](m M) { + for k := range m { + delete(m, k) + } +} + +// Clone returns a copy of m. This is a shallow clone: +// the new keys and values are set using ordinary assignment. +func Clone[M ~map[K]V, K comparable, V any](m M) M { + // Preserve nil in case it matters. + if m == nil { + return nil + } + r := make(M, len(m)) + for k, v := range m { + r[k] = v + } + return r +} + +// Copy copies all key/value pairs in src adding them to dst. +// When a key in src is already present in dst, +// the value in dst will be overwritten by the value associated +// with the key in src. +func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { + for k, v := range src { + dst[k] = v + } +} + +// DeleteFunc deletes any key/value pairs from m for which del returns true. +func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { + for k, v := range m { + if del(k, v) { + delete(m, k) + } + } +} diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 0000000000..fbf1934a06 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. +func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 0000000000..5e8158bba8 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,499 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
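The cmpLess and cmpCompare helpers above define a total order for floating-point types by ranking NaNs below every other value, and isNaN (defined later in sort.go) relies on the fact that only NaN compares unequal to itself. A quick sketch of what that buys callers:

```go
package main

import (
	"fmt"
	"math"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []float64{3, math.NaN(), 1, 2}
	slices.Sort(xs)
	fmt.Println(xs) // [NaN 1 2 3]: cmpLess orders NaNs before all other values

	// isNaN works for any Ordered type because only floating-point
	// NaN values compare unequal to themselves.
	fmt.Println(math.NaN() != math.NaN()) // true
}
```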
+ +// Package slices defines various functions useful with slices of any type. +package slices + +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[S ~[]E, E comparable](s1, s2 S) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using an equality +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { + for i, v1 := range s1 { + if i >= len(s2) { + return +1 + } + v2 := s2[i] + if c := cmpCompare(v1, v2); c != 0 { + return c + } + } + if len(s1) < len(s2) { + return -1 + } + return 0 +} + +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). +func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { + for i, v1 := range s1 { + if i >= len(s2) { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < len(s2) { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[S ~[]E, E comparable](s S, v E) bool { + return Index(s, v) >= 0 +} + +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). 
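The comparison and search helpers above (Equal, Compare, Index, Contains and their Func variants) behave like the counterparts later standardized in the Go 1.21 standard-library slices package. A short usage sketch:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	a := []int{1, 2, 3}
	b := []int{1, 2, 4}

	fmt.Println(slices.Equal(a, b))    // false
	fmt.Println(slices.Compare(a, b))  // -1 (first difference: 3 < 4)
	fmt.Println(slices.Index(b, 4))    // 2
	fmt.Println(slices.Contains(a, 4)) // false
}
```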
+func Insert[S ~[]E, E any](s S, i int, v ...E) S { + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) + copy(s2[i:], v) + copy(s2[i+m:], s[i:]) + return s2 + } + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. + // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. + copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + + return append(s[:i], s[j:]...) +} + +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// When DeleteFunc removes m elements, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage +// collected. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + return s[:i] +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. 
+ s2 := append(s[:i], make(S, tot-i)...) // See Insert + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 + } + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. + copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + return r + } + + // We are expanding (v is bigger than j-i). + // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. + // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. + y := len(v) - (j - i) // length of y portion + + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. + // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. +// When Compact discards m elements in total, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage collected. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } + i++ + } + } + return s[:i] +} + +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } + i++ + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. 
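Insert, Delete, Replace and Compact above all mutate in place where capacity allows and return the modified slice, so callers must reassign the result. For instance (a sketch, not part of the diff):

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 1, 2, 2, 3}
	s = slices.Compact(s) // like uniq: collapses runs of equal elements
	fmt.Println(s)        // [1 2 3]

	s = slices.Insert(s, 1, 9, 9) // [1 9 9 2 3]
	s = slices.Delete(s, 1, 3)    // [1 2 3]
	fmt.Println(s)
}
```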
+func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. +// The follow-cycles algorithm can be 1-write but it is not very cache friendly. + +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. +func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. +func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. +// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? + panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. +func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 0000000000..b67897f76b --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,195 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
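The rotation helpers at the end of slices.go are the workhorse behind the overlapping-copy cases in Insert and Replace. The identity documented above, s_final[i] = s_orig[i+r] with wraparound, can be checked directly with local copies of the unexported helpers (reproduced here only for illustration):

```go
package main

import "fmt"

// Local copies of the unexported rotation helpers above.
func swap[E any](x, y []E) {
	for i := 0; i < len(x); i++ {
		x[i], y[i] = y[i], x[i]
	}
}

func rotateLeft[E any](s []E, r int) {
	for r != 0 && r != len(s) {
		if r*2 <= len(s) {
			swap(s[:r], s[len(s)-r:])
			s = s[:len(s)-r]
		} else {
			swap(s[:len(s)-r], s[r:])
			s, r = s[len(s)-r:], r*2-len(s)
		}
	}
}

func main() {
	s := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	rotateLeft(s, 2)
	fmt.Println(s) // [2 3 4 5 6 7 8 9 0 1], i.e. s_final[i] = s_orig[(i+2)%10]
}
```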
+ +//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sort[S ~[]E, E constraints.Ordered](x S) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b. +// +// SortFunc requires that cmp is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + n := len(x) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) +} + +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { + for i := len(x) - 1; i > 0; i-- { + if cmpLess(x[i], x[i-1]) { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. +func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { + for i := len(x) - 1; i > 0; i-- { + if cmp(x[i], x[i-1]) < 0 { + return false + } + } + return true +} + +// Min returns the minimal value in x. It panics if x is empty. +// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. +func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. +// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. 
The slice must be sorted in increasing order. +func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmpLess(x[h], target) { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } + } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) +} + +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 + } else { + j = h // preserves cmp(x[j], target) >= 0 + } + } + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. +func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go new file mode 100644 index 0000000000..06f2c7a248 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownCmpFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
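The invariants spelled out in BinarySearch above (x[i-1] < target, x[j] >= target) mean the returned index is always the correct insertion point whether or not the target was found. A usage sketch, including the Func variant with a custom key comparison:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	name string
	age  int
}

func main() {
	xs := []int{1, 3, 5}
	i, ok := slices.BinarySearch(xs, 4)
	fmt.Println(i, ok) // 2 false: 4 would be inserted at index 2

	// Slice sorted by age; cmp compares an element against the target key.
	us := []user{{"ann", 30}, {"bob", 41}}
	j, ok := slices.BinarySearchFunc(us, 41, func(u user, t int) int { return u.age - t })
	fmt.Println(j, ok) // 1 true
}
```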
+func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { + child++ + } + if !(cmp(data[first+root], data[first+child]) < 0) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownCmpFunc(data, i, hi, first, cmp) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownCmpFunc(data, lo, i, first, cmp) + } +} + +// pdqsortCmpFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortCmpFunc(data, a, b, cmp) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortCmpFunc(data, a, b, cmp) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsCmpFunc(data, a, b, cmp) + limit-- + } + + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) + if hint == decreasingHint { + reverseRangeCmpFunc(data, a, b, cmp) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortCmpFunc(data, a, b, cmp) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) + a = mid + continue + } + + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortCmpFunc(data, a, mid, limit, cmp) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortCmpFunc(data, mid+1, b, limit, cmp) + b = mid + } + } +} + +// partitionCmpFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for i <= j && (cmp(data[i], data[a]) < 0) {
+		i++
+	}
+	for i <= j && !(cmp(data[j], data[a]) < 0) {
+		j--
+	}
+	if i > j {
+		data[j], data[a] = data[a], data[j]
+		return j, true
+	}
+	data[i], data[j] = data[j], data[i]
+	i++
+	j--
+
+	for {
+		for i <= j && (cmp(data[i], data[a]) < 0) {
+			i++
+		}
+		for i <= j && !(cmp(data[j], data[a]) < 0) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	data[j], data[a] = data[a], data[j]
+	return j, false
+}
+
+// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for {
+		for i <= j && !(cmp(data[a], data[i]) < 0) {
+			i++
+		}
+		for i <= j && (cmp(data[a], data[j]) < 0) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	return i
+}
+
+// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
+	const (
+		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
+		shortestShifting = 50 // don't shift any elements on short arrays
+	)
+	i := a + 1
+	for j := 0; j < maxSteps; j++ {
+		for i < b && !(cmp(data[i], data[i-1]) < 0) {
+			i++
+		}
+
+		if i == b {
+			return true
+		}
+
+		if b-a < shortestShifting {
+			return false
+		}
+
+		data[i], data[i-1] = data[i-1], data[i]
+
+		// Shift the smaller one to the left.
+		if i-a >= 2 {
+			for j := i - 1; j >= 1; j-- {
+				if !(cmp(data[j], data[j-1]) < 0) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+		// Shift the greater one to the right.
+		if b-i >= 2 {
+			for j := i + 1; j < b; j++ {
+				if !(cmp(data[j], data[j-1]) < 0) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+	}
+	return false
+}
+
+// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+	length := b - a
+	if length >= 8 {
+		random := xorshift(length)
+		modulus := nextPowerOfTwo(length)
+
+		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+			other := int(uint(random.Next()) & (modulus - 1))
+			if other >= length {
+				other -= length
+			}
+			data[idx], data[a+other] = data[a+other], data[idx]
+		}
+	}
+}
+
+// choosePivotCmpFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) + } + // Find the median among i, j, k and stores it into j. + j = medianCmpFunc(data, i, j, k, &swaps, cmp) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { + *swaps++ + return b, a + } + return a, b +} + +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) + return b +} + +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) +} + +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortCmpFunc(data, a, b, cmp) + a = b + b += blockSize + } + insertionSortCmpFunc(data, a, n, cmp) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeCmpFunc(data, a, a+blockSize, b, cmp) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeCmpFunc(data, a, m, n, cmp) + } + blockSize *= 2 + } +} + +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
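SortStableFunc earlier in this diff drives symMergeCmpFunc: it insertion-sorts fixed-size blocks, then merges adjacent pairs of blocks with SymMerge until one sorted run remains, which is what preserves the order of equal elements. A stability check as a sketch:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type rec struct {
	key int
	tag string
}

func main() {
	rs := []rec{{2, "a"}, {1, "b"}, {2, "c"}, {1, "d"}}
	// Subtraction is a valid cmp here because the keys are small ints.
	slices.SortStableFunc(rs, func(a, b rec) int { return a.key - b.key })
	fmt.Println(rs) // [{1 b} {1 d} {2 a} {2 c}]: equal keys keep their input order
}
```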
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if cmp(data[h], data[a]) < 0 { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(cmp(data[m], data[h]) < 0) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(cmp(data[p-c], data[c]) < 0) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateCmpFunc(data, start, m, end, cmp) + } + if a < start && start < mid { + symMergeCmpFunc(data, a, start, mid, cmp) + } + if mid < end && end < b { + symMergeCmpFunc(data, mid, end, b, cmp) + } +} + +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeCmpFunc(data, m-i, m, j, cmp) + i -= j + } else { + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) + j -= i + } + } + // i == j + swapRangeCmpFunc(data, m-i, m, i, cmp) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 0000000000..99b47c3986 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
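rotateCmpFunc above performs the block-swap rotation that symMerge relies on: it repeatedly swaps the shorter of two adjacent blocks into place, turning 'x u v y' into 'x v u y' in at most b-a swaps. A self-contained sketch of the same loop on a concrete int slice (the helper names here are illustrative, not part of the package):

package main

import "fmt"

// swapRange swaps data[a:a+n] with data[b:b+n].
func swapRange(data []int, a, b, n int) {
	for i := 0; i < n; i++ {
		data[a+i], data[b+i] = data[b+i], data[a+i]
	}
}

// rotate exchanges the adjacent blocks data[a:m] and data[m:b].
func rotate(data []int, a, m, b int) {
	i, j := m-a, b-m
	for i != j {
		if i > j {
			swapRange(data, m-i, m, j)
			i -= j
		} else {
			swapRange(data, m-i, m+j-i, i)
			j -= i
		}
	}
	swapRange(data, m-i, m, i) // i == j
}

func main() {
	d := []int{0, 1, 2, 3, 40, 50, 9} // x=[0] u=[1 2 3] v=[40 50] y=[9]
	rotate(d, 1, 4, 6)
	fmt.Println(d) // [0 40 50 1 2 3 9]
}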
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { + child++ + } + if !cmpLess(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !cmpLess(data[a-1], data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot. +// On return, data[newpivot] = p +func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && cmpLess(data[i], data[a]) { + i++ + } + for i <= j && !cmpLess(data[j], data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && cmpLess(data[i], data[a]) { + i++ + } + for i <= j && !cmpLess(data[j], data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !cmpLess(data[a], data[i]) { + i++ + } + for i <= j && cmpLess(data[a], data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !cmpLess(data[i], data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !cmpLess(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !cmpLess(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsOrdered scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotOrdered chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method.
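The bracketed ranges above describe a small decision ladder for pivot selection. A hedged, index-only sketch of the same ladder on an int slice; unlike choosePivotOrdered below, it does not count swaps, so it yields no increasing/decreasing hint:

package main

import "fmt"

// order2 returns the two indices with the smaller element first.
func order2(data []int, a, b int) (int, int) {
	if data[b] < data[a] {
		return b, a
	}
	return a, b
}

// median3 returns the index of the median of data[a], data[b], data[c].
func median3(data []int, a, b, c int) int {
	a, b = order2(data, a, b)
	b, c = order2(data, b, c)
	a, b = order2(data, a, b)
	return b
}

func choosePivot(data []int) int {
	l := len(data)
	i, j, k := l/4, l/2, 3*l/4
	switch {
	case l < 8:
		return j // static pivot for tiny inputs
	case l < 50:
		return median3(data, i, j, k) // simple median-of-three
	default:
		// Tukey ninther: median of the medians of three adjacent triples.
		i = median3(data, i-1, i, i+1)
		j = median3(data, j-1, j, j+1)
		k = median3(data, k-1, k, k+1)
		return median3(data, i, j, k)
	}
}

func main() {
	data := []int{9, 3, 7, 1, 8, 2, 6, 5, 4, 0}
	p := choosePivot(data)
	fmt.Printf("pivot index %d, value %d\n", p, data[p]) // pivot index 7, value 5
}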
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if cmpLess(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if cmpLess(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !cmpLess(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !cmpLess(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/golang.org/x/exp/typeparams/typeparams_go117.go b/vendor/golang.org/x/exp/typeparams/typeparams_go117.go index efc33f10f3..c1da793168 100644 --- a/vendor/golang.org/x/exp/typeparams/typeparams_go117.go +++ b/vendor/golang.org/x/exp/typeparams/typeparams_go117.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.18 -// +build !go1.18 package typeparams diff --git a/vendor/golang.org/x/exp/typeparams/typeparams_go118.go b/vendor/golang.org/x/exp/typeparams/typeparams_go118.go index 1176104b20..0b35449d15 100644 --- a/vendor/golang.org/x/exp/typeparams/typeparams_go118.go +++ b/vendor/golang.org/x/exp/typeparams/typeparams_go118.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build go1.18 -// +build go1.18 package typeparams diff --git a/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go index 2681af35af..150f887e7a 100644 --- a/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go +++ b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go @@ -13,7 +13,7 @@ import ( "sync" ) -// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be +// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be // compiled the first time it is needed. type Regexp struct { str string diff --git a/vendor/golang.org/x/mod/modfile/print.go b/vendor/golang.org/x/mod/modfile/print.go index 524f93022a..2a0123d4b9 100644 --- a/vendor/golang.org/x/mod/modfile/print.go +++ b/vendor/golang.org/x/mod/modfile/print.go @@ -16,7 +16,13 @@ import ( func Format(f *FileSyntax) []byte { pr := &printer{} pr.file(f) - return pr.Bytes() + + // remove trailing blank lines + b := pr.Bytes() + for len(b) > 0 && b[len(b)-1] == '\n' && (len(b) == 1 || b[len(b)-2] == '\n') { + b = b[:len(b)-1] + } + return b } // A printer collects the state during printing of a file or expression. @@ -59,7 +65,11 @@ func (p *printer) newline() { } p.trim() - p.printf("\n") + if b := p.Bytes(); len(b) == 0 || (len(b) >= 2 && b[len(b)-1] == '\n' && b[len(b)-2] == '\n') { + // skip the blank line at top of file or after a blank line + } else { + p.printf("\n") + } for i := 0; i < p.margin; i++ { p.printf("\t") } diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go index a503bc2105..5b5bb5e115 100644 --- a/vendor/golang.org/x/mod/modfile/read.go +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -65,7 +65,7 @@ type Comments struct { } // Comment returns the receiver. This isn't useful by itself, but -// a Comments struct is embedded into all the expression +// a [Comments] struct is embedded into all the expression // implementation types, and this gives each of those a Comment // method to satisfy the Expr interface. func (c *Comments) Comment() *Comments { diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go index 6bcde8fabe..26acaa5f7c 100644 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -5,17 +5,17 @@ // Package modfile implements a parser and formatter for go.mod files. // // The go.mod syntax is described in -// https://golang.org/cmd/go/#hdr-The_go_mod_file. +// https://pkg.go.dev/cmd/go/#hdr-The_go_mod_file. // -// The Parse and ParseLax functions both parse a go.mod file and return an +// The [Parse] and [ParseLax] functions both parse a go.mod file and return an // abstract syntax tree. ParseLax ignores unknown statements and may be used to // parse go.mod files that may have been developed with newer versions of Go. // -// The File struct returned by Parse and ParseLax represent an abstract -// go.mod file. File has several methods like AddNewRequire and DropReplace -// that can be used to programmatically edit a file. +// The [File] struct returned by Parse and ParseLax represent an abstract +// go.mod file. File has several methods like [File.AddNewRequire] and +// [File.DropReplace] that can be used to programmatically edit a file. // -// The Format function formats a File back to a byte slice which can be +// The [Format] function formats a File back to a byte slice which can be // written to a file. 
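The print.go hunk above teaches Format to drop trailing blank lines (and newline to suppress leading or doubled blank lines), so files stay tidy across repeated edits. A minimal round trip through the Parse and Format entry points named in this package doc, with a made-up go.mod for illustration:

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	src := []byte("module example.com/demo\n\ngo 1.21.0\n\n\n")
	// nil fixer: all versions in the file must already be canonical.
	f, err := modfile.Parse("go.mod", src, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Module.Mod.Path, f.Go.Version) // example.com/demo 1.21.0

	f.Cleanup()
	out := modfile.Format(f.Syntax) // trailing blank lines are trimmed here
	fmt.Printf("%q\n", out)         // ends in a single "\n"
}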
package modfile @@ -35,12 +35,13 @@ import ( // A File is the parsed, interpreted form of a go.mod file. type File struct { - Module *Module - Go *Go - Require []*Require - Exclude []*Exclude - Replace []*Replace - Retract []*Retract + Module *Module + Go *Go + Toolchain *Toolchain + Require []*Require + Exclude []*Exclude + Replace []*Replace + Retract []*Retract Syntax *FileSyntax } @@ -58,6 +59,12 @@ type Go struct { Syntax *Line } +// A Toolchain is the toolchain statement. +type Toolchain struct { + Name string // "go1.21rc1" + Syntax *Line +} + // An Exclude is a single exclude statement. type Exclude struct { Mod module.Version @@ -219,7 +226,7 @@ var dontFixRetract VersionFixer = func(_, vers string) (string, error) { // data is the content of the file. // // fix is an optional function that canonicalizes module versions. -// If fix is nil, all module versions must be canonical (module.CanonicalVersion +// If fix is nil, all module versions must be canonical ([module.CanonicalVersion] // must return the same string). func Parse(file string, data []byte, fix VersionFixer) (*File, error) { return parseToFile(file, data, fix, true) @@ -296,9 +303,14 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse return f, nil } -var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)$`) +var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?([a-z]+[0-9]+)?$`) var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9].*)$`) +// Toolchains must be named beginning with `go1`, +// like "go1.20.3" or "go1.20.3-gccgo". As a special case, "default" is also permitted. +// TODO(samthanawalla): Replace regex with https://pkg.go.dev/go/version#IsValid in 1.23+ +var ToolchainRE = lazyregexp.New(`^default$|^go1($|\.)`) + func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) { // If strict is false, this module is a dependency. // We ignore all unknown directives as well as main-module-only @@ -356,7 +368,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a } } if !fixed { - errorf("invalid go version '%s': must match format 1.23", args[0]) + errorf("invalid go version '%s': must match format 1.23.0", args[0]) return } } @@ -364,6 +376,21 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a f.Go = &Go{Syntax: line} f.Go.Version = args[0] + case "toolchain": + if f.Toolchain != nil { + errorf("repeated toolchain statement") + return + } + if len(args) != 1 { + errorf("toolchain directive expects exactly one argument") + return + } else if strict && !ToolchainRE.MatchString(args[0]) { + errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0]) + return + } + f.Toolchain = &Toolchain{Syntax: line} + f.Toolchain.Name = args[0] + case "module": if f.Module != nil { errorf("repeated module statement") @@ -516,7 +543,7 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V if strings.Contains(ns, "@") { return nil, errorf("replacement module must match format 'path version', not 'path@version'") } - return nil, errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)") + return nil, errorf("replacement module without version must be directory path (rooted or starting with . 
or ..)") } if filepath.Separator == '/' && strings.Contains(ns, `\`) { return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)") @@ -529,7 +556,6 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V } if IsDirectoryPath(ns) { return nil, errorf("replacement module directory path %q cannot have version", ns) - } } return &Replace{ @@ -605,13 +631,29 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, errorf("go directive expects exactly one argument") return } else if !GoVersionRE.MatchString(args[0]) { - errorf("invalid go version '%s': must match format 1.23", args[0]) + errorf("invalid go version '%s': must match format 1.23.0", args[0]) return } f.Go = &Go{Syntax: line} f.Go.Version = args[0] + case "toolchain": + if f.Toolchain != nil { + errorf("repeated toolchain statement") + return + } + if len(args) != 1 { + errorf("toolchain directive expects exactly one argument") + return + } else if !ToolchainRE.MatchString(args[0]) { + errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0]) + return + } + + f.Toolchain = &Toolchain{Syntax: line} + f.Toolchain.Name = args[0] + case "use": if len(args) != 1 { errorf("usage: %s local/dir", verb) @@ -637,14 +679,15 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, } } -// IsDirectoryPath reports whether the given path should be interpreted -// as a directory path. Just like on the go command line, relative paths +// IsDirectoryPath reports whether the given path should be interpreted as a directory path. +// Just like on the go command line, relative paths starting with a '.' or '..' path component // and rooted paths are directory paths; the rest are module paths. func IsDirectoryPath(ns string) bool { // Because go.mod files can move from one system to another, // we check all known path syntaxes, both Unix and Windows. - return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") || - strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) || + return ns == "." || strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, `.\`) || + ns == ".." || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, `..\`) || + strings.HasPrefix(ns, "/") || strings.HasPrefix(ns, `\`) || len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':' } @@ -881,7 +924,7 @@ func (f *File) Format() ([]byte, error) { } // Cleanup cleans up the file f after any edit operations. -// To avoid quadratic behavior, modifications like DropRequire +// To avoid quadratic behavior, modifications like [File.DropRequire] // clear the entry but do not remove it from the slice. // Cleanup cleans out all the cleared entries. func (f *File) Cleanup() { @@ -926,7 +969,7 @@ func (f *File) Cleanup() { func (f *File) AddGoStmt(version string) error { if !GoVersionRE.MatchString(version) { - return fmt.Errorf("invalid language version string %q", version) + return fmt.Errorf("invalid language version %q", version) } if f.Go == nil { var hint Expr @@ -944,6 +987,44 @@ func (f *File) AddGoStmt(version string) error { return nil } +// DropGoStmt deletes the go statement from the file. +func (f *File) DropGoStmt() { + if f.Go != nil { + f.Go.Syntax.markRemoved() + f.Go = nil + } +} + +// DropToolchainStmt deletes the toolchain statement from the file. 
+func (f *File) DropToolchainStmt() { + if f.Toolchain != nil { + f.Toolchain.Syntax.markRemoved() + f.Toolchain = nil + } +} + +func (f *File) AddToolchainStmt(name string) error { + if !ToolchainRE.MatchString(name) { + return fmt.Errorf("invalid toolchain name %q", name) + } + if f.Toolchain == nil { + var hint Expr + if f.Go != nil && f.Go.Syntax != nil { + hint = f.Go.Syntax + } else if f.Module != nil && f.Module.Syntax != nil { + hint = f.Module.Syntax + } + f.Toolchain = &Toolchain{ + Name: name, + Syntax: f.Syntax.addLine(hint, "toolchain", name), + } + } else { + f.Toolchain.Name = name + f.Syntax.updateLine(f.Toolchain.Syntax, "toolchain", name) + } + return nil +} + // AddRequire sets the first require line for path to version vers, // preserving any existing comments for that line and removing all // other lines for path. @@ -995,8 +1076,8 @@ func (f *File) AddNewRequire(path, vers string, indirect bool) { // The requirements in req must specify at most one distinct version for each // module path. // -// If any existing requirements may be removed, the caller should call Cleanup -// after all edits are complete. +// If any existing requirements may be removed, the caller should call +// [File.Cleanup] after all edits are complete. func (f *File) SetRequire(req []*Require) { type elem struct { version string @@ -1387,13 +1468,21 @@ func (f *File) DropRetract(vi VersionInterval) error { func (f *File) SortBlocks() { f.removeDups() // otherwise sorting is unsafe + // semanticSortForExcludeVersionV is the Go version (plus leading "v") at which + // lines in exclude blocks start to use semantic sort instead of lexicographic sort. + // See go.dev/issue/60028. + const semanticSortForExcludeVersionV = "v1.21" + useSemanticSortForExclude := f.Go != nil && semver.Compare("v"+f.Go.Version, semanticSortForExcludeVersionV) >= 0 + for _, stmt := range f.Syntax.Stmt { block, ok := stmt.(*LineBlock) if !ok { continue } less := lineLess - if block.Token[0] == "retract" { + if block.Token[0] == "exclude" && useSemanticSortForExclude { + less = lineExcludeLess + } else if block.Token[0] == "retract" { less = lineRetractLess } sort.SliceStable(block.Line, func(i, j int) bool { @@ -1496,6 +1585,22 @@ func lineLess(li, lj *Line) bool { return len(li.Token) < len(lj.Token) } +// lineExcludeLess reports whether li should be sorted before lj for lines in +// an "exclude" block. +func lineExcludeLess(li, lj *Line) bool { + if len(li.Token) != 2 || len(lj.Token) != 2 { + // Not a known exclude specification. + // Fall back to sorting lexicographically. + return lineLess(li, lj) + } + // An exclude specification has two tokens: ModulePath and Version. + // Compare module path by string order and version by semver rules. + if pi, pj := li.Token[0], lj.Token[0]; pi != pj { + return pi < pj + } + return semver.Compare(li.Token[1], lj.Token[1]) < 0 +} + // lineRetractLess returns whether li should be sorted before lj for lines in // a "retract" block. It treats each line as a version interval. Single versions // are compared as if they were intervals with the same low and high version. diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go index 0c0e521525..d7b99376eb 100644 --- a/vendor/golang.org/x/mod/modfile/work.go +++ b/vendor/golang.org/x/mod/modfile/work.go @@ -12,9 +12,10 @@ import ( // A WorkFile is the parsed, interpreted form of a go.work file. 
type WorkFile struct { - Go *Go - Use []*Use - Replace []*Replace + Go *Go + Toolchain *Toolchain + Use []*Use + Replace []*Replace Syntax *FileSyntax } @@ -33,7 +34,7 @@ type Use struct { // data is the content of the file. // // fix is an optional function that canonicalizes module versions. -// If fix is nil, all module versions must be canonical (module.CanonicalVersion +// If fix is nil, all module versions must be canonical ([module.CanonicalVersion] // must return the same string). func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { fs, err := parse(file, data) @@ -82,7 +83,7 @@ func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { } // Cleanup cleans up the file f after any edit operations. -// To avoid quadratic behavior, modifications like DropRequire +// To avoid quadratic behavior, modifications like [WorkFile.DropRequire] // clear the entry but do not remove it from the slice. // Cleanup cleans out all the cleared entries. func (f *WorkFile) Cleanup() { @@ -109,7 +110,7 @@ func (f *WorkFile) Cleanup() { func (f *WorkFile) AddGoStmt(version string) error { if !GoVersionRE.MatchString(version) { - return fmt.Errorf("invalid language version string %q", version) + return fmt.Errorf("invalid language version %q", version) } if f.Go == nil { stmt := &Line{Token: []string{"go", version}} @@ -117,7 +118,7 @@ func (f *WorkFile) AddGoStmt(version string) error { Version: version, Syntax: stmt, } - // Find the first non-comment-only block that's and add + // Find the first non-comment-only block and add // the go statement before it. That will keep file comments at the top. i := 0 for i = 0; i < len(f.Syntax.Stmt); i++ { @@ -133,6 +134,56 @@ func (f *WorkFile) AddGoStmt(version string) error { return nil } +func (f *WorkFile) AddToolchainStmt(name string) error { + if !ToolchainRE.MatchString(name) { + return fmt.Errorf("invalid toolchain name %q", name) + } + if f.Toolchain == nil { + stmt := &Line{Token: []string{"toolchain", name}} + f.Toolchain = &Toolchain{ + Name: name, + Syntax: stmt, + } + // Find the go line and add the toolchain line after it. + // Or else find the first non-comment-only block and add + // the toolchain line before it. That will keep file comments at the top. + i := 0 + for i = 0; i < len(f.Syntax.Stmt); i++ { + if line, ok := f.Syntax.Stmt[i].(*Line); ok && len(line.Token) > 0 && line.Token[0] == "go" { + i++ + goto Found + } + } + for i = 0; i < len(f.Syntax.Stmt); i++ { + if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok { + break + } + } + Found: + f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...) + } else { + f.Toolchain.Name = name + f.Syntax.updateLine(f.Toolchain.Syntax, "toolchain", name) + } + return nil +} + +// DropGoStmt deletes the go statement from the file. +func (f *WorkFile) DropGoStmt() { + if f.Go != nil { + f.Go.Syntax.markRemoved() + f.Go = nil + } +} + +// DropToolchainStmt deletes the toolchain statement from the file. 
+func (f *WorkFile) DropToolchainStmt() { + if f.Toolchain != nil { + f.Toolchain.Syntax.markRemoved() + f.Toolchain = nil + } +} + func (f *WorkFile) AddUse(diskPath, modulePath string) error { need := true for _, d := range f.Use { diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index e9dec6e614..2a364b229b 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -4,7 +4,7 @@ // Package module defines the module.Version type along with support code. // -// The module.Version type is a simple Path, Version pair: +// The [module.Version] type is a simple Path, Version pair: // // type Version struct { // Path string @@ -12,7 +12,7 @@ // } // // There are no restrictions imposed directly by use of this structure, -// but additional checking functions, most notably Check, verify that +// but additional checking functions, most notably [Check], verify that // a particular path, version pair is valid. // // # Escaped Paths @@ -140,7 +140,7 @@ type ModuleError struct { Err error } -// VersionError returns a ModuleError derived from a Version and error, +// VersionError returns a [ModuleError] derived from a [Version] and error, // or err itself if it is already such an error. func VersionError(v Version, err error) error { var mErr *ModuleError @@ -169,7 +169,7 @@ func (e *ModuleError) Unwrap() error { return e.Err } // An InvalidVersionError indicates an error specific to a version, with the // module path unknown or specified externally. // -// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError +// A [ModuleError] may wrap an InvalidVersionError, but an InvalidVersionError // must not wrap a ModuleError. type InvalidVersionError struct { Version string @@ -193,8 +193,8 @@ func (e *InvalidVersionError) Error() string { func (e *InvalidVersionError) Unwrap() error { return e.Err } // An InvalidPathError indicates a module, import, or file path doesn't -// satisfy all naming constraints. See CheckPath, CheckImportPath, -// and CheckFilePath for specific restrictions. +// satisfy all naming constraints. See [CheckPath], [CheckImportPath], +// and [CheckFilePath] for specific restrictions. type InvalidPathError struct { Kind string // "module", "import", or "file" Path string @@ -294,7 +294,7 @@ func fileNameOK(r rune) bool { } // CheckPath checks that a module path is valid. -// A valid module path is a valid import path, as checked by CheckImportPath, +// A valid module path is a valid import path, as checked by [CheckImportPath], // with three additional constraints. // First, the leading path element (up to the first slash, if any), // by convention a domain name, must contain only lower-case ASCII letters, @@ -380,7 +380,7 @@ const ( // checkPath returns an error describing why the path is not valid. // Because these checks apply to module, import, and file paths, // and because other checks may be applied, the caller is expected to wrap -// this error with InvalidPathError. +// this error with [InvalidPathError]. func checkPath(path string, kind pathKind) error { if !utf8.ValidString(path) { return fmt.Errorf("invalid UTF-8") @@ -532,7 +532,7 @@ var badWindowsNames = []string{ // they require ".vN" instead of "/vN", and for all N, not just N >= 2. // SplitPathVersion returns with ok = false when presented with // a path whose last path element does not satisfy the constraints -// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2". 
+// applied by [CheckPath], such as "example.com/pkg/v1" or "example.com/pkg/v1.2". func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { if strings.HasPrefix(path, "gopkg.in/") { return splitGopkgIn(path) @@ -582,7 +582,7 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { // MatchPathMajor reports whether the semantic version v // matches the path major version pathMajor. // -// MatchPathMajor returns true if and only if CheckPathMajor returns nil. +// MatchPathMajor returns true if and only if [CheckPathMajor] returns nil. func MatchPathMajor(v, pathMajor string) bool { return CheckPathMajor(v, pathMajor) == nil } @@ -622,7 +622,7 @@ func CheckPathMajor(v, pathMajor string) error { // PathMajorPrefix returns the major-version tag prefix implied by pathMajor. // An empty PathMajorPrefix allows either v0 or v1. // -// Note that MatchPathMajor may accept some versions that do not actually begin +// Note that [MatchPathMajor] may accept some versions that do not actually begin // with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1' // pathMajor, even though that pathMajor implies 'v1' tagging. func PathMajorPrefix(pathMajor string) string { @@ -643,7 +643,7 @@ func PathMajorPrefix(pathMajor string) string { } // CanonicalVersion returns the canonical form of the version string v. -// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible". +// It is the same as [semver.Canonical] except that it preserves the special build suffix "+incompatible". func CanonicalVersion(v string) string { cv := semver.Canonical(v) if semver.Build(v) == "+incompatible" { @@ -652,8 +652,8 @@ func CanonicalVersion(v string) string { return cv } -// Sort sorts the list by Path, breaking ties by comparing Version fields. -// The Version fields are interpreted as semantic versions (using semver.Compare) +// Sort sorts the list by Path, breaking ties by comparing [Version] fields. +// The Version fields are interpreted as semantic versions (using [semver.Compare]) // optionally followed by a tie-breaking suffix introduced by a slash character, // like in "v0.0.1/go.mod". func Sort(list []Version) { @@ -793,7 +793,7 @@ func unescapeString(escaped string) (string, bool) { } // MatchPrefixPatterns reports whether any path prefix of target matches one of -// the glob patterns (as defined by path.Match) in the comma-separated globs +// the glob patterns (as defined by [path.Match]) in the comma-separated globs // list. This implements the algorithm used when matching a module path to the // GOPRIVATE environment variable, as described by 'go help module-private'. // diff --git a/vendor/golang.org/x/mod/module/pseudo.go b/vendor/golang.org/x/mod/module/pseudo.go index f04ad37886..9cf19d3254 100644 --- a/vendor/golang.org/x/mod/module/pseudo.go +++ b/vendor/golang.org/x/mod/module/pseudo.go @@ -125,7 +125,7 @@ func IsPseudoVersion(v string) bool { } // IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base, -// timestamp, and revision, as returned by ZeroPseudoVersion. +// timestamp, and revision, as returned by [ZeroPseudoVersion]. 
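The doc rewrites above are largely link-ification, but they restate the version semantics this package guarantees: CanonicalVersion is semver.Canonical plus preservation of the "+incompatible" build suffix, and Sort orders by path first and then by semantic version rather than by string. A small demonstration using only APIs documented in this hunk:

package main

import (
	"fmt"

	"golang.org/x/mod/module"
	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(module.CanonicalVersion("v2.0.0+incompatible")) // v2.0.0+incompatible
	fmt.Println(semver.Compare("v1.10.0", "v1.9.0"))            // 1 (semver order, not string order)

	list := []module.Version{
		{Path: "example.com/b", Version: "v1.2.0"},
		{Path: "example.com/a", Version: "v1.10.0"},
		{Path: "example.com/a", Version: "v1.9.0"},
	}
	module.Sort(list)
	for _, m := range list {
		fmt.Println(m.Path, m.Version) // a v1.9.0, a v1.10.0, b v1.2.0
	}
}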
func IsZeroPseudoVersion(v string) bool { return v == ZeroPseudoVersion(semver.Major(v)) } diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index a30a22bf20..9a2dfd33a7 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -140,7 +140,7 @@ func Compare(v, w string) int { // Max canonicalizes its arguments and then returns the version string // that compares greater. // -// Deprecated: use Compare instead. In most cases, returning a canonicalized +// Deprecated: use [Compare] instead. In most cases, returning a canonicalized // version is not expected or desired. func Max(v, w string) string { v = Canonical(v) @@ -151,7 +151,7 @@ func Max(v, w string) string { return w } -// ByVersion implements sort.Interface for sorting semantic version strings. +// ByVersion implements [sort.Interface] for sorting semantic version strings. type ByVersion []string func (vs ByVersion) Len() int { return len(vs) } @@ -164,7 +164,7 @@ func (vs ByVersion) Less(i, j int) bool { return vs[i] < vs[j] } -// Sort sorts a list of semantic version strings using ByVersion. +// Sort sorts a list of semantic version strings using [ByVersion]. func Sort(list []string) { sort.Sort(ByVersion(list)) } diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index cf66309c4a..0000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. 
It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b867937..0000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. 
-// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a904..0000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa5..0000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. 
-func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. 
- return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a638033..0000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. 
Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
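A note on the vendored context shims deleted in this hunk: the propagateCancel/cancelCtx machinery above is what makes cancellation flow from parent to descendants. A minimal sketch of that observable behavior, written against the standard library context package rather than this vendored copy:

package main

import (
	"context"
	"fmt"
)

func main() {
	parent, cancelParent := context.WithCancel(context.Background())
	child, cancelChild := context.WithCancel(parent) // child registers itself with parent
	defer cancelChild()

	cancelParent()           // canceling the parent...
	<-child.Done()           // ...also closes the child's Done channel
	fmt.Println(child.Err()) // prints "context canceled"
}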
-type CancelFunc func() diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c1f6b90dc3..e2b298d859 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index df578b86c6..c2a5b44b3d 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -2911,6 +2911,15 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } cc.cond.Broadcast() diff --git a/vendor/golang.org/x/oauth2/authhandler/authhandler.go b/vendor/golang.org/x/oauth2/authhandler/authhandler.go deleted file mode 100644 index 9bc6cd7bc5..0000000000 --- a/vendor/golang.org/x/oauth2/authhandler/authhandler.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package authhandler implements a TokenSource to support -// "three-legged OAuth 2.0" via a custom AuthorizationHandler. -package authhandler - -import ( - "context" - "errors" - - "golang.org/x/oauth2" -) - -const ( - // Parameter keys for AuthCodeURL method to support PKCE. - codeChallengeKey = "code_challenge" - codeChallengeMethodKey = "code_challenge_method" - - // Parameter key for Exchange method to support PKCE. - codeVerifierKey = "code_verifier" -) - -// PKCEParams holds parameters to support PKCE. -type PKCEParams struct { - Challenge string // The unpadded, base64-url-encoded string of the encrypted code verifier. - ChallengeMethod string // The encryption method (ex. S256). - Verifier string // The original, non-encrypted secret. -} - -// AuthorizationHandler is a 3-legged-OAuth helper that prompts -// the user for OAuth consent at the specified auth code URL -// and returns an auth code and state upon approval. -type AuthorizationHandler func(authCodeURL string) (code string, state string, err error) - -// TokenSourceWithPKCE is an enhanced version of TokenSource with PKCE support. -// -// The pkce parameter supports PKCE flow, which uses code challenge and code verifier -// to prevent CSRF attacks. A unique code challenge and code verifier should be generated -// by the caller at runtime. See https://www.oauth.com/oauth2-servers/pkce/ for more info. 
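The authhandler package deleted here still exists upstream in golang.org/x/oauth2; only the vendored copy is going away. A hedged sketch of wiring the TokenSourceWithPKCE function below with an S256 code challenge; the verifier, config fields, and handler responses are all placeholders:

package main

import (
	"context"
	"crypto/sha256"
	"encoding/base64"
	"fmt"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/authhandler"
)

func main() {
	verifier := "placeholder-random-verifier" // generate cryptographically in real use
	sum := sha256.Sum256([]byte(verifier))
	pkce := &authhandler.PKCEParams{
		Challenge:       base64.RawURLEncoding.EncodeToString(sum[:]), // unpadded base64url
		ChallengeMethod: "S256",
		Verifier:        verifier,
	}
	cfg := &oauth2.Config{} // ClientID, Endpoint, RedirectURL elided
	handler := func(authCodeURL string) (code, state string, err error) {
		fmt.Println("visit:", authCodeURL)       // user consents out of band
		return "code-from-callback", "csrf", nil // state must echo back unchanged
	}
	ts := authhandler.TokenSourceWithPKCE(context.Background(), cfg, "csrf", handler, pkce)
	_ = ts // ts.Token() verifies the state and exchanges the code
}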
-func TokenSourceWithPKCE(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler, pkce *PKCEParams) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, authHandlerSource{config: config, ctx: ctx, authHandler: authHandler, state: state, pkce: pkce}) -} - -// TokenSource returns an oauth2.TokenSource that fetches access tokens -// using 3-legged-OAuth flow. -// -// The provided context.Context is used for oauth2 Exchange operation. -// -// The provided oauth2.Config should be a full configuration containing AuthURL, -// TokenURL, and Scope. -// -// An environment-specific AuthorizationHandler is used to obtain user consent. -// -// Per the OAuth protocol, a unique "state" string should be specified here. -// This token source will verify that the "state" is identical in the request -// and response before exchanging the auth code for OAuth token to prevent CSRF -// attacks. -func TokenSource(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler) oauth2.TokenSource { - return TokenSourceWithPKCE(ctx, config, state, authHandler, nil) -} - -type authHandlerSource struct { - ctx context.Context - config *oauth2.Config - authHandler AuthorizationHandler - state string - pkce *PKCEParams -} - -func (source authHandlerSource) Token() (*oauth2.Token, error) { - // Step 1: Obtain auth code. - var authCodeUrlOptions []oauth2.AuthCodeOption - if source.pkce != nil && source.pkce.Challenge != "" && source.pkce.ChallengeMethod != "" { - authCodeUrlOptions = []oauth2.AuthCodeOption{oauth2.SetAuthURLParam(codeChallengeKey, source.pkce.Challenge), - oauth2.SetAuthURLParam(codeChallengeMethodKey, source.pkce.ChallengeMethod)} - } - url := source.config.AuthCodeURL(source.state, authCodeUrlOptions...) - code, state, err := source.authHandler(url) - if err != nil { - return nil, err - } - if state != source.state { - return nil, errors.New("state mismatch in 3-legged-OAuth flow") - } - - // Step 2: Exchange auth code for access token. - var exchangeOptions []oauth2.AuthCodeOption - if source.pkce != nil && source.pkce.Verifier != "" { - exchangeOptions = []oauth2.AuthCodeOption{oauth2.SetAuthURLParam(codeVerifierKey, source.pkce.Verifier)} - } - return source.config.Exchange(source.ctx, code, exchangeOptions...) -} diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go deleted file mode 100644 index feb1157b15..0000000000 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "context" - "time" - - "golang.org/x/oauth2" -) - -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) - -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineAppIDFunc func(c context.Context) string - -// AppEngineTokenSource returns a token source that fetches tokens from either -// the current application's service account or from the metadata server, -// depending on the App Engine environment. See below for environment-specific -// details. 
If you are implementing a 3-legged OAuth 2.0 flow on App Engine that -// involves user accounts, see oauth2.Config instead. -// -// First generation App Engine runtimes (<= Go 1.9): -// AppEngineTokenSource returns a token source that fetches tokens issued to the -// current App Engine application's service account. The provided context must have -// come from appengine.NewContext. -// -// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible: -// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the -// flexible environment. It delegates to ComputeTokenSource, and the provided -// context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource, -// which DefaultTokenSource will use in this case) instead. -func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - return appEngineTokenSource(ctx, scope...) -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go deleted file mode 100644 index e61587945b..0000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine - -// This file applies to App Engine first generation runtimes (<= Go 1.9). - -package google - -import ( - "context" - "sort" - "strings" - "sync" - - "golang.org/x/oauth2" - "google.golang.org/appengine" -) - -func init() { - appengineTokenFunc = appengine.AccessToken - appengineAppIDFunc = appengine.AppID -} - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &gaeTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. -var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type gaeTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) - if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go deleted file mode 100644 index 9c79aa0a0c..0000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !appengine - -// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. 
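The shim below logs a one-time deprecation warning and delegates to ComputeTokenSource, so its removal loses nothing. For reference, a minimal sketch of the replacement the warning recommends; it only yields a token when actually running on GCE or a second-generation App Engine runtime, and the scope is an example:

package main

import (
	"fmt"

	"golang.org/x/oauth2/google"
)

func main() {
	// An empty account selects the "default" service account.
	ts := google.ComputeTokenSource("", "https://www.googleapis.com/auth/cloud-platform")
	tok, err := ts.Token() // fetched from the instance metadata server
	if err != nil {
		fmt.Println("not running on GCE:", err)
		return
	}
	fmt.Println("token expires:", tok.Expiry)
}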
- -package google - -import ( - "context" - "log" - "sync" - - "golang.org/x/oauth2" -) - -var logOnce sync.Once // only spam about deprecation once - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - logOnce.Do(func() { - log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") - }) - return ComputeTokenSource("") -} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go deleted file mode 100644 index 02ccd08a77..0000000000 --- a/vendor/golang.org/x/oauth2/google/default.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "os" - "path/filepath" - "runtime" - "sync" - "time" - - "cloud.google.com/go/compute/metadata" - "golang.org/x/oauth2" - "golang.org/x/oauth2/authhandler" -) - -const ( - adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" - universeDomainDefault = "googleapis.com" -) - -// Credentials holds Google credentials, including "Application Default Credentials". -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -// Credentials from external accounts (workload identity federation) are used to -// identify a particular application from an on-prem or non-Google Cloud platform -// including Amazon Web Services (AWS), Microsoft Azure or any identity provider -// that supports OpenID Connect (OIDC). -type Credentials struct { - ProjectID string // may be empty - TokenSource oauth2.TokenSource - - // JSON contains the raw bytes from a JSON credentials file. - // This field may be nil if authentication is provided by the - // environment and not with a credentials file, e.g. when code is - // running on Google Cloud Platform. - JSON []byte - - udMu sync.Mutex // guards universeDomain - // universeDomain is the default service domain for a given Cloud universe. - universeDomain string -} - -// UniverseDomain returns the default service domain for a given Cloud universe. -// -// The default value is "googleapis.com". -// -// Deprecated: Use instead (*Credentials).GetUniverseDomain(), which supports -// obtaining the universe domain when authenticating via the GCE metadata server. -// Unlike GetUniverseDomain, this method, UniverseDomain, will always return the -// default value when authenticating via the GCE metadata server. -// See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). -func (c *Credentials) UniverseDomain() string { - if c.universeDomain == "" { - return universeDomainDefault - } - return c.universeDomain -} - -// GetUniverseDomain returns the default service domain for a given Cloud -// universe. -// -// The default value is "googleapis.com". -// -// It obtains the universe domain from the attached service account on GCE when -// authenticating via the GCE metadata server. See also [The attached service -// account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). -// If the GCE metadata server returns a 404 error, the default value is -// returned. 
If the GCE metadata server returns an error other than 404, the -// error is returned. -func (c *Credentials) GetUniverseDomain() (string, error) { - c.udMu.Lock() - defer c.udMu.Unlock() - if c.universeDomain == "" && metadata.OnGCE() { - // If we're on Google Compute Engine, an App Engine standard second - // generation runtime, or App Engine flexible, use the metadata server. - err := c.computeUniverseDomain() - if err != nil { - return "", err - } - } - // If not on Google Compute Engine, or in case of any non-error path in - // computeUniverseDomain that did not set universeDomain, set the default - // universe domain. - if c.universeDomain == "" { - c.universeDomain = universeDomainDefault - } - return c.universeDomain, nil -} - -// computeUniverseDomain fetches the default service domain for a given Cloud -// universe from Google Compute Engine (GCE)'s metadata server. It's only valid -// to use this method if your program is running on a GCE instance. -func (c *Credentials) computeUniverseDomain() error { - var err error - c.universeDomain, err = metadata.Get("universe/universe_domain") - if err != nil { - if _, ok := err.(metadata.NotDefinedError); ok { - // http.StatusNotFound (404) - c.universeDomain = universeDomainDefault - return nil - } else { - return err - } - } - return nil -} - -// DefaultCredentials is the old name of Credentials. -// -// Deprecated: use Credentials instead. -type DefaultCredentials = Credentials - -// CredentialsParams holds user supplied parameters that are used together -// with a credentials file for building a Credentials object. -type CredentialsParams struct { - // Scopes is the list OAuth scopes. Required. - // Example: https://www.googleapis.com/auth/cloud-platform - Scopes []string - - // Subject is the user email used for domain wide delegation (see - // https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority). - // Optional. - Subject string - - // AuthHandler is the AuthorizationHandler used for 3-legged OAuth flow. Required for 3LO flow. - AuthHandler authhandler.AuthorizationHandler - - // State is a unique string used with AuthHandler. Required for 3LO flow. - State string - - // PKCE is used to support PKCE flow. Optional for 3LO flow. - PKCE *authhandler.PKCEParams - - // The OAuth2 TokenURL default override. This value overrides the default TokenURL, - // unless explicitly specified by the credentials config file. Optional. - TokenURL string - - // EarlyTokenRefresh is the amount of time before a token expires that a new - // token will be preemptively fetched. If unset the default value is 10 - // seconds. - // - // Note: This option is currently only respected when using credentials - // fetched from the GCE metadata server. - EarlyTokenRefresh time.Duration - - // UniverseDomain is the default service domain for a given Cloud universe. - // Only supported in authentication flows that support universe domains. - // This value takes precedence over a universe domain explicitly specified - // in a credentials config file or by the GCE metadata server. Optional. - UniverseDomain string -} - -func (params CredentialsParams) deepCopy() CredentialsParams { - paramsCopy := params - paramsCopy.Scopes = make([]string, len(params.Scopes)) - copy(paramsCopy.Scopes, params.Scopes) - return paramsCopy -} - -// DefaultClient returns an HTTP Client that uses the -// DefaultTokenSource to obtain authentication credentials. 
-func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { - ts, err := DefaultTokenSource(ctx, scope...) - if err != nil { - return nil, err - } - return oauth2.NewClient(ctx, ts), nil -} - -// DefaultTokenSource returns the token source for -// "Application Default Credentials". -// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. -func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { - creds, err := FindDefaultCredentials(ctx, scope...) - if err != nil { - return nil, err - } - return creds.TokenSource, nil -} - -// FindDefaultCredentialsWithParams searches for "Application Default Credentials". -// -// It looks for credentials in the following places, -// preferring the first location found: -// -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// For workload identity federation, refer to -// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on -// how to generate the JSON configuration file for on-prem/non-Google cloud -// platforms. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses -// the appengine.AccessToken function. -// 4. On Google Compute Engine, Google App Engine standard second generation runtimes -// (>= Go 1.11), and Google App Engine flexible environment, it fetches -// credentials from the metadata server. -func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { - // Make defensive copy of the slices in params. - params = params.deepCopy() - - // First, try the environment variable. - const envVar = "GOOGLE_APPLICATION_CREDENTIALS" - if filename := os.Getenv(envVar); filename != "" { - creds, err := readCredentialsFile(ctx, filename, params) - if err != nil { - return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) - } - return creds, nil - } - - // Second, try a well-known file. - filename := wellKnownFile() - if b, err := os.ReadFile(filename); err == nil { - return CredentialsFromJSONWithParams(ctx, b, params) - } - - // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) - // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) - // and App Engine flexible use ComputeTokenSource and the metadata server. - if appengineTokenFunc != nil { - return &Credentials{ - ProjectID: appengineAppIDFunc(ctx), - TokenSource: AppEngineTokenSource(ctx, params.Scopes...), - }, nil - } - - // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, - // or App Engine flexible, use the metadata server. - if metadata.OnGCE() { - id, _ := metadata.ProjectID() - return &Credentials{ - ProjectID: id, - TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), - universeDomain: params.UniverseDomain, - }, nil - } - - // None are found; return helpful error. - return nil, fmt.Errorf("google: could not find default credentials. See %v for more information", adcSetupURL) -} - -// FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes. 
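Given the four-step lookup order documented above, a short sketch that exercises step 1 by pointing the environment variable at an explicit key file; the path is hypothetical:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"golang.org/x/oauth2/google"
)

func main() {
	// Step 1: an explicit key file wins over the gcloud well-known file,
	// App Engine credentials, and the GCE metadata server.
	os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", "/tmp/sa-key.json") // hypothetical path
	creds, err := google.FindDefaultCredentials(context.Background(),
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", creds.ProjectID)
}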
-func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { - var params CredentialsParams - params.Scopes = scopes - return FindDefaultCredentialsWithParams(ctx, params) -} - -// CredentialsFromJSONWithParams obtains Google credentials from a JSON value. The JSON can -// represent either a Google Developers Console client_credentials.json file (as in ConfigFromJSON), -// a Google Developers service account key file, a gcloud user credentials file (a.k.a. refresh -// token JSON), or the JSON configuration file for workload identity federation in non-Google cloud -// platforms (see https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation). -func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params CredentialsParams) (*Credentials, error) { - // Make defensive copy of the slices in params. - params = params.deepCopy() - - // First, attempt to parse jsonData as a Google Developers Console client_credentials.json. - config, _ := ConfigFromJSON(jsonData, params.Scopes...) - if config != nil { - return &Credentials{ - ProjectID: "", - TokenSource: authhandler.TokenSourceWithPKCE(ctx, config, params.State, params.AuthHandler, params.PKCE), - JSON: jsonData, - }, nil - } - - // Otherwise, parse jsonData as one of the other supported credentials files. - var f credentialsFile - if err := json.Unmarshal(jsonData, &f); err != nil { - return nil, err - } - - universeDomain := f.UniverseDomain - if params.UniverseDomain != "" { - universeDomain = params.UniverseDomain - } - // Authorized user credentials are only supported in the googleapis.com universe. - if f.Type == userCredentialsKey { - universeDomain = universeDomainDefault - } - - ts, err := f.tokenSource(ctx, params) - if err != nil { - return nil, err - } - ts = newErrWrappingTokenSource(ts) - return &Credentials{ - ProjectID: f.ProjectID, - TokenSource: ts, - JSON: jsonData, - universeDomain: universeDomain, - }, nil -} - -// CredentialsFromJSON invokes CredentialsFromJSONWithParams with the specified scopes. -func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { - var params CredentialsParams - params.Scopes = scopes - return CredentialsFromJSONWithParams(ctx, jsonData, params) -} - -func wellKnownFile() string { - const f = "application_default_credentials.json" - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) - } - return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) -} - -func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { - b, err := os.ReadFile(filename) - if err != nil { - return nil, err - } - return CredentialsFromJSONWithParams(ctx, b, params) -} diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go deleted file mode 100644 index 03c42c6f87..0000000000 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package google provides support for making OAuth2 authorized and authenticated -// HTTP requests to Google APIs. It supports the Web server flow, client-side -// credentials, service accounts, Google Compute Engine service accounts, -// Google App Engine service accounts and workload identity federation -// from non-Google cloud platforms. 
-// -// A brief overview of the package follows. For more information, please read -// https://developers.google.com/accounts/docs/OAuth2 -// and -// https://developers.google.com/accounts/docs/application-default-credentials. -// For more information on using workload identity federation, refer to -// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. -// -// # OAuth2 Configs -// -// Two functions in this package return golang.org/x/oauth2.Config values from Google credential -// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, -// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or -// create an http.Client. -// -// # Workload Identity Federation -// -// Using workload identity federation, your application can access Google Cloud -// resources from Amazon Web Services (AWS), Microsoft Azure or any identity -// provider that supports OpenID Connect (OIDC) or SAML 2.0. -// Traditionally, applications running outside Google Cloud have used service -// account keys to access Google Cloud resources. Using identity federation, -// you can allow your workload to impersonate a service account. -// This lets you access Google Cloud resources directly, eliminating the -// maintenance and security burden associated with service account keys. -// -// Follow the detailed instructions on how to configure Workload Identity Federation -// in various platforms: -// -// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws -// Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure -// OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc -// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml -// -// For OIDC and SAML providers, the library can retrieve tokens in three ways: -// from a local file location (file-sourced credentials), from a server -// (URL-sourced credentials), or from a local executable (executable-sourced -// credentials). -// For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC/SAML token prior to expiration. -// For tokens with one hour lifetimes, the token needs to be updated in the file -// every hour. The token can be stored directly as plain text or in JSON format. -// For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC/SAML token. The response can be in plain text or JSON. -// Additional required request headers can also be specified. -// For executable-sourced credentials, an application needs to be available to -// output the OIDC/SAML token and other information in a JSON format. -// For more information on how these work (and how to implement -// executable-sourced credentials), please check out: -// https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration -// -// Note that this library does not perform any validation on the token_url, token_info_url, -// or service_account_impersonation_url fields of the credential configuration. -// It is not recommended to use a credential configuration that you did not generate with -// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. 
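To make the file-sourced flavor described above concrete, a hedged sketch that feeds a workload identity federation configuration through CredentialsFromJSON; every identifier in the JSON is invented:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	// Placeholder external_account config; the fields mirror the
	// credentialsFile struct parsed by this package.
	cfg := []byte(`{
	  "type": "external_account",
	  "audience": "//iam.googleapis.com/projects/0/locations/global/workloadIdentityPools/pool/providers/provider",
	  "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
	  "token_url": "https://sts.googleapis.com/v1/token",
	  "credential_source": {"file": "/var/run/oidc/token"}
	}`)
	creds, err := google.CredentialsFromJSON(context.Background(), cfg,
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	_ = creds.TokenSource // hand to oauth2.NewClient for authenticated requests
}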
-//
-// # Workforce Identity Federation
-//
-// Workforce identity federation lets you use an external identity provider (IdP) to
-// authenticate and authorize a workforce—a group of users, such as employees, partners,
-// and contractors—using IAM, so that the users can access Google Cloud services.
-// Workforce identity federation extends Google Cloud's identity capabilities to support
-// syncless, attribute-based single sign on.
-//
-// With workforce identity federation, your workforce can access Google Cloud resources
-// using an external identity provider (IdP) that supports OpenID Connect (OIDC) or
-// SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation
-// Services (AD FS), Okta, and others.
-//
-// Follow the detailed instructions on how to configure Workforce Identity Federation
-// in various platforms:
-//
-// Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad
-// Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta
-// OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc
-// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml
-//
-// For workforce identity federation, the library can retrieve tokens in three ways:
-// from a local file location (file-sourced credentials), from a server
-// (URL-sourced credentials), or from a local executable (executable-sourced
-// credentials).
-// For file-sourced credentials, a background process needs to be continuously
-// refreshing the file location with a new OIDC/SAML token prior to expiration.
-// For tokens with one hour lifetimes, the token needs to be updated in the file
-// every hour. The token can be stored directly as plain text or in JSON format.
-// For URL-sourced credentials, a local server needs to host a GET endpoint to
-// return the OIDC/SAML token. The response can be in plain text or JSON.
-// Additional required request headers can also be specified.
-// For executable-sourced credentials, an application needs to be available to
-// output the OIDC/SAML token and other information in a JSON format.
-// For more information on how these work (and how to implement
-// executable-sourced credentials), please check out:
-// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in
-//
-// # Security considerations
-//
-// Note that this library does not perform any validation on the token_url, token_info_url,
-// or service_account_impersonation_url fields of the credential configuration.
-// It is not recommended to use a credential configuration that you did not generate with
-// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain.
-//
-// # Credentials
-//
-// The Credentials type represents Google credentials, including Application Default
-// Credentials.
-//
-// Use FindDefaultCredentials to obtain Application Default Credentials.
-// FindDefaultCredentials looks in some well-known places for a credentials file, and
-// will call AppEngineTokenSource or ComputeTokenSource as needed.
-//
-// Application Default Credentials also support workload identity federation to
-// access Google Cloud resources from non-Google Cloud platforms including Amazon
-// Web Services (AWS), Microsoft Azure or any identity provider that supports
-// OpenID Connect (OIDC).
Workload identity federation is recommended for -// non-Google Cloud environments as it avoids the need to download, manage and -// store service account private keys locally. -// -// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, -// then use the credentials to construct an http.Client or an oauth2.TokenSource. -// -// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats -// described in OAuth2 Configs, above. The TokenSource in the returned value is the -// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or -// JWTConfigFromJSON, but the Credentials may contain additional information -// that is useful is some circumstances. -package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/error.go b/vendor/golang.org/x/oauth2/google/error.go deleted file mode 100644 index d84dd00473..0000000000 --- a/vendor/golang.org/x/oauth2/google/error.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "errors" - - "golang.org/x/oauth2" -) - -// AuthenticationError indicates there was an error in the authentication flow. -// -// Use (*AuthenticationError).Temporary to check if the error can be retried. -type AuthenticationError struct { - err *oauth2.RetrieveError -} - -func newAuthenticationError(err error) error { - re := &oauth2.RetrieveError{} - if !errors.As(err, &re) { - return err - } - return &AuthenticationError{ - err: re, - } -} - -// Temporary indicates that the network error has one of the following status codes and may be retried: 500, 503, 408, or 429. -func (e *AuthenticationError) Temporary() bool { - if e.err.Response == nil { - return false - } - sc := e.err.Response.StatusCode - return sc == 500 || sc == 503 || sc == 408 || sc == 429 -} - -func (e *AuthenticationError) Error() string { - return e.err.Error() -} - -func (e *AuthenticationError) Unwrap() error { - return e.err -} - -type errWrappingTokenSource struct { - src oauth2.TokenSource -} - -func newErrWrappingTokenSource(ts oauth2.TokenSource) oauth2.TokenSource { - return &errWrappingTokenSource{src: ts} -} - -// Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. -func (s *errWrappingTokenSource) Token() (*oauth2.Token, error) { - t, err := s.src.Token() - if err != nil { - return nil, newAuthenticationError(err) - } - return t, nil -} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go deleted file mode 100644 index c66c53527d..0000000000 --- a/vendor/golang.org/x/oauth2/google/google.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/url" - "strings" - "time" - - "cloud.google.com/go/compute/metadata" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google/internal/externalaccount" - "golang.org/x/oauth2/google/internal/externalaccountauthorizeduser" - "golang.org/x/oauth2/jwt" -) - -// Endpoint is Google's OAuth 2.0 default endpoint. 
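The google.go file removed below supplies ConfigFromJSON and the default Endpoint. A minimal sketch of the web-server flow built on them; the file path and scope are assumptions:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/oauth2/google"
)

func main() {
	b, err := os.ReadFile("client_credentials.json") // hypothetical Console download
	if err != nil {
		log.Fatal(err)
	}
	cfg, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/drive.readonly")
	if err != nil {
		log.Fatal(err)
	}
	// The returned config carries the file's auth_uri/token_uri;
	// google.Endpoint is the default for hand-built configs.
	fmt.Println("visit:", cfg.AuthCodeURL("state-token"))
	// Then: tok, err := cfg.Exchange(ctx, codeFromCallback)
}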
-var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://oauth2.googleapis.com/token", - DeviceAuthURL: "https://oauth2.googleapis.com/device/code", - AuthStyle: oauth2.AuthStyleInParams, -} - -// MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. -const MTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" - -// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. -const JWTTokenURL = "https://oauth2.googleapis.com/token" - -// ConfigFromJSON uses a Google Developers Console client_credentials.json -// file to construct a config. -// client_credentials.json can be downloaded from -// https://console.developers.google.com, under "Credentials". Download the Web -// application credentials in the JSON format and provide the contents of the -// file as jsonKey. -func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { - type cred struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - RedirectURIs []string `json:"redirect_uris"` - AuthURI string `json:"auth_uri"` - TokenURI string `json:"token_uri"` - } - var j struct { - Web *cred `json:"web"` - Installed *cred `json:"installed"` - } - if err := json.Unmarshal(jsonKey, &j); err != nil { - return nil, err - } - var c *cred - switch { - case j.Web != nil: - c = j.Web - case j.Installed != nil: - c = j.Installed - default: - return nil, fmt.Errorf("oauth2/google: no credentials found") - } - if len(c.RedirectURIs) < 1 { - return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") - } - return &oauth2.Config{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - RedirectURL: c.RedirectURIs[0], - Scopes: scope, - Endpoint: oauth2.Endpoint{ - AuthURL: c.AuthURI, - TokenURL: c.TokenURI, - }, - }, nil -} - -// JWTConfigFromJSON uses a Google Developers service account JSON key file to read -// the credentials that authorize and authenticate the requests. -// Create a service account on "Credentials" for your project at -// https://console.developers.google.com to download a JSON key file. -func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { - var f credentialsFile - if err := json.Unmarshal(jsonKey, &f); err != nil { - return nil, err - } - if f.Type != serviceAccountKey { - return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) - } - scope = append([]string(nil), scope...) // copy - return f.jwtConfig(scope, ""), nil -} - -// JSON key file types. -const ( - serviceAccountKey = "service_account" - userCredentialsKey = "authorized_user" - externalAccountKey = "external_account" - externalAccountAuthorizedUserKey = "external_account_authorized_user" - impersonatedServiceAccount = "impersonated_service_account" -) - -// credentialsFile is the unmarshalled representation of a credentials file. -type credentialsFile struct { - Type string `json:"type"` - - // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - AuthURL string `json:"auth_uri"` - TokenURL string `json:"token_uri"` - ProjectID string `json:"project_id"` - UniverseDomain string `json:"universe_domain"` - - // User Credential fields - // (These typically come from gcloud auth.) 
- ClientSecret string `json:"client_secret"` - ClientID string `json:"client_id"` - RefreshToken string `json:"refresh_token"` - - // External Account fields - Audience string `json:"audience"` - SubjectTokenType string `json:"subject_token_type"` - TokenURLExternal string `json:"token_url"` - TokenInfoURL string `json:"token_info_url"` - ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` - ServiceAccountImpersonation serviceAccountImpersonationInfo `json:"service_account_impersonation"` - Delegates []string `json:"delegates"` - CredentialSource externalaccount.CredentialSource `json:"credential_source"` - QuotaProjectID string `json:"quota_project_id"` - WorkforcePoolUserProject string `json:"workforce_pool_user_project"` - - // External Account Authorized User fields - RevokeURL string `json:"revoke_url"` - - // Service account impersonation - SourceCredentials *credentialsFile `json:"source_credentials"` -} - -type serviceAccountImpersonationInfo struct { - TokenLifetimeSeconds int `json:"token_lifetime_seconds"` -} - -func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config { - cfg := &jwt.Config{ - Email: f.ClientEmail, - PrivateKey: []byte(f.PrivateKey), - PrivateKeyID: f.PrivateKeyID, - Scopes: scopes, - TokenURL: f.TokenURL, - Subject: subject, // This is the user email to impersonate - Audience: f.Audience, - } - if cfg.TokenURL == "" { - cfg.TokenURL = JWTTokenURL - } - return cfg -} - -func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsParams) (oauth2.TokenSource, error) { - switch f.Type { - case serviceAccountKey: - cfg := f.jwtConfig(params.Scopes, params.Subject) - return cfg.TokenSource(ctx), nil - case userCredentialsKey: - cfg := &oauth2.Config{ - ClientID: f.ClientID, - ClientSecret: f.ClientSecret, - Scopes: params.Scopes, - Endpoint: oauth2.Endpoint{ - AuthURL: f.AuthURL, - TokenURL: f.TokenURL, - AuthStyle: oauth2.AuthStyleInParams, - }, - } - if cfg.Endpoint.AuthURL == "" { - cfg.Endpoint.AuthURL = Endpoint.AuthURL - } - if cfg.Endpoint.TokenURL == "" { - if params.TokenURL != "" { - cfg.Endpoint.TokenURL = params.TokenURL - } else { - cfg.Endpoint.TokenURL = Endpoint.TokenURL - } - } - tok := &oauth2.Token{RefreshToken: f.RefreshToken} - return cfg.TokenSource(ctx, tok), nil - case externalAccountKey: - cfg := &externalaccount.Config{ - Audience: f.Audience, - SubjectTokenType: f.SubjectTokenType, - TokenURL: f.TokenURLExternal, - TokenInfoURL: f.TokenInfoURL, - ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, - ServiceAccountImpersonationLifetimeSeconds: f.ServiceAccountImpersonation.TokenLifetimeSeconds, - ClientSecret: f.ClientSecret, - ClientID: f.ClientID, - CredentialSource: f.CredentialSource, - QuotaProjectID: f.QuotaProjectID, - Scopes: params.Scopes, - WorkforcePoolUserProject: f.WorkforcePoolUserProject, - } - return cfg.TokenSource(ctx) - case externalAccountAuthorizedUserKey: - cfg := &externalaccountauthorizeduser.Config{ - Audience: f.Audience, - RefreshToken: f.RefreshToken, - TokenURL: f.TokenURLExternal, - TokenInfoURL: f.TokenInfoURL, - ClientID: f.ClientID, - ClientSecret: f.ClientSecret, - RevokeURL: f.RevokeURL, - QuotaProjectID: f.QuotaProjectID, - Scopes: params.Scopes, - } - return cfg.TokenSource(ctx) - case impersonatedServiceAccount: - if f.ServiceAccountImpersonationURL == "" || f.SourceCredentials == nil { - return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") - } - - ts, err := 
f.SourceCredentials.tokenSource(ctx, params) - if err != nil { - return nil, err - } - imp := externalaccount.ImpersonateTokenSource{ - Ctx: ctx, - URL: f.ServiceAccountImpersonationURL, - Scopes: params.Scopes, - Ts: ts, - Delegates: f.Delegates, - } - return oauth2.ReuseTokenSource(nil, imp), nil - case "": - return nil, errors.New("missing 'type' field in credentials") - default: - return nil, fmt.Errorf("unknown credential type: %q", f.Type) - } -} - -// ComputeTokenSource returns a token source that fetches access tokens -// from Google Compute Engine (GCE)'s metadata server. It's only valid to use -// this token source if your program is running on a GCE instance. -// If no account is specified, "default" is used. -// If no scopes are specified, a set of default scopes are automatically granted. -// Further information about retrieving access tokens from the GCE metadata -// server can be found at https://cloud.google.com/compute/docs/authentication. -func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return computeTokenSource(account, 0, scope...) -} - -func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { - return oauth2.ReuseTokenSourceWithExpiry(nil, computeSource{account: account, scopes: scope}, earlyExpiry) -} - -type computeSource struct { - account string - scopes []string -} - -func (cs computeSource) Token() (*oauth2.Token, error) { - if !metadata.OnGCE() { - return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") - } - acct := cs.account - if acct == "" { - acct = "default" - } - tokenURI := "instance/service-accounts/" + acct + "/token" - if len(cs.scopes) > 0 { - v := url.Values{} - v.Set("scopes", strings.Join(cs.scopes, ",")) - tokenURI = tokenURI + "?" + v.Encode() - } - tokenJSON, err := metadata.Get(tokenURI) - if err != nil { - return nil, err - } - var res struct { - AccessToken string `json:"access_token"` - ExpiresInSec int `json:"expires_in"` - TokenType string `json:"token_type"` - } - err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) - if err != nil { - return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) - } - if res.ExpiresInSec == 0 || res.AccessToken == "" { - return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") - } - tok := &oauth2.Token{ - AccessToken: res.AccessToken, - TokenType: res.TokenType, - Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), - } - // NOTE(cbro): add hidden metadata about where the token is from. - // This is needed for detection by client libraries to know that credentials come from the metadata server. - // This may be removed in a future version of this library. - return tok.WithExtra(map[string]interface{}{ - "oauth2.google.tokenSource": "compute-metadata", - "oauth2.google.serviceAccount": acct, - }), nil -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go deleted file mode 100644 index bd4efd19ba..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
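The aws.go file removed below implements AWS Signature Version 4 so the credential source can call AWS STS GetCallerIdentity. A self-contained sketch of the signing-key derivation its generateAuthentication performs: an HMAC-SHA256 chain over the date stamp, region, service, and the fixed aws4_request terminator, finishing with the string to sign. All inputs are illustrative:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "EXAMPLE-SECRET-KEY" // hypothetical AWS secret access key
	key := []byte("AWS4" + secret)
	for _, part := range []string{"20240101", "us-east-1", "sts", "aws4_request", "string-to-sign"} {
		key = hmacSHA256(key, []byte(part)) // the final iteration signs the string to sign
	}
	fmt.Println("signature:", hex.EncodeToString(key))
}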
- -package externalaccount - -import ( - "bytes" - "context" - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path" - "sort" - "strings" - "time" - - "golang.org/x/oauth2" -) - -type awsSecurityCredentials struct { - AccessKeyID string `json:"AccessKeyID"` - SecretAccessKey string `json:"SecretAccessKey"` - SecurityToken string `json:"Token"` -} - -// awsRequestSigner is a utility class to sign http requests using a AWS V4 signature. -type awsRequestSigner struct { - RegionName string - AwsSecurityCredentials awsSecurityCredentials -} - -// getenv aliases os.Getenv for testing -var getenv = os.Getenv - -const ( - // AWS Signature Version 4 signing algorithm identifier. - awsAlgorithm = "AWS4-HMAC-SHA256" - - // The termination string for the AWS credential scope value as defined in - // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html - awsRequestType = "aws4_request" - - // The AWS authorization header name for the security session token if available. - awsSecurityTokenHeader = "x-amz-security-token" - - // The name of the header containing the session token for metadata endpoint calls - awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" - - awsIMDSv2SessionTtlHeader = "X-aws-ec2-metadata-token-ttl-seconds" - - awsIMDSv2SessionTtl = "300" - - // The AWS authorization header name for the auto-generated date. - awsDateHeader = "x-amz-date" - - // Supported AWS configuration environment variables. - awsAccessKeyId = "AWS_ACCESS_KEY_ID" - awsDefaultRegion = "AWS_DEFAULT_REGION" - awsRegion = "AWS_REGION" - awsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" - awsSessionToken = "AWS_SESSION_TOKEN" - - awsTimeFormatLong = "20060102T150405Z" - awsTimeFormatShort = "20060102" -) - -func getSha256(input []byte) (string, error) { - hash := sha256.New() - if _, err := hash.Write(input); err != nil { - return "", err - } - return hex.EncodeToString(hash.Sum(nil)), nil -} - -func getHmacSha256(key, input []byte) ([]byte, error) { - hash := hmac.New(sha256.New, key) - if _, err := hash.Write(input); err != nil { - return nil, err - } - return hash.Sum(nil), nil -} - -func cloneRequest(r *http.Request) *http.Request { - r2 := new(http.Request) - *r2 = *r - if r.Header != nil { - r2.Header = make(http.Header, len(r.Header)) - - // Find total number of values. - headerCount := 0 - for _, headerValues := range r.Header { - headerCount += len(headerValues) - } - copiedHeaders := make([]string, headerCount) // shared backing array for headers' values - - for headerKey, headerValues := range r.Header { - headerCount = copy(copiedHeaders, headerValues) - r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount] - copiedHeaders = copiedHeaders[headerCount:] - } - } - return r2 -} - -func canonicalPath(req *http.Request) string { - result := req.URL.EscapedPath() - if result == "" { - return "/" - } - return path.Clean(result) -} - -func canonicalQuery(req *http.Request) string { - queryValues := req.URL.Query() - for queryKey := range queryValues { - sort.Strings(queryValues[queryKey]) - } - return queryValues.Encode() -} - -func canonicalHeaders(req *http.Request) (string, string) { - // Header keys need to be sorted alphabetically. - var headers []string - lowerCaseHeaders := make(http.Header) - for k, v := range req.Header { - k := strings.ToLower(k) - if _, ok := lowerCaseHeaders[k]; ok { - // include additional values - lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...) 
- } else { - headers = append(headers, k) - lowerCaseHeaders[k] = v - } - } - sort.Strings(headers) - - var fullHeaders bytes.Buffer - for _, header := range headers { - headerValue := strings.Join(lowerCaseHeaders[header], ",") - fullHeaders.WriteString(header) - fullHeaders.WriteRune(':') - fullHeaders.WriteString(headerValue) - fullHeaders.WriteRune('\n') - } - - return strings.Join(headers, ";"), fullHeaders.String() -} - -func requestDataHash(req *http.Request) (string, error) { - var requestData []byte - if req.Body != nil { - requestBody, err := req.GetBody() - if err != nil { - return "", err - } - defer requestBody.Close() - - requestData, err = ioutil.ReadAll(io.LimitReader(requestBody, 1<<20)) - if err != nil { - return "", err - } - } - - return getSha256(requestData) -} - -func requestHost(req *http.Request) string { - if req.Host != "" { - return req.Host - } - return req.URL.Host -} - -func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) { - dataHash, err := requestDataHash(req) - if err != nil { - return "", err - } - - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil -} - -// SignRequest adds the appropriate headers to an http.Request -// or returns an error if something prevented this. -func (rs *awsRequestSigner) SignRequest(req *http.Request) error { - signedRequest := cloneRequest(req) - timestamp := now() - - signedRequest.Header.Add("host", requestHost(req)) - - if rs.AwsSecurityCredentials.SecurityToken != "" { - signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SecurityToken) - } - - if signedRequest.Header.Get("date") == "" { - signedRequest.Header.Add(awsDateHeader, timestamp.Format(awsTimeFormatLong)) - } - - authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp) - if err != nil { - return err - } - signedRequest.Header.Set("Authorization", authorizationCode) - - req.Header = signedRequest.Header - return nil -} - -func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) { - canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req) - - dateStamp := timestamp.Format(awsTimeFormatShort) - serviceName := "" - if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 { - serviceName = splitHost[0] - } - - credentialScope := fmt.Sprintf("%s/%s/%s/%s", dateStamp, rs.RegionName, serviceName, awsRequestType) - - requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData) - if err != nil { - return "", err - } - requestHash, err := getSha256([]byte(requestString)) - if err != nil { - return "", err - } - - stringToSign := fmt.Sprintf("%s\n%s\n%s\n%s", awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash) - - signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey) - for _, signingInput := range []string{ - dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign, - } { - signingKey, err = getHmacSha256(signingKey, []byte(signingInput)) - if err != nil { - return "", err - } - } - - return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil -} - -type awsCredentialSource struct { - EnvironmentID string - RegionURL string - RegionalCredVerificationURL string - 
CredVerificationURL string - IMDSv2SessionTokenURL string - TargetResource string - requestSigner *awsRequestSigner - region string - ctx context.Context - client *http.Client -} - -type awsRequestHeader struct { - Key string `json:"key"` - Value string `json:"value"` -} - -type awsRequest struct { - URL string `json:"url"` - Method string `json:"method"` - Headers []awsRequestHeader `json:"headers"` -} - -func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { - if cs.client == nil { - cs.client = oauth2.NewClient(cs.ctx, nil) - } - return cs.client.Do(req.WithContext(cs.ctx)) -} - -func canRetrieveRegionFromEnvironment() bool { - // The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is - // required. - return getenv(awsRegion) != "" || getenv(awsDefaultRegion) != "" -} - -func canRetrieveSecurityCredentialFromEnvironment() bool { - // Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. - return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" -} - -func shouldUseMetadataServer() bool { - return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() -} - -func (cs awsCredentialSource) credentialSourceType() string { - return "aws" -} - -func (cs awsCredentialSource) subjectToken() (string, error) { - if cs.requestSigner == nil { - headers := make(map[string]string) - if shouldUseMetadataServer() { - awsSessionToken, err := cs.getAWSSessionToken() - if err != nil { - return "", err - } - - if awsSessionToken != "" { - headers[awsIMDSv2SessionTokenHeader] = awsSessionToken - } - } - - awsSecurityCredentials, err := cs.getSecurityCredentials(headers) - if err != nil { - return "", err - } - - if cs.region, err = cs.getRegion(headers); err != nil { - return "", err - } - - cs.requestSigner = &awsRequestSigner{ - RegionName: cs.region, - AwsSecurityCredentials: awsSecurityCredentials, - } - } - - // Generate the signed request to AWS STS GetCallerIdentity API. - // Use the required regional endpoint. Otherwise, the request will fail. - req, err := http.NewRequest("POST", strings.Replace(cs.RegionalCredVerificationURL, "{region}", cs.region, 1), nil) - if err != nil { - return "", err - } - // The full, canonical resource name of the workload identity pool - // provider, with or without the HTTPS prefix. - // Including this header as part of the signature is recommended to - // ensure data integrity. - if cs.TargetResource != "" { - req.Header.Add("x-goog-cloud-target-resource", cs.TargetResource) - } - cs.requestSigner.SignRequest(req) - - /* - The GCP STS endpoint expects the headers to be formatted as: - # [ - # {key: 'x-amz-date', value: '...'}, - # {key: 'Authorization', value: '...'}, - # ... - # ] - # And then serialized as: - # quote(json.dumps({ - # url: '...', - # method: 'POST', - # headers: [{key: 'x-amz-date', value: '...'}, ...] 
- # })) - */ - - awsSignedReq := awsRequest{ - URL: req.URL.String(), - Method: "POST", - } - for headerKey, headerList := range req.Header { - for _, headerValue := range headerList { - awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{ - Key: headerKey, - Value: headerValue, - }) - } - } - sort.Slice(awsSignedReq.Headers, func(i, j int) bool { - headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key) - if headerCompare == 0 { - return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0 - } - return headerCompare < 0 - }) - - result, err := json.Marshal(awsSignedReq) - if err != nil { - return "", err - } - return url.QueryEscape(string(result)), nil -} - -func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { - if cs.IMDSv2SessionTokenURL == "" { - return "", nil - } - - req, err := http.NewRequest("PUT", cs.IMDSv2SessionTokenURL, nil) - if err != nil { - return "", err - } - - req.Header.Add(awsIMDSv2SessionTtlHeader, awsIMDSv2SessionTtl) - - resp, err := cs.doRequest(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return "", err - } - - if resp.StatusCode != 200 { - return "", fmt.Errorf("oauth2/google: unable to retrieve AWS session token - %s", string(respBody)) - } - - return string(respBody), nil -} - -func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { - if canRetrieveRegionFromEnvironment() { - if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { - return envAwsRegion, nil - } - return getenv("AWS_DEFAULT_REGION"), nil - } - - if cs.RegionURL == "" { - return "", errors.New("oauth2/google: unable to determine AWS region") - } - - req, err := http.NewRequest("GET", cs.RegionURL, nil) - if err != nil { - return "", err - } - - for name, value := range headers { - req.Header.Add(name, value) - } - - resp, err := cs.doRequest(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return "", err - } - - if resp.StatusCode != 200 { - return "", fmt.Errorf("oauth2/google: unable to retrieve AWS region - %s", string(respBody)) - } - - // This endpoint will return the region in format: us-east-2b. - // Only the us-east-2 part should be used. 
- respBodyEnd := 0 - if len(respBody) > 1 { - respBodyEnd = len(respBody) - 1 - } - return string(respBody[:respBodyEnd]), nil -} - -func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) { - if canRetrieveSecurityCredentialFromEnvironment() { - return awsSecurityCredentials{ - AccessKeyID: getenv(awsAccessKeyId), - SecretAccessKey: getenv(awsSecretAccessKey), - SecurityToken: getenv(awsSessionToken), - }, nil - } - - roleName, err := cs.getMetadataRoleName(headers) - if err != nil { - return - } - - credentials, err := cs.getMetadataSecurityCredentials(roleName, headers) - if err != nil { - return - } - - if credentials.AccessKeyID == "" { - return result, errors.New("oauth2/google: missing AccessKeyId credential") - } - - if credentials.SecretAccessKey == "" { - return result, errors.New("oauth2/google: missing SecretAccessKey credential") - } - - return credentials, nil -} - -func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (awsSecurityCredentials, error) { - var result awsSecurityCredentials - - req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil) - if err != nil { - return result, err - } - req.Header.Add("Content-Type", "application/json") - - for name, value := range headers { - req.Header.Add(name, value) - } - - resp, err := cs.doRequest(req) - if err != nil { - return result, err - } - defer resp.Body.Close() - - respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return result, err - } - - if resp.StatusCode != 200 { - return result, fmt.Errorf("oauth2/google: unable to retrieve AWS security credentials - %s", string(respBody)) - } - - err = json.Unmarshal(respBody, &result) - return result, err -} - -func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) { - if cs.CredVerificationURL == "" { - return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint") - } - - req, err := http.NewRequest("GET", cs.CredVerificationURL, nil) - if err != nil { - return "", err - } - - for name, value := range headers { - req.Header.Add(name, value) - } - - resp, err := cs.doRequest(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return "", err - } - - if resp.StatusCode != 200 { - return "", fmt.Errorf("oauth2/google: unable to retrieve AWS role name - %s", string(respBody)) - } - - return string(respBody), nil -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go deleted file mode 100644 index 33288d3677..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package externalaccount - -import ( - "context" - "fmt" - "net/http" - "regexp" - "strconv" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google/internal/stsexchange" -) - -// now aliases time.Now for testing -var now = func() time.Time { - return time.Now().UTC() -} - -// Config stores the configuration for fetching tokens with external credentials. 
-type Config struct { - // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload - // identity pool or the workforce pool and the provider identifier in that pool. - Audience string - // SubjectTokenType is the STS token type based on the OAuth 2.0 token exchange spec - // e.g. `urn:ietf:params:oauth:token-type:jwt`. - SubjectTokenType string - // TokenURL is the STS token exchange endpoint. - TokenURL string - // TokenInfoURL is the token_info endpoint used to retrieve the account-related information ( - // user attributes like account identifier, e.g. email, username, uid, etc.). This is - // needed for gCloud session account identification. - TokenInfoURL string - // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only - // required for workload identity pools when APIs to be accessed have not integrated with UberMint. - ServiceAccountImpersonationURL string - // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation - // token will be valid for. - ServiceAccountImpersonationLifetimeSeconds int - // ClientSecret is currently only required if token_info endpoint also - // needs to be called with the generated GCP access token. When provided, STS will be - // called with additional basic authentication using client_id as username and client_secret as password. - ClientSecret string - // ClientID is only required in conjunction with ClientSecret, as described above. - ClientID string - // CredentialSource contains the necessary information to retrieve the token itself, as well - // as some environmental information. - CredentialSource CredentialSource - // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries - // will set the x-goog-user-project header, which overrides the project associated with the credentials. - QuotaProjectID string - // Scopes contains the desired scopes for the returned access token. - Scopes []string - // The optional workforce pool user project number when the credential - // corresponds to a workforce pool and not a workload identity pool. - // The underlying principal must still have serviceusage.services.use IAM - // permission to use the project for billing/quota. - WorkforcePoolUserProject string -} - -var ( - validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) -) - -func validateWorkforceAudience(input string) bool { - return validWorkforceAudiencePattern.MatchString(input) -} - -// TokenSource returns an external account TokenSource struct. This is to be called by package google to construct a google.Credentials. -func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { - return c.tokenSource(ctx, "https") -} - -// tokenSource is a private function that's directly called by some of the tests, -// because the unit test URLs are mocked, and would otherwise fail the -// validity check.
-func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) { - if c.WorkforcePoolUserProject != "" { - valid := validateWorkforceAudience(c.Audience) - if !valid { - return nil, fmt.Errorf("oauth2/google: workforce_pool_user_project should not be set for non-workforce pool credentials") - } - } - - ts := tokenSource{ - ctx: ctx, - conf: c, - } - if c.ServiceAccountImpersonationURL == "" { - return oauth2.ReuseTokenSource(nil, ts), nil - } - scopes := c.Scopes - ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} - imp := ImpersonateTokenSource{ - Ctx: ctx, - URL: c.ServiceAccountImpersonationURL, - Scopes: scopes, - Ts: oauth2.ReuseTokenSource(nil, ts), - TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds, - } - return oauth2.ReuseTokenSource(nil, imp), nil -} - -// Subject token file types. -const ( - fileTypeText = "text" - fileTypeJSON = "json" -) - -type format struct { - // Type is either "text" or "json". When not provided "text" type is assumed. - Type string `json:"type"` - // SubjectTokenFieldName is only required for JSON format. This would be "access_token" for azure. - SubjectTokenFieldName string `json:"subject_token_field_name"` -} - -// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. -// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question. -// The EnvironmentID should start with AWS if being used for an AWS credential. -type CredentialSource struct { - File string `json:"file"` - - URL string `json:"url"` - Headers map[string]string `json:"headers"` - - Executable *ExecutableConfig `json:"executable"` - - EnvironmentID string `json:"environment_id"` - RegionURL string `json:"region_url"` - RegionalCredVerificationURL string `json:"regional_cred_verification_url"` - CredVerificationURL string `json:"cred_verification_url"` - IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` - Format format `json:"format"` -} - -type ExecutableConfig struct { - Command string `json:"command"` - TimeoutMillis *int `json:"timeout_millis"` - OutputFile string `json:"output_file"` -} - -// parse determines the type of CredentialSource needed. 
-func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { - if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { - if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { - if awsVersion != 1 { - return nil, fmt.Errorf("oauth2/google: aws version '%d' is not supported in the current build", awsVersion) - } - - awsCredSource := awsCredentialSource{ - EnvironmentID: c.CredentialSource.EnvironmentID, - RegionURL: c.CredentialSource.RegionURL, - RegionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, - CredVerificationURL: c.CredentialSource.URL, - TargetResource: c.Audience, - ctx: ctx, - } - if c.CredentialSource.IMDSv2SessionTokenURL != "" { - awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL - } - - return awsCredSource, nil - } - } else if c.CredentialSource.File != "" { - return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil - } else if c.CredentialSource.URL != "" { - return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil - } else if c.CredentialSource.Executable != nil { - return CreateExecutableCredential(ctx, c.CredentialSource.Executable, c) - } - return nil, fmt.Errorf("oauth2/google: unable to parse credential source") -} - -type baseCredentialSource interface { - credentialSourceType() string - subjectToken() (string, error) -} - -// tokenSource is the source that handles external credentials. It is used to retrieve Tokens. -type tokenSource struct { - ctx context.Context - conf *Config -} - -func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string { - return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", - goVersion(), - "unknown", - credSource.credentialSourceType(), - conf.ServiceAccountImpersonationURL != "", - conf.ServiceAccountImpersonationLifetimeSeconds != 0) -} - -// Token allows tokenSource to conform to the oauth2.TokenSource interface. -func (ts tokenSource) Token() (*oauth2.Token, error) { - conf := ts.conf - - credSource, err := conf.parse(ts.ctx) - if err != nil { - return nil, err - } - subjectToken, err := credSource.subjectToken() - - if err != nil { - return nil, err - } - stsRequest := stsexchange.TokenExchangeRequest{ - GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", - Audience: conf.Audience, - Scope: conf.Scopes, - RequestedTokenType: "urn:ietf:params:oauth:token-type:access_token", - SubjectToken: subjectToken, - SubjectTokenType: conf.SubjectTokenType, - } - header := make(http.Header) - header.Add("Content-Type", "application/x-www-form-urlencoded") - header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) - clientAuth := stsexchange.ClientAuthentication{ - AuthStyle: oauth2.AuthStyleInHeader, - ClientID: conf.ClientID, - ClientSecret: conf.ClientSecret, - } - var options map[string]interface{} - // Do not pass workforce_pool_user_project when client authentication is used. - // The client ID is sufficient for determining the user project. 
- if conf.WorkforcePoolUserProject != "" && conf.ClientID == "" { - options = map[string]interface{}{ - "userProject": conf.WorkforcePoolUserProject, - } - } - stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) - if err != nil { - return nil, err - } - - accessToken := &oauth2.Token{ - AccessToken: stsResp.AccessToken, - TokenType: stsResp.TokenType, - } - if stsResp.ExpiresIn < 0 { - return nil, fmt.Errorf("oauth2/google: got invalid expiry from security token service") - } else if stsResp.ExpiresIn >= 0 { - accessToken.Expiry = now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) - } - - if stsResp.RefreshToken != "" { - accessToken.RefreshToken = stsResp.RefreshToken - } - return accessToken, nil -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go deleted file mode 100644 index 233a78cef2..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package externalaccount - -import "fmt" - -// Error for handling OAuth related error responses as stated in rfc6749#5.2. -type Error struct { - Code string - URI string - Description string -} - -func (err *Error) Error() string { - return fmt.Sprintf("got error code %s from %s: %s", err.Code, err.URI, err.Description) -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go deleted file mode 100644 index 6497dc022e..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
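These internal externalaccount files are being dropped because recent x/oauth2 releases promote workload identity federation to a public package. As a minimal, hedged sketch of what callers use instead — assuming the public golang.org/x/oauth2/google/externalaccount package shipped in newer releases, with placeholder audience and token-file values that are not taken from this diff:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google/externalaccount"
)

func main() {
	ctx := context.Background()
	conf := externalaccount.Config{
		// Placeholder pool/provider audience and token file; substitute real values.
		Audience:         "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/pool/providers/provider",
		SubjectTokenType: "urn:ietf:params:oauth:token-type:jwt",
		Scopes:           []string{"https://www.googleapis.com/auth/cloud-platform"},
		CredentialSource: &externalaccount.CredentialSource{
			File:   "/var/run/secrets/token.jwt",
			Format: externalaccount.Format{Type: "text"},
		},
	}
	ts, err := externalaccount.NewTokenSource(ctx, conf)
	if err != nil {
		log.Fatal(err)
	}
	tok, err := ts.Token() // exchanges the subject token via STS
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.TokenType)
}
```

Consult that package's documentation for the exact surface; the field set above mirrors the internal Config being deleted here.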
- -package externalaccount - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "regexp" - "strings" - "time" -) - -var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken") - -const ( - executableSupportedMaxVersion = 1 - defaultTimeout = 30 * time.Second - timeoutMinimum = 5 * time.Second - timeoutMaximum = 120 * time.Second - executableSource = "response" - outputFileSource = "output file" -) - -type nonCacheableError struct { - message string -} - -func (nce nonCacheableError) Error() string { - return nce.message -} - -func missingFieldError(source, field string) error { - return fmt.Errorf("oauth2/google: %v missing `%q` field", source, field) -} - -func jsonParsingError(source, data string) error { - return fmt.Errorf("oauth2/google: unable to parse %v\nResponse: %v", source, data) -} - -func malformedFailureError() error { - return nonCacheableError{"oauth2/google: response must include `error` and `message` fields when unsuccessful"} -} - -func userDefinedError(code, message string) error { - return nonCacheableError{fmt.Sprintf("oauth2/google: response contains unsuccessful response: (%v) %v", code, message)} -} - -func unsupportedVersionError(source string, version int) error { - return fmt.Errorf("oauth2/google: %v contains unsupported version: %v", source, version) -} - -func tokenExpiredError() error { - return nonCacheableError{"oauth2/google: the token returned by the executable is expired"} -} - -func tokenTypeError(source string) error { - return fmt.Errorf("oauth2/google: %v contains unsupported token type", source) -} - -func exitCodeError(exitCode int) error { - return fmt.Errorf("oauth2/google: executable command failed with exit code %v", exitCode) -} - -func executableError(err error) error { - return fmt.Errorf("oauth2/google: executable command failed: %v", err) -} - -func executablesDisallowedError() error { - return errors.New("oauth2/google: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") -} - -func timeoutRangeError() error { - return errors.New("oauth2/google: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") -} - -func commandMissingError() error { - return errors.New("oauth2/google: missing `command` field — executable command must be provided") -} - -type environment interface { - existingEnv() []string - getenv(string) string - run(ctx context.Context, command string, env []string) ([]byte, error) - now() time.Time -} - -type runtimeEnvironment struct{} - -func (r runtimeEnvironment) existingEnv() []string { - return os.Environ() -} - -func (r runtimeEnvironment) getenv(key string) string { - return os.Getenv(key) -} - -func (r runtimeEnvironment) now() time.Time { - return time.Now().UTC() -} - -func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { - splitCommand := strings.Fields(command) - cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) 
- cmd.Env = env - - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - if ctx.Err() == context.DeadlineExceeded { - return nil, context.DeadlineExceeded - } - - if exitError, ok := err.(*exec.ExitError); ok { - return nil, exitCodeError(exitError.ExitCode()) - } - - return nil, executableError(err) - } - - bytesStdout := bytes.TrimSpace(stdout.Bytes()) - if len(bytesStdout) > 0 { - return bytesStdout, nil - } - return bytes.TrimSpace(stderr.Bytes()), nil -} - -type executableCredentialSource struct { - Command string - Timeout time.Duration - OutputFile string - ctx context.Context - config *Config - env environment -} - -// CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig. -// It also performs defaulting and type conversions. -func CreateExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { - if ec.Command == "" { - return executableCredentialSource{}, commandMissingError() - } - - result := executableCredentialSource{} - result.Command = ec.Command - if ec.TimeoutMillis == nil { - result.Timeout = defaultTimeout - } else { - result.Timeout = time.Duration(*ec.TimeoutMillis) * time.Millisecond - if result.Timeout < timeoutMinimum || result.Timeout > timeoutMaximum { - return executableCredentialSource{}, timeoutRangeError() - } - } - result.OutputFile = ec.OutputFile - result.ctx = ctx - result.config = config - result.env = runtimeEnvironment{} - return result, nil -} - -type executableResponse struct { - Version int `json:"version,omitempty"` - Success *bool `json:"success,omitempty"` - TokenType string `json:"token_type,omitempty"` - ExpirationTime int64 `json:"expiration_time,omitempty"` - IdToken string `json:"id_token,omitempty"` - SamlResponse string `json:"saml_response,omitempty"` - Code string `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { - var result executableResponse - if err := json.Unmarshal(response, &result); err != nil { - return "", jsonParsingError(source, string(response)) - } - - if result.Version == 0 { - return "", missingFieldError(source, "version") - } - - if result.Success == nil { - return "", missingFieldError(source, "success") - } - - if !*result.Success { - if result.Code == "" || result.Message == "" { - return "", malformedFailureError() - } - return "", userDefinedError(result.Code, result.Message) - } - - if result.Version > executableSupportedMaxVersion || result.Version < 0 { - return "", unsupportedVersionError(source, result.Version) - } - - if result.ExpirationTime == 0 && cs.OutputFile != "" { - return "", missingFieldError(source, "expiration_time") - } - - if result.TokenType == "" { - return "", missingFieldError(source, "token_type") - } - - if result.ExpirationTime != 0 && result.ExpirationTime < now { - return "", tokenExpiredError() - } - - if result.TokenType == "urn:ietf:params:oauth:token-type:jwt" || result.TokenType == "urn:ietf:params:oauth:token-type:id_token" { - if result.IdToken == "" { - return "", missingFieldError(source, "id_token") - } - return result.IdToken, nil - } - - if result.TokenType == "urn:ietf:params:oauth:token-type:saml2" { - if result.SamlResponse == "" { - return "", missingFieldError(source, "saml_response") - } - return result.SamlResponse, nil - } - - return "", tokenTypeError(source) 
-} - -func (cs executableCredentialSource) credentialSourceType() string { - return "executable" -} - -func (cs executableCredentialSource) subjectToken() (string, error) { - if token, err := cs.getTokenFromOutputFile(); token != "" || err != nil { - return token, err - } - - return cs.getTokenFromExecutableCommand() -} - -func (cs executableCredentialSource) getTokenFromOutputFile() (token string, err error) { - if cs.OutputFile == "" { - // This ExecutableCredentialSource doesn't use an OutputFile. - return "", nil - } - - file, err := os.Open(cs.OutputFile) - if err != nil { - // No OutputFile found. Hasn't been created yet, so skip it. - return "", nil - } - defer file.Close() - - data, err := ioutil.ReadAll(io.LimitReader(file, 1<<20)) - if err != nil || len(data) == 0 { - // Cachefile exists, but no data found. Get new credential. - return "", nil - } - - token, err = cs.parseSubjectTokenFromSource(data, outputFileSource, cs.env.now().Unix()) - if err != nil { - if _, ok := err.(nonCacheableError); ok { - // If the cached token is expired we need a new token, - // and if the cache contains a failure, we need to try again. - return "", nil - } - - // There was an error in the cached token, and the developer should be aware of it. - return "", err - } - // Token parsing succeeded. Use found token. - return token, nil -} - -func (cs executableCredentialSource) executableEnvironment() []string { - result := cs.env.existingEnv() - result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", cs.config.Audience)) - result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", cs.config.SubjectTokenType)) - result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") - if cs.config.ServiceAccountImpersonationURL != "" { - matches := serviceAccountImpersonationRE.FindStringSubmatch(cs.config.ServiceAccountImpersonationURL) - if matches != nil { - result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) - } - } - if cs.OutputFile != "" { - result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", cs.OutputFile)) - } - return result -} - -func (cs executableCredentialSource) getTokenFromExecutableCommand() (string, error) { - // For security reasons, we need our consumers to set this environment variable to allow executables to be run. - if cs.env.getenv("GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES") != "1" { - return "", executablesDisallowedError() - } - - ctx, cancel := context.WithDeadline(cs.ctx, cs.env.now().Add(cs.Timeout)) - defer cancel() - - output, err := cs.env.run(ctx, cs.Command, cs.executableEnvironment()) - if err != nil { - return "", err - } - return cs.parseSubjectTokenFromSource(output, executableSource, cs.env.now().Unix()) -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go deleted file mode 100644 index f35f73c5cb..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
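The file-sourced credential logic deleted just below reads a token from disk and, for the "json" format, extracts a single configured field. A standalone sketch of that behavior under stated assumptions (the path and field name are hypothetical, and the real implementation additionally caps reads at 1 MiB):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// subjectTokenFromJSONFile mimics the "json" branch of the deleted
// fileCredentialSource: load the file, unmarshal it, and pull out the
// configured subject_token_field_name as a string.
func subjectTokenFromJSONFile(path, field string) (string, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	var m map[string]interface{}
	if err := json.Unmarshal(data, &m); err != nil {
		return "", err
	}
	tok, ok := m[field].(string)
	if !ok {
		return "", fmt.Errorf("field %q missing or not a string", field)
	}
	return tok, nil
}

func main() {
	// Hypothetical file containing {"access_token":"eyJ..."}.
	tok, err := subjectTokenFromJSONFile("/tmp/creds.json", "access_token")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok)
}
```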
- -package externalaccount - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" -) - -type fileCredentialSource struct { - File string - Format format -} - -func (cs fileCredentialSource) credentialSourceType() string { - return "file" -} - -func (cs fileCredentialSource) subjectToken() (string, error) { - tokenFile, err := os.Open(cs.File) - if err != nil { - return "", fmt.Errorf("oauth2/google: failed to open credential file %q", cs.File) - } - defer tokenFile.Close() - tokenBytes, err := ioutil.ReadAll(io.LimitReader(tokenFile, 1<<20)) - if err != nil { - return "", fmt.Errorf("oauth2/google: failed to read credential file: %v", err) - } - tokenBytes = bytes.TrimSpace(tokenBytes) - switch cs.Format.Type { - case "json": - jsonData := make(map[string]interface{}) - err = json.Unmarshal(tokenBytes, &jsonData) - if err != nil { - return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err) - } - val, ok := jsonData[cs.Format.SubjectTokenFieldName] - if !ok { - return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials") - } - token, ok := val.(string) - if !ok { - return "", errors.New("oauth2/google: improperly formatted subject token") - } - return token, nil - case "text": - return string(tokenBytes), nil - case "": - return string(tokenBytes), nil - default: - return "", errors.New("oauth2/google: invalid credential_source file format type") - } - -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go deleted file mode 100644 index 1d5aad2e2d..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package externalaccount - -import ( - "runtime" - "strings" - "unicode" -) - -var ( - // version is a package internal global variable for testing purposes. - version = runtime.Version -) - -// versionUnknown is only used when the runtime version cannot be determined. -const versionUnknown = "UNKNOWN" - -// goVersion returns a Go runtime version derived from the runtime environment -// that is modified to be suitable for reporting in a header, meaning it has no -// whitespace. If it is unable to determine the Go runtime version, it returns -// versionUnknown. -func goVersion() string { - const develPrefix = "devel +" - - s := version() - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - - notSemverRune := func(r rune) bool { - return !strings.ContainsRune("0123456789.", r) - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - // Some release candidates already have a dash in them. 
- if !strings.HasPrefix(prerelease, "-") { - prerelease = "-" + prerelease - } - s += prerelease - } - return s - } - return "UNKNOWN" -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go deleted file mode 100644 index 54c8f209f3..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package externalaccount - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "golang.org/x/oauth2" -) - -// generateAccesstokenReq is used for service account impersonation -type generateAccessTokenReq struct { - Delegates []string `json:"delegates,omitempty"` - Lifetime string `json:"lifetime,omitempty"` - Scope []string `json:"scope,omitempty"` -} - -type impersonateTokenResponse struct { - AccessToken string `json:"accessToken"` - ExpireTime string `json:"expireTime"` -} - -// ImpersonateTokenSource uses a source credential, stored in Ts, to request an access token to the provided URL. -// Scopes can be defined when the access token is requested. -type ImpersonateTokenSource struct { - // Ctx is the execution context of the impersonation process - // used to perform http call to the URL. Required - Ctx context.Context - // Ts is the source credential used to generate a token on the - // impersonated service account. Required. - Ts oauth2.TokenSource - - // URL is the endpoint to call to generate a token - // on behalf the service account. Required. - URL string - // Scopes that the impersonated credential should have. Required. - Scopes []string - // Delegates are the service account email addresses in a delegation chain. - // Each service account must be granted roles/iam.serviceAccountTokenCreator - // on the next service account in the chain. Optional. - Delegates []string - // TokenLifetimeSeconds is the number of seconds the impersonation token will - // be valid for. - TokenLifetimeSeconds int -} - -// Token performs the exchange to get a temporary service account token to allow access to GCP. 
-func (its ImpersonateTokenSource) Token() (*oauth2.Token, error) { - lifetimeString := "3600s" - if its.TokenLifetimeSeconds != 0 { - lifetimeString = fmt.Sprintf("%ds", its.TokenLifetimeSeconds) - } - reqBody := generateAccessTokenReq{ - Lifetime: lifetimeString, - Scope: its.Scopes, - Delegates: its.Delegates, - } - b, err := json.Marshal(reqBody) - if err != nil { - return nil, fmt.Errorf("oauth2/google: unable to marshal request: %v", err) - } - client := oauth2.NewClient(its.Ctx, its.Ts) - req, err := http.NewRequest("POST", its.URL, bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("oauth2/google: unable to create impersonation request: %v", err) - } - req = req.WithContext(its.Ctx) - req.Header.Set("Content-Type", "application/json") - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("oauth2/google: unable to generate access token: %v", err) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2/google: unable to read body: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) - } - - var accessTokenResp impersonateTokenResponse - if err := json.Unmarshal(body, &accessTokenResp); err != nil { - return nil, fmt.Errorf("oauth2/google: unable to parse response: %v", err) - } - expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) - if err != nil { - return nil, fmt.Errorf("oauth2/google: unable to parse expiry: %v", err) - } - return &oauth2.Token{ - AccessToken: accessTokenResp.AccessToken, - Expiry: expiry, - TokenType: "Bearer", - }, nil -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go deleted file mode 100644 index 606bb4e800..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
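The impersonate.go file removed above wraps a single REST call: POST a lifetime/scope body to the IAM Credentials generateAccessToken endpoint and decode accessToken and expireTime from the response. A hedged sketch of that exchange follows; the service-account email is a placeholder, and a real call must go through a client carrying the source credential (e.g. oauth2.NewClient) rather than http.DefaultClient:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

type generateAccessTokenReq struct {
	Lifetime string   `json:"lifetime,omitempty"`
	Scope    []string `json:"scope,omitempty"`
}

type generateAccessTokenResp struct {
	AccessToken string `json:"accessToken"`
	ExpireTime  string `json:"expireTime"`
}

func main() {
	body, err := json.Marshal(generateAccessTokenReq{
		Lifetime: "3600s",
		Scope:    []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder email; the URL shape matches the regexp in the deleted code.
	url := "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/sa@project.iam.gserviceaccount.com:generateAccessToken"
	req, err := http.NewRequest("POST", url, bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Unauthenticated client used only to keep the sketch short; see note above.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	var out generateAccessTokenResp
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.ExpireTime)
}
```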
- -package externalaccount - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - - "golang.org/x/oauth2" -) - -type urlCredentialSource struct { - URL string - Headers map[string]string - Format format - ctx context.Context -} - -func (cs urlCredentialSource) credentialSourceType() string { - return "url" -} - -func (cs urlCredentialSource) subjectToken() (string, error) { - client := oauth2.NewClient(cs.ctx, nil) - req, err := http.NewRequest("GET", cs.URL, nil) - if err != nil { - return "", fmt.Errorf("oauth2/google: HTTP request for URL-sourced credential failed: %v", err) - } - req = req.WithContext(cs.ctx) - - for key, val := range cs.Headers { - req.Header.Add(key, val) - } - resp, err := client.Do(req) - if err != nil { - return "", fmt.Errorf("oauth2/google: invalid response when retrieving subject token: %v", err) - } - defer resp.Body.Close() - - respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return "", fmt.Errorf("oauth2/google: invalid body in subject token URL query: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return "", fmt.Errorf("oauth2/google: status code %d: %s", c, respBody) - } - - switch cs.Format.Type { - case "json": - jsonData := make(map[string]interface{}) - err = json.Unmarshal(respBody, &jsonData) - if err != nil { - return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err) - } - val, ok := jsonData[cs.Format.SubjectTokenFieldName] - if !ok { - return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials") - } - token, ok := val.(string) - if !ok { - return "", errors.New("oauth2/google: improperly formatted subject token") - } - return token, nil - case "text": - return string(respBody), nil - case "": - return string(respBody), nil - default: - return "", errors.New("oauth2/google: invalid credential_source file format type") - } - -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go deleted file mode 100644 index cb58207074..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package externalaccountauthorizeduser - -import ( - "context" - "errors" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google/internal/stsexchange" -) - -// now aliases time.Now for testing. -var now = func() time.Time { - return time.Now().UTC() -} - -var tokenValid = func(token oauth2.Token) bool { - return token.Valid() -} - -type Config struct { - // Audience is the Secure Token Service (STS) audience which contains the resource name for the workforce pool and - // the provider identifier in that pool. - Audience string - // RefreshToken is the optional OAuth 2.0 refresh token. If specified, credentials can be refreshed. - RefreshToken string - // TokenURL is the optional STS token exchange endpoint for refresh. It must be specified for refresh and can be - // left empty if the token cannot be refreshed. - TokenURL string - // TokenInfoURL is the optional STS endpoint URL for token introspection.
- TokenInfoURL string - // ClientID is only required in conjunction with ClientSecret, as described below. - ClientID string - // ClientSecret is currently only required if token_info endpoint also needs to be called with the generated GCP - // access token. When provided, STS will be called with additional basic authentication using client_id as username - // and client_secret as password. - ClientSecret string - // Token is the OAuth 2.0 access token. Can be left empty if refresh information is provided. - Token string - // Expiry is the optional expiration datetime of the OAuth 2.0 access token. - Expiry time.Time - // RevokeURL is the optional STS endpoint URL for revoking tokens. - RevokeURL string - // QuotaProjectID is the optional project ID used for quota and billing. This project may be different from the - // project used to create the credentials. - QuotaProjectID string - Scopes []string -} - -func (c *Config) canRefresh() bool { - return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != "" -} - -func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { - var token oauth2.Token - if c.Token != "" && !c.Expiry.IsZero() { - token = oauth2.Token{ - AccessToken: c.Token, - Expiry: c.Expiry, - TokenType: "Bearer", - } - } - if !tokenValid(token) && !c.canRefresh() { - return nil, errors.New("oauth2/google: Token should be created with fields to make it valid (`token` and `expiry`), or fields to allow it to refresh (`refresh_token`, `token_url`, `client_id`, `client_secret`).") - } - - ts := tokenSource{ - ctx: ctx, - conf: c, - } - - return oauth2.ReuseTokenSource(&token, ts), nil - } - -type tokenSource struct { - ctx context.Context - conf *Config -} - -func (ts tokenSource) Token() (*oauth2.Token, error) { - conf := ts.conf - if !conf.canRefresh() { - return nil, errors.New("oauth2/google: The credentials do not contain the necessary fields needed to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret.") - } - - clientAuth := stsexchange.ClientAuthentication{ - AuthStyle: oauth2.AuthStyleInHeader, - ClientID: conf.ClientID, - ClientSecret: conf.ClientSecret, - } - - stsResponse, err := stsexchange.RefreshAccessToken(ts.ctx, conf.TokenURL, conf.RefreshToken, clientAuth, nil) - if err != nil { - return nil, err - } - if stsResponse.ExpiresIn < 0 { - return nil, errors.New("oauth2/google: got invalid expiry from security token service") - } - - if stsResponse.RefreshToken != "" { - conf.RefreshToken = stsResponse.RefreshToken - } - - token := &oauth2.Token{ - AccessToken: stsResponse.AccessToken, - Expiry: now().Add(time.Duration(stsResponse.ExpiresIn) * time.Second), - TokenType: "Bearer", - } - return token, nil -} diff --git a/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go deleted file mode 100644 index ebd520eace..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package stsexchange - -import ( - "encoding/base64" - "net/http" - "net/url" - - "golang.org/x/oauth2" -) - -// ClientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1.
-type ClientAuthentication struct { - // AuthStyle can be either basic or request-body - AuthStyle oauth2.AuthStyle - ClientID string - ClientSecret string -} - -// InjectAuthentication is used to add authentication to a Secure Token Service exchange -// request. It modifies either the passed url.Values or http.Header depending on the desired -// authentication format. -func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { - if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { - return - } - - switch c.AuthStyle { - case oauth2.AuthStyleInHeader: // AuthStyleInHeader corresponds to basic authentication as defined in rfc7617#2 - plainHeader := c.ClientID + ":" + c.ClientSecret - headers.Add("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader))) - case oauth2.AuthStyleInParams: // AuthStyleInParams corresponds to request-body authentication with ClientID and ClientSecret in the message body. - values.Set("client_id", c.ClientID) - values.Set("client_secret", c.ClientSecret) - case oauth2.AuthStyleAutoDetect: - values.Set("client_id", c.ClientID) - values.Set("client_secret", c.ClientSecret) - default: - values.Set("client_id", c.ClientID) - values.Set("client_secret", c.ClientSecret) - } -} diff --git a/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go deleted file mode 100644 index 1a0bebd159..0000000000 --- a/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package stsexchange - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - - "golang.org/x/oauth2" -) - -func defaultHeader() http.Header { - header := make(http.Header) - header.Add("Content-Type", "application/x-www-form-urlencoded") - return header -} - -// ExchangeToken performs an oauth2 token exchange with the provided endpoint. -// The first 4 fields are all mandatory. headers can be used to pass additional -// headers beyond the bare minimum required by the token exchange. options can -// be used to pass additional JSON-structured options to the remote server. 
-func ExchangeToken(ctx context.Context, endpoint string, request *TokenExchangeRequest, authentication ClientAuthentication, headers http.Header, options map[string]interface{}) (*Response, error) { - data := url.Values{} - data.Set("audience", request.Audience) - data.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange") - data.Set("requested_token_type", "urn:ietf:params:oauth:token-type:access_token") - data.Set("subject_token_type", request.SubjectTokenType) - data.Set("subject_token", request.SubjectToken) - data.Set("scope", strings.Join(request.Scope, " ")) - if options != nil { - opts, err := json.Marshal(options) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to marshal additional options: %v", err) - } - data.Set("options", string(opts)) - } - - return makeRequest(ctx, endpoint, data, authentication, headers) -} - -func RefreshAccessToken(ctx context.Context, endpoint string, refreshToken string, authentication ClientAuthentication, headers http.Header) (*Response, error) { - data := url.Values{} - data.Set("grant_type", "refresh_token") - data.Set("refresh_token", refreshToken) - - return makeRequest(ctx, endpoint, data, authentication, headers) -} - -func makeRequest(ctx context.Context, endpoint string, data url.Values, authentication ClientAuthentication, headers http.Header) (*Response, error) { - if headers == nil { - headers = defaultHeader() - } - client := oauth2.NewClient(ctx, nil) - authentication.InjectAuthentication(data, headers) - encodedData := data.Encode() - - req, err := http.NewRequest("POST", endpoint, strings.NewReader(encodedData)) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to properly build http request: %v", err) - } - req = req.WithContext(ctx) - for key, list := range headers { - for _, val := range list { - req.Header.Add(key, val) - } - } - req.Header.Add("Content-Length", strconv.Itoa(len(encodedData))) - - resp, err := client.Do(req) - - if err != nil { - return nil, fmt.Errorf("oauth2/google: invalid response from Secure Token Server: %v", err) - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, err - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) - } - var stsResp Response - err = json.Unmarshal(body, &stsResp) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) - - } - - return &stsResp, nil -} - -// TokenExchangeRequest contains fields necessary to make an oauth2 token exchange. -type TokenExchangeRequest struct { - ActingParty struct { - ActorToken string - ActorTokenType string - } - GrantType string - Resource string - Audience string - Scope []string - RequestedTokenType string - SubjectToken string - SubjectTokenType string -} - -// Response is used to decode the remote server response during an oauth2 token exchange. -type Response struct { - AccessToken string `json:"access_token"` - IssuedTokenType string `json:"issued_token_type"` - TokenType string `json:"token_type"` - ExpiresIn int `json:"expires_in"` - Scope string `json:"scope"` - RefreshToken string `json:"refresh_token"` -} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go deleted file mode 100644 index e89e6ae17b..0000000000 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2015 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "crypto/rsa" - "fmt" - "strings" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON -// key file to read the credentials that authorize and authenticate the -// requests, and returns a TokenSource that does not use any OAuth2 flow but -// instead creates a JWT and sends that as the access token. -// The audience is typically a URL that specifies the scope of the credentials. -// -// Note that this is not a standard OAuth flow, but rather an -// optimization supported by a few Google services. -// Unless you know otherwise, you should use JWTConfigFromJSON instead. -func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { - return newJWTSource(jsonKey, audience, nil) -} - -// JWTAccessTokenSourceWithScope uses a Google Developers service account JSON -// key file to read the credentials that authorize and authenticate the -// requests, and returns a TokenSource that does not use any OAuth2 flow but -// instead creates a JWT and sends that as the access token. -// The scope is typically a list of URLs that specifies the scope of the -// credentials. -// -// Note that this is not a standard OAuth flow, but rather an -// optimization supported by a few Google services. -// Unless you know otherwise, you should use JWTConfigFromJSON instead. -func JWTAccessTokenSourceWithScope(jsonKey []byte, scope ...string) (oauth2.TokenSource, error) { - return newJWTSource(jsonKey, "", scope) -} - -func newJWTSource(jsonKey []byte, audience string, scopes []string) (oauth2.TokenSource, error) { - if len(scopes) == 0 && audience == "" { - return nil, fmt.Errorf("google: missing scope/audience for JWT access token") - } - - cfg, err := JWTConfigFromJSON(jsonKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse JSON key: %v", err) - } - pk, err := internal.ParseKey(cfg.PrivateKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse key: %v", err) - } - ts := &jwtAccessTokenSource{ - email: cfg.Email, - audience: audience, - scopes: scopes, - pk: pk, - pkID: cfg.PrivateKeyID, - } - tok, err := ts.Token() - if err != nil { - return nil, err - } - rts := newErrWrappingTokenSource(oauth2.ReuseTokenSource(tok, ts)) - return rts, nil -} - -type jwtAccessTokenSource struct { - email, audience string - scopes []string - pk *rsa.PrivateKey - pkID string -} - -func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { - iat := time.Now() - exp := iat.Add(time.Hour) - scope := strings.Join(ts.scopes, " ") - cs := &jws.ClaimSet{ - Iss: ts.email, - Sub: ts.email, - Aud: ts.audience, - Scope: scope, - Iat: iat.Unix(), - Exp: exp.Unix(), - } - hdr := &jws.Header{ - Algorithm: "RS256", - Typ: "JWT", - KeyID: string(ts.pkID), - } - msg, err := jws.Encode(hdr, cs, ts.pk) - if err != nil { - return nil, fmt.Errorf("google: could not encode JWT: %v", err) - } - return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil -} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go deleted file mode 100644 index 456224bc78..0000000000 --- a/vendor/golang.org/x/oauth2/google/sdk.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "bufio" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "os" - "os/user" - "path/filepath" - "runtime" - "strings" - "time" - - "golang.org/x/oauth2" -) - -type sdkCredentials struct { - Data []struct { - Credential struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - TokenExpiry *time.Time `json:"token_expiry"` - } `json:"credential"` - Key struct { - Account string `json:"account"` - Scope string `json:"scope"` - } `json:"key"` - } -} - -// An SDKConfig provides access to tokens from an account already -// authorized via the Google Cloud SDK. -type SDKConfig struct { - conf oauth2.Config - initialToken *oauth2.Token -} - -// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK -// account. If account is empty, the account currently active in -// Google Cloud SDK properties is used. -// Google Cloud SDK credentials must be created by running `gcloud auth` -// before using this function. -// The Google Cloud SDK is available at https://cloud.google.com/sdk/. -func NewSDKConfig(account string) (*SDKConfig, error) { - configPath, err := sdkConfigPath() - if err != nil { - return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) - } - credentialsPath := filepath.Join(configPath, "credentials") - f, err := os.Open(credentialsPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) - } - defer f.Close() - - var c sdkCredentials - if err := json.NewDecoder(f).Decode(&c); err != nil { - return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) - } - if len(c.Data) == 0 { - return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) - } - if account == "" { - propertiesPath := filepath.Join(configPath, "properties") - f, err := os.Open(propertiesPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) - } - defer f.Close() - ini, err := parseINI(f) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) - } - core, ok := ini["core"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) - } - active, ok := core["account"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) - } - account = active - } - - for _, d := range c.Data { - if account == "" || d.Key.Account == account { - if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { - return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) - } - var expiry time.Time - if d.Credential.TokenExpiry != nil { - expiry = *d.Credential.TokenExpiry - } - return &SDKConfig{ - conf: oauth2.Config{ - ClientID: d.Credential.ClientID, - ClientSecret: d.Credential.ClientSecret, - Scopes: strings.Split(d.Key.Scope, " "), - Endpoint: Endpoint, - RedirectURL: "oob", - }, - initialToken: &oauth2.Token{ - AccessToken: d.Credential.AccessToken, - RefreshToken: d.Credential.RefreshToken, - Expiry: expiry, - }, - }, nil - } - } - return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) -} - -// Client returns an HTTP 
client using Google Cloud SDK credentials to -// authorize requests. The token will auto-refresh as necessary. The -// underlying http.RoundTripper will be obtained using the provided -// context. The returned client and its Transport should not be -// modified. -func (c *SDKConfig) Client(ctx context.Context) *http.Client { - return &http.Client{ - Transport: &oauth2.Transport{ - Source: c.TokenSource(ctx), - }, - } -} - -// TokenSource returns an oauth2.TokenSource that retrieves tokens from -// Google Cloud SDK credentials using the provided context. -// It will return the current access token stored in the credentials, -// and refresh it when it expires, but it won't update the credentials -// with the new access token. -func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { - return c.conf.TokenSource(ctx, c.initialToken) -} - -// Scopes are the OAuth 2.0 scopes the current account is authorized for. -func (c *SDKConfig) Scopes() []string { - return c.conf.Scopes -} - -func parseINI(ini io.Reader) (map[string]map[string]string, error) { - result := map[string]map[string]string{ - "": {}, // root section - } - scanner := bufio.NewScanner(ini) - currentSection := "" - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, ";") { - // comment. - continue - } - if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { - currentSection = strings.TrimSpace(line[1 : len(line)-1]) - result[currentSection] = map[string]string{} - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) == 2 && parts[0] != "" { - result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) - } - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning ini: %v", err) - } - return result, nil -} - -// sdkConfigPath tries to guess where the gcloud config is located. -// It can be overridden during tests. -var sdkConfigPath = func() (string, error) { - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil - } - homeDir := guessUnixHomeDir() - if homeDir == "" { - return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") - } - return filepath.Join(homeDir, ".config", "gcloud"), nil -} - -func guessUnixHomeDir() string { - // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 - if v := os.Getenv("HOME"); v != "" { - return v - } - // Else, fall back to user.Current: - if u, err := user.Current(); err == nil { - return u.HomeDir - } - return "" -} diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go deleted file mode 100644 index 95015648b4..0000000000 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jws provides a partial implementation -// of JSON Web Signature encoding and decoding. -// It exists to support the golang.org/x/oauth2 package. -// -// See RFC 7515. -// -// Deprecated: this package is not intended for public use and might be -// removed in the future. It exists for internal use only. -// Please switch to another JWS package or copy this package into your own -// source tree.
-package jws // import "golang.org/x/oauth2/jws" - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - "time" -) - -// ClaimSet contains information about the JWT signature including the -// permissions being requested (scopes), the target of the token, the issuer, -// the time the token was issued, and the lifetime of the token. -type ClaimSet struct { - Iss string `json:"iss"` // email address of the client_id of the application making the access token request - Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests - Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). - Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) - Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) - Typ string `json:"typ,omitempty"` // token type (Optional). - - // Email for which the application is requesting delegated access (Optional). - Sub string `json:"sub,omitempty"` - - // The old name of Sub. Client keeps setting Prn to be - compliant with legacy OAuth 2.0 providers. (Optional) - Prn string `json:"prn,omitempty"` - - // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 - // This array is marshalled using custom code (see (c *ClaimSet) encode()). - PrivateClaims map[string]interface{} `json:"-"` -} - -func (c *ClaimSet) encode() (string, error) { - // Reverting time back for machines whose time is not perfectly in sync. - // If client machine's time is in the future according - // to Google servers, an access token will not be issued. - now := time.Now().Add(-10 * time.Second) - if c.Iat == 0 { - c.Iat = now.Unix() - } - if c.Exp == 0 { - c.Exp = now.Add(time.Hour).Unix() - } - if c.Exp < c.Iat { - return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) - } - - b, err := json.Marshal(c) - if err != nil { - return "", err - } - - if len(c.PrivateClaims) == 0 { - return base64.RawURLEncoding.EncodeToString(b), nil - } - - // Marshal private claim set and then append it to b. - prv, err := json.Marshal(c.PrivateClaims) - if err != nil { - return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) - } - - // Concatenate public and private claim JSON objects. - if !bytes.HasSuffix(b, []byte{'}'}) { - return "", fmt.Errorf("jws: invalid JSON %s", b) - } - if !bytes.HasPrefix(prv, []byte{'{'}) { - return "", fmt.Errorf("jws: invalid JSON %s", prv) - } - b[len(b)-1] = ',' // Replace closing curly brace with a comma. - b = append(b, prv[1:]...) // Append private claims. - return base64.RawURLEncoding.EncodeToString(b), nil -} - -// Header represents the header for the signed JWS payloads. -type Header struct { - // The algorithm used for signature. - Algorithm string `json:"alg"` - - // Represents the token type. - Typ string `json:"typ"` - - // The optional hint of which key is being used. - KeyID string `json:"kid,omitempty"` -} - -func (h *Header) encode() (string, error) { - b, err := json.Marshal(h) - if err != nil { - return "", err - } - return base64.RawURLEncoding.EncodeToString(b), nil -} - -// Decode decodes a claim set from a JWS payload. -func Decode(payload string) (*ClaimSet, error) { - // decode returned id token to get expiry - s := strings.Split(payload, ".") - if len(s) < 2 { - // TODO(jbd): Provide more context about the error.
- return nil, errors.New("jws: invalid token received") - } - decoded, err := base64.RawURLEncoding.DecodeString(s[1]) - if err != nil { - return nil, err - } - c := &ClaimSet{} - err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) - return c, err -} - -// Signer returns a signature for the given data. -type Signer func(data []byte) (sig []byte, err error) - -// EncodeWithSigner encodes a header and claim set with the provided signer. -func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { - head, err := header.encode() - if err != nil { - return "", err - } - cs, err := c.encode() - if err != nil { - return "", err - } - ss := fmt.Sprintf("%s.%s", head, cs) - sig, err := sg([]byte(ss)) - if err != nil { - return "", err - } - return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil -} - -// Encode encodes a signed JWS with provided header and claim set. -// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. -func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { - sg := func(data []byte) (sig []byte, err error) { - h := sha256.New() - h.Write(data) - return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) - } - return EncodeWithSigner(header, c, sg) -} - -// Verify tests whether the provided JWT token's signature was produced by the private key -// associated with the supplied public key. -func Verify(token string, key *rsa.PublicKey) error { - parts := strings.Split(token, ".") - if len(parts) != 3 { - return errors.New("jws: invalid token received, token must have 3 parts") - } - - signedContent := parts[0] + "." + parts[1] - signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) - if err != nil { - return err - } - - h := sha256.New() - h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) -} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go deleted file mode 100644 index b2bf18298b..0000000000 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly -// known as "two-legged OAuth 2.0". -// -// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 -package jwt - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -var ( - defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" - defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} -) - -// Config is the configuration for using JWT to fetch tokens, -// commonly known as "two-legged OAuth 2.0". -type Config struct { - // Email is the OAuth client identifier used when communicating with - // the configured OAuth provider. - Email string - - // PrivateKey contains the contents of an RSA private key or the - // contents of a PEM file that contains a private key. The provided - // private key is used to sign JWT payloads. - // PEM containers with a passphrase are not supported. - // Use the following command to convert a PKCS 12 file into a PEM. 
- // - // $ openssl pkcs12 -in key.p12 -out key.pem -nodes - // - PrivateKey []byte - - // PrivateKeyID contains an optional hint indicating which key is being - // used. - PrivateKeyID string - - // Subject is the optional user to impersonate. - Subject string - - // Scopes optionally specifies a list of requested permission scopes. - Scopes []string - - // TokenURL is the endpoint required to complete the 2-legged JWT flow. - TokenURL string - - // Expires optionally specifies how long the token is valid for. - Expires time.Duration - - // Audience optionally specifies the intended audience of the - // request. If empty, the value of TokenURL is used as the - // intended audience. - Audience string - - // PrivateClaims optionally specifies custom private claims in the JWT. - // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 - PrivateClaims map[string]interface{} - - // UseIDToken optionally specifies whether ID token should be used instead - // of access token when the server returns both. - UseIDToken bool -} - -// TokenSource returns a JWT TokenSource using the configuration -// in c and the HTTP client from the provided context. -func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) -} - -// Client returns an HTTP client wrapping the context's -// HTTP transport and adding Authorization headers with tokens -// obtained from c. -// -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context) *http.Client { - return oauth2.NewClient(ctx, c.TokenSource(ctx)) -} - -// jwtSource is a source that always does a signed JWT request for a token. -// It should typically be wrapped with a reuseTokenSource. -type jwtSource struct { - ctx context.Context - conf *Config -} - -func (js jwtSource) Token() (*oauth2.Token, error) { - pk, err := internal.ParseKey(js.conf.PrivateKey) - if err != nil { - return nil, err - } - hc := oauth2.NewClient(js.ctx, nil) - claimSet := &jws.ClaimSet{ - Iss: js.conf.Email, - Scope: strings.Join(js.conf.Scopes, " "), - Aud: js.conf.TokenURL, - PrivateClaims: js.conf.PrivateClaims, - } - if subject := js.conf.Subject; subject != "" { - claimSet.Sub = subject - // prn is the old name of sub. Keep setting it - // to be compatible with legacy OAuth 2.0 providers. - claimSet.Prn = subject - } - if t := js.conf.Expires; t > 0 { - claimSet.Exp = time.Now().Add(t).Unix() - } - if aud := js.conf.Audience; aud != "" { - claimSet.Aud = aud - } - h := *defaultHeader - h.KeyID = js.conf.PrivateKeyID - payload, err := jws.Encode(&h, claimSet, pk) - if err != nil { - return nil, err - } - v := url.Values{} - v.Set("grant_type", defaultGrantType) - v.Set("assertion", payload) - resp, err := hc.PostForm(js.conf.TokenURL, v) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, &oauth2.RetrieveError{ - Response: resp, - Body: body, - } - } - // tokenRes is the JSON response body. 
- var tokenRes struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - ExpiresIn int64 `json:"expires_in"` // relative seconds from now - } - if err := json.Unmarshal(body, &tokenRes); err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - token := &oauth2.Token{ - AccessToken: tokenRes.AccessToken, - TokenType: tokenRes.TokenType, - } - raw := make(map[string]interface{}) - json.Unmarshal(body, &raw) // no error checks for optional fields - token = token.WithExtra(raw) - - if secs := tokenRes.ExpiresIn; secs > 0 { - token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) - } - if v := tokenRes.IDToken; v != "" { - // decode returned id token to get expiry - claimSet, err := jws.Decode(v) - if err != nil { - return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) - } - token.Expiry = time.Unix(claimSet.Exp, 0) - } - if js.conf.UseIDToken { - if tokenRes.IDToken == "" { - return nil, fmt.Errorf("oauth2: response doesn't have JWT token") - } - token.AccessToken = tokenRes.IDToken - } - return token, nil -} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 0000000000..948a3ee63d --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,135 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. +package errgroup + +import ( + "context" + "fmt" + "sync" +) + +type token struct{} + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. +type Group struct { + cancel func(error) + + wg sync.WaitGroup + + sem chan token + + errOnce sync.Once + err error +} + +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := withCancelCause(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel(g.err) + } + return g.err +} + +// Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. +// +// The first call to return a non-nil error cancels the group's context, if the +// group was created by calling WithContext. The error will be returned by Wait. 
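// A minimal illustrative sketch (not part of the upstream file; the URLs and
// the use of net/http are placeholder choices): fetch several pages
// concurrently and surface the first failure.
//
//	g, ctx := errgroup.WithContext(context.Background())
//	for _, url := range []string{"https://example.com/a", "https://example.com/b"} {
//		url := url // capture the loop variable (needed before Go 1.22)
//		g.Go(func() error {
//			req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
//			if err != nil {
//				return err
//			}
//			resp, err := http.DefaultClient.Do(req)
//			if err != nil {
//				return err // first error cancels ctx for the other goroutines
//			}
//			return resp.Body.Close()
//		})
//	}
//	err := g.Wait() // first non-nil error from the group, or nil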
+func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. +func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) +} diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go new file mode 100644 index 0000000000..f93c740b63 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + return context.WithCancelCause(parent) +} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go new file mode 100644 index 0000000000..88ce33434e --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + ctx, cancel := context.WithCancel(parent) + return ctx, func(error) { cancel() } +} diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go deleted file mode 100644 index 3bf40fdfec..0000000000 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. -// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. 
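// An illustrative comparison (not part of the upstream file; "prog" is a
// placeholder binary name). Usage is identical to os/exec; only the handling
// of relative PATH results differs:
//
//	cmd := execabs.Command("prog", "--version") // same PATH lookup as exec.Command
//	if err := cmd.Run(); err != nil {
//		// If "prog" had resolved to ./prog in the current directory, Run
//		// reports that error here instead of executing the file.
//	}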
-// -// See https://blog.golang.org/path-security for more information -// about when it may be necessary or appropriate to use this package. -package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -// ErrNotFound is the error resulting if a path search failed to find an executable file. -// It is an alias for exec.ErrNotFound. -var ErrNotFound = exec.ErrNotFound - -// Cmd represents an external command being prepared or run. -// It is an alias for exec.Cmd. -type Cmd = exec.Cmd - -// Error is returned by LookPath when it fails to classify a file as an executable. -// It is an alias for exec.Error. -type Error = exec.Error - -// An ExitError reports an unsuccessful exit by a command. -// It is an alias for exec.ExitError. -type ExitError = exec.ExitError - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) -} - -// LookPath searches for an executable named file in the directories -// named by the PATH environment variable. If file contains a slash, -// it is tried directly and the PATH is not consulted. The result will be -// an absolute path. -// -// LookPath differs from exec.LookPath in its handling of PATH lookups, -// which are used for file names without slashes. If exec.LookPath's -// PATH lookup would have returned an executable from the current directory, -// LookPath instead returns an error. -func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil && !isGo119ErrDot(err) { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -// CommandContext is like Command but includes a context. -// -// The provided context is used to kill the process (by calling os.Process.Kill) -// if the context becomes done before the command completes on its own. -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -// Command returns the Cmd struct to execute the named program with the given arguments. -// See exec.Command for most details. -// -// Command differs from exec.Command in its handling of PATH lookups, -// which are used when the program name contains no slashes. -// If exec.Command would have returned an exec.Cmd configured to run an -// executable from the current directory, Command instead -// returns an exec.Cmd that will return an error from Start or Run. -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) - fixCmd(name, cmd) - return cmd -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go deleted file mode 100644 index 5627d70e39..0000000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2022 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.19 - -package execabs - -import "os/exec" - -func isGo119ErrDot(err error) bool { - return false -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return false -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go deleted file mode 100644 index d60ab1b419..0000000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 - -package execabs - -import ( - "errors" - "os/exec" -) - -func isGo119ErrDot(err error) bool { - return errors.Is(err, exec.ErrDot) -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return cmd.Err != nil -} diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index e7d3df4bd3..b0e4198575 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index c6492020ec..fdcaa974d2 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -584,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 16dc699379..2f0fa76e4f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4dba..2b57e0f73b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. 
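// (Added context, not upstream text: FreeBSD's sysctl(3) copies out as much of
// the value as fits and fails with ENOMEM when the destination buffer is too
// small, and libc's __xuname() ignores that case and keeps the truncated,
// NUL-terminated string; the surrounding change mirrors that behavior for each
// Utsname field.)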
+ if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0f85e29e62..5682e2628a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1849,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// value is the parameter value to set. +func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// path is a non-empty path for the specified key. +// atfd is a file descriptor at which to start the lookup, or AT_FDCWD.
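// An illustrative end-to-end sketch (not part of the upstream file; the mount
// target path is a placeholder and error handling is elided): the Fsconfig*
// helpers sit between Fsopen and Fsmount/MoveMount in the Linux new-mount API,
// which needs Linux 5.2+ and CAP_SYS_ADMIN:
//
//	fsfd, _ := unix.Fsopen("tmpfs", unix.FSOPEN_CLOEXEC)
//	_ = unix.FsconfigSetString(fsfd, "size", "16m") // set a filesystem parameter
//	_ = unix.FsconfigCreate(fsfd)                   // instantiate the superblock
//	mfd, _ := unix.Fsmount(fsfd, unix.FSMOUNT_CLOEXEC, 0)
//	_ = unix.MoveMount(mfd, "", unix.AT_FDCWD, "/mnt/scratch", unix.MOVE_MOUNT_F_EMPTY_PATH)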
+func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FsconfigSetPath but with AT_PATH_EMPTY implied. +func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// value is a file descriptor to be assigned to the specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon. +func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index a5d3ff8df9..36bf8399f4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1785,6 +1785,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -2465,6 +2467,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2669,8 +2672,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2913,9 +2917,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!'
SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3075,6 +3108,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4920821cf3..42ff8c3c1b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a0c1e41127..dca436004f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -282,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c63985560f..5cca668ac3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -288,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 47cc62e25c..d8cae6d153 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -278,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 27ac4a09e2..28e39afdcb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -275,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 
SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 54694642a5..cd66e92cb4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 3adb81d758..c1595eba78 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 2dfe98f0d1..ee9456b0da 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index f5398f84f0..8cfca81e1b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c54f152d68..60b0deb3af 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -336,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 76057dc72f..f90aa7281b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 
e0c3725e2b..ba9e015033 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 18f2813ed5..07cdfd6e9f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -272,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 11619d4ec8..2f1dd214a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -344,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 396d994da7..f40519d901 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -335,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1488d27128..87d8612a1d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -906,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fcf3ecbdde..0cc3ce496e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -448,4 +448,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go 
index f56dc2504a..856d92d69e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -371,4 +371,7 @@ const ( SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 974bf24676..8d467094cf 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -412,4 +412,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 39a2739e23..edc173244d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -315,4 +315,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index cf9c9d77e1..445eba2061 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -309,4 +309,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 10b7362ef4..adba01bca7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index cd4d8b4fd3..014c4e9c7a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 2c0efca818..ccc97d74d0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index a72e31d391..ec2b64a95d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 
SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index c7d1e37471..21a839e338 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -439,4 +439,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index f4d4838c87..c11121ec3b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index b64f0e5911..909b631fcb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 95711195a0..e49bed16ea 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -316,4 +316,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f94e943bc4..66017d2d32 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -377,4 +377,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index ba0c2bc515..47bab18dce 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -390,4 +390,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index bbf8399ff5..eff6bcdef8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -174,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -455,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State 
uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -551,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -832,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1546,6 +1559,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1561,6 +1575,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1608,6 +1623,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1645,6 +1663,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + 
IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 + IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1666,6 +1692,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1689,9 +1718,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO = 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1722,6 +1764,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1736,6 +1780,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1748,6 +1793,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1777,6 +1824,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1792,6 +1842,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1850,8 +1901,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1881,6 +1940,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( @@ -3399,7 +3463,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4183,7 +4247,8 
@@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type LandlockPathBeneathAttr struct { @@ -5134,7 +5199,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1c NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5547,7 +5612,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index b8ad192506..d4577a4238 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index ffb8708ccf..6395a031d4 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } diff --git a/vendor/golang.org/x/text/runes/cond.go b/vendor/golang.org/x/text/runes/cond.go new file mode 100644 index 0000000000..df7aa02db6 --- /dev/null +++ b/vendor/golang.org/x/text/runes/cond.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runes + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is. +// This is done for various reasons: +// - To retain the semantics of the Nop transformer: if input is passed to a Nop +// one would expect it to be unchanged. +// - It would be very expensive to pass a converted RuneError to a transformer: +// a transformer might need more source bytes after RuneError, meaning that +// the only way to pass it safely is to create a new buffer and manage the +// intermingling of RuneErrors and normal input. +// - Many transformers leave ill-formed UTF-8 as is, so this is not +// inconsistent. 
Generally ill-formed UTF-8 is only replaced if it is a +// logical consequence of the operation (as for Map) or if it otherwise would +// pose security concerns (as for Remove). +// - An alternative would be to return an error on ill-formed UTF-8, but this +// would be inconsistent with other operations. + +// If returns a transformer that applies tIn to consecutive runes for which +// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset +// is called on tIn and tNotIn at the start of each run. A nil value passed to +// tIn or tNotIn is substituted with a Nop transformer. Invalid UTF-8 is translated +// to RuneError to determine which transformer to apply, but is passed as is to +// the respective transformer. +func If(s Set, tIn, tNotIn transform.Transformer) Transformer { + if tIn == nil && tNotIn == nil { + return Transformer{transform.Nop} + } + if tIn == nil { + tIn = transform.Nop + } + if tNotIn == nil { + tNotIn = transform.Nop + } + sIn, ok := tIn.(transform.SpanningTransformer) + if !ok { + sIn = dummySpan{tIn} + } + sNotIn, ok := tNotIn.(transform.SpanningTransformer) + if !ok { + sNotIn = dummySpan{tNotIn} + } + + a := &cond{ + tIn: sIn, + tNotIn: sNotIn, + f: s.Contains, + } + a.Reset() + return Transformer{a} +} + +type dummySpan struct{ transform.Transformer } + +func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) { + return 0, transform.ErrEndOfSpan +} + +type cond struct { + tIn, tNotIn transform.SpanningTransformer + f func(rune) bool + check func(rune) bool // current check to perform + t transform.SpanningTransformer // current transformer to use +} + +// Reset implements transform.Transformer. +func (t *cond) Reset() { + t.check = t.is + t.t = t.tIn + t.t.Reset() // notIn will be reset on first usage. +} + +func (t *cond) is(r rune) bool { + if t.f(r) { + return true + } + t.check = t.isNot + t.t = t.tNotIn + t.tNotIn.Reset() + return false +} + +func (t *cond) isNot(r rune) bool { + if !t.f(r) { + return true + } + t.check = t.is + t.t = t.tIn + t.tIn.Reset() + return false +} + +// This implementation of Span doesn't help all too much, but it needs to be +// there to satisfy this package's Transformer interface. +// TODO: there is certainly room for improvement, though. For example, if +// t.t == transform.Nop (which will be a common occurrence) it will save a bundle +// to special-case that loop. +func (t *cond) Span(src []byte, atEOF bool) (n int, err error) { + p := 0 + for n < len(src) && err == nil { + // Don't process too much at a time as the Spanner that will be + // called on this block may terminate early. + const maxChunk = 4096 + max := len(src) + if v := n + maxChunk; v < max { + max = v + } + atEnd := false + size := 0 + current := t.t + for ; p < max; p += size { + r := rune(src[p]) + if r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { + if !atEOF && !utf8.FullRune(src[p:]) { + err = transform.ErrShortSrc + break + } + } + if !t.check(r) { + // The next rune will be the start of a new run. + atEnd = true + break + } + } + n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src))) + n += n2 + if err2 != nil { + return n, err2 + } + // At this point either err != nil or t.check will pass for the rune at p.
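+ // (t.check flipped t.t when it returned false, so the next call to + // current.Span covers that rune with the other transformer.)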
+ p = n + size + } + return n, err +} + +func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + p := 0 + for nSrc < len(src) && err == nil { + // Don't process too much at a time, as the work might be wasted if the + // destination buffer isn't large enough to hold the result or a + // transform returns an error early. + const maxChunk = 4096 + max := len(src) + if n := nSrc + maxChunk; n < len(src) { + max = n + } + atEnd := false + size := 0 + current := t.t + for ; p < max; p += size { + r := rune(src[p]) + if r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { + if !atEOF && !utf8.FullRune(src[p:]) { + err = transform.ErrShortSrc + break + } + } + if !t.check(r) { + // The next rune will be the start of a new run. + atEnd = true + break + } + } + nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src))) + nDst += nDst2 + nSrc += nSrc2 + if err2 != nil { + return nDst, nSrc, err2 + } + // At this point either err != nil or t.check will pass for the rune at p. + p = nSrc + size + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go new file mode 100644 index 0000000000..930e87fedb --- /dev/null +++ b/vendor/golang.org/x/text/runes/runes.go @@ -0,0 +1,355 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package runes provides transforms for UTF-8 encoded text. +package runes // import "golang.org/x/text/runes" + +import ( + "unicode" + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// A Set is a collection of runes. +type Set interface { + // Contains returns true if r is contained in the set. + Contains(r rune) bool +} + +type setFunc func(rune) bool + +func (s setFunc) Contains(r rune) bool { + return s(r) +} + +// Note: using funcs here instead of wrapping types results in cleaner +// documentation and a smaller API. + +// In creates a Set with a Contains method that returns true for all runes in +// the given RangeTable. +func In(rt *unicode.RangeTable) Set { + return setFunc(func(r rune) bool { return unicode.Is(rt, r) }) +} + +// NotIn creates a Set with a Contains method that returns true for all runes not +// in the given RangeTable. +func NotIn(rt *unicode.RangeTable) Set { + return setFunc(func(r rune) bool { return !unicode.Is(rt, r) }) +} + +// Predicate creates a Set with a Contains method that returns f(r). +func Predicate(f func(rune) bool) Set { + return setFunc(f) +} + +// Transformer implements the transform.Transformer interface. +type Transformer struct { + t transform.SpanningTransformer +} + +func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return t.t.Transform(dst, src, atEOF) +} + +func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) { + return t.t.Span(b, atEOF) +} + +func (t Transformer) Reset() { t.t.Reset() } + +// Bytes returns a new byte slice with the result of converting b using t. It +// calls Reset on t. It returns nil if any error was found. This can only happen +// if an error-producing Transformer is passed to If. +func (t Transformer) Bytes(b []byte) []byte { + b, _, err := transform.Bytes(t, b) + if err != nil { + return nil + } + return b +} + +// String returns a string with the result of converting s using t. It calls +// Reset on t. It returns the empty string if any error was found.
This can only +// happen if an error-producing Transformer is passed to If. +func (t Transformer) String(s string) string { + s, _, err := transform.String(t, s) + if err != nil { + return "" + } + return s +} + +// TODO: +// - Copy: copying strings and bytes in whole-rune units. +// - Validation (maybe) +// - Well-formed-ness (maybe) + +const runeErrorString = string(utf8.RuneError) + +// Remove returns a Transformer that removes runes r for which s.Contains(r). +// Illegal input bytes are replaced by RuneError before being passed to s.Contains. +func Remove(s Set) Transformer { + if f, ok := s.(setFunc); ok { + // This little trick cuts the running time of BenchmarkRemove for sets + // created by Predicate roughly in half. + // TODO: special-case RangeTables as well. + return Transformer{remove(f)} + } + return Transformer{remove(s.Contains)} +} + +// TODO: remove transform.RemoveFunc. + +type remove func(r rune) bool + +func (remove) Reset() {} + +// Span implements transform.Spanner. +func (t remove) Span(src []byte, atEOF bool) (n int, err error) { + for r, size := rune(0), 0; n < len(src); { + if r = rune(src[n]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + } else { + err = transform.ErrEndOfSpan + } + break + } + if t(r) { + err = transform.ErrEndOfSpan + break + } + n += size + } + return +} + +// Transform implements transform.Transformer. +func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + // We replace illegal bytes with RuneError. Not doing so might + // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. + // The resulting byte sequence may subsequently contain runes + // for which t(r) is true that were passed unnoticed. + if !t(utf8.RuneError) { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + } + nSrc++ + continue + } + if t(r) { + nSrc += size + continue + } + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + } + return +} + +// Map returns a Transformer that maps the runes in the input using the given +// mapping. Illegal bytes in the input are converted to utf8.RuneError before +// being passed to the mapping func. +func Map(mapping func(rune) rune) Transformer { + return Transformer{mapper(mapping)} +} + +type mapper func(rune) rune + +func (mapper) Reset() {} + +// Span implements transform.Spanner. +func (t mapper) Span(src []byte, atEOF bool) (n int, err error) { + for r, size := rune(0), 0; n < len(src); n += size { + if r = rune(src[n]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + } else { + err = transform.ErrEndOfSpan + } + break + } + if t(r) != r { + err = transform.ErrEndOfSpan + break + } + } + return n, err +} + +// Transform implements transform.Transformer.
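For orientation on this newly vendored package, a minimal usage sketch follows; it assumes only the exported API shown above (In, Remove, Map and the transform helpers) and is illustrative rather than part of the patch:

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
)

func main() {
	// Remove every Unicode white-space rune from the input.
	strip := runes.Remove(runes.In(unicode.White_Space))
	out, _, err := transform.String(strip, "a b\tc")
	fmt.Println(out, err) // "abc" <nil>

	// Map passes each rune through a rune-to-rune function; ill-formed
	// input is presented to the function as utf8.RuneError first.
	upper := runes.Map(unicode.ToUpper)
	fmt.Println(upper.String("héllo")) // "HÉLLO"
}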
+func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + var replacement rune + var b [utf8.UTFMax]byte + + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + if replacement = t(r); replacement < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = byte(replacement) + nDst++ + nSrc++ + continue + } + size = 1 + } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + if replacement = t(utf8.RuneError); replacement == utf8.RuneError { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + continue + } + } else if replacement = t(r); replacement == r { + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + continue + } + + n := utf8.EncodeRune(b[:], replacement) + + if nDst+n > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < n; i++ { + dst[nDst] = b[i] + nDst++ + } + nSrc += size + } + return +} + +// ReplaceIllFormed returns a transformer that replaces all input bytes that are +// not part of a well-formed UTF-8 code sequence with utf8.RuneError. +func ReplaceIllFormed() Transformer { + return Transformer{&replaceIllFormed{}} +} + +type replaceIllFormed struct{ transform.NopResetter } + +func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + // ASCII fast path. + if src[n] < utf8.RuneSelf { + n++ + continue + } + + r, size := utf8.DecodeRune(src[n:]) + + // Look for a valid non-ASCII rune. + if r != utf8.RuneError || size != 1 { + n += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + err = transform.ErrEndOfSpan + break + } + return n, err +} + +func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + // ASCII fast path. + if r := src[nSrc]; r < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = r + nDst++ + nSrc++ + continue + } + + // Look for a valid non-ASCII rune. + if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + err = transform.ErrShortDst + break + } + nDst += size + nSrc += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go index 44ada22a03..5da33c7e6e 100644 --- a/vendor/golang.org/x/tools/go/analysis/analysis.go +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -24,6 +24,10 @@ type Analyzer struct { // (no capital or period, max ~60 letters). Doc string + // URL holds an optional link to a web page with additional + // documentation for this analyzer. 
+ URL string + // Flags defines any flags accepted by the analyzer. // The manner in which these flags are exposed to the user // depends on the driver which runs the analyzer. @@ -135,32 +139,24 @@ type Pass struct { // See comments for ExportObjectFact. ExportPackageFact func(fact Fact) - // AllPackageFacts returns a new slice containing all package facts of the analysis's FactTypes - // in unspecified order. - // WARNING: This is an experimental API and may change in the future. + // AllPackageFacts returns a new slice containing all package + // facts of the analysis's FactTypes in unspecified order. AllPackageFacts func() []PackageFact - // AllObjectFacts returns a new slice containing all object facts of the analysis's FactTypes - // in unspecified order. - // WARNING: This is an experimental API and may change in the future. + // AllObjectFacts returns a new slice containing all object + // facts of the analysis's FactTypes in unspecified order. AllObjectFacts func() []ObjectFact - // typeErrors contains types.Errors that are associated with the pkg. - typeErrors []types.Error - /* Further fields may be added in future. */ - // For example, suggested or applied refactorings. } // PackageFact is a package together with an associated fact. -// WARNING: This is an experimental API and may change in the future. type PackageFact struct { Package *types.Package Fact Fact } // ObjectFact is an object together with an associated fact. -// WARNING: This is an experimental API and may change in the future. type ObjectFact struct { Object types.Object Fact Fact diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go index 5cdcf46d2a..c638f27581 100644 --- a/vendor/golang.org/x/tools/go/analysis/diagnostic.go +++ b/vendor/golang.org/x/tools/go/analysis/diagnostic.go @@ -20,15 +20,25 @@ type Diagnostic struct { Category string // optional Message string - // SuggestedFixes contains suggested fixes for a diagnostic which can be used to perform - // edits to a file that address the diagnostic. - // TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic? - // Diagnostics should not contain SuggestedFixes that overlap. - // Experimental: This API is experimental and may change in the future. - SuggestedFixes []SuggestedFix // optional + // URL is the optional location of a web page that provides + // additional documentation for this diagnostic. + // + // If URL is empty but a Category is specified, then the + // Analysis driver should treat the URL as "#"+Category. + // + // The URL may be relative. If so, the base URL is that of the + // Analyzer that produced the diagnostic; + // see https://pkg.go.dev/net/url#URL.ResolveReference. + URL string - // Experimental: This API is experimental and may change in the future. - Related []RelatedInformation // optional + // SuggestedFixes is an optional list of fixes to address the + // problem described by the diagnostic, each one representing + // an alternative strategy; at most one may be applied. + SuggestedFixes []SuggestedFix + + // Related contains optional secondary positions and messages + // related to the primary diagnostic. + Related []RelatedInformation } // RelatedInformation contains information related to a diagnostic. @@ -41,12 +51,11 @@ type RelatedInformation struct { Message string } -// A SuggestedFix is a code change associated with a Diagnostic that a user can choose -// to apply to their code. 
Usually the SuggestedFix is meant to fix the issue flagged -// by the diagnostic. -// TextEdits for a SuggestedFix should not overlap. TextEdits for a SuggestedFix -// should not contain edits for other packages. -// Experimental: This API is experimental and may change in the future. +// A SuggestedFix is a code change associated with a Diagnostic that a +// user can choose to apply to their code. Usually the SuggestedFix is +// meant to fix the issue flagged by the diagnostic. +// +// The TextEdits must not overlap, nor contain edits for other packages. type SuggestedFix struct { // A description for this suggested fix to be shown to a user deciding // whether to accept it. @@ -56,7 +65,6 @@ type SuggestedFix struct { // A TextEdit represents the replacement of the code between Pos and End with the new text. // Each TextEdit should apply to a single file. End should not be earlier in the file than Pos. -// Experimental: This API is experimental and may change in the future. type TextEdit struct { // For a pure insertion, End can either be set to Pos or token.NoPos. Pos token.Pos diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go index c5429c9e23..44867d599e 100644 --- a/vendor/golang.org/x/tools/go/analysis/doc.go +++ b/vendor/golang.org/x/tools/go/analysis/doc.go @@ -191,7 +191,7 @@ and buildtag, inspect the raw text of Go source files or even non-Go files such as assembly. To report a diagnostic against a line of a raw text file, use the following sequence: - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { ... } tf := fset.AddFile(filename, -1, len(content)) tf.SetLinesForContent(content) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go b/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go new file mode 100644 index 0000000000..6976f0d909 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go @@ -0,0 +1,47 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package appends defines an Analyzer that detects +// if there is only one variable in append. 
+package appends + +import ( + _ "embed" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +//go:embed doc.go +var doc string + +var Analyzer = &analysis.Analyzer{ + Name: "appends", + Doc: analysisutil.MustExtractDoc(doc, "appends"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/appends", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + call := n.(*ast.CallExpr) + b, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Builtin) + if ok && b.Name() == "append" && len(call.Args) == 1 { + pass.ReportRangef(call, "append with no values") + } + }) + + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/appends/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/appends/doc.go new file mode 100644 index 0000000000..2e6a2e010b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/appends/doc.go @@ -0,0 +1,20 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package appends defines an Analyzer that detects +// if there is only one variable in append. +// +// # Analyzer appends +// +// appends: check for missing values after append +// +// This checker reports calls to append that pass +// no values to be appended to the slice. +// +// s := []string{"a", "b", "c"} +// _ = append(s) +// +// Such calls are always no-ops and often indicate an +// underlying mistake. +package appends diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go118.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go118.go deleted file mode 100644 index d8211afdc8..0000000000 --- a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go118.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.19 -// +build !go1.19 - -package asmdecl - -func additionalArches() []*asmArch { - return nil -} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go119.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go119.go deleted file mode 100644 index 3018383e7f..0000000000 --- a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go119.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.19 -// +build go1.19 - -package asmdecl - -var asmArchLoong64 = asmArch{name: "loong64", bigEndian: false, stack: "R3", lr: true} - -func additionalArches() []*asmArch { - return []*asmArch{&asmArchLoong64} -} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go index 7288559fc0..f2ca95aa9e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go @@ -27,6 +27,7 @@ const Doc = "report mismatches between assembly files and Go declarations" var Analyzer = &analysis.Analyzer{ Name: "asmdecl", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/asmdecl", Run: run, } @@ -95,6 +96,7 @@ var ( asmArchRISCV64 = asmArch{name: "riscv64", bigEndian: false, stack: "SP", lr: true, retRegs: []string{"X10", "F10"}} asmArchS390X = asmArch{name: "s390x", bigEndian: true, stack: "R15", lr: true} asmArchWasm = asmArch{name: "wasm", bigEndian: false, stack: "SP", lr: false} + asmArchLoong64 = asmArch{name: "loong64", bigEndian: false, stack: "R3", lr: true} arches = []*asmArch{ &asmArch386, @@ -110,11 +112,11 @@ var ( &asmArchRISCV64, &asmArchS390X, &asmArchWasm, + &asmArchLoong64, } ) func init() { - arches = append(arches, additionalArches()...) for _, arch := range arches { arch.sizes = types.SizesFor("gc", arch.name) if arch.sizes == nil { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go index 89146b7334..3bfd501226 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go @@ -2,13 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package assign defines an Analyzer that detects useless assignments. package assign // TODO(adonovan): check also for assignments to struct fields inside // methods that are on T instead of *T. import ( + _ "embed" "fmt" "go/ast" "go/token" @@ -18,18 +18,17 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" ) -const Doc = `check for useless assignments - -This checker reports assignments of the form x = x or a[i] = a[i]. -These are almost always useless, and even when they aren't they are -usually a mistake.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "assign", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "assign"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/assign", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -79,7 +78,7 @@ func run(pass *analysis.Pass) (interface{}, error) { // isMapIndex returns true if e is a map index expression. 
func isMapIndex(info *types.Info, e ast.Expr) bool { - if idx, ok := analysisutil.Unparen(e).(*ast.IndexExpr); ok { + if idx, ok := astutil.Unparen(e).(*ast.IndexExpr); ok { if typ := info.Types[idx.X].Type; typ != nil { _, ok := typ.Underlying().(*types.Map) return ok diff --git a/vendor/golang.org/x/tools/go/analysis/passes/assign/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/assign/doc.go new file mode 100644 index 0000000000..a4b1b64c51 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/assign/doc.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package assign defines an Analyzer that detects useless assignments. +// +// # Analyzer assign +// +// assign: check for useless assignments +// +// This checker reports assignments of the form x = x or a[i] = a[i]. +// These are almost always useless, and even when they aren't they are +// usually a mistake. +package assign diff --git a/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go b/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go index 9261db7e4e..931f9ca754 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go @@ -2,38 +2,37 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package atomic defines an Analyzer that checks for common mistakes -// using the sync/atomic package. package atomic import ( + _ "embed" "go/ast" "go/token" - "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" ) -const Doc = `check for common mistakes using the sync/atomic package - -The atomic checker looks for assignment statements of the form: - - x = atomic.AddUint64(&x, 1) - -which are not atomic.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "atomic", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "atomic"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/atomic", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, Run: run, } func run(pass *analysis.Pass) (interface{}, error) { + if !analysisutil.Imports(pass.Pkg, "sync/atomic") { + return nil, nil // doesn't directly import sync/atomic + } + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ @@ -53,18 +52,8 @@ func run(pass *analysis.Pass) (interface{}, error) { if !ok { continue } - sel, ok := call.Fun.(*ast.SelectorExpr) - if !ok { - continue - } - pkgIdent, _ := sel.X.(*ast.Ident) - pkgName, ok := pass.TypesInfo.Uses[pkgIdent].(*types.PkgName) - if !ok || pkgName.Imported().Path() != "sync/atomic" { - continue - } - - switch sel.Sel.Name { - case "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr": + fn := typeutil.StaticCallee(pass.TypesInfo, call) + if analysisutil.IsFunctionNamed(fn, "sync/atomic", "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr") { checkAtomicAddAssignment(pass, n.Lhs[i], call) } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/atomic/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/atomic/doc.go new file mode 100644 index 0000000000..5aafe25d32 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/atomic/doc.go 
@@ -0,0 +1,17 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atomic defines an Analyzer that checks for common mistakes +// using the sync/atomic package. +// +// # Analyzer atomic +// +// atomic: check for common mistakes using the sync/atomic package +// +// The atomic checker looks for assignment statements of the form: +// +// x = atomic.AddUint64(&x, 1) +// +// which are not atomic. +package atomic diff --git a/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go b/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go index e2e1a4f67c..aff6d25b3e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go @@ -18,6 +18,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" ) const Doc = "check for non-64-bits-aligned arguments to sync/atomic functions" @@ -25,6 +26,7 @@ const Doc = "check for non-64-bits-aligned arguments to sync/atomic functions" var Analyzer = &analysis.Analyzer{ Name: "atomicalign", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/atomicalign", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -41,31 +43,20 @@ func run(pass *analysis.Pass) (interface{}, error) { nodeFilter := []ast.Node{ (*ast.CallExpr)(nil), } + funcNames := []string{ + "AddInt64", "AddUint64", + "LoadInt64", "LoadUint64", + "StoreInt64", "StoreUint64", + "SwapInt64", "SwapUint64", + "CompareAndSwapInt64", "CompareAndSwapUint64", + } inspect.Preorder(nodeFilter, func(node ast.Node) { call := node.(*ast.CallExpr) - sel, ok := call.Fun.(*ast.SelectorExpr) - if !ok { - return - } - pkgIdent, ok := sel.X.(*ast.Ident) - if !ok { - return - } - pkgName, ok := pass.TypesInfo.Uses[pkgIdent].(*types.PkgName) - if !ok || pkgName.Imported().Path() != "sync/atomic" { - return - } - - switch sel.Sel.Name { - case "AddInt64", "AddUint64", - "LoadInt64", "LoadUint64", - "StoreInt64", "StoreUint64", - "SwapInt64", "SwapUint64", - "CompareAndSwapInt64", "CompareAndSwapUint64": - + fn := typeutil.StaticCallee(pass.TypesInfo, call) + if analysisutil.IsFunctionNamed(fn, "sync/atomic", funcNames...) { // For all the listed functions, the expression to check is always the first function argument. - check64BitAlignment(pass, sel.Sel.Name, call.Args[0]) + check64BitAlignment(pass, fn.Name(), call.Args[0]) } }) @@ -74,8 +65,8 @@ func run(pass *analysis.Pass) (interface{}, error) { func check64BitAlignment(pass *analysis.Pass, funcName string, arg ast.Expr) { // Checks the argument is made of the address operator (&) applied to - // to a struct field (as opposed to a variable as the first word of - // uint64 and int64 variables can be relied upon to be 64-bit aligned. + // a struct field (as opposed to a variable as the first word of + // uint64 and int64 variables can be relied upon to be 64-bit aligned). 
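The alignment rule behind this analyzer is easiest to see with a concrete struct. A hedged sketch, not part of the patch: on 32-bit targets only the first word of an allocation is guaranteed 64-bit alignment, so a uint64 field placed after a smaller field may be misaligned.

package main

import "sync/atomic"

type stats struct {
	ready bool   // pushes hits to offset 4 on 386/arm
	hits  uint64 // only guaranteed 4-byte alignment there
}

func bump(s *stats) {
	// atomicalign reports this call on 32-bit targets:
	// the address &s.hits need not be 64-bit aligned.
	atomic.AddUint64(&s.hits, 1)
}

func main() {
	var s stats
	bump(&s)
}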
unary, ok := arg.(*ast.UnaryExpr) if !ok || unary.Op != token.AND { return diff --git a/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go b/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go index 0d8b0bf4f1..564329774e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go @@ -14,6 +14,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" ) @@ -22,6 +23,7 @@ const Doc = "check for common mistakes involving boolean operators" var Analyzer = &analysis.Analyzer{ Name: "bools", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/bools", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -82,7 +84,7 @@ func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[* i := 0 var sets [][]ast.Expr for j := 0; j <= len(exprs); j++ { - if j == len(exprs) || hasSideEffects(info, exprs[j]) { + if j == len(exprs) || analysisutil.HasSideEffects(info, exprs[j]) { if i < j { sets = append(sets, exprs[i:j]) } @@ -161,46 +163,13 @@ func (op boolOp) checkSuspect(pass *analysis.Pass, exprs []ast.Expr) { } } -// hasSideEffects reports whether evaluation of e has side effects. -func hasSideEffects(info *types.Info, e ast.Expr) bool { - safe := true - ast.Inspect(e, func(node ast.Node) bool { - switch n := node.(type) { - case *ast.CallExpr: - typVal := info.Types[n.Fun] - switch { - case typVal.IsType(): - // Type conversion, which is safe. - case typVal.IsBuiltin(): - // Builtin func, conservatively assumed to not - // be safe for now. - safe = false - return false - default: - // A non-builtin func or method call. - // Conservatively assume that all of them have - // side effects for now. - safe = false - return false - } - case *ast.UnaryExpr: - if n.Op == token.ARROW { - safe = false - return false - } - } - return true - }) - return !safe -} - // split returns a slice of all subexpressions in e that are connected by op. // For example, given 'a || (b || c) || d' with the or op, // split returns []{d, c, b, a}. // seen[e] is already true; any newly processed exprs are added to seen. func (op boolOp) split(e ast.Expr, seen map[*ast.BinaryExpr]bool) (exprs []ast.Expr) { for { - e = unparen(e) + e = astutil.Unparen(e) if b, ok := e.(*ast.BinaryExpr); ok && b.Op == op.tok { seen[b] = true exprs = append(exprs, op.split(b.Y, seen)...) @@ -212,14 +181,3 @@ func (op boolOp) split(e ast.Expr, seen map[*ast.BinaryExpr]bool) (exprs []ast.E } return } - -// unparen returns e with any enclosing parentheses stripped. -func unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go b/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go index 02b7b18b3f..f077ea2824 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go @@ -6,8 +6,6 @@ // representation of an error-free package and returns the set of all // functions within it. It does not report any diagnostics itself but // may be used as an input to other analyzers. -// -// THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE. 
package buildssa import ( @@ -22,20 +20,19 @@ import ( var Analyzer = &analysis.Analyzer{ Name: "buildssa", Doc: "build SSA-form IR for later passes", + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/buildssa", Run: run, ResultType: reflect.TypeOf(new(SSA)), } // SSA provides SSA-form intermediate representation for all the -// non-blank source functions in the current package. +// source functions in the current package. type SSA struct { Pkg *ssa.Package SrcFuncs []*ssa.Function } func run(pass *analysis.Pass) (interface{}, error) { - // Plundered from ssautil.BuildPackage. - // We must create a new Program for each Package because the // analysis API provides no place to hang a Program shared by // all Packages. Consequently, SSA Packages and Functions do not @@ -52,20 +49,10 @@ func run(pass *analysis.Pass) (interface{}, error) { prog := ssa.NewProgram(pass.Fset, mode) - // Create SSA packages for all imports. - // Order is not significant. - created := make(map[*types.Package]bool) - var createAll func(pkgs []*types.Package) - createAll = func(pkgs []*types.Package) { - for _, p := range pkgs { - if !created[p] { - created[p] = true - prog.CreatePackage(p, nil, nil, true) - createAll(p.Imports()) - } - } + // Create SSA packages for direct imports. + for _, p := range pass.Pkg.Imports() { + prog.CreatePackage(p, nil, nil, true) } - createAll(pass.Pkg.Imports()) // Create and build the primary package. ssapkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false) @@ -77,16 +64,6 @@ func run(pass *analysis.Pass) (interface{}, error) { for _, f := range pass.Files { for _, decl := range f.Decls { if fdecl, ok := decl.(*ast.FuncDecl); ok { - - // SSA will not build a Function - // for a FuncDecl named blank. - // That's arguably too strict but - // relaxing it would break uniqueness of - // names of package members. - if fdecl.Name.Name == "_" { - continue - } - // (init functions have distinct Func // objects named "init" and distinct // ssa.Functions named "init#1", ...) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go index 775e507a34..55bdad78b7 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go @@ -25,6 +25,7 @@ const Doc = "check //go:build and // +build directives" var Analyzer = &analysis.Analyzer{ Name: "buildtag", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/buildtag", Run: runBuildTag, } @@ -39,7 +40,7 @@ func runBuildTag(pass *analysis.Pass) (interface{}, error) { } for _, name := range pass.IgnoredFiles { if strings.HasSuffix(name, ".go") { - f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments) + f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments|parser.SkipObjectResolution) if err != nil { // Not valid Go source code - not our job to diagnose, so ignore. 
return nil, nil diff --git a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go index b61ee5c3dc..4e86439757 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go @@ -19,6 +19,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" ) const debug = false @@ -35,6 +36,7 @@ or slice to C, either directly, or via a pointer, array, or struct.` var Analyzer = &analysis.Analyzer{ Name: "cgocall", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/cgocall", RunDespiteErrors: true, Run: run, } @@ -63,7 +65,7 @@ func checkCgo(fset *token.FileSet, f *ast.File, info *types.Info, reportf func(t // Is this a C.f() call? var name string - if sel, ok := analysisutil.Unparen(call.Fun).(*ast.SelectorExpr); ok { + if sel, ok := astutil.Unparen(call.Fun).(*ast.SelectorExpr); ok { if id, ok := sel.X.(*ast.Ident); ok && id.Name == "C" { name = sel.Sel.Name } @@ -179,7 +181,7 @@ func typeCheckCgoSourceFiles(fset *token.FileSet, pkg *types.Package, files []*a // If f is a cgo-generated file, Position reports // the original file, honoring //line directives. filename := fset.Position(raw.Pos()).Filename - f, err := parser.ParseFile(fset, filename, nil, parser.Mode(0)) + f, err := parser.ParseFile(fset, filename, nil, parser.SkipObjectResolution) if err != nil { return nil, nil, fmt.Errorf("can't parse raw cgo file: %v", err) } @@ -270,6 +272,7 @@ func typeCheckCgoSourceFiles(fset *token.FileSet, pkg *types.Package, files []*a Sizes: sizes, Error: func(error) {}, // ignore errors (e.g. unused import) } + setGoVersion(tc, pkg) // It's tempting to record the new types in the // existing pass.TypesInfo, but we don't own it. diff --git a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall_go120.go b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall_go120.go new file mode 100644 index 0000000000..06b54946d7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall_go120.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 + +package cgocall + +import "go/types" + +func setGoVersion(tc *types.Config, pkg *types.Package) { + // no types.Package.GoVersion until Go 1.21 +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall_go121.go b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall_go121.go new file mode 100644 index 0000000000..2a3e1fad22 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall_go121.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.21 + +package cgocall + +import "go/types" + +func setGoVersion(tc *types.Config, pkg *types.Package) { + tc.GoVersion = pkg.GoVersion() +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go b/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go index 64e184d343..6b126f897d 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go @@ -15,6 +15,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -37,6 +38,7 @@ should be replaced by: var Analyzer = &analysis.Analyzer{ Name: "composites", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/composite", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, Run: run, @@ -70,8 +72,8 @@ func run(pass *analysis.Pass) (interface{}, error) { return } var structuralTypes []types.Type - switch typ := typ.(type) { - case *typeparams.TypeParam: + switch typ := aliases.Unalias(typ).(type) { + case *types.TypeParam: terms, err := typeparams.StructuralTerms(typ) if err != nil { return // invalid type @@ -83,7 +85,8 @@ func run(pass *analysis.Pass) (interface{}, error) { structuralTypes = append(structuralTypes, typ) } for _, typ := range structuralTypes { - under := deref(typ.Underlying()) + // TODO(adonovan): this operation is questionable. + under := aliases.Unalias(deref(typ.Underlying())) strct, ok := under.(*types.Struct) if !ok { // skip non-struct composite literals @@ -141,9 +144,11 @@ func run(pass *analysis.Pass) (interface{}, error) { return nil, nil } +// Note: this is not the usual deref operator! +// It strips off all Pointer constructors (and their Aliases). func deref(typ types.Type) types.Type { for { - ptr, ok := typ.(*types.Pointer) + ptr, ok := aliases.Unalias(typ).(*types.Pointer) if !ok { break } @@ -152,18 +157,18 @@ func deref(typ types.Type) types.Type { return typ } +// isLocalType reports whether typ belongs to the same package as pass. +// TODO(adonovan): local means "internal to a function"; rename to isSamePackageType. 
func isLocalType(pass *analysis.Pass, typ types.Type) bool { - switch x := typ.(type) { + switch x := aliases.Unalias(typ).(type) { case *types.Struct: // struct literals are local types return true case *types.Pointer: return isLocalType(pass, x.Elem()) - case *types.Named: + case interface{ Obj() *types.TypeName }: // *Named or *TypeParam (aliases were removed already) // names in package foo are local to foo_test too return strings.TrimSuffix(x.Obj().Pkg().Path(), "_test") == strings.TrimSuffix(pass.Pkg.Path(), "_test") - case *typeparams.TypeParam: - return strings.TrimSuffix(x.Obj().Pkg().Path(), "_test") == strings.TrimSuffix(pass.Pkg.Path(), "_test") } return false } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go index 8cc93e94dc..8f39159c0f 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go @@ -16,7 +16,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -29,6 +31,7 @@ values should be referred to through a pointer.` var Analyzer = &analysis.Analyzer{ Name: "copylocks", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/copylocks", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, Run: run, @@ -222,6 +225,8 @@ func (path typePath) String() string { } func lockPathRhs(pass *analysis.Pass, x ast.Expr) typePath { + x = astutil.Unparen(x) // ignore parens on rhs + if _, ok := x.(*ast.CompositeLit); ok { return nil } @@ -230,7 +235,7 @@ func lockPathRhs(pass *analysis.Pass, x ast.Expr) typePath { return nil } if star, ok := x.(*ast.StarExpr); ok { - if _, ok := star.X.(*ast.CallExpr); ok { + if _, ok := astutil.Unparen(star.X).(*ast.CallExpr); ok { // A call may return a pointer to a zero value. return nil } @@ -241,29 +246,23 @@ func lockPathRhs(pass *analysis.Pass, x ast.Expr) typePath { // lockPath returns a typePath describing the location of a lock value // contained in typ. If there is no contained lock, it returns nil. // -// The seenTParams map is used to short-circuit infinite recursion via type -// parameters. -func lockPath(tpkg *types.Package, typ types.Type, seenTParams map[*typeparams.TypeParam]bool) typePath { - if typ == nil { +// The seen map is used to short-circuit infinite recursion due to type cycles. +func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typePath { + if typ == nil || seen[typ] { return nil } + if seen == nil { + seen = make(map[types.Type]bool) + } + seen[typ] = true - if tpar, ok := typ.(*typeparams.TypeParam); ok { - if seenTParams == nil { - // Lazily allocate seenTParams, since the common case will not involve - // any type parameters. 
- seenTParams = make(map[*typeparams.TypeParam]bool) - } - if seenTParams[tpar] { - return nil - } - seenTParams[tpar] = true + if tpar, ok := aliases.Unalias(typ).(*types.TypeParam); ok { terms, err := typeparams.StructuralTerms(tpar) if err != nil { return nil // invalid type } for _, term := range terms { - subpath := lockPath(tpkg, term.Type(), seenTParams) + subpath := lockPath(tpkg, term.Type(), seen) if len(subpath) > 0 { if term.Tilde() { // Prepend a tilde to our lock path entry to clarify the resulting @@ -297,7 +296,7 @@ func lockPath(tpkg *types.Package, typ types.Type, seenTParams map[*typeparams.T ttyp, ok := typ.Underlying().(*types.Tuple) if ok { for i := 0; i < ttyp.Len(); i++ { - subpath := lockPath(tpkg, ttyp.At(i).Type(), seenTParams) + subpath := lockPath(tpkg, ttyp.At(i).Type(), seen) if subpath != nil { return append(subpath, typ.String()) } @@ -322,16 +321,14 @@ func lockPath(tpkg *types.Package, typ types.Type, seenTParams map[*typeparams.T // In go1.10, sync.noCopy did not implement Locker. // (The Unlock method was added only in CL 121876.) // TODO(adonovan): remove workaround when we drop go1.10. - if named, ok := typ.(*types.Named); ok && - named.Obj().Name() == "noCopy" && - named.Obj().Pkg().Path() == "sync" { + if analysisutil.IsNamedType(typ, "sync", "noCopy") { return []string{typ.String()} } nfields := styp.NumFields() for i := 0; i < nfields; i++ { ftyp := styp.Field(i).Type() - subpath := lockPath(tpkg, ftyp, seenTParams) + subpath := lockPath(tpkg, ftyp, seen) if subpath != nil { return append(subpath, typ.String()) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go b/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go index 73746d6f04..d21adeee90 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go @@ -24,6 +24,7 @@ import ( var Analyzer = &analysis.Analyzer{ Name: "ctrlflow", Doc: "build a control-flow graph", + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ctrlflow", Run: run, ResultType: reflect.TypeOf(new(CFGs)), FactTypes: []analysis.Fact{new(noReturn)}, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go index 9ea137386b..95cd9a061e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go @@ -12,8 +12,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/aliases" ) const Doc = `check for calls of reflect.DeepEqual on error values @@ -28,11 +30,16 @@ errors is discouraged.` var Analyzer = &analysis.Analyzer{ Name: "deepequalerrors", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/deepequalerrors", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (interface{}, error) { + if !analysisutil.Imports(pass.Pkg, "reflect") { + return nil, nil // doesn't directly import reflect + } + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ @@ -40,11 +47,8 @@ func run(pass *analysis.Pass) (interface{}, error) { } inspect.Preorder(nodeFilter, 
func(n ast.Node) { call := n.(*ast.CallExpr) - fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func) - if !ok { - return - } - if fn.FullName() == "reflect.DeepEqual" && hasError(pass, call.Args[0]) && hasError(pass, call.Args[1]) { + fn, _ := typeutil.Callee(pass.TypesInfo, call).(*types.Func) + if analysisutil.IsFunctionNamed(fn, "reflect", "DeepEqual") && hasError(pass, call.Args[0]) && hasError(pass, call.Args[1]) { pass.ReportRangef(call, "avoid using reflect.DeepEqual with errors") } }) @@ -98,7 +102,7 @@ func containsError(typ types.Type) bool { return true } } - case *types.Named: + case *types.Named, *aliases.Alias: return check(t.Underlying()) // We list the remaining valid type kinds for completeness. diff --git a/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go b/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go new file mode 100644 index 0000000000..5e8e80a6a7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go @@ -0,0 +1,59 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package defers + +import ( + _ "embed" + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +//go:embed doc.go +var doc string + +// Analyzer is the defers analyzer. +var Analyzer = &analysis.Analyzer{ + Name: "defers", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/defers", + Doc: analysisutil.MustExtractDoc(doc, "defers"), + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + if !analysisutil.Imports(pass.Pkg, "time") { + return nil, nil + } + + checkDeferCall := func(node ast.Node) bool { + switch v := node.(type) { + case *ast.CallExpr: + if analysisutil.IsFunctionNamed(typeutil.StaticCallee(pass.TypesInfo, v), "time", "Since") { + pass.Reportf(v.Pos(), "call to time.Since is not deferred") + } + case *ast.FuncLit: + return false // prune + } + return true + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.DeferStmt)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + d := n.(*ast.DeferStmt) + ast.Inspect(d.Call, checkDeferCall) + }) + + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/defers/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/defers/doc.go new file mode 100644 index 0000000000..bdb1351628 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/defers/doc.go @@ -0,0 +1,25 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package defers defines an Analyzer that checks for common mistakes in defer +// statements. +// +// # Analyzer defers +// +// defers: report common mistakes in defer statements +// +// The defers analyzer reports a diagnostic when a defer statement would +// result in a non-deferred call to time.Since, as experience has shown +// that this is nearly always a mistake. +// +// For example: +// +// start := time.Now() +// ... 
+// defer recordLatency(time.Since(start)) // error: call to time.Since is not deferred +// +// The correct code is: +// +// defer func() { recordLatency(time.Since(start)) }() +package defers diff --git a/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go b/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go new file mode 100644 index 0000000000..2691f189aa --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go @@ -0,0 +1,209 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package directive defines an Analyzer that checks known Go toolchain directives. +package directive + +import ( + "go/ast" + "go/parser" + "go/token" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" +) + +const Doc = `check Go toolchain directives such as //go:debug + +This analyzer checks for problems with known Go toolchain directives +in all Go source files in a package directory, even those excluded by +//go:build constraints, and all non-Go source files too. + +For //go:debug (see https://go.dev/doc/godebug), the analyzer checks +that the directives are placed only in Go source files, only above the +package comment, and only in package main or *_test.go files. + +Support for other known directives may be added in the future. + +This analyzer does not check //go:build, which is handled by the +buildtag analyzer. +` + +var Analyzer = &analysis.Analyzer{ + Name: "directive", + Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/directive", + Run: runDirective, +} + +func runDirective(pass *analysis.Pass) (interface{}, error) { + for _, f := range pass.Files { + checkGoFile(pass, f) + } + for _, name := range pass.OtherFiles { + if err := checkOtherFile(pass, name); err != nil { + return nil, err + } + } + for _, name := range pass.IgnoredFiles { + if strings.HasSuffix(name, ".go") { + f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments) + if err != nil { + // Not valid Go source code - not our job to diagnose, so ignore. + continue + } + checkGoFile(pass, f) + } else { + if err := checkOtherFile(pass, name); err != nil { + return nil, err + } + } + } + return nil, nil +} + +func checkGoFile(pass *analysis.Pass, f *ast.File) { + check := newChecker(pass, pass.Fset.File(f.Package).Name(), f) + + for _, group := range f.Comments { + // A +build comment is ignored after or adjoining the package declaration. + if group.End()+1 >= f.Package { + check.inHeader = false + } + // A //go:build comment is ignored after the package declaration + // (but adjoining it is OK, in contrast to +build comments). + if group.Pos() >= f.Package { + check.inHeader = false + } + + // Check each line of a //-comment. + for _, c := range group.List { + check.comment(c.Slash, c.Text) + } + } +} + +func checkOtherFile(pass *analysis.Pass, filename string) error { + // We cannot use the Go parser, since this is not a Go source file. + // Read the raw bytes instead.
+ content, tf, err := analysisutil.ReadFile(pass.Fset, filename) + if err != nil { + return err + } + + check := newChecker(pass, filename, nil) + check.nonGoFile(token.Pos(tf.Base()), string(content)) + return nil +} + +type checker struct { + pass *analysis.Pass + filename string + file *ast.File // nil for non-Go file + inHeader bool // in file header (before package declaration) + inStar bool // currently in a /* */ comment +} + +func newChecker(pass *analysis.Pass, filename string, file *ast.File) *checker { + return &checker{ + pass: pass, + filename: filename, + file: file, + inHeader: true, + } +} + +func (check *checker) nonGoFile(pos token.Pos, fullText string) { + // Process each line. + text := fullText + inStar := false + for text != "" { + offset := len(fullText) - len(text) + var line string + line, text, _ = strings.Cut(text, "\n") + + if !inStar && strings.HasPrefix(line, "//") { + check.comment(pos+token.Pos(offset), line) + continue + } + + // Skip over, cut out any /* */ comments, + // to avoid being confused by a commented-out // comment. + for { + line = strings.TrimSpace(line) + if inStar { + var ok bool + _, line, ok = strings.Cut(line, "*/") + if !ok { + break + } + inStar = false + continue + } + line, inStar = stringsCutPrefix(line, "/*") + if !inStar { + break + } + } + if line != "" { + // Found non-comment non-blank line. + // Ends space for valid //go:build comments, + // but also ends the fraction of the file we can + // reliably parse. From this point on we might + // incorrectly flag "comments" inside multiline + // string constants or anything else (this might + // not even be a Go program). So stop. + break + } + } +} + +func (check *checker) comment(pos token.Pos, line string) { + if !strings.HasPrefix(line, "//go:") { + return + } + // testing hack: stop at // ERROR + if i := strings.Index(line, " // ERROR "); i >= 0 { + line = line[:i] + } + + verb := line + if i := strings.IndexFunc(verb, unicode.IsSpace); i >= 0 { + verb = verb[:i] + if line[i] != ' ' && line[i] != '\t' && line[i] != '\n' { + r, _ := utf8.DecodeRuneInString(line[i:]) + check.pass.Reportf(pos, "invalid space %#q in %s directive", r, verb) + } + } + + switch verb { + default: + // TODO: Use the go language version for the file. + // If that version is not newer than us, then we can + // report unknown directives. + + case "//go:build": + // Ignore. The buildtag analyzer reports misplaced comments. + + case "//go:debug": + if check.file == nil { + check.pass.Reportf(pos, "//go:debug directive only valid in Go source files") + } else if check.file.Name.Name != "main" && !strings.HasSuffix(check.filename, "_test.go") { + check.pass.Reportf(pos, "//go:debug directive only valid in package main or test") + } else if !check.inHeader { + check.pass.Reportf(pos, "//go:debug directive only valid before package declaration") + } + } +} + +// Go 1.20 strings.CutPrefix. 
+func stringsCutPrefix(s, prefix string) (after string, found bool) { + if !strings.HasPrefix(s, prefix) { + return s, false + } + return s[len(prefix):], true +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go b/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go index 96adad3ee8..7f62ad4c82 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go @@ -13,6 +13,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" ) @@ -25,6 +26,7 @@ of the second argument is not a pointer to a type implementing error.` var Analyzer = &analysis.Analyzer{ Name: "errorsas", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/errorsas", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -37,6 +39,10 @@ func run(pass *analysis.Pass) (interface{}, error) { return nil, nil } + if !analysisutil.Imports(pass.Pkg, "errors") { + return nil, nil // doesn't directly import errors + } + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ @@ -45,15 +51,12 @@ func run(pass *analysis.Pass) (interface{}, error) { inspect.Preorder(nodeFilter, func(n ast.Node) { call := n.(*ast.CallExpr) fn := typeutil.StaticCallee(pass.TypesInfo, call) - if fn == nil { - return // not a static call + if !analysisutil.IsFunctionNamed(fn, "errors", "As") { + return } if len(call.Args) < 2 { return // not enough arguments, e.g. called with return values of another function } - if fn.FullName() != "errors.As" { - return - } if err := checkAsTarget(pass, call.Args[1]); err != nil { pass.ReportRangef(call, "%v", err) } @@ -63,9 +66,6 @@ func run(pass *analysis.Pass) (interface{}, error) { var errorType = types.Universe.Lookup("error").Type() -// pointerToInterfaceOrError reports whether the type of e is a pointer to an interface or a type implementing error, -// or is the empty interface. - // checkAsTarget reports an error if the second argument to errors.As is invalid. func checkAsTarget(pass *analysis.Pass, e ast.Expr) error { t := pass.TypesInfo.Types[e].Type diff --git a/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go b/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go index aff663046a..012e2ecd0c 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go @@ -51,6 +51,7 @@ known as "false sharing" that slows down both goroutines. 
var Analyzer = &analysis.Analyzer{ Name: "fieldalignment", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/fieldalignment", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go b/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go index 27b1b8400f..2671573d1f 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go @@ -26,6 +26,7 @@ of a particular name.` var Analyzer = &analysis.Analyzer{ Name: "findcall", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/findcall", Run: run, RunDespiteErrors: true, FactTypes: []analysis.Fact{new(foundFact)}, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go b/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go index 741492e477..0b3ded47ea 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go @@ -20,6 +20,7 @@ const Doc = "report assembly that clobbers the frame pointer before saving it" var Analyzer = &analysis.Analyzer{ Name: "framepointer", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/framepointer", Run: run, } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go b/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go index 3b9168c6c3..047ae07cca 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go @@ -14,6 +14,8 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" ) const Doc = `check for mistakes using HTTP responses @@ -35,6 +37,7 @@ diagnostic for such mistakes.` var Analyzer = &analysis.Analyzer{ Name: "httpresponse", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/httpresponse", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -115,7 +118,8 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool { if res.Len() != 2 { return false // the function called does not return two values. } - if ptr, ok := res.At(0).Type().(*types.Pointer); !ok || !isNamedType(ptr.Elem(), "net/http", "Response") { + isPtr, named := typesinternal.ReceiverNamed(res.At(0)) + if !isPtr || !analysisutil.IsNamedType(named, "net/http", "Response") { return false // the first return type is not *http.Response. } @@ -130,11 +134,11 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool { return ok && id.Name == "http" // function in net/http package. } - if isNamedType(typ, "net/http", "Client") { + if analysisutil.IsNamedType(typ, "net/http", "Client") { return true // method on http.Client. } - ptr, ok := typ.(*types.Pointer) - return ok && isNamedType(ptr.Elem(), "net/http", "Client") // method on *http.Client. + ptr, ok := aliases.Unalias(typ).(*types.Pointer) + return ok && analysisutil.IsNamedType(ptr.Elem(), "net/http", "Client") // method on *http.Client. 
} // restOfBlock, given a traversal stack, finds the innermost containing @@ -170,13 +174,3 @@ func rootIdent(n ast.Node) *ast.Ident { return nil } } - -// isNamedType reports whether t is the named type path.name. -func isNamedType(t types.Type, path, name string) bool { - n, ok := t.(*types.Named) - if !ok { - return false - } - obj := n.Obj() - return obj.Name() == name && obj.Pkg() != nil && obj.Pkg().Path() == path -} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/doc.go new file mode 100644 index 0000000000..3d2b1a3dcb --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/doc.go @@ -0,0 +1,24 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ifaceassert defines an Analyzer that flags +// impossible interface-interface type assertions. +// +// # Analyzer ifaceassert +// +// ifaceassert: detect impossible interface-to-interface type assertions +// +// This checker flags type assertions v.(T) and corresponding type-switch cases +// in which the static type V of v is an interface that cannot possibly implement +// the target interface T. This occurs when V and T contain methods with the same +// name but different signatures. Example: +// +// var v interface { +// Read() +// } +// _ = v.(io.Reader) +// +// The Read method in v has a different signature than the Read method in +// io.Reader, so this assertion cannot succeed. +package ifaceassert diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go index 30130f63ea..cd4a477626 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go @@ -2,38 +2,26 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package ifaceassert defines an Analyzer that flags -// impossible interface-interface type assertions. package ifaceassert import ( + _ "embed" "go/ast" "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" ) -const Doc = `detect impossible interface-to-interface type assertions - -This checker flags type assertions v.(T) and corresponding type-switch cases -in which the static type V of v is an interface that cannot possibly implement -the target interface T. This occurs when V and T contain methods with the same -name but different signatures. Example: - - var v interface { - Read() - } - _ = v.(io.Reader) - -The Read method in v has a different signature than the Read method in -io.Reader, so this assertion cannot succeed. 
-` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "ifaceassert", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "ifaceassert"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ifaceassert", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go index b35f62dc73..a077d44024 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go @@ -7,6 +7,7 @@ package ifaceassert import ( "go/types" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -67,7 +68,7 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { // of a generic function type (or an interface method) that is // part of the type we're testing. We don't care about these type // parameters. - // Similarly, the receiver of a method may declare (rather then + // Similarly, the receiver of a method may declare (rather than // use) type parameters, we don't care about those either. // Thus, we only need to look at the input and result parameters. return w.isParameterized(t.Params()) || w.isParameterized(t.Results()) @@ -94,15 +95,19 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { case *types.Chan: return w.isParameterized(t.Elem()) + case *aliases.Alias: + // TODO(adonovan): think about generic aliases. + return w.isParameterized(aliases.Unalias(t)) + case *types.Named: - list := typeparams.NamedTypeArgs(t) + list := t.TypeArgs() for i, n := 0, list.Len(); i < n; i++ { if w.isParameterized(list.At(i)) { return true } } - case *typeparams.TypeParam: + case *types.TypeParam: return true default: diff --git a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go index 165c70cbd3..3b121cb0ce 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go @@ -38,6 +38,7 @@ import ( var Analyzer = &analysis.Analyzer{ Name: "inspect", Doc: "optimize AST traversal for later passes", + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/inspect", Run: run, RunDespiteErrors: true, ResultType: reflect.TypeOf(new(inspector.Inspector)), diff --git a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go b/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go index ac37e4784e..89291602a5 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go @@ -12,7 +12,10 @@ import ( "go/printer" "go/token" "go/types" - "io/ioutil" + "os" + + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/analysisinternal" ) // Format returns a string representation of the expression. @@ -55,21 +58,10 @@ func HasSideEffects(info *types.Info, e ast.Expr) bool { return !safe } -// Unparen returns e with any enclosing parentheses stripped. -func Unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -} - // ReadFile reads a file and adds it to the FileSet // so that we can report errors against it using lineStart. 
func ReadFile(fset *token.FileSet, filename string) ([]byte, *token.File, error) { - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { return nil, nil, err } @@ -118,3 +110,48 @@ func Imports(pkg *types.Package, path string) bool { } return false } + +// IsNamedType reports whether t is the named type with the given package path +// and one of the given names. +// This function avoids allocating the concatenation of "pkg.Name", +// which is important for the performance of syntax matching. +func IsNamedType(t types.Type, pkgPath string, names ...string) bool { + n, ok := aliases.Unalias(t).(*types.Named) + if !ok { + return false + } + obj := n.Obj() + if obj == nil || obj.Pkg() == nil || obj.Pkg().Path() != pkgPath { + return false + } + name := obj.Name() + for _, n := range names { + if name == n { + return true + } + } + return false +} + +// IsFunctionNamed reports whether f is a top-level function defined in the +// given package and has one of the given names. +// It returns false if f is nil or a method. +func IsFunctionNamed(f *types.Func, pkgPath string, names ...string) bool { + if f == nil { + return false + } + if f.Pkg() == nil || f.Pkg().Path() != pkgPath { + return false + } + if f.Type().(*types.Signature).Recv() != nil { + return false + } + for _, n := range names { + if f.Name() == n { + return true + } + } + return false +} + +var MustExtractDoc = analysisinternal.MustExtractDoc diff --git a/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/doc.go new file mode 100644 index 0000000000..c95b1c1c98 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/doc.go @@ -0,0 +1,75 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loopclosure defines an Analyzer that checks for references to +// enclosing loop variables from within nested functions. +// +// # Analyzer loopclosure +// +// loopclosure: check references to loop variables from within nested functions +// +// This analyzer reports places where a function literal references the +// iteration variable of an enclosing loop, and the loop calls the function +// in such a way (e.g. with go or defer) that it may outlive the loop +// iteration and possibly observe the wrong value of the variable. +// +// Note: An iteration variable can only outlive a loop iteration in Go versions <=1.21. +// In Go 1.22 and later, the loop variable lifetimes changed to create a new +// iteration variable per loop iteration. (See go.dev/issue/60078.) +// +// In this example, all the deferred functions run after the loop has +// completed, so all observe the final value of v [<go1.22].
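// The concrete shape of the bug described above is, as a sketch (list and
// use are placeholders):
//
//	for _, v := range list {
//		defer func() {
//			use(v) // observes the final value of v, not the per-iteration value
//		}()
//	}
//
// Before Go 1.22 the usual fix is a per-iteration copy, e.g. v := v as the
// first statement of the loop body.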
- rtype := recv.Type() - if ptr, ok := recv.Type().(*types.Pointer); ok { - rtype = ptr.Elem() - } - named, ok := rtype.(*types.Named) - if !ok { - return false - } - if named.Obj().Name() != typeName { - return false - } - pkg := f.Pkg() - if pkg == nil { - return false - } - if pkg.Path() != pkgPath { - return false - } - - return true + _, named := typesinternal.ReceiverNamed(recv) + return analysisutil.IsNamedType(named, pkgPath, typeName) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/doc.go new file mode 100644 index 0000000000..28bf6c7e26 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/doc.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lostcancel defines an Analyzer that checks for failure to +// call a context cancellation function. +// +// # Analyzer lostcancel +// +// lostcancel: check cancel func returned by context.WithCancel is called +// +// The cancellation function returned by context.WithCancel, WithTimeout, +// and WithDeadline must be called or the new context will remain live +// until its parent context is cancelled. +// (The background context is never cancelled.) +package lostcancel diff --git a/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go b/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go index de6f840f68..bf56a5c06f 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go @@ -2,11 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package lostcancel defines an Analyzer that checks for failure to -// call a context cancellation function. package lostcancel import ( + _ "embed" "fmt" "go/ast" "go/types" @@ -14,20 +13,18 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/ctrlflow" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/cfg" ) -const Doc = `check cancel func returned by context.WithCancel is called - -The cancellation function returned by context.WithCancel, WithTimeout, -and WithDeadline must be called or the new context will remain live -until its parent context is cancelled. -(The background context is never cancelled.)` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "lostcancel", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "lostcancel"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/lostcancel", Run: run, Requires: []*analysis.Analyzer{ inspect.Analyzer, @@ -51,7 +48,7 @@ var contextPackage = "context" // checkLostCancel analyzes a single named or literal function. func run(pass *analysis.Pass) (interface{}, error) { // Fast path: bypass check if file doesn't use context.WithCancel. 
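// A sketch of the leak this pass reports (doWork is a placeholder):
//
//	ctx, cancel := context.WithCancel(ctx)
//	if err := doWork(ctx); err != nil {
//		return err // cancel may never run on this path
//	}
//	cancel()
//
// Writing defer cancel() immediately after WithCancel avoids the diagnostic.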
- if !hasImport(pass.Pkg, contextPackage) { + if !analysisutil.Imports(pass.Pkg, contextPackage) { return nil, nil } @@ -175,22 +172,24 @@ func runFunc(pass *analysis.Pass, node ast.Node) { if ret := lostCancelPath(pass, g, v, stmt, sig); ret != nil { lineno := pass.Fset.Position(stmt.Pos()).Line pass.ReportRangef(stmt, "the %s function is not used on all paths (possible context leak)", v.Name()) - pass.ReportRangef(ret, "this return statement may be reached without using the %s var defined on line %d", v.Name(), lineno) + + pos, end := ret.Pos(), ret.End() + // golang/go#64547: cfg.Block.Return may return a synthetic + // ReturnStmt that overflows the file. + if pass.Fset.File(pos) != pass.Fset.File(end) { + end = pos + } + pass.Report(analysis.Diagnostic{ + Pos: pos, + End: end, + Message: fmt.Sprintf("this return statement may be reached without using the %s var defined on line %d", v.Name(), lineno), + }) } } } func isCall(n ast.Node) bool { _, ok := n.(*ast.CallExpr); return ok } -func hasImport(pkg *types.Package, path string) bool { - for _, imp := range pkg.Imports() { - if imp.Path() == path { - return true - } - } - return false -} - // isContextWithCancel reports whether n is one of the qualified identifiers // context.With{Cancel,Timeout,Deadline}. func isContextWithCancel(info *types.Info, n ast.Node) bool { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/doc.go new file mode 100644 index 0000000000..07f79332b2 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/doc.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nilfunc defines an Analyzer that checks for useless +// comparisons against nil. +// +// # Analyzer nilfunc +// +// nilfunc: check for useless comparisons between functions and nil +// +// A useless comparison is one like f == nil as opposed to f() == nil. +package nilfunc diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go b/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go index e4c66df6d6..778f7f1f8f 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go @@ -7,23 +7,25 @@ package nilfunc import ( + _ "embed" "go/ast" "go/token" "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/typeparams" ) -const Doc = `check for useless comparisons between functions and nil - -A useless comparison is one like f == nil as opposed to f() == nil.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "nilfunc", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "nilfunc"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/nilfunc", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -60,7 +62,7 @@ func run(pass *analysis.Pass) (interface{}, error) { obj = pass.TypesInfo.Uses[v] case *ast.SelectorExpr: obj = pass.TypesInfo.Uses[v.Sel] - case *ast.IndexExpr, *typeparams.IndexListExpr: + case *ast.IndexExpr, *ast.IndexListExpr: // Check generic functions such as "f[T1,T2]". 
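// A sketch of the useless comparison this pass flags: a declared function
// can never be nil, so the condition is always false:
//
//	func f() error { return nil }
//
//	if f == nil { // diagnostic: almost certainly meant f() == nil
//	}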
x, _, _, _ := typeparams.UnpackIndexExpr(v) if id, ok := x.(*ast.Ident); ok { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilness/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/nilness/doc.go new file mode 100644 index 0000000000..212263741d --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/nilness/doc.go @@ -0,0 +1,45 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nilness inspects the control-flow graph of an SSA function +// and reports errors such as nil pointer dereferences and degenerate +// nil pointer comparisons. +// +// # Analyzer nilness +// +// nilness: check for redundant or impossible nil comparisons +// +// The nilness checker inspects the control-flow graph of each function in +// a package and reports nil pointer dereferences, degenerate nil +// pointers, and panics with nil values. A degenerate comparison is of the form +// x==nil or x!=nil where x is statically known to be nil or non-nil. These are +// often a mistake, especially in control flow related to errors. Panics with nil +// values are checked because they are not detectable by +// +// if r := recover(); r != nil { +// +// This check reports conditions such as: +// +// if f == nil { // impossible condition (f is a function) +// } +// +// and: +// +// p := &v +// ... +// if p != nil { // tautological condition +// } +// +// and: +// +// if p == nil { +// print(*p) // nil dereference +// } +// +// and: +// +// if p == nil { +// panic(p) +// } +package nilness diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go index 6849c33cce..774f04c94a 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go @@ -2,61 +2,28 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package nilness inspects the control-flow graph of an SSA function -// and reports errors such as nil pointer dereferences and degenerate -// nil pointer comparisons. package nilness import ( + _ "embed" "fmt" "go/token" "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ssa" "golang.org/x/tools/internal/typeparams" ) -const Doc = `check for redundant or impossible nil comparisons - -The nilness checker inspects the control-flow graph of each function in -a package and reports nil pointer dereferences, degenerate nil -pointers, and panics with nil values. A degenerate comparison is of the form -x==nil or x!=nil where x is statically known to be nil or non-nil. These are -often a mistake, especially in control flow related to errors. Panics with nil -values are checked because they are not detectable by - - if r := recover(); r != nil { - -This check reports conditions such as: - - if f == nil { // impossible condition (f is a function) - } - -and: - - p := &v - ... 
- if p != nil { // tautological condition - } - -and: - - if p == nil { - print(*p) // nil dereference - } - -and: - - if p == nil { - panic(p) - } -` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "nilness", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "nilness"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/nilness", Run: run, Requires: []*analysis.Analyzer{buildssa.Analyzer}, } @@ -71,17 +38,21 @@ func run(pass *analysis.Pass) (interface{}, error) { func runFunc(pass *analysis.Pass, fn *ssa.Function) { reportf := func(category string, pos token.Pos, format string, args ...interface{}) { - pass.Report(analysis.Diagnostic{ - Pos: pos, - Category: category, - Message: fmt.Sprintf(format, args...), - }) + // We ignore nil-checking ssa.Instructions + // that don't correspond to syntax. + if pos.IsValid() { + pass.Report(analysis.Diagnostic{ + Pos: pos, + Category: category, + Message: fmt.Sprintf(format, args...), + }) + } } // notNil reports an error if v is provably nil. notNil := func(stack []fact, instr ssa.Instruction, v ssa.Value, descr string) { if nilnessOf(stack, v) == isnil { - reportf("nilderef", instr.Pos(), "nil dereference in "+descr) + reportf("nilderef", instr.Pos(), descr) } } @@ -106,29 +77,50 @@ func runFunc(pass *analysis.Pass, fn *ssa.Function) { // A nil receiver may be okay for type params. cc := instr.Common() if !(cc.IsInvoke() && typeparams.IsTypeParam(cc.Value.Type())) { - notNil(stack, instr, cc.Value, cc.Description()) + notNil(stack, instr, cc.Value, "nil dereference in "+cc.Description()) } case *ssa.FieldAddr: - notNil(stack, instr, instr.X, "field selection") + notNil(stack, instr, instr.X, "nil dereference in field selection") case *ssa.IndexAddr: - notNil(stack, instr, instr.X, "index operation") + switch typeparams.CoreType(instr.X.Type()).(type) { + case *types.Pointer: // *array + notNil(stack, instr, instr.X, "nil dereference in array index operation") + case *types.Slice: + // This is not necessarily a runtime error, because + // it is usually dominated by a bounds check. + if isRangeIndex(instr) { + notNil(stack, instr, instr.X, "range of nil slice") + } else { + notNil(stack, instr, instr.X, "index of nil slice") + } + } case *ssa.MapUpdate: - notNil(stack, instr, instr.Map, "map update") + notNil(stack, instr, instr.Map, "nil dereference in map update") + case *ssa.Range: + // (Not a runtime error, but a likely mistake.) + notNil(stack, instr, instr.X, "range over nil map") case *ssa.Slice: // A nilcheck occurs in ptr[:] iff ptr is a pointer to an array. - if _, ok := instr.X.Type().Underlying().(*types.Pointer); ok { - notNil(stack, instr, instr.X, "slice operation") + if is[*types.Pointer](instr.X.Type().Underlying()) { + notNil(stack, instr, instr.X, "nil dereference in slice operation") } case *ssa.Store: - notNil(stack, instr, instr.Addr, "store") + notNil(stack, instr, instr.Addr, "nil dereference in store") case *ssa.TypeAssert: if !instr.CommaOk { - notNil(stack, instr, instr.X, "type assertion") + notNil(stack, instr, instr.X, "nil dereference in type assertion") } case *ssa.UnOp: - if instr.Op == token.MUL { // *X - notNil(stack, instr, instr.X, "load") + switch instr.Op { + case token.MUL: // *X + notNil(stack, instr, instr.X, "nil dereference in load") + case token.ARROW: // <-ch + // (Not a runtime error, but a likely mistake.) + notNil(stack, instr, instr.X, "receive from nil channel") } + case *ssa.Send: + // (Not a runtime error, but a likely mistake.) 
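// Sketches of the newly added diagnostics above, assuming ch and m are
// provably nil at these points:
//
//	<-ch           // receive from nil channel: blocks forever
//	ch <- 1        // send to nil channel: blocks forever
//	for range m {} // range over nil map: iterates zero times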
+ notNil(stack, instr, instr.Chan, "send to nil channel") } } @@ -218,6 +210,42 @@ func runFunc(pass *analysis.Pass, fn *ssa.Function) { } } + // In code of the form: + // + // if ptr, ok := x.(*T); ok { ... } else { fsucc } + // + // the fsucc block learns that ptr == nil, + // since that's its zero value. + if If, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If); ok { + // Handle "if ok" and "if !ok" variants. + cond, fsucc := If.Cond, b.Succs[1] + if unop, ok := cond.(*ssa.UnOp); ok && unop.Op == token.NOT { + cond, fsucc = unop.X, b.Succs[0] + } + + // Match pattern: + // t0 = typeassert (pointerlike) + // t1 = extract t0 #0 // ptr + // t2 = extract t0 #1 // ok + // if t2 goto tsucc, fsucc + if extract1, ok := cond.(*ssa.Extract); ok && extract1.Index == 1 { + if assert, ok := extract1.Tuple.(*ssa.TypeAssert); ok && + isNillable(assert.AssertedType) { + for _, pinstr := range *assert.Referrers() { + if extract0, ok := pinstr.(*ssa.Extract); ok && + extract0.Index == 0 && + extract0.Tuple == extract1.Tuple { + for _, d := range b.Dominees() { + if len(d.Preds) == 1 && d == fsucc { + visit(d, append(stack, fact{extract0, isnil})) + } + } + } + } + } + } + } + for _, d := range b.Dominees() { visit(d, stack) } @@ -389,3 +417,59 @@ func (ff facts) negate() facts { } return nn } + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} + +func isNillable(t types.Type) bool { + switch t := typeparams.CoreType(t).(type) { + case *types.Pointer, + *types.Map, + *types.Signature, + *types.Chan, + *types.Interface, + *types.Slice: + return true + case *types.Basic: + return t == types.Typ[types.UnsafePointer] + } + return false +} + +// isRangeIndex reports whether the instruction is a slice indexing +// operation slice[i] within a "for range slice" loop. The operation +// could be explicit, such as slice[i] within (or even after) the +// loop, or it could be implicit, such as "for i, v := range slice {}". +// (These cannot be reliably distinguished.) +func isRangeIndex(instr *ssa.IndexAddr) bool { + // Here we reverse-engineer the go/ssa lowering of range-over-slice: + // + // n = len(x) + // jump loop + // loop: "rangeindex.loop" + // phi = φ(-1, incr) #rangeindex + // incr = phi + 1 + // cond = incr < n + // if cond goto body else done + // body: "rangeindex.body" + // instr = &x[incr] + // ... 
+ // done: + if incr, ok := instr.Index.(*ssa.BinOp); ok && incr.Op == token.ADD { + if b := incr.Block(); b.Comment == "rangeindex.loop" { + if If, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If); ok { + if cond := If.Cond.(*ssa.BinOp); cond.X == incr && cond.Op == token.LSS { + if call, ok := cond.Y.(*ssa.Call); ok { + common := call.Common() + if blt, ok := common.Value.(*ssa.Builtin); ok && blt.Name() == "len" { + return common.Args[0] == instr.X + } + } + } + } + } + } + return false +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go b/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go index f4f5616e56..4bf33d45f5 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go @@ -38,6 +38,7 @@ import ( var Analyzer = &analysis.Analyzer{ Name: "pkgfact", Doc: "gather name/value pairs from constant declarations", + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/pkgfact", Run: run, FactTypes: []analysis.Fact{new(pairsFact)}, ResultType: reflect.TypeOf(map[string]string{}), diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go new file mode 100644 index 0000000000..1ee16126ad --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go @@ -0,0 +1,47 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package printf defines an Analyzer that checks consistency +// of Printf format strings and arguments. +// +// # Analyzer printf +// +// printf: check consistency of Printf format strings and arguments +// +// The check applies to calls of the formatting functions such as +// [fmt.Printf] and [fmt.Sprintf], as well as any detected wrappers of +// those functions. +// +// In this example, the %d format operator requires an integer operand: +// +// fmt.Printf("%d", "hello") // fmt.Printf format %d has arg "hello" of wrong type string +// +// See the documentation of the fmt package for the complete set of +// format operators and their operand types. +// +// To enable printf checking on a function that is not found by this +// analyzer's heuristics (for example, because control is obscured by +// dynamic method calls), insert a bogus call: +// +// func MyPrintf(format string, args ...any) { +// if false { +// _ = fmt.Sprintf(format, args...) // enable printf checker +// } +// ... +// } +// +// The -funcs flag specifies a comma-separated list of names of additional +// known formatting functions or methods. If the name contains a period, +// it must denote a specific function using one of the following forms: +// +// dir/pkg.Function +// dir/pkg.Type.Method +// (*dir/pkg.Type).Method +// +// Otherwise the name is interpreted as a case-insensitive unqualified +// identifier such as "errorf". Either way, if a listed name ends in f, the +// function is assumed to be Printf-like, taking a format string before the +// argument list. Otherwise it is assumed to be Print-like, taking a list +// of arguments with no format string. 
+package printf diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go index daaf709a44..3235019258 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -2,12 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package printf defines an Analyzer that checks consistency -// of Printf format strings and arguments. package printf import ( "bytes" + _ "embed" "fmt" "go/ast" "go/constant" @@ -25,6 +24,7 @@ import ( "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -32,43 +32,19 @@ func init() { Analyzer.Flags.Var(isPrint, "funcs", "comma-separated list of print function names to check") } +//go:embed doc.go +var doc string + var Analyzer = &analysis.Analyzer{ Name: "printf", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "printf"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/printf", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, ResultType: reflect.TypeOf((*Result)(nil)), FactTypes: []analysis.Fact{new(isWrapper)}, } -const Doc = `check consistency of Printf format strings and arguments - -The check applies to known functions (for example, those in package fmt) -as well as any detected wrappers of known functions. - -A function that wants to avail itself of printf checking but is not -found by this analyzer's heuristics (for example, due to use of -dynamic calls) can insert a bogus call: - - if false { - _ = fmt.Sprintf(format, args...) // enable printf checking - } - -The -funcs flag specifies a comma-separated list of names of additional -known formatting functions or methods. If the name contains a period, -it must denote a specific function using one of the following forms: - - dir/pkg.Function - dir/pkg.Type.Method - (*dir/pkg.Type).Method - -Otherwise the name is interpreted as a case-insensitive unqualified -identifier such as "errorf". Either way, if a listed name ends in f, the -function is assumed to be Printf-like, taking a format string before the -argument list. Otherwise it is assumed to be Print-like, taking a list -of arguments with no format string. -` - // Kind is a kind of fmt function behavior. type Kind int @@ -303,7 +279,7 @@ func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, k // print/printf function can take, adding an ellipsis // would break the program. For example: // - // func foo(arg1 string, arg2 ...interface{} { + // func foo(arg1 string, arg2 ...interface{}) { // fmt.Printf("%s %v", arg1, arg2) // } return @@ -340,9 +316,10 @@ func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, k // example, fmt.Printf forwards to fmt.Fprintf. We avoid relying on the // driver applying analyzers to standard packages because "go vet" does // not do so with gccgo, and nor do some other build systems. -// TODO(adonovan): eliminate the redundant facts once this restriction -// is lifted. 
var isPrint = stringSet{ + "fmt.Appendf": true, + "fmt.Append": true, + "fmt.Appendln": true, "fmt.Errorf": true, "fmt.Fprint": true, "fmt.Fprintf": true, @@ -535,15 +512,10 @@ func isFormatter(typ types.Type) bool { sig := fn.Type().(*types.Signature) return sig.Params().Len() == 2 && sig.Results().Len() == 0 && - isNamed(sig.Params().At(0).Type(), "fmt", "State") && + analysisutil.IsNamedType(sig.Params().At(0).Type(), "fmt", "State") && types.Identical(sig.Params().At(1).Type(), types.Typ[types.Rune]) } -func isNamed(T types.Type, pkgpath, name string) bool { - named, ok := T.(*types.Named) - return ok && named.Obj().Pkg().Path() == pkgpath && named.Obj().Name() == name -} - // formatState holds the parsed representation of a printf directive such as "%3.*[4]d". // It is constructed by parsePrintfVerb. type formatState struct { @@ -988,6 +960,8 @@ func isStringer(sig *types.Signature) bool { // It is almost always a mistake to print a function value. func isFunctionValue(pass *analysis.Pass, e ast.Expr) bool { if typ := pass.TypesInfo.Types[e].Type; typ != nil { + // Don't call Underlying: a named func type with a String method is ok. + // TODO(adonovan): it would be more precise to check isStringer. _, ok := typ.(*types.Signature) return ok } @@ -1039,7 +1013,7 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) { // Skip checking functions with unknown type. return } - if sig, ok := typ.(*types.Signature); ok { + if sig, ok := typ.Underlying().(*types.Signature); ok { if !sig.Variadic() { // Skip checking non-variadic functions. return @@ -1049,7 +1023,7 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) { typ := params.At(firstArg).Type() typ = typ.(*types.Slice).Elem() - it, ok := typ.(*types.Interface) + it, ok := aliases.Unalias(typ).(*types.Interface) if !ok || !it.Empty() { // Skip variadic functions accepting non-interface{} args. return @@ -1080,7 +1054,7 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) { if strings.Contains(s, "%") { m := printFormatRE.FindStringSubmatch(s) if m != nil { - pass.ReportRangef(call, "%s call has possible formatting directive %s", fn.FullName(), m[0]) + pass.ReportRangef(call, "%s call has possible Printf formatting directive %s", fn.FullName(), m[0]) } } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go index 7cbb0bdbf5..017c8a247e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go @@ -10,6 +10,7 @@ import ( "go/types" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -72,7 +73,7 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool { return true } - if typ, _ := typ.(*typeparams.TypeParam); typ != nil { + if typ, _ := aliases.Unalias(typ).(*types.TypeParam); typ != nil { // Avoid infinite recursion through type parameters. if m.seen[typ] { return true @@ -275,7 +276,7 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool { } func isConvertibleToString(typ types.Type) bool { - if bt, ok := typ.(*types.Basic); ok && bt.Kind() == types.UntypedNil { + if bt, ok := aliases.Unalias(typ).(*types.Basic); ok && bt.Kind() == types.UntypedNil { // We explicitly don't want untyped nil, which is // convertible to both of the interfaces below, as it // would just panic anyway. 
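As a hedged usage sketch (Logf and example.com/pkg.Wrapf are placeholder names, and the flag spelling assumes vet's usual analyzername.flagname forwarding), the -funcs list documented above can be passed through go vet:

    go vet -printf.funcs=Logf,example.com/pkg.Wrapf ./...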
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/doc.go new file mode 100644 index 0000000000..32f342b97f --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/doc.go @@ -0,0 +1,27 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package reflectvaluecompare defines an Analyzer that checks for accidentally +// using == or reflect.DeepEqual to compare reflect.Value values. +// See issues 43993 and 18871. +// +// # Analyzer reflectvaluecompare +// +// reflectvaluecompare: check for comparing reflect.Value values with == or reflect.DeepEqual +// +// The reflectvaluecompare checker looks for expressions of the form: +// +// v1 == v2 +// v1 != v2 +// reflect.DeepEqual(v1, v2) +// +// where v1 or v2 are reflect.Values. Comparing reflect.Values directly +// is almost certainly not correct, as it compares the reflect package's +// internal representation, not the underlying value. +// Likely what is intended is: +// +// v1.Interface() == v2.Interface() +// v1.Interface() != v2.Interface() +// reflect.DeepEqual(v1.Interface(), v2.Interface()) +package reflectvaluecompare diff --git a/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go b/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go index ef21f0e7da..6789d73579 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go @@ -2,43 +2,28 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package reflectvaluecompare defines an Analyzer that checks for accidentally -// using == or reflect.DeepEqual to compare reflect.Value values. -// See issues 43993 and 18871. package reflectvaluecompare import ( + _ "embed" "go/ast" "go/token" "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" ) -const Doc = `check for comparing reflect.Value values with == or reflect.DeepEqual - -The reflectvaluecompare checker looks for expressions of the form: - - v1 == v2 - v1 != v2 - reflect.DeepEqual(v1, v2) - -where v1 or v2 are reflect.Values. Comparing reflect.Values directly -is almost certainly not correct, as it compares the reflect package's -internal representation, not the underlying value. 
-Likely what is intended is: - - v1.Interface() == v2.Interface() - v1.Interface() != v2.Interface() - reflect.DeepEqual(v1.Interface(), v2.Interface()) -` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "reflectvaluecompare", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "reflectvaluecompare"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/reflectvaluecompare", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -64,11 +49,8 @@ func run(pass *analysis.Pass) (interface{}, error) { } } case *ast.CallExpr: - fn, ok := typeutil.Callee(pass.TypesInfo, n).(*types.Func) - if !ok { - return - } - if fn.FullName() == "reflect.DeepEqual" && (isReflectValue(pass, n.Args[0]) || isReflectValue(pass, n.Args[1])) { + fn, _ := typeutil.Callee(pass.TypesInfo, n).(*types.Func) + if analysisutil.IsFunctionNamed(fn, "reflect", "DeepEqual") && (isReflectValue(pass, n.Args[0]) || isReflectValue(pass, n.Args[1])) { pass.ReportRangef(n, "avoid using reflect.DeepEqual with reflect.Value") } } @@ -83,11 +65,7 @@ func isReflectValue(pass *analysis.Pass, e ast.Expr) bool { return false } // See if the type is reflect.Value - named, ok := tv.Type.(*types.Named) - if !ok { - return false - } - if obj := named.Obj(); obj == nil || obj.Pkg() == nil || obj.Pkg().Path() != "reflect" || obj.Name() != "Value" { + if !analysisutil.IsNamedType(tv.Type, "reflect", "Value") { return false } if _, ok := e.(*ast.CompositeLit); ok { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/shadow/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/shadow/doc.go new file mode 100644 index 0000000000..781fd2eb81 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/shadow/doc.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package shadow defines an Analyzer that checks for shadowed variables. +// +// # Analyzer shadow +// +// shadow: check for possible unintended shadowing of variables +// +// This analyzer checks for shadowed variables. +// A shadowed variable is a variable declared in an inner scope +// with the same name and type as a variable in an outer scope, +// and where the outer variable is mentioned after the inner one +// is declared. +// +// (This definition can be refined; the module generates too many +// false positives and is not yet enabled by default.) +// +// For example: +// +// func BadRead(f *os.File, buf []byte) error { +// var err error +// for { +// n, err := f.Read(buf) // shadows the function variable 'err' +// if err != nil { +// break // causes return of wrong value +// } +// foo(buf) +// } +// return err +// } +package shadow diff --git a/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go b/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go index a19cecd14b..30258c991f 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go @@ -2,50 +2,29 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package shadow defines an Analyzer that checks for shadowed variables.
package shadow import ( + _ "embed" "go/ast" "go/token" "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" ) // NOTE: Experimental. Not part of the vet suite. -const Doc = `check for possible unintended shadowing of variables - -This analyzer check for shadowed variables. -A shadowed variable is a variable declared in an inner scope -with the same name and type as a variable in an outer scope, -and where the outer variable is mentioned after the inner one -is declared. - -(This definition can be refined; the module generates too many -false positives and is not yet enabled by default.) - -For example: - - func BadRead(f *os.File, buf []byte) error { - var err error - for { - n, err := f.Read(buf) // shadows the function variable 'err' - if err != nil { - break // causes return of wrong value - } - foo(buf) - } - return err - } -` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "shadow", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "shadow"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/shadow", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go b/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go index e968f27b40..d01eb1eebe 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go @@ -21,6 +21,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -29,6 +30,7 @@ const Doc = "check for shifts that equal or exceed the width of the integer" var Analyzer = &analysis.Analyzer{ Name: "shift", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/shift", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -88,7 +90,8 @@ func checkLongShift(pass *analysis.Pass, node ast.Node, x, y ast.Expr) { if v == nil { return } - amt, ok := constant.Int64Val(v) + u := constant.ToInt(v) // either an Int or Unknown + amt, ok := constant.Int64Val(u) if !ok { return } @@ -97,8 +100,8 @@ func checkLongShift(pass *analysis.Pass, node ast.Node, x, y ast.Expr) { return } var structuralTypes []types.Type - switch t := t.(type) { - case *typeparams.TypeParam: + switch t := aliases.Unalias(t).(type) { + case *types.TypeParam: terms, err := typeparams.StructuralTerms(t) if err != nil { return // invalid type diff --git a/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/doc.go new file mode 100644 index 0000000000..583fed0147 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/doc.go @@ -0,0 +1,17 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sigchanyzer defines an Analyzer that detects +// misuse of unbuffered signal as argument to signal.Notify. 
+// +// # Analyzer sigchanyzer +// +// sigchanyzer: check for unbuffered channel of os.Signal +// +// This checker reports call expressions of the form +// +// signal.Notify(c <-chan os.Signal, sig ...os.Signal), +// +// where c is an unbuffered channel, which can be at risk of missing the signal. +package sigchanyzer diff --git a/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go b/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go index c490a84ea7..5f121f720d 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go @@ -8,6 +8,7 @@ package sigchanyzer import ( "bytes" + _ "embed" "go/ast" "go/format" "go/token" @@ -15,23 +16,27 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" ) -const Doc = `check for unbuffered channel of os.Signal - -This checker reports call expression of the form signal.Notify(c <-chan os.Signal, sig ...os.Signal), -where c is an unbuffered channel, which can be at risk of missing the signal.` +//go:embed doc.go +var doc string // Analyzer describes sigchanyzer analysis function detector. var Analyzer = &analysis.Analyzer{ Name: "sigchanyzer", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "sigchanyzer"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/sigchanyzer", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (interface{}, error) { + if !analysisutil.Imports(pass.Pkg, "os/signal") { + return nil, nil // doesn't directly import signal + } + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ diff --git a/vendor/golang.org/x/tools/go/analysis/passes/slog/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/slog/doc.go new file mode 100644 index 0000000000..ecb10e0948 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/slog/doc.go @@ -0,0 +1,23 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slog defines an Analyzer that checks for +// mismatched key-value pairs in log/slog calls. +// +// # Analyzer slog +// +// slog: check for invalid structured logging calls +// +// The slog checker looks for calls to functions from the log/slog +// package that take alternating key-value pairs. It reports calls +// where an argument in a key position is neither a string nor a +// slog.Attr, and where a final key is missing its value. +// For example, it would report +// +// slog.Warn("message", 11, "k") // slog.Warn arg "11" should be a string or a slog.Attr +// +// and +// +// slog.Info("message", "k1", v1, "k2") // call to slog.Info missing a final value +package slog diff --git a/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go b/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go new file mode 100644 index 0000000000..b3c683b61c --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go @@ -0,0 +1,226 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// TODO(jba) deduce which functions wrap the log/slog functions, and use the +// fact mechanism to propagate this information, so we can provide diagnostics +// for user-supplied wrappers. + +package slog + +import ( + _ "embed" + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/typesinternal" +) + +//go:embed doc.go +var doc string + +var Analyzer = &analysis.Analyzer{ + Name: "slog", + Doc: analysisutil.MustExtractDoc(doc, "slog"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/slog", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +var stringType = types.Universe.Lookup("string").Type() + +// A position describes what is expected to appear in an argument position. +type position int + +const ( + // key is an argument position that should hold a string key or an Attr. + key position = iota + // value is an argument position that should hold a value. + value + // unknown represents that we do not know if position should hold a key or a value. + unknown +) + +func run(pass *analysis.Pass) (any, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(node ast.Node) { + call := node.(*ast.CallExpr) + fn := typeutil.StaticCallee(pass.TypesInfo, call) + if fn == nil { + return // not a static call + } + if call.Ellipsis != token.NoPos { + return // skip calls with "..." args + } + skipArgs, ok := kvFuncSkipArgs(fn) + if !ok { + // Not a slog function that takes key-value pairs. + return + } + if isMethodExpr(pass.TypesInfo, call) { + // Call is to a method value. Skip the first argument. + skipArgs++ + } + if len(call.Args) <= skipArgs { + // Too few args; perhaps there are no k-v pairs. + return + } + + // Check this call. + // The first position should hold a key or Attr. + pos := key + var unknownArg ast.Expr // nil or the last unknown argument + for _, arg := range call.Args[skipArgs:] { + t := pass.TypesInfo.Types[arg].Type + switch pos { + case key: + // Expect a string or Attr. + switch { + case t == stringType: + pos = value + case isAttr(t): + pos = key + case types.IsInterface(t): + // As we do not do dataflow, we do not know what the dynamic type is. + // It could be a string or an Attr so we don't know what to expect next. + pos = unknown + default: + if unknownArg == nil { + pass.ReportRangef(arg, "%s arg %q should be a string or a slog.Attr (possible missing key or value)", + shortName(fn), analysisutil.Format(pass.Fset, arg)) + } else { + pass.ReportRangef(arg, "%s arg %q should probably be a string or a slog.Attr (previous arg %q cannot be a key)", + shortName(fn), analysisutil.Format(pass.Fset, arg), analysisutil.Format(pass.Fset, unknownArg)) + } + // Stop here so we report at most one missing key per call. + return + } + + case value: + // Anything can appear in this position. + // The next position should be a key. + pos = key + + case unknown: + // Once we encounter an unknown position, we can never be + // sure if a problem later or at the end of the call is due to a + // missing final value, or a non-key in key position. + // In both cases, unknownArg != nil. + unknownArg = arg + + // We don't know what is expected about this position, but all hope is not lost. 
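// Concretely, the position machine above accepts and rejects calls such as
// (a sketch; v1 is a placeholder):
//
//	slog.Info("msg", "k1", v1, "k2", 2) // ok: alternating keys and values
//	slog.Info("msg", "k1", v1, "k2")    // missing a final value
//	slog.Info("msg", 11, "k")           // 11 cannot be a key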
+ if t != stringType && !isAttr(t) && !types.IsInterface(t) { + // This argument is definitely not a key. + // + // unknownArg cannot have been a key, in which case this is the + // corresponding value, and the next position should hold another key. + pos = key + } + } + } + if pos == value { + if unknownArg == nil { + pass.ReportRangef(call, "call to %s missing a final value", shortName(fn)) + } else { + pass.ReportRangef(call, "call to %s has a missing or misplaced value", shortName(fn)) + } + } + }) + return nil, nil +} + +func isAttr(t types.Type) bool { + return analysisutil.IsNamedType(t, "log/slog", "Attr") +} + +// shortName returns a name for the function that is shorter than FullName. +// Examples: +// +// "slog.Info" (instead of "log/slog.Info") +// "slog.Logger.With" (instead of "(*log/slog.Logger).With") +func shortName(fn *types.Func) string { + var r string + if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + if _, named := typesinternal.ReceiverNamed(recv); named != nil { + r = named.Obj().Name() + } else { + r = recv.Type().String() // anon struct/interface + } + r += "." + } + return fmt.Sprintf("%s.%s%s", fn.Pkg().Name(), r, fn.Name()) +} + +// If fn is a slog function that has a ...any parameter for key-value pairs, +// kvFuncSkipArgs returns the number of arguments to skip over to reach the +// corresponding arguments, and true. +// Otherwise it returns (0, false). +func kvFuncSkipArgs(fn *types.Func) (int, bool) { + if pkg := fn.Pkg(); pkg == nil || pkg.Path() != "log/slog" { + return 0, false + } + var recvName string // by default a slog package function + if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + _, named := typesinternal.ReceiverNamed(recv) + if named == nil { + return 0, false // anon struct/interface + } + recvName = named.Obj().Name() + } + skip, ok := kvFuncs[recvName][fn.Name()] + return skip, ok +} + +// The names of functions and methods in log/slog that take +// ...any for key-value pairs, mapped to the number of initial args to skip in +// order to get to the ones that match the ...any parameter. +// The first key is the dereferenced receiver type name, or "" for a function. +var kvFuncs = map[string]map[string]int{ + "": map[string]int{ + "Debug": 1, + "Info": 1, + "Warn": 1, + "Error": 1, + "DebugContext": 2, + "InfoContext": 2, + "WarnContext": 2, + "ErrorContext": 2, + "Log": 3, + "Group": 1, + }, + "Logger": map[string]int{ + "Debug": 1, + "Info": 1, + "Warn": 1, + "Error": 1, + "DebugContext": 2, + "InfoContext": 2, + "WarnContext": 2, + "ErrorContext": 2, + "Log": 3, + "With": 0, + }, + "Record": map[string]int{ + "Add": 0, + }, +} + +// isMethodExpr reports whether a call is to a MethodExpr. 
+func isMethodExpr(info *types.Info, c *ast.CallExpr) bool { + s, ok := c.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + sel := info.Selections[s] + return sel != nil && sel.Kind() == types.MethodExpr +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/sortslice/analyzer.go b/vendor/golang.org/x/tools/go/analysis/passes/sortslice/analyzer.go index f85837d66b..6c151a02c1 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/sortslice/analyzer.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/sortslice/analyzer.go @@ -15,6 +15,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" ) @@ -27,11 +28,16 @@ the interface{} value passed to sort.Slice is actually a slice.` var Analyzer = &analysis.Analyzer{ Name: "sortslice", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/sortslice", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (interface{}, error) { + if !analysisutil.Imports(pass.Pkg, "sort") { + return nil, nil // doesn't directly import sort + } + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ @@ -41,12 +47,7 @@ func run(pass *analysis.Pass) (interface{}, error) { inspect.Preorder(nodeFilter, func(n ast.Node) { call := n.(*ast.CallExpr) fn, _ := typeutil.Callee(pass.TypesInfo, call).(*types.Func) - if fn == nil { - return - } - - fnName := fn.FullName() - if fnName != "sort.Slice" && fnName != "sort.SliceStable" && fnName != "sort.SliceIsSorted" { + if !analysisutil.IsFunctionNamed(fn, "sort", "Slice", "SliceStable", "SliceIsSorted") { return } @@ -125,7 +126,7 @@ func run(pass *analysis.Pass) (interface{}, error) { pass.Report(analysis.Diagnostic{ Pos: call.Pos(), End: call.End(), - Message: fmt.Sprintf("%s's argument must be a slice; is called with %s", fnName, typ.String()), + Message: fmt.Sprintf("%s's argument must be a slice; is called with %s", fn.FullName(), typ.String()), SuggestedFixes: fixes, }) }) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/doc.go new file mode 100644 index 0000000000..9ed88698dd --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/doc.go @@ -0,0 +1,30 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package stdmethods defines an Analyzer that checks for misspellings +// in the signatures of methods similar to well-known interfaces. +// +// # Analyzer stdmethods +// +// stdmethods: check signature of methods of well-known interfaces +// +// Sometimes a type may be intended to satisfy an interface but may fail to +// do so because of a mistake in its method signature. +// For example, the result of this WriteTo method should be (int64, error), +// not error, to satisfy io.WriterTo: +// +// type myWriterTo struct{...} +// func (myWriterTo) WriteTo(w io.Writer) error { ... } +// +// This check ensures that each method whose name matches one of several +// well-known interface methods from the standard library has the correct +// signature for that interface. 
+// +// Checked method names include: +// +// Format GobEncode GobDecode MarshalJSON MarshalXML +// Peek ReadByte ReadFrom ReadRune Scan Seek +// UnmarshalJSON UnreadByte UnreadRune WriteByte +// WriteTo +package stdmethods diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go b/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go index 41f455d100..28f51b1ec9 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go @@ -2,44 +2,27 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package stdmethods defines an Analyzer that checks for misspellings -// in the signatures of methods similar to well-known interfaces. package stdmethods import ( + _ "embed" "go/ast" "go/types" "strings" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" ) -const Doc = `check signature of methods of well-known interfaces - -Sometimes a type may be intended to satisfy an interface but may fail to -do so because of a mistake in its method signature. -For example, the result of this WriteTo method should be (int64, error), -not error, to satisfy io.WriterTo: - - type myWriterTo struct{...} - func (myWriterTo) WriteTo(w io.Writer) error { ... } - -This check ensures that each method whose name matches one of several -well-known interface methods from the standard library has the correct -signature for that interface. - -Checked method names include: - Format GobEncode GobDecode MarshalJSON MarshalXML - Peek ReadByte ReadFrom ReadRune Scan Seek - UnmarshalJSON UnreadByte UnreadRune WriteByte - WriteTo -` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "stdmethods", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "stdmethods"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stdmethods", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/doc.go new file mode 100644 index 0000000000..205cd64011 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/doc.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package stringintconv defines an Analyzer that flags type conversions +// from integers to strings. +// +// # Analyzer stringintconv +// +// stringintconv: check for string(int) conversions +// +// This checker flags conversions of the form string(x) where x is an integer +// (but not byte or rune) type. Such conversions are discouraged because they +// return the UTF-8 representation of the Unicode code point x, and not a decimal +// string representation of x as one might expect. Furthermore, if x denotes an +// invalid code point, the conversion cannot be statically rejected. +// +// For conversions that intend on using the code point, consider replacing them +// with string(rune(x)). Otherwise, strconv.Itoa and its equivalents return the +// string representation of the value in the desired base. 
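As a brief sketch of the conversion pitfall this comment describes (the value 65 is arbitrary, and strconv must be imported for the last variant):

    x := 65
    _ = string(x)        // flagged: yields "A" (U+0041), not "65"
    _ = string(rune(x))  // explicit code-point conversion; not flagged
    _ = strconv.Itoa(x)  // decimal representation "65"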
+package stringintconv diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go index e41de809de..16a4b3e551 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go @@ -2,11 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package stringintconv defines an Analyzer that flags type conversions -// from integers to strings. package stringintconv import ( + _ "embed" "fmt" "go/ast" "go/types" @@ -14,26 +13,19 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) -const Doc = `check for string(int) conversions - -This checker flags conversions of the form string(x) where x is an integer -(but not byte or rune) type. Such conversions are discouraged because they -return the UTF-8 representation of the Unicode code point x, and not a decimal -string representation of x as one might expect. Furthermore, if x denotes an -invalid code point, the conversion cannot be statically rejected. - -For conversions that intend on using the code point, consider replacing them -with string(rune(x)). Otherwise, strconv.Itoa and its equivalents return the -string representation of the value in the desired base. -` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "stringintconv", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "stringintconv"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stringintconv", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -68,10 +60,12 @@ func describe(typ, inType types.Type, inName string) string { } func typeName(typ types.Type) string { - if v, _ := typ.(interface{ Name() string }); v != nil { + typ = aliases.Unalias(typ) + // TODO(adonovan): don't discard alias type, return its name. 
+	if v, _ := typ.(*types.Basic); v != nil { return v.Name() } - if v, _ := typ.(interface{ Obj() *types.TypeName }); v != nil { + if v, _ := typ.(interface{ Obj() *types.TypeName }); v != nil { // Named, TypeParam return v.Obj().Name() } return "" @@ -203,16 +197,15 @@ func run(pass *analysis.Pass) (interface{}, error) { func structuralTypes(t types.Type) ([]types.Type, error) { var structuralTypes []types.Type - switch t := t.(type) { - case *typeparams.TypeParam: - terms, err := typeparams.StructuralTerms(t) + if tp, ok := aliases.Unalias(t).(*types.TypeParam); ok { + terms, err := typeparams.StructuralTerms(tp) if err != nil { return nil, err } for _, term := range terms { structuralTypes = append(structuralTypes, term.Type()) } - default: + } else { structuralTypes = append(structuralTypes, t) } return structuralTypes, nil diff --git a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go index f0b15051c5..a0beb46bd1 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go @@ -28,6 +28,7 @@ Also report certain struct tags (json, xml) used with unexported fields.` var Analyzer = &analysis.Analyzer{ Name: "structtag", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/structtag", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/doc.go new file mode 100644 index 0000000000..4cd5b71e9e --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/doc.go @@ -0,0 +1,22 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package testinggoroutine defines an Analyzer for detecting calls to +// Fatal from a test goroutine. +// +// # Analyzer testinggoroutine +// +// testinggoroutine: report calls to (*testing.T).Fatal from goroutines started by a test +// +// Functions that abruptly terminate a test, such as the Fatal, Fatalf, FailNow, and +// Skip{,f,Now} methods of *testing.T, must be called from the test goroutine itself. +// This checker detects calls to these functions that occur within a goroutine +// started by the test.
For example: +// +// func TestFoo(t *testing.T) { +// go func() { +// t.Fatal("oops") // error: (*T).Fatal called from non-test goroutine +// }() +// } +package testinggoroutine diff --git a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go index 7ea8f77e33..828f95bc86 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go @@ -5,45 +5,38 @@ package testinggoroutine import ( + _ "embed" + "fmt" "go/ast" + "go/token" + "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/aliases" ) -const Doc = `report calls to (*testing.T).Fatal from goroutines started by a test. +//go:embed doc.go +var doc string -Functions that abruptly terminate a test, such as the Fatal, Fatalf, FailNow, and -Skip{,f,Now} methods of *testing.T, must be called from the test goroutine itself. -This checker detects calls to these functions that occur within a goroutine -started by the test. For example: +var reportSubtest bool -func TestFoo(t *testing.T) { - go func() { - t.Fatal("oops") // error: (*T).Fatal called from non-test goroutine - }() +func init() { + Analyzer.Flags.BoolVar(&reportSubtest, "subtest", false, "whether to check if t.Run subtest is terminated correctly; experimental") } -` var Analyzer = &analysis.Analyzer{ Name: "testinggoroutine", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "testinggoroutine"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/testinggoroutine", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } -var forbidden = map[string]bool{ - "FailNow": true, - "Fatal": true, - "Fatalf": true, - "Skip": true, - "Skipf": true, - "SkipNow": true, -} - func run(pass *analysis.Pass) (interface{}, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) @@ -51,38 +44,90 @@ func run(pass *analysis.Pass) (interface{}, error) { return nil, nil } - // Filter out anything that isn't a function declaration. - onlyFuncs := []ast.Node{ - (*ast.FuncDecl)(nil), + toDecl := localFunctionDecls(pass.TypesInfo, pass.Files) + + // asyncs maps nodes whose statements will be executed concurrently + // with respect to some test function, to the call sites where they + // are invoked asynchronously. There may be multiple such call sites + // for e.g. test helpers. + asyncs := make(map[ast.Node][]*asyncCall) + var regions []ast.Node + addCall := func(c *asyncCall) { + if c != nil { + r := c.region + if asyncs[r] == nil { + regions = append(regions, r) + } + asyncs[r] = append(asyncs[r], c) + } } - inspect.Nodes(onlyFuncs, func(node ast.Node, push bool) bool { - fnDecl, ok := node.(*ast.FuncDecl) - if !ok { + // Collect all of the go callee() and t.Run(name, callee) extents. 
+ inspect.Nodes([]ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.GoStmt)(nil), + (*ast.CallExpr)(nil), + }, func(node ast.Node, push bool) bool { + if !push { return false } + switch node := node.(type) { + case *ast.FuncDecl: + return hasBenchmarkOrTestParams(node) - if !hasBenchmarkOrTestParams(fnDecl) { - return false + case *ast.GoStmt: + c := goAsyncCall(pass.TypesInfo, node, toDecl) + addCall(c) + + case *ast.CallExpr: + c := tRunAsyncCall(pass.TypesInfo, node) + addCall(c) } + return true + }) + + // Check for t.Forbidden() calls within each region r that is a + // callee in some go r() or a t.Run("name", r). + // + // Also considers a special case when r is a go t.Forbidden() call. + for _, region := range regions { + ast.Inspect(region, func(n ast.Node) bool { + if n == region { + return true // always descend into the region itself. + } else if asyncs[n] != nil { + return false // will be visited by another region. + } - // Now traverse the benchmark/test's body and check that none of the - // forbidden methods are invoked in the goroutines within the body. - ast.Inspect(fnDecl, func(n ast.Node) bool { - goStmt, ok := n.(*ast.GoStmt) + call, ok := n.(*ast.CallExpr) if !ok { return true } + x, sel, fn := forbiddenMethod(pass.TypesInfo, call) + if x == nil { + return true + } - checkGoStmt(pass, goStmt) + for _, e := range asyncs[region] { + if !withinScope(e.scope, x) { + forbidden := formatMethod(sel, fn) // e.g. "(*testing.T).Forbidden - // No need to further traverse the GoStmt since right - // above we manually traversed it in the ast.Inspect(goStmt, ...) - return false + var context string + var where analysis.Range = e.async // Put the report at the go fun() or t.Run(name, fun). + if _, local := e.fun.(*ast.FuncLit); local { + where = call // Put the report at the t.Forbidden() call. + } else if id, ok := e.fun.(*ast.Ident); ok { + context = fmt.Sprintf(" (%s calls %s)", id.Name, forbidden) + } + if _, ok := e.async.(*ast.GoStmt); ok { + pass.ReportRangef(where, "call to %s from a non-test goroutine%s", forbidden, context) + } else if reportSubtest { + pass.ReportRangef(where, "call to %s on %s defined outside of the subtest%s", forbidden, x.Name(), context) + } + } + } + return true }) - - return false - }) + } return nil, nil } @@ -109,7 +154,6 @@ func typeIsTestingDotTOrB(expr ast.Expr) (string, bool) { if !ok { return "", false } - varPkg := selExpr.X.(*ast.Ident) if varPkg.Name != "testing" { return "", false @@ -120,73 +164,116 @@ func typeIsTestingDotTOrB(expr ast.Expr) (string, bool) { return varTypeName, ok } -// goStmtFunc returns the ast.Node of a call expression -// that was invoked as a go statement. Currently, only -// function literals declared in the same function, and -// static calls within the same package are supported. -func goStmtFun(goStmt *ast.GoStmt) ast.Node { - switch fun := goStmt.Call.Fun.(type) { - case *ast.IndexExpr, *typeparams.IndexListExpr: - x, _, _, _ := typeparams.UnpackIndexExpr(fun) - id, _ := x.(*ast.Ident) - if id == nil { - break - } - if id.Obj == nil { - break - } - if funDecl, ok := id.Obj.Decl.(ast.Node); ok { - return funDecl - } - case *ast.Ident: - // TODO(cuonglm): improve this once golang/go#48141 resolved. 
- if fun.Obj == nil { - break - } - if funDecl, ok := fun.Obj.Decl.(ast.Node); ok { - return funDecl - } - case *ast.FuncLit: - return goStmt.Call.Fun +// asyncCall describes a region of code that needs to be checked for +// t.Forbidden() calls as it is started asynchronously from an async +// node go fun() or t.Run(name, fun). +type asyncCall struct { + region ast.Node // region of code to check for t.Forbidden() calls. + async ast.Node // *ast.GoStmt or *ast.CallExpr (for t.Run) + scope ast.Node // Report t.Forbidden() if t is not declared within scope. + fun ast.Expr // fun in go fun() or t.Run(name, fun) +} + +// withinScope returns true if x.Pos() is in [scope.Pos(), scope.End()]. +func withinScope(scope ast.Node, x *types.Var) bool { + if scope != nil { + return x.Pos() != token.NoPos && scope.Pos() <= x.Pos() && x.Pos() <= scope.End() } - return goStmt.Call + return false } -// checkGoStmt traverses the goroutine and checks for the -// use of the forbidden *testing.(B, T) methods. -func checkGoStmt(pass *analysis.Pass, goStmt *ast.GoStmt) { - fn := goStmtFun(goStmt) - // Otherwise examine the goroutine to check for the forbidden methods. - ast.Inspect(fn, func(n ast.Node) bool { - selExpr, ok := n.(*ast.SelectorExpr) - if !ok { - return true - } +// goAsyncCall returns the extent of a call from a go fun() statement. +func goAsyncCall(info *types.Info, goStmt *ast.GoStmt, toDecl func(*types.Func) *ast.FuncDecl) *asyncCall { + call := goStmt.Call - _, bad := forbidden[selExpr.Sel.Name] - if !bad { - return true + fun := astutil.Unparen(call.Fun) + if id := funcIdent(fun); id != nil { + if lit := funcLitInScope(id); lit != nil { + return &asyncCall{region: lit, async: goStmt, scope: nil, fun: fun} } + } - // Now filter out false positives by the import-path/type. - ident, ok := selExpr.X.(*ast.Ident) - if !ok { - return true - } - if ident.Obj == nil || ident.Obj.Decl == nil { - return true - } - field, ok := ident.Obj.Decl.(*ast.Field) - if !ok { - return true + if fn := typeutil.StaticCallee(info, call); fn != nil { // static call or method in the package? + if decl := toDecl(fn); decl != nil { + return &asyncCall{region: decl, async: goStmt, scope: nil, fun: fun} } - if typeName, ok := typeIsTestingDotTOrB(field.Type); ok { - var fnRange analysis.Range = goStmt - if _, ok := fn.(*ast.FuncLit); ok { - fnRange = selExpr - } - pass.ReportRangef(fnRange, "call to (*%s).%s from a non-test goroutine", typeName, selExpr.Sel) + } + + // Check go statement for go t.Forbidden() or go func(){t.Forbidden()}(). + return &asyncCall{region: goStmt, async: goStmt, scope: nil, fun: fun} +} + +// tRunAsyncCall returns the extent of a call from a t.Run("name", fun) expression. +func tRunAsyncCall(info *types.Info, call *ast.CallExpr) *asyncCall { + if len(call.Args) != 2 { + return nil + } + run := typeutil.Callee(info, call) + if run, ok := run.(*types.Func); !ok || !isMethodNamed(run, "testing", "Run") { + return nil + } + + fun := astutil.Unparen(call.Args[1]) + if lit, ok := fun.(*ast.FuncLit); ok { // function lit? + return &asyncCall{region: lit, async: call, scope: lit, fun: fun} + } + + if id := funcIdent(fun); id != nil { + if lit := funcLitInScope(id); lit != nil { // function lit in variable? + return &asyncCall{region: lit, async: call, scope: lit, fun: fun} } - return true - }) + } + + // Check within t.Run(name, fun) for calls to t.Forbidden, + // e.g. 
t.Run(name, func(t *testing.T){ t.Forbidden() }) + return &asyncCall{region: call, async: call, scope: fun, fun: fun} +} + +var forbidden = []string{ + "FailNow", + "Fatal", + "Fatalf", + "Skip", + "Skipf", + "SkipNow", +} + +// forbiddenMethod decomposes a call x.m() into (x, x.m, m) where +// x is a variable, x.m is a selection, and m is the static callee m. +// Returns (nil, nil, nil) if call is not of this form. +func forbiddenMethod(info *types.Info, call *ast.CallExpr) (*types.Var, *types.Selection, *types.Func) { + // Compare to typeutil.StaticCallee. + fun := astutil.Unparen(call.Fun) + selExpr, ok := fun.(*ast.SelectorExpr) + if !ok { + return nil, nil, nil + } + sel := info.Selections[selExpr] + if sel == nil { + return nil, nil, nil + } + + var x *types.Var + if id, ok := astutil.Unparen(selExpr.X).(*ast.Ident); ok { + x, _ = info.Uses[id].(*types.Var) + } + if x == nil { + return nil, nil, nil + } + + fn, _ := sel.Obj().(*types.Func) + if fn == nil || !isMethodNamed(fn, "testing", forbidden...) { + return nil, nil, nil + } + return x, sel, fn +} + +func formatMethod(sel *types.Selection, fn *types.Func) string { + var ptr string + rtype := sel.Recv() + if p, ok := aliases.Unalias(rtype).(*types.Pointer); ok { + ptr = "*" + rtype = p.Elem() + } + return fmt.Sprintf("(%s%s).%s", ptr, rtype.String(), fn.Name()) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go new file mode 100644 index 0000000000..ad815f1901 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go @@ -0,0 +1,96 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testinggoroutine + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/typeparams" +) + +// AST and types utilities that are not specific to testinggoroutines. + +// localFunctionDecls returns a mapping from *types.Func to *ast.FuncDecl in files. +func localFunctionDecls(info *types.Info, files []*ast.File) func(*types.Func) *ast.FuncDecl { + var fnDecls map[*types.Func]*ast.FuncDecl // computed lazily + return func(f *types.Func) *ast.FuncDecl { + if f != nil && fnDecls == nil { + fnDecls = make(map[*types.Func]*ast.FuncDecl) + for _, file := range files { + for _, decl := range file.Decls { + if fnDecl, ok := decl.(*ast.FuncDecl); ok { + if fn, ok := info.Defs[fnDecl.Name].(*types.Func); ok { + fnDecls[fn] = fnDecl + } + } + } + } + } + // TODO: set f = f.Origin() here. + return fnDecls[f] + } +} + +// isMethodNamed returns true if f is a method defined +// in package with the path pkgPath with a name in names. +func isMethodNamed(f *types.Func, pkgPath string, names ...string) bool { + if f == nil { + return false + } + if f.Pkg() == nil || f.Pkg().Path() != pkgPath { + return false + } + if f.Type().(*types.Signature).Recv() == nil { + return false + } + for _, n := range names { + if f.Name() == n { + return true + } + } + return false +} + +func funcIdent(fun ast.Expr) *ast.Ident { + switch fun := astutil.Unparen(fun).(type) { + case *ast.IndexExpr, *ast.IndexListExpr: + x, _, _, _ := typeparams.UnpackIndexExpr(fun) // necessary? + id, _ := x.(*ast.Ident) + return id + case *ast.Ident: + return fun + default: + return nil + } +} + +// funcLitInScope returns a FuncLit that id is at least initially assigned to.
+// +// TODO: This is closely tied to id.Obj which is deprecated. +func funcLitInScope(id *ast.Ident) *ast.FuncLit { + // Compare to (*ast.Object).Pos(). + if id.Obj == nil { + return nil + } + var rhs ast.Expr + switch d := id.Obj.Decl.(type) { + case *ast.AssignStmt: + for i, x := range d.Lhs { + if ident, isIdent := x.(*ast.Ident); isIdent && ident.Name == id.Name && i < len(d.Rhs) { + rhs = d.Rhs[i] + } + } + case *ast.ValueSpec: + for i, n := range d.Names { + if n.Name == id.Name && i < len(d.Values) { + rhs = d.Values[i] + } + } + } + lit, _ := rhs.(*ast.FuncLit) + return lit +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/tests/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/tests/doc.go new file mode 100644 index 0000000000..3ae27db9c1 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/tests/doc.go @@ -0,0 +1,18 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tests defines an Analyzer that checks for common mistaken +// usages of tests and examples. +// +// # Analyzer tests +// +// tests: check for common mistaken usages of tests and examples +// +// The tests checker walks Test, Benchmark, Fuzzing and Example functions checking +// malformed names, wrong signatures and examples documenting non-existent +// identifiers. +// +// Please see the documentation for package testing in golang.org/pkg/testing +// for the conventions that are enforced for Tests, Benchmarks, and Examples. +package tests diff --git a/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go b/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go index 935aad00c9..39d0d9e429 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go @@ -2,11 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package tests defines an Analyzer that checks for common mistaken -// usages of tests and examples. package tests import ( + _ "embed" "fmt" "go/ast" "go/token" @@ -17,22 +16,16 @@ import ( "unicode/utf8" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" ) -const Doc = `check for common mistaken usages of tests and examples - -The tests checker walks Test, Benchmark and Example functions checking -malformed names, wrong signatures and examples documenting non-existent -identifiers. - -Please see the documentation for package testing in golang.org/pkg/testing -for the conventions that are enforced for Tests, Benchmarks, and Examples.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "tests", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "tests"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/tests", Run: run, } @@ -73,9 +66,7 @@ func run(pass *analysis.Pass) (interface{}, error) { checkTest(pass, fn, "Test") case strings.HasPrefix(fn.Name.Name, "Benchmark"): checkTest(pass, fn, "Benchmark") - } - // run fuzz tests diagnostics only for 1.18 i.e. when analysisinternal.DiagnoseFuzzTests is turned on. 
- if strings.HasPrefix(fn.Name.Name, "Fuzz") && analysisinternal.DiagnoseFuzzTests { + case strings.HasPrefix(fn.Name.Name, "Fuzz"): checkTest(pass, fn, "Fuzz") checkFuzz(pass, fn) } @@ -261,17 +252,13 @@ func validateFuzzArgs(pass *analysis.Pass, params *types.Tuple, expr ast.Expr) b } func isTestingType(typ types.Type, testingType string) bool { + // No Unalias here: I doubt "go test" recognizes + // "type A = *testing.T; func Test(A) {}" as a test. ptr, ok := typ.(*types.Pointer) if !ok { return false } - named, ok := ptr.Elem().(*types.Named) - if !ok { - return false - } - obj := named.Obj() - // obj.Pkg is nil for the error type. - return obj != nil && obj.Pkg() != nil && obj.Pkg().Path() == "testing" && obj.Name() == testingType + return analysisutil.IsNamedType(ptr.Elem(), "testing", testingType) } // Validate that fuzz target function's arguments are of accepted types. @@ -405,7 +392,7 @@ func checkExampleName(pass *analysis.Pass, fn *ast.FuncDecl) { if results := fn.Type.Results; results != nil && len(results.List) != 0 { pass.Reportf(fn.Pos(), "%s should return nothing", fnName) } - if tparams := typeparams.ForFuncType(fn.Type); tparams != nil && len(tparams.List) > 0 { + if tparams := fn.Type.TypeParams; tparams != nil && len(tparams.List) > 0 { pass.Reportf(fn.Pos(), "%s should not have type params", fnName) } @@ -474,7 +461,7 @@ func checkTest(pass *analysis.Pass, fn *ast.FuncDecl, prefix string) { return } - if tparams := typeparams.ForFuncType(fn.Type); tparams != nil && len(tparams.List) > 0 { + if tparams := fn.Type.TypeParams; tparams != nil && len(tparams.List) > 0 { // Note: cmd/go/internal/load also errors about TestXXX and BenchmarkXXX functions with type parameters. // We have currently decided to also warn before compilation/package loading. This can help users in IDEs. // TODO(adonovan): use ReportRangef(tparams). diff --git a/vendor/golang.org/x/tools/go/analysis/passes/timeformat/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/doc.go new file mode 100644 index 0000000000..5c665b298b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/doc.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeformat defines an Analyzer that checks for the use +// of time.Format or time.Parse calls with a bad format. +// +// # Analyzer timeformat +// +// timeformat: check for calls of (time.Time).Format or time.Parse with 2006-02-01 +// +// The timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm) +// format. Internationally, "yyyy-dd-mm" does not occur in common calendar date +// standards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended. +package timeformat diff --git a/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go new file mode 100644 index 0000000000..4a6c6b8bc6 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go @@ -0,0 +1,120 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeformat defines an Analyzer that checks for the use +// of time.Format or time.Parse calls with a bad format. 
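A small sketch of the mistake the timeformat comment above describes, using an arbitrary date to show why the day and month come out swapped:

    t := time.Date(2024, time.March, 5, 0, 0, 0, 0, time.UTC)
    _ = t.Format("2006-02-01") // flagged: yyyy-dd-mm, renders "2024-05-03"
    _ = t.Format("2006-01-02") // suggested replacement: yyyy-mm-dd, renders "2024-03-05"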
+package timeformat + +import ( + _ "embed" + "go/ast" + "go/constant" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +const badFormat = "2006-02-01" +const goodFormat = "2006-01-02" + +//go:embed doc.go +var doc string + +var Analyzer = &analysis.Analyzer{ + Name: "timeformat", + Doc: analysisutil.MustExtractDoc(doc, "timeformat"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/timeformat", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + // Note: (time.Time).Format is a method and can be a typeutil.Callee + // without directly importing "time". So we cannot just skip this package + // when !analysisutil.Imports(pass.Pkg, "time"). + // TODO(taking): Consider using a prepass to collect typeutil.Callees. + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + call := n.(*ast.CallExpr) + fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func) + if !ok { + return + } + if !isTimeDotFormat(fn) && !isTimeDotParse(fn) { + return + } + if len(call.Args) > 0 { + arg := call.Args[0] + badAt := badFormatAt(pass.TypesInfo, arg) + + if badAt > -1 { + // Check if it's a literal string, otherwise we can't suggest a fix. + if _, ok := arg.(*ast.BasicLit); ok { + pos := int(arg.Pos()) + badAt + 1 // +1 to skip the " or ` + end := pos + len(badFormat) + + pass.Report(analysis.Diagnostic{ + Pos: token.Pos(pos), + End: token.Pos(end), + Message: badFormat + " should be " + goodFormat, + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Replace " + badFormat + " with " + goodFormat, + TextEdits: []analysis.TextEdit{{ + Pos: token.Pos(pos), + End: token.Pos(end), + NewText: []byte(goodFormat), + }}, + }}, + }) + } else { + pass.Reportf(arg.Pos(), badFormat+" should be "+goodFormat) + } + } + } + }) + return nil, nil +} + +func isTimeDotFormat(f *types.Func) bool { + if f.Name() != "Format" || f.Pkg() == nil || f.Pkg().Path() != "time" { + return false + } + // Verify that the receiver is time.Time. + recv := f.Type().(*types.Signature).Recv() + return recv != nil && analysisutil.IsNamedType(recv.Type(), "time", "Time") +} + +func isTimeDotParse(f *types.Func) bool { + return analysisutil.IsFunctionNamed(f, "time", "Parse") +} + +// badFormatAt returns the start of a bad format in e or -1 if no bad format is found. +func badFormatAt(info *types.Info, e ast.Expr) int { + tv, ok := info.Types[e] + if !ok { // no type info, assume good + return -1 + } + + t, ok := tv.Type.(*types.Basic) // sic, no unalias + if !ok || t.Info()&types.IsString == 0 { + return -1 + } + + if tv.Value == nil { + return -1 + } + + return strings.Index(constant.StringVal(tv.Value), badFormat) +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/doc.go new file mode 100644 index 0000000000..5781bbd32d --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/doc.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// The unmarshal package defines an Analyzer that checks for passing +// non-pointer or non-interface types to unmarshal and decode functions. +// +// # Analyzer unmarshal +// +// unmarshal: report passing non-pointer or non-interface values to unmarshal +// +// The unmarshal analysis reports calls to functions such as json.Unmarshal +// in which the argument type is not a pointer or an interface. +package unmarshal diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go index 5129048a07..a7889fa459 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go @@ -2,29 +2,28 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// The unmarshal package defines an Analyzer that checks for passing -// non-pointer or non-interface types to unmarshal and decode functions. package unmarshal import ( + _ "embed" "go/ast" "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" ) -const Doc = `report passing non-pointer or non-interface values to unmarshal - -The unmarshal analysis reports calls to functions such as json.Unmarshal -in which the argument type is not a pointer or an interface.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "unmarshal", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "unmarshal"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unmarshal", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -37,6 +36,12 @@ func run(pass *analysis.Pass) (interface{}, error) { return nil, nil } + // Note: (*"encoding/json".Decoder).Decode, (* "encoding/gob".Decoder).Decode + // and (* "encoding/xml".Decoder).Decode are methods and can be a typeutil.Callee + // without directly importing their packages. So we cannot just skip this package + // when !analysisutil.Imports(pass.Pkg, "encoding/..."). + // TODO(taking): Consider using a prepass to collect typeutil.Callees. + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ @@ -51,6 +56,7 @@ func run(pass *analysis.Pass) (interface{}, error) { // Classify the callee (without allocating memory). 
argidx := -1 + recv := fn.Type().(*types.Signature).Recv() if fn.Name() == "Unmarshal" && recv == nil { // "encoding/json".Unmarshal @@ -64,12 +70,8 @@ func run(pass *analysis.Pass) (interface{}, error) { // (*"encoding/json".Decoder).Decode // (* "encoding/gob".Decoder).Decode // (* "encoding/xml".Decoder).Decode - t := recv.Type() - if ptr, ok := t.(*types.Pointer); ok { - t = ptr.Elem() - } - tname := t.(*types.Named).Obj() - if tname.Name() == "Decoder" { + _, named := typesinternal.ReceiverNamed(recv) + if tname := named.Obj(); tname.Name() == "Decoder" { switch tname.Pkg().Path() { case "encoding/json", "encoding/xml", "encoding/gob": argidx = 0 // func(interface{}) @@ -86,7 +88,7 @@ func run(pass *analysis.Pass) (interface{}, error) { t := pass.TypesInfo.Types[call.Args[argidx]].Type switch t.Underlying().(type) { - case *types.Pointer, *types.Interface, *typeparams.TypeParam: + case *types.Pointer, *types.Interface, *types.TypeParam: return } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/doc.go new file mode 100644 index 0000000000..d17d0d9444 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/doc.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unreachable defines an Analyzer that checks for unreachable code. +// +// # Analyzer unreachable +// +// unreachable: check for unreachable code +// +// The unreachable analyzer finds statements that execution can never reach +// because they are preceded by a return statement, a call to panic, an +// infinite loop, or similar constructs. +package unreachable diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go index 90896dd1bb..b810db7ee9 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go @@ -2,30 +2,29 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package unreachable defines an Analyzer that checks for unreachable code. package unreachable // TODO(adonovan): use the new cfg package, which is more precise. import ( + _ "embed" "go/ast" "go/token" "log" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" ) -const Doc = `check for unreachable code - -The unreachable analyzer finds statements that execution can never reach -because they are preceded by an return statement, a call to panic, an -infinite loop, or similar constructs.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "unreachable", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "unreachable"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unreachable", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/doc.go new file mode 100644 index 0000000000..de10804cb1 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/doc.go @@ -0,0 +1,17 @@ +// Copyright 2023 The Go Authors.
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unsafeptr defines an Analyzer that checks for invalid +// conversions of uintptr to unsafe.Pointer. +// +// # Analyzer unsafeptr +// +// unsafeptr: check for invalid conversions of uintptr to unsafe.Pointer +// +// The unsafeptr analyzer reports likely incorrect uses of unsafe.Pointer +// to convert integers to pointers. A conversion from uintptr to +// unsafe.Pointer is invalid if it implies that there is a uintptr-typed +// word in memory that holds a pointer value, because that word will be +// invisible to stack copying and to the garbage collector. +package unsafeptr diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go index ed86e5ebf0..14e4a6c1e4 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go @@ -7,6 +7,7 @@ package unsafeptr import ( + _ "embed" "go/ast" "go/token" "go/types" @@ -14,20 +15,18 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/aliases" ) -const Doc = `check for invalid conversions of uintptr to unsafe.Pointer - -The unsafeptr analyzer reports likely incorrect uses of unsafe.Pointer -to convert integers to pointers. A conversion from uintptr to -unsafe.Pointer is invalid if it implies that there is a uintptr-typed -word in memory that holds a pointer value, because that word will be -invisible to stack copying and to the garbage collector.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "unsafeptr", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "unsafeptr"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unsafeptr", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -71,7 +70,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { // Check unsafe.Pointer safety rules according to // https://golang.org/pkg/unsafe/#Pointer. - switch x := analysisutil.Unparen(x).(type) { + switch x := astutil.Unparen(x).(type) { case *ast.SelectorExpr: // "(6) Conversion of a reflect.SliceHeader or // reflect.StringHeader Data field to or from Pointer." @@ -90,7 +89,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { // by the time we get to the conversion at the end. // For now approximate by saying that *Header is okay // but Header is not. - pt, ok := info.Types[x.X].Type.(*types.Pointer) + pt, ok := aliases.Unalias(info.Types[x.X].Type).(*types.Pointer) if ok && isReflectHeader(pt.Elem()) { return true } @@ -107,8 +106,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { } switch sel.Sel.Name { case "Pointer", "UnsafeAddr": - t, ok := info.Types[sel.X].Type.(*types.Named) - if ok && t.Obj().Pkg().Path() == "reflect" && t.Obj().Name() == "Value" { + if analysisutil.IsNamedType(info.Types[sel.X].Type, "reflect", "Value") { return true } } @@ -121,7 +119,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { // isSafeArith reports whether x is a pointer arithmetic expression that is safe // to convert to unsafe.Pointer. 
func isSafeArith(info *types.Info, x ast.Expr) bool { - switch x := analysisutil.Unparen(x).(type) { + switch x := astutil.Unparen(x).(type) { case *ast.CallExpr: // Base case: initial conversion from unsafe.Pointer to uintptr. return len(x.Args) == 1 && @@ -156,13 +154,5 @@ func hasBasicType(info *types.Info, x ast.Expr, kind types.BasicKind) bool { // isReflectHeader reports whether t is reflect.SliceHeader or reflect.StringHeader. func isReflectHeader(t types.Type) bool { - if named, ok := t.(*types.Named); ok { - if obj := named.Obj(); obj.Pkg() != nil && obj.Pkg().Path() == "reflect" { - switch obj.Name() { - case "SliceHeader", "StringHeader": - return true - } - } - } - return false + return analysisutil.IsNamedType(t, "reflect", "SliceHeader", "StringHeader") } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/doc.go new file mode 100644 index 0000000000..a1bf4cf940 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/doc.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unusedresult defines an analyzer that checks for unused +// results of calls to certain pure functions. +// +// # Analyzer unusedresult +// +// unusedresult: check for unused results of calls to some functions +// +// Some functions like fmt.Errorf return a result and have no side +// effects, so it is always a mistake to discard the result. Other +// functions may return an error that must not be ignored, or a cleanup +// operation that must be called. This analyzer reports calls to +// functions like these when the result of the call is ignored. +// +// The set of functions may be controlled using flags. +package unusedresult diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go index 06747ba72b..76f42b052e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go @@ -3,10 +3,18 @@ // license that can be found in the LICENSE file. // Package unusedresult defines an analyzer that checks for unused -// results of calls to certain pure functions. +// results of calls to certain functions. package unusedresult +// It is tempting to make this analysis inductive: for each function +// that tail-calls one of the functions that we check, check those +// functions too. However, just because you must use the result of +// fmt.Sprintf doesn't mean you need to use the result of every +// function that returns a formatted string: it may have other results +// and effects. + import ( + _ "embed" "go/ast" "go/token" "go/types" @@ -16,25 +24,18 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/go/types/typeutil" ) -// TODO(adonovan): make this analysis modular: export a mustUseResult -// fact for each function that tail-calls one of the functions that we -// check, and check those functions too. 
- -const Doc = `check for unused results of calls to some functions - -Some functions like fmt.Errorf return a result and have no side effects, -so it is always a mistake to discard the result. This analyzer reports -calls to certain functions in which the result of the call is ignored. - -The set of functions may be controlled using flags.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "unusedresult", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "unusedresult"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unusedresult", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -43,9 +44,40 @@ var Analyzer = &analysis.Analyzer{ var funcs, stringMethods stringSetFlag func init() { - // TODO(adonovan): provide a comment syntax to allow users to - // add their functions to this set using facts. - funcs.Set("errors.New,fmt.Errorf,fmt.Sprintf,fmt.Sprint,sort.Reverse,context.WithValue,context.WithCancel,context.WithDeadline,context.WithTimeout") + // TODO(adonovan): provide a comment or declaration syntax to + // allow users to add their functions to this set using facts. + // For example: + // + // func ignoringTheErrorWouldBeVeryBad() error { + // type mustUseResult struct{} // enables vet unusedresult check + // ... + // } + // + // ignoringTheErrorWouldBeVeryBad() // oops + // + + // List standard library functions here. + // The context.With{Cancel,Deadline,Timeout} entries are + // effectively redundant wrt the lostcancel analyzer. + funcs = stringSetFlag{ + "context.WithCancel": true, + "context.WithDeadline": true, + "context.WithTimeout": true, + "context.WithValue": true, + "errors.New": true, + "fmt.Errorf": true, + "fmt.Sprint": true, + "fmt.Sprintf": true, + "slices.Clip": true, + "slices.Compact": true, + "slices.CompactFunc": true, + "slices.Delete": true, + "slices.DeleteFunc": true, + "slices.Grow": true, + "slices.Insert": true, + "slices.Replace": true, + "sort.Reverse": true, + } Analyzer.Flags.Var(&funcs, "funcs", "comma-separated list of functions whose results must be used") @@ -57,49 +89,41 @@ func init() { func run(pass *analysis.Pass) (interface{}, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + // Split functions into (pkg, name) pairs to save allocation later. + pkgFuncs := make(map[[2]string]bool, len(funcs)) + for s := range funcs { + if i := strings.LastIndexByte(s, '.'); i > 0 { + pkgFuncs[[2]string{s[:i], s[i+1:]}] = true + } + } + nodeFilter := []ast.Node{ (*ast.ExprStmt)(nil), } inspect.Preorder(nodeFilter, func(n ast.Node) { - call, ok := analysisutil.Unparen(n.(*ast.ExprStmt).X).(*ast.CallExpr) + call, ok := astutil.Unparen(n.(*ast.ExprStmt).X).(*ast.CallExpr) if !ok { return // not a call statement } - fun := analysisutil.Unparen(call.Fun) - - if pass.TypesInfo.Types[fun].IsType() { - return // a conversion, not a call - } - x, _, _, _ := typeparams.UnpackIndexExpr(fun) - if x != nil { - fun = x // If this is generic function or method call, skip the instantiation arguments - } - - selector, ok := fun.(*ast.SelectorExpr) + // Call to function or method? + fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func) if !ok { - return // neither a method call nor a qualified ident + return // e.g. var or builtin } - - sel, ok := pass.TypesInfo.Selections[selector] - if ok && sel.Kind() == types.MethodVal { + if sig := fn.Type().(*types.Signature); sig.Recv() != nil { // method (e.g. 
foo.String()) - obj := sel.Obj().(*types.Func) - sig := sel.Type().(*types.Signature) if types.Identical(sig, sigNoArgsStringResult) { - if stringMethods[obj.Name()] { + if stringMethods[fn.Name()] { pass.Reportf(call.Lparen, "result of (%s).%s call not used", - sig.Recv().Type(), obj.Name()) + sig.Recv().Type(), fn.Name()) } } - } else if !ok { - // package-qualified function (e.g. fmt.Errorf) - obj := pass.TypesInfo.Uses[selector.Sel] - if obj, ok := obj.(*types.Func); ok { - qname := obj.Pkg().Path() + "." + obj.Name() - if funcs[qname] { - pass.Reportf(call.Lparen, "result of %v call not used", qname) - } + } else { + // package-level function (e.g. fmt.Errorf) + if pkgFuncs[[2]string{fn.Pkg().Path(), fn.Name()}] { + pass.Reportf(call.Lparen, "result of %s.%s call not used", + fn.Pkg().Path(), fn.Name()) } } }) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/doc.go new file mode 100644 index 0000000000..de10dc8c8e --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/doc.go @@ -0,0 +1,34 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unusedwrite checks for unused writes to the elements of a struct or array object. +// +// # Analyzer unusedwrite +// +// unusedwrite: checks for unused writes +// +// The analyzer reports instances of writes to struct fields and +// arrays that are never read. Specifically, when a struct object +// or an array is copied, its elements are copied implicitly by +// the compiler, and any element write to this copy does nothing +// with the original object. +// +// For example: +// +// type T struct { x int } +// +// func f(input []T) { +// for i, v := range input { // v is a copy +// v.x = i // unused write to field x +// } +// } +// +// Another example is about non-pointer receiver: +// +// type T struct { x int } +// +// func (t T) f() { // t is a copy +// t.x = i // unused write to field x +// } +package unusedwrite diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go index 9cc45e0a36..a01cbb8f83 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go @@ -2,49 +2,29 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package unusedwrite checks for unused writes to the elements of a struct or array object. package unusedwrite import ( + _ "embed" "fmt" "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ssa" + "golang.org/x/tools/internal/aliases" ) -// Doc is a documentation string. -const Doc = `checks for unused writes - -The analyzer reports instances of writes to struct fields and -arrays that are never read. Specifically, when a struct object -or an array is copied, its elements are copied implicitly by -the compiler, and any element write to this copy does nothing -with the original object. 
- -For example: - - type T struct { x int } - func f(input []T) { - for i, v := range input { // v is a copy - v.x = i // unused write to field x - } - } - -Another example is about non-pointer receiver: - - type T struct { x int } - func (t T) f() { // t is a copy - t.x = i // unused write to field x - } -` +//go:embed doc.go +var doc string // Analyzer reports instances of writes to struct fields and arrays // that are never read. var Analyzer = &analysis.Analyzer{ Name: "unusedwrite", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "unusedwrite"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unusedwrite", Requires: []*analysis.Analyzer{buildssa.Analyzer}, Run: run, } @@ -145,10 +125,7 @@ func isDeadStore(store *ssa.Store, obj ssa.Value, addr ssa.Instruction) bool { // isStructOrArray returns whether the underlying type is struct or array. func isStructOrArray(tp types.Type) bool { - if named, ok := tp.(*types.Named); ok { - tp = named.Underlying() - } - switch tp.(type) { + switch tp.Underlying().(type) { case *types.Array: return true case *types.Struct: @@ -166,7 +143,7 @@ func hasStructOrArrayType(v ssa.Value) bool { // func (t T) f() { ...} // the receiver object is of type *T: // t0 = local T (t) *T - if tp, ok := alloc.Type().(*types.Pointer); ok { + if tp, ok := aliases.Unalias(alloc.Type()).(*types.Pointer); ok { return isStructOrArray(tp.Elem()) } return false @@ -180,13 +157,14 @@ func hasStructOrArrayType(v ssa.Value) bool { // // For example, for struct T {x int, y int}, getFieldName(*T, 1) returns "y". func getFieldName(tp types.Type, index int) string { - if pt, ok := tp.(*types.Pointer); ok { + // TODO(adonovan): use + // stp, ok := typeparams.Deref(tp).Underlying().(*types.Struct); ok { + // when Deref is defined. But see CL 565456 for a better fix. + + if pt, ok := aliases.Unalias(tp).(*types.Pointer); ok { tp = pt.Elem() } - if named, ok := tp.(*types.Named); ok { - tp = named.Underlying() - } - if stp, ok := tp.(*types.Struct); ok { + if stp, ok := tp.Underlying().(*types.Struct); ok { return stp.Field(index).Name() } return fmt.Sprintf("%d", index) diff --git a/vendor/golang.org/x/tools/go/analysis/validate.go b/vendor/golang.org/x/tools/go/analysis/validate.go index 9da5692af5..4f2c404562 100644 --- a/vendor/golang.org/x/tools/go/analysis/validate.go +++ b/vendor/golang.org/x/tools/go/analysis/validate.go @@ -19,6 +19,8 @@ import ( // that the Requires graph is acyclic; // that analyzer fact types are unique; // that each fact type is a pointer. +// +// Analyzer names need not be unique, though this may be confusing. func Validate(analyzers []*Analyzer) error { // Map each fact type to its sole generating analyzer. 
factTypes := make(map[reflect.Type]*Analyzer) diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 9fa5aa192c..2c4c4e2328 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -11,8 +11,6 @@ import ( "go/ast" "go/token" "sort" - - "golang.org/x/tools/internal/typeparams" ) // PathEnclosingInterval returns the node that encloses the source @@ -322,7 +320,7 @@ func childrenOf(n ast.Node) []ast.Node { children = append(children, n.Recv) } children = append(children, n.Name) - if tparams := typeparams.ForFuncType(n.Type); tparams != nil { + if tparams := n.Type.TypeParams; tparams != nil { children = append(children, tparams) } if n.Type.Params != nil { @@ -377,7 +375,7 @@ func childrenOf(n ast.Node) []ast.Node { tok(n.Lbrack, len("[")), tok(n.Rbrack, len("]"))) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: children = append(children, tok(n.Lbrack, len("[")), tok(n.Rbrack, len("]"))) @@ -588,7 +586,7 @@ func NodeDescription(n ast.Node) string { return "decrement statement" case *ast.IndexExpr: return "index expression" - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: return "index list expression" case *ast.InterfaceType: return "interface type" diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index f430b21b9b..58934f7663 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -9,8 +9,6 @@ import ( "go/ast" "reflect" "sort" - - "golang.org/x/tools/internal/typeparams" ) // An ApplyFunc is invoked by Apply for each node n, even if n is nil, @@ -252,7 +250,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. a.apply(n, "X", nil, n.X) a.apply(n, "Index", nil, n.Index) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: a.apply(n, "X", nil, n.X) a.applyList(n, "Indices") @@ -293,7 +291,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. a.apply(n, "Fields", nil, n.Fields) case *ast.FuncType: - if tparams := typeparams.ForFuncType(n); tparams != nil { + if tparams := n.TypeParams; tparams != nil { a.apply(n, "TypeParams", nil, tparams) } a.apply(n, "Params", nil, n.Params) @@ -408,7 +406,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. case *ast.TypeSpec: a.apply(n, "Doc", nil, n.Doc) a.apply(n, "Name", nil, n.Name) - if tparams := typeparams.ForTypeSpec(n); tparams != nil { + if tparams := n.TypeParams; tparams != nil { a.apply(n, "TypeParams", nil, tparams) } a.apply(n, "Type", nil, n.Type) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index 3fbfebf369..1fc1de0bd1 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -64,8 +64,9 @@ type event struct { // depth-first order. It calls f(n) for each node n before it visits // n's children. // +// The complete traversal sequence is determined by ast.Inspect. // The types argument, if non-empty, enables type-based filtering of -// events. The function f if is called only for nodes whose type +// events. The function f is called only for nodes whose type // matches an element of the types slice. 
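// Illustrative sketch (not part of the vendored patch): typical use of the
// Preorder API documented above, driving a type-filtered traversal the same
// way the analyzers in this diff do. The file name and source string are
// invented for the example.
package main

import (
	"go/ast"
	"go/parser"
	"go/token"
	"log"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	src := "package p\nfunc g() {}\nfunc f() { g() }"
	f, err := parser.ParseFile(fset, "example.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	in := inspector.New([]*ast.File{f})
	// Filtering on *ast.CallExpr means the callback runs only for calls,
	// so it can type-assert without a type switch.
	in.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
		call := n.(*ast.CallExpr)
		log.Printf("call at %s", fset.Position(call.Lparen))
	})
}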
func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // Because it avoids postorder calls to f, and the pruning @@ -97,6 +98,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // of the non-nil children of the node, followed by a call of // f(n, false). // +// The complete traversal sequence is determined by ast.Inspect. // The types argument, if non-empty, enables type-based filtering of // events. The function f if is called only for nodes whose type // matches an element of the types slice. diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index 703c813954..2a872f89d4 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,8 +12,6 @@ package inspector import ( "go/ast" "math" - - "golang.org/x/tools/internal/typeparams" ) const ( @@ -171,7 +169,7 @@ func typeOf(n ast.Node) uint64 { return 1 << nIncDecStmt case *ast.IndexExpr: return 1 << nIndexExpr - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: return 1 << nIndexListExpr case *ast.InterfaceType: return 1 << nInterfaceType diff --git a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go index 15025f645f..763d18809b 100644 --- a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go +++ b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go @@ -8,7 +8,6 @@ import ( "fmt" "go/build" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -76,7 +75,7 @@ func FakeContext(pkgs map[string]map[string]string) *build.Context { if !ok { return nil, fmt.Errorf("file not found: %s", filename) } - return ioutil.NopCloser(strings.NewReader(content)), nil + return io.NopCloser(strings.NewReader(content)), nil } ctxt.IsAbsPath = func(path string) bool { path = filepath.ToSlash(path) diff --git a/vendor/golang.org/x/tools/go/buildutil/overlay.go b/vendor/golang.org/x/tools/go/buildutil/overlay.go index bdbfd93147..7e371658d9 100644 --- a/vendor/golang.org/x/tools/go/buildutil/overlay.go +++ b/vendor/golang.org/x/tools/go/buildutil/overlay.go @@ -10,7 +10,6 @@ import ( "fmt" "go/build" "io" - "io/ioutil" "path/filepath" "strconv" "strings" @@ -33,7 +32,7 @@ func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Conte // TODO(dominikh): Implement IsDir, HasSubdir and ReadDir rc := func(data []byte) (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(data)), nil + return io.NopCloser(bytes.NewBuffer(data)), nil } copy := *orig // make a copy diff --git a/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/golang.org/x/tools/go/buildutil/tags.go index 7cf523bca4..32c8d1424d 100644 --- a/vendor/golang.org/x/tools/go/buildutil/tags.go +++ b/vendor/golang.org/x/tools/go/buildutil/tags.go @@ -4,17 +4,22 @@ package buildutil -// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go. +// This duplicated logic must be kept in sync with that from go build: +// $GOROOT/src/cmd/go/internal/work/build.go (tagsFlag.Set) +// $GOROOT/src/cmd/go/internal/base/flag.go (StringsFlag.Set) +// $GOROOT/src/cmd/internal/quoted/quoted.go (isSpaceByte, Split) -import "fmt" +import ( + "fmt" + "strings" +) const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. 
" + "For more information about build tags, see the description of " + "build constraints in the documentation for the go/build package" // TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses -// a flag value in the same manner as go build's -tags flag and -// populates a []string slice. +// a flag value the same as go build's -tags flag and populates a []string slice. // // See $GOROOT/src/go/build/doc.go for description of build tags. // See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag. @@ -25,19 +30,32 @@ const TagsFlagDoc = "a list of `build tags` to consider satisfied during the bui type TagsFlag []string func (v *TagsFlag) Set(s string) error { - var err error - *v, err = splitQuotedFields(s) - if *v == nil { - *v = []string{} + // See $GOROOT/src/cmd/go/internal/work/build.go (tagsFlag.Set) + // For compatibility with Go 1.12 and earlier, allow "-tags='a b c'" or even just "-tags='a'". + if strings.Contains(s, " ") || strings.Contains(s, "'") { + var err error + *v, err = splitQuotedFields(s) + if *v == nil { + *v = []string{} + } + return err + } + + // Starting in Go 1.13, the -tags flag is a comma-separated list of build tags. + *v = []string{} + for _, s := range strings.Split(s, ",") { + if s != "" { + *v = append(*v, s) + } } - return err + return nil } func (v *TagsFlag) Get() interface{} { return *v } func splitQuotedFields(s string) ([]string, error) { - // Split fields allowing '' or "" around elements. - // Quotes further inside the string do not count. + // See $GOROOT/src/cmd/internal/quoted/quoted.go (Split) + // This must remain in sync with that logic. var f []string for len(s) > 0 { for len(s) > 0 && isSpaceByte(s[0]) { @@ -76,5 +94,7 @@ func (v *TagsFlag) String() string { } func isSpaceByte(c byte) bool { + // See $GOROOT/src/cmd/internal/quoted/quoted.go (isSpaceByte, Split) + // This list must remain in sync with that. return c == ' ' || c == '\t' || c == '\n' || c == '\r' } diff --git a/vendor/golang.org/x/tools/go/cfg/builder.go b/vendor/golang.org/x/tools/go/cfg/builder.go index dad6a444d8..ac4d63c400 100644 --- a/vendor/golang.org/x/tools/go/cfg/builder.go +++ b/vendor/golang.org/x/tools/go/cfg/builder.go @@ -16,8 +16,8 @@ type builder struct { cfg *CFG mayReturn func(*ast.CallExpr) bool current *Block - lblocks map[*ast.Object]*lblock // labeled blocks - targets *targets // linked stack of branch targets + lblocks map[string]*lblock // labeled blocks + targets *targets // linked stack of branch targets } func (b *builder) stmt(_s ast.Stmt) { @@ -42,7 +42,7 @@ start: b.add(s) if call, ok := s.X.(*ast.CallExpr); ok && !b.mayReturn(call) { // Calls to panic, os.Exit, etc, never return. 
- b.current = b.newBlock("unreachable.call") + b.current = b.newBlock(KindUnreachable, s) } case *ast.DeclStmt: @@ -57,7 +57,7 @@ start: } case *ast.LabeledStmt: - label = b.labeledBlock(s.Label) + label = b.labeledBlock(s.Label, s) b.jump(label._goto) b.current = label._goto _s = s.Stmt @@ -65,7 +65,7 @@ start: case *ast.ReturnStmt: b.add(s) - b.current = b.newBlock("unreachable.return") + b.current = b.newBlock(KindUnreachable, s) case *ast.BranchStmt: b.branchStmt(s) @@ -77,11 +77,11 @@ start: if s.Init != nil { b.stmt(s.Init) } - then := b.newBlock("if.then") - done := b.newBlock("if.done") + then := b.newBlock(KindIfThen, s) + done := b.newBlock(KindIfDone, s) _else := done if s.Else != nil { - _else = b.newBlock("if.else") + _else = b.newBlock(KindIfElse, s) } b.add(s.Cond) b.ifelse(then, _else) @@ -128,7 +128,7 @@ func (b *builder) branchStmt(s *ast.BranchStmt) { switch s.Tok { case token.BREAK: if s.Label != nil { - if lb := b.labeledBlock(s.Label); lb != nil { + if lb := b.labeledBlock(s.Label, nil); lb != nil { block = lb._break } } else { @@ -139,7 +139,7 @@ func (b *builder) branchStmt(s *ast.BranchStmt) { case token.CONTINUE: if s.Label != nil { - if lb := b.labeledBlock(s.Label); lb != nil { + if lb := b.labeledBlock(s.Label, nil); lb != nil { block = lb._continue } } else { @@ -155,14 +155,14 @@ func (b *builder) branchStmt(s *ast.BranchStmt) { case token.GOTO: if s.Label != nil { - block = b.labeledBlock(s.Label)._goto + block = b.labeledBlock(s.Label, nil)._goto } } - if block == nil { - block = b.newBlock("undefined.branch") + if block == nil { // ill-typed (e.g. undefined label) + block = b.newBlock(KindUnreachable, s) } b.jump(block) - b.current = b.newBlock("unreachable.branch") + b.current = b.newBlock(KindUnreachable, s) } func (b *builder) switchStmt(s *ast.SwitchStmt, label *lblock) { @@ -172,7 +172,7 @@ func (b *builder) switchStmt(s *ast.SwitchStmt, label *lblock) { if s.Tag != nil { b.add(s.Tag) } - done := b.newBlock("switch.done") + done := b.newBlock(KindSwitchDone, s) if label != nil { label._break = done } @@ -188,13 +188,13 @@ func (b *builder) switchStmt(s *ast.SwitchStmt, label *lblock) { for i, clause := range s.Body.List { body := fallthru if body == nil { - body = b.newBlock("switch.body") // first case only + body = b.newBlock(KindSwitchCaseBody, clause) // first case only } // Preallocate body block for the next case. fallthru = done if i+1 < ncases { - fallthru = b.newBlock("switch.body") + fallthru = b.newBlock(KindSwitchCaseBody, s.Body.List[i+1]) } cc := clause.(*ast.CaseClause) @@ -208,7 +208,7 @@ func (b *builder) switchStmt(s *ast.SwitchStmt, label *lblock) { var nextCond *Block for _, cond := range cc.List { - nextCond = b.newBlock("switch.next") + nextCond = b.newBlock(KindSwitchNextCase, cc) b.add(cond) // one half of the tag==cond condition b.ifelse(body, nextCond) b.current = nextCond @@ -247,7 +247,7 @@ func (b *builder) typeSwitchStmt(s *ast.TypeSwitchStmt, label *lblock) { b.add(s.Assign) } - done := b.newBlock("typeswitch.done") + done := b.newBlock(KindSwitchDone, s) if label != nil { label._break = done } @@ -258,10 +258,10 @@ func (b *builder) typeSwitchStmt(s *ast.TypeSwitchStmt, label *lblock) { default_ = cc continue } - body := b.newBlock("typeswitch.body") + body := b.newBlock(KindSwitchCaseBody, cc) var next *Block for _, casetype := range cc.List { - next = b.newBlock("typeswitch.next") + next = b.newBlock(KindSwitchNextCase, cc) // casetype is a type, so don't call b.add(casetype). 
// This block logically contains a type assertion, // x.(casetype), but it's unclear how to represent x. @@ -300,7 +300,7 @@ func (b *builder) selectStmt(s *ast.SelectStmt, label *lblock) { } } - done := b.newBlock("select.done") + done := b.newBlock(KindSelectDone, s) if label != nil { label._break = done } @@ -312,8 +312,8 @@ func (b *builder) selectStmt(s *ast.SelectStmt, label *lblock) { defaultBody = &clause.Body continue } - body := b.newBlock("select.body") - next := b.newBlock("select.next") + body := b.newBlock(KindSelectCaseBody, clause) + next := b.newBlock(KindSelectAfterCase, clause) b.ifelse(body, next) b.current = body b.targets = &targets{ @@ -358,15 +358,15 @@ func (b *builder) forStmt(s *ast.ForStmt, label *lblock) { if s.Init != nil { b.stmt(s.Init) } - body := b.newBlock("for.body") - done := b.newBlock("for.done") // target of 'break' - loop := body // target of back-edge + body := b.newBlock(KindForBody, s) + done := b.newBlock(KindForDone, s) // target of 'break' + loop := body // target of back-edge if s.Cond != nil { - loop = b.newBlock("for.loop") + loop = b.newBlock(KindForLoop, s) } cont := loop // target of 'continue' if s.Post != nil { - cont = b.newBlock("for.post") + cont = b.newBlock(KindForPost, s) } if label != nil { label._break = done @@ -414,12 +414,12 @@ func (b *builder) rangeStmt(s *ast.RangeStmt, label *lblock) { // jump loop // done: (target of break) - loop := b.newBlock("range.loop") + loop := b.newBlock(KindRangeLoop, s) b.jump(loop) b.current = loop - body := b.newBlock("range.body") - done := b.newBlock("range.done") + body := b.newBlock(KindRangeBody, s) + done := b.newBlock(KindRangeDone, s) b.ifelse(body, done) b.current = body @@ -461,14 +461,19 @@ type lblock struct { // labeledBlock returns the branch target associated with the // specified label, creating it if needed. -func (b *builder) labeledBlock(label *ast.Ident) *lblock { - lb := b.lblocks[label.Obj] +func (b *builder) labeledBlock(label *ast.Ident, stmt *ast.LabeledStmt) *lblock { + lb := b.lblocks[label.Name] if lb == nil { - lb = &lblock{_goto: b.newBlock(label.Name)} + lb = &lblock{_goto: b.newBlock(KindLabel, nil)} if b.lblocks == nil { - b.lblocks = make(map[*ast.Object]*lblock) + b.lblocks = make(map[string]*lblock) } - b.lblocks[label.Obj] = lb + b.lblocks[label.Name] = lb + } + // Fill in the label later (in case of forward goto). + // Stmt may be set already if labels are duplicated (ill-typed). + if stmt != nil && lb._goto.Stmt == nil { + lb._goto.Stmt = stmt } return lb } @@ -477,11 +482,12 @@ func (b *builder) labeledBlock(label *ast.Ident) *lblock { // slice and returns it. // It does not automatically become the current block. // comment is an optional string for more readable debugging output. -func (b *builder) newBlock(comment string) *Block { +func (b *builder) newBlock(kind BlockKind, stmt ast.Stmt) *Block { g := b.cfg block := &Block{ - Index: int32(len(g.Blocks)), - comment: comment, + Index: int32(len(g.Blocks)), + Kind: kind, + Stmt: stmt, } block.Succs = block.succs2[:0] g.Blocks = append(g.Blocks, block) diff --git a/vendor/golang.org/x/tools/go/cfg/cfg.go b/vendor/golang.org/x/tools/go/cfg/cfg.go index 37d799f4bc..01668359af 100644 --- a/vendor/golang.org/x/tools/go/cfg/cfg.go +++ b/vendor/golang.org/x/tools/go/cfg/cfg.go @@ -9,7 +9,10 @@ // // The blocks of the CFG contain all the function's non-control // statements. The CFG does not contain control statements such as If, -// Switch, Select, and Branch, but does contain their subexpressions. 
+// Switch, Select, and Branch, but does contain their subexpressions; +// also, each block records the control statement (Block.Stmt) that +// gave rise to it and its relationship (Block.Kind) to that statement. +// // For example, this source code: // // if x := f(); x != nil { @@ -20,14 +23,14 @@ // // produces this CFG: // -// 1: x := f() +// 1: x := f() Body // x != nil // succs: 2, 3 -// 2: T() +// 2: T() IfThen // succs: 4 -// 3: F() +// 3: F() IfElse // succs: 4 -// 4: +// 4: IfDone // // The CFG does contain Return statements; even implicit returns are // materialized (at the position of the function's closing brace). @@ -50,6 +53,7 @@ import ( // // The entry point is Blocks[0]; there may be multiple return blocks. type CFG struct { + fset *token.FileSet Blocks []*Block // block[0] is entry; order otherwise undefined } @@ -64,9 +68,63 @@ type Block struct { Succs []*Block // successor nodes in the graph Index int32 // index within CFG.Blocks Live bool // block is reachable from entry + Kind BlockKind // block kind + Stmt ast.Stmt // statement that gave rise to this block (see BlockKind for details) + + succs2 [2]*Block // underlying array for Succs +} + +// A BlockKind identifies the purpose of a block. +// It also determines the possible types of its Stmt field. +type BlockKind uint8 + +const ( + KindInvalid BlockKind = iota // Stmt=nil + + KindUnreachable // unreachable block after {Branch,Return}Stmt / no-return call ExprStmt + KindBody // function body BlockStmt + KindForBody // body of ForStmt + KindForDone // block after ForStmt + KindForLoop // head of ForStmt + KindForPost // post condition of ForStmt + KindIfDone // block after IfStmt + KindIfElse // else block of IfStmt + KindIfThen // then block of IfStmt + KindLabel // labeled block of BranchStmt (Stmt may be nil for dangling label) + KindRangeBody // body of RangeStmt + KindRangeDone // block after RangeStmt + KindRangeLoop // head of RangeStmt + KindSelectCaseBody // body of SelectStmt + KindSelectDone // block after SelectStmt + KindSelectAfterCase // block after a CommClause + KindSwitchCaseBody // body of CaseClause + KindSwitchDone // block after {Type.}SwitchStmt + KindSwitchNextCase // secondary expression of a multi-expression CaseClause +) - comment string // for debugging - succs2 [2]*Block // underlying array for Succs +func (kind BlockKind) String() string { + return [...]string{ + KindInvalid: "Invalid", + KindUnreachable: "Unreachable", + KindBody: "Body", + KindForBody: "ForBody", + KindForDone: "ForDone", + KindForLoop: "ForLoop", + KindForPost: "ForPost", + KindIfDone: "IfDone", + KindIfElse: "IfElse", + KindIfThen: "IfThen", + KindLabel: "Label", + KindRangeBody: "RangeBody", + KindRangeDone: "RangeDone", + KindRangeLoop: "RangeLoop", + KindSelectCaseBody: "SelectCaseBody", + KindSelectDone: "SelectDone", + KindSelectAfterCase: "SelectAfterCase", + KindSwitchCaseBody: "SwitchCaseBody", + KindSwitchDone: "SwitchDone", + KindSwitchNextCase: "SwitchNextCase", + }[kind] } // New returns a new control-flow graph for the specified function body, @@ -82,7 +140,7 @@ func New(body *ast.BlockStmt, mayReturn func(*ast.CallExpr) bool) *CFG { mayReturn: mayReturn, cfg: new(CFG), } - b.current = b.newBlock("entry") + b.current = b.newBlock(KindBody, body) b.stmt(body) // Compute liveness (reachability from entry point), breadth-first. 
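// Illustrative sketch (not part of the vendored patch): building a CFG for a
// function body and printing it, which exercises the new Kind and Stmt block
// metadata via Format. The source string and the mayReturn policy (assume
// every call may return) are assumptions made up for the example.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"log"

	"golang.org/x/tools/go/cfg"
)

func main() {
	fset := token.NewFileSet()
	src := `package p

func f(x int) int {
	if x > 0 {
		return x
	}
	return -x
}`
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	fn := file.Decls[0].(*ast.FuncDecl)
	// Treat every call as one that may return, so no extra
	// KindUnreachable blocks are introduced for calls.
	g := cfg.New(fn.Body, func(*ast.CallExpr) bool { return true })
	// Blocks now print with their kind (e.g. Body, IfThen) and source line.
	fmt.Print(g.Format(fset))
}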
@@ -110,10 +168,22 @@ func New(body *ast.BlockStmt, mayReturn func(*ast.CallExpr) bool) *CFG { } func (b *Block) String() string { - return fmt.Sprintf("block %d (%s)", b.Index, b.comment) + return fmt.Sprintf("block %d (%s)", b.Index, b.comment(nil)) +} + +func (b *Block) comment(fset *token.FileSet) string { + s := b.Kind.String() + if fset != nil && b.Stmt != nil { + s = fmt.Sprintf("%s@L%d", s, fset.Position(b.Stmt.Pos()).Line) + } + return s } -// Return returns the return statement at the end of this block if present, nil otherwise. +// Return returns the return statement at the end of this block if present, nil +// otherwise. +// +// When control falls off the end of the function, the ReturnStmt is synthetic +// and its [ast.Node.End] position may be beyond the end of the file. func (b *Block) Return() (ret *ast.ReturnStmt) { if len(b.Nodes) > 0 { ret, _ = b.Nodes[len(b.Nodes)-1].(*ast.ReturnStmt) @@ -125,7 +195,7 @@ func (b *Block) Return() (ret *ast.ReturnStmt) { func (g *CFG) Format(fset *token.FileSet) string { var buf bytes.Buffer for _, b := range g.Blocks { - fmt.Fprintf(&buf, ".%d: # %s\n", b.Index, b.comment) + fmt.Fprintf(&buf, ".%d: # %s\n", b.Index, b.comment(fset)) for _, n := range b.Nodes { fmt.Fprintf(&buf, "\t%s\n", formatNode(fset, n)) } @@ -141,6 +211,35 @@ func (g *CFG) Format(fset *token.FileSet) string { return buf.String() } +// digraph emits AT&T GraphViz (dot) syntax for the CFG. +// TODO(adonovan): publish; needs a proposal. +func (g *CFG) digraph(fset *token.FileSet) string { + var buf bytes.Buffer + buf.WriteString("digraph CFG {\n") + buf.WriteString(" node [shape=box];\n") + for _, b := range g.Blocks { + // node label + var text bytes.Buffer + text.WriteString(b.comment(fset)) + for _, n := range b.Nodes { + fmt.Fprintf(&text, "\n%s", formatNode(fset, n)) + } + + // node and edges + fmt.Fprintf(&buf, " n%d [label=%q];\n", b.Index, &text) + for _, succ := range b.Succs { + fmt.Fprintf(&buf, " n%d -> n%d;\n", b.Index, succ.Index) + } + } + buf.WriteString("}\n") + return buf.String() +} + +// exposed to main.go +func digraph(g *CFG, fset *token.FileSet) string { + return g.digraph(fset) +} + func formatNode(fset *token.FileSet, n ast.Node) string { var buf bytes.Buffer format.Node(&buf, fset, n) diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 165ede0f8f..137cc8df1d 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -47,7 +47,7 @@ import ( func Find(importPath, srcDir string) (filename, path string) { cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) cmd.Dir = srcDir - out, err := cmd.CombinedOutput() + out, err := cmd.Output() if err != nil { return "", "" } @@ -128,15 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. 
if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go index 3fce480034..697974bb9b 100644 --- a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go @@ -57,20 +57,18 @@ import ( "go/build" "go/parser" "go/token" - "io/ioutil" "log" "os" + "os/exec" "path/filepath" "regexp" "strings" - - exec "golang.org/x/sys/execabs" ) // ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses // the output and returns the resulting ASTs. func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) { - tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C") + tmpdir, err := os.MkdirTemp("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C") if err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go index 7d94bbc1e5..2455be54f6 100644 --- a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go @@ -8,19 +8,22 @@ import ( "errors" "fmt" "go/build" - exec "golang.org/x/sys/execabs" + "os/exec" "strings" ) // pkgConfig runs pkg-config with the specified arguments and returns the flags it prints. func pkgConfig(mode string, pkgs []string) (flags []string, err error) { cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...) 
- out, err := cmd.CombinedOutput() + out, err := cmd.Output() if err != nil { s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err) if len(out) > 0 { s = fmt.Sprintf("%s: %s", s, out) } + if err, ok := err.(*exec.ExitError); ok && len(err.Stderr) > 0 { + s = fmt.Sprintf("%s\nstderr:\n%s", s, err.Stderr) + } return nil, errors.New(s) } if len(out) > 0 { diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index 18a002f82a..333676b7cf 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -8,42 +8,46 @@ package packagesdriver import ( "context" "fmt" - "go/types" "strings" "golang.org/x/tools/internal/gocommand" ) -var debug = false - -func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { +func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { inv.Verb = "list" inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) var goarch, compiler string if rawErr != nil { - if rawErrMsg := rawErr.Error(); strings.Contains(rawErrMsg, "cannot find main module") || strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. // TODO(matloob): Is this a problem in practice? inv.Verb = "env" inv.Args = []string{"GOARCH"} envout, enverr := gocmdRunner.Run(ctx, inv) if enverr != nil { - return nil, enverr + return "", "", enverr } goarch = strings.TrimSpace(envout.String()) compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr } else { - return nil, friendlyErr + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. 
+ return "", "", rawErr } } else { fields := strings.Fields(stdout.String()) if len(fields) < 2 { - return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", stdout.String(), stderr.String()) } goarch = fields[0] compiler = fields[1] } - return types.SizesFor(compiler, goarch), nil + return compiler, goarch, nil } diff --git a/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/golang.org/x/tools/go/loader/loader.go index edf62c2cc0..013c0f505b 100644 --- a/vendor/golang.org/x/tools/go/loader/loader.go +++ b/vendor/golang.org/x/tools/go/loader/loader.go @@ -23,7 +23,7 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/internal/cgo" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/versions" ) var ignoreVendor build.ImportMode @@ -1033,13 +1033,14 @@ func (imp *importer) newPackageInfo(path, dir string) *PackageInfo { Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), }, errorFunc: imp.conf.TypeChecker.Error, dir: dir, } - typeparams.InitInstanceInfo(&info.Info) + versions.InitFileVersions(&info.Info) // Copy the types.Config so we can vary it across PackageInfos. tc := imp.conf.TypeChecker diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index da4ab89fe6..a8d7b06ac0 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -5,12 +5,20 @@ /* Package packages loads Go packages for inspection and analysis. -The Load function takes as input a list of patterns and return a list of Package -structs describing individual packages matched by those patterns. -The LoadMode controls the amount of detail in the loaded packages. - -Load passes most patterns directly to the underlying build tool, -but all patterns with the prefix "query=", where query is a +The [Load] function takes as input a list of patterns and returns a +list of [Package] values describing individual packages matched by those +patterns. +A [Config] specifies configuration options, the most important of which is +the [LoadMode], which controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool. +The default build tool is the go command. +Its supported patterns are described at +https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. +Other build systems may be supported by providing a "driver"; +see [The driver protocol]. + +All patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -35,7 +43,7 @@ The Package struct provides basic information about the package, including - Imports, a map from source import strings to the Packages they name; - Types, the type information for the package's exported symbols; - Syntax, the parsed syntax trees for the package's source code; and - - TypeInfo, the result of a complete type-check of the package syntax trees. + - TypesInfo, the result of a complete type-check of the package syntax trees. (See the documentation for type Package for the complete list of fields and more detailed descriptions.) 
@@ -64,9 +72,31 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to the loader, so that the loader can interpret them +uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. + See the Example function for typical usage. + +# The driver protocol + +[Load] may be used to load Go packages even in Go projects that use +alternative build systems, by installing an appropriate "driver" +program for the build system and specifying its location in the +GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. + +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +those patterns identify. Drivers must also support the special "file=" +and "pattern=" patterns described above. + +The patterns are provided as positional command-line arguments. A +JSON-encoded [DriverRequest] message providing additional information +is written to the driver's standard input. The driver must write a +JSON-encoded [DriverResponse] message to its standard output. (This +message differs from the JSON schema produced by 'go list'.) */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 7242a0a7d2..4335c1eb14 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -2,46 +2,85 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file enables an external tool to intercept package requests. -// If the tool is present then its results are used in preference to -// the go list command. - package packages +// This file defines the protocol that enables an external "driver" +// tool to supply package metadata in place of 'go list'. + import ( "bytes" "encoding/json" "fmt" - exec "golang.org/x/sys/execabs" "os" + "os/exec" "strings" ) -// The Driver Protocol +// DriverRequest defines the schema of a request for package metadata +// from an external driver program. The JSON-encoded DriverRequest +// message is provided to the driver program's standard input. The +// query patterns are provided as command-line arguments. // -// The driver, given the inputs to a call to Load, returns metadata about the packages specified. -// This allows for different build systems to support go/packages by telling go/packages how the -// packages' source is organized. -// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in -// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package -// documentation in doc.go for the full description of the patterns that need to be supported. -// A driver receives as a JSON-serialized driverRequest struct in standard input and will -// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. - -// driverRequest is used to provide the portion of Load's Config that is needed by a driver. -type driverRequest struct { +// See the package documentation for an overview. 
+type DriverRequest struct { Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. Tests bool `json:"tests"` + // Overlay maps file paths (relative to the driver's working directory) to the byte contents // of overlay files. Overlay map[string][]byte `json:"overlay"` } +// DriverResponse defines the schema of a response from an external +// driver program, providing the results of a query for package +// metadata. The driver program must write a JSON-encoded +// DriverResponse message to its standard output. +// +// See the package documentation for an overview. +type DriverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the DriverResponse is ignored, and go/packages will fall back + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, it will fall back to the next driver in the list. + NotHandled bool + + // Compiler and Arch are the arguments to pass to types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports, if populated, will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) + // findExternalDriver returns the file path of a tool that supplies // the build system package structure, or "" if not found. 
// If GOPACKAGESDRIVER is set in the environment findExternalDriver returns its @@ -64,8 +103,8 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*driverResponse, error) { - req, err := json.Marshal(driverRequest{ + return func(cfg *Config, words ...string) (*DriverResponse, error) { + req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, BuildFlags: cfg.BuildFlags, @@ -92,7 +131,7 @@ func findExternalDriver(cfg *Config) driver { fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) } - var response driverResponse + var response DriverResponse if err := json.Unmarshal(buf.Bytes(), &response); err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 6bb7168d2e..22305d9c90 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -9,10 +9,9 @@ import ( "context" "encoding/json" "fmt" - "go/types" - "io/ioutil" "log" "os" + "os/exec" "path" "path/filepath" "reflect" @@ -22,7 +21,6 @@ import ( "sync" "unicode" - exec "golang.org/x/sys/execabs" "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" @@ -37,23 +35,23 @@ type goTooOldError struct { error } -// responseDeduper wraps a driverResponse, deduplicating its contents. +// responseDeduper wraps a DriverResponse, deduplicating its contents. type responseDeduper struct { seenRoots map[string]bool seenPackages map[string]*Package - dr *driverResponse + dr *DriverResponse } func newDeduper() *responseDeduper { return &responseDeduper{ - dr: &driverResponse{}, + dr: &DriverResponse{}, seenRoots: map[string]bool{}, seenPackages: map[string]*Package{}, } } -// addAll fills in r with a driverResponse. -func (r *responseDeduper) addAll(dr *driverResponse) { +// addAll fills in r with a DriverResponse. +func (r *responseDeduper) addAll(dr *DriverResponse) { for _, pkg := range dr.Packages { r.addPackage(pkg) } @@ -130,7 +128,7 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { +func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -148,16 +146,18 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { } // Fill in response.Sizes asynchronously if necessary. - var sizeserr error - var sizeswg sync.WaitGroup if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { - sizeswg.Add(1) + errCh := make(chan error) go func() { - var sizes types.Sizes - sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - // types.SizesFor always returns nil or a *types.StdSizes. 
- response.dr.Sizes, _ = sizes.(*types.StdSizes) - sizeswg.Done() + compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + response.dr.Compiler = compiler + response.dr.Arch = arch + errCh <- err + }() + defer func() { + if sizesErr := <-errCh; sizesErr != nil { + err = sizesErr + } }() } @@ -210,87 +210,10 @@ extractQueries: } } - // Only use go/packages' overlay processing if we're using a Go version - // below 1.16. Otherwise, go list handles it. - if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { - modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return nil, err - } - - var containsCandidates []string - if len(containFiles) > 0 { - containsCandidates = append(containsCandidates, modifiedPkgs...) - containsCandidates = append(containsCandidates, needPkgs...) - } - if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { - return nil, err - } - // Check candidate packages for containFiles. - if len(containFiles) > 0 { - for _, id := range containsCandidates { - pkg, ok := response.seenPackages[id] - if !ok { - response.addPackage(&Package{ - ID: id, - Errors: []Error{{ - Kind: ListError, - Msg: fmt.Sprintf("package %s expected but not seen", id), - }}, - }) - continue - } - for _, f := range containFiles { - for _, g := range pkg.GoFiles { - if sameFile(f, g) { - response.addRoot(id) - } - } - } - } - } - // Add root for any package that matches a pattern. This applies only to - // packages that are modified by overlays, since they are not added as - // roots automatically. - for _, pattern := range restPatterns { - match := matchPattern(pattern) - for _, pkgID := range modifiedPkgs { - pkg, ok := response.seenPackages[pkgID] - if !ok { - continue - } - if match(pkg.PkgPath) { - response.addRoot(pkg.ID) - } - } - } - } - - sizeswg.Wait() - if sizeserr != nil { - return nil, sizeserr - } + // (We may yet return an error due to defer.) return response.dr, nil } -func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { - if len(pkgs) == 0 { - return nil - } - dr, err := state.createDriverResponse(pkgs...) - if err != nil { - return err - } - for _, pkg := range dr.Packages { - response.addPackage(pkg) - } - _, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return err - } - return state.addNeededOverlayPackages(response, needPkgs) -} - func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. @@ -342,7 +265,7 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries // adhocPackage attempts to load or construct an ad-hoc package for a given // query, if the original call to the driver produced inadequate results. -func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { +func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) { response, err := state.createDriverResponse(query) if err != nil { return nil, err @@ -433,7 +356,7 @@ func otherFiles(p *jsonPackage) [][]string { // createDriverResponse uses the "go list" command to expand the pattern // words and return a response for the specified packages. 
-func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { +func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -460,7 +383,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse pkgs := make(map[string]*Package) additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. - response := &driverResponse{ + response := &DriverResponse{ GoVersion: goVersion, } for dec := json.NewDecoder(buf); dec.More(); { @@ -625,7 +548,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse } if pkg.PkgPath == "unsafe" { - pkg.GoFiles = nil // ignore fake unsafe.go file + pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) + } else if len(pkg.CompiledGoFiles) == 0 { + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + pkg.CompiledGoFiles = pkg.GoFiles } // Assume go list emits only absolute paths for Dir. @@ -663,16 +591,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse response.Roots = append(response.Roots, pkg.ID) } - // Work around for pre-go.1.11 versions of go list. - // TODO(matloob): they should be handled by the fallback. - // Can we delete this? - if len(pkg.CompiledGoFiles) == 0 { - pkg.CompiledGoFiles = pkg.GoFiles - } - // Temporary work-around for golang/go#39986. Parse filenames out of // error messages. This happens if there are unrecoverable syntax // errors in the source, so we can't match on a specific error message. + // + // TODO(rfindley): remove this heuristic, in favor of considering + // InvalidGoFiles from the list driver. if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { addFilenameFromPos := func(pos string) bool { split := strings.Split(pos, ":") @@ -891,6 +815,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string { // probably because you'd just get the TestMain. fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") fullargs = append(fullargs, words...) @@ -1100,7 +1033,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err if len(state.cfg.Overlay) == 0 { return "", func() {}, nil } - dir, err := ioutil.TempDir("", "gopackages-*") + dir, err := os.MkdirTemp("", "gopackages-*") if err != nil { return "", nil, err } @@ -1119,7 +1052,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err // Create a unique filename for the overlaid files, to avoid // creating nested directories. 
noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") - f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) + f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) if err != nil { return "", func() {}, err } @@ -1137,7 +1070,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err } // Write out the overlay file that contains the filepath mappings. filename = filepath.Join(dir, "overlay.json") - if err := ioutil.WriteFile(filename, b, 0665); err != nil { + if err := os.WriteFile(filename, b, 0665); err != nil { return "", func() {}, err } return filename, cleanup, nil diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index 9576b472f9..d823c474ad 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -6,314 +6,11 @@ package packages import ( "encoding/json" - "fmt" - "go/parser" - "go/token" - "os" "path/filepath" - "regexp" - "sort" - "strconv" - "strings" "golang.org/x/tools/internal/gocommand" ) -// processGolistOverlay provides rudimentary support for adding -// files that don't exist on disk to an overlay. The results can be -// sometimes incorrect. -// TODO(matloob): Handle unsupported cases, including the following: -// - determining the correct package to add given a new import path -func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { - havePkgs := make(map[string]string) // importPath -> non-test package ID - needPkgsSet := make(map[string]bool) - modifiedPkgsSet := make(map[string]bool) - - pkgOfDir := make(map[string][]*Package) - for _, pkg := range response.dr.Packages { - // This is an approximation of import path to id. This can be - // wrong for tests, vendored packages, and a number of other cases. - havePkgs[pkg.PkgPath] = pkg.ID - dir, err := commonDir(pkg.GoFiles) - if err != nil { - return nil, nil, err - } - if dir != "" { - pkgOfDir[dir] = append(pkgOfDir[dir], pkg) - } - } - - // If no new imports are added, it is safe to avoid loading any needPkgs. - // Otherwise, it's hard to tell which package is actually being loaded - // (due to vendoring) and whether any modified package will show up - // in the transitive set of dependencies (because new imports are added, - // potentially modifying the transitive set of dependencies). - var overlayAddsImports bool - - // If both a package and its test package are created by the overlay, we - // need the real package first. Process all non-test files before test - // files, and make the whole process deterministic while we're at it. - var overlayFiles []string - for opath := range state.cfg.Overlay { - overlayFiles = append(overlayFiles, opath) - } - sort.Slice(overlayFiles, func(i, j int) bool { - iTest := strings.HasSuffix(overlayFiles[i], "_test.go") - jTest := strings.HasSuffix(overlayFiles[j], "_test.go") - if iTest != jTest { - return !iTest // non-tests are before tests. 
- } - return overlayFiles[i] < overlayFiles[j] - }) - for _, opath := range overlayFiles { - contents := state.cfg.Overlay[opath] - base := filepath.Base(opath) - dir := filepath.Dir(opath) - var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant - var testVariantOf *Package // if opath is a test file, this is the package it is testing - var fileExists bool - isTestFile := strings.HasSuffix(opath, "_test.go") - pkgName, ok := extractPackageName(opath, contents) - if !ok { - // Don't bother adding a file that doesn't even have a parsable package statement - // to the overlay. - continue - } - // If all the overlay files belong to a different package, change the - // package name to that package. - maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir]) - nextPackage: - for _, p := range response.dr.Packages { - if pkgName != p.Name && p.ID != "command-line-arguments" { - continue - } - for _, f := range p.GoFiles { - if !sameFile(filepath.Dir(f), dir) { - continue - } - // Make sure to capture information on the package's test variant, if needed. - if isTestFile && !hasTestFiles(p) { - // TODO(matloob): Are there packages other than the 'production' variant - // of a package that this can match? This shouldn't match the test main package - // because the file is generated in another directory. - testVariantOf = p - continue nextPackage - } else if !isTestFile && hasTestFiles(p) { - // We're examining a test variant, but the overlaid file is - // a non-test file. Because the overlay implementation - // (currently) only adds a file to one package, skip this - // package, so that we can add the file to the production - // variant of the package. (https://golang.org/issue/36857 - // tracks handling overlays on both the production and test - // variant of a package). - continue nextPackage - } - if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { - // We have already seen the production version of the - // for which p is a test variant. - if hasTestFiles(p) { - testVariantOf = pkg - } - } - pkg = p - if filepath.Base(f) == base { - fileExists = true - } - } - } - // The overlay could have included an entirely new package or an - // ad-hoc package. An ad-hoc package is one that we have manually - // constructed from inadequate `go list` results for a file= query. - // It will have the ID command-line-arguments. - if pkg == nil || pkg.ID == "command-line-arguments" { - // Try to find the module or gopath dir the file is contained in. - // Then for modules, add the module opath to the beginning. - pkgPath, ok, err := state.getPkgPath(dir) - if err != nil { - return nil, nil, err - } - if !ok { - break - } - var forTest string // only set for x tests - isXTest := strings.HasSuffix(pkgName, "_test") - if isXTest { - forTest = pkgPath - pkgPath += "_test" - } - id := pkgPath - if isTestFile { - if isXTest { - id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) - } else { - id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) - } - } - if pkg != nil { - // TODO(rstambler): We should change the package's path and ID - // here. The only issue is that this messes with the roots. - } else { - // Try to reclaim a package with the same ID, if it exists in the response. - for _, p := range response.dr.Packages { - if reclaimPackage(p, id, opath, contents) { - pkg = p - break - } - } - // Otherwise, create a new package. 
- if pkg == nil { - pkg = &Package{ - PkgPath: pkgPath, - ID: id, - Name: pkgName, - Imports: make(map[string]*Package), - } - response.addPackage(pkg) - havePkgs[pkg.PkgPath] = id - // Add the production package's sources for a test variant. - if isTestFile && !isXTest && testVariantOf != nil { - pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) - // Add the package under test and its imports to the test variant. - pkg.forTest = testVariantOf.PkgPath - for k, v := range testVariantOf.Imports { - pkg.Imports[k] = &Package{ID: v.ID} - } - } - if isXTest { - pkg.forTest = forTest - } - } - } - } - if !fileExists { - pkg.GoFiles = append(pkg.GoFiles, opath) - // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior - // if the file will be ignored due to its build tags. - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) - modifiedPkgsSet[pkg.ID] = true - } - imports, err := extractImports(opath, contents) - if err != nil { - // Let the parser or type checker report errors later. - continue - } - for _, imp := range imports { - // TODO(rstambler): If the package is an x test and the import has - // a test variant, make sure to replace it. - if _, found := pkg.Imports[imp]; found { - continue - } - overlayAddsImports = true - id, ok := havePkgs[imp] - if !ok { - var err error - id, err = state.resolveImport(dir, imp) - if err != nil { - return nil, nil, err - } - } - pkg.Imports[imp] = &Package{ID: id} - // Add dependencies to the non-test variant version of this package as well. - if testVariantOf != nil { - testVariantOf.Imports[imp] = &Package{ID: id} - } - } - } - - // toPkgPath guesses the package path given the id. - toPkgPath := func(sourceDir, id string) (string, error) { - if i := strings.IndexByte(id, ' '); i >= 0 { - return state.resolveImport(sourceDir, id[:i]) - } - return state.resolveImport(sourceDir, id) - } - - // Now that new packages have been created, do another pass to determine - // the new set of missing packages. - for _, pkg := range response.dr.Packages { - for _, imp := range pkg.Imports { - if len(pkg.GoFiles) == 0 { - return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) - } - pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) - if err != nil { - return nil, nil, err - } - if _, ok := havePkgs[pkgPath]; !ok { - needPkgsSet[pkgPath] = true - } - } - } - - if overlayAddsImports { - needPkgs = make([]string, 0, len(needPkgsSet)) - for pkg := range needPkgsSet { - needPkgs = append(needPkgs, pkg) - } - } - modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) - for pkg := range modifiedPkgsSet { - modifiedPkgs = append(modifiedPkgs, pkg) - } - return modifiedPkgs, needPkgs, err -} - -// resolveImport finds the ID of a package given its import path. -// In particular, it will find the right vendored copy when in GOPATH mode. 
-func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { - env, err := state.getEnv() - if err != nil { - return "", err - } - if env["GOMOD"] != "" { - return importPath, nil - } - - searchDir := sourceDir - for { - vendorDir := filepath.Join(searchDir, "vendor") - exists, ok := state.vendorDirs[vendorDir] - if !ok { - info, err := os.Stat(vendorDir) - exists = err == nil && info.IsDir() - state.vendorDirs[vendorDir] = exists - } - - if exists { - vendoredPath := filepath.Join(vendorDir, importPath) - if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { - // We should probably check for .go files here, but shame on anyone who fools us. - path, ok, err := state.getPkgPath(vendoredPath) - if err != nil { - return "", err - } - if ok { - return path, nil - } - } - } - - // We know we've hit the top of the filesystem when we Dir / and get /, - // or C:\ and get C:\, etc. - next := filepath.Dir(searchDir) - if next == searchDir { - break - } - searchDir = next - } - return importPath, nil -} - -func hasTestFiles(p *Package) bool { - for _, f := range p.GoFiles { - if strings.HasSuffix(f, "_test.go") { - return true - } - } - return false -} - // determineRootDirs returns a mapping from absolute directories that could // contain code to their corresponding import path prefixes. func (state *golistState) determineRootDirs() (map[string]string, error) { @@ -384,192 +81,3 @@ func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { } return m, nil } - -func extractImports(filename string, contents []byte) ([]string, error) { - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? - if err != nil { - return nil, err - } - var res []string - for _, imp := range f.Imports { - quotedPath := imp.Path.Value - path, err := strconv.Unquote(quotedPath) - if err != nil { - return nil, err - } - res = append(res, path) - } - return res, nil -} - -// reclaimPackage attempts to reuse a package that failed to load in an overlay. -// -// If the package has errors and has no Name, GoFiles, or Imports, -// then it's possible that it doesn't yet exist on disk. -func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. - if pkg.ID != id { - return false - } - if len(pkg.Errors) != 1 { - return false - } - if pkg.Name != "" || pkg.ExportFile != "" { - return false - } - if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { - return false - } - if len(pkg.Imports) > 0 { - return false - } - pkgName, ok := extractPackageName(filename, contents) - if !ok { - return false - } - pkg.Name = pkgName - pkg.Errors = nil - return true -} - -func extractPackageName(filename string, contents []byte) (string, bool) { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? - if err != nil { - return "", false - } - return f.Name.Name, true -} - -// commonDir returns the directory that all files are in, "" if files is empty, -// or an error if they aren't in the same directory. 
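Note: the removed resolveImport implements GOPATH-mode vendor resolution by walking parent directories. A minimal standalone sketch of that walk; findVendored is a hypothetical name, and the original additionally caches results and maps the directory back to a package path:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// findVendored walks from dir toward the filesystem root and returns
// the first existing vendor/<importPath> directory, or "" if none.
func findVendored(dir, importPath string) string {
	for {
		cand := filepath.Join(dir, "vendor", importPath)
		if info, err := os.Stat(cand); err == nil && info.IsDir() {
			return cand
		}
		// filepath.Dir is a fixed point at the root ("/" or `C:\`), so stop there.
		next := filepath.Dir(dir)
		if next == dir {
			return ""
		}
		dir = next
	}
}

func main() {
	dir, _ := os.Getwd()
	fmt.Println(findVendored(dir, "golang.org/x/tools/go/packages"))
}
```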
-func commonDir(files []string) (string, error) { - seen := make(map[string]bool) - for _, f := range files { - seen[filepath.Dir(f)] = true - } - if len(seen) > 1 { - return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen) - } - for k := range seen { - // seen has only one element; return it. - return k, nil - } - return "", nil // no files -} - -// It is possible that the files in the disk directory dir have a different package -// name from newName, which is deduced from the overlays. If they all have a different -// package name, and they all have the same package name, then that name becomes -// the package name. -// It returns true if it changes the package name, false otherwise. -func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) { - names := make(map[string]int) - for _, p := range pkgsOfDir { - names[p.Name]++ - } - if len(names) != 1 { - // some files are in different packages - return - } - var oldName string - for k := range names { - oldName = k - } - if newName == oldName { - return - } - // We might have a case where all of the package names in the directory are - // the same, but the overlay file is for an x test, which belongs to its - // own package. If the x test does not yet exist on disk, we may not yet - // have its package name on disk, but we should not rename the packages. - // - // We use a heuristic to determine if this file belongs to an x test: - // The test file should have a package name whose package name has a _test - // suffix or looks like "newName_test". - maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") - if isTestFile && maybeXTest { - return - } - for _, p := range pkgsOfDir { - p.Name = newName - } -} - -// This function is copy-pasted from -// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. -// It should be deleted when we remove support for overlays from go/packages. -// -// NOTE: This does not handle any ./... or ./ style queries, as this function -// doesn't know the working directory. -// -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -// Unfortunately, there are two special cases. Quoting "go help packages": -// -// First, /... at the end of the pattern can match an empty string, -// so that net/... matches both net and packages in its subdirectories, like net/http. -// Second, any slash-separated pattern element containing a wildcard never -// participates in a match of the "vendor" element in the path of a vendored -// package, so that ./... does not match packages in subdirectories of -// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. -// Note, however, that a directory named vendor that itself contains code -// is not a vendored package: cmd/vendor would be a command named vendor, -// and the pattern cmd/... matches it. -func matchPattern(pattern string) func(name string) bool { - // Convert pattern to regular expression. - // The strategy for the trailing /... is to nest it in an explicit ? expression. - // The strategy for the vendor exclusion is to change the unmatchable - // vendor strings to a disallowed code point (vendorChar) and to use - // "(anything but that codepoint)*" as the implementation of the ... wildcard. 
- // This is a bit complicated but the obvious alternative, - // namely a hand-written search like in most shell glob matchers, - // is too easy to make accidentally exponential. - // Using package regexp guarantees linear-time matching. - - const vendorChar = "\x00" - - if strings.Contains(pattern, vendorChar) { - return func(name string) bool { return false } - } - - re := regexp.QuoteMeta(pattern) - re = replaceVendor(re, vendorChar) - switch { - case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): - re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` - case re == vendorChar+`/\.\.\.`: - re = `(/vendor|/` + vendorChar + `/\.\.\.)` - case strings.HasSuffix(re, `/\.\.\.`): - re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` - } - re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) - - reg := regexp.MustCompile(`^` + re + `$`) - - return func(name string) bool { - if strings.Contains(name, vendorChar) { - return false - } - return reg.MatchString(replaceVendor(name, vendorChar)) - } -} - -// replaceVendor returns the result of replacing -// non-trailing vendor path elements in x with repl. -func replaceVendor(x, repl string) string { - if !strings.Contains(x, "vendor") { - return x - } - elem := strings.Split(x, "/") - for i := 0; i < len(elem)-1; i++ { - if elem[i] == "vendor" { - elem[i] = repl - } - } - return strings.Join(elem, "/") -} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0f1505b808..f33b0afc22 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,7 +16,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -28,8 +27,8 @@ import ( "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. @@ -207,41 +206,6 @@ type Config struct { Overlay map[string][]byte } -// driver is the type for functions that query the build system for the -// packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*driverResponse, error) - -// driverResponse contains the results for a driver query. -type driverResponse struct { - // NotHandled is returned if the request can't be handled by the current - // driver. If an external driver returns a response with NotHandled, the - // rest of the driverResponse is ignored, and go/packages will fallback - // to the next driver. If go/packages is extended in the future to support - // lists of multiple drivers, go/packages will fall back to the next driver. - NotHandled bool - - // Sizes, if not nil, is the types.Sizes to use when type checking. - Sizes *types.StdSizes - - // Roots is the set of package IDs that make up the root packages. - // We have to encode this separately because when we encode a single package - // we cannot know if it is one of the roots as that requires knowledge of the - // graph it is part of. - Roots []string `json:",omitempty"` - - // Packages is the full set of packages in the graph. - // The packages are not connected into a graph. - // The Imports if populated will be stubs that only have their ID set. - // Imports will be connected and then type and syntax information added in a - // later pass (see refine). 
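Note: the vendor-exclusion semantics quoted above are subtle. Assuming the removed matchPattern and replaceVendor are pasted into a main package, a quick sanity check of the documented cases would look like:

```go
// Paste the removed matchPattern and replaceVendor into this package to run.
package main

import "fmt"

func main() {
	for _, tc := range []struct{ pattern, name string }{
		{"net/...", "net"},               // true: trailing /... may match the empty string
		{"net/...", "net/http"},          // true
		{"./...", "./vendor/foo"},        // false: a wildcard element never matches "vendor"
		{"./vendor/...", "./vendor/foo"}, // true: an explicit vendor element does
	} {
		fmt.Println(tc.pattern, tc.name, matchPattern(tc.pattern)(tc.name))
	}
}
```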
- Packages []*Package - - // GoVersion is the minor version number used by the driver - // (e.g. the go command on the PATH) when selecting .go files. - // Zero means unknown. - GoVersion int -} - // Load loads and returns the Go packages named by the given patterns. // // Config specifies loading options; @@ -257,31 +221,55 @@ type driverResponse struct { // proceeding with further analysis. The PrintErrors function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { - l := newLoader(cfg) - response, err := defaultDriver(&l.Config, patterns...) + ld := newLoader(cfg) + response, external, err := defaultDriver(&ld.Config, patterns...) if err != nil { return nil, err } - l.sizes = response.Sizes - return l.refine(response) + + ld.sizes = types.SizesFor(response.Compiler, response.Arch) + if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 { + // Type size information is needed but unavailable. + if external { + // An external driver may fail to populate the Compiler/GOARCH fields, + // especially since they are relatively new (see #63700). + // Provide a sensible fallback in this case. + ld.sizes = types.SizesFor("gc", runtime.GOARCH) + if ld.sizes == nil { // gccgo-only arch + ld.sizes = types.SizesFor("gc", "amd64") + } + } else { + // Go list should never fail to deliver accurate size information. + // Reject the whole Load since the error is the same for every package. + return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q", + response.Compiler, response.Arch) + } + } + + return ld.refine(response) } // defaultDriver is a driver that implements go/packages' fallback behavior. // It will try to request to an external driver, if one exists. If there's // no external driver, or the driver returns a response with NotHandled set, // defaultDriver will fall back to the go list driver. -func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { - driver := findExternalDriver(cfg) - if driver == nil { - driver = goListDriver +// The boolean result indicates that an external driver handled the request. +func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) { + if driver := findExternalDriver(cfg); driver != nil { + response, err := driver(cfg, patterns...) + if err != nil { + return nil, false, err + } else if !response.NotHandled { + return response, true, nil + } + // (fall through) } - response, err := driver(cfg, patterns...) + + response, err := goListDriver(cfg, patterns...) if err != nil { - return response, err - } else if response.NotHandled { - return goListDriver(cfg, patterns...) + return nil, false, err } - return response, nil + return response, false, nil } // A Package describes a loaded Go package. @@ -308,6 +296,9 @@ type Package struct { TypeErrors []types.Error // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. 
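Note: the new size handling in Load falls back to types.SizesFor("gc", runtime.GOARCH) precisely because SizesFor returns nil for unknown compiler/arch pairs. A small demonstration:

```go
package main

import (
	"fmt"
	"go/types"
	"runtime"
)

func main() {
	fmt.Println(types.SizesFor("gc", runtime.GOARCH)) // e.g. &{8 8}
	fmt.Println(types.SizesFor("gc", "amd64"))        // &{8 8}
	fmt.Println(types.SizesFor("unknown", "amd64"))   // <nil>: Load must not use this blindly
}
```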
GoFiles []string // CompiledGoFiles lists the absolute file paths of the package's source @@ -407,12 +398,6 @@ func init() { packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { - return config.(*Config).gocmdRunner - } - packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { - config.(*Config).gocmdRunner = runner - } packagesinternal.SetModFile = func(config interface{}, value string) { config.(*Config).modFile = value } @@ -549,7 +534,7 @@ type loaderPackage struct { type loader struct { pkgs map[string]*loaderPackage Config - sizes types.Sizes + sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue parseCacheMu sync.Mutex exportMu sync.Mutex // enforces mutual exclusion of exportdata operations @@ -627,9 +612,9 @@ func newLoader(cfg *Config) *loader { return ld } -// refine connects the supplied packages into a graph and then adds type and +// refine connects the supplied packages into a graph and then adds type // and syntax information as requested by the LoadMode. -func (ld *loader) refine(response *driverResponse) ([]*Package, error) { +func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { roots := response.Roots rootMap := make(map[string]int, len(roots)) for i, root := range roots { @@ -674,39 +659,38 @@ func (ld *loader) refine(response *driverResponse) ([]*Package, error) { } } - // Materialize the import graph. - - const ( - white = 0 // new - grey = 1 // in progress - black = 2 // complete - ) - - // visit traverses the import graph, depth-first, - // and materializes the graph as Packages.Imports. - // - // Valid imports are saved in the Packages.Import map. - // Invalid imports (cycles and missing nodes) are saved in the importErrors map. - // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. - // - // visit returns whether the package needs src or has a transitive - // dependency on a package that does. These are the only packages - // for which we load source code. - var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - var srcPkgs []*loaderPackage - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: - panic("internal error: grey node") - } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - // If NeedImports isn't set, the imports fields will all be zeroed out. - if ld.Mode&NeedImports != 0 { + if ld.Mode&NeedImports != 0 { + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, + // the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. 
+ var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports lpkg.Imports = make(map[string]*Package, len(stubs)) for importPath, ipkg := range stubs { var importErr error @@ -730,40 +714,39 @@ func (ld *loader) refine(response *driverResponse) ([]*Package, error) { } lpkg.Imports[importPath] = imp.Package } - } - if lpkg.needsrc { - srcPkgs = append(srcPkgs, lpkg) - } - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes - } - stack = stack[:len(stack)-1] // pop - lpkg.color = black - return lpkg.needsrc - } + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true + } + } - if ld.Mode&NeedImports == 0 { - // We do this to drop the stub import packages that we are not even going to try to resolve. - for _, lpkg := range initial { - lpkg.Imports = nil + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc } - } else { + // For each initial package, create its import DAG. for _, lpkg := range initial { visit(lpkg) } - } - if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { - for _, lpkg := range srcPkgs { - // Complete type information is required for the - // immediate dependencies of each source package. - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - imp.needtypes = true - } + + } else { + // !NeedImports: drop the stub (ID-only) import packages + // that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil } } + // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). 
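Note: the materialization loop above is a standard three-color DFS. A toy version with illustrative names, showing why a grey node on entry can only mean an import cycle:

```go
package main

import "fmt"

// white = unvisited, grey = on the stack, black = done.
type node struct {
	id      string
	imports []string
	color   int // 0=white, 1=grey, 2=black
}

func visit(all map[string]*node, n *node) {
	switch n.color {
	case 2: // black: already materialized
		return
	case 1: // grey: only possible if the graph has a cycle
		panic("internal error: grey node")
	}
	n.color = 1
	for _, id := range n.imports {
		visit(all, all[id])
	}
	n.color = 2
	fmt.Println("materialized", n.id)
}

func main() {
	all := map[string]*node{
		"a": {id: "a", imports: []string{"b", "c"}},
		"b": {id: "b", imports: []string{"c"}},
		"c": {id: "c"},
	}
	visit(all, all["a"]) // prints c, b, a
}
```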
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { @@ -997,10 +980,11 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), } - typeparams.InitInstanceInfo(lpkg.TypesInfo) + versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1038,7 +1022,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, Error: appendError, - Sizes: ld.sizes, + Sizes: ld.sizes, // may be nil + } + if lpkg.Module != nil && lpkg.Module.GoVersion != "" { + typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) } if (ld.Mode & typecheckCgo) != 0 { if !typesinternal.SetUsesCgo(tc) { @@ -1119,7 +1106,7 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var err error if src == nil { ioLimit <- true // wait - src, err = ioutil.ReadFile(filename) + src, err = os.ReadFile(filename) <-ioLimit // signal } if err != nil { diff --git a/vendor/golang.org/x/tools/go/ssa/builder.go b/vendor/golang.org/x/tools/go/ssa/builder.go index be8d36a6ee..72e906c385 100644 --- a/vendor/golang.org/x/tools/go/ssa/builder.go +++ b/vendor/golang.org/x/tools/go/ssa/builder.go @@ -4,106 +4,73 @@ package ssa -// This file implements the BUILD phase of SSA construction. +// This file defines the builder, which builds SSA-form IR for function bodies. // -// SSA construction has two phases, CREATE and BUILD. In the CREATE phase -// (create.go), all packages are constructed and type-checked and -// definitions of all package members are created, method-sets are -// computed, and wrapper methods are synthesized. -// ssa.Packages are created in arbitrary order. +// SSA construction has two phases, "create" and "build". First, one +// or more packages are created in any order by a sequence of calls to +// CreatePackage, either from syntax or from mere type information. +// Each created package has a complete set of Members (const, var, +// type, func) that can be accessed through methods like +// Program.FuncValue. // -// In the BUILD phase (builder.go), the builder traverses the AST of -// each Go source function and generates SSA instructions for the -// function body. Initializer expressions for package-level variables -// are emitted to the package's init() function in the order specified -// by go/types.Info.InitOrder, then code for each function in the -// package is generated in lexical order. -// The BUILD phases for distinct packages are independent and are -// executed in parallel. +// It is not necessary to call CreatePackage for all dependencies of +// each syntax package, only for its direct imports. (In future +// perhaps even this restriction may be lifted.) // -// TODO(adonovan): indeed, building functions is now embarrassingly parallel. -// Audit for concurrency then benchmark using more goroutines. +// Second, packages created from syntax are built, by one or more +// calls to Package.Build, which may be concurrent; or by a call to +// Program.Build, which builds all packages in parallel. Building +// traverses the type-annotated syntax tree of each function body and +// creates SSA-form IR, a control-flow graph of instructions, +// populating fields such as Function.Body, .Params, and others. 
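Note: typesinternal.SetGoVersion above appears to be a compatibility shim; on current toolchains the same effect is the exported types.Config.GoVersion field (Go 1.21+). A sketch of what the per-module version buys (assuming a Go 1.22+ toolchain to parse the source):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	src := `package p
func f() { for i := range 3 { _ = i } }` // range-over-int is a go1.22 feature
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{GoVersion: "go1.21"} // pretend the module says go 1.21
	_, err = conf.Check("p", fset, []*ast.File{f}, nil)
	fmt.Println(err) // roughly: "cannot range over 3 ... requires go1.22 or later"
}
```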
// -// State: +// Building may create additional methods, including: +// - wrapper methods (e.g. for embedding, or implicit &recv) +// - bound method closures (e.g. for use(recv.f)) +// - thunks (e.g. for use(I.f) or use(T.f)) +// - generic instances (e.g. to produce f[int] from f[any]). +// As these methods are created, they are added to the build queue, +// and then processed in turn, until a fixed point is reached. +// Since these methods might belong to packages that were not +// created (by a call to CreatePackage), their Pkg field is unset. // -// The Package's and Program's indices (maps) are populated and -// mutated during the CREATE phase, but during the BUILD phase they -// remain constant. The sole exception is Prog.methodSets and its -// related maps, which are protected by a dedicated mutex. +// Instances of generic functions may be either instantiated (f[int] +// is a copy of f[T] with substitutions) or wrapped (f[int] delegates +// to f[T]), depending on the availability of generic syntax and the +// InstantiateGenerics mode flag. // -// Generic functions declared in a package P can be instantiated from functions -// outside of P. This happens independently of the CREATE and BUILD phase of P. +// Each package has an initializer function named "init" that calls +// the initializer functions of each direct import, computes and +// assigns the initial value of each global variable, and calls each +// source-level function named "init". (These generate SSA functions +// named "init#1", "init#2", etc.) // -// Locks: +// Runtime types // -// Mutexes are currently acquired according to the following order: -// Prog.methodsMu ⊃ canonizer.mu ⊃ printMu -// where x ⊃ y denotes that y can be acquired while x is held -// and x cannot be acquired while y is held. +// Each MakeInterface operation is a conversion from a non-interface +// type to an interface type. The semantics of this operation requires +// a runtime type descriptor, which is the type portion of an +// interface, and the value abstracted by reflect.Type. // -// Synthetics: +// The program accumulates all non-parameterized types that are +// encountered as MakeInterface operands, along with all types that +// may be derived from them using reflection. This set is available as +// Program.RuntimeTypes, and the methods of these types may be +// reachable via interface calls or reflection even if they are never +// referenced from the SSA IR. (In practice, algorithms such as RTA +// that compute reachability from package main perform their own +// tracking of runtime types at a finer grain, so this feature is not +// very useful.) // -// During the BUILD phase new functions can be created and built. These include: -// - wrappers (wrappers, bounds, thunks) -// - generic function instantiations -// These functions do not belong to a specific Pkg (Pkg==nil). Instead the -// Package that led to them being CREATED is obligated to ensure these -// are BUILT during the BUILD phase of the Package. +// Function literals // -// Runtime types: +// Anonymous functions must be built as soon as they are encountered, +// as it may affect locals of the enclosing function, but they are not +// marked 'built' until the end of the outermost enclosing function. +// (Among other things, this causes them to be logged in top-down order.) // -// A concrete type is a type that is fully monomorphized with concrete types, -// i.e. it cannot reach a TypeParam type. -// Some concrete types require full runtime type information.
Cases -// include checking whether a type implements an interface or -// interpretation by the reflect package. All such types that may require -// this information will have all of their method sets built and will be added to Prog.methodSets. -// A type T is considered to require runtime type information if it is -// a runtime type and has a non-empty method set and either: -// - T flows into a MakeInterface instructions, -// - T appears in a concrete exported member, or -// - T is a type reachable from a type S that has non-empty method set. -// For any such type T, method sets must be created before the BUILD -// phase of the package is done. -// -// Function literals: -// -// The BUILD phase of a function literal (anonymous function) is tied to the -// BUILD phase of the enclosing parent function. The FreeVars of an anonymous -// function are discovered by building the anonymous function. This in turn -// changes which variables must be bound in a MakeClosure instruction in the -// parent. Anonymous functions also track where they are referred to in their -// parent function. -// -// Happens-before: -// -// The above discussion leads to the following happens-before relation for -// the BUILD and CREATE phases. -// The happens-before relation (with X 0 { targs := fn.subst.types(instanceArgs(fn.info, e)) - callee = fn.Prog.needsInstance(callee, targs, b.created) + callee = callee.instance(targs, b.created) } return callee } // Local var. - return emitLoad(fn, fn.lookup(obj, false)) // var (address) + return emitLoad(fn, fn.lookup(obj.(*types.Var), false)) // var (address) case *ast.SelectorExpr: sel := fn.selection(e) @@ -823,7 +786,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { case types.MethodExpr: // (*T).f or T.f, the method f from the method-set of type T. // The result is a "thunk". - thunk := makeThunk(fn.Prog, sel, b.created) + thunk := createThunk(fn.Prog, sel, b.created) return emitConv(fn, thunk, fn.typ(tv.Type)) case types.MethodVal: @@ -831,14 +794,14 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { // The result is a "bound". obj := sel.obj.(*types.Func) rt := fn.typ(recvType(obj)) - wantAddr := isPointer(rt) + _, wantAddr := deref(rt) escaping := true v := b.receiver(fn, e.X, wantAddr, escaping, sel) if types.IsInterface(rt) { // If v may be an interface type I (after instantiating), // we must emit a check that v is non-nil. - if recv, ok := sel.recv.(*typeparams.TypeParam); ok { + if recv, ok := aliases.Unalias(sel.recv).(*types.TypeParam); ok { // Emit a nil check if any possible instantiation of the // type parameter is an interface type. if typeSetOf(recv).Len() > 0 { @@ -858,7 +821,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { } else { // non-type param interface // Emit nil check: typeassert v.(I). 
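Note: the "thunk" and "bound" terms above correspond to ordinary method expressions and method values in source. For reference, the two forms the builder is lowering:

```go
package main

import "fmt"

type T struct{ n int }

func (t T) Add(m int) int { return t.n + m }

func main() {
	t := T{n: 1}
	thunk := T.Add // method expression: func(T, int) int, lowered via createThunk
	bound := t.Add // method value: receiver captured, lowered via createBound + MakeClosure
	fmt.Println(thunk(t, 2), bound(2)) // 3 3
}
```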
- emitTypeAssert(fn, v, rt, token.NoPos) + emitTypeAssert(fn, v, rt, e.Sel.Pos()) } } if targs := receiverTypeArgs(obj); len(targs) > 0 { @@ -866,7 +829,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { obj = fn.Prog.canon.instantiateMethod(obj, fn.subst.types(targs), fn.Prog.ctxt) } c := &MakeClosure{ - Fn: makeBound(fn.Prog, obj, b.created), + Fn: createBound(fn.Prog, obj, b.created), Bindings: []Value{v}, } c.setPos(e.Sel.Pos()) @@ -884,7 +847,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { panic("unexpected expression-relative selector") - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: // f[X, Y] must be a generic function if !instance(fn.info, e.X) { panic("unexpected expression-could not match index list to instantiation") @@ -952,14 +915,14 @@ func (b *builder) stmtList(fn *Function, list []ast.Stmt) { // returns the effective receiver after applying the implicit field // selections of sel. // -// wantAddr requests that the result is an an address. If +// wantAddr requests that the result is an address. If // !sel.indirect, this may require that e be built in addr() mode; it // must thus be addressable. // // escaping is defined as per builder.addr(). func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *selection) Value { var v Value - if wantAddr && !sel.indirect && !isPointer(fn.typeOf(e)) { + if _, eptr := deref(fn.typeOf(e)); wantAddr && !sel.indirect && !eptr { v = b.addr(fn, e, escaping).address(fn) } else { v = b.expr(fn, e) @@ -968,7 +931,10 @@ func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, se last := len(sel.index) - 1 // The position of implicit selection is the position of the inducing receiver expression. v = emitImplicitSelections(fn, v, sel.index[:last], e.Pos()) - if !wantAddr && isPointer(v.Type()) { + if types.IsInterface(v.Type()) { + // When v is an interface, sel.Kind()==MethodValue and v.f is invoked. + // So v is not loaded, even if v has a pointer core type. + } else if _, vptr := deref(v.Type()); !wantAddr && vptr { v = emitLoad(fn, v) } return v @@ -987,7 +953,7 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { obj := sel.obj.(*types.Func) recv := recvType(obj) - wantAddr := isPointer(recv) + _, wantAddr := deref(recv) escaping := true v := b.receiver(fn, selector.X, wantAddr, escaping, sel) if types.IsInterface(recv) { @@ -996,11 +962,7 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { c.Method = obj } else { // "Call"-mode call. - callee := fn.Prog.originFunc(obj) - if callee.typeparams.Len() > 0 { - callee = fn.Prog.needsInstance(callee, receiverTypeArgs(obj), b.created) - } - c.Value = callee + c.Value = fn.Prog.objectMethod(obj, b.created) c.Args = append(c.Args, v) } return @@ -1092,9 +1054,8 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx } else { // Replace a suffix of args with a slice containing it. 
at := types.NewArray(vt, int64(len(varargs))) - a := emitNew(fn, at, token.NoPos) + a := emitNew(fn, at, token.NoPos, "varargs") a.setPos(e.Rparen) - a.Comment = "varargs" for i, arg := range varargs { iaddr := &IndexAddr{ X: a, @@ -1141,7 +1102,7 @@ func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { // 1:1 assignment for i, id := range spec.Names { if !isBlankIdent(id) { - fn.addLocalForIdent(id) + emitLocalVar(fn, identVar(fn, id)) } lval := b.addr(fn, id, false) // non-escaping b.assign(fn, lval, spec.Values[i], true, nil) @@ -1152,7 +1113,7 @@ func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { // Locals are implicitly zero-initialized. for _, id := range spec.Names { if !isBlankIdent(id) { - lhs := fn.addLocalForIdent(id) + lhs := emitLocalVar(fn, identVar(fn, id)) if fn.debugInfo() { emitDebugRef(fn, id, lhs, true) } @@ -1164,7 +1125,7 @@ func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { tuple := b.exprN(fn, spec.Values[0]) for i, id := range spec.Names { if !isBlankIdent(id) { - fn.addLocalForIdent(id) + emitLocalVar(fn, identVar(fn, id)) lhs := b.addr(fn, id, false) // non-escaping lhs.store(fn, emitExtract(fn, tuple, i)) } @@ -1184,8 +1145,8 @@ func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) { var lval lvalue = blank{} if !isBlankIdent(lhs) { if isDef { - if obj := fn.info.Defs[lhs.(*ast.Ident)]; obj != nil { - fn.addNamedLocal(obj) + if obj, ok := fn.info.Defs[lhs.(*ast.Ident)].(*types.Var); ok { + emitLocalVar(fn, obj) isZero[i] = true } } @@ -1253,37 +1214,13 @@ func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { // literal has type *T behaves like &T{}. // In that case, addr must hold a T, not a *T. func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { - typ := deref(fn.typeOf(e)) // type with name [may be type param] - t := deref(typeparams.CoreType(typ)).Underlying() // core type for comp lit case - // Computing typ and t is subtle as these handle pointer types. - // For example, &T{...} is valid even for maps and slices. - // Also typ should refer to T (not *T) while t should be the core type of T. 
- // - // To show the ordering to take into account, consider the composite literal - // expressions `&T{f: 1}` and `{f: 1}` within the expression `[]S{{f: 1}}` here: - // type N struct{f int} - // func _[T N, S *N]() { - // _ = &T{f: 1} - // _ = []S{{f: 1}} - // } - // For `&T{f: 1}`, we compute `typ` and `t` as: - // typeOf(&T{f: 1}) == *T - // deref(*T) == T (typ) - // CoreType(T) == N - // deref(N) == N - // N.Underlying() == struct{f int} (t) - // For `{f: 1}` in `[]S{{f: 1}}`, we compute `typ` and `t` as: - // typeOf({f: 1}) == S - // deref(S) == S (typ) - // CoreType(S) == *N - // deref(*N) == N - // N.Underlying() == struct{f int} (t) - switch t := t.(type) { + typ, _ := deref(fn.typeOf(e)) // type with name [may be type param] + switch t := typeparams.CoreType(typ).(type) { case *types.Struct: if !isZero && len(e.Elts) != t.NumFields() { // memclear - sb.store(&address{addr, e.Lbrace, nil}, - zeroValue(fn, deref(addr.Type()))) + zt, _ := deref(addr.Type()) + sb.store(&address{addr, e.Lbrace, nil}, zeroConst(zt)) isZero = true } for i, e := range e.Elts { @@ -1315,20 +1252,18 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero case *types.Array, *types.Slice: var at *types.Array var array Value - switch t := t.(type) { + switch t := aliases.Unalias(t).(type) { case *types.Slice: at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts)) - alloc := emitNew(fn, at, e.Lbrace) - alloc.Comment = "slicelit" - array = alloc + array = emitNew(fn, at, e.Lbrace, "slicelit") case *types.Array: at = t array = addr if !isZero && int64(len(e.Elts)) != at.Len() { // memclear - sb.store(&address{array, e.Lbrace, nil}, - zeroValue(fn, deref(array.Type()))) + zt, _ := deref(array.Type()) + sb.store(&address{array, e.Lbrace, nil}, zeroConst(zt)) } } @@ -1381,8 +1316,13 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero // map[*struct{}]bool{{}: true} // An &-operation may be implied: // map[*struct{}]bool{&struct{}{}: true} + wantAddr := false + if _, ok := unparen(e.Key).(*ast.CompositeLit); ok { + _, wantAddr = deref(t.Key()) + } + var key Value - if _, ok := unparen(e.Key).(*ast.CompositeLit); ok && isPointer(t.Key()) { + if wantAddr { // A CompositeLit never evaluates to a pointer, // so if the type of the location is a pointer, // an &-operation is implied. @@ -1409,7 +1349,7 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, m) default: - panic("unexpected CompositeLit type: " + t.String()) + panic("unexpected CompositeLit type: " + typ.String()) } } @@ -1603,13 +1543,13 @@ func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lbl } func (b *builder) typeCaseBody(fn *Function, cc *ast.CaseClause, x Value, done *BasicBlock) { - if obj := fn.info.Implicits[cc]; obj != nil { + if obj, ok := fn.info.Implicits[cc].(*types.Var); ok { // In a switch y := x.(type), each case clause // implicitly declares a distinct object y. // In a single-type case, y has that type. // In multi-type cases, 'case nil' and default, // y has the same type as the interface operand. 
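Note: the typeCaseBody change keeps the spec behavior that `switch y := x.(type)` declares a distinct y in every clause. At the source level:

```go
package main

import "fmt"

func describe(x interface{}) {
	switch y := x.(type) { // each clause declares its own y
	case int:
		fmt.Println("int", y+1) // y has type int here
	case string:
		fmt.Println("string", len(y)) // y has type string here
	default:
		fmt.Println("other", y) // y has the interface operand's type
	}
}

func main() {
	describe(3)
	describe("hi")
	describe(3.14)
}
```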
- emitStore(fn, fn.addNamedLocal(obj), x, obj.Pos()) + emitStore(fn, emitLocalVar(fn, obj), x, obj.Pos()) } fn.targets = &targets{ tail: fn.targets, @@ -1758,7 +1698,7 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { case *ast.AssignStmt: // x := <-states[state].Chan if comm.Tok == token.DEFINE { - fn.addLocalForIdent(comm.Lhs[0].(*ast.Ident)) + emitLocalVar(fn, identVar(fn, comm.Lhs[0].(*ast.Ident))) } x := b.addr(fn, comm.Lhs[0], false) // non-escaping v := emitExtract(fn, sel, r) @@ -1769,7 +1709,7 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { if len(comm.Lhs) == 2 { // x, ok := ... if comm.Tok == token.DEFINE { - fn.addLocalForIdent(comm.Lhs[1].(*ast.Ident)) + emitLocalVar(fn, identVar(fn, comm.Lhs[1].(*ast.Ident))) } ok := b.addr(fn, comm.Lhs[1], false) // non-escaping ok.store(fn, emitExtract(fn, sel, 1)) @@ -1804,20 +1744,31 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { // forStmt emits to fn code for the for statement s, optionally // labelled by label. func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { - // ...init... - // jump loop + // Use forStmtGo122 instead if it applies. + if s.Init != nil { + if assign, ok := s.Init.(*ast.AssignStmt); ok && assign.Tok == token.DEFINE { + if versions.AtLeast(fn.goversion, versions.Go1_22) { + b.forStmtGo122(fn, s, label) + return + } + } + } + + // ...init... + // jump loop // loop: - // if cond goto body else done + // if cond goto body else done // body: - // ...body... - // jump post - // post: (target of continue) - // ...post... - // jump loop + // ...body... + // jump post + // post: (target of continue) + // ...post... + // jump loop // done: (target of break) if s.Init != nil { b.stmt(fn, s.Init) } + body := fn.newBasicBlock("for.body") done := fn.newBasicBlock("for.done") // target of 'break' loop := body // target of back-edge @@ -1855,35 +1806,199 @@ func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { fn.currentBlock = done } +// forStmtGo122 emits to fn code for the for statement s, optionally +// labelled by label. s must define its variables. +// +// This allocates once per loop iteration. This is only correct in +// GoVersions >= go1.22. +func (b *builder) forStmtGo122(fn *Function, s *ast.ForStmt, label *lblock) { + // i_outer = alloc[T] + // *i_outer = ...init... // under objects[i] = i_outer + // jump loop + // loop: + // i = phi [head: i_outer, loop: i_next] + // ...cond... // under objects[i] = i + // if cond goto body else done + // body: + // ...body... // under objects[i] = i (same as loop) + // jump post + // post: + // tmp = *i + // i_next = alloc[T] + // *i_next = tmp + // ...post... // under objects[i] = i_next + // goto loop + // done: + + init := s.Init.(*ast.AssignStmt) + startingBlocks := len(fn.Blocks) + + pre := fn.currentBlock // current block before starting + loop := fn.newBasicBlock("for.loop") // target of back-edge + body := fn.newBasicBlock("for.body") + post := fn.newBasicBlock("for.post") // target of 'continue' + done := fn.newBasicBlock("for.done") // target of 'break' + + // For each of the n loop variables, we create five SSA values, + // outer, phi, next, load, and store in pre, loop, and post. + // There is no limit on n. 
+ type loopVar struct { + obj *types.Var + outer *Alloc + phi *Phi + load *UnOp + next *Alloc + store *Store + } + vars := make([]loopVar, len(init.Lhs)) + for i, lhs := range init.Lhs { + v := identVar(fn, lhs.(*ast.Ident)) + typ := fn.typ(v.Type()) + + fn.currentBlock = pre + outer := emitLocal(fn, typ, v.Pos(), v.Name()) + + fn.currentBlock = loop + phi := &Phi{Comment: v.Name()} + phi.pos = v.Pos() + phi.typ = outer.Type() + fn.emit(phi) + + fn.currentBlock = post + // If next is local, it reuses the address and zeroes the old value, so + // load before allocating next. + load := emitLoad(fn, phi) + next := emitLocal(fn, typ, v.Pos(), v.Name()) + store := emitStore(fn, next, load, token.NoPos) + + phi.Edges = []Value{outer, next} // pre edge is emitted before post edge. + + vars[i] = loopVar{v, outer, phi, load, next, store} + } + + // ...init... under fn.objects[v] = i_outer + fn.currentBlock = pre + for _, v := range vars { + fn.vars[v.obj] = v.outer + } + const isDef = false // assign to already-allocated outers + b.assignStmt(fn, init.Lhs, init.Rhs, isDef) + if label != nil { + label._break = done + label._continue = post + } + emitJump(fn, loop) + + // ...cond... under fn.objects[v] = i + fn.currentBlock = loop + for _, v := range vars { + fn.vars[v.obj] = v.phi + } + if s.Cond != nil { + b.cond(fn, s.Cond, body, done) + } else { + emitJump(fn, body) + } + + // ...body... under fn.objects[v] = i + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: post, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, post) + + // ...post... under fn.objects[v] = i_next + for _, v := range vars { + fn.vars[v.obj] = v.next + } + fn.currentBlock = post + if s.Post != nil { + b.stmt(fn, s.Post) + } + emitJump(fn, loop) // back-edge + fn.currentBlock = done + + // For each loop variable that does not escape, + // (the common case), fuse its next cells into its + // (local) outer cell as they have disjoint live ranges. + // + // It is sufficient to test whether i_next escapes, + // because its Heap flag will be marked true if either + // the cond or post expression causes i to escape + // (because escape distributes over phi). + var nlocals int + for _, v := range vars { + if !v.next.Heap { + nlocals++ + } + } + if nlocals > 0 { + replace := make(map[Value]Value, 2*nlocals) + dead := make(map[Instruction]bool, 4*nlocals) + for _, v := range vars { + if !v.next.Heap { + replace[v.next] = v.outer + replace[v.phi] = v.outer + dead[v.phi], dead[v.next], dead[v.load], dead[v.store] = true, true, true, true + } + } + + // Replace all uses of i_next and phi with i_outer. + // Referrers have not been built for fn yet so only update Instruction operands. + // We need only look within the blocks added by the loop. + var operands []*Value // recycle storage + for _, b := range fn.Blocks[startingBlocks:] { + for _, instr := range b.Instrs { + operands = instr.Operands(operands[:0]) + for _, ptr := range operands { + k := *ptr + if v := replace[k]; v != nil { + *ptr = v + } + } + } + } + + // Remove instructions for phi, load, and store. + // lift() will remove the unused i_next *Alloc. + isDead := func(i Instruction) bool { return dead[i] } + loop.Instrs = removeInstrsIf(loop.Instrs, isDead) + post.Instrs = removeInstrsIf(post.Instrs, isDead) + } +} + // rangeIndexed emits to fn the header for an integer-indexed loop // over array, *array or slice value x. // The v result is defined only if tv is non-nil.
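Note: forStmtGo122 exists because Go 1.22 gave each for-loop iteration a fresh variable. The observable difference the per-iteration allocs implement:

```go
package main

import "fmt"

func main() {
	var fns []func()
	for i := 0; i < 3; i++ {
		fns = append(fns, func() { fmt.Println(i) })
	}
	for _, f := range fns {
		f() // go >= 1.22 (fresh i per iteration): 0 1 2; earlier versions: 3 3 3
	}
}
```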
// forPos is the position of the "for" token. func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { // - // length = len(x) - // index = -1 - // loop: (target of continue) - // index++ - // if index < length goto body else done + // length = len(x) + // index = -1 + // loop: (target of continue) + // index++ + // if index < length goto body else done // body: - // k = index - // v = x[index] - // ...body... - // jump loop - // done: (target of break) + // k = index + // v = x[index] + // ...body... + // jump loop + // done: (target of break) // Determine number of iterations. var length Value - if arr, ok := deref(x.Type()).Underlying().(*types.Array); ok { + dt, _ := deref(x.Type()) + if arr, ok := typeparams.CoreType(dt).(*types.Array); ok { // For array or *array, the number of iterations is // known statically thanks to the type. We avoid a // data dependence upon x, permitting later dead-code // elimination if x is pure, static unrolling, etc. // Ranging over a nil *array may have >0 iterations. // We still generate code for x, in case it has effects. - // - // TypeParams do not have constant length. Use underlying instead of core type. length = intConst(arr.Len()) } else { // length = len(x). @@ -1894,7 +2009,7 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P length = fn.emit(&c) } - index := fn.addLocal(tInt, token.NoPos) + index := emitLocal(fn, tInt, token.NoPos, "rangeindex") emitStore(fn, index, intConst(-1), pos) loop = fn.newBasicBlock("rangeindex.loop") @@ -1957,16 +2072,16 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P // if the respective component is not wanted. func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { // - // it = range x + // it = range x // loop: (target of continue) - // okv = next it (ok, key, value) - // ok = extract okv #0 - // if ok goto body else done + // okv = next it (ok, key, value) + // ok = extract okv #0 + // if ok goto body else done // body: - // k = extract okv #1 - // v = extract okv #2 - // ...body... - // jump loop + // k = extract okv #1 + // v = extract okv #2 + // ...body... + // jump loop // done: (target of break) // @@ -2019,13 +2134,13 @@ func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token. func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) { // // loop: (target of continue) - // ko = <-x (key, ok) - // ok = extract ko #1 - // if ok goto body else done + // ko = <-x (key, ok) + // ok = extract ko #1 + // if ok goto body else done // body: - // k = extract ko #0 - // ... - // goto loop + // k = extract ko #0 + // ...body... + // goto loop // done: (target of break) loop = fn.newBasicBlock("rangechan.loop") @@ -2052,6 +2167,57 @@ func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) return } +// rangeInt emits to fn the header for a range loop with an integer operand. +// tk is the key value's type, or nil if the k result is not wanted. +// pos is the position of the "for" token. +func (b *builder) rangeInt(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) { + // + // iter = 0 + // if 0 < x goto body else done + // loop: (target of continue) + // iter++ + // if iter < x goto body else done + // body: + // k = x + // ...body... 
+ // jump loop + // done: (target of break) + + if isUntyped(x.Type()) { + x = emitConv(fn, x, tInt) + } + + T := x.Type() + iter := emitLocal(fn, T, token.NoPos, "rangeint.iter") + // x may be unsigned. Avoid initializing x to -1. + + body := fn.newBasicBlock("rangeint.body") + done = fn.newBasicBlock("rangeint.done") + emitIf(fn, emitCompare(fn, token.LSS, zeroConst(T), x, token.NoPos), body, done) + + loop = fn.newBasicBlock("rangeint.loop") + fn.currentBlock = loop + + incr := &BinOp{ + Op: token.ADD, + X: emitLoad(fn, iter), + Y: emitConv(fn, vOne, T), + } + incr.setType(T) + emitStore(fn, iter, fn.emit(incr), pos) + emitIf(fn, emitCompare(fn, token.LSS, incr, x, token.NoPos), body, done) + fn.currentBlock = body + + if tk != nil { + // Integer types (int, uint8, etc.) are named and + // we know that k is assignable to x when tk != nil. + // This implies tk and T are identical so no conversion is needed. + k = emitLoad(fn, iter) + } + + return +} + // rangeStmt emits to fn code for the range statement s, optionally // labelled by label. func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { @@ -2063,21 +2229,26 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { tv = fn.typeOf(s.Value) } - // If iteration variables are defined (:=), this - // occurs once outside the loop. - // - // Unlike a short variable declaration, a RangeStmt - // using := never redeclares an existing variable; it - // always creates a new one. - if s.Tok == token.DEFINE { + // create locals for s.Key and s.Value. + createVars := func() { + // Unlike a short variable declaration, a RangeStmt + // using := never redeclares an existing variable; it + // always creates a new one. if tk != nil { - fn.addLocalForIdent(s.Key.(*ast.Ident)) + emitLocalVar(fn, identVar(fn, s.Key.(*ast.Ident))) } if tv != nil { - fn.addLocalForIdent(s.Value.(*ast.Ident)) + emitLocalVar(fn, identVar(fn, s.Value.(*ast.Ident))) } } + afterGo122 := versions.AtLeast(fn.goversion, versions.Go1_22) + if s.Tok == token.DEFINE && !afterGo122 { + // pre-go1.22: If iteration variables are defined (:=), this + // occurs once outside the loop. + createVars() + } + x := b.expr(fn, s.X) var k, v Value @@ -2089,13 +2260,30 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { case *types.Chan: k, loop, done = b.rangeChan(fn, x, tk, s.For) - case *types.Map, *types.Basic: // string + case *types.Map: k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For) + case *types.Basic: + switch { + case rt.Info()&types.IsString != 0: + k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For) + + case rt.Info()&types.IsInteger != 0: + k, loop, done = b.rangeInt(fn, x, tk, s.For) + + default: + panic("Cannot range over basic type: " + rt.String()) + } + default: panic("Cannot range over: " + rt.String()) } + if s.Tok == token.DEFINE && afterGo122 { + // go1.22: If iteration variables are defined (:=), this occurs inside the loop. + createVars() + } + // Evaluate both LHS expressions before we update either. var kl, vl lvalue if tk != nil { @@ -2319,73 +2507,71 @@ start: } } +// A buildFunc is a strategy for building the SSA body for a function. +type buildFunc = func(*builder, *Function) + +// iterate causes all created but unbuilt functions to be built. As +// this may create new methods, the process is iterated until it +// converges. 
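Note: rangeInt lowers Go 1.22's range-over-int. At the source level, the rangeint.* blocks correspond to:

```go
package main

import "fmt"

func main() {
	sum := 0
	for i := range 10 { // iterates i = 0..9; the key is optional
		sum += i
	}
	fmt.Println(sum) // 45
}
```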
+func (b *builder) iterate() { + for ; b.finished < b.created.Len(); b.finished++ { + fn := b.created.At(b.finished) + b.buildFunction(fn) + } +} + // buildFunction builds SSA code for the body of function fn. Idempotent. func (b *builder) buildFunction(fn *Function) { - if !fn.built { + if fn.build != nil { assert(fn.parent == nil, "anonymous functions should not be built by buildFunction()") - b.buildFunctionBody(fn) + + if fn.Prog.mode&LogSource != 0 { + defer logStack("build %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))() + } + fn.build(b, fn) fn.done() } } -// buildFunctionBody builds SSA code for the body of function fn. -// -// fn is not done building until fn.done() is called. -func (b *builder) buildFunctionBody(fn *Function) { - // TODO(taking): see if this check is reachable. - if fn.Blocks != nil { - return // building already started +// buildParamsOnly builds fn.Params from fn.Signature, but does not build fn.Body. +func (b *builder) buildParamsOnly(fn *Function) { + // For external (C, asm) functions or functions loaded from + // export data, we must set fn.Params even though there is no + // body code to reference them. + if recv := fn.Signature.Recv(); recv != nil { + fn.addParamVar(recv) } + params := fn.Signature.Params() + for i, n := 0, params.Len(); i < n; i++ { + fn.addParamVar(params.At(i)) + } +} - var recvField *ast.FieldList - var body *ast.BlockStmt - var functype *ast.FuncType - switch n := fn.syntax.(type) { - case nil: - if fn.Params != nil { - return // not a Go source function. (Synthetic, or from object file.) - } +// buildFromSyntax builds fn.Body from fn.syntax, which must be non-nil. +func (b *builder) buildFromSyntax(fn *Function) { + var ( + recvField *ast.FieldList + body *ast.BlockStmt + functype *ast.FuncType + ) + switch syntax := fn.syntax.(type) { case *ast.FuncDecl: - functype = n.Type - recvField = n.Recv - body = n.Body + functype = syntax.Type + recvField = syntax.Recv + body = syntax.Body + if body == nil { + b.buildParamsOnly(fn) // no body (non-Go function) + return + } case *ast.FuncLit: - functype = n.Type - body = n.Body + functype = syntax.Type + body = syntax.Body + case nil: + panic("no syntax") default: - panic(n) - } - - if body == nil { - // External function. - if fn.Params == nil { - // This condition ensures we add a non-empty - // params list once only, but we may attempt - // the degenerate empty case repeatedly. - // TODO(adonovan): opt: don't do that. - - // We set Function.Params even though there is no body - // code to reference them. This simplifies clients. - if recv := fn.Signature.Recv(); recv != nil { - fn.addParamObj(recv) - } - params := fn.Signature.Params() - for i, n := 0, params.Len(); i < n; i++ { - fn.addParamObj(params.At(i)) - } - } - return + panic(syntax) // unexpected syntax } - // Build instantiation wrapper around generic body? - if fn.topLevelOrigin != nil && fn.subst == nil { - buildInstantiationWrapper(fn) - return - } - - if fn.Prog.mode&LogSource != 0 { - defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))() - } fn.startBody() fn.createSyntacticParams(recvField, functype) b.stmt(fn, body) @@ -2403,45 +2589,17 @@ func (b *builder) buildFunctionBody(fn *Function) { fn.finishBody() } -// buildCreated does the BUILD phase for each function created by builder that is not yet BUILT. -// Functions are built using buildFunction. +// addRuntimeType records t as a runtime type, +// along with all types derivable from it using reflection. 
// -// May add types that require runtime type information to builder. -func (b *builder) buildCreated() { - for ; b.finished < b.created.Len(); b.finished++ { - fn := b.created.At(b.finished) - b.buildFunction(fn) - } -} - -// Adds any needed runtime type information for the created functions. -// -// May add newly CREATEd functions that may need to be built or runtime type information. -// -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -func (b *builder) needsRuntimeTypes() { - if b.created.Len() == 0 { - return - } - prog := b.created.At(0).Prog - - var rtypes []types.Type - for ; b.rtypes < b.finished; b.rtypes++ { - fn := b.created.At(b.rtypes) - rtypes = append(rtypes, mayNeedRuntimeTypes(fn)...) - } - - // Calling prog.needMethodsOf(T) on a basic type T is a no-op. - // Filter out the basic types to reduce acquiring prog.methodsMu. - rtypes = nonbasicTypes(rtypes) - - for _, T := range rtypes { - prog.needMethodsOf(T, b.created) - } -} - -func (b *builder) done() bool { - return b.rtypes >= b.created.Len() +// Acquires prog.runtimeTypesMu. +func addRuntimeType(prog *Program, t types.Type) { + prog.runtimeTypesMu.Lock() + defer prog.runtimeTypesMu.Unlock() + forEachReachable(&prog.MethodSets, t, func(t types.Type) bool { + prev, _ := prog.runtimeTypes.Set(t, true).(bool) + return !prev // already seen? + }) } // Build calls Package.Build for each package in prog. @@ -2469,9 +2627,11 @@ func (prog *Program) Build() { // Build builds SSA code for all functions and vars in package p. // -// Precondition: CreatePackage must have been called for all of p's -// direct imports (and hence its direct imports must have been -// error-free). +// CreatePackage must have been called for all of p's direct imports +// (and hence its direct imports must have been error-free). It is not +// necessary to call CreatePackage for indirect dependencies. +// Functions will be created for all necessary methods in those +// packages on demand. // // Build is idempotent and thread-safe. func (p *Package) Build() { p.buildOnce.Do(p.build) } @@ -2480,45 +2640,39 @@ func (p *Package) build() { if p.info == nil { return // synthetic package, e.g. "testmain" } - - // Ensure we have runtime type info for all exported members. - // Additionally filter for just concrete types that can be runtime types. - // - // TODO(adonovan): ideally belongs in memberFromObject, but - // that would require package creation in topological order. - for name, mem := range p.Members { - isGround := func(m Member) bool { - switch m := m.(type) { - case *Type: - named, _ := m.Type().(*types.Named) - return named == nil || typeparams.ForNamed(named) == nil - case *Function: - return m.typeparams.Len() == 0 - } - return true // *NamedConst, *Global - } - if ast.IsExported(name) && isGround(mem) { - p.Prog.needMethodsOf(mem.Type(), &p.created) - } - } if p.Prog.mode&LogSource != 0 { defer logStack("build %s", p)() } b := builder{created: &p.created} - init := p.init - init.startBody() + b.iterate() + + // We no longer need transient information: ASTs or go/types deductions. + p.info = nil + p.created = nil + p.files = nil + p.initVersion = nil + + if p.Prog.mode&SanityCheckFunctions != 0 { + sanityCheckPackage(p) + } +} + +// buildPackageInit builds fn.Body for the synthetic package initializer. +func (b *builder) buildPackageInit(fn *Function) { + p := fn.Pkg + fn.startBody() var done *BasicBlock if p.Prog.mode&BareInits == 0 { // Make init() skip if package is already initialized. 
initguard := p.Var("init$guard") - doinit := init.newBasicBlock("init.start") - done = init.newBasicBlock("init.done") - emitIf(init, emitLoad(init, initguard), done, doinit) - init.currentBlock = doinit - emitStore(init, initguard, vTrue, token.NoPos) + doinit := fn.newBasicBlock("init.start") + done = fn.newBasicBlock("init.done") + emitIf(fn, emitLoad(fn, initguard), done, doinit) + fn.currentBlock = doinit + emitStore(fn, initguard, vTrue, token.NoPos) // Call the init() function of each package we import. for _, pkg := range p.Pkg.Imports() { @@ -2528,9 +2682,9 @@ func (p *Package) build() { } var v Call v.Call.Value = prereq.init - v.Call.pos = init.pos + v.Call.pos = fn.pos v.setType(types.NewTuple()) - init.emit(&v) + fn.emit(&v) } } @@ -2538,11 +2692,18 @@ func (p *Package) build() { if len(p.info.InitOrder) > 0 && len(p.files) == 0 { panic("no source files provided for package. cannot initialize globals") } + for _, varinit := range p.info.InitOrder { - if init.Prog.mode&LogSource != 0 { + if fn.Prog.mode&LogSource != 0 { fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n", varinit.Lhs, p.Prog.Fset.Position(varinit.Rhs.Pos())) } + // Initializers for global vars are evaluated in dependency + // order, but may come from arbitrary files of the package + // with different versions, so we transiently update + // fn.goversion for each one. (Since init is a synthetic + // function it has no syntax of its own that needs a version.) + fn.goversion = p.initVersion[varinit.Rhs] if len(varinit.Lhs) == 1 { // 1:1 initialization: var x, y = a(), b() var lval lvalue @@ -2551,28 +2712,33 @@ func (p *Package) build() { } else { lval = blank{} } - b.assign(init, lval, varinit.Rhs, true, nil) + b.assign(fn, lval, varinit.Rhs, true, nil) } else { // n:1 initialization: var x, y := f() - tuple := b.exprN(init, varinit.Rhs) + tuple := b.exprN(fn, varinit.Rhs) for i, v := range varinit.Lhs { if v.Name() == "_" { continue } - emitStore(init, p.objects[v].(*Global), emitExtract(init, tuple, i), v.Pos()) + emitStore(fn, p.objects[v].(*Global), emitExtract(fn, tuple, i), v.Pos()) } } } + // The rest of the init function is synthetic: + // no syntax, info, goversion. + fn.info = nil + fn.goversion = "" + // Call all of the declared init() functions in source order. for _, file := range p.files { for _, decl := range file.Decls { if decl, ok := decl.(*ast.FuncDecl); ok { id := decl.Name if !isBlankIdent(id) && id.Name == "init" && decl.Recv == nil { - fn := p.objects[p.info.Defs[id]].(*Function) + declaredInit := p.objects[p.info.Defs[id]].(*Function) var v Call - v.Call.Value = fn + v.Call.Value = declaredInit v.setType(types.NewTuple()) p.init.emit(&v) } @@ -2582,35 +2748,9 @@ func (p *Package) build() { // Finish up init(). if p.Prog.mode&BareInits == 0 { - emitJump(init, done) - init.currentBlock = done - } - init.emit(new(Return)) - init.finishBody() - init.done() - - // Build all CREATEd functions and add runtime types. - // These Functions include package-level functions, init functions, methods, and synthetic (including unreachable/blank ones). - // Builds any functions CREATEd while building this package. - // - // Initially the created functions for the package are: - // [init, decl0, ... , declN] - // Where decl0, ..., declN are declared functions in source order, but it's not significant. - // - // As these are built, more functions (function literals, wrappers, etc.) can be CREATEd. - // Iterate until we reach a fixed point. 
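
In source-level terms, the guard emitted above behaves like the following; initGuard stands in for the synthetic init$guard global (illustrative only):

    package main

    import "fmt"

    var initGuard bool // stands in for the synthetic init$guard global

    func initPkg() {
        if initGuard { // the init.start / init.done branch emitted above
            return
        }
        initGuard = true
        fmt.Println("imports' init()s, then package-level var initializers, in order")
    }

    func main() {
        initPkg()
        initPkg() // second call takes the guard branch and returns immediately
    }
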
- // - // Wait for init() to be BUILT as that cannot be built by buildFunction(). - // - for !b.done() { - b.buildCreated() // build any CREATEd and not BUILT function. May add runtime types. - b.needsRuntimeTypes() // Add all of the runtime type information. May CREATE Functions. - } - - p.info = nil // We no longer need ASTs or go/types deductions. - p.created = nil // We no longer need created functions. - - if p.Prog.mode&SanityCheckFunctions != 0 { - sanityCheckPackage(p) + emitJump(fn, done) + fn.currentBlock = done } + fn.emit(new(Return)) + fn.finishBody() } diff --git a/vendor/golang.org/x/tools/go/ssa/const.go b/vendor/golang.org/x/tools/go/ssa/const.go index 4a51a2cb4b..e0d79f5ef7 100644 --- a/vendor/golang.org/x/tools/go/ssa/const.go +++ b/vendor/golang.org/x/tools/go/ssa/const.go @@ -14,6 +14,7 @@ import ( "strconv" "strings" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -47,7 +48,7 @@ func soleTypeKind(typ types.Type) types.BasicInfo { state := types.IsBoolean | types.IsInteger | types.IsString underIs(typeSetOf(typ), func(t types.Type) bool { var c types.BasicInfo - if t, ok := t.(*types.Basic); ok { + if t, ok := aliases.Unalias(t).(*types.Basic); ok { c = t.Info() } if c&types.IsNumeric != 0 { // int/float/complex @@ -113,7 +114,7 @@ func zeroString(t types.Type, from *types.Package) string { } case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: return "nil" - case *types.Named: + case *types.Named, *aliases.Alias: return zeroString(t.Underlying(), from) case *types.Array, *types.Struct: return relType(t, from) + "{}" @@ -125,7 +126,7 @@ func zeroString(t types.Type, from *types.Package) string { components[i] = zeroString(t.At(i).Type(), from) } return "(" + strings.Join(components, ", ") + ")" - case *typeparams.TypeParam: + case *types.TypeParam: return "*new(" + relType(t, from) + ")" } panic(fmt.Sprint("zeroString: unexpected ", t)) diff --git a/vendor/golang.org/x/tools/go/ssa/coretype.go b/vendor/golang.org/x/tools/go/ssa/coretype.go index 128d61e426..3a512830b1 100644 --- a/vendor/golang.org/x/tools/go/ssa/coretype.go +++ b/vendor/golang.org/x/tools/go/ssa/coretype.go @@ -7,6 +7,7 @@ package ssa import ( "go/types" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -40,19 +41,19 @@ func isBytestring(T types.Type) bool { } // termList is a list of types. -type termList []*typeparams.Term // type terms of the type set +type termList []*types.Term // type terms of the type set func (s termList) Len() int { return len(s) } func (s termList) At(i int) types.Type { return s[i].Type() } // typeSetOf returns the type set of typ. Returns an empty typeset on an error. func typeSetOf(typ types.Type) termList { // This is a adaptation of x/exp/typeparams.NormalTerms which x/tools cannot depend on. - var terms []*typeparams.Term + var terms []*types.Term var err error - switch typ := typ.(type) { - case *typeparams.TypeParam: + switch typ := aliases.Unalias(typ).(type) { + case *types.TypeParam: terms, err = typeparams.StructuralTerms(typ) - case *typeparams.Union: + case *types.Union: terms, err = typeparams.UnionTermSet(typ) case *types.Interface: terms, err = typeparams.InterfaceTermSet(typ) @@ -60,7 +61,7 @@ func typeSetOf(typ types.Type) termList { // Common case. // Specializing the len=1 case to avoid a slice // had no measurable space/time benefit. 
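
These hunks route types through aliases.Unalias before concrete type switches, because a declared alias may now be a distinct types.Type node rather than being resolved away by the type checker. Go 1.22 exports the equivalent helpers as types.Alias, types.NewAlias and types.Unalias (the vendored code uses an internal package so it can also build with older toolchains); a minimal demonstration, assuming a Go 1.22 toolchain:

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        // Something like "type B = []byte": with materialized aliases, B is a
        // *types.Alias node, and a type switch on it would miss *types.Slice.
        slice := types.NewSlice(types.Typ[types.Byte])
        b := types.NewAlias(types.NewTypeName(0, nil, "B", nil), slice)
        switch t := types.Unalias(b).(type) { // exported Unalias, Go 1.22+
        case *types.Slice:
            fmt.Println("slice of", t.Elem())
        default:
            fmt.Println("unexpected:", t)
        }
    }
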
- terms = []*typeparams.Term{typeparams.NewTerm(false, typ)} + terms = []*types.Term{types.NewTerm(false, typ)} } if err != nil { diff --git a/vendor/golang.org/x/tools/go/ssa/create.go b/vendor/golang.org/x/tools/go/ssa/create.go index ccb20e7968..f8f584a1a5 100644 --- a/vendor/golang.org/x/tools/go/ssa/create.go +++ b/vendor/golang.org/x/tools/go/ssa/create.go @@ -15,41 +15,43 @@ import ( "os" "sync" - "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/versions" ) // NewProgram returns a new SSA Program. // // mode controls diagnostics and checking during SSA construction. +// +// To construct an SSA program: +// +// - Call NewProgram to create an empty Program. +// - Call CreatePackage providing typed syntax for each package +// you want to build, and call it with types but not +// syntax for each of those package's direct dependencies. +// - Call [Package.Build] on each syntax package you wish to build, +// or [Program.Build] to build all of them. +// +// See the Example tests for simple examples. func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { - prog := &Program{ + return &Program{ Fset: fset, imported: make(map[string]*Package), packages: make(map[*types.Package]*Package), - thunks: make(map[selectionKey]*Function), - bounds: make(map[boundsKey]*Function), mode: mode, canon: newCanonizer(), - ctxt: typeparams.NewContext(), - instances: make(map[*Function]*instanceSet), + ctxt: types.NewContext(), parameterized: tpWalker{seen: make(map[types.Type]bool)}, } - - h := typeutil.MakeHasher() // protected by methodsMu, in effect - prog.methodSets.SetHasher(h) - prog.runtimeTypes.SetHasher(h) - - return prog } // memberFromObject populates package pkg with a member for the // typechecker object obj. // // For objects from Go source code, syntax is the associated syntax -// tree (for funcs and vars only); it will be used during the build +// tree (for funcs and vars only) and goversion defines the +// appropriate interpretation; they will be used during the build // phase. -func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { +func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node, goversion string) { name := obj.Name() switch obj := obj.(type) { case *types.Builtin: @@ -58,9 +60,11 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { } case *types.TypeName: - pkg.Members[name] = &Type{ - object: obj, - pkg: pkg, + if name != "_" { + pkg.Members[name] = &Type{ + object: obj, + pkg: pkg, + } } case *types.Const: @@ -70,7 +74,9 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { pkg: pkg, } pkg.objects[obj] = c - pkg.Members[name] = c + if name != "_" { + pkg.Members[name] = c + } case *types.Var: g := &Global{ @@ -81,7 +87,9 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { pos: obj.Pos(), } pkg.objects[obj] = g - pkg.Members[name] = g + if name != "_" { + pkg.Members[name] = g + } case *types.Func: sig := obj.Type().(*types.Signature) @@ -89,36 +97,10 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { pkg.ninit++ name = fmt.Sprintf("init#%d", pkg.ninit) } - - // Collect type parameters if this is a generic function/method. 
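
For readers unfamiliar with the construction recipe the new NewProgram doc comment lays out, ssautil.BuildPackage wraps the NewProgram, CreatePackage, and Build steps for a single package of synthetic source; this mirrors the package's own BuildPackage example:

    package main

    import (
        "fmt"
        "go/ast"
        "go/importer"
        "go/parser"
        "go/token"
        "go/types"

        "golang.org/x/tools/go/ssa"
        "golang.org/x/tools/go/ssa/ssautil"
    )

    const src = `package p

    func Hello() string { return "hi" }
    `

    func main() {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }
        // BuildPackage performs the NewProgram / CreatePackage / Build
        // sequence described above, including CreatePackage for the
        // (type-only) direct dependencies.
        pkg, _, err := ssautil.BuildPackage(
            &types.Config{Importer: importer.Default()}, fset,
            types.NewPackage("p", ""), []*ast.File{f}, ssa.SanityCheckFunctions)
        if err != nil {
            panic(err)
        }
        fmt.Println(pkg.Func("Hello")) // p.Hello
    }
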
- var tparams *typeparams.TypeParamList - if rtparams := typeparams.RecvTypeParams(sig); rtparams.Len() > 0 { - tparams = rtparams - } else if sigparams := typeparams.ForSignature(sig); sigparams.Len() > 0 { - tparams = sigparams - } - - fn := &Function{ - name: name, - object: obj, - Signature: sig, - syntax: syntax, - pos: obj.Pos(), - Pkg: pkg, - Prog: pkg.Prog, - typeparams: tparams, - info: pkg.info, - } - pkg.created.Add(fn) - if syntax == nil { - fn.Synthetic = "loaded from gc object file" - } - if tparams.Len() > 0 { - fn.Prog.createInstanceSet(fn) - } - + fn := createFunction(pkg.Prog, obj, name, syntax, pkg.info, goversion, &pkg.created) + fn.Pkg = pkg pkg.objects[obj] = fn - if sig.Recv() == nil { + if name != "_" && sig.Recv() == nil { pkg.Members[name] = fn // package-level function } @@ -127,45 +109,79 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { } } +// createFunction creates a function or method. It supports both +// CreatePackage (with or without syntax) and the on-demand creation +// of methods in non-created packages based on their types.Func. +func createFunction(prog *Program, obj *types.Func, name string, syntax ast.Node, info *types.Info, goversion string, cr *creator) *Function { + sig := obj.Type().(*types.Signature) + + // Collect type parameters. + var tparams *types.TypeParamList + if rtparams := sig.RecvTypeParams(); rtparams.Len() > 0 { + tparams = rtparams // method of generic type + } else if sigparams := sig.TypeParams(); sigparams.Len() > 0 { + tparams = sigparams // generic function + } + + /* declared function/method (from syntax or export data) */ + fn := &Function{ + name: name, + object: obj, + Signature: sig, + build: (*builder).buildFromSyntax, + syntax: syntax, + info: info, + goversion: goversion, + pos: obj.Pos(), + Pkg: nil, // may be set by caller + Prog: prog, + typeparams: tparams, + } + if fn.syntax == nil { + fn.Synthetic = "from type information" + fn.build = (*builder).buildParamsOnly + } + if tparams.Len() > 0 { + fn.generic = new(generic) + } + cr.Add(fn) + return fn +} + // membersFromDecl populates package pkg with members for each // typechecker object (var, func, const or type) associated with the // specified decl. 
-func membersFromDecl(pkg *Package, decl ast.Decl) { +func membersFromDecl(pkg *Package, decl ast.Decl, goversion string) { switch decl := decl.(type) { case *ast.GenDecl: // import, const, type or var switch decl.Tok { case token.CONST: for _, spec := range decl.Specs { for _, id := range spec.(*ast.ValueSpec).Names { - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], nil) - } + memberFromObject(pkg, pkg.info.Defs[id], nil, "") } } case token.VAR: for _, spec := range decl.Specs { + for _, rhs := range spec.(*ast.ValueSpec).Values { + pkg.initVersion[rhs] = goversion + } for _, id := range spec.(*ast.ValueSpec).Names { - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], spec) - } + memberFromObject(pkg, pkg.info.Defs[id], spec, goversion) } } case token.TYPE: for _, spec := range decl.Specs { id := spec.(*ast.TypeSpec).Name - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], nil) - } + memberFromObject(pkg, pkg.info.Defs[id], nil, "") } } case *ast.FuncDecl: id := decl.Name - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], decl) - } + memberFromObject(pkg, pkg.info.Defs[id], decl, goversion) } } @@ -182,7 +198,7 @@ func (c *creator) Add(fn *Function) { func (c *creator) At(i int) *Function { return (*c)[i] } func (c *creator) Len() int { return len(*c) } -// CreatePackage constructs and returns an SSA Package from the +// CreatePackage creates and returns an SSA Package from the // specified type-checked, error-free file ASTs, and populates its // Members mapping. // @@ -190,36 +206,48 @@ func (c *creator) Len() int { return len(*c) } // subsequent call to ImportedPackage(pkg.Path()). // // The real work of building SSA form for each function is not done -// until a subsequent call to Package.Build(). +// until a subsequent call to Package.Build. +// +// CreatePackage should not be called after building any package in +// the program. func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { + // TODO(adonovan): assert that no package has yet been built. + if pkg == nil { + panic("nil pkg") // otherwise pkg.Scope below returns types.Universe! + } p := &Package{ Prog: prog, Members: make(map[string]Member), objects: make(map[types.Object]Member), Pkg: pkg, - info: info, // transient (CREATE and BUILD phases) - files: files, // transient (CREATE and BUILD phases) + syntax: info != nil, + // transient values (cleared after Package.Build) + info: info, + files: files, + initVersion: make(map[ast.Expr]string), } - // Add init() function. + /* synthesized package initializer */ p.init = &Function{ name: "init", Signature: new(types.Signature), Synthetic: "package initializer", Pkg: p, Prog: prog, + build: (*builder).buildPackageInit, info: p.info, + goversion: "", // See Package.build for details. } p.Members[p.init.name] = p.init p.created.Add(p.init) - // CREATE phase. // Allocate all package members: vars, funcs, consts and types. if len(files) > 0 { // Go source package. 
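
One reason the initializer is synthesized under reserved names (init, then init#1, init#2, ... for declared init functions): Go permits any number of init functions, none of which is directly callable, so each needs a distinct internal name and a call from the synthetic package initializer. For instance:

    package main

    import "fmt"

    // Multiple init functions are legal; the SSA form names them init#1,
    // init#2, ... and the synthetic initializer calls each in source order.
    func init() { fmt.Println("init#1") }
    func init() { fmt.Println("init#2") }

    func main() {}
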
 		for _, file := range files {
+			goversion := versions.Lang(versions.FileVersion(p.info, file))
 			for _, decl := range file.Decls {
-				membersFromDecl(p, decl)
+				membersFromDecl(p, decl, goversion)
 			}
 		}
 	} else {
@@ -229,11 +257,12 @@ func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *
 		scope := p.Pkg.Scope()
 		for _, name := range scope.Names() {
 			obj := scope.Lookup(name)
-			memberFromObject(p, obj, nil)
+			memberFromObject(p, obj, nil, "")
 			if obj, ok := obj.(*types.TypeName); ok {
+				// No Unalias: aliases should not duplicate methods.
 				if named, ok := obj.Type().(*types.Named); ok {
 					for i, n := 0, named.NumMethods(); i < n; i++ {
-						memberFromObject(p, named.Method(i), nil)
+						memberFromObject(p, named.Method(i), nil, "")
 					}
 				}
 			}
@@ -271,8 +300,8 @@ func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *
 // printMu serializes printing of Packages/Functions to stdout.
 var printMu sync.Mutex
 
-// AllPackages returns a new slice containing all packages in the
-// program prog in unspecified order.
+// AllPackages returns a new slice containing all packages created by
+// prog.CreatePackage in unspecified order.
 func (prog *Program) AllPackages() []*Package {
 	pkgs := make([]*Package, 0, len(prog.packages))
 	for _, pkg := range prog.packages {
@@ -294,6 +323,10 @@ func (prog *Program) AllPackages() []*Package {
 // false---yet this function remains very convenient.
 // Clients should use (*Program).Package instead where possible.
 // SSA doesn't really need a string-keyed map of packages.
+//
+// Furthermore, the graph of packages may contain multiple variants
+// (e.g. "p" vs "p as compiled for q.test"), and each has a different
+// view of its dependencies.
 func (prog *Program) ImportedPackage(path string) *Package {
 	return prog.imported[path]
 }
diff --git a/vendor/golang.org/x/tools/go/ssa/doc.go b/vendor/golang.org/x/tools/go/ssa/doc.go
index afda476b36..3310b5509b 100644
--- a/vendor/golang.org/x/tools/go/ssa/doc.go
+++ b/vendor/golang.org/x/tools/go/ssa/doc.go
@@ -7,8 +7,6 @@
 // static single-assignment (SSA) form intermediate representation
 // (IR) for the bodies of functions.
 //
-// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
-//
 // For an introduction to SSA form, see
 // http://en.wikipedia.org/wiki/Static_single_assignment_form.
 // This page provides a broader reading list:
@@ -21,15 +19,15 @@
 // All looping, branching and switching constructs are replaced with
 // unstructured control flow. Higher-level control flow constructs
 // such as multi-way branch can be reconstructed as needed; see
-// ssautil.Switches() for an example.
+// [golang.org/x/tools/go/ssa/ssautil.Switches] for an example.
 //
 // The simplest way to create the SSA representation of a package is
-// to load typed syntax trees using golang.org/x/tools/go/packages, then
-// invoke the ssautil.Packages helper function. See Example_loadPackages
-// and Example_loadWholeProgram for examples.
-// The resulting ssa.Program contains all the packages and their
+// to load typed syntax trees using [golang.org/x/tools/go/packages], then
+// invoke the [golang.org/x/tools/go/ssa/ssautil.Packages] helper function.
+// (See the package-level Examples named LoadPackages and LoadWholeProgram.)
+// The resulting [ssa.Program] contains all the packages and their
 // members, but SSA code is not created for function bodies until a
-// subsequent call to (*Package).Build or (*Program).Build.
+// subsequent call to [Package.Build] or [Program.Build].
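
The doc comment's recommended path, loading typed syntax with go/packages and handing it to ssautil.Packages, looks roughly like this (loading "fmt" as an arbitrary example target):

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/tools/go/packages"
        "golang.org/x/tools/go/ssa"
        "golang.org/x/tools/go/ssa/ssautil"
    )

    func main() {
        mode := packages.NeedName | packages.NeedFiles | packages.NeedImports |
            packages.NeedDeps | packages.NeedTypes | packages.NeedSyntax |
            packages.NeedTypesInfo
        initial, err := packages.Load(&packages.Config{Mode: mode}, "fmt")
        if err != nil {
            log.Fatal(err)
        }
        // One ssa.Package per well-typed initial package; dependencies are
        // created from type information only.
        prog, pkgs := ssautil.Packages(initial, ssa.SanityCheckFunctions)
        prog.Build() // build bodies for all packages in the program
        for _, p := range pkgs {
            fmt.Println(p.Pkg.Path(), len(p.Members), "members")
        }
    }
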
// // The builder initially builds a naive SSA form in which all local // variables are addresses of stack locations with explicit loads and @@ -41,13 +39,13 @@ // // The primary interfaces of this package are: // -// - Member: a named member of a Go package. -// - Value: an expression that yields a value. -// - Instruction: a statement that consumes values and performs computation. -// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph) +// - [Member]: a named member of a Go package. +// - [Value]: an expression that yields a value. +// - [Instruction]: a statement that consumes values and performs computation. +// - [Node]: a [Value] or [Instruction] (emphasizing its membership in the SSA value graph) // -// A computation that yields a result implements both the Value and -// Instruction interfaces. The following table shows for each +// A computation that yields a result implements both the [Value] and +// [Instruction] interfaces. The following table shows for each // concrete type which of these interfaces it implements. // // Value? Instruction? Member? @@ -66,7 +64,6 @@ // *FieldAddr ✔ ✔ // *FreeVar ✔ // *Function ✔ ✔ (func) -// *GenericConvert ✔ ✔ // *Global ✔ ✔ (var) // *Go ✔ // *If ✔ @@ -80,6 +77,7 @@ // *MakeMap ✔ ✔ // *MakeSlice ✔ ✔ // *MapUpdate ✔ +// *MultiConvert ✔ ✔ // *NamedConst ✔ (const) // *Next ✔ ✔ // *Panic ✔ @@ -97,15 +95,15 @@ // *TypeAssert ✔ ✔ // *UnOp ✔ ✔ // -// Other key types in this package include: Program, Package, Function -// and BasicBlock. +// Other key types in this package include: [Program], [Package], [Function] +// and [BasicBlock]. // // The program representation constructed by this package is fully // resolved internally, i.e. it does not rely on the names of Values, // Packages, Functions, Types or BasicBlocks for the correct // interpretation of the program. Only the identities of objects and // the topology of the SSA and type graphs are semantically -// significant. (There is one exception: Ids, used to identify field +// significant. (There is one exception: [types.Id] values, which identify field // and method names, contain strings.) Avoidance of name-based // operations simplifies the implementation of subsequent passes and // can make them very efficient. Many objects are nonetheless named @@ -113,11 +111,9 @@ // either accurate or unambiguous. The public API exposes a number of // name-based maps for client convenience. // -// The ssa/ssautil package provides various utilities that depend only -// on the public API of this package. -// -// TODO(adonovan): Consider the exceptional control-flow implications -// of defer and recover(). +// The [golang.org/x/tools/go/ssa/ssautil] package provides various +// helper functions, for example to simplify loading a Go program into +// SSA form. 
 //
 // TODO(adonovan): write a how-to document for all the various cases
 // of trying to determine corresponding elements across the four
diff --git a/vendor/golang.org/x/tools/go/ssa/dom.go b/vendor/golang.org/x/tools/go/ssa/dom.go
index 66a2f5e6ed..02c1ae83ae 100644
--- a/vendor/golang.org/x/tools/go/ssa/dom.go
+++ b/vendor/golang.org/x/tools/go/ssa/dom.go
@@ -40,20 +40,25 @@ func (b *BasicBlock) Dominates(c *BasicBlock) bool {
 	return b.dom.pre <= c.dom.pre && c.dom.post <= b.dom.post
 }
 
-type byDomPreorder []*BasicBlock
-
-func (a byDomPreorder) Len() int           { return len(a) }
-func (a byDomPreorder) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre }
-
-// DomPreorder returns a new slice containing the blocks of f in
-// dominator tree preorder.
+// DomPreorder returns a new slice containing the blocks of f
+// in a preorder traversal of the dominator tree.
 func (f *Function) DomPreorder() []*BasicBlock {
-	n := len(f.Blocks)
-	order := make(byDomPreorder, n)
-	copy(order, f.Blocks)
-	sort.Sort(order)
-	return order
+	slice := append([]*BasicBlock(nil), f.Blocks...)
+	sort.Slice(slice, func(i, j int) bool {
+		return slice[i].dom.pre < slice[j].dom.pre
+	})
+	return slice
+}
+
+// DomPostorder returns a new slice containing the blocks of f
+// in a postorder traversal of the dominator tree.
+// (This is not the same as a postdominance order.)
+func (f *Function) DomPostorder() []*BasicBlock {
+	slice := append([]*BasicBlock(nil), f.Blocks...)
+	sort.Slice(slice, func(i, j int) bool {
+		return slice[i].dom.post < slice[j].dom.post
+	})
+	return slice
 }
 
 // domInfo contains a BasicBlock's dominance information.
diff --git a/vendor/golang.org/x/tools/go/ssa/emit.go b/vendor/golang.org/x/tools/go/ssa/emit.go
index 1731c79750..549c9114d4 100644
--- a/vendor/golang.org/x/tools/go/ssa/emit.go
+++ b/vendor/golang.org/x/tools/go/ssa/emit.go
@@ -12,24 +12,62 @@ import (
 	"go/token"
 	"go/types"
 
+	"golang.org/x/tools/internal/aliases"
 	"golang.org/x/tools/internal/typeparams"
 )
 
-// emitNew emits to f a new (heap Alloc) instruction allocating an
-// object of type typ. pos is the optional source location.
-func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc {
-	v := &Alloc{Heap: true}
+// emitAlloc emits to f a new Alloc instruction allocating a variable
+// of type typ.
+//
+// The caller must set Alloc.Heap=true (for a heap-allocated variable)
+// or add the Alloc to f.Locals (for a frame-allocated variable).
+//
+// During building, a variable in f.Locals may have its Heap flag
+// set when it is discovered that its address is taken.
+// These Allocs are removed from f.Locals at the end.
+//
+// The builder should generally call one of the emit{New,Local,LocalVar} wrappers instead.
+func emitAlloc(f *Function, typ types.Type, pos token.Pos, comment string) *Alloc {
+	v := &Alloc{Comment: comment}
 	v.setType(types.NewPointer(typ))
 	v.setPos(pos)
 	f.emit(v)
 	return v
 }
 
+// emitNew emits to f a new Alloc instruction heap-allocating a
+// variable of type typ. pos is the optional source location.
+func emitNew(f *Function, typ types.Type, pos token.Pos, comment string) *Alloc {
+	alloc := emitAlloc(f, typ, pos, comment)
+	alloc.Heap = true
+	return alloc
+}
+
+// emitLocal creates a local var for (t, pos, comment) and
+// emits an Alloc instruction for it.
+//
+// (Use this function or emitNew for synthetic variables;
+// for source-level variables, use emitLocalVar.)
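
DomPreorder and its new DomPostorder counterpart rely on the pre/post visit numbers recorded in domInfo during dominator-tree construction; sorting blocks by either number linearizes the tree. A self-contained illustration with hand-assigned numbers for a five-block tree:

    package main

    import (
        "fmt"
        "sort"
    )

    // block carries the dominator-tree visit numbers that domInfo records.
    type block struct {
        name      string
        pre, post int
    }

    func main() {
        // entry dominates cond and exit; cond dominates then and else.
        blocks := []*block{
            {"entry", 0, 4}, {"cond", 1, 2}, {"then", 2, 0},
            {"else", 3, 1}, {"exit", 4, 3},
        }
        pre := append([]*block(nil), blocks...)
        sort.Slice(pre, func(i, j int) bool { return pre[i].pre < pre[j].pre })
        post := append([]*block(nil), blocks...)
        sort.Slice(post, func(i, j int) bool { return post[i].post < post[j].post })
        for _, b := range pre {
            fmt.Print(b.name, " ") // entry cond then else exit
        }
        fmt.Println()
        for _, b := range post {
            fmt.Print(b.name, " ") // then else cond exit entry
        }
        fmt.Println()
    }
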
+func emitLocal(f *Function, t types.Type, pos token.Pos, comment string) *Alloc { + local := emitAlloc(f, t, pos, comment) + f.Locals = append(f.Locals, local) + return local +} + +// emitLocalVar creates a local var for v and emits an Alloc instruction for it. +// Subsequent calls to f.lookup(v) return it. +// It applies the appropriate generic instantiation to the type. +func emitLocalVar(f *Function, v *types.Var) *Alloc { + alloc := emitLocal(f, f.typ(v.Type()), v.Pos(), v.Name()) + f.vars[v] = alloc + return alloc +} + // emitLoad emits to f an instruction to load the address addr into a // new temporary, and returns the value so defined. func emitLoad(f *Function, addr Value) *UnOp { v := &UnOp{Op: token.MUL, X: addr} - v.setType(deref(typeparams.CoreType(addr.Type()))) + v.setType(typeparams.MustDeref(addr.Type())) f.emit(v) return v } @@ -103,7 +141,7 @@ func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token. } // emitCompare emits to f code compute the boolean result of -// comparison comparison 'x op y'. +// comparison 'x op y'. func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { xt := x.Type().Underlying() yt := y.Type().Underlying() @@ -147,10 +185,10 @@ func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { // isValuePreserving returns true if a conversion from ut_src to // ut_dst is value-preserving, i.e. just a change of type. -// Precondition: neither argument is a named type. +// Precondition: neither argument is a named or alias type. func isValuePreserving(ut_src, ut_dst types.Type) bool { // Identical underlying types? - if structTypesIdentical(ut_dst, ut_src) { + if types.IdenticalIgnoreTags(ut_dst, ut_src) { return true } @@ -208,6 +246,13 @@ func emitConv(f *Function, val Value, typ types.Type) Value { val = emitConv(f, val, types.Default(ut_src)) } + // Record the types of operands to MakeInterface, if + // non-parameterized, as they are the set of runtime types. + t := val.Type() + if f.typeparams.Len() == 0 || !f.Prog.parameterized.isParameterized(t) { + addRuntimeType(f.Prog, t) + } + mi := &MakeInterface{X: val} mi.setType(typ) return f.emit(mi) @@ -239,11 +284,11 @@ func emitConv(f *Function, val Value, typ types.Type) Value { } // Conversion from slice to array or slice to array pointer? - if slice, ok := s.(*types.Slice); ok { + if slice, ok := aliases.Unalias(s).(*types.Slice); ok { var arr *types.Array var ptr bool // Conversion from slice to array pointer? - switch d := d.(type) { + switch d := aliases.Unalias(d).(type) { case *types.Array: arr = d case *types.Pointer: @@ -372,9 +417,10 @@ func emitTypeCoercion(f *Function, v Value, typ types.Type) Value { // emitStore emits to f an instruction to store value val at location // addr, applying implicit conversions as required by assignability rules. func emitStore(f *Function, addr, val Value, pos token.Pos) *Store { + typ := typeparams.MustDeref(addr.Type()) s := &Store{ Addr: addr, - Val: emitConv(f, val, deref(addr.Type())), + Val: emitConv(f, val, typ), pos: pos, } f.emit(s) @@ -477,9 +523,8 @@ func emitTailCall(f *Function, call *Call) { // value of a field. 
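
emitLoad now types its result with typeparams.MustDeref rather than deref of a core type. An approximation of that helper using only go/types (the real one also accepts type parameters whose core type is a pointer):

    package main

    import (
        "fmt"
        "go/types"
    )

    // mustDeref returns the pointee type of a pointer (after unwrapping
    // names and aliases via Underlying), or panics: the type a Load yields.
    func mustDeref(t types.Type) types.Type {
        if p, ok := t.Underlying().(*types.Pointer); ok {
            return p.Elem()
        }
        panic("not a pointer: " + t.String())
    }

    func main() {
        pi := types.NewPointer(types.Typ[types.Int])
        fmt.Println(mustDeref(pi)) // int
    }
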
func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) Value { for _, index := range indices { - fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index) - - if isPointer(v.Type()) { + if st, vptr := deref(v.Type()); vptr { + fld := fieldOf(st, index) instr := &FieldAddr{ X: v, Field: index, @@ -488,10 +533,11 @@ func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) instr.setType(types.NewPointer(fld.Type())) v = f.emit(instr) // Load the field's value iff indirectly embedded. - if isPointer(fld.Type()) { + if _, fldptr := deref(fld.Type()); fldptr { v = emitLoad(f, v) } } else { + fld := fieldOf(v.Type(), index) instr := &Field{ X: v, Field: index, @@ -511,8 +557,8 @@ func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) // field's value. // Ident id is used for position and debug info. func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { - fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index) - if isPointer(v.Type()) { + if st, vptr := deref(v.Type()); vptr { + fld := fieldOf(st, index) instr := &FieldAddr{ X: v, Field: index, @@ -525,6 +571,7 @@ func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast. v = emitLoad(f, v) } } else { + fld := fieldOf(v.Type(), index) instr := &Field{ X: v, Field: index, @@ -537,17 +584,6 @@ func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast. return v } -// zeroValue emits to f code to produce a zero value of type t, -// and returns it. -func zeroValue(f *Function, t types.Type) Value { - switch t.Underlying().(type) { - case *types.Struct, *types.Array: - return emitLoad(f, f.addLocal(t, token.NoPos)) - default: - return zeroConst(t) - } -} - // createRecoverBlock emits to f a block of code to return after a // recovered panic, and sets f.Recover to it. // @@ -577,7 +613,7 @@ func createRecoverBlock(f *Function) { T := R.At(i).Type() // Return zero value of each result type. - results = append(results, zeroValue(f, T)) + results = append(results, zeroConst(T)) } } f.emit(&Return{Results: results}) diff --git a/vendor/golang.org/x/tools/go/ssa/func.go b/vendor/golang.org/x/tools/go/ssa/func.go index 57f5f718f7..4d3e39129c 100644 --- a/vendor/golang.org/x/tools/go/ssa/func.go +++ b/vendor/golang.org/x/tools/go/ssa/func.go @@ -10,7 +10,6 @@ import ( "bytes" "fmt" "go/ast" - "go/token" "go/types" "io" "os" @@ -46,7 +45,7 @@ func (f *Function) typ(T types.Type) types.Type { // If id is an Instance, returns info.Instances[id].Type. // Otherwise returns f.typeOf(id). func (f *Function) instanceType(id *ast.Ident) types.Type { - if t, ok := typeparams.GetInstances(f.info)[id]; ok { + if t, ok := f.info.Instances[id]; ok { return t.Type } return f.typeOf(id) @@ -108,52 +107,40 @@ type lblock struct { // labelledBlock returns the branch target associated with the // specified label, creating it if needed. func (f *Function) labelledBlock(label *ast.Ident) *lblock { - obj := f.objectOf(label) + obj := f.objectOf(label).(*types.Label) lb := f.lblocks[obj] if lb == nil { lb = &lblock{_goto: f.newBasicBlock(label.Name)} if f.lblocks == nil { - f.lblocks = make(map[types.Object]*lblock) + f.lblocks = make(map[*types.Label]*lblock) } f.lblocks[obj] = lb } return lb } -// addParam adds a (non-escaping) parameter to f.Params of the -// specified name, type and source position. 
-func (f *Function) addParam(name string, typ types.Type, pos token.Pos) *Parameter { - v := &Parameter{ - name: name, - typ: typ, - pos: pos, - parent: f, - } - f.Params = append(f.Params, v) - return v -} - -func (f *Function) addParamObj(obj types.Object) *Parameter { - name := obj.Name() +// addParamVar adds a parameter to f.Params. +func (f *Function) addParamVar(v *types.Var) *Parameter { + name := v.Name() if name == "" { name = fmt.Sprintf("arg%d", len(f.Params)) } - param := f.addParam(name, f.typ(obj.Type()), obj.Pos()) - param.object = obj + param := &Parameter{ + name: name, + object: v, + typ: f.typ(v.Type()), + parent: f, + } + f.Params = append(f.Params, param) return param } // addSpilledParam declares a parameter that is pre-spilled to the // stack; the function body will load/store the spilled location. // Subsequent lifting will eliminate spills where possible. -func (f *Function) addSpilledParam(obj types.Object) { - param := f.addParamObj(obj) - spill := &Alloc{Comment: obj.Name()} - spill.setType(types.NewPointer(param.Type())) - spill.setPos(obj.Pos()) - f.objects[obj] = spill - f.Locals = append(f.Locals, spill) - f.emit(spill) +func (f *Function) addSpilledParam(obj *types.Var) { + param := f.addParamVar(obj) + spill := emitLocalVar(f, obj) f.emit(&Store{Addr: spill, Val: param}) } @@ -161,7 +148,7 @@ func (f *Function) addSpilledParam(obj types.Object) { // Precondition: f.Type() already set. func (f *Function) startBody() { f.currentBlock = f.newBasicBlock("entry") - f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init + f.vars = make(map[*types.Var]Value) // needed for some synthetics, e.g. init } // createSyntacticParams populates f.Params and generates code (spills @@ -177,11 +164,11 @@ func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.Func if recv != nil { for _, field := range recv.List { for _, n := range field.Names { - f.addSpilledParam(f.info.Defs[n]) + f.addSpilledParam(identVar(f, n)) } // Anonymous receiver? No need to spill. if field.Names == nil { - f.addParamObj(f.Signature.Recv()) + f.addParamVar(f.Signature.Recv()) } } } @@ -191,11 +178,11 @@ func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.Func n := len(f.Params) // 1 if has recv, 0 otherwise for _, field := range functype.Params.List { for _, n := range field.Names { - f.addSpilledParam(f.info.Defs[n]) + f.addSpilledParam(identVar(f, n)) } // Anonymous parameter? No need to spill. if field.Names == nil { - f.addParamObj(f.Signature.Params().At(len(f.Params) - n)) + f.addParamVar(f.Signature.Params().At(len(f.Params) - n)) } } } @@ -205,7 +192,8 @@ func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.Func for _, field := range functype.Results.List { // Implicit "var" decl of locals for named results. for _, n := range field.Names { - f.namedResults = append(f.namedResults, f.addLocalForIdent(n)) + namedResult := emitLocalVar(f, identVar(f, n)) + f.namedResults = append(f.namedResults, namedResult) } } } @@ -250,49 +238,14 @@ func buildReferrers(f *Function) { } } -// mayNeedRuntimeTypes returns all of the types in the body of fn that might need runtime types. -// -// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -func mayNeedRuntimeTypes(fn *Function) []types.Type { - // Collect all types that may need rtypes, i.e. those that flow into an interface. 
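
Why parameters are "pre-spilled" to the stack: if the body can take a parameter's address, the parameter must denote a stable location rather than an SSA register, and lifting later removes any spill that turns out to be unnecessary. In source terms:

    package main

    import "fmt"

    // Taking x's address forces the builder to spill the incoming value to
    // an Alloc up front; lifting removes the spill when nothing escapes.
    func inc(x int) int {
        p := &x // address taken: x must denote a location, not just a value
        *p++
        return x
    }

    func main() { fmt.Println(inc(41)) } // 42
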
- var ts []types.Type - for _, bb := range fn.Blocks { - for _, instr := range bb.Instrs { - if mi, ok := instr.(*MakeInterface); ok { - ts = append(ts, mi.X.Type()) - } - } - } - - // Types that contain a parameterized type are considered to not be runtime types. - if fn.typeparams.Len() == 0 { - return ts // No potentially parameterized types. - } - // Filter parameterized types, in place. - fn.Prog.methodsMu.Lock() - defer fn.Prog.methodsMu.Unlock() - filtered := ts[:0] - for _, t := range ts { - if !fn.Prog.parameterized.isParameterized(t) { - filtered = append(filtered, t) - } - } - return filtered -} - // finishBody() finalizes the contents of the function after SSA code generation of its body. // // The function is not done being built until done() is called. func (f *Function) finishBody() { - f.objects = nil + f.vars = nil f.currentBlock = nil f.lblocks = nil - // Don't pin the AST in memory (except in debug mode). - if n := f.syntax; n != nil && !f.debugInfo() { - f.syntax = extentNode{n.Pos(), n.End()} - } - // Remove from f.Locals any Allocs that escape to the heap. j := 0 for _, l := range f.Locals { @@ -320,15 +273,15 @@ func (f *Function) finishBody() { lift(f) } - // clear remaining stateful variables + // clear remaining builder state f.namedResults = nil // (used by lifting) - f.info = nil f.subst = nil numberRegisters(f) // uses f.namedRegisters } -// After this, function is done with BUILD phase. +// done marks the building of f's SSA body complete, +// along with any nested functions, and optionally prints them. func (f *Function) done() { assert(f.parent == nil, "done called on an anonymous function") @@ -338,7 +291,7 @@ func (f *Function) done() { visit(anon) // anon is done building before f. } - f.built = true // function is done with BUILD phase + f.build = nil // function is built if f.Prog.mode&PrintFunctions != 0 { printMu.Lock() @@ -376,49 +329,35 @@ func (f *Function) removeNilBlocks() { // size of the instruction stream, and causes Functions to depend upon // the ASTs, potentially keeping them live in memory for longer. func (pkg *Package) SetDebugMode(debug bool) { - // TODO(adonovan): do we want ast.File granularity? pkg.debug = debug } // debugInfo reports whether debug info is wanted for this function. func (f *Function) debugInfo() bool { - return f.Pkg != nil && f.Pkg.debug -} - -// addNamedLocal creates a local variable, adds it to function f and -// returns it. Its name and type are taken from obj. Subsequent -// calls to f.lookup(obj) will return the same local. -func (f *Function) addNamedLocal(obj types.Object) *Alloc { - l := f.addLocal(obj.Type(), obj.Pos()) - l.Comment = obj.Name() - f.objects[obj] = l - return l -} - -func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc { - return f.addNamedLocal(f.info.Defs[id]) -} - -// addLocal creates an anonymous local variable of type typ, adds it -// to function f and returns it. pos is the optional source location. -func (f *Function) addLocal(typ types.Type, pos token.Pos) *Alloc { - typ = f.typ(typ) - v := &Alloc{} - v.setType(types.NewPointer(typ)) - v.setPos(pos) - f.Locals = append(f.Locals, v) - f.emit(v) - return v + // debug info for instantiations follows the debug info of their origin. + p := f.declaredPackage() + return p != nil && p.debug } // lookup returns the address of the named variable identified by obj // that is local to function f or one of its enclosing functions. 
// If escaping, the reference comes from a potentially escaping pointer // expression and the referent must be heap-allocated. -func (f *Function) lookup(obj types.Object, escaping bool) Value { - if v, ok := f.objects[obj]; ok { - if alloc, ok := v.(*Alloc); ok && escaping { - alloc.Heap = true +// We assume the referent is a *Alloc or *Phi. +// (The only Phis at this stage are those created directly by go1.22 "for" loops.) +func (f *Function) lookup(obj *types.Var, escaping bool) Value { + if v, ok := f.vars[obj]; ok { + if escaping { + switch v := v.(type) { + case *Alloc: + v.Heap = true + case *Phi: + for _, edge := range v.Edges { + if alloc, ok := edge.(*Alloc); ok { + alloc.Heap = true + } + } + } } return v // function-local var (address) } @@ -436,7 +375,7 @@ func (f *Function) lookup(obj types.Object, escaping bool) Value { outer: outer, parent: f, } - f.objects[obj] = v + f.vars[obj] = v f.FreeVars = append(f.FreeVars, v) return v } @@ -514,15 +453,15 @@ func (f *Function) relMethod(from *types.Package, recv types.Type) string { } // writeSignature writes to buf the signature sig in declaration syntax. -func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) { +func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature) { buf.WriteString("func ") if recv := sig.Recv(); recv != nil { buf.WriteString("(") - if n := params[0].Name(); n != "" { - buf.WriteString(n) + if name := recv.Name(); name != "" { + buf.WriteString(name) buf.WriteString(" ") } - types.WriteType(buf, params[0].Type(), types.RelativeTo(from)) + types.WriteType(buf, recv.Type(), types.RelativeTo(from)) buf.WriteString(") ") } buf.WriteString(name) @@ -534,7 +473,7 @@ func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *ty func (fn *Function) declaredPackage() *Package { switch { case fn.Pkg != nil: - return fn.Pkg // non-generic function + return fn.Pkg // non-generic function (does that follow??) case fn.topLevelOrigin != nil: return fn.topLevelOrigin.Pkg // instance of a named generic function case fn.parent != nil: @@ -594,10 +533,10 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { if len(f.Locals) > 0 { buf.WriteString("# Locals:\n") for i, l := range f.Locals { - fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from)) + fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(typeparams.MustDeref(l.Type()), from)) } } - writeSignature(buf, from, f.Name(), f.Signature, f.Params) + writeSignature(buf, from, f.Name(), f.Signature) buf.WriteString(":\n") if f.Blocks == nil { @@ -649,6 +588,12 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { default: buf.WriteString(instr.String()) } + // -mode=S: show line numbers + if f.Prog.mode&LogSource != 0 { + if pos := instr.Pos(); pos.IsValid() { + fmt.Fprintf(buf, " L%d", f.Prog.Fset.Position(pos).Line) + } + } buf.WriteString("\n") } } @@ -687,17 +632,11 @@ func (prog *Program) NewFunction(name string, sig *types.Signature, provenance s return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance} } -type extentNode [2]token.Pos - -func (n extentNode) Pos() token.Pos { return n[0] } -func (n extentNode) End() token.Pos { return n[1] } - -// Syntax returns an ast.Node whose Pos/End methods provide the -// lexical extent of the function if it was defined by Go source code -// (f.Synthetic==""), or nil otherwise. 
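
The escaping case in lookup is what makes closures work: a variable referenced from a nested function becomes a FreeVar chained to the outer function's Alloc, and that Alloc is marked heap-allocated because it outlives the enclosing frame. The classic example:

    package main

    import "fmt"

    // n is referenced by the returned closure: in SSA form the inner
    // function gets a FreeVar for n chained to counter's Alloc, and lookup
    // marks the Alloc Heap=true because n escapes counter's frame.
    func counter() func() int {
        n := 0
        return func() int { n++; return n }
    }

    func main() {
        c := counter()
        fmt.Println(c(), c()) // 1 2
    }
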
-//
-// If f was built with debug information (see Package.SetDebugRef),
-// the result is the *ast.FuncDecl or *ast.FuncLit that declared the
-// function. Otherwise, it is an opaque Node providing only position
-// information; this avoids pinning the AST in memory.
+// Syntax returns the function's syntax (*ast.Func{Decl,Lit})
+// if it was produced from syntax.
 func (f *Function) Syntax() ast.Node { return f.syntax }
+
+// identVar returns the variable defined by id.
+func identVar(fn *Function, id *ast.Ident) *types.Var {
+	return fn.info.Defs[id].(*types.Var)
+}
diff --git a/vendor/golang.org/x/tools/go/ssa/identical.go b/vendor/golang.org/x/tools/go/ssa/identical.go
deleted file mode 100644
index e8026967be..0000000000
--- a/vendor/golang.org/x/tools/go/ssa/identical.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.8
-// +build go1.8
-
-package ssa
-
-import "go/types"
-
-var structTypesIdentical = types.IdenticalIgnoreTags
diff --git a/vendor/golang.org/x/tools/go/ssa/identical_17.go b/vendor/golang.org/x/tools/go/ssa/identical_17.go
deleted file mode 100644
index 575aa5dfc1..0000000000
--- a/vendor/golang.org/x/tools/go/ssa/identical_17.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.8
-// +build !go1.8
-
-package ssa
-
-import "go/types"
-
-var structTypesIdentical = types.Identical
diff --git a/vendor/golang.org/x/tools/go/ssa/instantiate.go b/vendor/golang.org/x/tools/go/ssa/instantiate.go
index f6b2533f24..c155f6736a 100644
--- a/vendor/golang.org/x/tools/go/ssa/instantiate.go
+++ b/vendor/golang.org/x/tools/go/ssa/instantiate.go
@@ -6,130 +6,60 @@ package ssa
 
 import (
 	"fmt"
-	"go/ast"
 	"go/types"
+	"sync"
 
 	"golang.org/x/tools/internal/typeparams"
 )
 
-// _Instances returns all of the instances generated by runtime types for this function in an unspecified order.
-//
-// Thread-safe.
-//
-// This is an experimental interface! It may change without warning.
-func (prog *Program) _Instances(fn *Function) []*Function {
-	if fn.typeparams.Len() == 0 || len(fn.typeargs) > 0 {
-		return nil
-	}
-
-	prog.methodsMu.Lock()
-	defer prog.methodsMu.Unlock()
-	return prog.instances[fn].list()
-}
-
-// A set of instantiations of a generic function fn.
-type instanceSet struct {
-	fn        *Function               // fn.typeparams.Len() > 0 and len(fn.typeargs) == 0.
-	instances map[*typeList]*Function // canonical type arguments to an instance.
-	syntax    *ast.FuncDecl           // fn.syntax copy for instantiating after fn is done. nil on synthetic packages.
-	info      *types.Info             // fn.pkg.info copy for building after fn is done.. nil on synthetic packages.
-
-	// TODO(taking): Consider ways to allow for clearing syntax and info when done building.
-	// May require a public API change as MethodValue can request these be built after prog.Build() is done.
-}
-
-func (insts *instanceSet) list() []*Function {
-	if insts == nil {
-		return nil
-	}
-
-	fns := make([]*Function, 0, len(insts.instances))
-	for _, fn := range insts.instances {
-		fns = append(fns, fn)
-	}
-	return fns
+// A generic records information about a generic origin function,
+// including a cache of existing instantiations.
+type generic struct { + instancesMu sync.Mutex + instances map[*typeList]*Function // canonical type arguments to an instance. } -// createInstanceSet adds a new instanceSet for a generic function fn if one does not exist. +// instance returns a Function that is the instantiation of generic +// origin function fn with the type arguments targs. // -// Precondition: fn is a package level declaration (function or method). +// Any created instance is added to cr. // -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodMu) -func (prog *Program) createInstanceSet(fn *Function) { - assert(fn.typeparams.Len() > 0 && len(fn.typeargs) == 0, "Can only create instance sets for generic functions") - - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - syntax, _ := fn.syntax.(*ast.FuncDecl) - assert((syntax == nil) == (fn.syntax == nil), "fn.syntax is either nil or a *ast.FuncDecl") - - if _, ok := prog.instances[fn]; !ok { - prog.instances[fn] = &instanceSet{ - fn: fn, - syntax: syntax, - info: fn.info, +// Acquires fn.generic.instancesMu. +func (fn *Function) instance(targs []types.Type, cr *creator) *Function { + key := fn.Prog.canon.List(targs) + + gen := fn.generic + + gen.instancesMu.Lock() + defer gen.instancesMu.Unlock() + inst, ok := gen.instances[key] + if !ok { + inst = createInstance(fn, targs, cr) + if gen.instances == nil { + gen.instances = make(map[*typeList]*Function) } + gen.instances[key] = inst } + return inst } -// needsInstance returns a Function that is the instantiation of fn with the type arguments targs. -// -// Any CREATEd instance is added to cr. -// -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodMu) -func (prog *Program) needsInstance(fn *Function, targs []types.Type, cr *creator) *Function { - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - return prog.lookupOrCreateInstance(fn, targs, cr) -} - -// lookupOrCreateInstance returns a Function that is the instantiation of fn with the type arguments targs. -// -// Any CREATEd instance is added to cr. -// -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodMu) -func (prog *Program) lookupOrCreateInstance(fn *Function, targs []types.Type, cr *creator) *Function { - return prog.instances[fn].lookupOrCreate(targs, &prog.parameterized, cr) -} - -// lookupOrCreate returns the instantiation of insts.fn using targs. +// createInstance returns the instantiation of generic function fn using targs. // If the instantiation is created, this is added to cr. -func (insts *instanceSet) lookupOrCreate(targs []types.Type, parameterized *tpWalker, cr *creator) *Function { - if insts.instances == nil { - insts.instances = make(map[*typeList]*Function) - } - - fn := insts.fn +// +// Requires fn.generic.instancesMu. +func createInstance(fn *Function, targs []types.Type, cr *creator) *Function { prog := fn.Prog - // canonicalize on a tuple of targs. Sig is not unique. - // - // func A[T any]() { - // var x T - // fmt.Println("%T", x) - // } - key := prog.canon.List(targs) - if inst, ok := insts.instances[key]; ok { - return inst - } - - // CREATE instance/instantiation wrapper - var syntax ast.Node - if insts.syntax != nil { - syntax = insts.syntax - } - + // Compute signature. 
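
The generic struct above is a per-origin, mutex-guarded cache keyed by canonicalized type arguments, so each instantiation is created at most once. Reduced to a standalone sketch, with a string key standing in for *typeList:

    package main

    import (
        "fmt"
        "sync"
    )

    type instance struct{ key string }

    // origin mirrors generic: a lazily allocated map from canonical type
    // arguments to the unique instance, guarded by a per-origin mutex.
    type origin struct {
        mu        sync.Mutex
        instances map[string]*instance
    }

    func (o *origin) instance(key string) *instance {
        o.mu.Lock()
        defer o.mu.Unlock()
        inst, ok := o.instances[key]
        if !ok {
            inst = &instance{key: key}
            if o.instances == nil {
                o.instances = make(map[string]*instance)
            }
            o.instances[key] = inst
        }
        return inst
    }

    func main() {
        o := new(origin)
        fmt.Println(o.instance("[int]") == o.instance("[int]")) // true
    }
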
var sig *types.Signature var obj *types.Func if recv := fn.Signature.Recv(); recv != nil { // method - m := fn.object.(*types.Func) - obj = prog.canon.instantiateMethod(m, targs, prog.ctxt) + obj = prog.canon.instantiateMethod(fn.object, targs, prog.ctxt) sig = obj.Type().(*types.Signature) } else { - instSig, err := typeparams.Instantiate(prog.ctxt, fn.Signature, targs, false) + // function + instSig, err := types.Instantiate(prog.ctxt, fn.Signature, targs, false) if err != nil { panic(err) } @@ -137,40 +67,48 @@ func (insts *instanceSet) lookupOrCreate(targs []types.Type, parameterized *tpWa if !ok { panic("Instantiate of a Signature returned a non-signature") } - obj = fn.object.(*types.Func) // instantiation does not exist yet + obj = fn.object // instantiation does not exist yet sig = prog.canon.Type(instance).(*types.Signature) } - var synthetic string - var subst *subster - - concrete := !parameterized.anyParameterized(targs) - - if prog.mode&InstantiateGenerics != 0 && concrete { + // Choose strategy (instance or wrapper). + var ( + synthetic string + subst *subster + build buildFunc + ) + if prog.mode&InstantiateGenerics != 0 && !prog.parameterized.anyParameterized(targs) { synthetic = fmt.Sprintf("instance of %s", fn.Name()) - subst = makeSubster(prog.ctxt, fn.typeparams, targs, false) + if fn.syntax != nil { + scope := typeparams.OriginMethod(obj).Scope() + subst = makeSubster(prog.ctxt, scope, fn.typeparams, targs, false) + build = (*builder).buildFromSyntax + } else { + build = (*builder).buildParamsOnly + } } else { synthetic = fmt.Sprintf("instantiation wrapper of %s", fn.Name()) + build = (*builder).buildInstantiationWrapper } - name := fmt.Sprintf("%s%s", fn.Name(), targs) // may not be unique + /* generic instance or instantiation wrapper */ instance := &Function{ - name: name, + name: fmt.Sprintf("%s%s", fn.Name(), targs), // may not be unique object: obj, Signature: sig, Synthetic: synthetic, - syntax: syntax, + syntax: fn.syntax, // \ + info: fn.info, // } empty for non-created packages + goversion: fn.goversion, // / + build: build, topLevelOrigin: fn, pos: obj.Pos(), Pkg: nil, Prog: fn.Prog, typeparams: fn.typeparams, // share with origin typeargs: targs, - info: insts.info, // on synthetic packages info is nil. subst: subst, } - cr.Add(instance) - insts.instances[key] = instance return instance } diff --git a/vendor/golang.org/x/tools/go/ssa/lift.go b/vendor/golang.org/x/tools/go/ssa/lift.go index 945536bbbf..8bb1949449 100644 --- a/vendor/golang.org/x/tools/go/ssa/lift.go +++ b/vendor/golang.org/x/tools/go/ssa/lift.go @@ -41,7 +41,6 @@ package ssa import ( "fmt" "go/token" - "go/types" "math/big" "os" @@ -106,9 +105,14 @@ func buildDomFrontier(fn *Function) domFrontier { } func removeInstr(refs []Instruction, instr Instruction) []Instruction { + return removeInstrsIf(refs, func(i Instruction) bool { return i == instr }) +} + +func removeInstrsIf(refs []Instruction, p func(Instruction) bool) []Instruction { + // TODO(taking): replace with go1.22 slices.DeleteFunc. i := 0 for _, ref := range refs { - if ref == instr { + if p(ref) { continue } refs[i] = ref @@ -383,12 +387,6 @@ type newPhiMap map[*BasicBlock][]newPhi // // fresh is a source of fresh ids for phi nodes. func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool { - // TODO(taking): zero constants of aggregated types can now be lifted. 
- switch deref(alloc.Type()).Underlying().(type) { - case *types.Array, *types.Struct, *typeparams.TypeParam: - return false - } - // Don't lift named return values in functions that defer // calls that may recover from panic. if fn := alloc.Parent(); fn.Recover != nil { @@ -469,7 +467,7 @@ func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool *fresh++ phi.pos = alloc.Pos() - phi.setType(deref(alloc.Type())) + phi.setType(typeparams.MustDeref(alloc.Type())) phi.block = v if debugLifting { fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v) @@ -514,7 +512,7 @@ func replaceAll(x, y Value) { func renamed(renaming []Value, alloc *Alloc) Value { v := renaming[alloc.index] if v == nil { - v = zeroConst(deref(alloc.Type())) + v = zeroConst(typeparams.MustDeref(alloc.Type())) renaming[alloc.index] = v } return v diff --git a/vendor/golang.org/x/tools/go/ssa/lvalue.go b/vendor/golang.org/x/tools/go/ssa/lvalue.go index 51122b8e85..eede307eab 100644 --- a/vendor/golang.org/x/tools/go/ssa/lvalue.go +++ b/vendor/golang.org/x/tools/go/ssa/lvalue.go @@ -11,6 +11,8 @@ import ( "go/ast" "go/token" "go/types" + + "golang.org/x/tools/internal/typeparams" ) // An lvalue represents an assignable location that may appear on the @@ -25,7 +27,7 @@ type lvalue interface { // An address is an lvalue represented by a true pointer. type address struct { - addr Value + addr Value // must have a pointer core type. pos token.Pos // source position expr ast.Expr // source syntax of the value (not address) [debug mode] } @@ -52,7 +54,7 @@ func (a *address) address(fn *Function) Value { } func (a *address) typ() types.Type { - return deref(a.addr.Type()) + return typeparams.MustDeref(a.addr.Type()) } // An element is an lvalue represented by m[k], the location of an diff --git a/vendor/golang.org/x/tools/go/ssa/methods.go b/vendor/golang.org/x/tools/go/ssa/methods.go index 4185618cdd..5f46a18484 100644 --- a/vendor/golang.org/x/tools/go/ssa/methods.go +++ b/vendor/golang.org/x/tools/go/ssa/methods.go @@ -10,54 +10,125 @@ import ( "fmt" "go/types" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) // MethodValue returns the Function implementing method sel, building -// wrapper methods on demand. It returns nil if sel denotes an -// abstract (interface or parameterized) method. +// wrapper methods on demand. It returns nil if sel denotes an +// interface or generic method. // // Precondition: sel.Kind() == MethodVal. // // Thread-safe. // -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// Acquires prog.methodsMu. func (prog *Program) MethodValue(sel *types.Selection) *Function { if sel.Kind() != types.MethodVal { panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel)) } T := sel.Recv() if types.IsInterface(T) { - return nil // abstract method (interface, possibly type param) + return nil // interface method or type parameter } + + if prog.parameterized.isParameterized(T) { + return nil // generic method + } + if prog.mode&LogSource != 0 { defer logStack("MethodValue %s %v", T, sel)() } - var m *Function - b := builder{created: &creator{}} + var cr creator - prog.methodsMu.Lock() - // Checks whether a type param is reachable from T. - // This is an expensive check. May need to be optimized later. 
- if !prog.parameterized.isParameterized(T) { - m = prog.addMethod(prog.createMethodSet(T), sel, b.created) + m := func() *Function { + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + + // Get or create SSA method set. + mset, ok := prog.methodSets.At(T).(*methodSet) + if !ok { + mset = &methodSet{mapping: make(map[string]*Function)} + prog.methodSets.Set(T, mset) + } + + // Get or create SSA method. + id := sel.Obj().Id() + fn, ok := mset.mapping[id] + if !ok { + obj := sel.Obj().(*types.Func) + _, ptrObj := deptr(recvType(obj)) + _, ptrRecv := deptr(T) + needsPromotion := len(sel.Index()) > 1 + needsIndirection := !ptrObj && ptrRecv + if needsPromotion || needsIndirection { + fn = createWrapper(prog, toSelection(sel), &cr) + } else { + fn = prog.objectMethod(obj, &cr) + } + if fn.Signature.Recv() == nil { + panic(fn) + } + mset.mapping[id] = fn + } + + return fn + }() + + b := builder{created: &cr} + b.iterate() + + return m +} + +// objectMethod returns the Function for a given method symbol. +// The symbol may be an instance of a generic function. It need not +// belong to an existing SSA package created by a call to +// prog.CreatePackage. +// +// objectMethod panics if the function is not a method. +// +// Acquires prog.objectMethodsMu. +func (prog *Program) objectMethod(obj *types.Func, cr *creator) *Function { + sig := obj.Type().(*types.Signature) + if sig.Recv() == nil { + panic("not a method: " + obj.String()) } - prog.methodsMu.Unlock() - if m == nil { - return nil // abstract method (generic) + // Belongs to a created package? + if fn := prog.FuncValue(obj); fn != nil { + return fn } - for !b.done() { - b.buildCreated() - b.needsRuntimeTypes() + + // Instantiation of generic? + if originObj := typeparams.OriginMethod(obj); originObj != obj { + origin := prog.objectMethod(originObj, cr) + assert(origin.typeparams.Len() > 0, "origin is not generic") + targs := receiverTypeArgs(obj) + return origin.instance(targs, cr) } - return m + + // Consult/update cache of methods created from types.Func. + prog.objectMethodsMu.Lock() + defer prog.objectMethodsMu.Unlock() + fn, ok := prog.objectMethods[obj] + if !ok { + fn = createFunction(prog, obj, obj.Name(), nil, nil, "", cr) + fn.Synthetic = "from type information (on demand)" + + if prog.objectMethods == nil { + prog.objectMethods = make(map[*types.Func]*Function) + } + prog.objectMethods[obj] = fn + } + return fn } // LookupMethod returns the implementation of the method of type T // identified by (pkg, name). It returns nil if the method exists but -// is abstract, and panics if T has no such method. +// is an interface method or generic method, and panics if T has no such method. func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function { sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name) if sel == nil { @@ -68,205 +139,139 @@ func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) // methodSet contains the (concrete) methods of a concrete type (non-interface, non-parameterized). type methodSet struct { - mapping map[string]*Function // populated lazily - complete bool // mapping contains all methods + mapping map[string]*Function // populated lazily } -// Precondition: T is a concrete type, e.g. !isInterface(T) and not parameterized. 
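
The wrapper test above (needsPromotion || needsIndirection) separates methods usable directly from those needing synthesized glue: a selection through an embedded field has len(sel.Index()) > 1, and a value-receiver method reached through a pointer needs an indirection. In source terms:

    package main

    import "fmt"

    type T struct{}

    func (T) Hello() string { return "hi" }

    // U embeds T, so U's Hello is promoted: its selection index has length
    // 2 (field T, then method Hello), and MethodValue must synthesize a
    // wrapper that selects the field before delegating to T.Hello.
    type U struct{ T }

    func main() {
        var u U
        fmt.Println(u.Hello()) // hi
    }
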
-// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -func (prog *Program) createMethodSet(T types.Type) *methodSet { - if prog.mode&SanityCheckFunctions != 0 { - if types.IsInterface(T) || prog.parameterized.isParameterized(T) { - panic("type is interface or parameterized") - } - } - mset, ok := prog.methodSets.At(T).(*methodSet) - if !ok { - mset = &methodSet{mapping: make(map[string]*Function)} - prog.methodSets.Set(T, mset) - } - return mset -} - -// Adds any created functions to cr. -// Precondition: T is a concrete type, e.g. !isInterface(T) and not parameterized. -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -func (prog *Program) addMethod(mset *methodSet, sel *types.Selection, cr *creator) *Function { - if sel.Kind() == types.MethodExpr { - panic(sel) - } - id := sel.Obj().Id() - fn := mset.mapping[id] - if fn == nil { - sel := toSelection(sel) - obj := sel.obj.(*types.Func) - - needsPromotion := len(sel.index) > 1 - needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.recv) - if needsPromotion || needsIndirection { - fn = makeWrapper(prog, sel, cr) - } else { - fn = prog.originFunc(obj) - if fn.typeparams.Len() > 0 { // instantiate - targs := receiverTypeArgs(obj) - fn = prog.lookupOrCreateInstance(fn, targs, cr) - } - } - if fn.Signature.Recv() == nil { - panic(fn) // missing receiver - } - mset.mapping[id] = fn - } - return fn -} - -// RuntimeTypes returns a new unordered slice containing all -// concrete types in the program for which a complete (non-empty) -// method set is required at run-time. +// RuntimeTypes returns a new unordered slice containing all types in +// the program for which a runtime type is required. +// +// A runtime type is required for any non-parameterized, non-interface +// type that is converted to an interface, or for any type (including +// interface types) derivable from one through reflection. +// +// The methods of such types may be reachable through reflection or +// interface calls even if they are never called directly. // // Thread-safe. // -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// Acquires prog.runtimeTypesMu. func (prog *Program) RuntimeTypes() []types.Type { - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - var res []types.Type - prog.methodSets.Iterate(func(T types.Type, v interface{}) { - if v.(*methodSet).complete { - res = append(res, T) - } - }) - return res -} - -// declaredFunc returns the concrete function/method denoted by obj. -// Panic ensues if there is none. -func (prog *Program) declaredFunc(obj *types.Func) *Function { - if v := prog.packageLevelMember(obj); v != nil { - return v.(*Function) - } - panic("no concrete method: " + obj.String()) + prog.runtimeTypesMu.Lock() + defer prog.runtimeTypesMu.Unlock() + return prog.runtimeTypes.Keys() } -// needMethodsOf ensures that runtime type information (including the -// complete method set) is available for the specified type T and all -// its subcomponents. -// -// needMethodsOf must be called for at least every type that is an -// operand of some MakeInterface instruction, and for the type of -// every exported package member. -// -// Adds any created functions to cr. -// -// Precondition: T is not a method signature (*Signature with Recv()!=nil). -// Precondition: T is not parameterized. +// forEachReachable calls f for type T and each type reachable from +// its type through reflection. // -// Thread-safe. (Called via Package.build from multiple builder goroutines.) 
+// The function f must use memoization to break cycles and +// return false when the type has already been visited. // -// TODO(adonovan): make this faster. It accounts for 20% of SSA build time. -// -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -func (prog *Program) needMethodsOf(T types.Type, cr *creator) { - prog.methodsMu.Lock() - prog.needMethods(T, false, cr) - prog.methodsMu.Unlock() -} - -// Precondition: T is not a method signature (*Signature with Recv()!=nil). -// Precondition: T is not parameterized. -// Recursive case: skip => don't create methods for T. -// -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -func (prog *Program) needMethods(T types.Type, skip bool, cr *creator) { - // Each package maintains its own set of types it has visited. - if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok { - // needMethods(T) was previously called - if !prevSkip || skip { - return // already seen, with same or false 'skip' value - } - } - prog.runtimeTypes.Set(T, skip) - - tmset := prog.MethodSets.MethodSet(T) - - if !skip && !types.IsInterface(T) && tmset.Len() > 0 { - // Create methods of T. - mset := prog.createMethodSet(T) - if !mset.complete { - mset.complete = true - n := tmset.Len() - for i := 0; i < n; i++ { - prog.addMethod(mset, tmset.At(i), cr) +// TODO(adonovan): publish in typeutil and share with go/callgraph/rta. +func forEachReachable(msets *typeutil.MethodSetCache, T types.Type, f func(types.Type) bool) { + var visit func(T types.Type, skip bool) + visit = func(T types.Type, skip bool) { + if !skip { + if !f(T) { + return } } - } - // Recursion over signatures of each method. - for i := 0; i < tmset.Len(); i++ { - sig := tmset.At(i).Type().(*types.Signature) - prog.needMethods(sig.Params(), false, cr) - prog.needMethods(sig.Results(), false, cr) - } + // Recursion over signatures of each method. + tmset := msets.MethodSet(T) + for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + // It is tempting to call visit(sig, false) + // but, as noted in golang.org/cl/65450043, + // the Signature.Recv field is ignored by + // types.Identical and typeutil.Map, which + // is confusing at best. + // + // More importantly, the true signature rtype + // reachable from a method using reflection + // has no receiver but an extra ordinary parameter. + // For the Read method of io.Reader we want: + // func(Reader, []byte) (int, error) + // but here sig is: + // func([]byte) (int, error) + // with .Recv = Reader (though it is hard to + // notice because it doesn't affect Signature.String + // or types.Identical). + // + // TODO(adonovan): construct and visit the correct + // non-method signature with an extra parameter + // (though since unnamed func types have no methods + // there is essentially no actual demand for this). + // + // TODO(adonovan): document whether or not it is + // safe to skip non-exported methods (as RTA does). + visit(sig.Params(), true) // skip the Tuple + visit(sig.Results(), true) // skip the Tuple + } - switch t := T.(type) { - case *types.Basic: - // nop + switch T := T.(type) { + case *aliases.Alias: + visit(aliases.Unalias(T), false) - case *types.Interface: - // nop---handled by recursion over method set. + case *types.Basic: + // nop - case *types.Pointer: - prog.needMethods(t.Elem(), false, cr) + case *types.Interface: + // nop---handled by recursion over method set. 
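The receiver-as-ordinary-parameter behavior described in the long comment above is easy to observe with the reflect package (standalone demo, mine):

    package main

    import (
        "fmt"
        "reflect"
        "strings"
    )

    func main() {
        // For a concrete type, the reflected method type includes the
        // receiver as an ordinary first parameter.
        m, _ := reflect.TypeOf(strings.NewReader("x")).MethodByName("Read")
        fmt.Println(m.Type) // func(*strings.Reader, []uint8) (int, error)
    }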
- case *types.Slice: - prog.needMethods(t.Elem(), false, cr) + case *types.Pointer: + visit(T.Elem(), false) - case *types.Chan: - prog.needMethods(t.Elem(), false, cr) + case *types.Slice: + visit(T.Elem(), false) - case *types.Map: - prog.needMethods(t.Key(), false, cr) - prog.needMethods(t.Elem(), false, cr) + case *types.Chan: + visit(T.Elem(), false) - case *types.Signature: - if t.Recv() != nil { - panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv())) - } - prog.needMethods(t.Params(), false, cr) - prog.needMethods(t.Results(), false, cr) - - case *types.Named: - // A pointer-to-named type can be derived from a named - // type via reflection. It may have methods too. - prog.needMethods(types.NewPointer(T), false, cr) - - // Consider 'type T struct{S}' where S has methods. - // Reflection provides no way to get from T to struct{S}, - // only to S, so the method set of struct{S} is unwanted, - // so set 'skip' flag during recursion. - prog.needMethods(t.Underlying(), true, cr) - - case *types.Array: - prog.needMethods(t.Elem(), false, cr) - - case *types.Struct: - for i, n := 0, t.NumFields(); i < n; i++ { - prog.needMethods(t.Field(i).Type(), false, cr) - } + case *types.Map: + visit(T.Key(), false) + visit(T.Elem(), false) - case *types.Tuple: - for i, n := 0, t.Len(); i < n; i++ { - prog.needMethods(t.At(i).Type(), false, cr) - } + case *types.Signature: + if T.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv())) + } + visit(T.Params(), true) // skip the Tuple + visit(T.Results(), true) // skip the Tuple + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + visit(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + visit(T.Underlying(), true) // skip the unnamed type + + case *types.Array: + visit(T.Elem(), false) + + case *types.Struct: + for i, n := 0, T.NumFields(); i < n; i++ { + // TODO(adonovan): document whether or not + // it is safe to skip non-exported fields. + visit(T.Field(i).Type(), false) + } - case *typeparams.TypeParam: - panic(T) // type parameters are always abstract. + case *types.Tuple: + for i, n := 0, T.Len(); i < n; i++ { + visit(T.At(i).Type(), false) + } - case *typeparams.Union: - // nop + case *types.TypeParam, *types.Union: + // forEachReachable must not be called on parameterized types. + panic(T) - default: - panic(T) + default: + panic(T) + } } + visit(T, false) } diff --git a/vendor/golang.org/x/tools/go/ssa/parameterized.go b/vendor/golang.org/x/tools/go/ssa/parameterized.go index b11413c818..74c541107e 100644 --- a/vendor/golang.org/x/tools/go/ssa/parameterized.go +++ b/vendor/golang.org/x/tools/go/ssa/parameterized.go @@ -6,7 +6,9 @@ package ssa import ( "go/types" + "sync" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -14,11 +16,24 @@ import ( // // NOTE: Adapted from go/types/infer.go. If that is exported in a future release remove this copy. type tpWalker struct { + mu sync.Mutex seen map[types.Type]bool } -// isParameterized returns true when typ contains any type parameters. -func (w *tpWalker) isParameterized(typ types.Type) (res bool) { +// isParameterized reports whether t recursively contains a type parameter. +// Thread-safe. 
+func (w *tpWalker) isParameterized(t types.Type) bool { + // TODO(adonovan): profile. If this operation is expensive, + // handle the most common but shallow cases such as T, pkg.T, + // *T without consulting the cache under the lock. + + w.mu.Lock() + defer w.mu.Unlock() + return w.isParameterizedLocked(t) +} + +// Requires w.mu. +func (w *tpWalker) isParameterizedLocked(typ types.Type) (res bool) { // NOTE: Adapted from go/types/infer.go. Try to keep in sync. // detect cycles @@ -34,26 +49,29 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { case nil, *types.Basic: // TODO(gri) should nil be handled here? break + case *aliases.Alias: + return w.isParameterizedLocked(aliases.Unalias(t)) + case *types.Array: - return w.isParameterized(t.Elem()) + return w.isParameterizedLocked(t.Elem()) case *types.Slice: - return w.isParameterized(t.Elem()) + return w.isParameterizedLocked(t.Elem()) case *types.Struct: for i, n := 0, t.NumFields(); i < n; i++ { - if w.isParameterized(t.Field(i).Type()) { + if w.isParameterizedLocked(t.Field(i).Type()) { return true } } case *types.Pointer: - return w.isParameterized(t.Elem()) + return w.isParameterizedLocked(t.Elem()) case *types.Tuple: n := t.Len() for i := 0; i < n; i++ { - if w.isParameterized(t.At(i).Type()) { + if w.isParameterizedLocked(t.At(i).Type()) { return true } } @@ -63,14 +81,14 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { // of a generic function type (or an interface method) that is // part of the type we're testing. We don't care about these type // parameters. - // Similarly, the receiver of a method may declare (rather then + // Similarly, the receiver of a method may declare (rather than // use) type parameters, we don't care about those either. // Thus, we only need to look at the input and result parameters. - return w.isParameterized(t.Params()) || w.isParameterized(t.Results()) + return w.isParameterizedLocked(t.Params()) || w.isParameterizedLocked(t.Results()) case *types.Interface: for i, n := 0, t.NumMethods(); i < n; i++ { - if w.isParameterized(t.Method(i).Type()) { + if w.isParameterizedLocked(t.Method(i).Type()) { return true } } @@ -79,30 +97,31 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { panic(err) } for _, term := range terms { - if w.isParameterized(term.Type()) { + if w.isParameterizedLocked(term.Type()) { return true } } case *types.Map: - return w.isParameterized(t.Key()) || w.isParameterized(t.Elem()) + return w.isParameterizedLocked(t.Key()) || w.isParameterizedLocked(t.Elem()) case *types.Chan: - return w.isParameterized(t.Elem()) + return w.isParameterizedLocked(t.Elem()) case *types.Named: - args := typeparams.NamedTypeArgs(t) + args := t.TypeArgs() // TODO(taking): this does not match go/types/infer.go. Check with rfindley. - if params := typeparams.ForNamed(t); params.Len() > args.Len() { + if params := t.TypeParams(); params.Len() > args.Len() { return true } for i, n := 0, args.Len(); i < n; i++ { - if w.isParameterized(args.At(i)) { + if w.isParameterizedLocked(args.At(i)) { return true } } + return w.isParameterizedLocked(t.Underlying()) // recurse for types local to parameterized functions - case *typeparams.TypeParam: + case *types.TypeParam: return true default: @@ -112,9 +131,13 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { return false } +// anyParameterized reports whether any element of ts is parameterized. +// Thread-safe. 
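For readers outside this vendored tree, the same walk can be approximated with only the public go/types API; a simplified, single-goroutine sketch (helper name is mine; no alias handling, and the seen map doubles as the cycle breaker):

    package demo

    import "go/types"

    // containsTypeParam reports whether t mentions a type parameter.
    func containsTypeParam(t types.Type, seen map[types.Type]bool) bool {
        if seen[t] {
            return false
        }
        seen[t] = true
        switch t := t.(type) {
        case *types.TypeParam:
            return true
        case *types.Pointer:
            return containsTypeParam(t.Elem(), seen)
        case *types.Slice:
            return containsTypeParam(t.Elem(), seen)
        case *types.Array:
            return containsTypeParam(t.Elem(), seen)
        case *types.Map:
            return containsTypeParam(t.Key(), seen) || containsTypeParam(t.Elem(), seen)
        case *types.Tuple:
            for i := 0; i < t.Len(); i++ {
                if containsTypeParam(t.At(i).Type(), seen) {
                    return true
                }
            }
        case *types.Signature:
            return containsTypeParam(t.Params(), seen) || containsTypeParam(t.Results(), seen)
        case *types.Named:
            targs := t.TypeArgs()
            for i := 0; i < targs.Len(); i++ {
                if containsTypeParam(targs.At(i), seen) {
                    return true
                }
            }
        }
        return false
    }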
func (w *tpWalker) anyParameterized(ts []types.Type) bool { + w.mu.Lock() + defer w.mu.Unlock() for _, t := range ts { - if w.isParameterized(t) { + if w.isParameterizedLocked(t) { return true } } diff --git a/vendor/golang.org/x/tools/go/ssa/print.go b/vendor/golang.org/x/tools/go/ssa/print.go index 8b783196e4..38d8404fdc 100644 --- a/vendor/golang.org/x/tools/go/ssa/print.go +++ b/vendor/golang.org/x/tools/go/ssa/print.go @@ -51,7 +51,7 @@ func relType(t types.Type, from *types.Package) string { return s } -func relTerm(term *typeparams.Term, from *types.Package) string { +func relTerm(term *types.Term, from *types.Package) string { s := relType(term.Type(), from) if term.Tilde() { return "~" + s @@ -95,7 +95,7 @@ func (v *Alloc) String() string { op = "new" } from := v.Parent().relPkg() - return fmt.Sprintf("%s %s (%s)", op, relType(deref(v.Type()), from), v.Comment) + return fmt.Sprintf("%s %s (%s)", op, relType(typeparams.MustDeref(v.Type()), from), v.Comment) } func (v *Phi) String() string { @@ -259,21 +259,19 @@ func (v *MakeChan) String() string { } func (v *FieldAddr) String() string { - st := typeparams.CoreType(deref(v.X.Type())).(*types.Struct) // Be robust against a bad index. name := "?" - if 0 <= v.Field && v.Field < st.NumFields() { - name = st.Field(v.Field).Name() + if fld := fieldOf(typeparams.MustDeref(v.X.Type()), v.Field); fld != nil { + name = fld.Name() } return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field) } func (v *Field) String() string { - st := typeparams.CoreType(v.X.Type()).(*types.Struct) // Be robust against a bad index. name := "?" - if 0 <= v.Field && v.Field < st.NumFields() { - name = st.Field(v.Field).Name() + if fld := fieldOf(v.X.Type(), v.Field); fld != nil { + name = fld.Name() } return fmt.Sprintf("%s.%s [#%d]", relName(v.X, v), name, v.Field) } @@ -452,7 +450,7 @@ func WritePackage(buf *bytes.Buffer, p *Package) { case *Global: fmt.Fprintf(buf, " var %-*s %s\n", - maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from)) + maxname, name, relType(typeparams.MustDeref(mem.Type()), from)) } } diff --git a/vendor/golang.org/x/tools/go/ssa/sanity.go b/vendor/golang.org/x/tools/go/ssa/sanity.go index 88ad374ded..13bd39fe86 100644 --- a/vendor/golang.org/x/tools/go/ssa/sanity.go +++ b/vendor/golang.org/x/tools/go/ssa/sanity.go @@ -8,6 +8,7 @@ package ssa // Currently it checks CFG invariants but little at the instruction level. import ( + "bytes" "fmt" "go/types" "io" @@ -131,6 +132,11 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { case *BinOp: case *Call: + if common := instr.Call; common.IsInvoke() { + if !types.IsInterface(common.Value.Type()) { + s.errorf("invoke on %s (%s) which is not an interface type (or type param)", common.Value, common.Value.Type()) + } + } case *ChangeInterface: case *ChangeType: case *SliceToArrayPointer: @@ -343,7 +349,7 @@ func (s *sanity) checkBlock(b *BasicBlock, index int) { // Check that "untyped" types only appear on constant operands. 
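The "untyped" property checked in the next hunk can be probed directly via go/types (small demo, mine):

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        u := types.Typ[types.UntypedInt]
        fmt.Println(u, u.Info()&types.IsUntyped != 0) // untyped int true
        i := types.Typ[types.Int]
        fmt.Println(i, i.Info()&types.IsUntyped != 0) // int false
    }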
if _, ok := (*op).(*Const); !ok { - if basic, ok := (*op).Type().(*types.Basic); ok { + if basic, ok := (*op).Type().Underlying().(*types.Basic); ok { if basic.Info()&types.IsUntyped != 0 { s.errorf("operand #%d of %s is untyped: %s", i, instr, basic) } @@ -412,14 +418,17 @@ func (s *sanity) checkFunction(fn *Function) bool { s.errorf("nil Prog") } + var buf bytes.Buffer _ = fn.String() // must not crash _ = fn.RelString(fn.relPkg()) // must not crash + WriteFunction(&buf, fn) // must not crash // All functions have a package, except delegates (which are // shared across packages, or duplicated as weak symbols in a // separate-compilation model), and error.Error. if fn.Pkg == nil { - if strings.HasPrefix(fn.Synthetic, "wrapper ") || + if strings.HasPrefix(fn.Synthetic, "from type information (on demand)") || + strings.HasPrefix(fn.Synthetic, "wrapper ") || strings.HasPrefix(fn.Synthetic, "bound ") || strings.HasPrefix(fn.Synthetic, "thunk ") || strings.HasSuffix(fn.name, "Error") || diff --git a/vendor/golang.org/x/tools/go/ssa/source.go b/vendor/golang.org/x/tools/go/ssa/source.go index b9a08363ec..6700305bd9 100644 --- a/vendor/golang.org/x/tools/go/ssa/source.go +++ b/vendor/golang.org/x/tools/go/ssa/source.go @@ -121,7 +121,9 @@ func findNamedFunc(pkg *Package, pos token.Pos) *Function { // Don't call Program.Method: avoid creating wrappers. obj := mset.At(i).Obj().(*types.Func) if obj.Pos() == pos { - return pkg.objects[obj].(*Function) + // obj from MethodSet may not be the origin type. + m := typeparams.OriginMethod(obj) + return pkg.objects[m].(*Function) } } } @@ -170,16 +172,19 @@ func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { // --- Lookup functions for source-level named entities (types.Objects) --- // Package returns the SSA Package corresponding to the specified -// type-checker package object. -// It returns nil if no such SSA package has been created. -func (prog *Program) Package(obj *types.Package) *Package { - return prog.packages[obj] +// type-checker package. It returns nil if no such Package was +// created by a prior call to prog.CreatePackage. +func (prog *Program) Package(pkg *types.Package) *Package { + return prog.packages[pkg] } -// packageLevelMember returns the package-level member corresponding to -// the specified named object, which may be a package-level const -// (*NamedConst), var (*Global) or func (*Function) of some package in -// prog. It returns nil if the object is not found. +// packageLevelMember returns the package-level member corresponding +// to the specified symbol, which may be a package-level const +// (*NamedConst), var (*Global) or func/method (*Function) of some +// package in prog. +// +// It returns nil if the object belongs to a package that has not been +// created by prog.CreatePackage. func (prog *Program) packageLevelMember(obj types.Object) Member { if pkg, ok := prog.packages[obj.Pkg()]; ok { return pkg.objects[obj] @@ -187,24 +192,16 @@ func (prog *Program) packageLevelMember(obj types.Object) Member { return nil } -// originFunc returns the package-level generic function that is the -// origin of obj. If returns nil if the generic function is not found. -func (prog *Program) originFunc(obj *types.Func) *Function { - return prog.declaredFunc(typeparams.OriginMethod(obj)) -} - -// FuncValue returns the concrete Function denoted by the source-level -// named function obj, or nil if obj denotes an interface method. 
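findNamedFunc above maps an instantiated method symbol back to its generic origin via the internal OriginMethod helper; on Go 1.19+ the public (*types.Func).Origin appears to provide an equivalent mapping. A sketch (sample source is mine):

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
        "go/types"
    )

    const src = `package p
    type Box[T any] struct{ v T }
    func (b Box[T]) Get() T { return b.v }
    var B Box[int]
    `

    func main() {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }
        pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
        if err != nil {
            panic(err)
        }
        get := types.NewMethodSet(pkg.Scope().Lookup("B").Type()).
            Lookup(pkg, "Get").Obj().(*types.Func)
        fmt.Println(get)          // method of the instantiation Box[int]
        fmt.Println(get.Origin()) // the generic origin method of Box[T]
    }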
-// -// TODO(adonovan): check the invariant that obj.Type() matches the -// result's Signature, both in the params/results and in the receiver. +// FuncValue returns the SSA function or (non-interface) method +// denoted by the specified func symbol. It returns nil if the symbol +// denotes an interface method, or belongs to a package that was not +// created by prog.CreatePackage. func (prog *Program) FuncValue(obj *types.Func) *Function { fn, _ := prog.packageLevelMember(obj).(*Function) return fn } -// ConstValue returns the SSA Value denoted by the source-level named -// constant obj. +// ConstValue returns the SSA constant denoted by the specified const symbol. func (prog *Program) ConstValue(obj *types.Const) *Const { // TODO(adonovan): opt: share (don't reallocate) // Consts for const objects and constant ast.Exprs. @@ -221,7 +218,7 @@ } // VarValue returns the SSA Value that corresponds to a specific -// identifier denoting the source-level named variable obj. +// identifier denoting the specified var symbol. // // VarValue returns nil if a local variable was not found, perhaps // because its package was not built, the debug information was not diff --git a/vendor/golang.org/x/tools/go/ssa/ssa.go b/vendor/golang.org/x/tools/go/ssa/ssa.go index 5904b817b3..30bf4bc677 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssa.go +++ b/vendor/golang.org/x/tools/go/ssa/ssa.go @@ -23,20 +23,25 @@ type Program struct { Fset *token.FileSet // position information for the files of this Program imported map[string]*Package // all importable Packages, keyed by import path - packages map[*types.Package]*Package // all loaded Packages, keyed by object + packages map[*types.Package]*Package // all created Packages mode BuilderMode // set of mode bits for SSA construction MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets - canon *canonizer // type canonicalization map - ctxt *typeparams.Context // cache for type checking instantiations + canon *canonizer // type canonicalization map + ctxt *types.Context // cache for type checking instantiations - methodsMu sync.Mutex // guards the following maps: - methodSets typeutil.Map // maps type to its concrete methodSet - runtimeTypes typeutil.Map // types for which rtypes are needed - bounds map[boundsKey]*Function // bounds for curried x.Method closures - thunks map[selectionKey]*Function // thunks for T.Method expressions - instances map[*Function]*instanceSet // instances of generic functions - parameterized tpWalker // determines whether a type is parameterized. + methodsMu sync.Mutex + methodSets typeutil.Map // maps type to its concrete *methodSet + + parameterized tpWalker // memoization of whether a type refers to type parameters + + runtimeTypesMu sync.Mutex + runtimeTypes typeutil.Map // set of runtime types (from MakeInterface) + + // objectMethods is a memoization of objectMethod + // to avoid creation of duplicate methods from type information. + objectMethodsMu sync.Mutex + objectMethods map[*types.Func]*Function } // A Package is a single analyzed Go package containing Members for @@ -51,17 +56,19 @@ type Package struct { Prog *Program // the owning program Pkg *types.Package // the corresponding go/types.Package Members map[string]Member // all package members keyed by name (incl. init and init#%d) - objects map[types.Object]Member // mapping of package objects to members (incl. methods). Contains *NamedConst, *Global, *Function. 
+ objects map[types.Object]Member // mapping of package objects to members (incl. methods). Contains *NamedConst, *Global, *Function (values but not types) init *Function // Func("init"); the package's init function debug bool // include full debug info in this package + syntax bool // package was loaded from syntax // The following fields are set transiently, then cleared // after building. - buildOnce sync.Once // ensures package building occurs once - ninit int32 // number of init functions - info *types.Info // package type information - files []*ast.File // package ASTs - created creator // members created as a result of building this package (includes declared functions, wrappers) + buildOnce sync.Once // ensures package building occurs once + ninit int32 // number of init functions + info *types.Info // package type information + files []*ast.File // package ASTs + created creator // members created as a result of building this package (includes declared functions, wrappers) + initVersion map[ast.Expr]string // goversion to use for each global var init expr } // A Member is a member of a Go package, implemented by *NamedConst, @@ -258,8 +265,8 @@ type Node interface { // or method. // // If Blocks is nil, this indicates an external function for which no -// Go source code is available. In this case, FreeVars and Locals -// are nil too. Clients performing whole-program analysis must +// Go source code is available. In this case, FreeVars, Locals, and +// Params are nil too. Clients performing whole-program analysis must // handle external functions specially. // // Blocks contains the function's control-flow graph (CFG). @@ -296,8 +303,8 @@ type Node interface { // // A generic function is a function or method that has uninstantiated type // parameters (TypeParams() != nil). Consider a hypothetical generic -// method, (*Map[K,V]).Get. It may be instantiated with all ground -// (non-parameterized) types as (*Map[string,int]).Get or with +// method, (*Map[K,V]).Get. It may be instantiated with all +// non-parameterized types as (*Map[string,int]).Get or with // parameterized types as (*Map[string,U]).Get, where U is a type parameter. // In both instantiations, Origin() refers to the instantiated generic // method, (*Map[K,V]).Get, TypeParams() refers to the parameters [K,V] of @@ -305,39 +312,45 @@ type Node interface { // respectively, and is nil in the generic method. type Function struct { name string - object types.Object // a declared *types.Func or one of its wrappers - method *selection // info about provenance of synthetic methods; thunk => non-nil + object *types.Func // symbol for declared function (nil for FuncLit or synthetic init) + method *selection // info about provenance of synthetic methods; thunk => non-nil Signature *types.Signature pos token.Pos - Synthetic string // provenance of synthetic function; "" for true source functions - syntax ast.Node // *ast.Func{Decl,Lit}; replaced with simple ast.Node after build, unless debug mode - parent *Function // enclosing function if anon; nil if global - Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error) - Prog *Program // enclosing program + // source information + Synthetic string // provenance of synthetic function; "" for true source functions + syntax ast.Node // *ast.Func{Decl,Lit}, if from syntax (incl. 
generic instances) + info *types.Info // type annotations (iff syntax != nil) + goversion string // Go version of syntax (NB: init is special) + + build buildFunc // algorithm to build function body (nil => built) + parent *Function // enclosing function if anon; nil if global + Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error) + Prog *Program // enclosing program + + // These fields are populated only when the function body is built: + Params []*Parameter // function parameters; for methods, includes receiver FreeVars []*FreeVar // free variables whose values must be supplied by closure - Locals []*Alloc // local variables of this function + Locals []*Alloc // frame-allocated variables of this function Blocks []*BasicBlock // basic blocks of the function; nil => external Recover *BasicBlock // optional; control transfers here after recovered panic AnonFuncs []*Function // anonymous functions directly beneath this one referrers []Instruction // referring instructions (iff Parent() != nil) - built bool // function has completed both CREATE and BUILD phase. anonIdx int32 // position of a nested function in parent's AnonFuncs. fn.Parent()!=nil => fn.Parent().AnonFunc[fn.anonIdx] == fn. - typeparams *typeparams.TypeParamList // type parameters of this function. typeparams.Len() > 0 => generic or instance of generic function - typeargs []types.Type // type arguments that instantiated typeparams. len(typeargs) > 0 => instance of generic function - topLevelOrigin *Function // the origin function if this is an instance of a source function. nil if Parent()!=nil. + typeparams *types.TypeParamList // type parameters of this function. typeparams.Len() > 0 => generic or instance of generic function + typeargs []types.Type // type arguments that instantiated typeparams. len(typeargs) > 0 => instance of generic function + topLevelOrigin *Function // the origin function if this is an instance of a source function. nil if Parent()!=nil. + generic *generic // instances of this function, if generic - // The following fields are set transiently during building, - // then cleared. + // The following fields are cleared after building. currentBlock *BasicBlock // where to emit code - objects map[types.Object]Value // addresses of local variables + vars map[*types.Var]Value // addresses of local variables namedResults []*Alloc // tuple of named results targets *targets // linked stack of branch targets - lblocks map[types.Object]*lblock // labelled blocks - info *types.Info // *types.Info to build from. nil for wrappers. - subst *subster // non-nil => expand generic body using this type substitution of ground types + lblocks map[*types.Label]*lblock // labelled blocks + subst *subster // type parameter substitutions (if non-nil) } // BasicBlock represents an SSA basic block. @@ -402,9 +415,8 @@ type FreeVar struct { // A Parameter represents an input parameter of a function. type Parameter struct { name string - object types.Object // a *types.Var; nil for non-source locals + object *types.Var // non-nil typ types.Type - pos token.Pos parent *Function referrers []Instruction } @@ -482,15 +494,12 @@ type Builtin struct { // type of the allocated variable is actually // Type().Underlying().(*types.Pointer).Elem(). // -// If Heap is false, Alloc allocates space in the function's -// activation record (frame); we refer to an Alloc(Heap=false) as a -// "local" alloc. 
Each local Alloc returns the same address each time -// it is executed within the same activation; the space is -// re-initialized to zero. +// If Heap is false, Alloc zero-initializes the same local variable in +// the call frame and returns its address; in this case the Alloc must +// be present in Function.Locals. We call this a "local" alloc. // -// If Heap is true, Alloc allocates space in the heap; we -// refer to an Alloc(Heap=true) as a "new" alloc. Each new Alloc -// returns a different address each time it is executed. +// If Heap is true, Alloc allocates a new zero-initialized variable +// each time the instruction is executed. We call this a "new" alloc. // // When Alloc is applied to a channel, map or slice type, it returns // the address of an uninitialized (nil) reference of that kind; store @@ -681,8 +690,8 @@ type Convert struct { type MultiConvert struct { register X Value - from []*typeparams.Term - to []*typeparams.Term + from []*types.Term + to []*types.Term } // ChangeInterface constructs a value of one interface type from a @@ -865,7 +874,7 @@ type Slice struct { type FieldAddr struct { register X Value // *struct - Field int // field is typeparams.CoreType(X.Type().Underlying().(*types.Pointer).Elem()).(*types.Struct).Field(Field) + Field int // index into CoreType(CoreType(X.Type()).(*types.Pointer).Elem()).(*types.Struct).Fields } // The Field instruction yields the Field of struct X. @@ -884,7 +893,7 @@ type FieldAddr struct { type Field struct { register X Value // struct - Field int // index into typeparams.CoreType(X.Type()).(*types.Struct).Fields + Field int // index into CoreType(X.Type()).(*types.Struct).Fields } // The IndexAddr instruction yields the address of the element at @@ -1068,11 +1077,12 @@ type Next struct { // Type() reflects the actual type of the result, possibly a // 2-types.Tuple; AssertedType is the asserted type. // -// Pos() returns the ast.CallExpr.Lparen if the instruction arose from -// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the -// instruction arose from an explicit e.(T) operation; or the -// ast.CaseClause.Case if the instruction arose from a case of a -// type-switch statement. +// Depending on the TypeAssert's purpose, Pos may return: +// - the ast.CallExpr.Lparen of an explicit T(e) conversion; +// - the ast.TypeAssertExpr.Lparen of an explicit e.(T) operation; +// - the ast.CaseClause.Case of a case of a type-switch statement; +// - the Ident(m).NamePos of an interface method value i.m +// (for which TypeAssert may be used to effect the nil check). // // Example printed form: // @@ -1390,7 +1400,7 @@ type anInstruction struct { // represents a dynamically dispatched call to an interface method. // In this mode, Value is the interface value and Method is the // interface's abstract method. The interface value may be a type -// parameter. Note: an abstract method may be shared by multiple +// parameter. Note: an interface method may be shared by multiple // interfaces due to embedding; Value.Type() provides the specific // interface used for this call. // @@ -1408,7 +1418,7 @@ type anInstruction struct { // the last element of Args is a slice. 
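To see the Alloc, FieldAddr, and Load forms documented above in an actual function, one can build a tiny package and dump its SSA (sketch; the sample source and package path are mine):

    package main

    import (
        "bytes"
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
        "go/types"

        "golang.org/x/tools/go/ssa"
        "golang.org/x/tools/go/ssa/ssautil"
    )

    const src = `package p
    type Point struct{ X, Y int }
    func Mag2(p *Point) int { return p.X*p.X + p.Y*p.Y }
    `

    func main() {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }
        pkg := types.NewPackage("p", "p")
        ssapkg, _, err := ssautil.BuildPackage(
            &types.Config{}, fset, pkg, []*ast.File{f}, ssa.SanityCheckFunctions)
        if err != nil {
            panic(err)
        }
        var buf bytes.Buffer
        ssa.WriteFunction(&buf, ssapkg.Func("Mag2")) // FieldAddr (&p.X) etc. appear here
        fmt.Print(buf.String())
    }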
type CallCommon struct { Value Value // receiver (invoke mode) or func value (call mode) - Method *types.Func // abstract method (invoke mode) + Method *types.Func // interface method (invoke mode) Args []Value // actual parameters (in static method call, includes receiver) pos token.Pos // position of CallExpr.Lparen, iff explicit in source } @@ -1507,14 +1517,19 @@ func (v *Global) String() string { return v.RelString(nil) func (v *Global) Package() *Package { return v.Pkg } func (v *Global) RelString(from *types.Package) string { return relString(v, from) } -func (v *Function) Name() string { return v.name } -func (v *Function) Type() types.Type { return v.Signature } -func (v *Function) Pos() token.Pos { return v.pos } -func (v *Function) Token() token.Token { return token.FUNC } -func (v *Function) Object() types.Object { return v.object } -func (v *Function) String() string { return v.RelString(nil) } -func (v *Function) Package() *Package { return v.Pkg } -func (v *Function) Parent() *Function { return v.parent } +func (v *Function) Name() string { return v.name } +func (v *Function) Type() types.Type { return v.Signature } +func (v *Function) Pos() token.Pos { return v.pos } +func (v *Function) Token() token.Token { return token.FUNC } +func (v *Function) Object() types.Object { + if v.object != nil { + return types.Object(v.object) + } + return nil +} +func (v *Function) String() string { return v.RelString(nil) } +func (v *Function) Package() *Package { return v.Pkg } +func (v *Function) Parent() *Function { return v.parent } func (v *Function) Referrers() *[]Instruction { if v.parent != nil { return &v.referrers @@ -1524,10 +1539,7 @@ func (v *Function) Referrers() *[]Instruction { // TypeParams are the function's type parameters if generic or the // type parameters that were instantiated if fn is an instantiation. -// -// TODO(taking): declare result type as *types.TypeParamList -// after we drop support for go1.17. -func (fn *Function) TypeParams() *typeparams.TypeParamList { +func (fn *Function) TypeParams() *types.TypeParamList { return fn.typeparams } @@ -1535,12 +1547,25 @@ func (fn *Function) TypeParams() *typeparams.TypeParamList { // from fn.Origin(). func (fn *Function) TypeArgs() []types.Type { return fn.typeargs } -// Origin is the function fn is an instantiation of. Returns nil if fn is not -// an instantiation. +// Origin returns the generic function from which fn was instantiated, +// or nil if fn is not an instantiation. func (fn *Function) Origin() *Function { if fn.parent != nil && len(fn.typeargs) > 0 { - // Nested functions are BUILT at a different time than there instances. - return fn.parent.Origin().AnonFuncs[fn.anonIdx] + // Nested functions are BUILT at a different time than their instances. + // Build declared package if not yet BUILT. This is not an expected use + // case, but is simple and robust. + fn.declaredPackage().Build() + } + return origin(fn) +} + +// origin is the function that fn is an instantiation of. Returns nil if fn is +// not an instantiation. +// +// Precondition: fn and the origin function are done building. 
+func origin(fn *Function) *Function { + if fn.parent != nil && len(fn.typeargs) > 0 { + return origin(fn.parent).AnonFuncs[fn.anonIdx] } return fn.topLevelOrigin } @@ -1549,7 +1574,7 @@ func (v *Parameter) Type() types.Type { return v.typ } func (v *Parameter) Name() string { return v.name } func (v *Parameter) Object() types.Object { return v.object } func (v *Parameter) Referrers() *[]Instruction { return &v.referrers } -func (v *Parameter) Pos() token.Pos { return v.pos } +func (v *Parameter) Pos() token.Pos { return v.object.Pos() } func (v *Parameter) Parent() *Function { return v.parent } func (v *Alloc) Type() types.Type { return v.typ } diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/load.go b/vendor/golang.org/x/tools/go/ssa/ssautil/load.go index 96d69a20a1..3daa67a07e 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssautil/load.go +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/load.go @@ -14,14 +14,14 @@ import ( "golang.org/x/tools/go/loader" "golang.org/x/tools/go/packages" "golang.org/x/tools/go/ssa" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/versions" ) // Packages creates an SSA program for a set of packages. // // The packages must have been loaded from source syntax using the -// golang.org/x/tools/go/packages.Load function in LoadSyntax or -// LoadAllSyntax mode. +// [packages.Load] function in [packages.LoadSyntax] or +// [packages.LoadAllSyntax] mode. // // Packages creates an SSA package for each well-typed package in the // initial list, plus all their dependencies. The resulting list of @@ -29,12 +29,30 @@ import ( // a nil if SSA code could not be constructed for the corresponding initial // package due to type errors. // -// Code for bodies of functions is not built until Build is called on -// the resulting Program. SSA code is constructed only for the initial -// packages with well-typed syntax trees. +// Code for bodies of functions is not built until [Program.Build] is +// called on the resulting Program. SSA code is constructed only for +// the initial packages with well-typed syntax trees. // // The mode parameter controls diagnostics and checking during SSA construction. func Packages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) { + // TODO(adonovan): opt: this calls CreatePackage far more than + // necessary: for all dependencies, not just the (non-initial) + // direct dependencies of the initial packages. + // + // But can it reasonably be changed without breaking the + // spirit and/or letter of the law above? Clients may notice + // if we call CreatePackage less, as methods like + // Program.FuncValue will return nil. Or must we provide a new + // function (and perhaps deprecate this one)? Is it worth it? + // + // Tim King makes the interesting point that it would be + // possible to entirely alleviate the client from the burden + // of calling CreatePackage for non-syntax packages, if we + // were to treat vars and funcs lazily in the same way we now + // treat methods. (In essence, try to move away from the + // notion of ssa.Packages, and make the Program answer + // all reasonable questions about any types.Object.) + return doPackages(initial, mode, false) } @@ -42,7 +60,7 @@ func Packages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, // their dependencies. // // The packages must have been loaded from source syntax using the -// golang.org/x/tools/go/packages.Load function in LoadAllSyntax mode. +// [packages.Load] function in [packages.LoadAllSyntax] mode. 
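Typical client code for the Packages entry point described here (sketch; the pattern "fmt" is just an example target):

    package main

    import (
        "fmt"

        "golang.org/x/tools/go/packages"
        "golang.org/x/tools/go/ssa"
        "golang.org/x/tools/go/ssa/ssautil"
    )

    func main() {
        cfg := &packages.Config{Mode: packages.LoadAllSyntax}
        pkgs, err := packages.Load(cfg, "fmt")
        if err != nil {
            panic(err)
        }
        prog, ssapkgs := ssautil.Packages(pkgs, ssa.SanityCheckFunctions)
        prog.Build() // build function bodies for all created packages
        for _, p := range ssapkgs {
            if p != nil { // nil for initial packages with type errors
                fmt.Println(p.Pkg.Path())
            }
        }
    }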
// // AllPackages creates an SSA package for each well-typed package in the // initial list, plus all their dependencies. The resulting list of // @@ -102,7 +120,7 @@ // // The mode parameter controls diagnostics and checking during SSA construction. // -// Deprecated: Use golang.org/x/tools/go/packages and the Packages +// Deprecated: Use [golang.org/x/tools/go/packages] and the [Packages] // function instead; see ssa.Example_loadPackages. func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program { prog := ssa.NewProgram(lprog.Fset, mode) @@ -116,16 +134,17 @@ return prog } -// BuildPackage builds an SSA program with IR for a single package. +// BuildPackage builds an SSA program with SSA intermediate +// representation (IR) for all functions of a single package. // -// It populates pkg by type-checking the specified file ASTs. All +// It populates pkg by type-checking the specified file syntax trees. All // dependencies are loaded using the importer specified by tc, which // typically loads compiler export data; SSA code cannot be built for -// those packages. BuildPackage then constructs an ssa.Program with all +// those packages. BuildPackage then constructs an [ssa.Program] with all // dependency packages created, and builds and returns the SSA package // corresponding to pkg. // -// The caller must have set pkg.Path() to the import path. +// The caller must have set pkg.Path to the import path. // // The operation fails if there were any type-checking or import errors. // @@ -143,10 +162,11 @@ Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), } - typeparams.InitInstanceInfo(info) + versions.InitFileVersions(info) if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil { return nil, nil, err } @@ -168,6 +188,25 @@ } createAll(pkg.Imports()) + // TODO(adonovan): we could replace createAll with just: + // + // // Create SSA packages for all imports. + // for _, p := range pkg.Imports() { + // prog.CreatePackage(p, nil, nil, true) + // } + // + // (with minor changes to ../builder_test.go as + // shown in CL 511715 PS 10.) But this would strictly violate + // the letter of the doc comment above, which says "all + // dependencies created". + // + // Tim makes the good point that with some extra work we could + // remove the need for any CreatePackage calls except the + // ones with syntax (i.e. primary packages). Of course + // you wouldn't have ssa.Packages and Members for as + // many things but no-one really uses that anyway. + // I wish I had done this from the outset. + // Create and build the primary package. 
ssapkg := prog.CreatePackage(pkg, files, info, false) ssapkg.Build() diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go b/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go index 5f27050b02..b4feb42cb3 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go @@ -4,7 +4,14 @@ package ssautil // import "golang.org/x/tools/go/ssa/ssautil" -import "golang.org/x/tools/go/ssa" +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/ssa" + + _ "unsafe" // for linkname hack +) // This file defines utilities for visiting the SSA representation of // a Program. @@ -18,50 +25,113 @@ // synthetic wrappers. // // Precondition: all packages are built. +// +// TODO(adonovan): this function is underspecified. It doesn't +// actually work like a linker, which computes reachability from main +// using something like go/callgraph/cha (without materializing the +// call graph). In fact, it treats all public functions and all +// methods of public non-parameterized types as roots, even though +// they may be unreachable--but only in packages created from syntax. +// +// I think we should deprecate the AllFunctions function in favor of two +// clearly defined ones: +// +// 1. The first would efficiently compute CHA reachability from a set +// of main packages, making it suitable for a whole-program +// analysis context with InstantiateGenerics, in conjunction with +// Program.Build. +// +// 2. The second would return only the set of functions corresponding +// to source Func{Decl,Lit} syntax, like SrcFunctions in +// go/analysis/passes/buildssa; this is suitable for +// package-at-a-time (or handful of packages) context. +// ssa.Package could easily expose it as a field. +// +// We could add them unexported for now and use them via the linkname hack. func AllFunctions(prog *ssa.Program) map[*ssa.Function]bool { - visit := visitor{ - prog: prog, - seen: make(map[*ssa.Function]bool), + seen := make(map[*ssa.Function]bool) + + var function func(fn *ssa.Function) + function = func(fn *ssa.Function) { + if !seen[fn] { + seen[fn] = true + var buf [10]*ssa.Value // avoid alloc in common case + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + for _, op := range instr.Operands(buf[:0]) { + if fn, ok := (*op).(*ssa.Function); ok { + function(fn) + } + } + } + } + } } - visit.program() - return visit.seen -} -type visitor struct { - prog *ssa.Program - seen map[*ssa.Function]bool -} + // TODO(adonovan): opt: provide a way to share a builder + // across a sequence of MethodValue calls. -func (visit *visitor) program() { - for _, pkg := range visit.prog.AllPackages() { - for _, mem := range pkg.Members { - if fn, ok := mem.(*ssa.Function); ok { - visit.function(fn) + methodsOf := func(T types.Type) { + if !types.IsInterface(T) { + mset := prog.MethodSets.MethodSet(T) + for i := 0; i < mset.Len(); i++ { + function(prog.MethodValue(mset.At(i))) } } } - for _, T := range visit.prog.RuntimeTypes() { - mset := visit.prog.MethodSets.MethodSet(T) - for i, n := 0, mset.Len(); i < n; i++ { - visit.function(visit.prog.MethodValue(mset.At(i))) + + // Historically, Program.RuntimeTypes used to include the type + // of any exported member of a package loaded from syntax that + // has a non-parameterized type, plus all types + // reachable from that type using reflection, even though + // these runtime types may not be required for them. 
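A typical consumer of the AllFunctions result above (sketch; the filter and names are mine):

    package demo

    import (
        "fmt"

        "golang.org/x/tools/go/ssa"
        "golang.org/x/tools/go/ssa/ssautil"
    )

    // printSourceFuncs lists the non-synthetic functions of a built program.
    func printSourceFuncs(prog *ssa.Program) {
        for fn := range ssautil.AllFunctions(prog) {
            if fn.Synthetic == "" { // skip wrappers, thunks, bounds, etc.
                fmt.Println(fn.RelString(nil))
            }
        }
    }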
+ // + // Rather than break existing programs that rely on + // AllFunctions visiting extra methods that are unreferenced + // by IR and unreachable via reflection, we moved the logic + // here, unprincipled though it is. + // (See doc comment for better ideas.) + // + // Nonetheless, after the move, we no longer visit every + // method of any type recursively reachable from T, only the + // methods of T and *T themselves, and we only apply this to + // named types T, and not to the type of every exported + // package member. + exportedTypeHack := func(t *ssa.Type) { + if isSyntactic(t.Package()) && + ast.IsExported(t.Name()) && + !types.IsInterface(t.Type()) { + // Consider only named types. + // (Ignore aliases and unsafe.Pointer.) + if named, ok := t.Type().(*types.Named); ok { + if named.TypeParams() == nil { + methodsOf(named) // T + methodsOf(types.NewPointer(named)) // *T + } + } } } -} -func (visit *visitor) function(fn *ssa.Function) { - if !visit.seen[fn] { - visit.seen[fn] = true - var buf [10]*ssa.Value // avoid alloc in common case - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - for _, op := range instr.Operands(buf[:0]) { - if fn, ok := (*op).(*ssa.Function); ok { - visit.function(fn) - } - } + for _, pkg := range prog.AllPackages() { + for _, mem := range pkg.Members { + switch mem := mem.(type) { + case *ssa.Function: + // Visit all package-level declared functions. + function(mem) + + case *ssa.Type: + exportedTypeHack(mem) } } } + + // Visit all methods of types for which runtime types were + // materialized, as they are reachable through reflection. + for _, T := range prog.RuntimeTypes() { + methodsOf(T) + } + + return seen } // MainPackages returns the subset of the specified packages @@ -76,3 +146,12 @@ func MainPackages(pkgs []*ssa.Package) []*ssa.Package { } return mains } + +// TODO(adonovan): propose a principled API for this. One possibility +// is a new field, Package.SrcFunctions []*Function, which would +// contain the list of SrcFunctions described in point 2 of the +// AllFunctions doc comment, or nil if the package is not from syntax. +// But perhaps overloading nil vs empty slice is too subtle. +// +//go:linkname isSyntactic golang.org/x/tools/go/ssa.isSyntactic +func isSyntactic(pkg *ssa.Package) bool diff --git a/vendor/golang.org/x/tools/go/ssa/subst.go b/vendor/golang.org/x/tools/go/ssa/subst.go index d7f8ae4a70..9f2f2f3000 100644 --- a/vendor/golang.org/x/tools/go/ssa/subst.go +++ b/vendor/golang.org/x/tools/go/ssa/subst.go @@ -5,10 +5,9 @@ package ssa import ( - "fmt" "go/types" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/aliases" ) // Type substituter for a fixed set of replacement types. @@ -19,41 +18,42 @@ import ( // // Not concurrency-safe. type subster struct { + replacements map[*types.TypeParam]types.Type // values should contain no type params + cache map[types.Type]types.Type // cache of subst results + ctxt *types.Context // cache for instantiation + scope *types.Scope // *types.Named declared within this scope can be substituted (optional) + debug bool // perform extra debugging checks + // TODO(taking): consider adding Pos // TODO(zpavlinovic): replacements can contain type params // when generating instances inside of a generic function body. 
- replacements map[*typeparams.TypeParam]types.Type // values should contain no type params - cache map[types.Type]types.Type // cache of subst results - ctxt *typeparams.Context - debug bool // perform extra debugging checks - // TODO(taking): consider adding Pos } // Returns a subster that replaces tparams[i] with targs[i]. Uses ctxt as a cache. // targs should not contain any types in tparams. -func makeSubster(ctxt *typeparams.Context, tparams *typeparams.TypeParamList, targs []types.Type, debug bool) *subster { +// scope is the (optional) lexical block of the generic function for which we are substituting. +func makeSubster(ctxt *types.Context, scope *types.Scope, tparams *types.TypeParamList, targs []types.Type, debug bool) *subster { assert(tparams.Len() == len(targs), "makeSubster argument count must match") subst := &subster{ - replacements: make(map[*typeparams.TypeParam]types.Type, tparams.Len()), + replacements: make(map[*types.TypeParam]types.Type, tparams.Len()), cache: make(map[types.Type]types.Type), ctxt: ctxt, + scope: scope, debug: debug, } for i := 0; i < tparams.Len(); i++ { subst.replacements[tparams.At(i)] = targs[i] } if subst.debug { - if err := subst.wellFormed(); err != nil { - panic(err) - } + subst.wellFormed() } return subst } -// wellFormed returns an error if subst was not properly initialized. -func (subst *subster) wellFormed() error { - if subst == nil || len(subst.replacements) == 0 { - return nil +// wellFormed asserts that subst was properly initialized. +func (subst *subster) wellFormed() { + if subst == nil { + return } // Check that all of the type params do not appear in the arguments. s := make(map[types.Type]bool, len(subst.replacements)) @@ -62,10 +62,9 @@ func (subst *subster) wellFormed() error { } for _, r := range subst.replacements { if reaches(r, s) { - return fmt.Errorf("\n‰r %s s %v replacements %v\n", r, s, subst.replacements) + panic(subst) } } - return nil } // typ returns the type of t with the type parameter tparams[i] substituted @@ -83,7 +82,10 @@ func (subst *subster) typ(t types.Type) (res types.Type) { // fall through if result r will be identical to t, types.Identical(r, t). switch t := t.(type) { - case *typeparams.TypeParam: + case *aliases.Alias: + return subst.typ(aliases.Unalias(t)) + + case *types.TypeParam: r := subst.replacements[t] assert(r != nil, "type param without replacement encountered") return r @@ -132,7 +134,7 @@ func (subst *subster) typ(t types.Type) (res types.Type) { case *types.Signature: return subst.signature(t) - case *typeparams.Union: + case *types.Union: return subst.union(t) case *types.Interface: @@ -221,25 +223,25 @@ func (subst *subster) var_(v *types.Var) *types.Var { return v } -func (subst *subster) union(u *typeparams.Union) *typeparams.Union { - var out []*typeparams.Term // nil => no updates +func (subst *subster) union(u *types.Union) *types.Union { + var out []*types.Term // nil => no updates for i, n := 0, u.Len(); i < n; i++ { t := u.Term(i) r := subst.typ(t.Type()) if r != t.Type() && out == nil { - out = make([]*typeparams.Term, n) + out = make([]*types.Term, n) for j := 0; j < i; j++ { out[j] = u.Term(j) } } if out != nil { - out[i] = typeparams.NewTerm(t.Tilde(), r) + out[i] = types.NewTerm(t.Tilde(), r) } } if out != nil { - return typeparams.NewUnion(out) + return types.NewUnion(out) } return u } @@ -250,7 +252,7 @@ func (subst *subster) interface_(iface *types.Interface) *types.Interface { } // methods for the interface. Initially nil if there is no known change needed. 
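The NewTerm/NewUnion calls in subst.union above use the public go1.18 constructors; for instance, the type-set expression ~string | int can be built directly (illustrative helper, mine):

    package demo

    import "go/types"

    // stringOrInt builds the type-set expression ~string | int.
    func stringOrInt() *types.Union {
        return types.NewUnion([]*types.Term{
            types.NewTerm(true, types.Typ[types.String]), // ~string
            types.NewTerm(false, types.Typ[types.Int]),   // int
        })
    }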
- // Signatures for the method where recv is nil. NewInterfaceType fills in the recievers. + // Signatures for the method where recv is nil. NewInterfaceType fills in the receivers. var methods []*types.Func initMethods := func(n int) { // copy first n explicit methods methods = make([]*types.Func, iface.NumExplicitMethods()) @@ -263,7 +265,7 @@ for i := 0; i < iface.NumExplicitMethods(); i++ { f := iface.ExplicitMethod(i) // On interfaces, we need to cycle break on anonymous interface types - // being in a cycle with their signatures being in cycles with their recievers + // being in a cycle with their signatures being in cycles with their receivers // that do not go through a Named. norecv := changeRecv(f.Type().(*types.Signature), nil) sig := subst.typ(norecv) @@ -306,29 +308,56 @@ } func (subst *subster) named(t *types.Named) types.Type { - // A name type may be: - // (1) ordinary (no type parameters, no type arguments), - // (2) generic (type parameters but no type arguments), or - // (3) instantiated (type parameters and type arguments). - tparams := typeparams.ForNamed(t) + // A named type may be: + // (1) ordinary named type (non-local scope, no type parameters, no type arguments), + // (2) locally scoped type, + // (3) generic (type parameters but no type arguments), or + // (4) instantiated (type parameters and type arguments). + tparams := t.TypeParams() if tparams.Len() == 0 { - // case (1) ordinary + if subst.scope != nil && !subst.scope.Contains(t.Obj().Pos()) { + // Outside the current function scope? + return t // case (1) ordinary + } - // Note: If Go allows for local type declarations in generic - // functions we may need to descend into underlying as well. - return t + // case (2) locally scoped type. + // Create a new named type to represent this instantiation. + // We assume that local types of distinct instantiations of a + // generic function are distinct, even if they don't refer to + // type parameters, but the spec is unclear; see golang/go#58573. + // + // Subtle: We short circuit substitution and use a newly created type in + // subst, i.e. cache[t]=n, to pre-emptively replace t with n in recursive + // types during traversal. This both breaks infinite cycles and allows for + // constructing types with the replacement applied in subst.typ(under). + // + // Example: + // func foo[T any]() { + // type linkedlist struct { + // next *linkedlist + // val T + // } + // } + // + // When the field `next *linkedlist` is visited during subst.typ(under), + // we want the substituted type for the field `next` to be `*n`. + n := types.NewNamed(t.Obj(), nil, nil) + subst.cache[t] = n + subst.cache[n] = n + n.SetUnderlying(subst.typ(t.Underlying())) + return n } - targs := typeparams.NamedTypeArgs(t) + targs := t.TypeArgs() // insts are arguments to instantiate using. insts := make([]types.Type, tparams.Len()) - // case (2) generic ==> targs.Len() == 0 + // case (3) generic ==> targs.Len() == 0 // Instantiating a generic with no type arguments should be unreachable. // Please report a bug if you encounter this. assert(targs.Len() != 0, "substitution into a generic Named type is currently unsupported") - // case (3) instantiated. + // case (4) instantiated. 
// Substitute into the type arguments and instantiate the replacements/ // Example: // type N[A any] func() A @@ -341,13 +370,13 @@ func (subst *subster) named(t *types.Named) types.Type { inst := subst.typ(targs.At(i)) // TODO(generic): Check with rfindley for mutual recursion insts[i] = inst } - r, err := typeparams.Instantiate(subst.ctxt, typeparams.NamedTypeOrigin(t), insts, false) + r, err := types.Instantiate(subst.ctxt, t.Origin(), insts, false) assert(err == nil, "failed to Instantiate Named type") return r } func (subst *subster) signature(t *types.Signature) types.Type { - tparams := typeparams.ForSignature(t) + tparams := t.TypeParams() // We are choosing not to support tparams.Len() > 0 until a need has been observed in practice. // @@ -362,7 +391,7 @@ func (subst *subster) signature(t *types.Signature) types.Type { // no type params to substitute // (2)generic method and recv needs to be substituted. - // Recievers can be either: + // Receivers can be either: // named // pointer to named // interface @@ -372,25 +401,32 @@ func (subst *subster) signature(t *types.Signature) types.Type { params := subst.tuple(t.Params()) results := subst.tuple(t.Results()) if recv != t.Recv() || params != t.Params() || results != t.Results() { - return typeparams.NewSignatureType(recv, nil, nil, params, results, t.Variadic()) + return types.NewSignatureType(recv, nil, nil, params, results, t.Variadic()) } return t } // reaches returns true if a type t reaches any type t' s.t. c[t'] == true. -// Updates c to cache results. +// It updates c to cache results. +// +// reaches is currently only part of the wellFormed debug logic, and +// in practice c is initially only type parameters. It is not currently +// relied on in production. func reaches(t types.Type, c map[types.Type]bool) (res bool) { if c, ok := c[t]; ok { return c } - c[t] = false // prevent cycles + + // c is populated with temporary false entries as types are visited. + // This avoids repeat visits and break cycles. + c[t] = false defer func() { c[t] = res }() switch t := t.(type) { - case *typeparams.TypeParam, *types.Basic: - // no-op => c == false + case *types.TypeParam, *types.Basic: + return false case *types.Array: return reaches(t.Elem(), c) case *types.Slice: @@ -418,7 +454,7 @@ func reaches(t types.Type, c map[types.Type]bool) (res bool) { return true } return reaches(t.Params(), c) || reaches(t.Results(), c) - case *typeparams.Union: + case *types.Union: for i := 0; i < t.Len(); i++ { if reaches(t.Term(i).Type(), c) { return true @@ -435,7 +471,7 @@ func reaches(t types.Type, c map[types.Type]bool) (res bool) { return true } } - case *types.Named: + case *types.Named, *aliases.Alias: return reaches(t.Underlying(), c) default: panic("unreachable") diff --git a/vendor/golang.org/x/tools/go/ssa/util.go b/vendor/golang.org/x/tools/go/ssa/util.go index db53aebee4..4d65259ed9 100644 --- a/vendor/golang.org/x/tools/go/ssa/util.go +++ b/vendor/golang.org/x/tools/go/ssa/util.go @@ -17,7 +17,9 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" ) //// Sanity checking utilities @@ -43,12 +45,6 @@ func isBlankIdent(e ast.Expr) bool { //// Type utilities. Some of these belong in go/types. -// isPointer returns true for types whose underlying type is a pointer. 
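> Note: the rewritten `reaches` helper caches a provisional `false` entry before recursing, which both memoizes results and breaks cycles. The same idiom, sketched over a plain string graph rather than go/types (names `reachesTrue` and the graph are ours):

```go
package main

import "fmt"

// reachesTrue reports whether node n reaches any node marked true in c,
// caching results in c. The provisional false entry breaks cycles.
func reachesTrue(n string, edges map[string][]string, c map[string]bool) (res bool) {
	if v, ok := c[n]; ok {
		return v
	}
	c[n] = false // provisional entry: prevents infinite recursion on cycles
	defer func() { c[n] = res }()
	for _, m := range edges[n] {
		if reachesTrue(m, edges, c) {
			return true
		}
	}
	return false
}

func main() {
	edges := map[string][]string{"a": {"b"}, "b": {"a", "t"}} // a <-> b cycle
	c := map[string]bool{"t": true}
	fmt.Println(reachesTrue("a", edges, c)) // true, despite the cycle
}
```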
-func isPointer(typ types.Type) bool { - _, ok := typ.Underlying().(*types.Pointer) - return ok -} - // isNonTypeParamInterface reports whether t is an interface type but not a type parameter. func isNonTypeParamInterface(t types.Type) bool { return !typeparams.IsTypeParam(t) && types.IsInterface(t) @@ -56,16 +52,19 @@ func isNonTypeParamInterface(t types.Type) bool { // isBasic reports whether t is a basic type. func isBasic(t types.Type) bool { - _, ok := t.(*types.Basic) + _, ok := aliases.Unalias(t).(*types.Basic) return ok } // isString reports whether t is exactly a string type. +// t is assumed to be an Underlying type (not Named or Alias). func isString(t types.Type) bool { - return isBasic(t) && t.(*types.Basic).Info()&types.IsString != 0 + basic, ok := t.(*types.Basic) + return ok && basic.Info()&types.IsString != 0 } // isByteSlice reports whether t is of the form []~bytes. +// t is assumed to be an Underlying type (not Named or Alias). func isByteSlice(t types.Type) bool { if b, ok := t.(*types.Slice); ok { e, _ := b.Elem().Underlying().(*types.Basic) @@ -75,6 +74,7 @@ func isByteSlice(t types.Type) bool { } // isRuneSlice reports whether t is of the form []~runes. +// t is assumed to be an Underlying type (not Named or Alias). func isRuneSlice(t types.Type) bool { if b, ok := t.(*types.Slice); ok { e, _ := b.Elem().Underlying().(*types.Basic) @@ -100,12 +100,24 @@ func isBasicConvTypes(tset termList) bool { return all && basics >= 1 && tset.Len()-basics <= 1 } -// deref returns a pointer's element type; otherwise it returns typ. -func deref(typ types.Type) types.Type { +// deptr returns a pointer's element type and true; otherwise it returns (typ, false). +// This function is oblivious to core types and is not suitable for generics. +// +// TODO: Deprecate this function once all usages have been audited. +func deptr(typ types.Type) (types.Type, bool) { if p, ok := typ.Underlying().(*types.Pointer); ok { - return p.Elem() + return p.Elem(), true } - return typ + return typ, false +} + +// deref returns the element type of a type with a pointer core type and true; +// otherwise it returns (typ, false). +func deref(typ types.Type) (types.Type, bool) { + if p, ok := typeparams.CoreType(typ).(*types.Pointer); ok { + return p.Elem(), true + } + return typ, false } // recvType returns the receiver type of method obj. @@ -113,8 +125,20 @@ func recvType(obj *types.Func) types.Type { return obj.Type().(*types.Signature).Recv().Type() } -// isUntyped returns true for types that are untyped. +// fieldOf returns the index'th field of the (core type of) a struct type; +// otherwise returns nil. +func fieldOf(typ types.Type, index int) *types.Var { + if st, ok := typeparams.CoreType(typ).(*types.Struct); ok { + if 0 <= index && index < st.NumFields() { + return st.Field(index) + } + } + return nil +} + +// isUntyped reports whether typ is the type of an untyped constant. func isUntyped(typ types.Type) bool { + // No Underlying/Unalias: untyped constant types cannot be Named or Alias. b, ok := typ.(*types.Basic) return ok && b.Info()&types.IsUntyped != 0 } @@ -154,39 +178,15 @@ func makeLen(T types.Type) *Builtin { } } -// nonbasicTypes returns a list containing all of the types T in ts that are non-basic. 
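> Note: the `deptr`/`deref` split matters for generics. A type parameter's `Underlying` is its constraint interface, so an Underlying-based pointer test (`deptr`) never matches a pointer-like type parameter, whereas the core type (which the vendored code gets from the internal `typeparams.CoreType` helper) would be the pointer. A sketch of the Underlying side of that asymmetry, using only public go/types API:

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/p", "p")

	// P is a type parameter constrained to interface{ *int }.
	constraint := types.NewInterfaceType(nil, []types.Type{
		types.NewPointer(types.Typ[types.Int]),
	})
	constraint.Complete()
	tp := types.NewTypeParam(types.NewTypeName(token.NoPos, pkg, "P", nil), constraint)

	// Underlying of a type parameter is its constraint interface, never a
	// *types.Pointer -- so an Underlying-based test misses it...
	_, ok := tp.Underlying().(*types.Pointer)
	fmt.Println(ok) // false

	// ...even though every type satisfying P is *int (its core type).
	fmt.Println(tp.Constraint()) // interface{*int}
}
```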
-func nonbasicTypes(ts []types.Type) []types.Type { - if len(ts) == 0 { - return nil - } - added := make(map[types.Type]bool) // additionally filter duplicates - var filtered []types.Type - for _, T := range ts { - if !isBasic(T) { - if !added[T] { - added[T] = true - filtered = append(filtered, T) - } - } - } - return filtered -} - -// receiverTypeArgs returns the type arguments to a function's reciever. -// Returns an empty list if obj does not have a reciever or its reciever does not have type arguments. -func receiverTypeArgs(obj *types.Func) []types.Type { - rtype := recvType(obj) - if rtype == nil { - return nil - } - if isPointer(rtype) { - rtype = rtype.(*types.Pointer).Elem() +// receiverTypeArgs returns the type arguments to a method's receiver. +// Returns an empty list if the receiver does not have type arguments. +func receiverTypeArgs(method *types.Func) []types.Type { + recv := method.Type().(*types.Signature).Recv() + _, named := typesinternal.ReceiverNamed(recv) + if named == nil { + return nil // recv is anonymous struct/interface } - named, ok := rtype.(*types.Named) - if !ok { - return nil - } - ts := typeparams.NamedTypeArgs(named) + ts := named.TypeArgs() if ts.Len() == 0 { return nil } @@ -205,7 +205,7 @@ func recvAsFirstArg(sig *types.Signature) *types.Signature { for i := 0; i < sig.Params().Len(); i++ { params = append(params, sig.Params().At(i)) } - return typeparams.NewSignatureType(nil, nil, nil, types.NewTuple(params...), sig.Results(), sig.Variadic()) + return types.NewSignatureType(nil, nil, nil, types.NewTuple(params...), sig.Results(), sig.Variadic()) } // instance returns whether an expression is a simple or qualified identifier @@ -222,13 +222,13 @@ func instance(info *types.Info, expr ast.Expr) bool { default: return false } - _, ok := typeparams.GetInstances(info)[id] + _, ok := info.Instances[id] return ok } // instanceArgs returns the Instance[id].TypeArgs as a slice. func instanceArgs(info *types.Info, id *ast.Ident) []types.Type { - targList := typeparams.GetInstances(info)[id].TypeArgs + targList := info.Instances[id].TypeArgs if targList == nil { return nil } @@ -280,7 +280,7 @@ func (c *canonizer) Type(T types.Type) types.Type { return T } -// A type for representating an canonized list of types. +// A type for representing a canonized list of types. type typeList []types.Type func (l *typeList) identical(ts []types.Type) bool { @@ -346,13 +346,13 @@ func (m *typeListMap) hash(ts []types.Type) uint32 { } // instantiateMethod instantiates m with targs and returns a canonical representative for this method. -func (canon *canonizer) instantiateMethod(m *types.Func, targs []types.Type, ctxt *typeparams.Context) *types.Func { +func (canon *canonizer) instantiateMethod(m *types.Func, targs []types.Type, ctxt *types.Context) *types.Func { recv := recvType(m) - if p, ok := recv.(*types.Pointer); ok { + if p, ok := aliases.Unalias(recv).(*types.Pointer); ok { recv = p.Elem() } - named := recv.(*types.Named) - inst, err := typeparams.Instantiate(ctxt, typeparams.NamedTypeOrigin(named), targs, false) + named := aliases.Unalias(recv).(*types.Named) + inst, err := types.Instantiate(ctxt, named.Origin(), targs, false) if err != nil { panic(err) } @@ -360,3 +360,16 @@ func (canon *canonizer) instantiateMethod(m *types.Func, targs []types.Type, ctx obj, _, _ := types.LookupFieldOrMethod(rep, true, m.Pkg(), m.Name()) return obj.(*types.Func) } + +// Exposed to ssautil using the linkname hack. 
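> Note: `instance` and `instanceArgs` now read `types.Info.Instances` directly, the public go/types API (Go 1.18+) that the old `typeparams.GetInstances` shim wrapped. A self-contained sketch of what they consume:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p
func id[T any](x T) T { return x }
var _ = id(42)
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Asking for Instances records every (explicit or inferred) instantiation.
	info := &types.Info{Instances: make(map[*ast.Ident]types.Instance)}
	if _, err := new(types.Config).Check("p", fset, []*ast.File{f}, info); err != nil {
		panic(err)
	}
	for id, inst := range info.Instances {
		fmt.Printf("%s instantiated with %v\n", id.Name, inst.TypeArgs.At(0)) // id instantiated with int
	}
}
```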
+func isSyntactic(pkg *Package) bool { return pkg.syntax } + +// mapValues returns a new unordered array of map values. +func mapValues[K comparable, V any](m map[K]V) []V { + vals := make([]V, 0, len(m)) + for _, fn := range m { + vals = append(vals, fn) + } + return vals + +} diff --git a/vendor/golang.org/x/tools/go/ssa/wrappers.go b/vendor/golang.org/x/tools/go/ssa/wrappers.go index 228daf6158..7c7ee4099e 100644 --- a/vendor/golang.org/x/tools/go/ssa/wrappers.go +++ b/vendor/golang.org/x/tools/go/ssa/wrappers.go @@ -28,7 +28,7 @@ import ( // -- wrappers ----------------------------------------------------------- -// makeWrapper returns a synthetic method that delegates to the +// createWrapper returns a synthetic method that delegates to the // declared method denoted by meth.Obj(), first performing any // necessary pointer indirections or field selections implied by meth. // @@ -40,21 +40,17 @@ import ( // - optional implicit field selections // - meth.Obj() may denote a concrete or an interface method // - the result may be a thunk or a wrapper. -// -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { +func createWrapper(prog *Program, sel *selection, cr *creator) *Function { obj := sel.obj.(*types.Func) // the declared function sig := sel.typ.(*types.Signature) // type of this wrapper var recv *types.Var // wrapper's receiver or thunk's params[0] name := obj.Name() var description string - var start int // first regular param if sel.kind == types.MethodExpr { name += "$thunk" description = "thunk" recv = sig.Params().At(0) - start = 1 } else { description = "wrapper" recv = sig.Recv() @@ -62,8 +58,9 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { description = fmt.Sprintf("%s for %s", description, sel.obj) if prog.mode&LogSource != 0 { - defer logStack("make %s to (%s)", description, recv.Type())() + defer logStack("create %s to (%s)", description, recv.Type())() } + /* method wrapper */ fn := &Function{ name: name, method: sel, @@ -72,33 +69,53 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { Synthetic: description, Prog: prog, pos: obj.Pos(), - info: nil, // info is not set on wrappers. + // wrappers have no syntax + build: (*builder).buildWrapper, + syntax: nil, + info: nil, + goversion: "", } cr.Add(fn) + return fn +} + +// buildWrapper builds fn.Body for a method wrapper. 
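> Note: for readers mapping the `$thunk`/`$bound` terminology in `createWrapper` onto source syntax: a method value captures its receiver at evaluation time (a "bound" wrapper), while a method expression takes the receiver as an explicit first parameter (a "thunk"). In plain Go:

```go
package main

import "fmt"

type T struct{ n int }

func (t T) Get() int { return t.n }

func main() {
	t := T{n: 1}

	bound := t.Get // method value: receiver captured now ($bound)
	thunk := T.Get // method expression: receiver becomes the first parameter ($thunk)

	fmt.Println(bound())  // 1
	fmt.Println(thunk(t)) // 1
}
```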
+func (b *builder) buildWrapper(fn *Function) { + var recv *types.Var // wrapper's receiver or thunk's params[0] + var start int // first regular param + if fn.method.kind == types.MethodExpr { + recv = fn.Signature.Params().At(0) + start = 1 + } else { + recv = fn.Signature.Recv() + } + fn.startBody() fn.addSpilledParam(recv) createParams(fn, start) - indices := sel.index + indices := fn.method.index var v Value = fn.Locals[0] // spilled receiver - if isPointer(sel.recv) { + srdt, ptrRecv := deptr(fn.method.recv) + if ptrRecv { v = emitLoad(fn, v) // For simple indirection wrappers, perform an informative nil-check: // "value method (T).f called using nil *T pointer" - if len(indices) == 1 && !isPointer(recvType(obj)) { + _, ptrObj := deptr(recvType(fn.object)) + if len(indices) == 1 && !ptrObj { var c Call c.Call.Value = &Builtin{ name: "ssa:wrapnilchk", sig: types.NewSignature(nil, - types.NewTuple(anonVar(sel.recv), anonVar(tString), anonVar(tString)), - types.NewTuple(anonVar(sel.recv)), false), + types.NewTuple(anonVar(fn.method.recv), anonVar(tString), anonVar(tString)), + types.NewTuple(anonVar(fn.method.recv)), false), } c.Call.Args = []Value{ v, - stringConst(deref(sel.recv).String()), - stringConst(sel.obj.Name()), + stringConst(srdt.String()), + stringConst(fn.method.obj.Name()), } c.setType(v.Type()) v = fn.emit(&c) @@ -120,18 +137,14 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { // address of implicit C field. var c Call - if r := recvType(obj); !types.IsInterface(r) { // concrete method - if !isPointer(r) { + if r := recvType(fn.object); !types.IsInterface(r) { // concrete method + if _, ptrObj := deptr(r); !ptrObj { v = emitLoad(fn, v) } - callee := prog.originFunc(obj) - if callee.typeparams.Len() > 0 { - callee = prog.lookupOrCreateInstance(callee, receiverTypeArgs(obj), cr) - } - c.Call.Value = callee + c.Call.Value = fn.Prog.objectMethod(fn.object, b.created) c.Call.Args = append(c.Call.Args, v) } else { - c.Call.Method = obj + c.Call.Method = fn.object c.Call.Value = emitLoad(fn, v) // interface (possibly a typeparam) } for _, arg := range fn.Params[1:] { @@ -139,8 +152,6 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { } emitTailCall(fn, &c) fn.finishBody() - fn.done() - return fn } // createParams creates parameters for wrapper method fn based on its @@ -149,13 +160,13 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { func createParams(fn *Function, start int) { tparams := fn.Signature.Params() for i, n := start, tparams.Len(); i < n; i++ { - fn.addParamObj(tparams.At(i)) + fn.addParamVar(tparams.At(i)) } } // -- bounds ----------------------------------------------------------- -// makeBound returns a bound method wrapper (or "bound"), a synthetic +// createBound returns a bound method wrapper (or "bound"), a synthetic // function that delegates to a concrete or interface method denoted // by obj. The resulting function has no receiver, but has one free // variable which will be used as the method's receiver in the @@ -174,66 +185,57 @@ func createParams(fn *Function, start int) { // // f := func() { return t.meth() } // -// Unlike makeWrapper, makeBound need perform no indirection or field +// Unlike createWrapper, createBound need perform no indirection or field // selections because that can be done before the closure is // constructed. 
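> Note: the `ssa:wrapnilchk` builtin above models the runtime's informative panic when a value method is reached through a nil pointer indirection. The behavior it mirrors, in ordinary Go:

```go
package main

import "fmt"

type T struct{}

func (T) F() {} // value method: receiver is T, not *T

func main() {
	defer func() { fmt.Println(recover()) }()
	var p *T
	p.F() // implicit (*p).F: panics "value method main.T.F called using nil *T pointer"
}
```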
-// -// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -func makeBound(prog *Program, obj *types.Func, cr *creator) *Function { - targs := receiverTypeArgs(obj) - key := boundsKey{obj, prog.canon.List(targs)} - - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - fn, ok := prog.bounds[key] - if !ok { - description := fmt.Sprintf("bound method wrapper for %s", obj) - if prog.mode&LogSource != 0 { - defer logStack("%s", description)() - } - fn = &Function{ - name: obj.Name() + "$bound", - object: obj, - Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver - Synthetic: description, - Prog: prog, - pos: obj.Pos(), - info: nil, // info is not set on wrappers. - } - cr.Add(fn) - - fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn} - fn.FreeVars = []*FreeVar{fv} - fn.startBody() - createParams(fn, 0) - var c Call - - if !types.IsInterface(recvType(obj)) { // concrete - callee := prog.originFunc(obj) - if callee.typeparams.Len() > 0 { - callee = prog.lookupOrCreateInstance(callee, targs, cr) - } - c.Call.Value = callee - c.Call.Args = []Value{fv} - } else { - c.Call.Method = obj - c.Call.Value = fv // interface (possibly a typeparam) - } - for _, arg := range fn.Params { - c.Call.Args = append(c.Call.Args, arg) - } - emitTailCall(fn, &c) - fn.finishBody() - fn.done() - - prog.bounds[key] = fn +func createBound(prog *Program, obj *types.Func, cr *creator) *Function { + description := fmt.Sprintf("bound method wrapper for %s", obj) + if prog.mode&LogSource != 0 { + defer logStack("%s", description)() + } + /* bound method wrapper */ + fn := &Function{ + name: obj.Name() + "$bound", + object: obj, + Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver + Synthetic: description, + Prog: prog, + pos: obj.Pos(), + // wrappers have no syntax + build: (*builder).buildBound, + syntax: nil, + info: nil, + goversion: "", } + fn.FreeVars = []*FreeVar{{name: "recv", typ: recvType(obj), parent: fn}} // (cyclic) + cr.Add(fn) return fn } +// buildBound builds fn.Body for a bound method closure. +func (b *builder) buildBound(fn *Function) { + fn.startBody() + createParams(fn, 0) + var c Call + + recv := fn.FreeVars[0] + if !types.IsInterface(recvType(fn.object)) { // concrete + c.Call.Value = fn.Prog.objectMethod(fn.object, b.created) + c.Call.Args = []Value{recv} + } else { + c.Call.Method = fn.object + c.Call.Value = recv // interface (possibly a typeparam) + } + for _, arg := range fn.Params { + c.Call.Args = append(c.Call.Args, arg) + } + emitTailCall(fn, &c) + fn.finishBody() +} + // -- thunks ----------------------------------------------------------- -// makeThunk returns a thunk, a synthetic function that delegates to a +// createThunk returns a thunk, a synthetic function that delegates to a // concrete or interface method denoted by sel.obj. The resulting // function has no receiver, but has an additional (first) regular // parameter. @@ -249,38 +251,16 @@ func makeBound(prog *Program, obj *types.Func, cr *creator) *Function { // f is a synthetic wrapper defined as if by: // // f := func(t T) { return t.meth() } -// -// TODO(adonovan): opt: currently the stub is created even when used -// directly in a function call: C.f(i, 0). This is less efficient -// than inlining the stub. 
-// -// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -func makeThunk(prog *Program, sel *selection, cr *creator) *Function { +func createThunk(prog *Program, sel *selection, cr *creator) *Function { if sel.kind != types.MethodExpr { panic(sel) } - // Canonicalize sel.recv to avoid constructing duplicate thunks. - canonRecv := prog.canon.Type(sel.recv) - key := selectionKey{ - kind: sel.kind, - recv: canonRecv, - obj: sel.obj, - index: fmt.Sprint(sel.index), - indirect: sel.indirect, + fn := createWrapper(prog, sel, cr) + if fn.Signature.Recv() != nil { + panic(fn) // unexpected receiver } - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - fn, ok := prog.thunks[key] - if !ok { - fn = makeWrapper(prog, sel, cr) - if fn.Signature.Recv() != nil { - panic(fn) // unexpected receiver - } - prog.thunks[key] = fn - } return fn } @@ -288,21 +268,6 @@ func changeRecv(s *types.Signature, recv *types.Var) *types.Signature { return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic()) } -// selectionKey is like types.Selection but a usable map key. -type selectionKey struct { - kind types.SelectionKind - recv types.Type // canonicalized via Program.canon - obj types.Object - index string - indirect bool -} - -// boundsKey is a unique for the object and a type instantiation. -type boundsKey struct { - obj types.Object // t.meth - inst *typeList // canonical type instantiation list. -} - // A local version of *types.Selection. // Needed for some additional control, such as creating a MethodExpr for an instantiation. type selection struct { @@ -327,16 +292,16 @@ func toSelection(sel *types.Selection) *selection { // -- instantiations -------------------------------------------------- -// buildInstantiationWrapper creates a body for an instantiation +// buildInstantiationWrapper builds the body of an instantiation // wrapper fn. The body calls the original generic function, // bracketed by ChangeType conversions on its arguments and results. -func buildInstantiationWrapper(fn *Function) { +func (b *builder) buildInstantiationWrapper(fn *Function) { orig := fn.topLevelOrigin sig := fn.Signature fn.startBody() if sig.Recv() != nil { - fn.addParamObj(sig.Recv()) + fn.addParamVar(sig.Recv()) } createParams(fn, 0) diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index c160acb686..6a57ce3b13 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -26,13 +26,16 @@ package objectpath import ( "fmt" "go/types" - "sort" "strconv" "strings" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" ) +// TODO(adonovan): think about generic aliases. + // A Path is an opaque name that identifies a types.Object // relative to its package. Conceptually, the name consists of a // sequence of destructuring operations applied to the package scope @@ -111,7 +114,20 @@ const ( opObj = 'O' // .Obj() (Named, TypeParam) ) -// The For function returns the path to an object relative to its package, +// For is equivalent to new(Encoder).For(obj). +// +// It may be more efficient to reuse a single Encoder across several calls. +func For(obj types.Object) (Path, error) { + return new(Encoder).For(obj) +} + +// An Encoder amortizes the cost of encoding the paths of multiple objects. +// The zero value of an Encoder is ready to use. 
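> Note: the new Encoder introduced here amortizes the scope traversal across calls, and its zero value works out of the box. A round-trip sketch against the package's public API (assumes the golang.org/x/tools module is available; the printed path string is illustrative):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

const src = `package p
type T struct{ X int }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	field := pkg.Scope().Lookup("T").Type().Underlying().(*types.Struct).Field(0)

	var enc objectpath.Encoder // zero value is ready to use
	path, err := enc.For(field)
	if err != nil {
		panic(err)
	}
	obj, err := objectpath.Object(pkg, path)
	fmt.Println(path, err == nil && obj == field) // e.g. "T.UF0" true
}
```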
+type Encoder struct { + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects +} + +// For returns the path to an object relative to its package, // or an error if the object is not accessible from the package's Scope. // // The For function guarantees to return a path only for the following objects: @@ -123,6 +139,17 @@ const ( // These objects are sufficient to define the API of their package. // The objects described by a package's export data are drawn from this set. // +// The set of objects accessible from a package's Scope depends on +// whether the package was produced by type-checking syntax, or +// reading export data; the latter may have a smaller Scope since +// export data trims objects that are not reachable from an exported +// declaration. For example, the For function will return a path for +// an exported method of an unexported type that is not reachable +// from any public declaration; this path will cause the Object +// function to fail if called on a package loaded from export data. +// TODO(adonovan): is this a bug or feature? Should this package +// compute accessibility in the same way? +// // For does not return a path for predeclared names, imported package // names, local names, and unexported package-level names (except // types). @@ -143,7 +170,7 @@ const ( // .Type().Field(0) (field Var X) // // where p is the package (*types.Package) to which X belongs. -func For(obj types.Object) (Path, error) { +func (enc *Encoder) For(obj types.Object) (Path, error) { pkg := obj.Pkg() // This table lists the cases of interest. @@ -200,7 +227,7 @@ func For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -225,7 +252,7 @@ func For(obj types.Object) (Path, error) { return "", fmt.Errorf("func is not a method: %v", obj) } - if path, ok := concreteMethod(obj); ok { + if path, ok := enc.concreteMethod(obj); ok { // Fast path for concrete methods that avoids looping over scope. return path, nil } @@ -241,15 +268,14 @@ func For(obj types.Object) (Path, error) { // the best paths because non-types may // refer to types, but not the reverse. empty := make([]byte, 0, 48) // initial space - names := scope.Names() - for _, name := range names { - o := scope.Lookup(name) + objs := enc.scopeObjects(scope) + for _, o := range objs { tname, ok := o.(*types.TypeName) if !ok { continue // handle non-types in second pass } - path := append(empty, name...) + path := append(empty, o.Name()...) path = append(path, opType) T := o.Type() @@ -261,7 +287,7 @@ func For(obj types.Object) (Path, error) { } } else { if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { // generic named type return Path(r), nil } @@ -275,9 +301,8 @@ func For(obj types.Object) (Path, error) { // Then inspect everything else: // non-types, and declared methods of defined types. - for _, name := range names { - o := scope.Lookup(name) - path := append(empty, name...) + for _, o := range objs { + path := append(empty, o.Name()...) 
if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) @@ -289,14 +314,14 @@ func For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. - if T, ok := o.Type().(*types.Named); ok { + if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) - // Note that method index here is always with respect - // to canonical ordering of methods, regardless of how - // they appear in the underlying type. - canonical := canonicalize(T) - for i := 0; i < len(canonical); i++ { - m := canonical[i] + // The method index here is always with respect + // to the underlying go/types data structures, + // which ultimately derives from source order + // and must be preserved by export data. + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) path2 := appendOpArg(path, opMethod, i) if m == obj { return Path(path2), nil // found declared method @@ -324,7 +349,7 @@ func appendOpArg(path []byte, op byte, arg int) []byte { // This function is just an optimization that avoids the general scope walking // approach. You are expected to fall back to the general approach if this // function fails. -func concreteMethod(meth *types.Func) (Path, bool) { +func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // Concrete methods can only be declared on package-scoped named types. For // that reason we can skip the expensive walk over the package scope: the // path will always be package -> named type -> method. We can trivially get @@ -374,13 +399,8 @@ func concreteMethod(meth *types.Func) (Path, bool) { return "", false } - recvT := meth.Type().(*types.Signature).Recv().Type() - if ptr, ok := recvT.(*types.Pointer); ok { - recvT = ptr.Elem() - } - - named, ok := recvT.(*types.Named) - if !ok { + _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + if named == nil { return "", false } @@ -397,15 +417,24 @@ func concreteMethod(meth *types.Func) (Path, bool) { path := make([]byte, 0, len(name)+8) path = append(path, name...) path = append(path, opType) - canonical := canonicalize(named) - for i, m := range canonical { - if m == meth { + + // Method indices are w.r.t. the go/types data structures, + // ultimately deriving from source order, + // which is preserved by export data. + for i := 0; i < named.NumMethods(); i++ { + if named.Method(i) == meth { path = appendOpArg(path, opMethod, i) return Path(path), true } } - panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named)) + // Due to golang/go#59944, go/types fails to associate the receiver with + // certain methods on cgo types. + // + // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go + // versions gopls supports. + return "", false + // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) } // find finds obj within type T, returning the path to it, or nil if not found. @@ -414,6 +443,8 @@ func concreteMethod(meth *types.Func) (Path, bool) { // nil, it will be allocated as necessary. func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { switch T := T.(type) { + case *aliases.Alias: + return find(obj, aliases.Unalias(T), path, seen) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. 
@@ -432,7 +463,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } return find(obj, T.Elem(), append(path, opElem), seen) case *types.Signature: - if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { return r } if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { @@ -475,7 +506,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } } return nil - case *typeparams.TypeParam: + case *types.TypeParam: name := T.Obj() if name == obj { return append(path, opObj) @@ -495,7 +526,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) path2 := appendOpArg(path, opTypeParam, i) @@ -508,11 +539,11 @@ func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte // Object returns the object denoted by path p within the package pkg. func Object(pkg *types.Package, p Path) (types.Object, error) { - if p == "" { + pathstr := string(p) + if pathstr == "" { return nil, fmt.Errorf("empty path") } - pathstr := string(p) var pkgobj, suffix string if dot := strings.IndexByte(pathstr, opType); dot < 0 { pkgobj = pathstr @@ -532,7 +563,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } // abstraction of *types.{Named,Signature} type hasTypeParams interface { - TypeParams() *typeparams.TypeParamList + TypeParams() *types.TypeParamList } // abstraction of *types.{Named,TypeParam} type hasObj interface { @@ -586,6 +617,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil + t = aliases.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -634,7 +666,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { t = tparams.At(index) case opConstraint: - tparam, ok := t.(*typeparams.TypeParam) + tparam, ok := t.(*types.TypeParam) if !ok { return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) } @@ -663,15 +695,22 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { t = nil case opMethod: - hasMethods, ok := t.(hasMethods) // Interface or Named - if !ok { + switch t := t.(type) { + case *types.Interface: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) // Id-ordered + + case *types.Named: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) + + default: return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) } - canonical := canonicalize(hasMethods) - if n := len(canonical); index >= n { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n) - } - obj = canonical[index] t = nil case opObj: @@ -694,27 +733,22 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { return obj, nil // success } -// hasMethods is an abstraction of *types.{Interface,Named}. This is pulled up -// because it is used by methodOrdering, which is in turn used by both encoding -// and decoding. 
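> Note: the deleted `canonicalize` helper existed to impose a deterministic method order; it became unnecessary because go/types already guarantees one -- interface methods are ordered by their unique Id, while methods of a Named type keep source (declaration) order, as the comments above say. A quick check:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p
type I interface{ B(); A() }
type T struct{}
func (T) B() {}
func (T) A() {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	iface := pkg.Scope().Lookup("I").Type().Underlying().(*types.Interface)
	named := pkg.Scope().Lookup("T").Type().(*types.Named)
	fmt.Println(iface.Method(0).Name(), iface.Method(1).Name()) // A B (Id order)
	fmt.Println(named.Method(0).Name(), named.Method(1).Name()) // B A (source order)
}
```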
-type hasMethods interface { - Method(int) *types.Func - NumMethods() int -} - -// canonicalize returns a canonical order for the methods in a hasMethod. -func canonicalize(hm hasMethods) []*types.Func { - count := hm.NumMethods() - if count <= 0 { - return nil +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. +func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { + m := enc.scopeMemo + if m == nil { + m = make(map[*types.Scope][]types.Object) + enc.scopeMemo = m } - canon := make([]*types.Func, count) - for i := 0; i < count; i++ { - canon[i] = hm.Method(i) - } - less := func(i, j int) bool { - return canon[i].Id() < canon[j].Id() + objs, ok := m[scope] + if !ok { + names := scope.Names() // allocates and sorts + objs = make([]types.Object, len(names)) + for i, name := range names { + objs[i] = scope.Lookup(name) + } + m[scope] = objs } - sort.Slice(canon, less) - return canon + return objs } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go index 90b3ab0e21..90dc541adf 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -22,7 +22,7 @@ func Callee(info *types.Info, call *ast.CallExpr) types.Object { // Look through type instantiation if necessary. isInstance := false switch fun.(type) { - case *ast.IndexExpr, *typeparams.IndexListExpr: + case *ast.IndexExpr, *ast.IndexListExpr: // When extracting the callee from an *IndexExpr, we need to check that // it is a *types.Func and not a *types.Var. // Example: Don't match a slice m within the expression `m[0]()`. diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index 7bd2fdb38b..e154be0bd6 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -12,6 +12,7 @@ import ( "go/types" "reflect" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" ) @@ -219,7 +220,7 @@ type Hasher struct { // generic types or functions, and instantiated signatures do not have type // parameter lists, we should never encounter a second non-empty type // parameter list when hashing a generic signature. - sigTParams *typeparams.TypeParamList + sigTParams *types.TypeParamList } // MakeHasher returns a new Hasher instance. @@ -259,6 +260,9 @@ func (h Hasher) hashFor(t types.Type) uint32 { case *types.Basic: return uint32(t.Kind()) + case *aliases.Alias: + return h.Hash(t.Underlying()) + case *types.Array: return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) @@ -297,7 +301,7 @@ func (h Hasher) hashFor(t types.Type) uint32 { // We should never encounter a generic signature while hashing another // generic signature, but defensively set sigTParams only if h.mask is // unset. 
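> Note: for context on what Hasher serves: `typeutil.Map` buckets keys by these hashes and resolves collisions with `types.Identical`, so structurally identical types at distinct pointers land on the same entry. A minimal usage sketch:

```go
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map
	t1 := types.NewSlice(types.Typ[types.Int])
	t2 := types.NewSlice(types.Typ[types.Int]) // distinct pointer, identical type

	m.Set(t1, "hello")
	fmt.Println(m.At(t2)) // hello: lookup keys on type identity, not pointer identity
}
```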
- tparams := typeparams.ForSignature(t) + tparams := t.TypeParams() if h.sigTParams == nil && tparams.Len() != 0 { h = Hasher{ // There may be something more efficient than discarding the existing @@ -318,7 +322,7 @@ func (h Hasher) hashFor(t types.Type) uint32 { return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) - case *typeparams.Union: + case *types.Union: return h.hashUnion(t) case *types.Interface: @@ -354,14 +358,14 @@ func (h Hasher) hashFor(t types.Type) uint32 { case *types.Named: hash := h.hashPtr(t.Obj()) - targs := typeparams.NamedTypeArgs(t) + targs := t.TypeArgs() for i := 0; i < targs.Len(); i++ { targ := targs.At(i) hash += 2 * h.Hash(targ) } return hash - case *typeparams.TypeParam: + case *types.TypeParam: return h.hashTypeParam(t) case *types.Tuple: @@ -381,7 +385,7 @@ func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { return hash } -func (h Hasher) hashUnion(t *typeparams.Union) uint32 { +func (h Hasher) hashUnion(t *types.Union) uint32 { // Hash type restrictions. terms, err := typeparams.UnionTermSet(t) // if err != nil t has invalid type restrictions. Fall back on a non-zero @@ -392,7 +396,7 @@ func (h Hasher) hashUnion(t *typeparams.Union) uint32 { return h.hashTermSet(terms) } -func (h Hasher) hashTermSet(terms []*typeparams.Term) uint32 { +func (h Hasher) hashTermSet(terms []*types.Term) uint32 { hash := 9157 + 2*uint32(len(terms)) for _, term := range terms { // term order is not significant. @@ -416,7 +420,7 @@ func (h Hasher) hashTermSet(terms []*typeparams.Term) uint32 { // are not identical. // // Otherwise the hash of t depends only on t's pointer identity. -func (h Hasher) hashTypeParam(t *typeparams.TypeParam) uint32 { +func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 { if h.sigTParams != nil { i := t.Index() if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) { @@ -457,6 +461,9 @@ func (h Hasher) shallowHash(t types.Type) uint32 { // elements (mostly Slice, Pointer, Basic, Named), // so there's no need to optimize anything else. 
switch t := t.(type) { + case *aliases.Alias: + return h.shallowHash(t.Underlying()) + case *types.Signature: var hash uint32 = 604171 if t.Variadic() { @@ -489,7 +496,7 @@ func (h Hasher) shallowHash(t types.Type) uint32 { case *types.Pointer: return 4393139 - case *typeparams.Union: + case *types.Union: return 562448657 case *types.Interface: @@ -504,7 +511,7 @@ func (h Hasher) shallowHash(t types.Type) uint32 { case *types.Named: return h.hashPtr(t.Obj()) - case *typeparams.TypeParam: + case *types.TypeParam: return h.hashPtr(t.Obj()) } panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go index a5d9310830..bd71aafaaa 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -9,6 +9,8 @@ package typeutil import ( "go/types" "sync" + + "golang.org/x/tools/internal/aliases" ) // A MethodSetCache records the method set of each type T for which @@ -32,12 +34,12 @@ func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { cache.mu.Lock() defer cache.mu.Unlock() - switch T := T.(type) { + switch T := aliases.Unalias(T).(type) { case *types.Named: return cache.lookupNamed(T).value case *types.Pointer: - if N, ok := T.Elem().(*types.Named); ok { + if N, ok := aliases.Unalias(T.Elem()).(*types.Named); ok { return cache.lookupNamed(N).pointer } } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go index fa55b0a1e6..a0c1a60ac0 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/ui.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -6,7 +6,11 @@ package typeutil // This file defines utilities for user interfaces that display types. -import "go/types" +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) // IntuitiveMethodSet returns the intuitive method set of a type T, // which is the set of methods you can call on an addressable value of @@ -24,7 +28,7 @@ import "go/types" // The order of the result is as for types.MethodSet(T). func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { isPointerToConcrete := func(T types.Type) bool { - ptr, ok := T.(*types.Pointer) + ptr, ok := aliases.Unalias(T).(*types.Pointer) return ok && !types.IsInterface(ptr.Elem()) } diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go index d2547c7433..cb6db8893f 100644 --- a/vendor/golang.org/x/tools/imports/forward.go +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -7,8 +7,8 @@ package imports // import "golang.org/x/tools/imports" import ( - "io/ioutil" "log" + "os" "golang.org/x/tools/internal/gocommand" intimp "golang.org/x/tools/internal/imports" @@ -44,7 +44,7 @@ var LocalPrefix string func Process(filename string, src []byte, opt *Options) ([]byte, error) { var err error if src == nil { - src, err = ioutil.ReadFile(filename) + src, err = os.ReadFile(filename) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go new file mode 100644 index 0000000000..f89112c8ee --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -0,0 +1,28 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package aliases + +import ( + "go/token" + "go/types" +) + +// Package aliases defines backward compatible shims +// for the types.Alias type representation added in 1.22. +// This defines placeholders for x/tools until 1.26. + +// NewAlias creates a new TypeName in Package pkg that +// is an alias for the type rhs. +// +// When GoVersion>=1.22 and GODEBUG=gotypesalias=1, +// the Type() of the return value is a *types.Alias. +func NewAlias(pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { + if enabled() { + tname := types.NewTypeName(pos, pkg, name, nil) + newAlias(tname, rhs) + return tname + } + return types.NewTypeName(pos, pkg, name, rhs) +} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go new file mode 100644 index 0000000000..1872b56ff8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go @@ -0,0 +1,30 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package aliases + +import ( + "go/types" +) + +// Alias is a placeholder for a go/types.Alias for <=1.21. +// It will never be created by go/types. +type Alias struct{} + +func (*Alias) String() string { panic("unreachable") } + +func (*Alias) Underlying() types.Type { panic("unreachable") } + +func (*Alias) Obj() *types.TypeName { panic("unreachable") } + +// Unalias returns the type t for go <=1.21. +func Unalias(t types.Type) types.Type { return t } + +// Always false for go <=1.21. Ignores GODEBUG. +func enabled() bool { return false } + +func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go new file mode 100644 index 0000000000..8b92116284 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -0,0 +1,72 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package aliases + +import ( + "go/ast" + "go/parser" + "go/token" + "go/types" + "os" + "strings" + "sync" +) + +// Alias is an alias of types.Alias. +type Alias = types.Alias + +// Unalias is a wrapper of types.Unalias. +func Unalias(t types.Type) types.Type { return types.Unalias(t) } + +// newAlias is an internal alias around types.NewAlias. +// Direct usage is discouraged as the moment. +// Try to use NewAlias instead. +func newAlias(tname *types.TypeName, rhs types.Type) *Alias { + a := types.NewAlias(tname, rhs) + // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. + Unalias(a) + return a +} + +// enabled returns true when types.Aliases are enabled. +func enabled() bool { + // Use the gotypesalias value in GODEBUG if set. + godebug := os.Getenv("GODEBUG") + value := -1 // last set value. + for _, f := range strings.Split(godebug, ",") { + switch f { + case "gotypesalias=1": + value = 1 + case "gotypesalias=0": + value = 0 + } + } + switch value { + case 0: + return false + case 1: + return true + default: + return aliasesDefault() + } +} + +// aliasesDefault reports if aliases are enabled by default. +func aliasesDefault() bool { + // Dynamically check if Aliases will be produced from go/types. 
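> Note: `enabled()` applies GODEBUG's last-setting-wins rule before falling back to the dynamic default. The parsing idiom in isolation (the helper name and default parameter are ours):

```go
package main

import (
	"fmt"
	"strings"
)

// gotypesalias reports the last value of the gotypesalias flag in a
// GODEBUG-style comma-separated list; def is used when the flag is absent.
func gotypesalias(godebug string, def bool) bool {
	value := -1 // last set value wins, as in GODEBUG semantics
	for _, f := range strings.Split(godebug, ",") {
		switch f {
		case "gotypesalias=1":
			value = 1
		case "gotypesalias=0":
			value = 0
		}
	}
	switch value {
	case 1:
		return true
	case 0:
		return false
	}
	return def
}

func main() {
	fmt.Println(gotypesalias("gotypesalias=0,gotypesalias=1", false)) // true (last wins)
	fmt.Println(gotypesalias("http2client=0", true))                  // true (default)
}
```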
+ aliasesDefaultOnce.Do(func() { + fset := token.NewFileSet() + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) + _, gotypesaliasDefault = pkg.Scope().Lookup("A").Type().(*types.Alias) + }) + return gotypesaliasDefault +} + +var gotypesaliasDefault bool +var aliasesDefaultOnce sync.Once diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index d15f0eb7ab..c3022a2862 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -13,11 +13,9 @@ import ( "go/token" "go/types" "strconv" -) -// DiagnoseFuzzTests controls whether the 'tests' analyzer diagnoses fuzz tests -// in Go 1.18+. -var DiagnoseFuzzTests bool = false + "golang.org/x/tools/internal/aliases" +) func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { // Get the end position for the type error. @@ -32,7 +30,10 @@ func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos } func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - under := typ + // TODO(adonovan): think about generics, and also generic aliases. + under := aliases.Unalias(typ) + // Don't call Underlying unconditionally: although it removed + // Named and Alias, it also removes TypeParam. if n, ok := typ.(*types.Named); ok { under = n.Underlying() } @@ -46,7 +47,7 @@ func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { case u.Info()&types.IsString != 0: return &ast.BasicLit{Kind: token.STRING, Value: `""`} default: - panic("unknown basic type") + panic(fmt.Sprintf("unknown basic type %v", u)) } case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array: return ast.NewIdent("nil") @@ -155,6 +156,10 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { }, }) } + if t.Variadic() { + last := params[len(params)-1] + last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} + } var returns []*ast.Field for i := 0; i < t.Results().Len(); i++ { r := TypeExpr(f, pkg, t.Results().At(i).Type()) diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go b/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go new file mode 100644 index 0000000000..39507723d3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go @@ -0,0 +1,113 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysisinternal + +import ( + "fmt" + "go/parser" + "go/token" + "strings" +) + +// MustExtractDoc is like [ExtractDoc] but it panics on error. +// +// To use, define a doc.go file such as: +// +// // Package halting defines an analyzer of program termination. +// // +// // # Analyzer halting +// // +// // halting: reports whether execution will halt. +// // +// // The halting analyzer reports a diagnostic for functions +// // that run forever. To suppress the diagnostics, try inserting +// // a 'break' statement into each loop. +// package halting +// +// import _ "embed" +// +// //go:embed doc.go +// var doc string +// +// And declare your analyzer as: +// +// var Analyzer = &analysis.Analyzer{ +// Name: "halting", +// Doc: analysisutil.MustExtractDoc(doc, "halting"), +// ... 
+// } +func MustExtractDoc(content, name string) string { + doc, err := ExtractDoc(content, name) + if err != nil { + panic(err) + } + return doc +} + +// ExtractDoc extracts a section of a package doc comment from the +// provided contents of an analyzer package's doc.go file. +// +// A section is a portion of the comment between one heading and +// the next, using this form: +// +// # Analyzer NAME +// +// NAME: SUMMARY +// +// Full description... +// +// where NAME matches the name argument, and SUMMARY is a brief +// verb-phrase that describes the analyzer. The following lines, up +// until the next heading or the end of the comment, contain the full +// description. ExtractDoc returns the portion following the colon, +// which is the form expected by Analyzer.Doc. +// +// Example: +// +// # Analyzer printf +// +// printf: checks consistency of calls to printf +// +// The printf analyzer checks consistency of calls to printf. +// Here is the complete description... +// +// This notation allows a single doc comment to provide documentation +// for multiple analyzers, each in its own section. +// The HTML anchors generated for each heading are predictable. +// +// It returns an error if the content was not a valid Go source file +// containing a package doc comment with a heading of the required +// form. +// +// This machinery enables the package documentation (typically +// accessible via the web at https://pkg.go.dev/) and the command +// documentation (typically printed to a terminal) to be derived from +// the same source and formatted appropriately. +func ExtractDoc(content, name string) (string, error) { + if content == "" { + return "", fmt.Errorf("empty Go source file") + } + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "", content, parser.ParseComments|parser.PackageClauseOnly) + if err != nil { + return "", fmt.Errorf("not a Go source file") + } + if f.Doc == nil { + return "", fmt.Errorf("Go source file has no package doc comment") + } + for _, section := range strings.Split(f.Doc.Text(), "\n# ") { + if body := strings.TrimPrefix(section, "Analyzer "+name); body != section && + body != "" && + body[0] == '\r' || body[0] == '\n' { + body = strings.TrimSpace(body) + rest := strings.TrimPrefix(body, name+":") + if rest == body { + return "", fmt.Errorf("'Analyzer %s' heading not followed by '%s: summary...' line", name, name) + } + return strings.TrimSpace(rest), nil + } + } + return "", fmt.Errorf("package doc comment contains no 'Analyzer %s' heading", name) +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/util.go b/vendor/golang.org/x/tools/internal/event/keys/util.go new file mode 100644 index 0000000000..c0e8e731c9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/util.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "sort" + "strings" +) + +// Join returns a canonical join of the keys in S: +// a sorted comma-separated string list. 
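> Note: `Join` (declared just below) constrains itself with `S ~[]T, T ~string`, so it canonicalizes any slice of string-like defined types without conversions at the call site. A sketch mirroring it (`join` and `PackageID` are invented example names):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

type PackageID string

// join mirrors keys.Join: sort the string forms and comma-join them.
func join[S ~[]T, T ~string](s S) string {
	strs := make([]string, 0, len(s))
	for _, v := range s {
		strs = append(strs, string(v))
	}
	sort.Strings(strs)
	return strings.Join(strs, ",")
}

func main() {
	fmt.Println(join([]PackageID{"b/pkg", "a/pkg"})) // a/pkg,b/pkg
}
```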
+func Join[S ~[]T, T ~string](s S) string { + strs := make([]string, 0, len(s)) + for _, v := range s { + strs = append(strs, string(v)) + } + sort.Strings(strs) + return strings.Join(strs, ",") +} diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go new file mode 100644 index 0000000000..581b26c204 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/tag/tag.go @@ -0,0 +1,59 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag provides the labels used for telemetry throughout gopls. +package tag + +import ( + "golang.org/x/tools/internal/event/keys" +) + +var ( + // create the label keys we use + Method = keys.NewString("method", "") + StatusCode = keys.NewString("status.code", "") + StatusMessage = keys.NewString("status.message", "") + RPCID = keys.NewString("id", "") + RPCDirection = keys.NewString("direction", "") + File = keys.NewString("file", "") + Directory = keys.New("directory", "") + URI = keys.New("URI", "") + Package = keys.NewString("package", "") // sorted comma-separated list of Package IDs + PackagePath = keys.NewString("package_path", "") + Query = keys.New("query", "") + Snapshot = keys.NewUInt64("snapshot", "") + Operation = keys.NewString("operation", "") + + Position = keys.New("position", "") + Category = keys.NewString("category", "") + PackageCount = keys.NewInt("packages", "") + Files = keys.New("files", "") + Port = keys.NewInt("port", "") + Type = keys.New("type", "") + HoverKind = keys.NewString("hoverkind", "") + + NewServer = keys.NewString("new_server", "A new server was added") + EndServer = keys.NewString("end_server", "A server was shut down") + + ServerID = keys.NewString("server", "The server ID an event is related to") + Logfile = keys.NewString("logfile", "") + DebugAddress = keys.NewString("debug_address", "") + GoplsPath = keys.NewString("gopls_path", "") + ClientID = keys.NewString("client_id", "") + + Level = keys.NewInt("level", "The logging level") +) + +var ( + // create the stats we measure + Started = keys.NewInt64("started", "Count of started RPCs.") + ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes) + SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes) + Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds) +) + +const ( + Inbound = "in" + Outbound = "out" +) diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go deleted file mode 100644 index 798fe599be..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fastwalk provides a faster version of filepath.Walk for file system -// scanning tools. -package fastwalk - -import ( - "errors" - "os" - "path/filepath" - "runtime" - "sync" -) - -// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the -// symlink named in the call may be traversed. 
-var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") - -// ErrSkipFiles is a used as a return value from WalkFuncs to indicate that the -// callback should not be called for any other files in the current directory. -// Child directories will still be traversed. -var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory") - -// Walk is a faster implementation of filepath.Walk. -// -// filepath.Walk's design necessarily calls os.Lstat on each file, -// even if the caller needs less info. -// Many tools need only the type of each file. -// On some platforms, this information is provided directly by the readdir -// system call, avoiding the need to stat each file individually. -// fastwalk_unix.go contains a fork of the syscall routines. -// -// See golang.org/issue/16399 -// -// Walk walks the file tree rooted at root, calling walkFn for -// each file or directory in the tree, including root. -// -// If fastWalk returns filepath.SkipDir, the directory is skipped. -// -// Unlike filepath.Walk: -// - file stat calls must be done by the user. -// The only provided metadata is the file type, which does not include -// any permission bits. -// - multiple goroutines stat the filesystem concurrently. The provided -// walkFn must be safe for concurrent use. -// - fastWalk can follow symlinks if walkFn returns the TraverseLink -// sentinel error. It is the walkFn's responsibility to prevent -// fastWalk from going into symlink cycles. -func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { - // TODO(bradfitz): make numWorkers configurable? We used a - // minimum of 4 to give the kernel more info about multiple - // things we want, in hopes its I/O scheduling can take - // advantage of that. Hopefully most are in cache. Maybe 4 is - // even too low of a minimum. Profile more. - numWorkers := 4 - if n := runtime.NumCPU(); n > numWorkers { - numWorkers = n - } - - // Make sure to wait for all workers to finish, otherwise - // walkFn could still be called after returning. This Wait call - // runs after close(e.donec) below. - var wg sync.WaitGroup - defer wg.Wait() - - w := &walker{ - fn: walkFn, - enqueuec: make(chan walkItem, numWorkers), // buffered for performance - workc: make(chan walkItem, numWorkers), // buffered for performance - donec: make(chan struct{}), - - // buffered for correctness & not leaking goroutines: - resc: make(chan error, numWorkers), - } - defer close(w.donec) - - for i := 0; i < numWorkers; i++ { - wg.Add(1) - go w.doWork(&wg) - } - todo := []walkItem{{dir: root}} - out := 0 - for { - workc := w.workc - var workItem walkItem - if len(todo) == 0 { - workc = nil - } else { - workItem = todo[len(todo)-1] - } - select { - case workc <- workItem: - todo = todo[:len(todo)-1] - out++ - case it := <-w.enqueuec: - todo = append(todo, it) - case err := <-w.resc: - out-- - if err != nil { - return err - } - if out == 0 && len(todo) == 0 { - // It's safe to quit here, as long as the buffered - // enqueue channel isn't also readable, which might - // happen if the worker sends both another unit of - // work and its result before the other select was - // scheduled and both w.resc and w.enqueuec were - // readable. - select { - case it := <-w.enqueuec: - todo = append(todo, it) - default: - return nil - } - } - } - } -} - -// doWork reads directories as instructed (via workc) and runs the -// user's callback function. 
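> Note: fastwalk's core optimization -- taking the file type from readdir rather than issuing an lstat per entry -- has been available in the standard library since Go 1.16 via `filepath.WalkDir` and `fs.DirEntry`, which is presumably what makes deleting this fork viable. A minimal replacement sketch:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() && d.Name() == ".git" {
			return filepath.SkipDir // prune directories, as with filepath.SkipDir above
		}
		fmt.Println(d.Type(), path) // type comes from readdir; no extra stat
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}
```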
-func (w *walker) doWork(wg *sync.WaitGroup) { - defer wg.Done() - for { - select { - case <-w.donec: - return - case it := <-w.workc: - select { - case <-w.donec: - return - case w.resc <- w.walk(it.dir, !it.callbackDone): - } - } - } -} - -type walker struct { - fn func(path string, typ os.FileMode) error - - donec chan struct{} // closed on fastWalk's return - workc chan walkItem // to workers - enqueuec chan walkItem // from workers - resc chan error // from workers -} - -type walkItem struct { - dir string - callbackDone bool // callback already called; don't do it again -} - -func (w *walker) enqueue(it walkItem) { - select { - case w.enqueuec <- it: - case <-w.donec: - } -} - -func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { - joined := dirName + string(os.PathSeparator) + baseName - if typ == os.ModeDir { - w.enqueue(walkItem{dir: joined}) - return nil - } - - err := w.fn(joined, typ) - if typ == os.ModeSymlink { - if err == ErrTraverseLink { - // Set callbackDone so we don't call it twice for both the - // symlink-as-symlink and the symlink-as-directory later: - w.enqueue(walkItem{dir: joined, callbackDone: true}) - return nil - } - if err == filepath.SkipDir { - // Permit SkipDir on symlinks too. - return nil - } - } - return err -} - -func (w *walker) walk(root string, runUserCallback bool) error { - if runUserCallback { - err := w.fn(root, os.ModeDir) - if err == filepath.SkipDir { - return nil - } - if err != nil { - return err - } - } - - return readDir(root, w.onDirEnt) -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go deleted file mode 100644 index 0ca55e0d56..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin && cgo -// +build darwin,cgo - -package fastwalk - -/* -#include - -// fastwalk_readdir_r wraps readdir_r so that we don't have to pass a dirent** -// result pointer which triggers CGO's "Go pointer to Go pointer" check unless -// we allocat the result dirent* with malloc. -// -// fastwalk_readdir_r returns 0 on success, -1 upon reaching the end of the -// directory, or a positive error number to indicate failure. 
-static int fastwalk_readdir_r(DIR *fd, struct dirent *entry) { - struct dirent *result; - int ret = readdir_r(fd, entry, &result); - if (ret == 0 && result == NULL) { - ret = -1; // EOF - } - return ret; -} -*/ -import "C" - -import ( - "os" - "syscall" - "unsafe" -) - -func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { - fd, err := openDir(dirName) - if err != nil { - return &os.PathError{Op: "opendir", Path: dirName, Err: err} - } - defer C.closedir(fd) - - skipFiles := false - var dirent syscall.Dirent - for { - ret := int(C.fastwalk_readdir_r(fd, (*C.struct_dirent)(unsafe.Pointer(&dirent)))) - if ret != 0 { - if ret == -1 { - break // EOF - } - if ret == int(syscall.EINTR) { - continue - } - return &os.PathError{Op: "readdir", Path: dirName, Err: syscall.Errno(ret)} - } - if dirent.Ino == 0 { - continue - } - typ := dtToType(dirent.Type) - if skipFiles && typ.IsRegular() { - continue - } - name := (*[len(syscall.Dirent{}.Name)]byte)(unsafe.Pointer(&dirent.Name))[:] - name = name[:dirent.Namlen] - for i, c := range name { - if c == 0 { - name = name[:i] - break - } - } - // Check for useless names before allocating a string. - if string(name) == "." || string(name) == ".." { - continue - } - if err := fn(dirName, string(name), typ); err != nil { - if err != ErrSkipFiles { - return err - } - skipFiles = true - } - } - - return nil -} - -func dtToType(typ uint8) os.FileMode { - switch typ { - case syscall.DT_BLK: - return os.ModeDevice - case syscall.DT_CHR: - return os.ModeDevice | os.ModeCharDevice - case syscall.DT_DIR: - return os.ModeDir - case syscall.DT_FIFO: - return os.ModeNamedPipe - case syscall.DT_LNK: - return os.ModeSymlink - case syscall.DT_REG: - return 0 - case syscall.DT_SOCK: - return os.ModeSocket - } - return ^os.FileMode(0) -} - -// openDir wraps opendir(3) and handles any EINTR errors. The returned *DIR -// needs to be closed with closedir(3). -func openDir(path string) (*C.DIR, error) { - name, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - for { - fd, err := C.opendir((*C.char)(unsafe.Pointer(name))) - if err != syscall.EINTR { - return fd, err - } - } -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go deleted file mode 100644 index d58595dbd3..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd -// +build freebsd openbsd netbsd - -package fastwalk - -import "syscall" - -func direntInode(dirent *syscall.Dirent) uint64 { - return uint64(dirent.Fileno) -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go deleted file mode 100644 index d3922890b0..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build (linux || (darwin && !cgo)) && !appengine -// +build linux darwin,!cgo -// +build !appengine - -package fastwalk - -import "syscall" - -func direntInode(dirent *syscall.Dirent) uint64 { - return dirent.Ino -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go deleted file mode 100644 index 38a4db6af3..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (darwin && !cgo) || freebsd || openbsd || netbsd -// +build darwin,!cgo freebsd openbsd netbsd - -package fastwalk - -import "syscall" - -func direntNamlen(dirent *syscall.Dirent) uint64 { - return uint64(dirent.Namlen) -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go deleted file mode 100644 index c82e57df85..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && !appengine -// +build linux,!appengine - -package fastwalk - -import ( - "bytes" - "syscall" - "unsafe" -) - -func direntNamlen(dirent *syscall.Dirent) uint64 { - const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name)) - nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) - const nameBufLen = uint16(len(nameBuf)) - limit := dirent.Reclen - fixedHdr - if limit > nameBufLen { - limit = nameBufLen - } - nameLen := bytes.IndexByte(nameBuf[:limit], 0) - if nameLen < 0 { - panic("failed to find terminating 0 byte in dirent") - } - return uint64(nameLen) -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go deleted file mode 100644 index 085d311600..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine || (!linux && !darwin && !freebsd && !openbsd && !netbsd) -// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd - -package fastwalk - -import ( - "io/ioutil" - "os" -) - -// readDir calls fn for each directory entry in dirName. -// It does not descend into directories or follow symlinks. -// If fn returns a non-nil error, readDir returns with that error -// immediately. 
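The portable fallback deleted here predates Go 1.16. For comparison, a modern standard-library equivalent of the same loop would use os.ReadDir and DirEntry.Type instead of the deprecated ioutil.ReadDir; this sketch omits the ErrSkipFiles handling for brevity.

package main

import (
	"fmt"
	"io/fs"
	"os"
)

// readDir mirrors the deleted portable fallback: call fn for each entry,
// without descending into subdirectories or following symlinks.
func readDir(dirName string, fn func(dirName, entName string, typ fs.FileMode) error) error {
	ents, err := os.ReadDir(dirName) // Go 1.16+: no Stat per entry needed
	if err != nil {
		return err
	}
	for _, ent := range ents {
		// DirEntry.Type returns only the type bits, matching the
		// fi.Mode()&os.ModeType masking in the removed code.
		if err := fn(dirName, ent.Name(), ent.Type()); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = readDir(".", func(dir, name string, typ fs.FileMode) error {
		fmt.Println(dir, name, typ)
		return nil
	})
}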
-func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { - fis, err := ioutil.ReadDir(dirName) - if err != nil { - return err - } - skipFiles := false - for _, fi := range fis { - if fi.Mode().IsRegular() && skipFiles { - continue - } - if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { - if err == ErrSkipFiles { - skipFiles = true - continue - } - return err - } - } - return nil -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go deleted file mode 100644 index f12f1a734c..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (linux || freebsd || openbsd || netbsd || (darwin && !cgo)) && !appengine -// +build linux freebsd openbsd netbsd darwin,!cgo -// +build !appengine - -package fastwalk - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -const blockSize = 8 << 10 - -// unknownFileMode is a sentinel (and bogus) os.FileMode -// value used to represent a syscall.DT_UNKNOWN Dirent.Type. -const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice - -func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { - fd, err := open(dirName, 0, 0) - if err != nil { - return &os.PathError{Op: "open", Path: dirName, Err: err} - } - defer syscall.Close(fd) - - // The buffer must be at least a block long. - buf := make([]byte, blockSize) // stack-allocated; doesn't escape - bufp := 0 // starting read position in buf - nbuf := 0 // end valid data in buf - skipFiles := false - for { - if bufp >= nbuf { - bufp = 0 - nbuf, err = readDirent(fd, buf) - if err != nil { - return os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - return nil - } - } - consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) - bufp += consumed - if name == "" || name == "." || name == ".." { - continue - } - // Fallback for filesystems (like old XFS) that don't - // support Dirent.Type and have DT_UNKNOWN (0) there - // instead. - if typ == unknownFileMode { - fi, err := os.Lstat(dirName + "/" + name) - if err != nil { - // It got deleted in the meantime. - if os.IsNotExist(err) { - continue - } - return err - } - typ = fi.Mode() & os.ModeType - } - if skipFiles && typ.IsRegular() { - continue - } - if err := fn(dirName, name, typ); err != nil { - if err == ErrSkipFiles { - skipFiles = true - continue - } - return err - } - } -} - -func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { - // golang.org/issue/37269 - dirent := &syscall.Dirent{} - copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf) - if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { - panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) - } - if len(buf) < int(dirent.Reclen) { - panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) - } - consumed = int(dirent.Reclen) - if direntInode(dirent) == 0 { // File absent in directory. 
- return - } - switch dirent.Type { - case syscall.DT_REG: - typ = 0 - case syscall.DT_DIR: - typ = os.ModeDir - case syscall.DT_LNK: - typ = os.ModeSymlink - case syscall.DT_BLK: - typ = os.ModeDevice - case syscall.DT_FIFO: - typ = os.ModeNamedPipe - case syscall.DT_SOCK: - typ = os.ModeSocket - case syscall.DT_UNKNOWN: - typ = unknownFileMode - default: - // Skip weird things. - // It's probably a DT_WHT (http://lwn.net/Articles/325369/) - // or something. Revisit if/when this package is moved outside - // of goimports. goimports only cares about regular files, - // symlinks, and directories. - return - } - - nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) - nameLen := direntNamlen(dirent) - - // Special cases for common things: - if nameLen == 1 && nameBuf[0] == '.' { - name = "." - } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { - name = ".." - } else { - name = string(nameBuf[:nameLen]) - } - return -} - -// According to https://golang.org/doc/go1.14#runtime -// A consequence of the implementation of preemption is that on Unix systems, including Linux and macOS -// systems, programs built with Go 1.14 will receive more signals than programs built with earlier releases. -// -// This causes syscall.Open and syscall.ReadDirent sometimes fail with EINTR errors. -// We need to retry in this case. -func open(path string, mode int, perm uint32) (fd int, err error) { - for { - fd, err := syscall.Open(path, mode, perm) - if err != syscall.EINTR { - return fd, err - } - } -} - -func readDirent(fd int, buf []byte) (n int, err error) { - for { - nbuf, err := syscall.ReadDirent(fd, buf) - if err != syscall.EINTR { - return nbuf, err - } - } -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go deleted file mode 100644 index 30582ed6d3..0000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "math" - "math/big" - "sort" - "strings" -) - -// If debugFormat is set, each integer and string value is preceded by a marker -// and position information in the encoding. This mechanism permits an importer -// to recognize immediately when it is out of sync. The importer recognizes this -// mode automatically (i.e., it can import export data produced with debugging -// support even if debugFormat is not set at the time of import). This mode will -// lead to massively larger export data (by a factor of 2 to 3) and should only -// be enabled during development and debugging. -// -// NOTE: This flag is the first flag to enable if importing dies because of -// (suspected) format errors, and whenever a change is made to the format. -const debugFormat = false // default: false - -// Current export format version. Increase with each format change. -// -// Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. 
-// -// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding -const exportVersion = 4 - -// trackAllTypes enables cycle tracking for all types, not just named -// types. The existing compiler invariants assume that unnamed types -// that are not completely set up are not used, or else there are spurious -// errors. -// If disabled, only named types are tracked, possibly leading to slightly -// less efficient encoding in rare cases. It also prevents the export of -// some corner-case type declarations (but those are not handled correctly -// with with the textual export format either). -// TODO(gri) enable and remove once issues caused by it are fixed -const trackAllTypes = false - -type exporter struct { - fset *token.FileSet - out bytes.Buffer - - // object -> index maps, indexed in order of serialization - strIndex map[string]int - pkgIndex map[*types.Package]int - typIndex map[types.Type]int - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - - // debugging support - written int // bytes written - indent int // for trace -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} - -// BExportData returns binary export data for pkg. -// If no file set is provided, position info will be missing. -func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { - if !debug { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - } - - p := exporter{ - fset: fset, - strIndex: map[string]int{"": 0}, // empty string is mapped to 0 - pkgIndex: make(map[*types.Package]int), - typIndex: make(map[types.Type]int), - posInfoFormat: true, // TODO(gri) might become a flag, eventually - } - - // write version info - // The version string must start with "version %d" where %d is the version - // number. Additional debugging information may follow after a blank; that - // text is ignored by the importer. 
- p.rawStringln(fmt.Sprintf("version %d", exportVersion)) - var debug string - if debugFormat { - debug = "debug" - } - p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly - p.bool(trackAllTypes) - p.bool(p.posInfoFormat) - - // --- generic export data --- - - // populate type map with predeclared "known" types - for index, typ := range predeclared() { - p.typIndex[typ] = index - } - if len(p.typIndex) != len(predeclared()) { - return nil, internalError("duplicate entries in type map?") - } - - // write package data - p.pkg(pkg, true) - if trace { - p.tracef("\n") - } - - // write objects - objcount := 0 - scope := pkg.Scope() - for _, name := range scope.Names() { - if !token.IsExported(name) { - continue - } - if trace { - p.tracef("\n") - } - p.obj(scope.Lookup(name)) - objcount++ - } - - // indicate end of list - if trace { - p.tracef("\n") - } - p.tag(endTag) - - // for self-verification only (redundant) - p.int(objcount) - - if trace { - p.tracef("\n") - } - - // --- end of export data --- - - return p.out.Bytes(), nil -} - -func (p *exporter) pkg(pkg *types.Package, emptypath bool) { - if pkg == nil { - panic(internalError("unexpected nil pkg")) - } - - // if we saw the package before, write its index (>= 0) - if i, ok := p.pkgIndex[pkg]; ok { - p.index('P', i) - return - } - - // otherwise, remember the package, write the package tag (< 0) and package data - if trace { - p.tracef("P%d = { ", len(p.pkgIndex)) - defer p.tracef("} ") - } - p.pkgIndex[pkg] = len(p.pkgIndex) - - p.tag(packageTag) - p.string(pkg.Name()) - if emptypath { - p.string("") - } else { - p.string(pkg.Path()) - } -} - -func (p *exporter) obj(obj types.Object) { - switch obj := obj.(type) { - case *types.Const: - p.tag(constTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - p.value(obj.Val()) - - case *types.TypeName: - if obj.IsAlias() { - p.tag(aliasTag) - p.pos(obj) - p.qualifiedName(obj) - } else { - p.tag(typeTag) - } - p.typ(obj.Type()) - - case *types.Var: - p.tag(varTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - - case *types.Func: - p.tag(funcTag) - p.pos(obj) - p.qualifiedName(obj) - sig := obj.Type().(*types.Signature) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - - default: - panic(internalErrorf("unexpected object %v (%T)", obj, obj)) - } -} - -func (p *exporter) pos(obj types.Object) { - if !p.posInfoFormat { - return - } - - file, line := p.fileLine(obj) - if file == p.prevFile { - // common case: write line delta - // delta == 0 means different file or no line change - delta := line - p.prevLine - p.int(delta) - if delta == 0 { - p.int(-1) // -1 means no file change - } - } else { - // different file - p.int(0) - // Encode filename as length of common prefix with previous - // filename, followed by (possibly empty) suffix. Filenames - // frequently share path prefixes, so this can save a lot - // of space and make export data size less dependent on file - // path length. The suffix is unlikely to be empty because - // file names tend to end in ".go". 
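The prefix compression described in the comment above is easy to see in isolation. A toy round-trip using the exporter's commonPrefixLen (copied from the deleted code; the actual wire encoding is simplified away):

package main

import "fmt"

// commonPrefixLen is copied from the deleted exporter: the length of the
// longest common prefix of a and b.
func commonPrefixLen(a, b string) int {
	if len(a) > len(b) {
		a, b = b, a
	}
	i := 0
	for i < len(a) && a[i] == b[i] {
		i++
	}
	return i
}

func main() {
	prev := "golang.org/x/tools/internal/gcimporter/bexport.go"
	next := "golang.org/x/tools/internal/gcimporter/bimport.go"
	n := commonPrefixLen(prev, next)
	// Only the suffix after the shared prefix is written; the reader
	// reconstructs the full path as prev[:n] + suffix.
	suffix := next[n:]
	fmt.Printf("prefix=%d suffix=%q\n", n, suffix)
	fmt.Println(prev[:n]+suffix == next) // true
}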
- n := commonPrefixLen(p.prevFile, file) - p.int(n) // n >= 0 - p.string(file[n:]) // write suffix only - p.prevFile = file - p.int(line) - } - p.prevLine = line -} - -func (p *exporter) fileLine(obj types.Object) (file string, line int) { - if p.fset != nil { - pos := p.fset.Position(obj.Pos()) - file = pos.Filename - line = pos.Line - } - return -} - -func commonPrefixLen(a, b string) int { - if len(a) > len(b) { - a, b = b, a - } - // len(a) <= len(b) - i := 0 - for i < len(a) && a[i] == b[i] { - i++ - } - return i -} - -func (p *exporter) qualifiedName(obj types.Object) { - p.string(obj.Name()) - p.pkg(obj.Pkg(), false) -} - -func (p *exporter) typ(t types.Type) { - if t == nil { - panic(internalError("nil type")) - } - - // Possible optimization: Anonymous pointer types *T where - // T is a named type are common. We could canonicalize all - // such types *T to a single type PT = *T. This would lead - // to at most one *T entry in typIndex, and all future *T's - // would be encoded as the respective index directly. Would - // save 1 byte (pointerTag) per *T and reduce the typIndex - // size (at the cost of a canonicalization map). We can do - // this later, without encoding format change. - - // if we saw the type before, write its index (>= 0) - if i, ok := p.typIndex[t]; ok { - p.index('T', i) - return - } - - // otherwise, remember the type, write the type tag (< 0) and type data - if trackAllTypes { - if trace { - p.tracef("T%d = {>\n", len(p.typIndex)) - defer p.tracef("<\n} ") - } - p.typIndex[t] = len(p.typIndex) - } - - switch t := t.(type) { - case *types.Named: - if !trackAllTypes { - // if we don't track all types, track named types now - p.typIndex[t] = len(p.typIndex) - } - - p.tag(namedTag) - p.pos(t.Obj()) - p.qualifiedName(t.Obj()) - p.typ(t.Underlying()) - if !types.IsInterface(t) { - p.assocMethods(t) - } - - case *types.Array: - p.tag(arrayTag) - p.int64(t.Len()) - p.typ(t.Elem()) - - case *types.Slice: - p.tag(sliceTag) - p.typ(t.Elem()) - - case *dddSlice: - p.tag(dddTag) - p.typ(t.elem) - - case *types.Struct: - p.tag(structTag) - p.fieldList(t) - - case *types.Pointer: - p.tag(pointerTag) - p.typ(t.Elem()) - - case *types.Signature: - p.tag(signatureTag) - p.paramList(t.Params(), t.Variadic()) - p.paramList(t.Results(), false) - - case *types.Interface: - p.tag(interfaceTag) - p.iface(t) - - case *types.Map: - p.tag(mapTag) - p.typ(t.Key()) - p.typ(t.Elem()) - - case *types.Chan: - p.tag(chanTag) - p.int(int(3 - t.Dir())) // hack - p.typ(t.Elem()) - - default: - panic(internalErrorf("unexpected type %T: %s", t, t)) - } -} - -func (p *exporter) assocMethods(named *types.Named) { - // Sort methods (for determinism). 
- var methods []*types.Func - for i := 0; i < named.NumMethods(); i++ { - methods = append(methods, named.Method(i)) - } - sort.Sort(methodsByName(methods)) - - p.int(len(methods)) - - if trace && methods != nil { - p.tracef("associated methods {>\n") - } - - for i, m := range methods { - if trace && i > 0 { - p.tracef("\n") - } - - p.pos(m) - name := m.Name() - p.string(name) - if !exported(name) { - p.pkg(m.Pkg(), false) - } - - sig := m.Type().(*types.Signature) - p.paramList(types.NewTuple(sig.Recv()), false) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - p.int(0) // dummy value for go:nointerface pragma - ignored by importer - } - - if trace && methods != nil { - p.tracef("<\n} ") - } -} - -type methodsByName []*types.Func - -func (x methodsByName) Len() int { return len(x) } -func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } - -func (p *exporter) fieldList(t *types.Struct) { - if trace && t.NumFields() > 0 { - p.tracef("fields {>\n") - defer p.tracef("<\n} ") - } - - p.int(t.NumFields()) - for i := 0; i < t.NumFields(); i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.field(t.Field(i)) - p.string(t.Tag(i)) - } -} - -func (p *exporter) field(f *types.Var) { - if !f.IsField() { - panic(internalError("field expected")) - } - - p.pos(f) - p.fieldName(f) - p.typ(f.Type()) -} - -func (p *exporter) iface(t *types.Interface) { - // TODO(gri): enable importer to load embedded interfaces, - // then emit Embeddeds and ExplicitMethods separately here. - p.int(0) - - n := t.NumMethods() - if trace && n > 0 { - p.tracef("methods {>\n") - defer p.tracef("<\n} ") - } - p.int(n) - for i := 0; i < n; i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.method(t.Method(i)) - } -} - -func (p *exporter) method(m *types.Func) { - sig := m.Type().(*types.Signature) - if sig.Recv() == nil { - panic(internalError("method expected")) - } - - p.pos(m) - p.string(m.Name()) - if m.Name() != "_" && !token.IsExported(m.Name()) { - p.pkg(m.Pkg(), false) - } - - // interface method; no need to encode receiver. - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) -} - -func (p *exporter) fieldName(f *types.Var) { - name := f.Name() - - if f.Anonymous() { - // anonymous field - we distinguish between 3 cases: - // 1) field name matches base type name and is exported - // 2) field name matches base type name and is not exported - // 3) field name doesn't match base type name (alias name) - bname := basetypeName(f.Type()) - if name == bname { - if token.IsExported(name) { - name = "" // 1) we don't need to know the field name or package - } else { - name = "?" // 2) use unexported name "?" 
to force package export - } - } else { - // 3) indicate alias and export name as is - // (this requires an extra "@" but this is a rare case) - p.string("@") - } - } - - p.string(name) - if name != "" && !token.IsExported(name) { - p.pkg(f.Pkg(), false) - } -} - -func basetypeName(typ types.Type) string { - switch typ := deref(typ).(type) { - case *types.Basic: - return typ.Name() - case *types.Named: - return typ.Obj().Name() - default: - return "" // unnamed type - } -} - -func (p *exporter) paramList(params *types.Tuple, variadic bool) { - // use negative length to indicate unnamed parameters - // (look at the first parameter only since either all - // names are present or all are absent) - n := params.Len() - if n > 0 && params.At(0).Name() == "" { - n = -n - } - p.int(n) - for i := 0; i < params.Len(); i++ { - q := params.At(i) - t := q.Type() - if variadic && i == params.Len()-1 { - t = &dddSlice{t.(*types.Slice).Elem()} - } - p.typ(t) - if n > 0 { - name := q.Name() - p.string(name) - if name != "_" { - p.pkg(q.Pkg(), false) - } - } - p.string("") // no compiler-specific info - } -} - -func (p *exporter) value(x constant.Value) { - if trace { - p.tracef("= ") - } - - switch x.Kind() { - case constant.Bool: - tag := falseTag - if constant.BoolVal(x) { - tag = trueTag - } - p.tag(tag) - - case constant.Int: - if v, exact := constant.Int64Val(x); exact { - // common case: x fits into an int64 - use compact encoding - p.tag(int64Tag) - p.int64(v) - return - } - // uncommon case: large x - use float encoding - // (powers of 2 will be encoded efficiently with exponent) - p.tag(floatTag) - p.float(constant.ToFloat(x)) - - case constant.Float: - p.tag(floatTag) - p.float(x) - - case constant.Complex: - p.tag(complexTag) - p.float(constant.Real(x)) - p.float(constant.Imag(x)) - - case constant.String: - p.tag(stringTag) - p.string(constant.StringVal(x)) - - case constant.Unknown: - // package contains type errors - p.tag(unknownTag) - - default: - panic(internalErrorf("unexpected value %v (%T)", x, x)) - } -} - -func (p *exporter) float(x constant.Value) { - if x.Kind() != constant.Float { - panic(internalErrorf("unexpected constant %v, want float", x)) - } - // extract sign (there is no -0) - sign := constant.Sign(x) - if sign == 0 { - // x == 0 - p.int(0) - return - } - // x != 0 - - var f big.Float - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - r := valueToRat(num) - f.SetRat(r.Quo(r, valueToRat(denom))) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - f.SetFloat64(math.MaxFloat64) // FIXME - } - - // extract exponent such that 0.5 <= m < 1.0 - var m big.Float - exp := f.MantExp(&m) - - // extract mantissa as *big.Int - // - set exponent large enough so mant satisfies mant.IsInt() - // - get *big.Int from mant - m.SetMantExp(&m, int(m.MinPrec())) - mant, acc := m.Int(nil) - if acc != big.Exact { - panic(internalError("internal error")) - } - - p.int(sign) - p.int(exp) - p.string(string(mant.Bytes())) -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. 
- bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -func (p *exporter) bool(b bool) bool { - if trace { - p.tracef("[") - defer p.tracef("= %v] ", b) - } - - x := 0 - if b { - x = 1 - } - p.int(x) - return b -} - -// ---------------------------------------------------------------------------- -// Low-level encoders - -func (p *exporter) index(marker byte, index int) { - if index < 0 { - panic(internalError("invalid index < 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%c%d ", marker, index) - } - p.rawInt64(int64(index)) -} - -func (p *exporter) tag(tag int) { - if tag >= 0 { - panic(internalError("invalid tag >= 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%s ", tagString[-tag]) - } - p.rawInt64(int64(tag)) -} - -func (p *exporter) int(x int) { - p.int64(int64(x)) -} - -func (p *exporter) int64(x int64) { - if debugFormat { - p.marker('i') - } - if trace { - p.tracef("%d ", x) - } - p.rawInt64(x) -} - -func (p *exporter) string(s string) { - if debugFormat { - p.marker('s') - } - if trace { - p.tracef("%q ", s) - } - // if we saw the string before, write its index (>= 0) - // (the empty string is mapped to 0) - if i, ok := p.strIndex[s]; ok { - p.rawInt64(int64(i)) - return - } - // otherwise, remember string and write its negative length and bytes - p.strIndex[s] = len(p.strIndex) - p.rawInt64(-int64(len(s))) - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } -} - -// marker emits a marker byte and position information which makes -// it easy for a reader to detect if it is "out of sync". Used for -// debugFormat format only. -func (p *exporter) marker(m byte) { - p.rawByte(m) - // Enable this for help tracking down the location - // of an incorrect marker when running in debugFormat. - if false && trace { - p.tracef("#%d ", p.written) - } - p.rawInt64(int64(p.written)) -} - -// rawInt64 should only be used by low-level encoders. -func (p *exporter) rawInt64(x int64) { - var tmp [binary.MaxVarintLen64]byte - n := binary.PutVarint(tmp[:], x) - for i := 0; i < n; i++ { - p.rawByte(tmp[i]) - } -} - -// rawStringln should only be used to emit the initial version string. -func (p *exporter) rawStringln(s string) { - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } - p.rawByte('\n') -} - -// rawByte is the bottleneck interface to write to p.out. -// rawByte escapes b as follows (any encoding does that -// hides '$'): -// -// '$' => '|' 'S' -// '|' => '|' '|' -// -// Necessary so other tools can find the end of the -// export data by searching for "$$". -// rawByte should only be used by low-level encoders. -func (p *exporter) rawByte(b byte) { - switch b { - case '$': - // write '$' as '|' 'S' - b = 'S' - fallthrough - case '|': - // write '|' as '|' '|' - p.out.WriteByte('|') - p.written++ - } - p.out.WriteByte(b) - p.written++ -} - -// tracef is like fmt.Printf but it rewrites the format string -// to take care of indentation. -func (p *exporter) tracef(format string, args ...interface{}) { - if strings.ContainsAny(format, "<>\n") { - var buf bytes.Buffer - for i := 0; i < len(format); i++ { - // no need to deal with runes - ch := format[i] - switch ch { - case '>': - p.indent++ - continue - case '<': - p.indent-- - continue - } - buf.WriteByte(ch) - if ch == '\n' { - for j := p.indent; j > 0; j-- { - buf.WriteString(". 
") - } - } - } - format = buf.String() - } - fmt.Printf(format, args...) -} - -// Debugging support. -// (tagString is only used when tracing is enabled) -var tagString = [...]string{ - // Packages - -packageTag: "package", - - // Types - -namedTag: "named type", - -arrayTag: "array", - -sliceTag: "slice", - -dddTag: "ddd", - -structTag: "struct", - -pointerTag: "pointer", - -signatureTag: "signature", - -interfaceTag: "interface", - -mapTag: "map", - -chanTag: "chan", - - // Values - -falseTag: "false", - -trueTag: "true", - -int64Tag: "int64", - -floatTag: "float", - -fractionTag: "fraction", - -complexTag: "complex", - -stringTag: "string", - -unknownTag: "unknown", - - // Type aliases - -aliasTag: "alias", -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index b85de01470..d98b0db2a9 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -2,340 +2,24 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. package gcimporter import ( - "encoding/binary" "fmt" - "go/constant" "go/token" "go/types" - "sort" - "strconv" - "strings" "sync" - "unicode" - "unicode/utf8" ) -type importer struct { - imports map[string]*types.Package - data []byte - importpath string - buf []byte // for reading strings - version int // export format version - - // object lists - strList []string // in order of appearance - pathList []string // in order of appearance - pkgList []*types.Package // in order of appearance - typList []types.Type // in order of appearance - interfaceList []*types.Interface // for delayed completion only - trackAllTypes bool - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - fake fakeFileSet - - // debugging support - debugFormat bool - read int // bytes read -} - -// BImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - // catch panics and return them as errors - const currentVersion = 6 - version := -1 // unknown version - defer func() { - if e := recover(); e != nil { - // Return a (possibly nil or incomplete) package unchanged (see #16088). - if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) - } - } - }() - - p := importer{ - imports: imports, - data: data, - importpath: path, - version: version, - strList: []string{""}, // empty string is mapped to 0 - pathList: []string{""}, // empty string is mapped to 0 - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - } - defer p.fake.setLines() // set lines for files in fset - - // read version info - var versionstr string - if b := p.rawByte(); b == 'c' || b == 'd' { - // Go1.7 encoding; first byte encodes low-level - // encoding format (compact vs debug). 
- // For backward-compatibility only (avoid problems with - // old installed packages). Newly compiled packages use - // the extensible format string. - // TODO(gri) Remove this support eventually; after Go1.8. - if b == 'd' { - p.debugFormat = true - } - p.trackAllTypes = p.rawByte() == 'a' - p.posInfoFormat = p.int() != 0 - versionstr = p.string() - if versionstr == "v1" { - version = 0 - } - } else { - // Go1.8 extensible encoding - // read version string and extract version number (ignore anything after the version number) - versionstr = p.rawStringln(b) - if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { - if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { - version = v - } - } - } - p.version = version - - // read version specific flags - extend as necessary - switch p.version { - // case currentVersion: - // ... - // fallthrough - case currentVersion, 5, 4, 3, 2, 1: - p.debugFormat = p.rawStringln(p.rawByte()) == "debug" - p.trackAllTypes = p.int() != 0 - p.posInfoFormat = p.int() != 0 - case 0: - // Go1.7 encoding format - nothing to do here - default: - errorf("unknown bexport format version %d (%q)", p.version, versionstr) - } - - // --- generic export data --- - - // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared()...) - - // read package data - pkg = p.pkg() - - // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) - objcount := 0 - for { - tag := p.tagOrIndex() - if tag == endTag { - break - } - p.obj(tag) - objcount++ - } - - // self-verification - if count := p.int(); count != objcount { - errorf("got %d objects; want %d", objcount, count) - } - - // ignore compiler-specific import data - - // complete interfaces - // TODO(gri) re-investigate if we still need to do this in a delayed fashion - for _, typ := range p.interfaceList { - typ.Complete() - } - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
- sort.Sort(byPath(list)) - pkg.SetImports(list) - - // package was imported completely and without errors - pkg.MarkComplete() - - return p.read, pkg, nil -} - func errorf(format string, args ...interface{}) { panic(fmt.Sprintf(format, args...)) } -func (p *importer) pkg() *types.Package { - // if the package was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.pkgList[i] - } - - // otherwise, i is the package tag (< 0) - if i != packageTag { - errorf("unexpected package tag %d version %d", i, p.version) - } - - // read package data - name := p.string() - var path string - if p.version >= 5 { - path = p.path() - } else { - path = p.string() - } - if p.version >= 6 { - p.int() // package height; unused by go/types - } - - // we should never see an empty package name - if name == "" { - errorf("empty package name in import") - } - - // an empty path denotes the package we are currently importing; - // it must be the first package we see - if (path == "") != (len(p.pkgList) == 0) { - errorf("package path %q for pkg index %d", path, len(p.pkgList)) - } - - // if the package was imported before, use that one; otherwise create a new one - if path == "" { - path = p.importpath - } - pkg := p.imports[path] - if pkg == nil { - pkg = types.NewPackage(path, name) - p.imports[path] = pkg - } else if pkg.Name() != name { - errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) - } - p.pkgList = append(p.pkgList, pkg) - - return pkg -} - -// objTag returns the tag value for each object kind. -func objTag(obj types.Object) int { - switch obj.(type) { - case *types.Const: - return constTag - case *types.TypeName: - return typeTag - case *types.Var: - return varTag - case *types.Func: - return funcTag - default: - errorf("unexpected object: %v (%T)", obj, obj) // panics - panic("unreachable") - } -} - -func sameObj(a, b types.Object) bool { - // Because unnamed types are not canonicalized, we cannot simply compare types for - // (pointer) identity. - // Ideally we'd check equality of constant values as well, but this is good enough. - return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) -} - -func (p *importer) declare(obj types.Object) { - pkg := obj.Pkg() - if alt := pkg.Scope().Insert(obj); alt != nil { - // This can only trigger if we import a (non-type) object a second time. - // Excluding type aliases, this cannot happen because 1) we only import a package - // once; and b) we ignore compiler-specific export data which may contain - // functions whose inlined function bodies refer to other functions that - // were already imported. - // However, type aliases require reexporting the original type, so we need - // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, - // method importer.obj, switch case importing functions). - // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
- if !sameObj(obj, alt) { - errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) - } - } -} - -func (p *importer) obj(tag int) { - switch tag { - case constTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - val := p.value() - p.declare(types.NewConst(pos, pkg, name, typ, val)) - - case aliasTag: - // TODO(gri) verify type alias hookup is correct - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewTypeName(pos, pkg, name, typ)) - - case typeTag: - p.typ(nil, nil) - - case varTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewVar(pos, pkg, name, typ)) - - case funcTag: - pos := p.pos() - pkg, name := p.qualifiedName() - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - p.declare(types.NewFunc(pos, pkg, name, sig)) - - default: - errorf("unexpected object tag %d", tag) - } -} - const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go -func (p *importer) pos() token.Pos { - if !p.posInfoFormat { - return token.NoPos - } - - file := p.prevFile - line := p.prevLine - delta := p.int() - line += delta - if p.version >= 5 { - if delta == deltaNewFile { - if n := p.int(); n >= 0 { - // file changed - file = p.path() - line = n - } - } - } else { - if delta == 0 { - if n := p.int(); n >= 0 { - // file changed - file = p.prevFile[:n] + p.string() - line = p.int() - } - } - } - p.prevFile = file - p.prevLine = line - - return p.fake.pos(file, line, 0) -} - // Synthesize a token.Pos type fakeFileSet struct { fset *token.FileSet @@ -389,205 +73,6 @@ var ( fakeLinesOnce sync.Once ) -func (p *importer) qualifiedName() (pkg *types.Package, name string) { - name = p.string() - pkg = p.pkg() - return -} - -func (p *importer) record(t types.Type) { - p.typList = append(p.typList, t) -} - -// A dddSlice is a types.Type representing ...T parameters. -// It only appears for parameter types and does not escape -// the importer. -type dddSlice struct { - elem types.Type -} - -func (t *dddSlice) Underlying() types.Type { return t } -func (t *dddSlice) String() string { return "..." + t.elem.String() } - -// parent is the package which declared the type; parent == nil means -// the package currently imported. The parent package is needed for -// exported struct fields and interface methods which don't contain -// explicit package information in the export data. -// -// A non-nil tname is used as the "owner" of the result type; i.e., -// the result type is the underlying type of tname. tname is used -// to give interface methods a named receiver type where possible. 
-func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { - // if the type was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.typList[i] - } - - // otherwise, i is the type tag (< 0) - switch i { - case namedTag: - // read type object - pos := p.pos() - parent, name := p.qualifiedName() - scope := parent.Scope() - obj := scope.Lookup(name) - - // if the object doesn't exist yet, create and insert it - if obj == nil { - obj = types.NewTypeName(pos, parent, name, nil) - scope.Insert(obj) - } - - if _, ok := obj.(*types.TypeName); !ok { - errorf("pkg = %s, name = %s => %s", parent, name, obj) - } - - // associate new named type with obj if it doesn't exist yet - t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) - - // but record the existing type, if any - tname := obj.Type().(*types.Named) // tname is either t0 or the existing type - p.record(tname) - - // read underlying type - t0.SetUnderlying(p.typ(parent, t0)) - - // interfaces don't have associated methods - if types.IsInterface(t0) { - return tname - } - - // read associated methods - for i := p.int(); i > 0; i-- { - // TODO(gri) replace this with something closer to fieldName - pos := p.pos() - name := p.string() - if !exported(name) { - p.pkg() - } - - recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? - params, isddd := p.paramList() - result, _ := p.paramList() - p.int() // go:nointerface pragma - discarded - - sig := types.NewSignature(recv.At(0), params, result, isddd) - t0.AddMethod(types.NewFunc(pos, parent, name, sig)) - } - - return tname - - case arrayTag: - t := new(types.Array) - if p.trackAllTypes { - p.record(t) - } - - n := p.int64() - *t = *types.NewArray(p.typ(parent, nil), n) - return t - - case sliceTag: - t := new(types.Slice) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewSlice(p.typ(parent, nil)) - return t - - case dddTag: - t := new(dddSlice) - if p.trackAllTypes { - p.record(t) - } - - t.elem = p.typ(parent, nil) - return t - - case structTag: - t := new(types.Struct) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewStruct(p.fieldList(parent)) - return t - - case pointerTag: - t := new(types.Pointer) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewPointer(p.typ(parent, nil)) - return t - - case signatureTag: - t := new(types.Signature) - if p.trackAllTypes { - p.record(t) - } - - params, isddd := p.paramList() - result, _ := p.paramList() - *t = *types.NewSignature(nil, params, result, isddd) - return t - - case interfaceTag: - // Create a dummy entry in the type list. This is safe because we - // cannot expect the interface type to appear in a cycle, as any - // such cycle must contain a named type which would have been - // first defined earlier. - // TODO(gri) Is this still true now that we have type aliases? - // See issue #23225. 
- n := len(p.typList) - if p.trackAllTypes { - p.record(nil) - } - - var embeddeds []types.Type - for n := p.int(); n > 0; n-- { - p.pos() - embeddeds = append(embeddeds, p.typ(parent, nil)) - } - - t := newInterface(p.methodList(parent, tname), embeddeds) - p.interfaceList = append(p.interfaceList, t) - if p.trackAllTypes { - p.typList[n] = t - } - return t - - case mapTag: - t := new(types.Map) - if p.trackAllTypes { - p.record(t) - } - - key := p.typ(parent, nil) - val := p.typ(parent, nil) - *t = *types.NewMap(key, val) - return t - - case chanTag: - t := new(types.Chan) - if p.trackAllTypes { - p.record(t) - } - - dir := chanDir(p.int()) - val := p.typ(parent, nil) - *t = *types.NewChan(dir, val) - return t - - default: - errorf("unexpected type tag %d", i) // panics - panic("unreachable") - } -} - func chanDir(d int) types.ChanDir { // tag values must match the constants in cmd/compile/internal/gc/go.go switch d { @@ -603,394 +88,6 @@ func chanDir(d int) types.ChanDir { } } -func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { - if n := p.int(); n > 0 { - fields = make([]*types.Var, n) - tags = make([]string, n) - for i := range fields { - fields[i], tags[i] = p.field(parent) - } - } - return -} - -func (p *importer) field(parent *types.Package) (*types.Var, string) { - pos := p.pos() - pkg, name, alias := p.fieldName(parent) - typ := p.typ(parent, nil) - tag := p.string() - - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - errorf("named base type expected") - } - anonymous = true - } else if alias { - // anonymous field: we have an explicit name because it's an alias - anonymous = true - } - - return types.NewField(pos, pkg, name, typ, anonymous), tag -} - -func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { - if n := p.int(); n > 0 { - methods = make([]*types.Func, n) - for i := range methods { - methods[i] = p.method(parent, baseType) - } - } - return -} - -func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { - pos := p.pos() - pkg, name, _ := p.fieldName(parent) - // If we don't have a baseType, use a nil receiver. - // A receiver using the actual interface type (which - // we don't know yet) will be filled in when we call - // types.Interface.Complete. 
- var recv *types.Var - if baseType != nil { - recv = types.NewVar(token.NoPos, parent, "", baseType) - } - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(recv, params, result, isddd) - return types.NewFunc(pos, pkg, name, sig) -} - -func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { - name = p.string() - pkg = parent - if pkg == nil { - // use the imported package instead - pkg = p.pkgList[0] - } - if p.version == 0 && name == "_" { - // version 0 didn't export a package for _ fields - return - } - switch name { - case "": - // 1) field name matches base type name and is exported: nothing to do - case "?": - // 2) field name matches base type name and is not exported: need package - name = "" - pkg = p.pkg() - case "@": - // 3) field name doesn't match type name (alias) - name = p.string() - alias = true - fallthrough - default: - if !exported(name) { - pkg = p.pkg() - } - } - return -} - -func (p *importer) paramList() (*types.Tuple, bool) { - n := p.int() - if n == 0 { - return nil, false - } - // negative length indicates unnamed parameters - named := true - if n < 0 { - n = -n - named = false - } - // n > 0 - params := make([]*types.Var, n) - isddd := false - for i := range params { - params[i], isddd = p.param(named) - } - return types.NewTuple(params...), isddd -} - -func (p *importer) param(named bool) (*types.Var, bool) { - t := p.typ(nil, nil) - td, isddd := t.(*dddSlice) - if isddd { - t = types.NewSlice(td.elem) - } - - var pkg *types.Package - var name string - if named { - name = p.string() - if name == "" { - errorf("expected named parameter") - } - if name != "_" { - pkg = p.pkg() - } - if i := strings.Index(name, "·"); i > 0 { - name = name[:i] // cut off gc-specific parameter numbering - } - } - - // read and discard compiler-specific info - p.string() - - return types.NewVar(token.NoPos, pkg, name, t), isddd -} - -func exported(name string) bool { - ch, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(ch) -} - -func (p *importer) value() constant.Value { - switch tag := p.tagOrIndex(); tag { - case falseTag: - return constant.MakeBool(false) - case trueTag: - return constant.MakeBool(true) - case int64Tag: - return constant.MakeInt64(p.int64()) - case floatTag: - return p.float() - case complexTag: - re := p.float() - im := p.float() - return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - case stringTag: - return constant.MakeString(p.string()) - case unknownTag: - return constant.MakeUnknown() - default: - errorf("unexpected value tag %d", tag) // panics - panic("unreachable") - } -} - -func (p *importer) float() constant.Value { - sign := p.int() - if sign == 0 { - return constant.MakeInt64(0) - } - - exp := p.int() - mant := []byte(p.string()) // big endian - - // remove leading 0's if any - for len(mant) > 0 && mant[0] == 0 { - mant = mant[1:] - } - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { - mant[i], mant[j] = mant[j], mant[i] - } - - // adjust exponent (constant.MakeFromBytes creates an integer value, - // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) - exp -= len(mant) << 3 - if len(mant) > 0 { - for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { - exp++ - } - } - - x := constant.MakeFromBytes(mant) - switch { - case exp < 0: - d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - } - - if sign < 0 { - x = constant.UnaryOp(token.SUB, x, 0) - } - return x -} - -// ---------------------------------------------------------------------------- -// Low-level decoders - -func (p *importer) tagOrIndex() int { - if p.debugFormat { - p.marker('t') - } - - return int(p.rawInt64()) -} - -func (p *importer) int() int { - x := p.int64() - if int64(int(x)) != x { - errorf("exported integer too large") - } - return int(x) -} - -func (p *importer) int64() int64 { - if p.debugFormat { - p.marker('i') - } - - return p.rawInt64() -} - -func (p *importer) path() string { - if p.debugFormat { - p.marker('p') - } - // if the path was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.pathList[i] - } - // otherwise, i is the negative path length (< 0) - a := make([]string, -i) - for n := range a { - a[n] = p.string() - } - s := strings.Join(a, "/") - p.pathList = append(p.pathList, s) - return s -} - -func (p *importer) string() string { - if p.debugFormat { - p.marker('s') - } - // if the string was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.strList[i] - } - // otherwise, i is the negative string length (< 0) - if n := int(-i); n <= cap(p.buf) { - p.buf = p.buf[:n] - } else { - p.buf = make([]byte, n) - } - for i := range p.buf { - p.buf[i] = p.rawByte() - } - s := string(p.buf) - p.strList = append(p.strList, s) - return s -} - -func (p *importer) marker(want byte) { - if got := p.rawByte(); got != want { - errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) - } - - pos := p.read - if n := int(p.rawInt64()); n != pos { - errorf("incorrect position: got %d; want %d", n, pos) - } -} - -// rawInt64 should only be used by low-level decoders. -func (p *importer) rawInt64() int64 { - i, err := binary.ReadVarint(p) - if err != nil { - errorf("read error: %v", err) - } - return i -} - -// rawStringln should only be used to read the initial version string. -func (p *importer) rawStringln(b byte) string { - p.buf = p.buf[:0] - for b != '\n' { - p.buf = append(p.buf, b) - b = p.rawByte() - } - return string(p.buf) -} - -// needed for binary.ReadVarint in rawInt64 -func (p *importer) ReadByte() (byte, error) { - return p.rawByte(), nil -} - -// byte is the bottleneck interface for reading p.data. -// It unescapes '|' 'S' to '$' and '|' '|' to '|'. -// rawByte should only be used by low-level decoders. -func (p *importer) rawByte() byte { - b := p.data[0] - r := 1 - if b == '|' { - b = p.data[1] - r = 2 - switch b { - case 'S': - b = '$' - case '|': - // nothing to do - default: - errorf("unexpected escape sequence in export data") - } - } - p.data = p.data[r:] - p.read += r - return b - -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
-const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag // only used by gc (appears in exported inlined function bodies) - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - var predeclOnce sync.Once var predecl []types.Type // initialized lazily diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 0372fb3a64..39df91124a 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -7,6 +7,18 @@ // Package gcimporter provides various functions for reading // gc-generated object files that can be used to implement the // Importer interface defined by the Go 1.5 standard library package. +// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package. package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( @@ -17,7 +29,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -209,7 +220,7 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func switch hdr { case "$$B\n": var data []byte - data, err = ioutil.ReadAll(buf) + data, err = io.ReadAll(buf) if err != nil { break } @@ -218,20 +229,17 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Or, define a new standard go/types/gcexportdata package. fset := token.NewFileSet() - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. + // Select appropriate importer. 
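The dispatch implemented by the hunk below can be summarized in a standalone sketch; the version ranges come from the comments in this change ('v'/'c'/'d' binary formats up to Go 1.10, now rejected; 'i' indexed up to Go 1.19; 'u' unified from Go 1.20):

package main

import "fmt"

// describeExportData classifies gc export data by its leading byte,
// mirroring the switch in the updated Import function.
func describeExportData(data []byte) string {
	if len(data) == 0 {
		return "empty export data"
	}
	switch data[0] {
	case 'v', 'c', 'd':
		return "binary format (up to Go 1.10): no longer supported"
	case 'i':
		return "indexed format (up to Go 1.19)"
	case 'u':
		return "unified format (Go 1.20 and later)"
	default:
		return fmt.Sprintf("unknown format byte %q", data[0])
	}
}

func main() {
	for _, b := range []byte{'v', 'i', 'u', '?'} {
		fmt.Printf("%q => %s\n", b, describeExportData([]byte{b}))
	}
}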
 	if len(data) > 0 {
 		switch data[0] {
-		case 'i':
-			_, pkg, err := IImportData(fset, packages, data[1:], id)
-			return pkg, err
+		case 'v', 'c', 'd': // binary, till go1.10
+			return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
 
-		case 'v', 'c', 'd':
-			_, pkg, err := BImportData(fset, packages, data, id)
+		case 'i': // indexed, till go1.19
+			_, pkg, err := IImportData(fset, packages, data[1:], id)
 			return pkg, err
 
-		case 'u':
+		case 'u': // unified, from go1.20
 			_, pkg, err := UImportData(fset, packages, data[1:size], id)
 			return pkg, err
 
@@ -251,13 +259,6 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
 	return
 }
 
-func deref(typ types.Type) types.Type {
-	if p, _ := typ.(*types.Pointer); p != nil {
-		return p.Elem()
-	}
-	return typ
-}
-
 type byPath []*types.Package
 
 func (a byPath) Len() int      { return len(a) }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index ba53cdcdd1..638fc1d3b8 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -22,17 +22,23 @@ import (
 	"strconv"
 	"strings"
 
+	"golang.org/x/tools/go/types/objectpath"
+	"golang.org/x/tools/internal/aliases"
 	"golang.org/x/tools/internal/tokeninternal"
-	"golang.org/x/tools/internal/typeparams"
 )
 
 // IExportShallow encodes "shallow" export data for the specified package.
 //
-// No promises are made about the encoding other than that it can be
-// decoded by the same version of IIExportShallow. If you plan to save
-// export data in the file system, be sure to include a cryptographic
-// digest of the executable in the key to avoid version skew.
-func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) {
+// No promises are made about the encoding other than that it can be decoded by
+// the same version of IExportShallow. If you plan to save export data in the
+// file system, be sure to include a cryptographic digest of the executable in
+// the key to avoid version skew.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during export.
+// TODO(rfindley): remove reportf when we are confident enough in the new
+// objectpath encoding.
+func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) {
 	// In principle this operation can only fail if out.Write fails,
 	// but that's impossible for bytes.Buffer---and as a matter of
 	// fact iexportCommon doesn't even check for I/O errors.
@@ -44,22 +50,30 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) {
 	return out.Bytes(), err
 }
 
-// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow
-// in the same executable. This function cannot import data from
+// IImportShallow decodes "shallow" types.Package data encoded by
+// IExportShallow in the same executable. This function cannot import data from
 // cmd/compile or gcexportdata.Write.
-func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) {
+//
+// The importer calls getPackages to obtain package symbols for all
+// packages mentioned in the export data, including the one being
+// decoded.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during import.
+// TODO(rfindley): remove reportf when we are confident enough in the new
+// objectpath encoding.
+func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) {
 	const bundle = false
-	pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert)
+	const shallow = true
+	pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf)
 	if err != nil {
 		return nil, err
 	}
 	return pkgs[0], nil
 }
 
-// InsertType is the type of a function that creates a types.TypeName
-// object for a named type and inserts it into the scope of the
-// specified Package.
-type InsertType = func(pkg *types.Package, name string)
+// ReportFunc is the type of a function used to report formatted bugs.
+type ReportFunc = func(string, ...interface{})
 
 // Current bundled export format version. Increase with each format change.
 // 0: initial implementation
@@ -313,8 +327,9 @@ type iexporter struct {
 	out     *bytes.Buffer
 	version int
 
-	shallow  bool           // don't put types from other packages in the index
-	localpkg *types.Package // (nil in bundle mode)
+	shallow    bool                // don't put types from other packages in the index
+	objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated
+	localpkg   *types.Package      // (nil in bundle mode)
 
 	// allPkgs tracks all packages that have been referenced by
 	// the export data, so we can ensure to include them in the
@@ -354,6 +369,17 @@ func (p *iexporter) trace(format string, args ...interface{}) {
 	fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
 }
 
+// objectpathEncoder returns the lazily allocated objectpath.Encoder to use
+// when encoding objects in other packages during shallow export.
+//
+// Using a shared Encoder amortizes some of the cost of objectpath search.
+func (p *iexporter) objectpathEncoder() *objectpath.Encoder {
+	if p.objEncoder == nil {
+		p.objEncoder = new(objectpath.Encoder)
+	}
+	return p.objEncoder
+}
+
 // stringOff returns the offset of s within the string section.
 // If not already present, it's added to the end.
 func (p *iexporter) stringOff(s string) uint64 {
@@ -413,7 +439,6 @@ type exportWriter struct {
 	p *iexporter
 
 	data       intWriter
-	currPkg    *types.Package
 	prevFile   string
 	prevLine   int64
 	prevColumn int64
@@ -436,7 +461,6 @@ func (p *iexporter) doDecl(obj types.Object) {
 		}()
 	}
 	w := p.newWriter()
-	w.setPkg(obj.Pkg(), false)
 
 	switch obj := obj.(type) {
 	case *types.Var:
@@ -457,7 +481,7 @@ func (p *iexporter) doDecl(obj types.Object) {
 		}
 
 		// Function.
-		if typeparams.ForSignature(sig).Len() == 0 {
+		if sig.TypeParams().Len() == 0 {
 			w.tag('F')
 		} else {
 			w.tag('G')
 		}
@@ -470,7 +494,7 @@ func (p *iexporter) doDecl(obj types.Object) {
 		//
 		// While importing the type parameters, tparamList computes and records
 		// their export name, so that it can be later used when writing the index.
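For context, the objectpath mechanism wired in above (see objectpathEncoder) is also available as a public API. A minimal, self-contained sketch of the encode/resolve round trip, using the public golang.org/x/tools/go/types/objectpath package rather than this internal code (illustrative only, not part of this change; the printed path spelling is unspecified):

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/types/objectpath"
)

const src = `package a

type T struct{ F int }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "a.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("a", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Encode a stable, package-relative path for the field a.T.F.
	field := pkg.Scope().Lookup("T").Type().Underlying().(*types.Struct).Field(0)
	var enc objectpath.Encoder // reusable across many objects, as in the exporter
	path, err := enc.For(field)
	if err != nil {
		log.Fatal(err)
	}

	// Resolve the path back to the canonical object, as the importer does.
	obj, err := objectpath.Object(pkg, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(path, obj == field) // identity holds within the same package
}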
- if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 { + if tparams := sig.TypeParams(); tparams.Len() > 0 { w.tparamList(obj.Name(), tparams, obj.Pkg()) } w.signature(sig) @@ -483,14 +507,14 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := t.(*typeparams.TypeParam); ok { + if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { w.tag('P') w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := constraint.(*types.Interface); iface != nil { - implicit = typeparams.IsImplicit(iface) + if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + implicit = iface.IsImplicit() } w.bool(implicit) } @@ -511,17 +535,17 @@ func (p *iexporter) doDecl(obj types.Object) { panic(internalErrorf("%s is not a defined type", t)) } - if typeparams.ForNamed(named).Len() == 0 { + if named.TypeParams().Len() == 0 { w.tag('T') } else { w.tag('U') } w.pos(obj.Pos()) - if typeparams.ForNamed(named).Len() > 0 { + if named.TypeParams().Len() > 0 { // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. - w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg()) + w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) } underlying := obj.Type().Underlying() @@ -541,7 +565,7 @@ func (p *iexporter) doDecl(obj types.Object) { // Receiver type parameters are type arguments of the receiver type, so // their name must be qualified before exporting recv. - if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 { + if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { prefix := obj.Name() + "." + m.Name() for i := 0; i < rparams.Len(); i++ { rparam := rparams.At(i) @@ -673,6 +697,9 @@ func (w *exportWriter) qualifiedType(obj *types.TypeName) { w.pkg(obj.Pkg()) } +// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass +// it in explicitly into signatures and structs that may use it for +// constructing fields. func (w *exportWriter) typ(t types.Type, pkg *types.Package) { w.data.uint64(w.p.typOff(t, pkg)) } @@ -712,20 +739,22 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { + // TODO(adonovan): support types.Alias. + case *types.Named: - if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 { + if targs := t.TypeArgs(); targs.Len() > 0 { w.startType(instanceType) // TODO(rfindley): investigate if this position is correct, and if it // matters. w.pos(t.Obj().Pos()) w.typeList(targs, pkg) - w.typ(typeparams.NamedTypeOrigin(t), pkg) + w.typ(t.Origin(), pkg) return } w.startType(definedType) w.qualifiedType(t.Obj()) - case *typeparams.TypeParam: + case *types.TypeParam: w.startType(typeParamType) w.qualifiedType(t.Obj()) @@ -764,37 +793,60 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Signature: w.startType(signatureType) - w.setPkg(pkg, true) + w.pkg(pkg) w.signature(t) case *types.Struct: w.startType(structType) n := t.NumFields() + // Even for struct{} we must emit some qualifying package, because that's + // what the compiler does, and thus that's what the importer expects. + fieldPkg := pkg if n > 0 { - w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects - } else { - w.setPkg(pkg, true) + fieldPkg = t.Field(0).Pkg() + } + if fieldPkg == nil { + // TODO(rfindley): improve this very hacky logic. 
+ // + // The importer expects a package to be set for all struct types, even + // those with no fields. A better encoding might be to set NumFields + // before pkg. setPkg panics with a nil package, which may be possible + // to reach with invalid packages (and perhaps valid packages, too?), so + // (arbitrarily) set the localpkg if available. + // + // Alternatively, we may be able to simply guarantee that pkg != nil, by + // reconsidering the encoding of constant values. + if w.p.shallow { + fieldPkg = w.p.localpkg + } else { + panic(internalErrorf("no package to set for empty struct")) + } } + w.pkg(fieldPkg) w.uint64(uint64(n)) + for i := 0; i < n; i++ { f := t.Field(i) + if w.p.shallow { + w.objectPath(f) + } w.pos(f.Pos()) w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg - w.typ(f.Type(), pkg) + w.typ(f.Type(), fieldPkg) w.bool(f.Anonymous()) w.string(t.Tag(i)) // note (or tag) } case *types.Interface: w.startType(interfaceType) - w.setPkg(pkg, true) + w.pkg(pkg) n := t.NumEmbeddeds() w.uint64(uint64(n)) for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := ft.(*types.Named); named != nil { + if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) @@ -802,17 +854,23 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.typ(ft, tPkg) } + // See comment for struct fields. In shallow mode we change the encoding + // for interface methods that are promoted from other packages. + n = t.NumExplicitMethods() w.uint64(uint64(n)) for i := 0; i < n; i++ { m := t.ExplicitMethod(i) + if w.p.shallow { + w.objectPath(m) + } w.pos(m.Pos()) w.string(m.Name()) sig, _ := m.Type().(*types.Signature) w.signature(sig) } - case *typeparams.Union: + case *types.Union: w.startType(unionType) nt := t.Len() w.uint64(uint64(nt)) @@ -827,12 +885,61 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { } } -func (w *exportWriter) setPkg(pkg *types.Package, write bool) { - if write { - w.pkg(pkg) +// objectPath writes the package and objectPath to use to look up obj in a +// different package, when encoding in "shallow" mode. +// +// When doing a shallow import, the importer creates only the local package, +// and requests package symbols for dependencies from the client. +// However, certain types defined in the local package may hold objects defined +// (perhaps deeply) within another package. +// +// For example, consider the following: +// +// package a +// func F() chan * map[string] struct { X int } +// +// package b +// import "a" +// var B = a.F() +// +// In this example, the type of b.B holds fields defined in package a. +// In order to have the correct canonical objects for the field defined in the +// type of B, they are encoded as objectPaths and later looked up in the +// importer. The same problem applies to interface methods. +func (w *exportWriter) objectPath(obj types.Object) { + if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg { + // obj.Pkg() may be nil for the builtin error.Error. + // In this case, or if obj is declared in the local package, no need to + // encode. + w.string("") + return } - - w.currPkg = pkg + objectPath, err := w.p.objectpathEncoder().For(obj) + if err != nil { + // Fall back to the empty string, which will cause the importer to create a + // new object, which matches earlier behavior. 
Creating a new object is + // sufficient for many purposes (such as type checking), but causes certain + // references algorithms to fail (golang/go#60819). However, we didn't + // notice this problem during months of gopls@v0.12.0 testing. + // + // TODO(golang/go#61674): this workaround is insufficient, as in the case + // where the field forwarded from an instantiated type that may not appear + // in the export data of the original package: + // + // // package a + // type A[P any] struct{ F P } + // + // // package b + // type B a.A[int] + // + // We need to update references algorithms not to depend on this + // de-duplication, at which point we may want to simply remove the + // workaround here. + w.string("") + return + } + w.string(string(objectPath)) + w.pkg(obj.Pkg()) } func (w *exportWriter) signature(sig *types.Signature) { @@ -843,14 +950,14 @@ func (w *exportWriter) signature(sig *types.Signature) { } } -func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) { +func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { w.uint64(uint64(ts.Len())) for i := 0; i < ts.Len(); i++ { w.typ(ts.At(i), pkg) } } -func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) { +func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { ll := uint64(list.Len()) w.uint64(ll) for i := 0; i < list.Len(); i++ { @@ -868,7 +975,7 @@ const blankMarker = "$" // differs from its actual object name: it is prefixed with a qualifier, and // blank type parameter names are disambiguated by their index in the type // parameter list. -func tparamExportName(prefix string, tparam *typeparams.TypeParam) string { +func tparamExportName(prefix string, tparam *types.TypeParam) string { assert(prefix != "") name := tparam.Obj().Name() if name == "_" { @@ -913,6 +1020,17 @@ func (w *exportWriter) value(typ types.Type, v constant.Value) { w.int64(int64(v.Kind())) } + if v.Kind() == constant.Unknown { + // golang/go#60605: treat unknown constant values as if they have invalid type + // + // This loses some fidelity over the package type-checked from source, but that + // is acceptable. + // + // TODO(rfindley): we should switch on the recorded constant kind rather + // than the constant type + return + } + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { case types.IsBoolean: w.bool(constant.BoolVal(v)) @@ -969,6 +1087,16 @@ func constantToFloat(x constant.Value) *big.Float { return &f } +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + // mpint exports a multi-precision integer. // // For unsigned types, small values are written out as a single @@ -1178,3 +1306,19 @@ func (q *objQueue) popHead() types.Object { q.head++ return obj } + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +// TODO(adonovan): make this call panic, so that it's symmetric with errorf. +// Otherwise it's easy to forget to do anything with the error. 
+//
+// TODO(adonovan): also, consider switching the names "errorf" and
+// "internalErrorf" as the former is used for bugs, whose cause is
+// internal inconsistency, whereas the latter is used for ordinary
+// situations like bad input, whose cause is external.
+func internalErrorf(format string, args ...interface{}) error {
+	return internalError(fmt.Sprintf(format, args...))
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index 448f903e86..4d50eb8e58 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -21,7 +21,9 @@ import (
 	"sort"
 	"strings"
 
-	"golang.org/x/tools/internal/typeparams"
+	"golang.org/x/tools/go/types/objectpath"
+	"golang.org/x/tools/internal/aliases"
+	"golang.org/x/tools/internal/typesinternal"
 )
 
 type intReader struct {
@@ -85,7 +87,7 @@ const (
 // If the export data version is not recognized or the format is otherwise
 // compromised, an error is returned.
 func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
-	pkgs, err := iimportCommon(fset, imports, data, false, path, nil)
+	pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil)
 	if err != nil {
 		return 0, nil, err
 	}
@@ -94,10 +96,49 @@
 
 // IImportBundle imports a set of packages from the serialized package bundle.
 func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
-	return iimportCommon(fset, imports, data, true, "", nil)
+	return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil)
 }
 
-func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) {
+// A GetPackagesFunc function obtains the non-nil symbols for a set of
+// packages, creating and recursively importing them as needed. An
+// implementation should store each package symbol in the Pkg
+// field of the items array.
+//
+// Any error causes importing to fail. This can be used to quickly read
+// the import manifest of an export data file without fully decoding it.
+type GetPackagesFunc = func(items []GetPackagesItem) error
+
+// A GetPackagesItem is a request from the importer for the package
+// symbol of the specified name and path.
+type GetPackagesItem struct {
+	Name, Path string
+	Pkg        *types.Package // to be filled in by GetPackagesFunc call
+
+	// private importer state
+	pathOffset uint64
+	nameIndex  map[string]uint64
+}
+
+// GetPackagesFromMap returns a GetPackagesFunc that retrieves
+// packages from the given map of package path to package.
+//
+// The returned function may mutate m: each requested package that is not
+// found is created with types.NewPackage and inserted into m.
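A usage sketch of GetPackagesFromMap, whose definition follows. It is hedged: gcimporter is internal to x/tools, so this could only compile inside the package itself (e.g. as a test), and it assumes imports of fmt, log, and go/types:

func ExampleGetPackagesFromMap() {
	m := make(map[string]*types.Package)
	get := GetPackagesFromMap(m)

	// "strings" is not yet in m, so the callback creates it with
	// types.NewPackage and inserts it into the map.
	items := []GetPackagesItem{{Name: "strings", Path: "strings"}}
	if err := get(items); err != nil {
		log.Fatal(err)
	}
	fmt.Println(items[0].Pkg.Path(), m["strings"] == items[0].Pkg)
	// Output: strings true
}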
+func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { + return func(items []GetPackagesItem) error { + for i, item := range items { + pkg, ok := m[item.Path] + if !ok { + pkg = types.NewPackage(item.Path, item.Name) + m[item.Path] = pkg + } + items[i].Pkg = pkg + } + return nil + } +} + +func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) { const currentVersion = iexportVersionCurrent version := int64(-1) if !debug { @@ -108,7 +149,7 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data } else if version > currentVersion { err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) } } }() @@ -117,11 +158,8 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data r := &intReader{bytes.NewReader(data), path} if bundle { - bundleVersion := r.uint64() - switch bundleVersion { - case bundleVersion: - default: - errorf("unknown bundle format version %d", bundleVersion) + if v := r.uint64(); v != bundleVersion { + errorf("unknown bundle format version %d", v) } } @@ -139,7 +177,7 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data sLen := int64(r.uint64()) var fLen int64 var fileOffset []uint64 - if insert != nil { + if shallow { // Shallow mode uses a different position encoding. fLen = int64(r.uint64()) fileOffset = make([]uint64, r.uint64()) @@ -158,7 +196,8 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data p := iimporter{ version: int(version), ipath: path, - insert: insert, + shallow: shallow, + reportf: reportf, stringData: stringData, stringCache: make(map[uint64]string), @@ -185,8 +224,10 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data p.typCache[uint64(i)] = pt } - pkgList := make([]*types.Package, r.uint64()) - for i := range pkgList { + // Gather the relevant packages from the manifest. + items := make([]GetPackagesItem, r.uint64()) + uniquePkgPaths := make(map[string]bool) + for i := range items { pkgPathOff := r.uint64() pkgPath := p.stringAt(pkgPathOff) pkgName := p.stringAt(r.uint64()) @@ -195,30 +236,48 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data if pkgPath == "" { pkgPath = path } - pkg := imports[pkgPath] - if pkg == nil { - pkg = types.NewPackage(pkgPath, pkgName) - imports[pkgPath] = pkg - } else if pkg.Name() != pkgName { - errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) - } - if i == 0 && !bundle { - p.localpkg = pkg - } - - p.pkgCache[pkgPathOff] = pkg + items[i].Name = pkgName + items[i].Path = pkgPath + items[i].pathOffset = pkgPathOff // Read index for package. nameIndex := make(map[string]uint64) nSyms := r.uint64() - // In shallow mode we don't expect an index for other packages. - assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil) + // In shallow mode, only the current package (i=0) has an index. 
+ assert(!(shallow && i > 0 && nSyms != 0)) for ; nSyms > 0; nSyms-- { name := p.stringAt(r.uint64()) nameIndex[name] = r.uint64() } - p.pkgIndex[pkg] = nameIndex + items[i].nameIndex = nameIndex + + uniquePkgPaths[pkgPath] = true + } + // Debugging #63822; hypothesis: there are duplicate PkgPaths. + if len(uniquePkgPaths) != len(items) { + reportf("found duplicate PkgPaths while reading export data manifest: %v", items) + } + + // Request packages all at once from the client, + // enabling a parallel implementation. + if err := getPackages(items); err != nil { + return nil, err // don't wrap this error + } + + // Check the results and complete the index. + pkgList := make([]*types.Package, len(items)) + for i, item := range items { + pkg := item.Pkg + if pkg == nil { + errorf("internal error: getPackages returned nil package for %q", item.Path) + } else if pkg.Path() != item.Path { + errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path) + } else if pkg.Name() != item.Name { + errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name) + } + p.pkgCache[item.pathOffset] = pkg + p.pkgIndex[pkg] = item.nameIndex pkgList[i] = pkg } @@ -270,18 +329,25 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data // Therefore, we defer calling SetConstraint there, and call it here instead // after all types are complete. for _, d := range p.later { - typeparams.SetTypeParamConstraint(d.t, d.constraint) + d.t.SetConstraint(d.constraint) } for _, typ := range p.interfaceList { typ.Complete() } + // Workaround for golang/go#61561. See the doc for instanceList for details. + for _, typ := range p.instanceList { + if iface, _ := typ.Underlying().(*types.Interface); iface != nil { + iface.Complete() + } + } + return pkgs, nil } type setConstraintArgs struct { - t *typeparams.TypeParam + t *types.TypeParam constraint types.Type } @@ -289,8 +355,8 @@ type iimporter struct { version int ipath string - localpkg *types.Package - insert func(pkg *types.Package, name string) // "shallow" mode only + shallow bool + reportf ReportFunc // if non-nil, used to report bugs stringData []byte stringCache map[uint64]string @@ -307,6 +373,12 @@ type iimporter struct { fake fakeFileSet interfaceList []*types.Interface + // Workaround for the go/types bug golang/go#61561: instances produced during + // instantiation may contain incomplete interfaces. Here we only complete the + // underlying type of the instance, which is the most common case but doesn't + // handle parameterized interface literals defined deeper in the type. + instanceList []types.Type // instances for later completion (see golang/go#61561) + // Arguments for calls to SetConstraint that are deferred due to recursive types later []setConstraintArgs @@ -338,13 +410,9 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) { off, ok := p.pkgIndex[pkg][name] if !ok { - // In "shallow" mode, call back to the application to - // find the object and insert it into the package scope. - if p.insert != nil { - assert(pkg != p.localpkg) - p.insert(pkg, name) // "can't fail" - return - } + // In deep mode, the index should be complete. In shallow + // mode, we should have already recursively loaded necessary + // dependencies so the above Lookup succeeds. 
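The single batched getPackages call above exists so that a client can resolve the whole import manifest in parallel. A hedged sketch of such a client-side implementation, where loadPackage is a hypothetical resolver and errgroup is golang.org/x/sync/errgroup:

func parallelGetPackages(items []GetPackagesItem) error {
	var g errgroup.Group
	for i := range items {
		i := i // capture loop variable (pre-go1.22 semantics)
		g.Go(func() error {
			// loadPackage is hypothetical: it must return a *types.Package
			// whose Path and Name match the requested item.
			pkg, err := loadPackage(items[i].Path, items[i].Name)
			items[i].Pkg = pkg
			return err
		})
	}
	return g.Wait()
}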
errorf("%v.%v not in index", pkg, name) } @@ -456,7 +524,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := rhs.(*types.Interface) + iface, _ := aliases.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -489,7 +557,7 @@ func (r *importReader) obj(name string) { r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) case 'F', 'G': - var tparams []*typeparams.TypeParam + var tparams []*types.TypeParam if tag == 'G' { tparams = r.tparamList() } @@ -506,7 +574,7 @@ func (r *importReader) obj(name string) { r.declare(obj) if tag == 'U' { tparams := r.tparamList() - typeparams.SetForNamed(named, tparams) + named.SetTypeParams(tparams) } underlying := r.p.typAt(r.uint64(), named).Underlying() @@ -521,14 +589,13 @@ func (r *importReader) obj(name string) { // If the receiver has any targs, set those as the // rparams of the method (since those are the // typeparams being used in the method sig/body). - base := baseType(recv.Type()) - assert(base != nil) - targs := typeparams.NamedTypeArgs(base) - var rparams []*typeparams.TypeParam + _, recvNamed := typesinternal.ReceiverNamed(recv) + targs := recvNamed.TypeArgs() + var rparams []*types.TypeParam if targs.Len() > 0 { - rparams = make([]*typeparams.TypeParam, targs.Len()) + rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = targs.At(i).(*typeparams.TypeParam) + rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -546,7 +613,7 @@ func (r *importReader) obj(name string) { } name0 := tparamName(name) tn := types.NewTypeName(pos, r.currPkg, name0, nil) - t := typeparams.NewTypeParam(tn, nil) + t := types.NewTypeParam(tn, nil) // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. @@ -558,11 +625,11 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := constraint.(*types.Interface) + iface, _ := aliases.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } - typeparams.MarkImplicit(iface) + iface.MarkImplicit() } // The constraint type may not be complete, if we // are in the middle of a type recursion involving type @@ -711,7 +778,8 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) { } func (r *importReader) pos() token.Pos { - if r.p.insert != nil { // shallow mode + if r.p.shallow { + // precise offsets are encoded only in shallow mode return r.posv2() } if r.p.version >= iexportVersionPosCol { @@ -764,7 +832,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := t.(*types.Interface) + _, ok := aliases.Unalias(t).(*types.Interface) return ok } @@ -812,13 +880,28 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { fields := make([]*types.Var, r.uint64()) tags := make([]string, len(fields)) for i := range fields { + var field *types.Var + if r.p.shallow { + field, _ = r.objectPathObject().(*types.Var) + } + fpos := r.pos() fname := r.ident() ftyp := r.typ() emb := r.bool() tag := r.string() - fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + // Either this is not a shallow import, the field is local, or the + // encoded objectPath failed to produce an object (a bug). + // + // Even in this last, buggy case, fall back on creating a new field. 
As + // discussed in iexport.go, this is not correct, but mostly works and is + // preferable to failing (for now at least). + if field == nil { + field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + } + + fields[i] = field tags[i] = tag } return types.NewStruct(fields, tags) @@ -834,6 +917,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods := make([]*types.Func, r.uint64()) for i := range methods { + var method *types.Func + if r.p.shallow { + method, _ = r.objectPathObject().(*types.Func) + } + mpos := r.pos() mname := r.ident() @@ -843,9 +931,12 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { if base != nil { recv = types.NewVar(token.NoPos, r.currPkg, "", base) } - msig := r.signature(recv, nil, nil) - methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + + if method == nil { + method = types.NewFunc(mpos, r.currPkg, mname, msig) + } + methods[i] = method } typ := newInterface(methods, embeddeds) @@ -882,18 +973,21 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // The imported instantiated type doesn't include any methods, so // we must always use the methods of the base (orig) type. // TODO provide a non-nil *Environment - t, _ := typeparams.Instantiate(nil, baseType, targs, false) + t, _ := types.Instantiate(nil, baseType, targs, false) + + // Workaround for golang/go#61561. See the doc for instanceList for details. + r.p.instanceList = append(r.p.instanceList, t) return t case unionType: if r.p.version < iexportVersionGenerics { errorf("unexpected instantiation type") } - terms := make([]*typeparams.Term, r.uint64()) + terms := make([]*types.Term, r.uint64()) for i := range terms { - terms[i] = typeparams.NewTerm(r.bool(), r.typ()) + terms[i] = types.NewTerm(r.bool(), r.typ()) } - return typeparams.NewUnion(terms) + return types.NewUnion(terms) } } @@ -901,23 +995,43 @@ func (r *importReader) kind() itag { return itag(r.uint64()) } -func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { +// objectPathObject is the inverse of exportWriter.objectPath. +// +// In shallow mode, certain fields and methods may need to be looked up in an +// imported package. See the doc for exportWriter.objectPath for a full +// explanation. +func (r *importReader) objectPathObject() types.Object { + objPath := objectpath.Path(r.string()) + if objPath == "" { + return nil + } + pkg := r.pkg() + obj, err := objectpath.Object(pkg, objPath) + if err != nil { + if r.p.reportf != nil { + r.p.reportf("failed to find object for objectPath %q: %v", objPath, err) + } + } + return obj +} + +func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { params := r.paramList() results := r.paramList() variadic := params.Len() > 0 && r.bool() - return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic) + return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) } -func (r *importReader) tparamList() []*typeparams.TypeParam { +func (r *importReader) tparamList() []*types.TypeParam { n := r.uint64() if n == 0 { return nil } - xs := make([]*typeparams.TypeParam, n) + xs := make([]*types.TypeParam, n) for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. 
- xs[i] = r.typ().(*typeparams.TypeParam) + xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) } return xs } @@ -964,13 +1078,3 @@ func (r *importReader) byte() byte { } return x } - -func baseType(typ types.Type) *types.Named { - // pointer receivers are never types.Named types - if p, _ := typ.(*types.Pointer); p != nil { - typ = p.Elem() - } - // receiver base types are always (possibly generic) types.Named types - n, _ := typ.(*types.Named) - return n -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go deleted file mode 100644 index d892273efb..0000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGo1_11 - -func additionalPredeclared() []types.Type { - return nil -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go index edbe6ea704..0cd3b91b65 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.18 -// +build go1.18 - package gcimporter import "go/types" diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go index 286bf44548..38b624cada 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !(go1.18 && goexperiment.unified) -// +build !go1.18 !goexperiment.unified +//go:build !goexperiment.unified +// +build !goexperiment.unified package gcimporter diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go index b5d69ffbe6..b5118d0b3a 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.18 && goexperiment.unified -// +build go1.18,goexperiment.unified +//go:build goexperiment.unified +// +build goexperiment.unified package gcimporter diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go deleted file mode 100644 index 8eb20729c2..0000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.18 -// +build !go1.18 - -package gcimporter - -import ( - "fmt" - "go/token" - "go/types" -) - -func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") - return -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index b285a11ce2..f4edc46ab7 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -4,16 +4,16 @@ // Derived from go/internal/gcimporter/ureader.go -//go:build go1.18 -// +build go1.18 - package gcimporter import ( + "fmt" "go/token" "go/types" + "sort" "strings" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" ) @@ -62,6 +62,14 @@ type typeInfo struct { } func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + if !debug { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) + } + }() + } + s := string(data) s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) @@ -121,6 +129,16 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st iface.Complete() } + // Imports() of pkg are all of the transitive packages that were loaded. + var imps []*types.Package + for _, imp := range pr.pkgs { + if imp != nil && imp != pkg { + imps = append(imps, imp) + } + } + sort.Sort(byPath(imps)) + pkg.SetImports(imps) + pkg.MarkComplete() return pkg } @@ -260,39 +278,9 @@ func (r *reader) doPkg() *types.Package { pkg := types.NewPackage(path, name) r.p.imports[path] = pkg - imports := make([]*types.Package, r.Len()) - for i := range imports { - imports[i] = r.pkg() - } - pkg.SetImports(flattenImports(imports)) - return pkg } -// flattenImports returns the transitive closure of all imported -// packages rooted from pkgs. -func flattenImports(pkgs []*types.Package) []*types.Package { - var res []*types.Package - seen := make(map[*types.Package]struct{}) - for _, pkg := range pkgs { - if _, ok := seen[pkg]; ok { - continue - } - seen[pkg] = struct{}{} - res = append(res, pkg) - - // pkg.Imports() is already flattened. - for _, pkg := range pkg.Imports() { - if _, ok := seen[pkg]; ok { - continue - } - seen[pkg] = struct{}{} - res = append(res, pkg) - } - } - return res -} - // @@@ Types func (r *reader) typ() types.Type { @@ -563,7 +551,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). 
- if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { methods := make([]*types.Func, iface.NumExplicitMethods()) for i := range methods { fn := iface.ExplicitMethod(i) diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index d50551693f..55312522dc 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -8,10 +8,13 @@ package gocommand import ( "bytes" "context" + "errors" "fmt" "io" "log" "os" + "os/exec" + "reflect" "regexp" "runtime" "strconv" @@ -19,9 +22,10 @@ import ( "sync" "time" - exec "golang.org/x/sys/execabs" - "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" + "golang.org/x/tools/internal/event/tag" ) // An Runner will run go command invocations and serialize @@ -51,9 +55,19 @@ func (runner *Runner) initialize() { // 1.14: go: updating go.mod: existing contents have changed since last read var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) +// verb is an event label for the go command verb. +var verb = keys.NewString("verb", "go command verb") + +func invLabels(inv Invocation) []label.Label { + return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)} +} + // Run is a convenience wrapper around RunRaw. // It returns only stdout and a "friendly" error. func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...) + defer done() + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) return stdout, friendly } @@ -61,13 +75,19 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e // RunPiped runs the invocation serially, always waiting for any concurrent // invocations to complete first. func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...) + defer done() + _, err := runner.runPiped(ctx, inv, stdout, stderr) return err } // RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. +// Postcondition: both error results have same nilness. func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) + defer done() // Make sure the runner is always initialized. runner.initialize() @@ -75,23 +95,24 @@ func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) // If we encounter a load concurrency error, we need to retry serially. - if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { - return stdout, stderr, friendlyErr, err + if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) { + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) } - event.Error(ctx, "Load concurrency error, will retry serially", err) - // Run serially by calling runPiped. 
-	stdout.Reset()
-	stderr.Reset()
-	friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr)
 	return stdout, stderr, friendlyErr, err
 }
 
+// Postcondition: both error results have same nilness.
 func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
 	// Wait for 1 worker to become available.
 	select {
 	case <-ctx.Done():
-		return nil, nil, nil, ctx.Err()
+		return nil, nil, ctx.Err(), ctx.Err()
 	case runner.inFlight <- struct{}{}:
 		defer func() { <-runner.inFlight }()
 	}
@@ -101,6 +122,7 @@ func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes
 	return stdout, stderr, friendlyErr, err
 }
 
+// Postcondition: both error results have same nilness.
 func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) {
 	// Make sure the runner is always initialized.
 	runner.initialize()
@@ -109,7 +131,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde
 	// runPiped commands.
 	select {
 	case <-ctx.Done():
-		return nil, ctx.Err()
+		return ctx.Err(), ctx.Err()
 	case runner.serialized <- struct{}{}:
 		defer func() { <-runner.serialized }()
 	}
@@ -119,7 +141,7 @@
 	for i := 0; i < maxInFlight; i++ {
 		select {
 		case <-ctx.Done():
-			return nil, ctx.Err()
+			return ctx.Err(), ctx.Err()
 		case runner.inFlight <- struct{}{}:
 			// Make sure we always "return" any workers we took.
 			defer func() { <-runner.inFlight }()
@@ -152,6 +174,7 @@ type Invocation struct {
 	Logf func(format string, args ...interface{})
 }
 
+// Postcondition: both error results have same nilness.
 func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) {
 	rawError = i.run(ctx, stdout, stderr)
 	if rawError != nil {
@@ -215,6 +238,18 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
 	cmd := exec.Command("go", goArgs...)
 	cmd.Stdout = stdout
 	cmd.Stderr = stderr
+
+	// cmd.WaitDelay was added only in go1.20 (see #50436).
+	if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
+		// https://go.dev/issue/59541: don't wait forever copying stderr
+		// after the command has exited.
+		// After CL 484741 we copy stdout manually, so we'll stop reading that as
+		// soon as ctx is done. However, we also don't want to wait around forever
+		// for stderr. Give a much-longer-than-reasonable delay and then assume that
+		// something has wedged in the kernel or runtime.
+		waitDelay.Set(reflect.ValueOf(30 * time.Second))
+	}
+
 	// On darwin the cwd gets resolved to the real path, which breaks anything that
 	// expects the working directory to keep the original path, including the
 	// go command when dealing with modules.
@@ -229,6 +264,7 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
 		cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
 		cmd.Dir = i.WorkingDir
 	}
+
 	defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
 
 	return runCmdContext(ctx, cmd)
@@ -242,10 +278,85 @@ var DebugHangingGoCommands = false
 
 // runCmdContext is like exec.CommandContext except it sends os.Interrupt
 // before os.Kill.
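The interrupt-then-kill protocol named in this doc comment is easier to follow outside the diff. A standalone sketch of the same pattern, assuming cmd has already been started (illustrative names, not this package's API; imports context, os, os/exec, and time):

func stopGently(ctx context.Context, cmd *exec.Cmd) error {
	res := make(chan error, 1)
	go func() { res <- cmd.Wait() }()

	select {
	case err := <-res:
		return err // exited on its own
	case <-ctx.Done():
	}

	// Cancelled: ask politely first.
	if err := cmd.Process.Signal(os.Interrupt); err == nil {
		timer := time.NewTimer(5 * time.Second)
		defer timer.Stop()
		select {
		case err := <-res:
			return err
		case <-timer.C:
		}
	}

	// Still running: kill hard, then reap the process.
	_ = cmd.Process.Kill()
	return <-res
}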
-func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
-	if err := cmd.Start(); err != nil {
+func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
+	// If cmd.Stdout is not an *os.File, the exec package will create a pipe and
+	// copy it to the Writer in a goroutine until the process has finished and
+	// either the pipe reaches EOF or the command's WaitDelay expires.
+	//
+	// However, the output from 'go list' can be quite large, and we don't want to
+	// keep reading (and allocating buffers) if we've already decided we don't
+	// care about the output. We don't want to wait for the process to finish, and
+	// we don't want to wait for the WaitDelay to expire either.
+	//
+	// Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
+	// it with a pipe (which is an *os.File), which we can close in order to stop
+	// copying output as soon as we realize we don't care about it.
+	var stdoutW *os.File
+	if cmd.Stdout != nil {
+		if _, ok := cmd.Stdout.(*os.File); !ok {
+			var stdoutR *os.File
+			stdoutR, stdoutW, err = os.Pipe()
+			if err != nil {
+				return err
+			}
+			prevStdout := cmd.Stdout
+			cmd.Stdout = stdoutW
+
+			stdoutErr := make(chan error, 1)
+			go func() {
+				_, err := io.Copy(prevStdout, stdoutR)
+				if err != nil {
+					err = fmt.Errorf("copying stdout: %w", err)
+				}
+				stdoutErr <- err
+			}()
+			defer func() {
+				// We started a goroutine to copy a stdout pipe.
+				// Wait for it to finish, or terminate it if need be.
+				var err2 error
+				select {
+				case err2 = <-stdoutErr:
+					stdoutR.Close()
+				case <-ctx.Done():
+					stdoutR.Close()
+					// Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close
+					// should cause the Read call in io.Copy to unblock and return
+					// immediately, but we still need to receive from stdoutErr to confirm
+					// that it has happened.
+					<-stdoutErr
+					err2 = ctx.Err()
+				}
+				if err == nil {
+					err = err2
+				}
+			}()
+
+			// Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the
+			// same writer, and have a type that can be compared with ==, at most
+			// one goroutine at a time will call Write.”
+			//
+			// Since we're starting a goroutine that writes to cmd.Stdout, we must
+			// also update cmd.Stderr so that it still holds.
+			func() {
+				defer func() { recover() }()
+				if cmd.Stderr == prevStdout {
+					cmd.Stderr = cmd.Stdout
+				}
+			}()
+		}
+	}
+
+	err = cmd.Start()
+	if stdoutW != nil {
+		// The child process has inherited the pipe file,
+		// so close the copy held in this process.
+		stdoutW.Close()
+		stdoutW = nil
+	}
+	if err != nil {
 		return err
 	}
+
 	resChan := make(chan error, 1)
 	go func() {
 		resChan <- cmd.Wait()
@@ -253,11 +364,14 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
 
 	// If we're interested in debugging hanging Go commands, stop waiting after a
 	// minute and panic with interesting information.
-	if DebugHangingGoCommands {
+	debug := DebugHangingGoCommands
+	if debug {
+		timer := time.NewTimer(1 * time.Minute)
+		defer timer.Stop()
 		select {
 		case err := <-resChan:
 			return err
-		case <-time.After(1 * time.Minute):
+		case <-timer.C:
 			HandleHangingGoCommand(cmd.Process)
 		case <-ctx.Done():
 		}
@@ -270,30 +384,25 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
 	}
 
 	// Cancelled. Interrupt and see if it ends voluntarily.
-	cmd.Process.Signal(os.Interrupt)
-	select {
-	case err := <-resChan:
-		return err
-	case <-time.After(time.Second):
+	if err := cmd.Process.Signal(os.Interrupt); err == nil {
+		// (We used to wait only 1s but this proved
+		// fragile on loaded builder machines.)
+ timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + } } // Didn't shut down in response to interrupt. Kill it hard. // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT // on certain platforms, such as unix. - if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands { - // Don't panic here as this reliably fails on windows with EINVAL. + if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } - // See above: don't wait indefinitely if we're debugging hanging Go commands. - if DebugHangingGoCommands { - select { - case err := <-resChan: - return err - case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill - HandleHangingGoCommand(cmd.Process) - } - } return <-resChan } diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go index 307a76d474..446c5846a6 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -23,21 +23,11 @@ import ( func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} - inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") - // Unset any unneeded flags, and remove them from BuildFlags, if they're - // present. - inv.ModFile = "" + inv.BuildFlags = nil // This is not a build command. inv.ModFlag = "" - var buildFlags []string - for _, flag := range inv.BuildFlags { - // Flags can be prefixed by one or two dashes. - f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") - if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { - continue - } - buildFlags = append(buildFlags, flag) - } - inv.BuildFlags = buildFlags + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + stdoutBytes, err := r.Run(ctx, inv) if err != nil { return 0, err diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 1684053226..8361515519 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -9,23 +9,27 @@ package gopathwalk import ( "bufio" "bytes" - "fmt" - "io/ioutil" - "log" + "io" + "io/fs" "os" "path/filepath" + "runtime" "strings" + "sync" "time" - - "golang.org/x/tools/internal/fastwalk" ) // Options controls the behavior of a Walk call. type Options struct { // If Logf is non-nil, debug logging is enabled through this function. Logf func(format string, args ...interface{}) + // Search module caches. Also disables legacy goimports ignore rules. ModulesEnabled bool + + // Maximum number of concurrent calls to user-provided callbacks, + // or 0 for GOMAXPROCS. + Concurrency int } // RootType indicates the type of a Root. @@ -46,22 +50,28 @@ type Root struct { Type RootType } -// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. -// For each package found, add will be called (concurrently) with the absolute +// Walk concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// +// For each package found, add will be called with the absolute // paths of the containing source directory and the package directory. 
-// add will be called concurrently. +// +// Unlike filepath.WalkDir, Walk follows symbolic links +// (while guarding against cycles). func Walk(roots []Root, add func(root Root, dir string), opts Options) { WalkSkip(roots, add, func(Root, string) bool { return false }, opts) } -// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. -// For each package found, add will be called (concurrently) with the absolute +// WalkSkip concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to +// find packages. +// +// For each package found, add will be called with the absolute // paths of the containing source directory and the package directory. -// For each directory that will be scanned, skip will be called (concurrently) +// For each directory that will be scanned, skip will be called // with the absolute paths of the containing source directory and the directory. // If skip returns false on a directory it will be processed. -// add will be called concurrently. -// skip will be called concurrently. +// +// Unlike filepath.WalkDir, WalkSkip follows symbolic links +// (while guarding against cycles). func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { for _, root := range roots { walkDir(root, add, skip, opts) @@ -70,30 +80,51 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root // walkDir creates a walker and starts fastwalk with this walker. func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { + if opts.Logf == nil { + opts.Logf = func(format string, args ...interface{}) {} + } if _, err := os.Stat(root.Path); os.IsNotExist(err) { - if opts.Logf != nil { - opts.Logf("skipping nonexistent directory: %v", root.Path) - } + opts.Logf("skipping nonexistent directory: %v", root.Path) return } start := time.Now() - if opts.Logf != nil { - opts.Logf("gopathwalk: scanning %s", root.Path) + opts.Logf("scanning %s", root.Path) + + concurrency := opts.Concurrency + if concurrency == 0 { + // The walk be either CPU-bound or I/O-bound, depending on what the + // caller-supplied add function does and the details of the user's platform + // and machine. Rather than trying to fine-tune the concurrency level for a + // specific environment, we default to GOMAXPROCS: it is likely to be a good + // choice for a CPU-bound add function, and if it is instead I/O-bound, then + // dealing with I/O saturation is arguably the job of the kernel and/or + // runtime. (Oversaturating I/O seems unlikely to harm performance as badly + // as failing to saturate would.) + concurrency = runtime.GOMAXPROCS(0) } w := &walker{ root: root, add: add, skip: skip, opts: opts, + sem: make(chan struct{}, concurrency), } w.init() - if err := fastwalk.Walk(root.Path, w.walk); err != nil { - log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err) - } - if opts.Logf != nil { - opts.Logf("gopathwalk: scanned %s in %v", root.Path, time.Since(start)) + w.sem <- struct{}{} + path := root.Path + if path == "" { + path = "." + } + if fi, err := os.Lstat(path); err == nil { + w.walk(path, nil, fs.FileInfoToDirEntry(fi)) + } else { + w.opts.Logf("scanning directory %v: %v", root.Path, err) } + <-w.sem + w.walking.Wait() + + opts.Logf("scanned %s in %v", root.Path, time.Since(start)) } // walker is the callback for fastwalk.Walk. @@ -103,7 +134,18 @@ type walker struct { skip func(Root, string) bool // The callback that will be invoked for every dir. 
dir is skipped if it returns true.
 	opts Options            // Options passed to Walk by the user.
 
-	ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
+	walking     sync.WaitGroup
+	sem         chan struct{} // Channel of semaphore tokens; send to acquire, receive to release.
+	ignoredDirs []string
+
+	added sync.Map // map[string]bool
+}
+
+// A symlinkList is a linked list of os.FileInfos for parent directories
+// reached via symlinks.
+type symlinkList struct {
+	info os.FileInfo
+	prev *symlinkList
 }
 
 // init initializes the walker based on its Options
@@ -119,14 +161,8 @@ func (w *walker) init() {
 
 	for _, p := range ignoredPaths {
 		full := filepath.Join(w.root.Path, p)
-		if fi, err := os.Stat(full); err == nil {
-			w.ignoredDirs = append(w.ignoredDirs, fi)
-			if w.opts.Logf != nil {
-				w.opts.Logf("Directory added to ignore list: %s", full)
-			}
-		} else if w.opts.Logf != nil {
-			w.opts.Logf("Error statting ignored directory: %v", err)
-		}
+		w.ignoredDirs = append(w.ignoredDirs, full)
+		w.opts.Logf("Directory added to ignore list: %s", full)
 	}
 }
 
@@ -135,13 +171,11 @@ func (w *walker) init() {
 // The provided path is one of the $GOPATH entries with "src" appended.
 func (w *walker) getIgnoredDirs(path string) []string {
 	file := filepath.Join(path, ".goimportsignore")
-	slurp, err := ioutil.ReadFile(file)
-	if w.opts.Logf != nil {
-		if err != nil {
-			w.opts.Logf("%v", err)
-		} else {
-			w.opts.Logf("Read %s", file)
-		}
+	slurp, err := os.ReadFile(file)
+	if err != nil {
+		w.opts.Logf("%v", err)
+	} else {
+		w.opts.Logf("Read %s", file)
 	}
 	if err != nil {
 		return nil
@@ -160,9 +194,9 @@ func (w *walker) getIgnoredDirs(path string) []string {
 }
 
 // shouldSkipDir reports whether the file should be skipped or not.
-func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
+func (w *walker) shouldSkipDir(dir string) bool {
 	for _, ignoredDir := range w.ignoredDirs {
-		if os.SameFile(fi, ignoredDir) {
+		if dir == ignoredDir {
 			return true
 		}
 	}
@@ -174,81 +208,130 @@ func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
 }
 
 // walk walks through the given path.
-func (w *walker) walk(path string, typ os.FileMode) error {
-	if typ.IsRegular() {
-		dir := filepath.Dir(path)
-		if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
-			// Doesn't make sense to have regular files
-			// directly in your $GOPATH/src or $GOROOT/src.
-			return fastwalk.ErrSkipFiles
-		}
-		if !strings.HasSuffix(path, ".go") {
-			return nil
+//
+// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored.
+func (w *walker) walk(path string, pathSymlinks *symlinkList, d fs.DirEntry) {
+	if d.Type()&os.ModeSymlink != 0 {
+		// Walk the symlink's target rather than the symlink itself.
+		//
+		// (Note that os.Stat, unlike the lower-level os.Readlink,
+		// follows arbitrarily many layers of symlinks, so it will eventually
+		// reach either a non-symlink or a nonexistent target.)
+		//
+		// TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src
+		// and GOPATH/src. Do we really need to traverse them here? If so, why?
+
+		fi, err := os.Stat(path)
+		if err != nil {
+			w.opts.Logf("%v", err)
+			return
 		}
-		w.add(w.root, dir)
-		return fastwalk.ErrSkipFiles
-	}
-	if typ == os.ModeDir {
-		base := filepath.Base(path)
-		if base == "" || base[0] == '.' 
|| base[0] == '_' ||
-			base == "testdata" ||
-			(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
-			(!w.opts.ModulesEnabled && base == "node_modules") {
-			return filepath.SkipDir
+		// Avoid walking symlink cycles: if we have already followed a symlink to
+		// this directory as a parent of itself, don't follow it again.
+		//
+		// This doesn't catch the first time through a cycle, but it also minimizes
+		// the number of extra stat calls we make if we *don't* encounter a cycle.
+		// Since we don't actually expect to encounter symlink cycles in practice,
+		// this seems like the right tradeoff.
+		for parent := pathSymlinks; parent != nil; parent = parent.prev {
+			if os.SameFile(fi, parent.info) {
+				return
+			}
 		}
-		fi, err := os.Lstat(path)
-		if err == nil && w.shouldSkipDir(fi, path) {
-			return filepath.SkipDir
+
+		pathSymlinks = &symlinkList{
+			info: fi,
+			prev: pathSymlinks,
 		}
-		return nil
+		d = fs.FileInfoToDirEntry(fi)
 	}
-	if typ == os.ModeSymlink {
-		base := filepath.Base(path)
-		if strings.HasPrefix(base, ".#") {
-			// Emacs noise.
-			return nil
+
+	if d.Type().IsRegular() {
+		if !strings.HasSuffix(path, ".go") {
+			return
 		}
-		if w.shouldTraverse(path) {
-			return fastwalk.ErrTraverseLink
+
+		dir := filepath.Dir(path)
+		if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
+			// Doesn't make sense to have regular files
+			// directly in your $GOPATH/src or $GOROOT/src.
+			//
+			// TODO(bcmills): there are many levels of directory within
+			// RootModuleCache where this also wouldn't make sense.
+			// Can we generalize this to any directory without a corresponding
+			// import path?
+			return
+		}
+
+		if _, dup := w.added.LoadOrStore(dir, true); !dup {
+			w.add(w.root, dir)
+		}
 	}
-	return nil
-}
 
-// shouldTraverse reports whether the symlink fi, found in dir,
-// should be followed. It makes sure symlinks were never visited
-// before to avoid symlink loops.
-func (w *walker) shouldTraverse(path string) bool {
-	ts, err := os.Stat(path)
-	if err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		return false
+	if !d.IsDir() {
+		return
 	}
-	if !ts.IsDir() {
-		return false
+
+	base := filepath.Base(path)
+	if base == "" || base[0] == '.' || base[0] == '_' ||
+		base == "testdata" ||
+		(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
+		(!w.opts.ModulesEnabled && base == "node_modules") ||
+		w.shouldSkipDir(path) {
+		return
 	}
-	if w.shouldSkipDir(ts, filepath.Dir(path)) {
-		return false
+
+	// Read the directory and walk its entries.
+
+	f, err := os.Open(path)
+	if err != nil {
+		w.opts.Logf("%v", err)
+		return
 	}
-	// Check for symlink loops by statting each directory component
-	// and seeing if any are the same file as ts.
+	defer f.Close()
+
 	for {
-		parent := filepath.Dir(path)
-		if parent == path {
-			// Made it to the root without seeing a cycle.
-			// Use this symlink.
-			return true
-		}
-		parentInfo, err := os.Stat(parent)
+		// We impose an arbitrary limit on the number of ReadDir results per
+		// directory to limit the amount of memory consumed for stale or upcoming
+		// directory entries. The limit trades off CPU (number of syscalls to read
+		// the whole directory) against RAM (reachable directory entries other than
+		// the one currently being processed).
+		//
+		// Since we process the directories recursively, we will end up maintaining
+		// a slice of entries for each level of the directory tree.
+		// (Compare https://go.dev/issue/36197.)
+ ents, err := f.ReadDir(1024) if err != nil { - return false + if err != io.EOF { + w.opts.Logf("%v", err) + } + break } - if os.SameFile(ts, parentInfo) { - // Cycle. Don't traverse. - return false + + for _, d := range ents { + nextPath := filepath.Join(path, d.Name()) + if d.IsDir() { + select { + case w.sem <- struct{}{}: + // Got a new semaphore token, so we can traverse the directory concurrently. + d := d + w.walking.Add(1) + go func() { + defer func() { + <-w.sem + w.walking.Done() + }() + w.walk(nextPath, pathSymlinks, d) + }() + continue + + default: + // No tokens available, so traverse serially. + } + } + + w.walk(nextPath, pathSymlinks, d) } - path = parent } - } diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 642a5ac2d7..6a18f63a44 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -13,6 +13,8 @@ import ( "go/build" "go/parser" "go/token" + "go/types" + "io/fs" "io/ioutil" "os" "path" @@ -26,6 +28,7 @@ import ( "unicode/utf8" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" ) @@ -106,7 +109,7 @@ func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { considerTests := strings.HasSuffix(filename, "_test.go") fileBase := filepath.Base(filename) - packageFileInfos, err := ioutil.ReadDir(srcDir) + packageFileInfos, err := os.ReadDir(srcDir) if err != nil { return nil } @@ -252,7 +255,7 @@ type pass struct { otherFiles []*ast.File // sibling files. // Intermediate state, generated by load. - existingImports map[string]*ImportInfo + existingImports map[string][]*ImportInfo allRefs references missingRefs references @@ -317,7 +320,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { func (p *pass) load() ([]*ImportFix, bool) { p.knownPackages = map[string]*packageInfo{} p.missingRefs = references{} - p.existingImports = map[string]*ImportInfo{} + p.existingImports = map[string][]*ImportInfo{} // Load basic information about the file in question. p.allRefs = collectReferences(p.f) @@ -348,7 +351,7 @@ func (p *pass) load() ([]*ImportFix, bool) { } } for _, imp := range imports { - p.existingImports[p.importIdentifier(imp)] = imp + p.existingImports[p.importIdentifier(imp)] = append(p.existingImports[p.importIdentifier(imp)], imp) } // Find missing references. @@ -387,36 +390,45 @@ func (p *pass) fix() ([]*ImportFix, bool) { // Found everything, or giving up. Add the new imports and remove any unused. var fixes []*ImportFix - for _, imp := range p.existingImports { - // We deliberately ignore globals here, because we can't be sure - // they're in the same package. People do things like put multiple - // main packages in the same directory, and we don't want to - // remove imports if they happen to have the same name as a var in - // a different package. - if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { - fixes = append(fixes, &ImportFix{ - StmtInfo: *imp, - IdentName: p.importIdentifier(imp), - FixType: DeleteImport, - }) - continue - } + for _, identifierImports := range p.existingImports { + for _, imp := range identifierImports { + // We deliberately ignore globals here, because we can't be sure + // they're in the same package. 
People do things like put multiple
+			// main packages in the same directory, and we don't want to
+			// remove imports if they happen to have the same name as a var in
+			// a different package.
+			if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok {
+				fixes = append(fixes, &ImportFix{
+					StmtInfo:  *imp,
+					IdentName: p.importIdentifier(imp),
+					FixType:   DeleteImport,
+				})
+				continue
+			}
 
-		// An existing import may need to update its import name to be correct.
-		if name := p.importSpecName(imp); name != imp.Name {
-			fixes = append(fixes, &ImportFix{
-				StmtInfo: ImportInfo{
-					Name:       name,
-					ImportPath: imp.ImportPath,
-				},
-				IdentName: p.importIdentifier(imp),
-				FixType:   SetImportName,
-			})
+			// An existing import may need to update its import name to be correct.
+			if name := p.importSpecName(imp); name != imp.Name {
+				fixes = append(fixes, &ImportFix{
+					StmtInfo: ImportInfo{
+						Name:       name,
+						ImportPath: imp.ImportPath,
+					},
+					IdentName: p.importIdentifier(imp),
+					FixType:   SetImportName,
+				})
+			}
 		}
 	}
 
+	// Collecting fixes involved map iteration, so sort for stability. See
+	// golang/go#59976.
+	sortFixes(fixes)
+
+	// Collect selected fixes in a separate slice, so that they can be sorted
+	// separately. Note that these fixes must occur after fixes to existing
+	// imports. TODO(rfindley): figure out why.
+	var selectedFixes []*ImportFix
 	for _, imp := range selected {
-		fixes = append(fixes, &ImportFix{
+		selectedFixes = append(selectedFixes, &ImportFix{
 			StmtInfo: ImportInfo{
 				Name:       p.importSpecName(imp),
 				ImportPath: imp.ImportPath,
@@ -425,8 +437,25 @@
 			FixType:   AddImport,
 		})
 	}
+	sortFixes(selectedFixes)
 
-	return fixes, true
+	return append(fixes, selectedFixes...), true
+}
+
+func sortFixes(fixes []*ImportFix) {
+	sort.Slice(fixes, func(i, j int) bool {
+		fi, fj := fixes[i], fixes[j]
+		if fi.StmtInfo.ImportPath != fj.StmtInfo.ImportPath {
+			return fi.StmtInfo.ImportPath < fj.StmtInfo.ImportPath
+		}
+		if fi.StmtInfo.Name != fj.StmtInfo.Name {
+			return fi.StmtInfo.Name < fj.StmtInfo.Name
+		}
+		if fi.IdentName != fj.IdentName {
+			return fi.IdentName < fj.IdentName
+		}
+		return fi.FixType < fj.FixType
+	})
 }
 
 // importSpecName gets the import name of imp in the import spec.
@@ -519,7 +548,7 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
 
 var fixImports = fixImportsDefault
 
 func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
-	fixes, err := getFixes(fset, f, filename, env)
+	fixes, err := getFixes(context.Background(), fset, f, filename, env)
 	if err != nil {
 		return err
 	}
@@ -529,7 +558,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P
 
 // getFixes gets the import fixes that need to be made to f in order to fix the imports.
 // It does not modify the ast.
-func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
+func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
 	abs, err := filepath.Abs(filename)
 	if err != nil {
 		return nil, err
@@ -583,7 +612,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
 	// Go look for candidates in $GOPATH, etc. We don't necessarily load
 	// the real exports of sibling imports, so keep assuming their contents.
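The new sortFixes pass exists because fixes are accumulated from a map, and Go randomizes map iteration order. A self-contained illustration of the failure mode and the remedy (the fix struct is a simplified stand-in for ImportFix):

```go
package main

import (
	"fmt"
	"sort"
)

type fix struct{ importPath, identName string }

func main() {
	// Fixes keyed by identifier, as in pass.existingImports.
	byIdent := map[string]fix{
		"zebra": {"example.com/zebra", "zebra"},
		"apple": {"example.com/apple", "apple"},
	}
	var fixes []fix
	for _, f := range byIdent {
		fixes = append(fixes, f) // order varies from run to run
	}
	// Sort for stability, mirroring sortFixes (compare golang/go#59976).
	sort.Slice(fixes, func(i, j int) bool {
		if fixes[i].importPath != fixes[j].importPath {
			return fixes[i].importPath < fixes[j].importPath
		}
		return fixes[i].identName < fixes[j].identName
	})
	fmt.Println(fixes)
}
```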
-	if err := addExternalCandidates(p, p.missingRefs, filename); err != nil {
+	if err := addExternalCandidates(ctx, p, p.missingRefs, filename); err != nil {
 		return nil, err
 	}
 
@@ -672,20 +701,21 @@ func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map
 	return result, nil
 }
 
-func PrimeCache(ctx context.Context, env *ProcessEnv) error {
+func PrimeCache(ctx context.Context, resolver Resolver) error {
 	// Fully scan the disk for directories, but don't actually read any Go files.
 	callback := &scanCallback{
-		rootFound: func(gopathwalk.Root) bool {
-			return true
+		rootFound: func(root gopathwalk.Root) bool {
+			// See getCandidatePkgs: walking GOROOT is apparently expensive and
+			// unnecessary.
+			return root.Type != gopathwalk.RootGOROOT
 		},
 		dirFound: func(pkg *pkg) bool {
 			return false
 		},
-		packageNameLoaded: func(pkg *pkg) bool {
-			return false
-		},
+		// packageNameLoaded and exportsLoaded must never be called.
 	}
-	return getCandidatePkgs(ctx, callback, "", "", env)
+
+	return resolver.scan(ctx, callback)
 }
 
 func candidateImportName(pkg *pkg) string {
@@ -799,16 +829,45 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
 	return getCandidatePkgs(ctx, callback, filename, filePkg, env)
 }
 
-var requiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"}
+// TODO(rfindley): we should depend on GOOS and GOARCH, to provide accurate
+// imports when doing cross-platform development.
+var requiredGoEnvVars = []string{
+	"GO111MODULE",
+	"GOFLAGS",
+	"GOINSECURE",
+	"GOMOD",
+	"GOMODCACHE",
+	"GONOPROXY",
+	"GONOSUMDB",
+	"GOPATH",
+	"GOPROXY",
+	"GOROOT",
+	"GOSUMDB",
+	"GOWORK",
+}
 
 // ProcessEnv contains environment variables and settings that affect the use of
 // the go command, the go/build package, etc.
+//
+// ...a ProcessEnv *also* overwrites its Env along with derived state in the
+// form of the resolver. And because it is lazily initialized, an env may just
+// be broken and unusable, but there is no way for the caller to detect that:
+// all queries will just fail.
+//
+// TODO(rfindley): refactor this package so that this type (perhaps renamed to
+// just Env or Config) is an immutable configuration struct, to be exchanged
+// for an initialized object via a constructor that returns an error. Perhaps
+// the signature should be `func NewResolver(*Env) (*Resolver, error)`, where
+// resolver is a concrete type used for resolving imports. Via this
+// refactoring, we can avoid the need to call ProcessEnv.init and
+// ProcessEnv.GoEnv everywhere, and implicitly fix all the places where
+// these are misused. Also, we'd delegate to the caller the decision of how to
+// handle a broken environment.
 type ProcessEnv struct {
 	GocmdRunner *gocommand.Runner
 
 	BuildFlags []string
 	ModFlag    string
-	ModFile    string
 
 	// SkipPathInScan returns true if the path should be skipped from scans of
 	// the RootCurrentModule root type. The function argument is a clean,
@@ -818,7 +877,7 @@ type ProcessEnv struct {
 	// Env overrides the OS environment, and can be used to specify
 	// GOPROXY, GO111MODULE, etc. PATH cannot be set here, because
 	// exec.Command will not honor it.
-	// Specifying all of RequiredGoEnvVars avoids a call to `go env`.
+	// Specifying all of requiredGoEnvVars avoids a call to `go env`.
Env map[string]string WorkingDir string @@ -826,9 +885,17 @@ type ProcessEnv struct { // If Logf is non-nil, debug logging is enabled through this function. Logf func(format string, args ...interface{}) - initialized bool + // If set, ModCache holds a shared cache of directory info to use across + // multiple ProcessEnvs. + ModCache *DirInfoCache - resolver Resolver + initialized bool // see TODO above + + // resolver and resolverErr are lazily evaluated (see GetResolver). + // This is unclean, but see the big TODO in the docstring for ProcessEnv + // above: for now, we can't be sure that the ProcessEnv is fully initialized. + resolver Resolver + resolverErr error } func (e *ProcessEnv) goEnv() (map[string]string, error) { @@ -908,20 +975,31 @@ func (e *ProcessEnv) env() []string { } func (e *ProcessEnv) GetResolver() (Resolver, error) { - if e.resolver != nil { - return e.resolver, nil - } if err := e.init(); err != nil { return nil, err } - if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { - e.resolver = newGopathResolver(e) - return e.resolver, nil + + if e.resolver == nil && e.resolverErr == nil { + // TODO(rfindley): we should only use a gopathResolver here if the working + // directory is actually *in* GOPATH. (I seem to recall an open gopls issue + // for this behavior, but I can't find it). + // + // For gopls, we can optionally explicitly choose a resolver type, since we + // already know the view type. + if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { + e.resolver = newGopathResolver(e) + } else { + e.resolver, e.resolverErr = newModuleResolver(e, e.ModCache) + } } - e.resolver = newModuleResolver(e) - return e.resolver, nil + + return e.resolver, e.resolverErr } +// buildContext returns the build.Context to use for matching files. +// +// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform +// development. func (e *ProcessEnv) buildContext() (*build.Context, error) { ctx := build.Default goenv, err := e.goEnv() @@ -1001,15 +1079,23 @@ func addStdlibCandidates(pass *pass, refs references) error { type Resolver interface { // loadPackageNames loads the package names in importPaths. loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) + // scan works with callback to search for packages. See scanCallback for details. scan(ctx context.Context, callback *scanCallback) error + // loadExports returns the set of exported symbols in the package at dir. // loadExports may be called concurrently. loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) + // scoreImportPath returns the relevance for an import path. scoreImportPath(ctx context.Context, path string) float64 - ClearForNewScan() + // ClearForNewScan returns a new Resolver based on the receiver that has + // cleared its internal caches of directory contents. + // + // The new resolver should be primed and then set via + // [ProcessEnv.UpdateResolver]. + ClearForNewScan() Resolver } // A scanCallback controls a call to scan and receives its results. 
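Taken together with the PrimeCache change above, the new ClearForNewScan contract (prime the fresh resolver, then install it via the UpdateResolver method referenced in the comment) suggests a rescan flow along these lines. This is a sketch of package-internal usage built only from the signatures shown in this diff, not code from the change itself:

```go
// refreshResolver rebuilds and re-primes a resolver after the file system
// has changed, then installs it on the ProcessEnv (sketch; assumes the
// ProcessEnv, Resolver, and PrimeCache declarations shown above).
func refreshResolver(ctx context.Context, env *ProcessEnv) error {
	resolver, err := env.GetResolver()
	if err != nil {
		return err
	}
	fresh := resolver.ClearForNewScan() // forgets directory contents; module cache info survives
	if err := PrimeCache(ctx, fresh); err != nil {
		return err // re-scan of the non-GOROOT roots failed
	}
	env.UpdateResolver(fresh)
	return nil
}
```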
@@ -1031,7 +1117,10 @@ type scanCallback struct { exportsLoaded func(pkg *pkg, exports []string) } -func addExternalCandidates(pass *pass, refs references, filename string) error { +func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { + ctx, done := event.Start(ctx, "imports.addExternalCandidates") + defer done() + var mu sync.Mutex found := make(map[string][]pkgDistance) callback := &scanCallback{ @@ -1089,7 +1178,7 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { go func(pkgName string, symbols map[string]bool) { defer wg.Done() - found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename) + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols) if err != nil { firstErrOnce.Do(func() { @@ -1120,6 +1209,17 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { }() for result := range results { + // Don't offer completions that would shadow predeclared + // names, such as github.com/coreos/etcd/error. + if types.Universe.Lookup(result.pkg.name) != nil { // predeclared + // Ideally we would skip this candidate only + // if the predeclared name is actually + // referenced by the file, but that's a lot + // trickier to compute and would still create + // an import that is likely to surprise the + // user before long. + continue + } pass.addCandidate(result.imp, result.pkg) } return firstErr @@ -1162,31 +1262,22 @@ func ImportPathToAssumedName(importPath string) string { type gopathResolver struct { env *ProcessEnv walked bool - cache *dirInfoCache + cache *DirInfoCache scanSema chan struct{} // scanSema prevents concurrent scans. } func newGopathResolver(env *ProcessEnv) *gopathResolver { r := &gopathResolver{ - env: env, - cache: &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - }, + env: env, + cache: NewDirInfoCache(), scanSema: make(chan struct{}, 1), } r.scanSema <- struct{}{} return r } -func (r *gopathResolver) ClearForNewScan() { - <-r.scanSema - r.cache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - } - r.walked = false - r.scanSema <- struct{}{} +func (r *gopathResolver) ClearForNewScan() Resolver { + return newGopathResolver(r.env) } func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { @@ -1441,11 +1532,11 @@ func VendorlessPath(ipath string) string { func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { // Look for non-test, buildable .go files which could provide exports. - all, err := ioutil.ReadDir(dir) + all, err := os.ReadDir(dir) if err != nil { return "", nil, err } - var files []os.FileInfo + var files []fs.DirEntry for _, fi := range all { name := fi.Name() if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { @@ -1507,7 +1598,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl // findImport searches for a package with the given symbols. 
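The predeclared-name filter added above hinges on go/types' Universe scope. A runnable demonstration of that check in isolation (the tested names are illustrative):

```go
package main

import (
	"fmt"
	"go/types"
)

func main() {
	// types.Universe holds Go's predeclared identifiers.
	for _, name := range []string{"error", "len", "etcd"} {
		fmt.Printf("%s predeclared: %v\n", name, types.Universe.Lookup(name) != nil)
	}
	// Output:
	// error predeclared: true
	// len predeclared: true
	// etcd predeclared: false
}
```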
// If no package is found, findImport returns ("", false, nil)
-func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) {
+func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
 	// Sort the candidates by their import package length,
 	// assuming that shorter package names are better than long
 	// ones. Note that this sorts by the de-vendored name, so
diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go
index 95a88383a7..660407548e 100644
--- a/vendor/golang.org/x/tools/internal/imports/imports.go
+++ b/vendor/golang.org/x/tools/internal/imports/imports.go
@@ -11,6 +11,7 @@ package imports
 import (
 	"bufio"
 	"bytes"
+	"context"
 	"fmt"
 	"go/ast"
 	"go/format"
@@ -23,6 +24,7 @@ import (
 	"strings"
 
 	"golang.org/x/tools/go/ast/astutil"
+	"golang.org/x/tools/internal/event"
 )
 
 // Options is golang.org/x/tools/imports.Options with extra internal-only options.
@@ -66,14 +68,17 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e
 //
 // Note that filename's directory influences which imports can be chosen,
 // so it is important that filename be accurate.
-func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
+func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
+	ctx, done := event.Start(ctx, "imports.FixImports")
+	defer done()
+
 	fileSet := token.NewFileSet()
 	file, _, err := parse(fileSet, filename, src, opt)
 	if err != nil {
 		return nil, err
 	}
 
-	return getFixes(fileSet, file, filename, opt.Env)
+	return getFixes(ctx, fileSet, file, filename, opt.Env)
 }
 
 // ApplyFixes applies all of the fixes to the file and formats it. extraMode
@@ -231,7 +236,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
 		src = src[:len(src)-len("}\n")]
 		// Gofmt has also indented the function body one level.
 		// Remove that indent.
-		src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
+		src = bytes.ReplaceAll(src, []byte("\n\t"), []byte("\n"))
 		return matchSpace(orig, src)
 	}
 	return file, adjust, nil
diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go
index 7d99d04ca8..3d0f38f6c2 100644
--- a/vendor/golang.org/x/tools/internal/imports/mod.go
+++ b/vendor/golang.org/x/tools/internal/imports/mod.go
@@ -9,7 +9,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path"
 	"path/filepath"
@@ -19,53 +18,93 @@ import (
 	"strings"
 
 	"golang.org/x/mod/module"
+	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/gopathwalk"
 )
 
-// ModuleResolver implements resolver for modules using the go command as little
-// as feasible.
+// Notes(rfindley): ModuleResolver appears to be heavily optimized for scanning
+// as fast as possible, which is desirable for a call to goimports from the
+// command line, but it doesn't work as well for gopls, where it suffers from
+// slow startup (golang/go#44863) and intermittent hanging (golang/go#59216),
+// both caused by populating the cache, albeit in slightly different ways.
+//
+// A high-level list of TODOs:
+//   - Optimize the scan itself, as there is some redundant statting and
+//     reading of go.mod files.
+// - Invert the relationship between ProcessEnv and Resolver (see the +// docstring of ProcessEnv). +// - Make it easier to use an external resolver implementation. +// +// Smaller TODOs are annotated in the code below. + +// ModuleResolver implements the Resolver interface for a workspace using +// modules. +// +// A goal of the ModuleResolver is to invoke the Go command as little as +// possible. To this end, it runs the Go command only for listing module +// information (i.e. `go list -m -e -json ...`). Package scanning, the process +// of loading package information for the modules, is implemented internally +// via the scan method. +// +// It has two types of state: the state derived from the go command, which +// is populated by init, and the state derived from scans, which is populated +// via scan. A root is considered scanned if it has been walked to discover +// directories. However, if the scan did not require additional information +// from the directory (such as package name or exports), the directory +// information itself may be partially populated. It will be lazily filled in +// as needed by scans, using the scanCallback. type ModuleResolver struct { - env *ProcessEnv - moduleCacheDir string - dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. - roots []gopathwalk.Root - scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. - scannedRoots map[gopathwalk.Root]bool - - initialized bool - mains []*gocommand.ModuleJSON - mainByDir map[string]*gocommand.ModuleJSON - modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... - modsByDir []*gocommand.ModuleJSON // ...or Dir. - - // moduleCacheCache stores information about the module cache. - moduleCacheCache *dirInfoCache - otherCache *dirInfoCache + env *ProcessEnv + + // Module state, populated during construction + dummyVendorMod *gocommand.ModuleJSON // if vendoring is enabled, a pseudo-module to represent the /vendor directory + moduleCacheDir string // GOMODCACHE, inferred from GOPATH if unset + roots []gopathwalk.Root // roots to scan, in approximate order of importance + mains []*gocommand.ModuleJSON // main modules + mainByDir map[string]*gocommand.ModuleJSON // module information by dir, to join with roots + modsByModPath []*gocommand.ModuleJSON // all modules, ordered by # of path components in their module path + modsByDir []*gocommand.ModuleJSON // ...or by the number of path components in their Dir. + + // Scanning state, populated by scan + + // scanSema prevents concurrent scans, and guards scannedRoots and the cache + // fields below (though the caches themselves are concurrency safe). + // Receive to acquire, send to release. + scanSema chan struct{} + scannedRoots map[gopathwalk.Root]bool // if true, root has been walked + + // Caches of directory info, populated by scans and scan callbacks + // + // moduleCacheCache stores cached information about roots in the module + // cache, which are immutable and therefore do not need to be invalidated. + // + // otherCache stores information about all other roots (even GOROOT), which + // may change. + moduleCacheCache *DirInfoCache + otherCache *DirInfoCache } -func newModuleResolver(e *ProcessEnv) *ModuleResolver { +// newModuleResolver returns a new module-aware goimports resolver. +// +// Note: use caution when modifying this constructor: changes must also be +// reflected in ModuleResolver.ClearForNewScan. 
+func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleResolver, error) { r := &ModuleResolver{ env: e, scanSema: make(chan struct{}, 1), } - r.scanSema <- struct{}{} - return r -} - -func (r *ModuleResolver) init() error { - if r.initialized { - return nil - } + r.scanSema <- struct{}{} // release goenv, err := r.env.goEnv() if err != nil { - return err + return nil, err } + + // TODO(rfindley): can we refactor to share logic with r.env.invokeGo? inv := gocommand.Invocation{ BuildFlags: r.env.BuildFlags, ModFlag: r.env.ModFlag, - ModFile: r.env.ModFile, Env: r.env.env(), Logf: r.env.Logf, WorkingDir: r.env.WorkingDir, @@ -77,9 +116,12 @@ func (r *ModuleResolver) init() error { // Module vendor directories are ignored in workspace mode: // https://go.googlesource.com/proposal/+/master/design/45713-workspace.md if len(r.env.Env["GOWORK"]) == 0 { + // TODO(rfindley): VendorEnabled runs the go command to get GOFLAGS, but + // they should be available from the ProcessEnv. Can we avoid the redundant + // invocation? vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) if err != nil { - return err + return nil, err } } @@ -100,19 +142,14 @@ func (r *ModuleResolver) init() error { // GO111MODULE=on. Other errors are fatal. if err != nil { if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") { - return err + return nil, err } } } - if gmc := r.env.Env["GOMODCACHE"]; gmc != "" { - r.moduleCacheDir = gmc - } else { - gopaths := filepath.SplitList(goenv["GOPATH"]) - if len(gopaths) == 0 { - return fmt.Errorf("empty GOPATH") - } - r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod") + r.moduleCacheDir = gomodcacheForEnv(goenv) + if r.moduleCacheDir == "" { + return nil, fmt.Errorf("cannot resolve GOMODCACHE") } sort.Slice(r.modsByModPath, func(i, j int) bool { @@ -123,7 +160,7 @@ func (r *ModuleResolver) init() error { }) sort.Slice(r.modsByDir, func(i, j int) bool { count := func(x int) int { - return strings.Count(r.modsByDir[x].Dir, "/") + return strings.Count(r.modsByDir[x].Dir, string(filepath.Separator)) } return count(j) < count(i) // descending order }) @@ -141,7 +178,11 @@ func (r *ModuleResolver) init() error { } else { addDep := func(mod *gocommand.ModuleJSON) { if mod.Replace == nil { - // This is redundant with the cache, but we'll skip it cheaply enough. + // This is redundant with the cache, but we'll skip it cheaply enough + // when we encounter it in the module cache scan. + // + // Including it at a lower index in r.roots than the module cache dir + // helps prioritize matches from within existing dependencies. r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache}) } else { r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther}) @@ -158,24 +199,40 @@ func (r *ModuleResolver) init() error { addDep(mod) } } + // If provided, share the moduleCacheCache. + // + // TODO(rfindley): The module cache is immutable. However, the loaded + // exports do depend on GOOS and GOARCH. Fortunately, the + // ProcessEnv.buildContext does not adjust these from build.DefaultContext + // (even though it should). So for now, this is OK to share, but we need to + // add logic for handling GOOS/GOARCH. 
+	r.moduleCacheCache = moduleCacheCache
 		r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache})
 	}
 
 	r.scannedRoots = map[gopathwalk.Root]bool{}
 	if r.moduleCacheCache == nil {
-		r.moduleCacheCache = &dirInfoCache{
-			dirs:      map[string]*directoryPackageInfo{},
-			listeners: map[*int]cacheListener{},
-		}
-	}
-	if r.otherCache == nil {
-		r.otherCache = &dirInfoCache{
-			dirs:      map[string]*directoryPackageInfo{},
-			listeners: map[*int]cacheListener{},
-		}
+		r.moduleCacheCache = NewDirInfoCache()
 	}
-	r.initialized = true
-	return nil
+	r.otherCache = NewDirInfoCache()
+	return r, nil
+}
+
+// gomodcacheForEnv returns the GOMODCACHE value to use based on the given env
+// map, which must have GOMODCACHE and GOPATH populated.
+//
+// TODO(rfindley): this is defensive refactoring.
+//  1. Is this even relevant anymore? Can't we just read GOMODCACHE?
+//  2. Use this to separate module cache scanning from other scanning.
+func gomodcacheForEnv(goenv map[string]string) string {
+	if gmc := goenv["GOMODCACHE"]; gmc != "" {
+		return gmc
+	}
+	gopaths := filepath.SplitList(goenv["GOPATH"])
+	if len(gopaths) == 0 {
+		return ""
+	}
+	return filepath.Join(gopaths[0], "/pkg/mod")
 }
 
 func (r *ModuleResolver) initAllMods() error {
@@ -206,30 +263,82 @@ func (r *ModuleResolver) initAllMods() error {
 	return nil
 }
 
-func (r *ModuleResolver) ClearForNewScan() {
-	<-r.scanSema
-	r.scannedRoots = map[gopathwalk.Root]bool{}
-	r.otherCache = &dirInfoCache{
-		dirs:      map[string]*directoryPackageInfo{},
-		listeners: map[*int]cacheListener{},
+// ClearForNewScan invalidates the last scan.
+//
+// It preserves the set of roots, but forgets about the set of directories.
+// Though it forgets the set of module cache directories, it remembers their
+// contents, since they are assumed to be immutable.
+func (r *ModuleResolver) ClearForNewScan() Resolver {
+	<-r.scanSema // acquire r, to guard scannedRoots
+	r2 := &ModuleResolver{
+		env:            r.env,
+		dummyVendorMod: r.dummyVendorMod,
+		moduleCacheDir: r.moduleCacheDir,
+		roots:          r.roots,
+		mains:          r.mains,
+		mainByDir:      r.mainByDir,
+		modsByModPath:  r.modsByModPath,
+
+		scanSema:         make(chan struct{}, 1),
+		scannedRoots:     make(map[gopathwalk.Root]bool),
+		otherCache:       NewDirInfoCache(),
+		moduleCacheCache: r.moduleCacheCache,
 	}
-	r.scanSema <- struct{}{}
+	r2.scanSema <- struct{}{} // r2 must start released
+	// Invalidate root scans. We don't need to invalidate module cache roots,
+	// because they are immutable.
+	// (We don't support a use case where GOMODCACHE is cleaned in the middle of
+	// e.g. a gopls session: the user must restart gopls to get accurate
+	// imports.)
+	//
+	// Scanning for new directories in GOMODCACHE should be handled elsewhere,
+	// via a call to ScanModuleCache.
+	for _, root := range r.roots {
+		if root.Type == gopathwalk.RootModuleCache && r.scannedRoots[root] {
+			r2.scannedRoots[root] = true
+		}
+	}
+	r.scanSema <- struct{}{} // release r
+	return r2
 }
 
-func (r *ModuleResolver) ClearForNewMod() {
-	<-r.scanSema
-	*r = ModuleResolver{
-		env:              r.env,
-		moduleCacheCache: r.moduleCacheCache,
-		otherCache:       r.otherCache,
-		scanSema:         r.scanSema,
+// ClearModuleInfo invalidates resolver state that depends on go.mod file
+// contents (essentially, the output of go list -m -json ...).
+//
+// Notably, it does not forget directory contents, which are reset
+// asynchronously via ClearForNewScan.
+//
+// If the ProcessEnv is a GOPATH environment, ClearModuleInfo is a no-op.
+// +// TODO(rfindley): move this to a new env.go, consolidating ProcessEnv methods. +func (e *ProcessEnv) ClearModuleInfo() { + if r, ok := e.resolver.(*ModuleResolver); ok { + resolver, resolverErr := newModuleResolver(e, e.ModCache) + if resolverErr == nil { + <-r.scanSema // acquire (guards caches) + resolver.moduleCacheCache = r.moduleCacheCache + resolver.otherCache = r.otherCache + r.scanSema <- struct{}{} // release + } + e.resolver = resolver + e.resolverErr = resolverErr } - r.init() - r.scanSema <- struct{}{} } -// findPackage returns the module and directory that contains the package at -// the given import path, or returns nil, "" if no module is in scope. +// UpdateResolver sets the resolver for the ProcessEnv to use in imports +// operations. Only for use with the result of [Resolver.ClearForNewScan]. +// +// TODO(rfindley): this awkward API is a result of the (arguably) inverted +// relationship between configuration and state described in the doc comment +// for [ProcessEnv]. +func (e *ProcessEnv) UpdateResolver(r Resolver) { + e.resolver = r + e.resolverErr = nil +} + +// findPackage returns the module and directory from within the main modules +// and their dependencies that contains the package at the given import path, +// or returns nil, "" if no module is in scope. func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) { // This can't find packages in the stdlib, but that's harmless for all // the existing code paths. @@ -264,7 +373,7 @@ func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, } // Not cached. Read the filesystem. - pkgFiles, err := ioutil.ReadDir(pkgDir) + pkgFiles, err := os.ReadDir(pkgDir) if err != nil { continue } @@ -295,10 +404,6 @@ func (r *ModuleResolver) cacheStore(info directoryPackageInfo) { } } -func (r *ModuleResolver) cacheKeys() []string { - return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...) -} - // cachePackageName caches the package name for a dir already in the cache. func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { if info.rootType == gopathwalk.RootModuleCache { @@ -327,6 +432,10 @@ func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON { // - in /vendor/ in -mod=vendor mode. // - nested module? Dunno. // Rumor has it that replace targets cannot contain other replace targets. + // + // Note that it is critical here that modsByDir is sorted to have deeper dirs + // first. This ensures that findModuleByDir finds the innermost module. + // See also golang/go#56291. 
for _, m := range r.modsByDir { if !strings.HasPrefix(dir, m.Dir) { continue @@ -363,15 +472,15 @@ func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON return modDir != mod.Dir } -func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) { - readModName := func(modFile string) string { - modBytes, err := ioutil.ReadFile(modFile) - if err != nil { - return "" - } - return modulePath(modBytes) +func readModName(modFile string) string { + modBytes, err := os.ReadFile(modFile) + if err != nil { + return "" } + return modulePath(modBytes) +} +func (r *ModuleResolver) modInfo(dir string) (modDir, modName string) { if r.dirInModuleCache(dir) { if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 { index := strings.Index(dir, matches[1]+"@"+matches[2]) @@ -405,11 +514,9 @@ func (r *ModuleResolver) dirInModuleCache(dir string) bool { } func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { - if err := r.init(); err != nil { - return nil, err - } names := map[string]string{} for _, path := range importPaths { + // TODO(rfindley): shouldn't this use the dirInfoCache? _, packageDir := r.findPackage(path) if packageDir == "" { continue @@ -424,9 +531,8 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) ( } func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { - if err := r.init(); err != nil { - return err - } + ctx, done := event.Start(ctx, "imports.ModuleResolver.scan") + defer done() processDir := func(info directoryPackageInfo) { // Skip this directory if we were not able to get the package information successfully. @@ -437,18 +543,18 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error if err != nil { return } - if !callback.dirFound(pkg) { return } + pkg.packageName, err = r.cachePackageName(info) if err != nil { return } - if !callback.packageNameLoaded(pkg) { return } + _, exports, err := r.loadExports(ctx, pkg, false) if err != nil { return @@ -487,7 +593,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error return packageScanned } - // Add anything new to the cache, and process it if we're still listening. add := func(root gopathwalk.Root, dir string) { r.cacheStore(r.scanDirForPackage(root, dir)) } @@ -502,9 +607,9 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error select { case <-ctx.Done(): return - case <-r.scanSema: + case <-r.scanSema: // acquire } - defer func() { r.scanSema <- struct{}{} }() + defer func() { r.scanSema <- struct{}{} }() // release // We have the lock on r.scannedRoots, and no other scans can run. 
for _, root := range roots { if ctx.Err() != nil { @@ -606,9 +711,6 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { } func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { - if err := r.init(); err != nil { - return "", nil, err - } if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { return r.cacheExports(ctx, r.env, info) } diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index 18dada495c..cfc5465765 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -7,12 +7,16 @@ package imports import ( "context" "fmt" + "path" + "path/filepath" + "strings" "sync" + "golang.org/x/mod/module" "golang.org/x/tools/internal/gopathwalk" ) -// To find packages to import, the resolver needs to know about all of the +// To find packages to import, the resolver needs to know about all of // the packages that could be imported. This includes packages that are // already in modules that are in (1) the current module, (2) replace targets, // and (3) packages in the module cache. Packages in (1) and (2) may change over @@ -39,6 +43,8 @@ const ( exportsLoaded ) +// directoryPackageInfo holds (possibly incomplete) information about packages +// contained in a given directory. type directoryPackageInfo struct { // status indicates the extent to which this struct has been filled in. status directoryPackageStatus @@ -63,7 +69,10 @@ type directoryPackageInfo struct { packageName string // the package name, as declared in the source. // Set when status >= exportsLoaded. - + // TODO(rfindley): it's hard to see this, but exports depend implicitly on + // the default build context GOOS and GOARCH. + // + // We can make this explicit, and key exports by GOOS, GOARCH. exports []string } @@ -79,7 +88,7 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( return true, nil } -// dirInfoCache is a concurrency safe map for storing information about +// DirInfoCache is a concurrency-safe map for storing information about // directories that may contain packages. // // The information in this cache is built incrementally. Entries are initialized in scan. @@ -92,21 +101,26 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( // The information in the cache is not expected to change for the cache's // lifetime, so there is no protection against competing writes. Users should // take care not to hold the cache across changes to the underlying files. -// -// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc) -type dirInfoCache struct { +type DirInfoCache struct { mu sync.Mutex // dirs stores information about packages in directories, keyed by absolute path. dirs map[string]*directoryPackageInfo listeners map[*int]cacheListener } +func NewDirInfoCache() *DirInfoCache { + return &DirInfoCache{ + dirs: make(map[string]*directoryPackageInfo), + listeners: make(map[*int]cacheListener), + } +} + type cacheListener func(directoryPackageInfo) // ScanAndListen calls listener on all the items in the cache, and on anything // newly added. The returned stop function waits for all in-flight callbacks to // finish and blocks new ones. 
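The replay-then-stream contract described above is the subtle part of the cache. A stripped-down model of the same listener pattern, using string payloads instead of directoryPackageInfo and omitting the in-flight-callback draining that the real stop function performs:

```go
package main

import (
	"fmt"
	"sync"
)

// miniCache models DirInfoCache's listener pattern: a new listener first
// receives a snapshot of existing entries, then every subsequent Store.
type miniCache struct {
	mu        sync.Mutex
	dirs      map[string]string
	listeners map[int]func(dir, info string)
	nextID    int
}

func (c *miniCache) Store(dir, info string) {
	c.mu.Lock()
	c.dirs[dir] = info
	fns := make([]func(dir, info string), 0, len(c.listeners))
	for _, fn := range c.listeners {
		fns = append(fns, fn)
	}
	c.mu.Unlock()
	for _, fn := range fns {
		fn(dir, info) // notify outside the lock
	}
}

func (c *miniCache) ScanAndListen(listener func(dir, info string)) (stop func()) {
	c.mu.Lock()
	id := c.nextID
	c.nextID++
	c.listeners[id] = listener
	snapshot := make(map[string]string, len(c.dirs))
	for k, v := range c.dirs {
		snapshot[k] = v
	}
	c.mu.Unlock()
	for dir, info := range snapshot {
		listener(dir, info) // replay existing entries
	}
	return func() {
		c.mu.Lock()
		defer c.mu.Unlock()
		delete(c.listeners, id) // real stop also waits for in-flight callbacks
	}
}

func main() {
	c := &miniCache{
		dirs:      map[string]string{"/gopath/src/a": "pkg a"},
		listeners: map[int]func(dir, info string){},
	}
	stop := c.ScanAndListen(func(dir, info string) { fmt.Println(dir, "->", info) })
	defer stop()
	c.Store("/gopath/src/b", "pkg b") // streamed to the listener
}
```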
-func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { +func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { ctx, cancel := context.WithCancel(ctx) // Flushing out all the callbacks is tricky without knowing how many there @@ -162,8 +176,10 @@ func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener } // Store stores the package info for dir. -func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { +func (d *DirInfoCache) Store(dir string, info directoryPackageInfo) { d.mu.Lock() + // TODO(rfindley, golang/go#59216): should we overwrite an existing entry? + // That seems incorrect as the cache should be idempotent. _, old := d.dirs[dir] d.dirs[dir] = &info var listeners []cacheListener @@ -180,7 +196,7 @@ func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { } // Load returns a copy of the directoryPackageInfo for absolute directory dir. -func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) { +func (d *DirInfoCache) Load(dir string) (directoryPackageInfo, bool) { d.mu.Lock() defer d.mu.Unlock() info, ok := d.dirs[dir] @@ -191,7 +207,7 @@ func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) { } // Keys returns the keys currently present in d. -func (d *dirInfoCache) Keys() (keys []string) { +func (d *DirInfoCache) Keys() (keys []string) { d.mu.Lock() defer d.mu.Unlock() for key := range d.dirs { @@ -200,7 +216,7 @@ func (d *dirInfoCache) Keys() (keys []string) { return keys } -func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { +func (d *DirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { if loaded, err := info.reachedStatus(nameLoaded); loaded { return info.packageName, err } @@ -213,7 +229,7 @@ func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, erro return info.packageName, info.err } -func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { +func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { if reached, _ := info.reachedStatus(exportsLoaded); reached { return info.packageName, info.exports, info.err } @@ -234,3 +250,81 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d d.Store(info.dir, info) return info.packageName, info.exports, info.err } + +// ScanModuleCache walks the given directory, which must be a GOMODCACHE value, +// for directory package information, storing the results in cache. +func ScanModuleCache(dir string, cache *DirInfoCache, logf func(string, ...any)) { + // Note(rfindley): it's hard to see, but this function attempts to implement + // just the side effects on cache of calling PrimeCache with a ProcessEnv + // that has the given dir as its GOMODCACHE. + // + // Teasing out the control flow, we see that we can avoid any handling of + // vendor/ and can infer module info entirely from the path, simplifying the + // logic here. + + root := gopathwalk.Root{ + Path: filepath.Clean(dir), + Type: gopathwalk.RootModuleCache, + } + + directoryInfo := func(root gopathwalk.Root, dir string) directoryPackageInfo { + // This is a copy of ModuleResolver.scanDirForPackage, trimmed down to + // logic that applies to a module cache directory. 
+ + subdir := "" + if dir != root.Path { + subdir = dir[len(root.Path)+len("/"):] + } + + matches := modCacheRegexp.FindStringSubmatch(subdir) + if len(matches) == 0 { + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("invalid module cache path: %v", subdir), + } + } + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) + if err != nil { + if logf != nil { + logf("decoding module cache path %q: %v", subdir, err) + } + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), + } + } + importPath := path.Join(modPath, filepath.ToSlash(matches[3])) + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + modName := readModName(filepath.Join(modDir, "go.mod")) + return directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: importPath, + moduleDir: modDir, + moduleName: modName, + } + } + + add := func(root gopathwalk.Root, dir string) { + info := directoryInfo(root, dir) + cache.Store(info.dir, info) + } + + skip := func(_ gopathwalk.Root, dir string) bool { + // Skip directories that have already been scanned. + // + // Note that gopathwalk only adds "package" directories, which must contain + // a .go file, and all such package directories in the module cache are + // immutable. So if we can load a dir, it can be skipped. + info, ok := cache.Load(dir) + if !ok { + return false + } + packageScanned, _ := info.reachedStatus(directoryScanned) + return packageScanned + } + + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: logf, ModulesEnabled: true}) +} diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go index 31a75949cd..8db24df2ff 100644 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -93,6 +93,7 @@ var stdlib = map[string][]string{ "Compare", "Contains", "ContainsAny", + "ContainsFunc", "ContainsRune", "Count", "Cut", @@ -147,6 +148,12 @@ var stdlib = map[string][]string{ "TrimSpace", "TrimSuffix", }, + "cmp": { + "Compare", + "Less", + "Or", + "Ordered", + }, "compress/bzip2": { "NewReader", "StructuralError", @@ -228,6 +235,7 @@ var stdlib = map[string][]string{ "Ring", }, "context": { + "AfterFunc", "Background", "CancelCauseFunc", "CancelFunc", @@ -239,8 +247,11 @@ var stdlib = map[string][]string{ "WithCancel", "WithCancelCause", "WithDeadline", + "WithDeadlineCause", "WithTimeout", + "WithTimeoutCause", "WithValue", + "WithoutCancel", }, "crypto": { "BLAKE2b_256", @@ -445,6 +456,7 @@ var stdlib = map[string][]string{ "XORBytes", }, "crypto/tls": { + "AlertError", "Certificate", "CertificateRequestInfo", "CertificateVerificationError", @@ -476,6 +488,7 @@ var stdlib = map[string][]string{ "LoadX509KeyPair", "NewLRUClientSessionCache", "NewListener", + "NewResumptionState", "NoClientCert", "PKCS1WithSHA1", "PKCS1WithSHA256", @@ -484,6 +497,27 @@ var stdlib = map[string][]string{ "PSSWithSHA256", "PSSWithSHA384", "PSSWithSHA512", + "ParseSessionState", + "QUICClient", + "QUICConfig", + "QUICConn", + "QUICEncryptionLevel", + "QUICEncryptionLevelApplication", + "QUICEncryptionLevelEarly", + "QUICEncryptionLevelHandshake", + "QUICEncryptionLevelInitial", + "QUICEvent", + "QUICEventKind", + "QUICHandshakeDone", + "QUICNoEvent", + "QUICRejectedEarlyData", + "QUICServer", + 
"QUICSessionTicketOptions", + "QUICSetReadSecret", + "QUICSetWriteSecret", + "QUICTransportParameters", + "QUICTransportParametersRequired", + "QUICWriteData", "RecordHeaderError", "RenegotiateFreelyAsClient", "RenegotiateNever", @@ -493,6 +527,7 @@ var stdlib = map[string][]string{ "RequireAndVerifyClientCert", "RequireAnyClientCert", "Server", + "SessionState", "SignatureScheme", "TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", @@ -523,6 +558,7 @@ var stdlib = map[string][]string{ "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_RC4_128_SHA", "VerifyClientCertIfGiven", + "VersionName", "VersionSSL30", "VersionTLS10", "VersionTLS11", @@ -597,6 +633,8 @@ var stdlib = map[string][]string{ "NameMismatch", "NewCertPool", "NotAuthorizedToSign", + "OID", + "OIDFromInts", "PEMCipher", "PEMCipher3DES", "PEMCipherAES128", @@ -618,6 +656,7 @@ var stdlib = map[string][]string{ "PureEd25519", "RSA", "RevocationList", + "RevocationListEntry", "SHA1WithRSA", "SHA256WithRSA", "SHA256WithRSAPSS", @@ -670,6 +709,7 @@ var stdlib = map[string][]string{ "LevelWriteCommitted", "Named", "NamedArg", + "Null", "NullBool", "NullByte", "NullFloat64", @@ -1002,10 +1042,42 @@ var stdlib = map[string][]string{ "COMPRESS_LOOS", "COMPRESS_LOPROC", "COMPRESS_ZLIB", + "COMPRESS_ZSTD", "Chdr32", "Chdr64", "Class", "CompressionType", + "DF_1_CONFALT", + "DF_1_DIRECT", + "DF_1_DISPRELDNE", + "DF_1_DISPRELPND", + "DF_1_EDITED", + "DF_1_ENDFILTEE", + "DF_1_GLOBAL", + "DF_1_GLOBAUDIT", + "DF_1_GROUP", + "DF_1_IGNMULDEF", + "DF_1_INITFIRST", + "DF_1_INTERPOSE", + "DF_1_KMOD", + "DF_1_LOADFLTR", + "DF_1_NOCOMMON", + "DF_1_NODEFLIB", + "DF_1_NODELETE", + "DF_1_NODIRECT", + "DF_1_NODUMP", + "DF_1_NOHDR", + "DF_1_NOKSYMS", + "DF_1_NOOPEN", + "DF_1_NORELOC", + "DF_1_NOW", + "DF_1_ORIGIN", + "DF_1_PIE", + "DF_1_SINGLETON", + "DF_1_STUB", + "DF_1_SYMINTPOSE", + "DF_1_TRANS", + "DF_1_WEAKFILTER", "DF_BIND_NOW", "DF_ORIGIN", "DF_STATIC_TLS", @@ -1144,6 +1216,7 @@ var stdlib = map[string][]string{ "Dyn32", "Dyn64", "DynFlag", + "DynFlag1", "DynTag", "EI_ABIVERSION", "EI_CLASS", @@ -1852,6 +1925,7 @@ var stdlib = map[string][]string{ "R_LARCH_32", "R_LARCH_32_PCREL", "R_LARCH_64", + "R_LARCH_64_PCREL", "R_LARCH_ABS64_HI12", "R_LARCH_ABS64_LO20", "R_LARCH_ABS_HI20", @@ -1859,12 +1933,17 @@ var stdlib = map[string][]string{ "R_LARCH_ADD16", "R_LARCH_ADD24", "R_LARCH_ADD32", + "R_LARCH_ADD6", "R_LARCH_ADD64", "R_LARCH_ADD8", + "R_LARCH_ADD_ULEB128", + "R_LARCH_ALIGN", "R_LARCH_B16", "R_LARCH_B21", "R_LARCH_B26", + "R_LARCH_CFA", "R_LARCH_COPY", + "R_LARCH_DELETE", "R_LARCH_GNU_VTENTRY", "R_LARCH_GNU_VTINHERIT", "R_LARCH_GOT64_HI12", @@ -1884,6 +1963,7 @@ var stdlib = map[string][]string{ "R_LARCH_PCALA64_LO20", "R_LARCH_PCALA_HI20", "R_LARCH_PCALA_LO12", + "R_LARCH_PCREL20_S2", "R_LARCH_RELATIVE", "R_LARCH_RELAX", "R_LARCH_SOP_ADD", @@ -1914,8 +1994,10 @@ var stdlib = map[string][]string{ "R_LARCH_SUB16", "R_LARCH_SUB24", "R_LARCH_SUB32", + "R_LARCH_SUB6", "R_LARCH_SUB64", "R_LARCH_SUB8", + "R_LARCH_SUB_ULEB128", "R_LARCH_TLS_DTPMOD32", "R_LARCH_TLS_DTPMOD64", "R_LARCH_TLS_DTPREL32", @@ -1966,6 +2048,7 @@ var stdlib = map[string][]string{ "R_MIPS_LO16", "R_MIPS_NONE", "R_MIPS_PC16", + "R_MIPS_PC32", "R_MIPS_PJUMP", "R_MIPS_REL16", "R_MIPS_REL32", @@ -2111,6 +2194,7 @@ var stdlib = map[string][]string{ "R_PPC64_REL16_LO", "R_PPC64_REL24", "R_PPC64_REL24_NOTOC", + "R_PPC64_REL24_P9NOTOC", "R_PPC64_REL30", "R_PPC64_REL32", "R_PPC64_REL64", @@ -2848,6 +2932,7 @@ var stdlib = map[string][]string{ "MaxVarintLen16", "MaxVarintLen32", 
"MaxVarintLen64", + "NativeEndian", "PutUvarint", "PutVarint", "Read", @@ -2881,6 +2966,8 @@ var stdlib = map[string][]string{ "RegisterName", }, "encoding/hex": { + "AppendDecode", + "AppendEncode", "Decode", "DecodeString", "DecodedLen", @@ -2963,6 +3050,7 @@ var stdlib = map[string][]string{ }, "errors": { "As", + "ErrUnsupported", "Is", "Join", "New", @@ -2989,6 +3077,7 @@ var stdlib = map[string][]string{ "Arg", "Args", "Bool", + "BoolFunc", "BoolVar", "CommandLine", "ContinueOnError", @@ -3119,6 +3208,7 @@ var stdlib = map[string][]string{ "Inspect", "InterfaceType", "IsExported", + "IsGenerated", "KeyValueExpr", "LabeledStmt", "Lbl", @@ -3159,6 +3249,7 @@ var stdlib = map[string][]string{ "TypeSpec", "TypeSwitchStmt", "UnaryExpr", + "Unparen", "ValueSpec", "Var", "Visitor", @@ -3169,6 +3260,7 @@ var stdlib = map[string][]string{ "ArchChar", "Context", "Default", + "Directive", "FindOnly", "IgnoreVendor", "Import", @@ -3184,6 +3276,7 @@ var stdlib = map[string][]string{ "go/build/constraint": { "AndExpr", "Expr", + "GoVersion", "IsGoBuild", "IsPlusBuild", "NotExpr", @@ -3416,6 +3509,7 @@ var stdlib = map[string][]string{ "XOR_ASSIGN", }, "go/types": { + "Alias", "ArgumentError", "Array", "AssertableTo", @@ -3483,6 +3577,7 @@ var stdlib = map[string][]string{ "MethodVal", "MissingMethod", "Named", + "NewAlias", "NewArray", "NewChan", "NewChecker", @@ -3551,6 +3646,7 @@ var stdlib = map[string][]string{ "Uint64", "Uint8", "Uintptr", + "Unalias", "Union", "Universe", "Unsafe", @@ -3567,6 +3663,11 @@ var stdlib = map[string][]string{ "WriteSignature", "WriteType", }, + "go/version": { + "Compare", + "IsValid", + "Lang", + }, "hash": { "Hash", "Hash32", @@ -3626,6 +3727,7 @@ var stdlib = map[string][]string{ "ErrBadHTML", "ErrBranchEnd", "ErrEndContext", + "ErrJSTemplate", "ErrNoSuchTemplate", "ErrOutputContext", "ErrPartialCharset", @@ -3870,6 +3972,8 @@ var stdlib = map[string][]string{ "FileInfo", "FileInfoToDirEntry", "FileMode", + "FormatDirEntry", + "FormatFileInfo", "Glob", "GlobFS", "ModeAppend", @@ -3942,6 +4046,79 @@ var stdlib = map[string][]string{ "SetPrefix", "Writer", }, + "log/slog": { + "Any", + "AnyValue", + "Attr", + "Bool", + "BoolValue", + "Debug", + "DebugContext", + "Default", + "Duration", + "DurationValue", + "Error", + "ErrorContext", + "Float64", + "Float64Value", + "Group", + "GroupValue", + "Handler", + "HandlerOptions", + "Info", + "InfoContext", + "Int", + "Int64", + "Int64Value", + "IntValue", + "JSONHandler", + "Kind", + "KindAny", + "KindBool", + "KindDuration", + "KindFloat64", + "KindGroup", + "KindInt64", + "KindLogValuer", + "KindString", + "KindTime", + "KindUint64", + "Level", + "LevelDebug", + "LevelError", + "LevelInfo", + "LevelKey", + "LevelVar", + "LevelWarn", + "Leveler", + "Log", + "LogAttrs", + "LogValuer", + "Logger", + "MessageKey", + "New", + "NewJSONHandler", + "NewLogLogger", + "NewRecord", + "NewTextHandler", + "Record", + "SetDefault", + "SetLogLoggerLevel", + "Source", + "SourceKey", + "String", + "StringValue", + "TextHandler", + "Time", + "TimeKey", + "TimeValue", + "Uint64", + "Uint64Value", + "Value", + "Warn", + "WarnContext", + "With", + }, "log/syslog": { "Dial", "LOG_ALERT", @@ -3977,6 +4154,13 @@ var stdlib = map[string][]string{ "Priority", "Writer", }, + "maps": { + "Clone", + "Copy", + "DeleteFunc", + "Equal", + "EqualFunc", + }, "math": { "Abs", "Acos", @@ -4209,6 +4393,35 @@ var stdlib = map[string][]string{ "Uint64", "Zipf", }, + "math/rand/v2": { + "ChaCha8", + "ExpFloat64", + "Float32", + "Float64", + "Int", + 
"Int32", + "Int32N", + "Int64", + "Int64N", + "IntN", + "N", + "New", + "NewChaCha8", + "NewPCG", + "NewZipf", + "NormFloat64", + "PCG", + "Perm", + "Rand", + "Shuffle", + "Source", + "Uint32", + "Uint32N", + "Uint64", + "Uint64N", + "UintN", + "Zipf", + }, "mime": { "AddExtensionType", "BEncoding", @@ -4371,6 +4584,7 @@ var stdlib = map[string][]string{ "ErrNoLocation", "ErrNotMultipart", "ErrNotSupported", + "ErrSchemeMismatch", "ErrServerClosed", "ErrShortBody", "ErrSkipAltProtocol", @@ -4381,6 +4595,7 @@ var stdlib = map[string][]string{ "FS", "File", "FileServer", + "FileServerFS", "FileSystem", "Flusher", "Get", @@ -4407,6 +4622,7 @@ var stdlib = map[string][]string{ "MethodPut", "MethodTrace", "NewFileTransport", + "NewFileTransportFS", "NewRequest", "NewRequestWithContext", "NewResponseController", @@ -4440,6 +4656,7 @@ var stdlib = map[string][]string{ "Serve", "ServeContent", "ServeFile", + "ServeFileFS", "ServeMux", "ServeTLS", "Server", @@ -4947,6 +5164,7 @@ var stdlib = map[string][]string{ "StructTag", "Swapper", "Type", + "TypeFor", "TypeOf", "Uint", "Uint16", @@ -5084,6 +5302,8 @@ var stdlib = map[string][]string{ "NumCPU", "NumCgoCall", "NumGoroutine", + "PanicNilError", + "Pinner", "ReadMemStats", "ReadTrace", "SetBlockProfileRate", @@ -5172,6 +5392,38 @@ var stdlib = map[string][]string{ "Task", "WithRegion", }, + "slices": { + "BinarySearch", + "BinarySearchFunc", + "Clip", + "Clone", + "Compact", + "CompactFunc", + "Compare", + "CompareFunc", + "Concat", + "Contains", + "ContainsFunc", + "Delete", + "DeleteFunc", + "Equal", + "EqualFunc", + "Grow", + "Index", + "IndexFunc", + "Insert", + "IsSorted", + "IsSortedFunc", + "Max", + "MaxFunc", + "Min", + "MinFunc", + "Replace", + "Reverse", + "Sort", + "SortFunc", + "SortStableFunc", + }, "sort": { "Find", "Float64Slice", @@ -5242,6 +5494,7 @@ var stdlib = map[string][]string{ "Compare", "Contains", "ContainsAny", + "ContainsFunc", "ContainsRune", "Count", "Cut", @@ -5299,6 +5552,9 @@ var stdlib = map[string][]string{ "Mutex", "NewCond", "Once", + "OnceFunc", + "OnceValue", + "OnceValues", "Pool", "RWMutex", "WaitGroup", @@ -9135,10 +9391,12 @@ var stdlib = map[string][]string{ "SYS_AIO_CANCEL", "SYS_AIO_ERROR", "SYS_AIO_FSYNC", + "SYS_AIO_MLOCK", "SYS_AIO_READ", "SYS_AIO_RETURN", "SYS_AIO_SUSPEND", "SYS_AIO_SUSPEND_NOCANCEL", + "SYS_AIO_WAITCOMPLETE", "SYS_AIO_WRITE", "SYS_ALARM", "SYS_ARCH_PRCTL", @@ -9368,6 +9626,7 @@ var stdlib = map[string][]string{ "SYS_GET_MEMPOLICY", "SYS_GET_ROBUST_LIST", "SYS_GET_THREAD_AREA", + "SYS_GSSD_SYSCALL", "SYS_GTTY", "SYS_IDENTITYSVC", "SYS_IDLE", @@ -9411,8 +9670,24 @@ var stdlib = map[string][]string{ "SYS_KLDSYM", "SYS_KLDUNLOAD", "SYS_KLDUNLOADF", + "SYS_KMQ_NOTIFY", + "SYS_KMQ_OPEN", + "SYS_KMQ_SETATTR", + "SYS_KMQ_TIMEDRECEIVE", + "SYS_KMQ_TIMEDSEND", + "SYS_KMQ_UNLINK", "SYS_KQUEUE", "SYS_KQUEUE1", + "SYS_KSEM_CLOSE", + "SYS_KSEM_DESTROY", + "SYS_KSEM_GETVALUE", + "SYS_KSEM_INIT", + "SYS_KSEM_OPEN", + "SYS_KSEM_POST", + "SYS_KSEM_TIMEDWAIT", + "SYS_KSEM_TRYWAIT", + "SYS_KSEM_UNLINK", + "SYS_KSEM_WAIT", "SYS_KTIMER_CREATE", "SYS_KTIMER_DELETE", "SYS_KTIMER_GETOVERRUN", @@ -9504,11 +9779,14 @@ var stdlib = map[string][]string{ "SYS_NFSSVC", "SYS_NFSTAT", "SYS_NICE", + "SYS_NLM_SYSCALL", "SYS_NLSTAT", "SYS_NMOUNT", "SYS_NSTAT", "SYS_NTP_ADJTIME", "SYS_NTP_GETTIME", + "SYS_NUMA_GETAFFINITY", + "SYS_NUMA_SETAFFINITY", "SYS_OABI_SYSCALL_BASE", "SYS_OBREAK", "SYS_OLDFSTAT", @@ -9891,6 +10169,7 @@ var stdlib = map[string][]string{ "SYS___ACL_SET_FD", "SYS___ACL_SET_FILE", 
"SYS___ACL_SET_LINK", + "SYS___CAP_RIGHTS_GET", "SYS___CLONE", "SYS___DISABLE_THREADSIGNAL", "SYS___GETCWD", @@ -10574,6 +10853,7 @@ var stdlib = map[string][]string{ "Short", "T", "TB", + "Testing", "Verbose", }, "testing/fstest": { @@ -10603,6 +10883,10 @@ var stdlib = map[string][]string{ "SetupError", "Value", }, + "testing/slogtest": { + "Run", + "TestHandler", + }, "text/scanner": { "Char", "Comment", @@ -10826,6 +11110,7 @@ var stdlib = map[string][]string{ "Cs", "Cuneiform", "Cypriot", + "Cypro_Minoan", "Cyrillic", "Dash", "Deprecated", @@ -10889,6 +11174,7 @@ var stdlib = map[string][]string{ "Kaithi", "Kannada", "Katakana", + "Kawi", "Kayah_Li", "Kharoshthi", "Khitan_Small_Script", @@ -10943,6 +11229,7 @@ var stdlib = map[string][]string{ "Myanmar", "N", "Nabataean", + "Nag_Mundari", "Nandinagari", "Nd", "New_Tai_Lue", @@ -10964,6 +11251,7 @@ var stdlib = map[string][]string{ "Old_Sogdian", "Old_South_Arabian", "Old_Turkic", + "Old_Uyghur", "Oriya", "Osage", "Osmanya", @@ -11038,6 +11326,7 @@ var stdlib = map[string][]string{ "Tai_Viet", "Takri", "Tamil", + "Tangsa", "Tangut", "Telugu", "Terminal_Punctuation", @@ -11052,6 +11341,7 @@ var stdlib = map[string][]string{ "ToLower", "ToTitle", "ToUpper", + "Toto", "TurkishCase", "Ugaritic", "Unified_Ideograph", @@ -11061,6 +11351,7 @@ var stdlib = map[string][]string{ "Vai", "Variation_Selector", "Version", + "Vithkuqi", "Wancho", "Warang_Citi", "White_Space", diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index d9950b1f0b..44719de173 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,10 +5,6 @@ // Package packagesinternal exposes internal-only fields from go/packages. package packagesinternal -import ( - "golang.org/x/tools/internal/gocommand" -) - var GetForTest = func(p interface{}) string { return "" } var GetDepsErrors = func(p interface{}) []*PackageError { return nil } @@ -18,10 +14,6 @@ type PackageError struct { Err string // the error itself } -var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } - -var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} - var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors var ForTest int // must be set as a LoadMode to call GetForTest diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go index a3fb2d4f29..ff9437a36c 100644 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go @@ -7,7 +7,9 @@ package tokeninternal import ( + "fmt" "go/token" + "sort" "sync" "unsafe" ) @@ -32,28 +34,104 @@ func GetLines(file *token.File) []int { lines []int _ []struct{} } - type tokenFile118 struct { - _ *token.FileSet // deleted in go1.19 - tokenFile119 + + if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { + panic("unexpected token.File size") } + var ptr *tokenFile119 + type uP = unsafe.Pointer + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines +} + +// AddExistingFiles adds the specified files to the FileSet if they +// are not already present. It panics if any pair of files in the +// resulting FileSet would overlap. 
+func AddExistingFiles(fset *token.FileSet, files []*token.File) { + // Punch through the FileSet encapsulation. + type tokenFileSet struct { + // This type remained essentially consistent from go1.16 to go1.21. + mutex sync.RWMutex + base int + files []*token.File + _ *token.File // changed to atomic.Pointer[token.File] in go1.19 + } + + // If the size of token.FileSet changes, this will fail to compile. + const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) + var _ [-delta * delta]int type uP = unsafe.Pointer - switch unsafe.Sizeof(*file) { - case unsafe.Sizeof(tokenFile118{}): - var ptr *tokenFile118 - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines - - case unsafe.Sizeof(tokenFile119{}): - var ptr *tokenFile119 - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines - - default: - panic("unexpected token.File size") + var ptr *tokenFileSet + *(*uP)(uP(&ptr)) = uP(fset) + ptr.mutex.Lock() + defer ptr.mutex.Unlock() + + // Merge and sort. + newFiles := append(ptr.files, files...) + sort.Slice(newFiles, func(i, j int) bool { + return newFiles[i].Base() < newFiles[j].Base() + }) + + // Reject overlapping files. + // Discard adjacent identical files. + out := newFiles[:0] + for i, file := range newFiles { + if i > 0 { + prev := newFiles[i-1] + if file == prev { + continue + } + if prev.Base()+prev.Size()+1 > file.Base() { + panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", + prev.Name(), prev.Base(), prev.Base()+prev.Size(), + file.Name(), file.Base(), file.Base()+file.Size())) + } + } + out = append(out, file) } + newFiles = out + + ptr.files = newFiles + + // Advance FileSet.Base(). + if len(newFiles) > 0 { + last := newFiles[len(newFiles)-1] + newBase := last.Base() + last.Size() + 1 + if ptr.base < newBase { + ptr.base = newBase + } + } +} + +// FileSetFor returns a new FileSet containing a sequence of new Files with +// the same base, size, and line as the input files, for use in APIs that +// require a FileSet. +// +// Precondition: the input files must be non-overlapping, and sorted in order +// of their Base. +func FileSetFor(files ...*token.File) *token.FileSet { + fset := token.NewFileSet() + for _, f := range files { + f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) + lines := GetLines(f) + f2.SetLines(lines) + } + return fset +} + +// CloneFileSet creates a new FileSet holding all files in fset. It does not +// create copies of the token.Files in fset: they are added to the resulting +// FileSet unmodified. +func CloneFileSet(fset *token.FileSet) *token.FileSet { + var files []*token.File + fset.Iterate(func(f *token.File) bool { + files = append(files, f) + return true + }) + newFileSet := token.NewFileSet() + AddExistingFiles(newFileSet, files) + return newFileSet } diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 25a1426d30..8c3a42dc31 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -2,20 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package typeparams contains common utilities for writing tools that interact -// with generic Go code, as introduced with Go 1.18. -// -// Many of the types and functions in this package are proxies for the new APIs -// introduced in the standard library with Go 1.18. 
For example, the
-// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec
-// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go
-// versions older than 1.18 these helpers are implemented as stubs, allowing
-// users of this package to write code that handles generic constructs inline,
-// even if the Go version being used to compile does not support generics.
-//
-// Additionally, this package contains common utilities for working with the
-// new generic constructs, to supplement the standard library APIs. Notably,
-// the StructuralTerms API computes a minimal representation of the structural
+// Package typeparams contains common utilities for writing tools that
+// interact with generic Go code, as introduced with Go 1.18. It
+// supplements the standard library APIs. Notably, the StructuralTerms
+// API computes a minimal representation of the structural
 // restrictions on a type parameter.
 //
 // An external version of these APIs is available in the
@@ -23,9 +13,13 @@
 package typeparams
 
 import (
+	"fmt"
 	"go/ast"
 	"go/token"
 	"go/types"
+
+	"golang.org/x/tools/internal/aliases"
+	"golang.org/x/tools/internal/typesinternal"
 )
 
 // UnpackIndexExpr extracts data from AST nodes that represent index
@@ -41,7 +35,7 @@ func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Ex
 	switch e := n.(type) {
 	case *ast.IndexExpr:
 		return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
-	case *IndexListExpr:
+	case *ast.IndexListExpr:
 		return e.X, e.Lbrack, e.Indices, e.Rbrack
 	}
 	return nil, token.NoPos, nil, token.NoPos
@@ -62,7 +56,7 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke
 			Rbrack: rbrack,
 		}
 	default:
-		return &IndexListExpr{
+		return &ast.IndexListExpr{
 			X:       x,
 			Lbrack:  lbrack,
 			Indices: indices,
@@ -71,9 +65,9 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke
 	}
 }
 
-// IsTypeParam reports whether t is a type parameter.
+// IsTypeParam reports whether t is a type parameter (or an alias of one).
 func IsTypeParam(t types.Type) bool {
-	_, ok := t.(*TypeParam)
+	_, ok := aliases.Unalias(t).(*types.TypeParam)
 	return ok
 }
@@ -87,25 +81,44 @@ func IsTypeParam(t types.Type) bool
 func OriginMethod(fn *types.Func) *types.Func {
 	recv := fn.Type().(*types.Signature).Recv()
 	if recv == nil {
 		return fn
 	}
-	base := recv.Type()
-	p, isPtr := base.(*types.Pointer)
-	if isPtr {
-		base = p.Elem()
-	}
-	named, isNamed := base.(*types.Named)
-	if !isNamed {
+	_, named := typesinternal.ReceiverNamed(recv)
+	if named == nil {
 		// Receiver is a *types.Interface.
 		return fn
 	}
-	if ForNamed(named).Len() == 0 {
+	if named.TypeParams().Len() == 0 {
 		// Receiver base has no type parameters, so we can avoid the lookup below.
 		return fn
 	}
-	orig := NamedTypeOrigin(named)
+	orig := named.Origin()
 	gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
+
+	// This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In:
+	//	package p
+	//	type T *int
+	//	func (*T) f() {}
+	// LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}.
+	// Here we make them consistent by force.
+	// (The go/types bug is general, but this workaround is reached only
+	// for generic T thanks to the early return above.)
+	if gfn == nil {
+		mset := types.NewMethodSet(types.NewPointer(orig))
+		for i := 0; i < mset.Len(); i++ {
+			m := mset.At(i)
+			if m.Obj().Id() == fn.Id() {
+				gfn = m.Obj()
+				break
+			}
+		}
+	}
+
+	// In golang/go#61196, we observe another crash, this time inexplicable.
+ if gfn == nil { + panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods())) + } + return gfn.(*types.Func) } @@ -132,7 +145,10 @@ func OriginMethod(fn *types.Func) *types.Func { // // In this case, GenericAssignableTo reports that instantiations of Container // are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { +func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { + V = aliases.Unalias(V) + T = aliases.Unalias(T) + // If V and T are not both named, or do not have matching non-empty type // parameter lists, fall back on types.AssignableTo. @@ -142,9 +158,9 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { return types.AssignableTo(V, T) } - vtparams := ForNamed(VN) - ttparams := ForNamed(TN) - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { + vtparams := VN.TypeParams() + ttparams := TN.TypeParams() + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { return types.AssignableTo(V, T) } @@ -157,7 +173,7 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { // Minor optimization: ensure we share a context across the two // instantiations below. if ctxt == nil { - ctxt = NewContext() + ctxt = types.NewContext() } var targs []types.Type @@ -165,12 +181,12 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { targs = append(targs, vtparams.At(i)) } - vinst, err := Instantiate(ctxt, V, targs, true) + vinst, err := types.Instantiate(ctxt, V, targs, true) if err != nil { panic("type parameters should satisfy their own constraints") } - tinst, err := Instantiate(ctxt, T, targs, true) + tinst, err := types.Instantiate(ctxt, T, targs, true) if err != nil { return false } diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 993135ec90..e66e9d0f48 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -5,7 +5,10 @@ package typeparams import ( + "fmt" "go/types" + + "golang.org/x/tools/internal/aliases" ) // CoreType returns the core type of T or nil if T does not have a core type. @@ -81,13 +84,13 @@ func CoreType(T types.Type) types.Type { // restrictions may be arbitrarily complex. For example, consider the // following: // -// type A interface{ ~string|~[]byte } +// type A interface{ ~string|~[]byte } // -// type B interface{ int|string } +// type B interface{ int|string } // -// type C interface { ~string|~int } +// type C interface { ~string|~int } // -// type T[P interface{ A|B; C }] int +// type T[P interface{ A|B; C }] int // // In this example, the structural type restriction of P is ~string|int: A|B // expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, @@ -108,15 +111,27 @@ func CoreType(T types.Type) types.Type { // // _NormalTerms makes no guarantees about the order of terms, except that it // is deterministic. 
-func _NormalTerms(typ types.Type) ([]*Term, error) { - switch typ := typ.(type) { - case *TypeParam: +func _NormalTerms(typ types.Type) ([]*types.Term, error) { + switch typ := aliases.Unalias(typ).(type) { + case *types.TypeParam: return StructuralTerms(typ) - case *Union: + case *types.Union: return UnionTermSet(typ) case *types.Interface: return InterfaceTermSet(typ) default: - return []*Term{NewTerm(false, typ)}, nil + return []*types.Term{types.NewTerm(false, typ)}, nil + } +} + +// MustDeref returns the type of the variable pointed to by t. +// It panics if t's core type is not a pointer. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. +func MustDeref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() } + panic(fmt.Sprintf("%v is not a pointer", t)) } diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go deleted file mode 100644 index 18212390e1..0000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = false diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go deleted file mode 100644 index d67148823c..0000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -// Note: this constant is in a separate file as this is the only acceptable -// diff between the <1.18 API of this package and the 1.18 API. - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = true diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index 9c631b6512..93c80fdc96 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -60,7 +60,7 @@ var ErrEmptyTypeSet = errors.New("empty type set") // // StructuralTerms makes no guarantees about the order of terms, except that it // is deterministic. -func StructuralTerms(tparam *TypeParam) ([]*Term, error) { +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { constraint := tparam.Constraint() if constraint == nil { return nil, fmt.Errorf("%s has nil constraint", tparam) @@ -78,7 +78,7 @@ func StructuralTerms(tparam *TypeParam) ([]*Term, error) { // // See the documentation of StructuralTerms for more information on // normalization. 
-func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { return computeTermSet(iface) } @@ -88,11 +88,11 @@ func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { // // See the documentation of StructuralTerms for more information on // normalization. -func UnionTermSet(union *Union) ([]*Term, error) { +func UnionTermSet(union *types.Union) ([]*types.Term, error) { return computeTermSet(union) } -func computeTermSet(typ types.Type) ([]*Term, error) { +func computeTermSet(typ types.Type) ([]*types.Term, error) { tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) if err != nil { return nil, err @@ -103,9 +103,9 @@ func computeTermSet(typ types.Type) ([]*Term, error) { if tset.terms.isAll() { return nil, nil } - var terms []*Term + var terms []*types.Term for _, term := range tset.terms { - terms = append(terms, NewTerm(term.tilde, term.typ)) + terms = append(terms, types.NewTerm(term.tilde, term.typ)) } return terms, nil } @@ -162,7 +162,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in tset.terms = allTermlist for i := 0; i < u.NumEmbeddeds(); i++ { embedded := u.EmbeddedType(i) - if _, ok := embedded.Underlying().(*TypeParam); ok { + if _, ok := embedded.Underlying().(*types.TypeParam); ok { return nil, fmt.Errorf("invalid embedded type %T", embedded) } tset2, err := computeTermSetInternal(embedded, seen, depth+1) @@ -171,7 +171,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in } tset.terms = tset.terms.intersect(tset2.terms) } - case *Union: + case *types.Union: // The term set of a union is the union of term sets of its terms. tset.terms = nil for i := 0; i < u.Len(); i++ { @@ -184,7 +184,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in return nil, err } terms = tset2.terms - case *TypeParam, *Union: + case *types.TypeParam, *types.Union: // A stand-alone type parameter or union is not permitted as union // term. return nil, fmt.Errorf("invalid union term %T", t) @@ -199,7 +199,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) } } - case *TypeParam: + case *types.TypeParam: panic("unreachable") default: // For all other types, the term set is just a single non-tilde term diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go index 933106a23d..cbd12f8013 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -30,7 +30,7 @@ func (xl termlist) String() string { var buf bytes.Buffer for i, x := range xl { if i > 0 { - buf.WriteString(" ∪ ") + buf.WriteString(" | ") } buf.WriteString(x.String()) } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go deleted file mode 100644 index b4788978ff..0000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -import ( - "go/ast" - "go/token" - "go/types" -) - -func unsupported() { - panic("type parameters are unsupported at this go version") -} - -// IndexListExpr is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type IndexListExpr struct { - ast.Expr - X ast.Expr // expression - Lbrack token.Pos // position of "[" - Indices []ast.Expr // index expressions - Rbrack token.Pos // position of "]" -} - -// ForTypeSpec returns an empty field list, as type parameters on not supported -// at this Go version. -func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { - return nil -} - -// ForFuncType returns an empty field list, as type parameters are not -// supported at this Go version. -func ForFuncType(*ast.FuncType) *ast.FieldList { - return nil -} - -// TypeParam is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type TypeParam struct{ types.Type } - -func (*TypeParam) Index() int { unsupported(); return 0 } -func (*TypeParam) Constraint() types.Type { unsupported(); return nil } -func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } - -// TypeParamList is a placeholder for an empty type parameter list. -type TypeParamList struct{} - -func (*TypeParamList) Len() int { return 0 } -func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } - -// TypeList is a placeholder for an empty type list. -type TypeList struct{} - -func (*TypeList) Len() int { return 0 } -func (*TypeList) At(int) types.Type { unsupported(); return nil } - -// NewTypeParam is unsupported at this Go version, and panics. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - unsupported() - return nil -} - -// SetTypeParamConstraint is unsupported at this Go version, and panics. -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - unsupported() -} - -// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or -// typeParams is non-empty. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - if len(recvTypeParams) != 0 || len(typeParams) != 0 { - panic("signatures cannot have type parameters at this Go version") - } - return types.NewSignature(recv, params, results, variadic) -} - -// ForSignature returns an empty slice. -func ForSignature(*types.Signature) *TypeParamList { - return nil -} - -// RecvTypeParams returns a nil slice. -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return nil -} - -// IsComparable returns false, as no interfaces are type-restricted at this Go -// version. -func IsComparable(*types.Interface) bool { - return false -} - -// IsMethodSet returns true, as no interfaces are type-restricted at this Go -// version. -func IsMethodSet(*types.Interface) bool { - return true -} - -// IsImplicit returns false, as no interfaces are implicit at this Go version. -func IsImplicit(*types.Interface) bool { - return false -} - -// MarkImplicit does nothing, because this Go version does not have implicit -// interfaces. -func MarkImplicit(*types.Interface) {} - -// ForNamed returns an empty type parameter list, as type parameters are not -// supported at this Go version. -func ForNamed(*types.Named) *TypeParamList { - return nil -} - -// SetForNamed panics if tparams is non-empty. 
-func SetForNamed(_ *types.Named, tparams []*TypeParam) { - if len(tparams) > 0 { - unsupported() - } -} - -// NamedTypeArgs returns nil. -func NamedTypeArgs(*types.Named) *TypeList { - return nil -} - -// NamedTypeOrigin is the identity method at this Go version. -func NamedTypeOrigin(named *types.Named) types.Type { - return named -} - -// Term holds information about a structural type restriction. -type Term struct { - tilde bool - typ types.Type -} - -func (m *Term) Tilde() bool { return m.tilde } -func (m *Term) Type() types.Type { return m.typ } -func (m *Term) String() string { - pre := "" - if m.tilde { - pre = "~" - } - return pre + m.typ.String() -} - -// NewTerm is unsupported at this Go version, and panics. -func NewTerm(tilde bool, typ types.Type) *Term { - return &Term{tilde, typ} -} - -// Union is a placeholder type, as type parameters are not supported at this Go -// version. Its methods panic on use. -type Union struct{ types.Type } - -func (*Union) Len() int { return 0 } -func (*Union) Term(i int) *Term { unsupported(); return nil } - -// NewUnion is unsupported at this Go version, and panics. -func NewUnion(terms []*Term) *Union { - unsupported() - return nil -} - -// InitInstanceInfo is a noop at this Go version. -func InitInstanceInfo(*types.Info) {} - -// Instance is a placeholder type, as type parameters are not supported at this -// Go version. -type Instance struct { - TypeArgs *TypeList - Type types.Type -} - -// GetInstances returns a nil map, as type parameters are not supported at this -// Go version. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } - -// Context is a placeholder type, as type parameters are not supported at -// this Go version. -type Context struct{} - -// NewContext returns a placeholder Context instance. -func NewContext() *Context { - return &Context{} -} - -// Instantiate is unsupported on this Go version, and panics. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - unsupported() - return nil, nil -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go deleted file mode 100644 index 114a36b866..0000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -import ( - "go/ast" - "go/types" -) - -// IndexListExpr is an alias for ast.IndexListExpr. -type IndexListExpr = ast.IndexListExpr - -// ForTypeSpec returns n.TypeParams. -func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// ForFuncType returns n.TypeParams. -func ForFuncType(n *ast.FuncType) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// TypeParam is an alias for types.TypeParam -type TypeParam = types.TypeParam - -// TypeParamList is an alias for types.TypeParamList -type TypeParamList = types.TypeParamList - -// TypeList is an alias for types.TypeList -type TypeList = types.TypeList - -// NewTypeParam calls types.NewTypeParam. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - return types.NewTypeParam(name, constraint) -} - -// SetTypeParamConstraint calls tparam.SetConstraint(constraint). 
-func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - tparam.SetConstraint(constraint) -} - -// NewSignatureType calls types.NewSignatureType. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic) -} - -// ForSignature returns sig.TypeParams() -func ForSignature(sig *types.Signature) *TypeParamList { - return sig.TypeParams() -} - -// RecvTypeParams returns sig.RecvTypeParams(). -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return sig.RecvTypeParams() -} - -// IsComparable calls iface.IsComparable(). -func IsComparable(iface *types.Interface) bool { - return iface.IsComparable() -} - -// IsMethodSet calls iface.IsMethodSet(). -func IsMethodSet(iface *types.Interface) bool { - return iface.IsMethodSet() -} - -// IsImplicit calls iface.IsImplicit(). -func IsImplicit(iface *types.Interface) bool { - return iface.IsImplicit() -} - -// MarkImplicit calls iface.MarkImplicit(). -func MarkImplicit(iface *types.Interface) { - iface.MarkImplicit() -} - -// ForNamed extracts the (possibly empty) type parameter object list from -// named. -func ForNamed(named *types.Named) *TypeParamList { - return named.TypeParams() -} - -// SetForNamed sets the type params tparams on n. Each tparam must be of -// dynamic type *types.TypeParam. -func SetForNamed(n *types.Named, tparams []*TypeParam) { - n.SetTypeParams(tparams) -} - -// NamedTypeArgs returns named.TypeArgs(). -func NamedTypeArgs(named *types.Named) *TypeList { - return named.TypeArgs() -} - -// NamedTypeOrigin returns named.Orig(). -func NamedTypeOrigin(named *types.Named) types.Type { - return named.Origin() -} - -// Term is an alias for types.Term. -type Term = types.Term - -// NewTerm calls types.NewTerm. -func NewTerm(tilde bool, typ types.Type) *Term { - return types.NewTerm(tilde, typ) -} - -// Union is an alias for types.Union -type Union = types.Union - -// NewUnion calls types.NewUnion. -func NewUnion(terms []*Term) *Union { - return types.NewUnion(terms) -} - -// InitInstanceInfo initializes info to record information about type and -// function instances. -func InitInstanceInfo(info *types.Info) { - info.Instances = make(map[*ast.Ident]types.Instance) -} - -// Instance is an alias for types.Instance. -type Instance = types.Instance - -// GetInstances returns info.Instances. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { - return info.Instances -} - -// Context is an alias for types.Context. -type Context = types.Context - -// NewContext calls types.NewContext. -func NewContext() *Context { - return types.NewContext() -} - -// Instantiate calls types.Instantiate. 
-func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - return types.Instantiate(ctxt, typ, targs, validate) -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go index 7ddee28d98..7350bb702a 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -10,11 +10,10 @@ import "go/types" // A term describes elementary type sets: // -// ∅: (*term)(nil) == ∅ // set of no types (empty set) -// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) -// T: &term{false, T} == {T} // set of type T -// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t -// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t type term struct { tilde bool // valid if typ != nil typ types.Type diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go new file mode 100644 index 0000000000..fea7c8b75e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -0,0 +1,43 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) + +// ReceiverNamed returns the named type (if any) associated with the +// type of recv, which may be of the form N or *N, or aliases thereof. +// It also reports whether a Pointer was present. +func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { + t := recv.Type() + if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + isPtr = true + t = ptr.Elem() + } + named, _ = aliases.Unalias(t).(*types.Named) + return +} + +// Unpointer returns T given *T or an alias thereof. +// For all other types it is the identity function. +// It does not look at underlying types. +// The result may be an alias. +// +// Use this function to strip off the optional pointer on a receiver +// in a field or method selection, without losing the named type +// (which is needed to compute the method set). +// +// See also [typeparams.MustDeref], which removes one level of +// indirection from the type, regardless of named types (analogous to +// a LOAD instruction). +func Unpointer(t types.Type) types.Type { + if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go index a42b072a67..ef7ea290c0 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.18 -// +build go1.18 - package typesinternal import ( diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go new file mode 100644 index 0000000000..b53f178616 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/features.go @@ -0,0 +1,43 @@ +// Copyright 2023 The Go Authors. 
All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+// This file contains predicates for working with file versions to
+// decide when a tool should consider a language feature enabled.
+
+// Go versions that features in x/tools can be gated on.
+const (
+	Go1_18 = "go1.18"
+	Go1_19 = "go1.19"
+	Go1_20 = "go1.20"
+	Go1_21 = "go1.21"
+	Go1_22 = "go1.22"
+)
+
+// Future is an invalid unknown Go version sometime in the future.
+// Do not use directly with Compare.
+const Future = ""
+
+// AtLeast reports whether the file version v comes after a Go release.
+//
+// Use this predicate to enable a behavior once a certain Go release
+// has happened (and stays enabled in the future).
+func AtLeast(v, release string) bool {
+	if v == Future {
+		return true // an unknown future version is always after the release.
+	}
+	return Compare(Lang(v), Lang(release)) >= 0
+}
+
+// Before reports whether the file version v is strictly before a Go release.
+//
+// Use this predicate to disable a behavior once a certain Go release
+// has happened (and stays disabled in the future).
+func Before(v, release string) bool {
+	if v == Future {
+		return false // an unknown future version happens after the release.
+	}
+	return Compare(Lang(v), Lang(release)) < 0
+}
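For a sense of how these two predicates are meant to be used, here is a minimal sketch of version-gated tool behavior. Note that golang.org/x/tools/internal/versions is an internal package, so this only compiles from inside x/tools itself, and the hard-coded file version stands in for what versions.FileVersion (added later in this patch) would report.

package main

import (
	"fmt"

	"golang.org/x/tools/internal/versions"
)

func main() {
	fileVersion := "go1.21.5" // in a real tool: versions.FileVersion(info, file)

	// Enable a behavior from go1.21 onward; it stays enabled for all
	// later (and unknown future) versions.
	if versions.AtLeast(fileVersion, versions.Go1_21) {
		fmt.Println("go1.21 semantics apply")
	}

	// Disable a legacy behavior once go1.22 has happened; it stays
	// disabled for later versions.
	if versions.Before(fileVersion, versions.Go1_22) {
		fmt.Println("pre-go1.22 fallback still in effect")
	}
}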
diff --git a/vendor/golang.org/x/tools/internal/versions/gover.go b/vendor/golang.org/x/tools/internal/versions/gover.go
new file mode 100644
index 0000000000..bbabcd22e9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/gover.go
@@ -0,0 +1,172 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a fork of internal/gover for use by x/tools until
+// go1.21 and earlier are no longer supported by x/tools.
+
+package versions
+
+import "strings"
+
+// A gover is a parsed Go version: major[.minor[.patch]][kind[pre]]
+// The numbers are the original decimal strings to avoid integer overflows
+// and since there is very little actual math. (Probably overflow doesn't matter in practice,
+// but at the time this code was written, there was an existing test that used
+// go1.99999999999, which does not fit in an int on 32-bit platforms.
+// The "big decimal" representation avoids the problem entirely.)
+type gover struct {
+	major string // decimal
+	minor string // decimal or ""
+	patch string // decimal or ""
+	kind  string // "", "alpha", "beta", "rc"
+	pre   string // decimal or ""
+}
+
+// compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as toolchain versions.
+// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21".
+// Malformed versions compare less than well-formed versions and equal to each other.
+// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
+func compare(x, y string) int {
+	vx := parse(x)
+	vy := parse(y)
+
+	if c := cmpInt(vx.major, vy.major); c != 0 {
+		return c
+	}
+	if c := cmpInt(vx.minor, vy.minor); c != 0 {
+		return c
+	}
+	if c := cmpInt(vx.patch, vy.patch); c != 0 {
+		return c
+	}
+	if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc
+		return c
+	}
+	if c := cmpInt(vx.pre, vy.pre); c != 0 {
+		return c
+	}
+	return 0
+}
+
+// lang returns the Go language version. For example, lang("1.2.3") == "1.2".
+func lang(x string) string {
+	v := parse(x)
+	if v.minor == "" || v.major == "1" && v.minor == "0" {
+		return v.major
+	}
+	return v.major + "." + v.minor
+}
+
+// isValid reports whether the version x is valid.
+func isValid(x string) bool {
+	return parse(x) != gover{}
+}
+
+// parse parses the Go version string x into a version.
+// It returns the zero version if x is malformed.
+func parse(x string) gover {
+	var v gover
+
+	// Parse major version.
+	var ok bool
+	v.major, x, ok = cutInt(x)
+	if !ok {
+		return gover{}
+	}
+	if x == "" {
+		// Interpret "1" as "1.0.0".
+		v.minor = "0"
+		v.patch = "0"
+		return v
+	}
+
+	// Parse . before minor version.
+	if x[0] != '.' {
+		return gover{}
+	}
+
+	// Parse minor version.
+	v.minor, x, ok = cutInt(x[1:])
+	if !ok {
+		return gover{}
+	}
+	if x == "" {
+		// Patch missing is same as "0" for older versions.
+		// Starting in Go 1.21, patch missing is different from explicit .0.
+		if cmpInt(v.minor, "21") < 0 {
+			v.patch = "0"
+		}
+		return v
+	}
+
+	// Parse patch if present.
+	if x[0] == '.' {
+		v.patch, x, ok = cutInt(x[1:])
+		if !ok || x != "" {
+			// Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != "").
+			// Allowing them would be a bit confusing because we already have:
+			//	1.21 < 1.21rc1
+			// But a prerelease of a patch would have the opposite effect:
+			//	1.21.3rc1 < 1.21.3
+			// We've never needed them before, so let's not start now.
+			return gover{}
+		}
+		return v
+	}
+
+	// Parse prerelease.
+	i := 0
+	for i < len(x) && (x[i] < '0' || '9' < x[i]) {
+		if x[i] < 'a' || 'z' < x[i] {
+			return gover{}
+		}
+		i++
+	}
+	if i == 0 {
+		return gover{}
+	}
+	v.kind, x = x[:i], x[i:]
+	if x == "" {
+		return v
+	}
+	v.pre, x, ok = cutInt(x)
+	if !ok || x != "" {
+		return gover{}
+	}
+
+	return v
+}
+
+// cutInt scans the leading decimal number at the start of x to an integer
+// and returns that value and the rest of the string.
+func cutInt(x string) (n, rest string, ok bool) {
+	i := 0
+	for i < len(x) && '0' <= x[i] && x[i] <= '9' {
+		i++
+	}
+	if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero
		return "", "", false
+	}
+	return x[:i], x[i:], true
+}
+
+// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers.
+// (Copied from golang.org/x/mod/semver's compareInt.)
+func cmpInt(x, y string) int {
+	if x == y {
+		return 0
+	}
+	if len(x) < len(y) {
+		return -1
+	}
+	if len(x) > len(y) {
+		return +1
+	}
+	if x < y {
+		return -1
+	} else {
+		return +1
+	}
+}
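The ordering compare implements is worth spelling out with concrete cases. This sketch uses the exported wrappers (Lang, Compare, IsValid) that versions.go introduces further down in this patch; as above, the package is internal to x/tools, so treat it as illustrative rather than importable from outside.

package main

import (
	"fmt"

	"golang.org/x/tools/internal/versions"
)

func main() {
	// A bare language version sorts before its release candidates,
	// which in turn sort before the first point release.
	fmt.Println(versions.Compare("go1.21", "go1.21rc1"))   // -1
	fmt.Println(versions.Compare("go1.21rc1", "go1.21.0")) // -1

	// Versions without the "go" prefix are treated as invalid and
	// compare less than any valid version.
	fmt.Println(versions.IsValid("1.21"))               // false
	fmt.Println(versions.Compare("1.21", "go1.18") < 0) // true

	// Lang truncates to the language version.
	fmt.Println(versions.Lang("go1.21.2")) // "go1.21"
}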
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go
new file mode 100644
index 0000000000..377bf7a53b
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/toolchain.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+// toolchain is the maximum version (<1.22) that the go toolchain used
+// to build the current tool is known to support.
+//
+// When a tool is built with >=1.22, the value of toolchain is unused.
+//
+// x/tools does not support building with go <1.18. So we take this
+// as the minimum possible maximum.
+var toolchain string = Go1_18
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
new file mode 100644
index 0000000000..f65beed9d8
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package versions
+
+func init() {
+	if Compare(toolchain, Go1_19) < 0 {
+		toolchain = Go1_19
+	}
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
new file mode 100644
index 0000000000..1a9efa126c
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+// +build go1.20
+
+package versions
+
+func init() {
+	if Compare(toolchain, Go1_20) < 0 {
+		toolchain = Go1_20
+	}
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
new file mode 100644
index 0000000000..b7ef216dfe
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+// +build go1.21
+
+package versions
+
+func init() {
+	if Compare(toolchain, Go1_21) < 0 {
+		toolchain = Go1_21
+	}
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go
new file mode 100644
index 0000000000..562eef21fa
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types.go
@@ -0,0 +1,19 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import (
+	"go/types"
+)
+
+// GoVersion returns the Go version of the type package.
+// It returns zero if no version can be determined.
+func GoVersion(pkg *types.Package) string {
+	// TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
+	if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
+		return pkg.GoVersion()
+	}
+	return ""
+}
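GoVersion's interface assertion deserves a quick illustration: it lets the package read types.Package.GoVersion() (a method added in Go 1.21) without requiring Go 1.21 to build. A rough usage sketch, again assuming code inside x/tools since the import path is internal:

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/versions"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", "package p", 0)
	if err != nil {
		panic(err)
	}

	// Type-check with an explicit package version, as go/packages would.
	conf := types.Config{Importer: importer.Default(), GoVersion: "go1.21"}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	// On a Go 1.21+ toolchain this prints "go1.21"; on older toolchains,
	// where *types.Package has no GoVersion method, it prints "".
	fmt.Println(versions.GoVersion(pkg))
}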
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
new file mode 100644
index 0000000000..b4345d3349
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types_go121.go
@@ -0,0 +1,30 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.22
+// +build !go1.22
+
+package versions
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// FileVersion returns a language version (<=1.21) derived from runtime.Version()
+// or an unknown future version.
+func FileVersion(info *types.Info, file *ast.File) string {
+	// In x/tools built with Go <= 1.21, we do not have Info.FileVersions
+	// available. We use a go version derived from the toolchain used to
+	// compile the tool by default.
+	// This will be <= go1.21. We take this as the maximum version that
+	// this tool can support.
+	//
+	// There are no features currently in x/tools that need to tell fine-grained
+	// differences for versions <1.22.
+	return toolchain
+}
+
+// InitFileVersions is a noop when compiled with this Go version.
+func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
new file mode 100644
index 0000000000..e8180632a5
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go
@@ -0,0 +1,41 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22
+// +build go1.22
+
+package versions
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// FileVersion returns a file's Go version.
+// The reported version is an unknown Future version if a
+// version cannot be determined.
+func FileVersion(info *types.Info, file *ast.File) string {
+	// In tools built with Go >= 1.22, the Go version of a file
+	// follows a cascade of sources:
+	// 1) types.Info.FileVersions[file], which follows the cascade:
+	//   1.a) file version (ast.File.GoVersion),
+	//   1.b) the package version (types.Config.GoVersion), or
+	// 2) is some unknown Future version.
+	//
+	// File versions require a valid package version to be provided to types
+	// in Config.GoVersion. Config.GoVersion is either from the package's module
+	// or the toolchain (go run). This value should be provided by go/packages
+	// or unitchecker.Config.GoVersion.
+	if v := info.FileVersions[file]; IsValid(v) {
+		return v
+	}
+	// Note: we could instead return runtime.Version() [if valid].
+	// This would act as a max version on what a tool can support.
+	return Future
+}
+
+// InitFileVersions initializes info to record Go versions for Go files.
+func InitFileVersions(info *types.Info) {
+	info.FileVersions = make(map[*ast.File]string)
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/versions.go b/vendor/golang.org/x/tools/internal/versions/versions.go
new file mode 100644
index 0000000000..8d1f7453db
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/versions.go
@@ -0,0 +1,57 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import (
+	"strings"
+)
+
+// Note: If we use build tags to use go/versions when go >=1.22,
+// we run into go.dev/issue/53737. Under some operations users would see an
+// import of "go/versions" even if they would not compile the file.
+// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include
+// For this reason, this library is just a clone of go/versions for the moment.
+
+// Lang returns the Go language version for version x.
+// If x is not a valid version, Lang returns the empty string.
+// For example:
+//
+//	Lang("go1.21rc2") = "go1.21"
+//	Lang("go1.21.2") = "go1.21"
+//	Lang("go1.21") = "go1.21"
+//	Lang("go1") = "go1"
+//	Lang("bad") = ""
+//	Lang("1.21") = ""
+func Lang(x string) string {
+	v := lang(stripGo(x))
+	if v == "" {
+		return ""
+	}
+	return x[:2+len(v)] // "go"+v without allocation
+}
+
+// Compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as Go versions.
+// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
+// Invalid versions, including the empty string, compare less than +// valid versions and equal to each other. +// The language version "go1.21" compares less than the +// release candidate and eventual releases "go1.21rc1" and "go1.21.0". +// Custom toolchain suffixes are ignored during comparison: +// "go1.21.0" and "go1.21.0-bigcorp" are equal. +func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) } + +// IsValid reports whether the version x is valid. +func IsValid(x string) bool { return isValid(stripGo(x)) } + +// stripGo converts from a "go1.21" version to a "1.21" version. +// If v does not start with "go", stripGo returns the empty string (a known invalid version). +func stripGo(v string) string { + v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix. + if len(v) < 2 || v[:2] != "go" { + return "" + } + return v[2:] +} diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/xerrors/LICENSE deleted file mode 100644 index e4a47e17f1..0000000000 --- a/vendor/golang.org/x/xerrors/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2019 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/xerrors/README b/vendor/golang.org/x/xerrors/README deleted file mode 100644 index aac7867a56..0000000000 --- a/vendor/golang.org/x/xerrors/README +++ /dev/null @@ -1,2 +0,0 @@ -This repository holds the transition packages for the new Go 1.13 error values. -See golang.org/design/29934-error-values. diff --git a/vendor/golang.org/x/xerrors/adaptor.go b/vendor/golang.org/x/xerrors/adaptor.go deleted file mode 100644 index 4317f24833..0000000000 --- a/vendor/golang.org/x/xerrors/adaptor.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strconv" -) - -// FormatError calls the FormatError method of f with an errors.Printer -// configured according to s and verb, and writes the result to s. 
-func FormatError(f Formatter, s fmt.State, verb rune) { - // Assuming this function is only called from the Format method, and given - // that FormatError takes precedence over Format, it cannot be called from - // any package that supports errors.Formatter. It is therefore safe to - // disregard that State may be a specific printer implementation and use one - // of our choice instead. - - // limitations: does not support printing error as Go struct. - - var ( - sep = " " // separator before next error - p = &state{State: s} - direct = true - ) - - var err error = f - - switch verb { - // Note that this switch must match the preference order - // for ordinary string printing (%#v before %+v, and so on). - - case 'v': - if s.Flag('#') { - if stringer, ok := err.(fmt.GoStringer); ok { - io.WriteString(&p.buf, stringer.GoString()) - goto exit - } - // proceed as if it were %v - } else if s.Flag('+') { - p.printDetail = true - sep = "\n - " - } - case 's': - case 'q', 'x', 'X': - // Use an intermediate buffer in the rare cases that precision, - // truncation, or one of the alternative verbs (q, x, and X) are - // specified. - direct = false - - default: - p.buf.WriteString("%!") - p.buf.WriteRune(verb) - p.buf.WriteByte('(') - switch { - case err != nil: - p.buf.WriteString(reflect.TypeOf(f).String()) - default: - p.buf.WriteString("") - } - p.buf.WriteByte(')') - io.Copy(s, &p.buf) - return - } - -loop: - for { - switch v := err.(type) { - case Formatter: - err = v.FormatError((*printer)(p)) - case fmt.Formatter: - v.Format(p, 'v') - break loop - default: - io.WriteString(&p.buf, v.Error()) - break loop - } - if err == nil { - break - } - if p.needColon || !p.printDetail { - p.buf.WriteByte(':') - p.needColon = false - } - p.buf.WriteString(sep) - p.inDetail = false - p.needNewline = false - } - -exit: - width, okW := s.Width() - prec, okP := s.Precision() - - if !direct || (okW && width > 0) || okP { - // Construct format string from State s. - format := []byte{'%'} - if s.Flag('-') { - format = append(format, '-') - } - if s.Flag('+') { - format = append(format, '+') - } - if s.Flag(' ') { - format = append(format, ' ') - } - if okW { - format = strconv.AppendInt(format, int64(width), 10) - } - if okP { - format = append(format, '.') - format = strconv.AppendInt(format, int64(prec), 10) - } - format = append(format, string(verb)...) - fmt.Fprintf(s, string(format), p.buf.String()) - } else { - io.Copy(s, &p.buf) - } -} - -var detailSep = []byte("\n ") - -// state tracks error printing state. It implements fmt.State. -type state struct { - fmt.State - buf bytes.Buffer - - printDetail bool - inDetail bool - needColon bool - needNewline bool -} - -func (s *state) Write(b []byte) (n int, err error) { - if s.printDetail { - if len(b) == 0 { - return 0, nil - } - if s.inDetail && s.needColon { - s.needNewline = true - if b[0] == '\n' { - b = b[1:] - } - } - k := 0 - for i, c := range b { - if s.needNewline { - if s.inDetail && s.needColon { - s.buf.WriteByte(':') - s.needColon = false - } - s.buf.Write(detailSep) - s.needNewline = false - } - if c == '\n' { - s.buf.Write(b[k:i]) - k = i + 1 - s.needNewline = true - } - } - s.buf.Write(b[k:]) - if !s.inDetail { - s.needColon = true - } - } else if !s.inDetail { - s.buf.Write(b) - } - return len(b), nil -} - -// printer wraps a state to implement an xerrors.Printer. -type printer state - -func (s *printer) Print(args ...interface{}) { - if !s.inDetail || s.printDetail { - fmt.Fprint((*state)(s), args...) 
- } -} - -func (s *printer) Printf(format string, args ...interface{}) { - if !s.inDetail || s.printDetail { - fmt.Fprintf((*state)(s), format, args...) - } -} - -func (s *printer) Detail() bool { - s.inDetail = true - return s.printDetail -} diff --git a/vendor/golang.org/x/xerrors/codereview.cfg b/vendor/golang.org/x/xerrors/codereview.cfg deleted file mode 100644 index 3f8b14b64e..0000000000 --- a/vendor/golang.org/x/xerrors/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go deleted file mode 100644 index 2ef99f5a87..0000000000 --- a/vendor/golang.org/x/xerrors/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package xerrors implements functions to manipulate errors. -// -// This package is based on the Go 2 proposal for error values: -// -// https://golang.org/design/29934-error-values -// -// These functions were incorporated into the standard library's errors package -// in Go 1.13: -// - Is -// - As -// - Unwrap -// -// Also, Errorf's %w verb was incorporated into fmt.Errorf. -// -// Use this package to get equivalent behavior in all supported Go versions. -// -// No other features of this package were included in Go 1.13, and at present -// there are no plans to include any of them. -package xerrors // import "golang.org/x/xerrors" diff --git a/vendor/golang.org/x/xerrors/errors.go b/vendor/golang.org/x/xerrors/errors.go deleted file mode 100644 index e88d3772d8..0000000000 --- a/vendor/golang.org/x/xerrors/errors.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import "fmt" - -// errorString is a trivial implementation of error. -type errorString struct { - s string - frame Frame -} - -// New returns an error that formats as the given text. -// -// The returned error contains a Frame set to the caller's location and -// implements Formatter to show this information when printed with details. -func New(text string) error { - return &errorString{text, Caller(1)} -} - -func (e *errorString) Error() string { - return e.s -} - -func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *errorString) FormatError(p Printer) (next error) { - p.Print(e.s) - e.frame.Format(p) - return nil -} diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go deleted file mode 100644 index 27a5d70bd6..0000000000 --- a/vendor/golang.org/x/xerrors/fmt.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" - - "golang.org/x/xerrors/internal" -) - -const percentBangString = "%!" - -// Errorf formats according to a format specifier and returns the string as a -// value that satisfies error. -// -// The returned error includes the file and line number of the caller when -// formatted with additional detail enabled. If the last argument is an error -// the returned error's Format method will return it if the format string ends -// with ": %s", ": %v", or ": %w". 
If the last argument is an error and the -// format string ends with ": %w", the returned error implements an Unwrap -// method returning it. -// -// If the format specifier includes a %w verb with an error operand in a -// position other than at the end, the returned error will still implement an -// Unwrap method returning the operand, but the error's Format method will not -// return the wrapped error. -// -// It is invalid to include more than one %w verb or to supply it with an -// operand that does not implement the error interface. The %w verb is otherwise -// a synonym for %v. -// -// Note that as of Go 1.13, the fmt.Errorf function will do error formatting, -// but it will not capture a stack backtrace. -func Errorf(format string, a ...interface{}) error { - format = formatPlusW(format) - // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. - wrap := strings.HasSuffix(format, ": %w") - idx, format2, ok := parsePercentW(format) - percentWElsewhere := !wrap && idx >= 0 - if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) { - err := errorAt(a, len(a)-1) - if err == nil { - return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)} - } - // TODO: this is not entirely correct. The error value could be - // printed elsewhere in format if it mixes numbered with unnumbered - // substitutions. With relatively small changes to doPrintf we can - // have it optionally ignore extra arguments and pass the argument - // list in its entirety. - msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...) - frame := Frame{} - if internal.EnableTrace { - frame = Caller(1) - } - if wrap { - return &wrapError{msg, err, frame} - } - return &noWrapError{msg, err, frame} - } - // Support %w anywhere. - // TODO: don't repeat the wrapped error's message when %w occurs in the middle. - msg := fmt.Sprintf(format2, a...) - if idx < 0 { - return &noWrapError{msg, nil, Caller(1)} - } - err := errorAt(a, idx) - if !ok || err == nil { - // Too many %ws or argument of %w is not an error. Approximate the Go - // 1.13 fmt.Errorf message. - return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)} - } - frame := Frame{} - if internal.EnableTrace { - frame = Caller(1) - } - return &wrapError{msg, err, frame} -} - -func errorAt(args []interface{}, i int) error { - if i < 0 || i >= len(args) { - return nil - } - err, ok := args[i].(error) - if !ok { - return nil - } - return err -} - -// formatPlusW is used to avoid the vet check that will barf at %w. -func formatPlusW(s string) string { - return s -} - -// Return the index of the only %w in format, or -1 if none. -// Also return a rewritten format string with %w replaced by %v, and -// false if there is more than one %w. -// TODO: handle "%[N]w". -func parsePercentW(format string) (idx int, newFormat string, ok bool) { - // Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go. - idx = -1 - ok = true - n := 0 - sz := 0 - var isW bool - for i := 0; i < len(format); i += sz { - if format[i] != '%' { - sz = 1 - continue - } - // "%%" is not a format directive. - if i+1 < len(format) && format[i+1] == '%' { - sz = 2 - continue - } - sz, isW = parsePrintfVerb(format[i:]) - if isW { - if idx >= 0 { - ok = false - } else { - idx = n - } - // "Replace" the last character, the 'w', with a 'v'. 
- p := i + sz - 1 - format = format[:p] + "v" + format[p+1:] - } - n++ - } - return idx, format, ok -} - -// Parse the printf verb starting with a % at s[0]. -// Return how many bytes it occupies and whether the verb is 'w'. -func parsePrintfVerb(s string) (int, bool) { - // Assume only that the directive is a sequence of non-letters followed by a single letter. - sz := 0 - var r rune - for i := 1; i < len(s); i += sz { - r, sz = utf8.DecodeRuneInString(s[i:]) - if unicode.IsLetter(r) { - return i + sz, r == 'w' - } - } - return len(s), false -} - -type noWrapError struct { - msg string - err error - frame Frame -} - -func (e *noWrapError) Error() string { - return fmt.Sprint(e) -} - -func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *noWrapError) FormatError(p Printer) (next error) { - p.Print(e.msg) - e.frame.Format(p) - return e.err -} - -type wrapError struct { - msg string - err error - frame Frame -} - -func (e *wrapError) Error() string { - return fmt.Sprint(e) -} - -func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *wrapError) FormatError(p Printer) (next error) { - p.Print(e.msg) - e.frame.Format(p) - return e.err -} - -func (e *wrapError) Unwrap() error { - return e.err -} diff --git a/vendor/golang.org/x/xerrors/format.go b/vendor/golang.org/x/xerrors/format.go deleted file mode 100644 index 1bc9c26b97..0000000000 --- a/vendor/golang.org/x/xerrors/format.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -// A Formatter formats error messages. -type Formatter interface { - error - - // FormatError prints the receiver's first error and returns the next error in - // the error chain, if any. - FormatError(p Printer) (next error) -} - -// A Printer formats error messages. -// -// The most common implementation of Printer is the one provided by package fmt -// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message -// typically provide their own implementations. -type Printer interface { - // Print appends args to the message output. - Print(args ...interface{}) - - // Printf writes a formatted string. - Printf(format string, args ...interface{}) - - // Detail reports whether error detail is requested. - // After the first call to Detail, all text written to the Printer - // is formatted as additional detail, or ignored when - // detail has not been requested. - // If Detail returns false, the caller can avoid printing the detail at all. - Detail() bool -} diff --git a/vendor/golang.org/x/xerrors/frame.go b/vendor/golang.org/x/xerrors/frame.go deleted file mode 100644 index 0de628ec50..0000000000 --- a/vendor/golang.org/x/xerrors/frame.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "runtime" -) - -// A Frame contains part of a call stack. -type Frame struct { - // Make room for three PCs: the one we were asked for, what it called, - // and possibly a PC for skipPleaseUseCallersFrames. See: - // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169 - frames [3]uintptr -} - -// Caller returns a Frame that describes a frame on the caller's stack. -// The argument skip is the number of frames to skip over. 
-// Caller(0) returns the frame for the caller of Caller. -func Caller(skip int) Frame { - var s Frame - runtime.Callers(skip+1, s.frames[:]) - return s -} - -// location reports the file, line, and function of a frame. -// -// The returned function may be "" even if file and line are not. -func (f Frame) location() (function, file string, line int) { - frames := runtime.CallersFrames(f.frames[:]) - if _, ok := frames.Next(); !ok { - return "", "", 0 - } - fr, ok := frames.Next() - if !ok { - return "", "", 0 - } - return fr.Function, fr.File, fr.Line -} - -// Format prints the stack as error detail. -// It should be called from an error's Format implementation -// after printing any other error detail. -func (f Frame) Format(p Printer) { - if p.Detail() { - function, file, line := f.location() - if function != "" { - p.Printf("%s\n ", function) - } - if file != "" { - p.Printf("%s:%d\n", file, line) - } - } -} diff --git a/vendor/golang.org/x/xerrors/internal/internal.go b/vendor/golang.org/x/xerrors/internal/internal.go deleted file mode 100644 index 89f4eca5df..0000000000 --- a/vendor/golang.org/x/xerrors/internal/internal.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -// EnableTrace indicates whether stack information should be recorded in errors. -var EnableTrace = true diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go deleted file mode 100644 index 9842758ca7..0000000000 --- a/vendor/golang.org/x/xerrors/wrap.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "reflect" -) - -// A Wrapper provides context around another error. -type Wrapper interface { - // Unwrap returns the next error in the error chain. - // If there is no next error, Unwrap returns nil. - Unwrap() error -} - -// Opaque returns an error with the same error formatting as err -// but that does not match err and cannot be unwrapped. -func Opaque(err error) error { - return noWrapper{err} -} - -type noWrapper struct { - error -} - -func (e noWrapper) FormatError(p Printer) (next error) { - if f, ok := e.error.(Formatter); ok { - return f.FormatError(p) - } - p.Print(e.error) - return nil -} - -// Unwrap returns the result of calling the Unwrap method on err, if err implements -// Unwrap. Otherwise, Unwrap returns nil. -// -// Deprecated: As of Go 1.13, use errors.Unwrap instead. -func Unwrap(err error) error { - u, ok := err.(Wrapper) - if !ok { - return nil - } - return u.Unwrap() -} - -// Is reports whether any error in err's chain matches target. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. -// -// Deprecated: As of Go 1.13, use errors.Is instead. -func Is(err, target error) bool { - if target == nil { - return err == target - } - - isComparable := reflect.TypeOf(target).Comparable() - for { - if isComparable && err == target { - return true - } - if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { - return true - } - // TODO: consider supporing target.Is(err). 
This would allow - // user-definable predicates, but also may allow for coping with sloppy - // APIs, thereby making it easier to get away with them. - if err = Unwrap(err); err == nil { - return false - } - } -} - -// As finds the first error in err's chain that matches the type to which target -// points, and if so, sets the target to its value and returns true. An error -// matches a type if it is assignable to the target type, or if it has a method -// As(interface{}) bool such that As(target) returns true. As will panic if target -// is not a non-nil pointer to a type which implements error or is of interface type. -// -// The As method should set the target to its value and return true if err -// matches the type to which target points. -// -// Deprecated: As of Go 1.13, use errors.As instead. -func As(err error, target interface{}) bool { - if target == nil { - panic("errors: target cannot be nil") - } - val := reflect.ValueOf(target) - typ := val.Type() - if typ.Kind() != reflect.Ptr || val.IsNil() { - panic("errors: target must be a non-nil pointer") - } - if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) { - panic("errors: *target must be interface or implement error") - } - targetType := typ.Elem() - for err != nil { - if reflect.TypeOf(err).AssignableTo(targetType) { - val.Elem().Set(reflect.ValueOf(err)) - return true - } - if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) { - return true - } - err = Unwrap(err) - } - return false -} - -var errorType = reflect.TypeOf((*error)(nil)).Elem() diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS deleted file mode 100644 index f07029059d..0000000000 --- a/vendor/google.golang.org/api/AUTHORS +++ /dev/null @@ -1,11 +0,0 @@ -# This is the official list of authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. -Google Inc. -LightStep Inc. diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS deleted file mode 100644 index 788677b8f0..0000000000 --- a/vendor/google.golang.org/api/CONTRIBUTORS +++ /dev/null @@ -1,56 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# https://cla.developers.google.com/about/google-individual -# https://cla.developers.google.com/about/google-corporate -# -# The CLA can be filled out on the web: -# -# https://cla.developers.google.com/ -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. 
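For context on the `xerrors` wrap helpers deleted above: as their deprecation notes say, `Unwrap`, `Is`, and `As` were absorbed into the standard `errors` package in Go 1.13, and `%w` wrapping into `fmt.Errorf`. A short sketch of the stdlib equivalents:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("missing.txt") // fails with a *fs.PathError
	wrapped := fmt.Errorf("loading config: %w", err)

	// errors.Is walks the %w chain, replacing xerrors.Is.
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true

	// errors.As extracts a typed error from the chain, replacing xerrors.As.
	var pathErr *fs.PathError
	if errors.As(wrapped, &pathErr) {
		fmt.Println("failed path:", pathErr.Path) // missing.txt
	}

	// errors.Unwrap returns the next error in the chain, replacing xerrors.Unwrap.
	fmt.Println(errors.Unwrap(wrapped) == err) // true
}
```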
- -# Names should be added to this file like so: -# Name -# -# An entry with two email addresses specifies that the -# first address should be used in the submit logs and -# that the second address should be recognized as the -# same person when interacting with Rietveld. - -# Please keep the list sorted. - -Alain Vongsouvanhalainv -Andrew Gerrand -Brad Fitzpatrick -Eric Koleda -Francesc Campoy -Garrick Evans -Glenn Lewis -Ivan Krasin -Jason Hall -Johan Euphrosine -Kostik Shtoyk -Kunpei Sakai -Matthew Dolan -Matthew Whisenhunt -Michael McGreevy -Nick Craig-Wood -Robbie Trencheny -Ross Light -Sarah Adams -Scott Van Woudenberg -Takashi Matsuo diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE deleted file mode 100644 index 263aa7a0c1..0000000000 --- a/vendor/google.golang.org/api/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go deleted file mode 100644 index b5e38c6628..0000000000 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ /dev/null @@ -1,480 +0,0 @@ -// Copyright 2011 Google LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package googleapi contains the common code shared by all Google API -// libraries. -package googleapi // import "google.golang.org/api/googleapi" - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "time" - - "google.golang.org/api/internal/third_party/uritemplates" -) - -// ContentTyper is an interface for Readers which know (or would like -// to override) their Content-Type. If a media body doesn't implement -// ContentTyper, the type is sniffed from the content using -// http.DetectContentType. -type ContentTyper interface { - ContentType() string -} - -// A SizeReaderAt is a ReaderAt with a Size method. -// An io.SectionReader implements SizeReaderAt. 
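The `SizeReaderAt` contract defined just below is satisfied by `io.SectionReader`, as its doc comment notes. A quick illustration, using a local copy of the interface since the vendored package is being removed:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// sizeReaderAt is a local copy of the googleapi.SizeReaderAt contract:
// a ReaderAt that also reports its total size.
type sizeReaderAt interface {
	io.ReaderAt
	Size() int64
}

func main() {
	src := strings.NewReader("hello, world")
	// io.SectionReader provides both ReadAt and Size, so it satisfies the interface.
	var s sizeReaderAt = io.NewSectionReader(src, 0, int64(src.Len()))

	buf := make([]byte, 5)
	n, _ := s.ReadAt(buf, 7) // err is io.EOF here because we read up to the end
	fmt.Println(s.Size(), string(buf[:n])) // 12 world
}
```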
-type SizeReaderAt interface { - io.ReaderAt - Size() int64 -} - -// ServerResponse is embedded in each Do response and -// provides the HTTP status code and header sent by the server. -type ServerResponse struct { - // HTTPStatusCode is the server's response status code. When using a - // resource method's Do call, this will always be in the 2xx range. - HTTPStatusCode int - // Header contains the response header fields from the server. - Header http.Header -} - -const ( - // Version defines the gax version being used. This is typically sent - // in an HTTP header to services. - Version = "0.5" - - // UserAgent is the header string used to identify this package. - UserAgent = "google-api-go-client/" + Version - - // DefaultUploadChunkSize is the default chunk size to use for resumable - // uploads if not specified by the user. - DefaultUploadChunkSize = 16 * 1024 * 1024 - - // MinUploadChunkSize is the minimum chunk size that can be used for - // resumable uploads. All user-specified chunk sizes must be multiple of - // this value. - MinUploadChunkSize = 256 * 1024 -) - -// Error contains an error response from the server. -type Error struct { - // Code is the HTTP response status code and will always be populated. - Code int `json:"code"` - // Message is the server response message and is only populated when - // explicitly referenced by the JSON server response. - Message string `json:"message"` - // Details provide more context to an error. - Details []interface{} `json:"details"` - // Body is the raw response returned by the server. - // It is often but not always JSON, depending on how the request fails. - Body string - // Header contains the response header fields from the server. - Header http.Header - - Errors []ErrorItem - // err is typically a wrapped apierror.APIError, see - // google-api-go-client/internal/gensupport/error.go. - err error -} - -// ErrorItem is a detailed error code & message from the Google API frontend. -type ErrorItem struct { - // Reason is the typed error code. For example: "some_example". - Reason string `json:"reason"` - // Message is the human-readable description of the error. - Message string `json:"message"` -} - -func (e *Error) Error() string { - if len(e.Errors) == 0 && e.Message == "" { - return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body) - } - var buf bytes.Buffer - fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code) - if e.Message != "" { - fmt.Fprintf(&buf, "%s", e.Message) - } - if len(e.Details) > 0 { - var detailBuf bytes.Buffer - enc := json.NewEncoder(&detailBuf) - enc.SetIndent("", " ") - if err := enc.Encode(e.Details); err == nil { - fmt.Fprint(&buf, "\nDetails:") - fmt.Fprintf(&buf, "\n%s", detailBuf.String()) - - } - } - if len(e.Errors) == 0 { - return strings.TrimSpace(buf.String()) - } - if len(e.Errors) == 1 && e.Errors[0].Message == e.Message { - fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason) - return buf.String() - } - fmt.Fprintln(&buf, "\nMore details:") - for _, v := range e.Errors { - fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message) - } - return buf.String() -} - -// Wrap allows an existing Error to wrap another error. See also [Error.Unwrap]. -func (e *Error) Wrap(err error) { - e.err = err -} - -func (e *Error) Unwrap() error { - return e.err -} - -type errorReply struct { - Error *Error `json:"error"` -} - -// CheckResponse returns an error (of type *Error) if the response -// status code is not 2xx. 
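To make the control flow of `CheckResponse` concrete, here is a small sketch that feeds it a synthetic 404 response; the JSON body follows the `{"error": {...}}` envelope the deleted `errorReply` type decodes:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"

	"google.golang.org/api/googleapi"
)

func main() {
	// A synthetic failed response in the {"error": {...}} envelope.
	res := &http.Response{
		StatusCode: http.StatusNotFound,
		Header:     http.Header{},
		Body: io.NopCloser(strings.NewReader(
			`{"error": {"code": 404, "message": "object not found"}}`)),
	}

	// CheckResponse returns a *googleapi.Error for non-2xx responses.
	if apiErr, ok := googleapi.CheckResponse(res).(*googleapi.Error); ok {
		fmt.Println(apiErr.Code, apiErr.Message) // 404 object not found
	}
}
```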
-func CheckResponse(res *http.Response) error { - if res.StatusCode >= 200 && res.StatusCode <= 299 { - return nil - } - slurp, err := io.ReadAll(res.Body) - if err == nil { - jerr := new(errorReply) - err = json.Unmarshal(slurp, jerr) - if err == nil && jerr.Error != nil { - if jerr.Error.Code == 0 { - jerr.Error.Code = res.StatusCode - } - jerr.Error.Body = string(slurp) - jerr.Error.Header = res.Header - return jerr.Error - } - } - return &Error{ - Code: res.StatusCode, - Body: string(slurp), - Header: res.Header, - } -} - -// IsNotModified reports whether err is the result of the -// server replying with http.StatusNotModified. -// Such error values are sometimes returned by "Do" methods -// on calls when If-None-Match is used. -func IsNotModified(err error) bool { - if err == nil { - return false - } - ae, ok := err.(*Error) - return ok && ae.Code == http.StatusNotModified -} - -// CheckMediaResponse returns an error (of type *Error) if the response -// status code is not 2xx. Unlike CheckResponse it does not assume the -// body is a JSON error document. -// It is the caller's responsibility to close res.Body. -func CheckMediaResponse(res *http.Response) error { - if res.StatusCode >= 200 && res.StatusCode <= 299 { - return nil - } - slurp, _ := io.ReadAll(io.LimitReader(res.Body, 1<<20)) - return &Error{ - Code: res.StatusCode, - Body: string(slurp), - Header: res.Header, - } -} - -// MarshalStyle defines whether to marshal JSON with a {"data": ...} wrapper. -type MarshalStyle bool - -// WithDataWrapper marshals JSON with a {"data": ...} wrapper. -var WithDataWrapper = MarshalStyle(true) - -// WithoutDataWrapper marshals JSON without a {"data": ...} wrapper. -var WithoutDataWrapper = MarshalStyle(false) - -func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { - buf := new(bytes.Buffer) - if wrap { - buf.Write([]byte(`{"data": `)) - } - err := json.NewEncoder(buf).Encode(v) - if err != nil { - return nil, err - } - if wrap { - buf.Write([]byte(`}`)) - } - return buf, nil -} - -// ProgressUpdater is a function that is called upon every progress update of a resumable upload. -// This is the only part of a resumable upload (from googleapi) that is usable by the developer. -// The remaining usable pieces of resumable uploads is exposed in each auto-generated API. -type ProgressUpdater func(current, total int64) - -// MediaOption defines the interface for setting media options. -type MediaOption interface { - setOptions(o *MediaOptions) -} - -type contentTypeOption string - -func (ct contentTypeOption) setOptions(o *MediaOptions) { - o.ContentType = string(ct) - if o.ContentType == "" { - o.ForceEmptyContentType = true - } -} - -// ContentType returns a MediaOption which sets the Content-Type header for media uploads. -// If ctype is empty, the Content-Type header will be omitted. -func ContentType(ctype string) MediaOption { - return contentTypeOption(ctype) -} - -type chunkSizeOption int - -func (cs chunkSizeOption) setOptions(o *MediaOptions) { - size := int(cs) - if size%MinUploadChunkSize != 0 { - size += MinUploadChunkSize - (size % MinUploadChunkSize) - } - o.ChunkSize = size -} - -// ChunkSize returns a MediaOption which sets the chunk size for media uploads. -// size will be rounded up to the nearest multiple of 256K. -// Media which contains fewer than size bytes will be uploaded in a single request. -// Media which contains size bytes or more will be uploaded in separate chunks. -// If size is zero, media will be uploaded in a single request. 
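The rounding rule in `chunkSizeOption.setOptions` above (chunk sizes rounded up to the next multiple of `MinUploadChunkSize`, i.e. 256 KiB) is easy to verify in isolation; a sketch with the constant inlined:

```go
package main

import "fmt"

const minUploadChunkSize = 256 * 1024 // mirrors googleapi.MinUploadChunkSize

// roundChunk mirrors the setOptions logic: round size up to a 256 KiB multiple.
func roundChunk(size int) int {
	if size%minUploadChunkSize != 0 {
		size += minUploadChunkSize - (size % minUploadChunkSize)
	}
	return size
}

func main() {
	fmt.Println(roundChunk(1))       // 262144: rounded up to one full chunk
	fmt.Println(roundChunk(262144))  // 262144: already aligned
	fmt.Println(roundChunk(300_000)) // 524288: rounded up to two chunks
}
```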
-func ChunkSize(size int) MediaOption { - return chunkSizeOption(size) -} - -type chunkRetryDeadlineOption time.Duration - -func (cd chunkRetryDeadlineOption) setOptions(o *MediaOptions) { - o.ChunkRetryDeadline = time.Duration(cd) -} - -// ChunkRetryDeadline returns a MediaOption which sets a per-chunk retry -// deadline. If a single chunk has been attempting to upload for longer than -// this time and the request fails, it will no longer be retried, and the error -// will be returned to the caller. -// This is only applicable for files which are large enough to require -// a multi-chunk resumable upload. -// The default value is 32s. -// To set a deadline on the entire upload, use context timeout or cancellation. -func ChunkRetryDeadline(deadline time.Duration) MediaOption { - return chunkRetryDeadlineOption(deadline) -} - -// MediaOptions stores options for customizing media upload. It is not used by developers directly. -type MediaOptions struct { - ContentType string - ForceEmptyContentType bool - ChunkSize int - ChunkRetryDeadline time.Duration -} - -// ProcessMediaOptions stores options from opts in a MediaOptions. -// It is not used by developers directly. -func ProcessMediaOptions(opts []MediaOption) *MediaOptions { - mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize} - for _, o := range opts { - o.setOptions(mo) - } - return mo -} - -// ResolveRelative resolves relatives such as "http://www.golang.org/" and -// "topics/myproject/mytopic" into a single string, such as -// "http://www.golang.org/topics/myproject/mytopic". It strips all parent -// references (e.g. ../..) as well as anything after the host -// (e.g. /bar/gaz gets stripped out of foo.com/bar/gaz). -// -// ResolveRelative panics if either basestr or relstr is not able to be parsed. -func ResolveRelative(basestr, relstr string) string { - u, err := url.Parse(basestr) - if err != nil { - panic(fmt.Sprintf("failed to parse %q", basestr)) - } - afterColonPath := "" - if i := strings.IndexRune(relstr, ':'); i > 0 { - afterColonPath = relstr[i+1:] - relstr = relstr[:i] - } - rel, err := url.Parse(relstr) - if err != nil { - panic(fmt.Sprintf("failed to parse %q", relstr)) - } - u = u.ResolveReference(rel) - us := u.String() - if afterColonPath != "" { - us = fmt.Sprintf("%s:%s", us, afterColonPath) - } - us = strings.Replace(us, "%7B", "{", -1) - us = strings.Replace(us, "%7D", "}", -1) - us = strings.Replace(us, "%2A", "*", -1) - return us -} - -// Expand subsitutes any {encoded} strings in the URL passed in using -// the map supplied. -// -// This calls SetOpaque to avoid encoding of the parameters in the URL path. -func Expand(u *url.URL, expansions map[string]string) { - escaped, unescaped, err := uritemplates.Expand(u.Path, expansions) - if err == nil { - u.Path = unescaped - u.RawPath = escaped - } -} - -// CloseBody is used to close res.Body. -// Prior to calling Close, it also tries to Read a small amount to see an EOF. -// Not seeing an EOF can prevent HTTP Transports from reusing connections. -func CloseBody(res *http.Response) { - if res == nil || res.Body == nil { - return - } - // Justification for 3 byte reads: two for up to "\r\n" after - // a JSON/XML document, and then 1 to see EOF if we haven't yet. - // TODO(bradfitz): detect Go 1.3+ and skip these reads. 
- // See https://codereview.appspot.com/58240043 - // and https://codereview.appspot.com/49570044 - buf := make([]byte, 1) - for i := 0; i < 3; i++ { - _, err := res.Body.Read(buf) - if err != nil { - break - } - } - res.Body.Close() - -} - -// VariantType returns the type name of the given variant. -// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned. -// This is used to support "variant" APIs that can return one of a number of different types. -func VariantType(t map[string]interface{}) string { - s, _ := t["type"].(string) - return s -} - -// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'. -// This is used to support "variant" APIs that can return one of a number of different types. -// It reports whether the conversion was successful. -func ConvertVariant(v map[string]interface{}, dst interface{}) bool { - var buf bytes.Buffer - err := json.NewEncoder(&buf).Encode(v) - if err != nil { - return false - } - return json.Unmarshal(buf.Bytes(), dst) == nil -} - -// A Field names a field to be retrieved with a partial response. -// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance -// -// Partial responses can dramatically reduce the amount of data that must be sent to your application. -// In order to request partial responses, you can specify the full list of fields -// that your application needs by adding the Fields option to your request. -// -// Field strings use camelCase with leading lower-case characters to identify fields within the response. -// -// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, -// you could request just those fields like this: -// -// svc.Events.List().Fields("nextPageToken", "items/id").Do() -// -// or if you were also interested in each Item's "Updated" field, you can combine them like this: -// -// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() -// -// Another way to find field names is through the Google API explorer: -// https://developers.google.com/apis-explorer/#p/ -type Field string - -// CombineFields combines fields into a single string. -func CombineFields(s []Field) string { - r := make([]string, len(s)) - for i, v := range s { - r[i] = string(v) - } - return strings.Join(r, ",") -} - -// A CallOption is an optional argument to an API call. -// It should be treated as an opaque value by users of Google APIs. -// -// A CallOption is something that configures an API call in a way that is -// not specific to that API; for instance, controlling the quota user for -// an API call is common across many APIs, and is thus a CallOption. -type CallOption interface { - Get() (key, value string) -} - -// A MultiCallOption is an option argument to an API call and can be passed -// anywhere a CallOption is accepted. It additionally supports returning a slice -// of values for a given key. -type MultiCallOption interface { - CallOption - GetMulti() (key string, value []string) -} - -// QuotaUser returns a CallOption that will set the quota user for a call. -// The quota user can be used by server-side applications to control accounting. -// It can be an arbitrary string up to 40 characters, and will override UserIP -// if both are provided. 
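The `CallOption` values defined below are opaque key/value pairs that generated `Do` methods append to the request URL as query parameters. A sketch of inspecting a few of them (the quota-user string and trace token are placeholders):

```go
package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	opts := []googleapi.CallOption{
		googleapi.QuotaUser("reporting-batch"), // arbitrary accounting string, up to 40 chars
		googleapi.Trace("abc123"),              // "abc123" stands in for a support-supplied ID
	}
	for _, o := range opts {
		k, v := o.Get()
		fmt.Printf("%s=%s\n", k, v)
	}
	// Output:
	// quotaUser=reporting-batch
	// trace=token:abc123
}
```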
-func QuotaUser(u string) CallOption { return quotaUser(u) } - -type quotaUser string - -func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } - -// UserIP returns a CallOption that will set the "userIp" parameter of a call. -// This should be the IP address of the originating request. -func UserIP(ip string) CallOption { return userIP(ip) } - -type userIP string - -func (i userIP) Get() (string, string) { return "userIp", string(i) } - -// Trace returns a CallOption that enables diagnostic tracing for a call. -// traceToken is an ID supplied by Google support. -func Trace(traceToken string) CallOption { return traceTok(traceToken) } - -type traceTok string - -func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) } - -type queryParameter struct { - key string - values []string -} - -// QueryParameter allows setting the value(s) of an arbitrary key. -func QueryParameter(key string, values ...string) CallOption { - return queryParameter{key: key, values: append([]string{}, values...)} -} - -// Get will never actually be called -- GetMulti will. -func (q queryParameter) Get() (string, string) { - return "", "" -} - -// GetMulti returns the key and values values associated to that key. -func (q queryParameter) GetMulti() (string, []string) { - return q.key, q.values -} - -// TODO: Fields too diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go deleted file mode 100644 index f5d826c2a1..0000000000 --- a/vendor/google.golang.org/api/googleapi/transport/apikey.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2012 Google LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package transport contains HTTP transports used to make -// authenticated API requests. -// -// This package is DEPRECATED. Users should instead use, -// -// service, err := NewService(..., option.WithAPIKey(...)) -package transport - -import ( - "errors" - "net/http" -) - -// APIKey is an HTTP Transport which wraps an underlying transport and -// appends an API Key "key" parameter to the URL of outgoing requests. -// -// Deprecated: please use NewService(..., option.WithAPIKey(...)) instead. -type APIKey struct { - // Key is the API Key to set on requests. - Key string - - // Transport is the underlying HTTP transport. - // If nil, http.DefaultTransport is used. - Transport http.RoundTripper -} - -func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { - rt := t.Transport - if rt == nil { - rt = http.DefaultTransport - if rt == nil { - return nil, errors.New("googleapi/transport: no Transport specified or available") - } - } - newReq := *req - args := newReq.URL.Query() - args.Set("key", t.Key) - newReq.URL.RawQuery = args.Encode() - return rt.RoundTrip(&newReq) -} diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go deleted file mode 100644 index fabf74d50d..0000000000 --- a/vendor/google.golang.org/api/googleapi/types.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2013 Google LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package googleapi - -import ( - "encoding/json" - "errors" - "strconv" -) - -// Int64s is a slice of int64s that marshal as quoted strings in JSON. 
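These quoted-string numeric slices exist because plain JSON numbers lose precision beyond 2^53 in JavaScript clients, so Google APIs transmit 64-bit values as strings. A round-trip sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	ids := googleapi.Int64s{1, 9007199254740993} // second value exceeds 2^53
	out, _ := json.Marshal(ids)
	fmt.Println(string(out)) // ["1","9007199254740993"]

	var back googleapi.Int64s
	_ = json.Unmarshal(out, &back)
	fmt.Println(back[1] == 9007199254740993) // true: precision preserved
}
```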
-type Int64s []int64 - -func (q *Int64s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return err - } - *q = append(*q, int64(v)) - } - return nil -} - -// Int32s is a slice of int32s that marshal as quoted strings in JSON. -type Int32s []int32 - -func (q *Int32s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return err - } - *q = append(*q, int32(v)) - } - return nil -} - -// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. -type Uint64s []uint64 - -func (q *Uint64s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return err - } - *q = append(*q, uint64(v)) - } - return nil -} - -// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. -type Uint32s []uint32 - -func (q *Uint32s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return err - } - *q = append(*q, uint32(v)) - } - return nil -} - -// Float64s is a slice of float64s that marshal as quoted strings in JSON. -type Float64s []float64 - -func (q *Float64s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseFloat(s, 64) - if err != nil { - return err - } - *q = append(*q, float64(v)) - } - return nil -} - -func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { - dst := make([]byte, 0, 2+n*10) // somewhat arbitrary - dst = append(dst, '[') - for i := 0; i < n; i++ { - if i > 0 { - dst = append(dst, ',') - } - dst = append(dst, '"') - dst = fn(dst, i) - dst = append(dst, '"') - } - dst = append(dst, ']') - return dst, nil -} - -func (q Int64s) MarshalJSON() ([]byte, error) { - return quotedList(len(q), func(dst []byte, i int) []byte { - return strconv.AppendInt(dst, q[i], 10) - }) -} - -func (q Int32s) MarshalJSON() ([]byte, error) { - return quotedList(len(q), func(dst []byte, i int) []byte { - return strconv.AppendInt(dst, int64(q[i]), 10) - }) -} - -func (q Uint64s) MarshalJSON() ([]byte, error) { - return quotedList(len(q), func(dst []byte, i int) []byte { - return strconv.AppendUint(dst, q[i], 10) - }) -} - -func (q Uint32s) MarshalJSON() ([]byte, error) { - return quotedList(len(q), func(dst []byte, i int) []byte { - return strconv.AppendUint(dst, uint64(q[i]), 10) - }) -} - -func (q Float64s) MarshalJSON() ([]byte, error) { - return quotedList(len(q), func(dst []byte, i int) []byte { - return strconv.AppendFloat(dst, q[i], 'g', -1, 64) - }) -} - -// RawMessage is a raw encoded JSON value. -// It is identical to json.RawMessage, except it does not suffer from -// https://golang.org/issue/14493. -type RawMessage []byte - -// MarshalJSON returns m. -func (m RawMessage) MarshalJSON() ([]byte, error) { - return m, nil -} - -// UnmarshalJSON sets *m to a copy of data. 
-func (m *RawMessage) UnmarshalJSON(data []byte) error { - if m == nil { - return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer") - } - *m = append((*m)[:0], data...) - return nil -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { return &v } - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { return &v } - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { return &v } - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { return &v } - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json deleted file mode 100644 index 0c82b86a10..0000000000 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json +++ /dev/null @@ -1,372 +0,0 @@ -{ - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account." - } - } - } - }, - "basePath": "", - "baseUrl": "https://iamcredentials.googleapis.com/", - "batchPath": "batch", - "canonicalName": "IAM Credentials", - "description": "Creates short-lived credentials for impersonating IAM service accounts. To enable this API, you must enable the IAM API (iam.googleapis.com). 
", - "discoveryVersion": "v1", - "documentationLink": "https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials", - "fullyEncodeReservedExpansion": true, - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "id": "iamcredentials:v1", - "kind": "discovery#restDescription", - "mtlsRootUrl": "https://iamcredentials.mtls.googleapis.com/", - "name": "iamcredentials", - "ownerDomain": "google.com", - "ownerName": "Google", - "parameters": { - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query", - "type": "string" - }, - "access_token": { - "description": "OAuth access token.", - "location": "query", - "type": "string" - }, - "alt": { - "default": "json", - "description": "Data format for response.", - "enum": [ - "json", - "media", - "proto" - ], - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "type": "string" - }, - "callback": { - "description": "JSONP", - "location": "query", - "type": "string" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "location": "query", - "type": "string" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query", - "type": "string" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "location": "query", - "type": "string" - }, - "prettyPrint": { - "default": "true", - "description": "Returns response with indentations and line breaks.", - "location": "query", - "type": "boolean" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "location": "query", - "type": "string" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "location": "query", - "type": "string" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "location": "query", - "type": "string" - } - }, - "protocol": "rest", - "resources": { - "projects": { - "resources": { - "serviceAccounts": { - "methods": { - "generateAccessToken": { - "description": "Generates an OAuth 2.0 access token for a service account.", - "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateAccessToken", - "httpMethod": "POST", - "id": "iamcredentials.projects.serviceAccounts.generateAccessToken", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", - "location": "path", - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:generateAccessToken", - "request": { - "$ref": "GenerateAccessTokenRequest" - }, - "response": { - "$ref": "GenerateAccessTokenResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "generateIdToken": { - "description": "Generates an OpenID Connect ID token for a service account.", - "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateIdToken", - "httpMethod": "POST", - "id": "iamcredentials.projects.serviceAccounts.generateIdToken", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", - "location": "path", - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:generateIdToken", - "request": { - "$ref": "GenerateIdTokenRequest" - }, - "response": { - "$ref": "GenerateIdTokenResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "signBlob": { - "description": "Signs a blob using a service account's system-managed private key.", - "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", - "httpMethod": "POST", - "id": "iamcredentials.projects.serviceAccounts.signBlob", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", - "location": "path", - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:signBlob", - "request": { - "$ref": "SignBlobRequest" - }, - "response": { - "$ref": "SignBlobResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - }, - "signJwt": { - "description": "Signs a JWT using a service account's system-managed private key.", - "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", - "httpMethod": "POST", - "id": "iamcredentials.projects.serviceAccounts.signJwt", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", - "location": "path", - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:signJwt", - "request": { - "$ref": "SignJwtRequest" - }, - "response": { - "$ref": "SignJwtResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - } - } - } - } - } - }, - "revision": "20211203", - "rootUrl": "https://iamcredentials.googleapis.com/", - "schemas": { - "GenerateAccessTokenRequest": { - "id": "GenerateAccessTokenRequest", - "properties": { - "delegates": { - "description": "The sequence of service accounts in a delegation chain. This field is required for [delegated requests](https://cloud.google.com/iam/help/credentials/delegated-request). For [direct requests](https://cloud.google.com/iam/help/credentials/direct-request), which are more common, do not specify this field. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", - "items": { - "type": "string" - }, - "type": "array" - }, - "lifetime": { - "description": "The desired lifetime duration of the access token in seconds. By default, the maximum allowed value is 1 hour. To set a lifetime of up to 12 hours, you can add the service account as an allowed value in an Organization Policy that enforces the `constraints/iam.allowServiceAccountCredentialLifetimeExtension` constraint. See detailed instructions at https://cloud.google.com/iam/help/credentials/lifetime If a value is not specified, the token's lifetime will be set to a default value of 1 hour.", - "format": "google-duration", - "type": "string" - }, - "scope": { - "description": "Required. Code to identify the scopes to be included in the OAuth 2.0 access token. See https://developers.google.com/identity/protocols/googlescopes for more information. At least one value required.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "GenerateAccessTokenResponse": { - "id": "GenerateAccessTokenResponse", - "properties": { - "accessToken": { - "description": "The OAuth 2.0 access token.", - "type": "string" - }, - "expireTime": { - "description": "Token expiration time. The expiration time is always set.", - "format": "google-datetime", - "type": "string" - } - }, - "type": "object" - }, - "GenerateIdTokenRequest": { - "id": "GenerateIdTokenRequest", - "properties": { - "audience": { - "description": "Required. The audience for the token, such as the API or account that this token grants access to.", - "type": "string" - }, - "delegates": { - "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", - "items": { - "type": "string" - }, - "type": "array" - }, - "includeEmail": { - "description": "Include the service account email in the token. If set to `true`, the token will contain `email` and `email_verified` claims.", - "type": "boolean" - } - }, - "type": "object" - }, - "GenerateIdTokenResponse": { - "id": "GenerateIdTokenResponse", - "properties": { - "token": { - "description": "The OpenId Connect ID token.", - "type": "string" - } - }, - "type": "object" - }, - "SignBlobRequest": { - "id": "SignBlobRequest", - "properties": { - "delegates": { - "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", - "items": { - "type": "string" - }, - "type": "array" - }, - "payload": { - "description": "Required. The bytes to sign.", - "format": "byte", - "type": "string" - } - }, - "type": "object" - }, - "SignBlobResponse": { - "id": "SignBlobResponse", - "properties": { - "keyId": { - "description": "The ID of the key used to sign the blob. The key used for signing will remain valid for at least 12 hours after the blob is signed. To verify the signature, you can retrieve the public key in several formats from the following endpoints: - RSA public key wrapped in an X.509 v3 certificate: `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL}` - Raw key in JSON format: `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` - JSON Web Key (JWK): `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}`", - "type": "string" - }, - "signedBlob": { - "description": "The signature for the blob. Does not include the original blob. After the key pair referenced by the `key_id` response field expires, Google no longer exposes the public key that can be used to verify the blob. As a result, the receiver can no longer verify the signature.", - "format": "byte", - "type": "string" - } - }, - "type": "object" - }, - "SignJwtRequest": { - "id": "SignJwtRequest", - "properties": { - "delegates": { - "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", - "items": { - "type": "string" - }, - "type": "array" - }, - "payload": { - "description": "Required. The JWT payload to sign. Must be a serialized JSON object that contains a JWT Claims Set. 
For example: `{\"sub\": \"user@example.com\", \"iat\": 313435}` If the JWT Claims Set contains an expiration time (`exp`) claim, it must be an integer timestamp that is not in the past and no more than 12 hours in the future.", - "type": "string" - } - }, - "type": "object" - }, - "SignJwtResponse": { - "id": "SignJwtResponse", - "properties": { - "keyId": { - "description": "The ID of the key used to sign the JWT. The key used for signing will remain valid for at least 12 hours after the JWT is signed. To verify the signature, you can retrieve the public key in several formats from the following endpoints: - RSA public key wrapped in an X.509 v3 certificate: `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL}` - Raw key in JSON format: `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` - JSON Web Key (JWK): `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}`", - "type": "string" - }, - "signedJwt": { - "description": "The signed JWT. Contains the automatically generated header; the client-supplied payload; and the signature, which is generated using the key referenced by the `kid` field in the header. After the key pair referenced by the `key_id` response field expires, Google no longer exposes the public key that can be used to verify the JWT. As a result, the receiver can no longer verify the signature.", - "type": "string" - } - }, - "type": "object" - } - }, - "servicePath": "", - "title": "IAM Service Account Credentials API", - "version": "v1", - "version_module": true -} \ No newline at end of file diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go deleted file mode 100644 index 0a6304d51d..0000000000 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ /dev/null @@ -1,1095 +0,0 @@ -// Copyright 2023 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated file. DO NOT EDIT. - -// Package iamcredentials provides access to the IAM Service Account Credentials API. -// -// For product documentation, see: https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials -// -// # Creating a client -// -// Usage example: -// -// import "google.golang.org/api/iamcredentials/v1" -// ... -// ctx := context.Background() -// iamcredentialsService, err := iamcredentials.NewService(ctx) -// -// In this example, Google Application Default Credentials are used for authentication. -// -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. -// -// # Other authentication options -// -// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: -// -// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithAPIKey("AIza...")) -// -// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: -// -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) -// -// See https://godoc.org/google.golang.org/api/option/ for details on options. 
-package iamcredentials // import "google.golang.org/api/iamcredentials/v1" - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - - googleapi "google.golang.org/api/googleapi" - internal "google.golang.org/api/internal" - gensupport "google.golang.org/api/internal/gensupport" - option "google.golang.org/api/option" - internaloption "google.golang.org/api/option/internaloption" - htransport "google.golang.org/api/transport/http" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled -var _ = internaloption.WithDefaultEndpoint -var _ = internal.Version - -const apiId = "iamcredentials:v1" -const apiName = "iamcredentials" -const apiVersion = "v1" -const basePath = "https://iamcredentials.googleapis.com/" -const mtlsBasePath = "https://iamcredentials.mtls.googleapis.com/" - -// OAuth2 scopes used by this API. -const ( - // See, edit, configure, and delete your Google Cloud data and see the - // email address for your Google Account. - CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" -) - -// NewService creates a new Service. -func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { - scopesOption := internaloption.WithDefaultScopes( - "https://www.googleapis.com/auth/cloud-platform", - ) - // NOTE: prepend, so we don't override user-specified scopes. - opts = append([]option.ClientOption{scopesOption}, opts...) - opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) - opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) - client, endpoint, err := htransport.NewClient(ctx, opts...) - if err != nil { - return nil, err - } - s, err := New(client) - if err != nil { - return nil, err - } - if endpoint != "" { - s.BasePath = endpoint - } - return s, nil -} - -// New creates a new Service. It uses the provided http.Client for requests. -// -// Deprecated: please use NewService instead. -// To provide a custom HTTP client, use option.WithHTTPClient. -// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
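The deprecation note above names option.WithHTTPClient as the replacement for New(client). A small sketch of that migration; the helper name newServiceWithClient is illustrative:

    package main

    import (
    	"context"
    	"log"
    	"net/http"

    	iamcredentials "google.golang.org/api/iamcredentials/v1"
    	"google.golang.org/api/option"
    )

    // newServiceWithClient (illustrative name) keeps a caller-owned
    // *http.Client but routes construction through NewService, as the
    // deprecation note recommends.
    func newServiceWithClient(ctx context.Context, httpClient *http.Client) (*iamcredentials.Service, error) {
    	return iamcredentials.NewService(ctx, option.WithHTTPClient(httpClient))
    }

    func main() {
    	if _, err := newServiceWithClient(context.Background(), http.DefaultClient); err != nil {
    		log.Fatal(err)
    	}
    }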
-func New(client *http.Client) (*Service, error) { - if client == nil { - return nil, errors.New("client is nil") - } - s := &Service{client: client, BasePath: basePath} - s.Projects = NewProjectsService(s) - return s, nil -} - -type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - Projects *ProjectsService -} - -func (s *Service) userAgent() string { - if s.UserAgent == "" { - return googleapi.UserAgent - } - return googleapi.UserAgent + " " + s.UserAgent -} - -func NewProjectsService(s *Service) *ProjectsService { - rs := &ProjectsService{s: s} - rs.ServiceAccounts = NewProjectsServiceAccountsService(s) - return rs -} - -type ProjectsService struct { - s *Service - - ServiceAccounts *ProjectsServiceAccountsService -} - -func NewProjectsServiceAccountsService(s *Service) *ProjectsServiceAccountsService { - rs := &ProjectsServiceAccountsService{s: s} - return rs -} - -type ProjectsServiceAccountsService struct { - s *Service -} - -type GenerateAccessTokenRequest struct { - // Delegates: The sequence of service accounts in a delegation chain. - // This field is required for delegated requests - // (https://cloud.google.com/iam/help/credentials/delegated-request). - // For direct requests - // (https://cloud.google.com/iam/help/credentials/direct-request), which - // are more common, do not specify this field. Each service account must - // be granted the `roles/iam.serviceAccountTokenCreator` role on its - // next service account in the chain. The last service account in the - // chain must be granted the `roles/iam.serviceAccountTokenCreator` role - // on the service account that is specified in the `name` field of the - // request. The delegates must have the following format: - // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` - // wildcard character is required; replacing it with a project ID is - // invalid. - Delegates []string `json:"delegates,omitempty"` - - // Lifetime: The desired lifetime duration of the access token in - // seconds. By default, the maximum allowed value is 1 hour. To set a - // lifetime of up to 12 hours, you can add the service account as an - // allowed value in an Organization Policy that enforces the - // `constraints/iam.allowServiceAccountCredentialLifetimeExtension` - // constraint. See detailed instructions at - // https://cloud.google.com/iam/help/credentials/lifetime If a value is - // not specified, the token's lifetime will be set to a default value of - // 1 hour. - Lifetime string `json:"lifetime,omitempty"` - - // Scope: Required. Code to identify the scopes to be included in the - // OAuth 2.0 access token. See - // https://developers.google.com/identity/protocols/googlescopes for - // more information. At least one value required. - Scope []string `json:"scope,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Delegates") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Delegates") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *GenerateAccessTokenRequest) MarshalJSON() ([]byte, error) { - type NoMethod GenerateAccessTokenRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type GenerateAccessTokenResponse struct { - // AccessToken: The OAuth 2.0 access token. - AccessToken string `json:"accessToken,omitempty"` - - // ExpireTime: Token expiration time. The expiration time is always set. - ExpireTime string `json:"expireTime,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "AccessToken") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AccessToken") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { - type NoMethod GenerateAccessTokenResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type GenerateIdTokenRequest struct { - // Audience: Required. The audience for the token, such as the API or - // account that this token grants access to. - Audience string `json:"audience,omitempty"` - - // Delegates: The sequence of service accounts in a delegation chain. - // Each service account must be granted the - // `roles/iam.serviceAccountTokenCreator` role on its next service - // account in the chain. The last service account in the chain must be - // granted the `roles/iam.serviceAccountTokenCreator` role on the - // service account that is specified in the `name` field of the request. - // The delegates must have the following format: - // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` - // wildcard character is required; replacing it with a project ID is - // invalid. - Delegates []string `json:"delegates,omitempty"` - - // IncludeEmail: Include the service account email in the token. If set - // to `true`, the token will contain `email` and `email_verified` - // claims. - IncludeEmail bool `json:"includeEmail,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Audience") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Audience") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *GenerateIdTokenRequest) MarshalJSON() ([]byte, error) { - type NoMethod GenerateIdTokenRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type GenerateIdTokenResponse struct { - // Token: The OpenId Connect ID token. - Token string `json:"token,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Token") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Token") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { - type NoMethod GenerateIdTokenResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SignBlobRequest struct { - // Delegates: The sequence of service accounts in a delegation chain. - // Each service account must be granted the - // `roles/iam.serviceAccountTokenCreator` role on its next service - // account in the chain. The last service account in the chain must be - // granted the `roles/iam.serviceAccountTokenCreator` role on the - // service account that is specified in the `name` field of the request. - // The delegates must have the following format: - // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` - // wildcard character is required; replacing it with a project ID is - // invalid. - Delegates []string `json:"delegates,omitempty"` - - // Payload: Required. The bytes to sign. - Payload string `json:"payload,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Delegates") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Delegates") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SignBlobRequest) MarshalJSON() ([]byte, error) { - type NoMethod SignBlobRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SignBlobResponse struct { - // KeyId: The ID of the key used to sign the blob. The key used for - // signing will remain valid for at least 12 hours after the blob is - // signed. To verify the signature, you can retrieve the public key in - // several formats from the following endpoints: - RSA public key - // wrapped in an X.509 v3 certificate: - // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT - // _EMAIL}` - Raw key in JSON format: - // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_ - // EMAIL}` - JSON Web Key (JWK): - // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_ - // EMAIL}` - KeyId string `json:"keyId,omitempty"` - - // SignedBlob: The signature for the blob. Does not include the original - // blob. After the key pair referenced by the `key_id` response field - // expires, Google no longer exposes the public key that can be used to - // verify the blob. As a result, the receiver can no longer verify the - // signature. - SignedBlob string `json:"signedBlob,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "KeyId") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "KeyId") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SignBlobResponse) MarshalJSON() ([]byte, error) { - type NoMethod SignBlobResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SignJwtRequest struct { - // Delegates: The sequence of service accounts in a delegation chain. - // Each service account must be granted the - // `roles/iam.serviceAccountTokenCreator` role on its next service - // account in the chain. The last service account in the chain must be - // granted the `roles/iam.serviceAccountTokenCreator` role on the - // service account that is specified in the `name` field of the request. - // The delegates must have the following format: - // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` - // wildcard character is required; replacing it with a project ID is - // invalid. - Delegates []string `json:"delegates,omitempty"` - - // Payload: Required. The JWT payload to sign. 
Must be a serialized JSON - // object that contains a JWT Claims Set. For example: `{"sub": - // "user@example.com", "iat": 313435}` If the JWT Claims Set contains an - // expiration time (`exp`) claim, it must be an integer timestamp that - // is not in the past and no more than 12 hours in the future. - Payload string `json:"payload,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Delegates") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Delegates") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SignJwtRequest) MarshalJSON() ([]byte, error) { - type NoMethod SignJwtRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SignJwtResponse struct { - // KeyId: The ID of the key used to sign the JWT. The key used for - // signing will remain valid for at least 12 hours after the JWT is - // signed. To verify the signature, you can retrieve the public key in - // several formats from the following endpoints: - RSA public key - // wrapped in an X.509 v3 certificate: - // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT - // _EMAIL}` - Raw key in JSON format: - // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_ - // EMAIL}` - JSON Web Key (JWK): - // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_ - // EMAIL}` - KeyId string `json:"keyId,omitempty"` - - // SignedJwt: The signed JWT. Contains the automatically generated - // header; the client-supplied payload; and the signature, which is - // generated using the key referenced by the `kid` field in the header. - // After the key pair referenced by the `key_id` response field expires, - // Google no longer exposes the public key that can be used to verify - // the JWT. As a result, the receiver can no longer verify the - // signature. - SignedJwt string `json:"signedJwt,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "KeyId") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "KeyId") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SignJwtResponse) MarshalJSON() ([]byte, error) { - type NoMethod SignJwtResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// method id "iamcredentials.projects.serviceAccounts.generateAccessToken": - -type ProjectsServiceAccountsGenerateAccessTokenCall struct { - s *Service - name string - generateaccesstokenrequest *GenerateAccessTokenRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// GenerateAccessToken: Generates an OAuth 2.0 access token for a -// service account. -// -// - name: The resource name of the service account for which the -// credentials are requested, in the following format: -// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` -// wildcard character is required; replacing it with a project ID is -// invalid. -func (r *ProjectsServiceAccountsService) GenerateAccessToken(name string, generateaccesstokenrequest *GenerateAccessTokenRequest) *ProjectsServiceAccountsGenerateAccessTokenCall { - c := &ProjectsServiceAccountsGenerateAccessTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.generateaccesstokenrequest = generateaccesstokenrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGenerateAccessTokenCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Context(ctx context.Context) *ProjectsServiceAccountsGenerateAccessTokenCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateaccesstokenrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:generateAccessToken") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "iamcredentials.projects.serviceAccounts.generateAccessToken" call. -// Exactly one of *GenerateAccessTokenResponse or error will be non-nil. 
-// Any non-2xx status code is an error. Response headers are in either -// *GenerateAccessTokenResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.CallOption) (*GenerateAccessTokenResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &GenerateAccessTokenResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Generates an OAuth 2.0 access token for a service account.", - // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateAccessToken", - // "httpMethod": "POST", - // "id": "iamcredentials.projects.serviceAccounts.generateAccessToken", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", - // "location": "path", - // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:generateAccessToken", - // "request": { - // "$ref": "GenerateAccessTokenRequest" - // }, - // "response": { - // "$ref": "GenerateAccessTokenResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "iamcredentials.projects.serviceAccounts.generateIdToken": - -type ProjectsServiceAccountsGenerateIdTokenCall struct { - s *Service - name string - generateidtokenrequest *GenerateIdTokenRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// GenerateIdToken: Generates an OpenID Connect ID token for a service -// account. -// -// - name: The resource name of the service account for which the -// credentials are requested, in the following format: -// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` -// wildcard character is required; replacing it with a project ID is -// invalid. -func (r *ProjectsServiceAccountsService) GenerateIdToken(name string, generateidtokenrequest *GenerateIdTokenRequest) *ProjectsServiceAccountsGenerateIdTokenCall { - c := &ProjectsServiceAccountsGenerateIdTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.generateidtokenrequest = generateidtokenrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
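For the generateIdToken call deleted below, a short sketch under the same assumptions as the earlier example (illustrative audience and service account); per the request schema, IncludeEmail adds the `email` and `email_verified` claims to the token:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	iamcredentials "google.golang.org/api/iamcredentials/v1"
    )

    func main() {
    	ctx := context.Background()
    	svc, err := iamcredentials.NewService(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	name := "projects/-/serviceAccounts/target@my-project.iam.gserviceaccount.com" // illustrative
    	req := &iamcredentials.GenerateIdTokenRequest{
    		Audience:     "https://my-service.example.com", // illustrative audience
    		IncludeEmail: true,                             // adds email and email_verified claims
    	}
    	resp, err := svc.Projects.ServiceAccounts.GenerateIdToken(name, req).Context(ctx).Do()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(resp.Token)
    }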
-func (c *ProjectsServiceAccountsGenerateIdTokenCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGenerateIdTokenCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsServiceAccountsGenerateIdTokenCall) Context(ctx context.Context) *ProjectsServiceAccountsGenerateIdTokenCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsServiceAccountsGenerateIdTokenCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateidtokenrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:generateIdToken") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "iamcredentials.projects.serviceAccounts.generateIdToken" call. -// Exactly one of *GenerateIdTokenResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *GenerateIdTokenResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOption) (*GenerateIdTokenResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &GenerateIdTokenResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Generates an OpenID Connect ID token for a service account.", - // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateIdToken", - // "httpMethod": "POST", - // "id": "iamcredentials.projects.serviceAccounts.generateIdToken", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. 
The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", - // "location": "path", - // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:generateIdToken", - // "request": { - // "$ref": "GenerateIdTokenRequest" - // }, - // "response": { - // "$ref": "GenerateIdTokenResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "iamcredentials.projects.serviceAccounts.signBlob": - -type ProjectsServiceAccountsSignBlobCall struct { - s *Service - name string - signblobrequest *SignBlobRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SignBlob: Signs a blob using a service account's system-managed -// private key. -// -// - name: The resource name of the service account for which the -// credentials are requested, in the following format: -// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` -// wildcard character is required; replacing it with a project ID is -// invalid. -func (r *ProjectsServiceAccountsService) SignBlob(name string, signblobrequest *SignBlobRequest) *ProjectsServiceAccountsSignBlobCall { - c := &ProjectsServiceAccountsSignBlobCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.signblobrequest = signblobrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsServiceAccountsSignBlobCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsSignBlobCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsServiceAccountsSignBlobCall) Context(ctx context.Context) *ProjectsServiceAccountsSignBlobCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signblobrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:signBlob") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "iamcredentials.projects.serviceAccounts.signBlob" call. 
-// Exactly one of *SignBlobResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SignBlobResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) (*SignBlobResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &SignBlobResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Signs a blob using a service account's system-managed private key.", - // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", - // "httpMethod": "POST", - // "id": "iamcredentials.projects.serviceAccounts.signBlob", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", - // "location": "path", - // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:signBlob", - // "request": { - // "$ref": "SignBlobRequest" - // }, - // "response": { - // "$ref": "SignBlobResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "iamcredentials.projects.serviceAccounts.signJwt": - -type ProjectsServiceAccountsSignJwtCall struct { - s *Service - name string - signjwtrequest *SignJwtRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SignJwt: Signs a JWT using a service account's system-managed private -// key. -// -// - name: The resource name of the service account for which the -// credentials are requested, in the following format: -// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` -// wildcard character is required; replacing it with a project ID is -// invalid. -func (r *ProjectsServiceAccountsService) SignJwt(name string, signjwtrequest *SignJwtRequest) *ProjectsServiceAccountsSignJwtCall { - c := &ProjectsServiceAccountsSignJwtCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.signjwtrequest = signjwtrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
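The signJwt surface deleted below takes a serialized JWT Claims Set as its payload; per the schema description earlier in this file, any `exp` claim must be an integer timestamp no more than 12 hours in the future. A minimal sketch, with the same illustrative service account as above:

    package main

    import (
    	"context"
    	"encoding/json"
    	"fmt"
    	"log"
    	"time"

    	iamcredentials "google.golang.org/api/iamcredentials/v1"
    )

    func main() {
    	ctx := context.Background()
    	svc, err := iamcredentials.NewService(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Claims set: exp must be an integer timestamp, not in the past and
    	// no more than 12 hours out.
    	claims, err := json.Marshal(map[string]interface{}{
    		"sub": "user@example.com",
    		"iat": time.Now().Unix(),
    		"exp": time.Now().Add(time.Hour).Unix(),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	name := "projects/-/serviceAccounts/target@my-project.iam.gserviceaccount.com" // illustrative
    	resp, err := svc.Projects.ServiceAccounts.SignJwt(name, &iamcredentials.SignJwtRequest{
    		Payload: string(claims),
    	}).Context(ctx).Do()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(resp.KeyId, resp.SignedJwt)
    }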
-func (c *ProjectsServiceAccountsSignJwtCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsSignJwtCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsServiceAccountsSignJwtCall) Context(ctx context.Context) *ProjectsServiceAccountsSignJwtCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signjwtrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:signJwt") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "iamcredentials.projects.serviceAccounts.signJwt" call. -// Exactly one of *SignJwtResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *SignJwtResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (*SignJwtResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &SignJwtResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Signs a JWT using a service account's system-managed private key.", - // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", - // "httpMethod": "POST", - // "id": "iamcredentials.projects.serviceAccounts.signJwt", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", - // "location": "path", - // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:signJwt", - // "request": { - // "$ref": "SignJwtRequest" - // }, - // "response": { - // "$ref": "SignJwtResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go deleted file mode 100644 index cecbb9ba11..0000000000 --- a/vendor/google.golang.org/api/internal/cba.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// cba.go (certificate-based access) contains utils for implementing Device Certificate -// Authentication according to https://google.aip.dev/auth/4114 and Default Credentials -// for Google Cloud Virtual Environments according to https://google.aip.dev/auth/4115. -// -// The overall logic for DCA is as follows: -// 1. If both endpoint override and client certificate are specified, use them as is. -// 2. If user does not specify client certificate, we will attempt to use default -// client certificate. -// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if -// client certificate is available and defaultEndpoint otherwise. -// -// Implications of the above logic: -// 1. If the user specifies a non-mTLS endpoint override but client certificate is -// available, we will pass along the cert anyway and let the server decide what to do. -// 2. If the user specifies an mTLS endpoint override but client certificate is not -// available, we will not fail-fast, but let backend throw error when connecting. -// -// If running within Google's cloud environment, and client certificate is not specified -// and not available through DCA, we will try mTLS with credentials held by -// the Secure Session Agent, which is part of Google's cloud infrastructure. -// -// We would like to avoid introducing client-side logic that parses whether the -// endpoint override is an mTLS url, since the url pattern may change at any time. -// -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. - -// Package internal supports the options and transport packages. -package internal - -import ( - "context" - "crypto/tls" - "net" - "net/url" - "os" - "strings" - - "github.com/google/s2a-go" - "github.com/google/s2a-go/fallback" - "google.golang.org/api/internal/cert" - "google.golang.org/grpc/credentials" -) - -const ( - mTLSModeAlways = "always" - mTLSModeNever = "never" - mTLSModeAuto = "auto" - - // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. - googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" -) - -// getClientCertificateSourceAndEndpoint is a convenience function that invokes -// getClientCertificateSource and getEndpoint sequentially and returns the client -// cert source and endpoint as a tuple.
-func getClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { - clientCertSource, err := getClientCertificateSource(settings) - if err != nil { - return nil, "", err - } - endpoint, err := getEndpoint(settings, clientCertSource) - if err != nil { - return nil, "", err - } - return clientCertSource, endpoint, nil -} - -type transportConfig struct { - clientCertSource cert.Source // The client certificate source. - endpoint string // The corresponding endpoint to use based on client certificate source. - s2aAddress string // The S2A address if it can be used, otherwise an empty string. - s2aMTLSEndpoint string // The MTLS endpoint to use with S2A. -} - -func getTransportConfig(settings *DialSettings) (*transportConfig, error) { - clientCertSource, endpoint, err := getClientCertificateSourceAndEndpoint(settings) - if err != nil { - return &transportConfig{ - clientCertSource: nil, endpoint: "", s2aAddress: "", s2aMTLSEndpoint: "", - }, err - } - defaultTransportConfig := transportConfig{ - clientCertSource: clientCertSource, - endpoint: endpoint, - s2aAddress: "", - s2aMTLSEndpoint: "", - } - - // Check the env to determine whether to use S2A. - if !isGoogleS2AEnabled() { - return &defaultTransportConfig, nil - } - - // If client cert is found, use that over S2A. - // If MTLS is not enabled for the endpoint, skip S2A. - if clientCertSource != nil || !mtlsEndpointEnabledForS2A() { - return &defaultTransportConfig, nil - } - s2aMTLSEndpoint := settings.DefaultMTLSEndpoint - // If there is endpoint override, honor it. - if settings.Endpoint != "" { - s2aMTLSEndpoint = endpoint - } - s2aAddress := GetS2AAddress() - if s2aAddress == "" { - return &defaultTransportConfig, nil - } - return &transportConfig{ - clientCertSource: clientCertSource, - endpoint: endpoint, - s2aAddress: s2aAddress, - s2aMTLSEndpoint: s2aMTLSEndpoint, - }, nil -} - -func isGoogleS2AEnabled() bool { - return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" -} - -// getClientCertificateSource returns a default client certificate source, if -// not provided by the user. -// -// A nil default source can be returned if the source does not exist. Any exceptions -// encountered while initializing the default source will be reported as client -// error (ex. corrupt metadata file). -// -// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE -// must be set to "true" to allow certificate to be used (including user provided -// certificates). For details, see AIP-4114. -func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { - if !isClientCertificateEnabled() { - return nil, nil - } else if settings.ClientCertSource != nil { - return settings.ClientCertSource, nil - } else { - return cert.DefaultSource() - } -} - -func isClientCertificateEnabled() bool { - useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") - // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. - return strings.ToLower(useClientCert) == "true" -} - -// getEndpoint returns the endpoint for the service, taking into account the -// user-provided endpoint override "settings.Endpoint". -// -// If no endpoint override is specified, we will either return the default endpoint or -// the default mTLS endpoint if a client certificate is available. -// -// You can override the default endpoint choice (mtls vs. regular) by setting the -// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. 
-// -// If the endpoint override is an address (host:port) rather than full base -// URL (ex. https://...), then the user-provided address will be merged into -// the default endpoint. For example, WithEndpoint("myhost:8000") and -// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8000/bar/baz" -func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { - if settings.Endpoint == "" { - mtlsMode := getMTLSMode() - if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { - return settings.DefaultMTLSEndpoint, nil - } - return settings.DefaultEndpoint, nil - } - if strings.Contains(settings.Endpoint, "://") { - // User passed in a full URL path, use it verbatim. - return settings.Endpoint, nil - } - if settings.DefaultEndpoint == "" { - // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. - // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. - return settings.Endpoint, nil - } - - // Assume user-provided endpoint is host[:port], merge it with the default endpoint. - return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) -} - -func getMTLSMode() string { - mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") - if mode == "" { - mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. - } - if mode == "" { - return mTLSModeAuto - } - return strings.ToLower(mode) -} - -func mergeEndpoints(baseURL, newHost string) (string, error) { - u, err := url.Parse(fixScheme(baseURL)) - if err != nil { - return "", err - } - return strings.Replace(baseURL, u.Host, newHost, 1), nil -} - -func fixScheme(baseURL string) string { - if !strings.Contains(baseURL, "://") { - return "https://" + baseURL - } - return baseURL -} - -// GetGRPCTransportConfigAndEndpoint returns an instance of credentials.TransportCredentials, and the -// corresponding endpoint to use for GRPC client. -func GetGRPCTransportConfigAndEndpoint(settings *DialSettings) (credentials.TransportCredentials, string, error) { - config, err := getTransportConfig(settings) - if err != nil { - return nil, "", err - } - - defaultTransportCreds := credentials.NewTLS(&tls.Config{ - GetClientCertificate: config.clientCertSource, - }) - if config.s2aAddress == "" { - return defaultTransportCreds, config.endpoint, nil - } - - var fallbackOpts *s2a.FallbackOptions - // In case of S2A failure, fall back to the endpoint that would've been used without S2A. - if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { - fallbackOpts = &s2a.FallbackOptions{ - FallbackClientHandshakeFunc: fallbackHandshake, - } - } - - s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ - S2AAddress: config.s2aAddress, - FallbackOpts: fallbackOpts, - }) - if err != nil { - // Use default if we cannot initialize S2A client transport credentials. - return defaultTransportCreds, config.endpoint, nil - } - return s2aTransportCreds, config.s2aMTLSEndpoint, nil -} - -// GetHTTPTransportConfigAndEndpoint returns a client certificate source, a function for dialing MTLS with S2A, -// and the endpoint to use for HTTP client.
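To make the merge semantics above concrete, a self-contained sketch of the same logic (mirroring the deleted mergeEndpoints/fixScheme helpers, not a public API), reproducing the example from the doc comment:

    package main

    import (
    	"fmt"
    	"net/url"
    	"strings"
    )

    // mergeEndpoints mirrors the deleted helper: the host[:port] of the
    // user-supplied endpoint replaces the host of the default endpoint,
    // while the default endpoint's scheme and path are preserved.
    func mergeEndpoints(baseURL, newHost string) (string, error) {
    	fixed := baseURL
    	if !strings.Contains(fixed, "://") {
    		fixed = "https://" + fixed // fixScheme
    	}
    	u, err := url.Parse(fixed)
    	if err != nil {
    		return "", err
    	}
    	return strings.Replace(baseURL, u.Host, newHost, 1), nil
    }

    func main() {
    	merged, _ := mergeEndpoints("https://foo.com/bar/baz", "myhost:8000")
    	fmt.Println(merged) // https://myhost:8000/bar/baz
    }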
-func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, func(context.Context, string, string) (net.Conn, error), string, error) { - config, err := getTransportConfig(settings) - if err != nil { - return nil, nil, "", err - } - - if config.s2aAddress == "" { - return config.clientCertSource, nil, config.endpoint, nil - } - - var fallbackOpts *s2a.FallbackOptions - // In case of S2A failure, fall back to the endpoint that would've been used without S2A. - if fallbackURL, err := url.Parse(config.endpoint); err == nil { - if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { - fallbackOpts = &s2a.FallbackOptions{ - FallbackDialer: &s2a.FallbackDialer{ - Dialer: fallbackDialer, - ServerAddr: fallbackServerAddr, - }, - } - } - } - - dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ - S2AAddress: config.s2aAddress, - FallbackOpts: fallbackOpts, - }) - return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil -} - -// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. -var mtlsEndpointEnabledForS2A = func() bool { - // TODO(xmenxk): determine this via discovery config. - return true -} diff --git a/vendor/google.golang.org/api/internal/cert/default_cert.go b/vendor/google.golang.org/api/internal/cert/default_cert.go deleted file mode 100644 index 21d0251531..0000000000 --- a/vendor/google.golang.org/api/internal/cert/default_cert.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cert contains certificate tools for Google API clients. -// This package is intended to be used with crypto/tls.Config.GetClientCertificate. -// -// The certificates can be used to satisfy Google's Endpoint Validation. -// See https://cloud.google.com/endpoint-verification/docs/overview -// -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. -package cert - -import ( - "crypto/tls" - "errors" - "sync" -) - -// defaultCertData holds all the variables pertaining to -// the default certificate source created by DefaultSource. -// -// A singleton model is used to allow the source to be reused -// by the transport layer. -type defaultCertData struct { - once sync.Once - source Source - err error -} - -var ( - defaultCert defaultCertData -) - -// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. -type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) - -// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable. -var errSourceUnavailable = errors.New("certificate source is unavailable") - -// DefaultSource returns a certificate source using the preferred EnterpriseCertificateProxySource. -// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource. -// -// If neither source is available (due to missing configurations), a nil Source and a nil Error are -// returned to indicate that a default certificate source is unavailable.
-func DefaultSource() (Source, error) { - defaultCert.once.Do(func() { - defaultCert.source, defaultCert.err = NewEnterpriseCertificateProxySource("") - if errors.Is(defaultCert.err, errSourceUnavailable) { - defaultCert.source, defaultCert.err = NewSecureConnectSource("") - if errors.Is(defaultCert.err, errSourceUnavailable) { - defaultCert.source, defaultCert.err = nil, nil - } - } - }) - return defaultCert.source, defaultCert.err -} diff --git a/vendor/google.golang.org/api/internal/cert/enterprise_cert.go b/vendor/google.golang.org/api/internal/cert/enterprise_cert.go deleted file mode 100644 index 1061b5f05f..0000000000 --- a/vendor/google.golang.org/api/internal/cert/enterprise_cert.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2022 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cert contains certificate tools for Google API clients. -// This package is intended to be used with crypto/tls.Config.GetClientCertificate. -// -// The certificates can be used to satisfy Google's Endpoint Validation. -// See https://cloud.google.com/endpoint-verification/docs/overview -// -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. -package cert - -import ( - "crypto/tls" - "errors" - - "github.com/googleapis/enterprise-certificate-proxy/client" -) - -type ecpSource struct { - key *client.Key -} - -// NewEnterpriseCertificateProxySource creates a certificate source -// using the Enterprise Certificate Proxy client, which delegates -// certificate related operations to an OS-specific "signer binary" -// that communicates with the native keystore (ex. keychain on MacOS). - -// -// The configFilePath points to a config file containing relevant parameters -// such as the certificate issuer and the location of the signer binary. -// If configFilePath is empty, the client will attempt to load the config from -// a well-known gcloud location. -func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) { - key, err := client.Cred(configFilePath) - if err != nil { - if errors.Is(err, client.ErrCredUnavailable) { - return nil, errSourceUnavailable - } - return nil, err - } - - return (&ecpSource{ - key: key, - }).getClientCertificate, nil -} - -func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { - var cert tls.Certificate - cert.PrivateKey = s.key - cert.Certificate = s.key.CertificateChain() - return &cert, nil -} diff --git a/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go b/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go deleted file mode 100644 index afd79ffe2b..0000000000 --- a/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2022 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cert contains certificate tools for Google API clients. -// This package is intended to be used with crypto/tls.Config.GetClientCertificate. -// -// The certificates can be used to satisfy Google's Endpoint Validation. -// See https://cloud.google.com/endpoint-verification/docs/overview -// -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients.
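The SecureConnect source deleted below is driven by a small metadata file (by default ~/.secureConnect/context_aware_metadata.json) whose `cert_provider_command` names a helper that prints a PEM certificate and key on stdout. A sketch of parsing and validating that shape, with an illustrative helper path; the local struct mirrors the deleted secureConnectMetadata type:

    package main

    import (
    	"encoding/json"
    	"errors"
    	"fmt"
    	"log"
    )

    // Mirrors the deleted secureConnectMetadata type.
    type secureConnectMetadata struct {
    	Cmd []string `json:"cert_provider_command"`
    }

    func main() {
    	// Illustrative file contents; the helper path is an assumption.
    	raw := []byte(`{"cert_provider_command": ["/usr/local/bin/cert_helper", "--print_certificate"]}`)
    	var md secureConnectMetadata
    	if err := json.Unmarshal(raw, &md); err != nil {
    		log.Fatal(err)
    	}
    	// Same check as the deleted validateMetadata.
    	if len(md.Cmd) == 0 {
    		log.Fatal(errors.New("empty cert_provider_command"))
    	}
    	fmt.Println(md.Cmd)
    }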
-package cert - -import ( - "crypto/tls" - "crypto/x509" - "encoding/json" - "errors" - "fmt" - "os" - "os/exec" - "os/user" - "path/filepath" - "sync" - "time" -) - -const ( - metadataPath = ".secureConnect" - metadataFile = "context_aware_metadata.json" -) - -type secureConnectSource struct { - metadata secureConnectMetadata - - // Cache the cert to avoid executing helper command repeatedly. - cachedCertMutex sync.Mutex - cachedCert *tls.Certificate -} - -type secureConnectMetadata struct { - Cmd []string `json:"cert_provider_command"` -} - -// NewSecureConnectSource creates a certificate source using -// the Secure Connect Helper and its associated metadata file. -// -// The configFilePath points to the location of the context aware metadata file. -// If configFilePath is empty, use the default context aware metadata location. -func NewSecureConnectSource(configFilePath string) (Source, error) { - if configFilePath == "" { - user, err := user.Current() - if err != nil { - // Error locating the default config means Secure Connect is not supported. - return nil, errSourceUnavailable - } - configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) - } - - file, err := os.ReadFile(configFilePath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Config file missing means Secure Connect is not supported. - return nil, errSourceUnavailable - } - return nil, err - } - - var metadata secureConnectMetadata - if err := json.Unmarshal(file, &metadata); err != nil { - return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) - } - if err := validateMetadata(metadata); err != nil { - return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) - } - return (&secureConnectSource{ - metadata: metadata, - }).getClientCertificate, nil -} - -func validateMetadata(metadata secureConnectMetadata) error { - if len(metadata.Cmd) == 0 { - return errors.New("empty cert_provider_command") - } - return nil -} - -func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { - s.cachedCertMutex.Lock() - defer s.cachedCertMutex.Unlock() - if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { - return s.cachedCert, nil - } - // Expand OS environment variables in the cert provider command such as "$HOME". - for i := 0; i < len(s.metadata.Cmd); i++ { - s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) - } - command := s.metadata.Cmd - data, err := exec.Command(command[0], command[1:]...).Output() - if err != nil { - return nil, err - } - cert, err := tls.X509KeyPair(data, data) - if err != nil { - return nil, err - } - s.cachedCert = &cert - return &cert, nil -} - -// isCertificateExpired returns true if the given cert is expired or invalid. -func isCertificateExpired(cert *tls.Certificate) bool { - if len(cert.Certificate) == 0 { - return true - } - parsed, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return true - } - return time.Now().After(parsed.NotAfter) -} diff --git a/vendor/google.golang.org/api/internal/conn_pool.go b/vendor/google.golang.org/api/internal/conn_pool.go deleted file mode 100644 index fedcce15b4..0000000000 --- a/vendor/google.golang.org/api/internal/conn_pool.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "google.golang.org/grpc" -) - -// ConnPool is a pool of grpc.ClientConns. 
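Note that the Source type shown earlier is exactly the signature of tls.Config.GetClientCertificate, which is what lets any of these sources be handed straight to the TLS stack. A hedged sketch of the wiring; the static in-memory source here is a placeholder for illustration, not how the real helper-backed sources work:

package main

import (
	"crypto/tls"
	"net/http"
)

// Source matches the signature of tls.Config.GetClientCertificate,
// so a helper-backed certificate source can be dropped in directly.
type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error)

func clientWithMTLS(src Source) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				// The client certificate is produced lazily, per handshake,
				// so a helper-backed source can cache and refresh it.
				GetClientCertificate: src,
			},
		},
	}
}

func main() {
	// Hypothetical static source for illustration only.
	var cert tls.Certificate
	src := func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
		return &cert, nil
	}
	_ = clientWithMTLS(src)
}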
-type ConnPool interface { - // Conn returns a ClientConn from the pool. - // - // Conns aren't returned to the pool. - Conn() *grpc.ClientConn - - // Num returns the number of connections in the pool. - // - // It will always return the same value. - Num() int - - // Close closes every ClientConn in the pool. - // - // The error returned by Close may be a single error or multiple errors. - Close() error - - // ConnPool implements grpc.ClientConnInterface to enable it to be used directly with generated proto stubs. - grpc.ClientConnInterface -} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go deleted file mode 100644 index 92b3acf6ed..0000000000 --- a/vendor/google.golang.org/api/internal/creds.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2017 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "context" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "net" - "net/http" - "os" - "time" - - "golang.org/x/oauth2" - "google.golang.org/api/internal/impersonate" - - "golang.org/x/oauth2/google" -) - -const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" - -// Creds returns credential information obtained from DialSettings, or, if none, -// default credential information. -func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { - creds, err := baseCreds(ctx, ds) - if err != nil { - return nil, err - } - if ds.ImpersonationConfig != nil { - return impersonateCredentials(ctx, creds, ds) - } - return creds, nil -} - -func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { - if ds.InternalCredentials != nil { - return ds.InternalCredentials, nil - } - if ds.Credentials != nil { - return ds.Credentials, nil - } - if ds.CredentialsJSON != nil { - return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) - } - if ds.CredentialsFile != "" { - data, err := os.ReadFile(ds.CredentialsFile) - if err != nil { - return nil, fmt.Errorf("cannot read credentials file: %v", err) - } - return credentialsFromJSON(ctx, data, ds) - } - if ds.TokenSource != nil { - return &google.Credentials{TokenSource: ds.TokenSource}, nil - } - cred, err := google.FindDefaultCredentials(ctx, ds.GetScopes()...) - if err != nil { - return nil, err - } - if len(cred.JSON) > 0 { - return credentialsFromJSON(ctx, cred.JSON, ds) - } - // For GAE and GCE, the JSON is empty so return the default credentials directly. - return cred, nil -} - -// JSON key file type. -const ( - serviceAccountKey = "service_account" -) - -// credentialsFromJSON returns a google.Credentials from the JSON data. -// -// - A self-signed JWT flow will be executed if the following conditions are -// met: -// -// (1) At least one of the following is true: -// (a) No scope is provided -// (b) Scope for self-signed JWT flow is enabled -// (c) Audiences are explicitly provided by users -// (2) No service account impersonation -// -// - Otherwise, executes standard OAuth 2.0 flow -// More details: google.aip.dev/auth/4111 -func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) { - var params google.CredentialsParams - params.Scopes = ds.GetScopes() - - // Determine configurations for the OAuth2 transport, which is separate from the API transport. - // The OAuth2 transport and endpoint will be configured for mTLS if applicable.
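The ConnPool interface above embeds grpc.ClientConnInterface so a pool can be passed wherever generated stubs expect a connection. A sketch of one way an implementation could satisfy that contract, assuming a simple round-robin policy; this is not the library's actual pool:

package pool

import (
	"context"
	"sync/atomic"

	"google.golang.org/grpc"
)

// roundRobinPool hands out connections round-robin, and delegating
// Invoke/NewStream to a pooled connection satisfies
// grpc.ClientConnInterface, so generated stubs accept the pool directly.
type roundRobinPool struct {
	conns []*grpc.ClientConn
	idx   uint32
}

func (p *roundRobinPool) Conn() *grpc.ClientConn {
	i := atomic.AddUint32(&p.idx, 1)
	return p.conns[i%uint32(len(p.conns))]
}

func (p *roundRobinPool) Num() int { return len(p.conns) }

// Close closes every connection, reporting the first error seen.
func (p *roundRobinPool) Close() error {
	var firstErr error
	for _, c := range p.conns {
		if err := c.Close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

func (p *roundRobinPool) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error {
	return p.Conn().Invoke(ctx, method, args, reply, opts...)
}

func (p *roundRobinPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	return p.Conn().NewStream(ctx, desc, method, opts...)
}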
- clientCertSource, oauth2Endpoint, err := getClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) - if err != nil { - return nil, err - } - params.TokenURL = oauth2Endpoint - if clientCertSource != nil { - tlsConfig := &tls.Config{ - GetClientCertificate: clientCertSource, - } - ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig)) - } - - // By default, a standard OAuth 2.0 token source is created - cred, err := google.CredentialsFromJSONWithParams(ctx, data, params) - if err != nil { - return nil, err - } - - // Override the token source to use self-signed JWT if conditions are met - isJWTFlow, err := isSelfSignedJWTFlow(data, ds) - if err != nil { - return nil, err - } - if isJWTFlow { - ts, err := selfSignedJWTTokenSource(data, ds) - if err != nil { - return nil, err - } - cred.TokenSource = ts - } - - return cred, err -} - -func isSelfSignedJWTFlow(data []byte, ds *DialSettings) (bool, error) { - if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && - ds.ImpersonationConfig == nil { - // Check if JSON is a service account and if so create a self-signed JWT. - var f struct { - Type string `json:"type"` - // The remaining JSON fields are omitted because they are not used. - } - if err := json.Unmarshal(data, &f); err != nil { - return false, err - } - return f.Type == serviceAccountKey, nil - } - return false, nil -} - -func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource, error) { - if len(ds.GetScopes()) > 0 && !ds.HasCustomAudience() { - // Scopes are preferred in self-signed JWT unless the scope is not available - // or a custom audience is used. - return google.JWTAccessTokenSourceWithScope(data, ds.GetScopes()...) - } else if ds.GetAudience() != "" { - // Fall back to audience if scope is not provided - return google.JWTAccessTokenSourceFromJSON(data, ds.GetAudience()) - } else { - return nil, errors.New("neither scopes nor audience are available for the self-signed JWT") - } -} - -// GetQuotaProject retrieves quota project with precedence being: client option, -// environment variable, creds file. -func GetQuotaProject(creds *google.Credentials, clientOpt string) string { - if clientOpt != "" { - return clientOpt - } - if env := os.Getenv(quotaProjectEnvVar); env != "" { - return env - } - if creds == nil { - return "" - } - var v struct { - QuotaProject string `json:"quota_project_id"` - } - if err := json.Unmarshal(creds.JSON, &v); err != nil { - return "" - } - return v.QuotaProject -} - -func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds *DialSettings) (*google.Credentials, error) { - if len(ds.ImpersonationConfig.Scopes) == 0 { - ds.ImpersonationConfig.Scopes = ds.GetScopes() - } - ts, err := impersonate.TokenSource(ctx, creds.TokenSource, ds.ImpersonationConfig) - if err != nil { - return nil, err - } - return &google.Credentials{ - TokenSource: ts, - ProjectID: creds.ProjectID, - }, nil -} - -// oauth2DialSettings returns the settings to be used by the OAuth2 transport, which is separate from the API transport. -func oauth2DialSettings(ds *DialSettings) *DialSettings { - var ods DialSettings - ods.DefaultEndpoint = google.Endpoint.TokenURL - ods.DefaultMTLSEndpoint = google.MTLSTokenURL - ods.ClientCertSource = ds.ClientCertSource - return &ods -} - -// customHTTPClient constructs an HTTPClient using the provided tlsConfig, to support mTLS.
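The service-account check inside isSelfSignedJWTFlow only looks at the top-level "type" field of the credentials JSON. A self-contained illustration of that decision:

package main

import (
	"encoding/json"
	"fmt"
)

// isServiceAccountKey mirrors the check in isSelfSignedJWTFlow above:
// only the "type" field of the credentials JSON matters for the decision.
func isServiceAccountKey(data []byte) (bool, error) {
	var f struct {
		Type string `json:"type"`
	}
	if err := json.Unmarshal(data, &f); err != nil {
		return false, err
	}
	return f.Type == "service_account", nil
}

func main() {
	sa := []byte(`{"type": "service_account", "project_id": "demo"}`)
	user := []byte(`{"type": "authorized_user"}`)
	fmt.Println(isServiceAccountKey(sa))   // true <nil>
	fmt.Println(isServiceAccountKey(user)) // false <nil>
}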
-func customHTTPClient(tlsConfig *tls.Config) *http.Client { - trans := baseTransport() - trans.TLSClientConfig = tlsConfig - return &http.Client{Transport: trans} -} - -func baseTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - MaxIdleConnsPerHost: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/google.golang.org/api/internal/gensupport/buffer.go b/vendor/google.golang.org/api/internal/gensupport/buffer.go deleted file mode 100644 index 3d0817ede9..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/buffer.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "bytes" - "io" - - "google.golang.org/api/googleapi" -) - -// MediaBuffer buffers data from an io.Reader to support uploading media in -// retryable chunks. It should be created with NewMediaBuffer. -type MediaBuffer struct { - media io.Reader - - chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. - err error // Any error generated when populating chunk by reading media. - - // The absolute position of chunk in the underlying media. - off int64 -} - -// NewMediaBuffer initializes a MediaBuffer. -func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer { - return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)} -} - -// Chunk returns the current buffered chunk, the offset in the underlying media -// from which the chunk is drawn, and the size of the chunk. -// Successive calls to Chunk return the same chunk between calls to Next. -func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { - // There may already be data in chunk if Next has not been called since the previous call to Chunk. - if mb.err == nil && len(mb.chunk) == 0 { - mb.err = mb.loadChunk() - } - return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err -} - -// loadChunk will read from media into chunk, up to the capacity of chunk. -func (mb *MediaBuffer) loadChunk() error { - bufSize := cap(mb.chunk) - mb.chunk = mb.chunk[:bufSize] - - read := 0 - var err error - for err == nil && read < bufSize { - var n int - n, err = mb.media.Read(mb.chunk[read:]) - read += n - } - mb.chunk = mb.chunk[:read] - return err -} - -// Next advances to the next chunk, which will be returned by the next call to Chunk. -// Calls to Next without a corresponding prior call to Chunk will have no effect. -func (mb *MediaBuffer) Next() { - mb.off += int64(len(mb.chunk)) - mb.chunk = mb.chunk[0:0] -} - -type readerTyper struct { - io.Reader - googleapi.ContentTyper -} - -// ReaderAtToReader adapts a ReaderAt to be used as a Reader. -// If ra implements googleapi.ContentTyper, then the returned reader -// will also implement googleapi.ContentTyper, delegating to ra. 
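MediaBuffer.loadChunk above deliberately loops over Read rather than calling it once, because io.Reader may return short reads and a chunk must be filled to capacity before upload. A standalone sketch of the same loop:

package main

import (
	"fmt"
	"io"
	"strings"
	"testing/iotest"
)

// fillChunk mimics MediaBuffer.loadChunk: loop until the buffer is full
// or the reader reports an error. A single Read call would not be enough,
// since io.Reader is allowed to return fewer bytes than requested.
func fillChunk(r io.Reader, chunk []byte) (int, error) {
	read := 0
	var err error
	for err == nil && read < len(chunk) {
		var n int
		n, err = r.Read(chunk[read:])
		read += n
	}
	return read, err
}

func main() {
	// OneByteReader forces worst-case short reads of one byte at a time.
	r := iotest.OneByteReader(strings.NewReader("hello world"))
	chunk := make([]byte, 8)
	n, err := fillChunk(r, chunk)
	fmt.Printf("%q n=%d err=%v\n", chunk[:n], n, err) // "hello wo" n=8 err=<nil>
}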
-func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader { - r := io.NewSectionReader(ra, 0, size) - if typer, ok := ra.(googleapi.ContentTyper); ok { - return readerTyper{r, typer} - } - return r -} diff --git a/vendor/google.golang.org/api/internal/gensupport/doc.go b/vendor/google.golang.org/api/internal/gensupport/doc.go deleted file mode 100644 index 752c4b411b..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gensupport is an internal implementation detail used by code -// generated by the google-api-go-generator tool. -// -// This package may be modified at any time without regard for backwards -// compatibility. It should not be used directly by API users. -package gensupport diff --git a/vendor/google.golang.org/api/internal/gensupport/error.go b/vendor/google.golang.org/api/internal/gensupport/error.go deleted file mode 100644 index 886c6532b1..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/error.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 Google LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "errors" - - "github.com/googleapis/gax-go/v2/apierror" - "google.golang.org/api/googleapi" -) - -// WrapError creates an [apierror.APIError] from err, wraps it in err, and -// returns err. If err is not a [googleapi.Error] (or a -// [google.golang.org/grpc/status.Status]), it returns err without modification. -func WrapError(err error) error { - var herr *googleapi.Error - apiError, ok := apierror.ParseError(err, false) - if ok && errors.As(err, &herr) { - herr.Wrap(apiError) - } - return err -} diff --git a/vendor/google.golang.org/api/internal/gensupport/json.go b/vendor/google.golang.org/api/internal/gensupport/json.go deleted file mode 100644 index eab49a11eb..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/json.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" -) - -// MarshalJSON returns a JSON encoding of schema containing only selected fields. -// A field is selected if any of the following is true: -// - it has a non-empty value -// - its field name is present in forceSendFields and it is not a nil pointer or nil interface -// - its field name is present in nullFields. -// -// The JSON key for each selected field is taken from the field's json: struct tag. 
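MarshalJSON above backs the ForceSendFields/NullFields convention of generated API structs: with omitempty on every field, there would otherwise be no way to send a zero value or an explicit null in a PATCH. A toy illustration of the observable behavior; this hand-rolled MarshalJSON is for demonstration only, not the real reflection-based implementation:

package main

import (
	"encoding/json"
	"fmt"
)

// Bucket is shaped like a generated API struct: omitempty on every field,
// with ForceSendFields/NullFields steering the generated MarshalJSON.
type Bucket struct {
	Name            string `json:"name,omitempty"`
	VersioningOn    bool   `json:"versioningOn,omitempty"`
	ForceSendFields []string
	NullFields      []string
}

func (b Bucket) MarshalJSON() ([]byte, error) {
	m := map[string]interface{}{}
	if b.Name != "" {
		m["name"] = b.Name
	}
	if b.VersioningOn {
		m["versioningOn"] = b.VersioningOn
	}
	for _, f := range b.ForceSendFields {
		if f == "VersioningOn" {
			m["versioningOn"] = b.VersioningOn // send even when false
		}
	}
	for _, f := range b.NullFields {
		if f == "Name" {
			m["name"] = nil // explicit JSON null, i.e. "clear this field"
		}
	}
	return json.Marshal(m)
}

func main() {
	// Without ForceSendFields, a false bool is dropped by omitempty and a
	// PATCH could never turn versioning off.
	b := Bucket{Name: "demo", ForceSendFields: []string{"VersioningOn"}}
	out, _ := json.Marshal(b)
	fmt.Println(string(out)) // {"name":"demo","versioningOn":false}
}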
-func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { - if len(forceSendFields) == 0 && len(nullFields) == 0 { - return json.Marshal(schema) - } - - mustInclude := make(map[string]bool) - for _, f := range forceSendFields { - mustInclude[f] = true - } - useNull := make(map[string]bool) - useNullMaps := make(map[string]map[string]bool) - for _, nf := range nullFields { - parts := strings.SplitN(nf, ".", 2) - field := parts[0] - if len(parts) == 1 { - useNull[field] = true - } else { - if useNullMaps[field] == nil { - useNullMaps[field] = map[string]bool{} - } - useNullMaps[field][parts[1]] = true - } - } - - dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps) - if err != nil { - return nil, err - } - return json.Marshal(dataMap) -} - -func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { - m := make(map[string]interface{}) - s := reflect.ValueOf(schema) - st := s.Type() - - for i := 0; i < s.NumField(); i++ { - jsonTag := st.Field(i).Tag.Get("json") - if jsonTag == "" { - continue - } - tag, err := parseJSONTag(jsonTag) - if err != nil { - return nil, err - } - if tag.ignore { - continue - } - - v := s.Field(i) - f := st.Field(i) - - if useNull[f.Name] { - if !isEmptyValue(v) { - return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name) - } - m[tag.apiName] = nil - continue - } - - if !includeField(v, f, mustInclude) { - continue - } - - // If map fields are explicitly set to null, use a map[string]interface{}. - if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil { - ms, ok := v.Interface().(map[string]string) - if !ok { - mi, err := initMapSlow(v, f.Name, useNullMaps) - if err != nil { - return nil, err - } - m[tag.apiName] = mi - continue - } - mi := map[string]interface{}{} - for k, v := range ms { - mi[k] = v - } - for k := range useNullMaps[f.Name] { - mi[k] = nil - } - m[tag.apiName] = mi - continue - } - - // nil maps are treated as empty maps. - if f.Type.Kind() == reflect.Map && v.IsNil() { - m[tag.apiName] = map[string]string{} - continue - } - - // nil slices are treated as empty slices. - if f.Type.Kind() == reflect.Slice && v.IsNil() { - m[tag.apiName] = []bool{} - continue - } - - if tag.stringFormat { - m[tag.apiName] = formatAsString(v, f.Type.Kind()) - } else { - m[tag.apiName] = v.Interface() - } - } - return m, nil -} - -// initMapSlow uses reflection to build up a map object. This is slower than -// the default behavior so it should be used only as a fallback. -func initMapSlow(rv reflect.Value, fieldName string, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { - mi := map[string]interface{}{} - iter := rv.MapRange() - for iter.Next() { - k, ok := iter.Key().Interface().(string) - if !ok { - return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]any", fieldName) - } - v := iter.Value().Interface() - mi[k] = v - } - for k := range useNullMaps[fieldName] { - mi[k] = nil - } - return mi, nil -} - -// formatAsString returns a string representation of v, dereferencing it first if possible. -func formatAsString(v reflect.Value, kind reflect.Kind) string { - if kind == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - - return fmt.Sprintf("%v", v.Interface()) -} - -// jsonTag represents a restricted version of the struct tag format used by encoding/json. -// It is used to describe the JSON encoding of fields in a Schema struct. 
-type jsonTag struct { - apiName string - stringFormat bool - ignore bool -} - -// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. -// The format of the tag must match that generated by the Schema.writeSchemaStruct method -// in the api generator. -func parseJSONTag(val string) (jsonTag, error) { - if val == "-" { - return jsonTag{ignore: true}, nil - } - - var tag jsonTag - - i := strings.Index(val, ",") - if i == -1 || val[:i] == "" { - return tag, fmt.Errorf("malformed json tag: %s", val) - } - - tag = jsonTag{ - apiName: val[:i], - } - - switch val[i+1:] { - case "omitempty": - case "omitempty,string": - tag.stringFormat = true - default: - return tag, fmt.Errorf("malformed json tag: %s", val) - } - - return tag, nil -} - -// Reports whether the struct field "f" with value "v" should be included in JSON output. -func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool { - // The regular JSON encoding of a nil pointer is "null", which means "delete this field". - // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. - // However, many fields are not pointers, so there would be no way to delete these fields. - // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. - // Deletion will be handled by a separate mechanism. - if f.Type.Kind() == reflect.Ptr && v.IsNil() { - return false - } - - // The "any" type is represented as an interface{}. If this interface - // is nil, there is no reasonable representation to send. We ignore - // these fields, for the same reasons as given above for pointers. - if f.Type.Kind() == reflect.Interface && v.IsNil() { - return false - } - - return mustInclude[f.Name] || !isEmptyValue(v) -} - -// isEmptyValue reports whether v is the empty value for its type. This -// implementation is based on that of the encoding/json package, but its -// correctness does not depend on it being identical. What's important is that -// this function return false in situations where v should not be sent as part -// of a PATCH operation. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} diff --git a/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go b/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go deleted file mode 100644 index 13c2f93020..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "encoding/json" - "errors" - "fmt" - "math" -) - -// JSONFloat64 is a float64 that supports proper unmarshaling of special float -// values in JSON, according to -// https://developers.google.com/protocol-buffers/docs/proto3#json. Although -// that is a proto-to-JSON spec, it applies to all Google APIs. 
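The JSONFloat64 type introduced above exists because Google APIs encode NaN and the infinities as quoted strings, which encoding/json cannot decode into a plain float64. A minimal re-creation for illustration:

package main

import (
	"encoding/json"
	"fmt"
	"math"
)

// SpecialFloat re-creates the JSONFloat64 idea: accept either a JSON
// number or the quoted sentinels "NaN", "Infinity" and "-Infinity".
type SpecialFloat float64

func (f *SpecialFloat) UnmarshalJSON(data []byte) error {
	var ff float64
	if err := json.Unmarshal(data, &ff); err == nil {
		*f = SpecialFloat(ff)
		return nil
	}
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return fmt.Errorf("not a float or a string: %s", data)
	}
	switch s {
	case "NaN":
		*f = SpecialFloat(math.NaN())
	case "Infinity":
		*f = SpecialFloat(math.Inf(1))
	case "-Infinity":
		*f = SpecialFloat(math.Inf(-1))
	default:
		return fmt.Errorf("bad float string %q", s)
	}
	return nil
}

func main() {
	var v struct {
		Score SpecialFloat `json:"score"`
	}
	// A plain float64 field would reject the quoted sentinel below.
	if err := json.Unmarshal([]byte(`{"score": "NaN"}`), &v); err != nil {
		panic(err)
	}
	fmt.Println(math.IsNaN(float64(v.Score))) // true
}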
-// -// The jsonpb package -// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has -// similar functionality, but only for direct translation from proto messages -// to JSON. -type JSONFloat64 float64 - -func (f *JSONFloat64) UnmarshalJSON(data []byte) error { - var ff float64 - if err := json.Unmarshal(data, &ff); err == nil { - *f = JSONFloat64(ff) - return nil - } - var s string - if err := json.Unmarshal(data, &s); err == nil { - switch s { - case "NaN": - ff = math.NaN() - case "Infinity": - ff = math.Inf(1) - case "-Infinity": - ff = math.Inf(-1) - default: - return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s) - } - *f = JSONFloat64(ff) - return nil - } - return errors.New("google.golang.org/api/internal: data not float or string") -} diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go deleted file mode 100644 index c048a57084..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "bytes" - "fmt" - "io" - "mime" - "mime/multipart" - "net/http" - "net/textproto" - "strings" - "sync" - "time" - - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/googleapi" -) - -type typeReader struct { - io.Reader - typ string -} - -// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body. -// Close must be called if reads from the multipartReader are abandoned before reaching EOF. -type multipartReader struct { - pr *io.PipeReader - ctype string - mu sync.Mutex - pipeOpen bool -} - -// boundary optionally specifies the MIME boundary -func newMultipartReader(parts []typeReader, boundary string) *multipartReader { - mp := &multipartReader{pipeOpen: true} - var pw *io.PipeWriter - mp.pr, pw = io.Pipe() - mpw := multipart.NewWriter(pw) - if boundary != "" { - mpw.SetBoundary(boundary) - } - mp.ctype = "multipart/related; boundary=" + mpw.Boundary() - go func() { - for _, part := range parts { - w, err := mpw.CreatePart(typeHeader(part.typ)) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err)) - return - } - _, err = io.Copy(w, part.Reader) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err)) - return - } - } - - mpw.Close() - pw.Close() - }() - return mp -} - -func (mp *multipartReader) Read(data []byte) (n int, err error) { - return mp.pr.Read(data) -} - -func (mp *multipartReader) Close() error { - mp.mu.Lock() - if !mp.pipeOpen { - mp.mu.Unlock() - return nil - } - mp.pipeOpen = false - mp.mu.Unlock() - return mp.pr.Close() -} - -// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body. -// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary. -// -// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. -func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { - return combineBodyMedia(body, bodyContentType, media, mediaContentType, "") -} - -// combineBodyMedia is CombineBodyMedia but with an optional mimeBoundary field. 
-func combineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType, mimeBoundary string) (io.ReadCloser, string) { - mp := newMultipartReader([]typeReader{ - {body, bodyContentType}, - {media, mediaContentType}, - }, mimeBoundary) - return mp, mp.ctype -} - -func typeHeader(contentType string) textproto.MIMEHeader { - h := make(textproto.MIMEHeader) - if contentType != "" { - h.Set("Content-Type", contentType) - } - return h -} - -// PrepareUpload determines whether the data in the supplied reader should be -// uploaded in a single request, or in sequential chunks. -// chunkSize is the size of the chunk that media should be split into. -// -// If chunkSize is zero, media is returned as the first value, and the other -// two return values are nil, true. -// -// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the -// contents of media fit in a single chunk. -// -// After PrepareUpload has been called, media should no longer be used: the -// media content should be accessed via one of the return values. -func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) { - if chunkSize == 0 { // do not chunk - return media, nil, true - } - mb = NewMediaBuffer(media, chunkSize) - _, _, _, err := mb.Chunk() - // If err is io.EOF, we can upload this in a single request. Otherwise, err is - // either nil or a non-EOF error. If it is the latter, then the next call to - // mb.Chunk will return the same error. Returning a MediaBuffer ensures that this - // error will be handled at some point. - return nil, mb, err == io.EOF -} - -// MediaInfo holds information for media uploads. It is intended for use by generated -// code only. -type MediaInfo struct { - // At most one of Media and MediaBuffer will be set. - media io.Reader - buffer *MediaBuffer - singleChunk bool - mType string - size int64 // mediaSize, if known. Used only for calls to progressUpdater_. - progressUpdater googleapi.ProgressUpdater - chunkRetryDeadline time.Duration -} - -// NewInfoFromMedia should be invoked from the Media method of a call. It returns a -// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer -// if needed. -func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { - mi := &MediaInfo{} - opts := googleapi.ProcessMediaOptions(options) - if !opts.ForceEmptyContentType { - mi.mType = opts.ContentType - if mi.mType == "" { - r, mi.mType = gax.DetermineContentType(r) - } - } - mi.chunkRetryDeadline = opts.ChunkRetryDeadline - mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) - return mi -} - -// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a -// call. It returns a MediaInfo using the given reader, size and media type. -func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { - rdr := ReaderAtToReader(r, size) - mType := mediaType - if mType == "" { - rdr, mType = gax.DetermineContentType(rdr) - } - - return &MediaInfo{ - size: size, - mType: mType, - buffer: NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize), - media: nil, - singleChunk: false, - } -} - -// SetProgressUpdater sets the progress updater for the media info. -func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) { - if mi != nil { - mi.progressUpdater = pu - } -} - -// UploadType determines the type of upload: a single request, or a resumable -// series of requests. 
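newMultipartReader above streams the multipart/related body through an io.Pipe so neither the JSON part nor the media part has to be buffered in full. The same shape can be sketched with only the standard library; names and payloads here are illustrative:

package main

import (
	"fmt"
	"io"
	"mime/multipart"
	"net/textproto"
	"strings"
)

// relatedBody streams a multipart/related body with a JSON part followed
// by a media part, the same shape newMultipartReader produces. The pipe
// keeps memory flat: parts are written only as the consumer reads.
func relatedBody(meta, media io.Reader, mediaType string) (io.Reader, string) {
	pr, pw := io.Pipe()
	mpw := multipart.NewWriter(pw)
	go func() {
		writePart := func(r io.Reader, ctype string) error {
			h := make(textproto.MIMEHeader)
			h.Set("Content-Type", ctype)
			w, err := mpw.CreatePart(h)
			if err != nil {
				return err
			}
			_, err = io.Copy(w, r)
			return err
		}
		if err := writePart(meta, "application/json"); err != nil {
			pw.CloseWithError(err)
			return
		}
		if err := writePart(media, mediaType); err != nil {
			pw.CloseWithError(err)
			return
		}
		mpw.Close()
		pw.Close()
	}()
	return pr, "multipart/related; boundary=" + mpw.Boundary()
}

func main() {
	body, ctype := relatedBody(
		strings.NewReader(`{"name":"photo.png"}`),
		strings.NewReader("...png bytes..."),
		"image/png",
	)
	fmt.Println(ctype)
	out, _ := io.ReadAll(body)
	fmt.Printf("%d body bytes\n", len(out))
}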
-func (mi *MediaInfo) UploadType() string { - if mi.singleChunk { - return "multipart" - } - return "resumable" -} - -// UploadRequest sets up an HTTP request for media upload. It adds headers -// as necessary, and returns a replacement for the body and a function for http.Request.GetBody. -func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) { - cleanup = func() {} - if mi == nil { - return body, nil, cleanup - } - var media io.Reader - if mi.media != nil { - // This only happens when the caller has turned off chunking. In that - // case, we write all of media in a single non-retryable request. - media = mi.media - } else if mi.singleChunk { - // The data fits in a single chunk, which has now been read into the MediaBuffer. - // We obtain that chunk so we can write it in a single request. The request can - // be retried because the data is stored in the MediaBuffer. - media, _, _, _ = mi.buffer.Chunk() - } - toCleanup := []io.Closer{} - if media != nil { - fb := readerFunc(body) - fm := readerFunc(media) - combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) - toCleanup = append(toCleanup, combined) - if fb != nil && fm != nil { - getBody = func() (io.ReadCloser, error) { - rb := io.NopCloser(fb()) - rm := io.NopCloser(fm()) - var mimeBoundary string - if _, params, err := mime.ParseMediaType(ctype); err == nil { - mimeBoundary = params["boundary"] - } - r, _ := combineBodyMedia(rb, "application/json", rm, mi.mType, mimeBoundary) - toCleanup = append(toCleanup, r) - return r, nil - } - } - reqHeaders.Set("Content-Type", ctype) - body = combined - } - if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { - // This happens when initiating a resumable upload session. - // The initial request contains a JSON body rather than media. - // It can be retried with a getBody function that re-creates the request body. - fb := readerFunc(body) - if fb != nil { - getBody = func() (io.ReadCloser, error) { - rb := io.NopCloser(fb()) - toCleanup = append(toCleanup, rb) - return rb, nil - } - } - reqHeaders.Set("X-Upload-Content-Type", mi.mType) - } - // Ensure that any bodies created in getBody are cleaned up. - cleanup = func() { - for _, closer := range toCleanup { - _ = closer.Close() - } - - } - return body, getBody, cleanup -} - -// readerFunc returns a function that always returns an io.Reader that has the same -// contents as r, provided that can be done without consuming r. Otherwise, it -// returns nil. -// See http.NewRequest (in net/http/request.go). -func readerFunc(r io.Reader) func() io.Reader { - switch r := r.(type) { - case *bytes.Buffer: - buf := r.Bytes() - return func() io.Reader { return bytes.NewReader(buf) } - case *bytes.Reader: - snapshot := *r - return func() io.Reader { r := snapshot; return &r } - case *strings.Reader: - snapshot := *r - return func() io.Reader { r := snapshot; return &r } - default: - return nil - } -} - -// ResumableUpload returns an appropriately configured ResumableUpload value if the -// upload is resumable, or nil otherwise. 
-func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { - if mi == nil || mi.singleChunk { - return nil - } - return &ResumableUpload{ - URI: locURI, - Media: mi.buffer, - MediaType: mi.mType, - Callback: func(curr int64) { - if mi.progressUpdater != nil { - mi.progressUpdater(curr, mi.size) - } - }, - ChunkRetryDeadline: mi.chunkRetryDeadline, - } -} - -// SetGetBody sets the GetBody field of req to f. This was once needed -// to gracefully support Go 1.7 and earlier which didn't have that -// field. -// -// Deprecated: the code generator no longer uses this as of -// 2019-02-19. Nothing else should be calling this anyway, but we -// won't delete this immediately; it will be deleted in as early as 6 -// months. -func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) { - req.GetBody = f -} diff --git a/vendor/google.golang.org/api/internal/gensupport/params.go b/vendor/google.golang.org/api/internal/gensupport/params.go deleted file mode 100644 index 1a30d2ca25..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/params.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "net/url" - - "google.golang.org/api/googleapi" -) - -// URLParams is a simplified replacement for url.Values -// that safely builds up URL parameters for encoding. -type URLParams map[string][]string - -// Get returns the first value for the given key, or "". -func (u URLParams) Get(key string) string { - vs := u[key] - if len(vs) == 0 { - return "" - } - return vs[0] -} - -// Set sets the key to value. -// It replaces any existing values. -func (u URLParams) Set(key, value string) { - u[key] = []string{value} -} - -// SetMulti sets the key to an array of values. -// It replaces any existing values. -// Note that values must not be modified after calling SetMulti -// so the caller is responsible for making a copy if necessary. -func (u URLParams) SetMulti(key string, values []string) { - u[key] = values -} - -// Encode encodes the values into “URL encoded” form -// ("bar=baz&foo=quux") sorted by key. -func (u URLParams) Encode() string { - return url.Values(u).Encode() -} - -// SetOptions sets the URL params and any additional `CallOption` or -// `MultiCallOption` passed in. -func SetOptions(u URLParams, opts ...googleapi.CallOption) { - for _, o := range opts { - m, ok := o.(googleapi.MultiCallOption) - if ok { - u.SetMulti(m.GetMulti()) - continue - } - u.Set(o.Get()) - } -} diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go deleted file mode 100644 index 08e7aacefb..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "strings" - "sync" - "time" - - "github.com/google/uuid" - "google.golang.org/api/internal" -) - -// ResumableUpload is used by the generated APIs to provide resumable uploads. -// It is not used by developers directly. -type ResumableUpload struct { - Client *http.Client - // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". 
- URI string - UserAgent string // User-Agent for header of the request - // Media is the object being uploaded. - Media *MediaBuffer - // MediaType defines the media type, e.g. "image/jpeg". - MediaType string - - mu sync.Mutex // guards progress - progress int64 // number of bytes uploaded so far - - // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. - Callback func(int64) - - // Retry optionally configures retries for requests made against the upload. - Retry *RetryConfig - - // ChunkRetryDeadline configures the per-chunk deadline after which no further - // retries should happen. - ChunkRetryDeadline time.Duration - - // Track current request invocation ID and attempt count for retry metrics - // and idempotency headers. - invocationID string - attempts int -} - -// Progress returns the number of bytes uploaded at this point. -func (rx *ResumableUpload) Progress() int64 { - rx.mu.Lock() - defer rx.mu.Unlock() - return rx.progress -} - -// doUploadRequest performs a single HTTP request to upload data. -// off specifies the offset in rx.Media from which data is drawn. -// size is the number of bytes in data. -// final specifies whether data is the final chunk to be uploaded. -func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) { - req, err := http.NewRequest("POST", rx.URI, data) - if err != nil { - return nil, err - } - - req.ContentLength = size - var contentRange string - if final { - if size == 0 { - contentRange = fmt.Sprintf("bytes */%v", off) - } else { - contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size) - } - } else { - contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1) - } - req.Header.Set("Content-Range", contentRange) - req.Header.Set("Content-Type", rx.MediaType) - req.Header.Set("User-Agent", rx.UserAgent) - - // TODO(b/274504690): Consider dropping gccl-invocation-id key since it - // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). - baseXGoogHeader := "gl-go/" + GoVersion() + " gdcl/" + internal.Version - invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", rx.invocationID, rx.attempts) - req.Header.Set("X-Goog-Api-Client", strings.Join([]string{baseXGoogHeader, invocationHeader}, " ")) - - // Set idempotency token header which is used by GCS uploads. - req.Header.Set("X-Goog-Gcs-Idempotency-Token", rx.invocationID) - - // Google's upload endpoint uses status code 308 for a - // different purpose than the "308 Permanent Redirect" - // since-standardized in RFC 7238. Because of the conflict in - // semantics, Google added this new request header which - // causes it to not use "308" and instead reply with 200 OK - // and sets the upload-specific "X-HTTP-Status-Code-Override: - // 308" response header. - req.Header.Set("X-GUploader-No-308", "yes") - - return SendRequest(ctx, rx.Client, req) -} - -func statusResumeIncomplete(resp *http.Response) bool { - // This is how the server signals "status resume incomplete" - // when X-GUploader-No-308 is set to "yes": - return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308" -} - -// reportProgress calls a user-supplied callback to report upload progress. -// If old==updated, the callback is not called. 
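The Content-Range arithmetic in doUploadRequest above is the heart of the resumable protocol: a non-final chunk advertises an unknown total, the final chunk pins the total size, and an empty final chunk merely confirms the size. A sketch reproducing that logic:

package main

import "fmt"

// contentRange reproduces the header logic of doUploadRequest above:
// a non-final chunk advertises an unknown total ("*"), the final chunk
// pins the total size, and an empty final chunk just confirms it.
func contentRange(off, size int64, final bool) string {
	if !final {
		return fmt.Sprintf("bytes %v-%v/*", off, off+size-1)
	}
	if size == 0 {
		return fmt.Sprintf("bytes */%v", off)
	}
	return fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size)
}

func main() {
	fmt.Println(contentRange(0, 1024, false))  // bytes 0-1023/*
	fmt.Println(contentRange(1024, 512, true)) // bytes 1024-1535/1536
	fmt.Println(contentRange(1536, 0, true))   // bytes */1536
}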
-func (rx *ResumableUpload) reportProgress(old, updated int64) { - if updated-old == 0 { - return - } - rx.mu.Lock() - rx.progress = updated - rx.mu.Unlock() - if rx.Callback != nil { - rx.Callback(updated) - } -} - -// transferChunk performs a single HTTP request to upload a single chunk from rx.Media. -func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) { - chunk, off, size, err := rx.Media.Chunk() - - done := err == io.EOF - if !done && err != nil { - return nil, err - } - - res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done) - if err != nil { - return res, err - } - - // We sent "X-GUploader-No-308: yes" (see comment elsewhere in - // this file), so we don't expect to get a 308. - if res.StatusCode == 308 { - return nil, errors.New("unexpected 308 response status code") - } - - if res.StatusCode == http.StatusOK { - rx.reportProgress(off, off+int64(size)) - } - - if statusResumeIncomplete(res) { - rx.Media.Next() - } - return res, nil -} - -// Upload starts the process of a resumable upload with a cancellable context. -// It retries using the provided backoff strategy until cancelled or the -// strategy indicates to stop retrying. -// It is called from the auto-generated API code and is not visible to the user. -// Before sending an HTTP request, Upload calls any registered hook functions, -// and calls the returned functions after the request returns (see send.go). -// rx is private to the auto-generated API code. -// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. -func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { - - // There are a couple of cases where it's possible for err and resp to both - // be non-nil. However, we expose a simpler contract to our callers: exactly - // one of resp and err will be non-nil. This means that any response body - // must be closed here before returning a non-nil error. - var prepareReturn = func(resp *http.Response, err error) (*http.Response, error) { - if err != nil { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return nil, err - } - // This case is very unlikely but possible only if rx.ChunkRetryDeadline is - // set to a very small value, in which case no requests will be sent before - // the deadline. Return an error to avoid causing a panic. - if resp == nil { - return nil, fmt.Errorf("upload request to %v not sent, choose larger value for ChunkRetryDeadline", rx.URI) - } - return resp, nil - } - // Configure retryable error criteria. - errorFunc := rx.Retry.errorFunc() - - // Configure per-chunk retry deadline. - var retryDeadline time.Duration - if rx.ChunkRetryDeadline != 0 { - retryDeadline = rx.ChunkRetryDeadline - } else { - retryDeadline = defaultRetryDeadline - } - - // Send all chunks. - for { - var pause time.Duration - - // Each chunk gets its own initialized-at-zero backoff and invocation ID. - bo := rx.Retry.backoff() - quitAfterTimer := time.NewTimer(retryDeadline) - rx.attempts = 1 - rx.invocationID = uuid.New().String() - - // Retry loop for a single chunk. - for { - pauseTimer := time.NewTimer(pause) - select { - case <-ctx.Done(): - quitAfterTimer.Stop() - pauseTimer.Stop() - if err == nil { - err = ctx.Err() - } - return prepareReturn(resp, err) - case <-pauseTimer.C: - case <-quitAfterTimer.C: - pauseTimer.Stop() - return prepareReturn(resp, err) - } - pauseTimer.Stop() - - // Check for context cancellation or timeout once more.
If more than one - // case in the select statement above was satisfied at the same time, Go - // will choose one arbitrarily. - // That can cause an operation to go through even if the context was - // canceled before or the timeout was reached. - select { - case <-ctx.Done(): - quitAfterTimer.Stop() - if err == nil { - err = ctx.Err() - } - return prepareReturn(resp, err) - case <-quitAfterTimer.C: - return prepareReturn(resp, err) - default: - } - - resp, err = rx.transferChunk(ctx) - - var status int - if resp != nil { - status = resp.StatusCode - } - - // Check if we should retry the request. - if !errorFunc(status, err) { - quitAfterTimer.Stop() - break - } - - rx.attempts++ - pause = bo.Pause() - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - } - - // If the chunk was uploaded successfully, but there's still - // more to go, upload the next chunk without any delay. - if statusResumeIncomplete(resp) { - resp.Body.Close() - continue - } - - return prepareReturn(resp, err) - } -} diff --git a/vendor/google.golang.org/api/internal/gensupport/retry.go b/vendor/google.golang.org/api/internal/gensupport/retry.go deleted file mode 100644 index 20b57d925f..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/retry.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2021 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "errors" - "io" - "net" - "strings" - "time" - - "github.com/googleapis/gax-go/v2" - "google.golang.org/api/googleapi" -) - -// Backoff is an interface around gax.Backoff's Pause method, allowing tests to provide their -// own implementation. -type Backoff interface { - Pause() time.Duration -} - -// These are declared as global variables so that tests can overwrite them. -var ( - // Default per-chunk deadline for resumable uploads. - defaultRetryDeadline = 32 * time.Second - // Default backoff timer. - backoff = func() Backoff { - return &gax.Backoff{Initial: 100 * time.Millisecond} - } - // syscallRetryable is a platform-specific hook, specified in retryable_linux.go - syscallRetryable func(error) bool = func(err error) bool { return false } -) - -const ( - // statusTooManyRequests is returned by the storage API if the - // per-project limits have been temporarily exceeded. The request - // should be retried. - // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes - statusTooManyRequests = 429 - - // statusRequestTimeout is returned by the storage API if the - // upload connection was broken. The request should be retried. - statusRequestTimeout = 408 -) - -// shouldRetry indicates whether an error is retryable for the purposes of this -// package, unless a ShouldRetry func is specified by the RetryConfig instead. -// It follows guidance from -// https://cloud.google.com/storage/docs/exponential-backoff . -func shouldRetry(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if status == statusTooManyRequests || status == statusRequestTimeout { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - // Transient network errors should be retried. 
- if syscallRetryable(err) { - return true - } - if err, ok := err.(interface{ Temporary() bool }); ok { - if err.Temporary() { - return true - } - } - var opErr *net.OpError - if errors.As(err, &opErr) { - if strings.Contains(opErr.Error(), "use of closed network connection") { - // TODO: check against net.ErrClosed (go 1.16+) instead of string - return true - } - } - - // If Go 1.13 error unwrapping is available, use this to examine wrapped - // errors. - if err, ok := err.(interface{ Unwrap() error }); ok { - return shouldRetry(status, err.Unwrap()) - } - return false -} - -// RetryConfig allows configuration of backoff timing and retryable errors. -type RetryConfig struct { - Backoff *gax.Backoff - ShouldRetry func(err error) bool -} - -// Get a new backoff object based on the configured values. -func (r *RetryConfig) backoff() Backoff { - if r == nil || r.Backoff == nil { - return backoff() - } - return &gax.Backoff{ - Initial: r.Backoff.Initial, - Max: r.Backoff.Max, - Multiplier: r.Backoff.Multiplier, - } -} - -// This is kind of hacky; it is necessary because ShouldRetry expects to -// handle HTTP errors via googleapi.Error, but the error has not yet been -// wrapped with a googleapi.Error at this layer, and the ErrorFunc type -// in the manual layer does not pass in a status explicitly as it does -// here. So, we must wrap error status codes in a googleapi.Error so that -// ShouldRetry can parse this correctly. -func (r *RetryConfig) errorFunc() func(status int, err error) bool { - if r == nil || r.ShouldRetry == nil { - return shouldRetry - } - return func(status int, err error) bool { - if status >= 400 { - return r.ShouldRetry(&googleapi.Error{Code: status}) - } - return r.ShouldRetry(err) - } -} diff --git a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go deleted file mode 100644 index a916c3da29..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package gensupport - -import "syscall" - -func init() { - // Initialize syscallRetryable to return true on transient socket-level - // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } -} diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go deleted file mode 100644 index 693a1b1aba..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "strings" - "time" - - "github.com/google/uuid" - "github.com/googleapis/gax-go/v2" -) - -// Use this error type to return an error which allows introspection of both -// the context error and the error from the service. 
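errorFunc above wraps bare HTTP status codes in *googleapi.Error before invoking a user-supplied ShouldRetry, so one predicate can handle both transport errors and status codes. A sketch of what such a predicate might look like; the retry policy chosen here is an example, not the library default:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/api/googleapi"
)

// shouldRetry is a custom retry predicate in the shape
// RetryConfig.ShouldRetry expects. Because errorFunc wraps status codes
// in *googleapi.Error, errors.As can recover the code here.
func shouldRetry(err error) bool {
	var gerr *googleapi.Error
	if errors.As(err, &gerr) {
		// Retry server-side failures and rate limiting only.
		return gerr.Code >= 500 || gerr.Code == 429
	}
	return false
}

func main() {
	fmt.Println(shouldRetry(&googleapi.Error{Code: 503})) // true
	fmt.Println(shouldRetry(&googleapi.Error{Code: 404})) // false
	fmt.Println(shouldRetry(errors.New("boom")))          // false
}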
-type wrappedCallErr struct { - ctxErr error - wrappedErr error -} - -func (e wrappedCallErr) Error() string { - return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr) -} - -func (e wrappedCallErr) Unwrap() error { - return e.wrappedErr -} - -// Is allows errors.Is to match the error from the call as well as context -// sentinel errors. -func (e wrappedCallErr) Is(target error) bool { - return errors.Is(e.ctxErr, target) || errors.Is(e.wrappedErr, target) -} - -// SendRequest sends a single HTTP request using the given client. -// If ctx is non-nil, it calls all hooks, then sends the request with -// req.WithContext, then calls any functions returned by the hooks in -// reverse order. -func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - // Disallow Accept-Encoding because it interferes with the automatic gzip handling - // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. - if _, ok := req.Header["Accept-Encoding"]; ok { - return nil, errors.New("google api: custom Accept-Encoding headers not allowed") - } - if ctx == nil { - return client.Do(req) - } - return send(ctx, client, req) -} - -func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req.WithContext(ctx)) - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err != nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - } - } - return resp, err -} - -// SendRequestWithRetry sends a single HTTP request using the given client, -// with retries if a retryable error is returned. -// If ctx is non-nil, it calls all hooks, then sends the request with -// req.WithContext, then calls any functions returned by the hooks in -// reverse order. -func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) { - // Disallow Accept-Encoding because it interferes with the automatic gzip handling - // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. - if _, ok := req.Header["Accept-Encoding"]; ok { - return nil, errors.New("google api: custom Accept-Encoding headers not allowed") - } - if ctx == nil { - return client.Do(req) - } - return sendAndRetry(ctx, client, req, retry) -} - -func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - - var resp *http.Response - var err error - attempts := 1 - invocationID := uuid.New().String() - baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") - - // Loop to retry the request, up to the context deadline. - var pause time.Duration - var bo Backoff - if retry != nil && retry.Backoff != nil { - bo = &gax.Backoff{ - Initial: retry.Backoff.Initial, - Max: retry.Backoff.Max, - Multiplier: retry.Backoff.Multiplier, - } - } else { - bo = backoff() - } - - var errorFunc = retry.errorFunc() - - for { - t := time.NewTimer(pause) - select { - case <-ctx.Done(): - t.Stop() - // If we got an error and the context has been canceled, return an error acknowledging - // both the context cancelation and the service error. 
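wrappedCallErr above is a small but useful trick: a dual error that matches either cause under errors.Is while unwrapping to the service error for errors.As. A self-contained re-creation:

package main

import (
	"context"
	"errors"
	"fmt"
)

// dualErr mirrors wrappedCallErr: it carries both a context error and
// the last service error, matches either via errors.Is, and unwraps to
// the service error so errors.As can reach typed API errors.
type dualErr struct {
	ctxErr     error
	wrappedErr error
}

func (e dualErr) Error() string {
	return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr)
}
func (e dualErr) Unwrap() error { return e.wrappedErr }
func (e dualErr) Is(target error) bool {
	return errors.Is(e.ctxErr, target) || errors.Is(e.wrappedErr, target)
}

func main() {
	svcErr := errors.New("503 backend unavailable")
	err := dualErr{ctxErr: context.DeadlineExceeded, wrappedErr: svcErr}
	// Callers can test for either cause without caring which "won".
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
	fmt.Println(errors.Is(err, svcErr))                   // true
}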
- if err != nil { - return resp, wrappedCallErr{ctx.Err(), err} - } - return resp, ctx.Err() - case <-t.C: - } - - if ctx.Err() != nil { - // Check for context cancellation once more. If more than one case in a - // select is satisfied at the same time, Go will choose one arbitrarily. - // That can cause an operation to go through even if the context was - // canceled before. - if err != nil { - return resp, wrappedCallErr{ctx.Err(), err} - } - return resp, ctx.Err() - } - - // Set retry metrics and idempotency headers for GCS. - // TODO(b/274504690): Consider dropping gccl-invocation-id key since it - // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). - invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) - xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") - req.Header.Set("X-Goog-Api-Client", xGoogHeader) - req.Header.Set("X-Goog-Gcs-Idempotency-Token", invocationID) - - resp, err = client.Do(req.WithContext(ctx)) - - var status int - if resp != nil { - status = resp.StatusCode - } - - // Check if we can retry the request. A retry can only be done if the error - // is retryable and the request body can be re-created using GetBody (this - // will not be possible if the body was unbuffered). - if req.GetBody == nil || !errorFunc(status, err) { - break - } - attempts++ - var errBody error - req.Body, errBody = req.GetBody() - if errBody != nil { - break - } - - pause = bo.Pause() - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - } - return resp, err -} - -// DecodeResponse decodes the body of res into target. If there is no body, -// target is unchanged. -func DecodeResponse(target interface{}, res *http.Response) error { - if res.StatusCode == http.StatusNoContent { - return nil - } - return json.NewDecoder(res.Body).Decode(target) -} diff --git a/vendor/google.golang.org/api/internal/gensupport/version.go b/vendor/google.golang.org/api/internal/gensupport/version.go deleted file mode 100644 index 23f6aa24ea..0000000000 --- a/vendor/google.golang.org/api/internal/gensupport/version.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2020 Google LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "runtime" - "strings" - "unicode" -) - -// GoVersion returns the Go runtime version. The returned string -// has no whitespace. 
-func GoVersion() string { - return goVersion -} - -var goVersion = goVer(runtime.Version()) - -const develPrefix = "devel +" - -func goVer(s string) string { - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - s += "-" + prerelease - } - return s - } - return "" -} - -func notSemverRune(r rune) bool { - return !strings.ContainsRune("0123456789.", r) -} diff --git a/vendor/google.golang.org/api/internal/impersonate/impersonate.go b/vendor/google.golang.org/api/internal/impersonate/impersonate.go deleted file mode 100644 index 4b2c775f21..0000000000 --- a/vendor/google.golang.org/api/internal/impersonate/impersonate.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package impersonate is used to impersonate Google Credentials. -package impersonate - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "time" - - "golang.org/x/oauth2" -) - -// Config for generating impersonated credentials. -type Config struct { - // Target is the service account to impersonate. Required. - Target string - // Scopes the impersonated credential should have. Required. - Scopes []string - // Delegates are the service accounts in a delegation chain. Each service - // account must be granted roles/iam.serviceAccountTokenCreator on the next - // service account in the chain. Optional. - Delegates []string -} - -// TokenSource returns an impersonated TokenSource configured with the provided -// config using ts as the base credential provider for making requests. -func TokenSource(ctx context.Context, ts oauth2.TokenSource, config *Config) (oauth2.TokenSource, error) { - if len(config.Scopes) == 0 { - return nil, fmt.Errorf("impersonate: scopes must be provided") - } - its := impersonatedTokenSource{ - ctx: ctx, - ts: ts, - name: formatIAMServiceAccountName(config.Target), - // Default to the longest acceptable value of one hour as the token will - // be refreshed automatically. - lifetime: "3600s", - } - - its.delegates = make([]string, len(config.Delegates)) - for i, v := range config.Delegates { - its.delegates[i] = formatIAMServiceAccountName(v) - } - its.scopes = make([]string, len(config.Scopes)) - copy(its.scopes, config.Scopes) - - return oauth2.ReuseTokenSource(nil, its), nil -} - -func formatIAMServiceAccountName(name string) string { - return fmt.Sprintf("projects/-/serviceAccounts/%s", name) -} - -type generateAccessTokenReq struct { - Delegates []string `json:"delegates,omitempty"` - Lifetime string `json:"lifetime,omitempty"` - Scope []string `json:"scope,omitempty"` -} - -type generateAccessTokenResp struct { - AccessToken string `json:"accessToken"` - ExpireTime string `json:"expireTime"` -} - -type impersonatedTokenSource struct { - ctx context.Context - ts oauth2.TokenSource - - name string - lifetime string - scopes []string - delegates []string -} - -// Token returns an impersonated Token. 
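TokenSource above returns the impersonated source wrapped in oauth2.ReuseTokenSource, which is what makes the fixed "3600s" lifetime cheap: the IAM credentials API is only called when the cached token nears expiry. A sketch of that caching effect, with a stand-in source in place of the real network call:

package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

// countingSource stands in for impersonatedTokenSource: each Token call
// represents an expensive round trip to the IAM credentials API.
type countingSource struct{ calls int }

func (s *countingSource) Token() (*oauth2.Token, error) {
	s.calls++
	return &oauth2.Token{
		AccessToken: fmt.Sprintf("token-%d", s.calls),
		// A real impersonated token lives for the requested lifetime;
		// ReuseTokenSource refreshes it only as it nears expiry.
		Expiry: time.Now().Add(time.Hour),
	}, nil
}

func main() {
	src := &countingSource{}
	ts := oauth2.ReuseTokenSource(nil, src)
	for i := 0; i < 3; i++ {
		tok, _ := ts.Token()
		fmt.Println(tok.AccessToken)
	}
	fmt.Println("network calls:", src.calls) // 1
}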
-func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { - hc := oauth2.NewClient(i.ctx, i.ts) - reqBody := generateAccessTokenReq{ - Delegates: i.delegates, - Lifetime: i.lifetime, - Scope: i.scopes, - } - b, err := json.Marshal(reqBody) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) - } - url := fmt.Sprintf("https://iamcredentials.googleapis.com/v1/%s:generateAccessToken", i.name) - req, err := http.NewRequest("POST", url, bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to create request: %v", err) - } - req = req.WithContext(i.ctx) - req.Header.Set("Content-Type", "application/json") - - resp, err := hc.Do(req) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) - } - defer resp.Body.Close() - body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to read body: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) - } - - var accessTokenResp generateAccessTokenResp - if err := json.Unmarshal(body, &accessTokenResp); err != nil { - return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) - } - expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to parse expiry: %v", err) - } - return &oauth2.Token{ - AccessToken: accessTokenResp.AccessToken, - Expiry: expiry, - }, nil -} diff --git a/vendor/google.golang.org/api/internal/s2a.go b/vendor/google.golang.org/api/internal/s2a.go deleted file mode 100644 index c5b421f554..0000000000 --- a/vendor/google.golang.org/api/internal/s2a.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2023 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "encoding/json" - "log" - "sync" - "time" - - "cloud.google.com/go/compute/metadata" -) - -const configEndpointSuffix = "googleAutoMtlsConfiguration" - -// The period an MTLS config can be reused before needing refresh. -var configExpiry = time.Hour - -// GetS2AAddress returns the S2A address to be reached via plaintext connection. -func GetS2AAddress() string { - c, err := getMetadataMTLSAutoConfig().Config() - if err != nil { - return "" - } - if !c.Valid() { - return "" - } - return c.S2A.PlaintextAddress -} - -type mtlsConfigSource interface { - Config() (*mtlsConfig, error) -} - -// mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. -var ( - mdsMTLSAutoConfigSource mtlsConfigSource - once sync.Once -) - -// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. -func getMetadataMTLSAutoConfig() mtlsConfigSource { - once.Do(func() { - mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ - src: &metadataMTLSAutoConfig{}, - } - }) - return mdsMTLSAutoConfigSource -} - -// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. -// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. 
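// The caching strategy described above, reduced to a minimal generic sketch
// (the type and its fields are illustrative, and generics require Go 1.18+):
// serve the cached value under a mutex while it is valid, and refresh from
// the underlying source once it expires.
//
//	type cachedValue[T any] struct {
//		mu     sync.Mutex
//		val    T
//		expiry time.Time
//		fetch  func() (T, time.Time, error) // returns a value and its expiry
//	}
//
//	func (c *cachedValue[T]) Get() (T, error) {
//		c.mu.Lock()
//		defer c.mu.Unlock()
//		if time.Now().Before(c.expiry) {
//			return c.val, nil // still valid; no refresh needed
//		}
//		v, exp, err := c.fetch()
//		if err != nil {
//			var zero T
//			return zero, err
//		}
//		c.val, c.expiry = v, exp
//		return v, nil
//	}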
-type reuseMTLSConfigSource struct { - src mtlsConfigSource // src.Config() is called when config is expired - mu sync.Mutex // mutex guards config - config *mtlsConfig // cached config -} - -func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { - cs.mu.Lock() - defer cs.mu.Unlock() - - if cs.config.Valid() { - return cs.config, nil - } - c, err := cs.src.Config() - if err != nil { - return nil, err - } - cs.config = c - return c, nil -} - -// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource -// It has the logic to query MDS and return an mtlsConfig -type metadataMTLSAutoConfig struct{} - -var httpGetMetadataMTLSConfig = func() (string, error) { - return metadata.Get(configEndpointSuffix) -} - -func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { - resp, err := httpGetMetadataMTLSConfig() - if err != nil { - log.Printf("querying MTLS config from MDS endpoint failed: %v", err) - return defaultMTLSConfig(), nil - } - var config mtlsConfig - err = json.Unmarshal([]byte(resp), &config) - if err != nil { - log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) - return defaultMTLSConfig(), nil - } - - if config.S2A == nil { - log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) - return defaultMTLSConfig(), nil - } - - // set new expiry - config.Expiry = time.Now().Add(configExpiry) - return &config, nil -} - -func defaultMTLSConfig() *mtlsConfig { - return &mtlsConfig{ - S2A: &s2aAddresses{ - PlaintextAddress: "", - MTLSAddress: "", - }, - Expiry: time.Now().Add(configExpiry), - } -} - -// s2aAddresses contains the plaintext and/or MTLS S2A addresses. -type s2aAddresses struct { - // PlaintextAddress is the plaintext address to reach S2A - PlaintextAddress string `json:"plaintext_address"` - // MTLSAddress is the MTLS address to reach S2A - MTLSAddress string `json:"mtls_address"` -} - -// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. -type mtlsConfig struct { - S2A *s2aAddresses `json:"s2a"` - Expiry time.Time -} - -func (c *mtlsConfig) Valid() bool { - return c != nil && c.S2A != nil && !c.expired() -} -func (c *mtlsConfig) expired() bool { - return c.Expiry.Before(time.Now()) -} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go deleted file mode 100644 index 3a3874df11..0000000000 --- a/vendor/google.golang.org/api/internal/settings.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2017 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal supports the options and transport packages. -package internal - -import ( - "crypto/tls" - "errors" - "net/http" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "google.golang.org/api/internal/impersonate" - "google.golang.org/grpc" -) - -// DialSettings holds information needed to establish a connection with a -// Google API service. -type DialSettings struct { - Endpoint string - DefaultEndpoint string - DefaultMTLSEndpoint string - Scopes []string - DefaultScopes []string - EnableJwtWithScope bool - TokenSource oauth2.TokenSource - Credentials *google.Credentials - CredentialsFile string // if set, Token Source is ignored. 
- CredentialsJSON []byte - InternalCredentials *google.Credentials - UserAgent string - APIKey string - Audiences []string - DefaultAudience string - HTTPClient *http.Client - GRPCDialOpts []grpc.DialOption - GRPCConn *grpc.ClientConn - GRPCConnPool ConnPool - GRPCConnPoolSize int - NoAuth bool - TelemetryDisabled bool - ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) - CustomClaims map[string]interface{} - SkipValidation bool - ImpersonationConfig *impersonate.Config - EnableDirectPath bool - EnableDirectPathXds bool - AllowNonDefaultServiceAccount bool - - // Google API system parameters. For more information please read: - // https://cloud.google.com/apis/docs/system-parameters - QuotaProject string - RequestReason string -} - -// GetScopes returns the user-provided scopes, if set, or else falls back to the -// default scopes. -func (ds *DialSettings) GetScopes() []string { - if len(ds.Scopes) > 0 { - return ds.Scopes - } - return ds.DefaultScopes -} - -// GetAudience returns the user-provided audience, if set, or else falls back to the default audience. -func (ds *DialSettings) GetAudience() string { - if ds.HasCustomAudience() { - return ds.Audiences[0] - } - return ds.DefaultAudience -} - -// HasCustomAudience returns true if a custom audience is provided by users. -func (ds *DialSettings) HasCustomAudience() bool { - return len(ds.Audiences) > 0 -} - -// Validate reports an error if ds is invalid. -func (ds *DialSettings) Validate() error { - if ds.SkipValidation { - return nil - } - hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil - if ds.NoAuth && hasCreds { - return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") - } - // Credentials should not appear with other options. - // We currently allow TokenSource and CredentialsFile to coexist. - // TODO(jba): make TokenSource & CredentialsFile an error (breaking change). - nCreds := 0 - if ds.Credentials != nil { - nCreds++ - } - if ds.CredentialsJSON != nil { - nCreds++ - } - if ds.CredentialsFile != "" { - nCreds++ - } - if ds.APIKey != "" { - nCreds++ - } - if ds.TokenSource != nil { - nCreds++ - } - if len(ds.Scopes) > 0 && len(ds.Audiences) > 0 { - return errors.New("WithScopes is incompatible with WithAudience") - } - // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility. 
- if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { - return errors.New("multiple credential options provided") - } - if ds.GRPCConn != nil && ds.GRPCConnPool != nil { - return errors.New("WithGRPCConn is incompatible with WithConnPool") - } - if ds.HTTPClient != nil && ds.GRPCConnPool != nil { - return errors.New("WithHTTPClient is incompatible with WithConnPool") - } - if ds.HTTPClient != nil && ds.GRPCConn != nil { - return errors.New("WithHTTPClient is incompatible with WithGRPCConn") - } - if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { - return errors.New("WithHTTPClient is incompatible with gRPC dial options") - } - if ds.HTTPClient != nil && ds.QuotaProject != "" { - return errors.New("WithHTTPClient is incompatible with QuotaProject") - } - if ds.HTTPClient != nil && ds.RequestReason != "" { - return errors.New("WithHTTPClient is incompatible with RequestReason") - } - if ds.HTTPClient != nil && ds.ClientCertSource != nil { - return errors.New("WithHTTPClient is incompatible with WithClientCertSource") - } - if ds.ClientCertSource != nil && (ds.GRPCConn != nil || ds.GRPCConnPool != nil || ds.GRPCConnPoolSize != 0 || ds.GRPCDialOpts != nil) { - return errors.New("WithClientCertSource is currently only supported for HTTP. gRPC settings are incompatible") - } - if ds.ImpersonationConfig != nil && len(ds.ImpersonationConfig.Scopes) == 0 && len(ds.Scopes) == 0 { - return errors.New("WithImpersonatedCredentials requires scopes being provided") - } - return nil -} diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE deleted file mode 100644 index 7109c6ef93..0000000000 --- a/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 Joshua Tacoma. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
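For context, the DialSettings.Validate checks deleted above are what surface as construction-time errors from the generated clients in this module. A minimal sketch, assuming the Cloud Storage client and two credential-bearing options; the error text is the one Validate returns:

	svc, err := storage.NewService(ctx,
		option.WithAPIKey("my-key"),
		option.WithCredentialsFile("sa.json"),
	)
	// err: "multiple credential options provided"
	_, _ = svc, err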
diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA deleted file mode 100644 index c7f86fcd5f..0000000000 --- a/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA +++ /dev/null @@ -1,14 +0,0 @@ -name: "uritemplates" -description: - "Package uritemplates is a level 4 implementation of RFC 6570 (URI " - "Template, http://tools.ietf.org/html/rfc6570)." - -third_party { - url { - type: GIT - value: "https://github.com/jtacoma/uritemplates" - } - version: "0.1" - last_upgrade_date { year: 2014 month: 8 day: 18 } - license_type: NOTICE -} diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go deleted file mode 100644 index 8c27d19d75..0000000000 --- a/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2013 Joshua Tacoma. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uritemplates is a level 3 implementation of RFC 6570 (URI -// Template, http://tools.ietf.org/html/rfc6570). -// uritemplates does not support composite values (in Go: slices or maps) -// and so does not qualify as a level 4 implementation. -package uritemplates - -import ( - "bytes" - "errors" - "regexp" - "strconv" - "strings" -) - -var ( - unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") - reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") - validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") - hex = []byte("0123456789ABCDEF") -) - -func pctEncode(src []byte) []byte { - dst := make([]byte, len(src)*3) - for i, b := range src { - buf := dst[i*3 : i*3+3] - buf[0] = 0x25 - buf[1] = hex[b/16] - buf[2] = hex[b%16] - } - return dst -} - -// pairWriter is a convenience struct which allows escaped and unescaped -// versions of the template to be written in parallel. -type pairWriter struct { - escaped, unescaped bytes.Buffer -} - -// Write writes the provided string directly without any escaping. -func (w *pairWriter) Write(s string) { - w.escaped.WriteString(s) - w.unescaped.WriteString(s) -} - -// Escape writes the provided string, escaping the string for the -// escaped output. -func (w *pairWriter) Escape(s string, allowReserved bool) { - w.unescaped.WriteString(s) - if allowReserved { - w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode)) - } else { - w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) - } -} - -// Escaped returns the escaped string. -func (w *pairWriter) Escaped() string { - return w.escaped.String() -} - -// Unescaped returns the unescaped string. -func (w *pairWriter) Unescaped() string { - return w.unescaped.String() -} - -// A uriTemplate is a parsed representation of a URI template. -type uriTemplate struct { - raw string - parts []templatePart -} - -// parse parses a URI template string into a uriTemplate object. 
-func parse(rawTemplate string) (*uriTemplate, error) { - split := strings.Split(rawTemplate, "{") - parts := make([]templatePart, len(split)*2-1) - for i, s := range split { - if i == 0 { - if strings.Contains(s, "}") { - return nil, errors.New("unexpected }") - } - parts[i].raw = s - continue - } - subsplit := strings.Split(s, "}") - if len(subsplit) != 2 { - return nil, errors.New("malformed template") - } - expression := subsplit[0] - var err error - parts[i*2-1], err = parseExpression(expression) - if err != nil { - return nil, err - } - parts[i*2].raw = subsplit[1] - } - return &uriTemplate{ - raw: rawTemplate, - parts: parts, - }, nil -} - -type templatePart struct { - raw string - terms []templateTerm - first string - sep string - named bool - ifemp string - allowReserved bool -} - -type templateTerm struct { - name string - explode bool - truncate int -} - -func parseExpression(expression string) (result templatePart, err error) { - switch expression[0] { - case '+': - result.sep = "," - result.allowReserved = true - expression = expression[1:] - case '.': - result.first = "." - result.sep = "." - expression = expression[1:] - case '/': - result.first = "/" - result.sep = "/" - expression = expression[1:] - case ';': - result.first = ";" - result.sep = ";" - result.named = true - expression = expression[1:] - case '?': - result.first = "?" - result.sep = "&" - result.named = true - result.ifemp = "=" - expression = expression[1:] - case '&': - result.first = "&" - result.sep = "&" - result.named = true - result.ifemp = "=" - expression = expression[1:] - case '#': - result.first = "#" - result.sep = "," - result.allowReserved = true - expression = expression[1:] - default: - result.sep = "," - } - rawterms := strings.Split(expression, ",") - result.terms = make([]templateTerm, len(rawterms)) - for i, raw := range rawterms { - result.terms[i], err = parseTerm(raw) - if err != nil { - break - } - } - return result, err -} - -func parseTerm(term string) (result templateTerm, err error) { - // TODO(djd): Remove "*" suffix parsing once we check that no APIs have - // mistakenly used that attribute. - if strings.HasSuffix(term, "*") { - result.explode = true - term = term[:len(term)-1] - } - split := strings.Split(term, ":") - if len(split) == 1 { - result.name = term - } else if len(split) == 2 { - result.name = split[0] - var parsed int64 - parsed, err = strconv.ParseInt(split[1], 10, 0) - result.truncate = int(parsed) - } else { - err = errors.New("multiple colons in same term") - } - if !validname.MatchString(result.name) { - err = errors.New("not a valid name: " + result.name) - } - if result.explode && result.truncate > 0 { - err = errors.New("both explode and prefix modifiers on same term") - } - return result, err -} - -// Expand expands a URI template with a set of values to produce the -// resultant URI. Two forms of the result are returned: one with all the -// elements escaped, and one with the elements unescaped. 
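// A sketch of what the parser above accepts, via the package-level Expand
// defined in utils.go (deleted further below); the template and values are
// illustrative:
//
//	escaped, unescaped, err := Expand("b/{bucket}/o/{object}", map[string]string{
//		"bucket": "my-bucket",
//		"object": "a b.txt",
//	})
//	// escaped:   "b/my-bucket/o/a%20b.txt"
//	// unescaped: "b/my-bucket/o/a b.txt"
//	// err is non-nil for malformed templates such as "b/{bucket".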
-func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) { - var w pairWriter - for _, p := range t.parts { - p.expand(&w, values) - } - return w.Escaped(), w.Unescaped() -} - -func (tp *templatePart) expand(w *pairWriter, values map[string]string) { - if len(tp.raw) > 0 { - w.Write(tp.raw) - return - } - var first = true - for _, term := range tp.terms { - value, exists := values[term.name] - if !exists { - continue - } - if first { - w.Write(tp.first) - first = false - } else { - w.Write(tp.sep) - } - tp.expandString(w, term, value) - } -} - -func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) { - if tp.named { - w.Write(name) - if empty { - w.Write(tp.ifemp) - } else { - w.Write("=") - } - } -} - -func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) { - if len(s) > t.truncate && t.truncate > 0 { - s = s[:t.truncate] - } - tp.expandName(w, t.name, len(s) == 0) - w.Escape(s, tp.allowReserved) -} diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go deleted file mode 100644 index 2e70b81543..0000000000 --- a/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uritemplates - -// Expand parses then expands a URI template with a set of values to produce -// the resultant URI. Two forms of the result are returned: one with all the -// elements escaped, and one with the elements unescaped. -func Expand(path string, values map[string]string) (escaped, unescaped string, err error) { - template, err := parse(path) - if err != nil { - return "", "", err - } - escaped, unescaped = template.Expand(values) - return escaped, unescaped, nil -} diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go deleted file mode 100644 index 46ad187ec1..0000000000 --- a/vendor/google.golang.org/api/internal/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2022 Google LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -// Version is the current tagged release of the library. -const Version = "0.126.0" diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go deleted file mode 100644 index 1799b5d9af..0000000000 --- a/vendor/google.golang.org/api/iterator/iterator.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package iterator provides support for standard Google API iterators. -// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. -package iterator - -import ( - "errors" - "fmt" - "reflect" -) - -// Done is returned by an iterator's Next method when the iteration is -// complete; when there are no more items to return. -var Done = errors.New("no more items in iterator") - -// We don't support mixed calls to Next and NextPage because they play -// with the paging state in incompatible ways. -var errMixed = errors.New("iterator: Next and NextPage called on same iterator") - -// PageInfo contains information about an iterator's paging state. 
-type PageInfo struct { - // Token is the token used to retrieve the next page of items from the - // API. You may set Token immediately after creating an iterator to - // begin iteration at a particular point. If Token is the empty string, - // the iterator will begin with the first eligible item. - // - // The result of setting Token after the first call to Next is undefined. - // - // After the underlying API method is called to retrieve a page of items, - // Token is set to the next-page token in the response. - Token string - - // MaxSize is the maximum number of items returned by a call to the API. - // Set MaxSize as a hint to optimize the buffering behavior of the iterator. - // If zero, the page size is determined by the underlying service. - // - // Use Pager to retrieve a page of a specific, exact size. - MaxSize int - - // The error state of the iterator. Manipulated by PageInfo.next and Pager. - // This is a latch: it starts as nil, and once set should never change. - err error - - // If true, no more calls to fetch should be made. Set to true when fetch - // returns an empty page token. The iterator is Done when this is true AND - // the buffer is empty. - atEnd bool - - // Function that fetches a page from the underlying service. It should pass - // the pageSize and pageToken arguments to the service, fill the buffer - // with the results from the call, and return the next-page token returned - // by the service. The function must not remove any existing items from the - // buffer. If the underlying RPC takes an int32 page size, pageSize should - // be silently truncated. - fetch func(pageSize int, pageToken string) (nextPageToken string, err error) - - // Function that returns the number of currently buffered items. - bufLen func() int - - // Function that returns the buffer, after setting the buffer variable to nil. - takeBuf func() interface{} - - // Set to true on first call to PageInfo.next or Pager.NextPage. Used to check - // for calls to both Next and NextPage with the same iterator. - nextCalled, nextPageCalled bool -} - -// NewPageInfo exposes internals for iterator implementations. -// It is not a stable interface. -var NewPageInfo = newPageInfo - -// newPageInfo creates and returns a PageInfo and a next func. If an iterator can -// support paging, its iterator-creating method should call this. Each time the -// iterator's Next is called, it should call the returned next fn to determine -// whether a next item exists, and if so it should pop an item from the buffer. -// -// The fetch, bufLen and takeBuf arguments provide access to the iterator's -// internal slice of buffered items. They behave as described in PageInfo, above. -// -// The return value is the PageInfo.next method bound to the returned PageInfo value. -// (Returning it avoids exporting PageInfo.next.) -// -// Note: the returned PageInfo and next fn do not remove items from the buffer. -// It is up to the iterator using these to remove items from the buffer: -// typically by performing a pop in its Next. If items are not removed from the -// buffer, memory may grow unbounded. -func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (pi *PageInfo, next func() error) { - pi = &PageInfo{ - fetch: fetch, - bufLen: bufLen, - takeBuf: takeBuf, - } - return pi, pi.next -} - -// Remaining returns the number of items available before the iterator makes another API call. 
-func (pi *PageInfo) Remaining() int { return pi.bufLen() } - -// next provides support for an iterator's Next function. An iterator's Next -// should return the error returned by next if non-nil; else it can assume -// there is at least one item in its buffer, and it should return that item and -// remove it from the buffer. -func (pi *PageInfo) next() error { - pi.nextCalled = true - if pi.err != nil { // Once we get an error, always return it. - // TODO(jba): fix so users can retry on transient errors? Probably not worth it. - return pi.err - } - if pi.nextPageCalled { - pi.err = errMixed - return pi.err - } - // Loop until we get some items or reach the end. - for pi.bufLen() == 0 && !pi.atEnd { - if err := pi.fill(pi.MaxSize); err != nil { - pi.err = err - return pi.err - } - if pi.Token == "" { - pi.atEnd = true - } - } - // Either the buffer is non-empty or pi.atEnd is true (or both). - if pi.bufLen() == 0 { - // The buffer is empty and pi.atEnd is true, i.e. the service has no - // more items. - pi.err = Done - } - return pi.err -} - -// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the -// next-page token returned by the call. -// If fill returns a non-nil error, the buffer will be empty. -func (pi *PageInfo) fill(size int) error { - tok, err := pi.fetch(size, pi.Token) - if err != nil { - pi.takeBuf() // clear the buffer - return err - } - pi.Token = tok - return nil -} - -// Pageable is implemented by iterators that support paging. -type Pageable interface { - // PageInfo returns paging information associated with the iterator. - PageInfo() *PageInfo -} - -// Pager supports retrieving iterator items a page at a time. -type Pager struct { - pageInfo *PageInfo - pageSize int -} - -// NewPager returns a pager that uses iter. Calls to its NextPage method will -// obtain exactly pageSize items, unless fewer remain. The pageToken argument -// indicates where to start the iteration. Pass the empty string to start at -// the beginning, or pass a token retrieved from a call to Pager.NextPage. -// -// If you use an iterator with a Pager, you must not call Next on the iterator. -func NewPager(iter Pageable, pageSize int, pageToken string) *Pager { - p := &Pager{ - pageInfo: iter.PageInfo(), - pageSize: pageSize, - } - p.pageInfo.Token = pageToken - if pageSize <= 0 { - p.pageInfo.err = errors.New("iterator: page size must be positive") - } - return p -} - -// NextPage retrieves a sequence of items from the iterator and appends them -// to slicep, which must be a pointer to a slice of the iterator's item type. -// Exactly p.pageSize items will be appended, unless fewer remain. -// -// The first return value is the page token to use for the next page of items. -// If empty, there are no more pages. Aside from checking for the end of the -// iteration, the returned page token is only needed if the iteration is to be -// resumed a later time, in another context (possibly another process). -// -// The second return value is non-nil if an error occurred. It will never be -// the special iterator sentinel value Done. To recognize the end of the -// iteration, compare nextPageToken to the empty string. -// -// It is possible for NextPage to return a single zero-length page along with -// an empty page token when there are no more items in the iteration. 
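// A sketch of driving a Pager, assuming a hypothetical iterator type that
// implements Pageable and buffers *Item values:
//
//	p := NewPager(it, 50, "") // empty token: start at the beginning
//	for {
//		var page []*Item
//		nextTok, err := p.NextPage(&page)
//		if err != nil {
//			return err
//		}
//		process(page) // up to 50 items per page
//		if nextTok == "" {
//			break // iteration is complete
//		}
//	}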
-func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) { - p.pageInfo.nextPageCalled = true - if p.pageInfo.err != nil { - return "", p.pageInfo.err - } - if p.pageInfo.nextCalled { - p.pageInfo.err = errMixed - return "", p.pageInfo.err - } - if p.pageInfo.bufLen() > 0 { - return "", errors.New("must call NextPage with an empty buffer") - } - // The buffer must be empty here, so takeBuf is a no-op. We call it just to get - // the buffer's type. - wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type()) - if slicep == nil { - return "", errors.New("nil passed to Pager.NextPage") - } - vslicep := reflect.ValueOf(slicep) - if vslicep.Type() != wantSliceType { - return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep) - } - for p.pageInfo.bufLen() < p.pageSize { - if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil { - p.pageInfo.err = err - return "", p.pageInfo.err - } - if p.pageInfo.Token == "" { - break - } - } - e := vslicep.Elem() - e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf()))) - return p.pageInfo.Token, nil -} diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go deleted file mode 100644 index 3b8461d1da..0000000000 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internaloption contains options used internally by Google client code. -package internaloption - -import ( - "golang.org/x/oauth2/google" - "google.golang.org/api/internal" - "google.golang.org/api/option" -) - -type defaultEndpointOption string - -func (o defaultEndpointOption) Apply(settings *internal.DialSettings) { - settings.DefaultEndpoint = string(o) -} - -// WithDefaultEndpoint is an option that indicates the default endpoint. -// -// It should only be used internally by generated clients. -// -// This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint. -func WithDefaultEndpoint(url string) option.ClientOption { - return defaultEndpointOption(url) -} - -type defaultMTLSEndpointOption string - -func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) { - settings.DefaultMTLSEndpoint = string(o) -} - -// WithDefaultMTLSEndpoint is an option that indicates the default mTLS endpoint. -// -// It should only be used internally by generated clients. -func WithDefaultMTLSEndpoint(url string) option.ClientOption { - return defaultMTLSEndpointOption(url) -} - -// SkipDialSettingsValidation bypasses validation on ClientOptions. -// -// It should only be used internally. -func SkipDialSettingsValidation() option.ClientOption { - return skipDialSettingsValidation{} -} - -type skipDialSettingsValidation struct{} - -func (s skipDialSettingsValidation) Apply(settings *internal.DialSettings) { - settings.SkipValidation = true -} - -// EnableDirectPath returns a ClientOption that overrides the default -// attempt to use DirectPath. -// -// It should only be used internally by generated clients. -// This is an EXPERIMENTAL API and may be changed or removed in the future. 
-func EnableDirectPath(dp bool) option.ClientOption {
-	return enableDirectPath(dp)
-}
-
-type enableDirectPath bool
-
-func (e enableDirectPath) Apply(o *internal.DialSettings) {
-	o.EnableDirectPath = bool(e)
-}
-
-// EnableDirectPathXds returns a ClientOption that overrides the default
-// DirectPath type. It is only valid when DirectPath is enabled.
-//
-// It should only be used internally by generated clients.
-// This is an EXPERIMENTAL API and may be changed or removed in the future.
-func EnableDirectPathXds() option.ClientOption {
-	return enableDirectPathXds(true)
-}
-
-type enableDirectPathXds bool
-
-func (x enableDirectPathXds) Apply(o *internal.DialSettings) {
-	o.EnableDirectPathXds = bool(x)
-}
-
-// AllowNonDefaultServiceAccount returns a ClientOption that overrides the default
-// requirement for using the default service account for DirectPath.
-//
-// It should only be used internally by generated clients.
-// This is an EXPERIMENTAL API and may be changed or removed in the future.
-func AllowNonDefaultServiceAccount(nd bool) option.ClientOption {
-	return allowNonDefaultServiceAccount(nd)
-}
-
-type allowNonDefaultServiceAccount bool
-
-func (a allowNonDefaultServiceAccount) Apply(o *internal.DialSettings) {
-	o.AllowNonDefaultServiceAccount = bool(a)
-}
-
-// WithDefaultAudience returns a ClientOption that specifies a default audience
-// to be used as the audience field ("aud") for the JWT token authentication.
-//
-// It should only be used internally by generated clients.
-func WithDefaultAudience(audience string) option.ClientOption {
-	return withDefaultAudience(audience)
-}
-
-type withDefaultAudience string
-
-func (w withDefaultAudience) Apply(o *internal.DialSettings) {
-	o.DefaultAudience = string(w)
-}
-
-// WithDefaultScopes returns a ClientOption that overrides the default OAuth2
-// scopes to be used for a service.
-//
-// It should only be used internally by generated clients.
-func WithDefaultScopes(scope ...string) option.ClientOption {
-	return withDefaultScopes(scope)
-}
-
-type withDefaultScopes []string
-
-func (w withDefaultScopes) Apply(o *internal.DialSettings) {
-	o.DefaultScopes = make([]string, len(w))
-	copy(o.DefaultScopes, w)
-}
-
-// EnableJwtWithScope returns a ClientOption that specifies if scope can be used
-// with self-signed JWT.
-func EnableJwtWithScope() option.ClientOption {
-	return enableJwtWithScope(true)
-}
-
-type enableJwtWithScope bool
-
-func (w enableJwtWithScope) Apply(o *internal.DialSettings) {
-	o.EnableJwtWithScope = bool(w)
-}
-
-// WithCredentials returns a client option to specify credentials which will be used to authenticate API calls.
-// This credential takes precedence over all other credential options.
-func WithCredentials(creds *google.Credentials) option.ClientOption {
-	return (*withCreds)(creds)
-}
-
-type withCreds google.Credentials
-
-func (w *withCreds) Apply(o *internal.DialSettings) {
-	o.InternalCredentials = (*google.Credentials)(w)
-}
-
-// EmbeddableAdapter is a no-op option.ClientOption that allows libraries to
-// create their own client options by embedding this type into their own
-// client-specific option wrapper. See example for usage.
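// The usage referred to above looks roughly like this; the option name and
// field are hypothetical. A library embeds EmbeddableAdapter so that its
// wrapper satisfies option.ClientOption with a no-op Apply, then recovers the
// value from the option slice itself:
//
//	type withPoolSize struct {
//		internaloption.EmbeddableAdapter
//		size int
//	}
//
//	func WithPoolSize(n int) option.ClientOption { return &withPoolSize{size: n} }
//
//	// Inside the library's constructor:
//	for _, opt := range opts {
//		if p, ok := opt.(*withPoolSize); ok {
//			poolSize = p.size
//		}
//	}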
-type EmbeddableAdapter struct{} - -func (*EmbeddableAdapter) Apply(_ *internal.DialSettings) {} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go deleted file mode 100644 index b2085a1949..0000000000 --- a/vendor/google.golang.org/api/option/option.go +++ /dev/null @@ -1,345 +0,0 @@ -// Copyright 2017 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package option contains options for Google API clients. -package option - -import ( - "crypto/tls" - "net/http" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "google.golang.org/api/internal" - "google.golang.org/api/internal/impersonate" - "google.golang.org/grpc" -) - -// A ClientOption is an option for a Google API client. -type ClientOption interface { - Apply(*internal.DialSettings) -} - -// WithTokenSource returns a ClientOption that specifies an OAuth2 token -// source to be used as the basis for authentication. -func WithTokenSource(s oauth2.TokenSource) ClientOption { - return withTokenSource{s} -} - -type withTokenSource struct{ ts oauth2.TokenSource } - -func (w withTokenSource) Apply(o *internal.DialSettings) { - o.TokenSource = w.ts -} - -type withCredFile string - -func (w withCredFile) Apply(o *internal.DialSettings) { - o.CredentialsFile = string(w) -} - -// WithCredentialsFile returns a ClientOption that authenticates -// API calls with the given service account or refresh token JSON -// credentials file. -func WithCredentialsFile(filename string) ClientOption { - return withCredFile(filename) -} - -// WithServiceAccountFile returns a ClientOption that uses a Google service -// account credentials file to authenticate. -// -// Deprecated: Use WithCredentialsFile instead. -func WithServiceAccountFile(filename string) ClientOption { - return WithCredentialsFile(filename) -} - -// WithCredentialsJSON returns a ClientOption that authenticates -// API calls with the given service account or refresh token JSON -// credentials. -func WithCredentialsJSON(p []byte) ClientOption { - return withCredentialsJSON(p) -} - -type withCredentialsJSON []byte - -func (w withCredentialsJSON) Apply(o *internal.DialSettings) { - o.CredentialsJSON = make([]byte, len(w)) - copy(o.CredentialsJSON, w) -} - -// WithEndpoint returns a ClientOption that overrides the default endpoint -// to be used for a service. -func WithEndpoint(url string) ClientOption { - return withEndpoint(url) -} - -type withEndpoint string - -func (w withEndpoint) Apply(o *internal.DialSettings) { - o.Endpoint = string(w) -} - -// WithScopes returns a ClientOption that overrides the default OAuth2 scopes -// to be used for a service. -// -// If both WithScopes and WithTokenSource are used, scope settings from the -// token source will be used instead. -func WithScopes(scope ...string) ClientOption { - return withScopes(scope) -} - -type withScopes []string - -func (w withScopes) Apply(o *internal.DialSettings) { - o.Scopes = make([]string, len(w)) - copy(o.Scopes, w) -} - -// WithUserAgent returns a ClientOption that sets the User-Agent. This option -// is incompatible with the [WithHTTPClient] option. If you wish to provide a -// custom client you will need to add this header via RoundTripper middleware. 
-func WithUserAgent(ua string) ClientOption {
-	return withUA(ua)
-}
-
-type withUA string
-
-func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) }
-
-// WithHTTPClient returns a ClientOption that specifies the HTTP client to use
-// as the basis of communications. This option may only be used with services
-// that support HTTP as their communication transport. When used, the
-// WithHTTPClient option takes precedence over all other supplied options.
-func WithHTTPClient(client *http.Client) ClientOption {
-	return withHTTPClient{client}
-}
-
-type withHTTPClient struct{ client *http.Client }
-
-func (w withHTTPClient) Apply(o *internal.DialSettings) {
-	o.HTTPClient = w.client
-}
-
-// WithGRPCConn returns a ClientOption that specifies the gRPC client
-// connection to use as the basis of communications. This option may only be
-// used with services that support gRPC as their communication transport. When
-// used, the WithGRPCConn option takes precedence over all other supplied
-// options.
-func WithGRPCConn(conn *grpc.ClientConn) ClientOption {
-	return withGRPCConn{conn}
-}
-
-type withGRPCConn struct{ conn *grpc.ClientConn }
-
-func (w withGRPCConn) Apply(o *internal.DialSettings) {
-	o.GRPCConn = w.conn
-}
-
-// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
-// to an underlying gRPC dial. It does not work with WithGRPCConn.
-func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
-	return withGRPCDialOption{opt}
-}
-
-type withGRPCDialOption struct{ opt grpc.DialOption }
-
-func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
-	o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
-}
-
-// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
-// connections that requests will be balanced between.
-func WithGRPCConnectionPool(size int) ClientOption {
-	return withGRPCConnectionPool(size)
-}
-
-type withGRPCConnectionPool int
-
-func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) {
-	o.GRPCConnPoolSize = int(w)
-}
-
-// WithAPIKey returns a ClientOption that specifies an API key to be used
-// as the basis for authentication.
-//
-// API Keys can only be used for JSON-over-HTTP APIs, including those under
-// the import path google.golang.org/api/....
-func WithAPIKey(apiKey string) ClientOption {
-	return withAPIKey(apiKey)
-}
-
-type withAPIKey string
-
-func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) }
-
-// WithAudiences returns a ClientOption that specifies an audience to be used
-// as the audience field ("aud") for the JWT token authentication.
-func WithAudiences(audience ...string) ClientOption {
-	return withAudiences(audience)
-}
-
-type withAudiences []string
-
-func (w withAudiences) Apply(o *internal.DialSettings) {
-	o.Audiences = make([]string, len(w))
-	copy(o.Audiences, w)
-}
-
-// WithoutAuthentication returns a ClientOption that specifies that no
-// authentication should be used. It is suitable only for testing and for
-// accessing public resources, like public Google Cloud Storage buckets.
-// It is an error to provide both WithoutAuthentication and any of WithAPIKey,
-// WithTokenSource, WithCredentialsFile or WithServiceAccountFile.
-func WithoutAuthentication() ClientOption {
-	return withoutAuthentication{}
-}
-
-type withoutAuthentication struct{}
-
-func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true }
-
-// WithQuotaProject returns a ClientOption that specifies the project used
-// for quota and billing purposes.
-//
-// For more information please read:
-// https://cloud.google.com/apis/docs/system-parameters
-func WithQuotaProject(quotaProject string) ClientOption {
-	return withQuotaProject(quotaProject)
-}
-
-type withQuotaProject string
-
-func (w withQuotaProject) Apply(o *internal.DialSettings) {
-	o.QuotaProject = string(w)
-}
-
-// WithRequestReason returns a ClientOption that specifies a reason for
-// making the request, which is intended to be recorded in audit logging.
-// An example reason would be a support-case ticket number.
-//
-// For more information please read:
-// https://cloud.google.com/apis/docs/system-parameters
-func WithRequestReason(requestReason string) ClientOption {
-	return withRequestReason(requestReason)
-}
-
-type withRequestReason string
-
-func (w withRequestReason) Apply(o *internal.DialSettings) {
-	o.RequestReason = string(w)
-}
-
-// WithTelemetryDisabled returns a ClientOption that disables default telemetry (OpenCensus)
-// settings on gRPC and HTTP clients.
-// An example reason would be to bind custom telemetry that overrides the defaults.
-func WithTelemetryDisabled() ClientOption {
-	return withTelemetryDisabled{}
-}
-
-type withTelemetryDisabled struct{}
-
-func (w withTelemetryDisabled) Apply(o *internal.DialSettings) {
-	o.TelemetryDisabled = true
-}
-
-// ClientCertSource is a function that returns a TLS client certificate to be used
-// when opening TLS connections.
-//
-// It follows the same semantics as crypto/tls.Config.GetClientCertificate.
-//
-// This is an EXPERIMENTAL API and may be changed or removed in the future.
-type ClientCertSource = func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
-
-// WithClientCertSource returns a ClientOption that specifies a
-// callback function for obtaining a TLS client certificate.
-//
-// This option is used for supporting mTLS authentication, where the
-// server validates the client certificate when establishing a connection.
-//
-// The callback function will be invoked whenever the server requests a
-// certificate from the client. Implementations of the callback function
-// should try to ensure that a valid certificate can be repeatedly returned
-// on demand for the entire life cycle of the transport client. If a nil
-// Certificate is returned (i.e. no Certificate can be obtained), an error
-// should be returned.
-//
-// This is an EXPERIMENTAL API and may be changed or removed in the future.
-func WithClientCertSource(s ClientCertSource) ClientOption {
-	return withClientCertSource{s}
-}
-
-type withClientCertSource struct{ s ClientCertSource }
-
-func (w withClientCertSource) Apply(o *internal.DialSettings) {
-	o.ClientCertSource = w.s
-}
-
-// ImpersonateCredentials returns a ClientOption that will impersonate the
-// target service account.
-//
-// In order to impersonate the target service account
-// the base service account must have the Service Account Token Creator role,
-// roles/iam.serviceAccountTokenCreator, on the target service account.
-// See https://cloud.google.com/iam/docs/understanding-service-accounts.
-//
-// Optionally, delegates can be used during impersonation if the base service
-// account lacks the token creator role on the target. When using delegates,
-// each service account must be granted roles/iam.serviceAccountTokenCreator
-// on the next service account in the chain.
-//
-// For example, if a base service account of SA1 is trying to impersonate target
-// service account SA2 while using delegate service accounts DSA1 and DSA2,
-// the following must be true:
-//
-//  1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on
-//     DSA1.
-//  2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2.
-//  3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2.
-//
-// The resulting impersonated credential will either have the default scopes of
-// the client being instantiated or the scopes from WithScopes if provided.
-// Scopes are required for creating impersonated credentials, so if this option
-// is used while not using a NewClient/NewService function, WithScopes must also
-// be explicitly passed in.
-//
-// If the base credential is an authorized user and not a service account, or if
-// the option WithQuotaProject is set, the target service account must have a
-// role that grants the serviceusage.services.use permission such as
-// roles/serviceusage.serviceUsageConsumer.
-//
-// This is an EXPERIMENTAL API and may be changed or removed in the future.
-//
-// Deprecated: This option has been replaced by the `impersonate` package:
-// `google.golang.org/api/impersonate`. Please use the `impersonate` package
-// instead with the WithTokenSource option.
-func ImpersonateCredentials(target string, delegates ...string) ClientOption {
-	return impersonateServiceAccount{
-		target:    target,
-		delegates: delegates,
-	}
-}
-
-type impersonateServiceAccount struct {
-	target    string
-	delegates []string
-}
-
-func (i impersonateServiceAccount) Apply(o *internal.DialSettings) {
-	o.ImpersonationConfig = &impersonate.Config{
-		Target: i.target,
-	}
-	o.ImpersonationConfig.Delegates = make([]string, len(i.delegates))
-	copy(o.ImpersonationConfig.Delegates, i.delegates)
-}
-
-type withCreds google.Credentials
-
-func (w *withCreds) Apply(o *internal.DialSettings) {
-	o.Credentials = (*google.Credentials)(w)
-}
-
-// WithCredentials returns a ClientOption that authenticates API calls.
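// Taken together, a typical anonymous client for public data looks like the
// sketch below; the Cloud Storage service from this module is used for
// illustration. Combining WithoutAuthentication with any credential-bearing
// option fails DialSettings.Validate with "options.WithoutAuthentication is
// incompatible with any option that provides credentials".
//
//	svc, err := storage.NewService(ctx,
//		option.WithoutAuthentication(),
//		option.WithUserAgent("my-tool/1.0"),
//	)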
-func WithCredentials(creds *google.Credentials) ClientOption { - return (*withCreds)(creds) -} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json deleted file mode 100644 index edebc73ad4..0000000000 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ /dev/null @@ -1,4283 +0,0 @@ -{ - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/cloud-platform.read-only": { - "description": "View your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/devstorage.full_control": { - "description": "Manage your data and permissions in Google Cloud Storage" - }, - "https://www.googleapis.com/auth/devstorage.read_only": { - "description": "View your data in Google Cloud Storage" - }, - "https://www.googleapis.com/auth/devstorage.read_write": { - "description": "Manage your data in Google Cloud Storage" - } - } - } - }, - "basePath": "/storage/v1/", - "baseUrl": "https://storage.googleapis.com/storage/v1/", - "batchPath": "batch/storage/v1", - "description": "Stores and retrieves potentially large, immutable data objects.", - "discoveryVersion": "v1", - "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"34333739363230323936363635393736363430\"", - "icons": { - "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", - "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" - }, - "id": "storage:v1", - "kind": "discovery#restDescription", - "labels": [ - "labs" - ], - "mtlsRootUrl": "https://storage.mtls.googleapis.com/", - "name": "storage", - "ownerDomain": "google.com", - "ownerName": "Google", - "parameters": { - "alt": { - "default": "json", - "description": "Data format for the response.", - "enum": [ - "json" - ], - "enumDescriptions": [ - "Responses with Content-Type of application/json" - ], - "location": "query", - "type": "string" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "location": "query", - "type": "string" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query", - "type": "string" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "location": "query", - "type": "string" - }, - "prettyPrint": { - "default": "true", - "description": "Returns response with indentations and line breaks.", - "location": "query", - "type": "boolean" - }, - "quotaUser": { - "description": "An opaque string that represents a user for quota purposes. Must not exceed 40 characters.", - "location": "query", - "type": "string" - }, - "uploadType": { - "description": "Upload protocol for media (e.g. \"media\", \"multipart\", \"resumable\").", - "location": "query", - "type": "string" - }, - "userIp": { - "description": "Deprecated. 
Please use quotaUser instead.", - "location": "query", - "type": "string" - } - }, - "protocol": "rest", - "resources": { - "bucketAccessControls": { - "methods": { - "delete": { - "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", - "httpMethod": "DELETE", - "id": "storage.bucketAccessControls.delete", - "parameterOrder": [ - "bucket", - "entity" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "entity": { - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/acl/{entity}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "get": { - "description": "Returns the ACL entry for the specified entity on the specified bucket.", - "httpMethod": "GET", - "id": "storage.bucketAccessControls.get", - "parameterOrder": [ - "bucket", - "entity" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "entity": { - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/acl/{entity}", - "response": { - "$ref": "BucketAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "insert": { - "description": "Creates a new ACL entry on the specified bucket.", - "httpMethod": "POST", - "id": "storage.bucketAccessControls.insert", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/acl", - "request": { - "$ref": "BucketAccessControl" - }, - "response": { - "$ref": "BucketAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "list": { - "description": "Retrieves ACL entries on the specified bucket.", - "httpMethod": "GET", - "id": "storage.bucketAccessControls.list", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/acl", - "response": { - "$ref": "BucketAccessControls" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "patch": { - "description": "Patches an ACL entry on the specified bucket.", - "httpMethod": "PATCH", - "id": "storage.bucketAccessControls.patch", - "parameterOrder": [ - "bucket", - "entity" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "entity": { - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/acl/{entity}", - "request": { - "$ref": "BucketAccessControl" - }, - "response": { - "$ref": "BucketAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "update": { - "description": "Updates an ACL entry on the specified bucket.", - "httpMethod": "PUT", - "id": "storage.bucketAccessControls.update", - "parameterOrder": [ - "bucket", - "entity" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "entity": { - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/acl/{entity}", - "request": { - "$ref": "BucketAccessControl" - }, - "response": { - "$ref": "BucketAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - } - } - }, - "buckets": { - "methods": { - "delete": { - "description": "Permanently deletes an empty bucket.", - "httpMethod": "DELETE", - "id": "storage.buckets.delete", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "If set, only deletes the bucket if its metageneration matches this value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationNotMatch": { - "description": "If set, only deletes the bucket if its metageneration does not match this value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "get": { - "description": "Returns metadata for the specified bucket.", - "httpMethod": "GET", - "id": "storage.buckets.get", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationNotMatch": { - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit owner, acl and defaultObjectAcl properties." - ], - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}", - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "getIamPolicy": { - "description": "Returns an IAM policy for the specified bucket.", - "httpMethod": "GET", - "id": "storage.buckets.getIamPolicy", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "optionsRequestedPolicyVersion": { - "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", - "format": "int32", - "location": "query", - "minimum": "1", - "type": "integer" - }, - "userProject": { - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/iam", - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "insert": { - "description": "Creates a new bucket.", - "httpMethod": "POST", - "id": "storage.buckets.insert", - "parameterOrder": [ - "project" - ], - "parameters": { - "predefinedAcl": { - "description": "Apply a predefined set of access controls to this bucket.", - "enum": [ - "authenticatedRead", - "private", - "projectPrivate", - "publicRead", - "publicReadWrite" - ], - "enumDescriptions": [ - "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - "Project team owners get OWNER access.", - "Project team members get access according to their roles.", - "Project team owners get OWNER access, and allUsers get READER access.", - "Project team owners get OWNER access, and allUsers get WRITER access." - ], - "location": "query", - "type": "string" - }, - "predefinedDefaultObjectAcl": { - "description": "Apply a predefined set of default object access controls to this bucket.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query", - "type": "string" - }, - "project": { - "description": "A valid API project identifier.", - "location": "query", - "required": true, - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit owner, acl and defaultObjectAcl properties." - ], - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request.", - "location": "query", - "type": "string" - } - }, - "path": "b", - "request": { - "$ref": "Bucket" - }, - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "list": { - "description": "Retrieves a list of buckets for a given project.", - "httpMethod": "GET", - "id": "storage.buckets.list", - "parameterOrder": [ - "project" - ], - "parameters": { - "maxResults": { - "default": "1000", - "description": "Maximum number of buckets to return in a single response. 
The service will use this parameter or 1,000 items, whichever is smaller.", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "pageToken": { - "description": "A previously-returned page token representing part of the larger set of results to view.", - "location": "query", - "type": "string" - }, - "prefix": { - "description": "Filter results to buckets whose names begin with this prefix.", - "location": "query", - "type": "string" - }, - "project": { - "description": "A valid API project identifier.", - "location": "query", - "required": true, - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit owner, acl and defaultObjectAcl properties." - ], - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request.", - "location": "query", - "type": "string" - } - }, - "path": "b", - "response": { - "$ref": "Buckets" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "lockRetentionPolicy": { - "description": "Locks retention policy on a bucket.", - "httpMethod": "POST", - "id": "storage.buckets.lockRetentionPolicy", - "parameterOrder": [ - "bucket", - "ifMetagenerationMatch" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/lockRetentionPolicy", - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "patch": { - "description": "Patches a bucket. 
Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", - "httpMethod": "PATCH", - "id": "storage.buckets.patch", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationNotMatch": { - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "predefinedAcl": { - "description": "Apply a predefined set of access controls to this bucket.", - "enum": [ - "authenticatedRead", - "private", - "projectPrivate", - "publicRead", - "publicReadWrite" - ], - "enumDescriptions": [ - "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - "Project team owners get OWNER access.", - "Project team members get access according to their roles.", - "Project team owners get OWNER access, and allUsers get READER access.", - "Project team owners get OWNER access, and allUsers get WRITER access." - ], - "location": "query", - "type": "string" - }, - "predefinedDefaultObjectAcl": { - "description": "Apply a predefined set of default object access controls to this bucket.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query", - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit owner, acl and defaultObjectAcl properties." - ], - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}", - "request": { - "$ref": "Bucket" - }, - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "setIamPolicy": { - "description": "Updates an IAM policy for the specified bucket.", - "httpMethod": "PUT", - "id": "storage.buckets.setIamPolicy", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/iam", - "request": { - "$ref": "Policy" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "testIamPermissions": { - "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", - "httpMethod": "GET", - "id": "storage.buckets.testIamPermissions", - "parameterOrder": [ - "bucket", - "permissions" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "permissions": { - "description": "Permissions to test.", - "location": "query", - "repeated": true, - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/iam/testPermissions", - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "update": { - "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", - "httpMethod": "PUT", - "id": "storage.buckets.update", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationNotMatch": { - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "predefinedAcl": { - "description": "Apply a predefined set of access controls to this bucket.", - "enum": [ - "authenticatedRead", - "private", - "projectPrivate", - "publicRead", - "publicReadWrite" - ], - "enumDescriptions": [ - "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - "Project team owners get OWNER access.", - "Project team members get access according to their roles.", - "Project team owners get OWNER access, and allUsers get READER access.", - "Project team owners get OWNER access, and allUsers get WRITER access." 
- ], - "location": "query", - "type": "string" - }, - "predefinedDefaultObjectAcl": { - "description": "Apply a predefined set of default object access controls to this bucket.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query", - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit owner, acl and defaultObjectAcl properties." - ], - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}", - "request": { - "$ref": "Bucket" - }, - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - } - } - }, - "channels": { - "methods": { - "stop": { - "description": "Stop watching resources through this channel", - "httpMethod": "POST", - "id": "storage.channels.stop", - "path": "channels/stop", - "request": { - "$ref": "Channel", - "parameterName": "resource" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - } - } - }, - "defaultObjectAccessControls": { - "methods": { - "delete": { - "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", - "httpMethod": "DELETE", - "id": "storage.defaultObjectAccessControls.delete", - "parameterOrder": [ - "bucket", - "entity" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "entity": { - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "get": { - "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", - "httpMethod": "GET", - "id": "storage.defaultObjectAccessControls.get", - "parameterOrder": [ - "bucket", - "entity" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "entity": { - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "insert": { - "description": "Creates a new default object ACL entry on the specified bucket.", - "httpMethod": "POST", - "id": "storage.defaultObjectAccessControls.insert", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/defaultObjectAcl", - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "list": { - "description": "Retrieves default object ACL entries on the specified bucket.", - "httpMethod": "GET", - "id": "storage.defaultObjectAccessControls.list", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationNotMatch": { - "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/defaultObjectAcl", - "response": { - "$ref": "ObjectAccessControls" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "patch": { - "description": "Patches a default object ACL entry on the specified bucket.", - "httpMethod": "PATCH", - "id": "storage.defaultObjectAccessControls.patch", - "parameterOrder": [ - "bucket", - "entity" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "entity": { - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "update": { - "description": "Updates a default object ACL entry on the specified bucket.", - "httpMethod": "PUT", - "id": "storage.defaultObjectAccessControls.update", - "parameterOrder": [ - "bucket", - "entity" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "entity": { - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - } - } - }, - "notifications": { - "methods": { - "delete": { - "description": "Permanently deletes a notification subscription.", - "httpMethod": "DELETE", - "id": "storage.notifications.delete", - "parameterOrder": [ - "bucket", - "notification" - ], - "parameters": { - "bucket": { - "description": "The parent bucket of the notification.", - "location": "path", - "required": true, - "type": "string" - }, - "notification": { - "description": "ID of the notification to delete.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/notificationConfigs/{notification}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "get": { - "description": "View a notification configuration.", - "httpMethod": "GET", - "id": "storage.notifications.get", - "parameterOrder": [ - "bucket", - "notification" - ], - "parameters": { - "bucket": { - "description": "The parent bucket of the notification.", - "location": "path", - "required": true, - "type": "string" - }, - "notification": { - "description": "Notification ID", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/notificationConfigs/{notification}", - "response": { - "$ref": "Notification" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "insert": { - "description": "Creates a notification subscription for a given bucket.", - "httpMethod": "POST", - "id": "storage.notifications.insert", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "The parent bucket of the notification.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/notificationConfigs", - "request": { - "$ref": "Notification" - }, - "response": { - "$ref": "Notification" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "list": { - "description": "Retrieves a list of notification subscriptions for a given bucket.", - "httpMethod": "GET", - "id": "storage.notifications.list", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of a Google Cloud Storage bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/notificationConfigs", - "response": { - "$ref": "Notifications" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - } - } - }, - "objectAccessControls": { - "methods": { - "delete": { - "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", - "httpMethod": "DELETE", - "id": "storage.objectAccessControls.delete", - "parameterOrder": [ - "bucket", - "object", - "entity" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "entity": { - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}/acl/{entity}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "get": { - "description": "Returns the ACL entry for the specified entity on the specified object.", - "httpMethod": "GET", - "id": "storage.objectAccessControls.get", - "parameterOrder": [ - "bucket", - "object", - "entity" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "entity": { - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}/acl/{entity}", - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "insert": { - "description": "Creates a new ACL entry on the specified object.", - "httpMethod": "POST", - "id": "storage.objectAccessControls.insert", - "parameterOrder": [ - "bucket", - "object" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}/acl", - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "list": { - "description": "Retrieves ACL entries on the specified object.", - "httpMethod": "GET", - "id": "storage.objectAccessControls.list", - "parameterOrder": [ - "bucket", - "object" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}/acl", - "response": { - "$ref": "ObjectAccessControls" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "patch": { - "description": "Patches an ACL entry on the specified object.", - "httpMethod": "PATCH", - "id": "storage.objectAccessControls.patch", - "parameterOrder": [ - "bucket", - "object", - "entity" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "entity": { - "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}/acl/{entity}", - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "update": { - "description": "Updates an ACL entry on the specified object.", - "httpMethod": "PUT", - "id": "storage.objectAccessControls.update", - "parameterOrder": [ - "bucket", - "object", - "entity" - ], - "parameters": { - "bucket": { - "description": "Name of a bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "entity": { - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}/acl/{entity}", - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - } - } - }, - "objects": { - "methods": { - "compose": { - "description": "Concatenates a list of existing objects into a new object in the same bucket.", - "httpMethod": "POST", - "id": "storage.objects.compose", - "parameterOrder": [ - "destinationBucket", - "destinationObject" - ], - "parameters": { - "destinationBucket": { - "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.", - "location": "path", - "required": true, - "type": "string" - }, - "destinationObject": { - "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "destinationPredefinedAcl": { - "description": "Apply a predefined set of access controls to the destination object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query", - "type": "string" - }, - "ifGenerationMatch": { - "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "kmsKeyName": { - "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{destinationBucket}/o/{destinationObject}/compose", - "request": { - "$ref": "ComposeRequest" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "copy": { - "description": "Copies a source object to a destination object. Optionally overrides metadata.", - "httpMethod": "POST", - "id": "storage.objects.copy", - "parameterOrder": [ - "sourceBucket", - "sourceObject", - "destinationBucket", - "destinationObject" - ], - "parameters": { - "destinationBucket": { - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "destinationKmsKeyName": { - "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - "location": "query", - "type": "string" - }, - "destinationObject": { - "description": "Name of the new object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.", - "location": "path", - "required": true, - "type": "string" - }, - "destinationPredefinedAcl": { - "description": "Apply a predefined set of access controls to the destination object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query", - "type": "string" - }, - "ifGenerationMatch": { - "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifGenerationNotMatch": { - "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationNotMatch": { - "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifSourceGenerationMatch": { - "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifSourceGenerationNotMatch": { - "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifSourceMetagenerationMatch": { - "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifSourceMetagenerationNotMatch": { - "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." 
- ], - "location": "query", - "type": "string" - }, - "sourceBucket": { - "description": "Name of the bucket in which to find the source object.", - "location": "path", - "required": true, - "type": "string" - }, - "sourceGeneration": { - "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "sourceObject": { - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "delete": { - "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", - "httpMethod": "DELETE", - "id": "storage.objects.delete", - "parameterOrder": [ - "bucket", - "object" - ], - "parameters": { - "bucket": { - "description": "Name of the bucket in which the object resides.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifGenerationMatch": { - "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifGenerationNotMatch": { - "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationNotMatch": { - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "get": { - "description": "Retrieves an object or its metadata.", - "httpMethod": "GET", - "id": "storage.objects.get", - "parameterOrder": [ - "bucket", - "object" - ], - "parameters": { - "bucket": { - "description": "Name of the bucket in which the object resides.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifGenerationMatch": { - "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifGenerationNotMatch": { - "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationNotMatch": { - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}", - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true - }, - "getIamPolicy": { - "description": "Returns an IAM policy for the specified object.", - "httpMethod": "GET", - "id": "storage.objects.getIamPolicy", - "parameterOrder": [ - "bucket", - "object" - ], - "parameters": { - "bucket": { - "description": "Name of the bucket in which the object resides.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}/iam", - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "insert": { - "description": "Stores a new object and metadata.", - "httpMethod": "POST", - "id": "storage.objects.insert", - "mediaUpload": { - "accept": [ - "*/*" - ], - "protocols": { - "resumable": { - "multipart": true, - "path": "/resumable/upload/storage/v1/b/{bucket}/o" - }, - "simple": { - "multipart": true, - "path": "/upload/storage/v1/b/{bucket}/o" - } - } - }, - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - "location": "path", - "required": true, - "type": "string" - }, - "contentEncoding": { - "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", - "location": "query", - "type": "string" - }, - "ifGenerationMatch": { - "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifGenerationNotMatch": { - "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationNotMatch": { - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "kmsKeyName": { - "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - "location": "query", - "type": "string" - }, - "name": { - "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "query", - "type": "string" - }, - "predefinedAcl": { - "description": "Apply a predefined set of access controls to this object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query", - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o", - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsMediaUpload": true - }, - "list": { - "description": "Retrieves a list of objects matching the criteria.", - "httpMethod": "GET", - "id": "storage.objects.list", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of the bucket in which to look for objects.", - "location": "path", - "required": true, - "type": "string" - }, - "delimiter": { - "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. 
Duplicate prefixes are omitted.", - "location": "query", - "type": "string" - }, - "endOffset": { - "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - "location": "query", - "type": "string" - }, - "includeTrailingDelimiter": { - "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", - "location": "query", - "type": "boolean" - }, - "matchGlob": { - "description": "Filter results to objects and prefixes that match this glob pattern.", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "1000", - "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "pageToken": { - "description": "A previously-returned page token representing part of the larger set of results to view.", - "location": "query", - "type": "string" - }, - "prefix": { - "description": "Filter results to objects whose names begin with this prefix.", - "location": "query", - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query", - "type": "string" - }, - "startOffset": { - "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - }, - "versions": { - "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - "location": "query", - "type": "boolean" - } - }, - "path": "b/{bucket}/o", - "response": { - "$ref": "Objects" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsSubscription": true - }, - "patch": { - "description": "Patches an object's metadata.", - "httpMethod": "PATCH", - "id": "storage.objects.patch", - "parameterOrder": [ - "bucket", - "object" - ], - "parameters": { - "bucket": { - "description": "Name of the bucket in which the object resides.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifGenerationMatch": { - "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifGenerationNotMatch": { - "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationNotMatch": { - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "predefinedAcl": { - "description": "Apply a predefined set of access controls to this object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query", - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request, for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}", - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "rewrite": { - "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", - "httpMethod": "POST", - "id": "storage.objects.rewrite", - "parameterOrder": [ - "sourceBucket", - "sourceObject", - "destinationBucket", - "destinationObject" - ], - "parameters": { - "destinationBucket": { - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - "location": "path", - "required": true, - "type": "string" - }, - "destinationKmsKeyName": { - "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - "location": "query", - "type": "string" - }, - "destinationObject": { - "description": "Name of the new object. 
Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "destinationPredefinedAcl": { - "description": "Apply a predefined set of access controls to the destination object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query", - "type": "string" - }, - "ifGenerationMatch": { - "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifGenerationNotMatch": { - "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationNotMatch": { - "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifSourceGenerationMatch": { - "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifSourceGenerationNotMatch": { - "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifSourceMetagenerationMatch": { - "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifSourceMetagenerationNotMatch": { - "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "maxBytesRewrittenPerCall": { - "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. 
Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", - "format": "int64", - "location": "query", - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query", - "type": "string" - }, - "rewriteToken": { - "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", - "location": "query", - "type": "string" - }, - "sourceBucket": { - "description": "Name of the bucket in which to find the source object.", - "location": "path", - "required": true, - "type": "string" - }, - "sourceGeneration": { - "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "sourceObject": { - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "RewriteResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "setIamPolicy": { - "description": "Updates an IAM policy for the specified object.", - "httpMethod": "PUT", - "id": "storage.objects.setIamPolicy", - "parameterOrder": [ - "bucket", - "object" - ], - "parameters": { - "bucket": { - "description": "Name of the bucket in which the object resides.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}/iam", - "request": { - "$ref": "Policy" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "testIamPermissions": { - "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", - "httpMethod": "GET", - "id": "storage.objects.testIamPermissions", - "parameterOrder": [ - "bucket", - "object", - "permissions" - ], - "parameters": { - "bucket": { - "description": "Name of the bucket in which the object resides.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "permissions": { - "description": "Permissions to test.", - "location": "query", - "repeated": true, - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}/iam/testPermissions", - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "update": { - "description": "Updates an object's metadata.", - "httpMethod": "PUT", - "id": "storage.objects.update", - "parameterOrder": [ - "bucket", - "object" - ], - "parameters": { - "bucket": { - "description": "Name of the bucket in which the object resides.", - "location": "path", - "required": true, - "type": "string" - }, - "generation": { - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifGenerationMatch": { - "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifGenerationNotMatch": { - "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationMatch": { - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "ifMetagenerationNotMatch": { - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query", - "type": "string" - }, - "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "path", - "required": true, - "type": "string" - }, - "predefinedAcl": { - "description": "Apply a predefined set of access controls to this object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query", - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - } - }, - "path": "b/{bucket}/o/{object}", - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "watchAll": { - "description": "Watch for changes on all objects in a bucket.", - "httpMethod": "POST", - "id": "storage.objects.watchAll", - "parameterOrder": [ - "bucket" - ], - "parameters": { - "bucket": { - "description": "Name of the bucket in which to look for objects.", - "location": "path", - "required": true, - "type": "string" - }, - "delimiter": { - "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - "location": "query", - "type": "string" - }, - "endOffset": { - "description": "Filter results to objects whose names are lexicographically before endOffset. 
If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - "location": "query", - "type": "string" - }, - "includeTrailingDelimiter": { - "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", - "location": "query", - "type": "boolean" - }, - "maxResults": { - "default": "1000", - "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "pageToken": { - "description": "A previously-returned page token representing part of the larger set of results to view.", - "location": "query", - "type": "string" - }, - "prefix": { - "description": "Filter results to objects whose names begin with this prefix.", - "location": "query", - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query", - "type": "string" - }, - "startOffset": { - "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - }, - "versions": { - "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", - "location": "query", - "type": "boolean" - } - }, - "path": "b/{bucket}/o/watch", - "request": { - "$ref": "Channel", - "parameterName": "resource" - }, - "response": { - "$ref": "Channel" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsSubscription": true - } - } - }, - "projects": { - "resources": { - "hmacKeys": { - "methods": { - "create": { - "description": "Creates a new HMAC key for the specified service account.", - "httpMethod": "POST", - "id": "storage.projects.hmacKeys.create", - "parameterOrder": [ - "projectId", - "serviceAccountEmail" - ], - "parameters": { - "projectId": { - "description": "Project ID owning the service account.", - "location": "path", - "required": true, - "type": "string" - }, - "serviceAccountEmail": { - "description": "Email address of the service account.", - "location": "query", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request.", - "location": "query", - "type": "string" - } - }, - "path": "projects/{projectId}/hmacKeys", - "response": { - "$ref": "HmacKey" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "delete": { - "description": "Deletes an HMAC key.", - "httpMethod": "DELETE", - "id": "storage.projects.hmacKeys.delete", - "parameterOrder": [ - "projectId", - "accessId" - ], - "parameters": { - "accessId": { - "description": "Name of the HMAC key to be deleted.", - "location": "path", - "required": true, - "type": "string" - }, - "projectId": { - "description": "Project ID owning the requested key.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request.", - "location": "query", - "type": "string" - } - }, - "path": "projects/{projectId}/hmacKeys/{accessId}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "get": { - "description": "Retrieves an HMAC key's metadata.", - "httpMethod": "GET", - "id": "storage.projects.hmacKeys.get", - "parameterOrder": [ - "projectId", - "accessId" - ], - "parameters": { - "accessId": { - "description": "Name of the HMAC key.", - "location": "path", - "required": true, - "type": "string" - }, - "projectId": { - "description": "Project ID owning the service account of the requested key.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request.", - "location": "query", - "type": "string" - } - }, - "path": "projects/{projectId}/hmacKeys/{accessId}", - "response": { - "$ref": "HmacKeyMetadata" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only" - ] - }, - "list": { - "description": "Retrieves a list of HMAC keys matching the criteria.", - "httpMethod": "GET", - "id": "storage.projects.hmacKeys.list", - 
"parameterOrder": [ - "projectId" - ], - "parameters": { - "maxResults": { - "default": "250", - "description": "Maximum number of items to return in a single page of responses. The service uses this parameter or 250 items, whichever is smaller. The max number of items per page will also be limited by the number of distinct service accounts in the response. If the number of service accounts in a single response is too high, the page will truncated and a next page token will be returned.", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "pageToken": { - "description": "A previously-returned page token representing part of the larger set of results to view.", - "location": "query", - "type": "string" - }, - "projectId": { - "description": "Name of the project in which to look for HMAC keys.", - "location": "path", - "required": true, - "type": "string" - }, - "serviceAccountEmail": { - "description": "If present, only keys for the given service account are returned.", - "location": "query", - "type": "string" - }, - "showDeletedKeys": { - "description": "Whether or not to show keys in the DELETED state.", - "location": "query", - "type": "boolean" - }, - "userProject": { - "description": "The project to be billed for this request.", - "location": "query", - "type": "string" - } - }, - "path": "projects/{projectId}/hmacKeys", - "response": { - "$ref": "HmacKeysMetadata" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only" - ] - }, - "update": { - "description": "Updates the state of an HMAC key. See the HMAC Key resource descriptor for valid states.", - "httpMethod": "PUT", - "id": "storage.projects.hmacKeys.update", - "parameterOrder": [ - "projectId", - "accessId" - ], - "parameters": { - "accessId": { - "description": "Name of the HMAC key being updated.", - "location": "path", - "required": true, - "type": "string" - }, - "projectId": { - "description": "Project ID owning the service account of the updated key.", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request.", - "location": "query", - "type": "string" - } - }, - "path": "projects/{projectId}/hmacKeys/{accessId}", - "request": { - "$ref": "HmacKeyMetadata" - }, - "response": { - "$ref": "HmacKeyMetadata" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - } - } - }, - "serviceAccount": { - "methods": { - "get": { - "description": "Get the email address of this project's Google Cloud Storage service account.", - "httpMethod": "GET", - "id": "storage.projects.serviceAccount.get", - "parameterOrder": [ - "projectId" - ], - "parameters": { - "projectId": { - "description": "Project ID", - "location": "path", - "required": true, - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request.", - "location": "query", - "type": "string" - } - }, - "path": "projects/{projectId}/serviceAccount", - "response": { - "$ref": "ServiceAccount" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - 
"https://www.googleapis.com/auth/devstorage.read_write" - ] - } - } - } - } - } - }, - "revision": "20230301", - "rootUrl": "https://storage.googleapis.com/", - "schemas": { - "Bucket": { - "description": "A bucket.", - "id": "Bucket", - "properties": { - "acl": { - "annotations": { - "required": [ - "storage.buckets.update" - ] - }, - "description": "Access controls on the bucket.", - "items": { - "$ref": "BucketAccessControl" - }, - "type": "array" - }, - "autoclass": { - "description": "The bucket's Autoclass configuration.", - "properties": { - "enabled": { - "description": "Whether or not Autoclass is enabled on this bucket", - "type": "boolean" - }, - "toggleTime": { - "description": "A date and time in RFC 3339 format representing the instant at which \"enabled\" was last toggled.", - "format": "date-time", - "type": "string" - } - }, - "type": "object" - }, - "billing": { - "description": "The bucket's billing configuration.", - "properties": { - "requesterPays": { - "description": "When set to true, Requester Pays is enabled for this bucket.", - "type": "boolean" - } - }, - "type": "object" - }, - "cors": { - "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.", - "items": { - "properties": { - "maxAgeSeconds": { - "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.", - "format": "int32", - "type": "integer" - }, - "method": { - "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".", - "items": { - "type": "string" - }, - "type": "array" - }, - "origin": { - "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".", - "items": { - "type": "string" - }, - "type": "array" - }, - "responseHeader": { - "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "type": "array" - }, - "customPlacementConfig": { - "description": "The bucket's custom placement configuration for Custom Dual Regions.", - "properties": { - "dataLocations": { - "description": "The list of regional locations in which data is placed.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "defaultEventBasedHold": { - "description": "The default value for event-based hold on newly created objects in this bucket. Event-based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release. After being released, such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false. 
Objects under event-based hold cannot be deleted, overwritten or archived until the hold is removed.", - "type": "boolean" - }, - "defaultObjectAcl": { - "description": "Default access controls to apply to new objects when no ACL is provided.", - "items": { - "$ref": "ObjectAccessControl" - }, - "type": "array" - }, - "encryption": { - "description": "Encryption configuration for a bucket.", - "properties": { - "defaultKmsKeyName": { - "description": "A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified.", - "type": "string" - } - }, - "type": "object" - }, - "etag": { - "description": "HTTP 1.1 Entity tag for the bucket.", - "type": "string" - }, - "iamConfiguration": { - "description": "The bucket's IAM configuration.", - "properties": { - "bucketPolicyOnly": { - "description": "The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. We recommend using the uniformBucketLevelAccess field to enable and disable the feature.", - "properties": { - "enabled": { - "description": "If set, access is controlled only by bucket-level or above IAM policies.", - "type": "boolean" - }, - "lockedTime": { - "description": "The deadline for changing iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed from true to false until the locked time, after which the field is immutable.", - "format": "date-time", - "type": "string" - } - }, - "type": "object" - }, - "publicAccessPrevention": { - "description": "The bucket's Public Access Prevention configuration. Currently, 'inherited' and 'enforced' are supported.", - "type": "string" - }, - "uniformBucketLevelAccess": { - "description": "The bucket's uniform bucket-level access configuration.", - "properties": { - "enabled": { - "description": "If set, access is controlled only by bucket-level or above IAM policies.", - "type": "boolean" - }, - "lockedTime": { - "description": "The deadline for changing iamConfiguration.uniformBucketLevelAccess.enabled from true to false in RFC 3339 format. iamConfiguration.uniformBucketLevelAccess.enabled may be changed from true to false until the locked time, after which the field is immutable.", - "format": "date-time", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "id": { - "description": "The ID of the bucket. For buckets, the id and name properties are the same.", - "type": "string" - }, - "kind": { - "default": "storage#bucket", - "description": "The kind of item this is. For buckets, this is always storage#bucket.", - "type": "string" - }, - "labels": { - "additionalProperties": { - "description": "An individual label entry.", - "type": "string" - }, - "description": "User-provided labels, in key/value pairs.", - "type": "object" - }, - "lifecycle": { - "description": "The bucket's lifecycle configuration. See lifecycle management for more information.", - "properties": { - "rule": { - "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", - "items": { - "properties": { - "action": { - "description": "The action to take.", - "properties": { - "storageClass": { - "description": "Target storage class. 
Required iff the type of the action is SetStorageClass.", - "type": "string" - }, - "type": { - "description": "Type of the action. Currently, only Delete, SetStorageClass, and AbortIncompleteMultipartUpload are supported.", - "type": "string" - } - }, - "type": "object" - }, - "condition": { - "description": "The condition(s) under which the action will be taken.", - "properties": { - "age": { - "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.", - "format": "int32", - "type": "integer" - }, - "createdBefore": { - "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.", - "format": "date", - "type": "string" - }, - "customTimeBefore": { - "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when the custom time on an object is before this date in UTC.", - "format": "date", - "type": "string" - }, - "daysSinceCustomTime": { - "description": "Number of days elapsed since the user-specified timestamp set on an object. The condition is satisfied if the days elapsed is at least this number. If no custom timestamp is specified on an object, the condition does not apply.", - "format": "int32", - "type": "integer" - }, - "daysSinceNoncurrentTime": { - "description": "Number of days elapsed since the noncurrent timestamp of an object. The condition is satisfied if the days elapsed is at least this number. This condition is relevant only for versioned objects. The value of the field must be a nonnegative integer. If it's zero, the object version will become eligible for Lifecycle action as soon as it becomes noncurrent.", - "format": "int32", - "type": "integer" - }, - "isLive": { - "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects.", - "type": "boolean" - }, - "matchesPattern": { - "description": "A regular expression that satisfies the RE2 syntax. This condition is satisfied when the name of the object matches the RE2 pattern. Note: This feature is currently in the \"Early Access\" launch stage and is only available to a whitelisted set of users; that means that this feature may be changed in backward-incompatible ways and that it is not guaranteed to be released.", - "type": "string" - }, - "matchesPrefix": { - "description": "List of object name prefixes. This condition will be satisfied when at least one of the prefixes exactly matches the beginning of the object name.", - "items": { - "type": "string" - }, - "type": "array" - }, - "matchesStorageClass": { - "description": "Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.", - "items": { - "type": "string" - }, - "type": "array" - }, - "matchesSuffix": { - "description": "List of object name suffixes. This condition will be satisfied when at least one of the suffixes exactly matches the end of the object name.", - "items": { - "type": "string" - }, - "type": "array" - }, - "noncurrentTimeBefore": { - "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when the noncurrent time on an object is before this date in UTC. 
This condition is relevant only for versioned objects.", - "format": "date", - "type": "string" - }, - "numNewerVersions": { - "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "location": { - "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.", - "type": "string" - }, - "locationType": { - "description": "The type of the bucket location.", - "type": "string" - }, - "logging": { - "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.", - "properties": { - "logBucket": { - "description": "The destination bucket where the current bucket's logs should be placed.", - "type": "string" - }, - "logObjectPrefix": { - "description": "A prefix for log object names.", - "type": "string" - } - }, - "type": "object" - }, - "metageneration": { - "description": "The metadata generation of this bucket.", - "format": "int64", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "storage.buckets.insert" - ] - }, - "description": "The name of the bucket.", - "type": "string" - }, - "owner": { - "description": "The owner of the bucket. This is always the project team's owner group.", - "properties": { - "entity": { - "description": "The entity, in the form project-owner-projectId.", - "type": "string" - }, - "entityId": { - "description": "The ID for the entity.", - "type": "string" - } - }, - "type": "object" - }, - "projectNumber": { - "description": "The project number of the project the bucket belongs to.", - "format": "uint64", - "type": "string" - }, - "retentionPolicy": { - "description": "The bucket's retention policy. The retention policy enforces a minimum retention time for all objects contained in the bucket, based on their creation time. Any attempt to overwrite or delete objects younger than the retention period will result in a PERMISSION_DENIED error. An unlocked retention policy can be modified or removed from the bucket via a storage.buckets.update operation. A locked retention policy cannot be removed or shortened in duration for the lifetime of the bucket. Attempting to remove or decrease the period of a locked retention policy will result in a PERMISSION_DENIED error.", - "properties": { - "effectiveTime": { - "description": "Server-determined value that indicates the time from which policy was enforced and effective. This value is in RFC 3339 format.", - "format": "date-time", - "type": "string" - }, - "isLocked": { - "description": "Once locked, an object retention policy cannot be modified.", - "type": "boolean" - }, - "retentionPeriod": { - "description": "The duration in seconds that objects need to be retained. Retention duration must be greater than zero and less than 100 years. Note that enforcement of retention periods less than a day is not guaranteed. Such periods should only be used for testing purposes.", - "format": "int64", - "type": "string" - } - }, - "type": "object" - }, - "rpo": { - "description": "The Recovery Point Objective (RPO) of this bucket. 
Set to ASYNC_TURBO to turn on Turbo Replication on a bucket.", - "type": "string" - }, - "satisfiesPZS": { - "description": "Reserved for future use.", - "type": "boolean" - }, - "selfLink": { - "description": "The URI of this bucket.", - "type": "string" - }, - "storageClass": { - "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", - "type": "string" - }, - "timeCreated": { - "description": "The creation time of the bucket in RFC 3339 format.", - "format": "date-time", - "type": "string" - }, - "updated": { - "description": "The modification time of the bucket in RFC 3339 format.", - "format": "date-time", - "type": "string" - }, - "versioning": { - "description": "The bucket's versioning configuration.", - "properties": { - "enabled": { - "description": "While set to true, versioning is fully enabled for this bucket.", - "type": "boolean" - } - }, - "type": "object" - }, - "website": { - "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.", - "properties": { - "mainPageSuffix": { - "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages.", - "type": "string" - }, - "notFoundPage": { - "description": "If the requested object path is missing, and any mainPageSuffix object is missing, if applicable, the service will return the named object from this bucket as the content for a 404 Not Found result.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "BucketAccessControl": { - "description": "An access-control entry.", - "id": "BucketAccessControl", - "properties": { - "bucket": { - "description": "The name of the bucket.", - "type": "string" - }, - "domain": { - "description": "The domain associated with the entity, if any.", - "type": "string" - }, - "email": { - "description": "The email address associated with the entity, if any.", - "type": "string" - }, - "entity": { - "annotations": { - "required": [ - "storage.bucketAccessControls.insert" - ] - }, - "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. 
\n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", - "type": "string" - }, - "entityId": { - "description": "The ID for the entity, if any.", - "type": "string" - }, - "etag": { - "description": "HTTP 1.1 Entity tag for the access-control entry.", - "type": "string" - }, - "id": { - "description": "The ID of the access-control entry.", - "type": "string" - }, - "kind": { - "default": "storage#bucketAccessControl", - "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.", - "type": "string" - }, - "projectTeam": { - "description": "The project team associated with the entity, if any.", - "properties": { - "projectNumber": { - "description": "The project number.", - "type": "string" - }, - "team": { - "description": "The team.", - "type": "string" - } - }, - "type": "object" - }, - "role": { - "annotations": { - "required": [ - "storage.bucketAccessControls.insert" - ] - }, - "description": "The access permission for the entity.", - "type": "string" - }, - "selfLink": { - "description": "The link to this access-control entry.", - "type": "string" - } - }, - "type": "object" - }, - "BucketAccessControls": { - "description": "An access-control list.", - "id": "BucketAccessControls", - "properties": { - "items": { - "description": "The list of items.", - "items": { - "$ref": "BucketAccessControl" - }, - "type": "array" - }, - "kind": { - "default": "storage#bucketAccessControls", - "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.", - "type": "string" - } - }, - "type": "object" - }, - "Buckets": { - "description": "A list of buckets.", - "id": "Buckets", - "properties": { - "items": { - "description": "The list of items.", - "items": { - "$ref": "Bucket" - }, - "type": "array" - }, - "kind": { - "default": "storage#buckets", - "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.", - "type": "string" - }, - "nextPageToken": { - "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", - "type": "string" - } - }, - "type": "object" - }, - "Channel": { - "description": "A notification channel used to watch for resource changes.", - "id": "Channel", - "properties": { - "address": { - "description": "The address where notifications are delivered for this channel.", - "type": "string" - }, - "expiration": { - "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", - "format": "int64", - "type": "string" - }, - "id": { - "description": "A UUID or similar unique string that identifies this channel.", - "type": "string" - }, - "kind": { - "default": "api#channel", - "description": "Identifies this as a notification channel used to watch for changes to a resource, which is \"api#channel\".", - "type": "string" - }, - "params": { - "additionalProperties": { - "description": "Declares a new parameter by name.", - "type": "string" - }, - "description": "Additional parameters controlling delivery channel behavior. Optional.", - "type": "object" - }, - "payload": { - "description": "A Boolean value to indicate whether payload is wanted. Optional.", - "type": "boolean" - }, - "resourceId": { - "description": "An opaque ID that identifies the resource being watched on this channel. 
Stable across different API versions.", - "type": "string" - }, - "resourceUri": { - "description": "A version-specific identifier for the watched resource.", - "type": "string" - }, - "token": { - "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional.", - "type": "string" - }, - "type": { - "description": "The type of delivery mechanism used for this channel.", - "type": "string" - } - }, - "type": "object" - }, - "ComposeRequest": { - "description": "A Compose request.", - "id": "ComposeRequest", - "properties": { - "destination": { - "$ref": "Object", - "description": "Properties of the resulting object." - }, - "kind": { - "default": "storage#composeRequest", - "description": "The kind of item this is.", - "type": "string" - }, - "sourceObjects": { - "annotations": { - "required": [ - "storage.objects.compose" - ] - }, - "description": "The list of source objects that will be concatenated into a single object.", - "items": { - "properties": { - "generation": { - "description": "The generation of this object to use as the source.", - "format": "int64", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "storage.objects.compose" - ] - }, - "description": "The source object's name. All source objects must reside in the same bucket.", - "type": "string" - }, - "objectPreconditions": { - "description": "Conditions that must be met for this operation to execute.", - "properties": { - "ifGenerationMatch": { - "description": "Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.", - "format": "int64", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "Expr": { - "description": "Represents an expression text. Example: title: \"User account presence\" description: \"Determines whether the request has a user account\" expression: \"size(request.user) \u003e 0\"", - "id": "Expr", - "properties": { - "description": { - "description": "An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", - "type": "string" - }, - "expression": { - "description": "Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported.", - "type": "string" - }, - "location": { - "description": "An optional string indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", - "type": "string" - }, - "title": { - "description": "An optional title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", - "type": "string" - } - }, - "type": "object" - }, - "HmacKey": { - "description": "JSON template to produce a JSON-style HMAC Key resource for Create responses.", - "id": "HmacKey", - "properties": { - "kind": { - "default": "storage#hmacKey", - "description": "The kind of item this is. For HMAC keys, this is always storage#hmacKey.", - "type": "string" - }, - "metadata": { - "$ref": "HmacKeyMetadata", - "description": "Key metadata." 
- }, - "secret": { - "description": "HMAC secret key material.", - "type": "string" - } - }, - "type": "object" - }, - "HmacKeyMetadata": { - "description": "JSON template to produce a JSON-style HMAC Key metadata resource.", - "id": "HmacKeyMetadata", - "properties": { - "accessId": { - "description": "The ID of the HMAC Key.", - "type": "string" - }, - "etag": { - "description": "HTTP 1.1 Entity tag for the HMAC key.", - "type": "string" - }, - "id": { - "description": "The ID of the HMAC key, including the Project ID and the Access ID.", - "type": "string" - }, - "kind": { - "default": "storage#hmacKeyMetadata", - "description": "The kind of item this is. For HMAC Key metadata, this is always storage#hmacKeyMetadata.", - "type": "string" - }, - "projectId": { - "description": "Project ID owning the service account to which the key authenticates.", - "type": "string" - }, - "selfLink": { - "description": "The link to this resource.", - "type": "string" - }, - "serviceAccountEmail": { - "description": "The email address of the key's associated service account.", - "type": "string" - }, - "state": { - "description": "The state of the key. Can be one of ACTIVE, INACTIVE, or DELETED.", - "type": "string" - }, - "timeCreated": { - "description": "The creation time of the HMAC key in RFC 3339 format.", - "format": "date-time", - "type": "string" - }, - "updated": { - "description": "The last modification time of the HMAC key metadata in RFC 3339 format.", - "format": "date-time", - "type": "string" - } - }, - "type": "object" - }, - "HmacKeysMetadata": { - "description": "A list of hmacKeys.", - "id": "HmacKeysMetadata", - "properties": { - "items": { - "description": "The list of items.", - "items": { - "$ref": "HmacKeyMetadata" - }, - "type": "array" - }, - "kind": { - "default": "storage#hmacKeysMetadata", - "description": "The kind of item this is. For lists of hmacKeys, this is always storage#hmacKeysMetadata.", - "type": "string" - }, - "nextPageToken": { - "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", - "type": "string" - } - }, - "type": "object" - }, - "Notification": { - "description": "A subscription to receive Google PubSub notifications.", - "id": "Notification", - "properties": { - "custom_attributes": { - "additionalProperties": { - "type": "string" - }, - "description": "An optional list of additional attributes to attach to each Cloud PubSub message published for this notification subscription.", - "type": "object" - }, - "etag": { - "description": "HTTP 1.1 Entity tag for this subscription notification.", - "type": "string" - }, - "event_types": { - "description": "If present, only send notifications about listed event types. If empty, sent notifications for all event types.", - "items": { - "type": "string" - }, - "type": "array" - }, - "id": { - "description": "The ID of the notification.", - "type": "string" - }, - "kind": { - "default": "storage#notification", - "description": "The kind of item this is. 
For notifications, this is always storage#notification.", - "type": "string" - }, - "object_name_prefix": { - "description": "If present, only apply this notification configuration to object names that begin with this prefix.", - "type": "string" - }, - "payload_format": { - "annotations": { - "required": [ - "storage.notifications.insert" - ] - }, - "default": "JSON_API_V1", - "description": "The desired content of the Payload.", - "type": "string" - }, - "selfLink": { - "description": "The canonical URL of this notification.", - "type": "string" - }, - "topic": { - "annotations": { - "required": [ - "storage.notifications.insert" - ] - }, - "description": "The Cloud PubSub topic to which this subscription publishes. Formatted as: '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'", - "type": "string" - } - }, - "type": "object" - }, - "Notifications": { - "description": "A list of notification subscriptions.", - "id": "Notifications", - "properties": { - "items": { - "description": "The list of items.", - "items": { - "$ref": "Notification" - }, - "type": "array" - }, - "kind": { - "default": "storage#notifications", - "description": "The kind of item this is. For lists of notifications, this is always storage#notifications.", - "type": "string" - } - }, - "type": "object" - }, - "Object": { - "description": "An object.", - "id": "Object", - "properties": { - "acl": { - "annotations": { - "required": [ - "storage.objects.update" - ] - }, - "description": "Access controls on the object.", - "items": { - "$ref": "ObjectAccessControl" - }, - "type": "array" - }, - "bucket": { - "description": "The name of the bucket containing this object.", - "type": "string" - }, - "cacheControl": { - "description": "Cache-Control directive for the object data. If omitted, and the object is accessible to all anonymous users, the default will be public, max-age=3600.", - "type": "string" - }, - "componentCount": { - "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.", - "format": "int32", - "type": "integer" - }, - "contentDisposition": { - "description": "Content-Disposition of the object data.", - "type": "string" - }, - "contentEncoding": { - "description": "Content-Encoding of the object data.", - "type": "string" - }, - "contentLanguage": { - "description": "Content-Language of the object data.", - "type": "string" - }, - "contentType": { - "description": "Content-Type of the object data. If an object is stored without a Content-Type, it is served as application/octet-stream.", - "type": "string" - }, - "crc32c": { - "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. 
For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices.", - "type": "string" - }, - "customTime": { - "description": "A timestamp in RFC 3339 format specified by the user for an object.", - "format": "date-time", - "type": "string" - }, - "customerEncryption": { - "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.", - "properties": { - "encryptionAlgorithm": { - "description": "The encryption algorithm.", - "type": "string" - }, - "keySha256": { - "description": "SHA256 hash value of the encryption key.", - "type": "string" - } - }, - "type": "object" - }, - "etag": { - "description": "HTTP 1.1 Entity tag for the object.", - "type": "string" - }, - "eventBasedHold": { - "description": "Whether an object is under event-based hold. Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). After being released (set to false), such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is the loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false.", - "type": "boolean" - }, - "generation": { - "description": "The content generation of this object. Used for object versioning.", - "format": "int64", - "type": "string" - }, - "id": { - "description": "The ID of the object, including the bucket name, object name, and generation number.", - "type": "string" - }, - "kind": { - "default": "storage#object", - "description": "The kind of item this is. For objects, this is always storage#object.", - "type": "string" - }, - "kmsKeyName": { - "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", - "type": "string" - }, - "md5Hash": { - "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices.", - "type": "string" - }, - "mediaLink": { - "description": "Media download link.", - "type": "string" - }, - "metadata": { - "additionalProperties": { - "description": "An individual metadata entry.", - "type": "string" - }, - "description": "User-provided metadata, in key/value pairs.", - "type": "object" - }, - "metageneration": { - "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.", - "format": "int64", - "type": "string" - }, - "name": { - "description": "The name of the object. Required if not specified by URL parameter.", - "type": "string" - }, - "owner": { - "description": "The owner of the object. 
This will always be the uploader of the object.", - "properties": { - "entity": { - "description": "The entity, in the form user-userId.", - "type": "string" - }, - "entityId": { - "description": "The ID for the entity.", - "type": "string" - } - }, - "type": "object" - }, - "retentionExpirationTime": { - "description": "A server-determined value that specifies the earliest time that the object's retention period expires. This value is in RFC 3339 format. Note 1: This field is not provided for objects with an active event-based hold, since retention expiration is unknown until the hold is removed. Note 2: This value can be provided even when temporary hold is set (so that the user can reason about policy without having to first unset the temporary hold).", - "format": "date-time", - "type": "string" - }, - "selfLink": { - "description": "The link to this object.", - "type": "string" - }, - "size": { - "description": "Content-Length of the data in bytes.", - "format": "uint64", - "type": "string" - }, - "storageClass": { - "description": "Storage class of the object.", - "type": "string" - }, - "temporaryHold": { - "description": "Whether an object is under temporary hold. While this flag is set to true, the object is protected against deletion and overwrites. A common use case of this flag is regulatory investigations where objects need to be retained while the investigation is ongoing. Note that unlike event-based hold, temporary hold does not impact retention expiration time of an object.", - "type": "boolean" - }, - "timeCreated": { - "description": "The creation time of the object in RFC 3339 format.", - "format": "date-time", - "type": "string" - }, - "timeDeleted": { - "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", - "format": "date-time", - "type": "string" - }, - "timeStorageClassUpdated": { - "description": "The time at which the object's storage class was last changed. When the object is initially created, it will be set to timeCreated.", - "format": "date-time", - "type": "string" - }, - "updated": { - "description": "The modification time of the object metadata in RFC 3339 format. Set initially to object creation time and then updated whenever any metadata of the object changes. This includes changes made by a requester, such as modifying custom metadata, as well as changes made by Cloud Storage on behalf of a requester, such as changing the storage class based on an Object Lifecycle Configuration.", - "format": "date-time", - "type": "string" - } - }, - "type": "object" - }, - "ObjectAccessControl": { - "description": "An access-control entry.", - "id": "ObjectAccessControl", - "properties": { - "bucket": { - "description": "The name of the bucket.", - "type": "string" - }, - "domain": { - "description": "The domain associated with the entity, if any.", - "type": "string" - }, - "email": { - "description": "The email address associated with the entity, if any.", - "type": "string" - }, - "entity": { - "annotations": { - "required": [ - "storage.defaultObjectAccessControls.insert", - "storage.objectAccessControls.insert" - ] - }, - "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. 
\n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", - "type": "string" - }, - "entityId": { - "description": "The ID for the entity, if any.", - "type": "string" - }, - "etag": { - "description": "HTTP 1.1 Entity tag for the access-control entry.", - "type": "string" - }, - "generation": { - "description": "The content generation of the object, if applied to an object.", - "format": "int64", - "type": "string" - }, - "id": { - "description": "The ID of the access-control entry.", - "type": "string" - }, - "kind": { - "default": "storage#objectAccessControl", - "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.", - "type": "string" - }, - "object": { - "description": "The name of the object, if applied to an object.", - "type": "string" - }, - "projectTeam": { - "description": "The project team associated with the entity, if any.", - "properties": { - "projectNumber": { - "description": "The project number.", - "type": "string" - }, - "team": { - "description": "The team.", - "type": "string" - } - }, - "type": "object" - }, - "role": { - "annotations": { - "required": [ - "storage.defaultObjectAccessControls.insert", - "storage.objectAccessControls.insert" - ] - }, - "description": "The access permission for the entity.", - "type": "string" - }, - "selfLink": { - "description": "The link to this access-control entry.", - "type": "string" - } - }, - "type": "object" - }, - "ObjectAccessControls": { - "description": "An access-control list.", - "id": "ObjectAccessControls", - "properties": { - "items": { - "description": "The list of items.", - "items": { - "$ref": "ObjectAccessControl" - }, - "type": "array" - }, - "kind": { - "default": "storage#objectAccessControls", - "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.", - "type": "string" - } - }, - "type": "object" - }, - "Objects": { - "description": "A list of objects.", - "id": "Objects", - "properties": { - "items": { - "description": "The list of items.", - "items": { - "$ref": "Object" - }, - "type": "array" - }, - "kind": { - "default": "storage#objects", - "description": "The kind of item this is. For lists of objects, this is always storage#objects.", - "type": "string" - }, - "nextPageToken": { - "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", - "type": "string" - }, - "prefixes": { - "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "Policy": { - "description": "A bucket/object IAM policy.", - "id": "Policy", - "properties": { - "bindings": { - "annotations": { - "required": [ - "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" - ] - }, - "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.", - "items": { - "properties": { - "condition": { - "$ref": "Expr", - "description": "The condition that is associated with this binding. NOTE: an unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." 
- }, - "members": { - "annotations": { - "required": [ - "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" - ] - }, - "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project", - "items": { - "type": "string" - }, - "type": "array" - }, - "role": { - "annotations": { - "required": [ - "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" - ] - }, - "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. \n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "etag": { - "description": "HTTP 1.1 Entity tag for the policy.", - "format": "byte", - "type": "string" - }, - "kind": { - "default": "storage#policy", - "description": "The kind of item this is. For policies, this is always storage#policy. This field is ignored on input.", - "type": "string" - }, - "resourceId": { - "description": "The ID of the resource to which this policy belongs. 
Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", - "type": "string" - }, - "version": { - "description": "The IAM policy format version.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "RewriteResponse": { - "description": "A rewrite response.", - "id": "RewriteResponse", - "properties": { - "done": { - "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response.", - "type": "boolean" - }, - "kind": { - "default": "storage#rewriteResponse", - "description": "The kind of item this is.", - "type": "string" - }, - "objectSize": { - "description": "The total size of the object being copied in bytes. This property is always present in the response.", - "format": "int64", - "type": "string" - }, - "resource": { - "$ref": "Object", - "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes." - }, - "rewriteToken": { - "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy.", - "type": "string" - }, - "totalBytesRewritten": { - "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.", - "format": "int64", - "type": "string" - } - }, - "type": "object" - }, - "ServiceAccount": { - "description": "A subscription to receive Google PubSub notifications.", - "id": "ServiceAccount", - "properties": { - "email_address": { - "description": "The ID of the notification.", - "type": "string" - }, - "kind": { - "default": "storage#serviceAccount", - "description": "The kind of item this is. For notifications, this is always storage#notification.", - "type": "string" - } - }, - "type": "object" - }, - "TestIamPermissionsResponse": { - "description": "A storage.(buckets|objects).testIamPermissions response.", - "id": "TestIamPermissionsResponse", - "properties": { - "kind": { - "default": "storage#testIamPermissionsResponse", - "description": "The kind of item this is.", - "type": "string" - }, - "permissions": { - "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets or objects. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. 
\n- storage.objects.update — Update object metadata.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - } - }, - "servicePath": "storage/v1/", - "title": "Cloud Storage JSON API", - "version": "v1" -} \ No newline at end of file diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go deleted file mode 100644 index e11bf2e6d3..0000000000 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ /dev/null @@ -1,13283 +0,0 @@ -// Copyright 2023 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated file. DO NOT EDIT. - -// Package storage provides access to the Cloud Storage JSON API. -// -// This package is DEPRECATED. Use package cloud.google.com/go/storage instead. -// -// For product documentation, see: https://developers.google.com/storage/docs/json_api/ -// -// # Creating a client -// -// Usage example: -// -// import "google.golang.org/api/storage/v1" -// ... -// ctx := context.Background() -// storageService, err := storage.NewService(ctx) -// -// In this example, Google Application Default Credentials are used for authentication. -// -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. -// -// # Other authentication options -// -// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: -// -// storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) -// -// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: -// -// storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) -// -// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: -// -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) -// -// See https://godoc.org/google.golang.org/api/option/ for details on options. -package storage // import "google.golang.org/api/storage/v1" - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/googleapis/gax-go/v2" - googleapi "google.golang.org/api/googleapi" - internal "google.golang.org/api/internal" - gensupport "google.golang.org/api/internal/gensupport" - option "google.golang.org/api/option" - internaloption "google.golang.org/api/option/internaloption" - htransport "google.golang.org/api/transport/http" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled -var _ = internaloption.WithDefaultEndpoint -var _ = internal.Version - -const apiId = "storage:v1" -const apiName = "storage" -const apiVersion = "v1" -const basePath = "https://storage.googleapis.com/storage/v1/" -const mtlsBasePath = "https://storage.mtls.googleapis.com/storage/v1/" - -// OAuth2 scopes used by this API. 
-const ( - // View and manage your data across Google Cloud Platform services - CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - - // View your data across Google Cloud Platform services - CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" - - // Manage your data and permissions in Google Cloud Storage - DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" - - // View your data in Google Cloud Storage - DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" - - // Manage your data in Google Cloud Storage - DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" -) - -// NewService creates a new Service. -func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { - scopesOption := internaloption.WithDefaultScopes( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write", - ) - // NOTE: prepend, so we don't override user-specified scopes. - opts = append([]option.ClientOption{scopesOption}, opts...) - opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) - opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) - client, endpoint, err := htransport.NewClient(ctx, opts...) - if err != nil { - return nil, err - } - s, err := New(client) - if err != nil { - return nil, err - } - if endpoint != "" { - s.BasePath = endpoint - } - return s, nil -} - -// New creates a new Service. It uses the provided http.Client for requests. -// -// Deprecated: please use NewService instead. -// To provide a custom HTTP client, use option.WithHTTPClient. -// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
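// Editor's note: a hedged usage sketch for the deprecated package deleted in
// this hunk, assembled from the doc comments and schemas above. Bucket and
// object names are hypothetical. The closing lines show the replacement
// package (cloud.google.com/go/storage) that the deprecation notice points to.
package main

import (
	"context"
	"fmt"
	"log"

	gcs "cloud.google.com/go/storage"
	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()

	// Construct the service with a narrowed scope instead of the defaults.
	svc, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope))
	if err != nil {
		log.Fatal(err)
	}

	// Read bucket metadata.
	b, err := svc.Buckets.Get("example-bucket").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(b.Location, b.StorageClass)

	// Server-side copy via the rewrite protocol: loop until Done, feeding the
	// returned rewriteToken back in (cf. the RewriteResponse schema above).
	var token string
	for {
		call := svc.Objects.Rewrite("example-bucket", "big.bin", "example-bucket-copy", "big.bin", &storage.Object{})
		if token != "" {
			call = call.RewriteToken(token)
		}
		resp, err := call.Do()
		if err != nil {
			log.Fatal(err)
		}
		if resp.Done {
			break
		}
		token = resp.RewriteToken
	}

	// The recommended replacement after this deletion.
	client, err := gcs.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	attrs, err := client.Bucket("example-bucket").Attrs(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(attrs.Location)
}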
-func New(client *http.Client) (*Service, error) { - if client == nil { - return nil, errors.New("client is nil") - } - s := &Service{client: client, BasePath: basePath} - s.BucketAccessControls = NewBucketAccessControlsService(s) - s.Buckets = NewBucketsService(s) - s.Channels = NewChannelsService(s) - s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) - s.Notifications = NewNotificationsService(s) - s.ObjectAccessControls = NewObjectAccessControlsService(s) - s.Objects = NewObjectsService(s) - s.Projects = NewProjectsService(s) - return s, nil -} - -type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - BucketAccessControls *BucketAccessControlsService - - Buckets *BucketsService - - Channels *ChannelsService - - DefaultObjectAccessControls *DefaultObjectAccessControlsService - - Notifications *NotificationsService - - ObjectAccessControls *ObjectAccessControlsService - - Objects *ObjectsService - - Projects *ProjectsService -} - -func (s *Service) userAgent() string { - if s.UserAgent == "" { - return googleapi.UserAgent - } - return googleapi.UserAgent + " " + s.UserAgent -} - -func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { - rs := &BucketAccessControlsService{s: s} - return rs -} - -type BucketAccessControlsService struct { - s *Service -} - -func NewBucketsService(s *Service) *BucketsService { - rs := &BucketsService{s: s} - return rs -} - -type BucketsService struct { - s *Service -} - -func NewChannelsService(s *Service) *ChannelsService { - rs := &ChannelsService{s: s} - return rs -} - -type ChannelsService struct { - s *Service -} - -func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService { - rs := &DefaultObjectAccessControlsService{s: s} - return rs -} - -type DefaultObjectAccessControlsService struct { - s *Service -} - -func NewNotificationsService(s *Service) *NotificationsService { - rs := &NotificationsService{s: s} - return rs -} - -type NotificationsService struct { - s *Service -} - -func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService { - rs := &ObjectAccessControlsService{s: s} - return rs -} - -type ObjectAccessControlsService struct { - s *Service -} - -func NewObjectsService(s *Service) *ObjectsService { - rs := &ObjectsService{s: s} - return rs -} - -type ObjectsService struct { - s *Service -} - -func NewProjectsService(s *Service) *ProjectsService { - rs := &ProjectsService{s: s} - rs.HmacKeys = NewProjectsHmacKeysService(s) - rs.ServiceAccount = NewProjectsServiceAccountService(s) - return rs -} - -type ProjectsService struct { - s *Service - - HmacKeys *ProjectsHmacKeysService - - ServiceAccount *ProjectsServiceAccountService -} - -func NewProjectsHmacKeysService(s *Service) *ProjectsHmacKeysService { - rs := &ProjectsHmacKeysService{s: s} - return rs -} - -type ProjectsHmacKeysService struct { - s *Service -} - -func NewProjectsServiceAccountService(s *Service) *ProjectsServiceAccountService { - rs := &ProjectsServiceAccountService{s: s} - return rs -} - -type ProjectsServiceAccountService struct { - s *Service -} - -// Bucket: A bucket. -type Bucket struct { - // Acl: Access controls on the bucket. - Acl []*BucketAccessControl `json:"acl,omitempty"` - - // Autoclass: The bucket's Autoclass configuration. - Autoclass *BucketAutoclass `json:"autoclass,omitempty"` - - // Billing: The bucket's billing configuration. 
- Billing *BucketBilling `json:"billing,omitempty"` - - // Cors: The bucket's Cross-Origin Resource Sharing (CORS) - // configuration. - Cors []*BucketCors `json:"cors,omitempty"` - - // CustomPlacementConfig: The bucket's custom placement configuration - // for Custom Dual Regions. - CustomPlacementConfig *BucketCustomPlacementConfig `json:"customPlacementConfig,omitempty"` - - // DefaultEventBasedHold: The default value for event-based hold on - // newly created objects in this bucket. Event-based hold is a way to - // retain objects indefinitely until an event occurs, signified by the - // hold's release. After being released, such objects will be subject to - // bucket-level retention (if any). One sample use case of this flag is - // for banks to hold loan documents for at least 3 years after loan is - // paid in full. Here, bucket-level retention is 3 years and the event - // is loan being paid in full. In this example, these objects will be - // held intact for any number of years until the event has occurred - // (event-based hold on the object is released) and then 3 more years - // after that. That means retention duration of the objects begins from - // the moment event-based hold transitioned from true to false. Objects - // under event-based hold cannot be deleted, overwritten or archived - // until the hold is removed. - DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"` - - // DefaultObjectAcl: Default access controls to apply to new objects - // when no ACL is provided. - DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"` - - // Encryption: Encryption configuration for a bucket. - Encryption *BucketEncryption `json:"encryption,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the bucket. - Etag string `json:"etag,omitempty"` - - // IamConfiguration: The bucket's IAM configuration. - IamConfiguration *BucketIamConfiguration `json:"iamConfiguration,omitempty"` - - // Id: The ID of the bucket. For buckets, the id and name properties are - // the same. - Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For buckets, this is always - // storage#bucket. - Kind string `json:"kind,omitempty"` - - // Labels: User-provided labels, in key/value pairs. - Labels map[string]string `json:"labels,omitempty"` - - // Lifecycle: The bucket's lifecycle configuration. See lifecycle - // management for more information. - Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` - - // Location: The location of the bucket. Object data for objects in the - // bucket resides in physical storage within this region. Defaults to - // US. See the developer's guide for the authoritative list. - Location string `json:"location,omitempty"` - - // LocationType: The type of the bucket location. - LocationType string `json:"locationType,omitempty"` - - // Logging: The bucket's logging configuration, which defines the - // destination bucket and optional name prefix for the current bucket's - // logs. - Logging *BucketLogging `json:"logging,omitempty"` - - // Metageneration: The metadata generation of this bucket. - Metageneration int64 `json:"metageneration,omitempty,string"` - - // Name: The name of the bucket. - Name string `json:"name,omitempty"` - - // Owner: The owner of the bucket. This is always the project team's - // owner group. - Owner *BucketOwner `json:"owner,omitempty"` - - // ProjectNumber: The project number of the project the bucket belongs - // to. 
- ProjectNumber uint64 `json:"projectNumber,omitempty,string"` - - // RetentionPolicy: The bucket's retention policy. The retention policy - // enforces a minimum retention time for all objects contained in the - // bucket, based on their creation time. Any attempt to overwrite or - // delete objects younger than the retention period will result in a - // PERMISSION_DENIED error. An unlocked retention policy can be modified - // or removed from the bucket via a storage.buckets.update operation. A - // locked retention policy cannot be removed or shortened in duration - // for the lifetime of the bucket. Attempting to remove or decrease - // period of a locked retention policy will result in a - // PERMISSION_DENIED error. - RetentionPolicy *BucketRetentionPolicy `json:"retentionPolicy,omitempty"` - - // Rpo: The Recovery Point Objective (RPO) of this bucket. Set to - // ASYNC_TURBO to turn on Turbo Replication on a bucket. - Rpo string `json:"rpo,omitempty"` - - // SatisfiesPZS: Reserved for future use. - SatisfiesPZS bool `json:"satisfiesPZS,omitempty"` - - // SelfLink: The URI of this bucket. - SelfLink string `json:"selfLink,omitempty"` - - // StorageClass: The bucket's default storage class, used whenever no - // storageClass is specified for a newly-created object. This defines - // how objects in the bucket are stored and determines the SLA and the - // cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, - // NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If - // this value is not specified when the bucket is created, it will - // default to STANDARD. For more information, see storage classes. - StorageClass string `json:"storageClass,omitempty"` - - // TimeCreated: The creation time of the bucket in RFC 3339 format. - TimeCreated string `json:"timeCreated,omitempty"` - - // Updated: The modification time of the bucket in RFC 3339 format. - Updated string `json:"updated,omitempty"` - - // Versioning: The bucket's versioning configuration. - Versioning *BucketVersioning `json:"versioning,omitempty"` - - // Website: The bucket's website configuration, controlling how the - // service behaves when accessing bucket contents as a web site. See the - // Static Website Examples for more information. - Website *BucketWebsite `json:"website,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Acl") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Acl") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *Bucket) MarshalJSON() ([]byte, error) { - type NoMethod Bucket - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketAutoclass: The bucket's Autoclass configuration. -type BucketAutoclass struct { - // Enabled: Whether or not Autoclass is enabled on this bucket - Enabled bool `json:"enabled,omitempty"` - - // ToggleTime: A date and time in RFC 3339 format representing the - // instant at which "enabled" was last toggled. - ToggleTime string `json:"toggleTime,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enabled") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Enabled") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketAutoclass) MarshalJSON() ([]byte, error) { - type NoMethod BucketAutoclass - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketBilling: The bucket's billing configuration. -type BucketBilling struct { - // RequesterPays: When set to true, Requester Pays is enabled for this - // bucket. - RequesterPays bool `json:"requesterPays,omitempty"` - - // ForceSendFields is a list of field names (e.g. "RequesterPays") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "RequesterPays") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketBilling) MarshalJSON() ([]byte, error) { - type NoMethod BucketBilling - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type BucketCors struct { - // MaxAgeSeconds: The value, in seconds, to return in the - // Access-Control-Max-Age header used in preflight responses. - MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` - - // Method: The list of HTTP methods on which to include CORS response - // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list - // of methods, and means "any method". 
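// Editor's note: two hedged sketches for conventions that recur throughout
// the generated code above. First, the ForceSendFields/NullFields pattern
// from the Bucket doc comments, with illustrative field choices; second, why
// MarshalJSON converts through a `type NoMethod X` alias: the conversion
// sheds the method set, so re-encoding the copy cannot recurse back into the
// custom marshaler.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	patch := &storage.Bucket{
		Labels:          map[string]string{}, // empty values are normally omitted
		ForceSendFields: []string{"Labels"},  // force the empty map into the body
		NullFields:      []string{"Website"}, // serialize an explicit JSON null
	}
	// json.Marshal dispatches to Bucket.MarshalJSON, which converts to the
	// method-free NoMethod alias and lets gensupport.MarshalJSON apply the
	// ForceSendFields/NullFields overrides without infinite recursion.
	out, err := json.Marshal(patch)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // e.g. {"labels":{},"website":null}
}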
- Method []string `json:"method,omitempty"` - - // Origin: The list of Origins eligible to receive CORS response - // headers. Note: "*" is permitted in the list of origins, and means - // "any Origin". - Origin []string `json:"origin,omitempty"` - - // ResponseHeader: The list of HTTP headers other than the simple - // response headers to give permission for the user-agent to share - // across domains. - ResponseHeader []string `json:"responseHeader,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MaxAgeSeconds") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketCors) MarshalJSON() ([]byte, error) { - type NoMethod BucketCors - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketCustomPlacementConfig: The bucket's custom placement -// configuration for Custom Dual Regions. -type BucketCustomPlacementConfig struct { - // DataLocations: The list of regional locations in which data is - // placed. - DataLocations []string `json:"dataLocations,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DataLocations") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DataLocations") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketCustomPlacementConfig) MarshalJSON() ([]byte, error) { - type NoMethod BucketCustomPlacementConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketEncryption: Encryption configuration for a bucket. -type BucketEncryption struct { - // DefaultKmsKeyName: A Cloud KMS key that will be used to encrypt - // objects inserted into this bucket, if no encryption method is - // specified. - DefaultKmsKeyName string `json:"defaultKmsKeyName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DefaultKmsKeyName") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. 
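// Editor's note: a hedged sketch composing the BucketCors type above; the
// origin, methods, and header values are hypothetical.
package main

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	cors := []*storage.BucketCors{{
		MaxAgeSeconds:  3600,                            // cache preflight responses for an hour
		Method:         []string{"GET", "HEAD"},         // "*" would mean any method
		Origin:         []string{"https://example.com"}, // "*" would mean any origin
		ResponseHeader: []string{"Content-Type"},
	}}
	fmt.Println(len(cors))
}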
However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DefaultKmsKeyName") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *BucketEncryption) MarshalJSON() ([]byte, error) { - type NoMethod BucketEncryption - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketIamConfiguration: The bucket's IAM configuration. -type BucketIamConfiguration struct { - // BucketPolicyOnly: The bucket's uniform bucket-level access - // configuration. The feature was formerly known as Bucket Policy Only. - // For backward compatibility, this field will be populated with - // identical information as the uniformBucketLevelAccess field. We - // recommend using the uniformBucketLevelAccess field to enable and - // disable the feature. - BucketPolicyOnly *BucketIamConfigurationBucketPolicyOnly `json:"bucketPolicyOnly,omitempty"` - - // PublicAccessPrevention: The bucket's Public Access Prevention - // configuration. Currently, 'inherited' and 'enforced' are supported. - PublicAccessPrevention string `json:"publicAccessPrevention,omitempty"` - - // UniformBucketLevelAccess: The bucket's uniform bucket-level access - // configuration. - UniformBucketLevelAccess *BucketIamConfigurationUniformBucketLevelAccess `json:"uniformBucketLevelAccess,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BucketPolicyOnly") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BucketPolicyOnly") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *BucketIamConfiguration) MarshalJSON() ([]byte, error) { - type NoMethod BucketIamConfiguration - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketIamConfigurationBucketPolicyOnly: The bucket's uniform -// bucket-level access configuration. The feature was formerly known as -// Bucket Policy Only. For backward compatibility, this field will be -// populated with identical information as the uniformBucketLevelAccess -// field. We recommend using the uniformBucketLevelAccess field to -// enable and disable the feature. 
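// Editor's note: a hedged sketch of the BucketIamConfiguration type above,
// using the recommended uniformBucketLevelAccess field rather than the legacy
// bucketPolicyOnly alias; values are illustrative.
package main

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	cfg := &storage.BucketIamConfiguration{
		PublicAccessPrevention: "enforced", // the other supported value is "inherited"
		UniformBucketLevelAccess: &storage.BucketIamConfigurationUniformBucketLevelAccess{
			Enabled: true, // ACLs stop applying; only IAM policies control access
		},
	}
	fmt.Println(cfg.PublicAccessPrevention)
}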
-type BucketIamConfigurationBucketPolicyOnly struct { - // Enabled: If set, access is controlled only by bucket-level or above - // IAM policies. - Enabled bool `json:"enabled,omitempty"` - - // LockedTime: The deadline for changing - // iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC - // 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed - // from true to false until the locked time, after which the field is - // immutable. - LockedTime string `json:"lockedTime,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enabled") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Enabled") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) { - type NoMethod BucketIamConfigurationBucketPolicyOnly - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketIamConfigurationUniformBucketLevelAccess: The bucket's uniform -// bucket-level access configuration. -type BucketIamConfigurationUniformBucketLevelAccess struct { - // Enabled: If set, access is controlled only by bucket-level or above - // IAM policies. - Enabled bool `json:"enabled,omitempty"` - - // LockedTime: The deadline for changing - // iamConfiguration.uniformBucketLevelAccess.enabled from true to false - // in RFC 3339 format. - // iamConfiguration.uniformBucketLevelAccess.enabled may be changed from - // true to false until the locked time, after which the field is - // immutable. - LockedTime string `json:"lockedTime,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enabled") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Enabled") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, error) { - type NoMethod BucketIamConfigurationUniformBucketLevelAccess - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle -// management for more information. -type BucketLifecycle struct { - // Rule: A lifecycle management rule, which is made of an action to take - // and the condition(s) under which the action will be taken. - Rule []*BucketLifecycleRule `json:"rule,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Rule") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Rule") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { - type NoMethod BucketLifecycle - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type BucketLifecycleRule struct { - // Action: The action to take. - Action *BucketLifecycleRuleAction `json:"action,omitempty"` - - // Condition: The condition(s) under which the action will be taken. - Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Action") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Action") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { - type NoMethod BucketLifecycleRule - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketLifecycleRuleAction: The action to take. -type BucketLifecycleRuleAction struct { - // StorageClass: Target storage class. Required iff the type of the - // action is SetStorageClass. - StorageClass string `json:"storageClass,omitempty"` - - // Type: Type of the action. Currently, only Delete, SetStorageClass, - // and AbortIncompleteMultipartUpload are supported. 
- Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "StorageClass") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "StorageClass") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { - type NoMethod BucketLifecycleRuleAction - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketLifecycleRuleCondition: The condition(s) under which the action -// will be taken. -type BucketLifecycleRuleCondition struct { - // Age: Age of an object (in days). This condition is satisfied when an - // object reaches the specified age. - Age *int64 `json:"age,omitempty"` - - // CreatedBefore: A date in RFC 3339 format with only the date part (for - // instance, "2013-01-15"). This condition is satisfied when an object - // is created before midnight of the specified date in UTC. - CreatedBefore string `json:"createdBefore,omitempty"` - - // CustomTimeBefore: A date in RFC 3339 format with only the date part - // (for instance, "2013-01-15"). This condition is satisfied when the - // custom time on an object is before this date in UTC. - CustomTimeBefore string `json:"customTimeBefore,omitempty"` - - // DaysSinceCustomTime: Number of days elapsed since the user-specified - // timestamp set on an object. The condition is satisfied if the days - // elapsed is at least this number. If no custom timestamp is specified - // on an object, the condition does not apply. - DaysSinceCustomTime int64 `json:"daysSinceCustomTime,omitempty"` - - // DaysSinceNoncurrentTime: Number of days elapsed since the noncurrent - // timestamp of an object. The condition is satisfied if the days - // elapsed is at least this number. This condition is relevant only for - // versioned objects. The value of the field must be a nonnegative - // integer. If it's zero, the object version will become eligible for - // Lifecycle action as soon as it becomes noncurrent. - DaysSinceNoncurrentTime int64 `json:"daysSinceNoncurrentTime,omitempty"` - - // IsLive: Relevant only for versioned objects. If the value is true, - // this condition matches live objects; if the value is false, it - // matches archived objects. - IsLive *bool `json:"isLive,omitempty"` - - // MatchesPattern: A regular expression that satisfies the RE2 syntax. - // This condition is satisfied when the name of the object matches the - // RE2 pattern. Note: This feature is currently in the "Early Access" - // launch stage and is only available to a whitelisted set of users; - // that means that this feature may be changed in backward-incompatible - // ways and that it is not guaranteed to be released. 
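// Editor's note: a hedged sketch composing the lifecycle types above: delete
// noncurrent object versions 30 days after they become noncurrent. The
// numbers are illustrative.
package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	lifecycle := &storage.BucketLifecycle{
		Rule: []*storage.BucketLifecycleRule{{
			Action: &storage.BucketLifecycleRuleAction{Type: "Delete"},
			Condition: &storage.BucketLifecycleRuleCondition{
				DaysSinceNoncurrentTime: 30,
				IsLive:                  googleapi.Bool(false), // archived versions only
			},
		}},
	}
	fmt.Println(len(lifecycle.Rule))
}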
- MatchesPattern string `json:"matchesPattern,omitempty"` - - // MatchesPrefix: List of object name prefixes. This condition will be - // satisfied when at least one of the prefixes exactly matches the - // beginning of the object name. - MatchesPrefix []string `json:"matchesPrefix,omitempty"` - - // MatchesStorageClass: Objects having any of the storage classes - // specified by this condition will be matched. Values include - // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and - // DURABLE_REDUCED_AVAILABILITY. - MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` - - // MatchesSuffix: List of object name suffixes. This condition will be - // satisfied when at least one of the suffixes exactly matches the end - // of the object name. - MatchesSuffix []string `json:"matchesSuffix,omitempty"` - - // NoncurrentTimeBefore: A date in RFC 3339 format with only the date - // part (for instance, "2013-01-15"). This condition is satisfied when - // the noncurrent time on an object is before this date in UTC. This - // condition is relevant only for versioned objects. - NoncurrentTimeBefore string `json:"noncurrentTimeBefore,omitempty"` - - // NumNewerVersions: Relevant only for versioned objects. If the value - // is N, this condition is satisfied when there are at least N versions - // (including the live version) newer than this version of the object. - NumNewerVersions int64 `json:"numNewerVersions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Age") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Age") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { - type NoMethod BucketLifecycleRuleCondition - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketLogging: The bucket's logging configuration, which defines the -// destination bucket and optional name prefix for the current bucket's -// logs. -type BucketLogging struct { - // LogBucket: The destination bucket where the current bucket's logs - // should be placed. - LogBucket string `json:"logBucket,omitempty"` - - // LogObjectPrefix: A prefix for log object names. - LogObjectPrefix string `json:"logObjectPrefix,omitempty"` - - // ForceSendFields is a list of field names (e.g. "LogBucket") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"LogBucket") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketLogging) MarshalJSON() ([]byte, error) { - type NoMethod BucketLogging - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketOwner: The owner of the bucket. This is always the project -// team's owner group. -type BucketOwner struct { - // Entity: The entity, in the form project-owner-projectId. - Entity string `json:"entity,omitempty"` - - // EntityId: The ID for the entity. - EntityId string `json:"entityId,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Entity") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Entity") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketOwner) MarshalJSON() ([]byte, error) { - type NoMethod BucketOwner - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketRetentionPolicy: The bucket's retention policy. The retention -// policy enforces a minimum retention time for all objects contained in -// the bucket, based on their creation time. Any attempt to overwrite or -// delete objects younger than the retention period will result in a -// PERMISSION_DENIED error. An unlocked retention policy can be modified -// or removed from the bucket via a storage.buckets.update operation. A -// locked retention policy cannot be removed or shortened in duration -// for the lifetime of the bucket. Attempting to remove or decrease -// period of a locked retention policy will result in a -// PERMISSION_DENIED error. -type BucketRetentionPolicy struct { - // EffectiveTime: Server-determined value that indicates the time from - // which policy was enforced and effective. This value is in RFC 3339 - // format. - EffectiveTime string `json:"effectiveTime,omitempty"` - - // IsLocked: Once locked, an object retention policy cannot be modified. - IsLocked bool `json:"isLocked,omitempty"` - - // RetentionPeriod: The duration in seconds that objects need to be - // retained. Retention duration must be greater than zero and less than - // 100 years. Note that enforcement of retention periods less than a day - // is not guaranteed. Such periods should only be used for testing - // purposes. - RetentionPeriod int64 `json:"retentionPeriod,omitempty,string"` - - // ForceSendFields is a list of field names (e.g. "EffectiveTime") to - // unconditionally include in API requests. 
By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "EffectiveTime") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) { - type NoMethod BucketRetentionPolicy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketVersioning: The bucket's versioning configuration. -type BucketVersioning struct { - // Enabled: While set to true, versioning is fully enabled for this - // bucket. - Enabled bool `json:"enabled,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enabled") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Enabled") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketVersioning) MarshalJSON() ([]byte, error) { - type NoMethod BucketVersioning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketWebsite: The bucket's website configuration, controlling how -// the service behaves when accessing bucket contents as a web site. See -// the Static Website Examples for more information. -type BucketWebsite struct { - // MainPageSuffix: If the requested object path is missing, the service - // will ensure the path has a trailing '/', append this suffix, and - // attempt to retrieve the resulting object. This allows the creation of - // index.html objects to represent directory pages. - MainPageSuffix string `json:"mainPageSuffix,omitempty"` - - // NotFoundPage: If the requested object path is missing, and any - // mainPageSuffix object is missing, if applicable, the service will - // return the named object from this bucket as the content for a 404 Not - // Found result. - NotFoundPage string `json:"notFoundPage,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. 
However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MainPageSuffix") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *BucketWebsite) MarshalJSON() ([]byte, error) { - type NoMethod BucketWebsite - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketAccessControl: An access-control entry. -type BucketAccessControl struct { - // Bucket: The name of the bucket. - Bucket string `json:"bucket,omitempty"` - - // Domain: The domain associated with the entity, if any. - Domain string `json:"domain,omitempty"` - - // Email: The email address associated with the entity, if any. - Email string `json:"email,omitempty"` - - // Entity: The entity holding the permission, in one of the following - // forms: - // - user-userId - // - user-email - // - group-groupId - // - group-email - // - domain-domain - // - project-team-projectId - // - allUsers - // - allAuthenticatedUsers Examples: - // - The user liz@example.com would be user-liz@example.com. - // - The group example@googlegroups.com would be - // group-example@googlegroups.com. - // - To refer to all members of the Google Apps for Business domain - // example.com, the entity would be domain-example.com. - Entity string `json:"entity,omitempty"` - - // EntityId: The ID for the entity, if any. - EntityId string `json:"entityId,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the access-control entry. - Etag string `json:"etag,omitempty"` - - // Id: The ID of the access-control entry. - Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For bucket access control entries, - // this is always storage#bucketAccessControl. - Kind string `json:"kind,omitempty"` - - // ProjectTeam: The project team associated with the entity, if any. - ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` - - // Role: The access permission for the entity. - Role string `json:"role,omitempty"` - - // SelfLink: The link to this access-control entry. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bucket") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Bucket") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { - type NoMethod BucketAccessControl - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketAccessControlProjectTeam: The project team associated with the -// entity, if any. -type BucketAccessControlProjectTeam struct { - // ProjectNumber: The project number. - ProjectNumber string `json:"projectNumber,omitempty"` - - // Team: The team. - Team string `json:"team,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ProjectNumber") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ProjectNumber") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { - type NoMethod BucketAccessControlProjectTeam - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BucketAccessControls: An access-control list. -type BucketAccessControls struct { - // Items: The list of items. - Items []*BucketAccessControl `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of bucket access control - // entries, this is always storage#bucketAccessControls. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { - type NoMethod BucketAccessControls - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Buckets: A list of buckets. -type Buckets struct { - // Items: The list of items. 
-	Items []*Bucket `json:"items,omitempty"`
-
-	// Kind: The kind of item this is. For lists of buckets, this is always
-	// storage#buckets.
-	Kind string `json:"kind,omitempty"`
-
-	// NextPageToken: The continuation token, used to page through large
-	// result sets. Provide this value in a subsequent request to return the
-	// next page of results.
-	NextPageToken string `json:"nextPageToken,omitempty"`
-
-	// ServerResponse contains the HTTP response code and headers from the
-	// server.
-	googleapi.ServerResponse `json:"-"`
-
-	// ForceSendFields is a list of field names (e.g. "Items") to
-	// unconditionally include in API requests. By default, fields with
-	// empty or default values are omitted from API requests. However, any
-	// non-pointer, non-interface field appearing in ForceSendFields will be
-	// sent to the server regardless of whether the field is empty or not.
-	// This may be used to include empty fields in Patch requests.
-	ForceSendFields []string `json:"-"`
-
-	// NullFields is a list of field names (e.g. "Items") to include in API
-	// requests with the JSON null value. By default, fields with empty
-	// values are omitted from API requests. However, any field with an
-	// empty value appearing in NullFields will be sent to the server as
-	// null. It is an error if a field in this list has a non-empty value.
-	// This may be used to include null fields in Patch requests.
-	NullFields []string `json:"-"`
-}
-
-func (s *Buckets) MarshalJSON() ([]byte, error) {
-	type NoMethod Buckets
-	raw := NoMethod(*s)
-	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// Channel: A notification channel used to watch for resource changes.
-type Channel struct {
-	// Address: The address where notifications are delivered for this
-	// channel.
-	Address string `json:"address,omitempty"`
-
-	// Expiration: Date and time of notification channel expiration,
-	// expressed as a Unix timestamp, in milliseconds. Optional.
-	Expiration int64 `json:"expiration,omitempty,string"`
-
-	// Id: A UUID or similar unique string that identifies this channel.
-	Id string `json:"id,omitempty"`
-
-	// Kind: Identifies this as a notification channel used to watch for
-	// changes to a resource, which is "api#channel".
-	Kind string `json:"kind,omitempty"`
-
-	// Params: Additional parameters controlling delivery channel behavior.
-	// Optional.
-	Params map[string]string `json:"params,omitempty"`
-
-	// Payload: A Boolean value to indicate whether payload is wanted.
-	// Optional.
-	Payload bool `json:"payload,omitempty"`
-
-	// ResourceId: An opaque ID that identifies the resource being watched
-	// on this channel. Stable across different API versions.
-	ResourceId string `json:"resourceId,omitempty"`
-
-	// ResourceUri: A version-specific identifier for the watched resource.
-	ResourceUri string `json:"resourceUri,omitempty"`
-
-	// Token: An arbitrary string delivered to the target address with each
-	// notification delivered over this channel. Optional.
-	Token string `json:"token,omitempty"`
-
-	// Type: The type of delivery mechanism used for this channel.
-	Type string `json:"type,omitempty"`
-
-	// ServerResponse contains the HTTP response code and headers from the
-	// server.
-	googleapi.ServerResponse `json:"-"`
-
-	// ForceSendFields is a list of field names (e.g. "Address") to
-	// unconditionally include in API requests. By default, fields with
-	// empty or default values are omitted from API requests.
However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Address") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Channel) MarshalJSON() ([]byte, error) { - type NoMethod Channel - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ComposeRequest: A Compose request. -type ComposeRequest struct { - // Destination: Properties of the resulting object. - Destination *Object `json:"destination,omitempty"` - - // Kind: The kind of item this is. - Kind string `json:"kind,omitempty"` - - // SourceObjects: The list of source objects that will be concatenated - // into a single object. - SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Destination") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Destination") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ComposeRequest) MarshalJSON() ([]byte, error) { - type NoMethod ComposeRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ComposeRequestSourceObjects struct { - // Generation: The generation of this object to use as the source. - Generation int64 `json:"generation,omitempty,string"` - - // Name: The source object's name. All source objects must reside in the - // same bucket. - Name string `json:"name,omitempty"` - - // ObjectPreconditions: Conditions that must be met for this operation - // to execute. - ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Generation") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Generation") to include in - // API requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { - type NoMethod ComposeRequestSourceObjects - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must -// be met for this operation to execute. -type ComposeRequestSourceObjectsObjectPreconditions struct { - // IfGenerationMatch: Only perform the composition if the generation of - // the source object that would be used matches this value. If this - // value and a generation are both specified, they must be the same - // value or the call will fail. - IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` - - // ForceSendFields is a list of field names (e.g. "IfGenerationMatch") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IfGenerationMatch") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { - type NoMethod ComposeRequestSourceObjectsObjectPreconditions - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Expr: Represents an expression text. Example: title: "User account -// presence" description: "Determines whether the request has a user -// account" expression: "size(request.user) > 0" -type Expr struct { - // Description: An optional description of the expression. This is a - // longer text which describes the expression, e.g. when hovered over it - // in a UI. - Description string `json:"description,omitempty"` - - // Expression: Textual representation of an expression in Common - // Expression Language syntax. The application context of the containing - // message determines which well-known feature set of CEL is supported. - Expression string `json:"expression,omitempty"` - - // Location: An optional string indicating the location of the - // expression for error reporting, e.g. a file name and a position in - // the file. - Location string `json:"location,omitempty"` - - // Title: An optional title for the expression, i.e. a short string - // describing its purpose. This can be used e.g. in UIs which allow to - // enter the expression. - Title string `json:"title,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. 
By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Expr) MarshalJSON() ([]byte, error) { - type NoMethod Expr - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// HmacKey: JSON template to produce a JSON-style HMAC Key resource for -// Create responses. -type HmacKey struct { - // Kind: The kind of item this is. For HMAC keys, this is always - // storage#hmacKey. - Kind string `json:"kind,omitempty"` - - // Metadata: Key metadata. - Metadata *HmacKeyMetadata `json:"metadata,omitempty"` - - // Secret: HMAC secret key material. - Secret string `json:"secret,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *HmacKey) MarshalJSON() ([]byte, error) { - type NoMethod HmacKey - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// HmacKeyMetadata: JSON template to produce a JSON-style HMAC Key -// metadata resource. -type HmacKeyMetadata struct { - // AccessId: The ID of the HMAC Key. - AccessId string `json:"accessId,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the HMAC key. - Etag string `json:"etag,omitempty"` - - // Id: The ID of the HMAC key, including the Project ID and the Access - // ID. - Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For HMAC Key metadata, this is always - // storage#hmacKeyMetadata. - Kind string `json:"kind,omitempty"` - - // ProjectId: Project ID owning the service account to which the key - // authenticates. - ProjectId string `json:"projectId,omitempty"` - - // SelfLink: The link to this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServiceAccountEmail: The email address of the key's associated - // service account. 
- ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` - - // State: The state of the key. Can be one of ACTIVE, INACTIVE, or - // DELETED. - State string `json:"state,omitempty"` - - // TimeCreated: The creation time of the HMAC key in RFC 3339 format. - TimeCreated string `json:"timeCreated,omitempty"` - - // Updated: The last modification time of the HMAC key metadata in RFC - // 3339 format. - Updated string `json:"updated,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "AccessId") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AccessId") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *HmacKeyMetadata) MarshalJSON() ([]byte, error) { - type NoMethod HmacKeyMetadata - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// HmacKeysMetadata: A list of hmacKeys. -type HmacKeysMetadata struct { - // Items: The list of items. - Items []*HmacKeyMetadata `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of hmacKeys, this is always - // storage#hmacKeysMetadata. - Kind string `json:"kind,omitempty"` - - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *HmacKeysMetadata) MarshalJSON() ([]byte, error) { - type NoMethod HmacKeysMetadata - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Notification: A subscription to receive Google PubSub notifications. 
-type Notification struct {
-	// CustomAttributes: An optional list of additional attributes to attach
-	// to each Cloud PubSub message published for this notification
-	// subscription.
-	CustomAttributes map[string]string `json:"custom_attributes,omitempty"`
-
-	// Etag: HTTP 1.1 Entity tag for this subscription notification.
-	Etag string `json:"etag,omitempty"`
-
-	// EventTypes: If present, only send notifications about listed event
-	// types. If empty, send notifications for all event types.
-	EventTypes []string `json:"event_types,omitempty"`
-
-	// Id: The ID of the notification.
-	Id string `json:"id,omitempty"`
-
-	// Kind: The kind of item this is. For notifications, this is always
-	// storage#notification.
-	Kind string `json:"kind,omitempty"`
-
-	// ObjectNamePrefix: If present, only apply this notification
-	// configuration to object names that begin with this prefix.
-	ObjectNamePrefix string `json:"object_name_prefix,omitempty"`
-
-	// PayloadFormat: The desired content of the Payload.
-	PayloadFormat string `json:"payload_format,omitempty"`
-
-	// SelfLink: The canonical URL of this notification.
-	SelfLink string `json:"selfLink,omitempty"`
-
-	// Topic: The Cloud PubSub topic to which this subscription publishes.
-	// Formatted as:
-	// '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'
-	Topic string `json:"topic,omitempty"`
-
-	// ServerResponse contains the HTTP response code and headers from the
-	// server.
-	googleapi.ServerResponse `json:"-"`
-
-	// ForceSendFields is a list of field names (e.g. "CustomAttributes") to
-	// unconditionally include in API requests. By default, fields with
-	// empty or default values are omitted from API requests. However, any
-	// non-pointer, non-interface field appearing in ForceSendFields will be
-	// sent to the server regardless of whether the field is empty or not.
-	// This may be used to include empty fields in Patch requests.
-	ForceSendFields []string `json:"-"`
-
-	// NullFields is a list of field names (e.g. "CustomAttributes") to
-	// include in API requests with the JSON null value. By default, fields
-	// with empty values are omitted from API requests. However, any field
-	// with an empty value appearing in NullFields will be sent to the
-	// server as null. It is an error if a field in this list has a
-	// non-empty value. This may be used to include null fields in Patch
-	// requests.
-	NullFields []string `json:"-"`
-}
-
-func (s *Notification) MarshalJSON() ([]byte, error) {
-	type NoMethod Notification
-	raw := NoMethod(*s)
-	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// Notifications: A list of notification subscriptions.
-type Notifications struct {
-	// Items: The list of items.
-	Items []*Notification `json:"items,omitempty"`
-
-	// Kind: The kind of item this is. For lists of notifications, this is
-	// always storage#notifications.
-	Kind string `json:"kind,omitempty"`
-
-	// ServerResponse contains the HTTP response code and headers from the
-	// server.
-	googleapi.ServerResponse `json:"-"`
-
-	// ForceSendFields is a list of field names (e.g. "Items") to
-	// unconditionally include in API requests. By default, fields with
-	// empty or default values are omitted from API requests. However, any
-	// non-pointer, non-interface field appearing in ForceSendFields will be
-	// sent to the server regardless of whether the field is empty or not.
-	// This may be used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Notifications) MarshalJSON() ([]byte, error) { - type NoMethod Notifications - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Object: An object. -type Object struct { - // Acl: Access controls on the object. - Acl []*ObjectAccessControl `json:"acl,omitempty"` - - // Bucket: The name of the bucket containing this object. - Bucket string `json:"bucket,omitempty"` - - // CacheControl: Cache-Control directive for the object data. If - // omitted, and the object is accessible to all anonymous users, the - // default will be public, max-age=3600. - CacheControl string `json:"cacheControl,omitempty"` - - // ComponentCount: Number of underlying components that make up this - // object. Components are accumulated by compose operations. - ComponentCount int64 `json:"componentCount,omitempty"` - - // ContentDisposition: Content-Disposition of the object data. - ContentDisposition string `json:"contentDisposition,omitempty"` - - // ContentEncoding: Content-Encoding of the object data. - ContentEncoding string `json:"contentEncoding,omitempty"` - - // ContentLanguage: Content-Language of the object data. - ContentLanguage string `json:"contentLanguage,omitempty"` - - // ContentType: Content-Type of the object data. If an object is stored - // without a Content-Type, it is served as application/octet-stream. - ContentType string `json:"contentType,omitempty"` - - // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; - // encoded using base64 in big-endian byte order. For more information - // about using the CRC32c checksum, see Hashes and ETags: Best - // Practices. - Crc32c string `json:"crc32c,omitempty"` - - // CustomTime: A timestamp in RFC 3339 format specified by the user for - // an object. - CustomTime string `json:"customTime,omitempty"` - - // CustomerEncryption: Metadata of customer-supplied encryption key, if - // the object is encrypted by such a key. - CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the object. - Etag string `json:"etag,omitempty"` - - // EventBasedHold: Whether an object is under event-based hold. - // Event-based hold is a way to retain objects until an event occurs, - // which is signified by the hold's release (i.e. this value is set to - // false). After being released (set to false), such objects will be - // subject to bucket-level retention (if any). One sample use case of - // this flag is for banks to hold loan documents for at least 3 years - // after loan is paid in full. Here, bucket-level retention is 3 years - // and the event is the loan being paid in full. In this example, these - // objects will be held intact for any number of years until the event - // has occurred (event-based hold on the object is released) and then 3 - // more years after that. That means retention duration of the objects - // begins from the moment event-based hold transitioned from true to - // false. 
- EventBasedHold bool `json:"eventBasedHold,omitempty"` - - // Generation: The content generation of this object. Used for object - // versioning. - Generation int64 `json:"generation,omitempty,string"` - - // Id: The ID of the object, including the bucket name, object name, and - // generation number. - Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For objects, this is always - // storage#object. - Kind string `json:"kind,omitempty"` - - // KmsKeyName: Not currently supported. Specifying the parameter causes - // the request to fail with status code 400 - Bad Request. - KmsKeyName string `json:"kmsKeyName,omitempty"` - - // Md5Hash: MD5 hash of the data; encoded using base64. For more - // information about using the MD5 hash, see Hashes and ETags: Best - // Practices. - Md5Hash string `json:"md5Hash,omitempty"` - - // MediaLink: Media download link. - MediaLink string `json:"mediaLink,omitempty"` - - // Metadata: User-provided metadata, in key/value pairs. - Metadata map[string]string `json:"metadata,omitempty"` - - // Metageneration: The version of the metadata for this object at this - // generation. Used for preconditions and for detecting changes in - // metadata. A metageneration number is only meaningful in the context - // of a particular generation of a particular object. - Metageneration int64 `json:"metageneration,omitempty,string"` - - // Name: The name of the object. Required if not specified by URL - // parameter. - Name string `json:"name,omitempty"` - - // Owner: The owner of the object. This will always be the uploader of - // the object. - Owner *ObjectOwner `json:"owner,omitempty"` - - // RetentionExpirationTime: A server-determined value that specifies the - // earliest time that the object's retention period expires. This value - // is in RFC 3339 format. Note 1: This field is not provided for objects - // with an active event-based hold, since retention expiration is - // unknown until the hold is removed. Note 2: This value can be provided - // even when temporary hold is set (so that the user can reason about - // policy without having to first unset the temporary hold). - RetentionExpirationTime string `json:"retentionExpirationTime,omitempty"` - - // SelfLink: The link to this object. - SelfLink string `json:"selfLink,omitempty"` - - // Size: Content-Length of the data in bytes. - Size uint64 `json:"size,omitempty,string"` - - // StorageClass: Storage class of the object. - StorageClass string `json:"storageClass,omitempty"` - - // TemporaryHold: Whether an object is under temporary hold. While this - // flag is set to true, the object is protected against deletion and - // overwrites. A common use case of this flag is regulatory - // investigations where objects need to be retained while the - // investigation is ongoing. Note that unlike event-based hold, - // temporary hold does not impact retention expiration time of an - // object. - TemporaryHold bool `json:"temporaryHold,omitempty"` - - // TimeCreated: The creation time of the object in RFC 3339 format. - TimeCreated string `json:"timeCreated,omitempty"` - - // TimeDeleted: The deletion time of the object in RFC 3339 format. Will - // be returned if and only if this version of the object has been - // deleted. - TimeDeleted string `json:"timeDeleted,omitempty"` - - // TimeStorageClassUpdated: The time at which the object's storage class - // was last changed. When the object is initially created, it will be - // set to timeCreated. 
- TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"` - - // Updated: The modification time of the object metadata in RFC 3339 - // format. Set initially to object creation time and then updated - // whenever any metadata of the object changes. This includes changes - // made by a requester, such as modifying custom metadata, as well as - // changes made by Cloud Storage on behalf of a requester, such as - // changing the storage class based on an Object Lifecycle - // Configuration. - Updated string `json:"updated,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Acl") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Acl") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Object) MarshalJSON() ([]byte, error) { - type NoMethod Object - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ObjectCustomerEncryption: Metadata of customer-supplied encryption -// key, if the object is encrypted by such a key. -type ObjectCustomerEncryption struct { - // EncryptionAlgorithm: The encryption algorithm. - EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` - - // KeySha256: SHA256 hash value of the encryption key. - KeySha256 string `json:"keySha256,omitempty"` - - // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "EncryptionAlgorithm") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { - type NoMethod ObjectCustomerEncryption - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ObjectOwner: The owner of the object. This will always be the -// uploader of the object. -type ObjectOwner struct { - // Entity: The entity, in the form user-userId. 
- Entity string `json:"entity,omitempty"` - - // EntityId: The ID for the entity. - EntityId string `json:"entityId,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Entity") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Entity") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ObjectOwner) MarshalJSON() ([]byte, error) { - type NoMethod ObjectOwner - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ObjectAccessControl: An access-control entry. -type ObjectAccessControl struct { - // Bucket: The name of the bucket. - Bucket string `json:"bucket,omitempty"` - - // Domain: The domain associated with the entity, if any. - Domain string `json:"domain,omitempty"` - - // Email: The email address associated with the entity, if any. - Email string `json:"email,omitempty"` - - // Entity: The entity holding the permission, in one of the following - // forms: - // - user-userId - // - user-email - // - group-groupId - // - group-email - // - domain-domain - // - project-team-projectId - // - allUsers - // - allAuthenticatedUsers Examples: - // - The user liz@example.com would be user-liz@example.com. - // - The group example@googlegroups.com would be - // group-example@googlegroups.com. - // - To refer to all members of the Google Apps for Business domain - // example.com, the entity would be domain-example.com. - Entity string `json:"entity,omitempty"` - - // EntityId: The ID for the entity, if any. - EntityId string `json:"entityId,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the access-control entry. - Etag string `json:"etag,omitempty"` - - // Generation: The content generation of the object, if applied to an - // object. - Generation int64 `json:"generation,omitempty,string"` - - // Id: The ID of the access-control entry. - Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For object access control entries, - // this is always storage#objectAccessControl. - Kind string `json:"kind,omitempty"` - - // Object: The name of the object, if applied to an object. - Object string `json:"object,omitempty"` - - // ProjectTeam: The project team associated with the entity, if any. - ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` - - // Role: The access permission for the entity. - Role string `json:"role,omitempty"` - - // SelfLink: The link to this access-control entry. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bucket") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. 
However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Bucket") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { - type NoMethod ObjectAccessControl - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ObjectAccessControlProjectTeam: The project team associated with the -// entity, if any. -type ObjectAccessControlProjectTeam struct { - // ProjectNumber: The project number. - ProjectNumber string `json:"projectNumber,omitempty"` - - // Team: The team. - Team string `json:"team,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ProjectNumber") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ProjectNumber") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { - type NoMethod ObjectAccessControlProjectTeam - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ObjectAccessControls: An access-control list. -type ObjectAccessControls struct { - // Items: The list of items. - Items []*ObjectAccessControl `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of object access control - // entries, this is always storage#objectAccessControls. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { - type NoMethod ObjectAccessControls - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Objects: A list of objects. -type Objects struct { - // Items: The list of items. - Items []*Object `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of objects, this is always - // storage#objects. - Kind string `json:"kind,omitempty"` - - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Prefixes: The list of prefixes of objects matching-but-not-listed up - // to and including the requested delimiter. - Prefixes []string `json:"prefixes,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Objects) MarshalJSON() ([]byte, error) { - type NoMethod Objects - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Policy: A bucket/object IAM policy. -type Policy struct { - // Bindings: An association between a role, which comes with a set of - // permissions, and members who may assume that role. - Bindings []*PolicyBindings `json:"bindings,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the policy. - Etag string `json:"etag,omitempty"` - - // Kind: The kind of item this is. For policies, this is always - // storage#policy. This field is ignored on input. - Kind string `json:"kind,omitempty"` - - // ResourceId: The ID of the resource to which this policy belongs. Will - // be of the form projects/_/buckets/bucket for buckets, and - // projects/_/buckets/bucket/objects/object for objects. A specific - // generation may be specified by appending #generationNumber to the end - // of the object name, e.g. - // projects/_/buckets/my-bucket/objects/data.txt#17. The current - // generation can be denoted with #0. This field is ignored on input. - ResourceId string `json:"resourceId,omitempty"` - - // Version: The IAM policy format version. - Version int64 `json:"version,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bindings") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Bindings") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Policy) MarshalJSON() ([]byte, error) { - type NoMethod Policy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type PolicyBindings struct { - // Condition: The condition that is associated with this binding. NOTE: - // an unsatisfied condition will not allow user access via current - // binding. Different bindings, including their conditions, are examined - // independently. - Condition *Expr `json:"condition,omitempty"` - - // Members: A collection of identifiers for members who may assume the - // provided role. Recognized identifiers are as follows: - // - allUsers — A special identifier that represents anyone on the - // internet; with or without a Google account. - // - allAuthenticatedUsers — A special identifier that represents - // anyone who is authenticated with a Google account or a service - // account. - // - user:emailid — An email address that represents a specific - // account. For example, user:alice@gmail.com or user:joe@example.com. - // - // - serviceAccount:emailid — An email address that represents a - // service account. For example, - // serviceAccount:my-other-app@appspot.gserviceaccount.com . - // - group:emailid — An email address that represents a Google group. - // For example, group:admins@example.com. - // - domain:domain — A Google Apps domain name that represents all the - // users of that domain. For example, domain:google.com or - // domain:example.com. - // - projectOwner:projectid — Owners of the given project. For - // example, projectOwner:my-example-project - // - projectEditor:projectid — Editors of the given project. For - // example, projectEditor:my-example-project - // - projectViewer:projectid — Viewers of the given project. For - // example, projectViewer:my-example-project - Members []string `json:"members,omitempty"` - - // Role: The role to which members belong. Two types of roles are - // supported: new IAM roles, which grant permissions that do not map - // directly to those provided by ACLs, and legacy IAM roles, which do - // map directly to ACL permissions. All roles are of the format - // roles/storage.specificRole. - // The new IAM roles are: - // - roles/storage.admin — Full control of Google Cloud Storage - // resources. - // - roles/storage.objectViewer — Read-Only access to Google Cloud - // Storage objects. - // - roles/storage.objectCreator — Access to create objects in Google - // Cloud Storage. - // - roles/storage.objectAdmin — Full control of Google Cloud Storage - // objects. 
The legacy IAM roles are: - // - roles/storage.legacyObjectReader — Read-only access to objects - // without listing. Equivalent to an ACL entry on an object with the - // READER role. - // - roles/storage.legacyObjectOwner — Read/write access to existing - // objects without listing. Equivalent to an ACL entry on an object with - // the OWNER role. - // - roles/storage.legacyBucketReader — Read access to buckets with - // object listing. Equivalent to an ACL entry on a bucket with the - // READER role. - // - roles/storage.legacyBucketWriter — Read access to buckets with - // object listing/creation/deletion. Equivalent to an ACL entry on a - // bucket with the WRITER role. - // - roles/storage.legacyBucketOwner — Read and write access to - // existing buckets with object listing/creation/deletion. Equivalent to - // an ACL entry on a bucket with the OWNER role. - Role string `json:"role,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Condition") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Condition") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PolicyBindings) MarshalJSON() ([]byte, error) { - type NoMethod PolicyBindings - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RewriteResponse: A rewrite response. -type RewriteResponse struct { - // Done: true if the copy is finished; otherwise, false if the copy is - // in progress. This property is always present in the response. - Done bool `json:"done,omitempty"` - - // Kind: The kind of item this is. - Kind string `json:"kind,omitempty"` - - // ObjectSize: The total size of the object being copied in bytes. This - // property is always present in the response. - ObjectSize int64 `json:"objectSize,omitempty,string"` - - // Resource: A resource containing the metadata for the copied-to - // object. This property is present in the response only when copying - // completes. - Resource *Object `json:"resource,omitempty"` - - // RewriteToken: A token to use in subsequent requests to continue - // copying data. This token is present in the response only when there - // is more data to copy. - RewriteToken string `json:"rewriteToken,omitempty"` - - // TotalBytesRewritten: The total bytes written so far, which can be - // used to provide a waiting user with a progress indicator. This - // property is always present in the response. - TotalBytesRewritten int64 `json:"totalBytesRewritten,omitempty,string"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Done") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. 
However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Done") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RewriteResponse) MarshalJSON() ([]byte, error) { - type NoMethod RewriteResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ServiceAccount: A subscription to receive Google PubSub -// notifications. -type ServiceAccount struct { - // EmailAddress: The ID of the notification. - EmailAddress string `json:"email_address,omitempty"` - - // Kind: The kind of item this is. For notifications, this is always - // storage#notification. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "EmailAddress") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "EmailAddress") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAccount - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TestIamPermissionsResponse: A -// storage.(buckets|objects).testIamPermissions response. -type TestIamPermissionsResponse struct { - // Kind: The kind of item this is. - Kind string `json:"kind,omitempty"` - - // Permissions: The permissions held by the caller. Permissions are - // always of the format storage.resource.capability, where resource is - // one of buckets or objects. The supported permissions are as follows: - // - // - storage.buckets.delete — Delete bucket. - // - storage.buckets.get — Read bucket metadata. - // - storage.buckets.getIamPolicy — Read bucket IAM policy. - // - storage.buckets.create — Create bucket. - // - storage.buckets.list — List buckets. - // - storage.buckets.setIamPolicy — Update bucket IAM policy. - // - storage.buckets.update — Update bucket metadata. - // - storage.objects.delete — Delete object. - // - storage.objects.get — Read object data and metadata. - // - storage.objects.getIamPolicy — Read object IAM policy. - // - storage.objects.create — Create object. 
- // - storage.objects.list — List objects. - // - storage.objects.setIamPolicy — Update object IAM policy. - // - storage.objects.update — Update object metadata. - Permissions []string `json:"permissions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { - type NoMethod TestIamPermissionsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// method id "storage.bucketAccessControls.delete": - -type BucketAccessControlsDeleteCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Permanently deletes the ACL entry for the specified entity on -// the specified bucket. -// -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { - c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
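// A minimal sketch of working with the IAM types above; the service value,
// the "my-bucket" name, and the requested permissions are placeholders, and
// the testIamPermissions call itself is defined further down in this file.
// Assumes:
//
//	import (
//	    "context"
//	    "fmt"
//	    storage "google.golang.org/api/storage/v1"
//	)
func sketchIamTypes(ctx context.Context, svc *storage.Service) error {
	// Ask which of the requested permissions the caller actually holds.
	resp, err := svc.Buckets.TestIamPermissions("my-bucket", []string{
		"storage.buckets.get",
		"storage.objects.list",
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, p := range resp.Permissions {
		fmt.Println("granted:", p)
	}
	// A binding pairs a role with the members it applies to.
	_ = &storage.PolicyBindings{
		Role:    "roles/storage.objectViewer",
		Members: []string{"user:alice@example.com"},
	}
	return nil
}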
-func (c *BucketAccessControlsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.bucketAccessControls.delete" call. -func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) - } - return nil - // { - // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "DELETE", - // "id": "storage.bucketAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.get": - -type BucketAccessControlsGetCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the ACL entry for the specified entity on the specified -// bucket. -// -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { - c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. 
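// A minimal usage sketch of the delete call above; the bucket name, entity,
// and billing project are placeholders, and svc is a *storage.Service built
// elsewhere (e.g. with storage.NewService).
func sketchDeleteBucketACL(ctx context.Context, svc *storage.Service) error {
	// Revoke public access by removing the allUsers ACL entry; UserProject
	// is only required for Requester Pays buckets.
	return svc.BucketAccessControls.
		Delete("my-bucket", "allUsers").
		UserProject("my-billing-project").
		Context(ctx).
		Do()
}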
-func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketAccessControlsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.bucketAccessControls.get" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &BucketAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.bucketAccessControls.get", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.insert": - -type BucketAccessControlsInsertCall struct { - s *Service - bucket string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a new ACL entry on the specified bucket. -// -// - bucket: Name of a bucket. -func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { - c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *BucketAccessControlsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.bucketAccessControls.insert" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &BucketAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new ACL entry on the specified bucket.", - // "httpMethod": "POST", - // "id": "storage.bucketAccessControls.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl", - // "request": { - // "$ref": "BucketAccessControl" - // }, - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.list": - -type BucketAccessControlsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves ACL entries on the specified bucket. 
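// A minimal sketch of creating an ACL entry with the insert call above. The
// Entity and Role fields live on BucketAccessControl, defined earlier in
// this file; the values here are placeholders.
func sketchInsertBucketACL(ctx context.Context, svc *storage.Service) (*storage.BucketAccessControl, error) {
	return svc.BucketAccessControls.Insert("my-bucket", &storage.BucketAccessControl{
		Entity: "user-alice@example.com", // who the grant applies to
		Role:   "READER",                 // a legacy ACL role, not an IAM role
	}).Context(ctx).Do()
}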
-// -// - bucket: Name of a bucket. -func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { - c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketAccessControlsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.bucketAccessControls.list" call. -// Exactly one of *BucketAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &BucketAccessControls{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves ACL entries on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.bucketAccessControls.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl", - // "response": { - // "$ref": "BucketAccessControls" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.patch": - -type BucketAccessControlsPatchCall struct { - s *Service - bucket string - entity string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches an ACL entry on the specified bucket. -// -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { - c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *BucketAccessControlsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.bucketAccessControls.patch" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &BucketAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches an ACL entry on the specified bucket.", - // "httpMethod": "PATCH", - // "id": "storage.bucketAccessControls.patch", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "request": { - // "$ref": "BucketAccessControl" - // }, - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.update": - -type BucketAccessControlsUpdateCall struct { - s *Service - bucket string - entity string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an ACL entry on the specified bucket. -// -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { - c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketAccessControlsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.bucketAccessControls.update" call. 
-// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &BucketAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an ACL entry on the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.bucketAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "request": { - // "$ref": "BucketAccessControl" - // }, - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.buckets.delete": - -type BucketsDeleteCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Permanently deletes an empty bucket. -// -// - bucket: Name of a bucket. -func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { - c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If set, only deletes the bucket if its -// metageneration matches this value. -func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": If set, only deletes the bucket if its -// metageneration does not match this value. 
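// A minimal sketch contrasting the patch and update calls above: patch
// applies only the fields provided in the request body, while update
// replaces the whole entry. The entity and role values are placeholders.
func sketchChangeBucketACL(ctx context.Context, svc *storage.Service) error {
	// Patch: change just the role of an existing entry, leaving the rest as-is.
	_, err := svc.BucketAccessControls.
		Patch("my-bucket", "user-alice@example.com", &storage.BucketAccessControl{Role: "WRITER"}).
		Context(ctx).
		Do()
	return err
}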
-func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.delete" call. -func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) - } - return nil - // { - // "description": "Permanently deletes an empty bucket.", - // "httpMethod": "DELETE", - // "id": "storage.buckets.delete", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "If set, only deletes the bucket if its metageneration matches this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "If set, only deletes the bucket if its metageneration does not match this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.get": - -type BucketsGetCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns metadata for the specified bucket. -// -// - bucket: Name of a bucket. -func (r *BucketsService) Get(bucket string) *BucketsGetCall { - c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
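// A minimal sketch of the bucket deletion call above, guarded by a
// metageneration precondition so that a concurrent metadata change aborts
// the delete; the bucket name and metageneration value are placeholders.
func sketchDeleteBucket(ctx context.Context, svc *storage.Service, metagen int64) error {
	return svc.Buckets.
		Delete("my-bucket").
		IfMetagenerationMatch(metagen).
		Context(ctx).
		Do()
}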
-func (c *BucketsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.get" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns metadata for the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.buckets.get", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.getIamPolicy": - -type BucketsGetIamPolicyCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Returns an IAM policy for the specified bucket. -// -// - bucket: Name of a bucket. -func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { - c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": The IAM policy format version to be -// returned. If the optionsRequestedPolicyVersion is for an older -// version that doesn't support part of the requested IAM policy, the -// request fails. -func (c *BucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BucketsGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
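// A minimal sketch of fetching bucket metadata with the get call above;
// Projection("full") includes the owner, acl, and defaultObjectAcl
// properties that the default noAcl projection omits. The bucket name is a
// placeholder.
func sketchGetBucket(ctx context.Context, svc *storage.Service) (*storage.Bucket, error) {
	return svc.Buckets.Get("my-bucket").Projection("full").Context(ctx).Do()
}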
-func (c *BucketsGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns an IAM policy for the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.buckets.getIamPolicy", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "optionsRequestedPolicyVersion": { - // "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", - // "format": "int32", - // "location": "query", - // "minimum": "1", - // "type": "integer" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/iam", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.buckets.insert": - -type BucketsInsertCall struct { - s *Service - bucket *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a new bucket. -// -// - project: A valid API project identifier. -func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { - c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.urlParams_.Set("project", projectid) - c.bucket = bucket - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Project team owners get OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// -// their roles. -// -// "publicRead" - Project team owners get OWNER access, and allUsers -// -// get READER access. -// -// "publicReadWrite" - Project team owners get OWNER access, and -// -// allUsers get WRITER access. -func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the bucket resource -// specifies acl or defaultObjectAcl properties, when it defaults to -// full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
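// A minimal sketch of reading a bucket IAM policy with the call above and
// walking its bindings (the Policy and PolicyBindings types defined
// earlier). Requesting policy version 3 avoids the failure described above
// when the policy contains features, such as conditions, that older
// versions cannot express. Assumes "fmt" is imported.
func sketchGetBucketPolicy(ctx context.Context, svc *storage.Service) error {
	policy, err := svc.Buckets.GetIamPolicy("my-bucket").
		OptionsRequestedPolicyVersion(3).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, b := range policy.Bindings {
		fmt.Println(b.Role, b.Members)
	}
	return nil
}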
-func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.insert" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new bucket.", - // "httpMethod": "POST", - // "id": "storage.buckets.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." 
- // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "A valid API project identifier.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b", - // "request": { - // "$ref": "Bucket" - // }, - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.list": - -type BucketsListCall struct { - s *Service - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of buckets for a given project. -// -// - project: A valid API project identifier. -func (r *BucketsService) List(projectid string) *BucketsListCall { - c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.urlParams_.Set("project", projectid) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of buckets to return in a single response. The service will use this -// parameter or 1,000 items, whichever is smaller. -func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Prefix sets the optional parameter "prefix": Filter results to -// buckets whose names begin with this prefix. -func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// -// "full" - Include all properties. 
-// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsListCall) Projection(projection string) *BucketsListCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.list" call. -// Exactly one of *Buckets or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Buckets.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Buckets{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of buckets for a given project.", - // "httpMethod": "GET", - // "id": "storage.buckets.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to buckets whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "A valid API project identifier.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b", - // "response": { - // "$ref": "Buckets" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "storage.buckets.lockRetentionPolicy": - -type BucketsLockRetentionPolicyCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// LockRetentionPolicy: Locks retention policy on a bucket. -// -// - bucket: Name of a bucket. 
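The `Pages` helper shown above drives the `pageToken` loop itself, invoking the callback once per page until `NextPageToken` comes back empty. A minimal usage sketch, assuming an already-constructed `*storage.Service`:

```go
package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func listAllBuckets(ctx context.Context, svc *storage.Service, project string) error {
	// Pages handles pagination internally, calling the func once per page
	// until NextPageToken comes back empty.
	return svc.Buckets.List(project).
		MaxResults(100).
		Pages(ctx, func(page *storage.Buckets) error {
			for _, b := range page.Items {
				log.Println(b.Name)
			}
			return nil // a non-nil error here halts the iteration
		})
}
```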
-// - ifMetagenerationMatch: Makes the operation conditional on whether -// bucket's current metageneration matches the given value. -func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { - c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLockRetentionPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsLockRetentionPolicyCall) Context(ctx context.Context) *BucketsLockRetentionPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketsLockRetentionPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.lockRetentionPolicy" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Locks retention policy on a bucket.", - // "httpMethod": "POST", - // "id": "storage.buckets.lockRetentionPolicy", - // "parameterOrder": [ - // "bucket", - // "ifMetagenerationMatch" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/lockRetentionPolicy", - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.patch": - -type BucketsPatchCall struct { - s *Service - bucket string - bucket2 *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches a bucket. Changes to the bucket will be readable -// immediately after writing, but configuration changes may take time to -// propagate. -// -// - bucket: Name of a bucket. -func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { - c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.bucket2 = bucket2 - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Project team owners get OWNER access, and -// -// allAuthenticatedUsers get READER access. 
-// -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// -// their roles. -// -// "publicRead" - Project team owners get OWNER access, and allUsers -// -// get READER access. -// -// "publicReadWrite" - Project team owners get OWNER access, and -// -// allUsers get WRITER access. -func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
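`Patch` writes only the fields set on the request body, and the `ifMetagenerationMatch` precondition turns the write into a compare-and-swap against the copy last read. A sketch of that read-modify-write pattern; the `Get` call and the label values here are illustrative:

```go
package main

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

func addLabel(ctx context.Context, svc *storage.Service, bucketName string) error {
	current, err := svc.Buckets.Get(bucketName).Context(ctx).Do()
	if err != nil {
		return err
	}

	// Only the fields set here are patched; everything else is untouched.
	patch := &storage.Bucket{Labels: map[string]string{"env": "test"}}

	// Fails with 412 Precondition Failed if the bucket was modified
	// between the Get above and this Patch.
	_, err = svc.Buckets.Patch(bucketName, patch).
		IfMetagenerationMatch(current.Metageneration).
		Context(ctx).
		Do()
	return err
}
```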
-func (c *BucketsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.patch" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches a bucket. 
Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", - // "httpMethod": "PATCH", - // "id": "storage.buckets.patch", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "request": { - // "$ref": "Bucket" - // }, - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.buckets.setIamPolicy": - -type BucketsSetIamPolicyCall struct { - s *Service - bucket string - policy *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Updates an IAM policy for the specified bucket. 
-// -// - bucket: Name of a bucket. -func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall { - c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.policy = policy - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketsSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an IAM policy for the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.buckets.setIamPolicy", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/iam", - // "request": { - // "$ref": "Policy" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.buckets.testIamPermissions": - -type BucketsTestIamPermissionsCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Tests a set of permissions on the given bucket to -// see which, if any, are held by the caller. -// -// - bucket: Name of a bucket. -// - permissions: Permissions to test. -func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall { - c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
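`SetIamPolicy` replaces the bucket's entire policy, so callers normally read it first via `GetIamPolicy`, append a binding, and write it back, while `TestIamPermissions` reports which of a set of permissions the caller itself holds. A sketch, assuming `PolicyBindings` is the generated binding type in this package and with illustrative role and permission strings:

```go
package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func grantViewer(ctx context.Context, svc *storage.Service, bucket, member string) error {
	// Read-modify-write: SetIamPolicy replaces the whole policy, and the
	// etag carried inside it guards against concurrent edits.
	policy, err := svc.Buckets.GetIamPolicy(bucket).Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &storage.PolicyBindings{
		Role:    "roles/storage.objectViewer",
		Members: []string{member},
	})
	if _, err := svc.Buckets.SetIamPolicy(bucket, policy).Context(ctx).Do(); err != nil {
		return err
	}

	// TestIamPermissions returns the subset of the requested permissions
	// that the caller actually holds on the bucket.
	resp, err := svc.Buckets.TestIamPermissions(bucket,
		[]string{"storage.buckets.get", "storage.objects.list"}).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("caller holds: %v", resp.Permissions)
	return nil
}
```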
-func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", - // "httpMethod": "GET", - // "id": "storage.buckets.testIamPermissions", - // "parameterOrder": [ - // "bucket", - // "permissions" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "permissions": { - // "description": "Permissions to test.", - // "location": "query", - // "repeated": true, - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/iam/testPermissions", - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.update": - -type BucketsUpdateCall struct { - s *Service - bucket string - bucket2 *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a bucket. Changes to the bucket will be readable -// immediately after writing, but configuration changes may take time to -// propagate. -// -// - bucket: Name of a bucket. -func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { - c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.bucket2 = bucket2 - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Project team owners get OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// -// their roles. -// -// "publicRead" - Project team owners get OWNER access, and allUsers -// -// get READER access. -// -// "publicReadWrite" - Project team owners get OWNER access, and -// -// allUsers get WRITER access. -func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. 
-// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BucketsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.buckets.update" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", - // "httpMethod": "PUT", - // "id": "storage.buckets.update", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." 
- // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "request": { - // "$ref": "Bucket" - // }, - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.channels.stop": - -type ChannelsStopCall struct { - s *Service - channel *Channel - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Stop: Stop watching resources through this channel -func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { - c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.channel = channel - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ChannelsStopCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.channels.stop" call. -func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) - } - return nil - // { - // "description": "Stop watching resources through this channel", - // "httpMethod": "POST", - // "id": "storage.channels.stop", - // "path": "channels/stop", - // "request": { - // "$ref": "Channel", - // "parameterName": "resource" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.delete": - -type DefaultObjectAccessControlsDeleteCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Permanently deletes the default object ACL entry for the -// specified entity on the specified bucket. -// -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { - c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.defaultObjectAccessControls.delete" call. -func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) - } - return nil - // { - // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "DELETE", - // "id": "storage.defaultObjectAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.get": - -type DefaultObjectAccessControlsGetCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the default object ACL entry for the specified entity on -// the specified bucket. -// -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { - c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.defaultObjectAccessControls.get" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.defaultObjectAccessControls.get", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.insert": - -type DefaultObjectAccessControlsInsertCall struct { - s *Service - bucket string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a new default object ACL entry on the specified -// bucket. -// -// - bucket: Name of a bucket. -func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { - c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.defaultObjectAccessControls.insert" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new default object ACL entry on the specified bucket.", - // "httpMethod": "POST", - // "id": "storage.defaultObjectAccessControls.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.list": - -type DefaultObjectAccessControlsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves default object ACL entries on the specified bucket. -// -// - bucket: Name of a bucket. -func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { - c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If present, only return default ACL listing -// if the bucket's current metageneration matches this value. 
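// Editorial sketch: creating a default object ACL entry with the Insert call
// above. Assumes an authenticated *storage.Service (see the Delete sketch
// earlier); the bucket and group address are placeholders.
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// grantGroupDefaultRead gives a group read access to all future objects in
// the bucket. Only Entity and Role need to be set; the server fills in the
// rest of the ObjectAccessControl it returns.
func grantGroupDefaultRead(ctx context.Context, svc *storage.Service) (*storage.ObjectAccessControl, error) {
	return svc.DefaultObjectAccessControls.
		Insert("my-bucket", &storage.ObjectAccessControl{
			Entity: "group-admins@example.com", // group-emailAddress form
			Role:   "READER",
		}).
		Context(ctx).
		Do()
}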
-func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": If present, only return default ACL -// listing if the bucket's current metageneration does not match the -// given value. -func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DefaultObjectAccessControlsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.defaultObjectAccessControls.list" call. -// Exactly one of *ObjectAccessControls or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *ObjectAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ObjectAccessControls{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves default object ACL entries on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.defaultObjectAccessControls.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl", - // "response": { - // "$ref": "ObjectAccessControls" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.patch": - -type DefaultObjectAccessControlsPatchCall struct { - s *Service - bucket string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches a default object ACL entry on the specified bucket. -// -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { - c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. 
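// Editorial sketch: listing default object ACLs with the List call above,
// guarded by ifMetagenerationMatch so the read fails (412) if the bucket's
// metageneration changed since it was observed. Bucket name and the
// metageneration value are placeholders.
package example

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func logDefaultACLs(ctx context.Context, svc *storage.Service, metageneration int64) error {
	acls, err := svc.DefaultObjectAccessControls.
		List("my-bucket").
		IfMetagenerationMatch(metageneration). // precondition, not a filter
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, e := range acls.Items {
		log.Printf("%s -> %s", e.Entity, e.Role)
	}
	return nil
}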
-func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.defaultObjectAccessControls.patch" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches a default object ACL entry on the specified bucket.", - // "httpMethod": "PATCH", - // "id": "storage.defaultObjectAccessControls.patch", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.update": - -type DefaultObjectAccessControlsUpdateCall struct { - s *Service - bucket string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a default object ACL entry on the specified bucket. -// -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { - c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. 
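// Editorial sketch: the Patch call above sends a partial ObjectAccessControl,
// so only the fields that are set change; the Update method that follows
// replaces the whole entry instead. Bucket and entity are placeholders.
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

func promoteToOwner(ctx context.Context, svc *storage.Service) (*storage.ObjectAccessControl, error) {
	return svc.DefaultObjectAccessControls.
		Patch("my-bucket", "user-jane@example.com", &storage.ObjectAccessControl{
			Role: "OWNER", // only the role changes
		}).
		Context(ctx).
		Do()
}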
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.defaultObjectAccessControls.update" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a default object ACL entry on the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.defaultObjectAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.notifications.delete": - -type NotificationsDeleteCall struct { - s *Service - bucket string - notification string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Permanently deletes a notification subscription. -// -// - bucket: The parent bucket of the notification. -// - notification: ID of the notification to delete. -func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall { - c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.notification = notification - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NotificationsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "notification": c.notification, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.notifications.delete" call. -func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) - } - return nil - // { - // "description": "Permanently deletes a notification subscription.", - // "httpMethod": "DELETE", - // "id": "storage.notifications.delete", - // "parameterOrder": [ - // "bucket", - // "notification" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the notification.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "notification": { - // "description": "ID of the notification to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs/{notification}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.notifications.get": - -type NotificationsGetCall struct { - s *Service - bucket string - notification string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: View a notification configuration. -// -// - bucket: The parent bucket of the notification. -// - notification: Notification ID. -func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall { - c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.notification = notification - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
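// Editorial sketch: deleting a Pub/Sub notification configuration with the
// Notifications.Delete call above. The notification ID is the string ID the
// server assigned at insert time; all names here are placeholders.
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

func dropNotification(ctx context.Context, svc *storage.Service, id string) error {
	return svc.Notifications.
		Delete("my-bucket", id).
		Context(ctx).
		Do()
}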
-func (c *NotificationsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "notification": c.notification, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.notifications.get" call. -// Exactly one of *Notification or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notification.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Notification{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "View a notification configuration.", - // "httpMethod": "GET", - // "id": "storage.notifications.get", - // "parameterOrder": [ - // "bucket", - // "notification" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the notification.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "notification": { - // "description": "Notification ID", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs/{notification}", - // "response": { - // "$ref": "Notification" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.notifications.insert": - -type NotificationsInsertCall struct { - s *Service - bucket string - notification *Notification - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a notification subscription for a given bucket. -// -// - bucket: The parent bucket of the notification. -func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall { - c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.notification = notification - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NotificationsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.notifications.insert" call. -// Exactly one of *Notification or error will be non-nil. Any non-2xx -// status code is an error. 
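// Editorial sketch: fetching one notification configuration with the
// Notifications.Get call above and printing the fields most callers care
// about; field names follow this package's Notification schema.
package example

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func describeNotification(ctx context.Context, svc *storage.Service, id string) error {
	n, err := svc.Notifications.Get("my-bucket", id).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("notification %s sends %s payloads to %s for events %v",
		n.Id, n.PayloadFormat, n.Topic, n.EventTypes)
	return nil
}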
Response headers are in either -// *Notification.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Notification{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a notification subscription for a given bucket.", - // "httpMethod": "POST", - // "id": "storage.notifications.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the notification.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs", - // "request": { - // "$ref": "Notification" - // }, - // "response": { - // "$ref": "Notification" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.notifications.list": - -type NotificationsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of notification subscriptions for a given -// bucket. -// -// - bucket: Name of a Google Cloud Storage bucket. -func (r *NotificationsService) List(bucket string) *NotificationsListCall { - c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
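// Editorial sketch: creating a notification subscription with the Insert
// call above. The Pub/Sub topic must already exist and the bucket's service
// agent needs publish rights on it; project and topic names are placeholders.
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

func watchBucket(ctx context.Context, svc *storage.Service) (*storage.Notification, error) {
	return svc.Notifications.
		Insert("my-bucket", &storage.Notification{
			Topic:         "//pubsub.googleapis.com/projects/my-project/topics/my-topic",
			PayloadFormat: "JSON_API_V1",
			EventTypes:    []string{"OBJECT_FINALIZE", "OBJECT_DELETE"},
		}).
		Context(ctx).
		Do()
}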
-func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NotificationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.notifications.list" call. -// Exactly one of *Notifications or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notifications.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Notifications{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of notification subscriptions for a given bucket.", - // "httpMethod": "GET", - // "id": "storage.notifications.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a Google Cloud Storage bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs", - // "response": { - // "$ref": "Notifications" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objectAccessControls.delete": - -type ObjectAccessControlsDeleteCall struct { - s *Service - bucket string - object string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Permanently deletes the ACL entry for the specified entity on -// the specified object. -// -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { - c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.entity = entity - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
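// Editorial sketch: enumerating a bucket's notification configurations with
// the List call above. The response is a single page; this listing takes no
// page token. Bucket name is a placeholder.
package example

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func logNotifications(ctx context.Context, svc *storage.Service) error {
	ns, err := svc.Notifications.List("my-bucket").Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, n := range ns.Items {
		log.Printf("%s -> %s", n.Id, n.Topic)
	}
	return nil
}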
-func (c *ObjectAccessControlsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objectAccessControls.delete" call. -func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) - } - return nil - // { - // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", - // "httpMethod": "DELETE", - // "id": "storage.objectAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.get": - -type ObjectAccessControlsGetCall struct { - s *Service - bucket string - object string - entity string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the ACL entry for the specified entity on the specified -// object. -// -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. 
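// Editorial sketch: removing an ACL entry from one specific object revision
// with the ObjectAccessControls.Delete call above. Generation pins the call
// to an archived revision; omit it to target the live version. All names
// are placeholders.
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

func revokePublicRead(ctx context.Context, svc *storage.Service, generation int64) error {
	return svc.ObjectAccessControls.
		Delete("my-bucket", "logs/2024-01-01.txt", "allUsers").
		Generation(generation).
		Context(ctx).
		Do()
}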
For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { - c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.entity = entity - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectAccessControlsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objectAccessControls.get" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the ACL entry for the specified entity on the specified object.", - // "httpMethod": "GET", - // "id": "storage.objectAccessControls.get", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.insert": - -type ObjectAccessControlsInsertCall struct { - s *Service - bucket string - object string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a new ACL entry on the specified object. -// -// - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. 
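// Editorial sketch: conditional re-reads with the Get call above. The first
// call records the entry's Etag; the second sets If-None-Match so the server
// answers 304 (surfaced via googleapi.IsNotModified) when nothing changed.
// Bucket, object, and entity names are placeholders.
package example

import (
	"context"
	"log"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func pollACLEntry(ctx context.Context, svc *storage.Service) error {
	get := func() *storage.ObjectAccessControlsGetCall {
		return svc.ObjectAccessControls.Get("my-bucket", "report.pdf", "user-jane@example.com")
	}
	acl, err := get().Context(ctx).Do()
	if err != nil {
		return err
	}
	_, err = get().IfNoneMatch(acl.Etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		log.Println("ACL entry unchanged since last read")
		return nil
	}
	return err
}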
-func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { - c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectAccessControlsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objectAccessControls.insert" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new ACL entry on the specified object.", - // "httpMethod": "POST", - // "id": "storage.objectAccessControls.insert", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.list": - -type ObjectAccessControlsListCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves ACL entries on the specified object. -// -// - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { - c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of If-None-Match. -func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectAccessControlsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objectAccessControls.list" call. -// Exactly one of *ObjectAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { - gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ObjectAccessControls{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves ACL entries on the specified object.", - // "httpMethod": "GET", - // "id": "storage.objectAccessControls.list", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl", - // "response": { - // "$ref": "ObjectAccessControls" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.patch": - -type ObjectAccessControlsPatchCall struct { - s *Service - bucket string - object string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches an ACL entry on the specified object. -// -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { - c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. 
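// A minimal usage sketch for the Patch builder introduced above, which
// updates only the fields set on the request body. The *storage.Service
// value, the entity string, and the billing project name are illustrative
// assumptions, not part of the diff.
package sketch

import (
	"context"

	"google.golang.org/api/storage/v1"
)

func demoteToReader(ctx context.Context, svc *storage.Service) error {
	patch := &storage.ObjectAccessControl{Role: "READER"} // only Role changes
	_, err := svc.ObjectAccessControls.
		Patch("example-bucket", "example-object", "user-jane@example.com", patch).
		UserProject("example-billing-project"). // required for Requester Pays buckets
		Context(ctx).
		Do()
	return err
}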
-func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectAccessControlsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objectAccessControls.patch" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches an ACL entry on the specified object.", - // "httpMethod": "PATCH", - // "id": "storage.objectAccessControls.patch", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.update": - -type ObjectAccessControlsUpdateCall struct { - s *Service - bucket string - object string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an ACL entry on the specified object. -// -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { - c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). 
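// A sketch of the Update call documented above, which (unlike Patch)
// replaces the whole ACL entry; the Generation builder pins the call to one
// object revision. All names and the generation value are placeholders.
package sketch

import (
	"context"

	"google.golang.org/api/storage/v1"
)

func replaceEntry(ctx context.Context, svc *storage.Service, generation int64) error {
	entry := &storage.ObjectAccessControl{
		Entity: "group-admins@example.com",
		Role:   "OWNER",
	}
	_, err := svc.ObjectAccessControls.
		Update("example-bucket", "example-object", entry.Entity, entry).
		Generation(generation). // target a specific revision, not the latest
		Context(ctx).
		Do()
	return err
}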
-func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectAccessControlsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objectAccessControls.update" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an ACL entry on the specified object.", - // "httpMethod": "PUT", - // "id": "storage.objectAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objects.compose": - -type ObjectsComposeCall struct { - s *Service - destinationBucket string - destinationObject string - composerequest *ComposeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Compose: Concatenates a list of existing objects into a new object in -// the same bucket. -// -// - destinationBucket: Name of the bucket containing the source -// objects. The destination object is stored in this bucket. -// - destinationObject: Name of the new object. For information about -// how to URL encode object names to be path safe, see Encoding URI -// Path Parts. -func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { - c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.composerequest = composerequest - return c -} - -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. 
-// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. -func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { - c.urlParams_.Set("kmsKeyName", kmsKeyName) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
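// A sketch of the Compose call whose option builders are documented above:
// it concatenates two source objects into one destination object in the same
// bucket. The ComposeRequest field names follow this package's schema and
// are assumptions here; bucket and object names are placeholders.
package sketch

import (
	"context"

	"google.golang.org/api/storage/v1"
)

func concatenateParts(ctx context.Context, svc *storage.Service) (*storage.Object, error) {
	req := &storage.ComposeRequest{
		Destination: &storage.Object{ContentType: "text/plain"},
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{Name: "part-0"},
			{Name: "part-1"},
		},
	}
	return svc.Objects.
		Compose("example-bucket", "merged.txt", req).
		IfGenerationMatch(0). // succeed only if no live destination object exists
		Context(ctx).
		Do()
}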
-func (c *ObjectsComposeCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.compose" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Concatenates a list of existing objects into a new object in the same bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.compose", - // "parameterOrder": [ - // "destinationBucket", - // "destinationObject" - // ], - // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{destinationBucket}/o/{destinationObject}/compose", - // "request": { - // "$ref": "ComposeRequest" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.copy": - -type ObjectsCopyCall struct { - s *Service - sourceBucket string - sourceObject string - destinationBucket string - destinationObject string - object *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Copy: Copies a source object to a destination object. Optionally -// overrides metadata. -// -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any. For information about how to URL encode object names to be path -// safe, see Encoding URI Path Parts. -// - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. Overrides the object metadata's -// name value, if any. -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts.
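// A minimal sketch of the Copy call just described. Passing an empty
// *storage.Object body carries the source metadata over unchanged; setting
// fields on it would override the destination's metadata instead. All bucket
// and object names are placeholders.
package sketch

import (
	"context"

	"google.golang.org/api/storage/v1"
)

func copyObject(ctx context.Context, svc *storage.Service) (*storage.Object, error) {
	return svc.Objects.
		Copy("src-bucket", "src.txt", "dst-bucket", "dst.txt", &storage.Object{}).
		Context(ctx).
		Do()
}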
-func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { - c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sourceBucket = sourceBucket - c.sourceObject = sourceObject - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.object = object - return c -} - -// DestinationKmsKeyName sets the optional parameter -// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the -// form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. -func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { - c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) - return c -} - -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the destination object's -// current generation matches the given value. Setting to 0 makes the -// operation succeed only if there are no live versions of the object. -func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the destination object's current generation does not match the given -// value. If no live object exists, the precondition fails. Setting to 0 -// makes the operation succeed only if there is a live version of the -// object. -func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the destination object's current metageneration matches the given -// value. -func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the destination object's current metageneration does not -// match the given value. 
-func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// IfSourceGenerationMatch sets the optional parameter -// "ifSourceGenerationMatch": Makes the operation conditional on whether -// the source object's current generation matches the given value. -func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) - return c -} - -// IfSourceGenerationNotMatch sets the optional parameter -// "ifSourceGenerationNotMatch": Makes the operation conditional on -// whether the source object's current generation does not match the -// given value. -func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) - return c -} - -// IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. -func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) - return c -} - -// IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. -func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { - c.urlParams_.Set("projection", projection) - return c -} - -// SourceGeneration sets the optional parameter "sourceGeneration": If -// present, selects a specific revision of the source object (as opposed -// to the latest version, the default). -func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { - c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
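// A sketch combining the SourceGeneration and Projection builders above to
// restore an archived revision by copying it over the live object; the
// generation value and all names are placeholders, not part of the diff.
package sketch

import (
	"context"

	"google.golang.org/api/storage/v1"
)

func restoreRevision(ctx context.Context, svc *storage.Service, generation int64) error {
	_, err := svc.Objects.
		Copy("example-bucket", "report.csv", "example-bucket", "report.csv", &storage.Object{}).
		SourceGeneration(generation). // read this archived revision of the source
		Projection("full").           // return all properties of the new object
		Context(ctx).
		Do()
	return err
}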
-func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsCopyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "sourceBucket": c.sourceBucket, - "sourceObject": c.sourceObject, - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.copy" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Copies a source object to a destination object. Optionally overrides metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.copy", - // "parameterOrder": [ - // "sourceBucket", - // "sourceObject", - // "destinationBucket", - // "destinationObject" - // ], - // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. 
Overrides the provided object metadata's bucket value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationKmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails.
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "sourceBucket": { - // "description": "Name of the bucket in which to find the source object.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "sourceGeneration": { - // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.delete": - -type ObjectsDeleteCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes an object and its metadata. Deletions are permanent -// if versioning is not enabled for the bucket, or if the generation -// parameter is used. -// -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { - c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// permanently deletes a specific revision of this object (as opposed to -// the latest version, the default). -func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. 
Required for Requester Pays buckets. -func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.delete" call. -func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) - } - return nil - // { - // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", - // "httpMethod": "DELETE", - // "id": "storage.objects.delete", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.get": - -type ObjectsGetCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Retrieves an object or its metadata. -// -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { - c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. 
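// A sketch tying Get and Delete together with the generation preconditions
// documented above: read the live metadata, then delete exactly the observed
// generation, so a concurrent overwrite cannot be removed by mistake. The
// Generation field on Object follows this package's schema and is an
// assumption here; names are placeholders.
package sketch

import (
	"context"

	"google.golang.org/api/storage/v1"
)

func deleteObserved(ctx context.Context, svc *storage.Service) error {
	obj, err := svc.Objects.Get("example-bucket", "tmp.log").Context(ctx).Do()
	if err != nil {
		return err
	}
	return svc.Objects.Delete("example-bucket", "tmp.log").
		IfGenerationMatch(obj.Generation). // fail if the object changed meanwhile
		Context(ctx).
		Do()
}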
-func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of If-None-Match. -func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do and Download -// methods. Any pending HTTP request will be aborted if the provided -// context is canceled. -func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Download fetches the API endpoint's "media" value, instead of the normal -// API response value.
If the returned error is nil, the Response is guaranteed to -// have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("media") - if err != nil { - return nil, err - } - if err := googleapi.CheckMediaResponse(res); err != nil { - res.Body.Close() - return nil, gensupport.WrapError(err) - } - return res, nil -} - -// Do executes the "storage.objects.get" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an object or its metadata.", - // "httpMethod": "GET", - // "id": "storage.objects.get", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true - // } - -} - -// method id "storage.objects.getIamPolicy": - -type ObjectsGetIamPolicyCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Returns an IAM policy for the specified object. -// -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { - c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of If-None-Match. -func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled.
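A usage sketch tying the pieces of ObjectsGetCall together — Do returns the parsed *Object metadata, Download streams the raw media, and IfNoneMatch pairs with googleapi.IsNotModified for cache revalidation (all names below are placeholders, and svc stands for a *storage.Service):

package example

import (
	"context"
	"io"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func readObject(ctx context.Context, svc *storage.Service, etag string) error {
	// Conditional metadata fetch: a 304 surfaces as an error that
	// googleapi.IsNotModified recognizes.
	obj, err := svc.Objects.Get("example-bucket", "path/to/object").
		IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return nil // cached copy is still current
	}
	if err != nil {
		return err
	}
	// Media download: the caller owns res.Body.
	res, err := svc.Objects.Get(obj.Bucket, obj.Name).Context(ctx).Download()
	if err != nil {
		return err
	}
	defer res.Body.Close()
	_, err = io.Copy(io.Discard, res.Body) // consume the stream
	return err
}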
-func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns an IAM policy for the specified object.", - // "httpMethod": "GET", - // "id": "storage.objects.getIamPolicy", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/iam", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.insert": - -type ObjectsInsertCall struct { - s *Service - bucket string - object *Object - urlParams_ gensupport.URLParams - mediaInfo_ *gensupport.MediaInfo - retry *gensupport.RetryConfig - ctx_ context.Context - header_ http.Header -} - -// Insert: Stores a new object and metadata. -// -// - bucket: Name of the bucket in which to store the new object. -// Overrides the provided object metadata's bucket value, if any. -func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { - c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// ContentEncoding sets the optional parameter "contentEncoding": If -// set, sets the contentEncoding property of the final object to this -// value. Setting this parameter is equivalent to setting the -// contentEncoding metadata property. This can be useful when uploading -// an object with uploadType=media to indicate the encoding of the -// content being uploaded. -func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { - c.urlParams_.Set("contentEncoding", contentEncoding) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. 
-func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. -func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { - c.urlParams_.Set("kmsKeyName", kmsKeyName) - return c -} - -// Name sets the optional parameter "name": Name of the object. Required -// when the object metadata is not otherwise provided. Overrides the -// object metadata's name value, if any. For information about how to -// URL encode object names to be path safe, see Encoding URI Path Parts. -func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { - c.urlParams_.Set("name", name) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Media specifies the media to upload in one or more chunks. The chunk -// size may be controlled by supplying a MediaOption generated by -// googleapi.ChunkSize. The chunk size defaults to -// googleapi.DefaultUploadChunkSize. The Content-Type header used in the -// upload request will be determined by sniffing the contents of r, -// unless a MediaOption generated by googleapi.ContentType is -// supplied. -// At most one of Media and ResumableMedia may be set. -func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { - if ct := c.object.ContentType; ct != "" { - options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...)
- } - c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options) - return c -} - -// ResumableMedia specifies the media to upload in chunks and can be -// canceled with ctx. -// -// Deprecated: use Media instead. -// -// At most one of Media and ResumableMedia may be set. mediaType -// identifies the MIME media type of the upload, such as "image/png". If -// mediaType is "", it will be auto-detected. The provided ctx will -// supersede any context previously provided to the Context method. -func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { - c.ctx_ = ctx - c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) - return c -} - -// ProgressUpdater provides a callback function that will be called -// after every chunk. It should be a low-latency function in order to -// not slow down the upload operation. This should only be called when -// using ResumableMedia (as opposed to Media). -func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { - c.mediaInfo_.SetProgressUpdater(pu) - return c -} - -// WithRetry causes the library to retry the initial request of the -// upload (for resumable uploads) or the entire upload (for multipart -// uploads) if a transient error occurs. This is contingent on ChunkSize -// being > 0 (so that the input data may be buffered). The backoff -// argument will be used to determine exponential backoff timing, and the -// errorFunc is used to determine which errors are considered retryable. -// By default, exponential backoff will be applied using gax defaults, and -// the following errors are retried: -// -// - HTTP responses with codes 408, 429, 502, 503, and 504. -// -// - Transient network errors such as connection reset and -// io.ErrUnexpectedEOF. -// -// - Errors which are considered transient using the Temporary() -// interface. -// -// - Wrapped versions of these errors. -func (c *ObjectsInsertCall) WithRetry(bo *gax.Backoff, errorFunc func(err error) bool) *ObjectsInsertCall { - c.retry = &gensupport.RetryConfig{ - Backoff: bo, - ShouldRetry: errorFunc, - } - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -// This context will supersede any context previously provided to the -// ResumableMedia method. -func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request.
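A sketch of the upload path just described: supplying a ChunkSize makes the upload resumable and gives WithRetry buffered data to re-send, per the Media and WithRetry docs above (gax here is github.com/googleapis/gax-go/v2; all names are placeholders):

package example

import (
	"context"
	"io"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func upload(ctx context.Context, svc *storage.Service, r io.Reader) (*storage.Object, error) {
	return svc.Objects.Insert("example-bucket", &storage.Object{Name: "path/to/object"}).
		Media(r, googleapi.ChunkSize(8*1024*1024)). // ChunkSize > 0 so chunks can be re-sent
		// A nil errorFunc falls back to the default retry predicate
		// described in the WithRetry doc above.
		WithRetry(&gax.Backoff{Initial: time.Second, Max: 32 * time.Second, Multiplier: 2}, nil).
		Context(ctx).
		Do()
}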
-func (c *ObjectsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") - if c.mediaInfo_ != nil { - urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") - c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) - } - if body == nil { - body = new(bytes.Buffer) - reqHeaders.Set("Content-Type", "application/json") - } - body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) - defer cleanup() - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - req.GetBody = getBody - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - if c.retry != nil { - return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req, c.retry) - } - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.insert" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) - if rx != nil { - rx.Client = c.s.client - rx.UserAgent = c.s.userAgent() - rx.Retry = c.retry - ctx := c.ctx_ - if ctx == nil { - ctx = context.TODO() - } - res, err = rx.Upload(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Stores a new object and metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.insert", - // "mediaUpload": { - // "accept": [ - // "*/*" - // ], - // "protocols": { - // "resumable": { - // "multipart": true, - // "path": "/resumable/upload/storage/v1/b/{bucket}/o" - // }, - // "simple": { - // "multipart": true, - // "path": "/upload/storage/v1/b/{bucket}/o" - // } - // } - // }, - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "contentEncoding": { - // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaUpload": true - // } - -} - -// method id "storage.objects.list": - -type ObjectsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of objects matching the criteria. -// -// - bucket: Name of the bucket in which to look for objects. 
-func (r *ObjectsService) List(bucket string) *ObjectsListCall { - c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. items will contain only objects whose names, -// aside from the prefix, do not contain delimiter. Objects whose names, -// aside from the prefix, contain delimiter will have their name, -// truncated after the delimiter, returned in prefixes. Duplicate -// prefixes are omitted. -func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { - c.urlParams_.Set("delimiter", delimiter) - return c -} - -// EndOffset sets the optional parameter "endOffset": Filter results to -// objects whose names are lexicographically before endOffset. If -// startOffset is also set, the objects listed will have names between -// startOffset (inclusive) and endOffset (exclusive). -func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall { - c.urlParams_.Set("endOffset", endOffset) - return c -} - -// IncludeTrailingDelimiter sets the optional parameter -// "includeTrailingDelimiter": If true, objects that end in exactly one -// instance of delimiter will have their metadata included in items in -// addition to prefixes. -func (c *ObjectsListCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsListCall { - c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) - return c -} - -// MatchGlob sets the optional parameter "matchGlob": Filter results to -// objects and prefixes that match this glob pattern. -func (c *ObjectsListCall) MatchGlob(matchGlob string) *ObjectsListCall { - c.urlParams_.Set("matchGlob", matchGlob) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items plus prefixes to return in a single page of responses. As -// duplicate prefixes are omitted, fewer total results may be returned -// than requested. The service will use this parameter or 1,000 items, -// whichever is smaller. -func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Prefix sets the optional parameter "prefix": Filter results to -// objects whose names begin with this prefix. -func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { - c.urlParams_.Set("projection", projection) - return c -} - -// StartOffset sets the optional parameter "startOffset": Filter results -// to objects whose names are lexicographically equal to or after -// startOffset. If endOffset is also set, the objects listed will have -// names between startOffset (inclusive) and endOffset (exclusive). 
-func (c *ObjectsListCall) StartOffset(startOffset string) *ObjectsListCall { - c.urlParams_.Set("startOffset", startOffset) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Versions sets the optional parameter "versions": If true, lists all -// versions of an object as distinct results. The default is false. For -// more information, see Object Versioning. -func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { - c.urlParams_.Set("versions", fmt.Sprint(versions)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of If-None-Match. -func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.list" call. -// Exactly one of *Objects or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Objects.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { - gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Objects{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of objects matching the criteria.", - // "httpMethod": "GET", - // "id": "storage.objects.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to look for objects.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - // "location": "query", - // "type": "string" - // }, - // "endOffset": { - // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "includeTrailingDelimiter": { - // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", - // "location": "query", - // "type": "boolean" - // }, - // "matchGlob": { - // "description": "Filter results to objects and prefixes that match this glob pattern.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to objects whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "startOffset": { - // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. 
If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // }, - // "versions": { - // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "b/{bucket}/o", - // "response": { - // "$ref": "Objects" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsSubscription": true - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "storage.objects.patch": - -type ObjectsPatchCall struct { - s *Service - bucket string - object string - object2 *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches an object's metadata. -// -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { - c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.object2 = object2 - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. 
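The Pages helper shown above removes the manual pageToken loop; a sketch (placeholder names) of walking every object under a prefix:

package example

import (
	"context"
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

func listPrefix(ctx context.Context, svc *storage.Service) error {
	call := svc.Objects.List("example-bucket").Prefix("logs/").MaxResults(500)
	return call.Pages(ctx, func(page *storage.Objects) error {
		for _, obj := range page.Items {
			fmt.Println(obj.Name, obj.Size)
		}
		return nil // a non-nil error here halts the iteration
	})
}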
-func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. -func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
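A sketch of a guarded metadata patch with the ObjectsPatchCall options above (placeholder names): only the fields set on the patch object change, and the metageneration precondition turns a concurrent metadata write into an error instead of a silent overwrite:

package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

func retype(ctx context.Context, svc *storage.Service, metagen int64) (*storage.Object, error) {
	return svc.Objects.Patch("example-bucket", "path/to/object", &storage.Object{
		ContentType: "application/json", // the only field this patch touches
	}).
		IfMetagenerationMatch(metagen). // fail rather than clobber a concurrent update
		Context(ctx).
		Do()
}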
-func (c *ObjectsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.patch" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches an object's metadata.", - // "httpMethod": "PATCH", - // "id": "storage.objects.patch", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objects.rewrite": - -type ObjectsRewriteCall struct { - s *Service - sourceBucket string - sourceObject string - destinationBucket string - destinationObject string - object *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Rewrite: Rewrites a source object to a destination object. Optionally -// overrides metadata. -// -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any. -// - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. Overrides the object metadata's -// name value, if any. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts. -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts. 
-func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { - c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sourceBucket = sourceBucket - c.sourceObject = sourceObject - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.object = object - return c -} - -// DestinationKmsKeyName sets the optional parameter -// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the -// form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. -func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { - c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) - return c -} - -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the destination object's current metageneration matches the given -// value. -func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the destination object's current metageneration does not -// match the given value. 
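Large or cross-location rewrites may not finish in one call; a sketch (placeholder names) of the rewriteToken handshake described just below, feeding each response's token back in until the response reports done:

package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

func copyObject(ctx context.Context, svc *storage.Service) (*storage.Object, error) {
	call := svc.Objects.Rewrite("src-bucket", "src/object", "dst-bucket", "dst/object", &storage.Object{})
	for {
		res, err := call.Context(ctx).Do()
		if err != nil {
			return nil, err
		}
		if res.Done {
			return res.Resource, nil // metadata of the completed destination object
		}
		call.RewriteToken(res.RewriteToken) // required on every follow-up call
	}
}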
-func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// IfSourceGenerationMatch sets the optional parameter -// "ifSourceGenerationMatch": Makes the operation conditional on whether -// the source object's current generation matches the given value. -func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) - return c -} - -// IfSourceGenerationNotMatch sets the optional parameter -// "ifSourceGenerationNotMatch": Makes the operation conditional on -// whether the source object's current generation does not match the -// given value. -func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) - return c -} - -// IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. -func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) - return c -} - -// IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. -func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) - return c -} - -// MaxBytesRewrittenPerCall sets the optional parameter -// "maxBytesRewrittenPerCall": The maximum number of bytes that will be -// rewritten per rewrite request. Most callers shouldn't need to specify -// this parameter - it is primarily in place to support testing. If -// specified the value must be an integral multiple of 1 MiB (1048576). -// Also, this only applies to requests where the source and destination -// span locations and/or storage classes. Finally, this value must not -// change across rewrite calls else you'll get an error that the -// rewriteToken is invalid. -func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { - c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { - c.urlParams_.Set("projection", projection) - return c -} - -// RewriteToken sets the optional parameter "rewriteToken": Include this -// field (from the previous rewrite response) on each rewrite request -// after the first one, until the rewrite response 'done' flag is true. 
-// Calls that provide a rewriteToken can omit all other request fields, -// but if included those fields must match the values provided in the -// first rewrite request. -func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { - c.urlParams_.Set("rewriteToken", rewriteToken) - return c -} - -// SourceGeneration sets the optional parameter "sourceGeneration": If -// present, selects a specific revision of the source object (as opposed -// to the latest version, the default). -func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { - c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsRewriteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "sourceBucket": c.sourceBucket, - "sourceObject": c.sourceObject, - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.rewrite" call. -// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RewriteResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &RewriteResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.rewrite", - // "parameterOrder": [ - // "sourceBucket", - // "sourceObject", - // "destinationBucket", - // "destinationObject" - // ], - // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationKmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "maxBytesRewrittenPerCall": { - // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "rewriteToken": { - // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", - // "location": "query", - // "type": "string" - // }, - // "sourceBucket": { - // "description": "Name of the bucket in which to find the source object.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "sourceGeneration": { - // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "RewriteResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.setIamPolicy": - -type ObjectsSetIamPolicyCall struct { - s *Service - bucket string - object string - policy *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Updates an IAM policy for the specified object. -// -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { - c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.policy = policy - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
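[Editor's note: the hunk above removes the generated binding for storage.objects.rewrite. As review context, here is a minimal usage sketch of the rewrite-token protocol its doc comments describe. It assumes storage.NewService from the same generated package and Application Default Credentials; the bucket and object names are placeholders, not values from this PR.]

```go
package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// A rewrite may not finish in a single call; per the doc comments above,
	// resume with the returned rewriteToken until Done is reported.
	call := svc.Objects.Rewrite("src-bucket", "big-object", "dst-bucket", "big-object", &storage.Object{})
	for {
		resp, err := call.Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		if resp.Done {
			fmt.Printf("rewrote %d bytes\n", resp.TotalBytesRewritten)
			break
		}
		call.RewriteToken(resp.RewriteToken) // carry the token into the next call
	}
}
```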
-func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an IAM policy for the specified object.", - // "httpMethod": "PUT", - // "id": "storage.objects.setIamPolicy", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/iam", - // "request": { - // "$ref": "Policy" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.testIamPermissions": - -type ObjectsTestIamPermissionsCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Tests a set of permissions on the given object to -// see which, if any, are held by the caller. -// -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -// - permissions: Permissions to test. -func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { - c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of If-None-Match. -func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request.
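[Editor's note: a hypothetical read-modify-write against the storage.objects.setIamPolicy binding removed above. GetIamPolicy, the PolicyBindings type, and the role/member strings are assumptions drawn from the same generated package, not from this hunk; svc and ctx reuse the setup from the rewrite sketch.]

```go
// Assumes: import ( "context"; storage "google.golang.org/api/storage/v1" )

// grantObjectReader adds a reader binding to one object's IAM policy.
func grantObjectReader(ctx context.Context, svc *storage.Service) error {
	policy, err := svc.Objects.GetIamPolicy("my-bucket", "report.csv").Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &storage.PolicyBindings{
		Role:    "roles/storage.legacyObjectReader",
		Members: []string{"user:reader@example.com"},
	})
	// SetIamPolicy replaces the whole policy, so send back the mutated copy.
	_, err = svc.Objects.SetIamPolicy("my-bucket", "report.csv", policy).Context(ctx).Do()
	return err
}
```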
-func (c *ObjectsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", - // "httpMethod": "GET", - // "id": "storage.objects.testIamPermissions", - // "parameterOrder": [ - // "bucket", - // "object", - // "permissions" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "permissions": { - // "description": "Permissions to test.", - // "location": "query", - // "repeated": true, - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/iam/testPermissions", - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.update": - -type ObjectsUpdateCall struct { - s *Service - bucket string - object string - object2 *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an object's metadata. -// -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { - c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.object2 = object2 - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. 
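[Editor's note: a small sketch for the storage.objects.testIamPermissions binding removed above. The permission strings are illustrative, and the Permissions field (the held subset) is assumed from the TestIamPermissionsResponse type in the same generated package; imports and svc/ctx setup as in the rewrite sketch.]

```go
// Assumes: import ( "context"; "fmt"; storage "google.golang.org/api/storage/v1" )

// checkPerms reports which of the requested permissions the caller holds.
func checkPerms(ctx context.Context, svc *storage.Service) error {
	resp, err := svc.Objects.TestIamPermissions("my-bucket", "report.csv", []string{
		"storage.objects.get",
		"storage.objects.delete",
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	// resp.Permissions is the (possibly empty) subset that is held.
	fmt.Println("held:", resp.Permissions)
	return nil
}
```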
-func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ObjectsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.update" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an object's metadata.", - // "httpMethod": "PUT", - // "id": "storage.objects.update", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objects.watchAll": - -type ObjectsWatchAllCall struct { - s *Service - bucket string - channel *Channel - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// WatchAll: Watch for changes on all objects in a bucket. -// -// - bucket: Name of the bucket in which to look for objects. -func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { - c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.channel = channel - return c -} - -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. items will contain only objects whose names, -// aside from the prefix, do not contain delimiter. Objects whose names, -// aside from the prefix, contain delimiter will have their name, -// truncated after the delimiter, returned in prefixes. Duplicate -// prefixes are omitted. 
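[Editor's note: a sketch for the storage.objects.update binding removed above. Update issues a PUT that replaces the writable metadata wholesale (the partial-update sibling is storage.objects.patch), so the metageneration precondition shown in the hunk is the usual guard against concurrent edits; the field values and the metageneration are placeholders.]

```go
// Assumes: import ( "context"; storage "google.golang.org/api/storage/v1" )

// updateMetadata rewrites an object's writable metadata in full.
func updateMetadata(ctx context.Context, svc *storage.Service) error {
	obj := &storage.Object{
		ContentType: "text/plain; charset=utf-8",
		Metadata:    map[string]string{"owner": "data-team"},
	}
	_, err := svc.Objects.Update("my-bucket", "notes.txt", obj).
		IfMetagenerationMatch(3). // placeholder value for illustration
		Context(ctx).
		Do()
	return err
}
```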
-func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { - c.urlParams_.Set("delimiter", delimiter) - return c -} - -// EndOffset sets the optional parameter "endOffset": Filter results to -// objects whose names are lexicographically before endOffset. If -// startOffset is also set, the objects listed will have names between -// startOffset (inclusive) and endOffset (exclusive). -func (c *ObjectsWatchAllCall) EndOffset(endOffset string) *ObjectsWatchAllCall { - c.urlParams_.Set("endOffset", endOffset) - return c -} - -// IncludeTrailingDelimiter sets the optional parameter -// "includeTrailingDelimiter": If true, objects that end in exactly one -// instance of delimiter will have their metadata included in items in -// addition to prefixes. -func (c *ObjectsWatchAllCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsWatchAllCall { - c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items plus prefixes to return in a single page of responses. As -// duplicate prefixes are omitted, fewer total results may be returned -// than requested. The service will use this parameter or 1,000 items, -// whichever is smaller. -func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Prefix sets the optional parameter "prefix": Filter results to -// objects whose names begin with this prefix. -func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { - c.urlParams_.Set("projection", projection) - return c -} - -// StartOffset sets the optional parameter "startOffset": Filter results -// to objects whose names are lexicographically equal to or after -// startOffset. If endOffset is also set, the objects listed will have -// names between startOffset (inclusive) and endOffset (exclusive). -func (c *ObjectsWatchAllCall) StartOffset(startOffset string) *ObjectsWatchAllCall { - c.urlParams_.Set("startOffset", startOffset) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Versions sets the optional parameter "versions": If true, lists all -// versions of an object as distinct results. The default is false. For -// more information, see Object Versioning. -func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { - c.urlParams_.Set("versions", fmt.Sprint(versions)) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ObjectsWatchAllCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.objects.watchAll" call. -// Exactly one of *Channel or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Channel.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Channel{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Watch for changes on all objects in a bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.watchAll", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to look for objects.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. 
Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - // "location": "query", - // "type": "string" - // }, - // "endOffset": { - // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "includeTrailingDelimiter": { - // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to objects whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "startOffset": { - // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // }, - // "versions": { - // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "b/{bucket}/o/watch", - // "request": { - // "$ref": "Channel", - // "parameterName": "resource" - // }, - // "response": { - // "$ref": "Channel" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsSubscription": true - // } - -} - -// method id "storage.projects.hmacKeys.create": - -type ProjectsHmacKeysCreateCall struct { - s *Service - projectId string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Creates a new HMAC key for the specified service account. -// -// - projectId: Project ID owning the service account. -// - serviceAccountEmail: Email address of the service account. 
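[Editor's note: a sketch for the storage.objects.watchAll binding removed above. The Channel fields (Id, Type, Address, ResourceId) are assumed from the same generated package, the Id is caller-chosen and must be unique, and the endpoint URL is a placeholder.]

```go
// Assumes: import ( "context"; "fmt"; storage "google.golang.org/api/storage/v1" )

// watchBucket opens a web_hook notification channel for object changes.
func watchBucket(ctx context.Context, svc *storage.Service) error {
	ch := &storage.Channel{
		Id:      "example-channel-id",           // caller-chosen, unique
		Type:    "web_hook",
		Address: "https://example.com/gcs-notify", // HTTPS endpoint you control
	}
	created, err := svc.Objects.WatchAll("my-bucket", ch).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Keep ResourceId around: it is needed to stop the channel later.
	fmt.Println("watching, resourceId:", created.ResourceId)
	return nil
}
```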
-func (r *ProjectsHmacKeysService) Create(projectId string, serviceAccountEmail string) *ProjectsHmacKeysCreateCall { - c := &ProjectsHmacKeysCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.projectId = projectId - c.urlParams_.Set("serviceAccountEmail", serviceAccountEmail) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *ProjectsHmacKeysCreateCall) UserProject(userProject string) *ProjectsHmacKeysCreateCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsHmacKeysCreateCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsHmacKeysCreateCall) Context(ctx context.Context) *ProjectsHmacKeysCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsHmacKeysCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "projectId": c.projectId, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.projects.hmacKeys.create" call. -// Exactly one of *HmacKey or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *HmacKey.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &HmacKey{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new HMAC key for the specified service account.", - // "httpMethod": "POST", - // "id": "storage.projects.hmacKeys.create", - // "parameterOrder": [ - // "projectId", - // "serviceAccountEmail" - // ], - // "parameters": { - // "projectId": { - // "description": "Project ID owning the service account.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "serviceAccountEmail": { - // "description": "Email address of the service account.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/hmacKeys", - // "response": { - // "$ref": "HmacKey" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.projects.hmacKeys.delete": - -type ProjectsHmacKeysDeleteCall struct { - s *Service - projectId string - accessId string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes an HMAC key. -// -// - accessId: Name of the HMAC key to be deleted. -// - projectId: Project ID owning the requested key. -func (r *ProjectsHmacKeysService) Delete(projectId string, accessId string) *ProjectsHmacKeysDeleteCall { - c := &ProjectsHmacKeysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.projectId = projectId - c.accessId = accessId - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *ProjectsHmacKeysDeleteCall) UserProject(userProject string) *ProjectsHmacKeysDeleteCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsHmacKeysDeleteCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsHmacKeysDeleteCall) Context(ctx context.Context) *ProjectsHmacKeysDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
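[Editor's note: a sketch for the storage.projects.hmacKeys.create binding removed above; the service-account email and project ID are placeholders. That the Secret is returned only at creation time follows from Cloud Storage's HMAC-key semantics rather than from this hunk.]

```go
// Assumes: import ( "context"; "fmt"; storage "google.golang.org/api/storage/v1" )

// createHmacKey mints an HMAC key for a service account.
func createHmacKey(ctx context.Context, svc *storage.Service) error {
	key, err := svc.Projects.HmacKeys.Create(
		"my-project",
		"builder@my-project.iam.gserviceaccount.com", // placeholder account
	).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("accessId:", key.Metadata.AccessId)
	// key.Secret must be stored securely here; it cannot be retrieved again.
	return nil
}
```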
-func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "projectId": c.projectId, - "accessId": c.accessId, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.projects.hmacKeys.delete" call. -func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) - } - return nil - // { - // "description": "Deletes an HMAC key.", - // "httpMethod": "DELETE", - // "id": "storage.projects.hmacKeys.delete", - // "parameterOrder": [ - // "projectId", - // "accessId" - // ], - // "parameters": { - // "accessId": { - // "description": "Name of the HMAC key to be deleted.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projectId": { - // "description": "Project ID owning the requested key", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/hmacKeys/{accessId}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.projects.hmacKeys.get": - -type ProjectsHmacKeysGetCall struct { - s *Service - projectId string - accessId string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Retrieves an HMAC key's metadata -// -// - accessId: Name of the HMAC key. -// - projectId: Project ID owning the service account of the requested -// key. -func (r *ProjectsHmacKeysService) Get(projectId string, accessId string) *ProjectsHmacKeysGetCall { - c := &ProjectsHmacKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.projectId = projectId - c.accessId = accessId - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *ProjectsHmacKeysGetCall) UserProject(userProject string) *ProjectsHmacKeysGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
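[Editor's note: a sketch for the storage.projects.hmacKeys.delete binding removed above. The INACTIVE-before-delete requirement and the HmacKeys.Update call come from the service's HMAC-key lifecycle and the same generated package, not from this hunk, so treat them as assumptions.]

```go
// Assumes: import ( "context"; storage "google.golang.org/api/storage/v1" )

// deleteHmacKey deactivates a key and then deletes it; keys must be in the
// INACTIVE state before storage.projects.hmacKeys.delete will succeed.
func deleteHmacKey(ctx context.Context, svc *storage.Service, accessId string) error {
	_, err := svc.Projects.HmacKeys.Update("my-project", accessId,
		&storage.HmacKeyMetadata{State: "INACTIVE"}).Context(ctx).Do()
	if err != nil {
		return err
	}
	return svc.Projects.HmacKeys.Delete("my-project", accessId).Context(ctx).Do()
}
```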
-func (c *ProjectsHmacKeysGetCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsHmacKeysGetCall) IfNoneMatch(entityTag string) *ProjectsHmacKeysGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsHmacKeysGetCall) Context(ctx context.Context) *ProjectsHmacKeysGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsHmacKeysGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "projectId": c.projectId, - "accessId": c.accessId, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.projects.hmacKeys.get" call. -// Exactly one of *HmacKeyMetadata or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HmacKeyMetadata.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMetadata, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &HmacKeyMetadata{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an HMAC key's metadata", - // "httpMethod": "GET", - // "id": "storage.projects.hmacKeys.get", - // "parameterOrder": [ - // "projectId", - // "accessId" - // ], - // "parameters": { - // "accessId": { - // "description": "Name of the HMAC key.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projectId": { - // "description": "Project ID owning the service account of the requested key.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/hmacKeys/{accessId}", - // "response": { - // "$ref": "HmacKeyMetadata" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only" - // ] - // } - -} - -// method id "storage.projects.hmacKeys.list": - -type ProjectsHmacKeysListCall struct { - s *Service - projectId string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of HMAC keys matching the criteria. -// -// - projectId: Name of the project in which to look for HMAC keys. -func (r *ProjectsHmacKeysService) List(projectId string) *ProjectsHmacKeysListCall { - c := &ProjectsHmacKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.projectId = projectId - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items to return in a single page of responses. The service uses -// this parameter or 250 items, whichever is smaller. The max number of -// items per page will also be limited by the number of distinct service -// accounts in the response. If the number of service accounts in a -// single response is too high, the page will truncated and a next page -// token will be returned. -func (c *ProjectsHmacKeysListCall) MaxResults(maxResults int64) *ProjectsHmacKeysListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *ProjectsHmacKeysListCall) PageToken(pageToken string) *ProjectsHmacKeysListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ServiceAccountEmail sets the optional parameter -// "serviceAccountEmail": If present, only keys for the given service -// account are returned. 
-func (c *ProjectsHmacKeysListCall) ServiceAccountEmail(serviceAccountEmail string) *ProjectsHmacKeysListCall { - c.urlParams_.Set("serviceAccountEmail", serviceAccountEmail) - return c -} - -// ShowDeletedKeys sets the optional parameter "showDeletedKeys": -// Whether or not to show keys in the DELETED state. -func (c *ProjectsHmacKeysListCall) ShowDeletedKeys(showDeletedKeys bool) *ProjectsHmacKeysListCall { - c.urlParams_.Set("showDeletedKeys", fmt.Sprint(showDeletedKeys)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *ProjectsHmacKeysListCall) UserProject(userProject string) *ProjectsHmacKeysListCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsHmacKeysListCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsHmacKeysListCall) IfNoneMatch(entityTag string) *ProjectsHmacKeysListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsHmacKeysListCall) Context(ctx context.Context) *ProjectsHmacKeysListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsHmacKeysListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "projectId": c.projectId, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.projects.hmacKeys.list" call. -// Exactly one of *HmacKeysMetadata or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HmacKeysMetadata.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMetadata, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &HmacKeysMetadata{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of HMAC keys matching the criteria.", - // "httpMethod": "GET", - // "id": "storage.projects.hmacKeys.list", - // "parameterOrder": [ - // "projectId" - // ], - // "parameters": { - // "maxResults": { - // "default": "250", - // "description": "Maximum number of items to return in a single page of responses. The service uses this parameter or 250 items, whichever is smaller. The max number of items per page will also be limited by the number of distinct service accounts in the response. If the number of service accounts in a single response is too high, the page will truncated and a next page token will be returned.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "projectId": { - // "description": "Name of the project in which to look for HMAC keys.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "serviceAccountEmail": { - // "description": "If present, only keys for the given service account are returned.", - // "location": "query", - // "type": "string" - // }, - // "showDeletedKeys": { - // "description": "Whether or not to show keys in the DELETED state.", - // "location": "query", - // "type": "boolean" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/hmacKeys", - // "response": { - // "$ref": "HmacKeysMetadata" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsHmacKeysListCall) Pages(ctx context.Context, f func(*HmacKeysMetadata) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "storage.projects.hmacKeys.update": - -type ProjectsHmacKeysUpdateCall struct { - s *Service - projectId string - accessId string - hmackeymetadata *HmacKeyMetadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the state of an HMAC key. See the HMAC Key resource -// descriptor for valid states. -// -// - accessId: Name of the HMAC key being updated. -// - projectId: Project ID owning the service account of the updated -// key. -func (r *ProjectsHmacKeysService) Update(projectId string, accessId string, hmackeymetadata *HmacKeyMetadata) *ProjectsHmacKeysUpdateCall { - c := &ProjectsHmacKeysUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.projectId = projectId - c.accessId = accessId - c.hmackeymetadata = hmackeymetadata - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *ProjectsHmacKeysUpdateCall) UserProject(userProject string) *ProjectsHmacKeysUpdateCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsHmacKeysUpdateCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsHmacKeysUpdateCall) Context(ctx context.Context) *ProjectsHmacKeysUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.hmackeymetadata) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "projectId": c.projectId, - "accessId": c.accessId, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.projects.hmacKeys.update" call. -// Exactly one of *HmacKeyMetadata or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *HmacKeyMetadata.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyMetadata, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &HmacKeyMetadata{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the state of an HMAC key. See the HMAC Key resource descriptor for valid states.", - // "httpMethod": "PUT", - // "id": "storage.projects.hmacKeys.update", - // "parameterOrder": [ - // "projectId", - // "accessId" - // ], - // "parameters": { - // "accessId": { - // "description": "Name of the HMAC key being updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projectId": { - // "description": "Project ID owning the service account of the updated key.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/hmacKeys/{accessId}", - // "request": { - // "$ref": "HmacKeyMetadata" - // }, - // "response": { - // "$ref": "HmacKeyMetadata" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.projects.serviceAccount.get": - -type ProjectsServiceAccountGetCall struct { - s *Service - projectId string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Get the email address of this project's Google Cloud Storage -// service account. -// -// - projectId: Project ID. -func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAccountGetCall { - c := &ProjectsServiceAccountGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.projectId = projectId - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsServiceAccountGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsServiceAccountGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsServiceAccountGetCall) Context(ctx context.Context) *ProjectsServiceAccountGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsServiceAccountGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "projectId": c.projectId, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.projects.serviceAccount.get" call. -// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ServiceAccount.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ServiceAccount{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Get the email address of this project's Google Cloud Storage service account.", - // "httpMethod": "GET", - // "id": "storage.projects.serviceAccount.get", - // "parameterOrder": [ - // "projectId" - // ], - // "parameters": { - // "projectId": { - // "description": "Project ID", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/serviceAccount", - // "response": { - // "$ref": "ServiceAccount" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go deleted file mode 100644 index 652b8eba51..0000000000 --- a/vendor/google.golang.org/api/transport/dial.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2015 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package transport - -import ( - "context" - "net/http" - - "golang.org/x/oauth2/google" - "google.golang.org/grpc" - - "google.golang.org/api/internal" - "google.golang.org/api/option" - gtransport "google.golang.org/api/transport/grpc" - htransport "google.golang.org/api/transport/http" -) - -// NewHTTPClient returns an HTTP client for use communicating with a Google cloud -// service, configured with the given ClientOptions. It also returns the endpoint -// for the service as specified in the options. -func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { - return htransport.NewClient(ctx, opts...) -} - -// DialGRPC returns a GRPC connection for use communicating with a Google cloud -// service, configured with the given ClientOptions. -func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { - return gtransport.Dial(ctx, opts...) -} - -// DialGRPCInsecure returns an insecure GRPC connection for use communicating -// with fake or mock Google cloud service implementations, such as emulators. -// The connection is configured with the given ClientOptions. -func DialGRPCInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { - return gtransport.DialInsecure(ctx, opts...) -} - -// Creds constructs a google.Credentials from the information in the options, -// or obtains the default credentials in the same way as google.FindDefaultCredentials. 
-func Creds(ctx context.Context, opts ...option.ClientOption) (*google.Credentials, error) { - var ds internal.DialSettings - for _, opt := range opts { - opt.Apply(&ds) - } - return internal.Creds(ctx, &ds) -} diff --git a/vendor/google.golang.org/api/transport/doc.go b/vendor/google.golang.org/api/transport/doc.go deleted file mode 100644 index 7143abee45..0000000000 --- a/vendor/google.golang.org/api/transport/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package transport provides utility methods for creating authenticated -// transports to Google's HTTP and gRPC APIs. It is intended to be used in -// conjunction with google.golang.org/api/option. -// -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. -package transport diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go deleted file mode 100644 index e1403e08ee..0000000000 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright 2015 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package grpc supports network connections to GRPC servers. -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. -package grpc - -import ( - "context" - "errors" - "log" - "net" - "os" - "strings" - - "cloud.google.com/go/compute/metadata" - "go.opencensus.io/plugin/ocgrpc" - "golang.org/x/oauth2" - "google.golang.org/api/internal" - "google.golang.org/api/option" - "google.golang.org/grpc" - grpcgoogle "google.golang.org/grpc/credentials/google" - grpcinsecure "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/credentials/oauth" - - // Install grpclb, which is required for direct path. - _ "google.golang.org/grpc/balancer/grpclb" -) - -// Check env to disable DirectPath traffic. -const disableDirectPath = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH" - -// Check env to decide if using google-c2p resolver for DirectPath traffic. -const enableDirectPathXds = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" - -// Set at init time by dial_appengine.go. If nil, we're not on App Engine. -var appengineDialerHook func(context.Context) grpc.DialOption - -// Set at init time by dial_socketopt.go. If nil, socketopt is not supported. -var timeoutDialerOption grpc.DialOption - -// Dial returns a GRPC connection for use communicating with a Google cloud -// service, configured with the given ClientOptions. -func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { - o, err := processAndValidateOpts(opts) - if err != nil { - return nil, err - } - if o.GRPCConnPool != nil { - return o.GRPCConnPool.Conn(), nil - } - // NOTE(cbro): We removed support for option.WithGRPCConnPool (GRPCConnPoolSize) - // on 2020-02-12 because RoundRobin and WithBalancer are deprecated and we need to remove usages of it. - // - // Connection pooling is only done via DialPool. - return dial(ctx, false, o) -} - -// DialInsecure returns an insecure GRPC connection for use communicating -// with fake or mock Google cloud service implementations, such as emulators. -// The connection is configured with the given ClientOptions. 
-func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { - o, err := processAndValidateOpts(opts) - if err != nil { - return nil, err - } - return dial(ctx, true, o) -} - -// DialPool returns a pool of GRPC connections for the given service. -// This differs from the connection pooling implementation used by Dial, which uses a custom GRPC load balancer. -// DialPool should be used instead of Dial when a pool is used by default or a different custom GRPC load balancer is needed. -// The context and options are shared between each Conn in the pool. -// The pool size is configured using the WithGRPCConnectionPool option. -// -// This API is subject to change as we further refine requirements. It will go away if gRPC stubs accept an interface instead of the concrete ClientConn type. See https://github.com/grpc/grpc-go/issues/1287. -func DialPool(ctx context.Context, opts ...option.ClientOption) (ConnPool, error) { - o, err := processAndValidateOpts(opts) - if err != nil { - return nil, err - } - if o.GRPCConnPool != nil { - return o.GRPCConnPool, nil - } - poolSize := o.GRPCConnPoolSize - if o.GRPCConn != nil { - // WithGRPCConn is technically incompatible with WithGRPCConnectionPool. - // Always assume pool size is 1 when a grpc.ClientConn is explicitly used. - poolSize = 1 - } - o.GRPCConnPoolSize = 0 // we don't *need* to set this to zero, but it's safe to. - - if poolSize == 0 || poolSize == 1 { - // Fast path for common case for a connection pool with a single connection. - conn, err := dial(ctx, false, o) - if err != nil { - return nil, err - } - return &singleConnPool{conn}, nil - } - - pool := &roundRobinConnPool{} - for i := 0; i < poolSize; i++ { - conn, err := dial(ctx, false, o) - if err != nil { - defer pool.Close() // NOTE: error from Close is ignored. - return nil, err - } - pool.conns = append(pool.conns, conn) - } - return pool, nil -} - -func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) { - if o.HTTPClient != nil { - return nil, errors.New("unsupported HTTP client specified") - } - if o.GRPCConn != nil { - return o.GRPCConn, nil - } - transportCreds, endpoint, err := internal.GetGRPCTransportConfigAndEndpoint(o) - if err != nil { - return nil, err - } - - if insecure { - transportCreds = grpcinsecure.NewCredentials() - } - - // Initialize gRPC dial options with transport-level security options. - grpcOpts := []grpc.DialOption{ - grpc.WithTransportCredentials(transportCreds), - } - - // Authentication can only be sent when communicating over a secure connection. - // - // TODO: Should we be more lenient in the future and allow sending credentials - // when dialing an insecure connection? - if !o.NoAuth && !insecure { - if o.APIKey != "" { - log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") - } - creds, err := internal.Creds(ctx, o) - if err != nil { - return nil, err - } - - grpcOpts = append(grpcOpts, - grpc.WithPerRPCCredentials(grpcTokenSource{ - TokenSource: oauth.TokenSource{creds.TokenSource}, - quotaProject: internal.GetQuotaProject(creds, o.QuotaProject), - requestReason: o.RequestReason, - }), - ) - - // Attempt Direct Path: - if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() { - // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. 
- grpcOpts = []grpc.DialOption{ - grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{oauth.TokenSource{creds.TokenSource}}))} - if timeoutDialerOption != nil { - grpcOpts = append(grpcOpts, timeoutDialerOption) - } - // Check if google-c2p resolver is enabled for DirectPath - if isDirectPathXdsUsed(o) { - // google-c2p resolver target must not have a port number - if addr, _, err := net.SplitHostPort(endpoint); err == nil { - endpoint = "google-c2p:///" + addr - } else { - endpoint = "google-c2p:///" + endpoint - } - } else { - if !strings.HasPrefix(endpoint, "dns:///") { - endpoint = "dns:///" + endpoint - } - grpcOpts = append(grpcOpts, - // For now all DirectPath go clients will be using the following lb config, but in future - // when different services need different configs, then we should change this to a - // per-service config. - grpc.WithDisableServiceConfig(), - grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)) - } - // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. - } - } - - if appengineDialerHook != nil { - // Use the Socket API on App Engine. - // appengine dialer will override socketopt dialer - grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) - } - - // Add tracing, but before the other options, so that clients can override the - // gRPC stats handler. - // This assumes that gRPC options are processed in order, left to right. - grpcOpts = addOCStatsHandler(grpcOpts, o) - grpcOpts = append(grpcOpts, o.GRPCDialOpts...) - if o.UserAgent != "" { - grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) - } - - return grpc.DialContext(ctx, endpoint, grpcOpts...) -} - -func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { - if settings.TelemetryDisabled { - return opts - } - return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) -} - -// grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource. -type grpcTokenSource struct { - oauth.TokenSource - - // Additional metadata attached as headers. - quotaProject string - requestReason string -} - -// GetRequestMetadata gets the request metadata as a map from a grpcTokenSource. -func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string) ( - map[string]string, error) { - metadata, err := ts.TokenSource.GetRequestMetadata(ctx, uri...) 
- if err != nil { - return nil, err - } - - // Attach system parameter - if ts.quotaProject != "" { - metadata["X-goog-user-project"] = ts.quotaProject - } - if ts.requestReason != "" { - metadata["X-goog-request-reason"] = ts.requestReason - } - return metadata, nil -} - -func isDirectPathEnabled(endpoint string, o *internal.DialSettings) bool { - if !o.EnableDirectPath { - return false - } - if !checkDirectPathEndPoint(endpoint) { - return false - } - if strings.EqualFold(os.Getenv(disableDirectPath), "true") { - return false - } - return true -} - -func isDirectPathXdsUsed(o *internal.DialSettings) bool { - // Method 1: Enable DirectPath xDS by env; - if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { - return true - } - // Method 2: Enable DirectPath xDS by option; - if o.EnableDirectPathXds { - return true - } - return false - -} - -func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource, o *internal.DialSettings) bool { - if ts == nil { - return false - } - tok, err := ts.Token() - if err != nil { - return false - } - if tok == nil { - return false - } - if o.AllowNonDefaultServiceAccount { - return true - } - if source, _ := tok.Extra("oauth2.google.tokenSource").(string); source != "compute-metadata" { - return false - } - if acct, _ := tok.Extra("oauth2.google.serviceAccount").(string); acct != "default" { - return false - } - return true -} - -func checkDirectPathEndPoint(endpoint string) bool { - // Only [dns:///]host[:port] is supported, not other schemes (e.g., "tcp://" or "unix://"). - // Also don't try direct path if the user has chosen an alternate name resolver - // (i.e., via ":///" prefix). - // - // TODO(cbro): once gRPC has introspectible options, check the user hasn't - // provided a custom dialer in gRPC options. - if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") { - return false - } - - if endpoint == "" { - return false - } - - return true -} - -func processAndValidateOpts(opts []option.ClientOption) (*internal.DialSettings, error) { - var o internal.DialSettings - for _, opt := range opts { - opt.Apply(&o) - } - if err := o.Validate(); err != nil { - return nil, err - } - - return &o, nil -} - -type connPoolOption struct{ ConnPool } - -// WithConnPool returns a ClientOption that specifies the ConnPool -// connection to use as the basis of communications. -// -// This is only to be used by Google client libraries internally, for example -// when creating a longrunning API client that shares the same connection pool -// as a service client. -func WithConnPool(p ConnPool) option.ClientOption { - return connPoolOption{p} -} - -func (o connPoolOption) Apply(s *internal.DialSettings) { - s.GRPCConnPool = o.ConnPool -} diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go deleted file mode 100644 index fd3dc0565d..0000000000 --- a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine -// +build appengine - -package grpc - -import ( - "context" - "net" - "time" - - "google.golang.org/appengine" - "google.golang.org/appengine/socket" - "google.golang.org/grpc" -) - -func init() { - // NOTE: dev_appserver doesn't currently support SSL. - // When it does, this code can be removed. 
- if appengine.IsDevAppServer() { - return - } - - appengineDialerHook = func(ctx context.Context) grpc.DialOption { - return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return socket.DialTimeout(ctx, "tcp", addr, timeout) - }) - } -} diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go deleted file mode 100644 index 507cd3ec63..0000000000 --- a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 && linux -// +build go1.11,linux - -package grpc - -import ( - "context" - "net" - "syscall" - - "google.golang.org/grpc" -) - -const ( - // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By - // default is 20 seconds. - tcpUserTimeoutMilliseconds = 20000 - - // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT. - tcpUserTimeoutOp = 0x12 -) - -func init() { - // timeoutDialerOption is a grpc.DialOption that contains dialer with - // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+. - timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout) -} - -func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { - control := func(network, address string, c syscall.RawConn) error { - var syscallErr error - controlErr := c.Control(func(fd uintptr) { - syscallErr = syscall.SetsockoptInt( - int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds) - }) - if syscallErr != nil { - return syscallErr - } - if controlErr != nil { - return controlErr - } - return nil - } - d := &net.Dialer{ - Control: control, - } - return d.DialContext(ctx, "tcp", addr) -} diff --git a/vendor/google.golang.org/api/transport/grpc/pool.go b/vendor/google.golang.org/api/transport/grpc/pool.go deleted file mode 100644 index 4cf94a2771..0000000000 --- a/vendor/google.golang.org/api/transport/grpc/pool.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package grpc - -import ( - "context" - "fmt" - "sync/atomic" - - "google.golang.org/api/internal" - "google.golang.org/grpc" -) - -// ConnPool is a pool of grpc.ClientConns. -type ConnPool = internal.ConnPool // NOTE(cbro): type alias to export the type. It must live in internal to avoid a circular dependency. - -var _ ConnPool = &roundRobinConnPool{} -var _ ConnPool = &singleConnPool{} - -// singleConnPool is a special case for a single connection. 
-type singleConnPool struct { - *grpc.ClientConn -} - -func (p *singleConnPool) Conn() *grpc.ClientConn { return p.ClientConn } -func (p *singleConnPool) Num() int { return 1 } - -type roundRobinConnPool struct { - conns []*grpc.ClientConn - - idx uint32 // access via sync/atomic -} - -func (p *roundRobinConnPool) Num() int { - return len(p.conns) -} - -func (p *roundRobinConnPool) Conn() *grpc.ClientConn { - i := atomic.AddUint32(&p.idx, 1) - return p.conns[i%uint32(len(p.conns))] -} - -func (p *roundRobinConnPool) Close() error { - var errs multiError - for _, conn := range p.conns { - if err := conn.Close(); err != nil { - errs = append(errs, err) - } - } - if len(errs) == 0 { - return nil - } - return errs -} - -func (p *roundRobinConnPool) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error { - return p.Conn().Invoke(ctx, method, args, reply, opts...) -} - -func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { - return p.Conn().NewStream(ctx, desc, method, opts...) -} - -// multiError represents errors from multiple conns in the group. -// -// TODO: figure out how and whether this is useful to export. End users should -// not be depending on the transport/grpc package directly, so there might need -// to be some service-specific multi-error type. -type multiError []error - -func (m multiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go deleted file mode 100644 index eca0c3ba79..0000000000 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2015 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package http supports network connections to HTTP servers. -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. -package http - -import ( - "context" - "crypto/tls" - "errors" - "net" - "net/http" - "time" - - "go.opencensus.io/plugin/ochttp" - "golang.org/x/net/http2" - "golang.org/x/oauth2" - "google.golang.org/api/googleapi/transport" - "google.golang.org/api/internal" - "google.golang.org/api/internal/cert" - "google.golang.org/api/option" - "google.golang.org/api/transport/http/internal/propagation" -) - -// NewClient returns an HTTP client for use communicating with a Google cloud -// service, configured with the given ClientOptions. It also returns the endpoint -// for the service as specified in the options. -func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { - settings, err := newSettings(opts) - if err != nil { - return nil, "", err - } - clientCertSource, dialTLSContext, endpoint, err := internal.GetHTTPTransportConfigAndEndpoint(settings) - if err != nil { - return nil, "", err - } - // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? 
- if settings.HTTPClient != nil { - return settings.HTTPClient, endpoint, nil - } - - trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource, dialTLSContext), settings) - if err != nil { - return nil, "", err - } - return &http.Client{Transport: trans}, endpoint, nil -} - -// NewTransport creates an http.RoundTripper for use communicating with a Google -// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base. -func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) { - settings, err := newSettings(opts) - if err != nil { - return nil, err - } - if settings.HTTPClient != nil { - return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport") - } - return newTransport(ctx, base, settings) -} - -func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { - paramTransport := ¶meterTransport{ - base: base, - userAgent: settings.UserAgent, - requestReason: settings.RequestReason, - } - var trans http.RoundTripper = paramTransport - trans = addOCTransport(trans, settings) - switch { - case settings.NoAuth: - // Do nothing. - case settings.APIKey != "": - paramTransport.quotaProject = internal.GetQuotaProject(nil, settings.QuotaProject) - trans = &transport.APIKey{ - Transport: trans, - Key: settings.APIKey, - } - default: - creds, err := internal.Creds(ctx, settings) - if err != nil { - return nil, err - } - paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) - ts := creds.TokenSource - if settings.ImpersonationConfig == nil && settings.TokenSource != nil { - ts = settings.TokenSource - } - trans = &oauth2.Transport{ - Base: trans, - Source: ts, - } - } - return trans, nil -} - -func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) { - var o internal.DialSettings - for _, opt := range opts { - opt.Apply(&o) - } - if err := o.Validate(); err != nil { - return nil, err - } - if o.GRPCConn != nil { - return nil, errors.New("unsupported gRPC connection specified") - } - return &o, nil -} - -type parameterTransport struct { - userAgent string - quotaProject string - requestReason string - - base http.RoundTripper -} - -func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { - rt := t.base - if rt == nil { - return nil, errors.New("transport: no Transport specified") - } - newReq := *req - newReq.Header = make(http.Header) - for k, vv := range req.Header { - newReq.Header[k] = vv - } - if t.userAgent != "" { - // TODO(cbro): append to existing User-Agent header? - newReq.Header.Set("User-Agent", t.userAgent) - } - - // Attach system parameters into the header - if t.quotaProject != "" { - newReq.Header.Set("X-Goog-User-Project", t.quotaProject) - } - if t.requestReason != "" { - newReq.Header.Set("X-Goog-Request-Reason", t.requestReason) - } - - return rt.RoundTrip(&newReq) -} - -// Set at init time by dial_appengine.go. If nil, we're not on App Engine. -var appengineUrlfetchHook func(context.Context) http.RoundTripper - -// defaultBaseTransport returns the base HTTP transport. -// On App Engine, this is urlfetch.Transport. -// Otherwise, use a default transport, taking most defaults from -// http.DefaultTransport. -// If TLSCertificate is available, set TLSClientConfig as well. 
-func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { - if appengineUrlfetchHook != nil { - return appengineUrlfetchHook(ctx) - } - - // Copy http.DefaultTransport except for MaxIdleConnsPerHost setting, - // which is increased due to reported performance issues under load in the GCS - // client. Transport.Clone is only available in Go 1.13 and up. - trans := clonedTransport(http.DefaultTransport) - if trans == nil { - trans = fallbackBaseTransport() - } - trans.MaxIdleConnsPerHost = 100 - - if clientCertSource != nil { - trans.TLSClientConfig = &tls.Config{ - GetClientCertificate: clientCertSource, - } - } - if dialTLSContext != nil { - // If DialTLSContext is set, TLSClientConfig wil be ignored - trans.DialTLSContext = dialTLSContext - } - - configureHTTP2(trans) - - return trans -} - -// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the -// transport. This allows broken idle connections to be pruned more quickly, -// preventing the client from attempting to re-use connections that will no -// longer work. -func configureHTTP2(trans *http.Transport) { - http2Trans, err := http2.ConfigureTransports(trans) - if err == nil { - http2Trans.ReadIdleTimeout = time.Second * 31 - } -} - -// fallbackBaseTransport is used in httpHeaderMaxSize { - return trace.SpanContext{}, false - } - - // Parse the trace id field. - slash := strings.Index(h, `/`) - if slash == -1 { - return trace.SpanContext{}, false - } - tid, h := h[:slash], h[slash+1:] - - buf, err := hex.DecodeString(tid) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.TraceID[:], buf) - - // Parse the span id field. - spanstr := h - semicolon := strings.Index(h, `;`) - if semicolon != -1 { - spanstr, h = h[:semicolon], h[semicolon+1:] - } - sid, err := strconv.ParseUint(spanstr, 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - binary.BigEndian.PutUint64(sc.SpanID[:], sid) - - // Parse the options field, options field is optional. - if !strings.HasPrefix(h, "o=") { - return sc, true - } - o, err := strconv.ParseUint(h[2:], 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(o) - return sc, true -} - -// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. 
-func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - sid := binary.BigEndian.Uint64(sc.SpanID[:]) - header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) - req.Header.Set(httpHeader, header) -} diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml deleted file mode 100644 index 6d03f4d36e..0000000000 --- a/vendor/google.golang.org/appengine/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go_import_path: google.golang.org/appengine - -install: - - ./travis_install.sh - -script: - - ./travis_test.sh - -matrix: - include: - - go: 1.9.x - env: GOAPP=true - - go: 1.10.x - env: GOAPP=false - - go: 1.11.x - env: GO111MODULE=on diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md index ffc2985208..289693613c 100644 --- a/vendor/google.golang.org/appengine/CONTRIBUTING.md +++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md @@ -19,14 +19,12 @@ ## Running system tests -Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`. - Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. -Run tests with `goapp test`: +Run tests with `go test`: ``` -goapp test -v google.golang.org/appengine/... +go test -v google.golang.org/appengine/... ``` ## Contributor License Agreements diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md index 9fdbacd3c6..5ccddd9990 100644 --- a/vendor/google.golang.org/appengine/README.md +++ b/vendor/google.golang.org/appengine/README.md @@ -1,6 +1,6 @@ # Go App Engine packages -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) +[![CI Status](https://github.com/golang/appengine/actions/workflows/ci.yml/badge.svg)](https://github.com/golang/appengine/actions/workflows/ci.yml) This repository supports the Go runtime on *App Engine standard*. It provides APIs for interacting with App Engine services. @@ -51,7 +51,7 @@ code importing `appengine/datastore` will now need to import `google.golang.org/ Most App Engine services are available with exactly the same API. A few APIs were cleaned up, and there are some differences: -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. +* `appengine.Context` has been replaced with the `Context` type from `context`. * Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. * `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. * `appengine.Datacenter` now takes a `context.Context` argument. @@ -72,7 +72,7 @@ A few APIs were cleaned up, and there are some differences: * `appengine/socket` is not required on App Engine flexible environment / Managed VMs. Use the standard `net` package instead. -## Key Encode/Decode compatibiltiy to help with datastore library migrations +## Key Encode/Decode compatibility to help with datastore library migrations Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore. The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type. 
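For readers following the datastore migration notes in the README hunk above, here is a minimal sketch of the key-conversion shim in use. It is illustrative only (the `/kind` handler path and the `key` query parameter are invented for the example), and the upstream docs recommend enabling the conversion once during application initialization rather than per request.

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
)

func init() {
	http.HandleFunc("/kind", func(w http.ResponseWriter, r *http.Request) {
		ctx := appengine.NewContext(r)

		// Opt in to decoding keys encoded by cloud.google.com/go/datastore.
		// Upstream suggests calling this once at startup; repeating it per
		// request, as here, is assumed harmless but redundant.
		datastore.EnableKeyConversion(ctx)

		// "key" is a hypothetical query parameter carrying an encoded key
		// produced by either datastore library.
		k, err := datastore.DecodeKey(r.URL.Query().Get("key"))
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		fmt.Fprintln(w, k.Kind())
	})
}

func main() {
	appengine.Main()
}
```

Note that `EnableKeyConversion` only widens what `DecodeKey` accepts; it does not change how this package encodes its own keys.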
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go index 8c9697674f..35ba9c8967 100644 --- a/vendor/google.golang.org/appengine/appengine.go +++ b/vendor/google.golang.org/appengine/appengine.go @@ -9,10 +9,10 @@ package appengine // import "google.golang.org/appengine" import ( + "context" "net/http" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" ) @@ -35,18 +35,18 @@ import ( // // Main is designed so that the app's main package looks like this: // -// package main +// package main // -// import ( -// "google.golang.org/appengine" +// import ( +// "google.golang.org/appengine" // -// _ "myapp/package0" -// _ "myapp/package1" -// ) +// _ "myapp/package0" +// _ "myapp/package1" +// ) // -// func main() { -// appengine.Main() -// } +// func main() { +// appengine.Main() +// } // // The "myapp/packageX" packages are expected to register HTTP handlers // in their init functions. @@ -54,6 +54,9 @@ func Main() { internal.Main() } +// Middleware wraps an http handler so that it can make GAE API calls +var Middleware func(http.Handler) http.Handler = internal.Middleware + // IsDevAppServer reports whether the App Engine app is running in the // development App Server. func IsDevAppServer() bool { diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go index f4b645aad3..6e1d041cd9 100644 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ b/vendor/google.golang.org/appengine/appengine_vm.go @@ -2,19 +2,19 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine package appengine import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" + "context" ) // BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works in App Engine "flexible environment". +// +// Deprecated: App Engine no longer has a special background context. +// Just use context.Background(). func BackgroundContext() context.Context { - return internal.BackgroundContext() + return context.Background() } diff --git a/vendor/google.golang.org/appengine/datastore/datastore.go b/vendor/google.golang.org/appengine/datastore/datastore.go index 576bc50132..790fca771f 100644 --- a/vendor/google.golang.org/appengine/datastore/datastore.go +++ b/vendor/google.golang.org/appengine/datastore/datastore.go @@ -5,12 +5,12 @@ package datastore import ( + "context" "errors" "fmt" "reflect" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/internal" diff --git a/vendor/google.golang.org/appengine/datastore/doc.go b/vendor/google.golang.org/appengine/datastore/doc.go index 85616cf274..1ecf51885f 100644 --- a/vendor/google.golang.org/appengine/datastore/doc.go +++ b/vendor/google.golang.org/appengine/datastore/doc.go @@ -5,8 +5,7 @@ /* Package datastore provides a client for App Engine's datastore service. - -Basic Operations +# Basic Operations Entities are the unit of storage and are associated with a key. A key consists of an optional parent key, a string application ID, a string kind @@ -74,8 +73,7 @@ GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and Delete functions. 
They take a []*Key instead of a *Key, and may return an appengine.MultiError when encountering partial failure. - -Properties +# Properties An entity's contents can be represented by a variety of types. These are typically struct pointers, but can also be any type that implements the @@ -137,8 +135,7 @@ Example code: J int `datastore:",noindex" json:"j"` } - -Structured Properties +# Structured Properties If the struct pointed to contains other structs, then the nested or embedded structs are flattened. For example, given these definitions: @@ -179,8 +176,7 @@ equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`. If an outer struct is tagged "noindex" then all of its implicit flattened fields are effectively "noindex". - -The PropertyLoadSaver Interface +# The PropertyLoadSaver Interface An entity's contents can also be represented by any type that implements the PropertyLoadSaver interface. This type may be a struct pointer, but it does @@ -230,8 +226,7 @@ Example code: The *PropertyList type implements PropertyLoadSaver, and can therefore hold an arbitrary entity's contents. - -Queries +# Queries Queries retrieve entities based on their properties or key's ancestry. Running a query yields an iterator of results: either keys or (key, entity) pairs. @@ -284,8 +279,7 @@ Example code: io.Copy(w, b) } - -Transactions +# Transactions RunInTransaction runs a function in a transaction. @@ -323,8 +317,7 @@ Example code: fmt.Fprintf(w, "Count=%d", count) } - -Metadata +# Metadata The datastore package provides access to some of App Engine's datastore metadata. This metadata includes information about the entity groups, diff --git a/vendor/google.golang.org/appengine/datastore/key.go b/vendor/google.golang.org/appengine/datastore/key.go index fd598dc965..e312df519d 100644 --- a/vendor/google.golang.org/appengine/datastore/key.go +++ b/vendor/google.golang.org/appengine/datastore/key.go @@ -6,6 +6,7 @@ package datastore import ( "bytes" + "context" "encoding/base64" "encoding/gob" "errors" @@ -14,7 +15,6 @@ import ( "strings" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/datastore" diff --git a/vendor/google.golang.org/appengine/datastore/keycompat.go b/vendor/google.golang.org/appengine/datastore/keycompat.go index 371a64eeef..e852f29cf7 100644 --- a/vendor/google.golang.org/appengine/datastore/keycompat.go +++ b/vendor/google.golang.org/appengine/datastore/keycompat.go @@ -5,10 +5,9 @@ package datastore import ( + "context" "sync" - "golang.org/x/net/context" - "google.golang.org/appengine/datastore/internal/cloudkey" "google.golang.org/appengine/internal" ) diff --git a/vendor/google.golang.org/appengine/datastore/metadata.go b/vendor/google.golang.org/appengine/datastore/metadata.go index 6acacc3db9..e1b2d2259b 100644 --- a/vendor/google.golang.org/appengine/datastore/metadata.go +++ b/vendor/google.golang.org/appengine/datastore/metadata.go @@ -4,7 +4,7 @@ package datastore -import "golang.org/x/net/context" +import "context" // Datastore kinds for the metadata entities. const ( @@ -50,13 +50,14 @@ func keyNames(keys []*Key) []string { // The properties are returned as a map of property names to a slice of the // representation types. 
The representation types for the supported Go property // types are: -// "INT64": signed integers and time.Time -// "DOUBLE": float32 and float64 -// "BOOLEAN": bool -// "STRING": string, []byte and ByteString -// "POINT": appengine.GeoPoint -// "REFERENCE": *Key -// "USER": (not used in the Go runtime) +// +// "INT64": signed integers and time.Time +// "DOUBLE": float32 and float64 +// "BOOLEAN": bool +// "STRING": string, []byte and ByteString +// "POINT": appengine.GeoPoint +// "REFERENCE": *Key +// "USER": (not used in the Go runtime) func KindProperties(ctx context.Context, kind string) (map[string][]string, error) { // TODO(djd): Support range queries. kindKey := NewKey(ctx, kindKind, kind, 0, nil) diff --git a/vendor/google.golang.org/appengine/datastore/query.go b/vendor/google.golang.org/appengine/datastore/query.go index 4124534b22..b1b80bf7b6 100644 --- a/vendor/google.golang.org/appengine/datastore/query.go +++ b/vendor/google.golang.org/appengine/datastore/query.go @@ -5,6 +5,7 @@ package datastore import ( + "context" "encoding/base64" "errors" "fmt" @@ -13,7 +14,6 @@ import ( "strings" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/datastore" @@ -476,7 +476,7 @@ func callNext(c context.Context, res *pb.QueryResult, offset, count int32) error // The keys returned by GetAll will be in a 1-1 correspondence with the entities // added to dst. // -// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys. +// If q is a “keys-only” query, GetAll ignores dst and only returns the keys. // // The running time and number of API calls made by GetAll scale linearly with // the sum of the query's offset and limit. Unless the result count is @@ -754,7 +754,7 @@ func (c Cursor) String() string { return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") } -// Decode decodes a cursor from its base-64 string representation. +// DecodeCursor decodes a cursor from its base-64 string representation. func DecodeCursor(s string) (Cursor, error) { if s == "" { return Cursor{&zeroCC}, nil diff --git a/vendor/google.golang.org/appengine/datastore/transaction.go b/vendor/google.golang.org/appengine/datastore/transaction.go index 2ae8428f85..06deeb43e7 100644 --- a/vendor/google.golang.org/appengine/datastore/transaction.go +++ b/vendor/google.golang.org/appengine/datastore/transaction.go @@ -5,10 +5,9 @@ package datastore import ( + "context" "errors" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/datastore" ) diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go index b8dcf8f361..1202fc1a53 100644 --- a/vendor/google.golang.org/appengine/identity.go +++ b/vendor/google.golang.org/appengine/identity.go @@ -5,10 +5,9 @@ package appengine import ( + "context" "time" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/app_identity" modpb "google.golang.org/appengine/internal/modules" diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 721053c20a..0569f5dd43 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -2,12 +2,14 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -24,7 +26,6 @@ import ( "time" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" logpb "google.golang.org/appengine/internal/log" @@ -32,8 +33,7 @@ import ( ) const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" + apiPath = "/rpc_http" ) var ( @@ -65,21 +65,22 @@ var ( IdleConnTimeout: 90 * time.Second, }, } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context ) -func apiURL() *url.URL { +func apiURL(ctx context.Context) *url.URL { host, port := "appengine.googleapis.internal", "10001" if h := os.Getenv("API_HOST"); h != "" { host = h } + if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil { + host = hostOverride.(string) + } if p := os.Getenv("API_PORT"); p != "" { port = p } + if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil { + port = portOverride.(string) + } return &url.URL{ Scheme: "http", Host: host + ":" + port, @@ -87,82 +88,97 @@ func apiURL() *url.URL { } } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) +// Middleware wraps an http handler so that it can make GAE API calls +func Middleware(next http.Handler) http.Handler { + return handleHTTPMiddleware(executeRequestSafelyMiddleware(next)) +} - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } +func handleHTTPMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c := &aeContext{ + req: r, + outHeader: w.Header(), + } + r = r.WithContext(withContext(r.Context(), c)) + c.req = r + + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. 
- go c.logFlusher(stopFlushing) + if logToLogservice() { + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + } - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more + next.ServeHTTP(c, r) + c.outHeader = nil // make sure header changes aren't respected any more - stopFlushing <- 1 // any logging beyond this point will be dropped + flushed := make(chan struct{}) + if logToLogservice() { + stopFlushing <- 1 // any logging beyond this point will be dropped - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + } - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + if logToLogservice() { + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. + <-flushed + } + }) } -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() +func executeRequestSafelyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if x := recover(); x != nil { + c := w.(*aeContext) + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() - http.DefaultServeMux.ServeHTTP(c, r) + next.ServeHTTP(w, r) + }) } func renderPanic(x interface{}) string { @@ -204,9 +220,9 @@ func renderPanic(x interface{}) string { return string(buf) } -// context represents the context of an in-flight HTTP request. +// aeContext represents the aeContext of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { +type aeContext struct { req *http.Request outCode int @@ -218,8 +234,6 @@ type context struct { lines []*logpb.UserAppLogLine flushes int } - - apiURL *url.URL } var contextKey = "holds a *context" @@ -227,8 +241,8 @@ var contextKey = "holds a *context" // jointContext joins two contexts in a superficial way. // It takes values and timeouts from a base context, and only values from another context. 
type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context + base context.Context + valuesOnly context.Context } func (c jointContext) Deadline() (time.Time, bool) { @@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} { // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) +func fromContext(ctx context.Context) *aeContext { + c, _ := ctx.Value(&contextKey).(*aeContext) return c } -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c *aeContext) context.Context { + ctx := context.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) +func toContext(c *aeContext) context.Context { + return withContext(context.Background(), c) } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } -func ReqContext(req *http.Request) netcontext.Context { +func ReqContext(req *http.Request) context.Context { return req.Context() } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { return jointContext{ base: parent, valuesOnly: req.Context(), } } -// DefaultTicket returns a ticket used for background context or dev_appserver. -func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - // RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. +// any API calls are sent to the provided URL. // It should only be used by aetest package. 
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} +func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { + ctx := req.Context() + ctx = withAPIHostOverride(ctx, apiURL.Hostname()) + ctx = withAPIPortOverride(ctx, apiURL.Port()) + ctx = WithAppIDOverride(ctx, appID) + + // use the unregistered request as a placeholder so that withContext can read the headers + c := &aeContext{req: req} + c.req = req.WithContext(withContext(ctx, c)) + return c.req } var errTimeout = &CallError{ @@ -348,7 +322,7 @@ var errTimeout = &CallError{ Timeout: true, } -func (c *context) Header() http.Header { return c.outHeader } +func (c *aeContext) Header() http.Header { return c.outHeader } // Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status // codes do not permit a response body (nor response entity headers such as @@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool { return true } -func (c *context) Write(b []byte) (int, error) { +func (c *aeContext) Write(b []byte) (int, error) { if c.outCode == 0 { c.WriteHeader(http.StatusOK) } @@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) { return len(b), nil } -func (c *context) WriteHeader(code int) { +func (c *aeContext) WriteHeader(code int) { if c.outCode != 0 { logf(c, 3, "WriteHeader called multiple times on request.") // error level return @@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) { c.outCode = code } -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { +func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { + apiURL := apiURL(ctx) hreq := &http.Request{ Method: "POST", - URL: c.apiURL, + URL: apiURL, Header: http.Header{ apiEndpointHeader: apiEndpointHeaderValue, apiMethodHeader: apiMethodHeaderValue, @@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) }, Body: ioutil.NopCloser(bytes.NewReader(body)), ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) + Host: apiURL.Host, } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) + c := fromContext(ctx) + if c != nil { + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } } tr := apiHTTPClient.Transport.(*http.Transport) @@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) return hrespBody, nil } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } // Apply transaction modifications if we're in a transaction. 
if t := transactionFromContext(ctx); t != nil { if t.finished { - return errors.New("transaction context has expired") + return errors.New("transaction aeContext has expired") } applyTransaction(in, &t.transaction) } @@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix + ticket := "" + if c != nil { + ticket = c.req.Header.Get(ticketHeader) + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri } } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - hrespBody, err := c.post(hreqBody, timeout) + hrespBody, err := post(ctx, hreqBody, timeout) if err != nil { return err } @@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return proto.Unmarshal(res.Response, out) } -func (c *context) Request() *http.Request { +func (c *aeContext) Request() *http.Request { return c.req } -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { +func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { // Truncate long log lines. // TODO(dsymonds): Check if this is still necessary. const lim = 8 << 10 @@ -575,18 +542,20 @@ var logLevelName = map[int64]string{ 4: "CRITICAL", } -func logf(c *context, level int64, format string, args ...interface{}) { +func logf(c *aeContext, level int64, format string, args ...interface{}) { if c == nil { - panic("not an App Engine context") + panic("not an App Engine aeContext") } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation + if logToLogservice() { + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + } + // Log to stdout if not deployed if !IsSecondGen() { log.Print(logLevelName[level] + ": " + s) } @@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) { // flushLog attempts to flush any pending logs to the appserver. // It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { +func (c *aeContext) flushLog(force bool) (flushed bool) { c.pendingLogs.Lock() // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. 
n, rem := 0, 30<<20 @@ -655,7 +624,7 @@ const ( forceFlushInterval = 60 * time.Second ) -func (c *context) logFlusher(stop <-chan int) { +func (c *aeContext) logFlusher(stop <-chan int) { lastFlush := time.Now() tick := time.NewTicker(flushInterval) for { @@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) { } } -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return toContext(&aeContext{req: req}) +} + +func logToLogservice() bool { + // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json + // where $LOG_DIR is /var/log in prod and some tmpdir in dev + return os.Getenv("LOG_TO_LOGSERVICE") != "0" } diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e35..87c33c798e 100644 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal import ( + "context" "errors" "fmt" "net/http" @@ -17,20 +19,19 @@ import ( basepb "appengine_internal/base" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) var contextKey = "holds an appengine.Context" // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { +func fromContext(ctx context.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. 
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { +func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { c := fromContext(ctx) if c == nil { return nil, errNotAppEngineContext @@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error return c, nil } -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c appengine.Context) context.Context { + ctx := context.WithValue(parent, &contextKey, c) s := &basepb.StringProto{} c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) @@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont return ctx } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { if req, ok := c.Request().(*http.Request); ok { return req.Header @@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) +func ReqContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { c := appengine.NewContext(req) return withContext(parent, c) } @@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr } func (t *testingContext) Request() interface{} { return t.req } -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return withContext(context.Background(), &testingContext{req: req}) } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") +func Middleware(next http.Handler) http.Handler { + panic("Middleware called; this should be impossible") } func logf(c appengine.Context, level int64, format string, args ...interface{}) { diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214b7..5b95c13d92 100644 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,20 +5,26 @@ package internal import ( + "context" "errors" "os" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) +type ctxKey string + +func (c ctxKey) String() string { + return "appengine context key: " + string(c) +} + var errNotAppEngineContext = errors.New("not an App Engine context") -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error +type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error var 
callOverrideKey = "holds []CallOverrideFunc" -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { +func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { // We avoid appending to any existing call override // so we don't risk overwriting a popped stack below. var cofs []CallOverrideFunc @@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con cofs = append(cofs, uf...) } cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) + return context.WithValue(ctx, &callOverrideKey, cofs) } -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { +func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) if len(cofs) == 0 { return nil, nil, false @@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte // We found a list of overrides; grab the last, and reconstitute a // context that will hide it. f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) return f, ctx, true } @@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{}) var logOverrideKey = "holds a logOverrideFunc" -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) +func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { + return context.WithValue(ctx, &logOverrideKey, f) } var appIDOverrideKey = "holds a string, being the full app ID" -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +func WithAppIDOverride(ctx context.Context, appID string) context.Context { + return context.WithValue(ctx, &appIDOverrideKey, appID) +} + +var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST") + +func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { + return context.WithValue(ctx, apiHostOverrideKey, apiHost) +} + +var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") + +func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { + return context.WithValue(ctx, apiPortOverrideKey, apiPort) } var namespaceKey = "holds the namespace string" -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) +func withNamespace(ctx context.Context, ns string) context.Context { + return context.WithValue(ctx, &namespaceKey, ns) } -func NamespaceFromContext(ctx netcontext.Context) string { +func NamespaceFromContext(ctx context.Context) string { // If there's no namespace, return the empty string. ns, _ := ctx.Value(&namespaceKey).(string) return ns @@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string { // FullyQualifiedAppID returns the fully-qualified application ID. // This may contain a partition prefix (e.g. "s~" for High Replication apps), // or a domain prefix (e.g. "example.com:"). 
-func FullyQualifiedAppID(ctx netcontext.Context) string { +func FullyQualifiedAppID(ctx context.Context) string { if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { return id } return fullyQualifiedAppID(ctx) } -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { +func Logf(ctx context.Context, level int64, format string, args ...interface{}) { if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { f(level, format, args...) return @@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ } // NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { +func NamespacedContext(ctx context.Context, namespace string) context.Context { return withNamespace(ctx, namespace) } diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go index 9b4134e425..0f95aa91d5 100644 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -5,9 +5,8 @@ package internal import ( + "context" "os" - - netcontext "golang.org/x/net/context" ) var ( @@ -23,7 +22,7 @@ var ( // AppID is the implementation of the wrapper function of the same name in // ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { +func AppID(c context.Context) string { return appID(FullyQualifiedAppID(c)) } @@ -35,7 +34,7 @@ func IsStandard() bool { return appengineStandard || IsSecondGen() } -// IsStandard is the implementation of the wrapper function of the same name in +// IsSecondGen is the implementation of the wrapper function of the same name in // ../appengine.go. See that file for commentary. func IsSecondGen() bool { // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index 4e979f45e3..5ad3548bf7 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -2,21 +2,22 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( - "appengine" + "context" - netcontext "golang.org/x/net/context" + "appengine" ) func init() { appengineStandard = true } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(c) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ context.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string { return appengine.RequestID(c) } -func ModuleName(ctx netcontext.Context) string { +func ModuleName(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) } return appengine.ModuleName(c) } -func VersionID(ctx netcontext.Context) string { +func VersionID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string { return appengine.VersionID(c) } -func fullyQualifiedAppID(ctx netcontext.Context) string { +func fullyQualifiedAppID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go index d5e2e7b5e3..4201b6b585 100644 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appenginevm // +build appenginevm package internal diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go index 5d80672635..18ddda3a42 100644 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -2,17 +2,17 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( + "context" "log" "net/http" "os" "strings" - - netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions @@ -24,7 +24,7 @@ const ( hDatacenter = "X-AppEngine-Datacenter" ) -func ctxHeaders(ctx netcontext.Context) http.Header { +func ctxHeaders(ctx context.Context) http.Header { c := fromContext(ctx) if c == nil { return nil @@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header { return c.Request().Header } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } -func Datacenter(ctx netcontext.Context) string { +func Datacenter(ctx context.Context) string { if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { return dc } @@ -71,7 +71,7 @@ func ServerSoftware() string { // TODO(dsymonds): Remove the metadata fetches. -func ModuleName(_ netcontext.Context) string { +func ModuleName(_ context.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } @@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string { return string(mustGetMetadata("instance/attributes/gae_backend_name")) } -func VersionID(_ netcontext.Context) string { +func VersionID(_ context.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." + s2 } @@ -112,7 +112,7 @@ func partitionlessAppID() string { return string(mustGetMetadata("instance/attributes/gae_project")) } -func fullyQualifiedAppID(_ netcontext.Context) string { +func fullyQualifiedAppID(_ context.Context) string { if s := os.Getenv("GAE_APPLICATION"); s != "" { return s } @@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string { } func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" } diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go index 1e765312fd..afd0ae84fd 100644 --- a/vendor/google.golang.org/appengine/internal/main.go +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index ddb79a3338..86a8caf06f 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal @@ -29,7 +30,7 @@ func Main() { if IsDevAppServer() { host = "127.0.0.1" } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go deleted file mode 100644 index 4ec872e460..0000000000 --- a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go +++ /dev/null @@ -1,2822 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/socket/socket_service.proto - -package socket - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RemoteSocketServiceError_ErrorCode int32 - -const ( - RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 - RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 - RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 - RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 - RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 - RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 -) - -var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ - 1: "SYSTEM_ERROR", - 2: "GAI_ERROR", - 4: "FAILURE", - 5: "PERMISSION_DENIED", - 6: "INVALID_REQUEST", - 7: "SOCKET_CLOSED", -} -var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ - "SYSTEM_ERROR": 1, - "GAI_ERROR": 2, - "FAILURE": 4, - "PERMISSION_DENIED": 5, - "INVALID_REQUEST": 6, - "SOCKET_CLOSED": 7, -} - -func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { - p := new(RemoteSocketServiceError_ErrorCode) - *p = x - return p -} -func (x RemoteSocketServiceError_ErrorCode) String() string { - return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) -} -func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") - if err != nil { - return err - } - *x = RemoteSocketServiceError_ErrorCode(value) - return nil -} -func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0} -} - -type RemoteSocketServiceError_SystemError int32 - -const ( - RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 - RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 - RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 - RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 - RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 - 
RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 - RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 - RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 - RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 - RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 - RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 - RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 - RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 - RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 - RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 - RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 - RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 - RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 - RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 - RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 - RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 - RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 - RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 - RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 - RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 - RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 - RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 - RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 - RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 - RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 - RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 - RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 - RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 - RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 - RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 - RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 - RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 - RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 - RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 - RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 - RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 - RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 - RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 - RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 - RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 - RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 - RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 - RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 - RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 - 
RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 - RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 - RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 - RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 - RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 - RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 - RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 - RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 - RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 - RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 - RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 - RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 - RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 - RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 - RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 - RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 - RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 - RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 - RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 - RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 - RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 - RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 - RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 - RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 - RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 - RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 - RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 - RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 - RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 - RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 - RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 - RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 - RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 - RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 - RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 - RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 - RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 - RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 - RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 - RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 - RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 - RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 - RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 - RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 
92 - RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 - RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 - RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 - RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 - RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 - RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 - RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 - RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 - RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 - RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 - RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 - RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 - RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 - RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 - RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 - RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 - RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 - RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 - RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 - RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 - RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 - RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 - RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 - RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 - RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 - RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 - RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 - RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 - RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 - RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 - RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 - RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 - RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 - RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 - RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 - RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 - RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 - RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 - RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 - RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 - RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 -) - -var RemoteSocketServiceError_SystemError_name = map[int32]string{ - 0: 
"SYS_SUCCESS", - 1: "SYS_EPERM", - 2: "SYS_ENOENT", - 3: "SYS_ESRCH", - 4: "SYS_EINTR", - 5: "SYS_EIO", - 6: "SYS_ENXIO", - 7: "SYS_E2BIG", - 8: "SYS_ENOEXEC", - 9: "SYS_EBADF", - 10: "SYS_ECHILD", - 11: "SYS_EAGAIN", - // Duplicate value: 11: "SYS_EWOULDBLOCK", - 12: "SYS_ENOMEM", - 13: "SYS_EACCES", - 14: "SYS_EFAULT", - 15: "SYS_ENOTBLK", - 16: "SYS_EBUSY", - 17: "SYS_EEXIST", - 18: "SYS_EXDEV", - 19: "SYS_ENODEV", - 20: "SYS_ENOTDIR", - 21: "SYS_EISDIR", - 22: "SYS_EINVAL", - 23: "SYS_ENFILE", - 24: "SYS_EMFILE", - 25: "SYS_ENOTTY", - 26: "SYS_ETXTBSY", - 27: "SYS_EFBIG", - 28: "SYS_ENOSPC", - 29: "SYS_ESPIPE", - 30: "SYS_EROFS", - 31: "SYS_EMLINK", - 32: "SYS_EPIPE", - 33: "SYS_EDOM", - 34: "SYS_ERANGE", - 35: "SYS_EDEADLK", - // Duplicate value: 35: "SYS_EDEADLOCK", - 36: "SYS_ENAMETOOLONG", - 37: "SYS_ENOLCK", - 38: "SYS_ENOSYS", - 39: "SYS_ENOTEMPTY", - 40: "SYS_ELOOP", - 42: "SYS_ENOMSG", - 43: "SYS_EIDRM", - 44: "SYS_ECHRNG", - 45: "SYS_EL2NSYNC", - 46: "SYS_EL3HLT", - 47: "SYS_EL3RST", - 48: "SYS_ELNRNG", - 49: "SYS_EUNATCH", - 50: "SYS_ENOCSI", - 51: "SYS_EL2HLT", - 52: "SYS_EBADE", - 53: "SYS_EBADR", - 54: "SYS_EXFULL", - 55: "SYS_ENOANO", - 56: "SYS_EBADRQC", - 57: "SYS_EBADSLT", - 59: "SYS_EBFONT", - 60: "SYS_ENOSTR", - 61: "SYS_ENODATA", - 62: "SYS_ETIME", - 63: "SYS_ENOSR", - 64: "SYS_ENONET", - 65: "SYS_ENOPKG", - 66: "SYS_EREMOTE", - 67: "SYS_ENOLINK", - 68: "SYS_EADV", - 69: "SYS_ESRMNT", - 70: "SYS_ECOMM", - 71: "SYS_EPROTO", - 72: "SYS_EMULTIHOP", - 73: "SYS_EDOTDOT", - 74: "SYS_EBADMSG", - 75: "SYS_EOVERFLOW", - 76: "SYS_ENOTUNIQ", - 77: "SYS_EBADFD", - 78: "SYS_EREMCHG", - 79: "SYS_ELIBACC", - 80: "SYS_ELIBBAD", - 81: "SYS_ELIBSCN", - 82: "SYS_ELIBMAX", - 83: "SYS_ELIBEXEC", - 84: "SYS_EILSEQ", - 85: "SYS_ERESTART", - 86: "SYS_ESTRPIPE", - 87: "SYS_EUSERS", - 88: "SYS_ENOTSOCK", - 89: "SYS_EDESTADDRREQ", - 90: "SYS_EMSGSIZE", - 91: "SYS_EPROTOTYPE", - 92: "SYS_ENOPROTOOPT", - 93: "SYS_EPROTONOSUPPORT", - 94: "SYS_ESOCKTNOSUPPORT", - 95: "SYS_EOPNOTSUPP", - // Duplicate value: 95: "SYS_ENOTSUP", - 96: "SYS_EPFNOSUPPORT", - 97: "SYS_EAFNOSUPPORT", - 98: "SYS_EADDRINUSE", - 99: "SYS_EADDRNOTAVAIL", - 100: "SYS_ENETDOWN", - 101: "SYS_ENETUNREACH", - 102: "SYS_ENETRESET", - 103: "SYS_ECONNABORTED", - 104: "SYS_ECONNRESET", - 105: "SYS_ENOBUFS", - 106: "SYS_EISCONN", - 107: "SYS_ENOTCONN", - 108: "SYS_ESHUTDOWN", - 109: "SYS_ETOOMANYREFS", - 110: "SYS_ETIMEDOUT", - 111: "SYS_ECONNREFUSED", - 112: "SYS_EHOSTDOWN", - 113: "SYS_EHOSTUNREACH", - 114: "SYS_EALREADY", - 115: "SYS_EINPROGRESS", - 116: "SYS_ESTALE", - 117: "SYS_EUCLEAN", - 118: "SYS_ENOTNAM", - 119: "SYS_ENAVAIL", - 120: "SYS_EISNAM", - 121: "SYS_EREMOTEIO", - 122: "SYS_EDQUOT", - 123: "SYS_ENOMEDIUM", - 124: "SYS_EMEDIUMTYPE", - 125: "SYS_ECANCELED", - 126: "SYS_ENOKEY", - 127: "SYS_EKEYEXPIRED", - 128: "SYS_EKEYREVOKED", - 129: "SYS_EKEYREJECTED", - 130: "SYS_EOWNERDEAD", - 131: "SYS_ENOTRECOVERABLE", - 132: "SYS_ERFKILL", -} -var RemoteSocketServiceError_SystemError_value = map[string]int32{ - "SYS_SUCCESS": 0, - "SYS_EPERM": 1, - "SYS_ENOENT": 2, - "SYS_ESRCH": 3, - "SYS_EINTR": 4, - "SYS_EIO": 5, - "SYS_ENXIO": 6, - "SYS_E2BIG": 7, - "SYS_ENOEXEC": 8, - "SYS_EBADF": 9, - "SYS_ECHILD": 10, - "SYS_EAGAIN": 11, - "SYS_EWOULDBLOCK": 11, - "SYS_ENOMEM": 12, - "SYS_EACCES": 13, - "SYS_EFAULT": 14, - "SYS_ENOTBLK": 15, - "SYS_EBUSY": 16, - "SYS_EEXIST": 17, - "SYS_EXDEV": 18, - "SYS_ENODEV": 19, - "SYS_ENOTDIR": 20, - "SYS_EISDIR": 21, - "SYS_EINVAL": 22, - "SYS_ENFILE": 23, - "SYS_EMFILE": 24, - "SYS_ENOTTY": 25, 
- "SYS_ETXTBSY": 26, - "SYS_EFBIG": 27, - "SYS_ENOSPC": 28, - "SYS_ESPIPE": 29, - "SYS_EROFS": 30, - "SYS_EMLINK": 31, - "SYS_EPIPE": 32, - "SYS_EDOM": 33, - "SYS_ERANGE": 34, - "SYS_EDEADLK": 35, - "SYS_EDEADLOCK": 35, - "SYS_ENAMETOOLONG": 36, - "SYS_ENOLCK": 37, - "SYS_ENOSYS": 38, - "SYS_ENOTEMPTY": 39, - "SYS_ELOOP": 40, - "SYS_ENOMSG": 42, - "SYS_EIDRM": 43, - "SYS_ECHRNG": 44, - "SYS_EL2NSYNC": 45, - "SYS_EL3HLT": 46, - "SYS_EL3RST": 47, - "SYS_ELNRNG": 48, - "SYS_EUNATCH": 49, - "SYS_ENOCSI": 50, - "SYS_EL2HLT": 51, - "SYS_EBADE": 52, - "SYS_EBADR": 53, - "SYS_EXFULL": 54, - "SYS_ENOANO": 55, - "SYS_EBADRQC": 56, - "SYS_EBADSLT": 57, - "SYS_EBFONT": 59, - "SYS_ENOSTR": 60, - "SYS_ENODATA": 61, - "SYS_ETIME": 62, - "SYS_ENOSR": 63, - "SYS_ENONET": 64, - "SYS_ENOPKG": 65, - "SYS_EREMOTE": 66, - "SYS_ENOLINK": 67, - "SYS_EADV": 68, - "SYS_ESRMNT": 69, - "SYS_ECOMM": 70, - "SYS_EPROTO": 71, - "SYS_EMULTIHOP": 72, - "SYS_EDOTDOT": 73, - "SYS_EBADMSG": 74, - "SYS_EOVERFLOW": 75, - "SYS_ENOTUNIQ": 76, - "SYS_EBADFD": 77, - "SYS_EREMCHG": 78, - "SYS_ELIBACC": 79, - "SYS_ELIBBAD": 80, - "SYS_ELIBSCN": 81, - "SYS_ELIBMAX": 82, - "SYS_ELIBEXEC": 83, - "SYS_EILSEQ": 84, - "SYS_ERESTART": 85, - "SYS_ESTRPIPE": 86, - "SYS_EUSERS": 87, - "SYS_ENOTSOCK": 88, - "SYS_EDESTADDRREQ": 89, - "SYS_EMSGSIZE": 90, - "SYS_EPROTOTYPE": 91, - "SYS_ENOPROTOOPT": 92, - "SYS_EPROTONOSUPPORT": 93, - "SYS_ESOCKTNOSUPPORT": 94, - "SYS_EOPNOTSUPP": 95, - "SYS_ENOTSUP": 95, - "SYS_EPFNOSUPPORT": 96, - "SYS_EAFNOSUPPORT": 97, - "SYS_EADDRINUSE": 98, - "SYS_EADDRNOTAVAIL": 99, - "SYS_ENETDOWN": 100, - "SYS_ENETUNREACH": 101, - "SYS_ENETRESET": 102, - "SYS_ECONNABORTED": 103, - "SYS_ECONNRESET": 104, - "SYS_ENOBUFS": 105, - "SYS_EISCONN": 106, - "SYS_ENOTCONN": 107, - "SYS_ESHUTDOWN": 108, - "SYS_ETOOMANYREFS": 109, - "SYS_ETIMEDOUT": 110, - "SYS_ECONNREFUSED": 111, - "SYS_EHOSTDOWN": 112, - "SYS_EHOSTUNREACH": 113, - "SYS_EALREADY": 114, - "SYS_EINPROGRESS": 115, - "SYS_ESTALE": 116, - "SYS_EUCLEAN": 117, - "SYS_ENOTNAM": 118, - "SYS_ENAVAIL": 119, - "SYS_EISNAM": 120, - "SYS_EREMOTEIO": 121, - "SYS_EDQUOT": 122, - "SYS_ENOMEDIUM": 123, - "SYS_EMEDIUMTYPE": 124, - "SYS_ECANCELED": 125, - "SYS_ENOKEY": 126, - "SYS_EKEYEXPIRED": 127, - "SYS_EKEYREVOKED": 128, - "SYS_EKEYREJECTED": 129, - "SYS_EOWNERDEAD": 130, - "SYS_ENOTRECOVERABLE": 131, - "SYS_ERFKILL": 132, -} - -func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { - p := new(RemoteSocketServiceError_SystemError) - *p = x - return p -} -func (x RemoteSocketServiceError_SystemError) String() string { - return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) -} -func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") - if err != nil { - return err - } - *x = RemoteSocketServiceError_SystemError(value) - return nil -} -func (RemoteSocketServiceError_SystemError) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 1} -} - -type CreateSocketRequest_SocketFamily int32 - -const ( - CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 - CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 -) - -var CreateSocketRequest_SocketFamily_name = map[int32]string{ - 1: "IPv4", - 2: "IPv6", -} -var CreateSocketRequest_SocketFamily_value = map[string]int32{ - "IPv4": 1, - "IPv6": 2, -} - -func (x 
CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { - p := new(CreateSocketRequest_SocketFamily) - *p = x - return p -} -func (x CreateSocketRequest_SocketFamily) String() string { - return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) -} -func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") - if err != nil { - return err - } - *x = CreateSocketRequest_SocketFamily(value) - return nil -} -func (CreateSocketRequest_SocketFamily) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 0} -} - -type CreateSocketRequest_SocketProtocol int32 - -const ( - CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 - CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 -) - -var CreateSocketRequest_SocketProtocol_name = map[int32]string{ - 1: "TCP", - 2: "UDP", -} -var CreateSocketRequest_SocketProtocol_value = map[string]int32{ - "TCP": 1, - "UDP": 2, -} - -func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { - p := new(CreateSocketRequest_SocketProtocol) - *p = x - return p -} -func (x CreateSocketRequest_SocketProtocol) String() string { - return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) -} -func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") - if err != nil { - return err - } - *x = CreateSocketRequest_SocketProtocol(value) - return nil -} -func (CreateSocketRequest_SocketProtocol) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 1} -} - -type SocketOption_SocketOptionLevel int32 - -const ( - SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 - SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 - SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 - SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 -) - -var SocketOption_SocketOptionLevel_name = map[int32]string{ - 0: "SOCKET_SOL_IP", - 1: "SOCKET_SOL_SOCKET", - 6: "SOCKET_SOL_TCP", - 17: "SOCKET_SOL_UDP", -} -var SocketOption_SocketOptionLevel_value = map[string]int32{ - "SOCKET_SOL_IP": 0, - "SOCKET_SOL_SOCKET": 1, - "SOCKET_SOL_TCP": 6, - "SOCKET_SOL_UDP": 17, -} - -func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { - p := new(SocketOption_SocketOptionLevel) - *p = x - return p -} -func (x SocketOption_SocketOptionLevel) String() string { - return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) -} -func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") - if err != nil { - return err - } - *x = SocketOption_SocketOptionLevel(value) - return nil -} -func (SocketOption_SocketOptionLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 0} -} - -type SocketOption_SocketOptionName int32 - -const ( - SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 - 
SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 - SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 - SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 - SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 - SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 - SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 - SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 - SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 - SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 - SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 - SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 - SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5 - SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 - SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 - SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 - SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 - SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 - SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 - SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 -) - -var SocketOption_SocketOptionName_name = map[int32]string{ - 1: "SOCKET_SO_DEBUG", - 2: "SOCKET_SO_REUSEADDR", - 3: "SOCKET_SO_TYPE", - 4: "SOCKET_SO_ERROR", - 5: "SOCKET_SO_DONTROUTE", - 6: "SOCKET_SO_BROADCAST", - 7: "SOCKET_SO_SNDBUF", - 8: "SOCKET_SO_RCVBUF", - 9: "SOCKET_SO_KEEPALIVE", - 10: "SOCKET_SO_OOBINLINE", - 13: "SOCKET_SO_LINGER", - 20: "SOCKET_SO_RCVTIMEO", - 21: "SOCKET_SO_SNDTIMEO", - // Duplicate value: 1: "SOCKET_IP_TOS", - // Duplicate value: 2: "SOCKET_IP_TTL", - // Duplicate value: 3: "SOCKET_IP_HDRINCL", - // Duplicate value: 4: "SOCKET_IP_OPTIONS", - // Duplicate value: 1: "SOCKET_TCP_NODELAY", - // Duplicate value: 2: "SOCKET_TCP_MAXSEG", - // Duplicate value: 3: "SOCKET_TCP_CORK", - // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", - // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", - // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", - // Duplicate value: 7: "SOCKET_TCP_SYNCNT", - // Duplicate value: 8: "SOCKET_TCP_LINGER2", - // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", - // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", - 11: "SOCKET_TCP_INFO", - 12: "SOCKET_TCP_QUICKACK", -} -var SocketOption_SocketOptionName_value = map[string]int32{ - "SOCKET_SO_DEBUG": 1, - "SOCKET_SO_REUSEADDR": 2, - "SOCKET_SO_TYPE": 3, - "SOCKET_SO_ERROR": 4, - "SOCKET_SO_DONTROUTE": 5, - "SOCKET_SO_BROADCAST": 6, - "SOCKET_SO_SNDBUF": 7, - "SOCKET_SO_RCVBUF": 8, - "SOCKET_SO_KEEPALIVE": 9, - "SOCKET_SO_OOBINLINE": 10, - "SOCKET_SO_LINGER": 13, - "SOCKET_SO_RCVTIMEO": 20, - "SOCKET_SO_SNDTIMEO": 21, - "SOCKET_IP_TOS": 1, - "SOCKET_IP_TTL": 2, - "SOCKET_IP_HDRINCL": 3, - "SOCKET_IP_OPTIONS": 4, - "SOCKET_TCP_NODELAY": 1, - "SOCKET_TCP_MAXSEG": 2, - "SOCKET_TCP_CORK": 3, - "SOCKET_TCP_KEEPIDLE": 4, - "SOCKET_TCP_KEEPINTVL": 5, - "SOCKET_TCP_KEEPCNT": 6, - "SOCKET_TCP_SYNCNT": 7, - "SOCKET_TCP_LINGER2": 8, - 
"SOCKET_TCP_DEFER_ACCEPT": 9, - "SOCKET_TCP_WINDOW_CLAMP": 10, - "SOCKET_TCP_INFO": 11, - "SOCKET_TCP_QUICKACK": 12, -} - -func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { - p := new(SocketOption_SocketOptionName) - *p = x - return p -} -func (x SocketOption_SocketOptionName) String() string { - return proto.EnumName(SocketOption_SocketOptionName_name, int32(x)) -} -func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") - if err != nil { - return err - } - *x = SocketOption_SocketOptionName(value) - return nil -} -func (SocketOption_SocketOptionName) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 1} -} - -type ShutDownRequest_How int32 - -const ( - ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 - ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 - ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 -) - -var ShutDownRequest_How_name = map[int32]string{ - 1: "SOCKET_SHUT_RD", - 2: "SOCKET_SHUT_WR", - 3: "SOCKET_SHUT_RDWR", -} -var ShutDownRequest_How_value = map[string]int32{ - "SOCKET_SHUT_RD": 1, - "SOCKET_SHUT_WR": 2, - "SOCKET_SHUT_RDWR": 3, -} - -func (x ShutDownRequest_How) Enum() *ShutDownRequest_How { - p := new(ShutDownRequest_How) - *p = x - return p -} -func (x ShutDownRequest_How) String() string { - return proto.EnumName(ShutDownRequest_How_name, int32(x)) -} -func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") - if err != nil { - return err - } - *x = ShutDownRequest_How(value) - return nil -} -func (ShutDownRequest_How) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{21, 0} -} - -type ReceiveRequest_Flags int32 - -const ( - ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 - ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 -) - -var ReceiveRequest_Flags_name = map[int32]string{ - 1: "MSG_OOB", - 2: "MSG_PEEK", -} -var ReceiveRequest_Flags_value = map[string]int32{ - "MSG_OOB": 1, - "MSG_PEEK": 2, -} - -func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { - p := new(ReceiveRequest_Flags) - *p = x - return p -} -func (x ReceiveRequest_Flags) String() string { - return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) -} -func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") - if err != nil { - return err - } - *x = ReceiveRequest_Flags(value) - return nil -} -func (ReceiveRequest_Flags) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{27, 0} -} - -type PollEvent_PollEventFlag int32 - -const ( - PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 - PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 - PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 - PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 - PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 - PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 - PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 - PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 - PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 - PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 - PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 - 
PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 - PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 - PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 -) - -var PollEvent_PollEventFlag_name = map[int32]string{ - 0: "SOCKET_POLLNONE", - 1: "SOCKET_POLLIN", - 2: "SOCKET_POLLPRI", - 4: "SOCKET_POLLOUT", - 8: "SOCKET_POLLERR", - 16: "SOCKET_POLLHUP", - 32: "SOCKET_POLLNVAL", - 64: "SOCKET_POLLRDNORM", - 128: "SOCKET_POLLRDBAND", - 256: "SOCKET_POLLWRNORM", - 512: "SOCKET_POLLWRBAND", - 1024: "SOCKET_POLLMSG", - 4096: "SOCKET_POLLREMOVE", - 8192: "SOCKET_POLLRDHUP", -} -var PollEvent_PollEventFlag_value = map[string]int32{ - "SOCKET_POLLNONE": 0, - "SOCKET_POLLIN": 1, - "SOCKET_POLLPRI": 2, - "SOCKET_POLLOUT": 4, - "SOCKET_POLLERR": 8, - "SOCKET_POLLHUP": 16, - "SOCKET_POLLNVAL": 32, - "SOCKET_POLLRDNORM": 64, - "SOCKET_POLLRDBAND": 128, - "SOCKET_POLLWRNORM": 256, - "SOCKET_POLLWRBAND": 512, - "SOCKET_POLLMSG": 1024, - "SOCKET_POLLREMOVE": 4096, - "SOCKET_POLLRDHUP": 8192, -} - -func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { - p := new(PollEvent_PollEventFlag) - *p = x - return p -} -func (x PollEvent_PollEventFlag) String() string { - return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) -} -func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") - if err != nil { - return err - } - *x = PollEvent_PollEventFlag(value) - return nil -} -func (PollEvent_PollEventFlag) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{29, 0} -} - -type ResolveReply_ErrorCode int32 - -const ( - ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1 - ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 - ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 - ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 - ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 - ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 - ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 - ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 - ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 - ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 - ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 - ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 - ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 - ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 - ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 -) - -var ResolveReply_ErrorCode_name = map[int32]string{ - 1: "SOCKET_EAI_ADDRFAMILY", - 2: "SOCKET_EAI_AGAIN", - 3: "SOCKET_EAI_BADFLAGS", - 4: "SOCKET_EAI_FAIL", - 5: "SOCKET_EAI_FAMILY", - 6: "SOCKET_EAI_MEMORY", - 7: "SOCKET_EAI_NODATA", - 8: "SOCKET_EAI_NONAME", - 9: "SOCKET_EAI_SERVICE", - 10: "SOCKET_EAI_SOCKTYPE", - 11: "SOCKET_EAI_SYSTEM", - 12: "SOCKET_EAI_BADHINTS", - 13: "SOCKET_EAI_PROTOCOL", - 14: "SOCKET_EAI_OVERFLOW", - 15: "SOCKET_EAI_MAX", -} -var ResolveReply_ErrorCode_value = map[string]int32{ - "SOCKET_EAI_ADDRFAMILY": 1, - "SOCKET_EAI_AGAIN": 2, - "SOCKET_EAI_BADFLAGS": 3, - "SOCKET_EAI_FAIL": 4, - "SOCKET_EAI_FAMILY": 5, - "SOCKET_EAI_MEMORY": 6, - "SOCKET_EAI_NODATA": 7, - "SOCKET_EAI_NONAME": 8, - "SOCKET_EAI_SERVICE": 9, - "SOCKET_EAI_SOCKTYPE": 10, - "SOCKET_EAI_SYSTEM": 11, - "SOCKET_EAI_BADHINTS": 12, - "SOCKET_EAI_PROTOCOL": 13, - "SOCKET_EAI_OVERFLOW": 14, - "SOCKET_EAI_MAX": 15, -} 
- -func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { - p := new(ResolveReply_ErrorCode) - *p = x - return p -} -func (x ResolveReply_ErrorCode) String() string { - return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) -} -func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode") - if err != nil { - return err - } - *x = ResolveReply_ErrorCode(value) - return nil -} -func (ResolveReply_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{33, 0} -} - -type RemoteSocketServiceError struct { - SystemError *int32 `protobuf:"varint,1,opt,name=system_error,json=systemError,def=0" json:"system_error,omitempty"` - ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } -func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } -func (*RemoteSocketServiceError) ProtoMessage() {} -func (*RemoteSocketServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0} -} -func (m *RemoteSocketServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RemoteSocketServiceError.Unmarshal(m, b) -} -func (m *RemoteSocketServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RemoteSocketServiceError.Marshal(b, m, deterministic) -} -func (dst *RemoteSocketServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoteSocketServiceError.Merge(dst, src) -} -func (m *RemoteSocketServiceError) XXX_Size() int { - return xxx_messageInfo_RemoteSocketServiceError.Size(m) -} -func (m *RemoteSocketServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_RemoteSocketServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_RemoteSocketServiceError proto.InternalMessageInfo - -const Default_RemoteSocketServiceError_SystemError int32 = 0 - -func (m *RemoteSocketServiceError) GetSystemError() int32 { - if m != nil && m.SystemError != nil { - return *m.SystemError - } - return Default_RemoteSocketServiceError_SystemError -} - -func (m *RemoteSocketServiceError) GetErrorDetail() string { - if m != nil && m.ErrorDetail != nil { - return *m.ErrorDetail - } - return "" -} - -type AddressPort struct { - Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` - PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` - HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint,json=hostnameHint" json:"hostname_hint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddressPort) Reset() { *m = AddressPort{} } -func (m *AddressPort) String() string { return proto.CompactTextString(m) } -func (*AddressPort) ProtoMessage() {} -func (*AddressPort) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{1} -} -func (m *AddressPort) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddressPort.Unmarshal(m, b) -} -func (m *AddressPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddressPort.Marshal(b, m, deterministic) -} 
-func (dst *AddressPort) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddressPort.Merge(dst, src) -} -func (m *AddressPort) XXX_Size() int { - return xxx_messageInfo_AddressPort.Size(m) -} -func (m *AddressPort) XXX_DiscardUnknown() { - xxx_messageInfo_AddressPort.DiscardUnknown(m) -} - -var xxx_messageInfo_AddressPort proto.InternalMessageInfo - -func (m *AddressPort) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return 0 -} - -func (m *AddressPort) GetPackedAddress() []byte { - if m != nil { - return m.PackedAddress - } - return nil -} - -func (m *AddressPort) GetHostnameHint() string { - if m != nil && m.HostnameHint != nil { - return *m.HostnameHint - } - return "" -} - -type CreateSocketRequest struct { - Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` - Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` - SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions" json:"socket_options,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,json=listenBacklog,def=0" json:"listen_backlog,omitempty"` - RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` - AppId *string `protobuf:"bytes,9,opt,name=app_id,json=appId" json:"app_id,omitempty"` - ProjectId *int64 `protobuf:"varint,10,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } -func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } -func (*CreateSocketRequest) ProtoMessage() {} -func (*CreateSocketRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2} -} -func (m *CreateSocketRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSocketRequest.Unmarshal(m, b) -} -func (m *CreateSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSocketRequest.Marshal(b, m, deterministic) -} -func (dst *CreateSocketRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSocketRequest.Merge(dst, src) -} -func (m *CreateSocketRequest) XXX_Size() int { - return xxx_messageInfo_CreateSocketRequest.Size(m) -} -func (m *CreateSocketRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSocketRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSocketRequest proto.InternalMessageInfo - -const Default_CreateSocketRequest_ListenBacklog int32 = 0 - -func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { - if m != nil && m.Family != nil { - return *m.Family - } - return CreateSocketRequest_IPv4 -} - -func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { - if m != nil && m.Protocol != nil { - return *m.Protocol - } - return CreateSocketRequest_TCP -} - -func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { - if m != nil { - return m.SocketOptions - } - return nil -} - -func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { - if m != nil { - return 
m.ProxyExternalIp - } - return nil -} - -func (m *CreateSocketRequest) GetListenBacklog() int32 { - if m != nil && m.ListenBacklog != nil { - return *m.ListenBacklog - } - return Default_CreateSocketRequest_ListenBacklog -} - -func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { - if m != nil { - return m.RemoteIp - } - return nil -} - -func (m *CreateSocketRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CreateSocketRequest) GetProjectId() int64 { - if m != nil && m.ProjectId != nil { - return *m.ProjectId - } - return 0 -} - -type CreateSocketReply struct { - SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } -func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) } -func (*CreateSocketReply) ProtoMessage() {} -func (*CreateSocketReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{3} -} - -var extRange_CreateSocketReply = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_CreateSocketReply -} -func (m *CreateSocketReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSocketReply.Unmarshal(m, b) -} -func (m *CreateSocketReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSocketReply.Marshal(b, m, deterministic) -} -func (dst *CreateSocketReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSocketReply.Merge(dst, src) -} -func (m *CreateSocketReply) XXX_Size() int { - return xxx_messageInfo_CreateSocketReply.Size(m) -} -func (m *CreateSocketReply) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSocketReply.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSocketReply proto.InternalMessageInfo - -func (m *CreateSocketReply) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *CreateSocketReply) GetServerAddress() *AddressPort { - if m != nil { - return m.ServerAddress - } - return nil -} - -func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type BindRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BindRequest) Reset() { *m = BindRequest{} } -func (m *BindRequest) String() string { return proto.CompactTextString(m) } -func (*BindRequest) ProtoMessage() {} -func (*BindRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{4} -} -func (m *BindRequest) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_BindRequest.Unmarshal(m, b) -} -func (m *BindRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindRequest.Marshal(b, m, deterministic) -} -func (dst *BindRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BindRequest.Merge(dst, src) -} -func (m *BindRequest) XXX_Size() int { - return xxx_messageInfo_BindRequest.Size(m) -} -func (m *BindRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BindRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BindRequest proto.InternalMessageInfo - -func (m *BindRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *BindRequest) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type BindReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BindReply) Reset() { *m = BindReply{} } -func (m *BindReply) String() string { return proto.CompactTextString(m) } -func (*BindReply) ProtoMessage() {} -func (*BindReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{5} -} -func (m *BindReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BindReply.Unmarshal(m, b) -} -func (m *BindReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindReply.Marshal(b, m, deterministic) -} -func (dst *BindReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_BindReply.Merge(dst, src) -} -func (m *BindReply) XXX_Size() int { - return xxx_messageInfo_BindReply.Size(m) -} -func (m *BindReply) XXX_DiscardUnknown() { - xxx_messageInfo_BindReply.DiscardUnknown(m) -} - -var xxx_messageInfo_BindReply proto.InternalMessageInfo - -func (m *BindReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type GetSocketNameRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } -func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetSocketNameRequest) ProtoMessage() {} -func (*GetSocketNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{6} -} -func (m *GetSocketNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketNameRequest.Unmarshal(m, b) -} -func (m *GetSocketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetSocketNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketNameRequest.Merge(dst, src) -} -func (m *GetSocketNameRequest) XXX_Size() int { - return xxx_messageInfo_GetSocketNameRequest.Size(m) -} -func (m *GetSocketNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketNameRequest proto.InternalMessageInfo - -func (m *GetSocketNameRequest) GetSocketDescriptor() string { - if m != nil && 
m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -type GetSocketNameReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } -func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } -func (*GetSocketNameReply) ProtoMessage() {} -func (*GetSocketNameReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{7} -} -func (m *GetSocketNameReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketNameReply.Unmarshal(m, b) -} -func (m *GetSocketNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketNameReply.Marshal(b, m, deterministic) -} -func (dst *GetSocketNameReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketNameReply.Merge(dst, src) -} -func (m *GetSocketNameReply) XXX_Size() int { - return xxx_messageInfo_GetSocketNameReply.Size(m) -} -func (m *GetSocketNameReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketNameReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketNameReply proto.InternalMessageInfo - -func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type GetPeerNameRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } -func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetPeerNameRequest) ProtoMessage() {} -func (*GetPeerNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{8} -} -func (m *GetPeerNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPeerNameRequest.Unmarshal(m, b) -} -func (m *GetPeerNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPeerNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetPeerNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPeerNameRequest.Merge(dst, src) -} -func (m *GetPeerNameRequest) XXX_Size() int { - return xxx_messageInfo_GetPeerNameRequest.Size(m) -} -func (m *GetPeerNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPeerNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPeerNameRequest proto.InternalMessageInfo - -func (m *GetPeerNameRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -type GetPeerNameReply struct { - PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip,json=peerIp" json:"peer_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } -func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } -func (*GetPeerNameReply) ProtoMessage() {} -func (*GetPeerNameReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{9} -} -func (m 
*GetPeerNameReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPeerNameReply.Unmarshal(m, b) -} -func (m *GetPeerNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPeerNameReply.Marshal(b, m, deterministic) -} -func (dst *GetPeerNameReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPeerNameReply.Merge(dst, src) -} -func (m *GetPeerNameReply) XXX_Size() int { - return xxx_messageInfo_GetPeerNameReply.Size(m) -} -func (m *GetPeerNameReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetPeerNameReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPeerNameReply proto.InternalMessageInfo - -func (m *GetPeerNameReply) GetPeerIp() *AddressPort { - if m != nil { - return m.PeerIp - } - return nil -} - -type SocketOption struct { - Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` - Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` - Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SocketOption) Reset() { *m = SocketOption{} } -func (m *SocketOption) String() string { return proto.CompactTextString(m) } -func (*SocketOption) ProtoMessage() {} -func (*SocketOption) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10} -} -func (m *SocketOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SocketOption.Unmarshal(m, b) -} -func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic) -} -func (dst *SocketOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_SocketOption.Merge(dst, src) -} -func (m *SocketOption) XXX_Size() int { - return xxx_messageInfo_SocketOption.Size(m) -} -func (m *SocketOption) XXX_DiscardUnknown() { - xxx_messageInfo_SocketOption.DiscardUnknown(m) -} - -var xxx_messageInfo_SocketOption proto.InternalMessageInfo - -func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { - if m != nil && m.Level != nil { - return *m.Level - } - return SocketOption_SOCKET_SOL_IP -} - -func (m *SocketOption) GetOption() SocketOption_SocketOptionName { - if m != nil && m.Option != nil { - return *m.Option - } - return SocketOption_SOCKET_SO_DEBUG -} - -func (m *SocketOption) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type SetSocketOptionsRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } -func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } -func (*SetSocketOptionsRequest) ProtoMessage() {} -func (*SetSocketOptionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{11} -} -func (m *SetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetSocketOptionsRequest.Unmarshal(m, b) -} -func (m *SetSocketOptionsRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetSocketOptionsRequest.Marshal(b, m, deterministic) -} -func (dst *SetSocketOptionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetSocketOptionsRequest.Merge(dst, src) -} -func (m *SetSocketOptionsRequest) XXX_Size() int { - return xxx_messageInfo_SetSocketOptionsRequest.Size(m) -} -func (m *SetSocketOptionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetSocketOptionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetSocketOptionsRequest proto.InternalMessageInfo - -func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type SetSocketOptionsReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } -func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } -func (*SetSocketOptionsReply) ProtoMessage() {} -func (*SetSocketOptionsReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{12} -} -func (m *SetSocketOptionsReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetSocketOptionsReply.Unmarshal(m, b) -} -func (m *SetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetSocketOptionsReply.Marshal(b, m, deterministic) -} -func (dst *SetSocketOptionsReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetSocketOptionsReply.Merge(dst, src) -} -func (m *SetSocketOptionsReply) XXX_Size() int { - return xxx_messageInfo_SetSocketOptionsReply.Size(m) -} -func (m *SetSocketOptionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_SetSocketOptionsReply.DiscardUnknown(m) -} - -var xxx_messageInfo_SetSocketOptionsReply proto.InternalMessageInfo - -type GetSocketOptionsRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } -func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetSocketOptionsRequest) ProtoMessage() {} -func (*GetSocketOptionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{13} -} -func (m *GetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketOptionsRequest.Unmarshal(m, b) -} -func (m *GetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketOptionsRequest.Marshal(b, m, deterministic) -} -func (dst *GetSocketOptionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketOptionsRequest.Merge(dst, src) -} -func (m *GetSocketOptionsRequest) XXX_Size() int { - return xxx_messageInfo_GetSocketOptionsRequest.Size(m) -} -func (m *GetSocketOptionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketOptionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketOptionsRequest 
proto.InternalMessageInfo - -func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type GetSocketOptionsReply struct { - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } -func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } -func (*GetSocketOptionsReply) ProtoMessage() {} -func (*GetSocketOptionsReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{14} -} -func (m *GetSocketOptionsReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketOptionsReply.Unmarshal(m, b) -} -func (m *GetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketOptionsReply.Marshal(b, m, deterministic) -} -func (dst *GetSocketOptionsReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketOptionsReply.Merge(dst, src) -} -func (m *GetSocketOptionsReply) XXX_Size() int { - return xxx_messageInfo_GetSocketOptionsReply.Size(m) -} -func (m *GetSocketOptionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketOptionsReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketOptionsReply proto.InternalMessageInfo - -func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type ConnectRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } -func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } -func (*ConnectRequest) ProtoMessage() {} -func (*ConnectRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{15} -} -func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectRequest.Unmarshal(m, b) -} -func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) -} -func (dst *ConnectRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectRequest.Merge(dst, src) -} -func (m *ConnectRequest) XXX_Size() int { - return xxx_messageInfo_ConnectRequest.Size(m) -} -func (m *ConnectRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo - -const Default_ConnectRequest_TimeoutSeconds float64 = -1 - -func (m *ConnectRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ConnectRequest) GetRemoteIp() *AddressPort { - if m != nil { - return m.RemoteIp - } - return nil -} - -func (m 
*ConnectRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_ConnectRequest_TimeoutSeconds -} - -type ConnectReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectReply) Reset() { *m = ConnectReply{} } -func (m *ConnectReply) String() string { return proto.CompactTextString(m) } -func (*ConnectReply) ProtoMessage() {} -func (*ConnectReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{16} -} - -var extRange_ConnectReply = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ConnectReply -} -func (m *ConnectReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectReply.Unmarshal(m, b) -} -func (m *ConnectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectReply.Marshal(b, m, deterministic) -} -func (dst *ConnectReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectReply.Merge(dst, src) -} -func (m *ConnectReply) XXX_Size() int { - return xxx_messageInfo_ConnectReply.Size(m) -} -func (m *ConnectReply) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectReply proto.InternalMessageInfo - -func (m *ConnectReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type ListenRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListenRequest) Reset() { *m = ListenRequest{} } -func (m *ListenRequest) String() string { return proto.CompactTextString(m) } -func (*ListenRequest) ProtoMessage() {} -func (*ListenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{17} -} -func (m *ListenRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListenRequest.Unmarshal(m, b) -} -func (m *ListenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListenRequest.Marshal(b, m, deterministic) -} -func (dst *ListenRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenRequest.Merge(dst, src) -} -func (m *ListenRequest) XXX_Size() int { - return xxx_messageInfo_ListenRequest.Size(m) -} -func (m *ListenRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListenRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenRequest proto.InternalMessageInfo - -func (m *ListenRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ListenRequest) GetBacklog() int32 { - if m != nil && m.Backlog != nil { - return *m.Backlog - } - return 0 -} - -type ListenReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListenReply) Reset() { *m = ListenReply{} } -func (m *ListenReply) String() string { 
return proto.CompactTextString(m) } -func (*ListenReply) ProtoMessage() {} -func (*ListenReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{18} -} -func (m *ListenReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListenReply.Unmarshal(m, b) -} -func (m *ListenReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListenReply.Marshal(b, m, deterministic) -} -func (dst *ListenReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenReply.Merge(dst, src) -} -func (m *ListenReply) XXX_Size() int { - return xxx_messageInfo_ListenReply.Size(m) -} -func (m *ListenReply) XXX_DiscardUnknown() { - xxx_messageInfo_ListenReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenReply proto.InternalMessageInfo - -type AcceptRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } -func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } -func (*AcceptRequest) ProtoMessage() {} -func (*AcceptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{19} -} -func (m *AcceptRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AcceptRequest.Unmarshal(m, b) -} -func (m *AcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AcceptRequest.Marshal(b, m, deterministic) -} -func (dst *AcceptRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AcceptRequest.Merge(dst, src) -} -func (m *AcceptRequest) XXX_Size() int { - return xxx_messageInfo_AcceptRequest.Size(m) -} -func (m *AcceptRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AcceptRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AcceptRequest proto.InternalMessageInfo - -const Default_AcceptRequest_TimeoutSeconds float64 = -1 - -func (m *AcceptRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *AcceptRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_AcceptRequest_TimeoutSeconds -} - -type AcceptReply struct { - NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor,json=newSocketDescriptor" json:"new_socket_descriptor,omitempty"` - RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address,json=remoteAddress" json:"remote_address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AcceptReply) Reset() { *m = AcceptReply{} } -func (m *AcceptReply) String() string { return proto.CompactTextString(m) } -func (*AcceptReply) ProtoMessage() {} -func (*AcceptReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{20} -} -func (m *AcceptReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AcceptReply.Unmarshal(m, b) -} -func (m *AcceptReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AcceptReply.Marshal(b, m, deterministic) -} -func (dst *AcceptReply) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_AcceptReply.Merge(dst, src) -} -func (m *AcceptReply) XXX_Size() int { - return xxx_messageInfo_AcceptReply.Size(m) -} -func (m *AcceptReply) XXX_DiscardUnknown() { - xxx_messageInfo_AcceptReply.DiscardUnknown(m) -} - -var xxx_messageInfo_AcceptReply proto.InternalMessageInfo - -func (m *AcceptReply) GetNewSocketDescriptor() []byte { - if m != nil { - return m.NewSocketDescriptor - } - return nil -} - -func (m *AcceptReply) GetRemoteAddress() *AddressPort { - if m != nil { - return m.RemoteAddress - } - return nil -} - -type ShutDownRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` - SendOffset *int64 `protobuf:"varint,3,req,name=send_offset,json=sendOffset" json:"send_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } -func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } -func (*ShutDownRequest) ProtoMessage() {} -func (*ShutDownRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{21} -} -func (m *ShutDownRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShutDownRequest.Unmarshal(m, b) -} -func (m *ShutDownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShutDownRequest.Marshal(b, m, deterministic) -} -func (dst *ShutDownRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShutDownRequest.Merge(dst, src) -} -func (m *ShutDownRequest) XXX_Size() int { - return xxx_messageInfo_ShutDownRequest.Size(m) -} -func (m *ShutDownRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ShutDownRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ShutDownRequest proto.InternalMessageInfo - -func (m *ShutDownRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ShutDownRequest) GetHow() ShutDownRequest_How { - if m != nil && m.How != nil { - return *m.How - } - return ShutDownRequest_SOCKET_SHUT_RD -} - -func (m *ShutDownRequest) GetSendOffset() int64 { - if m != nil && m.SendOffset != nil { - return *m.SendOffset - } - return 0 -} - -type ShutDownReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } -func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } -func (*ShutDownReply) ProtoMessage() {} -func (*ShutDownReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{22} -} -func (m *ShutDownReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShutDownReply.Unmarshal(m, b) -} -func (m *ShutDownReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShutDownReply.Marshal(b, m, deterministic) -} -func (dst *ShutDownReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShutDownReply.Merge(dst, src) -} -func (m *ShutDownReply) XXX_Size() int { - return xxx_messageInfo_ShutDownReply.Size(m) -} -func (m *ShutDownReply) XXX_DiscardUnknown() { - xxx_messageInfo_ShutDownReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ShutDownReply 
proto.InternalMessageInfo - -type CloseRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,json=sendOffset,def=-1" json:"send_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseRequest) Reset() { *m = CloseRequest{} } -func (m *CloseRequest) String() string { return proto.CompactTextString(m) } -func (*CloseRequest) ProtoMessage() {} -func (*CloseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{23} -} -func (m *CloseRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseRequest.Unmarshal(m, b) -} -func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) -} -func (dst *CloseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseRequest.Merge(dst, src) -} -func (m *CloseRequest) XXX_Size() int { - return xxx_messageInfo_CloseRequest.Size(m) -} -func (m *CloseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CloseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseRequest proto.InternalMessageInfo - -const Default_CloseRequest_SendOffset int64 = -1 - -func (m *CloseRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *CloseRequest) GetSendOffset() int64 { - if m != nil && m.SendOffset != nil { - return *m.SendOffset - } - return Default_CloseRequest_SendOffset -} - -type CloseReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseReply) Reset() { *m = CloseReply{} } -func (m *CloseReply) String() string { return proto.CompactTextString(m) } -func (*CloseReply) ProtoMessage() {} -func (*CloseReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{24} -} -func (m *CloseReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseReply.Unmarshal(m, b) -} -func (m *CloseReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseReply.Marshal(b, m, deterministic) -} -func (dst *CloseReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseReply.Merge(dst, src) -} -func (m *CloseReply) XXX_Size() int { - return xxx_messageInfo_CloseReply.Size(m) -} -func (m *CloseReply) XXX_DiscardUnknown() { - xxx_messageInfo_CloseReply.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseReply proto.InternalMessageInfo - -type SendRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` - StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` - Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` - SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to,json=sendTo" json:"send_to,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendRequest) Reset() { *m 
= SendRequest{} } -func (m *SendRequest) String() string { return proto.CompactTextString(m) } -func (*SendRequest) ProtoMessage() {} -func (*SendRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{25} -} -func (m *SendRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendRequest.Unmarshal(m, b) -} -func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) -} -func (dst *SendRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendRequest.Merge(dst, src) -} -func (m *SendRequest) XXX_Size() int { - return xxx_messageInfo_SendRequest.Size(m) -} -func (m *SendRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendRequest proto.InternalMessageInfo - -const Default_SendRequest_Flags int32 = 0 -const Default_SendRequest_TimeoutSeconds float64 = -1 - -func (m *SendRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *SendRequest) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *SendRequest) GetStreamOffset() int64 { - if m != nil && m.StreamOffset != nil { - return *m.StreamOffset - } - return 0 -} - -func (m *SendRequest) GetFlags() int32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return Default_SendRequest_Flags -} - -func (m *SendRequest) GetSendTo() *AddressPort { - if m != nil { - return m.SendTo - } - return nil -} - -func (m *SendRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_SendRequest_TimeoutSeconds -} - -type SendReply struct { - DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendReply) Reset() { *m = SendReply{} } -func (m *SendReply) String() string { return proto.CompactTextString(m) } -func (*SendReply) ProtoMessage() {} -func (*SendReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{26} -} -func (m *SendReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendReply.Unmarshal(m, b) -} -func (m *SendReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendReply.Marshal(b, m, deterministic) -} -func (dst *SendReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendReply.Merge(dst, src) -} -func (m *SendReply) XXX_Size() int { - return xxx_messageInfo_SendReply.Size(m) -} -func (m *SendReply) XXX_DiscardUnknown() { - xxx_messageInfo_SendReply.DiscardUnknown(m) -} - -var xxx_messageInfo_SendReply proto.InternalMessageInfo - -func (m *SendReply) GetDataSent() int32 { - if m != nil && m.DataSent != nil { - return *m.DataSent - } - return 0 -} - -type ReceiveRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"` - Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized 
[]byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } -func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } -func (*ReceiveRequest) ProtoMessage() {} -func (*ReceiveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{27} -} -func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b) -} -func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic) -} -func (dst *ReceiveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReceiveRequest.Merge(dst, src) -} -func (m *ReceiveRequest) XXX_Size() int { - return xxx_messageInfo_ReceiveRequest.Size(m) -} -func (m *ReceiveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReceiveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo - -const Default_ReceiveRequest_Flags int32 = 0 -const Default_ReceiveRequest_TimeoutSeconds float64 = -1 - -func (m *ReceiveRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ReceiveRequest) GetDataSize() int32 { - if m != nil && m.DataSize != nil { - return *m.DataSize - } - return 0 -} - -func (m *ReceiveRequest) GetFlags() int32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return Default_ReceiveRequest_Flags -} - -func (m *ReceiveRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_ReceiveRequest_TimeoutSeconds -} - -type ReceiveReply struct { - StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` - ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"` - BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } -func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } -func (*ReceiveReply) ProtoMessage() {} -func (*ReceiveReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{28} -} -func (m *ReceiveReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReceiveReply.Unmarshal(m, b) -} -func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic) -} -func (dst *ReceiveReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReceiveReply.Merge(dst, src) -} -func (m *ReceiveReply) XXX_Size() int { - return xxx_messageInfo_ReceiveReply.Size(m) -} -func (m *ReceiveReply) XXX_DiscardUnknown() { - xxx_messageInfo_ReceiveReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo - -func (m *ReceiveReply) GetStreamOffset() int64 { - if m != nil && m.StreamOffset != nil { - return *m.StreamOffset - } - return 0 -} - -func (m *ReceiveReply) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *ReceiveReply) GetReceivedFrom() *AddressPort { - if m != nil { - return m.ReceivedFrom - } - return nil 
-} - -func (m *ReceiveReply) GetBufferSize() int32 { - if m != nil && m.BufferSize != nil { - return *m.BufferSize - } - return 0 -} - -type PollEvent struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"` - ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollEvent) Reset() { *m = PollEvent{} } -func (m *PollEvent) String() string { return proto.CompactTextString(m) } -func (*PollEvent) ProtoMessage() {} -func (*PollEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{29} -} -func (m *PollEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollEvent.Unmarshal(m, b) -} -func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic) -} -func (dst *PollEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollEvent.Merge(dst, src) -} -func (m *PollEvent) XXX_Size() int { - return xxx_messageInfo_PollEvent.Size(m) -} -func (m *PollEvent) XXX_DiscardUnknown() { - xxx_messageInfo_PollEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_PollEvent proto.InternalMessageInfo - -func (m *PollEvent) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *PollEvent) GetRequestedEvents() int32 { - if m != nil && m.RequestedEvents != nil { - return *m.RequestedEvents - } - return 0 -} - -func (m *PollEvent) GetObservedEvents() int32 { - if m != nil && m.ObservedEvents != nil { - return *m.ObservedEvents - } - return 0 -} - -type PollRequest struct { - Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollRequest) Reset() { *m = PollRequest{} } -func (m *PollRequest) String() string { return proto.CompactTextString(m) } -func (*PollRequest) ProtoMessage() {} -func (*PollRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{30} -} -func (m *PollRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollRequest.Unmarshal(m, b) -} -func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) -} -func (dst *PollRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollRequest.Merge(dst, src) -} -func (m *PollRequest) XXX_Size() int { - return xxx_messageInfo_PollRequest.Size(m) -} -func (m *PollRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PollRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PollRequest proto.InternalMessageInfo - -const Default_PollRequest_TimeoutSeconds float64 = -1 - -func (m *PollRequest) GetEvents() []*PollEvent { - if m != nil { - return m.Events - } - return nil -} - -func (m *PollRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return 
Default_PollRequest_TimeoutSeconds -} - -type PollReply struct { - Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollReply) Reset() { *m = PollReply{} } -func (m *PollReply) String() string { return proto.CompactTextString(m) } -func (*PollReply) ProtoMessage() {} -func (*PollReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{31} -} -func (m *PollReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollReply.Unmarshal(m, b) -} -func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollReply.Marshal(b, m, deterministic) -} -func (dst *PollReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollReply.Merge(dst, src) -} -func (m *PollReply) XXX_Size() int { - return xxx_messageInfo_PollReply.Size(m) -} -func (m *PollReply) XXX_DiscardUnknown() { - xxx_messageInfo_PollReply.DiscardUnknown(m) -} - -var xxx_messageInfo_PollReply proto.InternalMessageInfo - -func (m *PollReply) GetEvents() []*PollEvent { - if m != nil { - return m.Events - } - return nil -} - -type ResolveRequest struct { - Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` - AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } -func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } -func (*ResolveRequest) ProtoMessage() {} -func (*ResolveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{32} -} -func (m *ResolveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveRequest.Unmarshal(m, b) -} -func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic) -} -func (dst *ResolveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveRequest.Merge(dst, src) -} -func (m *ResolveRequest) XXX_Size() int { - return xxx_messageInfo_ResolveRequest.Size(m) -} -func (m *ResolveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo - -func (m *ResolveRequest) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { - if m != nil { - return m.AddressFamilies - } - return nil -} - -type ResolveReply struct { - PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` - CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"` - Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveReply) Reset() { *m = ResolveReply{} } -func (m *ResolveReply) String() string { return proto.CompactTextString(m) } -func (*ResolveReply) ProtoMessage() {} -func 
(*ResolveReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{33} -} -func (m *ResolveReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveReply.Unmarshal(m, b) -} -func (m *ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic) -} -func (dst *ResolveReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveReply.Merge(dst, src) -} -func (m *ResolveReply) XXX_Size() int { - return xxx_messageInfo_ResolveReply.Size(m) -} -func (m *ResolveReply) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveReply proto.InternalMessageInfo - -func (m *ResolveReply) GetPackedAddress() [][]byte { - if m != nil { - return m.PackedAddress - } - return nil -} - -func (m *ResolveReply) GetCanonicalName() string { - if m != nil && m.CanonicalName != nil { - return *m.CanonicalName - } - return "" -} - -func (m *ResolveReply) GetAliases() []string { - if m != nil { - return m.Aliases - } - return nil -} - -func init() { - proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError") - proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort") - proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest") - proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply") - proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest") - proto.RegisterType((*BindReply)(nil), "appengine.BindReply") - proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest") - proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply") - proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest") - proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply") - proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption") - proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest") - proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply") - proto.RegisterType((*GetSocketOptionsRequest)(nil), "appengine.GetSocketOptionsRequest") - proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply") - proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest") - proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply") - proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest") - proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply") - proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest") - proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply") - proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest") - proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply") - proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest") - proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply") - proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest") - proto.RegisterType((*SendReply)(nil), "appengine.SendReply") - proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest") - proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply") - proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent") - proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest") - proto.RegisterType((*PollReply)(nil), "appengine.PollReply") - proto.RegisterType((*ResolveRequest)(nil), 
"appengine.ResolveRequest") - proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", fileDescriptor_socket_service_b5f8f233dc327808) -} - -var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{ - // 3088 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6, - 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e, - 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00, - 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd, - 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f, - 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5, - 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2, - 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd, - 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 0x74, 0xdc, - 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7, - 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72, - 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66, - 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e, - 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda, - 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e, - 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a, - 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b, - 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85, - 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c, - 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6, - 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19, - 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87, - 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66, - 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 0x78, 0x2d, 0xd8, 0x5c, - 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7, - 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9, - 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1, - 0x41, 0x5d, 0xd1, 0x4d, 0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85, - 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f, - 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a, - 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4, - 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5, - 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 
0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18, - 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8, - 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61, - 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 0x3d, - 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34, - 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83, - 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf, - 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63, - 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3, - 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86, - 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c, - 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec, - 0x90, 0xc0, 0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1, - 0x5a, 0xd1, 0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b, - 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f, - 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70, - 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d, - 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa, - 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d, - 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d, - 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69, - 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07, - 0x98, 0x13, 0xc2, 0xb1, 0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58, - 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18, - 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3, - 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98, - 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3, - 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37, - 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2, - 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa, - 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77, - 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d, - 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14, - 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99, - 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c, - 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c, - 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 
0x07, 0x5d, 0xd2, 0xe5, 0x30, - 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28, - 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 0x4c, 0x55, 0x4a, - 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2, - 0x38, 0x73, 0x7c, 0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4, - 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88, - 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde, - 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b, - 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe, - 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24, - 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c, - 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86, - 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5, - 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d, - 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68, - 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0, - 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 0x0f, 0xc7, - 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77, - 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71, - 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b, - 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f, - 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3, - 0xed, 0x7c, 0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3, - 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93, - 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38, - 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79, - 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb, - 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b, - 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78, - 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34, - 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65, - 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 0x91, 0xff, 0xf7, 0x23, - 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74, - 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba, - 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c, - 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e, - 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 
0xd7, - 0xd7, 0xd7, 0x25, 0xeb, 0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5, - 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2, - 0x78, 0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe, - 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf, - 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4, - 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47, - 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25, - 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f, - 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49, - 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41, - 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16, - 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a, - 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5, - 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47, - 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c, - 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb, - 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca, - 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29, - 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c, - 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4, - 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 0x44, 0xdc, 0x23, 0x21, 0x83, 0x35, - 0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e, - 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22, - 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf, - 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1, - 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3, - 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40, - 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03, - 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f, - 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d, - 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd, - 0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6, - 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c, - 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73, - 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04, - 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f, - 0x90, 0x56, 0x1d, 
0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7, - 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d, - 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b, - 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 0xfd, 0x97, 0x81, 0x76, 0xd8, - 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d, - 0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa, - 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11, - 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7, - 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea, - 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c, - 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf, - 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6, - 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 0xf6, 0x6a, - 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9, - 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78, - 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8, - 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc, - 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17, - 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3, - 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80, - 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 0x86, - 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f, - 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2, - 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6, - 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c, - 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49, - 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96, - 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b, - 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13, - 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a, - 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01, - 0x52, 0x37, 0x24, 0x34, 0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32, - 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60, - 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d, - 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2, - 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd, - 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 
0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59, - 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82, - 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae, - 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 0xb8, - 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1, - 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f, - 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35, - 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28, - 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76, - 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 0x12, 0x28, 0x55, 0xaf, - 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02, - 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1, - 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c, - 0xa2, 0xb6, 0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18, - 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91, - 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto deleted file mode 100644 index 2fcc7953dc..0000000000 --- a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto +++ /dev/null @@ -1,460 +0,0 @@ -syntax = "proto2"; -option go_package = "socket"; - -package appengine; - -message RemoteSocketServiceError { - enum ErrorCode { - SYSTEM_ERROR = 1; - GAI_ERROR = 2; - FAILURE = 4; - PERMISSION_DENIED = 5; - INVALID_REQUEST = 6; - SOCKET_CLOSED = 7; - } - - enum SystemError { - option allow_alias = true; - - SYS_SUCCESS = 0; - SYS_EPERM = 1; - SYS_ENOENT = 2; - SYS_ESRCH = 3; - SYS_EINTR = 4; - SYS_EIO = 5; - SYS_ENXIO = 6; - SYS_E2BIG = 7; - SYS_ENOEXEC = 8; - SYS_EBADF = 9; - SYS_ECHILD = 10; - SYS_EAGAIN = 11; - SYS_EWOULDBLOCK = 11; - SYS_ENOMEM = 12; - SYS_EACCES = 13; - SYS_EFAULT = 14; - SYS_ENOTBLK = 15; - SYS_EBUSY = 16; - SYS_EEXIST = 17; - SYS_EXDEV = 18; - SYS_ENODEV = 19; - SYS_ENOTDIR = 20; - SYS_EISDIR = 21; - SYS_EINVAL = 22; - SYS_ENFILE = 23; - SYS_EMFILE = 24; - SYS_ENOTTY = 25; - SYS_ETXTBSY = 26; - SYS_EFBIG = 27; - SYS_ENOSPC = 28; - SYS_ESPIPE = 29; - SYS_EROFS = 30; - SYS_EMLINK = 31; - SYS_EPIPE = 32; - SYS_EDOM = 33; - SYS_ERANGE = 34; - SYS_EDEADLK = 35; - SYS_EDEADLOCK = 35; - SYS_ENAMETOOLONG = 36; - SYS_ENOLCK = 37; - SYS_ENOSYS = 38; - SYS_ENOTEMPTY = 39; - SYS_ELOOP = 40; - SYS_ENOMSG = 42; - SYS_EIDRM = 43; - SYS_ECHRNG = 44; - SYS_EL2NSYNC = 45; - SYS_EL3HLT = 46; - SYS_EL3RST = 47; - SYS_ELNRNG = 48; - SYS_EUNATCH = 49; - SYS_ENOCSI = 50; - SYS_EL2HLT = 51; - SYS_EBADE = 52; - SYS_EBADR = 53; - SYS_EXFULL = 54; - SYS_ENOANO = 55; - SYS_EBADRQC = 56; - SYS_EBADSLT = 57; - SYS_EBFONT = 59; - SYS_ENOSTR = 60; - SYS_ENODATA = 61; - SYS_ETIME = 62; - SYS_ENOSR = 63; - SYS_ENONET = 64; - SYS_ENOPKG = 65; - SYS_EREMOTE = 66; - SYS_ENOLINK = 67; - SYS_EADV = 68; - SYS_ESRMNT = 69; - SYS_ECOMM = 70; - SYS_EPROTO = 71; - SYS_EMULTIHOP = 72; - 
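For orientation amid this block of vendor deletions: the socket_service.pb.go file removed above is protoc-gen-go output, and the hex blob at its end is the gzipped FileDescriptorProto of the socket_service.proto file whose removal follows. Its accessors show the proto2 convention that getters are safe on a nil receiver and fall back to the declared field default (e.g. Default_CloseRequest_SendOffset = -1). A minimal sketch of that behavior, assuming the deleted package were still importable as pb — illustration only, since this change removes it:

package main

import (
	"fmt"

	// Removed by this very change; imported here purely to illustrate
	// the generated API that is being deleted.
	pb "google.golang.org/appengine/internal/socket"
)

func main() {
	// Unset optional fields report their proto2 [default=...] values.
	req := &pb.SendRequest{}
	fmt.Println(req.GetFlags())          // 0  (Default_SendRequest_Flags)
	fmt.Println(req.GetTimeoutSeconds()) // -1 (Default_SendRequest_TimeoutSeconds)

	// Getters are nil-receiver safe, so call chains need no nil checks.
	var reply *pb.ReceiveReply
	fmt.Println(reply.GetBufferSize()) // 0, no panic
}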
SYS_EDOTDOT = 73; - SYS_EBADMSG = 74; - SYS_EOVERFLOW = 75; - SYS_ENOTUNIQ = 76; - SYS_EBADFD = 77; - SYS_EREMCHG = 78; - SYS_ELIBACC = 79; - SYS_ELIBBAD = 80; - SYS_ELIBSCN = 81; - SYS_ELIBMAX = 82; - SYS_ELIBEXEC = 83; - SYS_EILSEQ = 84; - SYS_ERESTART = 85; - SYS_ESTRPIPE = 86; - SYS_EUSERS = 87; - SYS_ENOTSOCK = 88; - SYS_EDESTADDRREQ = 89; - SYS_EMSGSIZE = 90; - SYS_EPROTOTYPE = 91; - SYS_ENOPROTOOPT = 92; - SYS_EPROTONOSUPPORT = 93; - SYS_ESOCKTNOSUPPORT = 94; - SYS_EOPNOTSUPP = 95; - SYS_ENOTSUP = 95; - SYS_EPFNOSUPPORT = 96; - SYS_EAFNOSUPPORT = 97; - SYS_EADDRINUSE = 98; - SYS_EADDRNOTAVAIL = 99; - SYS_ENETDOWN = 100; - SYS_ENETUNREACH = 101; - SYS_ENETRESET = 102; - SYS_ECONNABORTED = 103; - SYS_ECONNRESET = 104; - SYS_ENOBUFS = 105; - SYS_EISCONN = 106; - SYS_ENOTCONN = 107; - SYS_ESHUTDOWN = 108; - SYS_ETOOMANYREFS = 109; - SYS_ETIMEDOUT = 110; - SYS_ECONNREFUSED = 111; - SYS_EHOSTDOWN = 112; - SYS_EHOSTUNREACH = 113; - SYS_EALREADY = 114; - SYS_EINPROGRESS = 115; - SYS_ESTALE = 116; - SYS_EUCLEAN = 117; - SYS_ENOTNAM = 118; - SYS_ENAVAIL = 119; - SYS_EISNAM = 120; - SYS_EREMOTEIO = 121; - SYS_EDQUOT = 122; - SYS_ENOMEDIUM = 123; - SYS_EMEDIUMTYPE = 124; - SYS_ECANCELED = 125; - SYS_ENOKEY = 126; - SYS_EKEYEXPIRED = 127; - SYS_EKEYREVOKED = 128; - SYS_EKEYREJECTED = 129; - SYS_EOWNERDEAD = 130; - SYS_ENOTRECOVERABLE = 131; - SYS_ERFKILL = 132; - } - - optional int32 system_error = 1 [default=0]; - optional string error_detail = 2; -} - -message AddressPort { - required int32 port = 1; - optional bytes packed_address = 2; - - optional string hostname_hint = 3; -} - - - -message CreateSocketRequest { - enum SocketFamily { - IPv4 = 1; - IPv6 = 2; - } - - enum SocketProtocol { - TCP = 1; - UDP = 2; - } - - required SocketFamily family = 1; - required SocketProtocol protocol = 2; - - repeated SocketOption socket_options = 3; - - optional AddressPort proxy_external_ip = 4; - - optional int32 listen_backlog = 5 [default=0]; - - optional AddressPort remote_ip = 6; - - optional string app_id = 9; - - optional int64 project_id = 10; -} - -message CreateSocketReply { - optional string socket_descriptor = 1; - - optional AddressPort server_address = 3; - - optional AddressPort proxy_external_ip = 4; - - extensions 1000 to max; -} - - - -message BindRequest { - required string socket_descriptor = 1; - required AddressPort proxy_external_ip = 2; -} - -message BindReply { - optional AddressPort proxy_external_ip = 1; -} - - - -message GetSocketNameRequest { - required string socket_descriptor = 1; -} - -message GetSocketNameReply { - optional AddressPort proxy_external_ip = 2; -} - - - -message GetPeerNameRequest { - required string socket_descriptor = 1; -} - -message GetPeerNameReply { - optional AddressPort peer_ip = 2; -} - - -message SocketOption { - - enum SocketOptionLevel { - SOCKET_SOL_IP = 0; - SOCKET_SOL_SOCKET = 1; - SOCKET_SOL_TCP = 6; - SOCKET_SOL_UDP = 17; - } - - enum SocketOptionName { - option allow_alias = true; - - SOCKET_SO_DEBUG = 1; - SOCKET_SO_REUSEADDR = 2; - SOCKET_SO_TYPE = 3; - SOCKET_SO_ERROR = 4; - SOCKET_SO_DONTROUTE = 5; - SOCKET_SO_BROADCAST = 6; - SOCKET_SO_SNDBUF = 7; - SOCKET_SO_RCVBUF = 8; - SOCKET_SO_KEEPALIVE = 9; - SOCKET_SO_OOBINLINE = 10; - SOCKET_SO_LINGER = 13; - SOCKET_SO_RCVTIMEO = 20; - SOCKET_SO_SNDTIMEO = 21; - - SOCKET_IP_TOS = 1; - SOCKET_IP_TTL = 2; - SOCKET_IP_HDRINCL = 3; - SOCKET_IP_OPTIONS = 4; - - SOCKET_TCP_NODELAY = 1; - SOCKET_TCP_MAXSEG = 2; - SOCKET_TCP_CORK = 3; - SOCKET_TCP_KEEPIDLE = 4; - SOCKET_TCP_KEEPINTVL = 5; - 
SOCKET_TCP_KEEPCNT = 6; - SOCKET_TCP_SYNCNT = 7; - SOCKET_TCP_LINGER2 = 8; - SOCKET_TCP_DEFER_ACCEPT = 9; - SOCKET_TCP_WINDOW_CLAMP = 10; - SOCKET_TCP_INFO = 11; - SOCKET_TCP_QUICKACK = 12; - } - - required SocketOptionLevel level = 1; - required SocketOptionName option = 2; - required bytes value = 3; -} - - -message SetSocketOptionsRequest { - required string socket_descriptor = 1; - repeated SocketOption options = 2; -} - -message SetSocketOptionsReply { -} - -message GetSocketOptionsRequest { - required string socket_descriptor = 1; - repeated SocketOption options = 2; -} - -message GetSocketOptionsReply { - repeated SocketOption options = 2; -} - - -message ConnectRequest { - required string socket_descriptor = 1; - required AddressPort remote_ip = 2; - optional double timeout_seconds = 3 [default=-1]; -} - -message ConnectReply { - optional AddressPort proxy_external_ip = 1; - - extensions 1000 to max; -} - - -message ListenRequest { - required string socket_descriptor = 1; - required int32 backlog = 2; -} - -message ListenReply { -} - - -message AcceptRequest { - required string socket_descriptor = 1; - optional double timeout_seconds = 2 [default=-1]; -} - -message AcceptReply { - optional bytes new_socket_descriptor = 2; - optional AddressPort remote_address = 3; -} - - - -message ShutDownRequest { - enum How { - SOCKET_SHUT_RD = 1; - SOCKET_SHUT_WR = 2; - SOCKET_SHUT_RDWR = 3; - } - required string socket_descriptor = 1; - required How how = 2; - required int64 send_offset = 3; -} - -message ShutDownReply { -} - - - -message CloseRequest { - required string socket_descriptor = 1; - optional int64 send_offset = 2 [default=-1]; -} - -message CloseReply { -} - - - -message SendRequest { - required string socket_descriptor = 1; - required bytes data = 2 [ctype=CORD]; - required int64 stream_offset = 3; - optional int32 flags = 4 [default=0]; - optional AddressPort send_to = 5; - optional double timeout_seconds = 6 [default=-1]; -} - -message SendReply { - optional int32 data_sent = 1; -} - - -message ReceiveRequest { - enum Flags { - MSG_OOB = 1; - MSG_PEEK = 2; - } - required string socket_descriptor = 1; - required int32 data_size = 2; - optional int32 flags = 3 [default=0]; - optional double timeout_seconds = 5 [default=-1]; -} - -message ReceiveReply { - optional int64 stream_offset = 2; - optional bytes data = 3 [ctype=CORD]; - optional AddressPort received_from = 4; - optional int32 buffer_size = 5; -} - - - -message PollEvent { - - enum PollEventFlag { - SOCKET_POLLNONE = 0; - SOCKET_POLLIN = 1; - SOCKET_POLLPRI = 2; - SOCKET_POLLOUT = 4; - SOCKET_POLLERR = 8; - SOCKET_POLLHUP = 16; - SOCKET_POLLNVAL = 32; - SOCKET_POLLRDNORM = 64; - SOCKET_POLLRDBAND = 128; - SOCKET_POLLWRNORM = 256; - SOCKET_POLLWRBAND = 512; - SOCKET_POLLMSG = 1024; - SOCKET_POLLREMOVE = 4096; - SOCKET_POLLRDHUP = 8192; - }; - - required string socket_descriptor = 1; - required int32 requested_events = 2; - required int32 observed_events = 3; -} - -message PollRequest { - repeated PollEvent events = 1; - optional double timeout_seconds = 2 [default=-1]; -} - -message PollReply { - repeated PollEvent events = 2; -} - -message ResolveRequest { - required string name = 1; - repeated CreateSocketRequest.SocketFamily address_families = 2; -} - -message ResolveReply { - enum ErrorCode { - SOCKET_EAI_ADDRFAMILY = 1; - SOCKET_EAI_AGAIN = 2; - SOCKET_EAI_BADFLAGS = 3; - SOCKET_EAI_FAIL = 4; - SOCKET_EAI_FAMILY = 5; - SOCKET_EAI_MEMORY = 6; - SOCKET_EAI_NODATA = 7; - SOCKET_EAI_NONAME = 8; - SOCKET_EAI_SERVICE = 9; - 
SOCKET_EAI_SOCKTYPE = 10; - SOCKET_EAI_SYSTEM = 11; - SOCKET_EAI_BADHINTS = 12; - SOCKET_EAI_PROTOCOL = 13; - SOCKET_EAI_OVERFLOW = 14; - SOCKET_EAI_MAX = 15; - }; - - repeated bytes packed_address = 2; - optional string canonical_name = 3; - repeated string aliases = 4; -} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go index 9006ae6538..2ae8ab9fa4 100644 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -7,11 +7,11 @@ package internal // This file implements hooks for applying datastore transactions. import ( + "context" "errors" "reflect" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/datastore" @@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) { var transactionKey = "used for *Transaction" -func transactionFromContext(ctx netcontext.Context) *transaction { +func transactionFromContext(ctx context.Context) *transaction { t, _ := ctx.Value(&transactionKey).(*transaction) return t } -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) +func withTransaction(ctx context.Context, t *transaction) context.Context { + return context.WithValue(ctx, &transactionKey, t) } type transaction struct { @@ -54,7 +54,7 @@ type transaction struct { var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { +func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { if transactionFromContext(c) != nil { return nil, errors.New("nested transactions are not supported") } diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go index 21860ca082..6f169be487 100644 --- a/vendor/google.golang.org/appengine/namespace.go +++ b/vendor/google.golang.org/appengine/namespace.go @@ -5,11 +5,10 @@ package appengine import ( + "context" "fmt" "regexp" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" ) diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go deleted file mode 100644 index 3de46df826..0000000000 --- a/vendor/google.golang.org/appengine/socket/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package socket provides outbound network sockets. -// -// This package is only required in the classic App Engine environment. -// Applications running only in App Engine "flexible environment" should -// use the standard library's net package. -package socket diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go deleted file mode 100644 index 0ad50e2d36..0000000000 --- a/vendor/google.golang.org/appengine/socket/socket_classic.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. 
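The transaction.go and namespace.go hunks above belong to this PR's mechanical migration from golang.org/x/net/context to the standard library's context package; since Go 1.7 the former is a thin wrapper over the latter, so only imports and signatures change. The one idiom worth noting is the context key: transaction.go stores the transaction under the address of a package-level variable, which cannot collide with any other package's key. A small self-contained sketch of that pattern, with a string standing in for the *transaction value:

package main

import (
	"context"
	"fmt"
)

// The address of an unexported package-level variable is a unique,
// collision-free context key — the same trick transaction.go uses
// with &transactionKey. The variable's value is irrelevant.
var txKey = "used for *Transaction"

func withTx(ctx context.Context, tx string) context.Context {
	return context.WithValue(ctx, &txKey, tx)
}

func txFrom(ctx context.Context) (string, bool) {
	tx, ok := ctx.Value(&txKey).(string)
	return tx, ok
}

func main() {
	ctx := withTx(context.Background(), "tx-123")
	fmt.Println(txFrom(ctx)) // tx-123 true
}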
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package socket - -import ( - "fmt" - "io" - "net" - "strconv" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" - - pb "google.golang.org/appengine/internal/socket" -) - -// Dial connects to the address addr on the network protocol. -// The address format is host:port, where host may be a hostname or an IP address. -// Known protocols are "tcp" and "udp". -// The returned connection satisfies net.Conn, and is valid while ctx is valid; -// if the connection is to be used after ctx becomes invalid, invoke SetContext -// with the new context. -func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { - return DialTimeout(ctx, protocol, addr, 0) -} - -var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ - pb.CreateSocketRequest_IPv4, - pb.CreateSocketRequest_IPv6, -} - -// DialTimeout is like Dial but takes a timeout. -// The timeout includes name resolution, if required. -func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { - dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. - if timeout > 0 { - var cancel context.CancelFunc - dialCtx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.Atoi(portStr) - if err != nil { - return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) - } - - var prot pb.CreateSocketRequest_SocketProtocol - switch protocol { - case "tcp": - prot = pb.CreateSocketRequest_TCP - case "udp": - prot = pb.CreateSocketRequest_UDP - default: - return nil, fmt.Errorf("socket: unknown protocol %q", protocol) - } - - packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) - if err != nil { - return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) - } - if len(packedAddrs) == 0 { - return nil, fmt.Errorf("no addresses for %q", host) - } - - packedAddr := packedAddrs[0] // use first address - fam := pb.CreateSocketRequest_IPv4 - if len(packedAddr) == net.IPv6len { - fam = pb.CreateSocketRequest_IPv6 - } - - req := &pb.CreateSocketRequest{ - Family: fam.Enum(), - Protocol: prot.Enum(), - RemoteIp: &pb.AddressPort{ - Port: proto.Int32(int32(port)), - PackedAddress: packedAddr, - }, - } - if resolved { - req.RemoteIp.HostnameHint = &host - } - res := &pb.CreateSocketReply{} - if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { - return nil, err - } - - return &Conn{ - ctx: ctx, - desc: res.GetSocketDescriptor(), - prot: prot, - local: res.ProxyExternalIp, - remote: req.RemoteIp, - }, nil -} - -// LookupIP returns the given host's IP addresses. -func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { - packedAddrs, _, err := resolve(ctx, ipFamilies, host) - if err != nil { - return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) - } - addrs = make([]net.IP, len(packedAddrs)) - for i, pa := range packedAddrs { - addrs[i] = net.IP(pa) - } - return addrs, nil -} - -func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { - // Check if it's an IP address. 
- if ip := net.ParseIP(host); ip != nil { - if ip := ip.To4(); ip != nil { - return [][]byte{ip}, false, nil - } - return [][]byte{ip}, false, nil - } - - req := &pb.ResolveRequest{ - Name: &host, - AddressFamilies: fams, - } - res := &pb.ResolveReply{} - if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { - // XXX: need to map to pb.ResolveReply_ErrorCode? - return nil, false, err - } - return res.PackedAddress, true, nil -} - -// withDeadline is like context.WithDeadline, except it ignores the zero deadline. -func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { - if deadline.IsZero() { - return parent, func() {} - } - return context.WithDeadline(parent, deadline) -} - -// Conn represents a socket connection. -// It implements net.Conn. -type Conn struct { - ctx context.Context - desc string - offset int64 - - prot pb.CreateSocketRequest_SocketProtocol - local, remote *pb.AddressPort - - readDeadline, writeDeadline time.Time // optional -} - -// SetContext sets the context that is used by this Conn. -// It is usually used only when using a Conn that was created in a different context, -// such as when a connection is created during a warmup request but used while -// servicing a user request. -func (cn *Conn) SetContext(ctx context.Context) { - cn.ctx = ctx -} - -func (cn *Conn) Read(b []byte) (n int, err error) { - const maxRead = 1 << 20 - if len(b) > maxRead { - b = b[:maxRead] - } - - req := &pb.ReceiveRequest{ - SocketDescriptor: &cn.desc, - DataSize: proto.Int32(int32(len(b))), - } - res := &pb.ReceiveReply{} - if !cn.readDeadline.IsZero() { - req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) - } - ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) - defer cancel() - if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { - return 0, err - } - if len(res.Data) == 0 { - return 0, io.EOF - } - if len(res.Data) > len(b) { - return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) - } - return copy(b, res.Data), nil -} - -func (cn *Conn) Write(b []byte) (n int, err error) { - const lim = 1 << 20 // max per chunk - - for n < len(b) { - chunk := b[n:] - if len(chunk) > lim { - chunk = chunk[:lim] - } - - req := &pb.SendRequest{ - SocketDescriptor: &cn.desc, - Data: chunk, - StreamOffset: &cn.offset, - } - res := &pb.SendReply{} - if !cn.writeDeadline.IsZero() { - req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) - } - ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) - defer cancel() - if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { - // assume zero bytes were sent in this RPC - break - } - n += int(res.GetDataSent()) - cn.offset += int64(res.GetDataSent()) - } - - return -} - -func (cn *Conn) Close() error { - req := &pb.CloseRequest{ - SocketDescriptor: &cn.desc, - } - res := &pb.CloseReply{} - if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { - return err - } - cn.desc = "CLOSED" - return nil -} - -func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { - if ap == nil { - return nil - } - switch prot { - case pb.CreateSocketRequest_TCP: - return &net.TCPAddr{ - IP: net.IP(ap.PackedAddress), - Port: int(*ap.Port), - } - case pb.CreateSocketRequest_UDP: - return &net.UDPAddr{ - IP: net.IP(ap.PackedAddress), - Port: int(*ap.Port), - } - } - panic("unknown protocol " + prot.String()) -} - -func 
(cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } -func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } - -func (cn *Conn) SetDeadline(t time.Time) error { - cn.readDeadline = t - cn.writeDeadline = t - return nil -} - -func (cn *Conn) SetReadDeadline(t time.Time) error { - cn.readDeadline = t - return nil -} - -func (cn *Conn) SetWriteDeadline(t time.Time) error { - cn.writeDeadline = t - return nil -} - -// KeepAlive signals that the connection is still in use. -// It may be called to prevent the socket being closed due to inactivity. -func (cn *Conn) KeepAlive() error { - req := &pb.GetSocketNameRequest{ - SocketDescriptor: &cn.desc, - } - res := &pb.GetSocketNameReply{} - return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) -} - -func init() { - internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go deleted file mode 100644 index c804169a1c..0000000000 --- a/vendor/google.golang.org/appengine/socket/socket_vm.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package socket - -import ( - "net" - "time" - - "golang.org/x/net/context" -) - -// Dial connects to the address addr on the network protocol. -// The address format is host:port, where host may be a hostname or an IP address. -// Known protocols are "tcp" and "udp". -// The returned connection satisfies net.Conn, and is valid while ctx is valid; -// if the connection is to be used after ctx becomes invalid, invoke SetContext -// with the new context. -func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { - conn, err := net.Dial(protocol, addr) - if err != nil { - return nil, err - } - return &Conn{conn}, nil -} - -// DialTimeout is like Dial but takes a timeout. -// The timeout includes name resolution, if required. -func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { - conn, err := net.DialTimeout(protocol, addr, timeout) - if err != nil { - return nil, err - } - return &Conn{conn}, nil -} - -// LookupIP returns the given host's IP addresses. -func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { - return net.LookupIP(host) -} - -// Conn represents a socket connection. -// It implements net.Conn. -type Conn struct { - net.Conn -} - -// SetContext sets the context that is used by this Conn. -// It is usually used only when using a Conn that was created in a different context, -// such as when a connection is created during a warmup request but used while -// servicing a user request. -func (cn *Conn) SetContext(ctx context.Context) { - // This function is not required in App Engine "flexible environment". -} - -// KeepAlive signals that the connection is still in use. -// It may be called to prevent the socket being closed due to inactivity. -func (cn *Conn) KeepAlive() error { - // This function is not required in App Engine "flexible environment". 
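Note the contrast between the two deleted implementations: socket_classic.go reimplements every net.Conn method over App Engine RPCs, while socket_vm.go simply embeds a real net.Conn, inheriting the whole interface and adding the App Engine extras as no-ops. A hedged sketch of that embedding idiom, exercised with an in-memory net.Pipe rather than a real dial:

package main

import (
	"context"
	"net"
)

// Conn embeds net.Conn, so Read/Write/Close and the deadline methods are
// all promoted and Conn satisfies net.Conn with no further code — the
// pattern used by the deleted socket_vm.go.
type Conn struct {
	net.Conn
}

func (cn *Conn) SetContext(ctx context.Context) {} // no-op outside classic App Engine
func (cn *Conn) KeepAlive() error               { return nil }

var _ net.Conn = (*Conn)(nil) // compile-time interface check

func main() {
	client, server := net.Pipe() // in-memory net.Conn pair, for demonstration only
	defer server.Close()

	c := &Conn{Conn: client}
	c.SetContext(context.Background())
	_ = c.KeepAlive()
	_ = c.Close() // promoted from the embedded net.Conn
}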
- return nil -} diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go index 05642a992a..fcf3ad0a58 100644 --- a/vendor/google.golang.org/appengine/timeout.go +++ b/vendor/google.golang.org/appengine/timeout.go @@ -4,7 +4,7 @@ package appengine -import "golang.org/x/net/context" +import "context" // IsTimeoutError reports whether err is a timeout error. func IsTimeoutError(err error) bool { diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh deleted file mode 100644 index 785b62f46e..0000000000 --- a/vendor/google.golang.org/appengine/travis_install.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -if [[ $GO111MODULE == "on" ]]; then - go get . -else - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine) -fi - -if [[ $GOAPP == "true" ]]; then - mkdir /tmp/sdk - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" - unzip -q /tmp/sdk.zip -d /tmp/sdk - # NOTE: Set the following env vars in the test script: - # export PATH="$PATH:/tmp/sdk/go_appengine" - # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py -fi - diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh deleted file mode 100644 index d4390f045b..0000000000 --- a/vendor/google.golang.org/appengine/travis_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -go version -go test -v google.golang.org/appengine/... -go test -v -race google.golang.org/appengine/... -if [[ $GOAPP == "true" ]]; then - export PATH="$PATH:/tmp/sdk/go_appengine" - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py - goapp version - goapp test -v google.golang.org/appengine/... -fi diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go index 6ffe1e6d90..6c0d72418d 100644 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -7,6 +7,7 @@ package urlfetch // import "google.golang.org/appengine/urlfetch" import ( + "context" "errors" "fmt" "io" @@ -18,7 +19,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/urlfetch" @@ -44,11 +44,10 @@ type Transport struct { var _ http.RoundTripper = (*Transport)(nil) // Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. +// client will check the validity of SSL certificates. +// -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. +// Any deadline of the provided context will be used for requests through this client. +// If the client does not have a deadline, then an App Engine default of 60 seconds is used.
func Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &Transport{ diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/google.golang.org/genproto/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/vendor/google.golang.org/genproto/googleapis/api/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go deleted file mode 100644 index 191bea48c8..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.26.0 -// protoc v3.12.2 -// source: google/api/annotations.proto - -package annotations - -import ( - reflect "reflect" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.MethodOptions)(nil), - ExtensionType: (*HttpRule)(nil), - Field: 72295728, - Name: "google.api.http", - Tag: "bytes,72295728,opt,name=http", - Filename: "google/api/annotations.proto", - }, -} - -// Extension fields to descriptorpb.MethodOptions. -var ( - // See `HttpRule`. - // - // optional google.api.HttpRule http = 72295728; - E_Http = &file_google_api_annotations_proto_extTypes[0] -) - -var File_google_api_annotations_proto protoreflect.FileDescriptor - -var file_google_api_annotations_proto_rawDesc = []byte{ - 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x3a, 0x4b, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0xca, 0xbc, 0x22, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, - 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x42, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_google_api_annotations_proto_goTypes = []interface{}{ - (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions - (*HttpRule)(nil), // 1: google.api.HttpRule -} -var file_google_api_annotations_proto_depIdxs = []int32{ - 0, // 0: google.api.http:extendee -> google.protobuf.MethodOptions - 1, // 1: google.api.http:type_name -> google.api.HttpRule - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is 
the sub-list for extension type_name - 0, // [0:1] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_api_annotations_proto_init() } -func file_google_api_annotations_proto_init() { - if File_google_api_annotations_proto != nil { - return - } - file_google_api_http_proto_init() - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_annotations_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 1, - NumServices: 0, - }, - GoTypes: file_google_api_annotations_proto_goTypes, - DependencyIndexes: file_google_api_annotations_proto_depIdxs, - ExtensionInfos: file_google_api_annotations_proto_extTypes, - }.Build() - File_google_api_annotations_proto = out.File - file_google_api_annotations_proto_rawDesc = nil - file_google_api_annotations_proto_goTypes = nil - file_google_api_annotations_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go deleted file mode 100644 index 83774fbcbe..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ /dev/null @@ -1,1780 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.12 -// source: google/api/client.proto - -package annotations - -import ( - reflect "reflect" - sync "sync" - - api "google.golang.org/genproto/googleapis/api" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" - durationpb "google.golang.org/protobuf/types/known/durationpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The organization for which the client libraries are being published. -// Affects the url where generated docs are published, etc. -type ClientLibraryOrganization int32 - -const ( - // Not useful. - ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED ClientLibraryOrganization = 0 - // Google Cloud Platform Org. - ClientLibraryOrganization_CLOUD ClientLibraryOrganization = 1 - // Ads (Advertising) Org. - ClientLibraryOrganization_ADS ClientLibraryOrganization = 2 - // Photos Org. - ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3 - // Street View Org. - ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4 - // Shopping Org. - ClientLibraryOrganization_SHOPPING ClientLibraryOrganization = 5 - // Geo Org. 
- ClientLibraryOrganization_GEO ClientLibraryOrganization = 6 - // Generative AI - https://developers.generativeai.google - ClientLibraryOrganization_GENERATIVE_AI ClientLibraryOrganization = 7 -) - -// Enum value maps for ClientLibraryOrganization. -var ( - ClientLibraryOrganization_name = map[int32]string{ - 0: "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", - 1: "CLOUD", - 2: "ADS", - 3: "PHOTOS", - 4: "STREET_VIEW", - 5: "SHOPPING", - 6: "GEO", - 7: "GENERATIVE_AI", - } - ClientLibraryOrganization_value = map[string]int32{ - "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0, - "CLOUD": 1, - "ADS": 2, - "PHOTOS": 3, - "STREET_VIEW": 4, - "SHOPPING": 5, - "GEO": 6, - "GENERATIVE_AI": 7, - } -) - -func (x ClientLibraryOrganization) Enum() *ClientLibraryOrganization { - p := new(ClientLibraryOrganization) - *p = x - return p -} - -func (x ClientLibraryOrganization) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ClientLibraryOrganization) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_client_proto_enumTypes[0].Descriptor() -} - -func (ClientLibraryOrganization) Type() protoreflect.EnumType { - return &file_google_api_client_proto_enumTypes[0] -} - -func (x ClientLibraryOrganization) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ClientLibraryOrganization.Descriptor instead. -func (ClientLibraryOrganization) EnumDescriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{0} -} - -// To where should client libraries be published? -type ClientLibraryDestination int32 - -const ( - // Client libraries will neither be generated nor published to package - // managers. - ClientLibraryDestination_CLIENT_LIBRARY_DESTINATION_UNSPECIFIED ClientLibraryDestination = 0 - // Generate the client library in a repo under github.com/googleapis, - // but don't publish it to package managers. - ClientLibraryDestination_GITHUB ClientLibraryDestination = 10 - // Publish the library to package managers like nuget.org and npmjs.com. - ClientLibraryDestination_PACKAGE_MANAGER ClientLibraryDestination = 20 -) - -// Enum value maps for ClientLibraryDestination. -var ( - ClientLibraryDestination_name = map[int32]string{ - 0: "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", - 10: "GITHUB", - 20: "PACKAGE_MANAGER", - } - ClientLibraryDestination_value = map[string]int32{ - "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED": 0, - "GITHUB": 10, - "PACKAGE_MANAGER": 20, - } -) - -func (x ClientLibraryDestination) Enum() *ClientLibraryDestination { - p := new(ClientLibraryDestination) - *p = x - return p -} - -func (x ClientLibraryDestination) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ClientLibraryDestination) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_client_proto_enumTypes[1].Descriptor() -} - -func (ClientLibraryDestination) Type() protoreflect.EnumType { - return &file_google_api_client_proto_enumTypes[1] -} - -func (x ClientLibraryDestination) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ClientLibraryDestination.Descriptor instead. -func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{1} -} - -// Required information for every language. 
-type CommonLanguageSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Link to automatically generated reference documentation. Example: - // https://cloud.google.com/nodejs/docs/reference/asset/latest - // - // Deprecated: Do not use. - ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` - // The destination where API teams want this client library to be published. - Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` -} - -func (x *CommonLanguageSettings) Reset() { - *x = CommonLanguageSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CommonLanguageSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CommonLanguageSettings) ProtoMessage() {} - -func (x *CommonLanguageSettings) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CommonLanguageSettings.ProtoReflect.Descriptor instead. -func (*CommonLanguageSettings) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{0} -} - -// Deprecated: Do not use. -func (x *CommonLanguageSettings) GetReferenceDocsUri() string { - if x != nil { - return x.ReferenceDocsUri - } - return "" -} - -func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { - if x != nil { - return x.Destinations - } - return nil -} - -// Details about how and where to publish client libraries. -type ClientLibrarySettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Version of the API to apply these settings to. This is the full protobuf - // package for the API, ending in the version element. - // Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1". - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // Launch stage of this version of the API. - LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` - // When using transport=rest, the client request will encode enums as - // numbers rather than strings. - RestNumericEnums bool `protobuf:"varint,3,opt,name=rest_numeric_enums,json=restNumericEnums,proto3" json:"rest_numeric_enums,omitempty"` - // Settings for legacy Java features, supported in the Service YAML. - JavaSettings *JavaSettings `protobuf:"bytes,21,opt,name=java_settings,json=javaSettings,proto3" json:"java_settings,omitempty"` - // Settings for C++ client libraries. - CppSettings *CppSettings `protobuf:"bytes,22,opt,name=cpp_settings,json=cppSettings,proto3" json:"cpp_settings,omitempty"` - // Settings for PHP client libraries. - PhpSettings *PhpSettings `protobuf:"bytes,23,opt,name=php_settings,json=phpSettings,proto3" json:"php_settings,omitempty"` - // Settings for Python client libraries. 
- PythonSettings *PythonSettings `protobuf:"bytes,24,opt,name=python_settings,json=pythonSettings,proto3" json:"python_settings,omitempty"` - // Settings for Node client libraries. - NodeSettings *NodeSettings `protobuf:"bytes,25,opt,name=node_settings,json=nodeSettings,proto3" json:"node_settings,omitempty"` - // Settings for .NET client libraries. - DotnetSettings *DotnetSettings `protobuf:"bytes,26,opt,name=dotnet_settings,json=dotnetSettings,proto3" json:"dotnet_settings,omitempty"` - // Settings for Ruby client libraries. - RubySettings *RubySettings `protobuf:"bytes,27,opt,name=ruby_settings,json=rubySettings,proto3" json:"ruby_settings,omitempty"` - // Settings for Go client libraries. - GoSettings *GoSettings `protobuf:"bytes,28,opt,name=go_settings,json=goSettings,proto3" json:"go_settings,omitempty"` -} - -func (x *ClientLibrarySettings) Reset() { - *x = ClientLibrarySettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClientLibrarySettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClientLibrarySettings) ProtoMessage() {} - -func (x *ClientLibrarySettings) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClientLibrarySettings.ProtoReflect.Descriptor instead. -func (*ClientLibrarySettings) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{1} -} - -func (x *ClientLibrarySettings) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *ClientLibrarySettings) GetLaunchStage() api.LaunchStage { - if x != nil { - return x.LaunchStage - } - return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED -} - -func (x *ClientLibrarySettings) GetRestNumericEnums() bool { - if x != nil { - return x.RestNumericEnums - } - return false -} - -func (x *ClientLibrarySettings) GetJavaSettings() *JavaSettings { - if x != nil { - return x.JavaSettings - } - return nil -} - -func (x *ClientLibrarySettings) GetCppSettings() *CppSettings { - if x != nil { - return x.CppSettings - } - return nil -} - -func (x *ClientLibrarySettings) GetPhpSettings() *PhpSettings { - if x != nil { - return x.PhpSettings - } - return nil -} - -func (x *ClientLibrarySettings) GetPythonSettings() *PythonSettings { - if x != nil { - return x.PythonSettings - } - return nil -} - -func (x *ClientLibrarySettings) GetNodeSettings() *NodeSettings { - if x != nil { - return x.NodeSettings - } - return nil -} - -func (x *ClientLibrarySettings) GetDotnetSettings() *DotnetSettings { - if x != nil { - return x.DotnetSettings - } - return nil -} - -func (x *ClientLibrarySettings) GetRubySettings() *RubySettings { - if x != nil { - return x.RubySettings - } - return nil -} - -func (x *ClientLibrarySettings) GetGoSettings() *GoSettings { - if x != nil { - return x.GoSettings - } - return nil -} - -// This message configures the settings for publishing [Google Cloud Client -// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) -// generated from the service config. 
-type Publishing struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A list of API method settings, e.g. the behavior for methods that use the - // long-running operation pattern. - MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"` - // Link to a *public* URI where users can report issues. Example: - // https://issuetracker.google.com/issues/new?component=190865&template=1161103 - NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"` - // Link to product home page. Example: - // https://cloud.google.com/asset-inventory/docs/overview - DocumentationUri string `protobuf:"bytes,102,opt,name=documentation_uri,json=documentationUri,proto3" json:"documentation_uri,omitempty"` - // Used as a tracking tag when collecting data about the APIs developer - // relations artifacts like docs, packages delivered to package managers, - // etc. Example: "speech". - ApiShortName string `protobuf:"bytes,103,opt,name=api_short_name,json=apiShortName,proto3" json:"api_short_name,omitempty"` - // GitHub label to apply to issues and pull requests opened for this API. - GithubLabel string `protobuf:"bytes,104,opt,name=github_label,json=githubLabel,proto3" json:"github_label,omitempty"` - // GitHub teams to be added to CODEOWNERS in the directory in GitHub - // containing source code for the client libraries for this API. - CodeownerGithubTeams []string `protobuf:"bytes,105,rep,name=codeowner_github_teams,json=codeownerGithubTeams,proto3" json:"codeowner_github_teams,omitempty"` - // A prefix used in sample code when demarking regions to be included in - // documentation. - DocTagPrefix string `protobuf:"bytes,106,opt,name=doc_tag_prefix,json=docTagPrefix,proto3" json:"doc_tag_prefix,omitempty"` - // For whom the client library is being published. - Organization ClientLibraryOrganization `protobuf:"varint,107,opt,name=organization,proto3,enum=google.api.ClientLibraryOrganization" json:"organization,omitempty"` - // Client library settings. If the same version string appears multiple - // times in this list, then the last one wins. Settings from earlier - // settings with the same version string are discarded. - LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"` - // Optional link to proto reference documentation. 
Example: - // https://cloud.google.com/pubsub/lite/docs/reference/rpc - ProtoReferenceDocumentationUri string `protobuf:"bytes,110,opt,name=proto_reference_documentation_uri,json=protoReferenceDocumentationUri,proto3" json:"proto_reference_documentation_uri,omitempty"` -} - -func (x *Publishing) Reset() { - *x = Publishing{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Publishing) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Publishing) ProtoMessage() {} - -func (x *Publishing) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Publishing.ProtoReflect.Descriptor instead. -func (*Publishing) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{2} -} - -func (x *Publishing) GetMethodSettings() []*MethodSettings { - if x != nil { - return x.MethodSettings - } - return nil -} - -func (x *Publishing) GetNewIssueUri() string { - if x != nil { - return x.NewIssueUri - } - return "" -} - -func (x *Publishing) GetDocumentationUri() string { - if x != nil { - return x.DocumentationUri - } - return "" -} - -func (x *Publishing) GetApiShortName() string { - if x != nil { - return x.ApiShortName - } - return "" -} - -func (x *Publishing) GetGithubLabel() string { - if x != nil { - return x.GithubLabel - } - return "" -} - -func (x *Publishing) GetCodeownerGithubTeams() []string { - if x != nil { - return x.CodeownerGithubTeams - } - return nil -} - -func (x *Publishing) GetDocTagPrefix() string { - if x != nil { - return x.DocTagPrefix - } - return "" -} - -func (x *Publishing) GetOrganization() ClientLibraryOrganization { - if x != nil { - return x.Organization - } - return ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED -} - -func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings { - if x != nil { - return x.LibrarySettings - } - return nil -} - -func (x *Publishing) GetProtoReferenceDocumentationUri() string { - if x != nil { - return x.ProtoReferenceDocumentationUri - } - return "" -} - -// Settings for Java client libraries. -type JavaSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The package name to use in Java. Clobbers the java_package option - // set in the protobuf. This should be used **only** by APIs - // who have already set the language_settings.java.package_name" field - // in gapic.yaml. API teams should use the protobuf java_package option - // where possible. - // - // Example of a YAML configuration:: - // - // publishing: - // java_settings: - // library_package: com.google.cloud.pubsub.v1 - LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` - // Configure the Java class name to use instead of the service's for its - // corresponding generated GAPIC client. Keys are fully-qualified - // service names as they appear in the protobuf (including the full - // the language_settings.java.interface_names" field in gapic.yaml. API - // teams should otherwise use the service name as it appears in the - // protobuf. 
- // - // Example of a YAML configuration:: - // - // publishing: - // java_settings: - // service_class_names: - // - google.pubsub.v1.Publisher: TopicAdmin - // - google.pubsub.v1.Subscriber: SubscriptionAdmin - ServiceClassNames map[string]string `protobuf:"bytes,2,rep,name=service_class_names,json=serviceClassNames,proto3" json:"service_class_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Some settings. - Common *CommonLanguageSettings `protobuf:"bytes,3,opt,name=common,proto3" json:"common,omitempty"` -} - -func (x *JavaSettings) Reset() { - *x = JavaSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *JavaSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*JavaSettings) ProtoMessage() {} - -func (x *JavaSettings) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use JavaSettings.ProtoReflect.Descriptor instead. -func (*JavaSettings) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{3} -} - -func (x *JavaSettings) GetLibraryPackage() string { - if x != nil { - return x.LibraryPackage - } - return "" -} - -func (x *JavaSettings) GetServiceClassNames() map[string]string { - if x != nil { - return x.ServiceClassNames - } - return nil -} - -func (x *JavaSettings) GetCommon() *CommonLanguageSettings { - if x != nil { - return x.Common - } - return nil -} - -// Settings for C++ client libraries. -type CppSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Some settings. - Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` -} - -func (x *CppSettings) Reset() { - *x = CppSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CppSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CppSettings) ProtoMessage() {} - -func (x *CppSettings) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CppSettings.ProtoReflect.Descriptor instead. -func (*CppSettings) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{4} -} - -func (x *CppSettings) GetCommon() *CommonLanguageSettings { - if x != nil { - return x.Common - } - return nil -} - -// Settings for Php client libraries. -type PhpSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Some settings. 
- Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` -} - -func (x *PhpSettings) Reset() { - *x = PhpSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PhpSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PhpSettings) ProtoMessage() {} - -func (x *PhpSettings) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PhpSettings.ProtoReflect.Descriptor instead. -func (*PhpSettings) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{5} -} - -func (x *PhpSettings) GetCommon() *CommonLanguageSettings { - if x != nil { - return x.Common - } - return nil -} - -// Settings for Python client libraries. -type PythonSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Some settings. - Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` -} - -func (x *PythonSettings) Reset() { - *x = PythonSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PythonSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PythonSettings) ProtoMessage() {} - -func (x *PythonSettings) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PythonSettings.ProtoReflect.Descriptor instead. -func (*PythonSettings) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{6} -} - -func (x *PythonSettings) GetCommon() *CommonLanguageSettings { - if x != nil { - return x.Common - } - return nil -} - -// Settings for Node client libraries. -type NodeSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Some settings. - Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` -} - -func (x *NodeSettings) Reset() { - *x = NodeSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NodeSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NodeSettings) ProtoMessage() {} - -func (x *NodeSettings) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NodeSettings.ProtoReflect.Descriptor instead. 
-func (*NodeSettings) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{7} -} - -func (x *NodeSettings) GetCommon() *CommonLanguageSettings { - if x != nil { - return x.Common - } - return nil -} - -// Settings for Dotnet client libraries. -type DotnetSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Some settings. - Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` - // Map from original service names to renamed versions. - // This is used when the default generated types - // would cause a naming conflict. (Neither name is - // fully-qualified.) - // Example: Subscriber to SubscriberServiceApi. - RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Map from full resource types to the effective short name - // for the resource. This is used when otherwise resource - // named from different services would cause naming collisions. - // Example entry: - // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset" - RenamedResources map[string]string `protobuf:"bytes,3,rep,name=renamed_resources,json=renamedResources,proto3" json:"renamed_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // List of full resource types to ignore during generation. - // This is typically used for API-specific Location resources, - // which should be handled by the generator as if they were actually - // the common Location resources. - // Example entry: "documentai.googleapis.com/Location" - IgnoredResources []string `protobuf:"bytes,4,rep,name=ignored_resources,json=ignoredResources,proto3" json:"ignored_resources,omitempty"` - // Namespaces which must be aliased in snippets due to - // a known (but non-generator-predictable) naming collision - ForcedNamespaceAliases []string `protobuf:"bytes,5,rep,name=forced_namespace_aliases,json=forcedNamespaceAliases,proto3" json:"forced_namespace_aliases,omitempty"` - // Method signatures (in the form "service.method(signature)") - // which are provided separately, so shouldn't be generated. - // Snippets *calling* these methods are still generated, however. - HandwrittenSignatures []string `protobuf:"bytes,6,rep,name=handwritten_signatures,json=handwrittenSignatures,proto3" json:"handwritten_signatures,omitempty"` -} - -func (x *DotnetSettings) Reset() { - *x = DotnetSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DotnetSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DotnetSettings) ProtoMessage() {} - -func (x *DotnetSettings) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DotnetSettings.ProtoReflect.Descriptor instead. 
-func (*DotnetSettings) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{8} -} - -func (x *DotnetSettings) GetCommon() *CommonLanguageSettings { - if x != nil { - return x.Common - } - return nil -} - -func (x *DotnetSettings) GetRenamedServices() map[string]string { - if x != nil { - return x.RenamedServices - } - return nil -} - -func (x *DotnetSettings) GetRenamedResources() map[string]string { - if x != nil { - return x.RenamedResources - } - return nil -} - -func (x *DotnetSettings) GetIgnoredResources() []string { - if x != nil { - return x.IgnoredResources - } - return nil -} - -func (x *DotnetSettings) GetForcedNamespaceAliases() []string { - if x != nil { - return x.ForcedNamespaceAliases - } - return nil -} - -func (x *DotnetSettings) GetHandwrittenSignatures() []string { - if x != nil { - return x.HandwrittenSignatures - } - return nil -} - -// Settings for Ruby client libraries. -type RubySettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Some settings. - Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` -} - -func (x *RubySettings) Reset() { - *x = RubySettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RubySettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RubySettings) ProtoMessage() {} - -func (x *RubySettings) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RubySettings.ProtoReflect.Descriptor instead. -func (*RubySettings) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{9} -} - -func (x *RubySettings) GetCommon() *CommonLanguageSettings { - if x != nil { - return x.Common - } - return nil -} - -// Settings for Go client libraries. -type GoSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Some settings. - Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` -} - -func (x *GoSettings) Reset() { - *x = GoSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GoSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GoSettings) ProtoMessage() {} - -func (x *GoSettings) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GoSettings.ProtoReflect.Descriptor instead. -func (*GoSettings) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{10} -} - -func (x *GoSettings) GetCommon() *CommonLanguageSettings { - if x != nil { - return x.Common - } - return nil -} - -// Describes the generator configuration for a method. 
-type MethodSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The fully qualified name of the method, for which the options below apply. - // This is used to find the method to apply the options. - Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` - // Describes settings to use for long-running operations when generating - // API methods for RPCs. Complements RPCs that use the annotations in - // google/longrunning/operations.proto. - // - // Example of a YAML configuration:: - // - // publishing: - // method_settings: - // - selector: google.cloud.speech.v2.Speech.BatchRecognize - // long_running: - // initial_poll_delay: - // seconds: 60 # 1 minute - // poll_delay_multiplier: 1.5 - // max_poll_delay: - // seconds: 360 # 6 minutes - // total_poll_timeout: - // seconds: 54000 # 90 minutes - LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` -} - -func (x *MethodSettings) Reset() { - *x = MethodSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MethodSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodSettings) ProtoMessage() {} - -func (x *MethodSettings) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodSettings.ProtoReflect.Descriptor instead. -func (*MethodSettings) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{11} -} - -func (x *MethodSettings) GetSelector() string { - if x != nil { - return x.Selector - } - return "" -} - -func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { - if x != nil { - return x.LongRunning - } - return nil -} - -// Describes settings to use when generating API methods that use the -// long-running operation pattern. -// All default values below are from those used in the client library -// generators (e.g. -// [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). -type MethodSettings_LongRunning struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Initial delay after which the first poll request will be made. - // Default value: 5 seconds. - InitialPollDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=initial_poll_delay,json=initialPollDelay,proto3" json:"initial_poll_delay,omitempty"` - // Multiplier to gradually increase delay between subsequent polls until it - // reaches max_poll_delay. - // Default value: 1.5. - PollDelayMultiplier float32 `protobuf:"fixed32,2,opt,name=poll_delay_multiplier,json=pollDelayMultiplier,proto3" json:"poll_delay_multiplier,omitempty"` - // Maximum time between two subsequent poll requests. - // Default value: 45 seconds. - MaxPollDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=max_poll_delay,json=maxPollDelay,proto3" json:"max_poll_delay,omitempty"` - // Total polling timeout. - // Default value: 5 minutes. 
- TotalPollTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=total_poll_timeout,json=totalPollTimeout,proto3" json:"total_poll_timeout,omitempty"` -} - -func (x *MethodSettings_LongRunning) Reset() { - *x = MethodSettings_LongRunning{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MethodSettings_LongRunning) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodSettings_LongRunning) ProtoMessage() {} - -func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodSettings_LongRunning.ProtoReflect.Descriptor instead. -func (*MethodSettings_LongRunning) Descriptor() ([]byte, []int) { - return file_google_api_client_proto_rawDescGZIP(), []int{11, 0} -} - -func (x *MethodSettings_LongRunning) GetInitialPollDelay() *durationpb.Duration { - if x != nil { - return x.InitialPollDelay - } - return nil -} - -func (x *MethodSettings_LongRunning) GetPollDelayMultiplier() float32 { - if x != nil { - return x.PollDelayMultiplier - } - return 0 -} - -func (x *MethodSettings_LongRunning) GetMaxPollDelay() *durationpb.Duration { - if x != nil { - return x.MaxPollDelay - } - return nil -} - -func (x *MethodSettings_LongRunning) GetTotalPollTimeout() *durationpb.Duration { - if x != nil { - return x.TotalPollTimeout - } - return nil -} - -var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.MethodOptions)(nil), - ExtensionType: ([]string)(nil), - Field: 1051, - Name: "google.api.method_signature", - Tag: "bytes,1051,rep,name=method_signature", - Filename: "google/api/client.proto", - }, - { - ExtendedType: (*descriptorpb.ServiceOptions)(nil), - ExtensionType: (*string)(nil), - Field: 1049, - Name: "google.api.default_host", - Tag: "bytes,1049,opt,name=default_host", - Filename: "google/api/client.proto", - }, - { - ExtendedType: (*descriptorpb.ServiceOptions)(nil), - ExtensionType: (*string)(nil), - Field: 1050, - Name: "google.api.oauth_scopes", - Tag: "bytes,1050,opt,name=oauth_scopes", - Filename: "google/api/client.proto", - }, -} - -// Extension fields to descriptorpb.MethodOptions. -var ( - // A definition of a client library method signature. - // - // In client libraries, each proto RPC corresponds to one or more methods - // which the end user is able to call, and calls the underlying RPC. - // Normally, this method receives a single argument (a struct or instance - // corresponding to the RPC request object). Defining this field will - // add one or more overloads providing flattened or simpler method signatures - // in some languages. - // - // The fields on the method signature are provided as a comma-separated - // string. 
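The `MethodSettings_LongRunning` defaults documented in the deleted comments above (5 s initial delay, a 1.5× multiplier, a 45 s cap, and a 5 minute total timeout) describe a truncated exponential backoff. As a rough sketch of the schedule those numbers produce — illustrative arithmetic only, not code from this provider or from the GAPIC generators — the polls would land like this:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Defaults from the MethodSettings.LongRunning comments above.
	delay := 5 * time.Second     // initial_poll_delay
	const multiplier = 1.5       // poll_delay_multiplier
	maxDelay := 45 * time.Second // max_poll_delay
	total := 5 * time.Minute     // total_poll_timeout

	elapsed := time.Duration(0)
	for poll := 1; ; poll++ {
		elapsed += delay
		if elapsed > total {
			break // total_poll_timeout exhausted; give up on the operation
		}
		fmt.Printf("poll %d at t=%v (waited %v)\n", poll, elapsed, delay)
		// Grow the delay geometrically, capped at max_poll_delay.
		delay = time.Duration(float64(delay) * multiplier)
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
```

Under those defaults the delay hits the 45 s cap around the seventh poll, and the five-minute budget allows roughly ten polls in total.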
- // - // For example, the proto RPC and annotation: - // - // rpc CreateSubscription(CreateSubscriptionRequest) - // returns (Subscription) { - // option (google.api.method_signature) = "name,topic"; - // } - // - // Would add the following Java overload (in addition to the method accepting - // the request object): - // - // public final Subscription createSubscription(String name, String topic) - // - // The following backwards-compatibility guidelines apply: - // - // - Adding this annotation to an unannotated method is backwards - // compatible. - // - Adding this annotation to a method which already has existing - // method signature annotations is backwards compatible if and only if - // the new method signature annotation is last in the sequence. - // - Modifying or removing an existing method signature annotation is - // a breaking change. - // - Re-ordering existing method signature annotations is a breaking - // change. - // - // repeated string method_signature = 1051; - E_MethodSignature = &file_google_api_client_proto_extTypes[0] -) - -// Extension fields to descriptorpb.ServiceOptions. -var ( - // The hostname for this service. - // This should be specified with no prefix or protocol. - // - // Example: - // - // service Foo { - // option (google.api.default_host) = "foo.googleapi.com"; - // ... - // } - // - // optional string default_host = 1049; - E_DefaultHost = &file_google_api_client_proto_extTypes[1] - // OAuth scopes needed for the client. - // - // Example: - // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform"; - // ... - // } - // - // If there is more than one scope, use a comma-separated string: - // - // Example: - // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform," - // "https://www.googleapis.com/auth/monitoring"; - // ... 
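Because `method_signature` is an extension on `google.protobuf.MethodOptions`, generators read it back with `proto.GetExtension` and a `[]string` assertion. A minimal, self-contained sketch — building the options by hand rather than loading a real compiled descriptor, and importing the same `annotations` package path as the vendored copy deleted here:

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Equivalent of the CreateSubscription annotation shown above:
	// option (google.api.method_signature) = "name,topic";
	opts := &descriptorpb.MethodOptions{}
	proto.SetExtension(opts, annotations.E_MethodSignature, []string{"name,topic"})

	// A generator reads this off a method's options to decide which
	// flattened overloads to emit alongside the request-object method.
	for _, sig := range proto.GetExtension(opts, annotations.E_MethodSignature).([]string) {
		fmt.Println("flattened signature:", sig) // name,topic
	}
}
```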
- // } - // - // optional string oauth_scopes = 1050; - E_OauthScopes = &file_google_api_client_proto_extTypes[2] -) - -var File_google_api_client_proto protoreflect.FileDescriptor - -var file_google_api_client_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, - 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x73, - 0x55, 0x72, 0x69, 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, - 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, - 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, - 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, - 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, - 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, - 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, - 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 
0x43, 0x70, - 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, - 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, - 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, - 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, - 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x22, 0xab, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, - 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, - 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, - 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, - 0x73, 0x68, 
0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, - 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, - 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, - 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, - 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, - 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, - 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 
0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, - 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, - 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, - 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 
0x72, 0x63, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, - 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, - 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, - 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, - 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, - 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 
0x75, 0x6e, - 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, - 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, - 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, - 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, - 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, - 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, - 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, - 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, - 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, - 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, - 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, - 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, - 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, - 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, - 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, - 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, - 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 
0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, - 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, - 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, - 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, - 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_api_client_proto_rawDescOnce sync.Once - file_google_api_client_proto_rawDescData = file_google_api_client_proto_rawDesc -) - -func file_google_api_client_proto_rawDescGZIP() []byte { - file_google_api_client_proto_rawDescOnce.Do(func() { - file_google_api_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_client_proto_rawDescData) - }) - return file_google_api_client_proto_rawDescData -} - -var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16) -var file_google_api_client_proto_goTypes = []interface{}{ - (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization - (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: 
google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - nil, // 15: google.api.DotnetSettings.RenamedServicesEntry - nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 18: google.api.LaunchStage - (*durationpb.Duration)(nil), // 19: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions -} -var file_google_api_client_proto_depIdxs = []int32{ - 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage - 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings - 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings - 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings - 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings - 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings - 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings - 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings - 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings - 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings - 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization - 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings - 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry - 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 20, // 28: 
google.api.method_signature:extendee -> google.protobuf.MethodOptions - 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 31, // [31:31] is the sub-list for method output_type - 31, // [31:31] is the sub-list for method input_type - 31, // [31:31] is the sub-list for extension type_name - 28, // [28:31] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name -} - -func init() { file_google_api_client_proto_init() } -func file_google_api_client_proto_init() { - if File_google_api_client_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CommonLanguageSettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientLibrarySettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Publishing); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*JavaSettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CppSettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PhpSettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PythonSettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NodeSettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_client_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DotnetSettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_client_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RubySettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_client_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GoSettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_google_api_client_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodSettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodSettings_LongRunning); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_client_proto_rawDesc, - NumEnums: 2, - NumMessages: 16, - NumExtensions: 3, - NumServices: 0, - }, - GoTypes: file_google_api_client_proto_goTypes, - DependencyIndexes: file_google_api_client_proto_depIdxs, - EnumInfos: file_google_api_client_proto_enumTypes, - MessageInfos: file_google_api_client_proto_msgTypes, - ExtensionInfos: file_google_api_client_proto_extTypes, - }.Build() - File_google_api_client_proto = out.File - file_google_api_client_proto_rawDesc = nil - file_google_api_client_proto_goTypes = nil - file_google_api_client_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go deleted file mode 100644 index dbe2e2d0c6..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 -// source: google/api/field_behavior.proto - -package annotations - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// An indicator of the behavior of a given field (for example, that a field -// is required in requests, or given as output but ignored as input). -// This **does not** change the behavior in protocol buffers itself; it only -// denotes the behavior and may affect how API tooling handles the field. -// -// Note: This enum **may** receive new values in the future. -type FieldBehavior int32 - -const ( - // Conventional default for enums. Do not use this. - FieldBehavior_FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0 - // Specifically denotes a field as optional. 
- // While all fields in protocol buffers are optional, this may be specified - // for emphasis if appropriate. - FieldBehavior_OPTIONAL FieldBehavior = 1 - // Denotes a field as required. - // This indicates that the field **must** be provided as part of the request, - // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). - FieldBehavior_REQUIRED FieldBehavior = 2 - // Denotes a field as output only. - // This indicates that the field is provided in responses, but including the - // field in a request does nothing (the server *must* ignore it and - // *must not* throw an error as a result of the field's presence). - FieldBehavior_OUTPUT_ONLY FieldBehavior = 3 - // Denotes a field as input only. - // This indicates that the field is provided in requests, and the - // corresponding field is not included in output. - FieldBehavior_INPUT_ONLY FieldBehavior = 4 - // Denotes a field as immutable. - // This indicates that the field may be set once in a request to create a - // resource, but may not be changed thereafter. - FieldBehavior_IMMUTABLE FieldBehavior = 5 - // Denotes that a (repeated) field is an unordered list. - // This indicates that the service may provide the elements of the list - // in any arbitrary order, rather than the order the user originally - // provided. Additionally, the list's order may or may not be stable. - FieldBehavior_UNORDERED_LIST FieldBehavior = 6 - // Denotes that this field returns a non-empty default value if not set. - // This indicates that if the user provides the empty value in a request, - // a non-empty value will be returned. The user will not be aware of what - // non-empty value to expect. - FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 -) - -// Enum value maps for FieldBehavior. -var ( - FieldBehavior_name = map[int32]string{ - 0: "FIELD_BEHAVIOR_UNSPECIFIED", - 1: "OPTIONAL", - 2: "REQUIRED", - 3: "OUTPUT_ONLY", - 4: "INPUT_ONLY", - 5: "IMMUTABLE", - 6: "UNORDERED_LIST", - 7: "NON_EMPTY_DEFAULT", - } - FieldBehavior_value = map[string]int32{ - "FIELD_BEHAVIOR_UNSPECIFIED": 0, - "OPTIONAL": 1, - "REQUIRED": 2, - "OUTPUT_ONLY": 3, - "INPUT_ONLY": 4, - "IMMUTABLE": 5, - "UNORDERED_LIST": 6, - "NON_EMPTY_DEFAULT": 7, - } -) - -func (x FieldBehavior) Enum() *FieldBehavior { - p := new(FieldBehavior) - *p = x - return p -} - -func (x FieldBehavior) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldBehavior) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_field_behavior_proto_enumTypes[0].Descriptor() -} - -func (FieldBehavior) Type() protoreflect.EnumType { - return &file_google_api_field_behavior_proto_enumTypes[0] -} - -func (x FieldBehavior) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use FieldBehavior.Descriptor instead. -func (FieldBehavior) EnumDescriptor() ([]byte, []int) { - return file_google_api_field_behavior_proto_rawDescGZIP(), []int{0} -} - -var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.FieldOptions)(nil), - ExtensionType: ([]FieldBehavior)(nil), - Field: 1052, - Name: "google.api.field_behavior", - Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior", - Filename: "google/api/field_behavior.proto", - }, -} - -// Extension fields to descriptorpb.FieldOptions. -var ( - // A designation of a specific field behavior (required, output only, etc.) - // in protobuf messages. 
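Tooling consumes `field_behavior` the same way as the other annotations in this package: `proto.GetExtension` with a `[]FieldBehavior` assertion, since the extension is repeated. A small hedged sketch, again with hand-built `FieldOptions` standing in for a real descriptor:

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Mark a field the way the proto comments below describe, e.g.
	// google.protobuf.Timestamp expire_time = 1
	//     [(google.api.field_behavior) = OUTPUT_ONLY,
	//      (google.api.field_behavior) = IMMUTABLE];
	opts := &descriptorpb.FieldOptions{}
	proto.SetExtension(opts, annotations.E_FieldBehavior, []annotations.FieldBehavior{
		annotations.FieldBehavior_OUTPUT_ONLY,
		annotations.FieldBehavior_IMMUTABLE,
	})

	// Linters and doc generators read the behaviors back from the options.
	for _, b := range proto.GetExtension(opts, annotations.E_FieldBehavior).([]annotations.FieldBehavior) {
		fmt.Println(b) // OUTPUT_ONLY, then IMMUTABLE
	}
}
```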
- // - // Examples: - // - // string name = 1 [(google.api.field_behavior) = REQUIRED]; - // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // google.protobuf.Duration ttl = 1 - // [(google.api.field_behavior) = INPUT_ONLY]; - // google.protobuf.Timestamp expire_time = 1 - // [(google.api.field_behavior) = OUTPUT_ONLY, - // (google.api.field_behavior) = IMMUTABLE]; - // - // repeated google.api.FieldBehavior field_behavior = 1052; - E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0] -) - -var File_google_api_field_behavior_proto protoreflect.FileDescriptor - -var file_google_api_field_behavior_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, - 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, - 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, - 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0f, 0x0a, - 0x0b, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0e, - 0x0a, 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d, - 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, - 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, - 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, - 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f, - 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - 
file_google_api_field_behavior_proto_rawDescOnce sync.Once - file_google_api_field_behavior_proto_rawDescData = file_google_api_field_behavior_proto_rawDesc -) - -func file_google_api_field_behavior_proto_rawDescGZIP() []byte { - file_google_api_field_behavior_proto_rawDescOnce.Do(func() { - file_google_api_field_behavior_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_behavior_proto_rawDescData) - }) - return file_google_api_field_behavior_proto_rawDescData -} - -var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_field_behavior_proto_goTypes = []interface{}{ - (FieldBehavior)(0), // 0: google.api.FieldBehavior - (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions -} -var file_google_api_field_behavior_proto_depIdxs = []int32{ - 1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions - 0, // 1: google.api.field_behavior:type_name -> google.api.FieldBehavior - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is the sub-list for extension type_name - 0, // [0:1] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_api_field_behavior_proto_init() } -func file_google_api_field_behavior_proto_init() { - if File_google_api_field_behavior_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_field_behavior_proto_rawDesc, - NumEnums: 1, - NumMessages: 0, - NumExtensions: 1, - NumServices: 0, - }, - GoTypes: file_google_api_field_behavior_proto_goTypes, - DependencyIndexes: file_google_api_field_behavior_proto_depIdxs, - EnumInfos: file_google_api_field_behavior_proto_enumTypes, - ExtensionInfos: file_google_api_field_behavior_proto_extTypes, - }.Build() - File_google_api_field_behavior_proto = out.File - file_google_api_field_behavior_proto_rawDesc = nil - file_google_api_field_behavior_proto_goTypes = nil - file_google_api_field_behavior_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go deleted file mode 100644 index 8a0e1c345b..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ /dev/null @@ -1,782 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 -// source: google/api/http.proto - -package annotations - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Defines the HTTP configuration for an API service. It contains a list of -// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. -type Http struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A list of HTTP configuration rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` - // When set to true, URL path parameters will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in multi - // segment matches. - FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` -} - -func (x *Http) Reset() { - *x = Http{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_http_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Http) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Http) ProtoMessage() {} - -func (x *Http) ProtoReflect() protoreflect.Message { - mi := &file_google_api_http_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Http.ProtoReflect.Descriptor instead. -func (*Http) Descriptor() ([]byte, []int) { - return file_google_api_http_proto_rawDescGZIP(), []int{0} -} - -func (x *Http) GetRules() []*HttpRule { - if x != nil { - return x.Rules - } - return nil -} - -func (x *Http) GetFullyDecodeReservedExpansion() bool { - if x != nil { - return x.FullyDecodeReservedExpansion - } - return false -} - -// # gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and one or -// more HTTP REST endpoints. It allows developers to build a single API service -// that supports both gRPC APIs and REST APIs. Many systems, including [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies -// how different portions of the gRPC request message are mapped to the URL -// path, URL query parameters, and HTTP request body. It also controls how the -// gRPC response message is mapped to the HTTP response body. `HttpRule` is -// typically specified as an `google.api.http` annotation on the gRPC method. -// -// Each mapping specifies a URL path template and an HTTP method. The path -// template may refer to one or more fields in the gRPC request message, as long -// as each field is a non-repeated field with a primitive (non-message) type. 
-// The path template controls how fields of the request message are mapped to -// the URL path. -// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` -// -// Any fields in the request message which are not bound by the path template -// automatically become HTTP query parameters if there is no HTTP request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -// } -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` -// -// Note that fields which are mapped to URL query parameters must have a -// primitive type or a repeated primitive type or a non-repeated message type. -// In the case of a repeated type, the parameter can be repeated in the URL -// as `...?param=A¶m=B`. In the case of a message type, each field of the -// message is mapped to a separate parameter, such as -// `...?foo.a=A&foo.b=B&foo.c=C`. -// -// For HTTP methods that allow a request body, the `body` field -// specifies the mapping. Consider a REST update method on the -// message resource collection: -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// The following HTTP JSON to RPC mapping is enabled, where the -// representation of the JSON in the request body is determined by -// protos JSON encoding: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define that -// every field not bound by the path template should be mapped to the -// request body. This enables the following alternative definition of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" 
}` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice when -// defining REST APIs. The common usage of `*` is in custom methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by using -// the `additional_bindings` option. Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// This enables the following two alternative HTTP JSON to RPC mappings: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` -// -// ## Rules for HTTP mapping -// -// 1. Leaf request fields (recursive expansion nested messages in the request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They -// are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. -// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL -// query parameter, all fields -// are passed via URL path and HTTP request body. -// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP -// request body, all -// fields are passed via URL path and URL query parameters. -// -// ### Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. The syntax `**` matches -// zero or more URL path segments, which must be the last part of the URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by its -// template. A variable template must not contain other variables. If a variable -// matches a single path segment, its template may be omitted, e.g. `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` -// contains any reserved character, such characters should be percent-encoded -// before the matching. -// -// If a variable contains exactly one path segment, such as `"{var}"` or -// `"{var=*}"`, when such a variable is expanded into a URL path on the client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The -// server side does the reverse decoding. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{var}`. 
-//
-// If a variable contains multiple path segments, such as `"{var=foo/*}"`
-// or `"{var=**}"`, when such a variable is expanded into a URL path on the
-// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.
-// The server side does the reverse decoding, except "%2F" and "%2f" are left
-// unchanged. Such variables show up in the
-// [Discovery
-// Document](https://developers.google.com/discovery/v1/reference/apis) as
-// `{+var}`.
-//
-// ## Using gRPC API Service Configuration
-//
-// gRPC API Service Configuration (service config) is a configuration language
-// for configuring a gRPC service to become a user-facing product. The
-// service config is simply the YAML representation of the `google.api.Service`
-// proto message.
-//
-// As an alternative to annotating your proto file, you can configure gRPC
-// transcoding in your service config YAML files. You do this by specifying a
-// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
-// effect as the proto annotation. This can be particularly useful if you
-// have a proto that is reused in multiple services. Note that any transcoding
-// specified in the service config will override any matching transcoding
-// configuration in the proto.
-//
-// Example:
-//
-//	http:
-//	  rules:
-//	    # Selects a gRPC method and applies HttpRule to it.
-//	    - selector: example.v1.Messaging.GetMessage
-//	      get: /v1/messages/{message_id}/{sub.subfield}
-//
-// ## Special notes
-//
-// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the
-// proto to JSON conversion must follow the [proto3
-// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
-//
-// While the single segment variable follows the semantics of
-// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
-// Expansion, the multi segment variable **does not** follow RFC 6570 Section
-// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
-// does not expand special characters like `?` and `#`, which would lead
-// to invalid URLs. As a result, gRPC Transcoding uses a custom encoding
-// for multi segment variables.
-//
-// The path variables **must not** refer to any repeated or mapped field,
-// because client libraries are not capable of handling such variable expansion.
-//
-// The path variables **must not** capture the leading "/" character. The reason
-// is that the most common use case "{var}" does not capture the leading "/"
-// character. For consistency, all path variables must share the same behavior.
-//
-// Repeated message fields must not be mapped to URL query parameters, because
-// no client library can support such complicated mapping.
-//
-// If an API needs to use a JSON array for request or response body, it can map
-// the request or response body to a repeated field. However, some gRPC
-// Transcoding implementations may not support this feature.
-type HttpRule struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Selects a method to which this rule applies.
-	//
-	// Refer to [selector][google.api.DocumentationRule.selector] for syntax
-	// details.
-	Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
-	// Determines the URL pattern that is matched by this rule. This pattern can be
-	// used with any of the {get|put|post|delete|patch} methods. A custom method
-	// can be defined using the 'custom' field.
- // - // Types that are assignable to Pattern: - // - // *HttpRule_Get - // *HttpRule_Put - // *HttpRule_Post - // *HttpRule_Delete - // *HttpRule_Patch - // *HttpRule_Custom - Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` - // The name of the request field whose value is mapped to the HTTP request - // body, or `*` for mapping all request fields not captured by the path - // pattern to the HTTP body, or omitted for not having any HTTP request body. - // - // NOTE: the referred field must be present at the top-level of the request - // message type. - Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"` - // Optional. The name of the response field whose value is mapped to the HTTP - // response body. When omitted, the entire response message will be used - // as the HTTP response body. - // - // NOTE: The referred field must be present at the top-level of the response - // message type. - ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"` - // Additional HTTP bindings for the selector. Nested bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). - AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` -} - -func (x *HttpRule) Reset() { - *x = HttpRule{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_http_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HttpRule) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HttpRule) ProtoMessage() {} - -func (x *HttpRule) ProtoReflect() protoreflect.Message { - mi := &file_google_api_http_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead. 
-func (*HttpRule) Descriptor() ([]byte, []int) { - return file_google_api_http_proto_rawDescGZIP(), []int{1} -} - -func (x *HttpRule) GetSelector() string { - if x != nil { - return x.Selector - } - return "" -} - -func (m *HttpRule) GetPattern() isHttpRule_Pattern { - if m != nil { - return m.Pattern - } - return nil -} - -func (x *HttpRule) GetGet() string { - if x, ok := x.GetPattern().(*HttpRule_Get); ok { - return x.Get - } - return "" -} - -func (x *HttpRule) GetPut() string { - if x, ok := x.GetPattern().(*HttpRule_Put); ok { - return x.Put - } - return "" -} - -func (x *HttpRule) GetPost() string { - if x, ok := x.GetPattern().(*HttpRule_Post); ok { - return x.Post - } - return "" -} - -func (x *HttpRule) GetDelete() string { - if x, ok := x.GetPattern().(*HttpRule_Delete); ok { - return x.Delete - } - return "" -} - -func (x *HttpRule) GetPatch() string { - if x, ok := x.GetPattern().(*HttpRule_Patch); ok { - return x.Patch - } - return "" -} - -func (x *HttpRule) GetCustom() *CustomHttpPattern { - if x, ok := x.GetPattern().(*HttpRule_Custom); ok { - return x.Custom - } - return nil -} - -func (x *HttpRule) GetBody() string { - if x != nil { - return x.Body - } - return "" -} - -func (x *HttpRule) GetResponseBody() string { - if x != nil { - return x.ResponseBody - } - return "" -} - -func (x *HttpRule) GetAdditionalBindings() []*HttpRule { - if x != nil { - return x.AdditionalBindings - } - return nil -} - -type isHttpRule_Pattern interface { - isHttpRule_Pattern() -} - -type HttpRule_Get struct { - // Maps to HTTP GET. Used for listing and getting information about - // resources. - Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` -} - -type HttpRule_Put struct { - // Maps to HTTP PUT. Used for replacing a resource. - Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` -} - -type HttpRule_Post struct { - // Maps to HTTP POST. Used for creating a resource or performing an action. - Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` -} - -type HttpRule_Delete struct { - // Maps to HTTP DELETE. Used for deleting a resource. - Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` -} - -type HttpRule_Patch struct { - // Maps to HTTP PATCH. Used for updating a resource. - Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` -} - -type HttpRule_Custom struct { - // The custom pattern is used for specifying an HTTP method that is not - // included in the `pattern` field, such as HEAD, or "*" to leave the - // HTTP method unspecified for this rule. The wild-card rule is useful - // for services that provide content to Web (HTML) clients. - Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` -} - -func (*HttpRule_Get) isHttpRule_Pattern() {} - -func (*HttpRule_Put) isHttpRule_Pattern() {} - -func (*HttpRule_Post) isHttpRule_Pattern() {} - -func (*HttpRule_Delete) isHttpRule_Pattern() {} - -func (*HttpRule_Patch) isHttpRule_Pattern() {} - -func (*HttpRule_Custom) isHttpRule_Pattern() {} - -// A custom pattern is used for defining custom HTTP verb. -type CustomHttpPattern struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of this custom HTTP verb. - Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` - // The path matched by this custom verb. 
- Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *CustomHttpPattern) Reset() { - *x = CustomHttpPattern{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_http_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CustomHttpPattern) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CustomHttpPattern) ProtoMessage() {} - -func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message { - mi := &file_google_api_http_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead. -func (*CustomHttpPattern) Descriptor() ([]byte, []int) { - return file_google_api_http_proto_rawDescGZIP(), []int{2} -} - -func (x *CustomHttpPattern) GetKind() string { - if x != nil { - return x.Kind - } - return "" -} - -func (x *CustomHttpPattern) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -var File_google_api_http_proto protoreflect.FileDescriptor - -var file_google_api_http_proto_rawDesc = []byte{ - 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, - 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72, - 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, - 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79, - 0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda, - 0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70, - 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, - 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, - 0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, - 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x12, 
0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, - 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, - 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, - 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, - 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_api_http_proto_rawDescOnce sync.Once - file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc -) - -func file_google_api_http_proto_rawDescGZIP() []byte { - file_google_api_http_proto_rawDescOnce.Do(func() { - file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData) - }) - return file_google_api_http_proto_rawDescData -} - -var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_google_api_http_proto_goTypes = []interface{}{ - (*Http)(nil), // 0: google.api.Http - (*HttpRule)(nil), // 1: google.api.HttpRule - (*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern -} -var file_google_api_http_proto_depIdxs = []int32{ - 1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule - 2, // 1: google.api.HttpRule.custom:type_name -> google.api.CustomHttpPattern - 1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_google_api_http_proto_init() } -func file_google_api_http_proto_init() { - if File_google_api_http_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Http); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - 
return &v.unknownFields - default: - return nil - } - } - file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HttpRule); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CustomHttpPattern); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*HttpRule_Get)(nil), - (*HttpRule_Put)(nil), - (*HttpRule_Post)(nil), - (*HttpRule_Delete)(nil), - (*HttpRule_Patch)(nil), - (*HttpRule_Custom)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_http_proto_rawDesc, - NumEnums: 0, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_http_proto_goTypes, - DependencyIndexes: file_google_api_http_proto_depIdxs, - MessageInfos: file_google_api_http_proto_msgTypes, - }.Build() - File_google_api_http_proto = out.File - file_google_api_http_proto_rawDesc = nil - file_google_api_http_proto_goTypes = nil - file_google_api_http_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go deleted file mode 100644 index bbcc12d29c..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ /dev/null @@ -1,655 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 -// source: google/api/resource.proto - -package annotations - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// A description of the historical or future-looking state of the -// resource pattern. -type ResourceDescriptor_History int32 - -const ( - // The "unset" value. - ResourceDescriptor_HISTORY_UNSPECIFIED ResourceDescriptor_History = 0 - // The resource originally had one pattern and launched as such, and - // additional patterns were added later. - ResourceDescriptor_ORIGINALLY_SINGLE_PATTERN ResourceDescriptor_History = 1 - // The resource has one pattern, but the API owner expects to add more - // later. 
(This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents - // that from being necessary once there are multiple patterns.) - ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2 -) - -// Enum value maps for ResourceDescriptor_History. -var ( - ResourceDescriptor_History_name = map[int32]string{ - 0: "HISTORY_UNSPECIFIED", - 1: "ORIGINALLY_SINGLE_PATTERN", - 2: "FUTURE_MULTI_PATTERN", - } - ResourceDescriptor_History_value = map[string]int32{ - "HISTORY_UNSPECIFIED": 0, - "ORIGINALLY_SINGLE_PATTERN": 1, - "FUTURE_MULTI_PATTERN": 2, - } -) - -func (x ResourceDescriptor_History) Enum() *ResourceDescriptor_History { - p := new(ResourceDescriptor_History) - *p = x - return p -} - -func (x ResourceDescriptor_History) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ResourceDescriptor_History) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_resource_proto_enumTypes[0].Descriptor() -} - -func (ResourceDescriptor_History) Type() protoreflect.EnumType { - return &file_google_api_resource_proto_enumTypes[0] -} - -func (x ResourceDescriptor_History) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ResourceDescriptor_History.Descriptor instead. -func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) { - return file_google_api_resource_proto_rawDescGZIP(), []int{0, 0} -} - -// A flag representing a specific style that a resource claims to conform to. -type ResourceDescriptor_Style int32 - -const ( - // The unspecified value. Do not use. - ResourceDescriptor_STYLE_UNSPECIFIED ResourceDescriptor_Style = 0 - // This resource is intended to be "declarative-friendly". - // - // Declarative-friendly resources must be more strictly consistent, and - // setting this to true communicates to tools that this resource should - // adhere to declarative-friendly expectations. - // - // Note: This is used by the API linter (linter.aip.dev) to enable - // additional checks. - ResourceDescriptor_DECLARATIVE_FRIENDLY ResourceDescriptor_Style = 1 -) - -// Enum value maps for ResourceDescriptor_Style. -var ( - ResourceDescriptor_Style_name = map[int32]string{ - 0: "STYLE_UNSPECIFIED", - 1: "DECLARATIVE_FRIENDLY", - } - ResourceDescriptor_Style_value = map[string]int32{ - "STYLE_UNSPECIFIED": 0, - "DECLARATIVE_FRIENDLY": 1, - } -) - -func (x ResourceDescriptor_Style) Enum() *ResourceDescriptor_Style { - p := new(ResourceDescriptor_Style) - *p = x - return p -} - -func (x ResourceDescriptor_Style) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ResourceDescriptor_Style) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_resource_proto_enumTypes[1].Descriptor() -} - -func (ResourceDescriptor_Style) Type() protoreflect.EnumType { - return &file_google_api_resource_proto_enumTypes[1] -} - -func (x ResourceDescriptor_Style) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ResourceDescriptor_Style.Descriptor instead. -func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) { - return file_google_api_resource_proto_rawDescGZIP(), []int{0, 1} -} - -// A simple descriptor of a resource type. -// -// ResourceDescriptor annotates a resource message (either by means of a -// protobuf annotation or use in the service config), and associates the -// resource's schema, the resource type, and the pattern of the resource name. 
-// -// Example: -// -// message Topic { -// // Indicates this message defines a resource schema. -// // Declares the resource type in the format of {service}/{kind}. -// // For Kubernetes resources, the format is {api group}/{kind}. -// option (google.api.resource) = { -// type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" -// }; -// } -// -// The ResourceDescriptor Yaml config will look like: -// -// resources: -// - type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" -// -// Sometimes, resources have multiple patterns, typically because they can -// live under multiple parents. -// -// Example: -// -// message LogEntry { -// option (google.api.resource) = { -// type: "logging.googleapis.com/LogEntry" -// pattern: "projects/{project}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" -// }; -// } -// -// The ResourceDescriptor Yaml config will look like: -// -// resources: -// - type: 'logging.googleapis.com/LogEntry' -// pattern: "projects/{project}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" -type ResourceDescriptor struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The resource type. It must be in the format of - // {service_name}/{resource_type_kind}. The `resource_type_kind` must be - // singular and must not include version numbers. - // - // Example: `storage.googleapis.com/Bucket` - // - // The value of the resource_type_kind must follow the regular expression - // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and - // should use PascalCase (UpperCamelCase). The maximum number of - // characters allowed for the `resource_type_kind` is 100. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Optional. The relative resource name pattern associated with this resource - // type. The DNS prefix of the full resource name shouldn't be specified here. - // - // The path pattern must follow the syntax, which aligns with HTTP binding - // syntax: - // - // Template = Segment { "/" Segment } ; - // Segment = LITERAL | Variable ; - // Variable = "{" LITERAL "}" ; - // - // Examples: - // - // - "projects/{project}/topics/{topic}" - // - "projects/{project}/knowledgeBases/{knowledge_base}" - // - // The components in braces correspond to the IDs for each resource in the - // hierarchy. It is expected that, if multiple patterns are provided, - // the same component name (e.g. "project") refers to IDs of the same - // type of resource. - Pattern []string `protobuf:"bytes,2,rep,name=pattern,proto3" json:"pattern,omitempty"` - // Optional. The field on the resource that designates the resource name - // field. If omitted, this is assumed to be "name". - NameField string `protobuf:"bytes,3,opt,name=name_field,json=nameField,proto3" json:"name_field,omitempty"` - // Optional. The historical or future-looking state of the resource pattern. - // - // Example: - // - // // The InspectTemplate message originally only supported resource - // // names with organization, and project was added later. 
- // message InspectTemplate { - // option (google.api.resource) = { - // type: "dlp.googleapis.com/InspectTemplate" - // pattern: - // "organizations/{organization}/inspectTemplates/{inspect_template}" - // pattern: "projects/{project}/inspectTemplates/{inspect_template}" - // history: ORIGINALLY_SINGLE_PATTERN - // }; - // } - History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` - // The plural name used in the resource name and permission names, such as - // 'projects' for the resource name of 'projects/{project}' and the permission - // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - // concept of the `plural` field in k8s CRD spec - // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ - // - // Note: The plural form is required even for singleton resources. See - // https://aip.dev/156 - Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"` - // The same concept of the `singular` field in k8s CRD spec - // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ - // Such as "project" for the `resourcemanager.googleapis.com/Project` type. - Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"` - // Style flag(s) for this resource. - // These indicate that a resource is expected to conform to a given - // style. See the specific style flags for additional information. - Style []ResourceDescriptor_Style `protobuf:"varint,10,rep,packed,name=style,proto3,enum=google.api.ResourceDescriptor_Style" json:"style,omitempty"` -} - -func (x *ResourceDescriptor) Reset() { - *x = ResourceDescriptor{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_resource_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResourceDescriptor) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResourceDescriptor) ProtoMessage() {} - -func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { - mi := &file_google_api_resource_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. 
-func (*ResourceDescriptor) Descriptor() ([]byte, []int) { - return file_google_api_resource_proto_rawDescGZIP(), []int{0} -} - -func (x *ResourceDescriptor) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *ResourceDescriptor) GetPattern() []string { - if x != nil { - return x.Pattern - } - return nil -} - -func (x *ResourceDescriptor) GetNameField() string { - if x != nil { - return x.NameField - } - return "" -} - -func (x *ResourceDescriptor) GetHistory() ResourceDescriptor_History { - if x != nil { - return x.History - } - return ResourceDescriptor_HISTORY_UNSPECIFIED -} - -func (x *ResourceDescriptor) GetPlural() string { - if x != nil { - return x.Plural - } - return "" -} - -func (x *ResourceDescriptor) GetSingular() string { - if x != nil { - return x.Singular - } - return "" -} - -func (x *ResourceDescriptor) GetStyle() []ResourceDescriptor_Style { - if x != nil { - return x.Style - } - return nil -} - -// Defines a proto annotation that describes a string field that refers to -// an API resource. -type ResourceReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The resource type that the annotated field references. - // - // Example: - // - // message Subscription { - // string topic = 2 [(google.api.resource_reference) = { - // type: "pubsub.googleapis.com/Topic" - // }]; - // } - // - // Occasionally, a field may reference an arbitrary resource. In this case, - // APIs use the special value * in their resource reference. - // - // Example: - // - // message GetIamPolicyRequest { - // string resource = 2 [(google.api.resource_reference) = { - // type: "*" - // }]; - // } - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // The resource type of a child collection that the annotated field - // references. This is useful for annotating the `parent` field that - // doesn't have a fixed resource type. - // - // Example: - // - // message ListLogEntriesRequest { - // string parent = 1 [(google.api.resource_reference) = { - // child_type: "logging.googleapis.com/LogEntry" - // }; - // } - ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"` -} - -func (x *ResourceReference) Reset() { - *x = ResourceReference{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_resource_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResourceReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResourceReference) ProtoMessage() {} - -func (x *ResourceReference) ProtoReflect() protoreflect.Message { - mi := &file_google_api_resource_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResourceReference.ProtoReflect.Descriptor instead. 
-func (*ResourceReference) Descriptor() ([]byte, []int) { - return file_google_api_resource_proto_rawDescGZIP(), []int{1} -} - -func (x *ResourceReference) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *ResourceReference) GetChildType() string { - if x != nil { - return x.ChildType - } - return "" -} - -var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.FieldOptions)(nil), - ExtensionType: (*ResourceReference)(nil), - Field: 1055, - Name: "google.api.resource_reference", - Tag: "bytes,1055,opt,name=resource_reference", - Filename: "google/api/resource.proto", - }, - { - ExtendedType: (*descriptorpb.FileOptions)(nil), - ExtensionType: ([]*ResourceDescriptor)(nil), - Field: 1053, - Name: "google.api.resource_definition", - Tag: "bytes,1053,rep,name=resource_definition", - Filename: "google/api/resource.proto", - }, - { - ExtendedType: (*descriptorpb.MessageOptions)(nil), - ExtensionType: (*ResourceDescriptor)(nil), - Field: 1053, - Name: "google.api.resource", - Tag: "bytes,1053,opt,name=resource", - Filename: "google/api/resource.proto", - }, -} - -// Extension fields to descriptorpb.FieldOptions. -var ( - // An annotation that describes a resource reference, see - // [ResourceReference][]. - // - // optional google.api.ResourceReference resource_reference = 1055; - E_ResourceReference = &file_google_api_resource_proto_extTypes[0] -) - -// Extension fields to descriptorpb.FileOptions. -var ( - // An annotation that describes a resource definition without a corresponding - // message; see [ResourceDescriptor][]. - // - // repeated google.api.ResourceDescriptor resource_definition = 1053; - E_ResourceDefinition = &file_google_api_resource_proto_extTypes[1] -) - -// Extension fields to descriptorpb.MessageOptions. -var ( - // An annotation that describes a resource definition, see - // [ResourceDescriptor][]. 
- // - // optional google.api.ResourceDescriptor resource = 1053; - E_Resource = &file_google_api_resource_proto_extTypes[2] -) - -var File_google_api_resource_proto protoreflect.FileDescriptor - -var file_google_api_resource_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaa, 0x03, 0x0a, 0x12, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1d, - 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x40, 0x0a, - 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, - 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, - 0x6c, 0x61, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, - 0x6c, 0x61, 0x72, 0x12, 0x3a, 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x52, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x22, - 0x5b, 0x0a, 0x07, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x48, 0x49, - 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x4c, - 0x59, 0x5f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, - 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x46, 0x55, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4d, 0x55, 0x4c, - 0x54, 0x49, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, 0x10, 0x02, 0x22, 0x38, 0x0a, 0x05, - 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, - 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x46, 0x52, 0x49, 0x45, - 0x4e, 0x44, 0x4c, 0x59, 0x10, 0x01, 0x22, 0x46, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 
0x74, 0x79, 0x70, 0x65, 0x12, - 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c, - 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x9f, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x3a, 0x6e, 0x0a, 0x13, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x9d, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x5c, 0x0a, 0x08, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, - 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, - 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_google_api_resource_proto_rawDescOnce sync.Once - file_google_api_resource_proto_rawDescData = file_google_api_resource_proto_rawDesc -) - -func file_google_api_resource_proto_rawDescGZIP() []byte { - file_google_api_resource_proto_rawDescOnce.Do(func() { - file_google_api_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_resource_proto_rawDescData) - }) - return file_google_api_resource_proto_rawDescData -} - -var file_google_api_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_google_api_resource_proto_goTypes = 
[]interface{}{ - (ResourceDescriptor_History)(0), // 0: google.api.ResourceDescriptor.History - (ResourceDescriptor_Style)(0), // 1: google.api.ResourceDescriptor.Style - (*ResourceDescriptor)(nil), // 2: google.api.ResourceDescriptor - (*ResourceReference)(nil), // 3: google.api.ResourceReference - (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions - (*descriptorpb.FileOptions)(nil), // 5: google.protobuf.FileOptions - (*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions -} -var file_google_api_resource_proto_depIdxs = []int32{ - 0, // 0: google.api.ResourceDescriptor.history:type_name -> google.api.ResourceDescriptor.History - 1, // 1: google.api.ResourceDescriptor.style:type_name -> google.api.ResourceDescriptor.Style - 4, // 2: google.api.resource_reference:extendee -> google.protobuf.FieldOptions - 5, // 3: google.api.resource_definition:extendee -> google.protobuf.FileOptions - 6, // 4: google.api.resource:extendee -> google.protobuf.MessageOptions - 3, // 5: google.api.resource_reference:type_name -> google.api.ResourceReference - 2, // 6: google.api.resource_definition:type_name -> google.api.ResourceDescriptor - 2, // 7: google.api.resource:type_name -> google.api.ResourceDescriptor - 8, // [8:8] is the sub-list for method output_type - 8, // [8:8] is the sub-list for method input_type - 5, // [5:8] is the sub-list for extension type_name - 2, // [2:5] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_google_api_resource_proto_init() } -func file_google_api_resource_proto_init() { - if File_google_api_resource_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResourceDescriptor); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResourceReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_resource_proto_rawDesc, - NumEnums: 2, - NumMessages: 2, - NumExtensions: 3, - NumServices: 0, - }, - GoTypes: file_google_api_resource_proto_goTypes, - DependencyIndexes: file_google_api_resource_proto_depIdxs, - EnumInfos: file_google_api_resource_proto_enumTypes, - MessageInfos: file_google_api_resource_proto_msgTypes, - ExtensionInfos: file_google_api_resource_proto_extTypes, - }.Build() - File_google_api_resource_proto = out.File - file_google_api_resource_proto_rawDesc = nil - file_google_api_resource_proto_goTypes = nil - file_google_api_resource_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go deleted file mode 100644 index 9a9ae04c29..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-//	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-//	protoc-gen-go v1.26.0
-//	protoc        v3.21.9
-// source: google/api/routing.proto
-
-package annotations
-
-import (
-	reflect "reflect"
-	sync "sync"
-
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
-)
-
-const (
-	// Verify that this generated code is sufficiently up-to-date.
-	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
-	// Verify that runtime/protoimpl is sufficiently up-to-date.
-	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Specifies the routing information that should be sent along with the request
-// in the form of routing header.
-// **NOTE:** All service configuration rules follow the "last one wins" order.
-//
-// The examples below will apply to an RPC which has the following request type:
-//
-// Message Definition:
-//
-//	message Request {
-//	  // The name of the Table
-//	  // Values can be of the following formats:
-//	  // - `projects/<project>/tables/<table>`
-//	  // - `projects/<project>/instances/<instance>/tables/<table>`
-//	  // - `region/<region>/zones/<zone>/tables/<table>`
-//	  string table_name = 1;
-//
-//	  // This value specifies routing for replication.
-//	  // It can be in the following formats:
-//	  // - `profiles/<profile_id>`
-//	  // - a legacy `profile_id` that can be any string
-//	  string app_profile_id = 2;
-//	}
-//
-// Example message:
-//
-//	{
-//	  table_name: projects/proj_foo/instances/instance_bar/table/table_baz,
-//	  app_profile_id: profiles/prof_qux
-//	}
-//
-// The routing header consists of one or multiple key-value pairs. Every key
-// and value must be percent-encoded, and joined together in the format of
-// `key1=value1&key2=value2`.
-// In the examples below I am skipping the percent-encoding for readability.
-//
-// # Example 1
-//
-// Extracting a field from the request to put into the routing header
-// unchanged, with the key equal to the field name.
-//
-// annotation:
-//
-//	option (google.api.routing) = {
-//	  // Take the `app_profile_id`.
-//	  routing_parameters {
-//	    field: "app_profile_id"
-//	  }
-//	};
-//
-// result:
-//
-//	x-goog-request-params: app_profile_id=profiles/prof_qux
-//
-// # Example 2
-//
-// Extracting a field from the request to put into the routing header
-// unchanged, with the key different from the field name.
-//
-// annotation:
-//
-//	option (google.api.routing) = {
-//	  // Take the `app_profile_id`, but name it `routing_id` in the header.
-//	  routing_parameters {
-//	    field: "app_profile_id"
-//	    path_template: "{routing_id=**}"
-//	  }
-//	};
-//
-// result:
-//
-//	x-goog-request-params: routing_id=profiles/prof_qux
-//
-// # Example 3
-//
-// Extracting a field from the request to put into the routing
-// header, while matching a path template syntax on the field's value.
-//
-// NB: it is more useful to send nothing than to send garbage for the purpose
-// of dynamic routing, since garbage pollutes cache. Thus the matching.
-//
-// # Sub-example 3a
-//
-// The field matches the template.
-//
-// annotation:
-//
-//	option (google.api.routing) = {
-//	  // Take the `table_name`, if it's well-formed (with project-based
-//	  // syntax).
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{table_name=projects/*/instances/*/**}"
-//	  }
-//	};
-//
-// result:
-//
-//	x-goog-request-params:
-//	table_name=projects/proj_foo/instances/instance_bar/table/table_baz
-//
-// # Sub-example 3b
-//
-// The field does not match the template.
-//
-// annotation:
-//
-//	option (google.api.routing) = {
-//	  // Take the `table_name`, if it's well-formed (with region-based
-//	  // syntax).
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{table_name=regions/*/zones/*/**}"
-//	  }
-//	};
-//
-// result:
-//
-//	<no routing header will be sent>
-//
-// # Sub-example 3c
-//
-// Multiple alternative conflictingly named path templates are
-// specified. The one that matches is used to construct the header.
-//
-// annotation:
-//
-//	option (google.api.routing) = {
-//	  // Take the `table_name`, if it's well-formed, whether
-//	  // using the region- or projects-based syntax.
-//
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{table_name=regions/*/zones/*/**}"
-//	  }
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{table_name=projects/*/instances/*/**}"
-//	  }
-//	};
-//
-// result:
-//
-//	x-goog-request-params:
-//	table_name=projects/proj_foo/instances/instance_bar/table/table_baz
-//
-// # Example 4
-//
-// Extracting a single routing header key-value pair by matching a
-// template syntax on (a part of) a single request field.
-//
-// annotation:
-//
-//	option (google.api.routing) = {
-//	  // Take just the project id from the `table_name` field.
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{routing_id=projects/*}/**"
-//	  }
-//	};
-//
-// result:
-//
-//	x-goog-request-params: routing_id=projects/proj_foo
-//
-// # Example 5
-//
-// Extracting a single routing header key-value pair by matching
-// several conflictingly named path templates on (parts of) a single request
-// field. The last template to match "wins" the conflict.
-//
-// annotation:
-//
-//	option (google.api.routing) = {
-//	  // If the `table_name` does not have instances information,
-//	  // take just the project id for routing.
-//	  // Otherwise take project + instance.
-//
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{routing_id=projects/*}/**"
-//	  }
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{routing_id=projects/*/instances/*}/**"
-//	  }
-//	};
-//
-// result:
-//
-//	x-goog-request-params:
-//	routing_id=projects/proj_foo/instances/instance_bar
-//
-// # Example 6
-//
-// Extracting multiple routing header key-value pairs by matching
-// several non-conflicting path templates on (parts of) a single request field.
-//
-// # Sub-example 6a
-//
-// Make the templates strict, so that if the `table_name` does not
-// have an instance information, nothing is sent.
-//
-// annotation:
-//
-//	option (google.api.routing) = {
-//	  // The routing code needs two keys instead of one composite
-//	  // but works only for the tables with the "project-instance" name
-//	  // syntax.
-//
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{project_id=projects/*}/instances/*/**"
-//	  }
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "projects/*/{instance_id=instances/*}/**"
-//	  }
-//	};
-//
-// result:
-//
-//	x-goog-request-params:
-//	project_id=projects/proj_foo&instance_id=instances/instance_bar
-//
-// # Sub-example 6b
-//
-// Make the templates loose, so that if the `table_name` does not
-// have an instance information, just the project id part is sent.
-//
-// annotation:
-//
-//	option (google.api.routing) = {
-//	  // The routing code wants two keys instead of one composite
-//	  // but will work with just the `project_id` for tables without
-//	  // an instance in the `table_name`.
-//
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{project_id=projects/*}/**"
-//	  }
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "projects/*/{instance_id=instances/*}/**"
-//	  }
-//	};
-//
-// result (is the same as 6a for our example message because it has the instance
-// information):
-//
-//	x-goog-request-params:
-//	project_id=projects/proj_foo&instance_id=instances/instance_bar
-//
-// # Example 7
-//
-// Extracting multiple routing header key-value pairs by matching
-// several path templates on multiple request fields.
-//
-// NB: note that here there is no way to specify sending nothing if one of the
-// fields does not match its template. E.g. if the `table_name` is in the wrong
-// format, the `project_id` will not be sent, but the `routing_id` will be.
-// The backend routing code has to be aware of that and be prepared to not
-// receive a full complement of keys if it expects multiple.
-//
-// annotation:
-//
-//	option (google.api.routing) = {
-//	  // The routing needs both `project_id` and `routing_id`
-//	  // (from the `app_profile_id` field) for routing.
-//
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{project_id=projects/*}/**"
-//	  }
-//	  routing_parameters {
-//	    field: "app_profile_id"
-//	    path_template: "{routing_id=**}"
-//	  }
-//	};
-//
-// result:
-//
-//	x-goog-request-params:
-//	project_id=projects/proj_foo&routing_id=profiles/prof_qux
-//
-// # Example 8
-//
-// Extracting a single routing header key-value pair by matching
-// several conflictingly named path templates on several request fields. The
-// last template to match "wins" the conflict.
-//
-// annotation:
-//
-//	option (google.api.routing) = {
-//	  // The `routing_id` can be a project id or a region id depending on
-//	  // the table name format, but only if the `app_profile_id` is not set.
-//	  // If `app_profile_id` is set it should be used instead.
-//
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{routing_id=projects/*}/**"
-//	  }
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{routing_id=regions/*}/**"
-//	  }
-//	  routing_parameters {
-//	    field: "app_profile_id"
-//	    path_template: "{routing_id=**}"
-//	  }
-//	};
-//
-// result:
-//
-//	x-goog-request-params: routing_id=profiles/prof_qux
-//
-// # Example 9
-//
-// Bringing it all together.
-//
-// annotation:
-//
-//	option (google.api.routing) = {
-//	  // For routing both `table_location` and a `routing_id` are needed.
-//	  //
-//	  // table_location can be either an instance id or a region+zone id.
-//	  //
-//	  // For `routing_id`, take the value of `app_profile_id`
-//	  // - If it's in the format `profiles/<profile_id>`, send
-//	  // just the `<profile_id>` part.
-//	  // - If it's any other literal, send it as is.
-//	  // If the `app_profile_id` is empty, and the `table_name` starts with
-//	  // the project_id, send that instead.
-//
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "projects/*/{table_location=instances/*}/tables/*"
-//	  }
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{table_location=regions/*/zones/*}/tables/*"
-//	  }
-//	  routing_parameters {
-//	    field: "table_name"
-//	    path_template: "{routing_id=projects/*}/**"
-//	  }
-//	  routing_parameters {
-//	    field: "app_profile_id"
-//	    path_template: "{routing_id=**}"
-//	  }
-//	  routing_parameters {
-//	    field: "app_profile_id"
-//	    path_template: "profiles/{routing_id=*}"
-//	  }
-//	};
-//
-// result:
-//
-//	x-goog-request-params:
-//	table_location=instances/instance_bar&routing_id=prof_qux
-type RoutingRule struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// A collection of Routing Parameter specifications.
-	// **NOTE:** If multiple Routing Parameters describe the same key
-	// (via the `path_template` field or via the `field` field when
-	// `path_template` is not provided), "last one wins" rule
-	// determines which Parameter gets used.
-	// See the examples for more details.
-	RoutingParameters []*RoutingParameter `protobuf:"bytes,2,rep,name=routing_parameters,json=routingParameters,proto3" json:"routing_parameters,omitempty"`
-}
-
-func (x *RoutingRule) Reset() {
-	*x = RoutingRule{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_api_routing_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *RoutingRule) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RoutingRule) ProtoMessage() {}
-
-func (x *RoutingRule) ProtoReflect() protoreflect.Message {
-	mi := &file_google_api_routing_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use RoutingRule.ProtoReflect.Descriptor instead.
-func (*RoutingRule) Descriptor() ([]byte, []int) {
-	return file_google_api_routing_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *RoutingRule) GetRoutingParameters() []*RoutingParameter {
-	if x != nil {
-		return x.RoutingParameters
-	}
-	return nil
-}
-
-// A projection from an input message to the GRPC or REST header.
-type RoutingParameter struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// A request field to extract the header key-value pair from.
-	Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
-	// A pattern matching the key-value field. Optional.
-	// If not specified, the whole field specified in the `field` field will be
-	// taken as value, and its name used as key. If specified, it MUST contain
-	// exactly one named segment (along with any number of unnamed segments) The
-	// pattern will be matched over the field specified in the `field` field, then
-	// if the match is successful:
-	//   - the name of the single named segment will be used as a header name,
-	//   - the match value of the segment will be used as a header value;
-	//     if the match is NOT successful, nothing will be sent.
-	//
-	// Example:
-	//
-	//	                        -- This is a field in the request message
-	//	                       |   that the header value will be extracted from.
-	//	                       |
-	//	                       |                     -- This is the key name in the
-	//	                       |                    |   routing header.
-	//	                       V                    |
-	//	    field: "table_name"                     v
-	//	    path_template: "projects/*/{table_location=instances/*}/tables/*"
-	//	                                           ^            ^
-	//	                                           |            |
-	//	    In the {} brackets is the pattern that |            |
-	//	    specifies what to extract from the --  |            |
-	//	    field as a value to be sent.                        |
-	//	                                                        |
-	//	    The string in the field must match the whole pattern --
-	//	    before brackets, inside brackets, after brackets.
-	//
-	// When looking at this specific example, we can see that:
-	//   - A key-value pair with the key `table_location`
-	//     and the value matching `instances/*` should be added
-	//     to the x-goog-request-params routing header.
-	//   - The value is extracted from the request message's `table_name` field
-	//     if it matches the full pattern specified:
-	//     `projects/*/instances/*/tables/*`.
-	//
-	// **NB:** If the `path_template` field is not provided, the key name is
-	// equal to the field name, and the whole field should be sent as a value.
- // This makes the pattern for the field and the value functionally equivalent - // to `**`, and the configuration - // - // { - // field: "table_name" - // } - // - // is a functionally equivalent shorthand to: - // - // { - // field: "table_name" - // path_template: "{table_name=**}" - // } - // - // See Example 1 for more details. - PathTemplate string `protobuf:"bytes,2,opt,name=path_template,json=pathTemplate,proto3" json:"path_template,omitempty"` -} - -func (x *RoutingParameter) Reset() { - *x = RoutingParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_routing_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RoutingParameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RoutingParameter) ProtoMessage() {} - -func (x *RoutingParameter) ProtoReflect() protoreflect.Message { - mi := &file_google_api_routing_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RoutingParameter.ProtoReflect.Descriptor instead. -func (*RoutingParameter) Descriptor() ([]byte, []int) { - return file_google_api_routing_proto_rawDescGZIP(), []int{1} -} - -func (x *RoutingParameter) GetField() string { - if x != nil { - return x.Field - } - return "" -} - -func (x *RoutingParameter) GetPathTemplate() string { - if x != nil { - return x.PathTemplate - } - return "" -} - -var file_google_api_routing_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.MethodOptions)(nil), - ExtensionType: (*RoutingRule)(nil), - Field: 72295729, - Name: "google.api.routing", - Tag: "bytes,72295729,opt,name=routing", - Filename: "google/api/routing.proto", - }, -} - -// Extension fields to descriptorpb.MethodOptions. -var ( - // See RoutingRule. 
- // - // optional google.api.RoutingRule routing = 72295729; - E_Routing = &file_google_api_routing_proto_extTypes[0] -) - -var File_google_api_routing_proto protoreflect.FileDescriptor - -var file_google_api_routing_proto_rawDesc = []byte{ - 0x0a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5a, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x72, 0x6f, 0x75, 0x74, 0x69, - 0x6e, 0x67, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x52, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x22, 0x4d, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x23, - 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x54, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x3a, 0x54, 0x0a, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb1, - 0xca, 0xbc, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x52, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0c, 0x52, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, - 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_api_routing_proto_rawDescOnce sync.Once - file_google_api_routing_proto_rawDescData = file_google_api_routing_proto_rawDesc -) - -func file_google_api_routing_proto_rawDescGZIP() []byte { - file_google_api_routing_proto_rawDescOnce.Do(func() { - file_google_api_routing_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_routing_proto_rawDescData) - }) - return file_google_api_routing_proto_rawDescData -} - -var file_google_api_routing_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_google_api_routing_proto_goTypes = []interface{}{ - 
(*RoutingRule)(nil), // 0: google.api.RoutingRule - (*RoutingParameter)(nil), // 1: google.api.RoutingParameter - (*descriptorpb.MethodOptions)(nil), // 2: google.protobuf.MethodOptions -} -var file_google_api_routing_proto_depIdxs = []int32{ - 1, // 0: google.api.RoutingRule.routing_parameters:type_name -> google.api.RoutingParameter - 2, // 1: google.api.routing:extendee -> google.protobuf.MethodOptions - 0, // 2: google.api.routing:type_name -> google.api.RoutingRule - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 2, // [2:3] is the sub-list for extension type_name - 1, // [1:2] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_google_api_routing_proto_init() } -func file_google_api_routing_proto_init() { - if File_google_api_routing_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_routing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RoutingRule); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_api_routing_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RoutingParameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_routing_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 1, - NumServices: 0, - }, - GoTypes: file_google_api_routing_proto_goTypes, - DependencyIndexes: file_google_api_routing_proto_depIdxs, - MessageInfos: file_google_api_routing_proto_msgTypes, - ExtensionInfos: file_google_api_routing_proto_extTypes, - }.Build() - File_google_api_routing_proto = out.File - file_google_api_routing_proto_rawDesc = nil - file_google_api_routing_proto_goTypes = nil - file_google_api_routing_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go deleted file mode 100644 index 454948669d..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 -// source: google/api/launch_stage.proto - -package api - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The launch stage as defined by [Google Cloud Platform -// Launch Stages](https://cloud.google.com/terms/launch-stages). -type LaunchStage int32 - -const ( - // Do not use this default value. - LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0 - // The feature is not yet implemented. Users can not use it. - LaunchStage_UNIMPLEMENTED LaunchStage = 6 - // Prelaunch features are hidden from users and are only visible internally. - LaunchStage_PRELAUNCH LaunchStage = 7 - // Early Access features are limited to a closed group of testers. To use - // these features, you must sign up in advance and sign a Trusted Tester - // agreement (which includes confidentiality provisions). These features may - // be unstable, changed in backward-incompatible ways, and are not - // guaranteed to be released. - LaunchStage_EARLY_ACCESS LaunchStage = 1 - // Alpha is a limited availability test for releases before they are cleared - // for widespread use. By Alpha, all significant design issues are resolved - // and we are in the process of verifying functionality. Alpha customers - // need to apply for access, agree to applicable terms, and have their - // projects allowlisted. Alpha releases don't have to be feature complete, - // no SLAs are provided, and there are no technical support obligations, but - // they will be far enough along that customers can actually use them in - // test environments or for limited-use tests -- just like they would in - // normal production cases. - LaunchStage_ALPHA LaunchStage = 2 - // Beta is the point at which we are ready to open a release for any - // customer to use. There are no SLA or technical support obligations in a - // Beta release. Products will be complete from a feature perspective, but - // may have some open outstanding issues. Beta releases are suitable for - // limited production use cases. - LaunchStage_BETA LaunchStage = 3 - // GA features are open to all developers and are considered stable and - // fully qualified for production use. - LaunchStage_GA LaunchStage = 4 - // Deprecated features are scheduled to be shut down and removed. For more - // information, see the "Deprecation Policy" section of our [Terms of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the Deprecation - // Policy](https://cloud.google.com/terms/deprecation) documentation. - LaunchStage_DEPRECATED LaunchStage = 5 -) - -// Enum value maps for LaunchStage. 
-var ( - LaunchStage_name = map[int32]string{ - 0: "LAUNCH_STAGE_UNSPECIFIED", - 6: "UNIMPLEMENTED", - 7: "PRELAUNCH", - 1: "EARLY_ACCESS", - 2: "ALPHA", - 3: "BETA", - 4: "GA", - 5: "DEPRECATED", - } - LaunchStage_value = map[string]int32{ - "LAUNCH_STAGE_UNSPECIFIED": 0, - "UNIMPLEMENTED": 6, - "PRELAUNCH": 7, - "EARLY_ACCESS": 1, - "ALPHA": 2, - "BETA": 3, - "GA": 4, - "DEPRECATED": 5, - } -) - -func (x LaunchStage) Enum() *LaunchStage { - p := new(LaunchStage) - *p = x - return p -} - -func (x LaunchStage) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (LaunchStage) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_launch_stage_proto_enumTypes[0].Descriptor() -} - -func (LaunchStage) Type() protoreflect.EnumType { - return &file_google_api_launch_stage_proto_enumTypes[0] -} - -func (x LaunchStage) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use LaunchStage.Descriptor instead. -func (LaunchStage) EnumDescriptor() ([]byte, []int) { - return file_google_api_launch_stage_proto_rawDescGZIP(), []int{0} -} - -var File_google_api_launch_stage_proto protoreflect.FileDescriptor - -var file_google_api_launch_stage_proto_rawDesc = []byte{ - 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, - 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x8c, 0x01, 0x0a, 0x0b, - 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, - 0x41, 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, - 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, - 0x50, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x45, - 0x41, 0x52, 0x4c, 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, - 0x05, 0x41, 0x4c, 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, - 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, - 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, - 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, - 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, - 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_api_launch_stage_proto_rawDescOnce sync.Once - file_google_api_launch_stage_proto_rawDescData = file_google_api_launch_stage_proto_rawDesc -) - -func file_google_api_launch_stage_proto_rawDescGZIP() []byte { - file_google_api_launch_stage_proto_rawDescOnce.Do(func() { - file_google_api_launch_stage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_launch_stage_proto_rawDescData) - }) - return file_google_api_launch_stage_proto_rawDescData -} - -var 
file_google_api_launch_stage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_launch_stage_proto_goTypes = []interface{}{ - (LaunchStage)(0), // 0: google.api.LaunchStage -} -var file_google_api_launch_stage_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_api_launch_stage_proto_init() } -func file_google_api_launch_stage_proto_init() { - if File_google_api_launch_stage_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_launch_stage_proto_rawDesc, - NumEnums: 1, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_api_launch_stage_proto_goTypes, - DependencyIndexes: file_google_api_launch_stage_proto_depIdxs, - EnumInfos: file_google_api_launch_stage_proto_enumTypes, - }.Build() - File_google_api_launch_stage_proto = out.File - file_google_api_launch_stage_proto_rawDesc = nil - file_google_api_launch_stage_proto_goTypes = nil - file_google_api_launch_stage_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go b/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go deleted file mode 100644 index 1d3f1b5b7e..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the {{.RootMod}} import, won't actually become part of -// the resultant binary. -//go:build modhack -// +build modhack - -package api - -// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "google.golang.org/genproto/internal" diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go deleted file mode 100644 index 9fb745926a..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by aliasgen. DO NOT EDIT. 
- -// Package iam aliases all exported identifiers in package -// "cloud.google.com/go/iam/apiv1/iampb". -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb. -// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md -// for more details. -package iam - -import ( - src "cloud.google.com/go/iam/apiv1/iampb" - grpc "google.golang.org/grpc" -) - -// Deprecated: Please use consts in: cloud.google.com/go/iam/apiv1/iampb -const ( - AuditConfigDelta_ACTION_UNSPECIFIED = src.AuditConfigDelta_ACTION_UNSPECIFIED - AuditConfigDelta_ADD = src.AuditConfigDelta_ADD - AuditConfigDelta_REMOVE = src.AuditConfigDelta_REMOVE - AuditLogConfig_ADMIN_READ = src.AuditLogConfig_ADMIN_READ - AuditLogConfig_DATA_READ = src.AuditLogConfig_DATA_READ - AuditLogConfig_DATA_WRITE = src.AuditLogConfig_DATA_WRITE - AuditLogConfig_LOG_TYPE_UNSPECIFIED = src.AuditLogConfig_LOG_TYPE_UNSPECIFIED - BindingDelta_ACTION_UNSPECIFIED = src.BindingDelta_ACTION_UNSPECIFIED - BindingDelta_ADD = src.BindingDelta_ADD - BindingDelta_REMOVE = src.BindingDelta_REMOVE -) - -// Deprecated: Please use vars in: cloud.google.com/go/iam/apiv1/iampb -var ( - AuditConfigDelta_Action_name = src.AuditConfigDelta_Action_name - AuditConfigDelta_Action_value = src.AuditConfigDelta_Action_value - AuditLogConfig_LogType_name = src.AuditLogConfig_LogType_name - AuditLogConfig_LogType_value = src.AuditLogConfig_LogType_value - BindingDelta_Action_name = src.BindingDelta_Action_name - BindingDelta_Action_value = src.BindingDelta_Action_value - File_google_iam_v1_iam_policy_proto = src.File_google_iam_v1_iam_policy_proto - File_google_iam_v1_options_proto = src.File_google_iam_v1_options_proto - File_google_iam_v1_policy_proto = src.File_google_iam_v1_policy_proto -) - -// Specifies the audit configuration for a service. The configuration -// determines which permission types are logged, and what identities, if any, -// are exempted from logging. An AuditConfig must have one or more -// AuditLogConfigs. If there are AuditConfigs for both `allServices` and a -// specific service, the union of the two AuditConfigs is used for that -// service: the log_types specified in each AuditConfig are enabled, and the -// exempted_members in each AuditLogConfig are exempted. Example Policy with -// multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", -// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ -// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": -// "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", -// "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": -// "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For -// sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, and -// aliya@example.com from DATA_WRITE logging. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditConfig = src.AuditConfig - -// One delta entry for AuditConfig. Each individual change (only one -// exempted_member in each entry) to a AuditConfig will be a separate entry. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditConfigDelta = src.AuditConfigDelta - -// The type of action performed on an audit configuration in a policy. 
-// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditConfigDelta_Action = src.AuditConfigDelta_Action - -// Provides the configuration for logging a type of permissions. Example: { -// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ -// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables -// 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from -// DATA_READ logging. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditLogConfig = src.AuditLogConfig - -// The list of valid permission types for which logging can be configured. -// Admin writes are always logged, and are not configurable. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditLogConfig_LogType = src.AuditLogConfig_LogType - -// Associates `members`, or principals, with a `role`. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type Binding = src.Binding - -// One delta entry for Binding. Each individual change (only one member in -// each entry) to a binding will be a separate entry. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type BindingDelta = src.BindingDelta - -// The type of action performed on a Binding in a policy. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type BindingDelta_Action = src.BindingDelta_Action - -// Request message for `GetIamPolicy` method. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type GetIamPolicyRequest = src.GetIamPolicyRequest - -// Encapsulates settings provided to GetIamPolicy. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type GetPolicyOptions = src.GetPolicyOptions - -// IAMPolicyClient is the client API for IAMPolicy service. For semantics -// around ctx use and closing/ending streaming RPCs, please refer to -// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type IAMPolicyClient = src.IAMPolicyClient - -// IAMPolicyServer is the server API for IAMPolicy service. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type IAMPolicyServer = src.IAMPolicyServer - -// An Identity and Access Management (IAM) policy, which specifies access -// controls for Google Cloud resources. A `Policy` is a collection of -// `bindings`. A `binding` binds one or more `members`, or principals, to a -// single `role`. Principals can be user accounts, service accounts, Google -// groups, and domains (such as G Suite). A `role` is a named list of -// permissions; each `role` can be an IAM predefined role or a user-created -// custom role. For some types of Google Cloud resources, a `binding` can also -// specify a `condition`, which is a logical expression that allows access to a -// resource only if the expression evaluates to `true`. A condition can add -// constraints based on attributes of the request, the resource, or both. To -// learn which resources support conditions in their IAM policies, see the [IAM -// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
-// **JSON example:** { "bindings": [ { "role": -// "roles/resourcemanager.organizationAdmin", "members": [ -// "user:mike@example.com", "group:admins@example.com", "domain:google.com", -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": -// "roles/resourcemanager.organizationViewer", "members": [ -// "user:eve@example.com" ], "condition": { "title": "expirable access", -// "description": "Does not grant access after Sep 2020", "expression": -// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": -// "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - -// user:mike@example.com - group:admins@example.com - domain:google.com - -// serviceAccount:my-project-id@appspot.gserviceaccount.com role: -// roles/resourcemanager.organizationAdmin - members: - user:eve@example.com -// role: roles/resourcemanager.organizationViewer condition: title: expirable -// access description: Does not grant access after Sep 2020 expression: -// request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= -// version: 3 For a description of IAM and its features, see the [IAM -// documentation](https://cloud.google.com/iam/docs/). -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type Policy = src.Policy - -// The difference delta between two policies. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type PolicyDelta = src.PolicyDelta - -// Request message for `SetIamPolicy` method. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type SetIamPolicyRequest = src.SetIamPolicyRequest - -// Request message for `TestIamPermissions` method. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type TestIamPermissionsRequest = src.TestIamPermissionsRequest - -// Response message for `TestIamPermissions` method. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type TestIamPermissionsResponse = src.TestIamPermissionsResponse - -// UnimplementedIAMPolicyServer can be embedded to have forward compatible -// implementations. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type UnimplementedIAMPolicyServer = src.UnimplementedIAMPolicyServer - -// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb -func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { - return src.NewIAMPolicyClient(cc) -} - -// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb -func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { - src.RegisterIAMPolicyServer(s, srv) -} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go deleted file mode 100644 index cc5d52fbcc..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 -// source: google/rpc/code.proto - -package code - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The canonical error codes for gRPC APIs. -// -// Sometimes multiple error codes may apply. Services should return -// the most specific error code that applies. For example, prefer -// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. -// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. -type Code int32 - -const ( - // Not an error; returned on success. - // - // HTTP Mapping: 200 OK - Code_OK Code = 0 - // The operation was cancelled, typically by the caller. - // - // HTTP Mapping: 499 Client Closed Request - Code_CANCELLED Code = 1 - // Unknown error. For example, this error may be returned when - // a `Status` value received from another address space belongs to - // an error space that is not known in this address space. Also - // errors raised by APIs that do not return enough error information - // may be converted to this error. - // - // HTTP Mapping: 500 Internal Server Error - Code_UNKNOWN Code = 2 - // The client specified an invalid argument. Note that this differs - // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments - // that are problematic regardless of the state of the system - // (e.g., a malformed file name). - // - // HTTP Mapping: 400 Bad Request - Code_INVALID_ARGUMENT Code = 3 - // The deadline expired before the operation could complete. For operations - // that change the state of the system, this error may be returned - // even if the operation has completed successfully. For example, a - // successful response from a server could have been delayed long - // enough for the deadline to expire. - // - // HTTP Mapping: 504 Gateway Timeout - Code_DEADLINE_EXCEEDED Code = 4 - // Some requested entity (e.g., file or directory) was not found. - // - // Note to server developers: if a request is denied for an entire class - // of users, such as gradual feature rollout or undocumented allowlist, - // `NOT_FOUND` may be used. If a request is denied for some users within - // a class of users, such as user-based access control, `PERMISSION_DENIED` - // must be used. - // - // HTTP Mapping: 404 Not Found - Code_NOT_FOUND Code = 5 - // The entity that a client attempted to create (e.g., file or directory) - // already exists. - // - // HTTP Mapping: 409 Conflict - Code_ALREADY_EXISTS Code = 6 - // The caller does not have permission to execute the specified - // operation. `PERMISSION_DENIED` must not be used for rejections - // caused by exhausting some resource (use `RESOURCE_EXHAUSTED` - // instead for those errors). `PERMISSION_DENIED` must not be - // used if the caller can not be identified (use `UNAUTHENTICATED` - // instead for those errors). This error code does not imply the - // request is valid or the requested entity exists or satisfies - // other pre-conditions. 
- // - // HTTP Mapping: 403 Forbidden - Code_PERMISSION_DENIED Code = 7 - // The request does not have valid authentication credentials for the - // operation. - // - // HTTP Mapping: 401 Unauthorized - Code_UNAUTHENTICATED Code = 16 - // Some resource has been exhausted, perhaps a per-user quota, or - // perhaps the entire file system is out of space. - // - // HTTP Mapping: 429 Too Many Requests - Code_RESOURCE_EXHAUSTED Code = 8 - // The operation was rejected because the system is not in a state - // required for the operation's execution. For example, the directory - // to be deleted is non-empty, an rmdir operation is applied to - // a non-directory, etc. - // - // Service implementors can use the following guidelines to decide - // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: - // - // (a) Use `UNAVAILABLE` if the client can retry just the failing call. - // (b) Use `ABORTED` if the client should retry at a higher level. For - // example, when a client-specified test-and-set fails, indicating the - // client should restart a read-modify-write sequence. - // (c) Use `FAILED_PRECONDITION` if the client should not retry until - // the system state has been explicitly fixed. For example, if an "rmdir" - // fails because the directory is non-empty, `FAILED_PRECONDITION` - // should be returned since the client should not retry unless - // the files are deleted from the directory. - // - // HTTP Mapping: 400 Bad Request - Code_FAILED_PRECONDITION Code = 9 - // The operation was aborted, typically due to a concurrency issue such as - // a sequencer check failure or transaction abort. - // - // See the guidelines above for deciding between `FAILED_PRECONDITION`, - // `ABORTED`, and `UNAVAILABLE`. - // - // HTTP Mapping: 409 Conflict - Code_ABORTED Code = 10 - // The operation was attempted past the valid range. E.g., seeking or - // reading past end-of-file. - // - // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may - // be fixed if the system state changes. For example, a 32-bit file - // system will generate `INVALID_ARGUMENT` if asked to read at an - // offset that is not in the range [0,2^32-1], but it will generate - // `OUT_OF_RANGE` if asked to read from an offset past the current - // file size. - // - // There is a fair bit of overlap between `FAILED_PRECONDITION` and - // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific - // error) when it applies so that callers who are iterating through - // a space can easily look for an `OUT_OF_RANGE` error to detect when - // they are done. - // - // HTTP Mapping: 400 Bad Request - Code_OUT_OF_RANGE Code = 11 - // The operation is not implemented or is not supported/enabled in this - // service. - // - // HTTP Mapping: 501 Not Implemented - Code_UNIMPLEMENTED Code = 12 - // Internal errors. This means that some invariants expected by the - // underlying system have been broken. This error code is reserved - // for serious errors. - // - // HTTP Mapping: 500 Internal Server Error - Code_INTERNAL Code = 13 - // The service is currently unavailable. This is most likely a - // transient condition, which can be corrected by retrying with - // a backoff. Note that it is not always safe to retry - // non-idempotent operations. - // - // See the guidelines above for deciding between `FAILED_PRECONDITION`, - // `ABORTED`, and `UNAVAILABLE`. - // - // HTTP Mapping: 503 Service Unavailable - Code_UNAVAILABLE Code = 14 - // Unrecoverable data loss or corruption. 
- // - // HTTP Mapping: 500 Internal Server Error - Code_DATA_LOSS Code = 15 -) - -// Enum value maps for Code. -var ( - Code_name = map[int32]string{ - 0: "OK", - 1: "CANCELLED", - 2: "UNKNOWN", - 3: "INVALID_ARGUMENT", - 4: "DEADLINE_EXCEEDED", - 5: "NOT_FOUND", - 6: "ALREADY_EXISTS", - 7: "PERMISSION_DENIED", - 16: "UNAUTHENTICATED", - 8: "RESOURCE_EXHAUSTED", - 9: "FAILED_PRECONDITION", - 10: "ABORTED", - 11: "OUT_OF_RANGE", - 12: "UNIMPLEMENTED", - 13: "INTERNAL", - 14: "UNAVAILABLE", - 15: "DATA_LOSS", - } - Code_value = map[string]int32{ - "OK": 0, - "CANCELLED": 1, - "UNKNOWN": 2, - "INVALID_ARGUMENT": 3, - "DEADLINE_EXCEEDED": 4, - "NOT_FOUND": 5, - "ALREADY_EXISTS": 6, - "PERMISSION_DENIED": 7, - "UNAUTHENTICATED": 16, - "RESOURCE_EXHAUSTED": 8, - "FAILED_PRECONDITION": 9, - "ABORTED": 10, - "OUT_OF_RANGE": 11, - "UNIMPLEMENTED": 12, - "INTERNAL": 13, - "UNAVAILABLE": 14, - "DATA_LOSS": 15, - } -) - -func (x Code) Enum() *Code { - p := new(Code) - *p = x - return p -} - -func (x Code) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Code) Descriptor() protoreflect.EnumDescriptor { - return file_google_rpc_code_proto_enumTypes[0].Descriptor() -} - -func (Code) Type() protoreflect.EnumType { - return &file_google_rpc_code_proto_enumTypes[0] -} - -func (x Code) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Code.Descriptor instead. -func (Code) EnumDescriptor() ([]byte, []int) { - return file_google_rpc_code_proto_rawDescGZIP(), []int{0} -} - -var File_google_rpc_code_proto protoreflect.FileDescriptor - -var file_google_rpc_code_proto_rawDesc = []byte{ - 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x72, 0x70, 0x63, 0x2a, 0xb7, 0x02, 0x0a, 0x04, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x06, 0x0a, 0x02, - 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, - 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, - 0x12, 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, - 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, - 0x4e, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, - 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, - 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, - 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, - 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, - 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x12, 0x16, 0x0a, 0x12, - 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, - 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, - 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, - 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, - 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, - 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 
0x12, - 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, - 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, - 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x42, 0x58, 0x0a, - 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, - 0x09, 0x43, 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3b, 0x63, 0x6f, 0x64, - 0x65, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_rpc_code_proto_rawDescOnce sync.Once - file_google_rpc_code_proto_rawDescData = file_google_rpc_code_proto_rawDesc -) - -func file_google_rpc_code_proto_rawDescGZIP() []byte { - file_google_rpc_code_proto_rawDescOnce.Do(func() { - file_google_rpc_code_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_code_proto_rawDescData) - }) - return file_google_rpc_code_proto_rawDescData -} - -var file_google_rpc_code_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_rpc_code_proto_goTypes = []interface{}{ - (Code)(0), // 0: google.rpc.Code -} -var file_google_rpc_code_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_rpc_code_proto_init() } -func file_google_rpc_code_proto_init() { - if File_google_rpc_code_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_rpc_code_proto_rawDesc, - NumEnums: 1, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_rpc_code_proto_goTypes, - DependencyIndexes: file_google_rpc_code_proto_depIdxs, - EnumInfos: file_google_rpc_code_proto_enumTypes, - }.Build() - File_google_rpc_code_proto = out.File - file_google_rpc_code_proto_rawDesc = nil - file_google_rpc_code_proto_goTypes = nil - file_google_rpc_code_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go deleted file mode 100644 index 7bd161e48a..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ /dev/null @@ -1,1314 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 -// source: google/rpc/error_details.proto - -package errdetails - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Describes the cause of the error with structured details. -// -// Example of an error when contacting the "pubsub.googleapis.com" API when it -// is not enabled: -// -// { "reason": "API_DISABLED" -// "domain": "googleapis.com" -// "metadata": { -// "resource": "projects/123", -// "service": "pubsub.googleapis.com" -// } -// } -// -// This response indicates that the pubsub.googleapis.com API is not enabled. -// -// Example of an error that is returned when attempting to create a Spanner -// instance in a region that is out of stock: -// -// { "reason": "STOCKOUT" -// "domain": "spanner.googleapis.com", -// "metadata": { -// "availableRegions": "us-central1,us-east2" -// } -// } -type ErrorInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The reason of the error. This is a constant value that identifies the - // proximate cause of the error. Error reasons are unique within a particular - // domain of errors. This should be at most 63 characters and match a - // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents - // UPPER_SNAKE_CASE. - Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` - // The logical grouping to which the "reason" belongs. The error domain - // is typically the registered service name of the tool or product that - // generates the error. Example: "pubsub.googleapis.com". If the error is - // generated by some common infrastructure, the error domain must be a - // globally unique value that identifies the infrastructure. For Google API - // infrastructure, the error domain is "googleapis.com". - Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` - // Additional structured details about this error. - // - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in - // length. When identifying the current value of an exceeded limit, the units - // should be contained in the key, not the value. For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of - // instances that can be created in a single (batch) request. 
- Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *ErrorInfo) Reset() { - *x = ErrorInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ErrorInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErrorInfo) ProtoMessage() {} - -func (x *ErrorInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. -func (*ErrorInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} -} - -func (x *ErrorInfo) GetReason() string { - if x != nil { - return x.Reason - } - return "" -} - -func (x *ErrorInfo) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *ErrorInfo) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - -// Describes when the clients can retry a failed request. Clients could ignore -// the recommendation here or retry when this information is missing from error -// responses. -// -// It's always recommended that clients should use exponential backoff when -// retrying. -// -// Clients should wait until `retry_delay` amount of time has passed since -// receiving the error response before retrying. If retrying requests also -// fail, clients should use an exponential backoff scheme to gradually increase -// the delay between retries based on `retry_delay`, until either a maximum -// number of retries have been reached or a maximum retry delay cap has been -// reached. -type RetryInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Clients should wait at least this long between retrying the same request. - RetryDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"` -} - -func (x *RetryInfo) Reset() { - *x = RetryInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RetryInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RetryInfo) ProtoMessage() {} - -func (x *RetryInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead. -func (*RetryInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} -} - -func (x *RetryInfo) GetRetryDelay() *durationpb.Duration { - if x != nil { - return x.RetryDelay - } - return nil -} - -// Describes additional debugging info. 
-type DebugInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The stack trace entries indicating where the error occurred. - StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"` - // Additional debugging information provided by the server. - Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` -} - -func (x *DebugInfo) Reset() { - *x = DebugInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DebugInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DebugInfo) ProtoMessage() {} - -func (x *DebugInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead. -func (*DebugInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} -} - -func (x *DebugInfo) GetStackEntries() []string { - if x != nil { - return x.StackEntries - } - return nil -} - -func (x *DebugInfo) GetDetail() string { - if x != nil { - return x.Detail - } - return "" -} - -// Describes how a quota check failed. -// -// For example if a daily limit was exceeded for the calling project, -// a service could respond with a QuotaFailure detail containing the project -// id and the description of the quota limit that was exceeded. If the -// calling project hasn't enabled the service in the developer console, then -// a service could respond with the project id and set `service_disabled` -// to true. -// -// Also see RetryInfo and Help types for other details about handling a -// quota failure. -type QuotaFailure struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Describes all quota violations. - Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` -} - -func (x *QuotaFailure) Reset() { - *x = QuotaFailure{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QuotaFailure) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QuotaFailure) ProtoMessage() {} - -func (x *QuotaFailure) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead. -func (*QuotaFailure) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} -} - -func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { - if x != nil { - return x.Violations - } - return nil -} - -// Describes what preconditions have failed. 
-// -// For example, if an RPC failed because it required the Terms of Service to be -// acknowledged, it could list the terms of service violation in the -// PreconditionFailure message. -type PreconditionFailure struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Describes all precondition violations. - Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` -} - -func (x *PreconditionFailure) Reset() { - *x = PreconditionFailure{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PreconditionFailure) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PreconditionFailure) ProtoMessage() {} - -func (x *PreconditionFailure) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PreconditionFailure.ProtoReflect.Descriptor instead. -func (*PreconditionFailure) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4} -} - -func (x *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation { - if x != nil { - return x.Violations - } - return nil -} - -// Describes violations in a client request. This error type focuses on the -// syntactic aspects of the request. -type BadRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Describes all violations in a client request. - FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"` -} - -func (x *BadRequest) Reset() { - *x = BadRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BadRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BadRequest) ProtoMessage() {} - -func (x *BadRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BadRequest.ProtoReflect.Descriptor instead. -func (*BadRequest) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5} -} - -func (x *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation { - if x != nil { - return x.FieldViolations - } - return nil -} - -// Contains metadata about the request that clients can attach when filing a bug -// or providing other forms of feedback. -type RequestInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An opaque string that should only be interpreted by the service generating - // it. For example, it can be used to identify requests in the service's logs. 
- RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - // Any data that was used to serve this request. For example, an encrypted - // stack trace that can be sent back to the service provider for debugging. - ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"` -} - -func (x *RequestInfo) Reset() { - *x = RequestInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RequestInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RequestInfo) ProtoMessage() {} - -func (x *RequestInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RequestInfo.ProtoReflect.Descriptor instead. -func (*RequestInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{6} -} - -func (x *RequestInfo) GetRequestId() string { - if x != nil { - return x.RequestId - } - return "" -} - -func (x *RequestInfo) GetServingData() string { - if x != nil { - return x.ServingData - } - return "" -} - -// Describes the resource that is being accessed. -type ResourceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A name for the type of resource being accessed, e.g. "sql table", - // "cloud storage bucket", "file", "Google calendar"; or the type URL - // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". - ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` - // The name of the resource being accessed. For example, a shared calendar - // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - // error is - // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. - ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` - // The owner of the resource (optional). - // For example, "user:" or "project:". - Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` - // Describes what error is encountered when accessing this resource. - // For example, updating a cloud project may require the `writer` permission - // on the developer console project. 
- Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` -} - -func (x *ResourceInfo) Reset() { - *x = ResourceInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResourceInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResourceInfo) ProtoMessage() {} - -func (x *ResourceInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResourceInfo.ProtoReflect.Descriptor instead. -func (*ResourceInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{7} -} - -func (x *ResourceInfo) GetResourceType() string { - if x != nil { - return x.ResourceType - } - return "" -} - -func (x *ResourceInfo) GetResourceName() string { - if x != nil { - return x.ResourceName - } - return "" -} - -func (x *ResourceInfo) GetOwner() string { - if x != nil { - return x.Owner - } - return "" -} - -func (x *ResourceInfo) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -// Provides links to documentation or for performing an out of band action. -// -// For example, if a quota check failed with an error indicating the calling -// project hasn't enabled the accessed service, this can contain a URL pointing -// directly to the right place in the developer console to flip the bit. -type Help struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // URL(s) pointing to additional information on handling the current error. - Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"` -} - -func (x *Help) Reset() { - *x = Help{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Help) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Help) ProtoMessage() {} - -func (x *Help) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Help.ProtoReflect.Descriptor instead. -func (*Help) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8} -} - -func (x *Help) GetLinks() []*Help_Link { - if x != nil { - return x.Links - } - return nil -} - -// Provides a localized error message that is safe to return to the user -// which can be attached to an RPC error. -type LocalizedMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The locale used following the specification defined at - // https://www.rfc-editor.org/rfc/bcp/bcp47.txt. - // Examples are: "en-US", "fr-CH", "es-MX" - Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"` - // The localized error message in the above locale. 
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` -} - -func (x *LocalizedMessage) Reset() { - *x = LocalizedMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LocalizedMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LocalizedMessage) ProtoMessage() {} - -func (x *LocalizedMessage) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LocalizedMessage.ProtoReflect.Descriptor instead. -func (*LocalizedMessage) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{9} -} - -func (x *LocalizedMessage) GetLocale() string { - if x != nil { - return x.Locale - } - return "" -} - -func (x *LocalizedMessage) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -// A message type used to describe a single quota violation. For example, a -// daily quota or a custom quota that was exceeded. -type QuotaFailure_Violation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The subject on which the quota check failed. - // For example, "clientip:" or "project:". - Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` - // A description of how the quota check failed. Clients can use this - // description to find more about the quota configuration in the service's - // public documentation, or find the relevant quota limit to adjust through - // developer console. - // - // For example: "Service disabled" or "Daily Limit for read operations - // exceeded". - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` -} - -func (x *QuotaFailure_Violation) Reset() { - *x = QuotaFailure_Violation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QuotaFailure_Violation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QuotaFailure_Violation) ProtoMessage() {} - -func (x *QuotaFailure_Violation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QuotaFailure_Violation.ProtoReflect.Descriptor instead. -func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3, 0} -} - -func (x *QuotaFailure_Violation) GetSubject() string { - if x != nil { - return x.Subject - } - return "" -} - -func (x *QuotaFailure_Violation) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -// A message type used to describe a single precondition failure. 
-type PreconditionFailure_Violation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The type of PreconditionFailure. We recommend using a service-specific - // enum type to define the supported precondition violation subjects. For - // example, "TOS" for "Terms of Service violation". - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // The subject, relative to the type, that failed. - // For example, "google.com/cloud" relative to the "TOS" type would indicate - // which terms of service is being referenced. - Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` - // A description of how the precondition failed. Developers can use this - // description to understand how to fix the failure. - // - // For example: "Terms of service not accepted". - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` -} - -func (x *PreconditionFailure_Violation) Reset() { - *x = PreconditionFailure_Violation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PreconditionFailure_Violation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PreconditionFailure_Violation) ProtoMessage() {} - -func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PreconditionFailure_Violation.ProtoReflect.Descriptor instead. -func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4, 0} -} - -func (x *PreconditionFailure_Violation) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *PreconditionFailure_Violation) GetSubject() string { - if x != nil { - return x.Subject - } - return "" -} - -func (x *PreconditionFailure_Violation) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -// A message type used to describe a single bad request field. -type BadRequest_FieldViolation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A path that leads to a field in the request body. The value will be a - // sequence of dot-separated identifiers that identify a protocol buffer - // field. - // - // Consider the following: - // - // message CreateContactRequest { - // message EmailAddress { - // enum Type { - // TYPE_UNSPECIFIED = 0; - // HOME = 1; - // WORK = 2; - // } - // - // optional string email = 1; - // repeated EmailType type = 2; - // } - // - // string full_name = 1; - // repeated EmailAddress email_addresses = 2; - // } - // - // In this example, in proto `field` could take one of the following values: - // - // - `full_name` for a violation in the `full_name` value - // - `email_addresses[1].email` for a violation in the `email` field of the - // first `email_addresses` message - // - `email_addresses[3].type[2]` for a violation in the second `type` - // value in the third `email_addresses` message. 
- // - // In JSON, the same values are represented as: - // - // - `fullName` for a violation in the `fullName` value - // - `emailAddresses[1].email` for a violation in the `email` field of the - // first `emailAddresses` message - // - `emailAddresses[3].type[2]` for a violation in the second `type` - // value in the third `emailAddresses` message. - Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` - // A description of why the request element is bad. - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` -} - -func (x *BadRequest_FieldViolation) Reset() { - *x = BadRequest_FieldViolation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BadRequest_FieldViolation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BadRequest_FieldViolation) ProtoMessage() {} - -func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BadRequest_FieldViolation.ProtoReflect.Descriptor instead. -func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5, 0} -} - -func (x *BadRequest_FieldViolation) GetField() string { - if x != nil { - return x.Field - } - return "" -} - -func (x *BadRequest_FieldViolation) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -// Describes a URL link. -type Help_Link struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Describes what the link offers. - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - // The URL of the link. - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` -} - -func (x *Help_Link) Reset() { - *x = Help_Link{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Help_Link) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Help_Link) ProtoMessage() {} - -func (x *Help_Link) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Help_Link.ProtoReflect.Descriptor instead. 
-func (*Help_Link) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8, 0} -} - -func (x *Help_Link) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Help_Link) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -var File_google_rpc_error_details_proto protoreflect.FileDescriptor - -var file_google_rpc_error_details_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x01, 0x0a, - 0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, - 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x3f, 0x0a, 0x08, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, - 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x09, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x64, - 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x44, 0x65, 0x6c, 0x61, - 0x79, 0x22, 0x48, 0x0a, 0x09, 0x44, 0x65, 0x62, 0x75, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, - 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, - 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c, - 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a, - 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, - 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, - 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 
0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, - 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, - 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, - 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, - 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, - 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_rpc_error_details_proto_rawDescOnce sync.Once - file_google_rpc_error_details_proto_rawDescData = file_google_rpc_error_details_proto_rawDesc -) - -func file_google_rpc_error_details_proto_rawDescGZIP() []byte { - file_google_rpc_error_details_proto_rawDescOnce.Do(func() { - file_google_rpc_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_error_details_proto_rawDescData) - }) - return file_google_rpc_error_details_proto_rawDescData -} - -var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15) -var file_google_rpc_error_details_proto_goTypes = []interface{}{ - (*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo - (*RetryInfo)(nil), // 1: google.rpc.RetryInfo - (*DebugInfo)(nil), // 2: google.rpc.DebugInfo - (*QuotaFailure)(nil), // 3: google.rpc.QuotaFailure - (*PreconditionFailure)(nil), // 4: google.rpc.PreconditionFailure - (*BadRequest)(nil), // 5: google.rpc.BadRequest - (*RequestInfo)(nil), // 6: google.rpc.RequestInfo - (*ResourceInfo)(nil), // 7: google.rpc.ResourceInfo - (*Help)(nil), // 8: google.rpc.Help - (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage - nil, // 10: google.rpc.ErrorInfo.MetadataEntry - (*QuotaFailure_Violation)(nil), // 11: 
google.rpc.QuotaFailure.Violation - (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation - (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation - (*Help_Link)(nil), // 14: google.rpc.Help.Link - (*durationpb.Duration)(nil), // 15: google.protobuf.Duration -} -var file_google_rpc_error_details_proto_depIdxs = []int32{ - 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry - 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration - 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation - 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation - 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation - 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name -} - -func init() { file_google_rpc_error_details_proto_init() } -func file_google_rpc_error_details_proto_init() { - if File_google_rpc_error_details_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetryInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DebugInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QuotaFailure); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_rpc_error_details_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PreconditionFailure); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_rpc_error_details_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BadRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_rpc_error_details_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_rpc_error_details_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResourceInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_rpc_error_details_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Help); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_rpc_error_details_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LocalizedMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QuotaFailure_Violation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PreconditionFailure_Violation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BadRequest_FieldViolation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Help_Link); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_rpc_error_details_proto_rawDesc, - NumEnums: 0, - NumMessages: 15, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_rpc_error_details_proto_goTypes, - DependencyIndexes: file_google_rpc_error_details_proto_depIdxs, - MessageInfos: file_google_rpc_error_details_proto_msgTypes, - }.Build() - File_google_rpc_error_details_proto = out.File - file_google_rpc_error_details_proto_rawDesc = nil - file_google_rpc_error_details_proto_goTypes = nil - file_google_rpc_error_details_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go deleted file mode 100644 index 72afd8b000..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.26.0 -// protoc v3.12.2 -// source: google/type/date.proto - -package date - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Represents a whole or partial calendar date, such as a birthday. The time of -// day and time zone are either specified elsewhere or are insignificant. The -// date is relative to the Gregorian Calendar. This can represent one of the -// following: -// -// * A full date, with non-zero year, month, and day values -// * A month and day value, with a zero year, such as an anniversary -// * A year on its own, with zero month and day values -// * A year and month value, with a zero day, such as a credit card expiration -// date -// -// Related types are [google.type.TimeOfDay][google.type.TimeOfDay] and -// `google.protobuf.Timestamp`. -type Date struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Year of the date. Must be from 1 to 9999, or 0 to specify a date without - // a year. - Year int32 `protobuf:"varint,1,opt,name=year,proto3" json:"year,omitempty"` - // Month of a year. Must be from 1 to 12, or 0 to specify a year without a - // month and day. - Month int32 `protobuf:"varint,2,opt,name=month,proto3" json:"month,omitempty"` - // Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 - // to specify a year by itself or a year and month where the day isn't - // significant. - Day int32 `protobuf:"varint,3,opt,name=day,proto3" json:"day,omitempty"` -} - -func (x *Date) Reset() { - *x = Date{} - if protoimpl.UnsafeEnabled { - mi := &file_google_type_date_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Date) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Date) ProtoMessage() {} - -func (x *Date) ProtoReflect() protoreflect.Message { - mi := &file_google_type_date_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Date.ProtoReflect.Descriptor instead. 
-func (*Date) Descriptor() ([]byte, []int) { - return file_google_type_date_proto_rawDescGZIP(), []int{0} -} - -func (x *Date) GetYear() int32 { - if x != nil { - return x.Year - } - return 0 -} - -func (x *Date) GetMonth() int32 { - if x != nil { - return x.Month - } - return 0 -} - -func (x *Date) GetDay() int32 { - if x != nil { - return x.Day - } - return 0 -} - -var File_google_type_date_proto protoreflect.FileDescriptor - -var file_google_type_date_proto_rawDesc = []byte{ - 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, - 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x42, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x79, 0x65, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x79, 0x65, 0x61, - 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x61, 0x79, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x64, 0x61, 0x79, 0x42, 0x5d, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x42, 0x09, 0x44, 0x61, - 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3b, 0x64, 0x61, 0x74, 0x65, 0xf8, - 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_type_date_proto_rawDescOnce sync.Once - file_google_type_date_proto_rawDescData = file_google_type_date_proto_rawDesc -) - -func file_google_type_date_proto_rawDescGZIP() []byte { - file_google_type_date_proto_rawDescOnce.Do(func() { - file_google_type_date_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_date_proto_rawDescData) - }) - return file_google_type_date_proto_rawDescData -} - -var file_google_type_date_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_type_date_proto_goTypes = []interface{}{ - (*Date)(nil), // 0: google.type.Date -} -var file_google_type_date_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_type_date_proto_init() } -func file_google_type_date_proto_init() { - if File_google_type_date_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_type_date_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Date); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_type_date_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_type_date_proto_goTypes, - DependencyIndexes: file_google_type_date_proto_depIdxs, - MessageInfos: file_google_type_date_proto_msgTypes, - }.Build() - 
File_google_type_date_proto = out.File - file_google_type_date_proto_rawDesc = nil - file_google_type_date_proto_goTypes = nil - file_google_type_date_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go deleted file mode 100644 index 38ef56f73c..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.12.2 -// source: google/type/expr.proto - -package expr - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Represents a textual expression in the Common Expression Language (CEL) -// syntax. CEL is a C-like expression language. The syntax and semantics of CEL -// are documented at https://github.com/google/cel-spec. -// -// Example (Comparison): -// -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" -// -// Example (Equality): -// -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == request.auth.claims.email" -// -// Example (Logic): -// -// title: "Public documents" -// description: "Determine whether the document should be publicly visible" -// expression: "document.type != 'private' && document.type != 'internal'" -// -// Example (Data Manipulation): -// -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + string(document.create_time)" -// -// The exact variables and functions that may be referenced within an expression -// are determined by the service that evaluates it. See the service -// documentation for additional information. -type Expr struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Textual representation of an expression in Common Expression Language - // syntax. - Expression string `protobuf:"bytes,1,opt,name=expression,proto3" json:"expression,omitempty"` - // Optional. Title for the expression, i.e. a short string describing - // its purpose. This can be used e.g. in UIs which allow to enter the - // expression. - Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` - // Optional. Description of the expression. This is a longer text which - // describes the expression, e.g. 
when hovered over it in a UI. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // Optional. String indicating the location of the expression for error - // reporting, e.g. a file name and a position in the file. - Location string `protobuf:"bytes,4,opt,name=location,proto3" json:"location,omitempty"` -} - -func (x *Expr) Reset() { - *x = Expr{} - if protoimpl.UnsafeEnabled { - mi := &file_google_type_expr_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Expr) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Expr) ProtoMessage() {} - -func (x *Expr) ProtoReflect() protoreflect.Message { - mi := &file_google_type_expr_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Expr.ProtoReflect.Descriptor instead. -func (*Expr) Descriptor() ([]byte, []int) { - return file_google_type_expr_proto_rawDescGZIP(), []int{0} -} - -func (x *Expr) GetExpression() string { - if x != nil { - return x.Expression - } - return "" -} - -func (x *Expr) GetTitle() string { - if x != nil { - return x.Title - } - return "" -} - -func (x *Expr) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Expr) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -var File_google_type_expr_proto protoreflect.FileDescriptor - -var file_google_type_expr_proto_rawDesc = []byte{ - 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x7a, 0x0a, 0x04, 0x45, 0x78, 0x70, 0x72, 0x12, 0x1e, 0x0a, - 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, - 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, - 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x5a, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x42, 0x09, 0x45, 0x78, 0x70, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, - 0x70, 0x72, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_type_expr_proto_rawDescOnce sync.Once - file_google_type_expr_proto_rawDescData = file_google_type_expr_proto_rawDesc -) - -func file_google_type_expr_proto_rawDescGZIP() []byte { - file_google_type_expr_proto_rawDescOnce.Do(func() { - 
file_google_type_expr_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_expr_proto_rawDescData) - }) - return file_google_type_expr_proto_rawDescData -} - -var file_google_type_expr_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_type_expr_proto_goTypes = []interface{}{ - (*Expr)(nil), // 0: google.type.Expr -} -var file_google_type_expr_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_type_expr_proto_init() } -func file_google_type_expr_proto_init() { - if File_google_type_expr_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_type_expr_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_type_expr_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_type_expr_proto_goTypes, - DependencyIndexes: file_google_type_expr_proto_depIdxs, - MessageInfos: file_google_type_expr_proto_msgTypes, - }.Build() - File_google_type_expr_proto = out.File - file_google_type_expr_proto_rawDesc = nil - file_google_type_expr_proto_goTypes = nil - file_google_type_expr_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/internal/doc.go b/vendor/google.golang.org/genproto/internal/doc.go deleted file mode 100644 index 90e89b4aa3..0000000000 --- a/vendor/google.golang.org/genproto/internal/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file makes internal an importable go package -// for use with backreferences from submodules. -package internal diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 0e6ae69a58..ab0fbb79b8 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the @@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. 
For more information see the ## Installation -With [Go module][] support (Go 1.11+), simply add the following import +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + ```go import "google.golang.org/grpc" ``` -to your code, and then `go [build|run|test]` will automatically fetch the -necessary dependencies. - -Otherwise, to install the `grpc-go` package, run the following command: - -```console -$ go get -u google.golang.org/grpc -``` - > **Note:** If you are trying to access `grpc-go` from **China**, see the > [FAQ](#FAQ) below. @@ -56,15 +49,6 @@ To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. -- Without Go module support: `git clone` the repo manually: - - ```sh - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: @@ -76,33 +60,13 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). ### Compiling error, undefined: grpc.SupportPackageIsVersion -#### If you are using Go modules: - -Ensure your gRPC-Go version is `require`d at the appropriate version in -the same module containing the generated `.pb.go` files. For example, -`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: - -```go -module - -require ( - google.golang.org/grpc v1.27.0 -) -``` - -#### If you are *not* using Go modules: - -Update the `proto` package, gRPC package, and rebuild the `.proto` files: - -```sh -go get -u github.com/golang/protobuf/{proto,protoc-gen-go} -go get -u google.golang.org/grpc -protoc --go_out=plugins=grpc:. *.proto -``` +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. ### How to turn on logging @@ -121,9 +85,11 @@ possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown - 1. Keepalive parameters caused connection shutdown, for example if you have configured - your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). - If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. 
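For reference, a minimal sketch of such a server-side keepalive configuration (the durations are illustrative, not recommendations):

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Recycle connections periodically (e.g. to trigger DNS lookups), but
	// give in-flight RPCs a grace period before the connection is closed.
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionAge:      5 * time.Minute, // illustrative value
		MaxConnectionAgeGrace: time.Minute,     // illustrative value
	}))
	defer srv.Stop()
	// ... register services and call srv.Serve(listener) as usual.
}
```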
It can be tricky to debug this because the error happens on the client side but diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 3efca45914..52d530d7ad 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -34,26 +34,26 @@ import ( // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an // Attributes or if they were received from one. If values implement 'Equal(o -// interface{}) bool', it will be called by (*Attributes).Equal to determine -// whether two values with the same key should be considered equal. +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. type Attributes struct { - m map[interface{}]interface{} + m map[any]any } // New returns a new Attributes containing the key/value pair. -func New(key, value interface{}) *Attributes { - return &Attributes{m: map[interface{}]interface{}{key: value}} +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the // last value overwrites all previous values for that key. To remove an // existing key, use a nil value. value should not be modified later. -func (a *Attributes) WithValue(key, value interface{}) *Attributes { +func (a *Attributes) WithValue(key, value any) *Attributes { if a == nil { return New(key, value) } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} + n := &Attributes{m: make(map[any]any, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } @@ -63,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. -func (a *Attributes) Value(key interface{}) interface{} { +func (a *Attributes) Value(key any) any { if a == nil { return nil } return a.m[key] } -// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) -// bool' is implemented for a value in the attributes, it is called to -// determine if the value matches the one stored in the other attributes. If -// Equal is not implemented, standard equality is used to determine if the two -// values are equal. Note that some types (e.g. maps) aren't comparable by -// default, so they must be wrapped in a struct, or in an alias type, with Equal -// defined. +// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. 
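//
// For example, a minimal sketch (key and member are hypothetical
// caller-defined types, not part of this package):
//
//	type key struct{}
//
//	type member struct{ names []string }
//
//	// Equal lets member act as an attribute value even though its slice
//	// field is not comparable with ==.
//	func (m member) Equal(o any) bool {
//	    om, ok := o.(member)
//	    return ok && reflect.DeepEqual(m.names, om.names)
//	}
//
//	a := attributes.New(key{}, member{names: []string{"example"}})
//	b := attributes.New(key{}, member{names: []string{"example"}})
//	// a.Equal(b) reports true because member implements Equal.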
func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true @@ -93,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool { // o missing element of a return false } - if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if eq, ok := v.(interface{ Equal(o any) bool }); ok { if !eq.Equal(ov) { return false } @@ -112,19 +111,31 @@ func (a *Attributes) String() string { sb.WriteString("{") first := true for k, v := range a.m { - var key, val string - if str, ok := k.(interface{ String() string }); ok { - key = str.String() - } - if str, ok := v.(interface{ String() string }); ok { - val = str.String() - } if !first { sb.WriteString(", ") } - sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) first = false } sb.WriteString("}") return sb.String() } + +func str(x any) (s string) { + if v, ok := x.(fmt.Stringer); ok { + return fmt.Sprint(v) + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// It is impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. +func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 8f00523c0e..d79560a2e2 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } m[strings.ToLower(b.Name())] = b } @@ -70,6 +79,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is registered with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. 
grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } @@ -105,8 +120,8 @@ type SubConn interface { // // This will trigger a state transition for the SubConn. // - // Deprecated: This method is now part of the ClientConn interface and will - // eventually be removed from here. + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -115,6 +130,13 @@ type SubConn interface { // creates a new one and returns it. Returns a close function which must // be called when the Producer is no longer needed. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() } // NewSubConnOptions contains options to create new SubConn. @@ -129,6 +151,11 @@ type NewSubConnOptions struct { // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) } // State contains the balancer's state relevant to the gRPC ClientConn. @@ -150,16 +177,24 @@ type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) // UpdateAddresses updates the addresses used in the passed in SubConn. // gRPC checks if the currently connected address is still in the new list. // If so, the connection will be kept. Else, the connection will be // gracefully closed, and a new connection will be created. // - // This will trigger a state transition for the SubConn. + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has @@ -250,7 +285,7 @@ type DoneInfo struct { // trailing metadata. // // The only supported type now is *orca_v3.LoadReport. - ServerLoad interface{} + ServerLoad any } var ( @@ -343,9 +378,13 @@ type Balancer interface { ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. 
The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. Close() } @@ -390,15 +429,14 @@ var ErrBadResolverState = errors.New("bad resolver state") type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as interface{} to avoid a - // dependency cycle. Should also return a close function that will be - // called when all references to the Producer have been given up. - Build(grpcClientConnInterface interface{}) (p Producer, close func()) + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) } // A Producer is a type shared among potentially many consumers. It is // associated with a SubConn, and an implementation will typically contain // other methods to provide additional functionality, e.g. configuration or // subscription registration. -type Producer interface { -} +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 3929c26d31..a7f1eeec8e 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { - b.cc.RemoveSubConn(sc) + sc.Shutdown() b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. + // The entry will be deleted in updateSubConnState. } } // If resolver state contains no addresses, return an error so ClientConn @@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() { b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. 
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) @@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. @@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. +// and it doesn't need to call Shutdown for the SubConns. func (b *baseBalancer) Close() { } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go deleted file mode 100644 index f070878bd9..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ /dev/null @@ -1,957 +0,0 @@ -// Copyright 2015 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file defines the GRPCLB LoadBalancing protocol. -// -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v4.22.0 -// source: grpc/lb/v1/load_balancer.proto - -package grpc_lb_v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type LoadBalanceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to LoadBalanceRequestType: - // - // *LoadBalanceRequest_InitialRequest - // *LoadBalanceRequest_ClientStats - LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` -} - -func (x *LoadBalanceRequest) Reset() { - *x = LoadBalanceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LoadBalanceRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LoadBalanceRequest) ProtoMessage() {} - -func (x *LoadBalanceRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LoadBalanceRequest.ProtoReflect.Descriptor instead. -func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{0} -} - -func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { - if m != nil { - return m.LoadBalanceRequestType - } - return nil -} - -func (x *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { - if x, ok := x.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { - return x.InitialRequest - } - return nil -} - -func (x *LoadBalanceRequest) GetClientStats() *ClientStats { - if x, ok := x.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { - return x.ClientStats - } - return nil -} - -type isLoadBalanceRequest_LoadBalanceRequestType interface { - isLoadBalanceRequest_LoadBalanceRequestType() -} - -type LoadBalanceRequest_InitialRequest struct { - // This message should be sent on the first request to the load balancer. - InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,proto3,oneof"` -} - -type LoadBalanceRequest_ClientStats struct { - // The client stats should be periodically reported to the load balancer - // based on the duration defined in the InitialLoadBalanceResponse. - ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,proto3,oneof"` -} - -func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} - -func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} - -type InitialLoadBalanceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the load balanced service (e.g., service.googleapis.com). Its - // length should be less than 256 bytes. - // The name might include a port number. How to handle the port number is up - // to the balancer. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *InitialLoadBalanceRequest) Reset() { - *x = InitialLoadBalanceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InitialLoadBalanceRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InitialLoadBalanceRequest) ProtoMessage() {} - -func (x *InitialLoadBalanceRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InitialLoadBalanceRequest.ProtoReflect.Descriptor instead. -func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{1} -} - -func (x *InitialLoadBalanceRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Contains the number of calls finished for a particular load balance token. -type ClientStatsPerToken struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // See Server.load_balance_token. - LoadBalanceToken string `protobuf:"bytes,1,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` - // The total number of RPCs that finished associated with the token. - NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"` -} - -func (x *ClientStatsPerToken) Reset() { - *x = ClientStatsPerToken{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClientStatsPerToken) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClientStatsPerToken) ProtoMessage() {} - -func (x *ClientStatsPerToken) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClientStatsPerToken.ProtoReflect.Descriptor instead. -func (*ClientStatsPerToken) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{2} -} - -func (x *ClientStatsPerToken) GetLoadBalanceToken() string { - if x != nil { - return x.LoadBalanceToken - } - return "" -} - -func (x *ClientStatsPerToken) GetNumCalls() int64 { - if x != nil { - return x.NumCalls - } - return 0 -} - -// Contains client level statistics that are useful to load balancing. Each -// count except the timestamp should be reset to zero after reporting the stats. -type ClientStats struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The timestamp of generating the report. - Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // The total number of RPCs that started. 
- NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted,proto3" json:"num_calls_started,omitempty"` - // The total number of RPCs that finished. - NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished,proto3" json:"num_calls_finished,omitempty"` - // The total number of RPCs that failed to reach a server except dropped RPCs. - NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend,proto3" json:"num_calls_finished_with_client_failed_to_send,omitempty"` - // The total number of RPCs that finished and are known to have been received - // by a server. - NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived,proto3" json:"num_calls_finished_known_received,omitempty"` - // The list of dropped calls. - CallsFinishedWithDrop []*ClientStatsPerToken `protobuf:"bytes,8,rep,name=calls_finished_with_drop,json=callsFinishedWithDrop,proto3" json:"calls_finished_with_drop,omitempty"` -} - -func (x *ClientStats) Reset() { - *x = ClientStats{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClientStats) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClientStats) ProtoMessage() {} - -func (x *ClientStats) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClientStats.ProtoReflect.Descriptor instead. 
-func (*ClientStats) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{3} -} - -func (x *ClientStats) GetTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.Timestamp - } - return nil -} - -func (x *ClientStats) GetNumCallsStarted() int64 { - if x != nil { - return x.NumCallsStarted - } - return 0 -} - -func (x *ClientStats) GetNumCallsFinished() int64 { - if x != nil { - return x.NumCallsFinished - } - return 0 -} - -func (x *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { - if x != nil { - return x.NumCallsFinishedWithClientFailedToSend - } - return 0 -} - -func (x *ClientStats) GetNumCallsFinishedKnownReceived() int64 { - if x != nil { - return x.NumCallsFinishedKnownReceived - } - return 0 -} - -func (x *ClientStats) GetCallsFinishedWithDrop() []*ClientStatsPerToken { - if x != nil { - return x.CallsFinishedWithDrop - } - return nil -} - -type LoadBalanceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to LoadBalanceResponseType: - // - // *LoadBalanceResponse_InitialResponse - // *LoadBalanceResponse_ServerList - // *LoadBalanceResponse_FallbackResponse - LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` -} - -func (x *LoadBalanceResponse) Reset() { - *x = LoadBalanceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LoadBalanceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LoadBalanceResponse) ProtoMessage() {} - -func (x *LoadBalanceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LoadBalanceResponse.ProtoReflect.Descriptor instead. -func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{4} -} - -func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { - if m != nil { - return m.LoadBalanceResponseType - } - return nil -} - -func (x *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { - if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { - return x.InitialResponse - } - return nil -} - -func (x *LoadBalanceResponse) GetServerList() *ServerList { - if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { - return x.ServerList - } - return nil -} - -func (x *LoadBalanceResponse) GetFallbackResponse() *FallbackResponse { - if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_FallbackResponse); ok { - return x.FallbackResponse - } - return nil -} - -type isLoadBalanceResponse_LoadBalanceResponseType interface { - isLoadBalanceResponse_LoadBalanceResponseType() -} - -type LoadBalanceResponse_InitialResponse struct { - // This message should be sent on the first response to the client. 
- InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,proto3,oneof"` -} - -type LoadBalanceResponse_ServerList struct { - // Contains the list of servers selected by the load balancer. The client - // should send requests to these servers in the specified order. - ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,proto3,oneof"` -} - -type LoadBalanceResponse_FallbackResponse struct { - // If this field is set, then the client should eagerly enter fallback - // mode (even if there are existing, healthy connections to backends). - FallbackResponse *FallbackResponse `protobuf:"bytes,3,opt,name=fallback_response,json=fallbackResponse,proto3,oneof"` -} - -func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} - -func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} - -func (*LoadBalanceResponse_FallbackResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} - -type FallbackResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *FallbackResponse) Reset() { - *x = FallbackResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FallbackResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FallbackResponse) ProtoMessage() {} - -func (x *FallbackResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FallbackResponse.ProtoReflect.Descriptor instead. -func (*FallbackResponse) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{5} -} - -type InitialLoadBalanceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This interval defines how often the client should send the client stats - // to the load balancer. Stats should only be reported when the duration is - // positive. - ClientStatsReportInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval,proto3" json:"client_stats_report_interval,omitempty"` -} - -func (x *InitialLoadBalanceResponse) Reset() { - *x = InitialLoadBalanceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InitialLoadBalanceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InitialLoadBalanceResponse) ProtoMessage() {} - -func (x *InitialLoadBalanceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InitialLoadBalanceResponse.ProtoReflect.Descriptor instead. 
-func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{6} -} - -func (x *InitialLoadBalanceResponse) GetClientStatsReportInterval() *durationpb.Duration { - if x != nil { - return x.ClientStatsReportInterval - } - return nil -} - -type ServerList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Contains a list of servers selected by the load balancer. The list will - // be updated when server resolutions change or as needed to balance load - // across more servers. The client should consume the server list in order - // unless instructed otherwise via the client_config. - Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` -} - -func (x *ServerList) Reset() { - *x = ServerList{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerList) ProtoMessage() {} - -func (x *ServerList) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerList.ProtoReflect.Descriptor instead. -func (*ServerList) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{7} -} - -func (x *ServerList) GetServers() []*Server { - if x != nil { - return x.Servers - } - return nil -} - -// Contains server information. When the drop field is not true, use the other -// fields. -type Server struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A resolved address for the server, serialized in network-byte-order. It may - // either be an IPv4 or IPv6 address. - IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` - // A resolved port number for the server. - Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - // An opaque but printable token for load reporting. The client must include - // the token of the picked server into the initial metadata when it starts a - // call to that server. The token is used by the server to verify the request - // and to allow the server to report load to the gRPC LB system. The token is - // also used in client stats for reporting dropped calls. - // - // Its length can be variable but must be less than 50 bytes. - LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` - // Indicates whether this particular request should be dropped by the client. - // If the request is dropped, there will be a corresponding entry in - // ClientStats.calls_finished_with_drop. 
- Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"` -} - -func (x *Server) Reset() { - *x = Server{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Server) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Server) ProtoMessage() {} - -func (x *Server) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Server.ProtoReflect.Descriptor instead. -func (*Server) Descriptor() ([]byte, []int) { - return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{8} -} - -func (x *Server) GetIpAddress() []byte { - if x != nil { - return x.IpAddress - } - return nil -} - -func (x *Server) GetPort() int32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *Server) GetLoadBalanceToken() string { - if x != nil { - return x.LoadBalanceToken - } - return "" -} - -func (x *Server) GetDrop() bool { - if x != nil { - return x.Drop - } - return false -} - -var File_grpc_lb_v1_load_balancer_proto protoreflect.FileDescriptor - -var file_grpc_lb_v1_load_balancer_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x62, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0a, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x01, - 0x0a, 0x12, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, - 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x22, 0x2f, 0x0a, 0x19, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x22, 0x60, 0x0a, 0x13, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x50, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x43, - 0x61, 0x6c, 0x6c, 0x73, 0x22, 0xb0, 0x03, 0x0a, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2a, - 0x0a, 0x11, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x6e, 0x75, 0x6d, 0x43, 0x61, - 0x6c, 0x6c, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x75, - 0x6d, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, - 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x5d, 0x0a, 0x2d, 0x6e, 0x75, 0x6d, 0x5f, - 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x77, - 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, - 0x64, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x26, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, - 0x64, 0x57, 0x69, 0x74, 0x68, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, - 0x64, 0x54, 0x6f, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x48, 0x0a, 0x21, 0x6e, 0x75, 0x6d, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x6b, 0x6e, - 0x6f, 0x77, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x1d, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, - 0x73, 0x68, 0x65, 0x64, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, - 0x64, 0x12, 0x58, 0x0a, 0x18, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, - 0x68, 0x65, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x18, 0x08, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x15, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, 0x73, - 0x68, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x44, 0x72, 0x6f, 0x70, 0x4a, 0x04, 0x08, 0x04, 0x10, - 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x90, 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x53, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 
0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6c, - 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, - 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, - 0x4b, 0x0a, 0x11, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x10, 0x66, 0x61, 0x6c, 0x6c, - 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x1c, 0x0a, 0x1a, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x12, 0x0a, 0x10, 0x46, 0x61, - 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, - 0x0a, 0x1a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x1c, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x72, 0x65, 0x70, - 0x6f, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x40, - 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, - 0x22, 0x83, 0x01, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x69, - 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2c, - 0x0a, 0x12, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x12, 0x0a, 0x04, - 0x64, 0x72, 0x6f, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x72, 0x6f, 
0x70, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x32, 0x62, 0x0a, 0x0c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0b, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x12, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, - 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, - 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x57, 0x0a, 0x0d, 0x69, 0x6f, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, - 0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x62, - 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_lb_v1_load_balancer_proto_rawDescOnce sync.Once - file_grpc_lb_v1_load_balancer_proto_rawDescData = file_grpc_lb_v1_load_balancer_proto_rawDesc -) - -func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte { - file_grpc_lb_v1_load_balancer_proto_rawDescOnce.Do(func() { - file_grpc_lb_v1_load_balancer_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_lb_v1_load_balancer_proto_rawDescData) - }) - return file_grpc_lb_v1_load_balancer_proto_rawDescData -} - -var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_grpc_lb_v1_load_balancer_proto_goTypes = []interface{}{ - (*LoadBalanceRequest)(nil), // 0: grpc.lb.v1.LoadBalanceRequest - (*InitialLoadBalanceRequest)(nil), // 1: grpc.lb.v1.InitialLoadBalanceRequest - (*ClientStatsPerToken)(nil), // 2: grpc.lb.v1.ClientStatsPerToken - (*ClientStats)(nil), // 3: grpc.lb.v1.ClientStats - (*LoadBalanceResponse)(nil), // 4: grpc.lb.v1.LoadBalanceResponse - (*FallbackResponse)(nil), // 5: grpc.lb.v1.FallbackResponse - (*InitialLoadBalanceResponse)(nil), // 6: grpc.lb.v1.InitialLoadBalanceResponse - (*ServerList)(nil), // 7: grpc.lb.v1.ServerList - (*Server)(nil), // 8: grpc.lb.v1.Server - (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 10: google.protobuf.Duration -} -var file_grpc_lb_v1_load_balancer_proto_depIdxs = []int32{ - 1, // 0: grpc.lb.v1.LoadBalanceRequest.initial_request:type_name -> grpc.lb.v1.InitialLoadBalanceRequest - 3, // 1: grpc.lb.v1.LoadBalanceRequest.client_stats:type_name -> grpc.lb.v1.ClientStats - 9, // 2: grpc.lb.v1.ClientStats.timestamp:type_name -> google.protobuf.Timestamp - 2, // 3: grpc.lb.v1.ClientStats.calls_finished_with_drop:type_name -> grpc.lb.v1.ClientStatsPerToken - 6, // 4: grpc.lb.v1.LoadBalanceResponse.initial_response:type_name -> grpc.lb.v1.InitialLoadBalanceResponse - 7, // 5: grpc.lb.v1.LoadBalanceResponse.server_list:type_name -> grpc.lb.v1.ServerList - 5, // 6: grpc.lb.v1.LoadBalanceResponse.fallback_response:type_name -> grpc.lb.v1.FallbackResponse - 10, // 7: grpc.lb.v1.InitialLoadBalanceResponse.client_stats_report_interval:type_name -> google.protobuf.Duration - 8, // 8: grpc.lb.v1.ServerList.servers:type_name -> grpc.lb.v1.Server - 0, // 9: 
grpc.lb.v1.LoadBalancer.BalanceLoad:input_type -> grpc.lb.v1.LoadBalanceRequest - 4, // 10: grpc.lb.v1.LoadBalancer.BalanceLoad:output_type -> grpc.lb.v1.LoadBalanceResponse - 10, // [10:11] is the sub-list for method output_type - 9, // [9:10] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name -} - -func init() { file_grpc_lb_v1_load_balancer_proto_init() } -func file_grpc_lb_v1_load_balancer_proto_init() { - if File_grpc_lb_v1_load_balancer_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalanceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitialLoadBalanceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientStatsPerToken); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientStats); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalanceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FallbackResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitialLoadBalanceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Server); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*LoadBalanceRequest_InitialRequest)(nil), - (*LoadBalanceRequest_ClientStats)(nil), - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []interface{}{ - (*LoadBalanceResponse_InitialResponse)(nil), - (*LoadBalanceResponse_ServerList)(nil), - (*LoadBalanceResponse_FallbackResponse)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: 
protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_lb_v1_load_balancer_proto_rawDesc, - NumEnums: 0, - NumMessages: 9, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_grpc_lb_v1_load_balancer_proto_goTypes, - DependencyIndexes: file_grpc_lb_v1_load_balancer_proto_depIdxs, - MessageInfos: file_grpc_lb_v1_load_balancer_proto_msgTypes, - }.Build() - File_grpc_lb_v1_load_balancer_proto = out.File - file_grpc_lb_v1_load_balancer_proto_rawDesc = nil - file_grpc_lb_v1_load_balancer_proto_goTypes = nil - file_grpc_lb_v1_load_balancer_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go deleted file mode 100644 index 00d0954b38..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2015 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file defines the GRPCLB LoadBalancing protocol. -// -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.22.0 -// source: grpc/lb/v1/load_balancer.proto - -package grpc_lb_v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" -) - -// LoadBalancerClient is the client API for LoadBalancer service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type LoadBalancerClient interface { - // Bidirectional rpc to get a list of servers. - BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) -} - -type loadBalancerClient struct { - cc grpc.ClientConnInterface -} - -func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { - return &loadBalancerClient{cc} -} - -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { - stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, opts...) 
- if err != nil { - return nil, err - } - x := &loadBalancerBalanceLoadClient{stream} - return x, nil -} - -type LoadBalancer_BalanceLoadClient interface { - Send(*LoadBalanceRequest) error - Recv() (*LoadBalanceResponse, error) - grpc.ClientStream -} - -type loadBalancerBalanceLoadClient struct { - grpc.ClientStream -} - -func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { - m := new(LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// LoadBalancerServer is the server API for LoadBalancer service. -// All implementations should embed UnimplementedLoadBalancerServer -// for forward compatibility -type LoadBalancerServer interface { - // Bidirectional rpc to get a list of servers. - BalanceLoad(LoadBalancer_BalanceLoadServer) error -} - -// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations. -type UnimplementedLoadBalancerServer struct { -} - -func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error { - return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") -} - -// UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to LoadBalancerServer will -// result in compilation errors. -type UnsafeLoadBalancerServer interface { - mustEmbedUnimplementedLoadBalancerServer() -} - -func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) { - s.RegisterService(&LoadBalancer_ServiceDesc, srv) -} - -func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) -} - -type LoadBalancer_BalanceLoadServer interface { - Send(*LoadBalanceResponse) error - Recv() (*LoadBalanceRequest, error) - grpc.ServerStream -} - -type loadBalancerBalanceLoadServer struct { - grpc.ServerStream -} - -func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { - m := new(LoadBalanceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var LoadBalancer_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.lb.v1.LoadBalancer", - HandlerType: (*LoadBalancerServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "BalanceLoad", - Handler: _LoadBalancer_BalanceLoad_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc/lb/v1/load_balancer.proto", -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go deleted file mode 100644 index 6d698229a3..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ /dev/null @@ -1,520 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpclb defines a grpclb balancer. -// -// To install grpclb balancer, import this package as: -// -// import _ "google.golang.org/grpc/balancer/grpclb" -package grpclb - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/balancer" - grpclbstate "google.golang.org/grpc/balancer/grpclb/state" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/resolver/dns" - "google.golang.org/grpc/resolver" - - durationpb "github.com/golang/protobuf/ptypes/duration" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" -) - -const ( - lbTokenKey = "lb-token" - defaultFallbackTimeout = 10 * time.Second - grpclbName = "grpclb" -) - -var errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection") -var logger = grpclog.Component("grpclb") - -func convertDuration(d *durationpb.Duration) time.Duration { - if d == nil { - return 0 - } - return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond -} - -// Client API for LoadBalancer service. -// Mostly copied from generated pb.go file. -// To avoid circular dependency. -type loadBalancerClient struct { - cc *grpc.ClientConn -} - -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (*balanceLoadClientStream, error) { - desc := &grpc.StreamDesc{ - StreamName: "BalanceLoad", - ServerStreams: true, - ClientStreams: true, - } - stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) - if err != nil { - return nil, err - } - x := &balanceLoadClientStream{stream} - return x, nil -} - -type balanceLoadClientStream struct { - grpc.ClientStream -} - -func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { - m := new(lbpb.LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func init() { - balancer.Register(newLBBuilder()) - dns.EnableSRVLookups = true -} - -// newLBBuilder creates a builder for grpclb. -func newLBBuilder() balancer.Builder { - return newLBBuilderWithFallbackTimeout(defaultFallbackTimeout) -} - -// newLBBuilderWithFallbackTimeout creates a grpclb builder with the given -// fallbackTimeout. If no response is received from the remote balancer within -// fallbackTimeout, the backend addresses from the resolved address list will be -// used. -// -// Only call this function when a non-default fallback timeout is needed. 
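The convertDuration helper removed above maps a protobuf Duration onto Go's time.Duration, treating nil as zero. A self-contained restatement of that logic (a sketch using the current google.golang.org/protobuf durationpb type rather than the github.com/golang/protobuf alias imported by the deleted file) looks roughly like this:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

// convertDuration restates the deleted helper: a nil protobuf Duration
// maps to zero; otherwise seconds and nanos are combined into a single
// time.Duration value.
func convertDuration(d *durationpb.Duration) time.Duration {
	if d == nil {
		return 0
	}
	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
}

func main() {
	fmt.Println(convertDuration(&durationpb.Duration{Seconds: 1, Nanos: 500_000_000})) // 1.5s
}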
-func newLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder { - return &lbBuilder{ - fallbackTimeout: fallbackTimeout, - } -} - -type lbBuilder struct { - fallbackTimeout time.Duration -} - -func (b *lbBuilder) Name() string { - return grpclbName -} - -func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - // This generates a manual resolver builder with a fixed scheme. This - // scheme will be used to dial to remote LB, so we can send filtered - // address updates to remote LB ClientConn using this manual resolver. - r := &lbManualResolver{scheme: "grpclb-internal", ccb: cc} - - lb := &lbBalancer{ - cc: newLBCacheClientConn(cc), - dialTarget: opt.Target.Endpoint(), - target: opt.Target.Endpoint(), - opt: opt, - fallbackTimeout: b.fallbackTimeout, - doneCh: make(chan struct{}), - - manualResolver: r, - subConns: make(map[resolver.Address]balancer.SubConn), - scStates: make(map[balancer.SubConn]connectivity.State), - picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, - clientStats: newRPCStats(), - backoff: backoff.DefaultExponential, // TODO: make backoff configurable. - } - - var err error - if opt.CredsBundle != nil { - lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) - if err != nil { - logger.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) - } - lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) - if err != nil { - logger.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) - } - } - - return lb -} - -type lbBalancer struct { - cc *lbCacheClientConn - dialTarget string // user's dial target - target string // same as dialTarget unless overridden in service config - opt balancer.BuildOptions - - usePickFirst bool - - // grpclbClientConnCreds is the creds bundle to be used to connect to grpclb - // servers. If it's nil, use the TransportCredentials from BuildOptions - // instead. - grpclbClientConnCreds credentials.Bundle - // grpclbBackendCreds is the creds bundle to be used for addresses that are - // returned by grpclb server. If it's nil, don't set anything when creating - // SubConns. - grpclbBackendCreds credentials.Bundle - - fallbackTimeout time.Duration - doneCh chan struct{} - - // manualResolver is used in the remote LB ClientConn inside grpclb. When - // resolved address updates are received by grpclb, filtered updates will be - // send to remote LB ClientConn through this resolver. - manualResolver *lbManualResolver - // The ClientConn to talk to the remote balancer. - ccRemoteLB *remoteBalancerCCWrapper - // backoff for calling remote balancer. - backoff backoff.Strategy - - // Support client side load reporting. Each picker gets a reference to this, - // and will update its content. - clientStats *rpcStats - - mu sync.Mutex // guards everything following. - // The full server list including drops, used to check if the newly received - // serverList contains anything new. Each generate picker will also have - // reference to this list to do the first layer pick. - fullServerList []*lbpb.Server - // Backend addresses. It's kept so the addresses are available when - // switching between round_robin and pickfirst. - backendAddrs []resolver.Address - // All backends addresses, with metadata set to nil. This list contains all - // backend addresses in the same order and with the same duplicates as in - // serverlist. 
When generating picker, a SubConn slice with the same order - // but with only READY SCs will be generated. - backendAddrsWithoutMetadata []resolver.Address - // Roundrobin functionalities. - state connectivity.State - subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. - scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. - picker balancer.Picker - // Support fallback to resolved backend addresses if there's no response - // from remote balancer within fallbackTimeout. - remoteBalancerConnected bool - serverListReceived bool - inFallback bool - // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set - // when resolved address updates are received, and read in the goroutine - // handling fallback. - resolvedBackendAddrs []resolver.Address - connErr error // the last connection error -} - -// regeneratePicker takes a snapshot of the balancer, and generates a picker from -// it. The picker -// - always returns ErrTransientFailure if the balancer is in TransientFailure, -// - does a two-layer roundrobin pick otherwise. -// -// Caller must hold lb.mu. -func (lb *lbBalancer) regeneratePicker(resetDrop bool) { - if lb.state == connectivity.TransientFailure { - lb.picker = &errPicker{err: fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)} - return - } - - if lb.state == connectivity.Connecting { - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} - return - } - - var readySCs []balancer.SubConn - if lb.usePickFirst { - for _, sc := range lb.subConns { - readySCs = append(readySCs, sc) - break - } - } else { - for _, a := range lb.backendAddrsWithoutMetadata { - if sc, ok := lb.subConns[a]; ok { - if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready { - readySCs = append(readySCs, sc) - } - } - } - } - - if len(readySCs) <= 0 { - // If there are no ready SubConns, always re-pick. This is to avoid drops - // unless at least one SubConn is ready. Otherwise we may drop more - // often than we want because of drops + re-picks (which become re-drops). - // - // This doesn't seem to be necessary after the connecting check above. - // Kept for safety. - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} - return - } - if lb.inFallback { - lb.picker = newRRPicker(readySCs) - return - } - if resetDrop { - lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) - return - } - prevLBPicker, ok := lb.picker.(*lbPicker) - if !ok { - lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) - return - } - prevLBPicker.updateReadySCs(readySCs) -} - -// aggregateSubConnStates calculates the aggregated state of SubConns in -// lb.subConns. These SubConns are subconns in use (when switching between -// fallback and grpclb). lb.scStates contains states for all SubConns, including -// those in cache (SubConns are cached for 10 seconds after removal). -// -// The aggregated state is: -// - If at least one SubConn is in Ready, the aggregated state is Ready; -// - Else if at least one SubConn is in Connecting or IDLE, the aggregated state is Connecting; -// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, -// they start to connect immediately. But there's a race between when the overall -// state is reported and when the new SubConn state arrives. And SubConns -// never go back to IDLE. -// - Else the aggregated state is TransientFailure.
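The aggregation rule documented above (and implemented in the function that follows) reduces to a simple precedence check: Ready beats Connecting/Idle, which beats TransientFailure. A dependency-free sketch of that rule, with local stand-ins for connectivity.State rather than the real gRPC types:

package main

import "fmt"

type state int

const (
	idle state = iota
	connecting
	ready
	transientFailure
)

// aggregate applies the precedence described above: any Ready SubConn
// wins; otherwise any Connecting or Idle SubConn yields Connecting;
// otherwise the aggregate is TransientFailure.
func aggregate(states []state) state {
	sawConnecting := false
	for _, s := range states {
		switch s {
		case ready:
			return ready
		case connecting, idle:
			sawConnecting = true
		}
	}
	if sawConnecting {
		return connecting
	}
	return transientFailure
}

func main() {
	fmt.Println(aggregate([]state{transientFailure, idle}) == connecting) // true
}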
-func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { - var numConnecting uint64 - - for _, sc := range lb.subConns { - if state, ok := lb.scStates[sc]; ok { - switch state { - case connectivity.Ready: - return connectivity.Ready - case connectivity.Connecting, connectivity.Idle: - numConnecting++ - } - } - } - if numConnecting > 0 { - return connectivity.Connecting - } - return connectivity.TransientFailure -} - -func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { - s := scs.ConnectivityState - if logger.V(2) { - logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) - } - lb.mu.Lock() - defer lb.mu.Unlock() - - oldS, ok := lb.scStates[sc] - if !ok { - if logger.V(2) { - logger.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) - } - return - } - lb.scStates[sc] = s - switch s { - case connectivity.Idle: - sc.Connect() - case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. - delete(lb.scStates, sc) - case connectivity.TransientFailure: - lb.connErr = scs.ConnectionError - } - // Force regenerate picker if - // - this sc became ready from not-ready - // - this sc became not-ready from ready - lb.updateStateAndPicker((oldS == connectivity.Ready) != (s == connectivity.Ready), false) - - // Enter fallback when the aggregated state is not Ready and the connection - // to remote balancer is lost. - if lb.state != connectivity.Ready { - if !lb.inFallback && !lb.remoteBalancerConnected { - // Enter fallback. - lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) - } - } -} - -// updateStateAndPicker re-calculate the aggregated state, and regenerate picker -// if overall state is changed. -// -// If forceRegeneratePicker is true, picker will be regenerated. -func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop bool) { - oldAggrState := lb.state - lb.state = lb.aggregateSubConnStates() - // Regenerate picker when one of the following happens: - // - caller wants to regenerate - // - the aggregated state changed - if forceRegeneratePicker || (lb.state != oldAggrState) { - lb.regeneratePicker(resetDrop) - } - - lb.cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) -} - -// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use -// resolved backends (backends received from resolver, not from remote balancer) -// if no connection to remote balancers was successful. -func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) { - timer := time.NewTimer(fallbackTimeout) - defer timer.Stop() - select { - case <-timer.C: - case <-lb.doneCh: - return - } - lb.mu.Lock() - if lb.inFallback || lb.serverListReceived { - lb.mu.Unlock() - return - } - // Enter fallback. - lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) - lb.mu.Unlock() -} - -func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { - lb.mu.Lock() - defer lb.mu.Unlock() - - // grpclb uses the user's dial target to populate the `Name` field of the - // `InitialLoadBalanceRequest` message sent to the remote balancer. But when - // grpclb is used a child policy in the context of RLS, we want the `Name` - // field to be populated with the value received from the RLS server. To - // support this use case, an optional "target_name" field has been added to - // the grpclb LB policy's config. 
If specified, it overrides the name of - // the target to be sent to the remote balancer; if not, the target to be - // sent to the balancer will continue to be obtained from the target URI - // passed to the gRPC client channel. Whenever that target to be sent to the - // balancer is updated, we need to restart the stream to the balancer as - // this target is sent in the first message on the stream. - if gc != nil { - target := lb.dialTarget - if gc.ServiceName != "" { - target = gc.ServiceName - } - if target != lb.target { - lb.target = target - if lb.ccRemoteLB != nil { - lb.ccRemoteLB.cancelRemoteBalancerCall() - } - } - } - - newUsePickFirst := childIsPickFirst(gc) - if lb.usePickFirst == newUsePickFirst { - return - } - if logger.V(2) { - logger.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) - } - lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) -} - -func (lb *lbBalancer) ResolverError(error) { - // Ignore resolver errors. GRPCLB is not selected unless the resolver - // works at least once. -} - -func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { - if logger.V(2) { - logger.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) - } - gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) - lb.handleServiceConfig(gc) - - addrs := ccs.ResolverState.Addresses - - var remoteBalancerAddrs, backendAddrs []resolver.Address - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - a.Type = resolver.Backend - remoteBalancerAddrs = append(remoteBalancerAddrs, a) - } else { - backendAddrs = append(backendAddrs, a) - } - } - if sd := grpclbstate.Get(ccs.ResolverState); sd != nil { - // Override any balancer addresses provided via - // ccs.ResolverState.Addresses. - remoteBalancerAddrs = sd.BalancerAddresses - } - - if len(backendAddrs)+len(remoteBalancerAddrs) == 0 { - // There should be at least one address, either grpclb server or - // fallback. Empty address is not valid. - return balancer.ErrBadResolverState - } - - if len(remoteBalancerAddrs) == 0 { - if lb.ccRemoteLB != nil { - lb.ccRemoteLB.close() - lb.ccRemoteLB = nil - } - } else if lb.ccRemoteLB == nil { - // First time receiving resolved addresses, create a cc to remote - // balancers. - lb.newRemoteBalancerCCWrapper() - // Start the fallback goroutine. - go lb.fallbackToBackendsAfter(lb.fallbackTimeout) - } - - if lb.ccRemoteLB != nil { - // cc to remote balancers uses lb.manualResolver. Send the updated remote - // balancer addresses to it through manualResolver. - lb.manualResolver.UpdateState(resolver.State{Addresses: remoteBalancerAddrs}) - } - - lb.mu.Lock() - lb.resolvedBackendAddrs = backendAddrs - if len(remoteBalancerAddrs) == 0 || lb.inFallback { - // If there's no remote balancer address in ClientConn update, grpclb - // enters fallback mode immediately. - // - // If a new update is received while grpclb is in fallback, update the - // list of backends being used to the new fallback backends. 
- lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) - } - lb.mu.Unlock() - return nil -} - -func (lb *lbBalancer) Close() { - select { - case <-lb.doneCh: - return - default: - } - close(lb.doneCh) - if lb.ccRemoteLB != nil { - lb.ccRemoteLB.close() - } - lb.cc.close() -} - -func (lb *lbBalancer) ExitIdle() {} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go deleted file mode 100644 index 8942c31310..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclb - -import ( - "encoding/json" - - "google.golang.org/grpc" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/serviceconfig" -) - -const ( - roundRobinName = roundrobin.Name - pickFirstName = grpc.PickFirstBalancerName -) - -type grpclbServiceConfig struct { - serviceconfig.LoadBalancingConfig - ChildPolicy *[]map[string]json.RawMessage - ServiceName string -} - -func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - ret := &grpclbServiceConfig{} - if err := json.Unmarshal(lbConfig, ret); err != nil { - return nil, err - } - return ret, nil -} - -func childIsPickFirst(sc *grpclbServiceConfig) bool { - if sc == nil { - return false - } - childConfigs := sc.ChildPolicy - if childConfigs == nil { - return false - } - for _, childC := range *childConfigs { - // If round_robin exists before pick_first, return false - if _, ok := childC[roundRobinName]; ok { - return false - } - // If pick_first is before round_robin, return true - if _, ok := childC[pickFirstName]; ok { - return true - } - } - return false -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go deleted file mode 100644 index 39bc5cc71e..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ /dev/null @@ -1,202 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpclb - -import ( - "sync" - "sync/atomic" - - "google.golang.org/grpc/balancer" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/status" -) - -// rpcStats is same as lbpb.ClientStats, except that numCallsDropped is a map -// instead of a slice. -type rpcStats struct { - // Only access the following fields atomically. - numCallsStarted int64 - numCallsFinished int64 - numCallsFinishedWithClientFailedToSend int64 - numCallsFinishedKnownReceived int64 - - mu sync.Mutex - // map load_balance_token -> num_calls_dropped - numCallsDropped map[string]int64 -} - -func newRPCStats() *rpcStats { - return &rpcStats{ - numCallsDropped: make(map[string]int64), - } -} - -func isZeroStats(stats *lbpb.ClientStats) bool { - return len(stats.CallsFinishedWithDrop) == 0 && - stats.NumCallsStarted == 0 && - stats.NumCallsFinished == 0 && - stats.NumCallsFinishedWithClientFailedToSend == 0 && - stats.NumCallsFinishedKnownReceived == 0 -} - -// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats. -func (s *rpcStats) toClientStats() *lbpb.ClientStats { - stats := &lbpb.ClientStats{ - NumCallsStarted: atomic.SwapInt64(&s.numCallsStarted, 0), - NumCallsFinished: atomic.SwapInt64(&s.numCallsFinished, 0), - NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.numCallsFinishedWithClientFailedToSend, 0), - NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.numCallsFinishedKnownReceived, 0), - } - s.mu.Lock() - dropped := s.numCallsDropped - s.numCallsDropped = make(map[string]int64) - s.mu.Unlock() - for token, count := range dropped { - stats.CallsFinishedWithDrop = append(stats.CallsFinishedWithDrop, &lbpb.ClientStatsPerToken{ - LoadBalanceToken: token, - NumCalls: count, - }) - } - return stats -} - -func (s *rpcStats) drop(token string) { - atomic.AddInt64(&s.numCallsStarted, 1) - s.mu.Lock() - s.numCallsDropped[token]++ - s.mu.Unlock() - atomic.AddInt64(&s.numCallsFinished, 1) -} - -func (s *rpcStats) failedToSend() { - atomic.AddInt64(&s.numCallsStarted, 1) - atomic.AddInt64(&s.numCallsFinishedWithClientFailedToSend, 1) - atomic.AddInt64(&s.numCallsFinished, 1) -} - -func (s *rpcStats) knownReceived() { - atomic.AddInt64(&s.numCallsStarted, 1) - atomic.AddInt64(&s.numCallsFinishedKnownReceived, 1) - atomic.AddInt64(&s.numCallsFinished, 1) -} - -type errPicker struct { - // Pick always returns this err. - err error -} - -func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - return balancer.PickResult{}, p.err -} - -// rrPicker does roundrobin on subConns. It's typically used when there's no -// response from remote balancer, and grpclb falls back to the resolved -// backends. -// -// It guaranteed that len(subConns) > 0. -type rrPicker struct { - mu sync.Mutex - subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. - subConnsNext int -} - -func newRRPicker(readySCs []balancer.SubConn) *rrPicker { - return &rrPicker{ - subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), - } -} - -func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - defer p.mu.Unlock() - sc := p.subConns[p.subConnsNext] - p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) - return balancer.PickResult{SubConn: sc}, nil -} - -// lbPicker does two layers of picks: -// -// First layer: roundrobin on all servers in serverList, including drops and backends. 
-// - If it picks a drop, the RPC will fail as being dropped. -// - If it picks a backend, do a second layer pick to pick the real backend. -// -// Second layer: roundrobin on all READY backends. -// -// It's guaranteed that len(serverList) > 0. -type lbPicker struct { - mu sync.Mutex - serverList []*lbpb.Server - serverListNext int - subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. - subConnsNext int - - stats *rpcStats -} - -func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats *rpcStats) *lbPicker { - return &lbPicker{ - serverList: serverList, - subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), - stats: stats, - } -} - -func (p *lbPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - defer p.mu.Unlock() - - // Layer one roundrobin on serverList. - s := p.serverList[p.serverListNext] - p.serverListNext = (p.serverListNext + 1) % len(p.serverList) - - // If it's a drop, return an error and fail the RPC. - if s.Drop { - p.stats.drop(s.LoadBalanceToken) - return balancer.PickResult{}, status.Errorf(codes.Unavailable, "request dropped by grpclb") - } - - // If not a drop but there's no ready subConns. - if len(p.subConns) <= 0 { - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } - - // Return the next ready subConn in the list, also collect rpc stats. - sc := p.subConns[p.subConnsNext] - p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) - done := func(info balancer.DoneInfo) { - if !info.BytesSent { - p.stats.failedToSend() - } else if info.BytesReceived { - p.stats.knownReceived() - } - } - return balancer.PickResult{SubConn: sc, Done: done}, nil -} - -func (p *lbPicker) updateReadySCs(readySCs []balancer.SubConn) { - p.mu.Lock() - defer p.mu.Unlock() - - p.subConns = readySCs - p.subConnsNext = p.subConnsNext % len(readySCs) -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go deleted file mode 100644 index e56006d713..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ /dev/null @@ -1,449 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpclb - -import ( - "context" - "fmt" - "io" - "net" - "sync" - "time" - - "github.com/golang/protobuf/proto" - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc" - "google.golang.org/grpc/balancer" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/backoff" - imetadata "google.golang.org/grpc/internal/metadata" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/resolver" -) - -// processServerList updates balancer's internal state, create/remove SubConns -// and regenerates picker using the received serverList. -func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { - if logger.V(2) { - logger.Infof("lbBalancer: processing server list: %+v", l) - } - lb.mu.Lock() - defer lb.mu.Unlock() - - // Set serverListReceived to true so fallback will not take effect if it has - // not hit timeout. - lb.serverListReceived = true - - // If the new server list == old server list, do nothing. - if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) { - if logger.V(2) { - logger.Infof("lbBalancer: new serverlist same as the previous one, ignoring") - } - return - } - lb.fullServerList = l.Servers - - var backendAddrs []resolver.Address - for i, s := range l.Servers { - if s.Drop { - continue - } - - md := metadata.Pairs(lbTokenKey, s.LoadBalanceToken) - ip := net.IP(s.IpAddress) - ipStr := ip.String() - if ip.To4() == nil { - // Add square brackets to ipv6 addresses, otherwise net.Dial() and - // net.SplitHostPort() will return too many colons error. - ipStr = fmt.Sprintf("[%s]", ipStr) - } - addr := imetadata.Set(resolver.Address{Addr: fmt.Sprintf("%s:%d", ipStr, s.Port)}, md) - if logger.V(2) { - logger.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", - i, ipStr, s.Port, s.LoadBalanceToken) - } - backendAddrs = append(backendAddrs, addr) - } - - // Call refreshSubConns to create/remove SubConns. If we are in fallback, - // this is also exiting fallback. - lb.refreshSubConns(backendAddrs, false, lb.usePickFirst) -} - -// refreshSubConns creates/removes SubConns with backendAddrs, and refreshes -// balancer state and picker. -// -// Caller must hold lb.mu. -func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback bool, pickFirst bool) { - opts := balancer.NewSubConnOptions{} - if !fallback { - opts.CredsBundle = lb.grpclbBackendCreds - } - - lb.backendAddrs = backendAddrs - lb.backendAddrsWithoutMetadata = nil - - fallbackModeChanged := lb.inFallback != fallback - lb.inFallback = fallback - if fallbackModeChanged && lb.inFallback { - // Clear previous received list when entering fallback, so if the server - // comes back and sends the same list again, the new addresses will be - // used. - lb.fullServerList = nil - } - - balancingPolicyChanged := lb.usePickFirst != pickFirst - oldUsePickFirst := lb.usePickFirst - lb.usePickFirst = pickFirst - - if fallbackModeChanged || balancingPolicyChanged { - // Remove all SubConns when switching balancing policy or switching - // fallback mode. - // - // For fallback mode switching with pickfirst, we want to recreate the - // SubConn because the creds could be different. - for a, sc := range lb.subConns { - if oldUsePickFirst { - // If old SubConn were created for pickfirst, bypass cache and - // remove directly. 
- lb.cc.cc.RemoveSubConn(sc) - } else { - lb.cc.RemoveSubConn(sc) - } - delete(lb.subConns, a) - } - } - - if lb.usePickFirst { - var ( - scKey resolver.Address - sc balancer.SubConn - ) - for scKey, sc = range lb.subConns { - break - } - if sc != nil { - if len(backendAddrs) == 0 { - lb.cc.cc.RemoveSubConn(sc) - delete(lb.subConns, scKey) - return - } - lb.cc.cc.UpdateAddresses(sc, backendAddrs) - sc.Connect() - return - } - // This bypasses the cc wrapper with SubConn cache. - sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) - if err != nil { - logger.Warningf("grpclb: failed to create new SubConn: %v", err) - return - } - sc.Connect() - lb.subConns[backendAddrs[0]] = sc - lb.scStates[sc] = connectivity.Idle - return - } - - // addrsSet is the set converted from backendAddrsWithoutMetadata, it's used to quick - // lookup for an address. - addrsSet := make(map[resolver.Address]struct{}) - // Create new SubConns. - for _, addr := range backendAddrs { - addrWithoutAttrs := addr - addrWithoutAttrs.Attributes = nil - addrsSet[addrWithoutAttrs] = struct{}{} - lb.backendAddrsWithoutMetadata = append(lb.backendAddrsWithoutMetadata, addrWithoutAttrs) - - if _, ok := lb.subConns[addrWithoutAttrs]; !ok { - // Use addrWithMD to create the SubConn. - sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) - if err != nil { - logger.Warningf("grpclb: failed to create new SubConn: %v", err) - continue - } - lb.subConns[addrWithoutAttrs] = sc // Use the addr without MD as key for the map. - if _, ok := lb.scStates[sc]; !ok { - // Only set state of new sc to IDLE. The state could already be - // READY for cached SubConns. - lb.scStates[sc] = connectivity.Idle - } - sc.Connect() - } - } - - for a, sc := range lb.subConns { - // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { - lb.cc.RemoveSubConn(sc) - delete(lb.subConns, a) - // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. - } - } - - // Regenerate and update picker after refreshing subconns because with - // cache, even if SubConn was newed/removed, there might be no state - // changes (the subconn will be kept in cache, not actually - // newed/removed). - lb.updateStateAndPicker(true, true) -} - -type remoteBalancerCCWrapper struct { - cc *grpc.ClientConn - lb *lbBalancer - backoff backoff.Strategy - done chan struct{} - - streamMu sync.Mutex - streamCancel func() - - // waitgroup to wait for all goroutines to exit. - wg sync.WaitGroup -} - -func (lb *lbBalancer) newRemoteBalancerCCWrapper() { - var dopts []grpc.DialOption - if creds := lb.opt.DialCreds; creds != nil { - dopts = append(dopts, grpc.WithTransportCredentials(creds)) - } else if bundle := lb.grpclbClientConnCreds; bundle != nil { - dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) - } else { - dopts = append(dopts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - if lb.opt.Dialer != nil { - dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) - } - if lb.opt.CustomUserAgent != "" { - dopts = append(dopts, grpc.WithUserAgent(lb.opt.CustomUserAgent)) - } - // Explicitly set pickfirst as the balancer. - dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) - dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) - dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) - - // Enable Keepalive for grpclb client. 
- dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 20 * time.Second, - Timeout: 10 * time.Second, - PermitWithoutStream: true, - })) - - // The dial target is not important. - // - // The grpclb server addresses will set field ServerName, and creds will - // receive ServerName as authority. - cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...) - if err != nil { - logger.Fatalf("failed to dial: %v", err) - } - ccw := &remoteBalancerCCWrapper{ - cc: cc, - lb: lb, - backoff: lb.backoff, - done: make(chan struct{}), - } - lb.ccRemoteLB = ccw - ccw.wg.Add(1) - go ccw.watchRemoteBalancer() -} - -// close closed the ClientConn to remote balancer, and waits until all -// goroutines to finish. -func (ccw *remoteBalancerCCWrapper) close() { - close(ccw.done) - ccw.cc.Close() - ccw.wg.Wait() -} - -func (ccw *remoteBalancerCCWrapper) readServerList(s *balanceLoadClientStream) error { - for { - reply, err := s.Recv() - if err != nil { - if err == io.EOF { - return errServerTerminatedConnection - } - return fmt.Errorf("grpclb: failed to recv server list: %v", err) - } - if serverList := reply.GetServerList(); serverList != nil { - ccw.lb.processServerList(serverList) - } - if reply.GetFallbackResponse() != nil { - // Eagerly enter fallback - ccw.lb.mu.Lock() - ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst) - ccw.lb.mu.Unlock() - } - } -} - -func (ccw *remoteBalancerCCWrapper) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) { - ticker := time.NewTicker(interval) - defer ticker.Stop() - lastZero := false - for { - select { - case <-ticker.C: - case <-s.Context().Done(): - return - } - stats := ccw.lb.clientStats.toClientStats() - zero := isZeroStats(stats) - if zero && lastZero { - // Quash redundant empty load reports. - continue - } - lastZero = zero - t := time.Now() - stats.Timestamp = ×tamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := s.Send(&lbpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ - ClientStats: stats, - }, - }); err != nil { - return - } - } -} - -func (ccw *remoteBalancerCCWrapper) callRemoteBalancer(ctx context.Context) (backoff bool, _ error) { - lbClient := &loadBalancerClient{cc: ccw.cc} - stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true)) - if err != nil { - return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer: %v", err) - } - ccw.lb.mu.Lock() - ccw.lb.remoteBalancerConnected = true - ccw.lb.mu.Unlock() - - // grpclb handshake on the stream. - initReq := &lbpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ - InitialRequest: &lbpb.InitialLoadBalanceRequest{ - Name: ccw.lb.target, - }, - }, - } - if err := stream.Send(initReq); err != nil { - return true, fmt.Errorf("grpclb: failed to send init request: %v", err) - } - reply, err := stream.Recv() - if err != nil { - return true, fmt.Errorf("grpclb: failed to recv init response: %v", err) - } - initResp := reply.GetInitialResponse() - if initResp == nil { - return true, fmt.Errorf("grpclb: reply from remote balancer did not include initial response") - } - - ccw.wg.Add(1) - go func() { - defer ccw.wg.Done() - if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { - ccw.sendLoadReport(stream, d) - } - }() - // No backoff if init req/resp handshake was successful. 
- return false, ccw.readServerList(stream) -} - -// cancelRemoteBalancerCall cancels the context used by the stream to the remote -// balancer. watchRemoteBalancer() takes care of restarting this call after the -// stream fails. -func (ccw *remoteBalancerCCWrapper) cancelRemoteBalancerCall() { - ccw.streamMu.Lock() - if ccw.streamCancel != nil { - ccw.streamCancel() - ccw.streamCancel = nil - } - ccw.streamMu.Unlock() -} - -func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { - defer func() { - ccw.wg.Done() - ccw.streamMu.Lock() - if ccw.streamCancel != nil { - // This is to make sure that we don't leak the context when we are - // directly returning from inside of the below `for` loop. - ccw.streamCancel() - ccw.streamCancel = nil - } - ccw.streamMu.Unlock() - }() - - var retryCount int - var ctx context.Context - for { - ccw.streamMu.Lock() - if ccw.streamCancel != nil { - ccw.streamCancel() - ccw.streamCancel = nil - } - ctx, ccw.streamCancel = context.WithCancel(context.Background()) - ccw.streamMu.Unlock() - - doBackoff, err := ccw.callRemoteBalancer(ctx) - select { - case <-ccw.done: - return - default: - if err != nil { - if err == errServerTerminatedConnection { - logger.Info(err) - } else { - logger.Warning(err) - } - } - } - // Trigger a re-resolve when the stream errors. - ccw.lb.cc.cc.ResolveNow(resolver.ResolveNowOptions{}) - - ccw.lb.mu.Lock() - ccw.lb.remoteBalancerConnected = false - ccw.lb.fullServerList = nil - // Enter fallback when connection to remote balancer is lost, and the - // aggregated state is not Ready. - if !ccw.lb.inFallback && ccw.lb.state != connectivity.Ready { - // Entering fallback. - ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst) - } - ccw.lb.mu.Unlock() - - if !doBackoff { - retryCount = 0 - continue - } - - timer := time.NewTimer(ccw.backoff.Backoff(retryCount)) // Copy backoff - select { - case <-timer.C: - case <-ccw.done: - timer.Stop() - return - } - retryCount++ - } -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go deleted file mode 100644 index 373f04b98d..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go +++ /dev/null @@ -1,208 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclb - -import ( - "fmt" - "sync" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/resolver" -) - -// The parent ClientConn should re-resolve when grpclb loses connection to the -// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, -// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's -// ResolveNow, and eventually results in re-resolve happening in parent -// ClientConn's resolver (DNS for example). 
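The watchRemoteBalancer loop above follows a standard retry shape: restart the stream indefinitely, back off only when the handshake failed, and reset the retry count after any successful round. A minimal sketch of that control flow; the base and ceiling values here are illustrative, not gRPC's DefaultExponential settings:

package main

import (
	"fmt"
	"math"
	"time"
)

// backoffFor sketches the retry policy of the deleted watch loop:
// exponential growth with a ceiling.
func backoffFor(retries int) time.Duration {
	base, ceiling := 100*time.Millisecond, 10*time.Second
	d := time.Duration(float64(base) * math.Pow(2, float64(retries)))
	if d > ceiling {
		return ceiling
	}
	return d
}

func main() {
	retries := 0
	for attempt := 0; attempt < 6; attempt++ {
		succeeded := attempt == 3 // pretend the fourth call's handshake succeeds
		if succeeded {
			retries = 0 // success resets the backoff, as in the loop above
			fmt.Println("attempt", attempt, "succeeded; retry count reset")
			continue
		}
		fmt.Println("attempt", attempt, "failed; would back off", backoffFor(retries))
		retries++
	}
}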
-// -// parent -// ClientConn -// +-----------------------------------------------------------------+ -// | parent +---------------------------------+ | -// | DNS ClientConn | grpclb | | -// | resolver balancerWrapper | | | -// | + + | grpclb grpclb | | -// | | | | ManualResolver ClientConn | | -// | | | | + + | | -// | | | | | | Transient | | -// | | | | | | Failure | | -// | | | | | <--------- | | | -// | | | <--------------- | ResolveNow | | | -// | | <--------- | ResolveNow | | | | | -// | | ResolveNow | | | | | | -// | | | | | | | | -// | + + | + + | | -// | +---------------------------------+ | -// +-----------------------------------------------------------------+ - -// lbManualResolver is used by the ClientConn inside grpclb. It's a manual -// resolver with a special ResolveNow() function. -// -// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, -// so when grpclb client lose contact with remote balancers, the parent -// ClientConn's resolver will re-resolve. -type lbManualResolver struct { - scheme string - ccr resolver.ClientConn - - ccb balancer.ClientConn -} - -func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - r.ccr = cc - return r, nil -} - -func (r *lbManualResolver) Scheme() string { - return r.scheme -} - -// ResolveNow calls resolveNow on the parent ClientConn. -func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOptions) { - r.ccb.ResolveNow(o) -} - -// Close is a noop for Resolver. -func (*lbManualResolver) Close() {} - -// UpdateState calls cc.UpdateState. -func (r *lbManualResolver) UpdateState(s resolver.State) { - r.ccr.UpdateState(s) -} - -const subConnCacheTime = time.Second * 10 - -// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. -// SubConns will be kept in cache for subConnCacheTime before being removed. -// -// Its new and remove methods are updated to do cache first. -type lbCacheClientConn struct { - cc balancer.ClientConn - timeout time.Duration - - mu sync.Mutex - // subConnCache only keeps subConns that are being deleted. - subConnCache map[resolver.Address]*subConnCacheEntry - subConnToAddr map[balancer.SubConn]resolver.Address -} - -type subConnCacheEntry struct { - sc balancer.SubConn - - cancel func() - abortDeleting bool -} - -func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { - return &lbCacheClientConn{ - cc: cc, - timeout: subConnCacheTime, - subConnCache: make(map[resolver.Address]*subConnCacheEntry), - subConnToAddr: make(map[balancer.SubConn]resolver.Address), - } -} - -func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) != 1 { - return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs)) - } - addrWithoutAttrs := addrs[0] - addrWithoutAttrs.Attributes = nil - - ccc.mu.Lock() - defer ccc.mu.Unlock() - if entry, ok := ccc.subConnCache[addrWithoutAttrs]; ok { - // If entry is in subConnCache, the SubConn was being deleted. - // cancel function will never be nil. 
- entry.cancel() - delete(ccc.subConnCache, addrWithoutAttrs) - return entry.sc, nil - } - - scNew, err := ccc.cc.NewSubConn(addrs, opts) - if err != nil { - return nil, err - } - - ccc.subConnToAddr[scNew] = addrWithoutAttrs - return scNew, nil -} - -func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { - ccc.mu.Lock() - defer ccc.mu.Unlock() - addr, ok := ccc.subConnToAddr[sc] - if !ok { - return - } - - if entry, ok := ccc.subConnCache[addr]; ok { - if entry.sc != sc { - // This could happen if NewSubConn was called multiple times for the - // same address, and those SubConns are all removed. We remove sc - // immediately here. - delete(ccc.subConnToAddr, sc) - ccc.cc.RemoveSubConn(sc) - } - return - } - - entry := &subConnCacheEntry{ - sc: sc, - } - ccc.subConnCache[addr] = entry - - timer := time.AfterFunc(ccc.timeout, func() { - ccc.mu.Lock() - defer ccc.mu.Unlock() - if entry.abortDeleting { - return - } - ccc.cc.RemoveSubConn(sc) - delete(ccc.subConnToAddr, sc) - delete(ccc.subConnCache, addr) - }) - entry.cancel = func() { - if !timer.Stop() { - // If stop was not successful, the timer has fired (this can only - // happen in a race). But the deleting function is blocked on ccc.mu - // because the mutex was held by the caller of this function. - // - // Set abortDeleting to true to abort the deleting function. When - // the lock is released, the deleting function will acquire the - // lock, check the value of abortDeleting and return. - entry.abortDeleting = true - } - } -} - -func (ccc *lbCacheClientConn) UpdateState(s balancer.State) { - ccc.cc.UpdateState(s) -} - -func (ccc *lbCacheClientConn) close() { - ccc.mu.Lock() - // Only cancel all existing timers. There's no need to remove SubConns. - for _, entry := range ccc.subConnCache { - entry.cancel() - } - ccc.mu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go deleted file mode 100644 index 04b9ad4116..0000000000 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ /dev/null @@ -1,459 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "fmt" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/resolver" -) - -type ccbMode int - -const ( - ccbModeActive = iota - ccbModeIdle - ccbModeClosed - ccbModeExitingIdle -) - -// ccBalancerWrapper sits between the ClientConn and the Balancer. -// -// ccBalancerWrapper implements methods corresponding to the ones on the -// balancer.Balancer interface. The ClientConn is free to call these methods -// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn -// to the Balancer happen synchronously and in order. 
-// -// ccBalancerWrapper also implements the balancer.ClientConn interface and is -// passed to the Balancer implementations. It invokes unexported methods on the -// ClientConn to handle these calls from the Balancer. -// -// It uses the gracefulswitch.Balancer internally to ensure that balancer -// switches happen in a graceful manner. -type ccBalancerWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions - - // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled in the serializer. Fields - // accessed *only* in these serializer callbacks can therefore be accessed - // without a mutex. - balancer *gracefulswitch.Balancer - curBalancerName string - - // mu guards access to the below fields. Access to the serializer and its - // cancel function needs to be mutex protected because they are overwritten - // when the wrapper exits idle mode. - mu sync.Mutex - serializer *grpcsync.CallbackSerializer // To serialize all outgoing calls. - serializerCancel context.CancelFunc // To close the serializer at close/enterIdle time. - mode ccbMode // Tracks the current mode of the wrapper. -} - -// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer -// is not created until the switchTo() method is invoked. -func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(context.Background()) - ccb := &ccBalancerWrapper{ - cc: cc, - opts: bopts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) - return ccb -} - -// updateClientConnState is invoked by grpc to push a ClientConnState update to -// the underlying balancer. -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.mu.Lock() - errCh := make(chan error, 1) - // Here and everywhere else where Schedule() is called, it is done with the - // lock held. But the lock guards only the scheduling part. The actual - // callback is called asynchronously without the lock being held. - ok := ccb.serializer.Schedule(func(_ context.Context) { - // If the addresses specified in the update contain addresses of type - // "grpclb" and the selected LB policy is not "grpclb", these addresses - // will be filtered out and ccs will be modified with the updated - // address list. - if ccb.curBalancerName != grpclbName { - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) - } - ccs.ResolverState.Addresses = addrs - } - errCh <- ccb.balancer.UpdateClientConnState(*ccs) - }) - if !ok { - // If we are unable to schedule a function with the serializer, it - // indicates that it has been closed. A serializer is only closed when - // the wrapper is closed or is in idle. - ccb.mu.Unlock() - return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") - } - ccb.mu.Unlock() - - // We get here only if the above call to Schedule succeeds, in which case it - // is guaranteed that the scheduled function will run. Therefore it is safe - // to block on this channel.
- err := <-errCh - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) - } - return err -} - -// updateSubConnState is invoked by grpc to push a subConn state update to the -// underlying balancer. -func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) - }) - ccb.mu.Unlock() -} - -func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - ccb.balancer.ResolverError(err) - }) - ccb.mu.Unlock() -} - -// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the -// LB policy identified by name. -// -// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the -// first good update from the name resolver, it determines the LB policy to use -// and invokes the switchTo() method. Upon receipt of every subsequent update -// from the name resolver, it invokes this method. -// -// the ccBalancerWrapper keeps track of the current LB policy name, and skips -// the graceful balancer switching process if the name does not change. -func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - // TODO: Other languages use case-sensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } - ccb.buildLoadBalancingPolicy(name) - }) - ccb.mu.Unlock() -} - -// buildLoadBalancingPolicy performs the following: -// - retrieve a balancer builder for the given name. Use the default LB -// policy, pick_first, if no LB policy with name is found in the registry. -// - instruct the gracefulswitch balancer to switch to the above builder. This -// will actually build the new balancer. -// - update the `curBalancerName` field -// -// Must be called from a serializer callback. -func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) - builder = newPickfirstBuilder() - } else { - channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) - } - - if err := ccb.balancer.SwitchTo(builder); err != nil { - channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) - return - } - ccb.curBalancerName = builder.Name() -} - -func (ccb *ccBalancerWrapper) close() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.closeBalancer(ccbModeClosed) -} - -// enterIdleMode is invoked by grpc when the channel enters idle mode upon -// expiry of idle_timeout. This call blocks until the balancer is closed. -func (ccb *ccBalancerWrapper) enterIdleMode() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") - ccb.closeBalancer(ccbModeIdle) -} - -// closeBalancer is invoked when the channel is being closed or when it enters -// idle mode upon expiry of idle_timeout. 
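The serializer these wrappers schedule onto reduces to a single worker goroutine draining a queue of closures in FIFO order. The sketch below is a minimal stand-in with hypothetical names; the real grpcsync.CallbackSerializer is context-driven, reports whether scheduling succeeded, and exposes the Done signal used by closeBalancer below:

package main

import (
	"fmt"
	"sync"
)

// serializer runs scheduled functions one at a time, strictly in the
// order they were enqueued, on a single worker goroutine.
type serializer struct {
	ch chan func()
	wg sync.WaitGroup
}

func newSerializer() *serializer {
	s := &serializer{ch: make(chan func(), 16)}
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		for f := range s.ch {
			f() // callbacks never run concurrently with each other
		}
	}()
	return s
}

func (s *serializer) Schedule(f func()) { s.ch <- f }

// Close stops accepting work and waits for queued callbacks to finish.
func (s *serializer) Close() {
	close(s.ch)
	s.wg.Wait()
}

func main() {
	s := newSerializer()
	for i := 0; i < 3; i++ {
		i := i
		s.Schedule(func() { fmt.Println("callback", i) })
	}
	s.Close() // prints 0, 1, 2 in order
}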
-func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { - ccb.mu.Lock() - if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { - ccb.mu.Unlock() - return - } - - ccb.mode = m - done := ccb.serializer.Done - b := ccb.balancer - ok := ccb.serializer.Schedule(func(_ context.Context) { - // Close the serializer to ensure that no more calls from gRPC are sent - // to the balancer. - ccb.serializerCancel() - // Empty the current balancer name because we don't have a balancer - // anymore and also so that we act on the next call to switchTo by - // creating a new balancer specified by the new resolver. - ccb.curBalancerName = "" - }) - if !ok { - ccb.mu.Unlock() - return - } - ccb.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-done - // Spawn a goroutine to close the balancer (since it may block trying to - // cleanup all allocated resources) and return early. - go b.Close() -} - -// exitIdleMode is invoked by grpc when the channel exits idle mode either -// because of an RPC or because of an invocation of the Connect() API. This -// recreates the balancer that was closed previously when entering idle mode. -// -// If the channel is not in idle mode, we know for a fact that we are here as a -// result of the user calling the Connect() method on the ClientConn. In this -// case, we can simply forward the call to the underlying balancer, instructing -// it to reconnect to the backends. -func (ccb *ccBalancerWrapper) exitIdleMode() { - ccb.mu.Lock() - if ccb.mode == ccbModeClosed { - // Request to exit idle is a no-op when wrapper is already closed. - ccb.mu.Unlock() - return - } - - if ccb.mode == ccbModeIdle { - // Recreate the serializer which was closed when we entered idle. - ctx, cancel := context.WithCancel(context.Background()) - ccb.serializer = grpcsync.NewCallbackSerializer(ctx) - ccb.serializerCancel = cancel - } - - // The ClientConn guarantees that mutual exclusion between close() and - // exitIdleMode(), and since we just created a new serializer, we can be - // sure that the below function will be scheduled. - done := make(chan struct{}) - ccb.serializer.Schedule(func(_ context.Context) { - defer close(done) - - ccb.mu.Lock() - defer ccb.mu.Unlock() - - if ccb.mode != ccbModeIdle { - ccb.balancer.ExitIdle() - return - } - - // Gracefulswitch balancer does not support a switchTo operation after - // being closed. Hence we need to create a new one here. 
- ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) - ccb.mode = ccbModeActive - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") - - }) - ccb.mu.Unlock() - - <-done -} - -func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { - ccb.mu.Lock() - defer ccb.mu.Unlock() - return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed -} - -func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if ccb.isIdleOrClosed() { - return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") - } - - if len(addrs) == 0 { - return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") - } - ac, err := ccb.cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) - return nil, err - } - acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} - ac.acbw = acbw - return acbw, nil -} - -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - if ccb.isIdleOrClosed() { - // It it safe to ignore this call when the balancer is closed or in idle - // because the ClientConn takes care of closing the connections. - // - // Not returning early from here when the balancer is closed or in idle - // leads to a deadlock though, because of the following sequence of - // calls when holding cc.mu: - // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> - // ccb.RemoveAddrConn --> cc.removeAddrConn - return - } - - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.cc.removeAddrConn(acbw.ac, errConnDrain) -} - -func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - if ccb.isIdleOrClosed() { - return - } - - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - acbw.UpdateAddresses(addrs) -} - -func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - if ccb.isIdleOrClosed() { - return - } - - // Update picker before updating state. Even though the ordering here does - // not matter, it can lead to multiple calls of Pick in the common start-up - // case where we wait for ready and then perform an RPC. If the picker is - // updated later, we could call the "connecting" picker when the state is - // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(s.Picker) - ccb.cc.csMgr.updateState(s.ConnectivityState) -} - -func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { - if ccb.isIdleOrClosed() { - return - } - - ccb.cc.resolveNow(o) -} - -func (ccb *ccBalancerWrapper) Target() string { - return ccb.cc.target -} - -// acBalancerWrapper is a wrapper on top of ac for balancers. -// It implements balancer.SubConn interface. -type acBalancerWrapper struct { - ac *addrConn // read-only - - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer -} - -func (acbw *acBalancerWrapper) String() string { - return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) -} - -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.ac.updateAddrs(addrs) -} - -func (acbw *acBalancerWrapper) Connect() { - go acbw.ac.connect() -} - -// NewStream begins a streaming RPC on the addrConn. If the addrConn is not -// ready, blocks until it is or ctx expires. Returns an error when the context -// expires or the addrConn is shut down. 
-func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err - } - return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) -} - -// Invoke performs a unary RPC. If the addrConn is not ready, returns -// errSubConnNotReady. -func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { - cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) - if err != nil { - return err - } - if err := cs.SendMsg(args); err != nil { - return err - } - return cs.RecvMsg(reply) -} - -type refCountedProducer struct { - producer balancer.Producer - refs int // number of current refs to the producer - close func() // underlying producer's close function -} - -func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - - // Look up existing producer from this builder. - pData := acbw.producers[pb] - if pData == nil { - // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} - acbw.producers[pb] = pData - } - // Account for this new reference. - pData.refs++ - - // Return a cleanup function wrapped in a OnceFunc to remove this reference - // and delete the refCountedProducer from the map if the total reference - // count goes to zero. - unref := func() { - acbw.mu.Lock() - pData.refs-- - if pData.refs == 0 { - defer pData.close() // Run outside the acbw mutex - delete(acbw.producers, pb) - } - acbw.mu.Unlock() - } - return pData.producer, grpcsync.OnceFunc(unref) -} diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go new file mode 100644 index 0000000000..b5e30cff02 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -0,0 +1,380 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "strings" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" +) + +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. 
+// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // The following fields are only accessed within the serializer or during + // initialization. + curBalancerName string + balancer *gracefulswitch.Balancer + + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool +} + +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + ccb := &ccBalancerWrapper{ + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + return ccb +} + +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + errCh := make(chan error) + ok := ccb.serializer.Schedule(func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + errCh <- err + }) + if !ok { + return nil + } + return <-errCh +} + +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ResolverError(err) + }) +} + +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. 
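The errCh handshake in the new updateClientConnState above leans on a Go idiom: the scheduled callback either sends the balancer's error or only closes errCh (when the context is cancelled or the balancer is gone), and a receive from a closed channel yields the zero value, i.e. a nil error. A standalone demo of that pattern; the names here are illustrative, not from the gRPC source:

package main

import (
	"errors"
	"fmt"
)

// run mimics the scheduled callback: it either sends a real error or only
// closes errCh, in which case the receiver observes nil.
func run(report bool) error {
	errCh := make(chan error)
	go func() {
		defer close(errCh)
		if report {
			errCh <- errors.New("balancer rejected the update")
		}
	}()
	return <-errCh
}

func main() {
	fmt.Println(run(true))  // balancer rejected the update
	fmt.Println(run(false)) // <nil>
}

The switchTo implementation described by the doc comment above follows.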
+func (ccb *ccBalancerWrapper) switchTo(name string) {
+	ccb.serializer.Schedule(func(ctx context.Context) {
+		if ctx.Err() != nil || ccb.balancer == nil {
+			return
+		}
+		// TODO: Other languages use case-sensitive balancer registries. We should
+		// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
+		if strings.EqualFold(ccb.curBalancerName, name) {
+			return
+		}
+		ccb.buildLoadBalancingPolicy(name)
+	})
+}
+
+// buildLoadBalancingPolicy performs the following:
+//   - retrieve a balancer builder for the given name. Use the default LB
+//     policy, pick_first, if no LB policy with name is found in the registry.
+//   - instruct the gracefulswitch balancer to switch to the above builder. This
+//     will actually build the new balancer.
+//   - update the `curBalancerName` field
+//
+// Must be called from a serializer callback.
+func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
+	builder := balancer.Get(name)
+	if builder == nil {
+		channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
+		builder = newPickfirstBuilder()
+	} else {
+		channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
+	}
+
+	if err := ccb.balancer.SwitchTo(builder); err != nil {
+		channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
+		return
+	}
+	ccb.curBalancerName = builder.Name()
+}
+
+// close initiates async shutdown of the wrapper. cc.mu must be held when
+// calling this function. To determine whether the wrapper has finished
+// shutting down, callers should block on ccb.serializer.Done() without cc.mu held.
+func (ccb *ccBalancerWrapper) close() {
+	ccb.mu.Lock()
+	ccb.closed = true
+	ccb.mu.Unlock()
+	channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
+	ccb.serializer.Schedule(func(context.Context) {
+		if ccb.balancer == nil {
+			return
+		}
+		ccb.balancer.Close()
+		ccb.balancer = nil
+	})
+	ccb.serializerCancel()
+}
+
+// exitIdle invokes the balancer's exitIdle method in the serializer.
+func (ccb *ccBalancerWrapper) exitIdle() {
+	ccb.serializer.Schedule(func(ctx context.Context) {
+		if ctx.Err() != nil || ccb.balancer == nil {
+			return
+		}
+		ccb.balancer.ExitIdle()
+	})
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	ccb.cc.mu.Lock()
+	defer ccb.cc.mu.Unlock()
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
+		return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed")
+	}
+	ccb.mu.Unlock()
+
+	if len(addrs) == 0 {
+		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+	}
+	ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
+	if err != nil {
+		channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
+		return nil, err
+	}
+	acbw := &acBalancerWrapper{
+		ccb:           ccb,
+		ac:            ac,
+		producers:     make(map[balancer.ProducerBuilder]*refCountedProducer),
+		stateListener: opts.StateListener,
+	}
+	ac.acbw = acbw
+	return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// The graceful switch balancer will never call this.
+	logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc)
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	acbw, ok := sc.(*acBalancerWrapper)
+	if !ok {
+		return
+	}
+	acbw.UpdateAddresses(addrs)
+}
+
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+	ccb.cc.mu.Lock()
+	defer ccb.cc.mu.Unlock()
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
+		return
+	}
+	ccb.mu.Unlock()
+	// Update picker before updating state. Even though the ordering here does
+	// not matter, it can lead to multiple calls of Pick in the common start-up
+	// case where we wait for ready and then perform an RPC. If the picker is
+	// updated later, we could call the "connecting" picker when the state is
+	// updated, and then call the "ready" picker after the picker gets updated.
+
+	// Note that there is no need to check if the balancer wrapper was closed,
+	// as we know the graceful switch LB policy will not call cc if it has been
+	// closed.
+	ccb.cc.pickerWrapper.updatePicker(s.Picker)
+	ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
+	ccb.cc.mu.RLock()
+	defer ccb.cc.mu.RUnlock()
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
+		return
+	}
+	ccb.mu.Unlock()
+	ccb.cc.resolveNowLocked(o)
+}
+
+func (ccb *ccBalancerWrapper) Target() string {
+	return ccb.cc.target
+}
+
+// acBalancerWrapper is a wrapper on top of ac for balancers.
+// It implements balancer.SubConn interface.
+type acBalancerWrapper struct {
+	ac            *addrConn          // read-only
+	ccb           *ccBalancerWrapper // read-only
+	stateListener func(balancer.SubConnState)
+
+	mu        sync.Mutex
+	producers map[balancer.ProducerBuilder]*refCountedProducer
+}
+
+// updateState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
+	acbw.ccb.serializer.Schedule(func(ctx context.Context) {
+		if ctx.Err() != nil || acbw.ccb.balancer == nil {
+			return
+		}
+		// Even though it is optional for balancers, gracefulswitch ensures
+		// opts.StateListener is set, so this cannot ever be nil.
+		// TODO: delete this comment when UpdateSubConnState is removed.
+		acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
+	})
+}
+
+func (acbw *acBalancerWrapper) String() string {
+	return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
+}
+
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+	acbw.ac.updateAddrs(addrs)
+}
+
+func (acbw *acBalancerWrapper) Connect() {
+	go acbw.ac.connect()
+}
+
+func (acbw *acBalancerWrapper) Shutdown() {
+	acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+}
+
+// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
+// ready, blocks until it is or ctx expires. Returns an error when the context
+// expires or the addrConn is shut down.
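The stateListener plumbing above replaces the old updateSubConnState entry point: a balancer now receives subchannel state through the callback it passes in balancer.NewSubConnOptions, delivered from the channel's serializer. A hedged sketch of how a custom LB policy would wire this up; the package and helper name are hypothetical:

package lbdemo

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// newWatchedSubConn creates a SubConn whose state updates arrive via the
// StateListener callback, mirroring acbw.updateState above.
func newWatchedSubConn(cc balancer.ClientConn, addrs []resolver.Address,
	onState func(balancer.SubConnState)) (balancer.SubConn, error) {
	sc, err := cc.NewSubConn(addrs, balancer.NewSubConnOptions{
		StateListener: onState, // invoked from the channel's serializer
	})
	if err != nil {
		return nil, err
	}
	sc.Connect() // kick off the first connection attempt
	return sc, nil
}

The SubConn-level Invoke helper follows.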
+func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ec2c2fa14d..e9e97d4511 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto @@ -430,7 +430,7 @@ type ClientHeader struct { MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` // A single process may be used to run multiple virtual // servers with different identities. - // The authority is the name of such a server identitiy. + // The authority is the name of such a server identity. // It is typically a portion of the URI in the form of // or : . Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index e6a1dc5d75..788c89c16f 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -26,12 +26,7 @@ import ( // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return err - } - defer cc.idlenessMgr.onCallEnd() - +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -61,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { // received. This is typically called by generated code. 
// // DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 95a7459b02..f6e815e6bf 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -33,10 +33,11 @@ import ( "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -45,16 +46,14 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. ) const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" ) var ( @@ -118,48 +117,20 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. -// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. -// -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. -func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { +// newClient returns a new client in idle mode. 
+func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, - csMgr: &connectivityStateManager{}, conns: make(map[*addrConn]struct{}), dopts: defaultDialOptions(), czData: new(channelzData), } - // We start the channel off in idle mode, but kick it out of idle at the end - // of this method, instead of waiting for the first RPC. Other gRPC - // implementations do wait for the first RPC to kick the channel out of - // idle. But doing so would be a major behavior change for our users who are - // used to seeing the channel active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, if at all we ever get to do that. - cc.idlenessState = ccIdlenessStateIdle - cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.exitIdleCond = sync.NewCond(&cc.mu) + // Apply dial options. disableGlobalOpts := false for _, opt := range opts { if _, ok := opt.(*disableGlobalDialOptions); ok { @@ -177,19 +148,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * for _, opt := range opts { opt.apply(&cc.dopts) } - chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) - defer func() { - if err != nil { - cc.Close() - } - }() - - // Register ClientConn with channelz. - cc.channelzRegistration(target) - if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -203,10 +164,80 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA + // Register ClientConn with channelz. + cc.channelzRegistration(target) + + // TODO: Ideally it should be impossible to error from this function after + // channelz registration. This will require removing some channelz logs + // from the following functions that can error. Errors can be returned to + // the user, and successful logs can be emitted here, after the checks have + // passed and channelz is subsequently registered. + + // Determine the resolver to use. + if err := cc.parseTargetAndFindResolver(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + if err = cc.determineAuthority(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. 
Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc, err := newClient(target, opts...) + if err != nil { + return nil, err + } + + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. Other gRPC implementations do wait + // for the first RPC to kick the channel out of idle. But doing so would be + // a major behavior change for our users who are used to seeing the channel + // active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, i.e. by making newClient exported. + + defer func() { + if err != nil { + cc.Close() + } + }() + + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err + } + + // Return now for non-blocking dials. + if !cc.dopts.block { + return cc, nil } if cc.dopts.timeout > 0 { @@ -229,49 +260,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.DefaultExponential - } - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - return nil, err - } - if err = cc.determineAuthority(); err != nil { - return nil, err - } - - if cc.dopts.scChan != nil { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } - - // This creates the name resolver, load balancer, blocking picker etc. - if err := cc.exitIdleMode(); err != nil { - return nil, err - } - - // Configure idleness support with configured idle timeout or default idle - // timeout duration. Idleness can be explicitly disabled by the user, by - // setting the dial option to 0. - cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) - - // Return early for non-blocking dials. - if !cc.dopts.block { - return cc, nil - } - // A blocking dial blocks until the clientConn is ready. for { s := cc.GetState() @@ -316,117 +304,82 @@ func (cc *ClientConn) addTraceEvent(msg string) { channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) } +type idler ClientConn + +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + // exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. -func (cc *ClientConn) exitIdleMode() error { +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. 
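For contrast, the two dialing modes documented above look like this from application code; a minimal sketch that assumes only a placeholder target address and the public Dial API:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Non-blocking dial (the default): DialContext returns immediately and
	// the channel connects in the background.
	cc, err := grpc.DialContext(context.Background(), "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	// Blocking dial: ctx bounds only the connection setup.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	bcc, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock())
	if err != nil {
		log.Fatal(err) // e.g. deadline exceeded if nothing is listening
	}
	defer bcc.Close()
}

The exitIdleMode method that ExitIdleMode ultimately delegates to follows.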
+func (cc *ClientConn) exitIdleMode() (err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return errConnClosing } - if cc.idlenessState != ccIdlenessStateIdle { - cc.mu.Unlock() - logger.Info("ClientConn asked to exit idle mode when not in idle mode") - return nil - } - - defer func() { - // When Close() and exitIdleMode() race against each other, one of the - // following two can happen: - // - Close() wins the race and runs first. exitIdleMode() runs after, and - // sees that the ClientConn is already closed and hence returns early. - // - exitIdleMode() wins the race and runs first and recreates the balancer - // and releases the lock before recreating the resolver. If Close() runs - // in this window, it will wait for exitIdleMode to complete. - // - // We achieve this synchronization using the below condition variable. - cc.mu.Lock() - cc.idlenessState = ccIdlenessStateActive - cc.exitIdleCond.Signal() - cc.mu.Unlock() - }() - - cc.idlenessState = ccIdlenessStateExitingIdle - exitedIdle := false - if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper() - } else { - cc.blockingpicker.exitIdleMode() - exitedIdle = true - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - if cc.balancerWrapper == nil { - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) - } else { - cc.balancerWrapper.exitIdleMode() - } - cc.firstResolveEvent = grpcsync.NewEvent() cc.mu.Unlock() // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline which needs to be handled - // by cc.updateResolverState() which also grabs cc.mu. - if err := cc.initResolverWrapper(credsClone); err != nil { + // which might update state or report error inline, which would then need to + // acquire cc.mu. + if err := cc.resolverWrapper.start(); err != nil { return err } - if exitedIdle { - cc.addTraceEvent("exiting idle mode") - } + cc.addTraceEvent("exiting idle mode") return nil } +// initIdleStateLocked initializes common state to how it should be while idle. +func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + cc.conns = make(map[*addrConn]struct{}) +} + // enterIdleMode puts the channel in idle mode, and as part of it shuts down the -// name resolver, load balancer and any subchannels. -func (cc *ClientConn) enterIdleMode() error { +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { cc.mu.Lock() + if cc.conns == nil { cc.mu.Unlock() - return ErrClientConnClosing - } - if cc.idlenessState != ccIdlenessStateActive { - logger.Error("ClientConn asked to enter idle mode when not active") - return nil + return } - // cc.conns == nil is a proxy for the ClientConn being closed. 
So, instead
-	// of setting it to nil here, we recreate the map. This also means that we
-	// don't have to do this when exiting idle mode.
 	conns := cc.conns
-	cc.conns = make(map[*addrConn]struct{})
-	// TODO: Currently, we close the resolver wrapper upon entering idle mode
-	// and create a new one upon exiting idle mode. This means that the
-	// `cc.resolverWrapper` field would be overwritten everytime we exit idle
-	// mode. While this means that we need to hold `cc.mu` when accessing
-	// `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should
-	// try to do the same for the balancer and picker wrappers too.
-	cc.resolverWrapper.close()
-	cc.blockingpicker.enterIdleMode()
-	cc.balancerWrapper.enterIdleMode()
+	rWrapper := cc.resolverWrapper
+	rWrapper.close()
+	cc.pickerWrapper.reset()
+	bWrapper := cc.balancerWrapper
+	bWrapper.close()
 	cc.csMgr.updateState(connectivity.Idle)
-	cc.idlenessState = ccIdlenessStateIdle
+	cc.addTraceEvent("entering idle mode")
+
+	cc.initIdleStateLocked()
+
 	cc.mu.Unlock()
-	go func() {
-		cc.addTraceEvent("entering idle mode")
-		for ac := range conns {
-			ac.tearDown(errConnIdling)
-		}
-	}()
-	return nil
+	// Block until the name resolver and LB policy are closed.
+	<-rWrapper.serializer.Done()
+	<-bWrapper.serializer.Done()
+
+	// Close all subchannels after the LB policy is closed.
+	for ac := range conns {
+		ac.tearDown(errConnIdling)
+	}
 }
 
 // validateTransportCredentials performs a series of checks on the configured
@@ -474,7 +427,6 @@ func (cc *ClientConn) validateTransportCredentials() error {
 func (cc *ClientConn) channelzRegistration(target string) {
 	cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
 	cc.addTraceEvent("created")
-	cc.csMgr.channelzID = cc.channelzID
 }
 
 // chainUnaryClientInterceptors chains all unary client interceptors into one.
@@ -491,7 +443,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) {
 	} else if len(interceptors) == 1 {
 		chainedInt = interceptors[0]
 	} else {
-		chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
+		chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
 			return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
 		}
 	}
@@ -503,7 +455,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final
 	if curr == len(interceptors)-1 {
 		return finalInvoker
 	}
-	return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+	return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
 		return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
 	}
 }
@@ -539,13 +491,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr
 	}
 }
 
+// newConnectivityStateManager creates a connectivityStateManager with
+// the specified id.
+func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager {
+	return &connectivityStateManager{
+		channelzID: id,
+		pubSub:     grpcsync.NewPubSub(ctx),
+	}
+}
+
 // connectivityStateManager keeps the connectivity.State of ClientConn.
 // This struct will eventually be exported so the balancers can access it.
+// +// TODO: If possible, get rid of the `connectivityStateManager` type, and +// provide this functionality using the `PubSub`, to avoid keeping track of +// the connectivity state at two places. type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} channelzID *channelz.Identifier + pubSub *grpcsync.PubSub } // updateState updates the connectivity.State of ClientConn. @@ -561,6 +527,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.pubSub.Publish(state) + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. @@ -590,7 +558,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { type ClientConnInterface interface { // Invoke performs a unary RPC and returns after the response is received // into reply. - Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error // NewStream begins a streaming RPC. NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) } @@ -621,53 +589,35 @@ type ClientConn struct { dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idlenessManager + idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. csMgr *connectivityStateManager - blockingpicker *pickerWrapper + pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector czData *channelzData retryThrottler atomic.Value // Updated from service config. - // firstResolveEvent is used to track whether the name resolver sent us at - // least one update. RPCs block on this event. - firstResolveEvent *grpcsync.Event - // mu protects the following fields. // TODO: split mu so the same mutex isn't used for everything. mu sync.RWMutex - resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - idlenessState ccIdlenessState // Tracks idleness state of the channel. - exitIdleCond *sync.Cond // Signalled when channel exits idle. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. May be accessed without mu + // if we know we cannot be asked to enter idle mode while accessing it (e.g. + // when the idle manager has already been closed, or if we are already + // entering idle mode). + firstResolveEvent *grpcsync.Event lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } -// ccIdlenessState tracks the idleness state of the channel. -// -// Channels start off in `active` and move to `idle` after a period of -// inactivity. 
When moving back to `active` upon an incoming RPC, they -// transition through `exiting_idle`. This state is useful for synchronization -// with Close(). -// -// This state tracking is mostly for self-protection. The idlenessManager is -// expected to keep track of the state as well, and is expected not to call into -// the ClientConn unnecessarily. -type ccIdlenessState int8 - -const ( - ccIdlenessStateActive ccIdlenessState = iota - ccIdlenessStateIdle - ccIdlenessStateExitingIdle -) - // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -707,29 +657,15 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.exitIdleMode() + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + cc.addTraceEvent(err.Error()) + return + } // If the ClientConn was not in idle mode, we need to call ExitIdle on the // LB policy so that connections can be created. - cc.balancerWrapper.exitIdleMode() -} - -func (cc *ClientConn) scWatcher() { - for { - select { - case sc, ok := <-cc.dopts.scChan: - if !ok { - return - } - cc.mu.Lock() - // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - cc.mu.Unlock() - case <-cc.ctx.Done(): - return - } - } + cc.mu.Lock() + cc.balancerWrapper.exitIdle() + cc.mu.Unlock() } // waitForResolvedAddrs blocks until the resolver has provided addresses or the @@ -759,6 +695,16 @@ func init() { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.idlenessMgr.ExitIdleMode() + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -773,9 +719,8 @@ func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { } } -func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { defer cc.firstResolveEvent.Fire() - cc.mu.Lock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. @@ -821,7 +766,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { if cc.sc == nil { // Apply the failing LB only if we haven't received valid service config // from the name resolver in the past. - cc.applyFailingLB(s.ServiceConfig) + cc.applyFailingLBLocked(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -843,15 +788,13 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// applyFailingLB is akin to configuring an LB policy on the channel which +// applyFailingLBLocked is akin to configuring an LB policy on the channel which // always fails RPCs. 
Here, an actual LB policy is not configured, but an always // erroring picker is configured, which returns errors with information about // what was invalid in the received service config. A config selector with no // service config is configured, and the connectivity state of the channel is // set to TransientFailure. -// -// Caller must hold cc.mu. -func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { +func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { var err error if sc.Err != nil { err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) @@ -859,22 +802,36 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.balancerWrapper.updateSubConnState(sc, s, err) +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out } -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { +func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + if cc.conns == nil { + return nil, ErrClientConnClosing + } + ac := &addrConn{ state: connectivity.Idle, cc: cc, - addrs: addrs, + addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, czData: new(channelzData), @@ -882,12 +839,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.conns == nil { - return nil, ErrClientConnClosing - } var err error ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") @@ -903,6 +854,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub }, }) + // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.conns[ac] = struct{}{} return ac, nil } @@ -995,8 +947,9 @@ func equalAddresses(a, b []resolver.Address) bool { // connections or connection attempts. 
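The reworked Connect shown a little above only kicks the channel out of idle; callers that need to block until the channel is actually usable can poll the same connectivity states the state manager publishes. A small sketch using only public APIs (the target address is a placeholder, and GetState/WaitForStateChange are experimental):

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

// waitReady drives the channel out of idle and waits until it reports READY.
func waitReady(ctx context.Context, cc *grpc.ClientConn) error {
	cc.Connect() // kicks the channel out of idle, as described above
	for {
		s := cc.GetState()
		if s == connectivity.Ready {
			return nil
		}
		if !cc.WaitForStateChange(ctx, s) {
			return ctx.Err() // ctx expired before the state changed
		}
	}
}

func main() {
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	fmt.Println("ready:", waitReady(ctx, cc))
}

The addrConn.updateAddrs hunk continues below.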
func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + addrs = copyAddressesWithoutBalancerAttributes(addrs) if equalAddresses(ac.addrs, addrs) { ac.mu.Unlock() return @@ -1031,8 +984,8 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.cancel() ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) - // We have to defer here because GracefulClose => Close => onClose, which - // requires locking ac.mu. + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. if ac.transport != nil { defer ac.transport.GracefulClose() ac.transport = nil @@ -1108,7 +1061,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) @@ -1137,35 +1090,25 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { + // No service config or no LB policy specified in config. + newBalancerName = PickFirstBalancerName + } else if cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName - } + } else { // cc.sc.LB != nil + newBalancerName = *cc.sc.LB } cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() - r := cc.resolverWrapper + cc.resolverWrapper.resolveNow(o) cc.mu.RUnlock() - if r == nil { - return - } - go r.resolveNow(o) +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes @@ -1192,7 +1135,14 @@ func (cc *ClientConn) ResetConnectBackoff() { // Close tears down the ClientConn and all underlying connections. func (cc *ClientConn) Close() error { - defer cc.cancel() + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() + + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() cc.mu.Lock() if cc.conns == nil { @@ -1200,34 +1150,22 @@ func (cc *ClientConn) Close() error { return ErrClientConnClosing } - for cc.idlenessState == ccIdlenessStateExitingIdle { - cc.exitIdleCond.Wait() - } - conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - pWrapper := cc.blockingpicker - rWrapper := cc.resolverWrapper - bWrapper := cc.balancerWrapper - idlenessMgr := cc.idlenessMgr + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. 
cc.mu.Unlock() + cc.resolverWrapper.close() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - if pWrapper != nil { - pWrapper.close() - } - if bWrapper != nil { - bWrapper.close() - } - if rWrapper != nil { - rWrapper.close() - } - if idlenessMgr != nil { - idlenessMgr.close() - } + cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1248,7 +1186,7 @@ type addrConn struct { cc *ClientConn dopts dialOptions - acbw balancer.SubConn + acbw *acBalancerWrapper scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel @@ -1286,7 +1224,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) + ac.acbw.updateState(s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1336,12 +1274,14 @@ func (ac *addrConn) resetTransport() { if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + ac.mu.Lock() if acCtx.Err() != nil { + // addrConn was torn down. + ac.mu.Unlock() return } - ac.mu.Lock() + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. @@ -1537,7 +1477,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // Set up the health check helper functions. currentTr := ac.transport - newStream := func(method string) (interface{}, error) { + newStream := func(method string) (any, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() @@ -1625,16 +1565,7 @@ func (ac *addrConn) tearDown(err error) { ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. - ac.mu.Unlock() - curTr.GracefulClose() - ac.mu.Lock() - } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, @@ -1648,6 +1579,29 @@ func (ac *addrConn) tearDown(err error) { // being deleted right away. channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. 
In the case where the channel is being shutdown,
+			// closing of transports is also taken care of by cancelation of cc.ctx.
+			// But in the case where the channel is entering idle, we need to
+			// explicitly close the transports here. Instead of distinguishing
+			// between these two cases, it is simpler to close the transport
+			// unconditionally here.
+			curTr.Close(err)
+		}
+	}
 }
 
 func (ac *addrConn) getState() connectivity.State {
@@ -1774,7 +1728,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() error {
 	if err != nil {
 		channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
 	} else {
-		channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
+		channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget)
 		rb = cc.getResolver(parsedTarget.URL.Scheme)
 		if rb != nil {
 			cc.parsedTarget = parsedTarget
@@ -1807,19 +1761,70 @@ func (cc *ClientConn) parseTargetAndFindResolver() error {
 }
 
 // parseTarget uses RFC 3986 semantics to parse the given target into a
-// resolver.Target struct containing scheme, authority and url. Query
-// params are stripped from the endpoint.
+// resolver.Target struct containing url. Query params are stripped from the
+// endpoint.
 func parseTarget(target string) (resolver.Target, error) {
 	u, err := url.Parse(target)
 	if err != nil {
 		return resolver.Target{}, err
 	}
 
-	return resolver.Target{
-		Scheme:    u.Scheme,
-		Authority: u.Host,
-		URL:       *u,
-	}, nil
+	return resolver.Target{URL: *u}, nil
+}
+
+func encodeAuthority(authority string) string {
+	const upperhex = "0123456789ABCDEF"
+
+	// Returns true for characters that must be escaped. The characters that
+	// are valid in an authority are listed here:
+	// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
+	shouldEscape := func(c byte) bool {
+		// Alphanum are always allowed.
+		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+			return false
+		}
+		switch c {
+		case '-', '_', '.', '~': // Unreserved characters
+			return false
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
+			return false
+		case ':', '[', ']', '@': // Authority related delimiters
+			return false
+		}
+		// Everything else must be escaped.
+		return true
+	}
+
+	hexCount := 0
+	for i := 0; i < len(authority); i++ {
+		c := authority[i]
+		if shouldEscape(c) {
+			hexCount++
+		}
+	}
+
+	if hexCount == 0 {
+		return authority
+	}
+
+	required := len(authority) + 2*hexCount
+	t := make([]byte, required)
+
+	j := 0
+	// This logic is a barebones version of escape in the go net/url library.
+	for i := 0; i < len(authority); i++ {
+		switch c := authority[i]; {
+		case shouldEscape(c):
+			t[j] = '%'
+			t[j+1] = upperhex[c>>4]
+			t[j+2] = upperhex[c&15]
+			j += 3
+		default:
+			t[j] = authority[i]
+			j++
+		}
+	}
+	return string(t)
+}
 
 // Determine channel authority. The order of precedence is as follows:
@@ -1855,54 +1860,17 @@ func (cc *ClientConn) determineAuthority() error {
 	}
 	endpoint := cc.parsedTarget.Endpoint()
-	target := cc.target
-	switch {
-	case authorityFromDialOption != "":
+	if authorityFromDialOption != "" {
 		cc.authority = authorityFromDialOption
-	case authorityFromCreds != "":
+	} else if authorityFromCreds != "" {
 		cc.authority = authorityFromCreds
-	case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
-		// TODO: remove when the unix resolver implements optional interface to
-		// return channel authority.
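// Aside: a self-contained illustration of the percent-encoding behavior that
// the new encodeAuthority above implements. mustEscape mirrors its character
// classes but is a hypothetical stand-in, not the unexported function itself.

package main

import "fmt"

// mustEscape reports whether an authority byte must be percent-encoded:
// alphanumerics, unreserved marks, sub-delims, and the authority delimiters
// ':' '[' ']' '@' all pass through unescaped (RFC 3986 section 3.2).
func mustEscape(c byte) bool {
	if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
		return false
	}
	switch c {
	case '-', '_', '.', '~',
		'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=',
		':', '[', ']', '@':
		return false
	}
	return true
}

func main() {
	const hex = "0123456789ABCDEF"
	in := "user@host:50051/a b" // '/' and ' ' are not valid authority bytes
	out := make([]byte, 0, len(in))
	for i := 0; i < len(in); i++ {
		if c := in[i]; mustEscape(c) {
			out = append(out, '%', hex[c>>4], hex[c&15])
		} else {
			out = append(out, c)
		}
	}
	fmt.Println(string(out)) // user@host:50051%2Fa%20b
}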
- cc.authority = "localhost" - case strings.HasPrefix(endpoint, ":"): + } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { + cc.authority = auth.OverrideAuthority(cc.parsedTarget) + } else if strings.HasPrefix(endpoint, ":") { cc.authority = "localhost" + endpoint - default: - // TODO: Define an optional interface on the resolver builder to return - // the channel authority given the user's dial target. For resolvers - // which don't implement this interface, we will use the endpoint from - // "scheme://authority/endpoint" as the default authority. - cc.authority = endpoint + } else { + cc.authority = encodeAuthority(endpoint) } channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil } - -// initResolverWrapper creates a ccResolverWrapper, which builds the name -// resolver. This method grabs the lock to assign the newly built resolver -// wrapper to the cc.resolverWrapper field. -func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { - rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ - target: cc.parsedTarget, - builder: cc.resolverBuilder, - bOpts: resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: creds, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - }, - channelzID: cc.channelzID, - }) - if err != nil { - return fmt.Errorf("failed to build resolver: %v", err) - } - // Resolver implementations may report state update or error inline when - // built (or right after), and this is handled in cc.updateResolverState. - // Also, an error from the resolver might lead to a re-resolution request - // from the balancer, which is handled in resolveNow() where - // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. - cc.mu.Lock() - cc.resolverWrapper = rw - cc.mu.Unlock() - return nil -} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 1297765478..411e3dfd47 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -27,8 +27,8 @@ import ( // omits the name/string, which vary between the two and are not needed for // anything besides the registry in the encoding package. type baseCodec interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error } var _ baseCodec = Codec(nil) @@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil) // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // String returns the name of the Codec implementation. This is unused by // gRPC. String() string diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 11b106182d..08476ad1fe 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -25,7 +25,13 @@ import ( "strconv" ) -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. 
Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. +// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md type Code uint32 const ( diff --git a/vendor/google.golang.org/grpc/credentials/alts/alts.go b/vendor/google.golang.org/grpc/credentials/alts/alts.go deleted file mode 100644 index 579adf210c..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/alts.go +++ /dev/null @@ -1,332 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package alts implements the ALTS credential support by gRPC library, which -// encapsulates all the state needed by a client to authenticate with a server -// using ALTS and make various assertions, e.g., about the client's identity, -// role, or whether it is authorized to make a particular call. -// This package is experimental. -package alts - -import ( - "context" - "errors" - "fmt" - "net" - "sync" - "time" - - "google.golang.org/grpc/credentials" - core "google.golang.org/grpc/credentials/alts/internal" - "google.golang.org/grpc/credentials/alts/internal/handshaker" - "google.golang.org/grpc/credentials/alts/internal/handshaker/service" - altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/googlecloud" -) - -const ( - // hypervisorHandshakerServiceAddress represents the default ALTS gRPC - // handshaker service address in the hypervisor. - hypervisorHandshakerServiceAddress = "metadata.google.internal.:8080" - // defaultTimeout specifies the server handshake timeout. - defaultTimeout = 30.0 * time.Second - // The following constants specify the minimum and maximum acceptable - // protocol versions. - protocolVersionMaxMajor = 2 - protocolVersionMaxMinor = 1 - protocolVersionMinMajor = 2 - protocolVersionMinMinor = 1 -) - -var ( - vmOnGCP bool - once sync.Once - maxRPCVersion = &altspb.RpcProtocolVersions_Version{ - Major: protocolVersionMaxMajor, - Minor: protocolVersionMaxMinor, - } - minRPCVersion = &altspb.RpcProtocolVersions_Version{ - Major: protocolVersionMinMajor, - Minor: protocolVersionMinMinor, - } - // ErrUntrustedPlatform is returned from ClientHandshake and - // ServerHandshake is running on a platform where the trustworthiness of - // the handshaker service is not guaranteed. - ErrUntrustedPlatform = errors.New("ALTS: untrusted platform. ALTS is only supported on GCP") - logger = grpclog.Component("alts") -) - -// AuthInfo exposes security information from the ALTS handshake to the -// application. This interface is to be implemented by ALTS. Users should not -// need a brand new implementation of this interface. For situations like -// testing, any new implementation should embed this interface. This allows -// ALTS to add new methods to this interface. -type AuthInfo interface { - // ApplicationProtocol returns application protocol negotiated for the - // ALTS connection. 
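// Aside: how a server would typically consume the AuthInfo interface being
// defined here. A hedged sketch assuming the standard (non-vendored) alts and
// peer packages; the authorize helper and its allow-list are hypothetical.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/credentials/alts"
	"google.golang.org/grpc/peer"
)

func authorize(ctx context.Context, allowed map[string]bool) error {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return fmt.Errorf("no peer information in context")
	}
	ai, ok := p.AuthInfo.(alts.AuthInfo) // the interface defined above
	if !ok {
		return fmt.Errorf("connection is not using ALTS")
	}
	if !allowed[ai.PeerServiceAccount()] {
		return fmt.Errorf("service account %q is not allowed", ai.PeerServiceAccount())
	}
	return nil
}

func main() {}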
- ApplicationProtocol() string - // RecordProtocol returns the record protocol negotiated for the ALTS - // connection. - RecordProtocol() string - // SecurityLevel returns the security level of the created ALTS secure - // channel. - SecurityLevel() altspb.SecurityLevel - // PeerServiceAccount returns the peer service account. - PeerServiceAccount() string - // LocalServiceAccount returns the local service account. - LocalServiceAccount() string - // PeerRPCVersions returns the RPC version supported by the peer. - PeerRPCVersions() *altspb.RpcProtocolVersions -} - -// ClientOptions contains the client-side options of an ALTS channel. These -// options will be passed to the underlying ALTS handshaker. -type ClientOptions struct { - // TargetServiceAccounts contains a list of expected target service - // accounts. - TargetServiceAccounts []string - // HandshakerServiceAddress represents the ALTS handshaker gRPC service - // address to connect to. - HandshakerServiceAddress string -} - -// DefaultClientOptions creates a new ClientOptions object with the default -// values. -func DefaultClientOptions() *ClientOptions { - return &ClientOptions{ - HandshakerServiceAddress: hypervisorHandshakerServiceAddress, - } -} - -// ServerOptions contains the server-side options of an ALTS channel. These -// options will be passed to the underlying ALTS handshaker. -type ServerOptions struct { - // HandshakerServiceAddress represents the ALTS handshaker gRPC service - // address to connect to. - HandshakerServiceAddress string -} - -// DefaultServerOptions creates a new ServerOptions object with the default -// values. -func DefaultServerOptions() *ServerOptions { - return &ServerOptions{ - HandshakerServiceAddress: hypervisorHandshakerServiceAddress, - } -} - -// altsTC is the credentials required for authenticating a connection using ALTS. -// It implements credentials.TransportCredentials interface. -type altsTC struct { - info *credentials.ProtocolInfo - side core.Side - accounts []string - hsAddress string -} - -// NewClientCreds constructs a client-side ALTS TransportCredentials object. -func NewClientCreds(opts *ClientOptions) credentials.TransportCredentials { - return newALTS(core.ClientSide, opts.TargetServiceAccounts, opts.HandshakerServiceAddress) -} - -// NewServerCreds constructs a server-side ALTS TransportCredentials object. -func NewServerCreds(opts *ServerOptions) credentials.TransportCredentials { - return newALTS(core.ServerSide, nil, opts.HandshakerServiceAddress) -} - -func newALTS(side core.Side, accounts []string, hsAddress string) credentials.TransportCredentials { - once.Do(func() { - vmOnGCP = googlecloud.OnGCE() - }) - if hsAddress == "" { - hsAddress = hypervisorHandshakerServiceAddress - } - return &altsTC{ - info: &credentials.ProtocolInfo{ - SecurityProtocol: "alts", - SecurityVersion: "1.0", - }, - side: side, - accounts: accounts, - hsAddress: hsAddress, - } -} - -// ClientHandshake implements the client side handshake protocol. -func (g *altsTC) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { - if !vmOnGCP { - return nil, nil, ErrUntrustedPlatform - } - - // Connecting to ALTS handshaker service. - hsConn, err := service.Dial(g.hsAddress) - if err != nil { - return nil, nil, err - } - // Do not close hsConn since it is shared with other handshakes. - - // Possible context leak: - // The cancel function for the child context we create will only be - // called a non-nil error is returned. 
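// Aside: the "cancel only on error" defer used just below is worth seeing in
// isolation: the child context must outlive the function on success because
// its lifetime is tied to the returned connection. A minimal sketch with
// hypothetical names (setup, doWork):

package main

import "context"

func setup(ctx context.Context) (result string, err error) {
	ctx, cancel := context.WithCancel(ctx)
	defer func() {
		if err != nil {
			cancel() // release the child context only on failure
		}
	}()

	if err = doWork(ctx); err != nil {
		return "", err
	}
	// On success the caller owns whatever is bound to ctx and cancels later.
	return "ok", nil
}

func doWork(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}

func main() {
	_, _ = setup(context.Background())
}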
- var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer func() { - if err != nil { - cancel() - } - }() - - opts := handshaker.DefaultClientHandshakerOptions() - opts.TargetName = addr - opts.TargetServiceAccounts = g.accounts - opts.RPCVersions = &altspb.RpcProtocolVersions{ - MaxRpcVersion: maxRPCVersion, - MinRpcVersion: minRPCVersion, - } - chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, opts) - if err != nil { - return nil, nil, err - } - defer func() { - if err != nil { - chs.Close() - } - }() - secConn, authInfo, err := chs.ClientHandshake(ctx) - if err != nil { - return nil, nil, err - } - altsAuthInfo, ok := authInfo.(AuthInfo) - if !ok { - return nil, nil, errors.New("client-side auth info is not of type alts.AuthInfo") - } - match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) - if !match { - return nil, nil, fmt.Errorf("server-side RPC versions are not compatible with this client, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) - } - return secConn, authInfo, nil -} - -// ServerHandshake implements the server side ALTS handshaker. -func (g *altsTC) ServerHandshake(rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { - if !vmOnGCP { - return nil, nil, ErrUntrustedPlatform - } - // Connecting to ALTS handshaker service. - hsConn, err := service.Dial(g.hsAddress) - if err != nil { - return nil, nil, err - } - // Do not close hsConn since it's shared with other handshakes. - - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - opts := handshaker.DefaultServerHandshakerOptions() - opts.RPCVersions = &altspb.RpcProtocolVersions{ - MaxRpcVersion: maxRPCVersion, - MinRpcVersion: minRPCVersion, - } - shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, opts) - if err != nil { - return nil, nil, err - } - defer func() { - if err != nil { - shs.Close() - } - }() - secConn, authInfo, err := shs.ServerHandshake(ctx) - if err != nil { - return nil, nil, err - } - altsAuthInfo, ok := authInfo.(AuthInfo) - if !ok { - return nil, nil, errors.New("server-side auth info is not of type alts.AuthInfo") - } - match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) - if !match { - return nil, nil, fmt.Errorf("client-side RPC versions is not compatible with this server, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) - } - return secConn, authInfo, nil -} - -func (g *altsTC) Info() credentials.ProtocolInfo { - return *g.info -} - -func (g *altsTC) Clone() credentials.TransportCredentials { - info := *g.info - var accounts []string - if g.accounts != nil { - accounts = make([]string, len(g.accounts)) - copy(accounts, g.accounts) - } - return &altsTC{ - info: &info, - side: g.side, - hsAddress: g.hsAddress, - accounts: accounts, - } -} - -func (g *altsTC) OverrideServerName(serverNameOverride string) error { - g.info.ServerName = serverNameOverride - return nil -} - -// compareRPCVersion returns 0 if v1 == v2, 1 if v1 > v2 and -1 if v1 < v2. 
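// Aside: the version negotiation implemented by compareRPCVersions and
// checkRPCVersions below reduces to an interval intersection: the common
// window is [MAX(local.min, peer.min), MIN(local.max, peer.max)], and the
// handshake is compatible only if that window is non-empty. A sketch with a
// simplified version type:

package main

import "fmt"

type version struct{ major, minor uint32 }

func cmp(a, b version) int {
	switch {
	case a.major > b.major, a.major == b.major && a.minor > b.minor:
		return 1
	case a.major < b.major, a.major == b.major && a.minor < b.minor:
		return -1
	}
	return 0
}

func common(minA, maxA, minB, maxB version) (version, bool) {
	hi := maxA // MIN of the two maxima
	if cmp(maxA, maxB) > 0 {
		hi = maxB
	}
	lo := minA // MAX of the two minima
	if cmp(minB, minA) > 0 {
		lo = minB
	}
	if cmp(hi, lo) < 0 {
		return version{}, false // no overlap: incompatible peers
	}
	return hi, true // highest version both sides support
}

func main() {
	v, ok := common(version{2, 1}, version{2, 1}, version{2, 0}, version{2, 2})
	fmt.Println(v, ok) // {2 1} true
}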
-func compareRPCVersions(v1, v2 *altspb.RpcProtocolVersions_Version) int { - switch { - case v1.GetMajor() > v2.GetMajor(), - v1.GetMajor() == v2.GetMajor() && v1.GetMinor() > v2.GetMinor(): - return 1 - case v1.GetMajor() < v2.GetMajor(), - v1.GetMajor() == v2.GetMajor() && v1.GetMinor() < v2.GetMinor(): - return -1 - } - return 0 -} - -// checkRPCVersions performs a version check between local and peer rpc protocol -// versions. This function returns true if the check passes which means both -// parties agreed on a common rpc protocol to use, and false otherwise. The -// function also returns the highest common RPC protocol version both parties -// agreed on. -func checkRPCVersions(local, peer *altspb.RpcProtocolVersions) (bool, *altspb.RpcProtocolVersions_Version) { - if local == nil || peer == nil { - logger.Error("invalid checkRPCVersions argument, either local or peer is nil.") - return false, nil - } - - // maxCommonVersion is MIN(local.max, peer.max). - maxCommonVersion := local.GetMaxRpcVersion() - if compareRPCVersions(local.GetMaxRpcVersion(), peer.GetMaxRpcVersion()) > 0 { - maxCommonVersion = peer.GetMaxRpcVersion() - } - - // minCommonVersion is MAX(local.min, peer.min). - minCommonVersion := peer.GetMinRpcVersion() - if compareRPCVersions(local.GetMinRpcVersion(), peer.GetMinRpcVersion()) > 0 { - minCommonVersion = local.GetMinRpcVersion() - } - - if compareRPCVersions(maxCommonVersion, minCommonVersion) < 0 { - return false, nil - } - return true, maxCommonVersion -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go deleted file mode 100644 index ebea57da1d..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package authinfo provide authentication information returned by handshakers. -package authinfo - -import ( - "google.golang.org/grpc/credentials" - altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" -) - -var _ credentials.AuthInfo = (*altsAuthInfo)(nil) - -// altsAuthInfo exposes security information from the ALTS handshake to the -// application. altsAuthInfo is immutable and implements credentials.AuthInfo. -type altsAuthInfo struct { - p *altspb.AltsContext - credentials.CommonAuthInfo -} - -// New returns a new altsAuthInfo object given handshaker results. -func New(result *altspb.HandshakerResult) credentials.AuthInfo { - return newAuthInfo(result) -} - -func newAuthInfo(result *altspb.HandshakerResult) *altsAuthInfo { - return &altsAuthInfo{ - p: &altspb.AltsContext{ - ApplicationProtocol: result.GetApplicationProtocol(), - RecordProtocol: result.GetRecordProtocol(), - // TODO: assign security level from result. 
- SecurityLevel: altspb.SecurityLevel_INTEGRITY_AND_PRIVACY, - PeerServiceAccount: result.GetPeerIdentity().GetServiceAccount(), - LocalServiceAccount: result.GetLocalIdentity().GetServiceAccount(), - PeerRpcVersions: result.GetPeerRpcVersions(), - PeerAttributes: result.GetPeerIdentity().GetAttributes(), - }, - CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, - } -} - -// AuthType identifies the context as providing ALTS authentication information. -func (s *altsAuthInfo) AuthType() string { - return "alts" -} - -// ApplicationProtocol returns the context's application protocol. -func (s *altsAuthInfo) ApplicationProtocol() string { - return s.p.GetApplicationProtocol() -} - -// RecordProtocol returns the context's record protocol. -func (s *altsAuthInfo) RecordProtocol() string { - return s.p.GetRecordProtocol() -} - -// SecurityLevel returns the context's security level. -func (s *altsAuthInfo) SecurityLevel() altspb.SecurityLevel { - return s.p.GetSecurityLevel() -} - -// PeerServiceAccount returns the context's peer service account. -func (s *altsAuthInfo) PeerServiceAccount() string { - return s.p.GetPeerServiceAccount() -} - -// LocalServiceAccount returns the context's local service account. -func (s *altsAuthInfo) LocalServiceAccount() string { - return s.p.GetLocalServiceAccount() -} - -// PeerRPCVersions returns the context's peer RPC versions. -func (s *altsAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions { - return s.p.GetPeerRpcVersions() -} - -// PeerAttributes returns the context's peer attributes. -func (s *altsAuthInfo) PeerAttributes() map[string]string { - return s.p.GetPeerAttributes() -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go deleted file mode 100644 index 3896e8cf2b..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/common.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package internal contains common core functionality for ALTS. -package internal - -import ( - "context" - "net" - - "google.golang.org/grpc/credentials" -) - -const ( - // ClientSide identifies the client in this communication. - ClientSide Side = iota - // ServerSide identifies the server in this communication. - ServerSide -) - -// PeerNotRespondingError is returned when a peer server is not responding -// after a channel has been established. It is treated as a temporary connection -// error and re-connection to the server should be attempted. -var PeerNotRespondingError = &peerNotRespondingError{} - -// Side identifies the party's role: client or server. -type Side int - -type peerNotRespondingError struct{} - -// Return an error message for the purpose of logging. -func (e *peerNotRespondingError) Error() string { - return "peer server is not responding and re-connection should be attempted." 
-} - -// Temporary indicates if this connection error is temporary or fatal. -func (e *peerNotRespondingError) Temporary() bool { - return true -} - -// Handshaker defines a ALTS handshaker interface. -type Handshaker interface { - // ClientHandshake starts and completes a client-side handshaking and - // returns a secure connection and corresponding auth information. - ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) - // ServerHandshake starts and completes a server-side handshaking and - // returns a secure connection and corresponding auth information. - ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) - // Close terminates the Handshaker. It should be called when the caller - // obtains the secure connection. - Close() -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go deleted file mode 100644 index 43726e877b..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package conn - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "encoding/binary" - "fmt" - "strconv" -) - -// rekeyAEAD holds the necessary information for an AEAD based on -// AES-GCM that performs nonce-based key derivation and XORs the -// nonce with a random mask. -type rekeyAEAD struct { - kdfKey []byte - kdfCounter []byte - nonceMask []byte - nonceBuf []byte - gcmAEAD cipher.AEAD -} - -// KeySizeError signals that the given key does not have the correct size. -type KeySizeError int - -func (k KeySizeError) Error() string { - return "alts/conn: invalid key size " + strconv.Itoa(int(k)) -} - -// newRekeyAEAD creates a new instance of aes128gcm with rekeying. -// The key argument should be 44 bytes, the first 32 bytes are used as a key -// for HKDF-expand and the remainining 12 bytes are used as a random mask for -// the counter. -func newRekeyAEAD(key []byte) (*rekeyAEAD, error) { - k := len(key) - if k != kdfKeyLen+nonceLen { - return nil, KeySizeError(k) - } - return &rekeyAEAD{ - kdfKey: key[:kdfKeyLen], - kdfCounter: make([]byte, kdfCounterLen), - nonceMask: key[kdfKeyLen:], - nonceBuf: make([]byte, nonceLen), - gcmAEAD: nil, - }, nil -} - -// Seal rekeys if nonce[2:8] is different than in the last call, masks the nonce, -// and calls Seal for aes128gcm. -func (s *rekeyAEAD) Seal(dst, nonce, plaintext, additionalData []byte) []byte { - if err := s.rekeyIfRequired(nonce); err != nil { - panic(fmt.Sprintf("Rekeying failed with: %s", err.Error())) - } - maskNonce(s.nonceBuf, nonce, s.nonceMask) - return s.gcmAEAD.Seal(dst, s.nonceBuf, plaintext, additionalData) -} - -// Open rekeys if nonce[2:8] is different than in the last call, masks the nonce, -// and calls Open for aes128gcm. 
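// Aside: the nonce handling above combines two tricks: bytes [2:8] of the
// nonce select a derived key (triggering a rekey when they change), and the
// whole nonce is XORed with a secret mask before use. The masking step in
// isolation, with the same 8-byte + 4-byte split:

package main

import (
	"encoding/binary"
	"fmt"
)

func maskNonce(dst, nonce, mask []byte) {
	n1 := binary.LittleEndian.Uint64(nonce[:8])
	n2 := binary.LittleEndian.Uint32(nonce[8:])
	m1 := binary.LittleEndian.Uint64(mask[:8])
	m2 := binary.LittleEndian.Uint32(mask[8:])
	binary.LittleEndian.PutUint64(dst[:8], n1^m1)
	binary.LittleEndian.PutUint32(dst[8:], n2^m2)
}

func main() {
	nonce := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
	mask := []byte{0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	out := make([]byte, 12)
	maskNonce(out, nonce, mask)
	fmt.Println(out[0]) // 254 (0x01 ^ 0xFF)
}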
-func (s *rekeyAEAD) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - if err := s.rekeyIfRequired(nonce); err != nil { - return nil, err - } - maskNonce(s.nonceBuf, nonce, s.nonceMask) - return s.gcmAEAD.Open(dst, s.nonceBuf, ciphertext, additionalData) -} - -// rekeyIfRequired creates a new aes128gcm AEAD if the existing AEAD is nil -// or cannot be used with given nonce. -func (s *rekeyAEAD) rekeyIfRequired(nonce []byte) error { - newKdfCounter := nonce[kdfCounterOffset : kdfCounterOffset+kdfCounterLen] - if s.gcmAEAD != nil && bytes.Equal(newKdfCounter, s.kdfCounter) { - return nil - } - copy(s.kdfCounter, newKdfCounter) - a, err := aes.NewCipher(hkdfExpand(s.kdfKey, s.kdfCounter)) - if err != nil { - return err - } - s.gcmAEAD, err = cipher.NewGCM(a) - return err -} - -// maskNonce XORs the given nonce with the mask and stores the result in dst. -func maskNonce(dst, nonce, mask []byte) { - nonce1 := binary.LittleEndian.Uint64(nonce[:sizeUint64]) - nonce2 := binary.LittleEndian.Uint32(nonce[sizeUint64:]) - mask1 := binary.LittleEndian.Uint64(mask[:sizeUint64]) - mask2 := binary.LittleEndian.Uint32(mask[sizeUint64:]) - binary.LittleEndian.PutUint64(dst[:sizeUint64], nonce1^mask1) - binary.LittleEndian.PutUint32(dst[sizeUint64:], nonce2^mask2) -} - -// NonceSize returns the required nonce size. -func (s *rekeyAEAD) NonceSize() int { - return s.gcmAEAD.NonceSize() -} - -// Overhead returns the ciphertext overhead. -func (s *rekeyAEAD) Overhead() int { - return s.gcmAEAD.Overhead() -} - -// hkdfExpand computes the first 16 bytes of the HKDF-expand function -// defined in RFC5869. -func hkdfExpand(key, info []byte) []byte { - mac := hmac.New(sha256.New, key) - mac.Write(info) - mac.Write([]byte{0x01}[:]) - return mac.Sum(nil)[:aeadKeyLen] -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go deleted file mode 100644 index 04e0adb6c9..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go +++ /dev/null @@ -1,105 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package conn - -import ( - "crypto/aes" - "crypto/cipher" - - core "google.golang.org/grpc/credentials/alts/internal" -) - -const ( - // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in - // each direction). - overflowLenAES128GCM = 5 -) - -// aes128gcm is the struct that holds necessary information for ALTS record. -// The counter value is NOT included in the payload during the encryption and -// decryption operations. -type aes128gcm struct { - // inCounter is used in ALTS record to check that incoming counters are - // as expected, since ALTS record guarantees that messages are unwrapped - // in the same order that the peer wrapped them. - inCounter Counter - outCounter Counter - aead cipher.AEAD -} - -// NewAES128GCM creates an instance that uses aes128gcm for ALTS record. 
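// Aside: the construction below is plain AES-128-GCM from the standard
// library. A standalone seal/open round trip showing the same pieces,
// including the ciphertext[:0] aliasing trick used by Decrypt further down:

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	key := make([]byte, 16) // AES-128; an all-zero key, for illustration only
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		panic(err)
	}

	nonce := make([]byte, aead.NonceSize()) // ALTS derives this from a counter
	ct := aead.Seal(nil, nonce, []byte("hello"), nil)

	pt, err := aead.Open(ct[:0], nonce, ct, nil) // reuse ct's storage for dst
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt) // hello
}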
-func NewAES128GCM(side core.Side, key []byte) (ALTSRecordCrypto, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - a, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - return &aes128gcm{ - inCounter: NewInCounter(side, overflowLenAES128GCM), - outCounter: NewOutCounter(side, overflowLenAES128GCM), - aead: a, - }, nil -} - -// Encrypt is the encryption function. dst can contain bytes at the beginning of -// the ciphertext that will not be encrypted but will be authenticated. If dst -// has enough capacity to hold these bytes, the ciphertext and the tag, no -// allocation and copy operations will be performed. dst and plaintext do not -// overlap. -func (s *aes128gcm) Encrypt(dst, plaintext []byte) ([]byte, error) { - // If we need to allocate an output buffer, we want to include space for - // GCM tag to avoid forcing ALTS record to reallocate as well. - dlen := len(dst) - dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) - seq, err := s.outCounter.Value() - if err != nil { - return nil, err - } - data := out[:len(plaintext)] - copy(data, plaintext) // data may alias plaintext - - // Seal appends the ciphertext and the tag to its first argument and - // returns the updated slice. However, SliceForAppend above ensures that - // dst has enough capacity to avoid a reallocation and copy due to the - // append. - dst = s.aead.Seal(dst[:dlen], seq, data, nil) - s.outCounter.Inc() - return dst, nil -} - -func (s *aes128gcm) EncryptionOverhead() int { - return GcmTagSize -} - -func (s *aes128gcm) Decrypt(dst, ciphertext []byte) ([]byte, error) { - seq, err := s.inCounter.Value() - if err != nil { - return nil, err - } - // If dst is equal to ciphertext[:0], ciphertext storage is reused. - plaintext, err := s.aead.Open(dst, seq, ciphertext, nil) - if err != nil { - return nil, ErrAuth - } - s.inCounter.Inc() - return plaintext, nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go deleted file mode 100644 index 6a9035ea25..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package conn - -import ( - "crypto/cipher" - - core "google.golang.org/grpc/credentials/alts/internal" -) - -const ( - // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in - // each direction). - overflowLenAES128GCMRekey = 8 - nonceLen = 12 - aeadKeyLen = 16 - kdfKeyLen = 32 - kdfCounterOffset = 2 - kdfCounterLen = 6 - sizeUint64 = 8 -) - -// aes128gcmRekey is the struct that holds necessary information for ALTS record. -// The counter value is NOT included in the payload during the encryption and -// decryption operations. 
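// Aside: Encrypt below leans on SliceForAppend (defined in conn/common.go) to
// reserve room for the GCM tag up front so cipher.AEAD.Seal can append
// without reallocating. The helper and its effect, reproduced standalone:

package main

import "fmt"

func sliceForAppend(in []byte, n int) (head, tail []byte) {
	if total := len(in) + n; cap(in) >= total {
		head = in[:total] // enough capacity: extend in place
	} else {
		head = make([]byte, total)
		copy(head, in)
	}
	tail = head[len(in):] // tail aliases the newly reserved region
	return head, tail
}

func main() {
	buf := make([]byte, 4, 32)
	head, tail := sliceForAppend(buf, 8)
	fmt.Println(len(head), len(tail), cap(head)) // 12 8 32 (no reallocation)
}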
-type aes128gcmRekey struct { - // inCounter is used in ALTS record to check that incoming counters are - // as expected, since ALTS record guarantees that messages are unwrapped - // in the same order that the peer wrapped them. - inCounter Counter - outCounter Counter - inAEAD cipher.AEAD - outAEAD cipher.AEAD -} - -// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying -// for ALTS record. The key argument should be 44 bytes, the first 32 bytes -// are used as a key for HKDF-expand and the remainining 12 bytes are used -// as a random mask for the counter. -func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) { - inCounter := NewInCounter(side, overflowLenAES128GCMRekey) - outCounter := NewOutCounter(side, overflowLenAES128GCMRekey) - inAEAD, err := newRekeyAEAD(key) - if err != nil { - return nil, err - } - outAEAD, err := newRekeyAEAD(key) - if err != nil { - return nil, err - } - return &aes128gcmRekey{ - inCounter, - outCounter, - inAEAD, - outAEAD, - }, nil -} - -// Encrypt is the encryption function. dst can contain bytes at the beginning of -// the ciphertext that will not be encrypted but will be authenticated. If dst -// has enough capacity to hold these bytes, the ciphertext and the tag, no -// allocation and copy operations will be performed. dst and plaintext do not -// overlap. -func (s *aes128gcmRekey) Encrypt(dst, plaintext []byte) ([]byte, error) { - // If we need to allocate an output buffer, we want to include space for - // GCM tag to avoid forcing ALTS record to reallocate as well. - dlen := len(dst) - dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) - seq, err := s.outCounter.Value() - if err != nil { - return nil, err - } - data := out[:len(plaintext)] - copy(data, plaintext) // data may alias plaintext - - // Seal appends the ciphertext and the tag to its first argument and - // returns the updated slice. However, SliceForAppend above ensures that - // dst has enough capacity to avoid a reallocation and copy due to the - // append. - dst = s.outAEAD.Seal(dst[:dlen], seq, data, nil) - s.outCounter.Inc() - return dst, nil -} - -func (s *aes128gcmRekey) EncryptionOverhead() int { - return GcmTagSize -} - -func (s *aes128gcmRekey) Decrypt(dst, ciphertext []byte) ([]byte, error) { - seq, err := s.inCounter.Value() - if err != nil { - return nil, err - } - plaintext, err := s.inAEAD.Open(dst, seq, ciphertext, nil) - if err != nil { - return nil, ErrAuth - } - s.inCounter.Inc() - return plaintext, nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go deleted file mode 100644 index 1795d0c9e3..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package conn - -import ( - "encoding/binary" - "errors" - "fmt" -) - -const ( - // GcmTagSize is the GCM tag size is the difference in length between - // plaintext and ciphertext. From crypto/cipher/gcm.go in Go crypto - // library. - GcmTagSize = 16 -) - -// ErrAuth occurs on authentication failure. -var ErrAuth = errors.New("message authentication failed") - -// SliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. If the -// original slice has sufficient capacity then no allocation is performed. -func SliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return head, tail -} - -// ParseFramedMsg parse the provided buffer and returns a frame of the format -// msgLength+msg and any remaining bytes in that buffer. -func ParseFramedMsg(b []byte, maxLen uint32) ([]byte, []byte, error) { - // If the size field is not complete, return the provided buffer as - // remaining buffer. - if len(b) < MsgLenFieldSize { - return nil, b, nil - } - msgLenField := b[:MsgLenFieldSize] - length := binary.LittleEndian.Uint32(msgLenField) - if length > maxLen { - return nil, nil, fmt.Errorf("received the frame length %d larger than the limit %d", length, maxLen) - } - if len(b) < int(length)+4 { // account for the first 4 msg length bytes. - // Frame is not complete yet. - return nil, b, nil - } - return b[:MsgLenFieldSize+length], b[MsgLenFieldSize+length:], nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go deleted file mode 100644 index 9f00aca0b6..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package conn - -import ( - "errors" -) - -const counterLen = 12 - -var ( - errInvalidCounter = errors.New("invalid counter") -) - -// Counter is a 96-bit, little-endian counter. -type Counter struct { - value [counterLen]byte - invalid bool - overflowLen int -} - -// Value returns the current value of the counter as a byte slice. -func (c *Counter) Value() ([]byte, error) { - if c.invalid { - return nil, errInvalidCounter - } - return c.value[:], nil -} - -// Inc increments the counter and checks for overflow. -func (c *Counter) Inc() { - // If the counter is already invalid, there is no need to increase it. 
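// Aside: Inc below implements a little-endian add-with-carry restricted to
// the first overflowLen bytes; carrying past that window permanently poisons
// the counter, which is how "never encrypt more than 2^(n*8) frames" is
// enforced. The same logic as a standalone function:

package main

import "fmt"

func inc(value []byte, overflowLen int) (overflowed bool) {
	for i := 0; i < overflowLen; i++ {
		value[i]++
		if value[i] != 0 {
			return false // carry stopped inside the window
		}
	}
	return true // carried past the window: counter is exhausted
}

func main() {
	c := []byte{0xFF, 0x00, 0x00}
	fmt.Println(inc(c, 2), c) // false [0 1 0] (carry into the second byte)

	c = []byte{0xFF, 0xFF, 0x00}
	fmt.Println(inc(c, 2), c) // true [0 0 0] (2-byte window exhausted)
}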
- if c.invalid { - return - } - i := 0 - for ; i < c.overflowLen; i++ { - c.value[i]++ - if c.value[i] != 0 { - break - } - } - if i == c.overflowLen { - c.invalid = true - } -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go deleted file mode 100644 index 0d64fb37a1..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go +++ /dev/null @@ -1,275 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package conn contains an implementation of a secure channel created by gRPC -// handshakers. -package conn - -import ( - "encoding/binary" - "fmt" - "math" - "net" - - core "google.golang.org/grpc/credentials/alts/internal" -) - -// ALTSRecordCrypto is the interface for gRPC ALTS record protocol. -type ALTSRecordCrypto interface { - // Encrypt encrypts the plaintext and computes the tag (if any) of dst - // and plaintext. dst and plaintext may fully overlap or not at all. - Encrypt(dst, plaintext []byte) ([]byte, error) - // EncryptionOverhead returns the tag size (if any) in bytes. - EncryptionOverhead() int - // Decrypt decrypts ciphertext and verify the tag (if any). dst and - // ciphertext may alias exactly or not at all. To reuse ciphertext's - // storage for the decrypted output, use ciphertext[:0] as dst. - Decrypt(dst, ciphertext []byte) ([]byte, error) -} - -// ALTSRecordFunc is a function type for factory functions that create -// ALTSRecordCrypto instances. -type ALTSRecordFunc func(s core.Side, keyData []byte) (ALTSRecordCrypto, error) - -const ( - // MsgLenFieldSize is the byte size of the frame length field of a - // framed message. - MsgLenFieldSize = 4 - // The byte size of the message type field of a framed message. - msgTypeFieldSize = 4 - // The bytes size limit for a ALTS record message. - altsRecordLengthLimit = 1024 * 1024 // 1 MiB - // The default bytes size of a ALTS record message. - altsRecordDefaultLength = 4 * 1024 // 4KiB - // Message type value included in ALTS record framing. - altsRecordMsgType = uint32(0x06) - // The initial write buffer size. - altsWriteBufferInitialSize = 32 * 1024 // 32KiB - // The maximum write buffer size. This *must* be multiple of - // altsRecordDefaultLength. - altsWriteBufferMaxSize = 512 * 1024 // 512KiB -) - -var ( - protocols = make(map[string]ALTSRecordFunc) -) - -// RegisterProtocol register a ALTS record encryption protocol. -func RegisterProtocol(protocol string, f ALTSRecordFunc) error { - if _, ok := protocols[protocol]; ok { - return fmt.Errorf("protocol %v is already registered", protocol) - } - protocols[protocol] = f - return nil -} - -// conn represents a secured connection. It implements the net.Conn interface. -type conn struct { - net.Conn - crypto ALTSRecordCrypto - // buf holds data that has been read from the connection and decrypted, - // but has not yet been returned by Read. 
- buf []byte - payloadLengthLimit int - // protected holds data read from the network but have not yet been - // decrypted. This data might not compose a complete frame. - protected []byte - // writeBuf is a buffer used to contain encrypted frames before being - // written to the network. - writeBuf []byte - // nextFrame stores the next frame (in protected buffer) info. - nextFrame []byte - // overhead is the calculated overhead of each frame. - overhead int -} - -// NewConn creates a new secure channel instance given the other party role and -// handshaking result. -func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, protected []byte) (net.Conn, error) { - newCrypto := protocols[recordProtocol] - if newCrypto == nil { - return nil, fmt.Errorf("negotiated unknown next_protocol %q", recordProtocol) - } - crypto, err := newCrypto(side, key) - if err != nil { - return nil, fmt.Errorf("protocol %q: %v", recordProtocol, err) - } - overhead := MsgLenFieldSize + msgTypeFieldSize + crypto.EncryptionOverhead() - payloadLengthLimit := altsRecordDefaultLength - overhead - var protectedBuf []byte - if protected == nil { - // We pre-allocate protected to be of size - // 2*altsRecordDefaultLength-1 during initialization. We only - // read from the network into protected when protected does not - // contain a complete frame, which is at most - // altsRecordDefaultLength-1 (bytes). And we read at most - // altsRecordDefaultLength (bytes) data into protected at one - // time. Therefore, 2*altsRecordDefaultLength-1 is large enough - // to buffer data read from the network. - protectedBuf = make([]byte, 0, 2*altsRecordDefaultLength-1) - } else { - protectedBuf = make([]byte, len(protected)) - copy(protectedBuf, protected) - } - - altsConn := &conn{ - Conn: c, - crypto: crypto, - payloadLengthLimit: payloadLengthLimit, - protected: protectedBuf, - writeBuf: make([]byte, altsWriteBufferInitialSize), - nextFrame: protectedBuf, - overhead: overhead, - } - return altsConn, nil -} - -// Read reads and decrypts a frame from the underlying connection, and copies the -// decrypted payload into b. If the size of the payload is greater than len(b), -// Read retains the remaining bytes in an internal buffer, and subsequent calls -// to Read will read from this buffer until it is exhausted. -func (p *conn) Read(b []byte) (n int, err error) { - if len(p.buf) == 0 { - var framedMsg []byte - framedMsg, p.nextFrame, err = ParseFramedMsg(p.nextFrame, altsRecordLengthLimit) - if err != nil { - return n, err - } - // Check whether the next frame to be decrypted has been - // completely received yet. - if len(framedMsg) == 0 { - copy(p.protected, p.nextFrame) - p.protected = p.protected[:len(p.nextFrame)] - // Always copy next incomplete frame to the beginning of - // the protected buffer and reset nextFrame to it. - p.nextFrame = p.protected - } - // Check whether a complete frame has been received yet. - for len(framedMsg) == 0 { - if len(p.protected) == cap(p.protected) { - tmp := make([]byte, len(p.protected), cap(p.protected)+altsRecordDefaultLength) - copy(tmp, p.protected) - p.protected = tmp - } - n, err = p.Conn.Read(p.protected[len(p.protected):min(cap(p.protected), len(p.protected)+altsRecordDefaultLength)]) - if err != nil { - return 0, err - } - p.protected = p.protected[:len(p.protected)+n] - framedMsg, p.nextFrame, err = ParseFramedMsg(p.protected, altsRecordLengthLimit) - if err != nil { - return 0, err - } - } - // Now we have a complete frame, decrypted it. 
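// Aside: the frame being unpacked here has the layout described by the
// constants above: a 4-byte little-endian length (covering everything after
// itself), a 4-byte message type (0x06), then ciphertext plus tag. A
// standalone round trip over that layout:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	payload := []byte("ciphertext+tag")
	frame := make([]byte, 8+len(payload))
	binary.LittleEndian.PutUint32(frame[0:4], uint32(4+len(payload)))
	binary.LittleEndian.PutUint32(frame[4:8], 0x06) // altsRecordMsgType
	copy(frame[8:], payload)

	length := binary.LittleEndian.Uint32(frame[:4])
	msgType := binary.LittleEndian.Uint32(frame[4:8])
	fmt.Println(length, msgType&0xff == 0x06, string(frame[8:8+length-4]))
	// Output: 18 true ciphertext+tag
}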
- msg := framedMsg[MsgLenFieldSize:] - msgType := binary.LittleEndian.Uint32(msg[:msgTypeFieldSize]) - if msgType&0xff != altsRecordMsgType { - return 0, fmt.Errorf("received frame with incorrect message type %v, expected lower byte %v", - msgType, altsRecordMsgType) - } - ciphertext := msg[msgTypeFieldSize:] - - // Decrypt requires that if the dst and ciphertext alias, they - // must alias exactly. Code here used to use msg[:0], but msg - // starts MsgLenFieldSize+msgTypeFieldSize bytes earlier than - // ciphertext, so they alias inexactly. Using ciphertext[:0] - // arranges the appropriate aliasing without needing to copy - // ciphertext or use a separate destination buffer. For more info - // check: https://golang.org/pkg/crypto/cipher/#AEAD. - p.buf, err = p.crypto.Decrypt(ciphertext[:0], ciphertext) - if err != nil { - return 0, err - } - } - - n = copy(b, p.buf) - p.buf = p.buf[n:] - return n, nil -} - -// Write encrypts, frames, and writes bytes from b to the underlying connection. -func (p *conn) Write(b []byte) (n int, err error) { - n = len(b) - // Calculate the output buffer size with framing and encryption overhead. - numOfFrames := int(math.Ceil(float64(len(b)) / float64(p.payloadLengthLimit))) - size := len(b) + numOfFrames*p.overhead - // If writeBuf is too small, increase its size up to the maximum size. - partialBSize := len(b) - if size > altsWriteBufferMaxSize { - size = altsWriteBufferMaxSize - const numOfFramesInMaxWriteBuf = altsWriteBufferMaxSize / altsRecordDefaultLength - partialBSize = numOfFramesInMaxWriteBuf * p.payloadLengthLimit - } - if len(p.writeBuf) < size { - p.writeBuf = make([]byte, size) - } - - for partialBStart := 0; partialBStart < len(b); partialBStart += partialBSize { - partialBEnd := partialBStart + partialBSize - if partialBEnd > len(b) { - partialBEnd = len(b) - } - partialB := b[partialBStart:partialBEnd] - writeBufIndex := 0 - for len(partialB) > 0 { - payloadLen := len(partialB) - if payloadLen > p.payloadLengthLimit { - payloadLen = p.payloadLengthLimit - } - buf := partialB[:payloadLen] - partialB = partialB[payloadLen:] - - // Write buffer contains: length, type, payload, and tag - // if any. - - // 1. Fill in type field. - msg := p.writeBuf[writeBufIndex+MsgLenFieldSize:] - binary.LittleEndian.PutUint32(msg, altsRecordMsgType) - - // 2. Encrypt the payload and create a tag if any. - msg, err = p.crypto.Encrypt(msg[:msgTypeFieldSize], buf) - if err != nil { - return n, err - } - - // 3. Fill in the size field. - binary.LittleEndian.PutUint32(p.writeBuf[writeBufIndex:], uint32(len(msg))) - - // 4. Increase writeBufIndex. - writeBufIndex += len(buf) + p.overhead - } - nn, err := p.Conn.Write(p.writeBuf[:writeBufIndex]) - if err != nil { - // We need to calculate the actual data size that was - // written. This means we need to remove header, - // encryption overheads, and any partially-written - // frame data. - numOfWrittenFrames := int(math.Floor(float64(nn) / float64(altsRecordDefaultLength))) - return partialBStart + numOfWrittenFrames*p.payloadLengthLimit, err - } - } - return n, nil -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go deleted file mode 100644 index 84821fa254..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package conn - -import core "google.golang.org/grpc/credentials/alts/internal" - -// NewOutCounter returns an outgoing counter initialized to the starting sequence -// number for the client/server side of a connection. -func NewOutCounter(s core.Side, overflowLen int) (c Counter) { - c.overflowLen = overflowLen - if s == core.ServerSide { - // Server counters in ALTS record have the little-endian high bit - // set. - c.value[counterLen-1] = 0x80 - } - return -} - -// NewInCounter returns an incoming counter initialized to the starting sequence -// number for the client/server side of a connection. This is used in ALTS record -// to check that incoming counters are as expected, since ALTS record guarantees -// that messages are unwrapped in the same order that the peer wrapped them. -func NewInCounter(s core.Side, overflowLen int) (c Counter) { - c.overflowLen = overflowLen - if s == core.ClientSide { - // Server counters in ALTS record have the little-endian high bit - // set. - c.value[counterLen-1] = 0x80 - } - return -} - -// CounterFromValue creates a new counter given an initial value. -func CounterFromValue(value []byte, overflowLen int) (c Counter) { - c.overflowLen = overflowLen - copy(c.value[:], value) - return -} - -// CounterSide returns the connection side (client/server) a sequence counter is -// associated with. -func CounterSide(c []byte) core.Side { - if c[counterLen-1]&0x80 == 0x80 { - return core.ServerSide - } - return core.ClientSide -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go deleted file mode 100644 index 150ae55767..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ /dev/null @@ -1,393 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package handshaker provides ALTS handshaking functionality for GCP. 
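// Aside: Write above chunks b into at most payloadLengthLimit-sized pieces,
// and the counter helpers that follow encode the sender's role in the top
// bit of the 96-bit little-endian counter so each direction of a connection
// draws nonces from a disjoint space. That side convention in miniature:

package main

import "fmt"

const counterLen = 12

func isServerCounter(c []byte) bool {
	return c[counterLen-1]&0x80 == 0x80 // little-endian high bit marks server
}

func main() {
	var client, server [counterLen]byte
	server[counterLen-1] = 0x80
	fmt.Println(isServerCounter(client[:]), isServerCounter(server[:])) // false true
}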
-package handshaker - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "sync" - - grpc "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - core "google.golang.org/grpc/credentials/alts/internal" - "google.golang.org/grpc/credentials/alts/internal/authinfo" - "google.golang.org/grpc/credentials/alts/internal/conn" - altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" - altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" -) - -const ( - // The maximum byte size of receive frames. - frameLimit = 64 * 1024 // 64 KB - rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY" - // maxPendingHandshakes represents the maximum number of concurrent - // handshakes. - maxPendingHandshakes = 100 -) - -var ( - hsProtocol = altspb.HandshakeProtocol_ALTS - appProtocols = []string{"grpc"} - recordProtocols = []string{rekeyRecordProtocolName} - keyLength = map[string]int{ - rekeyRecordProtocolName: 44, - } - altsRecordFuncs = map[string]conn.ALTSRecordFunc{ - // ALTS handshaker protocols. - rekeyRecordProtocolName: func(s core.Side, keyData []byte) (conn.ALTSRecordCrypto, error) { - return conn.NewAES128GCMRekey(s, keyData) - }, - } - // control number of concurrent created (but not closed) handshakers. - mu sync.Mutex - concurrentHandshakes = int64(0) - // errDropped occurs when maxPendingHandshakes is reached. - errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached") - // errOutOfBound occurs when the handshake service returns a consumed - // bytes value larger than the buffer that was passed to it originally. - errOutOfBound = errors.New("handshaker service consumed bytes value is out-of-bound") -) - -func init() { - for protocol, f := range altsRecordFuncs { - if err := conn.RegisterProtocol(protocol, f); err != nil { - panic(err) - } - } -} - -func acquire() bool { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. - n := int64(1) - success := maxPendingHandshakes-concurrentHandshakes >= n - if success { - concurrentHandshakes += n - } - mu.Unlock() - return success -} - -func release() { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. - n := int64(1) - concurrentHandshakes -= n - if concurrentHandshakes < 0 { - mu.Unlock() - panic("bad release") - } - mu.Unlock() -} - -// ClientHandshakerOptions contains the client handshaker options that can -// provided by the caller. -type ClientHandshakerOptions struct { - // ClientIdentity is the handshaker client local identity. - ClientIdentity *altspb.Identity - // TargetName is the server service account name for secure name - // checking. - TargetName string - // TargetServiceAccounts contains a list of expected target service - // accounts. One of these accounts should match one of the accounts in - // the handshaker results. Otherwise, the handshake fails. - TargetServiceAccounts []string - // RPCVersions specifies the gRPC versions accepted by the client. - RPCVersions *altspb.RpcProtocolVersions -} - -// ServerHandshakerOptions contains the server handshaker options that can -// provided by the caller. -type ServerHandshakerOptions struct { - // RPCVersions specifies the gRPC versions accepted by the server. - RPCVersions *altspb.RpcProtocolVersions -} - -// DefaultClientHandshakerOptions returns the default client handshaker options. 
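// Aside: acquire/release above cap concurrent handshakes with a mutex-guarded
// counter. A buffered-channel semaphore is an equivalent formulation of the
// same admission control (capacity standing in for maxPendingHandshakes):

package main

import "fmt"

var sem = make(chan struct{}, 100)

func acquire() bool {
	select {
	case sem <- struct{}{}:
		return true
	default:
		return false // at capacity: drop rather than queue, as above
	}
}

func release() { <-sem }

func main() {
	if !acquire() {
		fmt.Println("handshake dropped")
		return
	}
	defer release()
	fmt.Println("handshaking")
}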
-func DefaultClientHandshakerOptions() *ClientHandshakerOptions { - return &ClientHandshakerOptions{} -} - -// DefaultServerHandshakerOptions returns the default client handshaker options. -func DefaultServerHandshakerOptions() *ServerHandshakerOptions { - return &ServerHandshakerOptions{} -} - -// TODO: add support for future local and remote endpoint in both client options -// and server options (server options struct does not exist now. When -// caller can provide endpoints, it should be created. - -// altsHandshaker is used to complete an ALTS handshake between client and -// server. This handshaker talks to the ALTS handshaker service in the metadata -// server. -type altsHandshaker struct { - // RPC stream used to access the ALTS Handshaker service. - stream altsgrpc.HandshakerService_DoHandshakeClient - // the connection to the peer. - conn net.Conn - // a virtual connection to the ALTS handshaker service. - clientConn *grpc.ClientConn - // client handshake options. - clientOpts *ClientHandshakerOptions - // server handshake options. - serverOpts *ServerHandshakerOptions - // defines the side doing the handshake, client or server. - side core.Side -} - -// NewClientHandshaker creates a core.Handshaker that performs a client-side -// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker -// service in the metadata server. -func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { - return &altsHandshaker{ - stream: nil, - conn: c, - clientConn: conn, - clientOpts: opts, - side: core.ClientSide, - }, nil -} - -// NewServerHandshaker creates a core.Handshaker that performs a server-side -// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker -// service in the metadata server. -func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { - return &altsHandshaker{ - stream: nil, - conn: c, - clientConn: conn, - serverOpts: opts, - side: core.ServerSide, - }, nil -} - -// ClientHandshake starts and completes a client ALTS handshake for GCP. Once -// done, ClientHandshake returns a secure connection. -func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { - return nil, nil, errDropped - } - defer release() - - if h.side != core.ClientSide { - return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") - } - - // TODO(matthewstevenson88): Change unit tests to use public APIs so - // that h.stream can unconditionally be set based on h.clientConn. - if h.stream == nil { - stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) - if err != nil { - return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) - } - h.stream = stream - } - - // Create target identities from service account list. 
- targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts)) - for _, account := range h.clientOpts.TargetServiceAccounts { - targetIdentities = append(targetIdentities, &altspb.Identity{ - IdentityOneof: &altspb.Identity_ServiceAccount{ - ServiceAccount: account, - }, - }) - } - req := &altspb.HandshakerReq{ - ReqOneof: &altspb.HandshakerReq_ClientStart{ - ClientStart: &altspb.StartClientHandshakeReq{ - HandshakeSecurityProtocol: hsProtocol, - ApplicationProtocols: appProtocols, - RecordProtocols: recordProtocols, - TargetIdentities: targetIdentities, - LocalIdentity: h.clientOpts.ClientIdentity, - TargetName: h.clientOpts.TargetName, - RpcVersions: h.clientOpts.RPCVersions, - }, - }, - } - - conn, result, err := h.doHandshake(req) - if err != nil { - return nil, nil, err - } - authInfo := authinfo.New(result) - return conn, authInfo, nil -} - -// ServerHandshake starts and completes a server ALTS handshake for GCP. Once -// done, ServerHandshake returns a secure connection. -func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { - return nil, nil, errDropped - } - defer release() - - if h.side != core.ServerSide { - return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") - } - - // TODO(matthewstevenson88): Change unit tests to use public APIs so - // that h.stream can unconditionally be set based on h.clientConn. - if h.stream == nil { - stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) - if err != nil { - return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) - } - h.stream = stream - } - - p := make([]byte, frameLimit) - n, err := h.conn.Read(p) - if err != nil { - return nil, nil, err - } - - // Prepare server parameters. - // TODO: currently only ALTS parameters are provided. Might need to use - // more options in the future. - params := make(map[int32]*altspb.ServerHandshakeParameters) - params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{ - RecordProtocols: recordProtocols, - } - req := &altspb.HandshakerReq{ - ReqOneof: &altspb.HandshakerReq_ServerStart{ - ServerStart: &altspb.StartServerHandshakeReq{ - ApplicationProtocols: appProtocols, - HandshakeParameters: params, - InBytes: p[:n], - RpcVersions: h.serverOpts.RPCVersions, - }, - }, - } - - conn, result, err := h.doHandshake(req) - if err != nil { - return nil, nil, err - } - authInfo := authinfo.New(result) - return conn, authInfo, nil -} - -func (h *altsHandshaker) doHandshake(req *altspb.HandshakerReq) (net.Conn, *altspb.HandshakerResult, error) { - resp, err := h.accessHandshakerService(req) - if err != nil { - return nil, nil, err - } - // Check of the returned status is an error. - if resp.GetStatus() != nil { - if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { - return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) - } - } - - var extra []byte - if req.GetServerStart() != nil { - if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { - return nil, nil, errOutOfBound - } - extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] - } - result, extra, err := h.processUntilDone(resp, extra) - if err != nil { - return nil, nil, err - } - // The handshaker returns a 128 bytes key. It should be truncated based - // on the returned record protocol. 
- keyLen, ok := keyLength[result.RecordProtocol] - if !ok { - return nil, nil, fmt.Errorf("unknown resulted record protocol %v", result.RecordProtocol) - } - sc, err := conn.NewConn(h.conn, h.side, result.GetRecordProtocol(), result.KeyData[:keyLen], extra) - if err != nil { - return nil, nil, err - } - return sc, result, nil -} - -func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*altspb.HandshakerResp, error) { - if err := h.stream.Send(req); err != nil { - return nil, err - } - resp, err := h.stream.Recv() - if err != nil { - return nil, err - } - return resp, nil -} - -// processUntilDone processes the handshake until the handshaker service returns -// the results. Handshaker service takes care of frame parsing, so we read -// whatever received from the network and send it to the handshaker service. -func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) { - for { - if len(resp.OutFrames) > 0 { - if _, err := h.conn.Write(resp.OutFrames); err != nil { - return nil, nil, err - } - } - if resp.Result != nil { - return resp.Result, extra, nil - } - buf := make([]byte, frameLimit) - n, err := h.conn.Read(buf) - if err != nil && err != io.EOF { - return nil, nil, err - } - // If there is nothing to send to the handshaker service, and - // nothing is received from the peer, then we are stuck. - // This covers the case when the peer is not responding. Note - // that handshaker service connection issues are caught in - // accessHandshakerService before we even get here. - if len(resp.OutFrames) == 0 && n == 0 { - return nil, nil, core.PeerNotRespondingError - } - // Append extra bytes from the previous interaction with the - // handshaker service with the current buffer read from conn. - p := append(extra, buf[:n]...) - // From here on, p and extra point to the same slice. - resp, err = h.accessHandshakerService(&altspb.HandshakerReq{ - ReqOneof: &altspb.HandshakerReq_Next{ - Next: &altspb.NextHandshakeMessageReq{ - InBytes: p, - }, - }, - }) - if err != nil { - return nil, nil, err - } - // Set extra based on handshaker service response. - if resp.GetBytesConsumed() > uint32(len(p)) { - return nil, nil, errOutOfBound - } - extra = p[resp.GetBytesConsumed():] - } -} - -// Close terminates the Handshaker. It should be called when the caller obtains -// the secure connection. -func (h *altsHandshaker) Close() { - if h.stream != nil { - h.stream.CloseSend() - } -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go deleted file mode 100644 index e1cdafb980..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -// Package service manages connections between the VM application and the ALTS -// handshaker service. -package service - -import ( - "sync" - - grpc "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -var ( - // mu guards hsConnMap and hsDialer. - mu sync.Mutex - // hsConn represents a mapping from a hypervisor handshaker service address - // to a corresponding connection to a hypervisor handshaker service - // instance. - hsConnMap = make(map[string]*grpc.ClientConn) - // hsDialer will be reassigned in tests. - hsDialer = grpc.Dial -) - -// Dial dials the handshake service in the hypervisor. If a connection has -// already been established, this function returns it. Otherwise, a new -// connection is created. -func Dial(hsAddress string) (*grpc.ClientConn, error) { - mu.Lock() - defer mu.Unlock() - - hsConn, ok := hsConnMap[hsAddress] - if !ok { - // Create a new connection to the handshaker service. Note that - // this connection stays open until the application is closed. - var err error - hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return nil, err - } - hsConnMap[hsAddress] = hsConn - } - return hsConn, nil -} - -// CloseForTesting closes all open connections to the handshaker service. -// -// For testing purposes only. -func CloseForTesting() error { - for _, hsConn := range hsConnMap { - if hsConn == nil { - continue - } - if err := hsConn.Close(); err != nil { - return err - } - } - - // Reset the connection map. - hsConnMap = make(map[string]*grpc.ClientConn) - return nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go deleted file mode 100644 index 83e3bae37b..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2018 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/altscontext.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v4.22.0 -// source: grpc/gcp/altscontext.proto - -package grpc_gcp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type AltsContext struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocol negotiated for this connection. 
- ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` - // The record protocol negotiated for this connection. - RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` - // The security level of the created secure channel. - SecurityLevel SecurityLevel `protobuf:"varint,3,opt,name=security_level,json=securityLevel,proto3,enum=grpc.gcp.SecurityLevel" json:"security_level,omitempty"` - // The peer service account. - PeerServiceAccount string `protobuf:"bytes,4,opt,name=peer_service_account,json=peerServiceAccount,proto3" json:"peer_service_account,omitempty"` - // The local service account. - LocalServiceAccount string `protobuf:"bytes,5,opt,name=local_service_account,json=localServiceAccount,proto3" json:"local_service_account,omitempty"` - // The RPC protocol versions supported by the peer. - PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` - // Additional attributes of the peer. - PeerAttributes map[string]string `protobuf:"bytes,7,rep,name=peer_attributes,json=peerAttributes,proto3" json:"peer_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *AltsContext) Reset() { - *x = AltsContext{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AltsContext) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AltsContext) ProtoMessage() {} - -func (x *AltsContext) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AltsContext.ProtoReflect.Descriptor instead. 
-func (*AltsContext) Descriptor() ([]byte, []int) { - return file_grpc_gcp_altscontext_proto_rawDescGZIP(), []int{0} -} - -func (x *AltsContext) GetApplicationProtocol() string { - if x != nil { - return x.ApplicationProtocol - } - return "" -} - -func (x *AltsContext) GetRecordProtocol() string { - if x != nil { - return x.RecordProtocol - } - return "" -} - -func (x *AltsContext) GetSecurityLevel() SecurityLevel { - if x != nil { - return x.SecurityLevel - } - return SecurityLevel_SECURITY_NONE -} - -func (x *AltsContext) GetPeerServiceAccount() string { - if x != nil { - return x.PeerServiceAccount - } - return "" -} - -func (x *AltsContext) GetLocalServiceAccount() string { - if x != nil { - return x.LocalServiceAccount - } - return "" -} - -func (x *AltsContext) GetPeerRpcVersions() *RpcProtocolVersions { - if x != nil { - return x.PeerRpcVersions - } - return nil -} - -func (x *AltsContext) GetPeerAttributes() map[string]string { - if x != nil { - return x.PeerAttributes - } - return nil -} - -var File_grpc_gcp_altscontext_proto protoreflect.FileDescriptor - -var file_grpc_gcp_altscontext_proto_rawDesc = []byte{ - 0x0a, 0x1a, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x1a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, - 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0xf1, 0x03, 0x0a, 0x0b, 0x41, 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, - 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0e, - 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0d, 0x73, - 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x30, 0x0a, 0x14, - 0x70, 0x65, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x70, 0x65, 0x65, 0x72, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x32, - 0x0a, 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 
0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, - 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x52, 0x0a, - 0x0f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x41, 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x50, 0x65, - 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x1a, 0x41, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x42, 0x6c, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x10, 0x41, - 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, - 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_gcp_altscontext_proto_rawDescOnce sync.Once - file_grpc_gcp_altscontext_proto_rawDescData = file_grpc_gcp_altscontext_proto_rawDesc -) - -func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte { - file_grpc_gcp_altscontext_proto_rawDescOnce.Do(func() { - file_grpc_gcp_altscontext_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_altscontext_proto_rawDescData) - }) - return file_grpc_gcp_altscontext_proto_rawDescData -} - -var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_altscontext_proto_goTypes = []interface{}{ - (*AltsContext)(nil), // 0: grpc.gcp.AltsContext - nil, // 1: grpc.gcp.AltsContext.PeerAttributesEntry - (SecurityLevel)(0), // 2: grpc.gcp.SecurityLevel - (*RpcProtocolVersions)(nil), // 3: grpc.gcp.RpcProtocolVersions -} -var file_grpc_gcp_altscontext_proto_depIdxs = []int32{ - 2, // 0: grpc.gcp.AltsContext.security_level:type_name -> grpc.gcp.SecurityLevel - 3, // 1: grpc.gcp.AltsContext.peer_rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions - 1, // 2: grpc.gcp.AltsContext.peer_attributes:type_name -> grpc.gcp.AltsContext.PeerAttributesEntry - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_grpc_gcp_altscontext_proto_init() } -func file_grpc_gcp_altscontext_proto_init() { - if File_grpc_gcp_altscontext_proto != nil { - return - } - file_grpc_gcp_transport_security_common_proto_init() - if !protoimpl.UnsafeEnabled { - 
file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AltsContext); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_gcp_altscontext_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_grpc_gcp_altscontext_proto_goTypes, - DependencyIndexes: file_grpc_gcp_altscontext_proto_depIdxs, - MessageInfos: file_grpc_gcp_altscontext_proto_msgTypes, - }.Build() - File_grpc_gcp_altscontext_proto = out.File - file_grpc_gcp_altscontext_proto_rawDesc = nil - file_grpc_gcp_altscontext_proto_goTypes = nil - file_grpc_gcp_altscontext_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go deleted file mode 100644 index 0b0093328b..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ /dev/null @@ -1,1423 +0,0 @@ -// Copyright 2018 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/handshaker.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v4.22.0 -// source: grpc/gcp/handshaker.proto - -package grpc_gcp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type HandshakeProtocol int32 - -const ( - // Default value. - HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED HandshakeProtocol = 0 - // TLS handshake protocol. - HandshakeProtocol_TLS HandshakeProtocol = 1 - // Application Layer Transport Security handshake protocol. - HandshakeProtocol_ALTS HandshakeProtocol = 2 -) - -// Enum value maps for HandshakeProtocol. 
-var ( - HandshakeProtocol_name = map[int32]string{ - 0: "HANDSHAKE_PROTOCOL_UNSPECIFIED", - 1: "TLS", - 2: "ALTS", - } - HandshakeProtocol_value = map[string]int32{ - "HANDSHAKE_PROTOCOL_UNSPECIFIED": 0, - "TLS": 1, - "ALTS": 2, - } -) - -func (x HandshakeProtocol) Enum() *HandshakeProtocol { - p := new(HandshakeProtocol) - *p = x - return p -} - -func (x HandshakeProtocol) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (HandshakeProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_gcp_handshaker_proto_enumTypes[0].Descriptor() -} - -func (HandshakeProtocol) Type() protoreflect.EnumType { - return &file_grpc_gcp_handshaker_proto_enumTypes[0] -} - -func (x HandshakeProtocol) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use HandshakeProtocol.Descriptor instead. -func (HandshakeProtocol) EnumDescriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{0} -} - -type NetworkProtocol int32 - -const ( - NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED NetworkProtocol = 0 - NetworkProtocol_TCP NetworkProtocol = 1 - NetworkProtocol_UDP NetworkProtocol = 2 -) - -// Enum value maps for NetworkProtocol. -var ( - NetworkProtocol_name = map[int32]string{ - 0: "NETWORK_PROTOCOL_UNSPECIFIED", - 1: "TCP", - 2: "UDP", - } - NetworkProtocol_value = map[string]int32{ - "NETWORK_PROTOCOL_UNSPECIFIED": 0, - "TCP": 1, - "UDP": 2, - } -) - -func (x NetworkProtocol) Enum() *NetworkProtocol { - p := new(NetworkProtocol) - *p = x - return p -} - -func (x NetworkProtocol) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (NetworkProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_gcp_handshaker_proto_enumTypes[1].Descriptor() -} - -func (NetworkProtocol) Type() protoreflect.EnumType { - return &file_grpc_gcp_handshaker_proto_enumTypes[1] -} - -func (x NetworkProtocol) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use NetworkProtocol.Descriptor instead. -func (NetworkProtocol) EnumDescriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{1} -} - -type Endpoint struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // IP address. It should contain an IPv4 or IPv6 string literal, e.g. - // "192.168.0.1" or "2001:db8::1". - IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` - // Port number. - Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - // Network protocol (e.g., TCP, UDP) associated with this endpoint. 
- Protocol NetworkProtocol `protobuf:"varint,3,opt,name=protocol,proto3,enum=grpc.gcp.NetworkProtocol" json:"protocol,omitempty"` -} - -func (x *Endpoint) Reset() { - *x = Endpoint{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Endpoint) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Endpoint) ProtoMessage() {} - -func (x *Endpoint) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Endpoint.ProtoReflect.Descriptor instead. -func (*Endpoint) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{0} -} - -func (x *Endpoint) GetIpAddress() string { - if x != nil { - return x.IpAddress - } - return "" -} - -func (x *Endpoint) GetPort() int32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *Endpoint) GetProtocol() NetworkProtocol { - if x != nil { - return x.Protocol - } - return NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED -} - -type Identity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to IdentityOneof: - // - // *Identity_ServiceAccount - // *Identity_Hostname - IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` - // Additional attributes of the identity. - Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *Identity) Reset() { - *x = Identity{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Identity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Identity) ProtoMessage() {} - -func (x *Identity) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Identity.ProtoReflect.Descriptor instead. -func (*Identity) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{1} -} - -func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { - if m != nil { - return m.IdentityOneof - } - return nil -} - -func (x *Identity) GetServiceAccount() string { - if x, ok := x.GetIdentityOneof().(*Identity_ServiceAccount); ok { - return x.ServiceAccount - } - return "" -} - -func (x *Identity) GetHostname() string { - if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { - return x.Hostname - } - return "" -} - -func (x *Identity) GetAttributes() map[string]string { - if x != nil { - return x.Attributes - } - return nil -} - -type isIdentity_IdentityOneof interface { - isIdentity_IdentityOneof() -} - -type Identity_ServiceAccount struct { - // Service account of a connection endpoint. 
- ServiceAccount string `protobuf:"bytes,1,opt,name=service_account,json=serviceAccount,proto3,oneof"` -} - -type Identity_Hostname struct { - // Hostname of a connection endpoint. - Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` -} - -func (*Identity_ServiceAccount) isIdentity_IdentityOneof() {} - -func (*Identity_Hostname) isIdentity_IdentityOneof() {} - -type StartClientHandshakeReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Handshake security protocol requested by the client. - HandshakeSecurityProtocol HandshakeProtocol `protobuf:"varint,1,opt,name=handshake_security_protocol,json=handshakeSecurityProtocol,proto3,enum=grpc.gcp.HandshakeProtocol" json:"handshake_security_protocol,omitempty"` - // The application protocols supported by the client, e.g., "h2" (for http2), - // "grpc". - ApplicationProtocols []string `protobuf:"bytes,2,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` - // The record protocols supported by the client, e.g., - // "ALTSRP_GCM_AES128". - RecordProtocols []string `protobuf:"bytes,3,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` - // (Optional) Describes which server identities are acceptable by the client. - // If target identities are provided and none of them matches the peer - // identity of the server, handshake will fail. - TargetIdentities []*Identity `protobuf:"bytes,4,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` - // (Optional) Application may specify a local identity. Otherwise, the - // handshaker chooses a default local identity. - LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // (Optional) Local endpoint information of the connection to the server, - // such as local IP address, port number, and network protocol. - LocalEndpoint *Endpoint `protobuf:"bytes,6,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` - // (Optional) Endpoint information of the remote server, such as IP address, - // port number, and network protocol. - RemoteEndpoint *Endpoint `protobuf:"bytes,7,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` - // (Optional) If target name is provided, a secure naming check is performed - // to verify that the peer authenticated identity is indeed authorized to run - // the target name. - TargetName string `protobuf:"bytes,8,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` - // (Optional) RPC protocol versions supported by the client. - RpcVersions *RpcProtocolVersions `protobuf:"bytes,9,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` - // (Optional) Maximum frame size supported by the client. 
- MaxFrameSize uint32 `protobuf:"varint,10,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` -} - -func (x *StartClientHandshakeReq) Reset() { - *x = StartClientHandshakeReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartClientHandshakeReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartClientHandshakeReq) ProtoMessage() {} - -func (x *StartClientHandshakeReq) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartClientHandshakeReq.ProtoReflect.Descriptor instead. -func (*StartClientHandshakeReq) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{2} -} - -func (x *StartClientHandshakeReq) GetHandshakeSecurityProtocol() HandshakeProtocol { - if x != nil { - return x.HandshakeSecurityProtocol - } - return HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED -} - -func (x *StartClientHandshakeReq) GetApplicationProtocols() []string { - if x != nil { - return x.ApplicationProtocols - } - return nil -} - -func (x *StartClientHandshakeReq) GetRecordProtocols() []string { - if x != nil { - return x.RecordProtocols - } - return nil -} - -func (x *StartClientHandshakeReq) GetTargetIdentities() []*Identity { - if x != nil { - return x.TargetIdentities - } - return nil -} - -func (x *StartClientHandshakeReq) GetLocalIdentity() *Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *StartClientHandshakeReq) GetLocalEndpoint() *Endpoint { - if x != nil { - return x.LocalEndpoint - } - return nil -} - -func (x *StartClientHandshakeReq) GetRemoteEndpoint() *Endpoint { - if x != nil { - return x.RemoteEndpoint - } - return nil -} - -func (x *StartClientHandshakeReq) GetTargetName() string { - if x != nil { - return x.TargetName - } - return "" -} - -func (x *StartClientHandshakeReq) GetRpcVersions() *RpcProtocolVersions { - if x != nil { - return x.RpcVersions - } - return nil -} - -func (x *StartClientHandshakeReq) GetMaxFrameSize() uint32 { - if x != nil { - return x.MaxFrameSize - } - return 0 -} - -type ServerHandshakeParameters struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The record protocols supported by the server, e.g., - // "ALTSRP_GCM_AES128". - RecordProtocols []string `protobuf:"bytes,1,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` - // (Optional) A list of local identities supported by the server, if - // specified. Otherwise, the handshaker chooses a default local identity. 
- LocalIdentities []*Identity `protobuf:"bytes,2,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` -} - -func (x *ServerHandshakeParameters) Reset() { - *x = ServerHandshakeParameters{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerHandshakeParameters) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerHandshakeParameters) ProtoMessage() {} - -func (x *ServerHandshakeParameters) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerHandshakeParameters.ProtoReflect.Descriptor instead. -func (*ServerHandshakeParameters) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{3} -} - -func (x *ServerHandshakeParameters) GetRecordProtocols() []string { - if x != nil { - return x.RecordProtocols - } - return nil -} - -func (x *ServerHandshakeParameters) GetLocalIdentities() []*Identity { - if x != nil { - return x.LocalIdentities - } - return nil -} - -type StartServerHandshakeReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocols supported by the server, e.g., "h2" (for http2), - // "grpc". - ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` - // Handshake parameters (record protocols and local identities supported by - // the server) mapped by the handshake protocol. Each handshake security - // protocol (e.g., TLS or ALTS) has its own set of record protocols and local - // identities. Since protobuf does not support enum as key to the map, the key - // to handshake_parameters is the integer value of HandshakeProtocol enum. - HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Bytes in out_frames returned from the peer's HandshakerResp. It is possible - // that the peer's out_frames are split into multiple HandshakReq messages. - InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` - // (Optional) Local endpoint information of the connection to the client, - // such as local IP address, port number, and network protocol. - LocalEndpoint *Endpoint `protobuf:"bytes,4,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` - // (Optional) Endpoint information of the remote client, such as IP address, - // port number, and network protocol. - RemoteEndpoint *Endpoint `protobuf:"bytes,5,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` - // (Optional) RPC protocol versions supported by the server. - RpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` - // (Optional) Maximum frame size supported by the server. 
- MaxFrameSize uint32 `protobuf:"varint,7,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` -} - -func (x *StartServerHandshakeReq) Reset() { - *x = StartServerHandshakeReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartServerHandshakeReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartServerHandshakeReq) ProtoMessage() {} - -func (x *StartServerHandshakeReq) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartServerHandshakeReq.ProtoReflect.Descriptor instead. -func (*StartServerHandshakeReq) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{4} -} - -func (x *StartServerHandshakeReq) GetApplicationProtocols() []string { - if x != nil { - return x.ApplicationProtocols - } - return nil -} - -func (x *StartServerHandshakeReq) GetHandshakeParameters() map[int32]*ServerHandshakeParameters { - if x != nil { - return x.HandshakeParameters - } - return nil -} - -func (x *StartServerHandshakeReq) GetInBytes() []byte { - if x != nil { - return x.InBytes - } - return nil -} - -func (x *StartServerHandshakeReq) GetLocalEndpoint() *Endpoint { - if x != nil { - return x.LocalEndpoint - } - return nil -} - -func (x *StartServerHandshakeReq) GetRemoteEndpoint() *Endpoint { - if x != nil { - return x.RemoteEndpoint - } - return nil -} - -func (x *StartServerHandshakeReq) GetRpcVersions() *RpcProtocolVersions { - if x != nil { - return x.RpcVersions - } - return nil -} - -func (x *StartServerHandshakeReq) GetMaxFrameSize() uint32 { - if x != nil { - return x.MaxFrameSize - } - return 0 -} - -type NextHandshakeMessageReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Bytes in out_frames returned from the peer's HandshakerResp. It is possible - // that the peer's out_frames are split into multiple NextHandshakerMessageReq - // messages. - InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` -} - -func (x *NextHandshakeMessageReq) Reset() { - *x = NextHandshakeMessageReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NextHandshakeMessageReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NextHandshakeMessageReq) ProtoMessage() {} - -func (x *NextHandshakeMessageReq) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NextHandshakeMessageReq.ProtoReflect.Descriptor instead. 
-func (*NextHandshakeMessageReq) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{5} -} - -func (x *NextHandshakeMessageReq) GetInBytes() []byte { - if x != nil { - return x.InBytes - } - return nil -} - -type HandshakerReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to ReqOneof: - // - // *HandshakerReq_ClientStart - // *HandshakerReq_ServerStart - // *HandshakerReq_Next - ReqOneof isHandshakerReq_ReqOneof `protobuf_oneof:"req_oneof"` -} - -func (x *HandshakerReq) Reset() { - *x = HandshakerReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HandshakerReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HandshakerReq) ProtoMessage() {} - -func (x *HandshakerReq) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HandshakerReq.ProtoReflect.Descriptor instead. -func (*HandshakerReq) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{6} -} - -func (m *HandshakerReq) GetReqOneof() isHandshakerReq_ReqOneof { - if m != nil { - return m.ReqOneof - } - return nil -} - -func (x *HandshakerReq) GetClientStart() *StartClientHandshakeReq { - if x, ok := x.GetReqOneof().(*HandshakerReq_ClientStart); ok { - return x.ClientStart - } - return nil -} - -func (x *HandshakerReq) GetServerStart() *StartServerHandshakeReq { - if x, ok := x.GetReqOneof().(*HandshakerReq_ServerStart); ok { - return x.ServerStart - } - return nil -} - -func (x *HandshakerReq) GetNext() *NextHandshakeMessageReq { - if x, ok := x.GetReqOneof().(*HandshakerReq_Next); ok { - return x.Next - } - return nil -} - -type isHandshakerReq_ReqOneof interface { - isHandshakerReq_ReqOneof() -} - -type HandshakerReq_ClientStart struct { - // The start client handshake request message. - ClientStart *StartClientHandshakeReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` -} - -type HandshakerReq_ServerStart struct { - // The start server handshake request message. - ServerStart *StartServerHandshakeReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` -} - -type HandshakerReq_Next struct { - // The next handshake request message. - Next *NextHandshakeMessageReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` -} - -func (*HandshakerReq_ClientStart) isHandshakerReq_ReqOneof() {} - -func (*HandshakerReq_ServerStart) isHandshakerReq_ReqOneof() {} - -func (*HandshakerReq_Next) isHandshakerReq_ReqOneof() {} - -type HandshakerResult struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocol negotiated for this connection. - ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` - // The record protocol negotiated for this connection. - RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` - // Cryptographic key data. 
The key data may be more than the key length - // required for the record protocol, thus the client of the handshaker - // service needs to truncate the key data into the right key length. - KeyData []byte `protobuf:"bytes,3,opt,name=key_data,json=keyData,proto3" json:"key_data,omitempty"` - // The authenticated identity of the peer. - PeerIdentity *Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` - // The local identity used in the handshake. - LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // Indicate whether the handshaker service client should keep the channel - // between the handshaker service open, e.g., in order to handle - // post-handshake messages in the future. - KeepChannelOpen bool `protobuf:"varint,6,opt,name=keep_channel_open,json=keepChannelOpen,proto3" json:"keep_channel_open,omitempty"` - // The RPC protocol versions supported by the peer. - PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,7,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` - // The maximum frame size of the peer. - MaxFrameSize uint32 `protobuf:"varint,8,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` -} - -func (x *HandshakerResult) Reset() { - *x = HandshakerResult{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HandshakerResult) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HandshakerResult) ProtoMessage() {} - -func (x *HandshakerResult) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HandshakerResult.ProtoReflect.Descriptor instead. -func (*HandshakerResult) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{7} -} - -func (x *HandshakerResult) GetApplicationProtocol() string { - if x != nil { - return x.ApplicationProtocol - } - return "" -} - -func (x *HandshakerResult) GetRecordProtocol() string { - if x != nil { - return x.RecordProtocol - } - return "" -} - -func (x *HandshakerResult) GetKeyData() []byte { - if x != nil { - return x.KeyData - } - return nil -} - -func (x *HandshakerResult) GetPeerIdentity() *Identity { - if x != nil { - return x.PeerIdentity - } - return nil -} - -func (x *HandshakerResult) GetLocalIdentity() *Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *HandshakerResult) GetKeepChannelOpen() bool { - if x != nil { - return x.KeepChannelOpen - } - return false -} - -func (x *HandshakerResult) GetPeerRpcVersions() *RpcProtocolVersions { - if x != nil { - return x.PeerRpcVersions - } - return nil -} - -func (x *HandshakerResult) GetMaxFrameSize() uint32 { - if x != nil { - return x.MaxFrameSize - } - return 0 -} - -type HandshakerStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The status code. This could be the gRPC status code. - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // The status details. 
- Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` -} - -func (x *HandshakerStatus) Reset() { - *x = HandshakerStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HandshakerStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HandshakerStatus) ProtoMessage() {} - -func (x *HandshakerStatus) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HandshakerStatus.ProtoReflect.Descriptor instead. -func (*HandshakerStatus) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{8} -} - -func (x *HandshakerStatus) GetCode() uint32 { - if x != nil { - return x.Code - } - return 0 -} - -func (x *HandshakerStatus) GetDetails() string { - if x != nil { - return x.Details - } - return "" -} - -type HandshakerResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Frames to be given to the peer for the NextHandshakeMessageReq. May be - // empty if no out_frames have to be sent to the peer or if in_bytes in the - // HandshakerReq are incomplete. All the non-empty out frames must be sent to - // the peer even if the handshaker status is not OK as these frames may - // contain the alert frames. - OutFrames []byte `protobuf:"bytes,1,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` - // Number of bytes in the in_bytes consumed by the handshaker. It is possible - // that part of in_bytes in HandshakerReq was unrelated to the handshake - // process. - BytesConsumed uint32 `protobuf:"varint,2,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` - // This is set iff the handshake was successful. out_frames may still be set - // to frames that needs to be forwarded to the peer. - Result *HandshakerResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` - // Status of the handshaker. - Status *HandshakerStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` -} - -func (x *HandshakerResp) Reset() { - *x = HandshakerResp{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HandshakerResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HandshakerResp) ProtoMessage() {} - -func (x *HandshakerResp) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HandshakerResp.ProtoReflect.Descriptor instead. 
-func (*HandshakerResp) Descriptor() ([]byte, []int) { - return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{9} -} - -func (x *HandshakerResp) GetOutFrames() []byte { - if x != nil { - return x.OutFrames - } - return nil -} - -func (x *HandshakerResp) GetBytesConsumed() uint32 { - if x != nil { - return x.BytesConsumed - } - return 0 -} - -func (x *HandshakerResp) GetResult() *HandshakerResult { - if x != nil { - return x.Result - } - return nil -} - -func (x *HandshakerResp) GetStatus() *HandshakerStatus { - if x != nil { - return x.Status - } - return nil -} - -var File_grpc_gcp_handshaker_proto protoreflect.FileDescriptor - -var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x68, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x1a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x74, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x69, - 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x35, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xe8, 0x01, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x12, 0x29, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, - 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x61, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, - 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, - 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x22, 0xd3, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b, - 0x68, 0x61, 
0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x19, - 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, - 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x29, - 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x11, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, - 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1f, 0x0a, - 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x40, - 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x85, 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 
0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, - 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, - 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa5, - 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, - 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, - 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, - 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, - 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, - 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 
0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, - 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe5, 0x01, 0x0a, - 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, - 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, - 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, - 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, - 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, - 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, - 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, - 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, - 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 
0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, - 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, - 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, - 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, - 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, - 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, - 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, - 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, - 0x52, 0x4f, 
0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, - 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, - 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, - 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, - 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, - 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_gcp_handshaker_proto_rawDescOnce sync.Once - file_grpc_gcp_handshaker_proto_rawDescData = file_grpc_gcp_handshaker_proto_rawDesc -) - -func file_grpc_gcp_handshaker_proto_rawDescGZIP() []byte { - file_grpc_gcp_handshaker_proto_rawDescOnce.Do(func() { - file_grpc_gcp_handshaker_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_handshaker_proto_rawDescData) - }) - return file_grpc_gcp_handshaker_proto_rawDescData -} - -var file_grpc_gcp_handshaker_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_grpc_gcp_handshaker_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_grpc_gcp_handshaker_proto_goTypes = []interface{}{ - (HandshakeProtocol)(0), // 0: grpc.gcp.HandshakeProtocol - (NetworkProtocol)(0), // 1: grpc.gcp.NetworkProtocol - (*Endpoint)(nil), // 2: grpc.gcp.Endpoint - (*Identity)(nil), // 3: grpc.gcp.Identity - (*StartClientHandshakeReq)(nil), // 4: grpc.gcp.StartClientHandshakeReq - (*ServerHandshakeParameters)(nil), // 5: grpc.gcp.ServerHandshakeParameters - (*StartServerHandshakeReq)(nil), // 6: grpc.gcp.StartServerHandshakeReq - (*NextHandshakeMessageReq)(nil), // 7: grpc.gcp.NextHandshakeMessageReq - (*HandshakerReq)(nil), // 8: grpc.gcp.HandshakerReq - (*HandshakerResult)(nil), // 9: grpc.gcp.HandshakerResult - (*HandshakerStatus)(nil), // 10: grpc.gcp.HandshakerStatus - (*HandshakerResp)(nil), // 11: grpc.gcp.HandshakerResp - nil, // 12: grpc.gcp.Identity.AttributesEntry - nil, // 13: grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry - (*RpcProtocolVersions)(nil), // 14: grpc.gcp.RpcProtocolVersions -} -var file_grpc_gcp_handshaker_proto_depIdxs = []int32{ - 1, // 0: grpc.gcp.Endpoint.protocol:type_name -> grpc.gcp.NetworkProtocol - 12, // 1: grpc.gcp.Identity.attributes:type_name -> grpc.gcp.Identity.AttributesEntry - 0, // 2: grpc.gcp.StartClientHandshakeReq.handshake_security_protocol:type_name -> grpc.gcp.HandshakeProtocol - 3, // 3: grpc.gcp.StartClientHandshakeReq.target_identities:type_name -> grpc.gcp.Identity - 3, // 4: 
grpc.gcp.StartClientHandshakeReq.local_identity:type_name -> grpc.gcp.Identity - 2, // 5: grpc.gcp.StartClientHandshakeReq.local_endpoint:type_name -> grpc.gcp.Endpoint - 2, // 6: grpc.gcp.StartClientHandshakeReq.remote_endpoint:type_name -> grpc.gcp.Endpoint - 14, // 7: grpc.gcp.StartClientHandshakeReq.rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions - 3, // 8: grpc.gcp.ServerHandshakeParameters.local_identities:type_name -> grpc.gcp.Identity - 13, // 9: grpc.gcp.StartServerHandshakeReq.handshake_parameters:type_name -> grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry - 2, // 10: grpc.gcp.StartServerHandshakeReq.local_endpoint:type_name -> grpc.gcp.Endpoint - 2, // 11: grpc.gcp.StartServerHandshakeReq.remote_endpoint:type_name -> grpc.gcp.Endpoint - 14, // 12: grpc.gcp.StartServerHandshakeReq.rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions - 4, // 13: grpc.gcp.HandshakerReq.client_start:type_name -> grpc.gcp.StartClientHandshakeReq - 6, // 14: grpc.gcp.HandshakerReq.server_start:type_name -> grpc.gcp.StartServerHandshakeReq - 7, // 15: grpc.gcp.HandshakerReq.next:type_name -> grpc.gcp.NextHandshakeMessageReq - 3, // 16: grpc.gcp.HandshakerResult.peer_identity:type_name -> grpc.gcp.Identity - 3, // 17: grpc.gcp.HandshakerResult.local_identity:type_name -> grpc.gcp.Identity - 14, // 18: grpc.gcp.HandshakerResult.peer_rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions - 9, // 19: grpc.gcp.HandshakerResp.result:type_name -> grpc.gcp.HandshakerResult - 10, // 20: grpc.gcp.HandshakerResp.status:type_name -> grpc.gcp.HandshakerStatus - 5, // 21: grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry.value:type_name -> grpc.gcp.ServerHandshakeParameters - 8, // 22: grpc.gcp.HandshakerService.DoHandshake:input_type -> grpc.gcp.HandshakerReq - 11, // 23: grpc.gcp.HandshakerService.DoHandshake:output_type -> grpc.gcp.HandshakerResp - 23, // [23:24] is the sub-list for method output_type - 22, // [22:23] is the sub-list for method input_type - 22, // [22:22] is the sub-list for extension type_name - 22, // [22:22] is the sub-list for extension extendee - 0, // [0:22] is the sub-list for field type_name -} - -func init() { file_grpc_gcp_handshaker_proto_init() } -func file_grpc_gcp_handshaker_proto_init() { - if File_grpc_gcp_handshaker_proto != nil { - return - } - file_grpc_gcp_transport_security_common_proto_init() - if !protoimpl.UnsafeEnabled { - file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Endpoint); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Identity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartClientHandshakeReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerHandshakeParameters); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v interface{}, i 
int) interface{} { - switch v := v.(*StartServerHandshakeReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NextHandshakeMessageReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandshakerReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandshakerResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandshakerStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandshakerResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*Identity_ServiceAccount)(nil), - (*Identity_Hostname)(nil), - } - file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []interface{}{ - (*HandshakerReq_ClientStart)(nil), - (*HandshakerReq_ServerStart)(nil), - (*HandshakerReq_Next)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_gcp_handshaker_proto_rawDesc, - NumEnums: 2, - NumMessages: 12, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_grpc_gcp_handshaker_proto_goTypes, - DependencyIndexes: file_grpc_gcp_handshaker_proto_depIdxs, - EnumInfos: file_grpc_gcp_handshaker_proto_enumTypes, - MessageInfos: file_grpc_gcp_handshaker_proto_msgTypes, - }.Build() - File_grpc_gcp_handshaker_proto = out.File - file_grpc_gcp_handshaker_proto_rawDesc = nil - file_grpc_gcp_handshaker_proto_goTypes = nil - file_grpc_gcp_handshaker_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go deleted file mode 100644 index 39ecccf878..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2018 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
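
For orientation: this hunk removes the generated client and server stubs for the grpc.gcp.HandshakerService bidi stream. Below is a minimal sketch of how such a generated client is typically driven, following the protocol documented on the messages above (exactly one client_start or server_start message, then next messages carrying further peer bytes). It is illustrative only — the grpc_gcp package path is internal to grpc-go and cannot be imported by outside code, and forwardToPeer/readFromPeer are hypothetical transport helpers, not part of any API.

package handshakedemo

import (
	"context"

	grpc "google.golang.org/grpc"
	grpc_gcp "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
)

// Hypothetical stand-ins for the caller's connection to the remote peer.
func forwardToPeer(frames []byte) {}
func readFromPeer() []byte        { return nil }

func runClientHandshake(ctx context.Context, cc *grpc.ClientConn) error {
	stream, err := grpc_gcp.NewHandshakerServiceClient(cc).DoHandshake(ctx)
	if err != nil {
		return err
	}
	// Exactly one client_start message opens the exchange.
	err = stream.Send(&grpc_gcp.HandshakerReq{
		ReqOneof: &grpc_gcp.HandshakerReq_ClientStart{
			ClientStart: &grpc_gcp.StartClientHandshakeReq{
				HandshakeSecurityProtocol: grpc_gcp.HandshakeProtocol_ALTS,
				ApplicationProtocols:      []string{"grpc"},
				RecordProtocols:           []string{"ALTSRP_GCM_AES128_REKEY"},
			},
		},
	})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err
		}
		// Per the message docs above: non-empty out_frames must reach the
		// peer even on failure, since they may carry alert frames.
		if len(resp.GetOutFrames()) > 0 {
			forwardToPeer(resp.GetOutFrames())
		}
		if resp.GetResult() != nil {
			return nil // result is set iff the handshake succeeded
		}
		// Feed further bytes from the peer back in via a next message.
		err = stream.Send(&grpc_gcp.HandshakerReq{
			ReqOneof: &grpc_gcp.HandshakerReq_Next{
				Next: &grpc_gcp.NextHandshakeMessageReq{InBytes: readFromPeer()},
			},
		})
		if err != nil {
			return err
		}
	}
}

The stream stays open until result is set or Recv fails; forwarding out_frames before checking for a result is what the alert-frame caveat on HandshakerResp calls for.
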
- -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/handshaker.proto - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.22.0 -// source: grpc/gcp/handshaker.proto - -package grpc_gcp - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - HandshakerService_DoHandshake_FullMethodName = "/grpc.gcp.HandshakerService/DoHandshake" -) - -// HandshakerServiceClient is the client API for HandshakerService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type HandshakerServiceClient interface { - // Handshaker service accepts a stream of handshaker request, returning a - // stream of handshaker response. Client is expected to send exactly one - // message with either client_start or server_start followed by one or more - // messages with next. Each time client sends a request, the handshaker - // service expects to respond. Client does not have to wait for service's - // response before sending next request. - DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) -} - -type handshakerServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceClient { - return &handshakerServiceClient{cc} -} - -func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { - stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], HandshakerService_DoHandshake_FullMethodName, opts...) - if err != nil { - return nil, err - } - x := &handshakerServiceDoHandshakeClient{stream} - return x, nil -} - -type HandshakerService_DoHandshakeClient interface { - Send(*HandshakerReq) error - Recv() (*HandshakerResp, error) - grpc.ClientStream -} - -type handshakerServiceDoHandshakeClient struct { - grpc.ClientStream -} - -func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error { - return x.ClientStream.SendMsg(m) -} - -func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) { - m := new(HandshakerResp) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// HandshakerServiceServer is the server API for HandshakerService service. -// All implementations must embed UnimplementedHandshakerServiceServer -// for forward compatibility -type HandshakerServiceServer interface { - // Handshaker service accepts a stream of handshaker request, returning a - // stream of handshaker response. Client is expected to send exactly one - // message with either client_start or server_start followed by one or more - // messages with next. Each time client sends a request, the handshaker - // service expects to respond. Client does not have to wait for service's - // response before sending next request. 
- DoHandshake(HandshakerService_DoHandshakeServer) error - mustEmbedUnimplementedHandshakerServiceServer() -} - -// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations. -type UnimplementedHandshakerServiceServer struct { -} - -func (UnimplementedHandshakerServiceServer) DoHandshake(HandshakerService_DoHandshakeServer) error { - return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") -} -func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {} - -// UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to HandshakerServiceServer will -// result in compilation errors. -type UnsafeHandshakerServiceServer interface { - mustEmbedUnimplementedHandshakerServiceServer() -} - -func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) { - s.RegisterService(&HandshakerService_ServiceDesc, srv) -} - -func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream}) -} - -type HandshakerService_DoHandshakeServer interface { - Send(*HandshakerResp) error - Recv() (*HandshakerReq, error) - grpc.ServerStream -} - -type handshakerServiceDoHandshakeServer struct { - grpc.ServerStream -} - -func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error { - return x.ServerStream.SendMsg(m) -} - -func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) { - m := new(HandshakerReq) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// HandshakerService_ServiceDesc is the grpc.ServiceDesc for HandshakerService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var HandshakerService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.gcp.HandshakerService", - HandlerType: (*HandshakerServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "DoHandshake", - Handler: _HandshakerService_DoHandshake_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc/gcp/handshaker.proto", -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go deleted file mode 100644 index c2e564c7de..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright 2018 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
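
The next removed file, transport_security_common.pb.go, defined RpcProtocolVersions — the min/max version range each peer advertises via the rpc_versions fields above. As a hedged sketch (modeled on, not copied from, grpc-go's internal version comparison), the overlap check such a range implies looks like this, using only the generated nil-safe getters:

package handshakedemo

import grpc_gcp "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"

// compare orders two versions: -1 if a < b, 0 if equal, +1 if a > b.
// The generated getters return 0 for nil receivers, so nil is treated as v0.0.
func compare(a, b *grpc_gcp.RpcProtocolVersions_Version) int {
	switch {
	case a.GetMajor() != b.GetMajor():
		if a.GetMajor() < b.GetMajor() {
			return -1
		}
		return 1
	case a.GetMinor() != b.GetMinor():
		if a.GetMinor() < b.GetMinor() {
			return -1
		}
		return 1
	}
	return 0
}

// compatible reports whether the two advertised ranges intersect, i.e.
// whether some version is >= both minima and <= both maxima.
func compatible(local, peer *grpc_gcp.RpcProtocolVersions) bool {
	return compare(local.GetMinRpcVersion(), peer.GetMaxRpcVersion()) <= 0 &&
		compare(peer.GetMinRpcVersion(), local.GetMaxRpcVersion()) <= 0
}

Two peers can interoperate only if the ranges intersect; the highest version they share is the smaller of the two advertised maxima.
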
- -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/transport_security_common.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v4.22.0 -// source: grpc/gcp/transport_security_common.proto - -package grpc_gcp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The security level of the created channel. The list is sorted in increasing -// level of security. This order must always be maintained. -type SecurityLevel int32 - -const ( - SecurityLevel_SECURITY_NONE SecurityLevel = 0 - SecurityLevel_INTEGRITY_ONLY SecurityLevel = 1 - SecurityLevel_INTEGRITY_AND_PRIVACY SecurityLevel = 2 -) - -// Enum value maps for SecurityLevel. -var ( - SecurityLevel_name = map[int32]string{ - 0: "SECURITY_NONE", - 1: "INTEGRITY_ONLY", - 2: "INTEGRITY_AND_PRIVACY", - } - SecurityLevel_value = map[string]int32{ - "SECURITY_NONE": 0, - "INTEGRITY_ONLY": 1, - "INTEGRITY_AND_PRIVACY": 2, - } -) - -func (x SecurityLevel) Enum() *SecurityLevel { - p := new(SecurityLevel) - *p = x - return p -} - -func (x SecurityLevel) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (SecurityLevel) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_gcp_transport_security_common_proto_enumTypes[0].Descriptor() -} - -func (SecurityLevel) Type() protoreflect.EnumType { - return &file_grpc_gcp_transport_security_common_proto_enumTypes[0] -} - -func (x SecurityLevel) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use SecurityLevel.Descriptor instead. -func (SecurityLevel) EnumDescriptor() ([]byte, []int) { - return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0} -} - -// Max and min supported RPC protocol versions. -type RpcProtocolVersions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Maximum supported RPC version. - MaxRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,1,opt,name=max_rpc_version,json=maxRpcVersion,proto3" json:"max_rpc_version,omitempty"` - // Minimum supported RPC version. 
- MinRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,2,opt,name=min_rpc_version,json=minRpcVersion,proto3" json:"min_rpc_version,omitempty"` -} - -func (x *RpcProtocolVersions) Reset() { - *x = RpcProtocolVersions{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RpcProtocolVersions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RpcProtocolVersions) ProtoMessage() {} - -func (x *RpcProtocolVersions) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RpcProtocolVersions.ProtoReflect.Descriptor instead. -func (*RpcProtocolVersions) Descriptor() ([]byte, []int) { - return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0} -} - -func (x *RpcProtocolVersions) GetMaxRpcVersion() *RpcProtocolVersions_Version { - if x != nil { - return x.MaxRpcVersion - } - return nil -} - -func (x *RpcProtocolVersions) GetMinRpcVersion() *RpcProtocolVersions_Version { - if x != nil { - return x.MinRpcVersion - } - return nil -} - -// RPC version contains a major version and a minor version. -type RpcProtocolVersions_Version struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` - Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` -} - -func (x *RpcProtocolVersions_Version) Reset() { - *x = RpcProtocolVersions_Version{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RpcProtocolVersions_Version) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RpcProtocolVersions_Version) ProtoMessage() {} - -func (x *RpcProtocolVersions_Version) ProtoReflect() protoreflect.Message { - mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RpcProtocolVersions_Version.ProtoReflect.Descriptor instead. 
-func (*RpcProtocolVersions_Version) Descriptor() ([]byte, []int) { - return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *RpcProtocolVersions_Version) GetMajor() uint32 { - if x != nil { - return x.Major - } - return 0 -} - -func (x *RpcProtocolVersions_Version) GetMinor() uint32 { - if x != nil { - return x.Minor - } - return 0 -} - -var File_grpc_gcp_transport_security_common_proto protoreflect.FileDescriptor - -var file_grpc_gcp_transport_security_common_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x22, 0xea, 0x01, 0x0a, 0x13, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f, - 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, - 0x78, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x0f, 0x6d, - 0x69, 0x6e, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, - 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, - 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, - 0x72, 0x2a, 0x51, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x45, 0x43, 0x55, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, - 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x52, 0x49, - 0x54, 0x59, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x49, 0x4e, 0x54, - 0x45, 0x47, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x49, 0x56, 0x41, - 0x43, 0x59, 0x10, 0x02, 0x42, 0x78, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x1c, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, - 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 
0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_gcp_transport_security_common_proto_rawDescOnce sync.Once - file_grpc_gcp_transport_security_common_proto_rawDescData = file_grpc_gcp_transport_security_common_proto_rawDesc -) - -func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte { - file_grpc_gcp_transport_security_common_proto_rawDescOnce.Do(func() { - file_grpc_gcp_transport_security_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_transport_security_common_proto_rawDescData) - }) - return file_grpc_gcp_transport_security_common_proto_rawDescData -} - -var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_transport_security_common_proto_goTypes = []interface{}{ - (SecurityLevel)(0), // 0: grpc.gcp.SecurityLevel - (*RpcProtocolVersions)(nil), // 1: grpc.gcp.RpcProtocolVersions - (*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version -} -var file_grpc_gcp_transport_security_common_proto_depIdxs = []int32{ - 2, // 0: grpc.gcp.RpcProtocolVersions.max_rpc_version:type_name -> grpc.gcp.RpcProtocolVersions.Version - 2, // 1: grpc.gcp.RpcProtocolVersions.min_rpc_version:type_name -> grpc.gcp.RpcProtocolVersions.Version - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_grpc_gcp_transport_security_common_proto_init() } -func file_grpc_gcp_transport_security_common_proto_init() { - if File_grpc_gcp_transport_security_common_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RpcProtocolVersions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RpcProtocolVersions_Version); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_gcp_transport_security_common_proto_rawDesc, - NumEnums: 1, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_grpc_gcp_transport_security_common_proto_goTypes, - DependencyIndexes: file_grpc_gcp_transport_security_common_proto_depIdxs, - EnumInfos: file_grpc_gcp_transport_security_common_proto_enumTypes, - MessageInfos: file_grpc_gcp_transport_security_common_proto_msgTypes, - }.Build() - File_grpc_gcp_transport_security_common_proto = out.File - file_grpc_gcp_transport_security_common_proto_rawDesc = nil - file_grpc_gcp_transport_security_common_proto_goTypes = nil - file_grpc_gcp_transport_security_common_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/utils.go b/vendor/google.golang.org/grpc/credentials/alts/utils.go deleted file mode 100644 index cbfd056cfb..0000000000 --- a/vendor/google.golang.org/grpc/credentials/alts/utils.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - 
* - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package alts - -import ( - "context" - "errors" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/status" -) - -// AuthInfoFromContext extracts the alts.AuthInfo object from the given context, -// if it exists. This API should be used by gRPC server RPC handlers to get -// information about the communicating peer. For client-side, use grpc.Peer() -// CallOption. -func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { - p, ok := peer.FromContext(ctx) - if !ok { - return nil, errors.New("no Peer found in Context") - } - return AuthInfoFromPeer(p) -} - -// AuthInfoFromPeer extracts the alts.AuthInfo object from the given peer, if it -// exists. This API should be used by gRPC clients after obtaining a peer object -// using the grpc.Peer() CallOption. -func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { - altsAuthInfo, ok := p.AuthInfo.(AuthInfo) - if !ok { - return nil, errors.New("no alts.AuthInfo found in Peer") - } - return altsAuthInfo, nil -} - -// ClientAuthorizationCheck checks whether the client is authorized to access -// the requested resources based on the given expected client service accounts. -// This API should be used by gRPC server RPC handlers. This API should not be -// used by clients. -func ClientAuthorizationCheck(ctx context.Context, expectedServiceAccounts []string) error { - authInfo, err := AuthInfoFromContext(ctx) - if err != nil { - return status.Errorf(codes.PermissionDenied, "The context is not an ALTS-compatible context: %v", err) - } - peer := authInfo.PeerServiceAccount() - for _, sa := range expectedServiceAccounts { - if strings.EqualFold(peer, sa) { - return nil - } - } - return status.Errorf(codes.PermissionDenied, "Client %v is not authorized", peer) -} diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go deleted file mode 100644 index fbdf7dc299..0000000000 --- a/vendor/google.golang.org/grpc/credentials/google/google.go +++ /dev/null @@ -1,145 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package google defines credentials for google cloud services. 
-package google - -import ( - "context" - "fmt" - "time" - - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/alts" - "google.golang.org/grpc/credentials/oauth" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" -) - -const tokenRequestTimeout = 30 * time.Second - -var logger = grpclog.Component("credentials") - -// DefaultCredentialsOptions constructs options to build DefaultCredentials. -type DefaultCredentialsOptions struct { - // PerRPCCreds is a per RPC credentials that is passed to a bundle. - PerRPCCreds credentials.PerRPCCredentials -} - -// NewDefaultCredentialsWithOptions returns a credentials bundle that is -// configured to work with google services. -// -// This API is experimental. -func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credentials.Bundle { - if opts.PerRPCCreds == nil { - ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) - defer cancel() - var err error - opts.PerRPCCreds, err = newADC(ctx) - if err != nil { - logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) - } - } - c := &creds{opts: opts} - bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) - if err != nil { - logger.Warningf("NewDefaultCredentialsWithOptions: failed to create new creds: %v", err) - } - return bundle -} - -// NewDefaultCredentials returns a credentials bundle that is configured to work -// with google services. -// -// This API is experimental. -func NewDefaultCredentials() credentials.Bundle { - return NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{}) -} - -// NewComputeEngineCredentials returns a credentials bundle that is configured to work -// with google services. This API must only be used when running on GCE. Authentication configured -// by this API represents the GCE VM's default service account. -// -// This API is experimental. -func NewComputeEngineCredentials() credentials.Bundle { - return NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{ - PerRPCCreds: oauth.NewComputeEngine(), - }) -} - -// creds implements credentials.Bundle. -type creds struct { - opts DefaultCredentialsOptions - - // Supported modes are defined in internal/internal.go. - mode string - // The active transport credentials associated with this bundle. - transportCreds credentials.TransportCredentials - // The active per RPC credentials associated with this bundle. - perRPCCreds credentials.PerRPCCredentials -} - -func (c *creds) TransportCredentials() credentials.TransportCredentials { - return c.transportCreds -} - -func (c *creds) PerRPCCredentials() credentials.PerRPCCredentials { - if c == nil { - return nil - } - return c.perRPCCreds -} - -var ( - newTLS = func() credentials.TransportCredentials { - return credentials.NewTLS(nil) - } - newALTS = func() credentials.TransportCredentials { - return alts.NewClientCreds(alts.DefaultClientOptions()) - } - newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) { - return oauth.NewApplicationDefault(ctx) - } -) - -// NewWithMode should make a copy of Bundle, and switch mode. Modifying the -// existing Bundle may cause races. -func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { - newCreds := &creds{ - opts: c.opts, - mode: mode, - } - - // Create transport credentials. 
- switch mode { - case internal.CredsBundleModeFallback: - newCreds.transportCreds = newClusterTransportCreds(newTLS(), newALTS()) - case internal.CredsBundleModeBackendFromBalancer, internal.CredsBundleModeBalancer: - // Only the clients can use google default credentials, so we only need - // to create new ALTS client creds here. - newCreds.transportCreds = newALTS() - default: - return nil, fmt.Errorf("unsupported mode: %v", mode) - } - - if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer { - newCreds.perRPCCreds = newCreds.opts.PerRPCCreds - } - - return newCreds, nil -} diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go deleted file mode 100644 index 2c5c8b9eee..0000000000 --- a/vendor/google.golang.org/grpc/credentials/google/xds.go +++ /dev/null @@ -1,128 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package google - -import ( - "context" - "net" - "net/url" - "strings" - - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" -) - -const cfeClusterNamePrefix = "google_cfe_" -const cfeClusterResourceNamePrefix = "/envoy.config.cluster.v3.Cluster/google_cfe_" -const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com" - -// clusterTransportCreds is a combo of TLS + ALTS. -// -// On the client, ClientHandshake picks TLS or ALTS based on address attributes. -// - if attributes has cluster name -// - if cluster name has prefix "google_cfe_", or -// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_", -// use TLS -// - otherwise, use ALTS -// -// - else, do TLS -// -// On the server, ServerHandshake always does TLS. -type clusterTransportCreds struct { - tls credentials.TransportCredentials - alts credentials.TransportCredentials -} - -func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clusterTransportCreds { - return &clusterTransportCreds{ - tls: tls, - alts: alts, - } -} - -// clusterName returns the xDS cluster name stored in the attributes in the -// context. -func clusterName(ctx context.Context) string { - chi := credentials.ClientHandshakeInfoFromContext(ctx) - if chi.Attributes == nil { - return "" - } - cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes) - return cluster -} - -// isDirectPathCluster returns true if the cluster in the context is a -// directpath cluster, meaning ALTS should be used. -func isDirectPathCluster(ctx context.Context) bool { - cluster := clusterName(ctx) - if cluster == "" { - // No cluster; not xDS; use TLS. - return false - } - if strings.HasPrefix(cluster, cfeClusterNamePrefix) { - // xDS cluster prefixed by "google_cfe_"; use TLS. - return false - } - if !strings.HasPrefix(cluster, "xdstp:") { - // Other xDS cluster name; use ALTS. - return true - } - u, err := url.Parse(cluster) - if err != nil { - // Shouldn't happen, but assume ALTS. 
- return true - } - // If authority AND path match our CFE checks, use TLS; otherwise use ALTS. - return u.Host != cfeClusterAuthorityName || !strings.HasPrefix(u.Path, cfeClusterResourceNamePrefix) -} - -func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if isDirectPathCluster(ctx) { - // If attributes have cluster name, and cluster name is not cfe, it's a - // backend address, use ALTS. - return c.alts.ClientHandshake(ctx, authority, rawConn) - } - return c.tls.ClientHandshake(ctx, authority, rawConn) -} - -func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { - return c.tls.ServerHandshake(conn) -} - -func (c *clusterTransportCreds) Info() credentials.ProtocolInfo { - // TODO: this always returns tls.Info now, because we don't have a cluster - // name to check when this method is called. This method doesn't affect - // anything important now. We may want to revisit this if it becomes more - // important later. - return c.tls.Info() -} - -func (c *clusterTransportCreds) Clone() credentials.TransportCredentials { - return &clusterTransportCreds{ - tls: c.tls.Clone(), - alts: c.alts.Clone(), - } -} - -func (c *clusterTransportCreds) OverrideServerName(s string) error { - if err := c.tls.OverrideServerName(s); err != nil { - return err - } - return c.alts.OverrideServerName(s) -} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go deleted file mode 100644 index d475cbc089..0000000000 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ /dev/null @@ -1,244 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package oauth implements gRPC credentials using OAuth. -package oauth - -import ( - "context" - "fmt" - "net/url" - "os" - "sync" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" - "google.golang.org/grpc/credentials" -) - -// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. -type TokenSource struct { - oauth2.TokenSource -} - -// GetRequestMetadata gets the request metadata as a map from a TokenSource. -func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - token, err := ts.Token() - if err != nil { - return nil, err - } - ri, _ := credentials.RequestInfoFromContext(ctx) - if err = credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { - return nil, fmt.Errorf("unable to transfer TokenSource PerRPCCredentials: %v", err) - } - return map[string]string{ - "authorization": token.Type() + " " + token.AccessToken, - }, nil -} - -// RequireTransportSecurity indicates whether the credentials requires transport security. 
-func (ts TokenSource) RequireTransportSecurity() bool { - return true -} - -// removeServiceNameFromJWTURI removes RPC service name from URI. -func removeServiceNameFromJWTURI(uri string) (string, error) { - parsed, err := url.Parse(uri) - if err != nil { - return "", err - } - parsed.Path = "/" - return parsed.String(), nil -} - -type jwtAccess struct { - jsonKey []byte -} - -// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. -func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { - jsonKey, err := os.ReadFile(keyFile) - if err != nil { - return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) - } - return NewJWTAccessFromKey(jsonKey) -} - -// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. -func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { - return jwtAccess{jsonKey}, nil -} - -func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - // Remove RPC service name from URI that will be used as audience - // in a self-signed JWT token. It follows https://google.aip.dev/auth/4111. - aud, err := removeServiceNameFromJWTURI(uri[0]) - if err != nil { - return nil, err - } - // TODO: the returned TokenSource is reusable. Store it in a sync.Map, with - // uri as the key, to avoid recreating for every RPC. - ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, aud) - if err != nil { - return nil, err - } - token, err := ts.Token() - if err != nil { - return nil, err - } - ri, _ := credentials.RequestInfoFromContext(ctx) - if err = credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { - return nil, fmt.Errorf("unable to transfer jwtAccess PerRPCCredentials: %v", err) - } - return map[string]string{ - "authorization": token.Type() + " " + token.AccessToken, - }, nil -} - -func (j jwtAccess) RequireTransportSecurity() bool { - return true -} - -// oauthAccess supplies PerRPCCredentials from a given token. -type oauthAccess struct { - token oauth2.Token -} - -// NewOauthAccess constructs the PerRPCCredentials using a given token. -// -// Deprecated: use oauth.TokenSource instead. -func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { - return oauthAccess{token: *token} -} - -func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - ri, _ := credentials.RequestInfoFromContext(ctx) - if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { - return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err) - } - return map[string]string{ - "authorization": oa.token.Type() + " " + oa.token.AccessToken, - }, nil -} - -func (oa oauthAccess) RequireTransportSecurity() bool { - return true -} - -// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from -// Google Compute Engine (GCE)'s metadata server. It is only valid to use this -// if your program is running on a GCE instance. -// TODO(dsymonds): Deprecate and remove this. -func NewComputeEngine() credentials.PerRPCCredentials { - return TokenSource{google.ComputeTokenSource("")} -} - -// serviceAccount represents PerRPCCredentials via JWT signing key. 
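The oauth package deleted from the vendor tree here still exists upstream in grpc-go; it is the usual way to attach per-RPC OAuth credentials to a channel, and its GetRequestMetadata implementations show why a secure transport is enforced. A minimal usage sketch (token and target are placeholders):

```go
package main

import (
	"crypto/tls"
	"log"

	"golang.org/x/oauth2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	// Per-RPC credentials add "authorization: <type> <token>" metadata to
	// every call; RequireTransportSecurity() forces a secure channel, which
	// is why GetRequestMetadata consults CheckSecurityLevel above.
	perRPC := oauth.TokenSource{
		TokenSource: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder-token"}),
	}
	conn, err := grpc.Dial("example.com:443", // placeholder target
		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})),
		grpc.WithPerRPCCredentials(perRPC),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```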
-type serviceAccount struct { - mu sync.Mutex - config *jwt.Config - t *oauth2.Token -} - -func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - s.mu.Lock() - defer s.mu.Unlock() - if !s.t.Valid() { - var err error - s.t, err = s.config.TokenSource(ctx).Token() - if err != nil { - return nil, err - } - } - ri, _ := credentials.RequestInfoFromContext(ctx) - if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { - return nil, fmt.Errorf("unable to transfer serviceAccount PerRPCCredentials: %v", err) - } - return map[string]string{ - "authorization": s.t.Type() + " " + s.t.AccessToken, - }, nil -} - -func (s *serviceAccount) RequireTransportSecurity() bool { - return true -} - -// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice -// from a Google Developers service account. -func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { - config, err := google.JWTConfigFromJSON(jsonKey, scope...) - if err != nil { - return nil, err - } - return &serviceAccount{config: config}, nil -} - -// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file -// of a Google Developers service account. -func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { - jsonKey, err := os.ReadFile(keyFile) - if err != nil { - return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) - } - return NewServiceAccountFromKey(jsonKey, scope...) -} - -// NewApplicationDefault returns "Application Default Credentials". For more -// detail, see https://developers.google.com/accounts/docs/application-default-credentials. -func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { - creds, err := google.FindDefaultCredentials(ctx, scope...) - if err != nil { - return nil, err - } - - // If JSON is nil, the authentication is provided by the environment and not - // with a credentials file, e.g. when code is running on Google Cloud - // Platform. Use the returned token source. - if creds.JSON == nil { - return TokenSource{creds.TokenSource}, nil - } - - // If auth is provided by env variable or creds file, the behavior will be - // different based on whether scope is set. Because the returned - // creds.TokenSource does oauth with jwt by default, and it requires scope. - // We can only use it if scope is not empty, otherwise it will fail with - // missing scope error. - // - // If scope is set, use it, it should just work. - // - // If scope is not set, we try to use jwt directly without oauth (this only - // works if it's a service account). - - if len(scope) != 0 { - return TokenSource{creds.TokenSource}, nil - } - - // Try to convert JSON to a jwt config without setting the optional scope - // parameter to check if it's a service account (the function errors if it's - // not). This is necessary because the returned config doesn't show the type - // of the account. - if _, err := google.JWTConfigFromJSON(creds.JSON); err != nil { - // If this fails, it's not a service account, return the original - // TokenSource from above. - return TokenSource{creds.TokenSource}, nil - } - - // If it's a service account, create a JWT only access with the key. 
- return NewJWTAccessFromKey(creds.JSON) -} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 877b7cd21a..5dafd34edf 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -44,10 +44,25 @@ func (t TLSInfo) AuthType() string { return "tls" } +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + // GetSecurityValue returns security info requested by channelz. func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], + StandardName: cipherSuiteLookup(t.State.CipherSuite), } // Currently there's no way to get LocalCertificate info from tls package. if len(t.State.PeerCertificates) > 0 { @@ -138,10 +153,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + // NewTLS uses c to construct a TransportCredentials based on TLS. 
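Before the function body below, a caller-side sketch of what the new defaulting in NewTLS means: an empty tls.Config now yields MinVersion TLS 1.2 and a cipher-suite list with the RFC 7540 Appendix A suites filtered out, while explicit settings are left untouched.

```go
package main

import (
	"crypto/tls"

	"google.golang.org/grpc/credentials"
)

func main() {
	// Zero-value config: NewTLS clones it, then defaults MinVersion to
	// TLS 1.2 and fills CipherSuites from tls.CipherSuites() minus the
	// HTTP/2-forbidden entries.
	_ = credentials.NewTLS(&tls.Config{})

	// Opting out: an explicit MinVersion (or CipherSuites) is respected
	// as-is, so callers that need the older behavior can keep it.
	_ = credentials.NewTLS(&tls.Config{MinVersion: tls.VersionTLS10})
}
```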
func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{credinternal.CloneTLSConfig(c)} tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } return tc } @@ -205,32 +249,3 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", - tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", - tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 15a3d5102a..ba24261804 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -46,6 +46,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + 
internal.WithRecvBufferPool = withRecvBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -63,7 +64,6 @@ type dialOptions struct { block bool returnLastError bool timeout time.Duration - scChan <-chan ServiceConfig authority string binaryLogger binarylog.Logger copts transport.ConnectOptions @@ -78,6 +78,7 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration + recvBufferPool SharedBufferPool } // DialOption configures how we set up the connection. @@ -138,6 +139,20 @@ func newJoinDialOption(opts ...DialOption) DialOption { return &joinDialOption{opts: opts} } +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is @@ -235,19 +250,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver or via -// WithDefaultServiceConfig, as specified at -// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be -// removed in a future 1.x release. -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - // WithConnectParams configures the ClientConn to use the provided ConnectParams // for creating and maintaining connections to servers. // @@ -398,6 +400,17 @@ func WithTimeout(d time.Duration) DialOption { // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. +// +// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -472,7 +485,7 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. 
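Several defaults shift in this file: WithUserAgent (below) now appends the gRPC user agent to the caller's string, idleness is enabled by default with a 30-minute timeout, and WithSharedWriteBuffer is a new experimental option. A hedged sketch of a dial call exercising them (target and values are placeholders):

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Idleness now defaults to 30 minutes; pass zero to disable it.
		grpc.WithIdleTimeout(0),
		// New in this diff: release each connection's write buffer after
		// flushing data to the wire.
		grpc.WithSharedWriteBuffer(true),
		// Per the new WithContextDialer note: a negative KeepAlive stops
		// Go's 15s keepalive override (fully restoring OS defaults also
		// needs SO_KEEPALIVE via the Control field).
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			d := &net.Dialer{KeepAlive: -1}
			return d.DialContext(ctx, "tcp", addr)
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```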
func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + o.copts.UserAgent = s + " " + grpcUA }) } @@ -622,12 +635,16 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, UseProxy: true, + UserAgent: grpcUA, }, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + recvBufferPool: nopBufferPool{}, } } @@ -664,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. // -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. // // # Experimental // @@ -676,3 +693,26 @@ func WithIdleTimeout(d time.Duration) DialOption { o.idleTimeout = d }) } + +// WithRecvBufferPool returns a DialOption that configures the ClientConn +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 07a5861352..5ebf88d714 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -51,15 +55,6 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. - // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. 
} var registeredCompressor = make(map[string]Compressor) @@ -90,9 +85,9 @@ func GetCompressor(name string) Compressor { // methods can be called from concurrent goroutines. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 3009b35afe..0ee3d3bae9 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -37,7 +37,7 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. type codec struct{} -func (codec) Marshal(v interface{}) ([]byte, error) { +func (codec) Marshal(v any) ([]byte, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) @@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) { return proto.Marshal(vv) } -func (codec) Unmarshal(data []byte, v interface{}) error { +func (codec) Unmarshal(data []byte, v any) error { vv, ok := v.(proto.Message) if !ok { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index 8358dd6e2a..ac73c9ced2 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -31,71 +31,71 @@ type componentData struct { var cache = map[string]*componentData{} -func (c *componentData) InfoDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.InfoDepth(depth+1, args...) } -func (c *componentData) WarningDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.WarningDepth(depth+1, args...) } -func (c *componentData) ErrorDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.ErrorDepth(depth+1, args...) } -func (c *componentData) FatalDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.FatalDepth(depth+1, args...) } -func (c *componentData) Info(args ...interface{}) { +func (c *componentData) Info(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warning(args ...interface{}) { +func (c *componentData) Warning(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Error(args ...interface{}) { +func (c *componentData) Error(args ...any) { c.ErrorDepth(1, args...) 
} -func (c *componentData) Fatal(args ...interface{}) { +func (c *componentData) Fatal(args ...any) { c.FatalDepth(1, args...) } -func (c *componentData) Infof(format string, args ...interface{}) { +func (c *componentData) Infof(format string, args ...any) { c.InfoDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Warningf(format string, args ...interface{}) { +func (c *componentData) Warningf(format string, args ...any) { c.WarningDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Errorf(format string, args ...interface{}) { +func (c *componentData) Errorf(format string, args ...any) { c.ErrorDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Fatalf(format string, args ...interface{}) { +func (c *componentData) Fatalf(format string, args ...any) { c.FatalDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Infoln(args ...interface{}) { +func (c *componentData) Infoln(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warningln(args ...interface{}) { +func (c *componentData) Warningln(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Errorln(args ...interface{}) { +func (c *componentData) Errorln(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatalln(args ...interface{}) { +func (c *componentData) Fatalln(args ...any) { c.FatalDepth(1, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index c8bb2be34b..16928c9cb9 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -42,53 +42,53 @@ func V(l int) bool { } // Info logs to the INFO log. -func Info(args ...interface{}) { +func Info(args ...any) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { +func Infof(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { +func Infoln(args ...any) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...interface{}) { +func Warning(args ...any) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { +func Warningf(format string, args ...any) { grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { +func Warningln(args ...any) { grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. -func Error(args ...interface{}) { +func Error(args ...any) { grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { +func Errorf(format string, args ...any) { grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. -func Errorln(args ...interface{}) { +func Errorln(args ...any) { grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. -func Fatal(args ...interface{}) { +func Fatal(args ...any) { grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. 
os.Exit(1) @@ -96,7 +96,7 @@ func Fatal(args ...interface{}) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. -func Fatalf(format string, args ...interface{}) { +func Fatalf(format string, args ...any) { grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) @@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calls os.Exit() with exit code 1. -func Fatalln(args ...interface{}) { +func Fatalln(args ...any) { grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) { // Print prints to the logger. Arguments are handled in the manner of fmt.Print. // // Deprecated: use Info. -func Print(args ...interface{}) { +func Print(args ...any) { grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. -func Printf(format string, args ...interface{}) { +func Printf(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. -func Println(args ...interface{}) { +func Println(args ...any) { grpclog.Logger.Infoln(args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index ef06a4822b..b1674d8267 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog" // // Deprecated: use LoggerV2. type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) } // SetLogger sets the logger that is used in grpc. Call only from @@ -45,39 +45,39 @@ type loggerWrapper struct { Logger } -func (g *loggerWrapper) Info(args ...interface{}) { +func (g *loggerWrapper) Info(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Infoln(args ...interface{}) { +func (g *loggerWrapper) Infoln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Infof(format string, args ...interface{}) { +func (g *loggerWrapper) Infof(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Warning(args ...interface{}) { +func (g *loggerWrapper) Warning(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Warningln(args ...interface{}) { +func (g *loggerWrapper) Warningln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { +func (g *loggerWrapper) Warningf(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Error(args ...interface{}) { +func (g *loggerWrapper) Error(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Errorln(args ...interface{}) { +func (g *loggerWrapper) Errorln(args ...any) { g.Logger.Println(args...)
} -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { +func (g *loggerWrapper) Errorf(format string, args ...any) { g.Logger.Printf(format, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 5de66e40d3..ecfd36d713 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -33,35 +33,35 @@ import ( // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. 
V(l int) bool } @@ -182,53 +182,53 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } -func (g *loggerT) Info(args ...interface{}) { +func (g *loggerT) Info(args ...any) { g.output(infoLog, fmt.Sprint(args...)) } -func (g *loggerT) Infoln(args ...interface{}) { +func (g *loggerT) Infoln(args ...any) { g.output(infoLog, fmt.Sprintln(args...)) } -func (g *loggerT) Infof(format string, args ...interface{}) { +func (g *loggerT) Infof(format string, args ...any) { g.output(infoLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Warning(args ...interface{}) { +func (g *loggerT) Warning(args ...any) { g.output(warningLog, fmt.Sprint(args...)) } -func (g *loggerT) Warningln(args ...interface{}) { +func (g *loggerT) Warningln(args ...any) { g.output(warningLog, fmt.Sprintln(args...)) } -func (g *loggerT) Warningf(format string, args ...interface{}) { +func (g *loggerT) Warningf(format string, args ...any) { g.output(warningLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Error(args ...interface{}) { +func (g *loggerT) Error(args ...any) { g.output(errorLog, fmt.Sprint(args...)) } -func (g *loggerT) Errorln(args ...interface{}) { +func (g *loggerT) Errorln(args ...any) { g.output(errorLog, fmt.Sprintln(args...)) } -func (g *loggerT) Errorf(format string, args ...interface{}) { +func (g *loggerT) Errorf(format string, args ...any) { g.output(errorLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Fatal(args ...interface{}) { +func (g *loggerT) Fatal(args ...any) { g.output(fatalLog, fmt.Sprint(args...)) os.Exit(1) } -func (g *loggerT) Fatalln(args ...interface{}) { +func (g *loggerT) Fatalln(args ...any) { g.output(fatalLog, fmt.Sprintln(args...)) os.Exit(1) } -func (g *loggerT) Fatalf(format string, args ...interface{}) { +func (g *loggerT) Fatalf(format string, args ...any) { g.output(fatalLog, fmt.Sprintf(format, args...)) os.Exit(1) } @@ -248,11 +248,11 @@ func (g *loggerT) V(l int) bool { type DepthLoggerV2 interface { LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go index b5bee48380..740745c45f 100644 --- a/vendor/google.golang.org/grpc/health/client.go +++ b/vendor/google.golang.org/grpc/health/client.go @@ -56,7 +56,7 @@ const healthCheckMethod = "/grpc.health.v1.Health/Watch" // This function implements the protocol defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error { +func clientHealthCheck(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), service string) error { tryCnt := 0 retryConnection: diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 142d35f753..24299efd63 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index a01a1b4d54..4439cda0f3 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -44,8 +44,15 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type HealthClient interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current @@ -118,8 +125,15 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { // All implementations should embed UnimplementedHealthServer // for forward compatibility type HealthServer interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. 
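The expanded Check documentation above translates into a concrete client pattern: call Check with a deadline and an empty service name for overall health, and treat a slow or failed response as unhealthy. A sketch against a placeholder address:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Set a deadline, per the updated docs; an empty Service asks for the
	// server's overall health rather than a specific service.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatalf("declaring server unhealthy: %v", err)
	}
	log.Printf("serving status: %v", resp.GetStatus())
}
```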
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/idle.go deleted file mode 100644 index dc3dc72f6b..0000000000 --- a/vendor/google.golang.org/grpc/idle.go +++ /dev/null @@ -1,287 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "math" - "sync" - "sync/atomic" - "time" -) - -// For overriding in unit tests. -var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { - return time.AfterFunc(d, f) -} - -// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter -// and exit from idle mode. -type idlenessEnforcer interface { - exitIdleMode() error - enterIdleMode() error -} - -// idlenessManager defines the functionality required to track RPC activity on a -// channel. -type idlenessManager interface { - onCallBegin() error - onCallEnd() - close() -} - -type noopIdlenessManager struct{} - -func (noopIdlenessManager) onCallBegin() error { return nil } -func (noopIdlenessManager) onCallEnd() {} -func (noopIdlenessManager) close() {} - -// idlenessManagerImpl implements the idlenessManager interface. It uses atomic -// operations to synchronize access to shared state and a mutex to guarantee -// mutual exclusion in a critical section. -type idlenessManagerImpl struct { - // State accessed atomically. - lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. - activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. - activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. - closed int32 // Boolean; True when the manager is closed. - - // Can be accessed without atomics or mutex since these are set at creation - // time and read-only after that. - enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. - - // idleMu is used to guarantee mutual exclusion in two scenarios: - // - Opposing intentions: - // - a: Idle timeout has fired and handleIdleTimeout() is trying to put - // the channel in idle mode because the channel has been inactive. - // - b: At the same time an RPC is made on the channel, and onCallBegin() - // is trying to prevent the channel from going idle. - // - Competing intentions: - // - The channel is in idle mode and there are multiple RPCs starting at - // the same time, all trying to move the channel out of idle. Only one - // of them should succeed in doing so, while the other RPCs should - // piggyback on the first one and be successfully handled. 
- idleMu sync.RWMutex - actuallyIdle bool - timer *time.Timer -} - -// newIdlenessManager creates a new idleness manager implementation for the -// given idle timeout. -func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { - if idleTimeout == 0 { - return noopIdlenessManager{} - } - - i := &idlenessManagerImpl{ - enforcer: enforcer, - timeout: int64(idleTimeout), - } - i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) - return i -} - -// resetIdleTimer resets the idle timer to the given duration. This method -// should only be called from the timer callback. -func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { - i.idleMu.Lock() - defer i.idleMu.Unlock() - - if i.timer == nil { - // Only close sets timer to nil. We are done. - return - } - - // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback, which means the timer has - // already fired. - i.timer.Reset(d) -} - -// handleIdleTimeout is the timer callback that is invoked upon expiry of the -// configured idle timeout. The channel is considered inactive if there are no -// ongoing calls and no RPC activity since the last time the timer fired. -func (i *idlenessManagerImpl) handleIdleTimeout() { - if i.isClosed() { - return - } - - if atomic.LoadInt32(&i.activeCallsCount) > 0 { - i.resetIdleTimer(time.Duration(i.timeout)) - return - } - - // There has been activity on the channel since we last got here. Reset the - // timer and return. - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { - // Set the timer to fire after a duration of idle timeout, calculated - // from the time the most recent RPC completed. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) - i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) - return - } - - // This CAS operation is extremely likely to succeed given that there has - // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the - // channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity at the top of this method, or one was ongoing from before - // the last time we were here. In both cases, reset the timer and return. - i.resetIdleTimer(time.Duration(i.timeout)) - return - } - - // Now that we've set the active calls count to -math.MaxInt32, it's time to - // actually move to idle mode. - if i.tryEnterIdleMode() { - // Successfully entered idle mode. No timer needed until we exit idle. - return - } - - // Failed to enter idle mode due to a concurrent RPC that kept the channel - // active, or because of an error from the channel. Undo the attempt to - // enter idle, and reset the timer to try again later. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.resetIdleTimer(time.Duration(i.timeout)) -} - -// tryEnterIdleMode instructs the channel to enter idle mode. But before -// that, it performs a last minute check to ensure that no new RPC has come in, -// making the channel active. -// -// Return value indicates whether or not the channel moved to idle mode. -// -// Holds idleMu which ensures mutual exclusion with exitIdleMode.
-func (i *idlenessManagerImpl) tryEnterIdleMode() bool { - i.idleMu.Lock() - defer i.idleMu.Unlock() - - if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { - // We raced and lost to a new RPC. Very rare, but stop entering idle. - return false - } - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { - // A very short RPC could have come in (and also finished) after we - // checked for calls count and activity in handleIdleTimeout(), but - // before the CAS operation. So, we need to check for activity again. - return false - } - - // No new RPCs have come in since we last set the active calls count value - // -math.MaxInt32 in the timer callback. And since we have the lock, it is - // safe to enter idle mode now. - if err := i.enforcer.enterIdleMode(); err != nil { - logger.Errorf("Failed to enter idle mode: %v", err) - return false - } - - // Successfully entered idle mode. - i.actuallyIdle = true - return true -} - -// onCallBegin is invoked at the start of every RPC. -func (i *idlenessManagerImpl) onCallBegin() error { - if i.isClosed() { - return nil - } - - if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { - // Channel is not idle now. Set the activity bit and allow the call. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) - return nil - } - - // Channel is either in idle mode or is in the process of moving to idle - // mode. Attempt to exit idle mode to allow this RPC. - if err := i.exitIdleMode(); err != nil { - // Undo the increment to calls count, and return an error causing the - // RPC to fail. - atomic.AddInt32(&i.activeCallsCount, -1) - return err - } - - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) - return nil -} - -// exitIdleMode instructs the channel to exit idle mode. -// -// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. -func (i *idlenessManagerImpl) exitIdleMode() error { - i.idleMu.Lock() - defer i.idleMu.Unlock() - - if !i.actuallyIdle { - // This can happen in two scenarios: - // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called - // tryEnterIdleMode(). But before the latter could grab the lock, an RPC - // came in and onCallBegin() noticed that the calls count is negative. - // - Channel is in idle mode, and multiple new RPCs come in at the same - // time, all of them notice a negative calls count in onCallBegin and get - // here. The first one to get the lock would get the channel to exit idle. - // - // Either way, nothing to do here. - return nil - } - - if err := i.enforcer.exitIdleMode(); err != nil { - return fmt.Errorf("channel failed to exit idle mode: %v", err) - } - - // Undo the idle entry process. This also respects any new RPC attempts. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.actuallyIdle = false - - // Start a new timer to fire after the configured idle timeout. - i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) - return nil -} - -// onCallEnd is invoked at the end of every RPC. -func (i *idlenessManagerImpl) onCallEnd() { - if i.isClosed() { - return - } - - // Record the time at which the most recent call finished. - atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) - - // Decrement the active calls count. This count can temporarily go negative - // when the timer callback is in the process of moving the channel to idle - // mode, but one or more RPCs come in and complete before the timer callback - // can get done with the process of moving to idle mode.
- atomic.AddInt32(&i.activeCallsCount, -1) -} - -func (i *idlenessManagerImpl) isClosed() bool { - return atomic.LoadInt32(&i.closed) == 1 -} - -func (i *idlenessManagerImpl) close() { - atomic.StoreInt32(&i.closed, 1) - - i.idleMu.Lock() - i.timer.Stop() - i.timer = nil - i.idleMu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index bb96ef57be..877d78fc3d 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -23,7 +23,7 @@ import ( ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. // Unary interceptors can be specified as a DialOption, using @@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // defaults from the ClientConn as well as per-call options. // // The returned error must be compatible with the status package. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) @@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server interface{} + Server any // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } @@ -78,13 +78,13 @@ type UnaryServerInfo struct { // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) +type UnaryHandler func(ctx context.Context, req any) (any, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. @@ -101,4 +101,4 @@ type StreamServerInfo struct { // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. 
It is the responsibility of the interceptor to invoke handler to // complete the RPC. -type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3da5..fed1c011a3 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,6 +23,8 @@ package backoff import ( + "context" + "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. +func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 08666f62a7..3c594e6e4e 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() { } } -// UpdateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { gsb.currentMu.Lock() defer gsb.currentMu.Unlock() gsb.mu.Lock() @@ -214,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { balToUpdate = gsb.balancerPending } - gsb.mu.Unlock() if balToUpdate == nil { // SubConn belonged to a stale lb policy that has not yet fully closed, // or the balancer was already closed. + gsb.mu.Unlock() return } - balToUpdate.UpdateSubConnState(sc, state) + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. 
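The RunF helper added to the internal backoff package above cannot be imported from outside grpc-go, but its contract is small enough to restate: run f until the context expires or f returns a non-nil error other than ErrResetBackoff, resetting the backoff state whenever that sentinel is returned. A standalone copy of the same loop (local names, identical logic to the diff):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// errResetBackoff mirrors backoff.ErrResetBackoff: returning it from f resets
// the retry counter instead of stopping the loop.
var errResetBackoff = errors.New("reset backoff state")

// runF restates the new backoff.RunF: run f repeatedly, sleeping
// backoff(attempt) between runs, until ctx expires or f fails.
func runF(ctx context.Context, f func() error, backoff func(int) time.Duration) {
	attempt := 0
	timer := time.NewTimer(0)
	for ctx.Err() == nil {
		select {
		case <-timer.C:
		case <-ctx.Done():
			timer.Stop()
			return
		}
		err := f()
		if errors.Is(err, errResetBackoff) {
			timer.Reset(0)
			attempt = 0
			continue
		}
		if err != nil {
			return
		}
		timer.Reset(backoff(attempt))
		attempt++
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	runF(ctx,
		func() error { fmt.Println("tick"); return nil },
		func(retries int) time.Duration { return 10 * time.Millisecond << uint(retries) })
}
```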
+func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) } // Close closes any active child balancers. @@ -242,7 +255,7 @@ func (gsb *Balancer) Close() { // // It implements the balancer.ClientConn interface and is passed down in that // capacity to the wrapped balancer. It maintains a set of subConns created by -// the wrapped balancer and calls from the latter to create/update/remove +// the wrapped balancer and calls from the latter to create/update/shutdown // SubConns update this set before being forwarded to the parent ClientConn. // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. @@ -254,21 +267,10 @@ type balancerWrapper struct { subconns map[balancer.SubConn]bool // subconns created by this balancer } -func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - if state.ConnectivityState == connectivity.Shutdown { - bw.gsb.mu.Lock() - delete(bw.subconns, sc) - bw.gsb.mu.Unlock() - } - // There is no need to protect this read with a mutex, as the write to the - // Balancer field happens in SwitchTo, which completes before this can be - // called. - bw.Balancer.UpdateSubConnState(sc, state) -} - -// Close closes the underlying LB policy and removes the subconns it created. bw -// must not be referenced via balancerCurrent or balancerPending in gsb when -// called. gsb.mu must not be held. Does not panic with a nil receiver. +// Close closes the underlying LB policy and shuts down the subconns it +// created. bw must not be referenced via balancerCurrent or balancerPending in +// gsb when called. gsb.mu must not be held. Does not panic with a nil +// receiver. func (bw *balancerWrapper) Close() { // before Close is called. if bw == nil { @@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() { bw.Balancer.Close() bw.gsb.mu.Lock() for sc := range bw.subconns { - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() } bw.gsb.mu.Unlock() } @@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne } bw.gsb.mu.Unlock() + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } sc, err := bw.gsb.cc.NewSubConn(addrs, opts) if err != nil { return nil, err } bw.gsb.mu.Lock() if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() bw.gsb.mu.Unlock() return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) } @@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { } func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { - bw.gsb.mu.Lock() - if !bw.gsb.balancerCurrentOrPending(bw) { - bw.gsb.mu.Unlock() - return - } - bw.gsb.mu.Unlock() - bw.gsb.cc.RemoveSubConn(sc) + // Note: existing third party balancers may call this, so it must remain + // until RemoveSubConn is fully removed. 
+ sc.Shutdown() } func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go index 3a905d9665..94a08d6875 100644 --- a/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -25,7 +25,7 @@ import ( // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. - Parse(md metadata.MD) interface{} + Parse(md metadata.MD) any } var parser Parser @@ -38,7 +38,7 @@ func SetParser(lr Parser) { } // Parse calls parser.Read(). -func Parse(md metadata.MD) interface{} { +func Parse(md metadata.MD) any { if parser == nil { return nil } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 6c3f632215..0f31274a3c 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -230,7 +230,7 @@ type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { @@ -270,7 +270,7 @@ type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 81c2f5fd76..11f91668ac 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -18,7 +18,10 @@ // Package buffer provides an implementation of an unbounded buffer. package buffer -import "sync" +import ( + "errors" + "sync" +) // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. This is typically used for passing updates from one entity @@ -28,49 +31,50 @@ import "sync" // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel -// of `interface{}`. This means that a call to Put() incurs an extra memory -// allocation, and also that users need a type assertion while reading. For -// performance critical code paths, using Unbounded is strongly discouraged and -// defining a new type specific implementation of this buffer is preferred. See +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { - c chan interface{} + c chan any closed bool + closing bool mu sync.Mutex - backlog []interface{} + backlog []any } // NewUnbounded returns a new instance of Unbounded. 
func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan interface{}, 1)} + return &Unbounded{c: make(chan any, 1)} } +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t interface{}) { +func (b *Unbounded) Put(t any) error { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return + if b.closing { + return errBufferClosed } if len(b.backlog) == 0 { select { case b.c <- t: - return + return nil default: } } b.backlog = append(b.backlog, t) + return nil } -// Load sends the earliest buffered data, if any, onto the read channel -// returned by Get(). Users are expected to call this every time they read a +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return - } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -78,6 +82,8 @@ func (b *Unbounded) Load() { b.backlog = b.backlog[1:] default: } + } else if b.closing && !b.closed { + close(b.c) } } @@ -88,18 +94,23 @@ func (b *Unbounded) Load() { // send the next buffered value onto the channel if there is any. // // If the unbounded buffer is closed, the read channel returned by this method -// is closed. -func (b *Unbounded) Get() <-chan interface{} { +// is closed after all data is drained. +func (b *Unbounded) Get() <-chan any { return b.c } -// Close closes the unbounded buffer. +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. func (b *Unbounded) Close() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { + if b.closing { return } - b.closed = true - close(b.c) + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 777cbcd792..fc094f3441 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,15 +24,14 @@ package channelz import ( - "context" "errors" - "fmt" "sort" "sync" "sync/atomic" "time" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" ) const ( @@ -40,8 +39,11 @@ const ( ) var ( - db dbWrapper - idGen idGenerator + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db dbWrapper // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 @@ -52,14 +54,20 @@ var ( func TurnOn() { if !IsOn() { db.set(newChannelMap()) - idGen.reset() + IDGen.Reset() atomic.StoreInt32(&curState, 1) } } +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + // IsOn returns whether channelz data collection is on. func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) + return atomic.LoadInt32(&curState) == 1 } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). @@ -97,43 +105,6 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorageForTesting initializes channelz data storage and id -// generator for testing purposes. 
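The buffer.Unbounded rework above changes the shutdown contract: Put now returns an error once Close has been called, and the channel returned by Get is closed only after the backlog drains. A minimal usage sketch under those semantics (internal package, so this only builds inside the grpc module; shown for illustration):

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer" // internal: builds only inside the grpc module
)

func main() {
	b := buffer.NewUnbounded()

	go func() {
		for i := 0; i < 3; i++ {
			if err := b.Put(i); err != nil {
				return // Put fails once Close has been called
			}
		}
		b.Close() // no more Puts; channel closes after the backlog drains
	}()

	// Load must follow every successful receive; the final Load after the
	// backlog empties is what closes the channel post-Close.
	for v := range b.Get() {
		fmt.Println(v.(int))
		b.Load()
	}
}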
-// -// Returns a cleanup function to be invoked by the test, which waits for up to -// 10s for all channelz state to be reset by the grpc goroutines when those -// entities get closed. This cleanup function helps with ensuring that tests -// don't mess up each other. -func NewChannelzStorageForTesting() (cleanup func() error) { - db.set(newChannelMap()) - idGen.reset() - - return func() error { - cm := db.get() - if cm == nil { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - for { - cm.mu.RLock() - topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) - cm.mu.RUnlock() - - if err := ctx.Err(); err != nil { - return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) - } - if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { - return nil - } - <-ticker.C - } - } -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // @@ -193,7 +164,7 @@ func GetServer(id int64) *ServerMetric { // // If channelz is not turned ON, the channelz database is not mutated. func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() var parent int64 isTopChannel := true if pid != nil { @@ -229,7 +200,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er if pid == nil { return nil, errors.New("a SubChannel's parent id cannot be nil") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefSubChannel, id, pid), nil } @@ -251,7 +222,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er // // If channelz is not turned ON, the channelz database is not mutated. func RegisterServer(s Server, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefServer, id, nil) } @@ -277,7 +248,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a ListenSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefListenSocket, id, pid), nil } @@ -297,7 +268,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a NormalSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefNormalSocket, id, pid), nil } @@ -776,14 +747,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric { return sm } -type idGenerator struct { +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { id int64 } -func (i *idGenerator) reset() { +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. 
+func (i *IDGenerator) Reset() { atomic.StoreInt64(&i.id, 0) } -func (i *idGenerator) genID() int64 { +func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index 8e13a3d2ce..f89e6f77bb 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -31,7 +31,7 @@ func withParens(id *Identifier) string { } // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtInfo, @@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, @@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, @@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, @@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, @@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Errorf logs and adds a trace event if channelz is on. 
-func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtError, diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 7b2f350e2e..1d4020f537 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -628,6 +628,7 @@ type tracedChannel interface { type channelTrace struct { cm *channelMap + clearCalled bool createdTime time.Time eventCount int64 mu sync.Mutex @@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) { } func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index 8d194e44e1..98288c3f86 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -23,7 +23,7 @@ import ( ) // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { +func GetSocketOption(socket any) *SocketOptionData { c, ok := socket.(syscall.Conn) if !ok { return nil diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 837ddc4024..b5568b22e2 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -22,6 +22,6 @@ package channelz // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { +func GetSocketOption(c any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 32c9b59033..9deee7f651 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -25,12 +25,12 @@ import ( type requestInfoKey struct{} // NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } // RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) interface{} { +func RequestInfoFromContext(ctx context.Context) any { return ctx.Value(requestInfoKey{}) } @@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} { type clientHandshakeInfoKey struct{} // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. -func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { +func ClientHandshakeInfoFromContext(ctx context.Context) any { return ctx.Value(clientHandshakeInfoKey{}) } // NewClientHandshakeInfoContext creates a context with chi. 
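Much of this vendor bump is the mechanical interface{} → any rename visible in these hunks. Since Go 1.18, any is a predeclared alias for interface{}, so the two spellings denote the identical type and the rename is fully API-compatible; a tiny demonstration:

package main

import "fmt"

func describe(v any) string { return fmt.Sprintf("%T", v) }

func main() {
	var a any = 42
	var i interface{} = a    // identical types: assignable both ways, no conversion
	fmt.Println(describe(i)) // prints: int
}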
-func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 80fd5c7d2a..685a3cb41b 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,10 +36,13 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy, which can be enabled by setting the environment - // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 02b4b6a1c1..29f234acb1 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -50,46 +50,7 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) - // XDSRingHash indicates whether ring hash support is enabled, which can be - // disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) - // XDSClientSideSecurity is used to control processing of security - // configuration on the client-side. - // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster and - // DNS cluster is enabled, which can be disabled by setting the environment - // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - // to "false". - XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) - - // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, - // which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". 
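The envconfig hunk above adds ALTSMaxConcurrentHandshakes via uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100). The helper's body is not part of this diff; the sketch below is a hypothetical re-creation inferred from the call sites, which imply (name, default, min, max) with clamping to [min, max]:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// Hypothetical reconstruction of uint64FromEnv; the real implementation may
// differ in details such as error handling.
func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
	if err != nil {
		return def // unset or unparsable: fall back to the default
	}
	if v < min {
		v = min
	}
	if v > max {
		v = max
	}
	return v
}

func main() {
	os.Setenv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", "500")
	// Clamped to the upper bound of 100.
	fmt.Println(uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100))
}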
- XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) - // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) - // XDSFederation indicates whether federation support is enabled, which can - // be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". - XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) - - // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "false". - XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") - // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which - // can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". - XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 0000000000..7f7044e173 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go deleted file mode 100644 index 6717b757f8..0000000000 --- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package googlecloud contains internal helpful functions for google cloud. -package googlecloud - -import ( - "runtime" - "strings" - "sync" - - "google.golang.org/grpc/grpclog" - internalgrpclog "google.golang.org/grpc/internal/grpclog" -) - -const logPrefix = "[googlecloud]" - -var ( - vmOnGCEOnce sync.Once - vmOnGCE bool - - logger = internalgrpclog.NewPrefixLogger(grpclog.Component("googlecloud"), logPrefix) -) - -// OnGCE returns whether the client is running on GCE. -// -// It provides similar functionality as metadata.OnGCE from the cloud library -// package. We keep this to avoid depending on the cloud library module. -func OnGCE() bool { - vmOnGCEOnce.Do(func() { - mf, err := manufacturer() - if err != nil { - logger.Infof("failed to read manufacturer, setting onGCE=false: %v") - return - } - vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) - }) - return vmOnGCE -} - -// isRunningOnGCE checks whether the local system, without doing a network request, is -// running on GCP. -func isRunningOnGCE(manufacturer []byte, goos string) bool { - name := string(manufacturer) - switch goos { - case "linux": - name = strings.TrimSpace(name) - return name == "Google" || name == "Google Compute Engine" - case "windows": - name = strings.Replace(name, " ", "", -1) - name = strings.Replace(name, "\n", "", -1) - name = strings.Replace(name, "\r", "", -1) - return name == "Google" - default: - return false - } -} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go deleted file mode 100644 index ffa0f1ddee..0000000000 --- a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build !(linux || windows) -// +build !linux,!windows - -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package googlecloud - -func manufacturer() ([]byte, error) { - return nil, nil -} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go deleted file mode 100644 index 6e455fb0a8..0000000000 --- a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package googlecloud - -import "os" - -const linuxProductNameFile = "/sys/class/dmi/id/product_name" - -func manufacturer() ([]byte, error) { - return os.ReadFile(linuxProductNameFile) -} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go deleted file mode 100644 index 2d7aaaaa70..0000000000 --- a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package googlecloud - -import ( - "errors" - "os/exec" - "regexp" - "strings" -) - -const ( - windowsCheckCommand = "powershell.exe" - windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" - powershellOutputFilter = "Manufacturer" - windowsManufacturerRegex = ":(.*)" -) - -func manufacturer() ([]byte, error) { - cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) - out, err := cmd.Output() - if err != nil { - return nil, err - } - for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { - if strings.HasPrefix(line, powershellOutputFilter) { - re := regexp.MustCompile(windowsManufacturerRegex) - name := re.FindString(line) - name = strings.TrimLeft(name, ":") - return []byte(name), nil - } - } - return nil, errors.New("cannot determine the machine's manufacturer") -} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index b68e26a364..bfc45102ab 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -30,7 +30,7 @@ var Logger LoggerV2 var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...interface{}) { +func InfoDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { @@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) { } // WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...interface{}) { +func WarningDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { @@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) { } // ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...interface{}) { +func ErrorDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { @@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...interface{}) { +func FatalDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) 
} else { @@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) { // is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -116,11 +116,11 @@ type LoggerV2 interface { // later release. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 02224b42ca..faa998de76 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -31,7 +31,7 @@ type PrefixLogger struct { } // Infof does info logging. -func (pl *PrefixLogger) Infof(format string, args ...interface{}) { +func (pl *PrefixLogger) Infof(format string, args ...any) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format @@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { } // Warningf does warning logging. -func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { +func (pl *PrefixLogger) Warningf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) @@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { } // Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { +func (pl *PrefixLogger) Errorf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) @@ -62,7 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { } // Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { +func (pl *PrefixLogger) Debugf(format string, args ...any) { // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe // rewrite PrefixLogger a little to ensure that we don't use the global // `Logger` here, and instead use the `logger` field. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index d08e3e9076..aa97273e7d 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -80,6 +80,13 @@ func Uint32() uint32 { return r.Uint32() } +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + // Shuffle implements rand.Shuffle on the grpcrand global source. var Shuffle = func(n int, f func(int, int)) { mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 37b8d4117e..f7f40a16ac 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -20,7 +20,6 @@ package grpcsync import ( "context" - "sync" "google.golang.org/grpc/internal/buffer" ) @@ -32,14 +31,12 @@ import ( // // This type is safe for concurrent access. type CallbackSerializer struct { - // Done is closed once the serializer is shut down completely, i.e all + // done is closed once the serializer is shut down completely, i.e all // scheduled callbacks are executed and the serializer has deallocated all // its resources. - Done chan struct{} + done chan struct{} callbacks *buffer.Unbounded - closedMu sync.Mutex - closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. 
The provided @@ -48,12 +45,12 @@ type CallbackSerializer struct { // callbacks will be added once this context is canceled, and any pending un-run // callbacks will be executed before the serializer is shut down. func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - t := &CallbackSerializer{ - Done: make(chan struct{}), + cs := &CallbackSerializer{ + done: make(chan struct{}), callbacks: buffer.NewUnbounded(), } - go t.run(ctx) - return t + go cs.run(ctx) + return cs } // Schedule adds a callback to be scheduled after existing callbacks are run. @@ -64,56 +61,40 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // Return value indicates if the callback was successfully added to the list of // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. -func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - t.closedMu.Lock() - defer t.closedMu.Unlock() - - if t.closed { - return false - } - t.callbacks.Put(f) - return true +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + return cs.callbacks.Put(f) == nil } -func (t *CallbackSerializer) run(ctx context.Context) { - var backlog []func(context.Context) +func (cs *CallbackSerializer) run(ctx context.Context) { + defer close(cs.done) - defer close(t.Done) + // TODO: when Go 1.21 is the oldest supported version, this loop and Close + // can be replaced with: + // + // context.AfterFunc(ctx, cs.callbacks.Close) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. - case callback, ok := <-t.callbacks.Get(): - if !ok { - return - } - t.callbacks.Load() - callback.(func(ctx context.Context))(ctx) + case cb := <-cs.callbacks.Get(): + cs.callbacks.Load() + cb.(func(context.Context))(ctx) } } - // Fetch pending callbacks if any, and execute them before returning from - // this method and closing t.Done. - t.closedMu.Lock() - t.closed = true - backlog = t.fetchPendingCallbacks() - t.callbacks.Close() - t.closedMu.Unlock() - for _, b := range backlog { - b(ctx) + // Close the buffer to prevent new callbacks from being added. + cs.callbacks.Close() + + // Run all pending callbacks. + for cb := range cs.callbacks.Get() { + cs.callbacks.Load() + cb.(func(context.Context))(ctx) } } -func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { - var backlog []func(context.Context) - for { - select { - case b := <-t.callbacks.Get(): - backlog = append(backlog, b.(func(context.Context))) - t.callbacks.Load() - default: - return backlog - } - } +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. +func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done } diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 0000000000..aef8cec1ab --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
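The CallbackSerializer rework above replaces the exported Done channel with a Done() accessor and routes shutdown through the buffer's new Close semantics: canceling the context stops new callbacks, but anything already scheduled still runs before Done() closes. A minimal usage sketch (internal package, illustrative only):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync" // internal: builds only inside the grpc module
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Callbacks run one at a time, in Schedule order.
	for i := 0; i < 3; i++ {
		i := i
		cs.Schedule(func(ctx context.Context) {
			fmt.Println("callback", i)
		})
	}

	cancel()    // stop accepting new callbacks...
	<-cs.Done() // ...but pending ones still drain before Done() closes
}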
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. 
+func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go new file mode 100644 index 0000000000..fe49cb74c5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -0,0 +1,278 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// Enforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() +} + +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout time.Duration + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. +func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } +} + +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. 
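To show how the new grpcsync.PubSub above is meant to be driven: a Subscriber is any type with a non-blocking OnMessage(any); late subscribers are asynchronously handed the most recent message, and deliveries queued before cancellation still drain before Done() closes. A minimal sketch (internal package, illustrative only):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync" // internal: builds only inside the grpc module
)

type printSub struct{ name string }

// OnMessage must not block; PubSub serializes deliveries for us.
func (s *printSub) OnMessage(msg any) { fmt.Println(s.name, "got", msg) }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := grpcsync.NewPubSub(ctx)

	cancelSub := ps.Subscribe(&printSub{name: "sub1"})
	ps.Publish("READY")

	// A late subscriber is asynchronously given the latest message ("READY").
	ps.Subscribe(&printSub{name: "sub2"})
	ps.Publish("IDLE")

	cancel()    // stop publishing...
	<-ps.Done() // ...and wait for queued deliveries to drain
	cancelSub() // unsubscribing after shutdown is harmless
}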
+func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() + } + m.timer = timeAfterFunc(d, m.handleIdleTimeout) +} + +func (m *Manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() + m.resetIdleTimerLocked(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (m *Manager) handleIdleTimeout() { + if m.isClosed() { + return + } + + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(m.timeout) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) + return + } + + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. + if m.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + m.resetIdleTimer(m.timeout) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // A very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. 
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() + m.actuallyIdle = true + return true +} + +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + +// OnCallBegin is invoked at the start of every RPC. +func (m *Manager) OnCallBegin() error { + if m.isClosed() { + return nil + } + + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := m.ExitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. + atomic.AddInt32(&m.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil +} + +// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// internal state. +func (m *Manager) ExitIdleMode() error { + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if m.isClosed() || !m.actuallyIdle { + // This can happen in three scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and OnCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in OnCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // - Channel is not in idle mode, and the user calls Connect which calls + // m.ExitIdleMode. + // + // In any case, there is nothing to do here. + return nil + } + + if err := m.enforcer.ExitIdleMode(); err != nil { + return fmt.Errorf("failed to exit idle mode: %w", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + m.resetIdleTimerLocked(m.timeout) + return nil +} + +// OnCallEnd is invoked at the end of every RPC. +func (m *Manager) OnCallEnd() { + if m.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. 
+ atomic.AddInt32(&m.activeCallsCount, -1) +} + +func (m *Manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 +} + +func (m *Manager) Close() { + atomic.StoreInt32(&m.closed, 1) + + m.idleMu.Lock() + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } + m.idleMu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 42ff39c844..6c7ea6a533 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -30,7 +30,7 @@ import ( var ( // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -38,8 +38,12 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second // ParseServiceConfig parses a JSON representation of the service config. - ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult + ParseServiceConfig any // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the @@ -49,33 +53,33 @@ var ( // given name. This is set by package certprovider for use from xDS // bootstrap code while parsing certificate provider configs in the // bootstrap file. - GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + GetCertificateProviderBuilder any // func(string) certprovider.Builder // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - CanonicalString interface{} // func (codes.Code) string - // DrainServerTransports initiates a graceful close of existing connections - // on a gRPC server accepted on the provided listener address. An - // xDS-enabled server invokes this method on a grpc.Server when a particular - // listener moves to "not-serving" mode. 
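Tying the new idle.Manager above together: the channel (the Enforcer, which in practice is grpc.ClientConn) starts in idle mode, the first OnCallBegin forces an exit from idle and arms the timer, and an expired timer with no intervening activity re-enters idle. A minimal wiring sketch with a toy Enforcer (internal package, illustrative only):

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/idle" // internal: builds only inside the grpc module
)

// toyEnforcer stands in for grpc.ClientConn, the real Enforcer.
type toyEnforcer struct{}

func (toyEnforcer) ExitIdleMode() error { fmt.Println("exiting idle"); return nil }
func (toyEnforcer) EnterIdleMode()      { fmt.Println("entering idle") }

func main() {
	// NewManager begins in idle mode; the first RPC forces an exit.
	m := idle.NewManager(toyEnforcer{}, 30*time.Minute)

	if err := m.OnCallBegin(); err != nil {
		return // exiting idle failed; the RPC would be failed here
	}
	// ... the RPC runs ...
	m.OnCallEnd()

	// With no activity for the configured timeout, the timer callback would
	// call EnterIdleMode. Close stops the timer on shutdown.
	m.Close()
}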
- DrainServerTransports interface{} // func(*grpc.Server, string) + CanonicalString any // func (codes.Code) string + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalServerOptions interface{} // func(opt ...ServerOption) + AddGlobalServerOptions any // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. // @@ -88,14 +92,14 @@ var ( // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalDialOptions interface{} // func(opt ...DialOption) + AddGlobalDialOptions any // func(opt ...DialOption) // DisableGlobalDialOptions returns a DialOption that prevents the // ClientConn from applying the global DialOptions (set via // AddGlobalDialOptions). // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - DisableGlobalDialOptions interface{} // func() grpc.DialOption + DisableGlobalDialOptions any // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. // @@ -104,23 +108,26 @@ var ( ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. - JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. - JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from @@ -131,7 +138,7 @@ var ( // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. 
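The internal/internal.go hunk above is a catalog of func-valued hooks declared as any and populated by the top-level grpc package at init time; this inverts the dependency so internal packages and tests can reach grpc functionality without an import cycle, at the cost of a type assertion at the call site. A single-file analogue of the pattern (names hypothetical):

package main

import "fmt"

// Hook declared as any so the declaring package needs no knowledge of the
// implementing package (mirrors the variables in internal/internal.go).
var canonicalString any // func(int) string

func init() {
	// The "implementing package" installs the concrete function at init time.
	canonicalString = func(code int) string { return fmt.Sprintf("code(%d)", code) }
}

func main() {
	// Consumers type-assert to the documented signature before calling.
	f := canonicalString.(func(int) string)
	fmt.Println(f(5))
}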
- NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment @@ -163,7 +170,32 @@ var ( UnregisterRBACHTTPFilterForTesting func() // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. - ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() + + // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found + // error for a given resource type and name. This is usually triggered when + // the associated watch timer fires. For testing purposes, having this + // function makes events more predictable than relying on timer events. + TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error + + // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton + // to invoke resource not found for a resource type name and resource name. + TriggerXDSResourceNameNotFoundClient any // func(string, string) error + + // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -174,7 +206,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index c82e608e07..900bfb7160 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata") type mdValue metadata.MD -func (m mdValue) Equal(o interface{}) bool { +func (m mdValue) Equal(o any) bool { om, ok := o.(mdValue) if !ok { return false diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 0177af4b51..7033191375 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -35,7 +35,7 @@ const jsonIndent = " " // ToJSON marshals the input into a json string. // // If marshal fails, it falls back to fmt.Sprintf("%+v"). 
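Aside: the `interface{}` to `any` rewrite above is mechanical, but the variables themselves illustrate a deliberate pattern: `any`-typed function hooks set by other packages to break import cycles, recovered with a type assertion at the call site (exactly what http2_client.go does further down with internal.FromOutgoingContextRaw). A minimal sketch of that pattern, with hypothetical names:

package main

import "fmt"

// Greet mimics the grpc/internal pattern: an any-typed variable holds a
// function set elsewhere (avoiding an import cycle), and callers recover
// the concrete signature with a type assertion. Sketch only.
var Greet any // func(string) string

func init() {
	Greet = func(name string) string { return "hello " + name }
}

func main() {
	greet := Greet.(func(string) string) // panics if the signature drifts
	fmt.Println(greet("gRPC"))
}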
-func ToJSON(e interface{}) string { +func ToJSON(e any) string { switch ee := e.(type) { case protov1.Message: mm := jsonpb.Marshaler{Indent: jsonIndent} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index c7a18a948a..f0603871c9 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -92,7 +92,7 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -101,7 +101,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientInterceptor is an interceptor for gRPC client streams. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 09a667f33c..b66dcb2132 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -23,7 +23,6 @@ package dns import ( "context" "encoding/json" - "errors" "fmt" "net" "os" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -47,53 +47,37 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") -// Globals to stub out in tests. TODO: Perhaps these two can be combined into a -// single variable for testing the resolver? -var ( - newTimer = time.NewTimer - newTimerDNSResRate = time.NewTimer -) - func init() { resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer } const ( defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. txtAttribute = "grpc_config=" ) -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. 
"::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. - minDNSResRate = 30 * time.Second -) - -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) + return dialer.DialContext(ctx, network, address) } } -var customAuthorityResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -103,7 +87,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), + Dial: internal.AddressDialer(authorityWithPort), }, nil } @@ -114,7 +98,8 @@ func NewBuilder() resolver.Builder { type dnsBuilder struct{} -// Build creates and starts a DNS resolver that watches the name resolution of the target. +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { @@ -140,13 +125,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.URL.Host == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = customAuthorityResolver(target.URL.Host) - if err != nil { - return nil, err - } + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err } d.wg.Add(1) @@ -159,12 +140,6 @@ func (b *dnsBuilder) Scheme() string { return "dns" } -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - // deadResolver is a resolver that does nothing. type deadResolver struct{} @@ -176,23 +151,26 @@ func (deadResolver) Close() {} type dnsResolver struct { host string port string - resolver netResolver + resolver internal.NetResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the target. + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. rn chan struct{} - // wg is used to enforce Close() to return after the watcher() goroutine has finished. - // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we - // replace the real lookup functions with mocked ones to facilitate testing. 
-	// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
-	// will warns lookup (READ the lookup function pointers) inside watcher() goroutine
-	// has data race with replaceNetFunc (WRITE the lookup function pointers).
+	// wg is used to enforce Close() to return after the watcher() goroutine has
+	// finished. Otherwise, a data race is possible. [Race Example] in
+	// dns_resolver_test we replace the real lookup functions with mocked ones to
+	// facilitate testing. If Close() doesn't wait for the watcher() goroutine to
+	// finish, the race detector sometimes warns that lookup (READ of the lookup
+	// function pointers) inside the watcher() goroutine races with
+	// replaceNetFunc (WRITE of the lookup function pointers).
	wg sync.WaitGroup
	disableServiceConfig bool
}

-// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
+// ResolveNow invokes an immediate resolution of the target that this
+// dnsResolver watches.
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
	select {
	case d.rn <- struct{}{}:
@@ -218,28 +196,27 @@ func (d *dnsResolver) watcher() {
			err = d.cc.UpdateState(*state)
		}

-		var timer *time.Timer
+		var waitTime time.Duration
		if err == nil {
-			// Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
-			// to prevent constantly re-resolving.
+			// Success resolving, wait for the next ResolveNow. However, also wait 30
+			// seconds at the very least to prevent constantly re-resolving.
			backoffIndex = 1
-			timer = newTimerDNSResRate(minDNSResRate)
+			waitTime = internal.MinResolutionRate
			select {
			case <-d.ctx.Done():
-				timer.Stop()
				return
			case <-d.rn:
			}
		} else {
-			// Poll on an error found in DNS Resolver or an error received from ClientConn.
-			timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
+			// Poll on an error found in DNS Resolver or an error received from
+			// ClientConn.
+			waitTime = backoff.DefaultExponential.Backoff(backoffIndex)
			backoffIndex++
		}
		select {
		case <-d.ctx.Done():
-			timer.Stop()
			return
-		case <-timer.C:
+		case <-internal.TimeAfterFunc(waitTime):
		}
	}
}
@@ -278,7 +255,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
}

func handleDNSError(err error, lookupType string) error {
-	if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+	dnsErr, ok := err.(*net.DNSError)
+	if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
		// Timeouts and temporary errors should be communicated to gRPC to
		// attempt another DNS query (with backoff). Other errors should be
		// suppressed (they may represent the absence of a TXT record).
@@ -307,10 +285,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
		res += s
	}

-	// TXT record must have "grpc_config=" attribute in order to be used as service config.
+	// TXT record must have "grpc_config=" attribute in order to be used as
+	// service config.
	if !strings.HasPrefix(res, txtAttribute) {
		logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
-		// This is not an error; it is the equivalent of not having a service config.
+		// This is not an error; it is the equivalent of not having a service
+		// config.
		return nil
	}
	sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
@@ -352,9 +332,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
	return &state, nil
}

-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
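Aside: the watcher loop above now waits on a channel returned by an injectable internal.TimeAfterFunc instead of managing timers itself, which is what lets tests control time. A hedged sketch of just the wait step, assuming a package-level hook named timeAfterFunc:

package main

import (
	"fmt"
	"time"
)

// timeAfterFunc stands in for internal.TimeAfterFunc: production code wires
// in time.After, tests can substitute a controllable channel. Sketch only.
var timeAfterFunc = time.After

// waitForRetry blocks until either the resolver is done or the computed
// backoff/min-resolution wait elapses, mirroring the watcher's select.
func waitForRetry(done <-chan struct{}, waitTime time.Duration) bool {
	select {
	case <-done:
		return false // resolver closed; stop the watcher loop
	case <-timeAfterFunc(waitTime):
		return true // time to re-resolve
	}
}

func main() {
	done := make(chan struct{})
	fmt.Println(waitForRetry(done, 10*time.Millisecond))
}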
-// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { @@ -366,10 +347,10 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. // examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -377,7 +358,7 @@ func formatIP(addr string) (addrIP string, ok bool) { // target: ":80" defaultPort: "443" returns host: "localhost", port: "80" func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { - return "", "", errMissingAddr + return "", "", internal.ErrMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address @@ -385,12 +366,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. - return "", "", errEndsWithColon + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. + return "", "", internal.ErrEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. host = "localhost" } return host, port, nil diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 0000000000..c7fc557d00 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+// Package internal contains functionality internal to the dns resolver package.
+package internal
+
+import (
+	"context"
+	"errors"
+	"net"
+	"time"
+)
+
+// NetResolver groups the methods on net.Resolver that are used by the DNS
+// resolver implementation. This allows the default net.Resolver instance to be
+// overridden from tests.
+type NetResolver interface {
+	LookupHost(ctx context.Context, host string) (addrs []string, err error)
+	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+	LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+var (
+	// ErrMissingAddr is the error returned when building a DNS resolver when
+	// the provided target name is empty.
+	ErrMissingAddr = errors.New("dns resolver: missing address")
+
+	// ErrEndsWithColon is the error returned when building a DNS resolver when
+	// the provided target name ends with a colon that is supposed to be the
+	// separator between host and port. E.g. "::" is a valid address as it is
+	// an IPv6 address (host only) and "[::]:" is invalid as it ends with a
+	// colon as the host and port separator
+	ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
+)
+
+// The following vars are overridden from tests.
+var (
+	// MinResolutionRate is the minimum rate at which re-resolutions are
+	// allowed. This helps to prevent excessive re-resolution.
+	MinResolutionRate = 30 * time.Second
+
+	// TimeAfterFunc is used by the DNS resolver to wait for the given duration
+	// to elapse. In non-test code, this is implemented by time.After. In test
+	// code, this can be used to control the amount of time the resolver is
+	// blocked waiting for the duration to elapse.
+	TimeAfterFunc func(time.Duration) <-chan time.Time
+
+	// NewNetResolver returns the net.Resolver instance for the given target.
+	NewNetResolver func(string) (NetResolver, error)
+
+	// AddressDialer is the dialer used to dial the DNS server. It accepts the
+	// Host portion of the URL corresponding to the user's dial target and
+	// returns a dial function.
+	AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error)
+)
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
index 1609116877..27cd81af9e 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -61,6 +61,10 @@ func (b *builder) Scheme() string {
	return b.scheme
}

+func (b *builder) OverrideAuthority(resolver.Target) string {
+	return "localhost"
+}
+
type nopResolver struct {
}

diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index b0ead4f54f..03ef2fedd5 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -43,13 +43,41 @@ type Status struct {
	s *spb.Status
}

+// NewWithProto returns a new status including details from statusProto. This
+// is meant to be used by the gRPC library only.
+func NewWithProto(code codes.Code, message string, statusProto []string) *Status {
+	if len(statusProto) != 1 {
+		// No grpc-status-details bin header, or multiple; just ignore.
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -64,7 +92,7 @@ func Err(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Err(c, fmt.Sprintf(format, a...)) } @@ -120,11 +148,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { +func (s *Status) Details() []any { if s == nil || s.s == nil { return nil } - details := make([]interface{}, 0, len(s.s.Details)) + details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go new file mode 100644 index 0000000000..4f347edd42 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go @@ -0,0 +1,29 @@ +//go:build !unix && !windows + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 0000000000..078137b7fd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. 
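Aside: NewWithProto above only trusts grpc-status-details-bin when its embedded code matches the plain grpc-status header; anything else degrades to the plain status or an Internal mismatch error. A simplified sketch of that decision table, using a stand-in status type rather than the vendored one:

package main

import "fmt"

type simpleStatus struct {
	code    int32
	message string
}

// reconcile mirrors the NewWithProto decision table: no (or multiple)
// detail payloads -> plain status; matching codes -> detailed status;
// mismatched codes -> Internal error. Simplified sketch, not the real API.
func reconcile(headerCode int32, msg string, details []simpleStatus) simpleStatus {
	if len(details) != 1 {
		return simpleStatus{headerCode, msg}
	}
	if details[0].code == headerCode {
		return details[0]
	}
	const codeInternal = 13
	return simpleStatus{codeInternal, fmt.Sprintf(
		"grpc-status-details-bin mismatch: grpc-status=%d, grpc-message=%q", headerCode, msg)}
}

func main() {
	fmt.Println(reconcile(5, "not found", nil))
	fmt.Println(reconcile(5, "not found", []simpleStatus{{5, "detailed not found"}}))
	fmt.Println(reconcile(5, "not found", []simpleStatus{{3, "wrong code"}}))
}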
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{
+		// Setting a negative value here prevents the Go stdlib from overriding
+		// the values of TCP keepalive time and interval. It also prevents the
+		// Go stdlib from enabling TCP keepalives by default.
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
new file mode 100644
index 0000000000..fd7d43a890
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -0,0 +1,54 @@
+//go:build windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/windows"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
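Aside: the dialer above is a drop-in *net.Dialer; only the Control hook and the negative KeepAlive are special. A hedged usage sketch (example.com:80 is a placeholder endpoint, and the negative KeepAlive alone just defers keepalive behavior to the socket options or OS defaults):

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	// Negative KeepAlive disables the Go runtime's own keepalive management,
	// leaving whatever the Control hook (or the OS default) configured.
	d := &net.Dialer{KeepAlive: time.Duration(-1)}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	conn, err := d.DialContext(ctx, "tcp", "example.com:80")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected:", conn.RemoteAddr())
}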
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{
+		// Setting a negative value here prevents the Go stdlib from overriding
+		// the values of TCP keepalive time and interval. It also prevents the
+		// Go stdlib from enabling TCP keepalives by default.
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index be5a9c81eb..83c3829826 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -40,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
}

type itemNode struct {
-	it   interface{}
+	it   any
	next *itemNode
}

@@ -49,7 +49,7 @@ type itemList struct {
	head *itemNode
	tail *itemNode
}

-func (il *itemList) enqueue(i interface{}) {
+func (il *itemList) enqueue(i any) {
	n := &itemNode{it: i}
	if il.tail == nil {
		il.head, il.tail = n, n
@@ -61,11 +61,11 @@
// peek returns the first item in the list without removing it from the
// list.
-func (il *itemList) peek() interface{} {
+func (il *itemList) peek() any {
	return il.head.it
}

-func (il *itemList) dequeue() interface{} {
+func (il *itemList) dequeue() any {
	if il.head == nil {
		return nil
	}
@@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error {
	return err
}

-func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
+func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) {
	var wakeUp bool
	c.mu.Lock()
	if c.err != nil {
@@ -373,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b
}

// Note argument f should never be nil.
-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
+func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
	c.mu.Lock()
	if c.err != nil {
		c.mu.Unlock()
@@ -387,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo
	return true, nil
}

-func (c *controlBuffer) get(block bool) (interface{}, error) {
+func (c *controlBuffer) get(block bool) (any, error) {
	for {
		c.mu.Lock()
		if c.err != nil {
@@ -535,8 +535,8 @@ const minBatchSize = 1000
// size is too low to give stream goroutines a chance to fill it up.
//
// Upon exiting, if the error causing the exit is not an I/O error, run()
-// flushes and closes the underlying connection. Otherwise, the connection is
-// left open to allow the I/O error to be encountered by the reader instead.
+// flushes the underlying connection. The connection is always left open to
+// allow different closing behavior on the client and server.
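Aside: with run() no longer closing the connection, each side's goroutine owns the close decision: the client closes immediately on a clean (non-I/O) exit, while the server delays, as the http2_server.go hunks below show. A rough sketch of that ownership split, with a hypothetical helper:

package main

import (
	"errors"
	"fmt"
	"net"
)

var errIO = errors.New("io error")

// closeOwnership sketches the new contract: the writer loop reports why it
// stopped but leaves the connection open, and the caller decides whether to
// close it. Hypothetical helper, sketch only.
func closeOwnership(conn net.Conn, runErr error) {
	if !errors.Is(runErr, errIO) {
		conn.Close() // non-I/O exit: the writer's caller owns the close
		fmt.Println("writer side closed the connection")
		return
	}
	fmt.Println("I/O error: the reader will observe it and close")
}

func main() {
	c1, c2 := net.Pipe() // in-memory connection pair for the demo
	defer c2.Close()
	closeOwnership(c1, nil)
}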
func (l *loopyWriter) run() (err error) { defer func() { if l.logger.V(logLevel) { @@ -544,7 +544,6 @@ func (l *loopyWriter) run() (err error) { } if !isIOError(err) { l.framer.writer.Flush() - l.conn.Close() } l.cbuf.finish() }() @@ -830,7 +829,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) handle(i interface{}) error { +func (l *loopyWriter) handle(i any) error { switch i := i.(type) { case *incomingWindowUpdate: l.incomingWindowUpdateHandler(i) diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3fa0..a9d70e2a16 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -75,11 +75,25 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s return nil, errors.New(msg) } + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), + peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, @@ -134,6 +148,8 @@ type serverHandlerTransport struct { headerMD metadata.MD + peer peer.Peer + closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -165,7 +181,13 @@ func (ht *serverHandlerTransport) Close(err error) { }) } -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. @@ -220,18 +242,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. if isReservedHeader(k) { continue @@ -243,6 +267,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -287,7 +312,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus). 
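Aside: NewServerHandlerTransport above now captures the peer up front: the remote address from r.RemoteAddr, the local address via http.LocalAddrContextKey, and auth info from r.TLS. A small sketch of the same extraction in a plain http.Handler (localhost:8080 is a placeholder address):

package main

import (
	"fmt"
	"net"
	"net/http"
)

// peerInfo mirrors how the handler transport derives peer data from the
// request context and TLS state. Sketch only.
func peerInfo(w http.ResponseWriter, r *http.Request) {
	var local net.Addr
	if la := r.Context().Value(http.LocalAddrContextKey); la != nil {
		local, _ = la.(net.Addr)
	}
	fmt.Fprintf(w, "remote=%s local=%v tls=%v\n", r.RemoteAddr, local, r.TLS != nil)
}

func main() {
	http.HandleFunc("/", peerInfo)
	_ = http.ListenAndServe("localhost:8080", nil)
}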
+// on the first write call (Write, WriteHeader, or WriteStatus)
func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
	h := ht.rw.Header()
@@ -344,10 +369,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
	return err
}

-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
+func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) {
	// With this transport type there will be exactly 1 stream: this HTTP request.
-
-	ctx := ht.req.Context()
	var cancel context.CancelFunc
	if ht.timeoutSet {
		ctx, cancel = context.WithTimeout(ctx, ht.timeout)
@@ -367,34 +390,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
		ht.Close(errors.New("request is done processing"))
	}()

+	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
	req := ht.req
-	s := &Stream{
-		id:             0, // irrelevant
-		requestRead:    func(int) {},
-		cancel:         cancel,
-		buf:            newRecvBuffer(),
-		st:             ht,
-		method:         req.URL.Path,
-		recvCompress:   req.Header.Get("grpc-encoding"),
-		contentSubtype: ht.contentSubtype,
-	}
-	pr := &peer.Peer{
-		Addr: ht.RemoteAddr(),
-	}
-	if req.TLS != nil {
-		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
-	}
-	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
-	s.ctx = peer.NewContext(ctx, pr)
-	for _, sh := range ht.stats {
-		s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
-		inHeader := &stats.InHeader{
-			FullMethod:  s.method,
-			RemoteAddr:  ht.RemoteAddr(),
-			Compression: s.recvCompress,
-		}
-		sh.HandleRPC(s.ctx, inHeader)
+	s := &Stream{
+		id:               0, // irrelevant
+		ctx:              ctx,
+		requestRead:      func(int) {},
+		cancel:           cancel,
+		buf:              newRecvBuffer(),
+		st:               ht,
+		method:           req.URL.Path,
+		recvCompress:     req.Header.Get("grpc-encoding"),
+		contentSubtype:   ht.contentSubtype,
+		headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
	}
	s.trReader = &transportReader{
		reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 326bf08480..eff8799640 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -36,6 +36,7 @@ import (
	"golang.org/x/net/http2/hpack"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/internal/channelz"
	icredentials "google.golang.org/grpc/internal/credentials"
	"google.golang.org/grpc/internal/grpclog"
@@ -43,7 +44,7 @@ import (
	"google.golang.org/grpc/internal/grpcutil"
	imetadata "google.golang.org/grpc/internal/metadata"
	istatus "google.golang.org/grpc/internal/status"
-	"google.golang.org/grpc/internal/syscall"
+	isyscall "google.golang.org/grpc/internal/syscall"
	"google.golang.org/grpc/internal/transport/networktype"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/metadata"
@@ -58,6 +59,8 @@ import (
// atomically.
var clientConnectionCounter uint64

+var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
+
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct { lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. @@ -176,7 +179,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if networkType == "tcp" && useProxy { return proxyDial(ctx, address, grpcUA) } - return (&net.Dialer{}).DialContext(ctx, networkType, address) + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -262,7 +265,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } keepaliveEnabled := false if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true @@ -330,7 +333,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), @@ -448,7 +451,13 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.run() + if err := t.loopy.run(); !isIOError(err) { + // Immediately close the connection, as the loopy writer returns + // when there are no more active streams and we were draining (the + // server sent a GOAWAY). For I/O errors, the reader will hit it + // after draining any remaining incoming data. + t.conn.Close() + } close(t.writerDone) }() return t, nil @@ -493,8 +502,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, } } @@ -566,7 +576,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. @@ -762,7 +772,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it interface{}) bool { + checkForStreamQuota := func(it any) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. 
if firstTry { t.waitingStreams++ @@ -800,7 +810,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { + checkForHeaderListSize := func(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -815,7 +825,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + success, err := t.controlBuf.executeAndPut(func(it any) bool { return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { @@ -927,7 +937,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(interface{}) bool { + addBackStreamQuota := func(any) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -1080,7 +1090,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(interface{}) bool { + updateIWS := func(any) bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1233,7 +1243,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -1321,10 +1331,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - if streamID > id && streamID <= upperLimit { - atomic.StoreUint32(&stream.unprocessed, 1) - streamsToClose = append(streamsToClose, stream) - } + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) } } t.mu.Unlock() @@ -1399,7 +1407,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1434,12 +1441,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1505,14 +1506,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - isHeader := false - - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true - if !endStream { - // HEADERS frame block carries a Response-Headers. - isHeader = true + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). 
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. @@ -1520,15 +1522,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(mdata) > 0 { s.header = mdata } - } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + close(s.headerChan) } - close(s.headerChan) } for _, sh := range t.statsHandlers { - if isHeader { + if !endStream { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), @@ -1550,13 +1549,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. + rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index ec4eef2134..a206e2eef7 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -68,18 +68,15 @@ var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window @@ -165,7 +162,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) // Send initial settings as connection preface to client. 
isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, @@ -233,7 +230,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, kp.Timeout = defaultServerKeepaliveTimeout } if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } } @@ -243,16 +240,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, } done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } t := &http2Server{ - ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, + peer: peer, framer: framer, readerDone: make(chan struct{}), - writerDone: make(chan struct{}), + loopyWriterDone: make(chan struct{}), maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, @@ -267,8 +266,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, bufferPool: newBufferPool(), } t.logger = prefixLoggerForServerTransport(t) - // Add peer information to the http2server context. - t.ctx = peer.NewContext(t.ctx, t.getPeer()) t.controlBuf = newControlBuffer(t.done) if dynamicWindow { @@ -277,15 +274,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.stats { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - sh.HandleConn(t.ctx, connBegin) - } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) if err != nil { return nil, err } @@ -333,8 +322,24 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - t.loopy.run() - close(t.writerDone) + err := t.loopy.run() + close(t.loopyWriterDone) + if !isIOError(err) { + // Close the connection if a non-I/O error occurs (for I/O errors + // the reader will also encounter the error and close). Wait 1 + // second before closing the connection, or when the reader is done + // (i.e. the client already closed the connection or a connection + // error occurred). This avoids the potential problem where there + // is unread data on the receive side of the connection, which, if + // closed, would lead to a TCP RST instead of FIN, and the client + // encountering errors. For more info: + // https://github.com/grpc/grpc-go/issues/5358 + select { + case <-t.readerDone: + case <-time.After(time.Second): + } + t.conn.Close() + } }() go t.keepalive() return t, nil @@ -342,7 +347,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. 
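Aside: the writer goroutine above avoids an abrupt TCP RST by waiting for the reader (or a one-second grace period) before closing a connection that may still hold unread data. The pattern in isolation, as a runnable sketch:

package main

import (
	"fmt"
	"time"
)

// closeGently waits for the reader to finish, or for a short grace period,
// before closing, so unread data doesn't turn the FIN into a RST. Mirrors
// the pattern in NewServerTransport's writer goroutine; sketch only.
func closeGently(readerDone <-chan struct{}, closeConn func()) {
	select {
	case <-readerDone:
	case <-time.After(time.Second):
	}
	closeConn()
}

func main() {
	readerDone := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(readerDone)
	}()
	closeGently(readerDone, func() { fmt.Println("connection closed") })
}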
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -369,10 +374,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), } var ( // if false, content-type was missing or invalid @@ -511,9 +517,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.state = streamReadDone } if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) + s.ctx, s.cancel = context.WithCancel(ctx) } // Attach the received metadata to the context. @@ -561,7 +567,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() if t.logger.V(logLevel) { t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) @@ -592,19 +598,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) - for _, sh := range t.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - Header: mdata.Copy(), - } - sh.HandleRPC(s.ctx, inHeader) - } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ @@ -630,8 +623,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { - defer close(t.readerDone) +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + close(t.readerDone) + <-t.loopyWriterDone + }() for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() @@ -665,7 +661,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. 
} switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + if err := t.operateHeaders(ctx, frame, handle); err != nil { t.Close(err) break } @@ -850,7 +846,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -934,7 +930,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +func (t *http2Server) checkForHeaderListSize(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -980,7 +976,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - return status.Convert(err).Err() + switch e := err.(type) { + case ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + return status.Convert(err).Err() + } } return nil } @@ -1053,12 +1054,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } @@ -1240,10 +1244,6 @@ func (t *http2Server) Close(err error) { for _, s := range streams { s.cancel() } - for _, sh := range t.stats { - connEnd := &stats.ConnEnd{} - sh.HandleConn(t.ctx, connEnd) - } } // deleteStream deletes the stream s from transport's active streams. 
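Aside: WriteStatus above now writes detailed statuses under the shared grpcStatusDetailsBinHeader constant and evicts any user-set trailer of the same name first. On the public API, such details typically originate from status.WithDetails; a hedged sketch (errdetails.ResourceInfo is just one possible detail message, and the exact module versions are assumptions):

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// notFound builds a status carrying a typed detail; when a handler returns
// it, WriteStatus marshals the details into the grpc-status-details-bin
// trailer shown above. Sketch only.
func notFound(name string) error {
	st, err := status.New(codes.NotFound, "resource missing").WithDetails(
		&errdetails.ResourceInfo{ResourceName: name},
	)
	if err != nil {
		// Details could not be attached; fall back to the plain status.
		return status.Error(codes.NotFound, "resource missing")
	}
	return st.Err()
}

func main() {
	fmt.Println(notFound("projects/demo"))
}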
@@ -1309,10 +1309,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() @@ -1349,6 +1345,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } + t.framer.writer.Flush() if retErr != nil { return false, retErr } @@ -1369,7 +1366,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, err } go func() { - timer := time.NewTimer(time.Minute) + timer := time.NewTimer(5 * time.Second) defer timer.Stop() select { case <-t.drainEvent.Done(): @@ -1395,11 +1392,11 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, // RemoteName : } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() @@ -1431,10 +1428,12 @@ func (t *http2Server) getOutFlowWindow() int64 { } } -func (t *http2Server) getPeer() *peer.Peer { +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil } } @@ -1459,6 +1458,6 @@ func GetConnection(ctx context.Context) net.Conn { // SetConnection adds the connection to the context to be able to get // information about the destination ip and port for an incoming RPC. This also // allows any unary or streaming interceptors to see the connection. -func setConnection(ctx context.Context, conn net.Conn) context.Context { +func SetConnection(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, connectionKey{}, conn) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 19cbb18f5a..dc29d590e9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -30,15 +30,13 @@ import ( "net/url" "strconv" "strings" + "sync" "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( @@ -87,6 +85,8 @@ var ( } ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -102,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. 
@@ -153,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( @@ -309,6 +296,7 @@ func decodeGrpcMessageUnchecked(msg string) string { } type bufWriter struct { + pool *sync.Pool buf []byte offset int batchSize int @@ -316,12 +304,17 @@ type bufWriter struct { err error } -func newBufWriter(conn net.Conn, batchSize int) *bufWriter { - return &bufWriter{ - buf: make([]byte, batchSize*2), +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ batchSize: batchSize, conn: conn, + pool: pool, + } + // a nil pool indicates that a dedicated, non-shared buffer should be used + if pool == nil { + w.buf = make([]byte, batchSize) } + return w } func (w *bufWriter) Write(b []byte) (n int, err error) { @@ -332,19 +325,34 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { n, err = w.conn.Write(b) return n, toIOError(err) } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) b = b[nn:] w.offset += nn n += nn if w.offset >= w.batchSize { - err = w.Flush() + err = w.flushKeepBuffer() } } return n, err } func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { if w.err != nil { return w.err } @@ -381,7 +389,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { +var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -389,7 +400,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList if readBufferSize > 0 { r = bufio.NewReaderSize(r, readBufferSize) } - w := newBufWriter(conn, writeBufferSize) + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) f := &framer{ writer: w, fr: http2.NewFramer(w, r), @@ -403,6 +418,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList return f } +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + // parseDialTarget returns the network and address to pass to dialer.
func parseDialTarget(target string) (string, string) { net := "tcp" diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 4159619878..24fa103257 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -28,6 +28,8 @@ import ( "net/http" "net/http/httputil" "net/url" + + "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" @@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy // is necessary, dials, does the HTTP CONNECT handshake, and returns the // connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { newAddr := addr proxyURL, err := mapAddress(addr) if err != nil { @@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, newAddr = proxyURL.Host } - conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) if err != nil { - return + return nil, err } - if proxyURL != nil { + if proxyURL == nil { // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return conn, err } - return + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index aa1c896595..b7b8fec180 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -37,16 +37,13 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) -// ErrNoHeaders is used as a signal that a trailers only response was received, -// and is not a real error. -var ErrNoHeaders = errors.New("stream has no headers") - const logLevel = 2 type bufferPool struct { @@ -56,7 +53,7 @@ type bufferPool struct { func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, }, @@ -269,7 +266,8 @@ type Stream struct { // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. - headerValid bool + headerValid bool + headerWireLength int // Only set on server side. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -390,14 +388,10 @@ func (s *Stream) Header() (metadata.MD, error) { } s.waitOnHeader() - if !s.headerValid { + if !s.headerValid || s.noHeaders { return nil, s.status.Err() } - if s.noHeaders { - return nil, ErrNoHeaders - } - return s.header.Copy(), nil } @@ -433,6 +427,12 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. 
This will be deleted once the +// stats handler callouts all move to the gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -445,6 +445,12 @@ func (s *Stream) Status() *status.Status { return s.status } +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. @@ -559,6 +565,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int + SharedWriteBuffer bool ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 @@ -592,6 +599,8 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse the write buffer + SharedWriteBuffer bool // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. @@ -703,7 +712,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(context.Context, func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -722,8 +731,8 @@ type ServerTransport interface { // handlers will be terminated asynchronously. Close(err error) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns the peer of the server transport. + Peer() *peer.Peer // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) @@ -736,7 +745,7 @@ type ServerTransport interface { } // connectionErrorf creates an ConnectionError with the specified error description. -func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index a2cdcaf12a..1e9485fd6e 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -25,8 +25,14 @@ import ( "context" "fmt" "strings" + + "google.golang.org/grpc/internal" ) +func init() { + internal.FromOutgoingContextRaw = fromOutgoingContextRaw +} + // DecodeKeyValue returns k, v, nil. // // Deprecated: use k and v directly instead. @@ -153,14 +159,16 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function.
func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } @@ -203,7 +211,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { } // ValueFromIncomingContext returns the metadata value corresponding to the metadata -// key from the incoming metadata if it exists. Key must be lower-case. +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. // // # Experimental // @@ -219,33 +228,29 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // We need to manually convert all keys to lower case, because MD is a - // map, and there's no guarantee that the MD attached to the context is - // created using our helper functions. - if strings.ToLower(k) == key { + // Case insensitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. + if strings.EqualFold(k, key) { return copyOf(v) } } return nil } -// the returned slice must not be modified in place func copyOf(v []string) []string { vals := make([]string, len(v)) copy(vals, v) return vals } -// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // // Remember to perform strings.ToLower on the keys, for both the returned MD (MD // is a map, there's no guarantee it's created using our helper functions) and // the extra kv pairs (AppendToOutgoingContext doesn't turn them into // lowercase). -// -// This is intended for gRPC-internal use ONLY. Users should use -// FromOutgoingContext instead. -func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { +func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, nil, false diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index e01d219ffb..a821ff9b2b 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -32,6 +32,8 @@ import ( type Peer struct { // Addr is the peer address. Addr net.Addr + // LocalAddr is the local address. + LocalAddr net.Addr // AuthInfo is the authentication information of the transport. // It is nil if there is no transport security being used. AuthInfo credentials.AuthInfo diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 02f9759512..bf56faa76d 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -28,31 +28,31 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update.
type pickerWrapper struct { - mu sync.Mutex - done bool - idle bool - blockingCh chan struct{} - picker balancer.Picker + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker + statsHandlers []stats.Handler // to record blocking picker calls } -func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{})} +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + return &pickerWrapper{ + blockingCh: make(chan struct{}), + statsHandlers: statsHandlers, + } } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done || pw.idle { - // There is a small window where a picker update from the LB policy can - // race with the channel going to idle mode. If the picker is idle here, - // it is because the channel asked it to do so, and therefore it is sage - // to ignore the update from the LB policy. + if pw.done { pw.mu.Unlock() return } @@ -95,6 +95,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var ch chan struct{} var lastPickErr error + for { pw.mu.Lock() if pw.done { @@ -129,6 +130,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. continue } + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + ch = pw.blockingCh p := pw.picker pw.mu.Unlock() @@ -190,23 +205,15 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } -func (pw *pickerWrapper) enterIdleMode() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.idle = true -} - -func (pw *pickerWrapper) exitIdleMode() { +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. +func (pw *pickerWrapper) reset() { pw.mu.Lock() defer pw.mu.Unlock() if pw.done { return } pw.blockingCh = make(chan struct{}) - pw.idle = false } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index abe266b021..5128f9364d 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -25,13 +25,18 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) -// PickFirstBalancerName is the name of the pick_first balancer. -const PickFirstBalancerName = "pick_first" +const ( + // PickFirstBalancerName is the name of the pick_first balancer. 
+ PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} @@ -40,7 +45,9 @@ func newPickfirstBuilder() balancer.Builder { type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b } func (*pickfirstBuilder) Name() string { @@ -57,23 +64,23 @@ type pfConfig struct { } func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - cfg := &pfConfig{} - if err := json.Unmarshal(js, cfg); err != nil { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) } return cfg, nil } type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger state connectivity.State cc balancer.ClientConn subConn balancer.SubConn - cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -96,35 +103,44 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { - // Remove the old subConn. All addresses were removed, so it is no longer - // valid. - b.cc.RemoveSubConn(b.subConn) + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() b.subConn = nil } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if state.BalancerConfig != nil { - cfg, ok := state.BalancerConfig.(*pfConfig) - if !ok { - return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) - } - b.cfg = cfg + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - - if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) 
grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + if b.subConn != nil { b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ @@ -143,13 +159,19 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return nil } +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) } if b.subConn != subConn { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") } return } diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index cd45547854..73bd633643 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -37,7 +37,7 @@ type PreparedMsg struct { } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +func (p *PreparedMsg) Encode(s Stream, msg any) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md index 04b6371afc..9ace83ccb6 100644 --- a/vendor/google.golang.org/grpc/reflection/README.md +++ b/vendor/google.golang.org/grpc/reflection/README.md @@ -2,7 +2,7 @@ Package reflection implements server reflection service. -The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. +The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1/reflection.proto. To register server reflection on a gRPC server: ```go diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go new file mode 100644 index 0000000000..33b907a36d --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/adapt.go @@ -0,0 +1,187 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reflection + +import ( + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// asV1Alpha returns an implementation of the v1alpha version of the reflection +// interface that delegates all calls to the given v1 version. +func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { + return v1AlphaServerImpl{svr: svr} +} + +type v1AlphaServerImpl struct { + svr v1reflectiongrpc.ServerReflectionServer +} + +func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) +} + +type v1AlphaServerStreamAdapter struct { + v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer +} + +func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { + return s.ServerReflection_ServerReflectionInfoServer.Send(v1ToV1AlphaResponse(response)) +} + +func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { + resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() + if err != nil { + return nil, err + } + return v1AlphaToV1Request(resp), nil +} + +func v1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { + var v1alpha v1alphareflectionpb.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = v1ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1alphareflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ + 
Service: svcs, + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphareflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1alpha +} + +func v1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { + var v1 v1reflectionpb.ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *v1alphareflectionpb.ServerReflectionRequest_ListServices: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +func v1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { + var v1alpha v1alphareflectionpb.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} diff --git 
a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go new file mode 100644 index 0000000000..6f5c786b21 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -0,0 +1,953 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. 
+ // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. <package>.<service>[.<method>] or <package>.<type>). + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number.
+ FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // <package>.<type> + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. + ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. The format should be <package>.<type> + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. +func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method.
+type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the message_request + // in the request. + // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive 
dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requests. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services requests. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} +} + +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. 
+type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is <package>.<type> + BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} +} + +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request.
+type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is <package>.<service> + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead.
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, + 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, + 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x65, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, + 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, + 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 
0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse +} +var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest + 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest + 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse + 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse + 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse + 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse + 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest + 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_grpc_reflection_v1_reflection_proto_init() } +func file_grpc_reflection_v1_reflection_proto_init() { + if File_grpc_reflection_v1_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1_reflection_proto = out.File + file_grpc_reflection_v1_reflection_proto_rawDesc = nil + 
file_grpc_reflection_v1_reflection_proto_goTypes = nil + file_grpc_reflection_v1_reflection_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go new file mode 100644 index 0000000000..62b56a8be0 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -0,0 +1,164 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index d54c07676d..69fbfb621e 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index e2f9ebfbbc..c3b408392f 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -48,8 +48,9 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" - v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -63,9 +64,19 @@ type GRPCServer interface { var _ GRPCServer = (*grpc.Server)(nil) // Register registers the server reflection service on the given gRPC server. +// Both the v1 and v1alpha versions are registered. func Register(s GRPCServer) { - svr := NewServer(ServerOptions{Services: s}) - v1alphagrpc.RegisterServerReflectionServer(s, svr) + svr := NewServerV1(ServerOptions{Services: s}) + v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) +} + +// RegisterV1 registers only the v1 version of the server reflection service +// on the given gRPC server. Many clients may only support v1alpha so most +// users should use Register instead, at least until clients have upgraded. +func RegisterV1(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) } // ServiceInfoProvider is an interface used to retrieve metadata about the @@ -120,13 +131,27 @@ type ServerOptions struct { // NewServer returns a reflection server implementation using the given options. // This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. For backwards compatibility reasons, +// this returns the v1alpha version of the reflection server. For a v1 version +// of the reflection server, see NewServerV1. 
+// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer { + return asV1Alpha(NewServerV1(opts)) +} + +// NewServerV1 returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages // should prefer to use Register instead. // // # Experimental // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. -func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { +func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { if opts.DescriptorResolver == nil { opts.DescriptorResolver = protoregistry.GlobalFiles } @@ -141,7 +166,7 @@ func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { } type serverReflectionServer struct { - v1alphagrpc.UnimplementedServerReflectionServer + v1alphareflectiongrpc.UnimplementedServerReflectionServer s ServiceInfoProvider descResolver protodesc.Resolver extResolver ExtensionResolver @@ -151,11 +176,20 @@ type serverReflectionServer struct { // wire format ([]byte). The fileDescriptors will include fd and all the // transitive dependencies of fd with names not in sentFileDescriptors. func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + if fd.IsPlaceholder() { + // If the given root file is a placeholder, treat it + // as missing instead of serializing it. + return nil, protoregistry.NotFound + } var r [][]byte queue := []protoreflect.FileDescriptor{fd} for len(queue) > 0 { currentfd := queue[0] queue = queue[1:] + if currentfd.IsPlaceholder() { + // Skip any missing files in the dependency graph. + continue + } if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { sentFileDescriptors[currentfd.Path()] = true fdProto := protodesc.ToFileDescriptorProto(currentfd) @@ -215,11 +249,11 @@ func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([] } // listServices returns the names of services this server exposes. -func (s *serverReflectionServer) listServices() []*v1alphapb.ServiceResponse { +func (s *serverReflectionServer) listServices() []*v1reflectionpb.ServiceResponse { serviceInfo := s.s.GetServiceInfo() - resp := make([]*v1alphapb.ServiceResponse, 0, len(serviceInfo)) + resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) for svc := range serviceInfo { - resp = append(resp, &v1alphapb.ServiceResponse{Name: svc}) + resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) } sort.Slice(resp, func(i, j int) bool { return resp[i].Name < resp[j].Name @@ -228,7 +262,7 @@ func (s *serverReflectionServer) listServices() []*v1alphapb.ServiceResponse { } // ServerReflectionInfo is the reflection service handler. 
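// Editorial sketch, not part of the vendored diff: with the Register change
// above, a reflection-enabled server answers on both the grpc.reflection.v1
// and grpc.reflection.v1alpha service names, while the new RegisterV1 limits
// it to v1. A minimal server wiring, assuming the standard reflection package:
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	// Registers v1 and v1alpha; reflection.RegisterV1(s) would serve v1 only.
	// Per the doc comment above, prefer Register until clients support v1.
	reflection.Register(s)
	log.Fatal(s.Serve(lis))
}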
-func (s *serverReflectionServer) ServerReflectionInfo(stream v1alphagrpc.ServerReflection_ServerReflectionInfoServer) error { +func (s *serverReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { sentFileDescriptors := make(map[string]bool) for { in, err := stream.Recv() @@ -239,79 +273,79 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream v1alphagrpc.ServerR return err } - out := &v1alphapb.ServerReflectionResponse{ + out := &v1reflectionpb.ServerReflectionResponse{ ValidHost: in.Host, OriginalRequest: in, } switch req := in.MessageRequest.(type) { - case *v1alphapb.ServerReflectionRequest_FileByFilename: + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: var b [][]byte fd, err := s.descResolver.FindFileByPath(req.FileByFilename) if err == nil { b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) } if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_FileContainingSymbol: + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_FileContainingExtension: + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: typeName := req.FileContainingExtension.ContainingType extNum := req.FileContainingExtension.ExtensionNumber b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case 
*v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType: + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1alphapb.ExtensionNumberResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ BaseTypeName: req.AllExtensionNumbersOfType, ExtensionNumber: extNums, }, } } - case *v1alphapb.ServerReflectionRequest_ListServices: - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1alphapb.ListServiceResponse{ + case *v1reflectionpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ Service: s.listServices(), }, } diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 0000000000..14aa6f20ae --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. +package dns + +import ( + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. +func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index efcb7f3efd..ada5b9bb79 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -20,7 +20,7 @@ package resolver type addressMapEntry struct { addr Address - value interface{} + value any } // AddressMap is a map of addresses to arbitrary values taking into account @@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. 
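// Editorial sketch, not part of the vendored diff: resolver/dns above is now a
// deprecated shim, and its doc comment points callers at the global registry
// instead of dns.NewBuilder(). Assuming the "dns" builder is registered as a
// side effect of linking in the grpc package (which imports the internal dns
// resolver), the lookup is:
package main

import (
	"fmt"

	_ "google.golang.org/grpc" // links in the built-in resolvers, including "dns"
	"google.golang.org/grpc/resolver"
)

func main() {
	// resolver.Get returns the Builder registered for a scheme, or nil.
	if b := resolver.Get("dns"); b != nil {
		fmt.Println("found builder for scheme:", b.Scheme())
	}
}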
-func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { +func (a *AddressMap) Get(addr Address) (value any, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value interface{}) { +func (a *AddressMap) Set(addr Address, value any) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. -func (a *AddressMap) Values() []interface{} { - ret := make([]interface{}, 0, a.Len()) +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) @@ -136,3 +136,116 @@ func (a *AddressMap) Values() []interface{} { } return ret } + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. +func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. +func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. 
+func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 353c10b69a..adf89dd9cf 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -77,25 +77,6 @@ func GetDefaultScheme() string { return defaultScheme } -// AddressType indicates the address type returned by name resolution. -// -// Deprecated: use Attributes in Address instead. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - // - // Deprecated: use Attributes in Address instead. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. - // - // Deprecated: to select the GRPCLB load balancing policy, use a service - // config with a corresponding loadBalancingConfig. To supply balancer - // addresses to the GRPCLB load balancing policy, set State.Attributes - // using balancer/grpclb/state.Set. - GRPCLB -) - // Address represents a server the client connects to. // // # Experimental @@ -111,9 +92,6 @@ type Address struct { // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // - // If Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. - // // WARNING: ServerName must only be populated with trusted values. It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. @@ -126,27 +104,29 @@ type Address struct { // BalancerAttributes contains arbitrary data about this address intended // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. - BalancerAttributes *attributes.Attributes - - // Type is the type of this address. // - // Deprecated: use Attributes instead. - Type AddressType + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. - Metadata interface{} + Metadata any } // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. 
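// Editorial sketch, not part of the vendored diff: the EndpointMap added to
// resolver/map.go above keys entries on the unordered set of address strings
// in an Endpoint (see find and endpointNode.Equal), so the same addresses in a
// different order resolve to the same entry:
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	em := resolver.NewEndpointMap()
	em.Set(resolver.Endpoint{
		Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}},
	}, "backend-a")

	// Same address set, reversed order: still found.
	v, ok := em.Get(resolver.Endpoint{
		Addresses: []resolver.Address{{Addr: "10.0.0.2:443"}, {Addr: "10.0.0.1:443"}},
	})
	fmt.Println(v, ok) // backend-a true
}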
func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Type == o.Type && a.Metadata == o.Metadata + a.Metadata == o.Metadata } // String returns JSON formatted string representation of the address. @@ -190,11 +170,37 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) } +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. Addresses []Address + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. @@ -234,11 +240,6 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. - NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -254,20 +255,7 @@ type ClientConn interface { // target does not contain a scheme or if the parsed scheme is not registered // (i.e. no corresponding resolver available to resolve the endpoint), we will // apply the default scheme, and will attempt to reparse it. -// -// Examples: -// -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - // Deprecated: use URL.Scheme instead. - Scheme string - // Deprecated: use URL.Host instead. - Authority string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. 
Any query params specified in the original dial @@ -293,6 +281,11 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } +// String returns a string representation of Target. +func (t Target) String() string { + return t.URL.String() +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. @@ -322,9 +315,12 @@ type Resolver interface { Close() } -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) +// AuthorityOverrider is implemented by Builders that wish to override the +// default authority for the ClientConn. +// By default, the authority used is target.Endpoint(). +type AuthorityOverrider interface { + // OverrideAuthority returns the authority to use for a ClientConn with the + // given target. The implementation must generate it without blocking, + // typically in line, and must keep it unchanged. + OverrideAuthority(Target) string } diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index b408b3688f..0000000000 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,239 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// resolverStateUpdater wraps the single method used by ccResolverWrapper to -// report a state update from the actual resolver implementation. -type resolverStateUpdater interface { - updateResolverState(s resolver.State, err error) error -} - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. -type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc resolverStateUpdater - channelzID *channelz.Identifier - ignoreServiceConfig bool - opts ccResolverWrapperOpts - serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. - serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). - - // All incoming (resolver --> gRPC) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled on the serializer. - // Fields accessed *only* in these serializer callbacks, can therefore be - // accessed without a mutex. - curState resolver.State - - // mu guards access to the below fields. 
- mu sync.Mutex - closed bool - resolver resolver.Resolver // Accessed only from outgoing calls. -} - -// ccResolverWrapperOpts wraps the arguments to be passed when creating a new -// ccResolverWrapper. -type ccResolverWrapperOpts struct { - target resolver.Target // User specified dial target to resolve. - builder resolver.Builder // Resolver builder to use. - bOpts resolver.BuildOptions // Resolver build options to use. - channelzID *channelz.Identifier // Channelz identifier for the channel. -} - -// newCCResolverWrapper uses the resolver.Builder to build a Resolver and -// returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { - ctx, cancel := context.WithCancel(context.Background()) - ccr := &ccResolverWrapper{ - cc: cc, - channelzID: opts.channelzID, - ignoreServiceConfig: opts.bOpts.DisableServiceConfig, - opts: opts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - - // Cannot hold the lock at build time because the resolver can send an - // update or error inline and these incoming calls grab the lock to schedule - // a callback in the serializer. - r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) - if err != nil { - cancel() - return nil, err - } - - // Any error reported by the resolver at build time that leads to a - // re-resolution request from the balancer is dropped by grpc until we - // return from this function. So, we don't have to handle pending resolveNow - // requests here. - ccr.mu.Lock() - ccr.resolver = r - ccr.mu.Unlock() - - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.mu.Lock() - defer ccr.mu.Unlock() - - // ccr.resolver field is set only after the call to Build() returns. But in - // the process of building, the resolver may send an error update which when - // propagated to the balancer may result in a re-resolution request. - if ccr.closed || ccr.resolver == nil { - return - } - ccr.resolver.ResolveNow(o) -} - -func (ccr *ccResolverWrapper) close() { - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - return - } - - channelz.Info(logger, ccr.channelzID, "Closing the name resolver") - - // Close the serializer to ensure that no more calls from the resolver are - // handled, before actually closing the resolver. - ccr.serializerCancel() - ccr.closed = true - r := ccr.resolver - ccr.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done - - // Spawn a goroutine to close the resolver (since it may block trying to - // cleanup all allocated resources) and return early. - go r.Close() -} - -// serializerScheduleLocked is a convenience method to schedule a function to be -// run on the serializer while holding ccr.mu. -func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { - ccr.mu.Lock() - ccr.serializer.Schedule(f) - ccr.mu.Unlock() -} - -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. 
-func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - errCh := make(chan error, 1) - ok := ccr.serializer.Schedule(func(context.Context) { - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - errCh <- balancer.ErrBadResolverState - return - } - errCh <- nil - }) - if !ok { - // The only time when Schedule() fail to add the callback to the - // serializer is when the serializer is closed, and this happens only - // when the resolver wrapper is closed. - return nil - } - return <-errCh -} - -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. -func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) - }) -} - -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.serializerScheduleLocked(func(_ context.Context) { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// NewServiceConfig is called by the resolver implementation to send service -// configs to gRPC. -func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.ignoreServiceConfig { - channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. -func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. 
-func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) -} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 0000000000..c79bab1214 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. 
+func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.Schedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. 
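// Editorial sketch, not part of the vendored diff: the UpdateState change
// above synthesizes one single-address Endpoint per Address whenever a
// resolver does not populate Endpoints itself, hoisting each address's
// BalancerAttributes onto the Endpoint. The same transformation, standalone:
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	s := resolver.State{
		Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}},
	}
	if s.Endpoints == nil {
		s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
		for _, a := range s.Addresses {
			ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
			ep.Addresses[0].BalancerAttributes = nil // attributes now live on the Endpoint
			s.Endpoints = append(s.Endpoints, ep)
		}
	}
	fmt.Println(len(s.Endpoints)) // 2, one endpoint per address
}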
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2030736a30..a4b6bc6873 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -75,7 +75,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { } return &gzipCompressor{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) @@ -577,6 +577,9 @@ type parser struct { // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) + msg = p.recvBufferPool.Get(int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -625,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg interface{}) ([]byte, error) { +func encode(c baseCodec, msg any) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -639,14 +640,18 @@ func encode(c baseCodec, msg interface{}) ([]byte, error) { return b, nil } -// compress returns the input bytes compressed by compressor or cp. If both -// compressors are nil, returns nil. +// compress returns the input bytes compressed by compressor or cp. +// If both compressors are nil, or if the message has zero length, returns nil, +// indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. 
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { if compressor == nil && cp == nil { return nil, nil } + if len(in) == 0 { + return nil, nil + } wrapErr := func(err error) error { return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } @@ -692,7 +697,7 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { return hdr, data } -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, @@ -726,12 +731,12 @@ type payloadInfo struct { } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) + pf, buf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.compressedLength = len(d) + payInfo.compressedLength = len(buf) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -743,10 +748,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - size = len(d) + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) } else { - d, size, err = decompress(compressor, d, maxReceiveMessageSize) + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) } if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) @@ -757,7 +762,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } - return d, nil + return buf, nil } // Using compressor, decompress d, returning data and size. @@ -791,16 +796,18 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = d + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) } return nil } @@ -860,19 +867,22 @@ func ErrorDesc(err error) string { // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. 
-func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return status.Errorf(c, format, a...) } +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { switch err { case nil, io.EOF: return err case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) + return errContextDeadline case context.Canceled: - return status.Error(codes.Canceled, err.Error()) + return errContextCanceled case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8869cc906f..e89c5ac613 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -70,9 +70,10 @@ func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } - internal.DrainServerTransports = func(srv *Server, addr string) { - srv.drainServerTransports(addr) + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) } + internal.ServerFromContext = serverFromContext internal.AddGlobalServerOptions = func(opt ...ServerOption) { globalServerOptions = append(globalServerOptions, opt...) } @@ -81,12 +82,13 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption + internal.RecvBufferPool = recvBufferPool } var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -99,20 +101,20 @@ type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. - HandlerType interface{} + HandlerType any Methods []MethodDesc Streams []StreamDesc - Metadata interface{} + Metadata any } // serviceInfo wraps information about a service. It is very similar to // ServiceDesc and is constructed from it for internal purposes. type serviceInfo struct { // Contains the implementation for the methods in this service. - serviceImpl interface{} + serviceImpl any methods map[string]*MethodDesc streams map[string]*StreamDesc - mdata interface{} + mdata any } // Server is a gRPC server to serve RPC requests. 
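The rpc_util.go hunks above thread a SharedBufferPool through the receive path: parser.recvMsg now borrows its message buffer from p.recvBufferPool instead of calling make([]byte, length) per message, and recv returns that buffer to the pool after Unmarshal, but only when no payInfo is attached (stats handlers and binary logging retain the buffer as uncompressedBytes, so it must not be recycled). A minimal sketch of that borrow/decode/recycle lifecycle, using the exported SharedBufferPool API added later in this diff; readMessage and its arguments are hypothetical stand-ins for the unexported parser internals:

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// readMessage borrows a buffer sized to the wire length, decodes from it,
// and recycles it only when nothing else still references it.
func readMessage(pool grpc.SharedBufferPool, wire []byte, retained bool) string {
	buf := pool.Get(len(wire)) // replaces make([]byte, length) per message
	copy(buf, wire)            // stands in for reading off the transport
	msg := string(buf)         // stands in for codec.Unmarshal; copies the bytes
	if !retained {
		pool.Put(&buf) // safe: msg no longer aliases buf
	}
	return msg
}

func main() {
	pool := grpc.NewSharedBufferPool()
	fmt.Println(readMessage(pool, []byte("hello"), false))
}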
@@ -134,12 +136,14 @@ type Server struct { quit *grpcsync.Event done *grpcsync.Event channelzRemoveOnce sync.Once - serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop + handlersWG sync.WaitGroup // counts active method handler goroutines channelzID *channelz.Identifier czData *channelzData - serverWorkerChannel chan func() + serverWorkerChannel chan func() + serverWorkerChannelClose func() } type serverOptions struct { @@ -164,10 +168,13 @@ type serverOptions struct { initialConnWindowSize int32 writeBufferSize int readBufferSize int + sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 + recvBufferPool SharedBufferPool + waitForHandlers bool } var defaultServerOptions = serverOptions{ @@ -177,6 +184,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, } var globalServerOptions []ServerOption @@ -228,6 +236,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } +// SharedWriteBuffer allows reusing the per-connection transport write buffer. +// If this option is set to true, every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + // WriteBufferSize determines how much data can be batched before doing a write // on the wire. The corresponding memory allocation for this buffer will be // twice the size to keep syscalls low. The default value for this buffer is @@ -268,9 +290,9 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second + kp.Time = internal.KeepaliveMinServerPingTime } return newFuncServerOption(func(o *serverOptions) { @@ -550,6 +572,44 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// WaitForHandlers causes Stop to wait until all outstanding method handlers have +// exited before returning. If false, Stop will return as soon as all +// connections have closed, but method handlers may still be running. By +// default, Stop does not wait for method handlers to return. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WaitForHandlers(w bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.waitForHandlers = w + }) +} + +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool.
+// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return recvBufferPool(bufferPool) +} + +func recvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 2^16 should allow @@ -578,15 +638,14 @@ func (s *Server) serverWorker() { // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + close(s.serverWorkerChannel) + }) for i := uint32(0); i < s.opts.numServerWorkers; i++ { go s.serverWorker() } } -func (s *Server) stopServerWorkers() { - close(s.serverWorkerChannel) -} - // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -625,7 +684,7 @@ func NewServer(opt ...ServerOption) *Server { // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { +func (s *Server) printf(format string, a ...any) { if s.events != nil { s.events.Printf(format, a...) } @@ -633,7 +692,7 @@ func (s *Server) printf(format string, a ...interface{}) { // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { +func (s *Server) errorf(format string, a ...any) { if s.events != nil { s.events.Errorf(format, a...) } @@ -648,14 +707,14 @@ type ServiceRegistrar interface { // once the server has started serving. // desc describes the service and its methods and handlers. impl is the // service implementation which is passed to the method handlers. - RegisterService(desc *ServiceDesc, impl interface{}) + RegisterService(desc *ServiceDesc, impl any) } // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before // invoking Serve. If ss is non-nil (for legacy code), its type is checked to // ensure it implements sd.HandlerType. -func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { if ss != nil { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) @@ -666,7 +725,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss interface{}) { +func (s *Server) register(sd *ServiceDesc, ss any) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) @@ -707,7 +766,7 @@ type MethodInfo struct { type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} + Metadata any } // GetServiceInfo returns a map from service names to ServiceInfo. 
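The three server options introduced above (SharedWriteBuffer, WaitForHandlers, RecvBufferPool) are all opt-in knobs on grpc.NewServer. A hedged usage sketch follows; the listener address and the commented-out service registration are illustrative only, all three options are marked experimental, and RecvBufferPool is already deprecated in favor of experimental.WithRecvBufferPool per the comment above:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051") // illustrative address
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	srv := grpc.NewServer(
		// Reuse receive buffers when parsing incoming messages; ignored
		// when stats handlers, tracing, or binary logging are active.
		grpc.RecvBufferPool(grpc.NewSharedBufferPool()),
		// Make Stop wait for in-flight method handlers, not just for
		// connections to close.
		grpc.WaitForHandlers(true),
		// Release each connection's transport write buffer after flushing.
		grpc.SharedWriteBuffer(true),
	)
	// pb.RegisterEchoServer(srv, &echoServer{}) // hypothetical service
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}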
@@ -768,6 +827,18 @@ func (l *listenSocket) Close() error { // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. // Serve will return a non-nil error unless Stop or GracefulStop is called. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -875,24 +946,21 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } + if cc, ok := rawConn.(interface { + PassServerTransport(transport.ServerTransport) + }); ok { + cc.PassServerTransport(st) + } + if !s.addConn(lisAddr, st) { return } go func() { - s.serveStreams(st) + s.serveStreams(context.Background(), st, rawConn) s.removeConn(lisAddr, st) }() } -func (s *Server) drainServerTransports(addr string) { - s.mu.Lock() - conns := s.conns[addr] - for st := range conns { - st.Drain("") - } - s.mu.Unlock() -} - // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { @@ -908,6 +976,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, @@ -932,19 +1001,32 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { return st } -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close(errors.New("finished serving streams for the server transport")) - var wg sync.WaitGroup +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } - streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { + s.handlersWG.Add(1) streamQuota.acquire() f := func() { defer streamQuota.release() - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) + defer s.handlersWG.Done() + s.handleStream(st, stream) } if 
s.opts.numServerWorkers > 0 { @@ -956,14 +1038,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } } go f() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) }) - wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -1007,31 +1082,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(st) -} - -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. -func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo + s.serveStreams(r.Context(), st, nil) } func (s *Server) addConn(addr string, st transport.ServerTransport) bool { @@ -1094,7 +1145,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1113,7 +1164,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1141,7 +1192,7 @@ func chainUnaryServerInterceptors(s *Server) { } func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) } } @@ -1150,12 +1201,12 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info if curr == len(interceptors)-1 { return finalHandler } - return func(ctx context.Context, req interface{}) (interface{}, error) { + return func(ctx context.Context, req any) (any, error) { return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || 
channelz.IsOn() { if channelz.IsOn() { @@ -1169,7 +1220,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1187,7 +1238,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. defer func() { if trInfo != nil { if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } trInfo.tr.Finish() @@ -1201,7 +1252,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1223,7 +1274,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } if len(binlogs) != 0 { - ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1294,7 +1344,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1304,12 +1354,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v interface{}) error { + df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), &stats.InPayload{ + sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, Length: len(d), @@ -1323,7 +1373,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(ctx, cm) } } if trInfo != nil { @@ -1331,7 +1381,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1356,7 +1406,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(ctx, sh) } } st := &binarylog.ServerTrailer{ @@ -1364,7 +1414,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return appErr @@ -1379,7 +1429,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -1406,8 +1456,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(ctx, sh) + binlog.Log(ctx, st) } } return err @@ -1421,8 +1471,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) } } if channelz.IsOn() { @@ -1440,7 +1490,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(stream, statusOK) @@ -1468,7 +1518,7 @@ func chainStreamServerInterceptors(s *Server) { } func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) } } @@ -1477,12 +1527,12 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf if curr == len(interceptors)-1 { return finalHandler } - return func(srv interface{}, stream ServerStream) error { + return func(srv any, stream ServerStream) error { return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1496,15 +1546,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1518,7 +1568,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if trInfo != nil { ss.mu.Lock() if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } ss.trInfo.tr.Finish() @@ -1535,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } for _, sh := range 
shs { - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } } @@ -1577,7 +1627,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(ctx, logEntry) } } @@ -1621,7 +1671,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error - var server interface{} + var server any if info != nil { server = info.serviceImpl } @@ -1655,7 +1705,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } t.WriteStatus(ss.s, appStatus) @@ -1673,53 +1723,87 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + ctx = contextWithServer(ctx, s) + var ti *traceInfo + if EnableTracing { + tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = trace.NewContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.Peer().Addr, + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } service := sm[:pos] method := sm[pos+1:] + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. 
+ stream.SetContext(ctx) + srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1728,19 +1812,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } @@ -1795,62 +1879,72 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.quit.Fire() + s.stop(false) +} - defer func() { - s.serveWG.Wait() - s.done.Fire() - }() +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() - listeners := s.lis - s.lis = nil - conns := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Broadcast() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. s.mu.Unlock() + s.serveWG.Wait() - for lis := range listeners { - lis.Close() + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() } - for _, cs := range conns { - for st := range cs { - st.Close(errors.New("Server.Stop called")) - } + + for len(s.conns) != 0 { + s.cv.Wait() } + s.conns = nil + if s.opts.numServerWorkers > 0 { - s.stopServerWorkers() + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() + } + + if graceful || s.opts.waitForHandlers { + s.handlersWG.Wait() } - s.mu.Lock() if s.events != nil { s.events.Finish() s.events = nil } - s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. 
-func (s *Server) GracefulStop() { - s.quit.Fire() - defer s.done.Fire() - - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } } +} - for lis := range s.lis { - lis.Close() - } - s.lis = nil +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { if !s.drain { for _, conns := range s.conns { for st := range conns { @@ -1859,22 +1953,14 @@ func (s *Server) GracefulStop() { } s.drain = true } +} - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. - s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil +// s.mu must be held by the caller. +func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() } - s.mu.Unlock() + s.lis = nil } // contentSubtype must be lowercase @@ -1888,11 +1974,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { } codec := encoding.GetCodec(contentSubtype) if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) return encoding.GetCodec(proto.Name) } return codec } +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + // SetHeader sets the header metadata to be sent from the server to the client. // The context provided must be the context passed to the server's handler. // @@ -2054,12 +2179,12 @@ func validateSendCompressor(name, clientCompressors string) error { // atomicSemaphore implements a blocking, counting semaphore. acquire should be // called synchronously; release may be called asynchronously. type atomicSemaphore struct { - n int64 + n atomic.Int64 wait chan struct{} } func (q *atomicSemaphore) acquire() { - if atomic.AddInt64(&q.n, -1) < 0 { + if q.n.Add(-1) < 0 { // We ran out of quota. Block until a release happens. <-q.wait } @@ -2070,12 +2195,14 @@ func (q *atomicSemaphore) release() { // concurrent calls to acquire, but also note that with synchronous calls to // acquire, as our system does, n will never be less than -1. 
There are + fairness issues (queuing) to consider if this was to be generalized. - if atomic.AddInt64(&q.n, 1) <= 0 { + if q.n.Add(1) <= 0 { // An acquire was waiting on us. Unblock it. q.wait <- struct{}{} } } func newHandlerQuota(n uint32) *atomicSemaphore { - return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)} + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go new file mode 100644 index 0000000000..48a64cfe8e --- /dev/null +++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with the specified length from the pool. + // + // The returned byte slice may not be zero-initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) } + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
+type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(any) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool that just allocates new buffers without pooling. +type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 7a552a9b78..4ab70e2d46 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -59,12 +59,22 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from the client side. Only the +// client side interfaces with a Picker, so this always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte @@ -134,7 +144,7 @@ type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte // Length is the size of the uncompressed payload data.
Does not include any diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index bcf2e4d81b..a93360efb8 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -99,25 +99,27 @@ func FromError(err error) (s *Status, ok bool) { } type grpcstatus interface{ GRPCStatus() *Status } if gs, ok := err.(grpcstatus); ok { - if gs.GRPCStatus() == nil { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { // Error has status nil, which maps to codes.OK. There // is no sensible behavior for this, so we turn it into // an error with codes.Unknown and discard the existing // status. return New(codes.Unknown, err.Error()), false } - return gs.GRPCStatus(), true + return grpcStatus, true } var gs grpcstatus if errors.As(err, &gs) { - if gs.GRPCStatus() == nil { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { // Error wraps an error that has status nil, which maps // to codes.OK. There is no sensible behavior for this, // so we turn it into an error with codes.Unknown and // discard the existing status. return New(codes.Unknown, err.Error()), false } - p := gs.GRPCStatus().Proto() + p := grpcStatus.Proto() p.Message = err.Error() return status.FromProto(p), true } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 10092685b2..d621f52b1a 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -47,6 +48,8 @@ import ( "google.golang.org/grpc/status" ) +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // StreamHandler defines the handler called by gRPC server to complete the // execution of a streaming RPC. // @@ -54,7 +57,7 @@ import ( // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error +type StreamHandler func(srv any, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used // on the server when registering services and on the client when initiating @@ -79,9 +82,9 @@ type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error + SendMsg(m any) error // Deprecated: See ClientStream and ServerStream documentation instead. 
- RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -90,7 +93,9 @@ type Stream interface { // status package. type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -126,7 +131,7 @@ type ClientStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -135,7 +140,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // NewStream creates a new Stream for the client side. This is typically @@ -155,11 +160,6 @@ type ClientStream interface { // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return nil, err - } - defer cc.idlenessMgr.onCallEnd() - // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -176,7 +176,17 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) 
+ + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) @@ -433,7 +443,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.URL.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -507,7 +517,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s - a.p = &parser{r: s} + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -788,23 +798,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD - noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() - if err == transport.ErrNoHeaders { - noHeader = true - return nil - } return toRPCErr(err) }, cs.commitAttemptLocked) + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF + } + if err != nil { cs.finish(err) - return nil, err + // Do not return the error. The user should get it by calling Recv(). + return nil, nil } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { // Only log if binary log is on and header has not been logged, and // there is actually headers to log. logEntry := &binarylog.ServerHeader{ @@ -820,6 +831,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { binlog.Log(cs.ctx, logEntry) } } + return m, nil } @@ -860,7 +872,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error cs.buffer = append(cs.buffer, op) } -func (cs *clientStream) SendMsg(m interface{}) (err error) { +func (cs *clientStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -904,7 +916,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return err } -func (cs *clientStream) RecvMsg(m interface{}) error { +func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() @@ -928,24 +940,6 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - - if len(cs.binlogs) != 0 { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } - } } return err } @@ -1001,18 +995,30 @@ func (cs *clientStream) finish(err error) { } } } + cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. 
In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. - if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { - c := &binarylog.Cancel{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, c) + // Only one of cancel or trailer needs to be logged. + if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } if err == nil { @@ -1028,7 +1034,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1055,7 +1061,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return nil } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} @@ -1270,7 +1276,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s} + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1348,7 +1354,7 @@ func (as *addrConnStream) Context() context.Context { return as.s.Context() } -func (as *addrConnStream) SendMsg(m interface{}) (err error) { +func (as *addrConnStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -1393,7 +1399,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { return nil } -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +func (as *addrConnStream) RecvMsg(m any) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. @@ -1512,7 +1518,7 @@ type ServerStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the @@ -1521,7 +1527,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // serverStream implements a server side Stream. 
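The rewritten binary-logging block in clientStream.finish above switches on err by identity (case errContextCanceled, errContextDeadline, ErrClientConnClosing). That only works because toRPCErr, changed earlier in rpc_util.go, now returns shared package-level sentinels instead of constructing a fresh status error on every call. A small self-contained sketch of the pattern; the sentinel and function names below are local stand-ins for gRPC's unexported ones:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Package-level sentinels, converted once, mirroring errContextCanceled
// and errContextDeadline above.
var (
	errCanceled = status.Error(codes.Canceled, context.Canceled.Error())
	errDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
)

// toRPCErr maps every context.Canceled to the same error value,
// so callers can compare with == or a switch.
func toRPCErr(err error) error {
	switch err {
	case context.Canceled:
		return errCanceled
	case context.DeadlineExceeded:
		return errDeadline
	}
	return err
}

func main() {
	switch err := toRPCErr(context.Canceled); err {
	case errCanceled, errDeadline:
		fmt.Println("cancellation: log a binarylog Cancel entry")
	default:
		fmt.Println("log a ServerTrailer entry with err:", err)
	}
}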
@@ -1602,7 +1608,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m interface{}) (err error) { +func (ss *serverStream) SendMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1610,7 +1616,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1677,7 +1683,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { return nil } -func (ss *serverStream) RecvMsg(m interface{}) (err error) { +func (ss *serverStream) RecvMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1685,7 +1691,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1757,7 +1763,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index bfa5dfa40e..07f0125768 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go deleted file mode 100644 index 3f77f4876e..0000000000 --- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go +++ /dev/null @@ -1,318 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package bufconn provides a net.Conn implemented by a buffer and related -// dialing and listening functionality. 
-package bufconn - -import ( - "context" - "fmt" - "io" - "net" - "sync" - "time" -) - -// Listener implements a net.Listener that creates local, buffered net.Conns -// via its Accept and Dial method. -type Listener struct { - mu sync.Mutex - sz int - ch chan net.Conn - done chan struct{} -} - -// Implementation of net.Error providing timeout -type netErrorTimeout struct { - error -} - -func (e netErrorTimeout) Timeout() bool { return true } -func (e netErrorTimeout) Temporary() bool { return false } - -var errClosed = fmt.Errorf("closed") -var errTimeout net.Error = netErrorTimeout{error: fmt.Errorf("i/o timeout")} - -// Listen returns a Listener that can only be contacted by its own Dialers and -// creates buffered connections between the two. -func Listen(sz int) *Listener { - return &Listener{sz: sz, ch: make(chan net.Conn), done: make(chan struct{})} -} - -// Accept blocks until Dial is called, then returns a net.Conn for the server -// half of the connection. -func (l *Listener) Accept() (net.Conn, error) { - select { - case <-l.done: - return nil, errClosed - case c := <-l.ch: - return c, nil - } -} - -// Close stops the listener. -func (l *Listener) Close() error { - l.mu.Lock() - defer l.mu.Unlock() - select { - case <-l.done: - // Already closed. - break - default: - close(l.done) - } - return nil -} - -// Addr reports the address of the listener. -func (l *Listener) Addr() net.Addr { return addr{} } - -// Dial creates an in-memory full-duplex network connection, unblocks Accept by -// providing it the server half of the connection, and returns the client half -// of the connection. -func (l *Listener) Dial() (net.Conn, error) { - return l.DialContext(context.Background()) -} - -// DialContext creates an in-memory full-duplex network connection, unblocks Accept by -// providing it the server half of the connection, and returns the client half -// of the connection. If ctx is Done, returns ctx.Err() -func (l *Listener) DialContext(ctx context.Context) (net.Conn, error) { - p1, p2 := newPipe(l.sz), newPipe(l.sz) - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-l.done: - return nil, errClosed - case l.ch <- &conn{p1, p2}: - return &conn{p2, p1}, nil - } -} - -type pipe struct { - mu sync.Mutex - - // buf contains the data in the pipe. It is a ring buffer of fixed capacity, - // with r and w pointing to the offset to read and write, respsectively. - // - // Data is read between [r, w) and written to [w, r), wrapping around the end - // of the slice if necessary. - // - // The buffer is empty if r == len(buf), otherwise if r == w, it is full. - // - // w and r are always in the range [0, cap(buf)) and [0, len(buf)]. - buf []byte - w, r int - - wwait sync.Cond - rwait sync.Cond - - // Indicate that a write/read timeout has occurred - wtimedout bool - rtimedout bool - - wtimer *time.Timer - rtimer *time.Timer - - closed bool - writeClosed bool -} - -func newPipe(sz int) *pipe { - p := &pipe{buf: make([]byte, 0, sz)} - p.wwait.L = &p.mu - p.rwait.L = &p.mu - - p.wtimer = time.AfterFunc(0, func() {}) - p.rtimer = time.AfterFunc(0, func() {}) - return p -} - -func (p *pipe) empty() bool { - return p.r == len(p.buf) -} - -func (p *pipe) full() bool { - return p.r < len(p.buf) && p.r == p.w -} - -func (p *pipe) Read(b []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - // Block until p has data. 
- for { - if p.closed { - return 0, io.ErrClosedPipe - } - if !p.empty() { - break - } - if p.writeClosed { - return 0, io.EOF - } - if p.rtimedout { - return 0, errTimeout - } - - p.rwait.Wait() - } - wasFull := p.full() - - n = copy(b, p.buf[p.r:len(p.buf)]) - p.r += n - if p.r == cap(p.buf) { - p.r = 0 - p.buf = p.buf[:p.w] - } - - // Signal a blocked writer, if any - if wasFull { - p.wwait.Signal() - } - - return n, nil -} - -func (p *pipe) Write(b []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.closed { - return 0, io.ErrClosedPipe - } - for len(b) > 0 { - // Block until p is not full. - for { - if p.closed || p.writeClosed { - return 0, io.ErrClosedPipe - } - if !p.full() { - break - } - if p.wtimedout { - return 0, errTimeout - } - - p.wwait.Wait() - } - wasEmpty := p.empty() - - end := cap(p.buf) - if p.w < p.r { - end = p.r - } - x := copy(p.buf[p.w:end], b) - b = b[x:] - n += x - p.w += x - if p.w > len(p.buf) { - p.buf = p.buf[:p.w] - } - if p.w == cap(p.buf) { - p.w = 0 - } - - // Signal a blocked reader, if any. - if wasEmpty { - p.rwait.Signal() - } - } - return n, nil -} - -func (p *pipe) Close() error { - p.mu.Lock() - defer p.mu.Unlock() - p.closed = true - // Signal all blocked readers and writers to return an error. - p.rwait.Broadcast() - p.wwait.Broadcast() - return nil -} - -func (p *pipe) closeWrite() error { - p.mu.Lock() - defer p.mu.Unlock() - p.writeClosed = true - // Signal all blocked readers and writers to return an error. - p.rwait.Broadcast() - p.wwait.Broadcast() - return nil -} - -type conn struct { - io.Reader - io.Writer -} - -func (c *conn) Close() error { - err1 := c.Reader.(*pipe).Close() - err2 := c.Writer.(*pipe).closeWrite() - if err1 != nil { - return err1 - } - return err2 -} - -func (c *conn) SetDeadline(t time.Time) error { - c.SetReadDeadline(t) - c.SetWriteDeadline(t) - return nil -} - -func (c *conn) SetReadDeadline(t time.Time) error { - p := c.Reader.(*pipe) - p.mu.Lock() - defer p.mu.Unlock() - p.rtimer.Stop() - p.rtimedout = false - if !t.IsZero() { - p.rtimer = time.AfterFunc(time.Until(t), func() { - p.mu.Lock() - defer p.mu.Unlock() - p.rtimedout = true - p.rwait.Broadcast() - }) - } - return nil -} - -func (c *conn) SetWriteDeadline(t time.Time) error { - p := c.Writer.(*pipe) - p.mu.Lock() - defer p.mu.Unlock() - p.wtimer.Stop() - p.wtimedout = false - if !t.IsZero() { - p.wtimer = time.AfterFunc(time.Until(t), func() { - p.mu.Lock() - defer p.mu.Unlock() - p.wtimedout = true - p.wwait.Broadcast() - }) - } - return nil -} - -func (*conn) LocalAddr() net.Addr { return addr{} } -func (*conn) RemoteAddr() net.Addr { return addr{} } - -type addr struct{} - -func (addr) Network() string { return "bufconn" } -func (addr) String() string { return "bufconn" } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 07a2d26b3e..9ded79321b 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -97,8 +97,8 @@ func truncate(x string, l int) string { // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? 
} @@ -111,7 +111,7 @@ func (p payload) String() string { type fmtStringer struct { format string - a []interface{} + a []any } func (f *fmtStringer) String() string { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 3cc7540621..f1aec4c0ad 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.56.3" +const Version = "1.61.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index a8e4732b3d..5da38a4099 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -35,7 +35,6 @@ if [[ "$1" = "-install" ]]; then # Install the pinned versions as defined in module tools. pushd ./test/tools go install \ - golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell @@ -77,12 +76,19 @@ fi not grep 'func Test[^(]' *_test.go not grep 'func Test[^(]' test/*.go +# - Check for typos in test function names +git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' +git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' + # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' + +# - Do not use "interface{}"; use "any" instead. +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' @@ -90,13 +96,15 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all usages of grpc_testing package are renamed when importing. +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' misspell -error . -# - gofmt, goimports, golint (with exceptions for generated code), go vet, -# go mod tidy. +# - gofmt, goimports, go vet, go mod tidy. # Perform these checks on each module inside gRPC. for MOD_FILE in $(find . -name 'go.mod'); do MOD_DIR=$(dirname ${MOD_FILE}) @@ -104,105 +112,79 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy -compat=1.17 + go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd done # - Collection of static analysis checks -# -# TODO(dfawley): don't use deprecated functions in examples or first-party -# plugins. 
-# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true -# Error if anything other than deprecation warnings are printed. -not grep -v "is deprecated:.*SA1019" "${SC_OUT}" -# Only ignore the following deprecated types/fields/functions. -not grep -Fv '.CredsBundle -.HeaderMap -.Metadata is deprecated: use Attributes -.NewAddress -.NewServiceConfig -.Type is deprecated: use Attributes -BuildVersion is deprecated -balancer.ErrTransientFailure -balancer.Picker -extDesc.Filename is deprecated -github.com/golang/protobuf/jsonpb is deprecated -grpc.CallCustomCodec -grpc.Code -grpc.Compressor -grpc.CustomCodec -grpc.Decompressor -grpc.MaxMsgSize -grpc.MethodConfig -grpc.NewGZIPCompressor -grpc.NewGZIPDecompressor -grpc.RPCCompressor -grpc.RPCDecompressor -grpc.ServiceConfig -grpc.WithCompressor -grpc.WithDecompressor -grpc.WithDialer -grpc.WithMaxMsgSize -grpc.WithServiceConfig -grpc.WithTimeout -http.CloseNotifier -info.SecurityVersion -proto is deprecated -proto.InternalMessageInfo is deprecated -proto.EnumName is deprecated -proto.ErrInternalBadWireType is deprecated -proto.FileDescriptor is deprecated -proto.Marshaler is deprecated -proto.MessageType is deprecated -proto.RegisterEnum is deprecated -proto.RegisterFile is deprecated -proto.RegisterType is deprecated -proto.RegisterExtension is deprecated -proto.RegisteredExtension is deprecated -proto.RegisteredExtensions is deprecated -proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated -resolver.Backend -resolver.GRPCLB +staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true + +# Error for anything other than checks that need exclusions. +grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" + +# Exclude underscore checks for generated code. +grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' + +# Error for duplicate imports not including grpc protos. +grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +channelz/grpc_channelz_v1" +go-control-plane/envoy +grpclb/grpc_lb_v1" +health/grpc_health_v1" +interop/grpc_testing" +orca/v3" +proto/grpc_gcp" +proto/grpc_lookup_v1" +reflection/grpc_reflection_v1" +reflection/grpc_reflection_v1alpha" +XXXXX PleaseIgnoreUnused' + +# Error for any package comments not in generated code. +grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" + +# Only ignore the following deprecated types/fields/functions and exclude +# generated code. +grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +XXXXX Protobuf related deprecation errors: +"github.com/golang/protobuf +.pb.go: +grpc_testing_not_regenerate +: ptypes. +proto.RegisterType +XXXXX gRPC internal usage deprecation errors: +"google.golang.org/grpc +: grpc. +: v1alpha. +: v1alphareflectionpb. +BalancerAttributes is deprecated: +CredsBundle is deprecated: +Metadata is deprecated: use Attributes instead. +NewSubConn is deprecated: +OverrideServerName is deprecated: +RemoveSubConn is deprecated: +SecurityVersion is deprecated: Target is deprecated: Use the Target field in the BuildOptions instead. -xxx_messageInfo_ -' "${SC_OUT}" - -# - special golint on package comments. -lint_package_comment_per_package() { - # Number of files in this go package. 
- fileCount=$(go list -f '{{len .GoFiles}}' $1) - if [ ${fileCount} -eq 0 ]; then - return 0 - fi - # Number of package errors generated by golint. - lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") - # golint complains about every file that's missing the package comment. If the - # number of files for this package is greater than the number of errors, there's - # at least one file with package comment, good. Otherwise, fail. - if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then - echo "Package $1 (with ${fileCount} files) is missing package comment" - return 1 - fi -} -lint_package_comment() { - set +ex - - count=0 - for i in $(go list ./...); do - lint_package_comment_per_package "$i" - ((count += $?)) - done - - set -ex - return $count -} -lint_package_comment +UpdateAddresses is deprecated: +UpdateSubConnState is deprecated: +balancer.ErrTransientFailure is deprecated: +grpc/reflection/v1alpha/reflection.proto +XXXXX xDS deprecated fields we support +.ExactMatch +.PrefixMatch +.SafeRegexMatch +.SuffixMatch +GetContainsMatch +GetExactMatch +GetMatchSubjectAltNames +GetPrefixMatch +GetSafeRegexMatch +GetSuffixMatch +GetTlsCertificateCertificateProviderInstance +GetValidationContextCertificateProviderInstance +XXXXX PleaseIgnoreUnused' echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 5f28148d80..f47902371a 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -11,6 +11,7 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/json" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" @@ -23,7 +24,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -37,7 +38,7 @@ type UnmarshalOptions struct { // required fields will not return an error. AllowPartial bool - // If DiscardUnknown is set, unknown fields are ignored. + // If DiscardUnknown is set, unknown fields and enum name values are ignored. DiscardUnknown bool // Resolver is used for looking up types when unmarshaling @@ -47,9 +48,13 @@ type UnmarshalOptions struct { protoregistry.MessageTypeResolver protoregistry.ExtensionTypeResolver } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // It will clear the message first before setting the fields. // If it returns an error, the given message may be partially set. 
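
The RecursionLimit field added to UnmarshalOptions above caps how deeply nested a message may be during protojson decoding; zero selects the protowire default, and exceeding the limit produces an "exceeded max recursion depth" error instead of unbounded recursion. A minimal usage sketch against this vendored protobuf version; the structpb message and the limit value are illustrative:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Each nested JSON object decodes into a nested Struct/Value message,
	// consuming one level of the recursion budget.
	data := []byte(`{"a": {"b": {"c": {"d": 1}}}}`)
	var s structpb.Struct
	err := protojson.UnmarshalOptions{RecursionLimit: 3}.Unmarshal(data, &s)
	// With a limit smaller than the nesting depth, Unmarshal is expected
	// to report exceeding the max recursion depth.
	fmt.Println(err)
}
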
@@ -67,6 +72,9 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { if o.Resolver == nil { o.Resolver = protoregistry.GlobalTypes } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } dec := decoder{json.NewDecoder(b), o} if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { @@ -114,6 +122,10 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { // unmarshalMessage unmarshals a message into the given protoreflect.Message. func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { return unmarshal(d, m) } @@ -266,7 +278,9 @@ func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.Field if err != nil { return err } - m.Set(fd, val) + if val.IsValid() { + m.Set(fd, val) + } return nil } @@ -329,7 +343,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. } case protoreflect.EnumKind: - if v, ok := unmarshalEnum(tok, fd); ok { + if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { return v, nil } @@ -474,7 +488,7 @@ func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { return protoreflect.ValueOfBytes(b), true } -func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { switch tok.Kind() { case json.String: // Lookup EnumNumber based on name. @@ -482,6 +496,9 @@ func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflec if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { return protoreflect.ValueOfEnum(enumVal.Number()), true } + if discardUnknown { + return protoreflect.Value{}, true + } case json.Number: if n, ok := tok.Int(32); ok { @@ -542,7 +559,9 @@ func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDesc if err != nil { return err } - list.Append(val) + if val.IsValid() { + list.Append(val) + } } } @@ -609,8 +628,9 @@ Loop: if err != nil { return err } - - mmap.Set(pkey, pval) + if pval.IsValid() { + mmap.Set(pkey, pval) + } } return nil diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go index 21d5d2cb18..ae71007c18 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -6,6 +6,6 @@ // format. It follows the guide at // https://protobuf.dev/programming-guides/proto3#json. // -// This package produces a different output than the standard "encoding/json" +// This package produces a different output than the standard [encoding/json] // package, which does not operate correctly on protocol buffer messages. 
package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 66b95870e9..3f75098b6f 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -31,7 +31,7 @@ func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in JSON format using default options. +// Marshal writes the given [proto.Message] in JSON format using default options. // Do not depend on the output being stable. It may change over time across // different versions of the program. func Marshal(m proto.Message) ([]byte, error) { @@ -81,6 +81,25 @@ type MarshalOptions struct { // ╚═══════╧════════════════════════════╝ EmitUnpopulated bool + // EmitDefaultValues specifies whether to emit default-valued primitive fields, + // empty lists, and empty maps. The fields affected are as follows: + // ╔═══════╤════════════════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════════════════╣ + // ║ false │ non-optional scalar boolean fields ║ + // ║ 0 │ non-optional scalar numeric fields ║ + // ║ "" │ non-optional scalar string/byte fields ║ + // ║ [] │ empty repeated fields ║ + // ║ {} │ empty map fields ║ + // ╚═══════╧════════════════════════════════════════╝ + // + // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields, + // i.e. presence-sensing fields that are omitted will remain omitted to preserve + // presence-sensing. + // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates + // a strict superset of the latter. + EmitDefaultValues bool + // Resolver is used for looking up types when expanding google.protobuf.Any // messages. If nil, this defaults to using protoregistry.GlobalTypes. Resolver interface { @@ -102,7 +121,7 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal marshals the given proto.Message in the JSON format using options in +// Marshal marshals the given [proto.Message] in the JSON format using options in // MarshalOptions. Do not depend on the output being stable. It may change over // time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { @@ -178,7 +197,11 @@ func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protorefl // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range // method to additionally iterate over unpopulated fields. 
-type unpopulatedFieldRanger struct{ protoreflect.Message } +type unpopulatedFieldRanger struct { + protoreflect.Message + + skipNull bool +} func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { fds := m.Descriptor().Fields() @@ -192,6 +215,9 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil if isProto2Scalar || isSingularMessage { + if m.skipNull { + continue + } v = protoreflect.Value{} // use invalid value to emit null } if !f(fd, v) { @@ -217,8 +243,11 @@ func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { defer e.EndObject() var fields order.FieldRanger = m - if e.opts.EmitUnpopulated { - fields = unpopulatedFieldRanger{m} + switch { + case e.opts.EmitUnpopulated: + fields = unpopulatedFieldRanger{Message: m, skipNull: false} + case e.opts.EmitDefaultValues: + fields = unpopulatedFieldRanger{Message: m, skipNull: true} } if typeURL != "" { fields = typeURLFieldRanger{fields, typeURL} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 6c37d41744..4b177c8206 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m protoreflect.Message) error { // Use another decoder to parse the unread bytes for @type field. This // avoids advancing a read from current decoder because the current JSON // object may contain the fields of the embedded type. - dec := decoder{d.Clone(), UnmarshalOptions{}} + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} tok, err := findTypeURL(dec) switch err { case errEmptyObject: @@ -308,48 +308,29 @@ Loop: // array) in order to advance the read to the next JSON value. It relies on // the decoder returning an error if the types are not in valid sequence. func (d decoder) skipJSONValue() error { - tok, err := d.Read() - if err != nil { - return err - } - // Only need to continue reading for objects and arrays. - switch tok.Kind() { - case json.ObjectOpen: - for { - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case json.ObjectClose: - return nil - case json.Name: - // Skip object field value. - if err := d.skipJSONValue(); err != nil { - return err - } - } + var open int + for { + tok, err := d.Read() + if err != nil { + return err } - - case json.ArrayOpen: - for { - tok, err := d.Peek() - if err != nil { - return err - } - switch tok.Kind() { - case json.ArrayClose: - d.Read() - return nil - default: - // Skip array item. - if err := d.skipJSONValue(); err != nil { - return err - } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") } + case json.EOF: + // This can only happen if there's a bug in Decoder.Read. + // Avoid an infinite loop if this does happen. 
+ return errors.New("unexpected EOF") + } + if open == 0 { + return nil } } - return nil } // unmarshalAnyValue unmarshals the given custom-type message from the JSON diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 4921b2d4a7..a45f112bce 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -21,7 +21,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -51,7 +51,7 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { @@ -739,7 +739,9 @@ func (d decoder) skipValue() error { case text.ListClose: return nil case text.MessageOpen: - return d.skipMessageValue() + if err := d.skipMessageValue(); err != nil { + return err + } default: // Skip items. This will not validate whether skipped values are // of the same type or not, same behavior as C++ diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 722a7b41df..95967e8112 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -33,7 +33,7 @@ func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in textproto format using default +// Marshal writes the given [proto.Message] in textproto format using default // options. Do not depend on the output being stable. It may change over time // across different versions of the program. func Marshal(m proto.Message) ([]byte, error) { @@ -97,7 +97,7 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal writes the given proto.Message in textproto format using options in +// Marshal writes the given [proto.Message] in textproto format using options in // MarshalOptions object. Do not depend on the output being stable. It may // change over time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index f4b4686cf9..e942bc983e 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -6,7 +6,7 @@ // See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, -// use the "google.golang.org/protobuf/proto" package instead. +// use the [google.golang.org/protobuf/proto] package instead. 
package protowire import ( @@ -87,7 +87,7 @@ func ParseError(n int) error { // ConsumeField parses an entire field record (both tag and value) and returns // the field number, the wire type, and the total length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). // // The total length includes the tag header and the end group marker (if the // field is a group). @@ -104,8 +104,8 @@ func ConsumeField(b []byte) (Number, Type, int) { } // ConsumeFieldValue parses a field value and returns its length. -// This assumes that the field Number and wire Type have already been parsed. -// This returns a negative length upon an error (see ParseError). +// This assumes that the field [Number] and wire [Type] have already been parsed. +// This returns a negative length upon an error (see [ParseError]). // // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. @@ -164,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte { } // ConsumeTag parses b as a varint-encoded tag, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeTag(b []byte) (Number, Type, int) { v, n := ConsumeVarint(b) if n < 0 { @@ -263,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte { } // ConsumeVarint parses b as a varint-encoded uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeVarint(b []byte) (v uint64, n int) { var y uint64 if len(b) <= 0 { @@ -384,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte { } // ConsumeFixed32 parses b as a little-endian uint32, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed32(b []byte) (v uint32, n int) { if len(b) < 4 { return 0, errCodeTruncated @@ -412,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte { } // ConsumeFixed64 parses b as a little-endian uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed64(b []byte) (v uint64, n int) { if len(b) < 8 { return 0, errCodeTruncated @@ -432,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte { } // ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeBytes(b []byte) (v []byte, n int) { m, n := ConsumeVarint(b) if n < 0 { @@ -456,7 +456,7 @@ func AppendString(b []byte, v string) []byte { } // ConsumeString parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeString(b []byte) (v string, n int) { bb, n := ConsumeBytes(b) return string(bb), n @@ -471,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte { // ConsumeGroup parses b as a group value until the trailing end group marker, // and verifies that the end marker matches the provided num. 
The value v // does not contain the end marker, while the length does contain the end marker. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeGroup(num Number, b []byte) (v []byte, n int) { n = ConsumeFieldValue(num, StartGroupType, b) if n < 0 { @@ -495,8 +495,8 @@ func SizeGroup(num Number, n int) int { return n + SizeTag(num) } -// DecodeTag decodes the field Number and wire Type from its unified form. -// The Number is -1 if the decoded field number overflows int32. +// DecodeTag decodes the field [Number] and wire [Type] from its unified form. +// The [Number] is -1 if the decoded field number overflows int32. // Other than overflow, this does not check for field number validity. func DecodeTag(x uint64) (Number, Type) { // NOTE: MessageSet allows for larger field numbers than normal. @@ -506,7 +506,7 @@ func DecodeTag(x uint64) (Number, Type) { return Number(x >> 3), Type(x & 7) } -// EncodeTag encodes the field Number and wire Type into its unified form. +// EncodeTag encodes the field [Number] and wire [Type] into its unified form. func EncodeTag(num Number, typ Type) uint64 { return uint64(num)<<3 | uint64(typ&7) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index db5248e1b5..a45625c8d1 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -83,7 +83,13 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records - rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + rv := reflect.ValueOf(vs.Get(i)) + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPublic"), "IsPublic"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + }...) ss = append(ss, "{"+rs.Join()+"}") } return start + joinStrings(ss, allowMulti) + end @@ -92,34 +98,26 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } } -// descriptorAccessors is a list of accessors to print for each descriptor. -// -// Do not print all accessors since some contain redundant information, -// while others are pointers that we do not want to follow since the descriptor -// is actually a cyclic graph. -// -// Using a list allows us to print the accessors in a sensible order. 
-var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +type methodAndName struct { + method reflect.Value + name string } func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { - io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil)) } -func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { + +func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { + return formatDescOpt(t, isRoot, allowMulti, record) +} + +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -129,26 +127,60 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { } _, isFile := t.(protoreflect.FileDescriptor) - rs := records{allowMulti: allowMulti} + rs := records{ + allowMulti: allowMulti, + record: record, + } if t.IsPlaceholder() { if isFile { - rs.Append(rv, "Path", "Package", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } else { - rs.Append(rv, "FullName", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("FullName"), "FullName"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } } else { switch { case isFile: - rs.Append(rv, "Syntax") + rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"}) case isRoot: - rs.Append(rv, "Syntax", "FullName") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Syntax"), "Syntax"}, + {rv.MethodByName("FullName"), "FullName"}, + }...) 
default: - rs.Append(rv, "Name") + rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"}) } switch t := t.(type) { case protoreflect.FieldDescriptor: - for _, s := range descriptorAccessors[rt] { - switch s { + accessors := []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + {rv.MethodByName("Cardinality"), "Cardinality"}, + {rv.MethodByName("Kind"), "Kind"}, + {rv.MethodByName("HasJSONName"), "HasJSONName"}, + {rv.MethodByName("JSONName"), "JSONName"}, + {rv.MethodByName("HasPresence"), "HasPresence"}, + {rv.MethodByName("IsExtension"), "IsExtension"}, + {rv.MethodByName("IsPacked"), "IsPacked"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + {rv.MethodByName("IsList"), "IsList"}, + {rv.MethodByName("IsMap"), "IsMap"}, + {rv.MethodByName("MapKey"), "MapKey"}, + {rv.MethodByName("MapValue"), "MapValue"}, + {rv.MethodByName("HasDefault"), "HasDefault"}, + {rv.MethodByName("Default"), "Default"}, + {rv.MethodByName("ContainingOneof"), "ContainingOneof"}, + {rv.MethodByName("ContainingMessage"), "ContainingMessage"}, + {rv.MethodByName("Message"), "Message"}, + {rv.MethodByName("Enum"), "Enum"}, + } + for _, s := range accessors { + switch s.name { case "MapKey": if k := t.MapKey(); k != nil { rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) @@ -157,20 +189,20 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { if v := t.MapValue(); v != nil { switch v.Kind() { case protoreflect.EnumKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())}) case protoreflect.MessageKind, protoreflect.GroupKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())}) default: - rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()}) } } case "ContainingOneof": if od := t.ContainingOneof(); od != nil { - rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())}) } case "ContainingMessage": if t.IsExtension() { - rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())}) } case "Message": if !t.IsMap() { @@ -187,13 +219,61 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { ss = append(ss, string(fs.Get(i).Name())) } if len(ss) > 0 { - rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) } - default: - rs.Append(rv, descriptorAccessors[rt]...) + + case protoreflect.FileDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("Imports"), "Imports"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + {rv.MethodByName("Services"), "Services"}, + }...) 
+ + case protoreflect.MessageDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("IsMapEntry"), "IsMapEntry"}, + {rv.MethodByName("Fields"), "Fields"}, + {rv.MethodByName("Oneofs"), "Oneofs"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"}, + {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + }...) + + case protoreflect.EnumDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Values"), "Values"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + }...) + + case protoreflect.EnumValueDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + }...) + + case protoreflect.ServiceDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Methods"), "Methods"}, + }...) + + case protoreflect.MethodDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Input"), "Input"}, + {rv.MethodByName("Output"), "Output"}, + {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"}, + {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"}, + }...) } - if rv.MethodByName("GoType").IsValid() { - rs.Append(rv, "GoType") + if m := rv.MethodByName("GoType"); m.IsValid() { + rs.Append(rv, methodAndName{m, "GoType"}) } } return start + rs.Join() + end @@ -202,19 +282,34 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { type records struct { recs [][2]string allowMulti bool + + // record is a function that will be called for every Append() or + // AppendRecs() call, to be used for testing with the + // InternalFormatDescOptForTesting function. + record func(string) } -func (rs *records) Append(v reflect.Value, accessors ...string) { +func (rs *records) AppendRecs(fieldName string, newRecs [2]string) { + if rs.record != nil { + rs.record(fieldName) + } + rs.recs = append(rs.recs, newRecs) +} + +func (rs *records) Append(v reflect.Value, accessors ...methodAndName) { for _, a := range accessors { + if rs.record != nil { + rs.record(a.name) + } var rv reflect.Value - if m := v.MethodByName(a); m.IsValid() { - rv = m.Call(nil)[0] + if a.method.IsValid() { + rv = a.method.Call(nil)[0] } if v.Kind() == reflect.Struct && !rv.IsValid() { - rv = v.FieldByName(a) + rv = v.FieldByName(a.name) } if !rv.IsValid() { - panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name)) } if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] @@ -261,7 +356,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { default: s = fmt.Sprint(v) } - rs.recs = append(rs.recs, [2]string{a, s}) + rs.recs = append(rs.recs, [2]string{a.name, s}) } } diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go new file mode 100644 index 0000000000..14656b65ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package editiondefaults contains the binary representation of the editions +// defaults. +package editiondefaults + +import _ "embed" + +//go:embed editions_defaults.binpb +var Defaults []byte diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb new file mode 100644 index 0000000000..18f0756874 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb @@ -0,0 +1,4 @@ + +  (0 +  (0 +  (0 ( \ No newline at end of file diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d043a6ebe0..d2b3ac031e 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -121,7 +121,7 @@ func (d *Decoder) Read() (Token, error) { case ObjectClose: if len(d.openStack) == 0 || - d.lastToken.kind == comma || + d.lastToken.kind&(Name|comma) != 0 || d.openStack[len(d.openStack)-1] != ObjectOpen { return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 7c3689baee..8826bcf402 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -21,11 +21,26 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) +// Edition is an Enum for proto2.Edition +type Edition int32 + +// These values align with the value of Enum in descriptor.proto which allows +// direct conversion between the proto enum and this enum. +const ( + EditionUnknown Edition = 0 + EditionProto2 Edition = 998 + EditionProto3 Edition = 999 + Edition2023 Edition = 1000 + EditionUnsupported Edition = 100000 +) + // The types in this file may have a suffix: // • L0: Contains fields common to all descriptors (except File) and // must be initialized up front. // • L1: Contains fields specific to a descriptor and -// must be initialized up front. +// must be initialized up front. If the associated proto uses Editions, the +// Editions features must always be resolved. If not explicitly set, the +// appropriate default must be resolved and set. // • L2: Contains fields that are lazily initialized when constructing // from the raw file descriptor. When constructing as a literal, the L2 // fields must be initialized up front. 
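
The Edition enum and EditionFeatures plumbing introduced in this filedesc/desc.go diff are resolved hierarchically: a descriptor starts from its edition's defaults (or from its parent's already-resolved features), and features set explicitly in its options override those defaults. A condensed sketch of that merge order using stand-in types; this is not the internal filedesc API, and the edition-2023 values shown are illustrative:

package main

import "fmt"

// features stands in for the vendored EditionFeatures flags.
type features struct {
	IsFieldPresence bool // field_presence == EXPLICIT
	IsPacked        bool // repeated_field_encoding == PACKED
}

// resolve copies the inherited defaults, then applies explicit overrides,
// mirroring the parent-then-options order used during descriptor init.
func resolve(inherited features, overrides ...func(*features)) features {
	f := inherited
	for _, o := range overrides {
		o(&f)
	}
	return f
}

func main() {
	edition2023 := features{IsFieldPresence: true, IsPacked: true}
	// A field whose options turn off packed encoding keeps the inherited
	// presence behavior but loses packing.
	field := resolve(edition2023, func(f *features) { f.IsPacked = false })
	fmt.Printf("%+v\n", field) // {IsFieldPresence:true IsPacked:false}
}
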
@@ -44,6 +59,7 @@ type (
 	}
 	FileL1 struct {
 		Syntax protoreflect.Syntax
+		Edition Edition // Only used if Syntax == Editions
 
 		Path    string
 		Package protoreflect.FullName
@@ -51,12 +67,41 @@
 		Messages   Messages
 		Extensions Extensions
 		Services   Services
+
+		EditionFeatures EditionFeatures
 	}
 	FileL2 struct {
 		Options   func() protoreflect.ProtoMessage
 		Imports   FileImports
 		Locations SourceLocations
 	}
+
+	EditionFeatures struct {
+		// IsFieldPresence is true if field_presence is EXPLICIT
+		// https://protobuf.dev/editions/features/#field_presence
+		IsFieldPresence bool
+		// IsLegacyRequired is true if field_presence is LEGACY_REQUIRED
+		// https://protobuf.dev/editions/features/#field_presence
+		IsLegacyRequired bool
+		// IsOpenEnum is true if enum_type is OPEN
+		// https://protobuf.dev/editions/features/#enum_type
+		IsOpenEnum bool
+		// IsPacked is true if repeated_field_encoding is PACKED
+		// https://protobuf.dev/editions/features/#repeated_field_encoding
+		IsPacked bool
+		// IsUTF8Validated is true if utf8_validation is VERIFY
+		// https://protobuf.dev/editions/features/#utf8_validation
+		IsUTF8Validated bool
+		// IsDelimitedEncoded is true if message_encoding is DELIMITED
+		// https://protobuf.dev/editions/features/#message_encoding
+		IsDelimitedEncoded bool
+		// IsJSONCompliant is true if json_format is ALLOW
+		// https://protobuf.dev/editions/features/#json_format
+		IsJSONCompliant bool
+		// GenerateLegacyUnmarshalJSON determines if the plugin generates the
+		// UnmarshalJSON([]byte) error method for enums.
+		GenerateLegacyUnmarshalJSON bool
+	}
 )
 
 func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
@@ -117,6 +162,8 @@ type (
 	}
 	EnumL1 struct {
 		eagerValues bool // controls whether EnumL2.Values is already populated
+
+		EditionFeatures EditionFeatures
 	}
 	EnumL2 struct {
 		Options func() protoreflect.ProtoMessage
@@ -178,6 +225,8 @@ type (
 		Extensions   Extensions
 		IsMapEntry   bool // promoted from google.protobuf.MessageOptions
 		IsMessageSet bool // promoted from google.protobuf.MessageOptions
+
+		EditionFeatures EditionFeatures
 	}
 	MessageL2 struct {
 		Options func() protoreflect.ProtoMessage
@@ -210,6 +259,8 @@ type (
 		ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
 		Enum            protoreflect.EnumDescriptor
 		Message         protoreflect.MessageDescriptor
+
+		EditionFeatures EditionFeatures
 	}
 
 	Oneof struct {
@@ -219,6 +270,8 @@ type (
 	OneofL1 struct {
 		Options func() protoreflect.ProtoMessage
 		Fields  OneofFields // must be consistent with Message.Fields.ContainingOneof
+
+		EditionFeatures EditionFeatures
 	}
 )
 
@@ -268,23 +321,36 @@ func (fd *Field) Options() protoreflect.ProtoMessage {
 }
 func (fd *Field) Number() protoreflect.FieldNumber      { return fd.L1.Number }
 func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality }
-func (fd *Field) Kind() protoreflect.Kind               { return fd.L1.Kind }
-func (fd *Field) HasJSONName() bool                     { return fd.L1.StringName.hasJSON }
-func (fd *Field) JSONName() string                      { return fd.L1.StringName.getJSON(fd) }
-func (fd *Field) TextName() string                      { return fd.L1.StringName.getText(fd) }
+func (fd *Field) Kind() protoreflect.Kind {
+	return fd.L1.Kind
+}
+func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON }
+func (fd *Field) JSONName() string  { return fd.L1.StringName.getJSON(fd) }
+func (fd *Field) TextName() string  { return fd.L1.StringName.getText(fd) }
 func (fd *Field) HasPresence() bool {
-	return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 ||
fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + if fd.L1.Cardinality == protoreflect.Repeated { + return false + } + explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence + return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { - switch fd.L1.Kind { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - default: - return true - } + if fd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch fd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.EditionFeatures.IsPacked + } + if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 { + // proto3 repeated fields are packed by default. + return !fd.L1.HasPacked || fd.L1.IsPacked } return fd.L1.IsPacked } @@ -333,6 +399,9 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. func (fd *Field) EnforceUTF8() bool { + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.EditionFeatures.IsUTF8Validated + } if fd.L1.HasEnforceUTF8 { return fd.L1.EnforceUTF8 } @@ -359,10 +428,11 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number protoreflect.FieldNumber - Extendee protoreflect.MessageDescriptor - Cardinality protoreflect.Cardinality - Kind protoreflect.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind + EditionFeatures EditionFeatures } ExtensionL2 struct { Options func() protoreflect.ProtoMessage diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 4a1584c9d2..237e64fd23 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -5,6 +5,7 @@ package filedesc import ( + "fmt" "sync" "google.golang.org/protobuf/encoding/protowire" @@ -98,6 +99,7 @@ func (fd *File) unmarshalSeed(b []byte) { var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int + var options []byte b0 := b for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -113,6 +115,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Syntax = protoreflect.Proto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + case "editions": + fd.L1.Syntax = protoreflect.Editions default: panic("invalid syntax") } @@ -120,6 +124,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_Options_field_number: + options = v case genid.FileDescriptorProto_EnumType_field_number: if 
prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -154,6 +160,13 @@ func (fd *File) unmarshalSeed(b []byte) { numServices++ } prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Edition_field_number: + fd.L1.Edition = Edition(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -166,6 +179,15 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Syntax = protoreflect.Proto2 } + if fd.L1.Syntax == protoreflect.Editions { + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) + } + + // Parse editions features from options if any + if options != nil { + fd.unmarshalSeedOptions(options) + } + // Must allocate all declarations before parsing each descriptor type // to ensure we handled all descriptors in "flattened ordering". if numEnums > 0 { @@ -219,6 +241,28 @@ func (fd *File) unmarshalSeed(b []byte) { } } +func (fd *File) unmarshalSeedOptions(b []byte) { + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileOptions_Features_field_number: + if fd.Syntax() != protoreflect.Editions { + panic(fmt.Sprintf("invalid descriptor: using edition features in a proto with syntax %s", fd.Syntax())) + } + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd @@ -275,6 +319,7 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i + md.L1.EditionFeatures = featuresFromParentDesc(md.Parent()) var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int @@ -380,6 +425,13 @@ func (md *Message) unmarshalSeedOptions(b []byte) { case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MessageOptions_Features_field_number: + md.L1.EditionFeatures = unmarshalFeatureSet(v, md.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 736a19a75b..482a61cc10 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -414,6 +414,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i + fd.L1.EditionFeatures = featuresFromParentDesc(fd.Parent()) var rawTypeName []byte var rawOptions []byte @@ -465,6 +466,12 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } + if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + fd.L1.Kind = protoreflect.GroupKind + } + if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired { + fd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch 
fd.L1.Kind { @@ -497,6 +504,13 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.HasEnforceUTF8 = true fd.L1.EnforceUTF8 = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -534,6 +548,7 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { var rawTypeName []byte var rawOptions []byte + xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee) xd.L2 = new(ExtensionL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -565,6 +580,12 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = b[m:] } } + if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } + if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired { + xd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { @@ -589,6 +610,13 @@ func (xd *Extension) unmarshalOptions(b []byte) { case genid.FieldOptions_Packed_field_number: xd.L2.IsPacked = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go new file mode 100644 index 0000000000..0375a49d40 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -0,0 +1,142 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package filedesc + +import ( + "fmt" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var defaultsCache = make(map[Edition]EditionFeatures) + +func init() { + unmarshalEditionDefaults(editiondefaults.Defaults) +} + +func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + default: + panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num)) + } + } + return parent +} + +func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSet_FieldPresence_field_number: + parent.IsFieldPresence = v == genid.FeatureSet_EXPLICIT_enum_value || v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + parent.IsLegacyRequired = v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + case genid.FeatureSet_EnumType_field_number: + parent.IsOpenEnum = v == genid.FeatureSet_OPEN_enum_value + case genid.FeatureSet_RepeatedFieldEncoding_field_number: + parent.IsPacked = v == genid.FeatureSet_PACKED_enum_value + case genid.FeatureSet_Utf8Validation_field_number: + parent.IsUTF8Validated = v == genid.FeatureSet_VERIFY_enum_value + case genid.FeatureSet_MessageEncoding_field_number: + parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value + case genid.FeatureSet_JsonFormat_field_number: + parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + default: + panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num)) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + parent = unmarshalGoFeature(v, parent) + } + } + } + + return parent +} + +func featuresFromParentDesc(parentDesc protoreflect.Descriptor) EditionFeatures { + var parentFS EditionFeatures + switch p := parentDesc.(type) { + case *File: + parentFS = p.L1.EditionFeatures + case *Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + return parentFS +} + +func unmarshalEditionDefault(b []byte) { + var ed Edition + var fs EditionFeatures + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number: + ed = Edition(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number: + fs = unmarshalFeatureSet(v, fs) + } + } + } + defaultsCache[ed] = fs +} + +func unmarshalEditionDefaults(b []byte) { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.FeatureSetDefaults_Defaults_field_number: + def, m := protowire.ConsumeBytes(b) + b = b[m:] + unmarshalEditionDefault(def) + case genid.FeatureSetDefaults_MinimumEdition_field_number, +
genid.FeatureSetDefaults_MaximumEdition_field_number: + // We don't care about the minimum and maximum editions. If the + // edition we are looking for later on is not in the cache we know + // it is outside of the range between minimum and maximum edition. + _, m := protowire.ConsumeVarint(b) + b = b[m:] + default: + panic(fmt.Sprintf("unknown field number %d while unmarshalling EditionDefault", num)) + } + } +} + +func getFeaturesFor(ed Edition) EditionFeatures { + if def, ok := defaultsCache[ed]; ok { + return def + } + panic(fmt.Sprintf("unsupported edition: %v", ed)) +} diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 136f1b2157..40272c893f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -12,6 +12,27 @@ import ( const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" +// Full and short names for google.protobuf.Edition. +const ( + Edition_enum_fullname = "google.protobuf.Edition" + Edition_enum_name = "Edition" +) + +// Enum values for google.protobuf.Edition. +const ( + Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_PROTO2_enum_value = 998 + Edition_EDITION_PROTO3_enum_value = 999 + Edition_EDITION_2023_enum_value = 1000 + Edition_EDITION_2024_enum_value = 1001 + Edition_EDITION_1_TEST_ONLY_enum_value = 1 + Edition_EDITION_2_TEST_ONLY_enum_value = 2 + Edition_EDITION_99997_TEST_ONLY_enum_value = 99997 + Edition_EDITION_99998_TEST_ONLY_enum_value = 99998 + Edition_EDITION_99999_TEST_ONLY_enum_value = 99999 + Edition_EDITION_MAX_enum_value = 2147483647 +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -81,7 +102,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 - FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14 ) // Names for google.protobuf.DescriptorProto.
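Taken together, the editions.go helpers above implement hierarchical feature resolution: `unmarshalSeed` seeds a file's `EditionFeatures` from the per-edition `defaultsCache` via `getFeaturesFor`, and each message, field, or extension starts from `featuresFromParentDesc` and lets `unmarshalFeatureSet` overwrite only the values its own `FeatureSet` option sets explicitly. A minimal, self-contained sketch of that inheritance model — the struct, map values, and merge helper here are illustrative stand-ins, not the library's internal API:

```go
package main

import "fmt"

// editionFeatures is an illustrative stand-in for filedesc.EditionFeatures.
type editionFeatures struct {
	isFieldPresence bool
	isPacked        bool
}

// Per-edition defaults, analogous to defaultsCache, which init() populates
// from the embedded editiondefaults.Defaults blob. Values here are assumed.
var defaults = map[int]editionFeatures{
	2023: {isFieldPresence: true, isPacked: true},
}

// merge mimics unmarshalFeatureSet: copy the parent's features, then
// overwrite only the fields an explicit FeatureSet option sets.
func merge(parent editionFeatures, override func(*editionFeatures)) editionFeatures {
	fs := parent
	if override != nil {
		override(&fs)
	}
	return fs
}

func main() {
	file := defaults[2023]  // like getFeaturesFor(fd.L1.Edition)
	msg := merge(file, nil) // a message with no features option inherits the file's
	// a field with an explicit repeated_field_encoding override flips one setting
	field := merge(msg, func(f *editionFeatures) { f.isPacked = false })
	fmt.Println(field.isFieldPresence, field.isPacked) // true false
}
```

A descriptor that carries no features option simply keeps its parent's values, which is why the defaults blob only needs to be consulted once per file.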
@@ -184,10 +205,12 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Features_field_name protoreflect.Name = "features" ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features" ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) @@ -195,6 +218,7 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50 ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 ) @@ -204,6 +228,12 @@ const ( ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" ) +// Enum values for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_DECLARATION_enum_value = 0 + ExtensionRangeOptions_UNVERIFIED_enum_value = 1 +) + // Names for google.protobuf.ExtensionRangeOptions.Declaration. const ( ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" @@ -212,29 +242,26 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions.Declaration. 
const ( - ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" - ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" - ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" - ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" - ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" - ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" - ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" - ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" - ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" - ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" ) // Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. 
const ( - ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 - ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 - ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 - ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 - ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 - ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -291,12 +318,41 @@ const ( FieldDescriptorProto_Type_enum_name = "Type" ) +// Enum values for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_TYPE_DOUBLE_enum_value = 1 + FieldDescriptorProto_TYPE_FLOAT_enum_value = 2 + FieldDescriptorProto_TYPE_INT64_enum_value = 3 + FieldDescriptorProto_TYPE_UINT64_enum_value = 4 + FieldDescriptorProto_TYPE_INT32_enum_value = 5 + FieldDescriptorProto_TYPE_FIXED64_enum_value = 6 + FieldDescriptorProto_TYPE_FIXED32_enum_value = 7 + FieldDescriptorProto_TYPE_BOOL_enum_value = 8 + FieldDescriptorProto_TYPE_STRING_enum_value = 9 + FieldDescriptorProto_TYPE_GROUP_enum_value = 10 + FieldDescriptorProto_TYPE_MESSAGE_enum_value = 11 + FieldDescriptorProto_TYPE_BYTES_enum_value = 12 + FieldDescriptorProto_TYPE_UINT32_enum_value = 13 + FieldDescriptorProto_TYPE_ENUM_enum_value = 14 + FieldDescriptorProto_TYPE_SFIXED32_enum_value = 15 + FieldDescriptorProto_TYPE_SFIXED64_enum_value = 16 + FieldDescriptorProto_TYPE_SINT32_enum_value = 17 + FieldDescriptorProto_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.FieldDescriptorProto.Label. const ( FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" FieldDescriptorProto_Label_enum_name = "Label" ) +// Enum values for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_LABEL_OPTIONAL_enum_value = 1 + FieldDescriptorProto_LABEL_REPEATED_enum_value = 3 + FieldDescriptorProto_LABEL_REQUIRED_enum_value = 2 +) + // Names for google.protobuf.OneofDescriptorProto. 
const ( OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" @@ -468,7 +524,6 @@ const ( FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" - FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" @@ -478,6 +533,7 @@ const ( FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_Features_field_name protoreflect.Name = "features" FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" @@ -490,7 +546,6 @@ const ( FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" - FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" @@ -500,6 +555,7 @@ const ( FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features" FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" ) @@ -515,7 +571,6 @@ const ( FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 - FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 @@ -525,6 +580,7 @@ const ( FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_Features_field_number protoreflect.FieldNumber = 50 FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -534,6 
+590,13 @@ const ( FileOptions_OptimizeMode_enum_name = "OptimizeMode" ) +// Enum values for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_SPEED_enum_value = 1 + FileOptions_CODE_SIZE_enum_value = 2 + FileOptions_LITE_RUNTIME_enum_value = 3 +) + // Names for google.protobuf.MessageOptions. const ( MessageOptions_message_name protoreflect.Name = "MessageOptions" @@ -547,6 +610,7 @@ const ( MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_name protoreflect.Name = "features" MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" @@ -554,6 +618,7 @@ const ( MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features" MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) @@ -564,6 +629,7 @@ const ( MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_Features_field_number protoreflect.FieldNumber = 12 MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -584,8 +650,9 @@ const ( FieldOptions_Weak_field_name protoreflect.Name = "weak" FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" - FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_Targets_field_name protoreflect.Name = "targets" + FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" + FieldOptions_Features_field_name protoreflect.Name = "features" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -597,8 +664,9 @@ const ( FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" - FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" + FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" + FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" 
) @@ -613,8 +681,9 @@ const ( FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 - FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 + FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 + FieldOptions_Features_field_number protoreflect.FieldNumber = 21 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -624,24 +693,80 @@ const ( FieldOptions_CType_enum_name = "CType" ) +// Enum values for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_STRING_enum_value = 0 + FieldOptions_CORD_enum_value = 1 + FieldOptions_STRING_PIECE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.JSType. const ( FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" FieldOptions_JSType_enum_name = "JSType" ) +// Enum values for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JS_NORMAL_enum_value = 0 + FieldOptions_JS_STRING_enum_value = 1 + FieldOptions_JS_NUMBER_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionRetention. const ( FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" FieldOptions_OptionRetention_enum_name = "OptionRetention" ) +// Enum values for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_RETENTION_UNKNOWN_enum_value = 0 + FieldOptions_RETENTION_RUNTIME_enum_value = 1 + FieldOptions_RETENTION_SOURCE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionTargetType. const ( FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) +// Enum values for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_TARGET_TYPE_UNKNOWN_enum_value = 0 + FieldOptions_TARGET_TYPE_FILE_enum_value = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE_enum_value = 2 + FieldOptions_TARGET_TYPE_MESSAGE_enum_value = 3 + FieldOptions_TARGET_TYPE_FIELD_enum_value = 4 + FieldOptions_TARGET_TYPE_ONEOF_enum_value = 5 + FieldOptions_TARGET_TYPE_ENUM_enum_value = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY_enum_value = 7 + FieldOptions_TARGET_TYPE_SERVICE_enum_value = 8 + FieldOptions_TARGET_TYPE_METHOD_enum_value = 9 +) + +// Names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" + FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault" +) + +// Field names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition" + FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value" + + FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition" + FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value" +) + +// Field numbers for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.OneofOptions. 
const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -650,13 +775,16 @@ const ( // Field names for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_name protoreflect.Name = "features" OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features" OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" ) // Field numbers for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_number protoreflect.FieldNumber = 1 OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -671,11 +799,13 @@ const ( EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_name protoreflect.Name = "features" EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features" EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) @@ -684,6 +814,7 @@ const ( EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_Features_field_number protoreflect.FieldNumber = 7 EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -696,15 +827,21 @@ const ( // Field names for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_Features_field_name protoreflect.Name = "features" + EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" + EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 + EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -716,15 +853,18 @@ const ( // Field names for google.protobuf.ServiceOptions. 
const ( + ServiceOptions_Features_field_name protoreflect.Name = "features" ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features" ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" ) // Field numbers for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_number protoreflect.FieldNumber = 34 ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -739,10 +879,12 @@ const ( const ( MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_Features_field_name protoreflect.Name = "features" MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features" MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" ) @@ -750,6 +892,7 @@ const ( const ( MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_Features_field_number protoreflect.FieldNumber = 35 MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -759,6 +902,13 @@ const ( MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" ) +// Enum values for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN_enum_value = 0 + MethodOptions_NO_SIDE_EFFECTS_enum_value = 1 + MethodOptions_IDEMPOTENT_enum_value = 2 +) + // Names for google.protobuf.UninterpretedOption. const ( UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" @@ -816,6 +966,163 @@ const ( UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FeatureSet. +const ( + FeatureSet_message_name protoreflect.Name = "FeatureSet" + FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet" +) + +// Field names for google.protobuf.FeatureSet. 
+const ( + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" +) + +// Field numbers for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 +) + +// Full and short names for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence" + FeatureSet_FieldPresence_enum_name = "FieldPresence" +) + +// Enum values for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN_enum_value = 0 + FeatureSet_EXPLICIT_enum_value = 1 + FeatureSet_IMPLICIT_enum_value = 2 + FeatureSet_LEGACY_REQUIRED_enum_value = 3 +) + +// Full and short names for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" + FeatureSet_EnumType_enum_name = "EnumType" +) + +// Enum values for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_ENUM_TYPE_UNKNOWN_enum_value = 0 + FeatureSet_OPEN_enum_value = 1 + FeatureSet_CLOSED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" + FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" +) + +// Enum values for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_PACKED_enum_value = 1 + FeatureSet_EXPANDED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" + FeatureSet_Utf8Validation_enum_name = "Utf8Validation" +) + +// Enum values for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN_enum_value = 0 + FeatureSet_VERIFY_enum_value = 2 + FeatureSet_NONE_enum_value = 3 +) + +// Full and short names for google.protobuf.FeatureSet.MessageEncoding. 
+const ( + FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" + FeatureSet_MessageEncoding_enum_name = "MessageEncoding" +) + +// Enum values for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_LENGTH_PREFIXED_enum_value = 1 + FeatureSet_DELIMITED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" + FeatureSet_JsonFormat_enum_name = "JsonFormat" +) + +// Enum values for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JSON_FORMAT_UNKNOWN_enum_value = 0 + FeatureSet_ALLOW_enum_value = 1 + FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 +) + +// Names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" + FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults" +) + +// Field names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults" + FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition" + FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition" + + FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults" + FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition" + FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition" +) + +// Field numbers for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1 + FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault" + FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" +) + +// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features" + + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features" +) + +// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.SourceCodeInfo. 
const ( SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" @@ -917,3 +1224,10 @@ const ( GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) + +// Enum values for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. +const ( + GeneratedCodeInfo_Annotation_NONE_enum_value = 0 + GeneratedCodeInfo_Annotation_SET_enum_value = 1 + GeneratedCodeInfo_Annotation_ALIAS_enum_value = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go new file mode 100644 index 0000000000..fd9015e8ee --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto" + +// Names for google.protobuf.GoFeatures. +const ( + GoFeatures_message_name protoreflect.Name = "GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" +) + +// Field names for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" +) + +// Field numbers for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go index 1a38944b26..ad6f80c460 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -18,6 +18,11 @@ const ( NullValue_enum_name = "NullValue" ) +// Enum values for google.protobuf.NullValue. +const ( + NullValue_NULL_VALUE_enum_value = 0 +) + // Names for google.protobuf.Struct. const ( Struct_message_name protoreflect.Name = "Struct" diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index e0f75fea0a..49bc73e259 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -18,6 +18,13 @@ const ( Syntax_enum_name = "Syntax" ) +// Enum values for google.protobuf.Syntax. +const ( + Syntax_SYNTAX_PROTO2_enum_value = 0 + Syntax_SYNTAX_PROTO3_enum_value = 1 + Syntax_SYNTAX_EDITIONS_enum_value = 2 +) + // Names for google.protobuf.Type. const ( Type_message_name protoreflect.Name = "Type" @@ -105,12 +112,43 @@ const ( Field_Kind_enum_name = "Kind" ) +// Enum values for google.protobuf.Field.Kind. 
+const ( + Field_TYPE_UNKNOWN_enum_value = 0 + Field_TYPE_DOUBLE_enum_value = 1 + Field_TYPE_FLOAT_enum_value = 2 + Field_TYPE_INT64_enum_value = 3 + Field_TYPE_UINT64_enum_value = 4 + Field_TYPE_INT32_enum_value = 5 + Field_TYPE_FIXED64_enum_value = 6 + Field_TYPE_FIXED32_enum_value = 7 + Field_TYPE_BOOL_enum_value = 8 + Field_TYPE_STRING_enum_value = 9 + Field_TYPE_GROUP_enum_value = 10 + Field_TYPE_MESSAGE_enum_value = 11 + Field_TYPE_BYTES_enum_value = 12 + Field_TYPE_UINT32_enum_value = 13 + Field_TYPE_ENUM_enum_value = 14 + Field_TYPE_SFIXED32_enum_value = 15 + Field_TYPE_SFIXED64_enum_value = 16 + Field_TYPE_SINT32_enum_value = 17 + Field_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.Field.Cardinality. const ( Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" Field_Cardinality_enum_name = "Cardinality" ) +// Enum values for google.protobuf.Field.Cardinality. +const ( + Field_CARDINALITY_UNKNOWN_enum_value = 0 + Field_CARDINALITY_OPTIONAL_enum_value = 1 + Field_CARDINALITY_REQUIRED_enum_value = 2 + Field_CARDINALITY_REPEATED_enum_value = 3 +) + // Names for google.protobuf.Enum. const ( Enum_message_name protoreflect.Name = "Enum" diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index e74cefdc50..2b8f122c27 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -21,26 +21,18 @@ type extensionFieldInfo struct { validation validationInfo } -var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo - func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info } - return legacyLoadExtensionFieldInfo(xt) -} - -// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { - if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { - return xi.(*extensionFieldInfo) - } - e := makeExtensionFieldInfo(xt.TypeDescriptor()) - if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { - return e.(*extensionFieldInfo) - } - return e + // Ideally we'd cache the resulting *extensionFieldInfo so we don't have to + // recompute this metadata repeatedly. But without support for something like + // weak references, such a cache would pin temporary values (like dynamic + // extension types, constructed for the duration of a user request) to the + // heap forever, causing memory usage of the cache to grow unbounded. + // See discussion in https://github.com/golang/protobuf/issues/1521. 
+ return makeExtensionFieldInfo(xt.TypeDescriptor()) } func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index 1a509b63eb..f55dc01e3a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -162,11 +162,20 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growBoolSlice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -732,11 +741,20 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1138,11 +1156,20 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1544,11 +1571,20 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1950,11 +1986,20 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2356,11 +2401,20 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { 
return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2762,11 +2816,20 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -3145,11 +3208,15 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3461,11 +3528,15 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3777,11 +3848,15 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growFloat32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -4093,11 +4168,15 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4409,11 +4488,15 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4725,11 +4808,15 @@ func appendDoubleSlice(b 
[]byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growFloat64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index 576dcf3aac..13077751e2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -197,7 +197,7 @@ func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: + case !fd.HasPresence() && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. switch fd.Kind() { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 61c483fac0..2ab2c62978 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -206,13 +206,18 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName // Obtain a list of oneof wrapper types. var oneofWrappers []reflect.Type - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := t.MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - for _, v := range vs { - oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) - } + methods := make([]reflect.Method, 0, 2) + if m, ok := t.MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := t.MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 4f5fb67a0d..629bacdced 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -192,12 +192,17 @@ fieldLoop: // Derive a mapping of oneof wrappers to fields. 
oneofWrappers := mi.OneofWrappers - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - oneofWrappers = vs - } + methods := make([]reflect.Method, 0, 2) + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs } } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 5e736c60ef..986322b195 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -538,6 +538,6 @@ func isZero(v reflect.Value) bool { } return true default: - panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 4c491bdf48..517e94434c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -159,6 +159,42 @@ func (p pointer) SetPointer(v pointer) { p.v.Elem().Set(v.v) } +func growSlice(p pointer, addCap int) { + // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
+ in := p.v.Elem() + out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) + reflect.Copy(out, in) + p.v.Elem().Set(out) +} + +func (p pointer) growBoolSlice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + growSlice(p, addCap) +} + func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } func (ms *messageState) pointer() pointer { panic("not supported") } func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index ee0e0573e3..4b020e3116 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -138,6 +138,46 @@ func (p pointer) SetPointer(v pointer) { *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) } +func (p pointer) growBoolSlice(addCap int) { + sp := p.BoolSlice() + s := make([]bool, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growInt32Slice(addCap int) { + sp := p.Int32Slice() + s := make([]int32, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + sp := p.Int64Slice() + s := make([]int64, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint64Slice(addCap int) { + p.growInt64Slice(addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + p.growInt64Slice(addCap) +} + // Static check that MessageState does not exceed the size of a pointer. const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go index 0b74e76586..a6e7df2443 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -17,7 +17,7 @@ import ( // EnforceUTF8 reports whether to enforce strict UTF-8 validation. func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { - if flags.ProtoLegacy { + if flags.ProtoLegacy || fd.Syntax() == protoreflect.Editions { if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { return fd.EnforceUTF8() } diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go deleted file mode 100644 index 61a84d3418..0000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !purego && !appengine -// +build !purego,!appengine - -package strs - -import ( - "unsafe" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } -) - -// UnsafeString returns an unsafe string reference of b. -// The caller must treat the input slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user -// unless the input slice is provably immutable. -func UnsafeString(b []byte) (s string) { - src := (*sliceHeader)(unsafe.Pointer(&b)) - dst := (*stringHeader)(unsafe.Pointer(&s)) - dst.Data = src.Data - dst.Len = src.Len - return s -} - -// UnsafeBytes returns an unsafe bytes slice reference of s. -// The caller must treat returned slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user. -func UnsafeBytes(s string) (b []byte) { - src := (*stringHeader)(unsafe.Pointer(&s)) - dst := (*sliceHeader)(unsafe.Pointer(&b)) - dst.Data = src.Data - dst.Len = src.Len - dst.Cap = src.Len - return b -} - -// Builder builds a set of strings with shared lifetime. -// This differs from strings.Builder, which is for building a single string. -type Builder struct { - buf []byte -} - -// AppendFullName is equivalent to protoreflect.FullName.Append, -// but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { - n := len(prefix) + len(".") + len(name) - if len(prefix) == 0 { - n -= len(".") - } - sb.grow(n) - sb.buf = append(sb.buf, prefix...) - sb.buf = append(sb.buf, '.') - sb.buf = append(sb.buf, name...) - return protoreflect.FullName(sb.last(n)) -} - -// MakeString is equivalent to string(b), but optimized for large batches -// with a shared lifetime. -func (sb *Builder) MakeString(b []byte) string { - sb.grow(len(b)) - sb.buf = append(sb.buf, b...) - return sb.last(len(b)) -} - -func (sb *Builder) grow(n int) { - if cap(sb.buf)-len(sb.buf) >= n { - return - } - - // Unlike strings.Builder, we do not need to copy over the contents - // of the old buffer since our builder provides no API for - // retrieving previously created strings. - sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) -} - -func (sb *Builder) last(n int) string { - return UnsafeString(sb.buf[len(sb.buf)-n:]) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go new file mode 100644 index 0000000000..a008acd090 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -0,0 +1,95 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 + +package strs + +import ( + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. 
+func UnsafeString(b []byte) (s string) { + src := (*sliceHeader)(unsafe.Pointer(&b)) + dst := (*stringHeader)(unsafe.Pointer(&s)) + dst.Data = src.Data + dst.Len = src.Len + return s +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) (b []byte) { + src := (*stringHeader)(unsafe.Pointer(&s)) + dst := (*sliceHeader)(unsafe.Pointer(&b)) + dst.Data = src.Data + dst.Len = src.Len + dst.Cap = src.Len + return b +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return protoreflect.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go new file mode 100644 index 0000000000..60166f2ba3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -0,0 +1,74 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package strs + +import ( + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. 
+func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return protoreflect.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 0999f29d50..a50fcfb49b 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 31 + Minor = 33 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 48d47946bb..e5b03b5677 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -69,7 +69,7 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // UnmarshalState parses a wire-format message and places the result in m. // // This method permits fine-grained control over the unmarshaler. -// Most users should use Unmarshal instead. +// Most users should use [Unmarshal] instead. func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { if o.RecursionLimit == 0 { o.RecursionLimit = protowire.DefaultRecursionLimit diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index ec71e717fe..80ed16a0c2 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -18,27 +18,27 @@ // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. // -// • Size reports the size of a message in the wire format. +// - [Size] reports the size of a message in the wire format. // -// • Marshal converts a message to the wire format. -// The MarshalOptions type provides more control over wire marshaling. +// - [Marshal] converts a message to the wire format. +// The [MarshalOptions] type provides more control over wire marshaling. // -// • Unmarshal converts a message from the wire format. -// The UnmarshalOptions type provides more control over wire unmarshaling. +// - [Unmarshal] converts a message from the wire format. +// The [UnmarshalOptions] type provides more control over wire unmarshaling. // // # Basic message operations // -// • Clone makes a deep copy of a message. +// - [Clone] makes a deep copy of a message. // -// • Merge merges the content of a message into another. +// - [Merge] merges the content of a message into another. 
// -// • Equal compares two messages. For more control over comparisons -// and detailed reporting of differences, see package -// "google.golang.org/protobuf/testing/protocmp". +// - [Equal] compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// [google.golang.org/protobuf/testing/protocmp]. // -// • Reset clears the content of a message. +// - [Reset] clears the content of a message. // -// • CheckInitialized reports whether all required fields in a message are set. +// - [CheckInitialized] reports whether all required fields in a message are set. // // # Optional scalar constructors // @@ -46,9 +46,9 @@ // as pointers to a value. For example, an optional string field has the // Go type *string. // -// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String -// take a value and return a pointer to a new instance of it, -// to simplify construction of optional field values. +// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String] +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. // // Generated enum types usually have an Enum method which performs the // same operation. @@ -57,29 +57,29 @@ // // # Extension accessors // -// • HasExtension, GetExtension, SetExtension, and ClearExtension -// access extension field values in a protocol buffer message. +// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension] +// access extension field values in a protocol buffer message. // // Extension fields are only supported in proto2. // // # Related packages // -// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to -// and from JSON. +// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to +// and from JSON. // -// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to -// and from the text format. +// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to +// and from the text format. // -// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a -// reflection interface for protocol buffer data types. +// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a +// reflection interface for protocol buffer data types. // -// • Package "google.golang.org/protobuf/testing/protocmp" provides features -// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" -// package. +// - Package [google.golang.org/protobuf/testing/protocmp] provides features +// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp] +// package. // -// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic -// message type, suitable for working with messages where the protocol buffer -// type is only known at runtime. +// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. // // This module contains additional packages for more specialized use cases. // Consult the individual package documentation for details. 
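As context for the doc.go overview above, the following standalone Go sketch exercises the operations it lists (illustrative only, not part of this patch; it assumes the well-known wrapperspb types as stand-ins for generated messages):

package main

import (
	"errors"
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Marshal converts a message to the wire format.
	msg := wrapperspb.String("hello")
	b, err := proto.Marshal(msg)
	if err != nil {
		panic(err)
	}

	// Unmarshal parses the wire format back into a message.
	out := &wrapperspb.StringValue{}
	if err := proto.Unmarshal(b, out); err != nil {
		// proto.Error matches every error produced by the protobuf module.
		if errors.Is(err, proto.Error) {
			fmt.Println("protobuf error:", err)
		}
		return
	}

	// Clone makes a deep copy; Equal compares two messages.
	fmt.Println(proto.Equal(msg, proto.Clone(out))) // true
}

The errors.Is check is the idiom documented for proto.Error in the proto.go hunk that follows.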
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index bf7f816d0e..4fed202f9f 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -129,7 +129,7 @@ func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // MarshalState returns the wire-format encoding of a message. // // This method permits fine-grained control over the marshaler. -// Most users should use Marshal instead. +// Most users should use [Marshal] instead. func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { return o.marshal(in.Buf, in.Message) } diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 5f293cda86..17899a3a76 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -26,7 +26,7 @@ func HasExtension(m Message, xt protoreflect.ExtensionType) bool { } // ClearExtension clears an extension field such that subsequent -// HasExtension calls return false. +// [HasExtension] calls return false. // It panics if m is invalid or if xt does not extend m. func ClearExtension(m Message, xt protoreflect.ExtensionType) { m.ProtoReflect().Clear(xt.TypeDescriptor()) diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index d761ab331d..3c6fe57807 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -21,7 +21,7 @@ import ( // The unknown fields of src are appended to the unknown fields of dst. // // It is semantically equivalent to unmarshaling the encoded form of src -// into dst with the UnmarshalOptions.Merge option specified. +// into dst with the [UnmarshalOptions.Merge] option specified. func Merge(dst, src Message) { // TODO: Should nil src be treated as semantically equivalent to a // untyped, read-only, empty message? What about a nil dst? diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index 1f0d183b10..7543ee6b25 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -15,18 +15,20 @@ import ( // protobuf module that accept a Message, except where otherwise specified. // // This is the v2 interface definition for protobuf messages. -// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// The v1 interface definition is [github.com/golang/protobuf/proto.Message]. // -// To convert a v1 message to a v2 message, -// use "github.com/golang/protobuf/proto".MessageV2. -// To convert a v2 message to a v1 message, -// use "github.com/golang/protobuf/proto".MessageV1. +// - To convert a v1 message to a v2 message, +// use [google.golang.org/protobuf/protoadapt.MessageV2Of]. +// - To convert a v2 message to a v1 message, +// use [google.golang.org/protobuf/protoadapt.MessageV1Of]. type Message = protoreflect.ProtoMessage -// Error matches all errors produced by packages in the protobuf module. +// Error matches all errors produced by packages in the protobuf module +// according to [errors.Is]. // -// That is, errors.Is(err, Error) reports whether an error is produced -// by this module. +// Example usage: +// +// if errors.Is(err, proto.Error) { ... 
} var Error error func init() { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index e4dfb12050..baa0cc6218 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -3,11 +3,11 @@ // license that can be found in the LICENSE file. // Package protodesc provides functionality for converting -// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values. // // The google.protobuf.FileDescriptorProto is a protobuf message that describes // the type information for a .proto file in a form that is easily serializable. -// The protoreflect.FileDescriptor is a more structured representation of +// The [protoreflect.FileDescriptor] is a more structured representation of // the FileDescriptorProto message where references and remote dependencies // can be directly followed. package protodesc @@ -24,11 +24,11 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// Resolver is the resolver used by NewFile to resolve dependencies. +// Resolver is the resolver used by [NewFile] to resolve dependencies. // The enums and messages provided must belong to some parent file, // which is also registered. // -// It is implemented by protoregistry.Files. +// It is implemented by [protoregistry.Files]. type Resolver interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) @@ -61,19 +61,19 @@ type FileOptions struct { AllowUnresolvable bool } -// NewFile creates a new protoreflect.FileDescriptor from the provided -// file descriptor message. See FileOptions.New for more information. +// NewFile creates a new [protoreflect.FileDescriptor] from the provided +// file descriptor message. See [FileOptions.New] for more information. func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { return FileOptions{}.New(fd, r) } -// NewFiles creates a new protoregistry.Files from the provided -// FileDescriptorSet message. See FileOptions.NewFiles for more information. +// NewFiles creates a new [protoregistry.Files] from the provided +// FileDescriptorSet message. See [FileOptions.NewFiles] for more information. func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { return FileOptions{}.NewFiles(fd) } -// New creates a new protoreflect.FileDescriptor from the provided +// New creates a new [protoreflect.FileDescriptor] from the provided // file descriptor message. The file must represent a valid proto file according // to protobuf semantics. The returned descriptor is a deep copy of the input. 
// @@ -93,9 +93,15 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot f.L1.Syntax = protoreflect.Proto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + case "editions": + f.L1.Syntax = protoreflect.Editions + f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") @@ -108,6 +114,9 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } + if f.L1.Syntax == protoreflect.Editions { + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) + } f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -231,7 +240,7 @@ func (is importSet) importPublic(imps protoreflect.FileImports) { } } -// NewFiles creates a new protoregistry.Files from the provided +// NewFiles creates a new [protoregistry.Files] from the provided // FileDescriptorSet message. The descriptor set must include only // valid files according to protobuf semantics. The returned descriptors // are a deep copy of the input. diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 37efda1afe..b3278163c5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -28,6 +28,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt opts = proto.Clone(opts).(*descriptorpb.EnumOptions) e.L2.Options = func() protoreflect.ProtoMessage { return opts } } + e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) for _, s := range ed.GetReservedName() { e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) } @@ -68,6 +69,9 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } + if m.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) + } if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -114,6 +118,27 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt return ms, nil } +// canBePacked returns whether the field can use packed encoding: +// https://protobuf.dev/programming-guides/encoding/#packed +func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool { + if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false // not a repeated field + } + + switch protoreflect.Kind(fd.GetType()) { + case protoreflect.MessageKind, protoreflect.GroupKind: + return false // not a scalar type field + + case protoreflect.StringKind, protoreflect.BytesKind: + // string and bytes can explicitly not be declared as packed, + // see 
https://protobuf.dev/programming-guides/encoding/#packed + return false + + default: + return true + } +} + func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers for i, fd := range fds { @@ -137,6 +162,34 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if fd.JsonName != nil { f.L1.StringName.InitJSON(fd.GetJsonName()) } + + if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) + + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } + // We reuse the existing field because the old option `[packed = + // true]` is mutually exclusive with the editions feature. + if canBePacked(fd) { + f.L1.HasPacked = true + f.L1.IsPacked = f.L1.EditionFeatures.IsPacked + } + + // We pretend this option is always explicitly set because the only + // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8 + // or to return the appropriate default. + // When using editions we either parse the option or resolve the + // appropriate default here (instead of later when this option is + // requested from the descriptor). + // In proto2/proto3 syntax HasEnforceUTF8 might be false. + f.L1.HasEnforceUTF8 = true + f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated + + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { + f.L1.Kind = protoreflect.GroupKind + } + } } return fs, nil } @@ -151,6 +204,9 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } + if parent.Syntax() == protoreflect.Editions { + o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures()) + } } } return os, nil diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 27d7e35012..254ca58542 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -276,8 +276,8 @@ func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvab } else if err != nil { return v, ev, err } - if fd.Syntax() == protoreflect.Proto3 { - return v, ev, errors.New("cannot be specified under proto3 semantics") + if !fd.HasPresence() { + return v, ev, errors.New("cannot be specified with implicit field presence") } if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { return v, ev, errors.New("cannot be specified on composite types") diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 9af1d56487..e4dcaf876c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -107,7 +107,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature 
that is no longer supported", m.FullName())
}
- if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) {
+ if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) {
return errors.New("message %q is an invalid proto1 MessageSet", m.FullName())
}
if m.Syntax() == protoreflect.Proto3 {
@@ -314,8 +314,8 @@ func checkValidGroup(fd protoreflect.FieldDescriptor) error {
switch {
case fd.Kind() != protoreflect.GroupKind:
return nil
- case fd.Syntax() != protoreflect.Proto2:
- return errors.New("invalid under proto2 semantics")
+ case fd.Syntax() == protoreflect.Proto3:
+ return errors.New("invalid under proto3 semantics")
case md == nil || md.IsPlaceholder():
return errors.New("message must be resolvable")
case fd.FullName().Parent() != md.FullName().Parent():
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
new file mode 100644
index 0000000000..2a6b29d179
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -0,0 +1,148 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protodesc
+
+import (
+ "fmt"
+ "os"
+ "sync"
+
+ "google.golang.org/protobuf/internal/editiondefaults"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/descriptorpb"
+ gofeaturespb "google.golang.org/protobuf/types/gofeaturespb"
+)
+
+const (
+ SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2
+ SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023
+)
+
+var defaults = &descriptorpb.FeatureSetDefaults{}
+var defaultsCacheMu sync.Mutex
+var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet)
+
+func init() {
+ err := proto.Unmarshal(editiondefaults.Defaults, defaults)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err)
+ os.Exit(1)
+ }
+}
+
+func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition {
+ return filedesc.Edition(epb)
+}
+
+func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
+ switch ed {
+ case filedesc.EditionUnknown:
+ return descriptorpb.Edition_EDITION_UNKNOWN
+ case filedesc.EditionProto2:
+ return descriptorpb.Edition_EDITION_PROTO2
+ case filedesc.EditionProto3:
+ return descriptorpb.Edition_EDITION_PROTO3
+ case filedesc.Edition2023:
+ return descriptorpb.Edition_EDITION_2023
+ default:
+ panic(fmt.Sprintf("unknown value for edition: %v", ed))
+ }
+}
+
+func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet {
+ defaultsCacheMu.Lock()
+ defer defaultsCacheMu.Unlock()
+ if def, ok := defaultsCache[ed]; ok {
+ return def
+ }
+ edpb := toEditionProto(ed)
+ if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb {
+ // This should never happen; protodesc.(FileOptions).New would fail when
+ // initializing the file descriptor.
+ // This most likely means the embedded defaults were not updated.
+ fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb)
+ os.Exit(1)
+ }
+ fs := defaults.GetDefaults()[0].GetFeatures()
+ // Using a linear search for now.
+ // Editions are guaranteed to be sorted and thus we could use a binary search.
+ // Given that there are only a handful of editions (with one more per year),
+ // there is not much reason to use a binary search.
+ for _, def := range defaults.GetDefaults() {
+ if def.GetEdition() <= edpb {
+ fs = def.GetFeatures()
+ } else {
+ break
+ }
+ }
+ defaultsCache[ed] = fs
+ return fs
+}
+
+// mergeEditionFeatures merges the parent and child feature sets. This function
+// should be used when initializing Go descriptors from descriptor protos, which
+// is why the parent is a filedesc.EditionFeatures (Go representation) while
+// the child is a descriptorpb.FeatureSet (protoc representation).
+// Any feature set by the child overwrites what is set by the parent.
+func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures {
+ var parentFS filedesc.EditionFeatures
+ switch p := parentDesc.(type) {
+ case *filedesc.File:
+ parentFS = p.L1.EditionFeatures
+ case *filedesc.Message:
+ parentFS = p.L1.EditionFeatures
+ default:
+ panic(fmt.Sprintf("unknown parent type %T", parentDesc))
+ }
+ if child == nil {
+ return parentFS
+ }
+ if fp := child.FieldPresence; fp != nil {
+ parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED ||
+ *fp == descriptorpb.FeatureSet_EXPLICIT
+ parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED
+ }
+ if et := child.EnumType; et != nil {
+ parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN
+ }
+
+ if rfe := child.RepeatedFieldEncoding; rfe != nil {
+ parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED
+ }
+
+ if utf8val := child.Utf8Validation; utf8val != nil {
+ parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY
+ }
+
+ if me := child.MessageEncoding; me != nil {
+ parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED
+ }
+
+ if jf := child.JsonFormat; jf != nil {
+ parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW
+ }
+
+ if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil {
+ if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil {
+ parentFS.GenerateLegacyUnmarshalJSON = *luje
+ }
+ }
+
+ return parentFS
+}
+
+// initFileDescFromFeatureSet initializes editions-related fields in fd based
+// on fs. If fs is nil it is assumed to be an empty featureset and all fields
+// will be initialized with the appropriate default. fd.L1.Edition must be set
+// before calling this function.
+func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) {
+ dfs := getFeatureSetFor(fd.L1.Edition)
+ // initialize the featureset with the defaults
+ fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs)
+ // overwrite any options explicitly specified
+ fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs)
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index a7c5ceffc9..9d6e05420f 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -16,7 +16,7 @@ import (
"google.golang.org/protobuf/types/descriptorpb"
)
-// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a
+// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a
// google.protobuf.FileDescriptorProto message.
func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { p := &descriptorpb.FileDescriptorProto{ @@ -70,13 +70,13 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) } - if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } return p } -// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a // google.protobuf.DescriptorProto message. func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { p := &descriptorpb.DescriptorProto{ @@ -119,7 +119,7 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des return p } -// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a // google.protobuf.FieldDescriptorProto message. func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { p := &descriptorpb.FieldDescriptorProto{ @@ -168,7 +168,7 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi return p } -// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a // google.protobuf.OneofDescriptorProto message. func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { return &descriptorpb.OneofDescriptorProto{ @@ -177,7 +177,7 @@ func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.On } } -// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a // google.protobuf.EnumDescriptorProto message. func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { p := &descriptorpb.EnumDescriptorProto{ @@ -200,7 +200,7 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD return p } -// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a // google.protobuf.EnumValueDescriptorProto message. func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { return &descriptorpb.EnumValueDescriptorProto{ @@ -210,7 +210,7 @@ func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descrip } } -// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a // google.protobuf.ServiceDescriptorProto message. func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { p := &descriptorpb.ServiceDescriptorProto{ @@ -223,7 +223,7 @@ func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descripto return p } -// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a // google.protobuf.MethodDescriptorProto message. 
func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { p := &descriptorpb.MethodDescriptorProto{ diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 55aa14922b..00b01fbd8c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -10,46 +10,46 @@ // // # Protocol Buffer Descriptors // -// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor]) // are immutable objects that represent protobuf type information. // They are wrappers around the messages declared in descriptor.proto. // Protobuf descriptors alone lack any information regarding Go types. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Descriptor and ProtoReflect.Descriptor accessors respectively // return the protobuf descriptor for the values. // // The protobuf descriptor interfaces are not meant to be implemented by // user code since they might need to be extended in the future to support // additions to the protobuf language. -// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// The [google.golang.org/protobuf/reflect/protodesc] package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // // # Go Type Descriptors // -// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. // There is commonly a one-to-one relationship between protobuf descriptors and // Go type descriptors, but it can potentially be a one-to-many relationship. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Type and ProtoReflect.Type accessors respectively // return the protobuf descriptor for the values. // -// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// The [google.golang.org/protobuf/types/dynamicpb] package can be used to // create Go type descriptors from protobuf descriptors. // // # Value Interfaces // -// The Enum and Message interfaces provide a reflective view over an +// The [Enum] and [Message] interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve // the enum value number for any concrete enum type. For messages, it provides // the ability to access or manipulate fields of the message. // -// To convert a proto.Message to a protoreflect.Message, use the +// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the // former's ProtoReflect method. Since the ProtoReflect method is new to the // v2 message interface, it may not be present on older message implementations. -// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// The [github.com/golang/protobuf/proto.MessageReflect] function can be used // to obtain a reflective view on older messages. 
// // # Relationships @@ -71,12 +71,12 @@ // │ │ // └────────────────── Type() ───────┘ // -// • An EnumType describes a concrete Go enum type. +// • An [EnumType] describes a concrete Go enum type. // It has an EnumDescriptor and can construct an Enum instance. // -// • An EnumDescriptor describes an abstract protobuf enum type. +// • An [EnumDescriptor] describes an abstract protobuf enum type. // -// • An Enum is a concrete enum instance. Generated enums implement Enum. +// • An [Enum] is a concrete enum instance. Generated enums implement Enum. // // ┌──────────────── New() ─────────────────┐ // │ │ @@ -90,24 +90,26 @@ // │ │ // └─────────────────── Type() ─────────┘ // -// • A MessageType describes a concrete Go message type. -// It has a MessageDescriptor and can construct a Message instance. -// Just as how Go's reflect.Type is a reflective description of a Go type, -// a MessageType is a reflective description of a Go type for a protobuf message. +// • A [MessageType] describes a concrete Go message type. +// It has a [MessageDescriptor] and can construct a [Message] instance. +// Just as how Go's [reflect.Type] is a reflective description of a Go type, +// a [MessageType] is a reflective description of a Go type for a protobuf message. // -// • A MessageDescriptor describes an abstract protobuf message type. -// It has no understanding of Go types. In order to construct a MessageType -// from just a MessageDescriptor, you can consider looking up the message type -// in the global registry using protoregistry.GlobalTypes.FindMessageByName -// or constructing a dynamic MessageType using dynamicpb.NewMessageType. +// • A [MessageDescriptor] describes an abstract protobuf message type. +// It has no understanding of Go types. In order to construct a [MessageType] +// from just a [MessageDescriptor], you can consider looking up the message type +// in the global registry using the FindMessageByName method on +// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes] +// or constructing a dynamic [MessageType] using +// [google.golang.org/protobuf/types/dynamicpb.NewMessageType]. // -// • A Message is a reflective view over a concrete message instance. -// Generated messages implement ProtoMessage, which can convert to a Message. -// Just as how Go's reflect.Value is a reflective view over a Go value, -// a Message is a reflective view over a concrete protobuf message instance. -// Using Go reflection as an analogy, the ProtoReflect method is similar to -// calling reflect.ValueOf, and the Message.Interface method is similar to -// calling reflect.Value.Interface. +// • A [Message] is a reflective view over a concrete message instance. +// Generated messages implement [ProtoMessage], which can convert to a [Message]. +// Just as how Go's [reflect.Value] is a reflective view over a Go value, +// a [Message] is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to +// calling [reflect.ValueOf], and the [Message.Interface] method is similar to +// calling [reflect.Value.Interface]. // // ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ // │ V │ V @@ -119,15 +121,15 @@ // │ │ // └────── implements ────────┘ // -// • An ExtensionType describes a concrete Go implementation of an extension. -// It has an ExtensionTypeDescriptor and can convert to/from -// abstract Values and Go values. +// • An [ExtensionType] describes a concrete Go implementation of an extension. 
+// It has an [ExtensionTypeDescriptor] and can convert to/from
+// an abstract [Value] and a Go value.
//
-// • An ExtensionTypeDescriptor is an ExtensionDescriptor
-// which also has an ExtensionType.
+// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor]
+// which also has an [ExtensionType].
//
-// • An ExtensionDescriptor describes an abstract protobuf extension field and
-// may not always be an ExtensionTypeDescriptor.
+// • An [ExtensionDescriptor] describes an abstract protobuf extension field and
+// may not always be an [ExtensionTypeDescriptor].
package protoreflect
import (
@@ -142,7 +144,7 @@ type doNotImplement pragma.DoNotImplement
// ProtoMessage is the top-level interface that all proto messages implement.
// This is declared in the protoreflect package to avoid a cyclic dependency;
-// use the proto.Message type instead, which aliases this type.
+// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type.
type ProtoMessage interface{ ProtoReflect() Message }
// Syntax is the language version of the proto file.
@@ -151,8 +153,9 @@ type Syntax syntax
type syntax int8 // keep exact type opaque as the int type may change
const (
- Proto2 Syntax = 2
- Proto3 Syntax = 3
+ Proto2 Syntax = 2
+ Proto3 Syntax = 3
+ Editions Syntax = 4
)
// IsValid reports whether the syntax is valid.
@@ -172,6 +175,8 @@ func (s Syntax) String() string {
return "proto2"
case Proto3:
return "proto3"
+ case Editions:
+ return "editions"
default:
return fmt.Sprintf("<unknown:%d>", s)
}
}
@@ -436,7 +441,7 @@ type Names interface {
// FullName is a qualified name that uniquely identifies a proto declaration.
// A qualified name is the concatenation of the proto package along with the
// fully-declared name (i.e., name of parent preceding the name of the child),
-// with a '.' delimiter placed between each Name.
+// with a '.' delimiter placed between each [Name].
//
// This should not have any leading or trailing dots.
type FullName string // e.g., "google.protobuf.Field.Kind"
@@ -480,7 +485,7 @@ func isLetterDigit(c byte) bool {
}
// Name returns the short name, which is the last identifier segment.
-// A single segment FullName is the Name itself.
+// A single segment FullName is the [Name] itself.
func (n FullName) Name() Name { if i := strings.LastIndexByte(string(n), '.'); i >= 0 { return Name(n[i+1:]) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 717b106f3d..7dcc2ff09e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,7 +35,7 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) - case 13: + case 14: b = p.appendSingularField(b, "edition", nil) } return b @@ -160,8 +160,6 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "java_generic_services", nil) case 18: b = p.appendSingularField(b, "py_generic_services", nil) - case 42: - b = p.appendSingularField(b, "php_generic_services", nil) case 23: b = p.appendSingularField(b, "deprecated", nil) case 31: @@ -180,6 +178,8 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "php_metadata_namespace", nil) case 45: b = p.appendSingularField(b, "ruby_package", nil) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -240,6 +240,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "map_entry", nil) case 11: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 12: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -285,6 +287,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 6: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 7: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -330,6 +334,8 @@ func (p *SourcePath) appendServiceOptions(b []byte) []byte { return b } switch (*p)[0] { + case 34: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 33: b = p.appendSingularField(b, "deprecated", nil) case 999: @@ -361,16 +367,39 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "debug_redact", nil) case 17: b = p.appendSingularField(b, "retention", nil) - case 18: - b = p.appendSingularField(b, "target", nil) case 19: b = p.appendRepeatedField(b, "targets", nil) + case 20: + b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) + case 21: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFeatureSet(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "field_presence", nil) + case 2: + b = p.appendSingularField(b, "enum_type", nil) + case 3: + b = p.appendSingularField(b, "repeated_field_encoding", nil) + case 4: + b = p.appendSingularField(b, "utf8_validation", nil) + case 5: + b = 
p.appendSingularField(b, "message_encoding", nil) + case 6: + b = p.appendSingularField(b, "json_format", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { if len(*p) == 0 { return b @@ -422,6 +451,8 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) case 2: b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "verification", nil) } @@ -433,6 +464,8 @@ func (p *SourcePath) appendOneofOptions(b []byte) []byte { return b } switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -446,6 +479,10 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { switch (*p)[0] { case 1: b = p.appendSingularField(b, "deprecated", nil) + case 2: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 3: + b = p.appendSingularField(b, "debug_redact", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -461,12 +498,27 @@ func (p *SourcePath) appendMethodOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 34: b = p.appendSingularField(b, "idempotency_level", nil) + case 35: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 3: + b = p.appendSingularField(b, "edition", nil) + case 2: + b = p.appendSingularField(b, "value", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b @@ -491,8 +543,6 @@ func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { b = p.appendSingularField(b, "full_name", nil) case 3: b = p.appendSingularField(b, "type", nil) - case 4: - b = p.appendSingularField(b, "is_repeated", nil) case 5: b = p.appendSingularField(b, "reserved", nil) case 6: diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 3867470d30..60ff62b4c8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -12,7 +12,7 @@ package protoreflect // exactly identical. However, it is possible for the same semantically // identical proto type to be represented by multiple type descriptors. // -// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// For example, suppose we have t1 and t2 which are both an [MessageDescriptor]. // If t1 == t2, then the types are definitely equal and all accessors return // the same information. However, if t1 != t2, then it is still possible that // they still represent the same proto type (e.g., t1.FullName == t2.FullName). @@ -115,7 +115,7 @@ type Descriptor interface { // corresponds with the google.protobuf.FileDescriptorProto message. 
// // Top-level declarations: -// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor]. type FileDescriptor interface { Descriptor // Descriptor.FullName is identical to Package @@ -180,8 +180,8 @@ type FileImport struct { // corresponds with the google.protobuf.DescriptorProto message. // // Nested declarations: -// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, -// and/or MessageDescriptor. +// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor], +// and/or [MessageDescriptor]. type MessageDescriptor interface { Descriptor @@ -214,7 +214,7 @@ type MessageDescriptor interface { ExtensionRanges() FieldRanges // ExtensionRangeOptions returns the ith extension range options. // - // To avoid a dependency cycle, this method returns a proto.Message value, + // To avoid a dependency cycle, this method returns a proto.Message] value, // which always contains a google.protobuf.ExtensionRangeOptions message. // This method returns a typed nil-pointer if no options are present. // The caller must import the descriptorpb package to use this. @@ -231,9 +231,9 @@ type MessageDescriptor interface { } type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } -// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation. // It is recommended that implementations of this interface also implement the -// MessageFieldTypes interface. +// [MessageFieldTypes] interface. type MessageType interface { // New returns a newly allocated empty message. // It may return nil for synthetic messages representing a map entry. @@ -249,19 +249,19 @@ type MessageType interface { Descriptor() MessageDescriptor } -// MessageFieldTypes extends a MessageType by providing type information +// MessageFieldTypes extends a [MessageType] by providing type information // regarding enums and messages referenced by the message fields. type MessageFieldTypes interface { MessageType - // Enum returns the EnumType for the ith field in Descriptor.Fields. + // Enum returns the EnumType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not an enum kind. // It panics if out of bounds. // // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() Enum(i int) EnumType - // Message returns the MessageType for the ith field in Descriptor.Fields. + // Message returns the MessageType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not a message or group kind. // It panics if out of bounds. // @@ -286,8 +286,8 @@ type MessageDescriptors interface { // corresponds with the google.protobuf.FieldDescriptorProto message. // // It is used for both normal fields defined within the parent message -// (e.g., MessageDescriptor.Fields) and fields that extend some remote message -// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message +// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]). type FieldDescriptor interface { Descriptor @@ -344,7 +344,7 @@ type FieldDescriptor interface { // IsMap reports whether this field represents a map, // where the value type for the associated field is a Map. 
// It is equivalent to checking whether Cardinality is Repeated, - // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true. IsMap() bool // MapKey returns the field descriptor for the key in the map entry. @@ -419,7 +419,7 @@ type OneofDescriptor interface { // IsSynthetic reports whether this is a synthetic oneof created to support // proto3 optional semantics. If true, Fields contains exactly one field - // with HasOptionalKeyword specified. + // with FieldDescriptor.HasOptionalKeyword specified. IsSynthetic() bool // Fields is a list of fields belonging to this oneof. @@ -442,10 +442,10 @@ type OneofDescriptors interface { doNotImplement } -// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation. type ExtensionDescriptor = FieldDescriptor -// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType]. type ExtensionTypeDescriptor interface { ExtensionDescriptor @@ -470,12 +470,12 @@ type ExtensionDescriptors interface { doNotImplement } -// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete // Go implementation. The nested field descriptor must be for a extension field. // // While a normal field is a member of the parent message that it is declared -// within (see Descriptor.Parent), an extension field is a member of some other -// target message (see ExtensionDescriptor.Extendee) and may have no +// within (see [Descriptor.Parent]), an extension field is a member of some other +// target message (see [FieldDescriptor.ContainingMessage]) and may have no // relationship with the parent. However, the full name of an extension field is // relative to the parent that it is declared within. // @@ -532,7 +532,7 @@ type ExtensionType interface { // corresponds with the google.protobuf.EnumDescriptorProto message. // // Nested declarations: -// EnumValueDescriptor. +// [EnumValueDescriptor]. type EnumDescriptor interface { Descriptor @@ -548,7 +548,7 @@ type EnumDescriptor interface { } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } -// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation. type EnumType interface { // New returns an instance of this enum type with its value set to n. New(n EnumNumber) Enum @@ -610,7 +610,7 @@ type EnumValueDescriptors interface { // ServiceDescriptor describes a service and // corresponds with the google.protobuf.ServiceDescriptorProto message. // -// Nested declarations: MethodDescriptor. +// Nested declarations: [MethodDescriptor]. type ServiceDescriptor interface { Descriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index 37601b7819..a7b0d06ff3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -27,16 +27,16 @@ type Enum interface { // Message is a reflective interface for a concrete message value, // encapsulating both type and value information for the message. // -// Accessor/mutators for individual fields are keyed by FieldDescriptor. 
+// Accessor/mutators for individual fields are keyed by [FieldDescriptor]. // For non-extension fields, the descriptor must exactly match the // field known by the parent message. -// For extension fields, the descriptor must implement ExtensionTypeDescriptor, -// extend the parent message (i.e., have the same message FullName), and +// For extension fields, the descriptor must implement [ExtensionTypeDescriptor], +// extend the parent message (i.e., have the same message [FullName]), and // be within the parent's extension range. // -// Each field Value can be a scalar or a composite type (Message, List, or Map). -// See Value for the Go types associated with a FieldDescriptor. -// Providing a Value that is invalid or of an incorrect type panics. +// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]). +// See [Value] for the Go types associated with a [FieldDescriptor]. +// Providing a [Value] that is invalid or of an incorrect type panics. type Message interface { // Descriptor returns message descriptor, which contains only the protobuf // type information for the message. @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // "google.golang.org/protobuf/runtime/protoiface".Methods. + // google.golang.org/protobuf/runtime/protoiface.Methods. // Consult the protoiface package documentation for details. ProtoMethods() *methods } @@ -175,8 +175,8 @@ func (b RawFields) IsValid() bool { } // List is a zero-indexed, ordered list. -// The element Value type is determined by FieldDescriptor.Kind. -// Providing a Value that is invalid or of an incorrect type panics. +// The element [Value] type is determined by [FieldDescriptor.Kind]. +// Providing a [Value] that is invalid or of an incorrect type panics. type List interface { // Len reports the number of entries in the List. // Get, Set, and Truncate panic with out of bound indexes. @@ -226,9 +226,9 @@ type List interface { } // Map is an unordered, associative map. -// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. -// The entry Value type is determined by FieldDescriptor.MapValue.Kind. -// Providing a MapKey or Value that is invalid or of an incorrect type panics. +// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind. +// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind. +// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics. type Map interface { // Len reports the number of elements in the map. Len() int diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go index 591652541f..654599d449 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -24,19 +24,19 @@ import ( // Unlike the == operator, a NaN is equal to another NaN. // // - Enums are equal if they contain the same number. -// Since Value does not contain an enum descriptor, +// Since [Value] does not contain an enum descriptor, // enum values do not consider the type of the enum. // // - Other scalar values are equal if they contain the same value. 
// -// - Message values are equal if they belong to the same message descriptor, +// - [Message] values are equal if they belong to the same message descriptor, // have the same set of populated known and extension field values, // and the same set of unknown fields values. // -// - Lists are equal if they are the same length and +// - [List] values are equal if they are the same length and // each corresponding element is equal. // -// - Maps are equal if they have the same set of keys and +// - [Map] values are equal if they have the same set of keys and // the corresponding value for each key is equal. func (v1 Value) Equal(v2 Value) bool { return equalValue(v1, v2) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 08e5ef73fc..1603097311 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -11,7 +11,7 @@ import ( // Value is a union where only one Go type may be set at a time. // The Value is used to represent all possible values a field may take. -// The following shows which Go type is used to represent each proto Kind: +// The following shows which Go type is used to represent each proto [Kind]: // // ╔════════════╤═════════════════════════════════════╗ // ║ Go type    │ Protobuf kind                       ║ @@ -31,22 +31,22 @@ import ( // // Multiple protobuf Kinds may be represented by a single Go type if the type // can losslessly represent the information for the proto kind. For example, -// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64, // but use different integer encoding methods. // -// The List or Map types are used if the field cardinality is repeated. -// A field is a List if FieldDescriptor.IsList reports true. -// A field is a Map if FieldDescriptor.IsMap reports true. +// The [List] or [Map] types are used if the field cardinality is repeated. +// A field is a [List] if [FieldDescriptor.IsList] reports true. +// A field is a [Map] if [FieldDescriptor.IsMap] reports true. // // Converting to/from a Value and a concrete Go value panics on type mismatch. -// For example, ValueOf("hello").Int() panics because this attempts to +// For example, [ValueOf]("hello").Int() panics because this attempts to // retrieve an int64 from a string. // -// List, Map, and Message Values are called "composite" values. +// [List], [Map], and [Message] Values are called "composite" values. // // A composite Value may alias (reference) memory at some location, // such that changes to the Value update that location. -// A composite value acquired with a Mutable method, such as Message.Mutable, +// A composite value acquired with a Mutable method, such as [Message.Mutable], // always references the source object. // // For example: @@ -65,7 +65,7 @@ import ( // // appending to the List here may or may not modify the message. // list.Append(protoreflect.ValueOfInt32(0)) // -// Some operations, such as Message.Get, may return an "empty, read-only" +// Some operations, such as [Message.Get], may return an "empty, read-only" // composite Value. Modifying an empty, read-only value panics. type Value value @@ -306,7 +306,7 @@ func (v Value) Float() float64 { } } -// String returns v as a string. Since this method implements fmt.Stringer, +// String returns v as a string.
Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (v Value) String() string { switch v.typ { @@ -327,7 +327,7 @@ func (v Value) Bytes() []byte { } } -// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. +// Enum returns v as an [EnumNumber] and panics if the type is not an [EnumNumber]. func (v Value) Enum() EnumNumber { switch v.typ { case enumType: @@ -337,7 +337,7 @@ func (v Value) Enum() EnumNumber { } } -// Message returns v as a Message and panics if the type is not a Message. +// Message returns v as a [Message] and panics if the type is not a [Message]. func (v Value) Message() Message { switch vi := v.getIface().(type) { case Message: @@ -347,7 +347,7 @@ func (v Value) Message() Message { } } -// List returns v as a List and panics if the type is not a List. +// List returns v as a [List] and panics if the type is not a [List]. func (v Value) List() List { switch vi := v.getIface().(type) { case List: @@ -357,7 +357,7 @@ func (v Value) List() List { } } -// Map returns v as a Map and panics if the type is not a Map. +// Map returns v as a [Map] and panics if the type is not a [Map]. func (v Value) Map() Map { switch vi := v.getIface().(type) { case Map: @@ -367,7 +367,7 @@ func (v Value) Map() Map { } } -// MapKey returns v as a MapKey and panics for invalid MapKey types. +// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types. func (v Value) MapKey() MapKey { switch v.typ { case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: @@ -378,8 +378,8 @@ func (v Value) MapKey() MapKey { } // MapKey is used to index maps, where the Go type of the MapKey must match -// the specified key Kind (see MessageDescriptor.IsMapEntry). -// The following shows what Go type is used to represent each proto Kind: +// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]). +// The following shows what Go type is used to represent each proto [Kind]: // // ╔═════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind                       ║ @@ -392,13 +392,13 @@ func (v Value) MapKey() MapKey { // ║ string  │ StringKind                          ║ // ╚═════════╧═════════════════════════════════════╝ // -// A MapKey is constructed and accessed through a Value: +// A MapKey is constructed and accessed through a [Value]: // // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // -// The MapKey is a strict subset of valid types used in Value; -// converting a Value to a MapKey with an invalid type panics. +// The MapKey is a strict subset of valid types used in [Value]; +// converting a [Value] to a MapKey with an invalid type panics. type MapKey value // IsValid reports whether k is populated with a value. @@ -426,13 +426,13 @@ func (k MapKey) Uint() uint64 { return Value(k).Uint() } -// String returns k as a string. Since this method implements fmt.Stringer, +// String returns k as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (k MapKey) String() string { return Value(k).String() } -// Value returns k as a Value. +// Value returns k as a [Value].
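A minimal sketch of the Value-to-MapKey round-trip and the Equal semantics documented above, assuming only the exported protoreflect API:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Strings are a valid map-key kind, so MapKey does not panic here.
	k := protoreflect.ValueOfString("hash").MapKey()
	fmt.Println(k.String(), k.Value().String()) // hash hash

	// Equal treats NaN as equal to NaN, unlike the Go == operator.
	a := protoreflect.ValueOfFloat64(math.NaN())
	b := protoreflect.ValueOfFloat64(math.NaN())
	fmt.Println(a.Equal(b)) // true
}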
func (k MapKey) Value() Value { return Value(k) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go deleted file mode 100644 index 702ddf22a2..0000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !purego && !appengine -// +build !purego,!appengine - -package protoreflect - -import ( - "unsafe" - - "google.golang.org/protobuf/internal/pragma" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } - ifaceHeader struct { - Type unsafe.Pointer - Data unsafe.Pointer - } -) - -var ( - nilType = typeOf(nil) - boolType = typeOf(*new(bool)) - int32Type = typeOf(*new(int32)) - int64Type = typeOf(*new(int64)) - uint32Type = typeOf(*new(uint32)) - uint64Type = typeOf(*new(uint64)) - float32Type = typeOf(*new(float32)) - float64Type = typeOf(*new(float64)) - stringType = typeOf(*new(string)) - bytesType = typeOf(*new([]byte)) - enumType = typeOf(*new(EnumNumber)) -) - -// typeOf returns a pointer to the Go type information. -// The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { - return (*ifaceHeader)(unsafe.Pointer(&t)).Type -} - -// value is a union where only one type can be represented at a time. -// The struct is 24B large on 64-bit systems and requires the minimum storage -// necessary to represent each possible type. -// -// The Go GC needs to be able to scan variables containing pointers. -// As such, pointers and non-pointers cannot be intermixed. -type value struct { - pragma.DoNotCompare // 0B - - // typ stores the type of the value as a pointer to the Go type. - typ unsafe.Pointer // 8B - - // ptr stores the data pointer for a String, Bytes, or interface value. - ptr unsafe.Pointer // 8B - - // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or - // Enum value as a raw uint64. - // - // It is also used to store the length of a String or Bytes value; - // the capacity is ignored. 
- num uint64 // 8B -} - -func valueOfString(v string) Value { - p := (*stringHeader)(unsafe.Pointer(&v)) - return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfBytes(v []byte) Value { - p := (*sliceHeader)(unsafe.Pointer(&v)) - return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfIface(v interface{}) Value { - p := (*ifaceHeader)(unsafe.Pointer(&v)) - return Value{typ: p.Type, ptr: p.Data} -} - -func (v Value) getString() (x string) { - *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} - return x -} -func (v Value) getBytes() (x []byte) { - *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} - return x -} -func (v Value) getIface() (x interface{}) { - *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} - return x -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go new file mode 100644 index 0000000000..b1fdbe3e8e --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -0,0 +1,99 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } + ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. 
+ num uint64 // 8B +} + +func valueOfString(v string) Value { + p := (*stringHeader)(unsafe.Pointer(&v)) + return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + p := (*sliceHeader)(unsafe.Pointer(&v)) + return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() (x string) { + *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} + return x +} +func (v Value) getBytes() (x []byte) { + *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} + return x +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go new file mode 100644 index 0000000000..4354701117 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + ifaceHeader struct { + _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. 
+ num uint64 // 8B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() string { + return unsafe.String((*byte)(v.ptr), v.num) +} +func (v Value) getBytes() []byte { + return unsafe.Slice((*byte)(v.ptr), v.num) +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index aeb5597744..6267dc52a6 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -5,12 +5,12 @@ // Package protoregistry provides data structures to register and lookup // protobuf descriptor types. // -// The Files registry contains file descriptors and provides the ability +// The [Files] registry contains file descriptors and provides the ability // to iterate over the files or lookup a specific descriptor within the files. -// Files only contains protobuf descriptors and has no understanding of Go +// [Files] only contains protobuf descriptors and has no understanding of Go // type information that may be associated with each descriptor. // -// The Types registry contains descriptor types for which there is a known +// The [Types] registry contains descriptor types for which there is a known // Go type associated with that descriptor. It provides the ability to iterate // over the registered types or lookup a type by name. package protoregistry @@ -218,7 +218,7 @@ func (r *Files) checkGenProtoConflict(path string) { // FindDescriptorByName looks up a descriptor by the full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { if r == nil { return nil, NotFound @@ -310,7 +310,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. // This returns an error if multiple files have the same path. func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { @@ -431,7 +431,7 @@ func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflec // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type MessageTypeResolver interface { // FindMessageByName looks up a message by its full name. // E.g., "google.protobuf.Any" @@ -451,7 +451,7 @@ type MessageTypeResolver interface { // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type ExtensionTypeResolver interface { // FindExtensionByName looks up a extension field by the field's full name. 
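The value_unsafe_go121.go file introduced above swaps the hand-rolled stringHeader/sliceHeader casts for the unsafe.String and unsafe.StringData helpers added in Go 1.21. A hedged sketch of that zero-copy round-trip in isolation, assuming Go 1.21 or newer:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	s := "protobuf"
	data := unsafe.StringData(s)         // *byte aimed at the string's bytes
	again := unsafe.String(data, len(s)) // rebuild a string header, no copy
	fmt.Println(again == s)              // true
}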
// Note that this is the full name of the field as determined by @@ -590,7 +590,7 @@ func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interfac // FindEnumByName looks up an enum by its full name. // E.g., "google.protobuf.Field.Kind". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { if r == nil { return nil, NotFound @@ -611,7 +611,7 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp // FindMessageByName looks up a message by its full name, // e.g. "google.protobuf.Any". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { if r == nil { return nil, NotFound @@ -632,7 +632,7 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // FindMessageByURL looks up a message by a URL identifier. // See documentation on google.protobuf.Any.type_url for the URL format. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // This function is similar to FindMessageByName but // truncates anything before and including '/' in the URL. @@ -662,7 +662,7 @@ func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // where the extension is declared and is unrelated to the full name of the // message being extended. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound @@ -703,7 +703,7 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E // FindExtensionByNumber looks up a extension field by the field number // within some parent message, identified by full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 04c00f737c..78624cf60b 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,6 +48,103 @@ import ( sync "sync" ) +// The full set of known editions. +type Edition int32 + +const ( + // A placeholder for an unknown edition value. + Edition_EDITION_UNKNOWN Edition = 0 + // Legacy syntax "editions". These pre-date editions, but behave much like + // distinct editions. These can't be used to specify the edition of proto + // files, but feature definitions must supply proto2/proto3 defaults for + // backwards compatibility. + Edition_EDITION_PROTO2 Edition = 998 + Edition_EDITION_PROTO3 Edition = 999 + // Editions that have been released. The specific values are arbitrary and + // should not be depended on, but they will always be time-ordered for easy + // comparison. 
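The registry lookup methods above all share the (nil, NotFound) contract. A minimal usage sketch against the global registry, assuming the anypb package is linked in so that google.protobuf.Any is registered:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/anypb" // registers google.protobuf.Any
)

func main() {
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Any")
	if errors.Is(err, protoregistry.NotFound) {
		fmt.Println("not registered")
		return
	}
	fmt.Println(mt.Descriptor().FullName()) // google.protobuf.Any
}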
+ Edition_EDITION_2023 Edition = 1000 + Edition_EDITION_2024 Edition = 1001 + // Placeholder editions for testing feature resolution. These should not be + // used or relied on outside of tests. + Edition_EDITION_1_TEST_ONLY Edition = 1 + Edition_EDITION_2_TEST_ONLY Edition = 2 + Edition_EDITION_99997_TEST_ONLY Edition = 99997 + Edition_EDITION_99998_TEST_ONLY Edition = 99998 + Edition_EDITION_99999_TEST_ONLY Edition = 99999 + // Placeholder for specifying unbounded edition support. This should only + // ever be used by plugins that can expect to never require any changes to + // support a new edition. + Edition_EDITION_MAX Edition = 2147483647 +) + +// Enum value maps for Edition. +var ( + Edition_name = map[int32]string{ + 0: "EDITION_UNKNOWN", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1001: "EDITION_2024", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + 2147483647: "EDITION_MAX", + } + Edition_value = map[string]int32{ + "EDITION_UNKNOWN": 0, + "EDITION_PROTO2": 998, + "EDITION_PROTO3": 999, + "EDITION_2023": 1000, + "EDITION_2024": 1001, + "EDITION_1_TEST_ONLY": 1, + "EDITION_2_TEST_ONLY": 2, + "EDITION_99997_TEST_ONLY": 99997, + "EDITION_99998_TEST_ONLY": 99998, + "EDITION_99999_TEST_ONLY": 99999, + "EDITION_MAX": 2147483647, + } +) + +func (x Edition) Enum() *Edition { + p := new(Edition) + *p = x + return p +} + +func (x Edition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Edition) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (Edition) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x Edition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *Edition) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = Edition(num) + return nil +} + +// Deprecated: Use Edition.Descriptor instead. +func (Edition) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + // The verification state of the extension range. type ExtensionRangeOptions_VerificationState int32 @@ -80,11 +177,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -125,9 +222,10 @@ const ( FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 + // Group type is deprecated and not supported after proto2. However, Proto3 // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields.
+ // treat group fields as unknown fields. In Editions, the group wire format + // can be enabled via the `message_encoding` feature. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. // New in version 2. @@ -195,11 +293,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -226,21 +324,24 @@ type FieldDescriptorProto_Label int32 const ( // 0 is reserved for errors FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 + // The required label is only allowed in proto2. In proto3 and Editions + // it's explicitly prohibited. In Editions, the `field_presence` feature + // can be used to get this behavior. + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 ) // Enum value maps for FieldDescriptorProto_Label. var ( FieldDescriptorProto_Label_name = map[int32]string{ 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", 3: "LABEL_REPEATED", + 2: "LABEL_REQUIRED", } FieldDescriptorProto_Label_value = map[string]int32{ "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, "LABEL_REPEATED": 3, + "LABEL_REQUIRED": 2, } ) @@ -255,11 +356,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -316,11 +417,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -382,11 +483,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -444,11 +545,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor()
protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -506,11 +607,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -590,11 +691,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -652,11 +753,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -678,6 +779,363 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +type FeatureSet_FieldPresence int32 + +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0 + FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1 + FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2 + FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3 +) + +// Enum value maps for FeatureSet_FieldPresence. 
+var ( + FeatureSet_FieldPresence_name = map[int32]string{ + 0: "FIELD_PRESENCE_UNKNOWN", + 1: "EXPLICIT", + 2: "IMPLICIT", + 3: "LEGACY_REQUIRED", + } + FeatureSet_FieldPresence_value = map[string]int32{ + "FIELD_PRESENCE_UNKNOWN": 0, + "EXPLICIT": 1, + "IMPLICIT": 2, + "LEGACY_REQUIRED": 3, + } +) + +func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence { + p := new(FeatureSet_FieldPresence) + *p = x + return p +} + +func (x FeatureSet_FieldPresence) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() +} + +func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[10] +} + +func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_FieldPresence(num) + return nil +} + +// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead. +func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +type FeatureSet_EnumType int32 + +const ( + FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0 + FeatureSet_OPEN FeatureSet_EnumType = 1 + FeatureSet_CLOSED FeatureSet_EnumType = 2 +) + +// Enum value maps for FeatureSet_EnumType. +var ( + FeatureSet_EnumType_name = map[int32]string{ + 0: "ENUM_TYPE_UNKNOWN", + 1: "OPEN", + 2: "CLOSED", + } + FeatureSet_EnumType_value = map[string]int32{ + "ENUM_TYPE_UNKNOWN": 0, + "OPEN": 1, + "CLOSED": 2, + } +) + +func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType { + p := new(FeatureSet_EnumType) + *p = x + return p +} + +func (x FeatureSet_EnumType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() +} + +func (FeatureSet_EnumType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[11] +} + +func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnumType(num) + return nil +} + +// Deprecated: Use FeatureSet_EnumType.Descriptor instead. +func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1} +} + +type FeatureSet_RepeatedFieldEncoding int32 + +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0 + FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1 + FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2 +) + +// Enum value maps for FeatureSet_RepeatedFieldEncoding. 
+var ( + FeatureSet_RepeatedFieldEncoding_name = map[int32]string{ + 0: "REPEATED_FIELD_ENCODING_UNKNOWN", + 1: "PACKED", + 2: "EXPANDED", + } + FeatureSet_RepeatedFieldEncoding_value = map[string]int32{ + "REPEATED_FIELD_ENCODING_UNKNOWN": 0, + "PACKED": 1, + "EXPANDED": 2, + } +) + +func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding { + p := new(FeatureSet_RepeatedFieldEncoding) + *p = x + return p +} + +func (x FeatureSet_RepeatedFieldEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() +} + +func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[12] +} + +func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_RepeatedFieldEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead. +func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2} +} + +type FeatureSet_Utf8Validation int32 + +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 + FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 + FeatureSet_NONE FeatureSet_Utf8Validation = 3 +) + +// Enum value maps for FeatureSet_Utf8Validation. +var ( + FeatureSet_Utf8Validation_name = map[int32]string{ + 0: "UTF8_VALIDATION_UNKNOWN", + 2: "VERIFY", + 3: "NONE", + } + FeatureSet_Utf8Validation_value = map[string]int32{ + "UTF8_VALIDATION_UNKNOWN": 0, + "VERIFY": 2, + "NONE": 3, + } +) + +func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation { + p := new(FeatureSet_Utf8Validation) + *p = x + return p +} + +func (x FeatureSet_Utf8Validation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() +} + +func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[13] +} + +func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_Utf8Validation(num) + return nil +} + +// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead. +func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3} +} + +type FeatureSet_MessageEncoding int32 + +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0 + FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1 + FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2 +) + +// Enum value maps for FeatureSet_MessageEncoding. 
+var ( + FeatureSet_MessageEncoding_name = map[int32]string{ + 0: "MESSAGE_ENCODING_UNKNOWN", + 1: "LENGTH_PREFIXED", + 2: "DELIMITED", + } + FeatureSet_MessageEncoding_value = map[string]int32{ + "MESSAGE_ENCODING_UNKNOWN": 0, + "LENGTH_PREFIXED": 1, + "DELIMITED": 2, + } +) + +func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding { + p := new(FeatureSet_MessageEncoding) + *p = x + return p +} + +func (x FeatureSet_MessageEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() +} + +func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[14] +} + +func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_MessageEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead. +func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4} +} + +type FeatureSet_JsonFormat int32 + +const ( + FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0 + FeatureSet_ALLOW FeatureSet_JsonFormat = 1 + FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2 +) + +// Enum value maps for FeatureSet_JsonFormat. +var ( + FeatureSet_JsonFormat_name = map[int32]string{ + 0: "JSON_FORMAT_UNKNOWN", + 1: "ALLOW", + 2: "LEGACY_BEST_EFFORT", + } + FeatureSet_JsonFormat_value = map[string]int32{ + "JSON_FORMAT_UNKNOWN": 0, + "ALLOW": 1, + "LEGACY_BEST_EFFORT": 2, + } +) + +func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat { + p := new(FeatureSet_JsonFormat) + *p = x + return p +} + +func (x FeatureSet_JsonFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() +} + +func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[15] +} + +func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_JsonFormat(num) + return nil +} + +// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead. +func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} +} + // Represents the identified object's effect on the element in the original // .proto file. 
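Each FeatureSet_* enum above follows the standard generated shape, so its pointer-returning Enum helper plugs straight into the FeatureSet message generated elsewhere in this file. A hedged construction sketch, assuming the descriptorpb package as vendored here:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	features := &descriptorpb.FeatureSet{
		FieldPresence: descriptorpb.FeatureSet_IMPLICIT.Enum(),
		EnumType:      descriptorpb.FeatureSet_OPEN.Enum(),
		JsonFormat:    descriptorpb.FeatureSet_ALLOW.Enum(),
	}
	fmt.Println(features.GetFieldPresence()) // IMPLICIT
}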
type GeneratedCodeInfo_Annotation_Semantic int32 @@ -716,11 +1174,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -739,7 +1197,7 @@ func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { // Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -822,8 +1280,8 @@ type FileDescriptorProto struct { // // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - // The edition of the proto file, which is an opaque string. - Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` + // The edition of the proto file. + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -942,11 +1400,11 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } -func (x *FileDescriptorProto) GetEdition() string { +func (x *FileDescriptorProto) GetEdition() Edition { if x != nil && x.Edition != nil { return *x.Edition } - return "" + return Edition_EDITION_UNKNOWN } // Describes a message type. @@ -1079,13 +1537,14 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - // go/protobuf-stripping-extension-declarations - // Like Metadata, but we use a repeated field to hold all extension - // declarations. This should avoid the size increases of transforming a large - // extension range into small ranges in generated binaries. + // For external users: DO NOT USE. We are in the process of open sourcing + // extension declaration and executing internal cleanups before it can be + // used externally. Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The verification state of the range. - // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. 
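FileDescriptorProto.Edition above changed from an opaque string (field 13) to the Edition enum (field 14), so callers now populate it through the generated pointer helper rather than a string literal. A hedged sketch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fd := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Syntax:  proto.String("editions"),
		Edition: descriptorpb.Edition_EDITION_2023.Enum(),
	}
	fmt.Println(fd.GetEdition()) // EDITION_2023
}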
Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` } @@ -1141,6 +1600,13 @@ func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declar return nil } +func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { if x != nil && x.Verification != nil { return *x.Verification @@ -1186,12 +1652,12 @@ type FieldDescriptorProto struct { // If true, this is a proto3 "optional". When a proto3 field is optional, it // tracks presence regardless of field type. // - // When proto3_optional is true, this field must be belong to a oneof to - // signal to old proto3 clients that presence is tracked for this field. This - // oneof is known as a "synthetic" oneof, and this field must be its sole - // member (each proto3 optional field gets its own synthetic oneof). Synthetic - // oneofs exist in the descriptor only, and do not generate any API. Synthetic - // oneofs must be ordered after all "real" oneofs. + // When proto3_optional is true, this field must belong to a oneof to signal + // to old proto3 clients that presence is tracked for this field. This oneof + // is known as a "synthetic" oneof, and this field must be its sole member + // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs + // exist in the descriptor only, and do not generate any API. Synthetic oneofs + // must be ordered after all "real" oneofs. // // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still @@ -1738,7 +2204,6 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -1772,6 +2237,8 @@ type FileOptions struct { // is empty. When this option is not set, the package name will be used for // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -1785,7 +2252,6 @@ const ( Default_FileOptions_CcGenericServices = bool(false) Default_FileOptions_JavaGenericServices = bool(false) Default_FileOptions_PyGenericServices = bool(false) - Default_FileOptions_PhpGenericServices = bool(false) Default_FileOptions_Deprecated = bool(false) Default_FileOptions_CcEnableArenas = bool(true) ) @@ -1893,13 +2359,6 @@ func (x *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } -func (x *FileOptions) GetPhpGenericServices() bool { - if x != nil && x.PhpGenericServices != nil { - return *x.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - func (x *FileOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -1963,6 +2422,13 @@ func (x *FileOptions) GetRubyPackage() string { return "" } +func (x *FileOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2006,10 +2472,6 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - // // Whether the message is an automatically generated map entry type for the // maps field. // @@ -2030,6 +2492,10 @@ type MessageOptions struct { // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // Enable the legacy handling of JSON field name conflicts. This lowercases // and strips underscored from the fields before comparison in proto3 only. @@ -2039,11 +2505,13 @@ type MessageOptions struct { // This should only be used as a temporary measure against broken builds due // to the change in behavior for JSON field name conflicts. // - // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // TODO This is legacy behavior we plan to remove once downstream // teams have had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2123,6 +2591,13 @@ func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *MessageOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2147,7 +2622,9 @@ type FieldOptions struct { // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. + // false will avoid using packed encoding. This option is prohibited in + // Editions, but the `repeated_field_encoding` feature can be used to control + // the behavior. Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types @@ -2178,19 +2655,11 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - // - // As of May 2022, lazy verifies the contents of the byte stream during - // parsing. An invalid byte stream will cause the overall parsing to fail. + // Note that lazy message fields are still eagerly verified to check + // ill-formed wireformat or missing required fields. Calling IsInitialized() + // on the outer message would fail if the inner message has missing required + // fields. Failed verification would result in parsing failure (except when + // uninitialized messages are acceptable). Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` // unverified_lazy does no correctness checks on the byte stream. This should // only be used where lazy with verification is prohibitive for performance @@ -2205,11 +2674,12 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. - DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
- Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` - Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2320,17 +2790,23 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { return FieldOptions_RETENTION_UNKNOWN } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { - if x != nil && x.Target != nil { - return *x.Target +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets } - return FieldOptions_TARGET_TYPE_UNKNOWN + return nil } -func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { +func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault { if x != nil { - return x.Targets + return x.EditionDefaults + } + return nil +} + +func (x *FieldOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features } return nil } @@ -2348,6 +2824,8 @@ type OneofOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2384,6 +2862,13 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } +func (x *OneofOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2409,11 +2894,13 @@ type EnumOptions struct { // and strips underscored from the fields before comparison in proto3 only. // The new behavior takes `json_name` into account and applies to proto2 as // well. - // TODO(b/261750190) Remove this legacy behavior once downstream teams have + // TODO Remove this legacy behavior once downstream teams have // had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2477,6 +2964,13 @@ func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *EnumOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2495,13 +2989,20 @@ type EnumValueOptions struct { // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // Indicate that fields annotated with this enum value should not be printed + // out when using debug formats, e.g. when the field contains sensitive + // credentials. + DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for EnumValueOptions fields. const ( - Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_DebugRedact = bool(false) ) func (x *EnumValueOptions) Reset() { @@ -2543,6 +3044,20 @@ func (x *EnumValueOptions) GetDeprecated() bool { return Default_EnumValueOptions_Deprecated } +func (x *EnumValueOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *EnumValueOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_EnumValueOptions_DebugRedact +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2556,6 +3071,8 @@ type ServiceOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, @@ -2602,6 +3119,13 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } +func (x *ServiceOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2628,6 +3152,8 @@ type MethodOptions struct { // this is a formalization for deprecating methods. 
Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2684,6 +3210,13 @@ func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { return Default_MethodOptions_IdempotencyLevel } +func (x *MethodOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2770,28 +3303,193 @@ func (x *UninterpretedOption) GetNegativeIntValue() int64 { if x != nil && x.NegativeIntValue != nil { return *x.NegativeIntValue } - return 0 + return 0 +} + +func (x *UninterpretedOption) GetDoubleValue() float64 { + if x != nil && x.DoubleValue != nil { + return *x.DoubleValue + } + return 0 +} + +func (x *UninterpretedOption) GetStringValue() []byte { + if x != nil { + return x.StringValue + } + return nil +} + +func (x *UninterpretedOption) GetAggregateValue() string { + if x != nil && x.AggregateValue != nil { + return *x.AggregateValue + } + return "" +} + +// TODO Enums in C++ gencode (and potentially other languages) are +// not well scoped. This means that each of the feature enums below can clash +// with each other. The short names we've chosen maximize call-site +// readability, but leave us very open to this scenario. A future feature will +// be designed and implemented to handle this, hopefully before we ever hit a +// conflict here. 
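Like every generated accessor in this file, the new GetFeatures methods added throughout these options types are nil-safe, so lookups against the FeatureSet message defined just below can be chained without intermediate nil checks; unset enum-valued features fall back to their *_UNKNOWN zero values. A minimal sketch (fieldPresenceOf is a hypothetical helper, not part of the diff):

package example

import "google.golang.org/protobuf/types/descriptorpb"

// fieldPresenceOf reads the field_presence feature from a file's options.
// Safe even when opts, or opts.Features, is nil: GetFeatures returns nil,
// and GetFieldPresence on a nil *FeatureSet returns
// FeatureSet_FIELD_PRESENCE_UNKNOWN.
func fieldPresenceOf(opts *descriptorpb.FileOptions) descriptorpb.FeatureSet_FieldPresence {
	return opts.GetFeatures().GetFieldPresence()
}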
+type FeatureSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` +} + +func (x *FeatureSet) Reset() { + *x = FeatureSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet) ProtoMessage() {} + +func (x *FeatureSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead. +func (*FeatureSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence { + if x != nil && x.FieldPresence != nil { + return *x.FieldPresence + } + return FeatureSet_FIELD_PRESENCE_UNKNOWN +} + +func (x *FeatureSet) GetEnumType() FeatureSet_EnumType { + if x != nil && x.EnumType != nil { + return *x.EnumType + } + return FeatureSet_ENUM_TYPE_UNKNOWN +} + +func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding { + if x != nil && x.RepeatedFieldEncoding != nil { + return *x.RepeatedFieldEncoding + } + return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation { + if x != nil && x.Utf8Validation != nil { + return *x.Utf8Validation + } + return FeatureSet_UTF8_VALIDATION_UNKNOWN +} + +func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding { + if x != nil && x.MessageEncoding != nil { + return *x.MessageEncoding + } + return FeatureSet_MESSAGE_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { + if x != nil && x.JsonFormat != nil { + return *x.JsonFormat + } + return FeatureSet_JSON_FORMAT_UNKNOWN +} + +// A compiled specification for the defaults of a set of features. These +// messages are generated from FeatureSet extensions and can be used to seed +// feature resolution. 
The resolution with this object becomes a simple search +// for the closest matching edition, followed by proto merges. +type FeatureSetDefaults struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` + // The minimum supported edition (inclusive) when this was constructed. + // Editions before this will not have defaults. + MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"` + // The maximum known edition (inclusive) when this was constructed. Editions + // after this will not have reliable defaults. + MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` +} + +func (x *FeatureSetDefaults) Reset() { + *x = FeatureSetDefaults{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults) ProtoMessage() {} + +func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (x *UninterpretedOption) GetDoubleValue() float64 { - if x != nil && x.DoubleValue != nil { - return *x.DoubleValue - } - return 0 +// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead. 
+func (*FeatureSetDefaults) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} } -func (x *UninterpretedOption) GetStringValue() []byte { +func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault { if x != nil { - return x.StringValue + return x.Defaults } return nil } -func (x *UninterpretedOption) GetAggregateValue() string { - if x != nil && x.AggregateValue != nil { - return *x.AggregateValue +func (x *FeatureSetDefaults) GetMinimumEdition() Edition { + if x != nil && x.MinimumEdition != nil { + return *x.MinimumEdition } - return "" + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults) GetMaximumEdition() Edition { + if x != nil && x.MaximumEdition != nil { + return *x.MaximumEdition + } + return Edition_EDITION_UNKNOWN } // Encapsulates information about the original source file from which a @@ -2855,7 +3553,7 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2868,7 +3566,7 @@ func (x *SourceCodeInfo) String() string { func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2881,7 +3579,7 @@ func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21} } func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { @@ -2907,7 +3605,7 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2920,7 +3618,7 @@ func (x *GeneratedCodeInfo) String() string { func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2933,7 +3631,7 @@ func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. 
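The "simple search for the closest matching edition, followed by proto merges" that the FeatureSetDefaults documentation above describes could look roughly like the following; resolveFeatures is a hypothetical helper sketched against the getters in this file, not part of the generated code:

package example

import (
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// resolveFeatures merges every default entry at or before the requested
// edition, relying on Defaults being in strict ascending order by edition.
func resolveFeatures(d *descriptorpb.FeatureSetDefaults, ed descriptorpb.Edition) *descriptorpb.FeatureSet {
	if ed < d.GetMinimumEdition() {
		return nil // editions before the minimum carry no defaults
	}
	resolved := &descriptorpb.FeatureSet{}
	for _, def := range d.GetDefaults() {
		if def.GetEdition() > ed {
			break // past the closest matching edition; stop
		}
		if f := def.GetFeatures(); f != nil {
			proto.Merge(resolved, f) // later editions override earlier ones
		}
	}
	return resolved
}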
func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22} } func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { @@ -2956,7 +3654,7 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2969,7 +3667,7 @@ func (x *DescriptorProto_ExtensionRange) String() string { func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3021,7 +3719,7 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3034,7 +3732,7 @@ func (x *DescriptorProto_ReservedRange) String() string { func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3078,10 +3776,6 @@ type ExtensionRangeOptions_Declaration struct { // Metadata.type, Declaration.type must have a leading dot for messages // and enums. Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` - // Deprecated. Please use "repeated". - // - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. - IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` // If true, indicates that the number is reserved in the extension range, // and any extension field with the number will fail to compile. Set this // when a declared extension field is deleted. 
@@ -3094,7 +3788,7 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3107,7 +3801,7 @@ func (x *ExtensionRangeOptions_Declaration) String() string { func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3144,14 +3838,6 @@ func (x *ExtensionRangeOptions_Declaration) GetType() string { return "" } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { - if x != nil && x.IsRepeated != nil { - return *x.IsRepeated - } - return false -} - func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { if x != nil && x.Reserved != nil { return *x.Reserved @@ -3184,7 +3870,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3197,7 +3883,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3227,6 +3913,61 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { return 0 } +type FieldOptions_EditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. +} + +func (x *FieldOptions_EditionDefault) Reset() { + *x = FieldOptions_EditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_EditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_EditionDefault) ProtoMessage() {} + +func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead. 
+func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *FieldOptions_EditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_EditionDefault) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value + } + return "" +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). @@ -3244,7 +3985,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3257,7 +3998,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3287,6 +4028,65 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +// A map from every known edition with a unique set of defaults to its +// defaults. Not all editions may be contained here. For a given edition, +// the defaults at the closest matching edition ordered at or before it should +// be used. This field must be in strict ascending order by edition. +type FeatureSetDefaults_FeatureSetEditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { + *x = FeatureSetDefaults_FeatureSetEditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead. 
+func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + type SourceCodeInfo_Location struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3296,7 +4096,7 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition occurs. + // the root FileDescriptorProto to the place where the definition appears. // For example, this path: // // [ 4, 3, 2, 7, 1 ] @@ -3388,7 +4188,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3401,7 +4201,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3414,7 +4214,7 @@ func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0} } func (x *SourceCodeInfo_Location) GetPath() []int32 { @@ -3475,7 +4275,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3488,7 +4288,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3501,7 +4301,7 @@ func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0} } func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -3550,7 +4350,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3588,250 +4388,250 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, - 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 
0x6d, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, + 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, - 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, - 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, + 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6e, 
0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, + 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 
0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, + 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, + 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 
0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 
0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, + 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, + 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, + 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, + 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 
[vendored generated code trimmed: diff hunks over the raw descriptor byte literal `file_google_protobuf_descriptor_proto_rawDesc` (the serialized google/protobuf/descriptor.proto), including `@@ -3856,259 +4656,419 @@`. The regenerated bytes add the editions-related `features` field (type `google.protobuf.FeatureSet`) to FileOptions, MessageOptions, FieldOptions, OneofOptions, EnumOptions, EnumValueOptions, ServiceOptions, and MethodOptions; add `FieldOptions.edition_defaults` (a nested `EditionDefault` message carrying an `edition` enum and a `value` string) and `EnumValueOptions.debug_redact`; and remove `FileOptions.php_generic_services` and the deprecated singular `FieldOptions.target`, reserving their field numbers. The remaining -/+ churn is the unchanged descriptor content re-wrapped around these insertions.]
0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, + 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, + 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x0a, 0x0a, + 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, + 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, + 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, + 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 
0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, + 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, + 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, + 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, + 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, + 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, + 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, + 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, + 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, + 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, + 0x01, 0x0a, 0x12, 
0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, + 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, + 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, + 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, + 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, + 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, + 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, + 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, + 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07, + 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, + 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, + 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, + 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 
0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, + 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, + 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 
0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, + 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, + 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, + 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, + 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, + 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, + 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, + 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, + 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, + 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, + 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, + 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -4123,103 +5083,136 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return 
file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel - (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 20: google.protobuf.FileOptions - (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + 
(ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat + (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 27: google.protobuf.FileOptions + (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault + (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart + 
(*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 3, // 29: 
google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType - 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 49, // [49:49] is the sub-list for method output_type - 49, // [49:49] is the sub-list for method input_type - 49, // [49:49] is the sub-list for extension type_name - 49, // [49:49] is the sub-list for extension extendee - 0, // [0:49] is the sub-list for field type_name + 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition + 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 10: 
google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 43: 
google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet + 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 71, // [71:71] is the sub-list for method output_type + 71, // [71:71] is the sub-list for method input_type + 71, // [71:71] is the sub-list for extension type_name + 71, // [71:71] is the sub-list for extension extendee + 0, // [0:71] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4475,19 +5468,21 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i 
int) interface{} { - switch v := v.(*SourceCodeInfo); i { + switch v := v.(*FeatureSet); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields + case 3: + return &v.extensionFields default: return nil } } file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GeneratedCodeInfo); i { + switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state case 1: @@ -4499,7 +5494,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ExtensionRange); i { + switch v := v.(*SourceCodeInfo); i { case 0: return &v.state case 1: @@ -4511,7 +5506,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ReservedRange); i { + switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state case 1: @@ -4523,7 +5518,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRangeOptions_Declaration); i { + switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state case 1: @@ -4535,7 +5530,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state case 1: @@ -4547,7 +5542,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { + switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state case 1: @@ -4559,7 +5554,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state case 1: @@ -4571,6 +5566,54 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions_EditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -4588,8 +5631,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 10, - NumMessages: 28, + NumEnums: 17, + NumMessages: 32, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go new file mode 100644 index 0000000000..25de5ae008 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -0,0 +1,177 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: reflect/protodesc/proto/go_features.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +type GoFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether or not to generate the deprecated UnmarshalJSON method for enums. + LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` +} + +func (x *GoFeatures) Reset() { + *x = GoFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoFeatures) ProtoMessage() {} + +func (x *GoFeatures) ProtoReflect() protoreflect.Message { + mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. +func (*GoFeatures) Descriptor() ([]byte, []int) { + return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0} +} + +func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { + if x != nil && x.LegacyUnmarshalJsonEnum != nil { + return *x.LegacyUnmarshalJsonEnum + } + return false +} + +var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FeatureSet)(nil), + ExtensionType: (*GoFeatures)(nil), + Field: 1002, + Name: "google.protobuf.go", + Tag: "bytes,1002,opt,name=go", + Filename: "reflect/protodesc/proto/go_features.proto", + }, +} + +// Extension fields to descriptorpb.FeatureSet. 
+var ( + // optional google.protobuf.GoFeatures go = 1002; + E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0] +) + +var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor + +var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64, + 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, + 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, + 0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, + 0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, + 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f, + 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, +} + +var ( + file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once + file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc +) + +func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte { + file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() { + file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData) + }) + return file_reflect_protodesc_proto_go_features_proto_rawDescData +} + +var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{ + (*GoFeatures)(nil), // 0: google.protobuf.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet +} +var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{ + 1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet + 0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension 
type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_reflect_protodesc_proto_go_features_proto_init() } +func file_reflect_protodesc_proto_go_features_proto_init() { + if File_reflect_protodesc_proto_go_features_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_reflect_protodesc_proto_go_features_proto_goTypes, + DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs, + MessageInfos: file_reflect_protodesc_proto_go_features_proto_msgTypes, + ExtensionInfos: file_reflect_protodesc_proto_go_features_proto_extTypes, + }.Build() + File_reflect_protodesc_proto_go_features_proto = out.File + file_reflect_protodesc_proto_go_features_proto_rawDesc = nil + file_reflect_protodesc_proto_go_features_proto_goTypes = nil + file_reflect_protodesc_proto_go_features_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto new file mode 100644 index 0000000000..d246571296 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto @@ -0,0 +1,28 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +syntax = "proto2"; + +package google.protobuf; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/protobuf/types/gofeaturespb"; + +extend google.protobuf.FeatureSet { + optional GoFeatures go = 1002; +} + +message GoFeatures { + // Whether or not to generate the deprecated UnmarshalJSON method for enums. + optional bool legacy_unmarshal_json_enum = 1 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_ENUM, + edition_defaults = { edition: EDITION_PROTO2, value: "true" }, + edition_defaults = { edition: EDITION_PROTO3, value: "false" } + ]; +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 580b232f47..9de51be540 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -237,7 +237,8 @@ type Any struct { // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. + // type.googleapis.com. As of May 2023, there are no widely used type server + // implementations and no plans to implement one. // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. 
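The Any hunk above is documentation-only, but the type-URL contract it describes is easy to demonstrate: resolution happens by the full message name embedded in the URL suffix, so no type server is ever contacted for type.googleapis.com URLs. A minimal, self-contained sketch (assuming only the public anypb and durationpb packages, not anything in this tree):

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// New marshals the payload and fills in the default type URL prefix,
	// "type.googleapis.com/", followed by the message's full name.
	packed, err := anypb.New(durationpb.New(90 * time.Second))
	if err != nil {
		panic(err)
	}
	fmt.Println(packed.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// MessageIs compares full names only, so no resolver is needed;
	// UnmarshalTo then decodes the embedded bytes.
	var d durationpb.Duration
	if packed.MessageIs(&d) {
		if err := packed.UnmarshalTo(&d); err != nil {
			panic(err)
		}
		fmt.Println(d.AsDuration()) // 1m30s
	}
}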
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go deleted file mode 100644 index e8789cb331..0000000000 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ /dev/null @@ -1,588 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/field_mask.proto - -// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto. -// -// The FieldMask message represents a set of symbolic field paths. -// The paths are specific to some target message type, -// which is not stored within the FieldMask message itself. -// -// # Constructing a FieldMask -// -// The New function is used construct a FieldMask: -// -// var messageType *descriptorpb.DescriptorProto -// fm, err := fieldmaskpb.New(messageType, "field.name", "field.number") -// if err != nil { -// ... // handle error -// } -// ... // make use of fm -// -// The "field.name" and "field.number" paths are valid paths according to the -// google.protobuf.DescriptorProto message. Use of a path that does not correlate -// to valid fields reachable from DescriptorProto would result in an error. -// -// Once a FieldMask message has been constructed, -// the Append method can be used to insert additional paths to the path set: -// -// var messageType *descriptorpb.DescriptorProto -// if err := fm.Append(messageType, "options"); err != nil { -// ... // handle error -// } -// -// # Type checking a FieldMask -// -// In order to verify that a FieldMask represents a set of fields that are -// reachable from some target message type, use the IsValid method: -// -// var messageType *descriptorpb.DescriptorProto -// if fm.IsValid(messageType) { -// ... 
// make use of fm -// } -// -// IsValid needs to be passed the target message type as an input since the -// FieldMask message itself does not store the message type that the set of paths -// are for. -package fieldmaskpb - -import ( - proto "google.golang.org/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sort "sort" - strings "strings" - sync "sync" -) - -// `FieldMask` represents a set of symbolic field paths, for example: -// -// paths: "f.a" -// paths: "f.b.d" -// -// Here `f` represents a field in some root message, `a` and `b` -// fields in the message found in `f`, and `d` a field found in the -// message in `f.b`. -// -// Field masks are used to specify a subset of fields that should be -// returned by a get operation or modified by an update operation. -// Field masks also have a custom JSON encoding (see below). -// -// # Field Masks in Projections -// -// When used in the context of a projection, a response message or -// sub-message is filtered by the API to only contain those fields as -// specified in the mask. For example, if the mask in the previous -// example is applied to a response message as follows: -// -// f { -// a : 22 -// b { -// d : 1 -// x : 2 -// } -// y : 13 -// } -// z: 8 -// -// The result will not contain specific values for fields x,y and z -// (their value will be set to the default, and omitted in proto text -// output): -// -// f { -// a : 22 -// b { -// d : 1 -// } -// } -// -// A repeated field is not allowed except at the last position of a -// paths string. -// -// If a FieldMask object is not present in a get operation, the -// operation applies to all fields (as if a FieldMask of all fields -// had been specified). -// -// Note that a field mask does not necessarily apply to the -// top-level response message. In case of a REST get operation, the -// field mask applies directly to the response, but in case of a REST -// list operation, the mask instead applies to each individual message -// in the returned resource list. In case of a REST custom method, -// other definitions may be used. Where the mask applies will be -// clearly documented together with its declaration in the API. In -// any case, the effect on the returned resource/resources is required -// behavior for APIs. -// -// # Field Masks in Update Operations -// -// A field mask in update operations specifies which fields of the -// targeted resource are going to be updated. The API is required -// to only change the values of the fields as specified in the mask -// and leave the others untouched. If a resource is passed in to -// describe the updated values, the API ignores the values of all -// fields not covered by the mask. -// -// If a repeated field is specified for an update operation, new values will -// be appended to the existing repeated field in the target resource. Note that -// a repeated field is only allowed in the last position of a `paths` string. -// -// If a sub-message is specified in the last position of the field mask for an -// update operation, then new value will be merged into the existing sub-message -// in the target resource. 
-// -// For example, given the target message: -// -// f { -// b { -// d: 1 -// x: 2 -// } -// c: [1] -// } -// -// And an update message: -// -// f { -// b { -// d: 10 -// } -// c: [2] -// } -// -// then if the field mask is: -// -// paths: ["f.b", "f.c"] -// -// then the result will be: -// -// f { -// b { -// d: 10 -// x: 2 -// } -// c: [1, 2] -// } -// -// An implementation may provide options to override this default behavior for -// repeated and message fields. -// -// In order to reset a field's value to the default, the field must -// be in the mask and set to the default value in the provided resource. -// Hence, in order to reset all fields of a resource, provide a default -// instance of the resource and set all fields in the mask, or do -// not provide a mask as described below. -// -// If a field mask is not present on update, the operation applies to -// all fields (as if a field mask of all fields has been specified). -// Note that in the presence of schema evolution, this may mean that -// fields the client does not know and has therefore not filled into -// the request will be reset to their default. If this is unwanted -// behavior, a specific service may require a client to always specify -// a field mask, producing an error if not. -// -// As with get operations, the location of the resource which -// describes the updated values in the request message depends on the -// operation kind. In any case, the effect of the field mask is -// required to be honored by the API. -// -// ## Considerations for HTTP REST -// -// The HTTP kind of an update operation which uses a field mask must -// be set to PATCH instead of PUT in order to satisfy HTTP semantics -// (PUT must only be used for full updates). -// -// # JSON Encoding of Field Masks -// -// In JSON, a field mask is encoded as a single string where paths are -// separated by a comma. Fields name in each path are converted -// to/from lower-camel naming conventions. -// -// As an example, consider the following message declarations: -// -// message Profile { -// User user = 1; -// Photo photo = 2; -// } -// message User { -// string display_name = 1; -// string address = 2; -// } -// -// In proto a field mask for `Profile` may look as such: -// -// mask { -// paths: "user.display_name" -// paths: "photo" -// } -// -// In JSON, the same mask is represented as below: -// -// { -// mask: "user.displayName,photo" -// } -// -// # Field Masks and Oneof Fields -// -// Field masks treat fields in oneofs just as regular fields. Consider the -// following message: -// -// message SampleMessage { -// oneof test_oneof { -// string name = 4; -// SubMessage sub_message = 9; -// } -// } -// -// The field mask can be: -// -// mask { -// paths: "name" -// } -// -// Or: -// -// mask { -// paths: "sub_message" -// } -// -// Note that oneof type names ("test_oneof" in this case) cannot be used in -// paths. -// -// ## Field Mask Verification -// -// The implementation of any API method which has a FieldMask type field in the -// request should verify the included field paths, and return an -// `INVALID_ARGUMENT` error if any path is unmappable. -type FieldMask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The set of field mask paths. - Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` -} - -// New constructs a field mask from a list of paths and verifies that -// each one is valid according to the specified message type. 
-func New(m proto.Message, paths ...string) (*FieldMask, error) { - x := new(FieldMask) - return x, x.Append(m, paths...) -} - -// Union returns the union of all the paths in the input field masks. -func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { - var out []string - out = append(out, mx.GetPaths()...) - out = append(out, my.GetPaths()...) - for _, m := range ms { - out = append(out, m.GetPaths()...) - } - return &FieldMask{Paths: normalizePaths(out)} -} - -// Intersect returns the intersection of all the paths in the input field masks. -func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { - var ss1, ss2 []string // reused buffers for performance - intersect := func(out, in []string) []string { - ss1 = normalizePaths(append(ss1[:0], in...)) - ss2 = normalizePaths(append(ss2[:0], out...)) - out = out[:0] - for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); { - switch s1, s2 := ss1[i1], ss2[i2]; { - case hasPathPrefix(s1, s2): - out = append(out, s1) - i1++ - case hasPathPrefix(s2, s1): - out = append(out, s2) - i2++ - case lessPath(s1, s2): - i1++ - case lessPath(s2, s1): - i2++ - } - } - return out - } - - out := Union(mx, my, ms...).GetPaths() - out = intersect(out, mx.GetPaths()) - out = intersect(out, my.GetPaths()) - for _, m := range ms { - out = intersect(out, m.GetPaths()) - } - return &FieldMask{Paths: normalizePaths(out)} -} - -// IsValid reports whether all the paths are syntactically valid and -// refer to known fields in the specified message type. -// It reports false for a nil FieldMask. -func (x *FieldMask) IsValid(m proto.Message) bool { - paths := x.GetPaths() - return x != nil && numValidPaths(m, paths) == len(paths) -} - -// Append appends a list of paths to the mask and verifies that each one -// is valid according to the specified message type. -// An invalid path is not appended and breaks insertion of subsequent paths. -func (x *FieldMask) Append(m proto.Message, paths ...string) error { - numValid := numValidPaths(m, paths) - x.Paths = append(x.Paths, paths[:numValid]...) - paths = paths[numValid:] - if len(paths) > 0 { - name := m.ProtoReflect().Descriptor().FullName() - return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name) - } - return nil -} - -func numValidPaths(m proto.Message, paths []string) int { - md0 := m.ProtoReflect().Descriptor() - for i, path := range paths { - md := md0 - if !rangeFields(path, func(field string) bool { - // Search the field within the message. - if md == nil { - return false // not within a message - } - fd := md.Fields().ByName(protoreflect.Name(field)) - // The real field name of a group is the message name. - if fd == nil { - gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field))) - if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field { - fd = gd - } - } else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field { - fd = nil - } - if fd == nil { - return false // message has does not have this field - } - - // Identify the next message to search within. - md = fd.Message() // may be nil - - // Repeated fields are only allowed at the last position. - if fd.IsList() || fd.IsMap() { - md = nil - } - - return true - }) { - return i - } - } - return len(paths) -} - -// Normalize converts the mask to its canonical form where all paths are sorted -// and redundant paths are removed. 
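The mask-combining helpers above are easiest to understand by example. Although this vendored copy is being dropped (nothing in the module imports it any longer), the package remains published upstream as google.golang.org/protobuf/types/known/fieldmaskpb. A minimal sketch of the New/Union/Intersect behaviour documented above; DescriptorProto serves only as a convenient message to validate paths against:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	// Paths are validated against a concrete message type at construction.
	m := &descriptorpb.DescriptorProto{}

	a, err := fieldmaskpb.New(m, "name", "field")
	if err != nil {
		panic(err)
	}
	b, err := fieldmaskpb.New(m, "name", "options")
	if err != nil {
		panic(err)
	}

	// Union normalizes: paths come back sorted, with duplicates elided.
	fmt.Println(fieldmaskpb.Union(a, b).GetPaths()) // [field name options]

	// Intersect keeps only paths covered by every input mask.
	fmt.Println(fieldmaskpb.Intersect(a, b).GetPaths()) // [name]
}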
-func (x *FieldMask) Normalize() { - x.Paths = normalizePaths(x.Paths) -} - -func normalizePaths(paths []string) []string { - sort.Slice(paths, func(i, j int) bool { - return lessPath(paths[i], paths[j]) - }) - - // Elide any path that is a prefix match on the previous. - out := paths[:0] - for _, path := range paths { - if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) { - continue - } - out = append(out, path) - } - return out -} - -// hasPathPrefix is like strings.HasPrefix, but further checks for either -// an exact matche or that the prefix is delimited by a dot. -func hasPathPrefix(path, prefix string) bool { - return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.') -} - -// lessPath is a lexicographical comparison where dot is specially treated -// as the smallest symbol. -func lessPath(x, y string) bool { - for i := 0; i < len(x) && i < len(y); i++ { - if x[i] != y[i] { - return (x[i] - '.') < (y[i] - '.') - } - } - return len(x) < len(y) -} - -// rangeFields is like strings.Split(path, "."), but avoids allocations by -// iterating over each field in place and calling a iterator function. -func rangeFields(path string, f func(field string) bool) bool { - for { - var field string - if i := strings.IndexByte(path, '.'); i >= 0 { - field, path = path[:i], path[i:] - } else { - field, path = path, "" - } - - if !f(field) { - return false - } - - if len(path) == 0 { - return true - } - path = strings.TrimPrefix(path, ".") - } -} - -func (x *FieldMask) Reset() { - *x = FieldMask{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FieldMask) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FieldMask) ProtoMessage() {} - -func (x *FieldMask) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead. 
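Note how the hasPathPrefix helper above is stricter than a plain strings.HasPrefix: the prefix must end exactly at a path-segment boundary. A standalone copy of the deleted helper makes the edge cases explicit:

package main

import (
	"fmt"
	"strings"
)

// hasPathPrefix mirrors the deleted helper: the prefix must be the whole
// path, or must end exactly at a '.' separator.
func hasPathPrefix(path, prefix string) bool {
	return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')
}

func main() {
	fmt.Println(hasPathPrefix("a.b.c", "a.b")) // true: delimited by a dot
	fmt.Println(hasPathPrefix("a.bc", "a.b"))  // false: "b" is not a whole segment
	fmt.Println(hasPathPrefix("a.b", "a.b"))   // true: exact match
}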
-func (*FieldMask) Descriptor() ([]byte, []int) { - return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0} -} - -func (x *FieldMask) GetPaths() []string { - if x != nil { - return x.Paths - } - return nil -} - -var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor - -var file_google_protobuf_field_mask_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, - 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, - 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_protobuf_field_mask_proto_rawDescOnce sync.Once - file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc -) - -func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { - file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { - file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) - }) - return file_google_protobuf_field_mask_proto_rawDescData -} - -var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_field_mask_proto_goTypes = []interface{}{ - (*FieldMask)(nil), // 0: google.protobuf.FieldMask -} -var file_google_protobuf_field_mask_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_protobuf_field_mask_proto_init() } -func file_google_protobuf_field_mask_proto_init() { - if File_google_protobuf_field_mask_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FieldMask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - 
NumServices: 0, - }, - GoTypes: file_google_protobuf_field_mask_proto_goTypes, - DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs, - MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, - }.Build() - File_google_protobuf_field_mask_proto = out.File - file_google_protobuf_field_mask_proto_rawDesc = nil - file_google_protobuf_field_mask_proto_goTypes = nil - file_google_protobuf_field_mask_proto_depIdxs = nil -} diff --git a/vendor/honnef.co/go/tools/analysis/code/code.go b/vendor/honnef.co/go/tools/analysis/code/code.go index db7debc22a..f200363d9d 100644 --- a/vendor/honnef.co/go/tools/analysis/code/code.go +++ b/vendor/honnef.co/go/tools/analysis/code/code.go @@ -17,7 +17,6 @@ import ( "honnef.co/go/tools/go/types/typeutil" "honnef.co/go/tools/pattern" - "golang.org/x/exp/typeparams" "golang.org/x/tools/go/analysis" ) @@ -146,7 +145,7 @@ func CallName(pass *analysis.Pass, call *ast.CallExpr) string { switch idx := fun.(type) { case *ast.IndexExpr: fun = idx.X - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: fun = idx.X } @@ -277,7 +276,7 @@ func MayHaveSideEffects(pass *analysis.Pass, expr ast.Expr, purity purity.Result return false case *ast.IndexExpr: return MayHaveSideEffects(pass, expr.X, purity) || MayHaveSideEffects(pass, expr.Index, purity) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: // In theory, none of the checks are necessary, as IndexListExpr only involves types. But there is no harm in // being safe. if MayHaveSideEffects(pass, expr.X, purity) { diff --git a/vendor/honnef.co/go/tools/analysis/code/visit.go b/vendor/honnef.co/go/tools/analysis/code/visit.go index f8bf2d1698..0f0d644a1e 100644 --- a/vendor/honnef.co/go/tools/analysis/code/visit.go +++ b/vendor/honnef.co/go/tools/analysis/code/visit.go @@ -30,7 +30,7 @@ func Match(pass *analysis.Pass, q pattern.Pattern, node ast.Node) (*pattern.Matc // AST inspectors that already filter on nodes we're interested // in. m := &pattern.Matcher{TypesInfo: pass.TypesInfo} - ok := m.Match(q.Root, node) + ok := m.Match(q, node) return m, ok } diff --git a/vendor/honnef.co/go/tools/analysis/facts/deprecated/deprecated.go b/vendor/honnef.co/go/tools/analysis/facts/deprecated/deprecated.go index c558fabb00..dd6d655c32 100644 --- a/vendor/honnef.co/go/tools/analysis/facts/deprecated/deprecated.go +++ b/vendor/honnef.co/go/tools/analysis/facts/deprecated/deprecated.go @@ -48,6 +48,7 @@ func deprecated(pass *analysis.Pass) (interface{}, error) { } return "" } + doDocs := func(names []*ast.Ident, docs []*ast.CommentGroup) { alt := extractDeprecatedMessage(docs) if alt == "" { @@ -86,7 +87,15 @@ func deprecated(pass *analysis.Pass) (interface{}, error) { switch node.Tok { case token.TYPE, token.CONST, token.VAR: docs = append(docs, node.Doc) - return true + for i := range node.Specs { + switch n := node.Specs[i].(type) { + case *ast.ValueSpec: + names = append(names, n.Names...) 
+ case *ast.TypeSpec: + names = append(names, n.Name) + } + } + ret = true default: return false } diff --git a/vendor/honnef.co/go/tools/analysis/facts/nilness/nilness.go b/vendor/honnef.co/go/tools/analysis/facts/nilness/nilness.go index eb4c1bab49..d296b5b0f7 100644 --- a/vendor/honnef.co/go/tools/analysis/facts/nilness/nilness.go +++ b/vendor/honnef.co/go/tools/analysis/facts/nilness/nilness.go @@ -230,6 +230,11 @@ func impl(pass *analysis.Pass, fn *ir.Function, seenFns map[*ir.Function]struct{ return neverNil case *ir.TypeAssert, *ir.ChangeInterface, *ir.Field, *ir.Const, *ir.GenericConst, *ir.Index, *ir.MapLookup, *ir.Parameter, *ir.Recv, *ir.TypeSwitch: return nilly + case *ir.CompositeValue: + // We can get here via composite literals of type parameters, for which typeutil.IsPointerLike doesn't + // currently return false (see https://staticcheck.io/issues/1364). However, we only emit ir.CompositeValue + // for value types, so we know it can't be nil. + return neverNil default: panic(fmt.Sprintf("internal error: unhandled type %T", v)) } diff --git a/vendor/honnef.co/go/tools/analysis/facts/purity/purity.go b/vendor/honnef.co/go/tools/analysis/facts/purity/purity.go index 4afc7a6a58..0f6895a8c3 100644 --- a/vendor/honnef.co/go/tools/analysis/facts/purity/purity.go +++ b/vendor/honnef.co/go/tools/analysis/facts/purity/purity.go @@ -1,5 +1,8 @@ package purity +// TODO(dh): we should split this into two facts, one tracking actual purity, and one tracking side-effects. A function +// that returns a heap allocation isn't pure, but it may be free of side effects. + import ( "go/types" "reflect" @@ -53,6 +56,52 @@ var pureStdlib = map[string]struct{}{ "strings.TrimSpace": {}, "strings.TrimSuffix": {}, "(*net/http.Request).WithContext": {}, + "time.Now": {}, + "time.Parse": {}, + "time.ParseInLocation": {}, + "time.Unix": {}, + "time.UnixMicro": {}, + "time.UnixMilli": {}, + "(time.Time).Add": {}, + "(time.Time).AddDate": {}, + "(time.Time).After": {}, + "(time.Time).Before": {}, + "(time.Time).Clock": {}, + "(time.Time).Compare": {}, + "(time.Time).Date": {}, + "(time.Time).Day": {}, + "(time.Time).Equal": {}, + "(time.Time).Format": {}, + "(time.Time).GoString": {}, + "(time.Time).GobEncode": {}, + "(time.Time).Hour": {}, + "(time.Time).ISOWeek": {}, + "(time.Time).In": {}, + "(time.Time).IsDST": {}, + "(time.Time).IsZero": {}, + "(time.Time).Local": {}, + "(time.Time).Location": {}, + "(time.Time).MarshalBinary": {}, + "(time.Time).MarshalJSON": {}, + "(time.Time).MarshalText": {}, + "(time.Time).Minute": {}, + "(time.Time).Month": {}, + "(time.Time).Nanosecond": {}, + "(time.Time).Round": {}, + "(time.Time).Second": {}, + "(time.Time).String": {}, + "(time.Time).Sub": {}, + "(time.Time).Truncate": {}, + "(time.Time).UTC": {}, + "(time.Time).Unix": {}, + "(time.Time).UnixMicro": {}, + "(time.Time).UnixMilli": {}, + "(time.Time).UnixNano": {}, + "(time.Time).Weekday": {}, + "(time.Time).Year": {}, + "(time.Time).YearDay": {}, + "(time.Time).Zone": {}, + "(time.Time).ZoneBounds": {}, } func purity(pass *analysis.Pass) (interface{}, error) { @@ -99,10 +148,26 @@ func purity(pass *analysis.Pass) (interface{}, error) { return false } + var isBasic func(typ types.Type) bool + isBasic = func(typ types.Type) bool { + switch u := typ.Underlying().(type) { + case *types.Basic: + return true + case *types.Struct: + for i := 0; i < u.NumFields(); i++ { + if !isBasic(u.Field(i).Type()) { + return false + } + } + return true + default: + return false + } + } + for _, param := range fn.Params { - // 
TODO(dh): this may not be strictly correct. pure code - // can, to an extent, operate on non-basic types. - if _, ok := param.Type().Underlying().(*types.Basic); !ok { + // TODO(dh): this may not be strictly correct. pure code can, to an extent, operate on non-basic types. + if !isBasic(param.Type()) { return false } } @@ -134,6 +199,18 @@ func purity(pass *analysis.Pass) (interface{}, error) { } return true } + + var isStackAddr func(ir.Value) bool + isStackAddr = func(v ir.Value) bool { + switch v := v.(type) { + case *ir.Alloc: + return !v.Heap + case *ir.FieldAddr: + return isStackAddr(v.X) + default: + return false + } + } for _, b := range fn.Blocks { for _, ins := range b.Instrs { switch ins := ins.(type) { @@ -154,13 +231,22 @@ func purity(pass *analysis.Pass) (interface{}, error) { case *ir.Panic: return false case *ir.Store: - return false + if !isStackAddr(ins.Addr) { + return false + } case *ir.FieldAddr: - return false + if !isStackAddr(ins.X) { + return false + } case *ir.Alloc: - return false + // TODO(dh): make use of proper escape analysis + if ins.Heap { + return false + } case *ir.Load: - return false + if !isStackAddr(ins.X) { + return false + } } } } diff --git a/vendor/honnef.co/go/tools/analysis/lint/lint.go b/vendor/honnef.co/go/tools/analysis/lint/lint.go index b4e37d6f40..7d116a23d0 100644 --- a/vendor/honnef.co/go/tools/analysis/lint/lint.go +++ b/vendor/honnef.co/go/tools/analysis/lint/lint.go @@ -8,6 +8,7 @@ import ( "go/ast" "go/build" "go/token" + "regexp" "strconv" "strings" @@ -206,21 +207,28 @@ func (v *VersionFlag) String() string { return fmt.Sprintf("1.%d", *v) } -func (v *VersionFlag) Set(s string) error { - if len(s) < 3 { - return fmt.Errorf("invalid Go version: %q", s) - } - if s[0] != '1' { - return fmt.Errorf("invalid Go version: %q", s) - } - if s[1] != '.' { - return fmt.Errorf("invalid Go version: %q", s) +var goVersionRE = regexp.MustCompile(`^(?:go)?1.(\d+).*$`) + +// ParseGoVersion parses Go versions of the form 1.M, 1.M.N, or 1.M.NrcR, with an optional "go" prefix. It assumes that +// versions have already been verified and are valid. 
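+//
+// Illustrative results (a hedged sketch, not part of the upstream source):
+//
+//	ParseGoVersion("go1.21.4") // 21, true
+//	ParseGoVersion("1.18")     // 18, true
+//	ParseGoVersion("2.0")      // 0, false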
+func ParseGoVersion(s string) (int, bool) { + m := goVersionRE.FindStringSubmatch(s) + if m == nil { + return 0, false } - i, err := strconv.Atoi(s[2:]) + n, err := strconv.Atoi(m[1]) if err != nil { + return 0, false + } + return n, true +} + +func (v *VersionFlag) Set(s string) error { + n, ok := ParseGoVersion(s) + if !ok { return fmt.Errorf("invalid Go version: %q", s) } - *v = VersionFlag(i) + *v = VersionFlag(n) return nil } diff --git a/vendor/honnef.co/go/tools/config/config.go b/vendor/honnef.co/go/tools/config/config.go index 8d9f084cff..a815a8a843 100644 --- a/vendor/honnef.co/go/tools/config/config.go +++ b/vendor/honnef.co/go/tools/config/config.go @@ -206,7 +206,7 @@ func parseConfigs(dir string) ([]Config, error) { return nil, err } var cfg Config - _, err = toml.DecodeReader(f, &cfg) + _, err = toml.NewDecoder(f).Decode(&cfg) f.Close() if err != nil { if err, ok := err.(toml.ParseError); ok { diff --git a/vendor/honnef.co/go/tools/go/ast/astutil/util.go b/vendor/honnef.co/go/tools/go/ast/astutil/util.go index 176bcde1d0..e04e1fb0c6 100644 --- a/vendor/honnef.co/go/tools/go/ast/astutil/util.go +++ b/vendor/honnef.co/go/tools/go/ast/astutil/util.go @@ -6,8 +6,6 @@ import ( "go/token" "reflect" "strings" - - "golang.org/x/exp/typeparams" ) func IsIdent(expr ast.Expr, ident string) bool { @@ -132,7 +130,7 @@ func CopyExpr(node ast.Expr) (ast.Expr, bool) { cp.X, ok1 = CopyExpr(cp.X) cp.Index, ok2 = CopyExpr(cp.Index) return &cp, ok1 && ok2 - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: var ok bool cp := *node cp.X, ok = CopyExpr(cp.X) @@ -280,8 +278,8 @@ func Equal(a, b ast.Node) bool { case *ast.IndexExpr: b := b.(*ast.IndexExpr) return Equal(a.X, b.X) && Equal(a.Index, b.Index) - case *typeparams.IndexListExpr: - b := b.(*typeparams.IndexListExpr) + case *ast.IndexListExpr: + b := b.(*ast.IndexListExpr) if len(a.Indices) != len(b.Indices) { return false } diff --git a/vendor/honnef.co/go/tools/go/ir/UPSTREAM b/vendor/honnef.co/go/tools/go/ir/UPSTREAM index 757ebfd599..e92b016b39 100644 --- a/vendor/honnef.co/go/tools/go/ir/UPSTREAM +++ b/vendor/honnef.co/go/tools/go/ir/UPSTREAM @@ -5,5 +5,5 @@ The changes are too many to list here, and it is best to consider this package i Upstream changes still get applied when they address bugs in portions of code we have inherited. The last upstream commit we've looked at was: -915f6209478fe61eb90dbe155a8a1c58655b931f +e854e0228e2ef1cc6e42bbfde1951925096a1272 diff --git a/vendor/honnef.co/go/tools/go/ir/blockopt.go b/vendor/honnef.co/go/tools/go/ir/blockopt.go index d7a0e35676..5378861179 100644 --- a/vendor/honnef.co/go/tools/go/ir/blockopt.go +++ b/vendor/honnef.co/go/tools/go/ir/blockopt.go @@ -31,7 +31,6 @@ func markReachable(b *BasicBlock) { // deleteUnreachableBlocks marks all reachable blocks of f and // eliminates (nils) all others, including possibly cyclic subgraphs. -// func deleteUnreachableBlocks(f *Function) { const white, black = 0, -1 // We borrow b.gaps temporarily as the mark bit. @@ -64,7 +63,6 @@ func deleteUnreachableBlocks(f *Function) { // jumpThreading attempts to apply simple jump-threading to block b, // in which a->b->c become a->c if b is just a Jump. // The result is true if the optimization was applied. 
-// func jumpThreading(f *Function, b *BasicBlock) bool { if b.Index == 0 { return false // don't apply to entry block @@ -118,7 +116,6 @@ func jumpThreading(f *Function, b *BasicBlock) bool { // fuseBlocks attempts to apply the block fusion optimization to block // a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1. // The result is true if the optimization was applied. -// func fuseBlocks(f *Function, a *BasicBlock) bool { if len(a.Succs) != 1 { return false @@ -167,7 +164,6 @@ func fuseBlocks(f *Function, a *BasicBlock) bool { // optimizeBlocks() performs some simple block optimizations on a // completed function: dead block elimination, block fusion, jump // threading. -// func optimizeBlocks(f *Function) { if debugBlockOpt { f.WriteTo(os.Stderr) diff --git a/vendor/honnef.co/go/tools/go/ir/builder.go b/vendor/honnef.co/go/tools/go/ir/builder.go index e5566ade54..82ca94ba14 100644 --- a/vendor/honnef.co/go/tools/go/ir/builder.go +++ b/vendor/honnef.co/go/tools/go/ir/builder.go @@ -63,7 +63,6 @@ type builder struct { // to t or f depending on its value, performing various simplifications. // // Postcondition: fn.currentBlock is nil. -// func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) *If { switch e := e.(type) { case *ast.ParenExpr: @@ -102,7 +101,6 @@ func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) *If { // logicalBinop emits code to fn to evaluate e, a &&- or // ||-expression whose reified boolean value is wanted. // The value is returned. -// func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value { rhs := fn.newBasicBlock("binop.rhs") done := fn.newBasicBlock("binop.done") @@ -161,7 +159,6 @@ func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value { // Multi-result expressions include CallExprs in a multi-value // assignment or return statement, and "value,ok" uses of // TypeAssertExpr, IndexExpr (when X is a map), and Recv. -// func (b *builder) exprN(fn *Function, e ast.Expr) Value { typ := fn.Pkg.typeOf(e).(*types.Tuple) switch e := e.(type) { @@ -203,7 +200,6 @@ func (b *builder) exprN(fn *Function, e ast.Expr) Value { // The result is nil if no special handling was required; in this case // the caller should treat this like an ordinary library function // call. -// func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, source ast.Node) Value { switch obj.Name() { case "make": @@ -303,10 +299,10 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ // addressable expression e as being a potentially escaping pointer // value. For example, in this code: // -// a := A{ -// b: [1]B{B{c: 1}} -// } -// return &a.b[0].c +// a := A{ +// b: [1]B{B{c: 1}} +// } +// return &a.b[0].c // // the application of & causes a.b[0].c to have its address taken, // which means that ultimately the local variable a must be @@ -317,7 +313,6 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ // - &x, including when implicit in method call or composite literals. // - a[:] iff a is an array (not *array) // - references to variables in lexically enclosing functions. 
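The escaping rules listed above correspond to an ordinary Go pattern: taking the address of a field reachable from a local forces the whole aggregate off the stack. A minimal standalone sketch of the doc comment's own motivating example (assuming the compiler's usual escape behaviour):

package main

type B struct{ c int }
type A struct{ b [1]B }

// addrOfField returns the address of a field reachable from the local a;
// because the pointer outlives the call, a itself cannot live on the stack.
func addrOfField() *int {
	a := A{b: [1]B{{c: 1}}}
	return &a.b[0].c
}

func main() {
	println(*addrOfField()) // 1
}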
-// func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) (RET lvalue) { switch e := e.(type) { case *ast.Ident: @@ -358,11 +353,16 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) (RET lvalue) { } wantAddr := true v := b.receiver(fn, e.X, wantAddr, escaping, sel, e) - last := len(sel.Index()) - 1 - return &address{ - addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel), - expr: e.Sel, + index := sel.Index()[len(sel.Index())-1] + vut := typeutil.CoreType(deref(v.Type())).Underlying().(*types.Struct) + fld := vut.Field(index) + // Due to the two phases of resolving AssignStmt, a panic from x.f = p() + // when x is nil is required to come after the side-effects of + // evaluating x and p(). + emit := func(fn *Function) Value { + return emitFieldSelection(fn, v, index, true, e.Sel) } + return &lazyAddress{addr: emit, t: fld.Type(), expr: e.Sel} case *ast.IndexExpr: var x Value @@ -416,12 +416,19 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) (RET lvalue) { panic("unexpected container type in IndexExpr: " + t.String()) } - v := &IndexAddr{ - X: x, - Index: b.expr(fn, e.Index), + // Due to the two phases of resolving AssignStmt, a panic from x[i] = p() + // when x is nil or i is out-of-bounds is required to come after the + // side-effects of evaluating x, i and p(). + index := b.expr(fn, e.Index) + emit := func(fn *Function) Value { + v := &IndexAddr{ + X: x, + Index: index, + } + v.setType(et) + return fn.emit(v, e) } - v.setType(et) - return &address{addr: fn.emit(v, e), expr: e} + return &lazyAddress{addr: emit, t: deref(et), expr: e} case *ast.StarExpr: return &address{addr: b.expr(fn, e.X), expr: e} @@ -472,7 +479,6 @@ func (sb *storebuf) emit(fn *Function) { // storebuf sb so that they can be executed later. This allows correct // in-place update of existing variables when the RHS is a composite // literal that may reference parts of the LHS. -// func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf, source ast.Node) { // Can we initialize it in place? if e, ok := unparen(e).(*ast.CompositeLit); ok { @@ -520,7 +526,7 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb * switch typeutil.CoreType(loc.typ()).Underlying().(type) { case *types.Struct, *types.Array: if sb != nil { - // Make sure we don't emit DebugRefs before the store has actually occured + // Make sure we don't emit DebugRefs before the store has actually occurred if ref := makeDebugRef(fn, e, addr, true); ref != nil { sb.storeDebugRef(ref) } @@ -545,7 +551,6 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb * // expr lowers a single-result expression e to IR form, emitting code // to fn and returning the Value defined by the expression. 
-// func (b *builder) expr(fn *Function, e ast.Expr) Value { e = unparen(e) @@ -687,12 +692,12 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { } var low, high, max Value - if e.High != nil { - high = b.expr(fn, e.High) - } if e.Low != nil { low = b.expr(fn, e.Low) } + if e.High != nil { + high = b.expr(fn, e.High) + } if e.Slice3 { max = b.expr(fn, e.Max) } @@ -719,8 +724,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { if _, ok := obj.(*types.Var); ok { return emitLoad(fn, v, e) // var (address) } - instances := typeparams.GetInstances(fn.Pkg.info) - if instance, ok := instances[e]; ok { + if instance, ok := fn.Pkg.info.Instances[e]; ok { // Instantiated generic function return makeInstance(fn.Prog, v.(*Function), instance.Type.(*types.Signature), instance.TypeArgs) } @@ -837,7 +841,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { panic("unexpected container type in IndexExpr: " + t.String()) } - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: // Instantiating a generic function return b.expr(fn, e.X) @@ -866,7 +870,6 @@ func (b *builder) stmtList(fn *Function, list []ast.Stmt) { // must thus be addressable. // // escaping is defined as per builder.addr(). -// func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection, source ast.Node) Value { var v Value if wantAddr && !sel.Indirect() && !isPointer(fn.Pkg.typeOf(e)) { @@ -886,7 +889,6 @@ func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, se // setCallFunc populates the function parts of a CallCommon structure // (Func, Method, Recv, Args[0]) based on the kind of invocation // occurring in e. -// func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { // Is this a method call? if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok { @@ -953,7 +955,6 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { // emitCallArgs emits to f code for the actual parameters of call e to // a (possibly built-in) function of effective type sig. // The argument values are appended to args, which is then returned. -// func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value { // f(x, y, z...): pass slice z straight through. if e.Ellipsis != 0 { @@ -1024,7 +1025,6 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx // setCall emits to fn code to evaluate all the parameters of a function // call e, and populates *c with those values. -// func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) { // First deal with the f(...) part and optional receiver. b.setCallFunc(fn, e, c) @@ -1039,13 +1039,11 @@ func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) { // assignOp emits to fn code to perform loc = val. func (b *builder) assignOp(fn *Function, loc lvalue, val Value, op token.Token, source ast.Node) { - oldv := loc.load(fn, source) - loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, val, oldv.Type(), source), loc.typ(), source), source) + loc.store(fn, emitArith(fn, op, loc.load(fn, source), val, loc.typ(), source), source) } // localValueSpec emits to fn code to define all of the vars in the // function-local ValueSpec, spec. 
-// func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { switch { case len(spec.Values) == len(spec.Names): @@ -1088,7 +1086,6 @@ func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { // isDef is true if this is a short variable declaration (:=). // // Note the similarity with localValueSpec. -// func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool, source ast.Node) { // Side effects of all LHSs and RHSs must occur in left-to-right order. lvals := make([]lvalue, len(lhss)) @@ -1154,46 +1151,61 @@ func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { // // Because the elements of a composite literal may refer to the // variables being updated, as in the second line below, +// // x := T{a: 1} // x = T{a: x.a} -// all the reads must occur before all the writes. Thus all stores to -// loc are emitted to the storebuf sb for later execution. +// +// all the reads must occur before all the writes. This is implicitly handled by the write buffering effected by +// compositeElement and explicitly by the storebuf for when we don't use CompositeValue. // // A CompositeLit may have pointer type only in the recursive (nested) // case when the type name is implicit. e.g. in []*T{{}}, the inner // literal has type *T behaves like &T{}. // In that case, addr must hold a T, not a *T. -// func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { typ := deref(fn.Pkg.typeOf(e)) switch t := typeutil.CoreType(typ).(type) { case *types.Struct: - if !isZero && len(e.Elts) != t.NumFields() { - // memclear - sb.store(&address{addr, nil}, zeroValue(fn, deref(addr.Type()), e), e) - isZero = true - } - for i, e := range e.Elts { - fieldIndex := i - if kv, ok := e.(*ast.KeyValueExpr); ok { - fname := kv.Key.(*ast.Ident).Name - for i, n := 0, t.NumFields(); i < n; i++ { - sf := t.Field(i) - if sf.Name() == fname { - fieldIndex = i - e = kv.Value - break + lvalue := &address{addr: addr, expr: e} + if len(e.Elts) == 0 { + if !isZero { + sb.store(lvalue, zeroValue(fn, deref(addr.Type()), e), e) + } + } else { + v := &CompositeValue{ + Values: make([]Value, t.NumFields()), + } + for i := 0; i < t.NumFields(); i++ { + v.Values[i] = emitConst(fn, zeroConst(t.Field(i).Type())) + } + v.setType(typ) + + for i, e := range e.Elts { + fieldIndex := i + if kv, ok := e.(*ast.KeyValueExpr); ok { + fname := kv.Key.(*ast.Ident).Name + for i, n := 0, t.NumFields(); i < n; i++ { + sf := t.Field(i) + if sf.Name() == fname { + fieldIndex = i + e = kv.Value + break + } } } + + ce := &compositeElement{ + cv: v, + idx: fieldIndex, + t: t.Field(fieldIndex).Type(), + expr: e, + } + b.assign(fn, ce, e, isZero, sb, e) + v.Bitmap.SetBit(&v.Bitmap, fieldIndex, 1) + v.NumSet++ } - sf := t.Field(fieldIndex) - faddr := &FieldAddr{ - X: addr, - Field: fieldIndex, - } - faddr.setType(types.NewPointer(sf.Type())) - fn.emit(faddr, e) - b.assign(fn, &address{addr: faddr, expr: e}, e, isZero, sb, e) + fn.emit(v, e) + sb.store(lvalue, v, e) } case *types.Array, *types.Slice: @@ -1207,43 +1219,96 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero case *types.Array: at = t array = addr + } - if !isZero && int64(len(e.Elts)) != at.Len() { - // memclear - sb.store(&address{array, nil}, zeroValue(fn, deref(array.Type()), e), e) + var final Value + if len(e.Elts) == 0 { + if !isZero { + zc := emitConst(fn, zeroConst(at)) + final = zc } - } + } else { + if at.Len() == int64(len(e.Elts)) { + // The literal specifies all 
elements, so we can use a composite value + v := &CompositeValue{ + Values: make([]Value, at.Len()), + } + zc := emitConst(fn, zeroConst(at.Elem())) + for i := range v.Values { + v.Values[i] = zc + } + v.setType(at) - var idx *Const - for _, e := range e.Elts { - if kv, ok := e.(*ast.KeyValueExpr); ok { - idx = b.expr(fn, kv.Key).(*Const) - e = kv.Value - } else { - var idxval int64 - if idx != nil { - idxval = idx.Int64() + 1 + var idx *Const + for _, e := range e.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + idx = b.expr(fn, kv.Key).(*Const) + e = kv.Value + } else { + var idxval int64 + if idx != nil { + idxval = idx.Int64() + 1 + } + idx = emitConst(fn, intConst(idxval)).(*Const) + } + + iaddr := &compositeElement{ + cv: v, + idx: int(idx.Int64()), + t: at.Elem(), + expr: e, + } + + b.assign(fn, iaddr, e, true, sb, e) + v.Bitmap.SetBit(&v.Bitmap, int(idx.Int64()), 1) + v.NumSet++ } - idx = emitConst(fn, intConst(idxval)).(*Const) - } - iaddr := &IndexAddr{ - X: array, - Index: idx, - } - iaddr.setType(types.NewPointer(at.Elem())) - fn.emit(iaddr, e) - if t != at { // slice - // backing array is unaliased => storebuf not needed. - b.assign(fn, &address{addr: iaddr, expr: e}, e, true, nil, e) + final = v + fn.emit(v, e) } else { - b.assign(fn, &address{addr: iaddr, expr: e}, e, true, sb, e) + // Not all elements are specified. Populate the array with a series of stores, to guard against literals + // like []int{1<<62: 1}. + if !isZero { + // memclear + sb.store(&address{array, nil}, zeroValue(fn, deref(array.Type()), e), e) + } + + var idx *Const + for _, e := range e.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + idx = b.expr(fn, kv.Key).(*Const) + e = kv.Value + } else { + var idxval int64 + if idx != nil { + idxval = idx.Int64() + 1 + } + idx = emitConst(fn, intConst(idxval)).(*Const) + } + iaddr := &IndexAddr{ + X: array, + Index: idx, + } + iaddr.setType(types.NewPointer(at.Elem())) + fn.emit(iaddr, e) + if t != at { // slice + // backing array is unaliased => storebuf not needed. + b.assign(fn, &address{addr: iaddr, expr: e}, e, true, nil, e) + } else { + b.assign(fn, &address{addr: iaddr, expr: e}, e, true, sb, e) + } + } } } - if t != at { // slice + if final != nil { + sb.store(&address{addr: array}, final, e) + } s := &Slice{X: array} s.setType(typ) sb.store(&address{addr: addr, expr: e}, fn.emit(s, e), e) + } else if final != nil { + sb.store(&address{addr: array, expr: e}, final, e) } case *types.Map: @@ -1395,7 +1460,6 @@ func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { // switchStmt emits to fn code for the switch statement s, optionally // labelled by label. -// func (b *builder) switchStmtDynamic(fn *Function, s *ast.SwitchStmt, label *lblock) { // We treat SwitchStmt like a sequential if-else chain. // Multiway dispatch can be recovered later by irutil.Switches() @@ -1656,7 +1720,6 @@ func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lbl // selectStmt emits to fn code for the select statement s, optionally // labelled by label. -// func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) (noreturn bool) { if len(s.Body.List) == 0 { instr := &Select{Blocking: true} @@ -1843,7 +1906,6 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) (no // forStmt emits to fn code for the for statement s, optionally // labelled by label. -// func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { // ...init... 
// jump loop @@ -1900,7 +1962,6 @@ func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { // over array, *array or slice value x. // The v result is defined only if tv is non-nil. // forPos is the position of the "for" token. -// func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, source ast.Node) (k, v Value, loop, done *BasicBlock) { // // length = len(x) @@ -1998,7 +2059,6 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, source ast. // Range/Next/Extract to iterate over map or string value x. // tk and tv are the types of the key/value results k and v, or nil // if the respective component is not wanted. -// func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, source ast.Node) (k, v Value, loop, done *BasicBlock) { // // it = range x @@ -2065,7 +2125,6 @@ func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, source ast // tk is the channel's element type, or nil if the k result is // not wanted // pos is the position of the '=' or ':=' token. -// func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, source ast.Node) (k Value, loop, done *BasicBlock) { // // loop: (target of continue) @@ -2124,7 +2183,6 @@ func (v *variable) load() Value { // rangeStmt emits to fn code for the range statement s, optionally // labelled by label. -// func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock, source ast.Node) { var tk, tv types.Type if s.Key != nil && !isBlankIdent(s.Key) { @@ -2331,7 +2389,7 @@ start: block = fn.labelledBlock(s.Label)._goto } j := emitJump(fn, block, s) - j.Comment = s.Tok.String() + j.comment = s.Tok.String() fn.currentBlock = fn.newBasicBlock("unreachable") case *ast.BlockStmt: @@ -2471,7 +2529,6 @@ func (b *builder) buildFunction(fn *Function) { // buildFuncDecl builds IR code for the function or method declared // by decl in package pkg. -// func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { id := decl.Name if isBlankIdent(id) { @@ -2494,7 +2551,6 @@ func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { // need only build a single package. // // Build is idempotent and thread-safe. -// func (prog *Program) Build() { for _, p := range prog.packages { p.Build() @@ -2508,7 +2564,6 @@ func (prog *Program) Build() { // error-free). // // Build is idempotent and thread-safe. -// func (p *Package) Build() { p.buildOnce.Do(p.build) } func (p *Package) build() { diff --git a/vendor/honnef.co/go/tools/go/ir/const.go b/vendor/honnef.co/go/tools/go/ir/const.go index 9dd7e83b9d..0faf3852a6 100644 --- a/vendor/honnef.co/go/tools/go/ir/const.go +++ b/vendor/honnef.co/go/tools/go/ir/const.go @@ -19,7 +19,6 @@ import ( // NewConst returns a new constant of the specified value and type. // val must be valid according to the specification of Const.Value. -// func NewConst(val constant.Value, typ types.Type) *Const { return &Const{ register: register{ @@ -37,7 +36,6 @@ func intConst(i int64) *Const { // nilConst returns a nil constant of the specified type, which may // be any reference type, including interfaces. 
-// func nilConst(typ types.Type) *Const { return NewConst(nil, typ) } @@ -58,7 +56,7 @@ func zeroConst(t types.Type) Constant { switch typ := tset.CoreType().(type) { case *types.Struct: - values := make([]Constant, typ.NumFields()) + values := make([]Value, typ.NumFields()) for i := 0; i < typ.NumFields(); i++ { values[i] = zeroConst(typ.Field(i).Type()) } @@ -67,7 +65,7 @@ func zeroConst(t types.Type) Constant { Values: values, } case *types.Tuple: - values := make([]Constant, typ.Len()) + values := make([]Value, typ.Len()) for i := 0; i < typ.Len(); i++ { values[i] = zeroConst(typ.At(i).Type()) } @@ -77,7 +75,7 @@ func zeroConst(t types.Type) Constant { } } - isNillable := func(term *typeparams.Term) bool { + isNillable := func(term *types.Term) bool { switch typ := term.Type().Underlying().(type) { case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature, *typeutil.Iterator: return true @@ -93,8 +91,8 @@ func zeroConst(t types.Type) Constant { } } - isInfo := func(info types.BasicInfo) func(*typeparams.Term) bool { - return func(term *typeparams.Term) bool { + isInfo := func(info types.BasicInfo) func(*types.Term) bool { + return func(term *types.Term) bool { basic, ok := term.Type().Underlying().(*types.Basic) if !ok { return false @@ -103,7 +101,7 @@ func zeroConst(t types.Type) Constant { } } - isArray := func(term *typeparams.Term) bool { + isArray := func(term *types.Term) bool { _, ok := term.Type().Underlying().(*types.Array) return ok } @@ -147,6 +145,11 @@ func (c *Const) RelString(from *types.Package) string { } func (c *Const) String() string { + if c.block == nil { + // Constants don't have a block till late in the compilation process. But we want to print consts during + // debugging. + return c.RelString(nil) + } return c.RelString(c.Parent().pkg()) } @@ -162,7 +165,7 @@ func (v *AggregateConst) RelString(pkg *types.Package) string { values := make([]string, len(v.Values)) for i, v := range v.Values { if v != nil { - values[i] = v.RelString(pkg) + values[i] = v.Name() } else { values[i] = "nil" } @@ -170,6 +173,13 @@ func (v *AggregateConst) RelString(pkg *types.Package) string { return fmt.Sprintf("AggregateConst <%s> (%s)", relType(v.Type(), pkg), strings.Join(values, ", ")) } +func (v *AggregateConst) String() string { + if v.block == nil { + return v.RelString(nil) + } + return v.RelString(v.Parent().pkg()) +} + func (v *GenericConst) RelString(pkg *types.Package) string { return fmt.Sprintf("GenericConst <%s>", relType(v.Type(), pkg)) } @@ -178,10 +188,6 @@ func (v *GenericConst) String() string { return v.RelString(v.Parent().pkg()) } -func (v *AggregateConst) String() string { - return v.RelString(v.Parent().pkg()) -} - // IsNil returns true if this constant represents a typed or untyped nil value. func (c *Const) IsNil() bool { return c.Value == nil @@ -189,7 +195,6 @@ func (c *Const) IsNil() bool { // Int64 returns the numeric value of this constant truncated to fit // a signed 64-bit integer. -// func (c *Const) Int64() int64 { switch x := constant.ToInt(c.Value); x.Kind() { case constant.Int: @@ -206,7 +211,6 @@ func (c *Const) Int64() int64 { // Uint64 returns the numeric value of this constant truncated to fit // an unsigned 64-bit integer. -// func (c *Const) Uint64() uint64 { switch x := constant.ToInt(c.Value); x.Kind() { case constant.Int: @@ -223,7 +227,6 @@ func (c *Const) Uint64() uint64 { // Float64 returns the numeric value of this constant truncated to fit // a float64. 
-// func (c *Const) Float64() float64 { f, _ := constant.Float64Val(c.Value) return f @@ -231,7 +234,6 @@ func (c *Const) Float64() float64 { // Complex128 returns the complex value of this constant truncated to // fit a complex128. -// func (c *Const) Complex128() complex128 { re, _ := constant.Float64Val(constant.Real(c.Value)) im, _ := constant.Float64Val(constant.Imag(c.Value)) @@ -253,7 +255,15 @@ func (c *AggregateConst) equal(o Constant) bool { return false } // TODO(dh): don't use == for types, this will miss identical pointer types, among others - return c.typ == oc.typ + if c.typ != oc.typ { + return false + } + for i, v := range c.Values { + if !v.(Constant).equal(oc.Values[i].(Constant)) { + return false + } + } + return true } func (c *ArrayConst) equal(o Constant) bool { diff --git a/vendor/honnef.co/go/tools/go/ir/create.go b/vendor/honnef.co/go/tools/go/ir/create.go index 5e7f6ed94b..28e7da7e94 100644 --- a/vendor/honnef.co/go/tools/go/ir/create.go +++ b/vendor/honnef.co/go/tools/go/ir/create.go @@ -27,7 +27,6 @@ const avgInstructionsPerBlock = 16 // NewProgram returns a new IR Program. // // mode controls diagnostics and checking during IR construction. -// func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { prog := &Program{ Fset: fset, @@ -51,7 +50,6 @@ func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { // For objects from Go source code, syntax is the associated syntax // tree (for funcs and vars only); it will be used during the build // phase. -// func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { name := obj.Name() switch obj := obj.(type) { @@ -128,7 +126,6 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { // membersFromDecl populates package pkg with members for each // typechecker object (var, func, const or type) associated with the // specified decl. -// func membersFromDecl(pkg *Package, decl ast.Decl) { switch decl := decl.(type) { case *ast.GenDecl: // import, const, type or var @@ -177,7 +174,6 @@ func membersFromDecl(pkg *Package, decl ast.Decl) { // // The real work of building IR form for each function is not done // until a subsequent call to Package.Build(). -// func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { p := &Package{ Prog: prog, @@ -260,7 +256,6 @@ var printMu sync.Mutex // AllPackages returns a new slice containing all packages in the // program prog in unspecified order. -// func (prog *Program) AllPackages() []*Package { pkgs := make([]*Package, 0, len(prog.packages)) for _, pkg := range prog.packages { @@ -282,7 +277,6 @@ func (prog *Program) AllPackages() []*Package { // false---yet this function remains very convenient. // Clients should use (*Program).Package instead where possible. // IR doesn't really need a string-keyed map of packages. -// func (prog *Program) ImportedPackage(path string) *Package { return prog.imported[path] } diff --git a/vendor/honnef.co/go/tools/go/ir/doc.go b/vendor/honnef.co/go/tools/go/ir/doc.go index 7158a0aec2..5ee6637db4 100644 --- a/vendor/honnef.co/go/tools/go/ir/doc.go +++ b/vendor/honnef.co/go/tools/go/ir/doc.go @@ -39,66 +39,67 @@ // // The primary interfaces of this package are: // -// - Member: a named member of a Go package. -// - Value: an expression that yields a value. -// - Instruction: a statement that consumes values and performs computation. 
-// - Node: a Value or Instruction (emphasizing its membership in the IR value graph) +// - Member: a named member of a Go package. +// - Value: an expression that yields a value. +// - Instruction: a statement that consumes values and performs computation. +// - Node: a Value or Instruction (emphasizing its membership in the IR value graph) // // A computation that yields a result implements both the Value and // Instruction interfaces. The following table shows for each // concrete type which of these interfaces it implements. // -// Value? Instruction? Member? -// *Alloc ✔ ✔ -// *BinOp ✔ ✔ -// *BlankStore ✔ -// *Builtin ✔ -// *Call ✔ ✔ -// *ChangeInterface ✔ ✔ -// *ChangeType ✔ ✔ -// *Const ✔ ✔ -// *Convert ✔ ✔ -// *DebugRef ✔ -// *Defer ✔ ✔ -// *Extract ✔ ✔ -// *Field ✔ ✔ -// *FieldAddr ✔ ✔ -// *FreeVar ✔ -// *Function ✔ ✔ (func) -// *Global ✔ ✔ (var) -// *Go ✔ ✔ -// *If ✔ -// *Index ✔ ✔ -// *IndexAddr ✔ ✔ -// *Jump ✔ -// *Load ✔ ✔ -// *MakeChan ✔ ✔ -// *MakeClosure ✔ ✔ -// *MakeInterface ✔ ✔ -// *MakeMap ✔ ✔ -// *MakeSlice ✔ ✔ -// *MapLookup ✔ ✔ -// *MapUpdate ✔ ✔ -// *NamedConst ✔ (const) -// *Next ✔ ✔ -// *Panic ✔ -// *Parameter ✔ ✔ -// *Phi ✔ ✔ -// *Range ✔ ✔ -// *Recv ✔ ✔ -// *Return ✔ -// *RunDefers ✔ -// *Select ✔ ✔ -// *Send ✔ ✔ -// *Sigma ✔ ✔ -// *Slice ✔ ✔ -// *SliceToArrayPointer ✔ ✔ -// *Store ✔ ✔ -// *StringLookup ✔ ✔ -// *Type ✔ (type) -// *TypeAssert ✔ ✔ -// *UnOp ✔ ✔ -// *Unreachable ✔ +// Value? Instruction? Member? +// *Alloc ✔ ✔ +// *BinOp ✔ ✔ +// *BlankStore ✔ +// *Builtin ✔ +// *Call ✔ ✔ +// *ChangeInterface ✔ ✔ +// *ChangeType ✔ ✔ +// *Const ✔ ✔ +// *Convert ✔ ✔ +// *DebugRef ✔ +// *Defer ✔ ✔ +// *Extract ✔ ✔ +// *Field ✔ ✔ +// *FieldAddr ✔ ✔ +// *FreeVar ✔ +// *Function ✔ ✔ (func) +// *Global ✔ ✔ (var) +// *Go ✔ ✔ +// *If ✔ +// *Index ✔ ✔ +// *IndexAddr ✔ ✔ +// *Jump ✔ +// *Load ✔ ✔ +// *MakeChan ✔ ✔ +// *MakeClosure ✔ ✔ +// *MakeInterface ✔ ✔ +// *MakeMap ✔ ✔ +// *MakeSlice ✔ ✔ +// *MapLookup ✔ ✔ +// *MapUpdate ✔ ✔ +// *NamedConst ✔ (const) +// *Next ✔ ✔ +// *Panic ✔ +// *Parameter ✔ ✔ +// *Phi ✔ ✔ +// *Range ✔ ✔ +// *Recv ✔ ✔ +// *Return ✔ +// *RunDefers ✔ +// *Select ✔ ✔ +// *Send ✔ ✔ +// *Sigma ✔ ✔ +// *Slice ✔ ✔ +// *SliceToArrayPointer ✔ ✔ +// *SliceToArray ✔ ✔ +// *Store ✔ ✔ +// *StringLookup ✔ ✔ +// *Type ✔ (type) +// *TypeAssert ✔ ✔ +// *UnOp ✔ ✔ +// *Unreachable ✔ // // Other key types in this package include: Program, Package, Function // and BasicBlock. @@ -126,5 +127,4 @@ // of trying to determine corresponding elements across the four // domains of source locations, ast.Nodes, types.Objects, // ir.Values/Instructions. -// package ir diff --git a/vendor/honnef.co/go/tools/go/ir/dom.go b/vendor/honnef.co/go/tools/go/ir/dom.go index 13ecd47cb9..4febd284ba 100644 --- a/vendor/honnef.co/go/tools/go/ir/dom.go +++ b/vendor/honnef.co/go/tools/go/ir/dom.go @@ -29,12 +29,10 @@ import ( // Idom returns the block that immediately dominates b: // its parent in the dominator tree, if any. // The entry node (b.Index==0) does not have a parent. -// func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom } // Dominees returns the list of blocks that b immediately dominates: // its children in the dominator tree. -// func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children } // Dominates reports whether b dominates c. @@ -50,7 +48,6 @@ func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre // DomPreorder returns a new slice containing the blocks of f in // dominator tree preorder. 
-// func (f *Function) DomPreorder() []*BasicBlock { n := len(f.Blocks) order := make(byDomPreorder, n) @@ -68,7 +65,6 @@ type domInfo struct { // buildDomTree computes the dominator tree of f using the LT algorithm. // Precondition: all blocks are reachable (e.g. optimizeBlocks has been run). -// func buildDomTree(fn *Function) { // The step numbers refer to the original LT paper; the // reordering is due to Georgiadis. @@ -277,7 +273,6 @@ func buildPostDomTree(fn *Function) { // numberDomTree sets the pre- and post-order numbers of a depth-first // traversal of the dominator tree rooted at v. These are used to // answer dominance queries in constant time. -// func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) { v.dom.pre = pre pre++ @@ -292,7 +287,6 @@ func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) { // numberPostDomTree sets the pre- and post-order numbers of a depth-first // traversal of the post-dominator tree rooted at v. These are used to // answer post-dominance queries in constant time. -// func numberPostDomTree(v *BasicBlock, pre, post int32) (int32, int32) { v.pdom.pre = pre pre++ @@ -310,7 +304,6 @@ func numberPostDomTree(v *BasicBlock, pre, post int32) (int32, int32) { // computed by the LT algorithm by comparing against the dominance // relation computed by a naive Kildall-style forward dataflow // analysis (Algorithm 10.16 from the "Dragon" book). -// func sanityCheckDomTree(f *Function) { n := len(f.Blocks) @@ -395,6 +388,7 @@ func sanityCheckDomTree(f *Function) { // Printing functions ---------------------------------------- // printDomTree prints the dominator tree as text, using indentation. +// //lint:ignore U1000 used during debugging func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) @@ -405,6 +399,7 @@ func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { // printDomTreeDot prints the dominator tree of f in AT&T GraphViz // (.dot) format. +// //lint:ignore U1000 used during debugging func printDomTreeDot(buf io.Writer, f *Function) { fmt.Fprintln(buf, "//", f) @@ -432,6 +427,7 @@ func printDomTreeDot(buf io.Writer, f *Function) { } // printDomTree prints the dominator tree as text, using indentation. +// //lint:ignore U1000 used during debugging func printPostDomTreeText(buf io.Writer, v *BasicBlock, indent int) { fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) @@ -442,6 +438,7 @@ func printPostDomTreeText(buf io.Writer, v *BasicBlock, indent int) { // printDomTreeDot prints the dominator tree of f in AT&T GraphViz // (.dot) format. +// //lint:ignore U1000 used during debugging func printPostDomTreeDot(buf io.Writer, f *Function) { fmt.Fprintln(buf, "//", f) diff --git a/vendor/honnef.co/go/tools/go/ir/emit.go b/vendor/honnef.co/go/tools/go/ir/emit.go index 7b23041f36..f6a1ef373b 100644 --- a/vendor/honnef.co/go/tools/go/ir/emit.go +++ b/vendor/honnef.co/go/tools/go/ir/emit.go @@ -20,7 +20,6 @@ import ( // emitNew emits to f a new (heap Alloc) instruction allocating an // object of type typ. pos is the optional source location. -// func emitNew(f *Function, typ types.Type, source ast.Node) *Alloc { v := &Alloc{Heap: true} v.setType(types.NewPointer(typ)) @@ -30,7 +29,6 @@ func emitNew(f *Function, typ types.Type, source ast.Node) *Alloc { // emitLoad emits to f an instruction to load the address addr into a // new temporary, and returns the value so defined. 
-// func emitLoad(f *Function, addr Value, source ast.Node) *Load { v := &Load{X: addr} v.setType(deref(addr.Type())) @@ -49,7 +47,6 @@ func emitRecv(f *Function, ch Value, commaOk bool, typ types.Type, source ast.No // emitDebugRef emits to f a DebugRef pseudo-instruction associating // expression e with value v. -// func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) { ref := makeDebugRef(f, e, v, isAddr) if ref == nil { @@ -89,7 +86,6 @@ func makeDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) *DebugRef { // where op is an eager shift, logical or arithmetic operation. // (Use emitCompare() for comparisons and Builder.logicalBinop() for // non-eager operations.) -// func emitArith(f *Function, op token.Token, x, y Value, t types.Type, source ast.Node) Value { switch op { case token.SHL, token.SHR: @@ -124,7 +120,6 @@ func emitArith(f *Function, op token.Token, x, y Value, t types.Type, source ast // emitCompare emits to f code compute the boolean result of // comparison comparison 'x op y'. -// func emitCompare(f *Function, op token.Token, x, y Value, source ast.Node) Value { xt := x.Type().Underlying() yt := y.Type().Underlying() @@ -169,7 +164,6 @@ func emitCompare(f *Function, op token.Token, x, y Value, source ast.Node) Value // isValuePreserving returns true if a conversion from ut_src to // ut_dst is value-preserving, i.e. just a change of type. // Precondition: neither argument is a named type. -// func isValuePreserving(ut_src, ut_dst types.Type) bool { // Identical underlying types? if types.IdenticalIgnoreTags(ut_dst, ut_src) { @@ -194,7 +188,6 @@ func isValuePreserving(ut_src, ut_dst types.Type) bool { // and returns the converted value. Implicit conversions are required // by language assignability rules in assignments, parameter passing, // etc. -// func emitConv(f *Function, val Value, t_dst types.Type, source ast.Node) Value { t_src := val.Type() @@ -210,8 +203,8 @@ func emitConv(f *Function, val Value, t_dst types.Type, source ast.Node) Value { tset_src := typeutil.NewTypeSet(ut_src) // Just a change of type, but not value or representation? - if tset_src.All(func(termSrc *typeparams.Term) bool { - return tset_dst.All(func(termDst *typeparams.Term) bool { + if tset_src.All(func(termSrc *types.Term) bool { + return tset_dst.All(func(termDst *types.Term) bool { return isValuePreserving(termSrc.Type().Underlying(), termDst.Type().Underlying()) }) }) { @@ -262,8 +255,8 @@ func emitConv(f *Function, val Value, t_dst types.Type, source ast.Node) Value { } // Conversion from slice to array pointer? - if tset_src.All(func(termSrc *typeparams.Term) bool { - return tset_dst.All(func(termDst *typeparams.Term) bool { + if tset_src.All(func(termSrc *types.Term) bool { + return tset_dst.All(func(termDst *types.Term) bool { if slice, ok := termSrc.Type().Underlying().(*types.Slice); ok { if ptr, ok := termDst.Type().Underlying().(*types.Pointer); ok { if arr, ok := ptr.Elem().Underlying().(*types.Array); ok && types.Identical(slice.Elem(), arr.Elem()) { @@ -279,11 +272,30 @@ func emitConv(f *Function, val Value, t_dst types.Type, source ast.Node) Value { return f.emit(c, source) } + // Conversion from slice to array. This is almost the same as converting from slice to array pointer, then + // dereferencing the pointer. Except that a nil slice can be converted to [0]T, whereas converting a nil slice to + // (*[0]T) results in a nil pointer, dereferencing which would panic. To hide the extra branching we use a dedicated + // instruction, SliceToArray. 
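Reviewer note: the nil-slice corner case motivating the new SliceToArray instruction is language-specified behavior (slice-to-array conversions landed in Go 1.20, slice-to-array-pointer in Go 1.17). A standalone sketch of the asymmetry the comment above describes:

```go
package main

import "fmt"

func main() {
	var s []int // nil slice

	// A nil slice converts to a zero-length array just fine (Go 1.20+).
	a := [0]int(s)
	fmt.Println(a) // []

	// Converting to an array pointer instead yields a nil pointer
	// (Go 1.17+); dereferencing it would panic, so "convert to *[0]T,
	// then load" is not equivalent to the direct conversion.
	p := (*[0]int)(s)
	fmt.Println(p == nil) // true
}
```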
+ if tset_src.All(func(termSrc *types.Term) bool { + return tset_dst.All(func(termDst *types.Term) bool { + if slice, ok := termSrc.Type().Underlying().(*types.Slice); ok { + if arr, ok := termDst.Type().Underlying().(*types.Array); ok && types.Identical(slice.Elem(), arr.Elem()) { + return true + } + } + return false + }) + }) { + c := &SliceToArray{X: val} + c.setType(t_dst) + return f.emit(c, source) + } + // A representation-changing conversion? // At least one of {ut_src,ut_dst} must be *Basic. // (The other may be []byte or []rune.) - ok1 := tset_src.Any(func(term *typeparams.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok }) - ok2 := tset_dst.Any(func(term *typeparams.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok }) + ok1 := tset_src.Any(func(term *types.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok }) + ok2 := tset_dst.Any(func(term *types.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok }) if ok1 || ok2 { c := &Convert{X: val} c.setType(t_dst) @@ -295,21 +307,17 @@ func emitConv(f *Function, val Value, t_dst types.Type, source ast.Node) Value { // emitStore emits to f an instruction to store value val at location // addr, applying implicit conversions as required by assignability rules. -// func emitStore(f *Function, addr, val Value, source ast.Node) *Store { s := &Store{ Addr: addr, Val: emitConv(f, val, deref(addr.Type()), source), } - // make sure we call getMem after the call to emitConv, which may - // itself update the memory state f.emit(s, source) return s } // emitJump emits to f a jump to target, and updates the control-flow graph. // Postcondition: f.currentBlock is nil. -// func emitJump(f *Function, target *BasicBlock, source ast.Node) *Jump { b := f.currentBlock j := new(Jump) @@ -322,7 +330,6 @@ func emitJump(f *Function, target *BasicBlock, source ast.Node) *Jump { // emitIf emits to f a conditional jump to tblock or fblock based on // cond, and updates the control-flow graph. // Postcondition: f.currentBlock is nil. -// func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock, source ast.Node) *If { b := f.currentBlock stmt := &If{Cond: cond} @@ -335,7 +342,6 @@ func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock, source ast.Node // emitExtract emits to f an instruction to extract the index'th // component of tuple. It returns the extracted value. -// func emitExtract(f *Function, tuple Value, index int, source ast.Node) Value { e := &Extract{Tuple: tuple, Index: index} e.setType(tuple.Type().(*types.Tuple).At(index).Type()) @@ -344,7 +350,6 @@ func emitExtract(f *Function, tuple Value, index int, source ast.Node) Value { // emitTypeAssert emits to f a type assertion value := x.(t) and // returns the value. x.Type() must be an interface. -// func emitTypeAssert(f *Function, x Value, t types.Type, source ast.Node) Value { a := &TypeAssert{X: x, AssertedType: t} a.setType(t) @@ -353,7 +358,6 @@ func emitTypeAssert(f *Function, x Value, t types.Type, source ast.Node) Value { // emitTypeTest emits to f a type test value,ok := x.(t) and returns // a (value, ok) tuple. x.Type() must be an interface. -// func emitTypeTest(f *Function, x Value, t types.Type, source ast.Node) Value { a := &TypeAssert{ X: x, @@ -372,7 +376,6 @@ func emitTypeTest(f *Function, x Value, t types.Type, source ast.Node) Value { // Intended for wrapper methods. // Precondition: f does/will not use deferred procedure calls. // Postcondition: f.currentBlock is nil. 
-// func emitTailCall(f *Function, call *Call, source ast.Node) { tresults := f.Signature.Results() nr := tresults.Len() @@ -413,7 +416,6 @@ func emitTailCall(f *Function, call *Call, source ast.Node) { // If v is the address of a struct, the result will be the address of // a field; if it is the value of a struct, the result will be the // value of a field. -// func emitImplicitSelections(f *Function, v Value, indices []int, source ast.Node) Value { for _, index := range indices { // We may have a generic type containing a pointer, or a pointer to a generic type containing a struct. A @@ -450,7 +452,6 @@ func emitImplicitSelections(f *Function, v Value, indices []int, source ast.Node // will be the field's address; otherwise the result will be the // field's value. // Ident id is used for position and debug info. -// func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { // We may have a generic type containing a pointer, or a pointer to a generic type containing a struct. A // pointer to a generic containing a pointer to a struct shouldn't be possible because the outer pointer gets @@ -484,12 +485,64 @@ func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast. // zeroValue emits to f code to produce a zero value of type t, // and returns it. -// func zeroValue(f *Function, t types.Type, source ast.Node) Value { return emitConst(f, zeroConst(t)) } +type constKey struct { + typ types.Type + value constant.Value +} + func emitConst(f *Function, c Constant) Constant { - f.consts = append(f.consts, c) - return c + if f.consts == nil { + f.consts = map[constKey]constValue{} + } + + typ := c.Type() + var val constant.Value + switch c := c.(type) { + case *Const: + val = c.Value + case *ArrayConst, *GenericConst: + // These can only represent zero values, so all we need is the type + case *AggregateConst: + candidates, _ := f.aggregateConsts.At(c.typ) + for _, candidate := range candidates { + if c.equal(candidate) { + return candidate + } + } + + for i := range c.Values { + c.Values[i] = emitConst(f, c.Values[i].(Constant)) + } + + c.setBlock(f.Blocks[0]) + rands := c.Operands(nil) + updateOperandsReferrers(c, rands) + candidates = append(candidates, c) + f.aggregateConsts.Set(c.typ, candidates) + return c + + default: + panic(fmt.Sprintf("unexpected type %T", c)) + } + k := constKey{ + typ: typ, + value: val, + } + dup, ok := f.consts[k] + if ok { + return dup.c + } else { + c.setBlock(f.Blocks[0]) + f.consts[k] = constValue{ + c: c, + idx: len(f.consts), + } + rands := c.Operands(nil) + updateOperandsReferrers(c, rands) + return c + } } diff --git a/vendor/honnef.co/go/tools/go/ir/exits.go b/vendor/honnef.co/go/tools/go/ir/exits.go index 851b62c4f8..03aa2866ce 100644 --- a/vendor/honnef.co/go/tools/go/ir/exits.go +++ b/vendor/honnef.co/go/tools/go/ir/exits.go @@ -110,6 +110,19 @@ func (b *builder) buildExits(fn *Function) { // all of these call os.Exit after logging fn.NoReturn = AlwaysExits } + case "k8s.io/klog/v2": + switch obj.(*types.Func).FullName() { + case "k8s.io/klog/v2.Exit", + "k8s.io/klog/v2.ExitDepth", + "k8s.io/klog/v2.Exitf", + "k8s.io/klog/v2.Exitln", + "k8s.io/klog/v2.Fatal", + "k8s.io/klog/v2.FatalDepth", + "k8s.io/klog/v2.Fatalf", + "k8s.io/klog/v2.Fatalln": + // all of these call os.Exit after logging + fn.NoReturn = AlwaysExits + } } } @@ -330,7 +343,7 @@ func (b *builder) addUnreachables(fn *Function) { var c Call c.Call.Value = &Builtin{ name: "ir:noreturnWasPanic", - sig: types.NewSignature(nil, + sig: 
types.NewSignatureType(nil, nil, nil, types.NewTuple(), types.NewTuple(anonVar(types.Typ[types.Bool])), false, diff --git a/vendor/honnef.co/go/tools/go/ir/func.go b/vendor/honnef.co/go/tools/go/ir/func.go index ca42b5c0ea..4449b405d2 100644 --- a/vendor/honnef.co/go/tools/go/ir/func.go +++ b/vendor/honnef.co/go/tools/go/ir/func.go @@ -10,13 +10,15 @@ import ( "bytes" "fmt" "go/ast" - "go/constant" "go/format" "go/token" "go/types" "io" "os" + "sort" "strings" + + "honnef.co/go/tools/go/types/typeutil" ) // addEdge adds a control-flow graph edge from from to to. @@ -53,14 +55,12 @@ func (b *BasicBlock) Parent() *Function { return b.parent } // String returns a human-readable label of this block. // It is not guaranteed unique within the function. -// func (b *BasicBlock) String() string { return fmt.Sprintf("%d", b.Index) } // emit appends an instruction to the current basic block. // If the instruction defines a Value, it is returned. -// func (b *BasicBlock) emit(i Instruction, source ast.Node) Value { i.setSource(source) i.setBlock(b) @@ -112,7 +112,6 @@ func (b *BasicBlock) phis() []Instruction { // replacePred replaces all occurrences of p in b's predecessor list with q. // Ordinarily there should be at most one. -// func (b *BasicBlock) replacePred(p, q *BasicBlock) { for i, pred := range b.Preds { if pred == p { @@ -123,7 +122,6 @@ func (b *BasicBlock) replacePred(p, q *BasicBlock) { // replaceSucc replaces all occurrences of p in b's successor list with q. // Ordinarily there should be at most one. -// func (b *BasicBlock) replaceSucc(p, q *BasicBlock) { for i, succ := range b.Succs { if succ == p { @@ -135,7 +133,6 @@ func (b *BasicBlock) replaceSucc(p, q *BasicBlock) { // removePred removes all occurrences of p in b's // predecessor list and φ-nodes. // Ordinarily there should be at most one. -// func (b *BasicBlock) removePred(p *BasicBlock) { phis := b.phis() @@ -169,7 +166,6 @@ func (b *BasicBlock) removePred(p *BasicBlock) { // Destinations associated with unlabelled for/switch/select stmts. // We push/pop one of these as we enter/leave each construct and for // each BranchStmt we scan for the innermost target of the right type. -// type targets struct { tail *targets // rest of stack _break *BasicBlock @@ -180,7 +176,6 @@ type targets struct { // Destinations associated with a labelled block. // We populate these as labels are encountered in forward gotos or // labelled statements. -// type lblock struct { _goto *BasicBlock _break *BasicBlock @@ -189,9 +184,14 @@ type lblock struct { // labelledBlock returns the branch target associated with the // specified label, creating it if needed. -// func (f *Function) labelledBlock(label *ast.Ident) *lblock { - obj := f.Pkg.objectOf(label) + obj := f.Pkg.info.ObjectOf(label) + if obj == nil { + // Blank label, as in '_:' - don't store to f.lblocks, this label can never be referred to; just return a fresh + // lblock. + return &lblock{_goto: f.newBasicBlock(label.Name)} + } + + lb := f.lblocks[obj] if lb == nil { lb = &lblock{_goto: f.newBasicBlock(label.Name)} @@ -205,7 +205,6 @@ func (f *Function) labelledBlock(label *ast.Ident) *lblock { // addParam adds a (non-escaping) parameter to f.Params of the // specified name, type and source position.
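Reviewer note on the new obj == nil branch in labelledBlock above: the blank identifier is a legal label but introduces no binding, so types.Info.ObjectOf has nothing to return for it. A minimal program that reaches this case:

```go
package main

import "fmt"

func main() {
_: // a blank label compiles, is exempt from "label not used" errors,
	// and can never be the target of a goto, break, or continue
	fmt.Println("labelled statement")
}
```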
-// func (f *Function) addParam(name string, typ types.Type, source ast.Node) *Parameter { var b *BasicBlock if len(f.Blocks) > 0 { @@ -240,7 +239,6 @@ func (f *Function) addParamObj(obj types.Object, source ast.Node) *Parameter { // addSpilledParam declares a parameter that is pre-spilled to the // stack; the function body will load/store the spilled location. // Subsequent lifting will eliminate spills where possible. -// func (f *Function) addSpilledParam(obj types.Object, source ast.Node) { param := f.addParamObj(obj, source) spill := &Alloc{} @@ -255,7 +253,6 @@ func (f *Function) addSpilledParam(obj types.Object, source ast.Node) { // startBody initializes the function prior to generating IR code for its body. // Precondition: f.Type() already set. -// func (f *Function) startBody() { entry := f.newBasicBlock("entry") f.currentBlock = entry @@ -304,7 +301,6 @@ func (f *Function) exitBlock() { // f.startBody() was called. // Postcondition: // len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0) -// func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) { // Receiver (at most one inner iteration). if recv != nil { @@ -366,141 +362,85 @@ func numberNodes(f *Function) { } } +func updateOperandsReferrers(instr Instruction, ops []*Value) { + for _, op := range ops { + if r := *op; r != nil { + if refs := (*op).Referrers(); refs != nil { + if len(*refs) == 0 { + // per median, each value has two referrers, so we can avoid one call into growslice + // + // Note: we experimented with allocating + // sequential scratch space, but we + // couldn't find a value that gave better + // performance than making many individual + // allocations + *refs = make([]Instruction, 1, 2) + (*refs)[0] = instr + } else { + *refs = append(*refs, instr) + } + } + } + } +} + // buildReferrers populates the def/use information in all non-nil // Value.Referrers slice. // Precondition: all such slices are initially empty. func buildReferrers(f *Function) { var rands []*Value + for _, b := range f.Blocks { for _, instr := range b.Instrs { rands = instr.Operands(rands[:0]) // recycle storage - for _, rand := range rands { - if r := *rand; r != nil { - if ref := r.Referrers(); ref != nil { - if len(*ref) == 0 { - // per median, each value has two referrers, so we can avoid one call into growslice - // - // Note: we experimented with allocating - // sequential scratch space, but we - // couldn't find a value that gave better - // performance than making many individual - // allocations - *ref = make([]Instruction, 1, 2) - (*ref)[0] = instr - } else { - *ref = append(*ref, instr) - } - } - } - } + updateOperandsReferrers(instr, rands) } } + + for _, c := range f.consts { + rands = c.c.Operands(rands[:0]) + updateOperandsReferrers(c.c, rands) + } } func (f *Function) emitConsts() { - if len(f.Blocks) == 0 { + defer func() { f.consts = nil + f.aggregateConsts = typeutil.Map[[]*AggregateConst]{} + }() + + if len(f.Blocks) == 0 { return } // TODO(dh): our deduplication only works on booleans and // integers. other constants are represented as pointers to // things. 
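Reviewer note on the TODO above: this stems from go/constant's representation. Booleans and small integers are plain comparable values, while strings, big integers, and floats are handed out as pointers to heap values, so == and map-key deduplication only catch the former. A quick demonstration, which I believe matches the current go/constant implementation:

```go
package main

import (
	"fmt"
	"go/constant"
	"go/token"
)

func main() {
	// int64-backed constants compare equal and deduplicate as map keys.
	fmt.Println(constant.MakeInt64(1) == constant.MakeInt64(1)) // true

	// Non-empty strings are pointers: semantically equal constants are
	// nevertheless distinct map keys and compare unequal with ==.
	s1, s2 := constant.MakeString("hi"), constant.MakeString("hi")
	fmt.Println(s1 == s2)                            // false
	fmt.Println(constant.Compare(s1, token.EQL, s2)) // true
}
```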
- if len(f.consts) == 0 { - return - } else if len(f.consts) <= 32 { - f.emitConstsFew() - } else { - f.emitConstsMany() - } -} - -func (f *Function) emitConstsFew() { - dedup := make([]Constant, 0, 32) + head := make([]constValue, 0, len(f.consts)) for _, c := range f.consts { - if len(*c.Referrers()) == 0 { - continue - } - found := false - for _, d := range dedup { - if c.equal(d) { - replaceAll(c, d) - found = true - break - } - } - if !found { - dedup = append(dedup, c) + if len(*c.c.Referrers()) == 0 { + // TODO(dh): killing a const may make other consts dead, too + killInstruction(c.c) + } else { + head = append(head, c) } } - - instrs := make([]Instruction, len(f.Blocks[0].Instrs)+len(dedup)) - for i, c := range dedup { - instrs[i] = c - c.setBlock(f.Blocks[0]) - } - copy(instrs[len(dedup):], f.Blocks[0].Instrs) - f.Blocks[0].Instrs = instrs - f.consts = nil -} - -func (f *Function) emitConstsMany() { - type constKey struct { - typ types.Type - value constant.Value + sort.Slice(head, func(i, j int) bool { + return head[i].idx < head[j].idx + }) + entry := f.Blocks[0] + instrs := make([]Instruction, 0, len(entry.Instrs)+len(head)) + for _, c := range head { + instrs = append(instrs, c.c) } - - m := make(map[constKey]Value, len(f.consts)) - areNil := 0 - for i, c := range f.consts { - if len(*c.Referrers()) == 0 { - f.consts[i] = nil - areNil++ - continue + f.aggregateConsts.Iterate(func(key types.Type, value []*AggregateConst) { + for _, c := range value { + instrs = append(instrs, c) } + }) - var typ types.Type - var val constant.Value - switch c := c.(type) { - case *Const: - typ = c.typ - val = c.Value - case *ArrayConst: - // ArrayConst can only encode zero constants, so all we need is the type - typ = c.typ - case *AggregateConst: - // ArrayConst can only encode zero constants, so all we need is the type - typ = c.typ - case *GenericConst: - typ = c.typ - default: - panic(fmt.Sprintf("unexpected type %T", c)) - } - k := constKey{ - typ: typ, - value: val, - } - if dup, ok := m[k]; !ok { - m[k] = c - } else { - f.consts[i] = nil - areNil++ - replaceAll(c, dup) - } - } - - instrs := make([]Instruction, len(f.Blocks[0].Instrs)+len(f.consts)-areNil) - i := 0 - for _, c := range f.consts { - if c != nil { - instrs[i] = c - c.setBlock(f.Blocks[0]) - i++ - } - } - copy(instrs[i:], f.Blocks[0].Instrs) - f.Blocks[0].Instrs = instrs - f.consts = nil + instrs = append(instrs, entry.Instrs...) + entry.Instrs = instrs } // buildFakeExits ensures that every block in the function is @@ -593,7 +533,12 @@ func (f *Function) finishBody() { buildPostDomTree(f) if f.Prog.mode&NaiveForm == 0 { - lift(f) + for lift(f) { + } + if doSimplifyConstantCompositeValues { + for simplifyConstantCompositeValues(f) { + } + } } // emit constants after lifting, because lifting may produce new constants, but before other variable splitting, @@ -652,7 +597,6 @@ func (f *Function) RemoveNilBlocks() { // removeNilBlocks eliminates nils from f.Blocks and updates each // BasicBlock.Index. Use this after any pass that may delete blocks. -// func (f *Function) removeNilBlocks() { j := 0 for _, b := range f.Blocks { @@ -673,7 +617,6 @@ func (f *Function) removeNilBlocks() { // functions will include full debug info. This greatly increases the // size of the instruction stream, and causes Functions to depend upon // the ASTs, potentially keeping them live in memory for longer. -// func (pkg *Package) SetDebugMode(debug bool) { // TODO(adonovan): do we want ast.File granularity? 
pkg.debug = debug @@ -687,7 +630,6 @@ func (f *Function) debugInfo() bool { // addNamedLocal creates a local variable, adds it to function f and // returns it. Its name and type are taken from obj. Subsequent // calls to f.lookup(obj) will return the same local. -// func (f *Function) addNamedLocal(obj types.Object, source ast.Node) *Alloc { l := f.addLocal(obj.Type(), source) f.objects[obj] = l @@ -700,7 +642,6 @@ func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc { // addLocal creates an anonymous local variable of type typ, adds it // to function f and returns it. pos is the optional source location. -// func (f *Function) addLocal(typ types.Type, source ast.Node) *Alloc { v := &Alloc{} v.setType(types.NewPointer(typ)) @@ -713,7 +654,6 @@ func (f *Function) addLocal(typ types.Type, source ast.Node) *Alloc { // that is local to function f or one of its enclosing functions. // If escaping, the reference comes from a potentially escaping pointer // expression and the referent must be heap-allocated. -// func (f *Function) lookup(obj types.Object, escaping bool) Value { if v, ok := f.objects[obj]; ok { if alloc, ok := v.(*Alloc); ok && escaping { @@ -750,13 +690,14 @@ func (f *Function) emit(instr Instruction, source ast.Node) Value { // The specific formatting rules are not guaranteed and may change. // // Examples: -// "math.IsNaN" // a package-level function -// "(*bytes.Buffer).Bytes" // a declared method or a wrapper -// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0) -// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure) -// "main.main$1" // an anonymous function in main -// "main.init#1" // a declared init function -// "main.init" // the synthesized package initializer +// +// "math.IsNaN" // a package-level function +// "(*bytes.Buffer).Bytes" // a declared method or a wrapper +// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0) +// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure) +// "main.main$1" // an anonymous function in main +// "main.init#1" // a declared init function +// "main.init" // the synthesized package initializer // // When these functions are referred to from within the same package // (i.e. from == f.Pkg.Object), they are rendered without the package path. @@ -766,7 +707,6 @@ func (f *Function) emit(instr Instruction, source ast.Node) Value { // (But two methods may have the same name "(T).f" if one is a synthetic // wrapper promoting a non-exported method "f" from another package; in // that case, the strings are equal but the identifiers "f" are distinct.) -// func (f *Function) RelString(from *types.Package) string { // Anonymous? if f.parent != nil { @@ -920,6 +860,10 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { default: buf.WriteString(instr.String()) } + if instr != nil && instr.Comment() != "" { + buf.WriteString(" # ") + buf.WriteString(instr.Comment()) + } buf.WriteString("\n") if f.Prog.mode&PrintSource != 0 { @@ -950,7 +894,6 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { // newBasicBlock adds to f a new basic block and returns it. It does // not automatically become the current block for subsequent calls to emit. // comment is an optional string for more readable debugging output. 
-// func (f *Function) newBasicBlock(comment string) *BasicBlock { var instrs []Instruction if len(f.functionBody.scratchInstructions) > 0 { @@ -985,7 +928,6 @@ func (f *Function) newBasicBlock(comment string) *BasicBlock { // "reflect" package, etc. // // TODO(adonovan): think harder about the API here. -// func (prog *Program) NewFunction(name string, sig *types.Signature, provenance Synthetic) *Function { return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance} } @@ -1004,3 +946,12 @@ func (f *Function) initHTML(name string) { f.wr = NewHTMLWriter("ir.html", rel, "") } } + +func killInstruction(instr Instruction) { + ops := instr.Operands(nil) + for _, op := range ops { + if refs := (*op).Referrers(); refs != nil { + *refs = removeInstr(*refs, instr) + } + } +} diff --git a/vendor/honnef.co/go/tools/go/ir/irutil/load.go b/vendor/honnef.co/go/tools/go/ir/irutil/load.go index 1e83effa1a..883447b7af 100644 --- a/vendor/honnef.co/go/tools/go/ir/irutil/load.go +++ b/vendor/honnef.co/go/tools/go/ir/irutil/load.go @@ -13,6 +13,7 @@ import ( "honnef.co/go/tools/go/ir" + //lint:ignore SA1019 go/loader is deprecated, but works fine for our tests "golang.org/x/tools/go/loader" "golang.org/x/tools/go/packages" ) @@ -39,7 +40,6 @@ type Options struct { // packages with well-typed syntax trees. // // The mode parameter controls diagnostics and checking during IR construction. -// func Packages(initial []*packages.Package, mode ir.BuilderMode, opts *Options) (*ir.Program, []*ir.Package) { return doPackages(initial, mode, false, opts) } @@ -61,7 +61,6 @@ func Packages(initial []*packages.Package, mode ir.BuilderMode, opts *Options) ( // well-typed syntax trees. // // The mode parameter controls diagnostics and checking during IR construction. -// func AllPackages(initial []*packages.Package, mode ir.BuilderMode, opts *Options) (*ir.Program, []*ir.Package) { return doPackages(initial, mode, true, opts) } @@ -112,7 +111,6 @@ func doPackages(initial []*packages.Package, mode ir.BuilderMode, deps bool, opt // // Deprecated: use golang.org/x/tools/go/packages and the Packages // function instead; see ir.ExampleLoadPackages. -// func CreateProgram(lprog *loader.Program, mode ir.BuilderMode) *ir.Program { prog := ir.NewProgram(lprog.Fset, mode) @@ -139,7 +137,6 @@ func CreateProgram(lprog *loader.Program, mode ir.BuilderMode) *ir.Program { // The operation fails if there were any type-checking or import errors. // // See ../ir/example_test.go for an example. -// func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ir.BuilderMode) (*ir.Package, *types.Info, error) { if fset == nil { panic("no token.FileSet") diff --git a/vendor/honnef.co/go/tools/go/ir/irutil/switch.go b/vendor/honnef.co/go/tools/go/ir/irutil/switch.go index e7654e0081..afe899d86e 100644 --- a/vendor/honnef.co/go/tools/go/ir/irutil/switch.go +++ b/vendor/honnef.co/go/tools/go/ir/irutil/switch.go @@ -55,7 +55,6 @@ type TypeCase struct { // A type switch may contain duplicate types, or types assignable // to an interface type also in the list. // TODO(adonovan): eliminate such duplicates. -// type Switch struct { Start *ir.BasicBlock // block containing start of if/else chain X ir.Value // the switch operand @@ -103,7 +102,6 @@ func (sw *Switch) String() string { // Switches may even be inferred from if/else- or goto-based control flow. // (In general, the control flow constructs of the source program // cannot be faithfully reproduced from the IR.) 
-// func Switches(fn *ir.Function) []Switch { // Traverse the CFG in dominance order, so we don't // enter an if/else-chain in the middle. @@ -227,7 +225,6 @@ func typeSwitch(sw *Switch, y ir.Value, T types.Type, seen map[*ir.BasicBlock]bo // isComparisonBlock returns the operands (v, k) if a block ends with // a comparison v==k, where k is a compile-time constant. -// func isComparisonBlock(b *ir.BasicBlock) (v ir.Value, k *ir.Const) { if n := len(b.Instrs); n >= 2 { if i, ok := b.Instrs[n-1].(*ir.If); ok { @@ -246,7 +243,6 @@ func isComparisonBlock(b *ir.BasicBlock) (v ir.Value, k *ir.Const) { // isTypeAssertBlock returns the operands (y, x, T) if a block ends with // a type assertion "if y, ok := x.(T); ok {". -// func isTypeAssertBlock(b *ir.BasicBlock) (y, x ir.Value, T types.Type) { if n := len(b.Instrs); n >= 4 { if i, ok := b.Instrs[n-1].(*ir.If); ok { diff --git a/vendor/honnef.co/go/tools/go/ir/irutil/visit.go b/vendor/honnef.co/go/tools/go/ir/irutil/visit.go index f6d0503ddd..f2135dca4b 100644 --- a/vendor/honnef.co/go/tools/go/ir/irutil/visit.go +++ b/vendor/honnef.co/go/tools/go/ir/irutil/visit.go @@ -18,7 +18,6 @@ import "honnef.co/go/tools/go/ir" // synthetic wrappers. // // Precondition: all packages are built. -// func AllFunctions(prog *ir.Program) map[*ir.Function]bool { visit := visitor{ prog: prog, diff --git a/vendor/honnef.co/go/tools/go/ir/lift.go b/vendor/honnef.co/go/tools/go/ir/lift.go index 8ab67eb8ee..1a4cd3026d 100644 --- a/vendor/honnef.co/go/tools/go/ir/lift.go +++ b/vendor/honnef.co/go/tools/go/ir/lift.go @@ -63,8 +63,7 @@ const debugLifting = false // // domFrontier's methods mutate the slice's elements but not its // length, so their receivers needn't be pointers. -// -type domFrontier [][]*BasicBlock +type domFrontier BlockMap[[]*BasicBlock] func (df domFrontier) add(u, v *BasicBlock) { df[u.Index] = append(df[u.Index], v) @@ -105,7 +104,7 @@ func buildDomFrontier(fn *Function) domFrontier { return df } -type postDomFrontier [][]*BasicBlock +type postDomFrontier BlockMap[[]*BasicBlock] func (rdf postDomFrontier) add(u, v *BasicBlock) { rdf[u.Index] = append(rdf[u.Index], v) @@ -156,6 +155,19 @@ func clearInstrs(instrs []Instruction) { } } +func numberNodesPerBlock(f *Function) { + for _, b := range f.Blocks { + var base ID + for _, instr := range b.Instrs { + if instr == nil { + continue + } + instr.setID(base) + base++ + } + } +} + // lift replaces local and new Allocs accessed only with // load/store by IR registers, inserting φ- and σ-nodes where necessary. // The result is a program in pruned SSI form. @@ -164,8 +176,7 @@ func clearInstrs(instrs []Instruction) { // - fn has no dead blocks (blockopt has run). // - Def/use info (Operands and Referrers) is up-to-date. // - The dominator tree is up-to-date. -// -func lift(fn *Function) { +func lift(fn *Function) bool { // TODO(adonovan): opt: lots of little optimizations may be // worthwhile here, especially if they cause us to avoid // buildDomFrontier. For example: @@ -187,8 +198,8 @@ func lift(fn *Function) { var df domFrontier var rdf postDomFrontier var closure *closure - var newPhis newPhiMap - var newSigmas newSigmaMap + var newPhis BlockMap[[]newPhi] + var newSigmas BlockMap[[]newSigma] // During this pass we will replace some BasicBlock.Instrs // (allocs, loads and stores) with nil, keeping a count in @@ -204,24 +215,35 @@ func lift(fn *Function) { // Determine which allocs we can lift and number them densely. // The renaming phase uses this numbering for compact maps. 
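Reviewer note: this hunk begins migrating ad-hoc [][]*BasicBlock and []bool slices to the named BlockMap type. Its definition is not part of this excerpt; judging by the call sites (make(BlockMap[bool], len(fn.Blocks)), indexing with b.Index), it is presumably a dense slice keyed by block index, roughly:

```go
// Presumed shape of BlockMap (not shown in this diff): a dense map
// from BasicBlock.Index to T. The named type documents intent at the
// use sites without changing the underlying representation.
type BlockMap[T any] []T
```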
numAllocs := 0 + + instructions := make(BlockMap[liftInstructions], len(fn.Blocks)) + for i := range instructions { + instructions[i].insertInstructions = map[Instruction][]Instruction{} + } + + // Number nodes, for liftable + numberNodesPerBlock(fn) + for _, b := range fn.Blocks { b.gaps = 0 b.rundefers = 0 + for _, instr := range b.Instrs { switch instr := instr.(type) { case *Alloc: - if !liftable(instr) { + if !liftable(instr, instructions) { instr.index = -1 continue } + if numAllocs == 0 { df = buildDomFrontier(fn) rdf = buildPostDomFrontier(fn) if len(fn.Blocks) > 2 { closure = transitiveClosure(fn) } - newPhis = make(newPhiMap, len(fn.Blocks)) - newSigmas = make(newSigmaMap, len(fn.Blocks)) + newPhis = make(BlockMap[[]newPhi], len(fn.Blocks)) + newSigmas = make(BlockMap[[]newSigma], len(fn.Blocks)) if debugLifting { title := false @@ -236,7 +258,6 @@ func lift(fn *Function) { } } } - liftAlloc(closure, df, rdf, instr, newPhis, newSigmas) instr.index = numAllocs numAllocs++ case *Defer: @@ -248,6 +269,39 @@ func lift(fn *Function) { } if numAllocs > 0 { + for _, b := range fn.Blocks { + work := instructions[b.Index] + for _, rename := range work.renameAllocs { + for _, instr_ := range b.Instrs[rename.startingAt:] { + replace(instr_, rename.from, rename.to) + } + } + } + + for _, b := range fn.Blocks { + work := instructions[b.Index] + if len(work.insertInstructions) != 0 { + newInstrs := make([]Instruction, 0, len(fn.Blocks)+len(work.insertInstructions)*3) + for _, instr := range b.Instrs { + if add, ok := work.insertInstructions[instr]; ok { + newInstrs = append(newInstrs, add...) + } + newInstrs = append(newInstrs, instr) + } + b.Instrs = newInstrs + } + } + + // TODO(dh): remove inserted allocs that end up unused after lifting. + + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + if instr, ok := instr.(*Alloc); ok && instr.index >= 0 { + liftAlloc(closure, df, rdf, instr, newPhis, newSigmas) + } + } + } + // renaming maps an alloc (keyed by index) to its replacement // value. Initially the renaming contains nil, signifying the // zero constant of the appropriate type; we construct the @@ -380,6 +434,8 @@ func lift(fn *Function) { fn.Locals[i] = nil } fn.Locals = fn.Locals[:j] + + return numAllocs > 0 } func hasDirectReferrer(instr Instruction) bool { @@ -394,7 +450,7 @@ func hasDirectReferrer(instr Instruction) bool { return false } -func markLiveNodes(blocks []*BasicBlock, newPhis newPhiMap, newSigmas newSigmaMap) { +func markLiveNodes(blocks []*BasicBlock, newPhis BlockMap[[]newPhi], newSigmas BlockMap[[]newSigma]) { // Phis and sigmas may become dead due to optimization passes. We may also insert more nodes than strictly // necessary, e.g. sigma nodes for constants, which will never be used. @@ -461,7 +517,7 @@ func markLiveSigma(sigma *Sigma) { // and replaces trivial phis with non-phi alternatives. Phi // nodes where all edges are identical, or consist of only the phi // itself and one other value, may be replaced with the value. 
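Reviewer note: the rule stated above (all edges carry the same value, or only the φ itself plus one other value) is the classic trivial-φ test, cf. Braun et al.'s on-the-fly SSA construction. A toy, self-contained version of the check:

```go
package main

import "fmt"

// phi is a toy φ-node: a name plus the value names flowing in along
// each incoming edge (possibly including the φ itself).
type phi struct {
	name string
	args []string
}

// trivial returns the sole value a φ can be replaced with, if every
// operand is either that value or the φ itself; ok is false otherwise.
func trivial(p phi) (repl string, ok bool) {
	for _, a := range p.args {
		if a == p.name || a == repl {
			continue // self-reference or already-seen value
		}
		if repl != "" {
			return "", false // two distinct non-self operands
		}
		repl = a
	}
	return repl, repl != ""
}

func main() {
	fmt.Println(trivial(phi{name: "x", args: []string{"c", "x"}})) // c true
	fmt.Println(trivial(phi{name: "x", args: []string{"a", "b"}})) // false
}
```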
-func simplifyPhisAndSigmas(newPhis newPhiMap, newSigmas newSigmaMap) { +func simplifyPhisAndSigmas(newPhis BlockMap[[]newPhi], newSigmas BlockMap[[]newSigma]) { // temporary numbering of values used in phis so that we can build map keys var id ID for _, npList := range newPhis { @@ -652,7 +708,7 @@ func (s *BlockSet) Take() int { type closure struct { span []uint32 - reachables []interval + reachables BlockMap[interval] } type interval uint32 @@ -696,6 +752,8 @@ func (c closure) reachable(id int) []interval { } func (c closure) walk(current *BasicBlock, b *BasicBlock, visited []bool) { + // TODO(dh): the 'current' argument seems to be unused + // TODO(dh): there's no reason for this to be a method visited[b.Index] = true for _, succ := range b.Succs { if visited[succ.Index] { @@ -707,7 +765,7 @@ func (c closure) walk(current *BasicBlock, b *BasicBlock, visited []bool) { } func transitiveClosure(fn *Function) *closure { - reachable := make([]bool, len(fn.Blocks)) + reachable := make(BlockMap[bool], len(fn.Blocks)) c := &closure{} c.span = make([]uint32, len(fn.Blocks)+1) @@ -763,13 +821,65 @@ type newSigma struct { sigmas []*Sigma } -// newPhiMap records for each basic block, the set of newPhis that -// must be prepended to the block. -type newPhiMap [][]newPhi -type newSigmaMap [][]newSigma +type liftInstructions struct { + insertInstructions map[Instruction][]Instruction + renameAllocs []struct { + from *Alloc + to *Alloc + startingAt int + } +} + +// liftable determines if alloc can be lifted, and records instructions to split partially liftable allocs. +// +// In the trivial case, all uses of the alloc can be lifted. This is the case when it is only used for storing into and +// loading from. In that case, no instructions are recorded. +// +// In the more complex case, the alloc is used for storing into and loading from, but it is also used as a value, for +// example because it gets passed to a function, e.g. fn(&x). In this case, uses of the alloc fall into one of two +// categories: those that can be lifted and those that can't. A boundary forms between these two categories in the +// function's control flow: Once an unliftable use is encountered, the alloc is no longer liftable for the remainder of +// the basic block the use is in, nor in any blocks reachable from it. +// +// We record instructions that split the alloc into two allocs: one that is used in liftable uses, and one that is used +// in unliftable uses. Whenever we encounter a boundary between liftable and unliftable uses or blocks, we emit a pair +// of Load and Store that copy the value from the liftable alloc into the unliftable alloc. Taking these instructions +// into account, the normal lifting machinery will completely lift the liftable alloc, store the correct lifted values +// into the unliftable alloc, and will not at all lift the unliftable alloc. 
+// +// In Go syntax, the transformation looks somewhat like this: +// +// func foo() { +// x := 32 +// if cond { +// println(x) +// escape(&x) +// println(x) +// } else { +// println(x) +// } +// println(x) +// } +// +// transforms into +// +// func fooSplitAlloc() { +// x := 32 +// var x_ int +// if cond { +// println(x) +// x_ = x +// escape(&x_) +// println(x_) +// } else { +// println(x) +// x_ = x +// } +// println(x_) +// } +func liftable(alloc *Alloc, instructions BlockMap[liftInstructions]) bool { + fn := alloc.block.parent -func liftable(alloc *Alloc) bool { - fn := alloc.Parent() // Don't lift named return values in functions that defer // calls that may recover from panic. if fn.hasDefer { @@ -780,24 +890,264 @@ func liftable(alloc *Alloc) bool { } } - for _, instr := range *alloc.Referrers() { + type blockDesc struct { + // is the block (partially) unliftable, because it contains unliftable instructions or is reachable by an unliftable block + isUnliftable bool + hasLiftableLoad bool + hasLiftableOther bool + // we need to emit stores in predecessors because the unliftable use is in a phi + storeInPreds bool + + lastLiftable int + firstUnliftable int + } + blocks := make(BlockMap[blockDesc], len(fn.Blocks)) + for _, b := range fn.Blocks { + blocks[b.Index].lastLiftable = -1 + blocks[b.Index].firstUnliftable = len(b.Instrs) + 1 + } + + // Look at all uses of the alloc and deduce which blocks have liftable or unliftable instructions. + for _, instr := range alloc.referrers { + // Find the first unliftable use + + desc := &blocks[instr.Block().Index] + hasUnliftable := false + inHead := false switch instr := instr.(type) { case *Store: if instr.Val == alloc { - return false // address used as value - } - if instr.Addr != alloc { - panic("Alloc.Referrers is inconsistent") + hasUnliftable = true } case *Load: - if instr.X != alloc { - panic("Alloc.Referrers is inconsistent") + case *DebugRef: + case *Phi, *Sigma: + inHead = true + hasUnliftable = true + default: + hasUnliftable = true + } + + if hasUnliftable { + desc.isUnliftable = true + if int(instr.ID()) < desc.firstUnliftable { + desc.firstUnliftable = int(instr.ID()) } + if inHead { + desc.storeInPreds = true + desc.firstUnliftable = 0 + } + } + } + for _, instr := range alloc.referrers { + // Find the last liftable use, taking the previously calculated firstUnliftable into consideration + + desc := &blocks[instr.Block().Index] + if int(instr.ID()) >= desc.firstUnliftable { + continue + } + hasLiftable := false + switch instr := instr.(type) { + case *Store: + if instr.Val != alloc { + desc.hasLiftableOther = true + hasLiftable = true + } + case *Load: + desc.hasLiftableLoad = true + hasLiftable = true case *DebugRef: - // ok - default: - return false + desc.hasLiftableOther = true + } + if hasLiftable { + if int(instr.ID()) > desc.lastLiftable { + desc.lastLiftable = int(instr.ID()) + } + } + } + + for i := range blocks { + // Update firstUnliftable to be one after lastLiftable. We do this to include the unliftable's preceding + // DebugRefs in the renaming. + if blocks[i].lastLiftable == -1 && !blocks[i].storeInPreds { + // There are no liftable instructions (for this alloc) in this block. Set firstUnliftable to the + // first non-head instruction to avoid inserting the store before phi instructions, which would + // fail validation. 
+ first := -1 + instrLoop: + for i, instr := range fn.Blocks[i].Instrs { + switch instr.(type) { + case *Phi, *Sigma: + default: + first = i + break instrLoop + } + } + blocks[i].firstUnliftable = first + } else { + blocks[i].firstUnliftable = blocks[i].lastLiftable + 1 + } + } + + // If a block is reachable by a (partially) unliftable block, then the entirety of the block is unliftable. In that + // case, stores have to be inserted in the predecessors. + // + // TODO(dh): this isn't always necessary. If the block is reachable by itself, i.e. part of a loop, then if the + // Alloc instruction is itself part of that loop, then there is a subset of instructions in the loop that can be + // lifted. For example: + // + // for { + // x := 42 + // println(x) + // escape(&x) + // } + // + // The x that escapes in one iteration of the loop isn't the same x that we read from on the next iteration. + seen := make(BlockMap[bool], len(fn.Blocks)) + var dfs func(b *BasicBlock) + dfs = func(b *BasicBlock) { + if seen[b.Index] { + return + } + seen[b.Index] = true + desc := &blocks[b.Index] + desc.hasLiftableLoad = false + desc.hasLiftableOther = false + desc.isUnliftable = true + desc.firstUnliftable = 0 + desc.storeInPreds = true + for _, succ := range b.Succs { + dfs(succ) + } + } + for _, b := range fn.Blocks { + if blocks[b.Index].isUnliftable { + for _, succ := range b.Succs { + dfs(succ) + } + } + } + + hasLiftableLoad := false + hasLiftableOther := false + hasUnliftable := false + for _, b := range fn.Blocks { + desc := blocks[b.Index] + hasLiftableLoad = hasLiftableLoad || desc.hasLiftableLoad + hasLiftableOther = hasLiftableOther || desc.hasLiftableOther + if desc.isUnliftable { + hasUnliftable = true + } + } + if !hasLiftableLoad && !hasLiftableOther { + // There are no liftable uses + return false + } else if !hasUnliftable { + // The alloc is entirely liftable without splitting + return true + } else if !hasLiftableLoad { + // The alloc is not entirely liftable, and the only liftable uses are stores. While some of those stores could + // get lifted away, it would also lead to an infinite loop when lifting to a fixpoint, because the newly created + // allocs also get stored into repeatedly, and those stores are their only liftable uses. + return false + } + + // We need to insert stores for the new alloc. If a (partially) unliftable block has no unliftable + // predecessors and the use isn't in a phi node, then the store can be inserted right before the unliftable use. + // Otherwise, stores have to be inserted at the end of all liftable predecessors.
+ + newAlloc := &Alloc{Heap: true} + newAlloc.setBlock(alloc.block) + newAlloc.setType(alloc.typ) + newAlloc.setSource(alloc.source) + newAlloc.index = -1 + newAlloc.comment = "split alloc" + + { + work := instructions[alloc.block.Index] + work.insertInstructions[alloc] = append(work.insertInstructions[alloc], newAlloc) + } + + predHasStore := make(BlockMap[bool], len(fn.Blocks)) + for _, b := range fn.Blocks { + desc := &blocks[b.Index] + bWork := &instructions[b.Index] + + if desc.isUnliftable { + bWork.renameAllocs = append(bWork.renameAllocs, struct { + from *Alloc + to *Alloc + startingAt int + }{ + alloc, newAlloc, int(desc.firstUnliftable), + }) + } + + if !desc.isUnliftable { + continue + } + + propagate := func(in *BasicBlock, before Instruction) { + load := &Load{ + X: alloc, + } + store := &Store{ + Addr: newAlloc, + Val: load, + } + load.setType(deref(alloc.typ)) + load.setBlock(in) + load.comment = "split alloc" + store.setBlock(in) + updateOperandReferrers(load) + updateOperandReferrers(store) + store.comment = "split alloc" + + entry := &instructions[in.Index] + entry.insertInstructions[before] = append(entry.insertInstructions[before], load, store) + } + + if desc.storeInPreds { + // emit stores at the end of liftable preds + for _, pred := range b.Preds { + if blocks[pred.Index].isUnliftable { + continue + } + + if !alloc.block.Dominates(pred) { + // Consider this cfg: + // + // 1 + // /| + // / | + // ↙ ↓ + // 2--→3 + // + // with an Alloc in block 2. It doesn't make sense to insert a store in block 1 for the jump to + // block 3, because 1 can never see the Alloc in the first place. + // + // Ignoring phi nodes, an Alloc always dominates all of its uses, and phi nodes don't matter here, + // because for the incoming edges that do matter, we do emit the stores. + + continue + } + + if predHasStore[pred.Index] { + // Don't generate redundant propagations. Not only is it unnecessary, it can lead to infinite loops + // when trying to lift to a fix point, because redundant stores are liftable. + continue + } + + predHasStore[pred.Index] = true + + before := pred.Instrs[len(pred.Instrs)-1] + propagate(pred, before) + } + } else { + // emit store before the first unliftable use + before := b.Instrs[desc.firstUnliftable] + propagate(b, before) } } @@ -805,7 +1155,7 @@ func liftable(alloc *Alloc) bool { } // liftAlloc lifts alloc into registers and populates newPhis and newSigmas with all the φ- and σ-nodes it may require. -func liftAlloc(closure *closure, df domFrontier, rdf postDomFrontier, alloc *Alloc, newPhis newPhiMap, newSigmas newSigmaMap) { +func liftAlloc(closure *closure, df domFrontier, rdf postDomFrontier, alloc *Alloc, newPhis BlockMap[[]newPhi], newSigmas BlockMap[[]newSigma]) { fn := alloc.Parent() defblocks := fn.blockset(0) @@ -950,17 +1300,28 @@ func liftAlloc(closure *closure, df domFrontier, rdf postDomFrontier, alloc *All // replaceAll replaces all intraprocedural uses of x with y, // updating x.Referrers and y.Referrers. // Precondition: x.Referrers() != nil, i.e. x must be local to some function. 
-// func replaceAll(x, y Value) { var rands []*Value pxrefs := x.Referrers() pyrefs := y.Referrers() for _, instr := range *pxrefs { - rands = instr.Operands(rands[:0]) // recycle storage - for _, rand := range rands { - if *rand != nil { - if *rand == x { - *rand = y + switch instr := instr.(type) { + case *CompositeValue: + // Special case CompositeValue because it might have very large lists of operands + // + // OPT(dh): this loop is still expensive for large composite values + for i, rand := range instr.Values { + if rand == x { + instr.Values[i] = y + } + } + default: + rands = instr.Operands(rands[:0]) // recycle storage + for _, rand := range rands { + if *rand != nil { + if *rand == x { + *rand = y + } } } } @@ -995,7 +1356,6 @@ func replace(instr Instruction, x, y Value) { // renamed returns the value to which alloc is being renamed, // constructing it lazily if it's the implicit zero initialization. -// func renamed(fn *Function, renaming []Value, alloc *Alloc) Value { v := renaming[alloc.index] if v == nil { @@ -1141,6 +1501,10 @@ func splitOnNewInformation(u *BasicBlock, renaming *StackMap) { // A slice to array pointer conversion tells us the minimum length of the slice rename(instr.X, instr, CopyInfoUnspecified, i) i++ + case *SliceToArray: + // A slice to array conversion tells us the minimum length of the slice + rename(instr.X, instr, CopyInfoUnspecified, i) + i++ case *Slice: // Slicing tells us about some of the bounds off := 0 @@ -1203,8 +1567,7 @@ func splitOnNewInformation(u *BasicBlock, renaming *StackMap) { // renaming is a map from *Alloc (keyed by index number) to its // dominating stored value; newPhis[x] is the set of new φ-nodes to be // prepended to block x. -// -func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap, newSigmas newSigmaMap) { +func rename(u *BasicBlock, renaming []Value, newPhis BlockMap[[]newPhi], newSigmas BlockMap[[]newSigma]) { // Each φ-node becomes the new name for its associated Alloc. for _, np := range newPhis[u.Index] { phi := np.phi @@ -1340,7 +1703,6 @@ func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap, newSigmas newSig // fresh copy of the renaming map for each subtree. 
r := make([]Value, len(renaming)) for _, v := range u.dom.children { - // XXX add debugging copy(r, renaming) // on entry to a block, the incoming sigma nodes become the new values for their alloc @@ -1355,3 +1717,54 @@ func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap, newSigmas newSig } } + +func simplifyConstantCompositeValues(fn *Function) bool { + changed := false + + for _, b := range fn.Blocks { + n := 0 + for _, instr := range b.Instrs { + replaced := false + + if cv, ok := instr.(*CompositeValue); ok { + ac := &AggregateConst{} + ac.typ = cv.typ + replaced = true + for _, v := range cv.Values { + if c, ok := v.(Constant); ok { + ac.Values = append(ac.Values, c) + } else { + replaced = false + break + } + } + if replaced { + replaceAll(cv, emitConst(fn, ac)) + killInstruction(cv) + } + + } + + if replaced { + changed = true + } else { + b.Instrs[n] = instr + n++ + } + } + + clearInstrs(b.Instrs[n:]) + b.Instrs = b.Instrs[:n] + } + + return changed +} + +func updateOperandReferrers(instr Instruction) { + for _, op := range instr.Operands(nil) { + refs := (*op).Referrers() + if refs != nil { + *refs = append(*refs, instr) + } + } +} diff --git a/vendor/honnef.co/go/tools/go/ir/lvalue.go b/vendor/honnef.co/go/tools/go/ir/lvalue.go index f676a1f7ab..86eb4a5d12 100644 --- a/vendor/honnef.co/go/tools/go/ir/lvalue.go +++ b/vendor/honnef.co/go/tools/go/ir/lvalue.go @@ -15,7 +15,6 @@ import ( // An lvalue represents an assignable location that may appear on the // left-hand side of an assignment. This is a generalization of a // pointer to permit updates to elements of maps. -// type lvalue interface { store(fn *Function, v Value, source ast.Node) // stores v into the location load(fn *Function, source ast.Node) Value // loads the contents of the location @@ -52,11 +51,38 @@ func (a *address) typ() types.Type { return deref(a.addr.Type()) } +type compositeElement struct { + cv *CompositeValue + idx int + t types.Type + expr ast.Expr +} + +func (ce *compositeElement) load(fn *Function, source ast.Node) Value { + panic("not implemented") +} + +func (ce *compositeElement) store(fn *Function, v Value, source ast.Node) { + v = emitConv(fn, v, ce.t, source) + ce.cv.Values[ce.idx] = v + if ce.expr != nil { + // store.Val is v, converted for assignability. + emitDebugRef(fn, ce.expr, v, false) + } +} + +func (ce *compositeElement) address(fn *Function) Value { + panic("not implemented") +} + +func (ce *compositeElement) typ() types.Type { + return ce.t +} + // An element is an lvalue represented by m[k], the location of an // element of a map. These locations are not addressable // since pointers cannot be formed from them, but they do support // load() and store(). -// type element struct { m, k Value // map t types.Type // map element type @@ -88,9 +114,42 @@ func (e *element) typ() types.Type { return e.t } +// A lazyAddress is an lvalue whose address is the result of an instruction. +// These work like an *address except a new address.address() Value +// is created on each load, store and address call. +// A lazyAddress can be used to control when a side effect (nil pointer +// dereference, index out of bounds) of using a location happens. 
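[editor's note] simplifyConstantCompositeValues above folds a CompositeValue into an AggregateConst only when every element is already a Constant; one non-constant element aborts the fold. A self-contained sketch of that all-or-nothing check, with value, constant, and variable as toy stand-ins for the real ir interfaces:

	package main

	import "fmt"

	// Toy stand-ins for ir.Value and ir.Constant.
	type value interface{ isValue() }
	type constant struct{ v int }
	type variable struct{ name string }

	func (constant) isValue() {}
	func (variable) isValue() {}

	// tryFold succeeds only if every element of the composite is a constant,
	// mirroring the loop in simplifyConstantCompositeValues.
	func tryFold(values []value) ([]constant, bool) {
		out := make([]constant, 0, len(values))
		for _, v := range values {
			c, ok := v.(constant)
			if !ok {
				return nil, false // any non-constant element aborts the fold
			}
			out = append(out, c)
		}
		return out, true
	}

	func main() {
		fmt.Println(tryFold([]value{constant{1}, constant{2}}))   // folds
		fmt.Println(tryFold([]value{constant{1}, variable{"x"}})) // doesn't
	}

In the real pass, a successful fold is followed by replaceAll and killInstruction; otherwise the CompositeValue is kept as-is.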
+type lazyAddress struct { + addr func(fn *Function) Value // emit to fn the computation of the address + t types.Type // type of the location + expr ast.Expr // source syntax of the value (not address) [debug mode] +} + +func (l *lazyAddress) load(fn *Function, source ast.Node) Value { + load := emitLoad(fn, l.addr(fn), source) + return load +} + +func (l *lazyAddress) store(fn *Function, v Value, source ast.Node) { + store := emitStore(fn, l.addr(fn), v, source) + if l.expr != nil { + // store.Val is v, converted for assignability. + emitDebugRef(fn, l.expr, store.Val, false) + } +} + +func (l *lazyAddress) address(fn *Function) Value { + addr := l.addr(fn) + if l.expr != nil { + emitDebugRef(fn, l.expr, addr, true) + } + return addr +} + +func (l *lazyAddress) typ() types.Type { return l.t } + // A blank is a dummy variable whose name is "_". // It is not reified: loads are illegal and stores are ignored. -// type blank struct{} func (bl blank) load(fn *Function, source ast.Node) Value { diff --git a/vendor/honnef.co/go/tools/go/ir/methods.go b/vendor/honnef.co/go/tools/go/ir/methods.go index fa45d1b9e9..b7903c8472 100644 --- a/vendor/honnef.co/go/tools/go/ir/methods.go +++ b/vendor/honnef.co/go/tools/go/ir/methods.go @@ -11,8 +11,6 @@ import ( "go/types" "honnef.co/go/tools/analysis/lint" - - "golang.org/x/exp/typeparams" ) // MethodValue returns the Function implementing method sel, building @@ -24,7 +22,6 @@ import ( // Thread-safe. // // EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -// func (prog *Program) MethodValue(sel *types.Selection) *Function { if sel.Kind() != types.MethodVal { panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel)) @@ -46,7 +43,6 @@ func (prog *Program) MethodValue(sel *types.Selection) *Function { // LookupMethod returns the implementation of the method of type T // identified by (pkg, name). It returns nil if the method exists but // is abstract, and panics if T has no such method. -// func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function { sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name) if sel == nil { @@ -64,7 +60,7 @@ type methodSet struct { // Precondition: !isInterface(T). // EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) func (prog *Program) createMethodSet(T types.Type) *methodSet { - mset, ok := prog.methodSets.At(T).(*methodSet) + mset, ok := prog.methodSets.At(T) if !ok { mset = &methodSet{mapping: make(map[string]*Function)} prog.methodSets.Set(T, mset) @@ -104,14 +100,13 @@ func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function // Thread-safe. // // EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -// func (prog *Program) RuntimeTypes() []types.Type { prog.methodsMu.Lock() defer prog.methodsMu.Unlock() var res []types.Type - prog.methodSets.Iterate(func(T types.Type, v interface{}) { - if v.(*methodSet).complete { + prog.methodSets.Iterate(func(T types.Type, v *methodSet) { + if v.complete { res = append(res, T) } }) @@ -121,7 +116,7 @@ func (prog *Program) RuntimeTypes() []types.Type { // declaredFunc returns the concrete function/method denoted by obj. // Panic ensues if there is none. 
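[editor's note] The lazyAddress lvalue above defers the address computation behind a closure, so its side effects (a possible nil dereference or bounds panic) occur at each load, store, or address call rather than when the lvalue is constructed. A runnable toy version, where lazyAddr and the print stand in for the real type and its side effects:

	package main

	import "fmt"

	// lazyAddr is a toy version of lazyAddress: addr is recomputed on every
	// load and store, so its side effect happens at the point of use.
	type lazyAddr struct {
		addr func() *int
	}

	func (l *lazyAddr) load() int   { return *l.addr() }
	func (l *lazyAddr) store(v int) { *l.addr() = v }

	func main() {
		xs := []int{10, 20}
		l := lazyAddr{addr: func() *int {
			fmt.Println("computing &xs[1]") // printed once per use
			return &xs[1]
		}}
		l.store(42)
		fmt.Println(l.load()) // side effect again, then 42
	}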
func (prog *Program) declaredFunc(obj *types.Func) *Function { - if origin := typeparams.OriginMethod(obj); origin != obj { + if origin := obj.Origin(); origin != obj { // Calling method on instantiated type, create a wrapper that calls the generic type's method base := prog.packageLevelValue(origin) return makeInstance(prog, base.(*Function), obj.Type().(*types.Signature), nil) @@ -148,7 +143,6 @@ func (prog *Program) declaredFunc(obj *types.Func) *Function { // TODO(adonovan): make this faster. It accounts for 20% of SSA build time. // // EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -// func (prog *Program) needMethodsOf(T types.Type) { prog.methodsMu.Lock() prog.needMethods(T, false) @@ -159,10 +153,9 @@ func (prog *Program) needMethodsOf(T types.Type) { // Recursive case: skip => don't create methods for T. // // EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -// func (prog *Program) needMethods(T types.Type, skip bool) { // Each package maintains its own set of types it has visited. - if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok { + if prevSkip, ok := prog.runtimeTypes.At(T); ok { // needMethods(T) was previously called if !prevSkip || skip { return // already seen, with same or false 'skip' value @@ -195,7 +188,7 @@ func (prog *Program) needMethods(T types.Type, skip bool) { case *types.Basic: // nop - case *types.Interface, *typeparams.TypeParam: + case *types.Interface, *types.TypeParam: // nop---handled by recursion over method set. case *types.Pointer: diff --git a/vendor/honnef.co/go/tools/go/ir/mode.go b/vendor/honnef.co/go/tools/go/ir/mode.go index b0b2c92919..15b5a33f77 100644 --- a/vendor/honnef.co/go/tools/go/ir/mode.go +++ b/vendor/honnef.co/go/tools/go/ir/mode.go @@ -15,9 +15,8 @@ import ( // // *BuilderMode satisfies the flag.Value interface. Example: // -// var mode = ir.BuilderMode(0) -// func init() { flag.Var(&mode, "build", ir.BuilderModeDoc) } -// +// var mode = ir.BuilderMode(0) +// func init() { flag.Var(&mode, "build", ir.BuilderModeDoc) } type BuilderMode uint const ( diff --git a/vendor/honnef.co/go/tools/go/ir/print.go b/vendor/honnef.co/go/tools/go/ir/print.go index c5b51ca8a8..ad23d16dcd 100644 --- a/vendor/honnef.co/go/tools/go/ir/print.go +++ b/vendor/honnef.co/go/tools/go/ir/print.go @@ -23,7 +23,6 @@ import ( // Functions (including methods) and Globals use RelString and // all types are displayed with relType, so that only cross-package // references are package-qualified. 
-// func relName(v Value, i Instruction) string { if v == nil { return "" @@ -174,6 +173,7 @@ func (v *ChangeType) String() string { return printConv("ChangeType", v func (v *Convert) String() string { return printConv("Convert", v, v.X) } func (v *ChangeInterface) String() string { return printConv("ChangeInterface", v, v.X) } func (v *SliceToArrayPointer) String() string { return printConv("SliceToArrayPointer", v, v.X) } +func (v *SliceToArray) String() string { return printConv("SliceToArray", v, v.X) } func (v *MakeInterface) String() string { return printConv("MakeInterface", v, v.X) } func (v *MakeClosure) String() string { @@ -288,8 +288,8 @@ func (s *Jump) String() string { block = s.block.Succs[0].Index } str := fmt.Sprintf("Jump → b%d", block) - if s.Comment != "" { - str = fmt.Sprintf("%s # %s", str, s.Comment) + if s.Comment() != "" { + str = fmt.Sprintf("%s # %s", str, s.Comment()) } return str } @@ -326,6 +326,31 @@ func (s *ConstantSwitch) String() string { return b.String() } +func (v *CompositeValue) String() string { + var b bytes.Buffer + from := v.Parent().pkg() + fmt.Fprintf(&b, "CompositeValue <%s>", relType(v.Type(), from)) + if v.NumSet >= len(v.Values) { + // All values provided + fmt.Fprint(&b, " [all]") + } else if v.Bitmap.BitLen() == 0 { + // No values provided + fmt.Fprint(&b, " [none]") + } else { + // Some values provided + bits := []byte(fmt.Sprintf("%0*b", len(v.Values), &v.Bitmap)) + for i := 0; i < len(bits)/2; i++ { + o := len(bits) - 1 - i + bits[i], bits[o] = bits[o], bits[i] + } + fmt.Fprintf(&b, " [%s]", bits) + } + for _, vv := range v.Values { + fmt.Fprintf(&b, " %s", relName(vv, v)) + } + return b.String() +} + func (s *TypeSwitch) String() string { from := s.Parent().pkg() var b bytes.Buffer diff --git a/vendor/honnef.co/go/tools/go/ir/sanity.go b/vendor/honnef.co/go/tools/go/ir/sanity.go index 1788d0f137..b6c59c95fc 100644 --- a/vendor/honnef.co/go/tools/go/ir/sanity.go +++ b/vendor/honnef.co/go/tools/go/ir/sanity.go @@ -13,6 +13,8 @@ import ( "io" "os" "strings" + + "honnef.co/go/tools/go/types/typeutil" ) type sanity struct { @@ -30,7 +32,6 @@ type sanity struct { // // Sanity-checking is intended to facilitate the debugging of code // transformation passes. -// func sanityCheck(fn *Function, reporter io.Writer) bool { if reporter == nil { reporter = os.Stderr @@ -40,7 +41,6 @@ func sanityCheck(fn *Function, reporter io.Writer) bool { // mustSanityCheck is like sanityCheck but panics instead of returning // a negative result. 
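[editor's note] The bit-reversal in CompositeValue.String above is subtle: big.Int formats its most-significant bit first, while the bitmap numbers elements from 0, so the zero-padded binary string has to be reversed before printing. A runnable sketch of just that step (render is a hypothetical helper name, not part of the package):

	package main

	import (
		"fmt"
		"math/big"
	)

	// render formats an n-element bitmap the way CompositeValue.String does:
	// zero-padded binary, then reversed so that element 0 comes first.
	func render(bitmap *big.Int, n int) string {
		bits := []byte(fmt.Sprintf("%0*b", n, bitmap))
		for i := 0; i < len(bits)/2; i++ {
			o := len(bits) - 1 - i
			bits[i], bits[o] = bits[o], bits[i]
		}
		return string(bits)
	}

	func main() {
		var bm big.Int
		bm.SetBit(&bm, 2, 1)        // [4]byte{2: x} provides only element 2
		fmt.Println(render(&bm, 4)) // "0010": element 2 set, elements 0,1,3 unset
	}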
-// func mustSanityCheck(fn *Function, reporter io.Writer) { if !sanityCheck(fn, reporter) { fn.WriteTo(os.Stderr) @@ -142,11 +142,14 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { case *ChangeInterface: case *ChangeType: case *SliceToArrayPointer: + case *SliceToArray: case *Convert: - if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok { - if _, ok := instr.Type().Underlying().(*types.Basic); !ok { - s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type()) - } + tsetInstrX := typeutil.NewTypeSet(instr.X.Type().Underlying()) + tsetInstr := typeutil.NewTypeSet(instr.Type().Underlying()) + ok1 := tsetInstr.Any(func(term *types.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok }) + ok2 := tsetInstrX.Any(func(term *types.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok }) + if !ok1 && !ok2 { + s.errorf("convert %s -> %s: at least one type set must contain basic type", instr.X.Type(), instr.Type()) } case *Defer: @@ -194,6 +197,7 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { case *GenericConst: case *Recv: case *TypeSwitch: + case *CompositeValue: default: panic(fmt.Sprintf("Unknown instruction type: %T", instr)) } diff --git a/vendor/honnef.co/go/tools/go/ir/source.go b/vendor/honnef.co/go/tools/go/ir/source.go index 677eefbd7f..155c5f7336 100644 --- a/vendor/honnef.co/go/tools/go/ir/source.go +++ b/vendor/honnef.co/go/tools/go/ir/source.go @@ -14,8 +14,6 @@ import ( "go/ast" "go/token" "go/types" - - "golang.org/x/exp/typeparams" ) // EnclosingFunction returns the function that contains the syntax @@ -25,11 +23,10 @@ import ( // enclosed by the package's init() function. // // Returns nil if not found; reasons might include: -// - the node is not enclosed by any function. -// - the node is within an anonymous function (FuncLit) and -// its IR function has not been created yet -// (pkg.Build() has not yet been called). -// +// - the node is not enclosed by any function. +// - the node is within an anonymous function (FuncLit) and +// its IR function has not been created yet +// (pkg.Build() has not yet been called). func EnclosingFunction(pkg *Package, path []ast.Node) *Function { // Start with package-level function... fn := findEnclosingPackageLevelFunction(pkg, path) @@ -67,14 +64,12 @@ outer: // depend on whether IR code for pkg has been built, so it can be // used to quickly reject check inputs that will cause // EnclosingFunction to fail, prior to IR building. -// func HasEnclosingFunction(pkg *Package, path []ast.Node) bool { return findEnclosingPackageLevelFunction(pkg, path) != nil } // findEnclosingPackageLevelFunction returns the Function // corresponding to the package-level function enclosing path. -// func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function { if n := len(path); n >= 2 { // [... {Gen,Func}Decl File] switch decl := path[n-2].(type) { @@ -100,7 +95,6 @@ func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function // findNamedFunc returns the named function whose FuncDecl.Ident is at // position pos. -// func findNamedFunc(pkg *Package, pos token.Pos) *Function { for _, fn := range pkg.Functions { if fn.Pos() == pos { @@ -114,13 +108,13 @@ func findNamedFunc(pkg *Package, pos token.Pos) *Function { // expression e. // // It returns nil if no value was found, e.g. -// - the expression is not lexically contained within f; -// - f was not built with debug information; or -// - e is a constant expression. 
(For efficiency, no debug -// information is stored for constants. Use -// go/types.Info.Types[e].Value instead.) -// - e is a reference to nil or a built-in function. -// - the value was optimised away. +// - the expression is not lexically contained within f; +// - f was not built with debug information; or +// - e is a constant expression. (For efficiency, no debug +// information is stored for constants. Use +// go/types.Info.Types[e].Value instead.) +// - e is a reference to nil or a built-in function. +// - the value was optimised away. // // If e is an addressable expression used in an lvalue context, // value is the address denoted by e, and isAddr is true. @@ -132,7 +126,6 @@ func findNamedFunc(pkg *Package, pos token.Pos) *Function { // astutil.PathEnclosingInterval to locate the ast.Node, then // EnclosingFunction to locate the Function, then ValueForExpr to find // the ir.Value.) -// func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { if f.debugInfo() { // (opt) e = unparen(e) @@ -154,7 +147,6 @@ func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { // Package returns the IR Package corresponding to the specified // type-checker package object. // It returns nil if no such IR package has been created. -// func (prog *Program) Package(obj *types.Package) *Package { return prog.packages[obj] } @@ -163,7 +155,6 @@ func (prog *Program) Package(obj *types.Package) *Package { // the specified named object, which may be a package-level const // (*Const), var (*Global) or func (*Function) of some package in // prog. It returns nil if the object is not found. -// func (prog *Program) packageLevelValue(obj types.Object) Value { if pkg, ok := prog.packages[obj.Pkg()]; ok { return pkg.values[obj] @@ -176,16 +167,14 @@ func (prog *Program) packageLevelValue(obj types.Object) Value { // // TODO(adonovan): check the invariant that obj.Type() matches the // result's Signature, both in the params/results and in the receiver. -// func (prog *Program) FuncValue(obj *types.Func) *Function { - obj = typeparams.OriginMethod(obj) + obj = obj.Origin() fn, _ := prog.packageLevelValue(obj).(*Function) return fn } // ConstValue returns the IR Value denoted by the source-level named // constant obj. -// func (prog *Program) ConstValue(obj *types.Const) *Const { // TODO(adonovan): opt: share (don't reallocate) // Consts for const objects and constant ast.Exprs. @@ -217,8 +206,9 @@ func (prog *Program) ConstValue(obj *types.Const) *Const { // If the identifier is a field selector and its base expression is // non-addressable, then VarValue returns the value of that field. // For example: -// func f() struct {x int} -// f().x // VarValue(x) returns a *Field instruction of type int +// +// func f() struct {x int} +// f().x // VarValue(x) returns a *Field instruction of type int // // All other identifiers denote addressable locations (variables). // For them, VarValue may return either the variable's address or its @@ -227,14 +217,14 @@ func (prog *Program) ConstValue(obj *types.Const) *Const { // // If !isAddr, the returned value is the one associated with the // specific identifier. 
For example, -// var x int // VarValue(x) returns Const 0 here -// x = 1 // VarValue(x) returns Const 1 here +// +// var x int // VarValue(x) returns Const 0 here +// x = 1 // VarValue(x) returns Const 1 here // // It is not specified whether the value or the address is returned in // any particular case, as it may depend upon optimizations performed // during IR code generation, such as registerization, constant // folding, avoidance of materialization of subexpressions, etc. -// func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) { // All references to a var are local to some function, possibly init. fn := EnclosingFunction(pkg, ref) diff --git a/vendor/honnef.co/go/tools/go/ir/ssa.go b/vendor/honnef.co/go/tools/go/ir/ssa.go index c51dd67d33..1ef87f9e88 100644 --- a/vendor/honnef.co/go/tools/go/ir/ssa.go +++ b/vendor/honnef.co/go/tools/go/ir/ssa.go @@ -13,12 +13,18 @@ import ( "go/constant" "go/token" "go/types" + "math/big" "sync" - "golang.org/x/exp/typeparams" "honnef.co/go/tools/go/types/typeutil" ) +const ( + // Replace CompositeValue with only constant values with AggregateConst. Currently disabled because it breaks field + // tracking in U1000. + doSimplifyConstantCompositeValues = false +) + type ID int // A Program is a partial or complete Go program converted to IR form. @@ -31,9 +37,9 @@ type Program struct { MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets methodsMu sync.Mutex // guards the following maps: - methodSets typeutil.Map // maps type to its concrete methodSet - runtimeTypes typeutil.Map // types for which rtypes are needed - canon typeutil.Map // type canonicalization map + methodSets typeutil.Map[*methodSet] // maps type to its concrete methodSet + runtimeTypes typeutil.Map[bool] // types for which rtypes are needed + canon typeutil.Map[types.Type] // type canonicalization map bounds map[*types.Func]*Function // bounds for curried x.Method closures thunks map[selectionKey]*Function // thunks for T.Method expressions } @@ -46,7 +52,6 @@ type Program struct { // Members also contains entries for "init" (the synthetic package // initializer) and "init#%d", the nth declared init function, // and unspecified other things too. -// type Package struct { Prog *Program // the owning program Pkg *types.Package // the corresponding go/types.Package @@ -68,7 +73,6 @@ type Package struct { // A Member is a member of a Go package, implemented by *NamedConst, // *Global, *Function, or *Type; they are created by package-level // const, var, func and type declarations respectively. -// type Member interface { Name() string // declared name of the package member String() string // package-qualified name of the package member @@ -93,7 +97,6 @@ type Type struct { // // NB: a NamedConst is not a Value; it contains a constant Value, which // it augments with the name and position of its 'const' declaration. -// type NamedConst struct { object *types.Const Value *Const @@ -180,11 +183,12 @@ type Value interface { // An Instruction that defines a value (e.g. BinOp) also implements // the Value interface; an Instruction that only has an effect (e.g. Store) // does not. -// type Instruction interface { setSource(ast.Node) setID(ID) + Comment() string + // String returns the disassembled form of this value. // // Examples of Instructions that are Values: @@ -264,7 +268,6 @@ type Instruction interface { // Node is provided to simplify IR graph algorithms. 
Clients should // use the more specific and informative Value or Instruction // interfaces where appropriate. -// type Node interface { setID(ID) @@ -344,7 +347,6 @@ func (syn Synthetic) String() string { // Syntax.Pos() always returns the position of the declaring "func" token. // // Type() returns the function's Signature. -// type Function struct { node @@ -373,13 +375,13 @@ type Function struct { type instanceWrapperMap struct { h typeutil.Hasher entries map[uint32][]struct { - key *typeparams.TypeList + key *types.TypeList val *Function } len int } -func typeListIdentical(l1, l2 *typeparams.TypeList) bool { +func typeListIdentical(l1, l2 *types.TypeList) bool { if l1.Len() != l2.Len() { return false } @@ -393,10 +395,10 @@ func typeListIdentical(l1, l2 *typeparams.TypeList) bool { return true } -func (m *instanceWrapperMap) At(key *typeparams.TypeList) *Function { +func (m *instanceWrapperMap) At(key *types.TypeList) *Function { if m.entries == nil { m.entries = make(map[uint32][]struct { - key *typeparams.TypeList + key *types.TypeList val *Function }) m.h = typeutil.MakeHasher() @@ -416,10 +418,10 @@ func (m *instanceWrapperMap) At(key *typeparams.TypeList) *Function { return nil } -func (m *instanceWrapperMap) Set(key *typeparams.TypeList, val *Function) { +func (m *instanceWrapperMap) Set(key *types.TypeList, val *Function) { if m.entries == nil { m.entries = make(map[uint32][]struct { - key *typeparams.TypeList + key *types.TypeList val *Function }) m.h = typeutil.MakeHasher() @@ -437,7 +439,7 @@ func (m *instanceWrapperMap) Set(key *typeparams.TypeList, val *Function) { } } m.entries[hash] = append(m.entries[hash], struct { - key *typeparams.TypeList + key *types.TypeList val *Function }{key, val}) m.len++ @@ -456,6 +458,11 @@ const ( NeverReturns ) +type constValue struct { + c Constant + idx int +} + type functionBody struct { // The following fields are set transiently during building, // then cleared. @@ -465,11 +472,14 @@ type functionBody struct { implicitResults []*Alloc // tuple of results targets *targets // linked stack of branch targets lblocks map[types.Object]*lblock // labelled blocks - consts []Constant - wr *HTMLWriter - fakeExits BlockSet - blocksets [5]BlockSet - hasDefer bool + + consts map[constKey]constValue + aggregateConsts typeutil.Map[[]*AggregateConst] + + wr *HTMLWriter + fakeExits BlockSet + blocksets [5]BlockSet + hasDefer bool // a contiguous block of instructions that will be used by blocks, // to avoid making multiple allocations. @@ -502,7 +512,6 @@ func (fn *Function) results() []*Alloc { // // The order of Preds and Succs is significant (to Phi and If // instructions, respectively). -// type BasicBlock struct { Index int // index of this block within Parent().Blocks Comment string // optional label; no semantic significance @@ -534,7 +543,6 @@ type BasicBlock struct { // // Pos() returns the position of the value that was captured, which // belongs to an enclosing function. -// type FreeVar struct { node @@ -548,7 +556,6 @@ type FreeVar struct { } // A Parameter represents an input parameter of a function. -// type Parameter struct { register @@ -573,10 +580,10 @@ type Parameter struct { // Pos() returns token.NoPos. 
// // Example printed form: -// Const {42} -// Const {"test"} -// Const {(3 + 4i)} // +// Const {42} +// Const {"test"} +// Const {(3 + 4i)} type Const struct { register @@ -586,7 +593,18 @@ type Const struct { type AggregateConst struct { register - Values []Constant + Values []Value +} + +type CompositeValue struct { + register + + // Bitmap records which elements were explicitly provided. For example, [4]byte{2: x} would have a bitmap of 0010. + Bitmap big.Int + // The number of bits set in Bitmap + NumSet int + // Dense list of values in the composite literal. Omitted elements are filled in with zero values. + Values []Value } // TODO add the element's zero constant to ArrayConst @@ -617,7 +635,6 @@ func (*GenericConst) aConstant() {} // // Pos() returns the position of the ast.ValueSpec.Names[*] // identifier. -// type Global struct { node @@ -637,20 +654,19 @@ type Global struct { // Go spec (excluding "make" and "new") or one of these ir-defined // intrinsics: // -// // wrapnilchk returns ptr if non-nil, panics otherwise. -// // (For use in indirection wrappers.) -// func ir:wrapnilchk(ptr *T, recvType, methodName string) *T +// // wrapnilchk returns ptr if non-nil, panics otherwise. +// // (For use in indirection wrappers.) +// func ir:wrapnilchk(ptr *T, recvType, methodName string) *T // -// // noreturnWasPanic returns true if the previously called -// // function panicked, false if it exited the process. -// func ir:noreturnWasPanic() bool +// // noreturnWasPanic returns true if the previously called +// // function panicked, false if it exited the process. +// func ir:noreturnWasPanic() bool // // Object() returns a *types.Builtin for built-ins defined by the spec, // nil for others. // // Type() returns a *types.Signature representing the effective // signature of the built-in for this call. -// type Builtin struct { node @@ -687,9 +703,9 @@ type Builtin struct { // allocates a varargs slice. // // Example printed form: -// t1 = StackAlloc <*int> -// t2 = HeapAlloc <*int> (new) // +// t1 = StackAlloc <*int> +// t2 = HeapAlloc <*int> (new) type Alloc struct { register Heap bool @@ -711,8 +727,8 @@ var _ Value = (*Sigma)(nil) // Within a block, all σ-nodes must appear before all non-σ nodes. // // Example printed form: -// t2 = Sigma [#0] t1 (x) // +// t2 = Sigma [#0] t1 (x) type Sigma struct { register From *BasicBlock @@ -749,8 +765,8 @@ type Copy struct { // during SSA renaming. // // Example printed form: -// t3 = Phi 2:t1 4:t2 (x) // +// t3 = Phi 2:t1 4:t2 (x) type Phi struct { register Edges []Value // Edges[i] is value for Block().Preds[i] @@ -769,10 +785,10 @@ type Phi struct { // Pos() returns the ast.CallExpr.Lparen, if explicit in the source. // // Example printed form: -// t3 = Call <()> println t1 t2 -// t4 = Call <()> foo$1 -// t6 = Invoke t5.String // +// t3 = Call <()> println t1 t2 +// t4 = Call <()> foo$1 +// t6 = Invoke t5.String type Call struct { register Call CallCommon @@ -783,8 +799,8 @@ type Call struct { // Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source. // // Example printed form: -// t3 = BinOp {+} t2 t1 // +// t3 = BinOp {+} t2 t1 type BinOp struct { register // One of: @@ -800,10 +816,9 @@ type BinOp struct { // SUB is negation. // NOT is logical negation. // -// // Example printed form: -// t2 = UnOp {^} t1 // +// t2 = UnOp {^} t1 type UnOp struct { register Op token.Token // One of: NOT SUB XOR ! - ^ @@ -817,8 +832,8 @@ type UnOp struct { // specified. 
// // Example printed form: -// t2 = Load t1 // +// t2 = Load t1 type Load struct { register X Value @@ -828,11 +843,11 @@ type Load struct { // change to Type(). // // Type changes are permitted: -// - between a named type and its underlying type. -// - between two named types of the same underlying type. -// - between (possibly named) pointers to identical base types. -// - from a bidirectional channel to a read- or write-channel, -// optionally adding/removing a name. +// - between a named type and its underlying type. +// - between two named types of the same underlying type. +// - between (possibly named) pointers to identical base types. +// - from a bidirectional channel to a read- or write-channel, +// optionally adding/removing a name. // // This operation cannot fail dynamically. // @@ -840,8 +855,8 @@ type Load struct { // from an explicit conversion in the source. // // Example printed form: -// t2 = ChangeType <*T> t1 // +// t2 = ChangeType <*T> t1 type ChangeType struct { register X Value @@ -852,12 +867,13 @@ type ChangeType struct { // // A conversion may change the value and representation of its operand. // Conversions are permitted: -// - between real numeric types. -// - between complex numeric types. -// - between string and []byte or []rune. -// - between pointers and unsafe.Pointer. -// - between unsafe.Pointer and uintptr. -// - from (Unicode) integer to (UTF-8) string. +// - between real numeric types. +// - between complex numeric types. +// - between string and []byte or []rune. +// - between pointers and unsafe.Pointer. +// - between unsafe.Pointer and uintptr. +// - from (Unicode) integer to (UTF-8) string. +// // A conversion may imply a type name change also. // // This operation cannot fail dynamically. @@ -869,8 +885,8 @@ type ChangeType struct { // from an explicit conversion in the source. // // Example printed form: -// t2 = Convert <[]byte> t1 // +// t2 = Convert <[]byte> t1 type Convert struct { register X Value @@ -886,8 +902,8 @@ type Convert struct { // otherwise. // // Example printed form: -// t2 = ChangeInterface t1 // +// t2 = ChangeInterface t1 type ChangeInterface struct { register X Value @@ -900,13 +916,27 @@ type ChangeInterface struct { // from an explicit conversion in the source. // // Example printed form: -// t1 = SliceToArrayPointer <*[4]byte> t1 // +// t2 = SliceToArrayPointer <*[4]byte> t1 type SliceToArrayPointer struct { register X Value } +// The SliceToArray instruction yields the conversion of slice X to +// array. +// +// Pos() returns the ast.CallExpr.Lparen, if the instruction arose +// from an explicit conversion in the source. +// +// Example printed form: +// +// t2 = SliceToArray <[4]byte> t1 +type SliceToArray struct { + register + X Value +} + // MakeInterface constructs an instance of an interface type from a // value of a concrete type. // @@ -914,14 +944,15 @@ type SliceToArrayPointer struct { // of X, and Program.MethodValue(m) to find the implementation of a method. // // To construct the zero value of an interface type T, use: -// NewConst(constant.MakeNil(), T, pos) +// +// NewConst(constant.MakeNil(), T, pos) // // Pos() returns the ast.CallExpr.Lparen, if the instruction arose // from an explicit conversion in the source. // // Example printed form: -// t2 = MakeInterface t1 // +// t2 = MakeInterface t1 type MakeInterface struct { register X Value @@ -936,9 +967,9 @@ type MakeInterface struct { // closure or the ast.SelectorExpr.Sel for a bound method closure. 
// // Example printed form: -// t1 = MakeClosure foo$1 t1 t2 -// t5 = MakeClosure (T).foo$bound t4 // +// t1 = MakeClosure foo$1 t1 t2 +// t5 = MakeClosure (T).foo$bound t4 type MakeClosure struct { register Fn Value // always a *Function @@ -954,9 +985,9 @@ type MakeClosure struct { // the ast.CompositeLit.Lbrack if created by a literal. // // Example printed form: -// t1 = MakeMap -// t2 = MakeMap t1 // +// t1 = MakeMap +// t2 = MakeMap t1 type MakeMap struct { register Reserve Value // initial space reservation; nil => default @@ -971,9 +1002,9 @@ type MakeMap struct { // created it. // // Example printed form: -// t3 = MakeChan t1 -// t4 = MakeChan t2 // +// t3 = MakeChan t1 +// t4 = MakeChan t2 type MakeChan struct { register Size Value // int; size of buffer; zero => synchronous. @@ -993,9 +1024,9 @@ type MakeChan struct { // created it. // // Example printed form: -// t3 = MakeSlice <[]string> t1 t2 -// t4 = MakeSlice t1 t2 // +// t3 = MakeSlice <[]string> t1 t2 +// t4 = MakeSlice t1 t2 type MakeSlice struct { register Len Value @@ -1016,8 +1047,8 @@ type MakeSlice struct { // NoPos if not explicit in the source (e.g. a variadic argument slice). // // Example printed form: -// t4 = Slice <[]int> t3 t2 t1 // +// t4 = Slice <[]int> t3 t2 t1 type Slice struct { register X Value // slice, string, or *array @@ -1038,8 +1069,8 @@ type Slice struct { // field, if explicit in the source. // // Example printed form: -// t2 = FieldAddr <*int> [0] (X) t1 // +// t2 = FieldAddr <*int> [0] (X) t1 type FieldAddr struct { register X Value // *struct @@ -1056,8 +1087,8 @@ type FieldAddr struct { // field, if explicit in the source. // // Example printed form: -// t2 = FieldAddr [0] (X) t1 // +// t2 = FieldAddr [0] (X) t1 type Field struct { register X Value // struct @@ -1079,8 +1110,8 @@ type Field struct { // explicit in the source. // // Example printed form: -// t3 = IndexAddr <*int> t2 t1 // +// t3 = IndexAddr <*int> t2 t1 type IndexAddr struct { register X Value // slice or *array, @@ -1093,8 +1124,8 @@ type IndexAddr struct { // explicit in the source. // // Example printed form: -// t3 = Index t2 t1 // +// t3 = Index t2 t1 type Index struct { register X Value // array @@ -1110,9 +1141,9 @@ type Index struct { // Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source. // // Example printed form: -// t4 = MapLookup t3 t1 -// t6 = MapLookup <(string, bool)> t3 t2 // +// t4 = MapLookup t3 t1 +// t6 = MapLookup <(string, bool)> t3 t2 type MapLookup struct { register X Value // map @@ -1126,8 +1157,8 @@ type MapLookup struct { // Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source. // // Example printed form: -// t3 = StringLookup t2 t1 // +// t3 = StringLookup t2 t1 type StringLookup struct { register X Value // string @@ -1136,7 +1167,6 @@ type StringLookup struct { // SelectState is a helper for Select. // It represents one goal state and its corresponding communication. -// type SelectState struct { Dir types.ChanDir // direction of case (SendOnly or RecvOnly) Chan Value // channel to use (for send or receive) @@ -1151,7 +1181,9 @@ type SelectState struct { // Let n be the number of States for which Dir==RECV and Tᵢ (0 ≤ i < n) // be the element type of each such state's Chan. // Select returns an n+2-tuple -// (index int, recvOk bool, r₀ T₀, ... rₙ-1 Tₙ-1) +// +// (index int, recvOk bool, r₀ T₀, ... rₙ-1 Tₙ-1) +// // The tuple's components, described below, must be accessed via the // Extract instruction. 
// @@ -1177,9 +1209,9 @@ type SelectState struct { // Pos() returns the ast.SelectStmt.Select. // // Example printed form: -// t6 = SelectNonBlocking <(index int, ok bool, int)> [<-t4, t5<-t1] -// t11 = SelectBlocking <(index int, ok bool)> [] // +// t6 = SelectNonBlocking <(index int, ok bool, int)> [<-t4, t5<-t1] +// t11 = SelectBlocking <(index int, ok bool)> [] type Select struct { register States []*SelectState @@ -1196,8 +1228,8 @@ type Select struct { // Pos() returns the ast.RangeStmt.For. // // Example printed form: -// t2 = Range t1 // +// t2 = Range t1 type Range struct { register X Value // string or map @@ -1219,9 +1251,9 @@ type Range struct { // The types of k and/or v may be types.Invalid. // // Example printed form: -// t5 = Next <(ok bool, k int, v rune)> t2 -// t5 = Next <(ok bool, k invalid type, v invalid type)> t2 // +// t5 = Next <(ok bool, k int, v rune)> t2 +// t5 = Next <(ok bool, k invalid type, v invalid type)> t2 type Next struct { register Iter Value @@ -1260,9 +1292,9 @@ type Next struct { // type-switch statement. // // Example printed form: -// t2 = TypeAssert t1 -// t4 = TypeAssert <(value fmt.Stringer, ok bool)> t1 // +// t2 = TypeAssert t1 +// t4 = TypeAssert <(value fmt.Stringer, ok bool)> t1 type TypeAssert struct { register X Value @@ -1277,8 +1309,8 @@ type TypeAssert struct { // MapLookup and others. // // Example printed form: -// t7 = Extract [1] (ok) t4 // +// t7 = Extract [1] (ok) t4 type Extract struct { register Tuple Value @@ -1295,11 +1327,10 @@ type Extract struct { // Pos() returns NoPos. // // Example printed form: -// Jump → b1 // +// Jump → b1 type Jump struct { anInstruction - Comment string } // The Unreachable pseudo-instruction signals that execution cannot @@ -1313,8 +1344,8 @@ type Jump struct { // containing BasicBlock. // // Example printed form: -// Unreachable → b1 // +// Unreachable → b1 type Unreachable struct { anInstruction } @@ -1329,8 +1360,8 @@ type Unreachable struct { // Pos() returns the *ast.IfStmt, if explicit in the source. // // Example printed form: -// If t2 → b1 b2 // +// If t2 → b1 b2 type If struct { anInstruction Cond Value @@ -1369,9 +1400,9 @@ type TypeSwitch struct { // Pos() returns the ast.ReturnStmt.Return, if explicit in the source. // // Example printed form: -// Return -// Return t1 t2 // +// Return +// Return t1 t2 type Return struct { anInstruction Results []Value @@ -1387,8 +1418,8 @@ type Return struct { // Pos() returns NoPos. // // Example printed form: -// RunDefers // +// RunDefers type RunDefers struct { anInstruction } @@ -1405,8 +1436,8 @@ type RunDefers struct { // in the source. // // Example printed form: -// Panic t1 // +// Panic t1 type Panic struct { anInstruction X Value // an interface{} @@ -1420,10 +1451,10 @@ type Panic struct { // Pos() returns the ast.GoStmt.Go. // // Example printed form: -// Go println t1 -// Go t3 -// GoInvoke t4.Bar t2 // +// Go println t1 +// Go t3 +// GoInvoke t4.Bar t2 type Go struct { anInstruction Call CallCommon @@ -1437,10 +1468,10 @@ type Go struct { // Pos() returns the ast.DeferStmt.Defer. // // Example printed form: -// Defer println t1 -// Defer t3 -// DeferInvoke t4.Bar t2 // +// Defer println t1 +// Defer t3 +// DeferInvoke t4.Bar t2 type Defer struct { anInstruction Call CallCommon @@ -1451,8 +1482,8 @@ type Defer struct { // Pos() returns the ast.SendStmt.Arrow, if explicit in the source. 
// // Example printed form: -// Send t2 t1 // +// Send t2 t1 type Send struct { anInstruction Chan, X Value @@ -1469,8 +1500,9 @@ type Send struct { // Pos() returns the ast.RangeStmt.For. // // Example printed form: -// t2 = Recv t1 -// t3 = Recv <(int, bool)> t1 +// +// t2 = Recv t1 +// t3 = Recv <(int, bool)> t1 type Recv struct { register Chan Value @@ -1486,8 +1518,8 @@ type Recv struct { // implementation choices, the details are not specified. // // Example printed form: -// Store {int} t2 t1 // +// Store {int} t2 t1 type Store struct { anInstruction Addr Value @@ -1502,8 +1534,8 @@ type Store struct { // Pos() returns NoPos. // // Example printed form: -// BlankStore t1 // +// BlankStore t1 type BlankStore struct { anInstruction Val Value @@ -1516,8 +1548,8 @@ type BlankStore struct { // if explicit in the source. // // Example printed form: -// MapUpdate t3 t1 t2 // +// MapUpdate t3 t1 t2 type MapUpdate struct { anInstruction Map Value @@ -1549,10 +1581,10 @@ type MapUpdate struct { // ordinary SSA renaming machinery.) // // Example printed form: -// ; *ast.CallExpr @ 102:9 is t5 -// ; var x float64 @ 109:72 is x -// ; address of *ast.CompositeLit @ 216:10 is t0 // +// ; *ast.CallExpr @ 102:9 is t5 +// ; var x float64 @ 109:72 is x +// ; address of *ast.CompositeLit @ 216:10 is t0 type DebugRef struct { anInstruction Expr ast.Expr // the referring expression (never *ast.ParenExpr) @@ -1570,7 +1602,6 @@ type DebugRef struct { // // Temporary names are automatically assigned to each register on // completion of building a function in IR form. -// type register struct { anInstruction typ types.Type // type of virtual register @@ -1599,7 +1630,12 @@ func (n *node) Pos() token.Pos { // It provides the implementations of the Block and setBlock methods. type anInstruction struct { node - block *BasicBlock // the basic block of this instruction + block *BasicBlock // the basic block of this instruction + comment string +} + +func (instr anInstruction) Comment() string { + return instr.comment } // CallCommon is contained by Go, Defer and Call to hold the @@ -1614,15 +1650,17 @@ type anInstruction struct { // 'func'. // // Value may be one of: -// (a) a *Function, indicating a statically dispatched call -// to a package-level function, an anonymous function, or -// a method of a named type. -// (b) a *MakeClosure, indicating an immediately applied -// function literal with free variables. -// (c) a *Builtin, indicating a statically dispatched call -// to a built-in function. -// (d) any other value, indicating a dynamically dispatched -// function call. +// +// (a) a *Function, indicating a statically dispatched call +// to a package-level function, an anonymous function, or +// a method of a named type. +// (b) a *MakeClosure, indicating an immediately applied +// function literal with free variables. +// (c) a *Builtin, indicating a statically dispatched call +// to a built-in function. +// (d) any other value, indicating a dynamically dispatched +// function call. +// // StaticCallee returns the identity of the callee in cases // (a) and (b), nil otherwise. // @@ -1630,9 +1668,10 @@ type anInstruction struct { // Args[0] contains the receiver parameter. // // Example printed form: -// t3 = Call <()> println t1 t2 -// Go t3 -// Defer t3 +// +// t3 = Call <()> println t1 t2 +// Go t3 +// Defer t3 // // 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon // represents a dynamically dispatched call to an interface method. 
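[editor's note] With the change above, Jump loses its exported Comment field and every instruction now reports its comment through the unexported field on the embedded anInstruction. A minimal sketch of that embedding pattern (base and jump are toy names):

	package main

	import "fmt"

	// base plays the role of anInstruction: it owns the unexported comment
	// and exposes it through a shared accessor.
	type base struct{ comment string }

	func (b base) Comment() string { return b.comment }

	// jump embeds base and so inherits Comment(), instead of carrying its
	// own exported field as the old Jump did.
	type jump struct{ base }

	func main() {
		j := jump{base{comment: "loop.exit"}}
		fmt.Println(j.Comment()) // loop.exit
	}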
@@ -1646,13 +1685,13 @@ type anInstruction struct { // receiver but the first true argument. // // Example printed form: -// t6 = Invoke t5.String -// GoInvoke t4.Bar t2 -// DeferInvoke t4.Bar t2 +// +// t6 = Invoke t5.String +// GoInvoke t4.Bar t2 +// DeferInvoke t4.Bar t2 // // For all calls to variadic functions (Signature().Variadic()), // the last element of Args is a slice. -// type CallCommon struct { Value Value // receiver (invoke mode) or func value (call mode) Method *types.Func // abstract method (invoke mode) @@ -1673,7 +1712,6 @@ func (c *CallCommon) IsInvoke() bool { // // In either "call" or "invoke" mode, if the callee is a method, its // receiver is represented by sig.Recv, not sig.Params().At(0). -// func (c *CallCommon) Signature() *types.Signature { if c.Method != nil { return c.Method.Type().(*types.Signature) @@ -1716,7 +1754,6 @@ func (c *CallCommon) Description() string { // The CallInstruction interface, implemented by *Go, *Defer and *Call, // exposes the common parts of function-calling instructions, // yet provides a way back to the Value defined by *Call alone. -// type CallInstruction interface { Instruction Common() *CallCommon // returns the common parts of the call @@ -1802,7 +1839,6 @@ func (c *NamedConst) RelString(from *types.Package) string { return relString(c, // Func returns the package-level function of the specified name, // or nil if not found. -// func (p *Package) Func(name string) (f *Function) { f, _ = p.Members[name].(*Function) return @@ -1810,7 +1846,6 @@ func (p *Package) Func(name string) (f *Function) { // Var returns the package-level variable of the specified name, // or nil if not found. -// func (p *Package) Var(name string) (g *Global) { g, _ = p.Members[name].(*Global) return @@ -1818,7 +1853,6 @@ func (p *Package) Var(name string) (g *Global) { // Const returns the package-level constant of the specified name, // or nil if not found. -// func (p *Package) Const(name string) (c *NamedConst) { c, _ = p.Members[name].(*NamedConst) return @@ -1826,7 +1860,6 @@ func (p *Package) Const(name string) (c *NamedConst) { // Type returns the package-level type of the specified name, // or nil if not found. 
-// func (p *Package) Type(name string) (t *Type) { t, _ = p.Members[name].(*Type) return @@ -1880,6 +1913,10 @@ func (v *SliceToArrayPointer) Operands(rands []*Value) []*Value { return append(rands, &v.X) } +func (v *SliceToArray) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + func (s *DebugRef) Operands(rands []*Value) []*Value { return append(rands, &s.X) } @@ -2042,13 +2079,26 @@ func (v *Load) Operands(rands []*Value) []*Value { return append(rands, &v.X) } +func (v *AggregateConst) Operands(rands []*Value) []*Value { + for i := range v.Values { + rands = append(rands, &v.Values[i]) + } + return rands +} + +func (v *CompositeValue) Operands(rands []*Value) []*Value { + for i := range v.Values { + rands = append(rands, &v.Values[i]) + } + return rands +} + // Non-Instruction Values: -func (v *Builtin) Operands(rands []*Value) []*Value { return rands } -func (v *FreeVar) Operands(rands []*Value) []*Value { return rands } -func (v *Const) Operands(rands []*Value) []*Value { return rands } -func (v *ArrayConst) Operands(rands []*Value) []*Value { return rands } -func (v *AggregateConst) Operands(rands []*Value) []*Value { return rands } -func (v *GenericConst) Operands(rands []*Value) []*Value { return rands } -func (v *Function) Operands(rands []*Value) []*Value { return rands } -func (v *Global) Operands(rands []*Value) []*Value { return rands } -func (v *Parameter) Operands(rands []*Value) []*Value { return rands } +func (v *Builtin) Operands(rands []*Value) []*Value { return rands } +func (v *FreeVar) Operands(rands []*Value) []*Value { return rands } +func (v *Const) Operands(rands []*Value) []*Value { return rands } +func (v *ArrayConst) Operands(rands []*Value) []*Value { return rands } +func (v *GenericConst) Operands(rands []*Value) []*Value { return rands } +func (v *Function) Operands(rands []*Value) []*Value { return rands } +func (v *Global) Operands(rands []*Value) []*Value { return rands } +func (v *Parameter) Operands(rands []*Value) []*Value { return rands } diff --git a/vendor/honnef.co/go/tools/go/ir/staticcheck.conf b/vendor/honnef.co/go/tools/go/ir/staticcheck.conf deleted file mode 100644 index d7b38bc356..0000000000 --- a/vendor/honnef.co/go/tools/go/ir/staticcheck.conf +++ /dev/null @@ -1,3 +0,0 @@ -# ssa/... is mostly imported from upstream and we don't want to -# deviate from it too much, hence disabling SA1019 -checks = ["inherit", "-SA1019"] diff --git a/vendor/honnef.co/go/tools/go/ir/util.go b/vendor/honnef.co/go/tools/go/ir/util.go index 550f6c9d88..0a733b654d 100644 --- a/vendor/honnef.co/go/tools/go/ir/util.go +++ b/vendor/honnef.co/go/tools/go/ir/util.go @@ -16,8 +16,6 @@ import ( "honnef.co/go/tools/go/ast/astutil" "honnef.co/go/tools/go/types/typeutil" - - "golang.org/x/exp/typeparams" ) //// AST utilities @@ -26,7 +24,6 @@ func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } // isBlankIdent returns true iff e is an Ident with name "_". // They have no associated types.Object, and thus no type. -// func isBlankIdent(e ast.Expr) bool { id, ok := e.(*ast.Ident) return ok && id.Name == "_" @@ -51,7 +48,7 @@ func isInterface(T types.Type) bool { return types.IsInterface(T) } func deref(typ types.Type) types.Type { orig := typ - if t, ok := typ.(*typeparams.TypeParam); ok { + if t, ok := typ.(*types.TypeParam); ok { if ctyp := typeutil.CoreType(t); ctyp != nil { typ = ctyp } @@ -71,7 +68,6 @@ func recvType(obj *types.Func) types.Type { // returns a closure that prints the corresponding "end" message. 
// Call using 'defer logStack(...)()' to show builder stack on panic. // Don't forget trailing parens! -// func logStack(format string, args ...interface{}) func() { msg := fmt.Sprintf(format, args...) io.WriteString(os.Stderr, msg) @@ -99,7 +95,7 @@ func makeLen(T types.Type) *Builtin { lenParams := types.NewTuple(anonVar(T)) return &Builtin{ name: "len", - sig: types.NewSignature(nil, lenParams, lenResults, false), + sig: types.NewSignatureType(nil, nil, nil, lenParams, lenResults, false), } } @@ -147,3 +143,6 @@ func assert(x bool) { panic("failed assertion") } } + +// BlockMap is a mapping from basic blocks (identified by their indices) to values. +type BlockMap[T any] []T diff --git a/vendor/honnef.co/go/tools/go/ir/wrappers.go b/vendor/honnef.co/go/tools/go/ir/wrappers.go index 6082d07e1e..69537fb777 100644 --- a/vendor/honnef.co/go/tools/go/ir/wrappers.go +++ b/vendor/honnef.co/go/tools/go/ir/wrappers.go @@ -22,8 +22,6 @@ package ir import ( "fmt" "go/types" - - "golang.org/x/exp/typeparams" ) // -- wrappers ----------------------------------------------------------- @@ -42,7 +40,6 @@ import ( // - the result may be a thunk or a wrapper. // // EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -// func makeWrapper(prog *Program, sel *types.Selection) *Function { obj := sel.Obj().(*types.Func) // the declared function sig := sel.Type().(*types.Signature) // type of this wrapper @@ -90,7 +87,7 @@ func makeWrapper(prog *Program, sel *types.Selection) *Function { var c Call c.Call.Value = &Builtin{ name: "ir:wrapnilchk", - sig: types.NewSignature(nil, + sig: types.NewSignatureType(nil, nil, nil, types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)), types.NewTuple(anonVar(sel.Recv())), false), } @@ -140,7 +137,6 @@ func makeWrapper(prog *Program, sel *types.Selection) *Function { // createParams creates parameters for wrapper method fn based on its // Signature.Params, which do not include the receiver. // start is the index of the first regular parameter to use. -// func createParams(fn *Function, start int) { tparams := fn.Signature.Params() for i, n := start, tparams.Len(); i < n; i++ { @@ -159,22 +155,21 @@ func createParams(fn *Function, start int) { // Use MakeClosure with such a wrapper to construct a bound method // closure. e.g.: // -// type T int or: type T interface { meth() } -// func (t T) meth() -// var t T -// f := t.meth -// f() // calls t.meth() +// type T int or: type T interface { meth() } +// func (t T) meth() +// var t T +// f := t.meth +// f() // calls t.meth() // // f is a closure of a synthetic wrapper defined as if by: // -// f := func() { return t.meth() } +// f := func() { return t.meth() } // // Unlike makeWrapper, makeBound need perform no indirection or field // selections because that can be done before the closure is // constructed. // // EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -// func makeBound(prog *Program, obj *types.Func) *Function { prog.methodsMu.Lock() defer prog.methodsMu.Unlock() @@ -226,22 +221,21 @@ func makeBound(prog *Program, obj *types.Func) *Function { // // Precondition: sel.Kind() == types.MethodExpr. 
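[editor's note] BlockMap, added at the end of util.go above, is nothing more than a slice indexed by BasicBlock.Index; since block indices are small and dense, this is cheaper than a hash map. A runnable sketch of how the lifting code uses it, with plain ints standing in for block indices:

	package main

	import "fmt"

	// BlockMap is the alias added above: a mapping from basic blocks
	// (identified by their indices) to values.
	type BlockMap[T any] []T

	func main() {
		numBlocks := 4
		predHasStore := make(BlockMap[bool], numBlocks)
		predHasStore[2] = true    // mark the block with Index == 2
		fmt.Println(predHasStore) // [false false true false]
	}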
// -// type T int or: type T interface { meth() } -// func (t T) meth() -// f := T.meth -// var t T -// f(t) // calls t.meth() +// type T int or: type T interface { meth() } +// func (t T) meth() +// f := T.meth +// var t T +// f(t) // calls t.meth() // // f is a synthetic wrapper defined as if by: // -// f := func(t T) { return t.meth() } +// f := func(t T) { return t.meth() } // // TODO(adonovan): opt: currently the stub is created even when used // directly in a function call: C.f(i, 0). This is less efficient // than inlining the stub. // // EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -// func makeThunk(prog *Program, sel *types.Selection) *Function { if sel.Kind() != types.MethodExpr { panic(sel) @@ -259,7 +253,7 @@ func makeThunk(prog *Program, sel *types.Selection) *Function { defer prog.methodsMu.Unlock() // Canonicalize key.recv to avoid constructing duplicate thunks. - canonRecv, ok := prog.canon.At(key.recv).(types.Type) + canonRecv, ok := prog.canon.At(key.recv) if !ok { canonRecv = key.recv prog.canon.Set(key.recv, canonRecv) @@ -278,7 +272,7 @@ func makeThunk(prog *Program, sel *types.Selection) *Function { } func changeRecv(s *types.Signature, recv *types.Var) *types.Signature { - return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic()) + return types.NewSignatureType(recv, nil, nil, s.Params(), s.Results(), s.Variadic()) } // selectionKey is like types.Selection but a usable map key. @@ -293,11 +287,11 @@ type selectionKey struct { // makeInstance creates a wrapper function with signature sig that calls the generic function fn. // If targs is not nil, fn is a function and targs describes the concrete type arguments. // If targs is nil, fn is a method and the type arguments are derived from the receiver. -func makeInstance(prog *Program, fn *Function, sig *types.Signature, targs *typeparams.TypeList) *Function { +func makeInstance(prog *Program, fn *Function, sig *types.Signature, targs *types.TypeList) *Function { if sig.Recv() != nil { assert(targs == nil) // Methods don't have their own type parameters, but the receiver does - targs = typeparams.NamedTypeArgs(deref(sig.Recv().Type()).(*types.Named)) + targs = deref(sig.Recv().Type()).(*types.Named).TypeArgs() } else { assert(targs != nil) } diff --git a/vendor/honnef.co/go/tools/go/ir/write.go b/vendor/honnef.co/go/tools/go/ir/write.go index b936bc9852..139c8cf32b 100644 --- a/vendor/honnef.co/go/tools/go/ir/write.go +++ b/vendor/honnef.co/go/tools/go/ir/write.go @@ -1,5 +1,5 @@ package ir func NewJump(parent *BasicBlock) *Jump { - return &Jump{anInstruction{block: parent}, ""} + return &Jump{anInstruction{block: parent}} } diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/typeparams.go b/vendor/honnef.co/go/tools/go/types/typeutil/typeparams.go index 9bfe8a38c2..2bf6ec6094 100644 --- a/vendor/honnef.co/go/tools/go/types/typeutil/typeparams.go +++ b/vendor/honnef.co/go/tools/go/types/typeutil/typeparams.go @@ -8,7 +8,7 @@ import ( ) type TypeSet struct { - Terms []*typeparams.Term + Terms []*types.Term empty bool } @@ -71,7 +71,7 @@ func CoreType(typ types.Type) types.Type { // All calls fn for each term in the type set and reports whether all invocations returned true. // If the type set is empty or unconstrained, All immediately returns false. 
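[editor's note] TypeSet.Any is what the new Convert sanity check relies on: a conversion is accepted if any term in either operand's type set has a basic underlying type. A dependency-free sketch of that predicate over go/types terms (anyBasic is a hypothetical helper; the real check goes through typeutil.NewTypeSet):

	package main

	import (
		"fmt"
		"go/types"
	)

	// anyBasic reports whether any term's underlying type is *types.Basic,
	// mirroring the predicate passed to TypeSet.Any in the sanity check.
	func anyBasic(terms []*types.Term) bool {
		for _, term := range terms {
			if _, ok := term.Type().Underlying().(*types.Basic); ok {
				return true
			}
		}
		return false
	}

	func main() {
		terms := []*types.Term{
			types.NewTerm(true, types.Typ[types.Int]),                   // ~int
			types.NewTerm(false, types.NewSlice(types.Typ[types.Byte])), // []byte
		}
		fmt.Println(anyBasic(terms)) // true: ~int has a basic underlying type
	}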
-func (ts TypeSet) All(fn func(*typeparams.Term) bool) bool { +func (ts TypeSet) All(fn func(*types.Term) bool) bool { if len(ts.Terms) == 0 { return false } @@ -85,7 +85,7 @@ func (ts TypeSet) All(fn func(*typeparams.Term) bool) bool { // Any calls fn for each term in the type set and reports whether any invocation returned true. // It stops after the first call that returned true. -func (ts TypeSet) Any(fn func(*typeparams.Term) bool) bool { +func (ts TypeSet) Any(fn func(*types.Term) bool) bool { for _, term := range ts.Terms { if fn(term) { return true @@ -95,16 +95,16 @@ func (ts TypeSet) Any(fn func(*typeparams.Term) bool) bool { } // All is a wrapper for NewTypeSet(typ).All(fn). -func All(typ types.Type, fn func(*typeparams.Term) bool) bool { +func All(typ types.Type, fn func(*types.Term) bool) bool { return NewTypeSet(typ).All(fn) } // Any is a wrapper for NewTypeSet(typ).Any(fn). -func Any(typ types.Type, fn func(*typeparams.Term) bool) bool { +func Any(typ types.Type, fn func(*types.Term) bool) bool { return NewTypeSet(typ).Any(fn) } -func IsSlice(term *typeparams.Term) bool { +func IsSlice(term *types.Term) bool { _, ok := term.Type().Underlying().(*types.Slice) return ok } diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/upstream.go b/vendor/honnef.co/go/tools/go/types/typeutil/upstream.go index d35d08e00c..04d8c21ba6 100644 --- a/vendor/honnef.co/go/tools/go/types/typeutil/upstream.go +++ b/vendor/honnef.co/go/tools/go/types/typeutil/upstream.go @@ -9,7 +9,6 @@ import ( ) type MethodSetCache = typeutil.MethodSetCache -type Map = typeutil.Map type Hasher = typeutil.Hasher func Callee(info *types.Info, call *ast.CallExpr) types.Object { @@ -23,3 +22,31 @@ func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection func MakeHasher() Hasher { return typeutil.MakeHasher() } + +type Map[V any] struct { + m typeutil.Map +} + +func (m *Map[V]) Delete(key types.Type) bool { return m.m.Delete(key) } +func (m *Map[V]) At(key types.Type) (V, bool) { + v := m.m.At(key) + if v == nil { + var zero V + return zero, false + } else { + return v.(V), true + } +} +func (m *Map[V]) Set(key types.Type, value V) { m.m.Set(key, value) } +func (m *Map[V]) Len() int { return m.m.Len() } +func (m *Map[V]) Iterate(f func(key types.Type, value V)) { + ff := func(key types.Type, value interface{}) { + f(key, value.(V)) + } + m.m.Iterate(ff) + +} +func (m *Map[V]) Keys() []types.Type { return m.m.Keys() } +func (m *Map[V]) String() string { return m.m.String() } +func (m *Map[V]) KeysString() string { return m.m.KeysString() } +func (m *Map[V]) SetHasher(h typeutil.Hasher) { m.m.SetHasher(h) } diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/util.go b/vendor/honnef.co/go/tools/go/types/typeutil/util.go index b0aca16bdb..3a2ad973bb 100644 --- a/vendor/honnef.co/go/tools/go/types/typeutil/util.go +++ b/vendor/honnef.co/go/tools/go/types/typeutil/util.go @@ -122,6 +122,8 @@ func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Fiel if field.Anonymous() { if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok { out = append(out, flattenFields(s, np, seen)...) 
+ } else { + out = append(out, Field{field, tag, np}) } } else { out = append(out, Field{field, tag, np}) diff --git a/vendor/honnef.co/go/tools/knowledge/deprecated.go b/vendor/honnef.co/go/tools/knowledge/deprecated.go index 7412a86e94..caeb49975c 100644 --- a/vendor/honnef.co/go/tools/knowledge/deprecated.go +++ b/vendor/honnef.co/go/tools/knowledge/deprecated.go @@ -19,6 +19,9 @@ type Deprecation struct { // go/importer.ForCompiler contains "Deprecated:", but it refers to a single argument, not the whole function. // Luckily, the notice starts in the middle of a paragraph, and as such isn't detected by us. +// TODO(dh): StdlibDeprecations doesn't contain entries for internal packages and unexported API. That's fine for normal +// users, but makes the Deprecated check less useful for people working on Go itself. + // StdlibDeprecations contains a mapping of Go API (such as variables, methods, or fields, among others) // to information about when it has been deprecated. var StdlibDeprecations = map[string]Deprecation{ @@ -85,8 +88,19 @@ var StdlibDeprecations = map[string]Deprecation{ "net/http.ErrUnexpectedTrailer": {12, DeprecatedUseNoLonger}, "net/http.CloseNotifier": {11, 7}, // This is hairy. The notice says "Not all errors in the http package related to protocol errors are of type ProtocolError", but doesn't that imply that some errors do? - "net/http.ProtocolError": {8, DeprecatedUseNoLonger}, - "(crypto/x509.CertificateRequest).Attributes": {5, 3}, + "net/http.ProtocolError": {8, DeprecatedUseNoLonger}, + "(crypto/x509.CertificateRequest).Attributes": {5, 3}, + "(*crypto/x509.Certificate).CheckCRLSignature": {19, 19}, + "crypto/x509.ParseCRL": {19, 19}, + "crypto/x509.ParseDERCRL": {19, 19}, + "(*crypto/x509.Certificate).CreateCRL": {19, 19}, + "crypto/x509/pkix.TBSCertificateList": {19, 19}, + "crypto/x509/pkix.RevokedCertificate": {19, 19}, + "go/doc.ToHTML": {20, 20}, + "go/doc.ToText": {20, 20}, + "go/doc.Synopsis": {20, 20}, + "math/rand.Seed": {20, 0}, + "math/rand.Read": {20, DeprecatedNeverUse}, // These functions have no direct alternative, but they are insecure and should no longer be used. "crypto/x509.IsEncryptedPEMBlock": {16, DeprecatedNeverUse}, @@ -150,8 +164,9 @@ var StdlibDeprecations = map[string]Deprecation{ "syscall.GetQueuedCompletionStatus": {17, 0}, "syscall.CreateIoCompletionPort": {17, 0}, - // Not marked as deprecated with a recognizable header, but deprecated nonetheless. - "io/ioutil": {16, 16}, + // We choose to only track the package itself, even though all functions are deprecated individually, too. Anyone + // using ioutil directly will have to import it, and this keeps the noise down.
+ "io/ioutil": {19, 19}, "bytes.Title": {18, 0}, "strings.Title": {18, 0}, @@ -172,53 +187,58 @@ var StdlibDeprecations = map[string]Deprecation{ "syscall.Syscall18": {18, 18}, "syscall.Syscall6": {18, 18}, "syscall.Syscall9": {18, 18}, + + "reflect.SliceHeader": {21, 17}, + "reflect.StringHeader": {21, 20}, + "crypto/elliptic.GenerateKey": {21, 21}, + "crypto/elliptic.Marshal": {21, 21}, + "crypto/elliptic.Unmarshal": {21, 21}, + "(*crypto/elliptic.CurveParams).Add": {21, 21}, + "(*crypto/elliptic.CurveParams).Double": {21, 21}, + "(*crypto/elliptic.CurveParams).IsOnCurve": {21, 21}, + "(*crypto/elliptic.CurveParams).ScalarBaseMult": {21, 21}, + "(*crypto/elliptic.CurveParams).ScalarMult": {21, 21}, + "crypto/rsa.GenerateMultiPrimeKey": {21, DeprecatedNeverUse}, + "(crypto/rsa.PrecomputedValues).CRTValues": {21, DeprecatedNeverUse}, + "(crypto/x509.RevocationList).RevokedCertificates": {21, 21}, } -// Last imported from Go at 4aa1efed4853ea067d665a952eee77c52faac774 with the following numbers of deprecations: +// Last imported from Go at c19c4c566c63818dfd059b352e52c4710eecf14d with the following numbers of deprecations: // // archive/tar/common.go:2 // archive/zip/struct.go:6 // bytes/bytes.go:1 -// cmd/compile/internal/ir/expr.go:1 -// cmd/compile/internal/ir/type.go:1 -// cmd/compile/internal/syntax/walk.go:1 -// cmd/compile/internal/types/sym.go:2 -// cmd/go/internal/modcmd/edit.go:1 -// cmd/go/testdata/mod/example.com_deprecated_a_v1.9.0.txt:2 -// cmd/go/testdata/mod/example.com_deprecated_b_v1.9.0.txt:2 -// cmd/go/testdata/mod/example.com_undeprecated_v1.0.0.txt:2 -// cmd/go/testdata/script/mod_deprecate_message.txt:4 -// cmd/go/testdata/script/mod_edit.txt:1 -// cmd/go/testdata/script/mod_list_deprecated.txt:2 -// cmd/go/testdata/script/mod_list_deprecated_replace.txt:1 -// cmd/internal/obj/link.go:5 -// cmd/internal/obj/textflag.go:1 -// cmd/vendor/golang.org/x/mod/modfile/rule.go:2 -// cmd/vendor/golang.org/x/mod/semver/semver.go:1 -// cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go:1 -// cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go:1 -// cmd/vendor/golang.org/x/sys/windows/security_windows.go:1 -// cmd/vendor/golang.org/x/sys/windows/syscall_windows.go:2 // compress/flate/inflate.go:2 // crypto/dsa/dsa.go:1 +// crypto/elliptic/elliptic.go:8 +// crypto/elliptic/params.go:5 // crypto/rc4/rc4.go:1 -// crypto/tls/common.go:7 +// crypto/rsa/rsa.go:2 +// crypto/tls/common.go:6 // crypto/x509/cert_pool.go:1 // crypto/x509/pem_decrypt.go:3 -// crypto/x509/x509.go:1 +// crypto/x509/pkix/pkix.go:2 +// crypto/x509/x509.go:6 // database/sql/driver/driver.go:6 // debug/gosym/pclntab.go:2 // encoding/csv/reader.go:2 // encoding/json/decode.go:1 // encoding/json/encode.go:1 +// go/build/build.go:1 +// go/doc/comment.go:2 // go/doc/doc.go:1 +// go/doc/synopsis.go:1 // go/importer/importer.go:2 -// go/types/errorcodes.go:1 // go/types/interface.go:2 // go/types/signature.go:1 // image/geom.go:2 // image/jpeg/reader.go:1 +// internal/types/errors/codes.go:1 +// io/ioutil/ioutil.go:7 +// io/ioutil/tempfile.go:2 +// math/rand/rand.go:2 // net/dial.go:2 +// net/http/h2_bundle.go:1 // net/http/httptest/recorder.go:1 // net/http/httputil/persist.go:8 // net/http/request.go:6 @@ -230,7 +250,7 @@ var StdlibDeprecations = map[string]Deprecation{ // path/filepath/path_plan9.go:1 // path/filepath/path_unix.go:1 // path/filepath/path_windows.go:1 -// reflect/value.go:1 +// reflect/value.go:3 // regexp/regexp.go:1 // runtime/cpuprof.go:1 // strings/strings.go:1 @@ -249,5 +269,4 @@ var 
StdlibDeprecations = map[string]Deprecation{ // syscall/syscall.go:3 // syscall/syscall_windows.go:6 // text/template/parse/node.go:5 -// vendor/golang.org/x/crypto/curve25519/curve25519.go:1 // vendor/golang.org/x/text/transform/transform.go:1 diff --git a/vendor/honnef.co/go/tools/knowledge/signatures.go b/vendor/honnef.co/go/tools/knowledge/signatures.go index d072e61a46..03f4d53e86 100644 --- a/vendor/honnef.co/go/tools/knowledge/signatures.go +++ b/vendor/honnef.co/go/tools/knowledge/signatures.go @@ -6,7 +6,7 @@ import ( ) var Signatures = map[string]*types.Signature{ - "(io.Seeker).Seek": types.NewSignature(nil, + "(io.Seeker).Seek": types.NewSignatureType(nil, nil, nil, types.NewTuple( types.NewParam(token.NoPos, nil, "", types.Typ[types.Int64]), types.NewParam(token.NoPos, nil, "", types.Typ[types.Int]), @@ -18,7 +18,7 @@ var Signatures = map[string]*types.Signature{ false, ), - "(io.Writer).Write": types.NewSignature(nil, + "(io.Writer).Write": types.NewSignatureType(nil, nil, nil, types.NewTuple( types.NewParam(token.NoPos, nil, "", types.NewSlice(types.Typ[types.Byte])), ), @@ -29,7 +29,7 @@ var Signatures = map[string]*types.Signature{ false, ), - "(io.StringWriter).WriteString": types.NewSignature(nil, + "(io.StringWriter).WriteString": types.NewSignatureType(nil, nil, nil, types.NewTuple( types.NewParam(token.NoPos, nil, "", types.Typ[types.String]), ), @@ -40,7 +40,7 @@ var Signatures = map[string]*types.Signature{ false, ), - "(encoding.TextMarshaler).MarshalText": types.NewSignature(nil, + "(encoding.TextMarshaler).MarshalText": types.NewSignatureType(nil, nil, nil, types.NewTuple(), types.NewTuple( types.NewParam(token.NoPos, nil, "", types.NewSlice(types.Typ[types.Byte])), @@ -49,7 +49,7 @@ var Signatures = map[string]*types.Signature{ false, ), - "(encoding/json.Marshaler).MarshalJSON": types.NewSignature(nil, + "(encoding/json.Marshaler).MarshalJSON": types.NewSignatureType(nil, nil, nil, types.NewTuple(), types.NewTuple( types.NewParam(token.NoPos, nil, "", types.NewSlice(types.Typ[types.Byte])), @@ -58,7 +58,7 @@ var Signatures = map[string]*types.Signature{ false, ), - "(fmt.Stringer).String": types.NewSignature(nil, + "(fmt.Stringer).String": types.NewSignatureType(nil, nil, nil, types.NewTuple(), types.NewTuple( types.NewParam(token.NoPos, nil, "", types.Typ[types.String]), diff --git a/vendor/honnef.co/go/tools/pattern/convert.go b/vendor/honnef.co/go/tools/pattern/convert.go index 34e2cd4566..aed3617cd2 100644 --- a/vendor/honnef.co/go/tools/pattern/convert.go +++ b/vendor/honnef.co/go/tools/pattern/convert.go @@ -6,8 +6,6 @@ import ( "go/token" "go/types" "reflect" - - "golang.org/x/exp/typeparams" ) var astTypes = map[string]reflect.Type{ @@ -15,7 +13,7 @@ var astTypes = map[string]reflect.Type{ "RangeStmt": reflect.TypeOf(ast.RangeStmt{}), "AssignStmt": reflect.TypeOf(ast.AssignStmt{}), "IndexExpr": reflect.TypeOf(ast.IndexExpr{}), - "IndexListExpr": reflect.TypeOf(typeparams.IndexListExpr{}), + "IndexListExpr": reflect.TypeOf(ast.IndexListExpr{}), "Ident": reflect.TypeOf(ast.Ident{}), "ValueSpec": reflect.TypeOf(ast.ValueSpec{}), "GenDecl": reflect.TypeOf(ast.GenDecl{}), diff --git a/vendor/honnef.co/go/tools/pattern/doc.go b/vendor/honnef.co/go/tools/pattern/doc.go index c963bf716b..22fe2cf30a 100644 --- a/vendor/honnef.co/go/tools/pattern/doc.go +++ b/vendor/honnef.co/go/tools/pattern/doc.go @@ -1,7 +1,7 @@ /* Package pattern implements a simple language for pattern matching Go ASTs. 
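(Before the doc comment for the pattern language continues below, a hedged usage sketch may help. It assumes the package as it stands after this diff, where Match takes a Pattern rather than a Node; the pattern and source snippet here are invented for illustration.)

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"honnef.co/go/tools/pattern"
)

func main() {
	// Bare lower-case identifiers are bindings; the quoted "==" is a token.
	pat := pattern.MustParse(`(BinaryExpr x "==" y)`)

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", `package p; var b = len(s) == 0`, 0)
	if err != nil {
		panic(err)
	}

	ast.Inspect(f, func(n ast.Node) bool {
		if n == nil {
			return true
		}
		// After this diff, Match takes the whole Pattern so the matcher
		// can size its binding table up front.
		if m, ok := pattern.Match(pat, n); ok {
			fmt.Printf("matched %T: x=%v, y=%v\n", n, m.State["x"], m.State["y"])
		}
		return true
	})
}
```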
-Design decisions and trade-offs +# Design decisions and trade-offs The language is designed specifically for the task of filtering ASTs to simplify the implementation of analyses in staticcheck. @@ -14,7 +14,7 @@ Furthermore, it is fully expected that the majority of analyses will still requi to further process the filtered AST, to make use of type information and to enforce complex invariants. It is not our goal to design a scripting language for writing entire checks in. -The language +# The language At its core, patterns are a representation of Go ASTs, allowing for the use of placeholders to enable pattern matching. Their syntax is inspired by LISP and Haskell, but unlike LISP, the core unit of patterns isn't the list, but the node. @@ -60,13 +60,13 @@ Thus, the two following forms have identical matching behavior: This section serves as an overview of the language's syntax. More in-depth explanations of the matching behavior as well as an exhaustive list of node types follows in the coming sections. -Pattern matching +# Pattern matching -TODO write about pattern matching +# TODO write about pattern matching - inspired by haskell syntax, but much, much simpler and naive -Node types +# Node types The language contains two kinds of nodes: those that map to nodes in the AST, and those that implement additional logic. @@ -246,7 +246,7 @@ The Not node negates a match. For example, (Not (Ident _)) will match all nodes ChanDir(0) -Automatic unnesting of AST nodes +# Automatic unnesting of AST nodes The Go AST has several types of nodes that wrap other nodes. To simplify matching, we automatically unwrap some of these nodes. @@ -268,6 +268,5 @@ On the flip-side, there is no way to specifically match these wrapper nodes. For example, there is no way of searching for unnecessary parentheses, like in the following piece of Go code: ((x)) += 2 - */ package pattern diff --git a/vendor/honnef.co/go/tools/pattern/fuzz.go b/vendor/honnef.co/go/tools/pattern/fuzz.go deleted file mode 100644 index 2afbb5242c..0000000000 --- a/vendor/honnef.co/go/tools/pattern/fuzz.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -package pattern - -import ( - "go/ast" - goparser "go/parser" - "go/token" - "os" - "path/filepath" - "strings" -) - -var files []*ast.File - -func init() { - fset := token.NewFileSet() - filepath.Walk("/usr/lib/go/src", func(path string, info os.FileInfo, err error) error { - if err != nil { - // XXX error handling - panic(err) - } - if !strings.HasSuffix(path, ".go") { - return nil - } - f, err := goparser.ParseFile(fset, path, nil, 0) - if err != nil { - return nil - } - files = append(files, f) - return nil - }) -} - -func Fuzz(data []byte) int { - p := &Parser{} - pat, err := p.Parse(string(data)) - if err != nil { - if strings.Contains(err.Error(), "internal error") { - panic(err) - } - return 0 - } - _ = pat.Root.String() - - for _, f := range files { - Match(pat.Root, f) - } - return 1 -} diff --git a/vendor/honnef.co/go/tools/pattern/match.go b/vendor/honnef.co/go/tools/pattern/match.go index b4edac9489..3eae3bd631 100644 --- a/vendor/honnef.co/go/tools/pattern/match.go +++ b/vendor/honnef.co/go/tools/pattern/match.go @@ -7,7 +7,7 @@ import ( "go/types" "reflect" - "golang.org/x/exp/typeparams" + "golang.org/x/tools/go/ast/astutil" ) var tokensByString = map[string]Token{ @@ -87,35 +87,56 @@ type matcher interface { Match(*Matcher, interface{}) (interface{}, bool) } -type State = map[string]interface{} +type State = map[string]any type Matcher struct { 
TypesInfo *types.Info State State + + bindingsMapping []string + + setBindings []uint64 } -func (m *Matcher) fork() *Matcher { - state := make(State, len(m.State)) - for k, v := range m.State { - state[k] = v - } - return &Matcher{ - TypesInfo: m.TypesInfo, - State: state, +func (m *Matcher) set(b Binding, value interface{}) { + m.State[b.Name] = value + m.setBindings[len(m.setBindings)-1] |= 1 << b.idx +} + +func (m *Matcher) push() { + m.setBindings = append(m.setBindings, 0) +} + +func (m *Matcher) pop() { + set := m.setBindings[len(m.setBindings)-1] + if set != 0 { + for i := 0; i < len(m.bindingsMapping); i++ { + if (set & (1 << i)) != 0 { + key := m.bindingsMapping[i] + delete(m.State, key) + } + } } + m.setBindings = m.setBindings[:len(m.setBindings)-1] } -func (m *Matcher) merge(mc *Matcher) { - m.State = mc.State +func (m *Matcher) merge() { + m.setBindings = m.setBindings[:len(m.setBindings)-1] } -func (m *Matcher) Match(a Node, b ast.Node) bool { +func (m *Matcher) Match(a Pattern, b ast.Node) bool { + m.bindingsMapping = a.Bindings m.State = State{} - _, ok := match(m, a, b) + m.push() + _, ok := match(m, a.Root, b) + m.merge() + if len(m.setBindings) != 0 { + panic(fmt.Sprintf("%d entries left on the stack, expected none", len(m.setBindings))) + } return ok } -func Match(a Node, b ast.Node) (*Matcher, bool) { +func Match(a Pattern, b ast.Node) (*Matcher, bool) { m := &Matcher{} ret := m.Match(a, b) return m, ret @@ -139,7 +160,11 @@ func match(m *Matcher, l, r interface{}) (interface{}, bool) { case *ast.BlockStmt: return match(m, l.List, r) case *ast.FieldList: - return match(m, l.List, r) + if l == nil { + return match(m, nil, r) + } else { + return match(m, l.List, r) + } } switch r := r.(type) { @@ -202,14 +227,24 @@ func match(m *Matcher, l, r interface{}) (interface{}, bool) { } } + // TODO(dh): the three blocks handling slices can be combined into a single block if we use reflection + { ln, ok1 := l.([]ast.Expr) rn, ok2 := r.([]ast.Expr) if ok1 || ok2 { if ok1 && !ok2 { - rn = []ast.Expr{r.(ast.Expr)} + cast, ok := r.(ast.Expr) + if !ok { + return nil, false + } + rn = []ast.Expr{cast} } else if !ok1 && ok2 { - ln = []ast.Expr{l.(ast.Expr)} + cast, ok := l.(ast.Expr) + if !ok { + return nil, false + } + ln = []ast.Expr{cast} } if len(ln) != len(rn) { @@ -229,9 +264,17 @@ func match(m *Matcher, l, r interface{}) (interface{}, bool) { rn, ok2 := r.([]ast.Stmt) if ok1 || ok2 { if ok1 && !ok2 { - rn = []ast.Stmt{r.(ast.Stmt)} + cast, ok := r.(ast.Stmt) + if !ok { + return nil, false + } + rn = []ast.Stmt{cast} } else if !ok1 && ok2 { - ln = []ast.Stmt{l.(ast.Stmt)} + cast, ok := l.(ast.Stmt) + if !ok { + return nil, false + } + ln = []ast.Stmt{cast} } if len(ln) != len(rn) { @@ -251,9 +294,17 @@ func match(m *Matcher, l, r interface{}) (interface{}, bool) { rn, ok2 := r.([]*ast.Field) if ok1 || ok2 { if ok1 && !ok2 { - rn = []*ast.Field{r.(*ast.Field)} + cast, ok := r.(*ast.Field) + if !ok { + return nil, false + } + rn = []*ast.Field{cast} } else if !ok1 && ok2 { - ln = []*ast.Field{l.(*ast.Field)} + cast, ok := l.(*ast.Field) + if !ok { + return nil, false + } + ln = []*ast.Field{cast} } if len(ln) != len(rn) { @@ -268,7 +319,7 @@ func match(m *Matcher, l, r interface{}) (interface{}, bool) { } } - panic(fmt.Sprintf("unsupported comparison: %T and %T", l, r)) + return nil, false } // Match a Node with an AST node @@ -286,6 +337,13 @@ func matchNodeAST(m *Matcher, a Node, b interface{}) (interface{}, bool) { // 'a' is not a List or we'd be using its Match // 
implementation. + if len(b) != 1 { + return nil, false + } + return match(m, a, b[0]) + case []*ast.Field: + // 'a' is not a List or we'd be using its Match + // implementation if len(b) != 1 { return nil, false } @@ -317,6 +375,9 @@ func matchNodeAST(m *Matcher, a Node, b interface{}) (interface{}, bool) { return b, true case nil: return nil, a == Nil{} + case string, token.Token: + // 'a' can't be a String, Token, or Binding or we'd be using their Match implementations. + return nil, false default: panic(fmt.Sprintf("unhandled type %T", b)) } @@ -393,7 +454,7 @@ func (b Binding) Match(m *Matcher, node interface{}) (interface{}, bool) { } new, ret := match(m, b.Node, node) if ret { - m.State[b.Name] = new + m.set(b, new) } return new, ret } @@ -446,7 +507,16 @@ func (tok Token) Match(m *Matcher, node interface{}) (interface{}, bool) { } func (Nil) Match(m *Matcher, node interface{}) (interface{}, bool) { - return nil, isNil(node) || reflect.ValueOf(node).IsNil() + if isNil(node) { + return nil, true + } + v := reflect.ValueOf(node) + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Pointer, reflect.Slice: + return nil, v.IsNil() + default: + return nil, false + } } func (builtin Builtin) Match(m *Matcher, node interface{}) (interface{}, bool) { @@ -492,13 +562,14 @@ func (fn Symbol) Match(m *Matcher, node interface{}) (interface{}, bool) { return nil, false } - fun := r + fun := r.(ast.Expr) switch idx := fun.(type) { case *ast.IndexExpr: fun = idx.X - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: fun = idx.X } + fun = astutil.Unparen(fun) switch fun := fun.(type) { case *ast.Ident: @@ -540,10 +611,12 @@ func (fn Symbol) Match(m *Matcher, node interface{}) (interface{}, bool) { func (or Or) Match(m *Matcher, node interface{}) (interface{}, bool) { for _, opt := range or.Nodes { - mc := m.fork() - if ret, ok := match(mc, opt, node); ok { - m.merge(mc) + m.push() + if ret, ok := match(m, opt, node); ok { + m.merge() return ret, true + } else { + m.pop() } } return nil, false diff --git a/vendor/honnef.co/go/tools/pattern/parser.go b/vendor/honnef.co/go/tools/pattern/parser.go index 2529051df0..ba08975240 100644 --- a/vendor/honnef.co/go/tools/pattern/parser.go +++ b/vendor/honnef.co/go/tools/pattern/parser.go @@ -1,6 +1,7 @@ package pattern import ( + "errors" "fmt" "go/ast" "go/token" @@ -11,7 +12,10 @@ type Pattern struct { Root Node // Relevant contains instances of ast.Node that could potentially // initiate a successful match of the pattern. - Relevant []reflect.Type + Relevant map[reflect.Type]struct{} + + // Mapping from binding index to binding name + Bindings []string } func MustParse(s string) Pattern { @@ -23,27 +27,29 @@ func MustParse(s string) Pattern { return pat } -func roots(node Node) []reflect.Type { +func roots(node Node, m map[reflect.Type]struct{}) { switch node := node.(type) { case Or: - var out []reflect.Type for _, el := range node.Nodes { - out = append(out, roots(el)...) 
+ roots(el, m) } - return out case Not: - return roots(node.Node) + roots(node.Node, m) case Binding: - return roots(node.Node) + roots(node.Node, m) case Nil, nil: // this branch is reached via bindings - return allTypes + for _, T := range allTypes { + m[T] = struct{}{} + } default: Ts, ok := nodeToASTTypes[reflect.TypeOf(node)] if !ok { panic(fmt.Sprintf("internal error: unhandled type %T", node)) } - return Ts + for _, T := range Ts { + m[T] = struct{}{} + } } } @@ -160,6 +166,20 @@ type Parser struct { cur item last *item items chan item + + bindings map[string]int +} + +func (p *Parser) bindingIndex(name string) int { + if p.bindings == nil { + p.bindings = map[string]int{} + } + if idx, ok := p.bindings[name]; ok { + return idx + } + idx := len(p.bindings) + p.bindings[name] = idx + return idx } func (p *Parser) Parse(s string) (Pattern, error) { @@ -185,9 +205,22 @@ func (p *Parser) Parse(s string) (Pattern, error) { if item := <-p.lex.items; item.typ != itemEOF { return Pattern{}, fmt.Errorf("unexpected token %s after end of pattern", item.typ) } + + if len(p.bindings) > 64 { + return Pattern{}, errors.New("encountered more than 64 bindings") + } + + bindings := make([]string, len(p.bindings)) + for name, idx := range p.bindings { + bindings[idx] = name + } + + relevant := map[reflect.Type]struct{}{} + roots(root, relevant) return Pattern{ Root: root, - Relevant: roots(root), + Relevant: relevant, + Bindings: bindings, }, nil } @@ -263,7 +296,14 @@ func (p *Parser) node() (Node, error) { } } - return p.populateNode(typ.val, objs) + node, err := p.populateNode(typ.val, objs) + if err != nil { + return nil, err + } + if node, ok := node.(Binding); ok { + node.idx = p.bindingIndex(node.Name) + } + return node, nil } func populateNode(typ string, objs []Node, allowTypeInfo bool) (Node, error) { @@ -287,10 +327,23 @@ func populateNode(typ string, objs []Node, allowTypeInfo bool) (Node, error) { return v.Interface().(Node), nil } } - if len(objs) != v.NumField() { - return nil, fmt.Errorf("tried to initialize node %s with %d values, expected %d", typ, len(objs), v.NumField()) + + n := -1 + for i := 0; i < T.NumField(); i++ { + if !T.Field(i).IsExported() { + break + } + n = i + } + + if len(objs) != n+1 { + return nil, fmt.Errorf("tried to initialize node %s with %d values, expected %d", typ, len(objs), n+1) } + for i := 0; i < v.NumField(); i++ { + if !T.Field(i).IsExported() { + break + } f := v.Field(i) if f.Kind() == reflect.String { if obj, ok := objs[i].(String); ok { @@ -399,10 +452,14 @@ func (p *Parser) object() (Node, error) { b = Binding{ Name: v.val, Node: o, + idx: p.bindingIndex(v.val), } } else { p.rewind() - b = Binding{Name: v.val} + b = Binding{ + Name: v.val, + idx: p.bindingIndex(v.val), + } } if p.peek().typ == itemColon { p.next() diff --git a/vendor/honnef.co/go/tools/pattern/pattern.go b/vendor/honnef.co/go/tools/pattern/pattern.go index fbbafdfac5..15886b1f3d 100644 --- a/vendor/honnef.co/go/tools/pattern/pattern.go +++ b/vendor/honnef.co/go/tools/pattern/pattern.go @@ -245,6 +245,8 @@ type SendStmt struct { type Binding struct { Name string Node Node + + idx int } type RangeStmt struct { diff --git a/vendor/honnef.co/go/tools/printf/printf.go b/vendor/honnef.co/go/tools/printf/printf.go index 754db9b16d..3ce4dc0186 100644 --- a/vendor/honnef.co/go/tools/printf/printf.go +++ b/vendor/honnef.co/go/tools/printf/printf.go @@ -2,25 +2,26 @@ // strings. 
// // It parses verbs according to the following syntax: -// Numeric -> '0'-'9' -// Letter -> 'a'-'z' | 'A'-'Z' -// Index -> '[' Numeric+ ']' -// Star -> '*' -// Star -> Index '*' // -// Precision -> Numeric+ | Star -// Width -> Numeric+ | Star +// Numeric -> '0'-'9' +// Letter -> 'a'-'z' | 'A'-'Z' +// Index -> '[' Numeric+ ']' +// Star -> '*' +// Star -> Index '*' // -// WidthAndPrecision -> Width '.' Precision -// WidthAndPrecision -> Width '.' -// WidthAndPrecision -> Width -// WidthAndPrecision -> '.' Precision -// WidthAndPrecision -> '.' +// Precision -> Numeric+ | Star +// Width -> Numeric+ | Star // -// Flag -> '+' | '-' | '#' | ' ' | '0' -// Verb -> Letter | '%' +// WidthAndPrecision -> Width '.' Precision +// WidthAndPrecision -> Width '.' +// WidthAndPrecision -> Width +// WidthAndPrecision -> '.' Precision +// WidthAndPrecision -> '.' // -// Input -> '%' [ Flag+ ] [ WidthAndPrecision ] [ Index ] Verb +// Flag -> '+' | '-' | '#' | ' ' | '0' +// Verb -> Letter | '%' +// +// Input -> '%' [ Flag+ ] [ WidthAndPrecision ] [ Index ] Verb package printf import ( diff --git a/vendor/honnef.co/go/tools/simple/analysis.go b/vendor/honnef.co/go/tools/simple/analysis.go index a67a3209fb..04ac2f5e06 100644 --- a/vendor/honnef.co/go/tools/simple/analysis.go +++ b/vendor/honnef.co/go/tools/simple/analysis.go @@ -4,6 +4,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "honnef.co/go/tools/analysis/facts/generated" + "honnef.co/go/tools/analysis/facts/purity" "honnef.co/go/tools/analysis/lint" "honnef.co/go/tools/internal/passes/buildir" ) @@ -55,7 +56,7 @@ var Analyzers = lint.InitializeAnalyzers(Docs, map[string]*analysis.Analyzer{ }, "S1011": { Run: CheckLoopAppend, - Requires: []*analysis.Analyzer{inspect.Analyzer, generated.Analyzer}, + Requires: []*analysis.Analyzer{inspect.Analyzer, generated.Analyzer, purity.Analyzer}, }, "S1012": { Run: CheckTimeSince, diff --git a/vendor/honnef.co/go/tools/simple/doc.go b/vendor/honnef.co/go/tools/simple/doc.go index 11cc70c3a1..421f851344 100644 --- a/vendor/honnef.co/go/tools/simple/doc.go +++ b/vendor/honnef.co/go/tools/simple/doc.go @@ -137,8 +137,21 @@ making \'s[n:len(s)]\' and \'s[n:]\' equivalent.`, Before: ` for _, e := range y { x = append(x, e) +} + +for i := range y { + x = append(x, y[i]) +} + +for i := range y { + v := y[i] + x = append(x, v) }`, - After: `x = append(x, y...)`, + + After: ` +x = append(x, y...) +x = append(x, y...) +x = append(x, y...)`, Since: "2017.1", // MergeIfAll because y might not be a slice under all build tags. 
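(The expanded S1011 documentation above now lists three loop shapes; a quick before/after in plain Go, with x and y as placeholder slices:)

```go
package main

import "fmt"

func main() {
	y := []int{1, 2, 3}

	// Before: one of the three shapes S1011 now recognizes.
	var x []int
	for i := range y {
		v := y[i]
		x = append(x, v)
	}

	// After: a single append with the spread operator.
	x2 := append([]int(nil), y...)

	fmt.Println(x, x2) // [1 2 3] [1 2 3]
}
```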
MergeIf: lint.MergeIfAll, @@ -267,9 +280,9 @@ Given the following shared definitions type T1 string type T2 int - + func (T2) String() string { return "Hello, world" } - + var x string var y T1 var z T2 diff --git a/vendor/honnef.co/go/tools/simple/lint.go b/vendor/honnef.co/go/tools/simple/lint.go index 0abc5aef91..8458f7be69 100644 --- a/vendor/honnef.co/go/tools/simple/lint.go +++ b/vendor/honnef.co/go/tools/simple/lint.go @@ -13,6 +13,7 @@ import ( "honnef.co/go/tools/analysis/code" "honnef.co/go/tools/analysis/edit" + "honnef.co/go/tools/analysis/facts/purity" "honnef.co/go/tools/analysis/lint" "honnef.co/go/tools/analysis/report" "honnef.co/go/tools/go/ast/astutil" @@ -175,20 +176,20 @@ func CheckLoopCopy(pass *analysis.Pass) (interface{}, error) { report.ShortRange(), report.Fixes(edit.Fix("replace loop with assignment", edit.ReplaceWithNode(pass.Fset, node, r)))) } else { - opts := []report.Option{ - report.ShortRange(), - report.FilterGenerated(), - } tv, err := types.Eval(pass.Fset, pass.Pkg, node.Pos(), "copy") if err == nil && tv.IsBuiltin() { + to := "to" + from := "from" src := m.State["src"].(ast.Expr) if TsrcArray { + from = "from[:]" src = &ast.SliceExpr{ X: src, } } dst := m.State["dst"].(ast.Expr) if TdstArray { + to = "to[:]" dst = &ast.SliceExpr{ X: dst, } @@ -198,9 +199,13 @@ func CheckLoopCopy(pass *analysis.Pass) (interface{}, error) { Fun: &ast.Ident{Name: "copy"}, Args: []ast.Expr{dst, src}, } - opts = append(opts, report.Fixes(edit.Fix("replace loop with call to copy()", edit.ReplaceWithNode(pass.Fset, node, r)))) + opts := []report.Option{ + report.ShortRange(), + report.FilterGenerated(), + report.Fixes(edit.Fix("replace loop with call to copy()", edit.ReplaceWithNode(pass.Fset, node, r))), + } + report.Report(pass, node, fmt.Sprintf("should use copy(%s, %s) instead of a loop", to, from), opts...) } - report.Report(pass, node, "should use copy() instead of a loop", opts...) 
} } code.Preorder(pass, fn, (*ast.ForStmt)(nil), (*ast.RangeStmt)(nil)) @@ -232,7 +237,7 @@ func CheckIfBoolCmp(pass *analysis.Pass) (interface{}, error) { other = expr.X } - ok := typeutil.All(pass.TypesInfo.TypeOf(other), func(term *typeparams.Term) bool { + ok := typeutil.All(pass.TypesInfo.TypeOf(other), func(term *types.Term) bool { basic, ok := term.Type().Underlying().(*types.Basic) return ok && basic.Kind() == types.Bool }) @@ -597,12 +602,11 @@ func negate(expr ast.Expr) ast.Expr { // CheckRedundantNilCheckWithLen checks for the following redundant nil-checks: // -// if x == nil || len(x) == 0 {} -// if x != nil && len(x) != 0 {} -// if x != nil && len(x) == N {} (where N != 0) -// if x != nil && len(x) > N {} -// if x != nil && len(x) >= N {} (where N != 0) -// +// if x == nil || len(x) == 0 {} +// if x != nil && len(x) != 0 {} +// if x != nil && len(x) == N {} (where N != 0) +// if x != nil && len(x) > N {} +// if x != nil && len(x) >= N {} (where N != 0) func CheckRedundantNilCheckWithLen(pass *analysis.Pass) (interface{}, error) { isConstZero := func(expr ast.Expr) (isConst bool, isZero bool) { _, ok := expr.(*ast.BasicLit) @@ -705,7 +709,7 @@ func CheckRedundantNilCheckWithLen(pass *analysis.Pass) (interface{}, error) { // finally check that xx type is one of array, slice, map or chan // this is to prevent false positive in case if xx is a pointer to an array typ := pass.TypesInfo.TypeOf(xx) - ok = typeutil.All(typ, func(term *typeparams.Term) bool { + ok = typeutil.All(typ, func(term *types.Term) bool { switch term.Type().Underlying().(type) { case *types.Slice: return true @@ -715,7 +719,7 @@ func CheckRedundantNilCheckWithLen(pass *analysis.Pass) (interface{}, error) { return true case *types.Pointer: return false - case *typeparams.TypeParam: + case *types.TypeParam: return false default: lint.ExhaustiveTypeSwitch(term.Type().Underlying()) @@ -766,22 +770,63 @@ func refersTo(pass *analysis.Pass, expr ast.Expr, ident types.Object) bool { } var checkLoopAppendQ = pattern.MustParse(` +(Or (RangeStmt (Ident "_") val@(Object _) _ x - [(AssignStmt [lhs] "=" [(CallExpr (Builtin "append") [lhs val])])]) `) + [(AssignStmt [lhs] "=" [(CallExpr (Builtin "append") [lhs val])])]) + (RangeStmt + idx@(Object _) + nil + _ + x + [(AssignStmt [lhs] "=" [(CallExpr (Builtin "append") [lhs (IndexExpr x idx)])])]) + (RangeStmt + idx@(Object _) + nil + _ + x + [(AssignStmt val@(Object _) ":=" (IndexExpr x idx)) + (AssignStmt [lhs] "=" [(CallExpr (Builtin "append") [lhs val])])]))`) func CheckLoopAppend(pass *analysis.Pass) (interface{}, error) { + pure := pass.ResultOf[purity.Analyzer].(purity.Result) + fn := func(node ast.Node) { m, ok := code.Match(pass, checkLoopAppendQ, node) if !ok { return } - val := m.State["val"].(types.Object) - if refersTo(pass, m.State["lhs"].(ast.Expr), val) { + if val, ok := m.State["val"].(types.Object); ok && refersTo(pass, m.State["lhs"].(ast.Expr), val) { + return + } + + if m.State["idx"] != nil && code.MayHaveSideEffects(pass, m.State["x"].(ast.Expr), pure) { + // When using an index-based loop, x gets evaluated repeatedly and thus should be pure. + // This doesn't matter for value-based loops, because x only gets evaluated once. + return + } + + if idx, ok := m.State["idx"].(types.Object); ok && refersTo(pass, m.State["lhs"].(ast.Expr), idx) { + // The lhs mustn't refer to the index loop variable. + return + } + + if code.MayHaveSideEffects(pass, m.State["lhs"].(ast.Expr), pure) { + // The lhs may be dynamic and return different values on each iteration. 
For example: + // + // func bar() map[int][]int { /* return one of several maps */ } + // + // func foo(x []int, y [][]int) { + // for i := range x { + // bar()[0] = append(bar()[0], x[i]) + // } + // } + // + // The dynamic nature of the lhs might also affect the value of the index. return } @@ -985,7 +1030,7 @@ func CheckSimplerStructConversion(pass *analysis.Pass) (interface{}, error) { return } // All fields must be initialized from the same object - if ident != nil && ident.Obj != id.Obj { + if ident != nil && pass.TypesInfo.ObjectOf(ident) != pass.TypesInfo.ObjectOf(id) { return } typ2, _ = t.(*types.Named) @@ -1046,7 +1091,7 @@ func CheckTrim(pass *analysis.Pass) (interface{}, error) { switch node1 := node1.(type) { case *ast.Ident: - return node1.Obj == node2.(*ast.Ident).Obj + return pass.TypesInfo.ObjectOf(node1) == pass.TypesInfo.ObjectOf(node2.(*ast.Ident)) case *ast.SelectorExpr, *ast.IndexExpr: return astutil.Equal(node1, node2) case *ast.BasicLit: @@ -1372,7 +1417,7 @@ func CheckDeclareAssign(pass *analysis.Pass) (interface{}, error) { } for _, lhs := range assign.Lhs { if oident, ok := lhs.(*ast.Ident); ok { - if oident.Obj == ident.Obj { + if pass.TypesInfo.ObjectOf(oident) == pass.TypesInfo.ObjectOf(ident) { num++ } } @@ -1413,7 +1458,7 @@ func CheckDeclareAssign(pass *analysis.Pass) (interface{}, error) { if !ok { continue } - if vspec.Names[0].Obj != ident.Obj { + if pass.TypesInfo.ObjectOf(vspec.Names[0]) != pass.TypesInfo.ObjectOf(ident) { continue } @@ -1618,11 +1663,11 @@ func CheckNilCheckAroundRange(pass *analysis.Pass) (interface{}, error) { if !ok { return } - ok = typeutil.All(m.State["x"].(types.Object).Type(), func(term *typeparams.Term) bool { + ok = typeutil.All(m.State["x"].(types.Object).Type(), func(term *types.Term) bool { switch term.Type().Underlying().(type) { case *types.Slice, *types.Map: return true - case *typeparams.TypeParam, *types.Chan, *types.Pointer: + case *types.TypeParam, *types.Chan, *types.Pointer: return false default: lint.ExhaustiveTypeSwitch(term.Type().Underlying()) diff --git a/vendor/honnef.co/go/tools/staticcheck/analysis.go b/vendor/honnef.co/go/tools/staticcheck/analysis.go index 171467a8f9..210c348c9c 100644 --- a/vendor/honnef.co/go/tools/staticcheck/analysis.go +++ b/vendor/honnef.co/go/tools/staticcheck/analysis.go @@ -174,7 +174,7 @@ var Analyzers = lint.InitializeAnalyzers(Docs, map[string]*analysis.Analyzer{ Requires: []*analysis.Analyzer{inspect.Analyzer, tokenfile.Analyzer}, }, "SA4017": { - Run: CheckPureFunctions, + Run: CheckSideEffectFreeCalls, Requires: []*analysis.Analyzer{buildir.Analyzer, purity.Analyzer}, }, "SA4018": { diff --git a/vendor/honnef.co/go/tools/staticcheck/doc.go b/vendor/honnef.co/go/tools/staticcheck/doc.go index 3e991b6c4a..c28fdbf04b 100644 --- a/vendor/honnef.co/go/tools/staticcheck/doc.go +++ b/vendor/honnef.co/go/tools/staticcheck/doc.go @@ -506,7 +506,7 @@ falsify results.`, }, "SA4017": { - Title: `A pure function's return value is discarded, making the call pointless`, + Title: `Discarding the return values of a function without side effects, making the call pointless`, Since: "2017.1", Severity: lint.SeverityWarning, MergeIf: lint.MergeIfAll, @@ -1286,7 +1286,7 @@ the \'else\' branch. This means that in the following example if x, ok := x.(int); ok { // ... 
} else { - fmt.Println("unexpected type %T", x) + fmt.Printf("unexpected type %T", x) } \'x\' in the \'else\' branch will refer to the \'x\' from \'x, ok diff --git a/vendor/honnef.co/go/tools/staticcheck/fakejson/encode.go b/vendor/honnef.co/go/tools/staticcheck/fakejson/encode.go index 15e302de4f..f65f2ddf9f 100644 --- a/vendor/honnef.co/go/tools/staticcheck/fakejson/encode.go +++ b/vendor/honnef.co/go/tools/staticcheck/fakejson/encode.go @@ -46,18 +46,18 @@ type UnsupportedTypeError struct { type encoder struct { // TODO we track addressable and non-addressable instances separately out of an abundance of caution. We don't know // if this is actually required for correctness. - seenCanAddr typeutil.Map - seenCantAddr typeutil.Map + seenCanAddr typeutil.Map[struct{}] + seenCantAddr typeutil.Map[struct{}] } func (enc *encoder) newTypeEncoder(t fakereflect.TypeAndCanAddr, stack string) *UnsupportedTypeError { - var m *typeutil.Map + var m *typeutil.Map[struct{}] if t.CanAddr() { m = &enc.seenCanAddr } else { m = &enc.seenCantAddr } - if ok := m.At(t.Type); ok != nil { + if _, ok := m.At(t.Type); ok { return nil } m.Set(t.Type, struct{}{}) diff --git a/vendor/honnef.co/go/tools/staticcheck/fakexml/marshal.go b/vendor/honnef.co/go/tools/staticcheck/fakexml/marshal.go index e592bfca57..64fce5f5b2 100644 --- a/vendor/honnef.co/go/tools/staticcheck/fakexml/marshal.go +++ b/vendor/honnef.co/go/tools/staticcheck/fakexml/marshal.go @@ -28,8 +28,8 @@ func Marshal(v types.Type) error { type Encoder struct { // TODO we track addressable and non-addressable instances separately out of an abundance of caution. We don't know // if this is actually required for correctness. - seenCanAddr typeutil.Map - seenCantAddr typeutil.Map + seenCanAddr typeutil.Map[struct{}] + seenCantAddr typeutil.Map[struct{}] } func NewEncoder() *Encoder { @@ -114,13 +114,13 @@ func (err *CyclicTypeError) Error() string { // marshalValue writes one or more XML elements representing val. // If val was obtained from a struct field, finfo must have its details. 
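(fakejson above and the fakexml Encoder move their cycle-detection sets to the new generic typeutil.Map[struct{}], so the old nil-interface test becomes a boolean presence check; the marshalValue hunk continues below. A simplified sketch of the same pattern over x/tools' non-generic typeutil.Map — this wrapper is a stand-in for illustration, not the vendored type:)

```go
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

// Map wraps the interface{}-based typeutil.Map so that At reports
// presence with a bool instead of a nil interface value.
type Map[V any] struct{ m typeutil.Map }

func (m *Map[V]) At(key types.Type) (V, bool) {
	v := m.m.At(key)
	if v == nil {
		var zero V
		return zero, false
	}
	return v.(V), true
}

func (m *Map[V]) Set(key types.Type, value V) { m.m.Set(key, value) }

func main() {
	var seen Map[struct{}] // a type-keyed "seen" set, as used for cycle detection
	T := types.NewSlice(types.Typ[types.Int])
	if _, ok := seen.At(T); !ok {
		seen.Set(T, struct{}{})
	}
	// typeutil.Map keys by type identity, so a structurally identical
	// type is found again on the second visit.
	_, ok := seen.At(types.NewSlice(types.Typ[types.Int]))
	fmt.Println(ok) // true
}
```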
func (e *Encoder) marshalValue(val fakereflect.TypeAndCanAddr, finfo *fieldInfo, startTemplate *StartElement, stack string) error { - var m *typeutil.Map + var m *typeutil.Map[struct{}] if val.CanAddr() { m = &e.seenCanAddr } else { m = &e.seenCantAddr } - if ok := m.At(val.Type); ok != nil { + if _, ok := m.At(val.Type); ok { return nil } m.Set(val.Type, struct{}{}) diff --git a/vendor/honnef.co/go/tools/staticcheck/lint.go b/vendor/honnef.co/go/tools/staticcheck/lint.go index 3dd0fece07..b7c0f91375 100644 --- a/vendor/honnef.co/go/tools/staticcheck/lint.go +++ b/vendor/honnef.co/go/tools/staticcheck/lint.go @@ -524,13 +524,13 @@ func checkPrintfCallImpl(carg *Argument, f ir.Value, args []ir.Value) { return true } - var seen typeutil.Map + var seen typeutil.Map[struct{}] var checkType func(verb rune, T types.Type, top bool) bool checkType = func(verb rune, T types.Type, top bool) bool { if top { - seen = typeutil.Map{} + seen = typeutil.Map[struct{}]{} } - if ok := seen.At(T); ok != nil { + if _, ok := seen.At(T); ok { return true } seen.Set(T, struct{}{}) @@ -1144,17 +1144,31 @@ func CheckDubiousDeferInChannelRangeLoop(pass *analysis.Pass) (interface{}, erro if !ok { return } + + stmts := []*ast.DeferStmt{} + exits := false fn2 := func(node ast.Node) bool { switch stmt := node.(type) { case *ast.DeferStmt: - report.Report(pass, stmt, "defers in this range loop won't run unless the channel gets closed") + stmts = append(stmts, stmt) case *ast.FuncLit: // Don't look into function bodies return false + case *ast.ReturnStmt: + exits = true + case *ast.BranchStmt: + exits = node.(*ast.BranchStmt).Tok == token.BREAK } return true } ast.Inspect(loop.Body, fn2) + + if exits { + return + } + for _, stmt := range stmts { + report.Report(pass, stmt, "defers in this range loop won't run unless the channel gets closed") + } } code.Preorder(pass, fn, (*ast.RangeStmt)(nil)) return nil, nil @@ -1296,7 +1310,7 @@ func CheckLhsRhsIdentical(pass *analysis.Pass) (interface{}, error) { // no terms, so floats are a possibility return true } - return tset.Any(func(term *typeparams.Term) bool { + return tset.Any(func(term *types.Term) bool { switch typ := term.Type().Underlying().(type) { case *types.Basic: kind := typ.Kind() @@ -1554,7 +1568,7 @@ func CheckEarlyDefer(pass *analysis.Pass) (interface{}, error) { if !ok { continue } - if ident.Obj != lhs.Obj { + if pass.TypesInfo.ObjectOf(ident) != pass.TypesInfo.ObjectOf(lhs) { continue } if sel.Sel.Name != "Close" { @@ -1598,7 +1612,7 @@ func CheckEmptyCriticalSection(pass *analysis.Pass) (interface{}, error) { if !ok { return nil, "", false } - call, ok := expr.X.(*ast.CallExpr) + call, ok := astutil.Unparen(expr.X).(*ast.CallExpr) if !ok { return nil, "", false } @@ -2085,7 +2099,7 @@ func CheckLoopCondition(pass *analysis.Pass) (interface{}, error) { if !ok { return true } - if x.Obj != lhs.Obj { + if pass.TypesInfo.ObjectOf(x) != pass.TypesInfo.ObjectOf(lhs) { return true } if _, ok := loop.Post.(*ast.IncDecStmt); !ok { @@ -2225,13 +2239,13 @@ func CheckIneffectiveLoop(pass *analysis.Pass) (interface{}, error) { if body == nil { return } - labels := map[*ast.Object]ast.Stmt{} + labels := map[types.Object]ast.Stmt{} ast.Inspect(body, func(node ast.Node) bool { label, ok := node.(*ast.LabeledStmt) if !ok { return true } - labels[label.Label.Obj] = label.Stmt + labels[pass.TypesInfo.ObjectOf(label.Label)] = label.Stmt return true }) @@ -2243,7 +2257,7 @@ func CheckIneffectiveLoop(pass *analysis.Pass) (interface{}, error) { body = node.Body loop = node case 
*ast.RangeStmt: - ok := typeutil.All(pass.TypesInfo.TypeOf(node.X), func(term *typeparams.Term) bool { + ok := typeutil.All(pass.TypesInfo.TypeOf(node.X), func(term *types.Term) bool { switch term.Type().Underlying().(type) { case *types.Slice, *types.Chan, *types.Basic, *types.Pointer, *types.Array: return true @@ -2283,11 +2297,11 @@ func CheckIneffectiveLoop(pass *analysis.Pass) (interface{}, error) { case *ast.BranchStmt: switch stmt.Tok { case token.BREAK: - if stmt.Label == nil || labels[stmt.Label.Obj] == loop { + if stmt.Label == nil || labels[pass.TypesInfo.ObjectOf(stmt.Label)] == loop { unconditionalExit = stmt } case token.CONTINUE: - if stmt.Label == nil || labels[stmt.Label.Obj] == loop { + if stmt.Label == nil || labels[pass.TypesInfo.ObjectOf(stmt.Label)] == loop { unconditionalExit = nil return false } @@ -2309,7 +2323,7 @@ func CheckIneffectiveLoop(pass *analysis.Pass) (interface{}, error) { unconditionalExit = nil return false case token.CONTINUE: - if branch.Label != nil && labels[branch.Label.Obj] != loop { + if branch.Label != nil && labels[pass.TypesInfo.ObjectOf(branch.Label)] != loop { return true } unconditionalExit = nil @@ -2899,7 +2913,7 @@ func CheckRepeatedIfElse(pass *analysis.Pass) (interface{}, error) { func CheckSillyBitwiseOps(pass *analysis.Pass) (interface{}, error) { fn := func(node ast.Node) { binop := node.(*ast.BinaryExpr) - if !typeutil.All(pass.TypesInfo.TypeOf(binop), func(term *typeparams.Term) bool { + if !typeutil.All(pass.TypesInfo.TypeOf(binop), func(term *types.Term) bool { b, ok := term.Type().Underlying().(*types.Basic) if !ok { return false @@ -3009,7 +3023,7 @@ func CheckNonOctalFileMode(pass *analysis.Pass) (interface{}, error) { return nil, nil } -func CheckPureFunctions(pass *analysis.Pass) (interface{}, error) { +func CheckSideEffectFreeCalls(pass *analysis.Pass) (interface{}, error) { pure := pass.ResultOf[purity.Analyzer].(purity.Result) fnLoop: @@ -3055,7 +3069,7 @@ fnLoop: // special case for benchmarks in the fmt package continue } - report.Report(pass, ins, fmt.Sprintf("%s is a pure function but its return value is ignored", callee.Object().Name())) + report.Report(pass, ins, fmt.Sprintf("%s doesn't have side effects and its return value is ignored", callee.Object().Name())) } } } @@ -3095,32 +3109,25 @@ func CheckDeprecated(pass *analysis.Pass) (interface{}, error) { return } if ok { - switch std.AlternativeAvailableSince { - case knowledge.DeprecatedNeverUse: - // This should never be used, regardless of the - // targeted Go version. Examples include insecure - // cryptography or inherently broken APIs. - // - // We always want to flag these. - case knowledge.DeprecatedUseNoLonger: - // This should no longer be used. Using it with - // older Go versions might still make sense. - if !code.IsGoVersion(pass, std.DeprecatedSince) { - return - } - default: - if std.AlternativeAvailableSince < 0 { - panic(fmt.Sprintf("unhandled case %d", std.AlternativeAvailableSince)) - } - // Look for the first available alternative, not the first - // version something was deprecated in. If a function was - // deprecated in Go 1.6, an alternative has been available - // already in 1.0, and we're targeting 1.2, it still - // makes sense to use the alternative from 1.0, to be - // future-proof. - if !code.IsGoVersion(pass, std.AlternativeAvailableSince) { - return - } + // In the past, we made use of the AlternativeAvailableSince field. 
If a function was deprecated in Go + // 1.6 and an alternative had been available in Go 1.0, then we'd recommend using the alternative even + // if targeting Go 1.2. The idea was to suggest writing future-proof code by using already-existing + // alternatives. This had a major flaw, however: the user would need to use at least Go 1.6 for + // Staticcheck to know that the function had been deprecated. Thus, targeting Go 1.2 and using Go 1.2 + // would behave differently from targeting Go 1.2 and using Go 1.6. This is especially a problem if the + // user tries to ignore the warning. Depending on the Go version in use, the ignore directive may or may + // not match, causing a warning of its own. + // + // To avoid this issue, we no longer try to be smart. We now only compare the targeted version against + // the version that deprecated an object. + // + // Unfortunately, this issue also applies to AlternativeAvailableSince == DeprecatedNeverUse. Even though it + // is only applied to seriously flawed API, such as broken cryptography, users may wish to ignore those + // warnings. + // + // See also https://staticcheck.io/issues/1318. + if !code.IsGoVersion(pass, std.DeprecatedSince) { + return } } @@ -3166,6 +3173,8 @@ func CheckDeprecated(pass *analysis.Pass) (interface{}, error) { if fn, ok := node.(*ast.FuncDecl); ok { tfn = pass.TypesInfo.ObjectOf(fn.Name) } + + // FIXME(dh): this misses dot-imported objects sel, ok := node.(*ast.SelectorExpr) if !ok { return true @@ -3173,13 +3182,31 @@ func CheckDeprecated(pass *analysis.Pass) (interface{}, error) { obj := pass.TypesInfo.ObjectOf(sel.Sel) if obj_, ok := obj.(*types.Func); ok { - obj = typeparams.OriginMethod(obj_) + obj = obj_.Origin() } if obj.Pkg() == nil { return true } - if pass.Pkg == obj.Pkg() || obj.Pkg().Path()+"_test" == pass.Pkg.Path() { - // Don't flag stuff in our own package + + if obj.Pkg() == pass.Pkg { + // A package is allowed to use its own deprecated objects + return true + } + + // A package "foo" has two related packages "foo_test" and "foo.test", for external tests and the package main + // generated by 'go test' respectively. "foo_test" can import and use "foo", "foo.test" imports and uses "foo" + // and "foo_test". + + if strings.TrimSuffix(pass.Pkg.Path(), "_test") == obj.Pkg().Path() { + // foo_test (the external tests of foo) can use objects from foo. + return true + } + if strings.TrimSuffix(pass.Pkg.Path(), ".test") == obj.Pkg().Path() { + // foo.test (the main package of foo's tests) can use objects from foo. + return true + } + if strings.TrimSuffix(pass.Pkg.Path(), ".test") == strings.TrimSuffix(obj.Pkg().Path(), "_test") { + // foo.test (the main package of foo's tests) can use objects from foo's external tests. 
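(The comment above encodes the foo / foo_test / foo.test relationships; a compact sketch of that path logic, with the helper name and package paths invented for illustration — the hunk resumes below:)

```go
package main

import (
	"fmt"
	"strings"
)

// mayUseDeprecated reports whether pkgPath may use deprecated objects from
// objPath under the naming scheme described above. Simplified: the real
// check is spread over several early returns.
func mayUseDeprecated(pkgPath, objPath string) bool {
	return pkgPath == objPath || // a package may use its own deprecated API
		strings.TrimSuffix(pkgPath, "_test") == objPath || // foo_test uses foo
		strings.TrimSuffix(pkgPath, ".test") == objPath || // foo.test uses foo
		strings.TrimSuffix(pkgPath, ".test") == strings.TrimSuffix(objPath, "_test") // foo.test uses foo_test
}

func main() {
	fmt.Println(mayUseDeprecated("example.com/foo_test", "example.com/foo"))      // true
	fmt.Println(mayUseDeprecated("example.com/foo.test", "example.com/foo_test")) // true
	fmt.Println(mayUseDeprecated("example.com/bar", "example.com/foo"))           // false
}
```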
return true } @@ -3208,6 +3235,19 @@ func CheckDeprecated(pass *analysis.Pass) (interface{}, error) { } } + if strings.TrimSuffix(pass.Pkg.Path(), "_test") == path { + // foo_test can import foo + return + } + if strings.TrimSuffix(pass.Pkg.Path(), ".test") == path { + // foo.test can import foo + return + } + if strings.TrimSuffix(pass.Pkg.Path(), ".test") == strings.TrimSuffix(path, "_test") { + // foo.test can import foo_test + return + } + handleDeprecation(depr, spec.Path, path, path, nil) } } @@ -3421,7 +3461,7 @@ func CheckMapBytesKey(pass *analysis.Pass) (interface{}, error) { } tset := typeutil.NewTypeSet(conv.X.Type()) // If at least one of the types is []byte, then it's more efficient to inline the conversion - if !tset.Any(func(term *typeparams.Term) bool { + if !tset.Any(func(term *types.Term) bool { s, ok := term.Type().Underlying().(*types.Slice) return ok && s.Elem().Underlying() == types.Universe.Lookup("byte").Type() }) { @@ -3996,12 +4036,12 @@ func CheckImpossibleTypeAssertion(pass *analysis.Pass) (interface{}, error) { ms := msc.MethodSet(left) for i := 0; i < righti.NumMethods(); i++ { - mr := righti.Method(i) + mr := righti.Method(i).Origin() sel := ms.Lookup(mr.Pkg(), mr.Name()) if sel == nil { continue } - ml := sel.Obj().(*types.Func) + ml := sel.Obj().(*types.Func).Origin() if types.AssignableTo(ml.Type(), mr.Type()) { continue } @@ -4135,7 +4175,7 @@ func CheckMaybeNil(pass *analysis.Pass) (interface{}, error) { ptr = instr.Addr case *ir.IndexAddr: ptr = instr.X - if typeutil.All(ptr.Type(), func(term *typeparams.Term) bool { + if typeutil.All(ptr.Type(), func(term *types.Term) bool { if _, ok := term.Type().Underlying().(*types.Slice); ok { return true } diff --git a/vendor/honnef.co/go/tools/stylecheck/lint.go b/vendor/honnef.co/go/tools/stylecheck/lint.go index 4203c610fa..a15e6d97ac 100644 --- a/vendor/honnef.co/go/tools/stylecheck/lint.go +++ b/vendor/honnef.co/go/tools/stylecheck/lint.go @@ -24,7 +24,6 @@ import ( "honnef.co/go/tools/internal/passes/buildir" "honnef.co/go/tools/pattern" - "golang.org/x/exp/typeparams" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" @@ -417,9 +416,9 @@ func CheckErrorStrings(pass *analysis.Pass) (interface{}, error) { continue } for _, c := range word[n:] { - if unicode.IsUpper(c) { - // Word is probably an initialism or - // multi-word function name + if unicode.IsUpper(c) || unicode.IsDigit(c) { + // Word is probably an initialism or multi-word function name. Digits cover elliptic curves like + // P384. 
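(The widened CheckErrorStrings heuristic above also treats digits as initialism markers, so error strings starting with names like P384 aren't flagged. A standalone simplification — unlike the real check, this scans from the second rune rather than from the computed prefix n:)

```go
package main

import (
	"fmt"
	"unicode"
)

// looksLikeInitialism reports whether a word is probably an initialism or
// a multi-word function name: any later upper-case letter or digit counts.
func looksLikeInitialism(word string) bool {
	for i, c := range word {
		if i == 0 {
			continue // the first rune may legitimately be capitalized
		}
		if unicode.IsUpper(c) || unicode.IsDigit(c) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(looksLikeInitialism("P384"))    // true: contains digits
	fmt.Println(looksLikeInitialism("EOF"))     // true: trailing upper-case
	fmt.Println(looksLikeInitialism("reading")) // false
}
```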
continue instrLoop } } @@ -846,7 +845,7 @@ func CheckExportedFunctionDocs(pass *analysis.Pass) (interface{}, error) { switch T := T.(type) { case *ast.IndexExpr: ident = T.X.(*ast.Ident) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: ident = T.X.(*ast.Ident) case *ast.Ident: ident = T diff --git a/vendor/honnef.co/go/tools/stylecheck/names.go b/vendor/honnef.co/go/tools/stylecheck/names.go index f038d0632f..d37bf6baf6 100644 --- a/vendor/honnef.co/go/tools/stylecheck/names.go +++ b/vendor/honnef.co/go/tools/stylecheck/names.go @@ -114,7 +114,11 @@ func CheckNames(pass *analysis.Pass) (interface{}, error) { return } - if code.IsInTest(pass, v) && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { + if code.IsInTest(pass, v) && + (strings.HasPrefix(v.Name.Name, "Example") || + strings.HasPrefix(v.Name.Name, "Test") || + strings.HasPrefix(v.Name.Name, "Benchmark") || + strings.HasPrefix(v.Name.Name, "Fuzz")) { return } diff --git a/vendor/honnef.co/go/tools/unused/edge.go b/vendor/honnef.co/go/tools/unused/edge.go deleted file mode 100644 index 6d32946d29..0000000000 --- a/vendor/honnef.co/go/tools/unused/edge.go +++ /dev/null @@ -1,59 +0,0 @@ -package unused - -//go:generate go run golang.org/x/tools/cmd/stringer@master -type edgeKind -type edgeKind uint64 - -func (e edgeKind) is(o edgeKind) bool { - return e&o != 0 -} - -const ( - edgeAlias edgeKind = 1 << iota - edgeBlankField - edgeAnonymousStruct - edgeCgoExported - edgeConstGroup - edgeElementType - edgeEmbeddedInterface - edgeExportedConstant - edgeExportedField - edgeExportedFunction - edgeExportedMethod - edgeExportedType - edgeExportedVariable - edgeExtendsExportedFields - edgeExtendsExportedMethodSet - edgeFieldAccess - edgeFunctionArgument - edgeFunctionResult - edgeFunctionSignature - edgeImplements - edgeInstructionOperand - edgeInterfaceCall - edgeInterfaceMethod - edgeKeyType - edgeLinkname - edgeMainFunction - edgeNamedType - edgeNetRPCRegister - edgeNoCopySentinel - edgeProvidesMethod - edgeReceiver - edgeRuntimeFunction - edgeSignature - edgeStructConversion - edgeTestSink - edgeTupleElement - edgeType - edgeTypeName - edgeUnderlyingType - edgePointerType - edgeUnsafeConversion - edgeUsedConstant - edgeVarDecl - edgeIgnored - edgeSamePointer - edgeTypeParam - edgeTypeArg - edgeUnionTerm -) diff --git a/vendor/honnef.co/go/tools/unused/edgekind_string.go b/vendor/honnef.co/go/tools/unused/edgekind_string.go deleted file mode 100644 index ae27b2507d..0000000000 --- a/vendor/honnef.co/go/tools/unused/edgekind_string.go +++ /dev/null @@ -1,119 +0,0 @@ -// Code generated by "stringer -type edgeKind"; DO NOT EDIT. - -package unused - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[edgeAlias-1] - _ = x[edgeBlankField-2] - _ = x[edgeAnonymousStruct-4] - _ = x[edgeCgoExported-8] - _ = x[edgeConstGroup-16] - _ = x[edgeElementType-32] - _ = x[edgeEmbeddedInterface-64] - _ = x[edgeExportedConstant-128] - _ = x[edgeExportedField-256] - _ = x[edgeExportedFunction-512] - _ = x[edgeExportedMethod-1024] - _ = x[edgeExportedType-2048] - _ = x[edgeExportedVariable-4096] - _ = x[edgeExtendsExportedFields-8192] - _ = x[edgeExtendsExportedMethodSet-16384] - _ = x[edgeFieldAccess-32768] - _ = x[edgeFunctionArgument-65536] - _ = x[edgeFunctionResult-131072] - _ = x[edgeFunctionSignature-262144] - _ = x[edgeImplements-524288] - _ = x[edgeInstructionOperand-1048576] - _ = x[edgeInterfaceCall-2097152] - _ = x[edgeInterfaceMethod-4194304] - _ = x[edgeKeyType-8388608] - _ = x[edgeLinkname-16777216] - _ = x[edgeMainFunction-33554432] - _ = x[edgeNamedType-67108864] - _ = x[edgeNetRPCRegister-134217728] - _ = x[edgeNoCopySentinel-268435456] - _ = x[edgeProvidesMethod-536870912] - _ = x[edgeReceiver-1073741824] - _ = x[edgeRuntimeFunction-2147483648] - _ = x[edgeSignature-4294967296] - _ = x[edgeStructConversion-8589934592] - _ = x[edgeTestSink-17179869184] - _ = x[edgeTupleElement-34359738368] - _ = x[edgeType-68719476736] - _ = x[edgeTypeName-137438953472] - _ = x[edgeUnderlyingType-274877906944] - _ = x[edgePointerType-549755813888] - _ = x[edgeUnsafeConversion-1099511627776] - _ = x[edgeUsedConstant-2199023255552] - _ = x[edgeVarDecl-4398046511104] - _ = x[edgeIgnored-8796093022208] - _ = x[edgeSamePointer-17592186044416] - _ = x[edgeTypeParam-35184372088832] - _ = x[edgeTypeArg-70368744177664] - _ = x[edgeUnionTerm-140737488355328] -} - -const _edgeKind_name = "edgeAliasedgeBlankFieldedgeAnonymousStructedgeCgoExportededgeConstGroupedgeElementTypeedgeEmbeddedInterfaceedgeExportedConstantedgeExportedFieldedgeExportedFunctionedgeExportedMethodedgeExportedTypeedgeExportedVariableedgeExtendsExportedFieldsedgeExtendsExportedMethodSetedgeFieldAccessedgeFunctionArgumentedgeFunctionResultedgeFunctionSignatureedgeImplementsedgeInstructionOperandedgeInterfaceCalledgeInterfaceMethodedgeKeyTypeedgeLinknameedgeMainFunctionedgeNamedTypeedgeNetRPCRegisteredgeNoCopySentineledgeProvidesMethodedgeReceiveredgeRuntimeFunctionedgeSignatureedgeStructConversionedgeTestSinkedgeTupleElementedgeTypeedgeTypeNameedgeUnderlyingTypeedgePointerTypeedgeUnsafeConversionedgeUsedConstantedgeVarDecledgeIgnorededgeSamePointeredgeTypeParamedgeTypeArgedgeUnionTerm" - -var _edgeKind_map = map[edgeKind]string{ - 1: _edgeKind_name[0:9], - 2: _edgeKind_name[9:23], - 4: _edgeKind_name[23:42], - 8: _edgeKind_name[42:57], - 16: _edgeKind_name[57:71], - 32: _edgeKind_name[71:86], - 64: _edgeKind_name[86:107], - 128: _edgeKind_name[107:127], - 256: _edgeKind_name[127:144], - 512: _edgeKind_name[144:164], - 1024: _edgeKind_name[164:182], - 2048: _edgeKind_name[182:198], - 4096: _edgeKind_name[198:218], - 8192: _edgeKind_name[218:243], - 16384: _edgeKind_name[243:271], - 32768: _edgeKind_name[271:286], - 65536: _edgeKind_name[286:306], - 131072: _edgeKind_name[306:324], - 262144: _edgeKind_name[324:345], - 524288: _edgeKind_name[345:359], - 1048576: _edgeKind_name[359:381], - 2097152: _edgeKind_name[381:398], - 4194304: _edgeKind_name[398:417], - 8388608: _edgeKind_name[417:428], - 16777216: _edgeKind_name[428:440], - 33554432: _edgeKind_name[440:456], - 67108864: _edgeKind_name[456:469], - 134217728: _edgeKind_name[469:487], - 268435456: _edgeKind_name[487:505], - 536870912: _edgeKind_name[505:523], - 
1073741824: _edgeKind_name[523:535], - 2147483648: _edgeKind_name[535:554], - 4294967296: _edgeKind_name[554:567], - 8589934592: _edgeKind_name[567:587], - 17179869184: _edgeKind_name[587:599], - 34359738368: _edgeKind_name[599:615], - 68719476736: _edgeKind_name[615:623], - 137438953472: _edgeKind_name[623:635], - 274877906944: _edgeKind_name[635:653], - 549755813888: _edgeKind_name[653:668], - 1099511627776: _edgeKind_name[668:688], - 2199023255552: _edgeKind_name[688:704], - 4398046511104: _edgeKind_name[704:715], - 8796093022208: _edgeKind_name[715:726], - 17592186044416: _edgeKind_name[726:741], - 35184372088832: _edgeKind_name[741:754], - 70368744177664: _edgeKind_name[754:765], - 140737488355328: _edgeKind_name[765:778], -} - -func (i edgeKind) String() string { - if str, ok := _edgeKind_map[i]; ok { - return str - } - return "edgeKind(" + strconv.FormatInt(int64(i), 10) + ")" -} diff --git a/vendor/honnef.co/go/tools/unused/implements.go b/vendor/honnef.co/go/tools/unused/implements.go index f62018572b..2a202c6d73 100644 --- a/vendor/honnef.co/go/tools/unused/implements.go +++ b/vendor/honnef.co/go/tools/unused/implements.go @@ -37,7 +37,7 @@ func sameId(obj types.Object, pkg *types.Package, name string) bool { return pkg.Path() == obj.Pkg().Path() } -func (g *graph) implements(V types.Type, T *types.Interface, msV *types.MethodSet) ([]*types.Selection, bool) { +func implements(V types.Type, T *types.Interface, msV *types.MethodSet) ([]*types.Selection, bool) { // fast path for common case if T.Empty() { return nil, true diff --git a/vendor/honnef.co/go/tools/unused/runtime.go b/vendor/honnef.co/go/tools/unused/runtime.go new file mode 100644 index 0000000000..11be4a34af --- /dev/null +++ b/vendor/honnef.co/go/tools/unused/runtime.go @@ -0,0 +1,331 @@ +package unused + +// Functions defined in the Go runtime that may be called through +// compiler magic or via assembly. 
+var runtimeFuncs = map[string]bool{ + // Copied from cmd/compile/internal/typecheck/builtin.go, var runtimeDecls + "newobject": true, + "panicindex": true, + "panicslice": true, + "panicdivide": true, + "panicmakeslicelen": true, + "throwinit": true, + "panicwrap": true, + "gopanic": true, + "gorecover": true, + "goschedguarded": true, + "printbool": true, + "printfloat": true, + "printint": true, + "printhex": true, + "printuint": true, + "printcomplex": true, + "printstring": true, + "printpointer": true, + "printiface": true, + "printeface": true, + "printslice": true, + "printnl": true, + "printsp": true, + "printlock": true, + "printunlock": true, + "concatstring2": true, + "concatstring3": true, + "concatstring4": true, + "concatstring5": true, + "concatstrings": true, + "cmpstring": true, + "intstring": true, + "slicebytetostring": true, + "slicebytetostringtmp": true, + "slicerunetostring": true, + "stringtoslicebyte": true, + "stringtoslicerune": true, + "slicecopy": true, + "slicestringcopy": true, + "decoderune": true, + "countrunes": true, + "convI2I": true, + "convT16": true, + "convT32": true, + "convT64": true, + "convTstring": true, + "convTslice": true, + "convT2E": true, + "convT2Enoptr": true, + "convT2I": true, + "convT2Inoptr": true, + "assertE2I": true, + "assertE2I2": true, + "assertI2I": true, + "assertI2I2": true, + "panicdottypeE": true, + "panicdottypeI": true, + "panicnildottype": true, + "ifaceeq": true, + "efaceeq": true, + "fastrand": true, + "makemap64": true, + "makemap": true, + "makemap_small": true, + "mapaccess1": true, + "mapaccess1_fast32": true, + "mapaccess1_fast64": true, + "mapaccess1_faststr": true, + "mapaccess1_fat": true, + "mapaccess2": true, + "mapaccess2_fast32": true, + "mapaccess2_fast64": true, + "mapaccess2_faststr": true, + "mapaccess2_fat": true, + "mapassign": true, + "mapassign_fast32": true, + "mapassign_fast32ptr": true, + "mapassign_fast64": true, + "mapassign_fast64ptr": true, + "mapassign_faststr": true, + "mapiterinit": true, + "mapdelete": true, + "mapdelete_fast32": true, + "mapdelete_fast64": true, + "mapdelete_faststr": true, + "mapiternext": true, + "mapclear": true, + "makechan64": true, + "makechan": true, + "chanrecv1": true, + "chanrecv2": true, + "chansend1": true, + "closechan": true, + "writeBarrier": true, + "typedmemmove": true, + "typedmemclr": true, + "typedslicecopy": true, + "selectnbsend": true, + "selectnbrecv": true, + "selectnbrecv2": true, + "selectsetpc": true, + "selectgo": true, + "block": true, + "makeslice": true, + "makeslice64": true, + "growslice": true, + "memmove": true, + "memclrNoHeapPointers": true, + "memclrHasPointers": true, + "memequal": true, + "memequal8": true, + "memequal16": true, + "memequal32": true, + "memequal64": true, + "memequal128": true, + "int64div": true, + "uint64div": true, + "int64mod": true, + "uint64mod": true, + "float64toint64": true, + "float64touint64": true, + "float64touint32": true, + "int64tofloat64": true, + "uint64tofloat64": true, + "uint32tofloat64": true, + "complex128div": true, + "racefuncenter": true, + "racefuncenterfp": true, + "racefuncexit": true, + "raceread": true, + "racewrite": true, + "racereadrange": true, + "racewriterange": true, + "msanread": true, + "msanwrite": true, + "x86HasPOPCNT": true, + "x86HasSSE41": true, + "arm64HasATOMICS": true, + "mallocgc": true, + "panicshift": true, + "panicmakeslicecap": true, + "goPanicIndex": true, + "goPanicIndexU": true, + "goPanicSliceAlen": true, + "goPanicSliceAlenU": true, + "goPanicSliceAcap": 
true, + "goPanicSliceAcapU": true, + "goPanicSliceB": true, + "goPanicSliceBU": true, + "goPanicSlice3Alen": true, + "goPanicSlice3AlenU": true, + "goPanicSlice3Acap": true, + "goPanicSlice3AcapU": true, + "goPanicSlice3B": true, + "goPanicSlice3BU": true, + "goPanicSlice3C": true, + "goPanicSlice3CU": true, + "goPanicSliceConvert": true, + "printuintptr": true, + "convT": true, + "convTnoptr": true, + "makeslicecopy": true, + "unsafeslicecheckptr": true, + "panicunsafeslicelen": true, + "panicunsafeslicenilptr": true, + "unsafestringcheckptr": true, + "panicunsafestringlen": true, + "panicunsafestringnilptr": true, + "mulUintptr": true, + "memequal0": true, + "f32equal": true, + "f64equal": true, + "c64equal": true, + "c128equal": true, + "strequal": true, + "interequal": true, + "nilinterequal": true, + "memhash": true, + "memhash0": true, + "memhash8": true, + "memhash16": true, + "memhash32": true, + "memhash64": true, + "memhash128": true, + "f32hash": true, + "f64hash": true, + "c64hash": true, + "c128hash": true, + "strhash": true, + "interhash": true, + "nilinterhash": true, + "int64tofloat32": true, + "uint64tofloat32": true, + "getcallerpc": true, + "getcallersp": true, + "msanmove": true, + "asanread": true, + "asanwrite": true, + "checkptrAlignment": true, + "checkptrArithmetic": true, + "libfuzzerTraceCmp1": true, + "libfuzzerTraceCmp2": true, + "libfuzzerTraceCmp4": true, + "libfuzzerTraceCmp8": true, + "libfuzzerTraceConstCmp1": true, + "libfuzzerTraceConstCmp2": true, + "libfuzzerTraceConstCmp4": true, + "libfuzzerTraceConstCmp8": true, + "libfuzzerHookStrCmp": true, + "libfuzzerHookEqualFold": true, + "addCovMeta": true, + "x86HasFMA": true, + "armHasVFPv4": true, + + // Extracted from assembly code in the standard library, with the exception of the runtime package itself + "abort": true, + "aeshashbody": true, + "args": true, + "asminit": true, + "badctxt": true, + "badmcall2": true, + "badmcall": true, + "badmorestackg0": true, + "badmorestackgsignal": true, + "badsignal2": true, + "callbackasm1": true, + "callCfunction": true, + "cgocallback_gofunc": true, + "cgocallbackg": true, + "checkgoarm": true, + "check": true, + "debugCallCheck": true, + "debugCallWrap": true, + "emptyfunc": true, + "entersyscall": true, + "exit": true, + "exits": true, + "exitsyscall": true, + "externalthreadhandler": true, + "findnull": true, + "goexit1": true, + "gostring": true, + "i386_set_ldt": true, + "_initcgo": true, + "init_thread_tls": true, + "ldt0setup": true, + "libpreinit": true, + "load_g": true, + "morestack": true, + "mstart": true, + "nacl_sysinfo": true, + "nanotimeQPC": true, + "nanotime": true, + "newosproc0": true, + "newproc": true, + "newstack": true, + "noted": true, + "nowQPC": true, + "osinit": true, + "printf": true, + "racecallback": true, + "reflectcallmove": true, + "reginit": true, + "rt0_go": true, + "save_g": true, + "schedinit": true, + "setldt": true, + "settls": true, + "sighandler": true, + "sigprofNonGo": true, + "sigtrampgo": true, + "_sigtramp": true, + "sigtramp": true, + "stackcheck": true, + "syscall_chdir": true, + "syscall_chroot": true, + "syscall_close": true, + "syscall_dup2": true, + "syscall_execve": true, + "syscall_exit": true, + "syscall_fcntl": true, + "syscall_forkx": true, + "syscall_gethostname": true, + "syscall_getpid": true, + "syscall_ioctl": true, + "syscall_pipe": true, + "syscall_rawsyscall6": true, + "syscall_rawSyscall6": true, + "syscall_rawsyscall": true, + "syscall_RawSyscall": true, + "syscall_rawsysvicall6": true, + 
"syscall_setgid": true, + "syscall_setgroups": true, + "syscall_setpgid": true, + "syscall_setsid": true, + "syscall_setuid": true, + "syscall_syscall6": true, + "syscall_syscall": true, + "syscall_Syscall": true, + "syscall_sysvicall6": true, + "syscall_wait4": true, + "syscall_write": true, + "traceback": true, + "tstart": true, + "usplitR0": true, + "wbBufFlush": true, + "write": true, + + // Other runtime functions that can get called in non-standard ways + "bgsweep": true, + "memhash_varlen": true, + "strhashFallback": true, + "asanregisterglobals": true, + "cgoUse": true, + "cgoCheckPointer": true, + "cgoCheckResult": true, + "_cgo_panic_internal": true, + "addExitHook": true, +} + +var runtimeCoverageFuncs = map[string]bool{ + "initHook": true, + "markProfileEmitted": true, + "processCoverTestDir": true, +} diff --git a/vendor/honnef.co/go/tools/unused/serialize.go b/vendor/honnef.co/go/tools/unused/serialize.go new file mode 100644 index 0000000000..126e7400aa --- /dev/null +++ b/vendor/honnef.co/go/tools/unused/serialize.go @@ -0,0 +1,99 @@ +package unused + +import ( + "fmt" + "go/token" + "os" + + "golang.org/x/tools/go/types/objectpath" +) + +type ObjectPath struct { + PkgPath string + ObjPath objectpath.Path +} + +// XXX make sure that node 0 always exists and is always the root + +type SerializedGraph struct { + nodes []Node + nodesByPath map[ObjectPath]NodeID + // XXX deduplicating on position is dubious for `switch x := foo.(type)`, where x will be declared many times for + // the different types, but all at the same position. On the other hand, merging these nodes is probably fine. + nodesByPosition map[token.Position]NodeID +} + +func trace(f string, args ...interface{}) { + fmt.Fprintf(os.Stderr, f, args...) + fmt.Fprintln(os.Stderr) +} + +func (g *SerializedGraph) Merge(nodes []Node) { + if g.nodesByPath == nil { + g.nodesByPath = map[ObjectPath]NodeID{} + } + if g.nodesByPosition == nil { + g.nodesByPosition = map[token.Position]NodeID{} + } + if len(g.nodes) == 0 { + // Seed nodes with a root node + g.nodes = append(g.nodes, Node{}) + } + // OPT(dh): reuse storage between calls to Merge + remapping := make([]NodeID, len(nodes)) + + // First pass: compute remapping of IDs of to-be-merged nodes + for _, n := range nodes { + // XXX Column is never 0. it's 1 if there is no column information in the export data. which sucks, because + // objects can also genuinely be in column 1. + if n.id != 0 && n.obj.Path == (ObjectPath{}) && n.obj.Position.Column == 0 { + // If the object has no path, then it couldn't have come from export data, which means it needs to have full + // position information including a column. 
+ panic(fmt.Sprintf("object %q has no path but also no column information", n.obj.Name)) + } + + if orig, ok := g.nodesByPath[n.obj.Path]; ok { + // We already have a node for this object + trace("deduplicating %d -> %d based on path %s", n.id, orig, n.obj.Path) + remapping[n.id] = orig + } else if orig, ok := g.nodesByPosition[n.obj.Position]; ok && n.obj.Position.Column != 0 { + // We already have a node for this object + trace("deduplicating %d -> %d based on position %s", n.id, orig, n.obj.Position) + remapping[n.id] = orig + } else { + // This object is new to us; change ID to avoid collision + newID := NodeID(len(g.nodes)) + trace("new node, remapping %d -> %d", n.id, newID) + remapping[n.id] = newID + g.nodes = append(g.nodes, Node{ + id: newID, + obj: n.obj, + uses: make([]NodeID, 0, len(n.uses)), + owns: make([]NodeID, 0, len(n.owns)), + }) + if n.id == 0 { + // Our root uses all the roots of the subgraphs + g.nodes[0].uses = append(g.nodes[0].uses, newID) + } + if n.obj.Path != (ObjectPath{}) { + g.nodesByPath[n.obj.Path] = newID + } + if n.obj.Position.Column != 0 { + g.nodesByPosition[n.obj.Position] = newID + } + } + } + + // Second step: apply remapping + for _, n := range nodes { + n.id = remapping[n.id] + for i := range n.uses { + n.uses[i] = remapping[n.uses[i]] + } + for i := range n.owns { + n.owns[i] = remapping[n.owns[i]] + } + g.nodes[n.id].uses = append(g.nodes[n.id].uses, n.uses...) + g.nodes[n.id].owns = append(g.nodes[n.id].owns, n.owns...) + } +} diff --git a/vendor/honnef.co/go/tools/unused/unused.go b/vendor/honnef.co/go/tools/unused/unused.go index f703ed3d1f..edd2630759 100644 --- a/vendor/honnef.co/go/tools/unused/unused.go +++ b/vendor/honnef.co/go/tools/unused/unused.go @@ -1,8 +1,6 @@ // Package unused contains code for finding unused code. package unused -// TODO(dh): don't add instantiated types/methods to the graph. add the origin types/methods. - import ( "fmt" "go/ast" @@ -12,66 +10,60 @@ import ( "reflect" "strings" - "honnef.co/go/tools/analysis/code" "honnef.co/go/tools/analysis/facts/directives" "honnef.co/go/tools/analysis/facts/generated" "honnef.co/go/tools/analysis/lint" "honnef.co/go/tools/analysis/report" "honnef.co/go/tools/go/ast/astutil" - "honnef.co/go/tools/go/ir" "honnef.co/go/tools/go/types/typeutil" - "honnef.co/go/tools/internal/passes/buildir" - "golang.org/x/exp/typeparams" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/types/objectpath" ) -var Debug io.Writer +// OPT(dh): don't track local variables that can't have any interesting outgoing edges. For example, using a local +// variable of type int is meaningless; we don't care if `int` is used or not. +// +// Note that we do have to track variables with for example array types, because the array type could have involved a +// named constant. +// +// We probably have different culling needs depending on the mode of operation, too. If we analyze multiple packages in +// one graph (unused's "whole program" mode), we could remove further useless edges (e.g. into nodes that themselves +// have no outgoing edges and aren't meaningful objects on their own) after having analyzed a package, to keep the +// in-memory representation small on average. If we only analyze a single package, that step would just waste cycles, as +// we're about to throw the entire graph away, anyway. -// The graph we construct omits nodes along a path that do not -// contribute any new information to the solution. 
For example, the -// full graph for a function with a receiver would be Func -> -// Signature -> Var -> Type. However, since signatures cannot be -// unused, and receivers are always considered used, we can compact -// the graph down to Func -> Type. This makes the graph smaller, but -// harder to debug. +// TODO(dh): currently, types use methods that implement interfaces. However, this makes a method used even if the +// relevant interface is never used. What if instead interfaces used those methods? Right now we cannot do that, because +// methods use their receivers, so using a method uses the type. But do we need that edge? Is there a way to refer to a +// method without explicitly mentioning the type somewhere? If not, the edge from method to receiver is superfluous. -// TODO(dh): conversions between structs mark fields as used, but the -// conversion itself isn't part of that subgraph. even if the function -// containing the conversion is unused, the fields will be marked as -// used. +// XXX vet all code for proper use of core types // TODO(dh): we cannot observe function calls in assembly files. /* +This overview is true when using the default options. Different options may change individual behaviors. + - packages use: - (1.1) exported named types - - (1.2) exported functions + - (1.2) exported functions (but not methods!) - (1.3) exported variables - (1.4) exported constants - (1.5) init functions - (1.6) functions exported to cgo - (1.7) the main function iff in the main package - (1.8) symbols linked via go:linkname + - (1.9) objects in generated files - named types use: - (2.1) exported methods - (2.2) the type they're based on - - (2.3) all their aliases. we can't easily track uses of aliases - because go/types turns them into uses of the aliased types. assume - that if a type is used, so are all of its aliases. - - (2.4) the pointer type. this aids with eagerly implementing - interfaces. if a method that implements an interface is defined on - a pointer receiver, and the pointer type is never used, but the - named type is, then we still want to mark the method as used. - (2.5) all their type parameters. Unused type parameters are probably useless, but they're a brand new feature and we don't want to introduce false positives because we couldn't anticipate some novel use-case. - (2.6) all their type arguments -- variables and constants use: - - their types - - functions use: - (4.1) all their arguments, return parameters and receivers - (4.2) anonymous functions defined beneath them @@ -80,11 +72,14 @@ var Debug io.Writer that way we don't have to keep track of closures escaping functions. - (4.4) functions they return. we assume that someone else will call the returned function - (4.5) functions/interface methods they call - - types they instantiate or convert to + - (4.6) types they instantiate or convert to - (4.7) fields they access - - (4.8) types of all instructions - (4.9) package-level variables they assign to iff in tests (sinks for benchmarks) - (4.10) all their type parameters. See 2.5 for reasoning. + - (4.11) local variables + - Note that the majority of this is handled implicitly by seeing idents be used. In particular, unlike the old + IR-based implementation, the AST-based one doesn't care about closures, bound methods or anonymous functions. + They're all just additional nodes in the AST. 
- conversions use: - (5.1) when converting between two equivalent structs, the fields in @@ -107,7 +102,7 @@ var Debug io.Writer - (8.1) We do not technically care about interfaces that only consist of exported methods. Exported methods on concrete types are always marked as used. - - Any concrete type implements all known interfaces. Even if it isn't + - (8.2) Any concrete type implements all known interfaces. Even if it isn't assigned to any interfaces in our code, the user may receive a value of the type and expect to pass it back to us through an interface. @@ -126,20 +121,22 @@ var Debug io.Writer used by 8.3 just because it contributes A's methods to C. - Inherent uses: - - thunks and other generated wrappers call the real function - (9.2) variables use their types - (9.3) types use their underlying and element types - (9.4) conversions use the type they convert to - - (9.5) instructions use their operands - - (9.6) instructions use their operands' types - (9.7) variable _reads_ use variables, writes do not, except in tests - (9.8) runtime functions that may be called from user code via the compiler + - (9.9) objects named the blank identifier are used. They cannot be referred to and are usually used explicitly to + use something that would otherwise be unused. + - The majority of idents get marked as read by virtue of being in the AST. - const groups: - (10.1) if one constant out of a block of constants is used, mark all - of them used. a lot of the time, unused constants exist for the sake - of completeness. See also - https://github.com/dominikh/go-tools/issues/365 + - (10.1) if one constant out of a block of constants is used, mark all + of them used. a lot of the time, unused constants exist for the sake + of completeness. See also + https://github.com/dominikh/go-tools/issues/365 + + Do not, however, include constants named _ in constant groups. - (11.1) anonymous struct types use all their fields. we cannot @@ -155,274 +152,19 @@ var Debug io.Writer */ +var Debug io.Writer + func assert(b bool) { if !b { panic("failed assertion") } } -// /usr/lib/go/src/runtime/proc.go:433:6: func badmorestackg0 is unused (U1000) - -// Functions defined in the Go runtime that may be called through -// compiler magic or via assembly. 
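The rule overview above is dense; a small package makes the common cases concrete. In the sketch below (all names invented for illustration), Exported and T.String are roots per rules (1.2) and (2.1), helper survives because a root reaches it, and only orphan would be reported by U1000:

package demo

// Exported is a root: packages use exported functions (1.2).
func Exported() { helper() }

// helper is reachable from Exported, so it is considered used.
func helper() {}

// orphan is unexported and never referenced; U1000 reports it.
func orphan() {}

// T is a root because it is an exported named type (1.1), and named
// types use their exported methods (2.1), keeping String alive.
type T struct{}

func (T) String() string { return "t" }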
-var runtimeFuncs = map[string]bool{ - // The first part of the list is copied from - // cmd/compile/internal/gc/builtin.go, var runtimeDecls - "newobject": true, - "panicindex": true, - "panicslice": true, - "panicdivide": true, - "panicmakeslicelen": true, - "throwinit": true, - "panicwrap": true, - "gopanic": true, - "gorecover": true, - "goschedguarded": true, - "printbool": true, - "printfloat": true, - "printint": true, - "printhex": true, - "printuint": true, - "printcomplex": true, - "printstring": true, - "printpointer": true, - "printiface": true, - "printeface": true, - "printslice": true, - "printnl": true, - "printsp": true, - "printlock": true, - "printunlock": true, - "concatstring2": true, - "concatstring3": true, - "concatstring4": true, - "concatstring5": true, - "concatstrings": true, - "cmpstring": true, - "intstring": true, - "slicebytetostring": true, - "slicebytetostringtmp": true, - "slicerunetostring": true, - "stringtoslicebyte": true, - "stringtoslicerune": true, - "slicecopy": true, - "slicestringcopy": true, - "decoderune": true, - "countrunes": true, - "convI2I": true, - "convT16": true, - "convT32": true, - "convT64": true, - "convTstring": true, - "convTslice": true, - "convT2E": true, - "convT2Enoptr": true, - "convT2I": true, - "convT2Inoptr": true, - "assertE2I": true, - "assertE2I2": true, - "assertI2I": true, - "assertI2I2": true, - "panicdottypeE": true, - "panicdottypeI": true, - "panicnildottype": true, - "ifaceeq": true, - "efaceeq": true, - "fastrand": true, - "makemap64": true, - "makemap": true, - "makemap_small": true, - "mapaccess1": true, - "mapaccess1_fast32": true, - "mapaccess1_fast64": true, - "mapaccess1_faststr": true, - "mapaccess1_fat": true, - "mapaccess2": true, - "mapaccess2_fast32": true, - "mapaccess2_fast64": true, - "mapaccess2_faststr": true, - "mapaccess2_fat": true, - "mapassign": true, - "mapassign_fast32": true, - "mapassign_fast32ptr": true, - "mapassign_fast64": true, - "mapassign_fast64ptr": true, - "mapassign_faststr": true, - "mapiterinit": true, - "mapdelete": true, - "mapdelete_fast32": true, - "mapdelete_fast64": true, - "mapdelete_faststr": true, - "mapiternext": true, - "mapclear": true, - "makechan64": true, - "makechan": true, - "chanrecv1": true, - "chanrecv2": true, - "chansend1": true, - "closechan": true, - "writeBarrier": true, - "typedmemmove": true, - "typedmemclr": true, - "typedslicecopy": true, - "selectnbsend": true, - "selectnbrecv": true, - "selectnbrecv2": true, - "selectsetpc": true, - "selectgo": true, - "block": true, - "makeslice": true, - "makeslice64": true, - "growslice": true, - "memmove": true, - "memclrNoHeapPointers": true, - "memclrHasPointers": true, - "memequal": true, - "memequal8": true, - "memequal16": true, - "memequal32": true, - "memequal64": true, - "memequal128": true, - "int64div": true, - "uint64div": true, - "int64mod": true, - "uint64mod": true, - "float64toint64": true, - "float64touint64": true, - "float64touint32": true, - "int64tofloat64": true, - "uint64tofloat64": true, - "uint32tofloat64": true, - "complex128div": true, - "racefuncenter": true, - "racefuncenterfp": true, - "racefuncexit": true, - "raceread": true, - "racewrite": true, - "racereadrange": true, - "racewriterange": true, - "msanread": true, - "msanwrite": true, - "x86HasPOPCNT": true, - "x86HasSSE41": true, - "arm64HasATOMICS": true, - - // The second part of the list is extracted from assembly code in - // the standard library, with the exception of the runtime package itself - "abort": true, - 
"aeshashbody": true, - "args": true, - "asminit": true, - "badctxt": true, - "badmcall2": true, - "badmcall": true, - "badmorestackg0": true, - "badmorestackgsignal": true, - "badsignal2": true, - "callbackasm1": true, - "callCfunction": true, - "cgocallback_gofunc": true, - "cgocallbackg": true, - "checkgoarm": true, - "check": true, - "debugCallCheck": true, - "debugCallWrap": true, - "emptyfunc": true, - "entersyscall": true, - "exit": true, - "exits": true, - "exitsyscall": true, - "externalthreadhandler": true, - "findnull": true, - "goexit1": true, - "gostring": true, - "i386_set_ldt": true, - "_initcgo": true, - "init_thread_tls": true, - "ldt0setup": true, - "libpreinit": true, - "load_g": true, - "morestack": true, - "mstart": true, - "nacl_sysinfo": true, - "nanotimeQPC": true, - "nanotime": true, - "newosproc0": true, - "newproc": true, - "newstack": true, - "noted": true, - "nowQPC": true, - "osinit": true, - "printf": true, - "racecallback": true, - "reflectcallmove": true, - "reginit": true, - "rt0_go": true, - "save_g": true, - "schedinit": true, - "setldt": true, - "settls": true, - "sighandler": true, - "sigprofNonGo": true, - "sigtrampgo": true, - "_sigtramp": true, - "sigtramp": true, - "stackcheck": true, - "syscall_chdir": true, - "syscall_chroot": true, - "syscall_close": true, - "syscall_dup2": true, - "syscall_execve": true, - "syscall_exit": true, - "syscall_fcntl": true, - "syscall_forkx": true, - "syscall_gethostname": true, - "syscall_getpid": true, - "syscall_ioctl": true, - "syscall_pipe": true, - "syscall_rawsyscall6": true, - "syscall_rawSyscall6": true, - "syscall_rawsyscall": true, - "syscall_RawSyscall": true, - "syscall_rawsysvicall6": true, - "syscall_setgid": true, - "syscall_setgroups": true, - "syscall_setpgid": true, - "syscall_setsid": true, - "syscall_setuid": true, - "syscall_syscall6": true, - "syscall_syscall": true, - "syscall_Syscall": true, - "syscall_sysvicall6": true, - "syscall_wait4": true, - "syscall_write": true, - "traceback": true, - "tstart": true, - "usplitR0": true, - "wbBufFlush": true, - "write": true, -} - -type pkg struct { - Fset *token.FileSet - Files []*ast.File - Pkg *types.Package - TypesInfo *types.Info - TypesSizes types.Sizes - IR *ir.Package - SrcFuncs []*ir.Function - Directives []lint.Directive -} - // TODO(dh): should we return a map instead of two slices? 
type Result struct { - Used []types.Object - Unused []types.Object -} - -type SerializedResult struct { - Used []SerializedObject - Unused []SerializedObject + Used []Object + Unused []Object + Quiet []Object } var Analyzer = &lint.Analyzer{ @@ -433,483 +175,326 @@ var Analyzer = &lint.Analyzer{ Name: "U1000", Doc: "Unused code", Run: run, - Requires: []*analysis.Analyzer{buildir.Analyzer, generated.Analyzer, directives.Analyzer}, + Requires: []*analysis.Analyzer{generated.Analyzer, directives.Analyzer}, ResultType: reflect.TypeOf(Result{}), }, } -type SerializedObject struct { - Name string - Position token.Position - DisplayPosition token.Position - Kind string - InGenerated bool -} - -func typString(obj types.Object) string { - switch obj := obj.(type) { - case *types.Func: - return "func" - case *types.Var: - if obj.IsField() { - return "field" - } - return "var" - case *types.Const: - return "const" - case *types.TypeName: - if _, ok := obj.Type().(*typeparams.TypeParam); ok { - return "type param" - } else { - return "type" - } - default: - return "identifier" - } +func newGraph( + fset *token.FileSet, + files []*ast.File, + pkg *types.Package, + info *types.Info, + directives []lint.Directive, + generated map[string]generated.Generator, + opts Options, +) *graph { + g := graph{ + pkg: pkg, + info: info, + files: files, + directives: directives, + generated: generated, + fset: fset, + nodes: []Node{{}}, + edges: map[edge]struct{}{}, + objects: map[types.Object]NodeID{}, + opts: opts, + } + + return &g } -func Serialize(pass *analysis.Pass, res Result, fset *token.FileSet) SerializedResult { - // OPT(dh): there's no point in serializing Used objects that are - // always used, such as exported names, blank identifiers, or - // anonymous struct fields. Used only exists to overrule Unused of - // a different package. If something can never be unused, then its - // presence in Used is useless. - // - // I'm not sure if this should happen when serializing, or when - // returning Result. - - out := SerializedResult{ - Used: make([]SerializedObject, len(res.Used)), - Unused: make([]SerializedObject, len(res.Unused)), - } - for i, obj := range res.Used { - out.Used[i] = serializeObject(pass, fset, obj) - } - for i, obj := range res.Unused { - out.Unused[i] = serializeObject(pass, fset, obj) - } - return out -} +func run(pass *analysis.Pass) (interface{}, error) { + g := newGraph( + pass.Fset, + pass.Files, + pass.Pkg, + pass.TypesInfo, + pass.ResultOf[directives.Analyzer].([]lint.Directive), + pass.ResultOf[generated.Analyzer].(map[string]generated.Generator), + DefaultOptions, + ) + g.entry() -func serializeObject(pass *analysis.Pass, fset *token.FileSet, obj types.Object) SerializedObject { - name := obj.Name() - if sig, ok := obj.Type().(*types.Signature); ok && sig.Recv() != nil { - switch sig.Recv().Type().(type) { - case *types.Named, *types.Pointer: - typ := types.TypeString(sig.Recv().Type(), func(*types.Package) string { return "" }) - if len(typ) > 0 && typ[0] == '*' { - name = fmt.Sprintf("(%s).%s", typ, obj.Name()) - } else if len(typ) > 0 { - name = fmt.Sprintf("%s.%s", typ, obj.Name()) - } - } - } - return SerializedObject{ - Name: name, - Position: fset.PositionFor(obj.Pos(), false), - DisplayPosition: report.DisplayPosition(fset, obj.Pos()), - Kind: typString(obj), - InGenerated: code.IsGenerated(pass, obj.Pos()), + sg := &SerializedGraph{ + nodes: g.nodes, } -} -func debugf(f string, v ...interface{}) { if Debug != nil { - fmt.Fprintf(Debug, f, v...) 
+ Debug.Write([]byte(sg.Dot())) } -} -func run(pass *analysis.Pass) (interface{}, error) { - irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR) - dirs := pass.ResultOf[directives.Analyzer].([]lint.Directive) - pkg := &pkg{ - Fset: pass.Fset, - Files: pass.Files, - Pkg: pass.Pkg, - TypesInfo: pass.TypesInfo, - TypesSizes: pass.TypesSizes, - IR: irpkg.Pkg, - SrcFuncs: irpkg.SrcFuncs, - Directives: dirs, - } + return sg.Results(), nil +} - g := newGraph() - g.entry(pkg) - used, unused := results(g) +type Options struct { + FieldWritesAreUses bool + PostStatementsAreReads bool + ExportedIsUsed bool + ExportedFieldsAreUsed bool + ParametersAreUsed bool + LocalVariablesAreUsed bool + GeneratedIsUsed bool +} - if Debug != nil { - debugNode := func(n *node) { - if n.obj == nil { - debugf("n%d [label=\"Root\"];\n", n.id) - } else { - color := "red" - if n.seen { - color = "green" - } - debugf("n%d [label=%q, color=%q];\n", n.id, fmt.Sprintf("(%T) %s", n.obj, n.obj), color) - } - for _, e := range n.used { - for i := edgeKind(1); i < 64; i++ { - if e.kind.is(1 << i) { - debugf("n%d -> n%d [label=%q];\n", n.id, e.node.id, edgeKind(1< 0 && typ[0] == '*' { + name = fmt.Sprintf("(%s).%s", typ, obj.Name()) + } else if len(typ) > 0 { + name = fmt.Sprintf("%s.%s", typ, obj.Name()) + } + } + } + return Object{ + Name: name, + ShortName: obj.Name(), + Kind: typString(obj), + Path: path, + Position: g.fset.PositionFor(obj.Pos(), false), + DisplayPosition: report.DisplayPosition(g.fset, obj.Pos()), } - g.Root = g.newNode(nil) - return g } -func (g *graph) newPointer(typ types.Type) *types.Pointer { - if p, ok := g.pointers[typ]; ok { - return p - } else { - p := types.NewPointer(typ) - g.pointers[typ] = p - g.see(p) - return p +func typString(obj types.Object) string { + switch obj := obj.(type) { + case *types.Func: + return "func" + case *types.Var: + if obj.IsField() { + return "field" + } + return "var" + case *types.Const: + return "const" + case *types.TypeName: + if _, ok := obj.Type().(*types.TypeParam); ok { + return "type param" + } else { + return "type" + } + default: + return "identifier" } } -func (g *graph) color(root *node) { - if root.seen { - return +func (g *graph) newNode(obj types.Object) NodeID { + id := NodeID(len(g.nodes)) + n := Node{ + id: id, + obj: g.objectToObject(obj), } - root.seen = true - for _, e := range root.used { - g.color(e.node) + g.nodes = append(g.nodes, n) + if _, ok := g.objects[obj]; ok { + panic(fmt.Sprintf("already had a node for %s", obj)) } + g.objects[obj] = id + return id } -type constGroup struct { - // give the struct a size to get unique pointers - _ byte -} - -func (constGroup) String() string { return "const group" } - -type edge struct { - node *node - kind edgeKind -} - -type node struct { - obj interface{} - id uint64 - - // OPT(dh): evaluate using a map instead of a slice to avoid - // duplicate edges. 
- used []edge - - // set during final graph walk if node is reachable - seen bool - quiet bool -} - -func (g *graph) nodeMaybe(obj types.Object) (*node, bool) { - if node, ok := g.Nodes[obj]; ok { - return node, true +func (g *graph) node(obj types.Object) NodeID { + if obj == nil { + return 0 + } + obj = origin(obj) + if n, ok := g.objects[obj]; ok { + return n } - return nil, false + n := g.newNode(obj) + return n } -func (g *graph) node(obj interface{}) (n *node, new bool) { +func origin(obj types.Object) types.Object { switch obj := obj.(type) { - case types.Type: - if v := g.TypeNodes[obj]; v != nil { - return v, false - } - n = g.newNode(obj) - g.TypeNodes[obj] = n - return n, true - case types.Object: - // OPT(dh): the types.Object and default cases are identical - if node, ok := g.Nodes[obj]; ok { - return node, false - } - - n = g.newNode(obj) - g.Nodes[obj] = n - return n, true + case *types.Var: + return obj.Origin() + case *types.Func: + return obj.Origin() default: - if node, ok := g.Nodes[obj]; ok { - return node, false - } - - n = g.newNode(obj) - g.Nodes[obj] = n - return n, true + return obj } } -func (g *graph) newNode(obj interface{}) *node { - g.nodeCounter++ - return &node{ - obj: obj, - id: g.nodeCounter, +func (g *graph) addEdge(e edge) bool { + if _, ok := g.edges[e]; ok { + return false } + g.edges[e] = struct{}{} + return true } -func (n *node) use(n2 *node, kind edgeKind) { - assert(n2 != nil) - n.used = append(n.used, edge{node: n2, kind: kind}) +func (g *graph) addOwned(owner, owned NodeID) { + e := edge{owner, owned, edgeKindOwn} + if !g.addEdge(e) { + return + } + n := &g.nodes[owner] + n.owns = append(n.owns, owned) } -// isIrrelevant reports whether an object's presence in the graph is -// of any relevance. A lot of objects will never have outgoing edges, -// nor meaningful incoming ones. Examples are basic types and empty -// signatures, among many others. -// -// Dropping these objects should have no effect on correctness, but -// may improve performance. It also helps with debugging, as it -// greatly reduces the size of the graph. 
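The new addEdge/addOwned/addUse trio answers the old OPT note about duplicate edges by recording every (from, to, kind) triple in a set before appending to the adjacency slices. A self-contained sketch of that pattern, with names that parallel, but are not copied from, this diff:

package main

import "fmt"

type NodeID int

type edgeKind uint8

const (
	edgeKindUse edgeKind = iota
	edgeKindOwn
)

type edge struct {
	from, to NodeID
	kind     edgeKind
}

// edgeSet records each (from, to, kind) triple exactly once, so adjacency
// lists stay free of duplicates no matter how often a relationship is seen.
type edgeSet map[edge]struct{}

func (s edgeSet) add(e edge) bool {
	if _, ok := s[e]; ok {
		return false // already recorded; the caller skips the append
	}
	s[e] = struct{}{}
	return true
}

func main() {
	s := edgeSet{}
	fmt.Println(s.add(edge{1, 2, edgeKindUse})) // true: first sighting
	fmt.Println(s.add(edge{1, 2, edgeKindUse})) // false: duplicate suppressed
}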
-func isIrrelevant(obj interface{}) bool { - if obj, ok := obj.(types.Object); ok { - switch obj := obj.(type) { - case *types.Var: - if obj.IsField() { - // We need to track package fields - return false - } - if obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope() { - // We need to track package-level variables - return false - } - return isIrrelevant(obj.Type()) - default: - return false - } - } - if T, ok := obj.(types.Type); ok { - switch T := T.(type) { - case *types.Array: - return isIrrelevant(T.Elem()) - case *types.Slice: - return isIrrelevant(T.Elem()) - case *types.Basic: - return true - case *types.Tuple: - for i := 0; i < T.Len(); i++ { - if !isIrrelevant(T.At(i).Type()) { - return false - } - } - return true - case *types.Signature: - if T.Recv() != nil { - return false - } - for i := 0; i < T.Params().Len(); i++ { - if !isIrrelevant(T.Params().At(i)) { - return false - } - } - for i := 0; i < T.Results().Len(); i++ { - if !isIrrelevant(T.Results().At(i)) { - return false - } - } - return true - case *types.Interface: - return T.NumMethods() == 0 && T.NumEmbeddeds() == 0 - case *types.Pointer: - return isIrrelevant(T.Elem()) - case *types.Map: - return isIrrelevant(T.Key()) && isIrrelevant(T.Elem()) - case *types.Struct: - return T.NumFields() == 0 - case *types.Chan: - return isIrrelevant(T.Elem()) - default: - return false - } +func (g *graph) addUse(by, used NodeID) { + e := edge{by, used, edgeKindUse} + if !g.addEdge(e) { + return } - return false + nBy := &g.nodes[by] + nBy.uses = append(nBy.uses, used) } -func (g *graph) see(obj interface{}) *node { - if isIrrelevant(obj) { - return nil +func (g *graph) see(obj, owner types.Object) { + if obj == nil { + panic("saw nil object") } - assert(obj != nil) - - if fn, ok := obj.(*types.Func); ok { - obj = typeparams.OriginMethod(fn) - } - if t, ok := obj.(*types.Named); ok { - obj = typeparams.NamedTypeOrigin(t) + if g.opts.ExportedIsUsed && obj.Pkg() != g.pkg || obj.Pkg() == nil { + return } - // add new node to graph - node, _ := g.node(obj) - - if p, ok := obj.(*types.Pointer); ok { - if pt, ok := g.pointers[p.Elem()]; ok { - // We've used graph.newPointer before we saw this pointer; add an edge that marks the two pointers as being - // identical - if p != pt { - g.use(p, pt, edgeSamePointer) - g.use(pt, p, edgeSamePointer) - } - } else { - g.pointers[p.Elem()] = p - } + nObj := g.node(obj) + if owner != nil { + nOwner := g.node(owner) + g.addOwned(nOwner, nObj) } - - return node } -func (g *graph) use(used, by interface{}, kind edgeKind) { - if isIrrelevant(used) { - return +func isIrrelevant(obj types.Object) bool { + switch obj.(type) { + case *types.PkgName: + return true + default: + return false } +} - assert(used != nil) - if obj, ok := by.(types.Object); ok && obj.Pkg() != nil { - if obj.Pkg() != g.pkg.Pkg { +func (g *graph) use(used, by types.Object) { + if g.opts.ExportedIsUsed { + if used.Pkg() != g.pkg || used.Pkg() == nil { + return + } + if by != nil && by.Pkg() != g.pkg { return } } - if fn, ok := used.(*types.Func); ok { - used = typeparams.OriginMethod(fn) - } - if fn, ok := by.(*types.Func); ok { - by = typeparams.OriginMethod(fn) - } - - if t, ok := used.(*types.Named); ok { - used = typeparams.NamedTypeOrigin(t) - } - if t, ok := by.(*types.Named); ok { - by = typeparams.NamedTypeOrigin(t) - } - - usedNode, new := g.node(used) - assert(!new) - if by == nil { - g.Root.use(usedNode, kind) - } else { - byNode, new := g.node(by) - assert(!new) - byNode.use(usedNode, kind) + if isIrrelevant(used) { + return 
} -} -func (g *graph) seeAndUse(used, by interface{}, kind edgeKind) *node { - n := g.see(used) - g.use(used, by, kind) - return n + nUsed := g.node(used) + nBy := g.node(by) + g.addUse(nBy, nUsed) } -func (g *graph) entry(pkg *pkg) { - g.pkg = pkg - scopes := map[*types.Scope]*ir.Function{} - for _, fn := range pkg.SrcFuncs { - if fn.Object() != nil { - scope := fn.Object().(*types.Func).Scope() - scopes[scope] = fn - } - } - - for _, f := range pkg.Files { +func (g *graph) entry() { + for _, f := range g.files { for _, cg := range f.Comments { for _, c := range cg.List { if strings.HasPrefix(c.Text, "//go:linkname ") { @@ -921,300 +506,69 @@ func (g *graph) entry(pkg *pkg) { // (1.8) packages use symbols linked via go:linkname fields := strings.Fields(c.Text) if len(fields) == 3 { - if m, ok := pkg.IR.Members[fields[1]]; ok { - var obj types.Object - switch m := m.(type) { - case *ir.Global: - obj = m.Object() - case *ir.Function: - obj = m.Object() - default: - panic(fmt.Sprintf("unhandled type: %T", m)) - } - assert(obj != nil) - g.seeAndUse(obj, nil, edgeLinkname) + obj := g.pkg.Scope().Lookup(fields[1]) + if obj == nil { + continue } + g.use(obj, nil) } } } } } - surroundingFunc := func(obj types.Object) *ir.Function { - scope := obj.Parent() - for scope != nil { - if fn := scopes[scope]; fn != nil { - return fn - } - scope = scope.Parent() + for _, f := range g.files { + for _, decl := range f.Decls { + g.decl(decl, nil) } - return nil } - // IR form won't tell us about locally scoped types that aren't - // being used. Walk the list of Defs to get all named types. - // - // IR form also won't tell us about constants; use Defs and Uses - // to determine which constants exist and which are being used. - for _, obj := range pkg.TypesInfo.Defs { - switch obj := obj.(type) { - case *types.TypeName: - // types are being handled by walking the AST - case *types.Const: - g.see(obj) - fn := surroundingFunc(obj) - if fn == nil && obj.Exported() { - // (1.4) packages use exported constants - g.use(obj, nil, edgeExportedConstant) + if g.opts.GeneratedIsUsed { + // OPT(dh): depending on the options used, we do not need to track all objects. For example, if local variables + // are always used, then it is enough to use their surrounding function. 
+ for obj := range g.objects { + path := g.fset.PositionFor(obj.Pos(), false).Filename + if _, ok := g.generated[path]; ok { + g.use(obj, nil) } - g.typ(obj.Type(), nil) - g.seeAndUse(obj.Type(), obj, edgeType) } } - // Find constants being used inside functions, find sinks in tests - for _, fn := range pkg.SrcFuncs { - if fn.Object() != nil { - g.see(fn.Object()) - } - n := fn.Source() - if n == nil { - continue - } - ast.Inspect(n, func(n ast.Node) bool { - switch n := n.(type) { - case *ast.Ident: - obj, ok := pkg.TypesInfo.Uses[n] - if !ok { - return true - } - switch obj := obj.(type) { - case *types.Const: - g.seeAndUse(obj, owningObject(fn), edgeUsedConstant) - } - case *ast.AssignStmt: - for _, expr := range n.Lhs { - ident, ok := expr.(*ast.Ident) - if !ok { - continue - } - obj := pkg.TypesInfo.ObjectOf(ident) - if obj == nil { - continue - } - path := pkg.Fset.File(obj.Pos()).Name() - if strings.HasSuffix(path, "_test.go") { - if obj.Parent() != nil && obj.Parent().Parent() != nil && obj.Parent().Parent().Parent() == nil { - // object's scope is the package, whose - // parent is the file, whose parent is nil - - // (4.9) functions use package-level variables they assign to iff in tests (sinks for benchmarks) - // (9.7) variable _reads_ use variables, writes do not, except in tests - g.seeAndUse(obj, owningObject(fn), edgeTestSink) - } - } + processMethodSet := func(named *types.TypeName, ms *types.MethodSet) { + if g.opts.ExportedIsUsed { + for i := 0; i < ms.Len(); i++ { + m := ms.At(i) + if token.IsExported(m.Obj().Name()) { + // (2.1) named types use exported methods + // (6.4) structs use embedded fields that have exported methods + // + // By reading the selection, we read all embedded fields that are part of the path + g.readSelection(m, named) } } - - return true - }) - } - // Find constants being used in non-function contexts - for _, obj := range pkg.TypesInfo.Uses { - _, ok := obj.(*types.Const) - if !ok { - continue } - g.seeAndUse(obj, nil, edgeUsedConstant) - } - var fns []*types.Func - var fn *types.Func - var stack []ast.Node - for _, f := range pkg.Files { - ast.Inspect(f, func(n ast.Node) bool { - if n == nil { - pop := stack[len(stack)-1] - stack = stack[:len(stack)-1] - if _, ok := pop.(*ast.FuncDecl); ok { - fns = fns[:len(fns)-1] - if len(fns) == 0 { - fn = nil - } else { - fn = fns[len(fns)-1] - } - } - return true - } - stack = append(stack, n) - switch n := n.(type) { - case *ast.FuncDecl: - fn = pkg.TypesInfo.ObjectOf(n.Name).(*types.Func) - fns = append(fns, fn) - g.see(fn) - case *ast.GenDecl: - switch n.Tok { - case token.CONST: - groups := astutil.GroupSpecs(pkg.Fset, n.Specs) - for _, specs := range groups { - if len(specs) > 1 { - cg := &constGroup{} - g.see(cg) - for _, spec := range specs { - for _, name := range spec.(*ast.ValueSpec).Names { - obj := pkg.TypesInfo.ObjectOf(name) - // (10.1) const groups - g.seeAndUse(obj, cg, edgeConstGroup) - g.use(cg, obj, edgeConstGroup) - } - } - } - } - case token.VAR: - for _, spec := range n.Specs { - v := spec.(*ast.ValueSpec) - for _, name := range v.Names { - T := pkg.TypesInfo.TypeOf(name) - if fn != nil { - g.seeAndUse(T, fn, edgeVarDecl) - } else { - // TODO(dh): we likely want to make - // the type used by the variable, not - // the package containing the - // variable. But then we have to take - // special care of blank identifiers. 
- g.seeAndUse(T, nil, edgeVarDecl) - } - g.typ(T, nil) - } - } - case token.TYPE: - for _, spec := range n.Specs { - // go/types doesn't provide a way to go from a - // types.Named to the named type it was based on - // (the t1 in type t2 t1). Therefore we walk the - // AST and process GenDecls. - // - // (2.2) named types use the type they're based on - v := spec.(*ast.TypeSpec) - T := pkg.TypesInfo.TypeOf(v.Type) - obj := pkg.TypesInfo.ObjectOf(v.Name) - g.see(obj) - g.see(T) - g.use(T, obj, edgeType) - g.typ(obj.Type(), nil) - g.typ(T, nil) - - if v.Assign != 0 { - aliasFor := obj.(*types.TypeName).Type() - // (2.3) named types use all their aliases. we can't easily track uses of aliases - if isIrrelevant(aliasFor) { - // We do not track the type this is an - // alias for (for example builtins), so - // just mark the alias used. - // - // FIXME(dh): what about aliases declared inside functions? - g.use(obj, nil, edgeAlias) - } else { - g.see(aliasFor) - g.seeAndUse(obj, aliasFor, edgeAlias) - } - } + if _, ok := named.Type().Underlying().(*types.Interface); !ok { + // (8.0) handle interfaces + // + // We don't care about interfaces implementing interfaces; all their methods are already used, anyway + for _, iface := range g.interfaceTypes { + if sels, ok := implements(named.Type(), iface, ms); ok { + for _, sel := range sels { + // (8.2) any concrete type implements all known interfaces + // (6.3) structs use embedded fields that help implement interfaces + g.readSelection(sel, named) } } } - return true - }) + } } - for _, m := range pkg.IR.Members { - switch m := m.(type) { - case *ir.NamedConst: - // nothing to do, we collect all constants from Defs - case *ir.Global: - if m.Object() != nil { - g.see(m.Object()) - if m.Object().Exported() { - // (1.3) packages use exported variables - g.use(m.Object(), nil, edgeExportedVariable) - } - } - case *ir.Function: - mObj := owningObject(m) - if mObj != nil { - g.see(mObj) - } - //lint:ignore SA9003 handled implicitly - if m.Name() == "init" { - // (1.5) packages use init functions - // - // This is handled implicitly. The generated init - // function has no object, thus everything in it will - // be owned by the package. - } - // This branch catches top-level functions, not methods. - if m.Object() != nil && m.Object().Exported() { - // (1.2) packages use exported functions - g.use(mObj, nil, edgeExportedFunction) - } - if m.Name() == "main" && pkg.Pkg.Name() == "main" { - // (1.7) packages use the main function iff in the main package - g.use(mObj, nil, edgeMainFunction) - } - if pkg.Pkg.Path() == "runtime" && runtimeFuncs[m.Name()] { - // (9.8) runtime functions that may be called from user code via the compiler - g.use(mObj, nil, edgeRuntimeFunction) - } - if m.Source() != nil { - doc := m.Source().(*ast.FuncDecl).Doc - if doc != nil { - for _, cmt := range doc.List { - if strings.HasPrefix(cmt.Text, "//go:cgo_export_") { - // (1.6) packages use functions exported to cgo - g.use(mObj, nil, edgeCgoExported) - } - } - } - } - g.function(m) - case *ir.Type: - g.see(m.Object()) - if m.Object().Exported() { - // (1.1) packages use exported named types - g.use(m.Object(), nil, edgeExportedType) - } - g.typ(m.Type(), nil) - default: - panic(fmt.Sprintf("unreachable: %T", m)) - } - } - - // OPT(dh): can we find meaningful initial capacities for these slices? 
- var ifaces []*types.Interface - var notIfaces []types.Type - - for t := range g.seenTypes { - switch t := t.(type) { - case *types.Interface: - // OPT(dh): (8.1) we only need interfaces that have unexported methods - ifaces = append(ifaces, t) - default: - if _, ok := t.Underlying().(*types.Interface); !ok { - notIfaces = append(notIfaces, t) - } - } - } + for _, named := range g.namedTypes { + // OPT(dh): do we already have the method set available? + processMethodSet(named, types.NewMethodSet(named.Type())) + processMethodSet(named, types.NewMethodSet(types.NewPointer(named.Type()))) - // (8.0) handle interfaces - for _, t := range notIfaces { - ms := pkg.IR.Prog.MethodSets.MethodSet(t) - for _, iface := range ifaces { - if sels, ok := g.implements(t, iface, ms); ok { - for _, sel := range sels { - g.useMethod(t, sel, t, edgeImplements) - } - } - } } type ignoredKey struct { @@ -1222,7 +576,7 @@ func (g *graph) entry(pkg *pkg) { line int } ignores := map[ignoredKey]struct{}{} - for _, dir := range pkg.Directives { + for _, dir := range g.directives { if dir.Command != "ignore" && dir.Command != "file-ignore" { continue } @@ -1231,7 +585,7 @@ func (g *graph) entry(pkg *pkg) { } for _, check := range strings.Split(dir.Arguments[0], ",") { if check == "U1000" { - pos := pkg.Fset.PositionFor(dir.Node.Pos(), false) + pos := g.fset.PositionFor(dir.Node.Pos(), false) var key ignoredKey switch dir.Command { case "ignore": @@ -1254,46 +608,43 @@ func (g *graph) entry(pkg *pkg) { if len(ignores) > 0 { // all objects annotated with a //lint:ignore U1000 are considered used - for obj := range g.Nodes { - if obj, ok := obj.(types.Object); ok { - pos := pkg.Fset.PositionFor(obj.Pos(), false) - key1 := ignoredKey{ - pos.Filename, - pos.Line, - } - key2 := ignoredKey{ - pos.Filename, - -1, - } - _, ok := ignores[key1] - if !ok { - _, ok = ignores[key2] - } - if ok { - g.use(obj, nil, edgeIgnored) - - // use methods and fields of ignored types - if obj, ok := obj.(*types.TypeName); ok { - if obj.IsAlias() { - if typ, ok := obj.Type().(*types.Named); ok && typ.Obj().Pkg() != obj.Pkg() { - // This is an alias of a named type in another package. - // Don't walk its fields or methods; we don't have to, - // and it breaks an assertion in graph.use because we're using an object that we haven't seen before. - // - // For aliases to types in the same package, we do want to ignore the fields and methods, - // because ignoring the alias should ignore the aliased type. - continue - } + for obj := range g.objects { + pos := g.fset.PositionFor(obj.Pos(), false) + key1 := ignoredKey{ + pos.Filename, + pos.Line, + } + key2 := ignoredKey{ + pos.Filename, + -1, + } + _, ok := ignores[key1] + if !ok { + _, ok = ignores[key2] + } + if ok { + g.use(obj, nil) + + // use methods and fields of ignored types + if obj, ok := obj.(*types.TypeName); ok { + if obj.IsAlias() { + if typ, ok := obj.Type().(*types.Named); ok && (g.opts.ExportedIsUsed && typ.Obj().Pkg() != obj.Pkg() || typ.Obj().Pkg() == nil) { + // This is an alias of a named type in another package. + // Don't walk its fields or methods; we don't have to. + // + // For aliases to types in the same package, we do want to ignore the fields and methods, + // because ignoring the alias should ignore the aliased type. 
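For reference, the directives collected here look like the following in user code. A //lint:ignore is keyed by its exact file and line, while //lint:file-ignore is stored with line -1 so it matches the entire file. A hypothetical example of the line-scoped form:

package demo

//lint:ignore U1000 kept around for interactive debugging sessions
func dump() {}

Per the ignore handling above, the annotated object is marked used, and for ignored types the same treatment extends to their methods and struct fields.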
+ continue } - if typ, ok := obj.Type().(*types.Named); ok { - for i := 0; i < typ.NumMethods(); i++ { - g.use(typ.Method(i), nil, edgeIgnored) - } + } + if typ, ok := obj.Type().(*types.Named); ok { + for i := 0; i < typ.NumMethods(); i++ { + g.use(typ.Method(i), nil) } - if typ, ok := obj.Type().Underlying().(*types.Struct); ok { - for i := 0; i < typ.NumFields(); i++ { - g.use(typ.Field(i), nil, edgeIgnored) - } + } + if typ, ok := obj.Type().Underlying().(*types.Struct); ok { + for i := 0; i < typ.NumFields(); i++ { + g.use(typ.Field(i), nil) } } } @@ -1302,497 +653,792 @@ func (g *graph) entry(pkg *pkg) { } } -func (g *graph) useMethod(t types.Type, sel *types.Selection, by interface{}, kind edgeKind) { - obj := sel.Obj().(*types.Func) - path := sel.Index() - assert(obj != nil) - if len(path) > 1 { - base := typeutil.Dereference(t).Underlying().(*types.Struct) - for _, idx := range path[:len(path)-1] { - next := base.Field(idx) - // (6.3) structs use embedded fields that help implement interfaces - g.see(base) - g.seeAndUse(next, base, edgeProvidesMethod) - base, _ = typeutil.Dereference(next.Type()).Underlying().(*types.Struct) - } - } - g.seeAndUse(obj, by, kind) -} - -func owningObject(fn *ir.Function) types.Object { - if fn.Object() != nil { - return fn.Object() - } - if fn.Parent() != nil { - return owningObject(fn.Parent()) - } - return nil +func isOfType[T any](x any) bool { + _, ok := x.(T) + return ok } -func (g *graph) function(fn *ir.Function) { - assert(fn != nil) - if fn.Package() != nil && fn.Package() != g.pkg.IR { +func (g *graph) read(node ast.Node, by types.Object) { + if node == nil { return } - if _, ok := g.seenFns[fn]; ok { - return - } - g.seenFns[fn] = struct{}{} + switch node := node.(type) { + case *ast.Ident: + // Among many other things, this handles + // (7.1) field accesses use fields - // (4.1) functions use all their arguments, return parameters and receivers - g.signature(fn.Signature, owningObject(fn)) - g.instructions(fn) - for _, anon := range fn.AnonFuncs { - // (4.2) functions use anonymous functions defined beneath them - // - // This fact is expressed implicitly. Anonymous functions have - // no types.Object, so their owner is the surrounding - // function. 
- g.function(anon) - } -} + obj := g.info.ObjectOf(node) + g.use(obj, by) -func (g *graph) typ(t types.Type, parent types.Type) { - if _, ok := g.seenTypes[t]; ok { - return - } + case *ast.BasicLit: + // Nothing to do - if t, ok := t.(*types.Named); ok && t.Obj().Pkg() != nil { - if t.Obj().Pkg() != g.pkg.Pkg { - return + case *ast.SliceExpr: + g.read(node.X, by) + g.read(node.Low, by) + g.read(node.High, by) + g.read(node.Max, by) + + case *ast.UnaryExpr: + g.read(node.X, by) + + case *ast.ParenExpr: + g.read(node.X, by) + + case *ast.ArrayType: + g.read(node.Len, by) + g.read(node.Elt, by) + + case *ast.SelectorExpr: + g.readSelectorExpr(node, by) + + case *ast.IndexExpr: + // Among many other things, this handles + // (2.6) named types use all their type arguments + g.read(node.X, by) + g.read(node.Index, by) + + case *ast.IndexListExpr: + // Among many other things, this handles + // (2.6) named types use all their type arguments + g.read(node.X, by) + for _, index := range node.Indices { + g.read(index, by) } - } - g.seenTypes[t] = struct{}{} - if isIrrelevant(t) { - return - } - - g.see(t) - switch t := t.(type) { - case *types.Struct: - for i := 0; i < t.NumFields(); i++ { - g.see(t.Field(i)) - if t.Field(i).Exported() { - // (6.2) structs use exported fields - g.use(t.Field(i), t, edgeExportedField) - } else if t.Field(i).Name() == "_" { - g.use(t.Field(i), t, edgeBlankField) - } else if isNoCopyType(t.Field(i).Type()) { - // (6.1) structs use fields of type NoCopy sentinel - g.use(t.Field(i), t, edgeNoCopySentinel) - } else if parent == nil { - // (11.1) anonymous struct types use all their fields. - g.use(t.Field(i), t, edgeAnonymousStruct) + case *ast.BinaryExpr: + g.read(node.X, by) + g.read(node.Y, by) + + case *ast.CompositeLit: + g.read(node.Type, by) + // We get the type of the node itself, not of node.Type, to handle nested composite literals of the kind + // T{{...}} + typ, isStruct := typeutil.CoreType(g.info.TypeOf(node)).(*types.Struct) + + if isStruct { + unkeyed := len(node.Elts) != 0 && !isOfType[*ast.KeyValueExpr](node.Elts[0]) + if g.opts.FieldWritesAreUses && unkeyed { + // Untagged struct literal that specifies all fields. We have to manually use the fields in the type, + // because the unkeyd literal doesn't contain any nodes referring to the fields. + for i := 0; i < typ.NumFields(); i++ { + g.use(typ.Field(i), by) + } } - if t.Field(i).Anonymous() { - // does the embedded field contribute exported methods to the method set? 
- T := t.Field(i).Type() - if _, ok := T.Underlying().(*types.Pointer); !ok { - // An embedded field is addressable, so check - // the pointer type to get the full method set - T = g.newPointer(T) + if g.opts.FieldWritesAreUses || unkeyed { + for _, elt := range node.Elts { + g.read(elt, by) } - ms := g.pkg.IR.Prog.MethodSets.MethodSet(T) - for j := 0; j < ms.Len(); j++ { - if ms.At(j).Obj().Exported() { - // (6.4) structs use embedded fields that have exported methods (recursively) - g.use(t.Field(i), t, edgeExtendsExportedMethodSet) - break - } + } else { + for _, elt := range node.Elts { + kv := elt.(*ast.KeyValueExpr) + g.write(kv.Key, by) + g.read(kv.Value, by) } + } + } else { + for _, elt := range node.Elts { + g.read(elt, by) + } + } - seen := map[*types.Struct]struct{}{} - var hasExportedField func(t types.Type) bool - hasExportedField = func(T types.Type) bool { - t, ok := typeutil.Dereference(T).Underlying().(*types.Struct) - if !ok { - return false - } - if _, ok := seen[t]; ok { - return false - } - seen[t] = struct{}{} - for i := 0; i < t.NumFields(); i++ { - field := t.Field(i) - if field.Exported() { - return true - } - if field.Embedded() && hasExportedField(field.Type()) { - return true - } - } - return false - } - // does the embedded field contribute exported fields? - if hasExportedField(t.Field(i).Type()) { - // (6.5) structs use embedded structs that have exported fields (recursively) - g.use(t.Field(i), t, edgeExtendsExportedFields) - } + case *ast.KeyValueExpr: + g.read(node.Key, by) + g.read(node.Value, by) + + case *ast.StarExpr: + g.read(node.X, by) + + case *ast.MapType: + g.read(node.Key, by) + g.read(node.Value, by) + case *ast.FuncLit: + g.read(node.Type, by) + + // See graph.decl's handling of ast.FuncDecl for why this bit of code is necessary. + fn := g.info.TypeOf(node).(*types.Signature) + for params, i := fn.Params(), 0; i < params.Len(); i++ { + g.see(params.At(i), by) + if params.At(i).Name() == "" { + g.use(params.At(i), by) } - g.variable(t.Field(i)) } - case *types.Basic: - // Nothing to do - case *types.Named: - // (9.3) types use their underlying and element types - origin := typeparams.NamedTypeOrigin(t) - g.seeAndUse(origin.Underlying(), t, edgeUnderlyingType) - g.seeAndUse(t.Obj(), t, edgeTypeName) - g.seeAndUse(t, t.Obj(), edgeNamedType) - - // (2.4) named types use the pointer type - if _, ok := t.Underlying().(*types.Interface); !ok && t.NumMethods() > 0 { - g.seeAndUse(g.newPointer(origin), t, edgePointerType) + + g.block(node.Body, by) + + case *ast.FuncType: + m := map[*types.Var]struct{}{} + if !g.opts.ParametersAreUsed { + m = map[*types.Var]struct{}{} + // seeScope marks all local variables in the scope as used, but we don't want to unconditionally use + // parameters, as this is controlled by Options.ParametersAreUsed. Pass seeScope a list of variables it + // should skip. 
+ for _, f := range node.Params.List { + for _, name := range f.Names { + m[g.info.ObjectOf(name).(*types.Var)] = struct{}{} + } + } } + g.seeScope(node, by, m) - // (2.5) named types use their type parameters + // (4.1) functions use all their arguments, return parameters and receivers + // (12.1) type parameters use their constraint type + g.read(node.TypeParams, by) + if g.opts.ParametersAreUsed { + g.read(node.Params, by) + } + g.read(node.Results, by) - for i := 0; i < typeparams.ForNamed(t).Len(); i++ { - tparam := typeparams.ForNamed(t).At(i) - g.seeAndUse(tparam, t, edgeTypeParam) - g.typ(tparam, nil) + case *ast.FieldList: + if node == nil { + return } - // (2.6) named types use their type arguments - for i := 0; i < typeparams.NamedTypeArgs(t).Len(); i++ { - targ := typeparams.NamedTypeArgs(t).At(i) - g.seeAndUse(targ, t, edgeTypeArg) - g.typ(t, nil) + // This branch is only hit for field lists enclosed by parentheses or square brackets, i.e. parameters. Fields + // (for structs) and method lists (for interfaces) are handled elsewhere. + + for _, field := range node.List { + if len(field.Names) == 0 { + g.read(field.Type, by) + } else { + for _, name := range field.Names { + // OPT(dh): instead of by -> name -> type, we could just emit by -> type. We don't care about the + // (un)usedness of parameters of any kind. + obj := g.info.ObjectOf(name) + g.use(obj, by) + g.read(field.Type, obj) + } + } } - for i := 0; i < t.NumMethods(); i++ { - g.see(t.Method(i)) - // don't use trackExportedIdentifier here, we care about - // all exported methods, even in package main or in tests. - if t.Method(i).Exported() { - // (2.1) named types use exported methods - g.use(t.Method(i), t, edgeExportedMethod) + case *ast.ChanType: + g.read(node.Value, by) + + case *ast.StructType: + // This is only used for anonymous struct types, not named ones. + + for _, field := range node.Fields.List { + if len(field.Names) == 0 { + // embedded field + + f := g.embeddedField(field.Type, by) + g.use(f, by) + } else { + for _, name := range field.Names { + // (11.1) anonymous struct types use all their fields + // OPT(dh): instead of by -> name -> type, we could just emit by -> type. If the type is used, then the fields are used. 
+ obj := g.info.ObjectOf(name) + g.see(obj, by) + g.use(obj, by) + g.read(field.Type, g.info.ObjectOf(name)) + } } - g.function(g.pkg.IR.Prog.FuncValue(t.Method(i))) } - g.typ(origin.Underlying(), t) - case *types.Slice: - // (9.3) types use their underlying and element types - g.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(t.Elem(), nil) - case *types.Map: - // (9.3) types use their underlying and element types - g.seeAndUse(t.Elem(), t, edgeElementType) - // (9.3) types use their underlying and element types - g.seeAndUse(t.Key(), t, edgeKeyType) - g.typ(t.Elem(), nil) - g.typ(t.Key(), nil) - case *types.Signature: - g.signature(t, nil) - case *types.Interface: - for i := 0; i < t.NumMethods(); i++ { - m := t.Method(i) - // (8.3) All interface methods are marked as used - g.seeAndUse(m, t, edgeInterfaceMethod) - g.seeAndUse(m.Type().(*types.Signature), m, edgeSignature) - g.signature(m.Type().(*types.Signature), nil) + case *ast.TypeAssertExpr: + g.read(node.X, by) + g.read(node.Type, by) + + case *ast.InterfaceType: + if len(node.Methods.List) != 0 { + g.interfaceTypes = append(g.interfaceTypes, g.info.TypeOf(node).(*types.Interface)) } - for i := 0; i < t.NumEmbeddeds(); i++ { - tt := t.EmbeddedType(i) - // (8.4) All embedded interfaces are marked as used - g.typ(tt, nil) - g.seeAndUse(tt, t, edgeEmbeddedInterface) + for _, meth := range node.Methods.List { + switch len(meth.Names) { + case 0: + // Embedded type or type union + // (8.4) all embedded interfaces are marked as used + // (this also covers type sets) + + g.read(meth.Type, by) + case 1: + // Method + // (8.3) all interface methods are marked as used + obj := g.info.ObjectOf(meth.Names[0]) + g.see(obj, by) + g.use(obj, by) + g.read(meth.Type, obj) + default: + panic(fmt.Sprintf("unexpected number of names: %d", len(meth.Names))) + } } - case *types.Array: - // (9.3) types use their underlying and element types - g.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(t.Elem(), nil) - case *types.Pointer: - // (9.3) types use their underlying and element types - g.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(t.Elem(), nil) - case *types.Chan: - // (9.3) types use their underlying and element types - g.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(t.Elem(), nil) - case *types.Tuple: - for i := 0; i < t.Len(); i++ { - // (9.3) types use their underlying and element types - g.seeAndUse(t.At(i).Type(), t, edgeTupleElement|edgeType) - g.typ(t.At(i).Type(), nil) + + case *ast.Ellipsis: + g.read(node.Elt, by) + + case *ast.CallExpr: + g.read(node.Fun, by) + for _, arg := range node.Args { + g.read(arg, by) } - case *typeutil.Iterator: - // (9.3) types use their underlying and element types - g.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(t.Elem(), nil) - case *typeparams.TypeParam: - // (9.3) types use their underlying and element types - - g.seeAndUse(t.Obj(), t, edgeTypeName) - g.seeAndUse(t, t.Obj(), edgeNamedType) - g.seeAndUse(t.Constraint(), t, edgeElementType) - g.typ(t.Constraint(), t) - case *typeparams.Union: - for i := 0; i < t.Len(); i++ { - g.seeAndUse(t.Term(i).Type(), t, edgeUnionTerm) - g.typ(t.Term(i).Type(), nil) + + // Handle conversions + conv := node + if len(conv.Args) != 1 || conv.Ellipsis.IsValid() { + return + } + + dst := g.info.TypeOf(conv.Fun) + src := g.info.TypeOf(conv.Args[0]) + + // XXX use DereferenceR instead + // XXX guard against infinite recursion in DereferenceR + tSrc := typeutil.CoreType(typeutil.Dereference(src)) + tDst := typeutil.CoreType(typeutil.Dereference(dst)) + stSrc, okSrc :=
tSrc.(*types.Struct) + stDst, okDst := tDst.(*types.Struct) + if okDst && okSrc { + // Converting between two structs. The fields are + // relevant for the conversion, but only if the + // fields are also used outside of the conversion. + // Mark fields as used by each other. + + assert(stDst.NumFields() == stSrc.NumFields()) + for i := 0; i < stDst.NumFields(); i++ { + // (5.1) when converting between two equivalent structs, the fields in + // either struct use each other. the fields are relevant for the + // conversion, but only if the fields are also accessed outside the + // conversion. + g.use(stDst.Field(i), stSrc.Field(i)) + g.use(stSrc.Field(i), stDst.Field(i)) + } + } else if okSrc && tDst == types.Typ[types.UnsafePointer] { + // (5.2) when converting to or from unsafe.Pointer, mark all fields as used. + g.useAllFieldsRecursively(stSrc, by) + } else if okDst && tSrc == types.Typ[types.UnsafePointer] { + // (5.2) when converting to or from unsafe.Pointer, mark all fields as used. + g.useAllFieldsRecursively(stDst, by) } + default: - panic(fmt.Sprintf("unreachable: %T", t)) + lint.ExhaustiveTypeSwitch(node) } } -func (g *graph) variable(v *types.Var) { - // (9.2) variables use their types - g.seeAndUse(v.Type(), v, edgeType) - g.typ(v.Type(), nil) +func (g *graph) useAllFieldsRecursively(typ types.Type, by types.Object) { + switch typ := typ.Underlying().(type) { + case *types.Struct: + for i := 0; i < typ.NumFields(); i++ { + field := typ.Field(i) + g.use(field, by) + g.useAllFieldsRecursively(field.Type(), by) + } + case *types.Array: + g.useAllFieldsRecursively(typ.Elem(), by) + default: + return + } } -func (g *graph) signature(sig *types.Signature, fn types.Object) { - var user interface{} = fn - if fn == nil { - user = sig - g.see(sig) +func (g *graph) write(node ast.Node, by types.Object) { + if node == nil { + return } - if sig.Recv() != nil { - g.seeAndUse(sig.Recv().Type(), user, edgeReceiver|edgeType) - g.typ(sig.Recv().Type(), nil) + + switch node := node.(type) { + case *ast.Ident: + obj := g.info.ObjectOf(node) + if obj == nil { + // This can happen for `switch x := v.(type)`, where that x doesn't have an object + return + } + + // (4.9) functions use package-level variables they assign to iff in tests (sinks for benchmarks) + // (9.7) variable _reads_ use variables, writes do not, except in tests + path := g.fset.File(obj.Pos()).Name() + if strings.HasSuffix(path, "_test.go") { + if isGlobal(obj) { + g.use(obj, by) + } + } + + case *ast.IndexExpr: + g.read(node.X, by) + g.read(node.Index, by) + + case *ast.SelectorExpr: + if g.opts.FieldWritesAreUses { + // Writing to a field constitutes a use. See https://staticcheck.io/issues/288 for some discussion on that. + // + // This code can also get triggered by qualified package variables, in which case it doesn't matter what we do, + // because the object is in another package. + // + // FIXME(dh): ^ isn't true if we track usedness of exported identifiers + g.readSelectorExpr(node, by) + } else { + g.read(node.X, by) + g.write(node.Sel, by) + } + + case *ast.StarExpr: + g.read(node.X, by) + + case *ast.ParenExpr: + g.write(node.X, by) + + default: + lint.ExhaustiveTypeSwitch(node) } - for i := 0; i < sig.Params().Len(); i++ { - param := sig.Params().At(i) - g.seeAndUse(param.Type(), user, edgeFunctionArgument|edgeType) - g.typ(param.Type(), nil) +} + +// readSelectorExpr reads all elements of a selector expression, including implicit fields. 
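+// For example, for 'a.c' where c is promoted from an embedded field b, the implicit traversal
+// of b is recorded as well, via the types.Selection handling in readSelection.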
+func (g *graph) readSelectorExpr(sel *ast.SelectorExpr, by types.Object) { + // cover AST-based accesses + g.read(sel.X, by) + g.read(sel.Sel, by) + + tsel, ok := g.info.Selections[sel] + if !ok { + return } - for i := 0; i < sig.Results().Len(); i++ { - param := sig.Results().At(i) - g.seeAndUse(param.Type(), user, edgeFunctionResult|edgeType) - g.typ(param.Type(), nil) + g.readSelection(tsel, by) +} + +func (g *graph) readSelection(sel *types.Selection, by types.Object) { + indices := sel.Index() + base := sel.Recv() + for _, idx := range indices[:len(indices)-1] { + // XXX do we need core types here? + field := typeutil.Dereference(base.Underlying()).Underlying().(*types.Struct).Field(idx) + g.use(field, by) + base = field.Type() } - for i := 0; i < typeparams.RecvTypeParams(sig).Len(); i++ { - // We track the type parameter's constraint, not the type parameter itself. - // We never want to flag an unused type parameter. - param := typeparams.RecvTypeParams(sig).At(i).Constraint() - g.seeAndUse(param, user, edgeFunctionArgument|edgeType) - g.typ(param, nil) + + g.use(sel.Obj(), by) +} + +func (g *graph) block(block *ast.BlockStmt, by types.Object) { + if block == nil { + return } - for i := 0; i < typeparams.ForSignature(sig).Len(); i++ { - // We track the type parameter's constraint, not the type parameter itself. - // We never want to flag an unused type parameter. - param := typeparams.ForSignature(sig).At(i).Constraint() - g.seeAndUse(param, user, edgeFunctionArgument|edgeType) - g.typ(param, nil) + + g.seeScope(block, by, nil) + for _, stmt := range block.List { + g.stmt(stmt, by) } } -func (g *graph) instructions(fn *ir.Function) { - fnObj := owningObject(fn) - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - ops := instr.Operands(nil) - switch instr.(type) { - case *ir.Store: - // (9.7) variable _reads_ use variables, writes do not - ops = ops[1:] - case *ir.DebugRef: - ops = nil +func isGlobal(obj types.Object) bool { + return obj.Parent() == obj.Pkg().Scope() +} + +func (g *graph) decl(decl ast.Decl, by types.Object) { + switch decl := decl.(type) { + case *ast.GenDecl: + switch decl.Tok { + case token.IMPORT: + // Nothing to do + + case token.CONST: + for _, spec := range decl.Specs { + vspec := spec.(*ast.ValueSpec) + assert(len(vspec.Values) == 0 || len(vspec.Values) == len(vspec.Names)) + for i, name := range vspec.Names { + obj := g.info.ObjectOf(name) + g.see(obj, by) + g.read(vspec.Type, obj) + + if len(vspec.Values) != 0 { + g.read(vspec.Values[i], obj) + } + + if name.Name == "_" { + // (9.9) objects named the blank identifier are used + g.use(obj, by) + } else if token.IsExported(name.Name) && isGlobal(obj) && g.opts.ExportedIsUsed { + g.use(obj, nil) + } + } } - for _, arg := range ops { - walkPhi(*arg, func(v ir.Value) { - switch v := v.(type) { - case *ir.Function: - // (4.3) functions use closures and bound methods. - // (4.5) functions use functions they call - // (9.5) instructions use their operands - // (4.4) functions use functions they return. we assume that someone else will call the returned function - if owningObject(v) != nil { - g.seeAndUse(owningObject(v), fnObj, edgeInstructionOperand) + + groups := astutil.GroupSpecs(g.fset, decl.Specs) + for _, group := range groups { + // (10.1) if one constant out of a block of constants is used, mark all of them used + // + // We encode this as a ring. If we have a constant group 'const ( a; b; c )', then we'll produce the + // following graph: a -> b -> c -> a. 
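+				// Using any one constant of the group therefore transitively reaches all the others,
+				// so the group as a whole is either fully used or fully unused, without needing a
+				// dedicated node to represent the group.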
+ + var first, prev, last types.Object + for _, spec := range group { + for _, name := range spec.(*ast.ValueSpec).Names { + if name.Name == "_" { + // Having a blank constant in a group doesn't mark the whole group as used + continue } - g.function(v) - case *ir.Const: - // (9.6) instructions use their operands' types - g.seeAndUse(v.Type(), fnObj, edgeType) - g.typ(v.Type(), nil) - case *ir.Global: - if v.Object() != nil { - // (9.5) instructions use their operands - g.seeAndUse(v.Object(), fnObj, edgeInstructionOperand) + + obj := g.info.ObjectOf(name) + if first == nil { + first = obj + } else { + g.use(obj, prev) } + prev = obj + last = obj } - }) - } - if v, ok := instr.(ir.Value); ok { - if _, ok := v.(*ir.Range); !ok { - // See https://github.com/golang/go/issues/19670 - - // (4.8) instructions use their types - // (9.4) conversions use the type they convert to - g.seeAndUse(v.Type(), fnObj, edgeType) - g.typ(v.Type(), nil) + } + if first != nil && first != last { + g.use(first, last) } } - switch instr := instr.(type) { - case *ir.Field: - // Can't access fields via generics, for now. - - st := instr.X.Type().Underlying().(*types.Struct) - field := st.Field(instr.Field) - // (4.7) functions use fields they access - g.seeAndUse(field, fnObj, edgeFieldAccess) - case *ir.FieldAddr: - // User code can't access fields on type parameters, but composite literals are still possible, which - // compile to FieldAddr + Store. - - st := typeutil.CoreType(typeutil.Dereference(instr.X.Type())).(*types.Struct) - field := st.Field(instr.Field) - // (4.7) functions use fields they access - g.seeAndUse(field, fnObj, edgeFieldAccess) - case *ir.Store: - // nothing to do, handled generically by operands - case ir.CallInstruction: - c := instr.Common() - for _, targ := range c.TypeArgs { - g.seeAndUse(targ, fnObj, edgeTypeArg) + + case token.TYPE: + for _, spec := range decl.Specs { + tspec := spec.(*ast.TypeSpec) + obj := g.info.ObjectOf(tspec.Name).(*types.TypeName) + g.see(obj, by) + g.seeScope(tspec, obj, nil) + if !tspec.Assign.IsValid() { + g.namedTypes = append(g.namedTypes, obj) } - if !c.IsInvoke() { - // handled generically as an instruction operand - } else { - // (4.5) functions use functions/interface methods they call - g.seeAndUse(c.Method, fnObj, edgeInterfaceCall) + if token.IsExported(tspec.Name.Name) && isGlobal(obj) && g.opts.ExportedIsUsed { + // (1.1) packages use exported named types + g.use(g.info.ObjectOf(tspec.Name), nil) } - case *ir.Return: - // nothing to do, handled generically by operands - case *ir.ChangeType: - // conversion type handled generically - - s1, ok1 := typeutil.CoreType(typeutil.Dereference(instr.Type())).(*types.Struct) - s2, ok2 := typeutil.CoreType(typeutil.Dereference(instr.X.Type())).(*types.Struct) - if ok1 && ok2 { - // Converting between two structs. The fields are - // relevant for the conversion, but only if the - // fields are also used outside of the conversion. - // Mark fields as used by each other. - - assert(s1.NumFields() == s2.NumFields()) - for i := 0; i < s1.NumFields(); i++ { - g.see(s1.Field(i)) - g.see(s2.Field(i)) - // (5.1) when converting between two equivalent structs, the fields in - // either struct use each other. the fields are relevant for the - // conversion, but only if the fields are also accessed outside the - // conversion. 
- g.seeAndUse(s1.Field(i), s2.Field(i), edgeStructConversion) - g.seeAndUse(s2.Field(i), s1.Field(i), edgeStructConversion) - } + + // (2.5) named types use all their type parameters + g.read(tspec.TypeParams, obj) + + g.namedType(obj, tspec.Type) + + if tspec.Name.Name == "_" { + // (9.9) objects named the blank identifier are used + g.use(obj, by) } - case *ir.MakeInterface: - // nothing to do, handled generically by operands - case *ir.Slice: - // nothing to do, handled generically by operands - case *ir.RunDefers: - // nothing to do, the deferred functions are already marked use by deferring them. - case *ir.Convert: - // to unsafe.Pointer - if typ, ok := instr.Type().(*types.Basic); ok && typ.Kind() == types.UnsafePointer { - if ptr, ok := instr.X.Type().Underlying().(*types.Pointer); ok { - if st, ok := ptr.Elem().Underlying().(*types.Struct); ok { - for i := 0; i < st.NumFields(); i++ { - // (5.2) when converting to or from unsafe.Pointer, mark all fields as used. - g.seeAndUse(st.Field(i), fnObj, edgeUnsafeConversion) - } + } + + case token.VAR: + // We cannot rely on types.Initializer for package-level variables because + // - initializers are only tracked for variables that are actually initialized + // - we want to see the AST of the type, if specified, not just the rhs + + for _, spec := range decl.Specs { + vspec := spec.(*ast.ValueSpec) + for i, name := range vspec.Names { + obj := g.info.ObjectOf(name) + g.see(obj, by) + // variables and constants use their types + g.read(vspec.Type, obj) + + if len(vspec.Names) == len(vspec.Values) { + // One value per variable + g.read(vspec.Values[i], obj) + } else if len(vspec.Values) != 0 { + // Multiple variables initialized with a single rhs + // assert(len(vspec.Values) == 1) + if len(vspec.Values) != 1 { + panic(g.fset.PositionFor(vspec.Pos(), false)) } + g.read(vspec.Values[0], obj) } - } - // from unsafe.Pointer - if typ, ok := instr.X.Type().(*types.Basic); ok && typ.Kind() == types.UnsafePointer { - if ptr, ok := instr.Type().Underlying().(*types.Pointer); ok { - if st, ok := ptr.Elem().Underlying().(*types.Struct); ok { - for i := 0; i < st.NumFields(); i++ { - // (5.2) when converting to or from unsafe.Pointer, mark all fields as used. - g.seeAndUse(st.Field(i), fnObj, edgeUnsafeConversion) - } - } + + if token.IsExported(name.Name) && isGlobal(obj) && g.opts.ExportedIsUsed { + // (1.3) packages use exported variables + g.use(obj, nil) + } + + if name.Name == "_" { + // (9.9) objects named the blank identifier are used + g.use(obj, by) } } - case *ir.TypeAssert: - // nothing to do, handled generically by instruction - // type (possibly a tuple, which contains the asserted - // to type). 
redundantly handled by the type of - // ir.Extract, too - case *ir.MakeClosure: - // nothing to do, handled generically by operands - case *ir.Alloc: - // nothing to do - case *ir.UnOp: - // nothing to do - case *ir.BinOp: - // nothing to do - case *ir.If: - // nothing to do - case *ir.Jump: - // nothing to do - case *ir.Unreachable: - // nothing to do - case *ir.IndexAddr: - // nothing to do - case *ir.Extract: - // nothing to do - case *ir.Panic: - // nothing to do - case *ir.DebugRef: - // nothing to do - case *ir.BlankStore: - // nothing to do - case *ir.Phi: - // nothing to do - case *ir.Sigma: - // nothing to do - case *ir.MakeMap: - // nothing to do - case *ir.MapUpdate: - // nothing to do - case *ir.MapLookup: - // nothing to do - case *ir.StringLookup: - // nothing to do - case *ir.MakeSlice: - // nothing to do - case *ir.Send: - // nothing to do - case *ir.MakeChan: - // nothing to do - case *ir.Range: - // nothing to do - case *ir.Next: - // nothing to do - case *ir.Index: - // nothing to do - case *ir.Select: - // nothing to do - case *ir.ChangeInterface: - // nothing to do - case *ir.Load: - // nothing to do - case *ir.Parameter: - // nothing to do - case *ir.Const: - // nothing to do - case *ir.ArrayConst: - // nothing to do - case *ir.AggregateConst: - // nothing to do - case *ir.GenericConst: - // nothing to do - case *ir.Recv: - // nothing to do - case *ir.TypeSwitch: - // nothing to do - case *ir.ConstantSwitch: - // nothing to do - case *ir.SliceToArrayPointer: - // nothing to do + } + + default: + panic(fmt.Sprintf("unexpected token %s", decl.Tok)) + } + + case *ast.FuncDecl: + obj := g.info.ObjectOf(decl.Name).(*types.Func).Origin() + g.see(obj, nil) + + if token.IsExported(decl.Name.Name) && g.opts.ExportedIsUsed { + if decl.Recv == nil { + // (1.2) packages use exported functions + g.use(obj, nil) + } + } else if decl.Name.Name == "init" { + // (1.5) packages use init functions + g.use(obj, nil) + } else if decl.Name.Name == "main" && g.pkg.Name() == "main" { + // (1.7) packages use the main function iff in the main package + g.use(obj, nil) + } else if g.pkg.Path() == "runtime" && runtimeFuncs[decl.Name.Name] { + // (9.8) runtime functions that may be called from user code via the compiler + g.use(obj, nil) + } else if g.pkg.Path() == "runtime/coverage" && runtimeCoverageFuncs[decl.Name.Name] { + // (9.8) runtime functions that may be called from user code via the compiler + g.use(obj, nil) + } + + // (4.1) functions use their receivers + g.read(decl.Recv, obj) + g.read(decl.Type, obj) + g.block(decl.Body, obj) + + // g.read(decl.Type) will ultimately call g.seeScopes and see parameters that way. But because it relies + // entirely on the AST, it cannot resolve unnamed parameters to types.Object. For that reason we explicitly + // handle arguments here, as well as for FuncLits elsewhere. + // + // g.seeScopes can't get to the types.Signature for this function because there is no mapping from ast.FuncType to + // types.Signature, only from ast.Ident to types.Signature. + // + // This code is only really relevant when Options.ParametersAreUsed is false. Otherwise, all parameters are + // considered used, and if we never see a parameter then no harm done (we still see its type separately). 
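+	// For example, 'func (T) fn(int)' has a parameter with no ast.Ident at all, so the only way
+	// to see it (and, being unnamed, immediately use it) is through the types.Signature below.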
+ fn := g.info.TypeOf(decl.Name).(*types.Signature) + for params, i := fn.Params(), 0; i < params.Len(); i++ { + g.see(params.At(i), obj) + if params.At(i).Name() == "" { + g.use(params.At(i), obj) + } + } + + if decl.Name.Name == "_" { + // (9.9) objects named the blank identifier are used + g.use(obj, nil) + } + + if decl.Doc != nil { + for _, cmt := range decl.Doc.List { + if strings.HasPrefix(cmt.Text, "//go:cgo_export_") { + // (1.6) packages use functions exported to cgo + g.use(obj, nil) + } + } + } + + default: + // We do not cover BadDecl, but we shouldn't ever see one of those + lint.ExhaustiveTypeSwitch(decl) + } +} + +// seeScope sees all objects in node's scope. If Options.LocalVariablesAreUsed is true, all objects that aren't fields +// are marked as used. Variables set in skipLvars will not be marked as used. +func (g *graph) seeScope(node ast.Node, by types.Object, skipLvars map[*types.Var]struct{}) { + // A note on functions and scopes: for a function declaration, the body's BlockStmt can't be found in + // types.Info.Scopes. Instead, the FuncType can, and that scope will contain receivers, parameters, return + // parameters and immediate local variables. + + scope := g.info.Scopes[node] + if scope == nil { + return + } + for _, name := range scope.Names() { + obj := scope.Lookup(name) + g.see(obj, by) + + if g.opts.LocalVariablesAreUsed { + if obj, ok := obj.(*types.Var); ok && !obj.IsField() { + if _, ok := skipLvars[obj]; !ok { + g.use(obj, by) + } + } + } + } +} + +func (g *graph) stmt(stmt ast.Stmt, by types.Object) { + if stmt == nil { + return + } + + for { + // We don't care about labels, so unwrap LabeledStmts. Note that a label can itself be labeled. + if labeled, ok := stmt.(*ast.LabeledStmt); ok { + stmt = labeled.Stmt + } else { + break + } + } + + switch stmt := stmt.(type) { + case *ast.AssignStmt: + for _, lhs := range stmt.Lhs { + g.write(lhs, by) + } + for _, rhs := range stmt.Rhs { + // Note: it would be more accurate to have the rhs used by the lhs, but it ultimately doesn't matter, + // because local variables always end up used, anyway. + // + // TODO(dh): we'll have to change that once we allow tracking the usedness of parameters + g.read(rhs, by) + } + + case *ast.BlockStmt: + g.block(stmt, by) + + case *ast.BranchStmt: + // Nothing to do + + case *ast.DeclStmt: + g.decl(stmt.Decl, by) + + case *ast.DeferStmt: + g.read(stmt.Call, by) + + case *ast.ExprStmt: + g.read(stmt.X, by) + + case *ast.ForStmt: + g.seeScope(stmt, by, nil) + g.stmt(stmt.Init, by) + g.read(stmt.Cond, by) + g.stmt(stmt.Post, by) + g.block(stmt.Body, by) + + case *ast.GoStmt: + g.read(stmt.Call, by) + + case *ast.IfStmt: + g.seeScope(stmt, by, nil) + g.stmt(stmt.Init, by) + g.read(stmt.Cond, by) + g.block(stmt.Body, by) + g.stmt(stmt.Else, by) + + case *ast.IncDecStmt: + if g.opts.PostStatementsAreReads { + g.read(stmt.X, by) + g.write(stmt.X, by) + } else { + // We treat post-increment as a write only. This ends up using fields, and sinks in tests, but not other + // variables. 
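+			// For example, a bare 'counter++' does not keep the local counter alive by itself,
+			// whereas 's.n++' still marks the field n as used via the field-write handling when
+			// Options.FieldWritesAreUses is set.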
+ g.write(stmt.X, by) + } + + case *ast.RangeStmt: + g.seeScope(stmt, by, nil) + + g.write(stmt.Key, by) + g.write(stmt.Value, by) + g.read(stmt.X, by) + g.block(stmt.Body, by) + + case *ast.ReturnStmt: + for _, ret := range stmt.Results { + g.read(ret, by) + } + + case *ast.SelectStmt: + for _, clause_ := range stmt.Body.List { + clause := clause_.(*ast.CommClause) + g.seeScope(clause, by, nil) + switch comm := clause.Comm.(type) { + case *ast.SendStmt: + g.read(comm.Chan, by) + g.read(comm.Value, by) + case *ast.ExprStmt: + g.read(astutil.Unparen(comm.X).(*ast.UnaryExpr).X, by) + case *ast.AssignStmt: + for _, lhs := range comm.Lhs { + g.write(lhs, by) + } + for _, rhs := range comm.Rhs { + g.read(rhs, by) + } + case nil: default: - lint.ExhaustiveTypeSwitch(instr) + lint.ExhaustiveTypeSwitch(comm) + } + for _, body := range clause.Body { + g.stmt(body, by) + } + } + + case *ast.SendStmt: + g.read(stmt.Chan, by) + g.read(stmt.Value, by) + + case *ast.SwitchStmt: + g.seeScope(stmt, by, nil) + g.stmt(stmt.Init, by) + g.read(stmt.Tag, by) + for _, clause_ := range stmt.Body.List { + clause := clause_.(*ast.CaseClause) + g.seeScope(clause, by, nil) + for _, expr := range clause.List { + g.read(expr, by) + } + for _, body := range clause.Body { + g.stmt(body, by) } } + + case *ast.TypeSwitchStmt: + g.seeScope(stmt, by, nil) + g.stmt(stmt.Init, by) + g.stmt(stmt.Assign, by) + for _, clause_ := range stmt.Body.List { + clause := clause_.(*ast.CaseClause) + g.seeScope(clause, by, nil) + for _, expr := range clause.List { + g.read(expr, by) + } + for _, body := range clause.Body { + g.stmt(body, by) + } + } + + case *ast.EmptyStmt: + // Nothing to do + + default: + lint.ExhaustiveTypeSwitch(stmt) + } +} + +// embeddedField sees the field declared by the embedded field node, and marks the type as used by the field. +// +// Embedded fields are special in two ways: they don't have names, so we don't have immediate access to an ast.Ident to +// resolve to the field's types.Var and need to instead walk the AST, and we cannot use g.read on the type because +// eventually we do get to an ast.Ident, and ObjectOf resolves embedded fields to the field they declare, not the type. +// That's why we have code specially for handling embedded fields. +func (g *graph) embeddedField(node ast.Node, by types.Object) *types.Var { + // We need to traverse the tree to find the ast.Ident, but all the nodes we traverse should be used by the object we + // get once we resolve the ident. Collect the nodes and process them once we've found the ident. + nodes := make([]ast.Node, 0, 4) + for { + switch node_ := node.(type) { + case *ast.Ident: + // obj is the field + obj := g.info.ObjectOf(node_).(*types.Var) + // the field is declared by the enclosing type + g.see(obj, by) + for _, n := range nodes { + g.read(n, obj) + } + + if tname, ok := g.info.Uses[node_].(*types.TypeName); ok && tname.IsAlias() { + // When embedding an alias we want to use the alias, not what the alias points to. 
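+			// For example, with 'type T = U' and 'struct{ T }', the embedded field records a use of
+			// the alias T itself rather than of U, so the alias isn't flagged despite being embedded.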
+ g.use(tname, obj) + } else { + switch typ := typeutil.Dereference(g.info.TypeOf(node_)).(type) { + case *types.Named: + // (7.2) fields use their types + g.use(typ.Obj(), obj) + case *types.Basic: + // Nothing to do + default: + // Other types are only possible for aliases, which we've already handled + lint.ExhaustiveTypeSwitch(typ) + } + } + return obj + case *ast.StarExpr: + node = node_.X + case *ast.SelectorExpr: + node = node_.Sel + nodes = append(nodes, node_.X) + case *ast.IndexExpr: + node = node_.X + nodes = append(nodes, node_.Index) + case *ast.IndexListExpr: + node = node_.X + default: + lint.ExhaustiveTypeSwitch(node_) + } } } @@ -1815,41 +1461,219 @@ func isNoCopyType(typ types.Type) bool { if !ok { return false } - if named.NumMethods() != 1 { - return false - } - meth := named.Method(0) - if meth.Name() != "Lock" { - return false - } - sig := meth.Type().(*types.Signature) - if sig.Params().Len() != 0 || sig.Results().Len() != 0 { + switch num := named.NumMethods(); num { + case 1, 2: + for i := 0; i < num; i++ { + meth := named.Method(i) + if meth.Name() != "Lock" && meth.Name() != "Unlock" { + return false + } + sig := meth.Type().(*types.Signature) + if sig.Params().Len() != 0 || sig.Results().Len() != 0 { + return false + } + } + default: return false } return true } -func walkPhi(v ir.Value, fn func(v ir.Value)) { - phi, ok := v.(*ir.Phi) - if !ok { - fn(v) +func (g *graph) namedType(typ *types.TypeName, spec ast.Expr) { + // (2.2) named types use the type they're based on + + if st, ok := spec.(*ast.StructType); ok { + // Named structs are special in that their unexported fields are only used if they're being written to. That is, + // the fields are not used by the named type itself, nor are the types of the fields. + for _, field := range st.Fields.List { + seen := map[*types.Struct]struct{}{} + // For `type x struct { *x; F int }`, don't visit the embedded x + seen[g.info.TypeOf(st).(*types.Struct)] = struct{}{} + var hasExportedField func(t types.Type) bool + hasExportedField = func(T types.Type) bool { + t, ok := typeutil.Dereference(T).Underlying().(*types.Struct) + if !ok { + return false + } + if _, ok := seen[t]; ok { + return false + } + seen[t] = struct{}{} + for i := 0; i < t.NumFields(); i++ { + field := t.Field(i) + if field.Exported() { + return true + } + if field.Embedded() && hasExportedField(field.Type()) { + return true + } + } + return false + } + + if len(field.Names) == 0 { + fieldVar := g.embeddedField(field.Type, typ) + if token.IsExported(fieldVar.Name()) && g.opts.ExportedIsUsed { + // (6.2) structs use exported fields + g.use(fieldVar, typ) + } + if g.opts.ExportedIsUsed && g.opts.ExportedFieldsAreUsed && hasExportedField(fieldVar.Type()) { + // (6.5) structs use embedded structs that have exported fields (recursively) + g.use(fieldVar, typ) + } + } else { + for _, name := range field.Names { + obj := g.info.ObjectOf(name) + g.see(obj, typ) + // (7.2) fields use their types + // + // This handles aliases correctly because ObjectOf(alias) returns the TypeName of the alias, not + // what the alias points to.
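+					// For example, a field declared as 'f A' where 'type A = B' records a use of A
+					// itself, so an alias referenced only by struct fields is still kept alive.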
+ g.read(field.Type, obj) + if name.Name == "_" { + // (9.9) objects named the blank identifier are used + g.use(obj, typ) + } else if token.IsExported(name.Name) && g.opts.ExportedIsUsed { + // (6.2) structs use exported fields + g.use(obj, typ) + } + + if isNoCopyType(obj.Type()) { + // (6.1) structs use fields of type NoCopy sentinel + g.use(obj, typ) + } + } + } + + } + } else { + g.read(spec, typ) + } +} + +func (g *SerializedGraph) color(rootID NodeID, states []nodeState) { + root := g.nodes[rootID] + if states[rootID].seen() { return } + states[rootID] |= nodeStateSeen + for _, n := range root.uses { + g.color(n, states) + } +} - seen := map[ir.Value]struct{}{} - var impl func(v *ir.Phi) - impl = func(v *ir.Phi) { - if _, ok := seen[v]; ok { - return +type Object struct { + Name string + ShortName string + // OPT(dh): use an enum for the kind + Kind string + Path ObjectPath + Position token.Position + DisplayPosition token.Position +} + +func (g *SerializedGraph) Results() Result { + // XXX objectpath does not return paths for unexported objects, which means that if we analyze the same code twice + // (e.g. normal and test variant), then some objects will appear multiple times, but may not be used identically. we + // have to deduplicate based on the token.Position. Actually we have to do that, anyway, because we may flag types + // local to functions. Those are probably always both used or both unused, but we don't want to flag them twice, + // either. + // + // Note, however, that we still need objectpaths to deduplicate exported identifiers when analyzing independent + // packages in whole-program mode, because if package A uses an object from package B, B will have been imported + // from export data, and we will not have column information. + // + // XXX ^ document that design requirement. + + states := g.colorAndQuieten() + + var res Result + // OPT(dh): can we find meaningful initial capacities for the used and unused slices? + for _, n := range g.nodes[1:] { + state := states[n.id] + if state.seen() { + res.Used = append(res.Used, n.obj) + } else if state.quiet() { + res.Quiet = append(res.Quiet, n.obj) + } else { + res.Unused = append(res.Unused, n.obj) } - seen[v] = struct{}{} - for _, e := range v.Edges { - if ev, ok := e.(*ir.Phi); ok { - impl(ev) - } else { - fn(e) + } + + return res +} + +func (g *SerializedGraph) colorAndQuieten() []nodeState { + states := make([]nodeState, len(g.nodes)+1) + g.color(0, states) + + var quieten func(id NodeID) + quieten = func(id NodeID) { + states[id] |= nodeStateQuiet + for _, owned := range g.nodes[id].owns { + quieten(owned) + } + } + + for _, n := range g.nodes { + if states[n.id].seen() { + continue + } + for _, owned := range n.owns { + quieten(owned) + } + } + + return states +} + +// Dot formats a graph in Graphviz dot format. +func (g *SerializedGraph) Dot() string { + b := &strings.Builder{} + states := g.colorAndQuieten() + // Note: We use addresses in our node names. This only works as long as Go's garbage collector doesn't move + // memory around in the middle of our debug printing. 
+ debugNode := func(n Node) { + if n.id == 0 { + fmt.Fprintf(b, "n%d [label=\"Root\"];\n", n.id) + } else { + color := "red" + if states[n.id].seen() { + color = "green" + } else if states[n.id].quiet() { + color = "grey" } + label := fmt.Sprintf("%s %s\n%s", n.obj.Kind, n.obj.Name, n.obj.Position) + fmt.Fprintf(b, "n%d [label=%q, color=%q];\n", n.id, label, color) } + for _, e := range n.uses { + fmt.Fprintf(b, "n%d -> n%d;\n", n.id, e) + } + + for _, owned := range n.owns { + fmt.Fprintf(b, "n%d -> n%d [style=dashed];\n", n.id, owned) + } + } + + fmt.Fprintf(b, "digraph{\n") + for _, v := range g.nodes { + debugNode(v) } - impl(phi) + + fmt.Fprintf(b, "}\n") + + return b.String() +} + +func Graph(fset *token.FileSet, + files []*ast.File, + pkg *types.Package, + info *types.Info, + directives []lint.Directive, + generated map[string]generated.Generator, + opts Options, +) []Node { + g := newGraph(fset, files, pkg, info, directives, generated, opts) + g.entry() + return g.nodes } diff --git a/vendor/modules.txt b/vendor/modules.txt index c08d551a36..e9c213f615 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,206 +1,241 @@ -# 4d63.com/gochecknoglobals v0.1.0 +# 4d63.com/gocheckcompilerdirectives v1.2.1 +## explicit; go 1.19 +4d63.com/gocheckcompilerdirectives/checkcompilerdirectives +# 4d63.com/gochecknoglobals v0.2.1 ## explicit; go 1.15 4d63.com/gochecknoglobals/checknoglobals -# cloud.google.com/go v0.110.2 -## explicit; go 1.19 -cloud.google.com/go/internal -cloud.google.com/go/internal/optional -cloud.google.com/go/internal/trace -cloud.google.com/go/internal/version -# cloud.google.com/go/compute v1.20.1 -## explicit; go 1.19 -cloud.google.com/go/compute/internal -# cloud.google.com/go/compute/metadata v0.2.3 -## explicit; go 1.19 -cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v0.13.0 +# github.com/4meepo/tagalign v1.3.3 ## explicit; go 1.19 -cloud.google.com/go/iam -cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/storage v1.29.0 -## explicit; go 1.19 -cloud.google.com/go/storage -cloud.google.com/go/storage/internal -cloud.google.com/go/storage/internal/apiv2 -cloud.google.com/go/storage/internal/apiv2/stubs -# github.com/BurntSushi/toml v1.2.1 +github.com/4meepo/tagalign +# github.com/Abirdcfly/dupword v0.0.14 +## explicit; go 1.20 +github.com/Abirdcfly/dupword +# github.com/Antonboom/errname v0.1.12 +## explicit; go 1.20 +github.com/Antonboom/errname/pkg/analyzer +# github.com/Antonboom/nilnil v0.1.7 +## explicit; go 1.20 +github.com/Antonboom/nilnil/pkg/analyzer +# github.com/Antonboom/testifylint v1.2.0 +## explicit; go 1.20 +github.com/Antonboom/testifylint/analyzer +github.com/Antonboom/testifylint/internal/analysisutil +github.com/Antonboom/testifylint/internal/checkers +github.com/Antonboom/testifylint/internal/config +github.com/Antonboom/testifylint/internal/testify +# github.com/BurntSushi/toml v1.3.2 ## explicit; go 1.16 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal # github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 ## explicit; go 1.13 github.com/Djarvur/go-err113 -# github.com/Masterminds/goutils v1.1.1 -## explicit -github.com/Masterminds/goutils +# github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 +## explicit; go 1.21 +github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer +github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment +github.com/GaijinEntertainment/go-exhaustruct/v3/internal/pattern +github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure # 
github.com/Masterminds/semver v1.5.0 ## explicit github.com/Masterminds/semver -# github.com/Masterminds/sprig v2.22.0+incompatible -## explicit -github.com/Masterminds/sprig -# github.com/OpenPeeDeeP/depguard v1.1.1 -## explicit; go 1.13 -github.com/OpenPeeDeeP/depguard +# github.com/OpenPeeDeeP/depguard/v2 v2.2.0 +## explicit; go 1.20 +github.com/OpenPeeDeeP/depguard/v2 +github.com/OpenPeeDeeP/depguard/v2/internal/utils +# github.com/ProtonMail/go-crypto v1.1.0-alpha.0 +## explicit; go 1.17 +github.com/ProtonMail/go-crypto/bitcurves +github.com/ProtonMail/go-crypto/brainpool +github.com/ProtonMail/go-crypto/eax +github.com/ProtonMail/go-crypto/internal/byteutil +github.com/ProtonMail/go-crypto/ocb +github.com/ProtonMail/go-crypto/openpgp +github.com/ProtonMail/go-crypto/openpgp/aes/keywrap +github.com/ProtonMail/go-crypto/openpgp/armor +github.com/ProtonMail/go-crypto/openpgp/ecdh +github.com/ProtonMail/go-crypto/openpgp/ecdsa +github.com/ProtonMail/go-crypto/openpgp/ed25519 +github.com/ProtonMail/go-crypto/openpgp/ed448 +github.com/ProtonMail/go-crypto/openpgp/eddsa +github.com/ProtonMail/go-crypto/openpgp/elgamal +github.com/ProtonMail/go-crypto/openpgp/errors +github.com/ProtonMail/go-crypto/openpgp/internal/algorithm +github.com/ProtonMail/go-crypto/openpgp/internal/ecc +github.com/ProtonMail/go-crypto/openpgp/internal/encoding +github.com/ProtonMail/go-crypto/openpgp/packet +github.com/ProtonMail/go-crypto/openpgp/s2k +github.com/ProtonMail/go-crypto/openpgp/x25519 +github.com/ProtonMail/go-crypto/openpgp/x448 # github.com/agext/levenshtein v1.2.2 ## explicit github.com/agext/levenshtein +# github.com/alecthomas/go-check-sumtype v0.1.4 +## explicit; go 1.18 +github.com/alecthomas/go-check-sumtype +# github.com/alexkohler/nakedret/v2 v2.0.4 +## explicit; go 1.21 +github.com/alexkohler/nakedret/v2 # github.com/alexkohler/prealloc v1.0.0 ## explicit; go 1.15 github.com/alexkohler/prealloc/pkg -# github.com/apparentlymart/go-cidr v1.1.0 -## explicit -github.com/apparentlymart/go-cidr/cidr -# github.com/apparentlymart/go-textseg/v12 v12.0.0 -## explicit; go 1.14 -github.com/apparentlymart/go-textseg/v12/textseg -# github.com/apparentlymart/go-textseg/v13 v13.0.0 +# github.com/alingse/asasalint v0.0.11 +## explicit; go 1.18 +github.com/alingse/asasalint +# github.com/apparentlymart/go-textseg/v15 v15.0.0 ## explicit; go 1.16 -github.com/apparentlymart/go-textseg/v13/textseg -# github.com/armon/go-radix v1.0.0 -## explicit -github.com/armon/go-radix -# github.com/ashanbrown/forbidigo v1.3.0 -## explicit; go 1.12 +github.com/apparentlymart/go-textseg/v15/textseg +# github.com/ashanbrown/forbidigo v1.6.0 +## explicit; go 1.13 github.com/ashanbrown/forbidigo/forbidigo # github.com/ashanbrown/makezero v1.1.1 ## explicit; go 1.12 github.com/ashanbrown/makezero/makezero -# github.com/aws/aws-sdk-go v1.44.122 -## explicit; go 1.11 -github.com/aws/aws-sdk-go/aws -github.com/aws/aws-sdk-go/aws/arn -github.com/aws/aws-sdk-go/aws/awserr -github.com/aws/aws-sdk-go/aws/awsutil -github.com/aws/aws-sdk-go/aws/client -github.com/aws/aws-sdk-go/aws/client/metadata -github.com/aws/aws-sdk-go/aws/corehandlers -github.com/aws/aws-sdk-go/aws/credentials -github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds -github.com/aws/aws-sdk-go/aws/credentials/endpointcreds -github.com/aws/aws-sdk-go/aws/credentials/processcreds -github.com/aws/aws-sdk-go/aws/credentials/ssocreds -github.com/aws/aws-sdk-go/aws/credentials/stscreds -github.com/aws/aws-sdk-go/aws/csm -github.com/aws/aws-sdk-go/aws/defaults 
-github.com/aws/aws-sdk-go/aws/ec2metadata -github.com/aws/aws-sdk-go/aws/endpoints -github.com/aws/aws-sdk-go/aws/request -github.com/aws/aws-sdk-go/aws/session -github.com/aws/aws-sdk-go/aws/signer/v4 -github.com/aws/aws-sdk-go/internal/context -github.com/aws/aws-sdk-go/internal/ini -github.com/aws/aws-sdk-go/internal/s3shared -github.com/aws/aws-sdk-go/internal/s3shared/arn -github.com/aws/aws-sdk-go/internal/s3shared/s3err -github.com/aws/aws-sdk-go/internal/sdkio -github.com/aws/aws-sdk-go/internal/sdkmath -github.com/aws/aws-sdk-go/internal/sdkrand -github.com/aws/aws-sdk-go/internal/sdkuri -github.com/aws/aws-sdk-go/internal/shareddefaults -github.com/aws/aws-sdk-go/internal/strings -github.com/aws/aws-sdk-go/internal/sync/singleflight -github.com/aws/aws-sdk-go/private/checksum -github.com/aws/aws-sdk-go/private/protocol -github.com/aws/aws-sdk-go/private/protocol/eventstream -github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi -github.com/aws/aws-sdk-go/private/protocol/json/jsonutil -github.com/aws/aws-sdk-go/private/protocol/jsonrpc -github.com/aws/aws-sdk-go/private/protocol/query -github.com/aws/aws-sdk-go/private/protocol/query/queryutil -github.com/aws/aws-sdk-go/private/protocol/rest -github.com/aws/aws-sdk-go/private/protocol/restjson -github.com/aws/aws-sdk-go/private/protocol/restxml -github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil -github.com/aws/aws-sdk-go/service/s3 -github.com/aws/aws-sdk-go/service/sso -github.com/aws/aws-sdk-go/service/sso/ssoiface -github.com/aws/aws-sdk-go/service/sts -github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile -# github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d -## explicit -github.com/bgentry/go-netrc/netrc -# github.com/bgentry/speakeasy v0.1.0 -## explicit -github.com/bgentry/speakeasy -# github.com/bkielbasa/cyclop v1.2.0 -## explicit; go 1.15 +# github.com/bkielbasa/cyclop v1.2.1 +## explicit; go 1.20 github.com/bkielbasa/cyclop/pkg/analyzer -# github.com/bombsimon/wsl/v3 v3.3.0 -## explicit; go 1.12 -github.com/bombsimon/wsl/v3 +# github.com/blizzy78/varnamelen v0.8.0 +## explicit; go 1.16 +github.com/blizzy78/varnamelen +# github.com/bombsimon/wsl/v4 v4.2.1 +## explicit; go 1.21 +github.com/bombsimon/wsl/v4 +# github.com/breml/bidichk v0.2.7 +## explicit; go 1.20 +github.com/breml/bidichk/pkg/bidichk +# github.com/breml/errchkjson v0.3.6 +## explicit; go 1.20 +github.com/breml/errchkjson +# github.com/butuzov/ireturn v0.3.0 +## explicit; go 1.18 +github.com/butuzov/ireturn/analyzer +github.com/butuzov/ireturn/analyzer/internal/config +github.com/butuzov/ireturn/analyzer/internal/types +# github.com/butuzov/mirror v1.1.0 +## explicit; go 1.19 +github.com/butuzov/mirror +github.com/butuzov/mirror/internal/checker +# github.com/catenacyber/perfsprint v0.7.1 +## explicit; go 1.20 +github.com/catenacyber/perfsprint/analyzer +# github.com/ccojocar/zxcvbn-go v1.0.2 +## explicit; go 1.20 +github.com/ccojocar/zxcvbn-go +github.com/ccojocar/zxcvbn-go/adjacency +github.com/ccojocar/zxcvbn-go/data +github.com/ccojocar/zxcvbn-go/entropy +github.com/ccojocar/zxcvbn-go/frequency +github.com/ccojocar/zxcvbn-go/match +github.com/ccojocar/zxcvbn-go/matching +github.com/ccojocar/zxcvbn-go/scoring +github.com/ccojocar/zxcvbn-go/utils/math # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/charithe/durationcheck v0.0.9 +# github.com/charithe/durationcheck v0.0.10 ## explicit; go 
1.14 github.com/charithe/durationcheck -# github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 +# github.com/chavacava/garif v0.1.0 ## explicit; go 1.16 github.com/chavacava/garif +# github.com/ckaznocha/intrange v0.1.0 +## explicit; go 1.21 +github.com/ckaznocha/intrange # github.com/client9/misspell v0.3.4 ## explicit github.com/client9/misspell github.com/client9/misspell/cmd/misspell -# github.com/daixiang0/gci v0.2.9 -## explicit; go 1.14 +# github.com/cloudflare/circl v1.3.7 +## explicit; go 1.19 +github.com/cloudflare/circl/dh/x25519 +github.com/cloudflare/circl/dh/x448 +github.com/cloudflare/circl/ecc/goldilocks +github.com/cloudflare/circl/internal/conv +github.com/cloudflare/circl/internal/sha3 +github.com/cloudflare/circl/math +github.com/cloudflare/circl/math/fp25519 +github.com/cloudflare/circl/math/fp448 +github.com/cloudflare/circl/math/mlsbset +github.com/cloudflare/circl/sign +github.com/cloudflare/circl/sign/ed25519 +github.com/cloudflare/circl/sign/ed448 +# github.com/curioswitch/go-reassign v0.2.0 +## explicit; go 1.18 +github.com/curioswitch/go-reassign +github.com/curioswitch/go-reassign/internal/analyzer +# github.com/daixiang0/gci v0.12.3 +## explicit; go 1.18 +github.com/daixiang0/gci/pkg/config +github.com/daixiang0/gci/pkg/format github.com/daixiang0/gci/pkg/gci +github.com/daixiang0/gci/pkg/io +github.com/daixiang0/gci/pkg/log +github.com/daixiang0/gci/pkg/parse +github.com/daixiang0/gci/pkg/section +github.com/daixiang0/gci/pkg/specificity +github.com/daixiang0/gci/pkg/utils # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/denis-tingajkin/go-header v0.4.2 -## explicit; go 1.15 -github.com/denis-tingajkin/go-header -# github.com/esimonov/ifshort v1.0.4 -## explicit; go 1.17 -github.com/esimonov/ifshort/pkg/analyzer -# github.com/ettle/strcase v0.1.1 +# github.com/denis-tingaikin/go-header v0.5.0 +## explicit; go 1.21 +github.com/denis-tingaikin/go-header +# github.com/ettle/strcase v0.2.0 ## explicit; go 1.12 github.com/ettle/strcase -# github.com/fatih/color v1.13.0 -## explicit; go 1.13 +# github.com/fatih/color v1.16.0 +## explicit; go 1.17 github.com/fatih/color # github.com/fatih/structtag v1.2.0 ## explicit; go 1.12 github.com/fatih/structtag +# github.com/firefart/nonamedreturns v1.0.4 +## explicit; go 1.18 +github.com/firefart/nonamedreturns/analyzer # github.com/fsnotify/fsnotify v1.5.4 ## explicit; go 1.16 github.com/fsnotify/fsnotify # github.com/fzipp/gocyclo v0.6.0 ## explicit; go 1.18 github.com/fzipp/gocyclo -# github.com/go-critic/go-critic v0.6.5 -## explicit; go 1.16 +# github.com/ghostiam/protogetter v0.3.5 +## explicit; go 1.19 +github.com/ghostiam/protogetter +# github.com/go-critic/go-critic v0.11.2 +## explicit; go 1.18 github.com/go-critic/go-critic/checkers github.com/go-critic/go-critic/checkers/internal/astwalk github.com/go-critic/go-critic/checkers/internal/lintutil github.com/go-critic/go-critic/checkers/rulesdata -github.com/go-critic/go-critic/framework/linter -# github.com/go-toolsmith/astcast v1.0.0 -## explicit +github.com/go-critic/go-critic/linter +# github.com/go-toolsmith/astcast v1.1.0 +## explicit; go 1.16 github.com/go-toolsmith/astcast -# github.com/go-toolsmith/astcopy v1.0.2 +# github.com/go-toolsmith/astcopy v1.1.0 ## explicit; go 1.16 github.com/go-toolsmith/astcopy -# github.com/go-toolsmith/astequal v1.0.3 -## explicit; go 1.16 +# github.com/go-toolsmith/astequal v1.2.0 +## explicit; go 1.18 github.com/go-toolsmith/astequal -# github.com/go-toolsmith/astfmt 
v1.0.0 -## explicit +# github.com/go-toolsmith/astfmt v1.1.0 +## explicit; go 1.16 github.com/go-toolsmith/astfmt -# github.com/go-toolsmith/astp v1.0.0 -## explicit +# github.com/go-toolsmith/astp v1.1.0 +## explicit; go 1.16 github.com/go-toolsmith/astp -# github.com/go-toolsmith/strparse v1.0.0 -## explicit +# github.com/go-toolsmith/strparse v1.1.0 +## explicit; go 1.16 github.com/go-toolsmith/strparse -# github.com/go-toolsmith/typep v1.0.2 -## explicit +# github.com/go-toolsmith/typep v1.1.0 +## explicit; go 1.16 github.com/go-toolsmith/typep -# github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b +# github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 +## explicit; go 1.18 +github.com/go-viper/mapstructure/v2 +# github.com/go-xmlfmt/xmlfmt v1.1.2 ## explicit github.com/go-xmlfmt/xmlfmt # github.com/gobwas/glob v0.2.3 @@ -216,9 +251,6 @@ github.com/gobwas/glob/util/strings # github.com/gofrs/flock v0.8.1 ## explicit github.com/gofrs/flock -# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da -## explicit -github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.3 ## explicit; go 1.9 github.com/golang/protobuf/jsonpb @@ -228,10 +260,6 @@ github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp -# github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 -## explicit -github.com/golangci/check/cmd/structcheck -github.com/golangci/check/cmd/varcheck # github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a ## explicit github.com/golangci/dupl @@ -240,17 +268,13 @@ github.com/golangci/dupl/printer github.com/golangci/dupl/suffixtree github.com/golangci/dupl/syntax github.com/golangci/dupl/syntax/golang -# github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe -## explicit; go 1.17 -github.com/golangci/go-misc/deadcode -# github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 -## explicit; go 1.18 +# github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e +## explicit; go 1.20 github.com/golangci/gofmt/gofmt github.com/golangci/gofmt/gofmt/internal/diff -github.com/golangci/gofmt/gofmt/internal/execabs github.com/golangci/gofmt/goimports -# github.com/golangci/golangci-lint v1.41.1 -## explicit; go 1.15 +# github.com/golangci/golangci-lint v1.57.1 +## explicit; go 1.21 github.com/golangci/golangci-lint/cmd/golangci-lint github.com/golangci/golangci-lint/internal/cache github.com/golangci/golangci-lint/internal/errorutil @@ -258,6 +282,7 @@ github.com/golangci/golangci-lint/internal/pkgcache github.com/golangci/golangci-lint/internal/renameio github.com/golangci/golangci-lint/internal/robustio github.com/golangci/golangci-lint/pkg/commands +github.com/golangci/golangci-lint/pkg/commands/internal github.com/golangci/golangci-lint/pkg/config github.com/golangci/golangci-lint/pkg/exitcodes github.com/golangci/golangci-lint/pkg/fsutils @@ -275,22 +300,18 @@ github.com/golangci/golangci-lint/pkg/printers github.com/golangci/golangci-lint/pkg/report github.com/golangci/golangci-lint/pkg/result github.com/golangci/golangci-lint/pkg/result/processors -github.com/golangci/golangci-lint/pkg/sliceutil github.com/golangci/golangci-lint/pkg/timeutils -# github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 -## explicit -github.com/golangci/lint-1 -# github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca -## explicit -github.com/golangci/maligned -# github.com/golangci/misspell v0.3.5 -## explicit +# 
github.com/golangci/misspell v0.4.1 +## explicit; go 1.19 github.com/golangci/misspell -# github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 -## explicit; go 1.17 +# github.com/golangci/plugin-module-register v0.1.1 +## explicit; go 1.21 +github.com/golangci/plugin-module-register/register +# github.com/golangci/revgrep v0.5.2 +## explicit; go 1.19 github.com/golangci/revgrep -# github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 -## explicit +# github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed +## explicit; go 1.20 github.com/golangci/unconvert # github.com/google/go-cmp v0.6.0 ## explicit; go 1.13 @@ -305,42 +326,10 @@ github.com/google/go-github/v57/github # github.com/google/go-querystring v1.1.0 ## explicit; go 1.10 github.com/google/go-querystring/query -# github.com/google/s2a-go v0.1.4 -## explicit; go 1.16 -github.com/google/s2a-go -github.com/google/s2a-go/fallback -github.com/google/s2a-go/internal/authinfo -github.com/google/s2a-go/internal/handshaker -github.com/google/s2a-go/internal/handshaker/service -github.com/google/s2a-go/internal/proto/common_go_proto -github.com/google/s2a-go/internal/proto/s2a_context_go_proto -github.com/google/s2a-go/internal/proto/s2a_go_proto -github.com/google/s2a-go/internal/proto/v2/common_go_proto -github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto -github.com/google/s2a-go/internal/proto/v2/s2a_go_proto -github.com/google/s2a-go/internal/record -github.com/google/s2a-go/internal/record/internal/aeadcrypter -github.com/google/s2a-go/internal/record/internal/halfconn -github.com/google/s2a-go/internal/tokenmanager -github.com/google/s2a-go/internal/v2 -github.com/google/s2a-go/internal/v2/certverifier -github.com/google/s2a-go/internal/v2/remotesigner -github.com/google/s2a-go/internal/v2/tlsconfigstore -github.com/google/s2a-go/stream -# github.com/google/uuid v1.5.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.2.3 -## explicit; go 1.19 -github.com/googleapis/enterprise-certificate-proxy/client -github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.11.0 -## explicit; go 1.19 -github.com/googleapis/gax-go/v2 -github.com/googleapis/gax-go/v2/apierror -github.com/googleapis/gax-go/v2/apierror/internal/proto -github.com/googleapis/gax-go/v2/internal -# github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 +# github.com/gordonklaus/ineffassign v0.1.0 ## explicit; go 1.14 github.com/gordonklaus/ineffassign/pkg/ineffassign # github.com/gostaticanalysis/analysisutil v0.7.1 @@ -356,8 +345,6 @@ github.com/gostaticanalysis/forcetypeassert # github.com/gostaticanalysis/nilerr v0.1.1 ## explicit; go 1.15 github.com/gostaticanalysis/nilerr -# github.com/gostaticanalysis/testutil v0.4.0 -## explicit; go 1.16 # github.com/hashicorp/errwrap v1.0.0 ## explicit github.com/hashicorp/errwrap @@ -367,29 +354,49 @@ github.com/hashicorp/go-checkpoint # github.com/hashicorp/go-cleanhttp v0.5.2 ## explicit; go 1.13 github.com/hashicorp/go-cleanhttp -# github.com/hashicorp/go-getter v1.7.0 -## explicit; go 1.13 -github.com/hashicorp/go-getter -github.com/hashicorp/go-getter/helper/url -# github.com/hashicorp/go-hclog v1.2.0 +# github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 +## explicit; go 1.12 +github.com/hashicorp/go-cty/cty +github.com/hashicorp/go-cty/cty/convert +github.com/hashicorp/go-cty/cty/gocty +github.com/hashicorp/go-cty/cty/json 
+github.com/hashicorp/go-cty/cty/msgpack +github.com/hashicorp/go-cty/cty/set +# github.com/hashicorp/go-hclog v1.5.0 ## explicit; go 1.13 github.com/hashicorp/go-hclog # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-plugin v1.3.0 -## explicit; go 1.13 +# github.com/hashicorp/go-plugin v1.6.0 +## explicit; go 1.17 github.com/hashicorp/go-plugin +github.com/hashicorp/go-plugin/internal/cmdrunner +github.com/hashicorp/go-plugin/internal/grpcmux github.com/hashicorp/go-plugin/internal/plugin -# github.com/hashicorp/go-safetemp v1.0.0 -## explicit -github.com/hashicorp/go-safetemp -# github.com/hashicorp/go-uuid v1.0.1 +github.com/hashicorp/go-plugin/runner +# github.com/hashicorp/go-uuid v1.0.3 ## explicit github.com/hashicorp/go-uuid # github.com/hashicorp/go-version v1.6.0 ## explicit github.com/hashicorp/go-version +# github.com/hashicorp/hc-install v0.6.3 +## explicit; go 1.18 +github.com/hashicorp/hc-install +github.com/hashicorp/hc-install/checkpoint +github.com/hashicorp/hc-install/errors +github.com/hashicorp/hc-install/fs +github.com/hashicorp/hc-install/internal/build +github.com/hashicorp/hc-install/internal/httpclient +github.com/hashicorp/hc-install/internal/pubkey +github.com/hashicorp/hc-install/internal/releasesjson +github.com/hashicorp/hc-install/internal/src +github.com/hashicorp/hc-install/internal/validators +github.com/hashicorp/hc-install/product +github.com/hashicorp/hc-install/releases +github.com/hashicorp/hc-install/src +github.com/hashicorp/hc-install/version # github.com/hashicorp/hcl v1.0.0 ## explicit github.com/hashicorp/hcl @@ -402,102 +409,90 @@ github.com/hashicorp/hcl/hcl/token github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token -# github.com/hashicorp/hcl/v2 v2.8.2 -## explicit; go 1.12 +# github.com/hashicorp/hcl/v2 v2.19.1 +## explicit; go 1.18 github.com/hashicorp/hcl/v2 github.com/hashicorp/hcl/v2/ext/customdecode -github.com/hashicorp/hcl/v2/ext/dynblock -github.com/hashicorp/hcl/v2/ext/typeexpr -github.com/hashicorp/hcl/v2/gohcl -github.com/hashicorp/hcl/v2/hcldec -github.com/hashicorp/hcl/v2/hcled -github.com/hashicorp/hcl/v2/hclparse github.com/hashicorp/hcl/v2/hclsyntax -github.com/hashicorp/hcl/v2/hclwrite -github.com/hashicorp/hcl/v2/json # github.com/hashicorp/logutils v1.0.0 ## explicit github.com/hashicorp/logutils -# github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7 -## explicit -github.com/hashicorp/terraform-config-inspect/tfconfig -# github.com/hashicorp/terraform-exec v0.13.3 -## explicit; go 1.14 +# github.com/hashicorp/terraform-exec v0.20.0 +## explicit; go 1.18 github.com/hashicorp/terraform-exec/internal/version github.com/hashicorp/terraform-exec/tfexec -github.com/hashicorp/terraform-exec/tfinstall -# github.com/hashicorp/terraform-json v0.10.0 -## explicit; go 1.13 +# github.com/hashicorp/terraform-json v0.21.0 +## explicit; go 1.18 github.com/hashicorp/terraform-json -# github.com/hashicorp/terraform-plugin-sdk v1.17.2 -## explicit; go 1.12 -github.com/hashicorp/terraform-plugin-sdk/acctest -github.com/hashicorp/terraform-plugin-sdk/helper/acctest -github.com/hashicorp/terraform-plugin-sdk/helper/customdiff -github.com/hashicorp/terraform-plugin-sdk/helper/hashcode -github.com/hashicorp/terraform-plugin-sdk/helper/logging -github.com/hashicorp/terraform-plugin-sdk/helper/resource -github.com/hashicorp/terraform-plugin-sdk/helper/schema 
-github.com/hashicorp/terraform-plugin-sdk/helper/structure -github.com/hashicorp/terraform-plugin-sdk/helper/validation -github.com/hashicorp/terraform-plugin-sdk/httpclient -github.com/hashicorp/terraform-plugin-sdk/internal/addrs -github.com/hashicorp/terraform-plugin-sdk/internal/command/format -github.com/hashicorp/terraform-plugin-sdk/internal/configs -github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload -github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema -github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim -github.com/hashicorp/terraform-plugin-sdk/internal/dag -github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig -github.com/hashicorp/terraform-plugin-sdk/internal/flatmap -github.com/hashicorp/terraform-plugin-sdk/internal/helper/config -github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean -github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin -github.com/hashicorp/terraform-plugin-sdk/internal/httpclient -github.com/hashicorp/terraform-plugin-sdk/internal/initwd -github.com/hashicorp/terraform-plugin-sdk/internal/lang -github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr -github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs -github.com/hashicorp/terraform-plugin-sdk/internal/modsdir -github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps -github.com/hashicorp/terraform-plugin-sdk/internal/plans -github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange -github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert -github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery -github.com/hashicorp/terraform-plugin-sdk/internal/providers -github.com/hashicorp/terraform-plugin-sdk/internal/provisioners -github.com/hashicorp/terraform-plugin-sdk/internal/registry -github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc -github.com/hashicorp/terraform-plugin-sdk/internal/registry/response -github.com/hashicorp/terraform-plugin-sdk/internal/states -github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile -github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags -github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5 -github.com/hashicorp/terraform-plugin-sdk/internal/version -github.com/hashicorp/terraform-plugin-sdk/meta -github.com/hashicorp/terraform-plugin-sdk/plugin -github.com/hashicorp/terraform-plugin-sdk/terraform -# github.com/hashicorp/terraform-plugin-test/v2 v2.2.1 -## explicit; go 1.12 -github.com/hashicorp/terraform-plugin-test/v2 -# github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 -## explicit; go 1.12 +# github.com/hashicorp/terraform-plugin-go v0.22.0 +## explicit; go 1.21 +github.com/hashicorp/terraform-plugin-go/internal/logging +github.com/hashicorp/terraform-plugin-go/tfprotov5 +github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag +github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto +github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr +github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging +github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5 +github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto +github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server +github.com/hashicorp/terraform-plugin-go/tfprotov6 +github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag +github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto 
+github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/funcerr +github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tf6serverlogging +github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6 +github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto +github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server +github.com/hashicorp/terraform-plugin-go/tftypes +# github.com/hashicorp/terraform-plugin-log v0.9.0 +## explicit; go 1.19 +github.com/hashicorp/terraform-plugin-log/internal/fieldutils +github.com/hashicorp/terraform-plugin-log/internal/hclogutils +github.com/hashicorp/terraform-plugin-log/internal/logging +github.com/hashicorp/terraform-plugin-log/tflog +github.com/hashicorp/terraform-plugin-log/tfsdklog +# github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 +## explicit; go 1.21 +github.com/hashicorp/terraform-plugin-sdk/v2/diag +github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest +github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff +github.com/hashicorp/terraform-plugin-sdk/v2/helper/id +github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging +github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource +github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry +github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema +github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure +github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation +github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs +github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema +github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim +github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode +github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging +github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange +github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert +github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest +github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags +github.com/hashicorp/terraform-plugin-sdk/v2/meta +github.com/hashicorp/terraform-plugin-sdk/v2/plugin +github.com/hashicorp/terraform-plugin-sdk/v2/terraform +# github.com/hashicorp/terraform-registry-address v0.2.3 +## explicit; go 1.19 +github.com/hashicorp/terraform-registry-address +# github.com/hashicorp/terraform-svchost v0.1.1 +## explicit; go 1.19 github.com/hashicorp/terraform-svchost -github.com/hashicorp/terraform-svchost/auth -github.com/hashicorp/terraform-svchost/disco -# github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d -## explicit +# github.com/hashicorp/yamux v0.1.1 +## explicit; go 1.15 github.com/hashicorp/yamux -# github.com/huandu/xstrings v1.3.2 -## explicit; go 1.12 -github.com/huandu/xstrings -# github.com/imdario/mergo v0.3.12 -## explicit; go 1.13 -github.com/imdario/mergo -# github.com/inconshreveable/mousetrap v1.0.1 +# github.com/hexops/gotextdiff v1.0.3 +## explicit; go 1.16 +github.com/hexops/gotextdiff +github.com/hexops/gotextdiff/myers +github.com/hexops/gotextdiff/span +# github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap -# github.com/jgautheron/goconst v1.5.1 +# github.com/jgautheron/goconst v1.7.0 ## explicit; go 1.13 github.com/jgautheron/goconst # github.com/jingyugao/rowserrcheck v1.1.1 @@ -506,56 +501,66 @@ github.com/jingyugao/rowserrcheck/passes/rowserr # github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af ## explicit; go 1.13 
github.com/jirfag/go-printf-func-name/pkg/analyzer -# github.com/jmespath/go-jmespath v0.4.0 -## explicit; go 1.14 -github.com/jmespath/go-jmespath +# github.com/jjti/go-spancheck v0.5.3 +## explicit; go 1.20 +github.com/jjti/go-spancheck # github.com/julz/importas v0.1.0 ## explicit; go 1.15 github.com/julz/importas -# github.com/kisielk/errcheck v1.6.2 -## explicit; go 1.14 +# github.com/karamaru-alpha/copyloopvar v1.0.8 +## explicit; go 1.21 +github.com/karamaru-alpha/copyloopvar +# github.com/kisielk/errcheck v1.7.0 +## explicit; go 1.18 github.com/kisielk/errcheck/errcheck -# github.com/kisielk/gotool v1.0.0 -## explicit -github.com/kisielk/gotool -github.com/kisielk/gotool/internal/load -# github.com/klauspost/compress v1.15.11 -## explicit; go 1.17 -github.com/klauspost/compress -github.com/klauspost/compress/fse -github.com/klauspost/compress/huff0 -github.com/klauspost/compress/internal/cpuinfo -github.com/klauspost/compress/internal/snapref -github.com/klauspost/compress/zstd -github.com/klauspost/compress/zstd/internal/xxhash +# github.com/kkHAIKE/contextcheck v1.1.4 +## explicit; go 1.20 +github.com/kkHAIKE/contextcheck # github.com/kulti/thelper v0.6.3 ## explicit; go 1.18 github.com/kulti/thelper/pkg/analyzer -# github.com/kunwardeep/paralleltest v1.0.6 +# github.com/kunwardeep/paralleltest v1.0.10 ## explicit; go 1.17 github.com/kunwardeep/paralleltest/pkg/paralleltest -# github.com/kyoh86/exportloopref v0.1.8 -## explicit; go 1.14 +# github.com/kyoh86/exportloopref v0.1.11 +## explicit; go 1.18 github.com/kyoh86/exportloopref # github.com/ldez/gomoddirectives v0.2.3 ## explicit; go 1.16 github.com/ldez/gomoddirectives -# github.com/ldez/tagliatelle v0.3.1 -## explicit; go 1.16 +# github.com/ldez/tagliatelle v0.5.0 +## explicit; go 1.19 github.com/ldez/tagliatelle +# github.com/leonklingele/grouper v1.1.1 +## explicit; go 1.17 +github.com/leonklingele/grouper/pkg/analyzer +github.com/leonklingele/grouper/pkg/analyzer/consts +github.com/leonklingele/grouper/pkg/analyzer/globals +github.com/leonklingele/grouper/pkg/analyzer/imports +github.com/leonklingele/grouper/pkg/analyzer/types +github.com/leonklingele/grouper/pkg/analyzer/vars +# github.com/lufeee/execinquery v1.2.1 +## explicit; go 1.17 +github.com/lufeee/execinquery +# github.com/macabu/inamedparam v0.1.3 +## explicit; go 1.20 +github.com/macabu/inamedparam # github.com/magiconair/properties v1.8.6 ## explicit; go 1.13 github.com/magiconair/properties -# github.com/maratori/testpackage v1.1.0 -## explicit; go 1.18 +# github.com/maratori/testableexamples v1.0.0 +## explicit; go 1.19 +github.com/maratori/testableexamples/pkg/testableexamples +# github.com/maratori/testpackage v1.1.1 +## explicit; go 1.20 github.com/maratori/testpackage/pkg/testpackage -# github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 +# github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 ## explicit; go 1.13 github.com/matoous/godox # github.com/mattn/go-colorable v0.1.13 ## explicit; go 1.15 github.com/mattn/go-colorable -# github.com/mattn/go-isatty v0.0.16 +# github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.9 @@ -564,27 +569,16 @@ github.com/mattn/go-runewidth # github.com/matttproud/golang_protobuf_extensions v1.0.1 ## explicit github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/mbilski/exhaustivestruct v1.2.0 -## explicit; go 1.15 -github.com/mbilski/exhaustivestruct/pkg/analyzer -# github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 
-## explicit -github.com/mgechev/dots -# github.com/mgechev/revive v1.2.4 -## explicit; go 1.19 +# github.com/mgechev/revive v1.3.7 +## explicit; go 1.20 github.com/mgechev/revive/config github.com/mgechev/revive/formatter +github.com/mgechev/revive/internal/ifelse github.com/mgechev/revive/internal/typeparams github.com/mgechev/revive/lint github.com/mgechev/revive/rule -# github.com/mitchellh/cli v1.1.2 -## explicit; go 1.11 -github.com/mitchellh/cli -# github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db -## explicit -github.com/mitchellh/colorstring -# github.com/mitchellh/copystructure v1.0.0 -## explicit +# github.com/mitchellh/copystructure v1.2.0 +## explicit; go 1.15 github.com/mitchellh/copystructure # github.com/mitchellh/go-homedir v1.1.0 ## explicit @@ -598,34 +592,37 @@ github.com/mitchellh/go-wordwrap # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure -# github.com/mitchellh/reflectwalk v1.0.1 +# github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk -# github.com/moricho/tparallel v0.2.1 -## explicit; go 1.15 +# github.com/moricho/tparallel v0.3.1 +## explicit; go 1.20 github.com/moricho/tparallel github.com/moricho/tparallel/pkg/ssafunc github.com/moricho/tparallel/pkg/ssainstr # github.com/nakabonne/nestif v0.3.1 ## explicit; go 1.15 github.com/nakabonne/nestif -# github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 -## explicit; go 1.14 -github.com/nbutton23/zxcvbn-go -github.com/nbutton23/zxcvbn-go/adjacency -github.com/nbutton23/zxcvbn-go/data -github.com/nbutton23/zxcvbn-go/entropy -github.com/nbutton23/zxcvbn-go/frequency -github.com/nbutton23/zxcvbn-go/match -github.com/nbutton23/zxcvbn-go/matching -github.com/nbutton23/zxcvbn-go/scoring -github.com/nbutton23/zxcvbn-go/utils/math -# github.com/nishanths/exhaustive v0.8.3 -## explicit; go 1.14 +# github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e +## explicit; go 1.12 +# github.com/nishanths/exhaustive v0.12.0 +## explicit; go 1.18 github.com/nishanths/exhaustive # github.com/nishanths/predeclared v0.2.2 ## explicit; go 1.14 github.com/nishanths/predeclared/passes/predeclared +# github.com/nunnatsa/ginkgolinter v0.16.1 +## explicit; go 1.21 +github.com/nunnatsa/ginkgolinter +github.com/nunnatsa/ginkgolinter/internal/ginkgohandler +github.com/nunnatsa/ginkgolinter/internal/gomegahandler +github.com/nunnatsa/ginkgolinter/internal/interfaces +github.com/nunnatsa/ginkgolinter/internal/intervals +github.com/nunnatsa/ginkgolinter/internal/reports +github.com/nunnatsa/ginkgolinter/internal/reverseassertion +github.com/nunnatsa/ginkgolinter/linter +github.com/nunnatsa/ginkgolinter/types +github.com/nunnatsa/ginkgolinter/version # github.com/oklog/run v1.0.0 ## explicit github.com/oklog/run @@ -635,23 +632,19 @@ github.com/olekukonko/tablewriter # github.com/pelletier/go-toml v1.9.5 ## explicit; go 1.12 github.com/pelletier/go-toml -# github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d -## explicit -github.com/phayes/checkstyle -# github.com/pkg/errors v0.9.1 -## explicit -github.com/pkg/errors +# github.com/pelletier/go-toml/v2 v2.2.0 +## explicit; go 1.16 +github.com/pelletier/go-toml/v2 +github.com/pelletier/go-toml/v2/internal/characters +github.com/pelletier/go-toml/v2/internal/danger +github.com/pelletier/go-toml/v2/internal/tracker +github.com/pelletier/go-toml/v2/unstable # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# 
github.com/polyfloyd/go-errorlint v1.0.5 -## explicit; go 1.13 +# github.com/polyfloyd/go-errorlint v1.4.8 +## explicit; go 1.20 github.com/polyfloyd/go-errorlint/errorlint -# github.com/posener/complete v1.2.3 -## explicit; go 1.13 -github.com/posener/complete -github.com/posener/complete/cmd -github.com/posener/complete/cmd/install # github.com/prometheus/client_golang v1.12.1 ## explicit; go 1.13 github.com/prometheus/client_golang/prometheus @@ -670,8 +663,8 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/quasilyte/go-ruleguard v0.3.18 -## explicit; go 1.17 +# github.com/quasilyte/go-ruleguard v0.4.2 +## explicit; go 1.19 github.com/quasilyte/go-ruleguard/internal/goenv github.com/quasilyte/go-ruleguard/internal/golist github.com/quasilyte/go-ruleguard/internal/xsrcimporter @@ -687,30 +680,43 @@ github.com/quasilyte/go-ruleguard/ruleguard/quasigo/stdlib/qstrconv github.com/quasilyte/go-ruleguard/ruleguard/quasigo/stdlib/qstrings github.com/quasilyte/go-ruleguard/ruleguard/textmatch github.com/quasilyte/go-ruleguard/ruleguard/typematch -# github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f +# github.com/quasilyte/gogrep v0.5.0 ## explicit; go 1.16 github.com/quasilyte/gogrep github.com/quasilyte/gogrep/internal/stdinfo github.com/quasilyte/gogrep/nodetag -# github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 +# github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 ## explicit; go 1.14 github.com/quasilyte/regex/syntax # github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 ## explicit; go 1.17 github.com/quasilyte/stdinfo -# github.com/ryancurrah/gomodguard v1.2.4 -## explicit; go 1.16 +# github.com/ryancurrah/gomodguard v1.3.1 +## explicit; go 1.21 github.com/ryancurrah/gomodguard -# github.com/ryanrolds/sqlclosecheck v0.3.0 -## explicit; go 1.13 +# github.com/ryanrolds/sqlclosecheck v0.5.1 +## explicit; go 1.20 github.com/ryanrolds/sqlclosecheck/pkg/analyzer -# github.com/sanposhiho/wastedassign/v2 v2.0.6 +# github.com/sanposhiho/wastedassign/v2 v2.0.7 ## explicit; go 1.14 github.com/sanposhiho/wastedassign/v2 -# github.com/securego/gosec/v2 v2.13.1 +# github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 ## explicit; go 1.19 +github.com/santhosh-tekuri/jsonschema/v5 +github.com/santhosh-tekuri/jsonschema/v5/httploader +# github.com/sashamelentyev/interfacebloat v1.1.0 +## explicit; go 1.18 +github.com/sashamelentyev/interfacebloat/pkg/analyzer +# github.com/sashamelentyev/usestdlibvars v1.25.0 +## explicit; go 1.20 +github.com/sashamelentyev/usestdlibvars/pkg/analyzer +github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping +# github.com/securego/gosec/v2 v2.19.0 +## explicit; go 1.20 github.com/securego/gosec/v2 +github.com/securego/gosec/v2/analyzers github.com/securego/gosec/v2/cwe +github.com/securego/gosec/v2/issue github.com/securego/gosec/v2/rules # github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c ## explicit @@ -723,25 +729,32 @@ github.com/shurcooL/githubv4 github.com/shurcooL/graphql github.com/shurcooL/graphql/ident github.com/shurcooL/graphql/internal/jsonutil -# github.com/sirupsen/logrus v1.9.0 +# github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/sonatard/noctx v0.0.1 -## explicit; go 1.13 +# github.com/sivchari/containedctx v1.0.3 +## explicit; go 1.17 +github.com/sivchari/containedctx +# github.com/sivchari/tenv v1.7.1 +## explicit; go 
1.18 +github.com/sivchari/tenv +# github.com/sonatard/noctx v0.0.2 +## explicit; go 1.20 github.com/sonatard/noctx github.com/sonatard/noctx/ngfunc github.com/sonatard/noctx/reqwithoutctx -# github.com/sourcegraph/go-diff v0.6.1 +# github.com/sourcegraph/go-diff v0.7.0 ## explicit; go 1.14 github.com/sourcegraph/go-diff/diff -# github.com/spf13/afero v1.4.1 -## explicit; go 1.13 +# github.com/spf13/afero v1.11.0 +## explicit; go 1.19 github.com/spf13/afero +github.com/spf13/afero/internal/common github.com/spf13/afero/mem # github.com/spf13/cast v1.5.0 ## explicit; go 1.18 github.com/spf13/cast -# github.com/spf13/cobra v1.6.0 +# github.com/spf13/cobra v1.7.0 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/jwalterweatherman v1.1.0 @@ -750,108 +763,144 @@ github.com/spf13/jwalterweatherman # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/spf13/viper v1.7.1 -## explicit; go 1.12 +# github.com/spf13/viper v1.12.0 +## explicit; go 1.17 github.com/spf13/viper +github.com/spf13/viper/internal/encoding +github.com/spf13/viper/internal/encoding/dotenv +github.com/spf13/viper/internal/encoding/hcl +github.com/spf13/viper/internal/encoding/ini +github.com/spf13/viper/internal/encoding/javaproperties +github.com/spf13/viper/internal/encoding/json +github.com/spf13/viper/internal/encoding/toml +github.com/spf13/viper/internal/encoding/yaml # github.com/ssgreg/nlreturn/v2 v2.2.1 ## explicit; go 1.13 github.com/ssgreg/nlreturn/v2/pkg/nlreturn -# github.com/stretchr/objx v0.5.0 -## explicit; go 1.12 -github.com/stretchr/objx -# github.com/stretchr/testify v1.8.4 +# github.com/stbenjam/no-sprintf-host-port v0.1.1 +## explicit; go 1.16 +github.com/stbenjam/no-sprintf-host-port/pkg/analyzer +# github.com/stretchr/objx v0.5.2 ## explicit; go 1.20 +github.com/stretchr/objx +# github.com/stretchr/testify v1.9.0 +## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/mock # github.com/subosito/gotenv v1.4.1 ## explicit; go 1.18 github.com/subosito/gotenv -# github.com/tdakkota/asciicheck v0.1.1 -## explicit; go 1.13 +# github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c +## explicit +github.com/t-yuki/gocover-cobertura +# github.com/tdakkota/asciicheck v0.2.0 +## explicit; go 1.18 github.com/tdakkota/asciicheck -# github.com/tetafro/godot v1.4.11 -## explicit; go 1.16 +# github.com/tetafro/godot v1.4.16 +## explicit; go 1.20 github.com/tetafro/godot -# github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 +# github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 ## explicit; go 1.12 github.com/timakin/bodyclose/passes/bodyclose -# github.com/tomarrell/wrapcheck/v2 v2.3.0 -## explicit; go 1.16 +# github.com/timonwong/loggercheck v0.9.4 +## explicit; go 1.18 +github.com/timonwong/loggercheck +github.com/timonwong/loggercheck/internal/bytebufferpool +github.com/timonwong/loggercheck/internal/checkers +github.com/timonwong/loggercheck/internal/checkers/printf +github.com/timonwong/loggercheck/internal/rules +github.com/timonwong/loggercheck/internal/sets +github.com/timonwong/loggercheck/internal/stringutil +# github.com/tomarrell/wrapcheck/v2 v2.8.3 +## explicit; go 1.21 github.com/tomarrell/wrapcheck/v2/wrapcheck # github.com/tommy-muehle/go-mnd/v2 v2.5.1 ## explicit; go 1.12 github.com/tommy-muehle/go-mnd/v2 github.com/tommy-muehle/go-mnd/v2/checks github.com/tommy-muehle/go-mnd/v2/config -# github.com/ulikunitz/xz v0.5.10 -## explicit; go 1.12 -github.com/ulikunitz/xz 
-github.com/ulikunitz/xz/internal/hash -github.com/ulikunitz/xz/internal/xlog -github.com/ulikunitz/xz/lzma -# github.com/ultraware/funlen v0.0.3 -## explicit +# github.com/ultraware/funlen v0.1.0 +## explicit; go 1.20 github.com/ultraware/funlen -# github.com/ultraware/whitespace v0.0.5 -## explicit +# github.com/ultraware/whitespace v0.1.0 +## explicit; go 1.20 github.com/ultraware/whitespace -# github.com/uudashr/gocognit v1.0.6 +# github.com/uudashr/gocognit v1.1.2 ## explicit; go 1.16 github.com/uudashr/gocognit -# github.com/vmihailenco/msgpack/v4 v4.3.12 -## explicit; go 1.11 -github.com/vmihailenco/msgpack/v4 -github.com/vmihailenco/msgpack/v4/codes -# github.com/vmihailenco/tagparser v0.1.1 -## explicit; go 1.13 -github.com/vmihailenco/tagparser -github.com/vmihailenco/tagparser/internal -github.com/vmihailenco/tagparser/internal/parser +# github.com/vmihailenco/msgpack v4.0.4+incompatible +## explicit +github.com/vmihailenco/msgpack +github.com/vmihailenco/msgpack/codes +# github.com/vmihailenco/msgpack/v5 v5.4.1 +## explicit; go 1.19 +github.com/vmihailenco/msgpack/v5 +github.com/vmihailenco/msgpack/v5/msgpcode +# github.com/vmihailenco/tagparser/v2 v2.0.0 +## explicit; go 1.15 +github.com/vmihailenco/tagparser/v2 +github.com/vmihailenco/tagparser/v2/internal +github.com/vmihailenco/tagparser/v2/internal/parser +# github.com/xen0n/gosmopolitan v1.2.2 +## explicit; go 1.19 +github.com/xen0n/gosmopolitan +# github.com/yagipy/maintidx v1.0.0 +## explicit; go 1.17 +github.com/yagipy/maintidx +github.com/yagipy/maintidx/pkg/cyc +github.com/yagipy/maintidx/pkg/halstvol # github.com/yeya24/promlinter v0.2.0 ## explicit; go 1.16 github.com/yeya24/promlinter -# github.com/zclconf/go-cty v1.8.2 -## explicit; go 1.12 +# github.com/ykadowak/zerologlint v0.1.5 +## explicit; go 1.19 +github.com/ykadowak/zerologlint +# github.com/zclconf/go-cty v1.14.2 +## explicit; go 1.18 github.com/zclconf/go-cty/cty github.com/zclconf/go-cty/cty/convert +github.com/zclconf/go-cty/cty/ctystrings github.com/zclconf/go-cty/cty/function github.com/zclconf/go-cty/cty/function/stdlib github.com/zclconf/go-cty/cty/gocty github.com/zclconf/go-cty/cty/json -github.com/zclconf/go-cty/cty/msgpack github.com/zclconf/go-cty/cty/set -# github.com/zclconf/go-cty-yaml v1.0.2 -## explicit -github.com/zclconf/go-cty-yaml -# go.opencensus.io v0.24.0 +# gitlab.com/bosi/decorder v0.4.1 +## explicit; go 1.20 +gitlab.com/bosi/decorder +# go-simpler.org/musttag v0.9.0 +## explicit; go 1.20 +go-simpler.org/musttag +# go-simpler.org/sloglint v0.5.0 +## explicit; go 1.20 +go-simpler.org/sloglint +# go.uber.org/atomic v1.7.0 ## explicit; go 1.13 -go.opencensus.io -go.opencensus.io/internal -go.opencensus.io/internal/tagencoding -go.opencensus.io/metric/metricdata -go.opencensus.io/metric/metricproducer -go.opencensus.io/plugin/ocgrpc -go.opencensus.io/plugin/ochttp -go.opencensus.io/plugin/ochttp/propagation/b3 -go.opencensus.io/resource -go.opencensus.io/stats -go.opencensus.io/stats/internal -go.opencensus.io/stats/view -go.opencensus.io/tag -go.opencensus.io/trace -go.opencensus.io/trace/internal -go.opencensus.io/trace/propagation -go.opencensus.io/trace/tracestate -# golang.org/x/crypto v0.18.0 +go.uber.org/atomic +# go.uber.org/automaxprocs v1.5.3 ## explicit; go 1.18 -golang.org/x/crypto/bcrypt +go.uber.org/automaxprocs/internal/cgroups +go.uber.org/automaxprocs/internal/runtime +go.uber.org/automaxprocs/maxprocs +# go.uber.org/multierr v1.6.0 +## explicit; go 1.12 +go.uber.org/multierr +# go.uber.org/zap v1.24.0 +## explicit; 
go 1.19 +go.uber.org/zap +go.uber.org/zap/buffer +go.uber.org/zap/internal +go.uber.org/zap/internal/bufferpool +go.uber.org/zap/internal/color +go.uber.org/zap/internal/exit +go.uber.org/zap/zapcore +# golang.org/x/crypto v0.21.0 +## explicit; go 1.18 +golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 golang.org/x/crypto/chacha20 -golang.org/x/crypto/chacha20poly1305 -golang.org/x/crypto/cryptobyte -golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519/internal/field golang.org/x/crypto/ed25519 @@ -860,29 +909,27 @@ golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/nacl/box golang.org/x/crypto/nacl/secretbox -golang.org/x/crypto/openpgp -golang.org/x/crypto/openpgp/armor -golang.org/x/crypto/openpgp/elgamal -golang.org/x/crypto/openpgp/errors -golang.org/x/crypto/openpgp/packet -golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/pbkdf2 golang.org/x/crypto/salsa20/salsa -golang.org/x/crypto/scrypt +golang.org/x/crypto/sha3 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -# golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 +# golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc +## explicit; go 1.20 +golang.org/x/exp/constraints +golang.org/x/exp/maps +golang.org/x/exp/slices +# golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f ## explicit; go 1.18 golang.org/x/exp/typeparams -# golang.org/x/mod v0.8.0 -## explicit; go 1.17 +# golang.org/x/mod v0.16.0 +## explicit; go 1.18 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.20.0 +# golang.org/x/net v0.22.0 ## explicit; go 1.18 -golang.org/x/net/context golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts golang.org/x/net/http2 @@ -890,36 +937,31 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.16.0 +# golang.org/x/oauth2 v0.18.0 ## explicit; go 1.18 golang.org/x/oauth2 -golang.org/x/oauth2/authhandler -golang.org/x/oauth2/google -golang.org/x/oauth2/google/internal/externalaccount -golang.org/x/oauth2/google/internal/externalaccountauthorizeduser -golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal -golang.org/x/oauth2/jws -golang.org/x/oauth2/jwt -# golang.org/x/sync v0.2.0 -## explicit +# golang.org/x/sync v0.6.0 +## explicit; go 1.18 +golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.16.0 +# golang.org/x/sys v0.18.0 ## explicit; go 1.18 golang.org/x/sys/cpu -golang.org/x/sys/execabs golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.14.0 ## explicit; go 1.18 +golang.org/x/text/runes golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/tools v0.6.0 -## explicit; go 1.18 +# golang.org/x/tools v0.19.0 +## explicit; go 1.19 golang.org/x/tools/go/analysis +golang.org/x/tools/go/analysis/passes/appends golang.org/x/tools/go/analysis/passes/asmdecl golang.org/x/tools/go/analysis/passes/assign golang.org/x/tools/go/analysis/passes/atomic @@ -932,6 +974,8 @@ golang.org/x/tools/go/analysis/passes/composite golang.org/x/tools/go/analysis/passes/copylock golang.org/x/tools/go/analysis/passes/ctrlflow golang.org/x/tools/go/analysis/passes/deepequalerrors +golang.org/x/tools/go/analysis/passes/defers 
+golang.org/x/tools/go/analysis/passes/directive golang.org/x/tools/go/analysis/passes/errorsas golang.org/x/tools/go/analysis/passes/fieldalignment golang.org/x/tools/go/analysis/passes/findcall @@ -950,12 +994,14 @@ golang.org/x/tools/go/analysis/passes/reflectvaluecompare golang.org/x/tools/go/analysis/passes/shadow golang.org/x/tools/go/analysis/passes/shift golang.org/x/tools/go/analysis/passes/sigchanyzer +golang.org/x/tools/go/analysis/passes/slog golang.org/x/tools/go/analysis/passes/sortslice golang.org/x/tools/go/analysis/passes/stdmethods golang.org/x/tools/go/analysis/passes/stringintconv golang.org/x/tools/go/analysis/passes/structtag golang.org/x/tools/go/analysis/passes/testinggoroutine golang.org/x/tools/go/analysis/passes/tests +golang.org/x/tools/go/analysis/passes/timeformat golang.org/x/tools/go/analysis/passes/unmarshal golang.org/x/tools/go/analysis/passes/unreachable golang.org/x/tools/go/analysis/passes/unsafeptr @@ -975,12 +1021,13 @@ golang.org/x/tools/go/ssa/ssautil golang.org/x/tools/go/types/objectpath golang.org/x/tools/go/types/typeutil golang.org/x/tools/imports +golang.org/x/tools/internal/aliases golang.org/x/tools/internal/analysisinternal golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label -golang.org/x/tools/internal/fastwalk +golang.org/x/tools/internal/event/tag golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk @@ -990,29 +1037,8 @@ golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal -# golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 -## explicit; go 1.17 -golang.org/x/xerrors -golang.org/x/xerrors/internal -# google.golang.org/api v0.126.0 -## explicit; go 1.19 -google.golang.org/api/googleapi -google.golang.org/api/googleapi/transport -google.golang.org/api/iamcredentials/v1 -google.golang.org/api/internal -google.golang.org/api/internal/cert -google.golang.org/api/internal/gensupport -google.golang.org/api/internal/impersonate -google.golang.org/api/internal/third_party/uritemplates -google.golang.org/api/iterator -google.golang.org/api/option -google.golang.org/api/option/internaloption -google.golang.org/api/storage/v1 -google.golang.org/api/transport -google.golang.org/api/transport/grpc -google.golang.org/api/transport/http -google.golang.org/api/transport/http/internal/propagation -# google.golang.org/appengine v1.6.7 +golang.org/x/tools/internal/versions +# google.golang.org/appengine v1.6.8 ## explicit; go 1.11 google.golang.org/appengine google.golang.org/appengine/datastore @@ -1025,34 +1051,18 @@ google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api -google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch -google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc -## explicit; go 1.19 -google.golang.org/genproto/googleapis/iam/v1 -google.golang.org/genproto/googleapis/type/date -google.golang.org/genproto/googleapis/type/expr -google.golang.org/genproto/internal -# google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc -## explicit; go 1.19 -google.golang.org/genproto/googleapis/api 
-google.golang.org/genproto/googleapis/api/annotations -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f ## explicit; go 1.19 -google.golang.org/genproto/googleapis/rpc/code -google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.56.3 -## explicit; go 1.17 +# google.golang.org/grpc v1.61.1 +## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base -google.golang.org/grpc/balancer/grpclb -google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 @@ -1060,16 +1070,7 @@ google.golang.org/grpc/channelz google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials -google.golang.org/grpc/credentials/alts -google.golang.org/grpc/credentials/alts/internal -google.golang.org/grpc/credentials/alts/internal/authinfo -google.golang.org/grpc/credentials/alts/internal/conn -google.golang.org/grpc/credentials/alts/internal/handshaker -google.golang.org/grpc/credentials/alts/internal/handshaker/service -google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp -google.golang.org/grpc/credentials/google google.golang.org/grpc/credentials/insecure -google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog @@ -1084,15 +1085,16 @@ google.golang.org/grpc/internal/buffer google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/credentials google.golang.org/grpc/internal/envconfig -google.golang.org/grpc/internal/googlecloud google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig @@ -1104,21 +1106,23 @@ google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/reflection +google.golang.org/grpc/reflection/grpc_reflection_v1 google.golang.org/grpc/reflection/grpc_reflection_v1alpha google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -google.golang.org/grpc/test/bufconn -# google.golang.org/protobuf v1.31.0 -## explicit; go 1.11 +# google.golang.org/protobuf v1.33.0 +## explicit; go 1.17 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand +google.golang.org/protobuf/internal/editiondefaults google.golang.org/protobuf/internal/encoding/defval google.golang.org/protobuf/internal/encoding/json 
google.golang.org/protobuf/internal/encoding/messageset @@ -1142,11 +1146,13 @@ google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb +google.golang.org/protobuf/types/gofeaturespb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/emptypb -google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/timestamppb +# gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f +## explicit # gopkg.in/ini.v1 v1.67.0 ## explicit gopkg.in/ini.v1 @@ -1162,8 +1168,8 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# honnef.co/go/tools v0.3.3 -## explicit; go 1.17 +# honnef.co/go/tools v0.4.7 +## explicit; go 1.19 honnef.co/go/tools/analysis/code honnef.co/go/tools/analysis/edit honnef.co/go/tools/analysis/facts/deprecated @@ -1192,16 +1198,13 @@ honnef.co/go/tools/staticcheck/fakereflect honnef.co/go/tools/staticcheck/fakexml honnef.co/go/tools/stylecheck honnef.co/go/tools/unused -# mvdan.cc/gofumpt v0.4.0 -## explicit; go 1.18 +# mvdan.cc/gofumpt v0.6.0 +## explicit; go 1.20 mvdan.cc/gofumpt/format +mvdan.cc/gofumpt/internal/govendor/go/doc/comment +mvdan.cc/gofumpt/internal/govendor/go/format +mvdan.cc/gofumpt/internal/govendor/go/printer mvdan.cc/gofumpt/internal/version -# mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed -## explicit -mvdan.cc/interfacer/check -# mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b -## explicit -mvdan.cc/lint -# mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 -## explicit; go 1.17 +# mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 +## explicit; go 1.20 mvdan.cc/unparam/check diff --git a/vendor/mvdan.cc/gofumpt/format/format.go b/vendor/mvdan.cc/gofumpt/format/format.go index fa4f2fcb6d..7316eb78b8 100644 --- a/vendor/mvdan.cc/gofumpt/format/format.go +++ b/vendor/mvdan.cc/gofumpt/format/format.go @@ -10,7 +10,6 @@ import ( "bytes" "fmt" "go/ast" - "go/format" "go/parser" "go/token" "os" @@ -26,22 +25,26 @@ import ( "golang.org/x/mod/semver" "golang.org/x/tools/go/ast/astutil" + "mvdan.cc/gofumpt/internal/govendor/go/format" "mvdan.cc/gofumpt/internal/version" ) // Options is the set of formatting options which affect gofumpt. type Options struct { - // LangVersion corresponds to the Go language version a piece of code is - // written in. The version is used to decide whether to apply formatting - // rules which require new language features. When inside a Go module, - // LangVersion should be: - // - // go mod edit -json | jq -r '.Go' + // TODO: link to the go/version docs once Go 1.22 is out. + // The old semver docs said: // // LangVersion is treated as a semantic version, which may start with a "v" // prefix. Like Go versions, it may also be incomplete; "1.14" is equivalent // to "1.14.0". When empty, it is equivalent to "v1", to not use language // features which could break programs. + + // LangVersion is the Go version a piece of code is written in. + // The version is used to decide whether to apply formatting + // rules which require new language features. 
+ // When inside a Go module, LangVersion should typically be: + // + // go mod edit -json | jq -r '.Go' LangVersion string // ModulePath corresponds to the Go module path which contains the source @@ -82,22 +85,28 @@ func Source(src []byte, opts Options) ([]byte, error) { return buf.Bytes(), nil } +var rxGoVersionMajorMinor = regexp.MustCompile(`^(v|go)?([1-9]+)\.([0-9]+)`) + // File modifies a file and fset in place to follow gofumpt's format. The // changes might include manipulating adding or removing newlines in fset, // modifying the position of nodes, or modifying literal values. func File(fset *token.FileSet, file *ast.File, opts Options) { simplify(file) + // TODO: replace this hacky mess with go/version once we can rely on Go 1.22, + // as well as replacing our uses of the semver package. + // In particular, we likely want to allow any of 1.21, 1.21.2, or go1.21rc3, + // but we can rely on go/version.Lang to validate and normalize. if opts.LangVersion == "" { - opts.LangVersion = "v1" - } else if opts.LangVersion[0] != 'v' { - opts.LangVersion = "v" + opts.LangVersion + opts.LangVersion = "v1.0" } - if !semver.IsValid(opts.LangVersion) { - panic(fmt.Sprintf("invalid semver string: %q", opts.LangVersion)) + m := rxGoVersionMajorMinor.FindStringSubmatch(opts.LangVersion) + if m == nil { + panic(fmt.Sprintf("invalid Go version: %q", opts.LangVersion)) } + opts.LangVersion = "v" + m[2] + "." + m[3] f := &fumpter{ - File: fset.File(file.Pos()), + file: fset.File(file.Pos()), fset: fset, astFile: file, Options: opts, @@ -170,7 +179,7 @@ var rxOctalInteger = regexp.MustCompile(`\A0[0-7_]+\z`) type fumpter struct { Options - *token.File + file *token.File fset *token.FileSet astFile *ast.File @@ -217,7 +226,8 @@ func (f *fumpter) inlineComment(pos token.Pos) *ast.Comment { func (f *fumpter) addNewline(at token.Pos) { offset := f.Offset(at) - field := reflect.ValueOf(f.File).Elem().FieldByName("lines") + // TODO: replace with the new Lines method once we require Go 1.21 or later + field := reflect.ValueOf(f.file).Elem().FieldByName("lines") n := field.Len() lines := make([]int, 0, n+1) for i := 0; i < n; i++ { @@ -236,7 +246,7 @@ func (f *fumpter) addNewline(at token.Pos) { if offset >= 0 { lines = append(lines, offset) } - if !f.SetLines(lines) { + if !f.file.SetLines(lines) { panic(fmt.Sprintf("could not set lines to %v", lines)) } } @@ -245,7 +255,7 @@ func (f *fumpter) addNewline(at token.Pos) { // up on the same line. func (f *fumpter) removeLines(fromLine, toLine int) { for fromLine < toLine { - f.MergeLine(fromLine) + f.file.MergeLine(fromLine) toLine-- } } @@ -256,6 +266,18 @@ func (f *fumpter) removeLinesBetween(from, to token.Pos) { f.removeLines(f.Line(from)+1, f.Line(to)) } +func (f *fumpter) Position(p token.Pos) token.Position { + return f.file.PositionFor(p, false) +} + +func (f *fumpter) Line(p token.Pos) int { + return f.Position(p).Line +} + +func (f *fumpter) Offset(p token.Pos) int { + return f.file.Offset(p) +} + type byteCounter int func (b *byteCounter) Write(p []byte) (n int, err error) { @@ -285,14 +307,14 @@ func (f *fumpter) lineEnd(line int) token.Pos { if line < 1 { panic("illegal line number") } - total := f.LineCount() + total := f.file.LineCount() if line > total { panic("illegal line number") } if line == total { return f.astFile.End() } - return f.LineStart(line+1) - 1 + return f.file.LineStart(line+1) - 1 } // rxCommentDirective covers all common Go comment directives: @@ -305,10 +327,11 @@ func (f *fumpter) lineEnd(line int) token.Pos { // //sys(nb)? 
| syscall function wrapper prototypes // //nolint | nolint directive for golangci // //noinspection | noinspection directive for GoLand and friends +// //NOSONAR | NOSONAR directive for SonarQube // // Note that the "some-words:" matching expects a letter afterward, such as // "go:generate", to prevent matching false positives like "https://site". -var rxCommentDirective = regexp.MustCompile(`^([a-z-]+:[a-z]+|line\b|export\b|extern\b|sys(nb)?\b|no(lint|inspection)\b)`) +var rxCommentDirective = regexp.MustCompile(`^([a-z-]+:[a-z]+|line\b|export\b|extern\b|sys(nb)?\b|no(lint|inspection)\b)|NOSONAR\b`) func (f *fumpter) applyPre(c *astutil.Cursor) { f.splitLongLine(c) @@ -395,7 +418,7 @@ func (f *fumpter) applyPre(c *astutil.Cursor) { slc := []string{ "//gofumpt:diagnose", "version:", - version.String(), + version.String(""), "flags:", "-lang=" + f.LangVersion, "-modpath=" + f.ModulePath, @@ -467,13 +490,19 @@ func (f *fumpter) applyPre(c *astutil.Cursor) { specEnd := node.Specs[0].End() if len(f.commentsBetween(node.TokPos, specPos)) > 0 { - // If the single spec has any comment, it must - // go before the entire declaration now. + // If the single spec has a comment on the line above, + // the comment must go before the entire declaration now. node.TokPos = specPos } else { f.removeLines(f.Line(node.TokPos), f.Line(specPos)) } - f.removeLines(f.Line(specEnd), f.Line(node.Rparen)) + if len(f.commentsBetween(specEnd, node.Rparen)) > 0 { + // Leave one newline to not force a comment on the next line to + // become an inline comment. + f.removeLines(f.Line(specEnd)+1, f.Line(node.Rparen)) + } else { + f.removeLines(f.Line(specEnd), f.Line(node.Rparen)) + } // Remove the parentheses. go/printer will automatically // get rid of the newlines. @@ -546,12 +575,19 @@ func (f *fumpter) applyPre(c *astutil.Cursor) { if f.Line(sign.Pos()) != endLine { handleMultiLine := func(fl *ast.FieldList) { + // Refuse to insert a newline before the closing token + // if the list is empty or all in one line. if fl == nil || len(fl.List) == 0 { return } + fieldOpeningLine := f.Line(fl.Opening) + fieldClosingLine := f.Line(fl.Closing) + if fieldOpeningLine == fieldClosingLine { + return + } + lastFieldEnd := fl.List[len(fl.List)-1].End() lastFieldLine := f.Line(lastFieldEnd) - fieldClosingLine := f.Line(fl.Closing) isLastFieldOnFieldClosingLine := lastFieldLine == fieldClosingLine isLastFieldOnSigClosingLine := lastFieldLine == endLine @@ -852,9 +888,9 @@ func (f *fumpter) stmts(list []ast.Stmt) { continue // not an if following another statement } as, ok := list[i-1].(*ast.AssignStmt) - if !ok || as.Tok != token.DEFINE || + if !ok || (as.Tok != token.DEFINE && as.Tok != token.ASSIGN) || !identEqual(as.Lhs[len(as.Lhs)-1], "err") { - continue // not "..., err := ..." + continue // not ", err :=" nor ", err =" } be, ok := ifs.Cond.(*ast.BinaryExpr) if !ok || ifs.Init != nil || ifs.Else != nil { diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/doc.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/doc.go new file mode 100644 index 0000000000..45a476aa9a --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/doc.go @@ -0,0 +1,36 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +/* +Package comment implements parsing and reformatting of Go doc comments, +(documentation comments), which are comments that immediately precede +a top-level declaration of a package, const, func, type, or var. + +Go doc comment syntax is a simplified subset of Markdown that supports +links, headings, paragraphs, lists (without nesting), and preformatted text blocks. +The details of the syntax are documented at https://go.dev/doc/comment. + +To parse the text associated with a doc comment (after removing comment markers), +use a [Parser]: + + var p comment.Parser + doc := p.Parse(text) + +The result is a [*Doc]. +To reformat it as a doc comment, HTML, Markdown, or plain text, +use a [Printer]: + + var pr comment.Printer + os.Stdout.Write(pr.Text(doc)) + +The [Parser] and [Printer] types are structs whose fields can be +modified to customize the operations. +For details, see the documentation for those types. + +Use cases that need additional control over reformatting can +implement their own logic by inspecting the parsed syntax itself. +See the documentation for [Doc], [Block], [Text] for an overview +and links to additional types. +*/ +package comment diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/html.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/html.go new file mode 100644 index 0000000000..bc076f6a58 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/html.go @@ -0,0 +1,169 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package comment + +import ( + "bytes" + "fmt" + "strconv" +) + +// An htmlPrinter holds the state needed for printing a Doc as HTML. +type htmlPrinter struct { + *Printer + tight bool +} + +// HTML returns an HTML formatting of the Doc. +// See the [Printer] documentation for ways to customize the HTML output. +func (p *Printer) HTML(d *Doc) []byte { + hp := &htmlPrinter{Printer: p} + var out bytes.Buffer + for _, x := range d.Content { + hp.block(&out, x) + } + return out.Bytes() +} + +// block prints the block x to out. +func (p *htmlPrinter) block(out *bytes.Buffer, x Block) { + switch x := x.(type) { + default: + fmt.Fprintf(out, "?%T", x) + + case *Paragraph: + if !p.tight { + out.WriteString("
<p>")
+		}
+		p.text(out, x.Text)
+		out.WriteString("\n")
+
+	case *Heading:
+		out.WriteString("<h")
+		h := strconv.Itoa(p.headingLevel())
+		out.WriteString(h)
+		if id := p.headingID(x); id != "" {
+			out.WriteString(` id="`)
+			p.escape(out, id)
+			out.WriteString(`"`)
+		}
+		out.WriteString(">")
+		p.text(out, x.Text)
+		out.WriteString("</h")
+		out.WriteString(h)
+		out.WriteString(">\n")
+
+	case *Code:
+		out.WriteString("<pre>")
+		p.escape(out, x.Text)
+		out.WriteString("</pre>\n")
+
+	case *List:
+		kind := "ol>\n"
+		if x.Items[0].Number == "" {
+			kind = "ul>\n"
+		}
+		out.WriteString("<")
+		out.WriteString(kind)
+		next := "1"
+		for _, item := range x.Items {
+			out.WriteString("<li>")
+			if n := item.Number; n != "" {
+				if n != next {
+					out.WriteString(` value="`)
+					out.WriteString(n)
+					out.WriteString(`"`)
+				}
+				next = n
+			}
+			next = inc(next)
+			p.tight = !x.BlankBetween()
+			for _, blk := range item.Content {
+				p.block(out, blk)
+			}
+			p.tight = false
+		}
+		out.WriteString("</" + kind)
+	}
+}
+
+// inc increments the decimal string s.
+// For example, inc("1199") == "1200".
+func inc(s string) string {
+	b := []byte(s)
+	for i := len(b) - 1; i >= 0; i-- {
+		if b[i] < '9' {
+			b[i]++
+			return string(b)
+		}
+		b[i] = '0'
+	}
+	return "1" + string(b)
+}
+
+// text prints the text sequence x to out.
+func (p *htmlPrinter) text(out *bytes.Buffer, x []Text) {
+	for _, t := range x {
+		switch t := t.(type) {
+		case Plain:
+			p.escape(out, string(t))
+		case Italic:
+			out.WriteString("<i>")
+			p.escape(out, string(t))
+			out.WriteString("</i>")
+		case *Link:
+			out.WriteString(`<a href="`)
+			p.escape(out, t.URL)
+			out.WriteString(`">`)
+			p.text(out, t.Text)
+			out.WriteString("</a>")
+		case *DocLink:
+			url := p.docLinkURL(t)
+			if url != "" {
+				out.WriteString(`<a href="`)
+				p.escape(out, url)
+				out.WriteString(`">`)
+			}
+			p.text(out, t.Text)
+			if url != "" {
+				out.WriteString("</a>")
+			}
+		}
+	}
+}
+
+// escape prints s to out as plain text,
+// escaping < & " ' and > to avoid being misinterpreted
+// in larger HTML constructs.
+func (p *htmlPrinter) escape(out *bytes.Buffer, s string) {
+	start := 0
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '<':
+			out.WriteString(s[start:i])
+			out.WriteString("&lt;")
+			start = i + 1
+		case '&':
+			out.WriteString(s[start:i])
+			out.WriteString("&amp;")
+			start = i + 1
+		case '"':
+			out.WriteString(s[start:i])
+			out.WriteString("&quot;")
+			start = i + 1
+		case '\'':
+			out.WriteString(s[start:i])
+			out.WriteString("&#39;")
+			start = i + 1
+		case '>':
+			out.WriteString(s[start:i])
+			out.WriteString("&gt;")
+			start = i + 1
+		}
+	}
+	out.WriteString(s[start:])
+}
diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/markdown.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/markdown.go
new file mode 100644
index 0000000000..d8550f2e39
--- /dev/null
+++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/markdown.go
@@ -0,0 +1,188 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package comment
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+)
+
+// An mdPrinter holds the state needed for printing a Doc as Markdown.
+type mdPrinter struct {
+	*Printer
+	headingPrefix string
+	raw           bytes.Buffer
+}
+
+// Markdown returns a Markdown formatting of the Doc.
+// See the [Printer] documentation for ways to customize the Markdown output.
+func (p *Printer) Markdown(d *Doc) []byte {
+	mp := &mdPrinter{
+		Printer:       p,
+		headingPrefix: strings.Repeat("#", p.headingLevel()) + " ",
+	}
+
+	var out bytes.Buffer
+	for i, x := range d.Content {
+		if i > 0 {
+			out.WriteByte('\n')
+		}
+		mp.block(&out, x)
+	}
+	return out.Bytes()
+}
+
+// block prints the block x to out.
+func (p *mdPrinter) block(out *bytes.Buffer, x Block) {
+	switch x := x.(type) {
+	default:
+		fmt.Fprintf(out, "?%T", x)
+
+	case *Paragraph:
+		p.text(out, x.Text)
+		out.WriteString("\n")
+
+	case *Heading:
+		out.WriteString(p.headingPrefix)
+		p.text(out, x.Text)
+		if id := p.headingID(x); id != "" {
+			out.WriteString(" {#")
+			out.WriteString(id)
+			out.WriteString("}")
+		}
+		out.WriteString("\n")
+
+	case *Code:
+		md := x.Text
+		for md != "" {
+			var line string
+			line, md, _ = strings.Cut(md, "\n")
+			if line != "" {
+				out.WriteString("\t")
+				out.WriteString(line)
+			}
+			out.WriteString("\n")
+		}
+
+	case *List:
+		loose := x.BlankBetween()
+		for i, item := range x.Items {
+			if i > 0 && loose {
+				out.WriteString("\n")
+			}
+			if n := item.Number; n != "" {
+				out.WriteString(" ")
+				out.WriteString(n)
+				out.WriteString(". ")
+			} else {
+				out.WriteString("  - ") // SP SP - SP
+			}
+			for i, blk := range item.Content {
+				const fourSpace = "    "
+				if i > 0 {
+					out.WriteString("\n" + fourSpace)
+				}
+				p.text(out, blk.(*Paragraph).Text)
+				out.WriteString("\n")
+			}
+		}
+	}
+}
+
+// text prints the text sequence x to out.
+func (p *mdPrinter) text(out *bytes.Buffer, x []Text) {
+	p.raw.Reset()
+	p.rawText(&p.raw, x)
+	line := bytes.TrimSpace(p.raw.Bytes())
+	if len(line) == 0 {
+		return
+	}
+	switch line[0] {
+	case '+', '-', '*', '#':
+		// Escape what would be the start of an unordered list or heading.
+		out.WriteByte('\\')
+	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		i := 1
+		for i < len(line) && '0' <= line[i] && line[i] <= '9' {
+			i++
+		}
+		if i < len(line) && (line[i] == '.' || line[i] == ')') {
+			// Escape what would be the start of an ordered list.
+			out.Write(line[:i])
+			out.WriteByte('\\')
+			line = line[i:]
+		}
+	}
+	out.Write(line)
+}
+
+// rawText prints the text sequence x to out,
+// without worrying about escaping characters
+// that have special meaning at the start of a Markdown line.
+func (p *mdPrinter) rawText(out *bytes.Buffer, x []Text) {
+	for _, t := range x {
+		switch t := t.(type) {
+		case Plain:
+			p.escape(out, string(t))
+		case Italic:
+			out.WriteString("*")
+			p.escape(out, string(t))
+			out.WriteString("*")
+		case *Link:
+			out.WriteString("[")
+			p.rawText(out, t.Text)
+			out.WriteString("](")
+			out.WriteString(t.URL)
+			out.WriteString(")")
+		case *DocLink:
+			url := p.docLinkURL(t)
+			if url != "" {
+				out.WriteString("[")
+			}
+			p.rawText(out, t.Text)
+			if url != "" {
+				out.WriteString("](")
+				url = strings.ReplaceAll(url, "(", "%28")
+				url = strings.ReplaceAll(url, ")", "%29")
+				out.WriteString(url)
+				out.WriteString(")")
+			}
+		}
+	}
+}
+
+// escape prints s to out as plain text,
+// escaping special characters to avoid being misinterpreted
+// as Markdown markup sequences.
+func (p *mdPrinter) escape(out *bytes.Buffer, s string) {
+	start := 0
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '\n':
+			// Turn all \n into spaces, for a few reasons:
+			//   - Avoid introducing paragraph breaks accidentally.
+			//   - Avoid the need to reindent after the newline.
+			//   - Avoid problems with Markdown renderers treating
+			//     every mid-paragraph newline as a <br>.
+			out.WriteString(s[start:i])
+			out.WriteByte(' ')
+			start = i + 1
+			continue
+		case '`', '_', '*', '[', '<', '\\':
+			// Not all of these need to be escaped all the time,
+			// but is valid and easy to do so.
+			// We assume the Markdown is being passed to a
+			// Markdown renderer, not edited by a person,
+			// so it's fine to have escapes that are not strictly
+			// necessary in some cases.
+			out.WriteString(s[start:i])
+			out.WriteByte('\\')
+			out.WriteByte(s[i])
+			start = i + 1
+		}
+	}
+	out.WriteString(s[start:])
+}
diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/parse.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/parse.go
new file mode 100644
index 0000000000..372577b2b3
--- /dev/null
+++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/parse.go
@@ -0,0 +1,1262 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package comment
+
+import (
+	"sort"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// A Doc is a parsed Go doc comment.
+type Doc struct {
+	// Content is the sequence of content blocks in the comment.
+	Content []Block
+
+	// Links is the link definitions in the comment.
+	Links []*LinkDef
+}
+
+// A LinkDef is a single link definition.
+type LinkDef struct {
+	Text string // the link text
+	URL  string // the link URL
+	Used bool   // whether the comment uses the definition
+}
+
+// A Block is block-level content in a doc comment,
+// one of [*Code], [*Heading], [*List], or [*Paragraph].
+type Block interface {
+	block()
+}
+
+// A Heading is a doc comment heading.
+type Heading struct {
+	Text []Text // the heading text
+}
+
+func (*Heading) block() {}
+
+// A List is a numbered or bullet list.
+// Lists are always non-empty: len(Items) > 0.
+// In a numbered list, every Items[i].Number is a non-empty string.
+// In a bullet list, every Items[i].Number is an empty string.
+type List struct {
+	// Items is the list items.
+	Items []*ListItem
+
+	// ForceBlankBefore indicates that the list must be
+	// preceded by a blank line when reformatting the comment,
+	// overriding the usual conditions. See the BlankBefore method.
+	//
+	// The comment parser sets ForceBlankBefore for any list
+	// that is preceded by a blank line, to make sure
+	// the blank line is preserved when printing.
+	ForceBlankBefore bool
+
+	// ForceBlankBetween indicates that list items must be
+	// separated by blank lines when reformatting the comment,
+	// overriding the usual conditions. See the BlankBetween method.
+	//
+	// The comment parser sets ForceBlankBetween for any list
+	// that has a blank line between any two of its items, to make sure
+	// the blank lines are preserved when printing.
+	ForceBlankBetween bool
+}
+
+func (*List) block() {}
+
+// BlankBefore reports whether a reformatting of the comment
+// should include a blank line before the list.
+// The default rule is the same as for [BlankBetween]:
+// if the list item content contains any blank lines
+// (meaning at least one item has multiple paragraphs)
+// then the list itself must be preceded by a blank line.
+// A preceding blank line can be forced by setting [List].ForceBlankBefore.
+func (l *List) BlankBefore() bool {
+	return l.ForceBlankBefore || l.BlankBetween()
+}
+
+// BlankBetween reports whether a reformatting of the comment
+// should include a blank line between each pair of list items.
+// The default rule is that if the list item content contains any blank lines +// (meaning at least one item has multiple paragraphs) +// then list items must themselves be separated by blank lines. +// Blank line separators can be forced by setting [List].ForceBlankBetween. +func (l *List) BlankBetween() bool { + if l.ForceBlankBetween { + return true + } + for _, item := range l.Items { + if len(item.Content) != 1 { + // Unreachable for parsed comments today, + // since the only way to get multiple item.Content + // is multiple paragraphs, which must have been + // separated by a blank line. + return true + } + } + return false +} + +// A ListItem is a single item in a numbered or bullet list. +type ListItem struct { + // Number is a decimal string in a numbered list + // or an empty string in a bullet list. + Number string // "1", "2", ...; "" for bullet list + + // Content is the list content. + // Currently, restrictions in the parser and printer + // require every element of Content to be a *Paragraph. + Content []Block // Content of this item. +} + +// A Paragraph is a paragraph of text. +type Paragraph struct { + Text []Text +} + +func (*Paragraph) block() {} + +// A Code is a preformatted code block. +type Code struct { + // Text is the preformatted text, ending with a newline character. + // It may be multiple lines, each of which ends with a newline character. + // It is never empty, nor does it start or end with a blank line. + Text string +} + +func (*Code) block() {} + +// A Text is text-level content in a doc comment, +// one of [Plain], [Italic], [*Link], or [*DocLink]. +type Text interface { + text() +} + +// A Plain is a string rendered as plain text (not italicized). +type Plain string + +func (Plain) text() {} + +// An Italic is a string rendered as italicized text. +type Italic string + +func (Italic) text() {} + +// A Link is a link to a specific URL. +type Link struct { + Auto bool // is this an automatic (implicit) link of a literal URL? + Text []Text // text of link + URL string // target URL of link +} + +func (*Link) text() {} + +// A DocLink is a link to documentation for a Go package or symbol. +type DocLink struct { + Text []Text // text of link + + // ImportPath, Recv, and Name identify the Go package or symbol + // that is the link target. The potential combinations of + // non-empty fields are: + // - ImportPath: a link to another package + // - ImportPath, Name: a link to a const, func, type, or var in another package + // - ImportPath, Recv, Name: a link to a method in another package + // - Name: a link to a const, func, type, or var in this package + // - Recv, Name: a link to a method in this package + ImportPath string // import path + Recv string // receiver type, without any pointer star, for methods + Name string // const, func, type, var, or method name +} + +func (*DocLink) text() {} + +// A Parser is a doc comment parser. +// The fields in the struct can be filled in before calling Parse +// in order to customize the details of the parsing process. +type Parser struct { + // Words is a map of Go identifier words that + // should be italicized and potentially linked. + // If Words[w] is the empty string, then the word w + // is only italicized. Otherwise it is linked, using + // Words[w] as the link target. + // Words corresponds to the [go/doc.ToHTML] words parameter. + Words map[string]string + + // LookupPackage resolves a package name to an import path. 
+ // + // If LookupPackage(name) returns ok == true, then [name] + // (or [name.Sym] or [name.Sym.Method]) + // is considered a documentation link to importPath's package docs. + // It is valid to return "", true, in which case name is considered + // to refer to the current package. + // + // If LookupPackage(name) returns ok == false, + // then [name] (or [name.Sym] or [name.Sym.Method]) + // will not be considered a documentation link, + // except in the case where name is the full (but single-element) import path + // of a package in the standard library, such as in [math] or [io.Reader]. + // LookupPackage is still called for such names, + // in order to permit references to imports of other packages + // with the same package names. + // + // Setting LookupPackage to nil is equivalent to setting it to + // a function that always returns "", false. + LookupPackage func(name string) (importPath string, ok bool) + + // LookupSym reports whether a symbol name or method name + // exists in the current package. + // + // If LookupSym("", "Name") returns true, then [Name] + // is considered a documentation link for a const, func, type, or var. + // + // Similarly, if LookupSym("Recv", "Name") returns true, + // then [Recv.Name] is considered a documentation link for + // type Recv's method Name. + // + // Setting LookupSym to nil is equivalent to setting it to a function + // that always returns false. + LookupSym func(recv, name string) (ok bool) +} + +// parseDoc is parsing state for a single doc comment. +type parseDoc struct { + *Parser + *Doc + links map[string]*LinkDef + lines []string + lookupSym func(recv, name string) bool +} + +// lookupPkg is called to look up the pkg in [pkg], [pkg.Name], and [pkg.Name.Recv]. +// If pkg has a slash, it is assumed to be the full import path and is returned with ok = true. +// +// Otherwise, pkg is probably a simple package name like "rand" (not "crypto/rand" or "math/rand"). +// d.LookupPackage provides a way for the caller to allow resolving such names with reference +// to the imports in the surrounding package. +// +// There is one collision between these two cases: single-element standard library names +// like "math" are full import paths but don't contain slashes. We let d.LookupPackage have +// the first chance to resolve it, in case there's a different package imported as math, +// and otherwise we refer to a built-in list of single-element standard library package names. +func (d *parseDoc) lookupPkg(pkg string) (importPath string, ok bool) { + if strings.Contains(pkg, "/") { // assume a full import path + if validImportPath(pkg) { + return pkg, true + } + return "", false + } + if d.LookupPackage != nil { + // Give LookupPackage a chance. + if path, ok := d.LookupPackage(pkg); ok { + return path, true + } + } + return DefaultLookupPackage(pkg) +} + +func isStdPkg(path string) bool { + // TODO(rsc): Use sort.Find once we don't have to worry about + // copying this code into older Go environments. + i := sort.Search(len(stdPkgs), func(i int) bool { return stdPkgs[i] >= path }) + return i < len(stdPkgs) && stdPkgs[i] == path +} + +// DefaultLookupPackage is the default package lookup +// function, used when [Parser].LookupPackage is nil. +// It recognizes names of the packages from the standard +// library with single-element import paths, such as math, +// which would otherwise be impossible to name. +// +// Note that the go/doc package provides a more sophisticated +// lookup based on the imports used in the current package. 
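+//
+// Illustrative sketch (based on the stdPkgs table in std.go, not upstream text):
+//
+//	path, ok := DefaultLookupPackage("math") // "math", true: single-element std path
+//	path, ok = DefaultLookupPackage("rand")  // "", false: ambiguous (crypto/rand, math/rand)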
+func DefaultLookupPackage(name string) (importPath string, ok bool) { + if isStdPkg(name) { + return name, true + } + return "", false +} + +// Parse parses the doc comment text and returns the *Doc form. +// Comment markers (/* // and */) in the text must have already been removed. +func (p *Parser) Parse(text string) *Doc { + lines := unindent(strings.Split(text, "\n")) + d := &parseDoc{ + Parser: p, + Doc: new(Doc), + links: make(map[string]*LinkDef), + lines: lines, + lookupSym: func(recv, name string) bool { return false }, + } + if p.LookupSym != nil { + d.lookupSym = p.LookupSym + } + + // First pass: break into block structure and collect known links. + // The text is all recorded as Plain for now. + var prev span + for _, s := range parseSpans(lines) { + var b Block + switch s.kind { + default: + panic("mvdan.cc/gofumpt/internal/govendor/go/doc/comment: internal error: unknown span kind") + case spanList: + b = d.list(lines[s.start:s.end], prev.end < s.start) + case spanCode: + b = d.code(lines[s.start:s.end]) + case spanOldHeading: + b = d.oldHeading(lines[s.start]) + case spanHeading: + b = d.heading(lines[s.start]) + case spanPara: + b = d.paragraph(lines[s.start:s.end]) + } + if b != nil { + d.Content = append(d.Content, b) + } + prev = s + } + + // Second pass: interpret all the Plain text now that we know the links. + for _, b := range d.Content { + switch b := b.(type) { + case *Paragraph: + b.Text = d.parseLinkedText(string(b.Text[0].(Plain))) + case *List: + for _, i := range b.Items { + for _, c := range i.Content { + p := c.(*Paragraph) + p.Text = d.parseLinkedText(string(p.Text[0].(Plain))) + } + } + } + } + + return d.Doc +} + +// A span represents a single span of comment lines (lines[start:end]) +// of an identified kind (code, heading, paragraph, and so on). +type span struct { + start int + end int + kind spanKind +} + +// A spanKind describes the kind of span. +type spanKind int + +const ( + _ spanKind = iota + spanCode + spanHeading + spanList + spanOldHeading + spanPara +) + +func parseSpans(lines []string) []span { + var spans []span + + // The loop may process a line twice: once as unindented + // and again forced indented. So the maximum expected + // number of iterations is 2*len(lines). The repeating logic + // can be subtle, though, and to protect against introduction + // of infinite loops in future changes, we watch to see that + // we are not looping too much. A panic is better than a + // quiet infinite loop. + watchdog := 2 * len(lines) + + i := 0 + forceIndent := 0 +Spans: + for { + // Skip blank lines. + for i < len(lines) && lines[i] == "" { + i++ + } + if i >= len(lines) { + break + } + if watchdog--; watchdog < 0 { + panic("mvdan.cc/gofumpt/internal/govendor/go/doc/comment: internal error: not making progress") + } + + var kind spanKind + start := i + end := i + if i < forceIndent || indented(lines[i]) { + // Indented (or force indented). + // Ends before next unindented. (Blank lines are OK.) + // If this is an unindented list that we are heuristically treating as indented, + // then accept unindented list item lines up to the first blank lines. + // The heuristic is disabled at blank lines to contain its effect + // to non-gofmt'ed sections of the comment. + unindentedListOK := isList(lines[i]) && i < forceIndent + i++ + for i < len(lines) && (lines[i] == "" || i < forceIndent || indented(lines[i]) || (unindentedListOK && isList(lines[i]))) { + if lines[i] == "" { + unindentedListOK = false + } + i++ + } + + // Drop trailing blank lines. 
+ end = i + for end > start && lines[end-1] == "" { + end-- + } + + // If indented lines are followed (without a blank line) + // by an unindented line ending in a brace, + // take that one line too. This fixes the common mistake + // of pasting in something like + // + // func main() { + // fmt.Println("hello, world") + // } + // + // and forgetting to indent it. + // The heuristic will never trigger on a gofmt'ed comment, + // because any gofmt'ed code block or list would be + // followed by a blank line or end of comment. + if end < len(lines) && strings.HasPrefix(lines[end], "}") { + end++ + } + + if isList(lines[start]) { + kind = spanList + } else { + kind = spanCode + } + } else { + // Unindented. Ends at next blank or indented line. + i++ + for i < len(lines) && lines[i] != "" && !indented(lines[i]) { + i++ + } + end = i + + // If unindented lines are followed (without a blank line) + // by an indented line that would start a code block, + // check whether the final unindented lines + // should be left for the indented section. + // This can happen for the common mistakes of + // unindented code or unindented lists. + // The heuristic will never trigger on a gofmt'ed comment, + // because any gofmt'ed code block would have a blank line + // preceding it after the unindented lines. + if i < len(lines) && lines[i] != "" && !isList(lines[i]) { + switch { + case isList(lines[i-1]): + // If the final unindented line looks like a list item, + // this may be the first indented line wrap of + // a mistakenly unindented list. + // Leave all the unindented list items. + forceIndent = end + end-- + for end > start && isList(lines[end-1]) { + end-- + } + + case strings.HasSuffix(lines[i-1], "{") || strings.HasSuffix(lines[i-1], `\`): + // If the final unindented line ended in { or \ + // it is probably the start of a misindented code block. + // Give the user a single line fix. + // Often that's enough; if not, the user can fix the others themselves. + forceIndent = end + end-- + } + + if start == end && forceIndent > start { + i = start + continue Spans + } + } + + // Span is either paragraph or heading. + if end-start == 1 && isHeading(lines[start]) { + kind = spanHeading + } else if end-start == 1 && isOldHeading(lines[start], lines, start) { + kind = spanOldHeading + } else { + kind = spanPara + } + } + + spans = append(spans, span{start, end, kind}) + i = end + } + + return spans +} + +// indented reports whether line is indented +// (starts with a leading space or tab). +func indented(line string) bool { + return line != "" && (line[0] == ' ' || line[0] == '\t') +} + +// unindent removes any common space/tab prefix +// from each line in lines, returning a copy of lines in which +// those prefixes have been trimmed from each line. +// It also replaces any lines containing only spaces with blank lines (empty strings). +func unindent(lines []string) []string { + // Trim leading and trailing blank lines. + for len(lines) > 0 && isBlank(lines[0]) { + lines = lines[1:] + } + for len(lines) > 0 && isBlank(lines[len(lines)-1]) { + lines = lines[:len(lines)-1] + } + if len(lines) == 0 { + return nil + } + + // Compute and remove common indentation. 
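+	// Illustrative sketch: lines {"\tfoo", "\t\tbar"} share the common
+	// prefix "\t", so the unindented result is {"foo", "\tbar"}.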
+ prefix := leadingSpace(lines[0]) + for _, line := range lines[1:] { + if !isBlank(line) { + prefix = commonPrefix(prefix, leadingSpace(line)) + } + } + + out := make([]string, len(lines)) + for i, line := range lines { + line = strings.TrimPrefix(line, prefix) + if strings.TrimSpace(line) == "" { + line = "" + } + out[i] = line + } + for len(out) > 0 && out[0] == "" { + out = out[1:] + } + for len(out) > 0 && out[len(out)-1] == "" { + out = out[:len(out)-1] + } + return out +} + +// isBlank reports whether s is a blank line. +func isBlank(s string) bool { + return len(s) == 0 || (len(s) == 1 && s[0] == '\n') +} + +// commonPrefix returns the longest common prefix of a and b. +func commonPrefix(a, b string) string { + i := 0 + for i < len(a) && i < len(b) && a[i] == b[i] { + i++ + } + return a[0:i] +} + +// leadingSpace returns the longest prefix of s consisting of spaces and tabs. +func leadingSpace(s string) string { + i := 0 + for i < len(s) && (s[i] == ' ' || s[i] == '\t') { + i++ + } + return s[:i] +} + +// isOldHeading reports whether line is an old-style section heading. +// line is all[off]. +func isOldHeading(line string, all []string, off int) bool { + if off <= 0 || all[off-1] != "" || off+2 >= len(all) || all[off+1] != "" || leadingSpace(all[off+2]) != "" { + return false + } + + line = strings.TrimSpace(line) + + // a heading must start with an uppercase letter + r, _ := utf8.DecodeRuneInString(line) + if !unicode.IsLetter(r) || !unicode.IsUpper(r) { + return false + } + + // it must end in a letter or digit: + r, _ = utf8.DecodeLastRuneInString(line) + if !unicode.IsLetter(r) && !unicode.IsDigit(r) { + return false + } + + // exclude lines with illegal characters. we allow "()," + if strings.ContainsAny(line, ";:!?+*/=[]{}_^°&§~%#@<\">\\") { + return false + } + + // allow "'" for possessive "'s" only + for b := line; ; { + var ok bool + if _, b, ok = strings.Cut(b, "'"); !ok { + break + } + if b != "s" && !strings.HasPrefix(b, "s ") { + return false // ' not followed by s and then end-of-word + } + } + + // allow "." when followed by non-space + for b := line; ; { + var ok bool + if _, b, ok = strings.Cut(b, "."); !ok { + break + } + if b == "" || strings.HasPrefix(b, " ") { + return false // not followed by non-space + } + } + + return true +} + +// oldHeading returns the *Heading for the given old-style section heading line. +func (d *parseDoc) oldHeading(line string) Block { + return &Heading{Text: []Text{Plain(strings.TrimSpace(line))}} +} + +// isHeading reports whether line is a new-style section heading. +func isHeading(line string) bool { + return len(line) >= 2 && + line[0] == '#' && + (line[1] == ' ' || line[1] == '\t') && + strings.TrimSpace(line) != "#" +} + +// heading returns the *Heading for the given new-style section heading line. +func (d *parseDoc) heading(line string) Block { + return &Heading{Text: []Text{Plain(strings.TrimSpace(line[1:]))}} +} + +// code returns a code block built from the lines. +func (d *parseDoc) code(lines []string) *Code { + body := unindent(lines) + body = append(body, "") // to get final \n from Join + return &Code{Text: strings.Join(body, "\n")} +} + +// paragraph returns a paragraph block built from the lines. +// If the lines are link definitions, paragraph adds them to d and returns nil. +func (d *parseDoc) paragraph(lines []string) Block { + // Is this a block of known links? Handle. 
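+	// Illustrative sketch: a definition block is a paragraph consisting
+	// only of lines of the form
+	//
+	//	[Go home page]: https://go.dev
+	//
+	// (see parseLink below for the exact shape accepted).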
+ var defs []*LinkDef + for _, line := range lines { + def, ok := parseLink(line) + if !ok { + goto NoDefs + } + defs = append(defs, def) + } + for _, def := range defs { + d.Links = append(d.Links, def) + if d.links[def.Text] == nil { + d.links[def.Text] = def + } + } + return nil +NoDefs: + + return &Paragraph{Text: []Text{Plain(strings.Join(lines, "\n"))}} +} + +// parseLink parses a single link definition line: +// +// [text]: url +// +// It returns the link definition and whether the line was well formed. +func parseLink(line string) (*LinkDef, bool) { + if line == "" || line[0] != '[' { + return nil, false + } + i := strings.Index(line, "]:") + if i < 0 || i+3 >= len(line) || (line[i+2] != ' ' && line[i+2] != '\t') { + return nil, false + } + + text := line[1:i] + url := strings.TrimSpace(line[i+3:]) + j := strings.Index(url, "://") + if j < 0 || !isScheme(url[:j]) { + return nil, false + } + + // Line has right form and has valid scheme://. + // That's good enough for us - we are not as picky + // about the characters beyond the :// as we are + // when extracting inline URLs from text. + return &LinkDef{Text: text, URL: url}, true +} + +// list returns a list built from the indented lines, +// using forceBlankBefore as the value of the List's ForceBlankBefore field. +func (d *parseDoc) list(lines []string, forceBlankBefore bool) *List { + num, _, _ := listMarker(lines[0]) + var ( + list *List = &List{ForceBlankBefore: forceBlankBefore} + item *ListItem + text []string + ) + flush := func() { + if item != nil { + if para := d.paragraph(text); para != nil { + item.Content = append(item.Content, para) + } + } + text = nil + } + + for _, line := range lines { + if n, after, ok := listMarker(line); ok && (n != "") == (num != "") { + // start new list item + flush() + + item = &ListItem{Number: n} + list.Items = append(list.Items, item) + line = after + } + line = strings.TrimSpace(line) + if line == "" { + list.ForceBlankBetween = true + flush() + continue + } + text = append(text, strings.TrimSpace(line)) + } + flush() + return list +} + +// listMarker parses the line as beginning with a list marker. +// If it can do that, it returns the numeric marker ("" for a bullet list), +// the rest of the line, and ok == true. +// Otherwise, it returns "", "", false. +func listMarker(line string) (num, rest string, ok bool) { + line = strings.TrimSpace(line) + if line == "" { + return "", "", false + } + + // Can we find a marker? + if r, n := utf8.DecodeRuneInString(line); r == '•' || r == '*' || r == '+' || r == '-' { + num, rest = "", line[n:] + } else if '0' <= line[0] && line[0] <= '9' { + n := 1 + for n < len(line) && '0' <= line[n] && line[n] <= '9' { + n++ + } + if n >= len(line) || (line[n] != '.' && line[n] != ')') { + return "", "", false + } + num, rest = line[:n], line[n+1:] + } else { + return "", "", false + } + + if !indented(rest) || strings.TrimSpace(rest) == "" { + return "", "", false + } + + return num, rest, true +} + +// isList reports whether the line is the first line of a list, +// meaning starts with a list marker after any indentation. +// (The caller is responsible for checking the line is indented, as appropriate.) +func isList(line string) bool { + _, _, ok := listMarker(line) + return ok +} + +// parseLinkedText parses text that is allowed to contain explicit links, +// such as [math.Sin] or [Go home page], into a slice of Text items. 
+// +// A “pkg” is only assumed to be a full import path if it starts with +// a domain name (a path element with a dot) or is one of the packages +// from the standard library (“[os]”, “[encoding/json]”, and so on). +// To avoid problems with maps, generics, and array types, doc links +// must be both preceded and followed by punctuation, spaces, tabs, +// or the start or end of a line. An example problem would be treating +// map[ast.Expr]TypeAndValue as containing a link. +func (d *parseDoc) parseLinkedText(text string) []Text { + var out []Text + wrote := 0 + flush := func(i int) { + if wrote < i { + out = d.parseText(out, text[wrote:i], true) + wrote = i + } + } + + start := -1 + var buf []byte + for i := 0; i < len(text); i++ { + c := text[i] + if c == '\n' || c == '\t' { + c = ' ' + } + switch c { + case '[': + start = i + case ']': + if start >= 0 { + if def, ok := d.links[string(buf)]; ok { + def.Used = true + flush(start) + out = append(out, &Link{ + Text: d.parseText(nil, text[start+1:i], false), + URL: def.URL, + }) + wrote = i + 1 + } else if link, ok := d.docLink(text[start+1:i], text[:start], text[i+1:]); ok { + flush(start) + link.Text = d.parseText(nil, text[start+1:i], false) + out = append(out, link) + wrote = i + 1 + } + } + start = -1 + buf = buf[:0] + } + if start >= 0 && i != start { + buf = append(buf, c) + } + } + + flush(len(text)) + return out +} + +// docLink parses text, which was found inside [ ] brackets, +// as a doc link if possible, returning the DocLink and ok == true +// or else nil, false. +// The before and after strings are the text before the [ and after the ] +// on the same line. Doc links must be preceded and followed by +// punctuation, spaces, tabs, or the start or end of a line. +func (d *parseDoc) docLink(text, before, after string) (link *DocLink, ok bool) { + if before != "" { + r, _ := utf8.DecodeLastRuneInString(before) + if !unicode.IsPunct(r) && r != ' ' && r != '\t' && r != '\n' { + return nil, false + } + } + if after != "" { + r, _ := utf8.DecodeRuneInString(after) + if !unicode.IsPunct(r) && r != ' ' && r != '\t' && r != '\n' { + return nil, false + } + } + text = strings.TrimPrefix(text, "*") + pkg, name, ok := splitDocName(text) + var recv string + if ok { + pkg, recv, _ = splitDocName(pkg) + } + if pkg != "" { + if pkg, ok = d.lookupPkg(pkg); !ok { + return nil, false + } + } else { + if ok = d.lookupSym(recv, name); !ok { + return nil, false + } + } + link = &DocLink{ + ImportPath: pkg, + Recv: recv, + Name: name, + } + return link, true +} + +// If text is of the form before.Name, where Name is a capitalized Go identifier, +// then splitDocName returns before, name, true. +// Otherwise it returns text, "", false. +func splitDocName(text string) (before, name string, foundDot bool) { + i := strings.LastIndex(text, ".") + name = text[i+1:] + if !isName(name) { + return text, "", false + } + if i >= 0 { + before = text[:i] + } + return before, name, true +} + +// parseText parses s as text and returns the result of appending +// those parsed Text elements to out. +// parseText does not handle explicit links like [math.Sin] or [Go home page]: +// those are handled by parseLinkedText. +// If autoLink is true, then parseText recognizes URLs and words from d.Words +// and converts those to links as appropriate. 
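+//
+// Illustrative sketch of the quote rewriting below: the input "``x''"
+// yields the single Plain element “x”, since `` and '' become curly quotes.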
+func (d *parseDoc) parseText(out []Text, s string, autoLink bool) []Text { + var w strings.Builder + wrote := 0 + writeUntil := func(i int) { + w.WriteString(s[wrote:i]) + wrote = i + } + flush := func(i int) { + writeUntil(i) + if w.Len() > 0 { + out = append(out, Plain(w.String())) + w.Reset() + } + } + for i := 0; i < len(s); { + t := s[i:] + if autoLink { + if url, ok := autoURL(t); ok { + flush(i) + // Note: The old comment parser would look up the URL in words + // and replace the target with words[URL] if it was non-empty. + // That would allow creating links that display as one URL but + // when clicked go to a different URL. Not sure what the point + // of that is, so we're not doing that lookup here. + out = append(out, &Link{Auto: true, Text: []Text{Plain(url)}, URL: url}) + i += len(url) + wrote = i + continue + } + if id, ok := ident(t); ok { + url, italics := d.Words[id] + if !italics { + i += len(id) + continue + } + flush(i) + if url == "" { + out = append(out, Italic(id)) + } else { + out = append(out, &Link{Auto: true, Text: []Text{Italic(id)}, URL: url}) + } + i += len(id) + wrote = i + continue + } + } + switch { + case strings.HasPrefix(t, "``"): + if len(t) >= 3 && t[2] == '`' { + // Do not convert `` inside ```, in case people are mistakenly writing Markdown. + i += 3 + for i < len(t) && t[i] == '`' { + i++ + } + break + } + writeUntil(i) + w.WriteRune('“') + i += 2 + wrote = i + case strings.HasPrefix(t, "''"): + writeUntil(i) + w.WriteRune('”') + i += 2 + wrote = i + default: + i++ + } + } + flush(len(s)) + return out +} + +// autoURL checks whether s begins with a URL that should be hyperlinked. +// If so, it returns the URL, which is a prefix of s, and ok == true. +// Otherwise it returns "", false. +// The caller should skip over the first len(url) bytes of s +// before further processing. +func autoURL(s string) (url string, ok bool) { + // Find the ://. Fast path to pick off non-URL, + // since we call this at every position in the string. + // The shortest possible URL is ftp://x, 7 bytes. + var i int + switch { + case len(s) < 7: + return "", false + case s[3] == ':': + i = 3 + case s[4] == ':': + i = 4 + case s[5] == ':': + i = 5 + case s[6] == ':': + i = 6 + default: + return "", false + } + if i+3 > len(s) || s[i:i+3] != "://" { + return "", false + } + + // Check valid scheme. + if !isScheme(s[:i]) { + return "", false + } + + // Scan host part. Must have at least one byte, + // and must start and end in non-punctuation. + i += 3 + if i >= len(s) || !isHost(s[i]) || isPunct(s[i]) { + return "", false + } + i++ + end := i + for i < len(s) && isHost(s[i]) { + if !isPunct(s[i]) { + end = i + 1 + } + i++ + } + i = end + + // At this point we are definitely returning a URL (scheme://host). + // We just have to find the longest path we can add to it. + // Heuristics abound. + // We allow parens, braces, and brackets, + // but only if they match (#5043, #22285). + // We allow .,:;?! in the path but not at the end, + // to avoid end-of-sentence punctuation (#18139, #16565). 
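+	// Illustrative sketch of the path heuristics below:
+	// "https://go.dev/s/(comments)." keeps the balanced "(comments)" but
+	// drops the trailing period, while an unmatched ")" ends the path early.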
+	stk := []byte{}
+	end = i
+Path:
+	for ; i < len(s); i++ {
+		if isPunct(s[i]) {
+			continue
+		}
+		if !isPath(s[i]) {
+			break
+		}
+		switch s[i] {
+		case '(':
+			stk = append(stk, ')')
+		case '{':
+			stk = append(stk, '}')
+		case '[':
+			stk = append(stk, ']')
+		case ')', '}', ']':
+			if len(stk) == 0 || stk[len(stk)-1] != s[i] {
+				break Path
+			}
+			stk = stk[:len(stk)-1]
+		}
+		if len(stk) == 0 {
+			end = i + 1
+		}
+	}
+
+	return s[:end], true
+}
+
+// isScheme reports whether s is a recognized URL scheme.
+// Note that if strings of new length (beyond 3-7)
+// are added here, the fast path at the top of autoURL will need updating.
+func isScheme(s string) bool {
+	switch s {
+	case "file",
+		"ftp",
+		"gopher",
+		"http",
+		"https",
+		"mailto",
+		"nntp":
+		return true
+	}
+	return false
+}
+
+// isHost reports whether c is a byte that can appear in a URL host,
+// like www.example.com or user@[::1]:8080
+func isHost(c byte) bool {
+	// mask is a 128-bit bitmap with 1s for allowed bytes,
+	// so that the byte c can be tested with a shift and an and.
+	// If c > 128, then 1<<c and 1<<(c-64) will both be zero,
+	// and this function will return false.
+	const mask = 0 |
+		(1<<26-1)<<'A' |
+		(1<<26-1)<<'a' |
+		(1<<10-1)<<'0' |
+		1<<'_' |
+		1<<'@' |
+		1<<'-' |
+		1<<'.' |
+		1<<'[' |
+		1<<']' |
+		1<<':'
+
+	return ((uint64(1)<<c)&(mask&(1<<64-1)) |
+		(uint64(1)<<(c-64))&(mask>>64)) != 0
+}
+
+// isPunct reports whether c is a punctuation byte that can appear
+// inside a path but not at the end.
+func isPunct(c byte) bool {
+	// mask is a 128-bit bitmap with 1s for allowed bytes,
+	// so that the byte c can be tested with a shift and an and.
+	// If c > 128, then 1<<c and 1<<(c-64) will both be zero,
+	// and this function will return false.
+	const mask = 0 |
+		1<<'.' |
+		1<<',' |
+		1<<':' |
+		1<<';' |
+		1<<'?' |
+		1<<'!'
+
+	return ((uint64(1)<<c)&(mask&(1<<64-1)) |
+		(uint64(1)<<(c-64))&(mask>>64)) != 0
+}
+
+// isPath reports whether c is a (non-punctuation) path byte.
+func isPath(c byte) bool {
+	// mask is a 128-bit bitmap with 1s for allowed bytes,
+	// so that the byte c can be tested with a shift and an and.
+	// If c > 128, then 1<<c and 1<<(c-64) will both be zero,
+	// and this function will return false.
+	const mask = 0 |
+		(1<<26-1)<<'A' |
+		(1<<26-1)<<'a' |
+		(1<<10-1)<<'0' |
+		1<<'$' |
+		1<<'\'' |
+		1<<'(' |
+		1<<')' |
+		1<<'*' |
+		1<<'+' |
+		1<<'&' |
+		1<<'#' |
+		1<<'=' |
+		1<<'@' |
+		1<<'~' |
+		1<<'_' |
+		1<<'/' |
+		1<<'-' |
+		1<<'[' |
+		1<<']' |
+		1<<'{' |
+		1<<'}' |
+		1<<'%' |
+		1<<'.' |
+		1<<'!'
+
+	return ((uint64(1)<<c)&(mask&(1<<64-1)) |
+		(uint64(1)<<(c-64))&(mask>>64)) != 0
+}
+
+// isName reports whether s is a capitalized Go identifier (like Name).
+func isName(s string) bool {
+	t, ok := ident(s)
+	if !ok || t != s {
+		return false
+	}
+	r, _ := utf8.DecodeRuneInString(s)
+	return unicode.IsUpper(r)
+}
+
+// ident checks whether s begins with a Go identifier.
+// If so, it returns the identifier, which is a prefix of s, and ok == true.
+// Otherwise it returns "", false.
+// The caller should skip over the first len(id) bytes of s
+// before further processing.
+func ident(s string) (id string, ok bool) {
+	// Scan [\pL_][\pL_0-9]*
+	n := 0
+	for n < len(s) {
+		if c := s[n]; c < utf8.RuneSelf {
+			if isIdentASCII(c) && (n > 0 || c < '0' || c > '9') {
+				n++
+				continue
+			}
+			break
+		}
+		r, nr := utf8.DecodeRuneInString(s[n:])
+		if unicode.IsLetter(r) {
+			n += nr
+			continue
+		}
+		break
+	}
+	return s[:n], n > 0
+}
+
+// isIdentASCII reports whether c is an ASCII identifier byte.
+func isIdentASCII(c byte) bool {
+	// mask is a 128-bit bitmap with 1s for allowed bytes,
+	// so that the byte c can be tested with a shift and an and.
+	// If c > 128, then 1<<c and 1<<(c-64) will both be zero,
+	// and this function will return false.
+	const mask = 0 |
+		(1<<26-1)<<'A' |
+		(1<<26-1)<<'a' |
+		(1<<10-1)<<'0' |
+		1<<'_'
+
+	return ((uint64(1)<<c)&(mask&(1<<64-1)) |
+		(uint64(1)<<(c-64))&(mask>>64)) != 0
+}
+
+// validImportPath reports whether path is a valid import path.
+// It is a lightly edited copy of golang.org/x/mod/module.CheckImportPath.
+func validImportPath(path string) bool {
+	if !utf8.ValidString(path) {
+		return false
+	}
+	if path == "" {
+		return false
+	}
+	if path[0] == '-' {
+		return false
+	}
+	if strings.Contains(path, "//") {
+		return false
+	}
+	if path[len(path)-1] == '/' {
+		return false
+	}
+	elemStart := 0
+	for i, r := range path {
+		if r == '/' {
+			if !validImportPathElem(path[elemStart:i]) {
+				return false
+			}
+			elemStart = i + 1
+		}
+	}
+	return validImportPathElem(path[elemStart:])
+}
+
+func validImportPathElem(elem string) bool {
+	if elem == "" || elem[0] == '.' || elem[len(elem)-1] == '.' {
+		return false
+	}
+	for i := 0; i < len(elem); i++ {
+		if !importPathOK(elem[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+func importPathOK(c byte) bool {
+	// mask is a 128-bit bitmap with 1s for allowed bytes,
+	// so that the byte c can be tested with a shift and an and.
+	// If c > 128, then 1<<c and 1<<(c-64) will both be zero,
+	// and this function will return false.
+	const mask = 0 |
+		(1<<26-1)<<'A' |
+		(1<<26-1)<<'a' |
+		(1<<10-1)<<'0' |
+		1<<'-' |
+		1<<'.' |
+		1<<'~' |
+		1<<'_' |
+		1<<'+'
+
+	return ((uint64(1)<<c)&(mask&(1<<64-1)) |
+		(uint64(1)<<(c-64))&(mask>>64)) != 0
+}
diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/print.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/print.go
new file mode 100644
index 0000000000..e1c070d5a5
--- /dev/null
+++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/print.go
@@ -0,0 +1,288 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package comment
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+)
+
+// A Printer is a doc comment printer.
+// The fields in the struct can be filled in before calling
+// any of the printing methods
+// in order to customize the details of the printing process.
+type Printer struct {
+	// HeadingLevel is the nesting level used for
+	// HTML and Markdown headings.
+	// If HeadingLevel is zero, it defaults to level 3,
+	// meaning to use <h3>
and ###. + HeadingLevel int + + // HeadingID is a function that computes the heading ID + // (anchor tag) to use for the heading h when generating + // HTML and Markdown. If HeadingID returns an empty string, + // then the heading ID is omitted. + // If HeadingID is nil, h.DefaultID is used. + HeadingID func(h *Heading) string + + // DocLinkURL is a function that computes the URL for the given DocLink. + // If DocLinkURL is nil, then link.DefaultURL(p.DocLinkBaseURL) is used. + DocLinkURL func(link *DocLink) string + + // DocLinkBaseURL is used when DocLinkURL is nil, + // passed to [DocLink.DefaultURL] to construct a DocLink's URL. + // See that method's documentation for details. + DocLinkBaseURL string + + // TextPrefix is a prefix to print at the start of every line + // when generating text output using the Text method. + TextPrefix string + + // TextCodePrefix is the prefix to print at the start of each + // preformatted (code block) line when generating text output, + // instead of (not in addition to) TextPrefix. + // If TextCodePrefix is the empty string, it defaults to TextPrefix+"\t". + TextCodePrefix string + + // TextWidth is the maximum width text line to generate, + // measured in Unicode code points, + // excluding TextPrefix and the newline character. + // If TextWidth is zero, it defaults to 80 minus the number of code points in TextPrefix. + // If TextWidth is negative, there is no limit. + TextWidth int +} + +func (p *Printer) headingLevel() int { + if p.HeadingLevel <= 0 { + return 3 + } + return p.HeadingLevel +} + +func (p *Printer) headingID(h *Heading) string { + if p.HeadingID == nil { + return h.DefaultID() + } + return p.HeadingID(h) +} + +func (p *Printer) docLinkURL(link *DocLink) string { + if p.DocLinkURL != nil { + return p.DocLinkURL(link) + } + return link.DefaultURL(p.DocLinkBaseURL) +} + +// DefaultURL constructs and returns the documentation URL for l, +// using baseURL as a prefix for links to other packages. +// +// The possible forms returned by DefaultURL are: +// - baseURL/ImportPath, for a link to another package +// - baseURL/ImportPath#Name, for a link to a const, func, type, or var in another package +// - baseURL/ImportPath#Recv.Name, for a link to a method in another package +// - #Name, for a link to a const, func, type, or var in this package +// - #Recv.Name, for a link to a method in this package +// +// If baseURL ends in a trailing slash, then DefaultURL inserts +// a slash between ImportPath and # in the anchored forms. +// For example, here are some baseURL values and URLs they can generate: +// +// "/pkg/" → "/pkg/math/#Sqrt" +// "/pkg" → "/pkg/math#Sqrt" +// "/" → "/math/#Sqrt" +// "" → "/math#Sqrt" +func (l *DocLink) DefaultURL(baseURL string) string { + if l.ImportPath != "" { + slash := "" + if strings.HasSuffix(baseURL, "/") { + slash = "/" + } else { + baseURL += "/" + } + switch { + case l.Name == "": + return baseURL + l.ImportPath + slash + case l.Recv != "": + return baseURL + l.ImportPath + slash + "#" + l.Recv + "." + l.Name + default: + return baseURL + l.ImportPath + slash + "#" + l.Name + } + } + if l.Recv != "" { + return "#" + l.Recv + "." + l.Name + } + return "#" + l.Name +} + +// DefaultID returns the default anchor ID for the heading h. +// +// The default anchor ID is constructed by converting every +// rune that is not alphanumeric ASCII to an underscore +// and then adding the prefix “hdr-”. +// For example, if the heading text is “Go Doc Comments”, +// the default ID is “hdr-Go_Doc_Comments”. 
+func (h *Heading) DefaultID() string { + // Note: The “hdr-” prefix is important to avoid DOM clobbering attacks. + // See https://pkg.go.dev/github.com/google/safehtml#Identifier. + var out strings.Builder + var p textPrinter + p.oneLongLine(&out, h.Text) + s := strings.TrimSpace(out.String()) + if s == "" { + return "" + } + out.Reset() + out.WriteString("hdr-") + for _, r := range s { + if r < 0x80 && isIdentASCII(byte(r)) { + out.WriteByte(byte(r)) + } else { + out.WriteByte('_') + } + } + return out.String() +} + +type commentPrinter struct { + *Printer +} + +// Comment returns the standard Go formatting of the Doc, +// without any comment markers. +func (p *Printer) Comment(d *Doc) []byte { + cp := &commentPrinter{Printer: p} + var out bytes.Buffer + for i, x := range d.Content { + if i > 0 && blankBefore(x) { + out.WriteString("\n") + } + cp.block(&out, x) + } + + // Print one block containing all the link definitions that were used, + // and then a second block containing all the unused ones. + // This makes it easy to clean up the unused ones: gofmt and + // delete the final block. And it's a nice visual signal without + // affecting the way the comment formats for users. + for i := 0; i < 2; i++ { + used := i == 0 + first := true + for _, def := range d.Links { + if def.Used == used { + if first { + out.WriteString("\n") + first = false + } + out.WriteString("[") + out.WriteString(def.Text) + out.WriteString("]: ") + out.WriteString(def.URL) + out.WriteString("\n") + } + } + } + + return out.Bytes() +} + +// blankBefore reports whether the block x requires a blank line before it. +// All blocks do, except for Lists that return false from x.BlankBefore(). +func blankBefore(x Block) bool { + if x, ok := x.(*List); ok { + return x.BlankBefore() + } + return true +} + +// block prints the block x to out. +func (p *commentPrinter) block(out *bytes.Buffer, x Block) { + switch x := x.(type) { + default: + fmt.Fprintf(out, "?%T", x) + + case *Paragraph: + p.text(out, "", x.Text) + out.WriteString("\n") + + case *Heading: + out.WriteString("# ") + p.text(out, "", x.Text) + out.WriteString("\n") + + case *Code: + md := x.Text + for md != "" { + var line string + line, md, _ = strings.Cut(md, "\n") + if line != "" { + out.WriteString("\t") + out.WriteString(line) + } + out.WriteString("\n") + } + + case *List: + loose := x.BlankBetween() + for i, item := range x.Items { + if i > 0 && loose { + out.WriteString("\n") + } + out.WriteString(" ") + if item.Number == "" { + out.WriteString(" - ") + } else { + out.WriteString(item.Number) + out.WriteString(". ") + } + for i, blk := range item.Content { + const fourSpace = " " + if i > 0 { + out.WriteString("\n" + fourSpace) + } + p.text(out, fourSpace, blk.(*Paragraph).Text) + out.WriteString("\n") + } + } + } +} + +// text prints the text sequence x to out. +func (p *commentPrinter) text(out *bytes.Buffer, indent string, x []Text) { + for _, t := range x { + switch t := t.(type) { + case Plain: + p.indent(out, indent, string(t)) + case Italic: + p.indent(out, indent, string(t)) + case *Link: + if t.Auto { + p.text(out, indent, t.Text) + } else { + out.WriteString("[") + p.text(out, indent, t.Text) + out.WriteString("]") + } + case *DocLink: + out.WriteString("[") + p.text(out, indent, t.Text) + out.WriteString("]") + } + } +} + +// indent prints s to out, indenting with the indent string +// after each newline in s. 
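+// For example (illustrative): indent(out, "    ", "a\nb") writes "a\n    b".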
+func (p *commentPrinter) indent(out *bytes.Buffer, indent, s string) { + for s != "" { + line, rest, ok := strings.Cut(s, "\n") + out.WriteString(line) + if ok { + out.WriteString("\n") + out.WriteString(indent) + } + s = rest + } +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/std.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/std.go new file mode 100644 index 0000000000..d128eda8c5 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/std.go @@ -0,0 +1,47 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by 'go generate' DO NOT EDIT. +//disabled go:generate ./mkstd.sh + +package comment + +var stdPkgs = []string{ + "bufio", + "bytes", + "cmp", + "context", + "crypto", + "embed", + "encoding", + "errors", + "expvar", + "flag", + "fmt", + "hash", + "html", + "image", + "io", + "log", + "maps", + "math", + "mime", + "net", + "os", + "path", + "plugin", + "reflect", + "regexp", + "runtime", + "slices", + "sort", + "strconv", + "strings", + "sync", + "syscall", + "testing", + "time", + "unicode", + "unsafe", +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/text.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/text.go new file mode 100644 index 0000000000..6f9c2e201d --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/text.go @@ -0,0 +1,337 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package comment + +import ( + "bytes" + "fmt" + "sort" + "strings" + "unicode/utf8" +) + +// A textPrinter holds the state needed for printing a Doc as plain text. +type textPrinter struct { + *Printer + long strings.Builder + prefix string + codePrefix string + width int +} + +// Text returns a textual formatting of the Doc. +// See the [Printer] documentation for ways to customize the text output. +func (p *Printer) Text(d *Doc) []byte { + tp := &textPrinter{ + Printer: p, + prefix: p.TextPrefix, + codePrefix: p.TextCodePrefix, + width: p.TextWidth, + } + if tp.codePrefix == "" { + tp.codePrefix = p.TextPrefix + "\t" + } + if tp.width == 0 { + tp.width = 80 - utf8.RuneCountInString(tp.prefix) + } + + var out bytes.Buffer + for i, x := range d.Content { + if i > 0 && blankBefore(x) { + out.WriteString(tp.prefix) + writeNL(&out) + } + tp.block(&out, x) + } + anyUsed := false + for _, def := range d.Links { + if def.Used { + anyUsed = true + break + } + } + if anyUsed { + writeNL(&out) + for _, def := range d.Links { + if def.Used { + fmt.Fprintf(&out, "[%s]: %s\n", def.Text, def.URL) + } + } + } + return out.Bytes() +} + +// writeNL calls out.WriteByte('\n') +// but first trims trailing spaces on the previous line. +func writeNL(out *bytes.Buffer) { + // Trim trailing spaces. + data := out.Bytes() + n := 0 + for n < len(data) && (data[len(data)-n-1] == ' ' || data[len(data)-n-1] == '\t') { + n++ + } + if n > 0 { + out.Truncate(len(data) - n) + } + out.WriteByte('\n') +} + +// block prints the block x to out. 
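+// Illustrative: a *Code block is printed behind codePrefix (TextPrefix+"\t"
+// by default), while *Paragraph text is wrapped behind prefix.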
+func (p *textPrinter) block(out *bytes.Buffer, x Block) { + switch x := x.(type) { + default: + fmt.Fprintf(out, "?%T\n", x) + + case *Paragraph: + out.WriteString(p.prefix) + p.text(out, "", x.Text) + + case *Heading: + out.WriteString(p.prefix) + out.WriteString("# ") + p.text(out, "", x.Text) + + case *Code: + text := x.Text + for text != "" { + var line string + line, text, _ = strings.Cut(text, "\n") + if line != "" { + out.WriteString(p.codePrefix) + out.WriteString(line) + } + writeNL(out) + } + + case *List: + loose := x.BlankBetween() + for i, item := range x.Items { + if i > 0 && loose { + out.WriteString(p.prefix) + writeNL(out) + } + out.WriteString(p.prefix) + out.WriteString(" ") + if item.Number == "" { + out.WriteString(" - ") + } else { + out.WriteString(item.Number) + out.WriteString(". ") + } + for i, blk := range item.Content { + const fourSpace = " " + if i > 0 { + writeNL(out) + out.WriteString(p.prefix) + out.WriteString(fourSpace) + } + p.text(out, fourSpace, blk.(*Paragraph).Text) + } + } + } +} + +// text prints the text sequence x to out. +func (p *textPrinter) text(out *bytes.Buffer, indent string, x []Text) { + p.oneLongLine(&p.long, x) + words := strings.Fields(p.long.String()) + p.long.Reset() + + var seq []int + if p.width < 0 || len(words) == 0 { + seq = []int{0, len(words)} // one long line + } else { + seq = wrap(words, p.width-utf8.RuneCountInString(indent)) + } + for i := 0; i+1 < len(seq); i++ { + if i > 0 { + out.WriteString(p.prefix) + out.WriteString(indent) + } + for j, w := range words[seq[i]:seq[i+1]] { + if j > 0 { + out.WriteString(" ") + } + out.WriteString(w) + } + writeNL(out) + } +} + +// oneLongLine prints the text sequence x to out as one long line, +// without worrying about line wrapping. +// Explicit links have the [ ] dropped to improve readability. +func (p *textPrinter) oneLongLine(out *strings.Builder, x []Text) { + for _, t := range x { + switch t := t.(type) { + case Plain: + out.WriteString(string(t)) + case Italic: + out.WriteString(string(t)) + case *Link: + p.oneLongLine(out, t.Text) + case *DocLink: + p.oneLongLine(out, t.Text) + } + } +} + +// wrap wraps words into lines of at most max runes, +// minimizing the sum of the squares of the leftover lengths +// at the end of each line (except the last, of course), +// with a preference for ending lines at punctuation (.,:;). +// +// The returned slice gives the indexes of the first words +// on each line in the wrapped text with a final entry of len(words). +// Thus the lines are words[seq[0]:seq[1]], words[seq[1]:seq[2]], +// ..., words[seq[len(seq)-2]:seq[len(seq)-1]]. +// +// The implementation runs in O(n log n) time, where n = len(words), +// using the algorithm described in D. S. Hirschberg and L. L. Larmore, +// “[The least weight subsequence problem],” FOCS 1985, pp. 137-143. +// +// [The least weight subsequence problem]: https://doi.org/10.1109/SFCS.1985.60 +func wrap(words []string, max int) (seq []int) { + // The algorithm requires that our scoring function be concave, + // meaning that for all i₀ ≤ i₁ < j₀ ≤ j₁, + // weight(i₀, j₀) + weight(i₁, j₁) ≤ weight(i₀, j₁) + weight(i₁, j₀). + // + // Our weights are two-element pairs [hi, lo] + // ordered by elementwise comparison. + // The hi entry counts the weight for lines that are longer than max, + // and the lo entry counts the weight for lines that are not. + // This forces the algorithm to first minimize the number of lines + // that are longer than max, which correspond to lines with + // single very long words. 
Having done that, it can move on to + // minimizing the lo score, which is more interesting. + // + // The lo score is the sum for each line of the square of the + // number of spaces remaining at the end of the line and a + // penalty of 64 given out for not ending the line in a + // punctuation character (.,:;). + // The penalty is somewhat arbitrarily chosen by trying + // different amounts and judging how nice the wrapped text looks. + // Roughly speaking, using 64 means that we are willing to + // end a line with eight blank spaces in order to end at a + // punctuation character, even if the next word would fit in + // those spaces. + // + // We care about ending in punctuation characters because + // it makes the text easier to skim if not too many sentences + // or phrases begin with a single word on the previous line. + + // A score is the score (also called weight) for a given line. + // add and cmp add and compare scores. + type score struct { + hi int64 + lo int64 + } + add := func(s, t score) score { return score{s.hi + t.hi, s.lo + t.lo} } + cmp := func(s, t score) int { + switch { + case s.hi < t.hi: + return -1 + case s.hi > t.hi: + return +1 + case s.lo < t.lo: + return -1 + case s.lo > t.lo: + return +1 + } + return 0 + } + + // total[j] is the total number of runes + // (including separating spaces) in words[:j]. + total := make([]int, len(words)+1) + total[0] = 0 + for i, s := range words { + total[1+i] = total[i] + utf8.RuneCountInString(s) + 1 + } + + // weight returns weight(i, j). + weight := func(i, j int) score { + // On the last line, there is zero weight for being too short. + n := total[j] - 1 - total[i] + if j == len(words) && n <= max { + return score{0, 0} + } + + // Otherwise the weight is the penalty plus the square of the number of + // characters remaining on the line or by which the line goes over. + // In the latter case, that value goes in the hi part of the score. + // (See note above.) + p := wrapPenalty(words[j-1]) + v := int64(max-n) * int64(max-n) + if n > max { + return score{v, p} + } + return score{0, v + p} + } + + // The rest of this function is “The Basic Algorithm” from + // Hirschberg and Larmore's conference paper, + // using the same names as in the paper. + f := []score{{0, 0}} + g := func(i, j int) score { return add(f[i], weight(i, j)) } + + bridge := func(a, b, c int) bool { + k := c + sort.Search(len(words)+1-c, func(k int) bool { + k += c + return cmp(g(a, k), g(b, k)) > 0 + }) + if k > len(words) { + return true + } + return cmp(g(c, k), g(b, k)) <= 0 + } + + // d is a one-ended deque implemented as a slice. + d := make([]int, 1, len(words)) + d[0] = 0 + bestleft := make([]int, 1, len(words)) + bestleft[0] = -1 + for m := 1; m < len(words); m++ { + f = append(f, g(d[0], m)) + bestleft = append(bestleft, d[0]) + for len(d) > 1 && cmp(g(d[1], m+1), g(d[0], m+1)) <= 0 { + d = d[1:] // “Retire” + } + for len(d) > 1 && bridge(d[len(d)-2], d[len(d)-1], m) { + d = d[:len(d)-1] // “Fire” + } + if cmp(g(m, len(words)), g(d[len(d)-1], len(words))) < 0 { + d = append(d, m) // “Hire” + // The next few lines are not in the paper but are necessary + // to handle two-word inputs correctly. It appears to be + // just a bug in the paper's pseudocode. + if len(d) == 2 && cmp(g(d[1], m+1), g(d[0], m+1)) <= 0 { + d = d[1:] + } + } + } + bestleft = append(bestleft, d[0]) + + // Recover least weight sequence from bestleft. 
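+	// Illustrative sketch: with len(words) == 3, bestleft[3] == 1, and
+	// bestleft[1] == 0, the walk below visits 3 → 1 → 0 and produces
+	// seq = {0, 1, 3}, i.e. lines words[0:1] and words[1:3].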
+ n := 1 + for m := len(words); m > 0; m = bestleft[m] { + n++ + } + seq = make([]int, n) + for m := len(words); m > 0; m = bestleft[m] { + n-- + seq[n] = m + } + return seq +} + +// wrapPenalty is the penalty for inserting a line break after word s. +func wrapPenalty(s string) int64 { + switch s[len(s)-1] { + case '.', ',', ':', ';': + return 0 + } + return 64 +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/format/format.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/format/format.go new file mode 100644 index 0000000000..5540686ed0 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/format/format.go @@ -0,0 +1,134 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package format implements standard formatting of Go source. +// +// Note that formatting of Go source code changes over time, so tools relying on +// consistent formatting should execute a specific version of the gofmt binary +// instead of using this package. That way, the formatting will be stable, and +// the tools won't need to be recompiled each time gofmt changes. +// +// For example, pre-submit checks that use this package directly would behave +// differently depending on what Go version each developer uses, causing the +// check to be inherently fragile. +package format + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + + "mvdan.cc/gofumpt/internal/govendor/go/printer" +) + +// Keep these in sync with cmd/gofmt/gofmt.go. +const ( + tabWidth = 8 + printerMode = printer.UseSpaces | printer.TabIndent | printerNormalizeNumbers + + // printerNormalizeNumbers means to canonicalize number literal prefixes + // and exponents while printing. See https://golang.org/doc/go1.13#gofmt. + // + // This value is defined in mvdan.cc/gofumpt/internal/govendor/go/printer specifically for mvdan.cc/gofumpt/internal/govendor/go/format and cmd/gofmt. + printerNormalizeNumbers = 1 << 30 +) + +var config = printer.Config{Mode: printerMode, Tabwidth: tabWidth} + +const parserMode = parser.ParseComments | parser.SkipObjectResolution + +// Node formats node in canonical gofmt style and writes the result to dst. +// +// The node type must be *ast.File, *printer.CommentedNode, []ast.Decl, +// []ast.Stmt, or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, +// or ast.Stmt. Node does not modify node. Imports are not sorted for +// nodes representing partial source files (for instance, if the node is +// not an *ast.File or a *printer.CommentedNode not wrapping an *ast.File). +// +// The function may return early (before the entire result is written) +// and return a formatting error, for instance due to an incorrect AST. +func Node(dst io.Writer, fset *token.FileSet, node any) error { + // Determine if we have a complete source file (file != nil). + var file *ast.File + var cnode *printer.CommentedNode + switch n := node.(type) { + case *ast.File: + file = n + case *printer.CommentedNode: + if f, ok := n.Node.(*ast.File); ok { + file = f + cnode = n + } + } + + // Sort imports if necessary. + if file != nil && hasUnsortedImports(file) { + // Make a copy of the AST because ast.SortImports is destructive. + // TODO(gri) Do this more efficiently. + var buf bytes.Buffer + err := config.Fprint(&buf, fset, file) + if err != nil { + return err + } + file, err = parser.ParseFile(fset, "", buf.Bytes(), parserMode) + if err != nil { + // We should never get here. 
If we do, provide good diagnostic. + return fmt.Errorf("format.Node internal error (%s)", err) + } + ast.SortImports(fset, file) + + // Use new file with sorted imports. + node = file + if cnode != nil { + node = &printer.CommentedNode{Node: file, Comments: cnode.Comments} + } + } + + return config.Fprint(dst, fset, node) +} + +// Source formats src in canonical gofmt style and returns the result +// or an (I/O or syntax) error. src is expected to be a syntactically +// correct Go source file, or a list of Go declarations or statements. +// +// If src is a partial source file, the leading and trailing space of src +// is applied to the result (such that it has the same leading and trailing +// space as src), and the result is indented by the same amount as the first +// line of src containing code. Imports are not sorted for partial source files. +func Source(src []byte) ([]byte, error) { + fset := token.NewFileSet() + file, sourceAdj, indentAdj, err := parse(fset, "", src, true) + if err != nil { + return nil, err + } + + if sourceAdj == nil { + // Complete source file. + // TODO(gri) consider doing this always. + ast.SortImports(fset, file) + } + + return format(fset, file, sourceAdj, indentAdj, src, config) +} + +func hasUnsortedImports(file *ast.File) bool { + for _, d := range file.Decls { + d, ok := d.(*ast.GenDecl) + if !ok || d.Tok != token.IMPORT { + // Not an import declaration, so we're done. + // Imports are always first. + return false + } + if d.Lparen.IsValid() { + // For now assume all grouped imports are unsorted. + // TODO(gri) Should check if they are sorted already. + return true + } + // Ungrouped imports are sorted by default. + } + return false +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/format/internal.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/format/internal.go new file mode 100644 index 0000000000..df03587143 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/format/internal.go @@ -0,0 +1,177 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(gri): This file and the file src/cmd/gofmt/internal.go are +// the same (but for this comment and the package name). Do not modify +// one without the other. Determine if we can factor out functionality +// in a public API. See also #11844 for context. + +package format + +import ( + "bytes" + "go/ast" + "go/parser" + "go/token" + "strings" + + "mvdan.cc/gofumpt/internal/govendor/go/printer" +) + +// parse parses src, which was read from the named file, +// as a Go source file, declaration, or statement list. +func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) ( + file *ast.File, + sourceAdj func(src []byte, indent int) []byte, + indentAdj int, + err error, +) { + // Try as whole source file. + file, err = parser.ParseFile(fset, filename, src, parserMode) + // If there's no error, return. If the error is that the source file didn't begin with a + // package line and source fragments are ok, fall through to + // try as a source fragment. Stop and return on any other error. + if err == nil || !fragmentOk || !strings.Contains(err.Error(), "expected 'package'") { + return + } + + // If this is a declaration list, make it a source file + // by inserting a package clause. + // Insert using a ';', not a newline, so that the line numbers + // in psrc match the ones in src. + psrc := append([]byte("package p;"), src...) 
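+	// Illustrative sketch: src "var x = 1" becomes psrc "package p;var x = 1",
+	// which parses as a whole file while keeping src's original line numbers.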
+ file, err = parser.ParseFile(fset, filename, psrc, parserMode) + if err == nil { + sourceAdj = func(src []byte, indent int) []byte { + // Remove the package clause. + // Gofmt has turned the ';' into a '\n'. + src = src[indent+len("package p\n"):] + return bytes.TrimSpace(src) + } + return + } + // If the error is that the source file didn't begin with a + // declaration, fall through to try as a statement list. + // Stop and return on any other error. + if !strings.Contains(err.Error(), "expected declaration") { + return + } + + // If this is a statement list, make it a source file + // by inserting a package clause and turning the list + // into a function body. This handles expressions too. + // Insert using a ';', not a newline, so that the line numbers + // in fsrc match the ones in src. Add an extra '\n' before the '}' + // to make sure comments are flushed before the '}'. + fsrc := append(append([]byte("package p; func _() {"), src...), '\n', '\n', '}') + file, err = parser.ParseFile(fset, filename, fsrc, parserMode) + if err == nil { + sourceAdj = func(src []byte, indent int) []byte { + // Cap adjusted indent to zero. + if indent < 0 { + indent = 0 + } + // Remove the wrapping. + // Gofmt has turned the "; " into a "\n\n". + // There will be two non-blank lines with indent, hence 2*indent. + src = src[2*indent+len("package p\n\nfunc _() {"):] + // Remove only the "}\n" suffix: remaining whitespaces will be trimmed anyway + src = src[:len(src)-len("}\n")] + return bytes.TrimSpace(src) + } + // Gofmt has also indented the function body one level. + // Adjust that with indentAdj. + indentAdj = -1 + } + + // Succeeded, or out of options. + return +} + +// format formats the given package file originally obtained from src +// and adjusts the result based on the original source via sourceAdj +// and indentAdj. +func format( + fset *token.FileSet, + file *ast.File, + sourceAdj func(src []byte, indent int) []byte, + indentAdj int, + src []byte, + cfg printer.Config, +) ([]byte, error) { + if sourceAdj == nil { + // Complete source file. + var buf bytes.Buffer + err := cfg.Fprint(&buf, fset, file) + if err != nil { + return nil, err + } + return buf.Bytes(), nil + } + + // Partial source file. + // Determine and prepend leading space. + i, j := 0, 0 + for j < len(src) && isSpace(src[j]) { + if src[j] == '\n' { + i = j + 1 // byte offset of last line in leading space + } + j++ + } + var res []byte + res = append(res, src[:i]...) + + // Determine and prepend indentation of first code line. + // Spaces are ignored unless there are no tabs, + // in which case spaces count as one tab. + indent := 0 + hasSpace := false + for _, b := range src[i:j] { + switch b { + case ' ': + hasSpace = true + case '\t': + indent++ + } + } + if indent == 0 && hasSpace { + indent = 1 + } + for i := 0; i < indent; i++ { + res = append(res, '\t') + } + + // Format the source. + // Write it without any leading and trailing space. + cfg.Indent = indent + indentAdj + var buf bytes.Buffer + err := cfg.Fprint(&buf, fset, file) + if err != nil { + return nil, err + } + out := sourceAdj(buf.Bytes(), cfg.Indent) + + // If the adjusted output is empty, the source + // was empty but (possibly) for white space. + // The result is the incoming source. + if len(out) == 0 { + return src, nil + } + + // Otherwise, append output to leading space. + res = append(res, out...) + + // Determine and append trailing space. 
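+	// Illustrative sketch: for src "  x := 1  \n", the two leading spaces
+	// count as one tab of indent, and the trailing "  \n" is re-appended below.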
+ i = len(src) + for i > 0 && isSpace(src[i-1]) { + i-- + } + return append(res, src[i:]...), nil +} + +// isSpace reports whether the byte is a space character. +// isSpace defines a space as being among the following bytes: ' ', '\t', '\n' and '\r'. +func isSpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/comment.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/comment.go new file mode 100644 index 0000000000..1f0e7df9dd --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/comment.go @@ -0,0 +1,156 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package printer + +import ( + "go/ast" + "strings" + + "mvdan.cc/gofumpt/internal/govendor/go/doc/comment" +) + +// formatDocComment reformats the doc comment list, +// returning the canonical formatting. +func formatDocComment(list []*ast.Comment) []*ast.Comment { + // Extract comment text (removing comment markers). + var kind, text string + var directives []*ast.Comment + if len(list) == 1 && strings.HasPrefix(list[0].Text, "/*") { + kind = "/*" + text = list[0].Text + if !strings.Contains(text, "\n") || allStars(text) { + // Single-line /* .. */ comment in doc comment position, + // or multiline old-style comment like + // /* + // * Comment + // * text here. + // */ + // Should not happen, since it will not work well as a + // doc comment, but if it does, just ignore: + // reformatting it will only make the situation worse. + return list + } + text = text[2 : len(text)-2] // cut /* and */ + } else if strings.HasPrefix(list[0].Text, "//") { + kind = "//" + var b strings.Builder + for _, c := range list { + after, found := strings.CutPrefix(c.Text, "//") + if !found { + return list + } + // Accumulate //go:build etc lines separately. + if isDirective(after) { + directives = append(directives, c) + continue + } + b.WriteString(strings.TrimPrefix(after, " ")) + b.WriteString("\n") + } + text = b.String() + } else { + // Not sure what this is, so leave alone. + return list + } + + if text == "" { + return list + } + + // Parse comment and reformat as text. + var p comment.Parser + d := p.Parse(text) + + var pr comment.Printer + text = string(pr.Comment(d)) + + // For /* */ comment, return one big comment with text inside. + slash := list[0].Slash + if kind == "/*" { + c := &ast.Comment{ + Slash: slash, + Text: "/*\n" + text + "*/", + } + return []*ast.Comment{c} + } + + // For // comment, return sequence of // lines. + var out []*ast.Comment + for text != "" { + var line string + line, text, _ = strings.Cut(text, "\n") + if line == "" { + line = "//" + } else if strings.HasPrefix(line, "\t") { + line = "//" + line + } else { + line = "// " + line + } + out = append(out, &ast.Comment{ + Slash: slash, + Text: line, + }) + } + if len(directives) > 0 { + out = append(out, &ast.Comment{ + Slash: slash, + Text: "//", + }) + for _, c := range directives { + out = append(out, &ast.Comment{ + Slash: slash, + Text: c.Text, + }) + } + } + return out +} + +// isDirective reports whether c is a comment directive. +// See go.dev/issue/37974. +// This code is also in go/ast. +func isDirective(c string) bool { + // "//line " is a line directive. + // "//extern " is for gccgo. + // "//export " is for cgo. + // (The // has been removed.) 
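+	// Illustrative cases (editor's note): "go:build linux" and
+	// "line foo.go:10" are directives; "export mycallback" matches one of
+	// the special prefixes; ordinary prose such as "this is text" is not.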
+ if strings.HasPrefix(c, "line ") || strings.HasPrefix(c, "extern ") || strings.HasPrefix(c, "export ") { + return true + } + + // "//[a-z0-9]+:[a-z0-9]" + // (The // has been removed.) + colon := strings.Index(c, ":") + if colon <= 0 || colon+1 >= len(c) { + return false + } + for i := 0; i <= colon+1; i++ { + if i == colon { + continue + } + b := c[i] + if !('a' <= b && b <= 'z' || '0' <= b && b <= '9') { + return false + } + } + return true +} + +// allStars reports whether text is the interior of an +// old-style /* */ comment with a star at the start of each line. +func allStars(text string) bool { + for i := 0; i < len(text); i++ { + if text[i] == '\n' { + j := i + 1 + for j < len(text) && (text[j] == ' ' || text[j] == '\t') { + j++ + } + if j < len(text) && text[j] != '*' { + return false + } + } + } + return true +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/gobuild.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/gobuild.go new file mode 100644 index 0000000000..f00492d077 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/gobuild.go @@ -0,0 +1,170 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package printer + +import ( + "go/build/constraint" + "sort" + "text/tabwriter" +) + +func (p *printer) fixGoBuildLines() { + if len(p.goBuild)+len(p.plusBuild) == 0 { + return + } + + // Find latest possible placement of //go:build and // +build comments. + // That's just after the last blank line before we find a non-comment. + // (We'll add another blank line after our comment block.) + // When we start dropping // +build comments, we can skip over /* */ comments too. + // Note that we are processing tabwriter input, so every comment + // begins and ends with a tabwriter.Escape byte. + // And some newlines have turned into \f bytes. + insert := 0 + for pos := 0; ; { + // Skip leading space at beginning of line. + blank := true + for pos < len(p.output) && (p.output[pos] == ' ' || p.output[pos] == '\t') { + pos++ + } + // Skip over // comment if any. + if pos+3 < len(p.output) && p.output[pos] == tabwriter.Escape && p.output[pos+1] == '/' && p.output[pos+2] == '/' { + blank = false + for pos < len(p.output) && !isNL(p.output[pos]) { + pos++ + } + } + // Skip over \n at end of line. + if pos >= len(p.output) || !isNL(p.output[pos]) { + break + } + pos++ + + if blank { + insert = pos + } + } + + // If there is a //go:build comment before the place we identified, + // use that point instead. (Earlier in the file is always fine.) + if len(p.goBuild) > 0 && p.goBuild[0] < insert { + insert = p.goBuild[0] + } else if len(p.plusBuild) > 0 && p.plusBuild[0] < insert { + insert = p.plusBuild[0] + } + + var x constraint.Expr + switch len(p.goBuild) { + case 0: + // Synthesize //go:build expression from // +build lines. + for _, pos := range p.plusBuild { + y, err := constraint.Parse(p.commentTextAt(pos)) + if err != nil { + x = nil + break + } + if x == nil { + x = y + } else { + x = &constraint.AndExpr{X: x, Y: y} + } + } + case 1: + // Parse //go:build expression. + x, _ = constraint.Parse(p.commentTextAt(p.goBuild[0])) + } + + var block []byte + if x == nil { + // Don't have a valid //go:build expression to treat as truth. + // Bring all the lines together but leave them alone. + // Note that these are already tabwriter-escaped. + for _, pos := range p.goBuild { + block = append(block, p.lineAt(pos)...) 
+ } + for _, pos := range p.plusBuild { + block = append(block, p.lineAt(pos)...) + } + } else { + block = append(block, tabwriter.Escape) + block = append(block, "//go:build "...) + block = append(block, x.String()...) + block = append(block, tabwriter.Escape, '\n') + if len(p.plusBuild) > 0 { + lines, err := constraint.PlusBuildLines(x) + if err != nil { + lines = []string{"// +build error: " + err.Error()} + } + for _, line := range lines { + block = append(block, tabwriter.Escape) + block = append(block, line...) + block = append(block, tabwriter.Escape, '\n') + } + } + } + block = append(block, '\n') + + // Build sorted list of lines to delete from remainder of output. + toDelete := append(p.goBuild, p.plusBuild...) + sort.Ints(toDelete) + + // Collect output after insertion point, with lines deleted, into after. + var after []byte + start := insert + for _, end := range toDelete { + if end < start { + continue + } + after = appendLines(after, p.output[start:end]) + start = end + len(p.lineAt(end)) + } + after = appendLines(after, p.output[start:]) + if n := len(after); n >= 2 && isNL(after[n-1]) && isNL(after[n-2]) { + after = after[:n-1] + } + + p.output = p.output[:insert] + p.output = append(p.output, block...) + p.output = append(p.output, after...) +} + +// appendLines is like append(x, y...) +// but it avoids creating doubled blank lines, +// which would not be gofmt-standard output. +// It assumes that only whole blocks of lines are being appended, +// not line fragments. +func appendLines(x, y []byte) []byte { + if len(y) > 0 && isNL(y[0]) && // y starts in blank line + (len(x) == 0 || len(x) >= 2 && isNL(x[len(x)-1]) && isNL(x[len(x)-2])) { // x is empty or ends in blank line + y = y[1:] // delete y's leading blank line + } + return append(x, y...) +} + +func (p *printer) lineAt(start int) []byte { + pos := start + for pos < len(p.output) && !isNL(p.output[pos]) { + pos++ + } + if pos < len(p.output) { + pos++ + } + return p.output[start:pos] +} + +func (p *printer) commentTextAt(start int) string { + if start < len(p.output) && p.output[start] == tabwriter.Escape { + start++ + } + pos := start + for pos < len(p.output) && p.output[pos] != tabwriter.Escape && !isNL(p.output[pos]) { + pos++ + } + return string(p.output[start:pos]) +} + +func isNL(b byte) bool { + return b == '\n' || b == '\f' +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/nodes.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/nodes.go new file mode 100644 index 0000000000..a58525b855 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/nodes.go @@ -0,0 +1,2001 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements printing of AST nodes; specifically +// expressions, statements, declarations, and files. It uses +// the print functionality implemented in printer.go. + +package printer + +import ( + "go/ast" + "go/token" + "math" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// Formatting issues: +// - better comment formatting for /*-style comments at the end of a line (e.g. 
a declaration) +// when the comment spans multiple lines; if such a comment is just two lines, formatting is +// not idempotent +// - formatting of expression lists +// - should use blank instead of tab to separate one-line function bodies from +// the function header unless there is a group of consecutive one-liners + +// ---------------------------------------------------------------------------- +// Common AST nodes. + +// Print as many newlines as necessary (but at least min newlines) to get to +// the current line. ws is printed before the first line break. If newSection +// is set, the first line break is printed as formfeed. Returns 0 if no line +// breaks were printed, returns 1 if there was exactly one newline printed, +// and returns a value > 1 if there was a formfeed or more than one newline +// printed. +// +// TODO(gri): linebreak may add too many lines if the next statement at "line" +// is preceded by comments because the computation of n assumes +// the current position before the comment and the target position +// after the comment. Thus, after interspersing such comments, the +// space taken up by them is not considered to reduce the number of +// linebreaks. At the moment there is no easy way to know about +// future (not yet interspersed) comments in this function. +func (p *printer) linebreak(line, min int, ws whiteSpace, newSection bool) (nbreaks int) { + n := nlimit(line - p.pos.Line) + if n < min { + n = min + } + if n > 0 { + p.print(ws) + if newSection { + p.print(formfeed) + n-- + nbreaks = 2 + } + nbreaks += n + for ; n > 0; n-- { + p.print(newline) + } + } + return +} + +// setComment sets g as the next comment if g != nil and if node comments +// are enabled - this mode is used when printing source code fragments such +// as exports only. It assumes that there is no pending comment in p.comments +// and at most one pending comment in the p.comment cache. +func (p *printer) setComment(g *ast.CommentGroup) { + if g == nil || !p.useNodeComments { + return + } + if p.comments == nil { + // initialize p.comments lazily + p.comments = make([]*ast.CommentGroup, 1) + } else if p.cindex < len(p.comments) { + // for some reason there are pending comments; this + // should never happen - handle gracefully and flush + // all comments up to g, ignore anything after that + p.flush(p.posFor(g.List[0].Pos()), token.ILLEGAL) + p.comments = p.comments[0:1] + // in debug mode, report error + p.internalError("setComment found pending comments") + } + p.comments[0] = g + p.cindex = 0 + // don't overwrite any pending comment in the p.comment cache + // (there may be a pending comment when a line comment is + // immediately followed by a lead comment with no other + // tokens between) + if p.commentOffset == infinity { + p.nextComment() // get comment ready for use + } +} + +type exprListMode uint + +const ( + commaTerm exprListMode = 1 << iota // list is optionally terminated by a comma + noIndent // no extra indentation in multi-line lists +) + +// If indent is set, a multi-line identifier list is indented after the +// first linebreak encountered. 
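+// (Editor's note, illustrative: given a source break as in
+//
+//	var a,
+//		b, c int
+//
+// the identifiers after the first line break keep one extra indent.)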
+func (p *printer) identList(list []*ast.Ident, indent bool) { + // convert into an expression list so we can re-use exprList formatting + xlist := make([]ast.Expr, len(list)) + for i, x := range list { + xlist[i] = x + } + var mode exprListMode + if !indent { + mode = noIndent + } + p.exprList(token.NoPos, xlist, 1, mode, token.NoPos, false) +} + +const filteredMsg = "contains filtered or unexported fields" + +// Print a list of expressions. If the list spans multiple +// source lines, the original line breaks are respected between +// expressions. +// +// TODO(gri) Consider rewriting this to be independent of []ast.Expr +// so that we can use the algorithm for any kind of list +// +// (e.g., pass list via a channel over which to range). +func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exprListMode, next0 token.Pos, isIncomplete bool) { + if len(list) == 0 { + if isIncomplete { + prev := p.posFor(prev0) + next := p.posFor(next0) + if prev.IsValid() && prev.Line == next.Line { + p.print("/* " + filteredMsg + " */") + } else { + p.print(newline) + p.print(indent, "// "+filteredMsg, unindent, newline) + } + } + return + } + + prev := p.posFor(prev0) + next := p.posFor(next0) + line := p.lineFor(list[0].Pos()) + endLine := p.lineFor(list[len(list)-1].End()) + + if prev.IsValid() && prev.Line == line && line == endLine { + // all list entries on a single line + for i, x := range list { + if i > 0 { + // use position of expression following the comma as + // comma position for correct comment placement + p.setPos(x.Pos()) + p.print(token.COMMA, blank) + } + p.expr0(x, depth) + } + if isIncomplete { + p.print(token.COMMA, blank, "/* "+filteredMsg+" */") + } + return + } + + // list entries span multiple lines; + // use source code positions to guide line breaks + + // Don't add extra indentation if noIndent is set; + // i.e., pretend that the first line is already indented. + ws := ignore + if mode&noIndent == 0 { + ws = indent + } + + // The first linebreak is always a formfeed since this section must not + // depend on any previous formatting. + prevBreak := -1 // index of last expression that was followed by a linebreak + if prev.IsValid() && prev.Line < line && p.linebreak(line, 0, ws, true) > 0 { + ws = ignore + prevBreak = 0 + } + + // initialize expression/key size: a zero value indicates expr/key doesn't fit on a single line + size := 0 + + // We use the ratio between the geometric mean of the previous key sizes and + // the current size to determine if there should be a break in the alignment. + // To compute the geometric mean we accumulate the ln(size) values (lnsum) + // and the number of sizes included (count). + lnsum := 0.0 + count := 0 + + // print all list elements + prevLine := prev.Line + for i, x := range list { + line = p.lineFor(x.Pos()) + + // Determine if the next linebreak, if any, needs to use formfeed: + // in general, use the entire node size to make the decision; for + // key:value expressions, use the key size. + // TODO(gri) for a better result, should probably incorporate both + // the key and the node size into the decision process + useFF := true + + // Determine element size: All bets are off if we don't have + // position information for the previous and next token (likely + // generated code - simply ignore the size in this case by setting + // it to 0). 
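+		// (Editor's note: a size of 0 therefore means "unknown or too
+		// large"; entries without a usable size never qualify for column
+		// alignment, so a needed break falls back to a section-breaking
+		// formfeed.)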
+ prevSize := size + const infinity = 1e6 // larger than any source line + size = p.nodeSize(x, infinity) + pair, isPair := x.(*ast.KeyValueExpr) + if size <= infinity && prev.IsValid() && next.IsValid() { + // x fits on a single line + if isPair { + size = p.nodeSize(pair.Key, infinity) // size <= infinity + } + } else { + // size too large or we don't have good layout information + size = 0 + } + + // If the previous line and the current line had single- + // line-expressions and the key sizes are small or the + // ratio between the current key and the geometric mean + // if the previous key sizes does not exceed a threshold, + // align columns and do not use formfeed. + if prevSize > 0 && size > 0 { + const smallSize = 40 + if count == 0 || prevSize <= smallSize && size <= smallSize { + useFF = false + } else { + const r = 2.5 // threshold + geomean := math.Exp(lnsum / float64(count)) // count > 0 + ratio := float64(size) / geomean + useFF = r*ratio <= 1 || r <= ratio + } + } + + needsLinebreak := 0 < prevLine && prevLine < line + if i > 0 { + // Use position of expression following the comma as + // comma position for correct comment placement, but + // only if the expression is on the same line. + if !needsLinebreak { + p.setPos(x.Pos()) + } + p.print(token.COMMA) + needsBlank := true + if needsLinebreak { + // Lines are broken using newlines so comments remain aligned + // unless useFF is set or there are multiple expressions on + // the same line in which case formfeed is used. + nbreaks := p.linebreak(line, 0, ws, useFF || prevBreak+1 < i) + if nbreaks > 0 { + ws = ignore + prevBreak = i + needsBlank = false // we got a line break instead + } + // If there was a new section or more than one new line + // (which means that the tabwriter will implicitly break + // the section), reset the geomean variables since we are + // starting a new group of elements with the next element. + if nbreaks > 1 { + lnsum = 0 + count = 0 + } + } + if needsBlank { + p.print(blank) + } + } + + if len(list) > 1 && isPair && size > 0 && needsLinebreak { + // We have a key:value expression that fits onto one line + // and it's not on the same line as the prior expression: + // Use a column for the key such that consecutive entries + // can align if possible. + // (needsLinebreak is set if we started a new line before) + p.expr(pair.Key) + p.setPos(pair.Colon) + p.print(token.COLON, vtab) + p.expr(pair.Value) + } else { + p.expr0(x, depth) + } + + if size > 0 { + lnsum += math.Log(float64(size)) + count++ + } + + prevLine = line + } + + if mode&commaTerm != 0 && next.IsValid() && p.pos.Line < next.Line { + // Print a terminating comma if the next token is on a new line. 
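+		// (Editor's note: this is why a multi-line composite literal such as
+		//	[]int{
+		//		1,
+		//		2,
+		//	}
+		// keeps the comma after its last element.)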
+ p.print(token.COMMA) + if isIncomplete { + p.print(newline) + p.print("// " + filteredMsg) + } + if ws == ignore && mode&noIndent == 0 { + // unindent if we indented + p.print(unindent) + } + p.print(formfeed) // terminating comma needs a line break to look good + return + } + + if isIncomplete { + p.print(token.COMMA, newline) + p.print("// "+filteredMsg, newline) + } + + if ws == ignore && mode&noIndent == 0 { + // unindent if we indented + p.print(unindent) + } +} + +type paramMode int + +const ( + funcParam paramMode = iota + funcTParam + typeTParam +) + +func (p *printer) parameters(fields *ast.FieldList, mode paramMode) { + openTok, closeTok := token.LPAREN, token.RPAREN + if mode != funcParam { + openTok, closeTok = token.LBRACK, token.RBRACK + } + p.setPos(fields.Opening) + p.print(openTok) + if len(fields.List) > 0 { + prevLine := p.lineFor(fields.Opening) + ws := indent + for i, par := range fields.List { + // determine par begin and end line (may be different + // if there are multiple parameter names for this par + // or the type is on a separate line) + parLineBeg := p.lineFor(par.Pos()) + parLineEnd := p.lineFor(par.End()) + // separating "," if needed + needsLinebreak := 0 < prevLine && prevLine < parLineBeg + if i > 0 { + // use position of parameter following the comma as + // comma position for correct comma placement, but + // only if the next parameter is on the same line + if !needsLinebreak { + p.setPos(par.Pos()) + } + p.print(token.COMMA) + } + // separator if needed (linebreak or blank) + if needsLinebreak && p.linebreak(parLineBeg, 0, ws, true) > 0 { + // break line if the opening "(" or previous parameter ended on a different line + ws = ignore + } else if i > 0 { + p.print(blank) + } + // parameter names + if len(par.Names) > 0 { + // Very subtle: If we indented before (ws == ignore), identList + // won't indent again. If we didn't (ws == indent), identList will + // indent if the identList spans multiple lines, and it will outdent + // again at the end (and still ws == indent). Thus, a subsequent indent + // by a linebreak call after a type, or in the next multi-line identList + // will do the right thing. + p.identList(par.Names, ws == indent) + p.print(blank) + } + // parameter type + p.expr(stripParensAlways(par.Type)) + prevLine = parLineEnd + } + + // if the closing ")" is on a separate line from the last parameter, + // print an additional "," and line break + if closing := p.lineFor(fields.Closing); 0 < prevLine && prevLine < closing { + p.print(token.COMMA) + p.linebreak(closing, 0, ignore, true) + } else if mode == typeTParam && fields.NumFields() == 1 && combinesWithName(fields.List[0].Type) { + // A type parameter list [P T] where the name P and the type expression T syntactically + // combine to another valid (value) expression requires a trailing comma, as in [P *T,] + // (or an enclosing interface as in [P interface(*T)]), so that the type parameter list + // is not parsed as an array length [P*T]. + p.print(token.COMMA) + } + + // unindent if we indented + if ws == ignore { + p.print(unindent) + } + } + + p.setPos(fields.Closing) + p.print(closeTok) +} + +// combinesWithName reports whether a name followed by the expression x +// syntactically combines to another valid (value) expression. For instance +// using *T for x, "name *T" syntactically appears as the expression x*T. +// On the other hand, using P|Q or *P|~Q for x, "name P|Q" or name *P|~Q" +// cannot be combined into a valid (value) expression. 
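+// (Editor's note: e.g. in "type S[P *T]" the pair P *T would otherwise
+// read as the array length expression P*T, which is why the caller
+// prints a trailing comma: "type S[P *T,]".)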
+func combinesWithName(x ast.Expr) bool { + switch x := x.(type) { + case *ast.StarExpr: + // name *x.X combines to name*x.X if x.X is not a type element + return !isTypeElem(x.X) + case *ast.BinaryExpr: + return combinesWithName(x.X) && !isTypeElem(x.Y) + case *ast.ParenExpr: + // name(x) combines but we are making sure at + // the call site that x is never parenthesized. + panic("unexpected parenthesized expression") + } + return false +} + +// isTypeElem reports whether x is a (possibly parenthesized) type element expression. +// The result is false if x could be a type element OR an ordinary (value) expression. +func isTypeElem(x ast.Expr) bool { + switch x := x.(type) { + case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType: + return true + case *ast.UnaryExpr: + return x.Op == token.TILDE + case *ast.BinaryExpr: + return isTypeElem(x.X) || isTypeElem(x.Y) + case *ast.ParenExpr: + return isTypeElem(x.X) + } + return false +} + +func (p *printer) signature(sig *ast.FuncType) { + if sig.TypeParams != nil { + p.parameters(sig.TypeParams, funcTParam) + } + if sig.Params != nil { + p.parameters(sig.Params, funcParam) + } else { + p.print(token.LPAREN, token.RPAREN) + } + res := sig.Results + n := res.NumFields() + if n > 0 { + // res != nil + p.print(blank) + if n == 1 && res.List[0].Names == nil { + // single anonymous res; no ()'s + p.expr(stripParensAlways(res.List[0].Type)) + return + } + p.parameters(res, funcParam) + } +} + +func identListSize(list []*ast.Ident, maxSize int) (size int) { + for i, x := range list { + if i > 0 { + size += len(", ") + } + size += utf8.RuneCountInString(x.Name) + if size >= maxSize { + break + } + } + return +} + +func (p *printer) isOneLineFieldList(list []*ast.Field) bool { + if len(list) != 1 { + return false // allow only one field + } + f := list[0] + if f.Tag != nil || f.Comment != nil { + return false // don't allow tags or comments + } + // only name(s) and type + const maxSize = 30 // adjust as appropriate, this is an approximate value + namesSize := identListSize(f.Names, maxSize) + if namesSize > 0 { + namesSize = 1 // blank between names and types + } + typeSize := p.nodeSize(f.Type, maxSize) + return namesSize+typeSize <= maxSize +} + +func (p *printer) setLineComment(text string) { + p.setComment(&ast.CommentGroup{List: []*ast.Comment{{Slash: token.NoPos, Text: text}}}) +} + +func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) { + lbrace := fields.Opening + list := fields.List + rbrace := fields.Closing + hasComments := isIncomplete || p.commentBefore(p.posFor(rbrace)) + srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.lineFor(lbrace) == p.lineFor(rbrace) + + if !hasComments && srcIsOneLine { + // possibly a one-line struct/interface + if len(list) == 0 { + // no blank between keyword and {} in this case + p.setPos(lbrace) + p.print(token.LBRACE) + p.setPos(rbrace) + p.print(token.RBRACE) + return + } else if p.isOneLineFieldList(list) { + // small enough - print on one line + // (don't use identList and ignore source line breaks) + p.setPos(lbrace) + p.print(token.LBRACE, blank) + f := list[0] + if isStruct { + for i, x := range f.Names { + if i > 0 { + // no comments so no need for comma position + p.print(token.COMMA, blank) + } + p.expr(x) + } + if len(f.Names) > 0 { + p.print(blank) + } + p.expr(f.Type) + } else { // interface + if len(f.Names) > 0 { + name := f.Names[0] // method name + p.expr(name) + p.signature(f.Type.(*ast.FuncType)) // don't print 
"func" + } else { + // embedded interface + p.expr(f.Type) + } + } + p.print(blank) + p.setPos(rbrace) + p.print(token.RBRACE) + return + } + } + // hasComments || !srcIsOneLine + + p.print(blank) + p.setPos(lbrace) + p.print(token.LBRACE, indent) + if hasComments || len(list) > 0 { + p.print(formfeed) + } + + if isStruct { + + sep := vtab + if len(list) == 1 { + sep = blank + } + var line int + for i, f := range list { + if i > 0 { + p.linebreak(p.lineFor(f.Pos()), 1, ignore, p.linesFrom(line) > 0) + } + extraTabs := 0 + p.setComment(f.Doc) + p.recordLine(&line) + if len(f.Names) > 0 { + // named fields + p.identList(f.Names, false) + p.print(sep) + p.expr(f.Type) + extraTabs = 1 + } else { + // anonymous field + p.expr(f.Type) + extraTabs = 2 + } + if f.Tag != nil { + if len(f.Names) > 0 && sep == vtab { + p.print(sep) + } + p.print(sep) + p.expr(f.Tag) + extraTabs = 0 + } + if f.Comment != nil { + for ; extraTabs > 0; extraTabs-- { + p.print(sep) + } + p.setComment(f.Comment) + } + } + if isIncomplete { + if len(list) > 0 { + p.print(formfeed) + } + p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment + p.setLineComment("// " + filteredMsg) + } + + } else { // interface + + var line int + var prev *ast.Ident // previous "type" identifier + for i, f := range list { + var name *ast.Ident // first name, or nil + if len(f.Names) > 0 { + name = f.Names[0] + } + if i > 0 { + // don't do a line break (min == 0) if we are printing a list of types + // TODO(gri) this doesn't work quite right if the list of types is + // spread across multiple lines + min := 1 + if prev != nil && name == prev { + min = 0 + } + p.linebreak(p.lineFor(f.Pos()), min, ignore, p.linesFrom(line) > 0) + } + p.setComment(f.Doc) + p.recordLine(&line) + if name != nil { + // method + p.expr(name) + p.signature(f.Type.(*ast.FuncType)) // don't print "func" + prev = nil + } else { + // embedded interface + p.expr(f.Type) + prev = nil + } + p.setComment(f.Comment) + } + if isIncomplete { + if len(list) > 0 { + p.print(formfeed) + } + p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment + p.setLineComment("// contains filtered or unexported methods") + } + + } + p.print(unindent, formfeed) + p.setPos(rbrace) + p.print(token.RBRACE) +} + +// ---------------------------------------------------------------------------- +// Expressions + +func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) { + switch e.Op.Precedence() { + case 4: + has4 = true + case 5: + has5 = true + } + + switch l := e.X.(type) { + case *ast.BinaryExpr: + if l.Op.Precedence() < e.Op.Precedence() { + // parens will be inserted. + // pretend this is an *ast.ParenExpr and do nothing. + break + } + h4, h5, mp := walkBinary(l) + has4 = has4 || h4 + has5 = has5 || h5 + if maxProblem < mp { + maxProblem = mp + } + } + + switch r := e.Y.(type) { + case *ast.BinaryExpr: + if r.Op.Precedence() <= e.Op.Precedence() { + // parens will be inserted. + // pretend this is an *ast.ParenExpr and do nothing. 
+ break + } + h4, h5, mp := walkBinary(r) + has4 = has4 || h4 + has5 = has5 || h5 + if maxProblem < mp { + maxProblem = mp + } + + case *ast.StarExpr: + if e.Op == token.QUO { // `*/` + maxProblem = 5 + } + + case *ast.UnaryExpr: + switch e.Op.String() + r.Op.String() { + case "/*", "&&", "&^": + maxProblem = 5 + case "++", "--": + if maxProblem < 4 { + maxProblem = 4 + } + } + } + return +} + +func cutoff(e *ast.BinaryExpr, depth int) int { + has4, has5, maxProblem := walkBinary(e) + if maxProblem > 0 { + return maxProblem + 1 + } + if has4 && has5 { + if depth == 1 { + return 5 + } + return 4 + } + if depth == 1 { + return 6 + } + return 4 +} + +func diffPrec(expr ast.Expr, prec int) int { + x, ok := expr.(*ast.BinaryExpr) + if !ok || prec != x.Op.Precedence() { + return 1 + } + return 0 +} + +func reduceDepth(depth int) int { + depth-- + if depth < 1 { + depth = 1 + } + return depth +} + +// Format the binary expression: decide the cutoff and then format. +// Let's call depth == 1 Normal mode, and depth > 1 Compact mode. +// (Algorithm suggestion by Russ Cox.) +// +// The precedences are: +// +// 5 * / % << >> & &^ +// 4 + - | ^ +// 3 == != < <= > >= +// 2 && +// 1 || +// +// The only decision is whether there will be spaces around levels 4 and 5. +// There are never spaces at level 6 (unary), and always spaces at levels 3 and below. +// +// To choose the cutoff, look at the whole expression but excluding primary +// expressions (function calls, parenthesized exprs), and apply these rules: +// +// 1. If there is a binary operator with a right side unary operand +// that would clash without a space, the cutoff must be (in order): +// +// /* 6 +// && 6 +// &^ 6 +// ++ 5 +// -- 5 +// +// (Comparison operators always have spaces around them.) +// +// 2. If there is a mix of level 5 and level 4 operators, then the cutoff +// is 5 (use spaces to distinguish precedence) in Normal mode +// and 4 (never use spaces) in Compact mode. +// +// 3. If there are no level 4 operators or no level 5 operators, then the +// cutoff is 6 (always use spaces) in Normal mode +// and 4 (never use spaces) in Compact mode. +func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) { + prec := x.Op.Precedence() + if prec < prec1 { + // parenthesis needed + // Note: The parser inserts an ast.ParenExpr node; thus this case + // can only occur if the AST is created in a different way. + p.print(token.LPAREN) + p.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth + p.print(token.RPAREN) + return + } + + printBlank := prec < cutoff + + ws := indent + p.expr1(x.X, prec, depth+diffPrec(x.X, prec)) + if printBlank { + p.print(blank) + } + xline := p.pos.Line // before the operator (it may be on the next line!) 
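+	// (Editor's note: when the right operand starts on a later source
+	// line, the operator stays attached to the left operand and an
+	// indented break follows, e.g.
+	//	sum := a +
+	//		b
+	// keeps its line break.)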
+ yline := p.lineFor(x.Y.Pos()) + p.setPos(x.OpPos) + p.print(x.Op) + if xline != yline && xline > 0 && yline > 0 { + // at least one line break, but respect an extra empty line + // in the source + if p.linebreak(yline, 1, ws, true) > 0 { + ws = ignore + printBlank = false // no blank after line break + } + } + if printBlank { + p.print(blank) + } + p.expr1(x.Y, prec+1, depth+1) + if ws == ignore { + p.print(unindent) + } +} + +func isBinary(expr ast.Expr) bool { + _, ok := expr.(*ast.BinaryExpr) + return ok +} + +func (p *printer) expr1(expr ast.Expr, prec1, depth int) { + p.setPos(expr.Pos()) + + switch x := expr.(type) { + case *ast.BadExpr: + p.print("BadExpr") + + case *ast.Ident: + p.print(x) + + case *ast.BinaryExpr: + if depth < 1 { + p.internalError("depth < 1:", depth) + depth = 1 + } + p.binaryExpr(x, prec1, cutoff(x, depth), depth) + + case *ast.KeyValueExpr: + p.expr(x.Key) + p.setPos(x.Colon) + p.print(token.COLON, blank) + p.expr(x.Value) + + case *ast.StarExpr: + const prec = token.UnaryPrec + if prec < prec1 { + // parenthesis needed + p.print(token.LPAREN) + p.print(token.MUL) + p.expr(x.X) + p.print(token.RPAREN) + } else { + // no parenthesis needed + p.print(token.MUL) + p.expr(x.X) + } + + case *ast.UnaryExpr: + const prec = token.UnaryPrec + if prec < prec1 { + // parenthesis needed + p.print(token.LPAREN) + p.expr(x) + p.print(token.RPAREN) + } else { + // no parenthesis needed + p.print(x.Op) + if x.Op == token.RANGE { + // TODO(gri) Remove this code if it cannot be reached. + p.print(blank) + } + p.expr1(x.X, prec, depth) + } + + case *ast.BasicLit: + if p.Config.Mode&normalizeNumbers != 0 { + x = normalizedNumber(x) + } + p.print(x) + + case *ast.FuncLit: + p.setPos(x.Type.Pos()) + p.print(token.FUNC) + // See the comment in funcDecl about how the header size is computed. 
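+		// (Editor's note: short literals such as
+		//	func(x int) int { return x * x }
+		// stay on one line because funcBody keeps header plus body
+		// together when their combined size fits its budget of 100.)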
+ startCol := p.out.Column - len("func") + p.signature(x.Type) + p.funcBody(p.distanceFrom(x.Type.Pos(), startCol), blank, x.Body) + + case *ast.ParenExpr: + if _, hasParens := x.X.(*ast.ParenExpr); hasParens { + // don't print parentheses around an already parenthesized expression + // TODO(gri) consider making this more general and incorporate precedence levels + p.expr0(x.X, depth) + } else { + p.print(token.LPAREN) + p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth + p.setPos(x.Rparen) + p.print(token.RPAREN) + } + + case *ast.SelectorExpr: + p.selectorExpr(x, depth, false) + + case *ast.TypeAssertExpr: + p.expr1(x.X, token.HighestPrec, depth) + p.print(token.PERIOD) + p.setPos(x.Lparen) + p.print(token.LPAREN) + if x.Type != nil { + p.expr(x.Type) + } else { + p.print(token.TYPE) + } + p.setPos(x.Rparen) + p.print(token.RPAREN) + + case *ast.IndexExpr: + // TODO(gri): should treat[] like parentheses and undo one level of depth + p.expr1(x.X, token.HighestPrec, 1) + p.setPos(x.Lbrack) + p.print(token.LBRACK) + p.expr0(x.Index, depth+1) + p.setPos(x.Rbrack) + p.print(token.RBRACK) + + case *ast.IndexListExpr: + // TODO(gri): as for IndexExpr, should treat [] like parentheses and undo + // one level of depth + p.expr1(x.X, token.HighestPrec, 1) + p.setPos(x.Lbrack) + p.print(token.LBRACK) + p.exprList(x.Lbrack, x.Indices, depth+1, commaTerm, x.Rbrack, false) + p.setPos(x.Rbrack) + p.print(token.RBRACK) + + case *ast.SliceExpr: + // TODO(gri): should treat[] like parentheses and undo one level of depth + p.expr1(x.X, token.HighestPrec, 1) + p.setPos(x.Lbrack) + p.print(token.LBRACK) + indices := []ast.Expr{x.Low, x.High} + if x.Max != nil { + indices = append(indices, x.Max) + } + // determine if we need extra blanks around ':' + var needsBlanks bool + if depth <= 1 { + var indexCount int + var hasBinaries bool + for _, x := range indices { + if x != nil { + indexCount++ + if isBinary(x) { + hasBinaries = true + } + } + } + if indexCount > 1 && hasBinaries { + needsBlanks = true + } + } + for i, x := range indices { + if i > 0 { + if indices[i-1] != nil && needsBlanks { + p.print(blank) + } + p.print(token.COLON) + if x != nil && needsBlanks { + p.print(blank) + } + } + if x != nil { + p.expr0(x, depth+1) + } + } + p.setPos(x.Rbrack) + p.print(token.RBRACK) + + case *ast.CallExpr: + if len(x.Args) > 1 { + depth++ + } + var wasIndented bool + if _, ok := x.Fun.(*ast.FuncType); ok { + // conversions to literal function types require parentheses around the type + p.print(token.LPAREN) + wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth) + p.print(token.RPAREN) + } else { + wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth) + } + p.setPos(x.Lparen) + p.print(token.LPAREN) + if x.Ellipsis.IsValid() { + p.exprList(x.Lparen, x.Args, depth, 0, x.Ellipsis, false) + p.setPos(x.Ellipsis) + p.print(token.ELLIPSIS) + if x.Rparen.IsValid() && p.lineFor(x.Ellipsis) < p.lineFor(x.Rparen) { + p.print(token.COMMA, formfeed) + } + } else { + p.exprList(x.Lparen, x.Args, depth, commaTerm, x.Rparen, false) + } + p.setPos(x.Rparen) + p.print(token.RPAREN) + if wasIndented { + p.print(unindent) + } + + case *ast.CompositeLit: + // composite literal elements that are composite literals themselves may have the type omitted + if x.Type != nil { + p.expr1(x.Type, token.HighestPrec, depth) + } + p.level++ + p.setPos(x.Lbrace) + p.print(token.LBRACE) + p.exprList(x.Lbrace, x.Elts, 1, commaTerm, x.Rbrace, x.Incomplete) + // do not insert extra line break 
following a /*-style comment + // before the closing '}' as it might break the code if there + // is no trailing ',' + mode := noExtraLinebreak + // do not insert extra blank following a /*-style comment + // before the closing '}' unless the literal is empty + if len(x.Elts) > 0 { + mode |= noExtraBlank + } + // need the initial indent to print lone comments with + // the proper level of indentation + p.print(indent, unindent, mode) + p.setPos(x.Rbrace) + p.print(token.RBRACE, mode) + p.level-- + + case *ast.Ellipsis: + p.print(token.ELLIPSIS) + if x.Elt != nil { + p.expr(x.Elt) + } + + case *ast.ArrayType: + p.print(token.LBRACK) + if x.Len != nil { + p.expr(x.Len) + } + p.print(token.RBRACK) + p.expr(x.Elt) + + case *ast.StructType: + p.print(token.STRUCT) + p.fieldList(x.Fields, true, x.Incomplete) + + case *ast.FuncType: + p.print(token.FUNC) + p.signature(x) + + case *ast.InterfaceType: + p.print(token.INTERFACE) + p.fieldList(x.Methods, false, x.Incomplete) + + case *ast.MapType: + p.print(token.MAP, token.LBRACK) + p.expr(x.Key) + p.print(token.RBRACK) + p.expr(x.Value) + + case *ast.ChanType: + switch x.Dir { + case ast.SEND | ast.RECV: + p.print(token.CHAN) + case ast.RECV: + p.print(token.ARROW, token.CHAN) // x.Arrow and x.Pos() are the same + case ast.SEND: + p.print(token.CHAN) + p.setPos(x.Arrow) + p.print(token.ARROW) + } + p.print(blank) + p.expr(x.Value) + + default: + panic("unreachable") + } +} + +// normalizedNumber rewrites base prefixes and exponents +// of numbers to use lower-case letters (0X123 to 0x123 and 1.2E3 to 1.2e3), +// and removes leading 0's from integer imaginary literals (0765i to 765i). +// It leaves hexadecimal digits alone. +// +// normalizedNumber doesn't modify the ast.BasicLit value lit points to. +// If lit is not a number or a number in canonical format already, +// lit is returned as is. Otherwise a new ast.BasicLit is created. +func normalizedNumber(lit *ast.BasicLit) *ast.BasicLit { + if lit.Kind != token.INT && lit.Kind != token.FLOAT && lit.Kind != token.IMAG { + return lit // not a number - nothing to do + } + if len(lit.Value) < 2 { + return lit // only one digit (common case) - nothing to do + } + // len(lit.Value) >= 2 + + // We ignore lit.Kind because for lit.Kind == token.IMAG the literal may be an integer + // or floating-point value, decimal or not. Instead, just consider the literal pattern. 
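+	// (Editor's note, examples: "0X1F" becomes "0x1F" (hex digits are
+	// left alone), "1.2E3" becomes "1.2e3", and "0765i" becomes "765i".)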
+ x := lit.Value + switch x[:2] { + default: + // 0-prefix octal, decimal int, or float (possibly with 'i' suffix) + if i := strings.LastIndexByte(x, 'E'); i >= 0 { + x = x[:i] + "e" + x[i+1:] + break + } + // remove leading 0's from integer (but not floating-point) imaginary literals + if x[len(x)-1] == 'i' && !strings.ContainsAny(x, ".e") { + x = strings.TrimLeft(x, "0_") + if x == "i" { + x = "0i" + } + } + case "0X": + x = "0x" + x[2:] + // possibly a hexadecimal float + if i := strings.LastIndexByte(x, 'P'); i >= 0 { + x = x[:i] + "p" + x[i+1:] + } + case "0x": + // possibly a hexadecimal float + i := strings.LastIndexByte(x, 'P') + if i == -1 { + return lit // nothing to do + } + x = x[:i] + "p" + x[i+1:] + case "0O": + x = "0o" + x[2:] + case "0o": + return lit // nothing to do + case "0B": + x = "0b" + x[2:] + case "0b": + return lit // nothing to do + } + + return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: lit.Kind, Value: x} +} + +func (p *printer) possibleSelectorExpr(expr ast.Expr, prec1, depth int) bool { + if x, ok := expr.(*ast.SelectorExpr); ok { + return p.selectorExpr(x, depth, true) + } + p.expr1(expr, prec1, depth) + return false +} + +// selectorExpr handles an *ast.SelectorExpr node and reports whether x spans +// multiple lines. +func (p *printer) selectorExpr(x *ast.SelectorExpr, depth int, isMethod bool) bool { + p.expr1(x.X, token.HighestPrec, depth) + p.print(token.PERIOD) + if line := p.lineFor(x.Sel.Pos()); p.pos.IsValid() && p.pos.Line < line { + p.print(indent, newline) + p.setPos(x.Sel.Pos()) + p.print(x.Sel) + if !isMethod { + p.print(unindent) + } + return true + } + p.setPos(x.Sel.Pos()) + p.print(x.Sel) + return false +} + +func (p *printer) expr0(x ast.Expr, depth int) { + p.expr1(x, token.LowestPrec, depth) +} + +func (p *printer) expr(x ast.Expr) { + const depth = 1 + p.expr1(x, token.LowestPrec, depth) +} + +// ---------------------------------------------------------------------------- +// Statements + +// Print the statement list indented, but without a newline after the last statement. +// Extra line breaks between statements in the source are respected but at most one +// empty line is printed between statements. +func (p *printer) stmtList(list []ast.Stmt, nindent int, nextIsRBrace bool) { + if nindent > 0 { + p.print(indent) + } + var line int + i := 0 + for _, s := range list { + // ignore empty statements (was issue 3466) + if _, isEmpty := s.(*ast.EmptyStmt); !isEmpty { + // nindent == 0 only for lists of switch/select case clauses; + // in those cases each clause is a new section + if len(p.output) > 0 { + // only print line break if we are not at the beginning of the output + // (i.e., we are not printing only a partial program) + p.linebreak(p.lineFor(s.Pos()), 1, ignore, i == 0 || nindent == 0 || p.linesFrom(line) > 0) + } + p.recordLine(&line) + p.stmt(s, nextIsRBrace && i == len(list)-1) + // labeled statements put labels on a separate line, but here + // we only care about the start line of the actual statement + // without label - correct line for each label + for t := s; ; { + lt, _ := t.(*ast.LabeledStmt) + if lt == nil { + break + } + line++ + t = lt.Stmt + } + i++ + } + } + if nindent > 0 { + p.print(unindent) + } +} + +// block prints an *ast.BlockStmt; it always spans at least two lines. 
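+// (Editor's note: even an empty block prints its braces on separate
+// lines here; one-line function bodies are produced by funcBody, which
+// bypasses block for small enough bodies.)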
+func (p *printer) block(b *ast.BlockStmt, nindent int) { + p.setPos(b.Lbrace) + p.print(token.LBRACE) + p.stmtList(b.List, nindent, true) + p.linebreak(p.lineFor(b.Rbrace), 1, ignore, true) + p.setPos(b.Rbrace) + p.print(token.RBRACE) +} + +func isTypeName(x ast.Expr) bool { + switch t := x.(type) { + case *ast.Ident: + return true + case *ast.SelectorExpr: + return isTypeName(t.X) + } + return false +} + +func stripParens(x ast.Expr) ast.Expr { + if px, strip := x.(*ast.ParenExpr); strip { + // parentheses must not be stripped if there are any + // unparenthesized composite literals starting with + // a type name + ast.Inspect(px.X, func(node ast.Node) bool { + switch x := node.(type) { + case *ast.ParenExpr: + // parentheses protect enclosed composite literals + return false + case *ast.CompositeLit: + if isTypeName(x.Type) { + strip = false // do not strip parentheses + } + return false + } + // in all other cases, keep inspecting + return true + }) + if strip { + return stripParens(px.X) + } + } + return x +} + +func stripParensAlways(x ast.Expr) ast.Expr { + if x, ok := x.(*ast.ParenExpr); ok { + return stripParensAlways(x.X) + } + return x +} + +func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, post ast.Stmt) { + p.print(blank) + needsBlank := false + if init == nil && post == nil { + // no semicolons required + if expr != nil { + p.expr(stripParens(expr)) + needsBlank = true + } + } else { + // all semicolons required + // (they are not separators, print them explicitly) + if init != nil { + p.stmt(init, false) + } + p.print(token.SEMICOLON, blank) + if expr != nil { + p.expr(stripParens(expr)) + needsBlank = true + } + if isForStmt { + p.print(token.SEMICOLON, blank) + needsBlank = false + if post != nil { + p.stmt(post, false) + needsBlank = true + } + } + } + if needsBlank { + p.print(blank) + } +} + +// indentList reports whether an expression list would look better if it +// were indented wholesale (starting with the very first element, rather +// than starting at the first line break). +func (p *printer) indentList(list []ast.Expr) bool { + // Heuristic: indentList reports whether there are more than one multi- + // line element in the list, or if there is any element that is not + // starting on the same line as the previous one ends. 
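+	// (Editor's note: e.g. a return statement whose results span several
+	// source lines is indented as a whole, which reads better than
+	// indenting only after the first line break; see issue 1207 below.)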
+ if len(list) >= 2 { + b := p.lineFor(list[0].Pos()) + e := p.lineFor(list[len(list)-1].End()) + if 0 < b && b < e { + // list spans multiple lines + n := 0 // multi-line element count + line := b + for _, x := range list { + xb := p.lineFor(x.Pos()) + xe := p.lineFor(x.End()) + if line < xb { + // x is not starting on the same + // line as the previous one ended + return true + } + if xb < xe { + // x is a multi-line element + n++ + } + line = xe + } + return n > 1 + } + } + return false +} + +func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool) { + p.setPos(stmt.Pos()) + + switch s := stmt.(type) { + case *ast.BadStmt: + p.print("BadStmt") + + case *ast.DeclStmt: + p.decl(s.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + // a "correcting" unindent immediately following a line break + // is applied before the line break if there is no comment + // between (see writeWhitespace) + p.print(unindent) + p.expr(s.Label) + p.setPos(s.Colon) + p.print(token.COLON, indent) + if e, isEmpty := s.Stmt.(*ast.EmptyStmt); isEmpty { + if !nextIsRBrace { + p.print(newline) + p.setPos(e.Pos()) + p.print(token.SEMICOLON) + break + } + } else { + p.linebreak(p.lineFor(s.Stmt.Pos()), 1, ignore, true) + } + p.stmt(s.Stmt, nextIsRBrace) + + case *ast.ExprStmt: + const depth = 1 + p.expr0(s.X, depth) + + case *ast.SendStmt: + const depth = 1 + p.expr0(s.Chan, depth) + p.print(blank) + p.setPos(s.Arrow) + p.print(token.ARROW, blank) + p.expr0(s.Value, depth) + + case *ast.IncDecStmt: + const depth = 1 + p.expr0(s.X, depth+1) + p.setPos(s.TokPos) + p.print(s.Tok) + + case *ast.AssignStmt: + depth := 1 + if len(s.Lhs) > 1 && len(s.Rhs) > 1 { + depth++ + } + p.exprList(s.Pos(), s.Lhs, depth, 0, s.TokPos, false) + p.print(blank) + p.setPos(s.TokPos) + p.print(s.Tok, blank) + p.exprList(s.TokPos, s.Rhs, depth, 0, token.NoPos, false) + + case *ast.GoStmt: + p.print(token.GO, blank) + p.expr(s.Call) + + case *ast.DeferStmt: + p.print(token.DEFER, blank) + p.expr(s.Call) + + case *ast.ReturnStmt: + p.print(token.RETURN) + if s.Results != nil { + p.print(blank) + // Use indentList heuristic to make corner cases look + // better (issue 1207). A more systematic approach would + // always indent, but this would cause significant + // reformatting of the code base and not necessarily + // lead to more nicely formatted code in general. + if p.indentList(s.Results) { + p.print(indent) + // Use NoPos so that a newline never goes before + // the results (see issue #32854). + p.exprList(token.NoPos, s.Results, 1, noIndent, token.NoPos, false) + p.print(unindent) + } else { + p.exprList(token.NoPos, s.Results, 1, 0, token.NoPos, false) + } + } + + case *ast.BranchStmt: + p.print(s.Tok) + if s.Label != nil { + p.print(blank) + p.expr(s.Label) + } + + case *ast.BlockStmt: + p.block(s, 1) + + case *ast.IfStmt: + p.print(token.IF) + p.controlClause(false, s.Init, s.Cond, nil) + p.block(s.Body, 1) + if s.Else != nil { + p.print(blank, token.ELSE, blank) + switch s.Else.(type) { + case *ast.BlockStmt, *ast.IfStmt: + p.stmt(s.Else, nextIsRBrace) + default: + // This can only happen with an incorrectly + // constructed AST. Permit it but print so + // that it can be parsed without errors. 
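+			// (Editor's note: e.g. an AST whose Else field holds a
+			// bare assignment is printed wrapped in braces, as in
+			// "else { x = 1 }", rather than the invalid "else x = 1".)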
+ p.print(token.LBRACE, indent, formfeed) + p.stmt(s.Else, true) + p.print(unindent, formfeed, token.RBRACE) + } + } + + case *ast.CaseClause: + if s.List != nil { + p.print(token.CASE, blank) + p.exprList(s.Pos(), s.List, 1, 0, s.Colon, false) + } else { + p.print(token.DEFAULT) + } + p.setPos(s.Colon) + p.print(token.COLON) + p.stmtList(s.Body, 1, nextIsRBrace) + + case *ast.SwitchStmt: + p.print(token.SWITCH) + p.controlClause(false, s.Init, s.Tag, nil) + p.block(s.Body, 0) + + case *ast.TypeSwitchStmt: + p.print(token.SWITCH) + if s.Init != nil { + p.print(blank) + p.stmt(s.Init, false) + p.print(token.SEMICOLON) + } + p.print(blank) + p.stmt(s.Assign, false) + p.print(blank) + p.block(s.Body, 0) + + case *ast.CommClause: + if s.Comm != nil { + p.print(token.CASE, blank) + p.stmt(s.Comm, false) + } else { + p.print(token.DEFAULT) + } + p.setPos(s.Colon) + p.print(token.COLON) + p.stmtList(s.Body, 1, nextIsRBrace) + + case *ast.SelectStmt: + p.print(token.SELECT, blank) + body := s.Body + if len(body.List) == 0 && !p.commentBefore(p.posFor(body.Rbrace)) { + // print empty select statement w/o comments on one line + p.setPos(body.Lbrace) + p.print(token.LBRACE) + p.setPos(body.Rbrace) + p.print(token.RBRACE) + } else { + p.block(body, 0) + } + + case *ast.ForStmt: + p.print(token.FOR) + p.controlClause(true, s.Init, s.Cond, s.Post) + p.block(s.Body, 1) + + case *ast.RangeStmt: + p.print(token.FOR, blank) + if s.Key != nil { + p.expr(s.Key) + if s.Value != nil { + // use position of value following the comma as + // comma position for correct comment placement + p.setPos(s.Value.Pos()) + p.print(token.COMMA, blank) + p.expr(s.Value) + } + p.print(blank) + p.setPos(s.TokPos) + p.print(s.Tok, blank) + } + p.print(token.RANGE, blank) + p.expr(stripParens(s.X)) + p.print(blank) + p.block(s.Body, 1) + + default: + panic("unreachable") + } +} + +// ---------------------------------------------------------------------------- +// Declarations + +// The keepTypeColumn function determines if the type column of a series of +// consecutive const or var declarations must be kept, or if initialization +// values (V) can be placed in the type column (T) instead. The i'th entry +// in the result slice is true if the type column in spec[i] must be kept. +// +// For example, the declaration: +// +// const ( +// foobar int = 42 // comment +// x = 7 // comment +// foo +// bar = 991 +// ) +// +// leads to the type/values matrix below. A run of value columns (V) can +// be moved into the type column if there is no type for any of the values +// in that column (we only move entire columns so that they align properly). 
+// +// matrix formatted result +// matrix +// T V -> T V -> true there is a T and so the type +// - V - V true column must be kept +// - - - - false +// - V V - false V is moved into T column +func keepTypeColumn(specs []ast.Spec) []bool { + m := make([]bool, len(specs)) + + populate := func(i, j int, keepType bool) { + if keepType { + for ; i < j; i++ { + m[i] = true + } + } + } + + i0 := -1 // if i0 >= 0 we are in a run and i0 is the start of the run + var keepType bool + for i, s := range specs { + t := s.(*ast.ValueSpec) + if t.Values != nil { + if i0 < 0 { + // start of a run of ValueSpecs with non-nil Values + i0 = i + keepType = false + } + } else { + if i0 >= 0 { + // end of a run + populate(i0, i, keepType) + i0 = -1 + } + } + if t.Type != nil { + keepType = true + } + } + if i0 >= 0 { + // end of a run + populate(i0, len(specs), keepType) + } + + return m +} + +func (p *printer) valueSpec(s *ast.ValueSpec, keepType bool) { + p.setComment(s.Doc) + p.identList(s.Names, false) // always present + extraTabs := 3 + if s.Type != nil || keepType { + p.print(vtab) + extraTabs-- + } + if s.Type != nil { + p.expr(s.Type) + } + if s.Values != nil { + p.print(vtab, token.ASSIGN, blank) + p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false) + extraTabs-- + } + if s.Comment != nil { + for ; extraTabs > 0; extraTabs-- { + p.print(vtab) + } + p.setComment(s.Comment) + } +} + +func sanitizeImportPath(lit *ast.BasicLit) *ast.BasicLit { + // Note: An unmodified AST generated by go/parser will already + // contain a backward- or double-quoted path string that does + // not contain any invalid characters, and most of the work + // here is not needed. However, a modified or generated AST + // may possibly contain non-canonical paths. Do the work in + // all cases since it's not too hard and not speed-critical. + + // if we don't have a proper string, be conservative and return whatever we have + if lit.Kind != token.STRING { + return lit + } + s, err := strconv.Unquote(lit.Value) + if err != nil { + return lit + } + + // if the string is an invalid path, return whatever we have + // + // spec: "Implementation restriction: A compiler may restrict + // ImportPaths to non-empty strings using only characters belonging + // to Unicode's L, M, N, P, and S general categories (the Graphic + // characters without spaces) and may also exclude the characters + // !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character + // U+FFFD." + if s == "" { + return lit + } + const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" + for _, r := range s { + if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { + return lit + } + } + + // otherwise, return the double-quoted path + s = strconv.Quote(s) + if s == lit.Value { + return lit // nothing wrong with lit + } + return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: token.STRING, Value: s} +} + +// The parameter n is the number of specs in the group. If doIndent is set, +// multi-line identifier lists in the spec are indented when the first +// linebreak is encountered. 
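+// (Editor's note: n matters because a lone type spec separates name and
+// type with a plain blank, while specs inside a group use vtab stops so
+// the tabwriter can align the type column across the group.)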
+func (p *printer) spec(spec ast.Spec, n int, doIndent bool) { + switch s := spec.(type) { + case *ast.ImportSpec: + p.setComment(s.Doc) + if s.Name != nil { + p.expr(s.Name) + p.print(blank) + } + p.expr(sanitizeImportPath(s.Path)) + p.setComment(s.Comment) + p.setPos(s.EndPos) + + case *ast.ValueSpec: + if n != 1 { + p.internalError("expected n = 1; got", n) + } + p.setComment(s.Doc) + p.identList(s.Names, doIndent) // always present + if s.Type != nil { + p.print(blank) + p.expr(s.Type) + } + if s.Values != nil { + p.print(blank, token.ASSIGN, blank) + p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false) + } + p.setComment(s.Comment) + + case *ast.TypeSpec: + p.setComment(s.Doc) + p.expr(s.Name) + if s.TypeParams != nil { + p.parameters(s.TypeParams, typeTParam) + } + if n == 1 { + p.print(blank) + } else { + p.print(vtab) + } + if s.Assign.IsValid() { + p.print(token.ASSIGN, blank) + } + p.expr(s.Type) + p.setComment(s.Comment) + + default: + panic("unreachable") + } +} + +func (p *printer) genDecl(d *ast.GenDecl) { + p.setComment(d.Doc) + p.setPos(d.Pos()) + p.print(d.Tok, blank) + + if d.Lparen.IsValid() || len(d.Specs) > 1 { + // group of parenthesized declarations + p.setPos(d.Lparen) + p.print(token.LPAREN) + if n := len(d.Specs); n > 0 { + p.print(indent, formfeed) + if n > 1 && (d.Tok == token.CONST || d.Tok == token.VAR) { + // two or more grouped const/var declarations: + // determine if the type column must be kept + keepType := keepTypeColumn(d.Specs) + var line int + for i, s := range d.Specs { + if i > 0 { + p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0) + } + p.recordLine(&line) + p.valueSpec(s.(*ast.ValueSpec), keepType[i]) + } + } else { + var line int + for i, s := range d.Specs { + if i > 0 { + p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0) + } + p.recordLine(&line) + p.spec(s, n, false) + } + } + p.print(unindent, formfeed) + } + p.setPos(d.Rparen) + p.print(token.RPAREN) + + } else if len(d.Specs) > 0 { + // single declaration + p.spec(d.Specs[0], 1, true) + } +} + +// sizeCounter is an io.Writer which counts the number of bytes written, +// as well as whether a newline character was seen. +type sizeCounter struct { + hasNewline bool + size int +} + +func (c *sizeCounter) Write(p []byte) (int, error) { + if !c.hasNewline { + for _, b := range p { + if b == '\n' || b == '\f' { + c.hasNewline = true + break + } + } + } + c.size += len(p) + return len(p), nil +} + +// nodeSize determines the size of n in chars after formatting. +// The result is <= maxSize if the node fits on one line with at +// most maxSize chars and the formatted output doesn't contain +// any control chars. Otherwise, the result is > maxSize. +func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) { + // nodeSize invokes the printer, which may invoke nodeSize + // recursively. For deep composite literal nests, this can + // lead to an exponential algorithm. Remember previous + // results to prune the recursion (was issue 1628). 
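+	// (Editor's note: the table is seeded with maxSize+1 below, so a
+	// recursive query for the node currently being measured
+	// conservatively answers "does not fit on one line".)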
+ if size, found := p.nodeSizes[n]; found { + return size + } + + size = maxSize + 1 // assume n doesn't fit + p.nodeSizes[n] = size + + // nodeSize computation must be independent of particular + // style so that we always get the same decision; print + // in RawFormat + cfg := Config{Mode: RawFormat} + var counter sizeCounter + if err := cfg.fprint(&counter, p.fset, n, p.nodeSizes); err != nil { + return + } + if counter.size <= maxSize && !counter.hasNewline { + // n fits in a single line + size = counter.size + p.nodeSizes[n] = size + } + return +} + +// numLines returns the number of lines spanned by node n in the original source. +func (p *printer) numLines(n ast.Node) int { + if from := n.Pos(); from.IsValid() { + if to := n.End(); to.IsValid() { + return p.lineFor(to) - p.lineFor(from) + 1 + } + } + return infinity +} + +// bodySize is like nodeSize but it is specialized for *ast.BlockStmt's. +func (p *printer) bodySize(b *ast.BlockStmt, maxSize int) int { + pos1 := b.Pos() + pos2 := b.Rbrace + if pos1.IsValid() && pos2.IsValid() && p.lineFor(pos1) != p.lineFor(pos2) { + // opening and closing brace are on different lines - don't make it a one-liner + return maxSize + 1 + } + if len(b.List) > 5 { + // too many statements - don't make it a one-liner + return maxSize + 1 + } + // otherwise, estimate body size + bodySize := p.commentSizeBefore(p.posFor(pos2)) + for i, s := range b.List { + if bodySize > maxSize { + break // no need to continue + } + if i > 0 { + bodySize += 2 // space for a semicolon and blank + } + bodySize += p.nodeSize(s, maxSize) + } + return bodySize +} + +// funcBody prints a function body following a function header of given headerSize. +// If the header's and block's size are "small enough" and the block is "simple enough", +// the block is printed on the current line, without line breaks, spaced from the header +// by sep. Otherwise the block's opening "{" is printed on the current line, followed by +// lines for the block's statements and its closing "}". +func (p *printer) funcBody(headerSize int, sep whiteSpace, b *ast.BlockStmt) { + if b == nil { + return + } + + // save/restore composite literal nesting level + defer func(level int) { + p.level = level + }(p.level) + p.level = 0 + + const maxSize = 100 + if headerSize+p.bodySize(b, maxSize) <= maxSize { + p.print(sep) + p.setPos(b.Lbrace) + p.print(token.LBRACE) + if len(b.List) > 0 { + p.print(blank) + for i, s := range b.List { + if i > 0 { + p.print(token.SEMICOLON, blank) + } + p.stmt(s, i == len(b.List)-1) + } + p.print(blank) + } + p.print(noExtraLinebreak) + p.setPos(b.Rbrace) + p.print(token.RBRACE, noExtraLinebreak) + return + } + + if sep != ignore { + p.print(blank) // always use blank + } + p.block(b, 1) +} + +// distanceFrom returns the column difference between p.out (the current output +// position) and startOutCol. If the start position is on a different line from +// the current position (or either is unknown), the result is infinity. +func (p *printer) distanceFrom(startPos token.Pos, startOutCol int) int { + if startPos.IsValid() && p.pos.IsValid() && p.posFor(startPos).Line == p.pos.Line { + return p.out.Column - startOutCol + } + return infinity +} + +func (p *printer) funcDecl(d *ast.FuncDecl) { + p.setComment(d.Doc) + p.setPos(d.Pos()) + p.print(token.FUNC, blank) + // We have to save startCol only after emitting FUNC; otherwise it can be on a + // different line (all whitespace preceding the FUNC is emitted only when the + // FUNC is emitted). 
+ startCol := p.out.Column - len("func ") + if d.Recv != nil { + p.parameters(d.Recv, funcParam) // method: print receiver + p.print(blank) + } + p.expr(d.Name) + p.signature(d.Type) + p.funcBody(p.distanceFrom(d.Pos(), startCol), vtab, d.Body) +} + +func (p *printer) decl(decl ast.Decl) { + switch d := decl.(type) { + case *ast.BadDecl: + p.setPos(d.Pos()) + p.print("BadDecl") + case *ast.GenDecl: + p.genDecl(d) + case *ast.FuncDecl: + p.funcDecl(d) + default: + panic("unreachable") + } +} + +// ---------------------------------------------------------------------------- +// Files + +func declToken(decl ast.Decl) (tok token.Token) { + tok = token.ILLEGAL + switch d := decl.(type) { + case *ast.GenDecl: + tok = d.Tok + case *ast.FuncDecl: + tok = token.FUNC + } + return +} + +func (p *printer) declList(list []ast.Decl) { + tok := token.ILLEGAL + for _, d := range list { + prev := tok + tok = declToken(d) + // If the declaration token changed (e.g., from CONST to TYPE) + // or the next declaration has documentation associated with it, + // print an empty line between top-level declarations. + // (because p.linebreak is called with the position of d, which + // is past any documentation, the minimum requirement is satisfied + // even w/o the extra getDoc(d) nil-check - leave it in case the + // linebreak logic improves - there's already a TODO). + if len(p.output) > 0 { + // only print line break if we are not at the beginning of the output + // (i.e., we are not printing only a partial program) + min := 1 + if prev != tok || getDoc(d) != nil { + min = 2 + } + // start a new section if the next declaration is a function + // that spans multiple lines (see also issue #19544) + p.linebreak(p.lineFor(d.Pos()), min, ignore, tok == token.FUNC && p.numLines(d) > 1) + } + p.decl(d) + } +} + +func (p *printer) file(src *ast.File) { + p.setComment(src.Doc) + p.setPos(src.Pos()) + p.print(token.PACKAGE, blank) + p.expr(src.Name) + p.declList(src.Decls) + p.print(newline) +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/printer.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/printer.go new file mode 100644 index 0000000000..2ab0278b08 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/printer.go @@ -0,0 +1,1435 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package printer implements printing of AST nodes. +package printer + +import ( + "fmt" + "go/ast" + "go/build/constraint" + "go/token" + "io" + "os" + "strings" + "sync" + "text/tabwriter" + "unicode" +) + +const ( + maxNewlines = 2 // max. number of newlines between source text + debug = false // enable for debugging + infinity = 1 << 30 +) + +type whiteSpace byte + +const ( + ignore = whiteSpace(0) + blank = whiteSpace(' ') + vtab = whiteSpace('\v') + newline = whiteSpace('\n') + formfeed = whiteSpace('\f') + indent = whiteSpace('>') + unindent = whiteSpace('<') +) + +// A pmode value represents the current printer mode. 
+type pmode int + +const ( + noExtraBlank pmode = 1 << iota // disables extra blank after /*-style comment + noExtraLinebreak // disables extra line break after /*-style comment +) + +type commentInfo struct { + cindex int // current comment index + comment *ast.CommentGroup // = printer.comments[cindex]; or nil + commentOffset int // = printer.posFor(printer.comments[cindex].List[0].Pos()).Offset; or infinity + commentNewline bool // true if the comment group contains newlines +} + +type printer struct { + // Configuration (does not change after initialization) + Config + fset *token.FileSet + + // Current state + output []byte // raw printer result + indent int // current indentation + level int // level == 0: outside composite literal; level > 0: inside composite literal + mode pmode // current printer mode + endAlignment bool // if set, terminate alignment immediately + impliedSemi bool // if set, a linebreak implies a semicolon + lastTok token.Token // last token printed (token.ILLEGAL if it's whitespace) + prevOpen token.Token // previous non-brace "open" token (, [, or token.ILLEGAL + wsbuf []whiteSpace // delayed white space + goBuild []int // start index of all //go:build comments in output + plusBuild []int // start index of all // +build comments in output + + // Positions + // The out position differs from the pos position when the result + // formatting differs from the source formatting (in the amount of + // white space). If there's a difference and SourcePos is set in + // ConfigMode, //line directives are used in the output to restore + // original source positions for a reader. + pos token.Position // current position in AST (source) space + out token.Position // current position in output space + last token.Position // value of pos after calling writeString + linePtr *int // if set, record out.Line for the next token in *linePtr + sourcePosErr error // if non-nil, the first error emitting a //line directive + + // The list of all source comments, in order of appearance. + comments []*ast.CommentGroup // may be nil + useNodeComments bool // if not set, ignore lead and line comments of nodes + + // Information about p.comments[p.cindex]; set up by nextComment. + commentInfo + + // Cache of already computed node sizes. + nodeSizes map[ast.Node]int + + // Cache of most recently computed line position. + cachedPos token.Pos + cachedLine int // line corresponding to cachedPos +} + +func (p *printer) internalError(msg ...any) { + if debug { + fmt.Print(p.pos.String() + ": ") + fmt.Println(msg...) + panic("mvdan.cc/gofumpt/internal/govendor/go/printer") + } +} + +// commentsHaveNewline reports whether a list of comments belonging to +// an *ast.CommentGroup contains newlines. Because the position information +// may only be partially correct, we also have to read the comment text. 
+func (p *printer) commentsHaveNewline(list []*ast.Comment) bool { + // len(list) > 0 + line := p.lineFor(list[0].Pos()) + for i, c := range list { + if i > 0 && p.lineFor(list[i].Pos()) != line { + // not all comments on the same line + return true + } + if t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, "\n")) { + return true + } + } + _ = line + return false +} + +func (p *printer) nextComment() { + for p.cindex < len(p.comments) { + c := p.comments[p.cindex] + p.cindex++ + if list := c.List; len(list) > 0 { + p.comment = c + p.commentOffset = p.posFor(list[0].Pos()).Offset + p.commentNewline = p.commentsHaveNewline(list) + return + } + // we should not reach here (correct ASTs don't have empty + // ast.CommentGroup nodes), but be conservative and try again + } + // no more comments + p.commentOffset = infinity +} + +// commentBefore reports whether the current comment group occurs +// before the next position in the source code and printing it does +// not introduce implicit semicolons. +func (p *printer) commentBefore(next token.Position) bool { + return p.commentOffset < next.Offset && (!p.impliedSemi || !p.commentNewline) +} + +// commentSizeBefore returns the estimated size of the +// comments on the same line before the next position. +func (p *printer) commentSizeBefore(next token.Position) int { + // save/restore current p.commentInfo (p.nextComment() modifies it) + defer func(info commentInfo) { + p.commentInfo = info + }(p.commentInfo) + + size := 0 + for p.commentBefore(next) { + for _, c := range p.comment.List { + size += len(c.Text) + } + p.nextComment() + } + return size +} + +// recordLine records the output line number for the next non-whitespace +// token in *linePtr. It is used to compute an accurate line number for a +// formatted construct, independent of pending (not yet emitted) whitespace +// or comments. +func (p *printer) recordLine(linePtr *int) { + p.linePtr = linePtr +} + +// linesFrom returns the number of output lines between the current +// output line and the line argument, ignoring any pending (not yet +// emitted) whitespace or comments. It is used to compute an accurate +// size (in number of lines) for a formatted construct. +func (p *printer) linesFrom(line int) int { + return p.out.Line - line +} + +func (p *printer) posFor(pos token.Pos) token.Position { + // not used frequently enough to cache entire token.Position + return p.fset.PositionFor(pos, false /* absolute position */) +} + +func (p *printer) lineFor(pos token.Pos) int { + if pos != p.cachedPos { + p.cachedPos = pos + p.cachedLine = p.fset.PositionFor(pos, false /* absolute position */).Line + } + return p.cachedLine +} + +// writeLineDirective writes a //line directive if necessary. +func (p *printer) writeLineDirective(pos token.Position) { + if pos.IsValid() && (p.out.Line != pos.Line || p.out.Filename != pos.Filename) { + if strings.ContainsAny(pos.Filename, "\r\n") { + if p.sourcePosErr == nil { + p.sourcePosErr = fmt.Errorf("mvdan.cc/gofumpt/internal/govendor/go/printer: source filename contains unexpected newline character: %q", pos.Filename) + } + return + } + + p.output = append(p.output, tabwriter.Escape) // protect '\n' in //line from tabwriter interpretation + p.output = append(p.output, fmt.Sprintf("//line %s:%d\n", pos.Filename, pos.Line)...) + p.output = append(p.output, tabwriter.Escape) + // p.out must match the //line directive + p.out.Filename = pos.Filename + p.out.Line = pos.Line + } +} + +// writeIndent writes indentation. 
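A quick illustration of what writeLineDirective above produces, as a sketch against the upstream go/printer API (which this vendored file tracks): with SourcePos set, any output line that no longer matches its source line gets a //line directive pointing back at the original position.

package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"os"
)

func main() {
	// The run of blank lines is collapsed by the printer, so the function
	// lands on an earlier output line than in the source, and a //line
	// directive (e.g. "//line orig.go:6") records its original line.
	src := "package p\n\n\n\n\nfunc f() {}\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "orig.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	cfg := printer.Config{Mode: printer.SourcePos, Tabwidth: 8}
	if err := cfg.Fprint(os.Stdout, fset, f); err != nil {
		panic(err)
	}
}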
+func (p *printer) writeIndent() { + // use "hard" htabs - indentation columns + // must not be discarded by the tabwriter + n := p.Config.Indent + p.indent // include base indentation + for i := 0; i < n; i++ { + p.output = append(p.output, '\t') + } + + // update positions + p.pos.Offset += n + p.pos.Column += n + p.out.Column += n +} + +// writeByte writes ch n times to p.output and updates p.pos. +// Only used to write formatting (white space) characters. +func (p *printer) writeByte(ch byte, n int) { + if p.endAlignment { + // Ignore any alignment control character; + // and at the end of the line, break with + // a formfeed to indicate termination of + // existing columns. + switch ch { + case '\t', '\v': + ch = ' ' + case '\n', '\f': + ch = '\f' + p.endAlignment = false + } + } + + if p.out.Column == 1 { + // no need to write line directives before white space + p.writeIndent() + } + + for i := 0; i < n; i++ { + p.output = append(p.output, ch) + } + + // update positions + p.pos.Offset += n + if ch == '\n' || ch == '\f' { + p.pos.Line += n + p.out.Line += n + p.pos.Column = 1 + p.out.Column = 1 + return + } + p.pos.Column += n + p.out.Column += n +} + +// writeString writes the string s to p.output and updates p.pos, p.out, +// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters +// to protect s from being interpreted by the tabwriter. +// +// Note: writeString is only used to write Go tokens, literals, and +// comments, all of which must be written literally. Thus, it is correct +// to always set isLit = true. However, setting it explicitly only when +// needed (i.e., when we don't know that s contains no tabs or line breaks) +// avoids processing extra escape characters and reduces run time of the +// printer benchmark by up to 10%. +func (p *printer) writeString(pos token.Position, s string, isLit bool) { + if p.out.Column == 1 { + if p.Config.Mode&SourcePos != 0 { + p.writeLineDirective(pos) + } + p.writeIndent() + } + + if pos.IsValid() { + // update p.pos (if pos is invalid, continue with existing p.pos) + // Note: Must do this after handling line beginnings because + // writeIndent updates p.pos if there's indentation, but p.pos + // is the position of s. + p.pos = pos + } + + if isLit { + // Protect s such that it passes through the tabwriter + // unchanged. Note that valid Go programs cannot contain + // tabwriter.Escape bytes since they do not appear in legal + // UTF-8 sequences. + p.output = append(p.output, tabwriter.Escape) + } + + if debug { + p.output = append(p.output, fmt.Sprintf("/*%s*/", pos)...) // do not update p.pos! + } + p.output = append(p.output, s...) + + // update positions + nlines := 0 + var li int // index of last newline; valid if nlines > 0 + for i := 0; i < len(s); i++ { + // Raw string literals may contain any character except back quote (`). + if ch := s[i]; ch == '\n' || ch == '\f' { + // account for line break + nlines++ + li = i + // A line break inside a literal will break whatever column + // formatting is in place; ignore any further alignment through + // the end of the line. + p.endAlignment = true + } + } + p.pos.Offset += len(s) + if nlines > 0 { + p.pos.Line += nlines + p.out.Line += nlines + c := len(s) - li + p.pos.Column = c + p.out.Column = c + } else { + p.pos.Column += len(s) + p.out.Column += len(s) + } + + if isLit { + p.output = append(p.output, tabwriter.Escape) + } + + p.last = p.pos +} + +// writeCommentPrefix writes the whitespace before a comment.
+// If there is any pending whitespace, it consumes as much of +// it as is likely to help position the comment nicely. +// pos is the comment position, next the position of the item +// after all pending comments, prev is the previous comment in +// a group of comments (or nil), and tok is the next token. +func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment, tok token.Token) { + if len(p.output) == 0 { + // the comment is the first item to be printed - don't write any whitespace + return + } + + if pos.IsValid() && pos.Filename != p.last.Filename { + // comment in a different file - separate with newlines + p.writeByte('\f', maxNewlines) + return + } + + if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') { + // comment on the same line as last item: + // separate with at least one separator + hasSep := false + if prev == nil { + // first comment of a comment group + j := 0 + for i, ch := range p.wsbuf { + switch ch { + case blank: + // ignore any blanks before a comment + p.wsbuf[i] = ignore + continue + case vtab: + // respect existing tabs - important + // for proper formatting of commented structs + hasSep = true + continue + case indent: + // apply pending indentation + continue + } + j = i + break + } + p.writeWhitespace(j) + } + // make sure there is at least one separator + if !hasSep { + sep := byte('\t') + if pos.Line == next.Line { + // next item is on the same line as the comment + // (which must be a /*-style comment): separate + // with a blank instead of a tab + sep = ' ' + } + p.writeByte(sep, 1) + } + + } else { + // comment on a different line: + // separate with at least one line break + droppedLinebreak := false + j := 0 + for i, ch := range p.wsbuf { + switch ch { + case blank, vtab: + // ignore any horizontal whitespace before line breaks + p.wsbuf[i] = ignore + continue + case indent: + // apply pending indentation + continue + case unindent: + // if this is not the last unindent, apply it + // as it is (likely) belonging to the last + // construct (e.g., a multi-line expression list) + // and is not part of closing a block + if i+1 < len(p.wsbuf) && p.wsbuf[i+1] == unindent { + continue + } + // if the next token is not a closing }, apply the unindent + // if it appears that the comment is aligned with the + // token; otherwise assume the unindent is part of a + // closing block and stop (this scenario appears with + // comments before a case label where the comments + // apply to the next case instead of the current one) + if tok != token.RBRACE && pos.Column == next.Column { + continue + } + case newline, formfeed: + p.wsbuf[i] = ignore + droppedLinebreak = prev == nil // record only if first comment of a group + } + j = i + break + } + p.writeWhitespace(j) + + // determine number of linebreaks before the comment + n := 0 + if pos.IsValid() && p.last.IsValid() { + n = pos.Line - p.last.Line + if n < 0 { // should never happen + n = 0 + } + } + + // at the package scope level only (p.indent == 0), + // add an extra newline if we dropped one before: + // this preserves a blank line before documentation + // comments at the package scope level (issue 2570) + if p.indent == 0 && droppedLinebreak { + n++ + } + + // make sure there is at least one line break + // if the previous comment was a line comment + if n == 0 && prev != nil && prev.Text[1] == '/' { + n = 1 + } + + if n > 0 { + // use formfeeds to break columns before a comment; + // this is analogous to using formfeeds to separate + // individual lines of /*-style 
comments + p.writeByte('\f', nlimit(n)) + } + } +} + +// Returns true if s contains only white space +// (only tabs and blanks can appear in the printer's context). +func isBlank(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] > ' ' { + return false + } + } + return true +} + +// commonPrefix returns the common prefix of a and b. +func commonPrefix(a, b string) string { + i := 0 + for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') { + i++ + } + return a[0:i] +} + +// trimRight returns s with trailing whitespace removed. +func trimRight(s string) string { + return strings.TrimRightFunc(s, unicode.IsSpace) +} + +// stripCommonPrefix removes a common prefix from /*-style comment lines (unless no +// comment line is indented, all but the first line have some form of space prefix). +// The prefix is computed using heuristics such that it is likely that the comment +// contents are nicely laid out after re-printing each line using the printer's +// current indentation. +func stripCommonPrefix(lines []string) { + if len(lines) <= 1 { + return // at most one line - nothing to do + } + // len(lines) > 1 + + // The heuristic in this function tries to handle a few + // common patterns of /*-style comments: Comments where + // the opening /* and closing */ are aligned and the + // rest of the comment text is aligned and indented with + // blanks or tabs, cases with a vertical "line of stars" + // on the left, and cases where the closing */ is on the + // same line as the last comment text. + + // Compute maximum common white prefix of all but the first, + // last, and blank lines, and replace blank lines with empty + // lines (the first line starts with /* and has no prefix). + // In cases where only the first and last lines are not blank, + // such as two-line comments, or comments where all inner lines + // are blank, consider the last line for the prefix computation + // since otherwise the prefix would be empty. + // + // Note that the first and last line are never empty (they + // contain the opening /* and closing */ respectively) and + // thus they can be ignored by the blank line check. + prefix := "" + prefixSet := false + if len(lines) > 2 { + for i, line := range lines[1 : len(lines)-1] { + if isBlank(line) { + lines[1+i] = "" // range starts with lines[1] + } else { + if !prefixSet { + prefix = line + prefixSet = true + } + prefix = commonPrefix(prefix, line) + } + } + } + // If we don't have a prefix yet, consider the last line. + if !prefixSet { + line := lines[len(lines)-1] + prefix = commonPrefix(line, line) + } + + /* + * Check for vertical "line of stars" and correct prefix accordingly. + */ + lineOfStars := false + if p, _, ok := strings.Cut(prefix, "*"); ok { + // remove trailing blank from prefix so stars remain aligned + prefix = strings.TrimSuffix(p, " ") + lineOfStars = true + } else { + // No line of stars present. + // Determine the white space on the first line after the /* + // and before the beginning of the comment text, assume two + // blanks instead of the /* unless the first character after + // the /* is a tab. If the first comment line is empty but + // for the opening /*, assume up to 3 blanks or a tab. This + // whitespace may be found as suffix in the common prefix.
+ first := lines[0] + if isBlank(first[2:]) { + // no comment text on the first line: + // reduce prefix by up to 3 blanks or a tab + // if present - this keeps comment text indented + // relative to the /* and */'s if it was indented + // in the first place + i := len(prefix) + for n := 0; n < 3 && i > 0 && prefix[i-1] == ' '; n++ { + i-- + } + if i == len(prefix) && i > 0 && prefix[i-1] == '\t' { + i-- + } + prefix = prefix[0:i] + } else { + // comment text on the first line + suffix := make([]byte, len(first)) + n := 2 // start after opening /* + for n < len(first) && first[n] <= ' ' { + suffix[n] = first[n] + n++ + } + if n > 2 && suffix[2] == '\t' { + // assume the '\t' compensates for the /* + suffix = suffix[2:n] + } else { + // otherwise assume two blanks + suffix[0], suffix[1] = ' ', ' ' + suffix = suffix[0:n] + } + // Shorten the computed common prefix by the length of + // suffix, if it is found as suffix of the prefix. + prefix = strings.TrimSuffix(prefix, string(suffix)) + } + } + + // Handle last line: If it only contains a closing */, align it + // with the opening /*, otherwise align the text with the other + // lines. + last := lines[len(lines)-1] + closing := "*/" + before, _, _ := strings.Cut(last, closing) // closing always present + if isBlank(before) { + // last line only contains closing */ + if lineOfStars { + closing = " */" // add blank to align final star + } + lines[len(lines)-1] = prefix + closing + } else { + // last line contains more comment text - assume + // it is aligned like the other lines and include + // in prefix computation + prefix = commonPrefix(prefix, last) + } + + // Remove the common prefix from all but the first and empty lines. + for i, line := range lines { + if i > 0 && line != "" { + lines[i] = line[len(prefix):] + } + } +} + +func (p *printer) writeComment(comment *ast.Comment) { + text := comment.Text + pos := p.posFor(comment.Pos()) + + const linePrefix = "//line " + if strings.HasPrefix(text, linePrefix) && (!pos.IsValid() || pos.Column == 1) { + // Possibly a //-style line directive. + // Suspend indentation temporarily to keep line directive valid. + defer func(indent int) { p.indent = indent }(p.indent) + p.indent = 0 + } + + // shortcut common case of //-style comments + if text[1] == '/' { + if constraint.IsGoBuild(text) { + p.goBuild = append(p.goBuild, len(p.output)) + } else if constraint.IsPlusBuild(text) { + p.plusBuild = append(p.plusBuild, len(p.output)) + } + p.writeString(pos, trimRight(text), true) + return + } + + // for /*-style comments, print line by line and let the + // write function take care of the proper indentation + lines := strings.Split(text, "\n") + + // The comment started in the first column but is going + // to be indented. For an idempotent result, add indentation + // to all lines such that they look like they were indented + // before - this will make sure the common prefix computation + // is the same independent of how many times formatting is + // applied (was issue 1835). 
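The idempotence note above (issue 1835) can be checked from outside the package; here is a small sketch using the stdlib go/format, which drives a printer equivalent to this vendored one: formatting the result of a format must be a fixed point, even for a /*-style comment that starts in column 1 inside an indented block.

package main

import (
	"bytes"
	"fmt"
	"go/format"
)

func main() {
	// The block comment starts in column 1 but belongs to an indented body;
	// the printer indents its lines, and the added indentation must not
	// change the common-prefix computation on a second pass.
	src := []byte(`package p

func f() {
/*
line one
line two
*/
}
`)
	once, err := format.Source(src)
	if err != nil {
		panic(err)
	}
	twice, err := format.Source(once)
	if err != nil {
		panic(err)
	}
	fmt.Println("idempotent:", bytes.Equal(once, twice))
}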
+ if pos.IsValid() && pos.Column == 1 && p.indent > 0 { + for i, line := range lines[1:] { + lines[1+i] = " " + line + } + } + + stripCommonPrefix(lines) + + // write comment lines, separated by formfeed, + // without a line break after the last line + for i, line := range lines { + if i > 0 { + p.writeByte('\f', 1) + pos = p.pos + } + if len(line) > 0 { + p.writeString(pos, trimRight(line), true) + } + } +} + +// writeCommentSuffix writes a line break after a comment if indicated +// and processes any leftover indentation information. If a line break +// is needed, the kind of break (newline vs formfeed) depends on the +// pending whitespace. The writeCommentSuffix result indicates if a +// newline was written or if a formfeed was dropped from the whitespace +// buffer. +func (p *printer) writeCommentSuffix(needsLinebreak bool) (wroteNewline, droppedFF bool) { + for i, ch := range p.wsbuf { + switch ch { + case blank, vtab: + // ignore trailing whitespace + p.wsbuf[i] = ignore + case indent, unindent: + // don't lose indentation information + case newline, formfeed: + // if we need a line break, keep exactly one + // but remember if we dropped any formfeeds + if needsLinebreak { + needsLinebreak = false + wroteNewline = true + } else { + if ch == formfeed { + droppedFF = true + } + p.wsbuf[i] = ignore + } + } + } + p.writeWhitespace(len(p.wsbuf)) + + // make sure we have a line break + if needsLinebreak { + p.writeByte('\n', 1) + wroteNewline = true + } + + return +} + +// containsLinebreak reports whether the whitespace buffer contains any line breaks. +func (p *printer) containsLinebreak() bool { + for _, ch := range p.wsbuf { + if ch == newline || ch == formfeed { + return true + } + } + return false +} + +// intersperseComments consumes all comments that appear before the next token +// tok and prints it together with the buffered whitespace (i.e., the whitespace +// that needs to be written before the next token). A heuristic is used to mix +// the comments and whitespace. The intersperseComments result indicates if a +// newline was written or if a formfeed was dropped from the whitespace buffer. +func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) { + var last *ast.Comment + for p.commentBefore(next) { + list := p.comment.List + changed := false + if p.lastTok != token.IMPORT && // do not rewrite cgo's import "C" comments + p.posFor(p.comment.Pos()).Column == 1 && + p.posFor(p.comment.End()+1) == next { + // Unindented comment abutting next token position: + // a top-level doc comment. + list = formatDocComment(list) + changed = true + + if len(p.comment.List) > 0 && len(list) == 0 { + // The doc comment was removed entirely. + // Keep preceding whitespace. + p.writeCommentPrefix(p.posFor(p.comment.Pos()), next, last, tok) + // Change print state to continue at next. + p.pos = next + p.last = next + // There can't be any more comments. + p.nextComment() + return p.writeCommentSuffix(false) + } + } + for _, c := range list { + p.writeCommentPrefix(p.posFor(c.Pos()), next, last, tok) + p.writeComment(c) + last = c + } + // In case list was rewritten, change print state to where + // the original list would have ended. 
+ if len(p.comment.List) > 0 && changed { + last = p.comment.List[len(p.comment.List)-1] + p.pos = p.posFor(last.End()) + p.last = p.pos + } + p.nextComment() + } + + if last != nil { + // If the last comment is a /*-style comment and the next item + // follows on the same line but is not a comma, and not a "closing" + // token immediately following its corresponding "opening" token, + // add an extra separator unless explicitly disabled. Use a blank + // as separator unless we have pending linebreaks, they are not + // disabled, and we are outside a composite literal, in which case + // we want a linebreak (issue 15137). + // TODO(gri) This has become overly complicated. We should be able + // to track whether we're inside an expression or statement and + // use that information to decide more directly. + needsLinebreak := false + if p.mode&noExtraBlank == 0 && + last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line && + tok != token.COMMA && + (tok != token.RPAREN || p.prevOpen == token.LPAREN) && + (tok != token.RBRACK || p.prevOpen == token.LBRACK) { + if p.containsLinebreak() && p.mode&noExtraLinebreak == 0 && p.level == 0 { + needsLinebreak = true + } else { + p.writeByte(' ', 1) + } + } + // Ensure that there is a line break after a //-style comment, + // before EOF, and before a closing '}' unless explicitly disabled. + if last.Text[1] == '/' || + tok == token.EOF || + tok == token.RBRACE && p.mode&noExtraLinebreak == 0 { + needsLinebreak = true + } + return p.writeCommentSuffix(needsLinebreak) + } + + // no comment was written - we should never reach here since + // intersperseComments should not be called in that case + p.internalError("intersperseComments called without pending comments") + return +} + +// writeWhitespace writes the first n whitespace entries. +func (p *printer) writeWhitespace(n int) { + // write entries + for i := 0; i < n; i++ { + switch ch := p.wsbuf[i]; ch { + case ignore: + // ignore! + case indent: + p.indent++ + case unindent: + p.indent-- + if p.indent < 0 { + p.internalError("negative indentation:", p.indent) + p.indent = 0 + } + case newline, formfeed: + // A line break immediately followed by a "correcting" + // unindent is swapped with the unindent - this permits + // proper label positioning. If a comment is between + // the line break and the label, the unindent is not + // part of the comment whitespace prefix and the comment + // will be positioned correctly indented. + if i+1 < n && p.wsbuf[i+1] == unindent { + // Use a formfeed to terminate the current section. + // Otherwise, a long label name on the next line leading + // to a wide column may increase the indentation column + // of lines before the label; effectively leading to wrong + // indentation. + p.wsbuf[i], p.wsbuf[i+1] = unindent, formfeed + i-- // do it again + continue + } + fallthrough + default: + p.writeByte(byte(ch), 1) + } + } + + // shift remaining entries down + l := copy(p.wsbuf, p.wsbuf[n:]) + p.wsbuf = p.wsbuf[:l] +} + +// ---------------------------------------------------------------------------- +// Printing interface + +// nlimit limits n to maxNewlines. +func nlimit(n int) int { + if n > maxNewlines { + n = maxNewlines + } + return n +} + +func mayCombine(prev token.Token, next byte) (b bool) { + switch prev { + case token.INT: + b = next == '.' // 1. 
+ case token.ADD: + b = next == '+' // ++ + case token.SUB: + b = next == '-' // -- + case token.QUO: + b = next == '*' // /* + case token.LSS: + b = next == '-' || next == '<' // <- or << + case token.AND: + b = next == '&' || next == '^' // && or &^ + } + return +} + +func (p *printer) setPos(pos token.Pos) { + if pos.IsValid() { + p.pos = p.posFor(pos) // accurate position of next item + } +} + +// print prints a list of "items" (roughly corresponding to syntactic +// tokens, but also including whitespace and formatting information). +// It is the only print function that should be called directly from +// any of the AST printing functions in nodes.go. +// +// Whitespace is accumulated until a non-whitespace token appears. Any +// comments that need to appear before that token are printed first, +// taking into account the amount and structure of any pending white- +// space for best comment placement. Then, any leftover whitespace is +// printed, followed by the actual token. +func (p *printer) print(args ...any) { + for _, arg := range args { + // information about the current arg + var data string + var isLit bool + var impliedSemi bool // value for p.impliedSemi after this arg + + // record previous opening token, if any + switch p.lastTok { + case token.ILLEGAL: + // ignore (white space) + case token.LPAREN, token.LBRACK: + p.prevOpen = p.lastTok + default: + // other tokens followed any opening token + p.prevOpen = token.ILLEGAL + } + + switch x := arg.(type) { + case pmode: + // toggle printer mode + p.mode ^= x + continue + + case whiteSpace: + if x == ignore { + // don't add ignore's to the buffer; they + // may screw up "correcting" unindents (see + // LabeledStmt) + continue + } + i := len(p.wsbuf) + if i == cap(p.wsbuf) { + // Whitespace sequences are very short so this should + // never happen. Handle gracefully (but possibly with + // bad comment placement) if it does happen. + p.writeWhitespace(i) + i = 0 + } + p.wsbuf = p.wsbuf[0 : i+1] + p.wsbuf[i] = x + if x == newline || x == formfeed { + // newlines affect the current state (p.impliedSemi) + // and not the state after printing arg (impliedSemi) + // because comments can be interspersed before the arg + // in this case + p.impliedSemi = false + } + p.lastTok = token.ILLEGAL + continue + + case *ast.Ident: + data = x.Name + impliedSemi = true + p.lastTok = token.IDENT + + case *ast.BasicLit: + data = x.Value + isLit = true + impliedSemi = true + p.lastTok = x.Kind + + case token.Token: + s := x.String() + if mayCombine(p.lastTok, s[0]) { + // the previous and the current token must be + // separated by a blank otherwise they combine + // into a different incorrect token sequence + // (except for token.INT followed by a '.' 
this + // should never happen because it is taken care + // of via binary expression formatting) + if len(p.wsbuf) != 0 { + p.internalError("whitespace buffer not empty") + } + p.wsbuf = p.wsbuf[0:1] + p.wsbuf[0] = ' ' + } + data = s + // some keywords followed by a newline imply a semicolon + switch x { + case token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN, + token.INC, token.DEC, token.RPAREN, token.RBRACK, token.RBRACE: + impliedSemi = true + } + p.lastTok = x + + case string: + // incorrect AST - print error message + data = x + isLit = true + impliedSemi = true + p.lastTok = token.STRING + + default: + fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", arg, arg) + panic("mvdan.cc/gofumpt/internal/govendor/go/printer type") + } + // data != "" + + next := p.pos // estimated/accurate position of next item + wroteNewline, droppedFF := p.flush(next, p.lastTok) + + // intersperse extra newlines if present in the source and + // if they don't cause extra semicolons (don't do this in + // flush as it will cause extra newlines at the end of a file) + if !p.impliedSemi { + n := nlimit(next.Line - p.pos.Line) + // don't exceed maxNewlines if we already wrote one + if wroteNewline && n == maxNewlines { + n = maxNewlines - 1 + } + if n > 0 { + ch := byte('\n') + if droppedFF { + ch = '\f' // use formfeed since we dropped one before + } + p.writeByte(ch, n) + impliedSemi = false + } + } + + // the next token starts now - record its line number if requested + if p.linePtr != nil { + *p.linePtr = p.out.Line + p.linePtr = nil + } + + p.writeString(next, data, isLit) + p.impliedSemi = impliedSemi + } +} + +// flush prints any pending comments and whitespace occurring textually +// before the position of the next token tok. The flush result indicates +// if a newline was written or if a formfeed was dropped from the whitespace +// buffer. +func (p *printer) flush(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) { + if p.commentBefore(next) { + // if there are comments before the next item, intersperse them + wroteNewline, droppedFF = p.intersperseComments(next, tok) + } else { + // otherwise, write any leftover whitespace + p.writeWhitespace(len(p.wsbuf)) + } + return +} + +// getDoc returns the ast.CommentGroup associated with n, if any. 
+func getDoc(n ast.Node) *ast.CommentGroup { + switch n := n.(type) { + case *ast.Field: + return n.Doc + case *ast.ImportSpec: + return n.Doc + case *ast.ValueSpec: + return n.Doc + case *ast.TypeSpec: + return n.Doc + case *ast.GenDecl: + return n.Doc + case *ast.FuncDecl: + return n.Doc + case *ast.File: + return n.Doc + } + return nil +} + +func getLastComment(n ast.Node) *ast.CommentGroup { + switch n := n.(type) { + case *ast.Field: + return n.Comment + case *ast.ImportSpec: + return n.Comment + case *ast.ValueSpec: + return n.Comment + case *ast.TypeSpec: + return n.Comment + case *ast.GenDecl: + if len(n.Specs) > 0 { + return getLastComment(n.Specs[len(n.Specs)-1]) + } + case *ast.File: + if len(n.Comments) > 0 { + return n.Comments[len(n.Comments)-1] + } + } + return nil +} + +func (p *printer) printNode(node any) error { + // unpack *CommentedNode, if any + var comments []*ast.CommentGroup + if cnode, ok := node.(*CommentedNode); ok { + node = cnode.Node + comments = cnode.Comments + } + + if comments != nil { + // commented node - restrict comment list to relevant range + n, ok := node.(ast.Node) + if !ok { + goto unsupported + } + beg := n.Pos() + end := n.End() + // if the node has associated documentation, + // include that commentgroup in the range + // (the comment list is sorted in the order + // of the comment appearance in the source code) + if doc := getDoc(n); doc != nil { + beg = doc.Pos() + } + if com := getLastComment(n); com != nil { + if e := com.End(); e > end { + end = e + } + } + // token.Pos values are global offsets, we can + // compare them directly + i := 0 + for i < len(comments) && comments[i].End() < beg { + i++ + } + j := i + for j < len(comments) && comments[j].Pos() < end { + j++ + } + if i < j { + p.comments = comments[i:j] + } + } else if n, ok := node.(*ast.File); ok { + // use ast.File comments, if any + p.comments = n.Comments + } + + // if there are no comments, use node comments + p.useNodeComments = p.comments == nil + + // get comments ready for use + p.nextComment() + + p.print(pmode(0)) + + // format node + switch n := node.(type) { + case ast.Expr: + p.expr(n) + case ast.Stmt: + // A labeled statement will un-indent to position the label. + // Set p.indent to 1 so we don't get indent "underflow". + if _, ok := n.(*ast.LabeledStmt); ok { + p.indent = 1 + } + p.stmt(n, false) + case ast.Decl: + p.decl(n) + case ast.Spec: + p.spec(n, 1, false) + case []ast.Stmt: + // A labeled statement will un-indent to position the label. + // Set p.indent to 1 so we don't get indent "underflow". + for _, s := range n { + if _, ok := s.(*ast.LabeledStmt); ok { + p.indent = 1 + } + } + p.stmtList(n, 0, false) + case []ast.Decl: + p.declList(n) + case *ast.File: + p.file(n) + default: + goto unsupported + } + + return p.sourcePosErr + +unsupported: + return fmt.Errorf("mvdan.cc/gofumpt/internal/govendor/go/printer: unsupported node type %T", node) +} + +// ---------------------------------------------------------------------------- +// Trimmer + +// A trimmer is an io.Writer filter for stripping tabwriter.Escape +// characters, trailing blanks and tabs, and for converting formfeed +// and vtab characters into newlines and htabs (in case no tabwriter +// is used). Text bracketed by tabwriter.Escape characters is passed +// through unchanged. +type trimmer struct { + output io.Writer + state int + space []byte +} + +// trimmer is implemented as a state machine. 
+// It can be in one of the following states: +const ( + inSpace = iota // inside space + inEscape // inside text bracketed by tabwriter.Escapes + inText // inside text +) + +func (p *trimmer) resetSpace() { + p.state = inSpace + p.space = p.space[0:0] +} + +// Design note: It is tempting to eliminate extra blanks occurring in +// whitespace in this function as it could simplify some +// of the blanks logic in the node printing functions. +// However, this would mess up any formatting done by +// the tabwriter. + +var aNewline = []byte("\n") + +func (p *trimmer) Write(data []byte) (n int, err error) { + // invariants: + // p.state == inSpace: + // p.space is unwritten + // p.state == inEscape, inText: + // data[m:n] is unwritten + m := 0 + var b byte + for n, b = range data { + if b == '\v' { + b = '\t' // convert to htab + } + switch p.state { + case inSpace: + switch b { + case '\t', ' ': + p.space = append(p.space, b) + case '\n', '\f': + p.resetSpace() // discard trailing space + _, err = p.output.Write(aNewline) + case tabwriter.Escape: + _, err = p.output.Write(p.space) + p.state = inEscape + m = n + 1 // +1: skip tabwriter.Escape + default: + _, err = p.output.Write(p.space) + p.state = inText + m = n + } + case inEscape: + if b == tabwriter.Escape { + _, err = p.output.Write(data[m:n]) + p.resetSpace() + } + case inText: + switch b { + case '\t', ' ': + _, err = p.output.Write(data[m:n]) + p.resetSpace() + p.space = append(p.space, b) + case '\n', '\f': + _, err = p.output.Write(data[m:n]) + p.resetSpace() + if err == nil { + _, err = p.output.Write(aNewline) + } + case tabwriter.Escape: + _, err = p.output.Write(data[m:n]) + p.state = inEscape + m = n + 1 // +1: skip tabwriter.Escape + } + default: + panic("unreachable") + } + if err != nil { + return + } + } + n = len(data) + + switch p.state { + case inEscape, inText: + _, err = p.output.Write(data[m:n]) + p.resetSpace() + } + + return +} + +// ---------------------------------------------------------------------------- +// Public interface + +// A Mode value is a set of flags (or 0). They control printing. +type Mode uint + +const ( + RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored + TabIndent // use tabs for indentation independent of UseSpaces + UseSpaces // use spaces instead of tabs for alignment + SourcePos // emit //line directives to preserve original source positions +) + +// The mode below is not included in printer's public API because +// editing code text is deemed out of scope. Because this mode is +// unexported, it's also possible to modify or remove it based on +// the evolving needs of mvdan.cc/gofumpt/internal/govendor/go/format and cmd/gofmt without breaking +// users. See discussion in CL 240683. +const ( + // normalizeNumbers means to canonicalize number + // literal prefixes and exponents while printing. + // + // This value is known in and used by mvdan.cc/gofumpt/internal/govendor/go/format and cmd/gofmt. + // It is currently more convenient and performant for those + // packages to apply number normalization during printing, + // rather than by modifying the AST in advance. + normalizeNumbers Mode = 1 << 30 +) + +// A Config node controls the output of Fprint. +type Config struct { + Mode Mode // default: 0 + Tabwidth int // default: 8 + Indent int // default: 0 (all code is indented at least by this much) +} + +var printerPool = sync.Pool{ + New: func() any { + return &printer{ + // Whitespace sequences are short. 
+ wsbuf: make([]whiteSpace, 0, 16), + // We start the printer with a 16K output buffer, which is currently + // larger than about 80% of Go files in the standard library. + output: make([]byte, 0, 16<<10), + } + }, +} + +func newPrinter(cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) *printer { + p := printerPool.Get().(*printer) + *p = printer{ + Config: *cfg, + fset: fset, + pos: token.Position{Line: 1, Column: 1}, + out: token.Position{Line: 1, Column: 1}, + wsbuf: p.wsbuf[:0], + nodeSizes: nodeSizes, + cachedPos: -1, + output: p.output[:0], + } + return p +} + +func (p *printer) free() { + // Hard limit on buffer size; see https://golang.org/issue/23199. + if cap(p.output) > 64<<10 { + return + } + + printerPool.Put(p) +} + +// fprint implements Fprint and takes a nodesSizes map for setting up the printer state. +func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node any, nodeSizes map[ast.Node]int) (err error) { + // print node + p := newPrinter(cfg, fset, nodeSizes) + defer p.free() + if err = p.printNode(node); err != nil { + return + } + // print outstanding comments + p.impliedSemi = false // EOF acts like a newline + p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF) + + // output is buffered in p.output now. + // fix //go:build and // +build comments if needed. + p.fixGoBuildLines() + + // redirect output through a trimmer to eliminate trailing whitespace + // (Input to a tabwriter must be untrimmed since trailing tabs provide + // formatting information. The tabwriter could provide trimming + // functionality but no tabwriter is used when RawFormat is set.) + output = &trimmer{output: output} + + // redirect output through a tabwriter if necessary + if cfg.Mode&RawFormat == 0 { + minwidth := cfg.Tabwidth + + padchar := byte('\t') + if cfg.Mode&UseSpaces != 0 { + padchar = ' ' + } + + twmode := tabwriter.DiscardEmptyColumns + if cfg.Mode&TabIndent != 0 { + minwidth = 0 + twmode |= tabwriter.TabIndent + } + + output = tabwriter.NewWriter(output, minwidth, cfg.Tabwidth, 1, padchar, twmode) + } + + // write printer result via tabwriter/trimmer to output + if _, err = output.Write(p.output); err != nil { + return + } + + // flush tabwriter, if any + if tw, _ := output.(*tabwriter.Writer); tw != nil { + err = tw.Flush() + } + + return +} + +// A CommentedNode bundles an AST node and corresponding comments. +// It may be provided as argument to any of the Fprint functions. +type CommentedNode struct { + Node any // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt + Comments []*ast.CommentGroup +} + +// Fprint "pretty-prints" an AST node to output for a given configuration cfg. +// Position information is interpreted relative to the file set fset. +// The node type must be *ast.File, *CommentedNode, []ast.Decl, []ast.Stmt, +// or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt. +func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node any) error { + return cfg.fprint(output, fset, node, make(map[ast.Node]int)) +} + +// Fprint "pretty-prints" an AST node to output. +// It calls Config.Fprint with default settings. +// Note that gofmt uses tabs for indentation but spaces for alignment; +// use format.Node (package mvdan.cc/gofumpt/internal/govendor/go/format) for output that matches gofmt. 
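To make the pointer in the Fprint doc comment concrete, a sketch using the stdlib go/format rather than this vendored copy: format.Node wraps a printer configured the way gofmt expects (UseSpaces|TabIndent with Tabwidth 8 in the standard library), so its output matches gofmt.

package main

import (
	"go/format"
	"go/parser"
	"go/token"
	"os"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", "package p\n\nvar x=1\n", parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// Prints the gofmt-normalized form, e.g. "var x = 1".
	if err := format.Node(os.Stdout, fset, f); err != nil {
		panic(err)
	}
}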
+func Fprint(output io.Writer, fset *token.FileSet, node any) error { + return (&Config{Tabwidth: 8}).Fprint(output, fset, node) +} diff --git a/vendor/mvdan.cc/gofumpt/internal/version/version.go b/vendor/mvdan.cc/gofumpt/internal/version/version.go index 992930480f..785b6b317d 100644 --- a/vendor/mvdan.cc/gofumpt/internal/version/version.go +++ b/vendor/mvdan.cc/gofumpt/internal/version/version.go @@ -97,6 +97,9 @@ func goVersion() string { return runtime.Version() } -func String() string { +func String(injected string) string { + if injected != "" { + return fmt.Sprintf("%s (%s)", injected, goVersion()) + } return fmt.Sprintf("%s (%s)", gofumptVersion(), goVersion()) } diff --git a/vendor/mvdan.cc/interfacer/LICENSE b/vendor/mvdan.cc/interfacer/LICENSE deleted file mode 100644 index 7d71d51a5e..0000000000 --- a/vendor/mvdan.cc/interfacer/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015, Daniel Martí. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of the copyright holder nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/mvdan.cc/interfacer/check/cache.go b/vendor/mvdan.cc/interfacer/check/cache.go deleted file mode 100644 index 757eca55e1..0000000000 --- a/vendor/mvdan.cc/interfacer/check/cache.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2015, Daniel Martí -// See LICENSE for licensing information - -package check - -import ( - "go/ast" - "go/types" -) - -type pkgTypes struct { - ifaces map[string]string - funcSigns map[string]bool -} - -func (p *pkgTypes) getTypes(pkg *types.Package) { - p.ifaces = make(map[string]string) - p.funcSigns = make(map[string]bool) - done := make(map[*types.Package]bool) - addTypes := func(pkg *types.Package, top bool) { - if done[pkg] { - return - } - done[pkg] = true - ifs, funs := fromScope(pkg.Scope()) - fullName := func(name string) string { - if !top { - return pkg.Path() + "." 
+ name - } - return name - } - for iftype, name := range ifs { - // only suggest exported interfaces - if ast.IsExported(name) { - p.ifaces[iftype] = fullName(name) - } - } - for ftype := range funs { - // ignore non-exported func signatures too - p.funcSigns[ftype] = true - } - } - for _, imp := range pkg.Imports() { - addTypes(imp, false) - for _, imp2 := range imp.Imports() { - addTypes(imp2, false) - } - } - addTypes(pkg, true) -} diff --git a/vendor/mvdan.cc/interfacer/check/check.go b/vendor/mvdan.cc/interfacer/check/check.go deleted file mode 100644 index f4d3b4037b..0000000000 --- a/vendor/mvdan.cc/interfacer/check/check.go +++ /dev/null @@ -1,462 +0,0 @@ -// Copyright (c) 2015, Daniel Martí -// See LICENSE for licensing information - -package check // import "mvdan.cc/interfacer/check" - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "os" - "strings" - - "golang.org/x/tools/go/loader" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/go/ssa/ssautil" - - "github.com/kisielk/gotool" - "mvdan.cc/lint" -) - -func toDiscard(usage *varUsage) bool { - if usage.discard { - return true - } - for to := range usage.assigned { - if toDiscard(to) { - return true - } - } - return false -} - -func allCalls(usage *varUsage, all, ftypes map[string]string) { - for fname := range usage.calls { - all[fname] = ftypes[fname] - } - for to := range usage.assigned { - allCalls(to, all, ftypes) - } -} - -func (c *Checker) interfaceMatching(param *types.Var, usage *varUsage) (string, string) { - if toDiscard(usage) { - return "", "" - } - ftypes := typeFuncMap(param.Type()) - called := make(map[string]string, len(usage.calls)) - allCalls(usage, called, ftypes) - s := funcMapString(called) - return c.ifaces[s], s -} - -type varUsage struct { - calls map[string]struct{} - discard bool - - assigned map[*varUsage]struct{} -} - -type funcDecl struct { - astDecl *ast.FuncDecl - ssaFn *ssa.Function -} - -// CheckArgs checks the packages specified by their import paths in -// args. 
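For context on the code being deleted here: interfacer suggested narrowing a concrete parameter type to an interface when only interface methods were used, emitting messages of the form "<param> can be <interface>" (see groupIssues further down in this hunk). A freestanding sketch of the kind of code it flagged and the rewrite it suggested (function names invented for illustration):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// FirstByte only ever calls f.Read, so interfacer would report
// "f can be io.Reader" at the parameter's position.
func FirstByte(f *os.File) (byte, error) {
	var buf [1]byte
	if _, err := f.Read(buf[:]); err != nil {
		return 0, err
	}
	return buf[0], nil
}

// FirstByteNarrow is the suggested rewrite: any io.Reader now works.
func FirstByteNarrow(r io.Reader) (byte, error) {
	var buf [1]byte
	if _, err := r.Read(buf[:]); err != nil {
		return 0, err
	}
	return buf[0], nil
}

func main() {
	b, err := FirstByteNarrow(strings.NewReader("hi"))
	fmt.Println(string(b), err)
}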
-func CheckArgs(args []string) ([]string, error) { - paths := gotool.ImportPaths(args) - conf := loader.Config{} - conf.AllowErrors = true - rest, err := conf.FromArgs(paths, false) - if err != nil { - return nil, err - } - if len(rest) > 0 { - return nil, fmt.Errorf("unwanted extra args: %v", rest) - } - lprog, err := conf.Load() - if err != nil { - return nil, err - } - prog := ssautil.CreateProgram(lprog, 0) - prog.Build() - c := new(Checker) - c.Program(lprog) - c.ProgramSSA(prog) - issues, err := c.Check() - if err != nil { - return nil, err - } - wd, err := os.Getwd() - if err != nil { - return nil, err - } - lines := make([]string, len(issues)) - for i, issue := range issues { - fpos := prog.Fset.Position(issue.Pos()).String() - if strings.HasPrefix(fpos, wd) { - fpos = fpos[len(wd)+1:] - } - lines[i] = fmt.Sprintf("%s: %s", fpos, issue.Message()) - } - return lines, nil -} - -type Checker struct { - lprog *loader.Program - prog *ssa.Program - - pkgTypes - *loader.PackageInfo - - funcs []*funcDecl - - ssaByPos map[token.Pos]*ssa.Function - - discardFuncs map[*types.Signature]struct{} - - vars map[*types.Var]*varUsage -} - -var ( - _ lint.Checker = (*Checker)(nil) - _ lint.WithSSA = (*Checker)(nil) -) - -func (c *Checker) Program(lprog *loader.Program) { - c.lprog = lprog -} - -func (c *Checker) ProgramSSA(prog *ssa.Program) { - c.prog = prog -} - -func (c *Checker) Check() ([]lint.Issue, error) { - var total []lint.Issue - c.ssaByPos = make(map[token.Pos]*ssa.Function) - wantPkg := make(map[*types.Package]bool) - for _, pinfo := range c.lprog.InitialPackages() { - wantPkg[pinfo.Pkg] = true - } - for fn := range ssautil.AllFunctions(c.prog) { - if fn.Pkg == nil { // builtin? - continue - } - if len(fn.Blocks) == 0 { // stub - continue - } - if !wantPkg[fn.Pkg.Pkg] { // not part of given pkgs - continue - } - c.ssaByPos[fn.Pos()] = fn - } - for _, pinfo := range c.lprog.InitialPackages() { - pkg := pinfo.Pkg - c.getTypes(pkg) - c.PackageInfo = c.lprog.AllPackages[pkg] - total = append(total, c.checkPkg()...) 
- } - return total, nil -} - -func (c *Checker) checkPkg() []lint.Issue { - c.discardFuncs = make(map[*types.Signature]struct{}) - c.vars = make(map[*types.Var]*varUsage) - c.funcs = c.funcs[:0] - findFuncs := func(node ast.Node) bool { - decl, ok := node.(*ast.FuncDecl) - if !ok { - return true - } - ssaFn := c.ssaByPos[decl.Name.Pos()] - if ssaFn == nil { - return true - } - fd := &funcDecl{ - astDecl: decl, - ssaFn: ssaFn, - } - if c.funcSigns[signString(fd.ssaFn.Signature)] { - // implements interface - return true - } - c.funcs = append(c.funcs, fd) - ast.Walk(c, decl.Body) - return true - } - for _, f := range c.Files { - ast.Inspect(f, findFuncs) - } - return c.packageIssues() -} - -func paramVarAndType(sign *types.Signature, i int) (*types.Var, types.Type) { - params := sign.Params() - extra := sign.Variadic() && i >= params.Len()-1 - if !extra { - if i >= params.Len() { - // builtins with multiple signatures - return nil, nil - } - vr := params.At(i) - return vr, vr.Type() - } - last := params.At(params.Len() - 1) - switch x := last.Type().(type) { - case *types.Slice: - return nil, x.Elem() - default: - return nil, x - } -} - -func (c *Checker) varUsage(e ast.Expr) *varUsage { - id, ok := e.(*ast.Ident) - if !ok { - return nil - } - param, ok := c.ObjectOf(id).(*types.Var) - if !ok { - // not a variable - return nil - } - if usage, e := c.vars[param]; e { - return usage - } - if !interesting(param.Type()) { - return nil - } - usage := &varUsage{ - calls: make(map[string]struct{}), - assigned: make(map[*varUsage]struct{}), - } - c.vars[param] = usage - return usage -} - -func (c *Checker) addUsed(e ast.Expr, as types.Type) { - if as == nil { - return - } - if usage := c.varUsage(e); usage != nil { - // using variable - iface, ok := as.Underlying().(*types.Interface) - if !ok { - usage.discard = true - return - } - for i := 0; i < iface.NumMethods(); i++ { - m := iface.Method(i) - usage.calls[m.Name()] = struct{}{} - } - } else if t, ok := c.TypeOf(e).(*types.Signature); ok { - // using func - c.discardFuncs[t] = struct{}{} - } -} - -func (c *Checker) addAssign(to, from ast.Expr) { - pto := c.varUsage(to) - pfrom := c.varUsage(from) - if pto == nil || pfrom == nil { - // either isn't interesting - return - } - pfrom.assigned[pto] = struct{}{} -} - -func (c *Checker) discard(e ast.Expr) { - if usage := c.varUsage(e); usage != nil { - usage.discard = true - } -} - -func (c *Checker) comparedWith(e, with ast.Expr) { - if _, ok := with.(*ast.BasicLit); ok { - c.discard(e) - } -} - -func (c *Checker) Visit(node ast.Node) ast.Visitor { - switch x := node.(type) { - case *ast.SelectorExpr: - if _, ok := c.TypeOf(x.Sel).(*types.Signature); !ok { - c.discard(x.X) - } - case *ast.StarExpr: - c.discard(x.X) - case *ast.UnaryExpr: - c.discard(x.X) - case *ast.IndexExpr: - c.discard(x.X) - case *ast.IncDecStmt: - c.discard(x.X) - case *ast.BinaryExpr: - switch x.Op { - case token.EQL, token.NEQ: - c.comparedWith(x.X, x.Y) - c.comparedWith(x.Y, x.X) - default: - c.discard(x.X) - c.discard(x.Y) - } - case *ast.ValueSpec: - for _, val := range x.Values { - c.addUsed(val, c.TypeOf(x.Type)) - } - case *ast.AssignStmt: - for i, val := range x.Rhs { - left := x.Lhs[i] - if x.Tok == token.ASSIGN { - c.addUsed(val, c.TypeOf(left)) - } - c.addAssign(left, val) - } - case *ast.CompositeLit: - for i, e := range x.Elts { - switch y := e.(type) { - case *ast.KeyValueExpr: - c.addUsed(y.Key, c.TypeOf(y.Value)) - c.addUsed(y.Value, c.TypeOf(y.Key)) - case *ast.Ident: - c.addUsed(y, 
-			}
-		}
-	case *ast.CallExpr:
-		switch y := c.TypeOf(x.Fun).Underlying().(type) {
-		case *types.Signature:
-			c.onMethodCall(x, y)
-		default:
-			// type conversion
-			if len(x.Args) == 1 {
-				c.addUsed(x.Args[0], y)
-			}
-		}
-	}
-	return c
-}
-
-func compositeIdentType(t types.Type, i int) types.Type {
-	switch x := t.(type) {
-	case *types.Named:
-		return compositeIdentType(x.Underlying(), i)
-	case *types.Struct:
-		return x.Field(i).Type()
-	case *types.Array:
-		return x.Elem()
-	case *types.Slice:
-		return x.Elem()
-	}
-	return nil
-}
-
-func (c *Checker) onMethodCall(ce *ast.CallExpr, sign *types.Signature) {
-	for i, e := range ce.Args {
-		paramObj, t := paramVarAndType(sign, i)
-		// Don't if this is a parameter being re-used as itself
-		// in a recursive call
-		if id, ok := e.(*ast.Ident); ok {
-			if paramObj == c.ObjectOf(id) {
-				continue
-			}
-		}
-		c.addUsed(e, t)
-	}
-	sel, ok := ce.Fun.(*ast.SelectorExpr)
-	if !ok {
-		return
-	}
-	// receiver func call on the left side
-	if usage := c.varUsage(sel.X); usage != nil {
-		usage.calls[sel.Sel.Name] = struct{}{}
-	}
-}
-
-func (fd *funcDecl) paramGroups() [][]*types.Var {
-	astList := fd.astDecl.Type.Params.List
-	groups := make([][]*types.Var, len(astList))
-	signIndex := 0
-	for i, field := range astList {
-		group := make([]*types.Var, len(field.Names))
-		for j := range field.Names {
-			group[j] = fd.ssaFn.Signature.Params().At(signIndex)
-			signIndex++
-		}
-		groups[i] = group
-	}
-	return groups
-}
-
-func (c *Checker) packageIssues() []lint.Issue {
-	var issues []lint.Issue
-	for _, fd := range c.funcs {
-		if _, e := c.discardFuncs[fd.ssaFn.Signature]; e {
-			continue
-		}
-		for _, group := range fd.paramGroups() {
-			issues = append(issues, c.groupIssues(fd, group)...)
-		}
-	}
-	return issues
-}
-
-type Issue struct {
-	pos token.Pos
-	msg string
-}
-
-func (i Issue) Pos() token.Pos  { return i.pos }
-func (i Issue) Message() string { return i.msg }
-
-func (c *Checker) groupIssues(fd *funcDecl, group []*types.Var) []lint.Issue {
-	var issues []lint.Issue
-	for _, param := range group {
-		usage := c.vars[param]
-		if usage == nil {
-			return nil
-		}
-		newType := c.paramNewType(fd.astDecl.Name.Name, param, usage)
-		if newType == "" {
-			return nil
-		}
-		issues = append(issues, Issue{
-			pos: param.Pos(),
-			msg: fmt.Sprintf("%s can be %s", param.Name(), newType),
-		})
-	}
-	return issues
-}
-
-func willAddAllocation(t types.Type) bool {
-	switch t.Underlying().(type) {
-	case *types.Pointer, *types.Interface:
-		return false
-	}
-	return true
-}
-
-func (c *Checker) paramNewType(funcName string, param *types.Var, usage *varUsage) string {
-	t := param.Type()
-	if !ast.IsExported(funcName) && willAddAllocation(t) {
-		return ""
-	}
-	if named := typeNamed(t); named != nil {
-		tname := named.Obj().Name()
-		vname := param.Name()
-		if mentionsName(funcName, tname) || mentionsName(funcName, vname) {
-			return ""
-		}
-	}
-	ifname, iftype := c.interfaceMatching(param, usage)
-	if ifname == "" {
-		return ""
-	}
-	if types.IsInterface(t.Underlying()) {
-		if have := funcMapString(typeFuncMap(t)); have == iftype {
-			return ""
-		}
-	}
-	return ifname
-}
diff --git a/vendor/mvdan.cc/interfacer/check/types.go b/vendor/mvdan.cc/interfacer/check/types.go
deleted file mode 100644
index 393bb0b9fa..0000000000
--- a/vendor/mvdan.cc/interfacer/check/types.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright (c) 2015, Daniel Martí
-// See LICENSE for licensing information
-
-package check
-
-import (
-	"bytes"
-	"fmt"
-	"go/types"
-	"sort"
-	"strings"
-)
-
-type methoder interface {
-	NumMethods() int
-	Method(int) *types.Func
-}
-
-func methoderFuncMap(m methoder, skip bool) map[string]string {
-	ifuncs := make(map[string]string, m.NumMethods())
-	for i := 0; i < m.NumMethods(); i++ {
-		f := m.Method(i)
-		if !f.Exported() {
-			if skip {
-				continue
-			}
-			return nil
-		}
-		sign := f.Type().(*types.Signature)
-		ifuncs[f.Name()] = signString(sign)
-	}
-	return ifuncs
-}
-
-func typeFuncMap(t types.Type) map[string]string {
-	switch x := t.(type) {
-	case *types.Pointer:
-		return typeFuncMap(x.Elem())
-	case *types.Named:
-		u := x.Underlying()
-		if types.IsInterface(u) {
-			return typeFuncMap(u)
-		}
-		return methoderFuncMap(x, true)
-	case *types.Interface:
-		return methoderFuncMap(x, false)
-	default:
-		return nil
-	}
-}
-
-func funcMapString(iface map[string]string) string {
-	fnames := make([]string, 0, len(iface))
-	for fname := range iface {
-		fnames = append(fnames, fname)
-	}
-	sort.Strings(fnames)
-	var b bytes.Buffer
-	for i, fname := range fnames {
-		if i > 0 {
-			fmt.Fprint(&b, "; ")
-		}
-		fmt.Fprint(&b, fname, iface[fname])
-	}
-	return b.String()
-}
-
-func tupleJoin(buf *bytes.Buffer, t *types.Tuple) {
-	buf.WriteByte('(')
-	for i := 0; i < t.Len(); i++ {
-		if i > 0 {
-			buf.WriteString(", ")
-		}
-		buf.WriteString(t.At(i).Type().String())
-	}
-	buf.WriteByte(')')
-}
-
-// signString is similar to Signature.String(), but it ignores
-// param/result names.
-func signString(sign *types.Signature) string {
-	var buf bytes.Buffer
-	tupleJoin(&buf, sign.Params())
-	tupleJoin(&buf, sign.Results())
-	return buf.String()
-}
-
-func interesting(t types.Type) bool {
-	switch x := t.(type) {
-	case *types.Interface:
-		return x.NumMethods() > 1
-	case *types.Named:
-		if u := x.Underlying(); types.IsInterface(u) {
-			return interesting(u)
-		}
-		return x.NumMethods() >= 1
-	case *types.Pointer:
-		return interesting(x.Elem())
-	default:
-		return false
-	}
-}
-
-func anyInteresting(params *types.Tuple) bool {
-	for i := 0; i < params.Len(); i++ {
-		t := params.At(i).Type()
-		if interesting(t) {
-			return true
-		}
-	}
-	return false
-}
-
-func fromScope(scope *types.Scope) (ifaces map[string]string, funcs map[string]bool) {
-	ifaces = make(map[string]string)
-	funcs = make(map[string]bool)
-	for _, name := range scope.Names() {
-		tn, ok := scope.Lookup(name).(*types.TypeName)
-		if !ok {
-			continue
-		}
-		switch x := tn.Type().Underlying().(type) {
-		case *types.Interface:
-			iface := methoderFuncMap(x, false)
-			if len(iface) == 0 {
-				continue
-			}
-			for i := 0; i < x.NumMethods(); i++ {
-				f := x.Method(i)
-				sign := f.Type().(*types.Signature)
-				if !anyInteresting(sign.Params()) {
-					continue
-				}
-				funcs[signString(sign)] = true
-			}
-			s := funcMapString(iface)
-			if _, e := ifaces[s]; !e {
-				ifaces[s] = tn.Name()
-			}
-		case *types.Signature:
-			if !anyInteresting(x.Params()) {
-				continue
-			}
-			funcs[signString(x)] = true
-		}
-	}
-	return ifaces, funcs
-}
-
-func mentionsName(fname, name string) bool {
-	if len(name) < 2 {
-		return false
-	}
-	capit := strings.ToUpper(name[:1]) + name[1:]
-	lower := strings.ToLower(name)
-	return strings.Contains(fname, capit) || strings.HasPrefix(fname, lower)
-}
-
-func typeNamed(t types.Type) *types.Named {
-	for {
-		switch x := t.(type) {
-		case *types.Named:
-			return x
-		case *types.Pointer:
-			t = x.Elem()
-		default:
-			return nil
-		}
-	}
-}
diff --git a/vendor/mvdan.cc/lint/.travis.yml b/vendor/mvdan.cc/lint/.travis.yml
deleted file mode 100644
index 2ccdeab9ad..0000000000
--- a/vendor/mvdan.cc/lint/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: go
-
-go:
- - 1.8.x
- - 1.9.x
-
-go_import_path: mvdan.cc/lint
diff --git a/vendor/mvdan.cc/lint/LICENSE b/vendor/mvdan.cc/lint/LICENSE
deleted file mode 100644
index a06c5ebfc8..0000000000
--- a/vendor/mvdan.cc/lint/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2017, Daniel Martí. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of the copyright holder nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/mvdan.cc/lint/README.md b/vendor/mvdan.cc/lint/README.md
deleted file mode 100644
index 8a9c8b51c3..0000000000
--- a/vendor/mvdan.cc/lint/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# lint
-
-[![GoDoc](https://godoc.org/mvdan.cc/lint?status.svg)](https://godoc.org/mvdan.cc/lint)
-[![Build Status](https://travis-ci.org/mvdan/lint.svg?branch=master)](https://travis-ci.org/mvdan/lint)
-
-Work in progress. Its API might change before the 1.0 release.
-
-This package intends to define simple interfaces that Go code checkers
-can implement. This would simplify calling them from Go code, as well as
-running multiple linters while sharing initial loading work.
-
-### metalint
-
-	go get -u mvdan.cc/lint/cmd/metalint
-
-The start of a linter that runs many linters leveraging the common
-interface. Not stable yet.
-
-Linters included:
-
-* [unparam](https://mvdan.cc/unparam)
-* [interfacer](https://github.com/mvdan/interfacer)
-
-### Related projects
-
-* [golinters](https://github.com/thomasheller/golinters) - Report on
-  linter support
diff --git a/vendor/mvdan.cc/lint/lint.go b/vendor/mvdan.cc/lint/lint.go
deleted file mode 100644
index a16789fad5..0000000000
--- a/vendor/mvdan.cc/lint/lint.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2017, Daniel Martí
-// See LICENSE for licensing information
-
-// Package lint defines common interfaces for Go code checkers.
-package lint // import "mvdan.cc/lint"
-
-import (
-	"go/token"
-
-	"golang.org/x/tools/go/loader"
-	"golang.org/x/tools/go/ssa"
-)
-
-// A Checker points out issues in a program.
-type Checker interface {
-	Program(*loader.Program)
-	Check() ([]Issue, error)
-}
-
-type WithSSA interface {
-	ProgramSSA(*ssa.Program)
-}
-
-// Issue represents an issue somewhere in a source code file.
-type Issue interface {
-	Pos() token.Pos
-	Message() string
-}
diff --git a/vendor/mvdan.cc/unparam/check/check.go b/vendor/mvdan.cc/unparam/check/check.go
index dcc541064c..39de800d95 100644
--- a/vendor/mvdan.cc/unparam/check/check.go
+++ b/vendor/mvdan.cc/unparam/check/check.go
@@ -21,7 +21,6 @@ import (
 	"sort"
 	"strings"
 
-	"golang.org/x/exp/typeparams"
 	"golang.org/x/tools/go/packages"
 	"golang.org/x/tools/go/ssa"
 	"golang.org/x/tools/go/ssa/ssautil"
@@ -582,6 +581,8 @@ resLoop:
 		c.addIssue(fn, res.Pos(), "result %s is never used", name)
 	}
 
+	fnIsGeneric := fn.TypeParams().Len() > 0
+
 	for i, par := range fn.Params {
 		if paramsBy != "" {
 			continue // we can't change the params
@@ -590,14 +591,19 @@ resLoop:
 			continue
 		}
 		c.debug("%s\n", par.String())
-		switch par.Object().Name() {
-		case "", "_": // unnamed
-			c.debug("  skip - unnamed\n")
+		if name := par.Object().Name(); name == "" || name[0] == '_' {
+			c.debug("  skip - no name or underscore name\n")
 			continue
 		}
-		if stdSizes.Sizeof(par.Type()) == 0 {
-			c.debug("  skip - zero size\n")
-			continue
+		t := par.Type()
+		// asking for the size of a type param would panic, as it is unknowable
+		if !fnIsGeneric || !containsTypeParam(t) {
+			if stdSizes.Sizeof(par.Type()) == 0 {
+				c.debug("  skip - zero size\n")
+				continue
+			}
+		} else {
+			c.debug("  examine - type parameter\n")
 		}
 		reason := "is unused"
 		constStr := c.alwaysReceivedConst(callSites, par, i)
@@ -611,6 +617,30 @@ resLoop:
 	}
 }
 
+func containsTypeParam(t types.Type) bool {
+	switch t := t.(type) {
+	case *types.TypeParam, *types.Union:
+		return true
+	case *types.Struct:
+		nf := t.NumFields()
+		for i := 0; i < nf; i++ {
+			if containsTypeParam(t.Field(i).Type()) {
+				return true
+			}
+		}
+	case *types.Array:
+		return containsTypeParam(t.Elem())
+	case *types.Named:
+		args := t.TypeArgs()
+		for i := 0; i < args.Len(); i++ {
+			if containsTypeParam(args.At(i)) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
 // nodeStr stringifies a syntax tree node. It is only meant for simple nodes,
 // such as short value expressions.
 func nodeStr(node ast.Node) string {
@@ -881,19 +911,12 @@ func recvPrefix(recv *ast.FieldList) string {
 	switch expr := expr.(type) {
 	case *ast.Ident:
 		return expr.Name + "."
+	case *ast.IndexExpr:
+		return expr.X.(*ast.Ident).Name + "."
+	case *ast.IndexListExpr:
+		return expr.X.(*ast.Ident).Name + "."
 	default:
-		x, _, _, _ := typeparams.UnpackIndexExpr(expr)
-		if x == nil {
-			panic(fmt.Sprintf("unexepected receiver AST node: %T", expr))
-		}
-		return x.(*ast.Ident).Name + "."
-		// TODO: remove the use of x/exp/typeparams once we drop Go 1.17
-		// case *ast.IndexExpr:
-		// 	return expr.X.(*ast.Ident).Name + "."
-		// case *ast.IndexListExpr:
-		// 	return expr.X.(*ast.Ident).Name + "."
-		// default:
-		// 	panic(fmt.Sprintf("unexepected receiver AST node: %T", expr))
+		panic(fmt.Sprintf("unexpected receiver AST node: %T", expr))
 	}
 }
diff --git a/website/docs/d/enterprise.html.markdown b/website/docs/d/enterprise.html.markdown
index b81f0b103e..251e0ee951 100644
--- a/website/docs/d/enterprise.html.markdown
+++ b/website/docs/d/enterprise.html.markdown
@@ -20,6 +20,7 @@ data "github_enterprise" "example" {
 ## Attributes Reference
 
 * `id` - The ID of the enterprise.
+* `database_id` - The database ID of the enterprise.
 * `slug` - The URL slug identifying the enterprise.
 * `name` - The name of the enterprise.
 * `description` - The description of the enterprise.
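An illustrative aside, not part of the patch: the `recvPrefix` rewrite above can drop `x/exp/typeparams` because, on Go 1.18 and later, the parser itself distinguishes generic receivers. A receiver such as `Box[T]` parses as an `*ast.IndexExpr` and `Pair[K, V]` as an `*ast.IndexListExpr`, so the receiver's type name is always reachable through the node's `X` field. A minimal standalone sketch (package and type names here are hypothetical):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

func main() {
	// Two generic types: one with a single type parameter, one with two.
	src := `package p

type Box[T any] struct{ v T }

func (b Box[T]) One() {}

type Pair[K, V any] struct {
	k K
	v V
}

func (p Pair[K, V]) Two() {}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	for _, d := range f.Decls {
		fn, ok := d.(*ast.FuncDecl)
		if !ok || fn.Recv == nil {
			continue
		}
		// The receiver type expression: Box[T] or Pair[K, V].
		expr := fn.Recv.List[0].Type
		fmt.Printf("%s: %T\n", fn.Name.Name, expr)
	}
}
```

Running this prints `One: *ast.IndexExpr` and `Two: *ast.IndexListExpr`, exactly the two cases the rewritten `recvPrefix` now handles directly instead of going through `typeparams.UnpackIndexExpr`.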
diff --git a/website/docs/d/external_groups.html.markdown b/website/docs/d/external_groups.html.markdown
index e76ee87815..5443e9b2fd 100644
--- a/website/docs/d/external_groups.html.markdown
+++ b/website/docs/d/external_groups.html.markdown
@@ -1,6 +1,6 @@
 ---
 layout: "github"
-page_title: "GitHub: github_external_group"
+page_title: "GitHub: github_external_groups"
 description: |-
   Retrieve external groups belonging to an organization.
 ---
diff --git a/website/docs/d/ip_ranges.html.markdown b/website/docs/d/ip_ranges.html.markdown
index 5bf7f75be8..7d9cbe1b53 100644
--- a/website/docs/d/ip_ranges.html.markdown
+++ b/website/docs/d/ip_ranges.html.markdown
@@ -35,6 +35,9 @@ data "github_ip_ranges" "test" {}
 * `api` - An Array of IP addresses in CIDR format for the GitHub API.
 * `api_ipv4` - A subset of the `api` array that contains IP addresses in IPv4 CIDR format.
 * `api_ipv6` - A subset of the `api` array that contains IP addresses in IPv6 CIDR format.
+* `packages` - An Array of IP addresses in CIDR format specifying the A records for GitHub Packages.
+* `packages_ipv4` - A subset of the `packages` array that contains IP addresses in IPv4 CIDR format.
+* `packages_ipv6` - A subset of the `packages` array that contains IP addresses in IPv6 CIDR format.
 * `pages` - An Array of IP addresses in CIDR format specifying the A records for GitHub Pages.
 * `pages_ipv4` - A subset of the `pages` array that contains IP addresses in IPv4 CIDR format.
 * `pages_ipv6` - A subset of the `pages` array that contains IP addresses in IPv6 CIDR format.
diff --git a/website/docs/d/organization.html.markdown b/website/docs/d/organization.html.markdown
index f7b13acfdd..b230ad2838 100644
--- a/website/docs/d/organization.html.markdown
+++ b/website/docs/d/organization.html.markdown
@@ -17,6 +17,10 @@ data "github_organization" "example" {
 }
 ```
 
+## Argument Reference
+
+* `ignore_archived_repos` - (Optional) Whether to exclude archived repositories from the `repositories` list.
+
 ## Attributes Reference
 
 * `id` - The ID of the organization
diff --git a/website/docs/d/rest_api.html.markdown b/website/docs/d/rest_api.html.markdown
index 4535d79508..de628acef6 100644
--- a/website/docs/d/rest_api.html.markdown
+++ b/website/docs/d/rest_api.html.markdown
@@ -23,7 +23,8 @@ data "github_rest_api" "example" {
 
 ## Attributes Reference
 
+ * `id` - The GitHub API Request ID.
 * `code` - A response status code.
 * `status` - A response status string.
- * `headers` - A map of response headers.
- * `body` - A map of response body.
+ * `headers` - A JSON string containing the response headers.
+ * `body` - A JSON string containing the response body.
diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown
index f2176dd914..313221133f 100644
--- a/website/docs/index.html.markdown
+++ b/website/docs/index.html.markdown
@@ -23,7 +23,7 @@ terraform {
   required_providers {
     github = {
       source  = "integrations/github"
-      version = "~> 5.0"
+      version = "~> 6.0"
     }
   }
 }
@@ -118,8 +118,14 @@ The following arguments are supported in the `provider` block:
 
 * `write_delay_ms` - (Optional) The number of milliseconds to sleep in between write operations in order to satisfy the GitHub API rate limits. Defaults to 1000ms or 1 second if not provided.
 
+* `retry_delay_ms` - (Optional) The number of milliseconds to sleep in between requests to the GitHub API after an error response. Defaults to 1000ms or 1 second if not provided. `max_retries` must be greater than zero for this setting to take effect.
+
 * `read_delay_ms` - (Optional) The number of milliseconds to sleep in between non-write operations in order to satisfy the GitHub API rate limits. Defaults to 0ms.
 
+* `retryable_errors` - (Optional) A list of HTTP error status codes after which the provider retries a request. `max_retries` must be set for this to take effect. Defaults to `[500, 502, 503, 504]`.
+
+* `max_retries` - (Optional) The number of times to retry a request after receiving an error status code. Defaults to `3`.
+
 Note: If you have a PEM file on disk, you can pass it in via `pem_file = file("path/to/file.pem")`.
 
 For backwards compatibility, if more than one of `owner`, `organization`,
diff --git a/website/docs/r/branch_protection.html.markdown b/website/docs/r/branch_protection.html.markdown
index a9c1dcc2be..ae2c6dc3d4 100644
--- a/website/docs/r/branch_protection.html.markdown
+++ b/website/docs/r/branch_protection.html.markdown
@@ -11,7 +11,7 @@ Protects a GitHub branch.
 
 This resource allows you to configure branch protection for repositories in your organization. When applied, the branch will be protected from forced pushes and deletion. Additional constraints, such as required status checks or restrictions on users, teams, and apps, can also be configured.
 
-Note: for the `push_restrictions` a given user or team must have specific write access to the repository. If specific write access not provided, github will reject the given actor, which will be the cause of terraform drift.
+Note: for `push_allowances`, a given user or team must have explicit write access to the repository. If write access is not granted, GitHub will reject the given actor, which will cause Terraform drift.
 
 ## Example Usage
 
@@ -45,15 +45,17 @@ resource "github_branch_protection" "example" {
     ]
   }
 
-  push_restrictions = [
-    data.github_user.example.node_id,
-    "/exampleuser",
-    "exampleorganization/exampleteam",
-    # you can have more than one type of restriction (teams + users). If you use
-    # more than one type, you must use node_ids of each user and each team.
-    # github_team.example.node_id
-    # github_user.example-2.node_id
-  ]
+  restrict_pushes {
+    push_allowances = [
+      data.github_user.example.node_id,
+      "/exampleuser",
+      "exampleorganization/exampleteam",
+      # you can have more than one type of restriction (teams + users). If you use
+      # more than one type, you must use node_ids of each user and each team.
+      # github_team.example.node_id
+      # github_user.example-2.node_id
+    ]
+  }
 
   force_push_bypassers = [
     data.github_user.example.node_id,
@@ -97,11 +99,10 @@ The following arguments are supported:
 * `require_conversation_resolution` - (Optional) Boolean, setting this to `true` requires all conversations on code must be resolved before a pull request can be merged.
 * `required_status_checks` - (Optional) Enforce restrictions for required status checks. See [Required Status Checks](#required-status-checks) below for details.
 * `required_pull_request_reviews` - (Optional) Enforce restrictions for pull request reviews. See [Required Pull Request Reviews](#required-pull-request-reviews) below for details.
-* `push_restrictions` - (Optional) The list of actor Names/IDs that may push to the branch. Actor names must either begin with a "/" for users or the organization name followed by a "/" for teams.
-* `force_push_bypassers` - (Optional) The list of actor Names/IDs that are allowed to bypass force push restrictions. Actor names must either begin with a "/" for users or the organization name followed by a "/" for teams.
+* `restrict_pushes` - (Optional) Restrict pushes to matching branches. See [Restrict Pushes](#restrict-pushes) below for details.
+* `force_push_bypassers` - (Optional) The list of actor Names/IDs that are allowed to bypass force push restrictions. Actor names must either begin with a "/" for users or the organization name followed by a "/" for teams. If the list is not empty, `allows_force_pushes` should be set to `false`.
 * `allows_deletions` - (Optional) Boolean, setting this to `true` to allow the branch to be deleted.
-* `allows_force_pushes` - (Optional) Boolean, setting this to `true` to allow force pushes on the branch.
-* `blocks_creations` - (Optional) Boolean, setting this to `true` to block creating the branch.
+* `allows_force_pushes` - (Optional) Boolean, setting this to `true` allows force pushes on the branch by everyone. Set it to `false` if you specify `force_push_bypassers`.
 * `lock_branch` - (Optional) Boolean, Setting this to `true` will make the branch read-only and preventing any pushes to it. Defaults to `false`
 
 ### Required Status Checks
 
@@ -129,6 +130,13 @@ For workflows that use reusable workflows, the pattern is ` Resources